author    André Fabian Silva Delgado <emulatorman@parabola.nu>    2015-08-05 17:04:01 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>    2015-08-05 17:04:01 -0300
commit    57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree      5e910f0e82173f4ef4f51111366a3f1299037a7b /include/linux
Initial import
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/8250_pci.h37
-rw-r--r--include/linux/a.out.h17
-rw-r--r--include/linux/acct.h105
-rw-r--r--include/linux/acpi.h815
-rw-r--r--include/linux/acpi_dma.h121
-rw-r--r--include/linux/acpi_irq.h10
-rw-r--r--include/linux/acpi_pmtmr.h38
-rw-r--r--include/linux/adb.h66
-rw-r--r--include/linux/adfs_fs.h23
-rw-r--r--include/linux/aer.h67
-rw-r--r--include/linux/agp_backend.h109
-rw-r--r--include/linux/agpgart.h130
-rw-r--r--include/linux/ahci_platform.h44
-rw-r--r--include/linux/aio.h33
-rw-r--r--include/linux/alarmtimer.h59
-rw-r--r--include/linux/altera_jtaguart.h16
-rw-r--r--include/linux/altera_uart.h15
-rw-r--r--include/linux/amba/bus.h168
-rw-r--r--include/linux/amba/clcd.h330
-rw-r--r--include/linux/amba/kmi.h92
-rw-r--r--include/linux/amba/mmci.h36
-rw-r--r--include/linux/amba/pl022.h295
-rw-r--r--include/linux/amba/pl061.h16
-rw-r--r--include/linux/amba/pl080.h147
-rw-r--r--include/linux/amba/pl08x.h107
-rw-r--r--include/linux/amba/pl093.h80
-rw-r--r--include/linux/amba/pl330.h35
-rw-r--r--include/linux/amba/serial.h214
-rw-r--r--include/linux/amba/sp810.h62
-rw-r--r--include/linux/amd-iommu.h178
-rw-r--r--include/linux/amifd.h62
-rw-r--r--include/linux/amifdreg.h81
-rw-r--r--include/linux/amigaffs.h144
-rw-r--r--include/linux/anon_inodes.h20
-rw-r--r--include/linux/apm-emulation.h62
-rw-r--r--include/linux/apm_bios.h101
-rw-r--r--include/linux/apple_bl.h26
-rw-r--r--include/linux/arcdevice.h342
-rw-r--r--include/linux/arm-cci.h68
-rw-r--r--include/linux/asn1.h69
-rw-r--r--include/linux/asn1_ber_bytecode.h87
-rw-r--r--include/linux/asn1_decoder.h24
-rw-r--r--include/linux/assoc_array.h92
-rw-r--r--include/linux/assoc_array_priv.h182
-rw-r--r--include/linux/async.h50
-rw-r--r--include/linux/async_tx.h208
-rw-r--r--include/linux/ata.h1049
-rw-r--r--include/linux/ata_platform.h30
-rw-r--r--include/linux/atalk.h168
-rw-r--r--include/linux/ath9k_platform.h46
-rw-r--r--include/linux/atm.h15
-rw-r--r--include/linux/atm_suni.h12
-rw-r--r--include/linux/atm_tcp.h21
-rw-r--r--include/linux/atmdev.h317
-rw-r--r--include/linux/atmel-mci.h43
-rw-r--r--include/linux/atmel-ssc.h333
-rw-r--r--include/linux/atmel_pdc.h38
-rw-r--r--include/linux/atmel_serial.h130
-rw-r--r--include/linux/atmel_tc.h269
-rw-r--r--include/linux/atomic.h131
-rw-r--r--include/linux/attribute_container.h72
-rw-r--r--include/linux/audit.h550
-rw-r--r--include/linux/auto_dev-ioctl.h229
-rw-r--r--include/linux/auto_fs.h20
-rw-r--r--include/linux/auxvec.h8
-rw-r--r--include/linux/average.h30
-rw-r--r--include/linux/b1pcmcia.h21
-rw-r--r--include/linux/backing-dev.h321
-rw-r--r--include/linux/backlight.h170
-rw-r--r--include/linux/balloon_compaction.h216
-rw-r--r--include/linux/basic_mmio_gpio.h78
-rw-r--r--include/linux/bcd.h22
-rw-r--r--include/linux/bch.h79
-rw-r--r--include/linux/bcm47xx_nvram.h34
-rw-r--r--include/linux/bcm47xx_wdt.h29
-rw-r--r--include/linux/bcma/bcma.h474
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h699
-rw-r--r--include/linux/bcma/bcma_driver_gmac_cmn.h94
-rw-r--r--include/linux/bcma/bcma_driver_mips.h44
-rw-r--r--include/linux/bcma/bcma_driver_pci.h252
-rw-r--r--include/linux/bcma/bcma_driver_pcie2.h158
-rw-r--r--include/linux/bcma/bcma_regs.h101
-rw-r--r--include/linux/bcma/bcma_soc.h15
-rw-r--r--include/linux/bfin_mac.h30
-rw-r--r--include/linux/binfmts.h123
-rw-r--r--include/linux/bio.h777
-rw-r--r--include/linux/bit_spinlock.h100
-rw-r--r--include/linux/bitmap.h331
-rw-r--r--include/linux/bitops.h230
-rw-r--r--include/linux/bitrev.h85
-rw-r--r--include/linux/blk-iopoll.h46
-rw-r--r--include/linux/blk-mq.h266
-rw-r--r--include/linux/blk_types.h254
-rw-r--r--include/linux/blkdev.h1673
-rw-r--r--include/linux/blktrace_api.h116
-rw-r--r--include/linux/blockgroup_lock.h62
-rw-r--r--include/linux/bma150.h58
-rw-r--r--include/linux/bootmem.h368
-rw-r--r--include/linux/bottom_half.h35
-rw-r--r--include/linux/bpf.h164
-rw-r--r--include/linux/brcmphy.h228
-rw-r--r--include/linux/bsearch.h9
-rw-r--r--include/linux/bsg-lib.h71
-rw-r--r--include/linux/bsg.h33
-rw-r--r--include/linux/btree-128.h109
-rw-r--r--include/linux/btree-type.h147
-rw-r--r--include/linux/btree.h243
-rw-r--r--include/linux/btrfs.h6
-rw-r--r--include/linux/buffer_head.h403
-rw-r--r--include/linux/bug.h112
-rw-r--r--include/linux/byteorder/big_endian.h7
-rw-r--r--include/linux/byteorder/generic.h173
-rw-r--r--include/linux/byteorder/little_endian.h7
-rw-r--r--include/linux/c2port.h66
-rw-r--r--include/linux/cache.h67
-rw-r--r--include/linux/cacheinfo.h100
-rw-r--r--include/linux/can/core.h61
-rw-r--r--include/linux/can/dev.h146
-rw-r--r--include/linux/can/led.h53
-rw-r--r--include/linux/can/platform/cc770.h33
-rw-r--r--include/linux/can/platform/mcp251x.h21
-rw-r--r--include/linux/can/platform/rcar_can.h17
-rw-r--r--include/linux/can/platform/sja1000.h35
-rw-r--r--include/linux/can/platform/ti_hecc.h44
-rw-r--r--include/linux/can/skb.h78
-rw-r--r--include/linux/capability.h251
-rw-r--r--include/linux/cb710.h208
-rw-r--r--include/linux/cciss_ioctl.h31
-rw-r--r--include/linux/ccp.h556
-rw-r--r--include/linux/cdev.h33
-rw-r--r--include/linux/cdrom.h314
-rw-r--r--include/linux/ceph/auth.h142
-rw-r--r--include/linux/ceph/buffer.h37
-rw-r--r--include/linux/ceph/ceph_debug.h38
-rw-r--r--include/linux/ceph/ceph_features.h119
-rw-r--r--include/linux/ceph/ceph_frag.h109
-rw-r--r--include/linux/ceph/ceph_fs.h763
-rw-r--r--include/linux/ceph/ceph_hash.h13
-rw-r--r--include/linux/ceph/debugfs.h27
-rw-r--r--include/linux/ceph/decode.h259
-rw-r--r--include/linux/ceph/libceph.h230
-rw-r--r--include/linux/ceph/mdsmap.h63
-rw-r--r--include/linux/ceph/messenger.h303
-rw-r--r--include/linux/ceph/mon_client.h119
-rw-r--r--include/linux/ceph/msgpool.h26
-rw-r--r--include/linux/ceph/msgr.h185
-rw-r--r--include/linux/ceph/osd_client.h377
-rw-r--r--include/linux/ceph/osdmap.h224
-rw-r--r--include/linux/ceph/pagelist.h80
-rw-r--r--include/linux/ceph/rados.h469
-rw-r--r--include/linux/ceph/types.h29
-rw-r--r--include/linux/cfag12864b.h82
-rw-r--r--include/linux/cgroup.h971
-rw-r--r--include/linux/cgroup_subsys.h62
-rw-r--r--include/linux/circ_buf.h36
-rw-r--r--include/linux/cleancache.h125
-rw-r--r--include/linux/clk-provider.h698
-rw-r--r--include/linux/clk.h506
-rw-r--r--include/linux/clk/at91_pmc.h194
-rw-r--r--include/linux/clk/bcm2835.h24
-rw-r--r--include/linux/clk/clk-conf.h20
-rw-r--r--include/linux/clk/mxs.h14
-rw-r--r--include/linux/clk/shmobile.h23
-rw-r--r--include/linux/clk/tegra.h123
-rw-r--r--include/linux/clk/ti.h376
-rw-r--r--include/linux/clk/zynq.h30
-rw-r--r--include/linux/clkdev.h51
-rw-r--r--include/linux/clksrc-dbx500-prcmu.h20
-rw-r--r--include/linux/clock_cooling.h65
-rw-r--r--include/linux/clockchips.h218
-rw-r--r--include/linux/clocksource.h262
-rw-r--r--include/linux/cm4000_cs.h10
-rw-r--r--include/linux/cma.h31
-rw-r--r--include/linux/cmdline-parser.h45
-rw-r--r--include/linux/cn_proc.h58
-rw-r--r--include/linux/cnt32_to_63.h107
-rw-r--r--include/linux/coda.h65
-rw-r--r--include/linux/coda_psdev.h72
-rw-r--r--include/linux/com20020.h145
-rw-r--r--include/linux/compaction.h104
-rw-r--r--include/linux/compat.h721
-rw-r--r--include/linux/compiler-clang.h12
-rw-r--r--include/linux/compiler-gcc.h133
-rw-r--r--include/linux/compiler-gcc3.h23
-rw-r--r--include/linux/compiler-gcc4.h91
-rw-r--r--include/linux/compiler-gcc5.h67
-rw-r--r--include/linux/compiler-intel.h45
-rw-r--r--include/linux/compiler.h468
-rw-r--r--include/linux/completion.h109
-rw-r--r--include/linux/component.h39
-rw-r--r--include/linux/concap.h112
-rw-r--r--include/linux/configfs.h260
-rw-r--r--include/linux/connector.h88
-rw-r--r--include/linux/console.h194
-rw-r--r--include/linux/console_struct.h140
-rw-r--r--include/linux/consolemap.h34
-rw-r--r--include/linux/container.h25
-rw-r--r--include/linux/context_tracking.h120
-rw-r--r--include/linux/context_tracking_state.h47
-rw-r--r--include/linux/cordic.h48
-rw-r--r--include/linux/coredump.h23
-rw-r--r--include/linux/coresight.h251
-rw-r--r--include/linux/cper.h433
-rw-r--r--include/linux/cpu.h294
-rw-r--r--include/linux/cpu_cooling.h88
-rw-r--r--include/linux/cpu_pm.h109
-rw-r--r--include/linux/cpu_rmap.h69
-rw-r--r--include/linux/cpufeature.h60
-rw-r--r--include/linux/cpufreq-dt.h22
-rw-r--r--include/linux/cpufreq.h604
-rw-r--r--include/linux/cpuidle.h245
-rw-r--r--include/linux/cpumask.h820
-rw-r--r--include/linux/cpuset.h242
-rw-r--r--include/linux/cputime.h16
-rw-r--r--include/linux/crash_dump.h91
-rw-r--r--include/linux/crc-ccitt.h15
-rw-r--r--include/linux/crc-itu-t.h28
-rw-r--r--include/linux/crc-t10dif.h13
-rw-r--r--include/linux/crc16.h30
-rw-r--r--include/linux/crc32.h79
-rw-r--r--include/linux/crc32c.h11
-rw-r--r--include/linux/crc7.h14
-rw-r--r--include/linux/crc8.h101
-rw-r--r--include/linux/cred.h400
-rw-r--r--include/linux/crush/crush.h209
-rw-r--r--include/linux/crush/hash.h17
-rw-r--r--include/linux/crush/mapper.h20
-rw-r--r--include/linux/crypto.h2402
-rw-r--r--include/linux/cryptohash.h20
-rw-r--r--include/linux/cryptouser.h105
-rw-r--r--include/linux/cs5535.h239
-rw-r--r--include/linux/ctype.h70
-rw-r--r--include/linux/cuda.h18
-rw-r--r--include/linux/cyclades.h360
-rw-r--r--include/linux/davinci_emac.h50
-rw-r--r--include/linux/dca.h81
-rw-r--r--include/linux/dcache.h579
-rw-r--r--include/linux/dccp.h324
-rw-r--r--include/linux/dcookies.h68
-rw-r--r--include/linux/debug_locks.h75
-rw-r--r--include/linux/debugfs.h288
-rw-r--r--include/linux/debugobjects.h110
-rw-r--r--include/linux/decompress/bunzip2.h10
-rw-r--r--include/linux/decompress/generic.h39
-rw-r--r--include/linux/decompress/inflate.h10
-rw-r--r--include/linux/decompress/mm.h93
-rw-r--r--include/linux/decompress/unlz4.h10
-rw-r--r--include/linux/decompress/unlzma.h12
-rw-r--r--include/linux/decompress/unlzo.h10
-rw-r--r--include/linux/decompress/unxz.h19
-rw-r--r--include/linux/delay.h55
-rw-r--r--include/linux/delayacct.h153
-rw-r--r--include/linux/dell-led.h10
-rw-r--r--include/linux/devcoredump.h35
-rw-r--r--include/linux/devfreq-event.h196
-rw-r--r--include/linux/devfreq.h294
-rw-r--r--include/linux/device-mapper.h608
-rw-r--r--include/linux/device.h1272
-rw-r--r--include/linux/device_cgroup.h19
-rw-r--r--include/linux/devpts_fs.h49
-rw-r--r--include/linux/digsig.h64
-rw-r--r--include/linux/dio.h280
-rw-r--r--include/linux/dirent.h12
-rw-r--r--include/linux/dlm.h172
-rw-r--r--include/linux/dlm_plock.h19
-rw-r--r--include/linux/dm-dirty-log.h146
-rw-r--r--include/linux/dm-io.h84
-rw-r--r--include/linux/dm-kcopyd.h88
-rw-r--r--include/linux/dm-region-hash.h103
-rw-r--r--include/linux/dm9000.h42
-rw-r--r--include/linux/dma-attrs.h80
-rw-r--r--include/linux/dma-buf.h236
-rw-r--r--include/linux/dma-contiguous.h164
-rw-r--r--include/linux/dma-debug.h194
-rw-r--r--include/linux/dma-direction.h13
-rw-r--r--include/linux/dma-mapping.h317
-rw-r--r--include/linux/dma/dw.h64
-rw-r--r--include/linux/dma/hsu.h48
-rw-r--r--include/linux/dma/ipu-dma.h177
-rw-r--r--include/linux/dma/mmp-pdma.h15
-rw-r--r--include/linux/dma/xilinx_dma.h47
-rw-r--r--include/linux/dma_remapping.h48
-rw-r--r--include/linux/dmaengine.h1106
-rw-r--r--include/linux/dmapool.h37
-rw-r--r--include/linux/dmar.h232
-rw-r--r--include/linux/dmi.h146
-rw-r--r--include/linux/dnotify.h50
-rw-r--r--include/linux/dns_resolver.h34
-rw-r--r--include/linux/dqblk_qtree.h56
-rw-r--r--include/linux/dqblk_v1.h14
-rw-r--r--include/linux/dqblk_v2.h16
-rw-r--r--include/linux/drbd.h381
-rw-r--r--include/linux/drbd_genl.h384
-rw-r--r--include/linux/drbd_genl_api.h55
-rw-r--r--include/linux/drbd_limits.h233
-rw-r--r--include/linux/ds1286.h52
-rw-r--r--include/linux/ds17287rtc.h66
-rw-r--r--include/linux/ds2782_battery.h8
-rw-r--r--include/linux/dtlk.h85
-rw-r--r--include/linux/dw_apb_timer.h55
-rw-r--r--include/linux/dynamic_debug.h137
-rw-r--r--include/linux/dynamic_queue_limits.h105
-rw-r--r--include/linux/earlycpio.h17
-rw-r--r--include/linux/ecryptfs.h105
-rw-r--r--include/linux/edac.h785
-rw-r--r--include/linux/edd.h38
-rw-r--r--include/linux/edma.h29
-rw-r--r--include/linux/eeprom_93cx6.h86
-rw-r--r--include/linux/eeprom_93xx46.h18
-rw-r--r--include/linux/efi-bgrt.h21
-rw-r--r--include/linux/efi.h1254
-rw-r--r--include/linux/efs_vh.h53
-rw-r--r--include/linux/eisa.h111
-rw-r--r--include/linux/elevator.h212
-rw-r--r--include/linux/elf-fdpic.h51
-rw-r--r--include/linux/elf-randomize.h22
-rw-r--r--include/linux/elf.h56
-rw-r--r--include/linux/elfcore-compat.h55
-rw-r--r--include/linux/elfcore.h73
-rw-r--r--include/linux/elfnote.h98
-rw-r--r--include/linux/enclosure.h142
-rw-r--r--include/linux/err.h69
-rw-r--r--include/linux/errno.h32
-rw-r--r--include/linux/errqueue.h25
-rw-r--r--include/linux/etherdevice.h410
-rw-r--r--include/linux/ethtool.h285
-rw-r--r--include/linux/eventfd.h84
-rw-r--r--include/linux/eventpoll.h71
-rw-r--r--include/linux/evm.h100
-rw-r--r--include/linux/export.h98
-rw-r--r--include/linux/exportfs.h237
-rw-r--r--include/linux/ext2_fs.h42
-rw-r--r--include/linux/extcon.h377
-rw-r--r--include/linux/extcon/extcon-adc-jack.h71
-rw-r--r--include/linux/extcon/extcon-gpio.h59
-rw-r--r--include/linux/f2fs_fs.h476
-rw-r--r--include/linux/f75375s.h21
-rw-r--r--include/linux/falloc.h30
-rw-r--r--include/linux/fanotify.h8
-rw-r--r--include/linux/fault-inject.h74
-rw-r--r--include/linux/fb.h818
-rw-r--r--include/linux/fcdevice.h33
-rw-r--r--include/linux/fcntl.h31
-rw-r--r--include/linux/fd.h24
-rw-r--r--include/linux/fddidevice.h33
-rw-r--r--include/linux/fdtable.h118
-rw-r--r--include/linux/fec.h25
-rw-r--r--include/linux/fence.h360
-rw-r--r--include/linux/file.h77
-rw-r--r--include/linux/filter.h482
-rw-r--r--include/linux/fips.h10
-rw-r--r--include/linux/firewire.h472
-rw-r--r--include/linux/firmware-map.h49
-rw-r--r--include/linux/firmware.h170
-rw-r--r--include/linux/fixp-arith.h156
-rw-r--r--include/linux/flat.h52
-rw-r--r--include/linux/flex_array.h81
-rw-r--r--include/linux/flex_proportions.h102
-rw-r--r--include/linux/fmc-sdb.h38
-rw-r--r--include/linux/fmc.h237
-rw-r--r--include/linux/font.h60
-rw-r--r--include/linux/freezer.h301
-rw-r--r--include/linux/frontswap.h107
-rw-r--r--include/linux/fs.h3008
-rw-r--r--include/linux/fs_enet_pd.h165
-rw-r--r--include/linux/fs_pin.h24
-rw-r--r--include/linux/fs_stack.h29
-rw-r--r--include/linux/fs_struct.h44
-rw-r--r--include/linux/fs_uart_pd.h71
-rw-r--r--include/linux/fs_uuid.h19
-rw-r--r--include/linux/fscache-cache.h554
-rw-r--r--include/linux/fscache.h832
-rw-r--r--include/linux/fsl-diu-fb.h173
-rw-r--r--include/linux/fsl/bestcomm/ata.h30
-rw-r--r--include/linux/fsl/bestcomm/bestcomm.h213
-rw-r--r--include/linux/fsl/bestcomm/bestcomm_priv.h350
-rw-r--r--include/linux/fsl/bestcomm/fec.h61
-rw-r--r--include/linux/fsl/bestcomm/gen_bd.h53
-rw-r--r--include/linux/fsl/bestcomm/sram.h54
-rw-r--r--include/linux/fsl_devices.h147
-rw-r--r--include/linux/fsl_hypervisor.h63
-rw-r--r--include/linux/fsl_ifc.h849
-rw-r--r--include/linux/fsldma.h13
-rw-r--r--include/linux/fsnotify.h344
-rw-r--r--include/linux/fsnotify_backend.h405
-rw-r--r--include/linux/ftrace.h905
-rw-r--r--include/linux/ftrace_event.h627
-rw-r--r--include/linux/ftrace_irq.h13
-rw-r--r--include/linux/futex.h71
-rw-r--r--include/linux/fwnode.h27
-rw-r--r--include/linux/gameport.h219
-rw-r--r--include/linux/gcd.h8
-rw-r--r--include/linux/genalloc.h137
-rw-r--r--include/linux/genetlink.h42
-rw-r--r--include/linux/genhd.h733
-rw-r--r--include/linux/genl_magic_func.h413
-rw-r--r--include/linux/genl_magic_struct.h277
-rw-r--r--include/linux/getcpu.h18
-rw-r--r--include/linux/gfp.h420
-rw-r--r--include/linux/glob.h9
-rw-r--r--include/linux/goldfish.h15
-rw-r--r--include/linux/gpio-fan.h36
-rw-r--r--include/linux/gpio-pxa.h21
-rw-r--r--include/linux/gpio.h284
-rw-r--r--include/linux/gpio/consumer.h495
-rw-r--r--include/linux/gpio/driver.h239
-rw-r--r--include/linux/gpio/machine.h61
-rw-r--r--include/linux/gpio_keys.h58
-rw-r--r--include/linux/gpio_mouse.h61
-rw-r--r--include/linux/gsmmux.h36
-rw-r--r--include/linux/hardirq.h82
-rw-r--r--include/linux/hash.h86
-rw-r--r--include/linux/hashtable.h205
-rw-r--r--include/linux/hdlc.h120
-rw-r--r--include/linux/hdlcdrv.h275
-rw-r--r--include/linux/hdmi.h336
-rw-r--r--include/linux/hid-debug.h67
-rw-r--r--include/linux/hid-roccat.h29
-rw-r--r--include/linux/hid-sensor-hub.h273
-rw-r--r--include/linux/hid-sensor-ids.h157
-rw-r--r--include/linux/hid.h1144
-rw-r--r--include/linux/hiddev.h56
-rw-r--r--include/linux/hidraw.h59
-rw-r--r--include/linux/highmem.h249
-rw-r--r--include/linux/highuid.h97
-rw-r--r--include/linux/hil.h483
-rw-r--r--include/linux/hil_mlc.h168
-rw-r--r--include/linux/hippidevice.h41
-rw-r--r--include/linux/host1x.h309
-rw-r--r--include/linux/hp_sdc.h301
-rw-r--r--include/linux/hpet.h110
-rw-r--r--include/linux/hrtimer.h454
-rw-r--r--include/linux/hsi/hsi.h444
-rw-r--r--include/linux/hsi/ssi_protocol.h42
-rw-r--r--include/linux/htcpld.h24
-rw-r--r--include/linux/htirq.h24
-rw-r--r--include/linux/huge_mm.h223
-rw-r--r--include/linux/hugetlb.h520
-rw-r--r--include/linux/hugetlb_cgroup.h125
-rw-r--r--include/linux/hugetlb_inline.h22
-rw-r--r--include/linux/hw_breakpoint.h127
-rw-r--r--include/linux/hw_random.h64
-rw-r--r--include/linux/hwmon-sysfs.h57
-rw-r--r--include/linux/hwmon-vid.h45
-rw-r--r--include/linux/hwmon.h33
-rw-r--r--include/linux/hwspinlock.h313
-rw-r--r--include/linux/hyperv.h1256
-rw-r--r--include/linux/i2c-algo-bit.h55
-rw-r--r--include/linux/i2c-algo-pca.h71
-rw-r--r--include/linux/i2c-algo-pcf.h49
-rw-r--r--include/linux/i2c-dev.h28
-rw-r--r--include/linux/i2c-gpio.h38
-rw-r--r--include/linux/i2c-mux-gpio.h43
-rw-r--r--include/linux/i2c-mux-pinctrl.h41
-rw-r--r--include/linux/i2c-mux.h49
-rw-r--r--include/linux/i2c-ocores.h22
-rw-r--r--include/linux/i2c-omap.h38
-rw-r--r--include/linux/i2c-pca-platform.h12
-rw-r--r--include/linux/i2c-pnx.h38
-rw-r--r--include/linux/i2c-pxa.h17
-rw-r--r--include/linux/i2c-smbus.h51
-rw-r--r--include/linux/i2c-xiic.h43
-rw-r--r--include/linux/i2c.h654
-rw-r--r--include/linux/i2c/adp5588.h172
-rw-r--r--include/linux/i2c/adp8860.h154
-rw-r--r--include/linux/i2c/adp8870.h153
-rw-r--r--include/linux/i2c/ads1015.h36
-rw-r--r--include/linux/i2c/apds990x.h79
-rw-r--r--include/linux/i2c/atmel_mxt_ts.h25
-rw-r--r--include/linux/i2c/bfin_twi.h145
-rw-r--r--include/linux/i2c/bh1770glc.h53
-rw-r--r--include/linux/i2c/dm355evm_msp.h79
-rw-r--r--include/linux/i2c/ds620.h21
-rw-r--r--include/linux/i2c/i2c-hid.h36
-rw-r--r--include/linux/i2c/i2c-rcar.h10
-rw-r--r--include/linux/i2c/i2c-sh_mobile.h11
-rw-r--r--include/linux/i2c/lm8323.h46
-rw-r--r--include/linux/i2c/ltc4245.h21
-rw-r--r--include/linux/i2c/max6639.h14
-rw-r--r--include/linux/i2c/max732x.h22
-rw-r--r--include/linux/i2c/mcs.h35
-rw-r--r--include/linux/i2c/mms114.h24
-rw-r--r--include/linux/i2c/mpr121_touchkey.h20
-rw-r--r--include/linux/i2c/pca954x.h48
-rw-r--r--include/linux/i2c/pcf857x.h44
-rw-r--r--include/linux/i2c/pmbus.h49
-rw-r--r--include/linux/i2c/pxa-i2c.h85
-rw-r--r--include/linux/i2c/sx150x.h82
-rw-r--r--include/linux/i2c/tc35876x.h11
-rw-r--r--include/linux/i2c/tps65010.h205
-rw-r--r--include/linux/i2c/tsc2007.h22
-rw-r--r--include/linux/i2c/twl.h875
-rw-r--r--include/linux/i2c/twl4030-madc.h147
-rw-r--r--include/linux/i7300_idle.h83
-rw-r--r--include/linux/i8042.h105
-rw-r--r--include/linux/i8253.h29
-rw-r--r--include/linux/icmp.h27
-rw-r--r--include/linux/icmpv6.h45
-rw-r--r--include/linux/ide.h1554
-rw-r--r--include/linux/idr.h186
-rw-r--r--include/linux/ieee80211.h2548
-rw-r--r--include/linux/ieee802154.h252
-rw-r--r--include/linux/if_arp.h47
-rw-r--r--include/linux/if_bridge.h77
-rw-r--r--include/linux/if_eql.h49
-rw-r--r--include/linux/if_ether.h35
-rw-r--r--include/linux/if_fddi.h121
-rw-r--r--include/linux/if_frad.h97
-rw-r--r--include/linux/if_link.h19
-rw-r--r--include/linux/if_ltalk.h7
-rw-r--r--include/linux/if_macvlan.h115
-rw-r--r--include/linux/if_phonet.h14
-rw-r--r--include/linux/if_pppol2tp.h21
-rw-r--r--include/linux/if_pppox.h98
-rw-r--r--include/linux/if_team.h299
-rw-r--r--include/linux/if_tun.h32
-rw-r--r--include/linux/if_tunnel.h16
-rw-r--r--include/linux/if_vlan.h631
-rw-r--r--include/linux/igmp.h134
-rw-r--r--include/linux/ihex.h89
-rw-r--r--include/linux/iio/accel/kxcjk_1013.h22
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h173
-rw-r--r--include/linux/iio/buffer.h180
-rw-r--r--include/linux/iio/common/ssp_sensors.h82
-rw-r--r--include/linux/iio/common/st_sensors.h290
-rw-r--r--include/linux/iio/common/st_sensors_i2c.h31
-rw-r--r--include/linux/iio/common/st_sensors_spi.h20
-rw-r--r--include/linux/iio/consumer.h209
-rw-r--r--include/linux/iio/dac/ad5421.h28
-rw-r--r--include/linux/iio/dac/ad5504.h16
-rw-r--r--include/linux/iio/dac/ad5791.h25
-rw-r--r--include/linux/iio/dac/max517.h15
-rw-r--r--include/linux/iio/dac/mcp4725.h16
-rw-r--r--include/linux/iio/driver.h31
-rw-r--r--include/linux/iio/events.h59
-rw-r--r--include/linux/iio/frequency/ad9523.h195
-rw-r--r--include/linux/iio/frequency/adf4350.h128
-rw-r--r--include/linux/iio/gyro/itg3200.h154
-rw-r--r--include/linux/iio/iio.h652
-rw-r--r--include/linux/iio/imu/adis.h283
-rw-r--r--include/linux/iio/kfifo_buf.h14
-rw-r--r--include/linux/iio/machine.h31
-rw-r--r--include/linux/iio/sysfs.h127
-rw-r--r--include/linux/iio/trigger.h149
-rw-r--r--include/linux/iio/trigger_consumer.h63
-rw-r--r--include/linux/iio/triggered_buffer.h15
-rw-r--r--include/linux/iio/types.h30
-rw-r--r--include/linux/ima.h82
-rw-r--r--include/linux/in.h104
-rw-r--r--include/linux/in6.h48
-rw-r--r--include/linux/inet.h57
-rw-r--r--include/linux/inet_diag.h47
-rw-r--r--include/linux/inet_lro.h142
-rw-r--r--include/linux/inetdevice.h258
-rw-r--r--include/linux/init.h389
-rw-r--r--include/linux/init_ohci1394_dma.h4
-rw-r--r--include/linux/init_task.h339
-rw-r--r--include/linux/initrd.h20
-rw-r--r--include/linux/inotify.h22
-rw-r--r--include/linux/input-polldev.h61
-rw-r--r--include/linux/input.h534
-rw-r--r--include/linux/input/ad714x.h64
-rw-r--r--include/linux/input/adp5589.h188
-rw-r--r--include/linux/input/adxl34x.h358
-rw-r--r--include/linux/input/as5011.h20
-rw-r--r--include/linux/input/auo-pixcir-ts.h54
-rw-r--r--include/linux/input/bu21013.h34
-rw-r--r--include/linux/input/cma3000.h59
-rw-r--r--include/linux/input/cy8ctmg110_pdata.h10
-rw-r--r--include/linux/input/cyttsp.h58
-rw-r--r--include/linux/input/edt-ft5x06.h24
-rw-r--r--include/linux/input/eeti_ts.h10
-rw-r--r--include/linux/input/gp2ap002a00f.h22
-rw-r--r--include/linux/input/gpio_tilt.h73
-rw-r--r--include/linux/input/ili210x.h10
-rw-r--r--include/linux/input/kxtj9.h61
-rw-r--r--include/linux/input/lm8333.h24
-rw-r--r--include/linux/input/matrix_keypad.h103
-rw-r--r--include/linux/input/mt.h127
-rw-r--r--include/linux/input/navpoint.h12
-rw-r--r--include/linux/input/pixcir_ts.h64
-rw-r--r--include/linux/input/samsung-keypad.h43
-rw-r--r--include/linux/input/sh_keysc.h15
-rw-r--r--include/linux/input/sparse-keymap.h62
-rw-r--r--include/linux/input/tca8418_keypad.h44
-rw-r--r--include/linux/input/touchscreen.h22
-rw-r--r--include/linux/input/tps6507x-ts.h23
-rw-r--r--include/linux/integrity.h46
-rw-r--r--include/linux/intel-iommu.h384
-rw-r--r--include/linux/intel_pmic_gpio.h15
-rw-r--r--include/linux/interrupt.h672
-rw-r--r--include/linux/interval_tree.h27
-rw-r--r--include/linux/interval_tree_generic.h191
-rw-r--r--include/linux/io-mapping.h168
-rw-r--r--include/linux/io.h116
-rw-r--r--include/linux/ioc3.h93
-rw-r--r--include/linux/ioc4.h184
-rw-r--r--include/linux/iocontext.h157
-rw-r--r--include/linux/iommu-common.h51
-rw-r--r--include/linux/iommu-helper.h34
-rw-r--r--include/linux/iommu.h487
-rw-r--r--include/linux/iopoll.h144
-rw-r--r--include/linux/ioport.h252
-rw-r--r--include/linux/ioprio.h81
-rw-r--r--include/linux/iova.h91
-rw-r--r--include/linux/ip.h37
-rw-r--r--include/linux/ipack.h289
-rw-r--r--include/linux/ipc.h26
-rw-r--r--include/linux/ipc_namespace.h160
-rw-r--r--include/linux/ipmi-fru.h135
-rw-r--r--include/linux/ipmi.h316
-rw-r--r--include/linux/ipmi_smi.h247
-rw-r--r--include/linux/ipv6.h327
-rw-r--r--include/linux/ipv6_route.h19
-rw-r--r--include/linux/irq.h872
-rw-r--r--include/linux/irq_cpustat.h31
-rw-r--r--include/linux/irq_work.h54
-rw-r--r--include/linux/irqchip.h20
-rw-r--r--include/linux/irqchip/arm-gic-acpi.h31
-rw-r--r--include/linux/irqchip/arm-gic-v3.h394
-rw-r--r--include/linux/irqchip/arm-gic.h118
-rw-r--r--include/linux/irqchip/arm-vic.h38
-rw-r--r--include/linux/irqchip/chained_irq.h52
-rw-r--r--include/linux/irqchip/irq-omap-intc.h30
-rw-r--r--include/linux/irqchip/metag-ext.h33
-rw-r--r--include/linux/irqchip/metag.h24
-rw-r--r--include/linux/irqchip/mips-gic.h257
-rw-r--r--include/linux/irqchip/mmp.h6
-rw-r--r--include/linux/irqchip/mxs.h14
-rw-r--r--include/linux/irqchip/versatile-fpga.h13
-rw-r--r--include/linux/irqchip/xtensa-mx.h17
-rw-r--r--include/linux/irqchip/xtensa-pic.h18
-rw-r--r--include/linux/irqdesc.h217
-rw-r--r--include/linux/irqdomain.h328
-rw-r--r--include/linux/irqflags.h151
-rw-r--r--include/linux/irqhandler.h14
-rw-r--r--include/linux/irqnr.h39
-rw-r--r--include/linux/irqreturn.h19
-rw-r--r--include/linux/isa.h39
-rw-r--r--include/linux/isapnp.h121
-rw-r--r--include/linux/iscsi_boot_sysfs.h133
-rw-r--r--include/linux/iscsi_ibft.h46
-rw-r--r--include/linux/isdn.h472
-rw-r--r--include/linux/isdn/capilli.h113
-rw-r--r--include/linux/isdn/capiutil.h516
-rw-r--r--include/linux/isdn/hdlc.h82
-rw-r--r--include/linux/isdn_divertif.h35
-rw-r--r--include/linux/isdn_ppp.h194
-rw-r--r--include/linux/isdnif.h505
-rw-r--r--include/linux/isicom.h84
-rw-r--r--include/linux/jbd.h1047
-rw-r--r--include/linux/jbd2.h1407
-rw-r--r--include/linux/jbd_common.h46
-rw-r--r--include/linux/jhash.h175
-rw-r--r--include/linux/jiffies.h314
-rw-r--r--include/linux/journal-head.h106
-rw-r--r--include/linux/joystick.h37
-rw-r--r--include/linux/jump_label.h218
-rw-r--r--include/linux/jump_label_ratelimit.h36
-rw-r--r--include/linux/jz4740-adc.h32
-rw-r--r--include/linux/jz4780-nemc.h43
-rw-r--r--include/linux/kallsyms.h128
-rw-r--r--include/linux/kasan.h88
-rw-r--r--include/linux/kbd_diacr.h8
-rw-r--r--include/linux/kbd_kern.h146
-rw-r--r--include/linux/kbuild.h15
-rw-r--r--include/linux/kconfig.h54
-rw-r--r--include/linux/kcore.h38
-rw-r--r--include/linux/kd.h7
-rw-r--r--include/linux/kdb.h221
-rw-r--r--include/linux/kdebug.h22
-rw-r--r--include/linux/kdev_t.h92
-rw-r--r--include/linux/kern_levels.h38
-rw-r--r--include/linux/kernel-page-flags.h20
-rw-r--r--include/linux/kernel.h830
-rw-r--r--include/linux/kernel_stat.h98
-rw-r--r--include/linux/kernelcapi.h119
-rw-r--r--include/linux/kernfs.h474
-rw-r--r--include/linux/kexec.h330
-rw-r--r--include/linux/key-type.h186
-rw-r--r--include/linux/key.h376
-rw-r--r--include/linux/keyboard.h20
-rw-r--r--include/linux/kfifo.h833
-rw-r--r--include/linux/kgdb.h327
-rw-r--r--include/linux/khugepaged.h70
-rw-r--r--include/linux/klist.h68
-rw-r--r--include/linux/kmemcheck.h171
-rw-r--r--include/linux/kmemleak.h112
-rw-r--r--include/linux/kmod.h107
-rw-r--r--include/linux/kmsg_dump.h117
-rw-r--r--include/linux/kobj_map.h19
-rw-r--r--include/linux/kobject.h224
-rw-r--r--include/linux/kobject_ns.h60
-rw-r--r--include/linux/kprobes.h496
-rw-r--r--include/linux/kref.h171
-rw-r--r--include/linux/ks0108.h49
-rw-r--r--include/linux/ks8842.h38
-rw-r--r--include/linux/ks8851_mll.h33
-rw-r--r--include/linux/ksm.h123
-rw-r--r--include/linux/kthread.h131
-rw-r--r--include/linux/ktime.h299
-rw-r--r--include/linux/kvm_host.h1083
-rw-r--r--include/linux/kvm_para.h13
-rw-r--r--include/linux/kvm_types.h65
-rw-r--r--include/linux/l2tp.h13
-rw-r--r--include/linux/lapb.h57
-rw-r--r--include/linux/latencytop.h53
-rw-r--r--include/linux/lcd.h130
-rw-r--r--include/linux/lcm.h9
-rw-r--r--include/linux/led-class-flash.h192
-rw-r--r--include/linux/led-lm3530.h121
-rw-r--r--include/linux/leds-bd2802.h26
-rw-r--r--include/linux/leds-lp3944.h50
-rw-r--r--include/linux/leds-pca9532.h48
-rw-r--r--include/linux/leds-regulator.h46
-rw-r--r--include/linux/leds-tca6507.h34
-rw-r--r--include/linux/leds.h347
-rw-r--r--include/linux/leds_pwm.h21
-rw-r--r--include/linux/lglock.h76
-rw-r--r--include/linux/lguest.h73
-rw-r--r--include/linux/lguest_launcher.h44
-rw-r--r--include/linux/libata.h1964
-rw-r--r--include/linux/libfdt.h8
-rw-r--r--include/linux/libfdt_env.h13
-rw-r--r--include/linux/libps2.h56
-rw-r--r--include/linux/license.h14
-rw-r--r--include/linux/linkage.h112
-rw-r--r--include/linux/linux_logo.h61
-rw-r--r--include/linux/lis3lv02d.h127
-rw-r--r--include/linux/list.h744
-rw-r--r--include/linux/list_bl.h161
-rw-r--r--include/linux/list_lru.h189
-rw-r--r--include/linux/list_nulls.h116
-rw-r--r--include/linux/list_sort.h11
-rw-r--r--include/linux/livepatch.h133
-rw-r--r--include/linux/llc.h23
-rw-r--r--include/linux/llist.h200
-rw-r--r--include/linux/lockd/bind.h60
-rw-r--r--include/linux/lockd/debug.h43
-rw-r--r--include/linux/lockd/lockd.h365
-rw-r--r--include/linux/lockd/nlm.h57
-rw-r--r--include/linux/lockd/share.h31
-rw-r--r--include/linux/lockd/xdr.h118
-rw-r--r--include/linux/lockd/xdr4.h47
-rw-r--r--include/linux/lockdep.h543
-rw-r--r--include/linux/lockref.h51
-rw-r--r--include/linux/log2.h208
-rw-r--r--include/linux/lp.h100
-rw-r--r--include/linux/lru_cache.h314
-rw-r--r--include/linux/lsm_audit.h99
-rw-r--r--include/linux/lz4.h87
-rw-r--r--include/linux/lzo.h45
-rw-r--r--include/linux/m48t86.h16
-rw-r--r--include/linux/mISDNdsp.h39
-rw-r--r--include/linux/mISDNhw.h201
-rw-r--r--include/linux/mISDNif.h604
-rw-r--r--include/linux/mailbox_client.h49
-rw-r--r--include/linux/mailbox_controller.h133
-rw-r--r--include/linux/maple.h105
-rw-r--r--include/linux/marvell_phy.h25
-rw-r--r--include/linux/math64.h166
-rw-r--r--include/linux/max17040_battery.h19
-rw-r--r--include/linux/mbcache.h55
-rw-r--r--include/linux/mbus.h79
-rw-r--r--include/linux/mc146818rtc.h123
-rw-r--r--include/linux/mc6821.h51
-rw-r--r--include/linux/mcb.h123
-rw-r--r--include/linux/mdio-bitbang.h45
-rw-r--r--include/linux/mdio-gpio.h32
-rw-r--r--include/linux/mdio-mux.h21
-rw-r--r--include/linux/mdio.h176
-rw-r--r--include/linux/mei_cl_bus.h45
-rw-r--r--include/linux/memblock.h386
-rw-r--r--include/linux/memcontrol.h609
-rw-r--r--include/linux/memory.h156
-rw-r--r--include/linux/memory_hotplug.h279
-rw-r--r--include/linux/mempolicy.h295
-rw-r--r--include/linux/mempool.h75
-rw-r--r--include/linux/memstick.h347
-rw-r--r--include/linux/mfd/88pm80x.h372
-rw-r--r--include/linux/mfd/88pm860x.h487
-rw-r--r--include/linux/mfd/aat2870.h181
-rw-r--r--include/linux/mfd/ab3100.h129
-rw-r--r--include/linux/mfd/abx500.h348
-rw-r--r--include/linux/mfd/abx500/ab8500-bm.h478
-rw-r--r--include/linux/mfd/abx500/ab8500-codec.h54
-rw-r--r--include/linux/mfd/abx500/ab8500-gpadc.h75
-rw-r--r--include/linux/mfd/abx500/ab8500-sysctrl.h307
-rw-r--r--include/linux/mfd/abx500/ab8500.h516
-rw-r--r--include/linux/mfd/abx500/ux500_chargalg.h55
-rw-r--r--include/linux/mfd/adp5520.h299
-rw-r--r--include/linux/mfd/arizona/core.h163
-rw-r--r--include/linux/mfd/arizona/pdata.h178
-rw-r--r--include/linux/mfd/arizona/registers.h7832
-rw-r--r--include/linux/mfd/as3711.h126
-rw-r--r--include/linux/mfd/as3722.h428
-rw-r--r--include/linux/mfd/asic3.h316
-rw-r--r--include/linux/mfd/atmel-hlcdc.h85
-rw-r--r--include/linux/mfd/axp20x.h278
-rw-r--r--include/linux/mfd/bcm590xx.h34
-rw-r--r--include/linux/mfd/core.h123
-rw-r--r--include/linux/mfd/cros_ec.h201
-rw-r--r--include/linux/mfd/cros_ec_commands.h2350
-rw-r--r--include/linux/mfd/da903x.h247
-rw-r--r--include/linux/mfd/da9052/da9052.h226
-rw-r--r--include/linux/mfd/da9052/pdata.h40
-rw-r--r--include/linux/mfd/da9052/reg.h752
-rw-r--r--include/linux/mfd/da9055/core.h94
-rw-r--r--include/linux/mfd/da9055/pdata.h53
-rw-r--r--include/linux/mfd/da9055/reg.h699
-rw-r--r--include/linux/mfd/da9063/core.h99
-rw-r--r--include/linux/mfd/da9063/pdata.h111
-rw-r--r--include/linux/mfd/da9063/registers.h1073
-rw-r--r--include/linux/mfd/da9150/core.h68
-rw-r--r--include/linux/mfd/da9150/registers.h1155
-rw-r--r--include/linux/mfd/davinci_voicecodec.h121
-rw-r--r--include/linux/mfd/db8500-prcmu.h772
-rw-r--r--include/linux/mfd/dbx500-prcmu.h666
-rw-r--r--include/linux/mfd/dln2.h103
-rw-r--r--include/linux/mfd/ds1wm.h13
-rw-r--r--include/linux/mfd/ezx-pcap.h253
-rw-r--r--include/linux/mfd/hi6421-pmic.h41
-rw-r--r--include/linux/mfd/htc-egpio.h57
-rw-r--r--include/linux/mfd/htc-pasic3.h54
-rw-r--r--include/linux/mfd/intel_msic.h456
-rw-r--r--include/linux/mfd/intel_soc_pmic.h30
-rw-r--r--include/linux/mfd/ipaq-micro.h148
-rw-r--r--include/linux/mfd/janz.h54
-rw-r--r--include/linux/mfd/kempld.h129
-rw-r--r--include/linux/mfd/lm3533.h104
-rw-r--r--include/linux/mfd/lp3943.h114
-rw-r--r--include/linux/mfd/lp8788-isink.h52
-rw-r--r--include/linux/mfd/lp8788.h350
-rw-r--r--include/linux/mfd/lpc_ich.h52
-rw-r--r--include/linux/mfd/max14577-private.h485
-rw-r--r--include/linux/mfd/max14577.h107
-rw-r--r--include/linux/mfd/max77686-private.h464
-rw-r--r--include/linux/mfd/max77686.h133
-rw-r--r--include/linux/mfd/max77693-private.h564
-rw-r--r--include/linux/mfd/max77693.h91
-rw-r--r--include/linux/mfd/max77843-private.h454
-rw-r--r--include/linux/mfd/max8907.h252
-rw-r--r--include/linux/mfd/max8925.h277
-rw-r--r--include/linux/mfd/max8997-private.h430
-rw-r--r--include/linux/mfd/max8997.h224
-rw-r--r--include/linux/mfd/max8998-private.h182
-rw-r--r--include/linux/mfd/max8998.h118
-rw-r--r--include/linux/mfd/mc13783.h90
-rw-r--r--include/linux/mfd/mc13892.h39
-rw-r--r--include/linux/mfd/mc13xxx.h262
-rw-r--r--include/linux/mfd/mcp.h66
-rw-r--r--include/linux/mfd/menelaus.h40
-rw-r--r--include/linux/mfd/mt6397/core.h64
-rw-r--r--include/linux/mfd/mt6397/registers.h362
-rw-r--r--include/linux/mfd/palmas.h3772
-rw-r--r--include/linux/mfd/pcf50633/adc.h73
-rw-r--r--include/linux/mfd/pcf50633/backlight.h51
-rw-r--r--include/linux/mfd/pcf50633/core.h238
-rw-r--r--include/linux/mfd/pcf50633/gpio.h52
-rw-r--r--include/linux/mfd/pcf50633/mbc.h134
-rw-r--r--include/linux/mfd/pcf50633/pmic.h67
-rw-r--r--include/linux/mfd/qcom_rpm.h13
-rw-r--r--include/linux/mfd/rc5t583.h380
-rw-r--r--include/linux/mfd/rdc321x.h26
-rw-r--r--include/linux/mfd/retu.h28
-rw-r--r--include/linux/mfd/rk808.h199
-rw-r--r--include/linux/mfd/rn5t618.h228
-rw-r--r--include/linux/mfd/rt5033-private.h260
-rw-r--r--include/linux/mfd/rt5033.h62
-rw-r--r--include/linux/mfd/rtsx_common.h50
-rw-r--r--include/linux/mfd/rtsx_pci.h1045
-rw-r--r--include/linux/mfd/rtsx_usb.h628
-rw-r--r--include/linux/mfd/samsung/core.h176
-rw-r--r--include/linux/mfd/samsung/irq.h253
-rw-r--r--include/linux/mfd/samsung/rtc.h151
-rw-r--r--include/linux/mfd/samsung/s2mpa01.h180
-rw-r--r--include/linux/mfd/samsung/s2mps11.h195
-rw-r--r--include/linux/mfd/samsung/s2mps13.h188
-rw-r--r--include/linux/mfd/samsung/s2mps14.h146
-rw-r--r--include/linux/mfd/samsung/s2mpu02.h201
-rw-r--r--include/linux/mfd/samsung/s5m8763.h96
-rw-r--r--include/linux/mfd/samsung/s5m8767.h211
-rw-r--r--include/linux/mfd/si476x-core.h533
-rw-r--r--include/linux/mfd/si476x-platform.h267
-rw-r--r--include/linux/mfd/si476x-reports.h163
-rw-r--r--include/linux/mfd/sky81452.h31
-rw-r--r--include/linux/mfd/smsc.h109
-rw-r--r--include/linux/mfd/sta2x11-mfd.h518
-rw-r--r--include/linux/mfd/stmpe.h185
-rw-r--r--include/linux/mfd/stw481x.h52
-rw-r--r--include/linux/mfd/syscon.h53
-rw-r--r--include/linux/mfd/syscon/atmel-matrix.h117
-rw-r--r--include/linux/mfd/syscon/atmel-smc.h173
-rw-r--r--include/linux/mfd/syscon/atmel-st.h49
-rw-r--r--include/linux/mfd/syscon/clps711x.h94
-rw-r--r--include/linux/mfd/syscon/exynos4-pmu.h21
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h47
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h438
-rw-r--r--include/linux/mfd/t7l66xb.h34
-rw-r--r--include/linux/mfd/tc3589x.h152
-rw-r--r--include/linux/mfd/tc6387xb.h20
-rw-r--r--include/linux/mfd/tc6393xb.h59
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h190
-rw-r--r--include/linux/mfd/tmio.h154
-rw-r--r--include/linux/mfd/tps6105x.h101
-rw-r--r--include/linux/mfd/tps6507x.h168
-rw-r--r--include/linux/mfd/tps65090.h156
-rw-r--r--include/linux/mfd/tps65217.h281
-rw-r--r--include/linux/mfd/tps65218.h283
-rw-r--r--include/linux/mfd/tps6586x.h110
-rw-r--r--include/linux/mfd/tps65910.h955
-rw-r--r--include/linux/mfd/tps65912.h328
-rw-r--r--include/linux/mfd/tps80031.h637
-rw-r--r--include/linux/mfd/twl4030-audio.h272
-rw-r--r--include/linux/mfd/twl6040.h269
-rw-r--r--include/linux/mfd/ucb1x00.h260
-rw-r--r--include/linux/mfd/viperboard.h110
-rw-r--r--include/linux/mfd/wl1273-core.h290
-rw-r--r--include/linux/mfd/wm831x/auxadc.h218
-rw-r--r--include/linux/mfd/wm831x/core.h430
-rw-r--r--include/linux/mfd/wm831x/gpio.h59
-rw-r--r--include/linux/mfd/wm831x/irq.h764
-rw-r--r--include/linux/mfd/wm831x/otp.h162
-rw-r--r--include/linux/mfd/wm831x/pdata.h150
-rw-r--r--include/linux/mfd/wm831x/pmu.h189
-rw-r--r--include/linux/mfd/wm831x/regulator.h1218
-rw-r--r--include/linux/mfd/wm831x/status.h34
-rw-r--r--include/linux/mfd/wm831x/watchdog.h52
-rw-r--r--include/linux/mfd/wm8350/audio.h628
-rw-r--r--include/linux/mfd/wm8350/comparator.h175
-rw-r--r--include/linux/mfd/wm8350/core.h694
-rw-r--r--include/linux/mfd/wm8350/gpio.h361
-rw-r--r--include/linux/mfd/wm8350/pmic.h781
-rw-r--r--include/linux/mfd/wm8350/rtc.h269
-rw-r--r--include/linux/mfd/wm8350/supply.h134
-rw-r--r--include/linux/mfd/wm8350/wdt.h28
-rw-r--r--include/linux/mfd/wm8400-audio.h1187
-rw-r--r--include/linux/mfd/wm8400-private.h935
-rw-r--r--include/linux/mfd/wm8400.h40
-rw-r--r--include/linux/mfd/wm8994/core.h145
-rw-r--r--include/linux/mfd/wm8994/gpio.h76
-rw-r--r--include/linux/mfd/wm8994/pdata.h238
-rw-r--r--include/linux/mfd/wm8994/registers.h4822
-rw-r--r--include/linux/mg_disk.h45
-rw-r--r--include/linux/mic_bus.h110
-rw-r--r--include/linux/micrel_phy.h46
-rw-r--r--include/linux/migrate.h101
-rw-r--r--include/linux/migrate_mode.h16
-rw-r--r--include/linux/mii.h339
-rw-r--r--include/linux/miscdevice.h75
-rw-r--r--include/linux/mlx4/cmd.h326
-rw-r--r--include/linux/mlx4/cq.h178
-rw-r--r--include/linux/mlx4/device.h1488
-rw-r--r--include/linux/mlx4/doorbell.h86
-rw-r--r--include/linux/mlx4/driver.h98
-rw-r--r--include/linux/mlx4/qp.h475
-rw-r--r--include/linux/mlx4/srq.h44
-rw-r--r--include/linux/mlx5/cmd.h51
-rw-r--r--include/linux/mlx5/cq.h175
-rw-r--r--include/linux/mlx5/device.h999
-rw-r--r--include/linux/mlx5/doorbell.h79
-rw-r--r--include/linux/mlx5/driver.h812
-rw-r--r--include/linux/mlx5/mlx5_ifc.h349
-rw-r--r--include/linux/mlx5/qp.h663
-rw-r--r--include/linux/mlx5/srq.h41
-rw-r--r--include/linux/mm.h2227
-rw-r--r--include/linux/mm_inline.h103
-rw-r--r--include/linux/mm_types.h553
-rw-r--r--include/linux/mman.h93
-rw-r--r--include/linux/mmc/boot.h7
-rw-r--r--include/linux/mmc/card.h531
-rw-r--r--include/linux/mmc/core.h215
-rw-r--r--include/linux/mmc/dw_mmc.h271
-rw-r--r--include/linux/mmc/host.h516
-rw-r--r--include/linux/mmc/mmc.h444
-rw-r--r--include/linux/mmc/pm.h30
-rw-r--r--include/linux/mmc/sd.h94
-rw-r--r--include/linux/mmc/sdhci-pci-data.h18
-rw-r--r--include/linux/mmc/sdio.h193
-rw-r--r--include/linux/mmc/sdio_func.h162
-rw-r--r--include/linux/mmc/sdio_ids.h61
-rw-r--r--include/linux/mmc/sh_mmcif.h218
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h10
-rw-r--r--include/linux/mmc/slot-gpio.h33
-rw-r--r--include/linux/mmc/tmio.h66
-rw-r--r--include/linux/mmdebug.h58
-rw-r--r--include/linux/mmiotrace.h111
-rw-r--r--include/linux/mmu_context.h9
-rw-r--r--include/linux/mmu_notifier.h437
-rw-r--r--include/linux/mmzone.h1275
-rw-r--r--include/linux/mnt_namespace.h18
-rw-r--r--include/linux/mod_devicetable.h632
-rw-r--r--include/linux/module.h658
-rw-r--r--include/linux/moduleloader.h95
-rw-r--r--include/linux/moduleparam.h508
-rw-r--r--include/linux/mount.h98
-rw-r--r--include/linux/mpage.h24
-rw-r--r--include/linux/mpi.h145
-rw-r--r--include/linux/mpls.h6
-rw-r--r--include/linux/mroute.h107
-rw-r--r--include/linux/mroute6.h133
-rw-r--r--include/linux/msdos_fs.h11
-rw-r--r--include/linux/msg.h42
-rw-r--r--include/linux/msi.h243
-rw-r--r--include/linux/msm_mdp.h79
-rw-r--r--include/linux/mtd/bbm.h172
-rw-r--r--include/linux/mtd/blktrans.h96
-rw-r--r--include/linux/mtd/cfi.h564
-rw-r--r--include/linux/mtd/cfi_endian.h53
-rw-r--r--include/linux/mtd/concat.h34
-rw-r--r--include/linux/mtd/doc2000.h220
-rw-r--r--include/linux/mtd/flashchip.h112
-rw-r--r--include/linux/mtd/fsmc.h174
-rw-r--r--include/linux/mtd/ftl.h74
-rw-r--r--include/linux/mtd/gen_probe.h37
-rw-r--r--include/linux/mtd/inftl.h63
-rw-r--r--include/linux/mtd/latch-addr-flash.h29
-rw-r--r--include/linux/mtd/lpc32xx_mlc.h20
-rw-r--r--include/linux/mtd/lpc32xx_slc.h20
-rw-r--r--include/linux/mtd/map.h486
-rw-r--r--include/linux/mtd/mtd.h414
-rw-r--r--include/linux/mtd/mtdram.h8
-rw-r--r--include/linux/mtd/nand-gpio.h19
-rw-r--r--include/linux/mtd/nand.h1029
-rw-r--r--include/linux/mtd/nand_bch.h72
-rw-r--r--include/linux/mtd/nand_ecc.h42
-rw-r--r--include/linux/mtd/ndfc.h67
-rw-r--r--include/linux/mtd/nftl.h72
-rw-r--r--include/linux/mtd/onenand.h242
-rw-r--r--include/linux/mtd/onenand_regs.h223
-rw-r--r--include/linux/mtd/partitions.h88
-rw-r--r--include/linux/mtd/pfow.h156
-rw-r--r--include/linux/mtd/physmap.h36
-rw-r--r--include/linux/mtd/pismo.h17
-rw-r--r--include/linux/mtd/plat-ram.h34
-rw-r--r--include/linux/mtd/qinfo.h91
-rw-r--r--include/linux/mtd/sh_flctl.h192
-rw-r--r--include/linux/mtd/sharpsl.h20
-rw-r--r--include/linux/mtd/spear_smi.h65
-rw-r--r--include/linux/mtd/spi-nor.h216
-rw-r--r--include/linux/mtd/super.h29
-rw-r--r--include/linux/mtd/ubi.h284
-rw-r--r--include/linux/mtd/xip.h99
-rw-r--r--include/linux/mutex-debug.h24
-rw-r--r--include/linux/mutex.h178
-rw-r--r--include/linux/mv643xx.h979
-rw-r--r--include/linux/mv643xx_eth.h86
-rw-r--r--include/linux/mv643xx_i2c.h22
-rw-r--r--include/linux/mvebu-pmsu.h20
-rw-r--r--include/linux/mxm-wmi.h33
-rw-r--r--include/linux/n_r3964.h177
-rw-r--r--include/linux/namei.h98
-rw-r--r--include/linux/net.h299
-rw-r--r--include/linux/netdev_features.h190
-rw-r--r--include/linux/netdevice.h3961
-rw-r--r--include/linux/netfilter.h379
-rw-r--r--include/linux/netfilter/ipset/ip_set.h573
-rw-r--r--include/linux/netfilter/ipset/ip_set_bitmap.h28
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h57
-rw-r--r--include/linux/netfilter/ipset/ip_set_getport.h33
-rw-r--r--include/linux/netfilter/ipset/ip_set_hash.h13
-rw-r--r--include/linux/netfilter/ipset/ip_set_list.h11
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h78
-rw-r--r--include/linux/netfilter/ipset/pfxlen.h53
-rw-r--r--include/linux/netfilter/nf_conntrack_amanda.h11
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h28
-rw-r--r--include/linux/netfilter/nf_conntrack_dccp.h40
-rw-r--r--include/linux/netfilter/nf_conntrack_ftp.h33
-rw-r--r--include/linux/netfilter/nf_conntrack_h323.h97
-rw-r--r--include/linux/netfilter/nf_conntrack_h323_asn1.h98
-rw-r--r--include/linux/netfilter/nf_conntrack_h323_types.h934
-rw-r--r--include/linux/netfilter/nf_conntrack_irc.h16
-rw-r--r--include/linux/netfilter/nf_conntrack_pptp.h326
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h93
-rw-r--r--include/linux/netfilter/nf_conntrack_sane.h21
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h199
-rw-r--r--include/linux/netfilter/nf_conntrack_snmp.h9
-rw-r--r--include/linux/netfilter/nf_conntrack_tcp.h32
-rw-r--r--include/linux/netfilter/nf_conntrack_tftp.h20
-rw-r--r--include/linux/netfilter/nfnetlink.h72
-rw-r--r--include/linux/netfilter/nfnetlink_acct.h19
-rw-r--r--include/linux/netfilter/x_tables.h436
-rw-r--r--include/linux/netfilter/xt_hashlimit.h9
-rw-r--r--include/linux/netfilter/xt_physdev.h7
-rw-r--r--include/linux/netfilter_arp/arp_tables.h79
-rw-r--r--include/linux/netfilter_bridge.h77
-rw-r--r--include/linux/netfilter_bridge/ebt_802_3.h11
-rw-r--r--include/linux/netfilter_bridge/ebtables.h128
-rw-r--r--include/linux/netfilter_ipv4.h12
-rw-r--r--include/linux/netfilter_ipv4/ip_tables.h92
-rw-r--r--include/linux/netfilter_ipv6.h41
-rw-r--r--include/linux/netfilter_ipv6/ip6_tables.h69
-rw-r--r--include/linux/netlink.h181
-rw-r--r--include/linux/netpoll.h120
-rw-r--r--include/linux/nfs.h54
-rw-r--r--include/linux/nfs3.h13
-rw-r--r--include/linux/nfs4.h573
-rw-r--r--include/linux/nfs_fs.h568
-rw-r--r--include/linux/nfs_fs_i.h20
-rw-r--r--include/linux/nfs_fs_sb.h241
-rw-r--r--include/linux/nfs_iostat.h133
-rw-r--r--include/linux/nfs_page.h191
-rw-r--r--include/linux/nfs_xdr.h1541
-rw-r--r--include/linux/nfsacl.h41
-rw-r--r--include/linux/nilfs2_fs.h919
-rw-r--r--include/linux/nl802154.h178
-rw-r--r--include/linux/nls.h108
-rw-r--r--include/linux/nmi.h86
-rw-r--r--include/linux/node.h84
-rw-r--r--include/linux/nodemask.h527
-rw-r--r--include/linux/notifier.h215
-rw-r--r--include/linux/ns_common.h12
-rw-r--r--include/linux/nsc_gpio.h40
-rw-r--r--include/linux/nsproxy.h85
-rw-r--r--include/linux/ntb.h88
-rw-r--r--include/linux/nubus.h134
-rw-r--r--include/linux/numa.h15
-rw-r--r--include/linux/nvme.h179
-rw-r--r--include/linux/nvram.h13
-rw-r--r--include/linux/nwpserial.h18
-rw-r--r--include/linux/nx842.h11
-rw-r--r--include/linux/of.h1099
-rw-r--r--include/linux/of_address.h161
-rw-r--r--include/linux/of_device.h98
-rw-r--r--include/linux/of_dma.h75
-rw-r--r--include/linux/of_fdt.h100
-rw-r--r--include/linux/of_gpio.h154
-rw-r--r--include/linux/of_graph.h86
-rw-r--r--include/linux/of_iommu.h46
-rw-r--r--include/linux/of_irq.h91
-rw-r--r--include/linux/of_mdio.h87
-rw-r--r--include/linux/of_mtd.h50
-rw-r--r--include/linux/of_net.h34
-rw-r--r--include/linux/of_pci.h75
-rw-r--r--include/linux/of_pdt.h44
-rw-r--r--include/linux/of_platform.h93
-rw-r--r--include/linux/of_reserved_mem.h49
-rw-r--r--include/linux/oid_registry.h98
-rw-r--r--include/linux/olpc-ec.h42
-rw-r--r--include/linux/omap-dma.h352
-rw-r--r--include/linux/omap-dmaengine.h21
-rw-r--r--include/linux/omap-gpmc.h200
-rw-r--r--include/linux/omap-iommu.h19
-rw-r--r--include/linux/omap-mailbox.h29
-rw-r--r--include/linux/omapfb.h42
-rw-r--r--include/linux/oom.h102
-rw-r--r--include/linux/openvswitch.h24
-rw-r--r--include/linux/oprofile.h209
-rw-r--r--include/linux/osq_lock.h35
-rw-r--r--include/linux/oxu210hp.h7
-rw-r--r--include/linux/padata.h189
-rw-r--r--include/linux/page-flags-layout.h94
-rw-r--r--include/linux/page-flags.h682
-rw-r--r--include/linux/page-isolation.h76
-rw-r--r--include/linux/page_counter.h52
-rw-r--r--include/linux/page_ext.h84
-rw-r--r--include/linux/page_owner.h38
-rw-r--r--include/linux/pageblock-flags.h101
-rw-r--r--include/linux/pagemap.h673
-rw-r--r--include/linux/pagevec.h72
-rw-r--r--include/linux/parport.h481
-rw-r--r--include/linux/parport_pc.h238
-rw-r--r--include/linux/parser.h34
-rw-r--r--include/linux/pata_arasan_cf_data.h47
-rw-r--r--include/linux/patchkey.h25
-rw-r--r--include/linux/path.h20
-rw-r--r--include/linux/pch_dma.h37
-rw-r--r--include/linux/pci-acpi.h96
-rw-r--r--include/linux/pci-aspm.h65
-rw-r--r--include/linux/pci-ats.h110
-rw-r--r--include/linux/pci-dma.h11
-rw-r--r--include/linux/pci.h1910
-rw-r--r--include/linux/pci_hotplug.h189
-rw-r--r--include/linux/pci_ids.h2995
-rw-r--r--include/linux/pcieport_if.h68
-rw-r--r--include/linux/pda_power.h42
-rw-r--r--include/linux/pe.h448
-rw-r--r--include/linux/percpu-defs.h516
-rw-r--r--include/linux/percpu-refcount.h328
-rw-r--r--include/linux/percpu-rwsem.h34
-rw-r--r--include/linux/percpu.h141
-rw-r--r--include/linux/percpu_counter.h190
-rw-r--r--include/linux/percpu_ida.h82
-rw-r--r--include/linux/perf_event.h1050
-rw-r--r--include/linux/perf_regs.h41
-rw-r--r--include/linux/personality.h16
-rw-r--r--include/linux/pfn.h13
-rw-r--r--include/linux/phonet.h40
-rw-r--r--include/linux/phy.h813
-rw-r--r--include/linux/phy/omap_control_phy.h99
-rw-r--r--include/linux/phy/omap_usb.h77
-rw-r--r--include/linux/phy/phy-qcom-ufs.h59
-rw-r--r--include/linux/phy/phy.h336
-rw-r--r--include/linux/phy_fixed.h57
-rw-r--r--include/linux/pid.h200
-rw-r--r--include/linux/pid_namespace.h103
-rw-r--r--include/linux/pim.h27
-rw-r--r--include/linux/pinctrl/consumer.h195
-rw-r--r--include/linux/pinctrl/devinfo.h49
-rw-r--r--include/linux/pinctrl/machine.h170
-rw-r--r--include/linux/pinctrl/pinconf-generic.h210
-rw-r--r--include/linux/pinctrl/pinconf.h75
-rw-r--r--include/linux/pinctrl/pinctrl-state.h24
-rw-r--r--include/linux/pinctrl/pinctrl.h190
-rw-r--r--include/linux/pinctrl/pinmux.h86
-rw-r--r--include/linux/pipe_fs_i.h149
-rw-r--r--include/linux/pktcdvd.h205
-rw-r--r--include/linux/pl320-ipc.h17
-rw-r--r--include/linux/platform_data/ad5449.h40
-rw-r--r--include/linux/platform_data/ad5755.h103
-rw-r--r--include/linux/platform_data/ad7266.h54
-rw-r--r--include/linux/platform_data/ad7291.h12
-rw-r--r--include/linux/platform_data/ad7298.h20
-rw-r--r--include/linux/platform_data/ad7303.h21
-rw-r--r--include/linux/platform_data/ad7791.h17
-rw-r--r--include/linux/platform_data/ad7793.h112
-rw-r--r--include/linux/platform_data/ad7887.h26
-rw-r--r--include/linux/platform_data/adau17x1.h109
-rw-r--r--include/linux/platform_data/adau1977.h45
-rw-r--r--include/linux/platform_data/ads7828.h29
-rw-r--r--include/linux/platform_data/arm-ux500-pm.h21
-rw-r--r--include/linux/platform_data/asoc-imx-ssi.h23
-rw-r--r--include/linux/platform_data/asoc-kirkwood.h7
-rw-r--r--include/linux/platform_data/asoc-mx27vis.h11
-rw-r--r--include/linux/platform_data/asoc-palm27x.h8
-rw-r--r--include/linux/platform_data/asoc-s3c.h45
-rw-r--r--include/linux/platform_data/asoc-s3c24xx_simtec.h33
-rw-r--r--include/linux/platform_data/asoc-ti-mcbsp.h58
-rw-r--r--include/linux/platform_data/asoc-ux500-msp.h20
-rw-r--r--include/linux/platform_data/at24.h55
-rw-r--r--include/linux/platform_data/at91_adc.h50
-rw-r--r--include/linux/platform_data/ata-pxa.h33
-rw-r--r--include/linux/platform_data/ata-samsung_cf.h34
-rw-r--r--include/linux/platform_data/atmel.h97
-rw-r--r--include/linux/platform_data/bcmgenet.h18
-rw-r--r--include/linux/platform_data/bd6107.h19
-rw-r--r--include/linux/platform_data/bfin_rotary.h117
-rw-r--r--include/linux/platform_data/brcmfmac-sdio.h135
-rw-r--r--include/linux/platform_data/bt-nokia-h4p.h38
-rw-r--r--include/linux/platform_data/camera-mx2.h44
-rw-r--r--include/linux/platform_data/camera-mx3.h52
-rw-r--r--include/linux/platform_data/camera-pxa.h44
-rw-r--r--include/linux/platform_data/camera-rcar.h25
-rw-r--r--include/linux/platform_data/clk-integrator.h2
-rw-r--r--include/linux/platform_data/clk-lpss.h23
-rw-r--r--include/linux/platform_data/clk-realview.h1
-rw-r--r--include/linux/platform_data/clk-u300.h1
-rw-r--r--include/linux/platform_data/clk-ux500.h23
-rw-r--r--include/linux/platform_data/coda.h18
-rw-r--r--include/linux/platform_data/cpuidle-exynos.h20
-rw-r--r--include/linux/platform_data/crypto-atmel.h22
-rw-r--r--include/linux/platform_data/crypto-ux500.h22
-rw-r--r--include/linux/platform_data/cyttsp4.h76
-rw-r--r--include/linux/platform_data/davinci_asp.h112
-rw-r--r--include/linux/platform_data/db8500_thermal.h38
-rw-r--r--include/linux/platform_data/dma-atmel.h65
-rw-r--r--include/linux/platform_data/dma-coh901318.h72
-rw-r--r--include/linux/platform_data/dma-dw.h61
-rw-r--r--include/linux/platform_data/dma-ep93xx.h93
-rw-r--r--include/linux/platform_data/dma-hsu.h25
-rw-r--r--include/linux/platform_data/dma-imx-sdma.h67
-rw-r--r--include/linux/platform_data/dma-imx.h71
-rw-r--r--include/linux/platform_data/dma-mmp_tdma.h40
-rw-r--r--include/linux/platform_data/dma-mv_xor.h21
-rw-r--r--include/linux/platform_data/dma-rcar-audmapp.h34
-rw-r--r--include/linux/platform_data/dma-rcar-hpbdma.h103
-rw-r--r--include/linux/platform_data/dma-s3c24xx.h46
-rw-r--r--include/linux/platform_data/dma-ste-dma40.h209
-rw-r--r--include/linux/platform_data/dmtimer-omap.h31
-rw-r--r--include/linux/platform_data/drv260x-pdata.h28
-rw-r--r--include/linux/platform_data/dwc3-omap.h43
-rw-r--r--include/linux/platform_data/edma.h179
-rw-r--r--include/linux/platform_data/efm32-spi.h14
-rw-r--r--include/linux/platform_data/efm32-uart.h18
-rw-r--r--include/linux/platform_data/ehci-sh.h28
-rw-r--r--include/linux/platform_data/elm.h65
-rw-r--r--include/linux/platform_data/emif_plat.h129
-rw-r--r--include/linux/platform_data/eth-netx.h25
-rw-r--r--include/linux/platform_data/fsa9480.h27
-rw-r--r--include/linux/platform_data/g762.h37
-rw-r--r--include/linux/platform_data/gpio-davinci.h55
-rw-r--r--include/linux/platform_data/gpio-dwapb.h32
-rw-r--r--include/linux/platform_data/gpio-em.h11
-rw-r--r--include/linux/platform_data/gpio-lpc32xx.h50
-rw-r--r--include/linux/platform_data/gpio-omap.h216
-rw-r--r--include/linux/platform_data/gpio-rcar.h29
-rw-r--r--include/linux/platform_data/gpio-ts5500.h27
-rw-r--r--include/linux/platform_data/gpio_backlight.h21
-rw-r--r--include/linux/platform_data/hsmmc-omap.h88
-rw-r--r--include/linux/platform_data/hwmon-s3c.h49
-rw-r--r--include/linux/platform_data/i2c-cbus-gpio.h27
-rw-r--r--include/linux/platform_data/i2c-davinci.h27
-rw-r--r--include/linux/platform_data/i2c-designware.h21
-rw-r--r--include/linux/platform_data/i2c-imx.h21
-rw-r--r--include/linux/platform_data/i2c-nuc900.h9
-rw-r--r--include/linux/platform_data/i2c-s3c2410.h78
-rw-r--r--include/linux/platform_data/ina2xx.h19
-rw-r--r--include/linux/platform_data/intel-mid_wdt.h22
-rw-r--r--include/linux/platform_data/invensense_mpu6050.h31
-rw-r--r--include/linux/platform_data/iommu-omap.h48
-rw-r--r--include/linux/platform_data/irda-pxaficp.h25
-rw-r--r--include/linux/platform_data/irda-sa11x0.h20
-rw-r--r--include/linux/platform_data/irq-renesas-intc-irqpin.h29
-rw-r--r--include/linux/platform_data/irq-renesas-irqc.h27
-rw-r--r--include/linux/platform_data/isl9305.h30
-rw-r--r--include/linux/platform_data/keyboard-pxa930_rotary.h20
-rw-r--r--include/linux/platform_data/keyboard-spear.h164
-rw-r--r--include/linux/platform_data/keypad-ep93xx.h31
-rw-r--r--include/linux/platform_data/keypad-nomadik-ske.h50
-rw-r--r--include/linux/platform_data/keypad-omap.h50
-rw-r--r--include/linux/platform_data/keypad-pxa27x.h72
-rw-r--r--include/linux/platform_data/keypad-w90p910.h15
-rw-r--r--include/linux/platform_data/keyscan-davinci.h42
-rw-r--r--include/linux/platform_data/lcd-mipid.h29
-rw-r--r--include/linux/platform_data/leds-kirkwood-netxbig.h53
-rw-r--r--include/linux/platform_data/leds-kirkwood-ns2.h24
-rw-r--r--include/linux/platform_data/leds-lm355x.h66
-rw-r--r--include/linux/platform_data/leds-lm3642.h38
-rw-r--r--include/linux/platform_data/leds-lp55xx.h81
-rw-r--r--include/linux/platform_data/leds-omap.h22
-rw-r--r--include/linux/platform_data/leds-pca963x.h42
-rw-r--r--include/linux/platform_data/leds-s3c24xx.h27
-rw-r--r--include/linux/platform_data/lm3630a_bl.h65
-rw-r--r--include/linux/platform_data/lm3639_bl.h69
-rw-r--r--include/linux/platform_data/lp855x.h151
-rw-r--r--include/linux/platform_data/lp8727.h68
-rw-r--r--include/linux/platform_data/lp8755.h71
-rw-r--r--include/linux/platform_data/lv5207lp.h19
-rw-r--r--include/linux/platform_data/macb.h18
-rw-r--r--include/linux/platform_data/mailbox-omap.h58
-rw-r--r--include/linux/platform_data/max197.h26
-rw-r--r--include/linux/platform_data/max3421-hcd.h24
-rw-r--r--include/linux/platform_data/max6697.h36
-rw-r--r--include/linux/platform_data/mfd-mcp-sa11x0.h20
-rw-r--r--include/linux/platform_data/microread.h35
-rw-r--r--include/linux/platform_data/mmc-atmel-mci.h22
-rw-r--r--include/linux/platform_data/mmc-davinci.h36
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h50
-rw-r--r--include/linux/platform_data/mmc-mvsdio.h18
-rw-r--r--include/linux/platform_data/mmc-mxcmmc.h39
-rw-r--r--include/linux/platform_data/mmc-omap.h121
-rw-r--r--include/linux/platform_data/mmc-pxamci.h28
-rw-r--r--include/linux/platform_data/mmc-s3cmci.h52
-rw-r--r--include/linux/platform_data/mmc-sdhci-s3c.h56
-rw-r--r--include/linux/platform_data/mmp_audio.h22
-rw-r--r--include/linux/platform_data/mmp_dma.h19
-rw-r--r--include/linux/platform_data/mouse-pxa930_trkball.h10
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h37
-rw-r--r--include/linux/platform_data/mtd-davinci.h90
-rw-r--r--include/linux/platform_data/mtd-mxc_nand.h32
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h84
-rw-r--r--include/linux/platform_data/mtd-nand-pxa3xx.h72
-rw-r--r--include/linux/platform_data/mtd-nand-s3c2410.h71
-rw-r--r--include/linux/platform_data/mtd-onenand-omap2.h34
-rw-r--r--include/linux/platform_data/mtd-orion_nand.h24
-rw-r--r--include/linux/platform_data/mv_usb.h53
-rw-r--r--include/linux/platform_data/net-cw1200.h81
-rw-r--r--include/linux/platform_data/ntc_thermistor.h60
-rw-r--r--include/linux/platform_data/nxp-nci.h27
-rw-r--r--include/linux/platform_data/omap-twl4030.h58
-rw-r--r--include/linux/platform_data/omap-wd-timer.h38
-rw-r--r--include/linux/platform_data/omap1_bl.h11
-rw-r--r--include/linux/platform_data/omap_drm.h53
-rw-r--r--include/linux/platform_data/pca953x.h30
-rw-r--r--include/linux/platform_data/pcmcia-pxa2xx_viper.h11
-rw-r--r--include/linux/platform_data/pinctrl-adi2.h40
-rw-r--r--include/linux/platform_data/pinctrl-single.h12
-rw-r--r--include/linux/platform_data/pn544.h43
-rw-r--r--include/linux/platform_data/pxa2xx_udc.h27
-rw-r--r--include/linux/platform_data/pxa_sdhci.h58
-rw-r--r--include/linux/platform_data/regulator-haptic.h29
-rw-r--r--include/linux/platform_data/remoteproc-omap.h59
-rw-r--r--include/linux/platform_data/s3c-hsotg.h42
-rw-r--r--include/linux/platform_data/s3c-hsudc.h34
-rw-r--r--include/linux/platform_data/sa11x0-serial.h33
-rw-r--r--include/linux/platform_data/sc18is602.h19
-rw-r--r--include/linux/platform_data/serial-imx.h28
-rw-r--r--include/linux/platform_data/serial-omap.h46
-rw-r--r--include/linux/platform_data/serial-sccnxp.h88
-rw-r--r--include/linux/platform_data/sh_ipmmu.h18
-rw-r--r--include/linux/platform_data/shmob_drm.h99
-rw-r--r--include/linux/platform_data/sht15.h38
-rw-r--r--include/linux/platform_data/shtc1.h23
-rw-r--r--include/linux/platform_data/si5351.h112
-rw-r--r--include/linux/platform_data/simplefb.h64
-rw-r--r--include/linux/platform_data/sky81452-backlight.h46
-rw-r--r--include/linux/platform_data/spi-clps711x.h21
-rw-r--r--include/linux/platform_data/spi-davinci.h89
-rw-r--r--include/linux/platform_data/spi-ep93xx.h29
-rw-r--r--include/linux/platform_data/spi-imx.h27
-rw-r--r--include/linux/platform_data/spi-nuc900.h33
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h30
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h70
-rw-r--r--include/linux/platform_data/ssm2518.h22
-rw-r--r--include/linux/platform_data/st1232_pdata.h13
-rw-r--r--include/linux/platform_data/st21nfca.h33
-rw-r--r--include/linux/platform_data/st21nfcb.h29
-rw-r--r--include/linux/platform_data/st33zp24.h28
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h24
-rw-r--r--include/linux/platform_data/syscon.h8
-rw-r--r--include/linux/platform_data/touchscreen-s3c2410.h24
-rw-r--r--include/linux/platform_data/tsl2563.h8
-rw-r--r--include/linux/platform_data/uio_dmem_genirq.h26
-rw-r--r--include/linux/platform_data/uio_pruss.h26
-rw-r--r--include/linux/platform_data/usb-davinci.h59
-rw-r--r--include/linux/platform_data/usb-ehci-mxc.h13
-rw-r--r--include/linux/platform_data/usb-ehci-orion.h24
-rw-r--r--include/linux/platform_data/usb-musb-ux500.h22
-rw-r--r--include/linux/platform_data/usb-mx2.h38
-rw-r--r--include/linux/platform_data/usb-ohci-pxa27x.h36
-rw-r--r--include/linux/platform_data/usb-ohci-s3c2410.h43
-rw-r--r--include/linux/platform_data/usb-omap.h88
-rw-r--r--include/linux/platform_data/usb-omap1.h53
-rw-r--r--include/linux/platform_data/usb-pxa3xx-ulpi.h35
-rw-r--r--include/linux/platform_data/usb-rcar-gen2-phy.h22
-rw-r--r--include/linux/platform_data/usb-rcar-phy.h28
-rw-r--r--include/linux/platform_data/usb-s3c2410_udc.h44
-rw-r--r--include/linux/platform_data/usb3503.h24
-rw-r--r--include/linux/platform_data/ux500_wdt.h19
-rw-r--r--include/linux/platform_data/video-clcd-versatile.h27
-rw-r--r--include/linux/platform_data/video-ep93xx.h52
-rw-r--r--include/linux/platform_data/video-imxfb.h72
-rw-r--r--include/linux/platform_data/video-msm_fb.h146
-rw-r--r--include/linux/platform_data/video-mx3fb.h53
-rw-r--r--include/linux/platform_data/video-nuc900fb.h83
-rw-r--r--include/linux/platform_data/video-pxafb.h173
-rw-r--r--include/linux/platform_data/video_s3c.h54
-rw-r--r--include/linux/platform_data/voltage-omap.h39
-rw-r--r--include/linux/platform_data/wiznet.h24
-rw-r--r--include/linux/platform_data/zforce_ts.h26
-rw-r--r--include/linux/platform_device.h334
-rw-r--r--include/linux/plist.h300
-rw-r--r--include/linux/pm-trace.h35
-rw-r--r--include/linux/pm.h773
-rw-r--r--include/linux/pm2301_charger.h61
-rw-r--r--include/linux/pm_clock.h79
-rw-r--r--include/linux/pm_domain.h326
-rw-r--r--include/linux/pm_opp.h129
-rw-r--r--include/linux/pm_qos.h237
-rw-r--r--include/linux/pm_runtime.h283
-rw-r--r--include/linux/pm_wakeup.h195
-rw-r--r--include/linux/pmu.h85
-rw-r--r--include/linux/pnfs_osd_xdr.h317
-rw-r--r--include/linux/pnp.h525
-rw-r--r--include/linux/poison.h89
-rw-r--r--include/linux/poll.h164
-rw-r--r--include/linux/posix-clock.h151
-rw-r--r--include/linux/posix-timers.h141
-rw-r--r--include/linux/posix_acl.h144
-rw-r--r--include/linux/posix_acl_xattr.h75
-rw-r--r--include/linux/power/ab8500.h16
-rw-r--r--include/linux/power/bq2415x_charger.h58
-rw-r--r--include/linux/power/bq24190_charger.h16
-rw-r--r--include/linux/power/bq24735-charger.h39
-rw-r--r--include/linux/power/bq27x00_battery.h19
-rw-r--r--include/linux/power/charger-manager.h259
-rw-r--r--include/linux/power/generic-adc-battery.h29
-rw-r--r--include/linux/power/gpio-charger.h41
-rw-r--r--include/linux/power/isp1704_charger.h30
-rw-r--r--include/linux/power/jz4740-battery.h24
-rw-r--r--include/linux/power/max17042_battery.h220
-rw-r--r--include/linux/power/max8903_charger.h57
-rw-r--r--include/linux/power/sbs-battery.h42
-rw-r--r--include/linux/power/smartreflex.h318
-rw-r--r--include/linux/power/smb347-charger.h117
-rw-r--r--include/linux/power/twl4030_madc_battery.h39
-rw-r--r--include/linux/power_supply.h390
-rw-r--r--include/linux/powercap.h325
-rw-r--r--include/linux/ppp-comp.h106
-rw-r--r--include/linux/ppp_channel.h88
-rw-r--r--include/linux/ppp_defs.h17
-rw-r--r--include/linux/pps-gpio.h32
-rw-r--r--include/linux/pps_kernel.h140
-rw-r--r--include/linux/preempt.h195
-rw-r--r--include/linux/preempt_mask.h117
-rw-r--r--include/linux/prefetch.h64
-rw-r--r--include/linux/printk.h464
-rw-r--r--include/linux/proc_fs.h85
-rw-r--r--include/linux/proc_ns.h77
-rw-r--r--include/linux/profile.h138
-rw-r--r--include/linux/projid.h89
-rw-r--r--include/linux/property.h167
-rw-r--r--include/linux/proportions.h137
-rw-r--r--include/linux/pstore.h94
-rw-r--r--include/linux/pstore_ram.h89
-rw-r--r--include/linux/pti.h43
-rw-r--r--include/linux/ptp_classify.h78
-rw-r--r--include/linux/ptp_clock_kernel.h192
-rw-r--r--include/linux/ptrace.h386
-rw-r--r--include/linux/pvclock_gtod.h16
-rw-r--r--include/linux/pwm.h312
-rw-r--r--include/linux/pwm_backlight.h25
-rw-r--r--include/linux/pxa168_eth.h33
-rw-r--r--include/linux/pxa2xx_ssp.h261
-rw-r--r--include/linux/qcom_scm.h28
-rw-r--r--include/linux/qnx6_fs.h134
-rw-r--r--include/linux/quicklist.h93
-rw-r--r--include/linux/quota.h528
-rw-r--r--include/linux/quotaops.h386
-rw-r--r--include/linux/radix-tree.h507
-rw-r--r--include/linux/raid/md_u.h20
-rw-r--r--include/linux/raid/pq.h178
-rw-r--r--include/linux/raid/xor.h22
-rw-r--r--include/linux/raid_class.h83
-rw-r--r--include/linux/ramfs.h25
-rw-r--r--include/linux/random.h115
-rw-r--r--include/linux/range.h30
-rw-r--r--include/linux/ras.h14
-rw-r--r--include/linux/ratelimit.h81
-rw-r--r--include/linux/rational.h19
-rw-r--r--include/linux/rbtree.h108
-rw-r--r--include/linux/rbtree_augmented.h242
-rw-r--r--include/linux/rculist.h556
-rw-r--r--include/linux/rculist_bl.h128
-rw-r--r--include/linux/rculist_nulls.h121
-rw-r--r--include/linux/rcupdate.h1191
-rw-r--r--include/linux/rcutiny.h195
-rw-r--r--include/linux/rcutree.h105
-rw-r--r--include/linux/reboot.h83
-rw-r--r--include/linux/reciprocal_div.h35
-rw-r--r--include/linux/regmap.h748
-rw-r--r--include/linux/regset.h375
-rw-r--r--include/linux/regulator/ab8500.h325
-rw-r--r--include/linux/regulator/act8865.h88
-rw-r--r--include/linux/regulator/consumer.h573
-rw-r--r--include/linux/regulator/da9211.h39
-rw-r--r--include/linux/regulator/db8500-prcmu.h45
-rw-r--r--include/linux/regulator/driver.h436
-rw-r--r--include/linux/regulator/fan53555.h61
-rw-r--r--include/linux/regulator/fixed.h76
-rw-r--r--include/linux/regulator/gpio-regulator.h87
-rw-r--r--include/linux/regulator/lp3971.h51
-rw-r--r--include/linux/regulator/lp3972.h48
-rw-r--r--include/linux/regulator/lp872x.h90
-rw-r--r--include/linux/regulator/machine.h212
-rw-r--r--include/linux/regulator/max1586.h63
-rw-r--r--include/linux/regulator/max8649.h44
-rw-r--r--include/linux/regulator/max8660.h57
-rw-r--r--include/linux/regulator/max8952.h135
-rw-r--r--include/linux/regulator/max8973-regulator.h72
-rw-r--r--include/linux/regulator/mt6397-regulator.h49
-rw-r--r--include/linux/regulator/of_regulator.h45
-rw-r--r--include/linux/regulator/pfuze100.h72
-rw-r--r--include/linux/regulator/tps51632-regulator.h47
-rw-r--r--include/linux/regulator/tps62360.h53
-rw-r--r--include/linux/regulator/tps6507x.h32
-rw-r--r--include/linux/regulator/userspace-consumer.h25
-rw-r--r--include/linux/relay.h289
-rw-r--r--include/linux/remoteproc.h507
-rw-r--r--include/linux/reservation.h142
-rw-r--r--include/linux/reset-controller.h54
-rw-r--r--include/linux/reset.h97
-rw-r--r--include/linux/resource.h13
-rw-r--r--include/linux/resource_ext.h77
-rw-r--r--include/linux/rfkill-gpio.h37
-rw-r--r--include/linux/rfkill-regulator.h48
-rw-r--r--include/linux/rfkill.h304
-rw-r--r--include/linux/rhashtable.h822
-rw-r--r--include/linux/ring_buffer.h201
-rw-r--r--include/linux/rio.h485
-rw-r--r--include/linux/rio_drv.h443
-rw-r--r--include/linux/rio_ids.h42
-rw-r--r--include/linux/rio_regs.h295
-rw-r--r--include/linux/rmap.h287
-rw-r--r--include/linux/rndis.h391
-rw-r--r--include/linux/root_dev.h23
-rw-r--r--include/linux/rotary_encoder.h16
-rw-r--r--include/linux/rpmsg.h332
-rw-r--r--include/linux/rslib.h109
-rw-r--r--include/linux/rtc-ds2404.h20
-rw-r--r--include/linux/rtc-v3020.h41
-rw-r--r--include/linux/rtc.h218
-rw-r--r--include/linux/rtc/ds1307.h22
-rw-r--r--include/linux/rtc/ds1685.h375
-rw-r--r--include/linux/rtc/m48t59.h64
-rw-r--r--include/linux/rtc/sirfsoc_rtciobrg.h18
-rw-r--r--include/linux/rtmutex.h101
-rw-r--r--include/linux/rtnetlink.h126
-rw-r--r--include/linux/rwlock.h125
-rw-r--r--include/linux/rwlock_api_smp.h278
-rw-r--r--include/linux/rwlock_types.h48
-rw-r--r--include/linux/rwsem-spinlock.h45
-rw-r--r--include/linux/rwsem.h180
-rw-r--r--include/linux/rxrpc.h69
-rw-r--r--include/linux/s3c_adc_battery.h41
-rw-r--r--include/linux/sa11x0-dma.h24
-rw-r--r--include/linux/scatterlist.h352
-rw-r--r--include/linux/scc.h85
-rw-r--r--include/linux/sched.h3179
-rw-r--r--include/linux/sched/deadline.h24
-rw-r--r--include/linux/sched/prio.h72
-rw-r--r--include/linux/sched/rt.h60
-rw-r--r--include/linux/sched/sysctl.h110
-rw-r--r--include/linux/sched_clock.h20
-rw-r--r--include/linux/screen_info.h8
-rw-r--r--include/linux/sctp.h708
-rw-r--r--include/linux/scx200.h51
-rw-r--r--include/linux/scx200_gpio.h88
-rw-r--r--include/linux/sdb.h159
-rw-r--r--include/linux/sdla.h244
-rw-r--r--include/linux/seccomp.h98
-rw-r--r--include/linux/securebits.h7
-rw-r--r--include/linux/security.h3253
-rw-r--r--include/linux/selection.h44
-rw-r--r--include/linux/selinux.h35
-rw-r--r--include/linux/sem.h52
-rw-r--r--include/linux/semaphore.h46
-rw-r--r--include/linux/seq_buf.h133
-rw-r--r--include/linux/seq_file.h187
-rw-r--r--include/linux/seq_file_net.h30
-rw-r--r--include/linux/seqlock.h478
-rw-r--r--include/linux/seqno-fence.h117
-rw-r--r--include/linux/serial.h33
-rw-r--r--include/linux/serial_8250.h157
-rw-r--r--include/linux/serial_bcm63xx.h119
-rw-r--r--include/linux/serial_core.h478
-rw-r--r--include/linux/serial_max3100.h52
-rw-r--r--include/linux/serial_pnx8xxx.h80
-rw-r--r--include/linux/serial_s3c.h290
-rw-r--r--include/linux/serial_sci.h142
-rw-r--r--include/linux/serio.h150
-rw-r--r--include/linux/sfi.h209
-rw-r--r--include/linux/sfi_acpi.h93
-rw-r--r--include/linux/sh_clk.h216
-rw-r--r--include/linux/sh_dma.h115
-rw-r--r--include/linux/sh_eth.h22
-rw-r--r--include/linux/sh_intc.h149
-rw-r--r--include/linux/sh_timer.h8
-rw-r--r--include/linux/shdma-base.h134
-rw-r--r--include/linux/shm.h81
-rw-r--r--include/linux/shmem_fs.h89
-rw-r--r--include/linux/shrinker.h72
-rw-r--r--include/linux/signal.h445
-rw-r--r--include/linux/signalfd.h34
-rw-r--r--include/linux/sirfsoc_dma.h6
-rw-r--r--include/linux/sizes.h47
-rw-r--r--include/linux/skbuff.h3457
-rw-r--r--include/linux/slab.h600
-rw-r--r--include/linux/slab_def.h79
-rw-r--r--include/linux/slub_def.h132
-rw-r--r--include/linux/sm501-regs.h388
-rw-r--r--include/linux/sm501.h182
-rw-r--r--include/linux/smc911x.h13
-rw-r--r--include/linux/smc91x.h34
-rw-r--r--include/linux/smp.h199
-rw-r--r--include/linux/smpboot.h51
-rw-r--r--include/linux/smsc911x.h63
-rw-r--r--include/linux/smscphy.h30
-rw-r--r--include/linux/soc/ti/knav_dma.h175
-rw-r--r--include/linux/soc/ti/knav_qmss.h90
-rw-r--r--include/linux/sock_diag.h29
-rw-r--r--include/linux/socket.h341
-rw-r--r--include/linux/sonet.h19
-rw-r--r--include/linux/sony-laptop.h34
-rw-r--r--include/linux/sonypi.h63
-rw-r--r--include/linux/sort.h10
-rw-r--r--include/linux/sound.h21
-rw-r--r--include/linux/soundcard.h37
-rw-r--r--include/linux/spi/ad7877.h24
-rw-r--r--include/linux/spi/ad7879.h41
-rw-r--r--include/linux/spi/adi_spi3.h254
-rw-r--r--include/linux/spi/ads7846.h61
-rw-r--r--include/linux/spi/at73c213.h25
-rw-r--r--include/linux/spi/at86rf230.h28
-rw-r--r--include/linux/spi/cc2520.h27
-rw-r--r--include/linux/spi/corgi_lcd.h20
-rw-r--r--include/linux/spi/ds1305.h35
-rw-r--r--include/linux/spi/eeprom.h38
-rw-r--r--include/linux/spi/flash.h31
-rw-r--r--include/linux/spi/ifx_modem.h19
-rw-r--r--include/linux/spi/l4f00242t03.h25
-rw-r--r--include/linux/spi/libertas_spi.h29
-rw-r--r--include/linux/spi/lms283gf05.h24
-rw-r--r--include/linux/spi/max7301.h35
-rw-r--r--include/linux/spi/mc33880.h10
-rw-r--r--include/linux/spi/mcp23s08.h43
-rw-r--r--include/linux/spi/mmc_spi.h64
-rw-r--r--include/linux/spi/mxs-spi.h144
-rw-r--r--include/linux/spi/pxa2xx_spi.h60
-rw-r--r--include/linux/spi/rspi.h26
-rw-r--r--include/linux/spi/s3c24xx.h28
-rw-r--r--include/linux/spi/sh_hspi.h19
-rw-r--r--include/linux/spi/sh_msiof.h14
-rw-r--r--include/linux/spi/spi.h1056
-rw-r--r--include/linux/spi/spi_bitbang.h47
-rw-r--r--include/linux/spi/spi_gpio.h71
-rw-r--r--include/linux/spi/spi_oc_tiny.h20
-rw-r--r--include/linux/spi/tdo24m.h13
-rw-r--r--include/linux/spi/tle62x0.h20
-rw-r--r--include/linux/spi/tsc2005.h34
-rw-r--r--include/linux/spi/xilinx_spi.h19
-rw-r--r--include/linux/spinlock.h429
-rw-r--r--include/linux/spinlock_api_smp.h194
-rw-r--r--include/linux/spinlock_api_up.h92
-rw-r--r--include/linux/spinlock_types.h88
-rw-r--r--include/linux/spinlock_types_up.h37
-rw-r--r--include/linux/spinlock_up.h85
-rw-r--r--include/linux/splice.h92
-rw-r--r--include/linux/spmi.h188
-rw-r--r--include/linux/sradix-tree.h77
-rw-r--r--include/linux/srcu.h252
-rw-r--r--include/linux/ssb/ssb.h681
-rw-r--r--include/linux/ssb/ssb_driver_chipcommon.h673
-rw-r--r--include/linux/ssb/ssb_driver_extif.h259
-rw-r--r--include/linux/ssb/ssb_driver_gige.h193
-rw-r--r--include/linux/ssb/ssb_driver_mips.h70
-rw-r--r--include/linux/ssb/ssb_driver_pci.h130
-rw-r--r--include/linux/ssb/ssb_embedded.h18
-rw-r--r--include/linux/ssb/ssb_regs.h686
-rw-r--r--include/linux/ssbi.h43
-rw-r--r--include/linux/stackprotector.h16
-rw-r--r--include/linux/stacktrace.h40
-rw-r--r--include/linux/start_kernel.h12
-rw-r--r--include/linux/stat.h37
-rw-r--r--include/linux/statfs.h43
-rw-r--r--include/linux/static_key.h1
-rw-r--r--include/linux/stddef.h30
-rw-r--r--include/linux/ste_modem_shm.h56
-rw-r--r--include/linux/stmmac.h147
-rw-r--r--include/linux/stmp3xxx_rtc_wdt.h15
-rw-r--r--include/linux/stmp_device.h20
-rw-r--r--include/linux/stop_machine.h157
-rw-r--r--include/linux/string.h161
-rw-r--r--include/linux/string_helpers.h71
-rw-r--r--include/linux/stringify.h12
-rw-r--r--include/linux/sudmac.h52
-rw-r--r--include/linux/sungem_phy.h132
-rw-r--r--include/linux/sunrpc/addr.h170
-rw-r--r--include/linux/sunrpc/auth.h198
-rw-r--r--include/linux/sunrpc/auth_gss.h92
-rw-r--r--include/linux/sunrpc/bc_xprt.h68
-rw-r--r--include/linux/sunrpc/cache.h298
-rw-r--r--include/linux/sunrpc/clnt.h185
-rw-r--r--include/linux/sunrpc/debug.h107
-rw-r--r--include/linux/sunrpc/gss_api.h162
-rw-r--r--include/linux/sunrpc/gss_asn1.h81
-rw-r--r--include/linux/sunrpc/gss_err.h167
-rw-r--r--include/linux/sunrpc/gss_krb5.h331
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h4
-rw-r--r--include/linux/sunrpc/metrics.h102
-rw-r--r--include/linux/sunrpc/msg_prot.h220
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h137
-rw-r--r--include/linux/sunrpc/rpc_rdma.h130
-rw-r--r--include/linux/sunrpc/sched.h272
-rw-r--r--include/linux/sunrpc/stats.h84
-rw-r--r--include/linux/sunrpc/svc.h468
-rw-r--r--include/linux/sunrpc/svc_rdma.h315
-rw-r--r--include/linux/sunrpc/svc_xprt.h210
-rw-r--r--include/linux/sunrpc/svcauth.h200
-rw-r--r--include/linux/sunrpc/svcauth_gss.h27
-rw-r--r--include/linux/sunrpc/svcsock.h74
-rw-r--r--include/linux/sunrpc/timer.h49
-rw-r--r--include/linux/sunrpc/types.h22
-rw-r--r--include/linux/sunrpc/xdr.h235
-rw-r--r--include/linux/sunrpc/xprt.h436
-rw-r--r--include/linux/sunrpc/xprtrdma.h71
-rw-r--r--include/linux/sunrpc/xprtsock.h81
-rw-r--r--include/linux/sunserialcore.h37
-rw-r--r--include/linux/superhyway.h107
-rw-r--r--include/linux/suspend.h568
-rw-r--r--include/linux/svga.h124
-rw-r--r--include/linux/swab.h21
-rw-r--r--include/linux/swap.h549
-rw-r--r--include/linux/swap_cgroup.h42
-rw-r--r--include/linux/swapfile.h13
-rw-r--r--include/linux/swapops.h205
-rw-r--r--include/linux/swiotlb.h121
-rw-r--r--include/linux/sxgbe_platform.h54
-rw-r--r--include/linux/synclink.h37
-rw-r--r--include/linux/sys.h29
-rw-r--r--include/linux/sys_soc.h37
-rw-r--r--include/linux/syscalls.h887
-rw-r--r--include/linux/syscore_ops.h29
-rw-r--r--include/linux/sysctl.h221
-rw-r--r--include/linux/sysfs.h510
-rw-r--r--include/linux/syslog.h61
-rw-r--r--include/linux/sysrq.h75
-rw-r--r--include/linux/sysv_fs.h213
-rw-r--r--include/linux/t10-pi.h22
-rw-r--r--include/linux/task_io_accounting.h45
-rw-r--r--include/linux/task_io_accounting_ops.h113
-rw-r--r--include/linux/task_work.h24
-rw-r--r--include/linux/taskstats_kern.h36
-rw-r--r--include/linux/tboot.h162
-rw-r--r--include/linux/tc.h141
-rw-r--r--include/linux/tca6416_keypad.h34
-rw-r--r--include/linux/tcp.h414
-rw-r--r--include/linux/textsearch.h178
-rw-r--r--include/linux/textsearch_fsm.h48
-rw-r--r--include/linux/tfrc.h55
-rw-r--r--include/linux/thermal.h407
-rw-r--r--include/linux/thinkpad_acpi.h15
-rw-r--r--include/linux/thinkpad_ec.h47
-rw-r--r--include/linux/thread_info.h150
-rw-r--r--include/linux/threads.h45
-rw-r--r--include/linux/ti_wilink_st.h452
-rw-r--r--include/linux/tick.h182
-rw-r--r--include/linux/tifm.h164
-rw-r--r--include/linux/timb_dma.h55
-rw-r--r--include/linux/timb_gpio.h37
-rw-r--r--include/linux/time.h239
-rw-r--r--include/linux/time64.h190
-rw-r--r--include/linux/timecounter.h139
-rw-r--r--include/linux/timekeeper_internal.h131
-rw-r--r--include/linux/timekeeping.h275
-rw-r--r--include/linux/timer.h267
-rw-r--r--include/linux/timerfd.h37
-rw-r--r--include/linux/timeriomem-rng.h16
-rw-r--r--include/linux/timerqueue.h50
-rw-r--r--include/linux/timex.h163
-rw-r--r--include/linux/topology.h214
-rw-r--r--include/linux/torture.h95
-rw-r--r--include/linux/toshiba.h25
-rw-r--r--include/linux/tpm.h67
-rw-r--r--include/linux/tpm_command.h28
-rw-r--r--include/linux/trace_clock.h23
-rw-r--r--include/linux/trace_seq.h138
-rw-r--r--include/linux/tracefs.h45
-rw-r--r--include/linux/tracehook.h193
-rw-r--r--include/linux/tracepoint.h480
-rw-r--r--include/linux/transport_class.h102
-rw-r--r--include/linux/tsacct_kern.h41
-rw-r--r--include/linux/tty.h712
-rw-r--r--include/linux/tty_driver.h440
-rw-r--r--include/linux/tty_flip.h41
-rw-r--r--include/linux/tty_ldisc.h233
-rw-r--r--include/linux/tuxonice.h48
-rw-r--r--include/linux/typecheck.h24
-rw-r--r--include/linux/types.h219
-rw-r--r--include/linux/u64_stats_sync.h147
-rw-r--r--include/linux/uaccess.h110
-rw-r--r--include/linux/ucb1400.h165
-rw-r--r--include/linux/ucs2_string.h14
-rw-r--r--include/linux/udp.h109
-rw-r--r--include/linux/uidgid.h190
-rw-r--r--include/linux/uinput.h76
-rw-r--r--include/linux/uio.h165
-rw-r--r--include/linux/uio_driver.h138
-rw-r--r--include/linux/uksm.h146
-rw-r--r--include/linux/unaligned/access_ok.h67
-rw-r--r--include/linux/unaligned/be_byteshift.h70
-rw-r--r--include/linux/unaligned/be_memmove.h36
-rw-r--r--include/linux/unaligned/be_struct.h36
-rw-r--r--include/linux/unaligned/generic.h68
-rw-r--r--include/linux/unaligned/le_byteshift.h70
-rw-r--r--include/linux/unaligned/le_memmove.h36
-rw-r--r--include/linux/unaligned/le_struct.h36
-rw-r--r--include/linux/unaligned/memmove.h45
-rw-r--r--include/linux/unaligned/packed_struct.h46
-rw-r--r--include/linux/uprobes.h189
-rw-r--r--include/linux/usb.h1900
-rw-r--r--include/linux/usb/association.h150
-rw-r--r--include/linux/usb/atmel_usba_udc.h23
-rw-r--r--include/linux/usb/audio-v2.h461
-rw-r--r--include/linux/usb/audio.h44
-rw-r--r--include/linux/usb/c67x00.h48
-rw-r--r--include/linux/usb/cdc-wdm.h21
-rw-r--r--include/linux/usb/cdc_ncm.h145
-rw-r--r--include/linux/usb/ch9.h55
-rw-r--r--include/linux/usb/chipidea.h50
-rw-r--r--include/linux/usb/composite.h627
-rw-r--r--include/linux/usb/ehci-dbgp.h83
-rw-r--r--include/linux/usb/ehci_def.h195
-rw-r--r--include/linux/usb/ehci_pdriver.h63
-rw-r--r--include/linux/usb/ezusb.h8
-rw-r--r--include/linux/usb/functionfs.h6
-rw-r--r--include/linux/usb/g_hid.h32
-rw-r--r--include/linux/usb/gadget.h1054
-rw-r--r--include/linux/usb/gadget_configfs.h110
-rw-r--r--include/linux/usb/gpio_vbus.h32
-rw-r--r--include/linux/usb/hcd.h709
-rw-r--r--include/linux/usb/input.h25
-rw-r--r--include/linux/usb/iowarrior.h42
-rw-r--r--include/linux/usb/irda.h151
-rw-r--r--include/linux/usb/isp116x.h33
-rw-r--r--include/linux/usb/isp1301.h80
-rw-r--r--include/linux/usb/isp1362.h46
-rw-r--r--include/linux/usb/isp1760.h18
-rw-r--r--include/linux/usb/m66592.h46
-rw-r--r--include/linux/usb/msm_hsusb.h171
-rw-r--r--include/linux/usb/msm_hsusb_hw.h67
-rw-r--r--include/linux/usb/musb-omap.h30
-rw-r--r--include/linux/usb/musb-ux500.h31
-rw-r--r--include/linux/usb/musb.h155
-rw-r--r--include/linux/usb/net2280.h443
-rw-r--r--include/linux/usb/of.h45
-rw-r--r--include/linux/usb/ohci_pdriver.h48
-rw-r--r--include/linux/usb/otg-fsm.h246
-rw-r--r--include/linux/usb/otg.h107
-rw-r--r--include/linux/usb/phy.h329
-rw-r--r--include/linux/usb/phy_companion.h34
-rw-r--r--include/linux/usb/quirks.h50
-rw-r--r--include/linux/usb/r8a66597.h481
-rw-r--r--include/linux/usb/renesas_usbhs.h216
-rw-r--r--include/linux/usb/rndis_host.h210
-rw-r--r--include/linux/usb/samsung_usb_phy.h16
-rw-r--r--include/linux/usb/serial.h414
-rw-r--r--include/linux/usb/sl811.h29
-rw-r--r--include/linux/usb/storage.h86
-rw-r--r--include/linux/usb/tegra_usb_phy.h89
-rw-r--r--include/linux/usb/tilegx.h34
-rw-r--r--include/linux/usb/uas.h109
-rw-r--r--include/linux/usb/ulpi.h200
-rw-r--r--include/linux/usb/usb338x.h199
-rw-r--r--include/linux/usb/usb_phy_generic.h33
-rw-r--r--include/linux/usb/usbnet.h282
-rw-r--r--include/linux/usb/wusb-wa.h303
-rw-r--r--include/linux/usb/wusb.h377
-rw-r--r--include/linux/usb/xhci_pdriver.h27
-rw-r--r--include/linux/usb_usual.h92
-rw-r--r--include/linux/usbdevice_fs.h80
-rw-r--r--include/linux/user-return-notifier.h49
-rw-r--r--include/linux/user.h1
-rw-r--r--include/linux/user_namespace.h105
-rw-r--r--include/linux/util_macros.h40
-rw-r--r--include/linux/uts.h19
-rw-r--r--include/linux/utsname.h84
-rw-r--r--include/linux/uuid.h39
-rw-r--r--include/linux/uwb.h831
-rw-r--r--include/linux/uwb/debug-cmd.h68
-rw-r--r--include/linux/uwb/spec.h781
-rw-r--r--include/linux/uwb/umc.h193
-rw-r--r--include/linux/uwb/whci.h117
-rw-r--r--include/linux/verify_pefile.h18
-rw-r--r--include/linux/vermagic.h33
-rw-r--r--include/linux/vexpress.h54
-rw-r--r--include/linux/vfio.h138
-rw-r--r--include/linux/vfs.h6
-rw-r--r--include/linux/vga_switcheroo.h90
-rw-r--r--include/linux/vgaarb.h254
-rw-r--r--include/linux/via-core.h236
-rw-r--r--include/linux/via-gpio.h14
-rw-r--r--include/linux/via.h22
-rw-r--r--include/linux/via_i2c.h42
-rw-r--r--include/linux/videodev2.h62
-rw-r--r--include/linux/virtio.h173
-rw-r--r--include/linux/virtio_byteorder.h59
-rw-r--r--include/linux/virtio_caif.h24
-rw-r--r--include/linux/virtio_config.h398
-rw-r--r--include/linux/virtio_console.h38
-rw-r--r--include/linux/virtio_mmio.h141
-rw-r--r--include/linux/virtio_ring.h67
-rw-r--r--include/linux/vlynq.h162
-rw-r--r--include/linux/vm_event_item.h98
-rw-r--r--include/linux/vm_sockets.h23
-rw-r--r--include/linux/vmacache.h38
-rw-r--r--include/linux/vmalloc.h203
-rw-r--r--include/linux/vme.h176
-rw-r--r--include/linux/vmpressure.h48
-rw-r--r--include/linux/vmstat.h290
-rw-r--r--include/linux/vmw_vmci_api.h83
-rw-r--r--include/linux/vmw_vmci_defs.h880
-rw-r--r--include/linux/vringh.h258
-rw-r--r--include/linux/vt.h27
-rw-r--r--include/linux/vt_buffer.h59
-rw-r--r--include/linux/vt_kern.h196
-rw-r--r--include/linux/vtime.h125
-rw-r--r--include/linux/w1-gpio.h26
-rw-r--r--include/linux/wait.h1155
-rw-r--r--include/linux/wanrouter.h10
-rw-r--r--include/linux/watchdog.h148
-rw-r--r--include/linux/wimax/debug.h526
-rw-r--r--include/linux/wireless.h44
-rw-r--r--include/linux/wl12xx.h58
-rw-r--r--include/linux/wm97xx.h337
-rw-r--r--include/linux/workqueue.h592
-rw-r--r--include/linux/writeback.h189
-rw-r--r--include/linux/ww_mutex.h378
-rw-r--r--include/linux/xattr.h100
-rw-r--r--include/linux/xz.h264
-rw-r--r--include/linux/yam.h82
-rw-r--r--include/linux/z2_battery.h17
-rw-r--r--include/linux/zbud.h22
-rw-r--r--include/linux/zconf.h57
-rw-r--r--include/linux/zlib.h593
-rw-r--r--include/linux/zorro.h151
-rw-r--r--include/linux/zpool.h107
-rw-r--r--include/linux/zsmalloc.h52
-rw-r--r--include/linux/zutil.h106
1979 files changed, 349845 insertions, 0 deletions
diff --git a/include/linux/8250_pci.h b/include/linux/8250_pci.h
new file mode 100644
index 000000000..b24ff086a
--- /dev/null
+++ b/include/linux/8250_pci.h
@@ -0,0 +1,37 @@
+/*
+ * Definitions for PCI support.
+ */
+#define FL_BASE_MASK 0x0007
+#define FL_BASE0 0x0000
+#define FL_BASE1 0x0001
+#define FL_BASE2 0x0002
+#define FL_BASE3 0x0003
+#define FL_BASE4 0x0004
+#define FL_GET_BASE(x) (x & FL_BASE_MASK)
+
+/* Use successive BARs (PCI base address registers),
+ else use offset into some specified BAR */
+#define FL_BASE_BARS 0x0008
+
+/* do not assign an irq */
+#define FL_NOIRQ 0x0080
+
+/* Use the Base address register size to cap number of ports */
+#define FL_REGION_SZ_CAP 0x0100
+
+struct pciserial_board {
+ unsigned int flags;
+ unsigned int num_ports;
+ unsigned int base_baud;
+ unsigned int uart_offset;
+ unsigned int reg_shift;
+ unsigned int first_offset;
+};
+
+struct serial_private;
+
+struct serial_private *
+pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board);
+void pciserial_remove_ports(struct serial_private *priv);
+void pciserial_suspend_ports(struct serial_private *priv);
+void pciserial_resume_ports(struct serial_private *priv);
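
For orientation, a minimal sketch of how a PCI serial driver might consume the interface declared above. Only the pciserial_*() calls and struct pciserial_board come from 8250_pci.h; the board layout values and the my_probe()/my_remove() hooks are hypothetical, and the assumption that pciserial_init_ports() reports failure via ERR_PTR() is not spelled out in this header.

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/8250_pci.h>

/* Hypothetical board: two ports, 8 bytes apart, mapped through BAR 0. */
static const struct pciserial_board my_board = {
        .flags          = FL_BASE0,
        .num_ports      = 2,
        .base_baud      = 115200,
        .uart_offset    = 8,
};

static int my_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        struct serial_private *priv;
        int rc;

        rc = pci_enable_device(dev);
        if (rc)
                return rc;

        priv = pciserial_init_ports(dev, &my_board);
        if (IS_ERR(priv)) {
                pci_disable_device(dev);
                return PTR_ERR(priv);
        }

        pci_set_drvdata(dev, priv);
        return 0;
}

static void my_remove(struct pci_dev *dev)
{
        pciserial_remove_ports(pci_get_drvdata(dev));
        pci_disable_device(dev);
}
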
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
new file mode 100644
index 000000000..ee8841689
--- /dev/null
+++ b/include/linux/a.out.h
@@ -0,0 +1,17 @@
+#ifndef __A_OUT_GNU_H__
+#define __A_OUT_GNU_H__
+
+#include <uapi/linux/a.out.h>
+
+#ifndef __ASSEMBLY__
+#ifdef linux
+#include <asm/page.h>
+#if defined(__i386__) || defined(__mc68000__)
+#else
+#ifndef SEGMENT_SIZE
+#define SEGMENT_SIZE PAGE_SIZE
+#endif
+#endif
+#endif
+#endif /*__ASSEMBLY__ */
+#endif /* __A_OUT_GNU_H__ */
diff --git a/include/linux/acct.h b/include/linux/acct.h
new file mode 100644
index 000000000..dccc2d4fe
--- /dev/null
+++ b/include/linux/acct.h
@@ -0,0 +1,105 @@
+/*
+ * BSD Process Accounting for Linux - Definitions
+ *
+ * Author: Marco van Wieringen (mvw@planets.elm.net)
+ *
+ * This header file contains the definitions needed to implement
+ * BSD-style process accounting. The kernel accounting code and all
+ * user-level programs that try to do something useful with the
+ * process accounting log must include this file.
+ *
+ * Copyright (C) 1995 - 1997 Marco van Wieringen - ELM Consultancy B.V.
+ *
+ */
+#ifndef _LINUX_ACCT_H
+#define _LINUX_ACCT_H
+
+#include <uapi/linux/acct.h>
+
+
+
+#ifdef CONFIG_BSD_PROCESS_ACCT
+struct vfsmount;
+struct super_block;
+struct pacct_struct;
+struct pid_namespace;
+extern int acct_parm[]; /* for sysctl */
+extern void acct_collect(long exitcode, int group_dead);
+extern void acct_process(void);
+extern void acct_exit_ns(struct pid_namespace *);
+#else
+#define acct_collect(x,y) do { } while (0)
+#define acct_process() do { } while (0)
+#define acct_exit_ns(ns) do { } while (0)
+#endif
+
+/*
+ * ACCT_VERSION numbers as yet defined:
+ * 0: old format (until 2.6.7) with 16 bit uid/gid
+ * 1: extended variant (binary compatible on M68K)
+ * 2: extended variant (binary compatible on everything except M68K)
+ * 3: new binary incompatible format (64 bytes)
+ * 4: new binary incompatible format (128 bytes)
+ * 5: new binary incompatible format (128 bytes, second half)
+ *
+ */
+
+#undef ACCT_VERSION
+#undef AHZ
+
+#ifdef CONFIG_BSD_PROCESS_ACCT_V3
+#define ACCT_VERSION 3
+#define AHZ 100
+typedef struct acct_v3 acct_t;
+#else
+#ifdef CONFIG_M68K
+#define ACCT_VERSION 1
+#else
+#define ACCT_VERSION 2
+#endif
+#define AHZ (USER_HZ)
+typedef struct acct acct_t;
+#endif
+
+#include <linux/jiffies.h>
+/*
+ * Yet another set of HZ to *HZ helper functions.
+ * See <linux/jiffies.h> for the original.
+ */
+
+static inline u32 jiffies_to_AHZ(unsigned long x)
+{
+#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0
+# if HZ < AHZ
+ return x * (AHZ / HZ);
+# else
+ return x / (HZ / AHZ);
+# endif
+#else
+ u64 tmp = (u64)x * TICK_NSEC;
+ do_div(tmp, (NSEC_PER_SEC / AHZ));
+ return (long)tmp;
+#endif
+}
+
+static inline u64 nsec_to_AHZ(u64 x)
+{
+#if (NSEC_PER_SEC % AHZ) == 0
+ do_div(x, (NSEC_PER_SEC / AHZ));
+#elif (AHZ % 512) == 0
+ x *= AHZ/512;
+ do_div(x, (NSEC_PER_SEC / 512));
+#else
+ /*
+ * max relative error 5.7e-8 (1.8s per year) for AHZ <= 1024,
+ * overflow after 64.99 years.
+ * exact for AHZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
+ */
+ x *= 9;
+ do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (AHZ/2))
+ / AHZ));
+#endif
+ return x;
+}
+
+#endif /* _LINUX_ACCT_H */
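
As a quick illustration of the helpers above: jiffies_to_AHZ() and nsec_to_AHZ() rescale kernel time bases into the fixed 1/AHZ ticks stored in the accounting record. The example_fill_times() wrapper below is hypothetical; only the two conversion helpers come from acct.h.

#include <linux/acct.h>

/* Hypothetical: express an elapsed wall-clock interval (nanoseconds) and a
 * jiffies-based CPU time in AHZ ticks, the unit used by the on-disk record. */
static void example_fill_times(u64 elapsed_ns, unsigned long utime_jiffies,
                               u64 *etime_ahz, u32 *utime_ahz)
{
        *etime_ahz = nsec_to_AHZ(elapsed_ns);
        *utime_ahz = jiffies_to_AHZ(utime_jiffies);
}
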
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
new file mode 100644
index 000000000..4550be3bb
--- /dev/null
+++ b/include/linux/acpi.h
@@ -0,0 +1,815 @@
+/*
+ * acpi.h - ACPI Interface
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef _LINUX_ACPI_H
+#define _LINUX_ACPI_H
+
+#include <linux/errno.h>
+#include <linux/ioport.h> /* for struct resource */
+#include <linux/resource_ext.h>
+#include <linux/device.h>
+#include <linux/property.h>
+
+#ifndef _LINUX
+#define _LINUX
+#endif
+#include <acpi/acpi.h>
+
+#ifdef CONFIG_ACPI
+
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dynamic_debug.h>
+
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_numa.h>
+#include <acpi/acpi_io.h>
+#include <asm/acpi.h>
+
+static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
+{
+ return adev ? adev->handle : NULL;
+}
+
+#define ACPI_COMPANION(dev) acpi_node((dev)->fwnode)
+#define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \
+ acpi_fwnode_handle(adev) : NULL)
+#define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev))
+
+static inline bool has_acpi_companion(struct device *dev)
+{
+ return is_acpi_node(dev->fwnode);
+}
+
+static inline void acpi_preset_companion(struct device *dev,
+ struct acpi_device *parent, u64 addr)
+{
+ ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL));
+}
+
+static inline const char *acpi_dev_name(struct acpi_device *adev)
+{
+ return dev_name(&adev->dev);
+}
+
+enum acpi_irq_model_id {
+ ACPI_IRQ_MODEL_PIC = 0,
+ ACPI_IRQ_MODEL_IOAPIC,
+ ACPI_IRQ_MODEL_IOSAPIC,
+ ACPI_IRQ_MODEL_PLATFORM,
+ ACPI_IRQ_MODEL_GIC,
+ ACPI_IRQ_MODEL_COUNT
+};
+
+extern enum acpi_irq_model_id acpi_irq_model;
+
+enum acpi_interrupt_id {
+ ACPI_INTERRUPT_PMI = 1,
+ ACPI_INTERRUPT_INIT,
+ ACPI_INTERRUPT_CPEI,
+ ACPI_INTERRUPT_COUNT
+};
+
+#define ACPI_SPACE_MEM 0
+
+enum acpi_address_range_id {
+ ACPI_ADDRESS_RANGE_MEMORY = 1,
+ ACPI_ADDRESS_RANGE_RESERVED = 2,
+ ACPI_ADDRESS_RANGE_ACPI = 3,
+ ACPI_ADDRESS_RANGE_NVS = 4,
+ ACPI_ADDRESS_RANGE_COUNT
+};
+
+
+/* Table Handlers */
+
+typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
+
+typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header,
+ const unsigned long end);
+
+#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+void acpi_initrd_override(void *data, size_t size);
+#else
+static inline void acpi_initrd_override(void *data, size_t size)
+{
+}
+#endif
+
+#define BAD_MADT_ENTRY(entry, end) ( \
+ (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
+ ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
+
+char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+void __acpi_unmap_table(char *map, unsigned long size);
+int early_acpi_boot_init(void);
+int acpi_boot_init (void);
+void acpi_boot_table_init (void);
+int acpi_mps_check (void);
+int acpi_numa_init (void);
+
+int acpi_table_init (void);
+int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
+int __init acpi_parse_entries(char *id, unsigned long table_size,
+ acpi_tbl_entry_handler handler,
+ struct acpi_table_header *table_header,
+ int entry_id, unsigned int max_entries);
+int __init acpi_table_parse_entries(char *id, unsigned long table_size,
+ int entry_id,
+ acpi_tbl_entry_handler handler,
+ unsigned int max_entries);
+int acpi_table_parse_madt(enum acpi_madt_type id,
+ acpi_tbl_entry_handler handler,
+ unsigned int max_entries);
+int acpi_parse_mcfg (struct acpi_table_header *header);
+void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
+
+/* the following four functions are architecture-dependent */
+void acpi_numa_slit_init (struct acpi_table_slit *slit);
+void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
+void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
+int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
+void acpi_numa_arch_fixup(void);
+
+#ifndef PHYS_CPUID_INVALID
+typedef u32 phys_cpuid_t;
+#define PHYS_CPUID_INVALID (phys_cpuid_t)(-1)
+#endif
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+/* Arch dependent functions for cpu hotplug support */
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
+int acpi_unmap_cpu(int cpu);
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
+#endif
+
+int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
+int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
+int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
+void acpi_irq_stats_init(void);
+extern u32 acpi_irq_handled;
+extern u32 acpi_irq_not_handled;
+
+extern int sbf_port;
+extern unsigned long acpi_realmode_flags;
+
+int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
+int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+int acpi_isa_irq_to_gsi (unsigned isa_irq, u32 *gsi);
+
+#ifdef CONFIG_X86_IO_APIC
+extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
+#else
+#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+#endif
+/*
+ * This function undoes the effect of one call to acpi_register_gsi().
+ * If this matches the last registration, any IRQ resources for gsi
+ * are freed.
+ */
+void acpi_unregister_gsi (u32 gsi);
+
+struct pci_dev;
+
+int acpi_pci_irq_enable (struct pci_dev *dev);
+void acpi_penalize_isa_irq(int irq, int active);
+
+void acpi_pci_irq_disable (struct pci_dev *dev);
+
+extern int ec_read(u8 addr, u8 *val);
+extern int ec_write(u8 addr, u8 val);
+extern int ec_transaction(u8 command,
+ const u8 *wdata, unsigned wdata_len,
+ u8 *rdata, unsigned rdata_len);
+extern acpi_handle ec_get_handle(void);
+
+extern bool acpi_is_pnp_device(struct acpi_device *);
+
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+typedef void (*wmi_notify_handler) (u32 value, void *context);
+
+extern acpi_status wmi_evaluate_method(const char *guid, u8 instance,
+ u32 method_id,
+ const struct acpi_buffer *in,
+ struct acpi_buffer *out);
+extern acpi_status wmi_query_block(const char *guid, u8 instance,
+ struct acpi_buffer *out);
+extern acpi_status wmi_set_block(const char *guid, u8 instance,
+ const struct acpi_buffer *in);
+extern acpi_status wmi_install_notify_handler(const char *guid,
+ wmi_notify_handler handler, void *data);
+extern acpi_status wmi_remove_notify_handler(const char *guid);
+extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
+extern bool wmi_has_guid(const char *guid);
+
+#endif /* CONFIG_ACPI_WMI */
+
+#define ACPI_VIDEO_OUTPUT_SWITCHING 0x0001
+#define ACPI_VIDEO_DEVICE_POSTING 0x0002
+#define ACPI_VIDEO_ROM_AVAILABLE 0x0004
+#define ACPI_VIDEO_BACKLIGHT 0x0008
+#define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR 0x0010
+#define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO 0x0020
+#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR 0x0040
+#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO 0x0080
+#define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR 0x0100
+#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200
+#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
+#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
+
+#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
+
+extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern long acpi_is_video_device(acpi_handle handle);
+extern void acpi_video_dmi_promote_vendor(void);
+extern void acpi_video_dmi_demote_vendor(void);
+extern int acpi_video_backlight_support(void);
+extern int acpi_video_display_switch_support(void);
+
+#else
+
+static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
+{
+ return 0;
+}
+
+static inline long acpi_is_video_device(acpi_handle handle)
+{
+ return 0;
+}
+
+static inline void acpi_video_dmi_promote_vendor(void)
+{
+}
+
+static inline void acpi_video_dmi_demote_vendor(void)
+{
+}
+
+static inline int acpi_video_backlight_support(void)
+{
+ return 0;
+}
+
+static inline int acpi_video_display_switch_support(void)
+{
+ return 0;
+}
+
+#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
+
+extern int acpi_blacklisted(void);
+extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
+extern void acpi_osi_setup(char *str);
+
+#ifdef CONFIG_ACPI_NUMA
+int acpi_get_node(acpi_handle handle);
+#else
+static inline int acpi_get_node(acpi_handle handle)
+{
+ return 0;
+}
+#endif
+extern int acpi_paddr_to_node(u64 start_addr, u64 size);
+
+extern int pnpacpi_disabled;
+
+#define PXM_INVAL (-1)
+
+bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res);
+bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res);
+bool acpi_dev_resource_address_space(struct acpi_resource *ares,
+ struct resource_win *win);
+bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares,
+ struct resource_win *win);
+unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable);
+bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
+ struct resource *res);
+
+void acpi_dev_free_resource_list(struct list_head *list);
+int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
+ int (*preproc)(struct acpi_resource *, void *),
+ void *preproc_data);
+int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+ unsigned long types);
+
+static inline int acpi_dev_filter_resource_type_cb(struct acpi_resource *ares,
+ void *arg)
+{
+ return acpi_dev_filter_resource_type(ares, (unsigned long)arg);
+}
+
+int acpi_check_resource_conflict(const struct resource *res);
+
+int acpi_check_region(resource_size_t start, resource_size_t n,
+ const char *name);
+
+int acpi_resources_are_enforced(void);
+
+#ifdef CONFIG_HIBERNATION
+void __init acpi_no_s4_hw_signature(void);
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+void __init acpi_old_suspend_ordering(void);
+void __init acpi_nvs_nosave(void);
+void __init acpi_nvs_nosave_s3(void);
+#endif /* CONFIG_PM_SLEEP */
+
+struct acpi_osc_context {
+ char *uuid_str; /* UUID string */
+ int rev;
+ struct acpi_buffer cap; /* list of DWORD capabilities */
+ struct acpi_buffer ret; /* free by caller if success */
+};
+
+acpi_status acpi_str_to_uuid(char *str, u8 *uuid);
+acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
+
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+#define OSC_QUERY_DWORD 0 /* DWORD 1 */
+#define OSC_SUPPORT_DWORD 1 /* DWORD 2 */
+#define OSC_CONTROL_DWORD 2 /* DWORD 3 */
+
+/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
+#define OSC_QUERY_ENABLE 0x00000001 /* input */
+#define OSC_REQUEST_ERROR 0x00000002 /* return */
+#define OSC_INVALID_UUID_ERROR 0x00000004 /* return */
+#define OSC_INVALID_REVISION_ERROR 0x00000008 /* return */
+#define OSC_CAPABILITIES_MASK_ERROR 0x00000010 /* return */
+
+/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_SB_PAD_SUPPORT 0x00000001
+#define OSC_SB_PPC_OST_SUPPORT 0x00000002
+#define OSC_SB_PR3_SUPPORT 0x00000004
+#define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008
+#define OSC_SB_APEI_SUPPORT 0x00000010
+#define OSC_SB_CPC_SUPPORT 0x00000020
+
+extern bool osc_sb_apei_support_acked;
+
+/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001
+#define OSC_PCI_ASPM_SUPPORT 0x00000002
+#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
+#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
+#define OSC_PCI_MSI_SUPPORT 0x00000010
+#define OSC_PCI_SUPPORT_MASKS 0x0000001f
+
+/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
+#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
+#define OSC_PCI_SHPC_NATIVE_HP_CONTROL 0x00000002
+#define OSC_PCI_EXPRESS_PME_CONTROL 0x00000004
+#define OSC_PCI_EXPRESS_AER_CONTROL 0x00000008
+#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010
+#define OSC_PCI_CONTROL_MASKS 0x0000001f
+
+#define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002
+#define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004
+#define ACPI_GSB_ACCESS_ATTRIB_BYTE 0x00000006
+#define ACPI_GSB_ACCESS_ATTRIB_WORD 0x00000008
+#define ACPI_GSB_ACCESS_ATTRIB_BLOCK 0x0000000A
+#define ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE 0x0000000B
+#define ACPI_GSB_ACCESS_ATTRIB_WORD_CALL 0x0000000C
+#define ACPI_GSB_ACCESS_ATTRIB_BLOCK_CALL 0x0000000D
+#define ACPI_GSB_ACCESS_ATTRIB_RAW_BYTES 0x0000000E
+#define ACPI_GSB_ACCESS_ATTRIB_RAW_PROCESS 0x0000000F
+
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+ u32 *mask, u32 req);
+
+/* Enable _OST when all relevant hotplug operations are enabled */
+#if defined(CONFIG_ACPI_HOTPLUG_CPU) && \
+ defined(CONFIG_ACPI_HOTPLUG_MEMORY) && \
+ defined(CONFIG_ACPI_CONTAINER)
+#define ACPI_HOTPLUG_OST
+#endif
+
+/* _OST Source Event Code (OSPM Action) */
+#define ACPI_OST_EC_OSPM_SHUTDOWN 0x100
+#define ACPI_OST_EC_OSPM_EJECT 0x103
+#define ACPI_OST_EC_OSPM_INSERTION 0x200
+
+/* _OST General Processing Status Code */
+#define ACPI_OST_SC_SUCCESS 0x0
+#define ACPI_OST_SC_NON_SPECIFIC_FAILURE 0x1
+#define ACPI_OST_SC_UNRECOGNIZED_NOTIFY 0x2
+
+/* _OST OS Shutdown Processing (0x100) Status Code */
+#define ACPI_OST_SC_OS_SHUTDOWN_DENIED 0x80
+#define ACPI_OST_SC_OS_SHUTDOWN_IN_PROGRESS 0x81
+#define ACPI_OST_SC_OS_SHUTDOWN_COMPLETED 0x82
+#define ACPI_OST_SC_OS_SHUTDOWN_NOT_SUPPORTED 0x83
+
+/* _OST Ejection Request (0x3, 0x103) Status Code */
+#define ACPI_OST_SC_EJECT_NOT_SUPPORTED 0x80
+#define ACPI_OST_SC_DEVICE_IN_USE 0x81
+#define ACPI_OST_SC_DEVICE_BUSY 0x82
+#define ACPI_OST_SC_EJECT_DEPENDENCY_BUSY 0x83
+#define ACPI_OST_SC_EJECT_IN_PROGRESS 0x84
+
+/* _OST Insertion Request (0x200) Status Code */
+#define ACPI_OST_SC_INSERT_IN_PROGRESS 0x80
+#define ACPI_OST_SC_DRIVER_LOAD_FAILURE 0x81
+#define ACPI_OST_SC_INSERT_NOT_SUPPORTED 0x82
+
+extern void acpi_early_init(void);
+extern void acpi_subsystem_init(void);
+
+extern int acpi_nvs_register(__u64 start, __u64 size);
+
+extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
+ void *data);
+
+const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
+ const struct device *dev);
+
+extern bool acpi_driver_match_device(struct device *dev,
+ const struct device_driver *drv);
+int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
+int acpi_device_modalias(struct device *, char *, int);
+void acpi_walk_dep_device_list(acpi_handle handle);
+
+struct platform_device *acpi_create_platform_device(struct acpi_device *);
+#define ACPI_PTR(_ptr) (_ptr)
+
+#else /* !CONFIG_ACPI */
+
+#define acpi_disabled 1
+
+#define ACPI_COMPANION(dev) (NULL)
+#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
+#define ACPI_HANDLE(dev) (NULL)
+
+struct fwnode_handle;
+
+static inline bool is_acpi_node(struct fwnode_handle *fwnode)
+{
+ return false;
+}
+
+static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
+{
+ return NULL;
+}
+
+static inline bool has_acpi_companion(struct device *dev)
+{
+ return false;
+}
+
+static inline const char *acpi_dev_name(struct acpi_device *adev)
+{
+ return NULL;
+}
+
+static inline void acpi_early_init(void) { }
+static inline void acpi_subsystem_init(void) { }
+
+static inline int early_acpi_boot_init(void)
+{
+ return 0;
+}
+static inline int acpi_boot_init(void)
+{
+ return 0;
+}
+
+static inline void acpi_boot_table_init(void)
+{
+ return;
+}
+
+static inline int acpi_mps_check(void)
+{
+ return 0;
+}
+
+static inline int acpi_check_resource_conflict(struct resource *res)
+{
+ return 0;
+}
+
+static inline int acpi_check_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ return 0;
+}
+
+struct acpi_table_header;
+static inline int acpi_table_parse(char *id,
+ int (*handler)(struct acpi_table_header *))
+{
+ return -ENODEV;
+}
+
+static inline int acpi_nvs_register(__u64 start, __u64 size)
+{
+ return 0;
+}
+
+static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
+ void *data)
+{
+ return 0;
+}
+
+struct acpi_device_id;
+
+static inline const struct acpi_device_id *acpi_match_device(
+ const struct acpi_device_id *ids, const struct device *dev)
+{
+ return NULL;
+}
+
+static inline bool acpi_driver_match_device(struct device *dev,
+ const struct device_driver *drv)
+{
+ return false;
+}
+
+static inline int acpi_device_uevent_modalias(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ return -ENODEV;
+}
+
+static inline int acpi_device_modalias(struct device *dev,
+ char *buf, int size)
+{
+ return -ENODEV;
+}
+
+#define ACPI_PTR(_ptr) (NULL)
+
+#endif /* !CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI
+void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
+ u32 pm1a_ctrl, u32 pm1b_ctrl));
+
+acpi_status acpi_os_prepare_sleep(u8 sleep_state,
+ u32 pm1a_control, u32 pm1b_control);
+
+void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
+ u32 val_a, u32 val_b));
+
+acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
+ u32 val_a, u32 val_b);
+
+#ifdef CONFIG_X86
+void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
+#else
+static inline void arch_reserve_mem_area(acpi_physical_address addr,
+ size_t size)
+{
+}
+#endif /* CONFIG_X86 */
+#else
+#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
+#endif
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
+int acpi_dev_runtime_suspend(struct device *dev);
+int acpi_dev_runtime_resume(struct device *dev);
+int acpi_subsys_runtime_suspend(struct device *dev);
+int acpi_subsys_runtime_resume(struct device *dev);
+struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
+int acpi_dev_pm_attach(struct device *dev, bool power_on);
+#else
+static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
+static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
+static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
+static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
+static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
+{
+ return NULL;
+}
+static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
+{
+ return -ENODEV;
+}
+#endif
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
+int acpi_dev_suspend_late(struct device *dev);
+int acpi_dev_resume_early(struct device *dev);
+int acpi_subsys_prepare(struct device *dev);
+void acpi_subsys_complete(struct device *dev);
+int acpi_subsys_suspend_late(struct device *dev);
+int acpi_subsys_resume_early(struct device *dev);
+int acpi_subsys_suspend(struct device *dev);
+int acpi_subsys_freeze(struct device *dev);
+#else
+static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
+static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
+static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
+static inline void acpi_subsys_complete(struct device *dev) {}
+static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
+static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
+static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
+static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
+#endif
+
+#ifdef CONFIG_ACPI
+__printf(3, 4)
+void acpi_handle_printk(const char *level, acpi_handle handle,
+ const char *fmt, ...);
+#else /* !CONFIG_ACPI */
+static inline __printf(3, 4) void
+acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
+#endif /* !CONFIG_ACPI */
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
+__printf(3, 4)
+void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...);
+#else
+#define __acpi_handle_debug(descriptor, handle, fmt, ...) \
+ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__);
+#endif
+
+/*
+ * acpi_handle_<level>: Print message with ACPI prefix and object path
+ *
+ * These interfaces acquire the global namespace mutex to obtain an object
+ * path. In interrupt context, it shows the object path as <n/a>.
+ */
+#define acpi_handle_emerg(handle, fmt, ...) \
+ acpi_handle_printk(KERN_EMERG, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_alert(handle, fmt, ...) \
+ acpi_handle_printk(KERN_ALERT, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_crit(handle, fmt, ...) \
+ acpi_handle_printk(KERN_CRIT, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_err(handle, fmt, ...) \
+ acpi_handle_printk(KERN_ERR, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_warn(handle, fmt, ...) \
+ acpi_handle_printk(KERN_WARNING, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_notice(handle, fmt, ...) \
+ acpi_handle_printk(KERN_NOTICE, handle, fmt, ##__VA_ARGS__)
+#define acpi_handle_info(handle, fmt, ...) \
+ acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__)
+
+#if defined(DEBUG)
+#define acpi_handle_debug(handle, fmt, ...) \
+ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__)
+#else
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define acpi_handle_debug(handle, fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \
+ ##__VA_ARGS__); \
+} while (0)
+#else
+#define acpi_handle_debug(handle, fmt, ...) \
+({ \
+ if (0) \
+ acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); \
+ 0; \
+})
+#endif
+#endif
+
+struct acpi_gpio_params {
+ unsigned int crs_entry_index;
+ unsigned int line_index;
+ bool active_low;
+};
+
+struct acpi_gpio_mapping {
+ const char *name;
+ const struct acpi_gpio_params *data;
+ unsigned int size;
+};
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
+int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios);
+
+static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
+{
+ if (adev)
+ adev->driver_gpios = NULL;
+}
+#else
+static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+#endif
+
+/* Device properties */
+
+#define MAX_ACPI_REFERENCE_ARGS 8
+struct acpi_reference_args {
+ struct acpi_device *adev;
+ size_t nargs;
+ u64 args[MAX_ACPI_REFERENCE_ARGS];
+};
+
+#ifdef CONFIG_ACPI
+int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+ acpi_object_type type, const union acpi_object **obj);
+int acpi_dev_get_property_array(struct acpi_device *adev, const char *name,
+ acpi_object_type type,
+ const union acpi_object **obj);
+int acpi_dev_get_property_reference(struct acpi_device *adev,
+ const char *name, size_t index,
+ struct acpi_reference_args *args);
+
+int acpi_dev_prop_get(struct acpi_device *adev, const char *propname,
+ void **valptr);
+int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
+ enum dev_prop_type proptype, void *val);
+int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
+ enum dev_prop_type proptype, void *val, size_t nval);
+
+struct acpi_device *acpi_get_next_child(struct device *dev,
+ struct acpi_device *child);
+#else
+static inline int acpi_dev_get_property(struct acpi_device *adev,
+ const char *name, acpi_object_type type,
+ const union acpi_object **obj)
+{
+ return -ENXIO;
+}
+static inline int acpi_dev_get_property_array(struct acpi_device *adev,
+ const char *name,
+ acpi_object_type type,
+ const union acpi_object **obj)
+{
+ return -ENXIO;
+}
+static inline int acpi_dev_get_property_reference(struct acpi_device *adev,
+ const char *name, const char *cells_name,
+ size_t index, struct acpi_reference_args *args)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_get(struct acpi_device *adev,
+ const char *propname,
+ void **valptr)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
+ const char *propname,
+ enum dev_prop_type proptype,
+ void *val)
+{
+ return -ENXIO;
+}
+
+static inline int acpi_dev_prop_read(struct acpi_device *adev,
+ const char *propname,
+ enum dev_prop_type proptype,
+ void *val, size_t nval)
+{
+ return -ENXIO;
+}
+
+static inline struct acpi_device *acpi_get_next_child(struct device *dev,
+ struct acpi_device *child)
+{
+ return NULL;
+}
+
+#endif
+
+#endif /*_LINUX_ACPI_H*/
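
A short usage sketch of the property and logging interfaces declared above. ACPI_COMPANION(), ACPI_HANDLE(), acpi_dev_get_property() and acpi_handle_info() are taken from this header; the "rotation" property name and the wrapper itself are hypothetical, and union acpi_object / ACPI_TYPE_INTEGER are assumed to come from the ACPICA headers that acpi.h includes.

#include <linux/acpi.h>
#include <linux/device.h>

/* Hypothetical probe helper: read an integer property from the ACPI
 * companion of a device and log it against the ACPI object path. */
static int example_read_rotation(struct device *dev, u64 *rotation)
{
        struct acpi_device *adev = ACPI_COMPANION(dev);
        const union acpi_object *obj;
        int ret;

        if (!adev)
                return -ENODEV;

        ret = acpi_dev_get_property(adev, "rotation", ACPI_TYPE_INTEGER, &obj);
        if (ret)
                return ret;

        *rotation = obj->integer.value;
        acpi_handle_info(ACPI_HANDLE(dev), "rotation = %llu\n",
                         (unsigned long long)*rotation);
        return 0;
}
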
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
new file mode 100644
index 000000000..329436d38
--- /dev/null
+++ b/include/linux/acpi_dma.h
@@ -0,0 +1,121 @@
+/*
+ * ACPI helpers for DMA request / controller
+ *
+ * Based on of_dma.h
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_ACPI_DMA_H
+#define __LINUX_ACPI_DMA_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/dmaengine.h>
+
+/**
+ * struct acpi_dma_spec - slave device DMA resources
+ * @chan_id: channel unique id
+ * @slave_id: request line unique id
+ * @dev: struct device of the DMA controller to be used in the filter
+ * function
+ */
+struct acpi_dma_spec {
+ int chan_id;
+ int slave_id;
+ struct device *dev;
+};
+
+/**
+ * struct acpi_dma - representation of the registered DMAC
+ * @dma_controllers: linked list node
+ * @dev: struct device of this controller
+ * @acpi_dma_xlate: callback function to find a suitable channel
+ * @data: private data used by a callback function
+ * @base_request_line: first supported request line (CSRT)
+ * @end_request_line: last supported request line (CSRT)
+ */
+struct acpi_dma {
+ struct list_head dma_controllers;
+ struct device *dev;
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *);
+ void *data;
+ unsigned short base_request_line;
+ unsigned short end_request_line;
+};
+
+/* Used with acpi_dma_simple_xlate() */
+struct acpi_dma_filter_info {
+ dma_cap_mask_t dma_cap;
+ dma_filter_fn filter_fn;
+};
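+
+/*
+ * Illustrative sketch (not part of the original header): a DMA controller
+ * driver typically registers itself with acpi_dma_simple_xlate() and a
+ * filter_info describing its channels. The names my_pdev and my_filter_fn
+ * are hypothetical.
+ *
+ *	struct acpi_dma_filter_info *info;
+ *
+ *	info = devm_kzalloc(&my_pdev->dev, sizeof(*info), GFP_KERNEL);
+ *	if (!info)
+ *		return -ENOMEM;
+ *	dma_cap_zero(info->dma_cap);
+ *	dma_cap_set(DMA_SLAVE, info->dma_cap);
+ *	info->filter_fn = my_filter_fn;
+ *
+ *	return devm_acpi_dma_controller_register(&my_pdev->dev,
+ *						 acpi_dma_simple_xlate, info);
+ */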
+
+#ifdef CONFIG_DMA_ACPI
+
+int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data);
+int acpi_dma_controller_free(struct device *dev);
+int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data);
+void devm_acpi_dma_controller_free(struct device *dev);
+
+struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
+ size_t index);
+struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
+ const char *name);
+
+struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
+ struct acpi_dma *adma);
+#else
+
+static inline int acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline int acpi_dma_controller_free(struct device *dev)
+{
+ return -ENODEV;
+}
+static inline int devm_acpi_dma_controller_register(struct device *dev,
+ struct dma_chan *(*acpi_dma_xlate)
+ (struct acpi_dma_spec *, struct acpi_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+static inline void devm_acpi_dma_controller_free(struct device *dev)
+{
+}
+
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
+ struct device *dev, size_t index)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
+ struct device *dev, const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#define acpi_dma_simple_xlate NULL
+
+#endif
+
+#define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index
+
+#endif /* __LINUX_ACPI_DMA_H */
diff --git a/include/linux/acpi_irq.h b/include/linux/acpi_irq.h
new file mode 100644
index 000000000..f10c87265
--- /dev/null
+++ b/include/linux/acpi_irq.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_ACPI_IRQ_H
+#define _LINUX_ACPI_IRQ_H
+
+#include <linux/irq.h>
+
+#ifndef acpi_irq_init
+static inline void acpi_irq_init(void) { }
+#endif
+
+#endif /* _LINUX_ACPI_IRQ_H */
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
new file mode 100644
index 000000000..1d0ef1ae8
--- /dev/null
+++ b/include/linux/acpi_pmtmr.h
@@ -0,0 +1,38 @@
+#ifndef _ACPI_PMTMR_H_
+#define _ACPI_PMTMR_H_
+
+#include <linux/clocksource.h>
+
+/* Number of PMTMR ticks expected during calibration run */
+#define PMTMR_TICKS_PER_SEC 3579545
+
+/* limit it to 24 bits */
+#define ACPI_PM_MASK CLOCKSOURCE_MASK(24)
+
+/* Overrun value */
+#define ACPI_PM_OVRRUN (1<<24)
+
+#ifdef CONFIG_X86_PM_TIMER
+
+extern u32 acpi_pm_read_verified(void);
+extern u32 pmtmr_ioport;
+
+static inline u32 acpi_pm_read_early(void)
+{
+ if (!pmtmr_ioport)
+ return 0;
+ /* mask the output to 24 bits */
+ return acpi_pm_read_verified() & ACPI_PM_MASK;
+}
+
+#else
+
+static inline u32 acpi_pm_read_early(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif
+
diff --git a/include/linux/adb.h b/include/linux/adb.h
new file mode 100644
index 000000000..cde41300c
--- /dev/null
+++ b/include/linux/adb.h
@@ -0,0 +1,66 @@
+/*
+ * Definitions for ADB (Apple Desktop Bus) support.
+ */
+#ifndef __ADB_H
+#define __ADB_H
+
+#include <uapi/linux/adb.h>
+
+
+struct adb_request {
+ unsigned char data[32];
+ int nbytes;
+ unsigned char reply[32];
+ int reply_len;
+ unsigned char reply_expected;
+ unsigned char sent;
+ unsigned char complete;
+ void (*done)(struct adb_request *);
+ void *arg;
+ struct adb_request *next;
+};
+
+struct adb_ids {
+ int nids;
+ unsigned char id[16];
+};
+
+/* Structure which encapsulates a low-level ADB driver */
+
+struct adb_driver {
+ char name[16];
+ int (*probe)(void);
+ int (*init)(void);
+ int (*send_request)(struct adb_request *req, int sync);
+ int (*autopoll)(int devs);
+ void (*poll)(void);
+ int (*reset_bus)(void);
+};
+
+/* Values for adb_request flags */
+#define ADBREQ_REPLY 1 /* expect reply */
+#define ADBREQ_SYNC 2 /* poll until done */
+#define ADBREQ_NOSEND 4 /* build the request, but don't send it */
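+
+/*
+ * Illustrative sketch (not part of the original header): sending a
+ * synchronous request and waiting for the reply, as ADB drivers commonly
+ * do. The variable id, the register number 3 and handle_reply() are
+ * hypothetical.
+ *
+ *	struct adb_request req;
+ *
+ *	adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
+ *		    ADB_READREG(id, 3));
+ *	if (req.reply_len > 0)
+ *		handle_reply(req.reply, req.reply_len);
+ */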
+
+/* Messages sent through the client_list notifier. You should NOT stop
+ the operation, at least not with this version */
+enum adb_message {
+ ADB_MSG_POWERDOWN, /* Currently called before sleep only */
+ ADB_MSG_PRE_RESET, /* Called before resetting the bus */
+ ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */
+};
+extern struct blocking_notifier_head adb_client_list;
+
+int adb_request(struct adb_request *req, void (*done)(struct adb_request *),
+ int flags, int nbytes, ...);
+int adb_register(int default_id, int handler_id, struct adb_ids *ids,
+ void (*handler)(unsigned char *, int, int));
+int adb_unregister(int index);
+void adb_poll(void);
+void adb_input(unsigned char *, int, int);
+int adb_reset_bus(void);
+
+int adb_try_handler_change(int address, int new_id);
+int adb_get_infos(int address, int *original_address, int *handler_id);
+
+#endif /* __ADB_H */
diff --git a/include/linux/adfs_fs.h b/include/linux/adfs_fs.h
new file mode 100644
index 000000000..0d991071a
--- /dev/null
+++ b/include/linux/adfs_fs.h
@@ -0,0 +1,23 @@
+#ifndef _ADFS_FS_H
+#define _ADFS_FS_H
+
+#include <uapi/linux/adfs_fs.h>
+
+/*
+ * Calculate the boot block checksum on an ADFS drive. Note that this will
+ * appear to be correct if the sector contains all zeros, so also check that
+ * the disk size is non-zero!!!
+ */
+static inline int adfs_checkbblk(unsigned char *ptr)
+{
+ unsigned int result = 0;
+ unsigned char *p = ptr + 511;
+
+ do {
+ result = (result & 0xff) + (result >> 8);
+ result = result + *--p;
+ } while (p != ptr);
+
+ return (result & 0xff) != ptr[511];
+}
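+
+/*
+ * Illustrative usage (not part of the original header), assuming buf points
+ * at a 512-byte candidate boot block just read from disk; remember that an
+ * all-zero sector also passes, so the caller must check the disk size too:
+ *
+ *	if (adfs_checkbblk(buf))
+ *		return -EINVAL;
+ */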
+#endif
diff --git a/include/linux/aer.h b/include/linux/aer.h
new file mode 100644
index 000000000..4fef65e57
--- /dev/null
+++ b/include/linux/aer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ */
+
+#ifndef _AER_H_
+#define _AER_H_
+
+#include <linux/types.h>
+
+#define AER_NONFATAL 0
+#define AER_FATAL 1
+#define AER_CORRECTABLE 2
+
+struct pci_dev;
+
+struct aer_header_log_regs {
+ unsigned int dw0;
+ unsigned int dw1;
+ unsigned int dw2;
+ unsigned int dw3;
+};
+
+struct aer_capability_regs {
+ u32 header;
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ struct aer_header_log_regs header_log;
+ u32 root_command;
+ u32 root_status;
+ u16 cor_err_source;
+ u16 uncor_err_source;
+};
+
+#if defined(CONFIG_PCIEAER)
+/* pci-e port driver needs this function to enable aer */
+int pci_enable_pcie_error_reporting(struct pci_dev *dev);
+int pci_disable_pcie_error_reporting(struct pci_dev *dev);
+int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
+#else
+static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
+{
+ return -EINVAL;
+}
+static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
+{
+ return -EINVAL;
+}
+static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
+ return -EINVAL;
+}
+#endif
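+
+/*
+ * Illustrative sketch (not part of the original header): a PCIe driver
+ * usually opts in to AER reporting from its probe routine. The function
+ * name my_probe is hypothetical.
+ *
+ *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ *	{
+ *		...
+ *		if (pci_enable_pcie_error_reporting(pdev))
+ *			dev_info(&pdev->dev, "AER reporting not available\n");
+ *		...
+ *	}
+ */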
+
+void cper_print_aer(struct pci_dev *dev, int cper_severity,
+ struct aer_capability_regs *aer);
+int cper_severity_to_aer(int cper_severity);
+void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
+ int severity,
+ struct aer_capability_regs *aer_regs);
+#endif /* _AER_H_ */
+
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
new file mode 100644
index 000000000..eaf6cd75a
--- /dev/null
+++ b/include/linux/agp_backend.h
@@ -0,0 +1,109 @@
+/*
+ * AGPGART backend specific includes. Not for userspace consumption.
+ *
+ * Copyright (C) 2004 Silicon Graphics, Inc.
+ * Copyright (C) 2002-2003 Dave Jones
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AGP_BACKEND_H
+#define _AGP_BACKEND_H 1
+
+#include <linux/list.h>
+
+enum chipset_type {
+ NOT_SUPPORTED,
+ SUPPORTED,
+};
+
+struct agp_version {
+ u16 major;
+ u16 minor;
+};
+
+struct agp_kern_info {
+ struct agp_version version;
+ struct pci_dev *device;
+ enum chipset_type chipset;
+ unsigned long mode;
+ unsigned long aper_base;
+ size_t aper_size;
+ int max_memory; /* In pages */
+ int current_memory;
+ bool cant_use_aperture;
+ unsigned long page_mask;
+ const struct vm_operations_struct *vm_ops;
+};
+
+/*
+ * The agp_memory structure has information about the block of agp memory
+ * allocated. A caller may manipulate the next and prev pointers to link
+ * each allocated item into a list. These pointers are ignored by the backend.
+ * Everything else should never be written to, but the caller may read any of
+ * the items to determine the status of this block of agp memory.
+ */
+
+struct agp_bridge_data;
+
+struct agp_memory {
+ struct agp_memory *next;
+ struct agp_memory *prev;
+ struct agp_bridge_data *bridge;
+ struct page **pages;
+ size_t page_count;
+ int key;
+ int num_scratch_pages;
+ off_t pg_start;
+ u32 type;
+ u32 physical;
+ bool is_bound;
+ bool is_flushed;
+ /* list of agp_memory mapped to the aperture */
+ struct list_head mapped_list;
+ /* DMA-mapped addresses */
+ struct scatterlist *sg_list;
+ int num_sg;
+};
+
+#define AGP_NORMAL_MEMORY 0
+
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+
+extern struct agp_bridge_data *agp_bridge;
+extern struct list_head agp_bridges;
+
+extern struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *);
+
+extern void agp_free_memory(struct agp_memory *);
+extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, u32);
+extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
+extern int agp_bind_memory(struct agp_memory *, off_t);
+extern int agp_unbind_memory(struct agp_memory *);
+extern void agp_enable(struct agp_bridge_data *, u32);
+extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
+extern void agp_backend_release(struct agp_bridge_data *);
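+
+/*
+ * Illustrative sketch (not part of the original header) of the usual
+ * acquire/allocate/bind life cycle; error handling is omitted, and pdev and
+ * pg_start are caller-provided.
+ *
+ *	struct agp_bridge_data *bridge = agp_backend_acquire(pdev);
+ *	struct agp_memory *mem;
+ *
+ *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
+ *	agp_bind_memory(mem, pg_start);
+ *	...
+ *	agp_unbind_memory(mem);
+ *	agp_free_memory(mem);
+ *	agp_backend_release(bridge);
+ */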
+
+#endif /* _AGP_BACKEND_H */
diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h
new file mode 100644
index 000000000..c6b61ca97
--- /dev/null
+++ b/include/linux/agpgart.h
@@ -0,0 +1,130 @@
+/*
+ * AGPGART module version 0.99
+ * Copyright (C) 1999 Jeff Hartmann
+ * Copyright (C) 1999 Precision Insight, Inc.
+ * Copyright (C) 1999 Xi Graphics, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _AGP_H
+#define _AGP_H 1
+
+#include <linux/mutex.h>
+#include <linux/agp_backend.h>
+#include <uapi/linux/agpgart.h>
+
+#define AGPGART_MINOR 175
+
+struct agp_info {
+ struct agp_version version; /* version of the driver */
+ u32 bridge_id; /* bridge vendor/device */
+ u32 agp_mode; /* mode info of bridge */
+ unsigned long aper_base;/* base of aperture */
+ size_t aper_size; /* size of aperture */
+ size_t pg_total; /* max pages (swap + system) */
+ size_t pg_system; /* max pages (system) */
+ size_t pg_used; /* current pages used */
+};
+
+struct agp_setup {
+ u32 agp_mode; /* mode info of bridge */
+};
+
+/*
+ * The "prot" field below still needs a "sleep" flag somehow ...
+ */
+struct agp_segment {
+ off_t pg_start; /* starting page to populate */
+ size_t pg_count; /* number of pages */
+ int prot; /* prot flags for mmap */
+};
+
+struct agp_segment_priv {
+ off_t pg_start;
+ size_t pg_count;
+ pgprot_t prot;
+};
+
+struct agp_region {
+ pid_t pid; /* pid of process */
+ size_t seg_count; /* number of segments */
+ struct agp_segment *seg_list;
+};
+
+struct agp_allocate {
+ int key; /* tag of allocation */
+ size_t pg_count; /* number of pages */
+ u32 type; /* 0 == normal, other devspec */
+ u32 physical; /* device specific (some devices
+ * need a phys address of the
+ * actual page behind the gatt
+ * table) */
+};
+
+struct agp_bind {
+ int key; /* tag of allocation */
+ off_t pg_start; /* starting page to populate */
+};
+
+struct agp_unbind {
+ int key; /* tag of allocation */
+ u32 priority; /* priority for paging out */
+};
+
+struct agp_client {
+ struct agp_client *next;
+ struct agp_client *prev;
+ pid_t pid;
+ int num_segments;
+ struct agp_segment_priv **segments;
+};
+
+struct agp_controller {
+ struct agp_controller *next;
+ struct agp_controller *prev;
+ pid_t pid;
+ int num_clients;
+ struct agp_memory *pool;
+ struct agp_client *clients;
+};
+
+#define AGP_FF_ALLOW_CLIENT 0
+#define AGP_FF_ALLOW_CONTROLLER 1
+#define AGP_FF_IS_CLIENT 2
+#define AGP_FF_IS_CONTROLLER 3
+#define AGP_FF_IS_VALID 4
+
+struct agp_file_private {
+ struct agp_file_private *next;
+ struct agp_file_private *prev;
+ pid_t my_pid;
+ unsigned long access_flags; /* long req'd for set_bit --RR */
+};
+
+struct agp_front_data {
+ struct mutex agp_mutex;
+ struct agp_controller *current_controller;
+ struct agp_controller *controllers;
+ struct agp_file_private *file_priv_list;
+ bool used_by_controller;
+ bool backend_acquired;
+};
+
+#endif /* _AGP_H */
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
new file mode 100644
index 000000000..a270f25ee
--- /dev/null
+++ b/include/linux/ahci_platform.h
@@ -0,0 +1,44 @@
+/*
+ * AHCI SATA platform driver
+ *
+ * Copyright 2004-2005 Red Hat, Inc.
+ * Jeff Garzik <jgarzik@pobox.com>
+ * Copyright 2010 MontaVista Software, LLC.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ */
+
+#ifndef _AHCI_PLATFORM_H
+#define _AHCI_PLATFORM_H
+
+#include <linux/compiler.h>
+
+struct device;
+struct ata_port_info;
+struct ahci_host_priv;
+struct platform_device;
+struct scsi_host_template;
+
+int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
+int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv);
+int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_resources(struct ahci_host_priv *hpriv);
+struct ahci_host_priv *ahci_platform_get_resources(
+ struct platform_device *pdev);
+int ahci_platform_init_host(struct platform_device *pdev,
+ struct ahci_host_priv *hpriv,
+ const struct ata_port_info *pi_template,
+ struct scsi_host_template *sht);
+
+int ahci_platform_suspend_host(struct device *dev);
+int ahci_platform_resume_host(struct device *dev);
+int ahci_platform_suspend(struct device *dev);
+int ahci_platform_resume(struct device *dev);
+
+#endif /* _AHCI_PLATFORM_H */
diff --git a/include/linux/aio.h b/include/linux/aio.h
new file mode 100644
index 000000000..9eb42dbc5
--- /dev/null
+++ b/include/linux/aio.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX__AIO_H
+#define __LINUX__AIO_H
+
+#include <linux/aio_abi.h>
+
+struct kioctx;
+struct kiocb;
+struct mm_struct;
+
+#define KIOCB_KEY 0
+
+typedef int (kiocb_cancel_fn)(struct kiocb *);
+
+/* prototypes */
+#ifdef CONFIG_AIO
+extern void exit_aio(struct mm_struct *mm);
+extern long do_io_submit(aio_context_t ctx_id, long nr,
+ struct iocb __user *__user *iocbpp, bool compat);
+void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
+#else
+static inline void exit_aio(struct mm_struct *mm) { }
+static inline long do_io_submit(aio_context_t ctx_id, long nr,
+ struct iocb __user * __user *iocbpp,
+ bool compat) { return 0; }
+static inline void kiocb_set_cancel_fn(struct kiocb *req,
+ kiocb_cancel_fn *cancel) { }
+#endif /* CONFIG_AIO */
+
+/* for sysctl: */
+extern unsigned long aio_nr;
+extern unsigned long aio_max_nr;
+
+#endif /* __LINUX__AIO_H */
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
new file mode 100644
index 000000000..a899402a5
--- /dev/null
+++ b/include/linux/alarmtimer.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_ALARMTIMER_H
+#define _LINUX_ALARMTIMER_H
+
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/timerqueue.h>
+#include <linux/rtc.h>
+
+enum alarmtimer_type {
+ ALARM_REALTIME,
+ ALARM_BOOTTIME,
+
+ ALARM_NUMTYPE,
+};
+
+enum alarmtimer_restart {
+ ALARMTIMER_NORESTART,
+ ALARMTIMER_RESTART,
+};
+
+
+#define ALARMTIMER_STATE_INACTIVE 0x00
+#define ALARMTIMER_STATE_ENQUEUED 0x01
+
+/**
+ * struct alarm - Alarm timer structure
+ * @node: timerqueue node for adding to the event list; this value
+ * also includes the expiration time.
+ * @timer: hrtimer used to schedule the alarm expiry
+ * @function: Function pointer to be executed when the timer fires.
+ * @type: Alarm type (BOOTTIME/REALTIME)
+ * @state: Flag that represents if the alarm is set to fire or not
+ * @data: Internal data value.
+ */
+struct alarm {
+ struct timerqueue_node node;
+ struct hrtimer timer;
+ enum alarmtimer_restart (*function)(struct alarm *, ktime_t now);
+ enum alarmtimer_type type;
+ int state;
+ void *data;
+};
+
+void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
+ enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
+int alarm_start(struct alarm *alarm, ktime_t start);
+int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_restart(struct alarm *alarm);
+int alarm_try_to_cancel(struct alarm *alarm);
+int alarm_cancel(struct alarm *alarm);
+
+u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
+u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
+ktime_t alarm_expires_remaining(const struct alarm *alarm);
+
+/* Provide way to access the rtc device being used by alarmtimers */
+struct rtc_device *alarmtimer_get_rtcdev(void);
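+
+/*
+ * Illustrative sketch (not part of the original header): arming a one-shot
+ * alarm that fires five seconds from now. The callback name my_alarm_fn is
+ * hypothetical.
+ *
+ *	static enum alarmtimer_restart my_alarm_fn(struct alarm *a, ktime_t now)
+ *	{
+ *		return ALARMTIMER_NORESTART;
+ *	}
+ *
+ *	static struct alarm my_alarm;
+ *
+ *	alarm_init(&my_alarm, ALARM_REALTIME, my_alarm_fn);
+ *	alarm_start_relative(&my_alarm, ktime_set(5, 0));
+ */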
+
+#endif
diff --git a/include/linux/altera_jtaguart.h b/include/linux/altera_jtaguart.h
new file mode 100644
index 000000000..953b178a1
--- /dev/null
+++ b/include/linux/altera_jtaguart.h
@@ -0,0 +1,16 @@
+/*
+ * altera_jtaguart.h -- Altera JTAG UART driver defines.
+ */
+
+#ifndef __ALTJUART_H
+#define __ALTJUART_H
+
+#define ALTERA_JTAGUART_MAJOR 204
+#define ALTERA_JTAGUART_MINOR 186
+
+struct altera_jtaguart_platform_uart {
+ unsigned long mapbase; /* Physical address base */
+ unsigned int irq; /* Interrupt vector */
+};
+
+#endif /* __ALTJUART_H */
diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h
new file mode 100644
index 000000000..c022c82db
--- /dev/null
+++ b/include/linux/altera_uart.h
@@ -0,0 +1,15 @@
+/*
+ * altera_uart.h -- Altera UART driver defines.
+ */
+
+#ifndef __ALTUART_H
+#define __ALTUART_H
+
+struct altera_uart_platform_uart {
+ unsigned long mapbase; /* Physical address base */
+ unsigned int irq; /* Interrupt vector */
+ unsigned int uartclk; /* UART clock rate */
+ unsigned int bus_shift; /* Bus shift (address stride) */
+};
+
+#endif /* __ALTUART_H */
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
new file mode 100644
index 000000000..50fc66868
--- /dev/null
+++ b/include/linux/amba/bus.h
@@ -0,0 +1,168 @@
+/*
+ * linux/include/amba/bus.h
+ *
+ * This device type deals with ARM PrimeCells and anything else that
+ * presents a proper CID (0xB105F00D) at the end of the I/O register
+ * region or that is derived from a PrimeCell.
+ *
+ * Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASMARM_AMBA_H
+#define ASMARM_AMBA_H
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/err.h>
+#include <linux/resource.h>
+#include <linux/regulator/consumer.h>
+
+#define AMBA_NR_IRQS 9
+#define AMBA_CID 0xb105f00d
+#define CORESIGHT_CID 0xb105900d
+
+struct clk;
+
+struct amba_device {
+ struct device dev;
+ struct resource res;
+ struct clk *pclk;
+ unsigned int periphid;
+ unsigned int irq[AMBA_NR_IRQS];
+ char *driver_override;
+};
+
+struct amba_driver {
+ struct device_driver drv;
+ int (*probe)(struct amba_device *, const struct amba_id *);
+ int (*remove)(struct amba_device *);
+ void (*shutdown)(struct amba_device *);
+ int (*suspend)(struct amba_device *, pm_message_t);
+ int (*resume)(struct amba_device *);
+ const struct amba_id *id_table;
+};
+
+/*
+ * Constants for the designer field of the Peripheral ID register. When bit 7
+ * is set to '1', bits [6:0] should be the JEP106 manufacturer identity code.
+ */
+enum amba_vendor {
+ AMBA_VENDOR_ARM = 0x41,
+ AMBA_VENDOR_ST = 0x80,
+ AMBA_VENDOR_QCOM = 0x51,
+ AMBA_VENDOR_LSI = 0xb6,
+};
+
+extern struct bus_type amba_bustype;
+
+#define to_amba_device(d) container_of(d, struct amba_device, dev)
+
+#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
+#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
+
+int amba_driver_register(struct amba_driver *);
+void amba_driver_unregister(struct amba_driver *);
+struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
+void amba_device_put(struct amba_device *);
+int amba_device_add(struct amba_device *, struct resource *);
+int amba_device_register(struct amba_device *, struct resource *);
+struct amba_device *amba_apb_device_add(struct device *parent, const char *name,
+ resource_size_t base, size_t size,
+ int irq1, int irq2, void *pdata,
+ unsigned int periphid);
+struct amba_device *amba_ahb_device_add(struct device *parent, const char *name,
+ resource_size_t base, size_t size,
+ int irq1, int irq2, void *pdata,
+ unsigned int periphid);
+struct amba_device *
+amba_apb_device_add_res(struct device *parent, const char *name,
+ resource_size_t base, size_t size, int irq1,
+ int irq2, void *pdata, unsigned int periphid,
+ struct resource *resbase);
+struct amba_device *
+amba_ahb_device_add_res(struct device *parent, const char *name,
+ resource_size_t base, size_t size, int irq1,
+ int irq2, void *pdata, unsigned int periphid,
+ struct resource *resbase);
+void amba_device_unregister(struct amba_device *);
+struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int);
+int amba_request_regions(struct amba_device *, const char *);
+void amba_release_regions(struct amba_device *);
+
+static inline int amba_pclk_enable(struct amba_device *dev)
+{
+ return clk_enable(dev->pclk);
+}
+
+static inline void amba_pclk_disable(struct amba_device *dev)
+{
+ clk_disable(dev->pclk);
+}
+
+static inline int amba_pclk_prepare(struct amba_device *dev)
+{
+ return clk_prepare(dev->pclk);
+}
+
+static inline void amba_pclk_unprepare(struct amba_device *dev)
+{
+ clk_unprepare(dev->pclk);
+}
+
+/* Some drivers don't use the struct amba_device */
+#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
+#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
+#define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff)
+#define AMBA_PART_BITS(a) ((a) & 0xfff)
+
+#define amba_config(d) AMBA_CONFIG_BITS((d)->periphid)
+#define amba_rev(d) AMBA_REV_BITS((d)->periphid)
+#define amba_manf(d) AMBA_MANF_BITS((d)->periphid)
+#define amba_part(d) AMBA_PART_BITS((d)->periphid)
+
+#define __AMBA_DEV(busid, data, mask) \
+ { \
+ .coherent_dma_mask = mask, \
+ .init_name = busid, \
+ .platform_data = data, \
+ }
+
+/*
+ * APB devices do not themselves have the ability to address memory,
+ * so DMA masks should be zero (much like USB peripheral devices.)
+ * The DMA controller DMA masks should be used instead (much like
+ * USB host controllers in conventional PCs.)
+ */
+#define AMBA_APB_DEVICE(name, busid, id, base, irqs, data) \
+struct amba_device name##_device = { \
+ .dev = __AMBA_DEV(busid, data, 0), \
+ .res = DEFINE_RES_MEM(base, SZ_4K), \
+ .irq = irqs, \
+ .periphid = id, \
+}
+
+/*
+ * AHB devices are DMA capable, so set their DMA masks
+ */
+#define AMBA_AHB_DEVICE(name, busid, id, base, irqs, data) \
+struct amba_device name##_device = { \
+ .dev = __AMBA_DEV(busid, data, ~0ULL), \
+ .res = DEFINE_RES_MEM(base, SZ_4K), \
+ .irq = irqs, \
+ .periphid = id, \
+}
+
+/*
+ * module_amba_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces module_init()
+ * and module_exit()
+ */
+#define module_amba_driver(__amba_drv) \
+ module_driver(__amba_drv, amba_driver_register, amba_driver_unregister)
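+
+/*
+ * Illustrative sketch (not part of the original header): a minimal AMBA
+ * driver wired up with module_amba_driver(). The driver name, probe/remove
+ * bodies and the id/mask values are hypothetical.
+ *
+ *	static int my_probe(struct amba_device *adev, const struct amba_id *id)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static int my_remove(struct amba_device *adev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static const struct amba_id my_ids[] = {
+ *		{ .id = 0x00041234, .mask = 0x000fffff },
+ *		{ 0, 0 },
+ *	};
+ *
+ *	static struct amba_driver my_driver = {
+ *		.drv = { .name = "my-amba-driver" },
+ *		.id_table = my_ids,
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *
+ *	module_amba_driver(my_driver);
+ */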
+
+#endif
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
new file mode 100644
index 000000000..e82e3ee2c
--- /dev/null
+++ b/include/linux/amba/clcd.h
@@ -0,0 +1,330 @@
+/*
+ * linux/include/asm-arm/hardware/amba_clcd.h -- Integrator LCD panel.
+ *
+ * David A Rusling
+ *
+ * Copyright (C) 2001 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+#include <linux/fb.h>
+
+/*
+ * CLCD Controller Internal Register addresses
+ */
+#define CLCD_TIM0 0x00000000
+#define CLCD_TIM1 0x00000004
+#define CLCD_TIM2 0x00000008
+#define CLCD_TIM3 0x0000000c
+#define CLCD_UBAS 0x00000010
+#define CLCD_LBAS 0x00000014
+
+#define CLCD_PL110_IENB 0x00000018
+#define CLCD_PL110_CNTL 0x0000001c
+#define CLCD_PL110_STAT 0x00000020
+#define CLCD_PL110_INTR 0x00000024
+#define CLCD_PL110_UCUR 0x00000028
+#define CLCD_PL110_LCUR 0x0000002C
+
+#define CLCD_PL111_CNTL 0x00000018
+#define CLCD_PL111_IENB 0x0000001c
+#define CLCD_PL111_RIS 0x00000020
+#define CLCD_PL111_MIS 0x00000024
+#define CLCD_PL111_ICR 0x00000028
+#define CLCD_PL111_UCUR 0x0000002c
+#define CLCD_PL111_LCUR 0x00000030
+
+#define CLCD_PALL 0x00000200
+#define CLCD_PALETTE 0x00000200
+
+#define TIM2_CLKSEL (1 << 5)
+#define TIM2_IVS (1 << 11)
+#define TIM2_IHS (1 << 12)
+#define TIM2_IPC (1 << 13)
+#define TIM2_IOE (1 << 14)
+#define TIM2_BCD (1 << 26)
+
+#define CNTL_LCDEN (1 << 0)
+#define CNTL_LCDBPP1 (0 << 1)
+#define CNTL_LCDBPP2 (1 << 1)
+#define CNTL_LCDBPP4 (2 << 1)
+#define CNTL_LCDBPP8 (3 << 1)
+#define CNTL_LCDBPP16 (4 << 1)
+#define CNTL_LCDBPP16_565 (6 << 1)
+#define CNTL_LCDBPP16_444 (7 << 1)
+#define CNTL_LCDBPP24 (5 << 1)
+#define CNTL_LCDBW (1 << 4)
+#define CNTL_LCDTFT (1 << 5)
+#define CNTL_LCDMONO8 (1 << 6)
+#define CNTL_LCDDUAL (1 << 7)
+#define CNTL_BGR (1 << 8)
+#define CNTL_BEBO (1 << 9)
+#define CNTL_BEPO (1 << 10)
+#define CNTL_LCDPWR (1 << 11)
+#define CNTL_LCDVCOMP(x) ((x) << 12)
+#define CNTL_LDMAFIFOTIME (1 << 15)
+#define CNTL_WATERMARK (1 << 16)
+
+enum {
+ /* individual formats */
+ CLCD_CAP_RGB444 = (1 << 0),
+ CLCD_CAP_RGB5551 = (1 << 1),
+ CLCD_CAP_RGB565 = (1 << 2),
+ CLCD_CAP_RGB888 = (1 << 3),
+ CLCD_CAP_BGR444 = (1 << 4),
+ CLCD_CAP_BGR5551 = (1 << 5),
+ CLCD_CAP_BGR565 = (1 << 6),
+ CLCD_CAP_BGR888 = (1 << 7),
+
+ /* connection layouts */
+ CLCD_CAP_444 = CLCD_CAP_RGB444 | CLCD_CAP_BGR444,
+ CLCD_CAP_5551 = CLCD_CAP_RGB5551 | CLCD_CAP_BGR5551,
+ CLCD_CAP_565 = CLCD_CAP_RGB565 | CLCD_CAP_BGR565,
+ CLCD_CAP_888 = CLCD_CAP_RGB888 | CLCD_CAP_BGR888,
+
+ /* red/blue ordering */
+ CLCD_CAP_RGB = CLCD_CAP_RGB444 | CLCD_CAP_RGB5551 |
+ CLCD_CAP_RGB565 | CLCD_CAP_RGB888,
+ CLCD_CAP_BGR = CLCD_CAP_BGR444 | CLCD_CAP_BGR5551 |
+ CLCD_CAP_BGR565 | CLCD_CAP_BGR888,
+
+ CLCD_CAP_ALL = CLCD_CAP_BGR | CLCD_CAP_RGB,
+};
+
+struct clcd_panel {
+ struct fb_videomode mode;
+ signed short width; /* width in mm */
+ signed short height; /* height in mm */
+ u32 tim2;
+ u32 tim3;
+ u32 cntl;
+ u32 caps;
+ unsigned int bpp:8,
+ fixedtimings:1,
+ grayscale:1;
+ unsigned int connector;
+};
+
+struct clcd_regs {
+ u32 tim0;
+ u32 tim1;
+ u32 tim2;
+ u32 tim3;
+ u32 cntl;
+ unsigned long pixclock;
+};
+
+struct clcd_fb;
+
+/*
+ * the board-type specific routines
+ */
+struct clcd_board {
+ const char *name;
+
+ /*
+ * Optional. Hardware capability flags.
+ */
+ u32 caps;
+
+ /*
+ * Optional. Check whether the var structure is acceptable
+ * for this display.
+ */
+ int (*check)(struct clcd_fb *fb, struct fb_var_screeninfo *var);
+
+ /*
+ * Compulsory. Decode fb->fb.var into regs->*. In the case of
+ * fixed timing, set regs->* to the register values required.
+ */
+ void (*decode)(struct clcd_fb *fb, struct clcd_regs *regs);
+
+ /*
+ * Optional. Disable any extra display hardware.
+ */
+ void (*disable)(struct clcd_fb *);
+
+ /*
+ * Optional. Enable any extra display hardware.
+ */
+ void (*enable)(struct clcd_fb *);
+
+ /*
+ * Setup platform specific parts of CLCD driver
+ */
+ int (*setup)(struct clcd_fb *);
+
+ /*
+ * mmap the framebuffer memory
+ */
+ int (*mmap)(struct clcd_fb *, struct vm_area_struct *);
+
+ /*
+ * Remove platform specific parts of CLCD driver
+ */
+ void (*remove)(struct clcd_fb *);
+};
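+
+/*
+ * Illustrative sketch (not part of the original header): boards with no
+ * special requirements can often reuse the generic clcdfb_check() and
+ * clcdfb_decode() helpers defined further down in this header. The name
+ * my_clcd_board and the caps value are hypothetical.
+ *
+ *	static struct clcd_board my_clcd_board = {
+ *		.name	= "my board",
+ *		.caps	= CLCD_CAP_565,
+ *		.check	= clcdfb_check,
+ *		.decode	= clcdfb_decode,
+ *	};
+ */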
+
+struct amba_device;
+struct clk;
+
+/* this data structure describes each frame buffer device we find */
+struct clcd_fb {
+ struct fb_info fb;
+ struct amba_device *dev;
+ struct clk *clk;
+ struct clcd_panel *panel;
+ struct clcd_board *board;
+ void *board_data;
+ void __iomem *regs;
+ u16 off_ienb;
+ u16 off_cntl;
+ u32 clcd_cntl;
+ u32 cmap[16];
+ bool clk_enabled;
+};
+
+static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
+{
+ struct fb_var_screeninfo *var = &fb->fb.var;
+ u32 val, cpl;
+
+ /*
+ * Program the CLCD controller registers and start the CLCD
+ */
+ val = ((var->xres / 16) - 1) << 2;
+ val |= (var->hsync_len - 1) << 8;
+ val |= (var->right_margin - 1) << 16;
+ val |= (var->left_margin - 1) << 24;
+ regs->tim0 = val;
+
+ val = var->yres;
+ if (fb->panel->cntl & CNTL_LCDDUAL)
+ val /= 2;
+ val -= 1;
+ val |= (var->vsync_len - 1) << 10;
+ val |= var->lower_margin << 16;
+ val |= var->upper_margin << 24;
+ regs->tim1 = val;
+
+ val = fb->panel->tim2;
+ val |= var->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : TIM2_IHS;
+ val |= var->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : TIM2_IVS;
+
+ cpl = var->xres_virtual;
+ if (fb->panel->cntl & CNTL_LCDTFT) /* TFT */
+ /* / 1 */;
+ else if (!var->grayscale) /* STN color */
+ cpl = cpl * 8 / 3;
+ else if (fb->panel->cntl & CNTL_LCDMONO8) /* STN monochrome, 8bit */
+ cpl /= 8;
+ else /* STN monochrome, 4bit */
+ cpl /= 4;
+
+ regs->tim2 = val | ((cpl - 1) << 16);
+
+ regs->tim3 = fb->panel->tim3;
+
+ val = fb->panel->cntl;
+ if (var->grayscale)
+ val |= CNTL_LCDBW;
+
+ if (fb->panel->caps && fb->board->caps &&
+ var->bits_per_pixel >= 16) {
+ /*
+ * if board and panel supply capabilities, we can support
+ * changing BGR/RGB depending on supplied parameters
+ */
+ if (var->red.offset == 0)
+ val &= ~CNTL_BGR;
+ else
+ val |= CNTL_BGR;
+ }
+
+ switch (var->bits_per_pixel) {
+ case 1:
+ val |= CNTL_LCDBPP1;
+ break;
+ case 2:
+ val |= CNTL_LCDBPP2;
+ break;
+ case 4:
+ val |= CNTL_LCDBPP4;
+ break;
+ case 8:
+ val |= CNTL_LCDBPP8;
+ break;
+ case 16:
+ /*
+ * PL110 cannot choose between 5551 and 565 modes in its
+ * control register. It is possible to use 565 with
+ * custom external wiring.
+ */
+ if (amba_part(fb->dev) == 0x110 ||
+ var->green.length == 5)
+ val |= CNTL_LCDBPP16;
+ else if (var->green.length == 6)
+ val |= CNTL_LCDBPP16_565;
+ else
+ val |= CNTL_LCDBPP16_444;
+ break;
+ case 32:
+ val |= CNTL_LCDBPP24;
+ break;
+ }
+
+ regs->cntl = val;
+ regs->pixclock = var->pixclock;
+}
+
+static inline int clcdfb_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
+{
+ var->xres_virtual = var->xres = (var->xres + 15) & ~15;
+ var->yres_virtual = var->yres = (var->yres + 1) & ~1;
+
+#define CHECK(e,l,h) (var->e < l || var->e > h)
+ if (CHECK(right_margin, (5+1), 256) || /* back porch */
+ CHECK(left_margin, (5+1), 256) || /* front porch */
+ CHECK(hsync_len, (5+1), 256) ||
+ var->xres > 4096 ||
+ var->lower_margin > 255 || /* back porch */
+ var->upper_margin > 255 || /* front porch */
+ var->vsync_len > 32 ||
+ var->yres > 1024)
+ return -EINVAL;
+#undef CHECK
+
+ /* single panel mode: PCD = max(PCD, 1) */
+ /* dual panel mode: PCD = max(PCD, 5) */
+
+ /*
+ * You can't change the grayscale setting, and
+ * we can only do non-interlaced video.
+ */
+ if (var->grayscale != fb->fb.var.grayscale ||
+ (var->vmode & FB_VMODE_MASK) != FB_VMODE_NONINTERLACED)
+ return -EINVAL;
+
+#define CHECK(e) (var->e != fb->fb.var.e)
+ if (fb->panel->fixedtimings &&
+ (CHECK(xres) ||
+ CHECK(yres) ||
+ CHECK(bits_per_pixel) ||
+ CHECK(pixclock) ||
+ CHECK(left_margin) ||
+ CHECK(right_margin) ||
+ CHECK(upper_margin) ||
+ CHECK(lower_margin) ||
+ CHECK(hsync_len) ||
+ CHECK(vsync_len) ||
+ CHECK(sync)))
+ return -EINVAL;
+#undef CHECK
+
+ var->nonstd = 0;
+ var->accel_flags = 0;
+
+ return 0;
+}
diff --git a/include/linux/amba/kmi.h b/include/linux/amba/kmi.h
new file mode 100644
index 000000000..a39e5be75
--- /dev/null
+++ b/include/linux/amba/kmi.h
@@ -0,0 +1,92 @@
+/*
+ * linux/include/asm-arm/hardware/amba_kmi.h
+ *
+ * Internal header file for AMBA KMI ports
+ *
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * ---------------------------------------------------------------------------
+ * From ARM PrimeCell(tm) PS2 Keyboard/Mouse Interface (PL050) Technical
+ * Reference Manual - ARM DDI 0143B - see http://www.arm.com/
+ * ---------------------------------------------------------------------------
+ */
+#ifndef ASM_ARM_HARDWARE_AMBA_KMI_H
+#define ASM_ARM_HARDWARE_AMBA_KMI_H
+
+/*
+ * KMI control register:
+ * KMICR_TYPE 0 = PS2/AT mode, 1 = No line control bit mode
+ * KMICR_RXINTREN 1 = enable RX interrupts
+ * KMICR_TXINTREN 1 = enable TX interrupts
+ * KMICR_EN 1 = enable KMI
+ * KMICR_FD 1 = force KMI data low
+ * KMICR_FC 1 = force KMI clock low
+ */
+#define KMICR (KMI_BASE + 0x00)
+#define KMICR_TYPE (1 << 5)
+#define KMICR_RXINTREN (1 << 4)
+#define KMICR_TXINTREN (1 << 3)
+#define KMICR_EN (1 << 2)
+#define KMICR_FD (1 << 1)
+#define KMICR_FC (1 << 0)
+
+/*
+ * KMI status register:
+ * KMISTAT_TXEMPTY 1 = transmitter register empty
+ * KMISTAT_TXBUSY 1 = currently sending data
+ * KMISTAT_RXFULL 1 = receiver register ready to be read
+ * KMISTAT_RXBUSY 1 = currently receiving data
+ * KMISTAT_RXPARITY parity of last databyte received
+ * KMISTAT_IC current level of KMI clock input
+ * KMISTAT_ID current level of KMI data input
+ */
+#define KMISTAT (KMI_BASE + 0x04)
+#define KMISTAT_TXEMPTY (1 << 6)
+#define KMISTAT_TXBUSY (1 << 5)
+#define KMISTAT_RXFULL (1 << 4)
+#define KMISTAT_RXBUSY (1 << 3)
+#define KMISTAT_RXPARITY (1 << 2)
+#define KMISTAT_IC (1 << 1)
+#define KMISTAT_ID (1 << 0)
+
+/*
+ * KMI data register
+ */
+#define KMIDATA (KMI_BASE + 0x08)
+
+/*
+ * KMI clock divisor: to generate 8MHz internal clock
+ * div = (ref / 8MHz) - 1; 0 <= div <= 15
+ */
+#define KMICLKDIV (KMI_BASE + 0x0c)
+
+/*
+ * KMI interrupt register:
+ * KMIIR_TXINTR 1 = transmit interrupt asserted
+ * KMIIR_RXINTR 1 = receive interrupt asserted
+ */
+#define KMIIR (KMI_BASE + 0x10)
+#define KMIIR_TXINTR (1 << 1)
+#define KMIIR_RXINTR (1 << 0)
+
+/*
+ * The size of the KMI primecell
+ */
+#define KMI_SIZE (0x100)
+
+#endif
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
new file mode 100644
index 000000000..8c9811306
--- /dev/null
+++ b/include/linux/amba/mmci.h
@@ -0,0 +1,36 @@
+/*
+ * include/linux/amba/mmci.h
+ */
+#ifndef AMBA_MMCI_H
+#define AMBA_MMCI_H
+
+#include <linux/mmc/host.h>
+
+/**
+ * struct mmci_platform_data - platform configuration for the MMCI
+ * (also known as PL180) block.
+ * @ocr_mask: available voltages on the 4 pins from the block, this
+ * is ignored if a regulator is used, see the MMC_VDD_* masks in
+ * mmc/host.h
+ * @ios_handler: a callback function to act on specific ios changes,
+ * used for example to control a level shifter; it may translate the
+ * voltage mask into a value (or set some other custom bits in MMCIPWR)
+ * that is OR:ed and written into the MMCIPWR register of the block.
+ * May also control external power based on the power_mode.
+ * @status: if no GPIO read function was given to the block in
+ * gpio_wp (below) this function will be called to determine
+ * whether a card is present in the MMC slot or not
+ * @gpio_wp: read this GPIO pin to see if the card is write protected
+ * @gpio_cd: read this GPIO pin to detect card insertion
+ * @cd_invert: true if the gpio_cd pin value is active low
+ */
+struct mmci_platform_data {
+ unsigned int ocr_mask;
+ int (*ios_handler)(struct device *, struct mmc_ios *);
+ unsigned int (*status)(struct device *);
+ int gpio_wp;
+ int gpio_cd;
+ bool cd_invert;
+};
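+
+/*
+ * Illustrative sketch (not part of the original header): a board with no
+ * card-detect or write-protect GPIOs might describe the slot like this.
+ * The voltage mask is only an example.
+ *
+ *	static struct mmci_platform_data my_mmci_data = {
+ *		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
+ *		.gpio_cd	= -1,
+ *		.gpio_wp	= -1,
+ *	};
+ */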
+
+#endif
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
new file mode 100644
index 000000000..854b7294f
--- /dev/null
+++ b/include/linux/amba/pl022.h
@@ -0,0 +1,295 @@
+/*
+ * include/linux/amba/pl022.h
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * Initial version inspired by:
+ * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
+ * Initial adoption to PL022 by:
+ * Sachin Verma <sachin.verma@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SSP_PL022_H
+#define _SSP_PL022_H
+
+#include <linux/types.h>
+
+/**
+ * whether SSP is in loopback mode or not
+ */
+enum ssp_loopback {
+ LOOPBACK_DISABLED,
+ LOOPBACK_ENABLED
+};
+
+/**
+ * enum ssp_interface - interfaces allowed for this SSP Controller
+ * @SSP_INTERFACE_MOTOROLA_SPI: Motorola Interface
+ * @SSP_INTERFACE_TI_SYNC_SERIAL: Texas Instrument Synchronous Serial
+ * interface
+ * @SSP_INTERFACE_NATIONAL_MICROWIRE: National Semiconductor Microwire
+ * interface
+ * @SSP_INTERFACE_UNIDIRECTIONAL: Unidirectional interface (STn8810
+ * &STn8815 only)
+ */
+enum ssp_interface {
+ SSP_INTERFACE_MOTOROLA_SPI,
+ SSP_INTERFACE_TI_SYNC_SERIAL,
+ SSP_INTERFACE_NATIONAL_MICROWIRE,
+ SSP_INTERFACE_UNIDIRECTIONAL
+};
+
+/**
+ * enum ssp_hierarchy - whether SSP is configured as Master or Slave
+ */
+enum ssp_hierarchy {
+ SSP_MASTER,
+ SSP_SLAVE
+};
+
+/**
+ * struct ssp_clock_params - clock parameters, to set SSP clock at a
+ * desired freq
+ */
+struct ssp_clock_params {
+ u8 cpsdvsr; /* value from 2 to 254 (even only!) */
+ u8 scr; /* value from 0 to 255 */
+};
+
+/**
+ * enum ssp_rx_endian - endianness of Rx FIFO Data
+ * this feature is only available in the ST version of PL022
+ */
+enum ssp_rx_endian {
+ SSP_RX_MSB,
+ SSP_RX_LSB
+};
+
+/**
+ * enum ssp_tx_endian - endianness of Tx FIFO Data
+ */
+enum ssp_tx_endian {
+ SSP_TX_MSB,
+ SSP_TX_LSB
+};
+
+/**
+ * enum ssp_data_size - number of bits in one data element
+ */
+enum ssp_data_size {
+ SSP_DATA_BITS_4 = 0x03, SSP_DATA_BITS_5, SSP_DATA_BITS_6,
+ SSP_DATA_BITS_7, SSP_DATA_BITS_8, SSP_DATA_BITS_9,
+ SSP_DATA_BITS_10, SSP_DATA_BITS_11, SSP_DATA_BITS_12,
+ SSP_DATA_BITS_13, SSP_DATA_BITS_14, SSP_DATA_BITS_15,
+ SSP_DATA_BITS_16, SSP_DATA_BITS_17, SSP_DATA_BITS_18,
+ SSP_DATA_BITS_19, SSP_DATA_BITS_20, SSP_DATA_BITS_21,
+ SSP_DATA_BITS_22, SSP_DATA_BITS_23, SSP_DATA_BITS_24,
+ SSP_DATA_BITS_25, SSP_DATA_BITS_26, SSP_DATA_BITS_27,
+ SSP_DATA_BITS_28, SSP_DATA_BITS_29, SSP_DATA_BITS_30,
+ SSP_DATA_BITS_31, SSP_DATA_BITS_32
+};
+
+/**
+ * enum ssp_mode - SSP mode of operation (Communication modes)
+ */
+enum ssp_mode {
+ INTERRUPT_TRANSFER,
+ POLLING_TRANSFER,
+ DMA_TRANSFER
+};
+
+/**
+ * enum ssp_rx_level_trig - receive FIFO watermark level which triggers
+ * IT: the interrupt fires when _N_ or more elements are in the RX FIFO.
+ */
+enum ssp_rx_level_trig {
+ SSP_RX_1_OR_MORE_ELEM,
+ SSP_RX_4_OR_MORE_ELEM,
+ SSP_RX_8_OR_MORE_ELEM,
+ SSP_RX_16_OR_MORE_ELEM,
+ SSP_RX_32_OR_MORE_ELEM
+};
+
+/**
+ * enum ssp_tx_level_trig - transmit FIFO watermark level which triggers
+ * IT: the interrupt fires when _N_ or more empty locations are in the TX FIFO.
+ */
+enum ssp_tx_level_trig {
+ SSP_TX_1_OR_MORE_EMPTY_LOC,
+ SSP_TX_4_OR_MORE_EMPTY_LOC,
+ SSP_TX_8_OR_MORE_EMPTY_LOC,
+ SSP_TX_16_OR_MORE_EMPTY_LOC,
+ SSP_TX_32_OR_MORE_EMPTY_LOC
+};
+
+/**
+ * enum ssp_spi_clk_phase - clock phase (Motorola SPI interface only)
+ * @SSP_CLK_FIRST_EDGE: Receive data on first edge transition (actual direction depends on polarity)
+ * @SSP_CLK_SECOND_EDGE: Receive data on second edge transition (actual direction depends on polarity)
+ */
+enum ssp_spi_clk_phase {
+ SSP_CLK_FIRST_EDGE,
+ SSP_CLK_SECOND_EDGE
+};
+
+/**
+ * enum ssp_spi_clk_pol - clock polarity (Motorola SPI interface only)
+ * @SSP_CLK_POL_IDLE_LOW: Low inactive level
+ * @SSP_CLK_POL_IDLE_HIGH: High inactive level
+ */
+enum ssp_spi_clk_pol {
+ SSP_CLK_POL_IDLE_LOW,
+ SSP_CLK_POL_IDLE_HIGH
+};
+
+/**
+ * enum ssp_microwire_ctrl_len - Microwire control lengths: command size in microwire format
+ */
+enum ssp_microwire_ctrl_len {
+ SSP_BITS_4 = 0x03, SSP_BITS_5, SSP_BITS_6,
+ SSP_BITS_7, SSP_BITS_8, SSP_BITS_9,
+ SSP_BITS_10, SSP_BITS_11, SSP_BITS_12,
+ SSP_BITS_13, SSP_BITS_14, SSP_BITS_15,
+ SSP_BITS_16, SSP_BITS_17, SSP_BITS_18,
+ SSP_BITS_19, SSP_BITS_20, SSP_BITS_21,
+ SSP_BITS_22, SSP_BITS_23, SSP_BITS_24,
+ SSP_BITS_25, SSP_BITS_26, SSP_BITS_27,
+ SSP_BITS_28, SSP_BITS_29, SSP_BITS_30,
+ SSP_BITS_31, SSP_BITS_32
+};
+
+/**
+ * enum ssp_microwire_wait_state - Microwire wait state
+ * @SSP_MWIRE_WAIT_ZERO: No wait state inserted after last command bit
+ * @SSP_MWIRE_WAIT_ONE: One wait state inserted after last command bit
+ */
+enum ssp_microwire_wait_state {
+ SSP_MWIRE_WAIT_ZERO,
+ SSP_MWIRE_WAIT_ONE
+};
+
+/**
+ * enum ssp_duplex - whether Full/Half Duplex on microwire, only
+ * available in the ST Micro variant.
+ * @SSP_MICROWIRE_CHANNEL_FULL_DUPLEX: SSPTXD becomes bi-directional,
+ * SSPRXD not used
+ * @SSP_MICROWIRE_CHANNEL_HALF_DUPLEX: SSPTXD is an output, SSPRXD is
+ * an input.
+ */
+enum ssp_duplex {
+ SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ SSP_MICROWIRE_CHANNEL_HALF_DUPLEX
+};
+
+/**
+ * enum ssp_clkdelay - an optional clock delay on the feedback clock
+ * only available in the ST Micro PL023 variant.
+ * @SSP_FEEDBACK_CLK_DELAY_NONE: no delay, the data coming in from the
+ * slave is sampled directly
+ * @SSP_FEEDBACK_CLK_DELAY_1T: the incoming slave data is sampled with
+ * a delay of T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_2T: ditto with a delay of 2T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_3T: ditto with a delay of 3T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_4T: ditto with a delay of 4T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_5T: ditto with a delay of 5T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_6T: ditto with a delay of 6T-dt
+ * @SSP_FEEDBACK_CLK_DELAY_7T: ditto with a delay of 7T-dt
+ */
+enum ssp_clkdelay {
+ SSP_FEEDBACK_CLK_DELAY_NONE,
+ SSP_FEEDBACK_CLK_DELAY_1T,
+ SSP_FEEDBACK_CLK_DELAY_2T,
+ SSP_FEEDBACK_CLK_DELAY_3T,
+ SSP_FEEDBACK_CLK_DELAY_4T,
+ SSP_FEEDBACK_CLK_DELAY_5T,
+ SSP_FEEDBACK_CLK_DELAY_6T,
+ SSP_FEEDBACK_CLK_DELAY_7T
+};
+
+/**
+ * enum ssp_chip_select - chip select/deselect commands
+ */
+enum ssp_chip_select {
+ SSP_CHIP_SELECT,
+ SSP_CHIP_DESELECT
+};
+
+
+struct dma_chan;
+/**
+ * struct pl022_ssp_master - device.platform_data for SPI controller devices.
+ * @bus_id: identifier for this bus
+ * @num_chipselect: chipselects are used to distinguish individual
+ * SPI slaves, and are numbered from zero to num_chipselect - 1.
+ * Each slave has a chipselect signal, but it's common that not
+ * every chipselect is connected to a slave.
+ * @enable_dma: if true enables DMA driven transfers.
+ * @dma_rx_param: parameter to locate an RX DMA channel.
+ * @dma_tx_param: parameter to locate a TX DMA channel.
+ * @autosuspend_delay: delay in ms following transfer completion before the
+ * runtime power management system suspends the device. A setting of 0
+ * indicates no delay and the device will be suspended immediately.
+ * @rt: indicates the controller should run the message pump with realtime
+ * priority to minimise the transfer latency on the bus.
+ * @chipselects: list of <num_chipselects> chip select gpios
+ */
+struct pl022_ssp_controller {
+ u16 bus_id;
+ u8 num_chipselect;
+ u8 enable_dma:1;
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ void *dma_rx_param;
+ void *dma_tx_param;
+ int autosuspend_delay;
+ bool rt;
+ int *chipselects;
+};
+
+/**
+ * struct ssp_config_chip - spi_board_info.controller_data for SPI
+ * slave devices, copied to spi_device.controller_data.
+ *
+ * @iface: Interface type(Motorola, TI, Microwire, Universal)
+ * @hierarchy: sets whether interface is master or slave
+ * @slave_tx_disable: SSPTXD is disconnected (in slave mode only)
+ * @clk_freq: Tune freq parameters of SSP(when in master mode)
+ * @com_mode: communication mode: polling, Interrupt or DMA
+ * @rx_lev_trig: Rx FIFO watermark level (for IT & DMA mode)
+ * @tx_lev_trig: Tx FIFO watermark level (for IT & DMA mode)
+ * @ctrl_len: Microwire interface: Control length
+ * @wait_state: Microwire interface: Wait state
+ * @duplex: Microwire interface: Full/Half duplex
+ * @clkdelay: on the PL023 variant, the delay in feedback clock cycles
+ * before sampling the incoming line
+ * @cs_control: function pointer to board-specific function to
+ * assert/deassert I/O port to control HW generation of the device's chip-select.
+ */
+struct pl022_config_chip {
+ enum ssp_interface iface;
+ enum ssp_hierarchy hierarchy;
+ bool slave_tx_disable;
+ struct ssp_clock_params clk_freq;
+ enum ssp_mode com_mode;
+ enum ssp_rx_level_trig rx_lev_trig;
+ enum ssp_tx_level_trig tx_lev_trig;
+ enum ssp_microwire_ctrl_len ctrl_len;
+ enum ssp_microwire_wait_state wait_state;
+ enum ssp_duplex duplex;
+ enum ssp_clkdelay clkdelay;
+ void (*cs_control) (u32 control);
+};
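+
+/*
+ * Illustrative sketch (not part of the original header): typical board code
+ * provides a pl022_ssp_controller as the SPI controller's platform data and
+ * a pl022_config_chip as each slave's controller_data. All values below are
+ * examples only; my_cs_control() is a hypothetical board helper.
+ *
+ *	static struct pl022_ssp_controller my_ssp_plat = {
+ *		.bus_id		= 0,
+ *		.num_chipselect	= 1,
+ *		.enable_dma	= 0,
+ *	};
+ *
+ *	static struct pl022_config_chip my_chip_info = {
+ *		.iface		= SSP_INTERFACE_MOTOROLA_SPI,
+ *		.hierarchy	= SSP_MASTER,
+ *		.com_mode	= INTERRUPT_TRANSFER,
+ *		.rx_lev_trig	= SSP_RX_1_OR_MORE_ELEM,
+ *		.tx_lev_trig	= SSP_TX_1_OR_MORE_EMPTY_LOC,
+ *		.cs_control	= my_cs_control,
+ *	};
+ */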
+
+#endif /* _SSP_PL022_H */
diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h
new file mode 100644
index 000000000..fb83c0453
--- /dev/null
+++ b/include/linux/amba/pl061.h
@@ -0,0 +1,16 @@
+#include <linux/types.h>
+
+/* platform data for the PL061 GPIO driver */
+
+struct pl061_platform_data {
+ /* number of the first GPIO */
+ unsigned gpio_base;
+
+ /* number of the first IRQ.
+ * If the IRQ functionality is not desired this must be set to 0.
+ */
+ unsigned irq_base;
+
+ u8 directions; /* startup directions, 1: out, 0: in */
+ u8 values; /* startup values */
+};
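+
+/*
+ * Illustrative sketch (not part of the original header): all values below
+ * are examples only; an irq_base of 0 disables the IRQ functionality and
+ * directions of 0x00 starts all pins as inputs.
+ *
+ *	static struct pl061_platform_data my_pl061_data = {
+ *		.gpio_base	= 32,
+ *		.irq_base	= 0,
+ *		.directions	= 0x00,
+ *		.values		= 0x00,
+ *	};
+ */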
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
new file mode 100644
index 000000000..91b84a7f0
--- /dev/null
+++ b/include/linux/amba/pl080.h
@@ -0,0 +1,147 @@
+/* include/linux/amba/pl080.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * ARM PrimeCell PL080 DMA controller
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Note, there are some Samsung updates to this controller block which
+ * make it not entirely compatible with the PL080 specification from
+ * ARM. When in doubt, check the Samsung documentation first.
+ *
+ * The Samsung defines are PL080S, and add an extra control register,
+ * the ability to move more than 2^11 counts of data and some extra
+ * OneNAND features.
+*/
+
+#ifndef ASM_PL080_H
+#define ASM_PL080_H
+
+#define PL080_INT_STATUS (0x00)
+#define PL080_TC_STATUS (0x04)
+#define PL080_TC_CLEAR (0x08)
+#define PL080_ERR_STATUS (0x0C)
+#define PL080_ERR_CLEAR (0x10)
+#define PL080_RAW_TC_STATUS (0x14)
+#define PL080_RAW_ERR_STATUS (0x18)
+#define PL080_EN_CHAN (0x1c)
+#define PL080_SOFT_BREQ (0x20)
+#define PL080_SOFT_SREQ (0x24)
+#define PL080_SOFT_LBREQ (0x28)
+#define PL080_SOFT_LSREQ (0x2C)
+
+#define PL080_CONFIG (0x30)
+#define PL080_CONFIG_M2_BE (1 << 2)
+#define PL080_CONFIG_M1_BE (1 << 1)
+#define PL080_CONFIG_ENABLE (1 << 0)
+
+#define PL080_SYNC (0x34)
+
+/* Per channel configuration registers */
+
+#define PL080_Cx_STRIDE (0x20)
+#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20)))
+#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20)))
+#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20)))
+#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20)))
+#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20)))
+#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20)))
+#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20)))
+#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20)))
+
+#define PL080_CH_SRC_ADDR (0x00)
+#define PL080_CH_DST_ADDR (0x04)
+#define PL080_CH_LLI (0x08)
+#define PL080_CH_CONTROL (0x0C)
+#define PL080_CH_CONFIG (0x10)
+#define PL080S_CH_CONTROL2 (0x10)
+#define PL080S_CH_CONFIG (0x14)
+
+#define PL080_LLI_ADDR_MASK (0x3fffffff << 2)
+#define PL080_LLI_ADDR_SHIFT (2)
+#define PL080_LLI_LM_AHB2 (1 << 0)
+
+#define PL080_CONTROL_TC_IRQ_EN (1 << 31)
+#define PL080_CONTROL_PROT_MASK (0x7 << 28)
+#define PL080_CONTROL_PROT_SHIFT (28)
+#define PL080_CONTROL_PROT_CACHE (1 << 30)
+#define PL080_CONTROL_PROT_BUFF (1 << 29)
+#define PL080_CONTROL_PROT_SYS (1 << 28)
+#define PL080_CONTROL_DST_INCR (1 << 27)
+#define PL080_CONTROL_SRC_INCR (1 << 26)
+#define PL080_CONTROL_DST_AHB2 (1 << 25)
+#define PL080_CONTROL_SRC_AHB2 (1 << 24)
+#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21)
+#define PL080_CONTROL_DWIDTH_SHIFT (21)
+#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18)
+#define PL080_CONTROL_SWIDTH_SHIFT (18)
+#define PL080_CONTROL_DB_SIZE_MASK (0x7 << 15)
+#define PL080_CONTROL_DB_SIZE_SHIFT (15)
+#define PL080_CONTROL_SB_SIZE_MASK (0x7 << 12)
+#define PL080_CONTROL_SB_SIZE_SHIFT (12)
+#define PL080_CONTROL_TRANSFER_SIZE_MASK (0xfff << 0)
+#define PL080S_CONTROL_TRANSFER_SIZE_MASK (0x1ffffff << 0)
+#define PL080_CONTROL_TRANSFER_SIZE_SHIFT (0)
+
+#define PL080_BSIZE_1 (0x0)
+#define PL080_BSIZE_4 (0x1)
+#define PL080_BSIZE_8 (0x2)
+#define PL080_BSIZE_16 (0x3)
+#define PL080_BSIZE_32 (0x4)
+#define PL080_BSIZE_64 (0x5)
+#define PL080_BSIZE_128 (0x6)
+#define PL080_BSIZE_256 (0x7)
+
+#define PL080_WIDTH_8BIT (0x0)
+#define PL080_WIDTH_16BIT (0x1)
+#define PL080_WIDTH_32BIT (0x2)
+
+#define PL080N_CONFIG_ITPROT (1 << 20)
+#define PL080N_CONFIG_SECPROT (1 << 19)
+#define PL080_CONFIG_HALT (1 << 18)
+#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
+#define PL080_CONFIG_LOCK (1 << 16)
+#define PL080_CONFIG_TC_IRQ_MASK (1 << 15)
+#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14)
+#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11)
+#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11)
+#define PL080_CONFIG_DST_SEL_MASK (0xf << 6)
+#define PL080_CONFIG_DST_SEL_SHIFT (6)
+#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1)
+#define PL080_CONFIG_SRC_SEL_SHIFT (1)
+#define PL080_CONFIG_ENABLE (1 << 0)
+
+#define PL080_FLOW_MEM2MEM (0x0)
+#define PL080_FLOW_MEM2PER (0x1)
+#define PL080_FLOW_PER2MEM (0x2)
+#define PL080_FLOW_SRC2DST (0x3)
+#define PL080_FLOW_SRC2DST_DST (0x4)
+#define PL080_FLOW_MEM2PER_PER (0x5)
+#define PL080_FLOW_PER2MEM_PER (0x6)
+#define PL080_FLOW_SRC2DST_SRC (0x7)
+
+/* DMA linked list chain structure */
+
+struct pl080_lli {
+ u32 src_addr;
+ u32 dst_addr;
+ u32 next_lli;
+ u32 control0;
+};
+
+struct pl080s_lli {
+ u32 src_addr;
+ u32 dst_addr;
+ u32 next_lli;
+ u32 control0;
+ u32 control1;
+};
+
+#endif /* ASM_PL080_H */
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
new file mode 100644
index 000000000..10fe2a211
--- /dev/null
+++ b/include/linux/amba/pl08x.h
@@ -0,0 +1,107 @@
+/*
+ * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
+ *
+ * Copyright (C) 2005 ARM Ltd
+ * Copyright (C) 2010 ST-Ericsson SA
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * pl08x information required by platform code
+ *
+ * Please credit ARM.com
+ * Documentation: ARM DDI 0196D
+ */
+
+#ifndef AMBA_PL08X_H
+#define AMBA_PL08X_H
+
+/* We need sizes of structs from this header */
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+struct pl08x_driver_data;
+struct pl08x_phy_chan;
+struct pl08x_txd;
+
+/* Bitmasks for selecting AHB ports for DMA transfers */
+enum {
+ PL08X_AHB1 = (1 << 0),
+ PL08X_AHB2 = (1 << 1)
+};
+
+/**
+ * struct pl08x_channel_data - data structure to pass info between
+ * platform and PL08x driver regarding channel configuration
+ * @bus_id: name of this device channel, not just a device name since
+ * devices may have more than one channel e.g. "foo_tx"
+ * @min_signal: the minimum DMA signal number to be muxed in for this
+ * channel (for platforms supporting muxed signals). If you have
+ * static assignments, make sure this is set to the assigned signal
+ * number. PL08x has 16 possible signals, numbered 0 through 15, so
+ * when these are not enough they often get muxed (in hardware),
+ * disabling simultaneous use of the same channel for two devices.
+ * @max_signal: the maximum DMA signal number to be muxed in for
+ * the channel. Set to the same as min_signal for
+ * devices with static assignments
+ * @muxval: a number usually used to poke into some mux register to
+ * mux in the signal to this channel
+ * @cctl_memcpy: options for the channel control register for memcpy
+ * *** not used for slave channels ***
+ * @addr: source/target address in physical memory for this DMA channel,
+ * can be the address of a FIFO register for burst requests for example.
+ * This can be left undefined if the PrimeCell API is used for configuring
+ * this.
+ * @single: the device connected to this channel will request single DMA
+ * transfers, not bursts. (Bursts are default.)
+ * @periph_buses: the device connected to this channel is accessible via
+ * these buses (use PL08X_AHB1 | PL08X_AHB2).
+ */
+struct pl08x_channel_data {
+ const char *bus_id;
+ int min_signal;
+ int max_signal;
+ u32 muxval;
+ u32 cctl_memcpy;
+ dma_addr_t addr;
+ bool single;
+ u8 periph_buses;
+};
+
+/**
+ * struct pl08x_platform_data - the platform configuration for the PL08x
+ * PrimeCells.
+ * @slave_channels: the channels defined for the different devices on the
+ * platform, all inclusive, including multiplexed channels. The available
+ * physical channels will be multiplexed around these signals as they are
+ * requested, just enumerate all possible channels.
+ * @num_slave_channels: number of entries in @slave_channels
+ * @memcpy_channel: channel configuration used for memcpy transfers
+ * @get_xfer_signal: request a physical signal to be used for a DMA transfer
+ * immediately: if there is some multiplexing or similar blocking the use
+ * of the channel, the transfer can be denied by returning less than zero;
+ * otherwise it returns the allocated signal number
+ * @put_xfer_signal: indicate to the platform that this physical signal is not
+ * running any DMA transfer and multiplexing can be recycled
+ * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
+ * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
+ */
+struct pl08x_platform_data {
+ const struct pl08x_channel_data *slave_channels;
+ unsigned int num_slave_channels;
+ struct pl08x_channel_data memcpy_channel;
+ int (*get_xfer_signal)(const struct pl08x_channel_data *);
+ void (*put_xfer_signal)(const struct pl08x_channel_data *, int);
+ u8 lli_buses;
+ u8 mem_buses;
+};
+
+#ifdef CONFIG_AMBA_PL08X
+bool pl08x_filter_id(struct dma_chan *chan, void *chan_id);
+#else
+static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ return false;
+}
+#endif
+
+#endif /* AMBA_PL08X_H */
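
The kernel-doc above describes the tables a platform hands to the PL08x driver and the pl08x_filter_id() helper clients use to find their channel. The following is only an illustrative sketch: the channel name "foo_tx", the signal number and the bus choices are invented, not anything this header prescribes.

#include <linux/amba/pl08x.h>
#include <linux/dmaengine.h>
#include <linux/kernel.h>

static const struct pl08x_channel_data board_slave_channels[] = {
	{
		.bus_id       = "foo_tx",	/* per-channel name, as documented above */
		.min_signal   = 0,		/* statically assigned DMA request signal */
		.max_signal   = 0,
		.periph_buses = PL08X_AHB2,	/* peripheral FIFO sits on AHB2 */
	},
};

static struct pl08x_platform_data board_pl08x_pdata = {
	.slave_channels     = board_slave_channels,
	.num_slave_channels = ARRAY_SIZE(board_slave_channels),
	.lli_buses          = PL08X_AHB1,
	.mem_buses          = PL08X_AHB1,
};

/* A client can then pick up the channel by bus_id through the filter helper. */
static struct dma_chan *board_request_foo_tx(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, pl08x_filter_id, "foo_tx");
}
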
diff --git a/include/linux/amba/pl093.h b/include/linux/amba/pl093.h
new file mode 100644
index 000000000..2983e3671
--- /dev/null
+++ b/include/linux/amba/pl093.h
@@ -0,0 +1,80 @@
+/* linux/amba/pl093.h
+ *
+ * Copyright (c) 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * AMBA PL093 SSMC (synchronous static memory controller)
+ * See DDI0236.pdf (r0p4) for more details
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define SMB_BANK(x) ((x) * 0x20) /* each bank control set is 0x20 apart */
+
+/* Offsets for SMBxxxxRy registers */
+
+#define SMBIDCYR (0x00)
+#define SMBWSTRDR (0x04)
+#define SMBWSTWRR (0x08)
+#define SMBWSTOENR (0x0C)
+#define SMBWSTWENR (0x10)
+#define SMBCR (0x14)
+#define SMBSR (0x18)
+#define SMBWSTBRDR (0x1C)
+
+/* Masks for SMB registers */
+#define IDCY_MASK (0xf)
+#define WSTRD_MASK (0xf)
+#define WSTWR_MASK (0xf)
+#define WSTOEN_MASK (0xf)
+#define WSTWEN_MASK (0xf)
+
+/* Notes from datasheet:
+ * WSTOEN <= WSTRD
+ * WSTWEN <= WSTWR
+ *
+ * WSTOEN is not used with nWAIT
+ */
+
+/* SMBCR bit definitions */
+#define SMBCR_BIWRITEEN (1 << 21)
+#define SMBCR_ADDRVALIDWRITEEN (1 << 20)
+#define SMBCR_SYNCWRITE (1 << 17)
+#define SMBCR_BMWRITE (1 << 16)
+#define SMBCR_WRAPREAD (1 << 14)
+#define SMBCR_BIREADEN (1 << 13)
+#define SMBCR_ADDRVALIDREADEN (1 << 12)
+#define SMBCR_SYNCREAD (1 << 9)
+#define SMBCR_BMREAD (1 << 8)
+#define SMBCR_SMBLSPOL (1 << 6)
+#define SMBCR_WP (1 << 3)
+#define SMBCR_WAITEN (1 << 2)
+#define SMBCR_WAITPOL (1 << 1)
+#define SMBCR_RBLE (1 << 0)
+
+#define SMBCR_BURSTLENWRITE_MASK (3 << 18)
+#define SMBCR_BURSTLENWRITE_4 (0 << 18)
+#define SMBCR_BURSTLENWRITE_8 (1 << 18)
+#define SMBCR_BURSTLENWRITE_RESERVED (2 << 18)
+#define SMBCR_BURSTLENWRITE_CONTINUOUS (3 << 18)
+
+#define SMBCR_BURSTLENREAD_MASK (3 << 10)
+#define SMBCR_BURSTLENREAD_4 (0 << 10)
+#define SMBCR_BURSTLENREAD_8 (1 << 10)
+#define SMBCR_BURSTLENREAD_16 (2 << 10)
+#define SMBCR_BURSTLENREAD_CONTINUOUS (3 << 10)
+
+#define SMBCR_MW_MASK (3 << 4)
+#define SMBCR_MW_8BIT (0 << 4)
+#define SMBCR_MW_16BIT (1 << 4)
+#define SMBCR_MW_M32BIT (2 << 4)
+
+/* SSMC status registers */
+#define SSMCCSR (0x200)
+#define SSMCCR (0x204)
+#define SSMCITCR (0x208)
+#define SSMCITIP (0x20C)
+#define SSMCITIOP (0x210)
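
All offsets above are relative to an ioremap()'d SSMC base. As a small sketch (the base pointer and bank number are caller-supplied assumptions, not defined by this header), reading one bank's control register looks like this:

#include <linux/io.h>
#include <linux/amba/pl093.h>

/* read the SMBCR of the given bank, assuming 'ssmc_base' maps the PL093 */
static u32 ssmc_read_bank_cr(void __iomem *ssmc_base, unsigned int bank)
{
	return readl(ssmc_base + SMB_BANK(bank) + SMBCR);
}
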
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
new file mode 100644
index 000000000..fe93758e8
--- /dev/null
+++ b/include/linux/amba/pl330.h
@@ -0,0 +1,35 @@
+/* linux/include/linux/amba/pl330.h
+ *
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __AMBA_PL330_H_
+#define __AMBA_PL330_H_
+
+#include <linux/dmaengine.h>
+
+struct dma_pl330_platdata {
+ /*
+ * Number of valid peripherals connected to DMAC.
+ * This may be different from the value read from
+ * CR0, as the PL330 implementation might have 'holes'
+ * in the peri list or the peri could also be reached
+ * from another DMAC which the platform prefers.
+ */
+ u8 nr_valid_peri;
+ /* Array of valid peripherals */
+ u8 *peri_id;
+ /* Operational capabilities */
+ dma_cap_mask_t cap_mask;
+ /* Bytes to allocate for MC buffer */
+ unsigned mcbuf_sz;
+};
+
+extern bool pl330_filter(struct dma_chan *chan, void *param);
+#endif /* __AMBA_PL330_H_ */
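
pl330_filter() is the hook client code passes to the dmaengine core when it wants one of this controller's channels. A minimal sketch of that calling pattern follows; the filter parameter is left opaque here because its interpretation belongs to the PL330 driver, not this header.

#include <linux/amba/pl330.h>
#include <linux/dmaengine.h>

static struct dma_chan *example_grab_pl330_chan(void *filter_param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* pl330_filter() decides whether a candidate channel matches filter_param */
	return dma_request_channel(mask, pl330_filter, filter_param);
}
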
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
new file mode 100644
index 000000000..0ddb5c02a
--- /dev/null
+++ b/include/linux/amba/serial.h
@@ -0,0 +1,214 @@
+/*
+ * linux/include/asm-arm/hardware/serial_amba.h
+ *
+ * Internal header file for AMBA serial ports
+ *
+ * Copyright (C) ARM Limited
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+
+#include <linux/types.h>
+
+/* -------------------------------------------------------------------------------
+ * From AMBA UART (PL010) Block Specification
+ * -------------------------------------------------------------------------------
+ * UART Register Offsets.
+ */
+#define UART01x_DR 0x00 /* Data read or written from the interface. */
+#define UART01x_RSR 0x04 /* Receive status register (Read). */
+#define UART01x_ECR 0x04 /* Error clear register (Write). */
+#define UART010_LCRH 0x08 /* Line control register, high byte. */
+#define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */
+#define UART010_LCRM 0x0C /* Line control register, middle byte. */
+#define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */
+#define UART010_LCRL 0x10 /* Line control register, low byte. */
+#define UART010_CR 0x14 /* Control register. */
+#define UART01x_FR 0x18 /* Flag register (Read only). */
+#define UART010_IIR 0x1C /* Interrupt identification register (Read). */
+#define UART010_ICR 0x1C /* Interrupt clear register (Write). */
+#define ST_UART011_LCRH_RX 0x1C /* Rx line control register. */
+#define UART01x_ILPR 0x20 /* IrDA low power counter register. */
+#define UART011_IBRD 0x24 /* Integer baud rate divisor register. */
+#define UART011_FBRD 0x28 /* Fractional baud rate divisor register. */
+#define UART011_LCRH 0x2c /* Line control register. */
+#define ST_UART011_LCRH_TX 0x2c /* Tx Line control register. */
+#define UART011_CR 0x30 /* Control register. */
+#define UART011_IFLS 0x34 /* Interrupt fifo level select. */
+#define UART011_IMSC 0x38 /* Interrupt mask. */
+#define UART011_RIS 0x3c /* Raw interrupt status. */
+#define UART011_MIS 0x40 /* Masked interrupt status. */
+#define UART011_ICR 0x44 /* Interrupt clear register. */
+#define UART011_DMACR 0x48 /* DMA control register. */
+#define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */
+#define ST_UART011_XON1 0x54 /* XON1 register. */
+#define ST_UART011_XON2 0x58 /* XON2 register. */
+#define ST_UART011_XOFF1 0x5C /* XOFF1 register. */
+#define ST_UART011_XOFF2 0x60 /* XOFF2 register. */
+#define ST_UART011_ITCR 0x80 /* Integration test control register. */
+#define ST_UART011_ITIP 0x84 /* Integration test input register. */
+#define ST_UART011_ABCR 0x100 /* Autobaud control register. */
+#define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */
+
+#define UART011_DR_OE (1 << 11)
+#define UART011_DR_BE (1 << 10)
+#define UART011_DR_PE (1 << 9)
+#define UART011_DR_FE (1 << 8)
+
+#define UART01x_RSR_OE 0x08
+#define UART01x_RSR_BE 0x04
+#define UART01x_RSR_PE 0x02
+#define UART01x_RSR_FE 0x01
+
+#define UART011_FR_RI 0x100
+#define UART011_FR_TXFE 0x080
+#define UART011_FR_RXFF 0x040
+#define UART01x_FR_TXFF 0x020
+#define UART01x_FR_RXFE 0x010
+#define UART01x_FR_BUSY 0x008
+#define UART01x_FR_DCD 0x004
+#define UART01x_FR_DSR 0x002
+#define UART01x_FR_CTS 0x001
+#define UART01x_FR_TMSK (UART01x_FR_TXFF + UART01x_FR_BUSY)
+
+#define UART011_CR_CTSEN 0x8000 /* CTS hardware flow control */
+#define UART011_CR_RTSEN 0x4000 /* RTS hardware flow control */
+#define UART011_CR_OUT2 0x2000 /* OUT2 */
+#define UART011_CR_OUT1 0x1000 /* OUT1 */
+#define UART011_CR_RTS 0x0800 /* RTS */
+#define UART011_CR_DTR 0x0400 /* DTR */
+#define UART011_CR_RXE 0x0200 /* receive enable */
+#define UART011_CR_TXE 0x0100 /* transmit enable */
+#define UART011_CR_LBE 0x0080 /* loopback enable */
+#define UART010_CR_RTIE 0x0040
+#define UART010_CR_TIE 0x0020
+#define UART010_CR_RIE 0x0010
+#define UART010_CR_MSIE 0x0008
+#define ST_UART011_CR_OVSFACT 0x0008 /* Oversampling factor */
+#define UART01x_CR_IIRLP 0x0004 /* SIR low power mode */
+#define UART01x_CR_SIREN 0x0002 /* SIR enable */
+#define UART01x_CR_UARTEN 0x0001 /* UART enable */
+
+#define UART011_LCRH_SPS 0x80
+#define UART01x_LCRH_WLEN_8 0x60
+#define UART01x_LCRH_WLEN_7 0x40
+#define UART01x_LCRH_WLEN_6 0x20
+#define UART01x_LCRH_WLEN_5 0x00
+#define UART01x_LCRH_FEN 0x10
+#define UART01x_LCRH_STP2 0x08
+#define UART01x_LCRH_EPS 0x04
+#define UART01x_LCRH_PEN 0x02
+#define UART01x_LCRH_BRK 0x01
+
+#define ST_UART011_DMAWM_RX_1 (0 << 3)
+#define ST_UART011_DMAWM_RX_2 (1 << 3)
+#define ST_UART011_DMAWM_RX_4 (2 << 3)
+#define ST_UART011_DMAWM_RX_8 (3 << 3)
+#define ST_UART011_DMAWM_RX_16 (4 << 3)
+#define ST_UART011_DMAWM_RX_32 (5 << 3)
+#define ST_UART011_DMAWM_RX_48 (6 << 3)
+#define ST_UART011_DMAWM_TX_1 0
+#define ST_UART011_DMAWM_TX_2 1
+#define ST_UART011_DMAWM_TX_4 2
+#define ST_UART011_DMAWM_TX_8 3
+#define ST_UART011_DMAWM_TX_16 4
+#define ST_UART011_DMAWM_TX_32 5
+#define ST_UART011_DMAWM_TX_48 6
+
+#define UART010_IIR_RTIS 0x08
+#define UART010_IIR_TIS 0x04
+#define UART010_IIR_RIS 0x02
+#define UART010_IIR_MIS 0x01
+
+#define UART011_IFLS_RX1_8 (0 << 3)
+#define UART011_IFLS_RX2_8 (1 << 3)
+#define UART011_IFLS_RX4_8 (2 << 3)
+#define UART011_IFLS_RX6_8 (3 << 3)
+#define UART011_IFLS_RX7_8 (4 << 3)
+#define UART011_IFLS_TX1_8 (0 << 0)
+#define UART011_IFLS_TX2_8 (1 << 0)
+#define UART011_IFLS_TX4_8 (2 << 0)
+#define UART011_IFLS_TX6_8 (3 << 0)
+#define UART011_IFLS_TX7_8 (4 << 0)
+/* special values for ST vendor with deeper fifo */
+#define UART011_IFLS_RX_HALF (5 << 3)
+#define UART011_IFLS_TX_HALF (5 << 0)
+
+#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */
+#define UART011_BEIM (1 << 9) /* break error interrupt mask */
+#define UART011_PEIM (1 << 8) /* parity error interrupt mask */
+#define UART011_FEIM (1 << 7) /* framing error interrupt mask */
+#define UART011_RTIM (1 << 6) /* receive timeout interrupt mask */
+#define UART011_TXIM (1 << 5) /* transmit interrupt mask */
+#define UART011_RXIM (1 << 4) /* receive interrupt mask */
+#define UART011_DSRMIM (1 << 3) /* DSR interrupt mask */
+#define UART011_DCDMIM (1 << 2) /* DCD interrupt mask */
+#define UART011_CTSMIM (1 << 1) /* CTS interrupt mask */
+#define UART011_RIMIM (1 << 0) /* RI interrupt mask */
+
+#define UART011_OEIS (1 << 10) /* overrun error interrupt status */
+#define UART011_BEIS (1 << 9) /* break error interrupt status */
+#define UART011_PEIS (1 << 8) /* parity error interrupt status */
+#define UART011_FEIS (1 << 7) /* framing error interrupt status */
+#define UART011_RTIS (1 << 6) /* receive timeout interrupt status */
+#define UART011_TXIS (1 << 5) /* transmit interrupt status */
+#define UART011_RXIS (1 << 4) /* receive interrupt status */
+#define UART011_DSRMIS (1 << 3) /* DSR interrupt status */
+#define UART011_DCDMIS (1 << 2) /* DCD interrupt status */
+#define UART011_CTSMIS (1 << 1) /* CTS interrupt status */
+#define UART011_RIMIS (1 << 0) /* RI interrupt status */
+
+#define UART011_OEIC (1 << 10) /* overrun error interrupt clear */
+#define UART011_BEIC (1 << 9) /* break error interrupt clear */
+#define UART011_PEIC (1 << 8) /* parity error interrupt clear */
+#define UART011_FEIC (1 << 7) /* framing error interrupt clear */
+#define UART011_RTIC (1 << 6) /* receive timeout interrupt clear */
+#define UART011_TXIC (1 << 5) /* transmit interrupt clear */
+#define UART011_RXIC (1 << 4) /* receive interrupt clear */
+#define UART011_DSRMIC (1 << 3) /* DSR interrupt clear */
+#define UART011_DCDMIC (1 << 2) /* DCD interrupt clear */
+#define UART011_CTSMIC (1 << 1) /* CTS interrupt clear */
+#define UART011_RIMIC (1 << 0) /* RI interrupt clear */
+
+#define UART011_DMAONERR (1 << 2) /* disable dma on error */
+#define UART011_TXDMAE (1 << 1) /* enable transmit dma */
+#define UART011_RXDMAE (1 << 0) /* enable receive dma */
+
+#define UART01x_RSR_ANY (UART01x_RSR_OE|UART01x_RSR_BE|UART01x_RSR_PE|UART01x_RSR_FE)
+#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
+
+#ifndef __ASSEMBLY__
+struct amba_device; /* in the uncompress code this header is included but amba/bus.h is not */
+struct amba_pl010_data {
+ void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl);
+};
+
+struct dma_chan;
+struct amba_pl011_data {
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+ void *dma_rx_param;
+ void *dma_tx_param;
+ bool dma_rx_poll_enable;
+ unsigned int dma_rx_poll_rate;
+ unsigned int dma_rx_poll_timeout;
+ void (*init) (void);
+ void (*exit) (void);
+};
+#endif
+
+#endif
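
struct amba_pl011_data above is how board code wires the UART to its DMA channels. The sketch below is illustrative only: my_uart_dma_filter, the cookie strings and the poll figures are invented, and matching on chan->private is just one common pattern for such filters, not something this header mandates.

#include <linux/amba/serial.h>
#include <linux/dmaengine.h>

static bool my_uart_dma_filter(struct dma_chan *chan, void *filter_param)
{
	/* one common pattern: match against a cookie stashed in chan->private */
	return chan->private == filter_param;
}

static struct amba_pl011_data uart0_pl011_data = {
	.dma_filter          = my_uart_dma_filter,
	.dma_rx_param        = "uart0_rx",	/* invented cookies */
	.dma_tx_param        = "uart0_tx",
	.dma_rx_poll_enable  = true,
	.dma_rx_poll_rate    = 100,		/* invented figures */
	.dma_rx_poll_timeout = 3000,
};
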
diff --git a/include/linux/amba/sp810.h b/include/linux/amba/sp810.h
new file mode 100644
index 000000000..c7df89f99
--- /dev/null
+++ b/include/linux/amba/sp810.h
@@ -0,0 +1,62 @@
+/*
+ * ARM PrimeXsys System Controller SP810 header file
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __AMBA_SP810_H
+#define __AMBA_SP810_H
+
+#include <linux/io.h>
+
+/* sysctl registers offset */
+#define SCCTRL 0x000
+#define SCSYSSTAT 0x004
+#define SCIMCTRL 0x008
+#define SCIMSTAT 0x00C
+#define SCXTALCTRL 0x010
+#define SCPLLCTRL 0x014
+#define SCPLLFCTRL 0x018
+#define SCPERCTRL0 0x01C
+#define SCPERCTRL1 0x020
+#define SCPEREN 0x024
+#define SCPERDIS 0x028
+#define SCPERCLKEN 0x02C
+#define SCPERSTAT 0x030
+#define SCSYSID0 0xEE0
+#define SCSYSID1 0xEE4
+#define SCSYSID2 0xEE8
+#define SCSYSID3 0xEEC
+#define SCITCR 0xF00
+#define SCITIR0 0xF04
+#define SCITIR1 0xF08
+#define SCITOR 0xF0C
+#define SCCNTCTRL 0xF10
+#define SCCNTDATA 0xF14
+#define SCCNTSTEP 0xF18
+#define SCPERIPHID0 0xFE0
+#define SCPERIPHID1 0xFE4
+#define SCPERIPHID2 0xFE8
+#define SCPERIPHID3 0xFEC
+#define SCPCELLID0 0xFF0
+#define SCPCELLID1 0xFF4
+#define SCPCELLID2 0xFF8
+#define SCPCELLID3 0xFFC
+
+#define SCCTRL_TIMERENnSEL_SHIFT(n) (15 + ((n) * 2))
+
+static inline void sysctl_soft_reset(void __iomem *base)
+{
+ /* switch to slow mode */
+ writel(0x2, base + SCCTRL);
+
+ /* writing any value to SCSYSSTAT reg will reset system */
+ writel(0, base + SCSYSSTAT);
+}
+
+#endif /* __AMBA_SP810_H */
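
sysctl_soft_reset() only needs a mapping of the SP810. As an illustration, a board restart hook could look like the sketch below; the physical address is a made-up placeholder, since a real platform would take it from its device tree or board file.

#include <linux/amba/sp810.h>
#include <linux/io.h>

#define EXAMPLE_SP810_PHYS	0x10000000UL	/* placeholder physical address */

static void example_board_restart(void)
{
	void __iomem *base = ioremap(EXAMPLE_SP810_PHYS, 0x1000);

	if (base)
		sysctl_soft_reset(base);	/* slow mode, then SCSYSSTAT write */
}
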
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
new file mode 100644
index 000000000..2b08e79f5
--- /dev/null
+++ b/include/linux/amd-iommu.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ * Leo Duran <leo.duran@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_H
+#define _ASM_X86_AMD_IOMMU_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_AMD_IOMMU
+
+struct task_struct;
+struct pci_dev;
+
+extern int amd_iommu_detect(void);
+extern int amd_iommu_init_hardware(void);
+
+/**
+ * amd_iommu_enable_device_erratum() - Enable erratum workaround for device
+ * in the IOMMUv2 driver
+ * @pdev: The PCI device the workaround is necessary for
+ * @erratum: The erratum workaround to enable
+ *
+ * The function needs to be called before amd_iommu_init_device().
+ * Possible values for the erratum number are for now:
+ * - AMD_PRI_DEV_ERRATUM_ENABLE_RESET - Reset PRI capability when PRI
+ * is enabled
+ * - AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE - Limit number of outstanding PRI
+ * requests to one
+ */
+#define AMD_PRI_DEV_ERRATUM_ENABLE_RESET 0
+#define AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE 1
+
+extern void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum);
+
+/**
+ * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
+ * @pdev: The PCI device to initialize
+ * @pasids: Number of PASIDs to support for this device
+ *
+ * This function does all setup for the device pdev so that it can be
+ * used with IOMMUv2.
+ * Returns 0 on success or negative value on error.
+ */
+extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
+
+/**
+ * amd_iommu_free_device() - Free all IOMMUv2 related device resources
+ * and disable IOMMUv2 usage for this device
+ * @pdev: The PCI device to disable IOMMUv2 usage for
+ */
+extern void amd_iommu_free_device(struct pci_dev *pdev);
+
+/**
+ * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
+ * @pdev: The PCI device to bind the task to
+ * @pasid: The PASID on the device the task should be bound to
+ * @task: the task to bind
+ *
+ * The function returns 0 on success or a negative value on error.
+ */
+extern int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+ struct task_struct *task);
+
+/**
+ * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
+ * a device
+ * @pdev: The device of the PASID
+ * @pasid: The PASID to unbind
+ *
+ * When this function returns the device is no longer using the PASID
+ * and the PASID is no longer bound to its task.
+ */
+extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid);
+
+/**
+ * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
+ * PRI requests
+ * @pdev: The PCI device the call-back should be registered for
+ * @cb: The call-back function
+ *
+ * The IOMMUv2 driver invokes this call-back when it is unable to
+ * successfully handle a PRI request. The device driver can then decide
+ * which PRI response the device should see. Possible return values for
+ * the call-back are:
+ *
+ * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
+ * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
+ * - AMD_IOMMU_INV_PRI_RSP_FAIL - Send Failure back to the device,
+ * the device is required to disable
+ * PRI when it receives this response
+ *
+ * The function returns 0 on success or negative value on error.
+ */
+#define AMD_IOMMU_INV_PRI_RSP_SUCCESS 0
+#define AMD_IOMMU_INV_PRI_RSP_INVALID 1
+#define AMD_IOMMU_INV_PRI_RSP_FAIL 2
+
+typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
+ int pasid,
+ unsigned long address,
+ u16);
+
+extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+ amd_iommu_invalid_ppr_cb cb);
+
+#define PPR_FAULT_EXEC (1 << 1)
+#define PPR_FAULT_READ (1 << 2)
+#define PPR_FAULT_WRITE (1 << 5)
+#define PPR_FAULT_USER (1 << 6)
+#define PPR_FAULT_RSVD (1 << 7)
+#define PPR_FAULT_GN (1 << 8)
+
+/**
+ * amd_iommu_device_info() - Get information about IOMMUv2 support of a
+ * PCI device
+ * @pdev: PCI device to query information from
+ * @info: A pointer to an amd_iommu_device_info structure which will contain
+ * the information about the PCI device
+ *
+ * Returns 0 on success, negative value on error
+ */
+
+#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP 0x1 /* ATS feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP 0x2 /* PRI feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP 0x4 /* PASID context supported */
+#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP 0x8 /* Device may request execution
+ on memory pages */
+#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP 0x10 /* Device may request
+ super-user privileges */
+
+struct amd_iommu_device_info {
+ int max_pasids;
+ u32 flags;
+};
+
+extern int amd_iommu_device_info(struct pci_dev *pdev,
+ struct amd_iommu_device_info *info);
+
+/**
+ * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
+ * a pasid context. This call-back is
+ * invoked when the IOMMUv2 driver needs to
+ * invalidate a PASID context, for example
+ * because the task that is bound to that
+ * context is about to exit.
+ *
+ * @pdev: The PCI device the call-back should be registered for
+ * @cb: The call-back function
+ */
+
+typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, int pasid);
+
+extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+ amd_iommu_invalidate_ctx cb);
+
+#else
+
+static inline int amd_iommu_detect(void) { return -ENODEV; }
+
+#endif
+
+#endif /* _ASM_X86_AMD_IOMMU_H */
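
Putting the kernel-doc above together, a driver that wants IOMMUv2 for its device roughly follows the sequence sketched here; the PASID count and PASID value are arbitrary example numbers, not requirements of the API.

#include <linux/amd-iommu.h>
#include <linux/pci.h>
#include <linux/sched.h>

static int example_enable_iommuv2(struct pci_dev *pdev)
{
	int ret;

	/* enable ATS/PRI/PASID handling for this device (16 PASIDs: arbitrary) */
	ret = amd_iommu_init_device(pdev, 16);
	if (ret)
		return ret;

	/* attach the current task's address space to PASID 1 (arbitrary) */
	ret = amd_iommu_bind_pasid(pdev, 1, current);
	if (ret)
		amd_iommu_free_device(pdev);

	return ret;
}
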
diff --git a/include/linux/amifd.h b/include/linux/amifd.h
new file mode 100644
index 000000000..346993268
--- /dev/null
+++ b/include/linux/amifd.h
@@ -0,0 +1,62 @@
+#ifndef _AMIFD_H
+#define _AMIFD_H
+
+/* Definitions for the Amiga floppy driver */
+
+#include <linux/fd.h>
+
+#define FD_MAX_UNITS 4 /* Max. Number of drives */
+#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
+
+#ifndef ASSEMBLER
+
+struct fd_data_type {
+ char *name; /* description of data type */
+ int sects; /* sectors per track */
+#ifdef __STDC__
+ int (*read_fkt)(int);
+ void (*write_fkt)(int);
+#else
+ int (*read_fkt)(); /* read whole track */
+ void (*write_fkt)(); /* write whole track */
+#endif
+};
+
+/*
+** Floppy type descriptions
+*/
+
+struct fd_drive_type {
+ unsigned long code; /* code returned from drive */
+ char *name; /* description of drive */
+ unsigned int tracks; /* number of tracks */
+ unsigned int heads; /* number of heads */
+ unsigned int read_size; /* raw read size for one track */
+ unsigned int write_size; /* raw write size for one track */
+ unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
+ unsigned int precomp1; /* start track for precomp 1 */
+ unsigned int precomp2; /* start track for precomp 2 */
+ unsigned int step_delay; /* time (in ms) for delay after step */
+ unsigned int settle_time; /* time to settle after dir change */
+ unsigned int side_time; /* time needed to change sides */
+};
+
+struct amiga_floppy_struct {
+ struct fd_drive_type *type; /* type of floppy for this unit */
+ struct fd_data_type *dtype; /* data type of floppy for this unit */
+ int track; /* current track (-1 == unknown) */
+ unsigned char *trackbuf; /* current track (kmalloc()'d) */
+
+ int blocks; /* total # blocks on disk */
+
+ int changed; /* true when not known */
+ int disk; /* disk in drive (-1 == unknown) */
+ int motor; /* true when motor is at speed */
+ int busy; /* true when drive is active */
+ int dirty; /* true when trackbuf is not on disk */
+ int status; /* current error code for unit */
+ struct gendisk *gendisk;
+};
+#endif
+
+#endif
diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h
new file mode 100644
index 000000000..76188bf48
--- /dev/null
+++ b/include/linux/amifdreg.h
@@ -0,0 +1,81 @@
+#ifndef _LINUX_AMIFDREG_H
+#define _LINUX_AMIFDREG_H
+
+/*
+** CIAAPRA bits (read only)
+*/
+
+#define DSKRDY (0x1<<5) /* disk ready when low */
+#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
+#define DSKPROT (0x1<<3) /* disk protected when low */
+#define DSKCHANGE (0x1<<2) /* low when disk removed */
+
+/*
+** CIAAPRB bits (read/write)
+*/
+
+#define DSKMOTOR (0x1<<7) /* motor on when low */
+#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
+#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
+#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
+#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
+#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
+#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
+#define DSKSTEP (0x1) /* pulse low to step head 1 track */
+
+/*
+** DSKBYTR bits (read only)
+*/
+
+#define DSKBYT (1<<15) /* register contains valid byte when set */
+#define DMAON (1<<14) /* disk DMA enabled */
+#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
+#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
+/* bits 7-0 are data */
+
+/*
+** ADKCON/ADKCONR bits
+*/
+
+#ifndef SETCLR
+#define ADK_SETCLR (1<<15) /* control bit */
+#endif
+#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
+#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
+#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
+#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
+#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
+#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
+
+/*
+** DSKLEN bits
+*/
+
+#define DSKLEN_DMAEN (1<<15)
+#define DSKLEN_WRITE (1<<14)
+
+/*
+** INTENA/INTREQ bits
+*/
+
+#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
+
+/*
+** Misc
+*/
+
+#define MFM_SYNC 0x4489 /* standard MFM sync value */
+
+/* Values for FD_COMMAND */
+#define FD_RECALIBRATE 0x07 /* move to track 0 */
+#define FD_SEEK 0x0F /* seek track */
+#define FD_READ 0xE6 /* read with MT, MFM, Skip deleted */
+#define FD_WRITE 0xC5 /* write with MT, MFM */
+#define FD_SENSEI 0x08 /* Sense Interrupt Status */
+#define FD_SPECIFY 0x03 /* specify HUT etc */
+#define FD_FORMAT 0x4D /* format one track */
+#define FD_VERSION 0x10 /* get version code */
+#define FD_CONFIGURE 0x13 /* configure FIFO operation */
+#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
+
+#endif /* _LINUX_AMIFDREG_H */
diff --git a/include/linux/amigaffs.h b/include/linux/amigaffs.h
new file mode 100644
index 000000000..43b41c06a
--- /dev/null
+++ b/include/linux/amigaffs.h
@@ -0,0 +1,144 @@
+#ifndef AMIGAFFS_H
+#define AMIGAFFS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#define FS_OFS 0x444F5300
+#define FS_FFS 0x444F5301
+#define FS_INTLOFS 0x444F5302
+#define FS_INTLFFS 0x444F5303
+#define FS_DCOFS 0x444F5304
+#define FS_DCFFS 0x444F5305
+#define MUFS_FS 0x6d754653 /* 'muFS' */
+#define MUFS_OFS 0x6d754600 /* 'muF\0' */
+#define MUFS_FFS 0x6d754601 /* 'muF\1' */
+#define MUFS_INTLOFS 0x6d754602 /* 'muF\2' */
+#define MUFS_INTLFFS 0x6d754603 /* 'muF\3' */
+#define MUFS_DCOFS 0x6d754604 /* 'muF\4' */
+#define MUFS_DCFFS 0x6d754605 /* 'muF\5' */
+
+#define T_SHORT 2
+#define T_LIST 16
+#define T_DATA 8
+
+#define ST_LINKFILE -4
+#define ST_FILE -3
+#define ST_ROOT 1
+#define ST_USERDIR 2
+#define ST_SOFTLINK 3
+#define ST_LINKDIR 4
+
+#define AFFS_ROOT_BMAPS 25
+
+struct affs_date {
+ __be32 days;
+ __be32 mins;
+ __be32 ticks;
+};
+
+struct affs_short_date {
+ __be16 days;
+ __be16 mins;
+ __be16 ticks;
+};
+
+struct affs_root_head {
+ __be32 ptype;
+ __be32 spare1;
+ __be32 spare2;
+ __be32 hash_size;
+ __be32 spare3;
+ __be32 checksum;
+ __be32 hashtable[1];
+};
+
+struct affs_root_tail {
+ __be32 bm_flag;
+ __be32 bm_blk[AFFS_ROOT_BMAPS];
+ __be32 bm_ext;
+ struct affs_date root_change;
+ u8 disk_name[32];
+ __be32 spare1;
+ __be32 spare2;
+ struct affs_date disk_change;
+ struct affs_date disk_create;
+ __be32 spare3;
+ __be32 spare4;
+ __be32 dcache;
+ __be32 stype;
+};
+
+struct affs_head {
+ __be32 ptype;
+ __be32 key;
+ __be32 block_count;
+ __be32 spare1;
+ __be32 first_data;
+ __be32 checksum;
+ __be32 table[1];
+};
+
+struct affs_tail {
+ __be32 spare1;
+ __be16 uid;
+ __be16 gid;
+ __be32 protect;
+ __be32 size;
+ u8 comment[92];
+ struct affs_date change;
+ u8 name[32];
+ __be32 spare2;
+ __be32 original;
+ __be32 link_chain;
+ __be32 spare[5];
+ __be32 hash_chain;
+ __be32 parent;
+ __be32 extension;
+ __be32 stype;
+};
+
+struct slink_front
+{
+ __be32 ptype;
+ __be32 key;
+ __be32 spare1[3];
+ __be32 checksum;
+ u8 symname[1]; /* depends on block size */
+};
+
+struct affs_data_head
+{
+ __be32 ptype;
+ __be32 key;
+ __be32 sequence;
+ __be32 size;
+ __be32 next;
+ __be32 checksum;
+ u8 data[1]; /* depends on block size */
+};
+
+/* Permission bits */
+
+#define FIBF_OTR_READ 0x8000
+#define FIBF_OTR_WRITE 0x4000
+#define FIBF_OTR_EXECUTE 0x2000
+#define FIBF_OTR_DELETE 0x1000
+#define FIBF_GRP_READ 0x0800
+#define FIBF_GRP_WRITE 0x0400
+#define FIBF_GRP_EXECUTE 0x0200
+#define FIBF_GRP_DELETE 0x0100
+
+#define FIBF_HIDDEN 0x0080
+#define FIBF_SCRIPT 0x0040
+#define FIBF_PURE 0x0020 /* no use under linux */
+#define FIBF_ARCHIVED 0x0010 /* never set, always cleared on write */
+#define FIBF_NOREAD 0x0008 /* 0 means allowed */
+#define FIBF_NOWRITE 0x0004 /* 0 means allowed */
+#define FIBF_NOEXECUTE 0x0002 /* 0 means allowed, ignored under linux */
+#define FIBF_NODELETE 0x0001 /* 0 means allowed */
+
+#define FIBF_OWNER 0x000F /* Bits pertaining to owner */
+#define FIBF_MASK 0xEE0E /* Bits modified by Linux */
+
+#endif
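
All of the on-disk fields above are stored big-endian (hence the __be16/__be32 types), so readers go through the byteorder helpers. A trivial sketch, with an invented helper name:

#include <linux/amigaffs.h>

/* convert the on-disk (big-endian) file size to host byte order */
static u32 example_affs_file_size(const struct affs_tail *tail)
{
	return be32_to_cpu(tail->size);
}
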
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h
new file mode 100644
index 000000000..8013a4524
--- /dev/null
+++ b/include/linux/anon_inodes.h
@@ -0,0 +1,20 @@
+/*
+ * include/linux/anon_inodes.h
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _LINUX_ANON_INODES_H
+#define _LINUX_ANON_INODES_H
+
+struct file_operations;
+
+struct file *anon_inode_getfile(const char *name,
+ const struct file_operations *fops,
+ void *priv, int flags);
+int anon_inode_getfd(const char *name, const struct file_operations *fops,
+ void *priv, int flags);
+
+#endif /* _LINUX_ANON_INODES_H */
+
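
A typical caller pairs anon_inode_getfd() with its own file_operations and stashes per-instance state in the file's private data. The sketch below uses placeholder names and an empty fops table purely to show the shape of the call.

#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static int example_create_fd(void *priv)
{
	/* returns a new fd; 'priv' ends up in file->private_data */
	return anon_inode_getfd("[example]", &example_fops, priv,
				O_RDWR | O_CLOEXEC);
}
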
diff --git a/include/linux/apm-emulation.h b/include/linux/apm-emulation.h
new file mode 100644
index 000000000..e6d800358
--- /dev/null
+++ b/include/linux/apm-emulation.h
@@ -0,0 +1,62 @@
+/* -*- linux-c -*-
+ *
+ * (C) 2003 zecke@handhelds.org
+ *
+ * GPL version 2
+ *
+ * based on arch/arm/kernel/apm.c
+ * factor out the information needed by architectures to provide
+ * apm status
+ */
+#ifndef __LINUX_APM_EMULATION_H
+#define __LINUX_APM_EMULATION_H
+
+#include <linux/apm_bios.h>
+
+/*
+ * This structure gets filled in by the machine specific 'get_power_status'
+ * implementation. Any fields which are not set default to a safe value.
+ */
+struct apm_power_info {
+ unsigned char ac_line_status;
+#define APM_AC_OFFLINE 0
+#define APM_AC_ONLINE 1
+#define APM_AC_BACKUP 2
+#define APM_AC_UNKNOWN 0xff
+
+ unsigned char battery_status;
+#define APM_BATTERY_STATUS_HIGH 0
+#define APM_BATTERY_STATUS_LOW 1
+#define APM_BATTERY_STATUS_CRITICAL 2
+#define APM_BATTERY_STATUS_CHARGING 3
+#define APM_BATTERY_STATUS_NOT_PRESENT 4
+#define APM_BATTERY_STATUS_UNKNOWN 0xff
+
+ unsigned char battery_flag;
+#define APM_BATTERY_FLAG_HIGH (1 << 0)
+#define APM_BATTERY_FLAG_LOW (1 << 1)
+#define APM_BATTERY_FLAG_CRITICAL (1 << 2)
+#define APM_BATTERY_FLAG_CHARGING (1 << 3)
+#define APM_BATTERY_FLAG_NOT_PRESENT (1 << 7)
+#define APM_BATTERY_FLAG_UNKNOWN 0xff
+
+ int battery_life;
+ int time;
+ int units;
+#define APM_UNITS_MINS 0
+#define APM_UNITS_SECS 1
+#define APM_UNITS_UNKNOWN -1
+
+};
+
+/*
+ * This allows machines to provide their own "apm get power status" function.
+ */
+extern void (*apm_get_power_status)(struct apm_power_info *);
+
+/*
+ * Queue an event (APM_SYS_SUSPEND or APM_CRITICAL_SUSPEND)
+ */
+void apm_queue_event(apm_event_t event);
+
+#endif /* __LINUX_APM_EMULATION_H */
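
Machines opt in by pointing apm_get_power_status at their own routine. In this sketch the reported values are fixed placeholders rather than real battery readings, and the init hook name is invented.

#include <linux/apm-emulation.h>
#include <linux/init.h>

static void example_get_power_status(struct apm_power_info *info)
{
	info->ac_line_status = APM_AC_ONLINE;
	info->battery_status = APM_BATTERY_STATUS_HIGH;
	info->battery_flag   = APM_BATTERY_FLAG_HIGH;
	info->battery_life   = 100;		/* percent, placeholder */
	info->time           = 90;		/* placeholder */
	info->units          = APM_UNITS_MINS;
}

static int __init example_apm_init(void)
{
	apm_get_power_status = example_get_power_status;
	return 0;
}
device_initcall(example_apm_init);
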
diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h
new file mode 100644
index 000000000..9c3a87184
--- /dev/null
+++ b/include/linux/apm_bios.h
@@ -0,0 +1,101 @@
+/*
+ * Include file for the interface to an APM BIOS
+ * Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef _LINUX_APM_H
+#define _LINUX_APM_H
+
+#include <uapi/linux/apm_bios.h>
+
+
+#define APM_CS (GDT_ENTRY_APMBIOS_BASE * 8)
+#define APM_CS_16 (APM_CS + 8)
+#define APM_DS (APM_CS_16 + 8)
+
+/* Results of APM Installation Check */
+#define APM_16_BIT_SUPPORT 0x0001
+#define APM_32_BIT_SUPPORT 0x0002
+#define APM_IDLE_SLOWS_CLOCK 0x0004
+#define APM_BIOS_DISABLED 0x0008
+#define APM_BIOS_DISENGAGED 0x0010
+
+/*
+ * Data for APM that is persistent across module unload/load
+ */
+struct apm_info {
+ struct apm_bios_info bios;
+ unsigned short connection_version;
+ int get_power_status_broken;
+ int get_power_status_swabinminutes;
+ int allow_ints;
+ int forbid_idle;
+ int realmode_power_off;
+ int disabled;
+};
+
+/*
+ * The APM function codes
+ */
+#define APM_FUNC_INST_CHECK 0x5300
+#define APM_FUNC_REAL_CONN 0x5301
+#define APM_FUNC_16BIT_CONN 0x5302
+#define APM_FUNC_32BIT_CONN 0x5303
+#define APM_FUNC_DISCONN 0x5304
+#define APM_FUNC_IDLE 0x5305
+#define APM_FUNC_BUSY 0x5306
+#define APM_FUNC_SET_STATE 0x5307
+#define APM_FUNC_ENABLE_PM 0x5308
+#define APM_FUNC_RESTORE_BIOS 0x5309
+#define APM_FUNC_GET_STATUS 0x530a
+#define APM_FUNC_GET_EVENT 0x530b
+#define APM_FUNC_GET_STATE 0x530c
+#define APM_FUNC_ENABLE_DEV_PM 0x530d
+#define APM_FUNC_VERSION 0x530e
+#define APM_FUNC_ENGAGE_PM 0x530f
+#define APM_FUNC_GET_CAP 0x5310
+#define APM_FUNC_RESUME_TIMER 0x5311
+#define APM_FUNC_RESUME_ON_RING 0x5312
+#define APM_FUNC_TIMER 0x5313
+
+/*
+ * Function code for APM_FUNC_RESUME_TIMER
+ */
+#define APM_FUNC_DISABLE_TIMER 0
+#define APM_FUNC_GET_TIMER 1
+#define APM_FUNC_SET_TIMER 2
+
+/*
+ * Function code for APM_FUNC_RESUME_ON_RING
+ */
+#define APM_FUNC_DISABLE_RING 0
+#define APM_FUNC_ENABLE_RING 1
+#define APM_FUNC_GET_RING 2
+
+/*
+ * Function code for APM_FUNC_TIMER
+ */
+#define APM_FUNC_TIMER_DISABLE 0
+#define APM_FUNC_TIMER_ENABLE 1
+#define APM_FUNC_TIMER_GET 2
+
+/*
+ * in arch/i386/kernel/setup.c
+ */
+extern struct apm_info apm_info;
+
+/*
+ * This is the "All Devices" ID communicated to the BIOS
+ */
+#define APM_DEVICE_BALL ((apm_info.connection_version > 0x0100) ? \
+ APM_DEVICE_ALL : APM_DEVICE_OLD_ALL)
+#endif /* LINUX_APM_H */
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
new file mode 100644
index 000000000..0a95e730f
--- /dev/null
+++ b/include/linux/apple_bl.h
@@ -0,0 +1,26 @@
+/*
+ * apple_bl exported symbols
+ */
+
+#ifndef _LINUX_APPLE_BL_H
+#define _LINUX_APPLE_BL_H
+
+#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
+
+extern int apple_bl_register(void);
+extern void apple_bl_unregister(void);
+
+#else /* !CONFIG_BACKLIGHT_APPLE */
+
+static inline int apple_bl_register(void)
+{
+ return 0;
+}
+
+static inline void apple_bl_unregister(void)
+{
+}
+
+#endif /* !CONFIG_BACKLIGHT_APPLE */
+
+#endif /* _LINUX_APPLE_BL_H */
diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h
new file mode 100644
index 000000000..df0356220
--- /dev/null
+++ b/include/linux/arcdevice.h
@@ -0,0 +1,342 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions used by the ARCnet driver.
+ *
+ * Authors: Avery Pennarun and David Woodhouse
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _LINUX_ARCDEVICE_H
+#define _LINUX_ARCDEVICE_H
+
+#include <asm/timex.h>
+#include <linux/if_arcnet.h>
+
+#ifdef __KERNEL__
+#include <linux/irqreturn.h>
+
+/*
+ * RECON_THRESHOLD is the maximum number of RECON messages to receive
+ * within one minute before printing a "cabling problem" warning. The
+ * default value should be fine.
+ *
+ * After that, a "cabling restored" message will be printed on the next IRQ
+ * if no RECON messages have been received for 10 seconds.
+ *
+ * Do not define RECON_THRESHOLD at all if you want to disable this feature.
+ */
+#define RECON_THRESHOLD 30
+
+
+/*
+ * Define this to the minimum "timeout" value. If a transmit takes longer
+ * than TX_TIMEOUT jiffies, Linux will abort the TX and retry. On a large
+ * network, or one with heavy network traffic, this timeout may need to be
+ * increased. The larger it is, though, the longer it will be between
+ * necessary transmits - don't set this too high.
+ */
+#define TX_TIMEOUT (HZ * 200 / 1000)
+
+
+/* Display warnings about the driver being an ALPHA version. */
+#undef ALPHA_WARNING
+
+
+/*
+ * Debugging bitflags: each option can be enabled individually.
+ *
+ * Note: only debug flags included in the ARCNET_DEBUG_MAX define will
+ * actually be available. GCC will (at least, GCC 2.7.0 will) notice
+ * lines using a BUGLVL not in ARCNET_DEBUG_MAX and automatically optimize
+ * them out.
+ */
+#define D_NORMAL 1 /* important operational info */
+#define D_EXTRA 2 /* useful, but non-vital information */
+#define D_INIT 4 /* show init/probe messages */
+#define D_INIT_REASONS 8 /* show reasons for discarding probes */
+#define D_RECON 32 /* print a message whenever token is lost */
+#define D_PROTO 64 /* debug auto-protocol support */
+/* debug levels below give LOTS of output during normal operation! */
+#define D_DURING 128 /* trace operations (including irq's) */
+#define D_TX 256 /* show tx packets */
+#define D_RX 512 /* show rx packets */
+#define D_SKB 1024 /* show skb's */
+#define D_SKB_SIZE 2048 /* show skb sizes */
+#define D_TIMING 4096 /* show time needed to copy buffers to card */
+#define D_DEBUG 8192 /* Very detailed, line-by-line debug output */
+
+#ifndef ARCNET_DEBUG_MAX
+#define ARCNET_DEBUG_MAX (127) /* change to ~0 if you want detailed debugging */
+#endif
+
+#ifndef ARCNET_DEBUG
+#define ARCNET_DEBUG (D_NORMAL|D_EXTRA)
+#endif
+extern int arcnet_debug;
+
+/* macros to simplify debug checking */
+#define BUGLVL(x) if ((ARCNET_DEBUG_MAX)&arcnet_debug&(x))
+#define BUGMSG2(x,msg,args...) do { BUGLVL(x) printk(msg, ## args); } while (0)
+#define BUGMSG(x,msg,args...) \
+ BUGMSG2(x, "%s%6s: " msg, \
+ x==D_NORMAL ? KERN_WARNING \
+ : x < D_DURING ? KERN_INFO : KERN_DEBUG, \
+ dev->name , ## args)
+
+/* see how long a function call takes to run, expressed in CPU cycles */
+#define TIME(name, bytes, call) BUGLVL(D_TIMING) { \
+ unsigned long _x, _y; \
+ _x = get_cycles(); \
+ call; \
+ _y = get_cycles(); \
+ BUGMSG(D_TIMING, \
+ "%s: %d bytes in %lu cycles == " \
+ "%lu Kbytes/100Mcycle\n",\
+ name, bytes, _y - _x, \
+ 100000000 / 1024 * bytes / (_y - _x + 1));\
+ } \
+ else { \
+ call;\
+ }
+
+
+/*
+ * Time needed to reset the card - in ms (milliseconds). This works on my
+ * SMC PC100. I can't find a reference that tells me just how long I
+ * should wait.
+ */
+#define RESETtime (300)
+
+/*
+ * These are the max/min lengths of packet payload, not including the
+ * arc_hardware header, but definitely including the soft header.
+ *
+ * Note: packet sizes 254, 255, 256 are impossible because of the way
+ * ARCnet registers work. That's why RFC1201 defines "exception" packets.
+ * In non-RFC1201 protocols, we have to just tack some extra bytes on the
+ * end.
+ */
+#define MTU 253 /* normal packet max size */
+#define MinTU 257 /* extended packet min size */
+#define XMTU 508 /* extended packet max size */
+
+/* status/interrupt mask bit fields */
+#define TXFREEflag 0x01 /* transmitter available */
+#define TXACKflag 0x02 /* transmitted msg. ackd */
+#define RECONflag 0x04 /* network reconfigured */
+#define TESTflag 0x08 /* test flag */
+#define EXCNAKflag 0x08 /* excessive NAK flag */
+#define RESETflag 0x10 /* power-on-reset */
+#define RES1flag 0x20 /* reserved - usually set by jumper */
+#define RES2flag 0x40 /* reserved - usually set by jumper */
+#define NORXflag 0x80 /* receiver inhibited */
+
+/* Flags used for IO-mapped memory operations */
+#define AUTOINCflag 0x40 /* Increase location with each access */
+#define IOMAPflag 0x02 /* (for 90xx) Use IO mapped memory, not mmap */
+#define ENABLE16flag 0x80 /* (for 90xx) Enable 16-bit mode */
+
+/* in the command register, the following bits have these meanings:
+ * 0-2 command
+ * 3-4 page number (for enable rcv/xmt command)
+ * 7 receive broadcasts
+ */
+#define NOTXcmd 0x01 /* disable transmitter */
+#define NORXcmd 0x02 /* disable receiver */
+#define TXcmd 0x03 /* enable transmitter */
+#define RXcmd 0x04 /* enable receiver */
+#define CONFIGcmd 0x05 /* define configuration */
+#define CFLAGScmd 0x06 /* clear flags */
+#define TESTcmd 0x07 /* load test flags */
+
+/* flags for "clear flags" command */
+#define RESETclear 0x08 /* power-on-reset */
+#define CONFIGclear 0x10 /* system reconfigured */
+
+#define EXCNAKclear 0x0E /* Clear and acknowledge the excessive NAK bit */
+
+/* flags for "load test flags" command */
+#define TESTload 0x08 /* test flag (diagnostic) */
+
+/* byte deposited into first address of buffers on reset */
+#define TESTvalue 0321 /* that's octal for 0xD1 :) */
+
+/* for "enable receiver" command */
+#define RXbcasts 0x80 /* receive broadcasts */
+
+/* flags for "define configuration" command */
+#define NORMALconf 0x00 /* 1-249 byte packets */
+#define EXTconf 0x08 /* 250-504 byte packets */
+
+/* card feature flags, set during auto-detection.
+ * (currently only used by com20020pci)
+ */
+#define ARC_IS_5MBIT 1 /* card default speed is 5MBit */
+#define ARC_CAN_10MBIT 2 /* card uses COM20022, supporting 10MBit,
+ but default is 2.5MBit. */
+
+
+/* information needed to define an encapsulation driver */
+struct ArcProto {
+ char suffix; /* a for RFC1201, e for ether-encap, etc. */
+ int mtu; /* largest possible packet */
+ int is_ip; /* This is a ip plugin - not a raw thing */
+
+ void (*rx) (struct net_device * dev, int bufnum,
+ struct archdr * pkthdr, int length);
+ int (*build_header) (struct sk_buff * skb, struct net_device *dev,
+ unsigned short ethproto, uint8_t daddr);
+
+ /* these functions return '1' if the skb can now be freed */
+ int (*prepare_tx) (struct net_device * dev, struct archdr * pkt, int length,
+ int bufnum);
+ int (*continue_tx) (struct net_device * dev, int bufnum);
+ int (*ack_tx) (struct net_device * dev, int acked);
+};
+
+extern struct ArcProto *arc_proto_map[256], *arc_proto_default,
+ *arc_bcast_proto, *arc_raw_proto;
+
+
+/*
+ * "Incoming" is information needed for each address that could be sending
+ * to us. Mostly for partially-received split packets.
+ */
+struct Incoming {
+ struct sk_buff *skb; /* packet data buffer */
+ __be16 sequence; /* sequence number of assembly */
+ uint8_t lastpacket, /* number of last packet (from 1) */
+ numpackets; /* number of packets in split */
+};
+
+
+/* only needed for RFC1201 */
+struct Outgoing {
+ struct ArcProto *proto; /* protocol driver that owns this:
+ * if NULL, no packet is pending.
+ */
+ struct sk_buff *skb; /* buffer from upper levels */
+ struct archdr *pkt; /* a pointer into the skb */
+ uint16_t length, /* bytes total */
+ dataleft, /* bytes left */
+ segnum, /* segment being sent */
+ numsegs; /* number of segments */
+};
+
+
+struct arcnet_local {
+ uint8_t config, /* current value of CONFIG register */
+ timeout, /* Extended timeout for COM20020 */
+ backplane, /* Backplane flag for COM20020 */
+ clockp, /* COM20020 clock divider */
+ clockm, /* COM20020 clock multiplier flag */
+ setup, /* Contents of setup1 register */
+ setup2, /* Contents of setup2 register */
+ intmask; /* current value of INTMASK register */
+ uint8_t default_proto[256]; /* default encap to use for each host */
+ int cur_tx, /* buffer used by current transmit, or -1 */
+ next_tx, /* buffer where a packet is ready to send */
+ cur_rx; /* current receive buffer */
+ int lastload_dest, /* can last loaded packet be acked? */
+ lasttrans_dest; /* can last TX'd packet be acked? */
+ int timed_out; /* need to process TX timeout and drop packet */
+ unsigned long last_timeout; /* time of last reported timeout */
+ char *card_name; /* card ident string */
+ int card_flags; /* special card features */
+
+
+ /* On preemptive and SMP kernels a lock is needed */
+ spinlock_t lock;
+
+ /*
+ * Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
+ * which can be used for either sending or receiving. The new dynamic
+ * buffer management routines use a simple circular queue of available
+ * buffers, and take them as they're needed. This way, we simplify
+ * situations in which we (for example) want to pre-load a transmit
+ * buffer, or start receiving while we copy a received packet to
+ * memory.
+ *
+ * The rules: only the interrupt handler is allowed to _add_ buffers to
+ * the queue; thus, this doesn't require a lock. Both the interrupt
+ * handler and the transmit function will want to _remove_ buffers, so
+ * we need to handle the situation where they try to do it at the same
+ * time.
+ *
+ * If next_buf == first_free_buf, the queue is empty. Since there are
+ * only four possible buffers, the queue should never be full.
+ */
+ atomic_t buf_lock;
+ int buf_queue[5];
+ int next_buf, first_free_buf;
+
+ /* network "reconfiguration" handling */
+ unsigned long first_recon; /* time of "first" RECON message to count */
+ unsigned long last_recon; /* time of most recent RECON */
+ int num_recons; /* number of RECONs between first and last. */
+ int network_down; /* do we think the network is down? */
+
+ int excnak_pending; /* We just got an excessive NAK interrupt */
+
+ struct {
+ uint16_t sequence; /* sequence number (incs with each packet) */
+ __be16 aborted_seq;
+
+ struct Incoming incoming[256]; /* one from each address */
+ } rfc1201;
+
+ /* really only used by rfc1201, but we'll pretend it's not */
+ struct Outgoing outgoing; /* packet currently being sent */
+
+ /* hardware-specific functions */
+ struct {
+ struct module *owner;
+ void (*command) (struct net_device * dev, int cmd);
+ int (*status) (struct net_device * dev);
+ void (*intmask) (struct net_device * dev, int mask);
+ int (*reset) (struct net_device * dev, int really_reset);
+ void (*open) (struct net_device * dev);
+ void (*close) (struct net_device * dev);
+
+ void (*copy_to_card) (struct net_device * dev, int bufnum, int offset,
+ void *buf, int count);
+ void (*copy_from_card) (struct net_device * dev, int bufnum, int offset,
+ void *buf, int count);
+ } hw;
+
+ void __iomem *mem_start; /* pointer to ioremap'ed MMIO */
+};
+
+
+#define ARCRESET(x) (lp->hw.reset(dev, (x)))
+#define ACOMMAND(x) (lp->hw.command(dev, (x)))
+#define ASTATUS() (lp->hw.status(dev))
+#define AINTMASK(x) (lp->hw.intmask(dev, (x)))
+
+
+
+#if ARCNET_DEBUG_MAX & D_SKB
+void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc);
+#else
+#define arcnet_dump_skb(dev,skb,desc) ;
+#endif
+
+void arcnet_unregister_proto(struct ArcProto *proto);
+irqreturn_t arcnet_interrupt(int irq, void *dev_id);
+struct net_device *alloc_arcdev(const char *name);
+
+int arcnet_open(struct net_device *dev);
+int arcnet_close(struct net_device *dev);
+netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+void arcnet_timeout(struct net_device *dev);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_ARCDEVICE_H */
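
The BUGLVL/BUGMSG helpers above expect a 'dev' variable in scope, and the A* register macros additionally expect 'lp'. A hardware driver's debug path typically looks like this sketch (the function name is invented):

#include <linux/arcdevice.h>
#include <linux/netdevice.h>

static void example_dump_status(struct net_device *dev)
{
	struct arcnet_local *lp = netdev_priv(dev);

	/* only printed when D_DURING is enabled both in ARCNET_DEBUG_MAX
	 * at build time and in arcnet_debug at run time */
	BUGMSG(D_DURING, "status word is %x\n", ASTATUS());
}
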
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
new file mode 100644
index 000000000..521ec1f2e
--- /dev/null
+++ b/include/linux/arm-cci.h
@@ -0,0 +1,68 @@
+/*
+ * CCI cache coherent interconnect support
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_ARM_CCI_H
+#define __LINUX_ARM_CCI_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+#include <asm/arm-cci.h>
+
+struct device_node;
+
+#ifdef CONFIG_ARM_CCI
+extern bool cci_probed(void);
+#else
+static inline bool cci_probed(void) { return false; }
+#endif
+
+#ifdef CONFIG_ARM_CCI400_PORT_CTRL
+extern int cci_ace_get_port(struct device_node *dn);
+extern int cci_disable_port_by_cpu(u64 mpidr);
+extern int __cci_control_port_by_device(struct device_node *dn, bool enable);
+extern int __cci_control_port_by_index(u32 port, bool enable);
+#else
+static inline int cci_ace_get_port(struct device_node *dn)
+{
+ return -ENODEV;
+}
+static inline int cci_disable_port_by_cpu(u64 mpidr) { return -ENODEV; }
+static inline int __cci_control_port_by_device(struct device_node *dn,
+ bool enable)
+{
+ return -ENODEV;
+}
+static inline int __cci_control_port_by_index(u32 port, bool enable)
+{
+ return -ENODEV;
+}
+#endif
+
+#define cci_disable_port_by_device(dev) \
+ __cci_control_port_by_device(dev, false)
+#define cci_enable_port_by_device(dev) \
+ __cci_control_port_by_device(dev, true)
+#define cci_disable_port_by_index(dev) \
+ __cci_control_port_by_index(dev, false)
+#define cci_enable_port_by_index(dev) \
+ __cci_control_port_by_index(dev, true)
+
+#endif
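
A power-management path would typically gate its CCI handling on cci_probed() and then use the port-control wrappers. In this sketch the device_node pointer is assumed to come from the caller's device-tree parsing.

#include <linux/arm-cci.h>

static int example_take_port_down(struct device_node *cci_port)
{
	if (!cci_probed())
		return -ENODEV;

	/* wrapper around __cci_control_port_by_device(cci_port, false) */
	return cci_disable_port_by_device(cci_port);
}
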
diff --git a/include/linux/asn1.h b/include/linux/asn1.h
new file mode 100644
index 000000000..eed698286
--- /dev/null
+++ b/include/linux/asn1.h
@@ -0,0 +1,69 @@
+/* ASN.1 BER/DER/CER encoding definitions
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASN1_H
+#define _LINUX_ASN1_H
+
+/* Class */
+enum asn1_class {
+ ASN1_UNIV = 0, /* Universal */
+ ASN1_APPL = 1, /* Application */
+ ASN1_CONT = 2, /* Context */
+ ASN1_PRIV = 3 /* Private */
+};
+#define ASN1_CLASS_BITS 0xc0
+
+
+enum asn1_method {
+ ASN1_PRIM = 0, /* Primitive */
+ ASN1_CONS = 1 /* Constructed */
+};
+#define ASN1_CONS_BIT 0x20
+
+/* Tag */
+enum asn1_tag {
+ ASN1_EOC = 0, /* End Of Contents or N/A */
+ ASN1_BOOL = 1, /* Boolean */
+ ASN1_INT = 2, /* Integer */
+ ASN1_BTS = 3, /* Bit String */
+ ASN1_OTS = 4, /* Octet String */
+ ASN1_NULL = 5, /* Null */
+ ASN1_OID = 6, /* Object Identifier */
+ ASN1_ODE = 7, /* Object Description */
+ ASN1_EXT = 8, /* External */
+ ASN1_REAL = 9, /* Real float */
+ ASN1_ENUM = 10, /* Enumerated */
+ ASN1_EPDV = 11, /* Embedded PDV */
+ ASN1_UTF8STR = 12, /* UTF8 String */
+ ASN1_RELOID = 13, /* Relative OID */
+ /* 14 - Reserved */
+ /* 15 - Reserved */
+ ASN1_SEQ = 16, /* Sequence and Sequence of */
+ ASN1_SET = 17, /* Set and Set of */
+ ASN1_NUMSTR = 18, /* Numerical String */
+ ASN1_PRNSTR = 19, /* Printable String */
+ ASN1_TEXSTR = 20, /* T61 String / Teletext String */
+ ASN1_VIDSTR = 21, /* Videotex String */
+ ASN1_IA5STR = 22, /* IA5 String */
+ ASN1_UNITIM = 23, /* Universal Time */
+ ASN1_GENTIM = 24, /* General Time */
+ ASN1_GRASTR = 25, /* Graphic String */
+ ASN1_VISSTR = 26, /* Visible String */
+ ASN1_GENSTR = 27, /* General String */
+ ASN1_UNISTR = 28, /* Universal String */
+ ASN1_CHRSTR = 29, /* Character String */
+ ASN1_BMPSTR = 30, /* BMP String */
+ ASN1_LONG_TAG = 31 /* Long form tag */
+};
+
+#define ASN1_INDEFINITE_LENGTH 0x80
+
+#endif /* _LINUX_ASN1_H */
diff --git a/include/linux/asn1_ber_bytecode.h b/include/linux/asn1_ber_bytecode.h
new file mode 100644
index 000000000..945d44ae5
--- /dev/null
+++ b/include/linux/asn1_ber_bytecode.h
@@ -0,0 +1,87 @@
+/* ASN.1 BER/DER/CER parsing state machine internal definitions
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASN1_BER_BYTECODE_H
+#define _LINUX_ASN1_BER_BYTECODE_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#endif
+#include <linux/asn1.h>
+
+typedef int (*asn1_action_t)(void *context,
+ size_t hdrlen, /* In case of ANY type */
+ unsigned char tag, /* In case of ANY type */
+ const void *value, size_t vlen);
+
+struct asn1_decoder {
+ const unsigned char *machine;
+ size_t machlen;
+ const asn1_action_t *actions;
+};
+
+enum asn1_opcode {
+ /* The tag-matching ops come first and the odd-numbered slots
+ * are for OR_SKIP ops.
+ */
+#define ASN1_OP_MATCH__SKIP 0x01
+#define ASN1_OP_MATCH__ACT 0x02
+#define ASN1_OP_MATCH__JUMP 0x04
+#define ASN1_OP_MATCH__ANY 0x08
+#define ASN1_OP_MATCH__COND 0x10
+
+ ASN1_OP_MATCH = 0x00,
+ ASN1_OP_MATCH_OR_SKIP = 0x01,
+ ASN1_OP_MATCH_ACT = 0x02,
+ ASN1_OP_MATCH_ACT_OR_SKIP = 0x03,
+ ASN1_OP_MATCH_JUMP = 0x04,
+ ASN1_OP_MATCH_JUMP_OR_SKIP = 0x05,
+ ASN1_OP_MATCH_ANY = 0x08,
+ ASN1_OP_MATCH_ANY_ACT = 0x0a,
+ /* Everything before here matches unconditionally */
+
+ ASN1_OP_COND_MATCH_OR_SKIP = 0x11,
+ ASN1_OP_COND_MATCH_ACT_OR_SKIP = 0x13,
+ ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 0x15,
+ ASN1_OP_COND_MATCH_ANY = 0x18,
+ ASN1_OP_COND_MATCH_ANY_ACT = 0x1a,
+
+ /* Everything before here will want a tag from the data */
+#define ASN1_OP__MATCHES_TAG ASN1_OP_COND_MATCH_ANY_ACT
+
+ /* These are here to help fill up space */
+ ASN1_OP_COND_FAIL = 0x1b,
+ ASN1_OP_COMPLETE = 0x1c,
+ ASN1_OP_ACT = 0x1d,
+ ASN1_OP_RETURN = 0x1e,
+
+ /* The following eight have bit 0 -> SET, 1 -> OF, 2 -> ACT */
+ ASN1_OP_END_SEQ = 0x20,
+ ASN1_OP_END_SET = 0x21,
+ ASN1_OP_END_SEQ_OF = 0x22,
+ ASN1_OP_END_SET_OF = 0x23,
+ ASN1_OP_END_SEQ_ACT = 0x24,
+ ASN1_OP_END_SET_ACT = 0x25,
+ ASN1_OP_END_SEQ_OF_ACT = 0x26,
+ ASN1_OP_END_SET_OF_ACT = 0x27,
+#define ASN1_OP_END__SET 0x01
+#define ASN1_OP_END__OF 0x02
+#define ASN1_OP_END__ACT 0x04
+
+ ASN1_OP__NR
+};
+
+#define _tag(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | ASN1_##TAG)
+#define _tagn(CLASS, CP, TAG) ((ASN1_##CLASS << 6) | (ASN1_##CP << 5) | TAG)
+#define _jump_target(N) (N)
+#define _action(N) (N)
+
+#endif /* _LINUX_ASN1_BER_BYTECODE_H */
diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h
new file mode 100644
index 000000000..fa2ff5bc0
--- /dev/null
+++ b/include/linux/asn1_decoder.h
@@ -0,0 +1,24 @@
+/* ASN.1 decoder
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASN1_DECODER_H
+#define _LINUX_ASN1_DECODER_H
+
+#include <linux/asn1.h>
+
+struct asn1_decoder;
+
+extern int asn1_ber_decoder(const struct asn1_decoder *decoder,
+ void *context,
+ const unsigned char *data,
+ size_t datalen);
+
+#endif /* _LINUX_ASN1_DECODER_H */
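Driving the decoder is a single call. A minimal sketch (not part of this patch), assuming a compiler-generated or hand-built table such as the hypothetical demo_decoder above:

#include <linux/asn1_decoder.h>
#include <linux/printk.h>

/* demo_decoder is the hypothetical machine/actions table from the sketch
 * above (or any table emitted by scripts/asn1_compiler.c). */
extern const struct asn1_decoder demo_decoder;

/* Run a BER/DER blob through the decoder; the context pointer is handed
 * unchanged to every action callback the machine fires. */
static int demo_parse(void *context, const unsigned char *data, size_t len)
{
	int ret = asn1_ber_decoder(&demo_decoder, context, data, len);

	if (ret < 0)
		pr_err("ASN.1 parse failed: %d\n", ret);
	return ret;
}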
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
new file mode 100644
index 000000000..a89df3be1
--- /dev/null
+++ b/include/linux/assoc_array.h
@@ -0,0 +1,92 @@
+/* Generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASSOC_ARRAY_H
+#define _LINUX_ASSOC_ARRAY_H
+
+#ifdef CONFIG_ASSOCIATIVE_ARRAY
+
+#include <linux/types.h>
+
+#define ASSOC_ARRAY_KEY_CHUNK_SIZE BITS_PER_LONG /* Key data retrieved in chunks of this size */
+
+/*
+ * Generic associative array.
+ */
+struct assoc_array {
+ struct assoc_array_ptr *root; /* The node at the root of the tree */
+ unsigned long nr_leaves_on_tree;
+};
+
+/*
+ * Operations on objects and index keys for use by array manipulation routines.
+ */
+struct assoc_array_ops {
+ /* Method to get a chunk of an index key from caller-supplied data */
+ unsigned long (*get_key_chunk)(const void *index_key, int level);
+
+ /* Method to get a piece of an object's index key */
+ unsigned long (*get_object_key_chunk)(const void *object, int level);
+
+ /* Is this the object we're looking for? */
+ bool (*compare_object)(const void *object, const void *index_key);
+
+ /* How different is an object from an index key, to a bit position in
+ * their keys? (or -1 if they're the same)
+ */
+ int (*diff_objects)(const void *object, const void *index_key);
+
+ /* Method to free an object. */
+ void (*free_object)(void *object);
+};
+
+/*
+ * Access and manipulation functions.
+ */
+struct assoc_array_edit;
+
+static inline void assoc_array_init(struct assoc_array *array)
+{
+ array->root = NULL;
+ array->nr_leaves_on_tree = 0;
+}
+
+extern int assoc_array_iterate(const struct assoc_array *array,
+ int (*iterator)(const void *object,
+ void *iterator_data),
+ void *iterator_data);
+extern void *assoc_array_find(const struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+extern void assoc_array_destroy(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+extern struct assoc_array_edit *assoc_array_insert(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key,
+ void *object);
+extern void assoc_array_insert_set_object(struct assoc_array_edit *edit,
+ void *object);
+extern struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ const void *index_key);
+extern struct assoc_array_edit *assoc_array_clear(struct assoc_array *array,
+ const struct assoc_array_ops *ops);
+extern void assoc_array_apply_edit(struct assoc_array_edit *edit);
+extern void assoc_array_cancel_edit(struct assoc_array_edit *edit);
+extern int assoc_array_gc(struct assoc_array *array,
+ const struct assoc_array_ops *ops,
+ bool (*iterator)(void *object, void *iterator_data),
+ void *iterator_data);
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_H */
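Modification is two-phase: assoc_array_insert() (like _delete() and _clear()) preallocates an edit script that the caller can still abandon with assoc_array_cancel_edit(), and assoc_array_apply_edit() publishes it so RCU readers see the change atomically. A minimal sketch (not part of this patch), assuming a caller-supplied demo_ops implementing struct assoc_array_ops and the ERR_PTR convention used by the in-tree callers:

#include <linux/assoc_array.h>
#include <linux/err.h>

/* demo_ops is a hypothetical struct assoc_array_ops implementation supplied
 * by the caller (get_key_chunk, compare_object, free_object, ...). */
extern const struct assoc_array_ops demo_ops;

static int demo_store(struct assoc_array *array, const void *key, void *object)
{
	struct assoc_array_edit *edit;

	/* Phase 1: preallocate everything the edit might need. */
	edit = assoc_array_insert(array, &demo_ops, key, object);
	if (IS_ERR(edit))
		return PTR_ERR(edit);

	/* Phase 2: publish the change; readers never see a half-updated tree. */
	assoc_array_apply_edit(edit);
	return 0;
}

static void *demo_lookup(const struct assoc_array *array, const void *key)
{
	/* Caller is expected to be inside an RCU read-side critical section. */
	return assoc_array_find(array, &demo_ops, key);
}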
diff --git a/include/linux/assoc_array_priv.h b/include/linux/assoc_array_priv.h
new file mode 100644
index 000000000..711275e66
--- /dev/null
+++ b/include/linux/assoc_array_priv.h
@@ -0,0 +1,182 @@
+/* Private definitions for the generic associative array implementation.
+ *
+ * See Documentation/assoc_array.txt for information.
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ASSOC_ARRAY_PRIV_H
+#define _LINUX_ASSOC_ARRAY_PRIV_H
+
+#ifdef CONFIG_ASSOCIATIVE_ARRAY
+
+#include <linux/assoc_array.h>
+
+#define ASSOC_ARRAY_FAN_OUT 16 /* Number of slots per node */
+#define ASSOC_ARRAY_FAN_MASK (ASSOC_ARRAY_FAN_OUT - 1)
+#define ASSOC_ARRAY_LEVEL_STEP (ilog2(ASSOC_ARRAY_FAN_OUT))
+#define ASSOC_ARRAY_LEVEL_STEP_MASK (ASSOC_ARRAY_LEVEL_STEP - 1)
+#define ASSOC_ARRAY_KEY_CHUNK_MASK (ASSOC_ARRAY_KEY_CHUNK_SIZE - 1)
+#define ASSOC_ARRAY_KEY_CHUNK_SHIFT (ilog2(BITS_PER_LONG))
+
+/*
+ * Undefined type representing a pointer with type information in the bottom
+ * two bits.
+ */
+struct assoc_array_ptr;
+
+/*
+ * An N-way node in the tree.
+ *
+ * Each slot contains one of four things:
+ *
+ * (1) Nothing (NULL).
+ *
+ * (2) A leaf object (pointer type 0).
+ *
+ * (3) A next-level node (pointer type 1, subtype 0).
+ *
+ * (4) A shortcut (pointer type 1, subtype 1).
+ *
+ * The tree is optimised for search-by-ID, but permits reasonable iteration
+ * also.
+ *
+ * The tree is navigated by constructing an index key consisting of an array of
+ * segments, where each segment is ilog2(ASSOC_ARRAY_FAN_OUT) bits in size.
+ *
+ * The segments correspond to levels of the tree (the first segment is used at
+ * level 0, the second at level 1, etc.).
+ */
+struct assoc_array_node {
+ struct assoc_array_ptr *back_pointer;
+ u8 parent_slot;
+ struct assoc_array_ptr *slots[ASSOC_ARRAY_FAN_OUT];
+ unsigned long nr_leaves_on_branch;
+};
+
+/*
+ * A shortcut through the index space out to where a collection of nodes/leaves
+ * with the same IDs live.
+ */
+struct assoc_array_shortcut {
+ struct assoc_array_ptr *back_pointer;
+ int parent_slot;
+ int skip_to_level;
+ struct assoc_array_ptr *next_node;
+ unsigned long index_key[];
+};
+
+/*
+ * Preallocation cache.
+ */
+struct assoc_array_edit {
+ struct rcu_head rcu;
+ struct assoc_array *array;
+ const struct assoc_array_ops *ops;
+ const struct assoc_array_ops *ops_for_excised_subtree;
+ struct assoc_array_ptr *leaf;
+ struct assoc_array_ptr **leaf_p;
+ struct assoc_array_ptr *dead_leaf;
+ struct assoc_array_ptr *new_meta[3];
+ struct assoc_array_ptr *excised_meta[1];
+ struct assoc_array_ptr *excised_subtree;
+ struct assoc_array_ptr **set_backpointers[ASSOC_ARRAY_FAN_OUT];
+ struct assoc_array_ptr *set_backpointers_to;
+ struct assoc_array_node *adjust_count_on;
+ long adjust_count_by;
+ struct {
+ struct assoc_array_ptr **ptr;
+ struct assoc_array_ptr *to;
+ } set[2];
+ struct {
+ u8 *p;
+ u8 to;
+ } set_parent_slot[1];
+ u8 segment_cache[ASSOC_ARRAY_FAN_OUT + 1];
+};
+
+/*
+ * Internal tree member pointers are marked in the bottom one or two bits to
+ * indicate what type they are so that we don't have to look behind every
+ * pointer to see what it points to.
+ *
+ * We provide functions to test type annotations and to create and translate
+ * the annotated pointers.
+ */
+#define ASSOC_ARRAY_PTR_TYPE_MASK 0x1UL
+#define ASSOC_ARRAY_PTR_LEAF_TYPE 0x0UL /* Points to leaf (or nowhere) */
+#define ASSOC_ARRAY_PTR_META_TYPE 0x1UL /* Points to node or shortcut */
+#define ASSOC_ARRAY_PTR_SUBTYPE_MASK 0x2UL
+#define ASSOC_ARRAY_PTR_NODE_SUBTYPE 0x0UL
+#define ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE 0x2UL
+
+static inline bool assoc_array_ptr_is_meta(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & ASSOC_ARRAY_PTR_TYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_leaf(const struct assoc_array_ptr *x)
+{
+ return !assoc_array_ptr_is_meta(x);
+}
+static inline bool assoc_array_ptr_is_shortcut(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & ASSOC_ARRAY_PTR_SUBTYPE_MASK;
+}
+static inline bool assoc_array_ptr_is_node(const struct assoc_array_ptr *x)
+{
+ return !assoc_array_ptr_is_shortcut(x);
+}
+
+static inline void *assoc_array_ptr_to_leaf(const struct assoc_array_ptr *x)
+{
+ return (void *)((unsigned long)x & ~ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+
+static inline
+unsigned long __assoc_array_ptr_to_meta(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x &
+ ~(ASSOC_ARRAY_PTR_SUBTYPE_MASK | ASSOC_ARRAY_PTR_TYPE_MASK);
+}
+static inline
+struct assoc_array_node *assoc_array_ptr_to_node(const struct assoc_array_ptr *x)
+{
+ return (struct assoc_array_node *)__assoc_array_ptr_to_meta(x);
+}
+static inline
+struct assoc_array_shortcut *assoc_array_ptr_to_shortcut(const struct assoc_array_ptr *x)
+{
+ return (struct assoc_array_shortcut *)__assoc_array_ptr_to_meta(x);
+}
+
+static inline
+struct assoc_array_ptr *__assoc_array_x_to_ptr(const void *p, unsigned long t)
+{
+ return (struct assoc_array_ptr *)((unsigned long)p | t);
+}
+static inline
+struct assoc_array_ptr *assoc_array_leaf_to_ptr(const void *p)
+{
+ return __assoc_array_x_to_ptr(p, ASSOC_ARRAY_PTR_LEAF_TYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_node_to_ptr(const struct assoc_array_node *p)
+{
+ return __assoc_array_x_to_ptr(
+ p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_NODE_SUBTYPE);
+}
+static inline
+struct assoc_array_ptr *assoc_array_shortcut_to_ptr(const struct assoc_array_shortcut *p)
+{
+ return __assoc_array_x_to_ptr(
+ p, ASSOC_ARRAY_PTR_META_TYPE | ASSOC_ARRAY_PTR_SHORTCUT_SUBTYPE);
+}
+
+#endif /* CONFIG_ASSOCIATIVE_ARRAY */
+#endif /* _LINUX_ASSOC_ARRAY_PRIV_H */
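The bottom-bit tagging can be exercised directly with the helpers above; the round-trip below is an illustrative sketch (not part of this patch) of what the ..._to_ptr()/_ptr_to_...() pairs guarantee. Only lib/assoc_array.c is expected to use these internals.

#include <linux/assoc_array_priv.h>
#include <linux/bug.h>

/* Round-trip a node pointer through the tagged-pointer helpers.  The low
 * bits carry the type, so the node must be at least 4-byte aligned (which
 * kmalloc guarantees). */
static void demo_tagging(struct assoc_array_node *node)
{
	struct assoc_array_ptr *p = assoc_array_node_to_ptr(node);

	WARN_ON(!assoc_array_ptr_is_meta(p));		/* type bit set */
	WARN_ON(!assoc_array_ptr_is_node(p));		/* subtype bit clear */
	WARN_ON(assoc_array_ptr_to_node(p) != node);	/* mask restores it */
}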
diff --git a/include/linux/async.h b/include/linux/async.h
new file mode 100644
index 000000000..6b0226bda
--- /dev/null
+++ b/include/linux/async.h
@@ -0,0 +1,50 @@
+/*
+ * async.h: Asynchronous function calls for boot performance
+ *
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef __ASYNC_H__
+#define __ASYNC_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+typedef u64 async_cookie_t;
+typedef void (*async_func_t) (void *data, async_cookie_t cookie);
+struct async_domain {
+ struct list_head pending;
+ unsigned registered:1;
+};
+
+/*
+ * domain participates in global async_synchronize_full
+ */
+#define ASYNC_DOMAIN(_name) \
+ struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
+ .registered = 1 }
+
+/*
+ * domain is free to go out of scope as soon as all pending work is
+ * complete; this domain does not participate in async_synchronize_full
+ */
+#define ASYNC_DOMAIN_EXCLUSIVE(_name) \
+ struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \
+ .registered = 0 }
+
+extern async_cookie_t async_schedule(async_func_t func, void *data);
+extern async_cookie_t async_schedule_domain(async_func_t func, void *data,
+ struct async_domain *domain);
+void async_unregister_domain(struct async_domain *domain);
+extern void async_synchronize_full(void);
+extern void async_synchronize_full_domain(struct async_domain *domain);
+extern void async_synchronize_cookie(async_cookie_t cookie);
+extern void async_synchronize_cookie_domain(async_cookie_t cookie,
+ struct async_domain *domain);
+extern bool current_is_async(void);
+#endif
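A minimal sketch (not part of this patch) of the intended usage: schedule callbacks matching async_func_t into a private domain, then synchronize on that domain before relying on their side effects. The device names are hypothetical.

#include <linux/async.h>
#include <linux/printk.h>

/* Private domain: synchronized independently of the global domain. */
static ASYNC_DOMAIN_EXCLUSIVE(demo_domain);

/* Callback signature must match async_func_t. */
static void demo_probe_one(void *data, async_cookie_t cookie)
{
	pr_info("async probe of %s\n", (const char *)data);
}

static void demo_probe_all(void)
{
	async_schedule_domain(demo_probe_one, "slow-device-0", &demo_domain);
	async_schedule_domain(demo_probe_one, "slow-device-1", &demo_domain);

	/* Block until both calls above have completed. */
	async_synchronize_full_domain(&demo_domain);
}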
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
new file mode 100644
index 000000000..388574ea3
--- /dev/null
+++ b/include/linux/async_tx.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright © 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _ASYNC_TX_H_
+#define _ASYNC_TX_H_
+#include <linux/dmaengine.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
+/**
+ * dma_chan_ref - object used to manage dma channels received from the
+ * dmaengine core.
+ * @chan - the channel being tracked
+ * @node - node for the channel to be placed on async_tx_master_list
+ * @rcu - for list_del_rcu
+ * @count - number of times this channel is listed in the pool
+ * (for channels with multiple capabilities)
+ */
+struct dma_chan_ref {
+ struct dma_chan *chan;
+ struct list_head node;
+ struct rcu_head rcu;
+ atomic_t count;
+};
+
+/**
+ * async_tx_flags - modifiers for the async_* calls
+ * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
+ * destination address is not a source. The asynchronous case handles this
+ * implicitly; the synchronous case needs to zero the destination block.
+ * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is
+ * also one of the source addresses. In the synchronous case the destination
+ * address is an implied source, whereas in the asynchronous case it must be listed
+ * as a source. The destination address must be the first address in the source
+ * array.
+ * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
+ * dependency chain
+ * @ASYNC_TX_FENCE: specify that the next operation in the dependency
+ * chain uses this operation's result as an input
+ * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
+ * input data. Required for rmw case.
+ */
+enum async_tx_flags {
+ ASYNC_TX_XOR_ZERO_DST = (1 << 0),
+ ASYNC_TX_XOR_DROP_DST = (1 << 1),
+ ASYNC_TX_ACK = (1 << 2),
+ ASYNC_TX_FENCE = (1 << 3),
+ ASYNC_TX_PQ_XOR_DST = (1 << 4),
+};
+
+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+ enum async_tx_flags flags;
+ struct dma_async_tx_descriptor *depend_tx;
+ dma_async_tx_callback cb_fn;
+ void *cb_param;
+ void *scribble;
+};
+
+#ifdef CONFIG_DMA_ENGINE
+#define async_tx_issue_pending_all dma_issue_pending_all
+
+/**
+ * async_tx_issue_pending - send pending descriptor to the hardware channel
+ * @tx: descriptor handle to retrieve hardware context
+ *
+ * Note: any dependent operations will have already been issued by
+ * async_tx_channel_switch, or (in the case of no channel switch) will
+ * be already pending on this channel.
+ */
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+ if (likely(tx)) {
+ struct dma_chan *chan = tx->chan;
+ struct dma_device *dma = chan->device;
+
+ dma->device_issue_pending(chan);
+ }
+}
+#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+#include <asm/async_tx.h>
+#else
+#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
+ __async_tx_find_channel(dep, type)
+struct dma_chan *
+__async_tx_find_channel(struct async_submit_ctl *submit,
+ enum dma_transaction_type tx_type);
+#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
+#else
+static inline void async_tx_issue_pending_all(void)
+{
+ do { } while (0);
+}
+
+static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
+{
+ do { } while (0);
+}
+
+static inline struct dma_chan *
+async_tx_find_channel(struct async_submit_ctl *submit,
+ enum dma_transaction_type tx_type, struct page **dst,
+ int dst_count, struct page **src, int src_count,
+ size_t len)
+{
+ return NULL;
+}
+#endif
+
+/**
+ * async_tx_sync_epilog - actions to take if an operation is run synchronously
+ * @submit: async_tx submit descriptor holding the completion callback and
+ * its parameter
+ */
+static inline void
+async_tx_sync_epilog(struct async_submit_ctl *submit)
+{
+ if (submit->cb_fn)
+ submit->cb_fn(submit->cb_param);
+}
+
+typedef union {
+ unsigned long addr;
+ struct page *page;
+ dma_addr_t dma;
+} addr_conv_t;
+
+static inline void
+init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
+ struct dma_async_tx_descriptor *tx,
+ dma_async_tx_callback cb_fn, void *cb_param,
+ addr_conv_t *scribble)
+{
+ args->flags = flags;
+ args->depend_tx = tx;
+ args->cb_fn = cb_fn;
+ args->cb_param = cb_param;
+ args->scribble = scribble;
+}
+
+void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+ struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_xor(struct page *dest, struct page **src_list, unsigned int offset,
+ int src_cnt, size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+ int src_cnt, size_t len, enum sum_check_flags *result,
+ struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
+ unsigned int src_offset, size_t len,
+ struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
+ size_t len, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
+ size_t len, enum sum_check_flags *pqres, struct page *spare,
+ struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
+ struct page **ptrs, struct async_submit_ctl *submit);
+
+struct dma_async_tx_descriptor *
+async_raid6_datap_recov(int src_num, size_t bytes, int faila,
+ struct page **ptrs, struct async_submit_ctl *submit);
+
+void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
+#endif /* _ASYNC_TX_H_ */
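The submit descriptor is what chains operations together and carries the completion callback. Below is a hedged sketch (not part of this patch) of the raid5-style pattern: initialize the context, fence the result for dependent operations, and issue an XOR. The helper names, page array and completion are hypothetical, and the scribble sizing rule is documented with async_xor() itself.

#include <linux/async_tx.h>
#include <linux/completion.h>

/* Completion callback matching dma_async_tx_callback. */
static void demo_xor_done(void *param)
{
	complete((struct completion *)param);
}

/*
 * XOR src_cnt pages into dest, chained after 'prior' (may be NULL), and
 * fence the result so a dependent operation sees it.  'scribble' provides
 * caller-supplied space for dma/page address conversion.
 */
static struct dma_async_tx_descriptor *
demo_xor(struct page *dest, struct page **srcs, int src_cnt,
	 struct dma_async_tx_descriptor *prior, addr_conv_t *scribble,
	 struct completion *done)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_ZERO_DST,
			  prior, demo_xor_done, done, scribble);
	return async_xor(dest, srcs, 0, src_cnt, PAGE_SIZE, &submit);
}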
diff --git a/include/linux/ata.h b/include/linux/ata.h
new file mode 100644
index 000000000..533dbb642
--- /dev/null
+++ b/include/linux/ata.h
@@ -0,0 +1,1049 @@
+
+/*
+ * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from http://www.t13.org/
+ *
+ */
+
+#ifndef __LINUX_ATA_H__
+#define __LINUX_ATA_H__
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* defines only for the constants which don't work well as enums */
+#define ATA_DMA_BOUNDARY 0xffffUL
+#define ATA_DMA_MASK 0xffffffffULL
+
+enum {
+ /* various global constants */
+ ATA_MAX_DEVICES = 2, /* per bus/port */
+ ATA_MAX_PRD = 256, /* we could make these 256/256 */
+ ATA_SECT_SIZE = 512,
+ ATA_MAX_SECTORS_128 = 128,
+ ATA_MAX_SECTORS = 256,
+ ATA_MAX_SECTORS_1024 = 1024,
+	ATA_MAX_SECTORS_LBA48	= 65535, /* TODO: 65536? */
+ ATA_MAX_SECTORS_TAPE = 65535,
+
+ ATA_ID_WORDS = 256,
+ ATA_ID_CONFIG = 0,
+ ATA_ID_CYLS = 1,
+ ATA_ID_HEADS = 3,
+ ATA_ID_SECTORS = 6,
+ ATA_ID_SERNO = 10,
+ ATA_ID_BUF_SIZE = 21,
+ ATA_ID_FW_REV = 23,
+ ATA_ID_PROD = 27,
+ ATA_ID_MAX_MULTSECT = 47,
+ ATA_ID_DWORD_IO = 48,
+ ATA_ID_CAPABILITY = 49,
+ ATA_ID_OLD_PIO_MODES = 51,
+ ATA_ID_OLD_DMA_MODES = 52,
+ ATA_ID_FIELD_VALID = 53,
+ ATA_ID_CUR_CYLS = 54,
+ ATA_ID_CUR_HEADS = 55,
+ ATA_ID_CUR_SECTORS = 56,
+ ATA_ID_MULTSECT = 59,
+ ATA_ID_LBA_CAPACITY = 60,
+ ATA_ID_SWDMA_MODES = 62,
+ ATA_ID_MWDMA_MODES = 63,
+ ATA_ID_PIO_MODES = 64,
+ ATA_ID_EIDE_DMA_MIN = 65,
+ ATA_ID_EIDE_DMA_TIME = 66,
+ ATA_ID_EIDE_PIO = 67,
+ ATA_ID_EIDE_PIO_IORDY = 68,
+ ATA_ID_ADDITIONAL_SUPP = 69,
+ ATA_ID_QUEUE_DEPTH = 75,
+ ATA_ID_SATA_CAPABILITY = 76,
+ ATA_ID_SATA_CAPABILITY_2 = 77,
+ ATA_ID_FEATURE_SUPP = 78,
+ ATA_ID_MAJOR_VER = 80,
+ ATA_ID_COMMAND_SET_1 = 82,
+ ATA_ID_COMMAND_SET_2 = 83,
+ ATA_ID_CFSSE = 84,
+ ATA_ID_CFS_ENABLE_1 = 85,
+ ATA_ID_CFS_ENABLE_2 = 86,
+ ATA_ID_CSF_DEFAULT = 87,
+ ATA_ID_UDMA_MODES = 88,
+ ATA_ID_HW_CONFIG = 93,
+ ATA_ID_SPG = 98,
+ ATA_ID_LBA_CAPACITY_2 = 100,
+ ATA_ID_SECTOR_SIZE = 106,
+ ATA_ID_WWN = 108,
+ ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */
+ ATA_ID_COMMAND_SET_3 = 119,
+ ATA_ID_COMMAND_SET_4 = 120,
+ ATA_ID_LAST_LUN = 126,
+ ATA_ID_DLF = 128,
+ ATA_ID_CSFO = 129,
+ ATA_ID_CFA_POWER = 160,
+ ATA_ID_CFA_KEY_MGMT = 162,
+ ATA_ID_CFA_MODES = 163,
+ ATA_ID_DATA_SET_MGMT = 169,
+ ATA_ID_ROT_SPEED = 217,
+ ATA_ID_PIO4 = (1 << 1),
+
+ ATA_ID_SERNO_LEN = 20,
+ ATA_ID_FW_REV_LEN = 8,
+ ATA_ID_PROD_LEN = 40,
+ ATA_ID_WWN_LEN = 8,
+
+ ATA_PCI_CTL_OFS = 2,
+
+ ATA_PIO0 = (1 << 0),
+ ATA_PIO1 = ATA_PIO0 | (1 << 1),
+ ATA_PIO2 = ATA_PIO1 | (1 << 2),
+ ATA_PIO3 = ATA_PIO2 | (1 << 3),
+ ATA_PIO4 = ATA_PIO3 | (1 << 4),
+ ATA_PIO5 = ATA_PIO4 | (1 << 5),
+ ATA_PIO6 = ATA_PIO5 | (1 << 6),
+
+ ATA_PIO4_ONLY = (1 << 4),
+
+ ATA_SWDMA0 = (1 << 0),
+ ATA_SWDMA1 = ATA_SWDMA0 | (1 << 1),
+ ATA_SWDMA2 = ATA_SWDMA1 | (1 << 2),
+
+ ATA_SWDMA2_ONLY = (1 << 2),
+
+ ATA_MWDMA0 = (1 << 0),
+ ATA_MWDMA1 = ATA_MWDMA0 | (1 << 1),
+ ATA_MWDMA2 = ATA_MWDMA1 | (1 << 2),
+ ATA_MWDMA3 = ATA_MWDMA2 | (1 << 3),
+ ATA_MWDMA4 = ATA_MWDMA3 | (1 << 4),
+
+ ATA_MWDMA12_ONLY = (1 << 1) | (1 << 2),
+ ATA_MWDMA2_ONLY = (1 << 2),
+
+ ATA_UDMA0 = (1 << 0),
+ ATA_UDMA1 = ATA_UDMA0 | (1 << 1),
+ ATA_UDMA2 = ATA_UDMA1 | (1 << 2),
+ ATA_UDMA3 = ATA_UDMA2 | (1 << 3),
+ ATA_UDMA4 = ATA_UDMA3 | (1 << 4),
+ ATA_UDMA5 = ATA_UDMA4 | (1 << 5),
+ ATA_UDMA6 = ATA_UDMA5 | (1 << 6),
+ ATA_UDMA7 = ATA_UDMA6 | (1 << 7),
+ /* ATA_UDMA7 is just for completeness... doesn't exist (yet?). */
+
+ ATA_UDMA24_ONLY = (1 << 2) | (1 << 4),
+
+ ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */
+
+ /* DMA-related */
+ ATA_PRD_SZ = 8,
+ ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ),
+ ATA_PRD_EOT = (1 << 31), /* end-of-table flag */
+
+ ATA_DMA_TABLE_OFS = 4,
+ ATA_DMA_STATUS = 2,
+ ATA_DMA_CMD = 0,
+ ATA_DMA_WR = (1 << 3),
+ ATA_DMA_START = (1 << 0),
+ ATA_DMA_INTR = (1 << 2),
+ ATA_DMA_ERR = (1 << 1),
+ ATA_DMA_ACTIVE = (1 << 0),
+
+ /* bits in ATA command block registers */
+ ATA_HOB = (1 << 7), /* LBA48 selector */
+ ATA_NIEN = (1 << 1), /* disable-irq flag */
+ ATA_LBA = (1 << 6), /* LBA28 selector */
+ ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */
+ ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */
+ ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */
+ ATA_BUSY = (1 << 7), /* BSY status bit */
+ ATA_DRDY = (1 << 6), /* device ready */
+ ATA_DF = (1 << 5), /* device fault */
+ ATA_DSC = (1 << 4), /* drive seek complete */
+ ATA_DRQ = (1 << 3), /* data request i/o */
+ ATA_CORR = (1 << 2), /* corrected data error */
+ ATA_SENSE = (1 << 1), /* sense code available */
+ ATA_ERR = (1 << 0), /* have an error */
+ ATA_SRST = (1 << 2), /* software reset */
+ ATA_ICRC = (1 << 7), /* interface CRC error */
+ ATA_BBK = ATA_ICRC, /* pre-EIDE: block marked bad */
+ ATA_UNC = (1 << 6), /* uncorrectable media error */
+ ATA_MC = (1 << 5), /* media changed */
+ ATA_IDNF = (1 << 4), /* ID not found */
+ ATA_MCR = (1 << 3), /* media change requested */
+ ATA_ABORTED = (1 << 2), /* command aborted */
+ ATA_TRK0NF = (1 << 1), /* track 0 not found */
+ ATA_AMNF = (1 << 0), /* address mark not found */
+ ATAPI_LFS = 0xF0, /* last failed sense */
+ ATAPI_EOM = ATA_TRK0NF, /* end of media */
+ ATAPI_ILI = ATA_AMNF, /* illegal length indication */
+ ATAPI_IO = (1 << 1),
+ ATAPI_COD = (1 << 0),
+
+ /* ATA command block registers */
+ ATA_REG_DATA = 0x00,
+ ATA_REG_ERR = 0x01,
+ ATA_REG_NSECT = 0x02,
+ ATA_REG_LBAL = 0x03,
+ ATA_REG_LBAM = 0x04,
+ ATA_REG_LBAH = 0x05,
+ ATA_REG_DEVICE = 0x06,
+ ATA_REG_STATUS = 0x07,
+
+ ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */
+ ATA_REG_CMD = ATA_REG_STATUS,
+ ATA_REG_BYTEL = ATA_REG_LBAM,
+ ATA_REG_BYTEH = ATA_REG_LBAH,
+ ATA_REG_DEVSEL = ATA_REG_DEVICE,
+ ATA_REG_IRQ = ATA_REG_NSECT,
+
+ /* ATA device commands */
+ ATA_CMD_DEV_RESET = 0x08, /* ATAPI device reset */
+ ATA_CMD_CHK_POWER = 0xE5, /* check power mode */
+ ATA_CMD_STANDBY = 0xE2, /* place in standby power mode */
+ ATA_CMD_IDLE = 0xE3, /* place in idle power mode */
+ ATA_CMD_EDD = 0x90, /* execute device diagnostic */
+ ATA_CMD_DOWNLOAD_MICRO = 0x92,
+ ATA_CMD_DOWNLOAD_MICRO_DMA = 0x93,
+ ATA_CMD_NOP = 0x00,
+ ATA_CMD_FLUSH = 0xE7,
+ ATA_CMD_FLUSH_EXT = 0xEA,
+ ATA_CMD_ID_ATA = 0xEC,
+ ATA_CMD_ID_ATAPI = 0xA1,
+ ATA_CMD_SERVICE = 0xA2,
+ ATA_CMD_READ = 0xC8,
+ ATA_CMD_READ_EXT = 0x25,
+ ATA_CMD_READ_QUEUED = 0x26,
+ ATA_CMD_READ_STREAM_EXT = 0x2B,
+ ATA_CMD_READ_STREAM_DMA_EXT = 0x2A,
+ ATA_CMD_WRITE = 0xCA,
+ ATA_CMD_WRITE_EXT = 0x35,
+ ATA_CMD_WRITE_QUEUED = 0x36,
+ ATA_CMD_WRITE_STREAM_EXT = 0x3B,
+ ATA_CMD_WRITE_STREAM_DMA_EXT = 0x3A,
+ ATA_CMD_WRITE_FUA_EXT = 0x3D,
+ ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
+ ATA_CMD_FPDMA_READ = 0x60,
+ ATA_CMD_FPDMA_WRITE = 0x61,
+ ATA_CMD_FPDMA_SEND = 0x64,
+ ATA_CMD_FPDMA_RECV = 0x65,
+ ATA_CMD_PIO_READ = 0x20,
+ ATA_CMD_PIO_READ_EXT = 0x24,
+ ATA_CMD_PIO_WRITE = 0x30,
+ ATA_CMD_PIO_WRITE_EXT = 0x34,
+ ATA_CMD_READ_MULTI = 0xC4,
+ ATA_CMD_READ_MULTI_EXT = 0x29,
+ ATA_CMD_WRITE_MULTI = 0xC5,
+ ATA_CMD_WRITE_MULTI_EXT = 0x39,
+ ATA_CMD_WRITE_MULTI_FUA_EXT = 0xCE,
+ ATA_CMD_SET_FEATURES = 0xEF,
+ ATA_CMD_SET_MULTI = 0xC6,
+ ATA_CMD_PACKET = 0xA0,
+ ATA_CMD_VERIFY = 0x40,
+ ATA_CMD_VERIFY_EXT = 0x42,
+ ATA_CMD_WRITE_UNCORR_EXT = 0x45,
+ ATA_CMD_STANDBYNOW1 = 0xE0,
+ ATA_CMD_IDLEIMMEDIATE = 0xE1,
+ ATA_CMD_SLEEP = 0xE6,
+ ATA_CMD_INIT_DEV_PARAMS = 0x91,
+ ATA_CMD_READ_NATIVE_MAX = 0xF8,
+ ATA_CMD_READ_NATIVE_MAX_EXT = 0x27,
+ ATA_CMD_SET_MAX = 0xF9,
+ ATA_CMD_SET_MAX_EXT = 0x37,
+ ATA_CMD_READ_LOG_EXT = 0x2F,
+ ATA_CMD_WRITE_LOG_EXT = 0x3F,
+ ATA_CMD_READ_LOG_DMA_EXT = 0x47,
+ ATA_CMD_WRITE_LOG_DMA_EXT = 0x57,
+ ATA_CMD_TRUSTED_NONDATA = 0x5B,
+ ATA_CMD_TRUSTED_RCV = 0x5C,
+ ATA_CMD_TRUSTED_RCV_DMA = 0x5D,
+ ATA_CMD_TRUSTED_SND = 0x5E,
+ ATA_CMD_TRUSTED_SND_DMA = 0x5F,
+ ATA_CMD_PMP_READ = 0xE4,
+ ATA_CMD_PMP_READ_DMA = 0xE9,
+ ATA_CMD_PMP_WRITE = 0xE8,
+ ATA_CMD_PMP_WRITE_DMA = 0xEB,
+ ATA_CMD_CONF_OVERLAY = 0xB1,
+ ATA_CMD_SEC_SET_PASS = 0xF1,
+ ATA_CMD_SEC_UNLOCK = 0xF2,
+ ATA_CMD_SEC_ERASE_PREP = 0xF3,
+ ATA_CMD_SEC_ERASE_UNIT = 0xF4,
+ ATA_CMD_SEC_FREEZE_LOCK = 0xF5,
+ ATA_CMD_SEC_DISABLE_PASS = 0xF6,
+ ATA_CMD_CONFIG_STREAM = 0x51,
+ ATA_CMD_SMART = 0xB0,
+ ATA_CMD_MEDIA_LOCK = 0xDE,
+ ATA_CMD_MEDIA_UNLOCK = 0xDF,
+ ATA_CMD_DSM = 0x06,
+ ATA_CMD_CHK_MED_CRD_TYP = 0xD1,
+ ATA_CMD_CFA_REQ_EXT_ERR = 0x03,
+ ATA_CMD_CFA_WRITE_NE = 0x38,
+ ATA_CMD_CFA_TRANS_SECT = 0x87,
+ ATA_CMD_CFA_ERASE = 0xC0,
+ ATA_CMD_CFA_WRITE_MULT_NE = 0xCD,
+ ATA_CMD_REQ_SENSE_DATA = 0x0B,
+ ATA_CMD_SANITIZE_DEVICE = 0xB4,
+
+ /* marked obsolete in the ATA/ATAPI-7 spec */
+ ATA_CMD_RESTORE = 0x10,
+
+ /* Subcmds for ATA_CMD_FPDMA_SEND */
+ ATA_SUBCMD_FPDMA_SEND_DSM = 0x00,
+ ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02,
+
+ /* READ_LOG_EXT pages */
+ ATA_LOG_SATA_NCQ = 0x10,
+ ATA_LOG_NCQ_SEND_RECV = 0x13,
+ ATA_LOG_SATA_ID_DEV_DATA = 0x30,
+ ATA_LOG_SATA_SETTINGS = 0x08,
+ ATA_LOG_DEVSLP_OFFSET = 0x30,
+ ATA_LOG_DEVSLP_SIZE = 0x08,
+ ATA_LOG_DEVSLP_MDAT = 0x00,
+ ATA_LOG_DEVSLP_MDAT_MASK = 0x1F,
+ ATA_LOG_DEVSLP_DETO = 0x01,
+ ATA_LOG_DEVSLP_VALID = 0x07,
+ ATA_LOG_DEVSLP_VALID_MASK = 0x80,
+
+ /* NCQ send and receive log */
+ ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00,
+ ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = (1 << 0),
+ ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04,
+ ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0),
+ ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08,
+ ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C,
+ ATA_LOG_NCQ_SEND_RECV_SIZE = 0x10,
+
+ /* READ/WRITE LONG (obsolete) */
+ ATA_CMD_READ_LONG = 0x22,
+ ATA_CMD_READ_LONG_ONCE = 0x23,
+ ATA_CMD_WRITE_LONG = 0x32,
+ ATA_CMD_WRITE_LONG_ONCE = 0x33,
+
+ /* SETFEATURES stuff */
+ SETFEATURES_XFER = 0x03,
+ XFER_UDMA_7 = 0x47,
+ XFER_UDMA_6 = 0x46,
+ XFER_UDMA_5 = 0x45,
+ XFER_UDMA_4 = 0x44,
+ XFER_UDMA_3 = 0x43,
+ XFER_UDMA_2 = 0x42,
+ XFER_UDMA_1 = 0x41,
+ XFER_UDMA_0 = 0x40,
+ XFER_MW_DMA_4 = 0x24, /* CFA only */
+ XFER_MW_DMA_3 = 0x23, /* CFA only */
+ XFER_MW_DMA_2 = 0x22,
+ XFER_MW_DMA_1 = 0x21,
+ XFER_MW_DMA_0 = 0x20,
+ XFER_SW_DMA_2 = 0x12,
+ XFER_SW_DMA_1 = 0x11,
+ XFER_SW_DMA_0 = 0x10,
+ XFER_PIO_6 = 0x0E, /* CFA only */
+ XFER_PIO_5 = 0x0D, /* CFA only */
+ XFER_PIO_4 = 0x0C,
+ XFER_PIO_3 = 0x0B,
+ XFER_PIO_2 = 0x0A,
+ XFER_PIO_1 = 0x09,
+ XFER_PIO_0 = 0x08,
+ XFER_PIO_SLOW = 0x00,
+
+ SETFEATURES_WC_ON = 0x02, /* Enable write cache */
+ SETFEATURES_WC_OFF = 0x82, /* Disable write cache */
+
+ /* Enable/Disable Automatic Acoustic Management */
+ SETFEATURES_AAM_ON = 0x42,
+ SETFEATURES_AAM_OFF = 0xC2,
+
+ SETFEATURES_SPINUP = 0x07, /* Spin-up drive */
+
+ SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */
+ SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */
+
+ /* SETFEATURE Sector counts for SATA features */
+ SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */
+ SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */
+ SATA_DIPM = 0x03, /* Device Initiated Power Management */
+ SATA_FPDMA_IN_ORDER = 0x04, /* FPDMA in-order data delivery */
+ SATA_AN = 0x05, /* Asynchronous Notification */
+ SATA_SSP = 0x06, /* Software Settings Preservation */
+ SATA_DEVSLP = 0x09, /* Device Sleep */
+
+ SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+
+ /* feature values for SET_MAX */
+ ATA_SET_MAX_ADDR = 0x00,
+ ATA_SET_MAX_PASSWD = 0x01,
+ ATA_SET_MAX_LOCK = 0x02,
+ ATA_SET_MAX_UNLOCK = 0x03,
+ ATA_SET_MAX_FREEZE_LOCK = 0x04,
+
+ /* feature values for DEVICE CONFIGURATION OVERLAY */
+ ATA_DCO_RESTORE = 0xC0,
+ ATA_DCO_FREEZE_LOCK = 0xC1,
+ ATA_DCO_IDENTIFY = 0xC2,
+ ATA_DCO_SET = 0xC3,
+
+ /* feature values for SMART */
+ ATA_SMART_ENABLE = 0xD8,
+ ATA_SMART_READ_VALUES = 0xD0,
+ ATA_SMART_READ_THRESHOLDS = 0xD1,
+
+ /* feature values for Data Set Management */
+ ATA_DSM_TRIM = 0x01,
+
+ /* password used in LBA Mid / LBA High for executing SMART commands */
+ ATA_SMART_LBAM_PASS = 0x4F,
+ ATA_SMART_LBAH_PASS = 0xC2,
+
+ /* ATAPI stuff */
+ ATAPI_PKT_DMA = (1 << 0),
+ ATAPI_DMADIR = (1 << 2), /* ATAPI data dir:
+ 0=to device, 1=to host */
+ ATAPI_CDB_LEN = 16,
+
+ /* PMP stuff */
+ SATA_PMP_MAX_PORTS = 15,
+ SATA_PMP_CTRL_PORT = 15,
+
+ SATA_PMP_GSCR_DWORDS = 128,
+ SATA_PMP_GSCR_PROD_ID = 0,
+ SATA_PMP_GSCR_REV = 1,
+ SATA_PMP_GSCR_PORT_INFO = 2,
+ SATA_PMP_GSCR_ERROR = 32,
+ SATA_PMP_GSCR_ERROR_EN = 33,
+ SATA_PMP_GSCR_FEAT = 64,
+ SATA_PMP_GSCR_FEAT_EN = 96,
+
+ SATA_PMP_PSCR_STATUS = 0,
+ SATA_PMP_PSCR_ERROR = 1,
+ SATA_PMP_PSCR_CONTROL = 2,
+
+ SATA_PMP_FEAT_BIST = (1 << 0),
+ SATA_PMP_FEAT_PMREQ = (1 << 1),
+ SATA_PMP_FEAT_DYNSSC = (1 << 2),
+ SATA_PMP_FEAT_NOTIFY = (1 << 3),
+
+ /* cable types */
+ ATA_CBL_NONE = 0,
+ ATA_CBL_PATA40 = 1,
+ ATA_CBL_PATA80 = 2,
+ ATA_CBL_PATA40_SHORT = 3, /* 40 wire cable to high UDMA spec */
+ ATA_CBL_PATA_UNK = 4, /* don't know, maybe 80c? */
+ ATA_CBL_PATA_IGN = 5, /* don't know, ignore cable handling */
+ ATA_CBL_SATA = 6,
+
+ /* SATA Status and Control Registers */
+ SCR_STATUS = 0,
+ SCR_ERROR = 1,
+ SCR_CONTROL = 2,
+ SCR_ACTIVE = 3,
+ SCR_NOTIFICATION = 4,
+
+ /* SError bits */
+ SERR_DATA_RECOVERED = (1 << 0), /* recovered data error */
+ SERR_COMM_RECOVERED = (1 << 1), /* recovered comm failure */
+ SERR_DATA = (1 << 8), /* unrecovered data error */
+ SERR_PERSISTENT = (1 << 9), /* persistent data/comm error */
+ SERR_PROTOCOL = (1 << 10), /* protocol violation */
+ SERR_INTERNAL = (1 << 11), /* host internal error */
+ SERR_PHYRDY_CHG = (1 << 16), /* PHY RDY changed */
+ SERR_PHY_INT_ERR = (1 << 17), /* PHY internal error */
+ SERR_COMM_WAKE = (1 << 18), /* Comm wake */
+ SERR_10B_8B_ERR = (1 << 19), /* 10b to 8b decode error */
+ SERR_DISPARITY = (1 << 20), /* Disparity */
+ SERR_CRC = (1 << 21), /* CRC error */
+ SERR_HANDSHAKE = (1 << 22), /* Handshake error */
+ SERR_LINK_SEQ_ERR = (1 << 23), /* Link sequence error */
+ SERR_TRANS_ST_ERROR = (1 << 24), /* Transport state trans. error */
+ SERR_UNRECOG_FIS = (1 << 25), /* Unrecognized FIS */
+ SERR_DEV_XCHG = (1 << 26), /* device exchanged */
+};
+
+enum ata_tf_protocols {
+ /* ATA taskfile protocols */
+ ATA_PROT_UNKNOWN, /* unknown/invalid */
+ ATA_PROT_NODATA, /* no data */
+ ATA_PROT_PIO, /* PIO data xfer */
+ ATA_PROT_DMA, /* DMA */
+ ATA_PROT_NCQ, /* NCQ */
+ ATAPI_PROT_NODATA, /* packet command, no data */
+ ATAPI_PROT_PIO, /* packet command, PIO data xfer*/
+ ATAPI_PROT_DMA, /* packet command with special DMA sauce */
+};
+
+enum ata_ioctls {
+ ATA_IOC_GET_IO32 = 0x309,
+ ATA_IOC_SET_IO32 = 0x324,
+};
+
+/* core structures */
+
+struct ata_bmdma_prd {
+ __le32 addr;
+ __le32 flags_len;
+};
+
+/*
+ * id tests
+ */
+#define ata_id_is_ata(id) (((id)[ATA_ID_CONFIG] & (1 << 15)) == 0)
+#define ata_id_has_lba(id) ((id)[ATA_ID_CAPABILITY] & (1 << 9))
+#define ata_id_has_dma(id) ((id)[ATA_ID_CAPABILITY] & (1 << 8))
+#define ata_id_has_ncq(id) ((id)[ATA_ID_SATA_CAPABILITY] & (1 << 8))
+#define ata_id_queue_depth(id) (((id)[ATA_ID_QUEUE_DEPTH] & 0x1f) + 1)
+#define ata_id_removable(id) ((id)[ATA_ID_CONFIG] & (1 << 7))
+#define ata_id_has_atapi_AN(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 5)))
+#define ata_id_has_fpdma_aa(id) \
+ ((((id)[ATA_ID_SATA_CAPABILITY] != 0x0000) && \
+ ((id)[ATA_ID_SATA_CAPABILITY] != 0xffff)) && \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 2)))
+#define ata_id_iordy_disable(id) ((id)[ATA_ID_CAPABILITY] & (1 << 10))
+#define ata_id_has_iordy(id) ((id)[ATA_ID_CAPABILITY] & (1 << 11))
+#define ata_id_u32(id,n) \
+ (((u32) (id)[(n) + 1] << 16) | ((u32) (id)[(n)]))
+#define ata_id_u64(id,n) \
+ ( ((u64) (id)[(n) + 3] << 48) | \
+ ((u64) (id)[(n) + 2] << 32) | \
+ ((u64) (id)[(n) + 1] << 16) | \
+ ((u64) (id)[(n) + 0]) )
+
+#define ata_id_cdb_intr(id) (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
+#define ata_id_has_da(id) ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
+#define ata_id_has_devslp(id) ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
+#define ata_id_has_ncq_autosense(id) \
+ ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
+
+static inline bool ata_id_has_hipm(const u16 *id)
+{
+ u16 val = id[ATA_ID_SATA_CAPABILITY];
+
+ if (val == 0 || val == 0xffff)
+ return false;
+
+ return val & (1 << 9);
+}
+
+static inline bool ata_id_has_dipm(const u16 *id)
+{
+ u16 val = id[ATA_ID_FEATURE_SUPP];
+
+ if (val == 0 || val == 0xffff)
+ return false;
+
+ return val & (1 << 3);
+}
+
+
+static inline bool ata_id_has_fua(const u16 *id)
+{
+ if ((id[ATA_ID_CFSSE] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_CFSSE] & (1 << 6);
+}
+
+static inline bool ata_id_has_flush(const u16 *id)
+{
+ if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_COMMAND_SET_2] & (1 << 12);
+}
+
+static inline bool ata_id_flush_enabled(const u16 *id)
+{
+ if (ata_id_has_flush(id) == 0)
+ return false;
+ if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_CFS_ENABLE_2] & (1 << 12);
+}
+
+static inline bool ata_id_has_flush_ext(const u16 *id)
+{
+ if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_COMMAND_SET_2] & (1 << 13);
+}
+
+static inline bool ata_id_flush_ext_enabled(const u16 *id)
+{
+ if (ata_id_has_flush_ext(id) == 0)
+ return false;
+ if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
+ return false;
+ /*
+ * some Maxtor disks have bit 13 defined incorrectly
+ * so check bit 10 too
+ */
+ return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400;
+}
+
+static inline u32 ata_id_logical_sector_size(const u16 *id)
+{
+ /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128.
+ * IDENTIFY DEVICE data, word 117-118.
+ * 0xd000 ignores bit 13 (logical:physical > 1)
+ */
+ if ((id[ATA_ID_SECTOR_SIZE] & 0xd000) == 0x5000)
+ return (((id[ATA_ID_LOGICAL_SECTOR_SIZE+1] << 16)
+			+ id[ATA_ID_LOGICAL_SECTOR_SIZE]) * sizeof(u16));
+ return ATA_SECT_SIZE;
+}
+
+static inline u8 ata_id_log2_per_physical_sector(const u16 *id)
+{
+ /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128.
+ * IDENTIFY DEVICE data, word 106.
+ * 0xe000 ignores bit 12 (logical sector > 512 bytes)
+ */
+ if ((id[ATA_ID_SECTOR_SIZE] & 0xe000) == 0x6000)
+ return (id[ATA_ID_SECTOR_SIZE] & 0xf);
+ return 0;
+}
+
+/* Offset of logical sectors relative to physical sectors.
+ *
+ * If device has more than one logical sector per physical sector
+ * (aka 512 byte emulation), vendors might offset the "sector 0" address
+ * so sector 63 is "naturally aligned" - e.g. FAT partition table.
+ * This avoids Read/Mod/Write penalties when using FAT partition table
+ * and updating "well aligned" (FS perspective) physical sectors on every
+ * transaction.
+ */
+static inline u16 ata_id_logical_sector_offset(const u16 *id,
+ u8 log2_per_phys)
+{
+ u16 word_209 = id[209];
+
+ if ((log2_per_phys > 1) && (word_209 & 0xc000) == 0x4000) {
+ u16 first = word_209 & 0x3fff;
+ if (first > 0)
+ return (1 << log2_per_phys) - first;
+ }
+ return 0;
+}
+
+static inline bool ata_id_has_lba48(const u16 *id)
+{
+ if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
+ return false;
+ if (!ata_id_u64(id, ATA_ID_LBA_CAPACITY_2))
+ return false;
+ return id[ATA_ID_COMMAND_SET_2] & (1 << 10);
+}
+
+static inline bool ata_id_lba48_enabled(const u16 *id)
+{
+ if (ata_id_has_lba48(id) == 0)
+ return false;
+ if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_CFS_ENABLE_2] & (1 << 10);
+}
+
+static inline bool ata_id_hpa_enabled(const u16 *id)
+{
+ /* Yes children, word 83 valid bits cover word 82 data */
+ if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
+ return false;
+ /* And 87 covers 85-87 */
+ if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
+ return false;
+ /* Check command sets enabled as well as supported */
+ if ((id[ATA_ID_CFS_ENABLE_1] & (1 << 10)) == 0)
+ return false;
+ return id[ATA_ID_COMMAND_SET_1] & (1 << 10);
+}
+
+static inline bool ata_id_has_wcache(const u16 *id)
+{
+ /* Yes children, word 83 valid bits cover word 82 data */
+ if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_COMMAND_SET_1] & (1 << 5);
+}
+
+static inline bool ata_id_has_pm(const u16 *id)
+{
+ if ((id[ATA_ID_COMMAND_SET_2] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_COMMAND_SET_1] & (1 << 3);
+}
+
+static inline bool ata_id_rahead_enabled(const u16 *id)
+{
+ if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_CFS_ENABLE_1] & (1 << 6);
+}
+
+static inline bool ata_id_wcache_enabled(const u16 *id)
+{
+ if ((id[ATA_ID_CSF_DEFAULT] & 0xC000) != 0x4000)
+ return false;
+ return id[ATA_ID_CFS_ENABLE_1] & (1 << 5);
+}
+
+static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
+{
+ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ return false;
+ return id[ATA_ID_COMMAND_SET_3] & (1 << 3);
+}
+
+static inline bool ata_id_has_sense_reporting(const u16 *id)
+{
+ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ return false;
+ return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+}
+
+static inline bool ata_id_sense_reporting_enabled(const u16 *id)
+{
+ if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+ return false;
+ return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+}
+
+/**
+ * ata_id_major_version - get ATA level of drive
+ * @id: Identify data
+ *
+ * Caveats:
+ * ATA-1 considers identify optional
+ * ATA-2 introduces mandatory identify
+ * ATA-3 introduces word 80 and accurate reporting
+ *
+ * The practical impact of this is that ata_id_major_version cannot
+ * reliably report on drives below ATA3.
+ */
+
+static inline unsigned int ata_id_major_version(const u16 *id)
+{
+ unsigned int mver;
+
+ if (id[ATA_ID_MAJOR_VER] == 0xFFFF)
+ return 0;
+
+ for (mver = 14; mver >= 1; mver--)
+ if (id[ATA_ID_MAJOR_VER] & (1 << mver))
+ break;
+ return mver;
+}
+
+static inline bool ata_id_is_sata(const u16 *id)
+{
+ /*
+	 * See if word 93 is 0 AND the drive is at least ATA-5 compatible,
+	 * verifying word 80 by casting it to a signed type --
+ * this trick allows us to filter out the reserved values of
+ * 0x0000 and 0xffff along with the earlier ATA revisions...
+ */
+ if (id[ATA_ID_HW_CONFIG] == 0 && (short)id[ATA_ID_MAJOR_VER] >= 0x0020)
+ return true;
+ return false;
+}
+
+static inline bool ata_id_has_tpm(const u16 *id)
+{
+ /* The TPM bits are only valid on ATA8 */
+ if (ata_id_major_version(id) < 8)
+ return false;
+ if ((id[48] & 0xC000) != 0x4000)
+ return false;
+ return id[48] & (1 << 0);
+}
+
+static inline bool ata_id_has_dword_io(const u16 *id)
+{
+ /* ATA 8 reuses this flag for "trusted" computing */
+ if (ata_id_major_version(id) > 7)
+ return false;
+ return id[ATA_ID_DWORD_IO] & (1 << 0);
+}
+
+static inline bool ata_id_has_unload(const u16 *id)
+{
+ if (ata_id_major_version(id) >= 7 &&
+ (id[ATA_ID_CFSSE] & 0xC000) == 0x4000 &&
+ id[ATA_ID_CFSSE] & (1 << 13))
+ return true;
+ return false;
+}
+
+static inline bool ata_id_has_wwn(const u16 *id)
+{
+ return (id[ATA_ID_CSF_DEFAULT] & 0xC100) == 0x4100;
+}
+
+static inline int ata_id_form_factor(const u16 *id)
+{
+ u16 val = id[168];
+
+ if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff)
+ return 0;
+
+ val &= 0xf;
+
+ if (val > 5)
+ return 0;
+
+ return val;
+}
+
+static inline int ata_id_rotation_rate(const u16 *id)
+{
+ u16 val = id[217];
+
+ if (ata_id_major_version(id) < 7 || val == 0 || val == 0xffff)
+ return 0;
+
+ if (val > 1 && val < 0x401)
+ return 0;
+
+ return val;
+}
+
+static inline bool ata_id_has_ncq_send_and_recv(const u16 *id)
+{
+ return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6);
+}
+
+static inline bool ata_id_has_trim(const u16 *id)
+{
+ if (ata_id_major_version(id) >= 7 &&
+ (id[ATA_ID_DATA_SET_MGMT] & 1))
+ return true;
+ return false;
+}
+
+static inline bool ata_id_has_zero_after_trim(const u16 *id)
+{
+ /* DSM supported, deterministic read, and read zero after trim set */
+ if (ata_id_has_trim(id) &&
+ (id[ATA_ID_ADDITIONAL_SUPP] & 0x4020) == 0x4020)
+ return true;
+
+ return false;
+}
+
+static inline bool ata_id_current_chs_valid(const u16 *id)
+{
+ /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
+ has not been issued to the device then the values of
+ id[ATA_ID_CUR_CYLS] to id[ATA_ID_CUR_SECTORS] are vendor specific. */
+ return (id[ATA_ID_FIELD_VALID] & 1) && /* Current translation valid */
+ id[ATA_ID_CUR_CYLS] && /* cylinders in current translation */
+ id[ATA_ID_CUR_HEADS] && /* heads in current translation */
+ id[ATA_ID_CUR_HEADS] <= 16 &&
+ id[ATA_ID_CUR_SECTORS]; /* sectors in current translation */
+}
+
+static inline bool ata_id_is_cfa(const u16 *id)
+{
+ if ((id[ATA_ID_CONFIG] == 0x848A) || /* Traditional CF */
+ (id[ATA_ID_CONFIG] == 0x844A)) /* Delkin Devices CF */
+ return true;
+ /*
+	 * CF specs no longer require a specific value in word 0, yet they
+	 * forbid reporting the ATA version in word 80 and instead require CFA
+	 * feature set support to be indicated in word 83 in this case.
+	 * Unfortunately, some cards follow only one of these requirements,
+ * and while those that don't indicate CFA feature support need some
+ * sort of quirk list, it seems impractical for the ones that do...
+ */
+ return (id[ATA_ID_COMMAND_SET_2] & 0xC004) == 0x4004;
+}
+
+static inline bool ata_id_is_ssd(const u16 *id)
+{
+ return id[ATA_ID_ROT_SPEED] == 0x01;
+}
+
+static inline bool ata_id_pio_need_iordy(const u16 *id, const u8 pio)
+{
+ /* CF spec. r4.1 Table 22 says no IORDY on PIO5 and PIO6. */
+ if (pio > 4 && ata_id_is_cfa(id))
+ return false;
+ /* For PIO3 and higher it is mandatory. */
+ if (pio > 2)
+ return true;
+ /* Turn it on when possible. */
+ return ata_id_has_iordy(id);
+}
+
+static inline bool ata_drive_40wire(const u16 *dev_id)
+{
+ if (ata_id_is_sata(dev_id))
+ return false; /* SATA */
+ if ((dev_id[ATA_ID_HW_CONFIG] & 0xE000) == 0x6000)
+ return false; /* 80 wire */
+ return true;
+}
+
+static inline bool ata_drive_40wire_relaxed(const u16 *dev_id)
+{
+ if ((dev_id[ATA_ID_HW_CONFIG] & 0x2000) == 0x2000)
+ return false; /* 80 wire */
+ return true;
+}
+
+static inline int atapi_cdb_len(const u16 *dev_id)
+{
+ u16 tmp = dev_id[ATA_ID_CONFIG] & 0x3;
+ switch (tmp) {
+ case 0: return 12;
+ case 1: return 16;
+ default: return -1;
+ }
+}
+
+static inline int atapi_command_packet_set(const u16 *dev_id)
+{
+ return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
+}
+
+static inline bool atapi_id_dmadir(const u16 *dev_id)
+{
+ return ata_id_major_version(dev_id) >= 7 && (dev_id[62] & 0x8000);
+}
+
+/*
+ * ata_id_is_lba_capacity_ok() performs a sanity check on
+ * the claimed LBA capacity value for the device.
+ *
+ * Returns true if the LBA capacity looks sensible, false otherwise.
+ *
+ * It is called only once for each device.
+ */
+static inline bool ata_id_is_lba_capacity_ok(u16 *id)
+{
+ unsigned long lba_sects, chs_sects, head, tail;
+
+ /* No non-LBA info .. so valid! */
+ if (id[ATA_ID_CYLS] == 0)
+ return true;
+
+ lba_sects = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
+
+ /*
+ * The ATA spec tells large drives to return
+ * C/H/S = 16383/16/63 independent of their size.
+ * Some drives can be jumpered to use 15 heads instead of 16.
+ * Some drives can be jumpered to use 4092 cyls instead of 16383.
+ */
+ if ((id[ATA_ID_CYLS] == 16383 ||
+ (id[ATA_ID_CYLS] == 4092 && id[ATA_ID_CUR_CYLS] == 16383)) &&
+ id[ATA_ID_SECTORS] == 63 &&
+ (id[ATA_ID_HEADS] == 15 || id[ATA_ID_HEADS] == 16) &&
+ (lba_sects >= 16383 * 63 * id[ATA_ID_HEADS]))
+ return true;
+
+ chs_sects = id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * id[ATA_ID_SECTORS];
+
+ /* perform a rough sanity check on lba_sects: within 10% is OK */
+ if (lba_sects - chs_sects < chs_sects/10)
+ return true;
+
+ /* some drives have the word order reversed */
+ head = (lba_sects >> 16) & 0xffff;
+ tail = lba_sects & 0xffff;
+ lba_sects = head | (tail << 16);
+
+ if (lba_sects - chs_sects < chs_sects/10) {
+ *(__le32 *)&id[ATA_ID_LBA_CAPACITY] = __cpu_to_le32(lba_sects);
+ return true; /* LBA capacity is (now) good */
+ }
+
+ return false; /* LBA capacity value may be bad */
+}
+
+static inline void ata_id_to_hd_driveid(u16 *id)
+{
+#ifdef __BIG_ENDIAN
+ /* accessed in struct hd_driveid as 8-bit values */
+ id[ATA_ID_MAX_MULTSECT] = __cpu_to_le16(id[ATA_ID_MAX_MULTSECT]);
+ id[ATA_ID_CAPABILITY] = __cpu_to_le16(id[ATA_ID_CAPABILITY]);
+ id[ATA_ID_OLD_PIO_MODES] = __cpu_to_le16(id[ATA_ID_OLD_PIO_MODES]);
+ id[ATA_ID_OLD_DMA_MODES] = __cpu_to_le16(id[ATA_ID_OLD_DMA_MODES]);
+ id[ATA_ID_MULTSECT] = __cpu_to_le16(id[ATA_ID_MULTSECT]);
+
+ /* as 32-bit values */
+ *(u32 *)&id[ATA_ID_LBA_CAPACITY] = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
+ *(u32 *)&id[ATA_ID_SPG] = ata_id_u32(id, ATA_ID_SPG);
+
+ /* as 64-bit value */
+ *(u64 *)&id[ATA_ID_LBA_CAPACITY_2] =
+ ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
+#endif
+}
+
+/*
+ * Write LBA Range Entries to the buffer that will cover the extent from
+ * sector to sector + count. This is used for TRIM and for ADD LBA(S)
+ * TO NV CACHE PINNED SET.
+ */
+static inline unsigned ata_set_lba_range_entries(void *_buffer,
+ unsigned buf_size, u64 sector, unsigned long count)
+{
+ __le64 *buffer = _buffer;
+ unsigned i = 0, used_bytes;
+
+	while (i < buf_size / 8) { /* 6-byte LBA + 2-byte range per entry */
+ u64 entry = sector |
+ ((u64)(count > 0xffff ? 0xffff : count) << 48);
+ buffer[i++] = __cpu_to_le64(entry);
+ if (count <= 0xffff)
+ break;
+ count -= 0xffff;
+ sector += 0xffff;
+ }
+
+ used_bytes = ALIGN(i * 8, 512);
+ memset(buffer + i, 0, used_bytes - i * 8);
+ return used_bytes;
+}
+
+static inline bool ata_ok(u8 status)
+{
+ return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
+ == ATA_DRDY);
+}
+
+static inline bool lba_28_ok(u64 block, u32 n_block)
+{
+ /* check the ending block number: must be LESS THAN 0x0fffffff */
+ return ((block + n_block) < ((1 << 28) - 1)) && (n_block <= 256);
+}
+
+static inline bool lba_48_ok(u64 block, u32 n_block)
+{
+ /* check the ending block number */
+ return ((block + n_block - 1) < ((u64)1 << 48)) && (n_block <= 65536);
+}
+
+#define sata_pmp_gscr_vendor(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] & 0xffff)
+#define sata_pmp_gscr_devid(gscr) ((gscr)[SATA_PMP_GSCR_PROD_ID] >> 16)
+#define sata_pmp_gscr_rev(gscr) (((gscr)[SATA_PMP_GSCR_REV] >> 8) & 0xff)
+#define sata_pmp_gscr_ports(gscr) ((gscr)[SATA_PMP_GSCR_PORT_INFO] & 0xf)
+
+#endif /* __LINUX_ATA_H__ */
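All of the ata_id_*() helpers above operate on the raw 256-word IDENTIFY DEVICE buffer. A short illustrative sketch (not part of this patch) of how a driver might summarize one:

#include <linux/ata.h>
#include <linux/printk.h>

/* Summarize a few capabilities from a 256-word IDENTIFY DEVICE buffer. */
static void demo_dump_id(const u16 *id)
{
	u64 sectors;

	if (ata_id_has_lba48(id))
		sectors = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
	else
		sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);

	pr_info("ATA-%u device, %llu sectors of %u bytes%s%s\n",
		ata_id_major_version(id),
		(unsigned long long)sectors,
		ata_id_logical_sector_size(id),
		ata_id_is_ssd(id) ? ", SSD" : "",
		ata_id_has_trim(id) ? ", TRIM" : "");
}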
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
new file mode 100644
index 000000000..619d9e78e
--- /dev/null
+++ b/include/linux/ata_platform.h
@@ -0,0 +1,30 @@
+#ifndef __LINUX_ATA_PLATFORM_H
+#define __LINUX_ATA_PLATFORM_H
+
+struct pata_platform_info {
+ /*
+ * I/O port shift, for platforms with ports that are
+ * constantly spaced and need larger than the 1-byte
+ * spacing used by ata_std_ports().
+ */
+ unsigned int ioport_shift;
+};
+
+struct scsi_host_template;
+
+extern int __pata_platform_probe(struct device *dev,
+ struct resource *io_res,
+ struct resource *ctl_res,
+ struct resource *irq_res,
+ unsigned int ioport_shift,
+ int __pio_mask,
+ struct scsi_host_template *sht);
+
+/*
+ * Marvell SATA private data
+ */
+struct mv_sata_platform_data {
+ int n_ports; /* number of sata ports */
+};
+
+#endif /* __LINUX_ATA_PLATFORM_H */
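A board file typically feeds pata_platform_info to the generic "pata_platform" driver through platform_data. The sketch below (not part of this patch) assumes memory-mapped taskfile/ctl regions with registers spaced 4 bytes apart (ioport_shift = 2); all addresses and the IRQ number are hypothetical.

#include <linux/ata_platform.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/kernel.h>

static struct pata_platform_info demo_pata_info = {
	.ioport_shift	= 2,	/* registers every 4 bytes */
};

static struct resource demo_pata_resources[] = {
	{ .start = 0x10000000, .end = 0x1000001f, .flags = IORESOURCE_MEM },
	{ .start = 0x10000020, .end = 0x10000023, .flags = IORESOURCE_MEM },
	{ .start = 42,         .end = 42,         .flags = IORESOURCE_IRQ },
};

static struct platform_device demo_pata_device = {
	.name		= "pata_platform",
	.id		= -1,
	.resource	= demo_pata_resources,
	.num_resources	= ARRAY_SIZE(demo_pata_resources),
	.dev = {
		.platform_data = &demo_pata_info,
	},
};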
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
new file mode 100644
index 000000000..73fd8b7e9
--- /dev/null
+++ b/include/linux/atalk.h
@@ -0,0 +1,168 @@
+#ifndef __LINUX_ATALK_H__
+#define __LINUX_ATALK_H__
+
+
+#include <net/sock.h>
+#include <uapi/linux/atalk.h>
+
+struct atalk_route {
+ struct net_device *dev;
+ struct atalk_addr target;
+ struct atalk_addr gateway;
+ int flags;
+ struct atalk_route *next;
+};
+
+/**
+ * struct atalk_iface - AppleTalk Interface
+ * @dev - Network device associated with this interface
+ * @address - Our address
+ * @status - What are we doing?
+ * @nets - Associated direct netrange
+ * @next - next element in the list of interfaces
+ */
+struct atalk_iface {
+ struct net_device *dev;
+ struct atalk_addr address;
+ int status;
+#define ATIF_PROBE 1 /* Probing for an address */
+#define ATIF_PROBE_FAIL 2 /* Probe collided */
+ struct atalk_netrange nets;
+ struct atalk_iface *next;
+};
+
+struct atalk_sock {
+ /* struct sock has to be the first member of atalk_sock */
+ struct sock sk;
+ __be16 dest_net;
+ __be16 src_net;
+ unsigned char dest_node;
+ unsigned char src_node;
+ unsigned char dest_port;
+ unsigned char src_port;
+};
+
+static inline struct atalk_sock *at_sk(struct sock *sk)
+{
+ return (struct atalk_sock *)sk;
+}
+
+struct ddpehdr {
+ __be16 deh_len_hops; /* lower 10 bits are length, next 4 - hops */
+ __be16 deh_sum;
+ __be16 deh_dnet;
+ __be16 deh_snet;
+ __u8 deh_dnode;
+ __u8 deh_snode;
+ __u8 deh_dport;
+ __u8 deh_sport;
+ /* And netatalk apps expect to stick the type in themselves */
+};
+
+static __inline__ struct ddpehdr *ddp_hdr(struct sk_buff *skb)
+{
+ return (struct ddpehdr *)skb_transport_header(skb);
+}
+
+/* AppleTalk AARP headers */
+struct elapaarp {
+ __be16 hw_type;
+#define AARP_HW_TYPE_ETHERNET 1
+#define AARP_HW_TYPE_TOKENRING 2
+ __be16 pa_type;
+ __u8 hw_len;
+ __u8 pa_len;
+#define AARP_PA_ALEN 4
+ __be16 function;
+#define AARP_REQUEST 1
+#define AARP_REPLY 2
+#define AARP_PROBE 3
+ __u8 hw_src[ETH_ALEN];
+ __u8 pa_src_zero;
+ __be16 pa_src_net;
+ __u8 pa_src_node;
+ __u8 hw_dst[ETH_ALEN];
+ __u8 pa_dst_zero;
+ __be16 pa_dst_net;
+ __u8 pa_dst_node;
+} __attribute__ ((packed));
+
+static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
+{
+ return (struct elapaarp *)skb_transport_header(skb);
+}
+
+/* Not specified - how long till we drop a resolved entry */
+#define AARP_EXPIRY_TIME (5 * 60 * HZ)
+/* Size of hash table */
+#define AARP_HASH_SIZE 16
+/* Fast retransmission timer when resolving */
+#define AARP_TICK_TIME (HZ / 5)
+/* Send 10 requests then give up (2 seconds) */
+#define AARP_RETRANSMIT_LIMIT 10
+/*
+ * Some value bigger than total retransmit time + a bit for last reply to
+ * appear and to stop continual requests
+ */
+#define AARP_RESOLVE_TIME (10 * HZ)
+
+extern struct datalink_proto *ddp_dl, *aarp_dl;
+extern void aarp_proto_init(void);
+
+/* Inter module exports */
+
+/* Given a device, find its atif control structure */
+static inline struct atalk_iface *atalk_find_dev(struct net_device *dev)
+{
+ return dev->atalk_ptr;
+}
+
+extern struct atalk_addr *atalk_find_dev_addr(struct net_device *dev);
+extern struct net_device *atrtr_get_dev(struct atalk_addr *sa);
+extern int aarp_send_ddp(struct net_device *dev,
+ struct sk_buff *skb,
+ struct atalk_addr *sa, void *hwaddr);
+extern void aarp_device_down(struct net_device *dev);
+extern void aarp_probe_network(struct atalk_iface *atif);
+extern int aarp_proxy_probe_network(struct atalk_iface *atif,
+ struct atalk_addr *sa);
+extern void aarp_proxy_remove(struct net_device *dev,
+ struct atalk_addr *sa);
+
+extern void aarp_cleanup_module(void);
+
+extern struct hlist_head atalk_sockets;
+extern rwlock_t atalk_sockets_lock;
+
+extern struct atalk_route *atalk_routes;
+extern rwlock_t atalk_routes_lock;
+
+extern struct atalk_iface *atalk_interfaces;
+extern rwlock_t atalk_interfaces_lock;
+
+extern struct atalk_route atrtr_default;
+
+extern const struct file_operations atalk_seq_arp_fops;
+
+extern int sysctl_aarp_expiry_time;
+extern int sysctl_aarp_tick_time;
+extern int sysctl_aarp_retransmit_limit;
+extern int sysctl_aarp_resolve_time;
+
+#ifdef CONFIG_SYSCTL
+extern void atalk_register_sysctl(void);
+extern void atalk_unregister_sysctl(void);
+#else
+#define atalk_register_sysctl() do { } while(0)
+#define atalk_unregister_sysctl() do { } while(0)
+#endif
+
+#ifdef CONFIG_PROC_FS
+extern int atalk_proc_init(void);
+extern void atalk_proc_exit(void);
+#else
+#define atalk_proc_init() ({ 0; })
+#define atalk_proc_exit() do { } while(0)
+#endif /* CONFIG_PROC_FS */
+
+#endif /* __LINUX_ATALK_H__ */
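ddp_hdr() simply recasts the transport header. A small sketch (not part of this patch) of decoding the length/hop-count word it exposes, assuming the caller has already set the transport header on the skb:

#include <linux/atalk.h>
#include <linux/printk.h>

/* Pull the length and hop count out of a received DDP frame. */
static void demo_inspect_ddp(struct sk_buff *skb)
{
	struct ddpehdr *ddp = ddp_hdr(skb);
	u16 len_hops = ntohs(ddp->deh_len_hops);

	pr_debug("DDP %u.%u -> %u.%u, len %u, %u hops\n",
		 ntohs(ddp->deh_snet), ddp->deh_snode,
		 ntohs(ddp->deh_dnet), ddp->deh_dnode,
		 len_hops & 0x3ff,		/* lower 10 bits: length */
		 (len_hops >> 10) & 0xf);	/* next 4 bits: hops */
}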
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
new file mode 100644
index 000000000..33eb274cd
--- /dev/null
+++ b/include/linux/ath9k_platform.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2008 Atheros Communications Inc.
+ * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _LINUX_ATH9K_PLATFORM_H
+#define _LINUX_ATH9K_PLATFORM_H
+
+#define ATH9K_PLAT_EEP_MAX_WORDS 2048
+
+struct ath9k_platform_data {
+ const char *eeprom_name;
+
+ u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
+ u8 *macaddr;
+
+ int led_pin;
+ u32 gpio_mask;
+ u32 gpio_val;
+
+ bool endian_check;
+ bool is_clk_25mhz;
+ bool tx_gain_buffalo;
+ bool disable_2ghz;
+ bool disable_5ghz;
+
+ int (*get_mac_revision)(void);
+ int (*external_reset)(void);
+
+ bool use_eeprom;
+};
+
+#endif /* _LINUX_ATH9K_PLATFORM_H */
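A board file would typically fill one of these in and hand it to the ath9k device as platform data. A minimal sketch, assuming hypothetical GPIO numbers and EEPROM image name; exactly how the structure is attached (usually via dev.platform_data of the wireless device) depends on the platform:

static u8 board_wifi_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

static struct ath9k_platform_data board_ath9k_pdata = {
	.eeprom_name	= "ath9k-eeprom.bin",	/* hypothetical image name */
	.macaddr	= board_wifi_mac,
	.led_pin	= 1,			/* hypothetical LED GPIO */
	.is_clk_25mhz	= true,
	.use_eeprom	= false,
};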
diff --git a/include/linux/atm.h b/include/linux/atm.h
new file mode 100644
index 000000000..30006c435
--- /dev/null
+++ b/include/linux/atm.h
@@ -0,0 +1,15 @@
+/* atm.h - general ATM declarations */
+#ifndef _LINUX_ATM_H
+#define _LINUX_ATM_H
+
+#include <uapi/linux/atm.h>
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_atmif_sioc {
+ int number;
+ int length;
+ compat_uptr_t arg;
+};
+#endif
+#endif
diff --git a/include/linux/atm_suni.h b/include/linux/atm_suni.h
new file mode 100644
index 000000000..84f3aab54
--- /dev/null
+++ b/include/linux/atm_suni.h
@@ -0,0 +1,12 @@
+/* atm_suni.h - Driver-specific declarations of the SUNI driver (for use by
+ driver-specific utilities) */
+
+/* Written 1998,2000 by Werner Almesberger, EPFL ICA */
+
+
+#ifndef LINUX_ATM_SUNI_H
+#define LINUX_ATM_SUNI_H
+
+/* everything obsoleted */
+
+#endif
diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h
new file mode 100644
index 000000000..db6b65fc0
--- /dev/null
+++ b/include/linux/atm_tcp.h
@@ -0,0 +1,21 @@
+/* atm_tcp.h - Driver-specific declarations of the ATMTCP driver (for use by
+ driver-specific utilities) */
+
+/* Written 1997-2000 by Werner Almesberger, EPFL LRC/ICA */
+
+#ifndef LINUX_ATM_TCP_H
+#define LINUX_ATM_TCP_H
+
+#include <uapi/linux/atm_tcp.h>
+
+
+struct atm_tcp_ops {
+ int (*attach)(struct atm_vcc *vcc,int itf);
+ int (*create_persistent)(int itf);
+ int (*remove_persistent)(int itf);
+ struct module *owner;
+};
+
+extern struct atm_tcp_ops atm_tcp_ops;
+
+#endif
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
new file mode 100644
index 000000000..c1da539f5
--- /dev/null
+++ b/include/linux/atmdev.h
@@ -0,0 +1,317 @@
+/* atmdev.h - ATM device driver declarations and various related items */
+#ifndef LINUX_ATMDEV_H
+#define LINUX_ATMDEV_H
+
+
+#include <linux/wait.h> /* wait_queue_head_t */
+#include <linux/time.h> /* struct timeval */
+#include <linux/net.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h> /* struct sk_buff */
+#include <linux/uio.h>
+#include <net/sock.h>
+#include <linux/atomic.h>
+#include <uapi/linux/atmdev.h>
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+
+extern struct proc_dir_entry *atm_proc_root;
+#endif
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_atm_iobuf {
+ int length;
+ compat_uptr_t buffer;
+};
+#endif
+
+struct k_atm_aal_stats {
+#define __HANDLE_ITEM(i) atomic_t i
+ __AAL_STAT_ITEMS
+#undef __HANDLE_ITEM
+};
+
+
+struct k_atm_dev_stats {
+ struct k_atm_aal_stats aal0;
+ struct k_atm_aal_stats aal34;
+ struct k_atm_aal_stats aal5;
+};
+
+struct device;
+
+enum {
+ ATM_VF_ADDR, /* Address is in use. Set by anybody, cleared
+ by device driver. */
+ ATM_VF_READY, /* VC is ready to transfer data. Set by device
+ driver, cleared by anybody. */
+ ATM_VF_PARTIAL, /* resources are bound to PVC (partial PVC
+ setup), controlled by socket layer */
+ ATM_VF_REGIS, /* registered with demon, controlled by SVC
+ socket layer */
+ ATM_VF_BOUND, /* local SAP is set, controlled by SVC socket
+ layer */
+ ATM_VF_RELEASED, /* demon has indicated/requested release,
+ controlled by SVC socket layer */
+ ATM_VF_HASQOS, /* QOS parameters have been set */
+ ATM_VF_LISTEN, /* socket is used for listening */
+ ATM_VF_META, /* SVC socket isn't used for normal data
+ traffic and doesn't depend on signaling
+ to be available */
+ ATM_VF_SESSION, /* VCC is p2mp session control descriptor */
+ ATM_VF_HASSAP, /* SAP has been set */
+ ATM_VF_CLOSE, /* asynchronous close - treat like VF_RELEASED*/
+ ATM_VF_WAITING, /* waiting for reply from sigd */
+ ATM_VF_IS_CLIP, /* in use by CLIP protocol */
+};
+
+
+#define ATM_VF2VS(flags) \
+ (test_bit(ATM_VF_READY,&(flags)) ? ATM_VS_CONNECTED : \
+ test_bit(ATM_VF_RELEASED,&(flags)) ? ATM_VS_CLOSING : \
+ test_bit(ATM_VF_LISTEN,&(flags)) ? ATM_VS_LISTEN : \
+ test_bit(ATM_VF_REGIS,&(flags)) ? ATM_VS_INUSE : \
+ test_bit(ATM_VF_BOUND,&(flags)) ? ATM_VS_BOUND : ATM_VS_IDLE)
+
+
+enum {
+ ATM_DF_REMOVED, /* device was removed from atm_devs list */
+};
+
+
+#define ATM_PHY_SIG_LOST 0 /* no carrier/light */
+#define ATM_PHY_SIG_UNKNOWN 1 /* carrier/light status is unknown */
+#define ATM_PHY_SIG_FOUND 2 /* carrier/light okay */
+
+#define ATM_ATMOPT_CLP 1 /* set CLP bit */
+
+struct atm_vcc {
+ /* struct sock has to be the first member of atm_vcc */
+ struct sock sk;
+ unsigned long flags; /* VCC flags (ATM_VF_*) */
+ short vpi; /* VPI and VCI (types must be equal */
+ /* with sockaddr) */
+ int vci;
+ unsigned long aal_options; /* AAL layer options */
+ unsigned long atm_options; /* ATM layer options */
+ struct atm_dev *dev; /* device back pointer */
+ struct atm_qos qos; /* QOS */
+ struct atm_sap sap; /* SAP */
+ void (*release_cb)(struct atm_vcc *vcc); /* release_sock callback */
+ void (*push)(struct atm_vcc *vcc,struct sk_buff *skb);
+ void (*pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* optional */
+ int (*push_oam)(struct atm_vcc *vcc,void *cell);
+ int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
+ void *dev_data; /* per-device data */
+ void *proto_data; /* per-protocol data */
+ struct k_atm_aal_stats *stats; /* pointer to AAL stats group */
+ struct module *owner; /* owner of ->push function */
+ /* SVC part --- may move later ------------------------------------- */
+ short itf; /* interface number */
+ struct sockaddr_atmsvc local;
+ struct sockaddr_atmsvc remote;
+ /* Multipoint part ------------------------------------------------- */
+ struct atm_vcc *session; /* session VCC descriptor */
+ /* Other stuff ----------------------------------------------------- */
+ void *user_back; /* user backlink - not touched by */
+ /* native ATM stack. Currently used */
+ /* by CLIP and sch_atm. */
+};
+
+static inline struct atm_vcc *atm_sk(struct sock *sk)
+{
+ return (struct atm_vcc *)sk;
+}
+
+static inline struct atm_vcc *ATM_SD(struct socket *sock)
+{
+ return atm_sk(sock->sk);
+}
+
+static inline struct sock *sk_atm(struct atm_vcc *vcc)
+{
+ return (struct sock *)vcc;
+}
+
+struct atm_dev_addr {
+ struct sockaddr_atmsvc addr; /* ATM address */
+ struct list_head entry; /* next address */
+};
+
+enum atm_addr_type_t { ATM_ADDR_LOCAL, ATM_ADDR_LECS };
+
+struct atm_dev {
+ const struct atmdev_ops *ops; /* device operations; NULL if unused */
+ const struct atmphy_ops *phy; /* PHY operations, may be undefined */
+ /* (NULL) */
+ const char *type; /* device type name */
+ int number; /* device index */
+ void *dev_data; /* per-device data */
+ void *phy_data; /* private PHY data */
+ unsigned long flags; /* device flags (ATM_DF_*) */
+ struct list_head local; /* local ATM addresses */
+ struct list_head lecs; /* LECS ATM addresses learned via ILMI */
+ unsigned char esi[ESI_LEN]; /* ESI ("MAC" addr) */
+ struct atm_cirange ci_range; /* VPI/VCI range */
+ struct k_atm_dev_stats stats; /* statistics */
+ char signal; /* signal status (ATM_PHY_SIG_*) */
+ int link_rate; /* link rate (default: OC3) */
+ atomic_t refcnt; /* reference count */
+ spinlock_t lock; /* protect internal members */
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc_entry; /* proc entry */
+ char *proc_name; /* proc entry name */
+#endif
+ struct device class_dev; /* sysfs device */
+ struct list_head dev_list; /* linkage */
+};
+
+
+/* OF: send_Oam Flags */
+
+#define ATM_OF_IMMED 1 /* Attempt immediate delivery */
+#define ATM_OF_INRATE 2 /* Attempt in-rate delivery */
+
+
+/*
+ * ioctl, getsockopt, and setsockopt are optional and can be set to NULL.
+ */
+
+struct atmdev_ops { /* only send is required */
+ void (*dev_close)(struct atm_dev *dev);
+ int (*open)(struct atm_vcc *vcc);
+ void (*close)(struct atm_vcc *vcc);
+ int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg);
+#ifdef CONFIG_COMPAT
+ int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
+ void __user *arg);
+#endif
+ int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
+ void __user *optval,int optlen);
+ int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
+ void __user *optval,unsigned int optlen);
+ int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
+ int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
+ void (*phy_put)(struct atm_dev *dev,unsigned char value,
+ unsigned long addr);
+ unsigned char (*phy_get)(struct atm_dev *dev,unsigned long addr);
+ int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
+ int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
+ struct module *owner;
+};
+
+struct atmphy_ops {
+ int (*start)(struct atm_dev *dev);
+ int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg);
+ void (*interrupt)(struct atm_dev *dev);
+ int (*stop)(struct atm_dev *dev);
+};
+
+struct atm_skb_data {
+ struct atm_vcc *vcc; /* ATM VCC */
+ unsigned long atm_options; /* ATM layer options */
+};
+
+#define VCC_HTABLE_SIZE 32
+
+extern struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
+extern rwlock_t vcc_sklist_lock;
+
+#define ATM_SKB(skb) (((struct atm_skb_data *) (skb)->cb))
+
+struct atm_dev *atm_dev_register(const char *type, struct device *parent,
+ const struct atmdev_ops *ops,
+ int number, /* -1 == pick first available */
+ unsigned long *flags);
+struct atm_dev *atm_dev_lookup(int number);
+void atm_dev_deregister(struct atm_dev *dev);
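A rough sketch of how a driver ties these pieces together, following the comments above: only .send is strictly required in atmdev_ops, and passing -1 as the device number asks for the first free index. All mydrv_* names are hypothetical:

static int mydrv_open(struct atm_vcc *vcc)
{
	/* allocate per-VCC state, program the hardware channel */
	return 0;
}

static void mydrv_close(struct atm_vcc *vcc)
{
	/* tear down per-VCC state */
}

static int mydrv_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	/* hand skb to the hardware; call vcc->pop(vcc, skb) (if set) when done */
	return 0;
}

static const struct atmdev_ops mydrv_atmdev_ops = {
	.open	= mydrv_open,
	.close	= mydrv_close,
	.send	= mydrv_send,
	.owner	= THIS_MODULE,
};

static int mydrv_register(struct device *parent)
{
	struct atm_dev *adev;

	adev = atm_dev_register("mydrv", parent, &mydrv_atmdev_ops, -1, NULL);
	if (!adev)			/* assuming NULL on failure */
		return -ENODEV;
	/* fill adev->ci_range, adev->esi, adev->dev_data, ... here */
	return 0;
}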
+
+/* atm_dev_signal_change
+ *
+ * Propagate lower layer signal change in atm_dev->signal to netdevice.
+ * The event will be sent via a notifier call chain.
+ */
+void atm_dev_signal_change(struct atm_dev *dev, char signal);
+
+void vcc_insert_socket(struct sock *sk);
+
+void atm_dev_release_vccs(struct atm_dev *dev);
+
+
+static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
+{
+ atomic_add(truesize, &sk_atm(vcc)->sk_rmem_alloc);
+}
+
+
+static inline void atm_return(struct atm_vcc *vcc,int truesize)
+{
+ atomic_sub(truesize, &sk_atm(vcc)->sk_rmem_alloc);
+}
+
+
+static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
+{
+ return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) <
+ sk_atm(vcc)->sk_sndbuf;
+}
+
+
+static inline void atm_dev_hold(struct atm_dev *dev)
+{
+ atomic_inc(&dev->refcnt);
+}
+
+
+static inline void atm_dev_put(struct atm_dev *dev)
+{
+ if (atomic_dec_and_test(&dev->refcnt)) {
+ BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags));
+ if (dev->ops->dev_close)
+ dev->ops->dev_close(dev);
+ put_device(&dev->class_dev);
+ }
+}
+
+
+int atm_charge(struct atm_vcc *vcc,int truesize);
+struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
+ gfp_t gfp_flags);
+int atm_pcr_goal(const struct atm_trafprm *tp);
+
+void vcc_release_async(struct atm_vcc *vcc, int reply);
+
+struct atm_ioctl {
+ struct module *owner;
+ /* A module reference is kept if appropriate over this call.
+ * Return -ENOIOCTLCMD if you don't handle it. */
+ int (*ioctl)(struct socket *, unsigned int cmd, unsigned long arg);
+ struct list_head list;
+};
+
+/**
+ * register_atm_ioctl - register handler for ioctl operations
+ *
+ * Special (non-device) handlers of ioctl's should
+ * register here. If you're a normal device, you should
+ * set .ioctl in your atmdev_ops instead.
+ */
+void register_atm_ioctl(struct atm_ioctl *);
+
+/**
+ * deregister_atm_ioctl - remove the ioctl handler
+ */
+void deregister_atm_ioctl(struct atm_ioctl *);
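A minimal sketch of a non-device ioctl hook in the spirit of the comments above: return -ENOIOCTLCMD for anything not handled so other handlers get a chance. The command number and handler are hypothetical:

#define MY_PRIVATE_ATM_IOCTL	_IO('a', 0xf0)	/* hypothetical command */

static int my_atm_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	if (cmd != MY_PRIVATE_ATM_IOCTL)
		return -ENOIOCTLCMD;	/* not ours, let others try */
	/* ... handle the request ... */
	return 0;
}

static struct atm_ioctl my_ioctl_hook = {
	.owner	= THIS_MODULE,
	.ioctl	= my_atm_ioctl,
};

/* register_atm_ioctl(&my_ioctl_hook) at module init,
 * deregister_atm_ioctl(&my_ioctl_hook) at module exit.
 */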
+
+
+/* register_atmdevice_notifier - register atm_dev notify events
+ *
+ * Clients like br2684 will register notify events
+ * Currently we notify of signal found/lost
+ */
+int register_atmdevice_notifier(struct notifier_block *nb);
+void unregister_atmdevice_notifier(struct notifier_block *nb);
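A sketch of a notifier client such as the br2684 case mentioned above. It assumes the chain passes the new ATM_PHY_SIG_* state as the event value and the affected struct atm_dev as the argument; treat that calling convention as an assumption, not something this header guarantees:

static int my_atm_dev_event(struct notifier_block *nb, unsigned long event,
			    void *arg)
{
	struct atm_dev *dev = arg;	/* assumed argument */

	if (event == ATM_PHY_SIG_LOST)
		pr_info("atm%d: carrier lost\n", dev->number);
	else if (event == ATM_PHY_SIG_FOUND)
		pr_info("atm%d: carrier found\n", dev->number);
	return NOTIFY_DONE;
}

static struct notifier_block my_atm_dev_nb = {
	.notifier_call = my_atm_dev_event,
};

/* register_atmdevice_notifier(&my_atm_dev_nb) on load,
 * unregister_atmdevice_notifier(&my_atm_dev_nb) on unload.
 */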
+
+#endif
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
new file mode 100644
index 000000000..9177947bf
--- /dev/null
+++ b/include/linux/atmel-mci.h
@@ -0,0 +1,43 @@
+#ifndef __LINUX_ATMEL_MCI_H
+#define __LINUX_ATMEL_MCI_H
+
+#include <linux/types.h>
+
+#define ATMCI_MAX_NR_SLOTS 2
+
+/**
+ * struct mci_slot_pdata - board-specific per-slot configuration
+ * @bus_width: Number of data lines wired up to the slot
+ * @detect_pin: GPIO pin wired to the card detect switch
+ * @wp_pin: GPIO pin wired to the write protect sensor
+ * @detect_is_active_high: The state of the detect pin when it is active
+ * @non_removable: The slot is not removable, only detect once
+ *
+ * If a given slot is not present on the board, @bus_width should be
+ * set to 0. The other fields are ignored in this case.
+ *
+ * Any pins that aren't available should be set to a negative value.
+ *
+ * Note that support for multiple slots is experimental -- some cards
+ * might get upset if we don't get the clock management exactly right.
+ * But in most cases, it should work just fine.
+ */
+struct mci_slot_pdata {
+ unsigned int bus_width;
+ int detect_pin;
+ int wp_pin;
+ bool detect_is_active_high;
+ bool non_removable;
+};
+
+/**
+ * struct mci_platform_data - board-specific MMC/SDcard configuration
+ * @dma_slave: DMA slave interface to use in data transfers.
+ * @slot: Per-slot configuration data.
+ */
+struct mci_platform_data {
+ struct mci_dma_data *dma_slave;
+ struct mci_slot_pdata slot[ATMCI_MAX_NR_SLOTS];
+};
+
+#endif /* __LINUX_ATMEL_MCI_H */
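A sketch of board code filling this in, following the conventions documented above: bus_width 0 marks an absent slot and negative pin numbers mark signals that are not wired. The GPIO number is hypothetical:

static struct mci_platform_data board_mci_data = {
	.slot[0] = {
		.bus_width		= 4,
		.detect_pin		= 42,	/* hypothetical card-detect GPIO */
		.wp_pin			= -1,	/* write protect not wired */
		.detect_is_active_high	= false,
	},
	/* .slot[1].bus_width stays 0: the second slot is not populated */
};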
diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h
new file mode 100644
index 000000000..7c0f65498
--- /dev/null
+++ b/include/linux/atmel-ssc.h
@@ -0,0 +1,333 @@
+#ifndef __INCLUDE_ATMEL_SSC_H
+#define __INCLUDE_ATMEL_SSC_H
+
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/io.h>
+
+struct atmel_ssc_platform_data {
+ int use_dma;
+ int has_fslen_ext;
+};
+
+struct ssc_device {
+ struct list_head list;
+ dma_addr_t phybase;
+ void __iomem *regs;
+ struct platform_device *pdev;
+ struct atmel_ssc_platform_data *pdata;
+ struct clk *clk;
+ int user;
+ int irq;
+ bool clk_from_rk_pin;
+};
+
+struct ssc_device * __must_check ssc_request(unsigned int ssc_num);
+void ssc_free(struct ssc_device *ssc);
+
+/* SSC register offsets */
+
+/* SSC Control Register */
+#define SSC_CR 0x00000000
+#define SSC_CR_RXDIS_SIZE 1
+#define SSC_CR_RXDIS_OFFSET 1
+#define SSC_CR_RXEN_SIZE 1
+#define SSC_CR_RXEN_OFFSET 0
+#define SSC_CR_SWRST_SIZE 1
+#define SSC_CR_SWRST_OFFSET 15
+#define SSC_CR_TXDIS_SIZE 1
+#define SSC_CR_TXDIS_OFFSET 9
+#define SSC_CR_TXEN_SIZE 1
+#define SSC_CR_TXEN_OFFSET 8
+
+/* SSC Clock Mode Register */
+#define SSC_CMR 0x00000004
+#define SSC_CMR_DIV_SIZE 12
+#define SSC_CMR_DIV_OFFSET 0
+
+/* SSC Receive Clock Mode Register */
+#define SSC_RCMR 0x00000010
+#define SSC_RCMR_CKG_SIZE 2
+#define SSC_RCMR_CKG_OFFSET 6
+#define SSC_RCMR_CKI_SIZE 1
+#define SSC_RCMR_CKI_OFFSET 5
+#define SSC_RCMR_CKO_SIZE 3
+#define SSC_RCMR_CKO_OFFSET 2
+#define SSC_RCMR_CKS_SIZE 2
+#define SSC_RCMR_CKS_OFFSET 0
+#define SSC_RCMR_PERIOD_SIZE 8
+#define SSC_RCMR_PERIOD_OFFSET 24
+#define SSC_RCMR_START_SIZE 4
+#define SSC_RCMR_START_OFFSET 8
+#define SSC_RCMR_STOP_SIZE 1
+#define SSC_RCMR_STOP_OFFSET 12
+#define SSC_RCMR_STTDLY_SIZE 8
+#define SSC_RCMR_STTDLY_OFFSET 16
+
+/* SSC Receive Frame Mode Register */
+#define SSC_RFMR 0x00000014
+#define SSC_RFMR_DATLEN_SIZE 5
+#define SSC_RFMR_DATLEN_OFFSET 0
+#define SSC_RFMR_DATNB_SIZE 4
+#define SSC_RFMR_DATNB_OFFSET 8
+#define SSC_RFMR_FSEDGE_SIZE 1
+#define SSC_RFMR_FSEDGE_OFFSET 24
+/*
+ * The FSLEN_EXT field exists on at91sam9rl, at91sam9g10,
+ * at91sam9g20, at91sam9g45 and newer SoCs
+ */
+#define SSC_RFMR_FSLEN_EXT_SIZE 4
+#define SSC_RFMR_FSLEN_EXT_OFFSET 28
+#define SSC_RFMR_FSLEN_SIZE 4
+#define SSC_RFMR_FSLEN_OFFSET 16
+#define SSC_RFMR_FSOS_SIZE 4
+#define SSC_RFMR_FSOS_OFFSET 20
+#define SSC_RFMR_LOOP_SIZE 1
+#define SSC_RFMR_LOOP_OFFSET 5
+#define SSC_RFMR_MSBF_SIZE 1
+#define SSC_RFMR_MSBF_OFFSET 7
+
+/* SSC Transmit Clock Mode Register */
+#define SSC_TCMR 0x00000018
+#define SSC_TCMR_CKG_SIZE 2
+#define SSC_TCMR_CKG_OFFSET 6
+#define SSC_TCMR_CKI_SIZE 1
+#define SSC_TCMR_CKI_OFFSET 5
+#define SSC_TCMR_CKO_SIZE 3
+#define SSC_TCMR_CKO_OFFSET 2
+#define SSC_TCMR_CKS_SIZE 2
+#define SSC_TCMR_CKS_OFFSET 0
+#define SSC_TCMR_PERIOD_SIZE 8
+#define SSC_TCMR_PERIOD_OFFSET 24
+#define SSC_TCMR_START_SIZE 4
+#define SSC_TCMR_START_OFFSET 8
+#define SSC_TCMR_STTDLY_SIZE 8
+#define SSC_TCMR_STTDLY_OFFSET 16
+
+/* SSC Transmit Frame Mode Register */
+#define SSC_TFMR 0x0000001c
+#define SSC_TFMR_DATDEF_SIZE 1
+#define SSC_TFMR_DATDEF_OFFSET 5
+#define SSC_TFMR_DATLEN_SIZE 5
+#define SSC_TFMR_DATLEN_OFFSET 0
+#define SSC_TFMR_DATNB_SIZE 4
+#define SSC_TFMR_DATNB_OFFSET 8
+#define SSC_TFMR_FSDEN_SIZE 1
+#define SSC_TFMR_FSDEN_OFFSET 23
+#define SSC_TFMR_FSEDGE_SIZE 1
+#define SSC_TFMR_FSEDGE_OFFSET 24
+/*
+ * The FSLEN_EXT field exists on at91sam9rl, at91sam9g10,
+ * at91sam9g20, at91sam9g45 and newer SoCs
+ */
+#define SSC_TFMR_FSLEN_EXT_SIZE 4
+#define SSC_TFMR_FSLEN_EXT_OFFSET 28
+#define SSC_TFMR_FSLEN_SIZE 4
+#define SSC_TFMR_FSLEN_OFFSET 16
+#define SSC_TFMR_FSOS_SIZE 3
+#define SSC_TFMR_FSOS_OFFSET 20
+#define SSC_TFMR_MSBF_SIZE 1
+#define SSC_TFMR_MSBF_OFFSET 7
+
+/* SSC Receive Hold Register */
+#define SSC_RHR 0x00000020
+#define SSC_RHR_RDAT_SIZE 32
+#define SSC_RHR_RDAT_OFFSET 0
+
+/* SSC Transmit Hold Register */
+#define SSC_THR 0x00000024
+#define SSC_THR_TDAT_SIZE 32
+#define SSC_THR_TDAT_OFFSET 0
+
+/* SSC Receive Sync. Holding Register */
+#define SSC_RSHR 0x00000030
+#define SSC_RSHR_RSDAT_SIZE 16
+#define SSC_RSHR_RSDAT_OFFSET 0
+
+/* SSC Transmit Sync. Holding Register */
+#define SSC_TSHR 0x00000034
+#define SSC_TSHR_TSDAT_SIZE 16
+#define SSC_TSHR_RSDAT_OFFSET 0
+
+/* SSC Receive Compare 0 Register */
+#define SSC_RC0R 0x00000038
+#define SSC_RC0R_CP0_SIZE 16
+#define SSC_RC0R_CP0_OFFSET 0
+
+/* SSC Receive Compare 1 Register */
+#define SSC_RC1R 0x0000003c
+#define SSC_RC1R_CP1_SIZE 16
+#define SSC_RC1R_CP1_OFFSET 0
+
+/* SSC Status Register */
+#define SSC_SR 0x00000040
+#define SSC_SR_CP0_SIZE 1
+#define SSC_SR_CP0_OFFSET 8
+#define SSC_SR_CP1_SIZE 1
+#define SSC_SR_CP1_OFFSET 9
+#define SSC_SR_ENDRX_SIZE 1
+#define SSC_SR_ENDRX_OFFSET 6
+#define SSC_SR_ENDTX_SIZE 1
+#define SSC_SR_ENDTX_OFFSET 2
+#define SSC_SR_OVRUN_SIZE 1
+#define SSC_SR_OVRUN_OFFSET 5
+#define SSC_SR_RXBUFF_SIZE 1
+#define SSC_SR_RXBUFF_OFFSET 7
+#define SSC_SR_RXEN_SIZE 1
+#define SSC_SR_RXEN_OFFSET 17
+#define SSC_SR_RXRDY_SIZE 1
+#define SSC_SR_RXRDY_OFFSET 4
+#define SSC_SR_RXSYN_SIZE 1
+#define SSC_SR_RXSYN_OFFSET 11
+#define SSC_SR_TXBUFE_SIZE 1
+#define SSC_SR_TXBUFE_OFFSET 3
+#define SSC_SR_TXEMPTY_SIZE 1
+#define SSC_SR_TXEMPTY_OFFSET 1
+#define SSC_SR_TXEN_SIZE 1
+#define SSC_SR_TXEN_OFFSET 16
+#define SSC_SR_TXRDY_SIZE 1
+#define SSC_SR_TXRDY_OFFSET 0
+#define SSC_SR_TXSYN_SIZE 1
+#define SSC_SR_TXSYN_OFFSET 10
+
+/* SSC Interrupt Enable Register */
+#define SSC_IER 0x00000044
+#define SSC_IER_CP0_SIZE 1
+#define SSC_IER_CP0_OFFSET 8
+#define SSC_IER_CP1_SIZE 1
+#define SSC_IER_CP1_OFFSET 9
+#define SSC_IER_ENDRX_SIZE 1
+#define SSC_IER_ENDRX_OFFSET 6
+#define SSC_IER_ENDTX_SIZE 1
+#define SSC_IER_ENDTX_OFFSET 2
+#define SSC_IER_OVRUN_SIZE 1
+#define SSC_IER_OVRUN_OFFSET 5
+#define SSC_IER_RXBUFF_SIZE 1
+#define SSC_IER_RXBUFF_OFFSET 7
+#define SSC_IER_RXRDY_SIZE 1
+#define SSC_IER_RXRDY_OFFSET 4
+#define SSC_IER_RXSYN_SIZE 1
+#define SSC_IER_RXSYN_OFFSET 11
+#define SSC_IER_TXBUFE_SIZE 1
+#define SSC_IER_TXBUFE_OFFSET 3
+#define SSC_IER_TXEMPTY_SIZE 1
+#define SSC_IER_TXEMPTY_OFFSET 1
+#define SSC_IER_TXRDY_SIZE 1
+#define SSC_IER_TXRDY_OFFSET 0
+#define SSC_IER_TXSYN_SIZE 1
+#define SSC_IER_TXSYN_OFFSET 10
+
+/* SSC Interrupt Disable Register */
+#define SSC_IDR 0x00000048
+#define SSC_IDR_CP0_SIZE 1
+#define SSC_IDR_CP0_OFFSET 8
+#define SSC_IDR_CP1_SIZE 1
+#define SSC_IDR_CP1_OFFSET 9
+#define SSC_IDR_ENDRX_SIZE 1
+#define SSC_IDR_ENDRX_OFFSET 6
+#define SSC_IDR_ENDTX_SIZE 1
+#define SSC_IDR_ENDTX_OFFSET 2
+#define SSC_IDR_OVRUN_SIZE 1
+#define SSC_IDR_OVRUN_OFFSET 5
+#define SSC_IDR_RXBUFF_SIZE 1
+#define SSC_IDR_RXBUFF_OFFSET 7
+#define SSC_IDR_RXRDY_SIZE 1
+#define SSC_IDR_RXRDY_OFFSET 4
+#define SSC_IDR_RXSYN_SIZE 1
+#define SSC_IDR_RXSYN_OFFSET 11
+#define SSC_IDR_TXBUFE_SIZE 1
+#define SSC_IDR_TXBUFE_OFFSET 3
+#define SSC_IDR_TXEMPTY_SIZE 1
+#define SSC_IDR_TXEMPTY_OFFSET 1
+#define SSC_IDR_TXRDY_SIZE 1
+#define SSC_IDR_TXRDY_OFFSET 0
+#define SSC_IDR_TXSYN_SIZE 1
+#define SSC_IDR_TXSYN_OFFSET 10
+
+/* SSC Interrupt Mask Register */
+#define SSC_IMR 0x0000004c
+#define SSC_IMR_CP0_SIZE 1
+#define SSC_IMR_CP0_OFFSET 8
+#define SSC_IMR_CP1_SIZE 1
+#define SSC_IMR_CP1_OFFSET 9
+#define SSC_IMR_ENDRX_SIZE 1
+#define SSC_IMR_ENDRX_OFFSET 6
+#define SSC_IMR_ENDTX_SIZE 1
+#define SSC_IMR_ENDTX_OFFSET 2
+#define SSC_IMR_OVRUN_SIZE 1
+#define SSC_IMR_OVRUN_OFFSET 5
+#define SSC_IMR_RXBUFF_SIZE 1
+#define SSC_IMR_RXBUFF_OFFSET 7
+#define SSC_IMR_RXRDY_SIZE 1
+#define SSC_IMR_RXRDY_OFFSET 4
+#define SSC_IMR_RXSYN_SIZE 1
+#define SSC_IMR_RXSYN_OFFSET 11
+#define SSC_IMR_TXBUFE_SIZE 1
+#define SSC_IMR_TXBUFE_OFFSET 3
+#define SSC_IMR_TXEMPTY_SIZE 1
+#define SSC_IMR_TXEMPTY_OFFSET 1
+#define SSC_IMR_TXRDY_SIZE 1
+#define SSC_IMR_TXRDY_OFFSET 0
+#define SSC_IMR_TXSYN_SIZE 1
+#define SSC_IMR_TXSYN_OFFSET 10
+
+/* SSC PDC Receive Pointer Register */
+#define SSC_PDC_RPR 0x00000100
+
+/* SSC PDC Receive Counter Register */
+#define SSC_PDC_RCR 0x00000104
+
+/* SSC PDC Transmit Pointer Register */
+#define SSC_PDC_TPR 0x00000108
+
+/* SSC PDC Receive Next Pointer Register */
+#define SSC_PDC_RNPR 0x00000110
+
+/* SSC PDC Receive Next Counter Register */
+#define SSC_PDC_RNCR 0x00000114
+
+/* SSC PDC Transmit Counter Register */
+#define SSC_PDC_TCR 0x0000010c
+
+/* SSC PDC Transmit Next Pointer Register */
+#define SSC_PDC_TNPR 0x00000118
+
+/* SSC PDC Transmit Next Counter Register */
+#define SSC_PDC_TNCR 0x0000011c
+
+/* SSC PDC Transfer Control Register */
+#define SSC_PDC_PTCR 0x00000120
+#define SSC_PDC_PTCR_RXTDIS_SIZE 1
+#define SSC_PDC_PTCR_RXTDIS_OFFSET 1
+#define SSC_PDC_PTCR_RXTEN_SIZE 1
+#define SSC_PDC_PTCR_RXTEN_OFFSET 0
+#define SSC_PDC_PTCR_TXTDIS_SIZE 1
+#define SSC_PDC_PTCR_TXTDIS_OFFSET 9
+#define SSC_PDC_PTCR_TXTEN_SIZE 1
+#define SSC_PDC_PTCR_TXTEN_OFFSET 8
+
+/* SSC PDC Transfer Status Register */
+#define SSC_PDC_PTSR 0x00000124
+#define SSC_PDC_PTSR_RXTEN_SIZE 1
+#define SSC_PDC_PTSR_RXTEN_OFFSET 0
+#define SSC_PDC_PTSR_TXTEN_SIZE 1
+#define SSC_PDC_PTSR_TXTEN_OFFSET 8
+
+/* Bit manipulation macros */
+#define SSC_BIT(name) \
+ (1 << SSC_##name##_OFFSET)
+#define SSC_BF(name, value) \
+ (((value) & ((1 << SSC_##name##_SIZE) - 1)) \
+ << SSC_##name##_OFFSET)
+#define SSC_BFEXT(name, value) \
+ (((value) >> SSC_##name##_OFFSET) \
+ & ((1 << SSC_##name##_SIZE) - 1))
+#define SSC_BFINS(name, value, old) \
+ (((old) & ~(((1 << SSC_##name##_SIZE) - 1) \
+ << SSC_##name##_OFFSET)) | SSC_BF(name, value))
+
+/* Register access macros */
+#define ssc_readl(base, reg) __raw_readl(base + SSC_##reg)
+#define ssc_writel(base, reg, value) __raw_writel((value), base + SSC_##reg)
+
+#endif /* __INCLUDE_ATMEL_SSC_H */
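A sketch of a client using ssc_request() together with the bit-field and register helpers above; it assumes ssc_request() reports failure with an ERR_PTR value, and the divider value is only an example:

static int example_ssc_setup(void)
{
	struct ssc_device *ssc;

	ssc = ssc_request(0);		/* first SSC block */
	if (IS_ERR(ssc))
		return PTR_ERR(ssc);

	/* (clk_prepare_enable(ssc->clk) would normally come first) */
	ssc_writel(ssc->regs, CMR, SSC_BF(CMR_DIV, 8));	/* clock divider */
	ssc_writel(ssc->regs, CR, SSC_BIT(CR_TXEN));	/* enable transmitter */

	/* ... use the block, then shut it down and release it ... */
	ssc_writel(ssc->regs, CR, SSC_BIT(CR_TXDIS));
	ssc_free(ssc);
	return 0;
}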
diff --git a/include/linux/atmel_pdc.h b/include/linux/atmel_pdc.h
new file mode 100644
index 000000000..63499ce80
--- /dev/null
+++ b/include/linux/atmel_pdc.h
@@ -0,0 +1,38 @@
+/*
+ * include/linux/atmel_pdc.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Peripheral Data Controller (PDC) registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ATMEL_PDC_H
+#define ATMEL_PDC_H
+
+#define ATMEL_PDC_RPR 0x100 /* Receive Pointer Register */
+#define ATMEL_PDC_RCR 0x104 /* Receive Counter Register */
+#define ATMEL_PDC_TPR 0x108 /* Transmit Pointer Register */
+#define ATMEL_PDC_TCR 0x10c /* Transmit Counter Register */
+#define ATMEL_PDC_RNPR 0x110 /* Receive Next Pointer Register */
+#define ATMEL_PDC_RNCR 0x114 /* Receive Next Counter Register */
+#define ATMEL_PDC_TNPR 0x118 /* Transmit Next Pointer Register */
+#define ATMEL_PDC_TNCR 0x11c /* Transmit Next Counter Register */
+
+#define ATMEL_PDC_PTCR 0x120 /* Transfer Control Register */
+#define ATMEL_PDC_RXTEN (1 << 0) /* Receiver Transfer Enable */
+#define ATMEL_PDC_RXTDIS (1 << 1) /* Receiver Transfer Disable */
+#define ATMEL_PDC_TXTEN (1 << 8) /* Transmitter Transfer Enable */
+#define ATMEL_PDC_TXTDIS (1 << 9) /* Transmitter Transfer Disable */
+
+#define ATMEL_PDC_PTSR 0x124 /* Transfer Status Register */
+
+#define ATMEL_PDC_SCND_BUF_OFF 0x10 /* Offset between first and second buffer registers */
+
+#endif
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
new file mode 100644
index 000000000..00beddf6b
--- /dev/null
+++ b/include/linux/atmel_serial.h
@@ -0,0 +1,130 @@
+/*
+ * include/linux/atmel_serial.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * USART registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ATMEL_SERIAL_H
+#define ATMEL_SERIAL_H
+
+#define ATMEL_US_CR 0x00 /* Control Register */
+#define ATMEL_US_RSTRX (1 << 2) /* Reset Receiver */
+#define ATMEL_US_RSTTX (1 << 3) /* Reset Transmitter */
+#define ATMEL_US_RXEN (1 << 4) /* Receiver Enable */
+#define ATMEL_US_RXDIS (1 << 5) /* Receiver Disable */
+#define ATMEL_US_TXEN (1 << 6) /* Transmitter Enable */
+#define ATMEL_US_TXDIS (1 << 7) /* Transmitter Disable */
+#define ATMEL_US_RSTSTA (1 << 8) /* Reset Status Bits */
+#define ATMEL_US_STTBRK (1 << 9) /* Start Break */
+#define ATMEL_US_STPBRK (1 << 10) /* Stop Break */
+#define ATMEL_US_STTTO (1 << 11) /* Start Time-out */
+#define ATMEL_US_SENDA (1 << 12) /* Send Address */
+#define ATMEL_US_RSTIT (1 << 13) /* Reset Iterations */
+#define ATMEL_US_RSTNACK (1 << 14) /* Reset Non Acknowledge */
+#define ATMEL_US_RETTO (1 << 15) /* Rearm Time-out */
+#define ATMEL_US_DTREN (1 << 16) /* Data Terminal Ready Enable [AT91RM9200 only] */
+#define ATMEL_US_DTRDIS (1 << 17) /* Data Terminal Ready Disable [AT91RM9200 only] */
+#define ATMEL_US_RTSEN (1 << 18) /* Request To Send Enable */
+#define ATMEL_US_RTSDIS (1 << 19) /* Request To Send Disable */
+
+#define ATMEL_US_MR 0x04 /* Mode Register */
+#define ATMEL_US_USMODE (0xf << 0) /* Mode of the USART */
+#define ATMEL_US_USMODE_NORMAL 0
+#define ATMEL_US_USMODE_RS485 1
+#define ATMEL_US_USMODE_HWHS 2
+#define ATMEL_US_USMODE_MODEM 3
+#define ATMEL_US_USMODE_ISO7816_T0 4
+#define ATMEL_US_USMODE_ISO7816_T1 6
+#define ATMEL_US_USMODE_IRDA 8
+#define ATMEL_US_USCLKS (3 << 4) /* Clock Selection */
+#define ATMEL_US_USCLKS_MCK (0 << 4)
+#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
+#define ATMEL_US_USCLKS_SCK (3 << 4)
+#define ATMEL_US_CHRL (3 << 6) /* Character Length */
+#define ATMEL_US_CHRL_5 (0 << 6)
+#define ATMEL_US_CHRL_6 (1 << 6)
+#define ATMEL_US_CHRL_7 (2 << 6)
+#define ATMEL_US_CHRL_8 (3 << 6)
+#define ATMEL_US_SYNC (1 << 8) /* Synchronous Mode Select */
+#define ATMEL_US_PAR (7 << 9) /* Parity Type */
+#define ATMEL_US_PAR_EVEN (0 << 9)
+#define ATMEL_US_PAR_ODD (1 << 9)
+#define ATMEL_US_PAR_SPACE (2 << 9)
+#define ATMEL_US_PAR_MARK (3 << 9)
+#define ATMEL_US_PAR_NONE (4 << 9)
+#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
+#define ATMEL_US_NBSTOP (3 << 12) /* Number of Stop Bits */
+#define ATMEL_US_NBSTOP_1 (0 << 12)
+#define ATMEL_US_NBSTOP_1_5 (1 << 12)
+#define ATMEL_US_NBSTOP_2 (2 << 12)
+#define ATMEL_US_CHMODE (3 << 14) /* Channel Mode */
+#define ATMEL_US_CHMODE_NORMAL (0 << 14)
+#define ATMEL_US_CHMODE_ECHO (1 << 14)
+#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
+#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
+#define ATMEL_US_MSBF (1 << 16) /* Bit Order */
+#define ATMEL_US_MODE9 (1 << 17) /* 9-bit Character Length */
+#define ATMEL_US_CLKO (1 << 18) /* Clock Output Select */
+#define ATMEL_US_OVER (1 << 19) /* Oversampling Mode */
+#define ATMEL_US_INACK (1 << 20) /* Inhibit Non Acknowledge */
+#define ATMEL_US_DSNACK (1 << 21) /* Disable Successive NACK */
+#define ATMEL_US_MAX_ITER (7 << 24) /* Max Iterations */
+#define ATMEL_US_FILTER (1 << 28) /* Infrared Receive Line Filter */
+
+#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
+#define ATMEL_US_RXRDY (1 << 0) /* Receiver Ready */
+#define ATMEL_US_TXRDY (1 << 1) /* Transmitter Ready */
+#define ATMEL_US_RXBRK (1 << 2) /* Break Received / End of Break */
+#define ATMEL_US_ENDRX (1 << 3) /* End of Receiver Transfer */
+#define ATMEL_US_ENDTX (1 << 4) /* End of Transmitter Transfer */
+#define ATMEL_US_OVRE (1 << 5) /* Overrun Error */
+#define ATMEL_US_FRAME (1 << 6) /* Framing Error */
+#define ATMEL_US_PARE (1 << 7) /* Parity Error */
+#define ATMEL_US_TIMEOUT (1 << 8) /* Receiver Time-out */
+#define ATMEL_US_TXEMPTY (1 << 9) /* Transmitter Empty */
+#define ATMEL_US_ITERATION (1 << 10) /* Max number of Repetitions Reached */
+#define ATMEL_US_TXBUFE (1 << 11) /* Transmission Buffer Empty */
+#define ATMEL_US_RXBUFF (1 << 12) /* Reception Buffer Full */
+#define ATMEL_US_NACK (1 << 13) /* Non Acknowledge */
+#define ATMEL_US_RIIC (1 << 16) /* Ring Indicator Input Change [AT91RM9200 only] */
+#define ATMEL_US_DSRIC (1 << 17) /* Data Set Ready Input Change [AT91RM9200 only] */
+#define ATMEL_US_DCDIC (1 << 18) /* Data Carrier Detect Input Change [AT91RM9200 only] */
+#define ATMEL_US_CTSIC (1 << 19) /* Clear to Send Input Change */
+#define ATMEL_US_RI (1 << 20) /* RI */
+#define ATMEL_US_DSR (1 << 21) /* DSR */
+#define ATMEL_US_DCD (1 << 22) /* DCD */
+#define ATMEL_US_CTS (1 << 23) /* CTS */
+
+#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
+#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
+#define ATMEL_US_CSR 0x14 /* Channel Status Register */
+#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
+#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
+#define ATMEL_US_SYNH (1 << 15) /* Transmit/Receive Sync [AT91SAM9261 only] */
+
+#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
+#define ATMEL_US_CD (0xffff << 0) /* Clock Divider */
+
+#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register */
+#define ATMEL_US_TO (0xffff << 0) /* Time-out Value */
+
+#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
+#define ATMEL_US_TG (0xff << 0) /* Timeguard Value */
+
+#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
+#define ATMEL_US_NER 0x44 /* Number of Errors Register */
+#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
+
+#define ATMEL_US_NAME 0xf0 /* Ip Name */
+#define ATMEL_US_VERSION 0xfc /* Ip Version */
+
+#endif
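As an illustration of how these definitions compose, an 8N1 mode word and a baud-rate divisor might be built as below, assuming the usual asynchronous formula baud = MCK / (16 * CD) when ATMEL_US_OVER is clear; the helper, the writel() usage and the clock-rate argument are stand-ins for whatever the real driver uses:

static void example_usart_8n1(void __iomem *base, unsigned long mck_hz,
			      unsigned int baud)
{
	unsigned int mode = ATMEL_US_USMODE_NORMAL | ATMEL_US_USCLKS_MCK |
			    ATMEL_US_CHRL_8 | ATMEL_US_PAR_NONE |
			    ATMEL_US_NBSTOP_1;
	unsigned int cd = DIV_ROUND_CLOSEST(mck_hz, 16 * baud);

	writel(mode, base + ATMEL_US_MR);
	writel(cd, base + ATMEL_US_BRGR);
	writel(ATMEL_US_RXEN | ATMEL_US_TXEN, base + ATMEL_US_CR);
}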
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
new file mode 100644
index 000000000..b87c1c7c2
--- /dev/null
+++ b/include/linux/atmel_tc.h
@@ -0,0 +1,269 @@
+/*
+ * Timer/Counter Unit (TC) registers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ATMEL_TC_H
+#define ATMEL_TC_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+
+/*
+ * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
+ * three general-purpose 16-bit timers. These timers share one register bank.
+ * Depending on the SOC, each timer may have its own clock and IRQ, or those
+ * may be shared by the whole TC block.
+ *
+ * These TC blocks may have up to nine external pins: TCLK0..2 signals for
+ * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
+ * or triggering. Those pins need to be set up for use with the TC block,
+ * else they will be used as GPIOs or for a different controller.
+ *
+ * Although we expect each TC block to have a platform_device node, those
+ * nodes are not what drivers bind to. Instead, they ask for a specific
+ * TC block, by number ... which is a common approach on systems with many
+ * timers. Then they use clk_get() and platform_get_irq() to get clock and
+ * IRQ resources.
+ */
+
+struct clk;
+
+/**
+ * struct atmel_tcb_config - SoC data for a Timer/Counter Block
+ * @counter_width: size in bits of a timer counter register
+ */
+struct atmel_tcb_config {
+ size_t counter_width;
+};
+
+/**
+ * struct atmel_tc - information about a Timer/Counter Block
+ * @pdev: physical device
+ * @regs: mapping through which the I/O registers can be accessed
+ * @id: block id
+ * @tcb_config: configuration data from SoC
+ * @irq: irq for each of the three channels
+ * @clk: internal clock source for each of the three channels
+ * @node: list node, for tclib internal use
+ * @allocated: if already used, for tclib internal use
+ *
+ * On some platforms, each TC channel has its own clocks and IRQs,
+ * while on others, all TC channels share the same clock and IRQ.
+ * Drivers should clk_enable() all the clocks they need even though
+ * all the entries in @clk may point to the same physical clock.
+ * Likewise, drivers should request irqs independently for each
+ * channel, but they must use IRQF_SHARED in case some of the entries
+ * in @irq are actually the same IRQ.
+ */
+struct atmel_tc {
+ struct platform_device *pdev;
+ void __iomem *regs;
+ int id;
+ const struct atmel_tcb_config *tcb_config;
+ int irq[3];
+ struct clk *clk[3];
+ struct list_head node;
+ bool allocated;
+};
+
+extern struct atmel_tc *atmel_tc_alloc(unsigned block);
+extern void atmel_tc_free(struct atmel_tc *tc);
+
+/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
+extern const u8 atmel_tc_divisors[5];
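A sketch of the allocation pattern described above (it needs <linux/clk.h> and <linux/interrupt.h>); the interrupt handler is hypothetical and error handling is abbreviated:

static irqreturn_t example_tc_irq(int irq, void *dev_id)
{
	/* read the channel's ATMEL_TC_SR and act on the flags */
	return IRQ_HANDLED;
}

static struct atmel_tc *example_tc_setup(void)
{
	struct atmel_tc *tc;

	tc = atmel_tc_alloc(0);			/* ask for TC block 0 */
	if (!tc)
		return NULL;

	/* enable the channel clock even if all channels share one clock */
	if (clk_prepare_enable(tc->clk[0]))
		goto err_free;

	/* IRQF_SHARED because the channels may share one interrupt line */
	if (request_irq(tc->irq[0], example_tc_irq, IRQF_SHARED,
			"example-tc", tc))
		goto err_clk;

	return tc;

err_clk:
	clk_disable_unprepare(tc->clk[0]);
err_free:
	atmel_tc_free(tc);
	return NULL;
}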
+
+
+/*
+ * Two registers have block-wide controls. These are: configuring the three
+ * "external" clocks (or event sources) used by the timer channels; and
+ * synchronizing the timers by resetting them all at once.
+ *
+ * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
+ * signals. Or, it can mean "external to timer", using the TIOA output from
+ * one of the other two timers that's being run in waveform mode.
+ */
+
+#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */
+#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */
+
+#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */
+#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */
+#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0)
+#define ATMEL_TC_TC0XC0S_NONE (1 << 0)
+#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0)
+#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0)
+#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */
+#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2)
+#define ATMEL_TC_TC1XC1S_NONE (1 << 2)
+#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2)
+#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2)
+#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */
+#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4)
+#define ATMEL_TC_TC2XC2S_NONE (1 << 4)
+#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4)
+#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4)
+
+
+/*
+ * Each TC block has three "channels", each with one counter and controls.
+ *
+ * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
+ * when it's not "external") is silicon-specific. AT91 platforms use one
+ * set of definitions; AVR32 platforms use a different set. Don't hard-wire
+ * such knowledge into your code, use the global "atmel_tc_divisors" ...
+ * where index N is the divisor for clock N+1, else zero to indicate it uses
+ * the 32 KiHz clock.
+ *
+ * The timers can be chained in various ways, and operated in "waveform"
+ * generation mode (including PWM) or "capture" mode (to time events). In
+ * both modes, behavior can be configured in many ways.
+ *
+ * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a
+ * PWM output, and TIOB as either another PWM or as a trigger. Capture mode
+ * uses them only as inputs.
+ */
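The divisor lookup described above might be used like this to turn a TIMER_CLOCKx selection into a rate; the helper is hypothetical and assumes tc->clk[] runs at the master clock the divisors apply to:

/* sel is 0 for ATMEL_TC_TIMER_CLOCK1 ... 4 for ATMEL_TC_TIMER_CLOCK5 */
static unsigned long example_tc_clock_rate(struct atmel_tc *tc, unsigned sel)
{
	u8 div = atmel_tc_divisors[sel];

	if (!div)			/* zero means the 32 KiHz clock */
		return 32768;
	return clk_get_rate(tc->clk[0]) / div;
}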
+#define ATMEL_TC_CHAN(idx) ((idx)*0x40)
+#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
+
+#define ATMEL_TC_CCR 0x00 /* Channel Control Register */
+#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */
+#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */
+#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */
+
+#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */
+
+/* Both modes share some CMR bits */
+#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */
+#define ATMEL_TC_TIMER_CLOCK1 (0 << 0)
+#define ATMEL_TC_TIMER_CLOCK2 (1 << 0)
+#define ATMEL_TC_TIMER_CLOCK3 (2 << 0)
+#define ATMEL_TC_TIMER_CLOCK4 (3 << 0)
+#define ATMEL_TC_TIMER_CLOCK5 (4 << 0)
+#define ATMEL_TC_XC0 (5 << 0)
+#define ATMEL_TC_XC1 (6 << 0)
+#define ATMEL_TC_XC2 (7 << 0)
+#define ATMEL_TC_CLKI (1 << 3) /* clock invert */
+#define ATMEL_TC_BURST (3 << 4) /* clock gating */
+#define ATMEL_TC_GATE_NONE (0 << 4)
+#define ATMEL_TC_GATE_XC0 (1 << 4)
+#define ATMEL_TC_GATE_XC1 (2 << 4)
+#define ATMEL_TC_GATE_XC2 (3 << 4)
+#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */
+
+/* CAPTURE mode CMR bits */
+#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */
+#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */
+#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */
+#define ATMEL_TC_ETRGEDG_NONE (0 << 8)
+#define ATMEL_TC_ETRGEDG_RISING (1 << 8)
+#define ATMEL_TC_ETRGEDG_FALLING (2 << 8)
+#define ATMEL_TC_ETRGEDG_BOTH (3 << 8)
+#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */
+#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */
+#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */
+#define ATMEL_TC_LDRA_NONE (0 << 16)
+#define ATMEL_TC_LDRA_RISING (1 << 16)
+#define ATMEL_TC_LDRA_FALLING (2 << 16)
+#define ATMEL_TC_LDRA_BOTH (3 << 16)
+#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */
+#define ATMEL_TC_LDRB_NONE (0 << 18)
+#define ATMEL_TC_LDRB_RISING (1 << 18)
+#define ATMEL_TC_LDRB_FALLING (2 << 18)
+#define ATMEL_TC_LDRB_BOTH (3 << 18)
+
+/* WAVEFORM mode CMR bits */
+#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */
+#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */
+#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */
+#define ATMEL_TC_EEVTEDG_NONE (0 << 8)
+#define ATMEL_TC_EEVTEDG_RISING (1 << 8)
+#define ATMEL_TC_EEVTEDG_FALLING (2 << 8)
+#define ATMEL_TC_EEVTEDG_BOTH (3 << 8)
+#define ATMEL_TC_EEVT (3 << 10) /* external event source */
+#define ATMEL_TC_EEVT_TIOB (0 << 10)
+#define ATMEL_TC_EEVT_XC0 (1 << 10)
+#define ATMEL_TC_EEVT_XC1 (2 << 10)
+#define ATMEL_TC_EEVT_XC2 (3 << 10)
+#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */
+#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */
+#define ATMEL_TC_WAVESEL_UP (0 << 13)
+#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
+#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13)
+#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
+#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */
+#define ATMEL_TC_ACPA_NONE (0 << 16)
+#define ATMEL_TC_ACPA_SET (1 << 16)
+#define ATMEL_TC_ACPA_CLEAR (2 << 16)
+#define ATMEL_TC_ACPA_TOGGLE (3 << 16)
+#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */
+#define ATMEL_TC_ACPC_NONE (0 << 18)
+#define ATMEL_TC_ACPC_SET (1 << 18)
+#define ATMEL_TC_ACPC_CLEAR (2 << 18)
+#define ATMEL_TC_ACPC_TOGGLE (3 << 18)
+#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */
+#define ATMEL_TC_AEEVT_NONE (0 << 20)
+#define ATMEL_TC_AEEVT_SET (1 << 20)
+#define ATMEL_TC_AEEVT_CLEAR (2 << 20)
+#define ATMEL_TC_AEEVT_TOGGLE (3 << 20)
+#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */
+#define ATMEL_TC_ASWTRG_NONE (0 << 22)
+#define ATMEL_TC_ASWTRG_SET (1 << 22)
+#define ATMEL_TC_ASWTRG_CLEAR (2 << 22)
+#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22)
+#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */
+#define ATMEL_TC_BCPB_NONE (0 << 24)
+#define ATMEL_TC_BCPB_SET (1 << 24)
+#define ATMEL_TC_BCPB_CLEAR (2 << 24)
+#define ATMEL_TC_BCPB_TOGGLE (3 << 24)
+#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */
+#define ATMEL_TC_BCPC_NONE (0 << 26)
+#define ATMEL_TC_BCPC_SET (1 << 26)
+#define ATMEL_TC_BCPC_CLEAR (2 << 26)
+#define ATMEL_TC_BCPC_TOGGLE (3 << 26)
+#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */
+#define ATMEL_TC_BEEVT_NONE (0 << 28)
+#define ATMEL_TC_BEEVT_SET (1 << 28)
+#define ATMEL_TC_BEEVT_CLEAR (2 << 28)
+#define ATMEL_TC_BEEVT_TOGGLE (3 << 28)
+#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */
+#define ATMEL_TC_BSWTRG_NONE (0 << 30)
+#define ATMEL_TC_BSWTRG_SET (1 << 30)
+#define ATMEL_TC_BSWTRG_CLEAR (2 << 30)
+#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30)
+
+#define ATMEL_TC_CV 0x10 /* counter Value */
+#define ATMEL_TC_RA 0x14 /* register A */
+#define ATMEL_TC_RB 0x18 /* register B */
+#define ATMEL_TC_RC 0x1c /* register C */
+
+#define ATMEL_TC_SR 0x20 /* status (read-only) */
+/* Status-only flags */
+#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */
+#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */
+#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */
+
+#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */
+#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */
+#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */
+
+/* Status and IRQ flags */
+#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */
+#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */
+#define ATMEL_TC_CPAS (1 << 2) /* RA compare */
+#define ATMEL_TC_CPBS (1 << 3) /* RB compare */
+#define ATMEL_TC_CPCS (1 << 4) /* RC compare */
+#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
+#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
+#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
+#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
+ ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
+ ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
+ ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
+ /* all IRQs */
+
+#endif
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
new file mode 100644
index 000000000..5b08a8540
--- /dev/null
+++ b/include/linux/atomic.h
@@ -0,0 +1,131 @@
+/* Atomic operations usable in machine independent code */
+#ifndef _LINUX_ATOMIC_H
+#define _LINUX_ATOMIC_H
+#include <asm/atomic.h>
+
+/**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as @v was not already @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ return __atomic_add_unless(v, a, u) != u;
+}
+
+/**
+ * atomic_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1, so long as @v is non-zero.
+ * Returns non-zero if @v was non-zero, and zero otherwise.
+ */
+#ifndef atomic_inc_not_zero
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+#endif
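A common use of atomic_inc_not_zero() is taking a reference only while an object is still live; a minimal sketch with a hypothetical object type:

struct example_obj {
	atomic_t refcnt;	/* reaches zero when the object is dying */
	/* ... payload ... */
};

static struct example_obj *example_obj_get(struct example_obj *obj)
{
	/* fails once the last reference has already gone */
	if (!atomic_inc_not_zero(&obj->refcnt))
		return NULL;
	return obj;
}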
+
+/**
+ * atomic_inc_not_zero_hint - increment if not null
+ * @v: pointer of type atomic_t
+ * @hint: probable value of the atomic before the increment
+ *
+ * This version of atomic_inc_not_zero() gives a hint of the probable
+ * value of the atomic. This helps the processor avoid reading the memory
+ * before doing the atomic read/modify/write cycle, lowering the
+ * number of bus transactions on some arches.
+ *
+ * Returns: 0 if increment was not done, 1 otherwise.
+ */
+#ifndef atomic_inc_not_zero_hint
+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+{
+ int val, c = hint;
+
+ /* sanity test, should be removed by compiler if hint is a constant */
+ if (!hint)
+ return atomic_inc_not_zero(v);
+
+ do {
+ val = atomic_cmpxchg(v, c, c + 1);
+ if (val == c)
+ return 1;
+ c = val;
+ } while (c);
+
+ return 0;
+}
+#endif
+
+#ifndef atomic_inc_unless_negative
+static inline int atomic_inc_unless_negative(atomic_t *p)
+{
+ int v, v1;
+ for (v = 0; v >= 0; v = v1) {
+ v1 = atomic_cmpxchg(p, v, v + 1);
+ if (likely(v1 == v))
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+#ifndef atomic_dec_unless_positive
+static inline int atomic_dec_unless_positive(atomic_t *p)
+{
+ int v, v1;
+ for (v = 0; v <= 0; v = v1) {
+ v1 = atomic_cmpxchg(p, v, v - 1);
+ if (likely(v1 == v))
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+/*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+#ifndef atomic_dec_if_positive
+static inline int atomic_dec_if_positive(atomic_t *v)
+{
+ int c, old, dec;
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+#endif
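atomic_dec_if_positive() suits consuming from a counted pool without going negative: per the comment above it returns the old value minus one, so a negative result means nothing was taken. A hypothetical token pool:

static atomic_t example_tokens = ATOMIC_INIT(4);

static bool example_take_token(void)
{
	/* negative result: the pool was already empty, nothing was taken */
	return atomic_dec_if_positive(&example_tokens) >= 0;
}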
+
+#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
+static inline void atomic_or(int i, atomic_t *v)
+{
+ int old;
+ int new;
+
+ do {
+ old = atomic_read(v);
+ new = old | i;
+ } while (atomic_cmpxchg(v, old, new) != old);
+}
+#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
+
+#include <asm-generic/atomic-long.h>
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+#endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
new file mode 100644
index 000000000..896c6892f
--- /dev/null
+++ b/include/linux/attribute_container.h
@@ -0,0 +1,72 @@
+/*
+ * attribute_container.h - a generic container for all classes
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ *
+ * This file is licensed under GPLv2
+ */
+
+#ifndef _ATTRIBUTE_CONTAINER_H_
+#define _ATTRIBUTE_CONTAINER_H_
+
+#include <linux/list.h>
+#include <linux/klist.h>
+
+struct device;
+
+struct attribute_container {
+ struct list_head node;
+ struct klist containers;
+ struct class *class;
+ const struct attribute_group *grp;
+ struct device_attribute **attrs;
+ int (*match)(struct attribute_container *, struct device *);
+#define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01
+ unsigned long flags;
+};
+
+static inline int
+attribute_container_no_classdevs(struct attribute_container *atc)
+{
+ return atc->flags & ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
+}
+
+static inline void
+attribute_container_set_no_classdevs(struct attribute_container *atc)
+{
+ atc->flags |= ATTRIBUTE_CONTAINER_NO_CLASSDEVS;
+}
+
+int attribute_container_register(struct attribute_container *cont);
+int __must_check attribute_container_unregister(struct attribute_container *cont);
+void attribute_container_create_device(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *));
+void attribute_container_add_device(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *));
+void attribute_container_remove_device(struct device *dev,
+ void (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *));
+void attribute_container_device_trigger(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *));
+void attribute_container_trigger(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *));
+int attribute_container_add_attrs(struct device *classdev);
+int attribute_container_add_class_device(struct device *classdev);
+int attribute_container_add_class_device_adapter(struct attribute_container *cont,
+ struct device *dev,
+ struct device *classdev);
+void attribute_container_remove_attrs(struct device *classdev);
+void attribute_container_class_device_del(struct device *classdev);
+struct attribute_container *attribute_container_classdev_to_container(struct device *);
+struct device *attribute_container_find_class_device(struct attribute_container *, struct device *);
+struct device_attribute **attribute_container_classdev_to_attrs(const struct device *classdev);
+
+#endif
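A rough sketch of registering a container, with a hypothetical class and match rule; real users (for example the SCSI transport classes) wrap this in more infrastructure:

static struct class example_class = {
	.name = "example",		/* hypothetical class */
};

static int example_match(struct attribute_container *cont, struct device *dev)
{
	/* hypothetical rule: claim devices whose name starts with "ex" */
	return strncmp(dev_name(dev), "ex", 2) == 0;
}

static struct attribute_container example_container = {
	.class	= &example_class,
	.match	= example_match,
};

static int __init example_init(void)
{
	return attribute_container_register(&example_container);
}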
diff --git a/include/linux/audit.h b/include/linux/audit.h
new file mode 100644
index 000000000..c2e7e3a83
--- /dev/null
+++ b/include/linux/audit.h
@@ -0,0 +1,550 @@
+/* audit.h -- Auditing support
+ *
+ * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Written by Rickard E. (Rik) Faith <faith@redhat.com>
+ *
+ */
+#ifndef _LINUX_AUDIT_H_
+#define _LINUX_AUDIT_H_
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <uapi/linux/audit.h>
+
+struct audit_sig_info {
+ uid_t uid;
+ pid_t pid;
+ char ctx[0];
+};
+
+struct audit_buffer;
+struct audit_context;
+struct inode;
+struct netlink_skb_parms;
+struct path;
+struct linux_binprm;
+struct mq_attr;
+struct mqstat;
+struct audit_watch;
+struct audit_tree;
+struct sk_buff;
+
+struct audit_krule {
+ u32 pflags;
+ u32 flags;
+ u32 listnr;
+ u32 action;
+ u32 mask[AUDIT_BITMASK_SIZE];
+ u32 buflen; /* for data alloc on list rules */
+ u32 field_count;
+ char *filterkey; /* ties events to rules */
+ struct audit_field *fields;
+ struct audit_field *arch_f; /* quick access to arch field */
+ struct audit_field *inode_f; /* quick access to an inode field */
+ struct audit_watch *watch; /* associated watch */
+ struct audit_tree *tree; /* associated watched tree */
+ struct list_head rlist; /* entry in audit_{watch,tree}.rules list */
+ struct list_head list; /* for AUDIT_LIST* purposes only */
+ u64 prio;
+};
+
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
+#define AUDIT_LOGINUID_LEGACY 0x1
+
+struct audit_field {
+ u32 type;
+ union {
+ u32 val;
+ kuid_t uid;
+ kgid_t gid;
+ struct {
+ char *lsm_str;
+ void *lsm_rule;
+ };
+ };
+ u32 op;
+};
+
+extern int is_audit_feature_set(int which);
+
+extern int __init audit_register_class(int class, unsigned *list);
+extern int audit_classify_syscall(int abi, unsigned syscall);
+extern int audit_classify_arch(int arch);
+/* only for compat system calls */
+extern unsigned compat_write_class[];
+extern unsigned compat_read_class[];
+extern unsigned compat_dir_class[];
+extern unsigned compat_chattr_class[];
+extern unsigned compat_signal_class[];
+
+extern int audit_classify_compat_syscall(int abi, unsigned syscall);
+
+/* audit_names->type values */
+#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
+#define AUDIT_TYPE_NORMAL 1 /* a "normal" audit record */
+#define AUDIT_TYPE_PARENT 2 /* a parent audit record */
+#define AUDIT_TYPE_CHILD_DELETE 3 /* a child being deleted */
+#define AUDIT_TYPE_CHILD_CREATE 4 /* a child being created */
+
+/* maximum number of args that audit_socketcall can process */
+#define AUDITSC_ARGS 6
+
+struct filename;
+
+extern void audit_log_session_info(struct audit_buffer *ab);
+
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+#define audit_is_compat(arch) (!((arch) & __AUDIT_ARCH_64BIT))
+#else
+#define audit_is_compat(arch) false
+#endif
+
+#ifdef CONFIG_AUDITSYSCALL
+#include <asm/syscall.h> /* for syscall_get_arch() */
+
+/* These are defined in auditsc.c */
+ /* Public API */
+extern int audit_alloc(struct task_struct *task);
+extern void __audit_free(struct task_struct *task);
+extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3);
+extern void __audit_syscall_exit(int ret_success, long ret_value);
+extern struct filename *__audit_reusename(const __user char *uptr);
+extern void __audit_getname(struct filename *name);
+
+#define AUDIT_INODE_PARENT 1 /* dentry represents the parent */
+#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
+extern void __audit_inode(struct filename *name, const struct dentry *dentry,
+ unsigned int flags);
+extern void __audit_file(const struct file *);
+extern void __audit_inode_child(const struct inode *parent,
+ const struct dentry *dentry,
+ const unsigned char type);
+extern void __audit_seccomp(unsigned long syscall, long signr, int code);
+extern void __audit_ptrace(struct task_struct *t);
+
+static inline int audit_dummy_context(void)
+{
+ void *p = current->audit_context;
+ return !p || *(int *)p;
+}
+static inline void audit_free(struct task_struct *task)
+{
+ if (unlikely(task->audit_context))
+ __audit_free(task);
+}
+static inline void audit_syscall_entry(int major, unsigned long a0,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3)
+{
+ if (unlikely(current->audit_context))
+ __audit_syscall_entry(major, a0, a1, a2, a3);
+}
+static inline void audit_syscall_exit(void *pt_regs)
+{
+ if (unlikely(current->audit_context)) {
+ int success = is_syscall_success(pt_regs);
+ long return_code = regs_return_value(pt_regs);
+
+ __audit_syscall_exit(success, return_code);
+ }
+}
+static inline struct filename *audit_reusename(const __user char *name)
+{
+ if (unlikely(!audit_dummy_context()))
+ return __audit_reusename(name);
+ return NULL;
+}
+static inline void audit_getname(struct filename *name)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_getname(name);
+}
+static inline void audit_inode(struct filename *name,
+ const struct dentry *dentry,
+ unsigned int parent) {
+ if (unlikely(!audit_dummy_context())) {
+ unsigned int flags = 0;
+ if (parent)
+ flags |= AUDIT_INODE_PARENT;
+ __audit_inode(name, dentry, flags);
+ }
+}
+static inline void audit_file(struct file *file)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_file(file);
+}
+static inline void audit_inode_parent_hidden(struct filename *name,
+ const struct dentry *dentry)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_inode(name, dentry,
+ AUDIT_INODE_PARENT | AUDIT_INODE_HIDDEN);
+}
+static inline void audit_inode_child(const struct inode *parent,
+ const struct dentry *dentry,
+ const unsigned char type) {
+ if (unlikely(!audit_dummy_context()))
+ __audit_inode_child(parent, dentry, type);
+}
+void audit_core_dumps(long signr);
+
+static inline void audit_seccomp(unsigned long syscall, long signr, int code)
+{
+ /* Force a record to be reported if a signal was delivered. */
+ if (signr || unlikely(!audit_dummy_context()))
+ __audit_seccomp(syscall, signr, code);
+}
+
+static inline void audit_ptrace(struct task_struct *t)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_ptrace(t);
+}
+
+ /* Private API (for audit.c only) */
+extern unsigned int audit_serial(void);
+extern int auditsc_get_stamp(struct audit_context *ctx,
+ struct timespec *t, unsigned int *serial);
+extern int audit_set_loginuid(kuid_t loginuid);
+
+static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+{
+ return tsk->loginuid;
+}
+
+static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
+{
+ return tsk->sessionid;
+}
+
+extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
+extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
+extern void __audit_bprm(struct linux_binprm *bprm);
+extern int __audit_socketcall(int nargs, unsigned long *args);
+extern int __audit_sockaddr(int len, void *addr);
+extern void __audit_fd_pair(int fd1, int fd2);
+extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
+extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
+extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
+extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
+extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
+ const struct cred *new,
+ const struct cred *old);
+extern void __audit_log_capset(const struct cred *new, const struct cred *old);
+extern void __audit_mmap_fd(int fd, int flags);
+
+static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_ipc_obj(ipcp);
+}
+static inline void audit_fd_pair(int fd1, int fd2)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_fd_pair(fd1, fd2);
+}
+static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_ipc_set_perm(qbytes, uid, gid, mode);
+}
+static inline void audit_bprm(struct linux_binprm *bprm)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_bprm(bprm);
+}
+static inline int audit_socketcall(int nargs, unsigned long *args)
+{
+ if (unlikely(!audit_dummy_context()))
+ return __audit_socketcall(nargs, args);
+ return 0;
+}
+static inline int audit_sockaddr(int len, void *addr)
+{
+ if (unlikely(!audit_dummy_context()))
+ return __audit_sockaddr(len, addr);
+ return 0;
+}
+static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_mq_open(oflag, mode, attr);
+}
+static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout);
+}
+static inline void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_mq_notify(mqdes, notification);
+}
+static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_mq_getsetattr(mqdes, mqstat);
+}
+
+static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm,
+ const struct cred *new,
+ const struct cred *old)
+{
+ if (unlikely(!audit_dummy_context()))
+ return __audit_log_bprm_fcaps(bprm, new, old);
+ return 0;
+}
+
+static inline void audit_log_capset(const struct cred *new,
+ const struct cred *old)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_log_capset(new, old);
+}
+
+static inline void audit_mmap_fd(int fd, int flags)
+{
+ if (unlikely(!audit_dummy_context()))
+ __audit_mmap_fd(fd, flags);
+}
+
+extern int audit_n_rules;
+extern int audit_signals;
+#else /* CONFIG_AUDITSYSCALL */
+static inline int audit_alloc(struct task_struct *task)
+{
+ return 0;
+}
+static inline void audit_free(struct task_struct *task)
+{ }
+static inline void audit_syscall_entry(int major, unsigned long a0,
+ unsigned long a1, unsigned long a2,
+ unsigned long a3)
+{ }
+static inline void audit_syscall_exit(void *pt_regs)
+{ }
+static inline int audit_dummy_context(void)
+{
+ return 1;
+}
+static inline struct filename *audit_reusename(const __user char *name)
+{
+ return NULL;
+}
+static inline void audit_getname(struct filename *name)
+{ }
+static inline void __audit_inode(struct filename *name,
+ const struct dentry *dentry,
+ unsigned int flags)
+{ }
+static inline void __audit_inode_child(const struct inode *parent,
+ const struct dentry *dentry,
+ const unsigned char type)
+{ }
+static inline void audit_inode(struct filename *name,
+ const struct dentry *dentry,
+ unsigned int parent)
+{ }
+static inline void audit_file(struct file *file)
+{
+}
+static inline void audit_inode_parent_hidden(struct filename *name,
+ const struct dentry *dentry)
+{ }
+static inline void audit_inode_child(const struct inode *parent,
+ const struct dentry *dentry,
+ const unsigned char type)
+{ }
+static inline void audit_core_dumps(long signr)
+{ }
+static inline void __audit_seccomp(unsigned long syscall, long signr, int code)
+{ }
+static inline void audit_seccomp(unsigned long syscall, long signr, int code)
+{ }
+static inline int auditsc_get_stamp(struct audit_context *ctx,
+ struct timespec *t, unsigned int *serial)
+{
+ return 0;
+}
+static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
+{
+ return INVALID_UID;
+}
+static inline unsigned int audit_get_sessionid(struct task_struct *tsk)
+{
+ return -1;
+}
+static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
+{ }
+static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid,
+ gid_t gid, umode_t mode)
+{ }
+static inline void audit_bprm(struct linux_binprm *bprm)
+{ }
+static inline int audit_socketcall(int nargs, unsigned long *args)
+{
+ return 0;
+}
+static inline void audit_fd_pair(int fd1, int fd2)
+{ }
+static inline int audit_sockaddr(int len, void *addr)
+{
+ return 0;
+}
+static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
+{ }
+static inline void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len,
+ unsigned int msg_prio,
+ const struct timespec *abs_timeout)
+{ }
+static inline void audit_mq_notify(mqd_t mqdes,
+ const struct sigevent *notification)
+{ }
+static inline void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
+{ }
+static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm,
+ const struct cred *new,
+ const struct cred *old)
+{
+ return 0;
+}
+static inline void audit_log_capset(const struct cred *new,
+ const struct cred *old)
+{ }
+static inline void audit_mmap_fd(int fd, int flags)
+{ }
+static inline void audit_ptrace(struct task_struct *t)
+{ }
+#define audit_n_rules 0
+#define audit_signals 0
+#endif /* CONFIG_AUDITSYSCALL */
+
+static inline bool audit_loginuid_set(struct task_struct *tsk)
+{
+ return uid_valid(audit_get_loginuid(tsk));
+}
+
+#ifdef CONFIG_AUDIT
+/* These are defined in audit.c */
+ /* Public API */
+extern __printf(4, 5)
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
+ const char *fmt, ...);
+
+extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type);
+extern __printf(2, 3)
+void audit_log_format(struct audit_buffer *ab, const char *fmt, ...);
+extern void audit_log_end(struct audit_buffer *ab);
+extern int audit_string_contains_control(const char *string,
+ size_t len);
+extern void audit_log_n_hex(struct audit_buffer *ab,
+ const unsigned char *buf,
+ size_t len);
+extern void audit_log_n_string(struct audit_buffer *ab,
+ const char *buf,
+ size_t n);
+extern void audit_log_n_untrustedstring(struct audit_buffer *ab,
+ const char *string,
+ size_t n);
+extern void audit_log_untrustedstring(struct audit_buffer *ab,
+ const char *string);
+extern void audit_log_d_path(struct audit_buffer *ab,
+ const char *prefix,
+ const struct path *path);
+extern void audit_log_key(struct audit_buffer *ab,
+ char *key);
+extern void audit_log_link_denied(const char *operation,
+ struct path *link);
+extern void audit_log_lost(const char *message);
+#ifdef CONFIG_SECURITY
+extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
+#else
+static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
+{ }
+#endif
+
+extern int audit_log_task_context(struct audit_buffer *ab);
+extern void audit_log_task_info(struct audit_buffer *ab,
+ struct task_struct *tsk);
+
+extern int audit_update_lsm_rules(void);
+
+ /* Private API (for audit.c only) */
+extern int audit_filter_user(int type);
+extern int audit_filter_type(int type);
+extern int audit_rule_change(int type, __u32 portid, int seq,
+ void *data, size_t datasz);
+extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
+
+extern u32 audit_enabled;
+#else /* CONFIG_AUDIT */
+static inline __printf(4, 5)
+void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
+ const char *fmt, ...)
+{ }
+static inline struct audit_buffer *audit_log_start(struct audit_context *ctx,
+ gfp_t gfp_mask, int type)
+{
+ return NULL;
+}
+static inline __printf(2, 3)
+void audit_log_format(struct audit_buffer *ab, const char *fmt, ...)
+{ }
+static inline void audit_log_end(struct audit_buffer *ab)
+{ }
+static inline void audit_log_n_hex(struct audit_buffer *ab,
+ const unsigned char *buf, size_t len)
+{ }
+static inline void audit_log_n_string(struct audit_buffer *ab,
+ const char *buf, size_t n)
+{ }
+static inline void audit_log_n_untrustedstring(struct audit_buffer *ab,
+ const char *string, size_t n)
+{ }
+static inline void audit_log_untrustedstring(struct audit_buffer *ab,
+ const char *string)
+{ }
+static inline void audit_log_d_path(struct audit_buffer *ab,
+ const char *prefix,
+ const struct path *path)
+{ }
+static inline void audit_log_key(struct audit_buffer *ab, char *key)
+{ }
+static inline void audit_log_link_denied(const char *string,
+ const struct path *link)
+{ }
+static inline void audit_log_secctx(struct audit_buffer *ab, u32 secid)
+{ }
+static inline int audit_log_task_context(struct audit_buffer *ab)
+{
+ return 0;
+}
+static inline void audit_log_task_info(struct audit_buffer *ab,
+ struct task_struct *tsk)
+{ }
+#define audit_enabled 0
+#endif /* CONFIG_AUDIT */
+static inline void audit_log_string(struct audit_buffer *ab, const char *buf)
+{
+ audit_log_n_string(ab, buf, strlen(buf));
+}
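+
+/*
+ * Usage sketch (illustrative only, not from the original header; the record
+ * type AUDIT_SOME_TYPE is a stand-in for a real constant): a typical caller
+ * builds a record with the public API roughly as follows.
+ *
+ *	struct audit_buffer *ab;
+ *
+ *	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_SOME_TYPE);
+ *	if (ab) {
+ *		audit_log_format(ab, "op=%s res=%d", "example", 1);
+ *		audit_log_end(ab);
+ *	}
+ */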
+
+#endif
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
new file mode 100644
index 000000000..850f39b33
--- /dev/null
+++ b/include/linux/auto_dev-ioctl.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _LINUX_AUTO_DEV_IOCTL_H
+#define _LINUX_AUTO_DEV_IOCTL_H
+
+#include <linux/auto_fs.h>
+
+#ifdef __KERNEL__
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif /* __KERNEL__ */
+
+#define AUTOFS_DEVICE_NAME "autofs"
+
+#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
+#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
+
+#define AUTOFS_DEVID_LEN 16
+
+#define AUTOFS_DEV_IOCTL_SIZE sizeof(struct autofs_dev_ioctl)
+
+/*
+ * An ioctl interface for autofs mount point control.
+ */
+
+struct args_protover {
+ __u32 version;
+};
+
+struct args_protosubver {
+ __u32 sub_version;
+};
+
+struct args_openmount {
+ __u32 devid;
+};
+
+struct args_ready {
+ __u32 token;
+};
+
+struct args_fail {
+ __u32 token;
+ __s32 status;
+};
+
+struct args_setpipefd {
+ __s32 pipefd;
+};
+
+struct args_timeout {
+ __u64 timeout;
+};
+
+struct args_requester {
+ __u32 uid;
+ __u32 gid;
+};
+
+struct args_expire {
+ __u32 how;
+};
+
+struct args_askumount {
+ __u32 may_umount;
+};
+
+struct args_ismountpoint {
+ union {
+ struct args_in {
+ __u32 type;
+ } in;
+ struct args_out {
+ __u32 devid;
+ __u32 magic;
+ } out;
+ };
+};
+
+/*
+ * All the ioctls use this structure.
+ * When sending a path, size must account for the total length
+ * of the chunk of memory; otherwise it is the size of the
+ * structure.
+ */
+
+struct autofs_dev_ioctl {
+ __u32 ver_major;
+ __u32 ver_minor;
+ __u32 size; /* total size of data passed in
+ * including this struct */
+ __s32 ioctlfd; /* automount command fd */
+
+ /* Command parameters */
+
+ union {
+ struct args_protover protover;
+ struct args_protosubver protosubver;
+ struct args_openmount openmount;
+ struct args_ready ready;
+ struct args_fail fail;
+ struct args_setpipefd setpipefd;
+ struct args_timeout timeout;
+ struct args_requester requester;
+ struct args_expire expire;
+ struct args_askumount askumount;
+ struct args_ismountpoint ismountpoint;
+ };
+
+ char path[0];
+};
+
+static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
+{
+ memset(in, 0, sizeof(struct autofs_dev_ioctl));
+ in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+ in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+ in->size = sizeof(struct autofs_dev_ioctl);
+ in->ioctlfd = -1;
+ return;
+}
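+
+/*
+ * Sizing sketch (illustrative only; a userspace caller is assumed): when a
+ * path is passed in, the buffer must cover the struct plus the string,
+ * roughly:
+ *
+ *	size_t size = AUTOFS_DEV_IOCTL_SIZE + strlen(path) + 1;
+ *	struct autofs_dev_ioctl *param = malloc(size);
+ *
+ *	init_autofs_dev_ioctl(param);
+ *	param->size = size;
+ *	strcpy(param->path, path);
+ */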
+
+/*
+ * If you change this make sure you make the corresponding change
+ * to autofs-dev-ioctl.c:lookup_ioctl()
+ */
+enum {
+ /* Get various version info */
+ AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD,
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
+
+ /* Open mount ioctl fd */
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
+
+ /* Close mount ioctl fd */
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
+
+ /* Mount/expire status returns */
+ AUTOFS_DEV_IOCTL_READY_CMD,
+ AUTOFS_DEV_IOCTL_FAIL_CMD,
+
+ /* Activate/deactivate autofs mount */
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD,
+
+ /* Expiry timeout */
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
+
+ /* Get mount last requesting uid and gid */
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD,
+
+ /* Check for eligible expire candidates */
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD,
+
+ /* Request busy status */
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
+
+ /* Check if path is a mountpoint */
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
+};
+
+#define AUTOFS_IOCTL 0x93
+
+#define AUTOFS_DEV_IOCTL_VERSION \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOVER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_OPENMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_READY \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_FAIL \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_SETPIPEFD \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CATATONIC \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_TIMEOUT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_REQUESTER \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_EXPIRE \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
+ _IOWR(AUTOFS_IOCTL, \
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
+
+#endif /* _LINUX_AUTO_DEV_IOCTL_H */
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
new file mode 100644
index 000000000..fcd704d35
--- /dev/null
+++ b/include/linux/auto_fs.h
@@ -0,0 +1,20 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * linux/include/linux/auto_fs.h
+ *
+ * Copyright 1997 Transmeta Corporation - All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+#ifndef _LINUX_AUTO_FS_H
+#define _LINUX_AUTO_FS_H
+
+#include <linux/fs.h>
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <uapi/linux/auto_fs.h>
+#endif /* _LINUX_AUTO_FS_H */
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
new file mode 100644
index 000000000..3e0fbe441
--- /dev/null
+++ b/include/linux/auxvec.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_AUXVEC_H
+#define _LINUX_AUXVEC_H
+
+#include <uapi/linux/auxvec.h>
+
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
+ /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
+#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/average.h b/include/linux/average.h
new file mode 100644
index 000000000..c6028fd74
--- /dev/null
+++ b/include/linux/average.h
@@ -0,0 +1,30 @@
+#ifndef _LINUX_AVERAGE_H
+#define _LINUX_AVERAGE_H
+
+/* Exponentially weighted moving average (EWMA) */
+
+/* For more documentation see lib/average.c */
+
+struct ewma {
+ unsigned long internal;
+ unsigned long factor;
+ unsigned long weight;
+};
+
+extern void ewma_init(struct ewma *avg, unsigned long factor,
+ unsigned long weight);
+
+extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
+
+/**
+ * ewma_read() - Get average value
+ * @avg: Average structure
+ *
+ * Returns the average value held in @avg.
+ */
+static inline unsigned long ewma_read(const struct ewma *avg)
+{
+ return avg->internal >> avg->factor;
+}
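+
+/*
+ * Usage sketch (illustrative only): factor scales the stored value for extra
+ * precision and weight controls how strongly new samples are smoothed (both
+ * are expected to be powers of two), so a caller might do roughly:
+ *
+ *	struct ewma avg;
+ *
+ *	ewma_init(&avg, 1024, 8);
+ *	ewma_add(&avg, sample);
+ *	value = ewma_read(&avg);
+ */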
+
+#endif /* _LINUX_AVERAGE_H */
diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h
new file mode 100644
index 000000000..12a867c60
--- /dev/null
+++ b/include/linux/b1pcmcia.h
@@ -0,0 +1,21 @@
+/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $
+ *
+ * Exported functions of module b1pcmcia to be called by
+ * avm_cs card services module.
+ *
+ * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef _B1PCMCIA_H_
+#define _B1PCMCIA_H_
+
+int b1pcmcia_addcard_b1(unsigned int port, unsigned irq);
+int b1pcmcia_addcard_m1(unsigned int port, unsigned irq);
+int b1pcmcia_addcard_m2(unsigned int port, unsigned irq);
+int b1pcmcia_delcard(unsigned int port, unsigned irq);
+
+#endif /* _B1PCMCIA_H_ */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
new file mode 100644
index 000000000..d87d8eced
--- /dev/null
+++ b/include/linux/backing-dev.h
@@ -0,0 +1,321 @@
+/*
+ * include/linux/backing-dev.h
+ *
+ * low-level device information and state which is propagated up through
+ * to high-level code.
+ */
+
+#ifndef _LINUX_BACKING_DEV_H
+#define _LINUX_BACKING_DEV_H
+
+#include <linux/percpu_counter.h>
+#include <linux/log2.h>
+#include <linux/flex_proportions.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/writeback.h>
+#include <linux/atomic.h>
+#include <linux/sysctl.h>
+#include <linux/workqueue.h>
+
+struct page;
+struct device;
+struct dentry;
+
+/*
+ * Bits in backing_dev_info.state
+ */
+enum bdi_state {
+ BDI_async_congested, /* The async (write) queue is getting full */
+ BDI_sync_congested, /* The sync queue is getting full */
+ BDI_registered, /* bdi_register() was done */
+ BDI_writeback_running, /* Writeback is in progress */
+};
+
+typedef int (congested_fn)(void *, int);
+
+enum bdi_stat_item {
+ BDI_RECLAIMABLE,
+ BDI_WRITEBACK,
+ BDI_DIRTIED,
+ BDI_WRITTEN,
+ NR_BDI_STAT_ITEMS
+};
+
+#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+
+struct bdi_writeback {
+ struct backing_dev_info *bdi; /* our parent bdi */
+
+ unsigned long last_old_flush; /* last old data flush */
+
+ struct delayed_work dwork; /* work item used for writeback */
+ struct list_head b_dirty; /* dirty inodes */
+ struct list_head b_io; /* parked for writeback */
+ struct list_head b_more_io; /* parked for more writeback */
+ struct list_head b_dirty_time; /* time stamps are dirty */
+ spinlock_t list_lock; /* protects the b_* lists */
+};
+
+struct backing_dev_info {
+ struct list_head bdi_list;
+ unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
+ unsigned long state; /* Always use atomic bitops on this */
+ unsigned int capabilities; /* Device capabilities */
+ congested_fn *congested_fn; /* Function pointer if device is md/dm */
+ void *congested_data; /* Pointer to aux data for congested func */
+
+ char *name;
+
+ struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
+
+ unsigned long bw_time_stamp; /* last time write bw is updated */
+ unsigned long dirtied_stamp;
+ unsigned long written_stamp; /* pages written at bw_time_stamp */
+ unsigned long write_bandwidth; /* the estimated write bandwidth */
+ unsigned long avg_write_bandwidth; /* further smoothed write bw */
+
+ /*
+ * The base dirty throttle rate, re-calculated every 200ms.
+ * All the bdi tasks' dirty rate will be curbed under it.
+ * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
+ * in small steps and is much more smooth/stable than the latter.
+ */
+ unsigned long dirty_ratelimit;
+ unsigned long balanced_dirty_ratelimit;
+
+ struct fprop_local_percpu completions;
+ int dirty_exceeded;
+
+ unsigned int min_ratio;
+ unsigned int max_ratio, max_prop_frac;
+
+ struct bdi_writeback wb; /* default writeback info for this bdi */
+ spinlock_t wb_lock; /* protects work_list & wb.dwork scheduling */
+
+ struct list_head work_list;
+
+ struct device *dev;
+
+ struct timer_list laptop_mode_wb_timer;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debug_dir;
+ struct dentry *debug_stats;
+#endif
+};
+
+struct backing_dev_info *inode_to_bdi(struct inode *inode);
+
+int __must_check bdi_init(struct backing_dev_info *bdi);
+void bdi_destroy(struct backing_dev_info *bdi);
+
+__printf(3, 4)
+int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+ const char *fmt, ...);
+int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+ enum wb_reason reason);
+void bdi_start_background_writeback(struct backing_dev_info *bdi);
+void bdi_writeback_workfn(struct work_struct *work);
+int bdi_has_dirty_io(struct backing_dev_info *bdi);
+void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
+
+extern spinlock_t bdi_lock;
+extern struct list_head bdi_list;
+
+extern struct workqueue_struct *bdi_wq;
+
+static inline int wb_has_dirty_io(struct bdi_writeback *wb)
+{
+ return !list_empty(&wb->b_dirty) ||
+ !list_empty(&wb->b_io) ||
+ !list_empty(&wb->b_more_io);
+}
+
+static inline void __add_bdi_stat(struct backing_dev_info *bdi,
+ enum bdi_stat_item item, s64 amount)
+{
+ __percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
+}
+
+static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
+ enum bdi_stat_item item)
+{
+ __add_bdi_stat(bdi, item, 1);
+}
+
+static inline void inc_bdi_stat(struct backing_dev_info *bdi,
+ enum bdi_stat_item item)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __inc_bdi_stat(bdi, item);
+ local_irq_restore(flags);
+}
+
+static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
+ enum bdi_stat_item item)
+{
+ __add_bdi_stat(bdi, item, -1);
+}
+
+static inline void dec_bdi_stat(struct backing_dev_info *bdi,
+ enum bdi_stat_item item)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __dec_bdi_stat(bdi, item);
+ local_irq_restore(flags);
+}
+
+static inline s64 bdi_stat(struct backing_dev_info *bdi,
+ enum bdi_stat_item item)
+{
+ return percpu_counter_read_positive(&bdi->bdi_stat[item]);
+}
+
+static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
+ enum bdi_stat_item item)
+{
+ return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
+}
+
+static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
+ enum bdi_stat_item item)
+{
+ s64 sum;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sum = __bdi_stat_sum(bdi, item);
+ local_irq_restore(flags);
+
+ return sum;
+}
+
+extern void bdi_writeout_inc(struct backing_dev_info *bdi);
+
+/*
+ * maximal error of a stat counter.
+ */
+static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
+{
+#ifdef CONFIG_SMP
+ return nr_cpu_ids * BDI_STAT_BATCH;
+#else
+ return 1;
+#endif
+}
+
+int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
+int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
+
+/*
+ * Flags in backing_dev_info::capability
+ *
+ * The first three flags control whether dirty pages will contribute to the
+ * VM's accounting and whether writepages() should be called for dirty pages
+ * (something that would not, for example, be appropriate for ramfs)
+ *
+ * WARNING: these flags are closely related and should not normally be
+ * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
+ * three flags into a single convenience macro.
+ *
+ * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
+ * BDI_CAP_NO_WRITEBACK: Don't write pages back
+ * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
+ * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
+ */
+#define BDI_CAP_NO_ACCT_DIRTY 0x00000001
+#define BDI_CAP_NO_WRITEBACK 0x00000002
+#define BDI_CAP_NO_ACCT_WB 0x00000004
+#define BDI_CAP_STABLE_WRITES 0x00000008
+#define BDI_CAP_STRICTLIMIT 0x00000010
+
+#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
+ (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
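+
+/*
+ * Capability sketch (illustrative only): a ramfs-like backing device that
+ * should neither write pages back nor contribute to dirty accounting might
+ * be set up roughly as:
+ *
+ *	err = bdi_init(bdi);
+ *	if (!err)
+ *		bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK;
+ */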
+
+extern struct backing_dev_info noop_backing_dev_info;
+
+int writeback_in_progress(struct backing_dev_info *bdi);
+
+static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
+{
+ if (bdi->congested_fn)
+ return bdi->congested_fn(bdi->congested_data, bdi_bits);
+ return (bdi->state & bdi_bits);
+}
+
+static inline int bdi_read_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, 1 << BDI_sync_congested);
+}
+
+static inline int bdi_write_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, 1 << BDI_async_congested);
+}
+
+static inline int bdi_rw_congested(struct backing_dev_info *bdi)
+{
+ return bdi_congested(bdi, (1 << BDI_sync_congested) |
+ (1 << BDI_async_congested));
+}
+
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
+void set_bdi_congested(struct backing_dev_info *bdi, int sync);
+long congestion_wait(int sync, long timeout);
+long wait_iff_congested(struct zone *zone, int sync, long timeout);
+int pdflush_proc_obsolete(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
+static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
+{
+ return bdi->capabilities & BDI_CAP_STABLE_WRITES;
+}
+
+static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
+{
+ return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
+}
+
+static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
+{
+ return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
+}
+
+static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
+{
+ /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
+ return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
+ BDI_CAP_NO_WRITEBACK));
+}
+
+static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
+{
+ return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
+}
+
+static inline bool mapping_cap_account_dirty(struct address_space *mapping)
+{
+ return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
+}
+
+static inline int bdi_sched_wait(void *word)
+{
+ schedule();
+ return 0;
+}
+
+#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
new file mode 100644
index 000000000..adb14a861
--- /dev/null
+++ b/include/linux/backlight.h
@@ -0,0 +1,170 @@
+/*
+ * Backlight Lowlevel Control Abstraction
+ *
+ * Copyright (C) 2003,2004 Hewlett-Packard Company
+ *
+ */
+
+#ifndef _LINUX_BACKLIGHT_H
+#define _LINUX_BACKLIGHT_H
+
+#include <linux/device.h>
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+
+/* Notes on locking:
+ *
+ * backlight_device->ops_lock is an internal backlight lock protecting the
+ * ops pointer and no code outside the core should need to touch it.
+ *
+ * Access to update_status() is serialised by the update_lock mutex since
+ * most drivers seem to need this and historically get it wrong.
+ *
+ * Most drivers don't need locking on their get_brightness() method.
+ * If yours does, you need to implement it in the driver. You can use the
+ * update_lock mutex if appropriate.
+ *
+ * Any other use of the locks below is probably wrong.
+ */
+
+enum backlight_update_reason {
+ BACKLIGHT_UPDATE_HOTKEY,
+ BACKLIGHT_UPDATE_SYSFS,
+};
+
+enum backlight_type {
+ BACKLIGHT_RAW = 1,
+ BACKLIGHT_PLATFORM,
+ BACKLIGHT_FIRMWARE,
+ BACKLIGHT_TYPE_MAX,
+};
+
+enum backlight_notification {
+ BACKLIGHT_REGISTERED,
+ BACKLIGHT_UNREGISTERED,
+};
+
+struct backlight_device;
+struct fb_info;
+
+struct backlight_ops {
+ unsigned int options;
+
+#define BL_CORE_SUSPENDRESUME (1 << 0)
+
+ /* Notify the backlight driver some property has changed */
+ int (*update_status)(struct backlight_device *);
+ /* Return the current backlight brightness (accounting for power,
+ fb_blank etc.) */
+ int (*get_brightness)(struct backlight_device *);
+ /* Check if given framebuffer device is the one bound to this backlight;
+ return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
+ int (*check_fb)(struct backlight_device *, struct fb_info *);
+};
+
+/* This structure defines all the properties of a backlight */
+struct backlight_properties {
+ /* Current User requested brightness (0 - max_brightness) */
+ int brightness;
+ /* Maximal value for brightness (read-only) */
+ int max_brightness;
+ /* Current FB Power mode (0: full on, 1..3: power saving
+ modes; 4: full off), see FB_BLANK_XXX */
+ int power;
+ /* FB Blanking active? (values as for power) */
+ /* Due to be removed, please use (state & BL_CORE_FBBLANK) */
+ int fb_blank;
+ /* Backlight type */
+ enum backlight_type type;
+ /* Flags used to signal drivers of state changes */
+ /* Upper 4 bits are reserved for driver internal use */
+ unsigned int state;
+
+#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
+#define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */
+#define BL_CORE_DRIVER4 (1 << 28) /* reserved for driver specific use */
+#define BL_CORE_DRIVER3 (1 << 29) /* reserved for driver specific use */
+#define BL_CORE_DRIVER2 (1 << 30) /* reserved for driver specific use */
+#define BL_CORE_DRIVER1 (1 << 31) /* reserved for driver specific use */
+
+};
+
+struct backlight_device {
+ /* Backlight properties */
+ struct backlight_properties props;
+
+ /* Serialise access to update_status method */
+ struct mutex update_lock;
+
+ /* This protects the 'ops' field. If 'ops' is NULL, the driver that
+ registered this device has been unloaded, and if class_get_devdata()
+ points to something in the body of that driver, it is also invalid. */
+ struct mutex ops_lock;
+ const struct backlight_ops *ops;
+
+ /* The framebuffer notifier block */
+ struct notifier_block fb_notif;
+
+ /* list entry of all registered backlight devices */
+ struct list_head entry;
+
+ struct device dev;
+
+ /* Multiple framebuffers may share one backlight device */
+ bool fb_bl_on[FB_MAX];
+
+ int use_count;
+};
+
+static inline void backlight_update_status(struct backlight_device *bd)
+{
+ mutex_lock(&bd->update_lock);
+ if (bd->ops && bd->ops->update_status)
+ bd->ops->update_status(bd);
+ mutex_unlock(&bd->update_lock);
+}
+
+extern struct backlight_device *backlight_device_register(const char *name,
+ struct device *dev, void *devdata, const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+extern struct backlight_device *devm_backlight_device_register(
+ struct device *dev, const char *name, struct device *parent,
+ void *devdata, const struct backlight_ops *ops,
+ const struct backlight_properties *props);
+extern void backlight_device_unregister(struct backlight_device *bd);
+extern void devm_backlight_device_unregister(struct device *dev,
+ struct backlight_device *bd);
+extern void backlight_force_update(struct backlight_device *bd,
+ enum backlight_update_reason reason);
+extern bool backlight_device_registered(enum backlight_type type);
+extern int backlight_register_notifier(struct notifier_block *nb);
+extern int backlight_unregister_notifier(struct notifier_block *nb);
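+
+/*
+ * Registration sketch (illustrative only; my_dev, my_drvdata and my_ops are
+ * placeholders): a driver typically fills in backlight_properties, registers
+ * the device and then pokes it through backlight_update_status().
+ *
+ *	struct backlight_properties props;
+ *	struct backlight_device *bd;
+ *
+ *	memset(&props, 0, sizeof(props));
+ *	props.type = BACKLIGHT_RAW;
+ *	props.max_brightness = 255;
+ *	bd = backlight_device_register("example", my_dev, my_drvdata,
+ *				       &my_ops, &props);
+ *	if (!IS_ERR(bd)) {
+ *		bd->props.brightness = 128;
+ *		backlight_update_status(bd);
+ *	}
+ */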
+
+#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
+
+static inline void * bl_get_data(struct backlight_device *bl_dev)
+{
+ return dev_get_drvdata(&bl_dev->dev);
+}
+
+struct generic_bl_info {
+ const char *name;
+ int max_intensity;
+ int default_intensity;
+ int limit_mask;
+ void (*set_bl_intensity)(int intensity);
+ void (*kick_battery)(void);
+};
+
+#ifdef CONFIG_OF
+struct backlight_device *of_find_backlight_by_node(struct device_node *node);
+#else
+static inline struct backlight_device *
+of_find_backlight_by_node(struct device_node *node)
+{
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
new file mode 100644
index 000000000..9b0a15d06
--- /dev/null
+++ b/include/linux/balloon_compaction.h
@@ -0,0 +1,216 @@
+/*
+ * include/linux/balloon_compaction.h
+ *
+ * Common interface definitions for making balloon pages movable by compaction.
+ *
+ * Although it is perfectly possible to migrate ballooned pages, they are a
+ * special corner case for compaction scans because balloon pages are not on
+ * any LRU list, unlike the other pages we compact / migrate.
+ *
+ * Because the page isolation scan a compaction thread performs is a lockless
+ * procedure (from a page standpoint), it can lead to racy situations while
+ * performing balloon page compaction. In order to sort out these racy scenarios
+ * and safely perform balloon page compaction and migration we must always
+ * ensure we follow these simple rules:
+ *
+ * i. when updating a balloon's page ->mapping element, strictly do it under
+ * the following lock order, independently of the far superior
+ * locking scheme (lru_lock, balloon_lock):
+ * +-page_lock(page);
+ * +--spin_lock_irq(&b_dev_info->pages_lock);
+ * ... page->mapping updates here ...
+ *
+ * ii. before isolating or dequeueing a balloon page from the balloon device
+ * pages list, the page reference counter must be raised by one and the
+ * extra refcount must be dropped when the page is enqueued back into
+ * the balloon device page list, thus a balloon page keeps its reference
+ * counter raised only while it is under our special handling;
+ *
+ * iii. after the lockless scan step has selected a potential balloon page for
+ * isolation, re-test the PageBalloon mark and the PagePrivate flag
+ * under the proper page lock, to ensure isolating a valid balloon page
+ * (not yet isolated, nor under release procedure)
+ *
+ * iv. isolation or dequeueing procedure must clear PagePrivate flag under
+ * page lock together with removing page from balloon device page list.
+ *
+ * The functions provided by this interface help to cope with the
+ * aforementioned balloon page corner case, and to ensure the simple set of
+ * exposed rules is satisfied while dealing with balloon page
+ * compaction / migration.
+ *
+ * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
+ */
+#ifndef _LINUX_BALLOON_COMPACTION_H
+#define _LINUX_BALLOON_COMPACTION_H
+#include <linux/pagemap.h>
+#include <linux/page-flags.h>
+#include <linux/migrate.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+/*
+ * Balloon device information descriptor.
+ * This struct is used to allow the common balloon compaction interface
+ * procedures to find the proper balloon device holding memory pages they'll
+ * have to cope with for page compaction / migration; it also serves the
+ * balloon driver as a page book-keeper for its registered balloon devices.
+ */
+struct balloon_dev_info {
+ unsigned long isolated_pages; /* # of isolated pages for migration */
+ spinlock_t pages_lock; /* Protection to pages list */
+ struct list_head pages; /* Pages enqueued & handled to Host */
+ int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
+};
+
+extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
+extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
+
+static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
+{
+ balloon->isolated_pages = 0;
+ spin_lock_init(&balloon->pages_lock);
+ INIT_LIST_HEAD(&balloon->pages);
+ balloon->migratepage = NULL;
+}
+
+#ifdef CONFIG_BALLOON_COMPACTION
+extern bool balloon_page_isolate(struct page *page);
+extern void balloon_page_putback(struct page *page);
+extern int balloon_page_migrate(struct page *newpage,
+ struct page *page, enum migrate_mode mode);
+
+/*
+ * __is_movable_balloon_page - helper to perform @page PageBalloon tests
+ */
+static inline bool __is_movable_balloon_page(struct page *page)
+{
+ return PageBalloon(page);
+}
+
+/*
+ * balloon_page_movable - test PageBalloon to identify balloon pages
+ * and PagePrivate to check that the page is not
+ * isolated and can be moved by compaction/migration.
+ *
+ * As we might return false positives in the case of a balloon page being just
+ * released under us, this needs to be re-tested later, under the page lock.
+ */
+static inline bool balloon_page_movable(struct page *page)
+{
+ return PageBalloon(page) && PagePrivate(page);
+}
+
+/*
+ * isolated_balloon_page - identify an isolated balloon page on private
+ * compaction/migration page lists.
+ */
+static inline bool isolated_balloon_page(struct page *page)
+{
+ return PageBalloon(page);
+}
+
+/*
+ * balloon_page_insert - insert a page into the balloon's page list and make
+ * the page->private assignment accordingly.
+ * @balloon : pointer to balloon device
+ * @page : page to be assigned as a 'balloon page'
+ *
+ * Caller must ensure the page is locked and the spin_lock protecting balloon
+ * pages list is held before inserting a page into the balloon device.
+ */
+static inline void balloon_page_insert(struct balloon_dev_info *balloon,
+ struct page *page)
+{
+ __SetPageBalloon(page);
+ SetPagePrivate(page);
+ set_page_private(page, (unsigned long)balloon);
+ list_add(&page->lru, &balloon->pages);
+}
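+
+/*
+ * Lock-ordering sketch (illustrative only), matching rule i. above: a balloon
+ * driver enqueueing a freshly allocated page would roughly do:
+ *
+ *	lock_page(page);
+ *	spin_lock_irqsave(&balloon->pages_lock, flags);
+ *	balloon_page_insert(balloon, page);
+ *	spin_unlock_irqrestore(&balloon->pages_lock, flags);
+ *	unlock_page(page);
+ */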
+
+/*
+ * balloon_page_delete - delete a page from balloon's page list and clear
+ * the page->private assignment accordingly.
+ * @page : page to be released from balloon's page list
+ *
+ * Caller must ensure the page is locked and the spin_lock protecting balloon
+ * pages list is held before deleting a page from the balloon device.
+ */
+static inline void balloon_page_delete(struct page *page)
+{
+ __ClearPageBalloon(page);
+ set_page_private(page, 0);
+ if (PagePrivate(page)) {
+ ClearPagePrivate(page);
+ list_del(&page->lru);
+ }
+}
+
+/*
+ * balloon_page_device - get the b_dev_info descriptor for the balloon device
+ * that enqueues the given page.
+ */
+static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+{
+ return (struct balloon_dev_info *)page_private(page);
+}
+
+static inline gfp_t balloon_mapping_gfp_mask(void)
+{
+ return GFP_HIGHUSER_MOVABLE;
+}
+
+#else /* !CONFIG_BALLOON_COMPACTION */
+
+static inline void balloon_page_insert(struct balloon_dev_info *balloon,
+ struct page *page)
+{
+ __SetPageBalloon(page);
+ list_add(&page->lru, &balloon->pages);
+}
+
+static inline void balloon_page_delete(struct page *page)
+{
+ __ClearPageBalloon(page);
+ list_del(&page->lru);
+}
+
+static inline bool __is_movable_balloon_page(struct page *page)
+{
+ return false;
+}
+
+static inline bool balloon_page_movable(struct page *page)
+{
+ return false;
+}
+
+static inline bool isolated_balloon_page(struct page *page)
+{
+ return false;
+}
+
+static inline bool balloon_page_isolate(struct page *page)
+{
+ return false;
+}
+
+static inline void balloon_page_putback(struct page *page)
+{
+ return;
+}
+
+static inline int balloon_page_migrate(struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{
+ return 0;
+}
+
+static inline gfp_t balloon_mapping_gfp_mask(void)
+{
+ return GFP_HIGHUSER;
+}
+
+#endif /* CONFIG_BALLOON_COMPACTION */
+#endif /* _LINUX_BALLOON_COMPACTION_H */
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
new file mode 100644
index 000000000..0e97856b2
--- /dev/null
+++ b/include/linux/basic_mmio_gpio.h
@@ -0,0 +1,78 @@
+/*
+ * Basic memory-mapped GPIO controllers.
+ *
+ * Copyright 2008 MontaVista Software, Inc.
+ * Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BASIC_MMIO_GPIO_H
+#define __BASIC_MMIO_GPIO_H
+
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/spinlock_types.h>
+
+struct bgpio_pdata {
+ const char *label;
+ int base;
+ int ngpio;
+};
+
+struct device;
+
+struct bgpio_chip {
+ struct gpio_chip gc;
+
+ unsigned long (*read_reg)(void __iomem *reg);
+ void (*write_reg)(void __iomem *reg, unsigned long data);
+
+ void __iomem *reg_dat;
+ void __iomem *reg_set;
+ void __iomem *reg_clr;
+ void __iomem *reg_dir;
+
+ /* Number of bits (GPIOs): <register width> * 8. */
+ int bits;
+
+ /*
+ * Some GPIO controllers work with the big-endian bits notation,
+ * e.g. in an 8-bit register, GPIO7 is the least significant bit.
+ */
+ unsigned long (*pin2mask)(struct bgpio_chip *bgc, unsigned int pin);
+
+ /*
+ * Used to lock bgpio_chip->data. Also, this is needed to keep
+ * shadowed and real data register writes together.
+ */
+ spinlock_t lock;
+
+ /* Shadowed data register to clear/set bits safely. */
+ unsigned long data;
+
+ /* Shadowed direction registers to clear/set direction safely. */
+ unsigned long dir;
+};
+
+static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct bgpio_chip, gc);
+}
+
+int bgpio_remove(struct bgpio_chip *bgc);
+int bgpio_init(struct bgpio_chip *bgc, struct device *dev,
+ unsigned long sz, void __iomem *dat, void __iomem *set,
+ void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
+ unsigned long flags);
+
+#define BGPIOF_BIG_ENDIAN BIT(0)
+#define BGPIOF_UNREADABLE_REG_SET BIT(1) /* reg_set is unreadable */
+#define BGPIOF_UNREADABLE_REG_DIR BIT(2) /* reg_dir is unreadable */
+#define BGPIOF_BIG_ENDIAN_BYTE_ORDER BIT(3)
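+
+/*
+ * Setup sketch (illustrative only; dat, set, clr and dirout are placeholder
+ * __iomem pointers): a controller with a 32-bit data register and separate
+ * set/clear registers might be initialised roughly as:
+ *
+ *	struct bgpio_chip bgc;
+ *
+ *	err = bgpio_init(&bgc, dev, 4, dat, set, clr, dirout, NULL, 0);
+ *	if (!err)
+ *		err = gpiochip_add(&bgc.gc);
+ */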
+
+#endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
new file mode 100644
index 000000000..18fff11fb
--- /dev/null
+++ b/include/linux/bcd.h
@@ -0,0 +1,22 @@
+#ifndef _BCD_H
+#define _BCD_H
+
+#include <linux/compiler.h>
+
+#define bcd2bin(x) \
+ (__builtin_constant_p((u8 )(x)) ? \
+ const_bcd2bin(x) : \
+ _bcd2bin(x))
+
+#define bin2bcd(x) \
+ (__builtin_constant_p((u8 )(x)) ? \
+ const_bin2bcd(x) : \
+ _bin2bcd(x))
+
+#define const_bcd2bin(x) (((x) & 0x0f) + ((x) >> 4) * 10)
+#define const_bin2bcd(x) ((((x) / 10) << 4) + (x) % 10)
+
+unsigned _bcd2bin(unsigned char val) __attribute_const__;
+unsigned char _bin2bcd(unsigned val) __attribute_const__;
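+
+/*
+ * Worked example: bcd2bin(0x42) expands to (0x42 & 0x0f) + (0x42 >> 4) * 10
+ * = 2 + 40 = 42, and bin2bcd(42) gives ((42 / 10) << 4) + 42 % 10 = 0x42.
+ * Constant arguments are folded at compile time; runtime values go through
+ * _bcd2bin()/_bin2bcd().
+ */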
+
+#endif /* _BCD_H */
diff --git a/include/linux/bch.h b/include/linux/bch.h
new file mode 100644
index 000000000..295b4ef15
--- /dev/null
+++ b/include/linux/bch.h
@@ -0,0 +1,79 @@
+/*
+ * Generic binary BCH encoding/decoding library
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 51
+ * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Copyright © 2011 Parrot S.A.
+ *
+ * Author: Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * Description:
+ *
+ * This library provides runtime configurable encoding/decoding of binary
+ * Bose-Chaudhuri-Hocquenghem (BCH) codes.
+*/
+#ifndef _BCH_H
+#define _BCH_H
+
+#include <linux/types.h>
+
+/**
+ * struct bch_control - BCH control structure
+ * @m: Galois field order
+ * @n: maximum codeword size in bits (= 2^m-1)
+ * @t: error correction capability in bits
+ * @ecc_bits: ecc exact size in bits, i.e. generator polynomial degree (<=m*t)
+ * @ecc_bytes: ecc max size (m*t bits) in bytes
+ * @a_pow_tab: Galois field GF(2^m) exponentiation lookup table
+ * @a_log_tab: Galois field GF(2^m) log lookup table
+ * @mod8_tab: remainder generator polynomial lookup tables
+ * @ecc_buf: ecc parity words buffer
+ * @ecc_buf2: ecc parity words buffer
+ * @xi_tab: GF(2^m) base for solving degree 2 polynomial roots
+ * @syn: syndrome buffer
+ * @cache: log-based polynomial representation buffer
+ * @elp: error locator polynomial
+ * @poly_2t: temporary polynomials of degree 2t
+ */
+struct bch_control {
+ unsigned int m;
+ unsigned int n;
+ unsigned int t;
+ unsigned int ecc_bits;
+ unsigned int ecc_bytes;
+/* private: */
+ uint16_t *a_pow_tab;
+ uint16_t *a_log_tab;
+ uint32_t *mod8_tab;
+ uint32_t *ecc_buf;
+ uint32_t *ecc_buf2;
+ unsigned int *xi_tab;
+ unsigned int *syn;
+ int *cache;
+ struct gf_poly *elp;
+ struct gf_poly *poly_2t[4];
+};
+
+struct bch_control *init_bch(int m, int t, unsigned int prim_poly);
+
+void free_bch(struct bch_control *bch);
+
+void encode_bch(struct bch_control *bch, const uint8_t *data,
+ unsigned int len, uint8_t *ecc);
+
+int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len,
+ const uint8_t *recv_ecc, const uint8_t *calc_ecc,
+ const unsigned int *syn, unsigned int *errloc);
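+
+/*
+ * Usage sketch (illustrative only; m and t are layout-specific choices): the
+ * ecc buffer is cleared before encoding, and the optional decode inputs may
+ * be left NULL so the syndromes are computed from data and recv_ecc:
+ *
+ *	struct bch_control *bch = init_bch(13, 4, 0);
+ *
+ *	memset(ecc, 0, bch->ecc_bytes);
+ *	encode_bch(bch, data, len, ecc);
+ *	nerr = decode_bch(bch, data, len, recv_ecc, NULL, NULL, errloc);
+ *	free_bch(bch);
+ */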
+
+#endif /* _BCH_H */
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
new file mode 100644
index 000000000..b12b07e75
--- /dev/null
+++ b/include/linux/bcm47xx_nvram.h
@@ -0,0 +1,34 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __BCM47XX_NVRAM_H
+#define __BCM47XX_NVRAM_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_BCM47XX
+int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
+int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
+int bcm47xx_nvram_gpio_pin(const char *name);
+#else
+static inline int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
+{
+ return -ENOTSUPP;
+}
+static inline int bcm47xx_nvram_getenv(const char *name, char *val,
+ size_t val_len)
+{
+ return -ENOTSUPP;
+}
+static inline int bcm47xx_nvram_gpio_pin(const char *name)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#endif /* __BCM47XX_NVRAM_H */
diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h
new file mode 100644
index 000000000..5582c211f
--- /dev/null
+++ b/include/linux/bcm47xx_wdt.h
@@ -0,0 +1,29 @@
+#ifndef LINUX_BCM47XX_WDT_H_
+#define LINUX_BCM47XX_WDT_H_
+
+#include <linux/notifier.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+
+struct bcm47xx_wdt {
+ u32 (*timer_set)(struct bcm47xx_wdt *, u32);
+ u32 (*timer_set_ms)(struct bcm47xx_wdt *, u32);
+ u32 max_timer_ms;
+
+ void *driver_data;
+
+ struct watchdog_device wdd;
+ struct notifier_block notifier;
+ struct notifier_block restart_handler;
+
+ struct timer_list soft_timer;
+ atomic_t soft_ticks;
+};
+
+static inline void *bcm47xx_wdt_get_drvdata(struct bcm47xx_wdt *wdt)
+{
+ return wdt->driver_data;
+}
+#endif /* LINUX_BCM47XX_WDT_H_ */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
new file mode 100644
index 000000000..e34f90664
--- /dev/null
+++ b/include/linux/bcma/bcma.h
@@ -0,0 +1,474 @@
+#ifndef LINUX_BCMA_H_
+#define LINUX_BCMA_H_
+
+#include <linux/pci.h>
+#include <linux/mod_devicetable.h>
+
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/bcma/bcma_driver_pci.h>
+#include <linux/bcma/bcma_driver_pcie2.h>
+#include <linux/bcma/bcma_driver_mips.h>
+#include <linux/bcma/bcma_driver_gmac_cmn.h>
+#include <linux/ssb/ssb.h> /* SPROM sharing */
+
+#include <linux/bcma/bcma_regs.h>
+
+struct bcma_device;
+struct bcma_bus;
+
+enum bcma_hosttype {
+ BCMA_HOSTTYPE_PCI,
+ BCMA_HOSTTYPE_SDIO,
+ BCMA_HOSTTYPE_SOC,
+};
+
+struct bcma_chipinfo {
+ u16 id;
+ u8 rev;
+ u8 pkg;
+};
+
+struct bcma_boardinfo {
+ u16 vendor;
+ u16 type;
+};
+
+enum bcma_clkmode {
+ BCMA_CLKMODE_FAST,
+ BCMA_CLKMODE_DYNAMIC,
+};
+
+struct bcma_host_ops {
+ u8 (*read8)(struct bcma_device *core, u16 offset);
+ u16 (*read16)(struct bcma_device *core, u16 offset);
+ u32 (*read32)(struct bcma_device *core, u16 offset);
+ void (*write8)(struct bcma_device *core, u16 offset, u8 value);
+ void (*write16)(struct bcma_device *core, u16 offset, u16 value);
+ void (*write32)(struct bcma_device *core, u16 offset, u32 value);
+#ifdef CONFIG_BCMA_BLOCKIO
+ void (*block_read)(struct bcma_device *core, void *buffer,
+ size_t count, u16 offset, u8 reg_width);
+ void (*block_write)(struct bcma_device *core, const void *buffer,
+ size_t count, u16 offset, u8 reg_width);
+#endif
+ /* Agent ops */
+ u32 (*aread32)(struct bcma_device *core, u16 offset);
+ void (*awrite32)(struct bcma_device *core, u16 offset, u32 value);
+};
+
+/* Core manufacturers */
+#define BCMA_MANUF_ARM 0x43B
+#define BCMA_MANUF_MIPS 0x4A7
+#define BCMA_MANUF_BCM 0x4BF
+
+/* Core class values. */
+#define BCMA_CL_SIM 0x0
+#define BCMA_CL_EROM 0x1
+#define BCMA_CL_CORESIGHT 0x9
+#define BCMA_CL_VERIF 0xB
+#define BCMA_CL_OPTIMO 0xD
+#define BCMA_CL_GEN 0xE
+#define BCMA_CL_PRIMECELL 0xF
+
+/* Core-ID values. */
+#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */
+#define BCMA_CORE_4706_CHIPCOMMON 0x500
+#define BCMA_CORE_NS_PCIEG2 0x501
+#define BCMA_CORE_NS_DMA 0x502
+#define BCMA_CORE_NS_SDIO3 0x503
+#define BCMA_CORE_NS_USB20 0x504
+#define BCMA_CORE_NS_USB30 0x505
+#define BCMA_CORE_NS_A9JTAG 0x506
+#define BCMA_CORE_NS_DDR23 0x507
+#define BCMA_CORE_NS_ROM 0x508
+#define BCMA_CORE_NS_NAND 0x509
+#define BCMA_CORE_NS_QSPI 0x50A
+#define BCMA_CORE_NS_CHIPCOMMON_B 0x50B
+#define BCMA_CORE_4706_SOC_RAM 0x50E
+#define BCMA_CORE_ARMCA9 0x510
+#define BCMA_CORE_4706_MAC_GBIT 0x52D
+#define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */
+#define BCMA_CORE_ALTA 0x534 /* I2S core */
+#define BCMA_CORE_4706_MAC_GBIT_COMMON 0x5DC
+#define BCMA_CORE_DDR23_PHY 0x5DD
+#define BCMA_CORE_INVALID 0x700
+#define BCMA_CORE_CHIPCOMMON 0x800
+#define BCMA_CORE_ILINE20 0x801
+#define BCMA_CORE_SRAM 0x802
+#define BCMA_CORE_SDRAM 0x803
+#define BCMA_CORE_PCI 0x804
+#define BCMA_CORE_MIPS 0x805
+#define BCMA_CORE_ETHERNET 0x806
+#define BCMA_CORE_V90 0x807
+#define BCMA_CORE_USB11_HOSTDEV 0x808
+#define BCMA_CORE_ADSL 0x809
+#define BCMA_CORE_ILINE100 0x80A
+#define BCMA_CORE_IPSEC 0x80B
+#define BCMA_CORE_UTOPIA 0x80C
+#define BCMA_CORE_PCMCIA 0x80D
+#define BCMA_CORE_INTERNAL_MEM 0x80E
+#define BCMA_CORE_MEMC_SDRAM 0x80F
+#define BCMA_CORE_OFDM 0x810
+#define BCMA_CORE_EXTIF 0x811
+#define BCMA_CORE_80211 0x812
+#define BCMA_CORE_PHY_A 0x813
+#define BCMA_CORE_PHY_B 0x814
+#define BCMA_CORE_PHY_G 0x815
+#define BCMA_CORE_MIPS_3302 0x816
+#define BCMA_CORE_USB11_HOST 0x817
+#define BCMA_CORE_USB11_DEV 0x818
+#define BCMA_CORE_USB20_HOST 0x819
+#define BCMA_CORE_USB20_DEV 0x81A
+#define BCMA_CORE_SDIO_HOST 0x81B
+#define BCMA_CORE_ROBOSWITCH 0x81C
+#define BCMA_CORE_PARA_ATA 0x81D
+#define BCMA_CORE_SATA_XORDMA 0x81E
+#define BCMA_CORE_ETHERNET_GBIT 0x81F
+#define BCMA_CORE_PCIE 0x820
+#define BCMA_CORE_PHY_N 0x821
+#define BCMA_CORE_SRAM_CTL 0x822
+#define BCMA_CORE_MINI_MACPHY 0x823
+#define BCMA_CORE_ARM_1176 0x824
+#define BCMA_CORE_ARM_7TDMI 0x825
+#define BCMA_CORE_PHY_LP 0x826
+#define BCMA_CORE_PMU 0x827
+#define BCMA_CORE_PHY_SSN 0x828
+#define BCMA_CORE_SDIO_DEV 0x829
+#define BCMA_CORE_ARM_CM3 0x82A
+#define BCMA_CORE_PHY_HT 0x82B
+#define BCMA_CORE_MIPS_74K 0x82C
+#define BCMA_CORE_MAC_GBIT 0x82D
+#define BCMA_CORE_DDR12_MEM_CTL 0x82E
+#define BCMA_CORE_PCIE_RC 0x82F /* PCIe Root Complex */
+#define BCMA_CORE_OCP_OCP_BRIDGE 0x830
+#define BCMA_CORE_SHARED_COMMON 0x831
+#define BCMA_CORE_OCP_AHB_BRIDGE 0x832
+#define BCMA_CORE_SPI_HOST 0x833
+#define BCMA_CORE_I2S 0x834
+#define BCMA_CORE_SDR_DDR1_MEM_CTL 0x835 /* SDR/DDR1 memory controller core */
+#define BCMA_CORE_SHIM 0x837 /* SHIM component in ubus/6362 */
+#define BCMA_CORE_PHY_AC 0x83B
+#define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */
+#define BCMA_CORE_USB30_DEV 0x83D
+#define BCMA_CORE_ARM_CR4 0x83E
+#define BCMA_CORE_DEFAULT 0xFFF
+
+#define BCMA_MAX_NR_CORES 16
+
+/* Chip IDs of PCIe devices */
+#define BCMA_CHIP_ID_BCM4313 0x4313
+#define BCMA_CHIP_ID_BCM43142 43142
+#define BCMA_CHIP_ID_BCM43131 43131
+#define BCMA_CHIP_ID_BCM43217 43217
+#define BCMA_CHIP_ID_BCM43222 43222
+#define BCMA_CHIP_ID_BCM43224 43224
+#define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8
+#define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa
+#define BCMA_CHIP_ID_BCM43225 43225
+#define BCMA_CHIP_ID_BCM43227 43227
+#define BCMA_CHIP_ID_BCM43228 43228
+#define BCMA_CHIP_ID_BCM43421 43421
+#define BCMA_CHIP_ID_BCM43428 43428
+#define BCMA_CHIP_ID_BCM43431 43431
+#define BCMA_CHIP_ID_BCM43460 43460
+#define BCMA_CHIP_ID_BCM4331 0x4331
+#define BCMA_CHIP_ID_BCM6362 0x6362
+#define BCMA_CHIP_ID_BCM4360 0x4360
+#define BCMA_CHIP_ID_BCM4352 0x4352
+
+/* Chip IDs of SoCs */
+#define BCMA_CHIP_ID_BCM4706 0x5300
+#define BCMA_PKG_ID_BCM4706L 1
+#define BCMA_CHIP_ID_BCM4716 0x4716
+#define BCMA_PKG_ID_BCM4716 8
+#define BCMA_PKG_ID_BCM4717 9
+#define BCMA_PKG_ID_BCM4718 10
+#define BCMA_CHIP_ID_BCM47162 47162
+#define BCMA_CHIP_ID_BCM4748 0x4748
+#define BCMA_CHIP_ID_BCM4749 0x4749
+#define BCMA_CHIP_ID_BCM5356 0x5356
+#define BCMA_CHIP_ID_BCM5357 0x5357
+#define BCMA_PKG_ID_BCM5358 9
+#define BCMA_PKG_ID_BCM47186 10
+#define BCMA_PKG_ID_BCM5357 11
+#define BCMA_CHIP_ID_BCM53572 53572
+#define BCMA_PKG_ID_BCM47188 9
+#define BCMA_CHIP_ID_BCM4707 53010
+#define BCMA_PKG_ID_BCM4707 1
+#define BCMA_PKG_ID_BCM4708 2
+#define BCMA_PKG_ID_BCM4709 0
+#define BCMA_CHIP_ID_BCM53018 53018
+
+/* Board types (on PCI usually equals to the subsystem dev id) */
+/* BCM4313 */
+#define BCMA_BOARD_TYPE_BCM94313BU 0X050F
+#define BCMA_BOARD_TYPE_BCM94313HM 0X0510
+#define BCMA_BOARD_TYPE_BCM94313EPA 0X0511
+#define BCMA_BOARD_TYPE_BCM94313HMG 0X051C
+/* BCM4716 */
+#define BCMA_BOARD_TYPE_BCM94716NR2 0X04CD
+/* BCM43224 */
+#define BCMA_BOARD_TYPE_BCM943224X21 0X056E
+#define BCMA_BOARD_TYPE_BCM943224X21_FCC 0X00D1
+#define BCMA_BOARD_TYPE_BCM943224X21B 0X00E9
+#define BCMA_BOARD_TYPE_BCM943224M93 0X008B
+#define BCMA_BOARD_TYPE_BCM943224M93A 0X0090
+#define BCMA_BOARD_TYPE_BCM943224X16 0X0093
+#define BCMA_BOARD_TYPE_BCM94322X9 0X008D
+#define BCMA_BOARD_TYPE_BCM94322M35E 0X008E
+/* BCM43228 */
+#define BCMA_BOARD_TYPE_BCM943228BU8 0X0540
+#define BCMA_BOARD_TYPE_BCM943228BU9 0X0541
+#define BCMA_BOARD_TYPE_BCM943228BU 0X0542
+#define BCMA_BOARD_TYPE_BCM943227HM4L 0X0543
+#define BCMA_BOARD_TYPE_BCM943227HMB 0X0544
+#define BCMA_BOARD_TYPE_BCM943228HM4L 0X0545
+#define BCMA_BOARD_TYPE_BCM943228SD 0X0573
+/* BCM4331 */
+#define BCMA_BOARD_TYPE_BCM94331X19 0X00D6
+#define BCMA_BOARD_TYPE_BCM94331X28 0X00E4
+#define BCMA_BOARD_TYPE_BCM94331X28B 0X010E
+#define BCMA_BOARD_TYPE_BCM94331PCIEBT3AX 0X00E4
+#define BCMA_BOARD_TYPE_BCM94331X12_2G 0X00EC
+#define BCMA_BOARD_TYPE_BCM94331X12_5G 0X00ED
+#define BCMA_BOARD_TYPE_BCM94331X29B 0X00EF
+#define BCMA_BOARD_TYPE_BCM94331CSAX 0X00EF
+#define BCMA_BOARD_TYPE_BCM94331X19C 0X00F5
+#define BCMA_BOARD_TYPE_BCM94331X33 0X00F4
+#define BCMA_BOARD_TYPE_BCM94331BU 0X0523
+#define BCMA_BOARD_TYPE_BCM94331S9BU 0X0524
+#define BCMA_BOARD_TYPE_BCM94331MC 0X0525
+#define BCMA_BOARD_TYPE_BCM94331MCI 0X0526
+#define BCMA_BOARD_TYPE_BCM94331PCIEBT4 0X0527
+#define BCMA_BOARD_TYPE_BCM94331HM 0X0574
+#define BCMA_BOARD_TYPE_BCM94331PCIEDUAL 0X059B
+#define BCMA_BOARD_TYPE_BCM94331MCH5 0X05A9
+#define BCMA_BOARD_TYPE_BCM94331CS 0X05C6
+#define BCMA_BOARD_TYPE_BCM94331CD 0X05DA
+/* BCM53572 */
+#define BCMA_BOARD_TYPE_BCM953572BU 0X058D
+#define BCMA_BOARD_TYPE_BCM953572NR2 0X058E
+#define BCMA_BOARD_TYPE_BCM947188NR2 0X058F
+#define BCMA_BOARD_TYPE_BCM953572SDRNR2 0X0590
+/* BCM43142 */
+#define BCMA_BOARD_TYPE_BCM943142HM 0X05E0
+
+struct bcma_device {
+ struct bcma_bus *bus;
+ struct bcma_device_id id;
+
+ struct device dev;
+ struct device *dma_dev;
+
+ unsigned int irq;
+ bool dev_registered;
+
+ u8 core_index;
+ u8 core_unit;
+
+ u32 addr;
+ u32 addr_s[8];
+ u32 wrap;
+
+ void __iomem *io_addr;
+ void __iomem *io_wrap;
+
+ void *drvdata;
+ struct list_head list;
+};
+
+static inline void *bcma_get_drvdata(struct bcma_device *core)
+{
+ return core->drvdata;
+}
+static inline void bcma_set_drvdata(struct bcma_device *core, void *drvdata)
+{
+ core->drvdata = drvdata;
+}
+
+struct bcma_driver {
+ const char *name;
+ const struct bcma_device_id *id_table;
+
+ int (*probe)(struct bcma_device *dev);
+ void (*remove)(struct bcma_device *dev);
+ int (*suspend)(struct bcma_device *dev);
+ int (*resume)(struct bcma_device *dev);
+ void (*shutdown)(struct bcma_device *dev);
+
+ struct device_driver drv;
+};
+extern
+int __bcma_driver_register(struct bcma_driver *drv, struct module *owner);
+#define bcma_driver_register(drv) \
+ __bcma_driver_register(drv, THIS_MODULE)
+
+extern void bcma_driver_unregister(struct bcma_driver *drv);
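+
+/*
+ * Usage sketch (illustrative only, not part of this header): a client
+ * driver matches cores through its id_table and keeps per-core state via
+ * bcma_set_drvdata(). Names such as my_driver, my_probe, my_remove,
+ * my_priv and my_bcma_tbl are placeholders; error handling is trimmed.
+ *
+ *	static int my_probe(struct bcma_device *core)
+ *	{
+ *		struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ *
+ *		if (!priv)
+ *			return -ENOMEM;
+ *		bcma_set_drvdata(core, priv);
+ *		return 0;
+ *	}
+ *
+ *	static struct bcma_driver my_driver = {
+ *		.name		= KBUILD_MODNAME,
+ *		.id_table	= my_bcma_tbl,
+ *		.probe		= my_probe,
+ *		.remove		= my_remove,
+ *	};
+ *
+ * The module init/exit hooks then call bcma_driver_register(&my_driver)
+ * and bcma_driver_unregister(&my_driver).
+ */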
+
+/* Set a fallback SPROM.
+ * See kdoc at the function definition for complete documentation. */
+extern int bcma_arch_register_fallback_sprom(
+ int (*sprom_callback)(struct bcma_bus *bus,
+ struct ssb_sprom *out));
+
+struct bcma_bus {
+ /* The MMIO area. */
+ void __iomem *mmio;
+
+ const struct bcma_host_ops *ops;
+
+ enum bcma_hosttype hosttype;
+ bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */
+ union {
+ /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */
+ struct pci_dev *host_pci;
+ /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
+ struct sdio_func *host_sdio;
+ /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
+ struct platform_device *host_pdev;
+ };
+
+ struct bcma_chipinfo chipinfo;
+
+ struct bcma_boardinfo boardinfo;
+
+ struct bcma_device *mapped_core;
+ struct list_head cores;
+ u8 nr_cores;
+ u8 num;
+
+ struct bcma_drv_cc drv_cc;
+ struct bcma_drv_cc_b drv_cc_b;
+ struct bcma_drv_pci drv_pci[2];
+ struct bcma_drv_pcie2 drv_pcie2;
+ struct bcma_drv_mips drv_mips;
+ struct bcma_drv_gmac_cmn drv_gmac_cmn;
+
+	/* We decided to share SPROM struct with SSB as long as we do not need
+	 * any hacks for BCMA. This simplifies driver code. */
+ struct ssb_sprom sprom;
+};
+
+static inline u32 bcma_read8(struct bcma_device *core, u16 offset)
+{
+ return core->bus->ops->read8(core, offset);
+}
+static inline u32 bcma_read16(struct bcma_device *core, u16 offset)
+{
+ return core->bus->ops->read16(core, offset);
+}
+static inline u32 bcma_read32(struct bcma_device *core, u16 offset)
+{
+ return core->bus->ops->read32(core, offset);
+}
+static inline
+void bcma_write8(struct bcma_device *core, u16 offset, u32 value)
+{
+ core->bus->ops->write8(core, offset, value);
+}
+static inline
+void bcma_write16(struct bcma_device *core, u16 offset, u32 value)
+{
+ core->bus->ops->write16(core, offset, value);
+}
+static inline
+void bcma_write32(struct bcma_device *core, u16 offset, u32 value)
+{
+ core->bus->ops->write32(core, offset, value);
+}
+#ifdef CONFIG_BCMA_BLOCKIO
+static inline void bcma_block_read(struct bcma_device *core, void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ core->bus->ops->block_read(core, buffer, count, offset, reg_width);
+}
+static inline void bcma_block_write(struct bcma_device *core,
+ const void *buffer, size_t count,
+ u16 offset, u8 reg_width)
+{
+ core->bus->ops->block_write(core, buffer, count, offset, reg_width);
+}
+#endif
+static inline u32 bcma_aread32(struct bcma_device *core, u16 offset)
+{
+ return core->bus->ops->aread32(core, offset);
+}
+static inline
+void bcma_awrite32(struct bcma_device *core, u16 offset, u32 value)
+{
+ core->bus->ops->awrite32(core, offset, value);
+}
+
+static inline void bcma_mask32(struct bcma_device *cc, u16 offset, u32 mask)
+{
+ bcma_write32(cc, offset, bcma_read32(cc, offset) & mask);
+}
+static inline void bcma_set32(struct bcma_device *cc, u16 offset, u32 set)
+{
+ bcma_write32(cc, offset, bcma_read32(cc, offset) | set);
+}
+static inline void bcma_maskset32(struct bcma_device *cc,
+ u16 offset, u32 mask, u32 set)
+{
+ bcma_write32(cc, offset, (bcma_read32(cc, offset) & mask) | set);
+}
+static inline void bcma_mask16(struct bcma_device *cc, u16 offset, u16 mask)
+{
+ bcma_write16(cc, offset, bcma_read16(cc, offset) & mask);
+}
+static inline void bcma_set16(struct bcma_device *cc, u16 offset, u16 set)
+{
+ bcma_write16(cc, offset, bcma_read16(cc, offset) | set);
+}
+static inline void bcma_maskset16(struct bcma_device *cc,
+ u16 offset, u16 mask, u16 set)
+{
+ bcma_write16(cc, offset, (bcma_read16(cc, offset) & mask) | set);
+}
+
+extern struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+ u8 unit);
+static inline struct bcma_device *bcma_find_core(struct bcma_bus *bus,
+ u16 coreid)
+{
+ return bcma_find_core_unit(bus, coreid, 0);
+}
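+
+/*
+ * Usage sketch (illustrative only): look up a core by its id and poke one
+ * of its registers through the accessors above. BCMA_CORE_CHIPCOMMON and
+ * the BCMA_CC_* names are defined elsewhere in the bcma headers.
+ *
+ *	struct bcma_device *cc_core;
+ *
+ *	cc_core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ *	if (cc_core)
+ *		bcma_set32(cc_core, BCMA_CC_CORECTL,
+ *			   BCMA_CC_CORECTL_UARTCLKEN);
+ */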
+
+#ifdef CONFIG_BCMA_HOST_PCI
+extern void bcma_host_pci_up(struct bcma_bus *bus);
+extern void bcma_host_pci_down(struct bcma_bus *bus);
+extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+ struct bcma_device *core, bool enable);
+#else
+static inline void bcma_host_pci_up(struct bcma_bus *bus)
+{
+}
+static inline void bcma_host_pci_down(struct bcma_bus *bus)
+{
+}
+static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus,
+ struct bcma_device *core, bool enable)
+{
+ if (bus->hosttype == BCMA_HOSTTYPE_PCI)
+ return -ENOTSUPP;
+ return 0;
+}
+#endif
+
+extern bool bcma_core_is_enabled(struct bcma_device *core);
+extern void bcma_core_disable(struct bcma_device *core, u32 flags);
+extern int bcma_core_enable(struct bcma_device *core, u32 flags);
+extern void bcma_core_set_clockmode(struct bcma_device *core,
+ enum bcma_clkmode clkmode);
+extern void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status,
+ bool on);
+extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset);
+#define BCMA_DMA_TRANSLATION_MASK 0xC0000000
+#define BCMA_DMA_TRANSLATION_NONE 0x00000000
+#define BCMA_DMA_TRANSLATION_DMA32_CMT 0x40000000 /* Client Mode Translation for 32-bit DMA */
+#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */
+extern u32 bcma_core_dma_translation(struct bcma_device *core);
+
+extern unsigned int bcma_core_irq(struct bcma_device *core, int num);
+
+#endif /* LINUX_BCMA_H_ */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
new file mode 100644
index 000000000..6cceedf65
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -0,0 +1,699 @@
+#ifndef LINUX_BCMA_DRIVER_CC_H_
+#define LINUX_BCMA_DRIVER_CC_H_
+
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+/** ChipCommon core registers. **/
+#define BCMA_CC_ID 0x0000
+#define BCMA_CC_ID_ID 0x0000FFFF
+#define BCMA_CC_ID_ID_SHIFT 0
+#define BCMA_CC_ID_REV 0x000F0000
+#define BCMA_CC_ID_REV_SHIFT 16
+#define BCMA_CC_ID_PKG 0x00F00000
+#define BCMA_CC_ID_PKG_SHIFT 20
+#define BCMA_CC_ID_NRCORES 0x0F000000
+#define BCMA_CC_ID_NRCORES_SHIFT 24
+#define BCMA_CC_ID_TYPE 0xF0000000
+#define BCMA_CC_ID_TYPE_SHIFT 28
+#define BCMA_CC_CAP 0x0004 /* Capabilities */
+#define BCMA_CC_CAP_NRUART 0x00000003 /* # of UARTs */
+#define BCMA_CC_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */
+#define BCMA_CC_CAP_UARTCLK 0x00000018 /* UART clock select */
+#define BCMA_CC_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */
+#define BCMA_CC_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */
+#define BCMA_CC_CAP_EXTBUS 0x000000C0 /* External buses present */
+#define BCMA_CC_CAP_FLASHT 0x00000700 /* Flash Type */
+#define BCMA_CC_FLASHT_NONE 0x00000000 /* No flash */
+#define BCMA_CC_FLASHT_STSER 0x00000100 /* ST serial flash */
+#define BCMA_CC_FLASHT_ATSER 0x00000200 /* Atmel serial flash */
+#define BCMA_CC_FLASHT_NAND 0x00000300 /* NAND flash */
+#define BCMA_CC_FLASHT_PARA 0x00000700 /* Parallel flash */
+#define BCMA_CC_CAP_PLLT 0x00038000 /* PLL Type */
+#define BCMA_PLLTYPE_NONE 0x00000000
+#define  BCMA_PLLTYPE_1 0x00010000 /* 48 MHz base, 3 dividers */
+#define  BCMA_PLLTYPE_2 0x00020000 /* 48 MHz, 4 dividers */
+#define  BCMA_PLLTYPE_3 0x00030000 /* 25 MHz, 2 dividers */
+#define  BCMA_PLLTYPE_4 0x00008000 /* 48 MHz, 4 dividers */
+#define  BCMA_PLLTYPE_5 0x00018000 /* 25 MHz, 4 dividers */
+#define  BCMA_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */
+#define  BCMA_PLLTYPE_7 0x00038000 /* 25 MHz, 4 dividers */
+#define BCMA_CC_CAP_PCTL 0x00040000 /* Power Control */
+#define BCMA_CC_CAP_OTPS 0x00380000 /* OTP size */
+#define BCMA_CC_CAP_OTPS_SHIFT 19
+#define BCMA_CC_CAP_OTPS_BASE 5
+#define BCMA_CC_CAP_JTAGM 0x00400000 /* JTAG master present */
+#define BCMA_CC_CAP_BROM 0x00800000 /* Internal boot ROM active */
+#define BCMA_CC_CAP_64BIT 0x08000000 /* 64-bit Backplane */
+#define BCMA_CC_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */
+#define BCMA_CC_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */
+#define BCMA_CC_CAP_SPROM 0x40000000 /* SPROM present */
+#define BCMA_CC_CAP_NFLASH 0x80000000 /* NAND flash present (rev >= 35 or BCM4706?) */
+#define BCMA_CC_CORECTL 0x0008
+#define BCMA_CC_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */
+#define BCMA_CC_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
+#define BCMA_CC_CORECTL_UARTCLKEN 0x00000008 /* UART clock enable (rev >= 21) */
+#define BCMA_CC_BIST 0x000C
+#define BCMA_CC_OTPS 0x0010 /* OTP status */
+#define BCMA_CC_OTPS_PROGFAIL 0x80000000
+#define BCMA_CC_OTPS_PROTECT 0x00000007
+#define BCMA_CC_OTPS_HW_PROTECT 0x00000001
+#define BCMA_CC_OTPS_SW_PROTECT 0x00000002
+#define BCMA_CC_OTPS_CID_PROTECT 0x00000004
+#define BCMA_CC_OTPS_GU_PROG_IND 0x00000F00 /* General Use programmed indication */
+#define BCMA_CC_OTPS_GU_PROG_IND_SHIFT 8
+#define BCMA_CC_OTPS_GU_PROG_HW 0x00000100 /* HW region programmed */
+#define BCMA_CC_OTPC 0x0014 /* OTP control */
+#define BCMA_CC_OTPC_RECWAIT 0xFF000000
+#define BCMA_CC_OTPC_PROGWAIT 0x00FFFF00
+#define BCMA_CC_OTPC_PRW_SHIFT 8
+#define BCMA_CC_OTPC_MAXFAIL 0x00000038
+#define BCMA_CC_OTPC_VSEL 0x00000006
+#define BCMA_CC_OTPC_SELVL 0x00000001
+#define BCMA_CC_OTPP 0x0018 /* OTP prog */
+#define BCMA_CC_OTPP_COL 0x000000FF
+#define BCMA_CC_OTPP_ROW 0x0000FF00
+#define BCMA_CC_OTPP_ROW_SHIFT 8
+#define BCMA_CC_OTPP_READERR 0x10000000
+#define BCMA_CC_OTPP_VALUE 0x20000000
+#define BCMA_CC_OTPP_READ 0x40000000
+#define BCMA_CC_OTPP_START 0x80000000
+#define BCMA_CC_OTPP_BUSY 0x80000000
+#define BCMA_CC_OTPL 0x001C /* OTP layout */
+#define BCMA_CC_OTPL_GURGN_OFFSET 0x00000FFF /* offset of general use region */
+#define BCMA_CC_IRQSTAT 0x0020
+#define BCMA_CC_IRQMASK 0x0024
+#define BCMA_CC_IRQ_GPIO 0x00000001 /* gpio intr */
+#define BCMA_CC_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */
+#define BCMA_CC_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */
+#define BCMA_CC_CHIPCTL 0x0028 /* Rev >= 11 only */
+#define BCMA_CC_CHIPSTAT 0x002C /* Rev >= 11 only */
+#define BCMA_CC_CHIPST_4313_SPROM_PRESENT 1
+#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
+#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
+#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
+#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001
+#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002
+#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004
+#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008
+#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010
+#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020
+#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package, 1: low-cost package */
+#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */
+#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmel-s flash */
+#define BCMA_CC_CHIPST_4706_MIPS_BENDIAN BIT(3) /* 0: little, 1: big endian */
+#define BCMA_CC_CHIPST_4706_PCIE1_DISABLE BIT(5) /* PCIE1 enable strap pin */
+#define BCMA_CC_CHIPST_5357_NAND_BOOT BIT(4) /* NAND boot, valid for CC rev 38 and/or BCM5357 */
+#define BCMA_CC_CHIPST_4360_XTAL_40MZ 0x00000001
+#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */
+#define BCMA_CC_JCMD_START 0x80000000
+#define BCMA_CC_JCMD_BUSY 0x80000000
+#define BCMA_CC_JCMD_PAUSE 0x40000000
+#define BCMA_CC_JCMD0_ACC_MASK 0x0000F000
+#define BCMA_CC_JCMD0_ACC_IRDR 0x00000000
+#define BCMA_CC_JCMD0_ACC_DR 0x00001000
+#define BCMA_CC_JCMD0_ACC_IR 0x00002000
+#define BCMA_CC_JCMD0_ACC_RESET 0x00003000
+#define BCMA_CC_JCMD0_ACC_IRPDR 0x00004000
+#define BCMA_CC_JCMD0_ACC_PDR 0x00005000
+#define BCMA_CC_JCMD0_IRW_MASK 0x00000F00
+#define BCMA_CC_JCMD_ACC_MASK 0x000F0000 /* Changes for corerev 11 */
+#define BCMA_CC_JCMD_ACC_IRDR 0x00000000
+#define BCMA_CC_JCMD_ACC_DR 0x00010000
+#define BCMA_CC_JCMD_ACC_IR 0x00020000
+#define BCMA_CC_JCMD_ACC_RESET 0x00030000
+#define BCMA_CC_JCMD_ACC_IRPDR 0x00040000
+#define BCMA_CC_JCMD_ACC_PDR 0x00050000
+#define BCMA_CC_JCMD_IRW_MASK 0x00001F00
+#define BCMA_CC_JCMD_IRW_SHIFT 8
+#define BCMA_CC_JCMD_DRW_MASK 0x0000003F
+#define BCMA_CC_JIR 0x0034 /* Rev >= 10 only */
+#define BCMA_CC_JDR 0x0038 /* Rev >= 10 only */
+#define BCMA_CC_JCTL 0x003C /* Rev >= 10 only */
+#define BCMA_CC_JCTL_FORCE_CLK 4 /* Force clock */
+#define BCMA_CC_JCTL_EXT_EN 2 /* Enable external targets */
+#define BCMA_CC_JCTL_EN 1 /* Enable Jtag master */
+#define BCMA_CC_FLASHCTL 0x0040
+/* Start/busy bit in flashcontrol */
+#define BCMA_CC_FLASHCTL_OPCODE 0x000000ff
+#define BCMA_CC_FLASHCTL_ACTION 0x00000700
+#define BCMA_CC_FLASHCTL_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */
+#define BCMA_CC_FLASHCTL_START 0x80000000
+#define BCMA_CC_FLASHCTL_BUSY BCMA_CC_FLASHCTL_START
+/* Flashcontrol action + opcodes for ST flashes */
+#define BCMA_CC_FLASHCTL_ST_WREN 0x0006 /* Write Enable */
+#define BCMA_CC_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */
+#define BCMA_CC_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */
+#define BCMA_CC_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */
+#define BCMA_CC_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */
+#define BCMA_CC_FLASHCTL_ST_PP 0x0302 /* Page Program */
+#define BCMA_CC_FLASHCTL_ST_SE 0x02d8 /* Sector Erase */
+#define BCMA_CC_FLASHCTL_ST_BE 0x00c7 /* Bulk Erase */
+#define BCMA_CC_FLASHCTL_ST_DP 0x00b9 /* Deep Power-down */
+#define BCMA_CC_FLASHCTL_ST_RES 0x03ab /* Read Electronic Signature */
+#define BCMA_CC_FLASHCTL_ST_CSA 0x1000 /* Keep chip select asserted */
+#define BCMA_CC_FLASHCTL_ST_SSE 0x0220 /* Sub-sector Erase */
+/* Flashcontrol action + opcodes for Atmel flashes */
+#define BCMA_CC_FLASHCTL_AT_READ 0x07e8
+#define BCMA_CC_FLASHCTL_AT_PAGE_READ 0x07d2
+#define BCMA_CC_FLASHCTL_AT_STATUS 0x01d7
+#define BCMA_CC_FLASHCTL_AT_BUF1_WRITE 0x0384
+#define BCMA_CC_FLASHCTL_AT_BUF2_WRITE 0x0387
+#define BCMA_CC_FLASHCTL_AT_BUF1_ERASE_PROGRAM 0x0283
+#define BCMA_CC_FLASHCTL_AT_BUF2_ERASE_PROGRAM 0x0286
+#define BCMA_CC_FLASHCTL_AT_BUF1_PROGRAM 0x0288
+#define BCMA_CC_FLASHCTL_AT_BUF2_PROGRAM 0x0289
+#define BCMA_CC_FLASHCTL_AT_PAGE_ERASE 0x0281
+#define BCMA_CC_FLASHCTL_AT_BLOCK_ERASE 0x0250
+#define BCMA_CC_FLASHCTL_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define BCMA_CC_FLASHCTL_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define BCMA_CC_FLASHCTL_AT_BUF1_LOAD 0x0253
+#define BCMA_CC_FLASHCTL_AT_BUF2_LOAD 0x0255
+#define BCMA_CC_FLASHCTL_AT_BUF1_COMPARE 0x0260
+#define BCMA_CC_FLASHCTL_AT_BUF2_COMPARE 0x0261
+#define BCMA_CC_FLASHCTL_AT_BUF1_REPROGRAM 0x0258
+#define BCMA_CC_FLASHCTL_AT_BUF2_REPROGRAM 0x0259
+#define BCMA_CC_FLASHADDR 0x0044
+#define BCMA_CC_FLASHDATA 0x0048
+/* Status register bits for ST flashes */
+#define BCMA_CC_FLASHDATA_ST_WIP 0x01 /* Write In Progress */
+#define BCMA_CC_FLASHDATA_ST_WEL 0x02 /* Write Enable Latch */
+#define BCMA_CC_FLASHDATA_ST_BP_MASK 0x1c /* Block Protect */
+#define BCMA_CC_FLASHDATA_ST_BP_SHIFT 2
+#define BCMA_CC_FLASHDATA_ST_SRWD 0x80 /* Status Register Write Disable */
+/* Status register bits for Atmel flashes */
+#define BCMA_CC_FLASHDATA_AT_READY 0x80
+#define BCMA_CC_FLASHDATA_AT_MISMATCH 0x40
+#define BCMA_CC_FLASHDATA_AT_ID_MASK 0x38
+#define BCMA_CC_FLASHDATA_AT_ID_SHIFT 3
+#define BCMA_CC_BCAST_ADDR 0x0050
+#define BCMA_CC_BCAST_DATA 0x0054
+#define BCMA_CC_GPIOPULLUP 0x0058 /* Rev >= 20 only */
+#define BCMA_CC_GPIOPULLDOWN 0x005C /* Rev >= 20 only */
+#define BCMA_CC_GPIOIN 0x0060
+#define BCMA_CC_GPIOOUT 0x0064
+#define BCMA_CC_GPIOOUTEN 0x0068
+#define BCMA_CC_GPIOCTL 0x006C
+#define BCMA_CC_GPIOPOL 0x0070
+#define BCMA_CC_GPIOIRQ 0x0074
+#define BCMA_CC_WATCHDOG 0x0080
+#define BCMA_CC_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */
+#define BCMA_CC_GPIOTIMER_OFFTIME 0x0000FFFF
+#define BCMA_CC_GPIOTIMER_OFFTIME_SHIFT 0
+#define BCMA_CC_GPIOTIMER_ONTIME 0xFFFF0000
+#define BCMA_CC_GPIOTIMER_ONTIME_SHIFT 16
+#define BCMA_CC_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */
+#define BCMA_CC_CLOCK_N 0x0090
+#define BCMA_CC_CLOCK_SB 0x0094
+#define BCMA_CC_CLOCK_PCI 0x0098
+#define BCMA_CC_CLOCK_M2 0x009C
+#define BCMA_CC_CLOCK_MIPS 0x00A0
+#define BCMA_CC_CLKDIV 0x00A4 /* Rev >= 3 only */
+#define BCMA_CC_CLKDIV_SFLASH 0x0F000000
+#define BCMA_CC_CLKDIV_SFLASH_SHIFT 24
+#define BCMA_CC_CLKDIV_OTP 0x000F0000
+#define BCMA_CC_CLKDIV_OTP_SHIFT 16
+#define BCMA_CC_CLKDIV_JTAG 0x00000F00
+#define BCMA_CC_CLKDIV_JTAG_SHIFT 8
+#define BCMA_CC_CLKDIV_UART 0x000000FF
+#define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */
+#define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */
+#define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */
+#define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */
+#define BCMA_CC_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */
+#define BCMA_CC_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */
+#define BCMA_CC_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */
+#define BCMA_CC_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */
+#define BCMA_CC_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160 kHz, 0: 32 kHz */
+#define BCMA_CC_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */
+#define BCMA_CC_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */
+#define BCMA_CC_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */
+#define BCMA_CC_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */
+#define BCMA_CC_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */
+#define BCMA_CC_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */
+#define BCMA_CC_SLOWCLKCTL_CLKDIV_SHIFT 16
+#define BCMA_CC_SYSCLKCTL 0x00C0 /* Rev >= 3 only */
+#define BCMA_CC_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */
+#define BCMA_CC_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */
+#define BCMA_CC_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */
+#define BCMA_CC_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set) */
+#define BCMA_CC_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */
+#define BCMA_CC_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */
+#define BCMA_CC_SYSCLKCTL_CLKDIV_SHIFT 16
+#define BCMA_CC_CLKSTSTR 0x00C4 /* Rev >= 3 only */
+#define BCMA_CC_EROM 0x00FC
+#define BCMA_CC_PCMCIA_CFG 0x0100
+#define BCMA_CC_PCMCIA_MEMWAIT 0x0104
+#define BCMA_CC_PCMCIA_ATTRWAIT 0x0108
+#define BCMA_CC_PCMCIA_IOWAIT 0x010C
+#define BCMA_CC_IDE_CFG 0x0110
+#define BCMA_CC_IDE_MEMWAIT 0x0114
+#define BCMA_CC_IDE_ATTRWAIT 0x0118
+#define BCMA_CC_IDE_IOWAIT 0x011C
+#define BCMA_CC_PROG_CFG 0x0120
+#define BCMA_CC_PROG_WAITCNT 0x0124
+#define BCMA_CC_FLASH_CFG 0x0128
+#define BCMA_CC_FLASH_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */
+#define BCMA_CC_FLASH_WAITCNT 0x012C
+#define BCMA_CC_SROM_CONTROL 0x0190
+#define BCMA_CC_SROM_CONTROL_START 0x80000000
+#define BCMA_CC_SROM_CONTROL_BUSY 0x80000000
+#define BCMA_CC_SROM_CONTROL_OPCODE 0x60000000
+#define BCMA_CC_SROM_CONTROL_OP_READ 0x00000000
+#define BCMA_CC_SROM_CONTROL_OP_WRITE 0x20000000
+#define BCMA_CC_SROM_CONTROL_OP_WRDIS 0x40000000
+#define BCMA_CC_SROM_CONTROL_OP_WREN 0x60000000
+#define BCMA_CC_SROM_CONTROL_OTPSEL 0x00000010
+#define BCMA_CC_SROM_CONTROL_LOCK 0x00000008
+#define BCMA_CC_SROM_CONTROL_SIZE_MASK 0x00000006
+#define BCMA_CC_SROM_CONTROL_SIZE_1K 0x00000000
+#define BCMA_CC_SROM_CONTROL_SIZE_4K 0x00000002
+#define BCMA_CC_SROM_CONTROL_SIZE_16K 0x00000004
+#define BCMA_CC_SROM_CONTROL_SIZE_SHIFT 1
+#define BCMA_CC_SROM_CONTROL_PRESENT 0x00000001
+/* Block 0x140 - 0x190 registers are chipset specific */
+#define BCMA_CC_4706_FLASHSCFG 0x18C /* Flash struct configuration */
+#define BCMA_CC_4706_FLASHSCFG_MASK 0x000000ff
+#define BCMA_CC_4706_FLASHSCFG_SF1 0x00000001 /* 2nd serial flash present */
+#define BCMA_CC_4706_FLASHSCFG_PF1 0x00000002 /* 2nd parallel flash present */
+#define BCMA_CC_4706_FLASHSCFG_SF1_TYPE 0x00000004 /* 2nd serial flash type : 0 : ST, 1 : Atmel */
+#define BCMA_CC_4706_FLASHSCFG_NF1 0x00000008 /* 2nd NAND flash present */
+#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_MASK 0x000000f0
+#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_4MB 0x00000010 /* 4MB */
+#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_8MB 0x00000020 /* 8MB */
+#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_16MB 0x00000030 /* 16MB */
+#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_32MB 0x00000040 /* 32MB */
+#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_64MB 0x00000050 /* 64MB */
+#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_128MB 0x00000060 /* 128MB */
+#define BCMA_CC_4706_FLASHSCFG_1ST_MADDR_SEG_256MB 0x00000070 /* 256MB */
+/* NAND flash registers for BCM4706 (corerev = 31) */
+#define BCMA_CC_NFLASH_CTL 0x01A0
+#define BCMA_CC_NFLASH_CTL_ERR 0x08000000
+#define BCMA_CC_NFLASH_CONF 0x01A4
+#define BCMA_CC_NFLASH_COL_ADDR 0x01A8
+#define BCMA_CC_NFLASH_ROW_ADDR 0x01AC
+#define BCMA_CC_NFLASH_DATA 0x01B0
+#define BCMA_CC_NFLASH_WAITCNT0 0x01B4
+/* 0x1E0 is defined as shared BCMA_CLKCTLST */
+#define BCMA_CC_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */
+#define BCMA_CC_UART0_DATA 0x0300
+#define BCMA_CC_UART0_IMR 0x0304
+#define BCMA_CC_UART0_FCR 0x0308
+#define BCMA_CC_UART0_LCR 0x030C
+#define BCMA_CC_UART0_MCR 0x0310
+#define BCMA_CC_UART0_LSR 0x0314
+#define BCMA_CC_UART0_MSR 0x0318
+#define BCMA_CC_UART0_SCRATCH 0x031C
+#define BCMA_CC_UART1_DATA 0x0400
+#define BCMA_CC_UART1_IMR 0x0404
+#define BCMA_CC_UART1_FCR 0x0408
+#define BCMA_CC_UART1_LCR 0x040C
+#define BCMA_CC_UART1_MCR 0x0410
+#define BCMA_CC_UART1_LSR 0x0414
+#define BCMA_CC_UART1_MSR 0x0418
+#define BCMA_CC_UART1_SCRATCH 0x041C
+/* PMU registers (rev >= 20) */
+#define BCMA_CC_PMU_CTL 0x0600 /* PMU control */
+#define BCMA_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
+#define BCMA_CC_PMU_CTL_ILP_DIV_SHIFT 16
+#define BCMA_CC_PMU_CTL_RES 0x00006000 /* reset control mask */
+#define BCMA_CC_PMU_CTL_RES_SHIFT 13
+#define BCMA_CC_PMU_CTL_RES_RELOAD 0x2 /* reload POR values */
+#define BCMA_CC_PMU_CTL_PLL_UPD 0x00000400
+#define BCMA_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
+#define BCMA_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
+#define BCMA_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
+#define BCMA_CC_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */
+#define BCMA_CC_PMU_CTL_XTALFREQ_SHIFT 2
+#define BCMA_CC_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */
+#define BCMA_CC_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */
+#define BCMA_CC_PMU_CAP 0x0604 /* PMU capabilities */
+#define BCMA_CC_PMU_CAP_REVISION 0x000000FF /* Revision mask */
+#define BCMA_CC_PMU_STAT 0x0608 /* PMU status */
+#define BCMA_CC_PMU_STAT_EXT_LPO_AVAIL 0x00000100
+#define BCMA_CC_PMU_STAT_WDRESET 0x00000080
+#define BCMA_CC_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */
+#define BCMA_CC_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */
+#define BCMA_CC_PMU_STAT_HAVEALP 0x00000008 /* ALP available */
+#define BCMA_CC_PMU_STAT_HAVEHT 0x00000004 /* HT available */
+#define BCMA_CC_PMU_STAT_RESINIT 0x00000003 /* Res init */
+#define BCMA_CC_PMU_RES_STAT 0x060C /* PMU res status */
+#define BCMA_CC_PMU_RES_PEND 0x0610 /* PMU res pending */
+#define BCMA_CC_PMU_TIMER 0x0614 /* PMU timer */
+#define BCMA_CC_PMU_MINRES_MSK 0x0618 /* PMU min res mask */
+#define BCMA_CC_PMU_MAXRES_MSK 0x061C /* PMU max res mask */
+#define BCMA_CC_PMU_RES_TABSEL 0x0620 /* PMU res table sel */
+#define BCMA_CC_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */
+#define BCMA_CC_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */
+#define BCMA_CC_PMU_RES_TIMER 0x062C /* PMU res timer */
+#define BCMA_CC_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */
+#define BCMA_CC_PMU_WATCHDOG 0x0634 /* PMU watchdog */
+#define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */
+#define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */
+#define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */
+#define BCMA_CC_CHIPCTL_ADDR 0x0650
+#define BCMA_CC_CHIPCTL_DATA 0x0654
+#define BCMA_CC_REGCTL_ADDR 0x0658
+#define BCMA_CC_REGCTL_DATA 0x065C
+#define BCMA_CC_PLLCTL_ADDR 0x0660
+#define BCMA_CC_PLLCTL_DATA 0x0664
+#define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */
+#define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */
+#define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF
+#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_MASK 0x80000000
+#define BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT 31
+#define BCMA_CC_SPROM 0x0800 /* SPROM beginning */
+/* NAND flash MLC controller registers (corerev >= 38) */
+#define BCMA_CC_NAND_REVISION 0x0C00
+#define BCMA_CC_NAND_CMD_START 0x0C04
+#define BCMA_CC_NAND_CMD_ADDR_X 0x0C08
+#define BCMA_CC_NAND_CMD_ADDR 0x0C0C
+#define BCMA_CC_NAND_CMD_END_ADDR 0x0C10
+#define BCMA_CC_NAND_CS_NAND_SELECT 0x0C14
+#define BCMA_CC_NAND_CS_NAND_XOR 0x0C18
+#define BCMA_CC_NAND_SPARE_RD0 0x0C20
+#define BCMA_CC_NAND_SPARE_RD4 0x0C24
+#define BCMA_CC_NAND_SPARE_RD8 0x0C28
+#define BCMA_CC_NAND_SPARE_RD12 0x0C2C
+#define BCMA_CC_NAND_SPARE_WR0 0x0C30
+#define BCMA_CC_NAND_SPARE_WR4 0x0C34
+#define BCMA_CC_NAND_SPARE_WR8 0x0C38
+#define BCMA_CC_NAND_SPARE_WR12 0x0C3C
+#define BCMA_CC_NAND_ACC_CONTROL 0x0C40
+#define BCMA_CC_NAND_CONFIG 0x0C48
+#define BCMA_CC_NAND_TIMING_1 0x0C50
+#define BCMA_CC_NAND_TIMING_2 0x0C54
+#define BCMA_CC_NAND_SEMAPHORE 0x0C58
+#define BCMA_CC_NAND_DEVID 0x0C60
+#define BCMA_CC_NAND_DEVID_X 0x0C64
+#define BCMA_CC_NAND_BLOCK_LOCK_STATUS 0x0C68
+#define BCMA_CC_NAND_INTFC_STATUS 0x0C6C
+#define BCMA_CC_NAND_ECC_CORR_ADDR_X 0x0C70
+#define BCMA_CC_NAND_ECC_CORR_ADDR 0x0C74
+#define BCMA_CC_NAND_ECC_UNC_ADDR_X 0x0C78
+#define BCMA_CC_NAND_ECC_UNC_ADDR 0x0C7C
+#define BCMA_CC_NAND_READ_ERROR_COUNT 0x0C80
+#define BCMA_CC_NAND_CORR_STAT_THRESHOLD 0x0C84
+#define BCMA_CC_NAND_READ_ADDR_X 0x0C90
+#define BCMA_CC_NAND_READ_ADDR 0x0C94
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR_X 0x0C98
+#define BCMA_CC_NAND_PAGE_PROGRAM_ADDR 0x0C9C
+#define BCMA_CC_NAND_COPY_BACK_ADDR_X 0x0CA0
+#define BCMA_CC_NAND_COPY_BACK_ADDR 0x0CA4
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR_X 0x0CA8
+#define BCMA_CC_NAND_BLOCK_ERASE_ADDR 0x0CAC
+#define BCMA_CC_NAND_INV_READ_ADDR_X 0x0CB0
+#define BCMA_CC_NAND_INV_READ_ADDR 0x0CB4
+#define BCMA_CC_NAND_BLK_WR_PROTECT 0x0CC0
+#define BCMA_CC_NAND_ACC_CONTROL_CS1 0x0CD0
+#define BCMA_CC_NAND_CONFIG_CS1 0x0CD4
+#define BCMA_CC_NAND_TIMING_1_CS1 0x0CD8
+#define BCMA_CC_NAND_TIMING_2_CS1 0x0CDC
+#define BCMA_CC_NAND_SPARE_RD16 0x0D30
+#define BCMA_CC_NAND_SPARE_RD20 0x0D34
+#define BCMA_CC_NAND_SPARE_RD24 0x0D38
+#define BCMA_CC_NAND_SPARE_RD28 0x0D3C
+#define BCMA_CC_NAND_CACHE_ADDR 0x0D40
+#define BCMA_CC_NAND_CACHE_DATA 0x0D44
+#define BCMA_CC_NAND_CTRL_CONFIG 0x0D48
+#define BCMA_CC_NAND_CTRL_STATUS 0x0D4C
+
+/* Divider allocation in 4716/47162/5356 */
+#define BCMA_CC_PMU5_MAINPLL_CPU 1
+#define BCMA_CC_PMU5_MAINPLL_MEM 2
+#define BCMA_CC_PMU5_MAINPLL_SSB 3
+
+/* PLL usage in 4716/47162 */
+#define BCMA_CC_PMU4716_MAINPLL_PLL0 12
+
+/* PLL usage in 5356/5357 */
+#define BCMA_CC_PMU5356_MAINPLL_PLL0 0
+#define BCMA_CC_PMU5357_MAINPLL_PLL0 0
+
+/* 4706 PMU */
+#define BCMA_CC_PMU4706_MAINPLL_PLL0 0
+#define BCMA_CC_PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */
+#define BCMA_CC_PMU6_4706_PROC_P2DIV_MASK 0x000f0000
+#define BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT 16
+#define BCMA_CC_PMU6_4706_PROC_P1DIV_MASK 0x0000f000
+#define BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT 12
+#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8
+#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT 3
+#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007
+#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_SHIFT 0
+
+/* PMU rev 15 */
+#define BCMA_CC_PMU15_PLL_PLLCTL0 0
+#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_MASK 0x00000003
+#define BCMA_CC_PMU15_PLL_PC0_CLKSEL_SHIFT 0
+#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK 0x003FFFFC
+#define BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT 2
+#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000
+#define BCMA_CC_PMU15_PLL_PC0_PRESCALE_SHIFT 22
+#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_MASK 0x07000000
+#define BCMA_CC_PMU15_PLL_PC0_KPCTRL_SHIFT 24
+#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000
+#define BCMA_CC_PMU15_PLL_PC0_FCNTCTRL_SHIFT 27
+#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_MASK 0x40000000
+#define BCMA_CC_PMU15_PLL_PC0_FDCMODE_SHIFT 30
+#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000
+#define BCMA_CC_PMU15_PLL_PC0_CTRLBIAS_SHIFT 31
+
+/* ALP clock on pre-PMU chips */
+#define BCMA_CC_PMU_ALP_CLOCK 20000000
+/* HT clock for systems with PMU-enabled chipcommon */
+#define BCMA_CC_PMU_HT_CLOCK 80000000
+
+/* PMU rev 5 (& 6) */
+#define BCMA_CC_PPL_P1P2_OFF 0
+#define BCMA_CC_PPL_P1_MASK 0x0f000000
+#define BCMA_CC_PPL_P1_SHIFT 24
+#define BCMA_CC_PPL_P2_MASK 0x00f00000
+#define BCMA_CC_PPL_P2_SHIFT 20
+#define BCMA_CC_PPL_M14_OFF 1
+#define BCMA_CC_PPL_MDIV_MASK 0x000000ff
+#define BCMA_CC_PPL_MDIV_WIDTH 8
+#define BCMA_CC_PPL_NM5_OFF 2
+#define BCMA_CC_PPL_NDIV_MASK 0xfff00000
+#define BCMA_CC_PPL_NDIV_SHIFT 20
+#define BCMA_CC_PPL_FMAB_OFF 3
+#define BCMA_CC_PPL_MRAT_MASK 0xf0000000
+#define BCMA_CC_PPL_MRAT_SHIFT 28
+#define BCMA_CC_PPL_ABRAT_MASK 0x08000000
+#define BCMA_CC_PPL_ABRAT_SHIFT 27
+#define BCMA_CC_PPL_FDIV_MASK 0x07ffffff
+#define BCMA_CC_PPL_PLLCTL_OFF 4
+#define BCMA_CC_PPL_PCHI_OFF 5
+#define BCMA_CC_PPL_PCHI_MASK 0x0000003f
+
+#define BCMA_CC_PMU_PLL_CTL0 0
+#define BCMA_CC_PMU_PLL_CTL1 1
+#define BCMA_CC_PMU_PLL_CTL2 2
+#define BCMA_CC_PMU_PLL_CTL3 3
+#define BCMA_CC_PMU_PLL_CTL4 4
+#define BCMA_CC_PMU_PLL_CTL5 5
+
+#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
+#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT 20
+
+#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
+#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
+
+/* BCM4331 ChipControl numbers. */
+#define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */
+#define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JTAG functional) */
+#define BCMA_CHIPCTL_4331_EXT_LNA BIT(2) /* 0 disable */
+#define BCMA_CHIPCTL_4331_SPROM_GPIO13_15 BIT(3) /* sprom/gpio13-15 mux */
+#define BCMA_CHIPCTL_4331_EXTPA_EN BIT(4) /* 0 ext pa disable, 1 ext pa enabled */
+#define BCMA_CHIPCTL_4331_GPIOCLK_ON_SPROMCS BIT(5) /* set drive out GPIO_CLK on sprom_cs pin */
+#define BCMA_CHIPCTL_4331_PCIE_MDIO_ON_SPROMCS BIT(6) /* use sprom_cs pin as PCIE mdio interface */
+#define BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5 BIT(7) /* aband extpa will be at gpio2/5 and sprom_dout */
+#define BCMA_CHIPCTL_4331_OVR_PIPEAUXCLKEN BIT(8) /* override core control on pipe_AuxClkEnable */
+#define BCMA_CHIPCTL_4331_OVR_PIPEAUXPWRDOWN BIT(9) /* override core control on pipe_AuxPowerDown */
+#define BCMA_CHIPCTL_4331_PCIE_AUXCLKEN BIT(10) /* pcie_auxclkenable */
+#define BCMA_CHIPCTL_4331_PCIE_PIPE_PLLDOWN BIT(11) /* pcie_pipe_pllpowerdown */
+#define BCMA_CHIPCTL_4331_EXTPA_EN2 BIT(12) /* 0 ext pa disable, 1 ext pa enabled */
+#define BCMA_CHIPCTL_4331_BT_SHD0_ON_GPIO4 BIT(16) /* enable bt_shd0 at gpio4 */
+#define BCMA_CHIPCTL_4331_BT_SHD1_ON_GPIO5 BIT(17) /* enable bt_shd1 at gpio5 */
+
+/* 43224 chip-specific ChipControl register bits */
+#define BCMA_CCTRL_43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define BCMA_CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
+#define BCMA_CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
+
+/* 4313 Chip specific ChipControl register bits */
+#define BCMA_CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strength for later 4313 */
+
+/* BCM5357 ChipControl register bits */
+#define BCMA_CHIPCTL_5357_EXTPA BIT(14)
+#define BCMA_CHIPCTL_5357_ANT_MUX_2O3 BIT(15)
+#define BCMA_CHIPCTL_5357_NFLASH BIT(16)
+#define BCMA_CHIPCTL_5357_I2S_PINS_ENABLE BIT(18)
+#define BCMA_CHIPCTL_5357_I2CSPI_PINS_ENABLE BIT(19)
+
+#define BCMA_RES_4314_LPLDO_PU BIT(0)
+#define BCMA_RES_4314_PMU_SLEEP_DIS BIT(1)
+#define BCMA_RES_4314_PMU_BG_PU BIT(2)
+#define BCMA_RES_4314_CBUCK_LPOM_PU BIT(3)
+#define BCMA_RES_4314_CBUCK_PFM_PU BIT(4)
+#define BCMA_RES_4314_CLDO_PU BIT(5)
+#define BCMA_RES_4314_LPLDO2_LVM BIT(6)
+#define BCMA_RES_4314_WL_PMU_PU BIT(7)
+#define BCMA_RES_4314_LNLDO_PU BIT(8)
+#define BCMA_RES_4314_LDO3P3_PU BIT(9)
+#define BCMA_RES_4314_OTP_PU BIT(10)
+#define BCMA_RES_4314_XTAL_PU BIT(11)
+#define BCMA_RES_4314_WL_PWRSW_PU BIT(12)
+#define BCMA_RES_4314_LQ_AVAIL BIT(13)
+#define BCMA_RES_4314_LOGIC_RET BIT(14)
+#define BCMA_RES_4314_MEM_SLEEP BIT(15)
+#define BCMA_RES_4314_MACPHY_RET BIT(16)
+#define BCMA_RES_4314_WL_CORE_READY BIT(17)
+#define BCMA_RES_4314_ILP_REQ BIT(18)
+#define BCMA_RES_4314_ALP_AVAIL BIT(19)
+#define BCMA_RES_4314_MISC_PWRSW_PU BIT(20)
+#define BCMA_RES_4314_SYNTH_PWRSW_PU BIT(21)
+#define BCMA_RES_4314_RX_PWRSW_PU BIT(22)
+#define BCMA_RES_4314_RADIO_PU BIT(23)
+#define BCMA_RES_4314_VCO_LDO_PU BIT(24)
+#define BCMA_RES_4314_AFE_LDO_PU BIT(25)
+#define BCMA_RES_4314_RX_LDO_PU BIT(26)
+#define BCMA_RES_4314_TX_LDO_PU BIT(27)
+#define BCMA_RES_4314_HT_AVAIL BIT(28)
+#define BCMA_RES_4314_MACPHY_CLK_AVAIL BIT(29)
+
+/* Data for the PMU, if available.
+ * Check availability with ((struct bcma_drv_cc)->capabilities & BCMA_CC_CAP_PMU)
+ */
+struct bcma_chipcommon_pmu {
+ u8 rev; /* PMU revision */
+ u32 crystalfreq; /* The active crystal frequency (in kHz) */
+};
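+
+/* Illustrative check (assuming a struct bcma_drv_cc *cc, defined further
+ * down in this header): gate any PMU use on the capability bit, e.g.
+ *
+ *	if (cc->capabilities & BCMA_CC_CAP_PMU)
+ *		pr_info("PMU rev %u, xtal %u kHz\n",
+ *			cc->pmu.rev, cc->pmu.crystalfreq);
+ */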
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+struct bcma_pflash {
+ bool present;
+ u8 buswidth;
+ u32 window;
+ u32 window_size;
+};
+
+#ifdef CONFIG_BCMA_SFLASH
+struct bcma_sflash {
+ bool present;
+ u32 window;
+ u32 blocksize;
+ u16 numblocks;
+ u32 size;
+
+ struct mtd_info *mtd;
+ void *priv;
+};
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+struct mtd_info;
+
+struct bcma_nflash {
+ bool present;
+ bool boot; /* This is the flash the SoC boots from */
+
+ struct mtd_info *mtd;
+};
+#endif
+
+struct bcma_serial_port {
+ void *regs;
+ unsigned long clockspeed;
+ unsigned int irq;
+ unsigned int baud_base;
+ unsigned int reg_shift;
+};
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+
+struct bcma_drv_cc {
+ struct bcma_device *core;
+ u32 status;
+ u32 capabilities;
+ u32 capabilities_ext;
+ u8 setup_done:1;
+ u8 early_setup_done:1;
+ /* Fast Powerup Delay constant */
+ u16 fast_pwrup_delay;
+ struct bcma_chipcommon_pmu pmu;
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+ struct bcma_pflash pflash;
+#ifdef CONFIG_BCMA_SFLASH
+ struct bcma_sflash sflash;
+#endif
+#ifdef CONFIG_BCMA_NFLASH
+ struct bcma_nflash nflash;
+#endif
+
+ int nr_serial_ports;
+ struct bcma_serial_port serial_ports[4];
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+ u32 ticks_per_ms;
+ struct platform_device *watchdog;
+
+ /* Lock for GPIO register access. */
+ spinlock_t gpio_lock;
+#ifdef CONFIG_BCMA_DRIVER_GPIO
+ struct gpio_chip gpio;
+ struct irq_domain *irq_domain;
+#endif
+};
+
+struct bcma_drv_cc_b {
+ struct bcma_device *core;
+ u8 setup_done:1;
+ void __iomem *mii;
+};
+
+/* Register access */
+#define bcma_cc_read32(cc, offset) \
+ bcma_read32((cc)->core, offset)
+#define bcma_cc_write32(cc, offset, val) \
+ bcma_write32((cc)->core, offset, val)
+
+#define bcma_cc_mask32(cc, offset, mask) \
+ bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) & (mask))
+#define bcma_cc_set32(cc, offset, set) \
+ bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) | (set))
+#define bcma_cc_maskset32(cc, offset, mask, set) \
+ bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
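+
+/* Usage sketch (illustrative only): with a struct bcma_drv_cc *cc in hand,
+ * the wrappers above turn read-modify-write on ChipCommon registers into
+ * one-liners, e.g. programming the LED power-save on-time field:
+ *
+ *	bcma_cc_maskset32(cc, BCMA_CC_GPIOTIMER,
+ *			  ~BCMA_CC_GPIOTIMER_ONTIME,
+ *			  8 << BCMA_CC_GPIOTIMER_ONTIME_SHIFT);
+ */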
+
+extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
+
+extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
+
+void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value);
+
+u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask);
+
+/* Chipcommon GPIO pin access. */
+u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask);
+u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value);
+u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value);
+u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value);
+u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value);
+u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value);
+u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value);
+u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value);
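+
+/* Usage sketch (illustrative only): given a struct bcma_drv_cc *cc, drive
+ * ChipCommon GPIO pin 1 high. The mask picks the pins to touch and the
+ * value gives their new state:
+ *
+ *	bcma_chipco_gpio_outen(cc, BIT(1), BIT(1));
+ *	bcma_chipco_gpio_out(cc, BIT(1), BIT(1));
+ */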
+
+/* PMU support */
+extern void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset,
+ u32 value);
+extern void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset,
+ u32 mask, u32 set);
+extern void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set);
+extern void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set);
+extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid);
+
+extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc);
+
+void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value);
+
+#endif /* LINUX_BCMA_DRIVER_CC_H_ */
diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h
new file mode 100644
index 000000000..4354d4ea6
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_gmac_cmn.h
@@ -0,0 +1,94 @@
+#ifndef LINUX_BCMA_DRIVER_GMAC_CMN_H_
+#define LINUX_BCMA_DRIVER_GMAC_CMN_H_
+
+#include <linux/types.h>
+
+#define BCMA_GMAC_CMN_STAG0 0x000
+#define BCMA_GMAC_CMN_STAG1 0x004
+#define BCMA_GMAC_CMN_STAG2 0x008
+#define BCMA_GMAC_CMN_STAG3 0x00C
+#define BCMA_GMAC_CMN_PARSER_CTL 0x020
+#define BCMA_GMAC_CMN_MIB_MAX_LEN 0x024
+#define BCMA_GMAC_CMN_PHY_ACCESS 0x100
+#define BCMA_GMAC_CMN_PA_DATA_MASK 0x0000ffff
+#define BCMA_GMAC_CMN_PA_ADDR_MASK 0x001f0000
+#define BCMA_GMAC_CMN_PA_ADDR_SHIFT 16
+#define BCMA_GMAC_CMN_PA_REG_MASK 0x1f000000
+#define BCMA_GMAC_CMN_PA_REG_SHIFT 24
+#define BCMA_GMAC_CMN_PA_WRITE 0x20000000
+#define BCMA_GMAC_CMN_PA_START 0x40000000
+#define BCMA_GMAC_CMN_PHY_CTL 0x104
+#define BCMA_GMAC_CMN_PC_EPA_MASK 0x0000001f
+#define BCMA_GMAC_CMN_PC_MCT_MASK 0x007f0000
+#define BCMA_GMAC_CMN_PC_MCT_SHIFT 16
+#define BCMA_GMAC_CMN_PC_MTE 0x00800000
+#define BCMA_GMAC_CMN_GMAC0_RGMII_CTL 0x110
+#define BCMA_GMAC_CMN_CFP_ACCESS 0x200
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA0 0x210
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA1 0x214
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA2 0x218
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA3 0x21C
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA4 0x220
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA5 0x224
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA6 0x228
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA7 0x22C
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK0 0x230
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK1 0x234
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK2 0x238
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK3 0x23C
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK4 0x240
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK5 0x244
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK6 0x248
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK7 0x24C
+#define BCMA_GMAC_CMN_CFP_ACTION_DATA 0x250
+#define BCMA_GMAC_CMN_TCAM_BIST_CTL 0x2A0
+#define BCMA_GMAC_CMN_TCAM_BIST_STATUS 0x2A4
+#define BCMA_GMAC_CMN_TCAM_CMP_STATUS 0x2A8
+#define BCMA_GMAC_CMN_TCAM_DISABLE 0x2AC
+#define BCMA_GMAC_CMN_TCAM_TEST_CTL 0x2F0
+#define BCMA_GMAC_CMN_UDF_0_A3_A0 0x300
+#define BCMA_GMAC_CMN_UDF_0_A7_A4 0x304
+#define BCMA_GMAC_CMN_UDF_0_A8 0x308
+#define BCMA_GMAC_CMN_UDF_1_A3_A0 0x310
+#define BCMA_GMAC_CMN_UDF_1_A7_A4 0x314
+#define BCMA_GMAC_CMN_UDF_1_A8 0x318
+#define BCMA_GMAC_CMN_UDF_2_A3_A0 0x320
+#define BCMA_GMAC_CMN_UDF_2_A7_A4 0x324
+#define BCMA_GMAC_CMN_UDF_2_A8 0x328
+#define BCMA_GMAC_CMN_UDF_0_B3_B0 0x330
+#define BCMA_GMAC_CMN_UDF_0_B7_B4 0x334
+#define BCMA_GMAC_CMN_UDF_0_B8 0x338
+#define BCMA_GMAC_CMN_UDF_1_B3_B0 0x340
+#define BCMA_GMAC_CMN_UDF_1_B7_B4 0x344
+#define BCMA_GMAC_CMN_UDF_1_B8 0x348
+#define BCMA_GMAC_CMN_UDF_2_B3_B0 0x350
+#define BCMA_GMAC_CMN_UDF_2_B7_B4 0x354
+#define BCMA_GMAC_CMN_UDF_2_B8 0x358
+#define BCMA_GMAC_CMN_UDF_0_C3_C0 0x360
+#define BCMA_GMAC_CMN_UDF_0_C7_C4 0x364
+#define BCMA_GMAC_CMN_UDF_0_C8 0x368
+#define BCMA_GMAC_CMN_UDF_1_C3_C0 0x370
+#define BCMA_GMAC_CMN_UDF_1_C7_C4 0x374
+#define BCMA_GMAC_CMN_UDF_1_C8 0x378
+#define BCMA_GMAC_CMN_UDF_2_C3_C0 0x380
+#define BCMA_GMAC_CMN_UDF_2_C7_C4 0x384
+#define BCMA_GMAC_CMN_UDF_2_C8 0x388
+#define BCMA_GMAC_CMN_UDF_0_D3_D0 0x390
+#define BCMA_GMAC_CMN_UDF_0_D7_D4 0x394
+#define BCMA_GMAC_CMN_UDF_0_D11_D8 0x398
+
+struct bcma_drv_gmac_cmn {
+ struct bcma_device *core;
+
+ /* Drivers accessing BCMA_GMAC_CMN_PHY_ACCESS and
+ * BCMA_GMAC_CMN_PHY_CTL need to take that mutex first. */
+ struct mutex phy_mutex;
+};
+
+/* Register access */
+#define gmac_cmn_read16(gc, offset) bcma_read16((gc)->core, offset)
+#define gmac_cmn_read32(gc, offset) bcma_read32((gc)->core, offset)
+#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val)
+#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val)
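+
+/* Usage sketch (illustrative only, polling and error handling omitted;
+ * gc, phy_addr and reg are placeholder variables): an access through
+ * BCMA_GMAC_CMN_PHY_ACCESS is expected to hold phy_mutex around the
+ * whole transaction, roughly:
+ *
+ *	mutex_lock(&gc->phy_mutex);
+ *	gmac_cmn_write32(gc, BCMA_GMAC_CMN_PHY_ACCESS,
+ *			 BCMA_GMAC_CMN_PA_START |
+ *			 (phy_addr << BCMA_GMAC_CMN_PA_ADDR_SHIFT) |
+ *			 (reg << BCMA_GMAC_CMN_PA_REG_SHIFT));
+ *	// ...wait for BCMA_GMAC_CMN_PA_START to clear, then read the
+ *	// BCMA_GMAC_CMN_PA_DATA_MASK field from the same register...
+ *	mutex_unlock(&gc->phy_mutex);
+ */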
+
+#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
new file mode 100644
index 000000000..8eea7f9e3
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -0,0 +1,44 @@
+#ifndef LINUX_BCMA_DRIVER_MIPS_H_
+#define LINUX_BCMA_DRIVER_MIPS_H_
+
+#define BCMA_MIPS_IPSFLAG 0x0F08
+/* which sbflags get routed to mips interrupt 1 */
+#define BCMA_MIPS_IPSFLAG_IRQ1 0x0000003F
+#define BCMA_MIPS_IPSFLAG_IRQ1_SHIFT 0
+/* which sbflags get routed to mips interrupt 2 */
+#define BCMA_MIPS_IPSFLAG_IRQ2 0x00003F00
+#define BCMA_MIPS_IPSFLAG_IRQ2_SHIFT 8
+/* which sbflags get routed to mips interrupt 3 */
+#define BCMA_MIPS_IPSFLAG_IRQ3 0x003F0000
+#define BCMA_MIPS_IPSFLAG_IRQ3_SHIFT 16
+/* which sbflags get routed to mips interrupt 4 */
+#define BCMA_MIPS_IPSFLAG_IRQ4 0x3F000000
+#define BCMA_MIPS_IPSFLAG_IRQ4_SHIFT 24
+
+/* MIPS 74K core registers */
+#define BCMA_MIPS_MIPS74K_CORECTL 0x0000
+#define BCMA_MIPS_MIPS74K_EXCEPTBASE 0x0004
+#define BCMA_MIPS_MIPS74K_BIST 0x000C
+#define BCMA_MIPS_MIPS74K_INTMASK_INT0 0x0014
+#define BCMA_MIPS_MIPS74K_INTMASK(int) \
+ ((int) * 4 + BCMA_MIPS_MIPS74K_INTMASK_INT0)
+#define BCMA_MIPS_MIPS74K_NMIMASK 0x002C
+#define BCMA_MIPS_MIPS74K_GPIOSEL 0x0040
+#define BCMA_MIPS_MIPS74K_GPIOOUT 0x0044
+#define BCMA_MIPS_MIPS74K_GPIOEN 0x0048
+#define BCMA_MIPS_MIPS74K_CLKCTLST 0x01E0
+
+#define BCMA_MIPS_OOBSELINA74 0x004
+#define BCMA_MIPS_OOBSELOUTA30 0x100
+
+struct bcma_device;
+
+struct bcma_drv_mips {
+ struct bcma_device *core;
+ u8 setup_done:1;
+ u8 early_setup_done:1;
+};
+
+extern u32 bcma_cpu_clock(struct bcma_drv_mips *mcore);
+
+#endif /* LINUX_BCMA_DRIVER_MIPS_H_ */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
new file mode 100644
index 000000000..5ba6918ca
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -0,0 +1,252 @@
+#ifndef LINUX_BCMA_DRIVER_PCI_H_
+#define LINUX_BCMA_DRIVER_PCI_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+/** PCI core registers. **/
+#define BCMA_CORE_PCI_CTL 0x0000 /* PCI Control */
+#define BCMA_CORE_PCI_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */
+#define BCMA_CORE_PCI_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */
+#define BCMA_CORE_PCI_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */
+#define BCMA_CORE_PCI_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */
+#define BCMA_CORE_PCI_ARBCTL 0x0010 /* PCI Arbiter Control */
+#define BCMA_CORE_PCI_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */
+#define BCMA_CORE_PCI_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */
+#define BCMA_CORE_PCI_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */
+#define BCMA_CORE_PCI_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */
+#define BCMA_CORE_PCI_ARBCTL_PARKID_4710 0x00000002 /* 4710 */
+#define BCMA_CORE_PCI_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */
+#define BCMA_CORE_PCI_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */
+#define BCMA_CORE_PCI_ISTAT 0x0020 /* Interrupt status */
+#define BCMA_CORE_PCI_ISTAT_INTA 0x00000001 /* PCI INTA# */
+#define BCMA_CORE_PCI_ISTAT_INTB 0x00000002 /* PCI INTB# */
+#define BCMA_CORE_PCI_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */
+#define BCMA_CORE_PCI_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */
+#define BCMA_CORE_PCI_ISTAT_PME 0x00000010 /* PCI PME# */
+#define BCMA_CORE_PCI_IMASK 0x0024 /* Interrupt mask */
+#define BCMA_CORE_PCI_IMASK_INTA 0x00000001 /* PCI INTA# */
+#define BCMA_CORE_PCI_IMASK_INTB 0x00000002 /* PCI INTB# */
+#define BCMA_CORE_PCI_IMASK_SERR 0x00000004 /* PCI SERR# */
+#define BCMA_CORE_PCI_IMASK_PERR 0x00000008 /* PCI PERR# */
+#define BCMA_CORE_PCI_IMASK_PME 0x00000010 /* PCI PME# */
+#define BCMA_CORE_PCI_MBOX 0x0028 /* Backplane to PCI Mailbox */
+#define BCMA_CORE_PCI_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */
+#define BCMA_CORE_PCI_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */
+#define BCMA_CORE_PCI_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */
+#define BCMA_CORE_PCI_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */
+#define BCMA_CORE_PCI_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */
+#define BCMA_CORE_PCI_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */
+#define BCMA_CORE_PCI_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */
+#define BCMA_CORE_PCI_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */
+#define BCMA_CORE_PCI_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */
+#define BCMA_CORE_PCI_BCAST_ADDR_MASK 0x000000FF
+#define BCMA_CORE_PCI_BCAST_DATA 0x0054 /* Backplane Broadcast Data */
+#define BCMA_CORE_PCI_GPIO_IN 0x0060 /* rev >= 2 only */
+#define BCMA_CORE_PCI_GPIO_OUT 0x0064 /* rev >= 2 only */
+#define BCMA_CORE_PCI_GPIO_ENABLE 0x0068 /* rev >= 2 only */
+#define BCMA_CORE_PCI_GPIO_CTL 0x006C /* rev >= 2 only */
+#define BCMA_CORE_PCI_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */
+#define BCMA_CORE_PCI_SBTOPCI0_MASK 0xFC000000
+#define BCMA_CORE_PCI_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */
+#define BCMA_CORE_PCI_SBTOPCI1_MASK 0xFC000000
+#define BCMA_CORE_PCI_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */
+#define BCMA_CORE_PCI_SBTOPCI2_MASK 0xC0000000
+#define BCMA_CORE_PCI_CONFIG_ADDR 0x0120 /* pcie config space access */
+#define BCMA_CORE_PCI_CONFIG_DATA 0x0124 /* pcie config space access */
+#define BCMA_CORE_PCI_MDIO_CONTROL 0x0128 /* controls the mdio access */
+#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_MASK 0x7f /* clock to be used on MDIO */
+#define BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL 0x2
+#define BCMA_CORE_PCI_MDIOCTL_PREAM_EN 0x80 /* Enable preamble sequence */
+#define BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE 0x100 /* Transaction complete */
+#define BCMA_CORE_PCI_MDIO_DATA 0x012c /* Data to the mdio access */
+#define BCMA_CORE_PCI_MDIODATA_MASK 0x0000ffff /* data 2 bytes */
+#define BCMA_CORE_PCI_MDIODATA_TA 0x00020000 /* Turnaround */
+#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD 18 /* Regaddr shift (rev < 10) */
+#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK_OLD 0x003c0000 /* Regaddr Mask (rev < 10) */
+#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD 22 /* Physmedia devaddr shift (rev < 10) */
+#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK_OLD 0x0fc00000 /* Physmedia devaddr Mask (rev < 10) */
+#define BCMA_CORE_PCI_MDIODATA_REGADDR_SHF 18 /* Regaddr shift */
+#define BCMA_CORE_PCI_MDIODATA_REGADDR_MASK 0x007c0000 /* Regaddr Mask */
+#define BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF 23 /* Physmedia devaddr shift */
+#define BCMA_CORE_PCI_MDIODATA_DEVADDR_MASK 0x0f800000 /* Physmedia devaddr Mask */
+#define BCMA_CORE_PCI_MDIODATA_WRITE 0x10000000 /* write Transaction */
+#define BCMA_CORE_PCI_MDIODATA_READ 0x20000000 /* Read Transaction */
+#define BCMA_CORE_PCI_MDIODATA_START 0x40000000 /* start of Transaction */
+#define BCMA_CORE_PCI_MDIODATA_DEV_ADDR 0x0 /* dev address for serdes */
+#define BCMA_CORE_PCI_MDIODATA_BLK_ADDR 0x1F /* blk address for serdes */
+#define BCMA_CORE_PCI_MDIODATA_DEV_PLL 0x1d /* SERDES PLL Dev */
+#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
+#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
+#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */
+#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */
+#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */
+#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
+#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
+#define BCMA_CORE_PCI_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */
+#define BCMA_CORE_PCI_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */
+#define BCMA_CORE_PCI_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */
+#define BCMA_CORE_PCI_SPROM_PI_OFFSET 0 /* first word */
+#define BCMA_CORE_PCI_SPROM_PI_MASK 0xf000 /* bit 15:12 */
+#define BCMA_CORE_PCI_SPROM_PI_SHIFT 12 /* bit 15:12 */
+#define BCMA_CORE_PCI_SPROM_MISC_CONFIG 5 /* word 5 */
+#define BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST 0x8000 /* bit 15 */
+#define BCMA_CORE_PCI_SPROM_CLKREQ_OFFSET_REV5 20 /* word 20 for srom rev <= 5 */
+#define BCMA_CORE_PCI_SPROM_CLKREQ_ENB 0x0800 /* bit 11 */
+
+/* SBtoPCIx */
+#define BCMA_CORE_PCI_SBTOPCI_MEM 0x00000000
+#define BCMA_CORE_PCI_SBTOPCI_IO 0x00000001
+#define BCMA_CORE_PCI_SBTOPCI_CFG0 0x00000002
+#define BCMA_CORE_PCI_SBTOPCI_CFG1 0x00000003
+#define BCMA_CORE_PCI_SBTOPCI_PREF 0x00000004 /* Prefetch enable */
+#define BCMA_CORE_PCI_SBTOPCI_BURST 0x00000008 /* Burst enable */
+#define BCMA_CORE_PCI_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */
+#define BCMA_CORE_PCI_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */
+#define BCMA_CORE_PCI_SBTOPCI_RC_READ 0x00000000 /* Memory read */
+#define BCMA_CORE_PCI_SBTOPCI_RC_READL 0x00000010 /* Memory read line */
+#define BCMA_CORE_PCI_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */
+
+/* PCIE protocol PHY diagnostic registers */
+#define BCMA_CORE_PCI_PLP_MODEREG 0x200 /* Mode */
+#define BCMA_CORE_PCI_PLP_STATUSREG 0x204 /* Status */
+#define BCMA_CORE_PCI_PLP_POLARITYINV_STAT 0x10 /* Status reg PCIE_PLP_STATUSREG */
+#define BCMA_CORE_PCI_PLP_LTSSMCTRLREG 0x208 /* LTSSM control */
+#define BCMA_CORE_PCI_PLP_LTLINKNUMREG 0x20c /* Link Training Link number */
+#define BCMA_CORE_PCI_PLP_LTLANENUMREG 0x210 /* Link Training Lane number */
+#define BCMA_CORE_PCI_PLP_LTNFTSREG 0x214 /* Link Training N_FTS */
+#define BCMA_CORE_PCI_PLP_ATTNREG 0x218 /* Attention */
+#define BCMA_CORE_PCI_PLP_ATTNMASKREG 0x21C /* Attention Mask */
+#define BCMA_CORE_PCI_PLP_RXERRCTR 0x220 /* Rx Error */
+#define BCMA_CORE_PCI_PLP_RXFRMERRCTR 0x224 /* Rx Framing Error */
+#define BCMA_CORE_PCI_PLP_RXERRTHRESHREG 0x228 /* Rx Error threshold */
+#define BCMA_CORE_PCI_PLP_TESTCTRLREG 0x22C /* Test Control reg */
+#define BCMA_CORE_PCI_PLP_SERDESCTRLOVRDREG 0x230 /* SERDES Control Override */
+#define BCMA_CORE_PCI_PLP_TIMINGOVRDREG 0x234 /* Timing param override */
+#define BCMA_CORE_PCI_PLP_RXTXSMDIAGREG 0x238 /* RXTX State Machine Diag */
+#define BCMA_CORE_PCI_PLP_LTSSMDIAGREG 0x23C /* LTSSM State Machine Diag */
+
+/* PCIE protocol DLLP diagnostic registers */
+#define BCMA_CORE_PCI_DLLP_LCREG 0x100 /* Link Control */
+#define BCMA_CORE_PCI_DLLP_LSREG 0x104 /* Link Status */
+#define BCMA_CORE_PCI_DLLP_LAREG 0x108 /* Link Attention */
+#define BCMA_CORE_PCI_DLLP_LSREG_LINKUP (1 << 16)
+#define BCMA_CORE_PCI_DLLP_LAMASKREG 0x10C /* Link Attention Mask */
+#define BCMA_CORE_PCI_DLLP_NEXTTXSEQNUMREG 0x110 /* Next Tx Seq Num */
+#define BCMA_CORE_PCI_DLLP_ACKEDTXSEQNUMREG 0x114 /* Acked Tx Seq Num */
+#define BCMA_CORE_PCI_DLLP_PURGEDTXSEQNUMREG 0x118 /* Purged Tx Seq Num */
+#define BCMA_CORE_PCI_DLLP_RXSEQNUMREG 0x11C /* Rx Sequence Number */
+#define BCMA_CORE_PCI_DLLP_LRREG 0x120 /* Link Replay */
+#define BCMA_CORE_PCI_DLLP_LACKTOREG 0x124 /* Link Ack Timeout */
+#define BCMA_CORE_PCI_DLLP_PMTHRESHREG 0x128 /* Power Management Threshold */
+#define BCMA_CORE_PCI_ASPMTIMER_EXTEND 0x01000000 /* > rev7: enable extended ASPM timer */
+#define BCMA_CORE_PCI_DLLP_RTRYWPREG 0x12C /* Retry buffer write ptr */
+#define BCMA_CORE_PCI_DLLP_RTRYRPREG 0x130 /* Retry buffer Read ptr */
+#define BCMA_CORE_PCI_DLLP_RTRYPPREG 0x134 /* Retry buffer Purged ptr */
+#define BCMA_CORE_PCI_DLLP_RTRRWREG 0x138 /* Retry buffer Read/Write */
+#define BCMA_CORE_PCI_DLLP_ECTHRESHREG 0x13C /* Error Count Threshold */
+#define BCMA_CORE_PCI_DLLP_TLPERRCTRREG 0x140 /* TLP Error Counter */
+#define BCMA_CORE_PCI_DLLP_ERRCTRREG 0x144 /* Error Counter */
+#define BCMA_CORE_PCI_DLLP_NAKRXCTRREG 0x148 /* NAK Received Counter */
+#define BCMA_CORE_PCI_DLLP_TESTREG 0x14C /* Test */
+#define BCMA_CORE_PCI_DLLP_PKTBIST 0x150 /* Packet BIST */
+#define BCMA_CORE_PCI_DLLP_PCIE11 0x154 /* DLLP PCIE 1.1 reg */
+
+/* SERDES RX registers */
+#define BCMA_CORE_PCI_SERDES_RX_CTRL 1 /* Rx cntrl */
+#define BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE 0x80 /* rxpolarity_force */
+#define BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY 0x40 /* rxpolarity_value */
+#define BCMA_CORE_PCI_SERDES_RX_TIMER1 2 /* Rx Timer1 */
+#define BCMA_CORE_PCI_SERDES_RX_CDR 6 /* CDR */
+#define BCMA_CORE_PCI_SERDES_RX_CDRBW 7 /* CDR BW */
+
+/* SERDES PLL registers */
+#define BCMA_CORE_PCI_SERDES_PLL_CTRL 1 /* PLL control reg */
+#define BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN 0x4000 /* bit 14 is FREQDET on */
+
+/* PCIcore specific boardflags */
+#define BCMA_CORE_PCI_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */
+
+/* PCIE Config space accessing MACROS */
+#define BCMA_CORE_PCI_CFG_BUS_SHIFT 24 /* Bus shift */
+#define BCMA_CORE_PCI_CFG_SLOT_SHIFT 19 /* Slot/Device shift */
+#define BCMA_CORE_PCI_CFG_FUN_SHIFT 16 /* Function shift */
+#define BCMA_CORE_PCI_CFG_OFF_SHIFT 0 /* Register shift */
+
+#define BCMA_CORE_PCI_CFG_BUS_MASK 0xff /* Bus mask */
+#define BCMA_CORE_PCI_CFG_SLOT_MASK 0x1f /* Slot/Device mask */
+#define BCMA_CORE_PCI_CFG_FUN_MASK 7 /* Function mask */
+#define BCMA_CORE_PCI_CFG_OFF_MASK 0xfff /* Register mask */
+
+#define BCMA_CORE_PCI_CFG_DEVCTRL 0xd8
+
+#define BCMA_CORE_PCI_
+
+/* MDIO devices (SERDES modules) */
+#define BCMA_CORE_PCI_MDIO_IEEE0 0x000
+#define BCMA_CORE_PCI_MDIO_IEEE1 0x001
+#define BCMA_CORE_PCI_MDIO_BLK0 0x800
+#define BCMA_CORE_PCI_MDIO_BLK1 0x801
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT0 0x16
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT1 0x17
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT2 0x18
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT3 0x19
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT4 0x1A
+#define BCMA_CORE_PCI_MDIO_BLK2 0x802
+#define BCMA_CORE_PCI_MDIO_BLK3 0x803
+#define BCMA_CORE_PCI_MDIO_BLK4 0x804
+#define BCMA_CORE_PCI_MDIO_TXPLL 0x808 /* TXPLL register block idx */
+#define BCMA_CORE_PCI_MDIO_TXCTRL0 0x820
+#define BCMA_CORE_PCI_MDIO_SERDESID 0x831
+#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
+
+/* PCIE Root Capability Register bits (Host mode only) */
+#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
+
+struct bcma_drv_pci;
+struct bcma_bus;
+
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
+struct bcma_drv_pci_host {
+ struct bcma_drv_pci *pdev;
+
+ u32 host_cfg_addr;
+ spinlock_t cfgspace_lock;
+
+ struct pci_controller pci_controller;
+ struct pci_ops pci_ops;
+ struct resource mem_resource;
+ struct resource io_resource;
+};
+#endif
+
+struct bcma_drv_pci {
+ struct bcma_device *core;
+ u8 early_setup_done:1;
+ u8 setup_done:1;
+ u8 hostmode:1;
+
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
+ struct bcma_drv_pci_host *host_controller;
+#endif
+};
+
+/* Register access */
+#define pcicore_read16(pc, offset) bcma_read16((pc)->core, offset)
+#define pcicore_read32(pc, offset) bcma_read32((pc)->core, offset)
+#define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val)
+#define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val)
+
+#ifdef CONFIG_BCMA_DRIVER_PCI
+extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up);
+#else
+static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+}
+#endif
+
+extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
+extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
+
+#endif /* LINUX_BCMA_DRIVER_PCI_H_ */
diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h
new file mode 100644
index 000000000..31e6d17ab
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_pcie2.h
@@ -0,0 +1,158 @@
+#ifndef LINUX_BCMA_DRIVER_PCIE2_H_
+#define LINUX_BCMA_DRIVER_PCIE2_H_
+
+#define BCMA_CORE_PCIE2_CLK_CONTROL 0x0000
+#define PCIE2_CLKC_RST_OE 0x0001 /* When set, drives PCI_RESET out to pin */
+#define PCIE2_CLKC_RST 0x0002 /* Value driven out to pin */
+#define PCIE2_CLKC_SPERST 0x0004 /* SurvivePeRst */
+#define PCIE2_CLKC_DISABLE_L1CLK_GATING 0x0010
+#define PCIE2_CLKC_DLYPERST 0x0100 /* Delay PeRst to CoE Core */
+#define PCIE2_CLKC_DISSPROMLD 0x0200 /* DisableSpromLoadOnPerst */
+#define PCIE2_CLKC_WAKE_MODE_L2 0x1000 /* Wake on L2 */
+#define BCMA_CORE_PCIE2_RC_PM_CONTROL 0x0004
+#define BCMA_CORE_PCIE2_RC_PM_STATUS 0x0008
+#define BCMA_CORE_PCIE2_EP_PM_CONTROL 0x000C
+#define BCMA_CORE_PCIE2_EP_PM_STATUS 0x0010
+#define BCMA_CORE_PCIE2_EP_LTR_CONTROL 0x0014
+#define BCMA_CORE_PCIE2_EP_LTR_STATUS 0x0018
+#define BCMA_CORE_PCIE2_EP_OBFF_STATUS 0x001C
+#define BCMA_CORE_PCIE2_PCIE_ERR_STATUS 0x0020
+#define BCMA_CORE_PCIE2_RC_AXI_CONFIG 0x0100
+#define BCMA_CORE_PCIE2_EP_AXI_CONFIG 0x0104
+#define BCMA_CORE_PCIE2_RXDEBUG_STATUS0 0x0108
+#define BCMA_CORE_PCIE2_RXDEBUG_CONTROL0 0x010C
+#define BCMA_CORE_PCIE2_CONFIGINDADDR 0x0120
+#define BCMA_CORE_PCIE2_CONFIGINDDATA 0x0124
+#define BCMA_CORE_PCIE2_MDIOCONTROL 0x0128
+#define BCMA_CORE_PCIE2_MDIOWRDATA 0x012C
+#define BCMA_CORE_PCIE2_MDIORDDATA 0x0130
+#define BCMA_CORE_PCIE2_DATAINTF 0x0180
+#define BCMA_CORE_PCIE2_D2H_INTRLAZY_0 0x0188
+#define BCMA_CORE_PCIE2_H2D_INTRLAZY_0 0x018c
+#define BCMA_CORE_PCIE2_H2D_INTSTAT_0 0x0190
+#define BCMA_CORE_PCIE2_H2D_INTMASK_0 0x0194
+#define BCMA_CORE_PCIE2_D2H_INTSTAT_0 0x0198
+#define BCMA_CORE_PCIE2_D2H_INTMASK_0 0x019c
+#define BCMA_CORE_PCIE2_LTR_STATE 0x01A0 /* Latency Tolerance Reporting */
+#define PCIE2_LTR_ACTIVE 2
+#define PCIE2_LTR_ACTIVE_IDLE 1
+#define PCIE2_LTR_SLEEP 0
+#define PCIE2_LTR_FINAL_MASK 0x300
+#define PCIE2_LTR_FINAL_SHIFT 8
+#define BCMA_CORE_PCIE2_PWR_INT_STATUS 0x01A4
+#define BCMA_CORE_PCIE2_PWR_INT_MASK 0x01A8
+#define BCMA_CORE_PCIE2_CFG_ADDR 0x01F8
+#define BCMA_CORE_PCIE2_CFG_DATA 0x01FC
+#define BCMA_CORE_PCIE2_SYS_EQ_PAGE 0x0200
+#define BCMA_CORE_PCIE2_SYS_MSI_PAGE 0x0204
+#define BCMA_CORE_PCIE2_SYS_MSI_INTREN 0x0208
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL0 0x0210
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL1 0x0214
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL2 0x0218
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL3 0x021C
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL4 0x0220
+#define BCMA_CORE_PCIE2_SYS_MSI_CTRL5 0x0224
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD0 0x0250
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL0 0x0254
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD1 0x0258
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL1 0x025C
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD2 0x0260
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL2 0x0264
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD3 0x0268
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL3 0x026C
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD4 0x0270
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL4 0x0274
+#define BCMA_CORE_PCIE2_SYS_EQ_HEAD5 0x0278
+#define BCMA_CORE_PCIE2_SYS_EQ_TAIL5 0x027C
+#define BCMA_CORE_PCIE2_SYS_RC_INTX_EN 0x0330
+#define BCMA_CORE_PCIE2_SYS_RC_INTX_CSR 0x0334
+#define BCMA_CORE_PCIE2_SYS_MSI_REQ 0x0340
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR_EN 0x0344
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR_CSR 0x0348
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR0 0x0350
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR1 0x0354
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR2 0x0358
+#define BCMA_CORE_PCIE2_SYS_HOST_INTR3 0x035C
+#define BCMA_CORE_PCIE2_SYS_EP_INT_EN0 0x0360
+#define BCMA_CORE_PCIE2_SYS_EP_INT_EN1 0x0364
+#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR0 0x0370
+#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR1 0x0374
+#define BCMA_CORE_PCIE2_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2))
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_0 0x0C00
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_1 0x0C04
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_2 0x0C08
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_3 0x0C0C
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_4 0x0C10
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_5 0x0C14
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_6 0x0C18
+#define BCMA_CORE_PCIE2_FUNC0_IMAP0_7 0x0C1C
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_0 0x0C20
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_1 0x0C24
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_2 0x0C28
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_3 0x0C2C
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_4 0x0C30
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_5 0x0C34
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_6 0x0C38
+#define BCMA_CORE_PCIE2_FUNC1_IMAP0_7 0x0C3C
+#define BCMA_CORE_PCIE2_FUNC0_IMAP1 0x0C80
+#define BCMA_CORE_PCIE2_FUNC1_IMAP1 0x0C88
+#define BCMA_CORE_PCIE2_FUNC0_IMAP2 0x0CC0
+#define BCMA_CORE_PCIE2_FUNC1_IMAP2 0x0CC8
+#define BCMA_CORE_PCIE2_IARR0_LOWER 0x0D00
+#define BCMA_CORE_PCIE2_IARR0_UPPER 0x0D04
+#define BCMA_CORE_PCIE2_IARR1_LOWER 0x0D08
+#define BCMA_CORE_PCIE2_IARR1_UPPER 0x0D0C
+#define BCMA_CORE_PCIE2_IARR2_LOWER 0x0D10
+#define BCMA_CORE_PCIE2_IARR2_UPPER 0x0D14
+#define BCMA_CORE_PCIE2_OARR0 0x0D20
+#define BCMA_CORE_PCIE2_OARR1 0x0D28
+#define BCMA_CORE_PCIE2_OARR2 0x0D30
+#define BCMA_CORE_PCIE2_OMAP0_LOWER 0x0D40
+#define BCMA_CORE_PCIE2_OMAP0_UPPER 0x0D44
+#define BCMA_CORE_PCIE2_OMAP1_LOWER 0x0D48
+#define BCMA_CORE_PCIE2_OMAP1_UPPER 0x0D4C
+#define BCMA_CORE_PCIE2_OMAP2_LOWER 0x0D50
+#define BCMA_CORE_PCIE2_OMAP2_UPPER 0x0D54
+#define BCMA_CORE_PCIE2_FUNC1_IARR1_SIZE 0x0D58
+#define BCMA_CORE_PCIE2_FUNC1_IARR2_SIZE 0x0D5C
+#define BCMA_CORE_PCIE2_MEM_CONTROL 0x0F00
+#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG0 0x0F04
+#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG1 0x0F08
+#define BCMA_CORE_PCIE2_LINK_STATUS 0x0F0C
+#define BCMA_CORE_PCIE2_STRAP_STATUS 0x0F10
+#define BCMA_CORE_PCIE2_RESET_STATUS 0x0F14
+#define BCMA_CORE_PCIE2_RESETEN_IN_LINKDOWN 0x0F18
+#define BCMA_CORE_PCIE2_MISC_INTR_EN 0x0F1C
+#define BCMA_CORE_PCIE2_TX_DEBUG_CFG 0x0F20
+#define BCMA_CORE_PCIE2_MISC_CONFIG 0x0F24
+#define BCMA_CORE_PCIE2_MISC_STATUS 0x0F28
+#define BCMA_CORE_PCIE2_INTR_EN 0x0F30
+#define BCMA_CORE_PCIE2_INTR_CLEAR 0x0F34
+#define BCMA_CORE_PCIE2_INTR_STATUS 0x0F38
+
+/* PCIE gen2 config regs */
+#define PCIE2_INTSTATUS 0x090
+#define PCIE2_INTMASK 0x094
+#define PCIE2_SBMBX 0x098
+
+#define PCIE2_PMCR_REFUP 0x1814 /* Trefup time */
+
+#define PCIE2_CAP_DEVSTSCTRL2_OFFSET 0xD4
+#define PCIE2_CAP_DEVSTSCTRL2_LTRENAB 0x400
+#define PCIE2_PVT_REG_PM_CLK_PERIOD 0x184c
+
+struct bcma_drv_pcie2 {
+ struct bcma_device *core;
+
+ u16 reqsize;
+};
+
+#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset)
+#define pcie2_read32(pcie2, offset) bcma_read32((pcie2)->core, offset)
+#define pcie2_write16(pcie2, offset, val) bcma_write16((pcie2)->core, offset, val)
+#define pcie2_write32(pcie2, offset, val) bcma_write32((pcie2)->core, offset, val)
+
+#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set)
+#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask)
+
+#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
new file mode 100644
index 000000000..ebd5c1fcd
--- /dev/null
+++ b/include/linux/bcma/bcma_regs.h
@@ -0,0 +1,101 @@
+#ifndef LINUX_BCMA_REGS_H_
+#define LINUX_BCMA_REGS_H_
+
+/* Some single registers are shared between many cores */
+/* BCMA_CLKCTLST: ChipCommon (rev >= 20), PCIe, 80211 */
+#define BCMA_CLKCTLST 0x01E0 /* Clock control and status */
+#define BCMA_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */
+#define BCMA_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */
+#define BCMA_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */
+#define BCMA_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */
+#define BCMA_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */
+#define BCMA_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */
+#define BCMA_CLKCTLST_EXTRESREQ 0x00000700 /* Mask of external resource requests */
+#define BCMA_CLKCTLST_EXTRESREQ_SHIFT 8
+#define BCMA_CLKCTLST_HAVEALP 0x00010000 /* ALP available */
+#define BCMA_CLKCTLST_HAVEHT 0x00020000 /* HT available */
+#define BCMA_CLKCTLST_BP_ON_ALP 0x00040000 /* RO: running on ALP clock */
+#define BCMA_CLKCTLST_BP_ON_HT 0x00080000 /* RO: running on HT clock */
+#define BCMA_CLKCTLST_EXTRESST 0x07000000 /* Mask of external resource status */
+#define BCMA_CLKCTLST_EXTRESST_SHIFT 24
+/* Is there any BCM4328 on BCMA bus? */
+#define BCMA_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */
+#define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */
+
+/* Agent registers (common for every core) */
+#define BCMA_IOCTL 0x0408 /* IO control */
+#define BCMA_IOCTL_CLK 0x0001
+#define BCMA_IOCTL_FGC 0x0002
+#define BCMA_IOCTL_CORE_BITS 0x3FFC
+#define BCMA_IOCTL_PME_EN 0x4000
+#define BCMA_IOCTL_BIST_EN 0x8000
+#define BCMA_IOST 0x0500 /* IO status */
+#define BCMA_IOST_CORE_BITS 0x0FFF
+#define BCMA_IOST_DMA64 0x1000
+#define BCMA_IOST_GATED_CLK 0x2000
+#define BCMA_IOST_BIST_ERROR 0x4000
+#define BCMA_IOST_BIST_DONE 0x8000
+#define BCMA_RESET_CTL 0x0800
+#define BCMA_RESET_CTL_RESET 0x0001
+#define BCMA_RESET_ST 0x0804
+
+#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003
+#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000
+#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001
+#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002
+
+/* BCMA PCI config space registers. */
+#define BCMA_PCI_PMCSR 0x44
+#define BCMA_PCI_PE 0x100
+#define BCMA_PCI_BAR0_WIN 0x80 /* Backplane address space 0 */
+#define BCMA_PCI_BAR1_WIN 0x84 /* Backplane address space 1 */
+#define BCMA_PCI_SPROMCTL 0x88 /* SPROM control */
+#define BCMA_PCI_SPROMCTL_WE 0x10 /* SPROM write enable */
+#define BCMA_PCI_BAR1_CONTROL 0x8c /* Address space 1 burst control */
+#define BCMA_PCI_IRQS 0x90 /* PCI interrupts */
+#define BCMA_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */
+#define BCMA_PCI_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */
+#define BCMA_PCI_BAR0_WIN2 0xAC
+#define BCMA_PCI_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */
+#define BCMA_PCI_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */
+#define BCMA_PCI_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */
+#define BCMA_PCI_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
+#define BCMA_PCI_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */
+#define BCMA_PCI_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */
+#define BCMA_PCI_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */
+
+#define BCMA_PCIE2_BAR0_WIN2 0x70
+
+/* SiliconBackplane Address Map.
+ * Not all regions exist on all chips.
+ */
+#define BCMA_SOC_SDRAM_BASE 0x00000000U /* Physical SDRAM */
+#define BCMA_SOC_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */
+#define BCMA_SOC_PCI_MEM_SZ (64 * 1024 * 1024)
+#define BCMA_SOC_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */
+#define BCMA_SOC_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */
+#define BCMA_SOC_SDRAM_R2 0x80000000U /* Region 2 for sdram (512 MB) */
+
+
+#define BCMA_SOC_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */
+#define BCMA_SOC_PCI_DMA2 0x80000000U /* Client Mode sb2pcitranslation2 (1 GB) */
+#define BCMA_SOC_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */
+#define BCMA_SOC_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), low 32 bits
+ */
+#define BCMA_SOC_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+
+#define BCMA_SOC_PCI1_MEM 0x40000000U /* Host Mode sb2pcitranslation0 (64 MB) */
+#define BCMA_SOC_PCI1_CFG 0x44000000U /* Host Mode sb2pcitranslation1 (64 MB) */
+#define BCMA_SOC_PCIE1_DMA_H32 0xc0000000U /* PCIE Client Mode sb2pcitranslation2
+ * (2 ZettaBytes), high 32 bits
+ */
+
+#define BCMA_SOC_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */
+#define BCMA_SOC_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */
+#define BCMA_SOC_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */
+#define BCMA_SOC_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */
+
+#endif /* LINUX_BCMA_REGS_H_ */
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
new file mode 100644
index 000000000..1b5fc0c3b
--- /dev/null
+++ b/include/linux/bcma/bcma_soc.h
@@ -0,0 +1,15 @@
+#ifndef LINUX_BCMA_SOC_H_
+#define LINUX_BCMA_SOC_H_
+
+#include <linux/bcma/bcma.h>
+
+struct bcma_soc {
+ struct bcma_bus bus;
+};
+
+int __init bcma_host_soc_register(struct bcma_soc *soc);
+int __init bcma_host_soc_init(struct bcma_soc *soc);
+
+int bcma_bus_register(struct bcma_bus *bus);
+
+#endif /* LINUX_BCMA_SOC_H_ */
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
new file mode 100644
index 000000000..a69554ef8
--- /dev/null
+++ b/include/linux/bfin_mac.h
@@ -0,0 +1,30 @@
+/*
+ * Blackfin On-Chip MAC Driver
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _LINUX_BFIN_MAC_H_
+#define _LINUX_BFIN_MAC_H_
+
+#include <linux/phy.h>
+
+struct bfin_phydev_platform_data {
+ unsigned short addr;
+ int irq;
+};
+
+struct bfin_mii_bus_platform_data {
+ int phydev_number;
+ struct bfin_phydev_platform_data *phydev_data;
+ const unsigned short *mac_peripherals;
+ int phy_mode;
+ unsigned int phy_mask;
+ unsigned short vlan1_mask, vlan2_mask;
+};
+
+#endif
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
new file mode 100644
index 000000000..576e4639c
--- /dev/null
+++ b/include/linux/binfmts.h
@@ -0,0 +1,123 @@
+#ifndef _LINUX_BINFMTS_H
+#define _LINUX_BINFMTS_H
+
+#include <linux/sched.h>
+#include <linux/unistd.h>
+#include <asm/exec.h>
+#include <uapi/linux/binfmts.h>
+
+#define CORENAME_MAX_SIZE 128
+
+/*
+ * This structure holds the arguments used when loading binaries.
+ */
+struct linux_binprm {
+ char buf[BINPRM_BUF_SIZE];
+#ifdef CONFIG_MMU
+ struct vm_area_struct *vma;
+ unsigned long vma_pages;
+#else
+# define MAX_ARG_PAGES 32
+ struct page *page[MAX_ARG_PAGES];
+#endif
+ struct mm_struct *mm;
+ unsigned long p; /* current top of mem */
+ unsigned int
+ cred_prepared:1,/* true if creds already prepared (multiple
+ * preps happen for interpreters) */
+ cap_effective:1;/* true if has elevated effective capabilities,
+ * false if not; except for init which inherits
+ * its parent's caps anyway */
+#ifdef __alpha__
+ unsigned int taso:1;
+#endif
+ unsigned int recursion_depth; /* only for search_binary_handler() */
+ struct file * file;
+ struct cred *cred; /* new credentials */
+ int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */
+ unsigned int per_clear; /* bits to clear in current->personality */
+ int argc, envc;
+ const char * filename; /* Name of binary as seen by procps */
+ const char * interp; /* Name of the binary really executed. Most
+ of the time same as filename, but could be
+ different for binfmt_{misc,script} */
+ unsigned interp_flags;
+ unsigned interp_data;
+ unsigned long loader, exec;
+};
+
+#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
+#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
+
+/* fd of the binary should be passed to the interpreter */
+#define BINPRM_FLAGS_EXECFD_BIT 1
+#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
+
+/* filename of the binary will be inaccessible after exec */
+#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
+#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
+
+/* Function parameter for binfmt->coredump */
+struct coredump_params {
+ const siginfo_t *siginfo;
+ struct pt_regs *regs;
+ struct file *file;
+ unsigned long limit;
+ unsigned long mm_flags;
+ loff_t written;
+};
+
+/*
+ * This structure defines the functions used to load the binary
+ * formats that Linux accepts.
+ */
+struct linux_binfmt {
+ struct list_head lh;
+ struct module *module;
+ int (*load_binary)(struct linux_binprm *);
+ int (*load_shlib)(struct file *);
+ int (*core_dump)(struct coredump_params *cprm);
+ unsigned long min_coredump; /* minimal dump size */
+};
+
+extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
+
+/* Registration of default binfmt handlers */
+static inline void register_binfmt(struct linux_binfmt *fmt)
+{
+ __register_binfmt(fmt, 0);
+}
+/* Same as above, but adds a new binfmt at the top of the list */
+static inline void insert_binfmt(struct linux_binfmt *fmt)
+{
+ __register_binfmt(fmt, 1);
+}
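+
+/*
+ * Illustrative sketch: a binary-format handler is typically registered from
+ * module init code. my_format and load_my_binary are placeholder names;
+ * THIS_MODULE comes from <linux/module.h>.
+ *
+ *	static struct linux_binfmt my_format = {
+ *		.module      = THIS_MODULE,
+ *		.load_binary = load_my_binary,
+ *	};
+ *
+ *	static int __init my_binfmt_init(void)
+ *	{
+ *		register_binfmt(&my_format);
+ *		return 0;
+ *	}
+ */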
+
+extern void unregister_binfmt(struct linux_binfmt *);
+
+extern int prepare_binprm(struct linux_binprm *);
+extern int __must_check remove_arg_zero(struct linux_binprm *);
+extern int search_binary_handler(struct linux_binprm *);
+extern int flush_old_exec(struct linux_binprm * bprm);
+extern void setup_new_exec(struct linux_binprm * bprm);
+extern void would_dump(struct linux_binprm *, struct file *);
+
+extern int suid_dumpable;
+
+/* Stack area protections */
+#define EXSTACK_DEFAULT 0 /* Whatever the arch defaults to */
+#define EXSTACK_DISABLE_X 1 /* Disable executable stacks */
+#define EXSTACK_ENABLE_X 2 /* Enable executable stacks */
+
+extern int setup_arg_pages(struct linux_binprm * bprm,
+ unsigned long stack_top,
+ int executable_stack);
+extern int bprm_change_interp(char *interp, struct linux_binprm *bprm);
+extern int copy_strings_kernel(int argc, const char *const *argv,
+ struct linux_binprm *bprm);
+extern int prepare_bprm_creds(struct linux_binprm *bprm);
+extern void install_exec_creds(struct linux_binprm *bprm);
+extern void set_binfmt(struct linux_binfmt *new);
+extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
+
+#endif /* _LINUX_BINFMTS_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
new file mode 100644
index 000000000..bdf6ba5f7
--- /dev/null
+++ b/include/linux/bio.h
@@ -0,0 +1,777 @@
+/*
+ * 2.5 block I/O model
+ *
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
+ */
+#ifndef __LINUX_BIO_H
+#define __LINUX_BIO_H
+
+#include <linux/highmem.h>
+#include <linux/mempool.h>
+#include <linux/ioprio.h>
+#include <linux/bug.h>
+
+#ifdef CONFIG_BLOCK
+
+#include <asm/io.h>
+
+/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
+#include <linux/blk_types.h>
+
+extern int trap_non_toi_io;
+
+#define BIO_DEBUG
+
+#ifdef BIO_DEBUG
+#define BIO_BUG_ON BUG_ON
+#else
+#define BIO_BUG_ON
+#endif
+
+#define BIO_MAX_PAGES 256
+#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
+
+/*
+ * upper 16 bits of bi_rw define the io priority of this bio
+ */
+#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS)
+#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT)
+#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio))
+
+#define bio_set_prio(bio, prio) do { \
+ WARN_ON(prio >= (1 << IOPRIO_BITS)); \
+ (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \
+ (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \
+} while (0)
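+
+/*
+ * Illustrative sketch: tagging a bio with a best-effort priority built via
+ * IOPRIO_PRIO_VALUE() from <linux/ioprio.h> (included above) and reading it
+ * back; bio and prio are placeholder variables.
+ *
+ *	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0));
+ *	if (bio_prio_valid(bio))
+ *		prio = bio_prio(bio);
+ */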
+
+/*
+ * various member access, note that bio_data should of course not be used
+ * on highmem page vectors
+ */
+#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter) \
+ (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter) \
+ min((iter).bi_size, \
+ __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter) \
+ (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter) \
+((struct bio_vec) { \
+ .bv_page = bvec_iter_page((bvec), (iter)), \
+ .bv_len = bvec_iter_len((bvec), (iter)), \
+ .bv_offset = bvec_iter_offset((bvec), (iter)), \
+})
+
+#define bio_iter_iovec(bio, iter) \
+ bvec_iter_bvec((bio)->bi_io_vec, (iter))
+
+#define bio_iter_page(bio, iter) \
+ bvec_iter_page((bio)->bi_io_vec, (iter))
+#define bio_iter_len(bio, iter) \
+ bvec_iter_len((bio)->bi_io_vec, (iter))
+#define bio_iter_offset(bio, iter) \
+ bvec_iter_offset((bio)->bi_io_vec, (iter))
+
+#define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter)
+#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
+#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
+
+#define bio_multiple_segments(bio) \
+ ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+ if (bio &&
+ bio->bi_iter.bi_size &&
+ !(bio->bi_rw & REQ_DISCARD))
+ return true;
+
+ return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+ if (!bio_has_data(bio))
+ return false;
+
+ if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+ return false;
+
+ return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+ if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+ return false;
+
+ return true;
+}
+
+static inline unsigned int bio_cur_bytes(struct bio *bio)
+{
+ if (bio_has_data(bio))
+ return bio_iovec(bio).bv_len;
+ else /* dataless requests such as discard */
+ return bio->bi_iter.bi_size;
+}
+
+static inline void *bio_data(struct bio *bio)
+{
+ if (bio_has_data(bio))
+ return page_address(bio_page(bio)) + bio_offset(bio);
+
+ return NULL;
+}
+
+/*
+ * will die
+ */
+#define bio_to_phys(bio) (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
+#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+
+/*
+ * queues that have highmem support enabled may still need to revert to
+ * PIO transfers occasionally and thus map high pages temporarily. For
+ * permanent PIO fallback, the user is probably better off disabling highmem
+ * I/O completely on that queue (see ide-dma for example)
+ */
+#define __bio_kmap_atomic(bio, iter) \
+ (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
+ bio_iter_iovec((bio), (iter)).bv_offset)
+
+#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
+
+/*
+ * merge helpers etc
+ */
+
+/* Default implementation of BIOVEC_PHYS_MERGEABLE */
+#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+
+/*
+ * allow arch override, for eg virtualized architectures (put in asm/io.h)
+ */
+#ifndef BIOVEC_PHYS_MERGEABLE
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
+#endif
+
+#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
+ (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
+#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
+ __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
+
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
+{
+ return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
+}
+
+#define bio_io_error(bio) bio_endio((bio), -EIO)
+
+/*
+ * drivers should _never_ use the all version - the bio may have been split
+ * before it got to the driver and the driver won't own all of it
+ */
+#define bio_for_each_segment_all(bvl, bio, i) \
+ for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+
+static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
+ unsigned bytes)
+{
+ WARN_ONCE(bytes > iter->bi_size,
+ "Attempted to advance past end of bvec iter\n");
+
+ while (bytes) {
+ unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+ bytes -= len;
+ iter->bi_size -= len;
+ iter->bi_bvec_done += len;
+
+ if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+ iter->bi_bvec_done = 0;
+ iter->bi_idx++;
+ }
+ }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
+ bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+
+static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
+ unsigned bytes)
+{
+ iter->bi_sector += bytes >> 9;
+
+ if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+ iter->bi_size -= bytes;
+ else
+ bvec_iter_advance(bio->bi_io_vec, iter, bytes);
+}
+
+#define __bio_for_each_segment(bvl, bio, iter, start) \
+ for (iter = (start); \
+ (iter).bi_size && \
+ ((bvl = bio_iter_iovec((bio), (iter))), 1); \
+ bio_advance_iter((bio), &(iter), (bvl).bv_len))
+
+#define bio_for_each_segment(bvl, bio, iter) \
+ __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
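+
+/*
+ * Illustrative sketch: walking every segment of a bio and totalling the
+ * bytes it covers; for a data-carrying bio the total matches
+ * bio->bi_iter.bi_size.
+ *
+ *	struct bio_vec bv;
+ *	struct bvec_iter iter;
+ *	unsigned int bytes = 0;
+ *
+ *	bio_for_each_segment(bv, bio, iter)
+ *		bytes += bv.bv_len;
+ */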
+
+static inline unsigned bio_segments(struct bio *bio)
+{
+ unsigned segs = 0;
+ struct bio_vec bv;
+ struct bvec_iter iter;
+
+ /*
+ * We special case discard/write same, because they interpret bi_size
+ * differently:
+ */
+
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
+ bio_for_each_segment(bv, bio, iter)
+ segs++;
+
+ return segs;
+}
+
+/*
+ * get a reference to a bio, so it won't disappear. the intended use is
+ * something like:
+ *
+ * bio_get(bio);
+ * submit_bio(rw, bio);
+ * if (bio->bi_flags ...)
+ * do_something
+ * bio_put(bio);
+ *
+ * without the bio_get(), the I/O could potentially complete before submit_bio
+ * returns, and the bio would then already be freed memory by the time the
+ * if (bio->bi_flags ...) check runs
+ */
+#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+
+enum bip_flags {
+ BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
+ BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
+ BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
+ BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
+ BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
+};
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ if (bio->bi_rw & REQ_INTEGRITY)
+ return bio->bi_integrity;
+
+ return NULL;
+}
+
+/*
+ * bio integrity payload
+ */
+struct bio_integrity_payload {
+ struct bio *bip_bio; /* parent bio */
+
+ struct bvec_iter bip_iter;
+
+ bio_end_io_t *bip_end_io; /* saved I/O completion fn */
+
+ unsigned short bip_slab; /* slab the bip came from */
+ unsigned short bip_vcnt; /* # of integrity bio_vecs */
+ unsigned short bip_max_vcnt; /* integrity bio_vec slots */
+ unsigned short bip_flags; /* control flags */
+
+ struct work_struct bip_work; /* I/O completion */
+
+ struct bio_vec *bip_vec;
+ struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
+};
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+
+ if (bip)
+ return bip->bip_flags & flag;
+
+ return false;
+}
+
+static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
+{
+ return bip->bip_iter.bi_sector;
+}
+
+static inline void bip_set_seed(struct bio_integrity_payload *bip,
+ sector_t seed)
+{
+ bip->bip_iter.bi_sector = seed;
+}
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+extern void bio_trim(struct bio *bio, int offset, int size);
+extern struct bio *bio_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs);
+
+/**
+ * bio_next_split - get next @sectors from a bio, splitting if necessary
+ * @bio: bio to split
+ * @sectors: number of sectors to split from the front of @bio
+ * @gfp: gfp mask
+ * @bs: bio set to allocate from
+ *
+ * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * than @sectors, returns the original bio unchanged.
+ */
+static inline struct bio *bio_next_split(struct bio *bio, int sectors,
+ gfp_t gfp, struct bio_set *bs)
+{
+ if (sectors >= bio_sectors(bio))
+ return bio;
+
+ return bio_split(bio, sectors, gfp, bs);
+}
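+
+/*
+ * Illustrative sketch: processing a bio in chunks of at most max_sectors
+ * (a placeholder), chaining the remainder back so it completes together
+ * with the pieces. fs_bio_set and bio_chain() are declared further down in
+ * this header; generic_make_request() lives in <linux/blkdev.h>.
+ *
+ *	struct bio *split = bio_next_split(bio, max_sectors, GFP_NOIO, fs_bio_set);
+ *
+ *	if (split != bio) {
+ *		bio_chain(split, bio);
+ *		generic_make_request(bio);
+ *	}
+ */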
+
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
+extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
+extern void bioset_free(struct bio_set *);
+extern mempool_t *biovec_create_pool(int pool_entries);
+
+extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
+extern void bio_put(struct bio *);
+
+extern void __bio_clone_fast(struct bio *, struct bio *);
+extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
+extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+
+extern struct bio_set *fs_bio_set;
+
+static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+}
+
+static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
+{
+ return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
+}
+
+static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+}
+
+static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
+{
+ return bio_clone_bioset(bio, gfp_mask, NULL);
+
+}
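+
+/*
+ * Illustrative sketch: building and issuing a one-page synchronous read.
+ * bdev, page and sector are placeholders; READ comes from the fs headers
+ * and PAGE_SIZE from the page headers.
+ *
+ *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+ *
+ *	bio->bi_bdev = bdev;
+ *	bio->bi_iter.bi_sector = sector;
+ *	bio_add_page(bio, page, PAGE_SIZE, 0);
+ *
+ *	int err = submit_bio_wait(READ, bio);
+ *	bio_put(bio);
+ */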
+
+extern void bio_endio(struct bio *, int);
+extern void bio_endio_nodec(struct bio *, int);
+struct request_queue;
+extern int bio_phys_segments(struct request_queue *, struct bio *);
+
+extern int submit_bio_wait(int rw, struct bio *bio);
+extern void bio_advance(struct bio *, unsigned);
+
+extern void bio_init(struct bio *);
+extern void bio_reset(struct bio *);
+void bio_chain(struct bio *, struct bio *);
+
+extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
+ unsigned int, unsigned int);
+extern int bio_get_nr_vecs(struct block_device *);
+struct rq_map_data;
+extern struct bio *bio_map_user_iov(struct request_queue *,
+ const struct iov_iter *, gfp_t);
+extern void bio_unmap_user(struct bio *);
+extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
+ gfp_t);
+extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
+ gfp_t, int);
+extern void bio_set_pages_dirty(struct bio *bio);
+extern void bio_check_pages_dirty(struct bio *bio);
+
+void generic_start_io_acct(int rw, unsigned long sectors,
+ struct hd_struct *part);
+void generic_end_io_acct(int rw, struct hd_struct *part,
+ unsigned long start_time);
+
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
+#endif
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+extern void bio_flush_dcache_pages(struct bio *bi);
+#else
+static inline void bio_flush_dcache_pages(struct bio *bi)
+{
+}
+#endif
+
+extern void bio_copy_data(struct bio *dst, struct bio *src);
+extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
+
+extern struct bio *bio_copy_user_iov(struct request_queue *,
+ struct rq_map_data *,
+ const struct iov_iter *,
+ gfp_t);
+extern int bio_uncopy_user(struct bio *);
+void zero_fill_bio(struct bio *bio);
+extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
+extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
+extern unsigned int bvec_nr_vecs(unsigned short idx);
+
+#ifdef CONFIG_BLK_CGROUP
+int bio_associate_current(struct bio *bio);
+void bio_disassociate_task(struct bio *bio);
+#else /* CONFIG_BLK_CGROUP */
+static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
+static inline void bio_disassociate_task(struct bio *bio) { }
+#endif /* CONFIG_BLK_CGROUP */
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * remember never ever reenable interrupts between a bvec_kmap_irq and
+ * bvec_kunmap_irq!
+ */
+static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+{
+ unsigned long addr;
+
+ /*
+ * might not be a highmem page, but the preempt/irq count
+ * balancing is a lot nicer this way
+ */
+ local_irq_save(*flags);
+ addr = (unsigned long) kmap_atomic(bvec->bv_page);
+
+ BUG_ON(addr & ~PAGE_MASK);
+
+ return (char *) addr + bvec->bv_offset;
+}
+
+static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+{
+ unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
+
+ kunmap_atomic((void *) ptr);
+ local_irq_restore(*flags);
+}
+
+#else
+static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+{
+ return page_address(bvec->bv_page) + bvec->bv_offset;
+}
+
+static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+{
+ *flags = 0;
+}
+#endif
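+
+/*
+ * Illustrative sketch: temporarily mapping a segment to zero it. Interrupts
+ * stay disabled between the map and the unmap, so only short, non-sleeping
+ * work belongs in between.
+ *
+ *	unsigned long flags;
+ *	char *buf = bvec_kmap_irq(bvec, &flags);
+ *
+ *	memset(buf, 0, bvec->bv_len);
+ *	bvec_kunmap_irq(buf, &flags);
+ */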
+
+static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
+ unsigned long *flags)
+{
+ return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
+}
+#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)
+
+#define bio_kmap_irq(bio, flags) \
+ __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
+#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
+
+/*
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
+ *
+ * A bio_list anchors a singly-linked list of bios chained through the bi_next
+ * member of the bio. The bio_list also caches the last list member to allow
+ * fast access to the tail.
+ */
+struct bio_list {
+ struct bio *head;
+ struct bio *tail;
+};
+
+static inline int bio_list_empty(const struct bio_list *bl)
+{
+ return bl->head == NULL;
+}
+
+static inline void bio_list_init(struct bio_list *bl)
+{
+ bl->head = bl->tail = NULL;
+}
+
+#define BIO_EMPTY_LIST { NULL, NULL }
+
+#define bio_list_for_each(bio, bl) \
+ for (bio = (bl)->head; bio; bio = bio->bi_next)
+
+static inline unsigned bio_list_size(const struct bio_list *bl)
+{
+ unsigned sz = 0;
+ struct bio *bio;
+
+ bio_list_for_each(bio, bl)
+ sz++;
+
+ return sz;
+}
+
+static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
+{
+ bio->bi_next = NULL;
+
+ if (bl->tail)
+ bl->tail->bi_next = bio;
+ else
+ bl->head = bio;
+
+ bl->tail = bio;
+}
+
+static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
+{
+ bio->bi_next = bl->head;
+
+ bl->head = bio;
+
+ if (!bl->tail)
+ bl->tail = bio;
+}
+
+static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
+{
+ if (!bl2->head)
+ return;
+
+ if (bl->tail)
+ bl->tail->bi_next = bl2->head;
+ else
+ bl->head = bl2->head;
+
+ bl->tail = bl2->tail;
+}
+
+static inline void bio_list_merge_head(struct bio_list *bl,
+ struct bio_list *bl2)
+{
+ if (!bl2->head)
+ return;
+
+ if (bl->head)
+ bl2->tail->bi_next = bl->head;
+ else
+ bl->tail = bl2->tail;
+
+ bl->head = bl2->head;
+}
+
+static inline struct bio *bio_list_peek(struct bio_list *bl)
+{
+ return bl->head;
+}
+
+static inline struct bio *bio_list_pop(struct bio_list *bl)
+{
+ struct bio *bio = bl->head;
+
+ if (bio) {
+ bl->head = bl->head->bi_next;
+ if (!bl->head)
+ bl->tail = NULL;
+
+ bio->bi_next = NULL;
+ }
+
+ return bio;
+}
+
+static inline struct bio *bio_list_get(struct bio_list *bl)
+{
+ struct bio *bio = bl->head;
+
+ bl->head = bl->tail = NULL;
+
+ return bio;
+}
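+
+/*
+ * Illustrative sketch: queueing bios on a private list and draining it in
+ * FIFO order, as a remapping driver might. bio1/bio2 are placeholders and
+ * generic_make_request() is declared in <linux/blkdev.h>.
+ *
+ *	struct bio_list list;
+ *	struct bio *bio;
+ *
+ *	bio_list_init(&list);
+ *	bio_list_add(&list, bio1);
+ *	bio_list_add(&list, bio2);
+ *
+ *	while ((bio = bio_list_pop(&list)))
+ *		generic_make_request(bio);
+ */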
+
+/*
+ * bio_set is used to allow other portions of the IO system to
+ * allocate their own private memory pools for bio and iovec structures.
+ * These memory pools in turn all allocate from the bio_slab
+ * and the bvec_slabs[].
+ */
+#define BIO_POOL_SIZE 2
+#define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1)
+
+struct bio_set {
+ struct kmem_cache *bio_slab;
+ unsigned int front_pad;
+
+ mempool_t *bio_pool;
+ mempool_t *bvec_pool;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ mempool_t *bio_integrity_pool;
+ mempool_t *bvec_integrity_pool;
+#endif
+
+ /*
+ * Deadlock avoidance for stacking block drivers: see comments in
+ * bio_alloc_bioset() for details
+ */
+ spinlock_t rescue_lock;
+ struct bio_list rescue_list;
+ struct work_struct rescue_work;
+ struct workqueue_struct *rescue_workqueue;
+};
+
+struct biovec_slab {
+ int nr_vecs;
+ char *name;
+ struct kmem_cache *slab;
+};
+
+/*
+ * A small number of entries is fine; this is not going to be performance
+ * critical. Basically we just need to survive.
+ */
+#define BIO_SPLIT_ENTRIES 2
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+#define bip_for_each_vec(bvl, bip, iter) \
+ for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
+
+#define bio_for_each_integrity_vec(_bvl, _bio, _iter) \
+ for_each_bio(_bio) \
+ bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
+
+extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
+extern void bio_integrity_free(struct bio *);
+extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
+extern bool bio_integrity_enabled(struct bio *bio);
+extern int bio_integrity_prep(struct bio *);
+extern void bio_integrity_endio(struct bio *, int);
+extern void bio_integrity_advance(struct bio *, unsigned int);
+extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
+extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
+extern int bioset_integrity_create(struct bio_set *, int);
+extern void bioset_integrity_free(struct bio_set *);
+extern void bio_integrity_init(void);
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+static inline void *bio_integrity(struct bio *bio)
+{
+ return NULL;
+}
+
+static inline bool bio_integrity_enabled(struct bio *bio)
+{
+ return false;
+}
+
+static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
+{
+ return 0;
+}
+
+static inline void bioset_integrity_free (struct bio_set *bs)
+{
+ return;
+}
+
+static inline int bio_integrity_prep(struct bio *bio)
+{
+ return 0;
+}
+
+static inline void bio_integrity_free(struct bio *bio)
+{
+ return;
+}
+
+static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
+ gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline void bio_integrity_advance(struct bio *bio,
+ unsigned int bytes_done)
+{
+ return;
+}
+
+static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
+ unsigned int sectors)
+{
+ return;
+}
+
+static inline void bio_integrity_init(void)
+{
+ return;
+}
+
+static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
+{
+ return false;
+}
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+#endif /* CONFIG_BLOCK */
+#endif /* __LINUX_BIO_H */
diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
new file mode 100644
index 000000000..3b5bafce4
--- /dev/null
+++ b/include/linux/bit_spinlock.h
@@ -0,0 +1,100 @@
+#ifndef __LINUX_BIT_SPINLOCK_H
+#define __LINUX_BIT_SPINLOCK_H
+
+#include <linux/kernel.h>
+#include <linux/preempt.h>
+#include <linux/atomic.h>
+#include <linux/bug.h>
+
+/*
+ * bit-based spin_lock()
+ *
+ * Don't use this unless you really need to: spin_lock() and spin_unlock()
+ * are significantly faster.
+ */
+static inline void bit_spin_lock(int bitnum, unsigned long *addr)
+{
+ /*
+ * Assuming the lock is uncontended, this never enters
+ * the body of the outer loop. If it is contended, then
+ * within the inner loop a non-atomic test is used to
+ * busywait with less bus contention for a good time to
+ * attempt to acquire the lock bit.
+ */
+ preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+ preempt_enable();
+ do {
+ cpu_relax();
+ } while (test_bit(bitnum, addr));
+ preempt_disable();
+ }
+#endif
+ __acquire(bitlock);
+}
+
+/*
+ * Return true if it was acquired
+ */
+static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
+{
+ preempt_disable();
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
+ preempt_enable();
+ return 0;
+ }
+#endif
+ __acquire(bitlock);
+ return 1;
+}
+
+/*
+ * bit-based spin_unlock()
+ */
+static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ BUG_ON(!test_bit(bitnum, addr));
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ clear_bit_unlock(bitnum, addr);
+#endif
+ preempt_enable();
+ __release(bitlock);
+}
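+
+/*
+ * Illustrative sketch: using bit 0 of a flags word as a lock around a
+ * short, non-sleeping critical section; the variable name is a placeholder
+ * and the remaining bits of the word stay usable for other state.
+ *
+ *	static unsigned long state;
+ *
+ *	bit_spin_lock(0, &state);
+ *	... touch the data protected by bit 0 of state ...
+ *	bit_spin_unlock(0, &state);
+ */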
+
+/*
+ * bit-based spin_unlock()
+ * non-atomic version, which can be used eg. if the bit lock itself is
+ * protecting the rest of the flags in the word.
+ */
+static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ BUG_ON(!test_bit(bitnum, addr));
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ __clear_bit_unlock(bitnum, addr);
+#endif
+ preempt_enable();
+ __release(bitlock);
+}
+
+/*
+ * Return true if the lock is held.
+ */
+static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ return test_bit(bitnum, addr);
+#elif defined CONFIG_PREEMPT_COUNT
+ return preempt_count();
+#else
+ return 1;
+#endif
+}
+
+#endif /* __LINUX_BIT_SPINLOCK_H */
+
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
new file mode 100644
index 000000000..ea17cca9e
--- /dev/null
+++ b/include/linux/bitmap.h
@@ -0,0 +1,331 @@
+#ifndef __LINUX_BITMAP_H
+#define __LINUX_BITMAP_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+/*
+ * bitmaps provide bit arrays that consume one or more unsigned
+ * longs. The bitmap interface and available operations are listed
+ * here, in bitmap.h
+ *
+ * Function implementations generic to all architectures are in
+ * lib/bitmap.c. Function implementations that are architecture
+ * specific are in various include/asm-<arch>/bitops.h headers
+ * and other arch/<arch> specific files.
+ *
+ * See lib/bitmap.c for more details.
+ */
+
+/*
+ * The available bitmap operations and their rough meaning in the
+ * case that the bitmap is a single unsigned long are thus:
+ *
+ * Note that nbits should always be a compile-time evaluable constant.
+ * Otherwise many inlines will generate horrible code.
+ *
+ * bitmap_zero(dst, nbits) *dst = 0UL
+ * bitmap_fill(dst, nbits) *dst = ~0UL
+ * bitmap_copy(dst, src, nbits) *dst = *src
+ * bitmap_and(dst, src1, src2, nbits) *dst = *src1 & *src2
+ * bitmap_or(dst, src1, src2, nbits) *dst = *src1 | *src2
+ * bitmap_xor(dst, src1, src2, nbits) *dst = *src1 ^ *src2
+ * bitmap_andnot(dst, src1, src2, nbits) *dst = *src1 & ~(*src2)
+ * bitmap_complement(dst, src, nbits) *dst = ~(*src)
+ * bitmap_equal(src1, src2, nbits) Are *src1 and *src2 equal?
+ * bitmap_intersects(src1, src2, nbits) Do *src1 and *src2 overlap?
+ * bitmap_subset(src1, src2, nbits) Is *src1 a subset of *src2?
+ * bitmap_empty(src, nbits) Are all bits zero in *src?
+ * bitmap_full(src, nbits) Are all bits set in *src?
+ * bitmap_weight(src, nbits) Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits) Set specified bit area
+ * bitmap_clear(dst, pos, nbits) Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
+ * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
+ * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
+ * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
+ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
+ * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
+ * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
+ * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
+ * bitmap_release_region(bitmap, pos, order) Free specified bit region
+ * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
+ */
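+
+/*
+ * Illustrative sketch combining a few of the operations above, using
+ * DECLARE_BITMAP() from <linux/types.h>; the names and the 128-bit size
+ * are placeholders.
+ *
+ *	DECLARE_BITMAP(pending, 128);
+ *	DECLARE_BITMAP(mask, 128);
+ *	DECLARE_BITMAP(masked, 128);
+ *
+ *	bitmap_zero(pending, 128);
+ *	bitmap_fill(mask, 128);
+ *	bitmap_set(pending, 10, 4);
+ *	bitmap_and(masked, pending, mask, 128);
+ *	if (bitmap_weight(masked, 128) == 4)
+ *		...
+ */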
+
+/*
+ * Also the following operations in asm/bitops.h apply to bitmaps.
+ *
+ * set_bit(bit, addr) *addr |= bit
+ * clear_bit(bit, addr) *addr &= ~bit
+ * change_bit(bit, addr) *addr ^= bit
+ * test_bit(bit, addr) Is bit set in *addr?
+ * test_and_set_bit(bit, addr) Set bit and return old value
+ * test_and_clear_bit(bit, addr) Clear bit and return old value
+ * test_and_change_bit(bit, addr) Change bit and return old value
+ * find_first_zero_bit(addr, nbits) Position first zero bit in *addr
+ * find_first_bit(addr, nbits) Position first set bit in *addr
+ * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit
+ * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit
+ */
+
+/*
+ * The DECLARE_BITMAP(name,bits) macro, in linux/types.h, can be used
+ * to declare an array named 'name' of just enough unsigned longs to
+ * contain all bit positions from 0 to 'bits' - 1.
+ */
+
+/*
+ * lib/bitmap.c provides these functions:
+ */
+
+extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
+extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
+extern int __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
+ unsigned int nbits);
+extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits);
+extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+extern int __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+extern int __bitmap_subset(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int nbits);
+extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
+
+extern void bitmap_set(unsigned long *map, unsigned int start, int len);
+extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
+
+extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask,
+ unsigned long align_offset);
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @align_mask: Alignment mask for zero area
+ *
+ * The @align_mask should be one less than a power of 2; the effect is that
+ * the bit offsets of all zero areas this function finds are multiples of that
+ * power of 2. A @align_mask of 0 means no alignment is required.
+ */
+static inline unsigned long
+bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ return bitmap_find_next_zero_area_off(map, size, start, nr,
+ align_mask, 0);
+}
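+
+/*
+ * Illustrative sketch: grabbing a run of 8 free bits aligned to 8 from an
+ * allocation bitmap of "size" bits and marking it busy; map and size are
+ * placeholders and any locking is up to the caller.
+ *
+ *	unsigned long start;
+ *
+ *	start = bitmap_find_next_zero_area(map, size, 0, 8, 7);
+ *	if (start >= size)
+ *		return -ENOMEM;
+ *	bitmap_set(map, start, 8);
+ */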
+
+extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
+ unsigned long *dst, int nbits);
+extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+extern int bitmap_parselist(const char *buf, unsigned long *maskp,
+ int nmaskbits);
+extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
+ const unsigned long *old, const unsigned long *new, unsigned int nbits);
+extern int bitmap_bitremap(int oldbit,
+ const unsigned long *old, const unsigned long *new, int bits);
+extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+ const unsigned long *relmap, unsigned int bits);
+extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+ unsigned int sz, unsigned int nbits);
+extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
+extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
+extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
+#ifdef __BIG_ENDIAN
+extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
+#else
+#define bitmap_copy_le bitmap_copy
+#endif
+extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
+extern int bitmap_print_to_pagebuf(bool list, char *buf,
+ const unsigned long *maskp, int nmaskbits);
+
+#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
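+
+/*
+ * Worked example, assuming BITS_PER_LONG == 64:
+ *
+ *	BITMAP_FIRST_WORD_MASK(68) == ~0UL << 4		(bits 4..63 of that word)
+ *	BITMAP_LAST_WORD_MASK(68)  == ~0UL >> 60	(only the low 4 bits)
+ */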
+
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
+
+static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = 0UL;
+ else {
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memset(dst, 0, len);
+ }
+}
+
+static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
+{
+ unsigned int nlongs = BITS_TO_LONGS(nbits);
+ if (!small_const_nbits(nbits)) {
+ unsigned int len = (nlongs - 1) * sizeof(unsigned long);
+ memset(dst, 0xff, len);
+ }
+ dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
+}
+
+static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
+ unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = *src;
+ else {
+ unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
+ memcpy(dst, src, len);
+ }
+}
+
+static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ return __bitmap_and(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = *src1 | *src2;
+ else
+ __bitmap_or(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = *src1 ^ *src2;
+ else
+ __bitmap_xor(dst, src1, src2, nbits);
+}
+
+static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ return __bitmap_andnot(dst, src1, src2, nbits);
+}
+
+static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
+ unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = ~(*src);
+ else
+ __bitmap_complement(dst, src, nbits);
+}
+
+static inline int bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+ else
+ return __bitmap_equal(src1, src2, nbits);
+}
+
+static inline int bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ else
+ return __bitmap_intersects(src1, src2, nbits);
+}
+
+static inline int bitmap_subset(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
+ else
+ return __bitmap_subset(src1, src2, nbits);
+}
+
+static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+{
+ if (small_const_nbits(nbits))
+ return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
+
+ return find_first_bit(src, nbits) == nbits;
+}
+
+static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
+
+ return find_first_zero_bit(src, nbits) == nbits;
+}
+
+static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
+ return __bitmap_weight(src, nbits);
+}
+
+static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
+ else
+ __bitmap_shift_right(dst, src, shift, nbits);
+}
+
+static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
+ unsigned int shift, unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ *dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
+ else
+ __bitmap_shift_left(dst, src, shift, nbits);
+}
+
+static inline int bitmap_parse(const char *buf, unsigned int buflen,
+ unsigned long *maskp, int nmaskbits)
+{
+ return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __LINUX_BITMAP_H */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
new file mode 100644
index 000000000..297f5bda4
--- /dev/null
+++ b/include/linux/bitops.h
@@ -0,0 +1,230 @@
+#ifndef _LINUX_BITOPS_H
+#define _LINUX_BITOPS_H
+#include <asm/types.h>
+
+#ifdef __KERNEL__
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#endif
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
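+
+/*
+ * Illustrative sketch: extracting a bit field with GENMASK(). The layout
+ * (a "speed" field in bits 7:4 of val) is hypothetical.
+ *
+ *	#define SPEED_MASK	GENMASK(7, 4)
+ *
+ *	speed = (val & SPEED_MASK) >> 4;
+ */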
+
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
+/*
+ * Include this here because some architectures need generic_ffs/fls in
+ * scope
+ */
+#include <asm/bitops.h>
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = find_first_zero_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
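+
+/*
+ * Illustrative sketch: handling every source recorded in a pending-status
+ * bitmap; pending, nr_sources and handle_one() are placeholders.
+ *
+ *	unsigned long bit;
+ *
+ *	for_each_set_bit(bit, pending, nr_sources)
+ *		handle_one(bit);
+ */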
+
+static __inline__ int get_bitmask_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count);
+ return order; /* We could be slightly more clever with -1 here... */
+}
+
+static __inline__ int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+ return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (64 - shift));
+}
+
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 rol32(__u32 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (32 - shift));
+}
+
+/**
+ * ror32 - rotate a 32-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u32 ror32(__u32 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (32 - shift));
+}
+
+/**
+ * rol16 - rotate a 16-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 rol16(__u16 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (16 - shift));
+}
+
+/**
+ * ror16 - rotate a 16-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u16 ror16(__u16 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (16 - shift));
+}
+
+/**
+ * rol8 - rotate an 8-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 rol8(__u8 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (8 - shift));
+}
+
+/**
+ * ror8 - rotate an 8-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u8 ror8(__u8 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (8 - shift));
+}
+
+/**
+ * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
+ * @value: value to sign extend
+ * @index: 0 based bit index (0<=index<32) to sign bit
+ */
+static inline __s32 sign_extend32(__u32 value, int index)
+{
+ __u8 shift = 31 - index;
+ return (__s32)(value << shift) >> shift;
+}
+
+static inline unsigned fls_long(unsigned long l)
+{
+ if (sizeof(l) == 4)
+ return fls(l);
+ return fls64(l);
+}
+
+/**
+ * __ffs64 - find first set bit in a 64 bit word
+ * @word: The 64 bit word
+ *
+ * On 64 bit arches this is a synonym for __ffs
+ * The result is not defined if no bits are set, so check that @word
+ * is non-zero before calling this.
+ */
+static inline unsigned long __ffs64(u64 word)
+{
+#if BITS_PER_LONG == 32
+ if (((u32)word) == 0UL)
+ return __ffs((u32)(word >> 32)) + 32;
+#elif BITS_PER_LONG != 64
+#error BITS_PER_LONG not 32 or 64
+#endif
+ return __ffs((unsigned long)word);
+}
+
+#ifdef __KERNEL__
+
+#ifndef set_mask_bits
+#define set_mask_bits(ptr, _mask, _bits) \
+({ \
+ const typeof(*ptr) mask = (_mask), bits = (_bits); \
+ typeof(*ptr) old, new; \
+ \
+ do { \
+ old = ACCESS_ONCE(*ptr); \
+ new = (old & ~mask) | bits; \
+ } while (cmpxchg(ptr, old, new) != old); \
+ \
+ new; \
+})
+#endif
+
+#ifndef find_last_bit
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The number of bits to search
+ *
+ * Returns the bit number of the last set bit, or size.
+ */
+extern unsigned long find_last_bit(const unsigned long *addr,
+ unsigned long size);
+#endif
+
+#endif /* __KERNEL__ */
+#endif
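A short sketch exercising a few of the helpers above (GENMASK(), for_each_set_bit(), hweight_long(), rol8() and sign_extend32()); the values and the function name are arbitrary and only serve to show the expected results.

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/printk.h>

static void bitops_example(void)
{
	unsigned long word = GENMASK(7, 4);		/* 0xf0: bits 4..7 set */
	unsigned int weight = hweight_long(word);	/* == 4 */
	u8 rotated = rol8(0xf0, 4);			/* == 0x0f */
	s32 extended = sign_extend32(0x80, 7);		/* bit 7 as sign: -128 */
	unsigned long bit;

	/* Visits bits 4, 5, 6 and 7 in turn. */
	for_each_set_bit(bit, &word, BITS_PER_LONG)
		pr_debug("bit %lu set (weight %u, rot %#x, ext %d)\n",
			 bit, weight, rotated, extended);
}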
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
new file mode 100644
index 000000000..fb790b844
--- /dev/null
+++ b/include/linux/bitrev.h
@@ -0,0 +1,85 @@
+#ifndef _LINUX_BITREV_H
+#define _LINUX_BITREV_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_HAVE_ARCH_BITREVERSE
+#include <asm/bitrev.h>
+
+#define __bitrev32 __arch_bitrev32
+#define __bitrev16 __arch_bitrev16
+#define __bitrev8 __arch_bitrev8
+
+#else
+extern u8 const byte_rev_table[256];
+static inline u8 __bitrev8(u8 byte)
+{
+ return byte_rev_table[byte];
+}
+
+static inline u16 __bitrev16(u16 x)
+{
+ return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8);
+}
+
+static inline u32 __bitrev32(u32 x)
+{
+ return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16);
+}
+
+#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
+
+#define __constant_bitrev32(x) \
+({ \
+ u32 __x = x; \
+ __x = (__x >> 16) | (__x << 16); \
+ __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
+ __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
+ __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
+ __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
+ __x; \
+})
+
+#define __constant_bitrev16(x) \
+({ \
+ u16 __x = x; \
+ __x = (__x >> 8) | (__x << 8); \
+ __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
+ __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
+ __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
+ __x; \
+})
+
+#define __constant_bitrev8(x) \
+({ \
+ u8 __x = x; \
+ __x = (__x >> 4) | (__x << 4); \
+ __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
+ __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
+ __x; \
+})
+
+#define bitrev32(x) \
+({ \
+ u32 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev32(__x) : \
+ __bitrev32(__x); \
+})
+
+#define bitrev16(x) \
+({ \
+ u16 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev16(__x) : \
+ __bitrev16(__x); \
+ })
+
+#define bitrev8(x) \
+({ \
+ u8 __x = x; \
+ __builtin_constant_p(__x) ? \
+ __constant_bitrev8(__x) : \
+ __bitrev8(__x); \
+ })
+#endif /* _LINUX_BITREV_H */
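A small sketch of bitrev8(): a constant argument is folded at compile time through __constant_bitrev8(), while a runtime value goes through the lookup table (or the arch helper when CONFIG_HAVE_ARCH_BITREVERSE is set). The function and its argument are invented for illustration.

#include <linux/bitrev.h>

static u16 bitrev_example(u8 runtime_byte)
{
	u8 fixed = bitrev8(0xA1);	/* 1010 0001 -> 1000 0101 = 0x85, folded */

	/* Table (or arch) path for the runtime value, constant path for 0xA1. */
	return (bitrev8(runtime_byte) << 8) | fixed;
}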
diff --git a/include/linux/blk-iopoll.h b/include/linux/blk-iopoll.h
new file mode 100644
index 000000000..77ae77c0b
--- /dev/null
+++ b/include/linux/blk-iopoll.h
@@ -0,0 +1,46 @@
+#ifndef BLK_IOPOLL_H
+#define BLK_IOPOLL_H
+
+struct blk_iopoll;
+typedef int (blk_iopoll_fn)(struct blk_iopoll *, int);
+
+struct blk_iopoll {
+ struct list_head list;
+ unsigned long state;
+ unsigned long data;
+ int weight;
+ int max;
+ blk_iopoll_fn *poll;
+};
+
+enum {
+ IOPOLL_F_SCHED = 0,
+ IOPOLL_F_DISABLE = 1,
+};
+
+/*
+ * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating
+ * that we were the first to acquire this iop for scheduling. If this iop
+ * is currently disabled, return "failure".
+ */
+static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop)
+{
+ if (!test_bit(IOPOLL_F_DISABLE, &iop->state))
+ return test_and_set_bit(IOPOLL_F_SCHED, &iop->state);
+
+ return 1;
+}
+
+static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop)
+{
+ return test_bit(IOPOLL_F_DISABLE, &iop->state);
+}
+
+extern void blk_iopoll_sched(struct blk_iopoll *);
+extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *);
+extern void blk_iopoll_complete(struct blk_iopoll *);
+extern void __blk_iopoll_complete(struct blk_iopoll *);
+extern void blk_iopoll_enable(struct blk_iopoll *);
+extern void blk_iopoll_disable(struct blk_iopoll *);
+
+#endif
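A sketch of the interrupt-side pattern hinted at by the comment above blk_iopoll_sched_prep(): only arm the poll handler if polling is enabled and this CPU won the race for IOPOLL_F_SCHED. The my_ctrl structure and the IRQ wiring are hypothetical; only the blk_iopoll_* calls come from the header above.

#include <linux/blk-iopoll.h>
#include <linux/interrupt.h>

struct my_ctrl {
	struct blk_iopoll iopoll;
};

static irqreturn_t my_ctrl_irq(int irq, void *data)
{
	struct my_ctrl *ctrl = data;

	/* 0 means: not disabled and we were first to set IOPOLL_F_SCHED. */
	if (!blk_iopoll_sched_prep(&ctrl->iopoll))
		blk_iopoll_sched(&ctrl->iopoll);

	return IRQ_HANDLED;
}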
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
new file mode 100644
index 000000000..2056a99b9
--- /dev/null
+++ b/include/linux/blk-mq.h
@@ -0,0 +1,266 @@
+#ifndef BLK_MQ_H
+#define BLK_MQ_H
+
+#include <linux/blkdev.h>
+
+struct blk_mq_tags;
+struct blk_flush_queue;
+
+struct blk_mq_cpu_notifier {
+ struct list_head list;
+ void *data;
+ int (*notify)(void *data, unsigned long action, unsigned int cpu);
+};
+
+struct blk_mq_ctxmap {
+ unsigned int size;
+ unsigned int bits_per_word;
+ struct blk_align_bitmap *map;
+};
+
+struct blk_mq_hw_ctx {
+ struct {
+ spinlock_t lock;
+ struct list_head dispatch;
+ } ____cacheline_aligned_in_smp;
+
+ unsigned long state; /* BLK_MQ_S_* flags */
+ struct delayed_work run_work;
+ struct delayed_work delay_work;
+ cpumask_var_t cpumask;
+ int next_cpu;
+ int next_cpu_batch;
+
+ unsigned long flags; /* BLK_MQ_F_* flags */
+
+ struct request_queue *queue;
+ struct blk_flush_queue *fq;
+
+ void *driver_data;
+
+ struct blk_mq_ctxmap ctx_map;
+
+ unsigned int nr_ctx;
+ struct blk_mq_ctx **ctxs;
+
+ atomic_t wait_index;
+
+ struct blk_mq_tags *tags;
+
+ unsigned long queued;
+ unsigned long run;
+#define BLK_MQ_MAX_DISPATCH_ORDER 10
+ unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+
+ unsigned int numa_node;
+ unsigned int queue_num;
+
+ atomic_t nr_active;
+
+ struct blk_mq_cpu_notifier cpu_notifier;
+ struct kobject kobj;
+};
+
+struct blk_mq_tag_set {
+ struct blk_mq_ops *ops;
+ unsigned int nr_hw_queues;
+ unsigned int queue_depth; /* max hw supported */
+ unsigned int reserved_tags;
+ unsigned int cmd_size; /* per-request extra data */
+ int numa_node;
+ unsigned int timeout;
+ unsigned int flags; /* BLK_MQ_F_* */
+ void *driver_data;
+
+ struct blk_mq_tags **tags;
+
+ struct mutex tag_list_lock;
+ struct list_head tag_list;
+};
+
+struct blk_mq_queue_data {
+ struct request *rq;
+ struct list_head *list;
+ bool last;
+};
+
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
+typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
+typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (init_request_fn)(void *, struct request *, unsigned int,
+ unsigned int, unsigned int);
+typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+ unsigned int);
+
+typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+ bool);
+
+struct blk_mq_ops {
+ /*
+ * Queue request
+ */
+ queue_rq_fn *queue_rq;
+
+ /*
+ * Map to specific hardware queue
+ */
+ map_queue_fn *map_queue;
+
+ /*
+ * Called on request timeout
+ */
+ timeout_fn *timeout;
+
+ softirq_done_fn *complete;
+
+ /*
+ * Called when the block layer side of a hardware queue has been
+ * set up, allowing the driver to allocate/init matching structures.
+ * Ditto for exit/teardown.
+ */
+ init_hctx_fn *init_hctx;
+ exit_hctx_fn *exit_hctx;
+
+ /*
+ * Called for every command allocated by the block layer to allow
+ * the driver to set up driver specific data.
+ *
+ * A tag greater than or equal to queue_depth is for setting up
+ * the flush request.
+ *
+ * Ditto for exit/teardown.
+ */
+ init_request_fn *init_request;
+ exit_request_fn *exit_request;
+};
+
+enum {
+ BLK_MQ_RQ_QUEUE_OK = 0, /* queued fine */
+ BLK_MQ_RQ_QUEUE_BUSY = 1, /* requeue IO for later */
+ BLK_MQ_RQ_QUEUE_ERROR = 2, /* end IO with error */
+
+ BLK_MQ_F_SHOULD_MERGE = 1 << 0,
+ BLK_MQ_F_TAG_SHARED = 1 << 1,
+ BLK_MQ_F_SG_MERGE = 1 << 2,
+ BLK_MQ_F_SYSFS_UP = 1 << 3,
+ BLK_MQ_F_DEFER_ISSUE = 1 << 4,
+ BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
+ BLK_MQ_F_ALLOC_POLICY_BITS = 1,
+
+ BLK_MQ_S_STOPPED = 0,
+ BLK_MQ_S_TAG_ACTIVE = 1,
+
+ BLK_MQ_MAX_DEPTH = 10240,
+
+ BLK_MQ_CPU_WORK_BATCH = 8,
+};
+#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
+ ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
+ ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
+#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
+ ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
+ << BLK_MQ_F_ALLOC_POLICY_START_BIT)
+
+struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ struct request_queue *q);
+void blk_mq_finish_init(struct request_queue *q);
+int blk_mq_register_disk(struct gendisk *);
+void blk_mq_unregister_disk(struct gendisk *);
+
+int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
+
+void blk_mq_insert_request(struct request *, bool, bool, bool);
+void blk_mq_free_request(struct request *rq);
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
+bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+ gfp_t gfp, bool reserved);
+struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+
+enum {
+ BLK_MQ_UNIQUE_TAG_BITS = 16,
+ BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
+};
+
+u32 blk_mq_unique_tag(struct request *rq);
+
+static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
+{
+ return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
+}
+
+static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
+{
+ return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
+}
+
+struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+
+int blk_mq_request_started(struct request *rq);
+void blk_mq_start_request(struct request *rq);
+void blk_mq_end_request(struct request *rq, int error);
+void __blk_mq_end_request(struct request *rq, int error);
+
+void blk_mq_requeue_request(struct request *rq);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
+void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
+void blk_mq_complete_request(struct request *rq);
+
+void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
+void blk_mq_stop_hw_queues(struct request_queue *q);
+void blk_mq_start_hw_queues(struct request_queue *q);
+void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_run_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
+void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+ void *priv);
+void blk_mq_freeze_queue(struct request_queue *q);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
+
+/*
+ * Driver command data is immediately after the request. So subtract request
+ * size to get back to the original request, add request size to get the PDU.
+ */
+static inline struct request *blk_mq_rq_from_pdu(void *pdu)
+{
+ return pdu - sizeof(struct request);
+}
+static inline void *blk_mq_rq_to_pdu(struct request *rq)
+{
+ return rq + 1;
+}
+
+#define queue_for_each_hw_ctx(q, hctx, i) \
+ for ((i) = 0; (i) < (q)->nr_hw_queues && \
+ ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+
+#define queue_for_each_ctx(q, ctx, i) \
+ for ((i) = 0; (i) < (q)->nr_queues && \
+ ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
+
+#define hctx_for_each_ctx(hctx, ctx, i) \
+ for ((i) = 0; (i) < (hctx)->nr_ctx && \
+ ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
+
+#define blk_ctx_sum(q, sum) \
+({ \
+ struct blk_mq_ctx *__x; \
+ unsigned int __ret = 0, __i; \
+ \
+ queue_for_each_ctx((q), __x, __i) \
+ __ret += sum; \
+ __ret; \
+})
+
+#endif
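A compressed sketch of what a blk-mq driver wires up with this header: a queue_rq handler, a tag set and a request queue. The my_cmd/my_queue_rq/my_init names, the queue depth and the complete-immediately behaviour are invented; only the blk_mq_* calls, types and flags come from the header above.

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <linux/string.h>

struct my_cmd {				/* per-request PDU, sized via cmd_size */
	int dummy;
};

static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* PDU follows the request */

	(void)cmd;
	blk_mq_start_request(rq);
	blk_mq_end_request(rq, 0);	/* a "null" device: complete at once */
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* default ctx -> hctx mapping */
};

static int my_init(struct blk_mq_tag_set *set, struct request_queue **q)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = &my_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 64;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct my_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	*q = blk_mq_init_queue(set);
	if (IS_ERR(*q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(*q);
	}
	return 0;
}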
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
new file mode 100644
index 000000000..9bd89b1e7
--- /dev/null
+++ b/include/linux/blk_types.h
@@ -0,0 +1,254 @@
+/*
+ * Block data types and constants. Directly include this file only to
+ * break include dependency loop.
+ */
+#ifndef __LINUX_BLK_TYPES_H
+#define __LINUX_BLK_TYPES_H
+
+#include <linux/types.h>
+
+struct bio_set;
+struct bio;
+struct bio_integrity_payload;
+struct page;
+struct block_device;
+struct io_context;
+struct cgroup_subsys_state;
+typedef void (bio_end_io_t) (struct bio *, int);
+typedef void (bio_destructor_t) (struct bio *);
+
+/*
+ * was unsigned short, but we might as well be ready for > 64kB I/O pages
+ */
+struct bio_vec {
+ struct page *bv_page;
+ unsigned int bv_len;
+ unsigned int bv_offset;
+};
+
+#ifdef CONFIG_BLOCK
+
+struct bvec_iter {
+ sector_t bi_sector; /* device address in 512 byte
+ sectors */
+ unsigned int bi_size; /* residual I/O count */
+
+ unsigned int bi_idx; /* current index into bvl_vec */
+
+ unsigned int bi_bvec_done; /* number of bytes completed in
+ current bvec */
+};
+
+/*
+ * main unit of I/O for the block layer and lower layers (ie drivers and
+ * stacking drivers)
+ */
+struct bio {
+ struct bio *bi_next; /* request queue link */
+ struct block_device *bi_bdev;
+ unsigned long bi_flags; /* status, command, etc */
+ unsigned long bi_rw; /* bottom bits READ/WRITE,
+ * top bits priority
+ */
+
+ struct bvec_iter bi_iter;
+
+ /* Number of segments in this BIO after
+ * physical address coalescing is performed.
+ */
+ unsigned int bi_phys_segments;
+
+ /*
+ * To keep track of the max segment size, we account for the
+ * sizes of the first and last mergeable segments in this bio.
+ */
+ unsigned int bi_seg_front_size;
+ unsigned int bi_seg_back_size;
+
+ atomic_t bi_remaining;
+
+ bio_end_io_t *bi_end_io;
+
+ void *bi_private;
+#ifdef CONFIG_BLK_CGROUP
+ /*
+ * Optional ioc and css associated with this bio. Put on bio
+ * release. Read comment on top of bio_associate_current().
+ */
+ struct io_context *bi_ioc;
+ struct cgroup_subsys_state *bi_css;
+#endif
+ union {
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ struct bio_integrity_payload *bi_integrity; /* data integrity */
+#endif
+ };
+
+ unsigned short bi_vcnt; /* how many bio_vec's */
+
+ /*
+ * Everything starting with bi_max_vecs will be preserved by bio_reset()
+ */
+
+ unsigned short bi_max_vecs; /* max bvl_vecs we can hold */
+
+ atomic_t bi_cnt; /* pin count */
+
+ struct bio_vec *bi_io_vec; /* the actual vec list */
+
+ struct bio_set *bi_pool;
+
+ /*
+ * We can inline a number of vecs at the end of the bio, to avoid
+ * double allocations for a small number of bio_vecs. This member
+ * MUST obviously be kept at the very end of the bio.
+ */
+ struct bio_vec bi_inline_vecs[0];
+};
+
+#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
+
+/*
+ * bio flags
+ */
+#define BIO_UPTODATE 0 /* ok after I/O completion */
+#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
+#define BIO_EOF 2 /* out-of-bounds error */
+#define BIO_SEG_VALID 3 /* bi_phys_segments valid */
+#define BIO_CLONED 4 /* doesn't own data */
+#define BIO_BOUNCED 5 /* bio is a bounce bio */
+#define BIO_USER_MAPPED 6 /* contains user pages */
+#define BIO_EOPNOTSUPP 7 /* not supported */
+#define BIO_NULL_MAPPED 8 /* contains invalid user pages */
+#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */
+#define BIO_QUIET 10 /* Make BIO Quiet */
+#define BIO_MAPPED_INTEGRITY 11 /* integrity metadata has been remapped */
+#define BIO_SNAP_STABLE 12 /* bio data must be snapshotted during write */
+#define BIO_TOI 13 /* bio is TuxOnIce submitted */
+
+/*
+ * Flags starting here get preserved by bio_reset() - this includes
+ * BIO_POOL_IDX()
+ */
+#define BIO_RESET_BITS 14
+#define BIO_OWNS_VEC 14 /* bio_free() should free bvec */
+
+#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
+
+/*
+ * top 4 bits of bio flags indicate the pool this bio came from
+ */
+#define BIO_POOL_BITS (4)
+#define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1)
+#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
+#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
+#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
+
+#endif /* CONFIG_BLOCK */
+
+/*
+ * Request flags. For use in the cmd_flags field of struct request, and in
+ * bi_rw of struct bio. Note that some flags are only valid in either one.
+ */
+enum rq_flag_bits {
+ /* common flags */
+ __REQ_WRITE, /* not set, read. set, write */
+ __REQ_FAILFAST_DEV, /* no driver retries of device errors */
+ __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+ __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
+
+ __REQ_SYNC, /* request is sync (sync write or read) */
+ __REQ_META, /* metadata io request */
+ __REQ_PRIO, /* boost priority in cfq */
+ __REQ_DISCARD, /* request to discard sectors */
+ __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
+ __REQ_WRITE_SAME, /* write same block many times */
+
+ __REQ_NOIDLE, /* don't anticipate more IO after this one */
+ __REQ_INTEGRITY, /* I/O includes block integrity payload */
+ __REQ_FUA, /* forced unit access */
+ __REQ_FLUSH, /* request for cache flush */
+
+ /* bio only flags */
+ __REQ_RAHEAD, /* read ahead, can fail anytime */
+ __REQ_THROTTLED, /* This bio has already been subjected to
+ * throttling rules. Don't do it again. */
+
+ /* request only flags */
+ __REQ_SORTED, /* elevator knows about this request */
+ __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
+ __REQ_NOMERGE, /* don't touch this for merging */
+ __REQ_STARTED, /* drive already may have started this one */
+ __REQ_DONTPREP, /* don't call prep for this one */
+ __REQ_QUEUED, /* uses queueing */
+ __REQ_ELVPRIV, /* elevator private data attached */
+ __REQ_FAILED, /* set if the request failed */
+ __REQ_QUIET, /* don't worry about errors */
+ __REQ_PREEMPT, /* set for "ide_preempt" requests and also
+ for requests for which the SCSI "quiesce"
+ state must be ignored. */
+ __REQ_ALLOCED, /* request came from our alloc pool */
+ __REQ_COPY_USER, /* contains copies of user pages */
+ __REQ_FLUSH_SEQ, /* request for flush sequence */
+ __REQ_IO_STAT, /* account I/O stat */
+ __REQ_MIXED_MERGE, /* merge of different types, fail separately */
+ __REQ_PM, /* runtime pm request */
+ __REQ_HASHED, /* on IO scheduler merge hash */
+ __REQ_MQ_INFLIGHT, /* track inflight for MQ */
+ __REQ_NO_TIMEOUT, /* requests may never expire */
+ __REQ_NR_BITS, /* stops here */
+};
+
+#define REQ_WRITE (1ULL << __REQ_WRITE)
+#define REQ_FAILFAST_DEV (1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT (1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER (1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC (1ULL << __REQ_SYNC)
+#define REQ_META (1ULL << __REQ_META)
+#define REQ_PRIO (1ULL << __REQ_PRIO)
+#define REQ_DISCARD (1ULL << __REQ_DISCARD)
+#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
+#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
+#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
+
+#define REQ_FAILFAST_MASK \
+ (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
+#define REQ_COMMON_MASK \
+ (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
+ REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
+ REQ_SECURE | REQ_INTEGRITY)
+#define REQ_CLONE_MASK REQ_COMMON_MASK
+
+#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
+
+/* This mask is used for both bio and request merge checking */
+#define REQ_NOMERGE_FLAGS \
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
+
+#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
+#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
+
+#define REQ_SORTED (1ULL << __REQ_SORTED)
+#define REQ_SOFTBARRIER (1ULL << __REQ_SOFTBARRIER)
+#define REQ_FUA (1ULL << __REQ_FUA)
+#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
+#define REQ_STARTED (1ULL << __REQ_STARTED)
+#define REQ_DONTPREP (1ULL << __REQ_DONTPREP)
+#define REQ_QUEUED (1ULL << __REQ_QUEUED)
+#define REQ_ELVPRIV (1ULL << __REQ_ELVPRIV)
+#define REQ_FAILED (1ULL << __REQ_FAILED)
+#define REQ_QUIET (1ULL << __REQ_QUIET)
+#define REQ_PREEMPT (1ULL << __REQ_PREEMPT)
+#define REQ_ALLOCED (1ULL << __REQ_ALLOCED)
+#define REQ_COPY_USER (1ULL << __REQ_COPY_USER)
+#define REQ_FLUSH (1ULL << __REQ_FLUSH)
+#define REQ_FLUSH_SEQ (1ULL << __REQ_FLUSH_SEQ)
+#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
+#define REQ_SECURE (1ULL << __REQ_SECURE)
+#define REQ_PM (1ULL << __REQ_PM)
+#define REQ_HASHED (1ULL << __REQ_HASHED)
+#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
+
+#endif /* __LINUX_BLK_TYPES_H */
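Two small helpers showing how the REQ_* and BIO_* definitions above are meant to be consumed. CONFIG_BLOCK is assumed so that struct bio and bio_flagged() are available, and the helper names are invented for illustration.

#include <linux/blk_types.h>

static inline bool my_bio_is_sync_write(struct bio *bio)
{
	/* REQ_WRITE is the bottom bit of bi_rw; REQ_SYNC marks sync I/O. */
	return (bio->bi_rw & REQ_WRITE) && (bio->bi_rw & REQ_SYNC);
}

static inline bool my_bio_is_bounce(struct bio *bio)
{
	return bio_flagged(bio, BIO_BOUNCED);	/* bit test on bi_flags */
}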
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
new file mode 100644
index 000000000..5d93a6645
--- /dev/null
+++ b/include/linux/blkdev.h
@@ -0,0 +1,1673 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_BLOCK
+
+#include <linux/major.h>
+#include <linux/genhd.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/pagemap.h>
+#include <linux/backing-dev.h>
+#include <linux/wait.h>
+#include <linux/mempool.h>
+#include <linux/bio.h>
+#include <linux/stringify.h>
+#include <linux/gfp.h>
+#include <linux/bsg.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/percpu-refcount.h>
+
+#include <asm/scatterlist.h>
+
+struct module;
+struct scsi_ioctl_command;
+
+struct request_queue;
+struct elevator_queue;
+struct request_pm_state;
+struct blk_trace;
+struct request;
+struct sg_io_hdr;
+struct bsg_job;
+struct blkcg_gq;
+struct blk_flush_queue;
+
+#define BLKDEV_MIN_RQ 4
+#define BLKDEV_MAX_RQ 128 /* Default maximum */
+
+/*
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+#define BLKCG_MAX_POLS 2
+
+struct request;
+typedef void (rq_end_io_fn)(struct request *, int);
+
+#define BLK_RL_SYNCFULL (1U << 0)
+#define BLK_RL_ASYNCFULL (1U << 1)
+
+struct request_list {
+ struct request_queue *q; /* the queue this rl belongs to */
+#ifdef CONFIG_BLK_CGROUP
+ struct blkcg_gq *blkg; /* blkg this request pool belongs to */
+#endif
+ /*
+ * count[], starved[], and wait[] are indexed by
+ * BLK_RW_SYNC/BLK_RW_ASYNC
+ */
+ int count[2];
+ int starved[2];
+ mempool_t *rq_pool;
+ wait_queue_head_t wait[2];
+ unsigned int flags;
+};
+
+/*
+ * request command types
+ */
+enum rq_cmd_type_bits {
+ REQ_TYPE_FS = 1, /* fs request */
+ REQ_TYPE_BLOCK_PC, /* scsi command */
+ REQ_TYPE_SENSE, /* sense request */
+ REQ_TYPE_PM_SUSPEND, /* suspend request */
+ REQ_TYPE_PM_RESUME, /* resume request */
+ REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
+ REQ_TYPE_SPECIAL, /* driver defined type */
+ /*
+ * for ATA/ATAPI devices. this really doesn't belong here, ide should
+ * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
+ * private REQ_LB opcodes to differentiate what type of request this is
+ */
+ REQ_TYPE_ATA_TASKFILE,
+ REQ_TYPE_ATA_PC,
+};
+
+#define BLK_MAX_CDB 16
+
+/*
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
+ */
+struct request {
+ struct list_head queuelist;
+ union {
+ struct call_single_data csd;
+ unsigned long fifo_time;
+ };
+
+ struct request_queue *q;
+ struct blk_mq_ctx *mq_ctx;
+
+ u64 cmd_flags;
+ enum rq_cmd_type_bits cmd_type;
+ unsigned long atomic_flags;
+
+ int cpu;
+
+ /* the following two fields are internal, NEVER access directly */
+ unsigned int __data_len; /* total data len */
+ sector_t __sector; /* sector cursor */
+
+ struct bio *bio;
+ struct bio *biotail;
+
+ /*
+ * The hash is used inside the scheduler, and killed once the
+ * request reaches the dispatch list. The ipi_list is only used
+ * to queue the request for softirq completion, which is long
+ * after the request has been unhashed (and even removed from
+ * the dispatch list).
+ */
+ union {
+ struct hlist_node hash; /* merge hash */
+ struct list_head ipi_list;
+ };
+
+ /*
+ * The rb_node is only used inside the io scheduler, requests
+ * are pruned when moved to the dispatch queue. So let the
+ * completion_data share space with the rb_node.
+ */
+ union {
+ struct rb_node rb_node; /* sort/lookup */
+ void *completion_data;
+ };
+
+ /*
+ * Three pointers are available for the IO schedulers, if they need
+ * more they have to dynamically allocate it. Flush requests are
+ * never put on the IO scheduler. So let the flush fields share
+ * space with the elevator data.
+ */
+ union {
+ struct {
+ struct io_cq *icq;
+ void *priv[2];
+ } elv;
+
+ struct {
+ unsigned int seq;
+ struct list_head list;
+ rq_end_io_fn *saved_end_io;
+ } flush;
+ };
+
+ struct gendisk *rq_disk;
+ struct hd_struct *part;
+ unsigned long start_time;
+#ifdef CONFIG_BLK_CGROUP
+ struct request_list *rl; /* rl this rq is alloced from */
+ unsigned long long start_time_ns;
+ unsigned long long io_start_time_ns; /* when passed to hardware */
+#endif
+ /* Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ unsigned short nr_integrity_segments;
+#endif
+
+ unsigned short ioprio;
+
+ void *special; /* opaque pointer available for LLD use */
+
+ int tag;
+ int errors;
+
+ /*
+ * when request is used as a packet command carrier
+ */
+ unsigned char __cmd[BLK_MAX_CDB];
+ unsigned char *cmd;
+ unsigned short cmd_len;
+
+ unsigned int extra_len; /* length of alignment and padding */
+ unsigned int sense_len;
+ unsigned int resid_len; /* residual count */
+ void *sense;
+
+ unsigned long deadline;
+ struct list_head timeout_list;
+ unsigned int timeout;
+ int retries;
+
+ /*
+ * completion callback.
+ */
+ rq_end_io_fn *end_io;
+ void *end_io_data;
+
+ /* for bidi */
+ struct request *next_rq;
+};
+
+static inline unsigned short req_get_ioprio(struct request *req)
+{
+ return req->ioprio;
+}
+
+/*
+ * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
+ * requests. Some step values could eventually be made generic.
+ */
+struct request_pm_state
+{
+ /* PM state machine step value, currently driver specific */
+ int pm_step;
+ /* requested PM state value (S1, S2, S3, S4, ...) */
+ u32 pm_state;
+ void* data; /* for driver use */
+};
+
+#include <linux/elevator.h>
+
+struct blk_queue_ctx;
+
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
+
+struct bio_vec;
+struct bvec_merge_data {
+ struct block_device *bi_bdev;
+ sector_t bi_sector;
+ unsigned bi_size;
+ unsigned long bi_rw;
+};
+typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
+ struct bio_vec *);
+typedef void (softirq_done_fn)(struct request *);
+typedef int (dma_drain_needed_fn)(struct request *);
+typedef int (lld_busy_fn) (struct request_queue *q);
+typedef int (bsg_job_fn) (struct bsg_job *);
+
+enum blk_eh_timer_return {
+ BLK_EH_NOT_HANDLED,
+ BLK_EH_HANDLED,
+ BLK_EH_RESET_TIMER,
+};
+
+typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
+
+enum blk_queue_state {
+ Queue_down,
+ Queue_up,
+};
+
+struct blk_queue_tag {
+ struct request **tag_index; /* map of busy tags */
+ unsigned long *tag_map; /* bit map of free/busy tags */
+ int busy; /* current depth */
+ int max_depth; /* what we will send to device */
+ int real_max_depth; /* what the array can hold */
+ atomic_t refcnt; /* map can be shared */
+ int alloc_policy; /* tag allocation policy */
+ int next_tag; /* next tag */
+};
+#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
+
+#define BLK_SCSI_MAX_CMDS (256)
+#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+
+struct queue_limits {
+ unsigned long bounce_pfn;
+ unsigned long seg_boundary_mask;
+
+ unsigned int max_hw_sectors;
+ unsigned int chunk_sectors;
+ unsigned int max_sectors;
+ unsigned int max_segment_size;
+ unsigned int physical_block_size;
+ unsigned int alignment_offset;
+ unsigned int io_min;
+ unsigned int io_opt;
+ unsigned int max_discard_sectors;
+ unsigned int max_write_same_sectors;
+ unsigned int discard_granularity;
+ unsigned int discard_alignment;
+
+ unsigned short logical_block_size;
+ unsigned short max_segments;
+ unsigned short max_integrity_segments;
+
+ unsigned char misaligned;
+ unsigned char discard_misaligned;
+ unsigned char cluster;
+ unsigned char discard_zeroes_data;
+ unsigned char raid_partial_stripes_expensive;
+};
+
+struct request_queue {
+ /*
+ * Together with queue_head for cacheline sharing
+ */
+ struct list_head queue_head;
+ struct request *last_merge;
+ struct elevator_queue *elevator;
+ int nr_rqs[2]; /* # allocated [a]sync rqs */
+ int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+
+ /*
+ * If blkcg is not used, @q->root_rl serves all requests. If blkcg
+ * is used, root blkg allocates from @q->root_rl and all other
+ * blkgs from their own blkg->rl. Which one to use should be
+ * determined using bio_request_list().
+ */
+ struct request_list root_rl;
+
+ request_fn_proc *request_fn;
+ make_request_fn *make_request_fn;
+ prep_rq_fn *prep_rq_fn;
+ unprep_rq_fn *unprep_rq_fn;
+ merge_bvec_fn *merge_bvec_fn;
+ softirq_done_fn *softirq_done_fn;
+ rq_timed_out_fn *rq_timed_out_fn;
+ dma_drain_needed_fn *dma_drain_needed;
+ lld_busy_fn *lld_busy_fn;
+
+ struct blk_mq_ops *mq_ops;
+
+ unsigned int *mq_map;
+
+ /* sw queues */
+ struct blk_mq_ctx __percpu *queue_ctx;
+ unsigned int nr_queues;
+
+ /* hw dispatch queues */
+ struct blk_mq_hw_ctx **queue_hw_ctx;
+ unsigned int nr_hw_queues;
+
+ /*
+ * Dispatch queue sorting
+ */
+ sector_t end_sector;
+ struct request *boundary_rq;
+
+ /*
+ * Delayed queue handling
+ */
+ struct delayed_work delay_work;
+
+ struct backing_dev_info backing_dev_info;
+
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void *queuedata;
+
+ /*
+ * various queue flags, see QUEUE_* below
+ */
+ unsigned long queue_flags;
+
+ /*
+ * ida allocated id for this queue. Used to index queues from
+ * ioctx.
+ */
+ int id;
+
+ /*
+ * queue needs bounce pages for pages above this limit
+ */
+ gfp_t bounce_gfp;
+
+ /*
+ * protects queue structures from reentrancy. ->__queue_lock should
+ * _never_ be used directly; it is queue private. Always use
+ * ->queue_lock.
+ */
+ spinlock_t __queue_lock;
+ spinlock_t *queue_lock;
+
+ /*
+ * queue kobject
+ */
+ struct kobject kobj;
+
+ /*
+ * mq queue kobject
+ */
+ struct kobject mq_kobj;
+
+#ifdef CONFIG_PM
+ struct device *dev;
+ int rpm_status;
+ unsigned int nr_pending;
+#endif
+
+ /*
+ * queue settings
+ */
+ unsigned long nr_requests; /* Max # of requests */
+ unsigned int nr_congestion_on;
+ unsigned int nr_congestion_off;
+ unsigned int nr_batching;
+
+ unsigned int dma_drain_size;
+ void *dma_drain_buffer;
+ unsigned int dma_pad_mask;
+ unsigned int dma_alignment;
+
+ struct blk_queue_tag *queue_tags;
+ struct list_head tag_busy_list;
+
+ unsigned int nr_sorted;
+ unsigned int in_flight[2];
+ /*
+ * Number of active block driver functions for which blk_drain_queue()
+ * must wait. Must be incremented around functions that unlock the
+ * queue_lock internally, e.g. scsi_request_fn().
+ */
+ unsigned int request_fn_active;
+
+ unsigned int rq_timeout;
+ struct timer_list timeout;
+ struct list_head timeout_list;
+
+ struct list_head icq_list;
+#ifdef CONFIG_BLK_CGROUP
+ DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
+ struct blkcg_gq *root_blkg;
+ struct list_head blkg_list;
+#endif
+
+ struct queue_limits limits;
+
+ /*
+ * sg stuff
+ */
+ unsigned int sg_timeout;
+ unsigned int sg_reserved_size;
+ int node;
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+ struct blk_trace *blk_trace;
+#endif
+ /*
+ * for flush operations
+ */
+ unsigned int flush_flags;
+ unsigned int flush_not_queueable:1;
+ struct blk_flush_queue *fq;
+
+ struct list_head requeue_list;
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+
+ struct mutex sysfs_lock;
+
+ int bypass_depth;
+ int mq_freeze_depth;
+
+#if defined(CONFIG_BLK_DEV_BSG)
+ bsg_job_fn *bsg_job_fn;
+ int bsg_job_size;
+ struct bsg_class_device bsg_dev;
+#endif
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+ /* Throttle data */
+ struct throtl_data *td;
+#endif
+ struct rcu_head rcu_head;
+ wait_queue_head_t mq_freeze_wq;
+ struct percpu_ref mq_usage_counter;
+ struct list_head all_q_node;
+
+ struct blk_mq_tag_set *tag_set;
+ struct list_head tag_set_list;
+};
+
+#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
+#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
+#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
+#define QUEUE_FLAG_DYING 5 /* queue being torn down */
+#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
+#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
+#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
+#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
+#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
+#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
+#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
+#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
+#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
+
+#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_STACKABLE) | \
+ (1 << QUEUE_FLAG_SAME_COMP) | \
+ (1 << QUEUE_FLAG_ADD_RANDOM))
+
+#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
+ (1 << QUEUE_FLAG_STACKABLE) | \
+ (1 << QUEUE_FLAG_SAME_COMP))
+
+static inline void queue_lockdep_assert_held(struct request_queue *q)
+{
+ if (q->queue_lock)
+ lockdep_assert_held(q->queue_lock);
+}
+
+static inline void queue_flag_set_unlocked(unsigned int flag,
+ struct request_queue *q)
+{
+ __set_bit(flag, &q->queue_flags);
+}
+
+static inline int queue_flag_test_and_clear(unsigned int flag,
+ struct request_queue *q)
+{
+ queue_lockdep_assert_held(q);
+
+ if (test_bit(flag, &q->queue_flags)) {
+ __clear_bit(flag, &q->queue_flags);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int queue_flag_test_and_set(unsigned int flag,
+ struct request_queue *q)
+{
+ queue_lockdep_assert_held(q);
+
+ if (!test_bit(flag, &q->queue_flags)) {
+ __set_bit(flag, &q->queue_flags);
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+ queue_lockdep_assert_held(q);
+ __set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+ struct request_queue *q)
+{
+ __clear_bit(flag, &q->queue_flags);
+}
+
+static inline int queue_in_flight(struct request_queue *q)
+{
+ return q->in_flight[0] + q->in_flight[1];
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+ queue_lockdep_assert_held(q);
+ __clear_bit(flag, &q->queue_flags);
+}
+
+#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
+#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
+#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
+#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_noxmerges(q) \
+ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
+#define blk_queue_stackable(q) \
+ test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
+#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
+ test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+
+#define blk_noretry_request(rq) \
+ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+ REQ_FAILFAST_DRIVER))
+
+#define blk_account_rq(rq) \
+ (((rq)->cmd_flags & REQ_STARTED) && \
+ ((rq)->cmd_type == REQ_TYPE_FS))
+
+#define blk_pm_request(rq) \
+ ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
+ (rq)->cmd_type == REQ_TYPE_PM_RESUME)
+
+#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
+#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
+/* rq->queuelist of dequeued request must be list_empty() */
+#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
+
+#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
+
+#define rq_data_dir(rq) (((rq)->cmd_flags & 1) != 0)
+
+/*
+ * Driver can handle struct request, if it either has an old style
+ * request_fn defined, or is blk-mq based.
+ */
+static inline bool queue_is_rq_based(struct request_queue *q)
+{
+ return q->request_fn || q->mq_ops;
+}
+
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+ return q->limits.cluster;
+}
+
+/*
+ * We regard a request as sync if it is either a read or a sync write
+ */
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+ return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+ return rw_is_sync(rq->cmd_flags);
+}
+
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
+{
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ return rl->flags & flag;
+}
+
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
+{
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ rl->flags |= flag;
+}
+
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
+{
+ unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+ rl->flags &= ~flag;
+}
+
+static inline bool rq_mergeable(struct request *rq)
+{
+ if (rq->cmd_type != REQ_TYPE_FS)
+ return false;
+
+ if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+ return false;
+
+ return true;
+}
+
+static inline bool blk_check_merge_flags(unsigned int flags1,
+ unsigned int flags2)
+{
+ if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+ return false;
+
+ if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
+ return false;
+
+ if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+ return false;
+
+ return true;
+}
+
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+ if (bio_data(a) == bio_data(b))
+ return true;
+
+ return false;
+}
+
+/*
+ * q->prep_rq_fn return values
+ */
+#define BLKPREP_OK 0 /* serve it */
+#define BLKPREP_KILL 1 /* fatal error, kill */
+#define BLKPREP_DEFER 2 /* leave on queue */
+
+extern unsigned long blk_max_low_pfn, blk_max_pfn;
+
+/*
+ * standard bounce addresses:
+ *
+ * BLK_BOUNCE_HIGH : bounce all highmem pages
+ * BLK_BOUNCE_ANY : don't bounce anything
+ * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
+ */
+
+#if BITS_PER_LONG == 32
+#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
+#else
+#define BLK_BOUNCE_HIGH -1ULL
+#endif
+#define BLK_BOUNCE_ANY (-1ULL)
+#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
+
+/*
+ * default timeout for SG_IO if none specified
+ */
+#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
+#define BLK_MIN_SG_TIMEOUT (7 * HZ)
+
+#ifdef CONFIG_BOUNCE
+extern int init_emergency_isa_pool(void);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
+#else
+static inline int init_emergency_isa_pool(void)
+{
+ return 0;
+}
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
+{
+}
+#endif /* CONFIG_BOUNCE */
+
+struct rq_map_data {
+ struct page **pages;
+ int page_order;
+ int nr_entries;
+ unsigned long offset;
+ int null_mapped;
+ int from_user;
+};
+
+struct req_iterator {
+ struct bvec_iter iter;
+ struct bio *bio;
+};
+
+/* This should not be used directly - use rq_for_each_segment */
+#define for_each_bio(_bio) \
+ for (; _bio; _bio = _bio->bi_next)
+#define __rq_for_each_bio(_bio, rq) \
+ if ((rq->bio)) \
+ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+
+#define rq_for_each_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bio_for_each_segment(bvl, _iter.bio, _iter.iter)
+
+#define rq_iter_last(bvec, _iter) \
+ (_iter.bio->bi_next == NULL && \
+ bio_iter_last(bvec, _iter.iter))
+
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
+#endif
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+extern void rq_flush_dcache_pages(struct request *rq);
+#else
+static inline void rq_flush_dcache_pages(struct request *rq)
+{
+}
+#endif
+
+extern int blk_register_queue(struct gendisk *disk);
+extern void blk_unregister_queue(struct gendisk *disk);
+extern void generic_make_request(struct bio *bio);
+extern void blk_rq_init(struct request_queue *q, struct request *rq);
+extern void blk_put_request(struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+ gfp_t);
+extern void blk_rq_set_block_pc(struct request *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_add_request_payload(struct request *rq, struct page *page,
+ unsigned int len);
+extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *),
+ void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
+extern int blk_insert_cloned_request(struct request_queue *q,
+ struct request *rq);
+extern void blk_delay_queue(struct request_queue *, unsigned long);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
+extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
+extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
+ unsigned int, void __user *);
+extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ unsigned int, void __user *);
+extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
+ struct scsi_ioctl_command __user *);
+
+/*
+ * A queue has just exited congestion. Note this in the global counter of
+ * congested queues, and wake up anyone who was waiting for requests to be
+ * put back.
+ */
+static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
+{
+ clear_bdi_congested(&q->backing_dev_info, sync);
+}
+
+/*
+ * A queue has just entered congestion. Flag that in the queue's VM-visible
+ * state flags and increment the global counter of congested queues.
+ */
+static inline void blk_set_queue_congested(struct request_queue *q, int sync)
+{
+ set_bdi_congested(&q->backing_dev_info, sync);
+}
+
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
+extern void blk_sync_queue(struct request_queue *q);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
+extern int blk_rq_map_user(struct request_queue *, struct request *,
+ struct rq_map_data *, void __user *, unsigned long,
+ gfp_t);
+extern int blk_rq_unmap_user(struct bio *);
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
+ struct rq_map_data *, const struct iov_iter *,
+ gfp_t);
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
+ struct request *, int);
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
+ struct request *, int, rq_end_io_fn *);
+
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
+{
+ return bdev->bd_disk->queue; /* this is never NULL */
+}
+
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_err_bytes() : bytes left till the next error boundary
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+extern unsigned int blk_rq_err_bytes(const struct request *rq);
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
+ unsigned int cmd_flags)
+{
+ if (unlikely(cmd_flags & REQ_DISCARD))
+ return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+
+ if (unlikely(cmd_flags & REQ_WRITE_SAME))
+ return q->limits.max_write_same_sectors;
+
+ return q->limits.max_sectors;
+}
+
+/*
+ * Return the maximum size of a request at a given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+ sector_t offset)
+{
+ if (!q->limits.chunk_sectors)
+ return q->limits.max_sectors;
+
+ return q->limits.chunk_sectors -
+ (offset & (q->limits.chunk_sectors - 1));
+}
+
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+ return q->limits.max_hw_sectors;
+
+ if (!q->limits.chunk_sectors)
+ return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+ return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+ blk_queue_get_max_sectors(q, rq->cmd_flags));
+}
+
+static inline unsigned int blk_rq_count_bios(struct request *rq)
+{
+ unsigned int nr_bios = 0;
+ struct bio *bio;
+
+ __rq_for_each_bio(bio, rq)
+ nr_bios++;
+
+ return nr_bios;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends. __blk_end_request() must be called
+ * with the request queue spinlock acquired.
+ *
+ * Several drivers define their own end_request and call
+ * blk_end_request() for parts of the original function.
+ * This prevents code duplication in drivers.
+ */
+extern bool blk_update_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void blk_finish_request(struct request *rq, int error);
+extern bool blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool blk_end_request_err(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request_err(struct request *rq, int error);
+
+extern void blk_complete_request(struct request *);
+extern void __blk_complete_request(struct request *);
+extern void blk_abort_request(struct request *);
+extern void blk_unprep_request(struct request *);
+
+/*
+ * Access functions for manipulating queue properties
+ */
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
+ spinlock_t *lock, int node_id);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
+ request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+ unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+ unsigned int max_write_same_sectors);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+ unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_stacking_limits(struct queue_limits *lim);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+ sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
+extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
+extern int blk_queue_dma_drain(struct request_queue *q,
+ dma_drain_needed_fn *dma_drain_needed,
+ void *buf, unsigned int size);
+extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_update_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
+extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
+extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
+extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
+
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+extern void blk_dump_rq_flags(struct request *, char *);
+extern long nr_blockdev_pages(void);
+
+bool __must_check blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
+
+/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+ struct device *dev) {}
+static inline int blk_pre_runtime_suspend(struct request_queue *q)
+{
+ return -ENOSYS;
+}
+static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
+static inline void blk_pre_runtime_resume(struct request_queue *q) {}
+static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
+#endif
+
+/*
+ * blk_plug permits building a queue of related requests by holding the I/O
+ * fragments for a short period. This allows merging of sequential requests
+ * into a single larger request. As the requests are moved from a per-task list to
+ * the device's request_queue in a batch, this results in improved scalability
+ * as the lock contention for request_queue lock is reduced.
+ *
+ * It is ok not to disable preemption when adding the request to the plug list
+ * or when attempting a merge, because blk_schedule_flush_plug() will only flush
+ * the plug list when the task sleeps by itself. For details, please see
+ * schedule() where blk_schedule_flush_plug() is called.
+ */
+struct blk_plug {
+ struct list_head list; /* requests */
+ struct list_head mq_list; /* blk-mq requests */
+ struct list_head cb_list; /* md requires an unplug callback */
+};
+#define BLK_MAX_REQUEST_COUNT 16
+
+struct blk_plug_cb;
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
+struct blk_plug_cb {
+ struct list_head list;
+ blk_plug_cb_fn callback;
+ void *data;
+};
+extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
+ void *data, int size);
+extern void blk_start_plug(struct blk_plug *);
+extern void blk_finish_plug(struct blk_plug *);
+extern void blk_flush_plug_list(struct blk_plug *, bool);
+
+static inline void blk_flush_plug(struct task_struct *tsk)
+{
+ struct blk_plug *plug = tsk->plug;
+
+ if (plug)
+ blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+ struct blk_plug *plug = tsk->plug;
+
+ if (plug)
+ blk_flush_plug_list(plug, true);
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+ struct blk_plug *plug = tsk->plug;
+
+ return plug &&
+ (!list_empty(&plug->list) ||
+ !list_empty(&plug->mq_list) ||
+ !list_empty(&plug->cb_list));
+}
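+
+/*
+ * Illustrative sketch (not part of this header): a caller batches several
+ * bios under one plug so the block layer can merge them before they reach
+ * the device.  "my_submit_batch", "bios" and "nr" are hypothetical names;
+ * submit_bio() is used in its (rw, bio) form.
+ *
+ *	static void my_submit_batch(struct bio **bios, int nr)
+ *	{
+ *		struct blk_plug plug;
+ *		int i;
+ *
+ *		blk_start_plug(&plug);
+ *		for (i = 0; i < nr; i++)
+ *			submit_bio(WRITE, bios[i]);
+ *		blk_finish_plug(&plug);
+ *	}
+ *
+ * blk_finish_plug() flushes the plugged requests to the device queue.
+ */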
+
+/*
+ * tag stuff
+ */
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
+extern struct blk_queue_tag *blk_init_tags(int, int);
+extern void blk_free_tags(struct blk_queue_tag *);
+
+static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
+ int tag)
+{
+ if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+ return NULL;
+ return bqt->tag_index[tag];
+}
+
+#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
+
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, bool discard);
+static inline int sb_issue_discard(struct super_block *sb, sector_t block,
+ sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
+{
+ return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
+ nr_blocks << (sb->s_blocksize_bits - 9),
+ gfp_mask, flags);
+}
+static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
+ sector_t nr_blocks, gfp_t gfp_mask)
+{
+ return blkdev_issue_zeroout(sb->s_bdev,
+ block << (sb->s_blocksize_bits - 9),
+ nr_blocks << (sb->s_blocksize_bits - 9),
+ gfp_mask, true);
+}
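+
+/*
+ * Illustrative note (not from the original header): the shifts above convert
+ * filesystem blocks to 512-byte sectors.  For example, with 4 KiB blocks
+ * s_blocksize_bits is 12, so block N maps to sector N << 3, i.e. N * 8, and
+ * nr_blocks is scaled the same way.
+ */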
+
+extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
+
+enum blk_default_limits {
+ BLK_MAX_SEGMENTS = 128,
+ BLK_SAFE_MAX_SECTORS = 255,
+ BLK_MAX_SEGMENT_SIZE = 65536,
+ BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
+};
+
+#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
+
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+ return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+ return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+ return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+ return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_segments(struct request_queue *q)
+{
+ return q->limits.max_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+ return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
+{
+ int retval = 512;
+
+ if (q && q->limits.logical_block_size)
+ retval = q->limits.logical_block_size;
+
+ return retval;
+}
+
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+ return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+ return q->limits.physical_block_size;
+}
+
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
+{
+ return queue_physical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+ return q->limits.io_min;
+}
+
+static inline int bdev_io_min(struct block_device *bdev)
+{
+ return queue_io_min(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+ return q->limits.io_opt;
+}
+
+static inline int bdev_io_opt(struct block_device *bdev)
+{
+ return queue_io_opt(bdev_get_queue(bdev));
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+ if (q->limits.misaligned)
+ return -1;
+
+ return q->limits.alignment_offset;
+}
+
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+{
+ unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+ unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
+
+ return (granularity + lim->alignment_offset - alignment) % granularity;
+}
+
+static inline int bdev_alignment_offset(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q->limits.misaligned)
+ return -1;
+
+ if (bdev != bdev->bd_contains)
+ return bdev->bd_part->alignment_offset;
+
+ return q->limits.alignment_offset;
+}
+
+static inline int queue_discard_alignment(struct request_queue *q)
+{
+ if (q->limits.discard_misaligned)
+ return -1;
+
+ return q->limits.discard_alignment;
+}
+
+static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+{
+ unsigned int alignment, granularity, offset;
+
+ if (!lim->max_discard_sectors)
+ return 0;
+
+ /* Why are these in bytes, not sectors? */
+ alignment = lim->discard_alignment >> 9;
+ granularity = lim->discard_granularity >> 9;
+ if (!granularity)
+ return 0;
+
+ /* Offset of the partition start in 'granularity' sectors */
+ offset = sector_div(sector, granularity);
+
+ /* And why do we do this modulus *again* in blkdev_issue_discard()? */
+ offset = (granularity + alignment - offset) % granularity;
+
+ /* Turn it back into bytes, gaah */
+ return offset << 9;
+}
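+
+/*
+ * Worked example (illustrative, with assumed limits): if discard_granularity
+ * is 1 MiB (2048 sectors), discard_alignment is 0 and the partition starts at
+ * sector 2560, then offset = 2560 % 2048 = 512 and the function returns
+ * (2048 + 0 - 512) % 2048 = 1536 sectors, i.e. 786432 bytes from the start of
+ * the partition to the next discard-granularity boundary.
+ */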
+
+static inline int bdev_discard_alignment(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (bdev != bdev->bd_contains)
+ return bdev->bd_part->discard_alignment;
+
+ return q->limits.discard_alignment;
+}
+
+static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
+{
+ if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
+ return 1;
+
+ return 0;
+}
+
+static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
+{
+ return queue_discard_zeroes_data(bdev_get_queue(bdev));
+}
+
+static inline unsigned int bdev_write_same(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return q->limits.max_write_same_sectors;
+
+ return 0;
+}
+
+static inline int queue_dma_alignment(struct request_queue *q)
+{
+ return q ? q->dma_alignment : 511;
+}
+
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
+ unsigned int len)
+{
+ unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ return !(addr & alignment) && !(len & alignment);
+}
+
+/* assumes size > 256 */
+static inline unsigned int blksize_bits(unsigned int size)
+{
+ unsigned int bits = 8;
+ do {
+ bits++;
+ size >>= 1;
+ } while (size > 256);
+ return bits;
+}
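+
+/*
+ * For example (illustrative): blksize_bits(512) == 9, blksize_bits(1024) == 10
+ * and blksize_bits(4096) == 12.
+ */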
+
+static inline unsigned int block_size(struct block_device *bdev)
+{
+ return bdev->bd_block_size;
+}
+
+static inline bool queue_flush_queueable(struct request_queue *q)
+{
+ return !q->flush_not_queueable;
+}
+
+typedef struct {struct page *v;} Sector;
+
+unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
+
+static inline void put_dev_sector(Sector p)
+{
+ page_cache_release(p.v);
+}
+
+struct work_struct;
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
+
+#ifdef CONFIG_BLK_CGROUP
+/*
+ * This should not be using sched_clock(). A real patch is in progress
+ * to fix this up; until that is in place, we need to disable preemption
+ * around sched_clock() in this function and set_io_start_time_ns().
+ */
+static inline void set_start_time_ns(struct request *req)
+{
+ preempt_disable();
+ req->start_time_ns = sched_clock();
+ preempt_enable();
+}
+
+static inline void set_io_start_time_ns(struct request *req)
+{
+ preempt_disable();
+ req->io_start_time_ns = sched_clock();
+ preempt_enable();
+}
+
+static inline uint64_t rq_start_time_ns(struct request *req)
+{
+ return req->start_time_ns;
+}
+
+static inline uint64_t rq_io_start_time_ns(struct request *req)
+{
+ return req->io_start_time_ns;
+}
+#else
+static inline void set_start_time_ns(struct request *req) {}
+static inline void set_io_start_time_ns(struct request *req) {}
+static inline uint64_t rq_start_time_ns(struct request *req)
+{
+ return 0;
+}
+static inline uint64_t rq_io_start_time_ns(struct request *req)
+{
+ return 0;
+}
+#endif
+
+#define MODULE_ALIAS_BLOCKDEV(major,minor) \
+ MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
+#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
+ MODULE_ALIAS("block-major-" __stringify(major) "-*")
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+enum blk_integrity_flags {
+ BLK_INTEGRITY_VERIFY = 1 << 0,
+ BLK_INTEGRITY_GENERATE = 1 << 1,
+ BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
+ BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
+};
+
+struct blk_integrity_iter {
+ void *prot_buf;
+ void *data_buf;
+ sector_t seed;
+ unsigned int data_size;
+ unsigned short interval;
+ const char *disk_name;
+};
+
+typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
+
+struct blk_integrity {
+ integrity_processing_fn *generate_fn;
+ integrity_processing_fn *verify_fn;
+
+ unsigned short flags;
+ unsigned short tuple_size;
+ unsigned short interval;
+ unsigned short tag_size;
+
+ const char *name;
+
+ struct kobject kobj;
+};
+
+extern bool blk_integrity_is_initialized(struct gendisk *);
+extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_unregister(struct gendisk *);
+extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+ struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
+ struct request *);
+extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
+ struct bio *);
+
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+{
+ return bdev->bd_disk->integrity;
+}
+
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ return disk->integrity;
+}
+
+static inline bool blk_integrity_rq(struct request *rq)
+{
+ return rq->cmd_flags & REQ_INTEGRITY;
+}
+
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+ q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+ return q->limits.max_integrity_segments;
+}
+
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+
+struct bio;
+struct block_device;
+struct gendisk;
+struct blk_integrity;
+
+static inline int blk_integrity_rq(struct request *rq)
+{
+ return 0;
+}
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+ struct bio *b)
+{
+ return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+ struct bio *b,
+ struct scatterlist *s)
+{
+ return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+ return NULL;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+ return NULL;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+ return 0;
+}
+static inline int blk_integrity_register(struct gendisk *d,
+ struct blk_integrity *b)
+{
+ return 0;
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+ unsigned int segs)
+{
+}
+static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+{
+ return 0;
+}
+static inline bool blk_integrity_merge_rq(struct request_queue *rq,
+ struct request *r1,
+ struct request *r2)
+{
+ return true;
+}
+static inline bool blk_integrity_merge_bio(struct request_queue *rq,
+ struct request *r,
+ struct bio *b)
+{
+ return true;
+}
+static inline bool blk_integrity_is_initialized(struct gendisk *g)
+{
+ return false;
+}
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+struct block_device_operations {
+ int (*open) (struct block_device *, fmode_t);
+ void (*release) (struct gendisk *, fmode_t);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
+ int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+ long (*direct_access)(struct block_device *, sector_t,
+ void **, unsigned long *pfn, long size);
+ unsigned int (*check_events) (struct gendisk *disk,
+ unsigned int clearing);
+ /* ->media_changed() is DEPRECATED, use ->check_events() instead */
+ int (*media_changed) (struct gendisk *);
+ void (*unlock_native_capacity) (struct gendisk *);
+ int (*revalidate_disk) (struct gendisk *);
+ int (*getgeo)(struct block_device *, struct hd_geometry *);
+ /* this callback is with swap_lock and sometimes page table lock held */
+ void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+ struct module *owner;
+};
+
+extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+ unsigned long);
+extern int bdev_read_page(struct block_device *, sector_t, struct page *);
+extern int bdev_write_page(struct block_device *, sector_t, struct page *,
+ struct writeback_control *);
+extern long bdev_direct_access(struct block_device *, sector_t, void **addr,
+ unsigned long *pfn, long size);
+#else /* CONFIG_BLOCK */
+
+struct block_device;
+
+/*
+ * stubs for when the block layer is configured out
+ */
+#define buffer_heads_over_limit 0
+
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
+
+struct blk_plug {
+};
+
+static inline void blk_start_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_flush_plug(struct task_struct *task)
+{
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+ return false;
+}
+
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+ sector_t *error_sector)
+{
+ return 0;
+}
+
+#endif /* CONFIG_BLOCK */
+
+#endif
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
new file mode 100644
index 000000000..afc1343df
--- /dev/null
+++ b/include/linux/blktrace_api.h
@@ -0,0 +1,116 @@
+#ifndef BLKTRACE_H
+#define BLKTRACE_H
+
+#include <linux/blkdev.h>
+#include <linux/relay.h>
+#include <linux/compat.h>
+#include <uapi/linux/blktrace_api.h>
+#include <linux/list.h>
+
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+#include <linux/sysfs.h>
+
+struct blk_trace {
+ int trace_state;
+ struct rchan *rchan;
+ unsigned long __percpu *sequence;
+ unsigned char __percpu *msg_data;
+ u16 act_mask;
+ u64 start_lba;
+ u64 end_lba;
+ u32 pid;
+ u32 dev;
+ struct dentry *dir;
+ struct dentry *dropped_file;
+ struct dentry *msg_file;
+ struct list_head running_list;
+ atomic_t dropped;
+};
+
+extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
+extern void blk_trace_shutdown(struct request_queue *);
+extern int do_blk_trace_setup(struct request_queue *q, char *name,
+ dev_t dev, struct block_device *bdev,
+ struct blk_user_trace_setup *buts);
+extern __printf(2, 3)
+void __trace_note_message(struct blk_trace *, const char *fmt, ...);
+
+/**
+ * blk_add_trace_msg - Add a (simple) message to the blktrace stream
+ * @q: queue the I/O is for
+ * @fmt: format to print the message in
+ * @...: variable argument list for the format
+ *
+ * Description:
+ * Records a (simple) message onto the blktrace stream.
+ *
+ * NOTE: BLK_TN_MAX_MSG characters are output at most.
+ * NOTE: Can not use 'static inline' due to presence of var args...
+ *
+ **/
+#define blk_add_trace_msg(q, fmt, ...) \
+ do { \
+ struct blk_trace *bt = (q)->blk_trace; \
+ if (unlikely(bt)) \
+ __trace_note_message(bt, fmt, ##__VA_ARGS__); \
+ } while (0)
+#define BLK_TN_MAX_MSG 128
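+
+/*
+ * Illustrative usage (hypothetical driver code; 'q' is the request_queue the
+ * I/O is being issued on and 'nr' a local count):
+ *
+ *	blk_add_trace_msg(q, "mydrv: requeueing %u requests", nr);
+ *
+ * The note is only emitted when tracing is active on the queue.
+ */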
+
+extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
+ void *data, size_t len);
+extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct block_device *bdev,
+ char __user *arg);
+extern int blk_trace_startstop(struct request_queue *q, int start);
+extern int blk_trace_remove(struct request_queue *q);
+extern void blk_trace_remove_sysfs(struct device *dev);
+extern int blk_trace_init_sysfs(struct device *dev);
+
+extern struct attribute_group blk_trace_attr_group;
+
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
+# define blk_trace_shutdown(q) do { } while (0)
+# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY)
+# define blk_add_driver_data(q, rq, data, len) do {} while (0)
+# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
+# define blk_trace_startstop(q, start) (-ENOTTY)
+# define blk_trace_remove(q) (-ENOTTY)
+# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
+# define blk_trace_remove_sysfs(dev) do { } while (0)
+static inline int blk_trace_init_sysfs(struct device *dev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#ifdef CONFIG_COMPAT
+
+struct compat_blk_user_trace_setup {
+ char name[BLKTRACE_BDEV_SIZE];
+ u16 act_mask;
+ u32 buf_size;
+ u32 buf_nr;
+ compat_u64 start_lba;
+ compat_u64 end_lba;
+ u32 pid;
+};
+#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
+
+#endif
+
+#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
+
+static inline int blk_cmd_buf_len(struct request *rq)
+{
+ return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1;
+}
+
+extern void blk_dump_cmd(char *buf, struct request *rq);
+extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
+
+#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
+
+#endif
diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h
new file mode 100644
index 000000000..e44b88ba5
--- /dev/null
+++ b/include/linux/blockgroup_lock.h
@@ -0,0 +1,62 @@
+#ifndef _LINUX_BLOCKGROUP_LOCK_H
+#define _LINUX_BLOCKGROUP_LOCK_H
+/*
+ * Per-blockgroup locking for ext2 and ext3.
+ *
+ * Simple hashed spinlocking.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+
+#ifdef CONFIG_SMP
+
+/*
+ * We want a power-of-two. Is there a better way than this?
+ */
+
+#if NR_CPUS >= 32
+#define NR_BG_LOCKS 128
+#elif NR_CPUS >= 16
+#define NR_BG_LOCKS 64
+#elif NR_CPUS >= 8
+#define NR_BG_LOCKS 32
+#elif NR_CPUS >= 4
+#define NR_BG_LOCKS 16
+#elif NR_CPUS >= 2
+#define NR_BG_LOCKS 8
+#else
+#define NR_BG_LOCKS 4
+#endif
+
+#else /* CONFIG_SMP */
+#define NR_BG_LOCKS 1
+#endif /* CONFIG_SMP */
+
+struct bgl_lock {
+ spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
+struct blockgroup_lock {
+ struct bgl_lock locks[NR_BG_LOCKS];
+};
+
+static inline void bgl_lock_init(struct blockgroup_lock *bgl)
+{
+ int i;
+
+ for (i = 0; i < NR_BG_LOCKS; i++)
+ spin_lock_init(&bgl->locks[i].lock);
+}
+
+/*
+ * The accessor is a simple inline helper so we can embed a blockgroup_lock
+ * into different superblock types
+ */
+static inline spinlock_t *
+bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
+{
+ return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock;
+}
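+
+/*
+ * Illustrative sketch (hypothetical filesystem code): serialize updates to a
+ * block group's metadata with its hashed lock.  'sbi->s_blockgroup_lock' and
+ * 'group' are assumptions standing in for a superblock-info field and a
+ * block group number.
+ *
+ *	spinlock_t *lock = bgl_lock_ptr(&sbi->s_blockgroup_lock, group);
+ *
+ *	spin_lock(lock);
+ *	... update the group descriptor / bitmaps ...
+ *	spin_unlock(lock);
+ */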
+
+#endif
diff --git a/include/linux/bma150.h b/include/linux/bma150.h
new file mode 100644
index 000000000..97ade7cdc
--- /dev/null
+++ b/include/linux/bma150.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011 Bosch Sensortec GmbH
+ * Copyright (c) 2011 Unixphere
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _BMA150_H_
+#define _BMA150_H_
+
+#define BMA150_DRIVER "bma150"
+
+#define BMA150_RANGE_2G 0
+#define BMA150_RANGE_4G 1
+#define BMA150_RANGE_8G 2
+
+#define BMA150_BW_25HZ 0
+#define BMA150_BW_50HZ 1
+#define BMA150_BW_100HZ 2
+#define BMA150_BW_190HZ 3
+#define BMA150_BW_375HZ 4
+#define BMA150_BW_750HZ 5
+#define BMA150_BW_1500HZ 6
+
+struct bma150_cfg {
+ bool any_motion_int; /* Set to enable any-motion interrupt */
+ bool hg_int; /* Set to enable high-G interrupt */
+ bool lg_int; /* Set to enable low-G interrupt */
+ unsigned char any_motion_dur; /* Any-motion duration */
+ unsigned char any_motion_thres; /* Any-motion threshold */
+ unsigned char hg_hyst; /* High-G hysteresis */
+ unsigned char hg_dur; /* High-G duration */
+ unsigned char hg_thres; /* High-G threshold */
+ unsigned char lg_hyst; /* Low-G hysteresis */
+ unsigned char lg_dur; /* Low-G duration */
+ unsigned char lg_thres; /* Low-G threshold */
+ unsigned char range; /* one of BMA150_RANGE_xxx */
+ unsigned char bandwidth; /* one of BMA150_BW_xxx */
+};
+
+struct bma150_platform_data {
+ struct bma150_cfg cfg;
+ int (*irq_gpio_cfg)(void);
+};
+
+#endif /* _BMA150_H_ */
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
new file mode 100644
index 000000000..0995c2de8
--- /dev/null
+++ b/include/linux/bootmem.h
@@ -0,0 +1,368 @@
+/*
+ * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ */
+#ifndef _LINUX_BOOTMEM_H
+#define _LINUX_BOOTMEM_H
+
+#include <linux/mmzone.h>
+#include <linux/mm_types.h>
+#include <asm/dma.h>
+
+/*
+ * simple boot-time physical memory area allocator.
+ */
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+/*
+ * highest page
+ */
+extern unsigned long max_pfn;
+
+#ifndef CONFIG_NO_BOOTMEM
+/*
+ * node_bootmem_map is a map pointer - the bits represent all physical
+ * memory pages (including holes) on the node.
+ */
+typedef struct bootmem_data {
+ unsigned long node_min_pfn;
+ unsigned long node_low_pfn;
+ void *node_bootmem_map;
+ unsigned long last_end_off;
+ unsigned long hint_idx;
+ struct list_head list;
+} bootmem_data_t;
+
+extern bootmem_data_t bootmem_node_data[];
+#endif
+
+extern unsigned long bootmem_bootmap_pages(unsigned long);
+
+extern unsigned long init_bootmem_node(pg_data_t *pgdat,
+ unsigned long freepfn,
+ unsigned long startpfn,
+ unsigned long endpfn);
+extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
+
+extern unsigned long free_all_bootmem(void);
+extern void reset_node_managed_pages(pg_data_t *pgdat);
+extern void reset_all_zones_managed_pages(void);
+
+extern void free_bootmem_node(pg_data_t *pgdat,
+ unsigned long addr,
+ unsigned long size);
+extern void free_bootmem(unsigned long physaddr, unsigned long size);
+extern void free_bootmem_late(unsigned long physaddr, unsigned long size);
+
+/*
+ * Flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
+ * the architecture-specific code should honor this).
+ *
+ * If flags is BOOTMEM_DEFAULT, then the return value is always 0 (success).
+ * If flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the memory
+ * was already reserved.
+ */
+#define BOOTMEM_DEFAULT 0
+#define BOOTMEM_EXCLUSIVE (1<<0)
+
+extern int reserve_bootmem(unsigned long addr,
+ unsigned long size,
+ int flags);
+extern int reserve_bootmem_node(pg_data_t *pgdat,
+ unsigned long physaddr,
+ unsigned long size,
+ int flags);
+
+extern void *__alloc_bootmem(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+extern void *__alloc_bootmem_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+extern void *__alloc_bootmem_node(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+void *__alloc_bootmem_node_high(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit);
+extern void *__alloc_bootmem_low(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+void *__alloc_bootmem_low_nopanic(unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal);
+
+#ifdef CONFIG_NO_BOOTMEM
+/* We are using top down, so it is safe to use 0 here */
+#define BOOTMEM_LOW_LIMIT 0
+#else
+#define BOOTMEM_LOW_LIMIT __pa(MAX_DMA_ADDRESS)
+#endif
+
+#define alloc_bootmem(x) \
+ __alloc_bootmem(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
+#define alloc_bootmem_align(x, align) \
+ __alloc_bootmem(x, align, BOOTMEM_LOW_LIMIT)
+#define alloc_bootmem_nopanic(x) \
+ __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
+#define alloc_bootmem_pages(x) \
+ __alloc_bootmem(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
+#define alloc_bootmem_pages_nopanic(x) \
+ __alloc_bootmem_nopanic(x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
+#define alloc_bootmem_node(pgdat, x) \
+ __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
+#define alloc_bootmem_node_nopanic(pgdat, x) \
+ __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, BOOTMEM_LOW_LIMIT)
+#define alloc_bootmem_pages_node(pgdat, x) \
+ __alloc_bootmem_node(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+ __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, BOOTMEM_LOW_LIMIT)
+
+#define alloc_bootmem_low(x) \
+ __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages_nopanic(x) \
+ __alloc_bootmem_low_nopanic(x, PAGE_SIZE, 0)
+#define alloc_bootmem_low_pages(x) \
+ __alloc_bootmem_low(x, PAGE_SIZE, 0)
+#define alloc_bootmem_low_pages_node(pgdat, x) \
+ __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
+
+
+#if defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM)
+
+/* FIXME: use MEMBLOCK_ALLOC_* variants here */
+#define BOOTMEM_ALLOC_ACCESSIBLE 0
+#define BOOTMEM_ALLOC_ANYWHERE (~(phys_addr_t)0)
+
+/* FIXME: Move to memblock.h at a point where we remove nobootmem.c */
+void *memblock_virt_alloc_try_nid_nopanic(phys_addr_t size,
+ phys_addr_t align, phys_addr_t min_addr,
+ phys_addr_t max_addr, int nid);
+void *memblock_virt_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr, int nid);
+void __memblock_free_early(phys_addr_t base, phys_addr_t size);
+void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+
+static inline void * __init memblock_virt_alloc(
+ phys_addr_t size, phys_addr_t align)
+{
+ return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
+ BOOTMEM_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
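+
+/*
+ * Illustrative usage (hypothetical early-boot code): allocate a table before
+ * the page allocator is up.  An align of 0 falls back to a sane default, and
+ * the call panics on failure, so no NULL check is needed here.
+ *
+ *	my_table = memblock_virt_alloc(nr_entries * sizeof(*my_table), 0);
+ */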
+
+static inline void * __init memblock_virt_alloc_nopanic(
+ phys_addr_t size, phys_addr_t align)
+{
+ return memblock_virt_alloc_try_nid_nopanic(size, align,
+ BOOTMEM_LOW_LIMIT,
+ BOOTMEM_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+static inline void * __init memblock_virt_alloc_low(
+ phys_addr_t size, phys_addr_t align)
+{
+ return memblock_virt_alloc_try_nid(size, align,
+ BOOTMEM_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT,
+ NUMA_NO_NODE);
+}
+static inline void * __init memblock_virt_alloc_low_nopanic(
+ phys_addr_t size, phys_addr_t align)
+{
+ return memblock_virt_alloc_try_nid_nopanic(size, align,
+ BOOTMEM_LOW_LIMIT,
+ ARCH_LOW_ADDRESS_LIMIT,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_virt_alloc_from_nopanic(
+ phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
+{
+ return memblock_virt_alloc_try_nid_nopanic(size, align, min_addr,
+ BOOTMEM_ALLOC_ACCESSIBLE,
+ NUMA_NO_NODE);
+}
+
+static inline void * __init memblock_virt_alloc_node(
+ phys_addr_t size, int nid)
+{
+ return memblock_virt_alloc_try_nid(size, 0, BOOTMEM_LOW_LIMIT,
+ BOOTMEM_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void * __init memblock_virt_alloc_node_nopanic(
+ phys_addr_t size, int nid)
+{
+ return memblock_virt_alloc_try_nid_nopanic(size, 0, BOOTMEM_LOW_LIMIT,
+ BOOTMEM_ALLOC_ACCESSIBLE,
+ nid);
+}
+
+static inline void __init memblock_free_early(
+ phys_addr_t base, phys_addr_t size)
+{
+ __memblock_free_early(base, size);
+}
+
+static inline void __init memblock_free_early_nid(
+ phys_addr_t base, phys_addr_t size, int nid)
+{
+ __memblock_free_early(base, size);
+}
+
+static inline void __init memblock_free_late(
+ phys_addr_t base, phys_addr_t size)
+{
+ __memblock_free_late(base, size);
+}
+
+#else
+
+#define BOOTMEM_ALLOC_ACCESSIBLE 0
+
+
+/* Fall back to all the existing bootmem APIs */
+static inline void * __init memblock_virt_alloc(
+ phys_addr_t size, phys_addr_t align)
+{
+ if (!align)
+ align = SMP_CACHE_BYTES;
+ return __alloc_bootmem(size, align, BOOTMEM_LOW_LIMIT);
+}
+
+static inline void * __init memblock_virt_alloc_nopanic(
+ phys_addr_t size, phys_addr_t align)
+{
+ if (!align)
+ align = SMP_CACHE_BYTES;
+ return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+}
+
+static inline void * __init memblock_virt_alloc_low(
+ phys_addr_t size, phys_addr_t align)
+{
+ if (!align)
+ align = SMP_CACHE_BYTES;
+ return __alloc_bootmem_low(size, align, 0);
+}
+
+static inline void * __init memblock_virt_alloc_low_nopanic(
+ phys_addr_t size, phys_addr_t align)
+{
+ if (!align)
+ align = SMP_CACHE_BYTES;
+ return __alloc_bootmem_low_nopanic(size, align, 0);
+}
+
+static inline void * __init memblock_virt_alloc_from_nopanic(
+ phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
+{
+ return __alloc_bootmem_nopanic(size, align, min_addr);
+}
+
+static inline void * __init memblock_virt_alloc_node(
+ phys_addr_t size, int nid)
+{
+ return __alloc_bootmem_node(NODE_DATA(nid), size, SMP_CACHE_BYTES,
+ BOOTMEM_LOW_LIMIT);
+}
+
+static inline void * __init memblock_virt_alloc_node_nopanic(
+ phys_addr_t size, int nid)
+{
+ return __alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+ SMP_CACHE_BYTES,
+ BOOTMEM_LOW_LIMIT);
+}
+
+static inline void * __init memblock_virt_alloc_try_nid(phys_addr_t size,
+ phys_addr_t align, phys_addr_t min_addr, phys_addr_t max_addr, int nid)
+{
+ return __alloc_bootmem_node_high(NODE_DATA(nid), size, align,
+ min_addr);
+}
+
+static inline void * __init memblock_virt_alloc_try_nid_nopanic(
+ phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr, int nid)
+{
+ return ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size, align,
+ min_addr, max_addr);
+}
+
+static inline void __init memblock_free_early(
+ phys_addr_t base, phys_addr_t size)
+{
+ free_bootmem(base, size);
+}
+
+static inline void __init memblock_free_early_nid(
+ phys_addr_t base, phys_addr_t size, int nid)
+{
+ free_bootmem_node(NODE_DATA(nid), base, size);
+}
+
+static inline void __init memblock_free_late(
+ phys_addr_t base, phys_addr_t size)
+{
+ free_bootmem_late(base, size);
+}
+#endif /* defined(CONFIG_HAVE_MEMBLOCK) && defined(CONFIG_NO_BOOTMEM) */
+
+#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
+extern void *alloc_remap(int nid, unsigned long size);
+#else
+static inline void *alloc_remap(int nid, unsigned long size)
+{
+ return NULL;
+}
+#endif /* CONFIG_HAVE_ARCH_ALLOC_REMAP */
+
+extern void *alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long low_limit,
+ unsigned long high_limit);
+
+#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
+#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
+ * shift passed via *_hash_shift */
+
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
+ */
+#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
+#define HASHDIST_DEFAULT 1
+#else
+#define HASHDIST_DEFAULT 0
+#endif
+extern int hashdist; /* Distribute hashes across NUMA nodes? */
+
+
+#endif /* _LINUX_BOOTMEM_H */
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
new file mode 100644
index 000000000..86c12c93e
--- /dev/null
+++ b/include/linux/bottom_half.h
@@ -0,0 +1,35 @@
+#ifndef _LINUX_BH_H
+#define _LINUX_BH_H
+
+#include <linux/preempt.h>
+#include <linux/preempt_mask.h>
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+#else
+static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+ preempt_count_add(cnt);
+ barrier();
+}
+#endif
+
+static inline void local_bh_disable(void)
+{
+ __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
+
+extern void _local_bh_enable(void);
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+
+static inline void local_bh_enable_ip(unsigned long ip)
+{
+ __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET);
+}
+
+static inline void local_bh_enable(void)
+{
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+}
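+
+/*
+ * Illustrative sketch (not part of this header): protect data that is also
+ * touched from softirq context on the local CPU.  'my_list' and 'my_add' are
+ * hypothetical.
+ *
+ *	static void my_add(struct list_head *entry)
+ *	{
+ *		local_bh_disable();
+ *		list_add(entry, &my_list);
+ *		local_bh_enable();
+ *	}
+ *
+ * local_bh_enable() may run any softirqs that became pending meanwhile.
+ */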
+
+#endif /* _LINUX_BH_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
new file mode 100644
index 000000000..d5cda0671
--- /dev/null
+++ b/include/linux/bpf.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_H
+#define _LINUX_BPF_H 1
+
+#include <uapi/linux/bpf.h>
+#include <linux/workqueue.h>
+#include <linux/file.h>
+
+struct bpf_map;
+
+/* a map is a generic key/value store, optionally accessible by eBPF programs */
+struct bpf_map_ops {
+ /* funcs callable from userspace (via syscall) */
+ struct bpf_map *(*map_alloc)(union bpf_attr *attr);
+ void (*map_free)(struct bpf_map *);
+ int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
+
+ /* funcs callable from userspace and from eBPF programs */
+ void *(*map_lookup_elem)(struct bpf_map *map, void *key);
+ int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
+ int (*map_delete_elem)(struct bpf_map *map, void *key);
+};
+
+struct bpf_map {
+ atomic_t refcnt;
+ enum bpf_map_type map_type;
+ u32 key_size;
+ u32 value_size;
+ u32 max_entries;
+ const struct bpf_map_ops *ops;
+ struct work_struct work;
+};
+
+struct bpf_map_type_list {
+ struct list_head list_node;
+ const struct bpf_map_ops *ops;
+ enum bpf_map_type type;
+};
+
+/* function argument constraints */
+enum bpf_arg_type {
+ ARG_DONTCARE = 0, /* unused argument in helper function */
+
+ /* the following constraints are used to prototype
+ * the bpf_map_lookup/update/delete_elem() functions
+ */
+ ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
+ ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
+ ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
+
+ /* the following constraints are used to prototype bpf_memcmp() and other
+ * functions that access data on the eBPF program stack
+ */
+ ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
+ ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+
+ ARG_PTR_TO_CTX, /* pointer to context */
+ ARG_ANYTHING, /* any (initialized) argument is ok */
+};
+
+/* type of values returned from helper functions */
+enum bpf_return_type {
+ RET_INTEGER, /* function returns integer */
+ RET_VOID, /* function doesn't return anything */
+ RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
+};
+
+/* eBPF function prototype used by the verifier to allow BPF_CALLs from eBPF
+ * programs to in-kernel helper functions, and for adjusting the imm32 field in
+ * BPF_CALL instructions after verification
+ */
+struct bpf_func_proto {
+ u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+ bool gpl_only;
+ enum bpf_return_type ret_type;
+ enum bpf_arg_type arg1_type;
+ enum bpf_arg_type arg2_type;
+ enum bpf_arg_type arg3_type;
+ enum bpf_arg_type arg4_type;
+ enum bpf_arg_type arg5_type;
+};
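+
+/*
+ * Illustrative sketch: how a helper's prototype might be described for the
+ * verifier.  'bpf_my_helper' and its proto are hypothetical; see the real
+ * protos declared at the end of this header (bpf_map_lookup_elem_proto etc.).
+ *
+ *	static const struct bpf_func_proto bpf_my_helper_proto = {
+ *		.func		= bpf_my_helper,
+ *		.gpl_only	= false,
+ *		.ret_type	= RET_INTEGER,
+ *		.arg1_type	= ARG_PTR_TO_STACK,
+ *		.arg2_type	= ARG_CONST_STACK_SIZE,
+ *	};
+ */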
+
+/* bpf_context is an intentionally undefined structure. A pointer to
+ * bpf_context is the first argument to eBPF programs.
+ * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
+ */
+struct bpf_context;
+
+enum bpf_access_type {
+ BPF_READ = 1,
+ BPF_WRITE = 2
+};
+
+struct bpf_verifier_ops {
+ /* return eBPF function prototype for verification */
+ const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
+
+ /* return true if 'size' wide access at offset 'off' within bpf_context
+ * with 'type' (read or write) is allowed
+ */
+ bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
+
+ u32 (*convert_ctx_access)(int dst_reg, int src_reg, int ctx_off,
+ struct bpf_insn *insn);
+};
+
+struct bpf_prog_type_list {
+ struct list_head list_node;
+ const struct bpf_verifier_ops *ops;
+ enum bpf_prog_type type;
+};
+
+struct bpf_prog;
+
+struct bpf_prog_aux {
+ atomic_t refcnt;
+ u32 used_map_cnt;
+ const struct bpf_verifier_ops *ops;
+ struct bpf_map **used_maps;
+ struct bpf_prog *prog;
+ struct work_struct work;
+};
+
+#ifdef CONFIG_BPF_SYSCALL
+void bpf_register_prog_type(struct bpf_prog_type_list *tl);
+void bpf_register_map_type(struct bpf_map_type_list *tl);
+
+struct bpf_prog *bpf_prog_get(u32 ufd);
+void bpf_prog_put(struct bpf_prog *prog);
+
+struct bpf_map *bpf_map_get(struct fd f);
+void bpf_map_put(struct bpf_map *map);
+
+/* verify correctness of eBPF program */
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+#else
+static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
+{
+}
+
+static inline struct bpf_prog *bpf_prog_get(u32 ufd)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_prog_put(struct bpf_prog *prog)
+{
+}
+#endif /* CONFIG_BPF_SYSCALL */
+
+/* verifier prototypes for helper functions called from eBPF programs */
+extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
+extern const struct bpf_func_proto bpf_map_update_elem_proto;
+extern const struct bpf_func_proto bpf_map_delete_elem_proto;
+
+extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
+extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
+
+#endif /* _LINUX_BPF_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
new file mode 100644
index 000000000..656da2a12
--- /dev/null
+++ b/include/linux/brcmphy.h
@@ -0,0 +1,228 @@
+#ifndef _LINUX_BRCMPHY_H
+#define _LINUX_BRCMPHY_H
+
+#define PHY_ID_BCM50610 0x0143bd60
+#define PHY_ID_BCM50610M 0x0143bd70
+#define PHY_ID_BCM5241 0x0143bc30
+#define PHY_ID_BCMAC131 0x0143bc70
+#define PHY_ID_BCM5481 0x0143bca0
+#define PHY_ID_BCM5482 0x0143bcb0
+#define PHY_ID_BCM5411 0x00206070
+#define PHY_ID_BCM5421 0x002060e0
+#define PHY_ID_BCM5464 0x002060b0
+#define PHY_ID_BCM5461 0x002060c0
+#define PHY_ID_BCM54616S 0x03625d10
+#define PHY_ID_BCM57780 0x03625d90
+
+#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7364 0xae025260
+#define PHY_ID_BCM7366 0x600d8490
+#define PHY_ID_BCM7425 0x600d86b0
+#define PHY_ID_BCM7429 0x600d8730
+#define PHY_ID_BCM7439 0x600d8480
+#define PHY_ID_BCM7439_2 0xae025080
+#define PHY_ID_BCM7445 0x600d8510
+
+#define PHY_BCM_OUI_MASK 0xfffffc00
+#define PHY_BCM_OUI_1 0x00206000
+#define PHY_BCM_OUI_2 0x0143bc00
+#define PHY_BCM_OUI_3 0x03625c00
+#define PHY_BCM_OUI_4 0x600d8400
+#define PHY_BCM_OUI_5 0x03625e00
+#define PHY_BCM_OUI_6 0xae025000
+
+#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
+#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
+#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
+#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
+#define PHY_BRCM_WIRESPEED_ENABLE 0x00000100
+#define PHY_BRCM_AUTO_PWRDWN_ENABLE 0x00000200
+#define PHY_BRCM_RX_REFCLK_UNUSED 0x00000400
+#define PHY_BRCM_STD_IBND_DISABLE 0x00000800
+#define PHY_BRCM_EXT_IBND_RX_ENABLE 0x00001000
+#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
+#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
+#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
+/* Broadcom BCM7xxx specific workarounds */
+#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
+#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff)
+#define PHY_BCM_FLAGS_VALID 0x80000000
+
+/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
+#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
+#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
+#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
+
+#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
+#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
+
+#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
+#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
+#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
+
+#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
+#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
+#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
+#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
+#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
+#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
+#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
+#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
+#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
+#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
+#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
+#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
+#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
+#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
+#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
+#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
+#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
+#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
+
+#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
+#define MII_BCM54XX_SHD_WRITE 0x8000
+#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
+#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
+
+/*
+ * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
+ */
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
+#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
+
+#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
+#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
+
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+
+/*
+ * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
+ * BCM5482, and possibly some others.
+ */
+#define BCM_LED_SRC_LINKSPD1 0x0
+#define BCM_LED_SRC_LINKSPD2 0x1
+#define BCM_LED_SRC_XMITLED 0x2
+#define BCM_LED_SRC_ACTIVITYLED 0x3
+#define BCM_LED_SRC_FDXLED 0x4
+#define BCM_LED_SRC_SLAVE 0x5
+#define BCM_LED_SRC_INTR 0x6
+#define BCM_LED_SRC_QUALITY 0x7
+#define BCM_LED_SRC_RCVLED 0x8
+#define BCM_LED_SRC_MULTICOLOR1 0xa
+#define BCM_LED_SRC_OPENSHORT 0xb
+#define BCM_LED_SRC_OFF 0xe /* Tied high */
+#define BCM_LED_SRC_ON 0xf /* Tied low */
+
+
+/*
+ * BCM5482: Shadow registers
+ * Shadow values go into bits [14:10] of register 0x1c to select a shadow
+ * register to access.
+ */
+/* 00101: Spare Control Register 3 */
+#define BCM54XX_SHD_SCR3 0x05
+#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
+#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
+#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
+
+/* 01010: Auto Power-Down */
+#define BCM54XX_SHD_APD 0x0a
+#define BCM54XX_SHD_APD_EN 0x0020
+
+#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
+ /* LED3 / ~LINKSPD[2] selector */
+#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
+ /* LED1 / ~LINKSPD[1] selector */
+#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
+#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
+#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
+#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
+#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
+#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
+#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
+
+
+/*
+ * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
+ */
+#define MII_BCM54XX_EXP_AADJ1CH0 0x001f
+#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200
+#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100
+#define MII_BCM54XX_EXP_AADJ1CH3 0x601f
+#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002
+#define MII_BCM54XX_EXP_EXP08 0x0F08
+#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
+#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
+#define MII_BCM54XX_EXP_EXP75 0x0f75
+#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
+#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
+#define MII_BCM54XX_EXP_EXP96 0x0f96
+#define MII_BCM54XX_EXP_EXP96_MYST 0x0010
+#define MII_BCM54XX_EXP_EXP97 0x0f97
+#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
+
+/*
+ * BCM5482: Secondary SerDes registers
+ */
+#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
+#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
+#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
+#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
+#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
+
+
+/*****************************************************************************/
+/* Fast Ethernet Transceiver definitions. */
+/*****************************************************************************/
+
+#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */
+#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */
+#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */
+#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */
+#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */
+#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */
+
+#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */
+#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */
+
+
+/*** Shadow register definitions ***/
+
+#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */
+#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
+
+#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
+#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
+#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
+
+#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
+#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
+
+/*
+ * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
+ * 0x1c shadow registers.
+ */
+static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
+{
+ phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
+ return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
+}
+
+static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
+ u16 val)
+{
+ return phy_write(phydev, MII_BCM54XX_SHD,
+ MII_BCM54XX_SHD_WRITE |
+ MII_BCM54XX_SHD_VAL(shadow) |
+ MII_BCM54XX_SHD_DATA(val));
+}
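+
+/*
+ * Illustrative usage (hypothetical PHY driver code, error handling omitted):
+ * enable automatic power-down via the Auto Power-Down shadow register.
+ *
+ *	int val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
+ *
+ *	bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD,
+ *			     val | BCM54XX_SHD_APD_EN);
+ */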
+
+#define BRCM_CL45VEN_EEE_CONTROL 0x803d
+#define LPI_FEATURE_EN 0x8000
+#define LPI_FEATURE_EN_DIG1000X 0x4000
+
+#endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
new file mode 100644
index 000000000..90b1aa867
--- /dev/null
+++ b/include/linux/bsearch.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_BSEARCH_H
+#define _LINUX_BSEARCH_H
+
+#include <linux/types.h>
+
+void *bsearch(const void *key, const void *base, size_t num, size_t size,
+ int (*cmp)(const void *key, const void *elt));
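+
+/*
+ * Illustrative sketch: looking up an int in a sorted array.  'cmp_int' is a
+ * hypothetical comparison callback; it must use the same ordering the array
+ * was sorted with.
+ *
+ *	static int cmp_int(const void *key, const void *elt)
+ *	{
+ *		return *(const int *)key - *(const int *)elt;
+ *	}
+ *
+ *	static const int table[] = { 1, 3, 5, 9 };
+ *	int key = 5;
+ *	int *hit = bsearch(&key, table, ARRAY_SIZE(table), sizeof(int), cmp_int);
+ */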
+
+#endif /* _LINUX_BSEARCH_H */
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
new file mode 100644
index 000000000..a226652a5
--- /dev/null
+++ b/include/linux/bsg-lib.h
@@ -0,0 +1,71 @@
+/*
+ * BSG helper library
+ *
+ * Copyright (C) 2008 James Smart, Emulex Corporation
+ * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2011 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _BLK_BSG_
+#define _BLK_BSG_
+
+#include <linux/blkdev.h>
+
+struct request;
+struct device;
+struct scatterlist;
+struct request_queue;
+
+struct bsg_buffer {
+ unsigned int payload_len;
+ int sg_cnt;
+ struct scatterlist *sg_list;
+};
+
+struct bsg_job {
+ struct device *dev;
+ struct request *req;
+
+ /* Transport/driver specific request/reply structs */
+ void *request;
+ void *reply;
+
+ unsigned int request_len;
+ unsigned int reply_len;
+ /*
+ * On entry: reply_len indicates the buffer size allocated for
+ * the reply.
+ *
+ * Upon completion: the message handler must set reply_len
+ * to indicate the size of the reply to be returned to the
+ * caller.
+ */
+
+ /* DMA payloads for the request/response */
+ struct bsg_buffer request_payload;
+ struct bsg_buffer reply_payload;
+
+ void *dd_data; /* Used for driver-specific storage */
+};
+
+void bsg_job_done(struct bsg_job *job, int result,
+ unsigned int reply_payload_rcv_len);
+int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
+ bsg_job_fn *job_fn, int dd_job_size);
+void bsg_request_fn(struct request_queue *q);
+
+#endif
diff --git a/include/linux/bsg.h b/include/linux/bsg.h
new file mode 100644
index 000000000..7173f6e9d
--- /dev/null
+++ b/include/linux/bsg.h
@@ -0,0 +1,33 @@
+#ifndef BSG_H
+#define BSG_H
+
+#include <uapi/linux/bsg.h>
+
+
+#if defined(CONFIG_BLK_DEV_BSG)
+struct bsg_class_device {
+ struct device *class_dev;
+ struct device *parent;
+ int minor;
+ struct request_queue *queue;
+ struct kref ref;
+ void (*release)(struct device *);
+};
+
+extern int bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ void (*release)(struct device *));
+extern void bsg_unregister_queue(struct request_queue *);
+#else
+static inline int bsg_register_queue(struct request_queue *q,
+ struct device *parent, const char *name,
+ void (*release)(struct device *))
+{
+ return 0;
+}
+static inline void bsg_unregister_queue(struct request_queue *q)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h
new file mode 100644
index 000000000..0b3414c4c
--- /dev/null
+++ b/include/linux/btree-128.h
@@ -0,0 +1,109 @@
+extern struct btree_geo btree_geo128;
+
+struct btree_head128 { struct btree_head h; };
+
+static inline void btree_init_mempool128(struct btree_head128 *head,
+ mempool_t *mempool)
+{
+ btree_init_mempool(&head->h, mempool);
+}
+
+static inline int btree_init128(struct btree_head128 *head)
+{
+ return btree_init(&head->h);
+}
+
+static inline void btree_destroy128(struct btree_head128 *head)
+{
+ btree_destroy(&head->h);
+}
+
+static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2)
+{
+ u64 key[2] = {k1, k2};
+ return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key);
+}
+
+static inline void *btree_get_prev128(struct btree_head128 *head,
+ u64 *k1, u64 *k2)
+{
+ u64 key[2] = {*k1, *k2};
+ void *val;
+
+ val = btree_get_prev(&head->h, &btree_geo128,
+ (unsigned long *)&key);
+ *k1 = key[0];
+ *k2 = key[1];
+ return val;
+}
+
+static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2,
+ void *val, gfp_t gfp)
+{
+ u64 key[2] = {k1, k2};
+ return btree_insert(&head->h, &btree_geo128,
+ (unsigned long *)&key, val, gfp);
+}
+
+static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2,
+ void *val)
+{
+ u64 key[2] = {k1, k2};
+ return btree_update(&head->h, &btree_geo128,
+ (unsigned long *)&key, val);
+}
+
+static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2)
+{
+ u64 key[2] = {k1, k2};
+ return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key);
+}
+
+static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2)
+{
+ u64 key[2];
+ void *val;
+
+ val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]);
+ if (val) {
+ *k1 = key[0];
+ *k2 = key[1];
+ }
+
+ return val;
+}
+
+static inline int btree_merge128(struct btree_head128 *target,
+ struct btree_head128 *victim,
+ gfp_t gfp)
+{
+ return btree_merge(&target->h, &victim->h, &btree_geo128, gfp);
+}
+
+void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func);
+
+typedef void (*visitor128_t)(void *elem, unsigned long opaque,
+ u64 key1, u64 key2, size_t index);
+
+static inline size_t btree_visitor128(struct btree_head128 *head,
+ unsigned long opaque,
+ visitor128_t func2)
+{
+ return btree_visitor(&head->h, &btree_geo128, opaque,
+ visitor128, func2);
+}
+
+static inline size_t btree_grim_visitor128(struct btree_head128 *head,
+ unsigned long opaque,
+ visitor128_t func2)
+{
+ return btree_grim_visitor(&head->h, &btree_geo128, opaque,
+ visitor128, func2);
+}
+
+#define btree_for_each_safe128(head, k1, k2, val) \
+ for (val = btree_last128(head, &k1, &k2); \
+ val; \
+ val = btree_get_prev128(head, &k1, &k2))
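+
+/*
+ * Illustrative sketch: walking all entries in reverse key order; the _safe
+ * variant allows removing the current entry.  'head' is a hypothetical
+ * struct btree_head128 and 'stale()' a hypothetical predicate.
+ *
+ *	u64 k1, k2;
+ *	void *val;
+ *
+ *	btree_for_each_safe128(&head, k1, k2, val) {
+ *		if (stale(val))
+ *			btree_remove128(&head, k1, k2);
+ *	}
+ */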
+
diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h
new file mode 100644
index 000000000..9a1147ef8
--- /dev/null
+++ b/include/linux/btree-type.h
@@ -0,0 +1,147 @@
+#define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx
+#define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx)
+#define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,)
+#define BTREE_FN(name) BTREE_TP(btree_ ## name)
+#define BTREE_TYPE_HEAD BTREE_TP(struct btree_head)
+#define VISITOR_FN BTREE_TP(visitor)
+#define VISITOR_FN_T _BTREE_TP(visitor, BTREE_TYPE_SUFFIX, _t)
+
+BTREE_TYPE_HEAD {
+ struct btree_head h;
+};
+
+static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head,
+ mempool_t *mempool)
+{
+ btree_init_mempool(&head->h, mempool);
+}
+
+static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head)
+{
+ return btree_init(&head->h);
+}
+
+static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head)
+{
+ btree_destroy(&head->h);
+}
+
+static inline int BTREE_FN(merge)(BTREE_TYPE_HEAD *target,
+ BTREE_TYPE_HEAD *victim,
+ gfp_t gfp)
+{
+ return btree_merge(&target->h, &victim->h, BTREE_TYPE_GEO, gfp);
+}
+
+#if (BITS_PER_LONG > BTREE_TYPE_BITS)
+static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
+{
+ unsigned long _key = key;
+ return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key);
+}
+
+static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
+ void *val, gfp_t gfp)
+{
+ unsigned long _key = key;
+ return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp);
+}
+
+static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
+ void *val)
+{
+ unsigned long _key = key;
+ return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val);
+}
+
+static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
+{
+ unsigned long _key = key;
+ return btree_remove(&head->h, BTREE_TYPE_GEO, &_key);
+}
+
+static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
+{
+ unsigned long _key;
+ void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key);
+ if (val)
+ *key = _key;
+ return val;
+}
+
+static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
+{
+ unsigned long _key = *key;
+ void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key);
+ if (val)
+ *key = _key;
+ return val;
+}
+#else
+static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
+{
+ return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key);
+}
+
+static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
+ void *val, gfp_t gfp)
+{
+ return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key,
+ val, gfp);
+}
+
+static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
+ void *val)
+{
+ return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val);
+}
+
+static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
+{
+ return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key);
+}
+
+static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
+{
+ return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key);
+}
+
+static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
+{
+ return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key);
+}
+#endif
+
+void VISITOR_FN(void *elem, unsigned long opaque, unsigned long *key,
+ size_t index, void *__func);
+
+typedef void (*VISITOR_FN_T)(void *elem, unsigned long opaque,
+ BTREE_KEYTYPE key, size_t index);
+
+static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head,
+ unsigned long opaque,
+ VISITOR_FN_T func2)
+{
+ return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque,
+ visitorl, func2);
+}
+
+static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head,
+ unsigned long opaque,
+ VISITOR_FN_T func2)
+{
+ return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque,
+ visitorl, func2);
+}
+
+#undef VISITOR_FN
+#undef VISITOR_FN_T
+#undef __BTREE_TP
+#undef _BTREE_TP
+#undef BTREE_TP
+#undef BTREE_FN
+#undef BTREE_TYPE_HEAD
+#undef BTREE_TYPE_SUFFIX
+#undef BTREE_TYPE_GEO
+#undef BTREE_KEYTYPE
+#undef BTREE_TYPE_BITS
diff --git a/include/linux/btree.h b/include/linux/btree.h
new file mode 100644
index 000000000..65b5bb058
--- /dev/null
+++ b/include/linux/btree.h
@@ -0,0 +1,243 @@
+#ifndef BTREE_H
+#define BTREE_H
+
+#include <linux/kernel.h>
+#include <linux/mempool.h>
+
+/**
+ * DOC: B+Tree basics
+ *
+ * A B+Tree is a data structure for looking up arbitrary (currently allowing
+ * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure
+ * is described at http://en.wikipedia.org/wiki/B-tree; note that we currently
+ * do not use binary search to find the key on lookups.
+ *
+ * Each B+Tree consists of a head, which contains bookkeeping information, and
+ * a variable number (starting with zero) of nodes. Each node contains the keys
+ * and pointers to sub-nodes, or, for leaf nodes, the keys and values for the
+ * tree entries.
+ *
+ * Each node in this implementation has the following layout:
+ * [key1, key2, ..., keyN] [val1, val2, ..., valN]
+ *
+ * Each key here is an array of unsigned longs, geo->no_longs in total. The
+ * number of keys and values (N) is geo->no_pairs.
+ */
+
+/**
+ * struct btree_head - btree head
+ *
+ * @node: the first node in the tree
+ * @mempool: mempool used for node allocations
+ * @height: current height of the tree
+ */
+struct btree_head {
+ unsigned long *node;
+ mempool_t *mempool;
+ int height;
+};
+
+/* btree geometry */
+struct btree_geo;
+
+/**
+ * btree_alloc - allocate function for the mempool
+ * @gfp_mask: gfp mask for the allocation
+ * @pool_data: unused
+ */
+void *btree_alloc(gfp_t gfp_mask, void *pool_data);
+
+/**
+ * btree_free - free function for the mempool
+ * @element: the element to free
+ * @pool_data: unused
+ */
+void btree_free(void *element, void *pool_data);
+
+/**
+ * btree_init_mempool - initialise a btree with given mempool
+ *
+ * @head: the btree head to initialise
+ * @mempool: the mempool to use
+ *
+ * When this function is used, there is no need to destroy
+ * the mempool.
+ */
+void btree_init_mempool(struct btree_head *head, mempool_t *mempool);
+
+/**
+ * btree_init - initialise a btree
+ *
+ * @head: the btree head to initialise
+ *
+ * This function allocates the memory pool that the
+ * btree needs. Returns zero or a negative error code
+ * (-%ENOMEM) when memory allocation fails.
+ *
+ */
+int __must_check btree_init(struct btree_head *head);
+
+/**
+ * btree_destroy - destroy mempool
+ *
+ * @head: the btree head to destroy
+ *
+ * This function destroys the internal memory pool; use it only
+ * when the tree was set up with btree_init(), not with btree_init_mempool().
+ */
+void btree_destroy(struct btree_head *head);
+
+/**
+ * btree_lookup - look up a key in the btree
+ *
+ * @head: the btree to look in
+ * @geo: the btree geometry
+ * @key: the key to look up
+ *
+ * This function returns the value for the given key, or %NULL.
+ */
+void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key);
+
+/**
+ * btree_insert - insert an entry into the btree
+ *
+ * @head: the btree to add to
+ * @geo: the btree geometry
+ * @key: the key to add (must not already be present)
+ * @val: the value to add (must not be %NULL)
+ * @gfp: allocation flags for node allocations
+ *
+ * This function returns 0 if the item could be added, or an
+ * error code if it failed (may fail due to memory pressure).
+ */
+int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val, gfp_t gfp);
+/**
+ * btree_update - update an entry in the btree
+ *
+ * @head: the btree to update
+ * @geo: the btree geometry
+ * @key: the key to update
+ * @val: the value to change it to (must not be %NULL)
+ *
+ * This function returns 0 if the update was successful, or
+ * -%ENOENT if the key could not be found.
+ */
+int btree_update(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val);
+/**
+ * btree_remove - remove an entry from the btree
+ *
+ * @head: the btree to update
+ * @geo: the btree geometry
+ * @key: the key to remove
+ *
+ * This function returns the removed entry, or %NULL if the key
+ * could not be found.
+ */
+void *btree_remove(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key);
+
+/**
+ * btree_merge - merge two btrees
+ *
+ * @target: the tree that gets all the entries
+ * @victim: the tree that gets merged into @target
+ * @geo: the btree geometry
+ * @gfp: allocation flags
+ *
+ * The two trees @target and @victim may not contain the same keys;
+ * that is a bug and triggers a BUG(). This function returns zero
+ * if the trees were merged successfully, and a negative error code
+ * when memory allocation fails, in which case both trees might have
+ * been partially merged, i.e. some entries have been moved from
+ * @victim to @target.
+ */
+int btree_merge(struct btree_head *target, struct btree_head *victim,
+ struct btree_geo *geo, gfp_t gfp);
+
+/**
+ * btree_last - get last entry in btree
+ *
+ * @head: btree head
+ * @geo: btree geometry
+ * @key: last key
+ *
+ * Returns the last entry in the btree, and sets @key to the key
+ * of that entry; returns %NULL if the tree is empty, in which case
+ * @key is not changed.
+ */
+void *btree_last(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key);
+
+/**
+ * btree_get_prev - get previous entry
+ *
+ * @head: btree head
+ * @geo: btree geometry
+ * @key: pointer to key
+ *
+ * The function returns the entry that comes right before the key pointed to
+ * by @key, and updates @key with that entry's key, or returns %NULL when
+ * there is no entry with a key smaller than the given key.
+ */
+void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key);
+
+
+/* internal use, use btree_visitor{l,32,64,128} */
+size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key, size_t index,
+ void *func2),
+ void *func2);
+
+/* internal use, use btree_grim_visitor{l,32,64,128} */
+size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key,
+ size_t index, void *func2),
+ void *func2);
+
+
+#include <linux/btree-128.h>
+
+extern struct btree_geo btree_geo32;
+#define BTREE_TYPE_SUFFIX l
+#define BTREE_TYPE_BITS BITS_PER_LONG
+#define BTREE_TYPE_GEO &btree_geo32
+#define BTREE_KEYTYPE unsigned long
+#include <linux/btree-type.h>
+
+#define btree_for_each_safel(head, key, val) \
+ for (val = btree_lastl(head, &key); \
+ val; \
+ val = btree_get_prevl(head, &key))
+
+#define BTREE_TYPE_SUFFIX 32
+#define BTREE_TYPE_BITS 32
+#define BTREE_TYPE_GEO &btree_geo32
+#define BTREE_KEYTYPE u32
+#include <linux/btree-type.h>
+
+#define btree_for_each_safe32(head, key, val) \
+ for (val = btree_last32(head, &key); \
+ val; \
+ val = btree_get_prev32(head, &key))
+
+extern struct btree_geo btree_geo64;
+#define BTREE_TYPE_SUFFIX 64
+#define BTREE_TYPE_BITS 64
+#define BTREE_TYPE_GEO &btree_geo64
+#define BTREE_KEYTYPE u64
+#include <linux/btree-type.h>
+
+#define btree_for_each_safe64(head, key, val) \
+ for (val = btree_last64(head, &key); \
+ val; \
+ val = btree_get_prev64(head, &key))
+
+#endif
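The three includes of <linux/btree-type.h> above generate typed front ends (btree_head32/btree_init32/btree_insert32 and the u64 and unsigned long equivalents). A minimal sketch of the generated 32-bit API, assuming CONFIG_BTREE is enabled; my_item is a hypothetical payload pointer:

    #include <linux/btree.h>
    #include <linux/slab.h>

    static int btree32_example(void *my_item)
    {
            struct btree_head32 head;
            u32 key;
            void *val;
            int err;

            err = btree_init32(&head);
            if (err)
                    return err;

            err = btree_insert32(&head, 42, my_item, GFP_KERNEL);
            if (err)
                    goto out;

            val = btree_lookup32(&head, 42);        /* my_item, or NULL if absent */

            /* reverse-order walk; removing the current entry is allowed */
            btree_for_each_safe32(&head, key, val)
                    btree_remove32(&head, key);
    out:
            btree_destroy32(&head);
            return err;
    }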
diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h
new file mode 100644
index 000000000..22d799147
--- /dev/null
+++ b/include/linux/btrfs.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_BTRFS_H
+#define _LINUX_BTRFS_H
+
+#include <uapi/linux/btrfs.h>
+
+#endif /* _LINUX_BTRFS_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
new file mode 100644
index 000000000..e6797ded7
--- /dev/null
+++ b/include/linux/buffer_head.h
@@ -0,0 +1,403 @@
+/*
+ * include/linux/buffer_head.h
+ *
+ * Everything to do with buffer_heads.
+ */
+
+#ifndef _LINUX_BUFFER_HEAD_H
+#define _LINUX_BUFFER_HEAD_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/linkage.h>
+#include <linux/pagemap.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+
+#ifdef CONFIG_BLOCK
+
+enum bh_state_bits {
+ BH_Uptodate, /* Contains valid data */
+ BH_Dirty, /* Is dirty */
+ BH_Lock, /* Is locked */
+ BH_Req, /* Has been submitted for I/O */
+ BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
+ * IO completion of other buffers in the page
+ */
+
+ BH_Mapped, /* Has a disk mapping */
+ BH_New, /* Disk mapping was newly created by get_block */
+ BH_Async_Read, /* Is under end_buffer_async_read I/O */
+ BH_Async_Write, /* Is under end_buffer_async_write I/O */
+ BH_Delay, /* Buffer is not yet allocated on disk */
+ BH_Boundary, /* Block is followed by a discontiguity */
+ BH_Write_EIO, /* I/O error on write */
+ BH_Unwritten, /* Buffer is allocated on disk but not written */
+ BH_Quiet, /* Buffer Error Printks to be quiet */
+ BH_Meta, /* Buffer contains metadata */
+ BH_Prio, /* Buffer should be submitted with REQ_PRIO */
+ BH_Defer_Completion, /* Defer AIO completion to workqueue */
+
+ BH_PrivateStart,/* not a state bit, but the first bit available
+ * for private allocation by other entities
+ */
+};
+
+#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
+
+struct page;
+struct buffer_head;
+struct address_space;
+typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
+
+/*
+ * Historically, a buffer_head was used to map a single block
+ * within a page, and of course as the unit of I/O through the
+ * filesystem and block layers. Nowadays the basic I/O unit
+ * is the bio, and buffer_heads are used for extracting block
+ * mappings (via a get_block_t call), for tracking state within
+ * a page (via a page_mapping) and for wrapping bio submission
+ * for backward compatibility reasons (e.g. submit_bh).
+ */
+struct buffer_head {
+ unsigned long b_state; /* buffer state bitmap (see above) */
+ struct buffer_head *b_this_page;/* circular list of page's buffers */
+ struct page *b_page; /* the page this bh is mapped to */
+
+ sector_t b_blocknr; /* start block number */
+ size_t b_size; /* size of mapping */
+ char *b_data; /* pointer to data within the page */
+
+ struct block_device *b_bdev;
+ bh_end_io_t *b_end_io; /* I/O completion */
+ void *b_private; /* reserved for b_end_io */
+ struct list_head b_assoc_buffers; /* associated with another mapping */
+ struct address_space *b_assoc_map; /* mapping this buffer is
+ associated with */
+ atomic_t b_count; /* users using this buffer_head */
+};
+
+/*
+ * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
+ * and buffer_foo() functions.
+ */
+#define BUFFER_FNS(bit, name) \
+static inline void set_buffer_##name(struct buffer_head *bh) \
+{ \
+ set_bit(BH_##bit, &(bh)->b_state); \
+} \
+static inline void clear_buffer_##name(struct buffer_head *bh) \
+{ \
+ clear_bit(BH_##bit, &(bh)->b_state); \
+} \
+static inline int buffer_##name(const struct buffer_head *bh) \
+{ \
+ return test_bit(BH_##bit, &(bh)->b_state); \
+}
+
+/*
+ * test_set_buffer_foo() and test_clear_buffer_foo()
+ */
+#define TAS_BUFFER_FNS(bit, name) \
+static inline int test_set_buffer_##name(struct buffer_head *bh) \
+{ \
+ return test_and_set_bit(BH_##bit, &(bh)->b_state); \
+} \
+static inline int test_clear_buffer_##name(struct buffer_head *bh) \
+{ \
+ return test_and_clear_bit(BH_##bit, &(bh)->b_state); \
+} \
+
+/*
+ * Emit the buffer bitops functions. Note that there are also functions
+ * of the form "mark_buffer_foo()". These are higher-level functions which
+ * do something in addition to setting a b_state bit.
+ */
+BUFFER_FNS(Uptodate, uptodate)
+BUFFER_FNS(Dirty, dirty)
+TAS_BUFFER_FNS(Dirty, dirty)
+BUFFER_FNS(Lock, locked)
+BUFFER_FNS(Req, req)
+TAS_BUFFER_FNS(Req, req)
+BUFFER_FNS(Mapped, mapped)
+BUFFER_FNS(New, new)
+BUFFER_FNS(Async_Read, async_read)
+BUFFER_FNS(Async_Write, async_write)
+BUFFER_FNS(Delay, delay)
+BUFFER_FNS(Boundary, boundary)
+BUFFER_FNS(Write_EIO, write_io_error)
+BUFFER_FNS(Unwritten, unwritten)
+BUFFER_FNS(Meta, meta)
+BUFFER_FNS(Prio, prio)
+BUFFER_FNS(Defer_Completion, defer_completion)
+
+#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+
+/* If we *know* page->private refers to buffer_heads */
+#define page_buffers(page) \
+ ({ \
+ BUG_ON(!PagePrivate(page)); \
+ ((struct buffer_head *)page_private(page)); \
+ })
+#define page_has_buffers(page) PagePrivate(page)
+
+void buffer_check_dirty_writeback(struct page *page,
+ bool *dirty, bool *writeback);
+
+/*
+ * Declarations
+ */
+
+void mark_buffer_dirty(struct buffer_head *bh);
+void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
+void touch_buffer(struct buffer_head *bh);
+void set_bh_page(struct buffer_head *bh,
+ struct page *page, unsigned long offset);
+int try_to_free_buffers(struct page *);
+struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
+ int retry);
+void create_empty_buffers(struct page *, unsigned long,
+ unsigned long b_state);
+void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_async_write(struct buffer_head *bh, int uptodate);
+
+/* Things to do with buffers at mapping->private_list */
+void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
+int inode_has_buffers(struct inode *);
+void invalidate_inode_buffers(struct inode *);
+int remove_inode_buffers(struct inode *inode);
+int sync_mapping_buffers(struct address_space *mapping);
+void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
+
+void mark_buffer_async_write(struct buffer_head *bh);
+void __wait_on_buffer(struct buffer_head *);
+wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
+struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
+ unsigned size);
+struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp);
+void __brelse(struct buffer_head *);
+void __bforget(struct buffer_head *);
+void __breadahead(struct block_device *, sector_t block, unsigned int size);
+struct buffer_head *__bread_gfp(struct block_device *,
+ sector_t block, unsigned size, gfp_t gfp);
+void invalidate_bh_lrus(void);
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
+void free_buffer_head(struct buffer_head * bh);
+void unlock_buffer(struct buffer_head *bh);
+void __lock_buffer(struct buffer_head *bh);
+void ll_rw_block(int, int, struct buffer_head * bh[]);
+int sync_dirty_buffer(struct buffer_head *bh);
+int __sync_dirty_buffer(struct buffer_head *bh, int rw);
+void write_dirty_buffer(struct buffer_head *bh, int rw);
+int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
+int submit_bh(int, struct buffer_head *);
+void write_boundary_block(struct block_device *bdev,
+ sector_t bblock, unsigned blocksize);
+int bh_uptodate_or_lock(struct buffer_head *bh);
+int bh_submit_read(struct buffer_head *bh);
+
+extern int buffer_heads_over_limit;
+
+/*
+ * Generic address_space_operations implementations for buffer_head-backed
+ * address_spaces.
+ */
+void block_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length);
+int block_write_full_page(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc);
+int block_read_full_page(struct page*, get_block_t*);
+int block_is_partially_uptodate(struct page *page, unsigned long from,
+ unsigned long count);
+int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
+ unsigned flags, struct page **pagep, get_block_t *get_block);
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block);
+int block_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+int generic_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
+int cont_write_begin(struct file *, struct address_space *, loff_t,
+ unsigned, unsigned, struct page **, void **,
+ get_block_t *, loff_t *);
+int generic_cont_expand_simple(struct inode *inode, loff_t size);
+int block_commit_write(struct page *page, unsigned from, unsigned to);
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+ get_block_t get_block);
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+ get_block_t get_block);
+/* Convert errno to return value from ->page_mkwrite() call */
+static inline int block_page_mkwrite_return(int err)
+{
+ if (err == 0)
+ return VM_FAULT_LOCKED;
+ if (err == -EFAULT)
+ return VM_FAULT_NOPAGE;
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (err == -EAGAIN)
+ return VM_FAULT_RETRY;
+ /* -ENOSPC, -EDQUOT, -EIO ... */
+ return VM_FAULT_SIGBUS;
+}
+sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
+int block_truncate_page(struct address_space *, loff_t, get_block_t *);
+int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
+ struct page **, void **, get_block_t*);
+int nobh_write_end(struct file *, struct address_space *,
+ loff_t, unsigned, unsigned,
+ struct page *, void *);
+int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
+int nobh_writepage(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc);
+
+void buffer_init(void);
+
+/*
+ * inline definitions
+ */
+
+static inline void attach_page_buffers(struct page *page,
+ struct buffer_head *head)
+{
+ page_cache_get(page);
+ SetPagePrivate(page);
+ set_page_private(page, (unsigned long)head);
+}
+
+static inline void get_bh(struct buffer_head *bh)
+{
+ atomic_inc(&bh->b_count);
+}
+
+static inline void put_bh(struct buffer_head *bh)
+{
+ smp_mb__before_atomic();
+ atomic_dec(&bh->b_count);
+}
+
+static inline void brelse(struct buffer_head *bh)
+{
+ if (bh)
+ __brelse(bh);
+}
+
+static inline void bforget(struct buffer_head *bh)
+{
+ if (bh)
+ __bforget(bh);
+}
+
+static inline struct buffer_head *
+sb_bread(struct super_block *sb, sector_t block)
+{
+ return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+}
+
+static inline struct buffer_head *
+sb_bread_unmovable(struct super_block *sb, sector_t block)
+{
+ return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
+}
+
+static inline void
+sb_breadahead(struct super_block *sb, sector_t block)
+{
+ __breadahead(sb->s_bdev, block, sb->s_blocksize);
+}
+
+static inline struct buffer_head *
+sb_getblk(struct super_block *sb, sector_t block)
+{
+ return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+}
+
+
+static inline struct buffer_head *
+sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+{
+ return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+}
+
+static inline struct buffer_head *
+sb_find_get_block(struct super_block *sb, sector_t block)
+{
+ return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
+}
+
+static inline void
+map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
+{
+ set_buffer_mapped(bh);
+ bh->b_bdev = sb->s_bdev;
+ bh->b_blocknr = block;
+ bh->b_size = sb->s_blocksize;
+}
+
+static inline void wait_on_buffer(struct buffer_head *bh)
+{
+ might_sleep();
+ if (buffer_locked(bh))
+ __wait_on_buffer(bh);
+}
+
+static inline int trylock_buffer(struct buffer_head *bh)
+{
+ return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
+}
+
+static inline void lock_buffer(struct buffer_head *bh)
+{
+ might_sleep();
+ if (!trylock_buffer(bh))
+ __lock_buffer(bh);
+}
+
+static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
+ sector_t block,
+ unsigned size)
+{
+ return __getblk_gfp(bdev, block, size, 0);
+}
+
+static inline struct buffer_head *__getblk(struct block_device *bdev,
+ sector_t block,
+ unsigned size)
+{
+ return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
+}
+
+/**
+ * __bread() - reads a specified block and returns the bh
+ * @bdev: the block_device to read from
+ * @block: number of block
+ * @size: size (in bytes) to read
+ *
+ * Reads a specified block, and returns the buffer head that contains it.
+ * The page cache is allocated from a movable area so that it can be migrated.
+ * It returns NULL if the block was unreadable.
+ */
+static inline struct buffer_head *
+__bread(struct block_device *bdev, sector_t block, unsigned size)
+{
+ return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
+}
+
+extern int __set_page_dirty_buffers(struct page *page);
+
+#else /* CONFIG_BLOCK */
+
+static inline void buffer_init(void) {}
+static inline int try_to_free_buffers(struct page *page) { return 1; }
+static inline int inode_has_buffers(struct inode *inode) { return 0; }
+static inline void invalidate_inode_buffers(struct inode *inode) {}
+static inline int remove_inode_buffers(struct inode *inode) { return 1; }
+static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+
+#endif /* CONFIG_BLOCK */
+#endif /* _LINUX_BUFFER_HEAD_H */
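A sketch of the usual read-modify-write pattern built on the helpers above (sb_bread(), lock_buffer(), mark_buffer_dirty(), brelse()); the zero-fill is only illustrative and error handling is kept minimal:

    #include <linux/buffer_head.h>
    #include <linux/string.h>

    static int zero_one_block(struct super_block *sb, sector_t block)
    {
            struct buffer_head *bh;

            bh = sb_bread(sb, block);       /* read the block; uptodate bh or NULL */
            if (!bh)
                    return -EIO;

            lock_buffer(bh);
            memset(bh->b_data, 0, bh->b_size);      /* modify the cached copy */
            set_buffer_uptodate(bh);
            unlock_buffer(bh);

            mark_buffer_dirty(bh);          /* let writeback pick it up later ... */
            sync_dirty_buffer(bh);          /* ... or push it out synchronously */
            brelse(bh);                     /* drop the reference from sb_bread() */
            return 0;
    }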
diff --git a/include/linux/bug.h b/include/linux/bug.h
new file mode 100644
index 000000000..7f4818673
--- /dev/null
+++ b/include/linux/bug.h
@@ -0,0 +1,112 @@
+#ifndef _LINUX_BUG_H
+#define _LINUX_BUG_H
+
+#include <asm/bug.h>
+#include <linux/compiler.h>
+
+enum bug_trap_type {
+ BUG_TRAP_TYPE_NONE = 0,
+ BUG_TRAP_TYPE_WARN = 1,
+ BUG_TRAP_TYPE_BUG = 2,
+};
+
+struct pt_regs;
+
+#ifdef __CHECKER__
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
+#define BUILD_BUG_ON_ZERO(e) (0)
+#define BUILD_BUG_ON_NULL(e) ((void*)0)
+#define BUILD_BUG_ON_INVALID(e) (0)
+#define BUILD_BUG_ON_MSG(cond, msg) (0)
+#define BUILD_BUG_ON(condition) (0)
+#define BUILD_BUG() (0)
+#else /* __CHECKER__ */
+
+/* Force a compilation error if a constant expression is not a power of 2 */
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
+
+/* Force a compilation error if condition is true, but also produce a
+ result (of value 0 and type size_t), so the expression can be used
+ e.g. in a structure initializer (or wherever else comma expressions
+ aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
+
+/*
+ * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
+ * expression but avoids the generation of any code, even if that expression
+ * has side-effects.
+ */
+#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
+
+/**
+ * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied
+ * error message.
+ * @cond: the condition which the compiler should know is false.
+ * @msg: the error message to emit when @cond is true.
+ * See BUILD_BUG_ON for description.
+ */
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+
+/**
+ * BUILD_BUG_ON - break compile if a condition is true.
+ * @condition: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+ * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ *
+ * The implementation uses gcc's reluctance to create a negative array, but gcc
+ * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to
+ * inline functions). Luckily, in 4.3 they added the "error" function
+ * attribute just for this type of case. Thus, we use a negative sized array
+ * (should always create an error on gcc versions older than 4.4) and then call
+ * an undefined function with the error attribute (should always create an
+ * error on gcc 4.3 and later). If for some reason, neither creates a
+ * compile-time error, we'll still have a link-time error, which is harder to
+ * track down.
+ */
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+#define BUILD_BUG_ON(condition) \
+ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+#endif
+
+/**
+ * BUILD_BUG - break compile if used.
+ *
+ * If you have some code that you expect the compiler to eliminate at
+ * build time, you should use BUILD_BUG to detect if it is
+ * unexpectedly used.
+ */
+#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
+
+#endif /* __CHECKER__ */
+
+#ifdef CONFIG_GENERIC_BUG
+#include <asm-generic/bug.h>
+
+static inline int is_warning_bug(const struct bug_entry *bug)
+{
+ return bug->flags & BUGFLAG_WARNING;
+}
+
+const struct bug_entry *find_bug(unsigned long bugaddr);
+
+enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
+
+/* These are defined by the architecture */
+int is_valid_bugaddr(unsigned long addr);
+
+#else /* !CONFIG_GENERIC_BUG */
+
+static inline enum bug_trap_type report_bug(unsigned long bug_addr,
+ struct pt_regs *regs)
+{
+ return BUG_TRAP_TYPE_BUG;
+}
+
+#endif /* CONFIG_GENERIC_BUG */
+#endif /* _LINUX_BUG_H */
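A short sketch of the compile-time assertions declared above; the on_disk_record structure and its expected size are hypothetical:

    #include <linux/bug.h>
    #include <linux/types.h>

    struct on_disk_record {
            __le32 magic;
            __le32 flags;
            __le64 offset;
    } __attribute__((packed));

    static inline void check_record_layout(void)
    {
            /* refuse to build if the on-disk layout ever changes size */
            BUILD_BUG_ON(sizeof(struct on_disk_record) != 16);

            /* mask arithmetic elsewhere relies on a power-of-two ring size */
            BUILD_BUG_ON_NOT_POWER_OF_2(64);
    }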
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
new file mode 100644
index 000000000..392041475
--- /dev/null
+++ b/include/linux/byteorder/big_endian.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H
+#define _LINUX_BYTEORDER_BIG_ENDIAN_H
+
+#include <uapi/linux/byteorder/big_endian.h>
+
+#include <linux/byteorder/generic.h>
+#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
new file mode 100644
index 000000000..89f67c1c3
--- /dev/null
+++ b/include/linux/byteorder/generic.h
@@ -0,0 +1,173 @@
+#ifndef _LINUX_BYTEORDER_GENERIC_H
+#define _LINUX_BYTEORDER_GENERIC_H
+
+/*
+ * linux/byteorder/generic.h
+ * Generic Byte-reordering support
+ *
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on
+ * some architectures. Use get_unaligned for unaligned data.
+ *
+ * Francois-Rene Rideau <fare@tunes.org> 19970707
+ * gathered all the good ideas from all asm-foo/byteorder.h into one file,
+ * cleaned them up.
+ * I hope it is compliant with non-GCC compilers.
+ * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
+ * because I wasn't sure it would be ok to put it in types.h
+ * Upgraded it to 2.1.43
+ * Francois-Rene Rideau <fare@tunes.org> 19971012
+ * Upgraded it to 2.1.57
+ * to please Linus T., replaced huge #ifdef's between little/big endian
+ * by nestedly #include'd files.
+ * Francois-Rene Rideau <fare@tunes.org> 19971205
+ * Made it to 2.1.71; now a facelift:
+ * Put files under include/linux/byteorder/
+ * Split swab from generic support.
+ *
+ * TODO:
+ * = Regular kernel maintainers could also replace all these manual
+ * byteswap macros that remain, disseminated among drivers,
+ * after some grep over the sources...
+ * = Linus might want to rename all these macros and files to fit his taste,
+ * to fit his personal naming scheme.
+ * = it seems that a few drivers would also appreciate
+ * nybble swapping support...
+ * = every architecture could add their byteswap macro in asm/byteorder.h
+ * see how some architectures already do (i386, alpha, ppc, etc)
+ * = cpu_to_beXX and beXX_to_cpu might some day need to be well
+ * distinguished throughout the kernel. This is not the case currently,
+ * since little endian, big endian, and pdp endian machines don't need it.
+ * But this might be the case for, say, a port of Linux to 20/21 bit
+ * architectures (any F21 Linux addict around?).
+ */
+
+/*
+ * The following macros are to be defined by <asm/byteorder.h>:
+ *
+ * Conversion of long and short int between network and host format
+ * ntohl(__u32 x)
+ * ntohs(__u16 x)
+ * htonl(__u32 x)
+ * htons(__u16 x)
+ * It seems that some programs (which? where? or perhaps a standard? POSIX?)
+ * might like the above to be functions, not macros (why?).
+ * If that's true, then detect them, and take measures.
+ * Anyway, the measure is: define only ___ntohl as a macro instead,
+ * and in a separate file, have
+ * unsigned long inline ntohl(x){return ___ntohl(x);}
+ *
+ * The same for constant arguments
+ * __constant_ntohl(__u32 x)
+ * __constant_ntohs(__u16 x)
+ * __constant_htonl(__u32 x)
+ * __constant_htons(__u16 x)
+ *
+ * Conversion of XX-bit integers (16- 32- or 64-)
+ * between native CPU format and little/big endian format
+ * 64-bit stuff only defined for proper architectures
+ * cpu_to_[bl]eXX(__uXX x)
+ * [bl]eXX_to_cpu(__uXX x)
+ *
+ * The same, but takes a pointer to the value to convert
+ * cpu_to_[bl]eXXp(__uXX x)
+ * [bl]eXX_to_cpup(__uXX x)
+ *
+ * The same, but change in situ
+ * cpu_to_[bl]eXXs(__uXX x)
+ * [bl]eXX_to_cpus(__uXX x)
+ *
+ * See asm-foo/byteorder.h for examples of how to provide
+ * architecture-optimized versions
+ *
+ */
+
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+
+/*
+ * They have to be macros in order to do the constant folding
+ * correctly - if the argument is passed into an inline function
+ * it is no longer constant according to gcc.
+ */
+
+#undef ntohl
+#undef ntohs
+#undef htonl
+#undef htons
+
+#define ___htonl(x) __cpu_to_be32(x)
+#define ___htons(x) __cpu_to_be16(x)
+#define ___ntohl(x) __be32_to_cpu(x)
+#define ___ntohs(x) __be16_to_cpu(x)
+
+#define htonl(x) ___htonl(x)
+#define ntohl(x) ___ntohl(x)
+#define htons(x) ___htons(x)
+#define ntohs(x) ___ntohs(x)
+
+static inline void le16_add_cpu(__le16 *var, u16 val)
+{
+ *var = cpu_to_le16(le16_to_cpu(*var) + val);
+}
+
+static inline void le32_add_cpu(__le32 *var, u32 val)
+{
+ *var = cpu_to_le32(le32_to_cpu(*var) + val);
+}
+
+static inline void le64_add_cpu(__le64 *var, u64 val)
+{
+ *var = cpu_to_le64(le64_to_cpu(*var) + val);
+}
+
+static inline void be16_add_cpu(__be16 *var, u16 val)
+{
+ *var = cpu_to_be16(be16_to_cpu(*var) + val);
+}
+
+static inline void be32_add_cpu(__be32 *var, u32 val)
+{
+ *var = cpu_to_be32(be32_to_cpu(*var) + val);
+}
+
+static inline void be64_add_cpu(__be64 *var, u64 val)
+{
+ *var = cpu_to_be64(be64_to_cpu(*var) + val);
+}
+
+#endif /* _LINUX_BYTEORDER_GENERIC_H */
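A sketch of the conversion and in-place add helpers above, applied to a hypothetical little-endian on-disk structure:

    #include <linux/types.h>
    #include <asm/byteorder.h>      /* pulls in the generic helpers above */

    struct disk_super {             /* hypothetical on-disk layout */
            __le32 nr_items;
            __be16 port;
    };

    static void disk_super_example(struct disk_super *ds)
    {
            u32 items = le32_to_cpu(ds->nr_items); /* on-disk LE -> CPU order */

            ds->nr_items = cpu_to_le32(items + 1); /* CPU order -> on-disk LE */
            le32_add_cpu(&ds->nr_items, 1);        /* same increment, in place */

            ds->port = cpu_to_be16(8080);          /* big endian, like htons() */
    }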
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
new file mode 100644
index 000000000..08057377a
--- /dev/null
+++ b/include/linux/byteorder/little_endian.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H
+#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H
+
+#include <uapi/linux/byteorder/little_endian.h>
+
+#include <linux/byteorder/generic.h>
+#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
new file mode 100644
index 000000000..4efabcb51
--- /dev/null
+++ b/include/linux/c2port.h
@@ -0,0 +1,66 @@
+/*
+ * Silicon Labs C2 port Linux support
+ *
+ * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/kmemcheck.h>
+
+#define C2PORT_NAME_LEN 32
+
+struct device;
+
+/*
+ * C2 port basic structs
+ */
+
+/* Main struct */
+struct c2port_ops;
+struct c2port_device {
+ kmemcheck_bitfield_begin(flags);
+ unsigned int access:1;
+ unsigned int flash_access:1;
+ kmemcheck_bitfield_end(flags);
+
+ int id;
+ char name[C2PORT_NAME_LEN];
+ struct c2port_ops *ops;
+ struct mutex mutex; /* prevent races during read/write */
+
+ struct device *dev;
+
+ void *private_data;
+};
+
+/* Basic operations */
+struct c2port_ops {
+ /* Flash layout */
+ unsigned short block_size; /* flash block size in bytes */
+ unsigned short blocks_num; /* flash blocks number */
+
+ /* Enable or disable the access to C2 port */
+ void (*access)(struct c2port_device *dev, int status);
+
+ /* Set C2D data line as input/output */
+ void (*c2d_dir)(struct c2port_device *dev, int dir);
+
+ /* Read/write C2D data line */
+ int (*c2d_get)(struct c2port_device *dev);
+ void (*c2d_set)(struct c2port_device *dev, int status);
+
+ /* Write C2CK clock line */
+ void (*c2ck_set)(struct c2port_device *dev, int status);
+};
+
+/*
+ * Exported functions
+ */
+
+extern struct c2port_device *c2port_device_register(char *name,
+ struct c2port_ops *ops, void *devdata);
+extern void c2port_device_unregister(struct c2port_device *dev);
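A board-support sketch for the registration interface above; the GPIO helpers, device name and flash geometry are hypothetical, and the ERR_PTR-on-failure convention of c2port_device_register() is assumed:

    #include <linux/c2port.h>
    #include <linux/err.h>
    #include <linux/init.h>

    /* board-specific line helpers, implemented elsewhere (hypothetical) */
    void board_c2port_access(struct c2port_device *dev, int status);
    void board_c2d_dir(struct c2port_device *dev, int dir);
    int board_c2d_get(struct c2port_device *dev);
    void board_c2d_set(struct c2port_device *dev, int status);
    void board_c2ck_set(struct c2port_device *dev, int status);

    static struct c2port_ops board_c2port_ops = {
            .block_size     = 512,                  /* flash block size in bytes */
            .blocks_num     = 30,                   /* 15 KiB of flash */
            .access         = board_c2port_access,
            .c2d_dir        = board_c2d_dir,
            .c2d_get        = board_c2d_get,
            .c2d_set        = board_c2d_set,
            .c2ck_set       = board_c2ck_set,
    };

    static struct c2port_device *c2dev;

    static int __init board_c2port_init(void)
    {
            c2dev = c2port_device_register("uc", &board_c2port_ops, NULL);
            if (IS_ERR(c2dev))
                    return PTR_ERR(c2dev);
            return 0;
    }

    static void __exit board_c2port_exit(void)
    {
            c2port_device_unregister(c2dev);
    }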
diff --git a/include/linux/cache.h b/include/linux/cache.h
new file mode 100644
index 000000000..17e7e82d2
--- /dev/null
+++ b/include/linux/cache.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_CACHE_H
+#define __LINUX_CACHE_H
+
+#include <uapi/linux/kernel.h>
+#include <asm/cache.h>
+
+#ifndef L1_CACHE_ALIGN
+#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
+#endif
+
+#ifndef SMP_CACHE_BYTES
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef ____cacheline_aligned
+#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+#endif
+
+#ifndef ____cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define ____cacheline_aligned_in_smp ____cacheline_aligned
+#else
+#define ____cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+#ifndef __cacheline_aligned
+#define __cacheline_aligned \
+ __attribute__((__aligned__(SMP_CACHE_BYTES), \
+ __section__(".data..cacheline_aligned")))
+#endif /* __cacheline_aligned */
+
+#ifndef __cacheline_aligned_in_smp
+#ifdef CONFIG_SMP
+#define __cacheline_aligned_in_smp __cacheline_aligned
+#else
+#define __cacheline_aligned_in_smp
+#endif /* CONFIG_SMP */
+#endif
+
+/*
+ * The maximum alignment needed for some critical structures
+ * These could be inter-node cacheline sizes/L3 cacheline
+ * size etc. Define this in asm/cache.h for your arch
+ */
+#ifndef INTERNODE_CACHE_SHIFT
+#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
+#endif
+
+#if !defined(____cacheline_internodealigned_in_smp)
+#if defined(CONFIG_SMP)
+#define ____cacheline_internodealigned_in_smp \
+ __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
+#else
+#define ____cacheline_internodealigned_in_smp
+#endif
+#endif
+
+#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+#define cache_line_size() L1_CACHE_BYTES
+#endif
+
+#endif /* __LINUX_CACHE_H */
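A sketch of the two most common uses of the annotations above: __read_mostly for globals that are written rarely but read on hot paths, and ____cacheline_aligned_in_smp to keep independently updated fields from sharing a cache line; the structure is hypothetical:

    #include <linux/cache.h>
    #include <linux/spinlock.h>

    static unsigned long example_feature_flags __read_mostly;  /* set once at init */

    struct example_queue_stats {
            spinlock_t rx_lock;
            unsigned long rx_packets;

            /* start a new cache line so TX updates do not bounce the RX line */
            spinlock_t tx_lock ____cacheline_aligned_in_smp;
            unsigned long tx_packets;
    };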
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
new file mode 100644
index 000000000..3daf5ed39
--- /dev/null
+++ b/include/linux/cacheinfo.h
@@ -0,0 +1,100 @@
+#ifndef _LINUX_CACHEINFO_H
+#define _LINUX_CACHEINFO_H
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+
+struct device_node;
+struct attribute;
+
+enum cache_type {
+ CACHE_TYPE_NOCACHE = 0,
+ CACHE_TYPE_INST = BIT(0),
+ CACHE_TYPE_DATA = BIT(1),
+ CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA,
+ CACHE_TYPE_UNIFIED = BIT(2),
+};
+
+/**
+ * struct cacheinfo - represent a cache leaf node
+ * @type: type of the cache - data, inst or unified
+ * @level: represents the level in the multi-level cache hierarchy
+ * @coherency_line_size: size of each cache line usually representing
+ * the minimum amount of data that gets transferred from memory
+ * @number_of_sets: total number of sets; a set is a collection of cache
+ * lines sharing the same index
+ * @ways_of_associativity: number of ways in which a particular memory
+ * block can be placed in the cache
+ * @physical_line_partition: number of physical cache lines sharing the
+ * same cachetag
+ * @size: Total size of the cache
+ * @shared_cpu_map: logical cpumask representing all the cpus sharing
+ * this cache node
+ * @attributes: bitfield representing various cache attributes
+ * @of_node: if devicetree is used, this represents either the cpu node in
+ * case there's no explicit cache node or the cache node itself in the
+ * device tree
+ * @disable_sysfs: indicates whether this node is visible to the user via
+ * sysfs or not
+ * @priv: pointer to any private data structure specific to particular
+ * cache design
+ *
+ * While @of_node, @disable_sysfs and @priv are used for internal
+ * bookkeeping, the remaining members form the core properties of the cache.
+ */
+struct cacheinfo {
+ enum cache_type type;
+ unsigned int level;
+ unsigned int coherency_line_size;
+ unsigned int number_of_sets;
+ unsigned int ways_of_associativity;
+ unsigned int physical_line_partition;
+ unsigned int size;
+ cpumask_t shared_cpu_map;
+ unsigned int attributes;
+#define CACHE_WRITE_THROUGH BIT(0)
+#define CACHE_WRITE_BACK BIT(1)
+#define CACHE_WRITE_POLICY_MASK \
+ (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK)
+#define CACHE_READ_ALLOCATE BIT(2)
+#define CACHE_WRITE_ALLOCATE BIT(3)
+#define CACHE_ALLOCATE_POLICY_MASK \
+ (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+
+ struct device_node *of_node;
+ bool disable_sysfs;
+ void *priv;
+};
+
+struct cpu_cacheinfo {
+ struct cacheinfo *info_list;
+ unsigned int num_levels;
+ unsigned int num_leaves;
+};
+
+/*
+ * Helpers to make sure "func" is executed on the cpu whose cache
+ * attributes are being detected
+ */
+#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
+static inline void _##func(void *ret) \
+{ \
+ int cpu = smp_processor_id(); \
+ *(int *)ret = __##func(cpu); \
+} \
+ \
+int func(unsigned int cpu) \
+{ \
+ int ret; \
+ smp_call_function_single(cpu, _##func, &ret, true); \
+ return ret; \
+}
+
+struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
+int init_cache_level(unsigned int cpu);
+int populate_cache_leaves(unsigned int cpu);
+
+const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
+
+#endif /* _LINUX_CACHEINFO_H */
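DEFINE_SMP_CALL_CACHE_FUNCTION() above expects the architecture to provide a double-underscore variant that runs on the CPU being probed. A minimal arch-side sketch with made-up level and leaf counts:

    #include <linux/cacheinfo.h>

    static int __init_cache_level(unsigned int cpu)
    {
            struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

            /* hypothetical topology: split L1 plus a unified L2 */
            this_cpu_ci->num_levels = 2;
            this_cpu_ci->num_leaves = 3;    /* L1I + L1D + L2 */
            return 0;
    }

    /* emits init_cache_level(cpu), which cross-calls __init_cache_level() */
    DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)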
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
new file mode 100644
index 000000000..a0875001b
--- /dev/null
+++ b/include/linux/can/core.h
@@ -0,0 +1,61 @@
+/*
+ * linux/can/core.h
+ *
+ * Prototypes and definitions for CAN protocol modules using the PF_CAN core
+ *
+ * Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
+ * Urs Thuermann <urs.thuermann@volkswagen.de>
+ * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ */
+
+#ifndef _CAN_CORE_H
+#define _CAN_CORE_H
+
+#include <linux/can.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#define CAN_VERSION "20120528"
+
+/* increment this number each time you change some user-space interface */
+#define CAN_ABI_VERSION "9"
+
+#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
+
+#define DNAME(dev) ((dev) ? (dev)->name : "any")
+
+/**
+ * struct can_proto - CAN protocol structure
+ * @type: type argument in socket() syscall, e.g. SOCK_DGRAM.
+ * @protocol: protocol number in socket() syscall.
+ * @ops: pointer to struct proto_ops for sock->ops.
+ * @prot: pointer to struct proto structure.
+ */
+struct can_proto {
+ int type;
+ int protocol;
+ const struct proto_ops *ops;
+ struct proto *prot;
+};
+
+/* function prototypes for the CAN networklayer core (af_can.c) */
+
+extern int can_proto_register(const struct can_proto *cp);
+extern void can_proto_unregister(const struct can_proto *cp);
+
+extern int can_rx_register(struct net_device *dev, canid_t can_id,
+ canid_t mask,
+ void (*func)(struct sk_buff *, void *),
+ void *data, char *ident);
+
+extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
+ canid_t mask,
+ void (*func)(struct sk_buff *, void *),
+ void *data);
+
+extern int can_send(struct sk_buff *skb, int loop);
+extern int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#endif /* !_CAN_CORE_H */
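A sketch of the receive-side registration declared above; the callback, filter value and identifier string are hypothetical, and CAN_SFF_MASK comes from the UAPI <linux/can.h> already pulled in by this header:

    #include <linux/can.h>
    #include <linux/can/core.h>

    static void example_can_rcv(struct sk_buff *skb, void *data)
    {
            const struct can_frame *cf = (const struct can_frame *)skb->data;

            pr_info("got CAN id 0x%03x, dlc %d\n", cf->can_id, cf->can_dlc);
    }

    static int example_can_listen(struct net_device *dev)
    {
            /* deliver every frame whose 11-bit standard id equals 0x123 */
            return can_rx_register(dev, 0x123, CAN_SFF_MASK,
                                   example_can_rcv, NULL, "example");
    }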
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
new file mode 100644
index 000000000..c3a9c8fc6
--- /dev/null
+++ b/include/linux/can/dev.h
@@ -0,0 +1,146 @@
+/*
+ * linux/can/dev.h
+ *
+ * Definitions for the CAN network device driver interface
+ *
+ * Copyright (C) 2006 Andrey Volkov <avolkov@varma-el.com>
+ * Varma Electronics Oy
+ *
+ * Copyright (C) 2008 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ */
+
+#ifndef _CAN_DEV_H
+#define _CAN_DEV_H
+
+#include <linux/can.h>
+#include <linux/can/netlink.h>
+#include <linux/can/error.h>
+#include <linux/can/led.h>
+
+/*
+ * CAN mode
+ */
+enum can_mode {
+ CAN_MODE_STOP = 0,
+ CAN_MODE_START,
+ CAN_MODE_SLEEP
+};
+
+/*
+ * CAN common private data
+ */
+struct can_priv {
+ struct can_device_stats can_stats;
+
+ struct can_bittiming bittiming, data_bittiming;
+ const struct can_bittiming_const *bittiming_const,
+ *data_bittiming_const;
+ struct can_clock clock;
+
+ enum can_state state;
+ u32 ctrlmode;
+ u32 ctrlmode_supported;
+
+ int restart_ms;
+ struct timer_list restart_timer;
+
+ int (*do_set_bittiming)(struct net_device *dev);
+ int (*do_set_data_bittiming)(struct net_device *dev);
+ int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
+ int (*do_get_state)(const struct net_device *dev,
+ enum can_state *state);
+ int (*do_get_berr_counter)(const struct net_device *dev,
+ struct can_berr_counter *bec);
+
+ unsigned int echo_skb_max;
+ struct sk_buff **echo_skb;
+
+#ifdef CONFIG_CAN_LEDS
+ struct led_trigger *tx_led_trig;
+ char tx_led_trig_name[CAN_LED_NAME_SZ];
+ struct led_trigger *rx_led_trig;
+ char rx_led_trig_name[CAN_LED_NAME_SZ];
+ struct led_trigger *rxtx_led_trig;
+ char rxtx_led_trig_name[CAN_LED_NAME_SZ];
+#endif
+};
+
+/*
+ * get_can_dlc(value) - helper macro to cast a given data length code (dlc)
+ * to __u8 and clamp the dlc value to a maximum of 8 bytes.
+ *
+ * To be used in the CAN netdriver receive path to ensure conformance with
+ * ISO 11898-1 Chapter 8.4.2.3 (DLC field)
+ */
+#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
+#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+
+/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
+static inline int can_dropped_invalid_skb(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (skb->protocol == htons(ETH_P_CAN)) {
+ if (unlikely(skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN))
+ goto inval_skb;
+ } else if (skb->protocol == htons(ETH_P_CANFD)) {
+ if (unlikely(skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN))
+ goto inval_skb;
+ } else
+ goto inval_skb;
+
+ return 0;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return 1;
+}
+
+static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+{
+ /* the CAN specific type of skb is identified by its data length */
+ return skb->len == CANFD_MTU;
+}
+
+/* get data length from can_dlc with sanitized can_dlc */
+u8 can_dlc2len(u8 can_dlc);
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_len2dlc(u8 len);
+
+struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
+void free_candev(struct net_device *dev);
+
+/* a candev safe wrapper around netdev_priv */
+struct can_priv *safe_candev_priv(struct net_device *dev);
+
+int open_candev(struct net_device *dev);
+void close_candev(struct net_device *dev);
+int can_change_mtu(struct net_device *dev, int new_mtu);
+
+int register_candev(struct net_device *dev);
+void unregister_candev(struct net_device *dev);
+
+int can_restart_now(struct net_device *dev);
+void can_bus_off(struct net_device *dev);
+
+void can_change_state(struct net_device *dev, struct can_frame *cf,
+ enum can_state tx_state, enum can_state rx_state);
+
+void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx);
+unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
+void can_free_echo_skb(struct net_device *dev, unsigned int idx);
+
+struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
+struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+ struct can_frame **cf);
+
+#endif /* !_CAN_DEV_H */
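A sketch of the allocation and registration flow a controller driver builds on the declarations above; the private structure, clock rate and bit-timing constants are hypothetical, and <linux/netdevice.h> is assumed for netdev_priv():

    #include <linux/can/dev.h>
    #include <linux/netdevice.h>

    struct example_can_priv {
            struct can_priv can;    /* must stay first; candev relies on it */
            void __iomem *regs;     /* hypothetical device registers */
    };

    static const struct can_bittiming_const example_bittiming_const = {
            .name = "example",
            .tseg1_min = 1, .tseg1_max = 16,
            .tseg2_min = 1, .tseg2_max = 8,
            .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1,
    };

    static int example_can_setup(void)
    {
            struct net_device *ndev;
            struct example_can_priv *priv;

            ndev = alloc_candev(sizeof(*priv), 16 /* echo skbs */);
            if (!ndev)
                    return -ENOMEM;

            priv = netdev_priv(ndev);
            priv->can.clock.freq = 8000000;         /* 8 MHz CAN clock */
            priv->can.bittiming_const = &example_bittiming_const;
            priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK;

            return register_candev(ndev);   /* undo with unregister_candev()
                                             * and free_candev() on teardown */
    }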
diff --git a/include/linux/can/led.h b/include/linux/can/led.h
new file mode 100644
index 000000000..146de4506
--- /dev/null
+++ b/include/linux/can/led.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _CAN_LED_H
+#define _CAN_LED_H
+
+#include <linux/if.h>
+#include <linux/leds.h>
+
+enum can_led_event {
+ CAN_LED_EVENT_OPEN,
+ CAN_LED_EVENT_STOP,
+ CAN_LED_EVENT_TX,
+ CAN_LED_EVENT_RX,
+};
+
+#ifdef CONFIG_CAN_LEDS
+
+/* keep space for interface name + "-tx"/"-rx"/"-rxtx"
+ * suffix and null terminator
+ */
+#define CAN_LED_NAME_SZ (IFNAMSIZ + 6)
+
+void can_led_event(struct net_device *netdev, enum can_led_event event);
+void devm_can_led_init(struct net_device *netdev);
+int __init can_led_notifier_init(void);
+void __exit can_led_notifier_exit(void);
+
+#else
+
+static inline void can_led_event(struct net_device *netdev,
+ enum can_led_event event)
+{
+}
+static inline void devm_can_led_init(struct net_device *netdev)
+{
+}
+static inline int can_led_notifier_init(void)
+{
+ return 0;
+}
+static inline void can_led_notifier_exit(void)
+{
+}
+
+#endif
+
+#endif /* !_CAN_LED_H */
diff --git a/include/linux/can/platform/cc770.h b/include/linux/can/platform/cc770.h
new file mode 100644
index 000000000..78b2d44f0
--- /dev/null
+++ b/include/linux/can/platform/cc770.h
@@ -0,0 +1,33 @@
+#ifndef _CAN_PLATFORM_CC770_H
+#define _CAN_PLATFORM_CC770_H
+
+/* CPU Interface Register (0x02) */
+#define CPUIF_CEN 0x01 /* Clock Out Enable */
+#define CPUIF_MUX 0x04 /* Multiplex */
+#define CPUIF_SLP 0x08 /* Sleep */
+#define CPUIF_PWD 0x10 /* Power Down Mode */
+#define CPUIF_DMC 0x20 /* Divide Memory Clock */
+#define CPUIF_DSC 0x40 /* Divide System Clock */
+#define CPUIF_RST 0x80 /* Hardware Reset Status */
+
+/* Clock Out Register (0x1f) */
+#define CLKOUT_CD_MASK 0x0f /* Clock Divider mask */
+#define CLKOUT_SL_MASK 0x30 /* Slew Rate mask */
+#define CLKOUT_SL_SHIFT 4
+
+/* Bus Configuration Register (0x2f) */
+#define BUSCFG_DR0 0x01 /* Disconnect RX0 Input / Select RX input */
+#define BUSCFG_DR1 0x02 /* Disconnect RX1 Input / Silent mode */
+#define BUSCFG_DT1 0x08 /* Disconnect TX1 Output */
+#define BUSCFG_POL 0x20 /* Polarity dominant or recessive */
+#define BUSCFG_CBY 0x40 /* Input Comparator Bypass */
+
+struct cc770_platform_data {
+ u32 osc_freq; /* CAN bus oscillator frequency in Hz */
+
+ u8 cir; /* CPU Interface Register */
+ u8 cor; /* Clock Out Register */
+ u8 bcr; /* Bus Configuration Register */
+};
+
+#endif /* !_CAN_PLATFORM_CC770_H */
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
new file mode 100644
index 000000000..d44fcae27
--- /dev/null
+++ b/include/linux/can/platform/mcp251x.h
@@ -0,0 +1,21 @@
+#ifndef _CAN_PLATFORM_MCP251X_H
+#define _CAN_PLATFORM_MCP251X_H
+
+/*
+ *
+ * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
+ *
+ */
+
+#include <linux/spi/spi.h>
+
+/*
+ * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
+ * @oscillator_frequency: - oscillator frequency in Hz
+ */
+
+struct mcp251x_platform_data {
+ unsigned long oscillator_frequency;
+};
+
+#endif /* !_CAN_PLATFORM_MCP251X_H */
diff --git a/include/linux/can/platform/rcar_can.h b/include/linux/can/platform/rcar_can.h
new file mode 100644
index 000000000..0f4a2f3df
--- /dev/null
+++ b/include/linux/can/platform/rcar_can.h
@@ -0,0 +1,17 @@
+#ifndef _CAN_PLATFORM_RCAR_CAN_H_
+#define _CAN_PLATFORM_RCAR_CAN_H_
+
+#include <linux/types.h>
+
+/* Clock Select Register settings */
+enum CLKR {
+ CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */
+ CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */
+ CLKR_CLKEXT = 3 /* Externally input clock */
+};
+
+struct rcar_can_platform_data {
+ enum CLKR clock_select; /* Clock source select */
+};
+
+#endif /* !_CAN_PLATFORM_RCAR_CAN_H_ */
diff --git a/include/linux/can/platform/sja1000.h b/include/linux/can/platform/sja1000.h
new file mode 100644
index 000000000..93570b61e
--- /dev/null
+++ b/include/linux/can/platform/sja1000.h
@@ -0,0 +1,35 @@
+#ifndef _CAN_PLATFORM_SJA1000_H
+#define _CAN_PLATFORM_SJA1000_H
+
+/* clock divider register */
+#define CDR_CLKOUT_MASK 0x07
+#define CDR_CLK_OFF 0x08 /* Clock off (CLKOUT pin) */
+#define CDR_RXINPEN 0x20 /* TX1 output is RX irq output */
+#define CDR_CBP 0x40 /* CAN input comparator bypass */
+#define CDR_PELICAN 0x80 /* PeliCAN mode */
+
+/* output control register */
+#define OCR_MODE_BIPHASE 0x00
+#define OCR_MODE_TEST 0x01
+#define OCR_MODE_NORMAL 0x02
+#define OCR_MODE_CLOCK 0x03
+#define OCR_MODE_MASK 0x07
+#define OCR_TX0_INVERT 0x04
+#define OCR_TX0_PULLDOWN 0x08
+#define OCR_TX0_PULLUP 0x10
+#define OCR_TX0_PUSHPULL 0x18
+#define OCR_TX1_INVERT 0x20
+#define OCR_TX1_PULLDOWN 0x40
+#define OCR_TX1_PULLUP 0x80
+#define OCR_TX1_PUSHPULL 0xc0
+#define OCR_TX_MASK 0xfc
+#define OCR_TX_SHIFT 2
+
+struct sja1000_platform_data {
+ u32 osc_freq; /* CAN bus oscillator frequency in Hz */
+
+ u8 ocr; /* output control register */
+ u8 cdr; /* clock divider register */
+};
+
+#endif /* !_CAN_PLATFORM_SJA1000_H */
diff --git a/include/linux/can/platform/ti_hecc.h b/include/linux/can/platform/ti_hecc.h
new file mode 100644
index 000000000..a52f47ca6
--- /dev/null
+++ b/include/linux/can/platform/ti_hecc.h
@@ -0,0 +1,44 @@
+#ifndef _CAN_PLATFORM_TI_HECC_H
+#define _CAN_PLATFORM_TI_HECC_H
+
+/*
+ * TI HECC (High End CAN Controller) driver platform header
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed as is WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/**
+ * struct hecc_platform_data - HECC Platform Data
+ *
+ * @scc_hecc_offset: mostly 0 - should really never change
+ * @scc_ram_offset: SCC RAM offset
+ * @hecc_ram_offset: HECC RAM offset
+ * @mbx_offset: Mailbox RAM offset
+ * @int_line: Interrupt line to use - 0 or 1
+ * @version: version for future use
+ * @transceiver_switch: platform specific callback fn for transceiver control
+ *
+ * Platform data structure to get all platform specific settings.
+ * This structure also accounts for the fact that the IP may have different
+ * RAM and mailbox offsets for different SoCs.
+ */
+struct ti_hecc_platform_data {
+ u32 scc_hecc_offset;
+ u32 scc_ram_offset;
+ u32 hecc_ram_offset;
+ u32 mbx_offset;
+ u32 int_line;
+ u32 version;
+ void (*transceiver_switch) (int);
+};
+#endif /* !_CAN_PLATFORM_TI_HECC_H */
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
new file mode 100644
index 000000000..b6a52a4b4
--- /dev/null
+++ b/include/linux/can/skb.h
@@ -0,0 +1,78 @@
+/*
+ * linux/can/skb.h
+ *
+ * Definitions for the CAN network socket buffer
+ *
+ * Copyright (C) 2012 Oliver Hartkopp <socketcan@hartkopp.net>
+ *
+ */
+
+#ifndef _CAN_SKB_H
+#define _CAN_SKB_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <net/sock.h>
+
+/*
+ * The struct can_skb_priv is used to transport additional information along
+ * with the stored struct can(fd)_frame that can not be contained in existing
+ * struct sk_buff elements.
+ * N.B. that this information must not be modified in cloned CAN sk_buffs.
+ * To modify the CAN frame content or the struct can_skb_priv content
+ * skb_copy() needs to be used instead of skb_clone().
+ */
+
+/**
+ * struct can_skb_priv - private additional data inside CAN sk_buffs
+ * @ifindex: ifindex of the first interface the CAN frame appeared on
+ * @cf: align to the following CAN frame at skb->data
+ */
+struct can_skb_priv {
+ int ifindex;
+ struct can_frame cf[0];
+};
+
+static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
+{
+ return (struct can_skb_priv *)(skb->head);
+}
+
+static inline void can_skb_reserve(struct sk_buff *skb)
+{
+ skb_reserve(skb, sizeof(struct can_skb_priv));
+}
+
+static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
+{
+ if (sk) {
+ sock_hold(sk);
+ skb->destructor = sock_efree;
+ skb->sk = sk;
+ }
+}
+
+/*
+ * returns an unshared skb owned by the original sock to be echoed back
+ */
+static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
+{
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (likely(nskb)) {
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
+ } else {
+ kfree_skb(skb);
+ return NULL;
+ }
+ }
+
+ /* we can assume we have an unshared skb with a proper owner */
+ return skb;
+}
+
+#endif /* !_CAN_SKB_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
new file mode 100644
index 000000000..af9f0b9e8
--- /dev/null
+++ b/include/linux/capability.h
@@ -0,0 +1,251 @@
+/*
+ * This is <linux/capability.h>
+ *
+ * Andrew G. Morgan <morgan@kernel.org>
+ * Alexander Kjeldaas <astor@guardian.no>
+ * with help from Aleph1, Roland Buresund and Andrew Main.
+ *
+ * See here for the libcap library ("POSIX draft" compliance):
+ *
+ * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/
+ */
+#ifndef _LINUX_CAPABILITY_H
+#define _LINUX_CAPABILITY_H
+
+#include <uapi/linux/capability.h>
+
+
+#define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3
+#define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
+
+extern int file_caps_enabled;
+
+typedef struct kernel_cap_struct {
+ __u32 cap[_KERNEL_CAPABILITY_U32S];
+} kernel_cap_t;
+
+/* exact same as vfs_cap_data but in cpu endian and always filled completely */
+struct cpu_vfs_cap_data {
+ __u32 magic_etc;
+ kernel_cap_t permitted;
+ kernel_cap_t inheritable;
+};
+
+#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
+#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
+
+
+struct file;
+struct inode;
+struct dentry;
+struct user_namespace;
+
+struct user_namespace *current_user_ns(void);
+
+extern const kernel_cap_t __cap_empty_set;
+extern const kernel_cap_t __cap_init_eff_set;
+
+/*
+ * Internal kernel functions only
+ */
+
+#define CAP_FOR_EACH_U32(__capi) \
+ for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
+
+/*
+ * CAP_FS_MASK and CAP_NFSD_MASKS:
+ *
+ * The fs mask is all the privileges that fsuid==0 historically meant.
+ * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE.
+ *
+ * It has never meant setting security.* and trusted.* xattrs.
+ *
+ * We could also define fsmask as follows:
+ * 1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions
+ * 2. The security.* and trusted.* xattrs are fs-related MAC permissions
+ */
+
+# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
+ | CAP_TO_MASK(CAP_MKNOD) \
+ | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
+ | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
+ | CAP_TO_MASK(CAP_FOWNER) \
+ | CAP_TO_MASK(CAP_FSETID))
+
+# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE))
+
+#if _KERNEL_CAPABILITY_U32S != 2
+# error Fix up hand-coded capability macro initializers
+#else /* HAND-CODED capability initializers */
+
+#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1)
+#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1)
+
+# define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }})
+# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }})
+# define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
+ | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
+ CAP_FS_MASK_B1 } })
+# define CAP_NFSD_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \
+ | CAP_TO_MASK(CAP_SYS_RESOURCE), \
+ CAP_FS_MASK_B1 } })
+
+#endif /* _KERNEL_CAPABILITY_U32S != 2 */
+
+# define cap_clear(c) do { (c) = __cap_empty_set; } while (0)
+
+#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
+#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
+#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag))
+
+#define CAP_BOP_ALL(c, a, b, OP) \
+do { \
+ unsigned __capi; \
+ CAP_FOR_EACH_U32(__capi) { \
+ c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \
+ } \
+} while (0)
+
+#define CAP_UOP_ALL(c, a, OP) \
+do { \
+ unsigned __capi; \
+ CAP_FOR_EACH_U32(__capi) { \
+ c.cap[__capi] = OP a.cap[__capi]; \
+ } \
+} while (0)
+
+static inline kernel_cap_t cap_combine(const kernel_cap_t a,
+ const kernel_cap_t b)
+{
+ kernel_cap_t dest;
+ CAP_BOP_ALL(dest, a, b, |);
+ return dest;
+}
+
+static inline kernel_cap_t cap_intersect(const kernel_cap_t a,
+ const kernel_cap_t b)
+{
+ kernel_cap_t dest;
+ CAP_BOP_ALL(dest, a, b, &);
+ return dest;
+}
+
+static inline kernel_cap_t cap_drop(const kernel_cap_t a,
+ const kernel_cap_t drop)
+{
+ kernel_cap_t dest;
+ CAP_BOP_ALL(dest, a, drop, &~);
+ return dest;
+}
+
+static inline kernel_cap_t cap_invert(const kernel_cap_t c)
+{
+ kernel_cap_t dest;
+ CAP_UOP_ALL(dest, c, ~);
+ return dest;
+}
+
+static inline int cap_isclear(const kernel_cap_t a)
+{
+ unsigned __capi;
+ CAP_FOR_EACH_U32(__capi) {
+ if (a.cap[__capi] != 0)
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Check if "a" is a subset of "set".
+ * return 1 if ALL of the capabilities in "a" are also in "set"
+ * cap_issubset(0101, 1111) will return 1
+ * return 0 if ANY of the capabilities in "a" are not in "set"
+ * cap_issubset(1111, 0101) will return 0
+ */
+static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
+{
+ kernel_cap_t dest;
+ dest = cap_drop(a, set);
+ return cap_isclear(dest);
+}
+
+/* Used to decide between falling back on the old suser() or fsuser(). */
+
+static inline int cap_is_fs_cap(int cap)
+{
+ const kernel_cap_t __cap_fs_set = CAP_FS_SET;
+ return !!(CAP_TO_MASK(cap) & __cap_fs_set.cap[CAP_TO_INDEX(cap)]);
+}
+
+static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
+{
+ const kernel_cap_t __cap_fs_set = CAP_FS_SET;
+ return cap_drop(a, __cap_fs_set);
+}
+
+static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
+ const kernel_cap_t permitted)
+{
+ const kernel_cap_t __cap_fs_set = CAP_FS_SET;
+ return cap_combine(a,
+ cap_intersect(permitted, __cap_fs_set));
+}
+
+static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
+{
+ const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
+ return cap_drop(a, __cap_fs_set);
+}
+
+static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
+ const kernel_cap_t permitted)
+{
+ const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
+ return cap_combine(a,
+ cap_intersect(permitted, __cap_nfsd_set));
+}
+
+#ifdef CONFIG_MULTIUSER
+extern bool has_capability(struct task_struct *t, int cap);
+extern bool has_ns_capability(struct task_struct *t,
+ struct user_namespace *ns, int cap);
+extern bool has_capability_noaudit(struct task_struct *t, int cap);
+extern bool has_ns_capability_noaudit(struct task_struct *t,
+ struct user_namespace *ns, int cap);
+extern bool capable(int cap);
+extern bool ns_capable(struct user_namespace *ns, int cap);
+#else
+static inline bool has_capability(struct task_struct *t, int cap)
+{
+ return true;
+}
+static inline bool has_ns_capability(struct task_struct *t,
+ struct user_namespace *ns, int cap)
+{
+ return true;
+}
+static inline bool has_capability_noaudit(struct task_struct *t, int cap)
+{
+ return true;
+}
+static inline bool has_ns_capability_noaudit(struct task_struct *t,
+ struct user_namespace *ns, int cap)
+{
+ return true;
+}
+static inline bool capable(int cap)
+{
+ return true;
+}
+static inline bool ns_capable(struct user_namespace *ns, int cap)
+{
+ return true;
+}
+#endif /* CONFIG_MULTIUSER */
+extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
+
+/* audit system wants to get cap info from files as well */
+extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+
+#endif /* !_LINUX_CAPABILITY_H */
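A small sketch of the capability-set helpers above, assuming a kernel
context; example_caps_are_fs_only is hypothetical:

static int example_caps_are_fs_only(void)
{
	kernel_cap_t needed = CAP_EMPTY_SET;

	cap_raise(needed, CAP_CHOWN);
	cap_raise(needed, CAP_FOWNER);

	/* 1 if every capability raised above is part of the fs mask */
	return cap_issubset(needed, CAP_FS_SET);
}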
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
new file mode 100644
index 000000000..8cc10411b
--- /dev/null
+++ b/include/linux/cb710.h
@@ -0,0 +1,208 @@
+/*
+ * cb710/cb710.h
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_CB710_DRIVER_H
+#define LINUX_CB710_DRIVER_H
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+
+struct cb710_slot;
+
+typedef int (*cb710_irq_handler_t)(struct cb710_slot *);
+
+/* per-virtual-slot structure */
+struct cb710_slot {
+ struct platform_device pdev;
+ void __iomem *iobase;
+ cb710_irq_handler_t irq_handler;
+};
+
+/* per-device structure */
+struct cb710_chip {
+ struct pci_dev *pdev;
+ void __iomem *iobase;
+ unsigned platform_id;
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ atomic_t slot_refs_count;
+#endif
+ unsigned slot_mask;
+ unsigned slots;
+ spinlock_t irq_lock;
+ struct cb710_slot slot[0];
+};
+
+/* NOTE: cb710_chip.slots is modified only during device init/exit,
+ * and those code paths are serialized with respect to each other */
+
+/* cb710_chip.slot_mask values */
+#define CB710_SLOT_MMC 1
+#define CB710_SLOT_MS 2
+#define CB710_SLOT_SM 4
+
+/* slot port accessors - so the logic is more clear in the code */
+#define CB710_PORT_ACCESSORS(t) \
+static inline void cb710_write_port_##t(struct cb710_slot *slot, \
+ unsigned port, u##t value) \
+{ \
+ iowrite##t(value, slot->iobase + port); \
+} \
+ \
+static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \
+ unsigned port) \
+{ \
+ return ioread##t(slot->iobase + port); \
+} \
+ \
+static inline void cb710_modify_port_##t(struct cb710_slot *slot, \
+ unsigned port, u##t set, u##t clear) \
+{ \
+ iowrite##t( \
+ (ioread##t(slot->iobase + port) & ~clear)|set, \
+ slot->iobase + port); \
+}
+
+CB710_PORT_ACCESSORS(8)
+CB710_PORT_ACCESSORS(16)
+CB710_PORT_ACCESSORS(32)
+
+void cb710_pci_update_config_reg(struct pci_dev *pdev,
+ int reg, uint32_t and, uint32_t xor);
+void cb710_set_irq_handler(struct cb710_slot *slot,
+ cb710_irq_handler_t handler);
+
+/* some device struct walking */
+
+static inline struct cb710_slot *cb710_pdev_to_slot(
+ struct platform_device *pdev)
+{
+ return container_of(pdev, struct cb710_slot, pdev);
+}
+
+static inline struct cb710_chip *cb710_slot_to_chip(struct cb710_slot *slot)
+{
+ return dev_get_drvdata(slot->pdev.dev.parent);
+}
+
+static inline struct device *cb710_slot_dev(struct cb710_slot *slot)
+{
+ return &slot->pdev.dev;
+}
+
+static inline struct device *cb710_chip_dev(struct cb710_chip *chip)
+{
+ return &chip->pdev->dev;
+}
+
+/* debugging aids */
+
+#ifdef CONFIG_CB710_DEBUG
+void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
+#else
+#define cb710_dump_regs(c, d) do {} while (0)
+#endif
+
+#define CB710_DUMP_REGS_MMC 0x0F
+#define CB710_DUMP_REGS_MS 0x30
+#define CB710_DUMP_REGS_SM 0xC0
+#define CB710_DUMP_REGS_ALL 0xFF
+#define CB710_DUMP_REGS_MASK 0xFF
+
+#define CB710_DUMP_ACCESS_8 0x100
+#define CB710_DUMP_ACCESS_16 0x200
+#define CB710_DUMP_ACCESS_32 0x400
+#define CB710_DUMP_ACCESS_ALL 0x700
+#define CB710_DUMP_ACCESS_MASK 0x700
+
+#endif /* LINUX_CB710_DRIVER_H */
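A sketch of the generated accessors in use, assuming a kernel context;
the port offsets and bit masks below are made up for illustration (the
real offsets live in the individual cb710 slot drivers):

static void example_cb710_touch_regs(struct cb710_slot *slot)
{
	u8 status = cb710_read_port_8(slot, 0x08);	/* hypothetical port */

	cb710_write_port_16(slot, 0x10, 0xabcd);	/* hypothetical port */

	/* read-modify-write: set bit 0, clear bit 7 */
	cb710_modify_port_8(slot, 0x08, 0x01, 0x80);

	(void)status;
}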
+/*
+ * cb710/sgbuf2.h
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_CB710_SG_H
+#define LINUX_CB710_SG_H
+
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+
+/*
+ * 32-bit PIO mapping sg iterator
+ *
+ * Hides scatterlist access issues - fragment boundaries, alignment, page
+ * mapping - for drivers using 32-bit-word-at-a-time-PIO (ie. PCI devices
+ * without DMA support).
+ *
+ * Best-case reading (transfer from device):
+ * sg_miter_start(, SG_MITER_TO_SG);
+ * cb710_sg_dwiter_write_from_io();
+ * sg_miter_stop();
+ *
+ * Best-case writing (transfer to device):
+ * sg_miter_start(, SG_MITER_FROM_SG);
+ * cb710_sg_dwiter_read_to_io();
+ * sg_miter_stop();
+ */
+
+uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter);
+void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data);
+
+/**
+ * cb710_sg_dwiter_write_from_io - transfer data to mapped buffer from 32-bit IO port
+ * @miter: sg mapping iter
+ * @port: PIO port - IO or MMIO address
+ * @count: number of 32-bit words to transfer
+ *
+ * Description:
+ * Reads @count 32-bit words from register @port and stores them in
+ * the buffer iterated by @miter. Data that would overflow the buffer
+ * is silently ignored. The iterator is advanced by 4*@count bytes,
+ * or to the buffer's end, whichever is closer.
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+static inline void cb710_sg_dwiter_write_from_io(struct sg_mapping_iter *miter,
+ void __iomem *port, size_t count)
+{
+ while (count-- > 0)
+ cb710_sg_dwiter_write_next_block(miter, ioread32(port));
+}
+
+/**
+ * cb710_sg_dwiter_read_to_io - transfer data to 32-bit IO port from mapped buffer
+ * @miter: sg mapping iter
+ * @port: PIO port - IO or MMIO address
+ * @count: number of 32-bit words to transfer
+ *
+ * Description:
+ * Writes @count 32-bit words to register @port from the buffer iterated
+ * through @miter. If the buffer ends before @count words are written,
+ * the missing data is replaced by zeroes. @miter is advanced by 4*@count
+ * bytes, or to the buffer's end, whichever is closer.
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+static inline void cb710_sg_dwiter_read_to_io(struct sg_mapping_iter *miter,
+ void __iomem *port, size_t count)
+{
+ while (count-- > 0)
+ iowrite32(cb710_sg_dwiter_read_next_block(miter), port);
+}
+
+#endif /* LINUX_CB710_SG_H */
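A sketch of the "best-case reading" pattern from the comment above,
assuming a kernel context; example_read_block_from_device and its
parameters are hypothetical, and 'port' stands for the device's 32-bit
data register:

static void example_read_block_from_device(struct scatterlist *sg,
					   unsigned int nents,
					   void __iomem *port, size_t words)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, nents, SG_MITER_TO_SG);
	cb710_sg_dwiter_write_from_io(&miter, port, words);
	sg_miter_stop(&miter);
}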
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h
new file mode 100644
index 000000000..84b6e2d0f
--- /dev/null
+++ b/include/linux/cciss_ioctl.h
@@ -0,0 +1,31 @@
+#ifndef CCISS_IOCTLH
+#define CCISS_IOCTLH
+
+#include <uapi/linux/cciss_ioctl.h>
+
+#ifdef CONFIG_COMPAT
+
+/* 32 bit compatible ioctl structs */
+typedef struct _IOCTL32_Command_struct {
+ LUNAddr_struct LUN_info;
+ RequestBlock_struct Request;
+ ErrorInfo_struct error_info;
+ WORD buf_size; /* size in bytes of the buf */
+ __u32 buf; /* 32 bit pointer to data buffer */
+} IOCTL32_Command_struct;
+
+typedef struct _BIG_IOCTL32_Command_struct {
+ LUNAddr_struct LUN_info;
+ RequestBlock_struct Request;
+ ErrorInfo_struct error_info;
+ DWORD malloc_size; /* < MAX_KMALLOC_SIZE in cciss.c */
+ DWORD buf_size; /* size in bytes of the buf */
+ /* < malloc_size * MAXSGENTRIES */
+ __u32 buf; /* 32 bit pointer to data buffer */
+} BIG_IOCTL32_Command_struct;
+
+#define CCISS_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 11, IOCTL32_Command_struct)
+#define CCISS_BIG_PASSTHRU32 _IOWR(CCISS_IOC_MAGIC, 18, BIG_IOCTL32_Command_struct)
+
+#endif /* CONFIG_COMPAT */
+#endif
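A hedged sketch of how a compat ioctl handler might widen the 32-bit
structure, assuming <linux/compat.h> and the native IOCTL_Command_struct
from the uapi header; example_widen_passthru is hypothetical and error
handling is omitted:

static void example_widen_passthru(const IOCTL32_Command_struct *arg32,
				   IOCTL_Command_struct *arg64)
{
	arg64->LUN_info = arg32->LUN_info;
	arg64->Request = arg32->Request;
	arg64->error_info = arg32->error_info;
	arg64->buf_size = arg32->buf_size;
	arg64->buf = compat_ptr(arg32->buf);	/* 32-bit handle -> user pointer */
}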
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
new file mode 100644
index 000000000..7f437036b
--- /dev/null
+++ b/include/linux/ccp.h
@@ -0,0 +1,556 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CPP_H__
+#define __CPP_H__
+
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/list.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+
+
+struct ccp_device;
+struct ccp_cmd;
+
+#if defined(CONFIG_CRYPTO_DEV_CCP_DD) || \
+ defined(CONFIG_CRYPTO_DEV_CCP_DD_MODULE)
+
+/**
+ * ccp_present - check if a CCP device is present
+ *
+ * Returns zero if a CCP device is present, -ENODEV otherwise.
+ */
+int ccp_present(void);
+
+/**
+ * ccp_enqueue_cmd - queue an operation for processing by the CCP
+ *
+ * @cmd: ccp_cmd struct to be processed
+ *
+ * Refer to the ccp_cmd struct below for required fields.
+ *
+ * Queue a cmd to be processed by the CCP. If queueing the cmd
+ * would exceed the defined length of the cmd queue, the cmd will
+ * only be queued if the CCP_CMD_MAY_BACKLOG flag is set; in that
+ * case the return code will be -EBUSY.
+ *
+ * The callback routine specified in the ccp_cmd struct will be
+ * called to notify the caller of completion (if the cmd was not
+ * backlogged) or advancement out of the backlog. If the cmd has
+ * advanced out of the backlog the "err" value of the callback
+ * will be -EINPROGRESS. Any other "err" value during callback is
+ * the result of the operation.
+ *
+ * The cmd has been successfully queued if:
+ * the return code is -EINPROGRESS or
+ * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
+ */
+int ccp_enqueue_cmd(struct ccp_cmd *cmd);
+
+#else /* CONFIG_CRYPTO_DEV_CCP_DD is not enabled */
+
+static inline int ccp_present(void)
+{
+ return -ENODEV;
+}
+
+static inline int ccp_enqueue_cmd(struct ccp_cmd *cmd)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_CRYPTO_DEV_CCP_DD */
+
+
+/***** AES engine *****/
+/**
+ * ccp_aes_type - AES key size
+ *
+ * @CCP_AES_TYPE_128: 128-bit key
+ * @CCP_AES_TYPE_192: 192-bit key
+ * @CCP_AES_TYPE_256: 256-bit key
+ */
+enum ccp_aes_type {
+ CCP_AES_TYPE_128 = 0,
+ CCP_AES_TYPE_192,
+ CCP_AES_TYPE_256,
+ CCP_AES_TYPE__LAST,
+};
+
+/**
+ * ccp_aes_mode - AES operation mode
+ *
+ * @CCP_AES_MODE_ECB: ECB mode
+ * @CCP_AES_MODE_CBC: CBC mode
+ * @CCP_AES_MODE_OFB: OFB mode
+ * @CCP_AES_MODE_CFB: CFB mode
+ * @CCP_AES_MODE_CTR: CTR mode
+ * @CCP_AES_MODE_CMAC: CMAC mode
+ */
+enum ccp_aes_mode {
+ CCP_AES_MODE_ECB = 0,
+ CCP_AES_MODE_CBC,
+ CCP_AES_MODE_OFB,
+ CCP_AES_MODE_CFB,
+ CCP_AES_MODE_CTR,
+ CCP_AES_MODE_CMAC,
+ CCP_AES_MODE__LAST,
+};
+
+/**
+ * ccp_aes_action - AES operation
+ *
+ * @CCP_AES_ACTION_DECRYPT: AES decrypt operation
+ * @CCP_AES_ACTION_ENCRYPT: AES encrypt operation
+ */
+enum ccp_aes_action {
+ CCP_AES_ACTION_DECRYPT = 0,
+ CCP_AES_ACTION_ENCRYPT,
+ CCP_AES_ACTION__LAST,
+};
+
+/**
+ * struct ccp_aes_engine - CCP AES operation
+ * @type: AES operation key size
+ * @mode: AES operation mode
+ * @action: AES operation (decrypt/encrypt)
+ * @key: key to be used for this AES operation
+ * @key_len: length in bytes of key
+ * @iv: IV to be used for this AES operation
+ * @iv_len: length in bytes of iv
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ * @cmac_final: indicates final operation when running in CMAC mode
+ * @cmac_key: K1/K2 key used in final CMAC operation
+ * @cmac_key_len: length in bytes of cmac_key
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - type, mode, action, key, key_len, src, dst, src_len
+ * - iv, iv_len for any mode other than ECB
+ * - cmac_final for CMAC mode
+ * - cmac_key, cmac_key_len for CMAC mode if cmac_final is non-zero
+ *
+ * The iv variable is used as both input and output. On completion of the
+ * AES operation the new IV overwrites the old IV.
+ */
+struct ccp_aes_engine {
+ enum ccp_aes_type type;
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+
+ struct scatterlist *key;
+ u32 key_len; /* In bytes */
+
+ struct scatterlist *iv;
+ u32 iv_len; /* In bytes */
+
+ struct scatterlist *src, *dst;
+ u64 src_len; /* In bytes */
+
+ u32 cmac_final; /* Indicates final cmac cmd */
+ struct scatterlist *cmac_key; /* K1/K2 cmac key required for
+ * final cmac cmd */
+ u32 cmac_key_len; /* In bytes */
+};
+
+/***** XTS-AES engine *****/
+/**
+ * ccp_xts_aes_unit_size - XTS unit size
+ *
+ * @CCP_XTS_AES_UNIT_SIZE_16: Unit size of 16 bytes
+ * @CCP_XTS_AES_UNIT_SIZE_512: Unit size of 512 bytes
+ * @CCP_XTS_AES_UNIT_SIZE_1024: Unit size of 1024 bytes
+ * @CCP_XTS_AES_UNIT_SIZE_2048: Unit size of 2048 bytes
+ * @CCP_XTS_AES_UNIT_SIZE_4096: Unit size of 4096 bytes
+ */
+enum ccp_xts_aes_unit_size {
+ CCP_XTS_AES_UNIT_SIZE_16 = 0,
+ CCP_XTS_AES_UNIT_SIZE_512,
+ CCP_XTS_AES_UNIT_SIZE_1024,
+ CCP_XTS_AES_UNIT_SIZE_2048,
+ CCP_XTS_AES_UNIT_SIZE_4096,
+ CCP_XTS_AES_UNIT_SIZE__LAST,
+};
+
+/**
+ * struct ccp_xts_aes_engine - CCP XTS AES operation
+ * @action: AES operation (decrypt/encrypt)
+ * @unit_size: unit size of the XTS operation
+ * @key: key to be used for this XTS AES operation
+ * @key_len: length in bytes of key
+ * @iv: IV to be used for this XTS AES operation
+ * @iv_len: length in bytes of iv
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ * @final: indicates final XTS operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - action, unit_size, key, key_len, iv, iv_len, src, dst, src_len, final
+ *
+ * The iv variable is used as both input and output. On completion of the
+ * AES operation the new IV overwrites the old IV.
+ */
+struct ccp_xts_aes_engine {
+ enum ccp_aes_action action;
+ enum ccp_xts_aes_unit_size unit_size;
+
+ struct scatterlist *key;
+ u32 key_len; /* In bytes */
+
+ struct scatterlist *iv;
+ u32 iv_len; /* In bytes */
+
+ struct scatterlist *src, *dst;
+ u64 src_len; /* In bytes */
+
+ u32 final;
+};
+
+/***** SHA engine *****/
+#define CCP_SHA_BLOCKSIZE SHA256_BLOCK_SIZE
+#define CCP_SHA_CTXSIZE SHA256_DIGEST_SIZE
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+ CCP_SHA_TYPE_1 = 1,
+ CCP_SHA_TYPE_224,
+ CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * struct ccp_sha_engine - CCP SHA operation
+ * @type: Type of SHA operation
+ * @ctx: current hash value
+ * @ctx_len: length in bytes of hash value
+ * @src: data to be used for this operation
+ * @src_len: length in bytes of data used for this operation
+ * @opad: data to be used for final HMAC operation
+ * @opad_len: length in bytes of data used for final HMAC operation
+ * @first: indicates first SHA operation
+ * @final: indicates final SHA operation
+ * @msg_bits: total length of the message in bits used in final SHA operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - type, ctx, ctx_len, src, src_len, final
+ * - msg_bits if final is non-zero
+ *
+ * The ctx variable is used as both input and output. On completion of the
+ * SHA operation the new hash value overwrites the old hash value.
+ */
+struct ccp_sha_engine {
+ enum ccp_sha_type type;
+
+ struct scatterlist *ctx;
+ u32 ctx_len; /* In bytes */
+
+ struct scatterlist *src;
+ u64 src_len; /* In bytes */
+
+ struct scatterlist *opad;
+ u32 opad_len; /* In bytes */
+
+ u32 first; /* Indicates first sha cmd */
+ u32 final; /* Indicates final sha cmd */
+ u64 msg_bits; /* Message length in bits required for
+ * final sha cmd */
+};
+
+/***** RSA engine *****/
+/**
+ * struct ccp_rsa_engine - CCP RSA operation
+ * @key_size: length in bits of RSA key
+ * @exp: RSA exponent
+ * @exp_len: length in bytes of exponent
+ * @mod: RSA modulus
+ * @mod_len: length in bytes of modulus
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - key_size, exp, exp_len, mod, mod_len, src, dst, src_len
+ */
+struct ccp_rsa_engine {
+ u32 key_size; /* In bits */
+
+ struct scatterlist *exp;
+ u32 exp_len; /* In bytes */
+
+ struct scatterlist *mod;
+ u32 mod_len; /* In bytes */
+
+ struct scatterlist *src, *dst;
+ u32 src_len; /* In bytes */
+};
+
+/***** Passthru engine *****/
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+ CCP_PASSTHRU_BITWISE_NOOP = 0,
+ CCP_PASSTHRU_BITWISE_AND,
+ CCP_PASSTHRU_BITWISE_OR,
+ CCP_PASSTHRU_BITWISE_XOR,
+ CCP_PASSTHRU_BITWISE_MASK,
+ CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+ CCP_PASSTHRU_BYTESWAP_32BIT,
+ CCP_PASSTHRU_BYTESWAP_256BIT,
+ CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * struct ccp_passthru_engine - CCP pass-through operation
+ * @bit_mod: bitwise operation to perform
+ * @byte_swap: byteswap operation to perform
+ * @mask: mask to be applied to data
+ * @mask_len: length in bytes of mask
+ * @src: data to be used for this operation
+ * @dst: data produced by this operation
+ * @src_len: length in bytes of data used for this operation
+ * @final: indicate final pass-through operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - bit_mod, byte_swap, src, dst, src_len
+ * - mask, mask_len if bit_mod is not CCP_PASSTHRU_BITWISE_NOOP
+ */
+struct ccp_passthru_engine {
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+
+ struct scatterlist *mask;
+ u32 mask_len; /* In bytes */
+
+ struct scatterlist *src, *dst;
+ u64 src_len; /* In bytes */
+
+ u32 final;
+};
+
+/***** ECC engine *****/
+#define CCP_ECC_MODULUS_BYTES 48 /* 384-bits */
+#define CCP_ECC_MAX_OPERANDS 6
+#define CCP_ECC_MAX_OUTPUTS 3
+
+/**
+ * ccp_ecc_function - type of ECC function
+ *
+ * @CCP_ECC_FUNCTION_MMUL_384BIT: 384-bit modular multiplication
+ * @CCP_ECC_FUNCTION_MADD_384BIT: 384-bit modular addition
+ * @CCP_ECC_FUNCTION_MINV_384BIT: 384-bit multiplicative inverse
+ * @CCP_ECC_FUNCTION_PADD_384BIT: 384-bit point addition
+ * @CCP_ECC_FUNCTION_PMUL_384BIT: 384-bit point multiplication
+ * @CCP_ECC_FUNCTION_PDBL_384BIT: 384-bit point doubling
+ */
+enum ccp_ecc_function {
+ CCP_ECC_FUNCTION_MMUL_384BIT = 0,
+ CCP_ECC_FUNCTION_MADD_384BIT,
+ CCP_ECC_FUNCTION_MINV_384BIT,
+ CCP_ECC_FUNCTION_PADD_384BIT,
+ CCP_ECC_FUNCTION_PMUL_384BIT,
+ CCP_ECC_FUNCTION_PDBL_384BIT,
+};
+
+/**
+ * struct ccp_ecc_modular_math - CCP ECC modular math parameters
+ * @operand_1: first operand for the modular math operation
+ * @operand_1_len: length of the first operand
+ * @operand_2: second operand for the modular math operation
+ * (not used for CCP_ECC_FUNCTION_MINV_384BIT)
+ * @operand_2_len: length of the second operand
+ * (not used for CCP_ECC_FUNCTION_MINV_384BIT)
+ * @result: result of the modular math operation
+ * @result_len: length of the supplied result buffer
+ */
+struct ccp_ecc_modular_math {
+ struct scatterlist *operand_1;
+ unsigned int operand_1_len; /* In bytes */
+
+ struct scatterlist *operand_2;
+ unsigned int operand_2_len; /* In bytes */
+
+ struct scatterlist *result;
+ unsigned int result_len; /* In bytes */
+};
+
+/**
+ * struct ccp_ecc_point - CCP ECC point definition
+ * @x: the x coordinate of the ECC point
+ * @x_len: the length of the x coordinate
+ * @y: the y coordinate of the ECC point
+ * @y_len: the length of the y coordinate
+ */
+struct ccp_ecc_point {
+ struct scatterlist *x;
+ unsigned int x_len; /* In bytes */
+
+ struct scatterlist *y;
+ unsigned int y_len; /* In bytes */
+};
+
+/**
+ * struct ccp_ecc_point_math - CCP ECC point math parameters
+ * @point_1: the first point of the ECC point math operation
+ * @point_2: the second point of the ECC point math operation
+ * (only used for CCP_ECC_FUNCTION_PADD_384BIT)
+ * @domain_a: the a parameter of the ECC curve
+ * @domain_a_len: the length of the a parameter
+ * @scalar: the scalar parameter for the point math operation
+ * (only used for CCP_ECC_FUNCTION_PMUL_384BIT)
+ * @scalar_len: the length of the scalar parameter
+ * (only used for CCP_ECC_FUNCTION_PMUL_384BIT)
+ * @result: the point resulting from the point math operation
+ */
+struct ccp_ecc_point_math {
+ struct ccp_ecc_point point_1;
+ struct ccp_ecc_point point_2;
+
+ struct scatterlist *domain_a;
+ unsigned int domain_a_len; /* In bytes */
+
+ struct scatterlist *scalar;
+ unsigned int scalar_len; /* In bytes */
+
+ struct ccp_ecc_point result;
+};
+
+/**
+ * struct ccp_ecc_engine - CCP ECC operation
+ * @function: ECC function to perform
+ * @mod: ECC modulus
+ * @mod_len: length in bytes of modulus
+ * @mm: modular math parameters
+ * @pm: point math parameters
+ * @ecc_result: result of the ECC operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - function, mod, mod_len
+ * - mm or pm, depending on the selected function
+ * - ecc_result
+ */
+struct ccp_ecc_engine {
+ enum ccp_ecc_function function;
+
+ struct scatterlist *mod;
+ u32 mod_len; /* In bytes */
+
+ union {
+ struct ccp_ecc_modular_math mm;
+ struct ccp_ecc_point_math pm;
+ } u;
+
+ u16 ecc_result;
+};
+
+
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
+ * @CCP_ENGINE_RSVD1: unused
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+ CCP_ENGINE_AES = 0,
+ CCP_ENGINE_XTS_AES_128,
+ CCP_ENGINE_RSVD1,
+ CCP_ENGINE_SHA,
+ CCP_ENGINE_RSA,
+ CCP_ENGINE_PASSTHRU,
+ CCP_ENGINE_ZLIB_DECOMPRESS,
+ CCP_ENGINE_ECC,
+ CCP_ENGINE__LAST,
+};
+
+/* Flag values for flags member of ccp_cmd */
+#define CCP_CMD_MAY_BACKLOG 0x00000001
+
+/**
+ * struct ccp_cmd - CCP operation request
+ * @entry: list element (ccp driver use only)
+ * @work: work element used for callbacks (ccp driver use only)
+ * @ccp: CCP device to be run on (ccp driver use only)
+ * @ret: operation return code (ccp driver use only)
+ * @flags: cmd processing flags
+ * @engine: CCP operation to perform
+ * @engine_error: CCP engine return code
+ * @u: engine specific structures, refer to specific engine struct below
+ * @callback: operation completion callback function
+ * @data: parameter value to be supplied to the callback function
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - engine, callback
+ * - See the operation structures above for what is required for each
+ * operation.
+ */
+struct ccp_cmd {
+ /* The list_head, work_struct, ccp and ret variables are for use
+ * by the CCP driver only.
+ */
+ struct list_head entry;
+ struct work_struct work;
+ struct ccp_device *ccp;
+ int ret;
+
+ u32 flags;
+
+ enum ccp_engine engine;
+ u32 engine_error;
+
+ union {
+ struct ccp_aes_engine aes;
+ struct ccp_xts_aes_engine xts;
+ struct ccp_sha_engine sha;
+ struct ccp_rsa_engine rsa;
+ struct ccp_passthru_engine passthru;
+ struct ccp_ecc_engine ecc;
+ } u;
+
+ /* Completion callback support */
+ void (*callback)(void *data, int err);
+ void *data;
+};
+
+#endif
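A minimal sketch of queueing a command, assuming a kernel context with
<linux/completion.h> and <linux/string.h>; the example_* names are
hypothetical and only the AES-CBC fields required by the documentation
above are filled in:

struct example_ccp_result {
	struct completion completion;
	int err;
};

static void example_ccp_done(void *data, int err)
{
	struct example_ccp_result *res = data;

	if (err == -EINPROGRESS)	/* advanced out of the backlog */
		return;

	res->err = err;
	complete(&res->completion);
}

static int example_ccp_aes_cbc_encrypt(struct scatterlist *key, u32 key_len,
				       struct scatterlist *iv,
				       struct scatterlist *src,
				       struct scatterlist *dst, u64 len)
{
	struct example_ccp_result res;
	struct ccp_cmd cmd;
	int ret;

	init_completion(&res.completion);
	res.err = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_AES;
	cmd.flags = CCP_CMD_MAY_BACKLOG;
	cmd.u.aes.type = CCP_AES_TYPE_128;
	cmd.u.aes.mode = CCP_AES_MODE_CBC;
	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
	cmd.u.aes.key = key;
	cmd.u.aes.key_len = key_len;
	cmd.u.aes.iv = iv;
	cmd.u.aes.iv_len = AES_BLOCK_SIZE;
	cmd.u.aes.src = src;
	cmd.u.aes.dst = dst;
	cmd.u.aes.src_len = len;
	cmd.callback = example_ccp_done;
	cmd.data = &res;

	ret = ccp_enqueue_cmd(&cmd);
	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;		/* not queued at all */

	wait_for_completion(&res.completion);
	return res.err;
}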
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
new file mode 100644
index 000000000..f8763615a
--- /dev/null
+++ b/include/linux/cdev.h
@@ -0,0 +1,33 @@
+#ifndef _LINUX_CDEV_H
+#define _LINUX_CDEV_H
+
+#include <linux/kobject.h>
+#include <linux/kdev_t.h>
+#include <linux/list.h>
+
+struct file_operations;
+struct inode;
+struct module;
+
+struct cdev {
+ struct kobject kobj;
+ struct module *owner;
+ const struct file_operations *ops;
+ struct list_head list;
+ dev_t dev;
+ unsigned int count;
+};
+
+void cdev_init(struct cdev *, const struct file_operations *);
+
+struct cdev *cdev_alloc(void);
+
+void cdev_put(struct cdev *p);
+
+int cdev_add(struct cdev *, dev_t, unsigned);
+
+void cdev_del(struct cdev *);
+
+void cd_forget(struct inode *);
+
+#endif
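The canonical registration sequence, as a sketch assuming a kernel module
context (<linux/module.h> and <linux/fs.h>); example_cdev and the fops
argument are hypothetical:

static struct cdev example_cdev;

static int example_chardev_register(dev_t devt,
				    const struct file_operations *fops)
{
	cdev_init(&example_cdev, fops);
	example_cdev.owner = THIS_MODULE;

	return cdev_add(&example_cdev, devt, 1);	/* one minor number */
}

static void example_chardev_unregister(void)
{
	cdev_del(&example_cdev);
}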
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
new file mode 100644
index 000000000..8609d577b
--- /dev/null
+++ b/include/linux/cdrom.h
@@ -0,0 +1,314 @@
+/*
+ * -- <linux/cdrom.h>
+ * General header file for linux CD-ROM drivers
+ * Copyright (C) 1992 David Giller, rafetmad@oxy.edu
+ * 1994, 1995 Eberhard Mönkeberg, emoenke@gwdg.de
+ * 1996 David van Leeuwen, david@tm.tno.nl
+ * 1997, 1998 Erik Andersen, andersee@debian.org
+ * 1998-2002 Jens Axboe, axboe@suse.de
+ */
+#ifndef _LINUX_CDROM_H
+#define _LINUX_CDROM_H
+
+#include <linux/fs.h> /* not really needed, later.. */
+#include <linux/list.h>
+#include <uapi/linux/cdrom.h>
+
+struct packet_command
+{
+ unsigned char cmd[CDROM_PACKET_SIZE];
+ unsigned char *buffer;
+ unsigned int buflen;
+ int stat;
+ struct request_sense *sense;
+ unsigned char data_direction;
+ int quiet;
+ int timeout;
+ void *reserved[1];
+};
+
+/*
+ * _OLD will use PIO transfer on atapi devices, _BPC_* will use DMA
+ */
+#define CDDA_OLD 0 /* old style */
+#define CDDA_BPC_SINGLE 1 /* single frame block pc */
+#define CDDA_BPC_FULL 2 /* multi frame block pc */
+
+/* Uniform cdrom data structures for cdrom.c */
+struct cdrom_device_info {
+ struct cdrom_device_ops *ops; /* link to device_ops */
+ struct list_head list; /* linked list of all device_info */
+ struct gendisk *disk; /* matching block layer disk */
+ void *handle; /* driver-dependent data */
+/* specifications */
+ int mask; /* mask of capability: disables them */
+ int speed; /* maximum speed for reading data */
+ int capacity; /* number of discs in jukebox */
+/* device-related storage */
+ unsigned int options : 30; /* options flags */
+ unsigned mc_flags : 2; /* media change buffer flags */
+ unsigned int vfs_events; /* cached events for vfs path */
+ unsigned int ioctl_events; /* cached events for ioctl path */
+ int use_count; /* number of times device opened */
+ char name[20]; /* name of the device type */
+/* per-device flags */
+ __u8 sanyo_slot : 2; /* Sanyo 3 CD changer support */
+ __u8 keeplocked : 1; /* CDROM_LOCKDOOR status */
+ __u8 reserved : 5; /* not used yet */
+ int cdda_method; /* see flags */
+ __u8 last_sense;
+ __u8 media_written; /* dirty flag, DVD+RW bookkeeping */
+ unsigned short mmc3_profile; /* current MMC3 profile */
+ int for_data;
+ int (*exit)(struct cdrom_device_info *);
+ int mrw_mode_page;
+};
+
+struct cdrom_device_ops {
+/* routines */
+ int (*open) (struct cdrom_device_info *, int);
+ void (*release) (struct cdrom_device_info *);
+ int (*drive_status) (struct cdrom_device_info *, int);
+ unsigned int (*check_events) (struct cdrom_device_info *cdi,
+ unsigned int clearing, int slot);
+ int (*media_changed) (struct cdrom_device_info *, int);
+ int (*tray_move) (struct cdrom_device_info *, int);
+ int (*lock_door) (struct cdrom_device_info *, int);
+ int (*select_speed) (struct cdrom_device_info *, int);
+ int (*select_disc) (struct cdrom_device_info *, int);
+ int (*get_last_session) (struct cdrom_device_info *,
+ struct cdrom_multisession *);
+ int (*get_mcn) (struct cdrom_device_info *,
+ struct cdrom_mcn *);
+ /* hard reset device */
+ int (*reset) (struct cdrom_device_info *);
+ /* play stuff */
+ int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
+
+/* driver specifications */
+ const int capability; /* capability flags */
+ int n_minors; /* number of active minor devices */
+ /* handle uniform packets for scsi type devices (scsi,atapi) */
+ int (*generic_packet) (struct cdrom_device_info *,
+ struct packet_command *);
+};
+
+/* the general block_device operations structure: */
+extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
+ fmode_t mode);
+extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
+extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ fmode_t mode, unsigned int cmd, unsigned long arg);
+extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi,
+ unsigned int clearing);
+extern int cdrom_media_changed(struct cdrom_device_info *);
+
+extern int register_cdrom(struct cdrom_device_info *cdi);
+extern void unregister_cdrom(struct cdrom_device_info *cdi);
+
+typedef struct {
+ int data;
+ int audio;
+ int cdi;
+ int xa;
+ long error;
+} tracktype;
+
+extern int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written);
+extern int cdrom_number_of_slots(struct cdrom_device_info *cdi);
+extern int cdrom_mode_select(struct cdrom_device_info *cdi,
+ struct packet_command *cgc);
+extern int cdrom_mode_sense(struct cdrom_device_info *cdi,
+ struct packet_command *cgc,
+ int page_code, int page_control);
+extern void init_cdrom_command(struct packet_command *cgc,
+ void *buffer, int len, int type);
+
+/* The SCSI spec says there could be 256 slots. */
+#define CDROM_MAX_SLOTS 256
+
+struct cdrom_mechstat_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 fault : 1;
+ __u8 changer_state : 2;
+ __u8 curslot : 5;
+ __u8 mech_state : 3;
+ __u8 door_open : 1;
+ __u8 reserved1 : 4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 curslot : 5;
+ __u8 changer_state : 2;
+ __u8 fault : 1;
+ __u8 reserved1 : 4;
+ __u8 door_open : 1;
+ __u8 mech_state : 3;
+#endif
+ __u8 curlba[3];
+ __u8 nslots;
+ __u16 slot_tablelen;
+};
+
+struct cdrom_slot {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 disc_present : 1;
+ __u8 reserved1 : 6;
+ __u8 change : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 change : 1;
+ __u8 reserved1 : 6;
+ __u8 disc_present : 1;
+#endif
+ __u8 reserved2[3];
+};
+
+struct cdrom_changer_info {
+ struct cdrom_mechstat_header hdr;
+ struct cdrom_slot slots[CDROM_MAX_SLOTS];
+};
+
+typedef enum {
+ mechtype_caddy = 0,
+ mechtype_tray = 1,
+ mechtype_popup = 2,
+ mechtype_individual_changer = 4,
+ mechtype_cartridge_changer = 5
+} mechtype_t;
+
+typedef struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 ps : 1;
+ __u8 reserved1 : 1;
+ __u8 page_code : 6;
+ __u8 page_length;
+ __u8 reserved2 : 1;
+ __u8 bufe : 1;
+ __u8 ls_v : 1;
+ __u8 test_write : 1;
+ __u8 write_type : 4;
+ __u8 multi_session : 2; /* or border, DVD */
+ __u8 fp : 1;
+ __u8 copy : 1;
+ __u8 track_mode : 4;
+ __u8 reserved3 : 4;
+ __u8 data_block_type : 4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 page_code : 6;
+ __u8 reserved1 : 1;
+ __u8 ps : 1;
+ __u8 page_length;
+ __u8 write_type : 4;
+ __u8 test_write : 1;
+ __u8 ls_v : 1;
+ __u8 bufe : 1;
+ __u8 reserved2 : 1;
+ __u8 track_mode : 4;
+ __u8 copy : 1;
+ __u8 fp : 1;
+ __u8 multi_session : 2; /* or border, DVD */
+ __u8 data_block_type : 4;
+ __u8 reserved3 : 4;
+#endif
+ __u8 link_size;
+ __u8 reserved4;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 reserved5 : 2;
+ __u8 app_code : 6;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 app_code : 6;
+ __u8 reserved5 : 2;
+#endif
+ __u8 session_format;
+ __u8 reserved6;
+ __be32 packet_size;
+ __u16 audio_pause;
+ __u8 mcn[16];
+ __u8 isrc[16];
+ __u8 subhdr0;
+ __u8 subhdr1;
+ __u8 subhdr2;
+ __u8 subhdr3;
+} __attribute__((packed)) write_param_page;
+
+struct modesel_head
+{
+ __u8 reserved1;
+ __u8 medium;
+ __u8 reserved2;
+ __u8 block_desc_length;
+ __u8 density;
+ __u8 number_of_blocks_hi;
+ __u8 number_of_blocks_med;
+ __u8 number_of_blocks_lo;
+ __u8 reserved3;
+ __u8 block_length_hi;
+ __u8 block_length_med;
+ __u8 block_length_lo;
+};
+
+typedef struct {
+ __u16 report_key_length;
+ __u8 reserved1;
+ __u8 reserved2;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 type_code : 2;
+ __u8 vra : 3;
+ __u8 ucca : 3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ucca : 3;
+ __u8 vra : 3;
+ __u8 type_code : 2;
+#endif
+ __u8 region_mask;
+ __u8 rpc_scheme;
+ __u8 reserved3;
+} rpc_state_t;
+
+struct event_header {
+ __be16 data_len;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 nea : 1;
+ __u8 reserved1 : 4;
+ __u8 notification_class : 3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 notification_class : 3;
+ __u8 reserved1 : 4;
+ __u8 nea : 1;
+#endif
+ __u8 supp_event_class;
+};
+
+struct media_event_desc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __u8 reserved1 : 4;
+ __u8 media_event_code : 4;
+ __u8 reserved2 : 6;
+ __u8 media_present : 1;
+ __u8 door_open : 1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 media_event_code : 4;
+ __u8 reserved1 : 4;
+ __u8 door_open : 1;
+ __u8 media_present : 1;
+ __u8 reserved2 : 6;
+#endif
+ __u8 start_slot;
+ __u8 end_slot;
+};
+
+extern int cdrom_get_media_event(struct cdrom_device_info *cdi, struct media_event_desc *med);
+
+static inline void lba_to_msf(int lba, u8 *m, u8 *s, u8 *f)
+{
+ lba += CD_MSF_OFFSET;
+ lba &= 0xffffff; /* negative lbas use only 24 bits */
+ *m = lba / (CD_SECS * CD_FRAMES);
+ lba %= (CD_SECS * CD_FRAMES);
+ *s = lba / CD_FRAMES;
+ *f = lba % CD_FRAMES;
+}
+
+static inline int msf_to_lba(u8 m, u8 s, u8 f)
+{
+ return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET;
+}
+#endif /* _LINUX_CDROM_H */
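A worked example of the two helpers above, assuming the usual kernel
includes; example_msf_roundtrip is hypothetical. With CD_MSF_OFFSET == 150
and 75 frames per second, logical block 0 maps to MSF 00:02:00, and the
two conversions are inverses of each other:

static inline void example_msf_roundtrip(void)
{
	u8 m, s, f;

	lba_to_msf(0, &m, &s, &f);		/* m = 0, s = 2, f = 0 */
	WARN_ON(msf_to_lba(m, s, f) != 0);	/* back to LBA 0 */
}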
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
new file mode 100644
index 000000000..260d78b58
--- /dev/null
+++ b/include/linux/ceph/auth.h
@@ -0,0 +1,142 @@
+#ifndef _FS_CEPH_AUTH_H
+#define _FS_CEPH_AUTH_H
+
+#include <linux/ceph/types.h>
+#include <linux/ceph/buffer.h>
+
+/*
+ * Abstract interface for communicating with the authentication module.
+ * There is some handshake that takes place between us and the monitor
+ * to acquire the necessary keys. These are used to generate an
+ * 'authorizer' that we use when connecting to a service (mds, osd).
+ */
+
+struct ceph_auth_client;
+struct ceph_authorizer;
+struct ceph_msg;
+
+struct ceph_auth_handshake {
+ struct ceph_authorizer *authorizer;
+ void *authorizer_buf;
+ size_t authorizer_buf_len;
+ void *authorizer_reply_buf;
+ size_t authorizer_reply_buf_len;
+ int (*sign_message)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+ int (*check_message_signature)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+};
+
+struct ceph_auth_client_ops {
+ const char *name;
+
+ /*
+ * true if we are authenticated and can connect to
+ * services.
+ */
+ int (*is_authenticated)(struct ceph_auth_client *ac);
+
+ /*
+ * true if we should (re)authenticate, e.g., when our tickets
+ * are getting old and crusty.
+ */
+ int (*should_authenticate)(struct ceph_auth_client *ac);
+
+ /*
+ * build requests and process replies during monitor
+ * handshake. if handle_reply returns -EAGAIN, we build
+ * another request.
+ */
+ int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
+ int (*handle_reply)(struct ceph_auth_client *ac, int result,
+ void *buf, void *end);
+
+ /*
+ * Create authorizer for connecting to a service, and verify
+ * the response to authenticate the service.
+ */
+ int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
+ struct ceph_auth_handshake *auth);
+ /* ensure that an existing authorizer is up to date */
+ int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
+ struct ceph_auth_handshake *auth);
+ int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a, size_t len);
+ void (*destroy_authorizer)(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a);
+ void (*invalidate_authorizer)(struct ceph_auth_client *ac,
+ int peer_type);
+
+ /* reset when we (re)connect to a monitor */
+ void (*reset)(struct ceph_auth_client *ac);
+
+ void (*destroy)(struct ceph_auth_client *ac);
+
+ int (*sign_message)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+ int (*check_message_signature)(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg);
+};
+
+struct ceph_auth_client {
+ u32 protocol; /* CEPH_AUTH_* */
+ void *private; /* for use by protocol implementation */
+ const struct ceph_auth_client_ops *ops; /* null iff protocol==0 */
+
+ bool negotiating; /* true if negotiating protocol */
+ const char *name; /* entity name */
+ u64 global_id; /* our unique id in system */
+ const struct ceph_crypto_key *key; /* our secret key */
+ unsigned want_keys; /* which services we want */
+
+ struct mutex mutex;
+};
+
+extern struct ceph_auth_client *ceph_auth_init(const char *name,
+ const struct ceph_crypto_key *key);
+extern void ceph_auth_destroy(struct ceph_auth_client *ac);
+
+extern void ceph_auth_reset(struct ceph_auth_client *ac);
+
+extern int ceph_auth_build_hello(struct ceph_auth_client *ac,
+ void *buf, size_t len);
+extern int ceph_handle_auth_reply(struct ceph_auth_client *ac,
+ void *buf, size_t len,
+ void *reply_buf, size_t reply_len);
+extern int ceph_entity_name_encode(const char *name, void **p, void *end);
+
+extern int ceph_build_auth(struct ceph_auth_client *ac,
+ void *msg_buf, size_t msg_len);
+
+extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
+extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
+ int peer_type,
+ struct ceph_auth_handshake *auth);
+extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a);
+extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
+ int peer_type,
+ struct ceph_auth_handshake *a);
+extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ size_t len);
+extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
+ int peer_type);
+
+static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ if (auth->sign_message)
+ return auth->sign_message(auth, msg);
+ return 0;
+}
+
+static inline
+int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
+ struct ceph_msg *msg)
+{
+ if (auth->check_message_signature)
+ return auth->check_message_signature(auth, msg);
+ return 0;
+}
+#endif
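A sketch of how a messenger-side caller might use the signing wrapper,
assuming 'auth' was filled in by ceph_auth_create_authorizer();
example_sign_before_send is hypothetical:

static inline int example_sign_before_send(struct ceph_auth_handshake *auth,
					   struct ceph_msg *msg)
{
	int ret = ceph_auth_sign_message(auth, msg);

	if (ret)
		pr_err("failed to sign outgoing ceph message: %d\n", ret);

	return ret;
}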
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
new file mode 100644
index 000000000..07ca15e76
--- /dev/null
+++ b/include/linux/ceph/buffer.h
@@ -0,0 +1,37 @@
+#ifndef __FS_CEPH_BUFFER_H
+#define __FS_CEPH_BUFFER_H
+
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/uio.h>
+
+/*
+ * a simple reference counted buffer.
+ *
+ * use kmalloc for smaller sizes, vmalloc for larger sizes.
+ */
+struct ceph_buffer {
+ struct kref kref;
+ struct kvec vec;
+ size_t alloc_len;
+};
+
+extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp);
+extern void ceph_buffer_release(struct kref *kref);
+
+static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
+{
+ kref_get(&b->kref);
+ return b;
+}
+
+static inline void ceph_buffer_put(struct ceph_buffer *b)
+{
+ kref_put(&b->kref, ceph_buffer_release);
+}
+
+extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
+
+#endif
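A reference-counting sketch, assuming the usual kernel includes;
example_buffer_use is hypothetical. A new buffer starts with one
reference, and the final put triggers ceph_buffer_release():

static void example_buffer_use(void)
{
	struct ceph_buffer *b = ceph_buffer_new(PAGE_SIZE, GFP_KERNEL);

	if (!b)
		return;

	memset(b->vec.iov_base, 0, b->vec.iov_len);

	ceph_buffer_get(b);	/* hand a second reference to another user */
	ceph_buffer_put(b);	/* that user is done */
	ceph_buffer_put(b);	/* last reference: backing store is freed */
}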
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h
new file mode 100644
index 000000000..aa2e19182
--- /dev/null
+++ b/include/linux/ceph/ceph_debug.h
@@ -0,0 +1,38 @@
+#ifndef _FS_CEPH_DEBUG_H
+#define _FS_CEPH_DEBUG_H
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#ifdef CONFIG_CEPH_LIB_PRETTYDEBUG
+
+/*
+ * wrap pr_debug to include a filename:lineno prefix on each line.
+ * this incurs some overhead (kernel size and execution time) due to
+ * the extra function call at each call site.
+ */
+
+# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+extern const char *ceph_file_part(const char *s, int len);
+# define dout(fmt, ...) \
+ pr_debug("%.*s %12.12s:%-4d : " fmt, \
+ 8 - (int)sizeof(KBUILD_MODNAME), " ", \
+ ceph_file_part(__FILE__, sizeof(__FILE__)), \
+ __LINE__, ##__VA_ARGS__)
+# else
+/* faux printk call just to see any compiler warnings. */
+# define dout(fmt, ...) do { \
+ if (0) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+ } while (0)
+# endif
+
+#else
+
+/*
+ * or, just wrap pr_debug
+ */
+# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
+
+#endif
+
+#endif
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
new file mode 100644
index 000000000..4763ad64e
--- /dev/null
+++ b/include/linux/ceph/ceph_features.h
@@ -0,0 +1,119 @@
+#ifndef __CEPH_FEATURES
+#define __CEPH_FEATURES
+
+/*
+ * feature bits
+ */
+#define CEPH_FEATURE_UID (1ULL<<0)
+#define CEPH_FEATURE_NOSRCADDR (1ULL<<1)
+#define CEPH_FEATURE_MONCLOCKCHECK (1ULL<<2)
+#define CEPH_FEATURE_FLOCK (1ULL<<3)
+#define CEPH_FEATURE_SUBSCRIBE2 (1ULL<<4)
+#define CEPH_FEATURE_MONNAMES (1ULL<<5)
+#define CEPH_FEATURE_RECONNECT_SEQ (1ULL<<6)
+#define CEPH_FEATURE_DIRLAYOUTHASH (1ULL<<7)
+#define CEPH_FEATURE_OBJECTLOCATOR (1ULL<<8)
+#define CEPH_FEATURE_PGID64 (1ULL<<9)
+#define CEPH_FEATURE_INCSUBOSDMAP (1ULL<<10)
+#define CEPH_FEATURE_PGPOOL3 (1ULL<<11)
+#define CEPH_FEATURE_OSDREPLYMUX (1ULL<<12)
+#define CEPH_FEATURE_OSDENC (1ULL<<13)
+#define CEPH_FEATURE_OMAP (1ULL<<14)
+#define CEPH_FEATURE_MONENC (1ULL<<15)
+#define CEPH_FEATURE_QUERY_T (1ULL<<16)
+#define CEPH_FEATURE_INDEP_PG_MAP (1ULL<<17)
+#define CEPH_FEATURE_CRUSH_TUNABLES (1ULL<<18)
+#define CEPH_FEATURE_CHUNKY_SCRUB (1ULL<<19)
+#define CEPH_FEATURE_MON_NULLROUTE (1ULL<<20)
+#define CEPH_FEATURE_MON_GV (1ULL<<21)
+#define CEPH_FEATURE_BACKFILL_RESERVATION (1ULL<<22)
+#define CEPH_FEATURE_MSG_AUTH (1ULL<<23)
+#define CEPH_FEATURE_RECOVERY_RESERVATION (1ULL<<24)
+#define CEPH_FEATURE_CRUSH_TUNABLES2 (1ULL<<25)
+#define CEPH_FEATURE_CREATEPOOLID (1ULL<<26)
+#define CEPH_FEATURE_REPLY_CREATE_INODE (1ULL<<27)
+#define CEPH_FEATURE_OSD_HBMSGS (1ULL<<28)
+#define CEPH_FEATURE_MDSENC (1ULL<<29)
+#define CEPH_FEATURE_OSDHASHPSPOOL (1ULL<<30)
+#define CEPH_FEATURE_MON_SINGLE_PAXOS (1ULL<<31)
+#define CEPH_FEATURE_OSD_SNAPMAPPER (1ULL<<32)
+#define CEPH_FEATURE_MON_SCRUB (1ULL<<33)
+#define CEPH_FEATURE_OSD_PACKED_RECOVERY (1ULL<<34)
+#define CEPH_FEATURE_OSD_CACHEPOOL (1ULL<<35)
+#define CEPH_FEATURE_CRUSH_V2 (1ULL<<36) /* new indep; SET_* steps */
+#define CEPH_FEATURE_EXPORT_PEER (1ULL<<37)
+#define CEPH_FEATURE_OSD_ERASURE_CODES (1ULL<<38)
+#define CEPH_FEATURE_OSD_TMAP2OMAP (1ULL<<38) /* overlap with EC */
+/* The process supports new-style OSDMap encoding. Monitors also use
+ this bit to determine if peers support NAK messages. */
+#define CEPH_FEATURE_OSDMAP_ENC (1ULL<<39)
+#define CEPH_FEATURE_MDS_INLINE_DATA (1ULL<<40)
+#define CEPH_FEATURE_CRUSH_TUNABLES3 (1ULL<<41)
+#define CEPH_FEATURE_OSD_PRIMARY_AFFINITY (1ULL<<41) /* overlap w/ tunables3 */
+#define CEPH_FEATURE_MSGR_KEEPALIVE2 (1ULL<<42)
+#define CEPH_FEATURE_OSD_POOLRESEND (1ULL<<43)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 (1ULL<<44)
+#define CEPH_FEATURE_OSD_SET_ALLOC_HINT (1ULL<<45)
+#define CEPH_FEATURE_OSD_FADVISE_FLAGS (1ULL<<46)
+#define CEPH_FEATURE_OSD_REPOP (1ULL<<46) /* overlap with fadvise */
+#define CEPH_FEATURE_OSD_OBJECT_DIGEST (1ULL<<46) /* overlap with fadvise */
+#define CEPH_FEATURE_OSD_TRANSACTION_MAY_LAYOUT (1ULL<<46) /* overlap w/ fadvise */
+#define CEPH_FEATURE_MDS_QUOTA (1ULL<<47)
+#define CEPH_FEATURE_CRUSH_V4 (1ULL<<48) /* straw2 buckets */
+#define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
+// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
+#define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */
+
+/*
+ * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
+ * vector to evaluate to 64 bit ~0. To cope, we designate 1ULL << 63
+ * to mean 33 bit ~0, and introduce a helper below to do the
+ * translation.
+ *
+ * This was introduced by ceph.git commit
+ * 9ea02b84104045c2ffd7e7f4e7af512953855ecd v0.58-657-g9ea02b8
+ * and fixed by ceph.git commit
+ * 4255b5c2fb54ae40c53284b3ab700fdfc7e61748 v0.65-263-g4255b5c
+ */
+#define CEPH_FEATURE_RESERVED (1ULL<<63)
+
+static inline u64 ceph_sanitize_features(u64 features)
+{
+ if (features & CEPH_FEATURE_RESERVED) {
+ /* everything through OSD_SNAPMAPPER */
+ return 0x1ffffffffull;
+ } else {
+ return features;
+ }
+}
+
+/*
+ * Features supported.
+ */
+#define CEPH_FEATURES_SUPPORTED_DEFAULT \
+ (CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_RECONNECT_SEQ | \
+ CEPH_FEATURE_PGID64 | \
+ CEPH_FEATURE_PGPOOL3 | \
+ CEPH_FEATURE_OSDENC | \
+ CEPH_FEATURE_CRUSH_TUNABLES | \
+ CEPH_FEATURE_MSG_AUTH | \
+ CEPH_FEATURE_CRUSH_TUNABLES2 | \
+ CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_OSDHASHPSPOOL | \
+ CEPH_FEATURE_OSD_CACHEPOOL | \
+ CEPH_FEATURE_CRUSH_V2 | \
+ CEPH_FEATURE_EXPORT_PEER | \
+ CEPH_FEATURE_OSDMAP_ENC | \
+ CEPH_FEATURE_CRUSH_TUNABLES3 | \
+ CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
+ CEPH_FEATURE_CRUSH_V4)
+
+#define CEPH_FEATURES_REQUIRED_DEFAULT \
+ (CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_RECONNECT_SEQ | \
+ CEPH_FEATURE_PGID64 | \
+ CEPH_FEATURE_PGPOOL3 | \
+ CEPH_FEATURE_OSDENC)
+
+#endif
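A small illustration of the reserved-bit translation described above,
assuming <linux/types.h>; example_peer_has_crush_v2 is hypothetical. A
peer advertising the legacy encoding (bit 63 set) is treated as supporting
only the features up to OSD_SNAPMAPPER, so newer bits such as CRUSH_V2
read as absent:

static inline bool example_peer_has_crush_v2(u64 advertised)
{
	u64 features = ceph_sanitize_features(advertised);

	return (features & CEPH_FEATURE_CRUSH_V2) != 0;
}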
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
new file mode 100644
index 000000000..5babb8e95
--- /dev/null
+++ b/include/linux/ceph/ceph_frag.h
@@ -0,0 +1,109 @@
+#ifndef FS_CEPH_FRAG_H
+#define FS_CEPH_FRAG_H
+
+/*
+ * "Frags" are a way to describe a subset of a 32-bit number space,
+ * using a mask and a value to match against that mask. Any given frag
+ * (subset of the number space) can be partitioned into 2^n sub-frags.
+ *
+ * Frags are encoded into a 32-bit word:
+ * 8 upper bits = "bits"
+ * 24 lower bits = "value"
+ * (We could go to 5+27 bits, but who cares.)
+ *
+ * We use the _most_ significant bits of the 24 bit value. This makes
+ * values logically sort.
+ *
+ * Unfortunately, because the "bits" field is still in the high bits, we
+ * can't sort encoded frags numerically. However, it does allow you
+ * to feed encoded frags as values into frag_contains_value.
+ */
+static inline __u32 ceph_frag_make(__u32 b, __u32 v)
+{
+ return (b << 24) |
+ (v & (0xffffffu << (24-b)) & 0xffffffu);
+}
+static inline __u32 ceph_frag_bits(__u32 f)
+{
+ return f >> 24;
+}
+static inline __u32 ceph_frag_value(__u32 f)
+{
+ return f & 0xffffffu;
+}
+static inline __u32 ceph_frag_mask(__u32 f)
+{
+ return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu;
+}
+static inline __u32 ceph_frag_mask_shift(__u32 f)
+{
+ return 24 - ceph_frag_bits(f);
+}
+
+static inline int ceph_frag_contains_value(__u32 f, __u32 v)
+{
+ return (v & ceph_frag_mask(f)) == ceph_frag_value(f);
+}
+static inline int ceph_frag_contains_frag(__u32 f, __u32 sub)
+{
+ /* is sub as specific as us, and contained by us? */
+ return ceph_frag_bits(sub) >= ceph_frag_bits(f) &&
+ (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f);
+}
+
+static inline __u32 ceph_frag_parent(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f) - 1,
+ ceph_frag_value(f) & (ceph_frag_mask(f) << 1));
+}
+static inline int ceph_frag_is_left_child(__u32 f)
+{
+ return ceph_frag_bits(f) > 0 &&
+ (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0;
+}
+static inline int ceph_frag_is_right_child(__u32 f)
+{
+ return ceph_frag_bits(f) > 0 &&
+ (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1;
+}
+static inline __u32 ceph_frag_sibling(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f),
+ ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f)));
+}
+static inline __u32 ceph_frag_left_child(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f));
+}
+static inline __u32 ceph_frag_right_child(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f)+1,
+ ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f))));
+}
+static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
+{
+ int newbits = ceph_frag_bits(f) + by;
+ return ceph_frag_make(newbits,
+ ceph_frag_value(f) | (i << (24 - newbits)));
+}
+static inline int ceph_frag_is_leftmost(__u32 f)
+{
+ return ceph_frag_value(f) == 0;
+}
+static inline int ceph_frag_is_rightmost(__u32 f)
+{
+ return ceph_frag_value(f) == ceph_frag_mask(f);
+}
+static inline __u32 ceph_frag_next(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f),
+ ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f)));
+}
+
+/*
+ * comparator to sort frags logically, as when traversing the
+ * number space in ascending order...
+ */
+int ceph_frag_compare(__u32 a, __u32 b);
+
+#endif
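A worked example of the encoding, assuming <linux/bug.h>;
example_frag_checks is hypothetical. ceph_frag_make(1, 0x800000) describes
the upper half of the 24-bit value space (mask and value both 0x800000),
so it contains 0xabcdef but not 0x123456:

static inline void example_frag_checks(void)
{
	__u32 upper_half = ceph_frag_make(1, 0x800000);

	BUG_ON(ceph_frag_bits(upper_half) != 1);
	BUG_ON(!ceph_frag_contains_value(upper_half, 0xabcdef));
	BUG_ON(ceph_frag_contains_value(upper_half, 0x123456));
}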
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
new file mode 100644
index 000000000..d7d072a25
--- /dev/null
+++ b/include/linux/ceph/ceph_fs.h
@@ -0,0 +1,763 @@
+/*
+ * ceph_fs.h - Ceph constants and data types to share between kernel and
+ * user space.
+ *
+ * Most types in this file are defined as little-endian, and are
+ * primarily intended to describe data structures that pass over the
+ * wire or that are stored on disk.
+ *
+ * LGPL2
+ */
+
+#ifndef CEPH_FS_H
+#define CEPH_FS_H
+
+#include <linux/ceph/msgr.h>
+#include <linux/ceph/rados.h>
+
+/*
+ * Subprotocol versions. When specific message types or high-level
+ * protocols change, bump the affected components. We rev internal
+ * cluster protocols separately from the public, client-facing
+ * protocol.
+ */
+#define CEPH_OSDC_PROTOCOL 24 /* server/client */
+#define CEPH_MDSC_PROTOCOL 32 /* server/client */
+#define CEPH_MONC_PROTOCOL 15 /* server/client */
+
+
+#define CEPH_INO_ROOT 1
+#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) */
+
+/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
+#define CEPH_MAX_MON 31
+
+/*
+ * ceph_file_layout - describe data layout for a file/inode
+ */
+struct ceph_file_layout {
+ /* file -> object mapping */
+ __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple
+ of page size. */
+ __le32 fl_stripe_count; /* over this many objects */
+ __le32 fl_object_size; /* until objects are this big, then move to
+ new objects */
+ __le32 fl_cas_hash; /* UNUSED. 0 = none; 1 = sha256 */
+
+ /* pg -> disk layout */
+ __le32 fl_object_stripe_unit; /* UNUSED. for per-object parity, if any */
+
+ /* object -> pg layout */
+ __le32 fl_unused; /* unused; used to be preferred primary for pg (-1 for none) */
+ __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
+} __attribute__ ((packed));
+
+#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
+#define ceph_file_layout_stripe_count(l) \
+ ((__s32)le32_to_cpu((l).fl_stripe_count))
+#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
+#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
+#define ceph_file_layout_object_su(l) \
+ ((__s32)le32_to_cpu((l).fl_object_stripe_unit))
+#define ceph_file_layout_pg_pool(l) \
+ ((__s32)le32_to_cpu((l).fl_pg_pool))
+
+static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
+{
+ return le32_to_cpu(l->fl_stripe_unit) *
+ le32_to_cpu(l->fl_stripe_count);
+}
+
+/* "period" == bytes before i start on a new set of objects */
+static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
+{
+ return le32_to_cpu(l->fl_object_size) *
+ le32_to_cpu(l->fl_stripe_count);
+}
+
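+/*
+ * Worked example (hypothetical values): with fl_stripe_unit = 65536,
+ * fl_stripe_count = 4 and fl_object_size = 4194304, the stripe width is
+ * 65536 * 4 = 262144 bytes and the period (bytes written before a new
+ * set of 4 objects is started) is 4194304 * 4 = 16777216 bytes.
+ */
+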
+#define CEPH_MIN_STRIPE_UNIT 65536
+
+int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
+
+struct ceph_dir_layout {
+ __u8 dl_dir_hash; /* see ceph_hash.h for ids */
+ __u8 dl_unused1;
+ __u16 dl_unused2;
+ __u32 dl_unused3;
+} __attribute__ ((packed));
+
+/* crypto algorithms */
+#define CEPH_CRYPTO_NONE 0x0
+#define CEPH_CRYPTO_AES 0x1
+
+#define CEPH_AES_IV "cephsageyudagreg"
+
+/* security/authentication protocols */
+#define CEPH_AUTH_UNKNOWN 0x0
+#define CEPH_AUTH_NONE 0x1
+#define CEPH_AUTH_CEPHX 0x2
+
+#define CEPH_AUTH_UID_DEFAULT ((__u64) -1)
+
+
+/*********************************************
+ * message layer
+ */
+
+/*
+ * message types
+ */
+
+/* misc */
+#define CEPH_MSG_SHUTDOWN 1
+#define CEPH_MSG_PING 2
+
+/* client <-> monitor */
+#define CEPH_MSG_MON_MAP 4
+#define CEPH_MSG_MON_GET_MAP 5
+#define CEPH_MSG_STATFS 13
+#define CEPH_MSG_STATFS_REPLY 14
+#define CEPH_MSG_MON_SUBSCRIBE 15
+#define CEPH_MSG_MON_SUBSCRIBE_ACK 16
+#define CEPH_MSG_AUTH 17
+#define CEPH_MSG_AUTH_REPLY 18
+#define CEPH_MSG_MON_GET_VERSION 19
+#define CEPH_MSG_MON_GET_VERSION_REPLY 20
+
+/* client <-> mds */
+#define CEPH_MSG_MDS_MAP 21
+
+#define CEPH_MSG_CLIENT_SESSION 22
+#define CEPH_MSG_CLIENT_RECONNECT 23
+
+#define CEPH_MSG_CLIENT_REQUEST 24
+#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
+#define CEPH_MSG_CLIENT_REPLY 26
+#define CEPH_MSG_CLIENT_CAPS 0x310
+#define CEPH_MSG_CLIENT_LEASE 0x311
+#define CEPH_MSG_CLIENT_SNAP 0x312
+#define CEPH_MSG_CLIENT_CAPRELEASE 0x313
+
+/* pool ops */
+#define CEPH_MSG_POOLOP_REPLY 48
+#define CEPH_MSG_POOLOP 49
+
+
+/* osd */
+#define CEPH_MSG_OSD_MAP 41
+#define CEPH_MSG_OSD_OP 42
+#define CEPH_MSG_OSD_OPREPLY 43
+#define CEPH_MSG_WATCH_NOTIFY 44
+
+
+/* watch-notify operations */
+enum {
+ WATCH_NOTIFY = 1, /* notifying watcher */
+ WATCH_NOTIFY_COMPLETE = 2, /* notifier notified when done */
+};
+
+
+struct ceph_mon_request_header {
+ __le64 have_version;
+ __le16 session_mon;
+ __le64 session_mon_tid;
+} __attribute__ ((packed));
+
+struct ceph_mon_statfs {
+ struct ceph_mon_request_header monhdr;
+ struct ceph_fsid fsid;
+} __attribute__ ((packed));
+
+struct ceph_statfs {
+ __le64 kb, kb_used, kb_avail;
+ __le64 num_objects;
+} __attribute__ ((packed));
+
+struct ceph_mon_statfs_reply {
+ struct ceph_fsid fsid;
+ __le64 version;
+ struct ceph_statfs st;
+} __attribute__ ((packed));
+
+struct ceph_osd_getmap {
+ struct ceph_mon_request_header monhdr;
+ struct ceph_fsid fsid;
+ __le32 start;
+} __attribute__ ((packed));
+
+struct ceph_mds_getmap {
+ struct ceph_mon_request_header monhdr;
+ struct ceph_fsid fsid;
+} __attribute__ ((packed));
+
+struct ceph_client_mount {
+ struct ceph_mon_request_header monhdr;
+} __attribute__ ((packed));
+
+#define CEPH_SUBSCRIBE_ONETIME 1 /* i want only 1 update after have */
+
+struct ceph_mon_subscribe_item {
+ __le64 have_version; __le64 have;
+ __u8 onetime;
+} __attribute__ ((packed));
+
+struct ceph_mon_subscribe_ack {
+ __le32 duration; /* seconds */
+ struct ceph_fsid fsid;
+} __attribute__ ((packed));
+
+/*
+ * mdsmap flags
+ */
+#define CEPH_MDSMAP_DOWN (1<<0) /* cluster deliberately down */
+
+/*
+ * mds states
+ * > 0 -> in
+ * <= 0 -> out
+ */
+#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */
+#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees.
+ empty log. */
+#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */
+#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */
+#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */
+#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */
+#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
+#define CEPH_MDS_STATE_REPLAYONCE -9 /* up, replaying an active node's journal */
+
+#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */
+#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed
+ operations (import, rename, etc.) */
+#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */
+#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */
+#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */
+#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */
+#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */
+
+extern const char *ceph_mds_state_name(int s);
+
+
+/*
+ * metadata lock types.
+ * - these are bitmasks.. we can compose them
+ * - they also define the lock ordering by the MDS
+ * - a few of these are internal to the mds
+ */
+#define CEPH_LOCK_DVERSION 1
+#define CEPH_LOCK_DN 2
+#define CEPH_LOCK_ISNAP 16
+#define CEPH_LOCK_IVERSION 32 /* mds internal */
+#define CEPH_LOCK_IFILE 64
+#define CEPH_LOCK_IAUTH 128
+#define CEPH_LOCK_ILINK 256
+#define CEPH_LOCK_IDFT 512 /* dir frag tree */
+#define CEPH_LOCK_INEST 1024 /* mds internal */
+#define CEPH_LOCK_IXATTR 2048
+#define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */
+#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */
+#define CEPH_LOCK_IPOLICY 16384 /* policy lock on dirs. MDS internal */
+
+/* client_session ops */
+enum {
+ CEPH_SESSION_REQUEST_OPEN,
+ CEPH_SESSION_OPEN,
+ CEPH_SESSION_REQUEST_CLOSE,
+ CEPH_SESSION_CLOSE,
+ CEPH_SESSION_REQUEST_RENEWCAPS,
+ CEPH_SESSION_RENEWCAPS,
+ CEPH_SESSION_STALE,
+ CEPH_SESSION_RECALL_STATE,
+ CEPH_SESSION_FLUSHMSG,
+ CEPH_SESSION_FLUSHMSG_ACK,
+ CEPH_SESSION_FORCE_RO,
+};
+
+extern const char *ceph_session_op_name(int op);
+
+struct ceph_mds_session_head {
+ __le32 op;
+ __le64 seq;
+ struct ceph_timespec stamp;
+ __le32 max_caps, max_leases;
+} __attribute__ ((packed));
+
+/* client_request */
+/*
+ * metadata ops.
+ * & 0x001000 -> write op
+ * & 0x010000 -> follow symlink (e.g. stat(), not lstat()).
+ * & 0x100000 -> use weird ino/path trace
+ */
+#define CEPH_MDS_OP_WRITE 0x001000
+enum {
+ CEPH_MDS_OP_LOOKUP = 0x00100,
+ CEPH_MDS_OP_GETATTR = 0x00101,
+ CEPH_MDS_OP_LOOKUPHASH = 0x00102,
+ CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
+ CEPH_MDS_OP_LOOKUPINO = 0x00104,
+ CEPH_MDS_OP_LOOKUPNAME = 0x00105,
+
+ CEPH_MDS_OP_SETXATTR = 0x01105,
+ CEPH_MDS_OP_RMXATTR = 0x01106,
+ CEPH_MDS_OP_SETLAYOUT = 0x01107,
+ CEPH_MDS_OP_SETATTR = 0x01108,
+ CEPH_MDS_OP_SETFILELOCK= 0x01109,
+ CEPH_MDS_OP_GETFILELOCK= 0x00110,
+ CEPH_MDS_OP_SETDIRLAYOUT=0x0110a,
+
+ CEPH_MDS_OP_MKNOD = 0x01201,
+ CEPH_MDS_OP_LINK = 0x01202,
+ CEPH_MDS_OP_UNLINK = 0x01203,
+ CEPH_MDS_OP_RENAME = 0x01204,
+ CEPH_MDS_OP_MKDIR = 0x01220,
+ CEPH_MDS_OP_RMDIR = 0x01221,
+ CEPH_MDS_OP_SYMLINK = 0x01222,
+
+ CEPH_MDS_OP_CREATE = 0x01301,
+ CEPH_MDS_OP_OPEN = 0x00302,
+ CEPH_MDS_OP_READDIR = 0x00305,
+
+ CEPH_MDS_OP_LOOKUPSNAP = 0x00400,
+ CEPH_MDS_OP_MKSNAP = 0x01400,
+ CEPH_MDS_OP_RMSNAP = 0x01401,
+ CEPH_MDS_OP_LSSNAP = 0x00402,
+ CEPH_MDS_OP_RENAMESNAP = 0x01403,
+};
+
+extern const char *ceph_mds_op_name(int op);
+
+
+#define CEPH_SETATTR_MODE 1
+#define CEPH_SETATTR_UID 2
+#define CEPH_SETATTR_GID 4
+#define CEPH_SETATTR_MTIME 8
+#define CEPH_SETATTR_ATIME 16
+#define CEPH_SETATTR_SIZE 32
+#define CEPH_SETATTR_CTIME 64
+
+/*
+ * Ceph setxattr request flags.
+ */
+#define CEPH_XATTR_CREATE (1 << 0)
+#define CEPH_XATTR_REPLACE (1 << 1)
+#define CEPH_XATTR_REMOVE (1 << 31)
+
+union ceph_mds_request_args {
+ struct {
+ __le32 mask; /* CEPH_CAP_* */
+ } __attribute__ ((packed)) getattr;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ } __attribute__ ((packed)) setattr;
+ struct {
+ __le32 frag; /* which dir fragment */
+ __le32 max_entries; /* how many dentries to grab */
+ __le32 max_bytes;
+ } __attribute__ ((packed)) readdir;
+ struct {
+ __le32 mode;
+ __le32 rdev;
+ } __attribute__ ((packed)) mknod;
+ struct {
+ __le32 mode;
+ } __attribute__ ((packed)) mkdir;
+ struct {
+ __le32 flags;
+ __le32 mode;
+ __le32 stripe_unit; /* layout for newly created file */
+ __le32 stripe_count; /* ... */
+ __le32 object_size;
+ __le32 file_replication;
+ __le32 unused; /* used to be preferred osd */
+ } __attribute__ ((packed)) open;
+ struct {
+ __le32 flags;
+ } __attribute__ ((packed)) setxattr;
+ struct {
+ struct ceph_file_layout layout;
+ } __attribute__ ((packed)) setlayout;
+ struct {
+ __u8 rule; /* currently fcntl or flock */
+ __u8 type; /* shared, exclusive, remove */
+ __le64 owner; /* owner of the lock */
+ __le64 pid; /* process id requesting the lock */
+ __le64 start; /* initial location to lock */
+ __le64 length; /* num bytes to lock from start */
+ __u8 wait; /* will caller wait for lock to become available? */
+ } __attribute__ ((packed)) filelock_change;
+} __attribute__ ((packed));
+
+#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
+#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
+
+struct ceph_mds_request_head {
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* count retry, fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args args;
+} __attribute__ ((packed));
+
+/* cap/lease release record */
+struct ceph_mds_request_release {
+ __le64 ino, cap_id; /* ino and unique cap id */
+ __le32 caps, wanted; /* new issued, wanted */
+ __le32 seq, issue_seq, mseq;
+ __le32 dname_seq; /* if releasing a dentry lease, a */
+ __le32 dname_len; /* string follows. */
+} __attribute__ ((packed));
+
+/* client reply */
+struct ceph_mds_reply_head {
+ __le32 op;
+ __le32 result;
+ __le32 mdsmap_epoch;
+ __u8 safe; /* true if committed to disk */
+ __u8 is_dentry, is_target; /* true if dentry, target inode records
+ are included with reply */
+} __attribute__ ((packed));
+
+/* one for each node split */
+struct ceph_frag_tree_split {
+ __le32 frag; /* this frag splits... */
+ __le32 by; /* ...by this many bits */
+} __attribute__ ((packed));
+
+struct ceph_frag_tree_head {
+ __le32 nsplits; /* num ceph_frag_tree_split records */
+ struct ceph_frag_tree_split splits[];
+} __attribute__ ((packed));
+
+/* capability issue, for bundling with mds reply */
+struct ceph_mds_reply_cap {
+ __le32 caps, wanted; /* caps issued, wanted */
+ __le64 cap_id;
+ __le32 seq, mseq;
+ __le64 realm; /* snap realm */
+ __u8 flags; /* CEPH_CAP_FLAG_* */
+} __attribute__ ((packed));
+
+#define CEPH_CAP_FLAG_AUTH (1 << 0) /* cap is issued by auth mds */
+#define CEPH_CAP_FLAG_RELEASE (1 << 1) /* release the cap */
+
+/* inode record, for bundling with mds reply */
+struct ceph_mds_reply_inode {
+ __le64 ino;
+ __le64 snapid;
+ __le32 rdev;
+ __le64 version; /* inode version */
+ __le64 xattr_version; /* version for xattr blob */
+ struct ceph_mds_reply_cap cap; /* caps issued for this inode */
+ struct ceph_file_layout layout;
+ struct ceph_timespec ctime, mtime, atime;
+ __le32 time_warp_seq;
+ __le64 size, max_size, truncate_size;
+ __le32 truncate_seq;
+ __le32 mode, uid, gid;
+ __le32 nlink;
+ __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */
+ struct ceph_timespec rctime;
+ struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */
+} __attribute__ ((packed));
+/* followed by frag array, symlink string, dir layout, xattr blob */
+
+/* reply_lease follows dname, and reply_inode */
+struct ceph_mds_reply_lease {
+ __le16 mask; /* lease type(s) */
+ __le32 duration_ms; /* lease duration */
+ __le32 seq;
+} __attribute__ ((packed));
+
+struct ceph_mds_reply_dirfrag {
+ __le32 frag; /* fragment */
+ __le32 auth; /* auth mds, if this is a delegation point */
+ __le32 ndist; /* number of mds' this is replicated on */
+ __le32 dist[];
+} __attribute__ ((packed));
+
+#define CEPH_LOCK_FCNTL 1
+#define CEPH_LOCK_FLOCK 2
+#define CEPH_LOCK_FCNTL_INTR 3
+#define CEPH_LOCK_FLOCK_INTR 4
+
+
+#define CEPH_LOCK_SHARED 1
+#define CEPH_LOCK_EXCL 2
+#define CEPH_LOCK_UNLOCK 4
+
+struct ceph_filelock {
+ __le64 start;/* file offset to start lock at */
+ __le64 length; /* num bytes to lock; 0 for all following start */
+ __le64 client; /* which client holds the lock */
+ __le64 owner; /* owner of the lock */
+ __le64 pid; /* process id holding the lock on the client */
+ __u8 type; /* shared lock, exclusive lock, or unlock */
+} __attribute__ ((packed));
+
+
+/* file access modes */
+#define CEPH_FILE_MODE_PIN 0
+#define CEPH_FILE_MODE_RD 1
+#define CEPH_FILE_MODE_WR 2
+#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
+#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
+#define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. mostly */
+
+int ceph_flags_to_mode(int flags);
+
+#define CEPH_INLINE_NONE ((__u64)-1)
+
+/* capability bits */
+#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */
+
+/* generic cap bits */
+#define CEPH_CAP_GSHARED 1 /* client can read */
+#define CEPH_CAP_GEXCL 2 /* client can read and update */
+#define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */
+#define CEPH_CAP_GRD 8 /* (file) client can read */
+#define CEPH_CAP_GWR 16 /* (file) client can write */
+#define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */
+#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */
+#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */
+
+#define CEPH_CAP_SIMPLE_BITS 2
+#define CEPH_CAP_FILE_BITS 8
+
+/* per-lock shift */
+#define CEPH_CAP_SAUTH 2
+#define CEPH_CAP_SLINK 4
+#define CEPH_CAP_SXATTR 6
+#define CEPH_CAP_SFILE 8
+#define CEPH_CAP_SFLOCK 20
+
+#define CEPH_CAP_BITS 22
+
+/* composed values */
+#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH)
+#define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH)
+#define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK)
+#define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK)
+#define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR)
+#define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR)
+#define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE)
+#define CEPH_CAP_FLOCK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK)
+#define CEPH_CAP_FLOCK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFLOCK)
+
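+/*
+ * For example, CEPH_CAP_FILE_SHARED is CEPH_CAP_GSHARED << CEPH_CAP_SFILE
+ * = 1 << 8 = 0x100, and CEPH_CAP_FILE_WR is CEPH_CAP_GWR << CEPH_CAP_SFILE
+ * = 16 << 8 = 0x1000; each generic G* bit is shifted into a lock's range
+ * by that lock's per-lock shift above.
+ */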
+
+/* cap masks (for getattr) */
+#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN
+#define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */
+#define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN
+#define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED
+#define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED
+#define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED
+#define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED
+#define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED
+#define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED
+#define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED
+#define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */
+#define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED
+#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \
+ CEPH_CAP_AUTH_SHARED | \
+ CEPH_CAP_LINK_SHARED | \
+ CEPH_CAP_FILE_SHARED | \
+ CEPH_CAP_XATTR_SHARED)
+#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
+ CEPH_CAP_FILE_RD)
+
+#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
+ CEPH_CAP_LINK_SHARED | \
+ CEPH_CAP_XATTR_SHARED | \
+ CEPH_CAP_FILE_SHARED)
+#define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \
+ CEPH_CAP_FILE_CACHE)
+
+#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \
+ CEPH_CAP_LINK_EXCL | \
+ CEPH_CAP_XATTR_EXCL | \
+ CEPH_CAP_FILE_EXCL)
+#define CEPH_CAP_ANY_FILE_RD (CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE | \
+ CEPH_CAP_FILE_SHARED)
+#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \
+ CEPH_CAP_FILE_EXCL)
+#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR)
+#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
+ CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \
+ CEPH_CAP_PIN)
+
+#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
+ CEPH_LOCK_IXATTR)
+
+int ceph_caps_for_mode(int mode);
+
+enum {
+ CEPH_CAP_OP_GRANT, /* mds->client grant */
+ CEPH_CAP_OP_REVOKE, /* mds->client revoke */
+ CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */
+ CEPH_CAP_OP_EXPORT, /* mds has exported the cap */
+ CEPH_CAP_OP_IMPORT, /* mds has imported the cap */
+ CEPH_CAP_OP_UPDATE, /* client->mds update */
+ CEPH_CAP_OP_DROP, /* client->mds drop cap bits */
+ CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */
+ CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */
+ CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */
+ CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */
+ CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */
+ CEPH_CAP_OP_RENEW, /* client->mds renewal request */
+};
+
+extern const char *ceph_cap_op_name(int op);
+
+/*
+ * caps message, used for capability callbacks, acks, requests, etc.
+ */
+struct ceph_mds_caps {
+ __le32 op; /* CEPH_CAP_OP_* */
+ __le64 ino, realm;
+ __le64 cap_id;
+ __le32 seq, issue_seq;
+ __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */
+ __le32 migrate_seq;
+ __le64 snap_follows;
+ __le32 snap_trace_len;
+
+ /* authlock */
+ __le32 uid, gid, mode;
+
+ /* linklock */
+ __le32 nlink;
+
+ /* xattrlock */
+ __le32 xattr_len;
+ __le64 xattr_version;
+
+ /* filelock */
+ __le64 size, max_size, truncate_size;
+ __le32 truncate_seq;
+ struct ceph_timespec mtime, atime, ctime;
+ struct ceph_file_layout layout;
+ __le32 time_warp_seq;
+} __attribute__ ((packed));
+
+struct ceph_mds_cap_peer {
+ __le64 cap_id;
+ __le32 seq;
+ __le32 mseq;
+ __le32 mds;
+ __u8 flags;
+} __attribute__ ((packed));
+
+/* cap release msg head */
+struct ceph_mds_cap_release {
+ __le32 num; /* number of cap_items that follow */
+} __attribute__ ((packed));
+
+struct ceph_mds_cap_item {
+ __le64 ino;
+ __le64 cap_id;
+ __le32 migrate_seq, seq;
+} __attribute__ ((packed));
+
+#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */
+#define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */
+#define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */
+#define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */
+
+extern const char *ceph_lease_op_name(int o);
+
+/* lease msg header */
+struct ceph_mds_lease {
+ __u8 action; /* CEPH_MDS_LEASE_* */
+ __le16 mask; /* which lease */
+ __le64 ino;
+ __le64 first, last; /* snap range */
+ __le32 seq;
+ __le32 duration_ms; /* duration of renewal */
+} __attribute__ ((packed));
+/* followed by a __le32+string for dname */
+
+/* client reconnect */
+struct ceph_mds_cap_reconnect {
+ __le64 cap_id;
+ __le32 wanted;
+ __le32 issued;
+ __le64 snaprealm;
+ __le64 pathbase; /* base ino for our path to this ino */
+ __le32 flock_len; /* size of flock state blob, if any */
+} __attribute__ ((packed));
+/* followed by flock blob */
+
+struct ceph_mds_cap_reconnect_v1 {
+ __le64 cap_id;
+ __le32 wanted;
+ __le32 issued;
+ __le64 size;
+ struct ceph_timespec mtime, atime;
+ __le64 snaprealm;
+ __le64 pathbase; /* base ino for our path to this ino */
+} __attribute__ ((packed));
+
+struct ceph_mds_snaprealm_reconnect {
+ __le64 ino; /* snap realm base */
+ __le64 seq; /* snap seq for this snap realm */
+ __le64 parent; /* parent realm */
+} __attribute__ ((packed));
+
+/*
+ * snaps
+ */
+enum {
+ CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */
+ CEPH_SNAP_OP_CREATE,
+ CEPH_SNAP_OP_DESTROY,
+ CEPH_SNAP_OP_SPLIT,
+};
+
+extern const char *ceph_snap_op_name(int o);
+
+/* snap msg header */
+struct ceph_mds_snap_head {
+ __le32 op; /* CEPH_SNAP_OP_* */
+ __le64 split; /* ino to split off, if any */
+ __le32 num_split_inos; /* # inos belonging to new child realm */
+ __le32 num_split_realms; /* # child realms under new child realm */
+ __le32 trace_len; /* size of snap trace blob */
+} __attribute__ ((packed));
+/* followed by split ino list, then split realms, then the trace blob */
+
+/*
+ * encode info about a snaprealm, as viewed by a client
+ */
+struct ceph_mds_snap_realm {
+ __le64 ino; /* ino */
+ __le64 created; /* snap: when created */
+ __le64 parent; /* ino: parent realm */
+ __le64 parent_since; /* snap: same parent since */
+ __le64 seq; /* snap: version */
+ __le32 num_snaps;
+ __le32 num_prior_parent_snaps;
+} __attribute__ ((packed));
+/* followed by my snap list, then prior parent snap list */
+
+#endif
diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h
new file mode 100644
index 000000000..d099c3f90
--- /dev/null
+++ b/include/linux/ceph/ceph_hash.h
@@ -0,0 +1,13 @@
+#ifndef FS_CEPH_HASH_H
+#define FS_CEPH_HASH_H
+
+#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */
+#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */
+
+extern unsigned ceph_str_hash_linux(const char *s, unsigned len);
+extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len);
+
+extern unsigned ceph_str_hash(int type, const char *s, unsigned len);
+extern const char *ceph_str_hash_name(int type);
+
+#endif
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h
new file mode 100644
index 000000000..29cf897cc
--- /dev/null
+++ b/include/linux/ceph/debugfs.h
@@ -0,0 +1,27 @@
+#ifndef _FS_CEPH_DEBUGFS_H
+#define _FS_CEPH_DEBUGFS_H
+
+#include <linux/ceph/ceph_debug.h>
+#include <linux/ceph/types.h>
+
+#define CEPH_DEFINE_SHOW_FUNC(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, name, inode->i_private); \
+} \
+ \
+static const struct file_operations name##_fops = { \
+ .open = name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
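+/*
+ * Typical use (a sketch; monmap_show is a hypothetical seq_file show
+ * function):
+ *
+ *   static int monmap_show(struct seq_file *s, void *p)
+ *   {
+ *           ...
+ *           return 0;
+ *   }
+ *   CEPH_DEFINE_SHOW_FUNC(monmap_show)
+ *
+ * which defines monmap_show_open() and monmap_show_fops for use with
+ * debugfs_create_file().
+ */
+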
+/* debugfs.c */
+extern int ceph_debugfs_init(void);
+extern void ceph_debugfs_cleanup(void);
+extern int ceph_debugfs_client_init(struct ceph_client *client);
+extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
+
+#endif
+
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
new file mode 100644
index 000000000..a6ef9cc26
--- /dev/null
+++ b/include/linux/ceph/decode.h
@@ -0,0 +1,259 @@
+#ifndef __CEPH_DECODE_H
+#define __CEPH_DECODE_H
+
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/time.h>
+#include <asm/unaligned.h>
+
+#include <linux/ceph/types.h>
+
+/*
+ * in all cases,
+ * void **p pointer to position pointer
+ * void *end pointer to end of buffer (last byte + 1)
+ */
+
+static inline u64 ceph_decode_64(void **p)
+{
+ u64 v = get_unaligned_le64(*p);
+ *p += sizeof(u64);
+ return v;
+}
+static inline u32 ceph_decode_32(void **p)
+{
+ u32 v = get_unaligned_le32(*p);
+ *p += sizeof(u32);
+ return v;
+}
+static inline u16 ceph_decode_16(void **p)
+{
+ u16 v = get_unaligned_le16(*p);
+ *p += sizeof(u16);
+ return v;
+}
+static inline u8 ceph_decode_8(void **p)
+{
+ u8 v = *(u8 *)*p;
+ (*p)++;
+ return v;
+}
+static inline void ceph_decode_copy(void **p, void *pv, size_t n)
+{
+ memcpy(pv, *p, n);
+ *p += n;
+}
+
+/*
+ * bounds check input.
+ */
+static inline int ceph_has_room(void **p, void *end, size_t n)
+{
+ return end >= *p && n <= end - *p;
+}
+
+#define ceph_decode_need(p, end, n, bad) \
+ do { \
+ if (!likely(ceph_has_room(p, end, n))) \
+ goto bad; \
+ } while (0)
+
+#define ceph_decode_64_safe(p, end, v, bad) \
+ do { \
+ ceph_decode_need(p, end, sizeof(u64), bad); \
+ v = ceph_decode_64(p); \
+ } while (0)
+#define ceph_decode_32_safe(p, end, v, bad) \
+ do { \
+ ceph_decode_need(p, end, sizeof(u32), bad); \
+ v = ceph_decode_32(p); \
+ } while (0)
+#define ceph_decode_16_safe(p, end, v, bad) \
+ do { \
+ ceph_decode_need(p, end, sizeof(u16), bad); \
+ v = ceph_decode_16(p); \
+ } while (0)
+#define ceph_decode_8_safe(p, end, v, bad) \
+ do { \
+ ceph_decode_need(p, end, sizeof(u8), bad); \
+ v = ceph_decode_8(p); \
+ } while (0)
+
+#define ceph_decode_copy_safe(p, end, pv, n, bad) \
+ do { \
+ ceph_decode_need(p, end, n, bad); \
+ ceph_decode_copy(p, pv, n); \
+ } while (0)
+
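+/*
+ * Typical decode pattern (an illustrative sketch; p/end/len/ino are
+ * hypothetical caller locals): bounds-check and advance through the
+ * buffer, jumping to a local label on truncated input.
+ *
+ *   void *p = buf, *end = buf + buf_len;
+ *   u32 len;
+ *   u64 ino;
+ *
+ *   ceph_decode_32_safe(&p, end, len, bad);
+ *   ceph_decode_64_safe(&p, end, ino, bad);
+ *   ceph_decode_need(&p, end, len, bad);
+ *   ...
+ *   return 0;
+ * bad:
+ *   return -ERANGE;
+ */
+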
+/*
+ * Allocate a buffer big enough to hold the wire-encoded string, and
+ * decode the string into it. The resulting string will always be
+ * terminated with '\0'. If successful, *p will be advanced
+ * past the decoded data. Also, if lenp is not a null pointer, the
+ * length (not including the terminating '\0') will be recorded in
+ * *lenp. Note that a zero-length string is a valid return value.
+ *
+ * Returns a pointer to the newly-allocated string buffer, or a
+ * pointer-coded errno if an error occurs. Neither *p nor *lenp
+ * will have been updated if an error is returned.
+ *
+ * There are two possible failures:
+ * - converting the string would require accessing memory at or
+ * beyond the "end" pointer provided (-ERANGE)
+ * - memory could not be allocated for the result (-ENOMEM)
+ */
+static inline char *ceph_extract_encoded_string(void **p, void *end,
+ size_t *lenp, gfp_t gfp)
+{
+ u32 len;
+ void *sp = *p;
+ char *buf;
+
+ ceph_decode_32_safe(&sp, end, len, bad);
+ if (!ceph_has_room(&sp, end, len))
+ goto bad;
+
+ buf = kmalloc(len + 1, gfp);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ if (len)
+ memcpy(buf, sp, len);
+ buf[len] = '\0';
+
+ *p = (char *) *p + sizeof (u32) + len;
+ if (lenp)
+ *lenp = (size_t) len;
+
+ return buf;
+
+bad:
+ return ERR_PTR(-ERANGE);
+}
+
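+/*
+ * Typical use (a sketch; "name" and "len" are caller locals): the return
+ * value is either a kmalloc'd, NUL-terminated copy of the encoded string
+ * or an ERR_PTR()-encoded errno.
+ *
+ *   name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
+ *   if (IS_ERR(name))
+ *           return PTR_ERR(name);
+ *   ...
+ *   kfree(name);
+ */
+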
+/*
+ * struct ceph_timespec <-> struct timespec
+ */
+static inline void ceph_decode_timespec(struct timespec *ts,
+ const struct ceph_timespec *tv)
+{
+ ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec);
+ ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec);
+}
+static inline void ceph_encode_timespec(struct ceph_timespec *tv,
+ const struct timespec *ts)
+{
+ tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
+ tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
+}
+
+/*
+ * sockaddr_storage <-> ceph_sockaddr
+ */
+static inline void ceph_encode_addr(struct ceph_entity_addr *a)
+{
+ __be16 ss_family = htons(a->in_addr.ss_family);
+ a->in_addr.ss_family = *(__u16 *)&ss_family;
+}
+static inline void ceph_decode_addr(struct ceph_entity_addr *a)
+{
+ __be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
+ a->in_addr.ss_family = ntohs(ss_family);
+ WARN_ON(a->in_addr.ss_family == 512);
+}
+
+/*
+ * encoders
+ */
+static inline void ceph_encode_64(void **p, u64 v)
+{
+ put_unaligned_le64(v, (__le64 *)*p);
+ *p += sizeof(u64);
+}
+static inline void ceph_encode_32(void **p, u32 v)
+{
+ put_unaligned_le32(v, (__le32 *)*p);
+ *p += sizeof(u32);
+}
+static inline void ceph_encode_16(void **p, u16 v)
+{
+ put_unaligned_le16(v, (__le16 *)*p);
+ *p += sizeof(u16);
+}
+static inline void ceph_encode_8(void **p, u8 v)
+{
+ *(u8 *)*p = v;
+ (*p)++;
+}
+static inline void ceph_encode_copy(void **p, const void *s, int len)
+{
+ memcpy(*p, s, len);
+ *p += len;
+}
+
+/*
+ * filepath, string encoders
+ */
+static inline void ceph_encode_filepath(void **p, void *end,
+ u64 ino, const char *path)
+{
+ u32 len = path ? strlen(path) : 0;
+ BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end);
+ ceph_encode_8(p, 1);
+ ceph_encode_64(p, ino);
+ ceph_encode_32(p, len);
+ if (len)
+ memcpy(*p, path, len);
+ *p += len;
+}
+
+static inline void ceph_encode_string(void **p, void *end,
+ const char *s, u32 len)
+{
+ BUG_ON(*p + sizeof(len) + len > end);
+ ceph_encode_32(p, len);
+ if (len)
+ memcpy(*p, s, len);
+ *p += len;
+}
+
+#define ceph_encode_need(p, end, n, bad) \
+ do { \
+ if (!likely(ceph_has_room(p, end, n))) \
+ goto bad; \
+ } while (0)
+
+#define ceph_encode_64_safe(p, end, v, bad) \
+ do { \
+ ceph_encode_need(p, end, sizeof(u64), bad); \
+ ceph_encode_64(p, v); \
+ } while (0)
+#define ceph_encode_32_safe(p, end, v, bad) \
+ do { \
+ ceph_encode_need(p, end, sizeof(u32), bad); \
+ ceph_encode_32(p, v); \
+ } while (0)
+#define ceph_encode_16_safe(p, end, v, bad) \
+ do { \
+ ceph_encode_need(p, end, sizeof(u16), bad); \
+ ceph_encode_16(p, v); \
+ } while (0)
+#define ceph_encode_8_safe(p, end, v, bad) \
+ do { \
+ ceph_encode_need(p, end, sizeof(u8), bad); \
+ ceph_encode_8(p, v); \
+ } while (0)
+
+#define ceph_encode_copy_safe(p, end, pv, n, bad) \
+ do { \
+ ceph_encode_need(p, end, n, bad); \
+ ceph_encode_copy(p, pv, n); \
+ } while (0)
+#define ceph_encode_string_safe(p, end, s, n, bad) \
+ do { \
+ ceph_encode_need(p, end, n, bad); \
+ ceph_encode_string(p, end, s, n); \
+ } while (0)
+
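+/*
+ * Typical encode pattern (a sketch mirroring the decode helpers; val and
+ * fsid are hypothetical caller locals):
+ *
+ *   ceph_encode_32_safe(&p, end, val, bad);
+ *   ceph_encode_copy_safe(&p, end, &fsid, sizeof(fsid), bad);
+ *   ...
+ * bad:
+ *   return -ERANGE;
+ */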
+
+#endif
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
new file mode 100644
index 000000000..30f92cefa
--- /dev/null
+++ b/include/linux/ceph/libceph.h
@@ -0,0 +1,230 @@
+#ifndef _FS_CEPH_LIBCEPH_H
+#define _FS_CEPH_LIBCEPH_H
+
+#include <linux/ceph/ceph_debug.h>
+
+#include <asm/unaligned.h>
+#include <linux/backing-dev.h>
+#include <linux/completion.h>
+#include <linux/exportfs.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/mempool.h>
+#include <linux/pagemap.h>
+#include <linux/wait.h>
+#include <linux/writeback.h>
+#include <linux/slab.h>
+
+#include <linux/ceph/types.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/msgpool.h>
+#include <linux/ceph/mon_client.h>
+#include <linux/ceph/osd_client.h>
+#include <linux/ceph/ceph_fs.h>
+
+/*
+ * mount options
+ */
+#define CEPH_OPT_FSID (1<<0)
+#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
+#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
+#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
+#define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require cephx message signature */
+#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */
+
+#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
+
+#define ceph_set_opt(client, opt) \
+ (client)->options->flags |= CEPH_OPT_##opt;
+#define ceph_test_opt(client, opt) \
+ (!!((client)->options->flags & CEPH_OPT_##opt))
+
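+/*
+ * The opt macros paste the CEPH_OPT_ prefix onto their argument, e.g.
+ * (a sketch):
+ *
+ *   ceph_set_opt(client, NOSHARE);
+ *   if (ceph_test_opt(client, NOCRC))
+ *           ...
+ */
+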
+struct ceph_options {
+ int flags;
+ struct ceph_fsid fsid;
+ struct ceph_entity_addr my_addr;
+ int mount_timeout;
+ int osd_idle_ttl;
+ int osd_keepalive_timeout;
+
+ /*
+ * any type that can't be simply compared or doesn't need
+ * to be compared should go beyond this point,
+ * ceph_compare_options() should be updated accordingly
+ */
+
+ struct ceph_entity_addr *mon_addr; /* should be the first
+ pointer type of args */
+ int num_mon;
+ char *name;
+ struct ceph_crypto_key *key;
+};
+
+/*
+ * defaults
+ */
+#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
+#define CEPH_OSD_KEEPALIVE_DEFAULT 5
+#define CEPH_OSD_IDLE_TTL_DEFAULT 60
+
+#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
+#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
+#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
+
+#define CEPH_AUTH_NAME_DEFAULT "guest"
+
+/*
+ * Delay telling the MDS we no longer want caps, in case we reopen
+ * the file. Delay a minimum amount of time, even if we send a cap
+ * message for some other reason. Otherwise, take the opportunity to
+ * update the mds to avoid sending another message later.
+ */
+#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
+#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
+
+#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
+
+/* mount state */
+enum {
+ CEPH_MOUNT_MOUNTING,
+ CEPH_MOUNT_MOUNTED,
+ CEPH_MOUNT_UNMOUNTING,
+ CEPH_MOUNT_UNMOUNTED,
+ CEPH_MOUNT_SHUTDOWN,
+};
+
+/*
+ * subtract jiffies
+ */
+static inline unsigned long time_sub(unsigned long a, unsigned long b)
+{
+ BUG_ON(time_after(b, a));
+ return (long)a - (long)b;
+}
+
+struct ceph_mds_client;
+
+/*
+ * per client state
+ *
+ * possibly shared by multiple mount points, if they are
+ * mounting the same ceph filesystem/cluster.
+ */
+struct ceph_client {
+ struct ceph_fsid fsid;
+ bool have_fsid;
+
+ void *private;
+
+ struct ceph_options *options;
+
+ struct mutex mount_mutex; /* serialize mount attempts */
+ wait_queue_head_t auth_wq;
+ int auth_err;
+
+ int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *);
+
+ u64 supported_features;
+ u64 required_features;
+
+ struct ceph_messenger msgr; /* messenger instance */
+ struct ceph_mon_client monc;
+ struct ceph_osd_client osdc;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_dir;
+ struct dentry *debugfs_monmap;
+ struct dentry *debugfs_osdmap;
+ struct dentry *debugfs_options;
+#endif
+};
+
+
+
+/*
+ * snapshots
+ */
+
+/*
+ * A "snap context" is the set of existing snapshots when we
+ * write data. It is used by the OSD to guide its COW behavior.
+ *
+ * The ceph_snap_context is refcounted, and attached to each dirty
+ * page, indicating which context the dirty data belonged when it was
+ * dirtied.
+ */
+struct ceph_snap_context {
+ atomic_t nref;
+ u64 seq;
+ u32 num_snaps;
+ u64 snaps[];
+};
+
+extern struct ceph_snap_context *ceph_create_snap_context(u32 snap_count,
+ gfp_t gfp_flags);
+extern struct ceph_snap_context *ceph_get_snap_context(
+ struct ceph_snap_context *sc);
+extern void ceph_put_snap_context(struct ceph_snap_context *sc);
+
+/*
+ * calculate the number of pages a given length and offset map onto,
+ * if we align the data.
+ */
+static inline int calc_pages_for(u64 off, u64 len)
+{
+ return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
+ (off >> PAGE_CACHE_SHIFT);
+}
+
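+/*
+ * For example, with 4 KB pages, calc_pages_for(100, 5000) covers bytes
+ * 100..5099, which touch pages 0 and 1, so it returns 2.
+ */
+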
+extern struct kmem_cache *ceph_inode_cachep;
+extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_dentry_cachep;
+extern struct kmem_cache *ceph_file_cachep;
+
+/* ceph_common.c */
+extern bool libceph_compatible(void *data);
+
+extern const char *ceph_msg_type_name(int type);
+extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
+extern void *ceph_kvmalloc(size_t size, gfp_t flags);
+
+extern struct ceph_options *ceph_parse_options(char *options,
+ const char *dev_name, const char *dev_name_end,
+ int (*parse_extra_token)(char *c, void *private),
+ void *private);
+int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
+extern void ceph_destroy_options(struct ceph_options *opt);
+extern int ceph_compare_options(struct ceph_options *new_opt,
+ struct ceph_client *client);
+extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
+ void *private,
+ u64 supported_features,
+ u64 required_features);
+extern u64 ceph_client_id(struct ceph_client *client);
+extern void ceph_destroy_client(struct ceph_client *client);
+extern int __ceph_open_session(struct ceph_client *client,
+ unsigned long started);
+extern int ceph_open_session(struct ceph_client *client);
+
+/* pagevec.c */
+extern void ceph_release_page_vector(struct page **pages, int num_pages);
+
+extern struct page **ceph_get_direct_page_vector(const void __user *data,
+ int num_pages,
+ bool write_page);
+extern void ceph_put_page_vector(struct page **pages, int num_pages,
+ bool dirty);
+extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
+extern int ceph_copy_user_to_page_vector(struct page **pages,
+ const void __user *data,
+ loff_t off, size_t len);
+extern void ceph_copy_to_page_vector(struct page **pages,
+ const void *data,
+ loff_t off, size_t len);
+extern void ceph_copy_from_page_vector(struct page **pages,
+ void *data,
+ loff_t off, size_t len);
+extern void ceph_zero_page_vector_range(int off, int len, struct page **pages);
+
+
+#endif /* _FS_CEPH_SUPER_H */
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
new file mode 100644
index 000000000..87ed09f54
--- /dev/null
+++ b/include/linux/ceph/mdsmap.h
@@ -0,0 +1,63 @@
+#ifndef _FS_CEPH_MDSMAP_H
+#define _FS_CEPH_MDSMAP_H
+
+#include <linux/bug.h>
+#include <linux/ceph/types.h>
+
+/*
+ * mds map - describe servers in the mds cluster.
+ *
+ * we limit fields to those the client actually cares about
+ */
+struct ceph_mds_info {
+ u64 global_id;
+ struct ceph_entity_addr addr;
+ s32 state;
+ int num_export_targets;
+ bool laggy;
+ u32 *export_targets;
+};
+
+struct ceph_mdsmap {
+ u32 m_epoch, m_client_epoch, m_last_failure;
+ u32 m_root;
+ u32 m_session_timeout; /* seconds */
+ u32 m_session_autoclose; /* seconds */
+ u64 m_max_file_size;
+ u32 m_max_mds; /* size of m_addr, m_state arrays */
+ struct ceph_mds_info *m_info;
+
+ /* which object pools file data can be stored in */
+ int m_num_data_pg_pools;
+ u64 *m_data_pg_pools;
+ u64 m_cas_pg_pool;
+};
+
+static inline struct ceph_entity_addr *
+ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
+{
+ if (w >= m->m_max_mds)
+ return NULL;
+ return &m->m_info[w].addr;
+}
+
+static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
+{
+ BUG_ON(w < 0);
+ if (w >= m->m_max_mds)
+ return CEPH_MDS_STATE_DNE;
+ return m->m_info[w].state;
+}
+
+static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
+{
+ if (w >= 0 && w < m->m_max_mds)
+ return m->m_info[w].laggy;
+ return false;
+}
+
+extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
+extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
+extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+
+#endif
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
new file mode 100644
index 000000000..e15499422
--- /dev/null
+++ b/include/linux/ceph/messenger.h
@@ -0,0 +1,303 @@
+#ifndef __FS_CEPH_MESSENGER_H
+#define __FS_CEPH_MESSENGER_H
+
+#include <linux/blk_types.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/radix-tree.h>
+#include <linux/uio.h>
+#include <linux/workqueue.h>
+
+#include <linux/ceph/types.h>
+#include <linux/ceph/buffer.h>
+
+struct ceph_msg;
+struct ceph_connection;
+
+/*
+ * Ceph defines these callbacks for handling connection events.
+ */
+struct ceph_connection_operations {
+ struct ceph_connection *(*get)(struct ceph_connection *);
+ void (*put)(struct ceph_connection *);
+
+ /* handle an incoming message. */
+ void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
+
+ /* authorize an outgoing connection */
+ struct ceph_auth_handshake *(*get_authorizer) (
+ struct ceph_connection *con,
+ int *proto, int force_new);
+ int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
+ int (*invalidate_authorizer)(struct ceph_connection *con);
+
+ /* there was some error on the socket (disconnect, whatever) */
+ void (*fault) (struct ceph_connection *con);
+
+ /* a remote host has terminated a message exchange session, and messages
+ * we sent (or they tried to send us) may be lost. */
+ void (*peer_reset) (struct ceph_connection *con);
+
+ struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
+ struct ceph_msg_header *hdr,
+ int *skip);
+ int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg);
+
+ int (*check_message_signature) (struct ceph_connection *con,
+ struct ceph_msg *msg);
+};
+
+/* use format string %s%d */
+#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num)
+
+struct ceph_messenger {
+ struct ceph_entity_inst inst; /* my name+address */
+ struct ceph_entity_addr my_enc_addr;
+
+ atomic_t stopping;
+ bool nocrc;
+ bool tcp_nodelay;
+
+ /*
+ * the global_seq counts connections i (attempt to) initiate
+ * in order to disambiguate certain connect race conditions.
+ */
+ u32 global_seq;
+ spinlock_t global_seq_lock;
+
+ u64 supported_features;
+ u64 required_features;
+};
+
+enum ceph_msg_data_type {
+ CEPH_MSG_DATA_NONE, /* message contains no data payload */
+ CEPH_MSG_DATA_PAGES, /* data source/destination is a page array */
+ CEPH_MSG_DATA_PAGELIST, /* data source/destination is a pagelist */
+#ifdef CONFIG_BLOCK
+ CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
+#endif /* CONFIG_BLOCK */
+};
+
+static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type)
+{
+ switch (type) {
+ case CEPH_MSG_DATA_NONE:
+ case CEPH_MSG_DATA_PAGES:
+ case CEPH_MSG_DATA_PAGELIST:
+#ifdef CONFIG_BLOCK
+ case CEPH_MSG_DATA_BIO:
+#endif /* CONFIG_BLOCK */
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct ceph_msg_data {
+ struct list_head links; /* ceph_msg->data */
+ enum ceph_msg_data_type type;
+ union {
+#ifdef CONFIG_BLOCK
+ struct {
+ struct bio *bio;
+ size_t bio_length;
+ };
+#endif /* CONFIG_BLOCK */
+ struct {
+ struct page **pages; /* NOT OWNER. */
+ size_t length; /* total # bytes */
+ unsigned int alignment; /* first page */
+ };
+ struct ceph_pagelist *pagelist;
+ };
+};
+
+struct ceph_msg_data_cursor {
+ size_t total_resid; /* across all data items */
+ struct list_head *data_head; /* = &ceph_msg->data */
+
+ struct ceph_msg_data *data; /* current data item */
+ size_t resid; /* bytes not yet consumed */
+ bool last_piece; /* current is last piece */
+ bool need_crc; /* crc update needed */
+ union {
+#ifdef CONFIG_BLOCK
+ struct { /* bio */
+ struct bio *bio; /* bio from list */
+ struct bvec_iter bvec_iter;
+ };
+#endif /* CONFIG_BLOCK */
+ struct { /* pages */
+ unsigned int page_offset; /* offset in page */
+ unsigned short page_index; /* index in array */
+ unsigned short page_count; /* pages in array */
+ };
+ struct { /* pagelist */
+ struct page *page; /* page from list */
+ size_t offset; /* bytes from list */
+ };
+ };
+};
+
+/*
+ * a single message. it contains a header (src, dest, message type, etc.),
+ * footer (crc values, mainly), a "front" message body, and possibly a
+ * data payload (stored in some number of pages).
+ */
+struct ceph_msg {
+ struct ceph_msg_header hdr; /* header */
+ union {
+ struct ceph_msg_footer footer; /* footer */
+ struct ceph_msg_footer_old old_footer; /* old format footer */
+ };
+ struct kvec front; /* unaligned blobs of message */
+ struct ceph_buffer *middle;
+
+ size_t data_length;
+ struct list_head data;
+ struct ceph_msg_data_cursor cursor;
+
+ struct ceph_connection *con;
+ struct list_head list_head; /* links for connection lists */
+
+ struct kref kref;
+ bool more_to_follow;
+ bool needs_out_seq;
+ int front_alloc_len;
+ unsigned long ack_stamp; /* tx: when we were acked */
+
+ struct ceph_msgpool *pool;
+};
+
+/* ceph connection fault delay defaults, for exponential backoff */
+#define BASE_DELAY_INTERVAL (HZ/2)
+#define MAX_DELAY_INTERVAL (5 * 60 * HZ)
+
+/*
+ * A single connection with another host.
+ *
+ * We maintain a queue of outgoing messages, and some session state to
+ * ensure that we can preserve the lossless, ordered delivery of
+ * messages in the case of a TCP disconnect.
+ */
+struct ceph_connection {
+ void *private;
+
+ const struct ceph_connection_operations *ops;
+
+ struct ceph_messenger *msgr;
+
+ atomic_t sock_state;
+ struct socket *sock;
+ struct ceph_entity_addr peer_addr; /* peer address */
+ struct ceph_entity_addr peer_addr_for_me;
+
+ unsigned long flags;
+ unsigned long state;
+ const char *error_msg; /* error message, if any */
+
+ struct ceph_entity_name peer_name; /* peer name */
+
+ u64 peer_features;
+ u32 connect_seq; /* identify the most recent connection
+ attempt for this connection, client */
+ u32 peer_global_seq; /* peer's global seq for this connection */
+
+ int auth_retry; /* true if we need a newer authorizer */
+ void *auth_reply_buf; /* where to put the authorizer reply */
+ int auth_reply_buf_len;
+
+ struct mutex mutex;
+
+ /* out queue */
+ struct list_head out_queue;
+ struct list_head out_sent; /* sending or sent but unacked */
+ u64 out_seq; /* last message queued for send */
+
+ u64 in_seq, in_seq_acked; /* last message received, acked */
+
+ /* connection negotiation temps */
+ char in_banner[CEPH_BANNER_MAX_LEN];
+ struct ceph_msg_connect out_connect;
+ struct ceph_msg_connect_reply in_reply;
+ struct ceph_entity_addr actual_peer_addr;
+
+ /* message out temps */
+ struct ceph_msg *out_msg; /* sending message (== tail of
+ out_sent) */
+ bool out_msg_done;
+
+ struct kvec out_kvec[8], /* sending header/footer data */
+ *out_kvec_cur;
+ int out_kvec_left; /* kvec's left in out_kvec */
+ int out_skip; /* skip this many bytes */
+ int out_kvec_bytes; /* total bytes left */
+ bool out_kvec_is_msg; /* kvec refers to out_msg */
+ int out_more; /* there is more data after the kvecs */
+ __le64 out_temp_ack; /* for writing an ack */
+
+ /* message in temps */
+ struct ceph_msg_header in_hdr;
+ struct ceph_msg *in_msg;
+ u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
+
+ char in_tag; /* protocol control byte */
+ int in_base_pos; /* bytes read */
+ __le64 in_temp_ack; /* for reading an ack */
+
+ struct delayed_work work; /* send|recv work */
+ unsigned long delay; /* current delay interval */
+};
+
+
+extern const char *ceph_pr_addr(const struct sockaddr_storage *ss);
+extern int ceph_parse_ips(const char *c, const char *end,
+ struct ceph_entity_addr *addr,
+ int max_count, int *count);
+
+
+extern int ceph_msgr_init(void);
+extern void ceph_msgr_exit(void);
+extern void ceph_msgr_flush(void);
+
+extern void ceph_messenger_init(struct ceph_messenger *msgr,
+ struct ceph_entity_addr *myaddr,
+ u64 supported_features,
+ u64 required_features,
+ bool nocrc,
+ bool tcp_nodelay);
+
+extern void ceph_con_init(struct ceph_connection *con, void *private,
+ const struct ceph_connection_operations *ops,
+ struct ceph_messenger *msgr);
+extern void ceph_con_open(struct ceph_connection *con,
+ __u8 entity_type, __u64 entity_num,
+ struct ceph_entity_addr *addr);
+extern bool ceph_con_opened(struct ceph_connection *con);
+extern void ceph_con_close(struct ceph_connection *con);
+extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);
+
+extern void ceph_msg_revoke(struct ceph_msg *msg);
+extern void ceph_msg_revoke_incoming(struct ceph_msg *msg);
+
+extern void ceph_con_keepalive(struct ceph_connection *con);
+
+extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
+ size_t length, size_t alignment);
+extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
+ struct ceph_pagelist *pagelist);
+#ifdef CONFIG_BLOCK
+extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
+ size_t length);
+#endif /* CONFIG_BLOCK */
+
+extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
+ bool can_fail);
+
+extern struct ceph_msg *ceph_msg_get(struct ceph_msg *msg);
+extern void ceph_msg_put(struct ceph_msg *msg);
+
+extern void ceph_msg_dump(struct ceph_msg *msg);
+
+#endif
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
new file mode 100644
index 000000000..81810dc21
--- /dev/null
+++ b/include/linux/ceph/mon_client.h
@@ -0,0 +1,119 @@
+#ifndef _FS_CEPH_MON_CLIENT_H
+#define _FS_CEPH_MON_CLIENT_H
+
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/rbtree.h>
+
+#include <linux/ceph/messenger.h>
+
+struct ceph_client;
+struct ceph_mount_args;
+struct ceph_auth_client;
+
+/*
+ * The monitor map enumerates the set of all monitors.
+ */
+struct ceph_monmap {
+ struct ceph_fsid fsid;
+ u32 epoch;
+ u32 num_mon;
+ struct ceph_entity_inst mon_inst[0];
+};
+
+struct ceph_mon_client;
+struct ceph_mon_generic_request;
+
+
+/*
+ * Generic mechanism for resending monitor requests.
+ */
+typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc,
+ int newmon);
+
+/* a pending monitor request */
+struct ceph_mon_request {
+ struct ceph_mon_client *monc;
+ struct delayed_work delayed_work;
+ unsigned long delay;
+ ceph_monc_request_func_t do_request;
+};
+
+/*
+ * ceph_mon_generic_request is used for the statfs and mon_get_version
+ * requests, which are handled a bit differently because we need to get
+ * data back to the caller.
+ */
+struct ceph_mon_generic_request {
+ struct kref kref;
+ u64 tid;
+ struct rb_node node;
+ int result;
+ void *buf;
+ struct completion completion;
+ struct ceph_msg *request; /* original request */
+ struct ceph_msg *reply; /* and reply */
+};
+
+struct ceph_mon_client {
+ struct ceph_client *client;
+ struct ceph_monmap *monmap;
+
+ struct mutex mutex;
+ struct delayed_work delayed_work;
+
+ struct ceph_auth_client *auth;
+ struct ceph_msg *m_auth, *m_auth_reply, *m_subscribe, *m_subscribe_ack;
+ int pending_auth;
+
+ bool hunting;
+ int cur_mon; /* last monitor i contacted */
+ unsigned long sub_sent, sub_renew_after;
+ struct ceph_connection con;
+
+ /* pending generic requests */
+ struct rb_root generic_request_tree;
+ int num_generic_requests;
+ u64 last_tid;
+
+ /* mds/osd map */
+ int want_mdsmap;
+ int want_next_osdmap; /* 1 = want, 2 = want+asked */
+ u32 have_osdmap, have_mdsmap;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_file;
+#endif
+};
+
+extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
+extern int ceph_monmap_contains(struct ceph_monmap *m,
+ struct ceph_entity_addr *addr);
+
+extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
+extern void ceph_monc_stop(struct ceph_mon_client *monc);
+
+/*
+ * The model here is to indicate that we need a new map of at least
+ * epoch @want, and also call in when we receive a map. We will
+ * periodically rerequest the map from the monitor cluster until we
+ * get what we want.
+ */
+extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have);
+extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have);
+
+extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
+extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
+ unsigned long timeout);
+
+extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
+ struct ceph_statfs *buf);
+
+extern int ceph_monc_do_get_version(struct ceph_mon_client *monc,
+ const char *what, u64 *newest);
+
+extern int ceph_monc_open_session(struct ceph_mon_client *monc);
+
+extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
+
+#endif
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h
new file mode 100644
index 000000000..4b0d38960
--- /dev/null
+++ b/include/linux/ceph/msgpool.h
@@ -0,0 +1,26 @@
+#ifndef _FS_CEPH_MSGPOOL
+#define _FS_CEPH_MSGPOOL
+
+#include <linux/mempool.h>
+#include <linux/ceph/messenger.h>
+
+/*
+ * we use memory pools for preallocating messages we may receive, to
+ * avoid unexpected OOM conditions.
+ */
+struct ceph_msgpool {
+ const char *name;
+ mempool_t *pool;
+ int type; /* preallocated message type */
+ int front_len; /* preallocated payload size */
+};
+
+extern int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
+ int front_len, int size, bool blocking,
+ const char *name);
+extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
+extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
+ int front_len);
+extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
+
+#endif
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
new file mode 100644
index 000000000..1c1887206
--- /dev/null
+++ b/include/linux/ceph/msgr.h
@@ -0,0 +1,185 @@
+#ifndef CEPH_MSGR_H
+#define CEPH_MSGR_H
+
+/*
+ * Data types for message passing layer used by Ceph.
+ */
+
+#define CEPH_MON_PORT 6789 /* default monitor port */
+
+/*
+ * client-side processes will try to bind to ports in this
+ * range, simply for the benefit of tools like nmap or wireshark
+ * that would like to identify the protocol.
+ */
+#define CEPH_PORT_FIRST 6789
+#define CEPH_PORT_START 6800 /* non-monitors start here */
+#define CEPH_PORT_LAST 6900
+
+/*
+ * tcp connection banner. include a protocol version, and adjust it
+ * whenever the wire protocol changes. try to keep this string length
+ * constant.
+ */
+#define CEPH_BANNER "ceph v027"
+#define CEPH_BANNER_MAX_LEN 30
+
+
+/*
+ * Rollover-safe type and comparator for 32-bit sequence numbers.
+ * Comparator returns a negative, zero, or positive value.
+ */
+typedef __u32 ceph_seq_t;
+
+static inline __s32 ceph_seq_cmp(__u32 a, __u32 b)
+{
+ return (__s32)a - (__s32)b;
+}
+
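+/*
+ * For example, ceph_seq_cmp(1, 0xffffffff) is positive: sequence number
+ * 1 is treated as newer than 0xffffffff across the 32-bit rollover.
+ */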
+
+/*
+ * entity_name -- logical name for a process participating in the
+ * network, e.g. 'mds0' or 'osd3'.
+ */
+struct ceph_entity_name {
+ __u8 type; /* CEPH_ENTITY_TYPE_* */
+ __le64 num;
+} __attribute__ ((packed));
+
+#define CEPH_ENTITY_TYPE_MON 0x01
+#define CEPH_ENTITY_TYPE_MDS 0x02
+#define CEPH_ENTITY_TYPE_OSD 0x04
+#define CEPH_ENTITY_TYPE_CLIENT 0x08
+#define CEPH_ENTITY_TYPE_AUTH 0x20
+
+#define CEPH_ENTITY_TYPE_ANY 0xFF
+
+extern const char *ceph_entity_type_name(int type);
+
+/*
+ * entity_addr -- network address
+ */
+struct ceph_entity_addr {
+ __le32 type;
+ __le32 nonce; /* unique id for process (e.g. pid) */
+ struct sockaddr_storage in_addr;
+} __attribute__ ((packed));
+
+struct ceph_entity_inst {
+ struct ceph_entity_name name;
+ struct ceph_entity_addr addr;
+} __attribute__ ((packed));
+
+
+/* used by message exchange protocol */
+#define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */
+#define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */
+#define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing
+ incoming connection */
+#define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again
+ with higher cseq */
+#define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again
+ with higher gseq */
+#define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */
+#define CEPH_MSGR_TAG_MSG 7 /* message */
+#define CEPH_MSGR_TAG_ACK 8 /* message ack */
+#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
+#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
+#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
+#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
+#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
+
+
+/*
+ * connection negotiation
+ */
+struct ceph_msg_connect {
+ __le64 features; /* supported feature bits */
+ __le32 host_type; /* CEPH_ENTITY_TYPE_* */
+ __le32 global_seq; /* count connections initiated by this host */
+ __le32 connect_seq; /* count connections initiated in this session */
+ __le32 protocol_version;
+ __le32 authorizer_protocol;
+ __le32 authorizer_len;
+ __u8 flags; /* CEPH_MSG_CONNECT_* */
+} __attribute__ ((packed));
+
+struct ceph_msg_connect_reply {
+ __u8 tag;
+ __le64 features; /* feature bits for this session */
+ __le32 global_seq;
+ __le32 connect_seq;
+ __le32 protocol_version;
+ __le32 authorizer_len;
+ __u8 flags;
+} __attribute__ ((packed));
+
+#define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */
+
+
+/*
+ * message header
+ */
+struct ceph_msg_header_old {
+ __le64 seq; /* message seq# for this session */
+ __le64 tid; /* transaction id */
+ __le16 type; /* message type */
+ __le16 priority; /* priority. higher value == higher priority */
+ __le16 version; /* version of message encoding */
+
+ __le32 front_len; /* bytes in main payload */
+ __le32 middle_len;/* bytes in middle payload */
+ __le32 data_len; /* bytes of data payload */
+ __le16 data_off; /* sender: include full offset;
+ receiver: mask against ~PAGE_MASK */
+
+ struct ceph_entity_inst src, orig_src;
+ __le32 reserved;
+ __le32 crc; /* header crc32c */
+} __attribute__ ((packed));
+
+struct ceph_msg_header {
+ __le64 seq; /* message seq# for this session */
+ __le64 tid; /* transaction id */
+ __le16 type; /* message type */
+ __le16 priority; /* priority. higher value == higher priority */
+ __le16 version; /* version of message encoding */
+
+ __le32 front_len; /* bytes in main payload */
+ __le32 middle_len;/* bytes in middle payload */
+ __le32 data_len; /* bytes of data payload */
+ __le16 data_off; /* sender: include full offset;
+ receiver: mask against ~PAGE_MASK */
+
+ struct ceph_entity_name src;
+ __le16 compat_version;
+ __le16 reserved;
+ __le32 crc; /* header crc32c */
+} __attribute__ ((packed));
+
+#define CEPH_MSG_PRIO_LOW 64
+#define CEPH_MSG_PRIO_DEFAULT 127
+#define CEPH_MSG_PRIO_HIGH 196
+#define CEPH_MSG_PRIO_HIGHEST 255
+
+/*
+ * follows data payload
+ */
+struct ceph_msg_footer_old {
+ __le32 front_crc, middle_crc, data_crc;
+ __u8 flags;
+} __attribute__ ((packed));
+
+struct ceph_msg_footer {
+ __le32 front_crc, middle_crc, data_crc;
+ /* sig holds the 64 bits of the digital signature for the message PLR */
+ __le64 sig;
+ __u8 flags;
+} __attribute__ ((packed));
+
+#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */
+#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */
+#define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */
+
+
+#endif
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
new file mode 100644
index 000000000..61b19c46b
--- /dev/null
+++ b/include/linux/ceph/osd_client.h
@@ -0,0 +1,377 @@
+#ifndef _FS_CEPH_OSD_CLIENT_H
+#define _FS_CEPH_OSD_CLIENT_H
+
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/mempool.h>
+#include <linux/rbtree.h>
+
+#include <linux/ceph/types.h>
+#include <linux/ceph/osdmap.h>
+#include <linux/ceph/messenger.h>
+#include <linux/ceph/auth.h>
+#include <linux/ceph/pagelist.h>
+
+struct ceph_msg;
+struct ceph_snap_context;
+struct ceph_osd_request;
+struct ceph_osd_client;
+struct ceph_authorizer;
+
+/*
+ * completion callback for async writepages
+ */
+typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
+ struct ceph_msg *);
+typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool);
+
+/* a given osd we're communicating with */
+struct ceph_osd {
+ atomic_t o_ref;
+ struct ceph_osd_client *o_osdc;
+ int o_osd;
+ int o_incarnation;
+ struct rb_node o_node;
+ struct ceph_connection o_con;
+ struct list_head o_requests;
+ struct list_head o_linger_requests;
+ struct list_head o_osd_lru;
+ struct ceph_auth_handshake o_auth;
+ unsigned long lru_ttl;
+ int o_marked_for_keepalive;
+ struct list_head o_keepalive_item;
+};
+
+
+#define CEPH_OSD_MAX_OP 3
+
+enum ceph_osd_data_type {
+ CEPH_OSD_DATA_TYPE_NONE = 0,
+ CEPH_OSD_DATA_TYPE_PAGES,
+ CEPH_OSD_DATA_TYPE_PAGELIST,
+#ifdef CONFIG_BLOCK
+ CEPH_OSD_DATA_TYPE_BIO,
+#endif /* CONFIG_BLOCK */
+};
+
+struct ceph_osd_data {
+ enum ceph_osd_data_type type;
+ union {
+ struct {
+ struct page **pages;
+ u64 length;
+ u32 alignment;
+ bool pages_from_pool;
+ bool own_pages;
+ };
+ struct ceph_pagelist *pagelist;
+#ifdef CONFIG_BLOCK
+ struct {
+ struct bio *bio; /* list of bios */
+ size_t bio_length; /* total in list */
+ };
+#endif /* CONFIG_BLOCK */
+ };
+};
+
+struct ceph_osd_req_op {
+ u16 op; /* CEPH_OSD_OP_* */
+ u32 flags; /* CEPH_OSD_OP_FLAG_* */
+ u32 payload_len;
+ union {
+ struct ceph_osd_data raw_data_in;
+ struct {
+ u64 offset, length;
+ u64 truncate_size;
+ u32 truncate_seq;
+ struct ceph_osd_data osd_data;
+ } extent;
+ struct {
+ u32 name_len;
+ u32 value_len;
+ __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
+ __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
+ struct ceph_osd_data osd_data;
+ } xattr;
+ struct {
+ const char *class_name;
+ const char *method_name;
+ struct ceph_osd_data request_info;
+ struct ceph_osd_data request_data;
+ struct ceph_osd_data response_data;
+ __u8 class_len;
+ __u8 method_len;
+ __u8 argc;
+ } cls;
+ struct {
+ u64 cookie;
+ u64 ver;
+ u32 prot_ver;
+ u32 timeout;
+ __u8 flag;
+ } watch;
+ struct {
+ u64 expected_object_size;
+ u64 expected_write_size;
+ } alloc_hint;
+ };
+};
+
+/* an in-flight request */
+struct ceph_osd_request {
+ u64 r_tid; /* unique for this client */
+ struct rb_node r_node;
+ struct list_head r_req_lru_item;
+ struct list_head r_osd_item;
+ struct list_head r_linger_item;
+ struct list_head r_linger_osd_item;
+ struct ceph_osd *r_osd;
+ struct ceph_pg r_pgid;
+ int r_pg_osds[CEPH_PG_MAX_SIZE];
+ int r_num_pg_osds;
+
+ struct ceph_msg *r_request, *r_reply;
+ int r_flags; /* any additional flags for the osd */
+ u32 r_sent; /* >0 if r_request is sending/sent */
+
+ /* request osd ops array */
+ unsigned int r_num_ops;
+ struct ceph_osd_req_op r_ops[CEPH_OSD_MAX_OP];
+
+ /* these are updated on each send */
+ __le32 *r_request_osdmap_epoch;
+ __le32 *r_request_flags;
+ __le64 *r_request_pool;
+ void *r_request_pgid;
+ __le32 *r_request_attempts;
+ bool r_paused;
+ struct ceph_eversion *r_request_reassert_version;
+
+ int r_result;
+ int r_reply_op_len[CEPH_OSD_MAX_OP];
+ s32 r_reply_op_result[CEPH_OSD_MAX_OP];
+ int r_got_reply;
+ int r_linger;
+
+ struct ceph_osd_client *r_osdc;
+ struct kref r_kref;
+ bool r_mempool;
+ struct completion r_completion, r_safe_completion;
+ ceph_osdc_callback_t r_callback;
+ ceph_osdc_unsafe_callback_t r_unsafe_callback;
+ struct ceph_eversion r_reassert_version;
+ struct list_head r_unsafe_item;
+
+ struct inode *r_inode; /* for use by callbacks */
+ void *r_priv; /* ditto */
+
+ struct ceph_object_locator r_base_oloc;
+ struct ceph_object_id r_base_oid;
+ struct ceph_object_locator r_target_oloc;
+ struct ceph_object_id r_target_oid;
+
+ u64 r_snapid;
+ unsigned long r_stamp; /* send OR check time */
+
+ struct ceph_snap_context *r_snapc; /* snap context for writes */
+};
+
+struct ceph_request_redirect {
+ struct ceph_object_locator oloc;
+};
+
+struct ceph_osd_event {
+ u64 cookie;
+ int one_shot;
+ struct ceph_osd_client *osdc;
+ void (*cb)(u64, u64, u8, void *);
+ void *data;
+ struct rb_node node;
+ struct list_head osd_node;
+ struct kref kref;
+};
+
+struct ceph_osd_event_work {
+ struct work_struct work;
+ struct ceph_osd_event *event;
+ u64 ver;
+ u64 notify_id;
+ u8 opcode;
+};
+
+struct ceph_osd_client {
+ struct ceph_client *client;
+
+ struct ceph_osdmap *osdmap; /* current map */
+ struct rw_semaphore map_sem;
+ struct completion map_waiters;
+ u64 last_requested_map;
+
+ struct mutex request_mutex;
+ struct rb_root osds; /* osds */
+ struct list_head osd_lru; /* idle osds */
+ u64 timeout_tid; /* tid of timeout triggering rq */
+ u64 last_tid; /* tid of last request */
+ struct rb_root requests; /* pending requests */
+ struct list_head req_lru; /* in-flight lru */
+ struct list_head req_unsent; /* unsent/need-resend queue */
+ struct list_head req_notarget; /* map to no osd */
+ struct list_head req_linger; /* lingering requests */
+ int num_requests;
+ struct delayed_work timeout_work;
+ struct delayed_work osds_timeout_work;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_file;
+#endif
+
+ mempool_t *req_mempool;
+
+ struct ceph_msgpool msgpool_op;
+ struct ceph_msgpool msgpool_op_reply;
+
+ spinlock_t event_lock;
+ struct rb_root event_tree;
+ u64 event_count;
+
+ struct workqueue_struct *notify_wq;
+};
+
+extern int ceph_osdc_setup(void);
+extern void ceph_osdc_cleanup(void);
+
+extern int ceph_osdc_init(struct ceph_osd_client *osdc,
+ struct ceph_client *client);
+extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
+
+extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
+ struct ceph_msg *msg);
+extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
+ struct ceph_msg *msg);
+
+extern void osd_req_op_init(struct ceph_osd_request *osd_req,
+ unsigned int which, u16 opcode);
+
+extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
+ unsigned int which,
+ struct page **pages, u64 length,
+ u32 alignment, bool pages_from_pool,
+ bool own_pages);
+
+extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
+ unsigned int which, u16 opcode,
+ u64 offset, u64 length,
+ u64 truncate_size, u32 truncate_seq);
+extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
+ unsigned int which, u64 length);
+
+extern struct ceph_osd_data *osd_req_op_extent_osd_data(
+ struct ceph_osd_request *osd_req,
+ unsigned int which);
+extern struct ceph_osd_data *osd_req_op_cls_response_data(
+ struct ceph_osd_request *osd_req,
+ unsigned int which);
+
+extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
+ unsigned int which,
+ struct page **pages, u64 length,
+ u32 alignment, bool pages_from_pool,
+ bool own_pages);
+extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
+ unsigned int which,
+ struct ceph_pagelist *pagelist);
+#ifdef CONFIG_BLOCK
+extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *,
+ unsigned int which,
+ struct bio *bio, size_t bio_length);
+#endif /* CONFIG_BLOCK */
+
+extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
+ unsigned int which,
+ struct ceph_pagelist *pagelist);
+extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
+ unsigned int which,
+ struct page **pages, u64 length,
+ u32 alignment, bool pages_from_pool,
+ bool own_pages);
+extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
+ unsigned int which,
+ struct page **pages, u64 length,
+ u32 alignment, bool pages_from_pool,
+ bool own_pages);
+
+extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
+ unsigned int which, u16 opcode,
+ const char *class, const char *method);
+extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
+ u16 opcode, const char *name, const void *value,
+ size_t size, u8 cmp_op, u8 cmp_mode);
+extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
+ unsigned int which, u16 opcode,
+ u64 cookie, u64 version, int flag);
+extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
+ unsigned int which,
+ u64 expected_object_size,
+ u64 expected_write_size);
+
+extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
+ struct ceph_snap_context *snapc,
+ unsigned int num_ops,
+ bool use_mempool,
+ gfp_t gfp_flags);
+
+extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
+ struct ceph_snap_context *snapc,
+ u64 snap_id,
+ struct timespec *mtime);
+
+extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
+ struct ceph_file_layout *layout,
+ struct ceph_vino vino,
+ u64 offset, u64 *len,
+ unsigned int which, int num_ops,
+ int opcode, int flags,
+ struct ceph_snap_context *snapc,
+ u32 truncate_seq, u64 truncate_size,
+ bool use_mempool);
+
+extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
+
+extern void ceph_osdc_get_request(struct ceph_osd_request *req);
+extern void ceph_osdc_put_request(struct ceph_osd_request *req);
+
+extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req,
+ bool nofail);
+extern void ceph_osdc_cancel_request(struct ceph_osd_request *req);
+extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
+extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
+
+extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+
+extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
+ struct ceph_vino vino,
+ struct ceph_file_layout *layout,
+ u64 off, u64 *plen,
+ u32 truncate_seq, u64 truncate_size,
+ struct page **pages, int nr_pages,
+ int page_align);
+
+extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
+ struct ceph_vino vino,
+ struct ceph_file_layout *layout,
+ struct ceph_snap_context *sc,
+ u64 off, u64 len,
+ u32 truncate_seq, u64 truncate_size,
+ struct timespec *mtime,
+ struct page **pages, int nr_pages);
+
+/* watch/notify events */
+extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
+ void (*event_cb)(u64, u64, u8, void *),
+ void *data, struct ceph_osd_event **pevent);
+extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
+extern void ceph_osdc_put_event(struct ceph_osd_event *event);
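+
+/*
+ * Minimal request lifecycle, as a hedged illustrative sketch only: the
+ * opcode/flag choice is one plausible combination, the surrounding
+ * variables (osdc, layout, vino, pages, off, len, truncate_*) are assumed
+ * context, and all error handling and locking is omitted.
+ *
+ *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
+ *				    0, 1, CEPH_OSD_OP_READ,
+ *				    CEPH_OSD_FLAG_READ, NULL,
+ *				    truncate_seq, truncate_size, false);
+ *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
+ *	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
+ *	ceph_osdc_start_request(osdc, req, false);
+ *	ceph_osdc_wait_request(osdc, req);
+ *	ceph_osdc_put_request(req);
+ */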
+#endif
+
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
new file mode 100644
index 000000000..e55c08bc3
--- /dev/null
+++ b/include/linux/ceph/osdmap.h
@@ -0,0 +1,224 @@
+#ifndef _FS_CEPH_OSDMAP_H
+#define _FS_CEPH_OSDMAP_H
+
+#include <linux/rbtree.h>
+#include <linux/ceph/types.h>
+#include <linux/ceph/decode.h>
+#include <linux/ceph/ceph_fs.h>
+#include <linux/crush/crush.h>
+
+/*
+ * The osd map describes the current membership of the osd cluster and
+ * specifies the mapping of objects to placement groups and placement
+ * groups to (sets of) osds. That is, it completely specifies the
+ * (desired) distribution of all data objects in the system at some
+ * point in time.
+ *
+ * Each map version is identified by an epoch, which increases monotonically.
+ *
+ * The map can be updated either via an incremental map (diff) describing
+ * the change between two successive epochs, or as a fully encoded map.
+ */
+struct ceph_pg {
+ uint64_t pool;
+ uint32_t seed;
+};
+
+#define CEPH_POOL_FLAG_HASHPSPOOL 1
+
+struct ceph_pg_pool_info {
+ struct rb_node node;
+ s64 id;
+ u8 type;
+ u8 size;
+ u8 crush_ruleset;
+ u8 object_hash;
+ u32 pg_num, pgp_num;
+ int pg_num_mask, pgp_num_mask;
+ s64 read_tier;
+ s64 write_tier; /* wins for read+write ops */
+ u64 flags;
+ char *name;
+};
+
+static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
+{
+ switch (pool->type) {
+ case CEPH_POOL_TYPE_REP:
+ return true;
+ case CEPH_POOL_TYPE_EC:
+ return false;
+ default:
+ BUG_ON(1);
+ }
+}
+
+struct ceph_object_locator {
+ s64 pool;
+};
+
+/*
+ * Maximum object name length supported by the kernel client
+ *
+ * (probably outdated: must be >= RBD_MAX_MD_NAME_LEN -- currently 100)
+ */
+#define CEPH_MAX_OID_NAME_LEN 100
+
+struct ceph_object_id {
+ char name[CEPH_MAX_OID_NAME_LEN];
+ int name_len;
+};
+
+struct ceph_pg_mapping {
+ struct rb_node node;
+ struct ceph_pg pgid;
+
+ union {
+ struct {
+ int len;
+ int osds[];
+ } pg_temp;
+ struct {
+ int osd;
+ } primary_temp;
+ };
+};
+
+struct ceph_osdmap {
+ struct ceph_fsid fsid;
+ u32 epoch;
+ u32 mkfs_epoch;
+ struct ceph_timespec created, modified;
+
+ u32 flags; /* CEPH_OSDMAP_* */
+
+ u32 max_osd; /* size of osd_state, _offload, _addr arrays */
+ u8 *osd_state; /* CEPH_OSD_* */
+ u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */
+ struct ceph_entity_addr *osd_addr;
+
+ struct rb_root pg_temp;
+ struct rb_root primary_temp;
+
+ u32 *osd_primary_affinity;
+
+ struct rb_root pg_pools;
+ u32 pool_max;
+
+ /* the CRUSH map specifies the mapping of placement groups to
+ * the list of osds that store+replicate them. */
+ struct crush_map *crush;
+
+ struct mutex crush_scratch_mutex;
+ int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
+};
+
+static inline void ceph_oid_set_name(struct ceph_object_id *oid,
+ const char *name)
+{
+ int len;
+
+ len = strlen(name);
+ if (len > sizeof(oid->name)) {
+ WARN(1, "ceph_oid_set_name '%s' len %d vs %zu, truncating\n",
+ name, len, sizeof(oid->name));
+ len = sizeof(oid->name);
+ }
+
+ memcpy(oid->name, name, len);
+ oid->name_len = len;
+}
+
+static inline void ceph_oid_copy(struct ceph_object_id *dest,
+ struct ceph_object_id *src)
+{
+ BUG_ON(src->name_len > sizeof(dest->name));
+ memcpy(dest->name, src->name, src->name_len);
+ dest->name_len = src->name_len;
+}
+
+static inline int ceph_osd_exists(struct ceph_osdmap *map, int osd)
+{
+ return osd >= 0 && osd < map->max_osd &&
+ (map->osd_state[osd] & CEPH_OSD_EXISTS);
+}
+
+static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
+{
+ return ceph_osd_exists(map, osd) &&
+ (map->osd_state[osd] & CEPH_OSD_UP);
+}
+
+static inline int ceph_osd_is_down(struct ceph_osdmap *map, int osd)
+{
+ return !ceph_osd_is_up(map, osd);
+}
+
+static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
+{
+ return map && (map->flags & flag);
+}
+
+extern char *ceph_osdmap_state_str(char *str, int len, int state);
+extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd);
+
+static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
+ int osd)
+{
+ if (osd >= map->max_osd)
+ return NULL;
+ return &map->osd_addr[osd];
+}
+
+static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
+{
+ __u8 version;
+
+ if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) {
+ pr_warn("incomplete pg encoding\n");
+ return -EINVAL;
+ }
+ version = ceph_decode_8(p);
+ if (version > 1) {
+ pr_warn("do not understand pg encoding %d > 1\n",
+ (int)version);
+ return -EINVAL;
+ }
+
+ pgid->pool = ceph_decode_64(p);
+ pgid->seed = ceph_decode_32(p);
+ *p += 4; /* skip deprecated preferred value */
+
+ return 0;
+}
+
+extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
+extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ struct ceph_osdmap *map,
+ struct ceph_messenger *msgr);
+extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
+
+/* calculate mapping of a file extent to an object */
+extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
+ u64 off, u64 len,
+ u64 *bno, u64 *oxoff, u64 *oxlen);
+
+/* calculate mapping of object to a placement group */
+extern int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
+ struct ceph_object_locator *oloc,
+ struct ceph_object_id *oid,
+ struct ceph_pg *pg_out);
+
+extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap,
+ struct ceph_pg pgid,
+ int *osds, int *primary);
+extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
+ struct ceph_pg pgid);
+
+extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
+ u64 id);
+
+extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
+extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
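+
+/*
+ * Hedged sketch of the object -> PG -> OSD mapping path; the pool id and
+ * object name below are made-up values, osdmap is an assumed,
+ * already-decoded map, and error checks are omitted:
+ *
+ *	struct ceph_object_locator oloc = { .pool = 1 };
+ *	struct ceph_object_id oid;
+ *	struct ceph_pg pgid;
+ *	int osds[CEPH_PG_MAX_SIZE], primary, num;
+ *
+ *	ceph_oid_set_name(&oid, "some_object");
+ *	ceph_oloc_oid_to_pg(osdmap, &oloc, &oid, &pgid);
+ *	num = ceph_calc_pg_acting(osdmap, pgid, osds, &primary);
+ */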
+
+#endif
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
new file mode 100644
index 000000000..13d71fe18
--- /dev/null
+++ b/include/linux/ceph/pagelist.h
@@ -0,0 +1,80 @@
+#ifndef __FS_CEPH_PAGELIST_H
+#define __FS_CEPH_PAGELIST_H
+
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct ceph_pagelist {
+ struct list_head head;
+ void *mapped_tail;
+ size_t length;
+ size_t room;
+ struct list_head free_list;
+ size_t num_pages_free;
+ atomic_t refcnt;
+};
+
+struct ceph_pagelist_cursor {
+ struct ceph_pagelist *pl; /* pagelist, for error checking */
+ struct list_head *page_lru; /* page in list */
+ size_t room; /* room remaining to reset to */
+};
+
+static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
+{
+ INIT_LIST_HEAD(&pl->head);
+ pl->mapped_tail = NULL;
+ pl->length = 0;
+ pl->room = 0;
+ INIT_LIST_HEAD(&pl->free_list);
+ pl->num_pages_free = 0;
+ atomic_set(&pl->refcnt, 1);
+}
+
+extern void ceph_pagelist_release(struct ceph_pagelist *pl);
+
+extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l);
+
+extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
+
+extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
+
+extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
+ struct ceph_pagelist_cursor *c);
+
+extern int ceph_pagelist_truncate(struct ceph_pagelist *pl,
+ struct ceph_pagelist_cursor *c);
+
+static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
+{
+ __le64 ev = cpu_to_le64(v);
+ return ceph_pagelist_append(pl, &ev, sizeof(ev));
+}
+static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v)
+{
+ __le32 ev = cpu_to_le32(v);
+ return ceph_pagelist_append(pl, &ev, sizeof(ev));
+}
+static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v)
+{
+ __le16 ev = cpu_to_le16(v);
+ return ceph_pagelist_append(pl, &ev, sizeof(ev));
+}
+static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v)
+{
+ return ceph_pagelist_append(pl, &v, 1);
+}
+static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl,
+ char *s, size_t len)
+{
+ int ret = ceph_pagelist_encode_32(pl, len);
+ if (ret)
+ return ret;
+ if (len)
+ return ceph_pagelist_append(pl, s, len);
+ return 0;
+}
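+
+/*
+ * Hedged usage sketch (illustrative only; the allocation flags, the values
+ * encoded and the cls op it feeds are assumptions, and error handling is
+ * omitted): build a small little-endian payload and attach it to an OSD
+ * class op declared in osd_client.h.
+ *
+ *	struct ceph_pagelist *pl = kmalloc(sizeof(*pl), GFP_NOFS);
+ *
+ *	ceph_pagelist_init(pl);
+ *	ceph_pagelist_encode_32(pl, 1);
+ *	ceph_pagelist_encode_string(pl, "key", 3);
+ *	osd_req_op_cls_request_data_pagelist(req, 0, pl);
+ *	...
+ *	ceph_pagelist_release(pl);
+ */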
+
+#endif
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
new file mode 100644
index 000000000..2f822dca1
--- /dev/null
+++ b/include/linux/ceph/rados.h
@@ -0,0 +1,469 @@
+#ifndef CEPH_RADOS_H
+#define CEPH_RADOS_H
+
+/*
+ * Data types for the Ceph distributed object storage layer RADOS
+ * (Reliable Autonomic Distributed Object Store).
+ */
+
+#include <linux/ceph/msgr.h>
+
+/*
+ * fs id
+ */
+struct ceph_fsid {
+ unsigned char fsid[16];
+};
+
+static inline int ceph_fsid_compare(const struct ceph_fsid *a,
+ const struct ceph_fsid *b)
+{
+ return memcmp(a, b, sizeof(*a));
+}
+
+/*
+ * ino, object, etc.
+ */
+typedef __le64 ceph_snapid_t;
+#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */
+#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */
+#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */
+
+struct ceph_timespec {
+ __le32 tv_sec;
+ __le32 tv_nsec;
+} __attribute__ ((packed));
+
+
+/*
+ * object layout - how objects are mapped into PGs
+ */
+#define CEPH_OBJECT_LAYOUT_HASH 1
+#define CEPH_OBJECT_LAYOUT_LINEAR 2
+#define CEPH_OBJECT_LAYOUT_HASHINO 3
+
+/*
+ * pg layout -- how PGs are mapped onto (sets of) OSDs
+ */
+#define CEPH_PG_LAYOUT_CRUSH 0
+#define CEPH_PG_LAYOUT_HASH 1
+#define CEPH_PG_LAYOUT_LINEAR 2
+#define CEPH_PG_LAYOUT_HYBRID 3
+
+#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */
+
+/*
+ * placement group.
+ * we encode this into one __le64.
+ */
+struct ceph_pg_v1 {
+ __le16 preferred; /* preferred primary osd */
+ __le16 ps; /* placement seed */
+ __le32 pool; /* object pool */
+} __attribute__ ((packed));
+
+/*
+ * pg_pool is a set of pgs storing a pool of objects
+ *
+ * pg_num -- base number of pseudorandomly placed pgs
+ *
+ * pgp_num -- effective number when calculating pg placement. this
+ * is used for pg_num increases. new pgs result in data being "split"
+ * into new pgs. for this to proceed smoothly, new pgs are initially
+ * colocated with their parents; that is, pgp_num doesn't increase
+ * until the new pgs have successfully split. only _then_ are the new
+ * pgs placed independently.
+ *
+ * lpg_num -- localized pg count (per device). replicas are randomly
+ * selected.
+ *
+ * lpgp_num -- as above.
+ */
+#define CEPH_NOPOOL ((__u64) (-1)) /* pool id not defined */
+
+#define CEPH_POOL_TYPE_REP 1
+#define CEPH_POOL_TYPE_RAID4 2 /* never implemented */
+#define CEPH_POOL_TYPE_EC 3
+
+/*
+ * stable_mod func is used to control number of placement groups.
+ * similar to straight-up modulo, but produces a stable mapping as b
+ * increases over time. b is the number of bins, and bmask is the
+ * containing power of 2 minus 1.
+ *
+ * b <= bmask and bmask=(2**n)-1
+ * e.g., b=12 -> bmask=15, b=123 -> bmask=127
+ */
+static inline int ceph_stable_mod(int x, int b, int bmask)
+{
+ if ((x & bmask) < b)
+ return x & bmask;
+ else
+ return x & (bmask >> 1);
+}
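+
+/*
+ * Worked example (illustrative): with b = 12 and bmask = 15, an x with
+ * x & 15 == 5 maps to bin 5, while x & 15 == 13 exceeds b and is folded
+ * back to 13 & 7 == 5.  Only inputs landing in the not-yet-existing bins
+ * (12..15 here) are folded, so raising b later moves only those inputs
+ * and leaves every other bin assignment unchanged.
+ */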
+
+/*
+ * object layout - how a given object should be stored.
+ */
+struct ceph_object_layout {
+ struct ceph_pg_v1 ol_pgid; /* raw pg, with _full_ ps precision. */
+ __le32 ol_stripe_unit; /* for per-object parity, if any */
+} __attribute__ ((packed));
+
+/*
+ * compound epoch+version, used by storage layer to serialize mutations
+ */
+struct ceph_eversion {
+ __le32 epoch;
+ __le64 version;
+} __attribute__ ((packed));
+
+/*
+ * osd map bits
+ */
+
+/* status bits */
+#define CEPH_OSD_EXISTS (1<<0)
+#define CEPH_OSD_UP (1<<1)
+#define CEPH_OSD_AUTOOUT (1<<2) /* osd was automatically marked out */
+#define CEPH_OSD_NEW (1<<3) /* osd is new, never marked in */
+
+extern const char *ceph_osd_state_name(int s);
+
+/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */
+#define CEPH_OSD_IN 0x10000
+#define CEPH_OSD_OUT 0
+
+/* osd primary-affinity. fixed point value: 0x10000 == baseline */
+#define CEPH_OSD_MAX_PRIMARY_AFFINITY 0x10000
+#define CEPH_OSD_DEFAULT_PRIMARY_AFFINITY 0x10000
+
+
+/*
+ * osd map flag bits
+ */
+#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
+#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
+#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
+#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
+#define CEPH_OSDMAP_NOUP (1<<5) /* block osd boot */
+#define CEPH_OSDMAP_NODOWN (1<<6) /* block osd mark-down/failure */
+#define CEPH_OSDMAP_NOOUT (1<<7) /* block osd auto mark-out */
+#define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */
+#define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */
+#define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */
+
+/*
+ * The error code to return when an OSD can't handle a write
+ * because it is too large.
+ */
+#define OSD_WRITETOOBIG EMSGSIZE
+
+/*
+ * osd ops
+ *
+ * WARNING: do not use these op codes directly. Use the helpers
+ * defined below instead. In certain cases, op code behavior was
+ * redefined, resulting in special-cases in the helpers.
+ */
+#define CEPH_OSD_OP_MODE 0xf000
+#define CEPH_OSD_OP_MODE_RD 0x1000
+#define CEPH_OSD_OP_MODE_WR 0x2000
+#define CEPH_OSD_OP_MODE_RMW 0x3000
+#define CEPH_OSD_OP_MODE_SUB 0x4000
+#define CEPH_OSD_OP_MODE_CACHE 0x8000
+
+#define CEPH_OSD_OP_TYPE 0x0f00
+#define CEPH_OSD_OP_TYPE_LOCK 0x0100
+#define CEPH_OSD_OP_TYPE_DATA 0x0200
+#define CEPH_OSD_OP_TYPE_ATTR 0x0300
+#define CEPH_OSD_OP_TYPE_EXEC 0x0400
+#define CEPH_OSD_OP_TYPE_PG 0x0500
+#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */
+
+#define __CEPH_OSD_OP1(mode, nr) \
+ (CEPH_OSD_OP_MODE_##mode | (nr))
+
+#define __CEPH_OSD_OP(mode, type, nr) \
+ (CEPH_OSD_OP_MODE_##mode | CEPH_OSD_OP_TYPE_##type | (nr))
+
+#define __CEPH_FORALL_OSD_OPS(f) \
+ /** data **/ \
+ /* read */ \
+ f(READ, __CEPH_OSD_OP(RD, DATA, 1), "read") \
+ f(STAT, __CEPH_OSD_OP(RD, DATA, 2), "stat") \
+ f(MAPEXT, __CEPH_OSD_OP(RD, DATA, 3), "mapext") \
+ \
+ /* fancy read */ \
+ f(MASKTRUNC, __CEPH_OSD_OP(RD, DATA, 4), "masktrunc") \
+ f(SPARSE_READ, __CEPH_OSD_OP(RD, DATA, 5), "sparse-read") \
+ \
+ f(NOTIFY, __CEPH_OSD_OP(RD, DATA, 6), "notify") \
+ f(NOTIFY_ACK, __CEPH_OSD_OP(RD, DATA, 7), "notify-ack") \
+ \
+ /* versioning */ \
+ f(ASSERT_VER, __CEPH_OSD_OP(RD, DATA, 8), "assert-version") \
+ \
+ f(LIST_WATCHERS, __CEPH_OSD_OP(RD, DATA, 9), "list-watchers") \
+ \
+ f(LIST_SNAPS, __CEPH_OSD_OP(RD, DATA, 10), "list-snaps") \
+ \
+ /* sync */ \
+ f(SYNC_READ, __CEPH_OSD_OP(RD, DATA, 11), "sync_read") \
+ \
+ /* write */ \
+ f(WRITE, __CEPH_OSD_OP(WR, DATA, 1), "write") \
+ f(WRITEFULL, __CEPH_OSD_OP(WR, DATA, 2), "writefull") \
+ f(TRUNCATE, __CEPH_OSD_OP(WR, DATA, 3), "truncate") \
+ f(ZERO, __CEPH_OSD_OP(WR, DATA, 4), "zero") \
+ f(DELETE, __CEPH_OSD_OP(WR, DATA, 5), "delete") \
+ \
+ /* fancy write */ \
+ f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \
+ f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \
+ f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \
+ f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \
+ \
+ f(TMAPUP, __CEPH_OSD_OP(RMW, DATA, 10), "tmapup") \
+ f(TMAPPUT, __CEPH_OSD_OP(WR, DATA, 11), "tmapput") \
+ f(TMAPGET, __CEPH_OSD_OP(RD, DATA, 12), "tmapget") \
+ \
+ f(CREATE, __CEPH_OSD_OP(WR, DATA, 13), "create") \
+ f(ROLLBACK, __CEPH_OSD_OP(WR, DATA, 14), "rollback") \
+ \
+ f(WATCH, __CEPH_OSD_OP(WR, DATA, 15), "watch") \
+ \
+ /* omap */ \
+ f(OMAPGETKEYS, __CEPH_OSD_OP(RD, DATA, 17), "omap-get-keys") \
+ f(OMAPGETVALS, __CEPH_OSD_OP(RD, DATA, 18), "omap-get-vals") \
+ f(OMAPGETHEADER, __CEPH_OSD_OP(RD, DATA, 19), "omap-get-header") \
+ f(OMAPGETVALSBYKEYS, __CEPH_OSD_OP(RD, DATA, 20), "omap-get-vals-by-keys") \
+ f(OMAPSETVALS, __CEPH_OSD_OP(WR, DATA, 21), "omap-set-vals") \
+ f(OMAPSETHEADER, __CEPH_OSD_OP(WR, DATA, 22), "omap-set-header") \
+ f(OMAPCLEAR, __CEPH_OSD_OP(WR, DATA, 23), "omap-clear") \
+ f(OMAPRMKEYS, __CEPH_OSD_OP(WR, DATA, 24), "omap-rm-keys") \
+ f(OMAP_CMP, __CEPH_OSD_OP(RD, DATA, 25), "omap-cmp") \
+ \
+ /* tiering */ \
+ f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \
+ f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
+ f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \
+ f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \
+ f(COPY_GET, __CEPH_OSD_OP(RD, DATA, 30), "copy-get") \
+ f(CACHE_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 31), "cache-flush") \
+ f(CACHE_EVICT, __CEPH_OSD_OP(CACHE, DATA, 32), "cache-evict") \
+ f(CACHE_TRY_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 33), "cache-try-flush") \
+ \
+ /* convert tmap to omap */ \
+ f(TMAP2OMAP, __CEPH_OSD_OP(RMW, DATA, 34), "tmap2omap") \
+ \
+ /* hints */ \
+ f(SETALLOCHINT, __CEPH_OSD_OP(WR, DATA, 35), "set-alloc-hint") \
+ \
+ /** multi **/ \
+ f(CLONERANGE, __CEPH_OSD_OP(WR, MULTI, 1), "clonerange") \
+ f(ASSERT_SRC_VERSION, __CEPH_OSD_OP(RD, MULTI, 2), "assert-src-version") \
+ f(SRC_CMPXATTR, __CEPH_OSD_OP(RD, MULTI, 3), "src-cmpxattr") \
+ \
+ /** attrs **/ \
+ /* read */ \
+ f(GETXATTR, __CEPH_OSD_OP(RD, ATTR, 1), "getxattr") \
+ f(GETXATTRS, __CEPH_OSD_OP(RD, ATTR, 2), "getxattrs") \
+ f(CMPXATTR, __CEPH_OSD_OP(RD, ATTR, 3), "cmpxattr") \
+ \
+ /* write */ \
+ f(SETXATTR, __CEPH_OSD_OP(WR, ATTR, 1), "setxattr") \
+ f(SETXATTRS, __CEPH_OSD_OP(WR, ATTR, 2), "setxattrs") \
+ f(RESETXATTRS, __CEPH_OSD_OP(WR, ATTR, 3), "resetxattrs") \
+ f(RMXATTR, __CEPH_OSD_OP(WR, ATTR, 4), "rmxattr") \
+ \
+ /** subop **/ \
+ f(PULL, __CEPH_OSD_OP1(SUB, 1), "pull") \
+ f(PUSH, __CEPH_OSD_OP1(SUB, 2), "push") \
+ f(BALANCEREADS, __CEPH_OSD_OP1(SUB, 3), "balance-reads") \
+ f(UNBALANCEREADS, __CEPH_OSD_OP1(SUB, 4), "unbalance-reads") \
+ f(SCRUB, __CEPH_OSD_OP1(SUB, 5), "scrub") \
+ f(SCRUB_RESERVE, __CEPH_OSD_OP1(SUB, 6), "scrub-reserve") \
+ f(SCRUB_UNRESERVE, __CEPH_OSD_OP1(SUB, 7), "scrub-unreserve") \
+ f(SCRUB_STOP, __CEPH_OSD_OP1(SUB, 8), "scrub-stop") \
+ f(SCRUB_MAP, __CEPH_OSD_OP1(SUB, 9), "scrub-map") \
+ \
+ /** lock **/ \
+ f(WRLOCK, __CEPH_OSD_OP(WR, LOCK, 1), "wrlock") \
+ f(WRUNLOCK, __CEPH_OSD_OP(WR, LOCK, 2), "wrunlock") \
+ f(RDLOCK, __CEPH_OSD_OP(WR, LOCK, 3), "rdlock") \
+ f(RDUNLOCK, __CEPH_OSD_OP(WR, LOCK, 4), "rdunlock") \
+ f(UPLOCK, __CEPH_OSD_OP(WR, LOCK, 5), "uplock") \
+ f(DNLOCK, __CEPH_OSD_OP(WR, LOCK, 6), "dnlock") \
+ \
+ /** exec **/ \
+ /* note: the RD bit here is wrong; see special-case below in helper */ \
+ f(CALL, __CEPH_OSD_OP(RD, EXEC, 1), "call") \
+ \
+ /** pg **/ \
+ f(PGLS, __CEPH_OSD_OP(RD, PG, 1), "pgls") \
+ f(PGLS_FILTER, __CEPH_OSD_OP(RD, PG, 2), "pgls-filter") \
+ f(PG_HITSET_LS, __CEPH_OSD_OP(RD, PG, 3), "pg-hitset-ls") \
+ f(PG_HITSET_GET, __CEPH_OSD_OP(RD, PG, 4), "pg-hitset-get")
+
+enum {
+#define GENERATE_ENUM_ENTRY(op, opcode, str) CEPH_OSD_OP_##op = (opcode),
+__CEPH_FORALL_OSD_OPS(GENERATE_ENUM_ENTRY)
+#undef GENERATE_ENUM_ENTRY
+};
+
+static inline int ceph_osd_op_type_lock(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK;
+}
+static inline int ceph_osd_op_type_data(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA;
+}
+static inline int ceph_osd_op_type_attr(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR;
+}
+static inline int ceph_osd_op_type_exec(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC;
+}
+static inline int ceph_osd_op_type_pg(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG;
+}
+static inline int ceph_osd_op_type_multi(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_MULTI;
+}
+
+static inline int ceph_osd_op_mode_subop(int op)
+{
+ return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB;
+}
+static inline int ceph_osd_op_mode_read(int op)
+{
+ return (op & CEPH_OSD_OP_MODE_RD) &&
+ op != CEPH_OSD_OP_CALL;
+}
+static inline int ceph_osd_op_mode_modify(int op)
+{
+ return op & CEPH_OSD_OP_MODE_WR;
+}
+
+/*
+ * Note that the following tmap values are also defined in Ceph's librados.h;
+ * any modification here needs to be mirrored there.
+ */
+#define CEPH_OSD_TMAP_HDR 'h'
+#define CEPH_OSD_TMAP_SET 's'
+#define CEPH_OSD_TMAP_CREATE 'c' /* create key */
+#define CEPH_OSD_TMAP_RM 'r'
+#define CEPH_OSD_TMAP_RMSLOPPY 'R'
+
+extern const char *ceph_osd_op_name(int op);
+
+/*
+ * osd op flags
+ *
+ * An op may be READ, WRITE, or READ|WRITE.
+ */
+enum {
+ CEPH_OSD_FLAG_ACK = 0x0001, /* want (or is) "ack" ack */
+ CEPH_OSD_FLAG_ONNVRAM = 0x0002, /* want (or is) "onnvram" ack */
+ CEPH_OSD_FLAG_ONDISK = 0x0004, /* want (or is) "ondisk" ack */
+ CEPH_OSD_FLAG_RETRY = 0x0008, /* resend attempt */
+ CEPH_OSD_FLAG_READ = 0x0010, /* op may read */
+ CEPH_OSD_FLAG_WRITE = 0x0020, /* op may write */
+ CEPH_OSD_FLAG_ORDERSNAP = 0x0040, /* EOLDSNAP if snapc is out of order */
+ CEPH_OSD_FLAG_PEERSTAT_OLD = 0x0080, /* DEPRECATED msg includes osd_peer_stat */
+ CEPH_OSD_FLAG_BALANCE_READS = 0x0100,
+ CEPH_OSD_FLAG_PARALLELEXEC = 0x0200, /* execute op in parallel */
+ CEPH_OSD_FLAG_PGOP = 0x0400, /* pg op, no object */
+ CEPH_OSD_FLAG_EXEC = 0x0800, /* op may exec */
+ CEPH_OSD_FLAG_EXEC_PUBLIC = 0x1000, /* DEPRECATED op may exec (public) */
+ CEPH_OSD_FLAG_LOCALIZE_READS = 0x2000, /* read from nearby replica, if any */
+ CEPH_OSD_FLAG_RWORDERED = 0x4000, /* order wrt concurrent reads */
+ CEPH_OSD_FLAG_IGNORE_CACHE = 0x8000, /* ignore cache logic */
+ CEPH_OSD_FLAG_SKIPRWLOCKS = 0x10000, /* skip rw locks */
+ CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */
+ CEPH_OSD_FLAG_FLUSH = 0x40000, /* this is part of flush */
+};
+
+enum {
+ CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
+ CEPH_OSD_OP_FLAG_FAILOK = 2, /* continue despite failure */
+};
+
+#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
+#define EBLACKLISTED ESHUTDOWN /* blacklisted */
+
+/* xattr comparison */
+enum {
+ CEPH_OSD_CMPXATTR_OP_NOP = 0,
+ CEPH_OSD_CMPXATTR_OP_EQ = 1,
+ CEPH_OSD_CMPXATTR_OP_NE = 2,
+ CEPH_OSD_CMPXATTR_OP_GT = 3,
+ CEPH_OSD_CMPXATTR_OP_GTE = 4,
+ CEPH_OSD_CMPXATTR_OP_LT = 5,
+ CEPH_OSD_CMPXATTR_OP_LTE = 6
+};
+
+enum {
+ CEPH_OSD_CMPXATTR_MODE_STRING = 1,
+ CEPH_OSD_CMPXATTR_MODE_U64 = 2
+};
+
+#define RADOS_NOTIFY_VER 1
+
+/*
+ * an individual object operation. each may be accompanied by some data
+ * payload
+ */
+struct ceph_osd_op {
+ __le16 op; /* CEPH_OSD_OP_* */
+ __le32 flags; /* CEPH_OSD_OP_FLAG_* */
+ union {
+ struct {
+ __le64 offset, length;
+ __le64 truncate_size;
+ __le32 truncate_seq;
+ } __attribute__ ((packed)) extent;
+ struct {
+ __le32 name_len;
+ __le32 value_len;
+ __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
+ __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
+ } __attribute__ ((packed)) xattr;
+ struct {
+ __u8 class_len;
+ __u8 method_len;
+ __u8 argc;
+ __le32 indata_len;
+ } __attribute__ ((packed)) cls;
+ struct {
+ __le64 cookie, count;
+ } __attribute__ ((packed)) pgls;
+ struct {
+ __le64 snapid;
+ } __attribute__ ((packed)) snap;
+ struct {
+ __le64 cookie;
+ __le64 ver;
+ __u8 flag; /* 0 = unwatch, 1 = watch */
+ } __attribute__ ((packed)) watch;
+ struct {
+ __le64 offset, length;
+ __le64 src_offset;
+ } __attribute__ ((packed)) clonerange;
+ struct {
+ __le64 expected_object_size;
+ __le64 expected_write_size;
+ } __attribute__ ((packed)) alloc_hint;
+ };
+ __le32 payload_len;
+} __attribute__ ((packed));
+
+
+#endif
diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h
new file mode 100644
index 000000000..d3ff1cf2d
--- /dev/null
+++ b/include/linux/ceph/types.h
@@ -0,0 +1,29 @@
+#ifndef _FS_CEPH_TYPES_H
+#define _FS_CEPH_TYPES_H
+
+/* needed before including ceph_fs.h */
+#include <linux/in.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+
+#include <linux/ceph/ceph_fs.h>
+#include <linux/ceph/ceph_frag.h>
+#include <linux/ceph/ceph_hash.h>
+
+/*
+ * Identify inodes by both their ino AND snapshot id (a u64).
+ */
+struct ceph_vino {
+ u64 ino;
+ u64 snap;
+};
+
+
+/* context for the caps reservation mechanism */
+struct ceph_cap_reservation {
+ int count;
+};
+
+
+#endif
diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h
new file mode 100644
index 000000000..b454dfce6
--- /dev/null
+++ b/include/linux/cfag12864b.h
@@ -0,0 +1,82 @@
+/*
+ * Filename: cfag12864b.h
+ * Version: 0.1.0
+ * Description: cfag12864b LCD driver header
+ * License: GPLv2
+ *
+ * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Date: 2006-10-12
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _CFAG12864B_H_
+#define _CFAG12864B_H_
+
+#define CFAG12864B_WIDTH (128)
+#define CFAG12864B_HEIGHT (64)
+#define CFAG12864B_CONTROLLERS (2)
+#define CFAG12864B_PAGES (8)
+#define CFAG12864B_ADDRESSES (64)
+#define CFAG12864B_SIZE ((CFAG12864B_CONTROLLERS) * \
+ (CFAG12864B_PAGES) * \
+ (CFAG12864B_ADDRESSES))
+
+/*
+ * The driver will blit this buffer to the LCD
+ *
+ * Its size is CFAG12864B_SIZE.
+ */
+extern unsigned char *cfag12864b_buffer;
+
+/*
+ * Get the refresh rate of the LCD
+ *
+ * Returns the refresh rate (hertz).
+ */
+extern unsigned int cfag12864b_getrate(void);
+
+/*
+ * Enable refreshing
+ *
+ * Returns 0 if successful (no one else was using it),
+ * or != 0 if it failed (someone else is using it).
+ */
+extern unsigned char cfag12864b_enable(void);
+
+/*
+ * Disable refreshing
+ *
+ * You should call this only when you finish using the LCD.
+ */
+extern void cfag12864b_disable(void);
+
+/*
+ * Is refreshing enabled? (is anyone using the module?)
+ *
+ * Returns 0 if refreshing is not enabled (no one is using it),
+ * or != 0 if refreshing is enabled (someone is using it).
+ *
+ * Useful for modules that only read the buffer.
+ */
+extern unsigned char cfag12864b_isenabled(void);
+
+/*
+ * Is the module initialized?
+ */
+extern unsigned char cfag12864b_isinited(void);
+
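+/*
+ * Hedged usage sketch (illustrative only; the all-pixels-on pattern is an
+ * arbitrary choice and error handling is minimal):
+ *
+ *	if (cfag12864b_enable() == 0) {
+ *		memset(cfag12864b_buffer, 0xFF, CFAG12864B_SIZE);
+ *		... the driver blits the buffer at cfag12864b_getrate() Hz ...
+ *		cfag12864b_disable();
+ *	}
+ */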
+#endif /* _CFAG12864B_H_ */
+
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
new file mode 100644
index 000000000..b9cb94c31
--- /dev/null
+++ b/include/linux/cgroup.h
@@ -0,0 +1,971 @@
+#ifndef _LINUX_CGROUP_H
+#define _LINUX_CGROUP_H
+/*
+ * cgroup interface
+ *
+ * Copyright (C) 2003 BULL SA
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/nodemask.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <linux/cgroupstats.h>
+#include <linux/rwsem.h>
+#include <linux/idr.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+#include <linux/percpu-refcount.h>
+#include <linux/seq_file.h>
+#include <linux/kernfs.h>
+#include <linux/wait.h>
+
+#ifdef CONFIG_CGROUPS
+
+struct cgroup_root;
+struct cgroup_subsys;
+struct cgroup;
+
+extern int cgroup_init_early(void);
+extern int cgroup_init(void);
+extern void cgroup_fork(struct task_struct *p);
+extern void cgroup_post_fork(struct task_struct *p);
+extern void cgroup_exit(struct task_struct *p);
+extern int cgroupstats_build(struct cgroupstats *stats,
+ struct dentry *dentry);
+
+extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk);
+
+/* define the enumeration of all cgroup subsystems */
+#define SUBSYS(_x) _x ## _cgrp_id,
+enum cgroup_subsys_id {
+#include <linux/cgroup_subsys.h>
+ CGROUP_SUBSYS_COUNT,
+};
+#undef SUBSYS
+
+/*
+ * Per-subsystem/per-cgroup state maintained by the system. This is the
+ * fundamental structural building block that controllers deal with.
+ *
+ * Fields marked with "PI:" are public and immutable and may be accessed
+ * directly without synchronization.
+ */
+struct cgroup_subsys_state {
+ /* PI: the cgroup that this css is attached to */
+ struct cgroup *cgroup;
+
+ /* PI: the cgroup subsystem that this css is attached to */
+ struct cgroup_subsys *ss;
+
+ /* reference count - access via css_[try]get() and css_put() */
+ struct percpu_ref refcnt;
+
+ /* PI: the parent css */
+ struct cgroup_subsys_state *parent;
+
+ /* siblings list anchored at the parent's ->children */
+ struct list_head sibling;
+ struct list_head children;
+
+ /*
+ * PI: Subsys-unique ID. 0 is unused and root is always 1. The
+ * matching css can be looked up using css_from_id().
+ */
+ int id;
+
+ unsigned int flags;
+
+ /*
+ * Monotonically increasing unique serial number which defines a
+ * uniform order among all csses. It's guaranteed that all
+ * ->children lists are in the ascending order of ->serial_nr and
+ * used to allow interrupting and resuming iterations.
+ */
+ u64 serial_nr;
+
+ /* percpu_ref killing and RCU release */
+ struct rcu_head rcu_head;
+ struct work_struct destroy_work;
+};
+
+/* bits in struct cgroup_subsys_state flags field */
+enum {
+ CSS_NO_REF = (1 << 0), /* no reference counting for this css */
+ CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
+ CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */
+};
+
+/**
+ * css_get - obtain a reference on the specified css
+ * @css: target css
+ *
+ * The caller must already have a reference.
+ */
+static inline void css_get(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get(&css->refcnt);
+}
+
+/**
+ * css_get_many - obtain references on the specified css
+ * @css: target css
+ * @n: number of references to get
+ *
+ * The caller must already have a reference.
+ */
+static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_get_many(&css->refcnt, n);
+}
+
+/**
+ * css_tryget - try to obtain a reference on the specified css
+ * @css: target css
+ *
+ * Obtain a reference on @css unless it already has reached zero and is
+ * being released. This function doesn't care whether @css is online or
+ * offline. The caller naturally needs to ensure that @css is accessible
+ * but doesn't have to be holding a reference on it - IOW, RCU protected
+ * access is good enough for this function. Returns %true if a reference
+ * count was successfully obtained; %false otherwise.
+ */
+static inline bool css_tryget(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget(&css->refcnt);
+ return true;
+}
+
+/**
+ * css_tryget_online - try to obtain a reference on the specified css if online
+ * @css: target css
+ *
+ * Obtain a reference on @css if it's online. The caller naturally needs
+ * to ensure that @css is accessible but doesn't have to be holding a
+ * reference on it - IOW, RCU protected access is good enough for this
+ * function. Returns %true if a reference count was successfully obtained;
+ * %false otherwise.
+ */
+static inline bool css_tryget_online(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ return percpu_ref_tryget_live(&css->refcnt);
+ return true;
+}
+
+/**
+ * css_put - put a css reference
+ * @css: target css
+ *
+ * Put a reference obtained via css_get() and css_tryget_online().
+ */
+static inline void css_put(struct cgroup_subsys_state *css)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put(&css->refcnt);
+}
+
+/**
+ * css_put_many - put css references
+ * @css: target css
+ * @n: number of references to put
+ *
+ * Put references obtained via css_get() and css_tryget_online().
+ */
+static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+{
+ if (!(css->flags & CSS_NO_REF))
+ percpu_ref_put_many(&css->refcnt, n);
+}
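+
+/*
+ * A minimal pinning sketch (illustrative; finding @css under RCU
+ * protection is assumed to happen elsewhere):
+ *
+ *	rcu_read_lock();
+ *	if (css_tryget_online(css)) {
+ *		rcu_read_unlock();
+ *		... use @css, possibly sleeping ...
+ *		css_put(css);
+ *	} else {
+ *		rcu_read_unlock();
+ *	}
+ */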
+
+/* bits in struct cgroup flags field */
+enum {
+ /* Control Group requires release notifications to userspace */
+ CGRP_NOTIFY_ON_RELEASE,
+ /*
+ * Clone the parent's configuration when creating a new child
+ * cpuset cgroup. For historical reasons, this option can be
+ * specified at mount time and thus is implemented here.
+ */
+ CGRP_CPUSET_CLONE_CHILDREN,
+};
+
+struct cgroup {
+ /* self css with NULL ->ss, points back to this cgroup */
+ struct cgroup_subsys_state self;
+
+ unsigned long flags; /* "unsigned long" so bitops work */
+
+ /*
+ * idr allocated in-hierarchy ID.
+ *
+ * ID 0 is not used, the ID of the root cgroup is always 1, and a
+ * new cgroup is assigned the smallest available ID.
+ *
+ * Allocating/Removing ID must be protected by cgroup_mutex.
+ */
+ int id;
+
+ /*
+ * If this cgroup contains any tasks, it contributes one to
+ * populated_cnt. All children with non-zero populated_cnt of
+ * their own contribute one. The count is zero iff there's no task
+ * in this cgroup or its subtree.
+ */
+ int populated_cnt;
+
+ struct kernfs_node *kn; /* cgroup kernfs entry */
+ struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
+
+ /*
+ * The bitmask of subsystems enabled on the child cgroups.
+ * ->subtree_control is the one configured through
+ * "cgroup.subtree_control" while ->child_subsys_mask is the
+ * effective one which may have more subsystems enabled.
+ * Controller knobs are made available iff it's enabled in
+ * ->subtree_control.
+ */
+ unsigned int subtree_control;
+ unsigned int child_subsys_mask;
+
+ /* Private pointers for each registered subsystem */
+ struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+
+ struct cgroup_root *root;
+
+ /*
+ * List of cgrp_cset_links pointing at css_sets with tasks in this
+ * cgroup. Protected by css_set_lock.
+ */
+ struct list_head cset_links;
+
+ /*
+ * On the default hierarchy, a css_set for a cgroup with some
+ * subsys disabled will point to css's which are associated with
+ * the closest ancestor which has the subsys enabled. The
+ * following lists all css_sets which point to this cgroup's css
+ * for the given subsystem.
+ */
+ struct list_head e_csets[CGROUP_SUBSYS_COUNT];
+
+ /*
+ * list of pidlists, up to two for each namespace (one for procs, one
+ * for tasks); created on demand.
+ */
+ struct list_head pidlists;
+ struct mutex pidlist_mutex;
+
+ /* used to wait for offlining of csses */
+ wait_queue_head_t offline_waitq;
+
+ /* used to schedule release agent */
+ struct work_struct release_agent_work;
+};
+
+#define MAX_CGROUP_ROOT_NAMELEN 64
+
+/* cgroup_root->flags */
+enum {
+ CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
+ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
+ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
+};
+
+/*
+ * A cgroup_root represents the root of a cgroup hierarchy, and may be
+ * associated with a kernfs_root to form an active hierarchy. This is
+ * internal to cgroup core. Don't access directly from controllers.
+ */
+struct cgroup_root {
+ struct kernfs_root *kf_root;
+
+ /* The bitmask of subsystems attached to this hierarchy */
+ unsigned int subsys_mask;
+
+ /* Unique id for this hierarchy. */
+ int hierarchy_id;
+
+ /* The root cgroup. Root is destroyed on its release. */
+ struct cgroup cgrp;
+
+ /* Number of cgroups in the hierarchy, used only for /proc/cgroups */
+ atomic_t nr_cgrps;
+
+ /* A list running through the active hierarchies */
+ struct list_head root_list;
+
+ /* Hierarchy-specific flags */
+ unsigned int flags;
+
+ /* IDs for cgroups in this hierarchy */
+ struct idr cgroup_idr;
+
+ /* The path to use for release notifications. */
+ char release_agent_path[PATH_MAX];
+
+ /* The name for this hierarchy - may be empty */
+ char name[MAX_CGROUP_ROOT_NAMELEN];
+};
+
+/*
+ * A css_set is a structure holding pointers to a set of
+ * cgroup_subsys_state objects. This saves space in the task struct
+ * object and speeds up fork()/exit(), since a single inc/dec and a
+ * list_add()/del() can bump the reference count on the entire cgroup
+ * set for a task.
+ */
+
+struct css_set {
+
+ /* Reference count */
+ atomic_t refcount;
+
+ /*
+ * List running through all css_sets in the same hash
+ * slot. Protected by css_set_lock
+ */
+ struct hlist_node hlist;
+
+ /*
+ * Lists running through all tasks using this css_set.
+ * mg_tasks lists tasks which belong to this cset but are in the
+ * process of being migrated out or in. Protected by
+ * css_set_rwsem, but, during migration, once tasks are moved to
+ * mg_tasks, it can be read safely while holding cgroup_mutex.
+ */
+ struct list_head tasks;
+ struct list_head mg_tasks;
+
+ /*
+ * List of cgrp_cset_links pointing at cgroups referenced from this
+ * css_set. Protected by css_set_lock.
+ */
+ struct list_head cgrp_links;
+
+ /* the default cgroup associated with this css_set */
+ struct cgroup *dfl_cgrp;
+
+ /*
+ * Set of subsystem states, one for each subsystem. This array is
+ * immutable after creation apart from the init_css_set during
+ * subsystem registration (at boot time).
+ */
+ struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+
+ /*
+ * List of csets participating in the on-going migration either as
+ * source or destination. Protected by cgroup_mutex.
+ */
+ struct list_head mg_preload_node;
+ struct list_head mg_node;
+
+ /*
+ * If this cset is acting as the source of migration the following
+ * two fields are set. mg_src_cgrp is the source cgroup of the
+ * on-going migration and mg_dst_cset is the destination cset the
+ * target tasks on this cset should be migrated to. Protected by
+ * cgroup_mutex.
+ */
+ struct cgroup *mg_src_cgrp;
+ struct css_set *mg_dst_cset;
+
+ /*
+ * On the default hierarchy, ->subsys[ssid] may point to a css
+ * attached to an ancestor instead of the cgroup this css_set is
+ * associated with. The following node is anchored at
+ * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
+ * iterate through all css's attached to a given cgroup.
+ */
+ struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
+
+ /* For RCU-protected deletion */
+ struct rcu_head rcu_head;
+};
+
+/*
+ * struct cftype: handler definitions for cgroup control files
+ *
+ * When reading/writing to a file:
+ * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
+ * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
+ */
+
+/* cftype->flags */
+enum {
+ CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
+ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
+ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
+
+ /* internal flags, do not use outside cgroup core proper */
+ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
+ __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
+};
+
+#define MAX_CFTYPE_NAME 64
+
+struct cftype {
+ /*
+ * By convention, the name should begin with the name of the
+ * subsystem, followed by a period. Zero length string indicates
+ * end of cftype array.
+ */
+ char name[MAX_CFTYPE_NAME];
+ int private;
+ /*
+ * If not 0, file mode is set to this value, otherwise it will
+ * be figured out automatically
+ */
+ umode_t mode;
+
+ /*
+ * The maximum length of string, excluding trailing nul, that can
+ * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
+ */
+ size_t max_write_len;
+
+ /* CFTYPE_* flags */
+ unsigned int flags;
+
+ /*
+ * Fields used for internal bookkeeping. Initialized automatically
+ * during registration.
+ */
+ struct cgroup_subsys *ss; /* NULL for cgroup core files */
+ struct list_head node; /* anchored at ss->cfts */
+ struct kernfs_ops *kf_ops;
+
+ /*
+ * read_u64() is a shortcut for the common case of returning a
+ * single integer. Use it in place of read()
+ */
+ u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
+ /*
+ * read_s64() is a signed version of read_u64()
+ */
+ s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
+
+ /* generic seq_file read interface */
+ int (*seq_show)(struct seq_file *sf, void *v);
+
+ /* optional ops, implement all or none */
+ void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
+ void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
+ void (*seq_stop)(struct seq_file *sf, void *v);
+
+ /*
+ * write_u64() is a shortcut for the common case of accepting
+ * a single integer (as parsed by simple_strtoull) from
+ * userspace. Use in place of write(); return 0 or error.
+ */
+ int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val);
+ /*
+ * write_s64() is a signed version of write_u64()
+ */
+ int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
+ s64 val);
+
+ /*
+ * write() is the generic write callback which maps directly to
+ * kernfs write operation and overrides all other operations.
+ * Maximum write size is determined by ->max_write_len. Use
+ * of_css/cft() to access the associated css and cft.
+ */
+ ssize_t (*write)(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lockdep_key;
+#endif
+};
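+
+/*
+ * Hedged sketch of a minimal cftype array (the names are made up; a real
+ * controller would also hook it up through ->legacy_cftypes, ->dfl_cftypes
+ * or cgroup_add_legacy_cftypes()):
+ *
+ *	static u64 foo_current_read(struct cgroup_subsys_state *css,
+ *				    struct cftype *cft)
+ *	{
+ *		return 0;		(report a per-css value here)
+ *	}
+ *
+ *	static struct cftype foo_files[] = {
+ *		{
+ *			.name = "foo.current",
+ *			.read_u64 = foo_current_read,
+ *		},
+ *		{ }	(zero length name terminates the array)
+ *	};
+ */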
+
+extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
+
+/**
+ * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
+ * @cgrp: the cgroup of interest
+ *
+ * The default hierarchy is the v2 interface of cgroup and this function
+ * can be used to test whether a cgroup is on the default hierarchy for
+ * cases where a subsystem should behave differently depending on the
+ * interface version.
+ *
+ * The set of behaviors which change on the default hierarchy is still
+ * being determined and the mount option is prefixed with __DEVEL__.
+ *
+ * List of changed behaviors:
+ *
+ * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
+ * and "name" are disallowed.
+ *
+ * - When mounting an existing superblock, mount options should match.
+ *
+ * - Remount is disallowed.
+ *
+ * - rename(2) is disallowed.
+ *
+ * - "tasks" is removed. Everything should be at process granularity. Use
+ * "cgroup.procs" instead.
+ *
+ * - "cgroup.procs" is not sorted. pids will be unique unless they got
+ * recycled in between reads.
+ *
+ * - "release_agent" and "notify_on_release" are removed. Replacement
+ * notification mechanism will be implemented.
+ *
+ * - "cgroup.clone_children" is removed.
+ *
+ * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup
+ * and its descendants contain no task; otherwise, 1. The file also
+ * generates a kernfs notification which can be monitored through poll and
+ * [di]notify when the value of the file changes.
+ *
+ * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
+ * take masks of ancestors with non-empty cpus/mems, instead of being
+ * moved to an ancestor.
+ *
+ * - cpuset: a task can be moved into an empty cpuset, and again it takes
+ * masks of ancestors.
+ *
+ * - memcg: use_hierarchy is on by default and the cgroup file for the flag
+ * is not created.
+ *
+ * - blkcg: blk-throttle becomes properly hierarchical.
+ *
+ * - debug: disallowed on the default hierarchy.
+ */
+static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
+{
+ return cgrp->root == &cgrp_dfl_root;
+}
+
+/* no synchronization, the result can only be used as a hint */
+static inline bool cgroup_has_tasks(struct cgroup *cgrp)
+{
+ return !list_empty(&cgrp->cset_links);
+}
+
+/* returns ino associated with a cgroup */
+static inline ino_t cgroup_ino(struct cgroup *cgrp)
+{
+ return cgrp->kn->ino;
+}
+
+/* cft/css accessors for cftype->write() operation */
+static inline struct cftype *of_cft(struct kernfs_open_file *of)
+{
+ return of->kn->priv;
+}
+
+struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
+
+/* cft/css accessors for cftype->seq_*() operations */
+static inline struct cftype *seq_cft(struct seq_file *seq)
+{
+ return of_cft(seq->private);
+}
+
+static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
+{
+ return of_css(seq->private);
+}
+
+/*
+ * Name / path handling functions. All are thin wrappers around the kernfs
+ * counterparts and can be called under any context.
+ */
+
+static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
+{
+ return kernfs_name(cgrp->kn, buf, buflen);
+}
+
+static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
+ size_t buflen)
+{
+ return kernfs_path(cgrp->kn, buf, buflen);
+}
+
+static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
+{
+ pr_cont_kernfs_name(cgrp->kn);
+}
+
+static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
+{
+ pr_cont_kernfs_path(cgrp->kn);
+}
+
+char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_rm_cftypes(struct cftype *cfts);
+
+bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
+
+/*
+ * Control Group taskset, used to pass around set of tasks to cgroup_subsys
+ * methods.
+ */
+struct cgroup_taskset;
+struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
+struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+
+/**
+ * cgroup_taskset_for_each - iterate cgroup_taskset
+ * @task: the loop cursor
+ * @tset: taskset to iterate
+ */
+#define cgroup_taskset_for_each(task, tset) \
+ for ((task) = cgroup_taskset_first((tset)); (task); \
+ (task) = cgroup_taskset_next((tset)))
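As a hedged sketch of how a controller's ->can_attach() might consume the iterator above (the callback name and the kthread check are invented for this example):

static int example_can_attach(struct cgroup_subsys_state *css,
			      struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* reject the whole migration if any task in the set is a kthread */
	cgroup_taskset_for_each(task, tset) {
		if (task->flags & PF_KTHREAD)
			return -EINVAL;
	}
	return 0;
}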
+
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */
+
+struct cgroup_subsys {
+ struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
+ int (*css_online)(struct cgroup_subsys_state *css);
+ void (*css_offline)(struct cgroup_subsys_state *css);
+ void (*css_released)(struct cgroup_subsys_state *css);
+ void (*css_free)(struct cgroup_subsys_state *css);
+ void (*css_reset)(struct cgroup_subsys_state *css);
+ void (*css_e_css_changed)(struct cgroup_subsys_state *css);
+
+ int (*can_attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
+ void (*cancel_attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
+ void (*attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
+ void (*fork)(struct task_struct *task);
+ void (*exit)(struct cgroup_subsys_state *css,
+ struct cgroup_subsys_state *old_css,
+ struct task_struct *task);
+ void (*bind)(struct cgroup_subsys_state *root_css);
+
+ int disabled;
+ int early_init;
+
+ /*
+ * If %false, this subsystem is properly hierarchical -
+ * configuration, resource accounting and restriction on a parent
+ * cgroup cover those of its children. If %true, hierarchy support
+ * is broken in some ways - some subsystems ignore hierarchy
+ * completely while others are only implemented half-way.
+ *
+ * It's now disallowed to create nested cgroups if the subsystem is
+ * broken and cgroup core will emit a warning message in such
+ * cases. Eventually, all subsystems will be made properly
+ * hierarchical and this will go away.
+ */
+ bool broken_hierarchy;
+ bool warned_broken_hierarchy;
+
+ /* the following two fields are initialized automatically during boot */
+ int id;
+#define MAX_CGROUP_TYPE_NAMELEN 32
+ const char *name;
+
+ /* link to parent, protected by cgroup_lock() */
+ struct cgroup_root *root;
+
+ /* idr for css->id */
+ struct idr css_idr;
+
+ /*
+ * List of cftypes. Each entry is the first entry of an array
+ * terminated by zero length name.
+ */
+ struct list_head cfts;
+
+ /*
+ * Base cftypes which are automatically registered. The two can
+ * point to the same array.
+ */
+ struct cftype *dfl_cftypes; /* for the default hierarchy */
+ struct cftype *legacy_cftypes; /* for the legacy hierarchies */
+
+ /*
+ * A subsystem may depend on other subsystems. When such subsystem
+ * is enabled on a cgroup, the depended-upon subsystems are enabled
+ * together if available. Subsystems enabled due to dependency are
+ * not visible to userland until explicitly enabled. The following
+ * specifies the mask of subsystems that this one depends on.
+ */
+ unsigned int depends_on;
+};
+
+#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
+#include <linux/cgroup_subsys.h>
+#undef SUBSYS
+
+/**
+ * task_css_set_check - obtain a task's css_set with extra access conditions
+ * @task: the task to obtain css_set for
+ * @__c: extra condition expression to be passed to rcu_dereference_check()
+ *
+ * A task's css_set is RCU protected, initialized and exited while holding
+ * task_lock(), and can only be modified while holding both cgroup_mutex
+ * and task_lock() while the task is alive. This macro verifies that the
+ * caller is inside proper critical section and returns @task's css_set.
+ *
+ * The caller can also specify additional allowed conditions via @__c, such
+ * as locks used during the cgroup_subsys::attach() methods.
+ */
+#ifdef CONFIG_PROVE_RCU
+extern struct mutex cgroup_mutex;
+extern struct rw_semaphore css_set_rwsem;
+#define task_css_set_check(task, __c) \
+ rcu_dereference_check((task)->cgroups, \
+ lockdep_is_held(&cgroup_mutex) || \
+ lockdep_is_held(&css_set_rwsem) || \
+ ((task)->flags & PF_EXITING) || (__c))
+#else
+#define task_css_set_check(task, __c) \
+ rcu_dereference((task)->cgroups)
+#endif
+
+/**
+ * task_css_check - obtain css for (task, subsys) w/ extra access conds
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ * @__c: extra condition expression to be passed to rcu_dereference_check()
+ *
+ * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
+ * synchronization rules are the same as task_css_set_check().
+ */
+#define task_css_check(task, subsys_id, __c) \
+ task_css_set_check((task), (__c))->subsys[(subsys_id)]
+
+/**
+ * task_css_set - obtain a task's css_set
+ * @task: the task to obtain css_set for
+ *
+ * See task_css_set_check().
+ */
+static inline struct css_set *task_css_set(struct task_struct *task)
+{
+ return task_css_set_check(task, false);
+}
+
+/**
+ * task_css - obtain css for (task, subsys)
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * See task_css_check().
+ */
+static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
+ int subsys_id)
+{
+ return task_css_check(task, subsys_id, false);
+}
+
+/**
+ * task_css_is_root - test whether a task belongs to the root css
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Test whether @task belongs to the root css on the specified subsystem.
+ * May be invoked in any context.
+ */
+static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
+{
+ return task_css_check(task, subsys_id, true) ==
+ init_css_set.subsys[subsys_id];
+}
+
+static inline struct cgroup *task_cgroup(struct task_struct *task,
+ int subsys_id)
+{
+ return task_css(task, subsys_id)->cgroup;
+}
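A minimal usage sketch for the accessors above, assuming the generated subsystem id cpu_cgrp_id and the css ->parent pointer are available to the caller (both are assumptions of this example):

static bool example_task_in_root_cpu_cgroup(struct task_struct *task)
{
	struct cgroup_subsys_state *css;
	bool is_root;

	rcu_read_lock();
	css = task_css(task, cpu_cgrp_id);	/* valid while RCU is held */
	is_root = !css->parent;
	rcu_read_unlock();

	return is_root;
}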
+
+struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *parent);
+
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+
+/**
+ * css_for_each_child - iterate through children of a css
+ * @pos: the css * to use as the loop cursor
+ * @parent: css whose children to walk
+ *
+ * Walk @parent's children. Must be called under rcu_read_lock().
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
+ */
+#define css_for_each_child(pos, parent) \
+ for ((pos) = css_next_child(NULL, (parent)); (pos); \
+ (pos) = css_next_child((pos), (parent)))
+
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *css);
+
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos);
+
+/**
+ * css_for_each_descendant_pre - pre-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @root: css whose descendants to walk
+ *
+ * Walk @root's descendants. @root is included in the iteration and the
+ * first node to be visited. Must be called under rcu_read_lock().
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * For example, the following guarantees that a descendant can't escape
+ * state updates of its ancestors.
+ *
+ * my_online(@css)
+ * {
+ * Lock @css's parent and @css;
+ * Inherit state from the parent;
+ * Unlock both.
+ * }
+ *
+ * my_update_state(@css)
+ * {
+ * css_for_each_descendant_pre(@pos, @css) {
+ * Lock @pos;
+ * if (@pos == @css)
+ * Update @css's state;
+ * else
+ * Verify @pos is alive and inherit state from its parent;
+ * Unlock @pos;
+ * }
+ * }
+ *
+ * As long as the inheriting step, including checking the parent state, is
+ * enclosed inside @pos locking, double-locking the parent isn't necessary
+ * while inheriting. The state update to the parent is guaranteed to be
+ * visible by walking order and, as long as inheriting operations to the
+ * same @pos are atomic to each other, multiple updates racing each other
+ * still result in the correct state. It's guaranteed that at least one
+ * inheritance happens for any css after the latest update to its parent.
+ *
+ * If checking parent's state requires locking the parent, each inheriting
+ * iteration should lock and unlock both @pos->parent and @pos.
+ *
+ * Alternatively, a subsystem may choose to use a single global lock to
+ * synchronize ->css_online() and ->css_offline() against tree-walking
+ * operations.
+ *
+ * It is allowed to temporarily drop RCU read lock during iteration. The
+ * caller is responsible for ensuring that @pos remains accessible until
+ * the start of the next iteration by, for example, bumping the css refcnt.
+ */
+#define css_for_each_descendant_pre(pos, css) \
+ for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
+ (pos) = css_next_descendant_pre((pos), (css)))
+
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *css);
+
+/**
+ * css_for_each_descendant_post - post-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @css: css whose descendants to walk
+ *
+ * Similar to css_for_each_descendant_pre() but performs post-order
+ * traversal instead. @css is included in the iteration and the last
+ * node to be visited.
+ *
+ * If a subsystem synchronizes ->css_online() and the start of iteration, a
+ * css which finished ->css_online() is guaranteed to be visible in the
+ * future iterations and will stay visible until the last reference is put.
+ * A css which hasn't finished ->css_online() or already finished
+ * ->css_offline() may show up during traversal. It's each subsystem's
+ * responsibility to synchronize against on/offlining.
+ *
+ * Note that the walk visibility guarantee example described for the
+ * pre-order walk doesn't apply to post-order walks.
+ */
+#define css_for_each_descendant_post(pos, css) \
+ for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
+ (pos) = css_next_descendant_post((pos), (css)))
+
+bool css_has_online_children(struct cgroup_subsys_state *css);
+
+/* A css_task_iter should be treated as an opaque object */
+struct css_task_iter {
+ struct cgroup_subsys *ss;
+
+ struct list_head *cset_pos;
+ struct list_head *cset_head;
+
+ struct list_head *task_pos;
+ struct list_head *tasks_head;
+ struct list_head *mg_tasks_head;
+};
+
+void css_task_iter_start(struct cgroup_subsys_state *css,
+ struct css_task_iter *it);
+struct task_struct *css_task_iter_next(struct css_task_iter *it);
+void css_task_iter_end(struct css_task_iter *it);
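The three functions above are intended to be used as a start/next/end triplet; below is a sketch that merely counts the tasks attached to a css (the helper name is illustrative):

static int example_count_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	int count = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		count++;
	css_task_iter_end(&it);

	return count;
}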
+
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
+int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
+struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
+ struct cgroup_subsys *ss);
+
+#else /* !CONFIG_CGROUPS */
+
+struct cgroup_subsys_state;
+
+static inline int cgroup_init_early(void) { return 0; }
+static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_fork(struct task_struct *p) {}
+static inline void cgroup_post_fork(struct task_struct *p) {}
+static inline void cgroup_exit(struct task_struct *p) {}
+
+static inline int cgroupstats_build(struct cgroupstats *stats,
+ struct dentry *dentry)
+{
+ return -EINVAL;
+}
+
+static inline void css_put(struct cgroup_subsys_state *css) {}
+
+/* No cgroups - nothing to do */
+static inline int cgroup_attach_task_all(struct task_struct *from,
+ struct task_struct *t)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_CGROUPS */
+
+#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
new file mode 100644
index 000000000..267d681a4
--- /dev/null
+++ b/include/linux/cgroup_subsys.h
@@ -0,0 +1,62 @@
+/*
+ * List of cgroup subsystems.
+ *
+ * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
+ */
+#if IS_ENABLED(CONFIG_CPUSETS)
+SUBSYS(cpuset)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_SCHED)
+SUBSYS(cpu)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_CPUACCT)
+SUBSYS(cpuacct)
+#endif
+
+#if IS_ENABLED(CONFIG_BLK_CGROUP)
+SUBSYS(blkio)
+#endif
+
+#if IS_ENABLED(CONFIG_MEMCG)
+SUBSYS(memory)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_DEVICE)
+SUBSYS(devices)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_FREEZER)
+SUBSYS(freezer)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
+SUBSYS(net_cls)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_BFQIO)
+SUBSYS(bfqio)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_PERF)
+SUBSYS(perf_event)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+SUBSYS(net_prio)
+#endif
+
+#if IS_ENABLED(CONFIG_CGROUP_HUGETLB)
+SUBSYS(hugetlb)
+#endif
+
+/*
+ * The following subsystems are not supported on the default hierarchy.
+ */
+#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
+SUBSYS(debug)
+#endif
+/*
+ * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
+ */
diff --git a/include/linux/circ_buf.h b/include/linux/circ_buf.h
new file mode 100644
index 000000000..90f2471dc
--- /dev/null
+++ b/include/linux/circ_buf.h
@@ -0,0 +1,36 @@
+/*
+ * See Documentation/circular-buffers.txt for more information.
+ */
+
+#ifndef _LINUX_CIRC_BUF_H
+#define _LINUX_CIRC_BUF_H 1
+
+struct circ_buf {
+ char *buf;
+ int head;
+ int tail;
+};
+
+/* Return count in buffer. */
+#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))
+
+/* Return space available, 0..size-1. We always leave one free char
+ as a completely full buffer has head == tail, which is the same as
+ empty. */
+#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))
+
+/* Return count up to the end of the buffer. Carefully avoid
+ accessing head and tail more than once, so they can change
+ underneath us without returning inconsistent results. */
+#define CIRC_CNT_TO_END(head,tail,size) \
+ ({int end = (size) - (tail); \
+ int n = ((head) + end) & ((size)-1); \
+ n < end ? n : end;})
+
+/* Return space available up to the end of the buffer. */
+#define CIRC_SPACE_TO_END(head,tail,size) \
+ ({int end = (size) - 1 - (head); \
+ int n = (end + (tail)) & ((size)-1); \
+ n <= end ? n : end+1;})
+
+#endif /* _LINUX_CIRC_BUF_H */
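A minimal single-producer sketch using the macros above; the buffer size must be a power of two, and the names below are illustrative only:

#define EX_BUF_SIZE 256	/* power of two, as required by the macros */

static int example_push(struct circ_buf *cb, char c)
{
	if (CIRC_SPACE(cb->head, cb->tail, EX_BUF_SIZE) < 1)
		return -1;	/* buffer full */

	cb->buf[cb->head] = c;
	cb->head = (cb->head + 1) & (EX_BUF_SIZE - 1);
	return 0;
}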
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
new file mode 100644
index 000000000..bda5ec0b4
--- /dev/null
+++ b/include/linux/cleancache.h
@@ -0,0 +1,125 @@
+#ifndef _LINUX_CLEANCACHE_H
+#define _LINUX_CLEANCACHE_H
+
+#include <linux/fs.h>
+#include <linux/exportfs.h>
+#include <linux/mm.h>
+
+#define CLEANCACHE_NO_POOL -1
+#define CLEANCACHE_NO_BACKEND -2
+#define CLEANCACHE_NO_BACKEND_SHARED -3
+
+#define CLEANCACHE_KEY_MAX 6
+
+/*
+ * cleancache requires every file with a page in cleancache to have a
+ * unique key unless/until the file is removed/truncated. For some
+ * filesystems, the inode number is unique, but for "modern" filesystems
+ * an exportable filehandle is required (see exportfs.h)
+ */
+struct cleancache_filekey {
+ union {
+ ino_t ino;
+ __u32 fh[CLEANCACHE_KEY_MAX];
+ u32 key[CLEANCACHE_KEY_MAX];
+ } u;
+};
+
+struct cleancache_ops {
+ int (*init_fs)(size_t);
+ int (*init_shared_fs)(char *uuid, size_t);
+ int (*get_page)(int, struct cleancache_filekey,
+ pgoff_t, struct page *);
+ void (*put_page)(int, struct cleancache_filekey,
+ pgoff_t, struct page *);
+ void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
+ void (*invalidate_inode)(int, struct cleancache_filekey);
+ void (*invalidate_fs)(int);
+};
+
+extern int cleancache_register_ops(struct cleancache_ops *ops);
+extern void __cleancache_init_fs(struct super_block *);
+extern void __cleancache_init_shared_fs(struct super_block *);
+extern int __cleancache_get_page(struct page *);
+extern void __cleancache_put_page(struct page *);
+extern void __cleancache_invalidate_page(struct address_space *, struct page *);
+extern void __cleancache_invalidate_inode(struct address_space *);
+extern void __cleancache_invalidate_fs(struct super_block *);
+
+#ifdef CONFIG_CLEANCACHE
+#define cleancache_enabled (1)
+static inline bool cleancache_fs_enabled(struct page *page)
+{
+ return page->mapping->host->i_sb->cleancache_poolid >= 0;
+}
+static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
+{
+ return mapping->host->i_sb->cleancache_poolid >= 0;
+}
+#else
+#define cleancache_enabled (0)
+#define cleancache_fs_enabled(_page) (0)
+#define cleancache_fs_enabled_mapping(_page) (0)
+#endif
+
+/*
+ * The shim layer provided by these inline functions allows the compiler
+ * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
+ * is disabled, to a single global variable check if CONFIG_CLEANCACHE
+ * is enabled but no cleancache "backend" has dynamically enabled it,
+ * and, for the most frequent cleancache ops, to a single global variable
+ * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
+ * and a cleancache backend has dynamically enabled cleancache, but the
+ * filesystem referenced by that cleancache op has not enabled cleancache.
+ * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
+ * no measurable performance impact.
+ */
+
+static inline void cleancache_init_fs(struct super_block *sb)
+{
+ if (cleancache_enabled)
+ __cleancache_init_fs(sb);
+}
+
+static inline void cleancache_init_shared_fs(struct super_block *sb)
+{
+ if (cleancache_enabled)
+ __cleancache_init_shared_fs(sb);
+}
+
+static inline int cleancache_get_page(struct page *page)
+{
+ int ret = -1;
+
+ if (cleancache_enabled && cleancache_fs_enabled(page))
+ ret = __cleancache_get_page(page);
+ return ret;
+}
+
+static inline void cleancache_put_page(struct page *page)
+{
+ if (cleancache_enabled && cleancache_fs_enabled(page))
+ __cleancache_put_page(page);
+}
+
+static inline void cleancache_invalidate_page(struct address_space *mapping,
+ struct page *page)
+{
+ /* careful... page->mapping is NULL sometimes when this is called */
+ if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
+ __cleancache_invalidate_page(mapping, page);
+}
+
+static inline void cleancache_invalidate_inode(struct address_space *mapping)
+{
+ if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
+ __cleancache_invalidate_inode(mapping);
+}
+
+static inline void cleancache_invalidate_fs(struct super_block *sb)
+{
+ if (cleancache_enabled)
+ __cleancache_invalidate_fs(sb);
+}
+
+#endif /* _LINUX_CLEANCACHE_H */
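As a hedged illustration of where these shims sit in a filesystem read path, a readpage implementation could consult cleancache before issuing real I/O; everything below (the function, the page helpers, the fallback) is an assumption of this sketch, not part of the header:

static int example_readpage(struct file *file, struct page *page)
{
	/* a cleancache hit is assumed to fill the page and return 0 */
	if (cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}

	/* fall back to real block I/O (elided in this sketch) */
	return -EIO;
}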
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
new file mode 100644
index 000000000..df695313f
--- /dev/null
+++ b/include/linux/clk-provider.h
@@ -0,0 +1,698 @@
+/*
+ * linux/include/linux/clk-provider.h
+ *
+ * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
+ * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_CLK_PROVIDER_H
+#define __LINUX_CLK_PROVIDER_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_COMMON_CLK
+
+/*
+ * flags used across common struct clk. these flags should only affect the
+ * top-level framework. custom flags for dealing with hardware specifics
+ * belong in struct clk_foo
+ */
+#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */
+#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */
+#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
+#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
+#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
+#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
+#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
+#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
+#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
+
+struct clk_hw;
+struct clk_core;
+struct dentry;
+
+/**
+ * struct clk_ops - Callback operations for hardware clocks; these are to
+ * be provided by the clock implementation, and will be called by drivers
+ * through the clk_* api.
+ *
+ * @prepare: Prepare the clock for enabling. This must not return until
+ * the clock is fully prepared, and it's safe to call clk_enable.
+ * This callback is intended to allow clock implementations to
+ * do any initialisation that may sleep. Called with
+ * prepare_lock held.
+ *
+ * @unprepare: Release the clock from its prepared state. This will typically
+ * undo any work done in the @prepare callback. Called with
+ * prepare_lock held.
+ *
+ * @is_prepared: Queries the hardware to determine if the clock is prepared.
+ * This function is allowed to sleep. Optional, if this op is not
+ * set then the prepare count will be used.
+ *
+ * @unprepare_unused: Unprepare the clock atomically. Only called from
+ * clk_disable_unused for prepare clocks with special needs.
+ * Called with prepare mutex held. This function may sleep.
+ *
+ * @enable: Enable the clock atomically. This must not return until the
+ * clock is generating a valid clock signal, usable by consumer
+ * devices. Called with enable_lock held. This function must not
+ * sleep.
+ *
+ * @disable: Disable the clock atomically. Called with enable_lock held.
+ * This function must not sleep.
+ *
+ * @is_enabled: Queries the hardware to determine if the clock is enabled.
+ * This function must not sleep. Optional, if this op is not
+ * set then the enable count will be used.
+ *
+ * @disable_unused: Disable the clock atomically. Only called from
+ * clk_disable_unused for gate clocks with special needs.
+ * Called with enable_lock held. This function must not
+ * sleep.
+ *
+ * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
+ * parent rate is an input parameter. It is up to the caller to
+ * ensure that the prepare_mutex is held across this call.
+ * Returns the calculated rate. Optional, but recommended - if
+ * this op is not set then clock rate will be initialized to 0.
+ *
+ * @round_rate: Given a target rate as input, returns the closest rate actually
+ * supported by the clock. The parent rate is an input/output
+ * parameter.
+ *
+ * @determine_rate: Given a target rate as input, returns the closest rate
+ * actually supported by the clock, and optionally the parent clock
+ * that should be used to provide the clock rate.
+ *
+ * @set_parent: Change the input source of this clock; for clocks with multiple
+ * possible parents specify a new parent by passing in the index
+ * as a u8 corresponding to the parent in either the .parent_names
+ * or .parents arrays. This function in effect translates an
+ * array index into the value programmed into the hardware.
+ * Returns 0 on success, -EERROR otherwise.
+ *
+ * @get_parent: Queries the hardware to determine the parent of a clock. The
+ * return value is a u8 which specifies the index corresponding to
+ * the parent clock. This index can be applied to either the
+ * .parent_names or .parents arrays. In short, this function
+ * translates the parent value read from hardware into an array
+ * index. Currently only called when the clock is initialized by
+ * __clk_init. This callback is mandatory for clocks with
+ * multiple parents. It is optional (and unnecessary) for clocks
+ * with 0 or 1 parents.
+ *
+ * @set_rate: Change the rate of this clock. The requested rate is specified
+ * by the second argument, which should typically be the return
+ * of .round_rate call. The third argument gives the parent rate
+ * which is likely helpful for most .set_rate implementation.
+ * Returns 0 on success, -EERROR otherwise.
+ *
+ * @set_rate_and_parent: Change the rate and the parent of this clock. The
+ * requested rate is specified by the second argument, which
+ * should typically be the return of .round_rate call. The
+ * third argument gives the parent rate which is likely helpful
+ * for most .set_rate_and_parent implementation. The fourth
+ * argument gives the parent index. This callback is optional (and
+ * unnecessary) for clocks with 0 or 1 parents as well as
+ * for clocks that can tolerate switching the rate and the parent
+ * separately via calls to .set_parent and .set_rate.
+ * Returns 0 on success, -EERROR otherwise.
+ *
+ * @recalc_accuracy: Recalculate the accuracy of this clock. The clock accuracy
+ * is expressed in ppb (parts per billion). The parent accuracy is
+ * an input parameter.
+ * Returns the calculated accuracy. Optional - if this op is not
+ * set then clock accuracy will be initialized to parent accuracy
+ * or 0 (perfect clock) if clock has no parent.
+ *
+ * @get_phase: Queries the hardware to get the current phase of a clock.
+ * Returned values are 0-359 degrees on success, negative
+ * error codes on failure.
+ *
+ * @set_phase: Shift the phase of this clock signal in degrees specified
+ * by the second argument. Valid values for degrees are
+ * 0-359. Return 0 on success, otherwise -EERROR.
+ *
+ * @init: Perform platform-specific initialization magic.
+ * This is not used by any of the basic clock types.
+ * Please consider other ways of solving initialization problems
+ * before using this callback, as its use is discouraged.
+ *
+ * @debug_init: Set up type-specific debugfs entries for this clock. This
+ * is called once, after the debugfs directory entry for this
+ * clock has been created. The dentry pointer representing that
+ * directory is provided as an argument. Called with
+ * prepare_lock held. Returns 0 on success, -EERROR otherwise.
+ *
+ *
+ * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
+ * implementations to split any work between atomic (enable) and sleepable
+ * (prepare) contexts. If enabling a clock requires code that might sleep,
+ * this must be done in clk_prepare. Clock enable code that will never be
+ * called in a sleepable context may be implemented in clk_enable.
+ *
+ * Typically, drivers will call clk_prepare when a clock may be needed later
+ * (eg. when a device is opened), and clk_enable when the clock is actually
+ * required (eg. from an interrupt). Note that clk_prepare MUST have been
+ * called before clk_enable.
+ */
+struct clk_ops {
+ int (*prepare)(struct clk_hw *hw);
+ void (*unprepare)(struct clk_hw *hw);
+ int (*is_prepared)(struct clk_hw *hw);
+ void (*unprepare_unused)(struct clk_hw *hw);
+ int (*enable)(struct clk_hw *hw);
+ void (*disable)(struct clk_hw *hw);
+ int (*is_enabled)(struct clk_hw *hw);
+ void (*disable_unused)(struct clk_hw *hw);
+ unsigned long (*recalc_rate)(struct clk_hw *hw,
+ unsigned long parent_rate);
+ long (*round_rate)(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate);
+ long (*determine_rate)(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_hw);
+ int (*set_parent)(struct clk_hw *hw, u8 index);
+ u8 (*get_parent)(struct clk_hw *hw);
+ int (*set_rate)(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+ int (*set_rate_and_parent)(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate, u8 index);
+ unsigned long (*recalc_accuracy)(struct clk_hw *hw,
+ unsigned long parent_accuracy);
+ int (*get_phase)(struct clk_hw *hw);
+ int (*set_phase)(struct clk_hw *hw, int degrees);
+ void (*init)(struct clk_hw *hw);
+ int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+};
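A minimal provider-side sketch of implementing a couple of these callbacks for a hypothetical gate-like clock; the struct name, register layout and to_clk_foo() container are invented for illustration:

struct clk_foo {
	struct clk_hw hw;
	void __iomem *enable_reg;
};

#define to_clk_foo(_hw) container_of(_hw, struct clk_foo, hw)

static int clk_foo_enable(struct clk_hw *hw)
{
	struct clk_foo *foo = to_clk_foo(hw);

	/* atomic context: no sleeping, just set the enable bit */
	writel(readl(foo->enable_reg) | BIT(0), foo->enable_reg);
	return 0;
}

static void clk_foo_disable(struct clk_hw *hw)
{
	struct clk_foo *foo = to_clk_foo(hw);

	writel(readl(foo->enable_reg) & ~BIT(0), foo->enable_reg);
}

static const struct clk_ops clk_foo_ops = {
	.enable		= clk_foo_enable,
	.disable	= clk_foo_disable,
};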
+
+/**
+ * struct clk_init_data - holds init data that's common to all clocks and is
+ * shared between the clock provider and the common clock framework.
+ *
+ * @name: clock name
+ * @ops: operations this clock supports
+ * @parent_names: array of string names for all possible parents
+ * @num_parents: number of possible parents
+ * @flags: framework-level hints and quirks
+ */
+struct clk_init_data {
+ const char *name;
+ const struct clk_ops *ops;
+ const char **parent_names;
+ u8 num_parents;
+ unsigned long flags;
+};
+
+/**
+ * struct clk_hw - handle for traversing from a struct clk to its corresponding
+ * hardware-specific structure. struct clk_hw should be declared within struct
+ * clk_foo and then referenced by the struct clk instance that uses struct
+ * clk_foo's clk_ops
+ *
+ * @core: pointer to the struct clk_core instance that points back to this
+ * struct clk_hw instance
+ *
+ * @clk: pointer to the per-user struct clk instance that can be used to call
+ * into the clk API
+ *
+ * @init: pointer to struct clk_init_data that contains the init data shared
+ * with the common clock framework.
+ */
+struct clk_hw {
+ struct clk_core *core;
+ struct clk *clk;
+ const struct clk_init_data *init;
+};
+
+/*
+ * DOC: Basic clock implementations common to many platforms
+ *
+ * Each basic clock hardware type is comprised of a structure describing the
+ * clock hardware, implementations of the relevant callbacks in struct clk_ops,
+ * unique flags for that hardware type, a registration function and an
+ * alternative macro for static initialization
+ */
+
+/**
+ * struct clk_fixed_rate - fixed-rate clock
+ * @hw: handle between common and hardware-specific interfaces
+ * @fixed_rate: constant frequency of clock
+ */
+struct clk_fixed_rate {
+ struct clk_hw hw;
+ unsigned long fixed_rate;
+ unsigned long fixed_accuracy;
+ u8 flags;
+};
+
+extern const struct clk_ops clk_fixed_rate_ops;
+struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate);
+struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy);
+
+void of_fixed_clk_setup(struct device_node *np);
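For instance, a platform might register a 24 MHz root oscillator with the helper above roughly like this (the clock name and rate are placeholders):

static void example_register_osc(void)
{
	struct clk *osc;

	osc = clk_register_fixed_rate(NULL, "osc24m", NULL,
				      CLK_IS_ROOT, 24000000);
	if (IS_ERR(osc))
		pr_err("failed to register osc24m: %ld\n", PTR_ERR(osc));
}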
+
+/**
+ * struct clk_gate - gating clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling gate
+ * @bit_idx: single bit controlling gate
+ * @flags: hardware-specific flags
+ * @lock: register lock
+ *
+ * Clock which can gate its output. Implements .enable & .disable
+ *
+ * Flags:
+ * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
+ * enable the clock. Setting this flag does the opposite: setting the bit
+ * disables the clock and clearing it enables the clock
+ * CLK_GATE_HIWORD_MASK - The gate settings are only in lower 16-bit
+ * of this register, and mask of gate bits are in higher 16-bit of this
+ * register. While setting the gate bits, higher 16-bit should also be
+ * updated to indicate changing gate bits.
+ */
+struct clk_gate {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 bit_idx;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+#define CLK_GATE_SET_TO_DISABLE BIT(0)
+#define CLK_GATE_HIWORD_MASK BIT(1)
+
+extern const struct clk_ops clk_gate_ops;
+struct clk *clk_register_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
+void clk_unregister_gate(struct clk *clk);
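Registering a gate through the helper above might look like the sketch below; the register offset, bit index and lock are assumptions of this example (a real user would also need <linux/spinlock.h>):

static DEFINE_SPINLOCK(example_gate_lock);

static struct clk *example_register_gate(void __iomem *base)
{
	/* gate bit 3 of the register at base + 0x10, no special gate flags */
	return clk_register_gate(NULL, "uart_gate", "osc24m", 0,
				 base + 0x10, 3, 0, &example_gate_lock);
}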
+
+struct clk_div_table {
+ unsigned int val;
+ unsigned int div;
+};
+
+/**
+ * struct clk_divider - adjustable divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the divider
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
+ * @table: array of value/divider pairs, last entry should have div = 0
+ * @lock: register lock
+ *
+ * Clock with an adjustable divider affecting its output frequency. Implements
+ * .recalc_rate, .set_rate and .round_rate
+ *
+ * Flags:
+ * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
+ * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
+ * the raw value read from the register, with the value of zero considered
+ * invalid, unless CLK_DIVIDER_ALLOW_ZERO is set.
+ * CLK_DIVIDER_POWER_OF_TWO - clock divisor is 2 raised to the value read from
+ * the hardware register
+ * CLK_DIVIDER_ALLOW_ZERO - Allow zero divisors. For dividers which have
+ * CLK_DIVIDER_ONE_BASED set, it is possible to end up with a zero divisor.
+ * Some hardware implementations gracefully handle this case and allow a
+ * zero divisor by not modifying their input clock
+ * (divide by one / bypass).
+ * CLK_DIVIDER_HIWORD_MASK - The divider settings are only in lower 16-bit
+ * of this register, and mask of divider bits are in higher 16-bit of this
+ * register. While setting the divider bits, higher 16-bit should also be
+ * updated to indicate changing divider bits.
+ * CLK_DIVIDER_ROUND_CLOSEST - Round the best calculated divider to the
+ * closest integer instead of rounding it up.
+ * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
+ * not be changed by the clock framework.
+ */
+struct clk_divider {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+ spinlock_t *lock;
+};
+
+#define CLK_DIVIDER_ONE_BASED BIT(0)
+#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
+#define CLK_DIVIDER_ALLOW_ZERO BIT(2)
+#define CLK_DIVIDER_HIWORD_MASK BIT(3)
+#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
+#define CLK_DIVIDER_READ_ONLY BIT(5)
+
+extern const struct clk_ops clk_divider_ops;
+
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+ unsigned int val, const struct clk_div_table *table,
+ unsigned long flags);
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate, const struct clk_div_table *table,
+ u8 width, unsigned long flags);
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+ const struct clk_div_table *table, u8 width,
+ unsigned long flags);
+
+struct clk *clk_register_divider(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, spinlock_t *lock);
+struct clk *clk_register_divider_table(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock);
+void clk_unregister_divider(struct clk *clk);
+
+/**
+ * struct clk_mux - multiplexer clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register controlling multiplexer
+ * @shift: shift to multiplexer bit field
+ * @width: width of multiplexer bit field
+ * @flags: hardware-specific flags
+ * @lock: register lock
+ *
+ * Clock with multiple selectable parents. Implements .get_parent, .set_parent
+ * and .recalc_rate
+ *
+ * Flags:
+ * CLK_MUX_INDEX_ONE - register index starts at 1, not 0
+ * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
+ * CLK_MUX_HIWORD_MASK - The mux settings are only in lower 16-bit of this
+ * register, and mask of mux bits are in higher 16-bit of this register.
+ * While setting the mux bits, higher 16-bit should also be updated to
+ * indicate changing mux bits.
+ * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
+ * frequency.
+ */
+struct clk_mux {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u32 *table;
+ u32 mask;
+ u8 shift;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+#define CLK_MUX_INDEX_ONE BIT(0)
+#define CLK_MUX_INDEX_BIT BIT(1)
+#define CLK_MUX_HIWORD_MASK BIT(2)
+#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */
+#define CLK_MUX_ROUND_CLOSEST BIT(4)
+
+extern const struct clk_ops clk_mux_ops;
+extern const struct clk_ops clk_mux_ro_ops;
+
+struct clk *clk_register_mux(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_mux_flags, spinlock_t *lock);
+
+struct clk *clk_register_mux_table(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents, unsigned long flags,
+ void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+
+void clk_unregister_mux(struct clk *clk);
+
+void of_fixed_factor_clk_setup(struct device_node *node);
+
+/**
+ * struct clk_fixed_factor - fixed multiplier and divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @mult: multiplier
+ * @div: divider
+ *
+ * Clock with a fixed multiplier and divider. The output frequency is the
+ * parent clock rate divided by div and multiplied by mult.
+ * Implements .recalc_rate, .set_rate and .round_rate
+ */
+
+struct clk_fixed_factor {
+ struct clk_hw hw;
+ unsigned int mult;
+ unsigned int div;
+};
+
+extern struct clk_ops clk_fixed_factor_ops;
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div);
+
+/**
+ * struct clk_fractional_divider - adjustable fractional divider clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @reg: register containing the divider
+ * @mshift: shift to the numerator bit field
+ * @mwidth: width of the numerator bit field
+ * @nshift: shift to the denominator bit field
+ * @nwidth: width of the denominator bit field
+ * @lock: register lock
+ *
+ * Clock with adjustable fractional divider affecting its output frequency.
+ */
+
+struct clk_fractional_divider {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 mshift;
+ u32 mmask;
+ u8 nshift;
+ u32 nmask;
+ u8 flags;
+ spinlock_t *lock;
+};
+
+extern const struct clk_ops clk_fractional_divider_ops;
+struct clk *clk_register_fractional_divider(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
+ u8 clk_divider_flags, spinlock_t *lock);
+
+/***
+ * struct clk_composite - aggregate clock of mux, divider and gate clocks
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @mux_hw: handle between composite and hardware-specific mux clock
+ * @rate_hw: handle between composite and hardware-specific rate clock
+ * @gate_hw: handle between composite and hardware-specific gate clock
+ * @mux_ops: clock ops for mux
+ * @rate_ops: clock ops for rate
+ * @gate_ops: clock ops for gate
+ */
+struct clk_composite {
+ struct clk_hw hw;
+ struct clk_ops ops;
+
+ struct clk_hw *mux_hw;
+ struct clk_hw *rate_hw;
+ struct clk_hw *gate_hw;
+
+ const struct clk_ops *mux_ops;
+ const struct clk_ops *rate_ops;
+ const struct clk_ops *gate_ops;
+};
+
+struct clk *clk_register_composite(struct device *dev, const char *name,
+ const char **parent_names, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags);
+
+/***
+ * struct clk_gpio_gate - gpio gated clock
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @gpiod: gpio descriptor
+ *
+ * Clock with a gpio control for enabling and disabling the parent clock.
+ * Implements .enable, .disable and .is_enabled
+ */
+
+struct clk_gpio {
+ struct clk_hw hw;
+ struct gpio_desc *gpiod;
+};
+
+extern const struct clk_ops clk_gpio_gate_ops;
+struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned gpio, bool active_low,
+ unsigned long flags);
+
+void of_gpio_clk_gate_setup(struct device_node *node);
+
+/**
+ * clk_register - allocate a new clock, register it and return an opaque cookie
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_register is the primary interface for populating the clock tree with new
+ * clock nodes. It returns a pointer to the newly allocated struct clk which
+ * cannot be dereferenced by driver code but may be used in conjunction with the
+ * rest of the clock API. In the event of an error clk_register will return an
+ * error code; drivers must test for an error code after calling clk_register.
+ */
+struct clk *clk_register(struct device *dev, struct clk_hw *hw);
+struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw);
+
+void clk_unregister(struct clk *clk);
+void devm_clk_unregister(struct device *dev, struct clk *clk);
+
+/* helper functions */
+const char *__clk_get_name(struct clk *clk);
+struct clk_hw *__clk_get_hw(struct clk *clk);
+u8 __clk_get_num_parents(struct clk *clk);
+struct clk *__clk_get_parent(struct clk *clk);
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
+unsigned int __clk_get_enable_count(struct clk *clk);
+unsigned long __clk_get_rate(struct clk *clk);
+unsigned long __clk_get_flags(struct clk *clk);
+bool __clk_is_prepared(struct clk *clk);
+bool __clk_is_enabled(struct clk *clk);
+struct clk *__clk_lookup(const char *name);
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_p);
+unsigned long __clk_determine_rate(struct clk_hw *core,
+ unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate);
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_p);
+
+static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
+{
+ dst->clk = src->clk;
+ dst->core = src->core;
+}
+
+/*
+ * FIXME clock api without lock protection
+ */
+unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
+
+struct of_device_id;
+
+typedef void (*of_clk_init_cb_t)(struct device_node *);
+
+struct clk_onecell_data {
+ struct clk **clks;
+ unsigned int clk_num;
+};
+
+extern struct of_device_id __clk_of_table;
+
+#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn)
+
+#ifdef CONFIG_OF
+int of_clk_add_provider(struct device_node *np,
+ struct clk *(*clk_src_get)(struct of_phandle_args *args,
+ void *data),
+ void *data);
+void of_clk_del_provider(struct device_node *np);
+struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
+ void *data);
+struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data);
+int of_clk_get_parent_count(struct device_node *np);
+const char *of_clk_get_parent_name(struct device_node *np, int index);
+
+void of_clk_init(const struct of_device_id *matches);
+
+#else /* !CONFIG_OF */
+
+static inline int of_clk_add_provider(struct device_node *np,
+ struct clk *(*clk_src_get)(struct of_phandle_args *args,
+ void *data),
+ void *data)
+{
+ return 0;
+}
+#define of_clk_del_provider(np) \
+ { while (0); }
+static inline struct clk *of_clk_src_simple_get(
+ struct of_phandle_args *clkspec, void *data)
+{
+ return ERR_PTR(-ENOENT);
+}
+static inline struct clk *of_clk_src_onecell_get(
+ struct of_phandle_args *clkspec, void *data)
+{
+ return ERR_PTR(-ENOENT);
+}
+static inline const char *of_clk_get_parent_name(struct device_node *np,
+ int index)
+{
+ return NULL;
+}
+#define of_clk_init(matches) \
+ { while (0); }
+#endif /* CONFIG_OF */
+
+/*
+ * wrap access to peripherals in accessor routines
+ * for improved portability across platforms
+ */
+
+#if IS_ENABLED(CONFIG_PPC)
+
+static inline u32 clk_readl(u32 __iomem *reg)
+{
+ return ioread32be(reg);
+}
+
+static inline void clk_writel(u32 val, u32 __iomem *reg)
+{
+ iowrite32be(val, reg);
+}
+
+#else /* platform dependent I/O accessors */
+
+static inline u32 clk_readl(u32 __iomem *reg)
+{
+ return readl(reg);
+}
+
+static inline void clk_writel(u32 val, u32 __iomem *reg)
+{
+ writel(val, reg);
+}
+
+#endif /* platform dependent I/O accessors */
+
+#ifdef CONFIG_DEBUG_FS
+struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
+ void *data, const struct file_operations *fops);
+#endif
+
+#endif /* CONFIG_COMMON_CLK */
+#endif /* __LINUX_CLK_PROVIDER_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
new file mode 100644
index 000000000..68c16a6be
--- /dev/null
+++ b/include/linux/clk.h
@@ -0,0 +1,506 @@
+/*
+ * linux/include/linux/clk.h
+ *
+ * Copyright (C) 2004 ARM Limited.
+ * Written by Deep Blue Solutions Limited.
+ * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_CLK_H
+#define __LINUX_CLK_H
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+struct device;
+
+struct clk;
+
+#ifdef CONFIG_COMMON_CLK
+
+/**
+ * DOC: clk notifier callback types
+ *
+ * PRE_RATE_CHANGE - called immediately before the clk rate is changed,
+ * to indicate that the rate change will proceed. Drivers must
+ * immediately terminate any operations that will be affected by the
+ * rate change. Callbacks may either return NOTIFY_DONE, NOTIFY_OK,
+ * NOTIFY_STOP or NOTIFY_BAD.
+ *
+ * ABORT_RATE_CHANGE: called if the rate change failed for some reason
+ * after PRE_RATE_CHANGE. In this case, all registered notifiers on
+ * the clk will be called with ABORT_RATE_CHANGE. Callbacks must
+ * always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ * POST_RATE_CHANGE - called after the clk rate change has successfully
+ * completed. Callbacks must always return NOTIFY_DONE or NOTIFY_OK.
+ *
+ */
+#define PRE_RATE_CHANGE BIT(0)
+#define POST_RATE_CHANGE BIT(1)
+#define ABORT_RATE_CHANGE BIT(2)
+
+/**
+ * struct clk_notifier - associate a clk with a notifier
+ * @clk: struct clk * to associate the notifier with
+ * @notifier_head: a blocking_notifier_head for this clk
+ * @node: linked list pointers
+ *
+ * A list of struct clk_notifier is maintained by the notifier code.
+ * An entry is created whenever code registers the first notifier on a
+ * particular @clk. Future notifiers on that @clk are added to the
+ * @notifier_head.
+ */
+struct clk_notifier {
+ struct clk *clk;
+ struct srcu_notifier_head notifier_head;
+ struct list_head node;
+};
+
+/**
+ * struct clk_notifier_data - rate data to pass to the notifier callback
+ * @clk: struct clk * being changed
+ * @old_rate: previous rate of this clk
+ * @new_rate: new rate of this clk
+ *
+ * For a pre-notifier, old_rate is the clk's rate before this rate
+ * change, and new_rate is what the rate will be in the future. For a
+ * post-notifier, old_rate and new_rate are both set to the clk's
+ * current rate (this was done to optimize the implementation).
+ */
+struct clk_notifier_data {
+ struct clk *clk;
+ unsigned long old_rate;
+ unsigned long new_rate;
+};
+
+/**
+ * clk_notifier_register: register a clock rate-change notifier callback
+ * @clk: clock whose rate we are interested in
+ * @nb: notifier block with callback function pointer
+ *
+ * ProTip: debugging across notifier chains can be frustrating. Make sure that
+ * your notifier callback function prints a nice big warning in case of
+ * failure.
+ */
+int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
+
+/**
+ * clk_notifier_unregister: unregister a clock rate-change notifier callback
+ * @clk: clock whose rate we are no longer interested in
+ * @nb: notifier block which will be unregistered
+ */
+int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
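An illustrative notifier callback consuming the event codes and clk_notifier_data defined above (the callback body and printout are placeholders):

static int example_clk_notify(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		pr_info("clk rate changing: %lu -> %lu Hz\n",
			ndata->old_rate, ndata->new_rate);
		return NOTIFY_OK;
	case ABORT_RATE_CHANGE:
	case POST_RATE_CHANGE:
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}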
+
+/**
+ * clk_get_accuracy - obtain the clock accuracy in ppb (parts per billion)
+ * for a clock source.
+ * @clk: clock source
+ *
+ * This gets the clock source accuracy expressed in ppb.
+ * A perfect clock returns 0.
+ */
+long clk_get_accuracy(struct clk *clk);
+
+/**
+ * clk_set_phase - adjust the phase shift of a clock signal
+ * @clk: clock signal source
+ * @degrees: number of degrees the signal is shifted
+ *
+ * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
+ * success, -EERROR otherwise.
+ */
+int clk_set_phase(struct clk *clk, int degrees);
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * -EERROR.
+ */
+int clk_get_phase(struct clk *clk);
+
+/**
+ * clk_is_match - check if two clk's point to the same hardware clock
+ * @p: clk compared against q
+ * @q: clk compared against p
+ *
+ * Returns true if the two struct clk pointers both point to the same hardware
+ * clock node. Put differently, returns true if struct clk *p and struct clk *q
+ * share the same struct clk_core object.
+ *
+ * Returns false otherwise. Note that two NULL clks are treated as matching.
+ */
+bool clk_is_match(const struct clk *p, const struct clk *q);
+
+#else
+
+static inline long clk_get_accuracy(struct clk *clk)
+{
+ return -ENOTSUPP;
+}
+
+static inline int clk_set_phase(struct clk *clk, int phase)
+{
+ return -ENOTSUPP;
+}
+
+static inline int clk_get_phase(struct clk *clk)
+{
+ return -ENOTSUPP;
+}
+
+static inline bool clk_is_match(const struct clk *p, const struct clk *q)
+{
+ return p == q;
+}
+
+#endif
+
+/**
+ * clk_prepare - prepare a clock source
+ * @clk: clock source
+ *
+ * This prepares the clock source for use.
+ *
+ * Must not be called from within atomic context.
+ */
+#ifdef CONFIG_HAVE_CLK_PREPARE
+int clk_prepare(struct clk *clk);
+#else
+static inline int clk_prepare(struct clk *clk)
+{
+ might_sleep();
+ return 0;
+}
+#endif
+
+/**
+ * clk_unprepare - undo preparation of a clock source
+ * @clk: clock source
+ *
+ * This undoes a previously prepared clock. The caller must balance
+ * the number of prepare and unprepare calls.
+ *
+ * Must not be called from within atomic context.
+ */
+#ifdef CONFIG_HAVE_CLK_PREPARE
+void clk_unprepare(struct clk *clk);
+#else
+static inline void clk_unprepare(struct clk *clk)
+{
+ might_sleep();
+}
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+/**
+ * clk_get - lookup and obtain a reference to a clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Returns a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_get should not be called from within interrupt context.
+ */
+struct clk *clk_get(struct device *dev, const char *id);
+
+/**
+ * devm_clk_get - lookup and obtain a managed reference to a clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Returns a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev and @id to determine the clock consumer, and thereby
+ * the clock producer. (IOW, @id may be identical strings, but
+ * clk_get may return different clock producers depending on @dev.)
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * devm_clk_get should not be called from within interrupt context.
+ *
+ * The clock will automatically be freed when the device is unbound
+ * from the bus.
+ */
+struct clk *devm_clk_get(struct device *dev, const char *id);
+
+/**
+ * clk_enable - inform the system when the clock source should be running.
+ * @clk: clock source
+ *
+ * If the clock cannot be enabled/disabled, this should return success.
+ *
+ * May be called from atomic contexts.
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_enable(struct clk *clk);
+
+/**
+ * clk_disable - inform the system when the clock source is no longer required.
+ * @clk: clock source
+ *
+ * Inform the system that a clock source is no longer required by
+ * a driver and may be shut down.
+ *
+ * May be called from atomic contexts.
+ *
+ * Implementation detail: if the clock source is shared between
+ * multiple drivers, clk_enable() calls must be balanced by the
+ * same number of clk_disable() calls for the clock source to be
+ * disabled.
+ */
+void clk_disable(struct clk *clk);
+
+/**
+ * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
+ * This is only valid once the clock source has been enabled.
+ * @clk: clock source
+ */
+unsigned long clk_get_rate(struct clk *clk);
+
+/**
+ * clk_put - "free" the clock source
+ * @clk: clock source
+ *
+ * Note: drivers must ensure that all clk_enable calls made on this
+ * clock source are balanced by clk_disable calls prior to calling
+ * this function.
+ *
+ * clk_put should not be called from within interrupt context.
+ */
+void clk_put(struct clk *clk);
+
+/**
+ * devm_clk_put - "free" a managed clock source
+ * @dev: device used to acquire the clock
+ * @clk: clock source acquired with devm_clk_get()
+ *
+ * Note: drivers must ensure that all clk_enable calls made on this
+ * clock source are balanced by clk_disable calls prior to calling
+ * this function.
+ *
+ * clk_put should not be called from within interrupt context.
+ */
+void devm_clk_put(struct device *dev, struct clk *clk);
+
+/*
+ * The remaining APIs are optional for machine class support.
+ */
+
+
+/**
+ * clk_round_rate - adjust a rate to the exact rate a clock can provide
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * Returns rounded clock rate in Hz, or negative errno.
+ */
+long clk_round_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_rate - set the clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired clock rate in Hz
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
+ *
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
+ */
+bool clk_has_parent(struct clk *clk, struct clk *parent);
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_parent - set the parent clock source for this clock
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent);
+
+/**
+ * clk_get_parent - get the parent clock source for this clock
+ * @clk: clock source
+ *
+ * Returns struct clk corresponding to parent clock source, or
+ * valid IS_ERR() condition containing errno.
+ */
+struct clk *clk_get_parent(struct clk *clk);
+
+/**
+ * clk_get_sys - get a clock based upon the device name
+ * @dev_id: device name
+ * @con_id: connection ID
+ *
+ * Returns a struct clk corresponding to the clock producer, or
+ * valid IS_ERR() condition containing errno. The implementation
+ * uses @dev_id and @con_id to determine the clock consumer, and
+ * thereby the clock producer. In contrast to clk_get() this function
+ * takes the device name instead of the device itself for identification.
+ *
+ * Drivers must assume that the clock source is not enabled.
+ *
+ * clk_get_sys should not be called from within interrupt context.
+ */
+struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+
+#else /* !CONFIG_HAVE_CLK */
+
+static inline struct clk *clk_get(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
+static inline struct clk *devm_clk_get(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
+static inline void clk_put(struct clk *clk) {}
+
+static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
+
+static inline int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+
+static inline void clk_disable(struct clk *clk) {}
+
+static inline unsigned long clk_get_rate(struct clk *clk)
+{
+ return 0;
+}
+
+static inline int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static inline long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+ return true;
+}
+
+static inline int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return 0;
+}
+
+static inline struct clk *clk_get_parent(struct clk *clk)
+{
+ return NULL;
+}
+
+#endif
+
+/* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
+static inline int clk_prepare_enable(struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare(clk);
+ if (ret)
+ return ret;
+ ret = clk_enable(clk);
+ if (ret)
+ clk_unprepare(clk);
+
+ return ret;
+}
+
+/* clk_disable_unprepare helps cases using clk_disable in non-atomic context. */
+static inline void clk_disable_unprepare(struct clk *clk)
+{
+ clk_disable(clk);
+ clk_unprepare(clk);
+}
+
+/**
+ * clk_add_alias - add a new clock alias
+ * @alias: name for clock alias
+ * @alias_dev_name: device name
+ * @id: platform specific clock name
+ * @dev: device
+ *
+ * Allows using generic clock names for drivers by adding a new alias.
+ * Assumes clkdev, see clkdev.h for more info.
+ */
+int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
+ struct device *dev);
+
+struct device_node;
+struct of_phandle_args;
+
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+struct clk *of_clk_get(struct device_node *np, int index);
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
+struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
+#else
+static inline struct clk *of_clk_get(struct device_node *np, int index)
+{
+ return ERR_PTR(-ENOENT);
+}
+static inline struct clk *of_clk_get_by_name(struct device_node *np,
+ const char *name)
+{
+ return ERR_PTR(-ENOENT);
+}
+#endif
+
+#endif
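
Example (illustrative sketch only): a hypothetical consumer of the API declared above. The device, the "bus" connection id, and the target rate are assumptions, not part of this header.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical driver helper: acquire, rate-set, enable and release a clock. */
static int example_setup_clock(struct device *dev, unsigned long target_hz)
{
	struct clk *clk;
	long rounded;
	int ret;

	clk = devm_clk_get(dev, "bus");		/* "bus" con_id is assumed */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rounded = clk_round_rate(clk, target_hz);	/* what can the clock do? */
	if (rounded > 0) {
		ret = clk_set_rate(clk, rounded);
		if (ret)
			return ret;
	}

	ret = clk_prepare_enable(clk);		/* prepare + enable in one call */
	if (ret)
		return ret;

	dev_info(dev, "clock running at %lu Hz\n", clk_get_rate(clk));

	clk_disable_unprepare(clk);		/* balanced teardown */
	return 0;
}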
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
new file mode 100644
index 000000000..7669f7618
--- /dev/null
+++ b/include/linux/clk/at91_pmc.h
@@ -0,0 +1,194 @@
+/*
+ * include/linux/clk/at91_pmc.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Power Management Controller (PMC) - System peripherals registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91_PMC_H
+#define AT91_PMC_H
+
+#ifndef __ASSEMBLY__
+extern void __iomem *at91_pmc_base;
+
+#define at91_pmc_read(field) \
+ readl_relaxed(at91_pmc_base + field)
+
+#define at91_pmc_write(field, value) \
+ writel_relaxed(value, at91_pmc_base + field)
+#else
+.extern at91_pmc_base
+#endif
+
+#define AT91_PMC_SCER 0x00 /* System Clock Enable Register */
+#define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */
+
+#define AT91_PMC_SCSR 0x08 /* System Clock Status Register */
+#define AT91_PMC_PCK (1 << 0) /* Processor Clock */
+#define AT91RM9200_PMC_UDP (1 << 1) /* USB Device Port Clock [AT91RM9200 only] */
+#define AT91RM9200_PMC_MCKUDP (1 << 2) /* USB Device Port Master Clock Automatic Disable on Suspend [AT91RM9200 only] */
+#define AT91RM9200_PMC_UHP (1 << 4) /* USB Host Port Clock [AT91RM9200 only] */
+#define AT91SAM926x_PMC_UHP (1 << 6) /* USB Host Port Clock [AT91SAM926x only] */
+#define AT91SAM926x_PMC_UDP (1 << 7) /* USB Device Port Clock [AT91SAM926x only] */
+#define AT91_PMC_PCK0 (1 << 8) /* Programmable Clock 0 */
+#define AT91_PMC_PCK1 (1 << 9) /* Programmable Clock 1 */
+#define AT91_PMC_PCK2 (1 << 10) /* Programmable Clock 2 */
+#define AT91_PMC_PCK3 (1 << 11) /* Programmable Clock 3 */
+#define AT91_PMC_PCK4 (1 << 12) /* Programmable Clock 4 [AT572D940HF only] */
+#define AT91_PMC_HCK0 (1 << 16) /* AHB Clock (USB host) [AT91SAM9261 only] */
+#define AT91_PMC_HCK1 (1 << 17) /* AHB Clock (LCD) [AT91SAM9261 only] */
+
+#define AT91_PMC_PCER 0x10 /* Peripheral Clock Enable Register */
+#define AT91_PMC_PCDR 0x14 /* Peripheral Clock Disable Register */
+#define AT91_PMC_PCSR 0x18 /* Peripheral Clock Status Register */
+
+#define AT91_CKGR_UCKR 0x1C /* UTMI Clock Register [some SAM9] */
+#define AT91_PMC_UPLLEN (1 << 16) /* UTMI PLL Enable */
+#define AT91_PMC_UPLLCOUNT (0xf << 20) /* UTMI PLL Start-up Time */
+#define AT91_PMC_BIASEN (1 << 24) /* UTMI BIAS Enable */
+#define AT91_PMC_BIASCOUNT (0xf << 28) /* UTMI BIAS Start-up Time */
+
+#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
+#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
+#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */
+#define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */
+#define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */
+#define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */
+#define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */
+#define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */
+
+#define AT91_CKGR_MCFR 0x24 /* Main Clock Frequency Register */
+#define AT91_PMC_MAINF (0xffff << 0) /* Main Clock Frequency */
+#define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */
+
+#define AT91_CKGR_PLLAR 0x28 /* PLL A Register */
+#define AT91_CKGR_PLLBR 0x2c /* PLL B Register */
+#define AT91_PMC_DIV (0xff << 0) /* Divider */
+#define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */
+#define AT91_PMC_OUT (3 << 14) /* PLL Clock Frequency Range */
+#define AT91_PMC_MUL (0x7ff << 16) /* PLL Multiplier */
+#define AT91_PMC_MUL_GET(n) ((n) >> 16 & 0x7ff)
+#define AT91_PMC3_MUL (0x7f << 18) /* PLL Multiplier [SAMA5 only] */
+#define AT91_PMC3_MUL_GET(n) ((n) >> 18 & 0x7f)
+#define AT91_PMC_USBDIV (3 << 28) /* USB Divisor (PLLB only) */
+#define AT91_PMC_USBDIV_1 (0 << 28)
+#define AT91_PMC_USBDIV_2 (1 << 28)
+#define AT91_PMC_USBDIV_4 (2 << 28)
+#define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */
+
+#define AT91_PMC_MCKR 0x30 /* Master Clock Register */
+#define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */
+#define AT91_PMC_CSS_SLOW (0 << 0)
+#define AT91_PMC_CSS_MAIN (1 << 0)
+#define AT91_PMC_CSS_PLLA (2 << 0)
+#define AT91_PMC_CSS_PLLB (3 << 0)
+#define AT91_PMC_CSS_UPLL (3 << 0) /* [some SAM9 only] */
+#define PMC_PRES_OFFSET 2
+#define AT91_PMC_PRES (7 << PMC_PRES_OFFSET) /* Master Clock Prescaler */
+#define AT91_PMC_PRES_1 (0 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_2 (1 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_4 (2 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_8 (3 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_16 (4 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_32 (5 << PMC_PRES_OFFSET)
+#define AT91_PMC_PRES_64 (6 << PMC_PRES_OFFSET)
+#define PMC_ALT_PRES_OFFSET 4
+#define AT91_PMC_ALT_PRES (7 << PMC_ALT_PRES_OFFSET) /* Master Clock Prescaler [alternate location] */
+#define AT91_PMC_ALT_PRES_1 (0 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_2 (1 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_4 (2 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_8 (3 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_16 (4 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_32 (5 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_ALT_PRES_64 (6 << PMC_ALT_PRES_OFFSET)
+#define AT91_PMC_MDIV (3 << 8) /* Master Clock Division */
+#define AT91RM9200_PMC_MDIV_1 (0 << 8) /* [AT91RM9200 only] */
+#define AT91RM9200_PMC_MDIV_2 (1 << 8)
+#define AT91RM9200_PMC_MDIV_3 (2 << 8)
+#define AT91RM9200_PMC_MDIV_4 (3 << 8)
+#define AT91SAM9_PMC_MDIV_1 (0 << 8) /* [SAM9 only] */
+#define AT91SAM9_PMC_MDIV_2 (1 << 8)
+#define AT91SAM9_PMC_MDIV_4 (2 << 8)
+#define AT91SAM9_PMC_MDIV_6 (3 << 8) /* [some SAM9 only] */
+#define AT91SAM9_PMC_MDIV_3 (3 << 8) /* [some SAM9 only] */
+#define AT91_PMC_PDIV (1 << 12) /* Processor Clock Division [some SAM9 only] */
+#define AT91_PMC_PDIV_1 (0 << 12)
+#define AT91_PMC_PDIV_2 (1 << 12)
+#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */
+#define AT91_PMC_PLLADIV2_OFF (0 << 12)
+#define AT91_PMC_PLLADIV2_ON (1 << 12)
+#define AT91_PMC_H32MXDIV BIT(24)
+
+#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
+#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
+#define AT91_PMC_USBS_PLLA (0 << 0)
+#define AT91_PMC_USBS_UPLL (1 << 0)
+#define AT91_PMC_USBS_PLLB (1 << 0) /* [AT91SAM9N12 only] */
+#define AT91_PMC_OHCIUSBDIV (0xF << 8) /* Divider for USB OHCI Clock */
+#define AT91_PMC_OHCIUSBDIV_1 (0x0 << 8)
+#define AT91_PMC_OHCIUSBDIV_2 (0x1 << 8)
+
+#define AT91_PMC_SMD 0x3c /* Soft Modem Clock Register [some SAM9 only] */
+#define AT91_PMC_SMDS (0x1 << 0) /* SMD input clock selection */
+#define AT91_PMC_SMD_DIV (0x1f << 8) /* SMD input clock divider */
+#define AT91_PMC_SMDDIV(n) (((n) << 8) & AT91_PMC_SMD_DIV)
+
+#define AT91_PMC_PCKR(n) (0x40 + ((n) * 4)) /* Programmable Clock 0-N Registers */
+#define AT91_PMC_ALT_PCKR_CSS (0x7 << 0) /* Programmable Clock Source Selection [alternate length] */
+#define AT91_PMC_CSS_MASTER (4 << 0) /* [some SAM9 only] */
+#define AT91_PMC_CSSMCK (0x1 << 8) /* CSS or Master Clock Selection */
+#define AT91_PMC_CSSMCK_CSS (0 << 8)
+#define AT91_PMC_CSSMCK_MCK (1 << 8)
+
+#define AT91_PMC_IER 0x60 /* Interrupt Enable Register */
+#define AT91_PMC_IDR 0x64 /* Interrupt Disable Register */
+#define AT91_PMC_SR 0x68 /* Status Register */
+#define AT91_PMC_MOSCS (1 << 0) /* MOSCS Flag */
+#define AT91_PMC_LOCKA (1 << 1) /* PLLA Lock */
+#define AT91_PMC_LOCKB (1 << 2) /* PLLB Lock */
+#define AT91_PMC_MCKRDY (1 << 3) /* Master Clock Ready */
+#define AT91_PMC_LOCKU (1 << 6) /* UPLL Lock [some SAM9] */
+#define AT91_PMC_OSCSEL (1 << 7) /* Slow Oscillator Selection [some SAM9] */
+#define AT91_PMC_PCK0RDY (1 << 8) /* Programmable Clock 0 */
+#define AT91_PMC_PCK1RDY (1 << 9) /* Programmable Clock 1 */
+#define AT91_PMC_PCK2RDY (1 << 10) /* Programmable Clock 2 */
+#define AT91_PMC_PCK3RDY (1 << 11) /* Programmable Clock 3 */
+#define AT91_PMC_MOSCSELS (1 << 16) /* Main Oscillator Selection [some SAM9] */
+#define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */
+#define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */
+#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
+
+#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */
+
+#define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */
+#define AT91_PMC_WPEN (0x1 << 0) /* Write Protect Enable */
+#define AT91_PMC_WPKEY (0xffffff << 8) /* Write Protect Key */
+#define AT91_PMC_PROTKEY (0x504d43 << 8) /* Activation Code */
+
+#define AT91_PMC_WPSR 0xe8 /* Write Protect Status Register [some SAM9] */
+#define AT91_PMC_WPVS (0x1 << 0) /* Write Protect Violation Status */
+#define AT91_PMC_WPVSRC (0xffff << 8) /* Write Protect Violation Source */
+
+#define AT91_PMC_PCER1 0x100 /* Peripheral Clock Enable Register 1 [SAMA5 only] */
+#define AT91_PMC_PCDR1 0x104 /* Peripheral Clock Disable Register 1 */
+#define AT91_PMC_PCSR1 0x108 /* Peripheral Clock Status Register 1 */
+
+#define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */
+#define AT91_PMC_PCR_PID (0x3f << 0) /* Peripheral ID */
+#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
+#define AT91_PMC_PCR_DIV(n) ((n) << 16) /* Divisor Value */
+#define AT91_PMC_PCR_DIV0 0x0 /* Peripheral clock is MCK */
+#define AT91_PMC_PCR_DIV2 0x1 /* Peripheral clock is MCK/2 */
+#define AT91_PMC_PCR_DIV4 0x2 /* Peripheral clock is MCK/4 */
+#define AT91_PMC_PCR_DIV8 0x3 /* Peripheral clock is MCK/8 */
+#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
+
+#endif
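
Illustrative sketch of how the at91_pmc_read()/at91_pmc_write() accessors and the register definitions above are typically combined. The peripheral ID is an assumption and timeout handling is omitted.

#include <linux/clk/at91_pmc.h>

/* Hypothetical helper: gate a peripheral clock on, then wait for MCK ready. */
static void example_at91_enable_periph(unsigned int pid)
{
	/* One bit per peripheral ID in the Peripheral Clock Enable Register */
	at91_pmc_write(AT91_PMC_PCER, 1 << pid);

	/* Busy-wait until the Status Register reports the master clock ready */
	while (!(at91_pmc_read(AT91_PMC_SR) & AT91_PMC_MCKRDY))
		;	/* a real driver would use cpu_relax() and a timeout */
}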
diff --git a/include/linux/clk/bcm2835.h b/include/linux/clk/bcm2835.h
new file mode 100644
index 000000000..aa937f6c1
--- /dev/null
+++ b/include/linux/clk/bcm2835.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2010 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_CLK_BCM2835_H_
+#define __LINUX_CLK_BCM2835_H_
+
+void __init bcm2835_init_clocks(void);
+
+#endif
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
new file mode 100644
index 000000000..f3050e15f
--- /dev/null
+++ b/include/linux/clk/clk-conf.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct device_node;
+
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+int of_clk_set_defaults(struct device_node *node, bool clk_supplier);
+#else
+static inline int of_clk_set_defaults(struct device_node *node,
+ bool clk_supplier)
+{
+ return 0;
+}
+#endif
diff --git a/include/linux/clk/mxs.h b/include/linux/clk/mxs.h
new file mode 100644
index 000000000..5138a90e0
--- /dev/null
+++ b/include/linux/clk/mxs.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_CLK_MXS_H
+#define __LINUX_CLK_MXS_H
+
+int mxs_saif_clkmux_select(unsigned int clkmux);
+
+#endif
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
new file mode 100644
index 000000000..63a8159c4
--- /dev/null
+++ b/include/linux/clk/shmobile.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2013 Ideas On Board SPRL
+ * Copyright 2013, 2014 Horms Solutions Ltd.
+ *
+ * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Contact: Simon Horman <horms@verge.net.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_CLK_SHMOBILE_H_
+#define __LINUX_CLK_SHMOBILE_H_
+
+#include <linux/types.h>
+
+void r8a7778_clocks_init(u32 mode);
+void r8a7779_clocks_init(u32 mode);
+void rcar_gen2_clocks_init(u32 mode);
+
+#endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
new file mode 100644
index 000000000..19c4208f4
--- /dev/null
+++ b/include/linux/clk/tegra.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_CLK_TEGRA_H_
+#define __LINUX_CLK_TEGRA_H_
+
+#include <linux/clk.h>
+
+/*
+ * Tegra CPU clock and reset control ops
+ *
+ * wait_for_reset:
+ * keep waiting until the CPU is in reset state
+ * put_in_reset:
+ * put the CPU in reset state
+ * out_of_reset:
+ * release the CPU from reset state
+ * enable_clock:
+ * un-gate the CPU clock
+ * disable_clock:
+ * gate the CPU clock
+ * rail_off_ready:
+ * CPU is ready for rail off
+ * suspend:
+ * save the clock settings when the CPU goes into a low-power state
+ * resume:
+ * restore the clock settings when the CPU exits a low-power state
+ */
+struct tegra_cpu_car_ops {
+ void (*wait_for_reset)(u32 cpu);
+ void (*put_in_reset)(u32 cpu);
+ void (*out_of_reset)(u32 cpu);
+ void (*enable_clock)(u32 cpu);
+ void (*disable_clock)(u32 cpu);
+#ifdef CONFIG_PM_SLEEP
+ bool (*rail_off_ready)(void);
+ void (*suspend)(void);
+ void (*resume)(void);
+#endif
+};
+
+extern struct tegra_cpu_car_ops *tegra_cpu_car_ops;
+
+static inline void tegra_wait_cpu_in_reset(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->wait_for_reset))
+ return;
+
+ tegra_cpu_car_ops->wait_for_reset(cpu);
+}
+
+static inline void tegra_put_cpu_in_reset(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->put_in_reset))
+ return;
+
+ tegra_cpu_car_ops->put_in_reset(cpu);
+}
+
+static inline void tegra_cpu_out_of_reset(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->out_of_reset))
+ return;
+
+ tegra_cpu_car_ops->out_of_reset(cpu);
+}
+
+static inline void tegra_enable_cpu_clock(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->enable_clock))
+ return;
+
+ tegra_cpu_car_ops->enable_clock(cpu);
+}
+
+static inline void tegra_disable_cpu_clock(u32 cpu)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->disable_clock))
+ return;
+
+ tegra_cpu_car_ops->disable_clock(cpu);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static inline bool tegra_cpu_rail_off_ready(void)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready))
+ return false;
+
+ return tegra_cpu_car_ops->rail_off_ready();
+}
+
+static inline void tegra_cpu_clock_suspend(void)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->suspend))
+ return;
+
+ tegra_cpu_car_ops->suspend();
+}
+
+static inline void tegra_cpu_clock_resume(void)
+{
+ if (WARN_ON(!tegra_cpu_car_ops->resume))
+ return;
+
+ tegra_cpu_car_ops->resume();
+}
+#endif
+
+#endif /* __LINUX_CLK_TEGRA_H_ */
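
Illustrative sketch of how a platform clock/reset driver might install tegra_cpu_car_ops so the inline wrappers above become usable. The callback bodies are placeholders, not the real CAR programming.

/* Hypothetical CAR callbacks: real code would program the clock/reset registers. */
static void example_wait_for_reset(u32 cpu)  { /* poll CPU reset status */ }
static void example_put_in_reset(u32 cpu)    { /* assert CPU reset */ }
static void example_out_of_reset(u32 cpu)    { /* de-assert CPU reset */ }
static void example_enable_clock(u32 cpu)    { /* un-gate CPU clock */ }
static void example_disable_clock(u32 cpu)   { /* gate CPU clock */ }

static struct tegra_cpu_car_ops example_car_ops = {
	.wait_for_reset	= example_wait_for_reset,
	.put_in_reset	= example_put_in_reset,
	.out_of_reset	= example_out_of_reset,
	.enable_clock	= example_enable_clock,
	.disable_clock	= example_disable_clock,
};

static void __init example_car_init(void)
{
	/* wrappers such as tegra_enable_cpu_clock() now dispatch to the ops */
	tegra_cpu_car_ops = &example_car_ops;
}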
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
new file mode 100644
index 000000000..79b76e13d
--- /dev/null
+++ b/include/linux/clk/ti.h
@@ -0,0 +1,376 @@
+/*
+ * TI clock drivers support
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LINUX_CLK_TI_H__
+#define __LINUX_CLK_TI_H__
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+/**
+ * struct dpll_data - DPLL registers and integration data
+ * @mult_div1_reg: register containing the DPLL M and N bitfields
+ * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
+ * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg
+ * @clk_bypass: struct clk pointer to the clock's bypass clock input
+ * @clk_ref: struct clk pointer to the clock's reference clock input
+ * @control_reg: register containing the DPLL mode bitfield
+ * @enable_mask: mask of the DPLL mode bitfield in @control_reg
+ * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
+ * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
+ * @last_rounded_m4xen: cache of the last M4X result of
+ * omap4_dpll_regm4xen_round_rate()
+ * @last_rounded_lpmode: cache of the last lpmode result of
+ * omap4_dpll_lpmode_recalc()
+ * @max_multiplier: maximum valid non-bypass multiplier value (actual)
+ * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
+ * @min_divider: minimum valid non-bypass divider value (actual)
+ * @max_divider: maximum valid non-bypass divider value (actual)
+ * @modes: possible values of @enable_mask
+ * @autoidle_reg: register containing the DPLL autoidle mode bitfield
+ * @idlest_reg: register containing the DPLL idle status bitfield
+ * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
+ * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
+ * @dcc_mask: mask of the DPLL DCC correction bitfield in @mult_div1_reg
+ * @dcc_rate: rate at or above which the DCC bits in @dcc_mask must be set
+ * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
+ * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
+ * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
+ * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
+ * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
+ * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
+ * @flags: DPLL type/features (see below)
+ *
+ * Possible values for @flags:
+ * DPLL_J_TYPE: "J-type DPLL" (only some 36xx, 4xxx DPLLs)
+ *
+ * @freqsel_mask is only used on the OMAP34xx family and AM35xx.
+ *
+ * XXX Some DPLLs have multiple bypass inputs, so it's not technically
+ * correct to only have one @clk_bypass pointer.
+ *
+ * XXX The runtime-variable fields (@last_rounded_rate, @last_rounded_m,
+ * @last_rounded_n) should be separated from the runtime-fixed fields
+ * and placed into a different structure, so that the runtime-fixed data
+ * can be placed into read-only space.
+ */
+struct dpll_data {
+ void __iomem *mult_div1_reg;
+ u32 mult_mask;
+ u32 div1_mask;
+ struct clk *clk_bypass;
+ struct clk *clk_ref;
+ void __iomem *control_reg;
+ u32 enable_mask;
+ unsigned long last_rounded_rate;
+ u16 last_rounded_m;
+ u8 last_rounded_m4xen;
+ u8 last_rounded_lpmode;
+ u16 max_multiplier;
+ u8 last_rounded_n;
+ u8 min_divider;
+ u16 max_divider;
+ u8 modes;
+ void __iomem *autoidle_reg;
+ void __iomem *idlest_reg;
+ u32 autoidle_mask;
+ u32 freqsel_mask;
+ u32 idlest_mask;
+ u32 dco_mask;
+ u32 sddiv_mask;
+ u32 dcc_mask;
+ unsigned long dcc_rate;
+ u32 lpmode_mask;
+ u32 m4xen_mask;
+ u8 auto_recal_bit;
+ u8 recal_en_bit;
+ u8 recal_st_bit;
+ u8 flags;
+};
+
+struct clk_hw_omap;
+
+/**
+ * struct clk_hw_omap_ops - OMAP clk ops
+ * @find_idlest: find idlest register information for a clock
+ * @find_companion: find companion clock register information for a clock,
+ * basically converts CM_ICLKEN* <-> CM_FCLKEN*
+ * @allow_idle: enables autoidle hardware functionality for a clock
+ * @deny_idle: prevent autoidle hardware functionality for a clock
+ */
+struct clk_hw_omap_ops {
+ void (*find_idlest)(struct clk_hw_omap *oclk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit, u8 *idlest_val);
+ void (*find_companion)(struct clk_hw_omap *oclk,
+ void __iomem **other_reg,
+ u8 *other_bit);
+ void (*allow_idle)(struct clk_hw_omap *oclk);
+ void (*deny_idle)(struct clk_hw_omap *oclk);
+};
+
+/**
+ * struct clk_hw_omap - OMAP struct clk
+ * @node: list_head connecting this clock into the full clock list
+ * @enable_reg: register to write to enable the clock (see @enable_bit)
+ * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg)
+ * @flags: see "struct clk.flags possibilities" above
+ * @clksel_reg: for clksel clks, register va containing src/divisor select
+ * @clksel_mask: bitmask in @clksel_reg for the src/divisor selector
+ * @clksel: for clksel clks, pointer to struct clksel for this clock
+ * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock
+ * @clkdm_name: clockdomain name that this clock is contained in
+ * @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime
+ * @ops: clock ops for this clock
+ */
+struct clk_hw_omap {
+ struct clk_hw hw;
+ struct list_head node;
+ unsigned long fixed_rate;
+ u8 fixed_div;
+ void __iomem *enable_reg;
+ u8 enable_bit;
+ u8 flags;
+ void __iomem *clksel_reg;
+ u32 clksel_mask;
+ const struct clksel *clksel;
+ struct dpll_data *dpll_data;
+ const char *clkdm_name;
+ struct clockdomain *clkdm;
+ const struct clk_hw_omap_ops *ops;
+};
+
+/*
+ * struct clk_hw_omap.flags possibilities
+ *
+ * XXX document the rest of the clock flags here
+ *
+ * ENABLE_REG_32BIT: (OMAP1 only) clock control register must be accessed
+ * with 32-bit ops; by default OMAP1 uses 16-bit ops.
+ * CLOCK_IDLE_CONTROL: (OMAP1 only) clock has autoidle support.
+ * CLOCK_NO_IDLE_PARENT: (OMAP1 only) when clock is enabled, its parent
+ * clock is put into no-idle mode.
+ * ENABLE_ON_INIT: Clock is enabled on init.
+ * INVERT_ENABLE: By default, clock enable bit behavior is '1' enable, '0'
+ * disable. This inverts the behavior making '0' enable and '1' disable.
+ * CLOCK_CLKOUTX2: (OMAP4 only) DPLL CLKOUT and CLKOUTX2 GATE_CTRL
+ * bits share the same register. This flag allows the
+ * omap4_dpllmx*() code to determine which GATE_CTRL bit field
+ * should be used. This is a temporary solution - a better approach
+ * would be to associate clock type-specific data with the clock,
+ * similar to the struct dpll_data approach.
+ * MEMMAP_ADDRESSING: Use memmap addressing to access clock registers.
+ */
+#define ENABLE_REG_32BIT (1 << 0) /* Use 32-bit access */
+#define CLOCK_IDLE_CONTROL (1 << 1)
+#define CLOCK_NO_IDLE_PARENT (1 << 2)
+#define ENABLE_ON_INIT (1 << 3) /* Enable upon framework init */
+#define INVERT_ENABLE (1 << 4) /* 0 enables, 1 disables */
+#define CLOCK_CLKOUTX2 (1 << 5)
+#define MEMMAP_ADDRESSING (1 << 6)
+
+/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
+#define DPLL_LOW_POWER_STOP 0x1
+#define DPLL_LOW_POWER_BYPASS 0x5
+#define DPLL_LOCKED 0x7
+
+/* DPLL Type and DCO Selection Flags */
+#define DPLL_J_TYPE 0x1
+
+/* Composite clock component types */
+enum {
+ CLK_COMPONENT_TYPE_GATE = 0,
+ CLK_COMPONENT_TYPE_DIVIDER,
+ CLK_COMPONENT_TYPE_MUX,
+ CLK_COMPONENT_TYPE_MAX,
+};
+
+/**
+ * struct ti_dt_clk - OMAP DT clock alias declarations
+ * @lk: clock lookup definition
+ * @node_name: clock DT node to map to
+ */
+struct ti_dt_clk {
+ struct clk_lookup lk;
+ char *node_name;
+};
+
+#define DT_CLK(dev, con, name) \
+ { \
+ .lk = { \
+ .dev_id = dev, \
+ .con_id = con, \
+ }, \
+ .node_name = name, \
+ }
+
+/* Static memmap indices */
+enum {
+ TI_CLKM_CM = 0,
+ TI_CLKM_CM2,
+ TI_CLKM_PRM,
+ TI_CLKM_SCRM,
+ TI_CLKM_CTRL,
+ CLK_MAX_MEMMAPS
+};
+
+typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
+
+/**
+ * struct clk_omap_reg - OMAP register declaration
+ * @offset: offset from the master IP module base address
+ * @index: index of the master IP module
+ */
+struct clk_omap_reg {
+ u16 offset;
+ u16 index;
+};
+
+/**
+ * struct ti_clk_ll_ops - low-level register access ops for a clock
+ * @clk_readl: pointer to register read function
+ * @clk_writel: pointer to register write function
+ *
+ * Low-level register access ops are generally used by the basic clock types
+ * (clk-gate, clk-mux, clk-divider etc.) to provide support for various
+ * low-level hardware interfaces (direct MMIO, regmap etc.), but can also be
+ * used by other hardware-specific clock drivers if needed.
+ */
+struct ti_clk_ll_ops {
+ u32 (*clk_readl)(void __iomem *reg);
+ void (*clk_writel)(u32 val, void __iomem *reg);
+};
+
+extern struct ti_clk_ll_ops *ti_clk_ll_ops;
+
+extern const struct clk_ops ti_clk_divider_ops;
+extern const struct clk_ops ti_clk_mux_ops;
+
+#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
+
+void omap2_init_clk_hw_omap_clocks(struct clk *clk);
+int omap3_noncore_dpll_enable(struct clk_hw *hw);
+void omap3_noncore_dpll_disable(struct clk_hw *hw);
+int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index);
+long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_clk);
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+ unsigned long target_rate,
+ unsigned long *parent_rate);
+long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_clk);
+u8 omap2_init_dpll_parent(struct clk_hw *hw);
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+ unsigned long *parent_rate);
+void omap2_init_clk_clkdm(struct clk_hw *clk);
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate);
+int omap2_clkops_enable_clkdm(struct clk_hw *hw);
+void omap2_clkops_disable_clkdm(struct clk_hw *hw);
+int omap2_clk_disable_autoidle_all(void);
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
+int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
+ unsigned long parent_rate);
+int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index);
+int omap2_dflt_clk_enable(struct clk_hw *hw);
+void omap2_dflt_clk_disable(struct clk_hw *hw);
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
+void omap3_clk_lock_dpll5(void);
+unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
+ unsigned long parent_rate);
+int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
+ unsigned long parent_rate);
+void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
+void omap2xxx_clkt_vps_init(void);
+
+void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
+void ti_dt_clocks_register(struct ti_dt_clk *oclks);
+void ti_dt_clk_init_provider(struct device_node *np, int index);
+void ti_dt_clk_init_retry_clks(void);
+void ti_dt_clockdomains_setup(void);
+int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
+ ti_of_clk_init_cb_t func);
+int of_ti_clk_autoidle_setup(struct device_node *node);
+int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+
+int omap3430_dt_clk_init(void);
+int omap3630_dt_clk_init(void);
+int am35xx_dt_clk_init(void);
+int ti81xx_dt_clk_init(void);
+int omap4xxx_dt_clk_init(void);
+int omap5xxx_dt_clk_init(void);
+int dra7xx_dt_clk_init(void);
+int am33xx_dt_clk_init(void);
+int am43xx_dt_clk_init(void);
+int omap2420_dt_clk_init(void);
+int omap2430_dt_clk_init(void);
+
+#ifdef CONFIG_OF
+void of_ti_clk_allow_autoidle_all(void);
+void of_ti_clk_deny_autoidle_all(void);
+#else
+static inline void of_ti_clk_allow_autoidle_all(void) { }
+static inline void of_ti_clk_deny_autoidle_all(void) { }
+#endif
+
+extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
+extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
+extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
+extern const struct clk_hw_omap_ops clkhwops_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
+extern const struct clk_hw_omap_ops clkhwops_iclk;
+extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
+
+#ifdef CONFIG_ATAGS
+int omap3430_clk_legacy_init(void);
+int omap3430es1_clk_legacy_init(void);
+int omap36xx_clk_legacy_init(void);
+int am35xx_clk_legacy_init(void);
+#else
+static inline int omap3430_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; }
+static inline int am35xx_clk_legacy_init(void) { return -ENXIO; }
+#endif
+
+
+#endif
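
Illustrative sketch of the DT_CLK() alias table pattern consumed by ti_dt_clocks_register(); the device and clock node names used here are assumptions.

/* Hypothetical alias table: maps DT clock nodes to dev_id/con_id lookups. */
static struct ti_dt_clk example_ti_clks[] = {
	DT_CLK("4809c000.mmc", "fck", "mmc1_fck"),
	DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
	{ .node_name = NULL },			/* sentinel terminates the table */
};

static void __init example_soc_clk_init(void)
{
	ti_dt_clocks_register(example_ti_clks);
}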
diff --git a/include/linux/clk/zynq.h b/include/linux/clk/zynq.h
new file mode 100644
index 000000000..7a5633b71
--- /dev/null
+++ b/include/linux/clk/zynq.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2013 Xilinx Inc.
+ * Copyright (C) 2012 National Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_CLK_ZYNQ_H_
+#define __LINUX_CLK_ZYNQ_H_
+
+#include <linux/spinlock.h>
+
+void zynq_clock_init(void);
+
+struct clk *clk_register_zynq_pll(const char *name, const char *parent,
+ void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index,
+ spinlock_t *lock);
+#endif
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h
new file mode 100644
index 000000000..94bad77ee
--- /dev/null
+++ b/include/linux/clkdev.h
@@ -0,0 +1,51 @@
+/*
+ * include/linux/clkdev.h
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __CLKDEV_H
+#define __CLKDEV_H
+
+#include <asm/clkdev.h>
+
+struct clk;
+struct device;
+
+struct clk_lookup {
+ struct list_head node;
+ const char *dev_id;
+ const char *con_id;
+ struct clk *clk;
+};
+
+#define CLKDEV_INIT(d, n, c) \
+ { \
+ .dev_id = d, \
+ .con_id = n, \
+ .clk = c, \
+ }
+
+struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
+ const char *dev_fmt, ...);
+
+void clkdev_add(struct clk_lookup *cl);
+void clkdev_drop(struct clk_lookup *cl);
+
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
+int clk_register_clkdev(struct clk *, const char *, const char *, ...);
+int clk_register_clkdevs(struct clk *, struct clk_lookup *, size_t);
+
+#ifdef CONFIG_COMMON_CLK
+int __clk_get(struct clk *clk);
+void __clk_put(struct clk *clk);
+#endif
+
+#endif
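
Illustrative sketch of a static lookup table built with CLKDEV_INIT() and registered through clkdev_add_table(); the device names and clock pointers are assumptions.

#include <linux/kernel.h>	/* ARRAY_SIZE() */

/* Hypothetical lookups: .clk is filled in once the clocks actually exist. */
static struct clk_lookup example_lookups[] = {
	CLKDEV_INIT("ff000000.serial", NULL, NULL),
	CLKDEV_INIT(NULL, "apb_pclk", NULL),
};

static void __init example_clkdev_init(struct clk *uart_clk, struct clk *pclk)
{
	example_lookups[0].clk = uart_clk;
	example_lookups[1].clk = pclk;
	clkdev_add_table(example_lookups, ARRAY_SIZE(example_lookups));
}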
diff --git a/include/linux/clksrc-dbx500-prcmu.h b/include/linux/clksrc-dbx500-prcmu.h
new file mode 100644
index 000000000..4fb8119c4
--- /dev/null
+++ b/include/linux/clksrc-dbx500-prcmu.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ *
+ */
+#ifndef __CLKSRC_DBX500_PRCMU_H
+#define __CLKSRC_DBX500_PRCMU_H
+
+#include <linux/init.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_CLKSRC_DBX500_PRCMU
+void __init clksrc_dbx500_prcmu_init(void __iomem *base);
+#else
+static inline void __init clksrc_dbx500_prcmu_init(void __iomem *base) {}
+#endif
+
+#endif
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
new file mode 100644
index 000000000..4d1019d56
--- /dev/null
+++ b/include/linux/clock_cooling.h
@@ -0,0 +1,65 @@
+/*
+ * linux/include/linux/clock_cooling.h
+ *
+ * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
+ *
+ * Copyright (C) 2013 Texas Instruments Inc.
+ * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
+ *
+ * Highly based on cpu_cooling.c.
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
+ * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __CLOCK_COOLING_H__
+#define __CLOCK_COOLING_H__
+
+#include <linux/of.h>
+#include <linux/thermal.h>
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_CLOCK_THERMAL
+/**
+ * clock_cooling_register - function to create clock cooling device.
+ * @dev: struct device pointer to the device used as clock cooling device.
+ * @clock_name: string containing the clock used as cooling mechanism.
+ */
+struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name);
+
+/**
+ * clock_cooling_unregister - function to remove clock cooling device.
+ * @cdev: thermal cooling device pointer.
+ */
+void clock_cooling_unregister(struct thermal_cooling_device *cdev);
+
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq);
+#else /* !CONFIG_CLOCK_THERMAL */
+static inline struct thermal_cooling_device *
+clock_cooling_register(struct device *dev, const char *clock_name)
+{
+ return NULL;
+}
+static inline
+void clock_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+}
+static inline
+unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
+ unsigned long freq)
+{
+ return THERMAL_CSTATE_INVALID;
+}
+#endif /* CONFIG_CLOCK_THERMAL */
+
+#endif /* __CLOCK_COOLING_H__ */
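
Illustrative sketch of registering and unregistering a clock cooling device from a driver; the "cpu_clk" clock name is an assumption.

#include <linux/err.h>

/* Hypothetical thermal glue for a device whose "cpu_clk" can be throttled. */
static struct thermal_cooling_device *example_cooling;

static int example_cooling_probe(struct device *dev)
{
	example_cooling = clock_cooling_register(dev, "cpu_clk");
	if (IS_ERR(example_cooling))
		return PTR_ERR(example_cooling);
	return 0;
}

static void example_cooling_remove(void)
{
	clock_cooling_unregister(example_cooling);
}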
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
new file mode 100644
index 000000000..96c280b2c
--- /dev/null
+++ b/include/linux/clockchips.h
@@ -0,0 +1,218 @@
+/* linux/include/linux/clockchips.h
+ *
+ * This file contains the structure definitions for clockchips.
+ *
+ * If you are not a clockchip, or the time of day code, you should
+ * not be including this file!
+ */
+#ifndef _LINUX_CLOCKCHIPS_H
+#define _LINUX_CLOCKCHIPS_H
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+# include <linux/clocksource.h>
+# include <linux/cpumask.h>
+# include <linux/ktime.h>
+# include <linux/notifier.h>
+
+struct clock_event_device;
+struct module;
+
+/* Clock event mode commands for legacy ->set_mode(): OBSOLETE */
+enum clock_event_mode {
+ CLOCK_EVT_MODE_UNUSED,
+ CLOCK_EVT_MODE_SHUTDOWN,
+ CLOCK_EVT_MODE_PERIODIC,
+ CLOCK_EVT_MODE_ONESHOT,
+ CLOCK_EVT_MODE_RESUME,
+};
+
+/*
+ * Possible states of a clock event device.
+ *
+ * DETACHED: Device is not used by clockevents core. Initial state or can be
+ * reached from SHUTDOWN.
+ * SHUTDOWN: Device is powered-off. Can be reached from PERIODIC or ONESHOT.
+ * PERIODIC: Device is programmed to generate events periodically. Can be
+ * reached from DETACHED or SHUTDOWN.
+ * ONESHOT: Device is programmed to generate event only once. Can be reached
+ * from DETACHED or SHUTDOWN.
+ */
+enum clock_event_state {
+ CLOCK_EVT_STATE_DETACHED,
+ CLOCK_EVT_STATE_SHUTDOWN,
+ CLOCK_EVT_STATE_PERIODIC,
+ CLOCK_EVT_STATE_ONESHOT,
+};
+
+/*
+ * Clock event features
+ */
+# define CLOCK_EVT_FEAT_PERIODIC 0x000001
+# define CLOCK_EVT_FEAT_ONESHOT 0x000002
+# define CLOCK_EVT_FEAT_KTIME 0x000004
+
+/*
+ * x86(64) specific (mis)features:
+ *
+ * - Clockevent source stops in C3 State and needs broadcast support.
+ * - Local APIC timer is used as a dummy device.
+ */
+# define CLOCK_EVT_FEAT_C3STOP 0x000008
+# define CLOCK_EVT_FEAT_DUMMY 0x000010
+
+/*
+ * Core shall set the interrupt affinity dynamically in broadcast mode
+ */
+# define CLOCK_EVT_FEAT_DYNIRQ 0x000020
+# define CLOCK_EVT_FEAT_PERCPU 0x000040
+
+/*
+ * Clockevent device is based on a hrtimer for broadcast
+ */
+# define CLOCK_EVT_FEAT_HRTIMER 0x000080
+
+/**
+ * struct clock_event_device - clock event device descriptor
+ * @event_handler: Assigned by the framework to be called by the low
+ * level handler of the event source
+ * @set_next_event: set next event function using a clocksource delta
+ * @set_next_ktime: set next event function using a direct ktime value
+ * @next_event: local storage for the next event in oneshot mode
+ * @max_delta_ns: maximum delta value in ns
+ * @min_delta_ns: minimum delta value in ns
+ * @mult: nanosecond to cycles multiplier
+ * @shift: nanoseconds to cycles divisor (power of two)
+ * @mode: operating mode, relevant only to ->set_mode(), OBSOLETE
+ * @state: current state of the device, assigned by the core code
+ * @features: features
+ * @retries: number of forced programming retries
+ * @set_mode: legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
+ * @set_state_periodic: switch state to periodic, if !set_mode
+ * @set_state_oneshot: switch state to oneshot, if !set_mode
+ * @set_state_shutdown: switch state to shutdown, if !set_mode
+ * @tick_resume: resume clkevt device, if !set_mode
+ * @broadcast: function to broadcast events
+ * @min_delta_ticks: minimum delta value in ticks stored for reconfiguration
+ * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
+ * @name: ptr to clock event name
+ * @rating: variable to rate clock event devices
+ * @irq: IRQ number (only for non CPU local devices)
+ * @bound_on: Bound on CPU
+ * @cpumask: cpumask to indicate for which CPUs this device works
+ * @list: list head for the management code
+ * @owner: module reference
+ */
+struct clock_event_device {
+ void (*event_handler)(struct clock_event_device *);
+ int (*set_next_event)(unsigned long evt, struct clock_event_device *);
+ int (*set_next_ktime)(ktime_t expires, struct clock_event_device *);
+ ktime_t next_event;
+ u64 max_delta_ns;
+ u64 min_delta_ns;
+ u32 mult;
+ u32 shift;
+ enum clock_event_mode mode;
+ enum clock_event_state state;
+ unsigned int features;
+ unsigned long retries;
+
+ /*
+ * State transition callback(s): Only one of the two groups should be
+ * defined:
+ * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
+ * - set_state_{shutdown|periodic|oneshot}(), tick_resume().
+ */
+ void (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
+ int (*set_state_periodic)(struct clock_event_device *);
+ int (*set_state_oneshot)(struct clock_event_device *);
+ int (*set_state_shutdown)(struct clock_event_device *);
+ int (*tick_resume)(struct clock_event_device *);
+
+ void (*broadcast)(const struct cpumask *mask);
+ void (*suspend)(struct clock_event_device *);
+ void (*resume)(struct clock_event_device *);
+ unsigned long min_delta_ticks;
+ unsigned long max_delta_ticks;
+
+ const char *name;
+ int rating;
+ int irq;
+ int bound_on;
+ const struct cpumask *cpumask;
+ struct list_head list;
+ struct module *owner;
+} ____cacheline_aligned;
+
+/*
+ * Calculate a multiplication factor for scaled math, which is used to convert
+ * nanoseconds based values to clock ticks:
+ *
+ * clock_ticks = (nanoseconds * factor) >> shift.
+ *
+ * div_sc is the rearranged equation to calculate a factor from a given clock
+ * ticks / nanoseconds ratio:
+ *
+ * factor = (clock_ticks << shift) / nanoseconds
+ */
+static inline unsigned long
+div_sc(unsigned long ticks, unsigned long nsec, int shift)
+{
+ u64 tmp = ((u64)ticks) << shift;
+
+ do_div(tmp, nsec);
+
+ return (unsigned long) tmp;
+}
+
+/* Clock event layer functions */
+extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt);
+extern void clockevents_register_device(struct clock_event_device *dev);
+extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
+
+extern void clockevents_config(struct clock_event_device *dev, u32 freq);
+extern void clockevents_config_and_register(struct clock_event_device *dev,
+ u32 freq, unsigned long min_delta,
+ unsigned long max_delta);
+
+extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
+
+static inline void
+clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
+{
+ return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
+}
+
+extern void clockevents_suspend(void);
+extern void clockevents_resume(void);
+
+# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+# ifdef CONFIG_ARCH_HAS_TICK_BROADCAST
+extern void tick_broadcast(const struct cpumask *mask);
+# else
+# define tick_broadcast NULL
+# endif
+extern int tick_receive_broadcast(void);
+# endif
+
+# if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void tick_setup_hrtimer_broadcast(void);
+extern int tick_check_broadcast_expired(void);
+# else
+static inline int tick_check_broadcast_expired(void) { return 0; }
+static inline void tick_setup_hrtimer_broadcast(void) { }
+# endif
+
+extern int clockevents_notify(unsigned long reason, void *arg);
+
+#else /* !CONFIG_GENERIC_CLOCKEVENTS: */
+
+static inline void clockevents_suspend(void) { }
+static inline void clockevents_resume(void) { }
+static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
+static inline int tick_check_broadcast_expired(void) { return 0; }
+static inline void tick_setup_hrtimer_broadcast(void) { }
+
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+
+#endif /* _LINUX_CLOCKCHIPS_H */
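
Illustrative sketch of a timer driver describing its hardware as a clock event device and registering it with clockevents_config_and_register(); the register programming is a placeholder and the delta limits are assumptions.

/* Hypothetical one-shot timer: program a comparator 'delta' ticks ahead. */
static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	/* write 'delta' into the hardware comparator register here */
	return 0;
}

static struct clock_event_device example_clockevent = {
	.name		= "example-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_next_event	= example_set_next_event,
};

static void __init example_timer_init(u32 freq_hz, int irq)
{
	example_clockevent.cpumask = cpumask_of(0);
	example_clockevent.irq = irq;
	/* core derives mult/shift and min/max_delta_ns from freq and tick range */
	clockevents_config_and_register(&example_clockevent, freq_hz,
					0xf, 0x7fffffff);
}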
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
new file mode 100644
index 000000000..d27d01522
--- /dev/null
+++ b/include/linux/clocksource.h
@@ -0,0 +1,262 @@
+/* linux/include/linux/clocksource.h
+ *
+ * This file contains the structure definitions for clocksources.
+ *
+ * If you are not a clocksource, or timekeeping code, you should
+ * not be including this file!
+ */
+#ifndef _LINUX_CLOCKSOURCE_H
+#define _LINUX_CLOCKSOURCE_H
+
+#include <linux/types.h>
+#include <linux/timex.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <linux/cache.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <asm/div64.h>
+#include <asm/io.h>
+
+struct clocksource;
+struct module;
+
+#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+#include <asm/clocksource.h>
+#endif
+
+/**
+ * struct clocksource - hardware abstraction for a free running counter
+ * Provides mostly state-free accessors to the underlying hardware.
+ * This is the structure used for system time.
+ *
+ * @name: ptr to clocksource name
+ * @list: list head for registration
+ * @rating: rating value for selection (higher is better)
+ * To avoid rating inflation the following
+ * list should give you a guide as to how
+ * to assign your clocksource a rating
+ * 1-99: Unfit for real use
+ * Only available for bootup and testing purposes.
+ * 100-199: Base level usability.
+ * Functional for real use, but not desired.
+ * 200-299: Good.
+ * A correct and usable clocksource.
+ * 300-399: Desired.
+ * A reasonably fast and accurate clocksource.
+ * 400-499: Perfect
+ * The ideal clocksource. A must-use where
+ * available.
+ * @read: returns a cycle value, passes clocksource as argument
+ * @enable: optional function to enable the clocksource
+ * @disable: optional function to disable the clocksource
+ * @mask: bitmask for two's complement
+ * subtraction of non-64-bit counters
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
+ * @maxadj: maximum adjustment value to mult (~11%)
+ * @max_cycles: maximum safe cycle value which won't overflow on multiplication
+ * @flags: flags describing special properties
+ * @archdata: arch-specific data
+ * @suspend: suspend function for the clocksource, if necessary
+ * @resume: resume function for the clocksource, if necessary
+ * @owner: module reference, must be set by clocksource in modules
+ */
+struct clocksource {
+ /*
+ * Hotpath data, fits in a single cache line when the
+ * clocksource itself is cacheline aligned.
+ */
+ cycle_t (*read)(struct clocksource *cs);
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+ u64 max_idle_ns;
+ u32 maxadj;
+#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+ struct arch_clocksource_data archdata;
+#endif
+ u64 max_cycles;
+ const char *name;
+ struct list_head list;
+ int rating;
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
+ unsigned long flags;
+ void (*suspend)(struct clocksource *cs);
+ void (*resume)(struct clocksource *cs);
+
+ /* private: */
+#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
+ /* Watchdog related data, used by the framework */
+ struct list_head wd_list;
+ cycle_t cs_last;
+ cycle_t wd_last;
+#endif
+ struct module *owner;
+} ____cacheline_aligned;
+
+/*
+ * Clock source flags bits:
+ */
+#define CLOCK_SOURCE_IS_CONTINUOUS 0x01
+#define CLOCK_SOURCE_MUST_VERIFY 0x02
+
+#define CLOCK_SOURCE_WATCHDOG 0x10
+#define CLOCK_SOURCE_VALID_FOR_HRES 0x20
+#define CLOCK_SOURCE_UNSTABLE 0x40
+#define CLOCK_SOURCE_SUSPEND_NONSTOP 0x80
+#define CLOCK_SOURCE_RESELECT 0x100
+
+/* simplify initialization of mask field */
+#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+
+/**
+ * clocksource_khz2mult - calculates mult from khz and shift
+ * @khz: Clocksource frequency in KHz
+ * @shift_constant: Clocksource shift factor
+ *
+ * Helper function that converts a khz counter frequency to a clocksource
+ * multiplier, given the clocksource shift value.
+ */
+static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
+{
+ /* khz = cyc/(Million ns)
+ * mult/2^shift = ns/cyc
+ * mult = ns/cyc * 2^shift
+ * mult = 1Million/khz * 2^shift
+ * mult = 1000000 * 2^shift / khz
+ * mult = (1000000<<shift) / khz
+ */
+ u64 tmp = ((u64)1000000) << shift_constant;
+
+ tmp += khz/2; /* round for do_div */
+ do_div(tmp, khz);
+
+ return (u32)tmp;
+}
+
+/**
+ * clocksource_hz2mult - calculates mult from hz and shift
+ * @hz: Clocksource frequency in Hz
+ * @shift_constant: Clocksource shift factor
+ *
+ * Helper function that converts a hz counter
+ * frequency to a clocksource multiplier, given the
+ * clocksource shift value.
+ */
+static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
+{
+ /* hz = cyc/(Billion ns)
+ * mult/2^shift = ns/cyc
+ * mult = ns/cyc * 2^shift
+ * mult = 1Billion/hz * 2^shift
+ * mult = 1000000000 * 2^shift / hz
+ * mult = (1000000000<<shift) / hz
+ */
+ u64 tmp = ((u64)1000000000) << shift_constant;
+
+ tmp += hz/2; /* round for do_div */
+ do_div(tmp, hz);
+
+ return (u32)tmp;
+}
+
+/**
+ * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles: cycles
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ *
+ * Converts cycles to nanoseconds, using the given mult and shift.
+ *
+ * XXX - This could use some mult_lxl_ll() asm optimization
+ */
+static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
+{
+ return ((u64) cycles * mult) >> shift;
+}
+
+
+extern int clocksource_unregister(struct clocksource*);
+extern void clocksource_touch_watchdog(void);
+extern struct clocksource* clocksource_get_next(void);
+extern void clocksource_change_rating(struct clocksource *cs, int rating);
+extern void clocksource_suspend(void);
+extern void clocksource_resume(void);
+extern struct clocksource * __init clocksource_default_clock(void);
+extern void clocksource_mark_unstable(struct clocksource *cs);
+
+extern u64
+clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
+extern void
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
+
+/*
+ * Don't call __clocksource_register_scale directly, use
+ * clocksource_register_hz/khz
+ */
+extern int
+__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
+extern void
+__clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq);
+
+/*
+ * Don't call this unless you are a default clocksource
+ * (AKA: jiffies) and absolutely have to.
+ */
+static inline int __clocksource_register(struct clocksource *cs)
+{
+ return __clocksource_register_scale(cs, 1, 0);
+}
+
+static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
+{
+ return __clocksource_register_scale(cs, 1, hz);
+}
+
+static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
+{
+ return __clocksource_register_scale(cs, 1000, khz);
+}
+
+static inline void __clocksource_update_freq_hz(struct clocksource *cs, u32 hz)
+{
+ __clocksource_update_freq_scale(cs, 1, hz);
+}
+
+static inline void __clocksource_update_freq_khz(struct clocksource *cs, u32 khz)
+{
+ __clocksource_update_freq_scale(cs, 1000, khz);
+}
+
+
+extern int timekeeping_notify(struct clocksource *clock);
+
+extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
+extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
+extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
+extern cycle_t clocksource_mmio_readw_down(struct clocksource *);
+
+extern int clocksource_mmio_init(void __iomem *, const char *,
+ unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));
+
+extern int clocksource_i8253_init(void);
+
+#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
+ OF_DECLARE_1(clksrc, name, compat, fn)
+
+#ifdef CONFIG_CLKSRC_OF
+extern void clocksource_of_init(void);
+#else
+static inline void clocksource_of_init(void) {}
+#endif
+
+#ifdef CONFIG_ACPI
+void acpi_generic_timer_init(void);
+#else
+static inline void acpi_generic_timer_init(void) { }
+#endif
+
+#endif /* _LINUX_CLOCKSOURCE_H */
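
Illustrative sketch of registering a free-running 32-bit counter as a clocksource; the counter read is a placeholder and the rating/flags are assumptions.

/* Hypothetical clocksource: wraps a monotonically increasing 32-bit counter. */
static cycle_t example_cs_read(struct clocksource *cs)
{
	return (cycle_t)0;	/* read the hardware counter register here */
}

static struct clocksource example_clocksource = {
	.name	= "example-counter",
	.rating	= 300,			/* "Desired" per the rating guide above */
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init example_cs_init(u32 freq_hz)
{
	/* mult/shift, maxadj and max_idle_ns are computed by the core */
	return clocksource_register_hz(&example_clocksource, freq_hz);
}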
diff --git a/include/linux/cm4000_cs.h b/include/linux/cm4000_cs.h
new file mode 100644
index 000000000..88bee3a33
--- /dev/null
+++ b/include/linux/cm4000_cs.h
@@ -0,0 +1,10 @@
+#ifndef _CM4000_H_
+#define _CM4000_H_
+
+#include <uapi/linux/cm4000_cs.h>
+
+
+#define DEVICE_NAME "cmm"
+#define MODULE_NAME "cm4000_cs"
+
+#endif /* _CM4000_H_ */
diff --git a/include/linux/cma.h b/include/linux/cma.h
new file mode 100644
index 000000000..f7ef093ec
--- /dev/null
+++ b/include/linux/cma.h
@@ -0,0 +1,31 @@
+#ifndef __CMA_H__
+#define __CMA_H__
+
+/*
+ * There is always at least the global CMA area and a few optional
+ * areas configured in the kernel .config.
+ */
+#ifdef CONFIG_CMA_AREAS
+#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
+
+#else
+#define MAX_CMA_AREAS (0)
+
+#endif
+
+struct cma;
+
+extern unsigned long totalcma_pages;
+extern phys_addr_t cma_get_base(const struct cma *cma);
+extern unsigned long cma_get_size(const struct cma *cma);
+
+extern int __init cma_declare_contiguous(phys_addr_t base,
+ phys_addr_t size, phys_addr_t limit,
+ phys_addr_t alignment, unsigned int order_per_bit,
+ bool fixed, struct cma **res_cma);
+extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
+ unsigned int order_per_bit,
+ struct cma **res_cma);
+extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align);
+extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+#endif
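
Illustrative sketch of declaring a CMA area at early boot and allocating from it at runtime; the size, alignment order and page counts are assumptions.

/* Hypothetical private CMA area of 16 MiB placed anywhere in memory. */
static struct cma *example_cma;

static int __init example_cma_reserve(void)
{
	return cma_declare_contiguous(0, 16UL << 20, 0, 0, 0, false,
				      &example_cma);
}

static struct page *example_cma_get_buffer(void)
{
	/* 64 contiguous pages, aligned to a 2^4-page boundary */
	return cma_alloc(example_cma, 64, 4);
}

static void example_cma_put_buffer(struct page *pages)
{
	cma_release(example_cma, pages, 64);
}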
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
new file mode 100644
index 000000000..2e6dce6e5
--- /dev/null
+++ b/include/linux/cmdline-parser.h
@@ -0,0 +1,45 @@
+/*
+ * Parse the kernel command line to get the partition information.
+ *
+ * Written by Cai Zhiyong <caizhiyong@huawei.com>
+ *
+ */
+#ifndef CMDLINEPARSEH
+#define CMDLINEPARSEH
+
+#include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+
+/* partition flags */
+#define PF_RDONLY 0x01 /* Device is read only */
+#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */
+
+struct cmdline_subpart {
+ char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */
+ sector_t from;
+ sector_t size;
+ int flags;
+ struct cmdline_subpart *next_subpart;
+};
+
+struct cmdline_parts {
+ char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */
+ unsigned int nr_subparts;
+ struct cmdline_subpart *subpart;
+ struct cmdline_parts *next_parts;
+};
+
+void cmdline_parts_free(struct cmdline_parts **parts);
+
+int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
+
+struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
+ const char *bdev);
+
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+ int slot,
+ int (*add_part)(int, struct cmdline_subpart *, void *),
+ void *param);
+
+#endif /* CMDLINEPARSEH */
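The intended call sequence is parse, find, iterate, free. A hedged sketch follows; the command-line string, callback and device name are invented and only meant to show the flow:

    #include <linux/cmdline-parser.h>

    /* Hypothetical callback, invoked once per sub-partition in slot order. */
    static int example_add_part(int slot, struct cmdline_subpart *sub, void *param)
    {
            pr_info("part %d: %s from %llu, %llu sectors\n", slot, sub->name,
                    (unsigned long long)sub->from,
                    (unsigned long long)sub->size);
            return 0;
    }

    static void example_parse(sector_t disk_size)
    {
            struct cmdline_parts *parts, *match;

            /* Invented string: one device, two named partitions. */
            if (cmdline_parts_parse(&parts, "mmcblk0:4M(boot),-(rootfs)"))
                    return;

            match = cmdline_parts_find(parts, "mmcblk0");
            if (match)
                    cmdline_parts_set(match, disk_size, 0,
                                      example_add_part, NULL);

            cmdline_parts_free(&parts);
    }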
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
new file mode 100644
index 000000000..1d5b02a96
--- /dev/null
+++ b/include/linux/cn_proc.h
@@ -0,0 +1,58 @@
+/*
+ * cn_proc.h - process events connector
+ *
+ * Copyright (C) Matt Helsley, IBM Corp. 2005
+ * Based on cn_fork.h by Nguyen Anh Quynh and Guillaume Thouvenin
+ * Copyright (C) 2005 Nguyen Anh Quynh <aquynh@gmail.com>
+ * Copyright (C) 2005 Guillaume Thouvenin <guillaume.thouvenin@bull.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#ifndef CN_PROC_H
+#define CN_PROC_H
+
+#include <uapi/linux/cn_proc.h>
+
+#ifdef CONFIG_PROC_EVENTS
+void proc_fork_connector(struct task_struct *task);
+void proc_exec_connector(struct task_struct *task);
+void proc_id_connector(struct task_struct *task, int which_id);
+void proc_sid_connector(struct task_struct *task);
+void proc_ptrace_connector(struct task_struct *task, int which_id);
+void proc_comm_connector(struct task_struct *task);
+void proc_coredump_connector(struct task_struct *task);
+void proc_exit_connector(struct task_struct *task);
+#else
+static inline void proc_fork_connector(struct task_struct *task)
+{}
+
+static inline void proc_exec_connector(struct task_struct *task)
+{}
+
+static inline void proc_id_connector(struct task_struct *task,
+ int which_id)
+{}
+
+static inline void proc_sid_connector(struct task_struct *task)
+{}
+
+static inline void proc_comm_connector(struct task_struct *task)
+{}
+
+static inline void proc_ptrace_connector(struct task_struct *task,
+ int ptrace_id)
+{}
+
+static inline void proc_coredump_connector(struct task_struct *task)
+{}
+
+static inline void proc_exit_connector(struct task_struct *task)
+{}
+#endif /* CONFIG_PROC_EVENTS */
+#endif /* CN_PROC_H */
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
new file mode 100644
index 000000000..aa629bce9
--- /dev/null
+++ b/include/linux/cnt32_to_63.h
@@ -0,0 +1,107 @@
+/*
+ * Extend a 32-bit counter to 63 bits
+ *
+ * Author: Nicolas Pitre
+ * Created: December 3, 2006
+ * Copyright: MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_CNT32_TO_63_H__
+#define __LINUX_CNT32_TO_63_H__
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* this is used only to give gcc a clue about good code generation */
+union cnt32_to_63 {
+ struct {
+#if defined(__LITTLE_ENDIAN)
+ u32 lo, hi;
+#elif defined(__BIG_ENDIAN)
+ u32 hi, lo;
+#endif
+ };
+ u64 val;
+};
+
+
+/**
+ * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter
+ * @cnt_lo: The low part of the counter
+ *
+ * Many hardware clock counters are only 32 bits wide and therefore have
+ * a relatively short period making wrap-arounds rather frequent. This
+ * is a problem when implementing sched_clock() for example, where a 64-bit
+ * non-wrapping monotonic value is expected to be returned.
+ *
+ * To overcome that limitation, let's extend a 32-bit counter to 63 bits
+ * in a completely lock free fashion. Bits 0 to 31 of the clock are provided
+ * by the hardware while bits 32 to 62 are stored in memory. The top bit in
+ * memory is used to synchronize with the hardware clock half-period. When
+ * the top bit of both counters (hardware and in memory) differ then the
+ * memory is updated with a new value, incrementing it when the hardware
+ * counter wraps around.
+ *
+ * Because a word store in memory is atomic, the incremented value will
+ * always be in sync with the top bit, indicating to any potential concurrent
+ * reader whether the value in memory is up to date with regard to the
+ * needed increment. And any race in updating the value in memory is harmless,
+ * as the same value would simply be stored more than once.
+ *
+ * The restrictions for the algorithm to work properly are:
+ *
+ * 1) this code must be called at least once per each half period of the
+ * 32-bit counter;
+ *
+ * 2) this code must not be preempted for a duration longer than the
+ * 32-bit counter half period minus the longest period between two
+ * calls to this code;
+ *
+ * Those requirements ensure a proper update of the state bit in memory.
+ * This is usually not a problem in practice, but if it is, a kernel
+ * timer should be scheduled to ensure this code is executed often
+ * enough.
+ *
+ * And finally:
+ *
+ * 3) the cnt_lo argument must be seen as a globally incrementing value,
+ * meaning that it should be a direct reference to the counter data which
+ * can be evaluated according to a specific ordering within the macro,
+ * and not the result of a previous evaluation stored in a variable.
+ *
+ * For example, this is wrong:
+ *
+ * u32 partial = get_hw_count();
+ * u64 full = cnt32_to_63(partial);
+ * return full;
+ *
+ * This is fine:
+ *
+ * u64 full = cnt32_to_63(get_hw_count());
+ * return full;
+ *
+ * Note that the top bit (bit 63) in the returned value should be considered
+ * as garbage. It is not cleared here because callers are likely to use a
+ * multiplier on the returned value which can get rid of the top bit
+ * implicitly by making the multiplier even, therefore saving on a runtime
+ * clear-bit instruction. Otherwise the caller must remember to clear the top
+ * bit explicitly.
+ */
+#define cnt32_to_63(cnt_lo) \
+({ \
+ static u32 __m_cnt_hi; \
+ union cnt32_to_63 __x; \
+ __x.hi = __m_cnt_hi; \
+ smp_rmb(); \
+ __x.lo = (cnt_lo); \
+ if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
+ __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
+ __x.val; \
+})
+
+#endif
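As a concrete (and purely illustrative) example of restriction 3 and the even-multiplier note above, a platform sched_clock() built on a 32-bit free-running counter might look like the sketch below; the register, its mapping and the 3 MHz cycle-to-nanosecond factor are placeholders, not part of this header:

    static void __iomem *example_counter_reg;    /* mapped by platform code */

    #define EXAMPLE_CYC2NS    334    /* ~1e9 / 3e6; even, see comment below */

    unsigned long long example_sched_clock(void)
    {
            /* Pass the register read directly, never a cached copy. */
            u64 cycles = cnt32_to_63(readl_relaxed(example_counter_reg));

            /* The even multiplier overflows bit 63 out of the 64-bit result,
             * so no explicit clear-bit instruction is needed. */
            return cycles * EXAMPLE_CYC2NS;
    }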
diff --git a/include/linux/coda.h b/include/linux/coda.h
new file mode 100644
index 000000000..d30209b9c
--- /dev/null
+++ b/include/linux/coda.h
@@ -0,0 +1,65 @@
+/*
+ You may distribute this file under either of the two licenses that
+ follow at your discretion.
+*/
+
+/* BLURB lgpl
+
+ Coda File System
+ Release 5
+
+ Copyright (c) 1987-1999 Carnegie Mellon University
+ Additional copyrights listed below
+
+This code is distributed "AS IS" without warranty of any kind under
+the terms of the GNU Library General Public Licence Version 2, as
+shown in the file LICENSE, or under the license shown below. The
+technical and financial contributors to Coda are listed in the file
+CREDITS.
+
+ Additional copyrights
+*/
+
+/*
+
+ Coda: an Experimental Distributed File System
+ Release 4.0
+
+ Copyright (c) 1987-1999 Carnegie Mellon University
+ All Rights Reserved
+
+Permission to use, copy, modify and distribute this software and its
+documentation is hereby granted, provided that both the copyright
+notice and this permission notice appear in all copies of the
+software, derivative works or modified versions, and any portions
+thereof, and that both notices appear in supporting documentation, and
+that credit is given to Carnegie Mellon University in all documents
+and publicity pertaining to direct or indirect use of this code or its
+derivatives.
+
+CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
+SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
+FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
+DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
+RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
+ANY DERIVATIVE WORK.
+
+Carnegie Mellon encourages users of this software to return any
+improvements or extensions that they make, and to grant Carnegie
+Mellon the rights to redistribute these changes without encumbrance.
+*/
+
+/*
+ *
+ * Based on cfs.h from Mach, but revamped for increased simplicity.
+ * Linux modifications by
+ * Peter Braam, Aug 1996
+ */
+#ifndef _CODA_HEADER_
+#define _CODA_HEADER_
+
+#if defined(__linux__)
+typedef unsigned long long u_quad_t;
+#endif
+#include <uapi/linux/coda.h>
+#endif
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
new file mode 100644
index 000000000..5b8721efa
--- /dev/null
+++ b/include/linux/coda_psdev.h
@@ -0,0 +1,72 @@
+#ifndef __CODA_PSDEV_H
+#define __CODA_PSDEV_H
+
+#include <linux/backing-dev.h>
+#include <linux/mutex.h>
+#include <uapi/linux/coda_psdev.h>
+
+struct kstatfs;
+
+/* communication pending/processing queues */
+struct venus_comm {
+ u_long vc_seq;
+ wait_queue_head_t vc_waitq; /* Venus wait queue */
+ struct list_head vc_pending;
+ struct list_head vc_processing;
+ int vc_inuse;
+ struct super_block *vc_sb;
+ struct backing_dev_info bdi;
+ struct mutex vc_mutex;
+};
+
+
+static inline struct venus_comm *coda_vcp(struct super_block *sb)
+{
+ return (struct venus_comm *)((sb)->s_fs_info);
+}
+
+/* upcalls */
+int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
+int venus_getattr(struct super_block *sb, struct CodaFid *fid,
+ struct coda_vattr *attr);
+int venus_setattr(struct super_block *, struct CodaFid *, struct coda_vattr *);
+int venus_lookup(struct super_block *sb, struct CodaFid *fid,
+ const char *name, int length, int *type,
+ struct CodaFid *resfid);
+int venus_close(struct super_block *sb, struct CodaFid *fid, int flags,
+ kuid_t uid);
+int venus_open(struct super_block *sb, struct CodaFid *fid, int flags,
+ struct file **f);
+int venus_mkdir(struct super_block *sb, struct CodaFid *dirfid,
+ const char *name, int length,
+ struct CodaFid *newfid, struct coda_vattr *attrs);
+int venus_create(struct super_block *sb, struct CodaFid *dirfid,
+ const char *name, int length, int excl, int mode,
+ struct CodaFid *newfid, struct coda_vattr *attrs) ;
+int venus_rmdir(struct super_block *sb, struct CodaFid *dirfid,
+ const char *name, int length);
+int venus_remove(struct super_block *sb, struct CodaFid *dirfid,
+ const char *name, int length);
+int venus_readlink(struct super_block *sb, struct CodaFid *fid,
+ char *buffer, int *length);
+int venus_rename(struct super_block *, struct CodaFid *new_fid,
+ struct CodaFid *old_fid, size_t old_length,
+ size_t new_length, const char *old_name,
+ const char *new_name);
+int venus_link(struct super_block *sb, struct CodaFid *fid,
+ struct CodaFid *dirfid, const char *name, int len );
+int venus_symlink(struct super_block *sb, struct CodaFid *fid,
+ const char *name, int len, const char *symname, int symlen);
+int venus_access(struct super_block *sb, struct CodaFid *fid, int mask);
+int venus_pioctl(struct super_block *sb, struct CodaFid *fid,
+ unsigned int cmd, struct PioctlData *data);
+int coda_downcall(struct venus_comm *vcp, int opcode, union outputArgs *out);
+int venus_fsync(struct super_block *sb, struct CodaFid *fid);
+int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
+
+/*
+ * Statistics
+ */
+
+extern struct venus_comm coda_comms[];
+#endif
diff --git a/include/linux/com20020.h b/include/linux/com20020.h
new file mode 100644
index 000000000..85898995b
--- /dev/null
+++ b/include/linux/com20020.h
@@ -0,0 +1,145 @@
+/*
+ * Linux ARCnet driver - COM20020 chipset support - function declarations
+ *
+ * Written 1997 by David Woodhouse.
+ * Written 1994-1999 by Avery Pennarun.
+ * Derived from skeleton.c by Donald Becker.
+ *
+ * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
+ * for sponsoring the further development of this driver.
+ *
+ * **********************
+ *
+ * The original copyright of skeleton.c was as follows:
+ *
+ * skeleton.c Written 1993 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may only be used
+ * and distributed according to the terms of the GNU General Public License as
+ * modified by SRC, incorporated herein by reference.
+ *
+ * **********************
+ *
+ * For more details, see drivers/net/arcnet.c
+ *
+ * **********************
+ */
+#ifndef __COM20020_H
+#define __COM20020_H
+
+int com20020_check(struct net_device *dev);
+int com20020_found(struct net_device *dev, int shared);
+extern const struct net_device_ops com20020_netdev_ops;
+
+/* The number of low I/O ports used by the card. */
+#define ARCNET_TOTAL_SIZE 8
+
+/* various register addresses */
+#ifdef CONFIG_SA1100_CT6001
+#define BUS_ALIGN 2 /* 8 bit device on a 16 bit bus - needs padding */
+#else
+#define BUS_ALIGN 1
+#endif
+
+#define PLX_PCI_MAX_CARDS 2
+
+struct com20020_pci_channel_map {
+ u32 bar;
+ u32 offset;
+ u32 size; /* 0x00 - auto, e.g. length of entire bar */
+};
+
+struct com20020_pci_card_info {
+ const char *name;
+ int devcount;
+
+ struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
+
+ unsigned int flags;
+};
+
+struct com20020_priv {
+ struct com20020_pci_card_info *ci;
+ struct list_head list_dev;
+};
+
+struct com20020_dev {
+ struct list_head list;
+ struct net_device *dev;
+
+ struct com20020_priv *pci_priv;
+ int index;
+};
+
+#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */
+#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */
+#define _COMMAND (ioaddr+BUS_ALIGN*1) /* standard arcnet commands */
+#define _DIAGSTAT (ioaddr+BUS_ALIGN*1) /* diagnostic status register */
+#define _ADDR_HI (ioaddr+BUS_ALIGN*2) /* control registers for IO-mapped memory */
+#define _ADDR_LO (ioaddr+BUS_ALIGN*3)
+#define _MEMDATA (ioaddr+BUS_ALIGN*4) /* data port for IO-mapped memory */
+#define _SUBADR (ioaddr+BUS_ALIGN*5) /* the extended port _XREG refers to */
+#define _CONFIG (ioaddr+BUS_ALIGN*6) /* configuration register */
+#define _XREG (ioaddr+BUS_ALIGN*7) /* extra registers (indexed by _CONFIG
+ or _SUBADR) */
+
+/* in the ADDR_HI register */
+#define RDDATAflag 0x80 /* next access is a read (not a write) */
+
+/* in the DIAGSTAT register */
+#define NEWNXTIDflag 0x02 /* ID to which token is passed has changed */
+
+/* in the CONFIG register */
+#define RESETcfg 0x80 /* put card in reset state */
+#define TXENcfg 0x20 /* enable TX */
+
+/* in SETUP register */
+#define PROMISCset 0x10 /* enable RCV_ALL */
+#define P1MODE 0x80 /* enable P1-MODE for Backplane */
+#define SLOWARB 0x01 /* enable Slow Arbitration for >=5Mbps */
+
+/* COM2002x */
+#define SUB_TENTATIVE 0 /* tentative node ID */
+#define SUB_NODE 1 /* node ID */
+#define SUB_SETUP1 2 /* various options */
+#define SUB_TEST 3 /* test/diag register */
+
+/* COM20022 only */
+#define SUB_SETUP2 4 /* sundry options */
+#define SUB_BUSCTL 5 /* bus control options */
+#define SUB_DMACOUNT 6 /* DMA count options */
+
+#define SET_SUBADR(x) do { \
+ if ((x) < 4) \
+ { \
+ lp->config = (lp->config & ~0x03) | (x); \
+ SETCONF; \
+ } \
+ else \
+ { \
+ outb(x, _SUBADR); \
+ } \
+} while (0)
+
+#undef ARCRESET
+#undef ASTATUS
+#undef ACOMMAND
+#undef AINTMASK
+
+#define ARCRESET { outb(lp->config | 0x80, _CONFIG); \
+ udelay(5); \
+ outb(lp->config , _CONFIG); \
+ }
+#define ARCRESET0 { outb(0x18 | 0x80, _CONFIG); \
+ udelay(5); \
+ outb(0x18 , _CONFIG); \
+ }
+
+#define ASTATUS() inb(_STATUS)
+#define ADIAGSTATUS() inb(_DIAGSTAT)
+#define ACOMMAND(cmd) outb((cmd),_COMMAND)
+#define AINTMASK(msk) outb((msk),_INTMASK)
+
+#define SETCONF outb(lp->config, _CONFIG)
+
+#endif /* __COM20020_H */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
new file mode 100644
index 000000000..aa8f61cf3
--- /dev/null
+++ b/include/linux/compaction.h
@@ -0,0 +1,104 @@
+#ifndef _LINUX_COMPACTION_H
+#define _LINUX_COMPACTION_H
+
+/* Return values for compact_zone() and try_to_compact_pages() */
+/* compaction didn't start as it was deferred due to past failures */
+#define COMPACT_DEFERRED 0
+/* compaction didn't start as it was not possible or direct reclaim was more suitable */
+#define COMPACT_SKIPPED 1
+/* compaction should continue to another pageblock */
+#define COMPACT_CONTINUE 2
+/* direct compaction partially compacted a zone and there are suitable pages */
+#define COMPACT_PARTIAL 3
+/* The full zone was compacted */
+#define COMPACT_COMPLETE 4
+/* For more detailed tracepoint output */
+#define COMPACT_NO_SUITABLE_PAGE 5
+#define COMPACT_NOT_SUITABLE_ZONE 6
+/* When adding new state, please change compaction_status_string, too */
+
+/* Used to signal whether compaction detected need_sched() or lock contention */
+/* No contention detected */
+#define COMPACT_CONTENDED_NONE 0
+/* Either need_sched() was true or fatal signal pending */
+#define COMPACT_CONTENDED_SCHED 1
+/* Zone lock or lru_lock was contended in async compaction */
+#define COMPACT_CONTENDED_LOCK 2
+
+struct alloc_context; /* in mm/internal.h */
+
+#ifdef CONFIG_COMPACTION
+extern int sysctl_compact_memory;
+extern int sysctl_compaction_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_extfrag_threshold;
+extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length, loff_t *ppos);
+extern int sysctl_compact_unevictable_allowed;
+
+extern int fragmentation_index(struct zone *zone, unsigned int order);
+extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
+ int alloc_flags, const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended);
+extern void compact_pgdat(pg_data_t *pgdat, int order);
+extern void reset_isolation_suitable(pg_data_t *pgdat);
+extern unsigned long compaction_suitable(struct zone *zone, int order,
+ int alloc_flags, int classzone_idx);
+
+extern void defer_compaction(struct zone *zone, int order);
+extern bool compaction_deferred(struct zone *zone, int order);
+extern void compaction_defer_reset(struct zone *zone, int order,
+ bool alloc_success);
+extern bool compaction_restarting(struct zone *zone, int order);
+
+#else
+static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
+ unsigned int order, int alloc_flags,
+ const struct alloc_context *ac,
+ enum migrate_mode mode, int *contended)
+{
+ return COMPACT_CONTINUE;
+}
+
+static inline void compact_pgdat(pg_data_t *pgdat, int order)
+{
+}
+
+static inline void reset_isolation_suitable(pg_data_t *pgdat)
+{
+}
+
+static inline unsigned long compaction_suitable(struct zone *zone, int order,
+ int alloc_flags, int classzone_idx)
+{
+ return COMPACT_SKIPPED;
+}
+
+static inline void defer_compaction(struct zone *zone, int order)
+{
+}
+
+static inline bool compaction_deferred(struct zone *zone, int order)
+{
+ return true;
+}
+
+#endif /* CONFIG_COMPACTION */
+
+#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int compaction_register_node(struct node *node);
+extern void compaction_unregister_node(struct node *node);
+
+#else
+
+static inline int compaction_register_node(struct node *node)
+{
+ return 0;
+}
+
+static inline void compaction_unregister_node(struct node *node)
+{
+}
+#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
+
+#endif /* _LINUX_COMPACTION_H */
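For context, a hedged sketch of how a caller in the allocator slow path might consume the return codes above; this is deliberately simplified compared to the real logic in mm/page_alloc.c, and the function name is invented:

    static bool example_try_compaction(gfp_t gfp_mask, unsigned int order,
                                       int alloc_flags,
                                       const struct alloc_context *ac)
    {
            int contended;
            unsigned long rc = try_to_compact_pages(gfp_mask, order, alloc_flags,
                                                    ac, MIGRATE_ASYNC, &contended);

            /* Per the flag comments above, only COMPACT_PARTIAL indicates
             * that suitable pages should now be available. */
            return rc == COMPACT_PARTIAL;
    }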
diff --git a/include/linux/compat.h b/include/linux/compat.h
new file mode 100644
index 000000000..ab2581469
--- /dev/null
+++ b/include/linux/compat.h
@@ -0,0 +1,721 @@
+#ifndef _LINUX_COMPAT_H
+#define _LINUX_COMPAT_H
+/*
+ * These are the type definitions for the architecture specific
+ * syscall compatibility layer.
+ */
+
+#ifdef CONFIG_COMPAT
+
+#include <linux/stat.h>
+#include <linux/param.h> /* for HZ */
+#include <linux/sem.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <linux/fs.h>
+#include <linux/aio_abi.h> /* for aio_context_t */
+#include <linux/unistd.h>
+
+#include <asm/compat.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#ifndef COMPAT_USE_64BIT_TIME
+#define COMPAT_USE_64BIT_TIME 0
+#endif
+
+#ifndef __SC_DELOUSE
+#define __SC_DELOUSE(t,v) ((t)(unsigned long)(v))
+#endif
+
+#define COMPAT_SYSCALL_DEFINE0(name) \
+ asmlinkage long compat_sys_##name(void)
+
+#define COMPAT_SYSCALL_DEFINE1(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE2(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE3(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE4(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE5(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define COMPAT_SYSCALL_DEFINE6(name, ...) \
+ COMPAT_SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))\
+ __attribute__((alias(__stringify(compat_SyS##name)))); \
+ static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+ asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__));\
+ asmlinkage long compat_SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))\
+ { \
+ return C_SYSC##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ } \
+ static inline long C_SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
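To show how the macro family above is used (a simplified sketch, not an actual kernel syscall), a two-argument compat entry point would be written as:

    /* Expands to compat_sys_example_setflags() plus the compat_SyS/C_SYSC
     * wrappers, which pass each 32-bit argument through __SC_DELOUSE before
     * the body runs.  The helper called inside is hypothetical. */
    COMPAT_SYSCALL_DEFINE2(example_setflags, unsigned int, fd,
                           compat_ulong_t, flags)
    {
            return do_example_setflags(fd, flags);
    }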
+
+#ifndef compat_user_stack_pointer
+#define compat_user_stack_pointer() current_user_stack_pointer()
+#endif
+#ifndef compat_sigaltstack /* we'll need that for MIPS */
+typedef struct compat_sigaltstack {
+ compat_uptr_t ss_sp;
+ int ss_flags;
+ compat_size_t ss_size;
+} compat_stack_t;
+#endif
+
+#define compat_jiffies_to_clock_t(x) \
+ (((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
+
+typedef __compat_uid32_t compat_uid_t;
+typedef __compat_gid32_t compat_gid_t;
+
+typedef compat_ulong_t compat_aio_context_t;
+
+struct compat_sel_arg_struct;
+struct rusage;
+
+struct compat_itimerspec {
+ struct compat_timespec it_interval;
+ struct compat_timespec it_value;
+};
+
+struct compat_utimbuf {
+ compat_time_t actime;
+ compat_time_t modtime;
+};
+
+struct compat_itimerval {
+ struct compat_timeval it_interval;
+ struct compat_timeval it_value;
+};
+
+struct compat_tms {
+ compat_clock_t tms_utime;
+ compat_clock_t tms_stime;
+ compat_clock_t tms_cutime;
+ compat_clock_t tms_cstime;
+};
+
+struct compat_timex {
+ compat_uint_t modes;
+ compat_long_t offset;
+ compat_long_t freq;
+ compat_long_t maxerror;
+ compat_long_t esterror;
+ compat_int_t status;
+ compat_long_t constant;
+ compat_long_t precision;
+ compat_long_t tolerance;
+ struct compat_timeval time;
+ compat_long_t tick;
+ compat_long_t ppsfreq;
+ compat_long_t jitter;
+ compat_int_t shift;
+ compat_long_t stabil;
+ compat_long_t jitcnt;
+ compat_long_t calcnt;
+ compat_long_t errcnt;
+ compat_long_t stbcnt;
+ compat_int_t tai;
+
+ compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
+ compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32;
+ compat_int_t:32; compat_int_t:32; compat_int_t:32;
+};
+
+#define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
+
+typedef struct {
+ compat_sigset_word sig[_COMPAT_NSIG_WORDS];
+} compat_sigset_t;
+
+struct compat_sigaction {
+#ifndef __ARCH_HAS_IRIX_SIGACTION
+ compat_uptr_t sa_handler;
+ compat_ulong_t sa_flags;
+#else
+ compat_uint_t sa_flags;
+ compat_uptr_t sa_handler;
+#endif
+#ifdef __ARCH_HAS_SA_RESTORER
+ compat_uptr_t sa_restorer;
+#endif
+ compat_sigset_t sa_mask __packed;
+};
+
+/*
+ * These functions operate on 32- or 64-bit specs depending on
+ * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
+ */
+extern int compat_get_timespec(struct timespec *, const void __user *);
+extern int compat_put_timespec(const struct timespec *, void __user *);
+extern int compat_get_timeval(struct timeval *, const void __user *);
+extern int compat_put_timeval(const struct timeval *, void __user *);
+
+/*
+ * This function converts a timespec if necessary and returns a *user
+ * space* pointer. If no conversion is necessary, it returns the
+ * initial pointer. NULL is a legitimate argument and will always
+ * output NULL.
+ */
+extern int compat_convert_timespec(struct timespec __user **,
+ const void __user *);
+
+struct compat_iovec {
+ compat_uptr_t iov_base;
+ compat_size_t iov_len;
+};
+
+struct compat_rlimit {
+ compat_ulong_t rlim_cur;
+ compat_ulong_t rlim_max;
+};
+
+struct compat_rusage {
+ struct compat_timeval ru_utime;
+ struct compat_timeval ru_stime;
+ compat_long_t ru_maxrss;
+ compat_long_t ru_ixrss;
+ compat_long_t ru_idrss;
+ compat_long_t ru_isrss;
+ compat_long_t ru_minflt;
+ compat_long_t ru_majflt;
+ compat_long_t ru_nswap;
+ compat_long_t ru_inblock;
+ compat_long_t ru_oublock;
+ compat_long_t ru_msgsnd;
+ compat_long_t ru_msgrcv;
+ compat_long_t ru_nsignals;
+ compat_long_t ru_nvcsw;
+ compat_long_t ru_nivcsw;
+};
+
+extern int put_compat_rusage(const struct rusage *,
+ struct compat_rusage __user *);
+
+struct compat_siginfo;
+
+extern asmlinkage long compat_sys_waitid(int, compat_pid_t,
+ struct compat_siginfo __user *, int,
+ struct compat_rusage __user *);
+
+struct compat_dirent {
+ u32 d_ino;
+ compat_off_t d_off;
+ u16 d_reclen;
+ char d_name[256];
+};
+
+struct compat_ustat {
+ compat_daddr_t f_tfree;
+ compat_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+#define COMPAT_SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3)
+
+typedef struct compat_sigevent {
+ compat_sigval_t sigev_value;
+ compat_int_t sigev_signo;
+ compat_int_t sigev_notify;
+ union {
+ compat_int_t _pad[COMPAT_SIGEV_PAD_SIZE];
+ compat_int_t _tid;
+
+ struct {
+ compat_uptr_t _function;
+ compat_uptr_t _attribute;
+ } _sigev_thread;
+ } _sigev_un;
+} compat_sigevent_t;
+
+struct compat_ifmap {
+ compat_ulong_t mem_start;
+ compat_ulong_t mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+};
+
+struct compat_if_settings {
+ unsigned int type; /* Type of physical device or protocol */
+ unsigned int size; /* Size of the data allocated by the caller */
+ compat_uptr_t ifs_ifsu; /* union of pointers */
+};
+
+struct compat_ifreq {
+ union {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ compat_int_t ifru_ivalue;
+ compat_int_t ifru_mtu;
+ struct compat_ifmap ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ char ifru_newname[IFNAMSIZ];
+ compat_caddr_t ifru_data;
+ struct compat_if_settings ifru_settings;
+ } ifr_ifru;
+};
+
+struct compat_ifconf {
+ compat_int_t ifc_len; /* size of buffer */
+ compat_caddr_t ifcbuf;
+};
+
+struct compat_robust_list {
+ compat_uptr_t next;
+};
+
+struct compat_robust_list_head {
+ struct compat_robust_list list;
+ compat_long_t futex_offset;
+ compat_uptr_t list_op_pending;
+};
+
+#ifdef CONFIG_COMPAT_OLD_SIGACTION
+struct compat_old_sigaction {
+ compat_uptr_t sa_handler;
+ compat_old_sigset_t sa_mask;
+ compat_ulong_t sa_flags;
+ compat_uptr_t sa_restorer;
+};
+#endif
+
+struct compat_statfs;
+struct compat_statfs64;
+struct compat_old_linux_dirent;
+struct compat_linux_dirent;
+struct linux_dirent64;
+struct compat_msghdr;
+struct compat_mmsghdr;
+struct compat_sysinfo;
+struct compat_sysctl_args;
+struct compat_kexec_segment;
+struct compat_mq_attr;
+struct compat_msgbuf;
+
+extern void compat_exit_robust_list(struct task_struct *curr);
+
+asmlinkage long
+compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
+ compat_size_t len);
+asmlinkage long
+compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
+ compat_size_t __user *len_ptr);
+
+asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
+asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
+asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, int msgflg);
+asmlinkage long compat_sys_msgrcv(int msqid, compat_uptr_t msgp,
+ compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg);
+long compat_sys_msgctl(int first, int second, void __user *uptr);
+long compat_sys_shmctl(int first, int second, void __user *uptr);
+long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
+ unsigned nsems, const struct compat_timespec __user *timeout);
+asmlinkage long compat_sys_keyctl(u32 option,
+ u32 arg2, u32 arg3, u32 arg4, u32 arg5);
+asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
+
+asmlinkage ssize_t compat_sys_readv(compat_ulong_t fd,
+ const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_writev(compat_ulong_t fd,
+ const struct compat_iovec __user *vec, compat_ulong_t vlen);
+asmlinkage ssize_t compat_sys_preadv(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+asmlinkage ssize_t compat_sys_pwritev(compat_ulong_t fd,
+ const struct compat_iovec __user *vec,
+ compat_ulong_t vlen, u32 pos_low, u32 pos_high);
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64
+asmlinkage long compat_sys_preadv64(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64
+asmlinkage long compat_sys_pwritev64(unsigned long fd,
+ const struct compat_iovec __user *vec,
+ unsigned long vlen, loff_t pos);
+#endif
+
+asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
+
+asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp);
+asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
+ const compat_uptr_t __user *argv,
+ const compat_uptr_t __user *envp, int flags);
+
+asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp, compat_ulong_t __user *exp,
+ struct compat_timeval __user *tvp);
+
+asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg);
+
+asmlinkage long compat_sys_wait4(compat_pid_t pid,
+ compat_uint_t __user *stat_addr, int options,
+ struct compat_rusage __user *ru);
+
+#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
+
+#define BITS_TO_COMPAT_LONGS(bits) \
+ (((bits)+BITS_PER_COMPAT_LONG-1)/BITS_PER_COMPAT_LONG)
+
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
+ unsigned long bitmap_size);
+long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
+ unsigned long bitmap_size);
+int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
+int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from);
+int get_compat_sigevent(struct sigevent *event,
+ const struct compat_sigevent __user *u_event);
+long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
+ struct compat_siginfo __user *uinfo);
+#ifdef CONFIG_COMPAT_OLD_SIGACTION
+asmlinkage long compat_sys_sigaction(int sig,
+ const struct compat_old_sigaction __user *act,
+ struct compat_old_sigaction __user *oact);
+#endif
+
+static inline int compat_timeval_compare(struct compat_timeval *lhs,
+ struct compat_timeval *rhs)
+{
+ if (lhs->tv_sec < rhs->tv_sec)
+ return -1;
+ if (lhs->tv_sec > rhs->tv_sec)
+ return 1;
+ return lhs->tv_usec - rhs->tv_usec;
+}
+
+static inline int compat_timespec_compare(struct compat_timespec *lhs,
+ struct compat_timespec *rhs)
+{
+ if (lhs->tv_sec < rhs->tv_sec)
+ return -1;
+ if (lhs->tv_sec > rhs->tv_sec)
+ return 1;
+ return lhs->tv_nsec - rhs->tv_nsec;
+}
+
+extern int get_compat_itimerspec(struct itimerspec *dst,
+ const struct compat_itimerspec __user *src);
+extern int put_compat_itimerspec(struct compat_itimerspec __user *dst,
+ const struct itimerspec *src);
+
+asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz);
+asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+ struct timezone __user *tz);
+
+asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
+
+extern int compat_printk(const char *fmt, ...);
+extern void sigset_from_compat(sigset_t *set, const compat_sigset_t *compat);
+extern void sigset_to_compat(compat_sigset_t *compat, const sigset_t *set);
+
+asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
+ compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes,
+ const compat_ulong_t __user *new_nodes);
+
+extern int compat_ptrace_request(struct task_struct *child,
+ compat_long_t request,
+ compat_ulong_t addr, compat_ulong_t data);
+
+extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t addr, compat_ulong_t data);
+asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
+ compat_long_t addr, compat_long_t data);
+
+asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
+/*
+ * epoll (fs/eventpoll.c) compat bits follow ...
+ */
+struct epoll_event; /* fortunately, this one is fixed-layout */
+asmlinkage long compat_sys_epoll_pwait(int epfd,
+ struct epoll_event __user *events,
+ int maxevents, int timeout,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+
+asmlinkage long compat_sys_utime(const char __user *filename,
+ struct compat_utimbuf __user *t);
+asmlinkage long compat_sys_utimensat(unsigned int dfd,
+ const char __user *filename,
+ struct compat_timespec __user *t,
+ int flags);
+
+asmlinkage long compat_sys_time(compat_time_t __user *tloc);
+asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
+asmlinkage long compat_sys_signalfd(int ufd,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
+ const struct compat_itimerspec __user *utmr,
+ struct compat_itimerspec __user *otmr);
+asmlinkage long compat_sys_timerfd_gettime(int ufd,
+ struct compat_itimerspec __user *otmr);
+
+asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
+ __u32 __user *pages,
+ const int __user *nodes,
+ int __user *status,
+ int flags);
+asmlinkage long compat_sys_futimesat(unsigned int dfd,
+ const char __user *filename,
+ struct compat_timeval __user *t);
+asmlinkage long compat_sys_utimes(const char __user *filename,
+ struct compat_timeval __user *t);
+asmlinkage long compat_sys_newstat(const char __user *filename,
+ struct compat_stat __user *statbuf);
+asmlinkage long compat_sys_newlstat(const char __user *filename,
+ struct compat_stat __user *statbuf);
+asmlinkage long compat_sys_newfstatat(unsigned int dfd,
+ const char __user *filename,
+ struct compat_stat __user *statbuf,
+ int flag);
+asmlinkage long compat_sys_newfstat(unsigned int fd,
+ struct compat_stat __user *statbuf);
+asmlinkage long compat_sys_statfs(const char __user *pathname,
+ struct compat_statfs __user *buf);
+asmlinkage long compat_sys_fstatfs(unsigned int fd,
+ struct compat_statfs __user *buf);
+asmlinkage long compat_sys_statfs64(const char __user *pathname,
+ compat_size_t sz,
+ struct compat_statfs64 __user *buf);
+asmlinkage long compat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ struct compat_statfs64 __user *buf);
+asmlinkage long compat_sys_fcntl64(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
+asmlinkage long compat_sys_fcntl(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
+asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p);
+asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct compat_timespec __user *timeout);
+asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr,
+ u32 __user *iocb);
+asmlinkage long compat_sys_mount(const char __user *dev_name,
+ const char __user *dir_name,
+ const char __user *type, compat_ulong_t flags,
+ const void __user *data);
+asmlinkage long compat_sys_old_readdir(unsigned int fd,
+ struct compat_old_linux_dirent __user *,
+ unsigned int count);
+asmlinkage long compat_sys_getdents(unsigned int fd,
+ struct compat_linux_dirent __user *dirent,
+ unsigned int count);
+#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64
+asmlinkage long compat_sys_getdents64(unsigned int fd,
+ struct linux_dirent64 __user *dirent,
+ unsigned int count);
+#endif
+asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
+ unsigned int nr_segs, unsigned int flags);
+asmlinkage long compat_sys_open(const char __user *filename, int flags,
+ umode_t mode);
+asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
+ int flags, umode_t mode);
+asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
+ struct file_handle __user *handle,
+ int flags);
+asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
+asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp,
+ compat_ulong_t __user *exp,
+ struct compat_timespec __user *tsp,
+ void __user *sig);
+asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
+ unsigned int nfds,
+ struct compat_timespec __user *tsp,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_signalfd4(int ufd,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize, int flags);
+asmlinkage long compat_sys_get_mempolicy(int __user *policy,
+ compat_ulong_t __user *nmask,
+ compat_ulong_t maxnode,
+ compat_ulong_t addr,
+ compat_ulong_t flags);
+asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
+ compat_ulong_t maxnode);
+asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
+ compat_ulong_t mode,
+ compat_ulong_t __user *nmask,
+ compat_ulong_t maxnode, compat_ulong_t flags);
+
+asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
+ char __user *optval, unsigned int optlen);
+asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
+ unsigned flags);
+asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags);
+asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
+ unsigned int flags);
+asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
+ unsigned flags);
+asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
+ unsigned flags, struct sockaddr __user *addr,
+ int __user *addrlen);
+asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct compat_timespec __user *timeout);
+asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
+ struct compat_timespec __user *rmtp);
+asmlinkage long compat_sys_getitimer(int which,
+ struct compat_itimerval __user *it);
+asmlinkage long compat_sys_setitimer(int which,
+ struct compat_itimerval __user *in,
+ struct compat_itimerval __user *out);
+asmlinkage long compat_sys_times(struct compat_tms __user *tbuf);
+asmlinkage long compat_sys_setrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+asmlinkage long compat_sys_getrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
+asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
+ unsigned int len,
+ compat_ulong_t __user *user_mask_ptr);
+asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
+ unsigned int len,
+ compat_ulong_t __user *user_mask_ptr);
+asmlinkage long compat_sys_timer_create(clockid_t which_clock,
+ struct compat_sigevent __user *timer_event_spec,
+ timer_t __user *created_timer_id);
+asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
+ struct compat_itimerspec __user *new,
+ struct compat_itimerspec __user *old);
+asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
+ struct compat_itimerspec __user *setting);
+asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
+ struct compat_timespec __user *tp);
+asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
+ struct compat_timespec __user *tp);
+asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock,
+ struct compat_timex __user *tp);
+asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
+ struct compat_timespec __user *tp);
+asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
+ struct compat_timespec __user *rqtp,
+ struct compat_timespec __user *rmtp);
+asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct compat_timespec __user *uts, compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ compat_sigset_t __user *oset,
+ compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
+ compat_size_t sigsetsize);
+#ifndef CONFIG_ODD_RT_SIGACTION
+asmlinkage long compat_sys_rt_sigaction(int,
+ const struct compat_sigaction __user *,
+ struct compat_sigaction __user *,
+ compat_size_t);
+#endif
+asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
+ struct compat_siginfo __user *uinfo);
+asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info);
+asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
+ compat_ulong_t arg);
+asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
+ struct compat_timespec __user *utime, u32 __user *uaddr2,
+ u32 val3);
+asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
+ char __user *optval, int __user *optlen);
+asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
+ compat_ulong_t nr_segments,
+ struct compat_kexec_segment __user *,
+ compat_ulong_t flags);
+asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
+ const struct compat_mq_attr __user *u_mqstat,
+ struct compat_mq_attr __user *u_omqstat);
+asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
+ const struct compat_sigevent __user *u_notification);
+asmlinkage long compat_sys_mq_open(const char __user *u_name,
+ int oflag, compat_mode_t mode,
+ struct compat_mq_attr __user *u_attr);
+asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
+ const char __user *u_msg_ptr,
+ compat_size_t msg_len, unsigned int msg_prio,
+ const struct compat_timespec __user *u_abs_timeout);
+asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
+ char __user *u_msg_ptr,
+ compat_size_t msg_len, unsigned int __user *u_msg_prio,
+ const struct compat_timespec __user *u_abs_timeout);
+asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
+asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);
+
+extern ssize_t compat_rw_copy_check_uvector(int type,
+ const struct compat_iovec __user *uvector,
+ unsigned long nr_segs,
+ unsigned long fast_segs, struct iovec *fast_pointer,
+ struct iovec **ret_pointer);
+
+extern void __user *compat_alloc_user_space(unsigned long len);
+
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
+ compat_ulong_t riovcnt, compat_ulong_t flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ compat_ulong_t liovcnt, const struct compat_iovec __user *rvec,
+ compat_ulong_t riovcnt, compat_ulong_t flags);
+
+asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
+ compat_off_t __user *offset, compat_size_t count);
+asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
+ compat_loff_t __user *offset, compat_size_t count);
+asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
+ compat_stack_t __user *uoss_ptr);
+
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set);
+#endif
+
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *nset,
+ compat_old_sigset_t __user *oset);
+#endif
+
+int compat_restore_altstack(const compat_stack_t __user *uss);
+int __compat_save_altstack(compat_stack_t __user *, unsigned long);
+#define compat_save_altstack_ex(uss, sp) do { \
+ compat_stack_t __user *__uss = uss; \
+ struct task_struct *t = current; \
+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
+ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+} while (0);
+
+asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
+ struct compat_timespec __user *interval);
+
+asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
+ int, const char __user *);
+#else
+
+#define is_compat_task() (0)
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
new file mode 100644
index 000000000..d1e49d52b
--- /dev/null
+++ b/include/linux/compiler-clang.h
@@ -0,0 +1,12 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/* Some compiler-specific definitions are overwritten here
+ * for the Clang compiler.
+ */
+
+#ifdef uninitialized_var
+#undef uninitialized_var
+#define uninitialized_var(x) x = *(&(x))
+#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
new file mode 100644
index 000000000..371e560d1
--- /dev/null
+++ b/include/linux/compiler-gcc.h
@@ -0,0 +1,133 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/*
+ * Common definitions for all gcc versions go here.
+ */
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+
+/* Optimization barrier */
+
+/* The "volatile" is due to gcc bugs */
+#define barrier() __asm__ __volatile__("": : :"memory")
+/*
+ * This version exists e.g. to prevent dead-store elimination on @ptr,
+ * where gcc and llvm may behave differently when otherwise using a
+ * normal barrier(): while gcc gets along with a normal barrier(),
+ * llvm needs an explicit input variable to be assumed clobbered. The
+ * issue is as follows: while the inline asm might access any memory it
+ * wants, the compiler could have fit all of @ptr into registers
+ * instead, and since @ptr never escaped from there, it proved that the
+ * inline asm wasn't touching any of it. This version works well with
+ * both compilers, i.e. we're telling the compiler that the inline asm
+ * absolutely may see the contents of @ptr. See also:
+ * https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
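The typical use of barrier_data() is wiping a buffer that is about to go out of scope, in the spirit of memzero_explicit() elsewhere in the tree; a hedged sketch, with an invented function name:

    static void example_wipe_key(void)
    {
            u8 key[32];

            /* ... use key ... */

            /* Without the barrier the compiler may drop this memset() as a
             * dead store; barrier_data(key) tells it the asm may read the
             * buffer, so the stores must actually happen. */
            memset(key, 0, sizeof(key));
            barrier_data(key);
    }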
+
+/*
+ * This macro obfuscates arithmetic on a variable address so that gcc
+ * shouldn't recognize the original var, and make assumptions about it.
+ *
+ * This is needed because the C standard makes it undefined to do
+ * pointer arithmetic on "objects" outside their boundaries and the
+ * gcc optimizers assume this is the case. In particular they
+ * assume such arithmetic does not wrap.
+ *
+ * A miscompilation has been observed because of this on PPC.
+ * To work around it we hide the relationship of the pointer and the object
+ * using this macro.
+ *
+ * Versions of the ppc64 compiler before 4.1 had a bug where use of
+ * RELOC_HIDE could trash r30. The bug can be worked around by changing
+ * the inline assembly constraint from =g to =r, in this particular
+ * case either is valid.
+ */
+#define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
+ (typeof(ptr)) (__ptr + (off)); })
+
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
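OPTIMIZER_HIDE_VAR() is useful where the compiler must not reason across an intermediate result, e.g. constant-time comparisons in the spirit of crypto_memneq(); a hedged sketch with invented names:

    static unsigned long example_memneq(const u8 *a, const u8 *b, size_t n)
    {
            unsigned long neq = 0;
            size_t i;

            for (i = 0; i < n; i++) {
                    neq |= a[i] ^ b[i];
                    /* Hide the accumulator so gcc cannot turn the loop back
                     * into an early-exit memcmp(). */
                    OPTIMIZER_HIDE_VAR(neq);
            }
            return neq;
    }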
+
+#ifdef __CHECKER__
+#define __must_be_array(arr) 0
+#else
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#endif
+
+/*
+ * Force always-inline if the user requests it so via the .config,
+ * or if gcc is too old:
+ */
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
+# define inline inline __attribute__((always_inline)) notrace
+# define __inline__ __inline__ __attribute__((always_inline)) notrace
+# define __inline __inline __attribute__((always_inline)) notrace
+#else
+/* A lot of inline functions can cause havoc with function tracing */
+# define inline inline notrace
+# define __inline__ __inline__ notrace
+# define __inline __inline notrace
+#endif
+
+#define __deprecated __attribute__((deprecated))
+#define __packed __attribute__((packed))
+#define __weak __attribute__((weak))
+#define __alias(symbol) __attribute__((alias(#symbol)))
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked) to trace
+ * naked functions because then mcount is called without stack and frame pointer
+ * being set up and there is no chance to restore the lr register to the value
+ * before mcount was called.
+ *
+ * The asm() bodies of naked functions often depend on standard calling conventions,
+ * therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
+ * this, so we must do so ourselves. See GCC PR44290.
+ */
+#define __naked __attribute__((naked)) noinline __noclone notrace
+
+#define __noreturn __attribute__((noreturn))
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions have no effects except the return value and their
+ * return value depends only on the parameters and/or global
+ * variables. Such a function can be subject to common subexpression
+ * elimination and loop optimization just as an arithmetic operator
+ * would be.
+ * [...]
+ */
+#define __pure __attribute__((pure))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __printf(a, b) __attribute__((format(printf, a, b)))
+#define __scanf(a, b) __attribute__((format(scanf, a, b)))
+#define noinline __attribute__((noinline))
+#define __attribute_const__ __attribute__((__const__))
+#define __maybe_unused __attribute__((unused))
+#define __always_unused __attribute__((unused))
+
+#define __gcc_header(x) #x
+#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
+#define gcc_header(x) _gcc_header(x)
+#include gcc_header(__GNUC__)
+
+#if !defined(__noclone)
+#define __noclone /* not needed */
+#endif
+
+/*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+#define uninitialized_var(x) x = x
+
+#define __always_inline inline __attribute__((always_inline))
diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
new file mode 100644
index 000000000..7d89febe4
--- /dev/null
+++ b/include/linux/compiler-gcc3.h
@@ -0,0 +1,23 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#if GCC_VERSION < 30200
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
+#if GCC_VERSION >= 30300
+# define __used __attribute__((__used__))
+#else
+# define __used __attribute__((__unused__))
+#endif
+
+#if GCC_VERSION >= 30400
+#define __must_check __attribute__((warn_unused_result))
+#endif
+
+#ifdef CONFIG_GCOV_KERNEL
+# if GCC_VERSION < 30400
+# error "GCOV profiling support for gcc versions below 3.4 not included"
+# endif /* __GNUC_MINOR__ */
+#endif /* CONFIG_GCOV_KERNEL */
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
new file mode 100644
index 000000000..769e19864
--- /dev/null
+++ b/include/linux/compiler-gcc4.h
@@ -0,0 +1,91 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
+#endif
+
+/* GCC 4.1.[01] miscompiles __weak */
+#ifdef __KERNEL__
+# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
+# error Your version of gcc miscompiles the __weak directive
+# endif
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+
+#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+#endif
+
+#if GCC_VERSION >= 40300
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+#endif /* GCC_VERSION >= 40300 */
+
+#if GCC_VERSION >= 40500
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
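A hedged sketch of the pattern the comment above describes; the trap instruction is an invented x86 example, not something this header provides:

    static int example_die(void)
    {
            asm volatile("ud2");    /* transfers control away (traps) */
            unreachable();          /* silences the missing-return warning */
    }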
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+#endif /* GCC_VERSION >= 40500 */
+
+#if GCC_VERSION >= 40600
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+#endif
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if GCC_VERSION >= 40400
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#if GCC_VERSION >= 40902
+#define KASAN_ABI_VERSION 3
+#endif
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
new file mode 100644
index 000000000..efee49371
--- /dev/null
+++ b/include/linux/compiler-gcc5.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
new file mode 100644
index 000000000..d4c71132d
--- /dev/null
+++ b/include/linux/compiler-intel.h
@@ -0,0 +1,45 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#ifdef __ECC
+
+/* Some compiler specific definitions are overwritten here
+ * for Intel ECC compiler
+ */
+
+#include <asm/intrinsics.h>
+
+/* Intel ECC compiler doesn't support gcc specific asm stmts.
+ * It uses intrinsics to do the equivalent things.
+ */
+#undef barrier
+#undef barrier_data
+#undef RELOC_HIDE
+#undef OPTIMIZER_HIDE_VAR
+
+#define barrier() __memory_barrier()
+#define barrier_data(ptr) barrier()
+
+#define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __ptr = (unsigned long) (ptr); \
+ (typeof(ptr)) (__ptr + (off)); })
+
+/* This should act as an optimization barrier on var.
+ * Given that this compiler does not have inline assembly, a compiler barrier
+ * is the best we can do.
+ */
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+
+/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
+#define __must_be_array(a) 0
+
+#endif
+
+#ifndef __HAVE_BUILTIN_BSWAP16__
+/* icc has this, but it's called _bswap16 */
+#define __HAVE_BUILTIN_BSWAP16__
+#define __builtin_bswap16 _bswap16
+#endif
+
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
new file mode 100644
index 000000000..867722591
--- /dev/null
+++ b/include/linux/compiler.h
@@ -0,0 +1,468 @@
+#ifndef __LINUX_COMPILER_H
+#define __LINUX_COMPILER_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef __CHECKER__
+# define __user __attribute__((noderef, address_space(1)))
+# define __kernel __attribute__((address_space(0)))
+# define __safe __attribute__((safe))
+# define __force __attribute__((force))
+# define __nocast __attribute__((nocast))
+# define __iomem __attribute__((noderef, address_space(2)))
+# define __must_hold(x) __attribute__((context(x,1,1)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu __attribute__((noderef, address_space(3)))
+#ifdef CONFIG_SPARSE_RCU_POINTER
+# define __rcu __attribute__((noderef, address_space(4)))
+#else
+# define __rcu
+#endif
+extern void __chk_user_ptr(const volatile void __user *);
+extern void __chk_io_ptr(const volatile void __iomem *);
+#else
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __nocast
+# define __iomem
+# define __chk_user_ptr(x) (void)0
+# define __chk_io_ptr(x) (void)0
+# define __builtin_warning(x, y...) (1)
+# define __must_hold(x)
+# define __acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+# define __percpu
+# define __rcu
+#endif
+
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
+#ifdef __KERNEL__
+
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#ifdef CC_USING_HOTPATCH
+#define notrace __attribute__((hotpatch(0,0)))
+#else
+#define notrace __attribute__((no_instrument_function))
+#endif
+
+/* Intel compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __INTEL_COMPILER
+# include <linux/compiler-intel.h>
+#endif
+
+/* Clang compiler defines __GNUC__. So we will overwrite implementations
+ * coming from above header files here
+ */
+#ifdef __clang__
+#include <linux/compiler-clang.h>
+#endif
+
+/*
+ * Generic compiler-dependent macros required for kernel
+ * build go below this comment. Actual compiler/compiler version
+ * specific implementations come from the above header files
+ */
+
+struct ftrace_branch_data {
+ const char *func;
+ const char *file;
+ unsigned line;
+ union {
+ struct {
+ unsigned long correct;
+ unsigned long incorrect;
+ };
+ struct {
+ unsigned long miss;
+ unsigned long hit;
+ };
+ unsigned long miss_hit[2];
+ };
+};
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+
+#define likely_notrace(x) __builtin_expect(!!(x), 1)
+#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
+
+#define __branch_check__(x, expect) ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_annotated_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = likely_notrace(x); \
+ ftrace_likely_update(&______f, ______r, expect); \
+ ______r; \
+ })
+
+/*
+ * Using __builtin_constant_p(x) to ignore cases where the return
+ * value is always the same. This idea is taken from a similar patch
+ * written by Daniel Walker.
+ */
+# ifndef likely
+# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+# endif
+# ifndef unlikely
+# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+# endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+/*
+ * "Define 'is'", Bill Clinton
+ * "Define 'if'", Steven Rostedt
+ */
+#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+#define __trace_if(cond) \
+ if (__builtin_constant_p((cond)) ? !!(cond) : \
+ ({ \
+ int ______r; \
+ static struct ftrace_branch_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_branch"))) \
+ ______f = { \
+ .func = __func__, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ }; \
+ ______r = !!(cond); \
+ ______f.miss_hit[______r]++; \
+ ______r; \
+ }))
+#endif /* CONFIG_PROFILE_ALL_BRANCHES */
+
+#else
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
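+
+/*
+ * Example (sketch, names illustrative): annotate the expected outcome of a
+ * branch so gcc can lay out the common path first:
+ *
+ *    if (unlikely(!buf))
+ *            return -ENOMEM;
+ *    if (likely(len > 0))
+ *            consume(buf, len);
+ */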
+
+/* Optimization barrier */
+#ifndef barrier
+# define barrier() __memory_barrier()
+#endif
+
+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
+
+/* Unreachable code */
+#ifndef unreachable
+# define unreachable() do { } while (1)
+#endif
+
+#ifndef RELOC_HIDE
+# define RELOC_HIDE(ptr, off) \
+ ({ unsigned long __ptr; \
+ __ptr = (unsigned long) (ptr); \
+ (typeof(ptr)) (__ptr + (off)); })
+#endif
+
+#ifndef OPTIMIZER_HIDE_VAR
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+#endif
+
+/* Not-quite-unique ID. */
+#ifndef __UNIQUE_ID
+# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
+#endif
+
+#include <uapi/linux/types.h>
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+ case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
+ case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+ case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)res, (const void *)p, size);
+ barrier();
+ }
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+ switch (size) {
+ case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+ case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+ case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+ case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+ default:
+ barrier();
+ __builtin_memcpy((void *)p, (const void *)res, size);
+ barrier();
+ }
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering. One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define READ_ONCE(x) \
+ ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#define WRITE_ONCE(x, val) \
+ ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
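+
+/*
+ * Example (sketch, field names illustrative): a flag shared between process
+ * context and an interrupt handler, accessed without locks.  The writer does
+ *
+ *    WRITE_ONCE(dev->shutting_down, true);
+ *
+ * and the reader does
+ *
+ *    if (READ_ONCE(dev->shutting_down))
+ *            return;
+ *
+ * which keeps the compiler from merging or refetching the two accesses.
+ */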
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+/*
+ * Allow us to mark functions as 'deprecated' and have gcc emit a nice
+ * warning for each use, in hopes of speeding the function's removal.
+ * Usage is:
+ * int __deprecated foo(void)
+ */
+#ifndef __deprecated
+# define __deprecated /* unimplemented */
+#endif
+
+#ifdef MODULE
+#define __deprecated_for_modules __deprecated
+#else
+#define __deprecated_for_modules
+#endif
+
+#ifndef __must_check
+#define __must_check
+#endif
+
+#ifndef CONFIG_ENABLE_MUST_CHECK
+#undef __must_check
+#define __must_check
+#endif
+#ifndef CONFIG_ENABLE_WARN_DEPRECATED
+#undef __deprecated
+#undef __deprecated_for_modules
+#define __deprecated
+#define __deprecated_for_modules
+#endif
+
+/*
+ * Allow us to avoid 'defined but not used' warnings on functions and data,
+ * as well as force them to be emitted to the assembly file.
+ *
+ * As of gcc 3.4, static functions that are not marked with attribute((used))
+ * may be elided from the assembly file. As of gcc 3.4, static data not so
+ * marked will not be elided, but this may change in a future gcc version.
+ *
+ * NOTE: Because distributions shipped with a backported unit-at-a-time
+ * compiler in gcc 3.3, we must define __used to be __attribute__((used))
+ * for gcc >=3.3 instead of 3.4.
+ *
+ * In prior versions of gcc, such functions and data would be emitted, but
+ * would be warned about except with attribute((unused)).
+ *
+ * Mark functions that are referenced only in inline assembly as __used so
+ * the code is emitted even though it appears to be unreferenced.
+ */
+#ifndef __used
+# define __used /* unimplemented */
+#endif
+
+#ifndef __maybe_unused
+# define __maybe_unused /* unimplemented */
+#endif
+
+#ifndef __always_unused
+# define __always_unused /* unimplemented */
+#endif
+
+#ifndef noinline
+#define noinline
+#endif
+
+/*
+ * Rather than using noinline to prevent stack consumption, use
+ * noinline_for_stack instead, for documentation reasons.
+ */
+#define noinline_for_stack noinline
+
+#ifndef __always_inline
+#define __always_inline inline
+#endif
+
+#endif /* __KERNEL__ */
+
+/*
+ * From the GCC manual:
+ *
+ * Many functions do not examine any values except their arguments,
+ * and have no effects except the return value. Basically this is
+ * just slightly more strict class than the `pure' attribute above,
+ * since function is not allowed to read global memory.
+ *
+ * Note that a function that has pointer arguments and examines the
+ * data pointed to must _not_ be declared `const'. Likewise, a
+ * function that calls a non-`const' function usually must not be
+ * `const'. It does not make sense for a `const' function to return
+ * `void'.
+ */
+#ifndef __attribute_const__
+# define __attribute_const__ /* unimplemented */
+#endif
+
+/*
+ * Tell gcc if a function is cold. The compiler will assume any path
+ * directly leading to the call is unlikely.
+ */
+
+#ifndef __cold
+#define __cold
+#endif
+
+/* Simple shorthand for a section definition */
+#ifndef __section
+# define __section(S) __attribute__ ((__section__(#S)))
+#endif
+
+#ifndef __visible
+#define __visible
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
+/* Is this type a native word size -- useful for atomic operations */
+#ifndef __native_word
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+#endif
+
+/* Compile time object size, -1 for unknown */
+#ifndef __compiletime_object_size
+# define __compiletime_object_size(obj) -1
+#endif
+#ifndef __compiletime_warning
+# define __compiletime_warning(message)
+#endif
+#ifndef __compiletime_error
+# define __compiletime_error(message)
+/*
+ * Sparse complains of variable sized arrays due to the temporary variable in
+ * __compiletime_assert. Unfortunately we can't just expand it out to make
+ * sparse see a constant array size without breaking compiletime_assert on old
+ * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
+ */
+# ifndef __CHECKER__
+# define __compiletime_error_fallback(condition) \
+ do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
+# endif
+#endif
+#ifndef __compiletime_error_fallback
+# define __compiletime_error_fallback(condition) do { } while (0)
+#endif
+
+#define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ bool __cond = !(condition); \
+ extern void prefix ## suffix(void) __compiletime_error(msg); \
+ if (__cond) \
+ prefix ## suffix(); \
+ __compiletime_error_fallback(__cond); \
+ } while (0)
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+
+/**
+ * compiletime_assert - break build and emit msg if condition is false
+ * @condition: a compile-time constant condition to check
+ * @msg: a message to emit if condition is false
+ *
+ * In tradition of POSIX assert, this macro will break the build if the
+ * supplied condition is *false*, emitting the supplied error message if the
+ * compiler has support to do so.
+ */
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
+
+#define compiletime_assert_atomic_type(t) \
+ compiletime_assert(__native_word(t), \
+ "Need native word sized stores/loads for atomicity.")
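+
+/*
+ * Example (sketch, names illustrative): fail the build if a structure
+ * outgrows the space reserved for it:
+ *
+ *    compiletime_assert(sizeof(struct foo_hdr) <= 64,
+ *                       "struct foo_hdr no longer fits in 64 bytes");
+ */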
+
+/*
+ * Prevent the compiler from merging or refetching accesses. The compiler
+ * is also forbidden from reordering successive instances of ACCESS_ONCE(),
+ * but only when the compiler is aware of some particular ordering. One way
+ * to make the compiler aware of ordering is to put the two invocations of
+ * ACCESS_ONCE() in different C statements.
+ *
+ * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
+ * on a union member will work as long as the size of the member matches the
+ * size of the union and the size is smaller than word size.
+ *
+ * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
+ * between process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ *
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
+ */
+#define __ACCESS_ONCE(x) ({ \
+ __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
+ (volatile typeof(x) *)&(x); })
+#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
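+
+/*
+ * Example (sketch, names illustrative): poll a scalar that another CPU
+ * updates, forcing a fresh load on every iteration:
+ *
+ *    while (!ACCESS_ONCE(ctx->done))
+ *            cpu_relax();
+ */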
+
+/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
+#ifdef CONFIG_KPROBES
+# define __kprobes __attribute__((__section__(".kprobes.text")))
+# define nokprobe_inline __always_inline
+#else
+# define __kprobes
+# define nokprobe_inline inline
+#endif
+#endif /* __LINUX_COMPILER_H */
diff --git a/include/linux/completion.h b/include/linux/completion.h
new file mode 100644
index 000000000..5d5aaae3a
--- /dev/null
+++ b/include/linux/completion.h
@@ -0,0 +1,109 @@
+#ifndef __LINUX_COMPLETION_H
+#define __LINUX_COMPLETION_H
+
+/*
+ * (C) Copyright 2001 Linus Torvalds
+ *
+ * Atomic wait-for-completion handler data structures.
+ * See kernel/sched/completion.c for details.
+ */
+
+#include <linux/wait.h>
+
+/*
+ * struct completion - structure used to maintain state for a "completion"
+ *
+ * This is the opaque structure used to maintain the state for a "completion".
+ * Completions currently use a FIFO to queue threads that have to wait for
+ * the "completion" event.
+ *
+ * See also: complete(), wait_for_completion() (and friends _timeout,
+ * _interruptible, _interruptible_timeout, and _killable), init_completion(),
+ * reinit_completion(), and macros DECLARE_COMPLETION(),
+ * DECLARE_COMPLETION_ONSTACK().
+ */
+struct completion {
+ unsigned int done;
+ wait_queue_head_t wait;
+};
+
+#define COMPLETION_INITIALIZER(work) \
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+
+#define COMPLETION_INITIALIZER_ONSTACK(work) \
+ ({ init_completion(&work); work; })
+
+/**
+ * DECLARE_COMPLETION - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure. Generally used
+ * for static declarations. You should use the _ONSTACK variant for automatic
+ * variables.
+ */
+#define DECLARE_COMPLETION(work) \
+ struct completion work = COMPLETION_INITIALIZER(work)
+
+/*
+ * Lockdep needs to run a non-constant initializer for on-stack
+ * completions - so we use the _ONSTACK() variant for those that
+ * are on the kernel stack:
+ */
+/**
+ * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure
+ * @work: identifier for the completion structure
+ *
+ * This macro declares and initializes a completion structure on the kernel
+ * stack.
+ */
+#ifdef CONFIG_LOCKDEP
+# define DECLARE_COMPLETION_ONSTACK(work) \
+ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
+#else
+# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
+#endif
+
+/**
+ * init_completion - Initialize a dynamically allocated completion
+ * @x: pointer to completion structure that is to be initialized
+ *
+ * This inline function will initialize a dynamically created completion
+ * structure.
+ */
+static inline void init_completion(struct completion *x)
+{
+ x->done = 0;
+ init_waitqueue_head(&x->wait);
+}
+
+/**
+ * reinit_completion - reinitialize a completion structure
+ * @x: pointer to completion structure that is to be reinitialized
+ *
+ * This inline function should be used to reinitialize a completion structure so it can
+ * be reused. This is especially important after complete_all() is used.
+ */
+static inline void reinit_completion(struct completion *x)
+{
+ x->done = 0;
+}
+
+extern void wait_for_completion(struct completion *);
+extern void wait_for_completion_io(struct completion *);
+extern int wait_for_completion_interruptible(struct completion *x);
+extern int wait_for_completion_killable(struct completion *x);
+extern unsigned long wait_for_completion_timeout(struct completion *x,
+ unsigned long timeout);
+extern unsigned long wait_for_completion_io_timeout(struct completion *x,
+ unsigned long timeout);
+extern long wait_for_completion_interruptible_timeout(
+ struct completion *x, unsigned long timeout);
+extern long wait_for_completion_killable_timeout(
+ struct completion *x, unsigned long timeout);
+extern bool try_wait_for_completion(struct completion *x);
+extern bool completion_done(struct completion *x);
+
+extern void complete(struct completion *);
+extern void complete_all(struct completion *);
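+
+/*
+ * Example (sketch, names illustrative): one context sleeps until another
+ * signals that setup has finished:
+ *
+ *    static DECLARE_COMPLETION(setup_done);
+ *
+ * The waiting side calls
+ *
+ *    wait_for_completion(&setup_done);
+ *
+ * and the completing side calls
+ *
+ *    complete(&setup_done);
+ */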
+
+#endif
diff --git a/include/linux/component.h b/include/linux/component.h
new file mode 100644
index 000000000..c00dcc302
--- /dev/null
+++ b/include/linux/component.h
@@ -0,0 +1,39 @@
+#ifndef COMPONENT_H
+#define COMPONENT_H
+
+struct device;
+
+struct component_ops {
+ int (*bind)(struct device *, struct device *, void *);
+ void (*unbind)(struct device *, struct device *, void *);
+};
+
+int component_add(struct device *, const struct component_ops *);
+void component_del(struct device *, const struct component_ops *);
+
+int component_bind_all(struct device *, void *);
+void component_unbind_all(struct device *, void *);
+
+struct master;
+
+struct component_master_ops {
+ int (*add_components)(struct device *, struct master *);
+ int (*bind)(struct device *);
+ void (*unbind)(struct device *);
+};
+
+int component_master_add(struct device *, const struct component_master_ops *);
+void component_master_del(struct device *,
+ const struct component_master_ops *);
+
+int component_master_add_child(struct master *master,
+ int (*compare)(struct device *, void *), void *compare_data);
+
+struct component_match;
+
+int component_master_add_with_match(struct device *,
+ const struct component_master_ops *, struct component_match *);
+void component_match_add(struct device *, struct component_match **,
+ int (*compare)(struct device *, void *), void *compare_data);
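+
+/*
+ * Example (sketch, names illustrative): a master driver collects its
+ * children into a match list and binds them as a group, while each child
+ * driver registers itself with component_add():
+ *
+ *    component_match_add(dev, &match, my_compare, child_node);
+ *    component_master_add_with_match(dev, &my_master_ops, match);
+ *
+ *    component_add(child_dev, &my_component_ops);
+ */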
+
+#endif
diff --git a/include/linux/concap.h b/include/linux/concap.h
new file mode 100644
index 000000000..977acb3d1
--- /dev/null
+++ b/include/linux/concap.h
@@ -0,0 +1,112 @@
+/* $Id: concap.h,v 1.3.2.2 2004/01/12 23:08:35 keil Exp $
+ *
+ * Copyright 1997 by Henner Eisen <eis@baty.hanse.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ */
+
+#ifndef _LINUX_CONCAP_H
+#define _LINUX_CONCAP_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+/* Stuff to support encapsulation protocols generically. The encapsulation
+ protocol is processed at the uppermost layer of the network interface.
+
+   Based on ideas developed in a 'synchronous device' thread in the
+ linux-x25 mailing list contributed by Alan Cox, Thomasz Motylewski
+ and Jonathan Naylor.
+
+   For more documentation on this refer to Documentation/isdn/README.concap
+*/
+
+struct concap_proto_ops;
+struct concap_device_ops;
+
+/* this manages all data needed by the encapsulation protocol
+ */
+struct concap_proto{
+ struct net_device *net_dev; /* net device using our service */
+ struct concap_device_ops *dops; /* callbacks provided by device */
+ struct concap_proto_ops *pops; /* callbacks provided by us */
+ spinlock_t lock;
+ int flags;
+ void *proto_data; /* protocol specific private data, to
+ be accessed via *pops methods only*/
+ /*
+ :
+ whatever
+ :
+ */
+};
+
+/* Operations to be supported by the net device. Called by the encapsulation
+ * protocol entity. No receive method is offered because the encapsulation
+ * protocol directly calls netif_rx().
+ */
+struct concap_device_ops{
+
+	/* to request that data be submitted by the device */
+ int (*data_req)(struct concap_proto *, struct sk_buff *);
+
+ /* Control methods must be set to NULL by devices which do not
+ support connection control.*/
+	/* to request that a connection be set up */
+ int (*connect_req)(struct concap_proto *);
+
+	/* to request that a connection be released */
+ int (*disconn_req)(struct concap_proto *);
+};
+
+/* Operations to be supported by the encapsulation protocol. Called by
+ * device driver.
+ */
+struct concap_proto_ops{
+
+ /* create a new encapsulation protocol instance of same type */
+ struct concap_proto * (*proto_new) (void);
+
+ /* delete encapsulation protocol instance and free all its resources.
+	   cprot may no longer be referenced after calling this */
+ void (*proto_del)(struct concap_proto *cprot);
+
+ /* initialize the protocol's data. To be called at interface startup
+ or when the device driver resets the interface. All services of the
+ encapsulation protocol may be used after this*/
+ int (*restart)(struct concap_proto *cprot,
+ struct net_device *ndev,
+ struct concap_device_ops *dops);
+
+ /* inactivate an encapsulation protocol instance. The encapsulation
+ protocol may not call any *dops methods after this. */
+ int (*close)(struct concap_proto *cprot);
+
+ /* process a frame handed down to us by upper layer */
+ int (*encap_and_xmit)(struct concap_proto *cprot, struct sk_buff *skb);
+
+ /* to be called for each data entity received from lower layer*/
+ int (*data_ind)(struct concap_proto *cprot, struct sk_buff *skb);
+
+ /* to be called when a connection was set up/down.
+ Protocols that don't process these primitives might fill in
+ dummy methods here */
+ int (*connect_ind)(struct concap_proto *cprot);
+ int (*disconn_ind)(struct concap_proto *cprot);
+ /*
+ Some network device support functions, like net_header(), rebuild_header(),
+ and others, that depend solely on the encapsulation protocol, might
+ be provided here, too. The net device would just fill them in its
+ corresponding fields when it is opened.
+ */
+};
+
+/* dummy restart/close/connect/reset/disconn methods
+ */
+extern int concap_nop(struct concap_proto *cprot);
+
+/* dummy submit method
+ */
+extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb);
+#endif
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
new file mode 100644
index 000000000..34025df61
--- /dev/null
+++ b/include/linux/configfs.h
@@ -0,0 +1,260 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * configfs.h - definitions for the device driver filesystem
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * Based on sysfs:
+ * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
+ *
+ * Based on kobject.h:
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ *
+ * configfs Copyright (C) 2005 Oracle. All rights reserved.
+ *
+ * Please read Documentation/filesystems/configfs/configfs.txt before using
+ * the configfs interface, ESPECIALLY the parts about reference counts and
+ * item destructors.
+ */
+
+#ifndef _CONFIGFS_H_
+#define _CONFIGFS_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+
+#include <linux/atomic.h>
+
+#define CONFIGFS_ITEM_NAME_LEN 20
+
+struct module;
+
+struct configfs_item_operations;
+struct configfs_group_operations;
+struct configfs_attribute;
+struct configfs_subsystem;
+
+struct config_item {
+ char *ci_name;
+ char ci_namebuf[CONFIGFS_ITEM_NAME_LEN];
+ struct kref ci_kref;
+ struct list_head ci_entry;
+ struct config_item *ci_parent;
+ struct config_group *ci_group;
+ struct config_item_type *ci_type;
+ struct dentry *ci_dentry;
+};
+
+extern int config_item_set_name(struct config_item *, const char *, ...);
+
+static inline char *config_item_name(struct config_item * item)
+{
+ return item->ci_name;
+}
+
+extern void config_item_init(struct config_item *);
+extern void config_item_init_type_name(struct config_item *item,
+ const char *name,
+ struct config_item_type *type);
+
+extern struct config_item * config_item_get(struct config_item *);
+extern void config_item_put(struct config_item *);
+
+struct config_item_type {
+ struct module *ct_owner;
+ struct configfs_item_operations *ct_item_ops;
+ struct configfs_group_operations *ct_group_ops;
+ struct configfs_attribute **ct_attrs;
+};
+
+/**
+ * group - a group of config_items of a specific type, belonging
+ * to a specific subsystem.
+ */
+struct config_group {
+ struct config_item cg_item;
+ struct list_head cg_children;
+ struct configfs_subsystem *cg_subsys;
+ struct config_group **default_groups;
+};
+
+extern void config_group_init(struct config_group *group);
+extern void config_group_init_type_name(struct config_group *group,
+ const char *name,
+ struct config_item_type *type);
+
+static inline struct config_group *to_config_group(struct config_item *item)
+{
+ return item ? container_of(item,struct config_group,cg_item) : NULL;
+}
+
+static inline struct config_group *config_group_get(struct config_group *group)
+{
+ return group ? to_config_group(config_item_get(&group->cg_item)) : NULL;
+}
+
+static inline void config_group_put(struct config_group *group)
+{
+ config_item_put(&group->cg_item);
+}
+
+extern struct config_item *config_group_find_item(struct config_group *,
+ const char *);
+
+
+struct configfs_attribute {
+ const char *ca_name;
+ struct module *ca_owner;
+ umode_t ca_mode;
+};
+
+/*
+ * Users often need to create attribute structures for their configurable
+ * attributes, containing a configfs_attribute member and function pointers
+ * for the show() and store() operations on that attribute. If they don't
+ * need anything else on the extended attribute structure, they can use
+ * this macro to define it. The argument _item is the name of the
+ * config_item structure.
+ */
+#define CONFIGFS_ATTR_STRUCT(_item) \
+struct _item##_attribute { \
+ struct configfs_attribute attr; \
+ ssize_t (*show)(struct _item *, char *); \
+ ssize_t (*store)(struct _item *, const char *, size_t); \
+}
+
+/*
+ * With the extended attribute structure, users can use this macro
+ * (similar to sysfs' __ATTR) to make defining attributes easier.
+ * An example:
+ * #define MYITEM_ATTR(_name, _mode, _show, _store) \
+ * struct myitem_attribute childless_attr_##_name = \
+ * __CONFIGFS_ATTR(_name, _mode, _show, _store)
+ */
+#define __CONFIGFS_ATTR(_name, _mode, _show, _store) \
+{ \
+ .attr = { \
+ .ca_name = __stringify(_name), \
+ .ca_mode = _mode, \
+ .ca_owner = THIS_MODULE, \
+ }, \
+ .show = _show, \
+ .store = _store, \
+}
+/* Here is a readonly version, only requiring a show() operation */
+#define __CONFIGFS_ATTR_RO(_name, _show) \
+{ \
+ .attr = { \
+ .ca_name = __stringify(_name), \
+ .ca_mode = 0444, \
+ .ca_owner = THIS_MODULE, \
+ }, \
+ .show = _show, \
+}
+
+/*
+ * With these extended attributes, the simple show_attribute() and
+ * store_attribute() operations need to call the show() and store() of the
+ * attributes. This is a common pattern, so we provide a macro to define
+ * them. The argument _item is the name of the config_item structure.
+ * This macro expects the attributes to be named "struct <name>_attribute"
+ * and the function to_<name>() to exist.
+ */
+#define CONFIGFS_ATTR_OPS(_item) \
+static ssize_t _item##_attr_show(struct config_item *item, \
+ struct configfs_attribute *attr, \
+ char *page) \
+{ \
+ struct _item *_item = to_##_item(item); \
+ struct _item##_attribute *_item##_attr = \
+ container_of(attr, struct _item##_attribute, attr); \
+ ssize_t ret = 0; \
+ \
+ if (_item##_attr->show) \
+ ret = _item##_attr->show(_item, page); \
+ return ret; \
+} \
+static ssize_t _item##_attr_store(struct config_item *item, \
+ struct configfs_attribute *attr, \
+ const char *page, size_t count) \
+{ \
+ struct _item *_item = to_##_item(item); \
+ struct _item##_attribute *_item##_attr = \
+ container_of(attr, struct _item##_attribute, attr); \
+ ssize_t ret = -EINVAL; \
+ \
+ if (_item##_attr->store) \
+ ret = _item##_attr->store(_item, page, count); \
+ return ret; \
+}
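+
+/*
+ * Example (sketch; "myitem" and the to_myitem() helper are illustrative):
+ * the three helpers above combine as follows for an item type with one
+ * read-only attribute:
+ *
+ *    CONFIGFS_ATTR_STRUCT(myitem);
+ *    CONFIGFS_ATTR_OPS(myitem);
+ *
+ *    static struct myitem_attribute myitem_attr_status =
+ *            __CONFIGFS_ATTR_RO(status, myitem_status_show);
+ *
+ * The generated myitem_attr_show()/myitem_attr_store() are then wired into
+ * the item type's configfs_item_operations.
+ */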
+
+/*
+ * If allow_link() exists, the item can symlink(2) out to other
+ * items. If the item is a group, it may support mkdir(2).
+ * Groups supply one of make_group() and make_item(). If the
+ * group supports make_group(), one can create group children. If it
+ * supports make_item(), one can create config_item children. make_group()
+ * and make_item() return ERR_PTR() on errors. If it has
+ * default_groups on group->default_groups, it has automatically created
+ * group children. default_groups may coexist alongside make_group() or
+ * make_item(), but if the group wishes to have only default_groups
+ * children (disallowing mkdir(2)), it need not provide either function.
+ * If the group has commit(), it supports pending and committed (active)
+ * items.
+ */
+struct configfs_item_operations {
+ void (*release)(struct config_item *);
+ ssize_t (*show_attribute)(struct config_item *, struct configfs_attribute *,char *);
+ ssize_t (*store_attribute)(struct config_item *,struct configfs_attribute *,const char *, size_t);
+ int (*allow_link)(struct config_item *src, struct config_item *target);
+ int (*drop_link)(struct config_item *src, struct config_item *target);
+};
+
+struct configfs_group_operations {
+ struct config_item *(*make_item)(struct config_group *group, const char *name);
+ struct config_group *(*make_group)(struct config_group *group, const char *name);
+ int (*commit_item)(struct config_item *item);
+ void (*disconnect_notify)(struct config_group *group, struct config_item *item);
+ void (*drop_item)(struct config_group *group, struct config_item *item);
+};
+
+struct configfs_subsystem {
+ struct config_group su_group;
+ struct mutex su_mutex;
+};
+
+static inline struct configfs_subsystem *to_configfs_subsystem(struct config_group *group)
+{
+ return group ?
+ container_of(group, struct configfs_subsystem, su_group) :
+ NULL;
+}
+
+int configfs_register_subsystem(struct configfs_subsystem *subsys);
+void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
+
+/* These functions can sleep and can alloc with GFP_KERNEL */
+/* WARNING: These cannot be called underneath configfs callbacks!! */
+int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
+void configfs_undepend_item(struct configfs_subsystem *subsys, struct config_item *target);
+
+#endif /* _CONFIGFS_H_ */
diff --git a/include/linux/connector.h b/include/linux/connector.h
new file mode 100644
index 000000000..f8fe8637d
--- /dev/null
+++ b/include/linux/connector.h
@@ -0,0 +1,88 @@
+/*
+ * connector.h
+ *
+ * 2004-2005 Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __CONNECTOR_H
+#define __CONNECTOR_H
+
+
+#include <linux/atomic.h>
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+
+#include <net/sock.h>
+#include <uapi/linux/connector.h>
+
+#define CN_CBQ_NAMELEN 32
+
+struct cn_queue_dev {
+ atomic_t refcnt;
+ unsigned char name[CN_CBQ_NAMELEN];
+
+ struct list_head queue_list;
+ spinlock_t queue_lock;
+
+ struct sock *nls;
+};
+
+struct cn_callback_id {
+ unsigned char name[CN_CBQ_NAMELEN];
+ struct cb_id id;
+};
+
+struct cn_callback_entry {
+ struct list_head callback_entry;
+ atomic_t refcnt;
+ struct cn_queue_dev *pdev;
+
+ struct cn_callback_id id;
+ void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
+
+ u32 seq, group;
+};
+
+struct cn_dev {
+ struct cb_id id;
+
+ u32 seq, groups;
+ struct sock *nls;
+ void (*input) (struct sk_buff *skb);
+
+ struct cn_queue_dev *cbdev;
+};
+
+int cn_add_callback(struct cb_id *id, const char *name,
+ void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
+void cn_del_callback(struct cb_id *);
+int cn_netlink_send_mult(struct cn_msg *msg, u16 len, u32 portid, u32 group, gfp_t gfp_mask);
+int cn_netlink_send(struct cn_msg *msg, u32 portid, u32 group, gfp_t gfp_mask);
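+
+/*
+ * Example (sketch; the id values and names are illustrative): register a
+ * callback for one idx/val pair and push a message to its netlink group:
+ *
+ *    static struct cb_id my_id = { .idx = 0x20, .val = 0x1 };
+ *
+ *    cn_add_callback(&my_id, "my_connector", my_callback);
+ *    cn_netlink_send(msg, 0, my_id.idx, GFP_KERNEL);
+ */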
+
+int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
+ struct cb_id *id,
+ void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
+void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+void cn_queue_release_callback(struct cn_callback_entry *);
+
+struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
+void cn_queue_free_dev(struct cn_queue_dev *dev);
+
+int cn_cb_equal(struct cb_id *, struct cb_id *);
+
+#endif /* __CONNECTOR_H */
diff --git a/include/linux/console.h b/include/linux/console.h
new file mode 100644
index 000000000..ab5d0d486
--- /dev/null
+++ b/include/linux/console.h
@@ -0,0 +1,194 @@
+/*
+ * linux/include/linux/console.h
+ *
+ * Copyright (C) 1993 Hamish Macdonald
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Changed:
+ * 10-Mar-94: Arno Griffioen: Conversion for vt100 emulator port from PC LINUX
+ */
+
+#ifndef _LINUX_CONSOLE_H_
+#define _LINUX_CONSOLE_H_ 1
+
+#include <linux/types.h>
+
+struct vc_data;
+struct console_font_op;
+struct console_font;
+struct module;
+struct tty_struct;
+
+/*
+ * this is what the terminal answers to a ESC-Z or csi0c query.
+ */
+#define VT100ID "\033[?1;2c"
+#define VT102ID "\033[?6c"
+
+struct consw {
+ struct module *owner;
+ const char *(*con_startup)(void);
+ void (*con_init)(struct vc_data *, int);
+ void (*con_deinit)(struct vc_data *);
+ void (*con_clear)(struct vc_data *, int, int, int, int);
+ void (*con_putc)(struct vc_data *, int, int, int);
+ void (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
+ void (*con_cursor)(struct vc_data *, int);
+ int (*con_scroll)(struct vc_data *, int, int, int, int);
+ void (*con_bmove)(struct vc_data *, int, int, int, int, int, int);
+ int (*con_switch)(struct vc_data *);
+ int (*con_blank)(struct vc_data *, int, int);
+ int (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
+ int (*con_font_get)(struct vc_data *, struct console_font *);
+ int (*con_font_default)(struct vc_data *, struct console_font *, char *);
+ int (*con_font_copy)(struct vc_data *, int);
+ int (*con_resize)(struct vc_data *, unsigned int, unsigned int,
+ unsigned int);
+ int (*con_set_palette)(struct vc_data *, unsigned char *);
+ int (*con_scrolldelta)(struct vc_data *, int);
+ int (*con_set_origin)(struct vc_data *);
+ void (*con_save_screen)(struct vc_data *);
+ u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
+ void (*con_invert_region)(struct vc_data *, u16 *, int);
+ u16 *(*con_screen_pos)(struct vc_data *, int);
+ unsigned long (*con_getxy)(struct vc_data *, unsigned long, int *, int *);
+ /*
+ * Prepare the console for the debugger. This includes, but is not
+ * limited to, unblanking the console, loading an appropriate
+ * palette, and allowing debugger generated output.
+ */
+ int (*con_debug_enter)(struct vc_data *);
+ /*
+ * Restore the console to its pre-debug state as closely as possible.
+ */
+ int (*con_debug_leave)(struct vc_data *);
+};
+
+extern const struct consw *conswitchp;
+
+extern const struct consw dummy_con; /* dummy console buffer */
+extern const struct consw vga_con; /* VGA text console */
+extern const struct consw newport_con; /* SGI Newport console */
+extern const struct consw prom_con; /* SPARC PROM console */
+
+int con_is_bound(const struct consw *csw);
+int do_unregister_con_driver(const struct consw *csw);
+int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
+void give_up_console(const struct consw *sw);
+#ifdef CONFIG_HW_CONSOLE
+int con_debug_enter(struct vc_data *vc);
+int con_debug_leave(void);
+#else
+static inline int con_debug_enter(struct vc_data *vc)
+{
+ return 0;
+}
+static inline int con_debug_leave(void)
+{
+ return 0;
+}
+#endif
+
+/* scroll */
+#define SM_UP (1)
+#define SM_DOWN (2)
+
+/* cursor */
+#define CM_DRAW (1)
+#define CM_ERASE (2)
+#define CM_MOVE (3)
+
+/*
+ * The interface for a console, or any other device that wants to capture
+ * console messages (printer driver?)
+ *
+ * If a console driver is marked CON_BOOT then it will be auto-unregistered
+ * when the first real console is registered. This is for early-printk drivers.
+ */
+
+#define CON_PRINTBUFFER (1)
+#define CON_CONSDEV (2) /* Last on the command line */
+#define CON_ENABLED (4)
+#define CON_BOOT (8)
+#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
+#define CON_BRL (32) /* Used for a braille device */
+
+struct console {
+ char name[16];
+	void (*write)(struct console *, const char *, unsigned);
+ int (*read)(struct console *, char *, unsigned);
+ struct tty_driver *(*device)(struct console *, int *);
+ void (*unblank)(void);
+ int (*setup)(struct console *, char *);
+ int (*match)(struct console *, char *name, int idx, char *options);
+ short flags;
+ short index;
+ int cflag;
+ void *data;
+ struct console *next;
+};
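+
+/*
+ * Example (sketch, names illustrative): a minimal write-only console that
+ * replays the log buffer when registered:
+ *
+ *    static void my_con_write(struct console *con, const char *s, unsigned n)
+ *    {
+ *            my_uart_put_bytes(s, n);
+ *    }
+ *
+ *    static struct console my_con = {
+ *            .name  = "mycon",
+ *            .write = my_con_write,
+ *            .flags = CON_PRINTBUFFER,
+ *            .index = -1,
+ *    };
+ *
+ *    register_console(&my_con);
+ */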
+
+/*
+ * for_each_console() allows you to iterate on each console
+ */
+#define for_each_console(con) \
+ for (con = console_drivers; con != NULL; con = con->next)
+
+extern int console_set_on_cmdline;
+extern struct console *early_console;
+
+extern int add_preferred_console(char *name, int idx, char *options);
+extern void register_console(struct console *);
+extern int unregister_console(struct console *);
+extern struct console *console_drivers;
+extern void console_lock(void);
+extern int console_trylock(void);
+extern void console_unlock(void);
+extern void console_conditional_schedule(void);
+extern void console_unblank(void);
+extern struct tty_driver *console_device(int *);
+extern void console_stop(struct console *);
+extern void console_start(struct console *);
+extern int is_console_locked(void);
+extern int braille_register_console(struct console *, int index,
+ char *console_options, char *braille_options);
+extern int braille_unregister_console(struct console *);
+#ifdef CONFIG_TTY
+extern void console_sysfs_notify(void);
+#else
+static inline void console_sysfs_notify(void)
+{ }
+#endif
+extern bool console_suspend_enabled;
+
+/* Suspend and resume console messages over PM events */
+extern void suspend_console(void);
+extern void resume_console(void);
+
+int mda_console_init(void);
+void prom_con_init(void);
+
+void vcs_make_sysfs(int index);
+void vcs_remove_sysfs(int index);
+
+/* Some debug stub to catch some of the obvious races in the VT code */
+#if 1
+#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress)
+#else
+#define WARN_CONSOLE_UNLOCKED()
+#endif
+
+/* VESA Blanking Levels */
+#define VESA_NO_BLANKING 0
+#define VESA_VSYNC_SUSPEND 1
+#define VESA_HSYNC_SUSPEND 2
+#define VESA_POWERDOWN 3
+
+#ifdef CONFIG_VGA_CONSOLE
+extern bool vgacon_text_force(void);
+#endif
+
+#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
new file mode 100644
index 000000000..e859c98d1
--- /dev/null
+++ b/include/linux/console_struct.h
@@ -0,0 +1,140 @@
+/*
+ * console_struct.h
+ *
+ * Data structure describing single virtual console except for data
+ * used by vt.c.
+ *
+ * Fields marked with [#] must be set by the low-level driver.
+ * Fields marked with [!] can be changed by the low-level driver
+ * to achieve effects such as fast scrolling by changing the origin.
+ */
+
+#ifndef _LINUX_CONSOLE_STRUCT_H
+#define _LINUX_CONSOLE_STRUCT_H
+
+#include <linux/wait.h>
+#include <linux/vt.h>
+#include <linux/workqueue.h>
+
+struct vt_struct;
+struct uni_pagedir;
+
+#define NPAR 16
+
+struct vc_data {
+ struct tty_port port; /* Upper level data */
+
+ unsigned short vc_num; /* Console number */
+ unsigned int vc_cols; /* [#] Console size */
+ unsigned int vc_rows;
+ unsigned int vc_size_row; /* Bytes per row */
+ unsigned int vc_scan_lines; /* # of scan lines */
+ unsigned long vc_origin; /* [!] Start of real screen */
+ unsigned long vc_scr_end; /* [!] End of real screen */
+ unsigned long vc_visible_origin; /* [!] Top of visible window */
+ unsigned int vc_top, vc_bottom; /* Scrolling region */
+ const struct consw *vc_sw;
+ unsigned short *vc_screenbuf; /* In-memory character/attribute buffer */
+ unsigned int vc_screenbuf_size;
+ unsigned char vc_mode; /* KD_TEXT, ... */
+ /* attributes for all characters on screen */
+ unsigned char vc_attr; /* Current attributes */
+ unsigned char vc_def_color; /* Default colors */
+ unsigned char vc_color; /* Foreground & background */
+ unsigned char vc_s_color; /* Saved foreground & background */
+ unsigned char vc_ulcolor; /* Color for underline mode */
+ unsigned char vc_itcolor;
+ unsigned char vc_halfcolor; /* Color for half intensity mode */
+ /* cursor */
+ unsigned int vc_cursor_type;
+ unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */
+ unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */
+ unsigned int vc_x, vc_y; /* Cursor position */
+ unsigned int vc_saved_x, vc_saved_y;
+ unsigned long vc_pos; /* Cursor address */
+ /* fonts */
+ unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
+ struct console_font vc_font; /* Current VC font set */
+ unsigned short vc_video_erase_char; /* Background erase character */
+ /* VT terminal data */
+ unsigned int vc_state; /* Escape sequence parser state */
+ unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */
+ /* data for manual vt switching */
+ struct vt_mode vt_mode;
+ struct pid *vt_pid;
+ int vt_newvt;
+ wait_queue_head_t paste_wait;
+ /* mode flags */
+ unsigned int vc_charset : 1; /* Character set G0 / G1 */
+ unsigned int vc_s_charset : 1; /* Saved character set */
+ unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */
+ unsigned int vc_toggle_meta : 1; /* Toggle high bit? */
+ unsigned int vc_decscnm : 1; /* Screen Mode */
+ unsigned int vc_decom : 1; /* Origin Mode */
+ unsigned int vc_decawm : 1; /* Autowrap Mode */
+ unsigned int vc_deccm : 1; /* Cursor Visible */
+ unsigned int vc_decim : 1; /* Insert Mode */
+ unsigned int vc_deccolm : 1; /* 80/132 Column Mode */
+ /* attribute flags */
+ unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */
+ unsigned int vc_italic:1;
+ unsigned int vc_underline : 1;
+ unsigned int vc_blink : 1;
+ unsigned int vc_reverse : 1;
+ unsigned int vc_s_intensity : 2; /* saved rendition */
+ unsigned int vc_s_italic:1;
+ unsigned int vc_s_underline : 1;
+ unsigned int vc_s_blink : 1;
+ unsigned int vc_s_reverse : 1;
+ /* misc */
+ unsigned int vc_ques : 1;
+ unsigned int vc_need_wrap : 1;
+ unsigned int vc_can_do_color : 1;
+ unsigned int vc_report_mouse : 2;
+ unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */
+ unsigned char vc_utf_count;
+ int vc_utf_char;
+ unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */
+ unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */
+ unsigned short * vc_translate;
+ unsigned char vc_G0_charset;
+ unsigned char vc_G1_charset;
+ unsigned char vc_saved_G0;
+ unsigned char vc_saved_G1;
+ unsigned int vc_resize_user; /* resize request from user */
+ unsigned int vc_bell_pitch; /* Console bell pitch */
+ unsigned int vc_bell_duration; /* Console bell duration */
+ struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
+ struct uni_pagedir *vc_uni_pagedir;
+ struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
+ /* additional information is in vt_kern.h */
+};
+
+struct vc {
+ struct vc_data *d;
+ struct work_struct SAK_work;
+
+ /* might add scrmem, vt_struct, kbd at some time,
+ to have everything in one place - the disadvantage
+ would be that vc_cons etc can no longer be static */
+};
+
+extern struct vc vc_cons [MAX_NR_CONSOLES];
+extern void vc_SAK(struct work_struct *work);
+
+#define CUR_DEF 0
+#define CUR_NONE 1
+#define CUR_UNDERLINE 2
+#define CUR_LOWER_THIRD 3
+#define CUR_LOWER_HALF 4
+#define CUR_TWO_THIRDS 5
+#define CUR_BLOCK 6
+#define CUR_HWMASK 0x0f
+#define CUR_SWMASK 0xfff0
+
+#define CUR_DEFAULT CUR_UNDERLINE
+
+#define CON_IS_VISIBLE(conp) (*conp->vc_display_fg == conp)
+
+#endif /* _LINUX_CONSOLE_STRUCT_H */
diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h
new file mode 100644
index 000000000..c4811da13
--- /dev/null
+++ b/include/linux/consolemap.h
@@ -0,0 +1,34 @@
+/*
+ * consolemap.h
+ *
+ * Interface between console.c, selection.c and consolemap.c
+ */
+#ifndef __LINUX_CONSOLEMAP_H__
+#define __LINUX_CONSOLEMAP_H__
+
+#define LAT1_MAP 0
+#define GRAF_MAP 1
+#define IBMPC_MAP 2
+#define USER_MAP 3
+
+#include <linux/types.h>
+
+#ifdef CONFIG_CONSOLE_TRANSLATIONS
+struct vc_data;
+
+extern u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode);
+extern unsigned short *set_translate(int m, struct vc_data *vc);
+extern int conv_uni_to_pc(struct vc_data *conp, long ucs);
+extern u32 conv_8bit_to_uni(unsigned char c);
+extern int conv_uni_to_8bit(u32 uni);
+void console_map_init(void);
+#else
+#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph)
+#define set_translate(m, vc) ((unsigned short *)NULL)
+#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs))
+#define conv_8bit_to_uni(c) ((uint32_t)(c))
+#define conv_uni_to_8bit(c) ((int) ((c) & 0xff))
+#define console_map_init(c) do { ; } while (0)
+#endif /* CONFIG_CONSOLE_TRANSLATIONS */
+
+#endif /* __LINUX_CONSOLEMAP_H__ */
diff --git a/include/linux/container.h b/include/linux/container.h
new file mode 100644
index 000000000..3c03e6fd2
--- /dev/null
+++ b/include/linux/container.h
@@ -0,0 +1,25 @@
+/*
+ * Definitions for container bus type.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+
+/* drivers/base/power/container.c */
+extern struct bus_type container_subsys;
+
+struct container_dev {
+ struct device dev;
+ int (*offline)(struct container_dev *cdev);
+};
+
+static inline struct container_dev *to_container_dev(struct device *dev)
+{
+ return container_of(dev, struct container_dev, dev);
+}
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
new file mode 100644
index 000000000..282183825
--- /dev/null
+++ b/include/linux/context_tracking.h
@@ -0,0 +1,120 @@
+#ifndef _LINUX_CONTEXT_TRACKING_H
+#define _LINUX_CONTEXT_TRACKING_H
+
+#include <linux/sched.h>
+#include <linux/vtime.h>
+#include <linux/context_tracking_state.h>
+#include <asm/ptrace.h>
+
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern void context_tracking_cpu_set(int cpu);
+
+extern void context_tracking_enter(enum ctx_state state);
+extern void context_tracking_exit(enum ctx_state state);
+extern void context_tracking_user_enter(void);
+extern void context_tracking_user_exit(void);
+extern void __context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next);
+
+static inline void user_enter(void)
+{
+ if (context_tracking_is_enabled())
+ context_tracking_user_enter();
+
+}
+static inline void user_exit(void)
+{
+ if (context_tracking_is_enabled())
+ context_tracking_user_exit();
+}
+
+static inline enum ctx_state exception_enter(void)
+{
+ enum ctx_state prev_ctx;
+
+ if (!context_tracking_is_enabled())
+ return 0;
+
+ prev_ctx = this_cpu_read(context_tracking.state);
+ if (prev_ctx != CONTEXT_KERNEL)
+ context_tracking_exit(prev_ctx);
+
+ return prev_ctx;
+}
+
+static inline void exception_exit(enum ctx_state prev_ctx)
+{
+ if (context_tracking_is_enabled()) {
+ if (prev_ctx != CONTEXT_KERNEL)
+ context_tracking_enter(prev_ctx);
+ }
+}
+
+static inline void context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ if (context_tracking_is_enabled())
+ __context_tracking_task_switch(prev, next);
+}
+#else
+static inline void user_enter(void) { }
+static inline void user_exit(void) { }
+static inline enum ctx_state exception_enter(void) { return 0; }
+static inline void exception_exit(enum ctx_state prev_ctx) { }
+static inline void context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next) { }
+#endif /* !CONFIG_CONTEXT_TRACKING */
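+
+/*
+ * Example (sketch): an arch-level exception handler brackets its work so
+ * context tracking sees the kernel entry and the previous state is restored
+ * on the way out:
+ *
+ *    enum ctx_state prev_state = exception_enter();
+ *    ... handle the fault ...
+ *    exception_exit(prev_state);
+ */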
+
+
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+extern void context_tracking_init(void);
+#else
+static inline void context_tracking_init(void) { }
+#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static inline void guest_enter(void)
+{
+ if (vtime_accounting_enabled())
+ vtime_guest_enter(current);
+ else
+ current->flags |= PF_VCPU;
+
+ if (context_tracking_is_enabled())
+ context_tracking_enter(CONTEXT_GUEST);
+}
+
+static inline void guest_exit(void)
+{
+ if (context_tracking_is_enabled())
+ context_tracking_exit(CONTEXT_GUEST);
+
+ if (vtime_accounting_enabled())
+ vtime_guest_exit(current);
+ else
+ current->flags &= ~PF_VCPU;
+}
+
+#else
+static inline void guest_enter(void)
+{
+ /*
+	 * This is running in ioctl context so it's safe
+	 * to assume that the pending cputime to flush
+	 * is system time (stime).
+ */
+ vtime_account_system(current);
+ current->flags |= PF_VCPU;
+}
+
+static inline void guest_exit(void)
+{
+ /* Flush the guest cputime we spent on the guest */
+ vtime_account_system(current);
+ current->flags &= ~PF_VCPU;
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+
+#endif
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
new file mode 100644
index 000000000..6b7b96a32
--- /dev/null
+++ b/include/linux/context_tracking_state.h
@@ -0,0 +1,47 @@
+#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
+#define _LINUX_CONTEXT_TRACKING_STATE_H
+
+#include <linux/percpu.h>
+#include <linux/static_key.h>
+
+struct context_tracking {
+ /*
+ * When active is false, probes are unset in order
+ * to minimize overhead: TIF flags are cleared
+ * and calls to user_enter/exit are ignored. This
+ * may be further optimized using static keys.
+ */
+ bool active;
+ enum ctx_state {
+ CONTEXT_KERNEL = 0,
+ CONTEXT_USER,
+ CONTEXT_GUEST,
+ } state;
+};
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern struct static_key context_tracking_enabled;
+DECLARE_PER_CPU(struct context_tracking, context_tracking);
+
+static inline bool context_tracking_is_enabled(void)
+{
+ return static_key_false(&context_tracking_enabled);
+}
+
+static inline bool context_tracking_cpu_is_enabled(void)
+{
+ return __this_cpu_read(context_tracking.active);
+}
+
+static inline bool context_tracking_in_user(void)
+{
+ return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
+}
+#else
+static inline bool context_tracking_in_user(void) { return false; }
+static inline bool context_tracking_active(void) { return false; }
+static inline bool context_tracking_is_enabled(void) { return false; }
+static inline bool context_tracking_cpu_is_enabled(void) { return false; }
+#endif /* CONFIG_CONTEXT_TRACKING */
+
+#endif
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
new file mode 100644
index 000000000..cf68ca4a5
--- /dev/null
+++ b/include/linux/cordic.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __CORDIC_H_
+#define __CORDIC_H_
+
+#include <linux/types.h>
+
+/**
+ * struct cordic_iq - i/q coordinate.
+ *
+ * @i: real part of coordinate (in phase).
+ * @q: imaginary part of coordinate (quadrature).
+ */
+struct cordic_iq {
+ s32 i;
+ s32 q;
+};
+
+/**
+ * cordic_calc_iq() - calculates the i/q coordinate for given angle.
+ *
+ * @theta: angle in degrees for which i/q coordinate is to be calculated.
+ * Return: the i/q coordinate corresponding to @theta.
+ *
+ * The function calculates the i/q coordinate for a given angle using the
+ * CORDIC algorithm. The coordinate consists of a real (i) and an
+ * imaginary (q) part. The real part is essentially the cosine of the
+ * angle and the imaginary part is the sine of the angle. The returned
+ * values are scaled by 2^16 for precision. The range for theta is
+ * from -180 degrees to +180 degrees. Passed values outside this range are
+ * converted before doing the actual calculation.
+ */
+struct cordic_iq cordic_calc_iq(s32 theta);
+
+#endif /* __CORDIC_H_ */
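To make the fixed-point convention concrete (results scaled by 2^16, theta in degrees), a caller might do something like the sketch below; the function name is illustrative only:

#include <linux/cordic.h>
#include <linux/printk.h>

static void example_cordic(void)
{
	struct cordic_iq iq = cordic_calc_iq(45);	/* 45 degrees */

	/*
	 * iq.i approximates cos(45) * 2^16 and iq.q approximates
	 * sin(45) * 2^16, i.e. both are roughly 46341 (0.7071 * 65536).
	 */
	pr_info("cos=%d sin=%d (scaled by 2^16)\n", iq.i, iq.q);
}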
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
new file mode 100644
index 000000000..d016a121a
--- /dev/null
+++ b/include/linux/coredump.h
@@ -0,0 +1,23 @@
+#ifndef _LINUX_COREDUMP_H
+#define _LINUX_COREDUMP_H
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <asm/siginfo.h>
+
+/*
+ * These are the only things you should do on a core-file: use only these
+ * functions to write out all the necessary info.
+ */
+struct coredump_params;
+extern int dump_skip(struct coredump_params *cprm, size_t nr);
+extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
+extern int dump_align(struct coredump_params *cprm, int align);
+#ifdef CONFIG_COREDUMP
+extern void do_coredump(const siginfo_t *siginfo);
+#else
+static inline void do_coredump(const siginfo_t *siginfo) {}
+#endif
+
+#endif /* _LINUX_COREDUMP_H */
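A hedged sketch of how a binfmt core-dump writer might use these helpers: emit a blob into the core file, then pad to a 4-byte boundary. The helper name is made up; the assumption here is that the dump helpers follow the "non-zero on success, 0 on failure" convention:

#include <linux/coredump.h>

/* Illustrative only: write 'data' and pad the core file to 4 bytes. */
static int example_dump_blob(struct coredump_params *cprm,
			     const void *data, int len)
{
	if (!dump_emit(cprm, data, len))
		return 0;		/* assumed: helpers return 0 on failure */
	return dump_align(cprm, 4);
}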
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644
index 000000000..3486b9082
--- /dev/null
+++ b/include/linux/coresight.h
@@ -0,0 +1,251 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_H
+#define _LINUX_CORESIGHT_H
+
+#include <linux/device.h>
+
+/* Peripheral id registers (0xFD0-0xFEC) */
+#define CORESIGHT_PERIPHIDR4 0xfd0
+#define CORESIGHT_PERIPHIDR5 0xfd4
+#define CORESIGHT_PERIPHIDR6 0xfd8
+#define CORESIGHT_PERIPHIDR7 0xfdC
+#define CORESIGHT_PERIPHIDR0 0xfe0
+#define CORESIGHT_PERIPHIDR1 0xfe4
+#define CORESIGHT_PERIPHIDR2 0xfe8
+#define CORESIGHT_PERIPHIDR3 0xfeC
+/* Component id registers (0xFF0-0xFFC) */
+#define CORESIGHT_COMPIDR0 0xff0
+#define CORESIGHT_COMPIDR1 0xff4
+#define CORESIGHT_COMPIDR2 0xff8
+#define CORESIGHT_COMPIDR3 0xffC
+
+#define ETM_ARCH_V3_3 0x23
+#define ETM_ARCH_V3_5 0x25
+#define PFT_ARCH_V1_0 0x30
+#define PFT_ARCH_V1_1 0x31
+
+#define CORESIGHT_UNLOCK 0xc5acce55
+
+extern struct bus_type coresight_bustype;
+
+enum coresight_dev_type {
+ CORESIGHT_DEV_TYPE_NONE,
+ CORESIGHT_DEV_TYPE_SINK,
+ CORESIGHT_DEV_TYPE_LINK,
+ CORESIGHT_DEV_TYPE_LINKSINK,
+ CORESIGHT_DEV_TYPE_SOURCE,
+};
+
+enum coresight_dev_subtype_sink {
+ CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_SINK_PORT,
+ CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+};
+
+enum coresight_dev_subtype_link {
+ CORESIGHT_DEV_SUBTYPE_LINK_NONE,
+ CORESIGHT_DEV_SUBTYPE_LINK_MERG,
+ CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
+ CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
+};
+
+enum coresight_dev_subtype_source {
+ CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
+ CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+};
+
+/**
+ * struct coresight_dev_subtype - further characterisation of a type
+ * @sink_subtype: type of sink this component is, as defined
+ by @coresight_dev_subtype_sink.
+ * @link_subtype: type of link this component is, as defined
+ by @coresight_dev_subtype_link.
+ * @source_subtype: type of source this component is, as defined
+ by @coresight_dev_subtype_source.
+ */
+struct coresight_dev_subtype {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ enum coresight_dev_subtype_source source_subtype;
+};
+
+/**
+ * struct coresight_platform_data - data harvested from the DT specification
+ * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
+ * @name: name of the component as shown under sysfs.
+ * @nr_inport: number of input ports for this component.
+ * @outports: list of remote endpoint port number.
+ * @child_names: name of all child components connected to this device.
+ * @child_ports: child component port number the current component is
+ connected to.
+ * @nr_outport: number of output ports for this component.
+ * @clk: The clock this component is associated to.
+ */
+struct coresight_platform_data {
+ int cpu;
+ const char *name;
+ int nr_inport;
+ int *outports;
+ const char **child_names;
+ int *child_ports;
+ int nr_outport;
+ struct clk *clk;
+};
+
+/**
+ * struct coresight_desc - description of a component required from drivers
+ * @type: as defined by @coresight_dev_type.
+ * @subtype: as defined by @coresight_dev_subtype.
+ * @ops: generic operations for this component, as defined
+ by @coresight_ops.
+ * @pdata: platform data collected from DT.
+ * @dev: The device entity associated to this component.
+ * @groups: operations specific to this component. These will end up
+ in the component's sysfs sub-directory.
+ */
+struct coresight_desc {
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct coresight_platform_data *pdata;
+ struct device *dev;
+ const struct attribute_group **groups;
+};
+
+/**
+ * struct coresight_connection - representation of a single connection
+ * @outport: a connection's output port number.
+ * @child_name: remote component's name.
+ * @child_port: remote component's port number @output is connected to.
+ * @child_dev: a @coresight_device representation of the component
+ connected to @outport.
+ */
+struct coresight_connection {
+ int outport;
+ const char *child_name;
+ int child_port;
+ struct coresight_device *child_dev;
+};
+
+/**
+ * struct coresight_device - representation of a device as used by the framework
+ * @conns: array of coresight_connections associated to this component.
+ * @nr_inport: number of input ports associated to this component.
+ * @nr_outport: number of output ports associated to this component.
+ * @type: as defined by @coresight_dev_type.
+ * @subtype: as defined by @coresight_dev_subtype.
+ * @ops: generic operations for this component, as defined
+ by @coresight_ops.
+ * @dev: The device entity associated to this component.
+ * @refcnt: keep track of what is in use.
+ * @path_link: link of current component into the path being enabled.
+ * @orphan: true if the component has connections that haven't been linked.
+ * @enable: 'true' if component is currently part of an active path.
+ * @activated: 'true' only if a _sink_ has been activated. A sink can be
+ activated but not yet enabled. Enabling for a _sink_
+ happens when a source has been selected for it.
+ */
+struct coresight_device {
+ struct coresight_connection *conns;
+ int nr_inport;
+ int nr_outport;
+ enum coresight_dev_type type;
+ struct coresight_dev_subtype subtype;
+ const struct coresight_ops *ops;
+ struct device dev;
+ atomic_t *refcnt;
+ struct list_head path_link;
+ bool orphan;
+ bool enable; /* true only if configured as part of a path */
+ bool activated; /* true only if a sink is part of a path */
+};
+
+#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+
+#define source_ops(csdev) csdev->ops->source_ops
+#define sink_ops(csdev) csdev->ops->sink_ops
+#define link_ops(csdev) csdev->ops->link_ops
+
+/**
+ * struct coresight_ops_sink - basic operations for a sink
+ * Operations available for sinks
+ * @enable: enables the sink.
+ * @disable: disables the sink.
+ */
+struct coresight_ops_sink {
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+/**
+ * struct coresight_ops_link - basic operations for a link
+ * Operations available for links.
+ * @enable: enables flow between iport and oport.
+ * @disable: disables flow between iport and oport.
+ */
+struct coresight_ops_link {
+ int (*enable)(struct coresight_device *csdev, int iport, int oport);
+ void (*disable)(struct coresight_device *csdev, int iport, int oport);
+};
+
+/**
+ * struct coresight_ops_source - basic operations for a source
+ * Operations available for sources.
+ * @trace_id: returns the value of the component's trace ID as known
+ to the HW.
+ * @enable: enables tracing from a source.
+ * @disable: disables tracing for a source.
+ */
+struct coresight_ops_source {
+ int (*trace_id)(struct coresight_device *csdev);
+ int (*enable)(struct coresight_device *csdev);
+ void (*disable)(struct coresight_device *csdev);
+};
+
+struct coresight_ops {
+ const struct coresight_ops_sink *sink_ops;
+ const struct coresight_ops_link *link_ops;
+ const struct coresight_ops_source *source_ops;
+};
+
+#ifdef CONFIG_CORESIGHT
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev);
+extern void coresight_disable(struct coresight_device *csdev);
+extern int coresight_timeout(void __iomem *addr, u32 offset,
+ int position, int value);
+#else
+static inline struct coresight_device *
+coresight_register(struct coresight_desc *desc) { return NULL; }
+static inline void coresight_unregister(struct coresight_device *csdev) {}
+static inline int
+coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
+static inline void coresight_disable(struct coresight_device *csdev) {}
+static inline int coresight_timeout(void __iomem *addr, u32 offset,
+ int position, int value) { return 1; }
+#endif
+
+#ifdef CONFIG_OF
+extern struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node);
+#else
+static inline struct coresight_platform_data *of_get_coresight_platform_data(
+ struct device *dev, struct device_node *node) { return NULL; }
+#endif
+
+#endif
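Putting the pieces above together, a sink driver would typically fill in a coresight_desc from its probe routine and hand it to coresight_register(). The sketch below is only an outline: the callbacks do nothing, and a real driver would program its hardware and keep the returned coresight_device in its drvdata.

#include <linux/coresight.h>
#include <linux/err.h>

/* Hypothetical sink driver callbacks; real drivers program hardware here. */
static int example_sink_enable(struct coresight_device *csdev)
{
	return 0;
}

static void example_sink_disable(struct coresight_device *csdev)
{
}

static const struct coresight_ops_sink example_sink_ops = {
	.enable		= example_sink_enable,
	.disable	= example_sink_disable,
};

static const struct coresight_ops example_cs_ops = {
	.sink_ops	= &example_sink_ops,
};

static int example_probe(struct device *dev)
{
	struct coresight_device *csdev;
	struct coresight_desc desc = {
		.type			= CORESIGHT_DEV_TYPE_SINK,
		.subtype.sink_subtype	= CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
		.ops			= &example_cs_ops,
		.pdata			= dev->platform_data,
		.dev			= dev,
	};

	csdev = coresight_register(&desc);
	if (IS_ERR(csdev))
		return PTR_ERR(csdev);
	return 0;
}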
diff --git a/include/linux/cper.h b/include/linux/cper.h
new file mode 100644
index 000000000..76abba4b2
--- /dev/null
+++ b/include/linux/cper.h
@@ -0,0 +1,433 @@
+/*
+ * UEFI Common Platform Error Record
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef LINUX_CPER_H
+#define LINUX_CPER_H
+
+#include <linux/uuid.h>
+#include <linux/trace_seq.h>
+
+/* CPER record signature and the size */
+#define CPER_SIG_RECORD "CPER"
+#define CPER_SIG_SIZE 4
+/* Used in signature_end field in struct cper_record_header */
+#define CPER_SIG_END 0xffffffff
+
+/*
+ * CPER record header revision, used in revision field in struct
+ * cper_record_header
+ */
+#define CPER_RECORD_REV 0x0100
+
+/*
+ * CPER record length contains the CPER fields which are relevant for further
+ * handling of a memory error in userspace (we don't carry all the fields
+ * defined in the UEFI spec because some of them don't make any sense.)
+ * Currently, a length of 256 should be more than enough.
+ */
+#define CPER_REC_LEN 256
+/*
+ * Severity definition for error_severity in struct cper_record_header
+ * and section_severity in struct cper_section_descriptor
+ */
+enum {
+ CPER_SEV_RECOVERABLE,
+ CPER_SEV_FATAL,
+ CPER_SEV_CORRECTED,
+ CPER_SEV_INFORMATIONAL,
+};
+
+/*
+ * Validation bits definition for validation_bits in struct
+ * cper_record_header. If set, corresponding fields in struct
+ * cper_record_header contain valid information.
+ *
+ * corresponds to platform_id
+ */
+#define CPER_VALID_PLATFORM_ID 0x0001
+/* corresponds to timestamp */
+#define CPER_VALID_TIMESTAMP 0x0002
+/* corresponds to partition_id */
+#define CPER_VALID_PARTITION_ID 0x0004
+
+/*
+ * Notification type used to generate error record, used in
+ * notification_type in struct cper_record_header
+ *
+ * Corrected Machine Check
+ */
+#define CPER_NOTIFY_CMC \
+ UUID_LE(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
+/* Corrected Platform Error */
+#define CPER_NOTIFY_CPE \
+ UUID_LE(0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, \
+ 0xF2, 0x7E, 0xBE, 0xEE)
+/* Machine Check Exception */
+#define CPER_NOTIFY_MCE \
+ UUID_LE(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
+/* PCI Express Error */
+#define CPER_NOTIFY_PCIE \
+ UUID_LE(0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, \
+ 0xAF, 0x67, 0xC1, 0x04)
+/* INIT Record (for IPF) */
+#define CPER_NOTIFY_INIT \
+ UUID_LE(0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, \
+ 0xD3, 0x9B, 0xC9, 0x8E)
+/* Non-Maskable Interrupt */
+#define CPER_NOTIFY_NMI \
+ UUID_LE(0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, \
+ 0x85, 0xD6, 0xE9, 0x8A)
+/* BOOT Error Record */
+#define CPER_NOTIFY_BOOT \
+ UUID_LE(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
+/* DMA Remapping Error */
+#define CPER_NOTIFY_DMAR \
+ UUID_LE(0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, \
+ 0x72, 0x2D, 0xEB, 0x41)
+
+/*
+ * Flags bits definitions for flags in struct cper_record_header
+ * If set, the error has been recovered
+ */
+#define CPER_HW_ERROR_FLAGS_RECOVERED 0x1
+/* If set, the error is for previous boot */
+#define CPER_HW_ERROR_FLAGS_PREVERR 0x2
+/* If set, the error is injected for testing */
+#define CPER_HW_ERROR_FLAGS_SIMULATED 0x4
+
+/*
+ * CPER section header revision, used in revision field in struct
+ * cper_section_descriptor
+ */
+#define CPER_SEC_REV 0x0100
+
+/*
+ * Validation bits definition for validation_bits in struct
+ * cper_section_descriptor. If set, corresponding fields in struct
+ * cper_section_descriptor contain valid information.
+ *
+ * corresponds to fru_id
+ */
+#define CPER_SEC_VALID_FRU_ID 0x1
+/* corresponds to fru_text */
+#define CPER_SEC_VALID_FRU_TEXT 0x2
+
+/*
+ * Flags bits definitions for flags in struct cper_section_descriptor
+ *
+ * If set, the section is associated with the error condition
+ * directly, and should be focused on
+ */
+#define CPER_SEC_PRIMARY 0x0001
+/*
+ * If set, the error was not contained within the processor or memory
+ * hierarchy and the error may have propagated to persistent storage
+ * or network
+ */
+#define CPER_SEC_CONTAINMENT_WARNING 0x0002
+/* If set, the component must be re-initialized or re-enabled prior to use */
+#define CPER_SEC_RESET 0x0004
+/* If set, Linux may choose to discontinue use of the resource */
+#define CPER_SEC_ERROR_THRESHOLD_EXCEEDED 0x0008
+/*
+ * If set, resource could not be queried for error information due to
+ * conflicts with other system software or resources. Some fields of
+ * the section will be invalid
+ */
+#define CPER_SEC_RESOURCE_NOT_ACCESSIBLE 0x0010
+/*
+ * If set, action has been taken to ensure error containment (such as
+ * poisoning data), but the error has not been fully corrected and the
+ * data has not been consumed. Linux may choose to take further
+ * corrective action before the data is consumed
+ */
+#define CPER_SEC_LATENT_ERROR 0x0020
+
+/*
+ * Section type definitions, used in section_type field in struct
+ * cper_section_descriptor
+ *
+ * Processor Generic
+ */
+#define CPER_SEC_PROC_GENERIC \
+ UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
+ 0x93, 0xC4, 0xF3, 0xDB)
+/* Processor Specific: X86/X86_64 */
+#define CPER_SEC_PROC_IA \
+ UUID_LE(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
+/* Processor Specific: IA64 */
+#define CPER_SEC_PROC_IPF \
+ UUID_LE(0xE429FAF1, 0x3CB7, 0x11D4, 0x0B, 0xCA, 0x07, 0x00, \
+ 0x80, 0xC7, 0x3C, 0x88, 0x81)
+/* Platform Memory */
+#define CPER_SEC_PLATFORM_MEM \
+ UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \
+ 0xED, 0x7C, 0x83, 0xB1)
+#define CPER_SEC_PCIE \
+ UUID_LE(0xD995E954, 0xBBC1, 0x430F, 0xAD, 0x91, 0xB4, 0x4D, \
+ 0xCB, 0x3C, 0x6F, 0x35)
+/* Firmware Error Record Reference */
+#define CPER_SEC_FW_ERR_REC_REF \
+ UUID_LE(0x81212A96, 0x09ED, 0x4996, 0x94, 0x71, 0x8D, 0x72, \
+ 0x9C, 0x8E, 0x69, 0xED)
+/* PCI/PCI-X Bus */
+#define CPER_SEC_PCI_X_BUS \
+ UUID_LE(0xC5753963, 0x3B84, 0x4095, 0xBF, 0x78, 0xED, 0xDA, \
+ 0xD3, 0xF9, 0xC9, 0xDD)
+/* PCI Component/Device */
+#define CPER_SEC_PCI_DEV \
+ UUID_LE(0xEB5E4685, 0xCA66, 0x4769, 0xB6, 0xA2, 0x26, 0x06, \
+ 0x8B, 0x00, 0x13, 0x26)
+#define CPER_SEC_DMAR_GENERIC \
+ UUID_LE(0x5B51FEF7, 0xC79D, 0x4434, 0x8F, 0x1B, 0xAA, 0x62, \
+ 0xDE, 0x3E, 0x2C, 0x64)
+/* Intel VT for Directed I/O specific DMAr */
+#define CPER_SEC_DMAR_VT \
+ UUID_LE(0x71761D37, 0x32B2, 0x45cd, 0xA7, 0xD0, 0xB0, 0xFE, \
+ 0xDD, 0x93, 0xE8, 0xCF)
+/* IOMMU specific DMAr */
+#define CPER_SEC_DMAR_IOMMU \
+ UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \
+ 0xDF, 0xAA, 0x84, 0xEC)
+
+#define CPER_PROC_VALID_TYPE 0x0001
+#define CPER_PROC_VALID_ISA 0x0002
+#define CPER_PROC_VALID_ERROR_TYPE 0x0004
+#define CPER_PROC_VALID_OPERATION 0x0008
+#define CPER_PROC_VALID_FLAGS 0x0010
+#define CPER_PROC_VALID_LEVEL 0x0020
+#define CPER_PROC_VALID_VERSION 0x0040
+#define CPER_PROC_VALID_BRAND_INFO 0x0080
+#define CPER_PROC_VALID_ID 0x0100
+#define CPER_PROC_VALID_TARGET_ADDRESS 0x0200
+#define CPER_PROC_VALID_REQUESTOR_ID 0x0400
+#define CPER_PROC_VALID_RESPONDER_ID 0x0800
+#define CPER_PROC_VALID_IP 0x1000
+
+#define CPER_MEM_VALID_ERROR_STATUS 0x0001
+#define CPER_MEM_VALID_PA 0x0002
+#define CPER_MEM_VALID_PA_MASK 0x0004
+#define CPER_MEM_VALID_NODE 0x0008
+#define CPER_MEM_VALID_CARD 0x0010
+#define CPER_MEM_VALID_MODULE 0x0020
+#define CPER_MEM_VALID_BANK 0x0040
+#define CPER_MEM_VALID_DEVICE 0x0080
+#define CPER_MEM_VALID_ROW 0x0100
+#define CPER_MEM_VALID_COLUMN 0x0200
+#define CPER_MEM_VALID_BIT_POSITION 0x0400
+#define CPER_MEM_VALID_REQUESTOR_ID 0x0800
+#define CPER_MEM_VALID_RESPONDER_ID 0x1000
+#define CPER_MEM_VALID_TARGET_ID 0x2000
+#define CPER_MEM_VALID_ERROR_TYPE 0x4000
+#define CPER_MEM_VALID_RANK_NUMBER 0x8000
+#define CPER_MEM_VALID_CARD_HANDLE 0x10000
+#define CPER_MEM_VALID_MODULE_HANDLE 0x20000
+
+#define CPER_PCIE_VALID_PORT_TYPE 0x0001
+#define CPER_PCIE_VALID_VERSION 0x0002
+#define CPER_PCIE_VALID_COMMAND_STATUS 0x0004
+#define CPER_PCIE_VALID_DEVICE_ID 0x0008
+#define CPER_PCIE_VALID_SERIAL_NUMBER 0x0010
+#define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS 0x0020
+#define CPER_PCIE_VALID_CAPABILITY 0x0040
+#define CPER_PCIE_VALID_AER_INFO 0x0080
+
+#define CPER_PCIE_SLOT_SHIFT 3
+
+/*
+ * All tables and structs must be byte-packed to match CPER
+ * specification, since the tables are provided by the system BIOS
+ */
+#pragma pack(1)
+
+struct cper_record_header {
+ char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */
+ __u16 revision; /* must be CPER_RECORD_REV */
+ __u32 signature_end; /* must be CPER_SIG_END */
+ __u16 section_count;
+ __u32 error_severity;
+ __u32 validation_bits;
+ __u32 record_length;
+ __u64 timestamp;
+ uuid_le platform_id;
+ uuid_le partition_id;
+ uuid_le creator_id;
+ uuid_le notification_type;
+ __u64 record_id;
+ __u32 flags;
+ __u64 persistence_information;
+ __u8 reserved[12]; /* must be zero */
+};
+
+struct cper_section_descriptor {
+ __u32 section_offset; /* Offset in bytes of the
+ * section body from the base
+ * of the record header */
+ __u32 section_length;
+ __u16 revision; /* must be CPER_RECORD_REV */
+ __u8 validation_bits;
+ __u8 reserved; /* must be zero */
+ __u32 flags;
+ uuid_le section_type;
+ uuid_le fru_id;
+ __u32 section_severity;
+ __u8 fru_text[20];
+};
+
+/* Generic Processor Error Section */
+struct cper_sec_proc_generic {
+ __u64 validation_bits;
+ __u8 proc_type;
+ __u8 proc_isa;
+ __u8 proc_error_type;
+ __u8 operation;
+ __u8 flags;
+ __u8 level;
+ __u16 reserved;
+ __u64 cpu_version;
+ char cpu_brand[128];
+ __u64 proc_id;
+ __u64 target_addr;
+ __u64 requestor_id;
+ __u64 responder_id;
+ __u64 ip;
+};
+
+/* IA32/X64 Processor Error Section */
+struct cper_sec_proc_ia {
+ __u64 validation_bits;
+ __u8 lapic_id;
+ __u8 cpuid[48];
+};
+
+/* IA32/X64 Processor Error Information Structure */
+struct cper_ia_err_info {
+ uuid_le err_type;
+ __u64 validation_bits;
+ __u64 check_info;
+ __u64 target_id;
+ __u64 requestor_id;
+ __u64 responder_id;
+ __u64 ip;
+};
+
+/* IA32/X64 Processor Context Information Structure */
+struct cper_ia_proc_ctx {
+ __u16 reg_ctx_type;
+ __u16 reg_arr_size;
+ __u32 msr_addr;
+ __u64 mm_reg_addr;
+};
+
+/* Memory Error Section */
+struct cper_sec_mem_err {
+ __u64 validation_bits;
+ __u64 error_status;
+ __u64 physical_addr;
+ __u64 physical_addr_mask;
+ __u16 node;
+ __u16 card;
+ __u16 module;
+ __u16 bank;
+ __u16 device;
+ __u16 row;
+ __u16 column;
+ __u16 bit_pos;
+ __u64 requestor_id;
+ __u64 responder_id;
+ __u64 target_id;
+ __u8 error_type;
+ __u8 reserved;
+ __u16 rank;
+ __u16 mem_array_handle; /* card handle in UEFI 2.4 */
+ __u16 mem_dev_handle; /* module handle in UEFI 2.4 */
+};
+
+struct cper_mem_err_compact {
+ __u64 validation_bits;
+ __u16 node;
+ __u16 card;
+ __u16 module;
+ __u16 bank;
+ __u16 device;
+ __u16 row;
+ __u16 column;
+ __u16 bit_pos;
+ __u64 requestor_id;
+ __u64 responder_id;
+ __u64 target_id;
+ __u16 rank;
+ __u16 mem_array_handle;
+ __u16 mem_dev_handle;
+};
+
+struct cper_sec_pcie {
+ __u64 validation_bits;
+ __u32 port_type;
+ struct {
+ __u8 minor;
+ __u8 major;
+ __u8 reserved[2];
+ } version;
+ __u16 command;
+ __u16 status;
+ __u32 reserved;
+ struct {
+ __u16 vendor_id;
+ __u16 device_id;
+ __u8 class_code[3];
+ __u8 function;
+ __u8 device;
+ __u16 segment;
+ __u8 bus;
+ __u8 secondary_bus;
+ __u16 slot;
+ __u8 reserved;
+ } device_id;
+ struct {
+ __u32 lower;
+ __u32 upper;
+ } serial_number;
+ struct {
+ __u16 secondary_status;
+ __u16 control;
+ } bridge;
+ __u8 capability[60];
+ __u8 aer_info[96];
+};
+
+/* Reset to default packing */
+#pragma pack()
+
+u64 cper_next_record_id(void);
+const char *cper_severity_str(unsigned int);
+const char *cper_mem_err_type_str(unsigned int);
+void cper_print_bits(const char *prefix, unsigned int bits,
+ const char * const strs[], unsigned int strs_size);
+void cper_mem_err_pack(const struct cper_sec_mem_err *,
+ struct cper_mem_err_compact *);
+const char *cper_mem_err_unpack(struct trace_seq *,
+ struct cper_mem_err_compact *);
+
+#endif
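The validation_bits fields gate every optional member, so a consumer is expected to test the corresponding CPER_*_VALID_* flag before trusting a field. A small sketch of decoding a memory error section; the printing and function name are illustrative:

#include <linux/cper.h>
#include <linux/printk.h>

static void example_report_mem_err(const struct cper_sec_mem_err *mem)
{
	if (mem->validation_bits & CPER_MEM_VALID_PA)
		pr_info("memory error at 0x%llx\n",
			(unsigned long long)mem->physical_addr);

	if (mem->validation_bits & CPER_MEM_VALID_NODE)
		pr_info("  node: %u\n", mem->node);

	if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE)
		pr_info("  type: %s\n",
			cper_mem_err_type_str(mem->error_type));
}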
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
new file mode 100644
index 000000000..c0fb6b1b4
--- /dev/null
+++ b/include/linux/cpu.h
@@ -0,0 +1,294 @@
+/*
+ * include/linux/cpu.h - generic cpu definition
+ *
+ * This is mainly for topological representation. We define the
+ * basic 'struct cpu' here, which can be embedded in per-arch
+ * definitions of processors.
+ *
+ * Basic handling of the devices is done in drivers/base/cpu.c
+ *
+ * CPUs are exported via sysfs in the devices/system/cpu
+ * directory.
+ */
+#ifndef _LINUX_CPU_H_
+#define _LINUX_CPU_H_
+
+#include <linux/node.h>
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
+
+struct device;
+struct device_node;
+struct attribute_group;
+
+struct cpu {
+ int node_id; /* The node which contains the CPU */
+ int hotpluggable; /* creates sysfs control file if hotpluggable */
+ struct device dev;
+};
+
+extern int register_cpu(struct cpu *cpu, int num);
+extern struct device *get_cpu_device(unsigned cpu);
+extern bool cpu_is_hotpluggable(unsigned cpu);
+extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
+extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
+ int cpu, unsigned int *thread);
+
+extern int cpu_add_dev_attr(struct device_attribute *attr);
+extern void cpu_remove_dev_attr(struct device_attribute *attr);
+
+extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
+extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
+
+extern struct device *cpu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...);
+#ifdef CONFIG_HOTPLUG_CPU
+extern void unregister_cpu(struct cpu *cpu);
+extern ssize_t arch_cpu_probe(const char *, size_t);
+extern ssize_t arch_cpu_release(const char *, size_t);
+#endif
+struct notifier_block;
+
+/*
+ * CPU notifier priorities.
+ */
+enum {
+ /*
+ * SCHED_ACTIVE marks a cpu which is coming up active during
+ * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
+ * notifier. CPUSET_ACTIVE adjusts cpuset according to
+ * cpu_active mask right after SCHED_ACTIVE. During
+ * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
+ * ordered in a similar way.
+ *
+ * This ordering guarantees consistent cpu_active mask and
+ * migration behavior to all cpu notifiers.
+ */
+ CPU_PRI_SCHED_ACTIVE = INT_MAX,
+ CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1,
+ CPU_PRI_SCHED_INACTIVE = INT_MIN + 1,
+ CPU_PRI_CPUSET_INACTIVE = INT_MIN,
+
+ /* migration should happen before other stuff but after perf */
+ CPU_PRI_PERF = 20,
+ CPU_PRI_MIGRATION = 10,
+ CPU_PRI_SMPBOOT = 9,
+ /* bring up workqueues before normal notifiers and down after */
+ CPU_PRI_WORKQUEUE_UP = 5,
+ CPU_PRI_WORKQUEUE_DOWN = -5,
+};
+
+#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
+#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
+#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
+#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
+#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
+#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
+#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
+ * not handling interrupts, soon dead.
+ * Called on the dying cpu, interrupts
+ * are already disabled. Must not
+ * sleep, must not fail */
+#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
+ * lock is dropped */
+#define CPU_STARTING 0x000A /* CPU (unsigned)v soon running.
+ * Called on the new cpu, just before
+ * enabling interrupts. Must not sleep,
+ * must not fail */
+#define CPU_DYING_IDLE 0x000B /* CPU (unsigned)v dying, reached
+ * idle loop. */
+#define CPU_BROKEN 0x000C /* CPU (unsigned)v did not die properly,
+ * perhaps due to preemption. */
+
+/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
+ * operation in progress
+ */
+#define CPU_TASKS_FROZEN 0x0010
+
+#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
+#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
+#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
+#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
+#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
+#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
+#define CPU_STARTING_FROZEN (CPU_STARTING | CPU_TASKS_FROZEN)
+
+
+#ifdef CONFIG_SMP
+/* Need to know about CPUs going up/down? */
+#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+#define cpu_notifier(fn, pri) { \
+ static struct notifier_block fn##_nb = \
+ { .notifier_call = fn, .priority = pri }; \
+ register_cpu_notifier(&fn##_nb); \
+}
+
+#define __cpu_notifier(fn, pri) { \
+ static struct notifier_block fn##_nb = \
+ { .notifier_call = fn, .priority = pri }; \
+ __register_cpu_notifier(&fn##_nb); \
+}
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int register_cpu_notifier(struct notifier_block *nb);
+extern int __register_cpu_notifier(struct notifier_block *nb);
+extern void unregister_cpu_notifier(struct notifier_block *nb);
+extern void __unregister_cpu_notifier(struct notifier_block *nb);
+#else
+
+#ifndef MODULE
+extern int register_cpu_notifier(struct notifier_block *nb);
+extern int __register_cpu_notifier(struct notifier_block *nb);
+#else
+static inline int register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int __register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+#endif
+
+static inline void unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+
+static inline void __unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+#endif
+
+void smpboot_thread_init(void);
+int cpu_up(unsigned int cpu);
+void notify_cpu_starting(unsigned int cpu);
+extern void cpu_maps_update_begin(void);
+extern void cpu_maps_update_done(void);
+
+#define cpu_notifier_register_begin cpu_maps_update_begin
+#define cpu_notifier_register_done cpu_maps_update_done
+
+#else /* CONFIG_SMP */
+
+#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
+
+static inline int register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int __register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+
+static inline void __unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+
+static inline void cpu_maps_update_begin(void)
+{
+}
+
+static inline void cpu_maps_update_done(void)
+{
+}
+
+static inline void cpu_notifier_register_begin(void)
+{
+}
+
+static inline void cpu_notifier_register_done(void)
+{
+}
+
+static inline void smpboot_thread_init(void)
+{
+}
+
+#endif /* CONFIG_SMP */
+extern struct bus_type cpu_subsys;
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* Stop CPUs going up and down. */
+
+extern void cpu_hotplug_begin(void);
+extern void cpu_hotplug_done(void);
+extern void get_online_cpus(void);
+extern bool try_get_online_cpus(void);
+extern void put_online_cpus(void);
+extern void cpu_hotplug_disable(void);
+extern void cpu_hotplug_enable(void);
+#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
+#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+#define __register_hotcpu_notifier(nb) __register_cpu_notifier(nb)
+#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+#define __unregister_hotcpu_notifier(nb) __unregister_cpu_notifier(nb)
+void clear_tasks_mm_cpumask(int cpu);
+int cpu_down(unsigned int cpu);
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpu_hotplug_begin(void) {}
+static inline void cpu_hotplug_done(void) {}
+#define get_online_cpus() do { } while (0)
+#define try_get_online_cpus() true
+#define put_online_cpus() do { } while (0)
+#define cpu_hotplug_disable() do { } while (0)
+#define cpu_hotplug_enable() do { } while (0)
+#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
+#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_PM_SLEEP_SMP
+extern int disable_nonboot_cpus(void);
+extern void enable_nonboot_cpus(void);
+#else /* !CONFIG_PM_SLEEP_SMP */
+static inline int disable_nonboot_cpus(void) { return 0; }
+static inline void enable_nonboot_cpus(void) {}
+#endif /* !CONFIG_PM_SLEEP_SMP */
+
+enum cpuhp_state {
+ CPUHP_OFFLINE,
+ CPUHP_ONLINE,
+};
+
+void cpu_startup_entry(enum cpuhp_state state);
+
+void cpu_idle_poll_ctrl(bool enable);
+
+void arch_cpu_idle(void);
+void arch_cpu_idle_prepare(void);
+void arch_cpu_idle_enter(void);
+void arch_cpu_idle_exit(void);
+void arch_cpu_idle_dead(void);
+
+DECLARE_PER_CPU(bool, cpu_dead_idle);
+
+int cpu_report_state(int cpu);
+int cpu_check_up_prepare(int cpu);
+void cpu_set_state_online(int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+bool cpu_wait_death(unsigned int cpu, int seconds);
+bool cpu_report_death(void);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+#endif /* _LINUX_CPU_H_ */
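The CPU_* action codes above are consumed through the cpu_notifier()/hotcpu_notifier() machinery; a callback usually masks off CPU_TASKS_FROZEN and switches on the remaining action. A minimal sketch, with the per-CPU setup and teardown reduced to prints:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("cpu%u came online\n", cpu);	/* set up per-cpu state */
		break;
	case CPU_DEAD:
		pr_info("cpu%u went away\n", cpu);	/* tear it down */
		break;
	}
	return NOTIFY_OK;
}

static int __init example_init(void)
{
	cpu_notifier(example_cpu_callback, 0);
	return 0;
}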
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
new file mode 100644
index 000000000..bd955270d
--- /dev/null
+++ b/include/linux/cpu_cooling.h
@@ -0,0 +1,88 @@
+/*
+ * linux/include/linux/cpu_cooling.h
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
+ * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef __CPU_COOLING_H__
+#define __CPU_COOLING_H__
+
+#include <linux/of.h>
+#include <linux/thermal.h>
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_CPU_THERMAL
+/**
+ * cpufreq_cooling_register - function to create cpufreq cooling device.
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ */
+struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus);
+
+/**
+ * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
+ * @np: a valid struct device_node to the cooling device device tree node.
+ * @clip_cpus: cpumask of cpus where the frequency constraints will happen
+ */
+#ifdef CONFIG_THERMAL_OF
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus);
+#else
+static inline struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif
+
+/**
+ * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
+ * @cdev: thermal cooling device pointer.
+ */
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
+
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
+#else /* !CONFIG_CPU_THERMAL */
+static inline struct thermal_cooling_device *
+cpufreq_cooling_register(const struct cpumask *clip_cpus)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct device_node *np,
+ const struct cpumask *clip_cpus)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline
+void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+{
+ return;
+}
+static inline
+unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
+{
+ return THERMAL_CSTATE_INVALID;
+}
+#endif /* CONFIG_CPU_THERMAL */
+
+#endif /* __CPU_COOLING_H__ */
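A hedged sketch of the intended usage: a thermal driver registers a cooling device covering the CPUs whose frequency it is allowed to clip, and unregisters it on removal. Error handling follows the ERR_PTR convention used by the stubs above; the choice of cpu_possible_mask is only an example.

#include <linux/cpu_cooling.h>
#include <linux/err.h>

static struct thermal_cooling_device *example_cdev;

static int example_cooling_init(void)
{
	/* Clip frequencies on all possible CPUs (illustrative choice). */
	example_cdev = cpufreq_cooling_register(cpu_possible_mask);
	if (IS_ERR(example_cdev))
		return PTR_ERR(example_cdev);
	return 0;
}

static void example_cooling_exit(void)
{
	cpufreq_cooling_unregister(example_cdev);
}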
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
new file mode 100644
index 000000000..455b233dd
--- /dev/null
+++ b/include/linux/cpu_pm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPU_PM_H
+#define _LINUX_CPU_PM_H
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+/*
+ * When a CPU goes to a low power state that turns off power to the CPU's
+ * power domain, the contents of some blocks (floating point coprocessors,
+ * interrupt controllers, caches, timers) in the same power domain can
+ * be lost. The cpu_pm notifiers provide a method for platform idle, suspend,
+ * and hotplug implementations to notify the drivers for these blocks that
+ * they may be reset.
+ *
+ * All cpu_pm notifications must be called with interrupts disabled.
+ *
+ * The notifications are split into two classes: CPU notifications and CPU
+ * cluster notifications.
+ *
+ * CPU notifications apply to a single CPU and must be called on the affected
+ * CPU. They are used to save per-cpu context for affected blocks.
+ *
+ * CPU cluster notifications apply to all CPUs in a single power domain. They
+ * are used to save any global context for affected blocks, and must be called
+ * after all the CPUs in the power domain have been notified of the low power
+ * state.
+ */
+
+/*
+ * Event codes passed as unsigned long val to notifier calls
+ */
+enum cpu_pm_event {
+ /* A single cpu is entering a low power state */
+ CPU_PM_ENTER,
+
+ /* A single cpu failed to enter a low power state */
+ CPU_PM_ENTER_FAILED,
+
+ /* A single cpu is exiting a low power state */
+ CPU_PM_EXIT,
+
+ /* A cpu power domain is entering a low power state */
+ CPU_CLUSTER_PM_ENTER,
+
+ /* A cpu power domain failed to enter a low power state */
+ CPU_CLUSTER_PM_ENTER_FAILED,
+
+ /* A cpu power domain is exiting a low power state */
+ CPU_CLUSTER_PM_EXIT,
+};
+
+#ifdef CONFIG_CPU_PM
+int cpu_pm_register_notifier(struct notifier_block *nb);
+int cpu_pm_unregister_notifier(struct notifier_block *nb);
+int cpu_pm_enter(void);
+int cpu_pm_exit(void);
+int cpu_cluster_pm_enter(void);
+int cpu_cluster_pm_exit(void);
+
+#else
+
+static inline int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int cpu_pm_enter(void)
+{
+ return 0;
+}
+
+static inline int cpu_pm_exit(void)
+{
+ return 0;
+}
+
+static inline int cpu_cluster_pm_enter(void)
+{
+ return 0;
+}
+
+static inline int cpu_cluster_pm_exit(void)
+{
+ return 0;
+}
+#endif
+#endif
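A driver whose state lives in a CPU power domain would hook these events roughly as sketched below; the save and restore steps are left as comments because they are entirely hardware specific:

#include <linux/cpu_pm.h>
#include <linux/init.h>

static int example_cpu_pm_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* save per-cpu context (e.g. local timer, VFP) */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* restore per-cpu context */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_notify,
};

static int __init example_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&example_cpu_pm_nb);
}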
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
new file mode 100644
index 000000000..bdd18caa6
--- /dev/null
+++ b/include/linux/cpu_rmap.h
@@ -0,0 +1,69 @@
+#ifndef __LINUX_CPU_RMAP_H
+#define __LINUX_CPU_RMAP_H
+
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+
+/**
+ * struct cpu_rmap - CPU affinity reverse-map
+ * @refcount: kref for object
+ * @size: Number of objects to be reverse-mapped
+ * @used: Number of objects added
+ * @obj: Pointer to array of object pointers
+ * @near: For each CPU, the index and distance to the nearest object,
+ * based on affinity masks
+ */
+struct cpu_rmap {
+ struct kref refcount;
+ u16 size, used;
+ void **obj;
+ struct {
+ u16 index;
+ u16 dist;
+ } near[0];
+};
+#define CPU_RMAP_DIST_INF 0xffff
+
+extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+extern int cpu_rmap_put(struct cpu_rmap *rmap);
+
+extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
+extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity);
+
+static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
+{
+ return rmap->near[cpu].index;
+}
+
+static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
+{
+ return rmap->obj[rmap->near[cpu].index];
+}
+
+/**
+ * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
+ * @size: Number of objects to be mapped
+ *
+ * Must be called in process context.
+ */
+static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
+{
+ return alloc_cpu_rmap(size, GFP_KERNEL);
+}
+extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+
+extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
+
+#endif /* __LINUX_CPU_RMAP_H */
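For the IRQ flavour documented above, a multi-queue driver typically allocates one map for its interrupt vectors and then asks which vector is nearest to the CPU doing the work. A sketch with a caller-supplied IRQ array; sizes and names are hypothetical:

#include <linux/cpu_rmap.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int example_setup_rmap(int *irqs, unsigned int nvec)
{
	struct cpu_rmap *rmap;
	unsigned int i;
	int rc;

	rmap = alloc_irq_cpu_rmap(nvec);
	if (!rmap)
		return -ENOMEM;

	for (i = 0; i < nvec; i++) {
		rc = irq_cpu_rmap_add(rmap, irqs[i]);
		if (rc) {
			free_irq_cpu_rmap(rmap);
			return rc;
		}
	}

	/* Later, on a given CPU: which vector is nearest? */
	pr_info("cpu0 -> vector %u\n", cpu_rmap_lookup_index(rmap, 0));

	free_irq_cpu_rmap(rmap);
	return 0;
}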
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
new file mode 100644
index 000000000..c4d4eb8ac
--- /dev/null
+++ b/include/linux/cpufeature.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_CPUFEATURE_H
+#define __LINUX_CPUFEATURE_H
+
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+
+#include <linux/mod_devicetable.h>
+#include <asm/cpufeature.h>
+
+/*
+ * Macros imported from <asm/cpufeature.h>:
+ * - cpu_feature(x) ordinal value of feature called 'x'
+ * - cpu_have_feature(u32 n) whether feature #n is available
+ * - MAX_CPU_FEATURES upper bound for feature ordinal values
+ * Optional:
+ * - CPU_FEATURE_TYPEFMT format string fragment for printing the cpu type
+ * - CPU_FEATURE_TYPEVAL set of values matching the format string above
+ */
+
+#ifndef CPU_FEATURE_TYPEFMT
+#define CPU_FEATURE_TYPEFMT "%s"
+#endif
+
+#ifndef CPU_FEATURE_TYPEVAL
+#define CPU_FEATURE_TYPEVAL ELF_PLATFORM
+#endif
+
+/*
+ * Use module_cpu_feature_match(feature, module_init_function) to
+ * declare that
+ * a) the module shall be probed upon discovery of CPU feature 'feature'
+ * (typically at boot time using udev)
+ * b) the module must not be loaded if CPU feature 'feature' is not present
+ * (not even by manual insmod).
+ *
+ * For a list of legal values for 'feature', please consult the file
+ * 'asm/cpufeature.h' of your favorite architecture.
+ */
+#define module_cpu_feature_match(x, __init) \
+static struct cpu_feature const cpu_feature_match_ ## x[] = \
+ { { .feature = cpu_feature(x) }, { } }; \
+MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
+ \
+static int cpu_feature_match_ ## x ## _init(void) \
+{ \
+ if (!cpu_have_feature(cpu_feature(x))) \
+ return -ENODEV; \
+ return __init(); \
+} \
+module_init(cpu_feature_match_ ## x ## _init)
+
+#endif
+#endif
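As the comment block explains, the macro glues a device-table entry to the module's init routine. A hypothetical crypto module that should auto-load only on CPUs advertising an AES feature bit would look roughly like this; the feature name depends on the architecture's asm/cpufeature.h and CONFIG_GENERIC_CPU_AUTOPROBE is assumed:

#include <linux/module.h>
#include <linux/cpufeature.h>

static int __init example_aes_init(void)
{
	/* register cipher implementations here */
	return 0;
}

static void __exit example_aes_exit(void)
{
}

/*
 * Probe on CPUs with the (architecture-defined) AES feature and refuse
 * to load anywhere else, as described in the comment block above.
 */
module_cpu_feature_match(AES, example_aes_init);
module_exit(example_aes_exit);
MODULE_LICENSE("GPL");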
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
new file mode 100644
index 000000000..0414009e2
--- /dev/null
+++ b/include/linux/cpufreq-dt.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2014 Marvell
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CPUFREQ_DT_H__
+#define __CPUFREQ_DT_H__
+
+struct cpufreq_dt_platform_data {
+ /*
+ * True when each CPU has its own clock to control its
+ * frequency, false when all CPUs are controlled by a single
+ * clock.
+ */
+ bool independent_clocks;
+};
+
+#endif /* __CPUFREQ_DT_H__ */
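This structure is handed to the generic cpufreq-dt driver by platform code. One common way of doing that, shown here only as a sketch and assuming the "cpufreq-dt" platform device name, is to register a matching platform device that carries the data:

#include <linux/cpufreq-dt.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static struct cpufreq_dt_platform_data example_pdata = {
	/* Each cluster/CPU has its own clock on this (hypothetical) SoC. */
	.independent_clocks = true,
};

static int __init example_cpufreq_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(NULL, "cpufreq-dt", -1,
					     &example_pdata,
					     sizeof(example_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}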
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
new file mode 100644
index 000000000..2ee4888c1
--- /dev/null
+++ b/include/linux/cpufreq.h
@@ -0,0 +1,604 @@
+/*
+ * linux/include/linux/cpufreq.h
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_CPUFREQ_H
+#define _LINUX_CPUFREQ_H
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+
+/*********************************************************************
+ * CPUFREQ INTERFACE *
+ *********************************************************************/
+/*
+ * Frequency values here are CPU kHz
+ *
+ * Maximum transition latency is in nanoseconds - if it's unknown,
+ * CPUFREQ_ETERNAL shall be used.
+ */
+
+#define CPUFREQ_ETERNAL (-1)
+#define CPUFREQ_NAME_LEN 16
+/* Print length for names. Extra 1 space for accommodating '\n' in prints */
+#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
+
+struct cpufreq_governor;
+
+struct cpufreq_freqs {
+ unsigned int cpu; /* cpu nr */
+ unsigned int old;
+ unsigned int new;
+ u8 flags; /* flags of cpufreq_driver, see below. */
+};
+
+struct cpufreq_cpuinfo {
+ unsigned int max_freq;
+ unsigned int min_freq;
+
+ /* in 10^(-9) s = nanoseconds */
+ unsigned int transition_latency;
+};
+
+struct cpufreq_real_policy {
+ unsigned int min; /* in kHz */
+ unsigned int max; /* in kHz */
+ unsigned int policy; /* see above */
+ struct cpufreq_governor *governor; /* see below */
+};
+
+struct cpufreq_policy {
+ /* CPUs sharing clock, require sw coordination */
+ cpumask_var_t cpus; /* Online CPUs only */
+ cpumask_var_t related_cpus; /* Online + Offline CPUs */
+
+ unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
+ should set cpufreq */
+ unsigned int cpu; /* cpu nr of CPU managing this policy */
+ struct clk *clk;
+ struct cpufreq_cpuinfo cpuinfo;/* see above */
+
+ unsigned int min; /* in kHz */
+ unsigned int max; /* in kHz */
+ unsigned int cur; /* in kHz, only needed if cpufreq
+ * governors are used */
+ unsigned int restore_freq; /* = policy->cur before transition */
+ unsigned int suspend_freq; /* freq to set during suspend */
+
+ unsigned int policy; /* see above */
+ struct cpufreq_governor *governor; /* see below */
+ void *governor_data;
+ bool governor_enabled; /* governor start/stop flag */
+
+ struct work_struct update; /* if update_policy() needs to be
+ * called, but you're in IRQ context */
+
+ struct cpufreq_real_policy user_policy;
+ struct cpufreq_frequency_table *freq_table;
+
+ struct list_head policy_list;
+ struct kobject kobj;
+ struct completion kobj_unregister;
+
+ /*
+ * The rules for this semaphore:
+ * - Any routine that wants to read from the policy structure will
+ * do a down_read on this semaphore.
+ * - Any routine that will write to the policy structure and/or may take away
+ * the policy altogether (eg. CPU hotplug), will hold this lock in write
+ * mode before doing so.
+ *
+ * Additional rules:
+ * - Lock should not be held across
+ * __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+ */
+ struct rw_semaphore rwsem;
+
+ /* Synchronization for frequency transitions */
+ bool transition_ongoing; /* Tracks transition status */
+ spinlock_t transition_lock;
+ wait_queue_head_t transition_wait;
+ struct task_struct *transition_task; /* Task which is doing the transition */
+
+ /* cpufreq-stats */
+ struct cpufreq_stats *stats;
+
+ /* For cpufreq driver's internal use */
+ void *driver_data;
+};
+
+/* Only for ACPI */
+#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
+#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
+#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
+#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
+
+#ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
+void cpufreq_cpu_put(struct cpufreq_policy *policy);
+#else
+static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ return NULL;
+}
+static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
+#endif
+
+static inline bool policy_is_shared(struct cpufreq_policy *policy)
+{
+ return cpumask_weight(policy->cpus) > 1;
+}
+
+/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
+extern struct kobject *cpufreq_global_kobject;
+int cpufreq_get_global_kobject(void);
+void cpufreq_put_global_kobject(void);
+int cpufreq_sysfs_create_file(const struct attribute *attr);
+void cpufreq_sysfs_remove_file(const struct attribute *attr);
+
+#ifdef CONFIG_CPU_FREQ
+unsigned int cpufreq_get(unsigned int cpu);
+unsigned int cpufreq_quick_get(unsigned int cpu);
+unsigned int cpufreq_quick_get_max(unsigned int cpu);
+void disable_cpufreq(void);
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_update_policy(unsigned int cpu);
+bool have_governor_per_policy(void);
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+#else
+static inline unsigned int cpufreq_get(unsigned int cpu)
+{
+ return 0;
+}
+static inline unsigned int cpufreq_quick_get(unsigned int cpu)
+{
+ return 0;
+}
+static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ return 0;
+}
+static inline void disable_cpufreq(void) { }
+#endif
+
+/*********************************************************************
+ * CPUFREQ DRIVER INTERFACE *
+ *********************************************************************/
+
+#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
+#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
+#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
+
+struct freq_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpufreq_policy *, char *);
+ ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
+};
+
+#define cpufreq_freq_attr_ro(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define cpufreq_freq_attr_ro_perm(_name, _perm) \
+static struct freq_attr _name = \
+__ATTR(_name, _perm, show_##_name, NULL)
+
+#define cpufreq_freq_attr_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+struct global_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *a, struct attribute *b,
+ const char *c, size_t count);
+};
+
+#define define_one_global_ro(_name) \
+static struct global_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name) \
+static struct global_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+
+struct cpufreq_driver {
+ char name[CPUFREQ_NAME_LEN];
+ u8 flags;
+ void *driver_data;
+
+ /* needed by all drivers */
+ int (*init)(struct cpufreq_policy *policy);
+ int (*verify)(struct cpufreq_policy *policy);
+
+ /* define one out of two */
+ int (*setpolicy)(struct cpufreq_policy *policy);
+
+ /*
+ * On failure, should always restore frequency to policy->restore_freq
+ * (i.e. old freq).
+ */
+ int (*target)(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation); /* Deprecated */
+ int (*target_index)(struct cpufreq_policy *policy,
+ unsigned int index);
+ /*
+ * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
+ * unset.
+ *
+ * get_intermediate should return a stable intermediate frequency the
+ * platform wants to switch to, and target_intermediate() should set the CPU
+ * to that frequency, before jumping to the frequency corresponding
+ * to 'index'. Core will take care of sending notifications and driver
+ * doesn't have to handle them in target_intermediate() or
+ * target_index().
+ *
+ * Drivers can return '0' from get_intermediate() in case they don't
+ * wish to switch to intermediate frequency for some target frequency.
+ * In that case core will directly call ->target_index().
+ */
+ unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
+ int (*target_intermediate)(struct cpufreq_policy *policy,
+ unsigned int index);
+
+ /* should be defined, if possible */
+ unsigned int (*get)(unsigned int cpu);
+
+ /* optional */
+ int (*bios_limit)(int cpu, unsigned int *limit);
+
+ int (*exit)(struct cpufreq_policy *policy);
+ void (*stop_cpu)(struct cpufreq_policy *policy);
+ int (*suspend)(struct cpufreq_policy *policy);
+ int (*resume)(struct cpufreq_policy *policy);
+
+ /* Will be called after the driver is fully initialized */
+ void (*ready)(struct cpufreq_policy *policy);
+
+ struct freq_attr **attr;
+
+ /* platform specific boost support code */
+ bool boost_supported;
+ bool boost_enabled;
+ int (*set_boost)(int state);
+};
+
+/* flags */
+#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
+ all ->init() calls failed */
+#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other
+ kernel "constants" aren't
+ affected by frequency
+ transitions */
+#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume
+ speed mismatches */
+
+/*
+ * This should be set by platforms having multiple clock-domains, i.e.
+ * supporting multiple policies. With this flag set, per-policy sysfs
+ * directories for the governor are created under cpu/cpu<num>/cpufreq/, so
+ * different clusters can use the same governor with different tunables.
+ */
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
+
+/*
+ * Drivers that issue POSTCHANGE notifications from outside their ->target()
+ * routine must set this flag in cpufreq_driver->flags so that the core
+ * can handle them specially.
+ */
+#define CPUFREQ_ASYNC_NOTIFICATION (1 << 4)
+
+/*
+ * Set by drivers which want cpufreq core to check if CPU is running at a
+ * frequency present in freq-table exposed by the driver. For these drivers if
+ * CPU is found running at an out of table freq, we will try to set it to a freq
+ * from the table. And if that fails, we will stop further boot process by
+ * issuing a BUG_ON().
+ */
+#define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
+
+int cpufreq_register_driver(struct cpufreq_driver *driver_data);
+int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
+
+const char *cpufreq_get_current_driver(void);
+void *cpufreq_get_driver_data(void);
+
+static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
+ unsigned int min, unsigned int max)
+{
+ if (policy->min < min)
+ policy->min = min;
+ if (policy->max < min)
+ policy->max = min;
+ if (policy->min > max)
+ policy->min = max;
+ if (policy->max > max)
+ policy->max = max;
+ if (policy->min > policy->max)
+ policy->min = policy->max;
+ return;
+}
+
+static inline void
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+}
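
A minimal sketch of a driver ->verify() callback built on the helper above; drivers that expose a frequency table would typically just use cpufreq_generic_frequency_table_verify() (declared further down) instead. The foo_ name is hypothetical.

static int foo_cpufreq_verify(struct cpufreq_policy *policy)
{
        /* Clamp the requested policy to the CPU's hardware limits. */
        cpufreq_verify_within_cpu_limits(policy);
        return 0;
}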
+
+#ifdef CONFIG_CPU_FREQ
+void cpufreq_suspend(void);
+void cpufreq_resume(void);
+int cpufreq_generic_suspend(struct cpufreq_policy *policy);
+#else
+static inline void cpufreq_suspend(void) {}
+static inline void cpufreq_resume(void) {}
+#endif
+
+/*********************************************************************
+ * CPUFREQ NOTIFIER INTERFACE *
+ *********************************************************************/
+
+#define CPUFREQ_TRANSITION_NOTIFIER (0)
+#define CPUFREQ_POLICY_NOTIFIER (1)
+
+/* Transition notifiers */
+#define CPUFREQ_PRECHANGE (0)
+#define CPUFREQ_POSTCHANGE (1)
+
+/* Policy Notifiers */
+#define CPUFREQ_ADJUST (0)
+#define CPUFREQ_INCOMPATIBLE (1)
+#define CPUFREQ_NOTIFY (2)
+#define CPUFREQ_START (3)
+#define CPUFREQ_CREATE_POLICY (4)
+#define CPUFREQ_REMOVE_POLICY (5)
+
+#ifdef CONFIG_CPU_FREQ
+int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
+int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs);
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, int transition_failed);
+
+#else /* CONFIG_CPU_FREQ */
+static inline int cpufreq_register_notifier(struct notifier_block *nb,
+ unsigned int list)
+{
+ return 0;
+}
+static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
+ unsigned int list)
+{
+ return 0;
+}
+#endif /* !CONFIG_CPU_FREQ */
+
+/**
+ * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
+ * safe)
+ * @old: old value
+ * @div: divisor
+ * @mult: multiplier
+ *
+ *
+ * new = old * mult / div
+ */
+static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
+ u_int mult)
+{
+#if BITS_PER_LONG == 32
+ u64 result = ((u64) old) * ((u64) mult);
+ do_div(result, div);
+ return (unsigned long) result;
+
+#elif BITS_PER_LONG == 64
+ unsigned long result = old * ((u64) mult);
+ result /= div;
+ return result;
+#endif
+}
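
A short usage sketch: rescaling a calibration value such as loops_per_jiffy across a frequency change. Per the helper's argument order, the old frequency is the divisor and the new frequency the multiplier; foo_rescale_lpj() is a hypothetical wrapper.

static unsigned long foo_rescale_lpj(unsigned long old_lpj,
                                     unsigned int old_khz, unsigned int new_khz)
{
        /* new_lpj = old_lpj * new_khz / old_khz, safe on 32-bit arches */
        return cpufreq_scale(old_lpj, old_khz, new_khz);
}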
+
+/*********************************************************************
+ * CPUFREQ GOVERNORS *
+ *********************************************************************/
+
+/*
+ * If (cpufreq_driver->target) exists, the ->governor decides what frequency
+ * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
+ * two generic policies are available:
+ */
+#define CPUFREQ_POLICY_POWERSAVE (1)
+#define CPUFREQ_POLICY_PERFORMANCE (2)
+
+/* Governor Events */
+#define CPUFREQ_GOV_START 1
+#define CPUFREQ_GOV_STOP 2
+#define CPUFREQ_GOV_LIMITS 3
+#define CPUFREQ_GOV_POLICY_INIT 4
+#define CPUFREQ_GOV_POLICY_EXIT 5
+
+struct cpufreq_governor {
+ char name[CPUFREQ_NAME_LEN];
+ int initialized;
+ int (*governor) (struct cpufreq_policy *policy,
+ unsigned int event);
+ ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
+ char *buf);
+ int (*store_setspeed) (struct cpufreq_policy *policy,
+ unsigned int freq);
+	unsigned int max_transition_latency; /* HW must be able to switch to
+			the next freq faster than this value (in nanoseconds) or
+			we will fall back to the performance governor */
+ struct list_head governor_list;
+ struct module *owner;
+};
+
+/* Pass a target to the cpufreq driver */
+int cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation);
+int __cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation);
+int cpufreq_register_governor(struct cpufreq_governor *governor);
+void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+
+/* CPUFREQ DEFAULT GOVERNOR */
+/*
+ * The performance governor is the fallback governor if any other governor
+ * fails to auto-load due to latency restrictions.
+ */
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
+extern struct cpufreq_governor cpufreq_gov_performance;
+#endif
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
+extern struct cpufreq_governor cpufreq_gov_powersave;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
+extern struct cpufreq_governor cpufreq_gov_userspace;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
+extern struct cpufreq_governor cpufreq_gov_ondemand;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
+extern struct cpufreq_governor cpufreq_gov_conservative;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
+#endif
+
+/*********************************************************************
+ * FREQUENCY TABLE HELPERS *
+ *********************************************************************/
+
+/* Special Values of .frequency field */
+#define CPUFREQ_ENTRY_INVALID ~0u
+#define CPUFREQ_TABLE_END ~1u
+/* Special Values of .flags field */
+#define CPUFREQ_BOOST_FREQ (1 << 0)
+
+struct cpufreq_frequency_table {
+ unsigned int flags;
+ unsigned int driver_data; /* driver specific data, not used by core */
+ unsigned int frequency; /* kHz - doesn't need to be in ascending
+ * order */
+};
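
An illustrative, driver-private table (the frequencies are made up): entries may appear in any order, an entry can be marked invalid, a boost entry carries CPUFREQ_BOOST_FREQ, and the table must end with CPUFREQ_TABLE_END.

static struct cpufreq_frequency_table foo_freq_table[] = {
        { .driver_data = 0, .frequency =  396000 },
        { .driver_data = 1, .frequency =  792000 },
        { .driver_data = 2, .frequency = CPUFREQ_ENTRY_INVALID },
        { .driver_data = 3, .frequency = 1200000, .flags = CPUFREQ_BOOST_FREQ },
        { .frequency = CPUFREQ_TABLE_END },
};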
+
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
+#else
+static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table
+ **table)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table
+ **table)
+{
+}
+#endif
+
+static inline bool cpufreq_next_valid(struct cpufreq_frequency_table **pos)
+{
+ while ((*pos)->frequency != CPUFREQ_TABLE_END)
+ if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID)
+ return true;
+ else
+ (*pos)++;
+ return false;
+}
+
+/*
+ * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ */
+
+#define cpufreq_for_each_entry(pos, table) \
+ for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
+
+/*
+ * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
+ * excluding CPUFREQ_ENTRY_INVALID frequencies.
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ */
+
+#define cpufreq_for_each_valid_entry(pos, table) \
+ for (pos = table; cpufreq_next_valid(&pos); pos++)
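
A small sketch using the iterator: scanning a table for its highest valid frequency, skipping CPUFREQ_ENTRY_INVALID entries automatically.

static unsigned int foo_max_table_freq(struct cpufreq_frequency_table *table)
{
        struct cpufreq_frequency_table *pos;
        unsigned int max = 0;

        cpufreq_for_each_valid_entry(pos, table)
                if (pos->frequency > max)
                        max = pos->frequency;
        return max;
}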
+
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table);
+
+int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
+
+int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int target_freq,
+ unsigned int relation,
+ unsigned int *index);
+int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
+ unsigned int freq);
+
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
+
+#ifdef CONFIG_CPU_FREQ
+int cpufreq_boost_trigger_state(int state);
+int cpufreq_boost_supported(void);
+int cpufreq_boost_enabled(void);
+#else
+static inline int cpufreq_boost_trigger_state(int state)
+{
+ return 0;
+}
+static inline int cpufreq_boost_supported(void)
+{
+ return 0;
+}
+static inline int cpufreq_boost_enabled(void)
+{
+ return 0;
+}
+#endif
+/* the following function is for cpufreq core use only */
+struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
+
+/* the following are really really optional */
+extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr *cpufreq_generic_attr[];
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table);
+
+unsigned int cpufreq_generic_get(unsigned int cpu);
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table,
+ unsigned int transition_latency);
+#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
new file mode 100644
index 000000000..9c5e89254
--- /dev/null
+++ b/include/linux/cpuidle.h
@@ -0,0 +1,245 @@
+/*
+ * cpuidle.h - a generic framework for CPU idle power management
+ *
+ * (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * Shaohua Li <shaohua.li@intel.com>
+ * Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#ifndef _LINUX_CPUIDLE_H
+#define _LINUX_CPUIDLE_H
+
+#include <linux/percpu.h>
+#include <linux/list.h>
+#include <linux/hrtimer.h>
+
+#define CPUIDLE_STATE_MAX 10
+#define CPUIDLE_NAME_LEN 16
+#define CPUIDLE_DESC_LEN 32
+
+struct module;
+
+struct cpuidle_device;
+struct cpuidle_driver;
+
+
+/****************************
+ * CPUIDLE DEVICE INTERFACE *
+ ****************************/
+
+struct cpuidle_state_usage {
+ unsigned long long disable;
+ unsigned long long usage;
+ unsigned long long time; /* in US */
+};
+
+struct cpuidle_state {
+ char name[CPUIDLE_NAME_LEN];
+ char desc[CPUIDLE_DESC_LEN];
+
+ unsigned int flags;
+ unsigned int exit_latency; /* in US */
+ int power_usage; /* in mW */
+ unsigned int target_residency; /* in US */
+ bool disabled; /* disabled on all CPUs */
+
+ int (*enter) (struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
+
+ int (*enter_dead) (struct cpuidle_device *dev, int index);
+
+ /*
+ * CPUs execute ->enter_freeze with the local tick or entire timekeeping
+ * suspended, so it must not re-enable interrupts at any point (even
+ * temporarily) or attempt to change states of clock event devices.
+ */
+ void (*enter_freeze) (struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index);
+};
+
+/* Idle State Flags */
+#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
+
+#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
+
+struct cpuidle_device_kobj;
+struct cpuidle_state_kobj;
+struct cpuidle_driver_kobj;
+
+struct cpuidle_device {
+ unsigned int registered:1;
+ unsigned int enabled:1;
+ unsigned int cpu;
+
+ int last_residency;
+ struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
+ struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
+ struct cpuidle_driver_kobj *kobj_driver;
+ struct cpuidle_device_kobj *kobj_dev;
+ struct list_head device_list;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+ int safe_state_index;
+ cpumask_t coupled_cpus;
+ struct cpuidle_coupled *coupled;
+#endif
+};
+
+DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
+DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
+
+/**
+ * cpuidle_get_last_residency - retrieves the last state's residency time
+ * @dev: the target CPU
+ */
+static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
+{
+ return dev->last_residency;
+}
+
+
+/****************************
+ * CPUIDLE DRIVER INTERFACE *
+ ****************************/
+
+struct cpuidle_driver {
+ const char *name;
+ struct module *owner;
+ int refcnt;
+
+ /* used by the cpuidle framework to setup the broadcast timer */
+ unsigned int bctimer:1;
+ /* states array must be ordered in decreasing power consumption */
+ struct cpuidle_state states[CPUIDLE_STATE_MAX];
+ int state_count;
+ int safe_state_index;
+
+ /* the driver handles the cpus in cpumask */
+ struct cpumask *cpumask;
+};
+
+#ifdef CONFIG_CPU_IDLE
+extern void disable_cpuidle(void);
+extern bool cpuidle_not_available(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+
+extern int cpuidle_select(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+extern int cpuidle_enter(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int index);
+extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
+
+extern int cpuidle_register_driver(struct cpuidle_driver *drv);
+extern struct cpuidle_driver *cpuidle_get_driver(void);
+extern struct cpuidle_driver *cpuidle_driver_ref(void);
+extern void cpuidle_driver_unref(void);
+extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
+extern int cpuidle_register_device(struct cpuidle_device *dev);
+extern void cpuidle_unregister_device(struct cpuidle_device *dev);
+extern int cpuidle_register(struct cpuidle_driver *drv,
+ const struct cpumask *const coupled_cpus);
+extern void cpuidle_unregister(struct cpuidle_driver *drv);
+extern void cpuidle_pause_and_lock(void);
+extern void cpuidle_resume_and_unlock(void);
+extern void cpuidle_pause(void);
+extern void cpuidle_resume(void);
+extern int cpuidle_enable_device(struct cpuidle_device *dev);
+extern void cpuidle_disable_device(struct cpuidle_device *dev);
+extern int cpuidle_play_dead(void);
+extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+extern int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+
+extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+#else
+static inline void disable_cpuidle(void) { }
+static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return true; }
+static inline int cpuidle_select(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_enter(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int index)
+{return -ENODEV; }
+static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
+static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
+{return -ENODEV; }
+static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
+static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
+static inline void cpuidle_driver_unref(void) {}
+static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
+static inline int cpuidle_register_device(struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
+static inline int cpuidle_register(struct cpuidle_driver *drv,
+ const struct cpumask *const coupled_cpus)
+{return -ENODEV; }
+static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
+static inline void cpuidle_pause_and_lock(void) { }
+static inline void cpuidle_resume_and_unlock(void) { }
+static inline void cpuidle_pause(void) { }
+static inline void cpuidle_resume(void) { }
+static inline int cpuidle_enable_device(struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
+static inline int cpuidle_play_dead(void) {return -ENODEV; }
+static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
+ struct cpuidle_device *dev) {return NULL; }
+#endif
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
+#else
+static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+}
+#endif
+
+/******************************
+ * CPUIDLE GOVERNOR INTERFACE *
+ ******************************/
+
+struct cpuidle_governor {
+ char name[CPUIDLE_NAME_LEN];
+ struct list_head governor_list;
+ unsigned int rating;
+
+ int (*enable) (struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+ void (*disable) (struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+
+ int (*select) (struct cpuidle_driver *drv,
+ struct cpuidle_device *dev);
+ void (*reflect) (struct cpuidle_device *dev, int index);
+
+ struct module *owner;
+};
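
A skeleton sketch of registering a governor (assuming <linux/module.h> and <linux/init.h> are included); the foo_* callbacks and the trivial "always pick state 0" policy are purely illustrative.

static int foo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        /* A real governor predicts the idle period; this one is trivial. */
        return 0;
}

static void foo_reflect(struct cpuidle_device *dev, int index)
{
        /* e.g. feed dev->last_residency back into the prediction. */
}

static struct cpuidle_governor foo_governor = {
        .name    = "foo",
        .rating  = 10,
        .select  = foo_select,
        .reflect = foo_reflect,
        .owner   = THIS_MODULE,
};

static int __init foo_governor_init(void)
{
        return cpuidle_register_governor(&foo_governor);
}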
+
+#ifdef CONFIG_CPU_IDLE
+extern int cpuidle_register_governor(struct cpuidle_governor *gov);
+#else
+static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
+{return 0;}
+#endif
+
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+#define CPUIDLE_DRIVER_STATE_START 1
+#else
+#define CPUIDLE_DRIVER_STATE_START 0
+#endif
+
+#endif /* _LINUX_CPUIDLE_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
new file mode 100644
index 000000000..59915ea53
--- /dev/null
+++ b/include/linux/cpumask.h
@@ -0,0 +1,820 @@
+#ifndef __LINUX_CPUMASK_H
+#define __LINUX_CPUMASK_H
+
+/*
+ * Cpumasks provide a bitmap suitable for representing the
+ * set of CPUs in a system, one bit position per CPU number. In general,
+ * only nr_cpu_ids (<= NR_CPUS) bits are valid.
+ */
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+
+/* Don't assign or return these: may not be this big! */
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+
+/**
+ * cpumask_bits - get the bits in a cpumask
+ * @maskp: the struct cpumask *
+ *
+ * You should only assume nr_cpu_ids bits of this mask are valid. This is
+ * a macro so it's const-correct.
+ */
+#define cpumask_bits(maskp) ((maskp)->bits)
+
+/**
+ * cpumask_pr_args - printf args to output a cpumask
+ * @maskp: cpumask to be printed
+ *
+ * Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
+ */
+#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
+
+#if NR_CPUS == 1
+#define nr_cpu_ids 1
+#else
+extern int nr_cpu_ids;
+#endif
+
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
+ * not all bits may be allocated. */
+#define nr_cpumask_bits nr_cpu_ids
+#else
+#define nr_cpumask_bits NR_CPUS
+#endif
+
+/*
+ * The following particular system cpumasks and operations manage
+ * possible, present, active and online cpus.
+ *
+ * cpu_possible_mask- has bit 'cpu' set iff cpu is populatable
+ * cpu_present_mask - has bit 'cpu' set iff cpu is populated
+ * cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler
+ * cpu_active_mask - has bit 'cpu' set iff cpu available to migration
+ *
+ * If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
+ *
+ * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ * that it is possible might ever be plugged in at anytime during the
+ * life of that system boot. The cpu_present_mask is dynamic(*),
+ * representing which CPUs are currently plugged in. And
+ * cpu_online_mask is the dynamic subset of cpu_present_mask,
+ * indicating those CPUs available for scheduling.
+ *
+ * If HOTPLUG is enabled, then cpu_possible_mask is forced to have
+ * all NR_CPUS bits set, otherwise it is just the set of CPUs that
+ * ACPI reports present at boot.
+ *
+ * If HOTPLUG is enabled, then cpu_present_mask varies dynamically,
+ * depending on what ACPI reports as currently plugged in, otherwise
+ * cpu_present_mask is just a copy of cpu_possible_mask.
+ *
+ * (*) Well, cpu_present_mask is dynamic in the hotplug case. If not
+ * hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
+ *
+ * Subtleties:
+ * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * assumption that their single CPU is online. The UP
+ * cpu_{online,possible,present}_masks are placebos. Changing them
+ * will have no useful effect on the following num_*_cpus()
+ * and cpu_*() macros in the UP case. This ugliness is a UP
+ * optimization - don't waste any instructions or memory references
+ * asking if you're online or how many CPUs there are if there is
+ * only one CPU.
+ */
+
+extern const struct cpumask *const cpu_possible_mask;
+extern const struct cpumask *const cpu_online_mask;
+extern const struct cpumask *const cpu_present_mask;
+extern const struct cpumask *const cpu_active_mask;
+
+#if NR_CPUS > 1
+#define num_online_cpus() cpumask_weight(cpu_online_mask)
+#define num_possible_cpus() cpumask_weight(cpu_possible_mask)
+#define num_present_cpus() cpumask_weight(cpu_present_mask)
+#define num_active_cpus() cpumask_weight(cpu_active_mask)
+#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
+#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
+#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
+#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
+#else
+#define num_online_cpus() 1U
+#define num_possible_cpus() 1U
+#define num_present_cpus() 1U
+#define num_active_cpus() 1U
+#define cpu_online(cpu) ((cpu) == 0)
+#define cpu_possible(cpu) ((cpu) == 0)
+#define cpu_present(cpu) ((cpu) == 0)
+#define cpu_active(cpu) ((cpu) == 0)
+#endif
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+ WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+ return cpu;
+}
+
+#if NR_CPUS == 1
+/* Uniprocessor. Assume all masks are "1". */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+ return 0;
+}
+
+/* Valid inputs for n are -1 and 0. */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ return n+1;
+}
+
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+ return n+1;
+}
+
+static inline unsigned int cpumask_next_and(int n,
+ const struct cpumask *srcp,
+ const struct cpumask *andp)
+{
+ return n+1;
+}
+
+/* cpu must be a valid cpu, ie 0, so there's no other choice. */
+static inline unsigned int cpumask_any_but(const struct cpumask *mask,
+ unsigned int cpu)
+{
+ return 1;
+}
+
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+ return 0;
+}
+
+#define for_each_cpu(cpu, mask) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_not(cpu, mask) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
+#define for_each_cpu_and(cpu, mask, and) \
+ for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
+#else
+/**
+ * cpumask_first - get the first cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+static inline unsigned int cpumask_first(const struct cpumask *srcp)
+{
+ return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_next - get the next cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus set.
+ */
+static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+/**
+ * cpumask_next_zero - get the next unset cpu in a cpumask
+ * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if no further cpus unset.
+ */
+static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpumask_check(n);
+ return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
+}
+
+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
+int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+unsigned int cpumask_local_spread(unsigned int i, int node);
+
+/**
+ * for_each_cpu - iterate over every cpu in a mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = cpumask_next((cpu), (mask)), \
+ (cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_not - iterate over every cpu in a complemented mask
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_not(cpu, mask) \
+ for ((cpu) = -1; \
+ (cpu) = cpumask_next_zero((cpu), (mask)), \
+ (cpu) < nr_cpu_ids;)
+
+/**
+ * for_each_cpu_and - iterate over every cpu in both masks
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the first cpumask pointer
+ * @and: the second cpumask pointer
+ *
+ * This saves a temporary CPU mask in many places. It is equivalent to:
+ * struct cpumask tmp;
+ * cpumask_and(&tmp, &mask, &and);
+ * for_each_cpu(cpu, &tmp)
+ * ...
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_and(cpu, mask, and) \
+ for ((cpu) = -1; \
+ (cpu) = cpumask_next_and((cpu), (mask), (and)), \
+ (cpu) < nr_cpu_ids;)
+#endif /* SMP */
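
A brief usage sketch: counting the CPUs of an arbitrary mask that are currently online, without allocating a temporary cpumask.

static unsigned int foo_count_online_in(const struct cpumask *mask)
{
        unsigned int cpu, n = 0;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                n++;
        return n;
}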
+
+#define CPU_BITS_NONE \
+{ \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
+}
+
+#define CPU_BITS_CPU0 \
+{ \
+ [0] = 1UL \
+}
+
+/**
+ * cpumask_set_cpu - set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+ set_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_clear_cpu - clear a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+ clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+/**
+ * cpumask_test_cpu - test for a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * Returns 1 if @cpu is set in @cpumask, else returns 0
+ */
+static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
+{
+ return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
+}
+
+/**
+ * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ *
+ * test_and_set_bit wrapper for cpumasks.
+ */
+static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
+{
+ return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
+}
+
+/**
+ * cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @cpumask: the cpumask pointer
+ *
+ * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0
+ *
+ * test_and_clear_bit wrapper for cpumasks.
+ */
+static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
+{
+ return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
+}
+
+/**
+ * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_setall(struct cpumask *dstp)
+{
+ bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
+ * @dstp: the cpumask pointer
+ */
+static inline void cpumask_clear(struct cpumask *dstp)
+{
+ bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_and - *dstp = *src1p & *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * If *@dstp is empty, returns 0, else returns 1
+ */
+static inline int cpumask_and(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_or - *dstp = *src1p | *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_xor - *dstp = *src1p ^ *src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline void cpumask_xor(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_andnot - *dstp = *src1p & ~*src2p
+ * @dstp: the cpumask result
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * If *@dstp is empty, returns 0, else returns 1
+ */
+static inline int cpumask_andnot(struct cpumask *dstp,
+ const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
+ cpumask_bits(src2p), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_complement - *dstp = ~*srcp
+ * @dstp: the cpumask result
+ * @srcp: the input to invert
+ */
+static inline void cpumask_complement(struct cpumask *dstp,
+ const struct cpumask *srcp)
+{
+ bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_equal - *src1p == *src2p
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_equal(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_intersects - (*src1p & *src2p) != 0
+ * @src1p: the first input
+ * @src2p: the second input
+ */
+static inline bool cpumask_intersects(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_subset - (*src1p & ~*src2p) == 0
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Returns 1 if *@src1p is a subset of *@src2p, else returns 0
+ */
+static inline int cpumask_subset(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
+ return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_empty - *srcp == 0
+ * @srcp: the cpumask in which all cpus < nr_cpu_ids are checked to be clear.
+ */
+static inline bool cpumask_empty(const struct cpumask *srcp)
+{
+ return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_full - *srcp == 0xFFFFFFFF...
+ * @srcp: the cpumask in which all cpus < nr_cpu_ids are checked to be set.
+ */
+static inline bool cpumask_full(const struct cpumask *srcp)
+{
+ return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_weight - Count of bits in *srcp
+ * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ */
+static inline unsigned int cpumask_weight(const struct cpumask *srcp)
+{
+ return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_right - *dstp = *srcp >> n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_right(struct cpumask *dstp,
+ const struct cpumask *srcp, int n)
+{
+ bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_shift_left - *dstp = *srcp << n
+ * @dstp: the cpumask result
+ * @srcp: the input to shift
+ * @n: the number of bits to shift by
+ */
+static inline void cpumask_shift_left(struct cpumask *dstp,
+ const struct cpumask *srcp, int n)
+{
+ bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
+ nr_cpumask_bits);
+}
+
+/**
+ * cpumask_copy - *dstp = *srcp
+ * @dstp: the result
+ * @srcp: the input cpumask
+ */
+static inline void cpumask_copy(struct cpumask *dstp,
+ const struct cpumask *srcp)
+{
+ bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_any - pick a "random" cpu from *srcp
+ * @srcp: the input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any(srcp) cpumask_first(srcp)
+
+/**
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ */
+#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
+
+/**
+ * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
+ * @mask1: the first input cpumask
+ * @mask2: the second input cpumask
+ *
+ * Returns >= nr_cpu_ids if no cpus set.
+ */
+#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
+
+/**
+ * cpumask_of - the cpumask containing just a given cpu
+ * @cpu: the cpu (<= nr_cpu_ids)
+ */
+#define cpumask_of(cpu) (get_cpu_mask(cpu))
+
+/**
+ * cpumask_parse_user - extract a cpumask from a user string
+ * @buf: the buffer to extract from
+ * @len: the length of the buffer
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpumask_parse_user(const char __user *buf, int len,
+ struct cpumask *dstp)
+{
+ return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+}
+
+/**
+ * cpumask_parselist_user - extract a cpumask from a user string
+ * @buf: the buffer to extract from
+ * @len: the length of the buffer
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpumask_parselist_user(const char __user *buf, int len,
+ struct cpumask *dstp)
+{
+ return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
+ nr_cpu_ids);
+}
+
+/**
+ * cpumask_parse - extract a cpumask from a string
+ * @buf: the buffer to extract from
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+{
+ char *nl = strchr(buf, '\n');
+ unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
+
+ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
+}
+
+/**
+ * cpulist_parse - extract a cpumask from a user string of ranges
+ * @buf: the buffer to extract from
+ * @dstp: the cpumask to set.
+ *
+ * Returns -errno, or 0 for success.
+ */
+static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+{
+ return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
+}
+
+/**
+ * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ *
+ * This will eventually be a runtime variable, depending on nr_cpu_ids.
+ */
+static inline size_t cpumask_size(void)
+{
+ return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
+}
+
+/*
+ * cpumask_var_t: struct cpumask for stack usage.
+ *
+ * Oh, the wicked games we play! In order to make kernel coding a
+ * little more difficult, we typedef cpumask_var_t to an array or a
+ * pointer: doing &mask on an array is a noop, so it still works.
+ *
+ * ie.
+ * cpumask_var_t tmpmask;
+ * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ * return -ENOMEM;
+ *
+ * ... use 'tmpmask' like a normal struct cpumask * ...
+ *
+ * free_cpumask_var(tmpmask);
+ *
+ *
+ * However, there is one notable exception: alloc_cpumask_var() allocates
+ * only nr_cpumask_bits bits (a real cpumask_t, on the other hand, always has
+ * NR_CPUS bits). Therefore you must not dereference a cpumask_var_t:
+ *
+ *	cpumask_var_t tmpmask;
+ *	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ *		return -ENOMEM;
+ *
+ *	var = *tmpmask;
+ *
+ * This code does an NR_CPUS-length memcpy and can corrupt memory.
+ * cpumask_copy() provides safe copy functionality instead.
+ *
+ * Note that there is another pitfall here: if you define a cpumask_var_t
+ * as a percpu variable, the way to obtain the address of the cpumask
+ * structure determines which this_cpu_* operation must be used.
+ * Please use this_cpu_cpumask_var_ptr() in those cases; the direct use
+ * of this_cpu_ptr() or this_cpu_read() will fail when the other
+ * cpumask_var_t implementation is configured.
+ */
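
A sketch of the safe pattern described above (assuming GFP_KERNEL and -ENOMEM are available via the usual headers): allocate, copy with cpumask_copy() rather than by dereferencing, and free. foo_snapshot_online() is a hypothetical helper.

static int foo_snapshot_online(struct cpumask *dst)
{
        cpumask_var_t tmp;

        if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(tmp, cpu_online_mask);
        cpumask_copy(dst, tmp);         /* nr_cpumask_bits-safe, unlike *dst = *tmp */
        free_cpumask_var(tmp);
        return 0;
}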
+#ifdef CONFIG_CPUMASK_OFFSTACK
+typedef struct cpumask *cpumask_var_t;
+
+#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
+
+bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
+bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
+void free_cpumask_var(cpumask_var_t mask);
+void free_bootmem_cpumask_var(cpumask_var_t mask);
+
+#else
+typedef struct cpumask cpumask_var_t[1];
+
+#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
+
+static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ return true;
+}
+
+static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+ int node)
+{
+ return true;
+}
+
+static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ cpumask_clear(*mask);
+ return true;
+}
+
+static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+ int node)
+{
+ cpumask_clear(*mask);
+ return true;
+}
+
+static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
+{
+}
+
+static inline void free_cpumask_var(cpumask_var_t mask)
+{
+}
+
+static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+{
+}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
+
+/* It's common to want to use cpu_all_mask in struct member initializers,
+ * so it has to refer to an address rather than a pointer. */
+extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
+#define cpu_all_mask to_cpumask(cpu_all_bits)
+
+/* First bits of cpu_bit_bitmap are in fact unset. */
+#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
+
+#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
+#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
+#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
+
+/* Wrappers for arch boot code to manipulate normally-constant masks */
+void set_cpu_possible(unsigned int cpu, bool possible);
+void set_cpu_present(unsigned int cpu, bool present);
+void set_cpu_online(unsigned int cpu, bool online);
+void set_cpu_active(unsigned int cpu, bool active);
+void init_cpu_present(const struct cpumask *src);
+void init_cpu_possible(const struct cpumask *src);
+void init_cpu_online(const struct cpumask *src);
+
+/**
+ * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * @bitmap: the bitmap
+ *
+ * There are a few places where cpumask_var_t isn't appropriate and
+ * static cpumasks must be used (eg. very early boot), yet we don't
+ * expose the definition of 'struct cpumask'.
+ *
+ * This does the conversion, and can be used as a constant initializer.
+ */
+#define to_cpumask(bitmap) \
+ ((struct cpumask *)(1 ? (bitmap) \
+ : (void *)sizeof(__check_is_bitmap(bitmap))))
+
+static inline int __check_is_bitmap(const unsigned long *bitmap)
+{
+ return 1;
+}
+
+/*
+ * Special-case data structure for "single bit set only" constant CPU masks.
+ *
+ * We pre-generate all the 64 (or 32) possible bit positions, with enough
+ * padding to the left and the right, and return the constant pointer
+ * appropriately offset.
+ */
+extern const unsigned long
+ cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];
+
+static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
+{
+ const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
+ p -= cpu / BITS_PER_LONG;
+ return to_cpumask(p);
+}
+
+#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_BITS_ALL \
+{ \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
+}
+
+#else /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_BITS_ALL \
+{ \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
+}
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+/**
+ * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
+ * as comma-separated list of cpus or hex values of cpumask
+ * @list: indicates whether the cpumask must be printed as a list
+ * @mask: the cpumask to copy
+ * @buf: the buffer to copy into
+ *
+ * Returns the length of the (null-terminated) @buf string, zero if
+ * nothing is copied.
+ */
+static inline ssize_t
+cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
+{
+ return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
+ nr_cpu_ids);
+}
+
+#if NR_CPUS <= BITS_PER_LONG
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
+} }
+#else
+#define CPU_MASK_ALL \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
+ [BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
+} }
+#endif /* NR_CPUS > BITS_PER_LONG */
+
+#define CPU_MASK_NONE \
+(cpumask_t) { { \
+ [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
+} }
+
+#define CPU_MASK_CPU0 \
+(cpumask_t) { { \
+ [0] = 1UL \
+} }
+
+#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
new file mode 100644
index 000000000..1b357997c
--- /dev/null
+++ b/include/linux/cpuset.h
@@ -0,0 +1,242 @@
+#ifndef _LINUX_CPUSET_H
+#define _LINUX_CPUSET_H
+/*
+ * cpuset interface
+ *
+ * Copyright (C) 2003 BULL SA
+ * Copyright (C) 2004-2006 Silicon Graphics, Inc.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/nodemask.h>
+#include <linux/mm.h>
+#include <linux/jump_label.h>
+
+#ifdef CONFIG_CPUSETS
+
+extern struct static_key cpusets_enabled_key;
+static inline bool cpusets_enabled(void)
+{
+ return static_key_false(&cpusets_enabled_key);
+}
+
+static inline int nr_cpusets(void)
+{
+ /* jump label reference count + the top-level cpuset */
+ return static_key_count(&cpusets_enabled_key) + 1;
+}
+
+static inline void cpuset_inc(void)
+{
+ static_key_slow_inc(&cpusets_enabled_key);
+}
+
+static inline void cpuset_dec(void)
+{
+ static_key_slow_dec(&cpusets_enabled_key);
+}
+
+extern int cpuset_init(void);
+extern void cpuset_init_smp(void);
+extern void cpuset_update_active_cpus(bool cpu_online);
+extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+#define cpuset_current_mems_allowed (current->mems_allowed)
+void cpuset_init_current_mems_allowed(void);
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
+
+extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
+
+static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
+{
+ return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
+}
+
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+ return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+}
+
+extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
+ const struct task_struct *tsk2);
+
+#define cpuset_memory_pressure_bump() \
+ do { \
+ if (cpuset_memory_pressure_enabled) \
+ __cpuset_memory_pressure_bump(); \
+ } while (0)
+extern int cpuset_memory_pressure_enabled;
+extern void __cpuset_memory_pressure_bump(void);
+
+extern void cpuset_task_status_allowed(struct seq_file *m,
+ struct task_struct *task);
+extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk);
+
+extern int cpuset_mem_spread_node(void);
+extern int cpuset_slab_spread_node(void);
+
+static inline int cpuset_do_page_mem_spread(void)
+{
+ return task_spread_page(current);
+}
+
+static inline int cpuset_do_slab_mem_spread(void)
+{
+ return task_spread_slab(current);
+}
+
+extern int current_cpuset_is_being_rebound(void);
+
+extern void rebuild_sched_domains(void);
+
+extern void cpuset_print_task_mems_allowed(struct task_struct *p);
+
+/*
+ * read_mems_allowed_begin is required when making decisions involving
+ * mems_allowed, such as during page allocation. mems_allowed can be updated
+ * in parallel, and depending on the new value an operation can fail,
+ * potentially causing a process failure. A retry loop with
+ * read_mems_allowed_begin and read_mems_allowed_retry prevents these
+ * artificial failures.
+ */
+static inline unsigned int read_mems_allowed_begin(void)
+{
+ return read_seqcount_begin(&current->mems_allowed_seq);
+}
+
+/*
+ * If this returns true, the operation that took place after
+ * read_mems_allowed_begin may have failed artificially due to a concurrent
+ * update of mems_allowed. It is up to the caller to retry the operation if
+ * appropriate.
+ */
+static inline bool read_mems_allowed_retry(unsigned int seq)
+{
+ return read_seqcount_retry(&current->mems_allowed_seq, seq);
+}
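
A sketch of the retry loop described above; foo_try_alloc() stands in for any allocation attempt that consults current->mems_allowed, and the usual mm headers are assumed.

static struct page *foo_alloc_page(gfp_t gfp)
{
        struct page *page;
        unsigned int seq;

        do {
                seq = read_mems_allowed_begin();
                page = foo_try_alloc(gfp, &cpuset_current_mems_allowed);
        } while (!page && read_mems_allowed_retry(seq));

        return page;
}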
+
+static inline void set_mems_allowed(nodemask_t nodemask)
+{
+ unsigned long flags;
+
+ task_lock(current);
+ local_irq_save(flags);
+ write_seqcount_begin(&current->mems_allowed_seq);
+ current->mems_allowed = nodemask;
+ write_seqcount_end(&current->mems_allowed_seq);
+ local_irq_restore(flags);
+ task_unlock(current);
+}
+
+#else /* !CONFIG_CPUSETS */
+
+static inline bool cpusets_enabled(void) { return false; }
+
+static inline int cpuset_init(void) { return 0; }
+static inline void cpuset_init_smp(void) {}
+
+static inline void cpuset_update_active_cpus(bool cpu_online)
+{
+ partition_sched_domains(1, NULL, NULL);
+}
+
+static inline void cpuset_cpus_allowed(struct task_struct *p,
+ struct cpumask *mask)
+{
+ cpumask_copy(mask, cpu_possible_mask);
+}
+
+static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+{
+}
+
+static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
+{
+ return node_possible_map;
+}
+
+#define cpuset_current_mems_allowed (node_states[N_MEMORY])
+static inline void cpuset_init_current_mems_allowed(void) {}
+
+static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
+{
+ return 1;
+}
+
+static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
+{
+ return 1;
+}
+
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+ return 1;
+}
+
+static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
+ const struct task_struct *tsk2)
+{
+ return 1;
+}
+
+static inline void cpuset_memory_pressure_bump(void) {}
+
+static inline void cpuset_task_status_allowed(struct seq_file *m,
+ struct task_struct *task)
+{
+}
+
+static inline int cpuset_mem_spread_node(void)
+{
+ return 0;
+}
+
+static inline int cpuset_slab_spread_node(void)
+{
+ return 0;
+}
+
+static inline int cpuset_do_page_mem_spread(void)
+{
+ return 0;
+}
+
+static inline int cpuset_do_slab_mem_spread(void)
+{
+ return 0;
+}
+
+static inline int current_cpuset_is_being_rebound(void)
+{
+ return 0;
+}
+
+static inline void rebuild_sched_domains(void)
+{
+ partition_sched_domains(1, NULL, NULL);
+}
+
+static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
+{
+}
+
+static inline void set_mems_allowed(nodemask_t nodemask)
+{
+}
+
+static inline unsigned int read_mems_allowed_begin(void)
+{
+ return 0;
+}
+
+static inline bool read_mems_allowed_retry(unsigned int seq)
+{
+ return false;
+}
+
+#endif /* !CONFIG_CPUSETS */
+
+#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/cputime.h b/include/linux/cputime.h
new file mode 100644
index 000000000..f2eb2ee53
--- /dev/null
+++ b/include/linux/cputime.h
@@ -0,0 +1,16 @@
+#ifndef __LINUX_CPUTIME_H
+#define __LINUX_CPUTIME_H
+
+#include <asm/cputime.h>
+
+#ifndef cputime_to_nsecs
+# define cputime_to_nsecs(__ct) \
+ (cputime_to_usecs(__ct) * NSEC_PER_USEC)
+#endif
+
+#ifndef nsecs_to_cputime
+# define nsecs_to_cputime(__nsecs) \
+ usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
+#endif
+
+#endif /* __LINUX_CPUTIME_H */
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
new file mode 100644
index 000000000..3849fce7e
--- /dev/null
+++ b/include/linux/crash_dump.h
@@ -0,0 +1,91 @@
+#ifndef LINUX_CRASH_DUMP_H
+#define LINUX_CRASH_DUMP_H
+
+#ifdef CONFIG_CRASH_DUMP
+#include <linux/kexec.h>
+#include <linux/proc_fs.h>
+#include <linux/elf.h>
+
+#include <asm/pgtable.h> /* for pgprot_t */
+
+#define ELFCORE_ADDR_MAX (-1ULL)
+#define ELFCORE_ADDR_ERR (-2ULL)
+
+extern unsigned long long elfcorehdr_addr;
+extern unsigned long long elfcorehdr_size;
+
+extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
+extern void elfcorehdr_free(unsigned long long addr);
+extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
+extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
+extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
+ unsigned long from, unsigned long pfn,
+ unsigned long size, pgprot_t prot);
+
+extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
+ unsigned long, int);
+void vmcore_cleanup(void);
+
+/* Architecture code defines this if there are other possible ELF
+ * machine types, e.g. on bi-arch capable hardware. */
+#ifndef vmcore_elf_check_arch_cross
+#define vmcore_elf_check_arch_cross(x) 0
+#endif
+
+/*
+ * Architecture code can redefine this if there are any special checks
+ * needed for 64-bit ELF vmcores. In case of 32-bit only architecture,
+ * this can be set to zero.
+ */
+#ifndef vmcore_elf64_check_arch
+#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
+#endif
+
+/*
+ * is_kdump_kernel() checks whether this kernel is booting after a panic of
+ * the previous kernel. This is determined by checking whether the previous
+ * kernel passed the ELF core header address on the command line.
+ *
+ * This is not just a test of whether CONFIG_CRASH_DUMP is enabled. It
+ * returns 1 only if CONFIG_CRASH_DUMP=y and the kernel is booting after a
+ * panic of the previous kernel.
+ */
+
+static inline int is_kdump_kernel(void)
+{
+ return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
+}
+
+/* is_vmcore_usable() checks if the kernel is booting after a panic and
+ * the vmcore region is usable.
+ *
+ * This makes use of the fact that due to alignment -2ULL is not
+ * a valid pointer, much in the vein of IS_ERR(), except
+ * dealing directly with an unsigned long long rather than a pointer.
+ */
+
+static inline int is_vmcore_usable(void)
+{
+ return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+}
+
+/* vmcore_unusable() marks the vmcore as unusable,
+ * without disturbing the logic of is_kdump_kernel()
+ */
+
+static inline void vmcore_unusable(void)
+{
+ if (is_kdump_kernel())
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
+}
+
+#define HAVE_OLDMEM_PFN_IS_RAM 1
+extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
+extern void unregister_oldmem_pfn_is_ram(void);
+
+#else /* !CONFIG_CRASH_DUMP */
+static inline int is_kdump_kernel(void) { return 0; }
+#endif /* CONFIG_CRASH_DUMP */
+
+extern unsigned long saved_max_pfn;
+#endif /* LINUX_CRASHDUMP_H */
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h
new file mode 100644
index 000000000..f52696a1f
--- /dev/null
+++ b/include/linux/crc-ccitt.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_CRC_CCITT_H
+#define _LINUX_CRC_CCITT_H
+
+#include <linux/types.h>
+
+extern u16 const crc_ccitt_table[256];
+
+extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len);
+
+static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
+{
+ return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
+}
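
A sketch showing what the exported crc_ccitt() routine does, one byte at a time, via the inline helper above; the 0xffff seed is a common convention, but protocols differ. foo_crc_ccitt_buf() is a hypothetical wrapper.

static u16 foo_crc_ccitt_buf(const u8 *buf, size_t len)
{
        u16 crc = 0xffff;       /* common seed; some users start from 0 */
        size_t i;

        for (i = 0; i < len; i++)
                crc = crc_ccitt_byte(crc, buf[i]);
        return crc;
}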
+
+#endif /* _LINUX_CRC_CCITT_H */
diff --git a/include/linux/crc-itu-t.h b/include/linux/crc-itu-t.h
new file mode 100644
index 000000000..84920f3cc
--- /dev/null
+++ b/include/linux/crc-itu-t.h
@@ -0,0 +1,28 @@
+/*
+ * crc-itu-t.h - CRC ITU-T V.41 routine
+ *
+ * Implements the standard CRC ITU-T V.41:
+ * Width 16
+ *   Poly  0x1021 (x^16 + x^12 + x^5 + 1)
+ * Init 0
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#ifndef CRC_ITU_T_H
+#define CRC_ITU_T_H
+
+#include <linux/types.h>
+
+extern u16 const crc_itu_t_table[256];
+
+extern u16 crc_itu_t(u16 crc, const u8 *buffer, size_t len);
+
+static inline u16 crc_itu_t_byte(u16 crc, const u8 data)
+{
+ return (crc << 8) ^ crc_itu_t_table[((crc >> 8) ^ data) & 0xff];
+}
+
+#endif /* CRC_ITU_T_H */
+
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
new file mode 100644
index 000000000..cf53d0773
--- /dev/null
+++ b/include/linux/crc-t10dif.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_CRC_T10DIF_H
+#define _LINUX_CRC_T10DIF_H
+
+#include <linux/types.h>
+
+#define CRC_T10DIF_DIGEST_SIZE 2
+#define CRC_T10DIF_BLOCK_SIZE 1
+
+extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
+ size_t len);
+extern __u16 crc_t10dif(unsigned char const *, size_t);
+
+#endif
diff --git a/include/linux/crc16.h b/include/linux/crc16.h
new file mode 100644
index 000000000..9443c084f
--- /dev/null
+++ b/include/linux/crc16.h
@@ -0,0 +1,30 @@
+/*
+ * crc16.h - CRC-16 routine
+ *
+ * Implements the standard CRC-16:
+ * Width 16
+ * Poly 0x8005 (x^16 + x^15 + x^2 + 1)
+ * Init 0
+ *
+ * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#ifndef __CRC16_H
+#define __CRC16_H
+
+#include <linux/types.h>
+
+extern u16 const crc16_table[256];
+
+extern u16 crc16(u16 crc, const u8 *buffer, size_t len);
+
+static inline u16 crc16_byte(u16 crc, const u8 data)
+{
+ return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff];
+}
+
+#endif /* __CRC16_H */
+
diff --git a/include/linux/crc32.h b/include/linux/crc32.h
new file mode 100644
index 000000000..9e8a032c1
--- /dev/null
+++ b/include/linux/crc32.h
@@ -0,0 +1,79 @@
+/*
+ * crc32.h
+ * See linux/lib/crc32.c for license and changes
+ */
+#ifndef _LINUX_CRC32_H
+#define _LINUX_CRC32_H
+
+#include <linux/types.h>
+#include <linux/bitrev.h>
+
+u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
+u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
+
+/**
+ * crc32_le_combine - Combine two crc32 check values into one. For two
+ * sequences of bytes, seq1 and seq2 with lengths len1
+ * and len2, crc32_le() check values were calculated
+ * for each, crc1 and crc2.
+ *
+ * @crc1: crc32 of the first block
+ * @crc2: crc32 of the second block
+ * @len2: length of the second block
+ *
+ * Return: The crc32_le() check value of seq1 and seq2 concatenated,
+ * requiring only crc1, crc2, and len2. Note: If seq_full denotes
+ * the concatenated memory area of seq1 with seq2, and crc_full
+ * the crc32_le() value of seq_full, then crc_full ==
+ * crc32_le_combine(crc1, crc2, len2) when crc_full was seeded
+ * with the same initializer as crc1, and crc2 seed was 0. See
+ * also crc32_combine_test().
+ */
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len);
+
+static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+ return crc32_le_shift(crc1, len2) ^ crc2;
+}
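
A usage sketch of the combine helper: per the kernel-doc above, crc2 must be computed with a 0 seed for the combined value to equal a single crc32_le() pass over both blocks. foo_crc32_of_pair() is a hypothetical helper.

static u32 foo_crc32_of_pair(u32 seed, const u8 *seq1, size_t len1,
                             const u8 *seq2, size_t len2)
{
        u32 crc1 = crc32_le(seed, seq1, len1);
        u32 crc2 = crc32_le(0, seq2, len2);

        return crc32_le_combine(crc1, crc2, len2);
}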
+
+u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
+
+/**
+ * __crc32c_le_combine - Combine two crc32c check values into one. For two
+ * sequences of bytes, seq1 and seq2 with lengths len1
+ * and len2, __crc32c_le() check values were calculated
+ * for each, crc1 and crc2.
+ *
+ * @crc1: crc32c of the first block
+ * @crc2: crc32c of the second block
+ * @len2: length of the second block
+ *
+ * Return: The __crc32c_le() check value of seq1 and seq2 concatenated,
+ * requiring only crc1, crc2, and len2. Note: If seq_full denotes
+ * the concatenated memory area of seq1 with seq2, and crc_full
+ * the __crc32c_le() value of seq_full, then crc_full ==
+ * __crc32c_le_combine(crc1, crc2, len2) when crc_full was
+ * seeded with the same initializer as crc1, and crc2 seed
+ * was 0. See also crc32c_combine_test().
+ */
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len);
+
+static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+ return __crc32c_le_shift(crc1, len2) ^ crc2;
+}
+
+#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length)
+
+/*
+ * Helpers for hash table generation of ethernet nics:
+ *
+ * Ethernet sends the least significant bit of a byte first, thus crc32_le
+ * is used. The output of crc32_le is bit reversed [most significant bit
+ * is in bit nr 0], thus it must be reversed before use. Except for
+ * nics that bit swap the result internally...
+ */
+#define ether_crc(length, data) bitrev32(crc32_le(~0, data, length))
+#define ether_crc_le(length, data) crc32_le(~0, data, length)
+
+#endif /* _LINUX_CRC32_H */
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
new file mode 100644
index 000000000..bd8b44d96
--- /dev/null
+++ b/include/linux/crc32c.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_CRC32C_H
+#define _LINUX_CRC32C_H
+
+#include <linux/types.h>
+
+extern u32 crc32c(u32 crc, const void *address, unsigned int length);
+
+/* This macro exists for backwards-compatibility. */
+#define crc32c_le crc32c
+
+#endif /* _LINUX_CRC32C_H */
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
new file mode 100644
index 000000000..d59076510
--- /dev/null
+++ b/include/linux/crc7.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_CRC7_H
+#define _LINUX_CRC7_H
+#include <linux/types.h>
+
+extern const u8 crc7_be_syndrome_table[256];
+
+static inline u8 crc7_be_byte(u8 crc, u8 data)
+{
+ return crc7_be_syndrome_table[crc ^ data];
+}
+
+extern u8 crc7_be(u8 crc, const u8 *buffer, size_t len);
+
+#endif
diff --git a/include/linux/crc8.h b/include/linux/crc8.h
new file mode 100644
index 000000000..13c8dabb0
--- /dev/null
+++ b/include/linux/crc8.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __CRC8_H_
+#define __CRC8_H_
+
+#include <linux/types.h>
+
+/* see usage of this value in crc8() description */
+#define CRC8_INIT_VALUE 0xFF
+
+/*
+ * Return value of crc8() indicating valid message+crc. This is true
+ * if a CRC is inverted before transmission. The CRC computed over the
+ * whole received bitstream is _table[x], where x is the bit pattern
+ * of the modification (almost always 0xff).
+ */
+#define CRC8_GOOD_VALUE(_table) (_table[0xFF])
+
+/* required table size for crc8 algorithm */
+#define CRC8_TABLE_SIZE 256
+
+/* helper macro assuring right table size is used */
+#define DECLARE_CRC8_TABLE(_table) \
+ static u8 _table[CRC8_TABLE_SIZE]
+
+/**
+ * crc8_populate_lsb - fill crc table for given polynomial in regular bit order.
+ *
+ * @table: table to be filled.
+ * @polynomial: polynomial for which table is to be filled.
+ *
+ * This function fills the provided table according to the given polynomial
+ * for regular bit order (lsb first). Polynomials in CRC algorithms are typically
+ * represented as shown below.
+ *
+ * poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * For lsb first direction x^7 maps to the lsb. So the polynomial is as below.
+ *
+ * - lsb first: poly = 10101011(1) = 0xAB
+ */
+void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
+
+/**
+ * crc8_populate_msb - fill crc table for given polynomial in reverse bit order.
+ *
+ * @table: table to be filled.
+ * @polynomial: polynomial for which table is to be filled.
+ *
+ * This function fills the provided table according to the given polynomial
+ * for reverse bit order (msb first). Polynomials in CRC algorithms are typically
+ * represented as shown below.
+ *
+ * poly = x^8 + x^7 + x^6 + x^4 + x^2 + 1
+ *
+ * For msb first direction x^7 maps to the msb. So the polynomial is as below.
+ *
+ * - msb first: poly = (1)11010101 = 0xD5
+ */
+void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial);
+
+/**
+ * crc8() - calculate a crc8 over the given input data.
+ *
+ * @table: crc table used for calculation.
+ * @pdata: pointer to data buffer.
+ * @nbytes: number of bytes in data buffer.
+ * @crc: previous returned crc8 value.
+ *
+ * The CRC8 is calculated using the polynomial given in crc8_populate_msb()
+ * or crc8_populate_lsb().
+ *
+ * The caller provides the initial value (either %CRC8_INIT_VALUE
+ * or the previous returned value) to allow for processing of
+ * discontiguous blocks of data. When generating the CRC the
+ * caller is responsible for complementing the final return value
+ * and inserting it into the byte stream. When validating a byte
+ * stream (including CRC8), a final return value of %CRC8_GOOD_VALUE
+ * indicates the byte stream data can be considered valid.
+ *
+ * Reference:
+ * "A Painless Guide to CRC Error Detection Algorithms", ver 3, Aug 1993
+ * Williams, Ross N., ross<at>ross.net
+ * (see URL http://www.ross.net/crc/download/crc_v3.txt).
+ */
+u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc);
+
+#endif /* __CRC8_H_ */
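To make the table/seed workflow described in the comments above concrete, here is a minimal sketch; it is not part of the header, and the table name, the wrapper functions and the 0xD5 polynomial (the msb-first example value quoted for crc8_populate_msb() above) are illustrative assumptions.

#include <linux/crc8.h>

/* Illustrative only: table name, polynomial and wrappers are assumptions. */
DECLARE_CRC8_TABLE(example_crc8_table);

static void example_crc8_setup(void)
{
	/* 0xD5 is the msb-first example polynomial from the comment above. */
	crc8_populate_msb(example_crc8_table, 0xD5);
}

static u8 example_crc8(u8 *buf, size_t nbytes)
{
	/* Start from CRC8_INIT_VALUE; pass the return value back in to chain
	 * further discontiguous blocks, as the crc8() comment describes. */
	return crc8(example_crc8_table, buf, nbytes, CRC8_INIT_VALUE);
}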
diff --git a/include/linux/cred.h b/include/linux/cred.h
new file mode 100644
index 000000000..8b6c083e6
--- /dev/null
+++ b/include/linux/cred.h
@@ -0,0 +1,400 @@
+/* Credentials management - see Documentation/security/credentials.txt
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_CRED_H
+#define _LINUX_CRED_H
+
+#include <linux/capability.h>
+#include <linux/init.h>
+#include <linux/key.h>
+#include <linux/selinux.h>
+#include <linux/atomic.h>
+#include <linux/uidgid.h>
+
+struct user_struct;
+struct cred;
+struct inode;
+
+/*
+ * COW Supplementary groups list
+ */
+#define NGROUPS_SMALL 32
+#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(kgid_t)))
+
+struct group_info {
+ atomic_t usage;
+ int ngroups;
+ int nblocks;
+ kgid_t small_block[NGROUPS_SMALL];
+ kgid_t *blocks[0];
+};
+
+/**
+ * get_group_info - Get a reference to a group info structure
+ * @group_info: The group info to reference
+ *
+ * This gets a reference to a set of supplementary groups.
+ *
+ * If the caller is accessing a task's credentials, they must hold the RCU read
+ * lock when reading.
+ */
+static inline struct group_info *get_group_info(struct group_info *gi)
+{
+ atomic_inc(&gi->usage);
+ return gi;
+}
+
+/**
+ * put_group_info - Release a reference to a group info structure
+ * @group_info: The group info to release
+ */
+#define put_group_info(group_info) \
+do { \
+ if (atomic_dec_and_test(&(group_info)->usage)) \
+ groups_free(group_info); \
+} while (0)
+
+extern struct group_info init_groups;
+#ifdef CONFIG_MULTIUSER
+extern struct group_info *groups_alloc(int);
+extern void groups_free(struct group_info *);
+
+extern int in_group_p(kgid_t);
+extern int in_egroup_p(kgid_t);
+#else
+static inline void groups_free(struct group_info *group_info)
+{
+}
+
+static inline int in_group_p(kgid_t grp)
+{
+ return 1;
+}
+static inline int in_egroup_p(kgid_t grp)
+{
+ return 1;
+}
+#endif
+extern int set_current_groups(struct group_info *);
+extern void set_groups(struct cred *, struct group_info *);
+extern int groups_search(const struct group_info *, kgid_t);
+extern bool may_setgroups(void);
+
+/* access the groups "array" with this macro */
+#define GROUP_AT(gi, i) \
+ ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
+
+/*
+ * The security context of a task
+ *
+ * The parts of the context break down into two categories:
+ *
+ * (1) The objective context of a task. These parts are used when some other
+ * task is attempting to affect this one.
+ *
+ * (2) The subjective context. These details are used when the task is acting
+ * upon another object, be that a file, a task, a key or whatever.
+ *
+ * Note that some members of this structure belong to both categories - the
+ * LSM security pointer for instance.
+ *
+ * A task has two security pointers. task->real_cred points to the objective
+ * context that defines that task's actual details. The objective part of this
+ * context is used whenever that task is acted upon.
+ *
+ * task->cred points to the subjective context that defines the details of how
+ * that task is going to act upon another object. This may be overridden
+ * temporarily to point to another security context, but normally points to the
+ * same context as task->real_cred.
+ */
+struct cred {
+ atomic_t usage;
+#ifdef CONFIG_DEBUG_CREDENTIALS
+ atomic_t subscribers; /* number of processes subscribed */
+ void *put_addr;
+ unsigned magic;
+#define CRED_MAGIC 0x43736564
+#define CRED_MAGIC_DEAD 0x44656144
+#endif
+ kuid_t uid; /* real UID of the task */
+ kgid_t gid; /* real GID of the task */
+ kuid_t suid; /* saved UID of the task */
+ kgid_t sgid; /* saved GID of the task */
+ kuid_t euid; /* effective UID of the task */
+ kgid_t egid; /* effective GID of the task */
+ kuid_t fsuid; /* UID for VFS ops */
+ kgid_t fsgid; /* GID for VFS ops */
+ unsigned securebits; /* SUID-less security management */
+ kernel_cap_t cap_inheritable; /* caps our children can inherit */
+ kernel_cap_t cap_permitted; /* caps we're permitted */
+ kernel_cap_t cap_effective; /* caps we can actually use */
+ kernel_cap_t cap_bset; /* capability bounding set */
+#ifdef CONFIG_KEYS
+ unsigned char jit_keyring; /* default keyring to attach requested
+ * keys to */
+ struct key __rcu *session_keyring; /* keyring inherited over fork */
+ struct key *process_keyring; /* keyring private to this process */
+ struct key *thread_keyring; /* keyring private to this thread */
+ struct key *request_key_auth; /* assumed request_key authority */
+#endif
+#ifdef CONFIG_SECURITY
+ void *security; /* subjective LSM security */
+#endif
+ struct user_struct *user; /* real user ID subscription */
+ struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
+ struct group_info *group_info; /* supplementary groups for euid/fsgid */
+ struct rcu_head rcu; /* RCU deletion hook */
+};
+
+extern void __put_cred(struct cred *);
+extern void exit_creds(struct task_struct *);
+extern int copy_creds(struct task_struct *, unsigned long);
+extern const struct cred *get_task_cred(struct task_struct *);
+extern struct cred *cred_alloc_blank(void);
+extern struct cred *prepare_creds(void);
+extern struct cred *prepare_exec_creds(void);
+extern int commit_creds(struct cred *);
+extern void abort_creds(struct cred *);
+extern const struct cred *override_creds(const struct cred *);
+extern void revert_creds(const struct cred *);
+extern struct cred *prepare_kernel_cred(struct task_struct *);
+extern int change_create_files_as(struct cred *, struct inode *);
+extern int set_security_override(struct cred *, u32);
+extern int set_security_override_from_ctx(struct cred *, const char *);
+extern int set_create_files_as(struct cred *, struct inode *);
+extern void __init cred_init(void);
+
+/*
+ * check for validity of credentials
+ */
+#ifdef CONFIG_DEBUG_CREDENTIALS
+extern void __invalid_creds(const struct cred *, const char *, unsigned);
+extern void __validate_process_creds(struct task_struct *,
+ const char *, unsigned);
+
+extern bool creds_are_invalid(const struct cred *cred);
+
+static inline void __validate_creds(const struct cred *cred,
+ const char *file, unsigned line)
+{
+ if (unlikely(creds_are_invalid(cred)))
+ __invalid_creds(cred, file, line);
+}
+
+#define validate_creds(cred) \
+do { \
+ __validate_creds((cred), __FILE__, __LINE__); \
+} while(0)
+
+#define validate_process_creds() \
+do { \
+ __validate_process_creds(current, __FILE__, __LINE__); \
+} while(0)
+
+extern void validate_creds_for_do_exit(struct task_struct *);
+#else
+static inline void validate_creds(const struct cred *cred)
+{
+}
+static inline void validate_creds_for_do_exit(struct task_struct *tsk)
+{
+}
+static inline void validate_process_creds(void)
+{
+}
+#endif
+
+/**
+ * get_new_cred - Get a reference on a new set of credentials
+ * @cred: The new credentials to reference
+ *
+ * Get a reference on the specified set of new credentials. The caller must
+ * release the reference.
+ */
+static inline struct cred *get_new_cred(struct cred *cred)
+{
+ atomic_inc(&cred->usage);
+ return cred;
+}
+
+/**
+ * get_cred - Get a reference on a set of credentials
+ * @cred: The credentials to reference
+ *
+ * Get a reference on the specified set of credentials. The caller must
+ * release the reference.
+ *
+ * This is used to deal with a committed set of credentials. Although the
+ * pointer is const, this will temporarily discard the const and increment the
+ * usage count. The purpose of this is to attempt to catch at compile time the
+ * accidental alteration of a set of credentials that should be considered
+ * immutable.
+ */
+static inline const struct cred *get_cred(const struct cred *cred)
+{
+ struct cred *nonconst_cred = (struct cred *) cred;
+ validate_creds(cred);
+ return get_new_cred(nonconst_cred);
+}
+
+/**
+ * put_cred - Release a reference to a set of credentials
+ * @cred: The credentials to release
+ *
+ * Release a reference to a set of credentials, deleting them when the last ref
+ * is released.
+ *
+ * This takes a const pointer to a set of credentials because the credentials
+ * on task_struct are attached by const pointers to prevent accidental
+ * alteration of otherwise immutable credential sets.
+ */
+static inline void put_cred(const struct cred *_cred)
+{
+ struct cred *cred = (struct cred *) _cred;
+
+ validate_creds(cred);
+ if (atomic_dec_and_test(&(cred)->usage))
+ __put_cred(cred);
+}
+
+/**
+ * current_cred - Access the current task's subjective credentials
+ *
+ * Access the subjective credentials of the current task. RCU-safe,
+ * since nobody else can modify it.
+ */
+#define current_cred() \
+ rcu_dereference_protected(current->cred, 1)
+
+/**
+ * current_real_cred - Access the current task's objective credentials
+ *
+ * Access the objective credentials of the current task. RCU-safe,
+ * since nobody else can modify it.
+ */
+#define current_real_cred() \
+ rcu_dereference_protected(current->real_cred, 1)
+
+/**
+ * __task_cred - Access a task's objective credentials
+ * @task: The task to query
+ *
+ * Access the objective credentials of a task. The caller must hold the RCU
+ * readlock.
+ *
+ * The result of this function should not be passed directly to get_cred();
+ * rather get_task_cred() should be used instead.
+ */
+#define __task_cred(task) \
+ rcu_dereference((task)->real_cred)
+
+/**
+ * get_current_cred - Get the current task's subjective credentials
+ *
+ * Get the subjective credentials of the current task, pinning them so that
+ * they can't go away. Accessing the current task's credentials directly is
+ * not permitted.
+ */
+#define get_current_cred() \
+ (get_cred(current_cred()))
+
+/**
+ * get_current_user - Get the current task's user_struct
+ *
+ * Get the user record of the current task, pinning it so that it can't go
+ * away.
+ */
+#define get_current_user() \
+({ \
+ struct user_struct *__u; \
+ const struct cred *__cred; \
+ __cred = current_cred(); \
+ __u = get_uid(__cred->user); \
+ __u; \
+})
+
+/**
+ * get_current_groups - Get the current task's supplementary group list
+ *
+ * Get the supplementary group list of the current task, pinning it so that it
+ * can't go away.
+ */
+#define get_current_groups() \
+({ \
+ struct group_info *__groups; \
+ const struct cred *__cred; \
+ __cred = current_cred(); \
+ __groups = get_group_info(__cred->group_info); \
+ __groups; \
+})
+
+#define task_cred_xxx(task, xxx) \
+({ \
+ __typeof__(((struct cred *)NULL)->xxx) ___val; \
+ rcu_read_lock(); \
+ ___val = __task_cred((task))->xxx; \
+ rcu_read_unlock(); \
+ ___val; \
+})
+
+#define task_uid(task) (task_cred_xxx((task), uid))
+#define task_euid(task) (task_cred_xxx((task), euid))
+
+#define current_cred_xxx(xxx) \
+({ \
+ current_cred()->xxx; \
+})
+
+#define current_uid() (current_cred_xxx(uid))
+#define current_gid() (current_cred_xxx(gid))
+#define current_euid() (current_cred_xxx(euid))
+#define current_egid() (current_cred_xxx(egid))
+#define current_suid() (current_cred_xxx(suid))
+#define current_sgid() (current_cred_xxx(sgid))
+#define current_fsuid() (current_cred_xxx(fsuid))
+#define current_fsgid() (current_cred_xxx(fsgid))
+#define current_cap() (current_cred_xxx(cap_effective))
+#define current_user() (current_cred_xxx(user))
+#define current_security() (current_cred_xxx(security))
+
+extern struct user_namespace init_user_ns;
+#ifdef CONFIG_USER_NS
+#define current_user_ns() (current_cred_xxx(user_ns))
+#else
+#define current_user_ns() (&init_user_ns)
+#endif
+
+
+#define current_uid_gid(_uid, _gid) \
+do { \
+ const struct cred *__cred; \
+ __cred = current_cred(); \
+ *(_uid) = __cred->uid; \
+ *(_gid) = __cred->gid; \
+} while(0)
+
+#define current_euid_egid(_euid, _egid) \
+do { \
+ const struct cred *__cred; \
+ __cred = current_cred(); \
+ *(_euid) = __cred->euid; \
+ *(_egid) = __cred->egid; \
+} while(0)
+
+#define current_fsuid_fsgid(_fsuid, _fsgid) \
+do { \
+ const struct cred *__cred; \
+ __cred = current_cred(); \
+ *(_fsuid) = __cred->fsuid; \
+ *(_fsgid) = __cred->fsgid; \
+} while(0)
+
+#endif /* _LINUX_CRED_H */
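As a small illustration of the reference pattern documented for get_cred()/put_cred() above, the following hypothetical sketch pins the current task's subjective credentials, reads one field and drops the reference again (the function name is made up for the example).

#include <linux/cred.h>

/* Hypothetical example of the get/put reference pattern documented above. */
static kuid_t example_peek_current_euid(void)
{
	const struct cred *cred = get_current_cred();	/* takes a reference */
	kuid_t euid = cred->euid;

	put_cred(cred);					/* drops it again */
	return euid;
}

For a single field like this, the current_euid() accessor defined above avoids the reference counting entirely.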
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h
new file mode 100644
index 000000000..48a1a7d10
--- /dev/null
+++ b/include/linux/crush/crush.h
@@ -0,0 +1,209 @@
+#ifndef CEPH_CRUSH_CRUSH_H
+#define CEPH_CRUSH_CRUSH_H
+
+#include <linux/types.h>
+
+/*
+ * CRUSH is a pseudo-random data distribution algorithm that
+ * efficiently distributes input values (typically, data objects)
+ * across a heterogeneous, structured storage cluster.
+ *
+ * The algorithm was originally described in detail in this paper
+ * (although the algorithm has evolved somewhat since then):
+ *
+ * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
+ *
+ * LGPL2
+ */
+
+
+#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
+
+#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
+
+
+#define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */
+#define CRUSH_ITEM_NONE 0x7fffffff /* no result */
+
+/*
+ * CRUSH uses user-defined "rules" to describe how inputs should be
+ * mapped to devices. A rule consists of sequence of steps to perform
+ * to generate the set of output devices.
+ */
+struct crush_rule_step {
+ __u32 op;
+ __s32 arg1;
+ __s32 arg2;
+};
+
+/* step op codes */
+enum {
+ CRUSH_RULE_NOOP = 0,
+ CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */
+ CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
+ /* arg2 = type */
+ CRUSH_RULE_CHOOSE_INDEP = 3, /* same */
+ CRUSH_RULE_EMIT = 4, /* no args */
+ CRUSH_RULE_CHOOSELEAF_FIRSTN = 6,
+ CRUSH_RULE_CHOOSELEAF_INDEP = 7,
+
+ CRUSH_RULE_SET_CHOOSE_TRIES = 8, /* override choose_total_tries */
+ CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
+ CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
+ CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
+ CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
+};
+
+/*
+ * for specifying choose num (arg1) relative to the max parameter
+ * passed to do_rule
+ */
+#define CRUSH_CHOOSE_N 0
+#define CRUSH_CHOOSE_N_MINUS(x) (-(x))
+
+/*
+ * The rule mask is used to describe what the rule is intended for.
+ * Given a ruleset and size of output set, we search through the
+ * rule list for a matching rule_mask.
+ */
+struct crush_rule_mask {
+ __u8 ruleset;
+ __u8 type;
+ __u8 min_size;
+ __u8 max_size;
+};
+
+struct crush_rule {
+ __u32 len;
+ struct crush_rule_mask mask;
+ struct crush_rule_step steps[0];
+};
+
+#define crush_rule_size(len) (sizeof(struct crush_rule) + \
+ (len)*sizeof(struct crush_rule_step))
+
+
+
+/*
+ * A bucket is a named container of other items (either devices or
+ * other buckets). Items within a bucket are chosen using one of a
+ * few different algorithms. The table summarizes how the speed of
+ * each option measures up against mapping stability when items are
+ * added or removed.
+ *
+ * Bucket Alg Speed Additions Removals
+ * ------------------------------------------------
+ * uniform O(1) poor poor
+ * list O(n) optimal poor
+ * tree O(log n) good good
+ * straw O(n) better better
+ * straw2 O(n) optimal optimal
+ */
+enum {
+ CRUSH_BUCKET_UNIFORM = 1,
+ CRUSH_BUCKET_LIST = 2,
+ CRUSH_BUCKET_TREE = 3,
+ CRUSH_BUCKET_STRAW = 4,
+ CRUSH_BUCKET_STRAW2 = 5,
+};
+extern const char *crush_bucket_alg_name(int alg);
+
+struct crush_bucket {
+ __s32 id; /* this'll be negative */
+ __u16 type; /* non-zero; type=0 is reserved for devices */
+ __u8 alg; /* one of CRUSH_BUCKET_* */
+ __u8 hash; /* which hash function to use, CRUSH_HASH_* */
+ __u32 weight; /* 16-bit fixed point */
+ __u32 size; /* num items */
+ __s32 *items;
+
+ /*
+ * cached random permutation: used for uniform bucket and for
+ * the linear search fallback for the other bucket types.
+ */
+ __u32 perm_x; /* @x for which *perm is defined */
+ __u32 perm_n; /* num elements of *perm that are permuted/defined */
+ __u32 *perm;
+};
+
+struct crush_bucket_uniform {
+ struct crush_bucket h;
+ __u32 item_weight; /* 16-bit fixed point; all items equally weighted */
+};
+
+struct crush_bucket_list {
+ struct crush_bucket h;
+ __u32 *item_weights; /* 16-bit fixed point */
+ __u32 *sum_weights; /* 16-bit fixed point. element i is sum
+ of weights 0..i, inclusive */
+};
+
+struct crush_bucket_tree {
+ struct crush_bucket h; /* note: h.size is _tree_ size, not number of
+ actual items */
+ __u8 num_nodes;
+ __u32 *node_weights;
+};
+
+struct crush_bucket_straw {
+ struct crush_bucket h;
+ __u32 *item_weights; /* 16-bit fixed point */
+ __u32 *straws; /* 16-bit fixed point */
+};
+
+struct crush_bucket_straw2 {
+ struct crush_bucket h;
+ __u32 *item_weights; /* 16-bit fixed point */
+};
+
+
+
+/*
+ * CRUSH map includes all buckets, rules, etc.
+ */
+struct crush_map {
+ struct crush_bucket **buckets;
+ struct crush_rule **rules;
+
+ __s32 max_buckets;
+ __u32 max_rules;
+ __s32 max_devices;
+
+ /* choose local retries before re-descent */
+ __u32 choose_local_tries;
+ /* choose local attempts using a fallback permutation before
+ * re-descent */
+ __u32 choose_local_fallback_tries;
+ /* choose attempts before giving up */
+ __u32 choose_total_tries;
+ /* attempt chooseleaf inner descent once for firstn mode; on
+ * reject retry outer descent. Note that this does *not*
+ * apply to a collision: in that case we will retry as we used
+ * to. */
+ __u32 chooseleaf_descend_once;
+
+ /* if non-zero, feed r into chooseleaf, bit-shifted right by (r-1)
+ * bits. a value of 1 is best for new clusters. for legacy clusters
+ * that want to limit reshuffling, a value of 3 or 4 will make the
+ * mappings line up a bit better with previous mappings. */
+ __u8 chooseleaf_vary_r;
+};
+
+
+/* crush.c */
+extern int crush_get_bucket_item_weight(const struct crush_bucket *b, int pos);
+extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
+extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
+extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
+extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
+extern void crush_destroy_bucket_straw2(struct crush_bucket_straw2 *b);
+extern void crush_destroy_bucket(struct crush_bucket *b);
+extern void crush_destroy_rule(struct crush_rule *r);
+extern void crush_destroy(struct crush_map *map);
+
+static inline int crush_calc_tree_node(int i)
+{
+ return ((i+1) << 1)-1;
+}
+
+#endif
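A short worked example of the index arithmetic in crush_calc_tree_node() above: slot i maps to node 2*(i+1) - 1. Reading these as leaf positions within a tree bucket's node_weights array is an assumption about the implementation, not something this header states.

/* Worked example (follows directly from the inline above):
 *   crush_calc_tree_node(0) == 1
 *   crush_calc_tree_node(1) == 3
 *   crush_calc_tree_node(2) == 5
 */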
diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h
new file mode 100644
index 000000000..91e884230
--- /dev/null
+++ b/include/linux/crush/hash.h
@@ -0,0 +1,17 @@
+#ifndef CEPH_CRUSH_HASH_H
+#define CEPH_CRUSH_HASH_H
+
+#define CRUSH_HASH_RJENKINS1 0
+
+#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1
+
+extern const char *crush_hash_name(int type);
+
+extern __u32 crush_hash32(int type, __u32 a);
+extern __u32 crush_hash32_2(int type, __u32 a, __u32 b);
+extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c);
+extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d);
+extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d,
+ __u32 e);
+
+#endif
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h
new file mode 100644
index 000000000..eab367446
--- /dev/null
+++ b/include/linux/crush/mapper.h
@@ -0,0 +1,20 @@
+#ifndef CEPH_CRUSH_MAPPER_H
+#define CEPH_CRUSH_MAPPER_H
+
+/*
+ * CRUSH functions for finding rules and then mapping an input to an
+ * output set.
+ *
+ * LGPL2
+ */
+
+#include <linux/crush/crush.h>
+
+extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size);
+extern int crush_do_rule(const struct crush_map *map,
+ int ruleno,
+ int x, int *result, int result_max,
+ const __u32 *weights, int weight_max,
+ int *scratch);
+
+#endif
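A heavily hedged sketch of how the two entry points above are presumably combined by a caller. The negative return of crush_find_rule() when no rule matches, and the scratch sizing of 3 * result_max, are assumptions about the implementation rather than guarantees made by this header.

#include <linux/crush/crush.h>
#include <linux/crush/mapper.h>

#define EXAMPLE_MAX_RESULT 8	/* illustrative output set size */

/* Hypothetical: map input x to at most EXAMPLE_MAX_RESULT devices. */
static int example_crush_map(const struct crush_map *map, int ruleset,
			     int type, int x, const __u32 *weights,
			     int weight_max, int *result)
{
	int scratch[3 * EXAMPLE_MAX_RESULT];	/* sizing is an assumption */
	int ruleno = crush_find_rule(map, ruleset, type, EXAMPLE_MAX_RESULT);

	if (ruleno < 0)		/* assumed: negative means no matching rule */
		return ruleno;

	return crush_do_rule(map, ruleno, x, result, EXAMPLE_MAX_RESULT,
			     weights, weight_max, scratch);
}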
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
new file mode 100644
index 000000000..10df5d2d0
--- /dev/null
+++ b/include/linux/crypto.h
@@ -0,0 +1,2402 @@
+/*
+ * Scatterlist Cryptographic API.
+ *
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
+ * and Nettle, by Niels Möller.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _LINUX_CRYPTO_H
+#define _LINUX_CRYPTO_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
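As an illustration of the macro above (the algorithm name "aes" here is purely an example), a module providing an algorithm would declare:

/* Emits both the "aes" and the "crypto-aes" module aliases described in the
 * comment above; assumes <linux/module.h> is included by the module. */
MODULE_ALIAS_CRYPTO("aes");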
+
+/*
+ * Algorithm masks and types.
+ */
+#define CRYPTO_ALG_TYPE_MASK 0x0000000f
+#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
+#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
+#define CRYPTO_ALG_TYPE_AEAD 0x00000003
+#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
+#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
+#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
+#define CRYPTO_ALG_TYPE_HASH 0x00000008
+#define CRYPTO_ALG_TYPE_SHASH 0x00000009
+#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
+#define CRYPTO_ALG_TYPE_RNG 0x0000000c
+#define CRYPTO_ALG_TYPE_PCOMPRESS 0x0000000f
+
+#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000c
+#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
+
+#define CRYPTO_ALG_LARVAL 0x00000010
+#define CRYPTO_ALG_DEAD 0x00000020
+#define CRYPTO_ALG_DYING 0x00000040
+#define CRYPTO_ALG_ASYNC 0x00000080
+
+/*
+ * Set this bit if and only if the algorithm requires another algorithm of
+ * the same type to handle corner cases.
+ */
+#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
+
+/*
+ * This bit is set for symmetric key ciphers that have already been wrapped
+ * with a generic IV generator to prevent them from being wrapped again.
+ */
+#define CRYPTO_ALG_GENIV 0x00000200
+
+/*
+ * Set if the algorithm has passed automated run-time testing. Note that
+ * if there is no run-time testing for a given algorithm it is considered
+ * to have passed.
+ */
+
+#define CRYPTO_ALG_TESTED 0x00000400
+
+/*
+ * Set if the algorithm is an instance that is built from templates.
+ */
+#define CRYPTO_ALG_INSTANCE 0x00000800
+
+/* Set this bit if the algorithm provided is hardware accelerated but
+ * not directly available to userspace via an instruction set or the like.
+ */
+#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
+
+/*
+ * Mark a cipher as a service implementation only usable by another
+ * cipher and never by a normal user of the kernel crypto API
+ */
+#define CRYPTO_ALG_INTERNAL 0x00002000
+
+/*
+ * Transform masks and values (for crt_flags).
+ */
+#define CRYPTO_TFM_REQ_MASK 0x000fff00
+#define CRYPTO_TFM_RES_MASK 0xfff00000
+
+#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
+#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
+#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
+#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
+#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
+#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
+#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
+#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
+
+/*
+ * Miscellaneous stuff.
+ */
+#define CRYPTO_MAX_ALG_NAME 64
+
+/*
+ * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
+ * declaration) is used to ensure that the crypto_tfm context structure is
+ * aligned correctly for the given architecture so that there are no alignment
+ * faults for C data types. In particular, this is required on platforms such
+ * as arm where pointers are 32-bit aligned but there are data types such as
+ * u64 which require 64-bit alignment.
+ */
+#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
+
+#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
+
+struct scatterlist;
+struct crypto_ablkcipher;
+struct crypto_async_request;
+struct crypto_aead;
+struct crypto_blkcipher;
+struct crypto_hash;
+struct crypto_rng;
+struct crypto_tfm;
+struct crypto_type;
+struct aead_givcrypt_request;
+struct skcipher_givcrypt_request;
+
+typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
+
+/**
+ * DOC: Block Cipher Context Data Structures
+ *
+ * These data structures define the operating context for each block cipher
+ * type.
+ */
+
+struct crypto_async_request {
+ struct list_head list;
+ crypto_completion_t complete;
+ void *data;
+ struct crypto_tfm *tfm;
+
+ u32 flags;
+};
+
+struct ablkcipher_request {
+ struct crypto_async_request base;
+
+ unsigned int nbytes;
+
+ void *info;
+
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct aead_request - AEAD request
+ * @base: Common attributes for async crypto requests
+ * @assoclen: Length in bytes of associated data for authentication
+ * @cryptlen: Length of data to be encrypted or decrypted
+ * @iv: Initialisation vector
+ * @assoc: Associated data
+ * @src: Source data
+ * @dst: Destination data
+ * @__ctx: Start of private context data
+ */
+struct aead_request {
+ struct crypto_async_request base;
+
+ unsigned int assoclen;
+ unsigned int cryptlen;
+
+ u8 *iv;
+
+ struct scatterlist *assoc;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+struct blkcipher_desc {
+ struct crypto_blkcipher *tfm;
+ void *info;
+ u32 flags;
+};
+
+struct cipher_desc {
+ struct crypto_tfm *tfm;
+ void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
+ const u8 *src, unsigned int nbytes);
+ void *info;
+};
+
+struct hash_desc {
+ struct crypto_hash *tfm;
+ u32 flags;
+};
+
+/**
+ * DOC: Block Cipher Algorithm Definitions
+ *
+ * These data structures define modular crypto algorithm implementations,
+ * managed via crypto_register_alg() and crypto_unregister_alg().
+ */
+
+/**
+ * struct ablkcipher_alg - asynchronous block cipher definition
+ * @min_keysize: Minimum key size supported by the transformation. This is the
+ * smallest key length supported by this transformation algorithm.
+ * This must be set to one of the pre-defined values as this is
+ * not hardware specific. Possible values for this field can be
+ * found via git grep "_MIN_KEY_SIZE" include/crypto/
+ * @max_keysize: Maximum key size supported by the transformation. This is the
+ * largest key length supported by this transformation algorithm.
+ * This must be set to one of the pre-defined values as this is
+ * not hardware specific. Possible values for this field can be
+ * found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function can
+ * be called multiple times during the existence of the transformation
+ * object, so one must make sure the key is properly reprogrammed into
+ * the hardware. This function is also responsible for checking the key
+ * length for validity. In case a software fallback was put in place in
+ * the @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
+ * the supplied scatterlist containing the blocks of data. The crypto
+ * API consumer is responsible for aligning the entries of the
+ * scatterlist properly and making sure the chunks are correctly
+ * sized. In case a software fallback was put in place in the
+ * @cra_init call, this function might need to use the fallback if
+ * the algorithm doesn't support all of the key sizes. In case the
+ * key was stored in transformation context, the key might need to be
+ * re-programmed into the hardware in this function. This function
+ * shall not modify the transformation context, as this function may
+ * be called in parallel with the same transformation object.
+ * @decrypt: Decrypt a scatterlist of blocks. This is a reverse counterpart to @encrypt
+ * and the conditions are exactly the same.
+ * @givencrypt: Update the IV for encryption. With this function, a cipher
+ * implementation may provide the function on how to update the IV
+ * for encryption.
+ * @givdecrypt: Update the IV for decryption. This is the reverse of
+ * @givencrypt .
+ * @geniv: The transformation implementation may use an "IV generator" provided
+ * by the kernel crypto API. Several use cases have a predefined
+ *	   approach to how IVs are to be updated. For such use cases, the kernel
+ * crypto API provides ready-to-use implementations that can be
+ * referenced with this variable.
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ * IV of exactly that size to perform the encrypt or decrypt operation.
+ *
+ * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
+ * mandatory and must be filled.
+ */
+struct ablkcipher_alg {
+ int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct ablkcipher_request *req);
+ int (*decrypt)(struct ablkcipher_request *req);
+ int (*givencrypt)(struct skcipher_givcrypt_request *req);
+ int (*givdecrypt)(struct skcipher_givcrypt_request *req);
+
+ const char *geniv;
+
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+ unsigned int ivsize;
+};
+
+/**
+ * struct aead_alg - AEAD cipher definition
+ * @maxauthsize: Set the maximum authentication tag size supported by the
+ * transformation. A transformation may support smaller tag sizes.
+ * As the authentication tag is a message digest to ensure the
+ * integrity of the encrypted data, a consumer typically wants the
+ * largest authentication tag possible as defined by this
+ * variable.
+ * @setauthsize: Set authentication size for the AEAD transformation. This
+ * function is used to specify the consumer requested size of the
+ * authentication tag to be either generated by the transformation
+ * during encryption or the size of the authentication tag to be
+ * supplied during the decryption operation. This function is also
+ * responsible for checking the authentication tag size for
+ * validity.
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @givencrypt: see struct ablkcipher_alg
+ * @givdecrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ *
+ * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
+ * mandatory and must be filled.
+ */
+struct aead_alg {
+ int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
+ int (*encrypt)(struct aead_request *req);
+ int (*decrypt)(struct aead_request *req);
+ int (*givencrypt)(struct aead_givcrypt_request *req);
+ int (*givdecrypt)(struct aead_givcrypt_request *req);
+
+ const char *geniv;
+
+ unsigned int ivsize;
+ unsigned int maxauthsize;
+};
+
+/**
+ * struct blkcipher_alg - synchronous block cipher definition
+ * @min_keysize: see struct ablkcipher_alg
+ * @max_keysize: see struct ablkcipher_alg
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ *
+ * All fields except @geniv and @ivsize are mandatory and must be filled.
+ */
+struct blkcipher_alg {
+ int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes);
+ int (*decrypt)(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes);
+
+ const char *geniv;
+
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+ unsigned int ivsize;
+};
+
+/**
+ * struct cipher_alg - single-block symmetric ciphers definition
+ * @cia_min_keysize: Minimum key size supported by the transformation. This is
+ * the smallest key length supported by this transformation
+ * algorithm. This must be set to one of the pre-defined
+ * values as this is not hardware specific. Possible values
+ * for this field can be found via git grep "_MIN_KEY_SIZE"
+ * include/crypto/
+ * @cia_max_keysize: Maximum key size supported by the transformation. This is
+ * the largest key length supported by this transformation
+ * algorithm. This must be set to one of the pre-defined values
+ * as this is not hardware specific. Possible values for this
+ * field can be found via git grep "_MAX_KEY_SIZE"
+ * include/crypto/
+ * @cia_setkey: Set key for the transformation. This function is used to either
+ * program a supplied key into the hardware or store the key in the
+ * transformation context for programming it later. Note that this
+ * function does modify the transformation context. This function
+ * can be called multiple times during the existence of the
+ * transformation object, so one must make sure the key is properly
+ * reprogrammed into the hardware. This function is also
+ * responsible for checking the key length for validity.
+ * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
+ * single block of data, which must be @cra_blocksize big. This
+ * always operates on a full @cra_blocksize and it is not possible
+ * to encrypt a block of smaller size. The supplied buffers must
+ * therefore also be at least of @cra_blocksize size. Both the
+ * input and output buffers are always aligned to @cra_alignmask.
+ * In case either of the input or output buffer supplied by user
+ * of the crypto API is not aligned to @cra_alignmask, the crypto
+ * API will re-align the buffers. The re-alignment means that a
+ * new buffer will be allocated, the data will be copied into the
+ * new buffer, then the processing will happen on the new buffer,
+ * then the data will be copied back into the original buffer and
+ * finally the new buffer will be freed. In case a software
+ * fallback was put in place in the @cra_init call, this function
+ * might need to use the fallback if the algorithm doesn't support
+ * all of the key sizes. In case the key was stored in
+ * transformation context, the key might need to be re-programmed
+ * into the hardware in this function. This function shall not
+ * modify the transformation context, as this function may be
+ * called in parallel with the same transformation object.
+ * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
+ * @cia_encrypt, and the conditions are exactly the same.
+ *
+ * All fields are mandatory and must be filled.
+ */
+struct cipher_alg {
+ unsigned int cia_min_keysize;
+ unsigned int cia_max_keysize;
+ int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+ void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+};
+
+struct compress_alg {
+ int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen);
+ int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen);
+};
+
+/**
+ * struct rng_alg - random number generator definition
+ * @rng_make_random: The function defined by this variable obtains a random
+ * number. The random number generator transform must generate
+ * the random number out of the context provided with this
+ * call.
+ * @rng_reset: Reset of the random number generator by clearing the entire state.
+ * With the invocation of this function call, the random number
+ * generator shall completely reinitialize its state. If the random
+ * number generator requires a seed for setting up a new state,
+ * the seed must be provided by the consumer while invoking this
+ * function. The required size of the seed is defined with
+ * @seedsize .
+ * @seedsize: The seed size required for a random number generator
+ * initialization defined with this variable. Some random number
+ *	      generators, like the SP800-90A DRBG, do not require a seed as the
+ * seeding is implemented internally without the need of support by
+ * the consumer. In this case, the seed size is set to zero.
+ */
+struct rng_alg {
+ int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen);
+ int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
+
+ unsigned int seedsize;
+};
+
+
+#define cra_ablkcipher cra_u.ablkcipher
+#define cra_aead cra_u.aead
+#define cra_blkcipher cra_u.blkcipher
+#define cra_cipher cra_u.cipher
+#define cra_compress cra_u.compress
+#define cra_rng cra_u.rng
+
+/**
+ * struct crypto_alg - definition of a cryptographic cipher algorithm
+ * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
+ * CRYPTO_ALG_* flags for the flags which go in here. Those are
+ * used for fine-tuning the description of the transformation
+ * algorithm.
+ * @cra_blocksize: Minimum block size of this transformation. The size in bytes
+ * of the smallest possible unit which can be transformed with
+ * this algorithm. The users must respect this value.
+ * In case of HASH transformation, it is possible for a smaller
+ * block than @cra_blocksize to be passed to the crypto API for
+ *		    transformation; in case of any other transformation type, an
+ * error will be returned upon any attempt to transform smaller
+ * than @cra_blocksize chunks.
+ * @cra_ctxsize: Size of the operational context of the transformation. This
+ * value informs the kernel crypto API about the memory size
+ * needed to be allocated for the transformation context.
+ * @cra_alignmask: Alignment mask for the input and output data buffer. The data
+ * buffer containing the input data for the algorithm must be
+ * aligned to this alignment mask. The data buffer for the
+ * output data must be aligned to this alignment mask. Note that
+ * the Crypto API will do the re-alignment in software, but
+ * only under special conditions and there is a performance hit.
+ * The re-alignment happens at these occasions for different
+ * @cra_u types: cipher -- For both input data and output data
+ * buffer; ahash -- For output hash destination buf; shash --
+ * For output hash destination buf.
+ * This is needed on hardware which is flawed by design and
+ * cannot pick data from arbitrary addresses.
+ * @cra_priority: Priority of this transformation implementation. In case
+ * multiple transformations with same @cra_name are available to
+ * the Crypto API, the kernel will use the one with highest
+ * @cra_priority.
+ * @cra_name: Generic name (usable by multiple implementations) of the
+ * transformation algorithm. This is the name of the transformation
+ * itself. This field is used by the kernel when looking up the
+ * providers of particular transformation.
+ * @cra_driver_name: Unique name of the transformation provider. This is the
+ * name of the provider of the transformation. This can be any
+ * arbitrary value, but in the usual case, this contains the
+ * name of the chip or provider and the name of the
+ * transformation algorithm.
+ * @cra_type: Type of the cryptographic transformation. This is a pointer to
+ * struct crypto_type, which implements callbacks common for all
+ *	      transformation types. There are multiple options:
+ * &crypto_blkcipher_type, &crypto_ablkcipher_type,
+ * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ * This field might be empty. In that case, there are no common
+ * callbacks. This is the case for: cipher, compress, shash.
+ * @cra_u: Callbacks implementing the transformation. This is a union of
+ * multiple structures. Depending on the type of transformation selected
+ * by @cra_type and @cra_flags above, the associated structure must be
+ * filled with callbacks. This field might be empty. This is the case
+ * for ahash, shash.
+ * @cra_init: Initialize the cryptographic transformation object. This function
+ * is used to initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation time, right
+ * after the transformation context was allocated. In case the
+ * cryptographic hardware has some special requirements which need to
+ * be handled by software, this function shall check for the precise
+ * requirement of the transformation and put any software fallbacks
+ * in place.
+ * @cra_exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @cra_init, used to remove various changes set in
+ * @cra_init.
+ * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
+ * @cra_list: internally used
+ * @cra_users: internally used
+ * @cra_refcnt: internally used
+ * @cra_destroy: internally used
+ *
+ * The struct crypto_alg describes a generic Crypto API algorithm and is common
+ * for all of the transformations. Any variable not documented here shall not
+ * be used by a cipher implementation as it is internal to the Crypto API.
+ */
+struct crypto_alg {
+ struct list_head cra_list;
+ struct list_head cra_users;
+
+ u32 cra_flags;
+ unsigned int cra_blocksize;
+ unsigned int cra_ctxsize;
+ unsigned int cra_alignmask;
+
+ int cra_priority;
+ atomic_t cra_refcnt;
+
+ char cra_name[CRYPTO_MAX_ALG_NAME];
+ char cra_driver_name[CRYPTO_MAX_ALG_NAME];
+
+ const struct crypto_type *cra_type;
+
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ struct rng_alg rng;
+ } cra_u;
+
+ int (*cra_init)(struct crypto_tfm *tfm);
+ void (*cra_exit)(struct crypto_tfm *tfm);
+ void (*cra_destroy)(struct crypto_alg *alg);
+
+ struct module *cra_module;
+};
+
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+int crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+int crypto_unregister_algs(struct crypto_alg *algs, int count);
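To connect the struct crypto_alg documentation above with this registration interface, here is a minimal, hypothetical single-block cipher skeleton. Everything in it — the names, the fixed 16-byte key and block size, the trivial XOR "cipher" — is a placeholder used only to show which fields a cipher_alg-backed crypto_alg fills in; it is not a real or secure algorithm. The error code and THIS_MODULE assume <linux/errno.h> and <linux/module.h> are available to the module.

struct example_cipher_ctx {
	u8 key[16];
};

static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct example_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);
	return 0;
}

static void example_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	struct example_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	int i;

	for (i = 0; i < 16; i++)	/* placeholder transform, NOT cryptography */
		dst[i] = src[i] ^ ctx->key[i];
}

static void example_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
	example_encrypt(tfm, dst, src);	/* XOR is its own inverse */
}

static struct crypto_alg example_alg = {
	.cra_name		= "example-cipher",
	.cra_driver_name	= "example-cipher-generic",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= 16,
	.cra_ctxsize		= sizeof(struct example_cipher_ctx),
	.cra_module		= THIS_MODULE,
	.cra_cipher		= {
		.cia_min_keysize	= 16,
		.cia_max_keysize	= 16,
		.cia_setkey		= example_setkey,
		.cia_encrypt		= example_encrypt,
		.cia_decrypt		= example_decrypt,
	},
};

/* Registered and unregistered from module init/exit with
 * crypto_register_alg(&example_alg) and crypto_unregister_alg(&example_alg). */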
+
+/*
+ * Algorithm query interface.
+ */
+int crypto_has_alg(const char *name, u32 type, u32 mask);
+
+/*
+ * Transforms: user-instantiated objects which encapsulate algorithms
+ * and core processing logic. Managed via crypto_alloc_*() and
+ * crypto_free_*(), as well as the various helpers below.
+ */
+
+struct ablkcipher_tfm {
+ int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct ablkcipher_request *req);
+ int (*decrypt)(struct ablkcipher_request *req);
+ int (*givencrypt)(struct skcipher_givcrypt_request *req);
+ int (*givdecrypt)(struct skcipher_givcrypt_request *req);
+
+ struct crypto_ablkcipher *base;
+
+ unsigned int ivsize;
+ unsigned int reqsize;
+};
+
+struct aead_tfm {
+ int (*setkey)(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct aead_request *req);
+ int (*decrypt)(struct aead_request *req);
+ int (*givencrypt)(struct aead_givcrypt_request *req);
+ int (*givdecrypt)(struct aead_givcrypt_request *req);
+
+ struct crypto_aead *base;
+
+ unsigned int ivsize;
+ unsigned int authsize;
+ unsigned int reqsize;
+};
+
+struct blkcipher_tfm {
+ void *iv;
+ int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+ int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+};
+
+struct cipher_tfm {
+ int (*cit_setkey)(struct crypto_tfm *tfm,
+ const u8 *key, unsigned int keylen);
+ void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+};
+
+struct hash_tfm {
+ int (*init)(struct hash_desc *desc);
+ int (*update)(struct hash_desc *desc,
+ struct scatterlist *sg, unsigned int nsg);
+ int (*final)(struct hash_desc *desc, u8 *out);
+ int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
+ unsigned int nsg, u8 *out);
+ int (*setkey)(struct crypto_hash *tfm, const u8 *key,
+ unsigned int keylen);
+ unsigned int digestsize;
+};
+
+struct compress_tfm {
+ int (*cot_compress)(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+ int (*cot_decompress)(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+};
+
+struct rng_tfm {
+ int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
+ unsigned int dlen);
+ int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
+};
+
+#define crt_ablkcipher crt_u.ablkcipher
+#define crt_aead crt_u.aead
+#define crt_blkcipher crt_u.blkcipher
+#define crt_cipher crt_u.cipher
+#define crt_hash crt_u.hash
+#define crt_compress crt_u.compress
+#define crt_rng crt_u.rng
+
+struct crypto_tfm {
+
+ u32 crt_flags;
+
+ union {
+ struct ablkcipher_tfm ablkcipher;
+ struct aead_tfm aead;
+ struct blkcipher_tfm blkcipher;
+ struct cipher_tfm cipher;
+ struct hash_tfm hash;
+ struct compress_tfm compress;
+ struct rng_tfm rng;
+ } crt_u;
+
+ void (*exit)(struct crypto_tfm *tfm);
+
+ struct crypto_alg *__crt_alg;
+
+ void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+struct crypto_ablkcipher {
+ struct crypto_tfm base;
+};
+
+struct crypto_aead {
+ struct crypto_tfm base;
+};
+
+struct crypto_blkcipher {
+ struct crypto_tfm base;
+};
+
+struct crypto_cipher {
+ struct crypto_tfm base;
+};
+
+struct crypto_comp {
+ struct crypto_tfm base;
+};
+
+struct crypto_hash {
+ struct crypto_tfm base;
+};
+
+struct crypto_rng {
+ struct crypto_tfm base;
+};
+
+enum {
+ CRYPTOA_UNSPEC,
+ CRYPTOA_ALG,
+ CRYPTOA_TYPE,
+ CRYPTOA_U32,
+ __CRYPTOA_MAX,
+};
+
+#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
+
+/* Maximum number of (rtattr) parameters for each template. */
+#define CRYPTO_MAX_ATTRS 32
+
+struct crypto_attr_alg {
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_attr_type {
+ u32 type;
+ u32 mask;
+};
+
+struct crypto_attr_u32 {
+ u32 num;
+};
+
+/*
+ * Transform user interface.
+ */
+
+struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
+void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
+
+static inline void crypto_free_tfm(struct crypto_tfm *tfm)
+{
+ return crypto_destroy_tfm(tfm, tfm);
+}
+
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
+
+/*
+ * Transform helpers which query the underlying algorithm.
+ */
+static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_name;
+}
+
+static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_driver_name;
+}
+
+static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_priority;
+}
+
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
+static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_blocksize;
+}
+
+static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_alignmask;
+}
+
+static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
+{
+ return tfm->crt_flags;
+}
+
+static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
+{
+ tfm->crt_flags |= flags;
+}
+
+static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
+{
+ tfm->crt_flags &= ~flags;
+}
+
+static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_ctx;
+}
+
+static inline unsigned int crypto_tfm_ctx_alignment(void)
+{
+ struct crypto_tfm *tfm;
+ return __alignof__(tfm->__crt_ctx);
+}
+
+/*
+ * API wrappers.
+ */
+static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ return (struct crypto_ablkcipher *)tfm;
+}
+
+static inline u32 crypto_skcipher_type(u32 type)
+{
+ type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ return type;
+}
+
+static inline u32 crypto_skcipher_mask(u32 mask)
+{
+ mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
+ return mask;
+}
+
+/**
+ * DOC: Asynchronous Block Cipher API
+ *
+ * Asynchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately before the completion of the operation.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, more information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the ablkcipher_request data structure.
+ *
+ * For the asynchronous block cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. The maintenance of such
+ * state information would be important for a crypto driver implementer to
+ * have, because when calling the callback function upon completion of the
+ * cipher operation, that callback function may need some information about
+ * which operation just finished if it invoked multiple operations in parallel. This
+ * state information is unused by the kernel crypto API.
+ */
+
+/**
+ * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ablkcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ablkcipher. The returned struct
+ * crypto_ablkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that ablkcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
+ u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_ablkcipher_tfm(
+ struct crypto_ablkcipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_ablkcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
+{
+ crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ablkcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the ablkcipher is known to the kernel crypto API; false
+ * otherwise
+ */
+static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_has_alg(alg_name, crypto_skcipher_type(type),
+ crypto_skcipher_mask(mask));
+}
+
+static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
+ struct crypto_ablkcipher *tfm)
+{
+ return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
+}
+
+/**
+ * crypto_ablkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the ablkcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_ablkcipher_ivsize(
+ struct crypto_ablkcipher *tfm)
+{
+ return crypto_ablkcipher_crt(tfm)->ivsize;
+}
+
+/**
+ * crypto_ablkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the ablkcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_ablkcipher_blocksize(
+ struct crypto_ablkcipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_ablkcipher_alignmask(
+ struct crypto_ablkcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
+}
+
+static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
+}
+
+static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_ablkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ablkcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
+
+ return crt->setkey(crt->base, key, keylen);
+}
+
+/**
+ * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
+ * @req: ablkcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_ablkcipher handle that was registered with the given
+ * ablkcipher_request data structure.
+ *
+ * Return: crypto_ablkcipher handle
+ */
+static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
+ struct ablkcipher_request *req)
+{
+ return __crypto_ablkcipher_cast(req->base.tfm);
+}
+
+/**
+ * crypto_ablkcipher_encrypt() - encrypt plaintext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the ablkcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * ablkcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_tfm *crt =
+ crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+ return crt->encrypt(req);
+}
+
+/**
+ * crypto_ablkcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the ablkcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * ablkcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_tfm *crt =
+ crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+ return crt->decrypt(req);
+}
+
+/**
+ * DOC: Asynchronous Cipher Request Handle
+ *
+ * The ablkcipher_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple ablkcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the ablkcipher_request_* API calls in a similar way as
+ * ablkcipher handle to the crypto_ablkcipher_* API calls.
+ */
+
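+/*
+ * A condensed request-flow sketch. It assumes a tfm allocated and keyed as in
+ * the sketch above, that <linux/scatterlist.h> and <linux/completion.h> are
+ * included, and a caller-defined callback "my_cipher_done" like the one
+ * sketched after ablkcipher_request_set_callback() below. The 16-byte stack
+ * buffers are for brevity only.
+ *
+ *     struct ablkcipher_request *req;
+ *     struct scatterlist sg;
+ *     struct completion done;
+ *     u8 iv[16] = { 0 }, buf[16] = { 0 };
+ *     int ret;
+ *
+ *     req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ *     if (!req)
+ *             return -ENOMEM;
+ *
+ *     init_completion(&done);
+ *     ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *                                     my_cipher_done, &done);
+ *     sg_init_one(&sg, buf, sizeof(buf));
+ *     ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
+ *
+ *     ret = crypto_ablkcipher_encrypt(req);
+ *     if (ret == -EINPROGRESS || ret == -EBUSY)
+ *             wait_for_completion(&done);
+ *
+ *     ablkcipher_request_free(req);
+ *
+ * For an asynchronous completion the final status is the error value handed
+ * to the callback, not the -EINPROGRESS or -EBUSY return code seen above.
+ */
+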
+/**
+ * crypto_ablkcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_ablkcipher_reqsize(
+ struct crypto_ablkcipher *tfm)
+{
+ return crypto_ablkcipher_crt(tfm)->reqsize;
+}
+
+/**
+ * ablkcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ablkcipher handle in the request
+ * data structure with a different one.
+ */
+static inline void ablkcipher_request_set_tfm(
+ struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
+{
+ req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
+}
+
+static inline struct ablkcipher_request *ablkcipher_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct ablkcipher_request, base);
+}
+
+/**
+ * ablkcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ablkcipher
+ * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct ablkcipher_request *ablkcipher_request_alloc(
+ struct crypto_ablkcipher *tfm, gfp_t gfp)
+{
+ struct ablkcipher_request *req;
+
+ req = kmalloc(sizeof(struct ablkcipher_request) +
+ crypto_ablkcipher_reqsize(tfm), gfp);
+
+ if (likely(req))
+ ablkcipher_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * ablkcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void ablkcipher_request_free(struct ablkcipher_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * ablkcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the ablkcipher_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void ablkcipher_request_set_callback(
+ struct ablkcipher_request *req,
+ u32 flags, crypto_completion_t compl, void *data)
+{
+ req->base.complete = compl;
+ req->base.data = data;
+ req->base.flags = flags;
+}
+
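+/*
+ * A callback matching the template above, using the data pointer to carry a
+ * struct completion. The name "my_cipher_done" and the decision to ignore the
+ * final status are illustrative; an -EINPROGRESS error here merely signals
+ * that a previously backlogged request has started and is not the final
+ * completion.
+ *
+ *     static void my_cipher_done(struct crypto_async_request *req, int err)
+ *     {
+ *             struct completion *done = req->data;
+ *
+ *             if (err == -EINPROGRESS)
+ *                     return;
+ *
+ *             complete(done);
+ *     }
+ */
+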
+/**
+ * ablkcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @nbytes: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_ablkcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ */
+static inline void ablkcipher_request_set_crypt(
+ struct ablkcipher_request *req,
+ struct scatterlist *src, struct scatterlist *dst,
+ unsigned int nbytes, void *iv)
+{
+ req->src = src;
+ req->dst = dst;
+ req->nbytes = nbytes;
+ req->info = iv;
+}
+
+/**
+ * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
+ *
+ * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
+ * (listed as type "aead" in /proc/crypto)
+ *
+ * The most prominent examples for this type of encryption are GCM and CCM.
+ * However, the kernel supports other types of AEAD ciphers which are defined
+ * with the following cipher string:
+ *
+ * authenc(keyed message digest, block cipher)
+ *
+ * For example: authenc(hmac(sha256), cbc(aes))
+ *
+ * The example code provided for the asynchronous block cipher operation
+ * applies here as well. Naturally, all *ablkcipher* symbols must be exchanged
+ * for the *aead* counterparts discussed in the following. In addition, for the
+ * AEAD operation, the aead_request_set_assoc function must be used to set the
+ * pointer to the associated data memory location before performing the
+ * encryption or decryption operation. In case of an encryption, the associated
+ * data memory is filled during the encryption operation. For decryption, the
+ * associated data memory must contain data that is used to verify the integrity
+ * of the decrypted data. Another deviation from the asynchronous block cipher
+ * operation is that the caller should explicitly check for -EBADMSG returned
+ * by crypto_aead_decrypt. That error indicates an authentication failure, i.e.
+ * a breach in the integrity of the message. In essence, that -EBADMSG error
+ * code is the key bonus an AEAD cipher has over "standard" block chaining
+ * modes.
+ */
+
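+/*
+ * A minimal AEAD setup sketch. The algorithm name "gcm(aes)", the all-zero
+ * 16-byte key and the 16-byte tag size are illustrative assumptions; real
+ * code chooses the tag size passed to crypto_aead_setauthsize() according to
+ * its protocol.
+ *
+ *     struct crypto_aead *tfm;
+ *     u8 key[16] = { 0 };
+ *     int ret;
+ *
+ *     tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *
+ *     ret = crypto_aead_setkey(tfm, key, sizeof(key));
+ *     if (!ret)
+ *             ret = crypto_aead_setauthsize(tfm, 16);
+ *     if (ret) {
+ *             crypto_free_aead(tfm);
+ *             return ret;
+ *     }
+ */
+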
+static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_aead *)tfm;
+}
+
+/**
+ * crypto_alloc_aead() - allocate AEAD cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * AEAD cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an AEAD. The returned struct
+ * crypto_aead is the cipher handle that is required for any subsequent
+ * API invocation for that AEAD.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_aead() - zeroize and free aead handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_aead(struct crypto_aead *tfm)
+{
+ crypto_free_tfm(crypto_aead_tfm(tfm));
+}
+
+static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
+{
+ return &crypto_aead_tfm(tfm)->crt_aead;
+}
+
+/**
+ * crypto_aead_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the aead referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
+{
+ return crypto_aead_crt(tfm)->ivsize;
+}
+
+/**
+ * crypto_aead_authsize() - obtain maximum authentication data size
+ * @tfm: cipher handle
+ *
+ * The maximum size of the authentication data for the AEAD cipher referenced
+ * by the AEAD cipher handle is returned. The authentication data size may be
+ * zero if the cipher implements a hard-coded maximum.
+ *
+ * The authentication data may also be known as "tag value".
+ *
+ * Return: authentication data size / tag size in bytes
+ */
+static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
+{
+ return crypto_aead_crt(tfm)->authsize;
+}
+
+/**
+ * crypto_aead_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the AEAD referenced with the cipher handle is returned.
+ * The caller may use that information to allocate appropriate memory for the
+ * data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
+}
+
+static inline unsigned int crypto_aead_alignmask(struct crypto_aead *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_aead_tfm(tfm));
+}
+
+static inline u32 crypto_aead_get_flags(struct crypto_aead *tfm)
+{
+ return crypto_tfm_get_flags(crypto_aead_tfm(tfm));
+}
+
+static inline void crypto_aead_set_flags(struct crypto_aead *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_aead_tfm(tfm), flags);
+}
+
+static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
+}
+
+/**
+ * crypto_aead_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the AEAD referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct aead_tfm *crt = crypto_aead_crt(tfm);
+
+ return crt->setkey(crt->base, key, keylen);
+}
+
+/**
+ * crypto_aead_setauthsize() - set authentication data size
+ * @tfm: cipher handle
+ * @authsize: size of the authentication data / tag in bytes
+ *
+ * Set the authentication data size / tag size. AEAD requires an authentication
+ * tag (or MAC) in addition to the associated data.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
+
+static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
+{
+ return __crypto_aead_cast(req->base.tfm);
+}
+
+/**
+ * crypto_aead_encrypt() - encrypt plaintext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The encryption operation creates the authentication data /
+ * tag. That data is concatenated with the created ciphertext.
+ * The ciphertext memory size is therefore the given number of
+ * block cipher blocks + the size defined by the
+ * crypto_aead_setauthsize invocation. The caller must ensure
+ * that sufficient memory is available for the ciphertext and
+ * the authentication tag.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_aead_encrypt(struct aead_request *req)
+{
+ return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
+}
+
+/**
+ * crypto_aead_decrypt() - decrypt ciphertext
+ * @req: reference to the aead_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
+ * authentication data / tag. That authentication data / tag
+ * must have the size defined by the crypto_aead_setauthsize
+ * invocation.
+ *
+ *
+ * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
+ * cipher operation performs the authentication of the data during the
+ * decryption operation. Therefore, the function returns this error if
+ * the authentication of the ciphertext was unsuccessful (i.e. the
+ * integrity of the ciphertext or the associated data was violated);
+ * < 0 if an error occurred.
+ */
+static inline int crypto_aead_decrypt(struct aead_request *req)
+{
+ if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+ return -EINVAL;
+
+ return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
+}
+
+/**
+ * DOC: Asynchronous AEAD Request Handle
+ *
+ * The aead_request data structure contains all pointers to data required for
+ * the AEAD cipher operation. This includes the cipher handle (which can be
+ * used by multiple aead_request instances), pointer to plaintext and
+ * ciphertext, asynchronous callback function, etc. It acts as a handle to the
+ * aead_request_* API calls in a similar way as AEAD handle to the
+ * crypto_aead_* API calls.
+ */
+
+/**
+ * crypto_aead_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
+{
+ return crypto_aead_crt(tfm)->reqsize;
+}
+
+/**
+ * aead_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing aead handle in the request
+ * data structure with a different one.
+ */
+static inline void aead_request_set_tfm(struct aead_request *req,
+ struct crypto_aead *tfm)
+{
+ req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
+}
+
+/**
+ * aead_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the AEAD
+ * encrypt and decrypt API calls. During the allocation, the provided aead
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
+ gfp_t gfp)
+{
+ struct aead_request *req;
+
+ req = kmalloc(sizeof(*req) + crypto_aead_reqsize(tfm), gfp);
+
+ if (likely(req))
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * aead_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void aead_request_free(struct aead_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * aead_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function sets the callback function that is triggered once the cipher
+ * operation completes.
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void aead_request_set_callback(struct aead_request *req,
+ u32 flags,
+ crypto_completion_t compl,
+ void *data)
+{
+ req->base.complete = compl;
+ req->base.data = data;
+ req->base.flags = flags;
+}
+
+/**
+ * aead_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_aead_ivsize()
+ *
+ * This function sets the source data and destination data scatter / gather
+ * lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ *
+ * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
+ * the caller must concatenate the ciphertext followed by the
+ * authentication tag and provide the entire data stream to the
+ * decryption operation (i.e. the data length used for the
+ * initialization of the scatterlist and the data length for the
+ * decryption operation is identical). For encryption, however,
+ * the authentication tag is created while encrypting the data.
+ * The destination buffer must hold sufficient space for the
+ * ciphertext and the authentication tag while the encryption
+ * invocation must only point to the plaintext data size. The
+ * following code snippet illustrates the memory usage
+ * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
+ * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
+ * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ */
+static inline void aead_request_set_crypt(struct aead_request *req,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int cryptlen, u8 *iv)
+{
+ req->src = src;
+ req->dst = dst;
+ req->cryptlen = cryptlen;
+ req->iv = iv;
+}
+
+/**
+ * aead_request_set_assoc() - set the associated data scatter / gather list
+ * @req: request handle
+ * @assoc: associated data scatter / gather list
+ * @assoclen: number of bytes to process from @assoc
+ *
+ * For encryption, the memory is filled with the associated data. For
+ * decryption, the memory must point to the associated data.
+ */
+static inline void aead_request_set_assoc(struct aead_request *req,
+ struct scatterlist *assoc,
+ unsigned int assoclen)
+{
+ req->assoc = assoc;
+ req->assoclen = assoclen;
+}
+
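+/*
+ * A condensed AEAD decryption sketch, assuming an AEAD handle set up as in
+ * the sketch above and a completion-based callback as in the ablkcipher case.
+ * The 12-byte IV, the 8 bytes of associated data and the layout of buf
+ * (16 bytes of ciphertext immediately followed by the 16-byte tag) are
+ * illustrative assumptions.
+ *
+ *     struct aead_request *req;
+ *     struct scatterlist sg, asg;
+ *     struct completion done;
+ *     u8 iv[12] = { 0 }, assoc[8] = { 0 }, buf[16 + 16] = { 0 };
+ *     int ret;
+ *
+ *     req = aead_request_alloc(tfm, GFP_KERNEL);
+ *     if (!req)
+ *             return -ENOMEM;
+ *
+ *     init_completion(&done);
+ *     aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *                               my_cipher_done, &done);
+ *     sg_init_one(&sg, buf, sizeof(buf));
+ *     sg_init_one(&asg, assoc, sizeof(assoc));
+ *     aead_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
+ *     aead_request_set_assoc(req, &asg, sizeof(assoc));
+ *
+ *     ret = crypto_aead_decrypt(req);
+ *     if (ret == -EINPROGRESS || ret == -EBUSY)
+ *             wait_for_completion(&done);
+ *
+ *     aead_request_free(req);
+ *
+ * The final status (for example -EBADMSG on an authentication failure) is
+ * either the direct return code or, for an asynchronous completion, the error
+ * value handed to the callback.
+ */
+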
+/**
+ * DOC: Synchronous Block Cipher API
+ *
+ * The synchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
+ *
+ * Synchronous calls have a context in the tfm. But since a single tfm can be
+ * used in multiple calls and in parallel, this information should not be
+ * changeable (unless a lock is used). This applies, for example, to the
+ * symmetric key. However, the IV is changeable, so there is an iv field in the
+ * blkcipher_tfm structure for the synchronous blkcipher API. Thus, the IV is
+ * the only state information that can be kept for synchronous calls without
+ * using a big lock across a tfm.
+ *
+ * The block cipher API allows the use of a complete cipher, i.e. a cipher
+ * consisting of a template (a block chaining mode) and a single block cipher
+ * primitive (e.g. AES).
+ *
+ * The plaintext data buffer and the ciphertext data buffer are pointed to
+ * by using scatter/gather lists. The cipher operation is performed
+ * on all segments of the provided scatter/gather lists.
+ *
+ * The kernel crypto API supports a cipher operation "in-place" which means that
+ * the caller may provide the same scatter/gather list for the plaintext and
+ * cipher text. After the completion of the cipher operation, the plaintext
+ * data is replaced with the ciphertext data in case of an encryption and vice
+ * versa for a decryption. The caller must ensure that the scatter/gather lists
+ * for the output data point to sufficiently large buffers, i.e. multiples of
+ * the block size of the cipher.
+ */
+
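+/*
+ * A minimal synchronous block cipher sketch. The algorithm name "cbc(aes)",
+ * the 16-byte key, IV and data sizes, and the stack buffers are illustrative
+ * assumptions; <linux/scatterlist.h> is assumed to be included.
+ *
+ *     struct crypto_blkcipher *tfm;
+ *     struct blkcipher_desc desc;
+ *     struct scatterlist sg;
+ *     u8 key[16] = { 0 }, iv[16] = { 0 }, buf[16] = { 0 };
+ *     int ret;
+ *
+ *     tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *
+ *     ret = crypto_blkcipher_setkey(tfm, key, sizeof(key));
+ *     if (!ret) {
+ *             desc.tfm = tfm;
+ *             desc.flags = 0;
+ *             crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
+ *             sg_init_one(&sg, buf, sizeof(buf));
+ *             ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
+ *     }
+ *
+ *     crypto_free_blkcipher(tfm);
+ */
+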
+static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ return (struct crypto_blkcipher *)tfm;
+}
+
+static inline struct crypto_blkcipher *crypto_blkcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
+ return __crypto_blkcipher_cast(tfm);
+}
+
+/**
+ * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * blkcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a block cipher. The returned struct
+ * crypto_blkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
+ const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_blkcipher_tfm(
+ struct crypto_blkcipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_blkcipher() - zeroize and free the block cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
+{
+ crypto_free_tfm(crypto_blkcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_blkcipher() - Search for the availability of a block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the block cipher is known to the kernel crypto API; false
+ * otherwise
+ */
+static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
+ * @tfm: cipher handle
+ *
+ * Return: The character string holding the name of the cipher
+ */
+static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
+}
+
+static inline struct blkcipher_tfm *crypto_blkcipher_crt(
+ struct crypto_blkcipher *tfm)
+{
+ return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
+}
+
+static inline struct blkcipher_alg *crypto_blkcipher_alg(
+ struct crypto_blkcipher *tfm)
+{
+ return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
+}
+
+/**
+ * crypto_blkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the block cipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_alg(tfm)->ivsize;
+}
+
+/**
+ * crypto_blkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the block cipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_blkcipher_blocksize(
+ struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_blkcipher_alignmask(
+ struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
+}
+
+static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
+}
+
+static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_blkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the block cipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
+ key, keylen);
+}
+
+/**
+ * crypto_blkcipher_encrypt() - encrypt plaintext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data using the IV set by the caller with a preceding
+ * call of crypto_blkcipher_set_iv.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
+ return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
+}
+
+/**
+ * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.info is filled with the IV to be used for
+ * the current operation; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
+}
+
+/**
+ * crypto_blkcipher_decrypt() - decrypt ciphertext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data using the IV set by the caller with a preceding
+ * call of crypto_blkcipher_set_iv.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ *
+ */
+static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
+ return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
+}
+
+/**
+ * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt_iv call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
+}
+
+/**
+ * crypto_blkcipher_set_iv() - set IV for cipher
+ * @tfm: cipher handle
+ * @src: buffer holding the IV
+ * @len: length of the IV in bytes
+ *
+ * The caller provided IV is set for the block cipher referenced by the cipher
+ * handle.
+ */
+static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
+ const u8 *src, unsigned int len)
+{
+ memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
+}
+
+/**
+ * crypto_blkcipher_get_iv() - obtain IV from cipher
+ * @tfm: cipher handle
+ * @dst: buffer filled with the IV
+ * @len: length of the buffer dst
+ *
+ * The caller can obtain the IV set for the block cipher referenced by the
+ * cipher handle and store it into the user-provided buffer. If the buffer
+ * has insufficient space, the IV is truncated to fit the buffer.
+ */
+static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
+ u8 *dst, unsigned int len)
+{
+ memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
+}
+
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * Using the single block cipher API calls, operations with the basic cipher
+ * primitive can be implemented. These cipher primitives exclude any block
+ * chaining operations including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
+
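+/*
+ * A minimal single block cipher sketch. The algorithm name "aes" and the
+ * 16-byte key and block buffers are illustrative assumptions.
+ *
+ *     struct crypto_cipher *tfm;
+ *     u8 key[16] = { 0 }, in[16] = { 0 }, out[16];
+ *     int ret;
+ *
+ *     tfm = crypto_alloc_cipher("aes", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *
+ *     ret = crypto_cipher_setkey(tfm, key, sizeof(key));
+ *     if (!ret)
+ *             crypto_cipher_encrypt_one(tfm, out, in);
+ *
+ *     crypto_free_cipher(tfm);
+ */
+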
+static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_cipher *)tfm;
+}
+
+static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ return __crypto_cipher_cast(tfm);
+}
+
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_cipher(struct crypto_cipher *tfm)
+{
+ crypto_free_tfm(crypto_cipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ * false otherwise
+ */
+static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
+{
+ return &crypto_cipher_tfm(tfm)->crt_cipher;
+}
+
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
+}
+
+static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
+}
+
+static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
+ key, keylen);
+}
+
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src)
+{
+ crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
+ dst, src);
+}
+
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src)
+{
+ crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
+ dst, src);
+}
+
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
+ */
+
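+/*
+ * A minimal digest sketch. The algorithm name "sha1", the CRYPTO_ALG_ASYNC
+ * mask (requesting a synchronous implementation) and the buffer sizes are
+ * illustrative assumptions; real code would size the output buffer via
+ * crypto_hash_digestsize().
+ *
+ *     struct crypto_hash *tfm;
+ *     struct hash_desc desc;
+ *     struct scatterlist sg;
+ *     u8 data[64] = { 0 }, out[20];
+ *     int ret;
+ *
+ *     tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *
+ *     desc.tfm = tfm;
+ *     desc.flags = 0;
+ *     sg_init_one(&sg, data, sizeof(data));
+ *
+ *     ret = crypto_hash_init(&desc);
+ *     if (!ret)
+ *             ret = crypto_hash_update(&desc, &sg, sizeof(data));
+ *     if (!ret)
+ *             ret = crypto_hash_final(&desc, out);
+ *
+ *     crypto_free_hash(tfm);
+ */
+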
+static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_hash *)tfm;
+}
+
+static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
+{
+ BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
+ CRYPTO_ALG_TYPE_HASH_MASK);
+ return __crypto_hash_cast(tfm);
+}
+
+/**
+ * crypto_alloc_hash() - allocate synchronous message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned struct
+ * crypto_hash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ mask &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_HASH;
+ mask |= CRYPTO_ALG_TYPE_HASH_MASK;
+
+ return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_hash() - zeroize and free message digest handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_hash(struct crypto_hash *tfm)
+{
+ crypto_free_tfm(crypto_hash_tfm(tfm));
+}
+
+/**
+ * crypto_has_hash() - Search for the availability of a message digest
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the message digest cipher is known to the kernel crypto
+ * API; false otherwise
+ */
+static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ mask &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_HASH;
+ mask |= CRYPTO_ALG_TYPE_HASH_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
+{
+ return &crypto_hash_tfm(tfm)->crt_hash;
+}
+
+/**
+ * crypto_hash_blocksize() - obtain block size for message digest
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
+}
+
+static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
+}
+
+/**
+ * crypto_hash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: message digest size
+ */
+static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
+{
+ return crypto_hash_crt(tfm)->digestsize;
+}
+
+static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
+{
+ return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
+}
+
+static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
+}
+
+static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
+}
+
+/**
+ * crypto_hash_init() - (re)initialize message digest handle
+ * @desc: cipher request handle that is to be filled by the caller --
+ * desc.tfm is filled with the hash cipher handle;
+ * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * The call (re-)initializes the message digest referenced by the hash cipher
+ * request handle. Any potentially existing state created by previous
+ * operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
+static inline int crypto_hash_init(struct hash_desc *desc)
+{
+ return crypto_hash_crt(desc->tfm)->init(desc);
+}
+
+/**
+ * crypto_hash_update() - add data to message digest for processing
+ * @desc: cipher request handle
+ * @sg: scatter / gather list pointing to the data to be added to the message
+ * digest
+ * @nbytes: number of bytes to be processed from @sg
+ *
+ * Updates the message digest state of the cipher handle pointed to by the
+ * hash cipher request handle with the input data pointed to by the
+ * scatter/gather list.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
+static inline int crypto_hash_update(struct hash_desc *desc,
+ struct scatterlist *sg,
+ unsigned int nbytes)
+{
+ return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
+}
+
+/**
+ * crypto_hash_final() - calculate message digest
+ * @desc: cipher request handle
+ * @out: message digest output buffer -- The caller must ensure that the out
+ * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
+ * function).
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
+{
+ return crypto_hash_crt(desc->tfm)->final(desc, out);
+}
+
+/**
+ * crypto_hash_digest() - calculate message digest for a buffer
+ * @desc: see crypto_hash_final()
+ * @sg: see crypto_hash_update()
+ * @nbytes: see crypto_hash_update()
+ * @out: see crypto_hash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_hash_init,
+ * crypto_hash_update and crypto_hash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+static inline int crypto_hash_digest(struct hash_desc *desc,
+ struct scatterlist *sg,
+ unsigned int nbytes, u8 *out)
+{
+ return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
+}
+
+/**
+ * crypto_hash_setkey() - set key for message digest
+ * @hash: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the message digest cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_hash_setkey(struct crypto_hash *hash,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_hash_crt(hash)->setkey(hash, key, keylen);
+}
+
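+/*
+ * A minimal keyed hash sketch. The algorithm name "hmac(sha1)" and the key,
+ * data and output sizes are illustrative assumptions; the setkey call only
+ * succeeds when the handle points to a keyed hash such as HMAC.
+ *
+ *     struct crypto_hash *tfm;
+ *     struct hash_desc desc;
+ *     struct scatterlist sg;
+ *     u8 key[20] = { 0 }, data[64] = { 0 }, mac[20];
+ *     int ret;
+ *
+ *     tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *
+ *     ret = crypto_hash_setkey(tfm, key, sizeof(key));
+ *     if (!ret) {
+ *             desc.tfm = tfm;
+ *             desc.flags = 0;
+ *             sg_init_one(&sg, data, sizeof(data));
+ *             ret = crypto_hash_digest(&desc, &sg, sizeof(data), mac);
+ *     }
+ *
+ *     crypto_free_hash(tfm);
+ */
+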
+static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_comp *)tfm;
+}
+
+static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
+{
+ BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
+ CRYPTO_ALG_TYPE_MASK);
+ return __crypto_comp_cast(tfm);
+}
+
+static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_COMPRESS;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
+{
+ return &tfm->base;
+}
+
+static inline void crypto_free_comp(struct crypto_comp *tfm)
+{
+ crypto_free_tfm(crypto_comp_tfm(tfm));
+}
+
+static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_COMPRESS;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+static inline const char *crypto_comp_name(struct crypto_comp *tfm)
+{
+ return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
+}
+
+static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
+{
+ return &crypto_comp_tfm(tfm)->crt_compress;
+}
+
+static inline int crypto_comp_compress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
+ src, slen, dst, dlen);
+}
+
+static inline int crypto_comp_decompress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen)
+{
+ return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
+ src, slen, dst, dlen);
+}
+
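+/*
+ * A minimal compression sketch. The algorithm name "deflate" and the buffer
+ * sizes are illustrative assumptions; dlen must be initialized to the
+ * capacity of the destination buffer and is updated to the number of bytes
+ * actually produced.
+ *
+ *     struct crypto_comp *tfm;
+ *     u8 src[128] = { 0 }, dst[256];
+ *     unsigned int dlen = sizeof(dst);
+ *     int ret;
+ *
+ *     tfm = crypto_alloc_comp("deflate", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *
+ *     ret = crypto_comp_compress(tfm, src, sizeof(src), dst, &dlen);
+ *
+ *     crypto_free_comp(tfm);
+ */
+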
+#endif /* _LINUX_CRYPTO_H */
+
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
new file mode 100644
index 000000000..f4754282c
--- /dev/null
+++ b/include/linux/cryptohash.h
@@ -0,0 +1,20 @@
+#ifndef __CRYPTOHASH_H
+#define __CRYPTOHASH_H
+
+#include <uapi/linux/types.h>
+
+#define SHA_DIGEST_WORDS 5
+#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
+#define SHA_WORKSPACE_WORDS 16
+
+void sha_init(__u32 *buf);
+void sha_transform(__u32 *digest, const char *data, __u32 *W);
+
+#define MD5_DIGEST_WORDS 4
+#define MD5_MESSAGE_BYTES 64
+
+void md5_transform(__u32 *hash, __u32 const *in);
+
+__u32 half_md4_transform(__u32 buf[4], __u32 const in[8]);
+
+#endif
diff --git a/include/linux/cryptouser.h b/include/linux/cryptouser.h
new file mode 100644
index 000000000..4abf2ea6a
--- /dev/null
+++ b/include/linux/cryptouser.h
@@ -0,0 +1,105 @@
+/*
+ * Crypto user configuration API.
+ *
+ * Copyright (C) 2011 secunet Security Networks AG
+ * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Netlink configuration messages. */
+enum {
+ CRYPTO_MSG_BASE = 0x10,
+ CRYPTO_MSG_NEWALG = 0x10,
+ CRYPTO_MSG_DELALG,
+ CRYPTO_MSG_UPDATEALG,
+ CRYPTO_MSG_GETALG,
+ __CRYPTO_MSG_MAX
+};
+#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
+#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE)
+
+#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME
+
+/* Netlink message attributes. */
+enum crypto_attr_type_t {
+ CRYPTOCFGA_UNSPEC,
+ CRYPTOCFGA_PRIORITY_VAL, /* __u32 */
+ CRYPTOCFGA_REPORT_LARVAL, /* struct crypto_report_larval */
+ CRYPTOCFGA_REPORT_HASH, /* struct crypto_report_hash */
+ CRYPTOCFGA_REPORT_BLKCIPHER, /* struct crypto_report_blkcipher */
+ CRYPTOCFGA_REPORT_AEAD, /* struct crypto_report_aead */
+ CRYPTOCFGA_REPORT_COMPRESS, /* struct crypto_report_comp */
+ CRYPTOCFGA_REPORT_RNG, /* struct crypto_report_rng */
+ CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
+ __CRYPTOCFGA_MAX
+
+#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
+};
+
+struct crypto_user_alg {
+ char cru_name[CRYPTO_MAX_ALG_NAME];
+ char cru_driver_name[CRYPTO_MAX_ALG_NAME];
+ char cru_module_name[CRYPTO_MAX_ALG_NAME];
+ __u32 cru_type;
+ __u32 cru_mask;
+ __u32 cru_refcnt;
+ __u32 cru_flags;
+};
+
+struct crypto_report_larval {
+ char type[CRYPTO_MAX_NAME];
+};
+
+struct crypto_report_hash {
+ char type[CRYPTO_MAX_NAME];
+ unsigned int blocksize;
+ unsigned int digestsize;
+};
+
+struct crypto_report_cipher {
+ char type[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+};
+
+struct crypto_report_blkcipher {
+ char type[CRYPTO_MAX_NAME];
+ char geniv[CRYPTO_MAX_NAME];
+ unsigned int blocksize;
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+ unsigned int ivsize;
+};
+
+struct crypto_report_aead {
+ char type[CRYPTO_MAX_NAME];
+ char geniv[CRYPTO_MAX_NAME];
+ unsigned int blocksize;
+ unsigned int maxauthsize;
+ unsigned int ivsize;
+};
+
+struct crypto_report_comp {
+ char type[CRYPTO_MAX_NAME];
+};
+
+struct crypto_report_rng {
+ char type[CRYPTO_MAX_NAME];
+ unsigned int seedsize;
+};
+
+#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
+ sizeof(struct crypto_report_blkcipher))
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 000000000..cfe83239d
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,239 @@
+/*
+ * AMD CS5535/CS5536 definitions
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _CS5535_H
+#define _CS5535_H
+
+#include <asm/msr.h>
+
+/* MSRs */
+#define MSR_GLIU_P2D_RO0 0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG 0x48002001
+#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
+ * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL 0x4C000014
+#define MSR_GLCP_DOTPLL 0x4C000015
+
+#define MSR_LBAR_SMB 0x5140000B
+#define MSR_LBAR_GPIO 0x5140000C
+#define MSR_LBAR_MFGPT 0x5140000D
+#define MSR_LBAR_ACPI 0x5140000E
+#define MSR_LBAR_PMS 0x5140000F
+
+#define MSR_DIVIL_SOFT_RESET 0x51400017
+
+#define MSR_PIC_YSEL_LOW 0x51400020
+#define MSR_PIC_YSEL_HIGH 0x51400021
+#define MSR_PIC_ZSEL_LOW 0x51400022
+#define MSR_PIC_ZSEL_HIGH 0x51400023
+#define MSR_PIC_IRQM_LPC 0x51400025
+
+#define MSR_MFGPT_IRQ 0x51400028
+#define MSR_MFGPT_NR 0x51400029
+#define MSR_MFGPT_SETUP 0x5140002B
+
+#define MSR_RTC_DOMA_OFFSET 0x51400055
+#define MSR_RTC_MONA_OFFSET 0x51400056
+#define MSR_RTC_CEN_OFFSET 0x51400057
+
+#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
+#define MSR_GX_MSR_PADSEL 0xC0002011
+
+static inline int cs5535_pic_unreqz_select_high(unsigned int group,
+ unsigned int irq)
+{
+ uint32_t lo, hi;
+
+ rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+ lo &= ~(0xF << (group * 4));
+ lo |= (irq & 0xF) << (group * 4);
+ wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+ return 0;
+}
+
+/* PIC registers */
+#define CS5536_PIC_INT_SEL1 0x4d0
+#define CS5536_PIC_INT_SEL2 0x4d1
+
+/* resource sizes */
+#define LBAR_GPIO_SIZE 0xFF
+#define LBAR_MFGPT_SIZE 0x40
+#define LBAR_ACPI_SIZE 0x40
+#define LBAR_PMS_SIZE 0x80
+
+/*
+ * PMC registers (PMS block)
+ * It is only safe to access these registers as dword accesses.
+ * See CS5536 Specification Update errata 17 & 18
+ */
+#define CS5536_PM_SCLK 0x10
+#define CS5536_PM_IN_SLPCTL 0x20
+#define CS5536_PM_WKXD 0x34
+#define CS5536_PM_WKD 0x30
+#define CS5536_PM_SSC 0x54
+
+/*
+ * PM registers (ACPI block)
+ * It is only safe to access these registers as dword accesses.
+ * See CS5536 Specification Update errata 17 & 18
+ */
+#define CS5536_PM1_STS 0x00
+#define CS5536_PM1_EN 0x02
+#define CS5536_PM1_CNT 0x08
+#define CS5536_PM_GPE0_STS 0x18
+#define CS5536_PM_GPE0_EN 0x1c
+
+/* CS5536_PM1_STS bits */
+#define CS5536_WAK_FLAG (1 << 15)
+#define CS5536_RTC_FLAG (1 << 10)
+#define CS5536_PWRBTN_FLAG (1 << 8)
+
+/* CS5536_PM1_EN bits */
+#define CS5536_PM_PWRBTN (1 << 8)
+#define CS5536_PM_RTC (1 << 10)
+
+/* CS5536_PM_GPE0_STS bits */
+#define CS5536_GPIOM7_PME_FLAG (1 << 31)
+#define CS5536_GPIOM6_PME_FLAG (1 << 30)
+
+/* CS5536_PM_GPE0_EN bits */
+#define CS5536_GPIOM7_PME_EN (1 << 31)
+#define CS5536_GPIOM6_PME_EN (1 << 30)
+
+/* VSA2 magic values */
+#define VSA_VRC_INDEX 0xAC1C
+#define VSA_VRC_DATA 0xAC1E
+#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
+#define VSA_VR_SIGNATURE 0x0003
+#define VSA_VR_MEM_SIZE 0x0200
+#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG 0x534d /* General Software signature */
+
+#include <linux/io.h>
+
+static inline int cs5535_has_vsa2(void)
+{
+ static int has_vsa2 = -1;
+
+ if (has_vsa2 == -1) {
+ uint16_t val;
+
+ /*
+ * The VSA has virtual registers that we can query for a
+ * signature.
+ */
+ outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+ outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+ val = inw(VSA_VRC_DATA);
+ has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
+ }
+
+ return has_vsa2;
+}
+
+/* GPIOs */
+#define GPIO_OUTPUT_VAL 0x00
+#define GPIO_OUTPUT_ENABLE 0x04
+#define GPIO_OUTPUT_OPEN_DRAIN 0x08
+#define GPIO_OUTPUT_INVERT 0x0C
+#define GPIO_OUTPUT_AUX1 0x10
+#define GPIO_OUTPUT_AUX2 0x14
+#define GPIO_PULL_UP 0x18
+#define GPIO_PULL_DOWN 0x1C
+#define GPIO_INPUT_ENABLE 0x20
+#define GPIO_INPUT_INVERT 0x24
+#define GPIO_INPUT_FILTER 0x28
+#define GPIO_INPUT_EVENT_COUNT 0x2C
+#define GPIO_READ_BACK 0x30
+#define GPIO_INPUT_AUX1 0x34
+#define GPIO_EVENTS_ENABLE 0x38
+#define GPIO_LOCK_ENABLE 0x3C
+#define GPIO_POSITIVE_EDGE_EN 0x40
+#define GPIO_NEGATIVE_EDGE_EN 0x44
+#define GPIO_POSITIVE_EDGE_STS 0x48
+#define GPIO_NEGATIVE_EDGE_STS 0x4C
+
+#define GPIO_FLTR7_AMOUNT 0xD8
+
+#define GPIO_MAP_X 0xE0
+#define GPIO_MAP_Y 0xE4
+#define GPIO_MAP_Z 0xE8
+#define GPIO_MAP_W 0xEC
+
+#define GPIO_FE7_SEL 0xF7
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg);
+void cs5535_gpio_clear(unsigned offset, unsigned int reg);
+int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+int cs5535_gpio_set_irq(unsigned group, unsigned irq);
+void cs5535_gpio_setup_event(unsigned offset, int pair, int pme);
+
+/* MFGPTs */
+
+#define MFGPT_MAX_TIMERS 8
+#define MFGPT_TIMER_ANY (-1)
+
+#define MFGPT_DOMAIN_WORKING 1
+#define MFGPT_DOMAIN_STANDBY 2
+#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
+
+#define MFGPT_CMP1 0
+#define MFGPT_CMP2 1
+
+#define MFGPT_EVENT_IRQ 0
+#define MFGPT_EVENT_NMI 1
+#define MFGPT_EVENT_RESET 3
+
+#define MFGPT_REG_CMP1 0
+#define MFGPT_REG_CMP2 2
+#define MFGPT_REG_COUNTER 4
+#define MFGPT_REG_SETUP 6
+
+#define MFGPT_SETUP_CNTEN (1 << 15)
+#define MFGPT_SETUP_CMP2 (1 << 14)
+#define MFGPT_SETUP_CMP1 (1 << 13)
+#define MFGPT_SETUP_SETUP (1 << 12)
+#define MFGPT_SETUP_STOPEN (1 << 11)
+#define MFGPT_SETUP_EXTEN (1 << 10)
+#define MFGPT_SETUP_REVEN (1 << 5)
+#define MFGPT_SETUP_CLKSEL (1 << 4)
+
+struct cs5535_mfgpt_timer;
+
+extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer,
+ uint16_t reg);
+extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+ uint16_t value);
+
+extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+ int event, int enable);
+extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp,
+ int *irq, int enable);
+extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer,
+ int domain);
+extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer);
+
+static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 1);
+}
+
+static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer,
+ int cmp, int *irq)
+{
+ return cs5535_mfgpt_set_irq(timer, cmp, irq, 0);
+}
+
+#endif
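
The MFGPT declarations above are consumed by drivers roughly as follows. This is only a hedged sketch of a driver fragment: the IRQ number and comparator values are placeholders, the error codes assume <linux/errno.h>, and real users (such as the clock-event driver) do considerably more validation.

/* Hypothetical driver fragment built on the MFGPT API declared above. */
static struct cs5535_mfgpt_timer *example_timer;
static int example_irq = 7;		/* placeholder PIC input */

static int example_mfgpt_init(void)
{
	example_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY,
						 MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	if (cs5535_mfgpt_setup_irq(example_timer, MFGPT_CMP2, &example_irq)) {
		cs5535_mfgpt_free_timer(example_timer);
		return -EIO;
	}

	/* Arm comparator 2 and start the counter from zero. */
	cs5535_mfgpt_write(example_timer, MFGPT_REG_CMP2, 0x8000);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return 0;
}
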
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
new file mode 100644
index 000000000..653589e3e
--- /dev/null
+++ b/include/linux/ctype.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_CTYPE_H
+#define _LINUX_CTYPE_H
+
+/*
+ * NOTE! This ctype does not handle EOF like the standard C
+ * library is required to.
+ */
+
+#define _U 0x01 /* upper */
+#define _L 0x02 /* lower */
+#define _D 0x04 /* digit */
+#define _C 0x08 /* cntrl */
+#define _P 0x10 /* punct */
+#define _S 0x20 /* white space (space/lf/tab) */
+#define _X 0x40 /* hex digit */
+#define _SP 0x80 /* hard space (0x20) */
+
+extern const unsigned char _ctype[];
+
+#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
+
+#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
+#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
+#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
+#define isdigit(c) ((__ismask(c)&(_D)) != 0)
+#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
+#define islower(c) ((__ismask(c)&(_L)) != 0)
+#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
+#define ispunct(c) ((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
+#define isspace(c) ((__ismask(c)&(_S)) != 0)
+#define isupper(c) ((__ismask(c)&(_U)) != 0)
+#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
+
+#define isascii(c) (((unsigned char)(c))<=0x7f)
+#define toascii(c) (((unsigned char)(c))&0x7f)
+
+static inline unsigned char __tolower(unsigned char c)
+{
+ if (isupper(c))
+ c -= 'A'-'a';
+ return c;
+}
+
+static inline unsigned char __toupper(unsigned char c)
+{
+ if (islower(c))
+ c -= 'a'-'A';
+ return c;
+}
+
+#define tolower(c) __tolower(c)
+#define toupper(c) __toupper(c)
+
+/*
+ * Fast implementation of tolower() for internal usage. Do not use in your
+ * code.
+ */
+static inline char _tolower(const char c)
+{
+ return c | 0x20;
+}
+
+/* Fast check for octal digit */
+static inline int isodigit(const char c)
+{
+ return c >= '0' && c <= '7';
+}
+
+#endif
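
The classification macros above are table lookups on _ctype and are safe for any byte value, since __ismask() first casts to unsigned char. A trivial, hypothetical helper shows the usual per-byte pattern:

/* Hypothetical helper using only the macros declared above. */
static int count_hex_digits(const char *s)
{
	int n = 0;

	while (*s) {
		if (isxdigit(*s))
			n++;
		s++;
	}
	return n;
}
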
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
new file mode 100644
index 000000000..b72332823
--- /dev/null
+++ b/include/linux/cuda.h
@@ -0,0 +1,18 @@
+/*
+ * Definitions for talking to the CUDA. The CUDA is a microcontroller
+ * which controls the ADB, system power, RTC, and various other things.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ */
+#ifndef _LINUX_CUDA_H
+#define _LINUX_CUDA_H
+
+#include <uapi/linux/cuda.h>
+
+
+extern int find_via_cuda(void);
+extern int cuda_request(struct adb_request *req,
+ void (*done)(struct adb_request *), int nbytes, ...);
+extern void cuda_poll(void);
+
+#endif /* _LINUX_CUDA_H */
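
Most callers drive this interface synchronously: queue a request, then poll until the reply lands. A hedged sketch, assuming the CUDA_PACKET/CUDA_GET_TIME command bytes from <uapi/linux/cuda.h> and the completion flag in struct adb_request:

/* Hypothetical synchronous read of the CUDA real-time clock. */
static int example_read_cuda_time(struct adb_request *req)
{
	if (cuda_request(req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
		return -EIO;

	while (!req->complete)
		cuda_poll();	/* busy-wait until the reply arrives */

	return 0;		/* reply bytes are in req->reply[] */
}
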
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
new file mode 100644
index 000000000..19ae518f5
--- /dev/null
+++ b/include/linux/cyclades.h
@@ -0,0 +1,360 @@
+/* $Revision: 3.0 $$Date: 1998/11/02 14:20:59 $
+ * linux/include/linux/cyclades.h
+ *
+ * This file was initially written by
+ * Randolph Bentson <bentson@grieg.seaslug.org> and is maintained by
+ * Ivan Passos <ivan@cyclades.com>.
+ *
+ * This file contains the general definitions for the cyclades.c driver
+ *$Log: cyclades.h,v $
+ *Revision 3.1 2002/01/29 11:36:16 henrique
+ *added throttle field on struct cyclades_port to indicate whether the
+ *port is throttled or not
+ *
+ *Revision 3.1 2000/04/19 18:52:52 ivan
+ *converted address fields to unsigned long and added fields for physical
+ *addresses on cyclades_card structure;
+ *
+ *Revision 3.0 1998/11/02 14:20:59 ivan
+ *added nports field on cyclades_card structure;
+ *
+ *Revision 2.5 1998/08/03 16:57:01 ivan
+ *added cyclades_idle_stats structure;
+ *
+ *Revision 2.4 1998/06/01 12:09:53 ivan
+ *removed closing_wait2 from cyclades_port structure;
+ *
+ *Revision 2.3 1998/03/16 18:01:12 ivan
+ *changes in the cyclades_port structure to get it closer to the
+ *standard serial port structure;
+ *added constants for new ioctls;
+ *
+ *Revision 2.2 1998/02/17 16:50:00 ivan
+ *changes in the cyclades_port structure (addition of shutdown_wait and
+ *chip_rev variables);
+ *added constants for new ioctls and for CD1400 rev. numbers.
+ *
+ *Revision 2.1 1997/10/24 16:03:00 ivan
+ *added rflow (which allows enabling the CD1400 special flow control
+ *feature) and rtsdtr_inv (which allows DTR/RTS pin inversion) to
+ *cyclades_port structure;
+ *added Alpha support
+ *
+ *Revision 2.0 1997/06/30 10:30:00 ivan
+ *added some new doorbell command constants related to IOCTLW and
+ *UART error signaling
+ *
+ *Revision 1.8 1997/06/03 15:30:00 ivan
+ *added constant ZFIRM_HLT
+ *added constant CyPCI_Ze_win ( = 2 * Cy_PCI_Zwin)
+ *
+ *Revision 1.7 1997/03/26 10:30:00 daniel
+ *new entries at the end of cyclades_port struct to reallocate
+ *variables illegally allocated within card memory.
+ *
+ *Revision 1.6 1996/09/09 18:35:30 bentson
+ *fold in changes for Cyclom-Z -- including structures for
+ *communicating with the board, as well as modest changes to the original
+ *structures to support new features.
+ *
+ *Revision 1.5 1995/11/13 21:13:31 bentson
+ *changes suggested by Michael Chastain <mec@duracef.shout.net>
+ *to support use of this file in non-kernel applications
+ *
+ *
+ */
+#ifndef _LINUX_CYCLADES_H
+#define _LINUX_CYCLADES_H
+
+#include <uapi/linux/cyclades.h>
+
+
+/* Per card data structure */
+struct cyclades_card {
+ void __iomem *base_addr;
+ union {
+ void __iomem *p9050;
+ struct RUNTIME_9060 __iomem *p9060;
+ } ctl_addr;
+ struct BOARD_CTRL __iomem *board_ctrl; /* cyz specific */
+ int irq;
+ unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
+ unsigned int first_line; /* minor number of first channel on card */
+ unsigned int nports; /* Number of ports in the card */
+ int bus_index; /* address shift - 0 for ISA, 1 for PCI */
+ int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
+ u32 hw_ver;
+ spinlock_t card_lock;
+ struct cyclades_port *ports;
+};
+
+/***************************************
+ * Memory access functions/macros *
+ * (required to support Alpha systems) *
+ ***************************************/
+
+#define cy_writeb(port,val) do { writeb((val), (port)); mb(); } while (0)
+#define cy_writew(port,val) do { writew((val), (port)); mb(); } while (0)
+#define cy_writel(port,val) do { writel((val), (port)); mb(); } while (0)
+
+/*
+ * Statistics counters
+ */
+struct cyclades_icount {
+ __u32 cts, dsr, rng, dcd, tx, rx;
+ __u32 frame, parity, overrun, brk;
+ __u32 buf_overrun;
+};
+
+/*
+ * This is our internal structure for each serial port's state.
+ *
+ * Many fields parallel those in the standard serial_struct structure.
+ *
+ * For definitions of the flags field, see tty.h
+ */
+
+struct cyclades_port {
+ int magic;
+ struct tty_port port;
+ struct cyclades_card *card;
+ union {
+ struct {
+ void __iomem *base_addr;
+ } cyy;
+ struct {
+ struct CH_CTRL __iomem *ch_ctrl;
+ struct BUF_CTRL __iomem *buf_ctrl;
+ } cyz;
+ } u;
+ int line;
+ int flags; /* defined in tty.h */
+ int type; /* UART type */
+ int read_status_mask;
+ int ignore_status_mask;
+ int timeout;
+ int xmit_fifo_size;
+ int cor1,cor2,cor3,cor4,cor5;
+ int tbpr,tco,rbpr,rco;
+ int baud;
+ int rflow;
+ int rtsdtr_inv;
+ int chip_rev;
+ int custom_divisor;
+ u8 x_char; /* to be pushed out ASAP */
+ int breakon;
+ int breakoff;
+ int xmit_head;
+ int xmit_tail;
+ int xmit_cnt;
+ int default_threshold;
+ int default_timeout;
+ unsigned long rflush_count;
+ struct cyclades_monitor mon;
+ struct cyclades_idle_stats idle_stats;
+ struct cyclades_icount icount;
+ struct completion shutdown_wait;
+ int throttle;
+};
+
+#define CLOSING_WAIT_DELAY 30*HZ
+#define CY_CLOSING_WAIT_NONE ASYNC_CLOSING_WAIT_NONE
+#define CY_CLOSING_WAIT_INF ASYNC_CLOSING_WAIT_INF
+
+
+#define CyMAX_CHIPS_PER_CARD 8
+#define CyMAX_CHAR_FIFO 12
+#define CyPORTS_PER_CHIP 4
+#define CD1400_MAX_SPEED 115200
+
+#define CyISA_Ywin 0x2000
+
+#define CyPCI_Ywin 0x4000
+#define CyPCI_Yctl 0x80
+#define CyPCI_Zctl CTRL_WINDOW_SIZE
+#define CyPCI_Zwin 0x80000
+#define CyPCI_Ze_win (2 * CyPCI_Zwin)
+
+#define PCI_DEVICE_ID_MASK 0x06
+
+/**** CD1400 registers ****/
+
+#define CD1400_REV_G 0x46
+#define CD1400_REV_J 0x48
+
+#define CyRegSize 0x0400
+#define Cy_HwReset 0x1400
+#define Cy_ClrIntr 0x1800
+#define Cy_EpldRev 0x1e00
+
+/* Global Registers */
+
+#define CyGFRCR (0x40*2)
+#define CyRevE (44)
+#define CyCAR (0x68*2)
+#define CyCHAN_0 (0x00)
+#define CyCHAN_1 (0x01)
+#define CyCHAN_2 (0x02)
+#define CyCHAN_3 (0x03)
+#define CyGCR (0x4B*2)
+#define CyCH0_SERIAL (0x00)
+#define CyCH0_PARALLEL (0x80)
+#define CySVRR (0x67*2)
+#define CySRModem (0x04)
+#define CySRTransmit (0x02)
+#define CySRReceive (0x01)
+#define CyRICR (0x44*2)
+#define CyTICR (0x45*2)
+#define CyMICR (0x46*2)
+#define CyICR0 (0x00)
+#define CyICR1 (0x01)
+#define CyICR2 (0x02)
+#define CyICR3 (0x03)
+#define CyRIR (0x6B*2)
+#define CyTIR (0x6A*2)
+#define CyMIR (0x69*2)
+#define CyIRDirEq (0x80)
+#define CyIRBusy (0x40)
+#define CyIRUnfair (0x20)
+#define CyIRContext (0x1C)
+#define CyIRChannel (0x03)
+#define CyPPR (0x7E*2)
+#define CyCLOCK_20_1MS (0x27)
+#define CyCLOCK_25_1MS (0x31)
+#define CyCLOCK_25_5MS (0xf4)
+#define CyCLOCK_60_1MS (0x75)
+#define CyCLOCK_60_2MS (0xea)
+
+/* Virtual Registers */
+
+#define CyRIVR (0x43*2)
+#define CyTIVR (0x42*2)
+#define CyMIVR (0x41*2)
+#define CyIVRMask (0x07)
+#define CyIVRRxEx (0x07)
+#define CyIVRRxOK (0x03)
+#define CyIVRTxOK (0x02)
+#define CyIVRMdmOK (0x01)
+#define CyTDR (0x63*2)
+#define CyRDSR (0x62*2)
+#define CyTIMEOUT (0x80)
+#define CySPECHAR (0x70)
+#define CyBREAK (0x08)
+#define CyPARITY (0x04)
+#define CyFRAME (0x02)
+#define CyOVERRUN (0x01)
+#define CyMISR (0x4C*2)
+/* see CyMCOR_ and CyMSVR_ for bits*/
+#define CyEOSRR (0x60*2)
+
+/* Channel Registers */
+
+#define CyLIVR (0x18*2)
+#define CyMscsr (0x01)
+#define CyTdsr (0x02)
+#define CyRgdsr (0x03)
+#define CyRedsr (0x07)
+#define CyCCR (0x05*2)
+/* Format 1 */
+#define CyCHAN_RESET (0x80)
+#define CyCHIP_RESET (0x81)
+#define CyFlushTransFIFO (0x82)
+/* Format 2 */
+#define CyCOR_CHANGE (0x40)
+#define CyCOR1ch (0x02)
+#define CyCOR2ch (0x04)
+#define CyCOR3ch (0x08)
+/* Format 3 */
+#define CySEND_SPEC_1 (0x21)
+#define CySEND_SPEC_2 (0x22)
+#define CySEND_SPEC_3 (0x23)
+#define CySEND_SPEC_4 (0x24)
+/* Format 4 */
+#define CyCHAN_CTL (0x10)
+#define CyDIS_RCVR (0x01)
+#define CyENB_RCVR (0x02)
+#define CyDIS_XMTR (0x04)
+#define CyENB_XMTR (0x08)
+#define CySRER (0x06*2)
+#define CyMdmCh (0x80)
+#define CyRxData (0x10)
+#define CyTxRdy (0x04)
+#define CyTxMpty (0x02)
+#define CyNNDT (0x01)
+#define CyCOR1 (0x08*2)
+#define CyPARITY_NONE (0x00)
+#define CyPARITY_0 (0x20)
+#define CyPARITY_1 (0xA0)
+#define CyPARITY_E (0x40)
+#define CyPARITY_O (0xC0)
+#define Cy_1_STOP (0x00)
+#define Cy_1_5_STOP (0x04)
+#define Cy_2_STOP (0x08)
+#define Cy_5_BITS (0x00)
+#define Cy_6_BITS (0x01)
+#define Cy_7_BITS (0x02)
+#define Cy_8_BITS (0x03)
+#define CyCOR2 (0x09*2)
+#define CyIXM (0x80)
+#define CyTxIBE (0x40)
+#define CyETC (0x20)
+#define CyAUTO_TXFL (0x60)
+#define CyLLM (0x10)
+#define CyRLM (0x08)
+#define CyRtsAO (0x04)
+#define CyCtsAE (0x02)
+#define CyDsrAE (0x01)
+#define CyCOR3 (0x0A*2)
+#define CySPL_CH_DRANGE (0x80) /* special character detect range */
+#define CySPL_CH_DET1 (0x40) /* enable special character detection
+ on SCHR4-SCHR3 */
+#define CyFL_CTRL_TRNSP (0x20) /* Flow Control Transparency */
+#define CySPL_CH_DET2 (0x10) /* Enable special character detection
+ on SCHR2-SCHR1 */
+#define CyREC_FIFO (0x0F) /* Receive FIFO threshold */
+#define CyCOR4 (0x1E*2)
+#define CyCOR5 (0x1F*2)
+#define CyCCSR (0x0B*2)
+#define CyRxEN (0x80)
+#define CyRxFloff (0x40)
+#define CyRxFlon (0x20)
+#define CyTxEN (0x08)
+#define CyTxFloff (0x04)
+#define CyTxFlon (0x02)
+#define CyRDCR (0x0E*2)
+#define CySCHR1 (0x1A*2)
+#define CySCHR2 (0x1B*2)
+#define CySCHR3 (0x1C*2)
+#define CySCHR4 (0x1D*2)
+#define CySCRL (0x22*2)
+#define CySCRH (0x23*2)
+#define CyLNC (0x24*2)
+#define CyMCOR1 (0x15*2)
+#define CyMCOR2 (0x16*2)
+#define CyRTPR (0x21*2)
+#define CyMSVR1 (0x6C*2)
+#define CyMSVR2 (0x6D*2)
+#define CyANY_DELTA (0xF0)
+#define CyDSR (0x80)
+#define CyCTS (0x40)
+#define CyRI (0x20)
+#define CyDCD (0x10)
+#define CyDTR (0x02)
+#define CyRTS (0x01)
+#define CyPVSR (0x6F*2)
+#define CyRBPR (0x78*2)
+#define CyRCOR (0x7C*2)
+#define CyTBPR (0x72*2)
+#define CyTCOR (0x76*2)
+
+/* Custom Registers */
+
+#define CyPLX_VER (0x3400)
+#define PLX_9050 0x0b
+#define PLX_9060 0x0c
+#define PLX_9080 0x0d
+
+/***************************************************************************/
+
+#endif /* _LINUX_CYCLADES_H */
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h
new file mode 100644
index 000000000..542888504
--- /dev/null
+++ b/include/linux/davinci_emac.h
@@ -0,0 +1,50 @@
+/*
+ * TI DaVinci EMAC platform support
+ *
+ * Author: Kevin Hilman, Deep Root Systems, LLC
+ *
+ * 2007 (c) Deep Root Systems, LLC. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef _LINUX_DAVINCI_EMAC_H
+#define _LINUX_DAVINCI_EMAC_H
+
+#include <linux/if_ether.h>
+#include <linux/memory.h>
+
+struct mdio_platform_data {
+ unsigned long bus_freq;
+};
+
+struct emac_platform_data {
+ char mac_addr[ETH_ALEN];
+ u32 ctrl_reg_offset;
+ u32 ctrl_mod_reg_offset;
+ u32 ctrl_ram_offset;
+ u32 hw_ram_addr;
+ u32 ctrl_ram_size;
+
+ /*
+ * phy_id can be one of the following:
+ * - NULL : use the first phy on the bus,
+ * - "" : force to 100/full, no mdio control
+ * - "<bus>:<addr>" : use the specified bus and phy
+ */
+ const char *phy_id;
+
+ u8 rmii_en;
+ u8 version;
+ bool no_bd_ram;
+ void (*interrupt_enable) (void);
+ void (*interrupt_disable) (void);
+};
+
+enum {
+ EMAC_VERSION_1, /* DM644x */
+ EMAC_VERSION_2, /* DM646x */
+};
+
+void davinci_get_mac_addr(struct memory_accessor *mem_acc, void *context);
+#endif
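
The phy_id convention described in the struct comment is normally exercised from board support code. A hedged sketch follows; all offsets and the MDIO bus name are illustrative placeholders, not values taken from a real board file.

/* Hypothetical board-file fragment; values are illustrative only. */
static struct emac_platform_data example_emac_pdata = {
	.ctrl_reg_offset	= 0x0000,
	.ctrl_mod_reg_offset	= 0x1000,
	.ctrl_ram_offset	= 0x2000,
	.ctrl_ram_size		= 0x2000,
	.version		= EMAC_VERSION_2,	/* DM646x-class EMAC */
	.phy_id			= "davinci_mdio-0:01",	/* "<bus>:<addr>" form */
	.rmii_en		= 0,
};
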
diff --git a/include/linux/dca.h b/include/linux/dca.h
new file mode 100644
index 000000000..d27a7a057
--- /dev/null
+++ b/include/linux/dca.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef DCA_H
+#define DCA_H
+
+#include <linux/pci.h>
+
+/* DCA Provider API */
+
+/* DCA Notifier Interface */
+void dca_register_notify(struct notifier_block *nb);
+void dca_unregister_notify(struct notifier_block *nb);
+
+#define DCA_PROVIDER_ADD 0x0001
+#define DCA_PROVIDER_REMOVE 0x0002
+
+struct dca_provider {
+ struct list_head node;
+ struct dca_ops *ops;
+ struct device *cd;
+ int id;
+};
+
+struct dca_domain {
+ struct list_head node;
+ struct list_head dca_providers;
+ struct pci_bus *pci_rc;
+};
+
+struct dca_ops {
+ int (*add_requester) (struct dca_provider *, struct device *);
+ int (*remove_requester) (struct dca_provider *, struct device *);
+ u8 (*get_tag) (struct dca_provider *, struct device *,
+ int cpu);
+ int (*dev_managed) (struct dca_provider *, struct device *);
+};
+
+struct dca_provider *alloc_dca_provider(struct dca_ops *ops, int priv_size);
+void free_dca_provider(struct dca_provider *dca);
+int register_dca_provider(struct dca_provider *dca, struct device *dev);
+void unregister_dca_provider(struct dca_provider *dca, struct device *dev);
+
+static inline void *dca_priv(struct dca_provider *dca)
+{
+ return (void *)dca + sizeof(struct dca_provider);
+}
+
+/* Requester API */
+#define DCA_GET_TAG_TWO_ARGS
+int dca_add_requester(struct device *dev);
+int dca_remove_requester(struct device *dev);
+u8 dca_get_tag(int cpu);
+u8 dca3_get_tag(struct device *dev, int cpu);
+
+/* internal stuff */
+int __init dca_sysfs_init(void);
+void __exit dca_sysfs_exit(void);
+int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev);
+void dca_sysfs_remove_provider(struct dca_provider *dca);
+int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot);
+void dca_sysfs_remove_req(struct dca_provider *dca, int slot);
+
+#endif /* DCA_H */
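
On the provider side the flow is: fill a dca_ops table, allocate a provider with room for private data, then register it against the hosting device. The sketch below is hypothetical; the trivial callback bodies only illustrate the signatures declared above.

/* Hypothetical provider skeleton; only the registration flow is real API. */
static int example_add_requester(struct dca_provider *dca, struct device *dev)
{
	return 0;			/* accept every requester */
}

static int example_remove_requester(struct dca_provider *dca, struct device *dev)
{
	return 0;
}

static u8 example_get_tag(struct dca_provider *dca, struct device *dev, int cpu)
{
	return (u8)cpu;			/* trivial tag: the CPU number */
}

static int example_dev_managed(struct dca_provider *dca, struct device *dev)
{
	return 1;
}

static struct dca_ops example_dca_ops = {
	.add_requester		= example_add_requester,
	.remove_requester	= example_remove_requester,
	.get_tag		= example_get_tag,
	.dev_managed		= example_dev_managed,
};

static int example_dca_register(struct device *dev, int priv_size)
{
	struct dca_provider *dca;
	int err;

	dca = alloc_dca_provider(&example_dca_ops, priv_size);
	if (!dca)
		return -ENOMEM;

	err = register_dca_provider(dca, dev);
	if (err)
		free_dca_provider(dca);
	return err;
}
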
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
new file mode 100644
index 000000000..df334cbac
--- /dev/null
+++ b/include/linux/dcache.h
@@ -0,0 +1,579 @@
+#ifndef __LINUX_DCACHE_H
+#define __LINUX_DCACHE_H
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/rculist_bl.h>
+#include <linux/spinlock.h>
+#include <linux/seqlock.h>
+#include <linux/cache.h>
+#include <linux/rcupdate.h>
+#include <linux/lockref.h>
+
+struct path;
+struct vfsmount;
+
+/*
+ * linux/include/linux/dcache.h
+ *
+ * Dirent cache data structures
+ *
+ * (C) Copyright 1997 Thomas Schoebel-Theuer,
+ * with heavy changes by Linus Torvalds
+ */
+
+#define IS_ROOT(x) ((x) == (x)->d_parent)
+
+/* The hash is always the low bits of hash_len */
+#ifdef __LITTLE_ENDIAN
+ #define HASH_LEN_DECLARE u32 hash; u32 len;
+ #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8))
+#else
+ #define HASH_LEN_DECLARE u32 len; u32 hash;
+ #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8))
+#endif
+
+/*
+ * "quick string" -- eases parameter passing, but more importantly
+ * saves "metadata" about the string (ie length and the hash).
+ *
+ * hash comes first so it snuggles against d_parent in the
+ * dentry.
+ */
+struct qstr {
+ union {
+ struct {
+ HASH_LEN_DECLARE;
+ };
+ u64 hash_len;
+ };
+ const unsigned char *name;
+};
+
+#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
+#define hashlen_hash(hashlen) ((u32) (hashlen))
+#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
+#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
+
+struct dentry_stat_t {
+ long nr_dentry;
+ long nr_unused;
+ long age_limit; /* age in seconds */
+ long want_pages; /* pages requested by system */
+ long dummy[2];
+};
+extern struct dentry_stat_t dentry_stat;
+
+/* Name hashing routines. Initial hash value */
+/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
+#define init_name_hash() 0
+
+/* partial hash update function. Assume roughly 4 bits per character */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+ return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+/*
+ * Finally: cut down the number of bits to an int value (and try to avoid
+ * losing bits)
+ */
+static inline unsigned long end_name_hash(unsigned long hash)
+{
+ return (unsigned int) hash;
+}
+
+/* Compute the hash for a name string. */
+extern unsigned int full_name_hash(const unsigned char *, unsigned int);
+
+/*
+ * Try to keep struct dentry aligned on 64 byte cachelines (this will
+ * give reasonable cacheline footprint with larger lines without the
+ * large memory footprint increase).
+ */
+#ifdef CONFIG_64BIT
+# define DNAME_INLINE_LEN 32 /* 192 bytes */
+#else
+# ifdef CONFIG_SMP
+# define DNAME_INLINE_LEN 36 /* 128 bytes */
+# else
+# define DNAME_INLINE_LEN 40 /* 128 bytes */
+# endif
+#endif
+
+#define d_lock d_lockref.lock
+
+struct dentry {
+ /* RCU lookup touched fields */
+ unsigned int d_flags; /* protected by d_lock */
+ seqcount_t d_seq; /* per dentry seqlock */
+ struct hlist_bl_node d_hash; /* lookup hash list */
+ struct dentry *d_parent; /* parent directory */
+ struct qstr d_name;
+ struct inode *d_inode; /* Where the name belongs to - NULL is
+ * negative */
+ unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
+
+ /* Ref lookup also touches following */
+ struct lockref d_lockref; /* per-dentry lock and refcount */
+ const struct dentry_operations *d_op;
+ struct super_block *d_sb; /* The root of the dentry tree */
+ unsigned long d_time; /* used by d_revalidate */
+ void *d_fsdata; /* fs-specific data */
+
+ struct list_head d_lru; /* LRU list */
+ struct list_head d_child; /* child of parent list */
+ struct list_head d_subdirs; /* our children */
+ /*
+ * d_alias and d_rcu can share memory
+ */
+ union {
+ struct hlist_node d_alias; /* inode alias list */
+ struct rcu_head d_rcu;
+ } d_u;
+};
+
+/*
+ * dentry->d_lock spinlock nesting subclasses:
+ *
+ * 0: normal
+ * 1: nested
+ */
+enum dentry_d_lock_class
+{
+ DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */
+ DENTRY_D_LOCK_NESTED
+};
+
+struct dentry_operations {
+ int (*d_revalidate)(struct dentry *, unsigned int);
+ int (*d_weak_revalidate)(struct dentry *, unsigned int);
+ int (*d_hash)(const struct dentry *, struct qstr *);
+ int (*d_compare)(const struct dentry *, const struct dentry *,
+ unsigned int, const char *, const struct qstr *);
+ int (*d_delete)(const struct dentry *);
+ void (*d_release)(struct dentry *);
+ void (*d_prune)(struct dentry *);
+ void (*d_iput)(struct dentry *, struct inode *);
+ char *(*d_dname)(struct dentry *, char *, int);
+ struct vfsmount *(*d_automount)(struct path *);
+ int (*d_manage)(struct dentry *, bool);
+} ____cacheline_aligned;
+
+/*
+ * Locking rules for dentry_operations callbacks are to be found in
+ * Documentation/filesystems/Locking. Keep it updated!
+ *
+ * Further descriptions are found in Documentation/filesystems/vfs.txt.
+ * Keep it updated too!
+ */
+
+/* d_flags entries */
+#define DCACHE_OP_HASH 0x00000001
+#define DCACHE_OP_COMPARE 0x00000002
+#define DCACHE_OP_REVALIDATE 0x00000004
+#define DCACHE_OP_DELETE 0x00000008
+#define DCACHE_OP_PRUNE 0x00000010
+
+#define DCACHE_DISCONNECTED 0x00000020
+ /* This dentry is possibly not currently connected to the dcache tree, in
+ * which case its parent will either be itself, or will have this flag as
+ * well. nfsd will not use a dentry with this bit set, but will first
+ * endeavour to clear the bit either by discovering that it is connected,
+ * or by performing lookup operations. Any filesystem which supports
+ * nfsd_operations MUST have a lookup function which, if it finds a
+ * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that
+ * dentry into place and return that dentry rather than the passed one,
+ * typically using d_splice_alias. */
+
+#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
+#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
+
+#define DCACHE_CANT_MOUNT 0x00000100
+#define DCACHE_GENOCIDE 0x00000200
+#define DCACHE_SHRINK_LIST 0x00000400
+
+#define DCACHE_OP_WEAK_REVALIDATE 0x00000800
+
+#define DCACHE_NFSFS_RENAMED 0x00001000
+ /* this dentry has been "silly renamed" and has to be deleted on the last
+ * dput() */
+#define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */
+#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000
+ /* Parent inode is watched by some fsnotify listener */
+
+#define DCACHE_DENTRY_KILLED 0x00008000
+
+#define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */
+#define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */
+#define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */
+#define DCACHE_MANAGED_DENTRY \
+ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT)
+
+#define DCACHE_LRU_LIST 0x00080000
+
+#define DCACHE_ENTRY_TYPE 0x00700000
+#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
+#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */
+#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */
+#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */
+#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */
+
+#define DCACHE_MAY_FREE 0x00800000
+#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
+
+extern seqlock_t rename_lock;
+
+/*
+ * These are the low-level FS interfaces to the dcache..
+ */
+extern void d_instantiate(struct dentry *, struct inode *);
+extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
+extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
+extern void __d_drop(struct dentry *dentry);
+extern void d_drop(struct dentry *dentry);
+extern void d_delete(struct dentry *);
+extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op);
+
+/* allocate/de-allocate */
+extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
+extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
+extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+extern struct dentry *d_find_any_alias(struct inode *inode);
+extern struct dentry * d_obtain_alias(struct inode *);
+extern struct dentry * d_obtain_root(struct inode *);
+extern void shrink_dcache_sb(struct super_block *);
+extern void shrink_dcache_parent(struct dentry *);
+extern void shrink_dcache_for_umount(struct super_block *);
+extern void d_invalidate(struct dentry *);
+
+/* only used at mount-time */
+extern struct dentry * d_make_root(struct inode *);
+
+/* <clickety>-<click> the ramfs-type tree */
+extern void d_genocide(struct dentry *);
+
+extern void d_tmpfile(struct dentry *, struct inode *);
+
+extern struct dentry *d_find_alias(struct inode *);
+extern void d_prune_aliases(struct inode *);
+
+/* test whether we have any submounts in a subdir tree */
+extern int have_submounts(struct dentry *);
+
+/*
+ * This adds the entry to the hash queues.
+ */
+extern void d_rehash(struct dentry *);
+
+/**
+ * d_add - add dentry to hash queues
+ * @entry: dentry to add
+ * @inode: The inode to attach to this dentry
+ *
+ * This adds the entry to the hash queues and initializes @inode.
+ * The entry was actually filled in earlier during d_alloc().
+ */
+
+static inline void d_add(struct dentry *entry, struct inode *inode)
+{
+ d_instantiate(entry, inode);
+ d_rehash(entry);
+}
+
+/**
+ * d_add_unique - add dentry to hash queues without aliasing
+ * @entry: dentry to add
+ * @inode: The inode to attach to this dentry
+ *
+ * This adds the entry to the hash queues and initializes @inode.
+ * The entry was actually filled in earlier during d_alloc().
+ */
+static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *inode)
+{
+ struct dentry *res;
+
+ res = d_instantiate_unique(entry, inode);
+ d_rehash(res != NULL ? res : entry);
+ return res;
+}
+
+extern void dentry_update_name_case(struct dentry *, struct qstr *);
+
+/* used for rename() and baskets */
+extern void d_move(struct dentry *, struct dentry *);
+extern void d_exchange(struct dentry *, struct dentry *);
+extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
+
+/* appendix may either be NULL or be used for transname suffixes */
+extern struct dentry *d_lookup(const struct dentry *, const struct qstr *);
+extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *);
+extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
+extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
+ const struct qstr *name, unsigned *seq);
+
+static inline unsigned d_count(const struct dentry *dentry)
+{
+ return dentry->d_lockref.count;
+}
+
+/*
+ * helper function for dentry_operations.d_dname() members
+ */
+extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
+
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
+extern char *d_path(const struct path *, char *, int);
+extern char *dentry_path_raw(struct dentry *, char *, int);
+extern char *dentry_path(struct dentry *, char *, int);
+
+/* Allocation counts.. */
+
+/**
+ * dget, dget_dlock - get a reference to a dentry
+ * @dentry: dentry to get a reference to
+ *
+ * Given a dentry or %NULL pointer, increment the reference count
+ * if appropriate and return the dentry. A dentry will not be
+ * destroyed when it has references.
+ */
+static inline struct dentry *dget_dlock(struct dentry *dentry)
+{
+ if (dentry)
+ dentry->d_lockref.count++;
+ return dentry;
+}
+
+static inline struct dentry *dget(struct dentry *dentry)
+{
+ if (dentry)
+ lockref_get(&dentry->d_lockref);
+ return dentry;
+}
+
+extern struct dentry *dget_parent(struct dentry *dentry);
+
+/**
+ * d_unhashed - is dentry hashed
+ * @dentry: entry to check
+ *
+ * Returns true if the dentry passed is not currently hashed.
+ */
+
+static inline int d_unhashed(const struct dentry *dentry)
+{
+ return hlist_bl_unhashed(&dentry->d_hash);
+}
+
+static inline int d_unlinked(const struct dentry *dentry)
+{
+ return d_unhashed(dentry) && !IS_ROOT(dentry);
+}
+
+static inline int cant_mount(const struct dentry *dentry)
+{
+ return (dentry->d_flags & DCACHE_CANT_MOUNT);
+}
+
+static inline void dont_mount(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags |= DCACHE_CANT_MOUNT;
+ spin_unlock(&dentry->d_lock);
+}
+
+extern void dput(struct dentry *);
+
+static inline bool d_managed(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_MANAGED_DENTRY;
+}
+
+static inline bool d_mountpoint(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_MOUNTED;
+}
+
+/*
+ * Directory cache entry type accessor functions.
+ */
+static inline unsigned __d_entry_type(const struct dentry *dentry)
+{
+ unsigned type = READ_ONCE(dentry->d_flags);
+ smp_rmb();
+ return type & DCACHE_ENTRY_TYPE;
+}
+
+static inline bool d_is_miss(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+}
+
+static inline bool d_is_whiteout(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
+}
+
+static inline bool d_can_lookup(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
+}
+
+static inline bool d_is_autodir(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
+}
+
+static inline bool d_is_dir(const struct dentry *dentry)
+{
+ return d_can_lookup(dentry) || d_is_autodir(dentry);
+}
+
+static inline bool d_is_symlink(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
+}
+
+static inline bool d_is_reg(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
+}
+
+static inline bool d_is_special(const struct dentry *dentry)
+{
+ return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
+}
+
+static inline bool d_is_file(const struct dentry *dentry)
+{
+ return d_is_reg(dentry) || d_is_special(dentry);
+}
+
+static inline bool d_is_negative(const struct dentry *dentry)
+{
+ // TODO: check d_is_whiteout(dentry) also.
+ return d_is_miss(dentry);
+}
+
+static inline bool d_is_positive(const struct dentry *dentry)
+{
+ return !d_is_negative(dentry);
+}
+
+/**
+ * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents either an absent name or a name that
+ * doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent
+ * a true miss, a whiteout that isn't represented by a 0,0 chardev or a
+ * fallthrough marker in an opaque directory.
+ *
+ * Note! (1) This should be used *only* by a filesystem to examine its own
+ * dentries. It should not be used to look at some other filesystem's
+ * dentries. (2) It should also be used in combination with d_inode() to get
+ * the inode. (3) The dentry may have something attached to ->d_lower and the
+ * type field of the flags may be set to something other than miss or whiteout.
+ */
+static inline bool d_really_is_negative(const struct dentry *dentry)
+{
+ return dentry->d_inode == NULL;
+}
+
+/**
+ * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs)
+ * @dentry: The dentry in question
+ *
+ * Returns true if the dentry represents a name that maps to an inode
+ * (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if
+ * that is represented on medium as a 0,0 chardev.
+ *
+ * Note! (1) This should be used *only* by a filesystem to examine its own
+ * dentries. It should not be used to look at some other filesystem's
+ * dentries. (2) It should also be used in combination with d_inode() to get
+ * the inode.
+ */
+static inline bool d_really_is_positive(const struct dentry *dentry)
+{
+ return dentry->d_inode != NULL;
+}
+
+extern void d_set_fallthru(struct dentry *dentry);
+
+static inline bool d_is_fallthru(const struct dentry *dentry)
+{
+ return dentry->d_flags & DCACHE_FALLTHRU;
+}
+
+
+extern int sysctl_vfs_cache_pressure;
+
+static inline unsigned long vfs_pressure_ratio(unsigned long val)
+{
+ return mult_frac(val, sysctl_vfs_cache_pressure, 100);
+}
+
+/**
+ * d_inode - Get the actual inode of this dentry
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+ return dentry->d_inode;
+}
+
+/**
+ * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode_rcu(const struct dentry *dentry)
+{
+ return ACCESS_ONCE(dentry->d_inode);
+}
+
+/**
+ * d_backing_inode - Get upper or lower inode we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get at the inode that will be used
+ * if this dentry were to be opened as a file. The inode may be on the upper
+ * dentry or it may be on a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own inodes.
+ */
+static inline struct inode *d_backing_inode(const struct dentry *upper)
+{
+ struct inode *inode = upper->d_inode;
+
+ return inode;
+}
+
+/**
+ * d_backing_dentry - Get upper or lower dentry we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get the dentry of the inode that
+ * will be used if this dentry were opened as a file. It may be the upper
+ * dentry or it may be a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own dentries.
+ */
+static inline struct dentry *d_backing_dentry(struct dentry *upper)
+{
+ return upper;
+}
+
+#endif /* __LINUX_DCACHE_H */
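
For the common filesystem-side pairing of these helpers, a ->lookup() implementation typically resolves its own inode and then hashes the dentry with d_add(); negative results are hashed too, with a NULL inode. In the hedged fragment below, example_iget() is a made-up stand-in for the filesystem's own inode lookup, and IS_ERR()/ERR_CAST() come from <linux/err.h>.

/* Hypothetical ->lookup() body; example_iget() is a made-up helper. */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_iget(dir->i_sb, &dentry->d_name);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	/* A NULL inode creates a negative dentry; both cases get hashed. */
	d_add(dentry, inode);
	return NULL;
}
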
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
new file mode 100644
index 000000000..221025423
--- /dev/null
+++ b/include/linux/dccp.h
@@ -0,0 +1,324 @@
+#ifndef _LINUX_DCCP_H
+#define _LINUX_DCCP_H
+
+
+#include <linux/in.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/uio.h>
+#include <linux/workqueue.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/inet_sock.h>
+#include <net/inet_timewait_sock.h>
+#include <net/tcp_states.h>
+#include <uapi/linux/dccp.h>
+
+enum dccp_state {
+ DCCP_OPEN = TCP_ESTABLISHED,
+ DCCP_REQUESTING = TCP_SYN_SENT,
+ DCCP_LISTEN = TCP_LISTEN,
+ DCCP_RESPOND = TCP_SYN_RECV,
+ /*
+ * States involved in closing a DCCP connection:
+ * 1) ACTIVE_CLOSEREQ is entered by a server sending a CloseReq.
+ *
+ * 2) CLOSING can have three different meanings (RFC 4340, 8.3):
+ * a. Client has performed active-close, has sent a Close to the server
+ * from state OPEN or PARTOPEN, and is waiting for the final Reset
+ * (in this case, SOCK_DONE == 1).
+ * b. Client is asked to perform passive-close, by receiving a CloseReq
+ * in (PART)OPEN state. It sends a Close and waits for final Reset
+ * (in this case, SOCK_DONE == 0).
+ * c. Server performs an active-close as in (a), keeps TIMEWAIT state.
+ *
+ * 3) The following intermediate states are employed to give passively
+ * closing nodes a chance to process their unread data:
+ * - PASSIVE_CLOSE (from OPEN => CLOSED) and
+ * - PASSIVE_CLOSEREQ (from (PART)OPEN to CLOSING; case (b) above).
+ */
+ DCCP_ACTIVE_CLOSEREQ = TCP_FIN_WAIT1,
+ DCCP_PASSIVE_CLOSE = TCP_CLOSE_WAIT, /* any node receiving a Close */
+ DCCP_CLOSING = TCP_CLOSING,
+ DCCP_TIME_WAIT = TCP_TIME_WAIT,
+ DCCP_CLOSED = TCP_CLOSE,
+ DCCP_NEW_SYN_RECV = TCP_NEW_SYN_RECV,
+ DCCP_PARTOPEN = TCP_MAX_STATES,
+ DCCP_PASSIVE_CLOSEREQ, /* clients receiving CloseReq */
+ DCCP_MAX_STATES
+};
+
+enum {
+ DCCPF_OPEN = TCPF_ESTABLISHED,
+ DCCPF_REQUESTING = TCPF_SYN_SENT,
+ DCCPF_LISTEN = TCPF_LISTEN,
+ DCCPF_RESPOND = TCPF_SYN_RECV,
+ DCCPF_ACTIVE_CLOSEREQ = TCPF_FIN_WAIT1,
+ DCCPF_CLOSING = TCPF_CLOSING,
+ DCCPF_TIME_WAIT = TCPF_TIME_WAIT,
+ DCCPF_CLOSED = TCPF_CLOSE,
+ DCCPF_NEW_SYN_RECV = TCPF_NEW_SYN_RECV,
+ DCCPF_PARTOPEN = (1 << DCCP_PARTOPEN),
+};
+
+static inline struct dccp_hdr *dccp_hdr(const struct sk_buff *skb)
+{
+ return (struct dccp_hdr *)skb_transport_header(skb);
+}
+
+static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen)
+{
+ skb_push(skb, headlen);
+ skb_reset_transport_header(skb);
+ return memset(skb_transport_header(skb), 0, headlen);
+}
+
+static inline struct dccp_hdr_ext *dccp_hdrx(const struct dccp_hdr *dh)
+{
+ return (struct dccp_hdr_ext *)((unsigned char *)dh + sizeof(*dh));
+}
+
+static inline unsigned int __dccp_basic_hdr_len(const struct dccp_hdr *dh)
+{
+ return sizeof(*dh) + (dh->dccph_x ? sizeof(struct dccp_hdr_ext) : 0);
+}
+
+static inline unsigned int dccp_basic_hdr_len(const struct sk_buff *skb)
+{
+ const struct dccp_hdr *dh = dccp_hdr(skb);
+ return __dccp_basic_hdr_len(dh);
+}
+
+static inline __u64 dccp_hdr_seq(const struct dccp_hdr *dh)
+{
+ __u64 seq_nr = ntohs(dh->dccph_seq);
+
+ if (dh->dccph_x != 0)
+ seq_nr = (seq_nr << 32) + ntohl(dccp_hdrx(dh)->dccph_seq_low);
+ else
+ seq_nr += (u32)dh->dccph_seq2 << 16;
+
+ return seq_nr;
+}
+
+static inline struct dccp_hdr_request *dccp_hdr_request(struct sk_buff *skb)
+{
+ return (struct dccp_hdr_request *)(skb_transport_header(skb) +
+ dccp_basic_hdr_len(skb));
+}
+
+static inline struct dccp_hdr_ack_bits *dccp_hdr_ack_bits(const struct sk_buff *skb)
+{
+ return (struct dccp_hdr_ack_bits *)(skb_transport_header(skb) +
+ dccp_basic_hdr_len(skb));
+}
+
+static inline u64 dccp_hdr_ack_seq(const struct sk_buff *skb)
+{
+ const struct dccp_hdr_ack_bits *dhack = dccp_hdr_ack_bits(skb);
+ return ((u64)ntohs(dhack->dccph_ack_nr_high) << 32) + ntohl(dhack->dccph_ack_nr_low);
+}
+
+static inline struct dccp_hdr_response *dccp_hdr_response(struct sk_buff *skb)
+{
+ return (struct dccp_hdr_response *)(skb_transport_header(skb) +
+ dccp_basic_hdr_len(skb));
+}
+
+static inline struct dccp_hdr_reset *dccp_hdr_reset(struct sk_buff *skb)
+{
+ return (struct dccp_hdr_reset *)(skb_transport_header(skb) +
+ dccp_basic_hdr_len(skb));
+}
+
+static inline unsigned int __dccp_hdr_len(const struct dccp_hdr *dh)
+{
+ return __dccp_basic_hdr_len(dh) +
+ dccp_packet_hdr_len(dh->dccph_type);
+}
+
+static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
+{
+ return __dccp_hdr_len(dccp_hdr(skb));
+}
+
+/**
+ * struct dccp_request_sock - represent DCCP-specific connection request
+ * @dreq_inet_rsk: structure inherited from
+ * @dreq_iss: initial sequence number, sent on the first Response (RFC 4340, 7.1)
+ * @dreq_gss: greatest sequence number sent (for retransmitted Responses)
+ * @dreq_isr: initial sequence number received in the first Request
+ * @dreq_gsr: greatest sequence number received (for retransmitted Request(s))
+ * @dreq_service: service code present on the Request (there is just one)
+ * @dreq_featneg: feature negotiation options for this connection
+ * The following two fields are analogous to the ones in dccp_sock:
+ * @dreq_timestamp_echo: last received timestamp to echo (13.1)
+ * @dreq_timestamp_time: the time of receiving the last @dreq_timestamp_echo
+ */
+struct dccp_request_sock {
+ struct inet_request_sock dreq_inet_rsk;
+ __u64 dreq_iss;
+ __u64 dreq_gss;
+ __u64 dreq_isr;
+ __u64 dreq_gsr;
+ __be32 dreq_service;
+ struct list_head dreq_featneg;
+ __u32 dreq_timestamp_echo;
+ __u32 dreq_timestamp_time;
+};
+
+static inline struct dccp_request_sock *dccp_rsk(const struct request_sock *req)
+{
+ return (struct dccp_request_sock *)req;
+}
+
+extern struct inet_timewait_death_row dccp_death_row;
+
+extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
+ struct sk_buff *skb);
+
+struct dccp_options_received {
+ u64 dccpor_ndp:48;
+ u32 dccpor_timestamp;
+ u32 dccpor_timestamp_echo;
+ u32 dccpor_elapsed_time;
+};
+
+struct ccid;
+
+enum dccp_role {
+ DCCP_ROLE_UNDEFINED,
+ DCCP_ROLE_LISTEN,
+ DCCP_ROLE_CLIENT,
+ DCCP_ROLE_SERVER,
+};
+
+struct dccp_service_list {
+ __u32 dccpsl_nr;
+ __be32 dccpsl_list[0];
+};
+
+#define DCCP_SERVICE_INVALID_VALUE htonl((__u32)-1)
+#define DCCP_SERVICE_CODE_IS_ABSENT 0
+
+static inline int dccp_list_has_service(const struct dccp_service_list *sl,
+ const __be32 service)
+{
+ if (likely(sl != NULL)) {
+ u32 i = sl->dccpsl_nr;
+ while (i--)
+ if (sl->dccpsl_list[i] == service)
+ return 1;
+ }
+ return 0;
+}
+
+struct dccp_ackvec;
+
+/**
+ * struct dccp_sock - DCCP socket state
+ *
+ * @dccps_swl - sequence number window low
+ * @dccps_swh - sequence number window high
+ * @dccps_awl - acknowledgement number window low
+ * @dccps_awh - acknowledgement number window high
+ * @dccps_iss - initial sequence number sent
+ * @dccps_isr - initial sequence number received
+ * @dccps_osr - first OPEN sequence number received
+ * @dccps_gss - greatest sequence number sent
+ * @dccps_gsr - greatest valid sequence number received
+ * @dccps_gar - greatest valid ack number received on a non-Sync; initialized to %dccps_iss
+ * @dccps_service - first (passive sock) or unique (active sock) service code
+ * @dccps_service_list - second .. last service code on passive socket
+ * @dccps_timestamp_echo - latest timestamp received on a TIMESTAMP option
+ * @dccps_timestamp_time - time of receiving latest @dccps_timestamp_echo
+ * @dccps_l_ack_ratio - feature-local Ack Ratio
+ * @dccps_r_ack_ratio - feature-remote Ack Ratio
+ * @dccps_l_seq_win - local Sequence Window (influences ack number validity)
+ * @dccps_r_seq_win - remote Sequence Window (influences seq number validity)
+ * @dccps_pcslen - sender partial checksum coverage (via sockopt)
+ * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
+ * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
+ * @dccps_ndp_count - number of Non Data Packets since last data packet
+ * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
+ * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
+ * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
+ * @dccps_hc_rx_ackvec - rx half connection ack vector
+ * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
+ * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
+ * @dccps_options_received - parsed set of retrieved options
+ * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
+ * @dccps_tx_qlen - maximum length of the TX queue
+ * @dccps_role - role of this sock, one of %dccp_role
+ * @dccps_hc_rx_insert_options - receiver wants to add options when acking
+ * @dccps_hc_tx_insert_options - sender wants to add options when sending
+ * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
+ * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
+ * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
+ * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
+ * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
+ */
+struct dccp_sock {
+ /* inet_connection_sock has to be the first member of dccp_sock */
+ struct inet_connection_sock dccps_inet_connection;
+#define dccps_syn_rtt dccps_inet_connection.icsk_ack.lrcvtime
+ __u64 dccps_swl;
+ __u64 dccps_swh;
+ __u64 dccps_awl;
+ __u64 dccps_awh;
+ __u64 dccps_iss;
+ __u64 dccps_isr;
+ __u64 dccps_osr;
+ __u64 dccps_gss;
+ __u64 dccps_gsr;
+ __u64 dccps_gar;
+ __be32 dccps_service;
+ __u32 dccps_mss_cache;
+ struct dccp_service_list *dccps_service_list;
+ __u32 dccps_timestamp_echo;
+ __u32 dccps_timestamp_time;
+ __u16 dccps_l_ack_ratio;
+ __u16 dccps_r_ack_ratio;
+ __u64 dccps_l_seq_win:48;
+ __u64 dccps_r_seq_win:48;
+ __u8 dccps_pcslen:4;
+ __u8 dccps_pcrlen:4;
+ __u8 dccps_send_ndp_count:1;
+ __u64 dccps_ndp_count:48;
+ unsigned long dccps_rate_last;
+ struct list_head dccps_featneg;
+ struct dccp_ackvec *dccps_hc_rx_ackvec;
+ struct ccid *dccps_hc_rx_ccid;
+ struct ccid *dccps_hc_tx_ccid;
+ struct dccp_options_received dccps_options_received;
+ __u8 dccps_qpolicy;
+ __u32 dccps_tx_qlen;
+ enum dccp_role dccps_role:2;
+ __u8 dccps_hc_rx_insert_options:1;
+ __u8 dccps_hc_tx_insert_options:1;
+ __u8 dccps_server_timewait:1;
+ __u8 dccps_sync_scheduled:1;
+ struct tasklet_struct dccps_xmitlet;
+ struct timer_list dccps_xmit_timer;
+};
+
+static inline struct dccp_sock *dccp_sk(const struct sock *sk)
+{
+ return (struct dccp_sock *)sk;
+}
+
+static inline const char *dccp_role(const struct sock *sk)
+{
+ switch (dccp_sk(sk)->dccps_role) {
+ case DCCP_ROLE_UNDEFINED: return "undefined";
+ case DCCP_ROLE_LISTEN: return "listen";
+ case DCCP_ROLE_SERVER: return "server";
+ case DCCP_ROLE_CLIENT: return "client";
+ }
+ return NULL;
+}
+
+extern void dccp_syn_ack_timeout(const struct request_sock *req);
+
+#endif /* _LINUX_DCCP_H */
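
The header accessors above compose on the receive path once the transport header has been set on the skb; a hedged fragment (pr_debug and struct sk_buff are assumed from the usual networking includes):

/* Hypothetical receive-path fragment using the accessors above. */
static void example_log_dccp_seq(const struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	pr_debug("dccp %s packet, %u byte basic header, seq %llu\n",
		 dh->dccph_x ? "long-seqno" : "short-seqno",
		 dccp_basic_hdr_len(skb),
		 (unsigned long long)dccp_hdr_seq(dh));
}
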
diff --git a/include/linux/dcookies.h b/include/linux/dcookies.h
new file mode 100644
index 000000000..5ac3bdd5c
--- /dev/null
+++ b/include/linux/dcookies.h
@@ -0,0 +1,68 @@
+/*
+ * dcookies.h
+ *
+ * Persistent cookie-path mappings
+ *
+ * Copyright 2002 John Levon <levon@movementarian.org>
+ */
+
+#ifndef DCOOKIES_H
+#define DCOOKIES_H
+
+
+#ifdef CONFIG_PROFILING
+
+#include <linux/dcache.h>
+#include <linux/types.h>
+
+struct dcookie_user;
+struct path;
+
+/**
+ * dcookie_register - register a user of dcookies
+ *
+ * Register as a dcookie user. Returns %NULL on failure.
+ */
+struct dcookie_user * dcookie_register(void);
+
+/**
+ * dcookie_unregister - unregister a user of dcookies
+ *
+ * Unregister as a dcookie user. This may invalidate
+ * any dcookie values returned from get_dcookie().
+ */
+void dcookie_unregister(struct dcookie_user * user);
+
+/**
+ * get_dcookie - acquire a dcookie
+ *
+ * Convert the given dentry/vfsmount pair into
+ * a cookie value.
+ *
+ * Returns -EINVAL if no living task has registered as a
+ * dcookie user.
+ *
+ * Returns 0 on success, with *cookie filled in
+ */
+int get_dcookie(struct path *path, unsigned long *cookie);
+
+#else
+
+static inline struct dcookie_user * dcookie_register(void)
+{
+ return NULL;
+}
+
+static inline void dcookie_unregister(struct dcookie_user * user)
+{
+ return;
+}
+
+static inline int get_dcookie(struct path *path, unsigned long *cookie)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_PROFILING */
+
+#endif /* DCOOKIES_H */
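
The documented lifecycle is register, convert, unregister. A small hedged sketch of a profiling-style user (the function name is hypothetical; pr_info and -ENOMEM assume the usual kernel includes):

/* Hypothetical dcookie user following the documented lifecycle. */
static int example_record_path(struct path *path)
{
	struct dcookie_user *user;
	unsigned long cookie;
	int err;

	user = dcookie_register();
	if (!user)
		return -ENOMEM;

	err = get_dcookie(path, &cookie);
	if (!err)
		pr_info("path cookie: %#lx\n", cookie);

	dcookie_unregister(user);
	return err;
}
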
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
new file mode 100644
index 000000000..822c1354f
--- /dev/null
+++ b/include/linux/debug_locks.h
@@ -0,0 +1,75 @@
+#ifndef __LINUX_DEBUG_LOCKING_H
+#define __LINUX_DEBUG_LOCKING_H
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/bug.h>
+
+struct task_struct;
+
+extern int debug_locks;
+extern int debug_locks_silent;
+
+
+static inline int __debug_locks_off(void)
+{
+ return xchg(&debug_locks, 0);
+}
+
+/*
+ * Generic 'turn off all lock debugging' function:
+ */
+extern int debug_locks_off(void);
+
+#define DEBUG_LOCKS_WARN_ON(c) \
+({ \
+ int __ret = 0; \
+ \
+ if (!oops_in_progress && unlikely(c)) { \
+ if (debug_locks_off() && !debug_locks_silent) \
+ WARN(1, "DEBUG_LOCKS_WARN_ON(%s)", #c); \
+ __ret = 1; \
+ } \
+ __ret; \
+})
+
+#ifdef CONFIG_SMP
+# define SMP_DEBUG_LOCKS_WARN_ON(c) DEBUG_LOCKS_WARN_ON(c)
+#else
+# define SMP_DEBUG_LOCKS_WARN_ON(c) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
+ extern void locking_selftest(void);
+#else
+# define locking_selftest() do { } while (0)
+#endif
+
+struct task_struct;
+
+#ifdef CONFIG_LOCKDEP
+extern void debug_show_all_locks(void);
+extern void debug_show_held_locks(struct task_struct *task);
+extern void debug_check_no_locks_freed(const void *from, unsigned long len);
+extern void debug_check_no_locks_held(void);
+#else
+static inline void debug_show_all_locks(void)
+{
+}
+
+static inline void debug_show_held_locks(struct task_struct *task)
+{
+}
+
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+}
+
+static inline void
+debug_check_no_locks_held(void)
+{
+}
+#endif
+
+#endif
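
DEBUG_LOCKS_WARN_ON() is meant for invariant checks inside locking code: the first violation warns (unless an oops is already in flight) and clears debug_locks so later hits stay quiet. A minimal hedged example; the lock structure and its owner field are made up, and 'current' assumes <linux/sched.h>.

/* Hypothetical checker; 'struct example_lock' and its fields are made up. */
static void example_assert_owner(struct example_lock *lock)
{
	/* Warns once, then lock debugging is globally disabled. */
	if (DEBUG_LOCKS_WARN_ON(lock->owner != current))
		return;
}
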
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
new file mode 100644
index 000000000..cb25af461
--- /dev/null
+++ b/include/linux/debugfs.h
@@ -0,0 +1,288 @@
+/*
+ * debugfs.h - a tiny little debug file system
+ *
+ * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2004 IBM Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * debugfs is for people to use instead of /proc or /sys.
+ * See Documentation/DocBook/filesystems for more details.
+ */
+
+#ifndef _DEBUGFS_H_
+#define _DEBUGFS_H_
+
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <linux/types.h>
+
+struct device;
+struct file_operations;
+
+struct debugfs_blob_wrapper {
+ void *data;
+ unsigned long size;
+};
+
+struct debugfs_reg32 {
+ char *name;
+ unsigned long offset;
+};
+
+struct debugfs_regset32 {
+ const struct debugfs_reg32 *regs;
+ int nregs;
+ void __iomem *base;
+};
+
+extern struct dentry *arch_debugfs_dir;
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* declared over in file.c */
+extern const struct file_operations debugfs_file_operations;
+extern const struct inode_operations debugfs_link_operations;
+
+struct dentry *debugfs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size);
+
+struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
+
+struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
+ const char *dest);
+
+struct dentry *debugfs_create_automount(const char *name,
+ struct dentry *parent,
+ struct vfsmount *(*f)(void *),
+ void *data);
+
+void debugfs_remove(struct dentry *dentry);
+void debugfs_remove_recursive(struct dentry *dentry);
+
+struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ struct dentry *new_dir, const char *new_name);
+
+struct dentry *debugfs_create_u8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value);
+struct dentry *debugfs_create_u16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value);
+struct dentry *debugfs_create_u32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value);
+struct dentry *debugfs_create_u64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value);
+struct dentry *debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value);
+struct dentry *debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value);
+struct dentry *debugfs_create_x32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value);
+struct dentry *debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value);
+struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value);
+struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value);
+struct dentry *debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value);
+
+struct dentry *debugfs_create_blob(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_blob_wrapper *blob);
+
+struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
+
+void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix);
+
+struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements);
+
+struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data));
+
+bool debugfs_initialized(void);
+
+#else
+
+#include <linux/err.h>
+
+/*
+ * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled
+ * so users have a chance to detect if there was a real error or not. We don't
+ * want to duplicate the design decision mistakes of procfs and devfs again.
+ */
+
+static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_dir(const char *name,
+ struct dentry *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_symlink(const char *name,
+ struct dentry *parent,
+ const char *dest)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void debugfs_remove(struct dentry *dentry)
+{ }
+
+static inline void debugfs_remove_recursive(struct dentry *dentry)
+{ }
+
+static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
+ struct dentry *new_dir, char *new_name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
+ struct dentry *parent,
+ u8 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
+ struct dentry *parent,
+ u16 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
+ struct dentry *parent,
+ u64 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent,
+ u8 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent,
+ u16 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent,
+ u64 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent,
+ size_t *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_blob_wrapper *blob)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_regset32(const char *name,
+ umode_t mode, struct dentry *parent,
+ struct debugfs_regset32 *regset)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
+ int nregs, void __iomem *base, char *prefix)
+{
+}
+
+static inline bool debugfs_initialized(void)
+{
+ return false;
+}
+
+static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+ struct dentry *parent,
+ u32 *array, u32 elements)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
+ const char *name,
+ struct dentry *parent,
+ int (*read_fn)(struct seq_file *s,
+ void *data))
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif
+
+#endif
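For orientation, here is a minimal usage sketch of the interface declared above, assuming a hypothetical driver "foo" that exposes one read-only counter; the directory name, file name and mode bits are illustrative only:

#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *foo_dir;  /* hypothetical driver state */
static u32 foo_counter;

static int __init foo_debugfs_init(void)
{
        /* A NULL parent places the directory in the debugfs root.  When
         * CONFIG_DEBUG_FS is disabled these calls resolve to the stubs
         * above, so error handling is kept minimal in this sketch. */
        foo_dir = debugfs_create_dir("foo", NULL);
        debugfs_create_u32("counter", 0444, foo_dir, &foo_counter);
        return 0;
}

static void __exit foo_debugfs_exit(void)
{
        /* Removes "counter" and the directory itself in one call. */
        debugfs_remove_recursive(foo_dir);
}

module_init(foo_debugfs_init);
module_exit(foo_debugfs_exit);
MODULE_LICENSE("GPL");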
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
new file mode 100644
index 000000000..98ffcbd48
--- /dev/null
+++ b/include/linux/debugobjects.h
@@ -0,0 +1,110 @@
+#ifndef _LINUX_DEBUGOBJECTS_H
+#define _LINUX_DEBUGOBJECTS_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+enum debug_obj_state {
+ ODEBUG_STATE_NONE,
+ ODEBUG_STATE_INIT,
+ ODEBUG_STATE_INACTIVE,
+ ODEBUG_STATE_ACTIVE,
+ ODEBUG_STATE_DESTROYED,
+ ODEBUG_STATE_NOTAVAILABLE,
+ ODEBUG_STATE_MAX,
+};
+
+struct debug_obj_descr;
+
+/**
+ * struct debug_obj - representation of a tracked object
+ * @node: hlist node to link the object into the tracker list
+ * @state: tracked object state
+ * @astate: current active state
+ * @object: pointer to the real object
+ * @descr: pointer to an object type specific debug description structure
+ */
+struct debug_obj {
+ struct hlist_node node;
+ enum debug_obj_state state;
+ unsigned int astate;
+ void *object;
+ struct debug_obj_descr *descr;
+};
+
+/**
+ * struct debug_obj_descr - object type specific debug description structure
+ *
+ * @name: name of the object type
+ * @debug_hint: function returning an address that has an associated
+ * kernel symbol, to help identify the object
+ * @fixup_init: fixup function, which is called when the init check
+ * fails
+ * @fixup_activate: fixup function, which is called when the activate check
+ * fails
+ * @fixup_destroy: fixup function, which is called when the destroy check
+ * fails
+ * @fixup_free: fixup function, which is called when the free check
+ * fails
+ * @fixup_assert_init: fixup function, which is called when the assert_init
+ * check fails
+ */
+struct debug_obj_descr {
+ const char *name;
+ void *(*debug_hint) (void *addr);
+ int (*fixup_init) (void *addr, enum debug_obj_state state);
+ int (*fixup_activate) (void *addr, enum debug_obj_state state);
+ int (*fixup_destroy) (void *addr, enum debug_obj_state state);
+ int (*fixup_free) (void *addr, enum debug_obj_state state);
+ int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
+};
+
+#ifdef CONFIG_DEBUG_OBJECTS
+extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
+extern void
+debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
+extern int debug_object_activate (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
+extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_assert_init(void *addr, struct debug_obj_descr *descr);
+
+/*
+ * Active state:
+ * - Set at 0 upon initialization.
+ * - Must return to 0 before deactivation.
+ */
+extern void
+debug_object_active_state(void *addr, struct debug_obj_descr *descr,
+ unsigned int expect, unsigned int next);
+
+extern void debug_objects_early_init(void);
+extern void debug_objects_mem_init(void);
+#else
+static inline void
+debug_object_init (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
+static inline int
+debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; }
+static inline void
+debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_free (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { }
+
+static inline void debug_objects_early_init(void) { }
+static inline void debug_objects_mem_init(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+extern void debug_check_no_obj_freed(const void *address, unsigned long size);
+#else
+static inline void
+debug_check_no_obj_freed(const void *address, unsigned long size) { }
+#endif
+
+#endif
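To show how the fixup hooks above are meant to be wired up, here is a hedged sketch for a hypothetical "foo_timer" object, loosely modelled on how kernel timers use this API; the object type, the initialize-on-first-activation policy and the helper names are assumptions for illustration:

#include <linux/debugobjects.h>

struct foo_timer {                      /* hypothetical tracked object */
        int armed;
};

static struct debug_obj_descr foo_timer_debug_descr;

static int foo_timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        /* Activation was attempted on an object the tracker has never
         * seen: treat it as statically allocated, initialize and retry. */
        if (state == ODEBUG_STATE_NOTAVAILABLE) {
                debug_object_init(addr, &foo_timer_debug_descr);
                return 1;               /* fixup performed, recheck */
        }
        return 0;
}

static struct debug_obj_descr foo_timer_debug_descr = {
        .name           = "foo_timer",
        .fixup_activate = foo_timer_fixup_activate,
};

static void foo_timer_arm(struct foo_timer *t)
{
        /* Flags double activation or activation of freed memory. */
        debug_object_activate(t, &foo_timer_debug_descr);
        t->armed = 1;
}

static void foo_timer_disarm(struct foo_timer *t)
{
        t->armed = 0;
        debug_object_deactivate(t, &foo_timer_debug_descr);
}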
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
new file mode 100644
index 000000000..4d683df89
--- /dev/null
+++ b/include/linux/decompress/bunzip2.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_BUNZIP2_H
+#define DECOMPRESS_BUNZIP2_H
+
+int bunzip2(unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *pos,
+ void(*error)(char *x));
+#endif
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
new file mode 100644
index 000000000..1fcfd64b5
--- /dev/null
+++ b/include/linux/decompress/generic.h
@@ -0,0 +1,39 @@
+#ifndef DECOMPRESS_GENERIC_H
+#define DECOMPRESS_GENERIC_H
+
+typedef int (*decompress_fn) (unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *outbuf,
+ long *posp,
+ void(*error)(char *x));
+
+/* inbuf - input buffer
+ *len - len of pre-read data in inbuf
+ *fill - function to fill inbuf when empty
+ *flush - function to write out outbuf
+ *outbuf - output buffer
+ *posp - if non-null, input position (number of bytes read) will be
+ * returned here
+ *
+ *If len != 0, inbuf should contain all the necessary input data, and fill
+ *should be NULL
+ *If len = 0, inbuf can be NULL, in which case the decompressor will allocate
+ *the input buffer. If inbuf != NULL it must be at least XXX_IOBUF_SIZE bytes.
+ *fill will be called (repeatedly...) to read data, at most XXX_IOBUF_SIZE
+ *bytes should be read per call. Replace XXX with the appropriate decompressor
+ *name, e.g. LZMA_IOBUF_SIZE.
+ *
+ *If flush = NULL, outbuf must be large enough to buffer all the expected
+ *output. If flush != NULL, the output buffer will be allocated by the
+ *decompressor (outbuf = NULL), and the flush function will be called to
+ *flush the output buffer at the appropriate time (decompressor and stream
+ *dependent).
+ */
+
+
+/* Utility routine to detect the decompression method */
+decompress_fn decompress_method(const unsigned char *inbuf, long len,
+ const char **name);
+
+#endif
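To illustrate the calling convention spelled out in the comment above for the simplest case (all input pre-read, no flush callback, output buffer large enough for the whole image), here is a hedged sketch; the helper and the error callback are made up for the example:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/decompress/generic.h>

/* Hypothetical error callback matching the decompress_fn prototype. */
static void demo_error(char *x)
{
        pr_err("decompression failed: %s\n", x);
}

static int demo_decompress(unsigned char *in, long in_len, unsigned char *out)
{
        const char *name;
        decompress_fn decomp = decompress_method(in, in_len, &name);

        if (!decomp)
                return -EINVAL;
        pr_info("detected %s compressed data\n", name);

        /*
         * len != 0 and fill == NULL: all input is already in 'in'.
         * flush == NULL: 'out' must hold the entire decompressed image.
         */
        return decomp(in, in_len, NULL, NULL, out, NULL, demo_error);
}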
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
new file mode 100644
index 000000000..e4f411fdb
--- /dev/null
+++ b/include/linux/decompress/inflate.h
@@ -0,0 +1,10 @@
+#ifndef LINUX_DECOMPRESS_INFLATE_H
+#define LINUX_DECOMPRESS_INFLATE_H
+
+int gunzip(unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *pos,
+ void(*error_fn)(char *x));
+#endif
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
new file mode 100644
index 000000000..7925bf0ee
--- /dev/null
+++ b/include/linux/decompress/mm.h
@@ -0,0 +1,93 @@
+/*
+ * linux/compr_mm.h
+ *
+ * Memory management for pre-boot and ramdisk uncompressors
+ *
+ * Authors: Alain Knaff <alain@knaff.lu>
+ *
+ */
+
+#ifndef DECOMPR_MM_H
+#define DECOMPR_MM_H
+
+#ifdef STATIC
+
+/* Code active when included from pre-boot environment: */
+
+/*
+ * Some architectures want to ensure there is no local data in their
+ * pre-boot environment, so that data can be arbitrarily relocated (via
+ * GOT references). This is achieved by defining STATIC_RW_DATA to
+ * be null.
+ */
+#ifndef STATIC_RW_DATA
+#define STATIC_RW_DATA static
+#endif
+
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+STATIC_RW_DATA unsigned long malloc_ptr;
+STATIC_RW_DATA int malloc_count;
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size < 0)
+ return NULL;
+ if (!malloc_ptr)
+ malloc_ptr = free_mem_ptr;
+
+ malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+
+ p = (void *)malloc_ptr;
+ malloc_ptr += size;
+
+ if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+ return NULL;
+
+ malloc_count++;
+ return p;
+}
+
+static void free(void *where)
+{
+ malloc_count--;
+ if (!malloc_count)
+ malloc_ptr = free_mem_ptr;
+}
+
+#define large_malloc(a) malloc(a)
+#define large_free(a) free(a)
+
+#define INIT
+
+#else /* STATIC */
+
+/* Code active when compiled standalone for use when loading ramdisk: */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Use defines rather than static inline in order to avoid spurious
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate). */
+
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+
+#define large_malloc(a) vmalloc(a)
+#define large_free(a) vfree(a)
+
+#define INIT __init
+#define STATIC
+
+#include <linux/init.h>
+
+#endif /* STATIC */
+
+#endif /* DECOMPR_MM_H */
diff --git a/include/linux/decompress/unlz4.h b/include/linux/decompress/unlz4.h
new file mode 100644
index 000000000..3273c2f36
--- /dev/null
+++ b/include/linux/decompress/unlz4.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_UNLZ4_H
+#define DECOMPRESS_UNLZ4_H
+
+int unlz4(unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *pos,
+ void(*error)(char *x));
+#endif
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
new file mode 100644
index 000000000..8a891a193
--- /dev/null
+++ b/include/linux/decompress/unlzma.h
@@ -0,0 +1,12 @@
+#ifndef DECOMPRESS_UNLZMA_H
+#define DECOMPRESS_UNLZMA_H
+
+int unlzma(unsigned char *, long,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *posp,
+ void(*error)(char *x)
+ );
+
+#endif
diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h
new file mode 100644
index 000000000..af18f95d6
--- /dev/null
+++ b/include/linux/decompress/unlzo.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_UNLZO_H
+#define DECOMPRESS_UNLZO_H
+
+int unlzo(unsigned char *inbuf, long len,
+ long (*fill)(void*, unsigned long),
+ long (*flush)(void*, unsigned long),
+ unsigned char *output,
+ long *pos,
+ void(*error)(char *x));
+#endif
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
new file mode 100644
index 000000000..f764e2a72
--- /dev/null
+++ b/include/linux/decompress/unxz.h
@@ -0,0 +1,19 @@
+/*
+ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef DECOMPRESS_UNXZ_H
+#define DECOMPRESS_UNXZ_H
+
+int unxz(unsigned char *in, long in_size,
+ long (*fill)(void *dest, unsigned long size),
+ long (*flush)(void *src, unsigned long size),
+ unsigned char *out, long *in_used,
+ void (*error)(char *x));
+
+#endif
diff --git a/include/linux/delay.h b/include/linux/delay.h
new file mode 100644
index 000000000..a6ecb34cf
--- /dev/null
+++ b/include/linux/delay.h
@@ -0,0 +1,55 @@
+#ifndef _LINUX_DELAY_H
+#define _LINUX_DELAY_H
+
+/*
+ * Copyright (C) 1993 Linus Torvalds
+ *
+ * Delay routines, using a pre-computed "loops_per_jiffy" value.
+ */
+
+#include <linux/kernel.h>
+
+extern unsigned long loops_per_jiffy;
+
+#include <asm/delay.h>
+
+/*
+ * Using udelay() for intervals greater than a few milliseconds can
+ * risk overflow for high loops_per_jiffy (high bogomips) machines. The
+ * mdelay() provides a wrapper to prevent this. For delays greater
+ * than MAX_UDELAY_MS milliseconds, the wrapper is used. Architecture
+ * specific values can be defined in asm-???/delay.h as an override.
+ * The 2nd mdelay() definition ensures GCC will optimize away the
+ * while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G.
+ */
+
+#ifndef MAX_UDELAY_MS
+#define MAX_UDELAY_MS 5
+#endif
+
+#ifndef mdelay
+#define mdelay(n) (\
+ (__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \
+ ({unsigned long __ms=(n); while (__ms--) udelay(1000);}))
+#endif
+
+#ifndef ndelay
+static inline void ndelay(unsigned long x)
+{
+ udelay(DIV_ROUND_UP(x, 1000));
+}
+#define ndelay(x) ndelay(x)
+#endif
+
+extern unsigned long lpj_fine;
+void calibrate_delay(void);
+void msleep(unsigned int msecs);
+unsigned long msleep_interruptible(unsigned int msecs);
+void usleep_range(unsigned long min, unsigned long max);
+
+static inline void ssleep(unsigned int seconds)
+{
+ msleep(seconds * 1000);
+}
+
+#endif /* defined(_LINUX_DELAY_H) */
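A short sketch of how the busy-wait and sleeping primitives above are typically mixed; the device and all of the timings here are hypothetical:

#include <linux/delay.h>

static void foo_reset(void)
{
        /* Atomic context or very short waits: busy-wait. */
        udelay(50);                     /* 50 us reset strobe */
        mdelay(2);                      /* expands per the macro above */

        /* Process context and longer waits: sleep instead of spinning. */
        usleep_range(1000, 2000);       /* 1-2 ms, timer-coalescing friendly */
        msleep(20);                     /* at least 20 ms for the PLL to lock */
}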
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
new file mode 100644
index 000000000..6cee17c22
--- /dev/null
+++ b/include/linux/delayacct.h
@@ -0,0 +1,153 @@
+/* delayacct.h - per-task delay accounting
+ *
+ * Copyright (C) Shailabh Nagar, IBM Corp. 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_DELAYACCT_H
+#define _LINUX_DELAYACCT_H
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+/*
+ * Per-task flags relevant to delay accounting
+ * maintained privately to avoid exhausting similar flags in sched.h:PF_*
+ * Used to set current->delays->flags
+ */
+#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
+#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */
+
+#ifdef CONFIG_TASK_DELAY_ACCT
+
+extern int delayacct_on; /* Delay accounting turned on/off */
+extern struct kmem_cache *delayacct_cache;
+extern void delayacct_init(void);
+extern void __delayacct_tsk_init(struct task_struct *);
+extern void __delayacct_tsk_exit(struct task_struct *);
+extern void __delayacct_blkio_start(void);
+extern void __delayacct_blkio_end(void);
+extern int __delayacct_add_tsk(struct taskstats *, struct task_struct *);
+extern __u64 __delayacct_blkio_ticks(struct task_struct *);
+extern void __delayacct_freepages_start(void);
+extern void __delayacct_freepages_end(void);
+
+static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
+{
+ if (p->delays)
+ return (p->delays->flags & DELAYACCT_PF_BLKIO);
+ else
+ return 0;
+}
+
+static inline void delayacct_set_flag(int flag)
+{
+ if (current->delays)
+ current->delays->flags |= flag;
+}
+
+static inline void delayacct_clear_flag(int flag)
+{
+ if (current->delays)
+ current->delays->flags &= ~flag;
+}
+
+static inline void delayacct_tsk_init(struct task_struct *tsk)
+{
+ /* reinitialize in case parent's non-null pointer was dup'ed */
+ tsk->delays = NULL;
+ if (delayacct_on)
+ __delayacct_tsk_init(tsk);
+}
+
+/* Free tsk->delays. Called from bad fork and __put_task_struct
+ * where there's no risk of tsk->delays being accessed elsewhere
+ */
+static inline void delayacct_tsk_free(struct task_struct *tsk)
+{
+ if (tsk->delays)
+ kmem_cache_free(delayacct_cache, tsk->delays);
+ tsk->delays = NULL;
+}
+
+static inline void delayacct_blkio_start(void)
+{
+ delayacct_set_flag(DELAYACCT_PF_BLKIO);
+ if (current->delays)
+ __delayacct_blkio_start();
+}
+
+static inline void delayacct_blkio_end(void)
+{
+ if (current->delays)
+ __delayacct_blkio_end();
+ delayacct_clear_flag(DELAYACCT_PF_BLKIO);
+}
+
+static inline int delayacct_add_tsk(struct taskstats *d,
+ struct task_struct *tsk)
+{
+ if (!delayacct_on || !tsk->delays)
+ return 0;
+ return __delayacct_add_tsk(d, tsk);
+}
+
+static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
+{
+ if (tsk->delays)
+ return __delayacct_blkio_ticks(tsk);
+ return 0;
+}
+
+static inline void delayacct_freepages_start(void)
+{
+ if (current->delays)
+ __delayacct_freepages_start();
+}
+
+static inline void delayacct_freepages_end(void)
+{
+ if (current->delays)
+ __delayacct_freepages_end();
+}
+
+#else
+static inline void delayacct_set_flag(int flag)
+{}
+static inline void delayacct_clear_flag(int flag)
+{}
+static inline void delayacct_init(void)
+{}
+static inline void delayacct_tsk_init(struct task_struct *tsk)
+{}
+static inline void delayacct_tsk_free(struct task_struct *tsk)
+{}
+static inline void delayacct_blkio_start(void)
+{}
+static inline void delayacct_blkio_end(void)
+{}
+static inline int delayacct_add_tsk(struct taskstats *d,
+ struct task_struct *tsk)
+{ return 0; }
+static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
+{ return 0; }
+static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
+{ return 0; }
+static inline void delayacct_freepages_start(void)
+{}
+static inline void delayacct_freepages_end(void)
+{}
+
+#endif /* CONFIG_TASK_DELAY_ACCT */
+
+#endif
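The blkio wrappers above are meant to bracket the blocking operation being accounted. A minimal sketch of that pairing, using a completion as a stand-in for the real wait:

#include <linux/completion.h>
#include <linux/delayacct.h>

/* Hypothetical helper: account the time the current task is blocked on I/O. */
static void foo_wait_for_io(struct completion *done)
{
        delayacct_blkio_start();        /* sets DELAYACCT_PF_BLKIO, starts timing */
        wait_for_completion(done);      /* the blocking operation being measured */
        delayacct_blkio_end();          /* stops timing, clears the flag */
}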
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
new file mode 100644
index 000000000..7009b8bec
--- /dev/null
+++ b/include/linux/dell-led.h
@@ -0,0 +1,10 @@
+#ifndef __DELL_LED_H__
+#define __DELL_LED_H__
+
+enum {
+ DELL_LED_MICMUTE,
+};
+
+int dell_app_wmi_led_set(int whichled, int on);
+
+#endif
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
new file mode 100644
index 000000000..c0a360e99
--- /dev/null
+++ b/include/linux/devcoredump.h
@@ -0,0 +1,35 @@
+#ifndef __DEVCOREDUMP_H
+#define __DEVCOREDUMP_H
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#ifdef CONFIG_DEV_COREDUMP
+void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
+ gfp_t gfp);
+
+void dev_coredumpm(struct device *dev, struct module *owner,
+ const void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen),
+ void (*free)(const void *data));
+#else
+static inline void dev_coredumpv(struct device *dev, const void *data,
+ size_t datalen, gfp_t gfp)
+{
+ vfree(data);
+}
+
+static inline void
+dev_coredumpm(struct device *dev, struct module *owner,
+ const void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen),
+ void (*free)(const void *data))
+{
+ free(data);
+}
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* __DEVCOREDUMP_H */
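A hedged sketch of handing a crash snapshot to this interface from a hypothetical driver; note that dev_coredumpv() consumes the vmalloc()ed buffer in either configuration (the disabled stub above simply vfree()s it):

#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hypothetical firmware-crash handler; 'snapshot' and 'len' are illustrative. */
static void foo_report_fw_crash(struct device *dev,
                                const void *snapshot, size_t len)
{
        void *dump = vmalloc(len);

        if (!dump)
                return;
        memcpy(dump, snapshot, len);

        /* The devcoredump core (or the stub) now owns and frees 'dump'. */
        dev_coredumpv(dev, dump, len, GFP_KERNEL);
}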
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
new file mode 100644
index 000000000..0a83a1e64
--- /dev/null
+++ b/include/linux/devfreq-event.h
@@ -0,0 +1,196 @@
+/*
+ * devfreq-event: a framework to provide raw data and events of devfreq devices
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DEVFREQ_EVENT_H__
+#define __LINUX_DEVFREQ_EVENT_H__
+
+#include <linux/device.h>
+
+/**
+ * struct devfreq_event_dev - the devfreq-event device
+ *
+ * @node : list node linking this device into the list of registered
+ * devfreq-event devices.
+ * @dev : the device registered by devfreq-event class. dev.parent is
+ * the device using devfreq-event.
+ * @lock : a mutex to protect accessing devfreq-event.
+ * @enable_count: the number of times the enable function has been called.
+ * @desc : the description for devfreq-event device.
+ *
+ * This structure contains devfreq-event device information.
+ */
+struct devfreq_event_dev {
+ struct list_head node;
+
+ struct device dev;
+ struct mutex lock;
+ u32 enable_count;
+
+ const struct devfreq_event_desc *desc;
+};
+
+/**
+ * struct devfreq_event_data - the devfreq-event data
+ *
+ * @load_count : load count of devfreq-event device for the given period.
+ * @total_count : total count of devfreq-event device for the given period.
+ * each count may represent a clock cycle, a time unit
+ * (ns/us/...), or anything the device driver wants.
+ * Generally, utilization is load_count / total_count.
+ *
+ * This structure contains the data of devfreq-event device for polling period.
+ */
+struct devfreq_event_data {
+ unsigned long load_count;
+ unsigned long total_count;
+};
+
+/**
+ * struct devfreq_event_ops - the operations of devfreq-event device
+ *
+ * @enable : Enable the devfreq-event device.
+ * @disable : Disable the devfreq-event device.
+ * @reset : Reset all settings of the devfreq-event device.
+ * @set_event : Set the specific event type for the devfreq-event device.
+ * @get_event : Get the result of the devfreq-event device for the
+ * specific event type.
+ *
+ * This structure contains devfreq-event device operations which can be
+ * implemented by devfreq-event device drivers.
+ */
+struct devfreq_event_ops {
+ /* Optional functions */
+ int (*enable)(struct devfreq_event_dev *edev);
+ int (*disable)(struct devfreq_event_dev *edev);
+ int (*reset)(struct devfreq_event_dev *edev);
+
+ /* Mandatory functions */
+ int (*set_event)(struct devfreq_event_dev *edev);
+ int (*get_event)(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata);
+};
+
+/**
+ * struct devfreq_event_desc - the descriptor of devfreq-event device
+ *
+ * @name : the name of devfreq-event device.
+ * @driver_data : the private data for devfreq-event driver.
+ * @ops : the operation to control devfreq-event device.
+ *
+ * Each devfreq-event device is described by this structure.
+ * This structure contains the various data for devfreq-event device.
+ */
+struct devfreq_event_desc {
+ const char *name;
+ void *driver_data;
+
+ const struct devfreq_event_ops *ops;
+};
+
+#if defined(CONFIG_PM_DEVFREQ_EVENT)
+extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev);
+extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev);
+extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev);
+extern int devfreq_event_set_event(struct devfreq_event_dev *edev);
+extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata);
+extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
+extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+ struct device *dev, int index);
+extern int devfreq_event_get_edev_count(struct device *dev);
+extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc);
+extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
+extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc);
+extern void devm_devfreq_event_remove_edev(struct device *dev,
+ struct devfreq_event_dev *edev);
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+ return edev->desc->driver_data;
+}
+#else
+static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
+{
+ return false;
+}
+
+static inline int devfreq_event_set_event(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
+ struct device *dev, int index)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_event_get_edev_count(struct device *dev)
+{
+ return -EINVAL;
+}
+
+static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
+ struct devfreq_event_desc *desc)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline struct devfreq_event_dev *devm_devfreq_event_add_edev(
+ struct device *dev,
+ struct devfreq_event_desc *desc)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void devm_devfreq_event_remove_edev(struct device *dev,
+ struct devfreq_event_dev *edev)
+{
+}
+#endif /* CONFIG_PM_DEVFREQ_EVENT */
+
+#endif /* __LINUX_DEVFREQ_EVENT_H__ */
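As a consumer-side sketch of the API above, the helper below reads one utilization sample from the first devfreq-event provider referenced by a device; the phandle index, the per-mille unit and the function name are assumptions:

#include <linux/devfreq-event.h>
#include <linux/err.h>

static int foo_read_utilization(struct device *dev, unsigned long *permille)
{
        struct devfreq_event_dev *edev;
        struct devfreq_event_data edata;
        int ret;

        edev = devfreq_event_get_edev_by_phandle(dev, 0);
        if (IS_ERR(edev))
                return PTR_ERR(edev);

        ret = devfreq_event_enable_edev(edev);
        if (ret)
                return ret;

        devfreq_event_set_event(edev);          /* arm/start the counters */
        /* ... one sampling period passes elsewhere ... */
        ret = devfreq_event_get_event(edev, &edata);
        if (!ret && edata.total_count)
                *permille = edata.load_count * 1000 / edata.total_count;

        devfreq_event_disable_edev(edev);
        return ret;
}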
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
new file mode 100644
index 000000000..ce447f0f1
--- /dev/null
+++ b/include/linux/devfreq.h
@@ -0,0 +1,294 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ * for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DEVFREQ_H__
+#define __LINUX_DEVFREQ_H__
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/pm_opp.h>
+
+#define DEVFREQ_NAME_LEN 16
+
+struct devfreq;
+
+/**
+ * struct devfreq_dev_status - Data given from devfreq user device to
+ * governors. Represents the performance
+ * statistics.
+ * @total_time: The total time represented by this instance of
+ * devfreq_dev_status
+ * @busy_time: The time that the device was working among the
+ * total_time.
+ * @current_frequency: The operating frequency.
+ * @private_data: An entry not specified by the devfreq framework.
+ * A device and a specific governor may have their
+ * own protocol with private_data. However, because
+ * this is governor-specific, a governor using this
+ * will only be compatible with devices aware of it.
+ */
+struct devfreq_dev_status {
+ /* both since the last measure */
+ unsigned long total_time;
+ unsigned long busy_time;
+ unsigned long current_frequency;
+ void *private_data;
+};
+
+/*
+ * If the flag is set, the given frequency is a least upper bound: the
+ * resulting frequency should be at most this value (lower or equal).
+ * If the flag is not set, the given frequency is a greatest lower bound:
+ * the resulting frequency should be at least this value.
+ */
+#define DEVFREQ_FLAG_LEAST_UPPER_BOUND 0x1
+
+/**
+ * struct devfreq_dev_profile - Devfreq's user device profile
+ * @initial_freq: The operating frequency when devfreq_add_device() is
+ * called.
+ * @polling_ms: The polling interval in ms. 0 disables polling.
+ * @target: The device should set its operating frequency to
+ * freq, or to the lowest supported frequency above freq.
+ * If freq is higher than any operable frequency, set
+ * the maximum. Before returning, the target function
+ * should set freq to the frequency actually chosen.
+ * The "flags" parameter's possible values are
+ * explained above with "DEVFREQ_FLAG_*" macros.
+ * @get_dev_status: The device should provide the current performance
+ * status to devfreq, which is used by governors.
+ * @get_cur_freq: The device should provide the current frequency
+ * at which it is operating.
+ * @exit: An optional callback that is called when devfreq
+ * is removing the devfreq object due to error or
+ * from devfreq_remove_device() call. If the user
+ * has registered devfreq->nb at a notifier-head,
+ * this is the time to unregister it.
+ * @freq_table: Optional list of frequencies to support statistics.
+ * @max_state: The size of freq_table.
+ */
+struct devfreq_dev_profile {
+ unsigned long initial_freq;
+ unsigned int polling_ms;
+
+ int (*target)(struct device *dev, unsigned long *freq, u32 flags);
+ int (*get_dev_status)(struct device *dev,
+ struct devfreq_dev_status *stat);
+ int (*get_cur_freq)(struct device *dev, unsigned long *freq);
+ void (*exit)(struct device *dev);
+
+ unsigned int *freq_table;
+ unsigned int max_state;
+};
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @node: list node - contains registered devfreq governors
+ * @name: Governor's name
+ * @get_target_freq: Returns desired operating frequency for the device.
+ * Basically, get_target_freq will run
+ * devfreq_dev_profile.get_dev_status() to get the
+ * status of the device (load = busy_time / total_time).
+ * If no_central_polling is set, this callback is called
+ * only when update_devfreq() is notified by an OPP change.
+ * @event_handler: Callback for devfreq core framework to notify events
+ * to governors. Events include per device governor
+ * init and exit, opp changes out of devfreq, suspend
+ * and resume of per device devfreq during device idle.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+ struct list_head node;
+
+ const char name[DEVFREQ_NAME_LEN];
+ int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+ int (*event_handler)(struct devfreq *devfreq,
+ unsigned int event, void *data);
+};
+
+/**
+ * struct devfreq - Device devfreq structure
+ * @node: list node - contains the devices with devfreq that have been
+ * registered.
+ * @lock: a mutex to protect accessing devfreq.
+ * @dev: device registered by devfreq class. dev.parent is the device
+ * using devfreq.
+ * @profile: device-specific devfreq profile
+ * @governor: the method used to choose the frequency based on usage.
+ * @governor_name: devfreq governor name for use with this devfreq
+ * @nb: notifier block used to notify devfreq object that it should
+ * reevaluate operable frequencies. Devfreq users may register
+ * devfreq.nb with the corresponding notifier call chain.
+ * @work: delayed work for load monitoring.
+ * @previous_freq: previously configured frequency value.
+ * @data: Private data of the governor. The devfreq framework does not
+ * touch this.
+ * @min_freq: Limit minimum frequency requested by user (0: none)
+ * @max_freq: Limit maximum frequency requested by user (0: none)
+ * @stop_polling: devfreq polling status of a device.
+ * @total_trans: Number of devfreq transitions
+ * @trans_table: Statistics of devfreq transitions
+ * @time_in_state: Statistics of devfreq states
+ * @last_stat_updated: The last time the statistics were updated
+ *
+ * This structure stores the devfreq information for a given device.
+ *
+ * Note that when a governor accesses entries in struct devfreq in its
+ * functions except for the context of callbacks defined in struct
+ * devfreq_governor, the governor should protect its access with the
+ * struct mutex lock in struct devfreq. A governor may use this mutex
+ * to protect its own private data in void *data as well.
+ */
+struct devfreq {
+ struct list_head node;
+
+ struct mutex lock;
+ struct device dev;
+ struct devfreq_dev_profile *profile;
+ const struct devfreq_governor *governor;
+ char governor_name[DEVFREQ_NAME_LEN];
+ struct notifier_block nb;
+ struct delayed_work work;
+
+ unsigned long previous_freq;
+
+ void *data; /* private data for governors */
+
+ unsigned long min_freq;
+ unsigned long max_freq;
+ bool stop_polling;
+
+ /* information for device frequency transition */
+ unsigned int total_trans;
+ unsigned int *trans_table;
+ unsigned long *time_in_state;
+ unsigned long last_stat_updated;
+};
+
+#if defined(CONFIG_PM_DEVFREQ)
+extern struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+extern int devfreq_remove_device(struct devfreq *devfreq);
+extern struct devfreq *devm_devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+extern void devm_devfreq_remove_device(struct device *dev,
+ struct devfreq *devfreq);
+
+/* Supposed to be called by PM callbacks */
+extern int devfreq_suspend_device(struct devfreq *devfreq);
+extern int devfreq_resume_device(struct devfreq *devfreq);
+
+/* Helper functions for devfreq user device driver with OPP. */
+extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq, u32 flags);
+extern int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+extern int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+extern int devm_devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+/**
+ * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
+ * and devfreq_add_device
+ * @upthreshold: If the load is over this value, the frequency jumps.
+ * Specify 0 to use the default. Valid value = 0 to 100.
+ * @downdifferential: If the load is under upthreshold - downdifferential,
+ * the governor may consider slowing the frequency down.
+ * Specify 0 to use the default. Valid value = 0 to 100.
+ * downdifferential < upthreshold must hold.
+ *
+ * If the devfreq_simple_ondemand_data pointer fed to the governor is NULL,
+ * the governor uses the default values.
+ */
+struct devfreq_simple_ondemand_data {
+ unsigned int upthreshold;
+ unsigned int downdifferential;
+};
+#endif
+
+#else /* !CONFIG_PM_DEVFREQ */
+static inline struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int devfreq_remove_device(struct devfreq *devfreq)
+{
+ return 0;
+}
+
+static inline struct devfreq *devm_devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_devfreq_remove_device(struct device *dev,
+ struct devfreq *devfreq)
+{
+}
+
+static inline int devfreq_suspend_device(struct devfreq *devfreq)
+{
+ return 0;
+}
+
+static inline int devfreq_resume_device(struct devfreq *devfreq)
+{
+ return 0;
+}
+
+static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq, u32 flags)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq)
+{
+ return -EINVAL;
+}
+
+static inline int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq)
+{
+ return -EINVAL;
+}
+
+static inline int devm_devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq)
+{
+ return -EINVAL;
+}
+
+static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq)
+{
+}
+#endif /* CONFIG_PM_DEVFREQ */
+
+#endif /* __LINUX_DEVFREQ_H__ */
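A hedged sketch of registering a device with devfreq using the profile above and the stock "simple_ondemand" governor; the frequency table, polling interval and callback bodies are placeholders for a made-up IP block:

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/kernel.h>

static unsigned int foo_freq_table[] = { 100000000, 200000000, 400000000 };

static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
{
        /* Program the clock to *freq (or the closest supported rate) and
         * write the rate actually set back into *freq. */
        return 0;
}

static int foo_get_dev_status(struct device *dev,
                              struct devfreq_dev_status *stat)
{
        /* Fill in busy_time, total_time and current_frequency from HW. */
        return 0;
}

static struct devfreq_dev_profile foo_profile = {
        .initial_freq   = 200000000,
        .polling_ms     = 100,
        .target         = foo_target,
        .get_dev_status = foo_get_dev_status,
        .freq_table     = foo_freq_table,
        .max_state      = ARRAY_SIZE(foo_freq_table),
};

static int foo_probe_devfreq(struct device *dev)
{
        struct devfreq *df;

        df = devm_devfreq_add_device(dev, &foo_profile,
                                     "simple_ondemand", NULL);
        return PTR_ERR_OR_ZERO(df);
}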
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
new file mode 100644
index 000000000..51cc1deb7
--- /dev/null
+++ b/include/linux/device-mapper.h
@@ -0,0 +1,608 @@
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef _LINUX_DEVICE_MAPPER_H
+#define _LINUX_DEVICE_MAPPER_H
+
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/math64.h>
+#include <linux/ratelimit.h>
+
+struct dm_dev;
+struct dm_target;
+struct dm_table;
+struct mapped_device;
+struct bio_vec;
+
+typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
+
+union map_info {
+ void *ptr;
+};
+
+/*
+ * In the constructor the target parameter will already have the
+ * table, type, begin and len fields filled in.
+ */
+typedef int (*dm_ctr_fn) (struct dm_target *target,
+ unsigned int argc, char **argv);
+
+/*
+ * The destructor doesn't need to free the dm_target, just
+ * anything hidden in ti->private.
+ */
+typedef void (*dm_dtr_fn) (struct dm_target *ti);
+
+/*
+ * The map function must return:
+ * < 0: error
+ * = 0: The target will handle the io by resubmitting it later
+ * = 1: simple remap complete
+ * = 2: The target wants to push back the io
+ */
+typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
+typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
+ union map_info *map_context);
+typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
+ struct request *rq,
+ union map_info *map_context,
+ struct request **clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone);
+
+/*
+ * Returns:
+ * < 0 : error (currently ignored)
+ * 0 : ended successfully
+ * 1 : for some reason the io has still not completed (eg,
+ * multipath target might want to requeue a failed io).
+ * 2 : The target wants to push back the io
+ */
+typedef int (*dm_endio_fn) (struct dm_target *ti,
+ struct bio *bio, int error);
+typedef int (*dm_request_endio_fn) (struct dm_target *ti,
+ struct request *clone, int error,
+ union map_info *map_context);
+
+typedef void (*dm_presuspend_fn) (struct dm_target *ti);
+typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
+typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
+typedef int (*dm_preresume_fn) (struct dm_target *ti);
+typedef void (*dm_resume_fn) (struct dm_target *ti);
+
+typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+ unsigned status_flags, char *result, unsigned maxlen);
+
+typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
+
+typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
+ unsigned long arg);
+
+typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size);
+
+/*
+ * These iteration functions are typically used to check (and combine)
+ * properties of underlying devices.
+ * E.g. Does at least one underlying device support flush?
+ * Does any underlying device not support WRITE_SAME?
+ *
+ * The callout function is called once for each contiguous section of
+ * an underlying device. State can be maintained in *data.
+ * Return non-zero to stop iterating through any further devices.
+ */
+typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
+ struct dm_dev *dev,
+ sector_t start, sector_t len,
+ void *data);
+
+/*
+ * This function must iterate through each section of device used by the
+ * target until it encounters a non-zero return code, which it then returns.
+ * Returns zero if no callout returned non-zero.
+ */
+typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
+ iterate_devices_callout_fn fn,
+ void *data);
+
+typedef void (*dm_io_hints_fn) (struct dm_target *ti,
+ struct queue_limits *limits);
+
+/*
+ * Returns:
+ * 0: The target can handle the next I/O immediately.
+ * 1: The target can't handle the next I/O immediately.
+ */
+typedef int (*dm_busy_fn) (struct dm_target *ti);
+
+void dm_error(const char *message);
+
+struct dm_dev {
+ struct block_device *bdev;
+ fmode_t mode;
+ char name[16];
+};
+
+/*
+ * Constructors should call these functions to ensure destination devices
+ * are opened/closed correctly.
+ */
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result);
+void dm_put_device(struct dm_target *ti, struct dm_dev *d);
+
+/*
+ * Information about a target type
+ */
+
+struct target_type {
+ uint64_t features;
+ const char *name;
+ struct module *module;
+ unsigned version[3];
+ dm_ctr_fn ctr;
+ dm_dtr_fn dtr;
+ dm_map_fn map;
+ dm_map_request_fn map_rq;
+ dm_clone_and_map_request_fn clone_and_map_rq;
+ dm_release_clone_request_fn release_clone_rq;
+ dm_endio_fn end_io;
+ dm_request_endio_fn rq_end_io;
+ dm_presuspend_fn presuspend;
+ dm_presuspend_undo_fn presuspend_undo;
+ dm_postsuspend_fn postsuspend;
+ dm_preresume_fn preresume;
+ dm_resume_fn resume;
+ dm_status_fn status;
+ dm_message_fn message;
+ dm_ioctl_fn ioctl;
+ dm_merge_fn merge;
+ dm_busy_fn busy;
+ dm_iterate_devices_fn iterate_devices;
+ dm_io_hints_fn io_hints;
+
+ /* For internal device-mapper use. */
+ struct list_head list;
+};
+
+/*
+ * Target features
+ */
+
+/*
+ * Any table that contains an instance of this target must have only one.
+ */
+#define DM_TARGET_SINGLETON 0x00000001
+#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)
+
+/*
+ * Indicates that a target does not support read-only devices.
+ */
+#define DM_TARGET_ALWAYS_WRITEABLE 0x00000002
+#define dm_target_always_writeable(type) \
+ ((type)->features & DM_TARGET_ALWAYS_WRITEABLE)
+
+/*
+ * Any device that contains a table with an instance of this target may never
+ * have tables containing any different target type.
+ */
+#define DM_TARGET_IMMUTABLE 0x00000004
+#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE)
+
+/*
+ * Some targets need to be sent the same WRITE bio several times so
+ * that they can send copies of it to different devices. This function
+ * examines any supplied bio and returns the number of copies of it the
+ * target requires.
+ */
+typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
+
+struct dm_target {
+ struct dm_table *table;
+ struct target_type *type;
+
+ /* target limits */
+ sector_t begin;
+ sector_t len;
+
+ /* If non-zero, maximum size of I/O submitted to a target. */
+ uint32_t max_io_len;
+
+ /*
+ * A number of zero-length barrier bios that will be submitted
+ * to the target for the purpose of flushing cache.
+ *
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ * It is the responsibility of the target driver to remap these bios
+ * to the real underlying devices.
+ */
+ unsigned num_flush_bios;
+
+ /*
+ * The number of discard bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ */
+ unsigned num_discard_bios;
+
+ /*
+ * The number of WRITE SAME bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ */
+ unsigned num_write_same_bios;
+
+ /*
+ * The minimum number of extra bytes allocated in each bio for the
+ * target to use. dm_per_bio_data returns the data location.
+ */
+ unsigned per_bio_data_size;
+
+ /*
+ * If defined, this function is called to find out how many
+ * duplicate bios should be sent to the target when writing
+ * data.
+ */
+ dm_num_write_bios_fn num_write_bios;
+
+ /* target specific data */
+ void *private;
+
+ /* Used to provide an error string from the ctr */
+ char *error;
+
+ /*
+ * Set if this target needs to receive flushes regardless of
+ * whether or not its underlying devices have support.
+ */
+ bool flush_supported:1;
+
+ /*
+ * Set if this target needs to receive discards regardless of
+ * whether or not its underlying devices have support.
+ */
+ bool discards_supported:1;
+
+ /*
+ * Set if the target requires discard bios to be split
+ * on max_io_len boundary.
+ */
+ bool split_discard_bios:1;
+
+ /*
+ * Set if this target does not return zeroes on discarded blocks.
+ */
+ bool discard_zeroes_data_unsupported:1;
+};
+
+/* Each target can link one of these into the table */
+struct dm_target_callbacks {
+ struct list_head list;
+ int (*congested_fn) (struct dm_target_callbacks *, int);
+};
+
+/*
+ * For bio-based dm.
+ * One of these is allocated for each bio.
+ * This structure shouldn't be touched directly by target drivers.
+ * It is here so that we can inline dm_per_bio_data and
+ * dm_bio_from_per_bio_data
+ */
+struct dm_target_io {
+ struct dm_io *io;
+ struct dm_target *ti;
+ unsigned target_bio_nr;
+ unsigned *len_ptr;
+ struct bio clone;
+};
+
+static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
+{
+ return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
+}
+
+static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
+{
+ return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
+}
+
+static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
+{
+ return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
+}
+
+int dm_register_target(struct target_type *t);
+void dm_unregister_target(struct target_type *t);
+
+/*
+ * Target argument parsing.
+ */
+struct dm_arg_set {
+ unsigned argc;
+ char **argv;
+};
+
+/*
+ * The minimum and maximum value of a numeric argument, together with
+ * the error message to use if the number is found to be outside that range.
+ */
+struct dm_arg {
+ unsigned min;
+ unsigned max;
+ char *error;
+};
+
+/*
+ * Validate the next argument, either returning it as *value or, if invalid,
+ * returning -EINVAL and setting *error.
+ */
+int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *value, char **error);
+
+/*
+ * Process the next argument as the start of a group containing between
+ * arg->min and arg->max further arguments. Either return the size as
+ * *num_args or, if invalid, return -EINVAL and set *error.
+ */
+int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned *num_args, char **error);
+
+/*
+ * Return the current argument and shift to the next.
+ */
+const char *dm_shift_arg(struct dm_arg_set *as);
+
+/*
+ * Move through num_args arguments.
+ */
+void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
+
+/*-----------------------------------------------------------------
+ * Functions for creating and manipulating mapped devices.
+ * Drop the reference with dm_put when you finish with the object.
+ *---------------------------------------------------------------*/
+
+/*
+ * DM_ANY_MINOR chooses the next available minor number.
+ */
+#define DM_ANY_MINOR (-1)
+int dm_create(int minor, struct mapped_device **md);
+
+/*
+ * Reference counting for md.
+ */
+struct mapped_device *dm_get_md(dev_t dev);
+void dm_get(struct mapped_device *md);
+int dm_hold(struct mapped_device *md);
+void dm_put(struct mapped_device *md);
+
+/*
+ * An arbitrary pointer may be stored alongside a mapped device.
+ */
+void dm_set_mdptr(struct mapped_device *md, void *ptr);
+void *dm_get_mdptr(struct mapped_device *md);
+
+/*
+ * A device can still be used while suspended, but I/O is deferred.
+ */
+int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
+int dm_resume(struct mapped_device *md);
+
+/*
+ * Event functions.
+ */
+uint32_t dm_get_event_nr(struct mapped_device *md);
+int dm_wait_event(struct mapped_device *md, int event_nr);
+uint32_t dm_next_uevent_seq(struct mapped_device *md);
+void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
+
+/*
+ * Info functions.
+ */
+const char *dm_device_name(struct mapped_device *md);
+int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
+struct gendisk *dm_disk(struct mapped_device *md);
+int dm_suspended(struct dm_target *ti);
+int dm_noflush_suspending(struct dm_target *ti);
+void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
+union map_info *dm_get_rq_mapinfo(struct request *rq);
+
+struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
+
+/*
+ * Geometry functions.
+ */
+int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
+int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
+
+/*-----------------------------------------------------------------
+ * Functions for manipulating device-mapper tables.
+ *---------------------------------------------------------------*/
+
+/*
+ * First create an empty table.
+ */
+int dm_table_create(struct dm_table **result, fmode_t mode,
+ unsigned num_targets, struct mapped_device *md);
+
+/*
+ * Then call this once for each target.
+ */
+int dm_table_add_target(struct dm_table *t, const char *type,
+ sector_t start, sector_t len, char *params);
+
+/*
+ * Target_ctr should call this if it needs to add any callbacks.
+ */
+void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
+
+/*
+ * Finally call this to make the table ready for use.
+ */
+int dm_table_complete(struct dm_table *t);
+
+/*
+ * Target may require that it is never sent I/O larger than len.
+ */
+int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
+
+/*
+ * Table reference counting.
+ */
+struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
+void dm_put_live_table(struct mapped_device *md, int srcu_idx);
+void dm_sync_table(struct mapped_device *md);
+
+/*
+ * Queries
+ */
+sector_t dm_table_get_size(struct dm_table *t);
+unsigned int dm_table_get_num_targets(struct dm_table *t);
+fmode_t dm_table_get_mode(struct dm_table *t);
+struct mapped_device *dm_table_get_md(struct dm_table *t);
+
+/*
+ * Trigger an event.
+ */
+void dm_table_event(struct dm_table *t);
+
+/*
+ * Run the queue for request-based targets.
+ */
+void dm_table_run_md_queue_async(struct dm_table *t);
+
+/*
+ * The device must be suspended before calling this method.
+ * Returns the previous table, which the caller must destroy.
+ */
+struct dm_table *dm_swap_table(struct mapped_device *md,
+ struct dm_table *t);
+
+/*
+ * A wrapper around vmalloc.
+ */
+void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
+
+/*-----------------------------------------------------------------
+ * Macros.
+ *---------------------------------------------------------------*/
+#define DM_NAME "device-mapper"
+
+#ifdef CONFIG_PRINTK
+extern struct ratelimit_state dm_ratelimit_state;
+
+#define dm_ratelimit() __ratelimit(&dm_ratelimit_state)
+#else
+#define dm_ratelimit() 0
+#endif
+
+#define DMCRIT(f, arg...) \
+ printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+
+#define DMERR(f, arg...) \
+ printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMERR_LIMIT(f, arg...) \
+ do { \
+ if (dm_ratelimit()) \
+ printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
+ f "\n", ## arg); \
+ } while (0)
+
+#define DMWARN(f, arg...) \
+ printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMWARN_LIMIT(f, arg...) \
+ do { \
+ if (dm_ratelimit()) \
+ printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
+ f "\n", ## arg); \
+ } while (0)
+
+#define DMINFO(f, arg...) \
+ printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
+#define DMINFO_LIMIT(f, arg...) \
+ do { \
+ if (dm_ratelimit()) \
+ printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
+ "\n", ## arg); \
+ } while (0)
+
+#ifdef CONFIG_DM_DEBUG
+# define DMDEBUG(f, arg...) \
+ printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
+# define DMDEBUG_LIMIT(f, arg...) \
+ do { \
+ if (dm_ratelimit()) \
+ printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
+ "\n", ## arg); \
+ } while (0)
+#else
+# define DMDEBUG(f, arg...) do {} while (0)
+# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
+#endif
+
+#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
+ 0 : scnprintf(result + sz, maxlen - sz, x))
+
+#define SECTOR_SHIFT 9
+
+/*
+ * Definitions of return values from target end_io function.
+ */
+#define DM_ENDIO_INCOMPLETE 1
+#define DM_ENDIO_REQUEUE 2
+
+/*
+ * Definitions of return values from target map function.
+ */
+#define DM_MAPIO_SUBMITTED 0
+#define DM_MAPIO_REMAPPED 1
+#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
+
+#define dm_sector_div64(x, y)( \
+{ \
+ u64 _res; \
+ (x) = div64_u64_rem(x, y, &_res); \
+ _res; \
+} \
+)
+
+/*
+ * Ceiling(n / sz)
+ */
+#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
+
+#define dm_sector_div_up(n, sz) ( \
+{ \
+ sector_t _r = ((n) + (sz) - 1); \
+ sector_div(_r, (sz)); \
+ _r; \
+} \
+)
+
+/*
+ * ceiling(n / size) * size
+ */
+#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
+
+#define dm_array_too_big(fixed, obj, num) \
+ ((num) > (UINT_MAX - (fixed)) / (obj))
+
+/*
+ * Sector offset taken relative to the start of the target instead of
+ * relative to the start of the device.
+ */
+#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
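+
+/*
+ * Illustrative sketch (not from the upstream header): a linear-style target's
+ * map function typically rebases the bio with dm_target_offset() and returns
+ * one of the DM_MAPIO_* codes defined above. The context structure, field
+ * names and function name below are hypothetical.
+ *
+ *    static int example_map(struct dm_target *ti, struct bio *bio)
+ *    {
+ *        struct example_ctx *ec = ti->private;
+ *
+ *        bio->bi_bdev = ec->dev->bdev;
+ *        bio->bi_iter.bi_sector =
+ *            ec->start + dm_target_offset(ti, bio->bi_iter.bi_sector);
+ *        return DM_MAPIO_REMAPPED;
+ *    }
+ */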
+
+static inline sector_t to_sector(unsigned long n)
+{
+ return (n >> SECTOR_SHIFT);
+}
+
+static inline unsigned long to_bytes(sector_t n)
+{
+ return (n << SECTOR_SHIFT);
+}
+
+#endif /* _LINUX_DEVICE_MAPPER_H */
diff --git a/include/linux/device.h b/include/linux/device.h
new file mode 100644
index 000000000..6558af90c
--- /dev/null
+++ b/include/linux/device.h
@@ -0,0 +1,1272 @@
+/*
+ * device.h - generic, centralized driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ *
+ * This file is released under the GPLv2
+ *
+ * See Documentation/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_H_
+#define _DEVICE_H_
+
+#include <linux/ioport.h>
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/pm.h>
+#include <linux/atomic.h>
+#include <linux/ratelimit.h>
+#include <linux/uidgid.h>
+#include <linux/gfp.h>
+#include <asm/device.h>
+
+struct device;
+struct device_private;
+struct device_driver;
+struct driver_private;
+struct module;
+struct class;
+struct subsys_private;
+struct bus_type;
+struct device_node;
+struct fwnode_handle;
+struct iommu_ops;
+struct iommu_group;
+
+struct bus_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct bus_type *bus, char *buf);
+ ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
+};
+
+#define BUS_ATTR(_name, _mode, _show, _store) \
+ struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define BUS_ATTR_RW(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
+#define BUS_ATTR_RO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
+
+extern int __must_check bus_create_file(struct bus_type *,
+ struct bus_attribute *);
+extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
+
+/**
+ * struct bus_type - The bus type of the device
+ *
+ * @name: The name of the bus.
+ * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
+ * @dev_root: Default device to use as the parent.
+ * @dev_attrs: Default attributes of the devices on the bus.
+ * @bus_groups: Default attributes of the bus.
+ * @dev_groups: Default attributes of the devices on the bus.
+ * @drv_groups: Default attributes of the device drivers on the bus.
+ * @match: Called, perhaps multiple times, whenever a new device or driver
+ * is added for this bus. It should return a nonzero value if the
+ * given device can be handled by the given driver.
+ * @uevent: Called when a device is added, removed, or a few other things
+ * that generate uevents to add the environment variables.
+ * @probe: Called when a new device or driver is added to this bus; it calls
+ * the matched driver's probe to initialize the device.
+ * @remove: Called when a device is removed from this bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ *
+ * @online: Called to put the device back online (after offlining it).
+ * @offline: Called to put the device offline for hot-removal. May fail.
+ *
+ * @suspend: Called when a device on this bus wants to go to sleep mode.
+ * @resume: Called to bring a device on this bus out of sleep mode.
+ * @pm: Power management operations of this bus, which call the specific
+ * device driver's pm-ops.
+ * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
+ * driver implementations to a bus and allow the driver to do
+ * bus-specific setup
+ * @p: The private data of the driver core, only the driver core can
+ * touch this.
+ * @lock_key: Lock class key for use by the lock validator
+ *
+ * A bus is a channel between the processor and one or more devices. For the
+ * purposes of the device model, all devices are connected via a bus, even if
+ * it is an internal, virtual, "platform" bus. Buses can plug into each other.
+ * A USB controller is usually a PCI device, for example. The device model
+ * represents the actual connections between buses and the devices they control.
+ * A bus is represented by the bus_type structure. It contains the name, the
+ * default attributes, the bus' methods, PM operations, and the driver core's
+ * private data.
+ */
+struct bus_type {
+ const char *name;
+ const char *dev_name;
+ struct device *dev_root;
+ struct device_attribute *dev_attrs; /* use dev_groups instead */
+ const struct attribute_group **bus_groups;
+ const struct attribute_group **dev_groups;
+ const struct attribute_group **drv_groups;
+
+ int (*match)(struct device *dev, struct device_driver *drv);
+ int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ int (*probe)(struct device *dev);
+ int (*remove)(struct device *dev);
+ void (*shutdown)(struct device *dev);
+
+ int (*online)(struct device *dev);
+ int (*offline)(struct device *dev);
+
+ int (*suspend)(struct device *dev, pm_message_t state);
+ int (*resume)(struct device *dev);
+
+ const struct dev_pm_ops *pm;
+
+ const struct iommu_ops *iommu_ops;
+
+ struct subsys_private *p;
+ struct lock_class_key lock_key;
+};
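+
+/*
+ * Minimal sketch (not part of the upstream header) of how a bus driver might
+ * fill in a bus_type; only .name and .match are strictly required for device
+ * and driver matching. The "example" names below are hypothetical.
+ *
+ *    static int example_bus_match(struct device *dev, struct device_driver *drv)
+ *    {
+ *        return strcmp(dev_name(dev), drv->name) == 0;
+ *    }
+ *
+ *    static struct bus_type example_bus_type = {
+ *        .name  = "example",
+ *        .match = example_bus_match,
+ *    };
+ *
+ * The bus is then made known to the driver core with bus_register() and torn
+ * down again with bus_unregister().
+ */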
+
+extern int __must_check bus_register(struct bus_type *bus);
+
+extern void bus_unregister(struct bus_type *bus);
+
+extern int __must_check bus_rescan_devices(struct bus_type *bus);
+
+/* iterator helpers for buses */
+struct subsys_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+void subsys_dev_iter_init(struct subsys_dev_iter *iter,
+ struct bus_type *subsys,
+ struct device *start,
+ const struct device_type *type);
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
+
+int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *bus_find_device(struct bus_type *bus, struct device *start,
+ void *data,
+ int (*match)(struct device *dev, void *data));
+struct device *bus_find_device_by_name(struct bus_type *bus,
+ struct device *start,
+ const char *name);
+struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
+ struct device *hint);
+int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+ void *data, int (*fn)(struct device_driver *, void *));
+void bus_sort_breadthfirst(struct bus_type *bus,
+ int (*compare)(const struct device *a,
+ const struct device *b));
+/*
+ * Bus notifiers: Get notified of addition/removal of devices
+ * and binding/unbinding of drivers to devices.
+ * In the long run, it should be a replacement for the platform
+ * notify hooks.
+ */
+struct notifier_block;
+
+extern int bus_register_notifier(struct bus_type *bus,
+ struct notifier_block *nb);
+extern int bus_unregister_notifier(struct bus_type *bus,
+ struct notifier_block *nb);
+
+/* All of the notifiers below get called with the target struct device *
+ * as an argument. Note that those functions are likely to be called
+ * with the device lock held in the core, so be careful.
+ */
+#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
+#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
+#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
+#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
+ bound */
+#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
+ unbound */
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
+ from the device */
+
+extern struct kset *bus_get_kset(struct bus_type *bus);
+extern struct klist *bus_get_device_klist(struct bus_type *bus);
+
+/**
+ * struct device_driver - The basic device driver structure
+ * @name: Name of the device driver.
+ * @bus: The bus which the device of this driver belongs to.
+ * @owner: The module owner.
+ * @mod_name: Used for built-in modules.
+ * @suppress_bind_attrs: Disables bind/unbind via sysfs.
+ * @of_match_table: The open firmware table.
+ * @acpi_match_table: The ACPI match table.
+ * @probe: Called to query the existence of a specific device,
+ * whether this driver can work with it, and bind the driver
+ * to a specific device.
+ * @remove: Called when the device is removed from the system to
+ * unbind a device from this driver.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device to sleep mode. Usually to a
+ * low power state.
+ * @resume: Called to bring a device from sleep mode.
+ * @groups: Default attributes that get created by the driver core
+ * automatically.
+ * @pm: Power management operations of the device which matched
+ * this driver.
+ * @p: Driver core's private data, no one other than the driver
+ * core can touch this.
+ *
+ * The device driver-model tracks all of the drivers known to the system.
+ * The main reason for this tracking is to enable the driver core to match
+ * up drivers with new devices. Once drivers are known objects within the
+ * system, however, a number of other things become possible. Device drivers
+ * can export information and configuration variables that are independent
+ * of any specific device.
+ */
+struct device_driver {
+ const char *name;
+ struct bus_type *bus;
+
+ struct module *owner;
+ const char *mod_name; /* used for built-in modules */
+
+ bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
+
+ const struct of_device_id *of_match_table;
+ const struct acpi_device_id *acpi_match_table;
+
+ int (*probe) (struct device *dev);
+ int (*remove) (struct device *dev);
+ void (*shutdown) (struct device *dev);
+ int (*suspend) (struct device *dev, pm_message_t state);
+ int (*resume) (struct device *dev);
+ const struct attribute_group **groups;
+
+ const struct dev_pm_ops *pm;
+
+ struct driver_private *p;
+};
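+
+/*
+ * Minimal sketch (not part of the upstream header): a driver fills in a
+ * device_driver and hands it to its bus with driver_register(), usually via
+ * a bus-specific wrapper. All names below are hypothetical.
+ *
+ *    static struct device_driver example_driver = {
+ *        .name   = "example",
+ *        .bus    = &example_bus_type,
+ *        .owner  = THIS_MODULE,
+ *        .probe  = example_probe,
+ *        .remove = example_remove,
+ *    };
+ *
+ *    ret = driver_register(&example_driver);
+ */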
+
+
+extern int __must_check driver_register(struct device_driver *drv);
+extern void driver_unregister(struct device_driver *drv);
+
+extern struct device_driver *driver_find(const char *name,
+ struct bus_type *bus);
+extern int driver_probe_done(void);
+extern void wait_for_device_probe(void);
+
+
+/* sysfs interface for exporting driver attributes */
+
+struct driver_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device_driver *driver, char *buf);
+ ssize_t (*store)(struct device_driver *driver, const char *buf,
+ size_t count);
+};
+
+#define DRIVER_ATTR(_name, _mode, _show, _store) \
+ struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DRIVER_ATTR_RW(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
+#define DRIVER_ATTR_RO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
+#define DRIVER_ATTR_WO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
+
+extern int __must_check driver_create_file(struct device_driver *driver,
+ const struct driver_attribute *attr);
+extern void driver_remove_file(struct device_driver *driver,
+ const struct driver_attribute *attr);
+
+extern int __must_check driver_for_each_device(struct device_driver *drv,
+ struct device *start,
+ void *data,
+ int (*fn)(struct device *dev,
+ void *));
+struct device *driver_find_device(struct device_driver *drv,
+ struct device *start, void *data,
+ int (*match)(struct device *dev, void *data));
+
+/**
+ * struct subsys_interface - interfaces to device functions
+ * @name: name of the device function
+ * @subsys: subsystem of the devices to attach to
+ * @node: the list of functions registered at the subsystem
+ * @add_dev: device hookup to device function handler
+ * @remove_dev: device hookup to device function handler
+ *
+ * Simple interfaces attached to a subsystem. Multiple interfaces can
+ * attach to a subsystem and its devices. Unlike drivers, they do not
+ * exclusively claim or control devices. Interfaces usually represent
+ * a specific functionality of a subsystem/class of devices.
+ */
+struct subsys_interface {
+ const char *name;
+ struct bus_type *subsys;
+ struct list_head node;
+ int (*add_dev)(struct device *dev, struct subsys_interface *sif);
+ int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+};
+
+int subsys_interface_register(struct subsys_interface *sif);
+void subsys_interface_unregister(struct subsys_interface *sif);
+
+int subsys_system_register(struct bus_type *subsys,
+ const struct attribute_group **groups);
+int subsys_virtual_register(struct bus_type *subsys,
+ const struct attribute_group **groups);
+
+/**
+ * struct class - device classes
+ * @name: Name of the class.
+ * @owner: The module owner.
+ * @class_attrs: Default attributes of this class.
+ * @dev_groups: Default attributes of the devices that belong to the class.
+ * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
+ * @dev_uevent: Called when a device is added, removed from this class, or a
+ * few other things that generate uevents to add the environment
+ * variables.
+ * @devnode: Callback to provide the device node name used by devtmpfs.
+ * @class_release: Called to release this class.
+ * @dev_release: Called to release the device.
+ * @suspend: Used to put the device to sleep mode, usually to a low power
+ * state.
+ * @resume: Used to bring the device from the sleep mode.
+ * @ns_type: Callbacks so sysfs can determine namespaces.
+ * @namespace: Namespace of the device belonging to this class.
+ * @pm: The default device power management operations of this class.
+ * @p: The private data of the driver core, no one other than the
+ * driver core can touch this.
+ *
+ * A class is a higher-level view of a device that abstracts out low-level
+ * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
+ * at the class level, they are all simply disks. Classes allow user space
+ * to work with devices based on what they do, rather than how they are
+ * connected or how they work.
+ */
+struct class {
+ const char *name;
+ struct module *owner;
+
+ struct class_attribute *class_attrs;
+ const struct attribute_group **dev_groups;
+ struct kobject *dev_kobj;
+
+ int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(struct device *dev, umode_t *mode);
+
+ void (*class_release)(struct class *class);
+ void (*dev_release)(struct device *dev);
+
+ int (*suspend)(struct device *dev, pm_message_t state);
+ int (*resume)(struct device *dev);
+
+ const struct kobj_ns_type_operations *ns_type;
+ const void *(*namespace)(struct device *dev);
+
+ const struct dev_pm_ops *pm;
+
+ struct subsys_private *p;
+};
+
+struct class_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
+extern struct kobject *sysfs_dev_block_kobj;
+extern struct kobject *sysfs_dev_char_kobj;
+extern int __must_check __class_register(struct class *class,
+ struct lock_class_key *key);
+extern void class_unregister(struct class *class);
+
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define class_register(class) \
+({ \
+ static struct lock_class_key __key; \
+ __class_register(class, &__key); \
+})
+
+struct class_compat;
+struct class_compat *class_compat_register(const char *name);
+void class_compat_unregister(struct class_compat *cls);
+int class_compat_create_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link);
+void class_compat_remove_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link);
+
+extern void class_dev_iter_init(struct class_dev_iter *iter,
+ struct class *class,
+ struct device *start,
+ const struct device_type *type);
+extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
+extern void class_dev_iter_exit(struct class_dev_iter *iter);
+
+extern int class_for_each_device(struct class *class, struct device *start,
+ void *data,
+ int (*fn)(struct device *dev, void *data));
+extern struct device *class_find_device(struct class *class,
+ struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
+
+struct class_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct class *class, struct class_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count);
+};
+
+#define CLASS_ATTR(_name, _mode, _show, _store) \
+ struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define CLASS_ATTR_RW(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_RW(_name)
+#define CLASS_ATTR_RO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+
+extern int __must_check class_create_file_ns(struct class *class,
+ const struct class_attribute *attr,
+ const void *ns);
+extern void class_remove_file_ns(struct class *class,
+ const struct class_attribute *attr,
+ const void *ns);
+
+static inline int __must_check class_create_file(struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_create_file_ns(class, attr, NULL);
+}
+
+static inline void class_remove_file(struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_remove_file_ns(class, attr, NULL);
+}
+
+/* Simple class attribute that is just a static string */
+struct class_attribute_string {
+ struct class_attribute attr;
+ char *str;
+};
+
+/* Currently read-only only */
+#define _CLASS_ATTR_STRING(_name, _mode, _str) \
+ { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
+#define CLASS_ATTR_STRING(_name, _mode, _str) \
+ struct class_attribute_string class_attr_##_name = \
+ _CLASS_ATTR_STRING(_name, _mode, _str)
+
+extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
+ char *buf);
+
+struct class_interface {
+ struct list_head node;
+ struct class *class;
+
+ int (*add_dev) (struct device *, struct class_interface *);
+ void (*remove_dev) (struct device *, struct class_interface *);
+};
+
+extern int __must_check class_interface_register(struct class_interface *);
+extern void class_interface_unregister(struct class_interface *);
+
+extern struct class * __must_check __class_create(struct module *owner,
+ const char *name,
+ struct lock_class_key *key);
+extern void class_destroy(struct class *cls);
+
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define class_create(owner, name) \
+({ \
+ static struct lock_class_key __key; \
+ __class_create(owner, name, &__key); \
+})
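+
+/*
+ * Illustrative sketch (not part of the upstream header): a simple character
+ * driver commonly creates a class and then a device under it so that
+ * udev/devtmpfs can populate /dev. The class name and dev_t are hypothetical.
+ *
+ *    cls = class_create(THIS_MODULE, "example");
+ *    if (IS_ERR(cls))
+ *        return PTR_ERR(cls);
+ *    dev = device_create(cls, NULL, devt, NULL, "example%d", 0);
+ *
+ * The pair is torn down with device_destroy() and class_destroy().
+ */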
+
+/*
+ * The type of device that "struct device" is embedded in. A class
+ * or bus can contain devices of different types
+ * like "partitions" and "disks", "mouse" and "event".
+ * This identifies the device type and carries type-specific
+ * information, equivalent to the kobj_type of a kobject.
+ * If "name" is specified, the uevent will contain it in
+ * the DEVTYPE variable.
+ */
+struct device_type {
+ const char *name;
+ const struct attribute_group **groups;
+ int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid);
+ void (*release)(struct device *dev);
+
+ const struct dev_pm_ops *pm;
+};
+
+/* interface for exporting device attributes */
+struct device_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+};
+
+struct dev_ext_attribute {
+ struct device_attribute attr;
+ void *var;
+};
+
+ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+ssize_t device_show_int(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ssize_t device_store_int(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+ssize_t device_show_bool(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+
+#define DEVICE_ATTR(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ATTR_RW(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+#define DEVICE_ATTR_RO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#define DEVICE_ATTR_WO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
+#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
+ struct dev_ext_attribute dev_attr_##_name = \
+ { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
+#define DEVICE_INT_ATTR(_name, _mode, _var) \
+ struct dev_ext_attribute dev_attr_##_name = \
+ { __ATTR(_name, _mode, device_show_int, device_store_int), &(_var) }
+#define DEVICE_BOOL_ATTR(_name, _mode, _var) \
+ struct dev_ext_attribute dev_attr_##_name = \
+ { __ATTR(_name, _mode, device_show_bool, device_store_bool), &(_var) }
+#define DEVICE_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+ struct device_attribute dev_attr_##_name = \
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
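+
+/*
+ * Illustrative sketch (not part of the upstream header): DEVICE_ATTR_RO(foo)
+ * expects a show routine named foo_show and produces dev_attr_foo, which can
+ * then be created with device_create_file() or placed in an attribute group.
+ * The attribute name and the value it reports are hypothetical.
+ *
+ *    static ssize_t foo_show(struct device *dev,
+ *                            struct device_attribute *attr, char *buf)
+ *    {
+ *        return sprintf(buf, "%d\n", 42);
+ *    }
+ *    static DEVICE_ATTR_RO(foo);
+ */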
+
+extern int device_create_file(struct device *device,
+ const struct device_attribute *entry);
+extern void device_remove_file(struct device *dev,
+ const struct device_attribute *attr);
+extern bool device_remove_file_self(struct device *dev,
+ const struct device_attribute *attr);
+extern int __must_check device_create_bin_file(struct device *dev,
+ const struct bin_attribute *attr);
+extern void device_remove_bin_file(struct device *dev,
+ const struct bin_attribute *attr);
+
+/* device resource management */
+typedef void (*dr_release_t)(struct device *dev, void *res);
+typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
+
+#ifdef CONFIG_DEBUG_DEVRES
+extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
+ const char *name);
+#define devres_alloc(release, size, gfp) \
+ __devres_alloc(release, size, gfp, #release)
+#else
+extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
+#endif
+extern void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data);
+extern void devres_free(void *res);
+extern void devres_add(struct device *dev, void *res);
+extern void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+extern void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data);
+extern void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+extern int devres_destroy(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+extern int devres_release(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+
+/* devres group */
+extern void * __must_check devres_open_group(struct device *dev, void *id,
+ gfp_t gfp);
+extern void devres_close_group(struct device *dev, void *id);
+extern void devres_remove_group(struct device *dev, void *id);
+extern int devres_release_group(struct device *dev, void *id);
+
+/* managed devm_k.alloc/kfree for device drivers */
+extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
+ va_list ap);
+extern __printf(3, 4)
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
+static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
+{
+ return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
+}
+static inline void *devm_kmalloc_array(struct device *dev,
+ size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ return devm_kmalloc(dev, n * size, flags);
+}
+static inline void *devm_kcalloc(struct device *dev,
+ size_t n, size_t size, gfp_t flags)
+{
+ return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
+}
+extern void devm_kfree(struct device *dev, void *p);
+extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
+ gfp_t gfp);
+
+extern unsigned long devm_get_free_pages(struct device *dev,
+ gfp_t gfp_mask, unsigned int order);
+extern void devm_free_pages(struct device *dev, unsigned long addr);
+
+void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
+
+/* allows to add/remove a custom action to devres stack */
+int devm_add_action(struct device *dev, void (*action)(void *), void *data);
+void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
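+
+/*
+ * Illustrative sketch (not part of the upstream header): devm_* allocations
+ * are tied to the device and released automatically when the driver detaches,
+ * so a probe routine normally needs no matching kfree. The context structure
+ * below is hypothetical.
+ *
+ *    static int example_probe(struct device *dev)
+ *    {
+ *        struct example_priv *priv;
+ *
+ *        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ *        if (!priv)
+ *            return -ENOMEM;
+ *        dev_set_drvdata(dev, priv);
+ *        return 0;
+ *    }
+ */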
+
+struct device_dma_parameters {
+ /*
+ * a low level driver may set these to teach IOMMU code about
+ * sg limitations.
+ */
+ unsigned int max_segment_size;
+ unsigned long segment_boundary_mask;
+};
+
+/**
+ * struct device - The basic device structure
+ * @parent: The device's "parent" device, the device to which it is attached.
+ * In most cases, a parent device is some sort of bus or host
+ * controller. If parent is NULL, the device is a top-level device,
+ * which is not usually what you want.
+ * @p: Holds the private data of the driver core portions of the device.
+ * See the comment of the struct device_private for detail.
+ * @kobj: A top-level, abstract class from which other classes are derived.
+ * @init_name: Initial name of the device.
+ * @type: The type of device.
+ * This identifies the device type and carries type-specific
+ * information.
+ * @mutex: Mutex to synchronize calls to its driver.
+ * @bus: Type of bus device is on.
+ * @driver: Which driver has allocated this device.
+ * @platform_data: Platform data specific to the device.
+ * Example: For devices on custom boards, as typical of embedded
+ * and SOC based hardware, Linux often uses platform_data to point
+ * to board-specific structures describing devices and how they
+ * are wired. That can include what ports are available, chip
+ * variants, which GPIO pins act in what additional roles, and so
+ * on. This shrinks the "Board Support Packages" (BSPs) and
+ * minimizes board-specific #ifdefs in drivers.
+ * @driver_data: Private pointer for driver specific info.
+ * @power: For device power management.
+ * See Documentation/power/devices.txt for details.
+ * @pm_domain: Provide callbacks that are executed during system suspend,
+ * hibernation, system resume and during runtime PM transitions
+ * along with subsystem-level and driver-level callbacks.
+ * @pins: For device pin management.
+ * See Documentation/pinctrl.txt for details.
+ * @numa_node: NUMA node this device is close to.
+ * @dma_mask: Dma mask (if dma'ble device).
+ * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mappings, as not
+ * all hardware supports 64-bit addresses for consistent allocations of
+ * such descriptors.
+ * @dma_pfn_offset: Offset of the device's DMA memory range relative to RAM.
+ * @dma_parms: A low level driver may set these to teach IOMMU code about
+ * segment limitations.
+ * @dma_pools: Dma pools (if dma'ble device).
+ * @dma_mem: Internal for coherent mem override.
+ * @cma_area: Contiguous memory area for dma allocations
+ * @archdata: For arch-specific additions.
+ * @of_node: Associated device tree node.
+ * @fwnode: Associated device node supplied by platform firmware.
+ * @devt: For creating the sysfs "dev".
+ * @id: device instance
+ * @devres_lock: Spinlock to protect the resource of the device.
+ * @devres_head: The resources list of the device.
+ * @knode_class: The node used to add the device to the class list.
+ * @class: The class of the device.
+ * @groups: Optional attribute groups.
+ * @release: Callback to free the device after all references have
+ * gone away. This should be set by the allocator of the
+ * device (i.e. the bus driver that discovered the device).
+ * @iommu_group: IOMMU group the device belongs to.
+ *
+ * @offline_disabled: If set, the device is permanently online.
+ * @offline: Set after successful invocation of bus type's .offline().
+ *
+ * At the lowest level, every device in a Linux system is represented by an
+ * instance of struct device. The device structure contains the information
+ * that the device model core needs to model the system. Most subsystems,
+ * however, track additional information about the devices they host. As a
+ * result, it is rare for devices to be represented by bare device structures;
+ * instead, that structure, like kobject structures, is usually embedded within
+ * a higher-level representation of the device.
+ */
+struct device {
+ struct device *parent;
+
+ struct device_private *p;
+
+ struct kobject kobj;
+ const char *init_name; /* initial name of the device */
+ const struct device_type *type;
+
+ struct mutex mutex; /* mutex to synchronize calls to
+ * its driver.
+ */
+
+ struct bus_type *bus; /* type of bus device is on */
+ struct device_driver *driver; /* which driver has allocated this
+ device */
+ void *platform_data; /* Platform specific data, device
+ core doesn't touch it */
+ void *driver_data; /* Driver data, set and get with
+ dev_set/get_drvdata */
+ struct dev_pm_info power;
+ struct dev_pm_domain *pm_domain;
+
+#ifdef CONFIG_PINCTRL
+ struct dev_pin_info *pins;
+#endif
+
+#ifdef CONFIG_NUMA
+ int numa_node; /* NUMA node this device is close to */
+#endif
+ u64 *dma_mask; /* dma mask (if dma'able device) */
+ u64 coherent_dma_mask;/* Like dma_mask, but for
+ alloc_coherent mappings as
+ not all hardware supports
+ 64 bit addresses for consistent
+ allocations of such descriptors. */
+ unsigned long dma_pfn_offset;
+
+ struct device_dma_parameters *dma_parms;
+
+ struct list_head dma_pools; /* dma pools (if dma'ble) */
+
+ struct dma_coherent_mem *dma_mem; /* internal for coherent mem
+ override */
+#ifdef CONFIG_DMA_CMA
+ struct cma *cma_area; /* contiguous memory area for dma
+ allocations */
+#endif
+ /* arch specific additions */
+ struct dev_archdata archdata;
+
+ struct device_node *of_node; /* associated device tree node */
+ struct fwnode_handle *fwnode; /* firmware device node */
+
+ dev_t devt; /* dev_t, creates the sysfs "dev" */
+ u32 id; /* device instance */
+
+ spinlock_t devres_lock;
+ struct list_head devres_head;
+
+ struct klist_node knode_class;
+ struct class *class;
+ const struct attribute_group **groups; /* optional groups */
+
+ void (*release)(struct device *dev);
+ struct iommu_group *iommu_group;
+
+ bool offline_disabled:1;
+ bool offline:1;
+};
+
+static inline struct device *kobj_to_dev(struct kobject *kobj)
+{
+ return container_of(kobj, struct device, kobj);
+}
+
+/* Get the wakeup routines, which depend on struct device */
+#include <linux/pm_wakeup.h>
+
+static inline const char *dev_name(const struct device *dev)
+{
+ /* Use the init name until the kobject becomes available */
+ if (dev->init_name)
+ return dev->init_name;
+
+ return kobject_name(&dev->kobj);
+}
+
+extern __printf(2, 3)
+int dev_set_name(struct device *dev, const char *name, ...);
+
+#ifdef CONFIG_NUMA
+static inline int dev_to_node(struct device *dev)
+{
+ return dev->numa_node;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+ dev->numa_node = node;
+}
+#else
+static inline int dev_to_node(struct device *dev)
+{
+ return -1;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+}
+#endif
+
+static inline void *dev_get_drvdata(const struct device *dev)
+{
+ return dev->driver_data;
+}
+
+static inline void dev_set_drvdata(struct device *dev, void *data)
+{
+ dev->driver_data = data;
+}
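+
+/*
+ * Illustrative sketch (not part of the upstream header): drvdata is the usual
+ * way for a driver to find its per-device state again in later callbacks,
+ * paired with the dev_set_drvdata() call made at probe time. The structure
+ * and helper names are hypothetical.
+ *
+ *    static int example_remove(struct device *dev)
+ *    {
+ *        struct example_priv *priv = dev_get_drvdata(dev);
+ *
+ *        example_teardown(priv);
+ *        return 0;
+ *    }
+ */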
+
+static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
+{
+ return dev ? dev->power.subsys_data : NULL;
+}
+
+static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
+{
+ return dev->kobj.uevent_suppress;
+}
+
+static inline void dev_set_uevent_suppress(struct device *dev, int val)
+{
+ dev->kobj.uevent_suppress = val;
+}
+
+static inline int device_is_registered(struct device *dev)
+{
+ return dev->kobj.state_in_sysfs;
+}
+
+static inline void device_enable_async_suspend(struct device *dev)
+{
+ if (!dev->power.is_prepared)
+ dev->power.async_suspend = true;
+}
+
+static inline void device_disable_async_suspend(struct device *dev)
+{
+ if (!dev->power.is_prepared)
+ dev->power.async_suspend = false;
+}
+
+static inline bool device_async_suspend_enabled(struct device *dev)
+{
+ return !!dev->power.async_suspend;
+}
+
+static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
+{
+ dev->power.ignore_children = enable;
+}
+
+static inline void dev_pm_syscore_device(struct device *dev, bool val)
+{
+#ifdef CONFIG_PM_SLEEP
+ dev->power.syscore = val;
+#endif
+}
+
+static inline void device_lock(struct device *dev)
+{
+ mutex_lock(&dev->mutex);
+}
+
+static inline int device_trylock(struct device *dev)
+{
+ return mutex_trylock(&dev->mutex);
+}
+
+static inline void device_unlock(struct device *dev)
+{
+ mutex_unlock(&dev->mutex);
+}
+
+static inline void device_lock_assert(struct device *dev)
+{
+ lockdep_assert_held(&dev->mutex);
+}
+
+static inline struct device_node *dev_of_node(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_OF))
+ return NULL;
+ return dev->of_node;
+}
+
+void driver_init(void);
+
+/*
+ * High level routines for use by the bus drivers
+ */
+extern int __must_check device_register(struct device *dev);
+extern void device_unregister(struct device *dev);
+extern void device_initialize(struct device *dev);
+extern int __must_check device_add(struct device *dev);
+extern void device_del(struct device *dev);
+extern int device_for_each_child(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
+extern struct device *device_find_child(struct device *dev, void *data,
+ int (*match)(struct device *dev, void *data));
+extern int device_rename(struct device *dev, const char *new_name);
+extern int device_move(struct device *dev, struct device *new_parent,
+ enum dpm_order dpm_order);
+extern const char *device_get_devnode(struct device *dev,
+ umode_t *mode, kuid_t *uid, kgid_t *gid,
+ const char **tmp);
+
+static inline bool device_supports_offline(struct device *dev)
+{
+ return dev->bus && dev->bus->offline && dev->bus->online;
+}
+
+extern void lock_device_hotplug(void);
+extern void unlock_device_hotplug(void);
+extern int lock_device_hotplug_sysfs(void);
+extern int device_offline(struct device *dev);
+extern int device_online(struct device *dev);
+extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+
+/*
+ * Root device objects for grouping under /sys/devices
+ */
+extern struct device *__root_device_register(const char *name,
+ struct module *owner);
+
+/* This is a macro to avoid include problems with THIS_MODULE */
+#define root_device_register(name) \
+ __root_device_register(name, THIS_MODULE)
+
+extern void root_device_unregister(struct device *root);
+
+static inline void *dev_get_platdata(const struct device *dev)
+{
+ return dev->platform_data;
+}
+
+/*
+ * Manual binding of a device to driver. See drivers/base/bus.c
+ * for information on use.
+ */
+extern int __must_check device_bind_driver(struct device *dev);
+extern void device_release_driver(struct device *dev);
+extern int __must_check device_attach(struct device *dev);
+extern int __must_check driver_attach(struct device_driver *drv);
+extern int __must_check device_reprobe(struct device *dev);
+
+/*
+ * Easy functions for dynamically creating devices on the fly
+ */
+extern struct device *device_create_vargs(struct class *cls,
+ struct device *parent,
+ dev_t devt,
+ void *drvdata,
+ const char *fmt,
+ va_list vargs);
+extern __printf(5, 6)
+struct device *device_create(struct class *cls, struct device *parent,
+ dev_t devt, void *drvdata,
+ const char *fmt, ...);
+extern __printf(6, 7)
+struct device *device_create_with_groups(struct class *cls,
+ struct device *parent, dev_t devt, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...);
+extern void device_destroy(struct class *cls, dev_t devt);
+
+/*
+ * Platform "fixup" functions - allow the platform to have their say
+ * about devices and actions that the general device layer doesn't
+ * know about.
+ */
+/* Notify platform of device discovery */
+extern int (*platform_notify)(struct device *dev);
+
+extern int (*platform_notify_remove)(struct device *dev);
+
+
+/*
+ * get_device - atomically increment the reference count for the device.
+ *
+ */
+extern struct device *get_device(struct device *dev);
+extern void put_device(struct device *dev);
+
+#ifdef CONFIG_DEVTMPFS
+extern int devtmpfs_create_node(struct device *dev);
+extern int devtmpfs_delete_node(struct device *dev);
+extern int devtmpfs_mount(const char *mntdir);
+#else
+static inline int devtmpfs_create_node(struct device *dev) { return 0; }
+static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
+static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
+#endif
+
+/* drivers/base/power/shutdown.c */
+extern void device_shutdown(void);
+
+/* debugging and troubleshooting/diagnostic helpers. */
+extern const char *dev_driver_string(const struct device *dev);
+
+
+#ifdef CONFIG_PRINTK
+
+extern __printf(3, 0)
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args);
+extern __printf(3, 4)
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
+
+extern __printf(3, 4)
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
+extern __printf(2, 3)
+void dev_emerg(const struct device *dev, const char *fmt, ...);
+extern __printf(2, 3)
+void dev_alert(const struct device *dev, const char *fmt, ...);
+extern __printf(2, 3)
+void dev_crit(const struct device *dev, const char *fmt, ...);
+extern __printf(2, 3)
+void dev_err(const struct device *dev, const char *fmt, ...);
+extern __printf(2, 3)
+void dev_warn(const struct device *dev, const char *fmt, ...);
+extern __printf(2, 3)
+void dev_notice(const struct device *dev, const char *fmt, ...);
+extern __printf(2, 3)
+void _dev_info(const struct device *dev, const char *fmt, ...);
+
+#else
+
+static inline __printf(3, 0)
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args)
+{ return 0; }
+static inline __printf(3, 4)
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
+{ return 0; }
+
+static inline void __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{}
+static inline __printf(3, 4)
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
+{}
+
+static inline __printf(2, 3)
+void dev_emerg(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void dev_crit(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void dev_alert(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void dev_err(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void dev_warn(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void dev_notice(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_info(const struct device *dev, const char *fmt, ...)
+{}
+
+#endif
+
+/*
+ * Stupid hackaround for existing non-printk uses of dev_info
+ *
+ * Note that the definition of dev_info below is actually _dev_info
+ * and a macro is used to avoid redefining dev_info
+ */
+
+#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define dev_dbg(dev, format, ...) \
+do { \
+ dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define dev_dbg(dev, format, arg...) \
+ dev_printk(KERN_DEBUG, dev, format, ##arg)
+#else
+#define dev_dbg(dev, format, arg...) \
+({ \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, format, ##arg); \
+})
+#endif
+
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ static bool __print_once __read_mostly; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...) \
+ dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...) \
+ dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...) \
+ dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...) \
+ dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...) \
+ dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...) \
+ dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...) \
+ dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...) \
+ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+
+#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define dev_emerg_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ __ratelimit(&_rs)) \
+ __dynamic_dev_dbg(&descriptor, dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#else
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+#ifdef VERBOSE_DEBUG
+#define dev_vdbg dev_dbg
+#else
+#define dev_vdbg(dev, format, arg...) \
+({ \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, format, ##arg); \
+})
+#endif
+
+/*
+ * dev_WARN*() acts like dev_printk(), but with the key difference of
+ * using WARN/WARN_ONCE to include file/line information and a backtrace.
+ */
+#define dev_WARN(dev, format, arg...) \
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg)
+
+#define dev_WARN_ONCE(dev, condition, format, arg...) \
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string(dev), dev_name(dev), ## arg)
+
+/* Create alias, so I can be autoloaded. */
+#define MODULE_ALIAS_CHARDEV(major,minor) \
+ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
+#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
+ MODULE_ALIAS("char-major-" __stringify(major) "-*")
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+extern long sysfs_deprecated;
+#else
+#define sysfs_deprecated 0
+#endif
+
+/**
+ * module_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @__unregister: unregister function for this driver type
+ * @...: Additional arguments to be passed to __register and __unregister.
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define module_driver(__driver, __register, __unregister, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+module_init(__driver##_init); \
+static void __exit __driver##_exit(void) \
+{ \
+ __unregister(&(__driver) , ##__VA_ARGS__); \
+} \
+module_exit(__driver##_exit);
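+
+/*
+ * Illustrative sketch (not part of the upstream header): a bus header would
+ * typically wrap module_driver() like this so that its drivers can replace
+ * module_init()/module_exit() with a single line. The "example" register and
+ * unregister helpers are hypothetical.
+ *
+ *    #define module_example_driver(__drv) \
+ *        module_driver(__drv, example_register_driver, \
+ *                      example_unregister_driver)
+ *
+ *    module_example_driver(my_example_driver);
+ */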
+
+#endif /* _DEVICE_H_ */
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
new file mode 100644
index 000000000..8b64221b4
--- /dev/null
+++ b/include/linux/device_cgroup.h
@@ -0,0 +1,19 @@
+#include <linux/fs.h>
+
+#ifdef CONFIG_CGROUP_DEVICE
+extern int __devcgroup_inode_permission(struct inode *inode, int mask);
+extern int devcgroup_inode_mknod(int mode, dev_t dev);
+static inline int devcgroup_inode_permission(struct inode *inode, int mask)
+{
+ if (likely(!inode->i_rdev))
+ return 0;
+ if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
+ return 0;
+ return __devcgroup_inode_permission(inode, mask);
+}
+#else
+static inline int devcgroup_inode_permission(struct inode *inode, int mask)
+{ return 0; }
+static inline int devcgroup_inode_mknod(int mode, dev_t dev)
+{ return 0; }
+#endif
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
new file mode 100644
index 000000000..251a2090a
--- /dev/null
+++ b/include/linux/devpts_fs.h
@@ -0,0 +1,49 @@
+/* -*- linux-c -*- --------------------------------------------------------- *
+ *
+ * linux/include/linux/devpts_fs.h
+ *
+ * Copyright 1998-2004 H. Peter Anvin -- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * ------------------------------------------------------------------------- */
+
+#ifndef _LINUX_DEVPTS_FS_H
+#define _LINUX_DEVPTS_FS_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_UNIX98_PTYS
+
+int devpts_new_index(struct inode *ptmx_inode);
+void devpts_kill_index(struct inode *ptmx_inode, int idx);
+/* mknod in devpts */
+struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
+ void *priv);
+/* get private structure */
+void *devpts_get_priv(struct inode *pts_inode);
+/* unlink */
+void devpts_pty_kill(struct inode *inode);
+
+#else
+
+/* Dummy stubs in the no-pty case */
+static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
+static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
+ dev_t device, int index, void *priv)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline void *devpts_get_priv(struct inode *pts_inode)
+{
+ return NULL;
+}
+static inline void devpts_pty_kill(struct inode *inode) { }
+
+#endif
+
+
+#endif /* _LINUX_DEVPTS_FS_H */
diff --git a/include/linux/digsig.h b/include/linux/digsig.h
new file mode 100644
index 000000000..6f85a070b
--- /dev/null
+++ b/include/linux/digsig.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2011 Nokia Corporation
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
+ * <dmitry.kasatkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ */
+
+#ifndef _DIGSIG_H
+#define _DIGSIG_H
+
+#include <linux/key.h>
+
+enum pubkey_algo {
+ PUBKEY_ALGO_RSA,
+ PUBKEY_ALGO_MAX,
+};
+
+enum digest_algo {
+ DIGEST_ALGO_SHA1,
+ DIGEST_ALGO_SHA256,
+ DIGEST_ALGO_MAX
+};
+
+struct pubkey_hdr {
+ uint8_t version; /* key format version */
+ uint32_t timestamp; /* key made, always 0 for now */
+ uint8_t algo;
+ uint8_t nmpi;
+ char mpi[0];
+} __packed;
+
+struct signature_hdr {
+ uint8_t version; /* signature format version */
+ uint32_t timestamp; /* signature made */
+ uint8_t algo;
+ uint8_t hash;
+ uint8_t keyid[8];
+ uint8_t nmpi;
+ char mpi[0];
+} __packed;
+
+#if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE)
+
+int digsig_verify(struct key *keyring, const char *sig, int siglen,
+ const char *digest, int digestlen);
+
+#else
+
+static inline int digsig_verify(struct key *keyring, const char *sig,
+ int siglen, const char *digest, int digestlen)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_SIGNATURE */
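+
+/*
+ * Illustrative sketch (not part of the upstream header): callers pass a
+ * keyring holding the public key, the detached signature and the digest that
+ * was signed; a return value of 0 means the signature verified. The variable
+ * names are hypothetical.
+ *
+ *    err = digsig_verify(keyring, sig, siglen, digest, digestlen);
+ *    if (err)
+ *        return err;
+ *
+ * A kernel built without CONFIG_SIGNATURE gets the stub above and therefore
+ * always sees -EOPNOTSUPP.
+ */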
+
+#endif /* _DIGSIG_H */
diff --git a/include/linux/dio.h b/include/linux/dio.h
new file mode 100644
index 000000000..2cc0fd004
--- /dev/null
+++ b/include/linux/dio.h
@@ -0,0 +1,280 @@
+/* header file for DIO boards for the HP300 architecture.
+ * Maybe this should handle DIO-II later?
+ * The general structure of this is vaguely based on how
+ * the Amiga port handles Zorro boards.
+ * Copyright (C) Peter Maydell 05/1998 <pmaydell@chiark.greenend.org.uk>
+ * Converted to driver model Jochen Friedrich <jochen@scram.de>
+ *
+ * The board IDs are from the NetBSD kernel, which for once provided
+ * helpful comments...
+ *
+ * This goes with drivers/dio/dio.c
+ */
+
+#ifndef _LINUX_DIO_H
+#define _LINUX_DIO_H
+
+/* The DIO boards in a system are distinguished by 'select codes' which
+ * range from 0-63 (DIO) and 132-255 (DIO-II).
+ * The DIO board with select code sc is located at physical address
+ * 0x600000 + sc * 0x10000
+ * So DIO cards cover [0x600000-0x800000); the areas [0x200000-0x400000) and
+ * [0x800000-0x1000000) are for additional space required by things
+ * like framebuffers. [0x400000-0x600000) is for miscellaneous internal I/O.
+ * On Linux, this is currently all mapped into the virtual address space
+ * at 0xf0000000 on bootup.
+ * DIO-II boards are at 0x1000000 + (sc - 132) * 0x400000
+ * which is address range [0x1000000-0x20000000) -- too big to map completely,
+ * so currently we just don't handle DIO-II boards. It wouldn't be hard to
+ * do with ioremap() though.
+ */
+
+#include <linux/device.h>
+
+#ifdef __KERNEL__
+
+#include <asm/hp300hw.h>
+
+typedef __u16 dio_id;
+
+ /*
+ * DIO devices
+ */
+
+struct dio_dev {
+ struct dio_bus *bus;
+ dio_id id;
+ int scode;
+ struct dio_driver *driver; /* which driver has allocated this device */
+ struct device dev; /* Generic device interface */
+ u8 ipl;
+ char name[64];
+ struct resource resource;
+};
+
+#define to_dio_dev(n) container_of(n, struct dio_dev, dev)
+
+ /*
+ * DIO bus
+ */
+
+struct dio_bus {
+ struct list_head devices; /* list of devices on this bus */
+ unsigned int num_resources; /* number of resources */
+ struct resource resources[2]; /* address space routed to this bus */
+ struct device dev;
+ char name[10];
+};
+
+extern struct dio_bus dio_bus; /* Single DIO bus */
+extern struct bus_type dio_bus_type;
+
+ /*
+ * DIO device IDs
+ */
+
+struct dio_device_id {
+ dio_id id; /* Device ID or DIO_WILDCARD */
+ unsigned long driver_data; /* Data private to the driver */
+};
+
+ /*
+ * DIO device drivers
+ */
+
+struct dio_driver {
+ struct list_head node;
+ char *name;
+ const struct dio_device_id *id_table; /* NULL if wants all devices */
+ int (*probe)(struct dio_dev *z, const struct dio_device_id *id);
+/* New device inserted */
+ void (*remove)(struct dio_dev *z); /* Device removed (NULL if not a hot-plug capable driver) */
+ struct device_driver driver;
+};
+
+#define to_dio_driver(drv) container_of(drv, struct dio_driver, driver)
+
+/* DIO/DIO-II boards all have the following 8bit registers.
+ * These are offsets from the base of the device.
+ */
+#define DIO_IDOFF 0x01 /* primary device ID */
+#define DIO_IPLOFF 0x03 /* interrupt priority level */
+#define DIO_SECIDOFF 0x15 /* secondary device ID */
+#define DIOII_SIZEOFF 0x101 /* device size, DIO-II only */
+#define DIO_VIRADDRBASE 0xf0000000UL /* vir addr where IOspace is mapped */
+
+#define DIO_BASE 0x600000 /* start of DIO space */
+#define DIO_END 0x1000000 /* end of DIO space */
+#define DIO_DEVSIZE 0x10000 /* size of a DIO device */
+
+#define DIOII_BASE 0x01000000 /* start of DIO-II space */
+#define DIOII_END 0x20000000 /* end of DIO-II space */
+#define DIOII_DEVSIZE 0x00400000 /* size of a DIO-II device */
+
+/* Highest valid select code. If we add DIO-II support this should become
+ * 256 for everything except HP320, which only has DIO.
+ */
+#define DIO_SCMAX (hp300_model == HP_320 ? 32 : 256)
+#define DIOII_SCBASE 132 /* lowest DIO-II select code */
+#define DIO_SCINHOLE(scode) (((scode) >= 32) && ((scode) < DIOII_SCBASE))
+#define DIO_ISDIOII(scode) ((scode) >= 132 && (scode) < 256)
+
+/* macros to read device IDs, given base address */
+#define DIO_ID(baseaddr) in_8((baseaddr) + DIO_IDOFF)
+#define DIO_SECID(baseaddr) in_8((baseaddr) + DIO_SECIDOFF)
+
+/* extract the interrupt level */
+#define DIO_IPL(baseaddr) (((in_8((baseaddr) + DIO_IPLOFF) >> 4) & 0x03) + 3)
+
+/* find the size of a DIO-II board's address space.
+ * DIO boards are all fixed length.
+ */
+#define DIOII_SIZE(baseaddr) ((in_8((baseaddr) + DIOII_SIZEOFF) + 1) * 0x100000)
+
+/* general purpose macro for both DIO and DIO-II */
+#define DIO_SIZE(scode, base) (DIO_ISDIOII((scode)) ? DIOII_SIZE((base)) : DIO_DEVSIZE)
+
+/* The hardware has primary and secondary IDs; we encode these in a single
+ * int as PRIMARY ID & (SECONDARY ID << 8).
+ * In practice this is only important for framebuffers,
+ * and everybody else just sets ID fields equal to the DIO_ID_FOO value.
+ */
+#define DIO_ENCODE_ID(pr,sec) ((((int)sec & 0xff) << 8) | ((int)pr & 0xff))
+/* macro to determine whether a given primary ID requires a secondary ID byte */
+#define DIO_NEEDSSECID(id) ((id) == DIO_ID_FBUFFER)
+#define DIO_WILDCARD 0xff
+
+/* Now a whole slew of macros giving device IDs and descriptive strings: */
+#define DIO_ID_DCA0 0x02 /* 98644A serial */
+#define DIO_DESC_DCA0 "98644A DCA0 serial"
+#define DIO_ID_DCA0REM 0x82 /* 98644A serial */
+#define DIO_DESC_DCA0REM "98644A DCA0REM serial"
+#define DIO_ID_DCA1 0x42 /* 98644A serial */
+#define DIO_DESC_DCA1 "98644A DCA1 serial"
+#define DIO_ID_DCA1REM 0xc2 /* 98644A serial */
+#define DIO_DESC_DCA1REM "98644A DCA1REM serial"
+#define DIO_ID_DCM 0x05 /* 98642A serial MUX */
+#define DIO_DESC_DCM "98642A DCM serial MUX"
+#define DIO_ID_DCMREM 0x85 /* 98642A serial MUX */
+#define DIO_DESC_DCMREM "98642A DCMREM serial MUX"
+#define DIO_ID_LAN 0x15 /* 98643A LAN */
+#define DIO_DESC_LAN "98643A LANCE ethernet"
+#define DIO_ID_FHPIB 0x08 /* 98625A/98625B fast HP-IB */
+#define DIO_DESC_FHPIB "98625A/98625B fast HPIB"
+#define DIO_ID_NHPIB 0x01 /* 98624A HP-IB (normal ie slow) */
+#define DIO_DESC_NHPIB "98624A HPIB"
+#define DIO_ID_SCSI0 0x07 /* 98265A SCSI */
+#define DIO_DESC_SCSI0 "98265A SCSI0"
+#define DIO_ID_SCSI1 0x27 /* ditto */
+#define DIO_DESC_SCSI1 "98265A SCSI1"
+#define DIO_ID_SCSI2 0x47 /* ditto */
+#define DIO_DESC_SCSI2 "98265A SCSI2"
+#define DIO_ID_SCSI3 0x67 /* ditto */
+#define DIO_DESC_SCSI3 "98265A SCSI3"
+#define DIO_ID_FBUFFER 0x39 /* framebuffer: flavour is distinguished by secondary ID */
+#define DIO_DESC_FBUFFER "bitmapped display"
+/* the NetBSD kernel source is a bit unsure as to what these next IDs actually do :-> */
+#define DIO_ID_MISC0 0x03 /* 98622A */
+#define DIO_DESC_MISC0 "98622A"
+#define DIO_ID_MISC1 0x04 /* 98623A */
+#define DIO_DESC_MISC1 "98623A"
+#define DIO_ID_PARALLEL 0x06 /* internal parallel */
+#define DIO_DESC_PARALLEL "internal parallel"
+#define DIO_ID_MISC2 0x09 /* 98287A keyboard */
+#define DIO_DESC_MISC2 "98287A keyboard"
+#define DIO_ID_MISC3 0x0a /* HP98635A FP accelerator */
+#define DIO_DESC_MISC3 "HP98635A FP accelerator"
+#define DIO_ID_MISC4 0x0b /* timer */
+#define DIO_DESC_MISC4 "timer"
+#define DIO_ID_MISC5 0x12 /* 98640A */
+#define DIO_DESC_MISC5 "98640A"
+#define DIO_ID_MISC6 0x16 /* 98659A */
+#define DIO_DESC_MISC6 "98659A"
+#define DIO_ID_MISC7 0x19 /* 237 display */
+#define DIO_DESC_MISC7 "237 display"
+#define DIO_ID_MISC8 0x1a /* quad-wide card */
+#define DIO_DESC_MISC8 "quad-wide card"
+#define DIO_ID_MISC9 0x1b /* 98253A */
+#define DIO_DESC_MISC9 "98253A"
+#define DIO_ID_MISC10 0x1c /* 98627A */
+#define DIO_DESC_MISC10 "98627A"
+#define DIO_ID_MISC11 0x1d /* 98633A */
+#define DIO_DESC_MISC11 "98633A"
+#define DIO_ID_MISC12 0x1e /* 98259A */
+#define DIO_DESC_MISC12 "98259A"
+#define DIO_ID_MISC13 0x1f /* 8741 */
+#define DIO_DESC_MISC13 "8741"
+#define DIO_ID_VME 0x31 /* 98577A VME adapter */
+#define DIO_DESC_VME "98577A VME adapter"
+#define DIO_ID_DCL 0x34 /* 98628A serial */
+#define DIO_DESC_DCL "98628A DCL serial"
+#define DIO_ID_DCLREM 0xb4 /* 98628A serial */
+#define DIO_DESC_DCLREM "98628A DCLREM serial"
+/* These are the secondary IDs for the framebuffers */
+#define DIO_ID2_GATORBOX 0x01 /* 98700/98710 "gatorbox" */
+#define DIO_DESC2_GATORBOX "98700/98710 \"gatorbox\" display"
+#define DIO_ID2_TOPCAT 0x02 /* 98544/98545/98547 "topcat" */
+#define DIO_DESC2_TOPCAT "98544/98545/98547 \"topcat\" display"
+#define DIO_ID2_RENAISSANCE 0x04 /* 98720/98721 "renaissance" */
+#define DIO_DESC2_RENAISSANCE "98720/98721 \"renaissance\" display"
+#define DIO_ID2_LRCATSEYE 0x05 /* lowres "catseye" */
+#define DIO_DESC2_LRCATSEYE "low-res catseye display"
+#define DIO_ID2_HRCCATSEYE 0x06 /* highres colour "catseye" */
+#define DIO_DESC2_HRCCATSEYE "high-res color catseye display"
+#define DIO_ID2_HRMCATSEYE 0x07 /* highres mono "catseye" */
+#define DIO_DESC2_HRMCATSEYE "high-res mono catseye display"
+#define DIO_ID2_DAVINCI 0x08 /* 98730/98731 "davinci" */
+#define DIO_DESC2_DAVINCI "98730/98731 \"davinci\" display"
+#define DIO_ID2_XXXCATSEYE 0x09 /* "catseye" */
+#define DIO_DESC2_XXXCATSEYE "catseye display"
+#define DIO_ID2_HYPERION 0x0e /* A1096A "hyperion" */
+#define DIO_DESC2_HYPERION "A1096A \"hyperion\" display"
+#define DIO_ID2_XGENESIS 0x0b /* "x-genesis"; no NetBSD support */
+#define DIO_DESC2_XGENESIS "\"x-genesis\" display"
+#define DIO_ID2_TIGER 0x0c /* "tiger"; no NetBSD support */
+#define DIO_DESC2_TIGER "\"tiger\" display"
+#define DIO_ID2_YGENESIS 0x0d /* "y-genesis"; no NetBSD support */
+#define DIO_DESC2_YGENESIS "\"y-genesis\" display"
+/* if you add new IDs then you should tell dio.c about them so it can
+ * identify them...
+ */
+
+extern int dio_find(int deviceid);
+extern unsigned long dio_scodetophysaddr(int scode);
+extern int dio_create_sysfs_dev_files(struct dio_dev *);
+
+/* New-style probing */
+extern int dio_register_driver(struct dio_driver *);
+extern void dio_unregister_driver(struct dio_driver *);
+extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z);
+static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d)
+{
+ return d->driver;
+}
+
+#define dio_resource_start(d) ((d)->resource.start)
+#define dio_resource_end(d) ((d)->resource.end)
+#define dio_resource_len(d) (resource_size(&(d)->resource))
+#define dio_resource_flags(d) ((d)->resource.flags)
+
+#define dio_request_device(d, name) \
+ request_mem_region(dio_resource_start(d), dio_resource_len(d), name)
+#define dio_release_device(d) \
+ release_mem_region(dio_resource_start(d), dio_resource_len(d))
+
+/* Similar to the helpers above, these manipulate per-dio_dev
+ * driver-specific data. They are really just wrappers around the
+ * corresponding generic device structure calls.
+ */
+static inline void *dio_get_drvdata (struct dio_dev *d)
+{
+ return dev_get_drvdata(&d->dev);
+}
+
+static inline void dio_set_drvdata (struct dio_dev *d, void *data)
+{
+ dev_set_drvdata(&d->dev, data);
+}
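+
+/*
+ * Illustrative sketch (hypothetical driver, not part of the original
+ * header): a probe routine would typically pair these helpers with the
+ * request/release macros above, e.g.
+ *
+ *	if (!dio_request_device(d, "foo"))
+ *		return -EBUSY;
+ *	dio_set_drvdata(d, priv);
+ *	...
+ *	priv = dio_get_drvdata(d);
+ *	dio_release_device(d);
+ */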
+
+#endif /* __KERNEL__ */
+#endif /* ndef _LINUX_DIO_H */
diff --git a/include/linux/dirent.h b/include/linux/dirent.h
new file mode 100644
index 000000000..f072fb8d1
--- /dev/null
+++ b/include/linux/dirent.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_DIRENT_H
+#define _LINUX_DIRENT_H
+
+struct linux_dirent64 {
+ u64 d_ino;
+ s64 d_off;
+ unsigned short d_reclen;
+ unsigned char d_type;
+ char d_name[0];
+};
+
+#endif
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
new file mode 100644
index 000000000..d02da2c6f
--- /dev/null
+++ b/include/linux/dlm.h
@@ -0,0 +1,172 @@
+/******************************************************************************
+*******************************************************************************
+**
+** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
+** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
+**
+** This copyrighted material is made available to anyone wishing to use,
+** modify, copy, or redistribute it subject to the terms and conditions
+** of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+#ifndef __DLM_DOT_H__
+#define __DLM_DOT_H__
+
+#include <uapi/linux/dlm.h>
+
+
+struct dlm_slot {
+ int nodeid; /* 1 to MAX_INT */
+ int slot; /* 1 to MAX_INT */
+};
+
+/*
+ * recover_prep: called before the dlm begins lock recovery.
+ * Notifies the lockspace user that locks from failed members will be granted.
+ * recover_slot: called after recover_prep and before recover_done.
+ * Identifies a failed lockspace member.
+ * recover_done: called after the dlm completes lock recovery.
+ * Identifies lockspace members and lockspace generation number.
+ */
+
+struct dlm_lockspace_ops {
+ void (*recover_prep) (void *ops_arg);
+ void (*recover_slot) (void *ops_arg, struct dlm_slot *slot);
+ void (*recover_done) (void *ops_arg, struct dlm_slot *slots,
+ int num_slots, int our_slot, uint32_t generation);
+};
+
+/*
+ * dlm_new_lockspace
+ *
+ * Create/join a lockspace.
+ *
+ * name: lockspace name, null terminated, up to DLM_LOCKSPACE_LEN (not
+ * including terminating null).
+ *
+ * cluster: cluster name, null terminated, up to DLM_LOCKSPACE_LEN (not
+ * including terminating null). Optional. When cluster is null, it
+ * is not used. When set, dlm_new_lockspace() returns -EBADR if cluster
+ * is not equal to the dlm cluster name.
+ *
+ * flags:
+ * DLM_LSFL_NODIR
+ * The dlm should not use a resource directory, but statically assign
+ * resource mastery to nodes based on the name hash that is otherwise
+ * used to select the directory node. Must be the same on all nodes.
+ * DLM_LSFL_TIMEWARN
+ * The dlm should emit netlink messages if locks have been waiting
+ * for a configurable amount of time. (Unused.)
+ * DLM_LSFL_FS
+ * The lockspace user is in the kernel (i.e. filesystem). Enables
+ * direct bast/cast callbacks.
+ * DLM_LSFL_NEWEXCL
+ * dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ *
+ * lvblen: length of lvb in bytes. Must be multiple of 8.
+ * dlm_new_lockspace() returns an error if this does not match
+ * what other nodes are using.
+ *
+ * ops: callbacks that indicate lockspace recovery points so the
+ * caller can coordinate its recovery and know lockspace members.
+ * This is only used by the initial dlm_new_lockspace() call.
+ * Optional.
+ *
+ * ops_arg: arg for ops callbacks.
+ *
+ * ops_result: tells caller if the ops callbacks (if provided) will
+ * be used or not. 0: will be used, -EXXX will not be used.
+ * -EOPNOTSUPP: the dlm does not have recovery_callbacks enabled.
+ *
+ * lockspace: handle for dlm functions
+ */
+
+int dlm_new_lockspace(const char *name, const char *cluster,
+ uint32_t flags, int lvblen,
+ const struct dlm_lockspace_ops *ops, void *ops_arg,
+ int *ops_result, dlm_lockspace_t **lockspace);
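+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the original
+ * header): a kernel filesystem joining a lockspace might do
+ *
+ *	dlm_lockspace_t *ls;
+ *	int error;
+ *
+ *	error = dlm_new_lockspace("myfs", NULL, DLM_LSFL_FS, 32,
+ *				  NULL, NULL, NULL, &ls);
+ *	if (error)
+ *		return error;
+ */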
+
+/*
+ * dlm_release_lockspace
+ *
+ * Stop a lockspace.
+ */
+
+int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
+
+/*
+ * dlm_lock
+ *
+ * Make an asynchronous request to acquire or convert a lock on a named
+ * resource.
+ *
+ * lockspace: context for the request
+ * mode: the requested mode of the lock (DLM_LOCK_)
+ * lksb: lock status block for input and async return values
+ * flags: input flags (DLM_LKF_)
+ * name: name of the resource to lock, can be binary
+ * namelen: the length in bytes of the resource name (MAX_RESNAME_LEN)
+ * parent: the lock ID of a parent lock or 0 if none
+ * lockast: function DLM executes when it completes processing the request
+ * astarg: argument passed to lockast and bast functions
+ * bast: function DLM executes when this lock later blocks another request
+ *
+ * Returns:
+ * 0 if request is successfully queued for processing
+ * -EINVAL if any input parameters are invalid
+ * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE
+ * -ENOMEM if there is no memory to process request
+ * -ENOTCONN if there is a communication error
+ *
+ * If the call to dlm_lock returns an error then the operation has failed and
+ * the AST routine will not be called. If dlm_lock returns 0 it is still
+ * possible that the lock operation will fail. The AST routine will be called
+ * when the locking is complete and the status is returned in the lksb.
+ *
+ * If the AST routines or parameters are passed to a conversion operation then
+ * they will overwrite those values that were passed to a previous dlm_lock
+ * call.
+ *
+ * AST routines should not block (at least not for long), but may make
+ * any locking calls they please.
+ */
+
+int dlm_lock(dlm_lockspace_t *lockspace,
+ int mode,
+ struct dlm_lksb *lksb,
+ uint32_t flags,
+ void *name,
+ unsigned int namelen,
+ uint32_t parent_lkid,
+ void (*lockast) (void *astarg),
+ void *astarg,
+ void (*bast) (void *astarg, int mode));
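+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the original
+ * header): requesting an exclusive lock on a named resource
+ *
+ *	error = dlm_lock(ls, DLM_LOCK_EX, &lksb, DLM_LKF_NOQUEUE,
+ *			 "resource1", 9, 0, my_ast, my_obj, my_bast);
+ *
+ * where my_ast()/my_bast() match the callback prototypes above and the
+ * completion status is later found in lksb.sb_status.
+ */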
+
+/*
+ * dlm_unlock
+ *
+ * Asynchronously release a lock on a resource. The AST routine is called
+ * when the resource is successfully unlocked.
+ *
+ * lockspace: context for the request
+ * lkid: the lock ID as returned in the lksb
+ * flags: input flags (DLM_LKF_)
+ * lksb: if NULL the lksb parameter passed to last lock request is used
+ * astarg: the arg used with the completion ast for the unlock
+ *
+ * Returns:
+ * 0 if request is successfully queued for processing
+ * -EINVAL if any input parameters are invalid
+ * -ENOTEMPTY if the lock still has sublocks
+ * -EBUSY if the lock is waiting for a remote lock operation
+ * -ENOTCONN if there is a communication error
+ */
+
+int dlm_unlock(dlm_lockspace_t *lockspace,
+ uint32_t lkid,
+ uint32_t flags,
+ struct dlm_lksb *lksb,
+ void *astarg);
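+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the original
+ * header): releasing a lock acquired with dlm_lock() above
+ *
+ *	error = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, my_obj);
+ */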
+
+#endif /* __DLM_DOT_H__ */
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
new file mode 100644
index 000000000..95ad387a7
--- /dev/null
+++ b/include/linux/dlm_plock.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ */
+#ifndef __DLM_PLOCK_DOT_H__
+#define __DLM_PLOCK_DOT_H__
+
+#include <uapi/linux/dlm_plock.h>
+
+int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ int cmd, struct file_lock *fl);
+int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl);
+int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl);
+#endif
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
new file mode 100644
index 000000000..7084503c3
--- /dev/null
+++ b/include/linux/dm-dirty-log.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper dirty region log.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef _LINUX_DM_DIRTY_LOG
+#define _LINUX_DM_DIRTY_LOG
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+#include <linux/device-mapper.h>
+
+typedef sector_t region_t;
+
+struct dm_dirty_log_type;
+
+struct dm_dirty_log {
+ struct dm_dirty_log_type *type;
+ int (*flush_callback_fn)(struct dm_target *ti);
+ void *context;
+};
+
+struct dm_dirty_log_type {
+ const char *name;
+ struct module *module;
+
+ /* For internal device-mapper use */
+ struct list_head list;
+
+ int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
+ unsigned argc, char **argv);
+ void (*dtr)(struct dm_dirty_log *log);
+
+ /*
+ * There are times when we don't want the log to touch
+ * the disk.
+ */
+ int (*presuspend)(struct dm_dirty_log *log);
+ int (*postsuspend)(struct dm_dirty_log *log);
+ int (*resume)(struct dm_dirty_log *log);
+
+ /*
+ * Retrieves the smallest size of region that the log can
+ * deal with.
+ */
+ uint32_t (*get_region_size)(struct dm_dirty_log *log);
+
+ /*
+ * A predicate to say whether a region is clean or not.
+ * May block.
+ */
+ int (*is_clean)(struct dm_dirty_log *log, region_t region);
+
+ /*
+ * Returns: 0, 1, -EWOULDBLOCK, < 0
+ *
+ * A predicate function to check the area given by
+ * [sector, sector + len) is in sync.
+ *
+ * If -EWOULDBLOCK is returned the state of the region is
+ * unknown, typically this will result in a read being
+ * passed to a daemon to deal with, since a daemon is
+ * allowed to block.
+ */
+ int (*in_sync)(struct dm_dirty_log *log, region_t region,
+ int can_block);
+
+ /*
+ * Flush the current log state (eg, to disk). This
+ * function may block.
+ */
+ int (*flush)(struct dm_dirty_log *log);
+
+ /*
+ * Mark an area as clean or dirty. These functions may
+ * block, though for performance reasons blocking should
+ * be extremely rare (eg, allocating another chunk of
+ * memory for some reason).
+ */
+ void (*mark_region)(struct dm_dirty_log *log, region_t region);
+ void (*clear_region)(struct dm_dirty_log *log, region_t region);
+
+ /*
+ * Returns: <0 (error), 0 (no region), 1 (region)
+ *
+ * The mirror daemon will need to perform recovery on regions of
+ * the mirror that are in the NOSYNC state.  This
+ * function asks the log to tell the caller about the
+ * next region that this machine should recover.
+ *
+ * Do not confuse this function with 'in_sync()': one
+ * tells you if an area is synchronised, the other
+ * assigns recovery work.
+ */
+ int (*get_resync_work)(struct dm_dirty_log *log, region_t *region);
+
+ /*
+ * This notifies the log that the resync status of a region
+ * has changed. It also clears the region from the recovering
+ * list (if present).
+ */
+ void (*set_region_sync)(struct dm_dirty_log *log,
+ region_t region, int in_sync);
+
+ /*
+ * Returns the number of regions that are in sync.
+ */
+ region_t (*get_sync_count)(struct dm_dirty_log *log);
+
+ /*
+ * Support function for mirror status requests.
+ */
+ int (*status)(struct dm_dirty_log *log, status_type_t status_type,
+ char *result, unsigned maxlen);
+
+ /*
+ * is_remote_recovering is necessary for cluster mirroring. It provides
+ * a way to detect recovery on another node, so we aren't writing
+ * concurrently. This function is likely to block (when a cluster log
+ * is used).
+ *
+ * Returns: 0, 1
+ */
+ int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
+};
+
+int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
+int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
+
+/*
+ * Make sure you use these two functions, rather than calling
+ * type->constructor/destructor() directly.
+ */
+struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
+ struct dm_target *ti,
+ int (*flush_callback_fn)(struct dm_target *ti),
+ unsigned argc, char **argv);
+void dm_dirty_log_destroy(struct dm_dirty_log *log);
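+
+/*
+ * Illustrative sketch (hypothetical target, not part of the original
+ * header): creating and destroying an in-core log with a 1024-sector
+ * region size (the "core" type name and its argument are assumptions here)
+ *
+ *	char *log_argv[] = { "1024" };
+ *	struct dm_dirty_log *log;
+ *
+ *	log = dm_dirty_log_create("core", ti, NULL, 1, log_argv);
+ *	if (!log)
+ *		return -EINVAL;
+ *	...
+ *	dm_dirty_log_destroy(log);
+ */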
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_DM_DIRTY_LOG_H */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
new file mode 100644
index 000000000..a68cbe59e
--- /dev/null
+++ b/include/linux/dm-io.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2003 Sistina Software
+ * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper low-level I/O.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_IO_H
+#define _LINUX_DM_IO_H
+
+#ifdef __KERNEL__
+
+#include <linux/types.h>
+
+struct dm_io_region {
+ struct block_device *bdev;
+ sector_t sector;
+ sector_t count; /* If this is zero the region is ignored. */
+};
+
+struct page_list {
+ struct page_list *next;
+ struct page *page;
+};
+
+typedef void (*io_notify_fn)(unsigned long error, void *context);
+
+enum dm_io_mem_type {
+ DM_IO_PAGE_LIST,/* Page list */
+ DM_IO_BIO, /* Bio vector */
+ DM_IO_VMA, /* Virtual memory area */
+ DM_IO_KMEM, /* Kernel memory */
+};
+
+struct dm_io_memory {
+ enum dm_io_mem_type type;
+
+ unsigned offset;
+
+ union {
+ struct page_list *pl;
+ struct bio *bio;
+ void *vma;
+ void *addr;
+ } ptr;
+};
+
+struct dm_io_notify {
+ io_notify_fn fn; /* Callback for asynchronous requests */
+ void *context; /* Passed to callback */
+};
+
+/*
+ * IO request structure
+ */
+struct dm_io_client;
+struct dm_io_request {
+ int bi_rw; /* READ|WRITE - not READA */
+ struct dm_io_memory mem; /* Memory to use for io */
+ struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
+ struct dm_io_client *client; /* Client memory handler */
+};
+
+/*
+ * For async io calls, users can alternatively use the dm_io() function below
+ * and dm_io_client_create() to create private mempools for the client.
+ *
+ * Create/destroy may block.
+ */
+struct dm_io_client *dm_io_client_create(void);
+void dm_io_client_destroy(struct dm_io_client *client);
+
+/*
+ * IO interface using private per-client pools.
+ * Each bit in the optional 'sync_error_bits' bitset indicates whether an
+ * error occurred doing io to the corresponding region.
+ */
+int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+ struct dm_io_region *region, unsigned long *sync_error_bits);
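+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the original
+ * header): a synchronous read of one region into kernel memory
+ *
+ *	struct dm_io_region where = { .bdev = bdev, .sector = 0, .count = 8 };
+ *	struct dm_io_request req = {
+ *		.bi_rw = READ,
+ *		.mem.type = DM_IO_KMEM,
+ *		.mem.ptr.addr = buf,
+ *		.notify.fn = NULL,	/* NULL notify.fn makes the call synchronous */
+ *		.client = client,
+ *	};
+ *
+ *	r = dm_io(&req, 1, &where, NULL);
+ */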
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_DM_IO_H */
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
new file mode 100644
index 000000000..f486d636b
--- /dev/null
+++ b/include/linux/dm-kcopyd.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2001 - 2003 Sistina Software
+ * Copyright (C) 2004 - 2008 Red Hat, Inc. All rights reserved.
+ *
+ * kcopyd provides a simple interface for copying an area of one
+ * block-device to one or more other block-devices, either synchronous
+ * or with an asynchronous completion notification.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef _LINUX_DM_KCOPYD_H
+#define _LINUX_DM_KCOPYD_H
+
+#ifdef __KERNEL__
+
+#include <linux/dm-io.h>
+
+/* FIXME: make this configurable */
+#define DM_KCOPYD_MAX_REGIONS 8
+
+#define DM_KCOPYD_IGNORE_ERROR 1
+
+struct dm_kcopyd_throttle {
+ unsigned throttle;
+ unsigned num_io_jobs;
+ unsigned io_period;
+ unsigned total_period;
+ unsigned last_jiffies;
+};
+
+/*
+ * kcopyd clients that want to support throttling must pass an initialised
+ * dm_kcopyd_throttle struct into dm_kcopyd_client_create().
+ * Two or more clients may share the same instance of this struct between
+ * them if they wish to be throttled as a group.
+ *
+ * This macro also creates a corresponding module parameter to configure
+ * the amount of throttling.
+ */
+#define DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(name, description) \
+static struct dm_kcopyd_throttle dm_kcopyd_throttle = { 100, 0, 0, 0, 0 }; \
+module_param_named(name, dm_kcopyd_throttle.throttle, uint, 0644); \
+MODULE_PARM_DESC(name, description)
+
+/*
+ * To use kcopyd you must first create a dm_kcopyd_client object.
+ * throttle can be NULL if you don't want any throttling.
+ */
+struct dm_kcopyd_client;
+struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
+void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
+
+/*
+ * Submit a copy job to kcopyd. This is built on top of the
+ * client create/destroy functions above.
+ *
+ * read_err is a boolean,
+ * write_err is a bitset, with 1 bit for each destination region
+ */
+typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
+ void *context);
+
+int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+ unsigned num_dests, struct dm_io_region *dests,
+ unsigned flags, dm_kcopyd_notify_fn fn, void *context);
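+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the original
+ * header): copying one source region to one destination with an
+ * asynchronous completion callback
+ *
+ *	kc = dm_kcopyd_client_create(NULL);
+ *	if (IS_ERR(kc))
+ *		return PTR_ERR(kc);
+ *	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_complete, my_context);
+ */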
+
+/*
+ * Prepare a callback and submit it via the kcopyd thread.
+ *
+ * dm_kcopyd_prepare_callback allocates a callback structure and returns it.
+ * It must not be called from interrupt context.
+ * The returned value should be passed into dm_kcopyd_do_callback.
+ *
+ * dm_kcopyd_do_callback submits the callback.
+ * It may be called from interrupt context.
+ * The callback is issued from the kcopyd thread.
+ */
+void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
+ dm_kcopyd_notify_fn fn, void *context);
+void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
+
+int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+ unsigned num_dests, struct dm_io_region *dests,
+ unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_DM_KCOPYD_H */
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
new file mode 100644
index 000000000..9e2a7a401
--- /dev/null
+++ b/include/linux/dm-region-hash.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2003 Sistina Software Limited.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
+ *
+ * Device-Mapper dirty region hash interface.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_REGION_HASH_H
+#define DM_REGION_HASH_H
+
+#include <linux/dm-dirty-log.h>
+
+/*-----------------------------------------------------------------
+ * Region hash
+ *----------------------------------------------------------------*/
+struct dm_region_hash;
+struct dm_region;
+
+/*
+ * States a region can have.
+ */
+enum dm_rh_region_states {
+ DM_RH_CLEAN = 0x01, /* No writes in flight. */
+ DM_RH_DIRTY = 0x02, /* Writes in flight. */
+ DM_RH_NOSYNC = 0x04, /* Out of sync. */
+ DM_RH_RECOVERING = 0x08, /* Under resynchronization. */
+};
+
+/*
+ * Region hash create/destroy.
+ */
+struct bio_list;
+struct dm_region_hash *dm_region_hash_create(
+ void *context, void (*dispatch_bios)(void *context,
+ struct bio_list *bios),
+ void (*wakeup_workers)(void *context),
+ void (*wakeup_all_recovery_waiters)(void *context),
+ sector_t target_begin, unsigned max_recovery,
+ struct dm_dirty_log *log, uint32_t region_size,
+ region_t nr_regions);
+void dm_region_hash_destroy(struct dm_region_hash *rh);
+
+struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh);
+
+/*
+ * Conversion functions.
+ */
+region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
+sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
+void *dm_rh_region_context(struct dm_region *reg);
+
+/*
+ * Get region size and key (ie. number of the region).
+ */
+sector_t dm_rh_get_region_size(struct dm_region_hash *rh);
+region_t dm_rh_get_region_key(struct dm_region *reg);
+
+/*
+ * Get/set/update region state (and dirty log).
+ *
+ */
+int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
+void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
+ enum dm_rh_region_states state, int may_block);
+
+/* Non-zero errors_handled leaves the state of the region NOSYNC */
+void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled);
+
+/* Flush the region hash and dirty log. */
+int dm_rh_flush(struct dm_region_hash *rh);
+
+/* Inc/dec pending count on regions. */
+void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios);
+void dm_rh_dec(struct dm_region_hash *rh, region_t region);
+
+/* Delay bios on regions. */
+void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
+
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
+
+/*
+ * Region recovery control.
+ */
+
+/* Prepare some regions for recovery by starting to quiesce them. */
+void dm_rh_recovery_prepare(struct dm_region_hash *rh);
+
+/* Try fetching a quiesced region for recovery. */
+struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh);
+
+/* Report recovery end on a region. */
+void dm_rh_recovery_end(struct dm_region *reg, int error);
+
+/* Returns number of regions with recovery work outstanding. */
+int dm_rh_recovery_in_flight(struct dm_region_hash *rh);
+
+/* Start/stop recovery. */
+void dm_rh_start_recovery(struct dm_region_hash *rh);
+void dm_rh_stop_recovery(struct dm_region_hash *rh);
+
+#endif /* DM_REGION_HASH_H */
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
new file mode 100644
index 000000000..841925fbf
--- /dev/null
+++ b/include/linux/dm9000.h
@@ -0,0 +1,42 @@
+/* include/linux/dm9000.h
+ *
+ * Copyright (c) 2004 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Header file for dm9000 platform data
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#ifndef __DM9000_PLATFORM_DATA
+#define __DM9000_PLATFORM_DATA __FILE__
+
+#include <linux/if_ether.h>
+
+/* IO control flags */
+
+#define DM9000_PLATF_8BITONLY (0x0001)
+#define DM9000_PLATF_16BITONLY (0x0002)
+#define DM9000_PLATF_32BITONLY (0x0004)
+#define DM9000_PLATF_EXT_PHY (0x0008)
+#define DM9000_PLATF_NO_EEPROM (0x0010)
+#define DM9000_PLATF_SIMPLE_PHY (0x0020) /* Use NSR to find LinkStatus */
+
+/* platform data for platform device structure's platform_data field */
+
+struct dm9000_plat_data {
+ unsigned int flags;
+ unsigned char dev_addr[ETH_ALEN];
+
+ /* allow replacement IO routines */
+
+ void (*inblk)(void __iomem *reg, void *data, int len);
+ void (*outblk)(void __iomem *reg, void *data, int len);
+ void (*dumpblk)(void __iomem *reg, int len);
+};
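+
+/*
+ * Illustrative sketch (hypothetical board file, not part of the original
+ * header): a 16-bit-only board without an EEPROM would pass
+ *
+ *	static struct dm9000_plat_data foo_dm9000_pdata = {
+ *		.flags = DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM,
+ *	};
+ *
+ * via the platform device's platform_data field.
+ */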
+
+#endif /* __DM9000_PLATFORM_DATA */
+
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
new file mode 100644
index 000000000..c8e1831d7
--- /dev/null
+++ b/include/linux/dma-attrs.h
@@ -0,0 +1,80 @@
+#ifndef _DMA_ATTR_H
+#define _DMA_ATTR_H
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+
+/**
+ * an enum dma_attr represents an attribute associated with a DMA
+ * mapping. The semantics of each attribute should be defined in
+ * Documentation/DMA-attributes.txt.
+ */
+enum dma_attr {
+ DMA_ATTR_WRITE_BARRIER,
+ DMA_ATTR_WEAK_ORDERING,
+ DMA_ATTR_WRITE_COMBINE,
+ DMA_ATTR_NON_CONSISTENT,
+ DMA_ATTR_NO_KERNEL_MAPPING,
+ DMA_ATTR_SKIP_CPU_SYNC,
+ DMA_ATTR_FORCE_CONTIGUOUS,
+ DMA_ATTR_MAX,
+};
+
+#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
+
+/**
+ * struct dma_attrs - an opaque container for DMA attributes
+ * @flags - bitmask representing a collection of enum dma_attr
+ */
+struct dma_attrs {
+ unsigned long flags[__DMA_ATTRS_LONGS];
+};
+
+#define DEFINE_DMA_ATTRS(x) \
+ struct dma_attrs x = { \
+ .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \
+ }
+
+static inline void init_dma_attrs(struct dma_attrs *attrs)
+{
+ bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
+}
+
+#ifdef CONFIG_HAVE_DMA_ATTRS
+/**
+ * dma_set_attr - set a specific attribute
+ * @attr: attribute to set
+ * @attrs: struct dma_attrs (may be NULL)
+ */
+static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+ if (attrs == NULL)
+ return;
+ BUG_ON(attr >= DMA_ATTR_MAX);
+ __set_bit(attr, attrs->flags);
+}
+
+/**
+ * dma_get_attr - check for a specific attribute
+ * @attr: attribute to check for
+ * @attrs: struct dma_attrs (may be NULL)
+ */
+static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+ if (attrs == NULL)
+ return 0;
+ BUG_ON(attr >= DMA_ATTR_MAX);
+ return test_bit(attr, attrs->flags);
+}
+#else /* !CONFIG_HAVE_DMA_ATTRS */
+static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+}
+
+static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_DMA_ATTRS */
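+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the original
+ * header): attributes are normally built on the stack and handed to the
+ * *_attrs variants of the DMA mapping calls, e.g.
+ *
+ *	DEFINE_DMA_ATTRS(attrs);
+ *
+ *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ *	cpu_addr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
+ */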
+#endif /* _DMA_ATTR_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
new file mode 100644
index 000000000..2f0b431b7
--- /dev/null
+++ b/include/linux/dma-buf.h
@@ -0,0 +1,236 @@
+/*
+ * Header file for dma buffer sharing framework.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to linaro-mm-sig list, and specially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __DMA_BUF_H__
+#define __DMA_BUF_H__
+
+#include <linux/file.h>
+#include <linux/err.h>
+#include <linux/scatterlist.h>
+#include <linux/list.h>
+#include <linux/dma-mapping.h>
+#include <linux/fs.h>
+#include <linux/fence.h>
+#include <linux/wait.h>
+
+struct device;
+struct dma_buf;
+struct dma_buf_attachment;
+
+/**
+ * struct dma_buf_ops - operations possible on struct dma_buf
+ * @attach: [optional] allows different devices to 'attach' themselves to the
+ * given buffer. It might return -EBUSY to signal that backing storage
+ * is already allocated and incompatible with the requirements
+ * of requesting device.
+ * @detach: [optional] detach a given device from this buffer.
+ * @map_dma_buf: returns list of scatter pages allocated, increases usecount
+ * of the buffer. Requires at least one attach to be called
+ * before. Returned sg list should already be mapped into
+ * _device_ address space. This call may sleep. May also return
+ * -EINTR. Should return -EINVAL if attach hasn't been called yet.
+ * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
+ * pages.
+ * @release: release this buffer; to be called after the last dma_buf_put.
+ * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
+ * caches and allocate backing storage (if not yet done)
+ * respectively pin the object into memory.
+ * @end_cpu_access: [optional] called after cpu access to flush caches.
+ * @kmap_atomic: maps a page from the buffer into kernel address
+ * space, users may not block until the subsequent unmap call.
+ * This callback must not sleep.
+ * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
+ * This callback must not sleep.
+ * @kmap: maps a page from the buffer into kernel address space.
+ * @kunmap: [optional] unmaps a page from the buffer.
+ * @mmap: used to expose the backing storage to userspace. Note that the
+ * mapping needs to be coherent - if the exporter doesn't directly
+ * support this, it needs to fake coherency by shooting down any ptes
+ * when transitioning away from the cpu domain.
+ * @vmap: [optional] creates a virtual mapping for the buffer into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @vunmap: [optional] unmaps a vmap from the buffer
+ */
+struct dma_buf_ops {
+ int (*attach)(struct dma_buf *, struct device *,
+ struct dma_buf_attachment *);
+
+ void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
+
+ /* For {map,unmap}_dma_buf below, any specific buffer attributes
+ * required should get added to device_dma_parameters accessible
+ * via dev->dma_params.
+ */
+ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
+ enum dma_data_direction);
+ void (*unmap_dma_buf)(struct dma_buf_attachment *,
+ struct sg_table *,
+ enum dma_data_direction);
+ /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
+ * if the call would block.
+ */
+
+ /* after final dma_buf_put() */
+ void (*release)(struct dma_buf *);
+
+ int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
+ enum dma_data_direction);
+ void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
+ enum dma_data_direction);
+ void *(*kmap_atomic)(struct dma_buf *, unsigned long);
+ void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
+ void *(*kmap)(struct dma_buf *, unsigned long);
+ void (*kunmap)(struct dma_buf *, unsigned long, void *);
+
+ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
+
+ void *(*vmap)(struct dma_buf *);
+ void (*vunmap)(struct dma_buf *, void *vaddr);
+};
+
+/**
+ * struct dma_buf - shared buffer object
+ * @size: size of the buffer
+ * @file: file pointer used for sharing buffers across, and for refcounting.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @ops: dma_buf_ops associated with this buffer object.
+ * @exp_name: name of the exporter; useful for debugging.
+ * @list_node: node for dma_buf accounting and debugging.
+ * @priv: exporter specific private data for this buffer object.
+ * @resv: reservation object linked to this dma-buf
+ */
+struct dma_buf {
+ size_t size;
+ struct file *file;
+ struct list_head attachments;
+ const struct dma_buf_ops *ops;
+ /* mutex to serialize list manipulation, attach/detach and vmap/unmap */
+ struct mutex lock;
+ unsigned vmapping_counter;
+ void *vmap_ptr;
+ const char *exp_name;
+ struct list_head list_node;
+ void *priv;
+ struct reservation_object *resv;
+
+ /* poll support */
+ wait_queue_head_t poll;
+
+ struct dma_buf_poll_cb_t {
+ struct fence_cb cb;
+ wait_queue_head_t *poll;
+
+ unsigned long active;
+ } cb_excl, cb_shared;
+};
+
+/**
+ * struct dma_buf_attachment - holds device-buffer attachment data
+ * @dmabuf: buffer for this attachment.
+ * @dev: device attached to the buffer.
+ * @node: list of dma_buf_attachment.
+ * @priv: exporter specific attachment data.
+ *
+ * This structure holds the attachment information between the dma_buf buffer
+ * and its user device(s). The list contains one attachment struct per device
+ * attached to the buffer.
+ */
+struct dma_buf_attachment {
+ struct dma_buf *dmabuf;
+ struct device *dev;
+ struct list_head node;
+ void *priv;
+};
+
+/**
+ * struct dma_buf_export_info - holds information needed to export a dma_buf
+ * @exp_name: name of the exporting module - useful for debugging.
+ * @ops: Attach allocator-defined dma buf ops to the new buffer
+ * @size: Size of the buffer
+ * @flags: mode flags for the file
+ * @resv: reservation-object, NULL to allocate default one
+ * @priv: Attach private data of allocator to this buffer
+ *
+ * This structure holds the information required to export the buffer. Used
+ * with dma_buf_export() only.
+ */
+struct dma_buf_export_info {
+ const char *exp_name;
+ const struct dma_buf_ops *ops;
+ size_t size;
+ int flags;
+ struct reservation_object *resv;
+ void *priv;
+};
+
+/**
+ * helper macro for exporters; zeros and fills in most common values
+ */
+#define DEFINE_DMA_BUF_EXPORT_INFO(a) \
+ struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME }
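+
+/*
+ * Illustrative sketch (hypothetical exporter, not part of the original
+ * header): exporting a buffer with the helper above
+ *
+ *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ *
+ *	exp_info.ops = &my_dmabuf_ops;
+ *	exp_info.size = my_obj->size;
+ *	exp_info.flags = O_RDWR;
+ *	exp_info.priv = my_obj;
+ *	dmabuf = dma_buf_export(&exp_info);
+ */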
+
+/**
+ * get_dma_buf - convenience wrapper for get_file.
+ * @dmabuf: [in] pointer to dma_buf
+ *
+ * Increments the reference count on the dma-buf, needed by drivers
+ * that create additional references to the dmabuf on the
+ * kernel side. For example, an exporter that needs to keep a dmabuf ptr
+ * so that subsequent exports don't create a new dmabuf.
+ */
+static inline void get_dma_buf(struct dma_buf *dmabuf)
+{
+ get_file(dmabuf->file);
+}
+
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev);
+void dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *dmabuf_attach);
+
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
+
+int dma_buf_fd(struct dma_buf *dmabuf, int flags);
+struct dma_buf *dma_buf_get(int fd);
+void dma_buf_put(struct dma_buf *dmabuf);
+
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
+ enum dma_data_direction);
+void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
+ enum dma_data_direction);
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+ enum dma_data_direction dir);
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+ enum dma_data_direction dir);
+void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
+void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
+void *dma_buf_kmap(struct dma_buf *, unsigned long);
+void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
+
+int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
+ unsigned long);
+void *dma_buf_vmap(struct dma_buf *);
+void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_debugfs_create_file(const char *name,
+ int (*write)(struct seq_file *));
+#endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644
index 000000000..569bbd039
--- /dev/null
+++ b/include/linux/dma-contiguous.h
@@ -0,0 +1,164 @@
+#ifndef __LINUX_CMA_H
+#define __LINUX_CMA_H
+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ * Michal Nazarewicz <mina86@mina86.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the license.
+ */
+
+/*
+ * Contiguous Memory Allocator
+ *
+ * The Contiguous Memory Allocator (CMA) makes it possible to
+ * allocate big contiguous chunks of memory after the system has
+ * booted.
+ *
+ * Why is it needed?
+ *
+ * Various devices on embedded systems have no scatter-gather and/or
+ * IO map support and require contiguous blocks of memory to
+ * operate. They include devices such as cameras, hardware video
+ * coders, etc.
+ *
+ * Such devices often require big memory buffers (a full HD frame
+ * is, for instance, more than 2 megapixels, i.e. more than 6
+ * MB of memory), which makes mechanisms such as kmalloc() or
+ * alloc_page() ineffective.
+ *
+ * At the same time, a solution where a big memory region is
+ * reserved for a device is suboptimal since often more memory is
+ * reserved than is strictly required and, moreover, the memory is
+ * inaccessible to the page allocator even if device drivers don't use it.
+ *
+ * CMA tries to solve this issue by operating on memory regions
+ * where only movable pages can be allocated from. This way, kernel
+ * can use the memory for pagecache and when device driver requests
+ * it, allocated pages can be migrated.
+ *
+ * Driver usage
+ *
+ * CMA should not be used by the device drivers directly. It is
+ * only a helper framework for dma-mapping subsystem.
+ *
+ * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ */
+
+#ifdef __KERNEL__
+
+#include <linux/device.h>
+
+struct cma;
+struct page;
+
+#ifdef CONFIG_DMA_CMA
+
+extern struct cma *dma_contiguous_default_area;
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+ if (dev)
+ dev->cma_area = cma;
+}
+
+static inline void dma_contiguous_set_default(struct cma *cma)
+{
+ dma_contiguous_default_area = cma;
+}
+
+void dma_contiguous_reserve(phys_addr_t addr_limit);
+
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma,
+ bool fixed);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board-specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+
+static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ struct cma *cma;
+ int ret;
+ ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
+ if (ret == 0)
+ dev_set_cma_area(dev, cma);
+
+ return ret;
+}
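+
+/*
+ * Illustrative sketch (hypothetical board code, not part of the original
+ * header): reserving 64 MiB for a specific device during early boot
+ *
+ *	dma_declare_contiguous(&foo_device.dev, SZ_64M, 0, 0);
+ */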
+
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order);
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count);
+
+#else
+
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
+
+static inline void dma_contiguous_set_default(struct cma *cma) { }
+
+static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+
+static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma,
+ bool fixed)
+{
+ return -ENOSYS;
+}
+
+static inline
+int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ return -ENOSYS;
+}
+
+static inline
+struct page *dma_alloc_from_contiguous(struct device *dev, int count,
+ unsigned int order)
+{
+ return NULL;
+}
+
+static inline
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+ int count)
+{
+ return false;
+}
+
+#endif
+
+#endif
+
+#endif
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
new file mode 100644
index 000000000..fe8cb610d
--- /dev/null
+++ b/include/linux/dma-debug.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __DMA_DEBUG_H
+#define __DMA_DEBUG_H
+
+#include <linux/types.h>
+
+struct device;
+struct scatterlist;
+struct bus_type;
+
+#ifdef CONFIG_DMA_API_DEBUG
+
+extern void dma_debug_add_bus(struct bus_type *bus);
+
+extern void dma_debug_init(u32 num_entries);
+
+extern int dma_debug_resize_entries(u32 num_entries);
+
+extern void debug_dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ int direction, dma_addr_t dma_addr,
+ bool map_single);
+
+extern void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, int direction, bool map_single);
+
+extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int mapped_ents, int direction);
+
+extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nelems, int dir);
+
+extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t dma_addr, void *virt);
+
+extern void debug_dma_free_coherent(struct device *dev, size_t size,
+ void *virt, dma_addr_t addr);
+
+extern void debug_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ int direction);
+
+extern void debug_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction);
+
+extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ int direction);
+
+extern void debug_dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size, int direction);
+
+extern void debug_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg,
+ int nelems, int direction);
+
+extern void debug_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg,
+ int nelems, int direction);
+
+extern void debug_dma_dump_mappings(struct device *dev);
+
+extern void debug_dma_assert_idle(struct page *page);
+
+#else /* CONFIG_DMA_API_DEBUG */
+
+static inline void dma_debug_add_bus(struct bus_type *bus)
+{
+}
+
+static inline void dma_debug_init(u32 num_entries)
+{
+}
+
+static inline int dma_debug_resize_entries(u32 num_entries)
+{
+ return 0;
+}
+
+static inline void debug_dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ int direction, dma_addr_t dma_addr,
+ bool map_single)
+{
+}
+
+static inline void debug_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+}
+
+static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, int direction,
+ bool map_single)
+{
+}
+
+static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, int mapped_ents, int direction)
+{
+}
+
+static inline void debug_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sglist,
+ int nelems, int dir)
+{
+}
+
+static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t dma_addr, void *virt)
+{
+}
+
+static inline void debug_dma_free_coherent(struct device *dev, size_t size,
+ void *virt, dma_addr_t addr)
+{
+}
+
+static inline void debug_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+}
+
+static inline void debug_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+}
+
+static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ int direction)
+{
+}
+
+static inline void debug_dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ int direction)
+{
+}
+
+static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+}
+
+static inline void debug_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg,
+ int nelems, int direction)
+{
+}
+
+static inline void debug_dma_dump_mappings(struct device *dev)
+{
+}
+
+static inline void debug_dma_assert_idle(struct page *page)
+{
+}
+
+#endif /* CONFIG_DMA_API_DEBUG */
+
+#endif /* __DMA_DEBUG_H */
diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h
new file mode 100644
index 000000000..95b6a82f5
--- /dev/null
+++ b/include/linux/dma-direction.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_DMA_DIRECTION_H
+#define _LINUX_DMA_DIRECTION_H
+/*
+ * These definitions mirror those in pci.h, so they can be used
+ * interchangeably with their PCI_ counterparts.
+ */
+enum dma_data_direction {
+ DMA_BIDIRECTIONAL = 0,
+ DMA_TO_DEVICE = 1,
+ DMA_FROM_DEVICE = 2,
+ DMA_NONE = 3,
+};
+#endif
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
new file mode 100644
index 000000000..ac07ff090
--- /dev/null
+++ b/include/linux/dma-mapping.h
@@ -0,0 +1,317 @@
+#ifndef _LINUX_DMA_MAPPING_H
+#define _LINUX_DMA_MAPPING_H
+
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-direction.h>
+#include <linux/scatterlist.h>
+
+/*
+ * A dma_addr_t can hold any valid DMA or bus address for the platform.
+ * It can be given to a device to use as a DMA source or target. A CPU cannot
+ * reference a dma_addr_t directly because there may be translation between
+ * its physical address space and the bus address space.
+ */
+struct dma_map_ops {
+ void* (*alloc)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs);
+ void (*free)(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs);
+ int (*mmap)(struct device *, struct vm_area_struct *,
+ void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+
+ int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
+ dma_addr_t, size_t, struct dma_attrs *attrs);
+
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+ /*
+ * map_sg returns 0 on error and a value > 0 on success.
+ * It should never return a value < 0.
+ */
+ int (*map_sg)(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+ void (*unmap_sg)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+ void (*sync_single_for_cpu)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_cpu)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+ void (*sync_sg_for_device)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+ int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
+ int (*dma_supported)(struct device *dev, u64 mask);
+ int (*set_dma_mask)(struct device *dev, u64 mask);
+#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
+ u64 (*get_required_mask)(struct device *dev);
+#endif
+ int is_phys;
+};
+
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+#define DMA_MASK_NONE 0x0ULL
+
+static inline int valid_dma_direction(int dma_direction)
+{
+ return ((dma_direction == DMA_BIDIRECTIONAL) ||
+ (dma_direction == DMA_TO_DEVICE) ||
+ (dma_direction == DMA_FROM_DEVICE));
+}
+
+static inline int is_device_dma_capable(struct device *dev)
+{
+ return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
+}
+
+#ifdef CONFIG_HAS_DMA
+#include <asm/dma-mapping.h>
+#else
+#include <asm-generic/dma-mapping-broken.h>
+#endif
+
+static inline u64 dma_get_mask(struct device *dev)
+{
+ if (dev && dev->dma_mask && *dev->dma_mask)
+ return *dev->dma_mask;
+ return DMA_BIT_MASK(32);
+}
+
+#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask);
+#else
+static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ if (!dma_supported(dev, mask))
+ return -EIO;
+ dev->coherent_dma_mask = mask;
+ return 0;
+}
+#endif
+
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same as, or a smaller value than, the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+ int rc = dma_set_mask(dev, mask);
+ if (rc == 0)
+ dma_set_coherent_mask(dev, mask);
+ return rc;
+}
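+
+/*
+ * Illustrative sketch (hypothetical driver probe, not part of the original
+ * header): enabling 64-bit DMA with a 32-bit fallback
+
+ *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+ *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ *		return -EIO;
+ */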
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately set up.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+ dev->dma_mask = &dev->coherent_dma_mask;
+ return dma_set_mask_and_coherent(dev, mask);
+}
+
+extern u64 dma_get_required_mask(struct device *dev);
+
+#ifndef arch_setup_dma_ops
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
+ u64 size, struct iommu_ops *iommu,
+ bool coherent) { }
+#endif
+
+#ifndef arch_teardown_dma_ops
+static inline void arch_teardown_dma_ops(struct device *dev) { }
+#endif
+
+static inline unsigned int dma_get_max_seg_size(struct device *dev)
+{
+ return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
+}
+
+static inline unsigned int dma_set_max_seg_size(struct device *dev,
+ unsigned int size)
+{
+ if (dev->dma_parms) {
+ dev->dma_parms->max_segment_size = size;
+ return 0;
+ } else
+ return -EIO;
+}
+
+static inline unsigned long dma_get_seg_boundary(struct device *dev)
+{
+ return dev->dma_parms ?
+ dev->dma_parms->segment_boundary_mask : 0xffffffff;
+}
+
+static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+{
+ if (dev->dma_parms) {
+ dev->dma_parms->segment_boundary_mask = mask;
+ return 0;
+ } else
+ return -EIO;
+}
+
+#ifndef dma_max_pfn
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+ return *dev->dma_mask >> PAGE_SHIFT;
+}
+#endif
+
+static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *ret = dma_alloc_coherent(dev, size, dma_handle,
+ flag | __GFP_ZERO);
+ return ret;
+}
+
+#ifdef CONFIG_HAS_DMA
+static inline int dma_get_cache_alignment(void)
+{
+#ifdef ARCH_DMA_MINALIGN
+ return ARCH_DMA_MINALIGN;
+#endif
+ return 1;
+}
+#endif
+
+/* flags for the coherent memory api */
+#define DMA_MEMORY_MAP 0x01
+#define DMA_MEMORY_IO 0x02
+#define DMA_MEMORY_INCLUDES_CHILDREN 0x04
+#define DMA_MEMORY_EXCLUSIVE 0x08
+
+#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+static inline int
+dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+{
+ return 0;
+}
+
+static inline void
+dma_release_declared_memory(struct device *dev)
+{
+}
+
+static inline void *
+dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size)
+{
+ return ERR_PTR(-EBUSY);
+}
+#endif
+
+/*
+ * Managed DMA API
+ */
+extern void *dmam_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int dmam_declare_coherent_memory(struct device *dev,
+ phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size,
+ int flags);
+extern void dmam_release_declared_memory(struct device *dev);
+#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+static inline int dmam_declare_coherent_memory(struct device *dev,
+ phys_addr_t phys_addr, dma_addr_t device_addr,
+ size_t size, gfp_t gfp)
+{
+ return 0;
+}
+
+static inline void dmam_release_declared_memory(struct device *dev)
+{
+}
+#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+
+#ifndef CONFIG_HAVE_DMA_ATTRS
+struct dma_attrs;
+
+#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
+ dma_map_single(dev, cpu_addr, size, dir)
+
+#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
+ dma_unmap_single(dev, dma_addr, size, dir)
+
+#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
+ dma_map_sg(dev, sgl, nents, dir)
+
+#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
+ dma_unmap_sg(dev, sgl, nents, dir)
+
+#else
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+}
+
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+}
+
+static inline int dma_mmap_writecombine(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
+#endif /* CONFIG_HAVE_DMA_ATTRS */
+
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
+#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
+#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
+#else
+#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
+#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
+#define dma_unmap_addr(PTR, ADDR_NAME) (0)
+#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
+#define dma_unmap_len(PTR, LEN_NAME) (0)
+#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
+#endif
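
The unmap-state macros above let per-buffer bookkeeping compile away entirely when CONFIG_NEED_DMA_MAP_STATE is not set. A small illustrative sketch (the rx_buf structure and helpers are hypothetical) of how a driver embeds them:

struct rx_buf {
	void *data;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* empty when unmap state is unused */
	DEFINE_DMA_UNMAP_LEN(len);
};

static int rx_buf_map(struct device *dev, struct rx_buf *rb, size_t size)
{
	dma_addr_t addr = dma_map_single(dev, rb->data, size, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	dma_unmap_addr_set(rb, mapping, addr);
	dma_unmap_len_set(rb, len, size);
	return 0;
}

static void rx_buf_unmap(struct device *dev, struct rx_buf *rb)
{
	dma_unmap_single(dev, dma_unmap_addr(rb, mapping),
			 dma_unmap_len(rb, len), DMA_FROM_DEVICE);
}
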
+
+#endif
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
new file mode 100644
index 000000000..71456442e
--- /dev/null
+++ b/include/linux/dma/dw.h
@@ -0,0 +1,64 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _DMA_DW_H
+#define _DMA_DW_H
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+#include <linux/platform_data/dma-dw.h>
+
+struct dw_dma;
+
+/**
+ * struct dw_dma_chip - representation of DesignWare DMA controller hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @clk: hclk clock
+ * @dw: struct dw_dma that is filled by dw_dma_probe()
+ */
+struct dw_dma_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ struct clk *clk;
+ struct dw_dma *dw;
+};
+
+/* Export to the platform drivers */
+int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
+int dw_dma_remove(struct dw_dma_chip *chip);
+
+/* DMA API extensions */
+struct dw_desc;
+
+struct dw_cyclic_desc {
+ struct dw_desc **desc;
+ unsigned long periods;
+ void (*period_callback)(void *param);
+ void *period_callback_param;
+};
+
+struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction);
+void dw_dma_cyclic_free(struct dma_chan *chan);
+int dw_dma_cyclic_start(struct dma_chan *chan);
+void dw_dma_cyclic_stop(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
+
+dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
+
+#endif /* _DMA_DW_H */
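
A rough sketch of the platform glue this header is meant for: fill a struct dw_dma_chip from platform resources and hand it to dw_dma_probe(). The resource layout, the "hclk" clock id and passing a NULL pdata (assumed here to mean autoconfiguration) are illustrative assumptions, and clock enabling and error unwinding are abbreviated.

static int my_dw_probe(struct platform_device *pdev)
{
	struct dw_dma_chip *chip;
	struct resource *mem;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->dev = &pdev->dev;
	chip->irq = platform_get_irq(pdev, 0);
	if (chip->irq < 0)
		return chip->irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	chip->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	chip->clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(chip->clk))
		return PTR_ERR(chip->clk);

	/* clk_prepare_enable() and further glue setup omitted for brevity. */
	platform_set_drvdata(pdev, chip);
	return dw_dma_probe(chip, NULL);	/* NULL pdata: assumed autoconfig */
}

static int my_dw_remove(struct platform_device *pdev)
{
	return dw_dma_remove(platform_get_drvdata(pdev));
}
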
diff --git a/include/linux/dma/hsu.h b/include/linux/dma/hsu.h
new file mode 100644
index 000000000..234393a69
--- /dev/null
+++ b/include/linux/dma/hsu.h
@@ -0,0 +1,48 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _DMA_HSU_H
+#define _DMA_HSU_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#include <linux/platform_data/dma-hsu.h>
+
+struct hsu_dma;
+
+/**
+ * struct hsu_dma_chip - representation of HSU DMA hardware
+ * @dev: struct device of the DMA controller
+ * @irq: irq line
+ * @regs: memory mapped I/O space
+ * @length: I/O space length
+ * @offset: offset of the I/O space where registers are located
+ * @hsu: struct hsu_dma that is filled by ->probe()
+ * @pdata: platform data for the DMA controller if provided
+ */
+struct hsu_dma_chip {
+ struct device *dev;
+ int irq;
+ void __iomem *regs;
+ unsigned int length;
+ unsigned int offset;
+ struct hsu_dma *hsu;
+ struct hsu_dma_platform_data *pdata;
+};
+
+/* Export to the internal users */
+irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);
+
+/* Export to the platform drivers */
+int hsu_dma_probe(struct hsu_dma_chip *chip);
+int hsu_dma_remove(struct hsu_dma_chip *chip);
+
+#endif /* _DMA_HSU_H */
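
A sketch of the interrupt plumbing implied above: the serial/PCI glue driver fills dev, irq and regs, calls hsu_dma_probe(chip) once at probe time, and forwards its interrupt to hsu_dma_irq() per channel. The channel count of 6 and the naming are assumptions about a particular platform, not something this header guarantees.

static irqreturn_t my_hsu_isr(int irq, void *dev_id)
{
	struct hsu_dma_chip *chip = dev_id;
	irqreturn_t ret = IRQ_NONE;
	unsigned short nr;

	/* Poll every DMA channel the controller exposes. */
	for (nr = 0; nr < 6; nr++)
		if (hsu_dma_irq(chip, nr) == IRQ_HANDLED)
			ret = IRQ_HANDLED;

	return ret;
}
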
diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h
new file mode 100644
index 000000000..18031115c
--- /dev/null
+++ b/include/linux/dma/ipu-dma.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2008
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * Copyright (C) 2005-2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DMA_IPU_DMA_H
+#define __LINUX_DMA_IPU_DMA_H
+
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+
+/* IPU DMA Controller channel definitions. */
+enum ipu_channel {
+ IDMAC_IC_0 = 0, /* IC (encoding task) to memory */
+ IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */
+ IDMAC_ADC_0 = 1,
+ IDMAC_IC_2 = 2,
+ IDMAC_ADC_1 = 2,
+ IDMAC_IC_3 = 3,
+ IDMAC_IC_4 = 4,
+ IDMAC_IC_5 = 5,
+ IDMAC_IC_6 = 6,
+ IDMAC_IC_7 = 7, /* IC (sensor data) to memory */
+ IDMAC_IC_8 = 8,
+ IDMAC_IC_9 = 9,
+ IDMAC_IC_10 = 10,
+ IDMAC_IC_11 = 11,
+ IDMAC_IC_12 = 12,
+ IDMAC_IC_13 = 13,
+ IDMAC_SDC_0 = 14, /* Background synchronous display data */
+ IDMAC_SDC_1 = 15, /* Foreground data (overlay) */
+ IDMAC_SDC_2 = 16,
+ IDMAC_SDC_3 = 17,
+ IDMAC_ADC_2 = 18,
+ IDMAC_ADC_3 = 19,
+ IDMAC_ADC_4 = 20,
+ IDMAC_ADC_5 = 21,
+ IDMAC_ADC_6 = 22,
+ IDMAC_ADC_7 = 23,
+ IDMAC_PF_0 = 24,
+ IDMAC_PF_1 = 25,
+ IDMAC_PF_2 = 26,
+ IDMAC_PF_3 = 27,
+ IDMAC_PF_4 = 28,
+ IDMAC_PF_5 = 29,
+ IDMAC_PF_6 = 30,
+ IDMAC_PF_7 = 31,
+};
+
+/* Order significant! */
+enum ipu_channel_status {
+ IPU_CHANNEL_FREE,
+ IPU_CHANNEL_INITIALIZED,
+ IPU_CHANNEL_READY,
+ IPU_CHANNEL_ENABLED,
+};
+
+#define IPU_CHANNELS_NUM 32
+
+enum pixel_fmt {
+ /* 1 byte */
+ IPU_PIX_FMT_GENERIC,
+ IPU_PIX_FMT_RGB332,
+ IPU_PIX_FMT_YUV420P,
+ IPU_PIX_FMT_YUV422P,
+ IPU_PIX_FMT_YUV420P2,
+ IPU_PIX_FMT_YVU422P,
+ /* 2 bytes */
+ IPU_PIX_FMT_RGB565,
+ IPU_PIX_FMT_RGB666,
+ IPU_PIX_FMT_BGR666,
+ IPU_PIX_FMT_YUYV,
+ IPU_PIX_FMT_UYVY,
+ /* 3 bytes */
+ IPU_PIX_FMT_RGB24,
+ IPU_PIX_FMT_BGR24,
+ /* 4 bytes */
+ IPU_PIX_FMT_GENERIC_32,
+ IPU_PIX_FMT_RGB32,
+ IPU_PIX_FMT_BGR32,
+ IPU_PIX_FMT_ABGR32,
+ IPU_PIX_FMT_BGRA32,
+ IPU_PIX_FMT_RGBA32,
+};
+
+enum ipu_color_space {
+ IPU_COLORSPACE_RGB,
+ IPU_COLORSPACE_YCBCR,
+ IPU_COLORSPACE_YUV
+};
+
+/*
+ * Enumeration of IPU rotation modes
+ */
+enum ipu_rotate_mode {
+ /* Note the enum values correspond to BAM value */
+ IPU_ROTATE_NONE = 0,
+ IPU_ROTATE_VERT_FLIP = 1,
+ IPU_ROTATE_HORIZ_FLIP = 2,
+ IPU_ROTATE_180 = 3,
+ IPU_ROTATE_90_RIGHT = 4,
+ IPU_ROTATE_90_RIGHT_VFLIP = 5,
+ IPU_ROTATE_90_RIGHT_HFLIP = 6,
+ IPU_ROTATE_90_LEFT = 7,
+};
+
+/*
+ * Enumeration of DI ports for ADC.
+ */
+enum display_port {
+ DISP0,
+ DISP1,
+ DISP2,
+ DISP3
+};
+
+struct idmac_video_param {
+ unsigned short in_width;
+ unsigned short in_height;
+ uint32_t in_pixel_fmt;
+ unsigned short out_width;
+ unsigned short out_height;
+ uint32_t out_pixel_fmt;
+ unsigned short out_stride;
+ bool graphics_combine_en;
+ bool global_alpha_en;
+ bool key_color_en;
+ enum display_port disp;
+ unsigned short out_left;
+ unsigned short out_top;
+};
+
+/*
+ * Union of initialization parameters for a logical channel. So far only video
+ * parameters are used.
+ */
+union ipu_channel_param {
+ struct idmac_video_param video;
+};
+
+struct idmac_tx_desc {
+ struct dma_async_tx_descriptor txd;
+ struct scatterlist *sg; /* scatterlist for this */
+ unsigned int sg_len; /* tx-descriptor. */
+ struct list_head list;
+};
+
+struct idmac_channel {
+ struct dma_chan dma_chan;
+ dma_cookie_t completed; /* last completed cookie */
+ union ipu_channel_param params;
+ enum ipu_channel link; /* input channel, linked to the output */
+ enum ipu_channel_status status;
+ void *client; /* Only one client per channel */
+ unsigned int n_tx_desc;
+ struct idmac_tx_desc *desc; /* allocated tx-descriptors */
+ struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */
+ struct list_head free_list; /* free tx-descriptors */
+ struct list_head queue; /* queued tx-descriptors */
+ spinlock_t lock; /* protects sg[0,1], queue */
+ struct mutex chan_mutex; /* protects status, cookie, free_list */
+ bool sec_chan_en;
+ int active_buffer;
+ unsigned int eof_irq;
+ char eof_name[16]; /* EOF IRQ name for request_irq() */
+};
+
+#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
+#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan)
+
+#endif /* __LINUX_DMA_IPU_DMA_H */
diff --git a/include/linux/dma/mmp-pdma.h b/include/linux/dma/mmp-pdma.h
new file mode 100644
index 000000000..2dc9b2bc1
--- /dev/null
+++ b/include/linux/dma/mmp-pdma.h
@@ -0,0 +1,15 @@
+#ifndef _MMP_PDMA_H_
+#define _MMP_PDMA_H_
+
+struct dma_chan;
+
+#ifdef CONFIG_MMP_PDMA
+bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param);
+#else
+static inline bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ return false;
+}
+#endif
+
+#endif /* _MMP_PDMA_H_ */
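
mmp_pdma_filter_fn() is meant to be plugged into dma_request_channel() (declared in linux/dmaengine.h). A minimal sketch follows; treating the filter parameter as a pointer to a request-line number is an assumption about the mmp_pdma driver, not something this header states.

static struct dma_chan *my_get_pdma_chan(void)
{
	dma_cap_mask_t mask;
	int requestor = 25;	/* hypothetical peripheral request line */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter is called synchronously, so a stack variable is fine. */
	return dma_request_channel(mask, mmp_pdma_filter_fn, &requestor);
}
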
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
new file mode 100644
index 000000000..34b98f276
--- /dev/null
+++ b/include/linux/dma/xilinx_dma.h
@@ -0,0 +1,47 @@
+/*
+ * Xilinx DMA Engine drivers support header file
+ *
+ * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DMA_XILINX_DMA_H
+#define __DMA_XILINX_DMA_H
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+/**
+ * struct xilinx_vdma_config - VDMA Configuration structure
+ * @frm_dly: Frame delay
+ * @gen_lock: Whether in gen-lock mode
+ * @master: Master that it syncs to
+ * @frm_cnt_en: Enable frame count
+ * @park: Whether to park

+ * @park_frm: Frame to park on
+ * @coalesc: Interrupt coalescing threshold
+ * @delay: Delay counter
+ * @reset: Reset Channel
+ * @ext_fsync: External Frame Sync source
+ */
+struct xilinx_vdma_config {
+ int frm_dly;
+ int gen_lock;
+ int master;
+ int frm_cnt_en;
+ int park;
+ int park_frm;
+ int coalesc;
+ int delay;
+ int reset;
+ int ext_fsync;
+};
+
+int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
+ struct xilinx_vdma_config *cfg);
+
+#endif
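
A sketch of how a video driver might use the structure above; all field values are illustrative rather than recommended settings.

static int my_vdma_setup(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.gen_lock	= 0,	/* free-running, no gen-lock master */
		.frm_cnt_en	= 1,
		.park		= 1,	/* keep replaying one frame */
		.park_frm	= 0,
		.coalesc	= 1,	/* interrupt after every frame */
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}
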
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
new file mode 100644
index 000000000..7ac17f572
--- /dev/null
+++ b/include/linux/dma_remapping.h
@@ -0,0 +1,48 @@
+#ifndef _DMA_REMAPPING_H
+#define _DMA_REMAPPING_H
+
+/*
+ * VT-d hardware uses 4KiB page size regardless of host page size.
+ */
+#define VTD_PAGE_SHIFT (12)
+#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+
+#define VTD_STRIDE_SHIFT (9)
+#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
+
+#define DMA_PTE_READ (1)
+#define DMA_PTE_WRITE (2)
+#define DMA_PTE_LARGE_PAGE (1 << 7)
+#define DMA_PTE_SNP (1 << 11)
+
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1
+#define CONTEXT_TT_PASS_THROUGH 2
+
+struct intel_iommu;
+struct dmar_domain;
+struct root_entry;
+
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
+extern int intel_iommu_enabled;
+#else
+static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
+#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
+#endif
+
+
+#endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
new file mode 100644
index 000000000..ad4197572
--- /dev/null
+++ b/include/linux/dmaengine.h
@@ -0,0 +1,1106 @@
+/*
+ * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+#ifndef LINUX_DMAENGINE_H
+#define LINUX_DMAENGINE_H
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/uio.h>
+#include <linux/bug.h>
+#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
+#include <linux/types.h>
+#include <asm/page.h>
+
+/**
+ * typedef dma_cookie_t - an opaque DMA cookie
+ *
+ * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
+ */
+typedef s32 dma_cookie_t;
+#define DMA_MIN_COOKIE 1
+
+static inline int dma_submit_error(dma_cookie_t cookie)
+{
+ return cookie < 0 ? cookie : 0;
+}
+
+/**
+ * enum dma_status - DMA transaction status
+ * @DMA_COMPLETE: transaction completed
+ * @DMA_IN_PROGRESS: transaction not yet processed
+ * @DMA_PAUSED: transaction is paused
+ * @DMA_ERROR: transaction failed
+ */
+enum dma_status {
+ DMA_COMPLETE,
+ DMA_IN_PROGRESS,
+ DMA_PAUSED,
+ DMA_ERROR,
+};
+
+/**
+ * enum dma_transaction_type - DMA transaction types/indexes
+ *
+ * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is
+ * automatically set as dma devices are registered.
+ */
+enum dma_transaction_type {
+ DMA_MEMCPY,
+ DMA_XOR,
+ DMA_PQ,
+ DMA_XOR_VAL,
+ DMA_PQ_VAL,
+ DMA_INTERRUPT,
+ DMA_SG,
+ DMA_PRIVATE,
+ DMA_ASYNC_TX,
+ DMA_SLAVE,
+ DMA_CYCLIC,
+ DMA_INTERLEAVE,
+/* last transaction type for creation of the capabilities mask */
+ DMA_TX_TYPE_END,
+};
+
+/**
+ * enum dma_transfer_direction - dma transfer mode and direction indicator
+ * @DMA_MEM_TO_MEM: Async/Memcpy mode
+ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
+ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
+ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
+ */
+enum dma_transfer_direction {
+ DMA_MEM_TO_MEM,
+ DMA_MEM_TO_DEV,
+ DMA_DEV_TO_MEM,
+ DMA_DEV_TO_DEV,
+ DMA_TRANS_NONE,
+};
+
+/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is a collection of contiguous bytes to be transferred.
+ * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
+ * ICGs may or may not change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
+ * that when repeated an integral number of times, specifies the transfer.
+ * A transfer template is a specification of a Frame, the number of times
+ * it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would have ready a template for each
+ * type of transfer it is going to need during its lifetime and
+ * set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ * | Frame-1 | Frame-2 | ~ | Frame-'numf' |
+ * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ * == Chunk size
+ * ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ * size_dst := fn(op, size_src), so it doesn't mean much for the destination.
+ * @icg: Number of bytes to jump after last src/dst address of this
+ * chunk and before first src/dst address for next chunk.
+ * Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
+ * Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+ size_t size;
+ size_t icg;
+};
+
+/**
+ * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
+ * and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ * Otherwise, source is read contiguously (icg ignored).
+ * Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ * Otherwise, destination is filled contiguously (icg ignored).
+ * Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame i.e, size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ enum dma_transfer_direction dir;
+ bool src_inc;
+ bool dst_inc;
+ bool src_sgl;
+ bool dst_sgl;
+ size_t numf;
+ size_t frame_size;
+ struct data_chunk sgl[0];
+};
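
To make the frame/chunk/ICG description above concrete, here is a sketch that builds a template gathering 'h' lines of 'w' bytes each from a strided source into a contiguous destination; the helper name and the kzalloc-based allocation (needs <linux/slab.h>) are illustrative.

static struct dma_interleaved_template *
make_2d_template(dma_addr_t src, dma_addr_t dst, size_t w, size_t h,
		 size_t src_stride)
{
	struct dma_interleaved_template *xt;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start  = src;
	xt->dst_start  = dst;
	xt->dir        = DMA_MEM_TO_MEM;
	xt->src_inc    = true;
	xt->dst_inc    = true;
	xt->src_sgl    = true;	/* honour icg on the source side */
	xt->dst_sgl    = false;	/* destination written contiguously */
	xt->numf       = h;	/* one frame per line */
	xt->frame_size = 1;	/* a frame is a single {chunk,icg} pair */
	xt->sgl[0].size = w;
	xt->sgl[0].icg  = src_stride - w;

	return xt;
}
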
+
+/**
+ * enum dma_ctrl_flags - DMA flags to augment operation preparation,
+ * control completion, and communicate status.
+ * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
+ * this transaction
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
+ * acknowledges receipt, i.e. has had a chance to establish any dependency
+ * chains
+ * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q
+ * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P
+ * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as
+ * sources that were the result of a previous operation, in the case of a PQ
+ * operation it continues the calculation with new sources
+ * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
+ * on the result of this operation
+ */
+enum dma_ctrl_flags {
+ DMA_PREP_INTERRUPT = (1 << 0),
+ DMA_CTRL_ACK = (1 << 1),
+ DMA_PREP_PQ_DISABLE_P = (1 << 2),
+ DMA_PREP_PQ_DISABLE_Q = (1 << 3),
+ DMA_PREP_CONTINUE = (1 << 4),
+ DMA_PREP_FENCE = (1 << 5),
+};
+
+/**
+ * enum sum_check_bits - bit position of pq_check_flags
+ */
+enum sum_check_bits {
+ SUM_CHECK_P = 0,
+ SUM_CHECK_Q = 1,
+};
+
+/**
+ * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations
+ * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise
+ * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise
+ */
+enum sum_check_flags {
+ SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P),
+ SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q),
+};
+
+
+/**
+ * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
+ * See linux/cpumask.h
+ */
+typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
+
+/**
+ * struct dma_chan_percpu - the per-CPU part of struct dma_chan
+ * @memcpy_count: transaction counter
+ * @bytes_transferred: byte counter
+ */
+
+struct dma_chan_percpu {
+ /* stats */
+ unsigned long memcpy_count;
+ unsigned long bytes_transferred;
+};
+
+/**
+ * struct dma_chan - devices supply DMA channels, clients use them
+ * @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @cookie: last cookie value returned to client
+ * @completed_cookie: last completed cookie for this channel
+ * @chan_id: channel ID for sysfs
+ * @dev: class device for sysfs
+ * @device_node: used to add this to the device chan list
+ * @local: per-cpu pointer to a struct dma_chan_percpu
+ * @client_count: how many clients are using this channel
+ * @table_count: number of appearances in the mem-to-mem allocation table
+ * @private: private data for certain client-channel associations
+ */
+struct dma_chan {
+ struct dma_device *device;
+ dma_cookie_t cookie;
+ dma_cookie_t completed_cookie;
+
+ /* sysfs */
+ int chan_id;
+ struct dma_chan_dev *dev;
+
+ struct list_head device_node;
+ struct dma_chan_percpu __percpu *local;
+ int client_count;
+ int table_count;
+ void *private;
+};
+
+/**
+ * struct dma_chan_dev - relate sysfs device node to backing channel device
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @idr_ref: reference count to gate release of dma_device dev_id
+ */
+struct dma_chan_dev {
+ struct dma_chan *chan;
+ struct device device;
+ int dev_id;
+ atomic_t *idr_ref;
+};
+
+/**
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
+ * device, source or target buses
+ */
+enum dma_slave_buswidth {
+ DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+ DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+ DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+ DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
+ DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+ DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+ DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
+ DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
+ DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
+};
+
+/**
+ * struct dma_slave_config - dma slave channel runtime config
+ * @direction: whether the data shall go in or out on this slave
+ * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ * legal values. DEPRECATED, drivers should use the direction argument
+ * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
+ * the dir field in the dma_interleaved_template structure.
+ * @src_addr: this is the physical address where DMA slave data
+ * should be read (RX), if the source is memory this argument is
+ * ignored.
+ * @dst_addr: this is the physical address where DMA slave data
+ * should be written (TX), if the source is memory this argument
+ * is ignored.
+ * @src_addr_width: this is the width in bytes of the source (RX)
+ * register where DMA data shall be read. If the source
+ * is memory this may be ignored depending on architecture.
+ * Legal values: 1, 2, 4, 8.
+ * @dst_addr_width: same as src_addr_width but for destination
+ * target (TX) mutatis mutandis.
+ * @src_maxburst: the maximum number of words (note: words, as in
+ * units of the src_addr_width member, not bytes) that can be sent
+ * in one burst to the device. Typically something like half the
+ * FIFO depth on I/O peripherals so you don't overflow it. This
+ * may or may not be applicable on memory sources.
+ * @dst_maxburst: same as src_maxburst but for destination target
+ * mutatis mutandis.
+ * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
+ * with 'true' if peripheral should be flow controller. Direction will be
+ * selected at Runtime.
+ * @slave_id: Slave requester id. Only valid for slave channels. The dma
+ * slave peripheral will have a unique id as dma requester which needs to be
+ * passed in the slave config.
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+ * The DMA device/engine has to provide support for an additional
+ * callback in the dma_device structure, device_config and this struct
+ * will then be passed in as an argument to the function.
+ *
+ * The rationale for adding configuration information to this struct is as
+ * follows: if it is likely that more than one DMA slave controller in
+ * the world will support the configuration option, then make it generic.
+ * If not: if it is fixed so that it can be sent in static platform
+ * data, then prefer to do that.
+ */
+struct dma_slave_config {
+ enum dma_transfer_direction direction;
+ dma_addr_t src_addr;
+ dma_addr_t dst_addr;
+ enum dma_slave_buswidth src_addr_width;
+ enum dma_slave_buswidth dst_addr_width;
+ u32 src_maxburst;
+ u32 dst_maxburst;
+ bool device_fc;
+ unsigned int slave_id;
+};
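
A short sketch of the runtime configuration described above, for a channel reading from a peripheral RX FIFO; the register address, bus width and burst size are illustrative, and dmaengine_slave_config() is the wrapper defined later in this header.

static int my_setup_rx_chan(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_DEV_TO_MEM,
		.src_addr       = fifo_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst   = 8,	/* e.g. half of a 16-word FIFO */
	};

	return dmaengine_slave_config(chan, &cfg);
}
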
+
+/**
+ * enum dma_residue_granularity - Granularity of the reported transfer residue
+ * @DMA_RESIDUE_GRANULARITY_DESCRIPTOR: Residue reporting is not supported. The
+ * DMA channel is only able to tell whether a descriptor has been completed or
+ * not, which means residue reporting is not supported by this channel. The
+ * residue field of the dma_tx_state structure will always be 0.
+ * @DMA_RESIDUE_GRANULARITY_SEGMENT: Residue is updated after each successfully
+ * completed segment of the transfer (For cyclic transfers this is after each
+ * period). This is typically implemented by having the hardware generate an
+ * interrupt after each transferred segment and then the driver updates the
+ * outstanding residue by the size of the segment. Another possibility is if
+ * the hardware supports scatter-gather and the segment descriptor has a field
+ * which gets set after the segment has been completed. The driver then counts
+ * the number of segments without the flag set to compute the residue.
+ * @DMA_RESIDUE_GRANULARITY_BURST: Residue is updated after each transferred
+ * burst. This is typically only supported if the hardware has a progress
+ * register of some sort (E.g. a register with the current read/write address
+ * or a register with the amount of bursts/beats/bytes that have been
+ * transferred or still need to be transferred).
+ */
+enum dma_residue_granularity {
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
+ DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
+ DMA_RESIDUE_GRANULARITY_BURST = 2,
+};
+
+/* struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of src addr widths the channel supports
+ * @dst_addr_widths: bit mask of dst addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports. Since
+ * the enum dma_transfer_direction is not defined as bits for each type of
+ * direction, the dma controller should fill (1 << <TYPE>) and the same
+ * should be checked by the controller as well
+ * @cmd_pause: true, if pause and thereby resume is supported
+ * @cmd_terminate: true, if terminate cmd is supported
+ * @residue_granularity: granularity of the reported transfer residue
+ */
+struct dma_slave_caps {
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
+ u32 directions;
+ bool cmd_pause;
+ bool cmd_terminate;
+ enum dma_residue_granularity residue_granularity;
+};
+
+static inline const char *dma_chan_name(struct dma_chan *chan)
+{
+ return dev_name(&chan->dev->device);
+}
+
+void dma_chan_cleanup(struct kref *kref);
+
+/**
+ * typedef dma_filter_fn - callback filter for dma_request_channel
+ * @chan: channel to be reviewed
+ * @filter_param: opaque parameter passed through dma_request_channel
+ *
+ * When this optional parameter is specified in a call to dma_request_channel a
+ * suitable channel is passed to this routine for further dispositioning before
+ * being returned. Where 'suitable' indicates a non-busy channel that
+ * satisfies the given capability mask. It returns 'true' to indicate that the
+ * channel is suitable.
+ */
+typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
+
+typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+ u8 map_cnt;
+ u8 to_cnt;
+ u8 from_cnt;
+ u8 bidi_cnt;
+ struct device *dev;
+ struct kref kref;
+ size_t len;
+ dma_addr_t addr[0];
+};
+
+/**
+ * struct dma_async_tx_descriptor - async transaction descriptor
+ * ---dma generic offload fields---
+ * @cookie: tracking cookie for this transaction, set to -EBUSY if
+ * this tx is sitting on a dependency list
+ * @flags: flags to augment operation preparation, control completion, and
+ * communicate status
+ * @phys: physical address of the descriptor
+ * @chan: target channel for this operation
+ * @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * descriptor pending. To be pushed on .issue_pending() call
+ * @callback: routine to call after this operation is complete
+ * @callback_param: general parameter to pass to the callback routine
+ * ---async_tx api specific fields---
+ * @next: at completion submit this descriptor
+ * @parent: pointer to the next level up in the dependency chain
+ * @lock: protect the parent and next pointers
+ */
+struct dma_async_tx_descriptor {
+ dma_cookie_t cookie;
+ enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */
+ dma_addr_t phys;
+ struct dma_chan *chan;
+ dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+ dma_async_tx_callback callback;
+ void *callback_param;
+ struct dmaengine_unmap_data *unmap;
+#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ struct dma_async_tx_descriptor *next;
+ struct dma_async_tx_descriptor *parent;
+ spinlock_t lock;
+#endif
+};
+
+#ifdef CONFIG_DMA_ENGINE
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+ kref_get(&unmap->kref);
+ tx->unmap = unmap;
+}
+
+struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
+#else
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+{
+}
+static inline struct dmaengine_unmap_data *
+dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
+{
+ return NULL;
+}
+static inline void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
+{
+}
+#endif
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+ if (tx->unmap) {
+ dmaengine_unmap_put(tx->unmap);
+ tx->unmap = NULL;
+ }
+}
+
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ BUG();
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return NULL;
+}
+
+#else
+static inline void txd_lock(struct dma_async_tx_descriptor *txd)
+{
+ spin_lock_bh(&txd->lock);
+}
+static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
+{
+ spin_unlock_bh(&txd->lock);
+}
+static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next)
+{
+ txd->next = next;
+ next->parent = txd;
+}
+static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
+{
+ txd->parent = NULL;
+}
+static inline void txd_clear_next(struct dma_async_tx_descriptor *txd)
+{
+ txd->next = NULL;
+}
+static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd)
+{
+ return txd->parent;
+}
+static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd)
+{
+ return txd->next;
+}
+#endif
+
+/**
+ * struct dma_tx_state - filled in to report the status of
+ * a transfer.
+ * @last: last completed DMA cookie
+ * @used: last issued DMA cookie (i.e. the one in progress)
+ * @residue: the remaining number of bytes left to transmit
+ * on the selected transfer for states DMA_IN_PROGRESS and
+ * DMA_PAUSED if this is implemented in the driver, else 0
+ */
+struct dma_tx_state {
+ dma_cookie_t last;
+ dma_cookie_t used;
+ u32 residue;
+};
+
+/**
+ * struct dma_device - info on the entity supplying DMA services
+ * @chancnt: how many DMA channels are supported
+ * @privatecnt: how many DMA channels are requested by dma_request_channel
+ * @channels: the list of struct dma_chan
+ * @global_node: list_head for global dma_device_list
+ * @cap_mask: one or more dma_capability flags
+ * @max_xor: maximum number of xor sources, 0 if no capability
+ * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @dev_id: unique device ID
+ * @dev: struct device reference for dma mapping api
+ * @src_addr_widths: bit mask of src addr widths the device supports
+ * @dst_addr_widths: bit mask of dst addr widths the device supports
+ * @directions: bit mask of slave directions the device supports. Since
+ * the enum dma_transfer_direction is not defined as bits for each type of
+ * direction, the dma controller should fill (1 << <TYPE>) and the same
+ * should be checked by the controller as well
+ * @residue_granularity: granularity of the transfer residue reported
+ * by tx_status
+ * @device_alloc_chan_resources: allocate resources and return the
+ * number of allocated descriptors
+ * @device_free_chan_resources: release DMA channel's resources
+ * @device_prep_dma_memcpy: prepares a memcpy operation
+ * @device_prep_dma_xor: prepares a xor operation
+ * @device_prep_dma_xor_val: prepares a xor validation operation
+ * @device_prep_dma_pq: prepares a pq operation
+ * @device_prep_dma_pq_val: prepares a pqzero_sum operation
+ * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_slave_sg: prepares a slave dma operation
+ * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
+ * The function takes a buffer of size buf_len. The callback function will
+ * be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: Transfer expression in a generic way.
+ * @device_config: Pushes a new configuration to a channel, returns 0 or an error
+ * code
+ * @device_pause: Pauses any transfer happening on a channel. Returns
+ * 0 or an error code
+ * @device_resume: Resumes any transfer on a channel previously
+ * paused. Returns 0 or an error code
+ * @device_terminate_all: Aborts all transfers on a channel. Returns 0
+ * or an error code
+ * @device_tx_status: poll for transaction completion, the optional
+ * txstate parameter can be supplied with a pointer to get a
+ * struct with auxiliary transfer status information, otherwise the call
+ * will just return a simple status code
+ * @device_issue_pending: push pending transactions to hardware
+ */
+struct dma_device {
+
+ unsigned int chancnt;
+ unsigned int privatecnt;
+ struct list_head channels;
+ struct list_head global_node;
+ dma_cap_mask_t cap_mask;
+ unsigned short max_xor;
+ unsigned short max_pq;
+ u8 copy_align;
+ u8 xor_align;
+ u8 pq_align;
+ #define DMA_HAS_PQ_CONTINUE (1 << 15)
+
+ int dev_id;
+ struct device *dev;
+
+ u32 src_addr_widths;
+ u32 dst_addr_widths;
+ u32 directions;
+ enum dma_residue_granularity residue_granularity;
+
+ int (*device_alloc_chan_resources)(struct dma_chan *chan);
+ void (*device_free_chan_resources)(struct dma_chan *chan);
+
+ struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)(
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, enum sum_check_flags *result, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq)(
+ struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf,
+ size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)(
+ struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+ unsigned int src_cnt, const unsigned char *scf, size_t len,
+ enum sum_check_flags *pqres, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
+ struct dma_chan *chan, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
+
+ struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+ struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags);
+
+ int (*device_config)(struct dma_chan *chan,
+ struct dma_slave_config *config);
+ int (*device_pause)(struct dma_chan *chan);
+ int (*device_resume)(struct dma_chan *chan);
+ int (*device_terminate_all)(struct dma_chan *chan);
+
+ enum dma_status (*device_tx_status)(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate);
+ void (*device_issue_pending)(struct dma_chan *chan);
+};
+
+static inline int dmaengine_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ if (chan->device->device_config)
+ return chan->device->device_config(chan, config);
+
+ return -ENOSYS;
+}
+
+static inline bool is_slave_direction(enum dma_transfer_direction direction)
+{
+ return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
+ struct dma_chan *chan, dma_addr_t buf, size_t len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ struct scatterlist sg;
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = buf;
+ sg_dma_len(&sg) = len;
+
+ return chan->device->device_prep_slave_sg(chan, &sg, 1,
+ dir, flags, NULL);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags)
+{
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, NULL);
+}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+struct rio_dma_ext;
+static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ struct rio_dma_ext *rio_ext)
+{
+ return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
+ dir, flags, rio_ext);
+}
+#endif
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
+ period_len, dir, flags);
+}
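
A sketch of the usual cyclic (audio-style) setup built on the wrapper above: prepare once, attach a per-period callback, submit and kick the engine. The helper name and parameters are invented for illustration.

static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = period_done;	/* runs after each period_len bytes */
	desc->callback_param = arg;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
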
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ return chan->device->device_prep_interleaved_dma(chan, xt, flags);
+}
+
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
+ src_sg, src_nents, flags);
+}
+
+static inline int dmaengine_terminate_all(struct dma_chan *chan)
+{
+ if (chan->device->device_terminate_all)
+ return chan->device->device_terminate_all(chan);
+
+ return -ENOSYS;
+}
+
+static inline int dmaengine_pause(struct dma_chan *chan)
+{
+ if (chan->device->device_pause)
+ return chan->device->device_pause(chan);
+
+ return -ENOSYS;
+}
+
+static inline int dmaengine_resume(struct dma_chan *chan)
+{
+ if (chan->device->device_resume)
+ return chan->device->device_resume(chan);
+
+ return -ENOSYS;
+}
+
+static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ return chan->device->device_tx_status(chan, cookie, state);
+}
+
+static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
+{
+ return desc->tx_submit(desc);
+}
+
+static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+{
+ size_t mask;
+
+ if (!align)
+ return true;
+ mask = (1 << align) - 1;
+ if (mask & (off1 | off2 | len))
+ return false;
+ return true;
+}
+
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+ size_t off2, size_t len)
+{
+ return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline void
+dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
+{
+ dma->max_pq = maxpq;
+ if (has_pq_continue)
+ dma->max_pq |= DMA_HAS_PQ_CONTINUE;
+}
+
+static inline bool dmaf_continue(enum dma_ctrl_flags flags)
+{
+ return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE;
+}
+
+static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags)
+{
+ enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P;
+
+ return (flags & mask) == mask;
+}
+
+static inline bool dma_dev_has_pq_continue(struct dma_device *dma)
+{
+ return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE;
+}
+
+static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma)
+{
+ return dma->max_pq & ~DMA_HAS_PQ_CONTINUE;
+}
+
+/* dma_maxpq - reduce maxpq in the face of continued operations
+ * @dma - dma device with PQ capability
+ * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set
+ *
+ * When an engine does not support native continuation we need 3 extra
+ * source slots to reuse P and Q with the following coefficients:
+ * 1/ {00} * P : remove P from Q', but use it as a source for P'
+ * 2/ {01} * Q : use Q to continue Q' calculation
+ * 3/ {00} * Q : subtract Q from P' to cancel (2)
+ *
+ * In the case where P is disabled we only need 1 extra source:
+ * 1/ {01} * Q : use Q to continue Q' calculation
+ */
+static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags)
+{
+ if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma);
+ else if (dmaf_p_disabled_continue(flags))
+ return dma_dev_to_maxpq(dma) - 1;
+ else if (dmaf_continue(flags))
+ return dma_dev_to_maxpq(dma) - 3;
+ BUG();
+}
+
+/* --- public DMA engine API --- */
+
+#ifdef CONFIG_DMA_ENGINE
+void dmaengine_get(void);
+void dmaengine_put(void);
+#else
+static inline void dmaengine_get(void)
+{
+}
+static inline void dmaengine_put(void)
+{
+}
+#endif
+
+#ifdef CONFIG_ASYNC_TX_DMA
+#define async_dmaengine_get() dmaengine_get()
+#define async_dmaengine_put() dmaengine_put()
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
+#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
+#else
+#define async_dma_find_channel(type) dma_find_channel(type)
+#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
+#else
+static inline void async_dmaengine_get(void)
+{
+}
+static inline void async_dmaengine_put(void)
+{
+}
+static inline struct dma_chan *
+async_dma_find_channel(enum dma_transaction_type type)
+{
+ return NULL;
+}
+#endif /* CONFIG_ASYNC_TX_DMA */
+void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
+ struct dma_chan *chan);
+
+static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags |= DMA_CTRL_ACK;
+}
+
+static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx)
+{
+ tx->flags &= ~DMA_CTRL_ACK;
+}
+
+static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
+{
+ return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK;
+}
+
+#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
+static inline void
+__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+ set_bit(tx_type, dstp->bits);
+}
+
+#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
+static inline void
+__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
+{
+ clear_bit(tx_type, dstp->bits);
+}
+
+#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
+static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
+{
+ bitmap_zero(dstp->bits, DMA_TX_TYPE_END);
+}
+
+#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
+static inline int
+__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
+{
+ return test_bit(tx_type, srcp->bits);
+}
+
+#define for_each_dma_cap_mask(cap, mask) \
+ for_each_set_bit(cap, mask.bits, DMA_TX_TYPE_END)
+
+/**
+ * dma_async_issue_pending - flush pending transactions to HW
+ * @chan: target DMA channel
+ *
+ * This allows drivers to push copies to HW in batches,
+ * reducing MMIO writes where possible.
+ */
+static inline void dma_async_issue_pending(struct dma_chan *chan)
+{
+ chan->device->device_issue_pending(chan);
+}
+
+/**
+ * dma_async_is_tx_complete - poll for transaction completion
+ * @chan: DMA channel
+ * @cookie: transaction identifier to check status of
+ * @last: returns last completed cookie, can be NULL
+ * @used: returns last issued cookie, can be NULL
+ *
+ * If @last and @used are passed in, upon return they reflect the driver
+ * internal state and can be used with dma_async_is_complete() to check
+ * the status of multiple cookies without re-checking hardware state.
+ */
+static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
+{
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = chan->device->device_tx_status(chan, cookie, &state);
+ if (last)
+ *last = state.last;
+ if (used)
+ *used = state.used;
+ return status;
+}
+
+/**
+ * dma_async_is_complete - test a cookie against chan state
+ * @cookie: transaction identifier to test status of
+ * @last_complete: last known completed transaction
+ * @last_used: last cookie value handed out
+ *
+ * dma_async_is_complete() is used in dma_async_is_tx_complete(); the test
+ * logic is separated to allow lightweight testing of multiple cookies
+ */
+static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
+ dma_cookie_t last_complete, dma_cookie_t last_used)
+{
+ if (last_complete <= last_used) {
+ if ((cookie <= last_complete) || (cookie > last_used))
+ return DMA_COMPLETE;
+ } else {
+ if ((cookie <= last_complete) && (cookie > last_used))
+ return DMA_COMPLETE;
+ }
+ return DMA_IN_PROGRESS;
+}
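
A sketch of the polling pattern these two helpers support; real drivers normally prefer a completion callback over busy-waiting, so this is only meant to illustrate the cookie semantics.

static enum dma_status my_poll_cookie(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (status == DMA_ERROR)
			break;
		cpu_relax();
	} while (status == DMA_IN_PROGRESS);

	return status;
}
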
+
+static inline void
+dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue)
+{
+ if (st) {
+ st->last = last;
+ st->used = used;
+ st->residue = residue;
+ }
+}
+
+#ifdef CONFIG_DMA_ENGINE
+struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
+enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
+void dma_issue_pending_all(void);
+struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param);
+struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
+ const char *name);
+struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
+void dma_release_channel(struct dma_chan *chan);
+int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
+#else
+static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
+{
+ return NULL;
+}
+static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
+{
+ return DMA_COMPLETE;
+}
+static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
+{
+ return DMA_COMPLETE;
+}
+static inline void dma_issue_pending_all(void)
+{
+}
+static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param)
+{
+ return NULL;
+}
+static inline struct dma_chan *dma_request_slave_channel_reason(
+ struct device *dev, const char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline struct dma_chan *dma_request_slave_channel(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+static inline void dma_release_channel(struct dma_chan *chan)
+{
+}
+static inline int dma_get_slave_caps(struct dma_chan *chan,
+ struct dma_slave_caps *caps)
+{
+ return -ENXIO;
+}
+#endif
+
+/* --- DMA device --- */
+
+int dma_async_device_register(struct dma_device *device);
+void dma_async_device_unregister(struct dma_device *device);
+void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
+#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
+ __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+
+static inline struct dma_chan
+*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+ dma_filter_fn fn, void *fn_param,
+ struct device *dev, char *name)
+{
+ struct dma_chan *chan;
+
+ chan = dma_request_slave_channel(dev, name);
+ if (chan)
+ return chan;
+
+ return __dma_request_channel(mask, fn, fn_param);
+}
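
A sketch of the probe-time lookup the compat helper above is designed for: try the named slave channel first (DT or ACPI), then fall back to a capability/filter search. The "rx" name and the trivial filter are placeholders for whatever the platform actually provides.

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* Placeholder: a real filter would match a requester line or a
	 * specific controller instance carried in 'param'. */
	return true;
}

static struct dma_chan *my_get_rx_chan(struct device *dev, void *filter_param)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_slave_channel_compat(mask, my_filter, filter_param,
						dev, "rx");
}
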
+#endif /* LINUX_DMAENGINE_H */
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
new file mode 100644
index 000000000..52456aa56
--- /dev/null
+++ b/include/linux/dmapool.h
@@ -0,0 +1,37 @@
+/*
+ * include/linux/dmapool.h
+ *
+ * Allocation pools for DMAable (coherent) memory.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef LINUX_DMAPOOL_H
+#define LINUX_DMAPOOL_H
+
+#include <asm/io.h>
+#include <asm/scatterlist.h>
+
+struct device;
+
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+ size_t size, size_t align, size_t allocation);
+
+void dma_pool_destroy(struct dma_pool *pool);
+
+void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle);
+
+void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
+
+/*
+ * Managed DMA pool
+ */
+struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
+ size_t size, size_t align, size_t allocation);
+void dmam_pool_destroy(struct dma_pool *pool);
+
+#endif
+
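A sketch of the typical pattern this header supports: one pool of small, fixed-size, aligned hardware descriptors per device. The descriptor layout, the 64-byte alignment and the names are invented for illustration.

struct my_hw_desc {
	__le64 addr;
	__le32 len;
	__le32 flags;
};

static struct dma_pool *my_pool;

static int my_pool_init(struct device *dev)
{
	my_pool = dma_pool_create("my_descs", dev, sizeof(struct my_hw_desc),
				  64, 0);
	return my_pool ? 0 : -ENOMEM;
}

static struct my_hw_desc *my_desc_get(dma_addr_t *dma)
{
	/* Returns a coherent descriptor plus its bus address for the HW. */
	return dma_pool_alloc(my_pool, GFP_ATOMIC, dma);
}

static void my_desc_put(struct my_hw_desc *d, dma_addr_t dma)
{
	dma_pool_free(my_pool, d, dma);
}
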
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
new file mode 100644
index 000000000..30624954d
--- /dev/null
+++ b/include/linux/dmar.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) Ashok Raj <ashok.raj@intel.com>
+ * Copyright (C) Shaohua Li <shaohua.li@intel.com>
+ */
+
+#ifndef __DMAR_H__
+#define __DMAR_H__
+
+#include <linux/acpi.h>
+#include <linux/types.h>
+#include <linux/msi.h>
+#include <linux/irqreturn.h>
+#include <linux/rwsem.h>
+#include <linux/rcupdate.h>
+
+struct acpi_dmar_header;
+
+#ifdef CONFIG_X86
+# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
+#else
+# define DMAR_UNITS_SUPPORTED 64
+#endif
+
+/* DMAR Flags */
+#define DMAR_INTR_REMAP 0x1
+#define DMAR_X2APIC_OPT_OUT 0x2
+
+struct intel_iommu;
+
+struct dmar_dev_scope {
+ struct device __rcu *dev;
+ u8 bus;
+ u8 devfn;
+};
+
+#ifdef CONFIG_DMAR_TABLE
+extern struct acpi_table_header *dmar_tbl;
+struct dmar_drhd_unit {
+ struct list_head list; /* list of drhd units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ u64 reg_base_addr; /* register base address*/
+ struct dmar_dev_scope *devices;/* target device array */
+ int devices_cnt; /* target device count */
+ u16 segment; /* PCI domain */
+ u8 ignored:1; /* ignore drhd */
+ u8 include_all:1;
+ struct intel_iommu *iommu;
+};
+
+struct dmar_pci_path {
+ u8 bus;
+ u8 device;
+ u8 function;
+};
+
+struct dmar_pci_notify_info {
+ struct pci_dev *dev;
+ unsigned long event;
+ int bus;
+ u16 seg;
+ u16 level;
+ struct dmar_pci_path path[];
+} __attribute__((packed));
+
+extern struct rw_semaphore dmar_global_lock;
+extern struct list_head dmar_drhd_units;
+
+#define for_each_drhd_unit(drhd) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
+
+#define for_each_active_drhd_unit(drhd) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ if (drhd->ignored) {} else
+
+#define for_each_active_iommu(i, drhd) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ if (i=drhd->iommu, drhd->ignored) {} else
+
+#define for_each_iommu(i, drhd) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ if (i=drhd->iommu, 0) {} else
+
+static inline bool dmar_rcu_check(void)
+{
+ return rwsem_is_locked(&dmar_global_lock) ||
+ system_state == SYSTEM_BOOTING;
+}
+
+#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
+
+#define for_each_dev_scope(a, c, p, d) \
+ for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \
+ NULL, (p) < (c)); (p)++)
+
+#define for_each_active_dev_scope(a, c, p, d) \
+ for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else
+
+extern int dmar_table_init(void);
+extern int dmar_dev_scope_init(void);
+extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
+ struct dmar_dev_scope **devices, u16 segment);
+extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
+extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
+extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
+ void *start, void *end, u16 segment,
+ struct dmar_dev_scope *devices,
+ int devices_cnt);
+extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
+ u16 segment, struct dmar_dev_scope *devices,
+ int count);
+/* Intel IOMMU detection */
+extern int detect_intel_iommu(void);
+extern int enable_drhd_fault_handling(void);
+extern int dmar_device_add(acpi_handle handle);
+extern int dmar_device_remove(acpi_handle handle);
+
+static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
+{
+ return 0;
+}
+
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_detected, no_iommu;
+extern int intel_iommu_init(void);
+extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
+extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
+extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
+extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
+#else /* !CONFIG_INTEL_IOMMU: */
+static inline int intel_iommu_init(void) { return -ENODEV; }
+
+#define dmar_parse_one_rmrr dmar_res_noop
+#define dmar_parse_one_atsr dmar_res_noop
+#define dmar_check_one_atsr dmar_res_noop
+#define dmar_release_one_atsr dmar_res_noop
+
+static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
+{
+ return 0;
+}
+
+static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{
+ return 0;
+}
+#endif /* CONFIG_INTEL_IOMMU */
+
+#ifdef CONFIG_IRQ_REMAP
+extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
+#else /* CONFIG_IRQ_REMAP */
+static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
+{ return 0; }
+#endif /* CONFIG_IRQ_REMAP */
+
+#else /* CONFIG_DMAR_TABLE */
+
+static inline int dmar_device_add(void *handle)
+{
+ return 0;
+}
+
+static inline int dmar_device_remove(void *handle)
+{
+ return 0;
+}
+
+#endif /* CONFIG_DMAR_TABLE */
+
+struct irte {
+ union {
+ struct {
+ __u64 present : 1,
+ fpd : 1,
+ dst_mode : 1,
+ redir_hint : 1,
+ trigger_mode : 1,
+ dlvry_mode : 3,
+ avail : 4,
+ __reserved_1 : 4,
+ vector : 8,
+ __reserved_2 : 8,
+ dest_id : 32;
+ };
+ __u64 low;
+ };
+
+ union {
+ struct {
+ __u64 sid : 16,
+ sq : 2,
+ svt : 2,
+ __reserved_3 : 44;
+ };
+ __u64 high;
+ };
+};
+
+enum {
+ IRQ_REMAP_XAPIC_MODE,
+ IRQ_REMAP_X2APIC_MODE,
+};
+
+/* Can't use the common MSI interrupt functions
+ * since DMAR is not a pci device
+ */
+struct irq_data;
+extern void dmar_msi_unmask(struct irq_data *data);
+extern void dmar_msi_mask(struct irq_data *data);
+extern void dmar_msi_read(int irq, struct msi_msg *msg);
+extern void dmar_msi_write(int irq, struct msi_msg *msg);
+extern int dmar_set_interrupt(struct intel_iommu *iommu);
+extern irqreturn_t dmar_fault(int irq, void *dev_id);
+extern int arch_setup_dmar_msi(unsigned int irq);
+
+#endif /* __DMAR_H__ */
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
new file mode 100644
index 000000000..f820f0a33
--- /dev/null
+++ b/include/linux/dmi.h
@@ -0,0 +1,146 @@
+#ifndef __DMI_H__
+#define __DMI_H__
+
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+
+/* enum dmi_field is in mod_devicetable.h */
+
+enum dmi_device_type {
+ DMI_DEV_TYPE_ANY = 0,
+ DMI_DEV_TYPE_OTHER,
+ DMI_DEV_TYPE_UNKNOWN,
+ DMI_DEV_TYPE_VIDEO,
+ DMI_DEV_TYPE_SCSI,
+ DMI_DEV_TYPE_ETHERNET,
+ DMI_DEV_TYPE_TOKENRING,
+ DMI_DEV_TYPE_SOUND,
+ DMI_DEV_TYPE_PATA,
+ DMI_DEV_TYPE_SATA,
+ DMI_DEV_TYPE_SAS,
+ DMI_DEV_TYPE_IPMI = -1,
+ DMI_DEV_TYPE_OEM_STRING = -2,
+ DMI_DEV_TYPE_DEV_ONBOARD = -3,
+};
+
+enum dmi_entry_type {
+ DMI_ENTRY_BIOS = 0,
+ DMI_ENTRY_SYSTEM,
+ DMI_ENTRY_BASEBOARD,
+ DMI_ENTRY_CHASSIS,
+ DMI_ENTRY_PROCESSOR,
+ DMI_ENTRY_MEM_CONTROLLER,
+ DMI_ENTRY_MEM_MODULE,
+ DMI_ENTRY_CACHE,
+ DMI_ENTRY_PORT_CONNECTOR,
+ DMI_ENTRY_SYSTEM_SLOT,
+ DMI_ENTRY_ONBOARD_DEVICE,
+ DMI_ENTRY_OEMSTRINGS,
+ DMI_ENTRY_SYSCONF,
+ DMI_ENTRY_BIOS_LANG,
+ DMI_ENTRY_GROUP_ASSOC,
+ DMI_ENTRY_SYSTEM_EVENT_LOG,
+ DMI_ENTRY_PHYS_MEM_ARRAY,
+ DMI_ENTRY_MEM_DEVICE,
+ DMI_ENTRY_32_MEM_ERROR,
+ DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR,
+ DMI_ENTRY_MEM_DEV_MAPPED_ADDR,
+ DMI_ENTRY_BUILTIN_POINTING_DEV,
+ DMI_ENTRY_PORTABLE_BATTERY,
+ DMI_ENTRY_SYSTEM_RESET,
+ DMI_ENTRY_HW_SECURITY,
+ DMI_ENTRY_SYSTEM_POWER_CONTROLS,
+ DMI_ENTRY_VOLTAGE_PROBE,
+ DMI_ENTRY_COOLING_DEV,
+ DMI_ENTRY_TEMP_PROBE,
+ DMI_ENTRY_ELECTRICAL_CURRENT_PROBE,
+ DMI_ENTRY_OOB_REMOTE_ACCESS,
+ DMI_ENTRY_BIS_ENTRY,
+ DMI_ENTRY_SYSTEM_BOOT,
+ DMI_ENTRY_MGMT_DEV,
+ DMI_ENTRY_MGMT_DEV_COMPONENT,
+ DMI_ENTRY_MGMT_DEV_THRES,
+ DMI_ENTRY_MEM_CHANNEL,
+ DMI_ENTRY_IPMI_DEV,
+ DMI_ENTRY_SYS_POWER_SUPPLY,
+ DMI_ENTRY_ADDITIONAL,
+ DMI_ENTRY_ONBOARD_DEV_EXT,
+ DMI_ENTRY_MGMT_CONTROLLER_HOST,
+ DMI_ENTRY_INACTIVE = 126,
+ DMI_ENTRY_END_OF_TABLE = 127,
+};
+
+struct dmi_header {
+ u8 type;
+ u8 length;
+ u16 handle;
+};
+
+struct dmi_device {
+ struct list_head list;
+ int type;
+ const char *name;
+ void *device_data; /* Type specific data */
+};
+
+#ifdef CONFIG_DMI
+
+struct dmi_dev_onboard {
+ struct dmi_device dev;
+ int instance;
+ int segment;
+ int bus;
+ int devfn;
+};
+
+extern int dmi_check_system(const struct dmi_system_id *list);
+const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
+extern const char * dmi_get_system_info(int field);
+extern const struct dmi_device * dmi_find_device(int type, const char *name,
+ const struct dmi_device *from);
+extern void dmi_scan_machine(void);
+extern void dmi_memdev_walk(void);
+extern void dmi_set_dump_stack_arch_desc(void);
+extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
+extern int dmi_name_in_vendors(const char *str);
+extern int dmi_name_in_serial(const char *str);
+extern int dmi_available;
+extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+ void *private_data);
+extern bool dmi_match(enum dmi_field f, const char *str);
+extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
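+
+/*
+ * Editorial usage sketch: dmi_check_system() is normally fed a table of
+ * struct dmi_system_id (declared in mod_devicetable.h), terminated by an
+ * empty entry.  The vendor/product strings and the quirk helper below are
+ * placeholders, not real identifiers:
+ *
+ *	static const struct dmi_system_id example_quirks[] = {
+ *		{
+ *			.ident = "Example Laptop",
+ *			.matches = {
+ *				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
+ *				DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
+ *			},
+ *		},
+ *		{ }
+ *	};
+ *
+ *	if (dmi_check_system(example_quirks))
+ *		enable_example_quirk();
+ */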
+
+#else
+
+static inline int dmi_check_system(const struct dmi_system_id *list) { return 0; }
+static inline const char * dmi_get_system_info(int field) { return NULL; }
+static inline const struct dmi_device * dmi_find_device(int type, const char *name,
+ const struct dmi_device *from) { return NULL; }
+static inline void dmi_scan_machine(void) { return; }
+static inline void dmi_memdev_walk(void) { }
+static inline void dmi_set_dump_stack_arch_desc(void) { }
+static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
+{
+ if (yearp)
+ *yearp = 0;
+ if (monthp)
+ *monthp = 0;
+ if (dayp)
+ *dayp = 0;
+ return false;
+}
+static inline int dmi_name_in_vendors(const char *s) { return 0; }
+static inline int dmi_name_in_serial(const char *s) { return 0; }
+#define dmi_available 0
+static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
+ void *private_data) { return -1; }
+static inline bool dmi_match(enum dmi_field f, const char *str)
+ { return false; }
+static inline void dmi_memdev_name(u16 handle, const char **bank,
+ const char **device) { }
+static inline const struct dmi_system_id *
+ dmi_first_match(const struct dmi_system_id *list) { return NULL; }
+
+#endif
+
+#endif /* __DMI_H__ */
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
new file mode 100644
index 000000000..3290555a5
--- /dev/null
+++ b/include/linux/dnotify.h
@@ -0,0 +1,50 @@
+#ifndef _LINUX_DNOTIFY_H
+#define _LINUX_DNOTIFY_H
+/*
+ * Directory notification for Linux
+ *
+ * Copyright (C) 2000,2002 Stephen Rothwell
+ */
+
+#include <linux/fs.h>
+
+struct dnotify_struct {
+ struct dnotify_struct * dn_next;
+ __u32 dn_mask;
+ int dn_fd;
+ struct file * dn_filp;
+ fl_owner_t dn_owner;
+};
+
+#ifdef __KERNEL__
+
+
+#ifdef CONFIG_DNOTIFY
+
+#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\
+ FS_MODIFY | FS_MODIFY_CHILD |\
+ FS_ACCESS | FS_ACCESS_CHILD |\
+ FS_ATTRIB | FS_ATTRIB_CHILD |\
+ FS_CREATE | FS_DN_RENAME |\
+ FS_MOVED_FROM | FS_MOVED_TO)
+
+extern int dir_notify_enable;
+extern void dnotify_flush(struct file *, fl_owner_t);
+extern int fcntl_dirnotify(int, struct file *, unsigned long);
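+
+/*
+ * Editorial note: fcntl_dirnotify() above is the in-kernel handler behind
+ * fcntl(F_NOTIFY).  A userspace consumer requests directory notification
+ * roughly like this (dir_fd and the event mask are placeholders):
+ *
+ *	fcntl(dir_fd, F_NOTIFY, DN_MODIFY | DN_CREATE | DN_MULTISHOT);
+ *
+ * with events then delivered as a signal (SIGIO unless changed via F_SETSIG).
+ */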
+
+#else
+
+static inline void dnotify_flush(struct file *filp, fl_owner_t id)
+{
+}
+
+static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_DNOTIFY */
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DNOTIFY_H */
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h
new file mode 100644
index 000000000..cc92268af
--- /dev/null
+++ b/include/linux/dns_resolver.h
@@ -0,0 +1,34 @@
+/*
+ * DNS Resolver upcall management for CIFS DFS and AFS
+ * Handles host name to IP address resolution and DNS query for AFSDB RR.
+ *
+ * Copyright (c) International Business Machines Corp., 2008
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Wang Lei (wang840925@gmail.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_DNS_RESOLVER_H
+#define _LINUX_DNS_RESOLVER_H
+
+#ifdef __KERNEL__
+
+extern int dns_query(const char *type, const char *name, size_t namelen,
+ const char *options, char **_result, time_t *_expiry);
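+
+/*
+ * Editorial usage sketch: a NULL type asks for a straight host -> address
+ * lookup.  The buffer handling shown is an assumption about the usual
+ * calling convention (result is kmalloc'd and freed by the caller):
+ *
+ *	char *ip;
+ *	int len = dns_query(NULL, hostname, strlen(hostname), NULL, &ip, NULL);
+ *	if (len >= 0)
+ *		kfree(ip);
+ */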
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DNS_RESOLVER_H */
diff --git a/include/linux/dqblk_qtree.h b/include/linux/dqblk_qtree.h
new file mode 100644
index 000000000..82a16527b
--- /dev/null
+++ b/include/linux/dqblk_qtree.h
@@ -0,0 +1,56 @@
+/*
+ * Definitions of structures and functions for quota formats using trie
+ */
+
+#ifndef _LINUX_DQBLK_QTREE_H
+#define _LINUX_DQBLK_QTREE_H
+
+#include <linux/types.h>
+
+/* Numbers of blocks needed for updates - we count with the smallest
+ * possible block size (1024) */
+#define QTREE_INIT_ALLOC 4
+#define QTREE_INIT_REWRITE 2
+#define QTREE_DEL_ALLOC 0
+#define QTREE_DEL_REWRITE 6
+
+struct dquot;
+
+/* Operations */
+struct qtree_fmt_operations {
+ void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); /* Convert given entry from in memory format to disk one */
+ void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); /* Convert given entry from disk format to in memory one */
+ int (*is_id)(void *disk, struct dquot *dquot); /* Is this structure for given id? */
+};
+
+/* Inmemory copy of version specific information */
+struct qtree_mem_dqinfo {
+ struct super_block *dqi_sb; /* Sb quota is on */
+ int dqi_type; /* Quota type */
+ unsigned int dqi_blocks; /* # of blocks in quota file */
+ unsigned int dqi_free_blk; /* First block in list of free blocks */
+ unsigned int dqi_free_entry; /* First block with free entry */
+ unsigned int dqi_blocksize_bits; /* Block size of quota file */
+ unsigned int dqi_entry_size; /* Size of quota entry in quota file */
+ unsigned int dqi_usable_bs; /* Space usable in block for quota data */
+ unsigned int dqi_qtree_depth; /* Precomputed depth of quota tree */
+ struct qtree_fmt_operations *dqi_ops; /* Operations for entry manipulation */
+};
+
+int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
+int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
+int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
+int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot);
+int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk);
+static inline int qtree_depth(struct qtree_mem_dqinfo *info)
+{
+ unsigned int epb = info->dqi_usable_bs >> 2;
+ unsigned long long entries = epb;
+ int i;
+
+ for (i = 1; entries < (1ULL << 32); i++)
+ entries *= epb;
+ return i;
+}
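+
+/*
+ * Editorial worked example: qtree_depth() returns the smallest depth d such
+ * that epb^d references cover the full 32-bit id space.  With a hypothetical
+ * dqi_usable_bs of 1024, epb = 1024 >> 2 = 256 and entries grow as 256,
+ * 256^2, 256^3, 256^4 = 2^32, so the loop stops at i = 4.
+ */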
+
+#endif /* _LINUX_DQBLK_QTREE_H */
diff --git a/include/linux/dqblk_v1.h b/include/linux/dqblk_v1.h
new file mode 100644
index 000000000..c0d4d1e2a
--- /dev/null
+++ b/include/linux/dqblk_v1.h
@@ -0,0 +1,14 @@
+/*
+ * File with in-memory structures of old quota format
+ */
+
+#ifndef _LINUX_DQBLK_V1_H
+#define _LINUX_DQBLK_V1_H
+
+/* Numbers of blocks needed for updates */
+#define V1_INIT_ALLOC 1
+#define V1_INIT_REWRITE 1
+#define V1_DEL_ALLOC 0
+#define V1_DEL_REWRITE 2
+
+#endif /* _LINUX_DQBLK_V1_H */
diff --git a/include/linux/dqblk_v2.h b/include/linux/dqblk_v2.h
new file mode 100644
index 000000000..18000a542
--- /dev/null
+++ b/include/linux/dqblk_v2.h
@@ -0,0 +1,16 @@
+/*
+ * Definitions for vfsv0 quota format
+ */
+
+#ifndef _LINUX_DQBLK_V2_H
+#define _LINUX_DQBLK_V2_H
+
+#include <linux/dqblk_qtree.h>
+
+/* Numbers of blocks needed for updates */
+#define V2_INIT_ALLOC QTREE_INIT_ALLOC
+#define V2_INIT_REWRITE QTREE_INIT_REWRITE
+#define V2_DEL_ALLOC QTREE_DEL_ALLOC
+#define V2_DEL_REWRITE QTREE_DEL_REWRITE
+
+#endif /* _LINUX_DQBLK_V2_H */
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
new file mode 100644
index 000000000..8723f2a99
--- /dev/null
+++ b/include/linux/drbd.h
@@ -0,0 +1,381 @@
+/*
+ drbd.h
+ Kernel module for 2.6.x Kernels
+
+ This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+ Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
+ Copyright (C) 2001-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+ Copyright (C) 2001-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+ drbd is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ drbd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with drbd; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+#ifndef DRBD_H
+#define DRBD_H
+#include <linux/connector.h>
+#include <asm/types.h>
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#else
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <limits.h>
+
+/* Although the Linux source code makes a difference between
+ generic endianness and the bitfields' endianness, there is no
+ architecture as of Linux-2.6.24-rc4 where the bitfields' endianness
+ does not match the generic endianness. */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN_BITFIELD
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define __BIG_ENDIAN_BITFIELD
+#else
+# error "sorry, weird endianness on this box"
+#endif
+
+#endif
+
+extern const char *drbd_buildtag(void);
+#define REL_VERSION "8.4.5"
+#define API_VERSION 1
+#define PRO_VERSION_MIN 86
+#define PRO_VERSION_MAX 101
+
+
+enum drbd_io_error_p {
+ EP_PASS_ON, /* FIXME: should this better be named "Ignore"? */
+ EP_CALL_HELPER,
+ EP_DETACH
+};
+
+enum drbd_fencing_p {
+ FP_NOT_AVAIL = -1, /* Not a policy */
+ FP_DONT_CARE = 0,
+ FP_RESOURCE,
+ FP_STONITH
+};
+
+enum drbd_disconnect_p {
+ DP_RECONNECT,
+ DP_DROP_NET_CONF,
+ DP_FREEZE_IO
+};
+
+enum drbd_after_sb_p {
+ ASB_DISCONNECT,
+ ASB_DISCARD_YOUNGER_PRI,
+ ASB_DISCARD_OLDER_PRI,
+ ASB_DISCARD_ZERO_CHG,
+ ASB_DISCARD_LEAST_CHG,
+ ASB_DISCARD_LOCAL,
+ ASB_DISCARD_REMOTE,
+ ASB_CONSENSUS,
+ ASB_DISCARD_SECONDARY,
+ ASB_CALL_HELPER,
+ ASB_VIOLENTLY
+};
+
+enum drbd_on_no_data {
+ OND_IO_ERROR,
+ OND_SUSPEND_IO
+};
+
+enum drbd_on_congestion {
+ OC_BLOCK,
+ OC_PULL_AHEAD,
+ OC_DISCONNECT,
+};
+
+enum drbd_read_balancing {
+ RB_PREFER_LOCAL,
+ RB_PREFER_REMOTE,
+ RB_ROUND_ROBIN,
+ RB_LEAST_PENDING,
+ RB_CONGESTED_REMOTE,
+ RB_32K_STRIPING,
+ RB_64K_STRIPING,
+ RB_128K_STRIPING,
+ RB_256K_STRIPING,
+ RB_512K_STRIPING,
+ RB_1M_STRIPING,
+};
+
+/* KEEP the order, do not delete or insert. Only append. */
+enum drbd_ret_code {
+ ERR_CODE_BASE = 100,
+ NO_ERROR = 101,
+ ERR_LOCAL_ADDR = 102,
+ ERR_PEER_ADDR = 103,
+ ERR_OPEN_DISK = 104,
+ ERR_OPEN_MD_DISK = 105,
+ ERR_DISK_NOT_BDEV = 107,
+ ERR_MD_NOT_BDEV = 108,
+ ERR_DISK_TOO_SMALL = 111,
+ ERR_MD_DISK_TOO_SMALL = 112,
+ ERR_BDCLAIM_DISK = 114,
+ ERR_BDCLAIM_MD_DISK = 115,
+ ERR_MD_IDX_INVALID = 116,
+ ERR_IO_MD_DISK = 118,
+ ERR_MD_INVALID = 119,
+ ERR_AUTH_ALG = 120,
+ ERR_AUTH_ALG_ND = 121,
+ ERR_NOMEM = 122,
+ ERR_DISCARD_IMPOSSIBLE = 123,
+ ERR_DISK_CONFIGURED = 124,
+ ERR_NET_CONFIGURED = 125,
+ ERR_MANDATORY_TAG = 126,
+ ERR_MINOR_INVALID = 127,
+ ERR_INTR = 129, /* EINTR */
+ ERR_RESIZE_RESYNC = 130,
+ ERR_NO_PRIMARY = 131,
+ ERR_RESYNC_AFTER = 132,
+ ERR_RESYNC_AFTER_CYCLE = 133,
+ ERR_PAUSE_IS_SET = 134,
+ ERR_PAUSE_IS_CLEAR = 135,
+ ERR_PACKET_NR = 137,
+ ERR_NO_DISK = 138,
+ ERR_NOT_PROTO_C = 139,
+ ERR_NOMEM_BITMAP = 140,
+ ERR_INTEGRITY_ALG = 141, /* DRBD 8.2 only */
+ ERR_INTEGRITY_ALG_ND = 142, /* DRBD 8.2 only */
+ ERR_CPU_MASK_PARSE = 143, /* DRBD 8.2 only */
+ ERR_CSUMS_ALG = 144, /* DRBD 8.2 only */
+ ERR_CSUMS_ALG_ND = 145, /* DRBD 8.2 only */
+ ERR_VERIFY_ALG = 146, /* DRBD 8.2 only */
+ ERR_VERIFY_ALG_ND = 147, /* DRBD 8.2 only */
+ ERR_CSUMS_RESYNC_RUNNING= 148, /* DRBD 8.2 only */
+ ERR_VERIFY_RUNNING = 149, /* DRBD 8.2 only */
+ ERR_DATA_NOT_CURRENT = 150,
+ ERR_CONNECTED = 151, /* DRBD 8.3 only */
+ ERR_PERM = 152,
+ ERR_NEED_APV_93 = 153,
+ ERR_STONITH_AND_PROT_A = 154,
+ ERR_CONG_NOT_PROTO_A = 155,
+ ERR_PIC_AFTER_DEP = 156,
+ ERR_PIC_PEER_DEP = 157,
+ ERR_RES_NOT_KNOWN = 158,
+ ERR_RES_IN_USE = 159,
+ ERR_MINOR_CONFIGURED = 160,
+ ERR_MINOR_OR_VOLUME_EXISTS = 161,
+ ERR_INVALID_REQUEST = 162,
+ ERR_NEED_APV_100 = 163,
+ ERR_NEED_ALLOW_TWO_PRI = 164,
+ ERR_MD_UNCLEAN = 165,
+ ERR_MD_LAYOUT_CONNECTED = 166,
+ ERR_MD_LAYOUT_TOO_BIG = 167,
+ ERR_MD_LAYOUT_TOO_SMALL = 168,
+ ERR_MD_LAYOUT_NO_FIT = 169,
+ ERR_IMPLICIT_SHRINK = 170,
+ /* insert new ones above this line */
+ AFTER_LAST_ERR_CODE
+};
+
+#define DRBD_PROT_A 1
+#define DRBD_PROT_B 2
+#define DRBD_PROT_C 3
+
+enum drbd_role {
+ R_UNKNOWN = 0,
+ R_PRIMARY = 1, /* role */
+ R_SECONDARY = 2, /* role */
+ R_MASK = 3,
+};
+
+/* The order of these constants is important.
+ * The lower ones (<C_WF_REPORT_PARAMS) indicate
+ * that there is no socket!
+ * >=C_WF_REPORT_PARAMS ==> There is a socket
+ */
+enum drbd_conns {
+ C_STANDALONE,
+ C_DISCONNECTING, /* Temporary state on the way to StandAlone. */
+ C_UNCONNECTED, /* >= C_UNCONNECTED -> inc_net() succeeds */
+
+ /* These temporary states are all used on the way
+ * from >= C_CONNECTED to Unconnected.
+ * These are the 'disconnect reason' states;
+ * I do not allow changing between them. */
+ C_TIMEOUT,
+ C_BROKEN_PIPE,
+ C_NETWORK_FAILURE,
+ C_PROTOCOL_ERROR,
+ C_TEAR_DOWN,
+
+ C_WF_CONNECTION,
+ C_WF_REPORT_PARAMS, /* we have a socket */
+ C_CONNECTED, /* we have introduced each other */
+ C_STARTING_SYNC_S, /* starting full sync by admin request. */
+ C_STARTING_SYNC_T, /* starting full sync by admin request. */
+ C_WF_BITMAP_S,
+ C_WF_BITMAP_T,
+ C_WF_SYNC_UUID,
+
+ /* All SyncStates are tested with this comparison
+ * xx >= C_SYNC_SOURCE && xx <= C_PAUSED_SYNC_T */
+ C_SYNC_SOURCE,
+ C_SYNC_TARGET,
+ C_VERIFY_S,
+ C_VERIFY_T,
+ C_PAUSED_SYNC_S,
+ C_PAUSED_SYNC_T,
+
+ C_AHEAD,
+ C_BEHIND,
+
+ C_MASK = 31
+};
+
+enum drbd_disk_state {
+ D_DISKLESS,
+ D_ATTACHING, /* In the process of reading the meta-data */
+ D_FAILED, /* Becomes D_DISKLESS as soon as we have told the peer */
+ /* when >= D_FAILED it is legal to access mdev->ldev */
+ D_NEGOTIATING, /* Late attaching state, we need to talk to the peer */
+ D_INCONSISTENT,
+ D_OUTDATED,
+ D_UNKNOWN, /* Only used for the peer, never for myself */
+ D_CONSISTENT, /* Might be D_OUTDATED, might be D_UP_TO_DATE ... */
+ D_UP_TO_DATE, /* Only this disk state allows applications' IO ! */
+ D_MASK = 15
+};
+
+union drbd_state {
+/* According to gcc's docs, the order of allocation of bit-fields within a
+ * unit (C90 6.5.2.1, C99 6.7.2.1) is determined by the ABI;
+ * pointed out by Maxim Uvarov <muvarov@ru.mvista.com>.
+ * So even though we transmit as "cpu_to_be32(state)",
+ * the offsets of the bitfields still need to be swapped
+ * on different endianness.
+ */
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned role:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned conn:5 ; /* 17/32 cstates */
+ unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned susp:1 ; /* 2/2 IO suspended no/yes (by user) */
+ unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+ unsigned peer_isp:1 ;
+ unsigned user_isp:1 ;
+ unsigned susp_nod:1 ; /* IO suspended because no data */
+ unsigned susp_fen:1 ; /* IO suspended because fence peer handler runs*/
+ unsigned _pad:9; /* 0 unused */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned _pad:9;
+ unsigned susp_fen:1 ;
+ unsigned susp_nod:1 ;
+ unsigned user_isp:1 ;
+ unsigned peer_isp:1 ;
+ unsigned aftr_isp:1 ; /* isp .. imposed sync pause */
+ unsigned susp:1 ; /* 2/2 IO suspended no/yes */
+ unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */
+ unsigned conn:5 ; /* 17/32 cstates */
+ unsigned peer:2 ; /* 3/4 primary/secondary/unknown */
+ unsigned role:2 ; /* 3/4 primary/secondary/unknown */
+#else
+# error "this endianness is not supported"
+#endif
+ };
+ unsigned int i;
+};
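+
+/*
+ * Editorial sketch: the union lets the state be handled either field-wise or
+ * as one 32-bit word, e.g. (roughly) when putting it on the wire:
+ *
+ *	union drbd_state s = ...;
+ *	__be32 on_wire = cpu_to_be32(s.i);
+ */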
+
+enum drbd_state_rv {
+ SS_CW_NO_NEED = 4,
+ SS_CW_SUCCESS = 3,
+ SS_NOTHING_TO_DO = 2,
+ SS_SUCCESS = 1,
+ SS_UNKNOWN_ERROR = 0, /* Used to sleep longer in _drbd_request_state */
+ SS_TWO_PRIMARIES = -1,
+ SS_NO_UP_TO_DATE_DISK = -2,
+ SS_NO_LOCAL_DISK = -4,
+ SS_NO_REMOTE_DISK = -5,
+ SS_CONNECTED_OUTDATES = -6,
+ SS_PRIMARY_NOP = -7,
+ SS_RESYNC_RUNNING = -8,
+ SS_ALREADY_STANDALONE = -9,
+ SS_CW_FAILED_BY_PEER = -10,
+ SS_IS_DISKLESS = -11,
+ SS_DEVICE_IN_USE = -12,
+ SS_NO_NET_CONFIG = -13,
+ SS_NO_VERIFY_ALG = -14, /* drbd-8.2 only */
+ SS_NEED_CONNECTION = -15, /* drbd-8.2 only */
+ SS_LOWER_THAN_OUTDATED = -16,
+ SS_NOT_SUPPORTED = -17, /* drbd-8.2 only */
+ SS_IN_TRANSIENT_STATE = -18, /* Retry after the next state change */
+ SS_CONCURRENT_ST_CHG = -19, /* Concurrent cluster side state change! */
+ SS_O_VOL_PEER_PRI = -20,
+ SS_OUTDATE_WO_CONN = -21,
+ SS_AFTER_LAST_ERROR = -22, /* Keep this at bottom */
+};
+
+#define SHARED_SECRET_MAX 64
+
+#define MDF_CONSISTENT (1 << 0)
+#define MDF_PRIMARY_IND (1 << 1)
+#define MDF_CONNECTED_IND (1 << 2)
+#define MDF_FULL_SYNC (1 << 3)
+#define MDF_WAS_UP_TO_DATE (1 << 4)
+#define MDF_PEER_OUT_DATED (1 << 5)
+#define MDF_CRASHED_PRIMARY (1 << 6)
+#define MDF_AL_CLEAN (1 << 7)
+#define MDF_AL_DISABLED (1 << 8)
+
+enum drbd_uuid_index {
+ UI_CURRENT,
+ UI_BITMAP,
+ UI_HISTORY_START,
+ UI_HISTORY_END,
+ UI_SIZE, /* nl-packet: number of dirty bits */
+ UI_FLAGS, /* nl-packet: flags */
+ UI_EXTENDED_SIZE /* Everything. */
+};
+
+enum drbd_timeout_flag {
+ UT_DEFAULT = 0,
+ UT_DEGRADED = 1,
+ UT_PEER_OUTDATED = 2,
+};
+
+#define UUID_JUST_CREATED ((__u64)4)
+
+/* magic numbers used in meta data and network packets */
+#define DRBD_MAGIC 0x83740267
+#define DRBD_MAGIC_BIG 0x835a
+#define DRBD_MAGIC_100 0x8620ec20
+
+#define DRBD_MD_MAGIC_07 (DRBD_MAGIC+3)
+#define DRBD_MD_MAGIC_08 (DRBD_MAGIC+4)
+#define DRBD_MD_MAGIC_84_UNCLEAN (DRBD_MAGIC+5)
+
+
+/* How did I come up with this magic?
+ * base64 decode "actlog==" ;) */
+#define DRBD_AL_MAGIC 0x69cb65a2
+
+/* these are of type "int" */
+#define DRBD_MD_INDEX_INTERNAL -1
+#define DRBD_MD_INDEX_FLEX_EXT -2
+#define DRBD_MD_INDEX_FLEX_INT -3
+
+#define DRBD_CPU_MASK_SIZE 32
+
+#endif
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
new file mode 100644
index 000000000..7b131ed8f
--- /dev/null
+++ b/include/linux/drbd_genl.h
@@ -0,0 +1,384 @@
+/*
+ * General overview:
+ * full generic netlink message:
+ * |nlmsghdr|genlmsghdr|<payload>
+ *
+ * payload:
+ * |optional fixed size family header|<sequence of netlink attributes>
+ *
+ * sequence of netlink attributes:
+ * I chose to have all "top level" attributes NLA_NESTED,
+ * corresponding to some real struct.
+ * So we have a sequence of |tla, len|<nested nla sequence>
+ *
+ * nested nla sequence:
+ * may be empty, or contain a sequence of netlink attributes
+ * representing the struct fields.
+ *
+ * The tag number of any field (regardless of containing struct)
+ * will be available as T_ ## field_name,
+ * so you cannot have the same field name in two different structs.
+ *
+ * The tag numbers themselves are per struct, though,
+ * so should always begin at 1 (not 0, that is the special "NLA_UNSPEC" type,
+ * which we won't use here).
+ * The tag numbers are used as index in the respective nla_policy array.
+ *
+ * GENL_struct(tag_name, tag_number, struct name, struct fields) - struct and policy
+ * genl_magic_struct.h
+ * generates the struct declaration,
+ * generates an entry in the tla enum,
+ * genl_magic_func.h
+ * generates an entry in the static tla policy
+ * with .type = NLA_NESTED
+ * generates the static <struct_name>_nl_policy definition,
+ * and static conversion functions
+ *
+ * genl_magic_func.h
+ *
+ * GENL_mc_group(group)
+ * genl_magic_struct.h
+ * does nothing
+ * genl_magic_func.h
+ * defines and registers the mcast group,
+ * and provides a send helper
+ *
+ * GENL_notification(op_name, op_num, mcast_group, tla list)
+ * These are notifications to userspace.
+ *
+ * genl_magic_struct.h
+ * generates an entry in the genl_ops enum,
+ * genl_magic_func.h
+ * does nothing
+ *
+ * mcast group: the name of the mcast group this notification should be
+ * expected on
+ * tla list: the list of expected top level attributes,
+ * for documentation and sanity checking.
+ *
+ * GENL_op(op_name, op_num, flags and handler, tla list) - "genl operations"
+ * These are requests from userspace.
+ *
+ * _op and _notification share the same "number space",
+ * op_nr will be assigned to "genlmsghdr->cmd"
+ *
+ * genl_magic_struct.h
+ * generates an entry in the genl_ops enum,
+ * genl_magic_func.h
+ * generates an entry in the static genl_ops array,
+ * and static register/unregister functions to
+ * genl_register_family_with_ops().
+ *
+ * flags and handler:
+ * GENL_op_init( .doit = x, .dumpit = y, .flags = something)
+ * GENL_doit(x) => .dumpit = NULL, .flags = GENL_ADMIN_PERM
+ * tla list: the list of expected top level attributes,
+ * for documentation and sanity checking.
+ */
+
+/*
+ * STRUCTS
+ */
+
+/* this is sent kernel -> userland on various error conditions, and contains
+ * informational text, which is supposed to be human readable.
+ * The machine-relevant return code is in the drbd_genlmsghdr.
+ */
+GENL_struct(DRBD_NLA_CFG_REPLY, 1, drbd_cfg_reply,
+ /* "arbitrary" size strings, nla_policy.len = 0 */
+ __str_field(1, DRBD_GENLA_F_MANDATORY, info_text, 0)
+)
+
+/* Configuration requests typically need a context to operate on.
+ * Possible keys are device minor (fits in the drbd_genlmsghdr),
+ * the replication link (aka connection) name,
+ * and/or the replication group (aka resource) name,
+ * and the volume id within the resource. */
+GENL_struct(DRBD_NLA_CFG_CONTEXT, 2, drbd_cfg_context,
+ __u32_field(1, DRBD_GENLA_F_MANDATORY, ctx_volume)
+ __str_field(2, DRBD_GENLA_F_MANDATORY, ctx_resource_name, 128)
+ __bin_field(3, DRBD_GENLA_F_MANDATORY, ctx_my_addr, 128)
+ __bin_field(4, DRBD_GENLA_F_MANDATORY, ctx_peer_addr, 128)
+)
+
+GENL_struct(DRBD_NLA_DISK_CONF, 3, disk_conf,
+ __str_field(1, DRBD_F_REQUIRED | DRBD_F_INVARIANT, backing_dev, 128)
+ __str_field(2, DRBD_F_REQUIRED | DRBD_F_INVARIANT, meta_dev, 128)
+ __s32_field(3, DRBD_F_REQUIRED | DRBD_F_INVARIANT, meta_dev_idx)
+
+ /* use the resize command to try and change the disk_size */
+ __u64_field(4, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, disk_size)
+ /* we could change the max_bio_bvecs,
+ * but it won't propagate through the stack */
+ __u32_field(5, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, max_bio_bvecs)
+
+ __u32_field_def(6, DRBD_GENLA_F_MANDATORY, on_io_error, DRBD_ON_IO_ERROR_DEF)
+ __u32_field_def(7, DRBD_GENLA_F_MANDATORY, fencing, DRBD_FENCING_DEF)
+
+ __u32_field_def(8, DRBD_GENLA_F_MANDATORY, resync_rate, DRBD_RESYNC_RATE_DEF)
+ __s32_field_def(9, DRBD_GENLA_F_MANDATORY, resync_after, DRBD_MINOR_NUMBER_DEF)
+ __u32_field_def(10, DRBD_GENLA_F_MANDATORY, al_extents, DRBD_AL_EXTENTS_DEF)
+ __u32_field_def(11, DRBD_GENLA_F_MANDATORY, c_plan_ahead, DRBD_C_PLAN_AHEAD_DEF)
+ __u32_field_def(12, DRBD_GENLA_F_MANDATORY, c_delay_target, DRBD_C_DELAY_TARGET_DEF)
+ __u32_field_def(13, DRBD_GENLA_F_MANDATORY, c_fill_target, DRBD_C_FILL_TARGET_DEF)
+ __u32_field_def(14, DRBD_GENLA_F_MANDATORY, c_max_rate, DRBD_C_MAX_RATE_DEF)
+ __u32_field_def(15, DRBD_GENLA_F_MANDATORY, c_min_rate, DRBD_C_MIN_RATE_DEF)
+
+ __flg_field_def(16, DRBD_GENLA_F_MANDATORY, disk_barrier, DRBD_DISK_BARRIER_DEF)
+ __flg_field_def(17, DRBD_GENLA_F_MANDATORY, disk_flushes, DRBD_DISK_FLUSHES_DEF)
+ __flg_field_def(18, DRBD_GENLA_F_MANDATORY, disk_drain, DRBD_DISK_DRAIN_DEF)
+ __flg_field_def(19, DRBD_GENLA_F_MANDATORY, md_flushes, DRBD_MD_FLUSHES_DEF)
+ __u32_field_def(20, DRBD_GENLA_F_MANDATORY, disk_timeout, DRBD_DISK_TIMEOUT_DEF)
+ __u32_field_def(21, 0 /* OPTIONAL */, read_balancing, DRBD_READ_BALANCING_DEF)
+ /* 9: __u32_field_def(22, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF) */
+ __flg_field_def(23, 0 /* OPTIONAL */, al_updates, DRBD_AL_UPDATES_DEF)
+)
+
+GENL_struct(DRBD_NLA_RESOURCE_OPTS, 4, res_opts,
+ __str_field_def(1, DRBD_GENLA_F_MANDATORY, cpu_mask, DRBD_CPU_MASK_SIZE)
+ __u32_field_def(2, DRBD_GENLA_F_MANDATORY, on_no_data, DRBD_ON_NO_DATA_DEF)
+)
+
+GENL_struct(DRBD_NLA_NET_CONF, 5, net_conf,
+ __str_field_def(1, DRBD_GENLA_F_MANDATORY | DRBD_F_SENSITIVE,
+ shared_secret, SHARED_SECRET_MAX)
+ __str_field_def(2, DRBD_GENLA_F_MANDATORY, cram_hmac_alg, SHARED_SECRET_MAX)
+ __str_field_def(3, DRBD_GENLA_F_MANDATORY, integrity_alg, SHARED_SECRET_MAX)
+ __str_field_def(4, DRBD_GENLA_F_MANDATORY, verify_alg, SHARED_SECRET_MAX)
+ __str_field_def(5, DRBD_GENLA_F_MANDATORY, csums_alg, SHARED_SECRET_MAX)
+ __u32_field_def(6, DRBD_GENLA_F_MANDATORY, wire_protocol, DRBD_PROTOCOL_DEF)
+ __u32_field_def(7, DRBD_GENLA_F_MANDATORY, connect_int, DRBD_CONNECT_INT_DEF)
+ __u32_field_def(8, DRBD_GENLA_F_MANDATORY, timeout, DRBD_TIMEOUT_DEF)
+ __u32_field_def(9, DRBD_GENLA_F_MANDATORY, ping_int, DRBD_PING_INT_DEF)
+ __u32_field_def(10, DRBD_GENLA_F_MANDATORY, ping_timeo, DRBD_PING_TIMEO_DEF)
+ __u32_field_def(11, DRBD_GENLA_F_MANDATORY, sndbuf_size, DRBD_SNDBUF_SIZE_DEF)
+ __u32_field_def(12, DRBD_GENLA_F_MANDATORY, rcvbuf_size, DRBD_RCVBUF_SIZE_DEF)
+ __u32_field_def(13, DRBD_GENLA_F_MANDATORY, ko_count, DRBD_KO_COUNT_DEF)
+ __u32_field_def(14, DRBD_GENLA_F_MANDATORY, max_buffers, DRBD_MAX_BUFFERS_DEF)
+ __u32_field_def(15, DRBD_GENLA_F_MANDATORY, max_epoch_size, DRBD_MAX_EPOCH_SIZE_DEF)
+ __u32_field_def(16, DRBD_GENLA_F_MANDATORY, unplug_watermark, DRBD_UNPLUG_WATERMARK_DEF)
+ __u32_field_def(17, DRBD_GENLA_F_MANDATORY, after_sb_0p, DRBD_AFTER_SB_0P_DEF)
+ __u32_field_def(18, DRBD_GENLA_F_MANDATORY, after_sb_1p, DRBD_AFTER_SB_1P_DEF)
+ __u32_field_def(19, DRBD_GENLA_F_MANDATORY, after_sb_2p, DRBD_AFTER_SB_2P_DEF)
+ __u32_field_def(20, DRBD_GENLA_F_MANDATORY, rr_conflict, DRBD_RR_CONFLICT_DEF)
+ __u32_field_def(21, DRBD_GENLA_F_MANDATORY, on_congestion, DRBD_ON_CONGESTION_DEF)
+ __u32_field_def(22, DRBD_GENLA_F_MANDATORY, cong_fill, DRBD_CONG_FILL_DEF)
+ __u32_field_def(23, DRBD_GENLA_F_MANDATORY, cong_extents, DRBD_CONG_EXTENTS_DEF)
+ __flg_field_def(24, DRBD_GENLA_F_MANDATORY, two_primaries, DRBD_ALLOW_TWO_PRIMARIES_DEF)
+ __flg_field(25, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, discard_my_data)
+ __flg_field_def(26, DRBD_GENLA_F_MANDATORY, tcp_cork, DRBD_TCP_CORK_DEF)
+ __flg_field_def(27, DRBD_GENLA_F_MANDATORY, always_asbp, DRBD_ALWAYS_ASBP_DEF)
+ __flg_field(28, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, tentative)
+ __flg_field_def(29, DRBD_GENLA_F_MANDATORY, use_rle, DRBD_USE_RLE_DEF)
+ /* 9: __u32_field_def(30, DRBD_GENLA_F_MANDATORY, fencing_policy, DRBD_FENCING_DEF) */
+ /* 9: __str_field_def(31, DRBD_GENLA_F_MANDATORY, name, SHARED_SECRET_MAX) */
+ /* 9: __u32_field(32, DRBD_F_REQUIRED | DRBD_F_INVARIANT, peer_node_id) */
+ __flg_field_def(33, 0 /* OPTIONAL */, csums_after_crash_only, DRBD_CSUMS_AFTER_CRASH_ONLY_DEF)
+ __u32_field_def(34, 0 /* OPTIONAL */, sock_check_timeo, DRBD_SOCKET_CHECK_TIMEO_DEF)
+)
+
+GENL_struct(DRBD_NLA_SET_ROLE_PARMS, 6, set_role_parms,
+ __flg_field(1, DRBD_GENLA_F_MANDATORY, assume_uptodate)
+)
+
+GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms,
+ __u64_field(1, DRBD_GENLA_F_MANDATORY, resize_size)
+ __flg_field(2, DRBD_GENLA_F_MANDATORY, resize_force)
+ __flg_field(3, DRBD_GENLA_F_MANDATORY, no_resync)
+ __u32_field_def(4, 0 /* OPTIONAL */, al_stripes, DRBD_AL_STRIPES_DEF)
+ __u32_field_def(5, 0 /* OPTIONAL */, al_stripe_size, DRBD_AL_STRIPE_SIZE_DEF)
+)
+
+GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info,
+ /* the reason of the broadcast,
+ * if this is an event triggered broadcast. */
+ __u32_field(1, DRBD_GENLA_F_MANDATORY, sib_reason)
+ __u32_field(2, DRBD_F_REQUIRED, current_state)
+ __u64_field(3, DRBD_GENLA_F_MANDATORY, capacity)
+ __u64_field(4, DRBD_GENLA_F_MANDATORY, ed_uuid)
+
+ /* These are for broadcast from after state change work.
+ * prev_state and new_state are from the moment the state change took
+ * place; new_state is not necessarily the same as current_state,
+ * as there may have been more state changes since, which will be
+ * broadcast soon in their respective after-state-change work. */
+ __u32_field(5, DRBD_GENLA_F_MANDATORY, prev_state)
+ __u32_field(6, DRBD_GENLA_F_MANDATORY, new_state)
+
+ /* if we have a local disk: */
+ __bin_field(7, DRBD_GENLA_F_MANDATORY, uuids, (UI_SIZE*sizeof(__u64)))
+ __u32_field(8, DRBD_GENLA_F_MANDATORY, disk_flags)
+ __u64_field(9, DRBD_GENLA_F_MANDATORY, bits_total)
+ __u64_field(10, DRBD_GENLA_F_MANDATORY, bits_oos)
+ /* and in case resync or online verify is active */
+ __u64_field(11, DRBD_GENLA_F_MANDATORY, bits_rs_total)
+ __u64_field(12, DRBD_GENLA_F_MANDATORY, bits_rs_failed)
+
+ /* for pre and post notifications of helper execution */
+ __str_field(13, DRBD_GENLA_F_MANDATORY, helper, 32)
+ __u32_field(14, DRBD_GENLA_F_MANDATORY, helper_exit_code)
+
+ __u64_field(15, 0, send_cnt)
+ __u64_field(16, 0, recv_cnt)
+ __u64_field(17, 0, read_cnt)
+ __u64_field(18, 0, writ_cnt)
+ __u64_field(19, 0, al_writ_cnt)
+ __u64_field(20, 0, bm_writ_cnt)
+ __u32_field(21, 0, ap_bio_cnt)
+ __u32_field(22, 0, ap_pending_cnt)
+ __u32_field(23, 0, rs_pending_cnt)
+)
+
+GENL_struct(DRBD_NLA_START_OV_PARMS, 9, start_ov_parms,
+ __u64_field(1, DRBD_GENLA_F_MANDATORY, ov_start_sector)
+ __u64_field(2, DRBD_GENLA_F_MANDATORY, ov_stop_sector)
+)
+
+GENL_struct(DRBD_NLA_NEW_C_UUID_PARMS, 10, new_c_uuid_parms,
+ __flg_field(1, DRBD_GENLA_F_MANDATORY, clear_bm)
+)
+
+GENL_struct(DRBD_NLA_TIMEOUT_PARMS, 11, timeout_parms,
+ __u32_field(1, DRBD_F_REQUIRED, timeout_type)
+)
+
+GENL_struct(DRBD_NLA_DISCONNECT_PARMS, 12, disconnect_parms,
+ __flg_field(1, DRBD_GENLA_F_MANDATORY, force_disconnect)
+)
+
+GENL_struct(DRBD_NLA_DETACH_PARMS, 13, detach_parms,
+ __flg_field(1, DRBD_GENLA_F_MANDATORY, force_detach)
+)
+
+/*
+ * Notifications and commands (genlmsghdr->cmd)
+ */
+GENL_mc_group(events)
+
+ /* kernel -> userspace announcement of changes */
+GENL_notification(
+ DRBD_EVENT, 1, events,
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_STATE_INFO, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_DISK_CONF, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_SYNCER_CONF, DRBD_GENLA_F_MANDATORY)
+)
+
+ /* query kernel for specific or all info */
+GENL_op(
+ DRBD_ADM_GET_STATUS, 2,
+ GENL_op_init(
+ .doit = drbd_adm_get_status,
+ .dumpit = drbd_adm_get_status_all,
+ /* anyone may ask for the status,
+ * it is broadcast anyway */
+ ),
+ /* To select the object .doit.
+ * Or a subset of objects in .dumpit. */
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
+)
+
+ /* add DRBD minor devices as volumes to resources */
+GENL_op(DRBD_ADM_NEW_MINOR, 5, GENL_doit(drbd_adm_new_minor),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_DEL_MINOR, 6, GENL_doit(drbd_adm_del_minor),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+
+ /* add or delete resources */
+GENL_op(DRBD_ADM_NEW_RESOURCE, 7, GENL_doit(drbd_adm_new_resource),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_DEL_RESOURCE, 8, GENL_doit(drbd_adm_del_resource),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+
+GENL_op(DRBD_ADM_RESOURCE_OPTS, 9,
+ GENL_doit(drbd_adm_resource_opts),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_OPTS, DRBD_GENLA_F_MANDATORY)
+)
+
+GENL_op(
+ DRBD_ADM_CONNECT, 10,
+ GENL_doit(drbd_adm_connect),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_F_REQUIRED)
+)
+
+GENL_op(
+ DRBD_ADM_CHG_NET_OPTS, 29,
+ GENL_doit(drbd_adm_net_opts),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NET_CONF, DRBD_F_REQUIRED)
+)
+
+GENL_op(DRBD_ADM_DISCONNECT, 11, GENL_doit(drbd_adm_disconnect),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+
+GENL_op(DRBD_ADM_ATTACH, 12,
+ GENL_doit(drbd_adm_attach),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_DISK_CONF, DRBD_F_REQUIRED)
+)
+
+GENL_op(DRBD_ADM_CHG_DISK_OPTS, 28,
+ GENL_doit(drbd_adm_disk_opts),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_DISK_OPTS, DRBD_F_REQUIRED)
+)
+
+GENL_op(
+ DRBD_ADM_RESIZE, 13,
+ GENL_doit(drbd_adm_resize),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_RESIZE_PARMS, DRBD_GENLA_F_MANDATORY)
+)
+
+GENL_op(
+ DRBD_ADM_PRIMARY, 14,
+ GENL_doit(drbd_adm_set_role),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_SET_ROLE_PARMS, DRBD_F_REQUIRED)
+)
+
+GENL_op(
+ DRBD_ADM_SECONDARY, 15,
+ GENL_doit(drbd_adm_set_role),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_SET_ROLE_PARMS, DRBD_F_REQUIRED)
+)
+
+GENL_op(
+ DRBD_ADM_NEW_C_UUID, 16,
+ GENL_doit(drbd_adm_new_c_uuid),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NEW_C_UUID_PARMS, DRBD_GENLA_F_MANDATORY)
+)
+
+GENL_op(
+ DRBD_ADM_START_OV, 17,
+ GENL_doit(drbd_adm_start_ov),
+ GENL_tla_expected(DRBD_NLA_START_OV_PARMS, DRBD_GENLA_F_MANDATORY)
+)
+
+GENL_op(DRBD_ADM_DETACH, 18, GENL_doit(drbd_adm_detach),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_DETACH_PARMS, DRBD_GENLA_F_MANDATORY))
+
+GENL_op(DRBD_ADM_INVALIDATE, 19, GENL_doit(drbd_adm_invalidate),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_INVAL_PEER, 20, GENL_doit(drbd_adm_invalidate_peer),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_PAUSE_SYNC, 21, GENL_doit(drbd_adm_pause_sync),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_RESUME_SYNC, 22, GENL_doit(drbd_adm_resume_sync),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_SUSPEND_IO, 23, GENL_doit(drbd_adm_suspend_io),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_RESUME_IO, 24, GENL_doit(drbd_adm_resume_io),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_OUTDATE, 25, GENL_doit(drbd_adm_outdate),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_GET_TIMEOUT_TYPE, 26, GENL_doit(drbd_adm_get_timeout_type),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+GENL_op(DRBD_ADM_DOWN, 27, GENL_doit(drbd_adm_down),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
diff --git a/include/linux/drbd_genl_api.h b/include/linux/drbd_genl_api.h
new file mode 100644
index 000000000..9ef50d51e
--- /dev/null
+++ b/include/linux/drbd_genl_api.h
@@ -0,0 +1,55 @@
+#ifndef DRBD_GENL_STRUCT_H
+#define DRBD_GENL_STRUCT_H
+
+/**
+ * struct drbd_genlmsghdr - DRBD specific header used in NETLINK_GENERIC requests
+ * @minor:
+ * For admin requests (user -> kernel): which minor device to operate on.
+ * For (unicast) replies or informational (broadcast) messages
+ * (kernel -> user): which minor device the information is about.
+ * If we do not operate on minors, but on connections or resources,
+ * the minor value shall be (~0), and the attribute DRBD_NLA_CFG_CONTEXT
+ * is used instead.
+ * @flags: possible operation modifiers (relevant only for user->kernel):
+ * DRBD_GENL_F_SET_DEFAULTS
+ * @volume:
+ * When creating a new minor (adding it to a resource), the resource needs
+ * to know which volume number within the resource this is supposed to be.
+ * The volume number corresponds to the same volume number on the remote side,
+ * whereas the minor number on the remote side may be different
+ * (union with flags).
+ * @ret_code: kernel->userland unicast cfg reply return code (union with flags);
+ */
+struct drbd_genlmsghdr {
+ __u32 minor;
+ union {
+ __u32 flags;
+ __s32 ret_code;
+ };
+};
+
+/* To be used in drbd_genlmsghdr.flags */
+enum {
+ DRBD_GENL_F_SET_DEFAULTS = 1,
+};
+
+enum drbd_state_info_bcast_reason {
+ SIB_GET_STATUS_REPLY = 1,
+ SIB_STATE_CHANGE = 2,
+ SIB_HELPER_PRE = 3,
+ SIB_HELPER_POST = 4,
+ SIB_SYNC_PROGRESS = 5,
+};
+
+/* hack around predefined gcc/cpp "linux=1",
+ * we cannot possibly include <1/drbd_genl.h> */
+#undef linux
+
+#include <linux/drbd.h>
+#define GENL_MAGIC_VERSION API_VERSION
+#define GENL_MAGIC_FAMILY drbd
+#define GENL_MAGIC_FAMILY_HDRSZ sizeof(struct drbd_genlmsghdr)
+#define GENL_MAGIC_INCLUDE_FILE <linux/drbd_genl.h>
+#include <linux/genl_magic_struct.h>
+
+#endif
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
new file mode 100644
index 000000000..8ac8c5d9a
--- /dev/null
+++ b/include/linux/drbd_limits.h
@@ -0,0 +1,233 @@
+/*
+ drbd_limits.h
+ This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+*/
+
+/*
+ * Our current limitations.
+ * Some of them are hard limits,
+ * some of them are arbitrary range limits, that make it easier to provide
+ * feedback about nonsense settings for certain configurable values.
+ */
+
+#ifndef DRBD_LIMITS_H
+#define DRBD_LIMITS_H 1
+
+#define DEBUG_RANGE_CHECK 0
+
+#define DRBD_MINOR_COUNT_MIN 1
+#define DRBD_MINOR_COUNT_MAX 255
+#define DRBD_MINOR_COUNT_DEF 32
+#define DRBD_MINOR_COUNT_SCALE '1'
+
+#define DRBD_VOLUME_MAX 65535
+
+#define DRBD_DIALOG_REFRESH_MIN 0
+#define DRBD_DIALOG_REFRESH_MAX 600
+#define DRBD_DIALOG_REFRESH_SCALE '1'
+
+/* valid port number */
+#define DRBD_PORT_MIN 1
+#define DRBD_PORT_MAX 0xffff
+#define DRBD_PORT_SCALE '1'
+
+/* startup { */
+ /* if you want more than 3.4 days, disable */
+#define DRBD_WFC_TIMEOUT_MIN 0
+#define DRBD_WFC_TIMEOUT_MAX 300000
+#define DRBD_WFC_TIMEOUT_DEF 0
+#define DRBD_WFC_TIMEOUT_SCALE '1'
+
+#define DRBD_DEGR_WFC_TIMEOUT_MIN 0
+#define DRBD_DEGR_WFC_TIMEOUT_MAX 300000
+#define DRBD_DEGR_WFC_TIMEOUT_DEF 0
+#define DRBD_DEGR_WFC_TIMEOUT_SCALE '1'
+
+#define DRBD_OUTDATED_WFC_TIMEOUT_MIN 0
+#define DRBD_OUTDATED_WFC_TIMEOUT_MAX 300000
+#define DRBD_OUTDATED_WFC_TIMEOUT_DEF 0
+#define DRBD_OUTDATED_WFC_TIMEOUT_SCALE '1'
+/* }*/
+
+/* net { */
+ /* timeout, unit centi seconds
+ * more than one minute timeout is not useful */
+#define DRBD_TIMEOUT_MIN 1
+#define DRBD_TIMEOUT_MAX 600
+#define DRBD_TIMEOUT_DEF 60 /* 6 seconds */
+#define DRBD_TIMEOUT_SCALE '1'
+
+ /* If backing disk takes longer than disk_timeout, mark the disk as failed */
+#define DRBD_DISK_TIMEOUT_MIN 0 /* 0 = disabled */
+#define DRBD_DISK_TIMEOUT_MAX 6000 /* 10 Minutes */
+#define DRBD_DISK_TIMEOUT_DEF 0 /* disabled */
+#define DRBD_DISK_TIMEOUT_SCALE '1'
+
+ /* active connection retries when C_WF_CONNECTION */
+#define DRBD_CONNECT_INT_MIN 1
+#define DRBD_CONNECT_INT_MAX 120
+#define DRBD_CONNECT_INT_DEF 10 /* seconds */
+#define DRBD_CONNECT_INT_SCALE '1'
+
+ /* keep-alive probes when idle */
+#define DRBD_PING_INT_MIN 1
+#define DRBD_PING_INT_MAX 120
+#define DRBD_PING_INT_DEF 10
+#define DRBD_PING_INT_SCALE '1'
+
+ /* timeout for the ping packets.*/
+#define DRBD_PING_TIMEO_MIN 1
+#define DRBD_PING_TIMEO_MAX 300
+#define DRBD_PING_TIMEO_DEF 5
+#define DRBD_PING_TIMEO_SCALE '1'
+
+ /* max number of write requests between write barriers */
+#define DRBD_MAX_EPOCH_SIZE_MIN 1
+#define DRBD_MAX_EPOCH_SIZE_MAX 20000
+#define DRBD_MAX_EPOCH_SIZE_DEF 2048
+#define DRBD_MAX_EPOCH_SIZE_SCALE '1'
+
+ /* I don't think that a tcp send buffer of more than 10M is useful */
+#define DRBD_SNDBUF_SIZE_MIN 0
+#define DRBD_SNDBUF_SIZE_MAX (10<<20)
+#define DRBD_SNDBUF_SIZE_DEF 0
+#define DRBD_SNDBUF_SIZE_SCALE '1'
+
+#define DRBD_RCVBUF_SIZE_MIN 0
+#define DRBD_RCVBUF_SIZE_MAX (10<<20)
+#define DRBD_RCVBUF_SIZE_DEF 0
+#define DRBD_RCVBUF_SIZE_SCALE '1'
+
+ /* @4k PageSize -> 128kB - 512MB */
+#define DRBD_MAX_BUFFERS_MIN 32
+#define DRBD_MAX_BUFFERS_MAX 131072
+#define DRBD_MAX_BUFFERS_DEF 2048
+#define DRBD_MAX_BUFFERS_SCALE '1'
+
+ /* @4k PageSize -> 4kB - 512MB */
+#define DRBD_UNPLUG_WATERMARK_MIN 1
+#define DRBD_UNPLUG_WATERMARK_MAX 131072
+#define DRBD_UNPLUG_WATERMARK_DEF (DRBD_MAX_BUFFERS_DEF/16)
+#define DRBD_UNPLUG_WATERMARK_SCALE '1'
+
+ /* 0 is disabled.
+ * 200 should be more than enough even for very short timeouts */
+#define DRBD_KO_COUNT_MIN 0
+#define DRBD_KO_COUNT_MAX 200
+#define DRBD_KO_COUNT_DEF 7
+#define DRBD_KO_COUNT_SCALE '1'
+/* } */
+
+/* syncer { */
+ /* FIXME allow rate to be zero? */
+#define DRBD_RESYNC_RATE_MIN 1
+/* channel bonding 10 GbE, or other hardware */
+#define DRBD_RESYNC_RATE_MAX (4 << 20)
+#define DRBD_RESYNC_RATE_DEF 250
+#define DRBD_RESYNC_RATE_SCALE 'k' /* kilobytes */
+
+ /* less than 7 would hit performance unnecessarily. */
+#define DRBD_AL_EXTENTS_MIN 7
+ /* we use u16 as "slot number", (u16)~0 is "FREE".
+ * If you use >= 292 kB on-disk ring buffer,
+ * this is the maximum you can use: */
+#define DRBD_AL_EXTENTS_MAX 0xfffe
+#define DRBD_AL_EXTENTS_DEF 1237
+#define DRBD_AL_EXTENTS_SCALE '1'
+
+#define DRBD_MINOR_NUMBER_MIN -1
+#define DRBD_MINOR_NUMBER_MAX ((1 << 20) - 1)
+#define DRBD_MINOR_NUMBER_DEF -1
+#define DRBD_MINOR_NUMBER_SCALE '1'
+
+/* } */
+
+/* drbdsetup XY resize -d Z
+ * you are free to reduce the device size to nothing, if you want to.
+ * the upper limit with 64bit kernel, enough ram and flexible meta data
+ * is 1 PiB, currently. */
+/* DRBD_MAX_SECTORS */
+#define DRBD_DISK_SIZE_MIN 0
+#define DRBD_DISK_SIZE_MAX (1 * (2LLU << 40))
+#define DRBD_DISK_SIZE_DEF 0 /* = disabled = no user size... */
+#define DRBD_DISK_SIZE_SCALE 's' /* sectors */
+
+#define DRBD_ON_IO_ERROR_DEF EP_DETACH
+#define DRBD_FENCING_DEF FP_DONT_CARE
+#define DRBD_AFTER_SB_0P_DEF ASB_DISCONNECT
+#define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT
+#define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT
+#define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT
+#define DRBD_ON_NO_DATA_DEF OND_IO_ERROR
+#define DRBD_ON_CONGESTION_DEF OC_BLOCK
+#define DRBD_READ_BALANCING_DEF RB_PREFER_LOCAL
+
+#define DRBD_MAX_BIO_BVECS_MIN 0
+#define DRBD_MAX_BIO_BVECS_MAX 128
+#define DRBD_MAX_BIO_BVECS_DEF 0
+#define DRBD_MAX_BIO_BVECS_SCALE '1'
+
+#define DRBD_C_PLAN_AHEAD_MIN 0
+#define DRBD_C_PLAN_AHEAD_MAX 300
+#define DRBD_C_PLAN_AHEAD_DEF 20
+#define DRBD_C_PLAN_AHEAD_SCALE '1'
+
+#define DRBD_C_DELAY_TARGET_MIN 1
+#define DRBD_C_DELAY_TARGET_MAX 100
+#define DRBD_C_DELAY_TARGET_DEF 10
+#define DRBD_C_DELAY_TARGET_SCALE '1'
+
+#define DRBD_C_FILL_TARGET_MIN 0
+#define DRBD_C_FILL_TARGET_MAX (1<<20) /* ~500 MByte, in sectors */
+#define DRBD_C_FILL_TARGET_DEF 100 /* Try to place 50KiB in socket send buffer during resync */
+#define DRBD_C_FILL_TARGET_SCALE 's' /* sectors */
+
+#define DRBD_C_MAX_RATE_MIN 250
+#define DRBD_C_MAX_RATE_MAX (4 << 20)
+#define DRBD_C_MAX_RATE_DEF 102400
+#define DRBD_C_MAX_RATE_SCALE 'k' /* kilobytes */
+
+#define DRBD_C_MIN_RATE_MIN 0
+#define DRBD_C_MIN_RATE_MAX (4 << 20)
+#define DRBD_C_MIN_RATE_DEF 250
+#define DRBD_C_MIN_RATE_SCALE 'k' /* kilobytes */
+
+#define DRBD_CONG_FILL_MIN 0
+#define DRBD_CONG_FILL_MAX (10<<21) /* 10GByte in sectors */
+#define DRBD_CONG_FILL_DEF 0
+#define DRBD_CONG_FILL_SCALE 's' /* sectors */
+
+#define DRBD_CONG_EXTENTS_MIN DRBD_AL_EXTENTS_MIN
+#define DRBD_CONG_EXTENTS_MAX DRBD_AL_EXTENTS_MAX
+#define DRBD_CONG_EXTENTS_DEF DRBD_AL_EXTENTS_DEF
+#define DRBD_CONG_EXTENTS_SCALE DRBD_AL_EXTENTS_SCALE
+
+#define DRBD_PROTOCOL_DEF DRBD_PROT_C
+
+#define DRBD_DISK_BARRIER_DEF 0
+#define DRBD_DISK_FLUSHES_DEF 1
+#define DRBD_DISK_DRAIN_DEF 1
+#define DRBD_MD_FLUSHES_DEF 1
+#define DRBD_TCP_CORK_DEF 1
+#define DRBD_AL_UPDATES_DEF 1
+
+#define DRBD_ALLOW_TWO_PRIMARIES_DEF 0
+#define DRBD_ALWAYS_ASBP_DEF 0
+#define DRBD_USE_RLE_DEF 1
+#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF 0
+
+#define DRBD_AL_STRIPES_MIN 1
+#define DRBD_AL_STRIPES_MAX 1024
+#define DRBD_AL_STRIPES_DEF 1
+#define DRBD_AL_STRIPES_SCALE '1'
+
+#define DRBD_AL_STRIPE_SIZE_MIN 4
+#define DRBD_AL_STRIPE_SIZE_MAX 16777216
+#define DRBD_AL_STRIPE_SIZE_DEF 32
+#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
+
+#define DRBD_SOCKET_CHECK_TIMEO_MIN 0
+#define DRBD_SOCKET_CHECK_TIMEO_MAX DRBD_PING_TIMEO_MAX
+#define DRBD_SOCKET_CHECK_TIMEO_DEF 0
+#define DRBD_SOCKET_CHECK_TIMEO_SCALE '1'
+#endif
diff --git a/include/linux/ds1286.h b/include/linux/ds1286.h
new file mode 100644
index 000000000..45ea0aa0a
--- /dev/null
+++ b/include/linux/ds1286.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 1998, 1999, 2003 Ralf Baechle
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __LINUX_DS1286_H
+#define __LINUX_DS1286_H
+
+/**********************************************************************
+ * register summary
+ **********************************************************************/
+#define RTC_HUNDREDTH_SECOND 0
+#define RTC_SECONDS 1
+#define RTC_MINUTES 2
+#define RTC_MINUTES_ALARM 3
+#define RTC_HOURS 4
+#define RTC_HOURS_ALARM 5
+#define RTC_DAY 6
+#define RTC_DAY_ALARM 7
+#define RTC_DATE 8
+#define RTC_MONTH 9
+#define RTC_YEAR 10
+#define RTC_CMD 11
+#define RTC_WHSEC 12
+#define RTC_WSEC 13
+#define RTC_UNUSED 14
+
+/* RTC_*_alarm is always true if 2 MSBs are set */
+# define RTC_ALARM_DONT_CARE 0xC0
+
+
+/*
+ * Bits in the month register
+ */
+#define RTC_EOSC 0x80
+#define RTC_ESQW 0x40
+
+/*
+ * Bits in the Command register
+ */
+#define RTC_TDF 0x01
+#define RTC_WAF 0x02
+#define RTC_TDM 0x04
+#define RTC_WAM 0x08
+#define RTC_PU_LVL 0x10
+#define RTC_IBH_LO 0x20
+#define RTC_IPSW 0x40
+#define RTC_TE 0x80
+
+#endif /* __LINUX_DS1286_H */
diff --git a/include/linux/ds17287rtc.h b/include/linux/ds17287rtc.h
new file mode 100644
index 000000000..d85d3f497
--- /dev/null
+++ b/include/linux/ds17287rtc.h
@@ -0,0 +1,66 @@
+/*
+ * ds17287rtc.h - register definitions for the ds1728[57] RTC / CMOS RAM
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * (C) 2003 Guido Guenther <agx@sigxcpu.org>
+ */
+#ifndef __LINUX_DS17287RTC_H
+#define __LINUX_DS17287RTC_H
+
+#include <linux/rtc.h> /* get the user-level API */
+#include <linux/mc146818rtc.h>
+
+/* Register A */
+#define DS_REGA_DV2 0x40 /* countdown chain */
+#define DS_REGA_DV1 0x20 /* oscillator enable */
+#define DS_REGA_DV0 0x10 /* bank select */
+
+/* bank 1 registers */
+#define DS_B1_MODEL 0x40 /* model number byte */
+#define DS_B1_SN1 0x41 /* serial number byte 1 */
+#define DS_B1_SN2 0x42 /* serial number byte 2 */
+#define DS_B1_SN3 0x43 /* serial number byte 3 */
+#define DS_B1_SN4 0x44 /* serial number byte 4 */
+#define DS_B1_SN5 0x45 /* serial number byte 5 */
+#define DS_B1_SN6 0x46 /* serial number byte 6 */
+#define DS_B1_CRC 0x47 /* CRC byte */
+#define DS_B1_CENTURY 0x48 /* Century byte */
+#define DS_B1_DALARM 0x49 /* date alarm */
+#define DS_B1_XCTRL4A 0x4a /* extended control register 4a */
+#define DS_B1_XCTRL4B 0x4b /* extended control register 4b */
+#define DS_B1_RTCADDR2 0x4e /* rtc address 2 */
+#define DS_B1_RTCADDR3 0x4f /* rtc address 3 */
+#define DS_B1_RAMLSB 0x50 /* extended ram LSB */
+#define DS_B1_RAMMSB 0x51 /* extended ram MSB */
+#define DS_B1_RAMDPORT 0x53 /* extended ram data port */
+
+/* register details */
+/* extended control register 4a */
+#define DS_XCTRL4A_VRT2 0x80 /* valid ram and time */
+#define DS_XCTRL4A_INCR 0x40 /* increment progress status */
+#define DS_XCTRL4A_BME 0x20 /* burst mode enable */
+#define DS_XCTRL4A_PAB 0x08 /* power active bar ctrl */
+#define DS_XCTRL4A_RF 0x04 /* ram clear flag */
+#define DS_XCTRL4A_WF 0x02 /* wake up alarm flag */
+#define DS_XCTRL4A_KF 0x01 /* kickstart flag */
+
+/* interrupt causes */
+#define DS_XCTRL4A_IFS (DS_XCTRL4A_RF|DS_XCTRL4A_WF|DS_XCTRL4A_KF)
+
+/* extended control register 4b */
+#define DS_XCTRL4B_ABE 0x80 /* auxiliary battery enable */
+#define DS_XCTRL4B_E32K 0x40 /* enable 32.768 kHz Output */
+#define DS_XCTRL4B_CS 0x20 /* crystal select */
+#define DS_XCTRL4B_RCE 0x10 /* ram clear enable */
+#define DS_XCTRL4B_PRS 0x08 /* PAB reset select */
+#define DS_XCTRL4B_RIE 0x04 /* ram clear interrupt enable */
+#define DS_XCTRL4B_WFE 0x02 /* wake up alarm interrupt enable */
+#define DS_XCTRL4B_KFE 0x01 /* kickstart interrupt enable */
+
+/* interrupt enable bits */
+#define DS_XCTRL4B_IFES (DS_XCTRL4B_RIE|DS_XCTRL4B_WFE|DS_XCTRL4B_KFE)
+
+#endif /* __LINUX_DS17287RTC_H */
diff --git a/include/linux/ds2782_battery.h b/include/linux/ds2782_battery.h
new file mode 100644
index 000000000..b4e281f65
--- /dev/null
+++ b/include/linux/ds2782_battery.h
@@ -0,0 +1,8 @@
+#ifndef __LINUX_DS2782_BATTERY_H
+#define __LINUX_DS2782_BATTERY_H
+
+struct ds278x_platform_data {
+ int rsns;
+};
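+
+/*
+ * Editorial sketch: boards usually hand this structure to the driver via I2C
+ * board info.  The chip name, address and sense-resistor value below are
+ * placeholders, not values taken from any real board file:
+ *
+ *	static struct ds278x_platform_data ds278x_pdata = { .rsns = 20 };
+ *	static struct i2c_board_info ds278x_info = {
+ *		I2C_BOARD_INFO("ds2786", 0x36),
+ *		.platform_data = &ds278x_pdata,
+ *	};
+ */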
+
+#endif
diff --git a/include/linux/dtlk.h b/include/linux/dtlk.h
new file mode 100644
index 000000000..22a7b9a5f
--- /dev/null
+++ b/include/linux/dtlk.h
@@ -0,0 +1,85 @@
+#define DTLK_MINOR 0
+#define DTLK_IO_EXTENT 0x02
+
+ /* ioctl's use magic number of 0xa3 */
+#define DTLK_INTERROGATE 0xa390 /* get settings from the DoubleTalk */
+#define DTLK_STATUS 0xa391 /* get status from the DoubleTalk */
+
+
+#define DTLK_CLEAR 0x18 /* stops speech */
+
+#define DTLK_MAX_RETRIES (loops_per_jiffy/(10000/HZ))
+
+ /* TTS Port Status Flags */
+#define TTS_READABLE 0x80 /* mask for bit which is nonzero if a
+ byte can be read from the TTS port */
+#define TTS_SPEAKING 0x40 /* mask for SYNC bit, which is nonzero
+ while DoubleTalk is producing
+ output with TTS, PCM or CVSD
+ synthesizers or tone generators
+ (that is, all but LPC) */
+#define TTS_SPEAKING2 0x20 /* mask for SYNC2 bit,
+ which falls to zero up to 0.4 sec
+ before speech stops */
+#define TTS_WRITABLE 0x10 /* mask for RDY bit, which when set to
+ 1, indicates the TTS port is ready
+ to accept a byte of data. The RDY
+ bit goes to zero 2-3 usec after
+ writing, and goes back to 1 again
+ 180-190 usec later. */
+#define TTS_ALMOST_FULL 0x08 /* mask for AF bit: When set to 1,
+ indicates that less than 300 free
+ bytes are available in the TTS
+ input buffer. AF is always 0 in the
+ PCM, TGN and CVSD modes. */
+#define TTS_ALMOST_EMPTY 0x04 /* mask for AE bit: When set to 1,
+ indicates that less than 300 bytes
+ of data remain in DoubleTalk's
+ input (TTS or PCM) buffer. AE is
+ always 1 in the TGN and CVSD
+ modes. */
+
+ /* LPC speak commands */
+#define LPC_5220_NORMAL 0x60 /* 5220 format decoding table, normal rate */
+#define LPC_5220_FAST 0x64 /* 5220 format decoding table, fast rate */
+#define LPC_D6_NORMAL 0x20 /* D6 format decoding table, normal rate */
+#define LPC_D6_FAST 0x24 /* D6 format decoding table, fast rate */
+
+ /* LPC Port Status Flags (valid only after one of the LPC
+ speak commands) */
+#define LPC_SPEAKING 0x80 /* mask for TS bit: When set to 1,
+ indicates the LPC synthesizer is
+ producing speech.*/
+#define LPC_BUFFER_LOW 0x40 /* mask for BL bit: When set to 1,
+ indicates that the hardware LPC
+ data buffer has less than 30 bytes
+ remaining. (Total internal buffer
+ size = 4096 bytes.) */
+#define LPC_BUFFER_EMPTY 0x20 /* mask for BE bit: When set to 1,
+ indicates that the LPC data buffer
+ ran out of data (error condition if
+ TS is also 1). */
+
+ /* data returned by Interrogate command */
+struct dtlk_settings
+{
+ unsigned short serial_number; /* 0-7Fh:0-7Fh */
+ unsigned char rom_version[24]; /* null terminated string */
+ unsigned char mode; /* 0=Character; 1=Phoneme; 2=Text */
+ unsigned char punc_level; /* nB; 0-7 */
+ unsigned char formant_freq; /* nF; 0-9 */
+ unsigned char pitch; /* nP; 0-99 */
+ unsigned char speed; /* nS; 0-9 */
+ unsigned char volume; /* nV; 0-9 */
+ unsigned char tone; /* nX; 0-2 */
+ unsigned char expression; /* nE; 0-9 */
+ unsigned char ext_dict_loaded; /* 1=exception dictionary loaded */
+ unsigned char ext_dict_status; /* 1=exception dictionary enabled */
+ unsigned char free_ram; /* # pages (truncated) remaining for
+ text buffer */
+ unsigned char articulation; /* nA; 0-9 */
+ unsigned char reverb; /* nR; 0-9 */
+ unsigned char eob; /* 7Fh value indicating end of
+ parameter block */
+ unsigned char has_indexing; /* nonzero if indexing is implemented */
+};
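+
+/*
+ * Editorial sketch: the two ioctls near the top of this header are issued by
+ * userspace against the DoubleTalk character device; the device path is an
+ * assumption:
+ *
+ *	struct dtlk_settings s;
+ *	int fd = open("/dev/dtlk", O_RDWR);
+ *	ioctl(fd, DTLK_INTERROGATE, &s);
+ */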
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
new file mode 100644
index 000000000..1f79b2091
--- /dev/null
+++ b/include/linux/dw_apb_timer.h
@@ -0,0 +1,55 @@
+/*
+ * (C) Copyright 2009 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ *
+ * Shared with ARM platforms, Jamie Iles, Picochip 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Support for the Synopsys DesignWare APB Timers.
+ */
+#ifndef __DW_APB_TIMER_H__
+#define __DW_APB_TIMER_H__
+
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+
+#define APBTMRS_REG_SIZE 0x14
+
+struct dw_apb_timer {
+ void __iomem *base;
+ unsigned long freq;
+ int irq;
+};
+
+struct dw_apb_clock_event_device {
+ struct clock_event_device ced;
+ struct dw_apb_timer timer;
+ struct irqaction irqaction;
+ void (*eoi)(struct dw_apb_timer *);
+};
+
+struct dw_apb_clocksource {
+ struct dw_apb_timer timer;
+ struct clocksource cs;
+};
+
+void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced);
+void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced);
+void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced);
+void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced);
+
+struct dw_apb_clock_event_device *
+dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
+ void __iomem *base, int irq, unsigned long freq);
+struct dw_apb_clocksource *
+dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
+ unsigned long freq);
+void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs);
+void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs);
+cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs);
+
+#endif /* __DW_APB_TIMER_H__ */
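
A hedged sketch of how platform code might bring up the clocksource half of this API; the rating, name and the example_ helper are placeholders, not taken from the header.

/* Illustrative setup; base and freq are assumed to come from platform data. */
static void __init example_dw_apb_clocksource_setup(void __iomem *base,
						    unsigned long freq)
{
	struct dw_apb_clocksource *cs;

	cs = dw_apb_clocksource_init(300, "example_dw_apb", base, freq);
	if (!cs)
		return;

	dw_apb_clocksource_start(cs);		/* start the free-running counter */
	dw_apb_clocksource_register(cs);	/* hand it to the clocksource core */
}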
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
new file mode 100644
index 000000000..4f1bbc68c
--- /dev/null
+++ b/include/linux/dynamic_debug.h
@@ -0,0 +1,137 @@
+#ifndef _DYNAMIC_DEBUG_H
+#define _DYNAMIC_DEBUG_H
+
+/*
+ * An instance of this structure is created in a special
+ * ELF section at every dynamic debug callsite. At runtime,
+ * the special section is treated as an array of these.
+ */
+struct _ddebug {
+ /*
+ * These fields are used to drive the user interface
+ * for selecting and displaying debug callsites.
+ */
+ const char *modname;
+ const char *function;
+ const char *filename;
+ const char *format;
+ unsigned int lineno:18;
+ /*
+ * The flags field controls the behaviour at the callsite.
+ * The bits here are changed dynamically when the user
+ * writes commands to <debugfs>/dynamic_debug/control
+ */
+#define _DPRINTK_FLAGS_NONE 0
+#define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */
+#define _DPRINTK_FLAGS_INCL_MODNAME (1<<1)
+#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
+#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
+#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+#if defined DEBUG
+#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
+#else
+#define _DPRINTK_FLAGS_DEFAULT 0
+#endif
+ unsigned int flags:8;
+} __attribute__((aligned(8)));
+
+
+int ddebug_add_module(struct _ddebug *tab, unsigned int n,
+ const char *modname);
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+extern int ddebug_remove_module(const char *mod_name);
+extern __printf(2, 3)
+void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
+
+extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
+ const char *modname);
+
+struct device;
+
+extern __printf(3, 4)
+void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev,
+ const char *fmt, ...);
+
+struct net_device;
+
+extern __printf(3, 4)
+void __dynamic_netdev_dbg(struct _ddebug *descriptor,
+ const struct net_device *dev,
+ const char *fmt, ...);
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ static struct _ddebug __aligned(8) \
+ __attribute__((section("__verbose"))) name = { \
+ .modname = KBUILD_MODNAME, \
+ .function = __func__, \
+ .filename = __FILE__, \
+ .format = (fmt), \
+ .lineno = __LINE__, \
+ .flags = _DPRINTK_FLAGS_DEFAULT, \
+ }
+
+#define dynamic_pr_debug(fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define dynamic_dev_dbg(dev, fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ __dynamic_dev_dbg(&descriptor, dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define dynamic_netdev_dbg(dev, fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ __dynamic_netdev_dbg(&descriptor, dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
+ __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ print_hex_dump(KERN_DEBUG, prefix_str, \
+ prefix_type, rowsize, groupsize, \
+ buf, len, ascii); \
+} while (0)
+
+#else
+
+#include <linux/string.h>
+#include <linux/errno.h>
+
+static inline int ddebug_remove_module(const char *mod)
+{
+ return 0;
+}
+
+static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
+ const char *modname)
+{
+ if (strstr(param, "dyndbg")) {
+ /* avoid pr_warn(), which wants pr_fmt() fully defined */
+ printk(KERN_WARNING "dyndbg param is supported only in "
+ "CONFIG_DYNAMIC_DEBUG builds\n");
+ return 0; /* allow and ignore */
+ }
+ return -EINVAL;
+}
+
+#define dynamic_pr_debug(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
+#define dynamic_dev_dbg(dev, fmt, ...) \
+ do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#endif
+
+#endif
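
Usage sketch (function and file names are placeholders): with CONFIG_DYNAMIC_DEBUG each call site below gets its own struct _ddebug in the "__verbose" section, and the message is printed only once the callsite's print flag has been enabled through <debugfs>/dynamic_debug/control, e.g. with a control line such as "file example_drv.c +p".

/* Hypothetical call site; a cheap flag test until 'p' is set at runtime. */
static int example_drv_probe(void)
{
	dynamic_pr_debug("probe reached, value=%d\n", 42);
	return 0;
}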
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
new file mode 100644
index 000000000..a4be70398
--- /dev/null
+++ b/include/linux/dynamic_queue_limits.h
@@ -0,0 +1,105 @@
+/*
+ * Dynamic queue limits (dql) - Definitions
+ *
+ * Copyright (c) 2011, Tom Herbert <therbert@google.com>
+ *
+ * This header file contains the definitions for dynamic queue limits (dql).
+ * dql would be used in conjunction with a producer/consumer type queue
+ * (possibly a HW queue). Such a queue would have these general properties:
+ *
+ * 1) Objects are queued up to some limit specified as number of objects.
+ * 2) Periodically a completion process executes which retires consumed
+ * objects.
+ * 3) Starvation occurs when the limit has been reached, all queued data has
+ *    actually been consumed, but completion processing has not yet run,
+ *    so queuing new data is blocked.
+ * 4) Minimizing the amount of queued data is desirable.
+ *
+ * The goal of dql is to calculate the limit as the minimum number of objects
+ * needed to prevent starvation.
+ *
+ * The primary functions of dql are:
+ * dql_queued - called when objects are enqueued to record number of objects
+ * dql_avail - returns how many objects are available to be queued based
+ * on the object limit and how many objects are already enqueued
+ * dql_completed - called at completion time to indicate how many objects
+ * were retired from the queue
+ *
+ * The dql implementation does not provide any locking for the dql data
+ * structures; the higher layer should provide this. dql_queued should
+ * be serialized to prevent concurrent execution of the function; this
+ * is also true for dql_completed. However, dql_queued and dql_completed can
+ * be executed concurrently (i.e. they can be protected by different locks).
+ */
+
+#ifndef _LINUX_DQL_H
+#define _LINUX_DQL_H
+
+#ifdef __KERNEL__
+
+struct dql {
+ /* Fields accessed in enqueue path (dql_queued) */
+ unsigned int num_queued; /* Total ever queued */
+ unsigned int adj_limit; /* limit + num_completed */
+ unsigned int last_obj_cnt; /* Count at last queuing */
+
+ /* Fields accessed only by completion path (dql_completed) */
+
+ unsigned int limit ____cacheline_aligned_in_smp; /* Current limit */
+ unsigned int num_completed; /* Total ever completed */
+
+ unsigned int prev_ovlimit; /* Previous over limit */
+ unsigned int prev_num_queued; /* Previous queue total */
+ unsigned int prev_last_obj_cnt; /* Previous queuing cnt */
+
+ unsigned int lowest_slack; /* Lowest slack found */
+ unsigned long slack_start_time; /* Time slacks seen */
+
+ /* Configuration */
+ unsigned int max_limit; /* Max limit */
+ unsigned int min_limit; /* Minimum limit */
+ unsigned int slack_hold_time; /* Time to measure slack */
+};
+
+/* Set some static maximums */
+#define DQL_MAX_OBJECT (UINT_MAX / 16)
+#define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT)
+
+/*
+ * Record number of objects queued. Assumes that caller has already checked
+ * availability in the queue with dql_avail.
+ */
+static inline void dql_queued(struct dql *dql, unsigned int count)
+{
+ BUG_ON(count > DQL_MAX_OBJECT);
+
+ dql->last_obj_cnt = count;
+
+	/* We want to force a write first, so that the CPU does not attempt
+	 * to get the cache line containing last_obj_cnt, num_queued, adj_limit
+	 * in Shared state, but instead directly does a Request For Ownership.
+	 * This is only a hint; we use barrier() only.
+	 */
+ barrier();
+
+ dql->num_queued += count;
+}
+
+/* Returns how many objects can be queued, < 0 indicates over limit. */
+static inline int dql_avail(const struct dql *dql)
+{
+ return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
+}
+
+/* Record number of completed objects and recalculate the limit. */
+void dql_completed(struct dql *dql, unsigned int count);
+
+/* Reset dql state */
+void dql_reset(struct dql *dql);
+
+/* Initialize dql state */
+int dql_init(struct dql *dql, unsigned hold_time);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_DQL_H */
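
A hedged producer/consumer sketch for the API described above; the example_ queue, the object counts and the surrounding locking are assumptions, not part of this header.

static struct dql example_dql;			/* protected by the caller's locks */

static int example_init(void)
{
	return dql_init(&example_dql, HZ);	/* ~1 second slack hold time */
}

static int example_enqueue(unsigned int nobjs)
{
	if (dql_avail(&example_dql) < 0)
		return -EBUSY;			/* over limit: stop queueing */

	/* ...hand nobjs objects to the hardware queue here... */
	dql_queued(&example_dql, nobjs);	/* serialized with other enqueues */
	return 0;
}

static void example_completion(unsigned int nobjs_retired)
{
	/* serialized with other completions; recalculates the limit */
	dql_completed(&example_dql, nobjs_retired);
}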
diff --git a/include/linux/earlycpio.h b/include/linux/earlycpio.h
new file mode 100644
index 000000000..111f46d83
--- /dev/null
+++ b/include/linux/earlycpio.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_EARLYCPIO_H
+#define _LINUX_EARLYCPIO_H
+
+#include <linux/types.h>
+
+#define MAX_CPIO_FILE_NAME 18
+
+struct cpio_data {
+ void *data;
+ size_t size;
+ char name[MAX_CPIO_FILE_NAME];
+};
+
+struct cpio_data find_cpio_data(const char *path, void *data, size_t len,
+ long *offset);
+
+#endif /* _LINUX_EARLYCPIO_H */
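
A minimal sketch of the lookup; the blob pointer, its length and the "kernel/firmware/" path are assumptions supplied by early boot code, not defined here.

/* Hypothetical early-boot search inside an uncompressed cpio blob. */
static void example_find_cpio_file(void *blob, size_t blob_len)
{
	long offset = 0;
	struct cpio_data cd;

	cd = find_cpio_data("kernel/firmware/", blob, blob_len, &offset);
	if (cd.data)
		pr_info("found %s (%zu bytes)\n", cd.name, cd.size);
}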
diff --git a/include/linux/ecryptfs.h b/include/linux/ecryptfs.h
new file mode 100644
index 000000000..8d5ab998a
--- /dev/null
+++ b/include/linux/ecryptfs.h
@@ -0,0 +1,105 @@
+#ifndef _LINUX_ECRYPTFS_H
+#define _LINUX_ECRYPTFS_H
+
+/* Version verification for shared data structures w/ userspace */
+#define ECRYPTFS_VERSION_MAJOR 0x00
+#define ECRYPTFS_VERSION_MINOR 0x04
+#define ECRYPTFS_SUPPORTED_FILE_VERSION 0x03
+/* These flags indicate which features are supported by the kernel
+ * module; userspace tools such as the mount helper read the feature
+ * bits from a sysfs handle in order to determine how to behave. */
+#define ECRYPTFS_VERSIONING_PASSPHRASE 0x00000001
+#define ECRYPTFS_VERSIONING_PUBKEY 0x00000002
+#define ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH 0x00000004
+#define ECRYPTFS_VERSIONING_POLICY 0x00000008
+#define ECRYPTFS_VERSIONING_XATTR 0x00000010
+#define ECRYPTFS_VERSIONING_MULTKEY 0x00000020
+#define ECRYPTFS_VERSIONING_DEVMISC 0x00000040
+#define ECRYPTFS_VERSIONING_HMAC 0x00000080
+#define ECRYPTFS_VERSIONING_FILENAME_ENCRYPTION 0x00000100
+#define ECRYPTFS_VERSIONING_GCM 0x00000200
+#define ECRYPTFS_MAX_PASSWORD_LENGTH 64
+#define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH
+#define ECRYPTFS_SALT_SIZE 8
+#define ECRYPTFS_SALT_SIZE_HEX (ECRYPTFS_SALT_SIZE*2)
+/* The original signature size is only for what is stored on disk; all
+ * in-memory representations are expanded hex, so they are better suited to
+ * being passed around or referenced on the command line */
+#define ECRYPTFS_SIG_SIZE 8
+#define ECRYPTFS_SIG_SIZE_HEX (ECRYPTFS_SIG_SIZE*2)
+#define ECRYPTFS_PASSWORD_SIG_SIZE ECRYPTFS_SIG_SIZE_HEX
+#define ECRYPTFS_MAX_KEY_BYTES 64
+#define ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES 512
+#define ECRYPTFS_FILE_VERSION 0x03
+#define ECRYPTFS_MAX_PKI_NAME_BYTES 16
+
+#define RFC2440_CIPHER_DES3_EDE 0x02
+#define RFC2440_CIPHER_CAST_5 0x03
+#define RFC2440_CIPHER_BLOWFISH 0x04
+#define RFC2440_CIPHER_AES_128 0x07
+#define RFC2440_CIPHER_AES_192 0x08
+#define RFC2440_CIPHER_AES_256 0x09
+#define RFC2440_CIPHER_TWOFISH 0x0a
+#define RFC2440_CIPHER_CAST_6 0x0b
+
+#define RFC2440_CIPHER_RSA 0x01
+
+/**
+ * For convenience, we may need to pass around the encrypted session
+ * key between kernel and userspace because the authentication token
+ * may not be extractable. For example, the TPM may not release the
+ * private key, instead requiring the encrypted data and returning the
+ * decrypted data.
+ */
+struct ecryptfs_session_key {
+#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT 0x00000001
+#define ECRYPTFS_USERSPACE_SHOULD_TRY_TO_ENCRYPT 0x00000002
+#define ECRYPTFS_CONTAINS_DECRYPTED_KEY 0x00000004
+#define ECRYPTFS_CONTAINS_ENCRYPTED_KEY 0x00000008
+ u32 flags;
+ u32 encrypted_key_size;
+ u32 decrypted_key_size;
+ u8 encrypted_key[ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES];
+ u8 decrypted_key[ECRYPTFS_MAX_KEY_BYTES];
+};
+
+struct ecryptfs_password {
+ u32 password_bytes;
+ s32 hash_algo;
+ u32 hash_iterations;
+ u32 session_key_encryption_key_bytes;
+#define ECRYPTFS_PERSISTENT_PASSWORD 0x01
+#define ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET 0x02
+ u32 flags;
+ /* Iterated-hash concatenation of salt and passphrase */
+ u8 session_key_encryption_key[ECRYPTFS_MAX_KEY_BYTES];
+ u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1];
+ /* Always in expanded hex */
+ u8 salt[ECRYPTFS_SALT_SIZE];
+};
+
+enum ecryptfs_token_types {ECRYPTFS_PASSWORD, ECRYPTFS_PRIVATE_KEY};
+
+struct ecryptfs_private_key {
+ u32 key_size;
+ u32 data_len;
+ u8 signature[ECRYPTFS_PASSWORD_SIG_SIZE + 1];
+ char pki_type[ECRYPTFS_MAX_PKI_NAME_BYTES + 1];
+ u8 data[];
+};
+
+/* May be a password or a private key */
+struct ecryptfs_auth_tok {
+ u16 version; /* 8-bit major and 8-bit minor */
+ u16 token_type;
+#define ECRYPTFS_ENCRYPT_ONLY 0x00000001
+ u32 flags;
+ struct ecryptfs_session_key session_key;
+ u8 reserved[32];
+ union {
+ struct ecryptfs_password password;
+ struct ecryptfs_private_key private_key;
+ } token;
+} __attribute__ ((packed));
+
+#endif /* _LINUX_ECRYPTFS_H */
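
A small hedged helper showing how the session-key flag bits above are meant to combine; the function name is illustrative and not part of the eCryptfs API.

/* Does this token's session key still need to be decrypted by userspace? */
static inline int example_needs_userspace_decrypt(const struct ecryptfs_auth_tok *tok)
{
	return !(tok->session_key.flags & ECRYPTFS_CONTAINS_DECRYPTED_KEY) &&
	       (tok->session_key.flags & ECRYPTFS_USERSPACE_SHOULD_TRY_TO_DECRYPT);
}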
diff --git a/include/linux/edac.h b/include/linux/edac.h
new file mode 100644
index 000000000..da3b72e95
--- /dev/null
+++ b/include/linux/edac.h
@@ -0,0 +1,785 @@
+/*
+ * Generic EDAC defs
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _LINUX_EDAC_H_
+#define _LINUX_EDAC_H_
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+
+struct device;
+
+#define EDAC_OPSTATE_INVAL -1
+#define EDAC_OPSTATE_POLL 0
+#define EDAC_OPSTATE_NMI 1
+#define EDAC_OPSTATE_INT 2
+
+extern int edac_op_state;
+extern int edac_err_assert;
+extern atomic_t edac_handlers;
+extern struct bus_type edac_subsys;
+
+extern int edac_handler_set(void);
+extern void edac_atomic_assert_error(void);
+extern struct bus_type *edac_get_sysfs_subsys(void);
+extern void edac_put_sysfs_subsys(void);
+
+enum {
+ EDAC_REPORTING_ENABLED,
+ EDAC_REPORTING_DISABLED,
+ EDAC_REPORTING_FORCE
+};
+
+extern int edac_report_status;
+#ifdef CONFIG_EDAC
+static inline int get_edac_report_status(void)
+{
+ return edac_report_status;
+}
+
+static inline void set_edac_report_status(int new)
+{
+ edac_report_status = new;
+}
+#else
+static inline int get_edac_report_status(void)
+{
+ return EDAC_REPORTING_DISABLED;
+}
+
+static inline void set_edac_report_status(int new)
+{
+}
+#endif
+
+static inline void opstate_init(void)
+{
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_NMI:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_POLL;
+ }
+ return;
+}
+
+/* Max length of a DIMM label*/
+#define EDAC_MC_LABEL_LEN 31
+
+/* Maximum size of the location string */
+#define LOCATION_SIZE 256
+
+/* Defines the maximum number of labels that can be reported */
+#define EDAC_MAX_LABELS 8
+
+/* String used to join two or more labels */
+#define OTHER_LABEL " or "
+
+/**
+ * enum dev_type - describes the type of DRAM chips used on the memory stick
+ * @DEV_UNKNOWN:	Can't be determined, or the MC doesn't support detecting it
+ * @DEV_X1: 1 bit for data
+ * @DEV_X2: 2 bits for data
+ * @DEV_X4: 4 bits for data
+ * @DEV_X8: 8 bits for data
+ * @DEV_X16: 16 bits for data
+ * @DEV_X32: 32 bits for data
+ * @DEV_X64: 64 bits for data
+ *
+ * Typical values are x4 and x8.
+ */
+enum dev_type {
+ DEV_UNKNOWN = 0,
+ DEV_X1,
+ DEV_X2,
+ DEV_X4,
+ DEV_X8,
+ DEV_X16,
+ DEV_X32, /* Do these parts exist? */
+ DEV_X64 /* Do these parts exist? */
+};
+
+#define DEV_FLAG_UNKNOWN BIT(DEV_UNKNOWN)
+#define DEV_FLAG_X1 BIT(DEV_X1)
+#define DEV_FLAG_X2 BIT(DEV_X2)
+#define DEV_FLAG_X4 BIT(DEV_X4)
+#define DEV_FLAG_X8 BIT(DEV_X8)
+#define DEV_FLAG_X16 BIT(DEV_X16)
+#define DEV_FLAG_X32 BIT(DEV_X32)
+#define DEV_FLAG_X64 BIT(DEV_X64)
+
+/**
+ * enum hw_event_mc_err_type - type of the detected error
+ *
+ * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC
+ * corrected error was detected
+ * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that
+ * can't be corrected by ECC, but it is not
+ * fatal (maybe it is on an unused memory area,
+ * or the memory controller could recover from
+ * it for example, by re-trying the operation).
+ * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not
+ * be recovered.
+ */
+enum hw_event_mc_err_type {
+ HW_EVENT_ERR_CORRECTED,
+ HW_EVENT_ERR_UNCORRECTED,
+ HW_EVENT_ERR_FATAL,
+ HW_EVENT_ERR_INFO,
+};
+
+static inline char *mc_event_error_type(const unsigned int err_type)
+{
+ switch (err_type) {
+ case HW_EVENT_ERR_CORRECTED:
+ return "Corrected";
+ case HW_EVENT_ERR_UNCORRECTED:
+ return "Uncorrected";
+ case HW_EVENT_ERR_FATAL:
+ return "Fatal";
+ default:
+ case HW_EVENT_ERR_INFO:
+ return "Info";
+ }
+}
+
+/**
+ * enum mem_type - memory types. For a more detailed reference, please see
+ * http://en.wikipedia.org/wiki/DRAM
+ *
+ * @MEM_EMPTY:		Empty csrow
+ * @MEM_RESERVED: Reserved csrow type
+ * @MEM_UNKNOWN: Unknown csrow type
+ * @MEM_FPM: FPM - Fast Page Mode, used on systems up to 1995.
+ * @MEM_EDO: EDO - Extended data out, used on systems up to 1998.
+ * @MEM_BEDO: BEDO - Burst Extended data out, an EDO variant.
+ * @MEM_SDR: SDR - Single data rate SDRAM
+ * http://en.wikipedia.org/wiki/Synchronous_dynamic_random-access_memory
+ * They use 3 pins for chip select: Pins 0 and 2 are
+ * for rank 0; pins 1 and 3 are for rank 1, if the memory
+ * is dual-rank.
+ * @MEM_RDR: Registered SDR SDRAM
+ * @MEM_DDR: Double data rate SDRAM
+ * http://en.wikipedia.org/wiki/DDR_SDRAM
+ * @MEM_RDDR: Registered Double data rate SDRAM
+ * This is a variant of the DDR memories.
+ * A registered memory has a buffer inside it, hiding
+ * part of the memory details to the memory controller.
+ * @MEM_RMBS: Rambus DRAM, used on a few Pentium III/IV controllers.
+ * @MEM_DDR2: DDR2 RAM, as described at JEDEC JESD79-2F.
+ *			Those memories are labeled as "PC2-" instead of "PC" to
+ *			differentiate them from DDR.
+ * @MEM_FB_DDR2: Fully-Buffered DDR2, as described at JEDEC Std No. 205
+ * and JESD206.
+ * Those memories are accessed per DIMM slot, and not by
+ * a chip select signal.
+ * @MEM_RDDR2: Registered DDR2 RAM
+ * This is a variant of the DDR2 memories.
+ * @MEM_XDR: Rambus XDR
+ * It is an evolution of the original RAMBUS memories,
+ * created to compete with DDR2. Weren't used on any
+ * x86 arch, but cell_edac PPC memory controller uses it.
+ * @MEM_DDR3: DDR3 RAM
+ * @MEM_RDDR3: Registered DDR3 RAM
+ * This is a variant of the DDR3 memories.
+ * @MEM_LRDDR3:		Load-Reduced DDR3 memory.
+ * @MEM_DDR4: Unbuffered DDR4 RAM
+ * @MEM_RDDR4: Registered DDR4 RAM
+ * This is a variant of the DDR4 memories.
+ */
+enum mem_type {
+ MEM_EMPTY = 0,
+ MEM_RESERVED,
+ MEM_UNKNOWN,
+ MEM_FPM,
+ MEM_EDO,
+ MEM_BEDO,
+ MEM_SDR,
+ MEM_RDR,
+ MEM_DDR,
+ MEM_RDDR,
+ MEM_RMBS,
+ MEM_DDR2,
+ MEM_FB_DDR2,
+ MEM_RDDR2,
+ MEM_XDR,
+ MEM_DDR3,
+ MEM_RDDR3,
+ MEM_LRDDR3,
+ MEM_DDR4,
+ MEM_RDDR4,
+};
+
+#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
+#define MEM_FLAG_RESERVED BIT(MEM_RESERVED)
+#define MEM_FLAG_UNKNOWN BIT(MEM_UNKNOWN)
+#define MEM_FLAG_FPM BIT(MEM_FPM)
+#define MEM_FLAG_EDO BIT(MEM_EDO)
+#define MEM_FLAG_BEDO BIT(MEM_BEDO)
+#define MEM_FLAG_SDR BIT(MEM_SDR)
+#define MEM_FLAG_RDR BIT(MEM_RDR)
+#define MEM_FLAG_DDR BIT(MEM_DDR)
+#define MEM_FLAG_RDDR BIT(MEM_RDDR)
+#define MEM_FLAG_RMBS BIT(MEM_RMBS)
+#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
+#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
+#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
+#define MEM_FLAG_DDR3 BIT(MEM_DDR3)
+#define MEM_FLAG_RDDR3 BIT(MEM_RDDR3)
+
+/**
+ * enum edac_type - Error Detection and Correction capabilities and mode
+ * @EDAC_UNKNOWN: Unknown if ECC is available
+ * @EDAC_NONE: Doesn't support ECC
+ * @EDAC_RESERVED: Reserved ECC type
+ * @EDAC_PARITY: Detects parity errors
+ * @EDAC_EC: Error Checking - no correction
+ * @EDAC_SECDED: Single bit error correction, Double detection
+ * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist?
+ * @EDAC_S4ECD4ED: Chipkill x4 devices
+ * @EDAC_S8ECD8ED: Chipkill x8 devices
+ * @EDAC_S16ECD16ED: Chipkill x16 devices
+ */
+enum edac_type {
+ EDAC_UNKNOWN = 0,
+ EDAC_NONE,
+ EDAC_RESERVED,
+ EDAC_PARITY,
+ EDAC_EC,
+ EDAC_SECDED,
+ EDAC_S2ECD2ED,
+ EDAC_S4ECD4ED,
+ EDAC_S8ECD8ED,
+ EDAC_S16ECD16ED,
+};
+
+#define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN)
+#define EDAC_FLAG_NONE BIT(EDAC_NONE)
+#define EDAC_FLAG_PARITY BIT(EDAC_PARITY)
+#define EDAC_FLAG_EC BIT(EDAC_EC)
+#define EDAC_FLAG_SECDED BIT(EDAC_SECDED)
+#define EDAC_FLAG_S2ECD2ED BIT(EDAC_S2ECD2ED)
+#define EDAC_FLAG_S4ECD4ED BIT(EDAC_S4ECD4ED)
+#define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED)
+#define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED)
+
+/**
+ * enum scrub_type - scrubbing capabilities
+ * @SCRUB_UNKNOWN:	Unknown if scrubber is available
+ * @SCRUB_NONE: No scrubber
+ * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing
+ * @SCRUB_SW_SRC: Software scrub only errors
+ * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error
+ * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable
+ * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing
+ * @SCRUB_HW_SRC: Hardware scrub only errors
+ * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error
+ * @SCRUB_HW_TUNABLE:	Hardware scrub frequency is tunable
+ */
+enum scrub_type {
+ SCRUB_UNKNOWN = 0,
+ SCRUB_NONE,
+ SCRUB_SW_PROG,
+ SCRUB_SW_SRC,
+ SCRUB_SW_PROG_SRC,
+ SCRUB_SW_TUNABLE,
+ SCRUB_HW_PROG,
+ SCRUB_HW_SRC,
+ SCRUB_HW_PROG_SRC,
+ SCRUB_HW_TUNABLE
+};
+
+#define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG)
+#define SCRUB_FLAG_SW_SRC BIT(SCRUB_SW_SRC)
+#define SCRUB_FLAG_SW_PROG_SRC BIT(SCRUB_SW_PROG_SRC)
+#define SCRUB_FLAG_SW_TUN	BIT(SCRUB_SW_TUNABLE)
+#define SCRUB_FLAG_HW_PROG BIT(SCRUB_HW_PROG)
+#define SCRUB_FLAG_HW_SRC BIT(SCRUB_HW_SRC)
+#define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC)
+#define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE)
+
+/* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */
+
+/* EDAC internal operation states */
+#define OP_ALLOC 0x100
+#define OP_RUNNING_POLL 0x201
+#define OP_RUNNING_INTERRUPT 0x202
+#define OP_RUNNING_POLL_INTR 0x203
+#define OP_OFFLINE 0x300
+
+/*
+ * Concepts used at the EDAC subsystem
+ *
+ * There are several things to be aware of that aren't at all obvious:
+ *
+ * SOCKETS, SOCKET SETS, BANKS, ROWS, CHIP-SELECT ROWS, CHANNELS, etc..
+ *
+ * These are some of the many terms that are thrown about that don't always
+ * mean what people think they mean (Inconceivable!). In the interest of
+ * creating a common ground for discussion, terms and their definitions
+ * will be established.
+ *
+ * Memory devices: The individual DRAM chips on a memory stick. These
+ * devices commonly output 4 and 8 bits each (x4, x8).
+ * Grouping several of these in parallel provides the
+ * number of bits that the memory controller expects:
+ * typically 72 bits, in order to provide 64 bits +
+ * 8 bits of ECC data.
+ *
+ * Memory Stick: A printed circuit board that aggregates multiple
+ * memory devices in parallel. In general, this is the
+ * Field Replaceable Unit (FRU) which gets replaced, in
+ * the case of excessive errors. Most often it is also
+ * called DIMM (Dual Inline Memory Module).
+ *
+ * Memory Socket:	A physical connector on the motherboard that accepts
+ *			a single memory stick. Also called a "slot" in several
+ *			datasheets.
+ *
+ * Channel:		A memory controller channel, responsible for communicating
+ * with a group of DIMMs. Each channel has its own
+ * independent control (command) and data bus, and can
+ * be used independently or grouped with other channels.
+ *
+ * Branch:		Typically the highest level of the hierarchy on a
+ * Fully-Buffered DIMM memory controller.
+ * Typically, it contains two channels.
+ * Two channels at the same branch can be used in single
+ * mode or in lockstep mode.
+ * When lockstep is enabled, the cacheline is doubled,
+ * but it generally brings some performance penalty.
+ * Also, it is generally not possible to point to just one
+ * memory stick when an error occurs, as the error
+ * correction code is calculated using two DIMMs instead
+ * of one. Due to that, it is capable of correcting more
+ *			errors than in single mode.
+ *
+ * Single-channel:	The data accessed by the memory controller is contained
+ *			in one DIMM only. E.g. if the data is 64 bits wide,
+ *			the data flows to the CPU using one 64-bit parallel
+ *			access.
+ * Typically used with SDR, DDR, DDR2 and DDR3 memories.
+ * FB-DIMM and RAMBUS use a different concept for channel,
+ * so this concept doesn't apply there.
+ *
+ * Double-channel:	The data size accessed by the memory controller is
+ *			interleaved across two DIMMs, accessed at the same time.
+ *			E.g. if the DIMM is 64 bits wide (72 bits with ECC),
+ *			the data flows to the CPU using a 128-bit parallel
+ *			access.
+ *
+ * Chip-select row: This is the name of the DRAM signal used to select the
+ * DRAM ranks to be accessed. Common chip-select rows for
+ * single channel are 64 bits, for dual channel 128 bits.
+ *			It may not be visible to the memory controller, as some
+ * DIMM types have a memory buffer that can hide direct
+ * access to it from the Memory Controller.
+ *
+ * Single-Ranked stick: A Single-ranked stick has 1 chip-select row of memory.
+ * Motherboards commonly drive two chip-select pins to
+ *			a memory stick. A single-ranked stick will occupy
+ * only one of those rows. The other will be unused.
+ *
+ * Double-Ranked stick: A double-ranked stick has two chip-select rows which
+ * access different sets of memory devices. The two
+ * rows cannot be accessed concurrently.
+ *
+ * Double-sided stick: DEPRECATED TERM, see Double-Ranked stick.
+ * A double-sided stick has two chip-select rows which
+ * access different sets of memory devices. The two
+ * rows cannot be accessed concurrently. "Double-sided"
+ * is irrespective of the memory devices being mounted
+ * on both sides of the memory stick.
+ *
+ * Socket set: All of the memory sticks that are required for
+ * a single memory access or all of the memory sticks
+ * spanned by a chip-select row. A single socket set
+ * has two chip-select rows and if double-sided sticks
+ * are used these will occupy those chip-select rows.
+ *
+ * Bank: This term is avoided because it is unclear when
+ * needing to distinguish between chip-select rows and
+ * socket sets.
+ *
+ * Controller pages:
+ *
+ * Physical pages:
+ *
+ * Virtual pages:
+ *
+ *
+ * STRUCTURE ORGANIZATION AND CHOICES
+ *
+ *
+ *
+ * PS - I enjoyed writing all that about as much as you enjoyed reading it.
+ */
+
+/**
+ * enum edac_mc_layer - memory controller hierarchy layer
+ *
+ * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
+ * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
+ * @EDAC_MC_LAYER_SLOT: memory layer is named "slot"
+ * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select"
+ * @EDAC_MC_LAYER_ALL_MEM: memory layout is unknown. All memory is mapped
+ * as a single memory area. This is used when
+ * retrieving errors from a firmware driven driver.
+ *
+ * This enum is used by the drivers to tell edac_mc_sysfs what name should
+ * be used when describing a memory stick location.
+ */
+enum edac_mc_layer_type {
+ EDAC_MC_LAYER_BRANCH,
+ EDAC_MC_LAYER_CHANNEL,
+ EDAC_MC_LAYER_SLOT,
+ EDAC_MC_LAYER_CHIP_SELECT,
+ EDAC_MC_LAYER_ALL_MEM,
+};
+
+/**
+ * struct edac_mc_layer - describes the memory controller hierarchy
+ * @layer: layer type
+ * @size: number of components per layer. For example,
+ * if the channel layer has two channels, size = 2
+ * @is_virt_csrow: This layer is part of the "csrow" when old API
+ * compatibility mode is enabled. Otherwise, it is
+ * a channel
+ */
+struct edac_mc_layer {
+ enum edac_mc_layer_type type;
+ unsigned size;
+ bool is_virt_csrow;
+};
+
+/*
+ * Maximum number of layers used by the memory controller to uniquely
+ * identify a single memory stick.
+ * NOTE: Changing this constant requires changing not only the constant
+ * below, but also the existing code in the core, as some of that code
+ * is optimized for 3 layers.
+ */
+#define EDAC_MAX_LAYERS 3
+
+/**
+ * EDAC_DIMM_OFF - Macro responsible for getting the offset, inside a pointer array,
+ *		    of the element given by the [layer0,layer1,layer2] position
+ *
+ * @layers: a struct edac_mc_layer array, describing how many elements
+ * were allocated for each layer
+ * @n_layers: Number of layers at the @layers array
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this macro returns &var[layer0] - &var
+ * For 2 layers, this macro behaves as if a two-dimensional array were
+ * allocated and returns "&var[layer0][layer1] - &var"
+ * For 3 layers, this macro behaves as if a three-dimensional array were
+ * allocated and returns "&var[layer0][layer1][layer2] - &var"
+ *
+ * A loop could be used here to make it more generic, but, as we only have
+ * 3 layers, this is a little faster.
+ * By design, layers can never be 0 or more than 3. If that ever happens,
+ * a NULL is returned, causing an OOPS during the memory allocation routine,
+ * which would point out to the developer that something is wrong.
+ */
+#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \
+ int __i; \
+ if ((nlayers) == 1) \
+ __i = layer0; \
+ else if ((nlayers) == 2) \
+ __i = (layer1) + ((layers[1]).size * (layer0)); \
+ else if ((nlayers) == 3) \
+ __i = (layer2) + ((layers[2]).size * ((layer1) + \
+ ((layers[1]).size * (layer0)))); \
+ else \
+ __i = -EINVAL; \
+ __i; \
+})
+
+/**
+ * EDAC_DIMM_PTR - Macro responsible for getting the pointer, inside a pointer array,
+ *		    to the element given by the [layer0,layer1,layer2] position
+ *
+ * @layers: a struct edac_mc_layer array, describing how many elements
+ * were allocated for each layer
+ * @var: name of the var where we want to get the pointer
+ * (like mci->dimms)
+ * @n_layers: Number of layers at the @layers array
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this macro returns &var[layer0]
+ * For 2 layers, this macro behaves as if a two-dimensional array were
+ * allocated and returns "&var[layer0][layer1]"
+ * For 3 layers, this macro behaves as if a three-dimensional array were
+ * allocated and returns "&var[layer0][layer1][layer2]"
+ */
+#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
+ typeof(*var) __p; \
+ int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \
+ if (___i < 0) \
+ __p = NULL; \
+ else \
+ __p = (var)[___i]; \
+ __p; \
+})
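
To make the two macros concrete, a hedged sketch for a hypothetical two-layer controller (2 channels of 4 slots), using struct edac_mc_layer above and the mem_ctl_info/dimm_info structures defined further down; the mci pointer and the positions are placeholders.

static struct dimm_info *example_get_dimm(struct mem_ctl_info *mci,
					  int chan, int slot)
{
	struct edac_mc_layer layers[2] = {
		{ .type = EDAC_MC_LAYER_CHANNEL, .size = 2, .is_virt_csrow = false },
		{ .type = EDAC_MC_LAYER_SLOT,    .size = 4, .is_virt_csrow = true  },
	};

	/* EDAC_DIMM_OFF yields slot + 4 * chan; EDAC_DIMM_PTR indexes mci->dimms */
	return EDAC_DIMM_PTR(layers, mci->dimms, 2, chan, slot, 0);
}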
+
+struct dimm_info {
+ struct device dev;
+
+ char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
+
+ /* Memory location data */
+ unsigned location[EDAC_MAX_LAYERS];
+
+ struct mem_ctl_info *mci; /* the parent */
+
+ u32 grain; /* granularity of reported error in bytes */
+ enum dev_type dtype; /* memory device type */
+ enum mem_type mtype; /* memory dimm type */
+ enum edac_type edac_mode; /* EDAC mode for this dimm */
+
+ u32 nr_pages; /* number of pages on this dimm */
+
+ unsigned csrow, cschannel; /* Points to the old API data */
+};
+
+/**
+ * struct rank_info - contains the information for one DIMM rank
+ *
+ * @chan_idx: channel number where the rank is (typically, 0 or 1)
+ * @ce_count: number of correctable errors for this rank
+ * @csrow: A pointer to the chip select row structure (the parent
+ * structure). The location of the rank is given by
+ * the (csrow->csrow_idx, chan_idx) vector.
+ * @dimm: A pointer to the DIMM structure, where the DIMM label
+ * information is stored.
+ *
+ * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
+ * This is a bad assumption, but it makes this patch easier. Later
+ * patches in this series will fix this issue.
+ */
+struct rank_info {
+ int chan_idx;
+ struct csrow_info *csrow;
+ struct dimm_info *dimm;
+
+ u32 ce_count; /* Correctable Errors for this csrow */
+};
+
+struct csrow_info {
+ struct device dev;
+
+ /* Used only by edac_mc_find_csrow_by_page() */
+ unsigned long first_page; /* first page number in csrow */
+ unsigned long last_page; /* last page number in csrow */
+ unsigned long page_mask; /* used for interleaving -
+ * 0UL for non intlv */
+
+ int csrow_idx; /* the chip-select row */
+
+ u32 ue_count; /* Uncorrectable Errors for this csrow */
+ u32 ce_count; /* Correctable Errors for this csrow */
+
+ struct mem_ctl_info *mci; /* the parent */
+
+ /* channel information for this csrow */
+ u32 nr_channels;
+ struct rank_info **channels;
+};
+
+/*
+ * struct errcount_attribute - used to store the several error counts
+ */
+struct errcount_attribute_data {
+ int n_layers;
+ int pos[EDAC_MAX_LAYERS];
+ int layer0, layer1, layer2;
+};
+
+/**
+ * edac_raw_error_desc - Raw error report structure
+ * @grain: minimum granularity for an error report, in bytes
+ * @error_count: number of errors of the same type
+ * @top_layer: top layer of the error (layer[0])
+ * @mid_layer: middle layer of the error (layer[1])
+ * @low_layer: low layer of the error (layer[2])
+ * @page_frame_number: page where the error happened
+ * @offset_in_page: page offset
+ * @syndrome: syndrome of the error (or 0 if unknown or if
+ * the syndrome is not applicable)
+ * @msg: error message
+ * @location: location of the error
+ * @label: label of the affected DIMM(s)
+ * @other_detail: other driver-specific detail about the error
+ * @enable_per_layer_report: if false, the error affects all layers
+ * (typically, a memory controller error)
+ */
+struct edac_raw_error_desc {
+ /*
+ * NOTE: everything before grain won't be cleaned by
+ * edac_raw_error_desc_clean()
+ */
+ char location[LOCATION_SIZE];
+ char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
+ long grain;
+
+ /* the vars below and grain will be cleaned on every new error report */
+ u16 error_count;
+ int top_layer;
+ int mid_layer;
+ int low_layer;
+ unsigned long page_frame_number;
+ unsigned long offset_in_page;
+ unsigned long syndrome;
+ const char *msg;
+ const char *other_detail;
+ bool enable_per_layer_report;
+};
+
+/* MEMORY controller information structure
+ */
+struct mem_ctl_info {
+ struct device dev;
+ struct bus_type *bus;
+
+ struct list_head link; /* for global list of mem_ctl_info structs */
+
+ struct module *owner; /* Module owner of this control struct */
+
+ unsigned long mtype_cap; /* memory types supported by mc */
+ unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */
+ unsigned long edac_cap; /* configuration capabilities - this is
+ * closely related to edac_ctl_cap. The
+ * difference is that the controller may be
+ * capable of s4ecd4ed which would be listed
+ * in edac_ctl_cap, but if channels aren't
+ * capable of s4ecd4ed then the edac_cap would
+ * not have that capability.
+ */
+ unsigned long scrub_cap; /* chipset scrub capabilities */
+ enum scrub_type scrub_mode; /* current scrub mode */
+
+ /* Translates sdram memory scrub rate given in bytes/sec to the
+ internal representation and configures whatever else needs
+ to be configured.
+ */
+ int (*set_sdram_scrub_rate) (struct mem_ctl_info * mci, u32 bw);
+
+ /* Get the current sdram memory scrub rate from the internal
+ representation and converts it to the closest matching
+ bandwidth in bytes/sec.
+ */
+ int (*get_sdram_scrub_rate) (struct mem_ctl_info * mci);
+
+
+ /* pointer to edac checking routine */
+ void (*edac_check) (struct mem_ctl_info * mci);
+
+ /*
+ * Remaps memory pages: controller pages to physical pages.
+ * For most MC's, this will be NULL.
+ */
+ /* FIXME - why not send the phys page to begin with? */
+ unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
+ unsigned long page);
+ int mc_idx;
+ struct csrow_info **csrows;
+ unsigned nr_csrows, num_cschannel;
+
+ /*
+ * Memory Controller hierarchy
+ *
+	 * There are basically two types of memory controller: the ones that
+	 * see memory sticks ("DIMMs"), and the ones that see memory ranks.
+	 * Old memory controllers enumerate memory per rank, but most
+	 * of the recent drivers enumerate memory per DIMM instead.
+	 * When the memory controller is per rank, csbased is true.
+ */
+ unsigned n_layers;
+ struct edac_mc_layer *layers;
+ bool csbased;
+
+ /*
+ * DIMM info. Will eventually remove the entire csrows_info some day
+ */
+ unsigned tot_dimms;
+ struct dimm_info **dimms;
+
+ /*
+ * FIXME - what about controllers on other busses? - IDs must be
+ * unique. dev pointer should be sufficiently unique, but
+ * BUS:SLOT.FUNC numbers may not be unique.
+ */
+ struct device *pdev;
+ const char *mod_name;
+ const char *mod_ver;
+ const char *ctl_name;
+ const char *dev_name;
+ void *pvt_info;
+ unsigned long start_time; /* mci load start time (in jiffies) */
+
+ /*
+ * drivers shouldn't access those fields directly, as the core
+ * already handles that.
+ */
+ u32 ce_noinfo_count, ue_noinfo_count;
+ u32 ue_mc, ce_mc;
+ u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+
+ struct completion complete;
+
+ /* Additional top controller level attributes, but specified
+ * by the low level driver.
+ *
+ * Set by the low level driver to provide attributes at the
+ * controller level.
+ * An array of structures, NULL terminated
+ *
+ * If attributes are desired, then set to array of attributes
+ * If no attributes are desired, leave NULL
+ */
+ const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
+
+ /* work struct for this MC */
+ struct delayed_work work;
+
+ /*
+ * Used to report an error - by being at the global struct
+ * makes the memory allocated by the EDAC core
+ */
+ struct edac_raw_error_desc error_desc;
+
+ /* the internal state of this controller instance */
+ int op_state;
+
+#ifdef CONFIG_EDAC_DEBUG
+ struct dentry *debugfs;
+ u8 fake_inject_layer[EDAC_MAX_LAYERS];
+ u32 fake_inject_ue;
+ u16 fake_inject_count;
+#endif
+};
+
+/*
+ * Maximum number of memory controllers in the coherent fabric.
+ */
+#define EDAC_MAX_MCS 16
+
+#endif
diff --git a/include/linux/edd.h b/include/linux/edd.h
new file mode 100644
index 000000000..83d4371ec
--- /dev/null
+++ b/include/linux/edd.h
@@ -0,0 +1,38 @@
+/*
+ * linux/include/linux/edd.h
+ * Copyright (C) 2002, 2003, 2004 Dell Inc.
+ * by Matt Domsch <Matt_Domsch@dell.com>
+ *
+ * structures and definitions for the int 13h, ax={41,48}h
+ * BIOS Enhanced Disk Drive Services
+ * This is based on the T13 group document D1572 Revision 0 (August 14 2002)
+ * available at http://www.t13.org/docs2002/d1572r0.pdf. It is
+ * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf
+ *
+ * In a nutshell, arch/{i386,x86_64}/boot/setup.S populates a scratch
+ * table in the boot_params that contains a list of BIOS-enumerated
+ * boot devices.
+ * In arch/{i386,x86_64}/kernel/setup.c, this information is
+ * transferred into the edd structure, and in drivers/firmware/edd.c, that
+ * information is used to identify the BIOS boot disk. The code in setup.S
+ * is very sensitive to the size of these structures.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_EDD_H
+#define _LINUX_EDD_H
+
+#include <uapi/linux/edd.h>
+
+#ifndef __ASSEMBLY__
+extern struct edd edd;
+#endif /*!__ASSEMBLY__ */
+#endif /* _LINUX_EDD_H */
diff --git a/include/linux/edma.h b/include/linux/edma.h
new file mode 100644
index 000000000..a1307e782
--- /dev/null
+++ b/include/linux/edma.h
@@ -0,0 +1,29 @@
+/*
+ * TI EDMA DMA engine driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LINUX_EDMA_H
+#define __LINUX_EDMA_H
+
+struct dma_chan;
+
+#if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE)
+bool edma_filter_fn(struct dma_chan *, void *);
+#else
+static inline bool edma_filter_fn(struct dma_chan *chan, void *param)
+{
+ return false;
+}
+#endif
+
+#endif
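
A hedged sketch of the usual dmaengine pattern with this filter; the channel id and the helper name are placeholders, and <linux/dmaengine.h> is assumed to be included by the caller.

static struct dma_chan *example_request_edma_chan(unsigned int *ch_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* the filter parameter identifies the desired EDMA channel */
	return dma_request_channel(mask, edma_filter_fn, ch_id);
}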
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
new file mode 100644
index 000000000..eb0b19880
--- /dev/null
+++ b/include/linux/eeprom_93cx6.h
@@ -0,0 +1,86 @@
+/*
+ Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: eeprom_93cx6
+	Abstract: EEPROM reader data structures for 93cx6 chipsets.
+ Supported chipsets: 93c46, 93c56 and 93c66.
+ */
+
+/*
+ * EEPROM operation defines.
+ */
+#define PCI_EEPROM_WIDTH_93C46 6
+#define PCI_EEPROM_WIDTH_93C56 8
+#define PCI_EEPROM_WIDTH_93C66 8
+#define PCI_EEPROM_WIDTH_93C86 8
+#define PCI_EEPROM_WIDTH_OPCODE 3
+#define PCI_EEPROM_WRITE_OPCODE 0x05
+#define PCI_EEPROM_ERASE_OPCODE 0x07
+#define PCI_EEPROM_READ_OPCODE 0x06
+#define PCI_EEPROM_EWDS_OPCODE 0x10
+#define PCI_EEPROM_EWEN_OPCODE 0x13
+
+/**
+ * struct eeprom_93cx6 - control structure for setting the commands
+ * for reading the eeprom data.
+ * @data: private pointer for the driver.
+ * @register_read(struct eeprom_93cx6 *eeprom): handler to
+ *	read the eeprom register; this function should set all reg_* fields.
+ * @register_write(struct eeprom_93cx6 *eeprom): handler to
+ * write to the eeprom register by using all reg_* fields.
+ * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @drive_data: Set if we're driving the data line.
+ * @reg_data_in: register field to indicate data input
+ * @reg_data_out: register field to indicate data output
+ * @reg_data_clock: register field to set the data clock
+ * @reg_chip_select: register field to set the chip select
+ *
+ * This structure is used for the communication between the driver
+ * and the eeprom_93cx6 handlers for reading the eeprom.
+ */
+struct eeprom_93cx6 {
+ void *data;
+
+ void (*register_read)(struct eeprom_93cx6 *eeprom);
+ void (*register_write)(struct eeprom_93cx6 *eeprom);
+
+ int width;
+
+ char drive_data;
+ char reg_data_in;
+ char reg_data_out;
+ char reg_data_clock;
+ char reg_chip_select;
+};
+
+extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
+ const u8 word, u16 *data);
+extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
+ const u8 word, __le16 *data, const u16 words);
+extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom,
+ const u8 byte, u8 *data);
+extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom,
+ const u8 byte, u8 *data, const u16 bytes);
+
+extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
+
+extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom,
+ u8 addr, u16 data);
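
A hedged sketch of a driver wiring up the accessor and reading a few words; the read/write handlers and the word offset are hypothetical, and each handler is expected to drive the reg_* fields as described above.

static void example_read_serial(void *priv, __le16 *buf,
				void (*rd)(struct eeprom_93cx6 *),
				void (*wr)(struct eeprom_93cx6 *))
{
	struct eeprom_93cx6 eeprom = {
		.data		= priv,
		.register_read	= rd,
		.register_write	= wr,
		.width		= PCI_EEPROM_WIDTH_93C46,
	};

	/* read three 16-bit words starting at word offset 0 */
	eeprom_93cx6_multiread(&eeprom, 0x00, buf, 3);
}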
diff --git a/include/linux/eeprom_93xx46.h b/include/linux/eeprom_93xx46.h
new file mode 100644
index 000000000..06791811e
--- /dev/null
+++ b/include/linux/eeprom_93xx46.h
@@ -0,0 +1,18 @@
+/*
+ * Module: eeprom_93xx46
+ * platform description for 93xx46 EEPROMs.
+ */
+
+struct eeprom_93xx46_platform_data {
+ unsigned char flags;
+#define EE_ADDR8 0x01 /* 8 bit addr. cfg */
+#define EE_ADDR16 0x02 /* 16 bit addr. cfg */
+#define EE_READONLY 0x08 /* forbid writing */
+
+ /*
+ * optional hooks to control additional logic
+ * before and after spi transfer.
+ */
+ void (*prepare)(void *);
+ void (*finish)(void *);
+};
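
As an illustration, board code might declare the platform data as in the sketch below; the values are assumptions and the prepare/finish hooks are simply left unset.

static struct eeprom_93xx46_platform_data example_93xx46_pdata = {
	.flags = EE_ADDR16 | EE_READONLY,	/* 16-bit addressing, no writes */
};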
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
new file mode 100644
index 000000000..051b21fed
--- /dev/null
+++ b/include/linux/efi-bgrt.h
@@ -0,0 +1,21 @@
+#ifndef _LINUX_EFI_BGRT_H
+#define _LINUX_EFI_BGRT_H
+
+#ifdef CONFIG_ACPI_BGRT
+
+#include <linux/acpi.h>
+
+void efi_bgrt_init(void);
+
+/* The BGRT data itself; only valid if bgrt_image != NULL. */
+extern void *bgrt_image;
+extern size_t bgrt_image_size;
+extern struct acpi_table_bgrt *bgrt_tab;
+
+#else /* !CONFIG_ACPI_BGRT */
+
+static inline void efi_bgrt_init(void) {}
+
+#endif /* !CONFIG_ACPI_BGRT */
+
+#endif /* _LINUX_EFI_BGRT_H */
diff --git a/include/linux/efi.h b/include/linux/efi.h
new file mode 100644
index 000000000..af5be0368
--- /dev/null
+++ b/include/linux/efi.h
@@ -0,0 +1,1254 @@
+#ifndef _LINUX_EFI_H
+#define _LINUX_EFI_H
+
+/*
+ * Extensible Firmware Interface
+ * Based on 'Extensible Firmware Interface Specification' version 0.9, April 30, 1999
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999, 2002-2003 Hewlett-Packard Co.
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ */
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/rtc.h>
+#include <linux/ioport.h>
+#include <linux/pfn.h>
+#include <linux/pstore.h>
+#include <linux/reboot.h>
+
+#include <asm/page.h>
+
+#define EFI_SUCCESS 0
+#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_UNSUPPORTED ( 3 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BAD_BUFFER_SIZE ( 4 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_BUFFER_TOO_SMALL ( 5 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_NOT_READY ( 6 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_DEVICE_ERROR ( 7 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1)))
+
+typedef unsigned long efi_status_t;
+typedef u8 efi_bool_t;
+typedef u16 efi_char16_t; /* UNICODE character */
+typedef u64 efi_physical_addr_t;
+typedef void *efi_handle_t;
+
+
+typedef struct {
+ u8 b[16];
+} efi_guid_t;
+
+#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
+((efi_guid_t) \
+{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
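
For illustration only: expanding the initializer with a well-known value (the EFI global-variable vendor GUID, 8BE4DF61-93CA-11D2-AA0D-00E098032B8C) shows how the mixed-endian byte layout falls out. The macro name below is not defined by this header.

#define EXAMPLE_GLOBAL_VARIABLE_GUID \
	EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \
		 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)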
+
+/*
+ * Generic EFI table header
+ */
+typedef struct {
+ u64 signature;
+ u32 revision;
+ u32 headersize;
+ u32 crc32;
+ u32 reserved;
+} efi_table_hdr_t;
+
+/*
+ * Memory map descriptor:
+ */
+
+/* Memory types: */
+#define EFI_RESERVED_TYPE 0
+#define EFI_LOADER_CODE 1
+#define EFI_LOADER_DATA 2
+#define EFI_BOOT_SERVICES_CODE 3
+#define EFI_BOOT_SERVICES_DATA 4
+#define EFI_RUNTIME_SERVICES_CODE 5
+#define EFI_RUNTIME_SERVICES_DATA 6
+#define EFI_CONVENTIONAL_MEMORY 7
+#define EFI_UNUSABLE_MEMORY 8
+#define EFI_ACPI_RECLAIM_MEMORY 9
+#define EFI_ACPI_MEMORY_NVS 10
+#define EFI_MEMORY_MAPPED_IO 11
+#define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12
+#define EFI_PAL_CODE 13
+#define EFI_MAX_MEMORY_TYPE 14
+
+/* Attribute values: */
+#define EFI_MEMORY_UC ((u64)0x0000000000000001ULL) /* uncached */
+#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
+#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
+#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
+#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
+#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
+#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
+#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
+#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
+#define EFI_MEMORY_DESCRIPTOR_VERSION 1
+
+#define EFI_PAGE_SHIFT 12
+#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
+
+typedef struct {
+ u32 type;
+ u32 pad;
+ u64 phys_addr;
+ u64 virt_addr;
+ u64 num_pages;
+ u64 attribute;
+} efi_memory_desc_t;
+
+typedef struct {
+ efi_guid_t guid;
+ u32 headersize;
+ u32 flags;
+ u32 imagesize;
+} efi_capsule_header_t;
+
+/*
+ * Allocation types for calls to boottime->allocate_pages.
+ */
+#define EFI_ALLOCATE_ANY_PAGES 0
+#define EFI_ALLOCATE_MAX_ADDRESS 1
+#define EFI_ALLOCATE_ADDRESS 2
+#define EFI_MAX_ALLOCATE_TYPE 3
+
+typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
+
+/*
+ * Types and defines for Time Services
+ */
+#define EFI_TIME_ADJUST_DAYLIGHT 0x1
+#define EFI_TIME_IN_DAYLIGHT 0x2
+#define EFI_UNSPECIFIED_TIMEZONE 0x07ff
+
+typedef struct {
+ u16 year;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 pad1;
+ u32 nanosecond;
+ s16 timezone;
+ u8 daylight;
+ u8 pad2;
+} efi_time_t;
+
+typedef struct {
+ u32 resolution;
+ u32 accuracy;
+ u8 sets_to_zero;
+} efi_time_cap_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u32 raise_tpl;
+ u32 restore_tpl;
+ u32 allocate_pages;
+ u32 free_pages;
+ u32 get_memory_map;
+ u32 allocate_pool;
+ u32 free_pool;
+ u32 create_event;
+ u32 set_timer;
+ u32 wait_for_event;
+ u32 signal_event;
+ u32 close_event;
+ u32 check_event;
+ u32 install_protocol_interface;
+ u32 reinstall_protocol_interface;
+ u32 uninstall_protocol_interface;
+ u32 handle_protocol;
+ u32 __reserved;
+ u32 register_protocol_notify;
+ u32 locate_handle;
+ u32 locate_device_path;
+ u32 install_configuration_table;
+ u32 load_image;
+ u32 start_image;
+ u32 exit;
+ u32 unload_image;
+ u32 exit_boot_services;
+ u32 get_next_monotonic_count;
+ u32 stall;
+ u32 set_watchdog_timer;
+ u32 connect_controller;
+ u32 disconnect_controller;
+ u32 open_protocol;
+ u32 close_protocol;
+ u32 open_protocol_information;
+ u32 protocols_per_handle;
+ u32 locate_handle_buffer;
+ u32 locate_protocol;
+ u32 install_multiple_protocol_interfaces;
+ u32 uninstall_multiple_protocol_interfaces;
+ u32 calculate_crc32;
+ u32 copy_mem;
+ u32 set_mem;
+ u32 create_event_ex;
+} __packed efi_boot_services_32_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 raise_tpl;
+ u64 restore_tpl;
+ u64 allocate_pages;
+ u64 free_pages;
+ u64 get_memory_map;
+ u64 allocate_pool;
+ u64 free_pool;
+ u64 create_event;
+ u64 set_timer;
+ u64 wait_for_event;
+ u64 signal_event;
+ u64 close_event;
+ u64 check_event;
+ u64 install_protocol_interface;
+ u64 reinstall_protocol_interface;
+ u64 uninstall_protocol_interface;
+ u64 handle_protocol;
+ u64 __reserved;
+ u64 register_protocol_notify;
+ u64 locate_handle;
+ u64 locate_device_path;
+ u64 install_configuration_table;
+ u64 load_image;
+ u64 start_image;
+ u64 exit;
+ u64 unload_image;
+ u64 exit_boot_services;
+ u64 get_next_monotonic_count;
+ u64 stall;
+ u64 set_watchdog_timer;
+ u64 connect_controller;
+ u64 disconnect_controller;
+ u64 open_protocol;
+ u64 close_protocol;
+ u64 open_protocol_information;
+ u64 protocols_per_handle;
+ u64 locate_handle_buffer;
+ u64 locate_protocol;
+ u64 install_multiple_protocol_interfaces;
+ u64 uninstall_multiple_protocol_interfaces;
+ u64 calculate_crc32;
+ u64 copy_mem;
+ u64 set_mem;
+ u64 create_event_ex;
+} __packed efi_boot_services_64_t;
+
+/*
+ * EFI Boot Services table
+ */
+typedef struct {
+ efi_table_hdr_t hdr;
+ void *raise_tpl;
+ void *restore_tpl;
+ efi_status_t (*allocate_pages)(int, int, unsigned long,
+ efi_physical_addr_t *);
+ efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
+ efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
+ unsigned long *, u32 *);
+ efi_status_t (*allocate_pool)(int, unsigned long, void **);
+ efi_status_t (*free_pool)(void *);
+ void *create_event;
+ void *set_timer;
+ void *wait_for_event;
+ void *signal_event;
+ void *close_event;
+ void *check_event;
+ void *install_protocol_interface;
+ void *reinstall_protocol_interface;
+ void *uninstall_protocol_interface;
+ efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
+ void *__reserved;
+ void *register_protocol_notify;
+ void *locate_handle;
+ void *locate_device_path;
+ void *install_configuration_table;
+ void *load_image;
+ void *start_image;
+ void *exit;
+ void *unload_image;
+ efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
+ void *get_next_monotonic_count;
+ void *stall;
+ void *set_watchdog_timer;
+ void *connect_controller;
+ void *disconnect_controller;
+ void *open_protocol;
+ void *close_protocol;
+ void *open_protocol_information;
+ void *protocols_per_handle;
+ void *locate_handle_buffer;
+ void *locate_protocol;
+ void *install_multiple_protocol_interfaces;
+ void *uninstall_multiple_protocol_interfaces;
+ void *calculate_crc32;
+ void *copy_mem;
+ void *set_mem;
+ void *create_event_ex;
+} efi_boot_services_t;
+
+typedef enum {
+ EfiPciIoWidthUint8,
+ EfiPciIoWidthUint16,
+ EfiPciIoWidthUint32,
+ EfiPciIoWidthUint64,
+ EfiPciIoWidthFifoUint8,
+ EfiPciIoWidthFifoUint16,
+ EfiPciIoWidthFifoUint32,
+ EfiPciIoWidthFifoUint64,
+ EfiPciIoWidthFillUint8,
+ EfiPciIoWidthFillUint16,
+ EfiPciIoWidthFillUint32,
+ EfiPciIoWidthFillUint64,
+ EfiPciIoWidthMaximum
+} EFI_PCI_IO_PROTOCOL_WIDTH;
+
+typedef enum {
+ EfiPciIoAttributeOperationGet,
+ EfiPciIoAttributeOperationSet,
+ EfiPciIoAttributeOperationEnable,
+ EfiPciIoAttributeOperationDisable,
+ EfiPciIoAttributeOperationSupported,
+ EfiPciIoAttributeOperationMaximum
+} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
+
+typedef struct {
+ u32 read;
+ u32 write;
+} efi_pci_io_protocol_access_32_t;
+
+typedef struct {
+ u64 read;
+ u64 write;
+} efi_pci_io_protocol_access_64_t;
+
+typedef struct {
+ void *read;
+ void *write;
+} efi_pci_io_protocol_access_t;
+
+typedef struct {
+ u32 poll_mem;
+ u32 poll_io;
+ efi_pci_io_protocol_access_32_t mem;
+ efi_pci_io_protocol_access_32_t io;
+ efi_pci_io_protocol_access_32_t pci;
+ u32 copy_mem;
+ u32 map;
+ u32 unmap;
+ u32 allocate_buffer;
+ u32 free_buffer;
+ u32 flush;
+ u32 get_location;
+ u32 attributes;
+ u32 get_bar_attributes;
+ u32 set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+} efi_pci_io_protocol_32;
+
+typedef struct {
+ u64 poll_mem;
+ u64 poll_io;
+ efi_pci_io_protocol_access_64_t mem;
+ efi_pci_io_protocol_access_64_t io;
+ efi_pci_io_protocol_access_64_t pci;
+ u64 copy_mem;
+ u64 map;
+ u64 unmap;
+ u64 allocate_buffer;
+ u64 free_buffer;
+ u64 flush;
+ u64 get_location;
+ u64 attributes;
+ u64 get_bar_attributes;
+ u64 set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+} efi_pci_io_protocol_64;
+
+typedef struct {
+ void *poll_mem;
+ void *poll_io;
+ efi_pci_io_protocol_access_t mem;
+ efi_pci_io_protocol_access_t io;
+ efi_pci_io_protocol_access_t pci;
+ void *copy_mem;
+ void *map;
+ void *unmap;
+ void *allocate_buffer;
+ void *free_buffer;
+ void *flush;
+ void *get_location;
+ void *attributes;
+ void *get_bar_attributes;
+ void *set_bar_attributes;
+ uint64_t romsize;
+ void *romimage;
+} efi_pci_io_protocol;
+
+#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
+#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
+#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
+#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
+#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
+#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
+#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
+#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
+#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
+#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
+#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
+
+/*
+ * Types and defines for EFI ResetSystem
+ */
+#define EFI_RESET_COLD 0
+#define EFI_RESET_WARM 1
+#define EFI_RESET_SHUTDOWN 2
+
+/*
+ * EFI Runtime Services table
+ */
+#define EFI_RUNTIME_SERVICES_SIGNATURE ((u64)0x5652453544e5552ULL)
+#define EFI_RUNTIME_SERVICES_REVISION 0x00010000
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u32 get_time;
+ u32 set_time;
+ u32 get_wakeup_time;
+ u32 set_wakeup_time;
+ u32 set_virtual_address_map;
+ u32 convert_pointer;
+ u32 get_variable;
+ u32 get_next_variable;
+ u32 set_variable;
+ u32 get_next_high_mono_count;
+ u32 reset_system;
+ u32 update_capsule;
+ u32 query_capsule_caps;
+ u32 query_variable_info;
+} efi_runtime_services_32_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 get_time;
+ u64 set_time;
+ u64 get_wakeup_time;
+ u64 set_wakeup_time;
+ u64 set_virtual_address_map;
+ u64 convert_pointer;
+ u64 get_variable;
+ u64 get_next_variable;
+ u64 set_variable;
+ u64 get_next_high_mono_count;
+ u64 reset_system;
+ u64 update_capsule;
+ u64 query_capsule_caps;
+ u64 query_variable_info;
+} efi_runtime_services_64_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ void *get_time;
+ void *set_time;
+ void *get_wakeup_time;
+ void *set_wakeup_time;
+ void *set_virtual_address_map;
+ void *convert_pointer;
+ void *get_variable;
+ void *get_next_variable;
+ void *set_variable;
+ void *get_next_high_mono_count;
+ void *reset_system;
+ void *update_capsule;
+ void *query_capsule_caps;
+ void *query_variable_info;
+} efi_runtime_services_t;
+
+typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
+typedef efi_status_t efi_set_time_t (efi_time_t *tm);
+typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
+ efi_time_t *tm);
+typedef efi_status_t efi_set_wakeup_time_t (efi_bool_t enabled, efi_time_t *tm);
+typedef efi_status_t efi_get_variable_t (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size, void *data);
+typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char16_t *name,
+ efi_guid_t *vendor);
+typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data);
+typedef efi_status_t
+efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data);
+
+typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
+typedef void efi_reset_system_t (int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data);
+typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map);
+typedef efi_status_t efi_query_variable_info_t(u32 attr,
+ u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size);
+typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **capsules,
+ unsigned long count,
+ unsigned long sg_list);
+typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
+ unsigned long count,
+ u64 *max_size,
+ int *reset_type);
+typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
+
+void efi_native_runtime_setup(void);
+
+/*
+ * EFI Configuration Table and GUID definitions
+ */
+#define NULL_GUID \
+ EFI_GUID( 0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 )
+
+#define MPS_TABLE_GUID \
+ EFI_GUID( 0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+
+#define ACPI_TABLE_GUID \
+ EFI_GUID( 0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+
+#define ACPI_20_TABLE_GUID \
+ EFI_GUID( 0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81 )
+
+#define SMBIOS_TABLE_GUID \
+ EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+
+#define SMBIOS3_TABLE_GUID \
+ EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 )
+
+#define SAL_SYSTEM_TABLE_GUID \
+ EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
+
+#define HCDP_TABLE_GUID \
+ EFI_GUID( 0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98 )
+
+#define UGA_IO_PROTOCOL_GUID \
+ EFI_GUID( 0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2 )
+
+#define EFI_GLOBAL_VARIABLE_GUID \
+ EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
+
+#define UV_SYSTEM_TABLE_GUID \
+ EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
+
+#define LINUX_EFI_CRASH_GUID \
+ EFI_GUID( 0xcfc8fc79, 0xbe2e, 0x4ddc, 0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0 )
+
+#define LOADED_IMAGE_PROTOCOL_GUID \
+ EFI_GUID( 0x5b1b31a1, 0x9562, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+
+#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \
+ EFI_GUID( 0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a )
+
+#define EFI_UGA_PROTOCOL_GUID \
+ EFI_GUID( 0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39 )
+
+#define EFI_PCI_IO_PROTOCOL_GUID \
+ EFI_GUID( 0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x2, 0x9a )
+
+#define EFI_FILE_INFO_ID \
+ EFI_GUID( 0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+
+#define EFI_FILE_SYSTEM_GUID \
+ EFI_GUID( 0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
+
+#define DEVICE_TREE_GUID \
+ EFI_GUID( 0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0 )
+
+typedef struct {
+ efi_guid_t guid;
+ u64 table;
+} efi_config_table_64_t;
+
+typedef struct {
+ efi_guid_t guid;
+ u32 table;
+} efi_config_table_32_t;
+
+typedef struct {
+ efi_guid_t guid;
+ unsigned long table;
+} efi_config_table_t;
+
+typedef struct {
+ efi_guid_t guid;
+ const char *name;
+ unsigned long *ptr;
+} efi_config_table_type_t;
+
+#define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL)
+
+#define EFI_2_30_SYSTEM_TABLE_REVISION ((2 << 16) | (30))
+#define EFI_2_20_SYSTEM_TABLE_REVISION ((2 << 16) | (20))
+#define EFI_2_10_SYSTEM_TABLE_REVISION ((2 << 16) | (10))
+#define EFI_2_00_SYSTEM_TABLE_REVISION ((2 << 16) | (00))
+#define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | (10))
+#define EFI_1_02_SYSTEM_TABLE_REVISION ((1 << 16) | (02))
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u64 fw_vendor; /* physical addr of CHAR16 vendor string */
+ u32 fw_revision;
+ u32 __pad1;
+ u64 con_in_handle;
+ u64 con_in;
+ u64 con_out_handle;
+ u64 con_out;
+ u64 stderr_handle;
+ u64 stderr;
+ u64 runtime;
+ u64 boottime;
+ u32 nr_tables;
+ u32 __pad2;
+ u64 tables;
+} efi_system_table_64_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ u32 fw_vendor; /* physical addr of CHAR16 vendor string */
+ u32 fw_revision;
+ u32 con_in_handle;
+ u32 con_in;
+ u32 con_out_handle;
+ u32 con_out;
+ u32 stderr_handle;
+ u32 stderr;
+ u32 runtime;
+ u32 boottime;
+ u32 nr_tables;
+ u32 tables;
+} efi_system_table_32_t;
+
+typedef struct {
+ efi_table_hdr_t hdr;
+ unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
+ u32 fw_revision;
+ unsigned long con_in_handle;
+ unsigned long con_in;
+ unsigned long con_out_handle;
+ unsigned long con_out;
+ unsigned long stderr_handle;
+ unsigned long stderr;
+ efi_runtime_services_t *runtime;
+ efi_boot_services_t *boottime;
+ unsigned long nr_tables;
+ unsigned long tables;
+} efi_system_table_t;
+
+struct efi_memory_map {
+ void *phys_map;
+ void *map;
+ void *map_end;
+ int nr_map;
+ unsigned long desc_version;
+ unsigned long desc_size;
+};
+
+struct efi_fdt_params {
+ u64 system_table;
+ u64 mmap;
+ u32 mmap_size;
+ u32 desc_size;
+ u32 desc_ver;
+};
+
+typedef struct {
+ u32 revision;
+ u32 parent_handle;
+ u32 system_table;
+ u32 device_handle;
+ u32 file_path;
+ u32 reserved;
+ u32 load_options_size;
+ u32 load_options;
+ u32 image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ unsigned long unload;
+} efi_loaded_image_32_t;
+
+typedef struct {
+ u32 revision;
+ u64 parent_handle;
+ u64 system_table;
+ u64 device_handle;
+ u64 file_path;
+ u64 reserved;
+ u32 load_options_size;
+ u64 load_options;
+ u64 image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ unsigned long unload;
+} efi_loaded_image_64_t;
+
+typedef struct {
+ u32 revision;
+ void *parent_handle;
+ efi_system_table_t *system_table;
+ void *device_handle;
+ void *file_path;
+ void *reserved;
+ u32 load_options_size;
+ void *load_options;
+ void *image_base;
+ __aligned_u64 image_size;
+ unsigned int image_code_type;
+ unsigned int image_data_type;
+ unsigned long unload;
+} efi_loaded_image_t;
+
+
+typedef struct {
+ u64 size;
+ u64 file_size;
+ u64 phys_size;
+ efi_time_t create_time;
+ efi_time_t last_access_time;
+ efi_time_t modification_time;
+ __aligned_u64 attribute;
+ efi_char16_t filename[1];
+} efi_file_info_t;
+
+typedef struct {
+ u64 revision;
+ u32 open;
+ u32 close;
+ u32 delete;
+ u32 read;
+ u32 write;
+ u32 get_position;
+ u32 set_position;
+ u32 get_info;
+ u32 set_info;
+ u32 flush;
+} efi_file_handle_32_t;
+
+typedef struct {
+ u64 revision;
+ u64 open;
+ u64 close;
+ u64 delete;
+ u64 read;
+ u64 write;
+ u64 get_position;
+ u64 set_position;
+ u64 get_info;
+ u64 set_info;
+ u64 flush;
+} efi_file_handle_64_t;
+
+typedef struct _efi_file_handle {
+ u64 revision;
+ efi_status_t (*open)(struct _efi_file_handle *,
+ struct _efi_file_handle **,
+ efi_char16_t *, u64, u64);
+ efi_status_t (*close)(struct _efi_file_handle *);
+ void *delete;
+ efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
+ void *);
+ void *write;
+ void *get_position;
+ void *set_position;
+ efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
+ unsigned long *, void *);
+ void *set_info;
+ void *flush;
+} efi_file_handle_t;
+
+typedef struct _efi_file_io_interface {
+ u64 revision;
+ int (*open_volume)(struct _efi_file_io_interface *,
+ efi_file_handle_t **);
+} efi_file_io_interface_t;
+
+#define EFI_FILE_MODE_READ 0x0000000000000001
+#define EFI_FILE_MODE_WRITE 0x0000000000000002
+#define EFI_FILE_MODE_CREATE 0x8000000000000000
+
+#define EFI_INVALID_TABLE_ADDR (~0UL)
+
+/*
+ * All runtime access to EFI goes through this structure:
+ */
+extern struct efi {
+ efi_system_table_t *systab; /* EFI system table */
+ unsigned int runtime_version; /* Runtime services version */
+ unsigned long mps; /* MPS table */
+ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
+ unsigned long acpi20; /* ACPI table (ACPI 2.0) */
+ unsigned long smbios; /* SMBIOS table (32 bit entry point) */
+ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
+ unsigned long sal_systab; /* SAL system table */
+ unsigned long boot_info; /* boot info table */
+ unsigned long hcdp; /* HCDP table */
+ unsigned long uga; /* UGA table */
+ unsigned long uv_systab; /* UV system table */
+ unsigned long fw_vendor; /* fw_vendor */
+ unsigned long runtime; /* runtime table */
+ unsigned long config_table; /* config tables */
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
+ efi_set_wakeup_time_t *set_wakeup_time;
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_set_variable_nonblocking_t *set_variable_nonblocking;
+ efi_query_variable_info_t *query_variable_info;
+ efi_update_capsule_t *update_capsule;
+ efi_query_capsule_caps_t *query_capsule_caps;
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+ efi_set_virtual_address_map_t *set_virtual_address_map;
+ struct efi_memory_map *memmap;
+ unsigned long flags;
+} efi;
+
+static inline int
+efi_guidcmp (efi_guid_t left, efi_guid_t right)
+{
+ return memcmp(&left, &right, sizeof (efi_guid_t));
+}
+
+static inline char *
+efi_guid_to_str(efi_guid_t *guid, char *out)
+{
+ sprintf(out, "%pUl", guid->b);
+ return out;
+}
+
+extern void efi_init (void);
+extern void *efi_get_pal_addr (void);
+extern void efi_map_pal_code (void);
+extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
+extern void efi_gettimeofday (struct timespec *ts);
+extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
+#ifdef CONFIG_X86
+extern void efi_late_init(void);
+extern void efi_free_boot_services(void);
+extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
+#else
+static inline void efi_late_init(void) {}
+static inline void efi_free_boot_services(void) {}
+
+static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+{
+ return EFI_SUCCESS;
+}
+#endif
+extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+extern int efi_config_init(efi_config_table_type_t *arch_tables);
+extern int efi_config_parse_tables(void *config_tables, int count, int sz,
+ efi_config_table_type_t *arch_tables);
+extern u64 efi_get_iobase (void);
+extern u32 efi_mem_type (unsigned long phys_addr);
+extern u64 efi_mem_attributes (unsigned long phys_addr);
+extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
+extern int __init efi_uart_console_only (void);
+extern void efi_initialize_iomem_resources(struct resource *code_resource,
+ struct resource *data_resource, struct resource *bss_resource);
+extern void efi_get_time(struct timespec *now);
+extern void efi_reserve_boot_services(void);
+extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
+extern struct efi_memory_map memmap;
+
+extern int efi_reboot_quirk_mode;
+extern bool efi_poweroff_required(void);
+
+/* Iterate through an efi_memory_map */
+#define for_each_efi_memory_desc(m, md) \
+ for ((md) = (m)->map; \
+ (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
+ (md) = (void *)(md) + (m)->desc_size)
+
+/*
+ * Format an EFI memory descriptor's type and attributes to a user-provided
+ * character buffer, as per snprintf(), and return the buffer.
+ */
+char * __init efi_md_typeattr_format(char *buf, size_t size,
+ const efi_memory_desc_t *md);
+
+/**
+ * efi_range_is_wc - check the WC bit on an address range
+ * @start: starting kvirt address
+ * @len: length of range
+ *
+ * Consult the EFI memory map and make sure it's ok to set this range WC.
+ * Returns true or false.
+ */
+static inline int efi_range_is_wc(unsigned long start, unsigned long len)
+{
+ unsigned long i;
+
+ for (i = 0; i < len; i += (1UL << EFI_PAGE_SHIFT)) {
+ unsigned long paddr = __pa(start + i);
+ if (!(efi_mem_attributes(paddr) & EFI_MEMORY_WC))
+ return 0;
+ }
+ /* The range checked out */
+ return 1;
+}
+
+#ifdef CONFIG_EFI_PCDP
+extern int __init efi_setup_pcdp_console(char *);
+#endif
+
+/*
+ * We play games with efi_enabled so that the compiler will, if
+ * possible, remove EFI-related code altogether.
+ */
+#define EFI_BOOT 0 /* Were we booted from EFI? */
+#define EFI_SYSTEM_TABLES 1 /* Can we use EFI system tables? */
+#define EFI_CONFIG_TABLES 2 /* Can we use EFI config tables? */
+#define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */
+#define EFI_MEMMAP 4 /* Can we use EFI memory map? */
+#define EFI_64BIT 5 /* Is the firmware 64-bit? */
+#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */
+#define EFI_ARCH_1 7 /* First arch-specific bit */
+#define EFI_DBG 8 /* Print additional debug info at runtime */
+
+#ifdef CONFIG_EFI
+/*
+ * Test whether the above EFI_* bits are enabled.
+ */
+static inline bool efi_enabled(int feature)
+{
+ return test_bit(feature, &efi.flags) != 0;
+}
+extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+#else
+static inline bool efi_enabled(int feature)
+{
+ return false;
+}
+static inline void
+efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
+#endif
+
+/*
+ * Variable Attributes
+ */
+#define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001
+#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
+#define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004
+#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008
+#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010
+#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020
+#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040
+
+#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | \
+ EFI_VARIABLE_RUNTIME_ACCESS | \
+ EFI_VARIABLE_HARDWARE_ERROR_RECORD | \
+ EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \
+ EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \
+ EFI_VARIABLE_APPEND_WRITE)
+/*
+ * Length of a GUID string (strlen("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"))
+ * not including trailing NUL
+ */
+#define EFI_VARIABLE_GUID_LEN 36
+
+/*
+ * The type of search to perform when calling boottime->locate_handle
+ */
+#define EFI_LOCATE_ALL_HANDLES 0
+#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
+#define EFI_LOCATE_BY_PROTOCOL 2
+
+/*
+ * EFI Device Path information
+ */
+#define EFI_DEV_HW 0x01
+#define EFI_DEV_PCI 1
+#define EFI_DEV_PCCARD 2
+#define EFI_DEV_MEM_MAPPED 3
+#define EFI_DEV_VENDOR 4
+#define EFI_DEV_CONTROLLER 5
+#define EFI_DEV_ACPI 0x02
+#define EFI_DEV_BASIC_ACPI 1
+#define EFI_DEV_EXPANDED_ACPI 2
+#define EFI_DEV_MSG 0x03
+#define EFI_DEV_MSG_ATAPI 1
+#define EFI_DEV_MSG_SCSI 2
+#define EFI_DEV_MSG_FC 3
+#define EFI_DEV_MSG_1394 4
+#define EFI_DEV_MSG_USB 5
+#define EFI_DEV_MSG_USB_CLASS 15
+#define EFI_DEV_MSG_I20 6
+#define EFI_DEV_MSG_MAC 11
+#define EFI_DEV_MSG_IPV4 12
+#define EFI_DEV_MSG_IPV6 13
+#define EFI_DEV_MSG_INFINIBAND 9
+#define EFI_DEV_MSG_UART 14
+#define EFI_DEV_MSG_VENDOR 10
+#define EFI_DEV_MEDIA 0x04
+#define EFI_DEV_MEDIA_HARD_DRIVE 1
+#define EFI_DEV_MEDIA_CDROM 2
+#define EFI_DEV_MEDIA_VENDOR 3
+#define EFI_DEV_MEDIA_FILE 4
+#define EFI_DEV_MEDIA_PROTOCOL 5
+#define EFI_DEV_BIOS_BOOT 0x05
+#define EFI_DEV_END_PATH 0x7F
+#define EFI_DEV_END_PATH2 0xFF
+#define EFI_DEV_END_INSTANCE 0x01
+#define EFI_DEV_END_ENTIRE 0xFF
+
+struct efi_generic_dev_path {
+ u8 type;
+ u8 sub_type;
+ u16 length;
+} __attribute ((packed));
+
+static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
+{
+ *npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
+ *addr &= PAGE_MASK;
+}
+
+/*
+ * EFI Variable support.
+ *
+ * Different firmware drivers can expose their EFI-like variables using
+ * the following.
+ */
+
+struct efivar_operations {
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_set_variable_nonblocking_t *set_variable_nonblocking;
+ efi_query_variable_store_t *query_variable_store;
+};
+
+struct efivars {
+ /*
+ * ->lock protects two things:
+ * 1) efivarfs_list and efivars_sysfs_list
+ * 2) ->ops calls
+ */
+ spinlock_t lock;
+ struct kset *kset;
+ struct kobject *kobject;
+ const struct efivar_operations *ops;
+};
+
+/*
+ * The maximum size of VariableName + Data = 1024
+ * Therefore, it's reasonable to save that much
+ * space in each part of the structure,
+ * and we use a page for reading/writing.
+ */
+
+#define EFI_VAR_NAME_LEN 1024
+
+struct efi_variable {
+ efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)];
+ efi_guid_t VendorGuid;
+ unsigned long DataSize;
+ __u8 Data[1024];
+ efi_status_t Status;
+ __u32 Attributes;
+} __attribute__((packed));
+
+struct efivar_entry {
+ struct efi_variable var;
+ struct list_head list;
+ struct kobject kobj;
+ bool scanning;
+ bool deleting;
+};
+
+struct efi_simple_text_output_protocol_32 {
+ u32 reset;
+ u32 output_string;
+ u32 test_string;
+};
+
+struct efi_simple_text_output_protocol_64 {
+ u64 reset;
+ u64 output_string;
+ u64 test_string;
+};
+
+struct efi_simple_text_output_protocol {
+ void *reset;
+ efi_status_t (*output_string)(void *, void *);
+ void *test_string;
+};
+
+extern struct list_head efivar_sysfs_list;
+
+static inline void
+efivar_unregister(struct efivar_entry *var)
+{
+ kobject_put(&var->kobj);
+}
+
+int efivars_register(struct efivars *efivars,
+ const struct efivar_operations *ops,
+ struct kobject *kobject);
+int efivars_unregister(struct efivars *efivars);
+struct kobject *efivars_kobject(void);
+
+int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
+ void *data, bool atomic, bool duplicates,
+ struct list_head *head);
+
+void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
+void efivar_entry_remove(struct efivar_entry *entry);
+
+int __efivar_entry_delete(struct efivar_entry *entry);
+int efivar_entry_delete(struct efivar_entry *entry);
+
+int efivar_entry_size(struct efivar_entry *entry, unsigned long *size);
+int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data);
+int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
+ unsigned long *size, void *data);
+int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
+ unsigned long size, void *data, struct list_head *head);
+int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ unsigned long *size, void *data, bool *set);
+int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
+ bool block, unsigned long size, void *data);
+
+void efivar_entry_iter_begin(void);
+void efivar_entry_iter_end(void);
+
+int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct list_head *head, void *data,
+ struct efivar_entry **prev);
+int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct list_head *head, void *data);
+
+struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ struct list_head *head, bool remove);
+
+bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
+
+extern struct work_struct efivar_work;
+void efivar_run_worker(void);
+
+#if defined(CONFIG_EFI_VARS) || defined(CONFIG_EFI_VARS_MODULE)
+int efivars_sysfs_init(void);
+
+#define EFIVARS_DATA_SIZE_MAX 1024
+
+#endif /* CONFIG_EFI_VARS */
+
+#ifdef CONFIG_EFI_RUNTIME_MAP
+int efi_runtime_map_init(struct kobject *);
+void efi_runtime_map_setup(void *, int, u32);
+int efi_get_runtime_map_size(void);
+int efi_get_runtime_map_desc_size(void);
+int efi_runtime_map_copy(void *buf, size_t bufsz);
+#else
+static inline int efi_runtime_map_init(struct kobject *kobj)
+{
+ return 0;
+}
+
+static inline void
+efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
+
+static inline int efi_get_runtime_map_size(void)
+{
+ return 0;
+}
+
+static inline int efi_get_runtime_map_desc_size(void)
+{
+ return 0;
+}
+
+static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
+{
+ return 0;
+}
+
+#endif
+
+/* prototypes shared between arch specific and generic stub code */
+
+#define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg)
+#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
+
+void efi_printk(efi_system_table_t *sys_table_arg, char *str);
+
+void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
+ unsigned long addr);
+
+char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image, int *cmd_line_len);
+
+efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
+ efi_memory_desc_t **map,
+ unsigned long *map_size,
+ unsigned long *desc_size,
+ u32 *desc_ver,
+ unsigned long *key_ptr);
+
+efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr);
+
+efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long max);
+
+efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
+ unsigned long *image_addr,
+ unsigned long image_size,
+ unsigned long alloc_size,
+ unsigned long preferred_addr,
+ unsigned long alignment);
+
+efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
+ efi_loaded_image_t *image,
+ char *cmd_line, char *option_string,
+ unsigned long max_addr,
+ unsigned long *load_addr,
+ unsigned long *load_size);
+
+efi_status_t efi_parse_options(char *cmdline);
+
+bool efi_runtime_disabled(void);
+#endif /* _LINUX_EFI_H */
diff --git a/include/linux/efs_vh.h b/include/linux/efs_vh.h
new file mode 100644
index 000000000..8a11150c6
--- /dev/null
+++ b/include/linux/efs_vh.h
@@ -0,0 +1,53 @@
+/*
+ * efs_vh.h
+ *
+ * Copyright (c) 1999 Al Smith
+ *
+ * Portions derived from IRIX header files (c) 1985 MIPS Computer Systems, Inc.
+ */
+
+#ifndef __EFS_VH_H__
+#define __EFS_VH_H__
+
+#define VHMAGIC 0xbe5a941 /* volume header magic number */
+#define NPARTAB 16 /* 16 unix partitions */
+#define NVDIR 15 /* max of 15 directory entries */
+#define BFNAMESIZE 16 /* max 16 chars in boot file name */
+#define VDNAMESIZE 8
+
+struct volume_directory {
+ char vd_name[VDNAMESIZE]; /* name */
+ __be32 vd_lbn; /* logical block number */
+ __be32 vd_nbytes; /* file length in bytes */
+};
+
+struct partition_table { /* one per logical partition */
+ __be32 pt_nblks; /* # of logical blks in partition */
+ __be32 pt_firstlbn; /* first lbn of partition */
+ __be32 pt_type; /* use of partition */
+};
+
+struct volume_header {
+ __be32 vh_magic; /* identifies volume header */
+ __be16 vh_rootpt; /* root partition number */
+ __be16 vh_swappt; /* swap partition number */
+ char vh_bootfile[BFNAMESIZE]; /* name of file to boot */
+ char pad[48]; /* device param space */
+ struct volume_directory vh_vd[NVDIR]; /* other vol hdr contents */
+ struct partition_table vh_pt[NPARTAB]; /* device partition layout */
+ __be32 vh_csum; /* volume header checksum */
+ __be32 vh_fill; /* fill out to 512 bytes */
+};
+
+/* partition type sysv is used for EFS format CD-ROM partitions */
+#define SGI_SYSV 0x05
+#define SGI_EFS 0x07
+#define IS_EFS(x) (((x) == SGI_EFS) || ((x) == SGI_SYSV))
+
+struct pt_types {
+ int pt_type;
+ char *pt_name;
+};
+
+#endif /* __EFS_VH_H__ */
+
diff --git a/include/linux/eisa.h b/include/linux/eisa.h
new file mode 100644
index 000000000..6925249a5
--- /dev/null
+++ b/include/linux/eisa.h
@@ -0,0 +1,111 @@
+#ifndef _LINUX_EISA_H
+#define _LINUX_EISA_H
+
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+#define EISA_MAX_SLOTS 8
+
+#define EISA_MAX_RESOURCES 4
+
+/* A few EISA constants/offsets... */
+
+#define EISA_DMA1_STATUS 8
+#define EISA_INT1_CTRL 0x20
+#define EISA_INT1_MASK 0x21
+#define EISA_INT2_CTRL 0xA0
+#define EISA_INT2_MASK 0xA1
+#define EISA_DMA2_STATUS 0xD0
+#define EISA_DMA2_WRITE_SINGLE 0xD4
+#define EISA_EXT_NMI_RESET_CTRL 0x461
+#define EISA_INT1_EDGE_LEVEL 0x4D0
+#define EISA_INT2_EDGE_LEVEL 0x4D1
+#define EISA_VENDOR_ID_OFFSET 0xC80
+#define EISA_CONFIG_OFFSET 0xC84
+
+#define EISA_CONFIG_ENABLED 1
+#define EISA_CONFIG_FORCED 2
+
+/* There is not much we can say about an EISA device, apart from
+ * signature, slot number, and base address. dma_mask is set by
+ * default to the parent device mask. */
+
+struct eisa_device {
+ struct eisa_device_id id;
+ int slot;
+ int state;
+ unsigned long base_addr;
+ struct resource res[EISA_MAX_RESOURCES];
+ u64 dma_mask;
+ struct device dev; /* generic device */
+#ifdef CONFIG_EISA_NAMES
+ char pretty_name[50];
+#endif
+};
+
+#define to_eisa_device(n) container_of(n, struct eisa_device, dev)
+
+static inline int eisa_get_region_index (void *addr)
+{
+ unsigned long x = (unsigned long) addr;
+
+ x &= 0xc00;
+ return (x >> 12);
+}
+
+struct eisa_driver {
+ const struct eisa_device_id *id_table;
+ struct device_driver driver;
+};
+
+#define to_eisa_driver(drv) container_of(drv,struct eisa_driver, driver)
+
+/* These external functions are only available when EISA support is enabled. */
+#ifdef CONFIG_EISA
+
+extern struct bus_type eisa_bus_type;
+int eisa_driver_register (struct eisa_driver *edrv);
+void eisa_driver_unregister (struct eisa_driver *edrv);
+
+#else /* !CONFIG_EISA */
+
+static inline int eisa_driver_register (struct eisa_driver *edrv) { return 0; }
+static inline void eisa_driver_unregister (struct eisa_driver *edrv) { }
+
+#endif /* !CONFIG_EISA */
+
+/* Mimics pci.h... */
+static inline void *eisa_get_drvdata (struct eisa_device *edev)
+{
+ return dev_get_drvdata(&edev->dev);
+}
+
+static inline void eisa_set_drvdata (struct eisa_device *edev, void *data)
+{
+ dev_set_drvdata(&edev->dev, data);
+}
+
+/* The EISA root device. There are rumours about machines with multiple
+ * busses (PA-RISC ?), so we try to handle that. */
+
+struct eisa_root_device {
+ struct device *dev; /* Pointer to bridge device */
+ struct resource *res;
+ unsigned long bus_base_addr;
+ int slots; /* Max slot number */
+ int force_probe; /* Probe even when no slot 0 */
+ u64 dma_mask; /* from bridge device */
+ int bus_nr; /* Set by eisa_root_register */
+ struct resource eisa_root_res; /* ditto */
+};
+
+int eisa_root_register (struct eisa_root_device *root);
+
+#ifdef CONFIG_EISA
+extern int EISA_bus;
+#else
+# define EISA_bus 0
+#endif
+
+#endif
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
new file mode 100644
index 000000000..45a914744
--- /dev/null
+++ b/include/linux/elevator.h
@@ -0,0 +1,212 @@
+#ifndef _LINUX_ELEVATOR_H
+#define _LINUX_ELEVATOR_H
+
+#include <linux/percpu.h>
+#include <linux/hashtable.h>
+
+#ifdef CONFIG_BLOCK
+
+struct io_cq;
+struct elevator_type;
+
+typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
+ struct bio *);
+
+typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
+
+typedef void (elevator_merged_fn) (struct request_queue *, struct request *, int);
+
+typedef int (elevator_allow_merge_fn) (struct request_queue *, struct request *, struct bio *);
+
+typedef void (elevator_bio_merged_fn) (struct request_queue *,
+ struct request *, struct bio *);
+
+typedef int (elevator_dispatch_fn) (struct request_queue *, int);
+
+typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
+typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
+typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
+typedef int (elevator_may_queue_fn) (struct request_queue *, int);
+
+typedef void (elevator_init_icq_fn) (struct io_cq *);
+typedef void (elevator_exit_icq_fn) (struct io_cq *);
+typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
+ struct bio *, gfp_t);
+typedef void (elevator_put_req_fn) (struct request *);
+typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
+typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
+
+typedef int (elevator_init_fn) (struct request_queue *,
+ struct elevator_type *e);
+typedef void (elevator_exit_fn) (struct elevator_queue *);
+
+struct elevator_ops
+{
+ elevator_merge_fn *elevator_merge_fn;
+ elevator_merged_fn *elevator_merged_fn;
+ elevator_merge_req_fn *elevator_merge_req_fn;
+ elevator_allow_merge_fn *elevator_allow_merge_fn;
+ elevator_bio_merged_fn *elevator_bio_merged_fn;
+
+ elevator_dispatch_fn *elevator_dispatch_fn;
+ elevator_add_req_fn *elevator_add_req_fn;
+ elevator_activate_req_fn *elevator_activate_req_fn;
+ elevator_deactivate_req_fn *elevator_deactivate_req_fn;
+
+ elevator_completed_req_fn *elevator_completed_req_fn;
+
+ elevator_request_list_fn *elevator_former_req_fn;
+ elevator_request_list_fn *elevator_latter_req_fn;
+
+ elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */
+ elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */
+
+ elevator_set_req_fn *elevator_set_req_fn;
+ elevator_put_req_fn *elevator_put_req_fn;
+
+ elevator_may_queue_fn *elevator_may_queue_fn;
+
+ elevator_init_fn *elevator_init_fn;
+ elevator_exit_fn *elevator_exit_fn;
+};
+
+#define ELV_NAME_MAX (16)
+
+struct elv_fs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct elevator_queue *, char *);
+ ssize_t (*store)(struct elevator_queue *, const char *, size_t);
+};
+
+/*
+ * identifies an elevator type, such as AS or deadline
+ */
+struct elevator_type
+{
+ /* managed by elevator core */
+ struct kmem_cache *icq_cache;
+
+ /* fields provided by elevator implementation */
+ struct elevator_ops ops;
+ size_t icq_size; /* see iocontext.h */
+ size_t icq_align; /* ditto */
+ struct elv_fs_entry *elevator_attrs;
+ char elevator_name[ELV_NAME_MAX];
+ struct module *elevator_owner;
+
+ /* managed by elevator core */
+ char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
+ struct list_head list;
+};
+
+#define ELV_HASH_BITS 6
+
+/*
+ * each queue has an elevator_queue associated with it
+ */
+struct elevator_queue
+{
+ struct elevator_type *type;
+ void *elevator_data;
+ struct kobject kobj;
+ struct mutex sysfs_lock;
+ unsigned int registered:1;
+ DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
+};
+
+/*
+ * block elevator interface
+ */
+extern void elv_dispatch_sort(struct request_queue *, struct request *);
+extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
+extern void elv_add_request(struct request_queue *, struct request *, int);
+extern void __elv_add_request(struct request_queue *, struct request *, int);
+extern int elv_merge(struct request_queue *, struct request **, struct bio *);
+extern void elv_merge_requests(struct request_queue *, struct request *,
+ struct request *);
+extern void elv_merged_request(struct request_queue *, struct request *, int);
+extern void elv_bio_merged(struct request_queue *q, struct request *,
+ struct bio *);
+extern void elv_requeue_request(struct request_queue *, struct request *);
+extern struct request *elv_former_request(struct request_queue *, struct request *);
+extern struct request *elv_latter_request(struct request_queue *, struct request *);
+extern int elv_register_queue(struct request_queue *q);
+extern void elv_unregister_queue(struct request_queue *q);
+extern int elv_may_queue(struct request_queue *, int);
+extern void elv_completed_request(struct request_queue *, struct request *);
+extern int elv_set_request(struct request_queue *q, struct request *rq,
+ struct bio *bio, gfp_t gfp_mask);
+extern void elv_put_request(struct request_queue *, struct request *);
+extern void elv_drain_elevator(struct request_queue *);
+
+/*
+ * io scheduler registration
+ */
+extern void __init load_default_elevator_module(void);
+extern int elv_register(struct elevator_type *);
+extern void elv_unregister(struct elevator_type *);
+
+/*
+ * io scheduler sysfs switching
+ */
+extern ssize_t elv_iosched_show(struct request_queue *, char *);
+extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
+
+extern int elevator_init(struct request_queue *, char *);
+extern void elevator_exit(struct elevator_queue *);
+extern int elevator_change(struct request_queue *, const char *);
+extern bool elv_rq_merge_ok(struct request *, struct bio *);
+extern struct elevator_queue *elevator_alloc(struct request_queue *,
+ struct elevator_type *);
+
+/*
+ * Helper functions.
+ */
+extern struct request *elv_rb_former_request(struct request_queue *, struct request *);
+extern struct request *elv_rb_latter_request(struct request_queue *, struct request *);
+
+/*
+ * rb support functions.
+ */
+extern void elv_rb_add(struct rb_root *, struct request *);
+extern void elv_rb_del(struct rb_root *, struct request *);
+extern struct request *elv_rb_find(struct rb_root *, sector_t);
+
+/*
+ * Return values from elevator merger
+ */
+#define ELEVATOR_NO_MERGE 0
+#define ELEVATOR_FRONT_MERGE 1
+#define ELEVATOR_BACK_MERGE 2
+
+/*
+ * Insertion selection
+ */
+#define ELEVATOR_INSERT_FRONT 1
+#define ELEVATOR_INSERT_BACK 2
+#define ELEVATOR_INSERT_SORT 3
+#define ELEVATOR_INSERT_REQUEUE 4
+#define ELEVATOR_INSERT_FLUSH 5
+#define ELEVATOR_INSERT_SORT_MERGE 6
+
+/*
+ * return values from elevator_may_queue_fn
+ */
+enum {
+ ELV_MQUEUE_MAY,
+ ELV_MQUEUE_NO,
+ ELV_MQUEUE_MUST,
+};
+
+#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
+#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
+
+#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
+#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
+
+#else /* CONFIG_BLOCK */
+
+static inline void load_default_elevator_module(void) { }
+
+#endif /* CONFIG_BLOCK */
+#endif
diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h
new file mode 100644
index 000000000..386440317
--- /dev/null
+++ b/include/linux/elf-fdpic.h
@@ -0,0 +1,51 @@
+/* FDPIC ELF load map
+ *
+ * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_ELF_FDPIC_H
+#define _LINUX_ELF_FDPIC_H
+
+#include <uapi/linux/elf-fdpic.h>
+
+/*
+ * binfmt binary parameters structure
+ */
+struct elf_fdpic_params {
+ struct elfhdr hdr; /* ref copy of ELF header */
+ struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */
+ struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
+ unsigned long elfhdr_addr; /* mapped ELF header user address */
+ unsigned long ph_addr; /* mapped PT_PHDR user address */
+ unsigned long map_addr; /* mapped loadmap user address */
+ unsigned long entry_addr; /* mapped entry user address */
+ unsigned long stack_size; /* stack size requested (PT_GNU_STACK) */
+ unsigned long dynamic_addr; /* mapped PT_DYNAMIC user address */
+ unsigned long load_addr; /* user address at which to map binary */
+ unsigned long flags;
+#define ELF_FDPIC_FLAG_ARRANGEMENT 0x0000000f /* PT_LOAD arrangement flags */
+#define ELF_FDPIC_FLAG_INDEPENDENT 0x00000000 /* PT_LOADs can be put anywhere */
+#define ELF_FDPIC_FLAG_HONOURVADDR 0x00000001 /* PT_LOAD.vaddr must be honoured */
+#define ELF_FDPIC_FLAG_CONSTDISP 0x00000002 /* PT_LOADs require constant
+ * displacement */
+#define ELF_FDPIC_FLAG_CONTIGUOUS 0x00000003 /* PT_LOADs should be contiguous */
+#define ELF_FDPIC_FLAG_EXEC_STACK 0x00000010 /* T if stack to be executable */
+#define ELF_FDPIC_FLAG_NOEXEC_STACK 0x00000020 /* T if stack not to be executable */
+#define ELF_FDPIC_FLAG_EXECUTABLE 0x00000040 /* T if this object is the executable */
+#define ELF_FDPIC_FLAG_PRESENT 0x80000000 /* T if this object is present */
+};
+
+#ifdef CONFIG_MMU
+extern void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params,
+ struct elf_fdpic_params *interp_params,
+ unsigned long *start_stack,
+ unsigned long *start_brk);
+#endif
+
+#endif /* _LINUX_ELF_FDPIC_H */
diff --git a/include/linux/elf-randomize.h b/include/linux/elf-randomize.h
new file mode 100644
index 000000000..b5f0bda94
--- /dev/null
+++ b/include/linux/elf-randomize.h
@@ -0,0 +1,22 @@
+#ifndef _ELF_RANDOMIZE_H
+#define _ELF_RANDOMIZE_H
+
+struct mm_struct;
+
+#ifndef CONFIG_ARCH_HAS_ELF_RANDOMIZE
+static inline unsigned long arch_mmap_rnd(void) { return 0; }
+# if defined(arch_randomize_brk) && defined(CONFIG_COMPAT_BRK)
+# define compat_brk_randomized
+# endif
+# ifndef arch_randomize_brk
+# define arch_randomize_brk(mm) (mm->brk)
+# endif
+#else
+extern unsigned long arch_mmap_rnd(void);
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+# ifdef CONFIG_COMPAT_BRK
+# define compat_brk_randomized
+# endif
+#endif
+
+#endif
diff --git a/include/linux/elf.h b/include/linux/elf.h
new file mode 100644
index 000000000..20fa8d8ae
--- /dev/null
+++ b/include/linux/elf.h
@@ -0,0 +1,56 @@
+#ifndef _LINUX_ELF_H
+#define _LINUX_ELF_H
+
+#include <asm/elf.h>
+#include <uapi/linux/elf.h>
+
+#ifndef elf_read_implies_exec
+ /* Executables for which elf_read_implies_exec() returns TRUE will
+ have the READ_IMPLIES_EXEC personality flag set automatically.
+ Override in asm/elf.h as needed. */
+# define elf_read_implies_exec(ex, have_pt_gnu_stack) 0
+#endif
+#ifndef SET_PERSONALITY
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+#endif
+
+#ifndef SET_PERSONALITY2
+#define SET_PERSONALITY2(ex, state) \
+ SET_PERSONALITY(ex)
+#endif
+
+#if ELF_CLASS == ELFCLASS32
+
+extern Elf32_Dyn _DYNAMIC [];
+#define elfhdr elf32_hdr
+#define elf_phdr elf32_phdr
+#define elf_shdr elf32_shdr
+#define elf_note elf32_note
+#define elf_addr_t Elf32_Off
+#define Elf_Half Elf32_Half
+
+#else
+
+extern Elf64_Dyn _DYNAMIC [];
+#define elfhdr elf64_hdr
+#define elf_phdr elf64_phdr
+#define elf_shdr elf64_shdr
+#define elf_note elf64_note
+#define elf_addr_t Elf64_Off
+#define Elf_Half Elf64_Half
+
+#endif
+
+/* Optional callbacks to write extra ELF notes. */
+struct file;
+struct coredump_params;
+
+#ifndef ARCH_HAVE_EXTRA_ELF_NOTES
+static inline int elf_coredump_extra_notes_size(void) { return 0; }
+static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; }
+#else
+extern int elf_coredump_extra_notes_size(void);
+extern int elf_coredump_extra_notes_write(struct coredump_params *cprm);
+#endif
+#endif /* _LINUX_ELF_H */
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
new file mode 100644
index 000000000..0a90e1c3a
--- /dev/null
+++ b/include/linux/elfcore-compat.h
@@ -0,0 +1,55 @@
+#ifndef _LINUX_ELFCORE_COMPAT_H
+#define _LINUX_ELFCORE_COMPAT_H
+
+#include <linux/elf.h>
+#include <linux/elfcore.h>
+#include <linux/compat.h>
+
+/*
+ * Make sure these layouts match the linux/elfcore.h native definitions.
+ */
+
+struct compat_elf_siginfo
+{
+ compat_int_t si_signo;
+ compat_int_t si_code;
+ compat_int_t si_errno;
+};
+
+struct compat_elf_prstatus
+{
+ struct compat_elf_siginfo pr_info;
+ short pr_cursig;
+ compat_ulong_t pr_sigpend;
+ compat_ulong_t pr_sighold;
+ compat_pid_t pr_pid;
+ compat_pid_t pr_ppid;
+ compat_pid_t pr_pgrp;
+ compat_pid_t pr_sid;
+ struct compat_timeval pr_utime;
+ struct compat_timeval pr_stime;
+ struct compat_timeval pr_cutime;
+ struct compat_timeval pr_cstime;
+ compat_elf_gregset_t pr_reg;
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ compat_ulong_t pr_exec_fdpic_loadmap;
+ compat_ulong_t pr_interp_fdpic_loadmap;
+#endif
+ compat_int_t pr_fpvalid;
+};
+
+struct compat_elf_prpsinfo
+{
+ char pr_state;
+ char pr_sname;
+ char pr_zomb;
+ char pr_nice;
+ compat_ulong_t pr_flag;
+ __compat_uid_t pr_uid;
+ __compat_gid_t pr_gid;
+ compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ char pr_fname[16];
+ char pr_psargs[ELF_PRARGSZ];
+};
+
+#endif /* _LINUX_ELFCORE_COMPAT_H */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
new file mode 100644
index 000000000..698d51a0e
--- /dev/null
+++ b/include/linux/elfcore.h
@@ -0,0 +1,73 @@
+#ifndef _LINUX_ELFCORE_H
+#define _LINUX_ELFCORE_H
+
+#include <linux/user.h>
+#include <linux/bug.h>
+#include <asm/elf.h>
+#include <uapi/linux/elfcore.h>
+
+struct coredump_params;
+
+static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_REGS
+ ELF_CORE_COPY_REGS((*elfregs), regs)
+#else
+ BUG_ON(sizeof(*elfregs) != sizeof(*regs));
+ *(struct pt_regs *)elfregs = *regs;
+#endif
+}
+
+static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_KERNEL_REGS
+ ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
+#else
+ elf_core_copy_regs(elfregs, regs);
+#endif
+}
+
+static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
+{
+#if defined (ELF_CORE_COPY_TASK_REGS)
+ return ELF_CORE_COPY_TASK_REGS(t, elfregs);
+#elif defined (task_pt_regs)
+ elf_core_copy_regs(elfregs, task_pt_regs(t));
+#endif
+ return 0;
+}
+
+extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
+
+static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+#ifdef ELF_CORE_COPY_FPREGS
+ return ELF_CORE_COPY_FPREGS(t, fpu);
+#else
+ return dump_fpu(regs, fpu);
+#endif
+}
+
+#ifdef ELF_CORE_COPY_XFPREGS
+static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
+{
+ return ELF_CORE_COPY_XFPREGS(t, xfpu);
+}
+#endif
+
+/*
+ * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out
+ * extra segments containing the gate DSO contents. Dumping its
+ * contents makes post-mortem fully interpretable later without matching up
+ * the same kernel and hardware config to see what PC values meant.
+ * Dumping its extra ELF program headers includes all the other information
+ * a debugger needs to easily find how the gate DSO was being used.
+ */
+extern Elf_Half elf_core_extra_phdrs(void);
+extern int
+elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
+extern int
+elf_core_write_extra_data(struct coredump_params *cprm);
+extern size_t elf_core_extra_data_size(void);
+
+#endif /* _LINUX_ELFCORE_H */
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
new file mode 100644
index 000000000..278e3ef05
--- /dev/null
+++ b/include/linux/elfnote.h
@@ -0,0 +1,98 @@
+#ifndef _LINUX_ELFNOTE_H
+#define _LINUX_ELFNOTE_H
+/*
+ * Helper macros to generate ELF Note structures, which are put into a
+ * PT_NOTE segment of the final vmlinux image. These are useful for
+ * including name-value pairs of metadata into the kernel binary (or
+ * modules?) for use by external programs.
+ *
+ * Each note has three parts: a name, a type and a desc. The name is
+ * intended to distinguish the note's originator, so it would be a
+ * company, project, subsystem, etc; it must be in a suitable form for
+ * use in a section name. The type is an integer which is used to tag
+ * the data, and is considered to be within the "name" namespace (so
+ * "FooCo"'s type 42 is distinct from "BarProj"'s type 42). The
+ * "desc" field is the actual data. There are no constraints on the
+ * desc field's contents, though typically they're fairly small.
+ *
+ * All notes from a given NAME are put into a section named
+ * .note.NAME. When the kernel image is finally linked, all the notes
+ * are packed into a single .notes section, which is mapped into the
+ * PT_NOTE segment. Because notes for a given name are grouped into
+ * the same section, they'll all be adjacent in the output file.
+ *
+ * This file defines macros for both C and assembler use. Their
+ * syntax is slightly different, but they're semantically similar.
+ *
+ * See the ELF specification for more detail about ELF notes.
+ */
+
+#ifdef __ASSEMBLER__
+/*
+ * Generate a structure with the same shape as Elf{32,64}_Nhdr (which
+ * turn out to be the same size and shape), followed by the name and
+ * desc data with appropriate padding. The 'desctype' argument is the
+ * assembler pseudo op defining the type of the data e.g. .asciz while
+ * 'descdata' is the data itself e.g. "hello, world".
+ *
+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
+ * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
+ */
+#define ELFNOTE_START(name, type, flags) \
+.pushsection .note.name, flags,@note ; \
+ .balign 4 ; \
+ .long 2f - 1f /* namesz */ ; \
+ .long 4484f - 3f /* descsz */ ; \
+ .long type ; \
+1:.asciz #name ; \
+2:.balign 4 ; \
+3:
+
+#define ELFNOTE_END \
+4484:.balign 4 ; \
+.popsection ;
+
+#define ELFNOTE(name, type, desc) \
+ ELFNOTE_START(name, type, "") \
+ desc ; \
+ ELFNOTE_END
+
+#else /* !__ASSEMBLER__ */
+#include <linux/elf.h>
+/*
+ * Use an anonymous structure which matches the shape of
+ * Elf{32,64}_Nhdr, but includes the name and desc data. The size and
+ * type of name and desc depend on the macro arguments. "name" must
+ * be a literal string, and "desc" must be passed by value. You may
+ * only define one note per line, since __LINE__ is used to generate
+ * unique symbols.
+ */
+#define _ELFNOTE_PASTE(a,b) a##b
+#define _ELFNOTE(size, name, unique, type, desc) \
+ static const struct { \
+ struct elf##size##_note _nhdr; \
+ unsigned char _name[sizeof(name)] \
+ __attribute__((aligned(sizeof(Elf##size##_Word)))); \
+ typeof(desc) _desc \
+ __attribute__((aligned(sizeof(Elf##size##_Word)))); \
+ } _ELFNOTE_PASTE(_note_, unique) \
+ __used \
+ __attribute__((section(".note." name), \
+ aligned(sizeof(Elf##size##_Word)), \
+ unused)) = { \
+ { \
+ sizeof(name), \
+ sizeof(desc), \
+ type, \
+ }, \
+ name, \
+ desc \
+ }
+#define ELFNOTE(size, name, type, desc) \
+ _ELFNOTE(size, name, __LINE__, type, desc)
+
+#define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
+#define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
+#endif /* __ASSEMBLER__ */
+
+#endif /* _LINUX_ELFNOTE_H */
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
new file mode 100644
index 000000000..7be22da32
--- /dev/null
+++ b/include/linux/enclosure.h
@@ -0,0 +1,142 @@
+/*
+ * Enclosure Services
+ *
+ * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ *
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or
+** modify it under the terms of the GNU General Public License
+** version 2 as published by the Free Software Foundation.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+*/
+#ifndef _LINUX_ENCLOSURE_H_
+#define _LINUX_ENCLOSURE_H_
+
+#include <linux/device.h>
+#include <linux/list.h>
+
+/* A few generic types ... taken from ses-2 */
+enum enclosure_component_type {
+ ENCLOSURE_COMPONENT_DEVICE = 0x01,
+ ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
+};
+
+/* ses-2 common element status */
+enum enclosure_status {
+ ENCLOSURE_STATUS_UNSUPPORTED = 0,
+ ENCLOSURE_STATUS_OK,
+ ENCLOSURE_STATUS_CRITICAL,
+ ENCLOSURE_STATUS_NON_CRITICAL,
+ ENCLOSURE_STATUS_UNRECOVERABLE,
+ ENCLOSURE_STATUS_NOT_INSTALLED,
+ ENCLOSURE_STATUS_UNKNOWN,
+ ENCLOSURE_STATUS_UNAVAILABLE,
+ /* last element for counting purposes */
+ ENCLOSURE_STATUS_MAX
+};
+
+/* SFF-8485 activity light settings */
+enum enclosure_component_setting {
+ ENCLOSURE_SETTING_DISABLED = 0,
+ ENCLOSURE_SETTING_ENABLED = 1,
+ ENCLOSURE_SETTING_BLINK_A_ON_OFF = 2,
+ ENCLOSURE_SETTING_BLINK_A_OFF_ON = 3,
+ ENCLOSURE_SETTING_BLINK_B_ON_OFF = 6,
+ ENCLOSURE_SETTING_BLINK_B_OFF_ON = 7,
+};
+
+struct enclosure_device;
+struct enclosure_component;
+struct enclosure_component_callbacks {
+ void (*get_status)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_status)(struct enclosure_device *,
+ struct enclosure_component *,
+ enum enclosure_status);
+ void (*get_fault)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_fault)(struct enclosure_device *,
+ struct enclosure_component *,
+ enum enclosure_component_setting);
+ void (*get_active)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_active)(struct enclosure_device *,
+ struct enclosure_component *,
+ enum enclosure_component_setting);
+ void (*get_locate)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_locate)(struct enclosure_device *,
+ struct enclosure_component *,
+ enum enclosure_component_setting);
+ void (*get_power_status)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_power_status)(struct enclosure_device *,
+ struct enclosure_component *,
+ int);
+ int (*show_id)(struct enclosure_device *, char *buf);
+};
+
+
+struct enclosure_component {
+ void *scratch;
+ struct device cdev;
+ struct device *dev;
+ enum enclosure_component_type type;
+ int number;
+ int fault;
+ int active;
+ int locate;
+ int slot;
+ enum enclosure_status status;
+ int power_status;
+};
+
+struct enclosure_device {
+ void *scratch;
+ struct list_head node;
+ struct device edev;
+ struct enclosure_component_callbacks *cb;
+ int components;
+ struct enclosure_component component[0];
+};
+
+static inline struct enclosure_device *
+to_enclosure_device(struct device *dev)
+{
+ return container_of(dev, struct enclosure_device, edev);
+}
+
+static inline struct enclosure_component *
+to_enclosure_component(struct device *dev)
+{
+ return container_of(dev, struct enclosure_component, cdev);
+}
+
+struct enclosure_device *
+enclosure_register(struct device *, const char *, int,
+ struct enclosure_component_callbacks *);
+void enclosure_unregister(struct enclosure_device *);
+struct enclosure_component *
+enclosure_component_alloc(struct enclosure_device *, unsigned int,
+ enum enclosure_component_type, const char *);
+int enclosure_component_register(struct enclosure_component *);
+int enclosure_add_device(struct enclosure_device *enclosure, int component,
+ struct device *dev);
+int enclosure_remove_device(struct enclosure_device *, struct device *);
+struct enclosure_device *enclosure_find(struct device *dev,
+ struct enclosure_device *start);
+int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
+ void *data);
+
+#endif /* _LINUX_ENCLOSURE_H_ */
diff --git a/include/linux/err.h b/include/linux/err.h
new file mode 100644
index 000000000..a72912064
--- /dev/null
+++ b/include/linux/err.h
@@ -0,0 +1,69 @@
+#ifndef _LINUX_ERR_H
+#define _LINUX_ERR_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include <asm/errno.h>
+
+/*
+ * Kernel pointers have redundant information, so we can use a
+ * scheme where we can return either an error code or a normal
+ * pointer with the same return value.
+ *
+ * This should be a per-architecture thing, to allow different
+ * error and pointer decisions.
+ */
+#define MAX_ERRNO 4095
+
+#ifndef __ASSEMBLY__
+
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+
+static inline void * __must_check ERR_PTR(long error)
+{
+ return (void *) error;
+}
+
+static inline long __must_check PTR_ERR(__force const void *ptr)
+{
+ return (long) ptr;
+}
+
+static inline bool __must_check IS_ERR(__force const void *ptr)
+{
+ return IS_ERR_VALUE((unsigned long)ptr);
+}
+
+static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
+{
+ return !ptr || IS_ERR_VALUE((unsigned long)ptr);
+}
+
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void * __must_check ERR_CAST(__force const void *ptr)
+{
+ /* cast away the const */
+ return (void *) ptr;
+}
+
+static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
+{
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+ else
+ return 0;
+}
+
+/* Deprecated */
+#define PTR_RET(p) PTR_ERR_OR_ZERO(p)
+
+#endif
+
+#endif /* _LINUX_ERR_H */
diff --git a/include/linux/errno.h b/include/linux/errno.h
new file mode 100644
index 000000000..89627b918
--- /dev/null
+++ b/include/linux/errno.h
@@ -0,0 +1,32 @@
+#ifndef _LINUX_ERRNO_H
+#define _LINUX_ERRNO_H
+
+#include <uapi/linux/errno.h>
+
+
+/*
+ * These should never be seen by user programs. To return one of ERESTART*
+ * codes, signal_pending() MUST be set. Note that ptrace can observe these
+ * at syscall exit tracing, but they will never be left for the debugged user
+ * process to see.
+ */
+#define ERESTARTSYS 512
+#define ERESTARTNOINTR 513
+#define ERESTARTNOHAND 514 /* restart if no handler.. */
+#define ENOIOCTLCMD 515 /* No ioctl command */
+#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
+#define EPROBE_DEFER 517 /* Driver requests probe retry */
+#define EOPENSTALE 518 /* open found a stale dentry */
+
+/* Defined for the NFSv3 protocol */
+#define EBADHANDLE 521 /* Illegal NFS file handle */
+#define ENOTSYNC 522 /* Update synchronization mismatch */
+#define EBADCOOKIE 523 /* Cookie is stale */
+#define ENOTSUPP 524 /* Operation is not supported */
+#define ETOOSMALL 525 /* Buffer or request is too small */
+#define ESERVERFAULT 526 /* An untranslatable error occurred */
+#define EBADTYPE 527 /* Type not supported by server */
+#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
+#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
+
+#endif
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h
new file mode 100644
index 000000000..9ca23fcfb
--- /dev/null
+++ b/include/linux/errqueue.h
@@ -0,0 +1,25 @@
+#ifndef _LINUX_ERRQUEUE_H
+#define _LINUX_ERRQUEUE_H 1
+
+
+#include <net/ip.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <linux/ipv6.h>
+#endif
+#include <uapi/linux/errqueue.h>
+
+#define SKB_EXT_ERR(skb) ((struct sock_exterr_skb *) ((skb)->cb))
+
+struct sock_exterr_skb {
+ union {
+ struct inet_skb_parm h4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_skb_parm h6;
+#endif
+ } header;
+ struct sock_extended_err ee;
+ u16 addr_offset;
+ __be16 port;
+};
+
+#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
new file mode 100644
index 000000000..606563ef8
--- /dev/null
+++ b/include/linux/etherdevice.h
@@ -0,0 +1,410 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _LINUX_ETHERDEVICE_H
+#define _LINUX_ETHERDEVICE_H
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <asm/unaligned.h>
+#include <asm/bitsperlong.h>
+
+#ifdef __KERNEL__
+u32 eth_get_headlen(void *data, unsigned int max_len);
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+extern const struct header_ops eth_header_ops;
+
+int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+ const void *daddr, const void *saddr, unsigned len);
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+ __be16 type);
+void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+ const unsigned char *haddr);
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
+void eth_commit_mac_addr_change(struct net_device *dev, void *p);
+int eth_mac_addr(struct net_device *dev, void *p);
+int eth_change_mtu(struct net_device *dev, int new_mtu);
+int eth_validate_addr(struct net_device *dev);
+
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+ unsigned int rxqs);
+#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
+#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
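+
+/*
+ * Editorial usage sketch (not part of the original header; struct demo_priv
+ * is hypothetical).  A typical probe routine allocates the net_device
+ * together with its private area and frees it on error:
+ *
+ *	struct net_device *ndev = alloc_etherdev(sizeof(struct demo_priv));
+ *	if (!ndev)
+ *		return -ENOMEM;
+ *	...
+ *	err = register_netdev(ndev);
+ *	if (err)
+ *		free_netdev(ndev);
+ */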
+
+struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb);
+int eth_gro_complete(struct sk_buff *skb, int nhoff);
+
+/* Reserved Ethernet Addresses per IEEE 802.1Q */
+static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
+{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
+
+/**
+ * is_link_local_ether_addr - Determine if given Ethernet address is link-local
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
+ * IEEE 802.1Q 8.6.3 Frame filtering.
+ *
+ * Please note: addr must be aligned to u16.
+ */
+static inline bool is_link_local_ether_addr(const u8 *addr)
+{
+ __be16 *a = (__be16 *)addr;
+ static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
+ static const __be16 m = cpu_to_be16(0xfff0);
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
+ ((a[2] ^ b[2]) & m)) == 0;
+#else
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
+#endif
+}
+
+/**
+ * is_zero_ether_addr - Determine if the given Ethernet address is all zeros.
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is all zeroes.
+ *
+ * Please note: addr must be aligned to u16.
+ */
+static inline bool is_zero_ether_addr(const u8 *addr)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return ((*(const u32 *)addr) | (*(const u16 *)(addr + 4))) == 0;
+#else
+ return (*(const u16 *)(addr + 0) |
+ *(const u16 *)(addr + 2) |
+ *(const u16 *)(addr + 4)) == 0;
+#endif
+}
+
+/**
+ * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is a multicast address.
+ * By definition the broadcast address is also a multicast address.
+ */
+static inline bool is_multicast_ether_addr(const u8 *addr)
+{
+ return 0x01 & addr[0];
+}
+
+/**
+ * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802).
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is a local address.
+ */
+static inline bool is_local_ether_addr(const u8 *addr)
+{
+ return 0x02 & addr[0];
+}
+
+/**
+ * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is the broadcast address.
+ *
+ * Please note: addr must be aligned to u16.
+ */
+static inline bool is_broadcast_ether_addr(const u8 *addr)
+{
+ return (*(const u16 *)(addr + 0) &
+ *(const u16 *)(addr + 2) &
+ *(const u16 *)(addr + 4)) == 0xffff;
+}
+
+/**
+ * is_unicast_ether_addr - Determine if the Ethernet address is unicast
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return true if the address is a unicast address.
+ */
+static inline bool is_unicast_ether_addr(const u8 *addr)
+{
+ return !is_multicast_ether_addr(addr);
+}
+
+/**
+ * is_valid_ether_addr - Determine if the given Ethernet address is valid
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
+ * a multicast address, and is not FF:FF:FF:FF:FF:FF.
+ *
+ * Return true if the address is valid.
+ *
+ * Please note: addr must be aligned to u16.
+ */
+static inline bool is_valid_ether_addr(const u8 *addr)
+{
+ /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to
+ * explicitly check for it here. */
+ return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
+}
+
+/**
+ * eth_random_addr - Generate software assigned random Ethernet address
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Generate a random Ethernet address (MAC) that is not multicast
+ * and has the local assigned bit set.
+ */
+static inline void eth_random_addr(u8 *addr)
+{
+ get_random_bytes(addr, ETH_ALEN);
+ addr[0] &= 0xfe; /* clear multicast bit */
+ addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
+}
+
+#define random_ether_addr(addr) eth_random_addr(addr)
+
+/**
+ * eth_broadcast_addr - Assign broadcast address
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Assign the broadcast address to the given address array.
+ */
+static inline void eth_broadcast_addr(u8 *addr)
+{
+ memset(addr, 0xff, ETH_ALEN);
+}
+
+/**
+ * eth_zero_addr - Assign zero address
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Assign the zero address to the given address array.
+ */
+static inline void eth_zero_addr(u8 *addr)
+{
+ memset(addr, 0x00, ETH_ALEN);
+}
+
+/**
+ * eth_hw_addr_random - Generate a software-assigned random Ethernet address
+ * and set the device flag
+ * @dev: pointer to net_device structure
+ *
+ * Generate a random Ethernet address (MAC) to be used by a net device
+ * and set addr_assign_type so the state can be read by sysfs and be
+ * used by userspace.
+ */
+static inline void eth_hw_addr_random(struct net_device *dev)
+{
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->dev_addr);
+}
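+
+/*
+ * Editorial usage sketch (not part of the original header; hw_mac is a
+ * hypothetical buffer read from device storage).  Drivers commonly validate
+ * the address read from hardware and fall back to a random one:
+ *
+ *	if (is_valid_ether_addr(hw_mac))
+ *		memcpy(dev->dev_addr, hw_mac, ETH_ALEN);
+ *	else
+ *		eth_hw_addr_random(dev);
+ */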
+
+/**
+ * ether_addr_copy - Copy an Ethernet address
+ * @dst: Pointer to a six-byte array Ethernet address destination
+ * @src: Pointer to a six-byte array Ethernet address source
+ *
+ * Please note: dst & src must both be aligned to u16.
+ */
+static inline void ether_addr_copy(u8 *dst, const u8 *src)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ *(u32 *)dst = *(const u32 *)src;
+ *(u16 *)(dst + 4) = *(const u16 *)(src + 4);
+#else
+ u16 *a = (u16 *)dst;
+ const u16 *b = (const u16 *)src;
+
+ a[0] = b[0];
+ a[1] = b[1];
+ a[2] = b[2];
+#endif
+}
+
+/**
+ * eth_hw_addr_inherit - Copy dev_addr from another net_device
+ * @dst: pointer to net_device to copy dev_addr to
+ * @src: pointer to net_device to copy dev_addr from
+ *
+ * Copy the Ethernet address from one net_device to another along with
+ * the address attributes (addr_assign_type).
+ */
+static inline void eth_hw_addr_inherit(struct net_device *dst,
+ struct net_device *src)
+{
+ dst->addr_assign_type = src->addr_assign_type;
+ ether_addr_copy(dst->dev_addr, src->dev_addr);
+}
+
+/**
+ * ether_addr_equal - Compare two Ethernet addresses
+ * @addr1: Pointer to a six-byte array containing the Ethernet address
+ * @addr2: Pointer other six-byte array containing the Ethernet address
+ *
+ * Compare two Ethernet addresses, returns true if equal
+ *
+ * Please note: addr1 & addr2 must both be aligned to u16.
+ */
+static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ u32 fold = ((*(const u32 *)addr1) ^ (*(const u32 *)addr2)) |
+ ((*(const u16 *)(addr1 + 4)) ^ (*(const u16 *)(addr2 + 4)));
+
+ return fold == 0;
+#else
+ const u16 *a = (const u16 *)addr1;
+ const u16 *b = (const u16 *)addr2;
+
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
+#endif
+}
+
+/**
+ * ether_addr_equal_64bits - Compare two Ethernet addresses
+ * @addr1: Pointer to an array of 8 bytes
+ * @addr2: Pointer to an other array of 8 bytes
+ *
+ * Compare two Ethernet addresses, returns true if equal, false otherwise.
+ *
+ * The function doesn't need any conditional branches and possibly uses
+ * word memory accesses on CPUs allowing cheap unaligned memory reads.
+ * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
+ *
+ * Please note that the alignment of addr1 & addr2 is only guaranteed to be 16 bits.
+ */
+
+static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
+ const u8 addr2[6+2])
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
+
+#ifdef __BIG_ENDIAN
+ return (fold >> 16) == 0;
+#else
+ return (fold << 16) == 0;
+#endif
+#else
+ return ether_addr_equal(addr1, addr2);
+#endif
+}
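+
+/*
+ * Editorial usage sketch (not part of the original header; demo_addr is
+ * hypothetical).  The two bytes of padding that ether_addr_equal_64bits()
+ * needs are easiest to provide by declaring an 8-byte array; the Ethernet
+ * header itself already has readable bytes following h_dest:
+ *
+ *	static const u8 demo_addr[ETH_ALEN + 2] __aligned(2) =
+ *		{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
+ *
+ *	if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, demo_addr))
+ *		...
+ */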
+
+/**
+ * ether_addr_equal_unaligned - Compare two not u16 aligned Ethernet addresses
+ * @addr1: Pointer to a six-byte array containing the Ethernet address
+ * @addr2: Pointer other six-byte array containing the Ethernet address
+ *
+ * Compare two Ethernet addresses, returns true if equal
+ *
+ * Please note: Use only when any Ethernet address may not be u16 aligned.
+ */
+static inline bool ether_addr_equal_unaligned(const u8 *addr1, const u8 *addr2)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return ether_addr_equal(addr1, addr2);
+#else
+ return memcmp(addr1, addr2, ETH_ALEN) == 0;
+#endif
+}
+
+/**
+ * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
+ * @dev: Pointer to a device structure
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Compare passed address with all addresses of the device. Return true if the
+ * address is one of the device addresses.
+ *
+ * Note that this function calls ether_addr_equal_64bits() so take care of
+ * the right padding.
+ */
+static inline bool is_etherdev_addr(const struct net_device *dev,
+ const u8 addr[6 + 2])
+{
+ struct netdev_hw_addr *ha;
+ bool res = false;
+
+ rcu_read_lock();
+ for_each_dev_addr(dev, ha) {
+ res = ether_addr_equal_64bits(addr, ha->addr);
+ if (res)
+ break;
+ }
+ rcu_read_unlock();
+ return res;
+}
+#endif /* __KERNEL__ */
+
+/**
+ * compare_ether_header - Compare two Ethernet headers
+ * @a: Pointer to Ethernet header
+ * @b: Pointer to Ethernet header
+ *
+ * Compare two Ethernet headers, returns 0 if equal.
+ * This assumes that the network header (i.e., IP header) is 4-byte
+ * aligned OR the platform can handle unaligned access. This is the
+ * case for all packets coming into netif_receive_skb or similar
+ * entry points.
+ */
+
+static inline unsigned long compare_ether_header(const void *a, const void *b)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ unsigned long fold;
+
+ /*
+ * We want to compare 14 bytes:
+ * [a0 ... a13] ^ [b0 ... b13]
+ * Use two long XOR, ORed together, with an overlap of two bytes.
+ * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] |
+ * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13]
+ * This means the [a6 a7] ^ [b6 b7] part is done two times.
+ */
+ fold = *(unsigned long *)a ^ *(unsigned long *)b;
+ fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
+ return fold;
+#else
+ u32 *a32 = (u32 *)((u8 *)a + 2);
+ u32 *b32 = (u32 *)((u8 *)b + 2);
+
+ return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
+ (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
+#endif
+}
+
+/**
+ * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
+ * @skb: Buffer to pad
+ *
+ * An Ethernet frame should have a minimum size of 60 bytes. This function
+ * takes short frames and pads them with zeros up to the 60 byte limit.
+ */
+static inline int eth_skb_pad(struct sk_buff *skb)
+{
+ return skb_put_padto(skb, ETH_ZLEN);
+}
+
+#endif /* _LINUX_ETHERDEVICE_H */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
new file mode 100644
index 000000000..653dc9c4e
--- /dev/null
+++ b/include/linux/ethtool.h
@@ -0,0 +1,285 @@
+/*
+ * ethtool.h: Defines for Linux ethtool.
+ *
+ * Copyright (C) 1998 David S. Miller (davem@redhat.com)
+ * Copyright 2001 Jeff Garzik <jgarzik@pobox.com>
+ * Portions Copyright 2001 Sun Microsystems (thockin@sun.com)
+ * Portions Copyright 2002 Intel (eli.kupermann@intel.com,
+ * christopher.leech@intel.com,
+ * scott.feldman@intel.com)
+ * Portions Copyright (C) Sun Microsystems 2008
+ */
+#ifndef _LINUX_ETHTOOL_H
+#define _LINUX_ETHTOOL_H
+
+#include <linux/compat.h>
+#include <uapi/linux/ethtool.h>
+
+#ifdef CONFIG_COMPAT
+
+struct compat_ethtool_rx_flow_spec {
+ u32 flow_type;
+ union ethtool_flow_union h_u;
+ struct ethtool_flow_ext h_ext;
+ union ethtool_flow_union m_u;
+ struct ethtool_flow_ext m_ext;
+ compat_u64 ring_cookie;
+ u32 location;
+};
+
+struct compat_ethtool_rxnfc {
+ u32 cmd;
+ u32 flow_type;
+ compat_u64 data;
+ struct compat_ethtool_rx_flow_spec fs;
+ u32 rule_cnt;
+ u32 rule_locs[0];
+};
+
+#endif /* CONFIG_COMPAT */
+
+#include <linux/rculist.h>
+
+extern int __ethtool_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd);
+
+/**
+ * enum ethtool_phys_id_state - indicator state for physical identification
+ * @ETHTOOL_ID_INACTIVE: Physical ID indicator should be deactivated
+ * @ETHTOOL_ID_ACTIVE: Physical ID indicator should be activated
+ * @ETHTOOL_ID_ON: LED should be turned on (used iff %ETHTOOL_ID_ACTIVE
+ * is not supported)
+ * @ETHTOOL_ID_OFF: LED should be turned off (used iff %ETHTOOL_ID_ACTIVE
+ * is not supported)
+ */
+enum ethtool_phys_id_state {
+ ETHTOOL_ID_INACTIVE,
+ ETHTOOL_ID_ACTIVE,
+ ETHTOOL_ID_ON,
+ ETHTOOL_ID_OFF
+};
+
+enum {
+ ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
+ ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
+
+ /*
+ * Add your fresh new hash function bits above and remember to update
+ * rss_hash_func_strings[] in ethtool.c
+ */
+ ETH_RSS_HASH_FUNCS_COUNT
+};
+
+#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
+#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
+
+#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
+#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
+
+#define ETH_RSS_HASH_UNKNOWN 0
+#define ETH_RSS_HASH_NO_CHANGE 0
+
+struct net_device;
+
+/* Some generic methods drivers may use in their ethtool_ops */
+u32 ethtool_op_get_link(struct net_device *dev);
+int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
+
+/**
+ * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
+ * @index: Index in RX flow hash indirection table
+ * @n_rx_rings: Number of RX rings to use
+ *
+ * This function provides the default policy for RX flow hash indirection.
+ */
+static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
+{
+ return index % n_rx_rings;
+}
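+
+/*
+ * Editorial usage sketch (not part of the original header; indir[],
+ * table_size and n_rx_rings are hypothetical driver state).  A driver with
+ * no saved configuration can fill its indirection table with this policy:
+ *
+ *	for (i = 0; i < table_size; i++)
+ *		indir[i] = ethtool_rxfh_indir_default(i, n_rx_rings);
+ */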
+
+/**
+ * struct ethtool_ops - optional netdev operations
+ * @get_settings: Get various device settings including Ethernet link
+ * settings. The @cmd parameter is expected to have been cleared
+ * before get_settings is called. Returns a negative error code or
+ * zero.
+ * @set_settings: Set various device settings including Ethernet link
+ * settings. Returns a negative error code or zero.
+ * @get_drvinfo: Report driver/device information. Should only set the
+ * @driver, @version, @fw_version and @bus_info fields. If not
+ * implemented, the @driver and @bus_info fields will be filled in
+ * according to the netdev's parent device.
+ * @get_regs_len: Get buffer length required for @get_regs
+ * @get_regs: Get device registers
+ * @get_wol: Report whether Wake-on-Lan is enabled
+ * @set_wol: Turn Wake-on-Lan on or off. Returns a negative error code
+ * or zero.
+ * @get_msglevel: Report driver message level. This should be the value
+ * of the @msg_enable field used by netif logging functions.
+ * @set_msglevel: Set driver message level
+ * @nway_reset: Restart autonegotiation. Returns a negative error code
+ * or zero.
+ * @get_link: Report whether physical link is up. Will only be called if
+ * the netdev is up. Should usually be set to ethtool_op_get_link(),
+ * which uses netif_carrier_ok().
+ * @get_eeprom: Read data from the device EEPROM.
+ * Should fill in the magic field. Don't need to check len for zero
+ * or wraparound. Fill in the data argument with the eeprom values
+ * from offset to offset + len. Update len to the amount read.
+ * Returns an error or zero.
+ * @set_eeprom: Write data to the device EEPROM.
+ * Should validate the magic field. Don't need to check len for zero
+ * or wraparound. Update len to the amount written. Returns an error
+ * or zero.
+ * @get_coalesce: Get interrupt coalescing parameters. Returns a negative
+ * error code or zero.
+ * @set_coalesce: Set interrupt coalescing parameters. Returns a negative
+ * error code or zero.
+ * @get_ringparam: Report ring sizes
+ * @set_ringparam: Set ring sizes. Returns a negative error code or zero.
+ * @get_pauseparam: Report pause parameters
+ * @set_pauseparam: Set pause parameters. Returns a negative error code
+ * or zero.
+ * @self_test: Run specified self-tests
+ * @get_strings: Return a set of strings that describe the requested objects
+ * @set_phys_id: Identify the physical device, e.g. by flashing an LED
+ * attached to it. The implementation may update the indicator
+ * asynchronously or synchronously, but in either case it must return
+ * quickly. It is initially called with the argument %ETHTOOL_ID_ACTIVE,
+ * and must either activate asynchronous updates and return zero, return
+ * a negative error or return a positive frequency for synchronous
+ * indication (e.g. 1 for one on/off cycle per second). If it returns
+ * a frequency then it will be called again at intervals with the
+ * argument %ETHTOOL_ID_ON or %ETHTOOL_ID_OFF and should set the state of
+ * the indicator accordingly. Finally, it is called with the argument
+ * %ETHTOOL_ID_INACTIVE and must deactivate the indicator. Returns a
+ * negative error code or zero.
+ * @get_ethtool_stats: Return extended statistics about the device.
+ * This is only useful if the device maintains statistics not
+ * included in &struct rtnl_link_stats64.
+ * @begin: Function to be called before any other operation. Returns a
+ * negative error code or zero.
+ * @complete: Function to be called after any other operation except
+ * @begin. Will be called even if the other operation failed.
+ * @get_priv_flags: Report driver-specific feature flags.
+ * @set_priv_flags: Set driver-specific feature flags. Returns a negative
+ * error code or zero.
+ * @get_sset_count: Get number of strings that @get_strings will write.
+ * @get_rxnfc: Get RX flow classification rules. Returns a negative
+ * error code or zero.
+ * @set_rxnfc: Set RX flow classification rules. Returns a negative
+ * error code or zero.
+ * @flash_device: Write a firmware image to device's flash memory.
+ * Returns a negative error code or zero.
+ * @reset: Reset (part of) the device, as specified by a bitmask of
+ * flags from &enum ethtool_reset_flags. Returns a negative
+ * error code or zero.
+ * @get_rxfh_key_size: Get the size of the RX flow hash key.
+ * Returns zero if not supported for this specific device.
+ * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
+ * Returns zero if not supported for this specific device.
+ * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key
+ * and/or hash function.
+ * Returns a negative error code or zero.
+ * @set_rxfh: Set the contents of the RX flow hash indirection table, hash
+ * key, and/or hash function. Arguments which are set to %NULL or zero
+ * will remain unchanged.
+ * Returns a negative error code or zero. An error code must be returned
+ * if at least one unsupported change was requested.
+ * @get_channels: Get number of channels.
+ * @set_channels: Set number of channels. Returns a negative error code or
+ * zero.
+ * @get_dump_flag: Get dump flag indicating current dump length, version,
+ * and flag of the device.
+ * @get_dump_data: Get dump data.
+ * @set_dump: Set dump specific flags to the device.
+ * @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
+ * Drivers supporting transmit time stamps in software should set this to
+ * ethtool_op_get_ts_info().
+ * @get_module_info: Get the size and type of the eeprom contained within
+ * a plug-in module.
+ * @get_module_eeprom: Get the eeprom information from the plug-in module
+ * @get_eee: Get Energy-Efficient (EEE) supported and status.
+ * @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ *
+ * All operations are optional (i.e. the function pointer may be set
+ * to %NULL) and callers must take this into account. Callers must
+ * hold the RTNL lock.
+ *
+ * See the structures used by these operations for further documentation.
+ * Note that for all operations using a structure ending with a zero-
+ * length array, the array is allocated separately in the kernel and
+ * is passed to the driver as an additional parameter.
+ *
+ * See &struct net_device and &struct net_device_ops for documentation
+ * of the generic netdev features interface.
+ */
+struct ethtool_ops {
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
+ int (*set_settings)(struct net_device *, struct ethtool_cmd *);
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
+ int (*get_regs_len)(struct net_device *);
+ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
+ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
+ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
+ u32 (*get_msglevel)(struct net_device *);
+ void (*set_msglevel)(struct net_device *, u32);
+ int (*nway_reset)(struct net_device *);
+ u32 (*get_link)(struct net_device *);
+ int (*get_eeprom_len)(struct net_device *);
+ int (*get_eeprom)(struct net_device *,
+ struct ethtool_eeprom *, u8 *);
+ int (*set_eeprom)(struct net_device *,
+ struct ethtool_eeprom *, u8 *);
+ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
+ void (*get_ringparam)(struct net_device *,
+ struct ethtool_ringparam *);
+ int (*set_ringparam)(struct net_device *,
+ struct ethtool_ringparam *);
+ void (*get_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ int (*set_pauseparam)(struct net_device *,
+ struct ethtool_pauseparam*);
+ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
+ void (*get_strings)(struct net_device *, u32 stringset, u8 *);
+ int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
+ void (*get_ethtool_stats)(struct net_device *,
+ struct ethtool_stats *, u64 *);
+ int (*begin)(struct net_device *);
+ void (*complete)(struct net_device *);
+ u32 (*get_priv_flags)(struct net_device *);
+ int (*set_priv_flags)(struct net_device *, u32);
+ int (*get_sset_count)(struct net_device *, int);
+ int (*get_rxnfc)(struct net_device *,
+ struct ethtool_rxnfc *, u32 *rule_locs);
+ int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
+ int (*flash_device)(struct net_device *, struct ethtool_flash *);
+ int (*reset)(struct net_device *, u32 *);
+ u32 (*get_rxfh_key_size)(struct net_device *);
+ u32 (*get_rxfh_indir_size)(struct net_device *);
+ int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
+ u8 *hfunc);
+ int (*set_rxfh)(struct net_device *, const u32 *indir,
+ const u8 *key, const u8 hfunc);
+ void (*get_channels)(struct net_device *, struct ethtool_channels *);
+ int (*set_channels)(struct net_device *, struct ethtool_channels *);
+ int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
+ int (*get_dump_data)(struct net_device *,
+ struct ethtool_dump *, void *);
+ int (*set_dump)(struct net_device *, struct ethtool_dump *);
+ int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
+ int (*get_module_info)(struct net_device *,
+ struct ethtool_modinfo *);
+ int (*get_module_eeprom)(struct net_device *,
+ struct ethtool_eeprom *, u8 *);
+ int (*get_eee)(struct net_device *, struct ethtool_eee *);
+ int (*set_eee)(struct net_device *, struct ethtool_eee *);
+ int (*get_tunable)(struct net_device *,
+ const struct ethtool_tunable *, void *);
+ int (*set_tunable)(struct net_device *,
+ const struct ethtool_tunable *, const void *);
+
+
+};
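+
+/*
+ * Editorial usage sketch (not part of the original header; demo_ethtool_ops
+ * and demo_get_drvinfo() are hypothetical).  All callbacks are optional, so
+ * a minimal driver might only wire up a few of them:
+ *
+ *	static const struct ethtool_ops demo_ethtool_ops = {
+ *		.get_drvinfo	= demo_get_drvinfo,
+ *		.get_link	= ethtool_op_get_link,
+ *		.get_ts_info	= ethtool_op_get_ts_info,
+ *	};
+ *
+ *	netdev->ethtool_ops = &demo_ethtool_ops;
+ */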
+#endif /* _LINUX_ETHTOOL_H */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
new file mode 100644
index 000000000..ff0b981f0
--- /dev/null
+++ b/include/linux/eventfd.h
@@ -0,0 +1,84 @@
+/*
+ * include/linux/eventfd.h
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _LINUX_EVENTFD_H
+#define _LINUX_EVENTFD_H
+
+#include <linux/fcntl.h>
+#include <linux/wait.h>
+
+/*
+ * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define EFD_SEMAPHORE (1 << 0)
+#define EFD_CLOEXEC O_CLOEXEC
+#define EFD_NONBLOCK O_NONBLOCK
+
+#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
+#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+
+struct file;
+
+#ifdef CONFIG_EVENTFD
+
+struct file *eventfd_file_create(unsigned int count, int flags);
+struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
+void eventfd_ctx_put(struct eventfd_ctx *ctx);
+struct file *eventfd_fget(int fd);
+struct eventfd_ctx *eventfd_ctx_fdget(int fd);
+struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
+__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+ __u64 *cnt);
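+
+/*
+ * Editorial usage sketch (not part of the original header; fd is assumed to
+ * be an eventfd file descriptor handed in from userspace).  Kernel code that
+ * wants to wake up a userspace listener adds one to the counter:
+ *
+ *	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
+ *	if (IS_ERR(ctx))
+ *		return PTR_ERR(ctx);
+ *	eventfd_signal(ctx, 1);
+ *	eventfd_ctx_put(ctx);
+ */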
+
+#else /* CONFIG_EVENTFD */
+
+/*
+ * Ugly ugly ugly error layer to support modules that use eventfd but
+ * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO.
+ */
+static inline struct file *eventfd_file_create(unsigned int count, int flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline int eventfd_signal(struct eventfd_ctx *ctx, int n)
+{
+ return -ENOSYS;
+}
+
+static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
+{
+
+}
+
+static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
+ __u64 *cnt)
+{
+ return -ENOSYS;
+}
+
+static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
+ wait_queue_t *wait, __u64 *cnt)
+{
+ return -ENOSYS;
+}
+
+#endif
+
+#endif /* _LINUX_EVENTFD_H */
+
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
new file mode 100644
index 000000000..6daf6d497
--- /dev/null
+++ b/include/linux/eventpoll.h
@@ -0,0 +1,71 @@
+/*
+ * include/linux/eventpoll.h ( Efficient event polling implementation )
+ * Copyright (C) 2001,...,2006 Davide Libenzi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+#ifndef _LINUX_EVENTPOLL_H
+#define _LINUX_EVENTPOLL_H
+
+#include <uapi/linux/eventpoll.h>
+
+
+/* Forward declarations to avoid compiler errors */
+struct file;
+
+
+#ifdef CONFIG_EPOLL
+
+/* Used to initialize the epoll bits inside the "struct file" */
+static inline void eventpoll_init_file(struct file *file)
+{
+ INIT_LIST_HEAD(&file->f_ep_links);
+ INIT_LIST_HEAD(&file->f_tfile_llink);
+}
+
+
+/* Used to release the epoll bits inside the "struct file" */
+void eventpoll_release_file(struct file *file);
+
+/*
+ * This is called from inside fs/file_table.c:__fput() to unlink files
+ * from the eventpoll interface. We need to have this facility to cleanup
+ * correctly files that are closed without being removed from the eventpoll
+ * interface.
+ */
+static inline void eventpoll_release(struct file *file)
+{
+
+ /*
+ * Fast check to avoid the get/release of the semaphore. Since
+ * we're doing this outside the semaphore lock, it might return
+ * false negatives, but we don't care. It'll help in 99.99% of cases
+ * to avoid the semaphore lock. False positives simply cannot happen
+ * because the file is on the way to be removed and nobody ( but
+ * eventpoll ) still has a reference to this file.
+ */
+ if (likely(list_empty(&file->f_ep_links)))
+ return;
+
+ /*
+ * The file is being closed while it is still linked to an epoll
+ * descriptor. We need to handle this by correctly unlinking it
+ * from its containers.
+ */
+ eventpoll_release_file(file);
+}
+
+#else
+
+static inline void eventpoll_init_file(struct file *file) {}
+static inline void eventpoll_release(struct file *file) {}
+
+#endif
+
+#endif /* #ifndef _LINUX_EVENTPOLL_H */
diff --git a/include/linux/evm.h b/include/linux/evm.h
new file mode 100644
index 000000000..1fcb88ca8
--- /dev/null
+++ b/include/linux/evm.h
@@ -0,0 +1,100 @@
+/*
+ * evm.h
+ *
+ * Copyright (c) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ */
+
+#ifndef _LINUX_EVM_H
+#define _LINUX_EVM_H
+
+#include <linux/integrity.h>
+#include <linux/xattr.h>
+
+struct integrity_iint_cache;
+
+#ifdef CONFIG_EVM
+extern enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint);
+extern int evm_inode_setattr(struct dentry *dentry, struct iattr *attr);
+extern void evm_inode_post_setattr(struct dentry *dentry, int ia_valid);
+extern int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size);
+extern void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len);
+extern int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+extern void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name);
+extern int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm);
+#ifdef CONFIG_FS_POSIX_ACL
+extern int posix_xattr_acl(const char *xattrname);
+#else
+static inline int posix_xattr_acl(const char *xattrname)
+{
+ return 0;
+}
+#endif
+#else
+#ifdef CONFIG_INTEGRITY
+static inline enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ return INTEGRITY_UNKNOWN;
+}
+#endif
+
+static inline int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ return;
+}
+
+static inline int evm_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len)
+{
+ return;
+}
+
+static inline int evm_inode_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return 0;
+}
+
+static inline void evm_inode_post_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return;
+}
+
+static inline int evm_inode_init_security(struct inode *inode,
+ const struct xattr *xattr_array,
+ struct xattr *evm)
+{
+ return 0;
+}
+
+#endif /* CONFIG_EVM */
+#endif /* LINUX_EVM_H */
diff --git a/include/linux/export.h b/include/linux/export.h
new file mode 100644
index 000000000..96e45ea46
--- /dev/null
+++ b/include/linux/export.h
@@ -0,0 +1,98 @@
+#ifndef _LINUX_EXPORT_H
+#define _LINUX_EXPORT_H
+/*
+ * Export symbols from the kernel to modules. Forked from module.h
+ * to reduce the amount of pointless cruft we feed to gcc when only
+ * exporting a simple symbol or two.
+ *
+ * Try not to add #includes here. It slows compilation and makes kernel
+ * hackers place grumpy comments in header files.
+ */
+
+/* Some toolchains use a `_' prefix for all user symbols. */
+#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
+#define __VMLINUX_SYMBOL(x) _##x
+#define __VMLINUX_SYMBOL_STR(x) "_" #x
+#else
+#define __VMLINUX_SYMBOL(x) x
+#define __VMLINUX_SYMBOL_STR(x) #x
+#endif
+
+/* Indirect, so macros are expanded before pasting. */
+#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x)
+#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x)
+
+#ifndef __ASSEMBLY__
+struct kernel_symbol
+{
+ unsigned long value;
+ const char *name;
+};
+
+#ifdef MODULE
+extern struct module __this_module;
+#define THIS_MODULE (&__this_module)
+#else
+#define THIS_MODULE ((struct module *)0)
+#endif
+
+#ifdef CONFIG_MODULES
+
+#ifndef __GENKSYMS__
+#ifdef CONFIG_MODVERSIONS
+/* Mark the CRC weak since genksyms apparently decides not to
+ * generate checksums for some symbols */
+#define __CRC_SYMBOL(sym, sec) \
+ extern __visible void *__crc_##sym __attribute__((weak)); \
+ static const unsigned long __kcrctab_##sym \
+ __used \
+ __attribute__((section("___kcrctab" sec "+" #sym), unused)) \
+ = (unsigned long) &__crc_##sym;
+#else
+#define __CRC_SYMBOL(sym, sec)
+#endif
+
+/* For every exported symbol, place a struct in the __ksymtab section */
+#define __EXPORT_SYMBOL(sym, sec) \
+ extern typeof(sym) sym; \
+ __CRC_SYMBOL(sym, sec) \
+ static const char __kstrtab_##sym[] \
+ __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ = VMLINUX_SYMBOL_STR(sym); \
+ extern const struct kernel_symbol __ksymtab_##sym; \
+ __visible const struct kernel_symbol __ksymtab_##sym \
+ __used \
+ __attribute__((section("___ksymtab" sec "+" #sym), unused)) \
+ = { (unsigned long)&sym, __kstrtab_##sym }
+
+#define EXPORT_SYMBOL(sym) \
+ __EXPORT_SYMBOL(sym, "")
+
+#define EXPORT_SYMBOL_GPL(sym) \
+ __EXPORT_SYMBOL(sym, "_gpl")
+
+#define EXPORT_SYMBOL_GPL_FUTURE(sym) \
+ __EXPORT_SYMBOL(sym, "_gpl_future")
+
+#ifdef CONFIG_UNUSED_SYMBOLS
+#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused")
+#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl")
+#else
+#define EXPORT_UNUSED_SYMBOL(sym)
+#define EXPORT_UNUSED_SYMBOL_GPL(sym)
+#endif
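+
+/*
+ * Editorial usage sketch (not part of the original header; demo_do_thing()
+ * is hypothetical).  A C file exports a symbol right after its definition so
+ * that modules can link against it:
+ *
+ *	int demo_do_thing(struct device *dev)
+ *	{
+ *		...
+ *	}
+ *	EXPORT_SYMBOL_GPL(demo_do_thing);
+ */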
+
+#endif /* __GENKSYMS__ */
+
+#else /* !CONFIG_MODULES... */
+
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
+#define EXPORT_SYMBOL_GPL_FUTURE(sym)
+#define EXPORT_UNUSED_SYMBOL(sym)
+#define EXPORT_UNUSED_SYMBOL_GPL(sym)
+
+#endif /* CONFIG_MODULES */
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _LINUX_EXPORT_H */
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
new file mode 100644
index 000000000..fa05e04c5
--- /dev/null
+++ b/include/linux/exportfs.h
@@ -0,0 +1,237 @@
+#ifndef LINUX_EXPORTFS_H
+#define LINUX_EXPORTFS_H 1
+
+#include <linux/types.h>
+
+struct dentry;
+struct iattr;
+struct inode;
+struct super_block;
+struct vfsmount;
+
+/* limit the handle size to NFSv4 handle size now */
+#define MAX_HANDLE_SZ 128
+
+/*
+ * The fileid_type identifies how the file within the filesystem is encoded.
+ * In theory this is freely set and parsed by the filesystem, but we try to
+ * stick to conventions so we can share some generic code and don't confuse
+ * sniffers like ethereal/wireshark.
+ *
+ * The filesystem must not use the value '0' or '0xff'.
+ */
+enum fid_type {
+ /*
+ * The root, or export point, of the filesystem.
+ * (Never actually passed down to the filesystem.)
+ */
+ FILEID_ROOT = 0,
+
+ /*
+ * 32bit inode number, 32 bit generation number.
+ */
+ FILEID_INO32_GEN = 1,
+
+ /*
+ * 32bit inode number, 32 bit generation number,
+ * 32 bit parent directory inode number.
+ */
+ FILEID_INO32_GEN_PARENT = 2,
+
+ /*
+ * 64 bit object ID, 64 bit root object ID,
+ * 32 bit generation number.
+ */
+ FILEID_BTRFS_WITHOUT_PARENT = 0x4d,
+
+ /*
+ * 64 bit object ID, 64 bit root object ID,
+ * 32 bit generation number,
+ * 64 bit parent object ID, 32 bit parent generation.
+ */
+ FILEID_BTRFS_WITH_PARENT = 0x4e,
+
+ /*
+ * 64 bit object ID, 64 bit root object ID,
+ * 32 bit generation number,
+ * 64 bit parent object ID, 32 bit parent generation,
+ * 64 bit parent root object ID.
+ */
+ FILEID_BTRFS_WITH_PARENT_ROOT = 0x4f,
+
+ /*
+ * 32 bit block number, 16 bit partition reference,
+ * 16 bit unused, 32 bit generation number.
+ */
+ FILEID_UDF_WITHOUT_PARENT = 0x51,
+
+ /*
+ * 32 bit block number, 16 bit partition reference,
+ * 16 bit unused, 32 bit generation number,
+ * 32 bit parent block number, 32 bit parent generation number
+ */
+ FILEID_UDF_WITH_PARENT = 0x52,
+
+ /*
+ * 64 bit checkpoint number, 64 bit inode number,
+ * 32 bit generation number.
+ */
+ FILEID_NILFS_WITHOUT_PARENT = 0x61,
+
+ /*
+ * 64 bit checkpoint number, 64 bit inode number,
+ * 32 bit generation number, 32 bit parent generation.
+ * 64 bit parent inode number.
+ */
+ FILEID_NILFS_WITH_PARENT = 0x62,
+
+ /*
+ * 32 bit generation number, 40 bit i_pos.
+ */
+ FILEID_FAT_WITHOUT_PARENT = 0x71,
+
+ /*
+ * 32 bit generation number, 40 bit i_pos,
+ * 32 bit parent generation number, 40 bit parent i_pos
+ */
+ FILEID_FAT_WITH_PARENT = 0x72,
+
+ /*
+ * Filesystems must not use 0xff file ID.
+ */
+ FILEID_INVALID = 0xff,
+};
+
+struct fid {
+ union {
+ struct {
+ u32 ino;
+ u32 gen;
+ u32 parent_ino;
+ u32 parent_gen;
+ } i32;
+ struct {
+ u32 block;
+ u16 partref;
+ u16 parent_partref;
+ u32 generation;
+ u32 parent_block;
+ u32 parent_generation;
+ } udf;
+ __u32 raw[0];
+ };
+};
+
+/**
+ * struct export_operations - for nfsd to communicate with file systems
+ * @encode_fh: encode a file handle fragment from a dentry
+ * @fh_to_dentry: find the implied object and get a dentry for it
+ * @fh_to_parent: find the implied object's parent and get a dentry for it
+ * @get_name: find the name for a given inode in a given directory
+ * @get_parent: find the parent of a given directory
+ * @commit_metadata: commit metadata changes to stable storage
+ *
+ * See Documentation/filesystems/nfs/Exporting for details on how to use
+ * this interface correctly.
+ *
+ * encode_fh:
+ * @encode_fh should store in the file handle fragment @fh (using at most
+ * @max_len bytes) information that can be used by @decode_fh to recover the
+ * file referred to by the &struct dentry @de. If the @connectable flag is
+ * set, the encode_fh() should store sufficient information so that a good
+ * attempt can be made to find not only the file but also its place in the
+ * filesystem. This typically means storing a reference to de->d_parent in
+ * the filehandle fragment. encode_fh() should return the fileid_type on
+ * success and on error returns 255 (if the space needed to encode fh is
+ * greater than @max_len*4 bytes). On error @max_len contains the minimum
+ * size(in 4 byte unit) needed to encode the file handle.
+ *
+ * fh_to_dentry:
+ * @fh_to_dentry is given a &struct super_block (@sb) and a file handle
+ * fragment (@fh, @fh_len). It should return a &struct dentry which refers
+ * to the same file that the file handle fragment refers to. If it cannot,
+ * it should return a %NULL pointer if the file was found but no acceptable
+ * &dentries were available, or an %ERR_PTR error code indicating why it
+ * couldn't be found (e.g. %ENOENT or %ENOMEM). Any suitable dentry can be
+ * returned including, if necessary, a new dentry created with d_alloc_root.
+ * The caller can then find any other extant dentries by following the
+ * d_alias links.
+ *
+ * fh_to_parent:
+ * Same as @fh_to_dentry, except that it returns a pointer to the parent
+ * dentry if it was encoded into the filehandle fragment by @encode_fh.
+ *
+ * get_name:
+ * @get_name should find a name for the given @child in the given @parent
+ * directory. The name should be stored in the @name (with the
+ * understanding that it is already pointing to a %NAME_MAX+1 sized
+ * buffer). get_name() should return %0 on success, or a negative error
+ * code on error. @get_name will be called without @parent->i_mutex held.
+ *
+ * get_parent:
+ * @get_parent should find the parent directory for the given @child which
+ * is also a directory. In the event that it cannot be found, or storage
+ * space cannot be allocated, an %ERR_PTR should be returned.
+ *
+ * commit_metadata:
+ * @commit_metadata should commit metadata changes to stable storage.
+ *
+ * Locking rules:
+ * get_parent is called with child->d_inode->i_mutex down
+ * get_name is not (which is possibly inconsistent)
+ */
+
+/* types of block ranges for multipage write mappings. */
+#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
+#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
+#define IOMAP_MAPPED 0x03 /* blocks allocated @blkno */
+#define IOMAP_UNWRITTEN 0x04 /* blocks allocated @blkno in unwritten state */
+
+#define IOMAP_NULL_BLOCK -1LL /* blkno is not valid */
+
+struct iomap {
+ sector_t blkno; /* first sector of mapping */
+ loff_t offset; /* file offset of mapping, bytes */
+ u64 length; /* length of mapping, bytes */
+ int type; /* type of mapping */
+};
+
+struct export_operations {
+ int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
+ struct inode *parent);
+ struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type);
+ struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type);
+ int (*get_name)(struct dentry *parent, char *name,
+ struct dentry *child);
+ struct dentry * (*get_parent)(struct dentry *child);
+ int (*commit_metadata)(struct inode *inode);
+
+ int (*get_uuid)(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
+ int (*map_blocks)(struct inode *inode, loff_t offset,
+ u64 len, struct iomap *iomap,
+ bool write, u32 *device_generation);
+ int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
+ int nr_iomaps, struct iattr *iattr);
+};
+
+extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
+ int *max_len, struct inode *parent);
+extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
+ int *max_len, int connectable);
+extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
+ int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
+ void *context);
+
+/*
+ * Generic helpers for filesystems.
+ */
+extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type,
+ struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+ struct fid *fid, int fh_len, int fh_type,
+ struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
+
+#endif /* LINUX_EXPORTFS_H */
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
new file mode 100644
index 000000000..2723e715f
--- /dev/null
+++ b/include/linux/ext2_fs.h
@@ -0,0 +1,42 @@
+/*
+ * linux/include/linux/ext2_fs.h
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * from
+ *
+ * linux/include/linux/minix_fs.h
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#ifndef _LINUX_EXT2_FS_H
+#define _LINUX_EXT2_FS_H
+
+#include <linux/types.h>
+#include <linux/magic.h>
+
+#define EXT2_NAME_LEN 255
+
+/*
+ * Maximal count of links to a file
+ */
+#define EXT2_LINK_MAX 32000
+
+#define EXT2_SB_MAGIC_OFFSET 0x38
+#define EXT2_SB_BLOCKS_OFFSET 0x04
+#define EXT2_SB_BSIZE_OFFSET 0x18
+
+static inline u64 ext2_image_size(void *ext2_sb)
+{
+ __u8 *p = ext2_sb;
+ if (*(__le16 *)(p + EXT2_SB_MAGIC_OFFSET) != cpu_to_le16(EXT2_SUPER_MAGIC))
+ return 0;
+ return (u64)le32_to_cpup((__le32 *)(p + EXT2_SB_BLOCKS_OFFSET)) <<
+ le32_to_cpup((__le32 *)(p + EXT2_SB_BSIZE_OFFSET));
+}
+
+#endif /* _LINUX_EXT2_FS_H */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
new file mode 100644
index 000000000..36f49c405
--- /dev/null
+++ b/include/linux/extcon.h
@@ -0,0 +1,377 @@
+/*
+ * External connector (extcon) class driver
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Author: Donggeun Kim <dg77.kim@samsung.com>
+ * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * based on switch class driver
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_EXTCON_H__
+#define __LINUX_EXTCON_H__
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/sysfs.h>
+
+#define SUPPORTED_CABLE_MAX 32
+#define CABLE_NAME_MAX 30
+
+/*
+ * The standard cable names are meant to help general notifier
+ * and notifiee device drivers share common names.
+ * Please use standard cable names unless your notifier device has
+ * a very unique and abnormal cable or
+ * the cable type is supposed to be used with only one unique
+ * pair of notifier/notifiee devices.
+ *
+ * Please add any other "standard" cables used with extcon dev.
+ *
+ * You may add a dot and number to specify version or specification
+ * of the specific cable if it is required. (e.g., "Fast-charger.18"
+ * and "Fast-charger.10" for 1.8A and 1.0A chargers)
+ * However, the notifiee and notifier should be able to handle such
+ * strings, and if the notifiee can negotiate the protocol or identify the
+ * cable itself, you don't need such a convention. This convention is helpful
+ * when the notifier can distinguish cables but the notifiee cannot.
+ */
+enum extcon_cable_name {
+ EXTCON_USB = 0,
+ EXTCON_USB_HOST,
+ EXTCON_TA, /* Travel Adaptor */
+ EXTCON_FAST_CHARGER,
+ EXTCON_SLOW_CHARGER,
+ EXTCON_CHARGE_DOWNSTREAM, /* Charging an external device */
+ EXTCON_HDMI,
+ EXTCON_MHL,
+ EXTCON_DVI,
+ EXTCON_VGA,
+ EXTCON_DOCK,
+ EXTCON_LINE_IN,
+ EXTCON_LINE_OUT,
+ EXTCON_MIC_IN,
+ EXTCON_HEADPHONE_OUT,
+ EXTCON_SPDIF_IN,
+ EXTCON_SPDIF_OUT,
+ EXTCON_VIDEO_IN,
+ EXTCON_VIDEO_OUT,
+ EXTCON_MECHANICAL,
+};
+extern const char extcon_cable_name[][CABLE_NAME_MAX + 1];
+
+struct extcon_cable;
+
+/**
+ * struct extcon_dev - An extcon device represents one external connector.
+ * @name: The name of this extcon device. Parent device name is
+ * used if NULL.
+ * @supported_cable: Array of supported cable names ending with NULL.
+ * If supported_cable is NULL, cable name related APIs
+ * are disabled.
+ * @mutually_exclusive: Array of mutually exclusive set of cables that cannot
+ * be attached simultaneously. The array should be
+ * ending with NULL or be NULL (no mutually exclusive
+ * cables). For example, if it is { 0x7, 0x30, 0}, then,
+ * {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
+ * be attached simultaneously. {0x7, 0} is equivalent to
+ * {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
+ * can be no simultaneous connections.
+ * @print_name: An optional callback to override the method to print the
+ * name of the extcon device.
+ * @print_state: An optional callback to override the method to print the
+ * status of the extcon device.
+ * @dev: Device of this extcon.
+ * @state: Attach/detach state of this extcon. Do not provide at
+ * register-time.
+ * @nh: Notifier for the state change events from this extcon
+ * @entry: To support list of extcon devices so that users can search
+ * for extcon devices based on the extcon name.
+ * @lock: Spinlock protecting the internal state; it may be taken from IRQ context.
+ * @max_supported: Internal value to store the number of cables.
+ * @extcon_dev_type: Device_type struct to provide attribute_groups
+ * customized for each extcon device.
+ * @cables: Sysfs subdirectories. Each represents one cable.
+ *
+ * In most cases, users only need to provide "User initializing data" of
+ * this struct when registering an extcon. In some exceptional cases,
+ * optional callbacks may be needed. However, the values in "internal data"
+ * are overwritten by register function.
+ */
+struct extcon_dev {
+ /* Optional user initializing data */
+ const char *name;
+ const char **supported_cable;
+ const u32 *mutually_exclusive;
+
+ /* Optional callbacks to override class functions */
+ ssize_t (*print_name)(struct extcon_dev *edev, char *buf);
+ ssize_t (*print_state)(struct extcon_dev *edev, char *buf);
+
+ /* Internal data. Please do not set. */
+ struct device dev;
+ struct raw_notifier_head nh;
+ struct list_head entry;
+ int max_supported;
+ spinlock_t lock; /* could be called by irq handler */
+ u32 state;
+
+ /* /sys/class/extcon/.../cable.n/... */
+ struct device_type extcon_dev_type;
+ struct extcon_cable *cables;
+
+ /* /sys/class/extcon/.../mutually_exclusive/... */
+ struct attribute_group attr_g_muex;
+ struct attribute **attrs_muex;
+ struct device_attribute *d_attrs_muex;
+};
+
+/**
+ * struct extcon_cable - An internal data for each cable of extcon device.
+ * @edev: The extcon device
+ * @cable_index: Index of this cable in the edev
+ * @attr_g: Attribute group for the cable
+ * @attr_name: "name" sysfs entry
+ * @attr_state: "state" sysfs entry
+ * @attrs: Array pointing to attr_name and attr_state for attr_g
+ */
+struct extcon_cable {
+ struct extcon_dev *edev;
+ int cable_index;
+
+ struct attribute_group attr_g;
+ struct device_attribute attr_name;
+ struct device_attribute attr_state;
+
+ struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
+};
+
+/**
+ * struct extcon_specific_cable_nb - An internal data for
+ * extcon_register_interest().
+ * @internal_nb: A notifier block bridging extcon notifier
+ * and cable notifier.
+ * @user_nb: user provided notifier block for events from
+ * a specific cable.
+ * @cable_index: the target cable.
+ * @edev: the target extcon device.
+ * @previous_value: the saved previous event value.
+ */
+struct extcon_specific_cable_nb {
+ struct notifier_block internal_nb;
+ struct notifier_block *user_nb;
+ int cable_index;
+ struct extcon_dev *edev;
+ unsigned long previous_value;
+};
+
+#if IS_ENABLED(CONFIG_EXTCON)
+
+/*
+ * Following APIs are for notifiers or configurations.
+ * Notifiers are the external port and connection devices.
+ */
+extern int extcon_dev_register(struct extcon_dev *edev);
+extern void extcon_dev_unregister(struct extcon_dev *edev);
+extern int devm_extcon_dev_register(struct device *dev,
+ struct extcon_dev *edev);
+extern void devm_extcon_dev_unregister(struct device *dev,
+ struct extcon_dev *edev);
+extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+
+/*
+ * Following APIs control the memory of extcon device.
+ */
+extern struct extcon_dev *extcon_dev_allocate(const char **cables);
+extern void extcon_dev_free(struct extcon_dev *edev);
+extern struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const char **cables);
+extern void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev);
+
+/*
+ * get/set/update_state access the 32b encoded state value, which represents
+ * states of all possible cables of the multistate port. For example, if one
+ * calls extcon_set_state(edev, 0x7), it may mean that all the three cables
+ * are attached to the port.
+ */
+static inline u32 extcon_get_state(struct extcon_dev *edev)
+{
+ return edev->state;
+}
+
+extern int extcon_set_state(struct extcon_dev *edev, u32 state);
+extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
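+
+/*
+ * Editorial usage sketch (not part of the original header).  With three
+ * supported cables, bits 0-2 of the state each describe one cable.  A
+ * notifier that detects cable 1 being attached, without touching the other
+ * bits, and later detached, could do:
+ *
+ *	extcon_update_state(edev, BIT(1), BIT(1));
+ *	extcon_update_state(edev, BIT(1), 0);
+ */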
+
+/*
+ * get/set_cable_state access each bit of the 32b encoded state value.
+ * They are used to access the status of each cable based on the cable_name
+ * or cable_index, which is retrieved by extcon_find_cable_index
+ */
+extern int extcon_find_cable_index(struct extcon_dev *sdev,
+ const char *cable_name);
+extern int extcon_get_cable_state_(struct extcon_dev *edev, int cable_index);
+extern int extcon_set_cable_state_(struct extcon_dev *edev, int cable_index,
+ bool cable_state);
+
+extern int extcon_get_cable_state(struct extcon_dev *edev,
+ const char *cable_name);
+extern int extcon_set_cable_state(struct extcon_dev *edev,
+ const char *cable_name, bool cable_state);
+
+/*
+ * Following APIs are for notifiees (those who want to be notified)
+ * to register a callback for events from a specific cable of the extcon.
+ * Notifiees are the connected device drivers wanting to get notified by
+ * a specific external port of a connection device.
+ */
+extern int extcon_register_interest(struct extcon_specific_cable_nb *obj,
+ const char *extcon_name,
+ const char *cable_name,
+ struct notifier_block *nb);
+extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
+
+/*
+ * Following APIs are to monitor every action of a notifier.
+ * Registrar gets notified for every external port of a connection device.
+ * Probably this could be used to debug an action of a notifier; however,
+ * we do not recommend using this for normal 'notifiee' device drivers that
+ * want to be notified by a specific external port of the notifier.
+ */
+extern int extcon_register_notifier(struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern int extcon_unregister_notifier(struct extcon_dev *edev,
+ struct notifier_block *nb);
+
+/*
+ * Following API get the extcon device from devicetree.
+ * This function use phandle of devicetree to get extcon device directly.
+ */
+extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index);
+#else /* CONFIG_EXTCON */
+static inline int extcon_dev_register(struct extcon_dev *edev)
+{
+ return 0;
+}
+
+static inline void extcon_dev_unregister(struct extcon_dev *edev) { }
+
+static inline int devm_extcon_dev_register(struct device *dev,
+ struct extcon_dev *edev)
+{
+ return -EINVAL;
+}
+
+static inline void devm_extcon_dev_unregister(struct device *dev,
+ struct extcon_dev *edev) { }
+
+static inline struct extcon_dev *extcon_dev_allocate(const char **cables)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void extcon_dev_free(struct extcon_dev *edev) { }
+
+static inline struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+ const char **cables)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_extcon_dev_free(struct extcon_dev *edev) { }
+
+static inline u32 extcon_get_state(struct extcon_dev *edev)
+{
+ return 0;
+}
+
+static inline int extcon_set_state(struct extcon_dev *edev, u32 state)
+{
+ return 0;
+}
+
+static inline int extcon_update_state(struct extcon_dev *edev, u32 mask,
+ u32 state)
+{
+ return 0;
+}
+
+static inline int extcon_find_cable_index(struct extcon_dev *edev,
+ const char *cable_name)
+{
+ return 0;
+}
+
+static inline int extcon_get_cable_state_(struct extcon_dev *edev,
+ int cable_index)
+{
+ return 0;
+}
+
+static inline int extcon_set_cable_state_(struct extcon_dev *edev,
+ int cable_index, bool cable_state)
+{
+ return 0;
+}
+
+static inline int extcon_get_cable_state(struct extcon_dev *edev,
+ const char *cable_name)
+{
+ return 0;
+}
+
+static inline int extcon_set_cable_state(struct extcon_dev *edev,
+ const char *cable_name, int state)
+{
+ return 0;
+}
+
+static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
+{
+ return NULL;
+}
+
+static inline int extcon_register_notifier(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_notifier(struct extcon_dev *edev,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
+ const char *extcon_name,
+ const char *cable_name,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
+ *obj)
+{
+ return 0;
+}
+
+static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+ int index)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_EXTCON */
+#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/extcon/extcon-adc-jack.h b/include/linux/extcon/extcon-adc-jack.h
new file mode 100644
index 000000000..9ca958c4e
--- /dev/null
+++ b/include/linux/extcon/extcon-adc-jack.h
@@ -0,0 +1,71 @@
+/*
+ * include/linux/extcon/extcon-adc-jack.h
+ *
+ * Analog Jack extcon driver with ADC-based detection capability.
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _EXTCON_ADC_JACK_H_
+#define _EXTCON_ADC_JACK_H_ __FILE__
+
+#include <linux/module.h>
+#include <linux/extcon.h>
+
+/**
+ * struct adc_jack_cond - condition to use an extcon state
+ * @state: the corresponding extcon state (if 0, this struct
+ * denotes the last adc_jack_cond element among the array)
+ * @min_adc: min adc value for this condition
+ * @max_adc: max adc value for this condition
+ *
+ * For example, if { .state = 0x3, .min_adc = 100, .max_adc = 200 }, it means
+ * that if the ADC value is between 100 and 200 (inclusive), then cables 0 and
+ * 1 are attached (1 << 0 | 1 << 1 == 0x3).
+ *
+ * Note that you don't need to describe a condition for "no cable attached"
+ * because when no adc_jack_cond is met, state = 0 is automatically chosen.
+ */
+struct adc_jack_cond {
+ u32 state; /* extcon state value. 0 if invalid */
+ u32 min_adc;
+ u32 max_adc;
+};
+
+/**
+ * struct adc_jack_pdata - platform data for adc jack device.
+ * @name: name of the extcon device. If null, "adc-jack" is used.
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channels used within
+ * the consumer. E.g. 'battery_voltage'
+ * @cable_names: array of cable names ending with null.
+ * @adc_conditions: array of struct adc_jack_cond conditions ending
+ * with .state = 0 entry. This describes how to decode
+ * adc values into extcon state.
+ * @irq_flags: irq flags used for the @irq
+ * @handling_delay_ms: in some devices, we need to read the ADC value some
+ * milliseconds after the interrupt occurs. You may
+ * describe such delays with @handling_delay_ms, which
+ * is rounded off to jiffies.
+ */
+struct adc_jack_pdata {
+ const char *name;
+ const char *consumer_channel;
+
+ /* The last entry should be NULL */
+ const char **cable_names;
+
+ /* The last entry's state should be 0 */
+ struct adc_jack_cond *adc_conditions;
+
+ unsigned long irq_flags;
+ unsigned long handling_delay_ms; /* in ms */
+};
+
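+/*
+ * Editor's sketch of how board code might fill this in. Illustrative only:
+ * the ADC ranges, cable names and example_* identifiers below are made up,
+ * not taken from any real board.
+ */
+static struct adc_jack_cond example_adc_conditions[] = {
+	{ .state = 0x1, .min_adc = 0,   .max_adc = 400 },	/* cable 0 only */
+	{ .state = 0x2, .min_adc = 401, .max_adc = 800 },	/* cable 1 only */
+	{ /* .state = 0 terminates the array */ },
+};
+
+static const char *example_cable_names[] = { "Headset", "Charger", NULL };
+
+static struct adc_jack_pdata example_adc_jack_pdata = {
+	.name			= "example-adc-jack",
+	.consumer_channel	= "jack_adc",
+	.cable_names		= example_cable_names,
+	.adc_conditions		= example_adc_conditions,
+	.handling_delay_ms	= 10,
+};
+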
+#endif /* _EXTCON_ADC_JACK_H_ */
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
new file mode 100644
index 000000000..0b17ad43f
--- /dev/null
+++ b/include/linux/extcon/extcon-gpio.h
@@ -0,0 +1,59 @@
+/*
+ * External connector (extcon) class generic GPIO driver
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * based on switch class driver
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+#ifndef __EXTCON_GPIO_H__
+#define __EXTCON_GPIO_H__ __FILE__
+
+#include <linux/extcon.h>
+
+/**
+ * struct gpio_extcon_platform_data - A simple GPIO-controlled extcon device.
+ * @name: The name of this GPIO extcon device.
+ * @gpio: Corresponding GPIO.
+ * @gpio_active_low: Boolean describing whether gpio active state is 1 or 0.
+ * If true, low state of gpio means active.
+ * If false, high state of gpio means active.
+ * @debounce: Debounce time for GPIO IRQ in ms.
+ * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
+ * @state_on: print_state is overridden with state_on if attached.
+ * If NULL, the default method of the extcon class is used.
+ * @state_off: print_state is overridden with state_off if detached.
+ * If NULL, the default method of the extcon class is used.
+ * @check_on_resume: Boolean describing whether to check the state of gpio
+ * while resuming from sleep.
+ *
+ * Note that in order for state_on or state_off to be valid, both state_on
+ * and state_off must be non-NULL. If at least one of them is NULL,
+ * print_state is not overridden.
+ */
+struct gpio_extcon_platform_data {
+ const char *name;
+ unsigned gpio;
+ bool gpio_active_low;
+ unsigned long debounce;
+ unsigned long irq_flags;
+
+ /* if NULL, "0" or "1" will be printed */
+ const char *state_on;
+ const char *state_off;
+ bool check_on_resume;
+};
+
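+/*
+ * Editor's sketch (hypothetical board data, not from this header): one
+ * active-low GPIO exposed as an extcon device. The GPIO number and names
+ * are placeholders.
+ */
+static struct gpio_extcon_platform_data example_gpio_extcon_pdata = {
+	.name			= "usb-cable",
+	.gpio			= 42,
+	.gpio_active_low	= true,
+	.debounce		= 20,	/* ms */
+	/* state_on/state_off left NULL: the default "0"/"1" output is used */
+	.check_on_resume	= true,
+};
+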
+#endif /* __EXTCON_GPIO_H__ */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
new file mode 100644
index 000000000..591f8c3ef
--- /dev/null
+++ b/include/linux/f2fs_fs.h
@@ -0,0 +1,476 @@
+/**
+ * include/linux/f2fs_fs.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_F2FS_FS_H
+#define _LINUX_F2FS_FS_H
+
+#include <linux/pagemap.h>
+#include <linux/types.h>
+
+#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
+#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
+#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
+#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
+#define F2FS_BLKSIZE 4096 /* support only 4KB block */
+#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
+#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
+#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+
+#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
+#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+
+#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
+
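+/*
+ * Editor's worked example (illustrative): with 4KB blocks,
+ *   F2FS_BLK_ALIGN(8200)    == 3 (rounds up to whole blocks),
+ *   F2FS_BYTES_TO_BLK(8200) == 2 (rounds down to a block index),
+ *   F2FS_BLK_TO_BYTES(3)    == 12288.
+ */
+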
+/* 0, 1(node nid), 2(meta nid) are reserved node id */
+#define F2FS_RESERVED_NODE_NUM 3
+
+#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
+#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
+#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
+
+/* This flag is used by node and meta inodes, and by recovery */
+#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
+#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
+
+/*
+ * For further optimization on multi-head logs, the on-disk layout supports a
+ * maximum of 16 logs by default. The number 16 is expected to be enough to
+ * cover all cases. The implementation currently uses no more than 6 logs.
+ * Half the logs are used for nodes, and the other half are used for data.
+ */
+#define MAX_ACTIVE_LOGS 16
+#define MAX_ACTIVE_NODE_LOGS 8
+#define MAX_ACTIVE_DATA_LOGS 8
+
+/*
+ * For superblock
+ */
+struct f2fs_super_block {
+ __le32 magic; /* Magic Number */
+ __le16 major_ver; /* Major Version */
+ __le16 minor_ver; /* Minor Version */
+ __le32 log_sectorsize; /* log2 sector size in bytes */
+ __le32 log_sectors_per_block; /* log2 # of sectors per block */
+ __le32 log_blocksize; /* log2 block size in bytes */
+ __le32 log_blocks_per_seg; /* log2 # of blocks per segment */
+ __le32 segs_per_sec; /* # of segments per section */
+ __le32 secs_per_zone; /* # of sections per zone */
+ __le32 checksum_offset; /* checksum offset inside super block */
+ __le64 block_count; /* total # of user blocks */
+ __le32 section_count; /* total # of sections */
+ __le32 segment_count; /* total # of segments */
+ __le32 segment_count_ckpt; /* # of segments for checkpoint */
+ __le32 segment_count_sit; /* # of segments for SIT */
+ __le32 segment_count_nat; /* # of segments for NAT */
+ __le32 segment_count_ssa; /* # of segments for SSA */
+ __le32 segment_count_main; /* # of segments for main area */
+ __le32 segment0_blkaddr; /* start block address of segment 0 */
+ __le32 cp_blkaddr; /* start block address of checkpoint */
+ __le32 sit_blkaddr; /* start block address of SIT */
+ __le32 nat_blkaddr; /* start block address of NAT */
+ __le32 ssa_blkaddr; /* start block address of SSA */
+ __le32 main_blkaddr; /* start block address of main area */
+ __le32 root_ino; /* root inode number */
+ __le32 node_ino; /* node inode number */
+ __le32 meta_ino; /* meta inode number */
+ __u8 uuid[16]; /* 128-bit uuid for volume */
+ __le16 volume_name[512]; /* volume name */
+ __le32 extension_count; /* # of extensions below */
+ __u8 extension_list[F2FS_MAX_EXTENSION][8]; /* extension array */
+ __le32 cp_payload;
+} __packed;
+
+/*
+ * For checkpoint
+ */
+#define CP_FASTBOOT_FLAG 0x00000020
+#define CP_FSCK_FLAG 0x00000010
+#define CP_ERROR_FLAG 0x00000008
+#define CP_COMPACT_SUM_FLAG 0x00000004
+#define CP_ORPHAN_PRESENT_FLAG 0x00000002
+#define CP_UMOUNT_FLAG 0x00000001
+
+#define F2FS_CP_PACKS 2 /* # of checkpoint packs */
+
+struct f2fs_checkpoint {
+ __le64 checkpoint_ver; /* checkpoint block version number */
+ __le64 user_block_count; /* # of user blocks */
+ __le64 valid_block_count; /* # of valid blocks in main area */
+ __le32 rsvd_segment_count; /* # of reserved segments for gc */
+ __le32 overprov_segment_count; /* # of overprovision segments */
+ __le32 free_segment_count; /* # of free segments in main area */
+
+ /* information of current node segments */
+ __le32 cur_node_segno[MAX_ACTIVE_NODE_LOGS];
+ __le16 cur_node_blkoff[MAX_ACTIVE_NODE_LOGS];
+ /* information of current data segments */
+ __le32 cur_data_segno[MAX_ACTIVE_DATA_LOGS];
+ __le16 cur_data_blkoff[MAX_ACTIVE_DATA_LOGS];
+ __le32 ckpt_flags; /* Flags : umount and journal_present */
+ __le32 cp_pack_total_block_count; /* total # of one cp pack */
+ __le32 cp_pack_start_sum; /* start block number of data summary */
+ __le32 valid_node_count; /* Total number of valid nodes */
+ __le32 valid_inode_count; /* Total number of valid inodes */
+ __le32 next_free_nid; /* Next free node number */
+ __le32 sit_ver_bitmap_bytesize; /* Default value 64 */
+ __le32 nat_ver_bitmap_bytesize; /* Default value 256 */
+ __le32 checksum_offset; /* checksum offset inside cp block */
+ __le64 elapsed_time; /* mounted time */
+ /* allocation type of current segment */
+ unsigned char alloc_type[MAX_ACTIVE_LOGS];
+
+ /* SIT and NAT version bitmap */
+ unsigned char sit_nat_version_bitmap[1];
+} __packed;
+
+/*
+ * For orphan inode management
+ */
+#define F2FS_ORPHANS_PER_BLOCK 1020
+
+#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+ F2FS_ORPHANS_PER_BLOCK)
+
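+/*
+ * Editor's worked example (illustrative): 1021 orphan inodes need
+ * GET_ORPHAN_BLOCKS(1021) == 2 blocks, since each orphan block holds
+ * F2FS_ORPHANS_PER_BLOCK (1020) inode numbers.
+ */
+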
+struct f2fs_orphan_block {
+ __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */
+ __le32 reserved; /* reserved */
+ __le16 blk_addr; /* block index in current CP */
+ __le16 blk_count; /* Number of orphan inode blocks in CP */
+ __le32 entry_count; /* Total number of orphan nodes in current CP */
+ __le32 check_sum; /* CRC32 for orphan inode block */
+} __packed;
+
+/*
+ * For NODE structure
+ */
+struct f2fs_extent {
+ __le32 fofs; /* start file offset of the extent */
+ __le32 blk; /* start block address of the extent */
+	__le32 len;		/* length of the extent */
+} __packed;
+
+#define F2FS_NAME_LEN 255
+#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
+#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
+#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
+#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+
+#define ADDRS_PER_PAGE(page, fi) \
+ (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+
+#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
+#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
+#define NODE_IND1_BLOCK (DEF_ADDRS_PER_INODE + 3)
+#define NODE_IND2_BLOCK (DEF_ADDRS_PER_INODE + 4)
+#define NODE_DIND_BLOCK (DEF_ADDRS_PER_INODE + 5)
+
+#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */
+#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
+#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
+#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
+#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */
+
+#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
+ F2FS_INLINE_XATTR_ADDRS - 1))
+
+struct f2fs_inode {
+ __le16 i_mode; /* file mode */
+ __u8 i_advise; /* file hints */
+ __u8 i_inline; /* file inline flags */
+ __le32 i_uid; /* user ID */
+ __le32 i_gid; /* group ID */
+ __le32 i_links; /* links count */
+ __le64 i_size; /* file size in bytes */
+ __le64 i_blocks; /* file size in blocks */
+ __le64 i_atime; /* access time */
+ __le64 i_ctime; /* change time */
+ __le64 i_mtime; /* modification time */
+ __le32 i_atime_nsec; /* access time in nano scale */
+ __le32 i_ctime_nsec; /* change time in nano scale */
+ __le32 i_mtime_nsec; /* modification time in nano scale */
+ __le32 i_generation; /* file version (for NFS) */
+ __le32 i_current_depth; /* only for directory depth */
+ __le32 i_xattr_nid; /* nid to save xattr */
+ __le32 i_flags; /* file attributes */
+ __le32 i_pino; /* parent inode number */
+ __le32 i_namelen; /* file name length */
+ __u8 i_name[F2FS_NAME_LEN]; /* file name for SPOR */
+ __u8 i_dir_level; /* dentry_level for large dir */
+
+ struct f2fs_extent i_ext; /* caching a largest extent */
+
+ __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
+
+ __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2),
+ double_indirect(1) node id */
+} __packed;
+
+struct direct_node {
+ __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */
+} __packed;
+
+struct indirect_node {
+ __le32 nid[NIDS_PER_BLOCK]; /* array of data block address */
+} __packed;
+
+enum {
+ COLD_BIT_SHIFT = 0,
+ FSYNC_BIT_SHIFT,
+ DENT_BIT_SHIFT,
+ OFFSET_BIT_SHIFT
+};
+
+#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */
+
+struct node_footer {
+ __le32 nid; /* node id */
+	__le32 ino;		/* inode number */
+ __le32 flag; /* include cold/fsync/dentry marks and offset */
+ __le64 cp_ver; /* checkpoint version */
+ __le32 next_blkaddr; /* next node page block address */
+} __packed;
+
+struct f2fs_node {
+ /* can be one of three types: inode, direct, and indirect types */
+ union {
+ struct f2fs_inode i;
+ struct direct_node dn;
+ struct indirect_node in;
+ };
+ struct node_footer footer;
+} __packed;
+
+/*
+ * For NAT entries
+ */
+#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
+
+struct f2fs_nat_entry {
+ __u8 version; /* latest version of cached nat entry */
+ __le32 ino; /* inode number */
+ __le32 block_addr; /* block address */
+} __packed;
+
+struct f2fs_nat_block {
+ struct f2fs_nat_entry entries[NAT_ENTRY_PER_BLOCK];
+} __packed;
+
+/*
+ * For SIT entries
+ *
+ * Each segment is 2MB in size by default, so the bitmap for the validity of
+ * the blocks therein occupies 64 bytes (512 bits).
+ * Changing this is not allowed.
+ */
+#define SIT_VBLOCK_MAP_SIZE 64
+#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry))
+
+/*
+ * Note that f2fs_sit_entry->vblocks has the following bit-field information.
+ * [15:10] : allocation type such as CURSEG_XXXX_TYPE
+ * [9:0] : valid block count
+ */
+#define SIT_VBLOCKS_SHIFT 10
+#define SIT_VBLOCKS_MASK ((1 << SIT_VBLOCKS_SHIFT) - 1)
+#define GET_SIT_VBLOCKS(raw_sit) \
+ (le16_to_cpu((raw_sit)->vblocks) & SIT_VBLOCKS_MASK)
+#define GET_SIT_TYPE(raw_sit) \
+ ((le16_to_cpu((raw_sit)->vblocks) & ~SIT_VBLOCKS_MASK) \
+ >> SIT_VBLOCKS_SHIFT)
+
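+/*
+ * Editor's worked example (illustrative): for a CPU-order vblocks value of
+ * 0x0c80, GET_SIT_VBLOCKS() == 128 (bits [9:0], the valid block count) and
+ * GET_SIT_TYPE() == 0x3 (bits [15:10], the allocation type).
+ */
+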
+struct f2fs_sit_entry {
+ __le16 vblocks; /* reference above */
+ __u8 valid_map[SIT_VBLOCK_MAP_SIZE]; /* bitmap for valid blocks */
+ __le64 mtime; /* segment age for cleaning */
+} __packed;
+
+struct f2fs_sit_block {
+ struct f2fs_sit_entry entries[SIT_ENTRY_PER_BLOCK];
+} __packed;
+
+/*
+ * For segment summary
+ *
+ * One summary block contains exactly 512 summary entries, which represent
+ * exactly one 2MB segment by default. Changing these basic units is not
+ * allowed.
+ *
+ * NOTE: For initializing fields, you must use set_summary
+ *
+ * - If data page, nid represents dnode's nid
+ * - If node page, nid represents the node page's nid.
+ *
+ * ofs_in_node is used only for data pages. It represents the offset
+ * from the beginning of the node page used to get a data block address.
+ * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
+ */
+#define ENTRIES_IN_SUM 512
+#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
+#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
+#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
+
+/* a summary entry for a 4KB-sized block in a segment */
+struct f2fs_summary {
+ __le32 nid; /* parent node id */
+ union {
+ __u8 reserved[3];
+ struct {
+ __u8 version; /* node version number */
+ __le16 ofs_in_node; /* block index in parent node */
+ } __packed;
+ };
+} __packed;
+
+/* summary block type, node or data, is stored to the summary_footer */
+#define SUM_TYPE_NODE (1)
+#define SUM_TYPE_DATA (0)
+
+struct summary_footer {
+ unsigned char entry_type; /* SUM_TYPE_XXX */
+ __u32 check_sum; /* summary checksum */
+} __packed;
+
+#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
+ SUM_ENTRY_SIZE)
+#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
+ sizeof(struct nat_journal_entry))
+#define NAT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
+ sizeof(struct nat_journal_entry))
+#define SIT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
+ sizeof(struct sit_journal_entry))
+#define SIT_JOURNAL_RESERVED ((SUM_JOURNAL_SIZE - 2) %\
+ sizeof(struct sit_journal_entry))
+/*
+ * frequently updated NAT/SIT entries can be stored in the spare area in
+ * summary blocks
+ */
+enum {
+ NAT_JOURNAL = 0,
+ SIT_JOURNAL
+};
+
+struct nat_journal_entry {
+ __le32 nid;
+ struct f2fs_nat_entry ne;
+} __packed;
+
+struct nat_journal {
+ struct nat_journal_entry entries[NAT_JOURNAL_ENTRIES];
+ __u8 reserved[NAT_JOURNAL_RESERVED];
+} __packed;
+
+struct sit_journal_entry {
+ __le32 segno;
+ struct f2fs_sit_entry se;
+} __packed;
+
+struct sit_journal {
+ struct sit_journal_entry entries[SIT_JOURNAL_ENTRIES];
+ __u8 reserved[SIT_JOURNAL_RESERVED];
+} __packed;
+
+/* 4KB-sized summary block structure */
+struct f2fs_summary_block {
+ struct f2fs_summary entries[ENTRIES_IN_SUM];
+ union {
+ __le16 n_nats;
+ __le16 n_sits;
+ };
+ /* spare area is used by NAT or SIT journals */
+ union {
+ struct nat_journal nat_j;
+ struct sit_journal sit_j;
+ };
+ struct summary_footer footer;
+} __packed;
+
+/*
+ * For directory operations
+ */
+#define F2FS_DOT_HASH 0
+#define F2FS_DDOT_HASH F2FS_DOT_HASH
+#define F2FS_MAX_HASH (~((0x3ULL) << 62))
+#define F2FS_HASH_COL_BIT ((0x1ULL) << 63)
+
+typedef __le32 f2fs_hash_t;
+
+/* One directory entry slot covers 8bytes-long file name */
+#define F2FS_SLOT_LEN 8
+#define F2FS_SLOT_LEN_BITS 3
+
+#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
+
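+/*
+ * Editor's worked example (illustrative): an 11-character file name needs
+ * GET_DENTRY_SLOTS(11) == 2 slots, since each slot covers F2FS_SLOT_LEN (8)
+ * bytes of the name.
+ */
+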
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK 214
+
+/* MAX level for dir lookup */
+#define MAX_DIR_HASH_DEPTH 63
+
+/* MAX buckets in one level of dir */
+#define MAX_DIR_BUCKETS (1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
+
+#define SIZE_OF_DIR_ENTRY 11 /* by byte */
+#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
+ BITS_PER_BYTE)
+#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+ F2FS_SLOT_LEN) * \
+ NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
+
+/* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
+struct f2fs_dir_entry {
+ __le32 hash_code; /* hash code of file name */
+ __le32 ino; /* inode number */
+	__le16 name_len;	/* length of file name */
+ __u8 file_type; /* file type */
+} __packed;
+
+/* 4KB-sized directory entry block */
+struct f2fs_dentry_block {
+ /* validity bitmap for directory entries in each block */
+ __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
+ __u8 reserved[SIZE_OF_RESERVED];
+ struct f2fs_dir_entry dentry[NR_DENTRY_IN_BLOCK];
+ __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
+} __packed;
+
+/* for inline dir */
+#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
+ BITS_PER_BYTE + 1))
+#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \
+ BITS_PER_BYTE - 1) / BITS_PER_BYTE)
+#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
+ NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
+
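+/*
+ * Editor's worked example (illustrative): with the default layout,
+ * MAX_INLINE_DATA is 3488 bytes, which gives NR_INLINE_DENTRY == 182,
+ * INLINE_DENTRY_BITMAP_SIZE == 23 and INLINE_RESERVED_SIZE == 7, matching
+ * the struct f2fs_inline_dentry layout below.
+ */
+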
+/* inline directory entry structure */
+struct f2fs_inline_dentry {
+ __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
+ __u8 reserved[INLINE_RESERVED_SIZE];
+ struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
+ __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
+} __packed;
+
+/* file types used in inode_info->flags */
+enum {
+ F2FS_FT_UNKNOWN,
+ F2FS_FT_REG_FILE,
+ F2FS_FT_DIR,
+ F2FS_FT_CHRDEV,
+ F2FS_FT_BLKDEV,
+ F2FS_FT_FIFO,
+ F2FS_FT_SOCK,
+ F2FS_FT_SYMLINK,
+ F2FS_FT_MAX
+};
+
+#endif /* _LINUX_F2FS_FS_H */
diff --git a/include/linux/f75375s.h b/include/linux/f75375s.h
new file mode 100644
index 000000000..e99e22500
--- /dev/null
+++ b/include/linux/f75375s.h
@@ -0,0 +1,21 @@
+/*
+ * f75375s.h - platform data structure for f75375s sensor
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007, Riku Voipio <riku.voipio@iki.fi>
+ */
+
+#ifndef __LINUX_F75375S_H
+#define __LINUX_F75375S_H
+
+/* We want to set fans spinning on systems where there is no
+ * BIOS to do that for us */
+struct f75375s_platform_data {
+ u8 pwm[2];
+ u8 pwm_enable[2];
+};
+
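+/*
+ * Editor's sketch (illustrative only): board code passes the initial PWM
+ * duty cycles and modes like this. The values are placeholders; see the
+ * f75375s hwmon driver documentation for the meaning of each field.
+ */
+static struct f75375s_platform_data example_f75375s_pdata = {
+	.pwm		= { 255, 255 },
+	.pwm_enable	= { 1, 1 },
+};
+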
+#endif /* __LINUX_F75375S_H */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
new file mode 100644
index 000000000..996111000
--- /dev/null
+++ b/include/linux/falloc.h
@@ -0,0 +1,30 @@
+#ifndef _FALLOC_H_
+#define _FALLOC_H_
+
+#include <uapi/linux/falloc.h>
+
+
+/*
+ * Space reservation ioctls and argument structure
+ * are designed to be compatible with the legacy XFS ioctls.
+ */
+struct space_resv {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start;
+ __s64 l_len; /* len == 0 means until end of file */
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserved area */
+};
+
+#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
+#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+
+#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
+ FALLOC_FL_PUNCH_HOLE | \
+ FALLOC_FL_COLLAPSE_RANGE | \
+ FALLOC_FL_ZERO_RANGE | \
+ FALLOC_FL_INSERT_RANGE)
+
+#endif /* _FALLOC_H_ */
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
new file mode 100644
index 000000000..cef93ddcc
--- /dev/null
+++ b/include/linux/fanotify.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_FANOTIFY_H
+#define _LINUX_FANOTIFY_H
+
+#include <uapi/linux/fanotify.h>
+
+/* not valid from userspace, only kernel internal */
+#define FAN_MARK_ONDIR 0x00000100
+#endif /* _LINUX_FANOTIFY_H */
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
new file mode 100644
index 000000000..798fad9e4
--- /dev/null
+++ b/include/linux/fault-inject.h
@@ -0,0 +1,74 @@
+#ifndef _LINUX_FAULT_INJECT_H
+#define _LINUX_FAULT_INJECT_H
+
+#ifdef CONFIG_FAULT_INJECTION
+
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/ratelimit.h>
+#include <linux/atomic.h>
+
+/*
+ * For explanation of the elements of this struct, see
+ * Documentation/fault-injection/fault-injection.txt
+ */
+struct fault_attr {
+ unsigned long probability;
+ unsigned long interval;
+ atomic_t times;
+ atomic_t space;
+ unsigned long verbose;
+ u32 task_filter;
+ unsigned long stacktrace_depth;
+ unsigned long require_start;
+ unsigned long require_end;
+ unsigned long reject_start;
+ unsigned long reject_end;
+
+ unsigned long count;
+ struct ratelimit_state ratelimit_state;
+ struct dentry *dname;
+};
+
+#define FAULT_ATTR_INITIALIZER { \
+ .interval = 1, \
+ .times = ATOMIC_INIT(1), \
+ .require_end = ULONG_MAX, \
+ .stacktrace_depth = 32, \
+ .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
+ .verbose = 2, \
+ .dname = NULL, \
+ }
+
+#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
+int setup_fault_attr(struct fault_attr *attr, char *str);
+bool should_fail(struct fault_attr *attr, ssize_t size);
+
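+/*
+ * Editor's sketch of typical use (illustrative, not part of this header): a
+ * subsystem declares a fault_attr and consults should_fail() on its hot path.
+ * The example_* identifiers are hypothetical.
+ */
+static DECLARE_FAULT_ATTR(example_fail_attr);
+
+static inline bool example_should_fail_alloc(size_t size)
+{
+	return should_fail(&example_fail_attr, size);
+}
+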
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+struct dentry *fault_create_debugfs_attr(const char *name,
+ struct dentry *parent, struct fault_attr *attr);
+
+#else /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+static inline struct dentry *fault_create_debugfs_attr(const char *name,
+ struct dentry *parent, struct fault_attr *attr)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#endif /* CONFIG_FAULT_INJECTION */
+
+#ifdef CONFIG_FAILSLAB
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
+#else
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+ unsigned long flags)
+{
+ return false;
+}
+#endif /* CONFIG_FAILSLAB */
+
+#endif /* _LINUX_FAULT_INJECT_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
new file mode 100644
index 000000000..043f3283b
--- /dev/null
+++ b/include/linux/fb.h
@@ -0,0 +1,818 @@
+#ifndef _LINUX_FB_H
+#define _LINUX_FB_H
+
+#include <linux/kgdb.h>
+#include <uapi/linux/fb.h>
+
+#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user)
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/workqueue.h>
+#include <linux/notifier.h>
+#include <linux/list.h>
+#include <linux/backlight.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+struct vm_area_struct;
+struct fb_info;
+struct device;
+struct file;
+struct videomode;
+struct device_node;
+
+/* Definitions below are used in the parsed monitor specs */
+#define FB_DPMS_ACTIVE_OFF 1
+#define FB_DPMS_SUSPEND 2
+#define FB_DPMS_STANDBY 4
+
+#define FB_DISP_DDI 1
+#define FB_DISP_ANA_700_300 2
+#define FB_DISP_ANA_714_286 4
+#define FB_DISP_ANA_1000_400 8
+#define FB_DISP_ANA_700_000 16
+
+#define FB_DISP_MONO 32
+#define FB_DISP_RGB 64
+#define FB_DISP_MULTI 128
+#define FB_DISP_UNKNOWN 256
+
+#define FB_SIGNAL_NONE 0
+#define FB_SIGNAL_BLANK_BLANK 1
+#define FB_SIGNAL_SEPARATE 2
+#define FB_SIGNAL_COMPOSITE 4
+#define FB_SIGNAL_SYNC_ON_GREEN 8
+#define FB_SIGNAL_SERRATION_ON 16
+
+#define FB_MISC_PRIM_COLOR 1
+#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */
+#define FB_MISC_HDMI 4
+struct fb_chroma {
+ __u32 redx; /* in fraction of 1024 */
+ __u32 greenx;
+ __u32 bluex;
+ __u32 whitex;
+ __u32 redy;
+ __u32 greeny;
+ __u32 bluey;
+ __u32 whitey;
+};
+
+struct fb_monspecs {
+ struct fb_chroma chroma;
+ struct fb_videomode *modedb; /* mode database */
+ __u8 manufacturer[4]; /* Manufacturer */
+ __u8 monitor[14]; /* Monitor String */
+ __u8 serial_no[14]; /* Serial Number */
+ __u8 ascii[14]; /* ? */
+ __u32 modedb_len; /* mode database length */
+ __u32 model; /* Monitor Model */
+ __u32 serial; /* Serial Number - Integer */
+ __u32 year; /* Year manufactured */
+ __u32 week; /* Week Manufactured */
+ __u32 hfmin; /* hfreq lower limit (Hz) */
+ __u32 hfmax; /* hfreq upper limit (Hz) */
+ __u32 dclkmin; /* pixelclock lower limit (Hz) */
+ __u32 dclkmax; /* pixelclock upper limit (Hz) */
+ __u16 input; /* display type - see FB_DISP_* */
+ __u16 dpms; /* DPMS support - see FB_DPMS_ */
+ __u16 signal; /* Signal Type - see FB_SIGNAL_* */
+ __u16 vfmin; /* vfreq lower limit (Hz) */
+ __u16 vfmax; /* vfreq upper limit (Hz) */
+ __u16 gamma; /* Gamma - in fractions of 100 */
+ __u16 gtf : 1; /* supports GTF */
+ __u16 misc; /* Misc flags - see FB_MISC_* */
+ __u8 version; /* EDID version... */
+ __u8 revision; /* ...and revision */
+ __u8 max_x; /* Maximum horizontal size (cm) */
+ __u8 max_y; /* Maximum vertical size (cm) */
+};
+
+struct fb_cmap_user {
+ __u32 start; /* First entry */
+ __u32 len; /* Number of entries */
+ __u16 __user *red; /* Red values */
+ __u16 __user *green;
+ __u16 __user *blue;
+ __u16 __user *transp; /* transparency, can be NULL */
+};
+
+struct fb_image_user {
+ __u32 dx; /* Where to place image */
+ __u32 dy;
+ __u32 width; /* Size of image */
+ __u32 height;
+ __u32 fg_color; /* Only used when a mono bitmap */
+ __u32 bg_color;
+ __u8 depth; /* Depth of the image */
+ const char __user *data; /* Pointer to image data */
+ struct fb_cmap_user cmap; /* color map info */
+};
+
+struct fb_cursor_user {
+ __u16 set; /* what to set */
+ __u16 enable; /* cursor on/off */
+ __u16 rop; /* bitop operation */
+ const char __user *mask; /* cursor mask bits */
+ struct fbcurpos hot; /* cursor hot spot */
+ struct fb_image_user image; /* Cursor image */
+};
+
+/*
+ * Register/unregister for framebuffer events
+ */
+
+/* The resolution of the passed in fb_info is about to change */
+#define FB_EVENT_MODE_CHANGE 0x01
+/* The display on this fb_info is being suspended; no access to the
+ * framebuffer is allowed any more after that call returns
+ */
+#define FB_EVENT_SUSPEND 0x02
+/* The display on this fb_info was resumed; you can restore the display
+ * if you own it
+ */
+#define FB_EVENT_RESUME 0x03
+/* An entry from the modelist was removed */
+#define FB_EVENT_MODE_DELETE 0x04
+/* A driver registered itself */
+#define FB_EVENT_FB_REGISTERED 0x05
+/* A driver unregistered itself */
+#define FB_EVENT_FB_UNREGISTERED 0x06
+/* CONSOLE-SPECIFIC: get console to framebuffer mapping */
+#define FB_EVENT_GET_CONSOLE_MAP 0x07
+/* CONSOLE-SPECIFIC: set console to framebuffer mapping */
+#define FB_EVENT_SET_CONSOLE_MAP 0x08
+/* A hardware display blank change occurred */
+#define FB_EVENT_BLANK 0x09
+/* Private modelist is to be replaced */
+#define FB_EVENT_NEW_MODELIST 0x0A
+/* The resolution of the passed in fb_info is about to change and
+ all vc's should be changed */
+#define FB_EVENT_MODE_CHANGE_ALL 0x0B
+/* A software display blank change occurred */
+#define FB_EVENT_CONBLANK 0x0C
+/* Get drawing requirements */
+#define FB_EVENT_GET_REQ 0x0D
+/* Unbind from the console if possible */
+#define FB_EVENT_FB_UNBIND 0x0E
+/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
+#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
+/* A hardware display blank early change occurred */
+#define FB_EARLY_EVENT_BLANK 0x10
+/* A hardware display blank revert early change occurred */
+#define FB_R_EARLY_EVENT_BLANK 0x11
+
+struct fb_event {
+ struct fb_info *info;
+ void *data;
+};
+
+struct fb_blit_caps {
+ u32 x;
+ u32 y;
+ u32 len;
+ u32 flags;
+};
+
+extern int fb_register_client(struct notifier_block *nb);
+extern int fb_unregister_client(struct notifier_block *nb);
+extern int fb_notifier_call_chain(unsigned long val, void *v);
+/*
+ * Pixmap structure definition
+ *
+ * The purpose of this structure is to translate data
+ * from the hardware independent format of fbdev to what
+ * format the hardware needs.
+ */
+
+#define FB_PIXMAP_DEFAULT 1 /* used internally by fbcon */
+#define FB_PIXMAP_SYSTEM 2 /* memory is in system RAM */
+#define FB_PIXMAP_IO 4 /* memory is iomapped */
+#define FB_PIXMAP_SYNC 256 /* set if GPU can DMA */
+
+struct fb_pixmap {
+ u8 *addr; /* pointer to memory */
+ u32 size; /* size of buffer in bytes */
+ u32 offset; /* current offset to buffer */
+ u32 buf_align; /* byte alignment of each bitmap */
+ u32 scan_align; /* alignment per scanline */
+ u32 access_align; /* alignment per read/write (bits) */
+ u32 flags; /* see FB_PIXMAP_* */
+ u32 blit_x; /* supported bit block dimensions (1-32)*/
+ u32 blit_y; /* Format: blit_x = 1 << (width - 1) */
+ /* blit_y = 1 << (height - 1) */
+ /* if 0, will be set to 0xffffffff (all)*/
+ /* access methods */
+ void (*writeio)(struct fb_info *info, void __iomem *dst, void *src, unsigned int size);
+ void (*readio) (struct fb_info *info, void *dst, void __iomem *src, unsigned int size);
+};
+
+#ifdef CONFIG_FB_DEFERRED_IO
+struct fb_deferred_io {
+ /* delay between mkwrite and deferred handler */
+ unsigned long delay;
+ struct mutex lock; /* mutex that protects the page list */
+ struct list_head pagelist; /* list of touched pages */
+ /* callback */
+ void (*first_io)(struct fb_info *info);
+ void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
+};
+#endif
+
+/*
+ * Frame buffer operations
+ *
+ * LOCKING NOTE: those functions must _ALL_ be called with the console
+ * semaphore held, this is the only suitable locking mechanism we have
+ * in 2.6. Some may be called at interrupt time at this point though.
+ *
+ * The exception to this is the debug related hooks. Putting the fb
+ * into a debug state (e.g. flipping to the kernel console) and restoring
+ * it must be done in a lock-free manner, so low level drivers should
+ * keep track of the initial console (if applicable) and may need to
+ * perform direct, unlocked hardware writes in these hooks.
+ */
+
+struct fb_ops {
+ /* open/release and usage marking */
+ struct module *owner;
+ int (*fb_open)(struct fb_info *info, int user);
+ int (*fb_release)(struct fb_info *info, int user);
+
+	/* For framebuffers with strange non-linear layouts or that do not
+ * work with normal memory mapped access
+ */
+ ssize_t (*fb_read)(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*fb_write)(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+
+ /* checks var and eventually tweaks it to something supported,
+ * DO NOT MODIFY PAR */
+ int (*fb_check_var)(struct fb_var_screeninfo *var, struct fb_info *info);
+
+ /* set the video mode according to info->var */
+ int (*fb_set_par)(struct fb_info *info);
+
+ /* set color register */
+ int (*fb_setcolreg)(unsigned regno, unsigned red, unsigned green,
+ unsigned blue, unsigned transp, struct fb_info *info);
+
+ /* set color registers in batch */
+ int (*fb_setcmap)(struct fb_cmap *cmap, struct fb_info *info);
+
+ /* blank display */
+ int (*fb_blank)(int blank, struct fb_info *info);
+
+ /* pan display */
+ int (*fb_pan_display)(struct fb_var_screeninfo *var, struct fb_info *info);
+
+ /* Draws a rectangle */
+ void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
+	/* Copy data from one area to another */
+ void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
+	/* Draws an image to the display */
+ void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);
+
+ /* Draws cursor */
+ int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);
+
+ /* Rotates the display */
+ void (*fb_rotate)(struct fb_info *info, int angle);
+
+ /* wait for blit idle, optional */
+ int (*fb_sync)(struct fb_info *info);
+
+ /* perform fb specific ioctl (optional) */
+ int (*fb_ioctl)(struct fb_info *info, unsigned int cmd,
+ unsigned long arg);
+
+ /* Handle 32bit compat ioctl (optional) */
+ int (*fb_compat_ioctl)(struct fb_info *info, unsigned cmd,
+ unsigned long arg);
+
+ /* perform fb specific mmap */
+ int (*fb_mmap)(struct fb_info *info, struct vm_area_struct *vma);
+
+ /* get capability given var */
+ void (*fb_get_caps)(struct fb_info *info, struct fb_blit_caps *caps,
+ struct fb_var_screeninfo *var);
+
+ /* teardown any resources to do with this framebuffer */
+ void (*fb_destroy)(struct fb_info *info);
+
+ /* called at KDB enter and leave time to prepare the console */
+ int (*fb_debug_enter)(struct fb_info *info);
+ int (*fb_debug_leave)(struct fb_info *info);
+};
+
+#ifdef CONFIG_FB_TILEBLITTING
+#define FB_TILE_CURSOR_NONE 0
+#define FB_TILE_CURSOR_UNDERLINE 1
+#define FB_TILE_CURSOR_LOWER_THIRD 2
+#define FB_TILE_CURSOR_LOWER_HALF 3
+#define FB_TILE_CURSOR_TWO_THIRDS 4
+#define FB_TILE_CURSOR_BLOCK 5
+
+struct fb_tilemap {
+ __u32 width; /* width of each tile in pixels */
+ __u32 height; /* height of each tile in scanlines */
+ __u32 depth; /* color depth of each tile */
+ __u32 length; /* number of tiles in the map */
+ const __u8 *data; /* actual tile map: a bitmap array, packed
+ to the nearest byte */
+};
+
+struct fb_tilerect {
+ __u32 sx; /* origin in the x-axis */
+ __u32 sy; /* origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+ __u32 index; /* what tile to use: index to tile map */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+ __u32 rop; /* raster operation */
+};
+
+struct fb_tilearea {
+ __u32 sx; /* source origin in the x-axis */
+ __u32 sy; /* source origin in the y-axis */
+ __u32 dx; /* destination origin in the x-axis */
+ __u32 dy; /* destination origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+};
+
+struct fb_tileblit {
+ __u32 sx; /* origin in the x-axis */
+ __u32 sy; /* origin in the y-axis */
+ __u32 width; /* number of tiles in the x-axis */
+ __u32 height; /* number of tiles in the y-axis */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+ __u32 length; /* number of tiles to draw */
+ __u32 *indices; /* array of indices to tile map */
+};
+
+struct fb_tilecursor {
+ __u32 sx; /* cursor position in the x-axis */
+ __u32 sy; /* cursor position in the y-axis */
+ __u32 mode; /* 0 = erase, 1 = draw */
+ __u32 shape; /* see FB_TILE_CURSOR_* */
+ __u32 fg; /* foreground color */
+ __u32 bg; /* background color */
+};
+
+struct fb_tile_ops {
+ /* set tile characteristics */
+ void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map);
+
+ /* all dimensions from hereon are in terms of tiles */
+
+ /* move a rectangular region of tiles from one area to another*/
+ void (*fb_tilecopy)(struct fb_info *info, struct fb_tilearea *area);
+ /* fill a rectangular region with a tile */
+ void (*fb_tilefill)(struct fb_info *info, struct fb_tilerect *rect);
+ /* copy an array of tiles */
+ void (*fb_tileblit)(struct fb_info *info, struct fb_tileblit *blit);
+ /* cursor */
+ void (*fb_tilecursor)(struct fb_info *info,
+ struct fb_tilecursor *cursor);
+ /* get maximum length of the tile map */
+ int (*fb_get_tilemax)(struct fb_info *info);
+};
+#endif /* CONFIG_FB_TILEBLITTING */
+
+/* FBINFO_* = fb_info.flags bit flags */
+#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */
+#define FBINFO_HWACCEL_DISABLED 0x0002
+ /* When FBINFO_HWACCEL_DISABLED is set:
+ * Hardware acceleration is turned off. Software implementations
+ * of required functions (copyarea(), fillrect(), and imageblit())
+	 * take over; the acceleration engine should be in a quiescent state */
+
+/* hints */
+#define FBINFO_VIRTFB 0x0004 /* FB is System RAM, not device. */
+#define FBINFO_PARTIAL_PAN_OK 0x0040 /* otw use pan only for double-buffering */
+#define FBINFO_READS_FAST 0x0080 /* soft-copy faster than rendering */
+
+/* hardware supported ops */
+/* semantics: when a bit is set, it indicates that the operation is
+ * accelerated by hardware.
+ * required functions will still work even if the bit is not set.
+ * optional functions may not even exist if the flag bit is not set.
+ */
+#define FBINFO_HWACCEL_NONE 0x0000
+#define FBINFO_HWACCEL_COPYAREA 0x0100 /* required */
+#define FBINFO_HWACCEL_FILLRECT 0x0200 /* required */
+#define FBINFO_HWACCEL_IMAGEBLIT 0x0400 /* required */
+#define FBINFO_HWACCEL_ROTATE 0x0800 /* optional */
+#define FBINFO_HWACCEL_XPAN 0x1000 /* optional */
+#define FBINFO_HWACCEL_YPAN 0x2000 /* optional */
+#define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */
+
+#define FBINFO_MISC_USEREVENT 0x10000 /* event request
+ from userspace */
+#define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
+
+/* A driver may set this flag to indicate that it does want a set_par to be
+ * called every time fbcon_switch is executed. The advantage is that with
+ * this flag set you can really be sure that set_par is always called before
+ * any of the functions dependent on the correct hardware state or altering
+ * that state, even if you are using some broken X releases. The disadvantage
+ * is that it introduces unwanted delays to every console switch if set_par
+ * is slow. It is a good idea to try this flag in the driver's initialization
+ * code whenever there is a bug report related to switching between X and the
+ * framebuffer console.
+ */
+#define FBINFO_MISC_ALWAYS_SETPAR 0x40000
+
+/* where the fb is a firmware driver, and can be replaced with a proper one */
+#define FBINFO_MISC_FIRMWARE 0x80000
+/*
+ * Host and GPU endianness differ.
+ */
+#define FBINFO_FOREIGN_ENDIAN 0x100000
+/*
+ * Big endian math. This is the same flag value as above, but with a different
+ * meaning: it is set by the fb subsystem depending on the FOREIGN_ENDIAN flag
+ * and host endianness. Drivers should not use this flag.
+ */
+#define FBINFO_BE_MATH 0x100000
+
+/* report to the VT layer that this fb driver can accept forced console
+ output like oopses */
+#define FBINFO_CAN_FORCE_OUTPUT 0x200000
+
+struct fb_info {
+ atomic_t count;
+ int node;
+ int flags;
+ struct mutex lock; /* Lock for open/release/ioctl funcs */
+ struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */
+ struct fb_var_screeninfo var; /* Current var */
+ struct fb_fix_screeninfo fix; /* Current fix */
+ struct fb_monspecs monspecs; /* Current Monitor specs */
+ struct work_struct queue; /* Framebuffer event queue */
+ struct fb_pixmap pixmap; /* Image hardware mapper */
+ struct fb_pixmap sprite; /* Cursor hardware mapper */
+ struct fb_cmap cmap; /* Current cmap */
+ struct list_head modelist; /* mode list */
+ struct fb_videomode *mode; /* current mode */
+
+#ifdef CONFIG_FB_BACKLIGHT
+ /* assigned backlight device */
+ /* set before framebuffer registration,
+ remove after unregister */
+ struct backlight_device *bl_dev;
+
+ /* Backlight level curve */
+ struct mutex bl_curve_mutex;
+ u8 bl_curve[FB_BACKLIGHT_LEVELS];
+#endif
+#ifdef CONFIG_FB_DEFERRED_IO
+ struct delayed_work deferred_work;
+ struct fb_deferred_io *fbdefio;
+#endif
+
+ struct fb_ops *fbops;
+ struct device *device; /* This is the parent */
+ struct device *dev; /* This is this fb device */
+ int class_flag; /* private sysfs flags */
+#ifdef CONFIG_FB_TILEBLITTING
+ struct fb_tile_ops *tileops; /* Tile Blitting */
+#endif
+ char __iomem *screen_base; /* Virtual address */
+ unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */
+ void *pseudo_palette; /* Fake palette of 16 colors */
+#define FBINFO_STATE_RUNNING 0
+#define FBINFO_STATE_SUSPENDED 1
+ u32 state; /* Hardware state i.e suspend */
+ void *fbcon_par; /* fbcon use-only private area */
+ /* From here on everything is device dependent */
+ void *par;
+ /* we need the PCI or similar aperture base/size not
+ smem_start/size as smem_start may just be an object
+ allocated inside the aperture so may not actually overlap */
+ struct apertures_struct {
+ unsigned int count;
+ struct aperture {
+ resource_size_t base;
+ resource_size_t size;
+ } ranges[0];
+ } *apertures;
+
+ bool skip_vt_switch; /* no VT switch on suspend/resume required */
+};
+
+static inline struct apertures_struct *alloc_apertures(unsigned int max_num) {
+ struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct)
+ + max_num * sizeof(struct aperture), GFP_KERNEL);
+ if (!a)
+ return NULL;
+ a->count = max_num;
+ return a;
+}
+
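+/*
+ * Editor's sketch (illustrative): a firmware driver typically describes a
+ * single aperture before registering; base and size here are placeholders,
+ * not real addresses.
+ */
+static inline struct apertures_struct *
+example_single_aperture(resource_size_t base, resource_size_t size)
+{
+	struct apertures_struct *ap = alloc_apertures(1);
+
+	if (!ap)
+		return NULL;
+	ap->ranges[0].base = base;
+	ap->ranges[0].size = size;
+	return ap;
+}
+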
+#ifdef MODULE
+#define FBINFO_DEFAULT FBINFO_MODULE
+#else
+#define FBINFO_DEFAULT 0
+#endif
+
+// This will go away
+#define FBINFO_FLAG_MODULE FBINFO_MODULE
+#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
+
+/* This will go away
+ * fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags
+ * when it wants to turn the acceleration engine on. This is
+ * really a separate operation, and should be modified via sysfs.
+ * But for now, we leave it broken with the following define
+ */
+#define STUPID_ACCELF_TEXT_SHIT
+
+// This will go away
+#if defined(__sparc__)
+
+/* We map all of our framebuffers such that big-endian accesses
+ * are what we want, so the following is sufficient.
+ */
+
+// This will go away
+#define fb_readb sbus_readb
+#define fb_readw sbus_readw
+#define fb_readl sbus_readl
+#define fb_readq sbus_readq
+#define fb_writeb sbus_writeb
+#define fb_writew sbus_writew
+#define fb_writel sbus_writel
+#define fb_writeq sbus_writeq
+#define fb_memset sbus_memset_io
+#define fb_memcpy_fromfb sbus_memcpy_fromio
+#define fb_memcpy_tofb sbus_memcpy_toio
+
+#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
+
+#define fb_readb __raw_readb
+#define fb_readw __raw_readw
+#define fb_readl __raw_readl
+#define fb_readq __raw_readq
+#define fb_writeb __raw_writeb
+#define fb_writew __raw_writew
+#define fb_writel __raw_writel
+#define fb_writeq __raw_writeq
+#define fb_memset memset_io
+#define fb_memcpy_fromfb memcpy_fromio
+#define fb_memcpy_tofb memcpy_toio
+
+#else
+
+#define fb_readb(addr) (*(volatile u8 *) (addr))
+#define fb_readw(addr) (*(volatile u16 *) (addr))
+#define fb_readl(addr) (*(volatile u32 *) (addr))
+#define fb_readq(addr) (*(volatile u64 *) (addr))
+#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b))
+#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b))
+#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b))
+#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b))
+#define fb_memset memset
+#define fb_memcpy_fromfb memcpy
+#define fb_memcpy_tofb memcpy
+
+#endif
+
+#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0)
+#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \
+ (val) << (bits))
+#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? (val) << (bits) : \
+ (val) >> (bits))
+
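+/*
+ * Editor's worked example (illustrative): on a little-endian host without
+ * FBINFO_BE_MATH, FB_LEFT_POS(p, 8) == 0, FB_SHIFT_HIGH(p, 0x1, 4) == 0x10
+ * and FB_SHIFT_LOW(p, 0x10, 4) == 0x1; with big-endian math the position
+ * becomes 32 - bpp and the two shift directions are swapped.
+ */
+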
+ /*
+ * `Generic' versions of the frame buffer device operations
+ */
+
+extern int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
+extern int fb_blank(struct fb_info *info, int blank);
+extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+extern void cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+extern void cfb_imageblit(struct fb_info *info, const struct fb_image *image);
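+
+/*
+ * Editor's sketch (illustrative only): an unaccelerated driver can point its
+ * drawing hooks straight at the generic cfb_* helpers declared above and fill
+ * in the remaining fb_ops callbacks as it grows hardware-specific code.
+ */
+static struct fb_ops example_cfb_only_ops = {
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+};
+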
+/*
+ * Drawing operations where framebuffer is in system RAM
+ */
+extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+extern void sys_copyarea(struct fb_info *info, const struct fb_copyarea *area);
+extern void sys_imageblit(struct fb_info *info, const struct fb_image *image);
+extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
+ size_t count, loff_t *ppos);
+extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos);
+
+/* drivers/video/fbmem.c */
+extern int register_framebuffer(struct fb_info *fb_info);
+extern int unregister_framebuffer(struct fb_info *fb_info);
+extern int unlink_framebuffer(struct fb_info *fb_info);
+extern int remove_conflicting_framebuffers(struct apertures_struct *a,
+ const char *name, bool primary);
+extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
+extern int fb_show_logo(struct fb_info *fb_info, int rotate);
+extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
+extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
+ u32 height, u32 shift_high, u32 shift_low, u32 mod);
+extern void fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 s_pitch, u32 height);
+extern void fb_set_suspend(struct fb_info *info, int state);
+extern int fb_get_color_depth(struct fb_var_screeninfo *var,
+ struct fb_fix_screeninfo *fix);
+extern int fb_get_options(const char *name, char **option);
+extern int fb_new_modelist(struct fb_info *info);
+
+extern struct fb_info *registered_fb[FB_MAX];
+extern int num_registered_fb;
+extern struct class *fb_class;
+
+extern int lock_fb_info(struct fb_info *info);
+
+static inline void unlock_fb_info(struct fb_info *info)
+{
+ mutex_unlock(&info->lock);
+}
+
+static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
+ u8 *src, u32 s_pitch, u32 height)
+{
+ u32 i, j;
+
+ d_pitch -= s_pitch;
+
+ for (i = height; i--; ) {
+ /* s_pitch is a few bytes at the most, memcpy is suboptimal */
+ for (j = 0; j < s_pitch; j++)
+ *dst++ = *src++;
+ dst += d_pitch;
+ }
+}
+
+/* drivers/video/fb_defio.c */
+extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+ struct inode *inode,
+ struct file *file);
+extern void fb_deferred_io_cleanup(struct fb_info *info);
+extern int fb_deferred_io_fsync(struct file *file, loff_t start,
+ loff_t end, int datasync);
+
+static inline bool fb_be_math(struct fb_info *info)
+{
+#ifdef CONFIG_FB_FOREIGN_ENDIAN
+#if defined(CONFIG_FB_BOTH_ENDIAN)
+ return info->flags & FBINFO_BE_MATH;
+#elif defined(CONFIG_FB_BIG_ENDIAN)
+ return true;
+#elif defined(CONFIG_FB_LITTLE_ENDIAN)
+ return false;
+#endif /* CONFIG_FB_BOTH_ENDIAN */
+#else
+#ifdef __BIG_ENDIAN
+ return true;
+#else
+ return false;
+#endif /* __BIG_ENDIAN */
+#endif /* CONFIG_FB_FOREIGN_ENDIAN */
+}
+
+/* drivers/video/fbsysfs.c */
+extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
+extern void framebuffer_release(struct fb_info *info);
+extern int fb_init_device(struct fb_info *fb_info);
+extern void fb_cleanup_device(struct fb_info *head);
+extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
+
+/* drivers/video/fbmon.c */
+#define FB_MAXTIMINGS 0
+#define FB_VSYNCTIMINGS 1
+#define FB_HSYNCTIMINGS 2
+#define FB_DCLKTIMINGS 3
+#define FB_IGNOREMON 0x100
+
+#define FB_MODE_IS_UNKNOWN 0
+#define FB_MODE_IS_DETAILED 1
+#define FB_MODE_IS_STANDARD 2
+#define FB_MODE_IS_VESA 4
+#define FB_MODE_IS_CALCULATED 8
+#define FB_MODE_IS_FIRST 16
+#define FB_MODE_IS_FROM_VAR 32
+
+extern int fbmon_dpms(const struct fb_info *fb_info);
+extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var,
+ struct fb_info *info);
+extern int fb_validate_mode(const struct fb_var_screeninfo *var,
+ struct fb_info *info);
+extern int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var);
+extern const unsigned char *fb_firmware_edid(struct device *device);
+extern void fb_edid_to_monspecs(unsigned char *edid,
+ struct fb_monspecs *specs);
+extern void fb_edid_add_monspecs(unsigned char *edid,
+ struct fb_monspecs *specs);
+extern void fb_destroy_modedb(struct fb_videomode *modedb);
+extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
+extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
+
+extern int of_get_fb_videomode(struct device_node *np,
+ struct fb_videomode *fb,
+ int index);
+extern int fb_videomode_from_videomode(const struct videomode *vm,
+ struct fb_videomode *fbmode);
+
+/* drivers/video/modedb.c */
+#define VESA_MODEDB_SIZE 43
+#define DMT_SIZE 0x50
+
+extern void fb_var_to_videomode(struct fb_videomode *mode,
+ const struct fb_var_screeninfo *var);
+extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
+ const struct fb_videomode *mode);
+extern int fb_mode_is_equal(const struct fb_videomode *mode1,
+ const struct fb_videomode *mode2);
+extern int fb_add_videomode(const struct fb_videomode *mode,
+ struct list_head *head);
+extern void fb_delete_videomode(const struct fb_videomode *mode,
+ struct list_head *head);
+extern const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var,
+ struct list_head *head);
+extern const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var,
+ struct list_head *head);
+extern const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode,
+ struct list_head *head);
+extern void fb_destroy_modelist(struct list_head *head);
+extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num,
+ struct list_head *head);
+extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
+ struct list_head *head);
+
+/* drivers/video/fbcmap.c */
+extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
+extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
+extern void fb_dealloc_cmap(struct fb_cmap *cmap);
+extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to);
+extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to);
+extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info);
+extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info);
+extern const struct fb_cmap *fb_default_cmap(int len);
+extern void fb_invert_cmaps(void);
+
+struct fb_videomode {
+ const char *name; /* optional */
+ u32 refresh; /* optional */
+ u32 xres;
+ u32 yres;
+ u32 pixclock;
+ u32 left_margin;
+ u32 right_margin;
+ u32 upper_margin;
+ u32 lower_margin;
+ u32 hsync_len;
+ u32 vsync_len;
+ u32 sync;
+ u32 vmode;
+ u32 flag;
+};
+
+struct dmt_videomode {
+ u32 dmt_id;
+ u32 std_2byte_code;
+ u32 cvt_3byte_code;
+ const struct fb_videomode *mode;
+};
+
+extern const char *fb_mode_option;
+extern const struct fb_videomode vesa_modes[];
+extern const struct fb_videomode cea_modes[64];
+extern const struct dmt_videomode dmt_modes[];
+
+struct fb_modelist {
+ struct list_head list;
+ struct fb_videomode mode;
+};
+
+extern int fb_find_mode(struct fb_var_screeninfo *var,
+ struct fb_info *info, const char *mode_option,
+ const struct fb_videomode *db,
+ unsigned int dbsize,
+ const struct fb_videomode *default_mode,
+ unsigned int default_bpp);
+
+/* Convenience logging macros */
+#define fb_err(fb_info, fmt, ...) \
+ pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_notice(info, fmt, ...) \
+	pr_notice("fb%d: " fmt, (info)->node, ##__VA_ARGS__)
+#define fb_warn(fb_info, fmt, ...) \
+ pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_info(fb_info, fmt, ...) \
+ pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+#define fb_dbg(fb_info, fmt, ...) \
+ pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
+
+#endif /* _LINUX_FB_H */
diff --git a/include/linux/fcdevice.h b/include/linux/fcdevice.h
new file mode 100644
index 000000000..5009fa16b
--- /dev/null
+++ b/include/linux/fcdevice.h
@@ -0,0 +1,33 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Fibre Channel handlers.
+ *
+ * Version: @(#)fcdevice.h 1.0.0 09/26/98
+ *
+ * Authors: Vineet Abraham <vma@iol.unh.edu>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_FCDEVICE_H
+#define _LINUX_FCDEVICE_H
+
+
+#include <linux/if_fc.h>
+
+#ifdef __KERNEL__
+struct net_device *alloc_fcdev(int sizeof_priv);
+#endif
+
+#endif /* _LINUX_FCDEVICE_H */
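
alloc_fcdev() is the only hook this header exports; it allocates a Fibre Channel struct net_device with room for driver-private state. A minimal, purely illustrative allocation path might look as follows; the private struct and the register_netdev()/free_netdev() plumbing are assumptions, not part of this header.

#include <linux/fcdevice.h>
#include <linux/netdevice.h>

struct example_fc_priv {	/* hypothetical per-device state */
	void __iomem *regs;
};

static int example_fc_probe(void)
{
	struct net_device *dev;
	int ret;

	dev = alloc_fcdev(sizeof(struct example_fc_priv));
	if (!dev)
		return -ENOMEM;

	/* netdev_priv(dev) now points at the example_fc_priv area */
	ret = register_netdev(dev);
	if (ret)
		free_netdev(dev);
	return ret;
}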
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
new file mode 100644
index 000000000..76ce329e6
--- /dev/null
+++ b/include/linux/fcntl.h
@@ -0,0 +1,31 @@
+#ifndef _LINUX_FCNTL_H
+#define _LINUX_FCNTL_H
+
+#include <uapi/linux/fcntl.h>
+
+
+#ifndef force_o_largefile
+#define force_o_largefile() (BITS_PER_LONG != 32)
+#endif
+
+#if BITS_PER_LONG == 32
+#define IS_GETLK32(cmd) ((cmd) == F_GETLK)
+#define IS_SETLK32(cmd) ((cmd) == F_SETLK)
+#define IS_SETLKW32(cmd) ((cmd) == F_SETLKW)
+#define IS_GETLK64(cmd) ((cmd) == F_GETLK64)
+#define IS_SETLK64(cmd) ((cmd) == F_SETLK64)
+#define IS_SETLKW64(cmd) ((cmd) == F_SETLKW64)
+#else
+#define IS_GETLK32(cmd) (0)
+#define IS_SETLK32(cmd) (0)
+#define IS_SETLKW32(cmd) (0)
+#define IS_GETLK64(cmd) ((cmd) == F_GETLK)
+#define IS_SETLK64(cmd) ((cmd) == F_SETLK)
+#define IS_SETLKW64(cmd) ((cmd) == F_SETLKW)
+#endif /* BITS_PER_LONG == 32 */
+
+#define IS_GETLK(cmd) (IS_GETLK32(cmd) || IS_GETLK64(cmd))
+#define IS_SETLK(cmd) (IS_SETLK32(cmd) || IS_SETLK64(cmd))
+#define IS_SETLKW(cmd) (IS_SETLKW32(cmd) || IS_SETLKW64(cmd))
+
+#endif
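
The IS_*LK macros above exist so that lock-handling code does not have to care whether it runs on a 32-bit or 64-bit kernel. A small, hypothetical classifier shows the intended usage; the function itself is not part of any kernel API.

#include <linux/fcntl.h>

/* Illustrative only: classify a locking fcntl command independent of
 * BITS_PER_LONG, the way a filesystem lock handler might. */
static const char *example_lock_kind(unsigned int cmd)
{
	if (IS_GETLK(cmd))
		return "query";
	if (IS_SETLKW(cmd))
		return "blocking set";
	if (IS_SETLK(cmd))
		return "non-blocking set";
	return "not a lock command";
}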
diff --git a/include/linux/fd.h b/include/linux/fd.h
new file mode 100644
index 000000000..69275bccc
--- /dev/null
+++ b/include/linux/fd.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_FD_H
+#define _LINUX_FD_H
+
+#include <uapi/linux/fd.h>
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+struct compat_floppy_struct {
+ compat_uint_t size;
+ compat_uint_t sect;
+ compat_uint_t head;
+ compat_uint_t track;
+ compat_uint_t stretch;
+ unsigned char gap;
+ unsigned char rate;
+ unsigned char spec1;
+ unsigned char fmt_gap;
+ const compat_caddr_t name;
+};
+
+#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct)
+#endif
+#endif
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
new file mode 100644
index 000000000..9a79f0106
--- /dev/null
+++ b/include/linux/fddidevice.h
@@ -0,0 +1,33 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the FDDI handlers.
+ *
+ * Version: @(#)fddidevice.h 1.0.0 08/12/96
+ *
+ * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * fddidevice.h is based on previous trdevice.h work by
+ * Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_FDDIDEVICE_H
+#define _LINUX_FDDIDEVICE_H
+
+#include <linux/if_fddi.h>
+
+#ifdef __KERNEL__
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int fddi_change_mtu(struct net_device *dev, int new_mtu);
+struct net_device *alloc_fddidev(int sizeof_priv);
+#endif
+
+#endif /* _LINUX_FDDIDEVICE_H */
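
On the receive path, fddi_type_trans() strips the FDDI header and returns the protocol value that belongs in skb->protocol. The fragment below sketches how a hypothetical FDDI driver's receive completion could use it; the surrounding DMA-ring handling is omitted and the function name is illustrative.

#include <linux/fddidevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: only the fddi_type_trans()/netif_rx() steps are real API usage;
 * the skb is assumed to have been filled in by the hardware already. */
static void example_fddi_rx(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = fddi_type_trans(skb, dev);
	netif_rx(skb);
}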
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
new file mode 100644
index 000000000..230f87bdf
--- /dev/null
+++ b/include/linux/fdtable.h
@@ -0,0 +1,118 @@
+/*
+ * descriptor table internals; you almost certainly want file.h instead.
+ */
+
+#ifndef __LINUX_FDTABLE_H
+#define __LINUX_FDTABLE_H
+
+#include <linux/posix_types.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include <linux/atomic.h>
+
+/*
+ * The default fd array needs to be at least BITS_PER_LONG,
+ * as this is the granularity returned by copy_fdset().
+ */
+#define NR_OPEN_DEFAULT BITS_PER_LONG
+
+struct fdtable {
+ unsigned int max_fds;
+ struct file __rcu **fd; /* current fd array */
+ unsigned long *close_on_exec;
+ unsigned long *open_fds;
+ struct rcu_head rcu;
+};
+
+static inline bool close_on_exec(int fd, const struct fdtable *fdt)
+{
+ return test_bit(fd, fdt->close_on_exec);
+}
+
+static inline bool fd_is_open(int fd, const struct fdtable *fdt)
+{
+ return test_bit(fd, fdt->open_fds);
+}
+
+/*
+ * Open file table structure
+ */
+struct files_struct {
+ /*
+ * read mostly part
+ */
+ atomic_t count;
+ struct fdtable __rcu *fdt;
+ struct fdtable fdtab;
+ /*
+ * written part on a separate cache line in SMP
+ */
+ spinlock_t file_lock ____cacheline_aligned_in_smp;
+ int next_fd;
+ unsigned long close_on_exec_init[1];
+ unsigned long open_fds_init[1];
+ struct file __rcu * fd_array[NR_OPEN_DEFAULT];
+};
+
+struct file_operations;
+struct vfsmount;
+struct dentry;
+
+#define rcu_dereference_check_fdtable(files, fdtfd) \
+ rcu_dereference_check((fdtfd), lockdep_is_held(&(files)->file_lock))
+
+#define files_fdtable(files) \
+ rcu_dereference_check_fdtable((files), (files)->fdt)
+
+/*
+ * The caller must ensure that fd table isn't shared or hold rcu or file lock
+ */
+static inline struct file *__fcheck_files(struct files_struct *files, unsigned int fd)
+{
+ struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+
+ if (fd < fdt->max_fds)
+ return rcu_dereference_raw(fdt->fd[fd]);
+ return NULL;
+}
+
+static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
+{
+ rcu_lockdep_assert(rcu_read_lock_held() ||
+ lockdep_is_held(&files->file_lock),
+ "suspicious rcu_dereference_check() usage");
+ return __fcheck_files(files, fd);
+}
+
+/*
+ * Check whether the specified fd has an open file.
+ */
+#define fcheck(fd) fcheck_files(current->files, fd)
+
+struct task_struct;
+
+struct files_struct *get_files_struct(struct task_struct *);
+void put_files_struct(struct files_struct *fs);
+void reset_files_struct(struct files_struct *);
+int unshare_files(struct files_struct **);
+struct files_struct *dup_fd(struct files_struct *, int *);
+void do_close_on_exec(struct files_struct *);
+int iterate_fd(struct files_struct *, unsigned,
+ int (*)(const void *, struct file *, unsigned),
+ const void *);
+
+extern int __alloc_fd(struct files_struct *files,
+ unsigned start, unsigned end, unsigned flags);
+extern void __fd_install(struct files_struct *files,
+ unsigned int fd, struct file *file);
+extern int __close_fd(struct files_struct *files,
+ unsigned int fd);
+
+extern struct kmem_cache *files_cachep;
+
+#endif /* __LINUX_FDTABLE_H */
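
iterate_fd() walks a files_struct under its own locking and hands each open struct file to a callback, which is usually simpler and safer than touching the fdtable directly. The sketch below counts a task's open descriptors; the helper names are illustrative.

#include <linux/fdtable.h>
#include <linux/sched.h>

/* Callback signature matches the iterate_fd() declaration above; the
 * opaque pointer carries the counter. Return 0 to keep iterating. */
static int example_count_one(const void *p, struct file *file, unsigned fd)
{
	(*(unsigned int *)p)++;
	return 0;
}

static unsigned int example_count_open_files(struct task_struct *task)
{
	struct files_struct *files = get_files_struct(task);
	unsigned int count = 0;

	if (!files)
		return 0;
	iterate_fd(files, 0, example_count_one, &count);
	put_files_struct(files);
	return count;
}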
diff --git a/include/linux/fec.h b/include/linux/fec.h
new file mode 100644
index 000000000..1454a5036
--- /dev/null
+++ b/include/linux/fec.h
@@ -0,0 +1,25 @@
+/* include/linux/fec.h
+ *
+ * Copyright (c) 2009 Orex Computed Radiography
+ * Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
+ * Header file for the FEC platform data
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_FEC_H__
+#define __LINUX_FEC_H__
+
+#include <linux/phy.h>
+
+struct fec_platform_data {
+ phy_interface_t phy;
+ unsigned char mac[ETH_ALEN];
+ void (*sleep_mode_enable)(int enabled);
+};
+
+#endif
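
fec_platform_data is consumed by the Freescale FEC driver through ordinary platform-device registration. A board-file fragment might fill it in as below; the MAC address, the RMII choice and the helper names are illustrative assumptions. The structure would then typically be attached with platform_device_add_data() before the device is registered.

#include <linux/fec.h>

/* Hypothetical board-specific PHY power handling for sleep mode. */
static void example_fec_sleep_mode(int enabled)
{
	/* toggle a regulator or GPIO here */
}

static struct fec_platform_data example_fec_pdata = {
	.phy			= PHY_INTERFACE_MODE_RMII,
	.mac			= { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 },
	.sleep_mode_enable	= example_fec_sleep_mode,
};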
diff --git a/include/linux/fence.h b/include/linux/fence.h
new file mode 100644
index 000000000..39efee130
--- /dev/null
+++ b/include/linux/fence.h
@@ -0,0 +1,360 @@
+/*
+ * Fence mechanism for dma-buf to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_FENCE_H
+#define __LINUX_FENCE_H
+
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/printk.h>
+#include <linux/rcupdate.h>
+
+struct fence;
+struct fence_ops;
+struct fence_cb;
+
+/**
+ * struct fence - software synchronization primitive
+ * @refcount: refcount for this fence
+ * @ops: fence_ops associated with this fence
+ * @rcu: used for releasing fence with kfree_rcu
+ * @cb_list: list of all callbacks to call
+ * @lock: spin_lock_irqsave used for locking
+ * @context: execution context this fence belongs to, returned by
+ * fence_context_alloc()
+ * @seqno: the sequence number of this fence inside the execution context,
+ * can be compared to decide which fence would be signaled later.
+ * @flags: A mask of FENCE_FLAG_* defined below
+ * @timestamp: Timestamp when the fence was signaled.
+ * @status: Optional, only valid if < 0, must be set before calling
+ * fence_signal, indicates that the fence has completed with an error.
+ *
+ * the flags member must be manipulated and read using the appropriate
+ * atomic ops (bit_*), so taking the spinlock will not be needed most
+ * of the time.
+ *
+ * FENCE_FLAG_SIGNALED_BIT - fence is already signaled
+ * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called*
+ * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
+ * implementer of the fence for its own purposes. Can be used in different
+ * ways by different fence implementers, so do not rely on this.
+ *
+ * *) Since atomic bitops are used, this is not guaranteed to be the case.
+ * Particularly, if the bit was set, but fence_signal was called right
+ * before this bit was set, it would have been able to set the
+ * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called.
+ * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting
+ * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
+ * after fence_signal was called, any enable_signaling call will have either
+ * been completed, or never called at all.
+ */
+struct fence {
+ struct kref refcount;
+ const struct fence_ops *ops;
+ struct rcu_head rcu;
+ struct list_head cb_list;
+ spinlock_t *lock;
+ unsigned context, seqno;
+ unsigned long flags;
+ ktime_t timestamp;
+ int status;
+};
+
+enum fence_flag_bits {
+ FENCE_FLAG_SIGNALED_BIT,
+ FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ FENCE_FLAG_USER_BITS, /* must always be last member */
+};
+
+typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
+
+/**
+ * struct fence_cb - callback for fence_add_callback
+ * @node: used by fence_add_callback to append this struct to fence::cb_list
+ * @func: fence_func_t to call
+ *
+ * This struct will be initialized by fence_add_callback, additional
+ * data can be passed along by embedding fence_cb in another struct.
+ */
+struct fence_cb {
+ struct list_head node;
+ fence_func_t func;
+};
+
+/**
+ * struct fence_ops - operations implemented for fence
+ * @get_driver_name: returns the driver name.
+ * @get_timeline_name: return the name of the context this fence belongs to.
+ * @enable_signaling: enable software signaling of fence.
+ * @signaled: [optional] peek whether the fence is signaled, can be null.
+ * @wait: custom wait implementation, or fence_default_wait.
+ * @release: [optional] called on destruction of fence, can be null
+ * @fill_driver_data: [optional] callback to fill in free-form debug info
+ * Returns amount of bytes filled, or -errno.
+ * @fence_value_str: [optional] fills in the value of the fence as a string
+ * @timeline_value_str: [optional] fills in the current value of the timeline
+ * as a string
+ *
+ * Notes on enable_signaling:
+ * For fence implementations that have the capability for hw->hw
+ * signaling, they can implement this op to enable the necessary
+ * irqs, or insert commands into cmdstream, etc. This is called
+ * in the first wait() or add_callback() path to let the fence
+ * implementation know that there is another driver waiting on
+ * the signal (ie. hw->sw case).
+ *
+ * This function can be called from atomic context, but not
+ * from irq context, so normal spinlocks can be used.
+ *
+ * A return value of false indicates the fence already passed,
+ * or some failure occurred that made it impossible to enable
+ * signaling. True indicates successful enabling.
+ *
+ * fence->status may be set in enable_signaling, but only when false is
+ * returned.
+ *
+ * Calling fence_signal before enable_signaling is called allows
+ * for a tiny race window in which enable_signaling is called during,
+ * before, or after fence_signal. To fight this, it is recommended
+ * that before enable_signaling returns true an extra reference is
+ * taken on the fence, to be released when the fence is signaled.
+ * This will mean fence_signal will still be called twice, but
+ * the second time will be a noop since it was already signaled.
+ *
+ * Notes on signaled:
+ * May set fence->status if returning true.
+ *
+ * Notes on wait:
+ * Must not be NULL, set to fence_default_wait for default implementation.
+ * The fence_default_wait implementation should work for any fence, as long
+ * as enable_signaling works correctly.
+ *
+ * Must return -ERESTARTSYS if the wait is intr = true and the wait was
+ * interrupted, and remaining jiffies if fence has signaled, or 0 if wait
+ * timed out. Can also return other error values on custom implementations,
+ * which should be treated as if the fence is signaled. For example a hardware
+ * lockup could be reported like that.
+ *
+ * Notes on release:
+ * Can be NULL, this function allows additional commands to run on
+ * destruction of the fence. Can be called from irq context.
+ * If pointer is set to NULL, kfree will get called instead.
+ */
+
+struct fence_ops {
+ const char * (*get_driver_name)(struct fence *fence);
+ const char * (*get_timeline_name)(struct fence *fence);
+ bool (*enable_signaling)(struct fence *fence);
+ bool (*signaled)(struct fence *fence);
+ signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
+ void (*release)(struct fence *fence);
+
+ int (*fill_driver_data)(struct fence *fence, void *data, int size);
+ void (*fence_value_str)(struct fence *fence, char *str, int size);
+ void (*timeline_value_str)(struct fence *fence, char *str, int size);
+};
+
+void fence_init(struct fence *fence, const struct fence_ops *ops,
+ spinlock_t *lock, unsigned context, unsigned seqno);
+
+void fence_release(struct kref *kref);
+void fence_free(struct fence *fence);
+
+/**
+ * fence_get - increases refcount of the fence
+ * @fence: [in] fence to increase refcount of
+ *
+ * Returns the same fence, with refcount increased by 1.
+ */
+static inline struct fence *fence_get(struct fence *fence)
+{
+ if (fence)
+ kref_get(&fence->refcount);
+ return fence;
+}
+
+/**
+ * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock
+ * @fence: [in] fence to increase refcount of
+ *
+ * Function returns NULL if no refcount could be obtained, or the fence.
+ */
+static inline struct fence *fence_get_rcu(struct fence *fence)
+{
+ if (kref_get_unless_zero(&fence->refcount))
+ return fence;
+ else
+ return NULL;
+}
+
+/**
+ * fence_put - decreases refcount of the fence
+ * @fence: [in] fence to reduce refcount of
+ */
+static inline void fence_put(struct fence *fence)
+{
+ if (fence)
+ kref_put(&fence->refcount, fence_release);
+}
+
+int fence_signal(struct fence *fence);
+int fence_signal_locked(struct fence *fence);
+signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout);
+int fence_add_callback(struct fence *fence, struct fence_cb *cb,
+ fence_func_t func);
+bool fence_remove_callback(struct fence *fence, struct fence_cb *cb);
+void fence_enable_sw_signaling(struct fence *fence);
+
+/**
+ * fence_is_signaled_locked - Return an indication if the fence is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
+ * haven't been called before.
+ *
+ * This function requires fence->lock to be held.
+ */
+static inline bool
+fence_is_signaled_locked(struct fence *fence)
+{
+ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ fence_signal_locked(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * fence_is_signaled - Return an indication if the fence is signaled yet.
+ * @fence: [in] the fence to check
+ *
+ * Returns true if the fence was already signaled, false if not. Since this
+ * function doesn't enable signaling, it is not guaranteed to ever return
+ * true if fence_add_callback, fence_wait or fence_enable_sw_signaling
+ * haven't been called before.
+ *
+ * It's recommended for seqno fences to call fence_signal when the
+ * operation is complete, as it makes it possible to prevent issues from
+ * wraparound between time of issue and time of use by checking the return
+ * value of this function before calling hardware-specific wait instructions.
+ */
+static inline bool
+fence_is_signaled(struct fence *fence)
+{
+ if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return true;
+
+ if (fence->ops->signaled && fence->ops->signaled(fence)) {
+ fence_signal(fence);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * fence_later - return the chronologically later fence
+ * @f1: [in] the first fence from the same context
+ * @f2: [in] the second fence from the same context
+ *
+ * Returns NULL if both fences are signaled, otherwise the fence that would be
+ * signaled last. Both fences must be from the same context, since a seqno is
+ * not re-used across contexts.
+ */
+static inline struct fence *fence_later(struct fence *f1, struct fence *f2)
+{
+ if (WARN_ON(f1->context != f2->context))
+ return NULL;
+
+ /*
+ * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been
+ * set if enable_signaling wasn't called, and enabling that here is
+ * overkill.
+ */
+ if (f2->seqno - f1->seqno <= INT_MAX)
+ return fence_is_signaled(f2) ? NULL : f2;
+ else
+ return fence_is_signaled(f1) ? NULL : f1;
+}
+
+signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
+
+
+/**
+ * fence_wait - sleep until the fence gets signaled
+ * @fence: [in] the fence to wait on
+ * @intr: [in] if true, do an interruptible wait
+ *
+ * This function will return -ERESTARTSYS if interrupted by a signal,
+ * or 0 if the fence was signaled. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly holds a reference to the fence, otherwise the
+ * fence might be freed before return, resulting in undefined behavior.
+ */
+static inline signed long fence_wait(struct fence *fence, bool intr)
+{
+ signed long ret;
+
+ /* Since fence_wait_timeout cannot timeout with
+ * MAX_SCHEDULE_TIMEOUT, only valid return values are
+ * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
+ */
+ ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
+
+ return ret < 0 ? ret : 0;
+}
+
+unsigned fence_context_alloc(unsigned num);
+
+#define FENCE_TRACE(f, fmt, args...) \
+ do { \
+ struct fence *__ff = (f); \
+ if (config_enabled(CONFIG_FENCE_TRACE)) \
+ pr_info("f %u#%u: " fmt, \
+ __ff->context, __ff->seqno, ##args); \
+ } while (0)
+
+#define FENCE_WARN(f, fmt, args...) \
+ do { \
+ struct fence *__ff = (f); \
+ pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#define FENCE_ERR(f, fmt, args...) \
+ do { \
+ struct fence *__ff = (f); \
+ pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ ##args); \
+ } while (0)
+
+#endif /* __LINUX_FENCE_H */
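
To make the contract above concrete, here is a minimal, software-only fence implementation sketch: it provides the mandatory fence_ops callbacks, reuses fence_default_wait, and relies on fence_signal() being called by some producer. The names, the global lock and the kzalloc-based allocation are assumptions, not requirements of the API.

#include <linux/fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_fence_lock);

static const char *example_get_driver_name(struct fence *f)
{
	return "example";
}

static const char *example_get_timeline_name(struct fence *f)
{
	return "example-timeline";
}

static bool example_enable_signaling(struct fence *f)
{
	return true;	/* nothing to arm; fence_signal() does the work */
}

static const struct fence_ops example_fence_ops = {
	.get_driver_name	= example_get_driver_name,
	.get_timeline_name	= example_get_timeline_name,
	.enable_signaling	= example_enable_signaling,
	.wait			= fence_default_wait,
};

static struct fence *example_fence_create(unsigned context, unsigned seqno)
{
	struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		fence_init(f, &example_fence_ops, &example_fence_lock,
			   context, seqno);
	return f;
}

A consumer would then call fence_wait(f, true) or fence_add_callback(), and drop its reference with fence_put() when done.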
diff --git a/include/linux/file.h b/include/linux/file.h
new file mode 100644
index 000000000..9a290b36b
--- /dev/null
+++ b/include/linux/file.h
@@ -0,0 +1,77 @@
+/*
+ * Wrapper functions for accessing the file_struct fd array.
+ */
+
+#ifndef __LINUX_FILE_H
+#define __LINUX_FILE_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/posix_types.h>
+
+struct file;
+
+extern void fput(struct file *);
+
+struct file_operations;
+struct vfsmount;
+struct dentry;
+struct path;
+extern struct file *alloc_file(struct path *, fmode_t mode,
+ const struct file_operations *fop);
+extern struct file *get_empty_filp(void);
+
+static inline void fput_light(struct file *file, int fput_needed)
+{
+ if (fput_needed)
+ fput(file);
+}
+
+struct fd {
+ struct file *file;
+ unsigned int flags;
+};
+#define FDPUT_FPUT 1
+#define FDPUT_POS_UNLOCK 2
+
+static inline void fdput(struct fd fd)
+{
+ if (fd.flags & FDPUT_FPUT)
+ fput(fd.file);
+}
+
+extern struct file *fget(unsigned int fd);
+extern struct file *fget_raw(unsigned int fd);
+extern unsigned long __fdget(unsigned int fd);
+extern unsigned long __fdget_raw(unsigned int fd);
+extern unsigned long __fdget_pos(unsigned int fd);
+
+static inline struct fd __to_fd(unsigned long v)
+{
+ return (struct fd){(struct file *)(v & ~3),v & 3};
+}
+
+static inline struct fd fdget(unsigned int fd)
+{
+ return __to_fd(__fdget(fd));
+}
+
+static inline struct fd fdget_raw(unsigned int fd)
+{
+ return __to_fd(__fdget_raw(fd));
+}
+
+extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
+extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
+extern void set_close_on_exec(unsigned int fd, int flag);
+extern bool get_close_on_exec(unsigned int fd);
+extern void put_filp(struct file *);
+extern int get_unused_fd_flags(unsigned flags);
+extern void put_unused_fd(unsigned int fd);
+
+extern void fd_install(unsigned int fd, struct file *file);
+
+extern void flush_delayed_fput(void);
+extern void __fput_sync(struct file *);
+
+#endif /* __LINUX_FILE_H */
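
fdget()/fdput() are the light-weight way to turn a descriptor number into a struct file for the duration of one operation. The sketch below looks up an fd and reads the inode size; the helper name and the use of file_inode()/i_size_read() from <linux/fs.h> are assumptions made for illustration.

#include <linux/file.h>
#include <linux/fs.h>

/* Sketch of the usual pattern: resolve the fd, use f.file, then fdput().
 * fdput() only calls fput() when __fdget() reported a taken reference. */
static loff_t example_fd_size(unsigned int fd)
{
	struct fd f = fdget(fd);
	loff_t ret;

	if (!f.file)
		return -EBADF;

	ret = i_size_read(file_inode(f.file));
	fdput(f);
	return ret;
}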
diff --git a/include/linux/filter.h b/include/linux/filter.h
new file mode 100644
index 000000000..fa11b3a36
--- /dev/null
+++ b/include/linux/filter.h
@@ -0,0 +1,482 @@
+/*
+ * Linux Socket Filter Data Structures
+ */
+#ifndef __LINUX_FILTER_H__
+#define __LINUX_FILTER_H__
+
+#include <stdarg.h>
+
+#include <linux/atomic.h>
+#include <linux/compat.h>
+#include <linux/skbuff.h>
+#include <linux/linkage.h>
+#include <linux/printk.h>
+#include <linux/workqueue.h>
+
+#include <asm/cacheflush.h>
+
+#include <uapi/linux/filter.h>
+#include <uapi/linux/bpf.h>
+
+struct sk_buff;
+struct sock;
+struct seccomp_data;
+struct bpf_prog_aux;
+
+/* ArgX, context and stack frame pointer register positions. Note,
+ * Arg1, Arg2, Arg3, etc are used as argument mappings of function
+ * calls in BPF_CALL instruction.
+ */
+#define BPF_REG_ARG1 BPF_REG_1
+#define BPF_REG_ARG2 BPF_REG_2
+#define BPF_REG_ARG3 BPF_REG_3
+#define BPF_REG_ARG4 BPF_REG_4
+#define BPF_REG_ARG5 BPF_REG_5
+#define BPF_REG_CTX BPF_REG_6
+#define BPF_REG_FP BPF_REG_10
+
+/* Additional register mappings for converted user programs. */
+#define BPF_REG_A BPF_REG_0
+#define BPF_REG_X BPF_REG_7
+#define BPF_REG_TMP BPF_REG_8
+
+/* BPF program can access up to 512 bytes of stack space. */
+#define MAX_BPF_STACK 512
+
+/* Helper macros for filter block array initializers. */
+
+/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
+
+#define BPF_ALU64_REG(OP, DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+#define BPF_ALU32_REG(OP, DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
+
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
+
+#define BPF_ENDIAN(TYPE, DST, LEN) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = LEN })
+
+/* Short form of mov, dst_reg = src_reg */
+
+#define BPF_MOV64_REG(DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+#define BPF_MOV32_REG(DST, SRC) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = 0 })
+
+/* Short form of mov, dst_reg = imm32 */
+
+#define BPF_MOV64_IMM(DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+#define BPF_MOV32_IMM(DST, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
+#define BPF_LD_IMM64(DST, IMM) \
+ BPF_LD_IMM64_RAW(DST, 0, IMM)
+
+#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_DW | BPF_IMM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = (__u32) (IMM) }), \
+ ((struct bpf_insn) { \
+ .code = 0, /* zero is reserved opcode */ \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = ((__u64) (IMM)) >> 32 })
+
+/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
+#define BPF_LD_MAP_FD(DST, MAP_FD) \
+ BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
+
+/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
+
+#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = IMM })
+
+#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
+
+#define BPF_LD_ABS(SIZE, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
+
+#define BPF_LD_IND(SIZE, SRC, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
+ .dst_reg = 0, \
+ .src_reg = SRC, \
+ .off = 0, \
+ .imm = IMM })
+
+/* Memory load, dst_reg = *(uint *) (src_reg + off16) */
+
+#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
+
+#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
+
+#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
+ ((struct bpf_insn) { \
+ .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
+
+#define BPF_JMP_REG(OP, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_OP(OP) | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
+
+#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Function call */
+
+#define BPF_EMIT_CALL(FUNC) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_CALL, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = ((FUNC) - __bpf_call_base) })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
+ ((struct bpf_insn) { \
+ .code = CODE, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN() \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_EXIT, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
+#define bytes_to_bpf_size(bytes) \
+({ \
+ int bpf_size = -EINVAL; \
+ \
+ if (bytes == sizeof(u8)) \
+ bpf_size = BPF_B; \
+ else if (bytes == sizeof(u16)) \
+ bpf_size = BPF_H; \
+ else if (bytes == sizeof(u32)) \
+ bpf_size = BPF_W; \
+ else if (bytes == sizeof(u64)) \
+ bpf_size = BPF_DW; \
+ \
+ bpf_size; \
+})
+
+/* Macro to invoke filter function. */
+#define SK_RUN_FILTER(filter, ctx) \
+ (*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
+
+#ifdef CONFIG_COMPAT
+/* A struct sock_filter is architecture independent. */
+struct compat_sock_fprog {
+ u16 len;
+ compat_uptr_t filter; /* struct sock_filter * */
+};
+#endif
+
+struct sock_fprog_kern {
+ u16 len;
+ struct sock_filter *filter;
+};
+
+struct bpf_binary_header {
+ unsigned int pages;
+ u8 image[];
+};
+
+struct bpf_prog {
+ u16 pages; /* Number of allocated pages */
+ bool jited; /* Is our filter JIT'ed? */
+ bool gpl_compatible; /* Is our filter GPL compatible? */
+ u32 len; /* Number of filter blocks */
+ enum bpf_prog_type type; /* Type of BPF program */
+ struct bpf_prog_aux *aux; /* Auxiliary fields */
+ struct sock_fprog_kern *orig_prog; /* Original BPF program */
+ unsigned int (*bpf_func)(const struct sk_buff *skb,
+ const struct bpf_insn *filter);
+ /* Instructions for interpreter */
+ union {
+ struct sock_filter insns[0];
+ struct bpf_insn insnsi[0];
+ };
+};
+
+struct sk_filter {
+ atomic_t refcnt;
+ struct rcu_head rcu;
+ struct bpf_prog *prog;
+};
+
+#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
+
+static inline unsigned int bpf_prog_size(unsigned int proglen)
+{
+ return max(sizeof(struct bpf_prog),
+ offsetof(struct bpf_prog, insns[proglen]));
+}
+
+#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
+
+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+{
+ set_memory_ro((unsigned long)fp, fp->pages);
+}
+
+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+{
+ set_memory_rw((unsigned long)fp, fp->pages);
+}
+#else
+static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
+{
+}
+#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
+
+int sk_filter(struct sock *sk, struct sk_buff *skb);
+
+void bpf_prog_select_runtime(struct bpf_prog *fp);
+void bpf_prog_free(struct bpf_prog *fp);
+
+int bpf_convert_filter(struct sock_filter *prog, int len,
+ struct bpf_insn *new_prog, int *new_len);
+
+struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
+struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
+ gfp_t gfp_extra_flags);
+void __bpf_prog_free(struct bpf_prog *fp);
+
+static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
+{
+ bpf_prog_unlock_ro(fp);
+ __bpf_prog_free(fp);
+}
+
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+void bpf_prog_destroy(struct bpf_prog *fp);
+
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
+int sk_attach_bpf(u32 ufd, struct sock *sk);
+int sk_detach_filter(struct sock *sk);
+
+int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
+int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
+ unsigned int len);
+
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
+void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
+
+u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+void bpf_int_jit_compile(struct bpf_prog *fp);
+
+#ifdef CONFIG_BPF_JIT
+typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
+
+struct bpf_binary_header *
+bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
+ unsigned int alignment,
+ bpf_jit_fill_hole_t bpf_fill_ill_insns);
+void bpf_jit_binary_free(struct bpf_binary_header *hdr);
+
+void bpf_jit_compile(struct bpf_prog *fp);
+void bpf_jit_free(struct bpf_prog *fp);
+
+static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
+ u32 pass, void *image)
+{
+ pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
+ flen, proglen, pass, image);
+ if (image)
+ print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
+ 16, 1, image, proglen, false);
+}
+#else
+static inline void bpf_jit_compile(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_jit_free(struct bpf_prog *fp)
+{
+ bpf_prog_unlock_free(fp);
+}
+#endif /* CONFIG_BPF_JIT */
+
+#define BPF_ANC BIT(15)
+
+static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
+{
+ BUG_ON(ftest->code & BPF_ANC);
+
+ switch (ftest->code) {
+ case BPF_LD | BPF_W | BPF_ABS:
+ case BPF_LD | BPF_H | BPF_ABS:
+ case BPF_LD | BPF_B | BPF_ABS:
+#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
+ return BPF_ANC | SKF_AD_##CODE
+ switch (ftest->k) {
+ BPF_ANCILLARY(PROTOCOL);
+ BPF_ANCILLARY(PKTTYPE);
+ BPF_ANCILLARY(IFINDEX);
+ BPF_ANCILLARY(NLATTR);
+ BPF_ANCILLARY(NLATTR_NEST);
+ BPF_ANCILLARY(MARK);
+ BPF_ANCILLARY(QUEUE);
+ BPF_ANCILLARY(HATYPE);
+ BPF_ANCILLARY(RXHASH);
+ BPF_ANCILLARY(CPU);
+ BPF_ANCILLARY(ALU_XOR_X);
+ BPF_ANCILLARY(VLAN_TAG);
+ BPF_ANCILLARY(VLAN_TAG_PRESENT);
+ BPF_ANCILLARY(PAY_OFFSET);
+ BPF_ANCILLARY(RANDOM);
+ BPF_ANCILLARY(VLAN_TPID);
+ }
+ /* Fallthrough. */
+ default:
+ return ftest->code;
+ }
+}
+
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
+ int k, unsigned int size);
+
+static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
+ unsigned int size, void *buffer)
+{
+ if (k >= 0)
+ return skb_header_pointer(skb, k, size, buffer);
+
+ return bpf_internal_load_pointer_neg_helper(skb, k, size);
+}
+
+static inline int bpf_tell_extensions(void)
+{
+ return SKF_AD_MAX;
+}
+
+#endif /* __LINUX_FILTER_H__ */
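
The BPF_* macros above are initializers for struct bpf_insn, so a program can be written as a plain C array. As a hedged, minimal example, the two instructions below set the return register to zero and exit; loading such a program (verifier, bpf_prog_create() and friends) is a separate step not shown here.

#include <linux/filter.h>

/* r0 = 0; return r0: the smallest well-formed eBPF program. */
static const struct bpf_insn example_prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};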
diff --git a/include/linux/fips.h b/include/linux/fips.h
new file mode 100644
index 000000000..f8fb07b0b
--- /dev/null
+++ b/include/linux/fips.h
@@ -0,0 +1,10 @@
+#ifndef _FIPS_H
+#define _FIPS_H
+
+#ifdef CONFIG_CRYPTO_FIPS
+extern int fips_enabled;
+#else
+#define fips_enabled 0
+#endif
+
+#endif
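
Because fips_enabled compiles to a constant 0 when CONFIG_CRYPTO_FIPS is off, callers can test it unconditionally and let the compiler drop the branch. A trivial, illustrative gate follows; the function name is hypothetical.

#include <linux/fips.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: refuse a hypothetical non-approved algorithm in FIPS mode. */
static int example_check_algo_allowed(bool algo_is_approved)
{
	if (fips_enabled && !algo_is_approved)
		return -EINVAL;
	return 0;
}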
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
new file mode 100644
index 000000000..d4b7683c7
--- /dev/null
+++ b/include/linux/firewire.h
@@ -0,0 +1,472 @@
+#ifndef _LINUX_FIREWIRE_H
+#define _LINUX_FIREWIRE_H
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/atomic.h>
+#include <asm/byteorder.h>
+
+#define CSR_REGISTER_BASE 0xfffff0000000ULL
+
+/* register offsets are relative to CSR_REGISTER_BASE */
+#define CSR_STATE_CLEAR 0x0
+#define CSR_STATE_SET 0x4
+#define CSR_NODE_IDS 0x8
+#define CSR_RESET_START 0xc
+#define CSR_SPLIT_TIMEOUT_HI 0x18
+#define CSR_SPLIT_TIMEOUT_LO 0x1c
+#define CSR_CYCLE_TIME 0x200
+#define CSR_BUS_TIME 0x204
+#define CSR_BUSY_TIMEOUT 0x210
+#define CSR_PRIORITY_BUDGET 0x218
+#define CSR_BUS_MANAGER_ID 0x21c
+#define CSR_BANDWIDTH_AVAILABLE 0x220
+#define CSR_CHANNELS_AVAILABLE 0x224
+#define CSR_CHANNELS_AVAILABLE_HI 0x224
+#define CSR_CHANNELS_AVAILABLE_LO 0x228
+#define CSR_MAINT_UTILITY 0x230
+#define CSR_BROADCAST_CHANNEL 0x234
+#define CSR_CONFIG_ROM 0x400
+#define CSR_CONFIG_ROM_END 0x800
+#define CSR_OMPR 0x900
+#define CSR_OPCR(i) (0x904 + (i) * 4)
+#define CSR_IMPR 0x980
+#define CSR_IPCR(i) (0x984 + (i) * 4)
+#define CSR_FCP_COMMAND 0xB00
+#define CSR_FCP_RESPONSE 0xD00
+#define CSR_FCP_END 0xF00
+#define CSR_TOPOLOGY_MAP 0x1000
+#define CSR_TOPOLOGY_MAP_END 0x1400
+#define CSR_SPEED_MAP 0x2000
+#define CSR_SPEED_MAP_END 0x3000
+
+#define CSR_OFFSET 0x40
+#define CSR_LEAF 0x80
+#define CSR_DIRECTORY 0xc0
+
+#define CSR_DESCRIPTOR 0x01
+#define CSR_VENDOR 0x03
+#define CSR_HARDWARE_VERSION 0x04
+#define CSR_UNIT 0x11
+#define CSR_SPECIFIER_ID 0x12
+#define CSR_VERSION 0x13
+#define CSR_DEPENDENT_INFO 0x14
+#define CSR_MODEL 0x17
+#define CSR_DIRECTORY_ID 0x20
+
+struct fw_csr_iterator {
+ const u32 *p;
+ const u32 *end;
+};
+
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
+int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
+
+extern struct bus_type fw_bus_type;
+
+struct fw_card_driver;
+struct fw_node;
+
+struct fw_card {
+ const struct fw_card_driver *driver;
+ struct device *device;
+ struct kref kref;
+ struct completion done;
+
+ int node_id;
+ int generation;
+ int current_tlabel;
+ u64 tlabel_mask;
+ struct list_head transaction_list;
+ u64 reset_jiffies;
+
+ u32 split_timeout_hi;
+ u32 split_timeout_lo;
+ unsigned int split_timeout_cycles;
+ unsigned int split_timeout_jiffies;
+
+ unsigned long long guid;
+ unsigned max_receive;
+ int link_speed;
+ int config_rom_generation;
+
+ spinlock_t lock; /* Take this lock when handling the lists in
+ * this struct. */
+ struct fw_node *local_node;
+ struct fw_node *root_node;
+ struct fw_node *irm_node;
+ u8 color; /* must be u8 to match the definition in struct fw_node */
+ int gap_count;
+ bool beta_repeaters_present;
+
+ int index;
+ struct list_head link;
+
+ struct list_head phy_receiver_list;
+
+ struct delayed_work br_work; /* bus reset job */
+ bool br_short;
+
+ struct delayed_work bm_work; /* bus manager job */
+ int bm_retries;
+ int bm_generation;
+ int bm_node_id;
+ bool bm_abdicate;
+
+ bool priority_budget_implemented; /* controller feature */
+ bool broadcast_channel_auto_allocated; /* controller feature */
+
+ bool broadcast_channel_allocated;
+ u32 broadcast_channel;
+ __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+
+ __be32 maint_utility_register;
+};
+
+static inline struct fw_card *fw_card_get(struct fw_card *card)
+{
+ kref_get(&card->kref);
+
+ return card;
+}
+
+void fw_card_release(struct kref *kref);
+
+static inline void fw_card_put(struct fw_card *card)
+{
+ kref_put(&card->kref, fw_card_release);
+}
+
+struct fw_attribute_group {
+ struct attribute_group *groups[2];
+ struct attribute_group group;
+ struct attribute *attrs[13];
+};
+
+enum fw_device_state {
+ FW_DEVICE_INITIALIZING,
+ FW_DEVICE_RUNNING,
+ FW_DEVICE_GONE,
+ FW_DEVICE_SHUTDOWN,
+};
+
+/*
+ * Note, fw_device.generation always has to be read before fw_device.node_id.
+ * Use SMP memory barriers to ensure this. Otherwise requests will be sent
+ * to an outdated node_id if the generation was updated in the meantime due
+ * to a bus reset.
+ *
+ * Likewise, fw-core will take care to update .node_id before .generation so
+ * that whenever fw_device.generation is current WRT the actual bus generation,
+ * fw_device.node_id is guaranteed to be current too.
+ *
+ * The same applies to fw_device.card->node_id vs. fw_device.generation.
+ *
+ * fw_device.config_rom and fw_device.config_rom_length may be accessed during
+ * the lifetime of any fw_unit belonging to the fw_device, before device_del()
+ * was called on the last fw_unit. Alternatively, they may be accessed while
+ * holding fw_device_rwsem.
+ */
+struct fw_device {
+ atomic_t state;
+ struct fw_node *node;
+ int node_id;
+ int generation;
+ unsigned max_speed;
+ struct fw_card *card;
+ struct device device;
+
+ struct mutex client_list_mutex;
+ struct list_head client_list;
+
+ const u32 *config_rom;
+ size_t config_rom_length;
+ int config_rom_retries;
+ unsigned is_local:1;
+ unsigned max_rec:4;
+ unsigned cmc:1;
+ unsigned irmc:1;
+ unsigned bc_implemented:2;
+
+ work_func_t workfn;
+ struct delayed_work work;
+ struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_device *fw_device(struct device *dev)
+{
+ return container_of(dev, struct fw_device, device);
+}
+
+static inline int fw_device_is_shutdown(struct fw_device *device)
+{
+ return atomic_read(&device->state) == FW_DEVICE_SHUTDOWN;
+}
+
+int fw_device_enable_phys_dma(struct fw_device *device);
+
+/*
+ * fw_unit.directory must not be accessed after device_del(&fw_unit.device).
+ */
+struct fw_unit {
+ struct device device;
+ const u32 *directory;
+ struct fw_attribute_group attribute_group;
+};
+
+static inline struct fw_unit *fw_unit(struct device *dev)
+{
+ return container_of(dev, struct fw_unit, device);
+}
+
+static inline struct fw_unit *fw_unit_get(struct fw_unit *unit)
+{
+ get_device(&unit->device);
+
+ return unit;
+}
+
+static inline void fw_unit_put(struct fw_unit *unit)
+{
+ put_device(&unit->device);
+}
+
+static inline struct fw_device *fw_parent_device(struct fw_unit *unit)
+{
+ return fw_device(unit->device.parent);
+}
+
+struct ieee1394_device_id;
+
+struct fw_driver {
+ struct device_driver driver;
+ int (*probe)(struct fw_unit *unit, const struct ieee1394_device_id *id);
+ /* Called when the parent device sits through a bus reset. */
+ void (*update)(struct fw_unit *unit);
+ void (*remove)(struct fw_unit *unit);
+ const struct ieee1394_device_id *id_table;
+};
+
+struct fw_packet;
+struct fw_request;
+
+typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
+ struct fw_card *card, int status);
+typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
+ void *data, size_t length,
+ void *callback_data);
+/*
+ * This callback handles an inbound request subaction. It is called in
+ * RCU read-side context, therefore must not sleep.
+ *
+ * The callback should not initiate outbound request subactions directly.
+ * Otherwise there is a danger of recursion of inbound and outbound
+ * transactions from and to the local node.
+ *
+ * The callback is responsible that either fw_send_response() or kfree()
+ * is called on the @request, except for FCP registers for which the core
+ * takes care of that.
+ */
+typedef void (*fw_address_callback_t)(struct fw_card *card,
+ struct fw_request *request,
+ int tcode, int destination, int source,
+ int generation,
+ unsigned long long offset,
+ void *data, size_t length,
+ void *callback_data);
+
+struct fw_packet {
+ int speed;
+ int generation;
+ u32 header[4];
+ size_t header_length;
+ void *payload;
+ size_t payload_length;
+ dma_addr_t payload_bus;
+ bool payload_mapped;
+ u32 timestamp;
+
+ /*
+ * This callback is called when the packet transmission has completed.
+ * For successful transmission, the status code is the ack received
+ * from the destination. Otherwise it is one of the juju-specific
+ * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK.
+ * The callback can be called from tasklet context and thus
+ * must never block.
+ */
+ fw_packet_callback_t callback;
+ int ack;
+ struct list_head link;
+ void *driver_data;
+};
+
+struct fw_transaction {
+ int node_id; /* The generation is implied; it is always the current. */
+ int tlabel;
+ struct list_head link;
+ struct fw_card *card;
+ bool is_split_transaction;
+ struct timer_list split_timeout_timer;
+
+ struct fw_packet packet;
+
+ /*
+ * The data passed to the callback is valid only during the
+ * callback.
+ */
+ fw_transaction_callback_t callback;
+ void *callback_data;
+};
+
+struct fw_address_handler {
+ u64 offset;
+ u64 length;
+ fw_address_callback_t address_callback;
+ void *callback_data;
+ struct list_head link;
+};
+
+struct fw_address_region {
+ u64 start;
+ u64 end;
+};
+
+extern const struct fw_address_region fw_high_memory_region;
+
+int fw_core_add_address_handler(struct fw_address_handler *handler,
+ const struct fw_address_region *region);
+void fw_core_remove_address_handler(struct fw_address_handler *handler);
+void fw_send_response(struct fw_card *card,
+ struct fw_request *request, int rcode);
+int fw_get_request_speed(struct fw_request *request);
+void fw_send_request(struct fw_card *card, struct fw_transaction *t,
+ int tcode, int destination_id, int generation, int speed,
+ unsigned long long offset, void *payload, size_t length,
+ fw_transaction_callback_t callback, void *callback_data);
+int fw_cancel_transaction(struct fw_card *card,
+ struct fw_transaction *transaction);
+int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
+ int generation, int speed, unsigned long long offset,
+ void *payload, size_t length);
+const char *fw_rcode_string(int rcode);
+
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+ return tag << 14 | channel << 8 | sy;
+}
+
+void fw_schedule_bus_reset(struct fw_card *card, bool delayed,
+ bool short_reset);
+
+struct fw_descriptor {
+ struct list_head link;
+ size_t length;
+ u32 immediate;
+ u32 key;
+ const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointed to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frames automatically).
+ */
+struct fw_iso_packet {
+ u16 payload_length; /* Length of indirect payload */
+ u32 interrupt:1; /* Generate interrupt on this packet */
+ u32 skip:1; /* tx: Set to not send packet at all */
+ /* rx: Sync bit, wait for matching sy */
+ u32 tag:2; /* tx: Tag in packet header */
+ u32 sy:4; /* tx: Sy in packet header */
+ u32 header_length:8; /* Length of immediate header */
+ u32 header[0]; /* tx: Top of 1394 isoch. data_block */
+};
+
+#define FW_ISO_CONTEXT_TRANSMIT 0
+#define FW_ISO_CONTEXT_RECEIVE 1
+#define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2
+
+#define FW_ISO_CONTEXT_MATCH_TAG0 1
+#define FW_ISO_CONTEXT_MATCH_TAG1 2
+#define FW_ISO_CONTEXT_MATCH_TAG2 4
+#define FW_ISO_CONTEXT_MATCH_TAG3 8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction. Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space. We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+struct fw_iso_buffer {
+ enum dma_data_direction direction;
+ struct page **pages;
+ int page_count;
+ int page_count_mapped;
+};
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed);
+
+struct fw_iso_context;
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+ u32 cycle, size_t header_length,
+ void *header, void *data);
+typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context,
+ dma_addr_t completed, void *data);
+struct fw_iso_context {
+ struct fw_card *card;
+ int type;
+ int channel;
+ int speed;
+ bool drop_overflow_headers;
+ size_t header_size;
+ union {
+ fw_iso_callback_t sc;
+ fw_iso_mc_callback_t mc;
+ } callback;
+ void *callback_data;
+};
+
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+ int type, int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+void fw_iso_context_queue_flush(struct fw_iso_context *ctx);
+int fw_iso_context_flush_completions(struct fw_iso_context *ctx);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+ int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
+void fw_iso_resource_manage(struct fw_card *card, int generation,
+ u64 channels_mask, int *channel, int *bandwidth,
+ bool allocate);
+
+extern struct workqueue_struct *fw_workqueue;
+
+#endif /* _LINUX_FIREWIRE_H */
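
fw_run_transaction() is the synchronous counterpart of fw_send_request() and is the easiest way to see the API in action. The sketch below reads one quadlet from a remote node, following the generation/node_id ordering rule documented above; the helper name is an assumption, TCODE_READ_QUADLET_REQUEST comes from the UAPI firewire constants, and the quadlet is assumed to arrive in bus (big-endian) order as in existing callers.

#include <linux/firewire.h>
#include <linux/firewire-constants.h>

/* Sketch: synchronous quadlet read. Returns an RCODE_* value, with
 * RCODE_COMPLETE (0) meaning *buf now holds the quadlet. */
static int example_read_quadlet(struct fw_device *device, u64 offset,
				__be32 *buf)
{
	int generation = device->generation;

	smp_rmb();	/* read generation before node_id, as noted above */

	return fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				  device->node_id, generation,
				  device->max_speed, offset, buf, 4);
}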
diff --git a/include/linux/firmware-map.h b/include/linux/firmware-map.h
new file mode 100644
index 000000000..71d4fa721
--- /dev/null
+++ b/include/linux/firmware-map.h
@@ -0,0 +1,49 @@
+/*
+ * include/linux/firmware-map.h:
+ * Copyright (C) 2008 SUSE LINUX Products GmbH
+ * by Bernhard Walle <bernhard.walle@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_FIRMWARE_MAP_H
+#define _LINUX_FIRMWARE_MAP_H
+
+#include <linux/list.h>
+
+/*
+ * provide a dummy interface if CONFIG_FIRMWARE_MEMMAP is disabled
+ */
+#ifdef CONFIG_FIRMWARE_MEMMAP
+
+int firmware_map_add_early(u64 start, u64 end, const char *type);
+int firmware_map_add_hotplug(u64 start, u64 end, const char *type);
+int firmware_map_remove(u64 start, u64 end, const char *type);
+
+#else /* CONFIG_FIRMWARE_MEMMAP */
+
+static inline int firmware_map_add_early(u64 start, u64 end, const char *type)
+{
+ return 0;
+}
+
+static inline int firmware_map_add_hotplug(u64 start, u64 end, const char *type)
+{
+ return 0;
+}
+
+static inline int firmware_map_remove(u64 start, u64 end, const char *type)
+{
+ return 0;
+}
+
+#endif /* CONFIG_FIRMWARE_MEMMAP */
+
+#endif /* _LINUX_FIRMWARE_MAP_H */
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
new file mode 100644
index 000000000..92156e687
--- /dev/null
+++ b/include/linux/firmware.h
@@ -0,0 +1,170 @@
+#ifndef _LINUX_FIRMWARE_H
+#define _LINUX_FIRMWARE_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/gfp.h>
+
+#define FW_ACTION_NOHOTPLUG 0
+#define FW_ACTION_HOTPLUG 1
+
+struct firmware {
+ size_t size;
+ const u8 *data;
+ struct page **pages;
+
+ /* firmware loader private fields */
+ void *priv;
+};
+
+struct module;
+struct device;
+
+struct builtin_fw {
+ char *name;
+ void *data;
+ unsigned long size;
+};
+
+/* We have to play tricks here much like stringify() to get the
+ __COUNTER__ macro to be expanded as we want it */
+#define __fw_concat1(x, y) x##y
+#define __fw_concat(x, y) __fw_concat1(x, y)
+
+#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
+ DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+
+#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
+ static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
+ __used __section(.builtin_fw) = { name, blob, size }
+
+#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
+int request_firmware(const struct firmware **fw, const char *name,
+ struct device *device);
+int request_firmware_nowait(
+ struct module *module, bool uevent,
+ const char *name, struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context));
+int request_firmware_direct(const struct firmware **fw, const char *name,
+ struct device *device);
+
+void release_firmware(const struct firmware *fw);
+#else
+static inline int request_firmware(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+static inline int request_firmware_nowait(
+ struct module *module, bool uevent,
+ const char *name, struct device *device, gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+ return -EINVAL;
+}
+
+static inline void release_firmware(const struct firmware *fw)
+{
+}
+
+static inline int request_firmware_direct(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+
+#endif
+#ifndef _LINUX_LIBRE_FIRMWARE_H
+#define _LINUX_LIBRE_FIRMWARE_H
+
+#include <linux/device.h>
+
+#define NONFREE_FIRMWARE "/*(DEBLOBBED)*/"
+
+static inline int
+is_nonfree_firmware(const char *name)
+{
+ return strstr(name, NONFREE_FIRMWARE) != 0;
+}
+
+static inline int
+report_missing_free_firmware(const char *name, const char *what)
+{
+ printk(KERN_ERR "%s: Missing Free %s (non-Free firmware loading is disabled)\n", name,
+ what ? what : "firmware");
+ return -EINVAL;
+}
+static inline int
+reject_firmware(const struct firmware **fw,
+ const char *name, struct device *device)
+{
+ const struct firmware *xfw = NULL;
+ int retval;
+ report_missing_free_firmware(dev_name(device), NULL);
+ retval = request_firmware(&xfw, NONFREE_FIRMWARE, device);
+ if (!retval)
+ release_firmware(xfw);
+ return -EINVAL;
+}
+static inline int
+maybe_reject_firmware(const struct firmware **fw,
+ const char *name, struct device *device)
+{
+ if (is_nonfree_firmware(name))
+ return reject_firmware(fw, name, device);
+ else
+ return request_firmware(fw, name, device);
+}
+static inline int
+reject_firmware_direct(const struct firmware **fw,
+ const char *name, struct device *device)
+{
+ const struct firmware *xfw = NULL;
+ int retval;
+ report_missing_free_firmware(dev_name(device), NULL);
+ retval = request_firmware_direct(&xfw, NONFREE_FIRMWARE, device);
+ if (!retval)
+ release_firmware(xfw);
+ return -EINVAL;
+}
+static inline void
+discard_rejected_firmware(const struct firmware *fw, void *context)
+{
+ release_firmware(fw);
+}
+static inline int
+reject_firmware_nowait(struct module *module, int uevent,
+ const char *name, struct device *device,
+ gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw,
+ void *context))
+{
+ int retval;
+ report_missing_free_firmware(dev_name(device), NULL);
+ retval = request_firmware_nowait(module, uevent, NONFREE_FIRMWARE,
+ device, gfp, NULL,
+ discard_rejected_firmware);
+ if (retval)
+ return retval;
+ return -EINVAL;
+}
+static inline int
+maybe_reject_firmware_nowait(struct module *module, int uevent,
+ const char *name, struct device *device,
+ gfp_t gfp, void *context,
+ void (*cont)(const struct firmware *fw,
+ void *context))
+{
+ if (is_nonfree_firmware(name))
+ return reject_firmware_nowait(module, uevent, name,
+ device, gfp, context, cont);
+ else
+ return request_firmware_nowait(module, uevent, name,
+ device, gfp, context, cont);
+}
+
+#endif /* _LINUX_LIBRE_FIRMWARE_H */
+
+#endif
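
The request/release pair above follows one canonical pattern, shown below as a sketch; the firmware file name and the device upload step are placeholders. Note that in this deblobbed tree, drivers normally go through the reject_firmware()/maybe_reject_firmware() wrappers appended to the header rather than calling request_firmware() directly.

#include <linux/firmware.h>
#include <linux/device.h>

/* Sketch: fetch a blob, use it, release it. fw->data and fw->size stay
 * valid only until release_firmware(). */
static int example_load_ucode(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "example/ucode.bin", dev);
	if (ret)
		return ret;

	/* ... copy fw->data (fw->size bytes) to the device here ... */

	release_firmware(fw);
	return 0;
}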
diff --git a/include/linux/fixp-arith.h b/include/linux/fixp-arith.h
new file mode 100644
index 000000000..d4686fe1c
--- /dev/null
+++ b/include/linux/fixp-arith.h
@@ -0,0 +1,156 @@
+#ifndef _FIXP_ARITH_H
+#define _FIXP_ARITH_H
+
+#include <linux/math64.h>
+
+/*
+ * Simplistic fixed-point arithmetics.
+ * Hmm, I'm probably duplicating some code :(
+ *
+ * Copyright (c) 2002 Johann Deneux
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Should you need to contact me, the author, you can do so by
+ * e-mail - mail your message to <johann.deneux@gmail.com>
+ */
+
+#include <linux/types.h>
+
+static const s32 sin_table[] = {
+ 0x00000000, 0x023be165, 0x04779632, 0x06b2f1d2, 0x08edc7b6, 0x0b27eb5c,
+ 0x0d61304d, 0x0f996a26, 0x11d06c96, 0x14060b67, 0x163a1a7d, 0x186c6ddd,
+ 0x1a9cd9ac, 0x1ccb3236, 0x1ef74bf2, 0x2120fb82, 0x234815ba, 0x256c6f9e,
+ 0x278dde6e, 0x29ac379f, 0x2bc750e8, 0x2ddf003f, 0x2ff31bdd, 0x32037a44,
+ 0x340ff241, 0x36185aee, 0x381c8bb5, 0x3a1c5c56, 0x3c17a4e7, 0x3e0e3ddb,
+ 0x3fffffff, 0x41ecc483, 0x43d464fa, 0x45b6bb5d, 0x4793a20f, 0x496af3e1,
+ 0x4b3c8c11, 0x4d084650, 0x4ecdfec6, 0x508d9210, 0x5246dd48, 0x53f9be04,
+ 0x55a6125a, 0x574bb8e5, 0x58ea90c2, 0x5a827999, 0x5c135399, 0x5d9cff82,
+ 0x5f1f5ea0, 0x609a52d1, 0x620dbe8a, 0x637984d3, 0x64dd894f, 0x6639b039,
+ 0x678dde6d, 0x68d9f963, 0x6a1de735, 0x6b598ea1, 0x6c8cd70a, 0x6db7a879,
+ 0x6ed9eba0, 0x6ff389de, 0x71046d3c, 0x720c8074, 0x730baeec, 0x7401e4bf,
+ 0x74ef0ebb, 0x75d31a5f, 0x76adf5e5, 0x777f903b, 0x7847d908, 0x7906c0af,
+ 0x79bc384c, 0x7a6831b8, 0x7b0a9f8c, 0x7ba3751c, 0x7c32a67c, 0x7cb82884,
+ 0x7d33f0c8, 0x7da5f5a3, 0x7e0e2e31, 0x7e6c924f, 0x7ec11aa3, 0x7f0bc095,
+ 0x7f4c7e52, 0x7f834ecf, 0x7fb02dc4, 0x7fd317b3, 0x7fec09e1, 0x7ffb025e,
+ 0x7fffffff
+};
+
+/**
+ * __fixp_sin32() - returns the sine of an angle in degrees
+ *
+ * @degrees: angle, in degrees, from 0 to 360.
+ *
+ * The returned value ranges from -0x7fffffff to +0x7fffffff.
+ */
+static inline s32 __fixp_sin32(int degrees)
+{
+ s32 ret;
+ bool negative = false;
+
+ if (degrees > 180) {
+ negative = true;
+ degrees -= 180;
+ }
+ if (degrees > 90)
+ degrees = 180 - degrees;
+
+ ret = sin_table[degrees];
+
+ return negative ? -ret : ret;
+}
+
+/**
+ * fixp_sin32() - returns the sine of an angle in degrees
+ *
+ * @degrees: angle, in degrees. The angle can be positive or negative
+ *
+ * The returned value ranges from -0x7fffffff to +0x7fffffff.
+ */
+static inline s32 fixp_sin32(int degrees)
+{
+ degrees = (degrees % 360 + 360) % 360;
+
+ return __fixp_sin32(degrees);
+}
+
+/* cos(x) = sin(x + 90 degrees) */
+#define fixp_cos32(v) fixp_sin32((v) + 90)
+
+/*
+ * 16 bits variants
+ *
+ * The returned value ranges from -0x7fff to 0x7fff
+ */
+
+#define fixp_sin16(v) (fixp_sin32(v) >> 16)
+#define fixp_cos16(v) (fixp_cos32(v) >> 16)
+
+/**
+ * fixp_sin32_rad() - calculates the sine of an angle in radians
+ *
+ * @radians: angle, in radians
+ * @twopi: value to be used for 2*pi
+ *
+ * Provides a variant for the cases where just 360
+ * values are not enough. This function uses linear
+ * interpolation to a wider range of values, as given
+ * by the twopi argument.
+ *
+ * Experimental tests gave a maximum difference of
+ * 0.000038 between the value calculated by sin() and
+ * the one produced by this function, when twopi is
+ * equal to 360000. That seems to be enough precision
+ * for practical purposes.
+ *
+ * Please notice that too high a value of twopi could cause
+ * overflows, so the routine will not allow values of twopi
+ * bigger than 1 << 18.
+ */
+static inline s32 fixp_sin32_rad(u32 radians, u32 twopi)
+{
+ int degrees;
+ s32 v1, v2, dx, dy;
+ s64 tmp;
+
+ /*
+ * Avoid too large values for twopi, as we don't want overflows.
+ */
+ BUG_ON(twopi > 1 << 18);
+
+ degrees = (radians * 360) / twopi;
+ tmp = radians - (degrees * twopi) / 360;
+
+ degrees = (degrees % 360 + 360) % 360;
+ v1 = __fixp_sin32(degrees);
+
+ v2 = fixp_sin32(degrees + 1);
+
+ dx = twopi / 360;
+ dy = v2 - v1;
+
+ tmp *= dy;
+
+ return v1 + div_s64(tmp, dx);
+}
+
+/* cos(x) = sin(x + pi/2 radians) */
+
+#define fixp_cos32_rad(rad, twopi)	\
+	fixp_sin32_rad((rad) + (twopi) / 4, (twopi))
+
+#endif
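A small illustrative use of the helpers above; this is a sketch assuming kernel context for pr_info(), and the values in the comments are approximate.

static void fixp_demo(void)
{
	s32 s = fixp_sin32(30);                /* ~0.5 in Q31: ~0x3fffffff */
	s32 c = fixp_cos16(60);                /* ~0.5 in Q15: ~0x3fff */
	s32 r = fixp_sin32_rad(65536, 262144); /* sin(pi/2) with twopi = 1 << 18 */

	pr_info("sin(30)=%d cos(60)=%d sin(pi/2)=%d\n", s, c, r);
}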
diff --git a/include/linux/flat.h b/include/linux/flat.h
new file mode 100644
index 000000000..2c1eb15c4
--- /dev/null
+++ b/include/linux/flat.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2002-2003 David McCullough <davidm@snapgear.com>
+ * Copyright (C) 1998 Kenneth Albanowski <kjahds@kjahds.com>
+ * The Silver Hammer Group, Ltd.
+ *
+ * This file provides the definitions and structures needed to
+ * support uClinux flat-format executables.
+ */
+#ifndef _LINUX_FLAT_H
+#define _LINUX_FLAT_H
+
+#include <asm/flat.h>
+#include <uapi/linux/flat.h>
+
+/*
+ * While it would be nice to keep this header clean, users of older
+ * tools still need this support in the kernel. So this section is
+ * purely for compatibility with old tool chains.
+ *
+ * DO NOT make changes or enhancements to the old format please, just work
+ * with the format above, except to fix bugs with old format support.
+ */
+
+#include <asm/byteorder.h>
+
+#define OLD_FLAT_VERSION 0x00000002L
+#define OLD_FLAT_RELOC_TYPE_TEXT 0
+#define OLD_FLAT_RELOC_TYPE_DATA 1
+#define OLD_FLAT_RELOC_TYPE_BSS 2
+
+typedef union {
+ unsigned long value;
+ struct {
+# if defined(mc68000) && !defined(CONFIG_COLDFIRE)
+ signed long offset : 30;
+ unsigned long type : 2;
+# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
+# elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned long type : 2;
+ signed long offset : 30;
+# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
+# elif defined(__LITTLE_ENDIAN_BITFIELD)
+ signed long offset : 30;
+ unsigned long type : 2;
+# define OLD_FLAT_FLAG_RAM 0x1 /* load program entirely into RAM */
+# else
+# error "Unknown bitfield order for flat files."
+# endif
+ } reloc;
+} flat_v2_reloc_t;
+
+#endif /* _LINUX_FLAT_H */
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
new file mode 100644
index 000000000..b6efb0c64
--- /dev/null
+++ b/include/linux/flex_array.h
@@ -0,0 +1,81 @@
+#ifndef _FLEX_ARRAY_H
+#define _FLEX_ARRAY_H
+
+#include <linux/types.h>
+#include <linux/reciprocal_div.h>
+#include <asm/page.h>
+
+#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
+#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE
+
+struct flex_array_part;
+
+/*
+ * This is meant to replace cases where an array-like
+ * structure has gotten too big to fit into kmalloc()
+ * and the developer is getting tempted to use
+ * vmalloc().
+ */
+
+struct flex_array {
+ union {
+ struct {
+ int element_size;
+ int total_nr_elements;
+ int elems_per_part;
+ struct reciprocal_value reciprocal_elems;
+ struct flex_array_part *parts[];
+ };
+ /*
+ * This little trick makes sure that
+ * sizeof(flex_array) == PAGE_SIZE
+ */
+ char padding[FLEX_ARRAY_BASE_SIZE];
+ };
+};
+
+/* Number of bytes left in base struct flex_array, excluding metadata */
+#define FLEX_ARRAY_BASE_BYTES_LEFT \
+ (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts))
+
+/* Number of pointers in base to struct flex_array_part pages */
+#define FLEX_ARRAY_NR_BASE_PTRS \
+ (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *))
+
+/* Number of elements of size that fit in struct flex_array_part */
+#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \
+	(FLEX_ARRAY_PART_SIZE / (size))
+
+/*
+ * Defines a statically allocated flex array and ensures its parameters are
+ * valid.
+ */
+#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \
+ struct flex_array __arrayname = { { { \
+ .element_size = (__element_size), \
+ .total_nr_elements = (__total), \
+ } } }; \
+ static inline void __arrayname##_invalid_parameter(void) \
+ { \
+ BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \
+ FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
+ }
+
+struct flex_array *flex_array_alloc(int element_size, unsigned int total,
+ gfp_t flags);
+int flex_array_prealloc(struct flex_array *fa, unsigned int start,
+ unsigned int nr_elements, gfp_t flags);
+void flex_array_free(struct flex_array *fa);
+void flex_array_free_parts(struct flex_array *fa);
+int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
+ gfp_t flags);
+int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
+void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
+int flex_array_shrink(struct flex_array *fa);
+
+#define flex_array_put_ptr(fa, nr, src, gfp) \
+ flex_array_put(fa, nr, (void *)&(src), gfp)
+
+void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);
+
+#endif /* _FLEX_ARRAY_H */
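A sketch of the intended usage: storing fixed-size elements without one large physically contiguous allocation. The function and values are hypothetical, and error handling is abbreviated.

static int flex_array_demo(void)
{
	struct flex_array *fa;
	int val = 42, i, err;
	int *p;

	fa = flex_array_alloc(sizeof(int), 1024, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	/* Pre-fault the parts so flex_array_put() cannot fail below. */
	err = flex_array_prealloc(fa, 0, 1024, GFP_KERNEL);
	if (err)
		goto out;

	for (i = 0; i < 1024; i++)
		flex_array_put(fa, i, &val, GFP_KERNEL);

	p = flex_array_get(fa, 7);	/* *p == 42 */
	err = p ? 0 : -EINVAL;
out:
	flex_array_free(fa);
	return err;
}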
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
new file mode 100644
index 000000000..0d348e011
--- /dev/null
+++ b/include/linux/flex_proportions.h
@@ -0,0 +1,102 @@
+/*
+ * Floating proportions with flexible aging period
+ *
+ * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
+ */
+
+#ifndef _LINUX_FLEX_PROPORTIONS_H
+#define _LINUX_FLEX_PROPORTIONS_H
+
+#include <linux/percpu_counter.h>
+#include <linux/spinlock.h>
+#include <linux/seqlock.h>
+#include <linux/gfp.h>
+
+/*
+ * When a maximum proportion of some event type is specified, this is the
+ * precision with which we allow limiting. Note that this creates an upper
+ * bound on the number of events per period of roughly
+ * ULLONG_MAX >> FPROP_FRAC_SHIFT.
+ */
+#define FPROP_FRAC_SHIFT 10
+#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)
+
+/*
+ * ---- Global proportion definitions ----
+ */
+struct fprop_global {
+ /* Number of events in the current period */
+ struct percpu_counter events;
+ /* Current period */
+ unsigned int period;
+ /* Synchronization with period transitions */
+ seqcount_t sequence;
+};
+
+int fprop_global_init(struct fprop_global *p, gfp_t gfp);
+void fprop_global_destroy(struct fprop_global *p);
+bool fprop_new_period(struct fprop_global *p, int periods);
+
+/*
+ * ---- SINGLE ----
+ */
+struct fprop_local_single {
+ /* the local events counter */
+ unsigned long events;
+ /* Period in which we last updated events */
+ unsigned int period;
+ raw_spinlock_t lock; /* Protect period and numerator */
+};
+
+#define INIT_FPROP_LOCAL_SINGLE(name) \
+{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+}
+
+int fprop_local_init_single(struct fprop_local_single *pl);
+void fprop_local_destroy_single(struct fprop_local_single *pl);
+void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
+void fprop_fraction_single(struct fprop_global *p,
+ struct fprop_local_single *pl, unsigned long *numerator,
+ unsigned long *denominator);
+
+static inline
+void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __fprop_inc_single(p, pl);
+ local_irq_restore(flags);
+}
+
+/*
+ * ---- PERCPU ----
+ */
+struct fprop_local_percpu {
+ /* the local events counter */
+ struct percpu_counter events;
+ /* Period in which we last updated events */
+ unsigned int period;
+ raw_spinlock_t lock; /* Protect period and numerator */
+};
+
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
+void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
+void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
+void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
+ int max_frac);
+void fprop_fraction_percpu(struct fprop_global *p,
+ struct fprop_local_percpu *pl, unsigned long *numerator,
+ unsigned long *denominator);
+
+static inline
+void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __fprop_inc_percpu(p, pl);
+ local_irq_restore(flags);
+}
+
+#endif
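A sketch of the API flow: one global aging counter plus a per-cpu local counter whose recent share of events is read back as a fraction. The function and static variables are hypothetical.

static struct fprop_global fg;
static struct fprop_local_percpu flp;

static void fprop_demo(void)
{
	unsigned long num, den;

	if (fprop_global_init(&fg, GFP_KERNEL))
		return;
	if (fprop_local_init_percpu(&flp, GFP_KERNEL)) {
		fprop_global_destroy(&fg);
		return;
	}

	fprop_inc_percpu(&fg, &flp);		/* count one event for this local */
	fprop_new_period(&fg, 1);		/* age the counters by one period */
	fprop_fraction_percpu(&fg, &flp, &num, &den);	/* flp's share: num/den */

	fprop_local_destroy_percpu(&flp);
	fprop_global_destroy(&fg);
}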
diff --git a/include/linux/fmc-sdb.h b/include/linux/fmc-sdb.h
new file mode 100644
index 000000000..599bd6bab
--- /dev/null
+++ b/include/linux/fmc-sdb.h
@@ -0,0 +1,38 @@
+/*
+ * This file is separate from sdb.h, because I want that one to remain
+ * unchanged (as far as possible) from the official sdb distribution
+ *
+ * This file and associated functionality are a playground for me to
+ * understand stuff which will later be implemented in more generic places.
+ */
+#include <linux/sdb.h>
+
+/* This is the union of all currently defined types */
+union sdb_record {
+ struct sdb_interconnect ic;
+ struct sdb_device dev;
+ struct sdb_bridge bridge;
+ struct sdb_integration integr;
+ struct sdb_empty empty;
+ struct sdb_synthesis synthesis;
+ struct sdb_repo_url repo_url;
+};
+
+struct fmc_device;
+
+/* Every sdb table is turned into this structure */
+struct sdb_array {
+ int len;
+ int level;
+ unsigned long baseaddr;
+ struct fmc_device *fmc; /* the device that hosts it */
+ struct sdb_array *parent; /* NULL at root */
+ union sdb_record *record; /* copies of the struct */
+ struct sdb_array **subtree; /* only valid for bridge items */
+};
+
+extern int fmc_scan_sdb_tree(struct fmc_device *fmc, unsigned long address);
+extern void fmc_show_sdb_tree(const struct fmc_device *fmc);
+extern signed long fmc_find_sdb_device(struct sdb_array *tree, uint64_t vendor,
+ uint32_t device, unsigned long *sz);
+extern int fmc_free_sdb_tree(struct fmc_device *fmc);
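A sketch of how a mezzanine driver might use these entry points; the SDB entry-point offset and the vendor/device IDs below are hypothetical.

static int example_find_core(struct fmc_device *fmc)
{
	signed long off;
	unsigned long size;
	int err;

	err = fmc_scan_sdb_tree(fmc, 0x0);	/* entry point assumed at offset 0 */
	if (err < 0)
		return err;

	off = fmc_find_sdb_device(fmc->sdb, 0x0000ce42, 0x1234, &size);
	if (off < 0)
		return -ENODEV;

	/* The core's registers start at offset `off' and span `size' bytes. */
	return 0;
}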
diff --git a/include/linux/fmc.h b/include/linux/fmc.h
new file mode 100644
index 000000000..a5f0aa5c2
--- /dev/null
+++ b/include/linux/fmc.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2012 CERN (www.cern.ch)
+ * Author: Alessandro Rubini <rubini@gnudd.com>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ *
+ * This work is part of the White Rabbit project, a research effort led
+ * by CERN, the European Institute for Nuclear Research.
+ */
+#ifndef __LINUX_FMC_H__
+#define __LINUX_FMC_H__
+#include <linux/types.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+struct fmc_device;
+struct fmc_driver;
+
+/*
+ * This bus abstraction is developed separately from drivers, so we need
+ * to check the version of the data structures we receive.
+ */
+
+#define FMC_MAJOR 3
+#define FMC_MINOR 0
+#define FMC_VERSION ((FMC_MAJOR << 16) | FMC_MINOR)
+#define __FMC_MAJOR(x) ((x) >> 16)
+#define __FMC_MINOR(x) ((x) & 0xffff)
+
+/*
+ * The device identification, as defined by the IPMI FRU (Field Replaceable
+ * Unit) includes four different strings to describe the device. Here we
+ * only match the "Board Manufacturer" and the "Board Product Name",
+ * ignoring the "Board Serial Number" and "Board Part Number". All 4 are
+ * expected to be strings, so they are treated as zero-terminated C strings.
+ * An unspecified string (NULL) means "any", so if both are unspecified this
+ * is a catch-all driver. Because NULL entries are allowed, we use an array
+ * plus a length, unlike pci and usb, which use null-terminated arrays.
+ */
+struct fmc_fru_id {
+ char *manufacturer;
+ char *product_name;
+};
+
+/*
+ * If the FPGA is already programmed (think Etherbone or the second
+ * SVEC slot), we can match on SDB devices in the memory image. This
+ * match uses an array of devices that must all be present, and the
+ * match is based on vendor and device only. Further checks are expected
+ * to happen in the probe function. Zero means "any" and catch-all is allowed.
+ */
+struct fmc_sdb_one_id {
+ uint64_t vendor;
+ uint32_t device;
+};
+struct fmc_sdb_id {
+ struct fmc_sdb_one_id *cores;
+ int cores_nr;
+};
+
+struct fmc_device_id {
+ struct fmc_fru_id *fru_id;
+ int fru_id_nr;
+ struct fmc_sdb_id *sdb_id;
+ int sdb_id_nr;
+};
+
+/* This sizes the module_param_array used by generic module parameters */
+#define FMC_MAX_CARDS 32
+
+/* The driver is a pretty simple thing */
+struct fmc_driver {
+ unsigned long version;
+ struct device_driver driver;
+ int (*probe)(struct fmc_device *);
+ int (*remove)(struct fmc_device *);
+ const struct fmc_device_id id_table;
+ /* What follows is for generic module parameters */
+ int busid_n;
+ int busid_val[FMC_MAX_CARDS];
+ int gw_n;
+ char *gw_val[FMC_MAX_CARDS];
+};
+#define to_fmc_driver(x) container_of((x), struct fmc_driver, driver)
+
+/* These are the generic parameters, that drivers may instantiate */
+#define FMC_PARAM_BUSID(_d) \
+ module_param_array_named(busid, _d.busid_val, int, &_d.busid_n, 0444)
+#define FMC_PARAM_GATEWARE(_d) \
+ module_param_array_named(gateware, _d.gw_val, charp, &_d.gw_n, 0444)
+
+/*
+ * Drivers may need to configure gpio pins in the carrier. To read input
+ * (a very uncommon operation, and definitely not in the hot paths), just
+ * configure one gpio only and get 0 or 1 as retval of the config method
+ */
+struct fmc_gpio {
+ char *carrier_name; /* name or NULL for virtual pins */
+ int gpio;
+ int _gpio; /* internal use by the carrier */
+ int mode; /* GPIOF_DIR_OUT etc, from <linux/gpio.h> */
+ int irqmode; /* IRQF_TRIGGER_LOW and so on */
+};
+
+/* The numbering of gpio pins allows access to raw pins or virtual roles */
+#define FMC_GPIO_RAW(x) (x) /* 4096 of them */
+#define __FMC_GPIO_IS_RAW(x) ((x) < 0x1000)
+#define FMC_GPIO_IRQ(x) ((x) + 0x1000) /* 256 of them */
+#define FMC_GPIO_LED(x) ((x) + 0x1100) /* 256 of them */
+#define FMC_GPIO_KEY(x) ((x) + 0x1200) /* 256 of them */
+#define FMC_GPIO_TP(x) ((x) + 0x1300) /* 256 of them */
+#define FMC_GPIO_USER(x) ((x) + 0x1400) /* 256 of them */
+/* We may add SCL and SDA, or other roles if the need arises */
+
+/* GPIOF_DIR_IN etc. are missing before 3.0; copy from <linux/gpio.h> */
+#ifndef GPIOF_DIR_IN
+# define GPIOF_DIR_OUT (0 << 0)
+# define GPIOF_DIR_IN (1 << 0)
+# define GPIOF_INIT_LOW (0 << 1)
+# define GPIOF_INIT_HIGH (1 << 1)
+#endif
+
+/*
+ * The operations are offered by each carrier and should make driver
+ * design completely independent of the carrier. Named GPIO pins may be
+ * the exception.
+ */
+struct fmc_operations {
+ uint32_t (*read32)(struct fmc_device *fmc, int offset);
+ void (*write32)(struct fmc_device *fmc, uint32_t value, int offset);
+ int (*validate)(struct fmc_device *fmc, struct fmc_driver *drv);
+ int (*reprogram)(struct fmc_device *f, struct fmc_driver *d, char *gw);
+ int (*irq_request)(struct fmc_device *fmc, irq_handler_t h,
+ char *name, int flags);
+ void (*irq_ack)(struct fmc_device *fmc);
+ int (*irq_free)(struct fmc_device *fmc);
+ int (*gpio_config)(struct fmc_device *fmc, struct fmc_gpio *gpio,
+ int ngpio);
+ int (*read_ee)(struct fmc_device *fmc, int pos, void *d, int l);
+ int (*write_ee)(struct fmc_device *fmc, int pos, const void *d, int l);
+};
+
+/* Prefer this helper rather than calling of fmc->reprogram directly */
+extern int fmc_reprogram(struct fmc_device *f, struct fmc_driver *d, char *gw,
+ int sdb_entry);
+
+/*
+ * The device reports all information needed to access hw.
+ *
+ * If we have eeprom_len but no contents, the core reads the EEPROM.
+ * Then, parsing of identifiers is done by the core, which fills fmc_fru_id.
+ * Similarly, a device that must be matched based on SDB cores must
+ * fill the entry point, and the core will scan the bus (FIXME: sdb match).
+ */
+struct fmc_device {
+ unsigned long version;
+ unsigned long flags;
+ struct module *owner; /* char device must pin it */
+ struct fmc_fru_id id; /* for EEPROM-based match */
+ struct fmc_operations *op; /* carrier-provided */
+ int irq; /* according to host bus. 0 == none */
+ int eeprom_len; /* Usually 8kB, may be less */
+ int eeprom_addr; /* 0x50, 0x52 etc */
+ uint8_t *eeprom; /* Full contents or leading part */
+ char *carrier_name; /* "SPEC" or similar, for special use */
+ void *carrier_data; /* "struct spec *" or equivalent */
+ __iomem void *fpga_base; /* May be NULL (Etherbone) */
+ __iomem void *slot_base; /* Set by the driver */
+ struct fmc_device **devarray; /* Allocated by the bus */
+ int slot_id; /* Index in the slot array */
+ int nr_slots; /* Number of slots in this carrier */
+ unsigned long memlen; /* Used for the char device */
+ struct device dev; /* For Linux use */
+ struct device *hwdev; /* The underlying hardware device */
+ unsigned long sdbfs_entry;
+ struct sdb_array *sdb;
+ uint32_t device_id; /* Filled by the device */
+ char *mezzanine_name; /* Defaults to ``fmc'' */
+ void *mezzanine_data;
+};
+#define to_fmc_device(x) container_of((x), struct fmc_device, dev)
+
+#define FMC_DEVICE_HAS_GOLDEN 1
+#define FMC_DEVICE_HAS_CUSTOM 2
+#define FMC_DEVICE_NO_MEZZANINE 4
+#define FMC_DEVICE_MATCH_SDB 8 /* fmc-core must scan sdb in fpga */
+
+/*
+ * If fpga_base can be used, the carrier offers no readl/writel methods, and
+ * this expands to a single, fast, I/O access.
+ */
+static inline uint32_t fmc_readl(struct fmc_device *fmc, int offset)
+{
+ if (unlikely(fmc->op->read32))
+ return fmc->op->read32(fmc, offset);
+ return readl(fmc->fpga_base + offset);
+}
+static inline void fmc_writel(struct fmc_device *fmc, uint32_t val, int off)
+{
+ if (unlikely(fmc->op->write32))
+ fmc->op->write32(fmc, val, off);
+ else
+ writel(val, fmc->fpga_base + off);
+}
+
+/* pci-like naming */
+static inline void *fmc_get_drvdata(const struct fmc_device *fmc)
+{
+ return dev_get_drvdata(&fmc->dev);
+}
+
+static inline void fmc_set_drvdata(struct fmc_device *fmc, void *data)
+{
+ dev_set_drvdata(&fmc->dev, data);
+}
+
+/* The 4 access points */
+extern int fmc_driver_register(struct fmc_driver *drv);
+extern void fmc_driver_unregister(struct fmc_driver *drv);
+extern int fmc_device_register(struct fmc_device *tdev);
+extern void fmc_device_unregister(struct fmc_device *tdev);
+
+/* Two more for device sets, all driven by the same FPGA */
+extern int fmc_device_register_n(struct fmc_device **devs, int n);
+extern void fmc_device_unregister_n(struct fmc_device **devs, int n);
+
+/* Internal cross-calls between files; not exported to other modules */
+extern int fmc_match(struct device *dev, struct device_driver *drv);
+extern int fmc_fill_id_info(struct fmc_device *fmc);
+extern void fmc_free_id_info(struct fmc_device *fmc);
+extern void fmc_dump_eeprom(const struct fmc_device *fmc);
+extern void fmc_dump_sdb(const struct fmc_device *fmc);
+
+#endif /* __LINUX_FMC_H__ */
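The skeleton of an FMC mezzanine driver built on the structures above, as a sketch; the FRU strings, register offset, and driver name are hypothetical, and registration would go through fmc_driver_register()/fmc_driver_unregister().

static struct fmc_fru_id example_fru_id[] = {
	{ .manufacturer = "ExampleCorp", .product_name = "example-mezz" },
};

static int example_probe(struct fmc_device *fmc)
{
	uint32_t id = fmc_readl(fmc, 0x0);	/* read a core register */

	dev_info(&fmc->dev, "core id 0x%08x\n", id);
	fmc_set_drvdata(fmc, NULL);
	return 0;
}

static int example_remove(struct fmc_device *fmc)
{
	return 0;
}

static struct fmc_driver example_drv = {
	.version = FMC_VERSION,
	.driver.name = KBUILD_MODNAME,
	.probe = example_probe,
	.remove = example_remove,
	.id_table = {
		.fru_id = example_fru_id,
		.fru_id_nr = ARRAY_SIZE(example_fru_id),
	},
};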
diff --git a/include/linux/font.h b/include/linux/font.h
new file mode 100644
index 000000000..d6821769d
--- /dev/null
+++ b/include/linux/font.h
@@ -0,0 +1,60 @@
+/*
+ * font.h -- `Soft' font definitions
+ *
+ * Created 1995 by Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _VIDEO_FONT_H
+#define _VIDEO_FONT_H
+
+#include <linux/types.h>
+
+struct font_desc {
+ int idx;
+ const char *name;
+ int width, height;
+ const void *data;
+ int pref;
+};
+
+#define VGA8x8_IDX 0
+#define VGA8x16_IDX 1
+#define PEARL8x8_IDX 2
+#define VGA6x11_IDX 3
+#define FONT7x14_IDX 4
+#define FONT10x18_IDX 5
+#define SUN8x16_IDX 6
+#define SUN12x22_IDX 7
+#define ACORN8x8_IDX 8
+#define MINI4x6_IDX 9
+#define FONT6x10_IDX 10
+
+extern const struct font_desc font_vga_8x8,
+ font_vga_8x16,
+ font_pearl_8x8,
+ font_vga_6x11,
+ font_7x14,
+ font_10x18,
+ font_sun_8x16,
+ font_sun_12x22,
+ font_acorn_8x8,
+ font_mini_4x6,
+ font_6x10;
+
+/* Find a font with a specific name */
+
+extern const struct font_desc *find_font(const char *name);
+
+/* Get the default font for a specific screen size */
+
+extern const struct font_desc *get_default_font(int xres, int yres,
+ u32 font_w, u32 font_h);
+
+/* Max. length for the name of a predefined font */
+#define MAX_FONT_NAME 32
+
+#endif /* _VIDEO_FONT_H */
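A sketch of the lookup helpers; "VGA8x16" is one of the built-in font names, and passing -1 for font_w/font_h is intended here as "no size preference" (an assumption, not something this header mandates).

static const struct font_desc *example_pick_font(int xres, int yres)
{
	const struct font_desc *font = find_font("VGA8x16");

	if (!font)
		font = get_default_font(xres, yres, -1, -1);

	return font;	/* ->data is the bitmap, ->width/->height its cell size */
}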
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
new file mode 100644
index 000000000..6b7fd9cf5
--- /dev/null
+++ b/include/linux/freezer.h
@@ -0,0 +1,301 @@
+/* Freezer declarations */
+
+#ifndef FREEZER_H_INCLUDED
+#define FREEZER_H_INCLUDED
+
+#include <linux/debug_locks.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+
+#ifdef CONFIG_FREEZER
+extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
+extern bool pm_freezing; /* PM freezing in effect */
+extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
+
+/*
+ * Timeout for stopping processes
+ */
+extern unsigned int freeze_timeout_msecs;
+
+/*
+ * Check if a process has been frozen
+ */
+static inline bool frozen(struct task_struct *p)
+{
+ return p->flags & PF_FROZEN;
+}
+
+extern bool freezing_slow_path(struct task_struct *p);
+
+/*
+ * Check if there is a request to freeze a process
+ */
+static inline bool freezing(struct task_struct *p)
+{
+ if (likely(!atomic_read(&system_freezing_cnt)))
+ return false;
+ return freezing_slow_path(p);
+}
+
+/* Takes and releases task alloc lock using task_lock() */
+extern void __thaw_task(struct task_struct *t);
+
+extern bool __refrigerator(bool check_kthr_stop);
+extern int freeze_processes(void);
+extern int freeze_kernel_threads(void);
+extern void thaw_processes(void);
+extern void thaw_kernel_threads(void);
+
+/*
+ * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
+ * If try_to_freeze causes a lockdep warning it means the caller may deadlock
+ */
+static inline bool try_to_freeze_unsafe(void)
+{
+ might_sleep();
+ if (likely(!freezing(current)))
+ return false;
+ return __refrigerator(false);
+}
+
+static inline bool try_to_freeze(void)
+{
+ if (!(current->flags & PF_NOFREEZE))
+ debug_check_no_locks_held();
+ return try_to_freeze_unsafe();
+}
+
+extern bool freeze_task(struct task_struct *p);
+extern bool set_freezable(void);
+
+#ifdef CONFIG_CGROUP_FREEZER
+extern bool cgroup_freezing(struct task_struct *task);
+#else /* !CONFIG_CGROUP_FREEZER */
+static inline bool cgroup_freezing(struct task_struct *task)
+{
+ return false;
+}
+#endif /* !CONFIG_CGROUP_FREEZER */
+
+/*
+ * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
+ * calls wait_for_completion(&vfork) and reset right after it returns from this
+ * function. Next, the parent should call try_to_freeze() to freeze itself
+ * appropriately in case the child has exited before the freezing of tasks is
+ * complete. However, we don't want kernel threads to be frozen in unexpected
+ * places, so we allow them to block freeze_processes() instead or to set
+ * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
+ * parent won't really block freeze_processes(), since ____call_usermodehelper()
+ * (the child) does a little before exec/exit and it can't be frozen before
+ * waking up the parent.
+ */
+
+
+/**
+ * freezer_do_not_count - tell freezer to ignore %current
+ *
+ * Tell freezers to ignore the current task when determining whether the
+ * target frozen state is reached. IOW, the current task will be
+ * considered frozen enough by freezers.
+ *
+ * The caller shouldn't do anything which isn't allowed for a frozen task
+ * until freezer_count() is called. Usually, a freezer_do_not_count() /
+ * freezer_count() pair wraps a scheduling operation and nothing much else.
+ */
+static inline void freezer_do_not_count(void)
+{
+ current->flags |= PF_FREEZER_SKIP;
+}
+
+/**
+ * freezer_count - tell freezer to stop ignoring %current
+ *
+ * Undo freezer_do_not_count(). It tells freezers that %current should be
+ * considered again and tries to freeze if freezing condition is already in
+ * effect.
+ */
+static inline void freezer_count(void)
+{
+ current->flags &= ~PF_FREEZER_SKIP;
+ /*
+ * If freezing is in progress, the following paired with smp_mb()
+ * in freezer_should_skip() ensures that either we see %true
+ * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
+ */
+ smp_mb();
+ try_to_freeze();
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezer_count_unsafe(void)
+{
+ current->flags &= ~PF_FREEZER_SKIP;
+ smp_mb();
+ try_to_freeze_unsafe();
+}
+
+/**
+ * freezer_should_skip - whether to skip a task when determining frozen
+ * state is reached
+ * @p: task in question
+ *
+ * This function is used by freezers after establishing %true freezing() to
+ * test whether a task should be skipped when determining the target frozen
+ * state is reached. IOW, if this function returns %true, @p is considered
+ * frozen enough.
+ */
+static inline bool freezer_should_skip(struct task_struct *p)
+{
+ /*
+ * The following smp_mb() paired with the one in freezer_count()
+ * ensures that either freezer_count() sees %true freezing() or we
+ * see cleared %PF_FREEZER_SKIP and return %false. This makes it
+ * impossible for a task to slip frozen state testing after
+ * clearing %PF_FREEZER_SKIP.
+ */
+ smp_mb();
+ return p->flags & PF_FREEZER_SKIP;
+}
+
+/*
+ * These functions are intended to be used whenever you want to allow a
+ * sleeping task to be frozen. Note that none of them returns any clear
+ * indication of whether a freeze event happened while in this function.
+ */
+
+/* Like schedule(), but should not block the freezer. */
+static inline void freezable_schedule(void)
+{
+ freezer_do_not_count();
+ schedule();
+ freezer_count();
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline void freezable_schedule_unsafe(void)
+{
+ freezer_do_not_count();
+ schedule();
+ freezer_count_unsafe();
+}
+
+/*
+ * Like schedule_timeout(), but should not block the freezer. Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout(long timeout)
+{
+ long __retval;
+ freezer_do_not_count();
+ __retval = schedule_timeout(timeout);
+ freezer_count();
+ return __retval;
+}
+
+/*
+ * Like schedule_timeout_interruptible(), but should not block the freezer. Do not
+ * call this with locks held.
+ */
+static inline long freezable_schedule_timeout_interruptible(long timeout)
+{
+ long __retval;
+ freezer_do_not_count();
+ __retval = schedule_timeout_interruptible(timeout);
+ freezer_count();
+ return __retval;
+}
+
+/* Like schedule_timeout_killable(), but should not block the freezer. */
+static inline long freezable_schedule_timeout_killable(long timeout)
+{
+ long __retval;
+ freezer_do_not_count();
+ __retval = schedule_timeout_killable(timeout);
+ freezer_count();
+ return __retval;
+}
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
+{
+ long __retval;
+ freezer_do_not_count();
+ __retval = schedule_timeout_killable(timeout);
+ freezer_count_unsafe();
+ return __retval;
+}
+
+/*
+ * Like schedule_hrtimeout_range(), but should not block the freezer. Do not
+ * call this with locks held.
+ */
+static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
+ unsigned long delta, const enum hrtimer_mode mode)
+{
+ int __retval;
+ freezer_do_not_count();
+ __retval = schedule_hrtimeout_range(expires, delta, mode);
+ freezer_count();
+ return __retval;
+}
+
+/*
+ * Freezer-friendly wrappers around wait_event_interruptible(),
+ * wait_event_killable() and wait_event_interruptible_timeout(), originally
+ * defined in <linux/wait.h>
+ */
+
+/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
+#define wait_event_freezekillable_unsafe(wq, condition) \
+({ \
+ int __retval; \
+ freezer_do_not_count(); \
+ __retval = wait_event_killable(wq, (condition)); \
+ freezer_count_unsafe(); \
+ __retval; \
+})
+
+#else /* !CONFIG_FREEZER */
+static inline bool frozen(struct task_struct *p) { return false; }
+static inline bool freezing(struct task_struct *p) { return false; }
+static inline void __thaw_task(struct task_struct *t) {}
+
+static inline bool __refrigerator(bool check_kthr_stop) { return false; }
+static inline int freeze_processes(void) { return -ENOSYS; }
+static inline int freeze_kernel_threads(void) { return -ENOSYS; }
+static inline void thaw_processes(void) {}
+static inline void thaw_kernel_threads(void) {}
+
+static inline bool try_to_freeze_nowarn(void) { return false; }
+static inline bool try_to_freeze(void) { return false; }
+
+static inline void freezer_do_not_count(void) {}
+static inline void freezer_count(void) {}
+static inline int freezer_should_skip(struct task_struct *p) { return 0; }
+static inline void set_freezable(void) {}
+
+#define freezable_schedule() schedule()
+
+#define freezable_schedule_unsafe() schedule()
+
+#define freezable_schedule_timeout(timeout) schedule_timeout(timeout)
+
+#define freezable_schedule_timeout_interruptible(timeout) \
+ schedule_timeout_interruptible(timeout)
+
+#define freezable_schedule_timeout_killable(timeout) \
+ schedule_timeout_killable(timeout)
+
+#define freezable_schedule_timeout_killable_unsafe(timeout) \
+ schedule_timeout_killable(timeout)
+
+#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
+ schedule_hrtimeout_range(expires, delta, mode)
+
+#define wait_event_freezekillable_unsafe(wq, condition) \
+ wait_event_killable(wq, condition)
+
+#endif /* !CONFIG_FREEZER */
+
+#endif /* FREEZER_H_INCLUDED */
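A sketch of a freezer-aware kernel thread loop built from the helpers above; kthread_should_stop() comes from <linux/kthread.h> and the work step is left abstract.

static int example_kthread(void *data)
{
	set_freezable();	/* kthreads are PF_NOFREEZE unless they opt in */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* park here while tasks are being frozen */

		/* ... do one unit of work ... */

		freezable_schedule_timeout_interruptible(HZ);
	}
	return 0;
}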
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
new file mode 100644
index 000000000..829326240
--- /dev/null
+++ b/include/linux/frontswap.h
@@ -0,0 +1,107 @@
+#ifndef _LINUX_FRONTSWAP_H
+#define _LINUX_FRONTSWAP_H
+
+#include <linux/swap.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+struct frontswap_ops {
+ void (*init)(unsigned);
+ int (*store)(unsigned, pgoff_t, struct page *);
+ int (*load)(unsigned, pgoff_t, struct page *);
+ void (*invalidate_page)(unsigned, pgoff_t);
+ void (*invalidate_area)(unsigned);
+};
+
+extern bool frontswap_enabled;
+extern struct frontswap_ops *
+ frontswap_register_ops(struct frontswap_ops *ops);
+extern void frontswap_shrink(unsigned long);
+extern unsigned long frontswap_curr_pages(void);
+extern void frontswap_writethrough(bool);
+#define FRONTSWAP_HAS_EXCLUSIVE_GETS
+extern void frontswap_tmem_exclusive_gets(bool);
+
+extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
+extern void __frontswap_init(unsigned type, unsigned long *map);
+extern int __frontswap_store(struct page *page);
+extern int __frontswap_load(struct page *page);
+extern void __frontswap_invalidate_page(unsigned, pgoff_t);
+extern void __frontswap_invalidate_area(unsigned);
+
+#ifdef CONFIG_FRONTSWAP
+#define frontswap_enabled (1)
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+ return __frontswap_test(sis, offset);
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+ unsigned long *map)
+{
+ p->frontswap_map = map;
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+ return p->frontswap_map;
+}
+#else
+/* all inline routines become no-ops and all externs are ignored */
+
+#define frontswap_enabled (0)
+
+static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
+{
+ return false;
+}
+
+static inline void frontswap_map_set(struct swap_info_struct *p,
+ unsigned long *map)
+{
+}
+
+static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
+{
+ return NULL;
+}
+#endif
+
+static inline int frontswap_store(struct page *page)
+{
+ int ret = -1;
+
+ if (frontswap_enabled)
+ ret = __frontswap_store(page);
+ return ret;
+}
+
+static inline int frontswap_load(struct page *page)
+{
+ int ret = -1;
+
+ if (frontswap_enabled)
+ ret = __frontswap_load(page);
+ return ret;
+}
+
+static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
+{
+ if (frontswap_enabled)
+ __frontswap_invalidate_page(type, offset);
+}
+
+static inline void frontswap_invalidate_area(unsigned type)
+{
+ if (frontswap_enabled)
+ __frontswap_invalidate_area(type);
+}
+
+static inline void frontswap_init(unsigned type, unsigned long *map)
+{
+ if (frontswap_enabled)
+ __frontswap_init(type, map);
+}
+
+#endif /* _LINUX_FRONTSWAP_H */
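The shape of a frontswap backend, as a sketch; backends such as zswap register a structure like this. All names here are hypothetical and the hooks are stubs.

static void example_init(unsigned type) { }

static int example_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* -1 tells the core to fall back to the swap device */
}

static int example_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* page not present in this backend */
}

static void example_invalidate_page(unsigned type, pgoff_t offset) { }
static void example_invalidate_area(unsigned type) { }

static struct frontswap_ops example_ops = {
	.init = example_init,
	.store = example_store,
	.load = example_load,
	.invalidate_page = example_invalidate_page,
	.invalidate_area = example_invalidate_area,
};
/* Hooked up at boot with frontswap_register_ops(&example_ops). */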
diff --git a/include/linux/fs.h b/include/linux/fs.h
new file mode 100644
index 000000000..5da04c852
--- /dev/null
+++ b/include/linux/fs.h
@@ -0,0 +1,3008 @@
+#ifndef _LINUX_FS_H
+#define _LINUX_FS_H
+
+
+#include <linux/linkage.h>
+#include <linux/wait.h>
+#include <linux/kdev_t.h>
+#include <linux/dcache.h>
+#include <linux/path.h>
+#include <linux/stat.h>
+#include <linux/cache.h>
+#include <linux/list.h>
+#include <linux/list_lru.h>
+#include <linux/llist.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/init.h>
+#include <linux/pid.h>
+#include <linux/bug.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/capability.h>
+#include <linux/semaphore.h>
+#include <linux/fiemap.h>
+#include <linux/rculist_bl.h>
+#include <linux/atomic.h>
+#include <linux/shrinker.h>
+#include <linux/migrate_mode.h>
+#include <linux/uidgid.h>
+#include <linux/lockdep.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/blk_types.h>
+
+#include <asm/byteorder.h>
+#include <uapi/linux/fs.h>
+
+struct backing_dev_info;
+struct export_operations;
+struct hd_geometry;
+struct iovec;
+struct nameidata;
+struct kiocb;
+struct kobject;
+struct pipe_inode_info;
+struct poll_table_struct;
+struct kstatfs;
+struct vm_area_struct;
+struct vfsmount;
+struct cred;
+struct swap_info_struct;
+struct seq_file;
+struct workqueue_struct;
+struct iov_iter;
+struct vm_fault;
+
+extern void __init inode_init(void);
+extern void __init inode_init_early(void);
+extern void __init files_init(unsigned long);
+
+extern struct files_stat_struct files_stat;
+extern unsigned long get_max_files(void);
+extern int sysctl_nr_open;
+extern struct inodes_stat_t inodes_stat;
+extern int leases_enable, lease_break_time;
+extern int sysctl_protected_symlinks;
+extern int sysctl_protected_hardlinks;
+
+struct buffer_head;
+typedef int (get_block_t)(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh_result, int create);
+typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+ ssize_t bytes, void *private);
+
+#define MAY_EXEC 0x00000001
+#define MAY_WRITE 0x00000002
+#define MAY_READ 0x00000004
+#define MAY_APPEND 0x00000008
+#define MAY_ACCESS 0x00000010
+#define MAY_OPEN 0x00000020
+#define MAY_CHDIR 0x00000040
+/* called from RCU mode, don't block */
+#define MAY_NOT_BLOCK 0x00000080
+
+/*
+ * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond
+ * to O_WRONLY and O_RDWR via the strange trick in __dentry_open()
+ */
+
+/* file is open for reading */
+#define FMODE_READ ((__force fmode_t)0x1)
+/* file is open for writing */
+#define FMODE_WRITE ((__force fmode_t)0x2)
+/* file is seekable */
+#define FMODE_LSEEK ((__force fmode_t)0x4)
+/* file can be accessed using pread */
+#define FMODE_PREAD ((__force fmode_t)0x8)
+/* file can be accessed using pwrite */
+#define FMODE_PWRITE ((__force fmode_t)0x10)
+/* File is opened for execution with sys_execve / sys_uselib */
+#define FMODE_EXEC ((__force fmode_t)0x20)
+/* File is opened with O_NDELAY (only set for block devices) */
+#define FMODE_NDELAY ((__force fmode_t)0x40)
+/* File is opened with O_EXCL (only set for block devices) */
+#define FMODE_EXCL ((__force fmode_t)0x80)
+/* File is opened using open(.., 3, ..) and is writable only for ioctls
+   (special hack for floppy.c) */
+#define FMODE_WRITE_IOCTL ((__force fmode_t)0x100)
+/* 32bit hashes as llseek() offset (for directories) */
+#define FMODE_32BITHASH ((__force fmode_t)0x200)
+/* 64bit hashes as llseek() offset (for directories) */
+#define FMODE_64BITHASH ((__force fmode_t)0x400)
+
+/*
+ * Don't update ctime and mtime.
+ *
+ * Currently a special hack for the XFS open_by_handle ioctl, but we'll
+ * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
+ */
+#define FMODE_NOCMTIME ((__force fmode_t)0x800)
+
+/* Expect random access pattern */
+#define FMODE_RANDOM ((__force fmode_t)0x1000)
+
+/* File is huge (eg. /dev/kmem): treat loff_t as unsigned */
+#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000)
+
+/* File is opened with O_PATH; almost nothing can be done with it */
+#define FMODE_PATH ((__force fmode_t)0x4000)
+
+/* File needs atomic accesses to f_pos */
+#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
+/* Write access to underlying fs */
+#define FMODE_WRITER ((__force fmode_t)0x10000)
+/* Has read method(s) */
+#define FMODE_CAN_READ ((__force fmode_t)0x20000)
+/* Has write method(s) */
+#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+
+/* File was opened by fanotify and shouldn't generate fanotify events */
+#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
+
+/*
+ * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
+ * that indicates that they should check the contents of the iovec are
+ * valid, but not check the memory that the iovec elements
+ * point to.
+ */
+#define CHECK_IOVEC_ONLY -1
+
+/*
+ * The below are the various read and write types that we support. Some of
+ * them include behavioral modifiers that send information down to the
+ * block layer and IO scheduler. Terminology:
+ *
+ * The block layer uses device plugging to defer IO a little bit, in
+ * the hope that we will see more IO very shortly. This increases
+ * coalescing of adjacent IO and thus reduces the number of IOs we
+ * have to send to the device. It also allows for better queuing,
+ * if the IO isn't mergeable. If the caller is going to be waiting
+ * for the IO, then he must ensure that the device is unplugged so
+ * that the IO is dispatched to the driver.
+ *
+ * All IO is handled async in Linux. This is fine for background
+ * writes, but for reads or writes that someone waits for completion
+ * on, we want to notify the block layer and IO scheduler so that they
+ * know about it. That allows them to make better scheduling
+ * decisions. So when the below references 'sync' and 'async', it
+ * is referencing this priority hint.
+ *
+ * With that in mind, the available types are:
+ *
+ * READ A normal read operation. Device will be plugged.
+ * READ_SYNC A synchronous read. Device is not plugged, caller can
+ * immediately wait on this read without caring about
+ * unplugging.
+ * READA Used for read-ahead operations. Lower priority, and the
+ * block layer could (in theory) choose to ignore this
+ * request if it runs into resource problems.
+ * WRITE A normal async write. Device will be plugged.
+ * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
+ * the hint that someone will be waiting on this IO
+ * shortly. The write equivalent of READ_SYNC.
+ * WRITE_ODIRECT Special case write for O_DIRECT only.
+ * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
+ * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
+ * non-volatile media on completion.
+ * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded
+ * by a cache flush and data is guaranteed to be on
+ * non-volatile media on completion.
+ *
+ */
+#define RW_MASK REQ_WRITE
+#define RWA_MASK REQ_RAHEAD
+
+#define READ 0
+#define WRITE RW_MASK
+#define READA RWA_MASK
+
+#define READ_SYNC (READ | REQ_SYNC)
+#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
+#define WRITE_ODIRECT (WRITE | REQ_SYNC)
+#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
+#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
+#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+
+/*
+ * Attribute flags. These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE (1 << 0)
+#define ATTR_UID (1 << 1)
+#define ATTR_GID (1 << 2)
+#define ATTR_SIZE (1 << 3)
+#define ATTR_ATIME (1 << 4)
+#define ATTR_MTIME (1 << 5)
+#define ATTR_CTIME (1 << 6)
+#define ATTR_ATIME_SET (1 << 7)
+#define ATTR_MTIME_SET (1 << 8)
+#define ATTR_FORCE	(1 << 9) /* Not a change, but force the change anyway */
+#define ATTR_ATTR_FLAG (1 << 10)
+#define ATTR_KILL_SUID (1 << 11)
+#define ATTR_KILL_SGID (1 << 12)
+#define ATTR_FILE (1 << 13)
+#define ATTR_KILL_PRIV (1 << 14)
+#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
+#define ATTR_TIMES_SET (1 << 16)
+
+/*
+ * Whiteout is represented by a char device. The following constants define the
+ * mode and device number to use.
+ */
+#define WHITEOUT_MODE 0
+#define WHITEOUT_DEV 0
+
+/*
+ * This is the Inode Attributes structure, used for notify_change(). It
+ * uses the above definitions as flags, to know which values have changed.
+ * Also, in this manner, a Filesystem can look at only the values it cares
+ * about. Basically, these are the attributes that the VFS layer can
+ * request to change from the FS layer.
+ *
+ * Derek Atkins <warlord@MIT.EDU> 94-10-20
+ */
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ kuid_t ia_uid;
+ kgid_t ia_gid;
+ loff_t ia_size;
+ struct timespec ia_atime;
+ struct timespec ia_mtime;
+ struct timespec ia_ctime;
+
+ /*
+ * Not an attribute, but an auxiliary info for filesystems wanting to
+ * implement an ftruncate() like method. NOTE: filesystem should
+ * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
+ */
+ struct file *ia_file;
+};
+
+/*
+ * Includes for diskquotas.
+ */
+#include <linux/quota.h>
+
+/*
+ * Maximum number of layers of fs stack. Needs to be limited to
+ * prevent kernel stack overflow
+ */
+#define FILESYSTEM_MAX_STACK_DEPTH 2
+
+/**
+ * enum positive_aop_returns - aop return codes with specific semantics
+ *
+ * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
+ * completed, that the page is still locked, and
+ * should be considered active. The VM uses this hint
+ * to return the page to the active list -- it won't
+ * be a candidate for writeback again in the near
+ * future. Other callers must be careful to unlock
+ * the page if they get this return. Returned by
+ * writepage();
+ *
+ * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
+ * unlocked it and the page might have been truncated.
+ * The caller should back up to acquiring a new page and
+ * trying again. The aop will be taking reasonable
+ * precautions not to livelock. If the caller held a page
+ * reference, it should drop it before retrying. Returned
+ * by readpage().
+ *
+ * address_space_operation functions return these large constants to indicate
+ * special semantics to the caller. These are much larger than the bytes in a
+ * page to allow for functions that return the number of bytes operated on in a
+ * given page.
+ */
+
+enum positive_aop_returns {
+ AOP_WRITEPAGE_ACTIVATE = 0x80000,
+ AOP_TRUNCATED_PAGE = 0x80001,
+};
+
+#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
+#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
+#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
+ * helper code (eg buffer layer)
+ * to clear GFP_FS from alloc */
+
+/*
+ * oh the beauties of C type declarations.
+ */
+struct page;
+struct address_space;
+struct writeback_control;
+
+#define IOCB_EVENTFD (1 << 0)
+#define IOCB_APPEND (1 << 1)
+#define IOCB_DIRECT (1 << 2)
+
+struct kiocb {
+ struct file *ki_filp;
+ loff_t ki_pos;
+ void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
+ void *private;
+ int ki_flags;
+};
+
+static inline bool is_sync_kiocb(struct kiocb *kiocb)
+{
+ return kiocb->ki_complete == NULL;
+}
+
+static inline int iocb_flags(struct file *file);
+
+static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
+{
+ *kiocb = (struct kiocb) {
+ .ki_filp = filp,
+ .ki_flags = iocb_flags(filp),
+ };
+}
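A sketch of the synchronous I/O pattern this supports, modeled on the VFS's own sync read helper; ->read_iter and struct iov_iter are assumed to be available in this kernel, and the function name is hypothetical.

static ssize_t example_sync_read(struct file *filp, struct iov_iter *iter,
				 loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);	/* ki_complete == NULL: synchronous */
	kiocb.ki_pos = *ppos;

	ret = filp->f_op->read_iter(&kiocb, iter);
	if (ret > 0)
		*ppos = kiocb.ki_pos;
	return ret;
}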
+
+/*
+ * "descriptor" for what we're up to with a read.
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+ size_t written;
+ size_t count;
+ union {
+ char __user *buf;
+ void *data;
+ } arg;
+ int error;
+} read_descriptor_t;
+
+typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
+ unsigned long, unsigned long);
+
+struct address_space_operations {
+ int (*writepage)(struct page *page, struct writeback_control *wbc);
+ int (*readpage)(struct file *, struct page *);
+
+ /* Write back some dirty pages from this mapping. */
+ int (*writepages)(struct address_space *, struct writeback_control *);
+
+ /* Set a page dirty. Return true if this dirtied it */
+ int (*set_page_dirty)(struct page *page);
+
+ int (*readpages)(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages);
+
+ int (*write_begin)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+ int (*write_end)(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+
+ /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
+ sector_t (*bmap)(struct address_space *, sector_t);
+ void (*invalidatepage) (struct page *, unsigned int, unsigned int);
+ int (*releasepage) (struct page *, gfp_t);
+ void (*freepage)(struct page *);
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+ /*
+ * migrate the contents of a page to the specified target. If
+ * migrate_mode is MIGRATE_ASYNC, it must not block.
+ */
+ int (*migratepage) (struct address_space *,
+ struct page *, struct page *, enum migrate_mode);
+ int (*launder_page) (struct page *);
+ int (*is_partially_uptodate) (struct page *, unsigned long,
+ unsigned long);
+ void (*is_dirty_writeback) (struct page *, bool *, bool *);
+ int (*error_remove_page)(struct address_space *, struct page *);
+
+ /* swapfile support */
+ int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
+ sector_t *span);
+ void (*swap_deactivate)(struct file *file);
+};
+
+extern const struct address_space_operations empty_aops;
+
+/*
+ * pagecache_write_begin/pagecache_write_end must be used by general code
+ * to write into the pagecache.
+ */
+int pagecache_write_begin(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+
+int pagecache_write_end(struct file *, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+
+struct address_space {
+ struct inode *host; /* owner: inode, block_device */
+ struct radix_tree_root page_tree; /* radix tree of all pages */
+ spinlock_t tree_lock; /* and lock protecting it */
+ atomic_t i_mmap_writable;/* count VM_SHARED mappings */
+ struct rb_root i_mmap; /* tree of private and shared mappings */
+ struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
+ /* Protected by tree_lock together with the radix tree */
+ unsigned long nrpages; /* number of total pages */
+ unsigned long nrshadows; /* number of shadow entries */
+ pgoff_t writeback_index;/* writeback starts here */
+ const struct address_space_operations *a_ops; /* methods */
+ unsigned long flags; /* error bits/gfp mask */
+ spinlock_t private_lock; /* for use by the address_space */
+ struct list_head private_list; /* ditto */
+ void *private_data; /* ditto */
+} __attribute__((aligned(sizeof(long))));
+ /*
+ * On most architectures that alignment is already the case; but
+ * must be enforced here for CRIS, to let the least significant bit
+ * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
+ */
+struct request_queue;
+
+struct block_device {
+ dev_t bd_dev; /* not a kdev_t - it's a search key */
+ int bd_openers;
+ struct inode * bd_inode; /* will die */
+ struct super_block * bd_super;
+ struct mutex bd_mutex; /* open/close mutex */
+ struct list_head bd_inodes;
+ void * bd_claiming;
+ void * bd_holder;
+ int bd_holders;
+ bool bd_write_holder;
+#ifdef CONFIG_SYSFS
+ struct list_head bd_holder_disks;
+#endif
+ struct block_device * bd_contains;
+ unsigned bd_block_size;
+ struct hd_struct * bd_part;
+ /* number of times partitions within this device have been opened. */
+ unsigned bd_part_count;
+ int bd_invalidated;
+ struct gendisk * bd_disk;
+ struct request_queue * bd_queue;
+ struct list_head bd_list;
+ /*
+ * Private data. You must have bd_claim'ed the block_device
+ * to use this. NOTE: bd_claim allows an owner to claim
+ * the same device multiple times, the owner must take special
+ * care to not mess up bd_private for that case.
+ */
+ unsigned long bd_private;
+
+ /* The counter of freeze processes */
+ int bd_fsfreeze_count;
+ /* Mutex for freeze */
+ struct mutex bd_fsfreeze_mutex;
+};
+
+/*
+ * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
+ * radix trees
+ */
+#define PAGECACHE_TAG_DIRTY 0
+#define PAGECACHE_TAG_WRITEBACK 1
+#define PAGECACHE_TAG_TOWRITE 2
+
+int mapping_tagged(struct address_space *mapping, int tag);
+
+static inline void i_mmap_lock_write(struct address_space *mapping)
+{
+ down_write(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_unlock_write(struct address_space *mapping)
+{
+ up_write(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_lock_read(struct address_space *mapping)
+{
+ down_read(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_unlock_read(struct address_space *mapping)
+{
+ up_read(&mapping->i_mmap_rwsem);
+}
+
+/*
+ * Might pages of this file be mapped into userspace?
+ */
+static inline int mapping_mapped(struct address_space *mapping)
+{
+ return !RB_EMPTY_ROOT(&mapping->i_mmap);
+}
+
+/*
+ * Might pages of this file have been modified in userspace?
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * marks a vma as VM_SHARED if it is shared and the file was opened for
+ * writing, i.e. the vma may be mprotected writable even if it is now
+ * read-only.
+ *
+ * If i_mmap_writable is negative, no new writable mappings are allowed. You
+ * can only deny writable mappings if none exist right now.
+ */
+static inline int mapping_writably_mapped(struct address_space *mapping)
+{
+ return atomic_read(&mapping->i_mmap_writable) > 0;
+}
+
+static inline int mapping_map_writable(struct address_space *mapping)
+{
+ return atomic_inc_unless_negative(&mapping->i_mmap_writable) ?
+ 0 : -EPERM;
+}
+
+static inline void mapping_unmap_writable(struct address_space *mapping)
+{
+ atomic_dec(&mapping->i_mmap_writable);
+}
+
+static inline int mapping_deny_writable(struct address_space *mapping)
+{
+ return atomic_dec_unless_positive(&mapping->i_mmap_writable) ?
+ 0 : -EBUSY;
+}
+
+static inline void mapping_allow_writable(struct address_space *mapping)
+{
+ atomic_inc(&mapping->i_mmap_writable);
+}
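A sketch of the deny/allow pairing described above; the helper name and the critical section are hypothetical.

static int example_forbid_new_writable_mmaps(struct address_space *mapping)
{
	int err = mapping_deny_writable(mapping);

	if (err)
		return err;	/* -EBUSY: writable mappings already exist */

	/* ... no new VM_SHARED writable mmaps can appear here ... */

	mapping_allow_writable(mapping);
	return 0;
}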
+
+/*
+ * Use sequence counter to get consistent i_size on 32-bit processors.
+ */
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#include <linux/seqlock.h>
+#define __NEED_I_SIZE_ORDERED
+#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
+#else
+#define i_size_ordered_init(inode) do { } while (0)
+#endif
+
+struct posix_acl;
+#define ACL_NOT_CACHED ((void *)(-1))
+
+#define IOP_FASTPERM 0x0001
+#define IOP_LOOKUP 0x0002
+#define IOP_NOFOLLOW 0x0004
+
+/*
+ * Keep mostly read-only and often accessed (especially for
+ * the RCU path lookup and 'stat' data) fields at the beginning
+ * of the 'struct inode'
+ */
+struct inode {
+ umode_t i_mode;
+ unsigned short i_opflags;
+ kuid_t i_uid;
+ kgid_t i_gid;
+ unsigned int i_flags;
+
+#ifdef CONFIG_FS_POSIX_ACL
+ struct posix_acl *i_acl;
+ struct posix_acl *i_default_acl;
+#endif
+
+ const struct inode_operations *i_op;
+ struct super_block *i_sb;
+ struct address_space *i_mapping;
+
+#ifdef CONFIG_SECURITY
+ void *i_security;
+#endif
+
+ /* Stat data, not accessed from path walking */
+ unsigned long i_ino;
+ /*
+ * Filesystems may only read i_nlink directly. They shall use the
+ * following functions for modification:
+ *
+ * (set|clear|inc|drop)_nlink
+ * inode_(inc|dec)_link_count
+ */
+ union {
+ const unsigned int i_nlink;
+ unsigned int __i_nlink;
+ };
+ dev_t i_rdev;
+ loff_t i_size;
+ struct timespec i_atime;
+ struct timespec i_mtime;
+ struct timespec i_ctime;
+ spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
+ unsigned short i_bytes;
+ unsigned int i_blkbits;
+ blkcnt_t i_blocks;
+
+#ifdef __NEED_I_SIZE_ORDERED
+ seqcount_t i_size_seqcount;
+#endif
+
+ /* Misc */
+ unsigned long i_state;
+ struct mutex i_mutex;
+
+ unsigned long dirtied_when; /* jiffies of first dirtying */
+ unsigned long dirtied_time_when;
+
+ struct hlist_node i_hash;
+ struct list_head i_wb_list; /* backing dev IO list */
+ struct list_head i_lru; /* inode LRU list */
+ struct list_head i_sb_list;
+ union {
+ struct hlist_head i_dentry;
+ struct rcu_head i_rcu;
+ };
+ u64 i_version;
+ atomic_t i_count;
+ atomic_t i_dio_count;
+ atomic_t i_writecount;
+#ifdef CONFIG_IMA
+ atomic_t i_readcount; /* struct files open RO */
+#endif
+ const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+ struct file_lock_context *i_flctx;
+ struct address_space i_data;
+ struct list_head i_devices;
+ union {
+ struct pipe_inode_info *i_pipe;
+ struct block_device *i_bdev;
+ struct cdev *i_cdev;
+ };
+
+ __u32 i_generation;
+
+#ifdef CONFIG_FSNOTIFY
+ __u32 i_fsnotify_mask; /* all events this inode cares about */
+ struct hlist_head i_fsnotify_marks;
+#endif
+
+ void *i_private; /* fs or device private pointer */
+};
+
+static inline int inode_unhashed(struct inode *inode)
+{
+ return hlist_unhashed(&inode->i_hash);
+}
+
+/*
+ * inode->i_mutex nesting subclasses for the lock validator:
+ *
+ * 0: the object of the current VFS operation
+ * 1: parent
+ * 2: child/target
+ * 3: xattr
+ * 4: second non-directory
+ * 5: second parent (when locking independent directories in rename)
+ *
+ * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
+ * non-directories at once.
+ *
+ * The locking order between these classes is
+ * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
+ */
+enum inode_i_mutex_lock_class
+{
+ I_MUTEX_NORMAL,
+ I_MUTEX_PARENT,
+ I_MUTEX_CHILD,
+ I_MUTEX_XATTR,
+ I_MUTEX_NONDIR2,
+ I_MUTEX_PARENT2,
+};
+
+void lock_two_nondirectories(struct inode *, struct inode*);
+void unlock_two_nondirectories(struct inode *, struct inode*);
+
+/*
+ * NOTE: on a 32-bit arch with a preemptible kernel and a UP build,
+ * i_size_read/write must be atomic with respect to the local cpu
+ * (unlike with preemption disabled), but they don't need to be atomic
+ * with respect to other cpus as in true SMP (so they either need to
+ * locally disable irqs around the read, or, for example on x86, they
+ * can still be implemented as a cmpxchg8b without needing the lock
+ * prefix). For SMP builds and 64-bit archs it makes no difference
+ * whether preemption is enabled or not.
+ */
+static inline loff_t i_size_read(const struct inode *inode)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ loff_t i_size;
+ unsigned int seq;
+
+ do {
+ seq = read_seqcount_begin(&inode->i_size_seqcount);
+ i_size = inode->i_size;
+ } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
+ return i_size;
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+ loff_t i_size;
+
+ preempt_disable();
+ i_size = inode->i_size;
+ preempt_enable();
+ return i_size;
+#else
+ return inode->i_size;
+#endif
+}
+
+/*
+ * NOTE: unlike i_size_read(), i_size_write() does need locking around it
+ * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
+ * can be lost, resulting in subsequent i_size_read() calls spinning forever.
+ */
+static inline void i_size_write(struct inode *inode, loff_t i_size)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ preempt_disable();
+ write_seqcount_begin(&inode->i_size_seqcount);
+ inode->i_size = i_size;
+ write_seqcount_end(&inode->i_size_seqcount);
+ preempt_enable();
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+ preempt_disable();
+ inode->i_size = i_size;
+ preempt_enable();
+#else
+ inode->i_size = i_size;
+#endif
+}
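+
+/*
+ * Illustrative sketch (not part of this header): updating the file size with
+ * the serialization i_size_write() requires.  The helper name is
+ * hypothetical; write paths normally hold i_mutex already.
+ *
+ *	static void example_extend(struct inode *inode, loff_t new_size)
+ *	{
+ *		mutex_lock(&inode->i_mutex);
+ *		if (new_size > i_size_read(inode))
+ *			i_size_write(inode, new_size);
+ *		mutex_unlock(&inode->i_mutex);
+ *	}
+ */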
+
+/* Helper functions so that in most cases filesystems will
+ * not need to deal directly with kuid_t and kgid_t and can
+ * instead deal with the raw numeric values that are stored
+ * in the filesystem.
+ */
+static inline uid_t i_uid_read(const struct inode *inode)
+{
+ return from_kuid(&init_user_ns, inode->i_uid);
+}
+
+static inline gid_t i_gid_read(const struct inode *inode)
+{
+ return from_kgid(&init_user_ns, inode->i_gid);
+}
+
+static inline void i_uid_write(struct inode *inode, uid_t uid)
+{
+ inode->i_uid = make_kuid(&init_user_ns, uid);
+}
+
+static inline void i_gid_write(struct inode *inode, gid_t gid)
+{
+ inode->i_gid = make_kgid(&init_user_ns, gid);
+}
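+
+/*
+ * Illustrative sketch (not part of this header): a filesystem copying raw
+ * on-disk owner ids into the in-core inode with the helpers above.  The
+ * struct and function names are hypothetical.
+ *
+ *	struct example_disk_inode {
+ *		__le32 d_uid;
+ *		__le32 d_gid;
+ *	};
+ *
+ *	static void example_read_owner(struct inode *inode,
+ *				       const struct example_disk_inode *raw)
+ *	{
+ *		i_uid_write(inode, le32_to_cpu(raw->d_uid));
+ *		i_gid_write(inode, le32_to_cpu(raw->d_gid));
+ *	}
+ */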
+
+static inline unsigned iminor(const struct inode *inode)
+{
+ return MINOR(inode->i_rdev);
+}
+
+static inline unsigned imajor(const struct inode *inode)
+{
+ return MAJOR(inode->i_rdev);
+}
+
+extern struct block_device *I_BDEV(struct inode *inode);
+
+struct fown_struct {
+ rwlock_t lock; /* protects pid, uid, euid fields */
+ struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
+ enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
+ kuid_t uid, euid; /* uid/euid of process setting the owner */
+ int signum; /* posix.1b rt signal to be delivered on IO */
+};
+
+/*
+ * Track a single file's readahead state
+ */
+struct file_ra_state {
+ pgoff_t start; /* where readahead started */
+ unsigned int size; /* # of readahead pages */
+ unsigned int async_size; /* do asynchronous readahead when
+ there are only # of pages ahead */
+
+ unsigned int ra_pages; /* Maximum readahead window */
+ unsigned int mmap_miss; /* Cache miss stat for mmap accesses */
+ loff_t prev_pos; /* Cache last read() position */
+};
+
+/*
+ * Check if @index falls in the readahead window.
+ */
+static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
+{
+ return (index >= ra->start &&
+ index < ra->start + ra->size);
+}
+
+struct file {
+ union {
+ struct llist_node fu_llist;
+ struct rcu_head fu_rcuhead;
+ } f_u;
+ struct path f_path;
+ struct inode *f_inode; /* cached value */
+ const struct file_operations *f_op;
+
+ /*
+ * Protects f_ep_links, f_flags.
+ * Must not be taken from IRQ context.
+ */
+ spinlock_t f_lock;
+ atomic_long_t f_count;
+ unsigned int f_flags;
+ fmode_t f_mode;
+ struct mutex f_pos_lock;
+ loff_t f_pos;
+ struct fown_struct f_owner;
+ const struct cred *f_cred;
+ struct file_ra_state f_ra;
+
+ u64 f_version;
+#ifdef CONFIG_SECURITY
+ void *f_security;
+#endif
+ /* needed for tty driver, and maybe others */
+ void *private_data;
+
+#ifdef CONFIG_EPOLL
+ /* Used by fs/eventpoll.c to link all the hooks to this file */
+ struct list_head f_ep_links;
+ struct list_head f_tfile_llink;
+#endif /* #ifdef CONFIG_EPOLL */
+ struct address_space *f_mapping;
+} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
+
+struct file_handle {
+ __u32 handle_bytes;
+ int handle_type;
+ /* file identifier */
+ unsigned char f_handle[0];
+};
+
+static inline struct file *get_file(struct file *f)
+{
+ atomic_long_inc(&f->f_count);
+ return f;
+}
+#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
+#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
+#define file_count(x) atomic_long_read(&(x)->f_count)
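+
+/*
+ * Illustrative sketch (not part of this header): taking and dropping an
+ * extra reference on a struct file.  Assumes fput(), which is declared
+ * elsewhere in this header; the function name is hypothetical.
+ *
+ *	static void example_borrow_file(struct file *filp)
+ *	{
+ *		struct file *ref = get_file(filp);
+ *
+ *		... use ref while holding the extra reference ...
+ *		fput(ref);
+ *	}
+ */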
+
+#define MAX_NON_LFS ((1UL<<31) - 1)
+
+/* Page cache limit. Filesystems should put this into their s_maxbytes
+   limits, otherwise bad things can happen in the VM. */
+#if BITS_PER_LONG==32
+#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#elif BITS_PER_LONG==64
+#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
+#endif
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_DELEG 4 /* NFSv4 delegation */
+#define FL_ACCESS 8 /* not trying to lock, just looking */
+#define FL_EXISTS 16 /* when unlocking, test for existence */
+#define FL_LEASE 32 /* lease held on this file */
+#define FL_CLOSE 64 /* unlock on close */
+#define FL_SLEEP 128 /* A blocking lock */
+#define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */
+#define FL_UNLOCK_PENDING 512 /* Lease is being broken */
+#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
+#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+
+/*
+ * Special return value from posix_lock_file() and vfs_lock_file() for
+ * asynchronous locking.
+ */
+#define FILE_LOCK_DEFERRED 1
+
+/* legacy typedef, should eventually be removed */
+typedef void *fl_owner_t;
+
+struct file_lock;
+
+struct file_lock_operations {
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+ void (*fl_release_private)(struct file_lock *);
+};
+
+struct lock_manager_operations {
+ int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
+ unsigned long (*lm_owner_key)(struct file_lock *);
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
+ void (*lm_put_owner)(fl_owner_t);
+ void (*lm_notify)(struct file_lock *); /* unblock callback */
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_break)(struct file_lock *);
+ int (*lm_change)(struct file_lock *, int, struct list_head *);
+ void (*lm_setup)(struct file_lock *, void **);
+};
+
+struct lock_manager {
+ struct list_head list;
+};
+
+struct net;
+void locks_start_grace(struct net *, struct lock_manager *);
+void locks_end_grace(struct lock_manager *);
+int locks_in_grace(struct net *);
+
+/* that will die - we need it for nfs_lock_info */
+#include <linux/nfs_fs_i.h>
+
+/*
+ * struct file_lock represents a generic "file lock". It's used to represent
+ * POSIX byte range locks, BSD (flock) locks, and leases. It's important to
+ * note that the same struct is used to represent both a request for a lock and
+ * the lock itself, but the same object is never used for both.
+ *
+ * FIXME: should we create a separate "struct lock_request" to help distinguish
+ * these two uses?
+ *
+ * The various i_flctx lists are ordered by:
+ *
+ * 1) lock owner
+ * 2) lock range start
+ * 3) lock range end
+ *
+ * Obviously, the last two criteria only matter for POSIX locks.
+ */
+struct file_lock {
+ struct file_lock *fl_next; /* singly linked list for this inode */
+ struct list_head fl_list; /* link into file_lock_context */
+ struct hlist_node fl_link; /* node in global lists */
+ struct list_head fl_block; /* circular list of blocked processes */
+ fl_owner_t fl_owner;
+ unsigned int fl_flags;
+ unsigned char fl_type;
+ unsigned int fl_pid;
+ int fl_link_cpu; /* what cpu's list is this on? */
+ struct pid *fl_nspid;
+ wait_queue_head_t fl_wait;
+ struct file *fl_file;
+ loff_t fl_start;
+ loff_t fl_end;
+
+ struct fasync_struct * fl_fasync; /* for lease break notifications */
+ /* for lease breaks: */
+ unsigned long fl_break_time;
+ unsigned long fl_downgrade_time;
+
+ const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
+ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
+ union {
+ struct nfs_lock_info nfs_fl;
+ struct nfs4_lock_info nfs4_fl;
+ struct {
+ struct list_head link; /* link in AFS vnode's pending_locks list */
+ int state; /* state of grant or error if -ve */
+ } afs;
+ } fl_u;
+};
+
+struct file_lock_context {
+ spinlock_t flc_lock;
+ struct list_head flc_flock;
+ struct list_head flc_posix;
+ struct list_head flc_lease;
+};
+
+/* The following constant reflects the upper bound of the file/locking space */
+#ifndef OFFSET_MAX
+#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
+#define OFFSET_MAX INT_LIMIT(loff_t)
+#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
+#endif
+
+#include <linux/fcntl.h>
+
+extern void send_sigio(struct fown_struct *fown, int fd, int band);
+
+#ifdef CONFIG_FILE_LOCKING
+extern int fcntl_getlk(struct file *, unsigned int, struct flock __user *);
+extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
+ struct flock __user *);
+
+#if BITS_PER_LONG == 32
+extern int fcntl_getlk64(struct file *, unsigned int, struct flock64 __user *);
+extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
+ struct flock64 __user *);
+#endif
+
+extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
+extern int fcntl_getlease(struct file *filp);
+
+/* fs/locks.c */
+void locks_free_lock_context(struct file_lock_context *ctx);
+void locks_free_lock(struct file_lock *fl);
+extern void locks_init_lock(struct file_lock *);
+extern struct file_lock * locks_alloc_lock(void);
+extern void locks_copy_lock(struct file_lock *, struct file_lock *);
+extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
+extern void locks_remove_posix(struct file *, fl_owner_t);
+extern void locks_remove_file(struct file *);
+extern void locks_release_private(struct file_lock *);
+extern void posix_test_lock(struct file *, struct file_lock *);
+extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+extern int posix_lock_file_wait(struct file *, struct file_lock *);
+extern int posix_unblock_lock(struct file_lock *);
+extern int vfs_test_lock(struct file *, struct file_lock *);
+extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+extern void lease_get_mtime(struct inode *, struct timespec *time);
+extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
+extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
+extern int lease_modify(struct file_lock *, int, struct list_head *);
+struct files_struct;
+extern void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files);
+#else /* !CONFIG_FILE_LOCKING */
+static inline int fcntl_getlk(struct file *file, unsigned int cmd,
+ struct flock __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock __user *user)
+{
+ return -EACCES;
+}
+
+#if BITS_PER_LONG == 32
+static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
+ struct flock64 __user *user)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_setlk64(unsigned int fd, struct file *file,
+ unsigned int cmd, struct flock64 __user *user)
+{
+ return -EACCES;
+}
+#endif
+static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+{
+ return -EINVAL;
+}
+
+static inline int fcntl_getlease(struct file *filp)
+{
+ return F_UNLCK;
+}
+
+static inline void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+}
+
+static inline void locks_init_lock(struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+ return;
+}
+
+static inline void locks_remove_posix(struct file *filp, fl_owner_t owner)
+{
+ return;
+}
+
+static inline void locks_remove_file(struct file *filp)
+{
+ return;
+}
+
+static inline void posix_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return;
+}
+
+static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ struct file_lock *conflock)
+{
+ return -ENOLCK;
+}
+
+static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+{
+ return -ENOLCK;
+}
+
+static inline int posix_unblock_lock(struct file_lock *waiter)
+{
+ return -ENOENT;
+}
+
+static inline int vfs_test_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int vfs_lock_file(struct file *filp, unsigned int cmd,
+ struct file_lock *fl, struct file_lock *conf)
+{
+ return -ENOLCK;
+}
+
+static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+{
+ return 0;
+}
+
+static inline int flock_lock_file_wait(struct file *filp,
+ struct file_lock *request)
+{
+ return -ENOLCK;
+}
+
+static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
+{
+ return 0;
+}
+
+static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
+{
+ return;
+}
+
+static inline int generic_setlease(struct file *filp, long arg,
+ struct file_lock **flp, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int vfs_setlease(struct file *filp, long arg,
+ struct file_lock **lease, void **priv)
+{
+ return -EINVAL;
+}
+
+static inline int lease_modify(struct file_lock *fl, int arg,
+ struct list_head *dispose)
+{
+ return -EINVAL;
+}
+
+struct files_struct;
+static inline void show_fd_locks(struct seq_file *f,
+ struct file *filp, struct files_struct *files) {}
+#endif /* !CONFIG_FILE_LOCKING */
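+
+/*
+ * Illustrative sketch (not part of this header): probing for a conflicting
+ * write lock over a byte range, roughly what the F_GETLK path does.  Owner
+ * handling is simplified and the function name is hypothetical.
+ *
+ *	static int example_would_block(struct file *filp, loff_t start, loff_t end)
+ *	{
+ *		struct file_lock fl;
+ *		int err;
+ *
+ *		locks_init_lock(&fl);
+ *		fl.fl_flags = FL_POSIX;
+ *		fl.fl_type = F_WRLCK;
+ *		fl.fl_file = filp;
+ *		fl.fl_start = start;
+ *		fl.fl_end = end;
+ *
+ *		err = vfs_test_lock(filp, &fl);
+ *		if (err)
+ *			return err;
+ *		return fl.fl_type != F_UNLCK;
+ *	}
+ */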
+
+
+struct fasync_struct {
+ spinlock_t fa_lock;
+ int magic;
+ int fa_fd;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+ struct rcu_head fa_rcu;
+};
+
+#define FASYNC_MAGIC 0x4601
+
+/* SMP safe fasync helpers: */
+extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
+extern int fasync_remove_entry(struct file *, struct fasync_struct **);
+extern struct fasync_struct *fasync_alloc(void);
+extern void fasync_free(struct fasync_struct *);
+
+/* can be called from interrupts */
+extern void kill_fasync(struct fasync_struct **, int, int);
+
+extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
+extern void f_setown(struct file *filp, unsigned long arg, int force);
+extern void f_delown(struct file *filp);
+extern pid_t f_getown(struct file *filp);
+extern int send_sigurg(struct fown_struct *fown);
+
+struct mm_struct;
+
+/*
+ * Umount options
+ */
+
+#define MNT_FORCE 0x00000001 /* Attempt to forcibly umount */
+#define MNT_DETACH 0x00000002 /* Just detach from the tree */
+#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
+#define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */
+#define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */
+
+extern struct list_head super_blocks;
+
+/* Possible states of 'frozen' field */
+enum {
+ SB_UNFROZEN = 0, /* FS is unfrozen */
+ SB_FREEZE_WRITE = 1, /* Writes, dir ops, ioctls frozen */
+ SB_FREEZE_PAGEFAULT = 2, /* Page faults stopped as well */
+ SB_FREEZE_FS = 3, /* For internal FS use (e.g. to stop
+ * internal threads if needed) */
+ SB_FREEZE_COMPLETE = 4, /* ->freeze_fs finished successfully */
+};
+
+#define SB_FREEZE_LEVELS (SB_FREEZE_COMPLETE - 1)
+
+struct sb_writers {
+ /* Counters for counting writers at each level */
+ struct percpu_counter counter[SB_FREEZE_LEVELS];
+ wait_queue_head_t wait; /* queue for waiting for
+ writers / faults to finish */
+ int frozen; /* Is sb frozen? */
+ wait_queue_head_t wait_unfrozen; /* queue for waiting for
+ sb to be thawed */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map lock_map[SB_FREEZE_LEVELS];
+#endif
+};
+
+struct super_block {
+ struct list_head s_list; /* Keep this first */
+ dev_t s_dev; /* search index; _not_ kdev_t */
+ unsigned char s_blocksize_bits;
+ unsigned long s_blocksize;
+ loff_t s_maxbytes; /* Max file size */
+ struct file_system_type *s_type;
+ const struct super_operations *s_op;
+ const struct dquot_operations *dq_op;
+ const struct quotactl_ops *s_qcop;
+ const struct export_operations *s_export_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ struct dentry *s_root;
+ struct rw_semaphore s_umount;
+ int s_count;
+ atomic_t s_active;
+#ifdef CONFIG_SECURITY
+ void *s_security;
+#endif
+ const struct xattr_handler **s_xattr;
+
+ struct list_head s_inodes; /* all inodes */
+ struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */
+ struct list_head s_mounts; /* list of mounts; _not_ for fs use */
+ struct block_device *s_bdev;
+ struct backing_dev_info *s_bdi;
+ struct mtd_info *s_mtd;
+ struct hlist_node s_instances;
+ unsigned int s_quota_types; /* Bitmask of supported quota types */
+ struct quota_info s_dquot; /* Diskquota specific options */
+
+ struct sb_writers s_writers;
+
+ char s_id[32]; /* Informational name */
+ u8 s_uuid[16]; /* UUID */
+
+ void *s_fs_info; /* Filesystem private info */
+ unsigned int s_max_links;
+ fmode_t s_mode;
+
+ /* Granularity of c/m/atime in ns.
+ Cannot be worse than a second */
+ u32 s_time_gran;
+
+ /*
+ * The next field is for VFS *only*. No filesystems have any business
+ * even looking at it. You had been warned.
+ */
+ struct mutex s_vfs_rename_mutex; /* Kludge */
+
+ /*
+ * Filesystem subtype. If non-empty the filesystem type field
+ * in /proc/mounts will be "type.subtype"
+ */
+ char *s_subtype;
+
+ /*
+ * Saved mount options for lazy filesystems using
+ * generic_show_options()
+ */
+ char __rcu *s_options;
+ const struct dentry_operations *s_d_op; /* default d_op for dentries */
+
+ /*
+ * Saved pool identifier for cleancache (-1 means none)
+ */
+ int cleancache_poolid;
+
+ struct shrinker s_shrink; /* per-sb shrinker handle */
+
+ /* Number of inodes with nlink == 0 but still referenced */
+ atomic_long_t s_remove_count;
+
+ /* Being remounted read-only */
+ int s_readonly_remount;
+
+ /* AIO completions deferred from interrupt context */
+ struct workqueue_struct *s_dio_done_wq;
+ struct hlist_head s_pins;
+
+ /*
+ * Keep the lru lists last in the structure so they always sit on their
+ * own individual cachelines.
+ */
+ struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
+ struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
+ struct rcu_head rcu;
+
+ /*
+ * Indicates how deep in a filesystem stack this SB is
+ */
+ int s_stack_depth;
+};
+
+extern struct timespec current_fs_time(struct super_block *sb);
+
+/*
+ * Snapshotting support.
+ */
+
+void __sb_end_write(struct super_block *sb, int level);
+int __sb_start_write(struct super_block *sb, int level, bool wait);
+
+/**
+ * sb_end_write - drop write access to a superblock
+ * @sb: the super we wrote to
+ *
+ * Decrement the number of writers to the filesystem. Wake up possible waiters
+ * wanting to freeze the filesystem.
+ */
+static inline void sb_end_write(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_WRITE);
+}
+
+/**
+ * sb_end_pagefault - drop write access to a superblock from a page fault
+ * @sb: the super we wrote to
+ *
+ * Decrement the number of processes handling a write page fault to the filesystem.
+ * Wake up possible waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_pagefault(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_PAGEFAULT);
+}
+
+/**
+ * sb_end_intwrite - drop write access to a superblock for internal fs purposes
+ * @sb: the super we wrote to
+ *
+ * Decrement the fs-internal number of writers to the filesystem. Wake up possible
+ * waiters wanting to freeze the filesystem.
+ */
+static inline void sb_end_intwrite(struct super_block *sb)
+{
+ __sb_end_write(sb, SB_FREEZE_FS);
+}
+
+/**
+ * sb_start_write - get write access to a superblock
+ * @sb: the super we write to
+ *
+ * When a process wants to write data or metadata to a file system (i.e. dirty
+ * a page or an inode), it should embed the operation in a sb_start_write() -
+ * sb_end_write() pair to get exclusion against file system freezing. This
+ * function increments the number of writers, preventing freezing. If the file
+ * system is already frozen, the function waits until the file system is
+ * thawed.
+ *
+ * Since freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. Generally,
+ * freeze protection should be the outermost lock. In particular, we have:
+ *
+ * sb_start_write
+ * -> i_mutex (write path, truncate, directory ops, ...)
+ * -> s_umount (freeze_super, thaw_super)
+ */
+static inline void sb_start_write(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_WRITE, true);
+}
+
+static inline int sb_start_write_trylock(struct super_block *sb)
+{
+ return __sb_start_write(sb, SB_FREEZE_WRITE, false);
+}
+
+/**
+ * sb_start_pagefault - get write access to a superblock from a page fault
+ * @sb: the super we write to
+ *
+ * When a process starts handling write page fault, it should embed the
+ * operation into sb_start_pagefault() - sb_end_pagefault() pair to get
+ * exclusion against file system freezing. This is needed since the page fault
+ * is going to dirty a page. This function increments the number of running page
+ * faults preventing freezing. If the file system is already frozen, the
+ * function waits until the file system is thawed.
+ *
+ * Since page fault freeze protection behaves as a lock, users have to preserve
+ * ordering of freeze protection and other filesystem locks. It is advised to
+ * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault
+ * handling code implies lock dependency:
+ *
+ * mmap_sem
+ * -> sb_start_pagefault
+ */
+static inline void sb_start_pagefault(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
+}
+
+/*
+ * sb_start_intwrite - get write access to a superblock for internal fs purposes
+ * @sb: the super we write to
+ *
+ * This is the third level of protection against filesystem freezing. It is
+ * free for use by a filesystem. The only requirement is that it must rank
+ * below sb_start_pagefault.
+ *
+ * For example, a filesystem can call sb_start_intwrite() when starting a
+ * transaction which somewhat eases handling of freezing for internal sources
+ * of filesystem changes (internal fs threads, discarding preallocation on file
+ * close, etc.).
+ */
+static inline void sb_start_intwrite(struct super_block *sb)
+{
+ __sb_start_write(sb, SB_FREEZE_FS, true);
+}
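+
+/*
+ * Illustrative sketch (not part of this header): wrapping a data-modifying
+ * operation in freeze protection so it excludes a concurrent freeze_super().
+ * The names example_modify() and example_do_write() are hypothetical.
+ *
+ *	static int example_modify(struct super_block *sb)
+ *	{
+ *		int err;
+ *
+ *		sb_start_write(sb);
+ *		err = example_do_write(sb);
+ *		sb_end_write(sb);
+ *		return err;
+ *	}
+ */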
+
+
+extern bool inode_owner_or_capable(const struct inode *inode);
+
+/*
+ * VFS helper functions..
+ */
+extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
+extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
+extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+extern int vfs_symlink(struct inode *, struct dentry *, const char *);
+extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct inode **);
+extern int vfs_rmdir(struct inode *, struct dentry *);
+extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
+extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
+extern int vfs_whiteout(struct inode *, struct dentry *);
+
+/*
+ * VFS dentry helper functions.
+ */
+extern void dentry_unhash(struct dentry *dentry);
+
+/*
+ * VFS file helper functions.
+ */
+extern void inode_init_owner(struct inode *inode, const struct inode *dir,
+ umode_t mode);
+/*
+ * VFS FS_IOC_FIEMAP helper definitions.
+ */
+struct fiemap_extent_info {
+ unsigned int fi_flags; /* Flags as passed from user */
+ unsigned int fi_extents_mapped; /* Number of mapped extents */
+ unsigned int fi_extents_max; /* Size of fiemap_extent array */
+ struct fiemap_extent __user *fi_extents_start; /* Start of
+ fiemap_extent array */
+};
+int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
+ u64 phys, u64 len, u32 flags);
+int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
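+
+/*
+ * Illustrative sketch (not part of this header): the shape of a filesystem
+ * ->fiemap() method built on the helpers above.  Extent lookup is elided,
+ * the function name is hypothetical, and the FIEMAP_* constants come from
+ * <linux/fiemap.h>.
+ *
+ *	static int example_fiemap(struct inode *inode,
+ *				  struct fiemap_extent_info *fieinfo,
+ *				  u64 start, u64 len)
+ *	{
+ *		int err = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ *
+ *		if (err)
+ *			return err;
+ *		... look up the extent covering [start, start + len) ...
+ *		err = fiemap_fill_next_extent(fieinfo, logical, physical,
+ *					      length, FIEMAP_EXTENT_LAST);
+ *		return err < 0 ? err : 0;
+ *	}
+ */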
+
+/*
+ * File types
+ *
+ * NOTE! These match bits 12..15 of stat.st_mode
+ * (ie "(i_mode >> 12) & 15").
+ */
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+#define DT_WHT 14
+
+/*
+ * This is the "filldir" function type, used by readdir() to let
+ * the kernel specify what kind of dirent layout it wants to have.
+ * This allows the kernel to read directories into kernel space or
+ * to have different dirent layouts depending on the binary type.
+ */
+struct dir_context;
+typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
+ unsigned);
+
+struct dir_context {
+ const filldir_t actor;
+ loff_t pos;
+};
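+
+/*
+ * Illustrative sketch (not part of this header): the shape of a filesystem
+ * ->iterate() method feeding entries through the dir_context actor.  It
+ * assumes the dir_emit()/dir_emit_dots() wrappers defined further down in
+ * this header; the function and the single fake entry are hypothetical.
+ *
+ *	static int example_iterate(struct file *file, struct dir_context *ctx)
+ *	{
+ *		if (!dir_emit_dots(file, ctx))
+ *			return 0;
+ *		if (ctx->pos == 2) {
+ *			if (!dir_emit(ctx, "hello", 5, 1234, DT_REG))
+ *				return 0;
+ *			ctx->pos++;
+ *		}
+ *		return 0;
+ *	}
+ */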
+
+struct block_device_operations;
+
+/* These macros are for out-of-tree modules to test that
+ * the kernel supports the unlocked_ioctl and compat_ioctl
+ * fields in struct file_operations. */
+#define HAVE_COMPAT_IOCTL 1
+#define HAVE_UNLOCKED_IOCTL 1
+
+/*
+ * These flags let !MMU mmap() govern direct device mapping vs immediate
+ * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
+ *
+ * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE)
+ * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
+ * NOMMU_MAP_READ: Can be mapped for reading
+ * NOMMU_MAP_WRITE: Can be mapped for writing
+ * NOMMU_MAP_EXEC: Can be mapped for execution
+ */
+#define NOMMU_MAP_COPY 0x00000001
+#define NOMMU_MAP_DIRECT 0x00000008
+#define NOMMU_MAP_READ VM_MAYREAD
+#define NOMMU_MAP_WRITE VM_MAYWRITE
+#define NOMMU_MAP_EXEC VM_MAYEXEC
+
+#define NOMMU_VMFLAGS \
+ (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
+
+
+struct iov_iter;
+
+struct file_operations {
+ struct module *owner;
+ loff_t (*llseek) (struct file *, loff_t, int);
+ ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
+ ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
+ ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
+ int (*iterate) (struct file *, struct dir_context *);
+ unsigned int (*poll) (struct file *, struct poll_table_struct *);
+ long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
+ long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
+ int (*mmap) (struct file *, struct vm_area_struct *);
+ int (*mremap)(struct file *, struct vm_area_struct *);
+ int (*open) (struct inode *, struct file *);
+ int (*flush) (struct file *, fl_owner_t id);
+ int (*release) (struct inode *, struct file *);
+ int (*fsync) (struct file *, loff_t, loff_t, int datasync);
+ int (*aio_fsync) (struct kiocb *, int datasync);
+ int (*fasync) (int, struct file *, int);
+ int (*lock) (struct file *, int, struct file_lock *);
+ ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
+ unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+ int (*check_flags)(int);
+ int (*flock) (struct file *, int, struct file_lock *);
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
+ ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+ int (*setlease)(struct file *, long, struct file_lock **, void **);
+ long (*fallocate)(struct file *file, int mode, loff_t offset,
+ loff_t len);
+ void (*show_fdinfo)(struct seq_file *m, struct file *f);
+#ifndef CONFIG_MMU
+ unsigned (*mmap_capabilities)(struct file *);
+#endif
+};
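+
+/*
+ * Illustrative sketch (not part of this header): a minimal file_operations
+ * instance wired to generic helpers declared elsewhere in this header
+ * (generic_file_llseek and the generic iter/mmap routines).  The variable
+ * name is hypothetical.
+ *
+ *	static const struct file_operations example_file_ops = {
+ *		.owner		= THIS_MODULE,
+ *		.llseek		= generic_file_llseek,
+ *		.read_iter	= generic_file_read_iter,
+ *		.write_iter	= generic_file_write_iter,
+ *		.mmap		= generic_file_mmap,
+ *	};
+ */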
+
+struct inode_operations {
+ struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
+ void * (*follow_link) (struct dentry *, struct nameidata *);
+ int (*permission) (struct inode *, int);
+ struct posix_acl * (*get_acl)(struct inode *, int);
+
+ int (*readlink) (struct dentry *, char __user *,int);
+ void (*put_link) (struct dentry *, struct nameidata *, void *);
+
+ int (*create) (struct inode *,struct dentry *, umode_t, bool);
+ int (*link) (struct dentry *,struct inode *,struct dentry *);
+ int (*unlink) (struct inode *,struct dentry *);
+ int (*symlink) (struct inode *,struct dentry *,const char *);
+ int (*mkdir) (struct inode *,struct dentry *,umode_t);
+ int (*rmdir) (struct inode *,struct dentry *);
+ int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
+ int (*rename) (struct inode *, struct dentry *,
+ struct inode *, struct dentry *);
+ int (*rename2) (struct inode *, struct dentry *,
+ struct inode *, struct dentry *, unsigned int);
+ int (*setattr) (struct dentry *, struct iattr *);
+ int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
+ int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
+ ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+ ssize_t (*listxattr) (struct dentry *, char *, size_t);
+ int (*removexattr) (struct dentry *, const char *);
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
+ u64 len);
+ int (*update_time)(struct inode *, struct timespec *, int);
+ int (*atomic_open)(struct inode *, struct dentry *,
+ struct file *, unsigned open_flag,
+ umode_t create_mode, int *opened);
+ int (*tmpfile) (struct inode *, struct dentry *, umode_t);
+ int (*set_acl)(struct inode *, struct posix_acl *, int);
+
+ /* WARNING: probably going away soon, do not use! */
+ int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
+} ____cacheline_aligned;
+
+ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
+ unsigned long nr_segs, unsigned long fast_segs,
+ struct iovec *fast_pointer,
+ struct iovec **ret_pointer);
+
+typedef ssize_t (*vfs_readf_t)(struct file *, char __user *, size_t, loff_t *);
+typedef ssize_t (*vfs_writef_t)(struct file *, const char __user *, size_t,
+ loff_t *);
+vfs_readf_t vfs_readf(struct file *file);
+vfs_writef_t vfs_writef(struct file *file);
+
+extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
+extern ssize_t __vfs_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
+extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
+ unsigned long, loff_t *);
+extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
+ unsigned long, loff_t *);
+
+struct super_operations {
+ struct inode *(*alloc_inode)(struct super_block *sb);
+ void (*destroy_inode)(struct inode *);
+
+ void (*dirty_inode) (struct inode *, int flags);
+ int (*write_inode) (struct inode *, struct writeback_control *wbc);
+ int (*drop_inode) (struct inode *);
+ void (*evict_inode) (struct inode *);
+ void (*put_super) (struct super_block *);
+ int (*sync_fs)(struct super_block *sb, int wait);
+ int (*freeze_super) (struct super_block *);
+ int (*freeze_fs) (struct super_block *);
+ int (*thaw_super) (struct super_block *);
+ int (*unfreeze_fs) (struct super_block *);
+ int (*statfs) (struct dentry *, struct kstatfs *);
+ int (*remount_fs) (struct super_block *, int *, char *);
+ void (*umount_begin) (struct super_block *);
+
+ int (*show_options)(struct seq_file *, struct dentry *);
+ int (*show_devname)(struct seq_file *, struct dentry *);
+ int (*show_path)(struct seq_file *, struct dentry *);
+ int (*show_stats)(struct seq_file *, struct dentry *);
+#ifdef CONFIG_QUOTA
+ ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
+ ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+ struct dquot **(*get_dquots)(struct inode *);
+#endif
+ int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
+ long (*nr_cached_objects)(struct super_block *,
+ struct shrink_control *);
+ long (*free_cached_objects)(struct super_block *,
+ struct shrink_control *);
+};
+
+/*
+ * Inode flags - they have no relation to superblock flags now
+ */
+#define S_SYNC 1 /* Writes are synced at once */
+#define S_NOATIME 2 /* Do not update access times */
+#define S_APPEND 4 /* Append-only file */
+#define S_IMMUTABLE 8 /* Immutable file */
+#define S_DEAD 16 /* removed, but still open directory */
+#define S_NOQUOTA 32 /* Inode is not counted to quota */
+#define S_DIRSYNC 64 /* Directory modifications are synchronous */
+#define S_NOCMTIME 128 /* Do not update file c/mtime */
+#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
+#define S_PRIVATE 512 /* Inode is fs-internal */
+#define S_IMA 1024 /* Inode has an associated IMA struct */
+#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */
+#define S_NOSEC 4096 /* no suid or xattr security attributes */
+#ifdef CONFIG_FS_DAX
+#define S_DAX 8192 /* Direct Access, avoiding the page cache */
+#else
+#define S_DAX 0 /* Make all the DAX code disappear */
+#endif
+#define S_ATOMIC_COPY 16384 /* Pages mapped with this inode need to be
+ atomically copied (gem) */
+
+/*
+ * Note that nosuid etc flags are inode-specific: setting some file-system
+ * flags just means all the inodes inherit those flags by default. It might be
+ * possible to override it selectively if you really wanted to with some
+ * ioctl() that is not currently implemented.
+ *
+ * Exception: MS_RDONLY is always applied to the entire file system.
+ *
+ * Unfortunately, it is possible to change a filesystem's flags while it is
+ * mounted and has files in use. This means that the existing inodes will
+ * not have their i_flags updated. Hence, i_flags no longer inherits the
+ * superblock mount flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
+ */
+#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
+
+#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
+#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
+ ((inode)->i_flags & S_SYNC))
+#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
+ ((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
+#define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
+#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
+#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
+
+#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
+#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
+
+#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
+#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
+#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
+#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
+#define IS_IMA(inode) ((inode)->i_flags & S_IMA)
+#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
+#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
+#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
+
+#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
+ (inode)->i_rdev == WHITEOUT_DEV)
+
+/*
+ * Inode state bits. Protected by inode->i_lock
+ *
+ * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
+ * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
+ *
+ * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
+ * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
+ * various stages of removing an inode.
+ *
+ * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
+ *
+ * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
+ * fdatasync(). i_atime is the usual cause.
+ * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+ * these changes separately from I_DIRTY_SYNC so that we
+ * don't have to write the inode on fdatasync() when
+ * only the mtime has changed.
+ * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+ * I_NEW Serves as both a mutex and completion notification.
+ * New inodes set I_NEW. If two processes both create
+ * the same inode, one of them will release its inode and
+ * wait for I_NEW to be released before returning.
+ * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+ * also cause waiting on I_NEW, without I_NEW actually
+ * being set. find_inode() uses this to prevent returning
+ * nearly-dead inodes.
+ * I_WILL_FREE Must be set when calling write_inode_now() if i_count
+ * is zero. I_FREEING must be set when I_WILL_FREE is
+ * cleared.
+ * I_FREEING Set when inode is about to be freed but still has dirty
+ * pages or buffers attached or the inode itself is still
+ * dirty.
+ * I_CLEAR Added by clear_inode(). In this state the inode is
+ * clean and can be destroyed. Inode keeps I_FREEING.
+ *
+ * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
+ * prohibited for many purposes. iget() must wait for
+ * the inode to be completely released, then create it
+ * anew. Other functions will just ignore such inodes,
+ * if appropriate. I_NEW is used for waiting.
+ *
+ * I_SYNC Writeback of inode is running. The bit is set during
+ * data writeback, and cleared with a wakeup on the bit
+ * address once it is done. The bit is also used to pin
+ * the inode in memory for the flusher thread.
+ *
+ * I_REFERENCED Marks the inode as recently referenced on the LRU list.
+ *
+ * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit().
+ *
+ * Q: What is the difference between I_WILL_FREE and I_FREEING?
+ */
+#define I_DIRTY_SYNC (1 << 0)
+#define I_DIRTY_DATASYNC (1 << 1)
+#define I_DIRTY_PAGES (1 << 2)
+#define __I_NEW 3
+#define I_NEW (1 << __I_NEW)
+#define I_WILL_FREE (1 << 4)
+#define I_FREEING (1 << 5)
+#define I_CLEAR (1 << 6)
+#define __I_SYNC 7
+#define I_SYNC (1 << __I_SYNC)
+#define I_REFERENCED (1 << 8)
+#define __I_DIO_WAKEUP 9
+#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
+#define I_LINKABLE (1 << 10)
+#define I_DIRTY_TIME (1 << 11)
+#define __I_DIRTY_TIME_EXPIRED 12
+#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
+
+#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME)
+
+extern void __mark_inode_dirty(struct inode *, int);
+static inline void mark_inode_dirty(struct inode *inode)
+{
+ __mark_inode_dirty(inode, I_DIRTY);
+}
+
+static inline void mark_inode_dirty_sync(struct inode *inode)
+{
+ __mark_inode_dirty(inode, I_DIRTY_SYNC);
+}
+
+extern void inc_nlink(struct inode *inode);
+extern void drop_nlink(struct inode *inode);
+extern void clear_nlink(struct inode *inode);
+extern void set_nlink(struct inode *inode, unsigned int nlink);
+
+static inline void inode_inc_link_count(struct inode *inode)
+{
+ inc_nlink(inode);
+ mark_inode_dirty(inode);
+}
+
+static inline void inode_dec_link_count(struct inode *inode)
+{
+ drop_nlink(inode);
+ mark_inode_dirty(inode);
+}
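+
+/*
+ * Illustrative sketch (not part of this header): typical link-count handling
+ * when a filesystem instantiates a new directory ("." counts on the new
+ * inode, ".." counts on the parent).  The function name is hypothetical.
+ *
+ *	static void example_init_new_dir(struct inode *dir, struct inode *inode)
+ *	{
+ *		set_nlink(inode, 2);
+ *		mark_inode_dirty(inode);
+ *		inode_inc_link_count(dir);
+ *	}
+ */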
+
+/**
+ * inode_inc_iversion - increments i_version
+ * @inode: inode that needs to be updated
+ *
+ * Every time the inode is modified, the i_version field will be incremented.
+ * The filesystem has to be mounted with the i_version option.
+ */
+
+static inline void inode_inc_iversion(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ inode->i_version++;
+ spin_unlock(&inode->i_lock);
+}
+
+enum file_time_flags {
+ S_ATIME = 1,
+ S_MTIME = 2,
+ S_CTIME = 4,
+ S_VERSION = 8,
+};
+
+extern void touch_atime(const struct path *);
+static inline void file_accessed(struct file *file)
+{
+ if (!(file->f_flags & O_NOATIME))
+ touch_atime(&file->f_path);
+}
+
+int sync_inode(struct inode *inode, struct writeback_control *wbc);
+int sync_inode_metadata(struct inode *inode, int wait);
+
+struct file_system_type {
+ const char *name;
+ int fs_flags;
+#define FS_REQUIRES_DEV 1
+#define FS_BINARY_MOUNTDATA 2
+#define FS_HAS_SUBTYPE 4
+#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
+#define FS_USERNS_DEV_MOUNT 16 /* A userns mount does not imply MNT_NODEV */
+#define FS_USERNS_VISIBLE 32 /* FS must already be visible */
+#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
+ struct dentry *(*mount) (struct file_system_type *, int,
+ const char *, void *);
+ void (*kill_sb) (struct super_block *);
+ struct module *owner;
+ struct file_system_type * next;
+ struct hlist_head fs_supers;
+
+ struct lock_class_key s_lock_key;
+ struct lock_class_key s_umount_key;
+ struct lock_class_key s_vfs_rename_key;
+ struct lock_class_key s_writers_key[SB_FREEZE_LEVELS];
+
+ struct lock_class_key i_lock_key;
+ struct lock_class_key i_mutex_key;
+ struct lock_class_key i_mutex_dir_key;
+};
+
+#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
+
+extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
+ void *data, int (*fill_super)(struct super_block *, void *, int));
+extern struct dentry *mount_bdev(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ int (*fill_super)(struct super_block *, void *, int));
+extern struct dentry *mount_single(struct file_system_type *fs_type,
+ int flags, void *data,
+ int (*fill_super)(struct super_block *, void *, int));
+extern struct dentry *mount_nodev(struct file_system_type *fs_type,
+ int flags, void *data,
+ int (*fill_super)(struct super_block *, void *, int));
+extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path);
+void generic_shutdown_super(struct super_block *sb);
+void kill_block_super(struct super_block *sb);
+void kill_anon_super(struct super_block *sb);
+void kill_litter_super(struct super_block *sb);
+void deactivate_super(struct super_block *sb);
+void deactivate_locked_super(struct super_block *sb);
+int set_anon_super(struct super_block *s, void *data);
+int get_anon_bdev(dev_t *);
+void free_anon_bdev(dev_t);
+struct super_block *sget(struct file_system_type *type,
+ int (*test)(struct super_block *,void *),
+ int (*set)(struct super_block *,void *),
+ int flags, void *data);
+extern struct dentry *mount_pseudo(struct file_system_type *, char *,
+ const struct super_operations *ops,
+ const struct dentry_operations *dops,
+ unsigned long);
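+
+/*
+ * Illustrative sketch (not part of this header): declaring a simple
+ * memory-backed filesystem type around mount_nodev().  The type name,
+ * example_fill_super() and example_mount() are hypothetical; registration
+ * then goes through register_filesystem()/unregister_filesystem() declared
+ * just below.
+ *
+ *	static struct dentry *example_mount(struct file_system_type *fs_type,
+ *					    int flags, const char *dev_name,
+ *					    void *data)
+ *	{
+ *		return mount_nodev(fs_type, flags, data, example_fill_super);
+ *	}
+ *
+ *	static struct file_system_type example_fs_type = {
+ *		.owner		= THIS_MODULE,
+ *		.name		= "examplefs",
+ *		.mount		= example_mount,
+ *		.kill_sb	= kill_litter_super,
+ *	};
+ *	MODULE_ALIAS_FS("examplefs");
+ */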
+
+/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
+#define fops_get(fops) \
+ (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
+#define fops_put(fops) \
+ do { if (fops) module_put((fops)->owner); } while(0)
+/*
+ * This one is to be used *ONLY* from ->open() instances.
+ * fops must be non-NULL, pinned down *and* module dependencies
+ * should be sufficient to pin the caller down as well.
+ */
+#define replace_fops(f, fops) \
+ do { \
+ struct file *__file = (f); \
+ fops_put(__file->f_op); \
+ BUG_ON(!(__file->f_op = (fops))); \
+ } while(0)
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
+#define kern_mount(type) kern_mount_data(type, NULL)
+extern void kern_unmount(struct vfsmount *mnt);
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+extern long do_mount(const char *, const char __user *,
+ const char *, unsigned long, void *);
+extern struct vfsmount *collect_mounts(struct path *);
+extern void drop_collected_mounts(struct vfsmount *);
+extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
+ struct vfsmount *);
+extern int vfs_statfs(struct path *, struct kstatfs *);
+extern int user_statfs(const char __user *, struct kstatfs *);
+extern int fd_statfs(int, struct kstatfs *);
+extern int vfs_ustat(dev_t, struct kstatfs *);
+extern int freeze_super(struct super_block *super);
+extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
+
+extern int current_umask(void);
+
+extern void ihold(struct inode * inode);
+extern void iput(struct inode *);
+extern int generic_update_time(struct inode *, struct timespec *, int);
+
+static inline struct inode *file_inode(const struct file *f)
+{
+ return f->f_inode;
+}
+
+/* /sys/fs */
+extern struct kobject *fs_kobj;
+
+#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)
+
+#define FLOCK_VERIFY_READ 1
+#define FLOCK_VERIFY_WRITE 2
+
+#ifdef CONFIG_FILE_LOCKING
+extern int locks_mandatory_locked(struct file *);
+extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
+
+/*
+ * Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+
+static inline int __mandatory_lock(struct inode *ino)
+{
+ return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
+}
+
+/*
+ * ... and these candidates must be on an MS_MANDLOCK-mounted fs;
+ * otherwise they are only advisory locks.
+ */
+
+static inline int mandatory_lock(struct inode *ino)
+{
+ return IS_MANDLOCK(ino) && __mandatory_lock(ino);
+}
+
+static inline int locks_verify_locked(struct file *file)
+{
+ if (mandatory_lock(file_inode(file)))
+ return locks_mandatory_locked(file);
+ return 0;
+}
+
+static inline int locks_verify_truncate(struct inode *inode,
+ struct file *filp,
+ loff_t size)
+{
+ if (inode->i_flctx && mandatory_lock(inode))
+ return locks_mandatory_area(
+ FLOCK_VERIFY_WRITE, inode, filp,
+ size < inode->i_size ? size : inode->i_size,
+ (size < inode->i_size ? inode->i_size - size
+ : size - inode->i_size)
+ );
+ return 0;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode, mode, FL_LEASE);
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+ /*
+ * Since this check is lockless, we must ensure that any refcounts
+ * taken are done before checking i_flctx->flc_lease. Otherwise, we
+ * could end up racing with tasks trying to set a new lease on this
+ * file.
+ */
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode, mode, FL_DELEG);
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+ int ret;
+
+ ret = break_deleg(inode, O_WRONLY|O_NONBLOCK);
+ if (ret == -EWOULDBLOCK && delegated_inode) {
+ *delegated_inode = inode;
+ ihold(inode);
+ }
+ return ret;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+ int ret;
+
+ ret = break_deleg(*delegated_inode, O_WRONLY);
+ iput(*delegated_inode);
+ *delegated_inode = NULL;
+ return ret;
+}
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ smp_mb();
+ if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease))
+ return __break_lease(inode,
+ wait ? O_WRONLY : O_WRONLY | O_NONBLOCK,
+ FL_LAYOUT);
+ return 0;
+}
+
+#else /* !CONFIG_FILE_LOCKING */
+static inline int locks_mandatory_locked(struct file *file)
+{
+ return 0;
+}
+
+static inline int locks_mandatory_area(int rw, struct inode *inode,
+ struct file *filp, loff_t offset,
+ size_t count)
+{
+ return 0;
+}
+
+static inline int __mandatory_lock(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int mandatory_lock(struct inode *inode)
+{
+ return 0;
+}
+
+static inline int locks_verify_locked(struct file *file)
+{
+ return 0;
+}
+
+static inline int locks_verify_truncate(struct inode *inode, struct file *filp,
+ size_t size)
+{
+ return 0;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline int break_deleg(struct inode *inode, unsigned int mode)
+{
+ return 0;
+}
+
+static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode)
+{
+ return 0;
+}
+
+static inline int break_deleg_wait(struct inode **delegated_inode)
+{
+ BUG();
+ return 0;
+}
+
+static inline int break_layout(struct inode *inode, bool wait)
+{
+ return 0;
+}
+
+#endif /* CONFIG_FILE_LOCKING */
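+
+/*
+ * Illustrative sketch (not part of this header): the retry pattern callers
+ * use with try_break_deleg()/break_deleg_wait() when an operation such as
+ * unlink finds an outstanding delegation.  Locking and the repeated lookup
+ * are elided; the function name is hypothetical.
+ *
+ *	static int example_remove(struct inode *dir, struct dentry *dentry)
+ *	{
+ *		struct inode *delegated = NULL;
+ *		int err;
+ *
+ *	retry:
+ *		err = vfs_unlink(dir, dentry, &delegated);
+ *		if (delegated) {
+ *			err = break_deleg_wait(&delegated);
+ *			if (!err)
+ *				goto retry;
+ *		}
+ *		return err;
+ *	}
+ */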
+
+/* fs/open.c */
+struct audit_names;
+struct filename {
+ const char *name; /* pointer to actual string */
+ const __user char *uptr; /* original userland pointer */
+ struct audit_names *aname;
+ int refcnt;
+ const char iname[];
+};
+
+extern long vfs_truncate(struct path *, loff_t);
+extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
+ struct file *filp);
+extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
+ loff_t len);
+extern long do_sys_open(int dfd, const char __user *filename, int flags,
+ umode_t mode);
+extern struct file *file_open_name(struct filename *, int, umode_t);
+extern struct file *filp_open(const char *, int, umode_t);
+extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+ const char *, int);
+extern int vfs_open(const struct path *, struct file *, const struct cred *);
+extern struct file * dentry_open(const struct path *, int, const struct cred *);
+extern int filp_close(struct file *, fl_owner_t id);
+
+extern struct filename *getname_flags(const char __user *, int, int *);
+extern struct filename *getname(const char __user *);
+extern struct filename *getname_kernel(const char *);
+extern void putname(struct filename *name);
+
+enum {
+ FILE_CREATED = 1,
+ FILE_OPENED = 2
+};
+extern int finish_open(struct file *file, struct dentry *dentry,
+ int (*open)(struct inode *, struct file *),
+ int *opened);
+extern int finish_no_open(struct file *file, struct dentry *dentry);
+
+/* fs/ioctl.c */
+
+extern int ioctl_preallocate(struct file *filp, void __user *argp);
+
+/* fs/dcache.c */
+extern void __init vfs_caches_init_early(void);
+extern void __init vfs_caches_init(unsigned long);
+
+extern struct kmem_cache *names_cachep;
+
+#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
+#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+
+#ifdef CONFIG_BLOCK
+extern int register_blkdev(unsigned int, const char *);
+extern void unregister_blkdev(unsigned int, const char *);
+extern struct block_device *bdget(dev_t);
+extern struct block_device *bdgrab(struct block_device *bdev);
+extern void bd_set_size(struct block_device *, loff_t size);
+extern void bd_forget(struct inode *inode);
+extern void bdput(struct block_device *);
+extern void invalidate_bdev(struct block_device *);
+extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
+extern int sync_blockdev(struct block_device *bdev);
+extern void kill_bdev(struct block_device *);
+extern struct super_block *freeze_bdev(struct block_device *);
+extern void emergency_thaw_all(void);
+extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+extern int fsync_bdev(struct block_device *);
+extern int fsync_super(struct super_block *);
+extern int fsync_no_super(struct block_device *);
+#define FS_FREEZER_FUSE 1
+#define FS_FREEZER_NORMAL 2
+#define FS_FREEZER_ALL (FS_FREEZER_FUSE | FS_FREEZER_NORMAL)
+void freeze_filesystems(int which);
+void thaw_filesystems(int which);
+extern int sb_is_blkdev_sb(struct super_block *sb);
+#else
+static inline void bd_forget(struct inode *inode) {}
+static inline int sync_blockdev(struct block_device *bdev) { return 0; }
+static inline void kill_bdev(struct block_device *bdev) {}
+static inline void invalidate_bdev(struct block_device *bdev) {}
+
+static inline struct super_block *freeze_bdev(struct block_device *sb)
+{
+ return NULL;
+}
+
+static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+ return 0;
+}
+
+static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
+{
+}
+
+static inline int sb_is_blkdev_sb(struct super_block *sb)
+{
+ return 0;
+}
+#endif
+extern int sync_filesystem(struct super_block *);
+extern const struct file_operations def_blk_fops;
+extern const struct file_operations def_chr_fops;
+#ifdef CONFIG_BLOCK
+extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
+extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
+extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
+extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
+extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+ void *holder);
+extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
+ void *holder);
+extern void blkdev_put(struct block_device *bdev, fmode_t mode);
+#ifdef CONFIG_SYSFS
+extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+extern void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk);
+#else
+static inline int bd_link_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
+{
+ return 0;
+}
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
+{
+}
+#endif
+#endif
+
+/* fs/char_dev.c */
+#define CHRDEV_MAJOR_HASH_SIZE 255
+extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
+extern int register_chrdev_region(dev_t, unsigned, const char *);
+extern int __register_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name,
+ const struct file_operations *fops);
+extern void __unregister_chrdev(unsigned int major, unsigned int baseminor,
+ unsigned int count, const char *name);
+extern void unregister_chrdev_region(dev_t, unsigned);
+extern void chrdev_show(struct seq_file *,off_t);
+
+static inline int register_chrdev(unsigned int major, const char *name,
+ const struct file_operations *fops)
+{
+ return __register_chrdev(major, 0, 256, name, fops);
+}
+
+static inline void unregister_chrdev(unsigned int major, const char *name)
+{
+ __unregister_chrdev(major, 0, 256, name);
+}
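+
+/*
+ * Illustrative sketch (not part of this header): grabbing a dynamically
+ * allocated character-device major with the legacy wrapper above.
+ * example_major and example_fops are hypothetical module-local names.
+ *
+ *	static int __init example_init(void)
+ *	{
+ *		int major = register_chrdev(0, "example", &example_fops);
+ *
+ *		if (major < 0)
+ *			return major;
+ *		example_major = major;
+ *		return 0;
+ *	}
+ *
+ *	static void __exit example_exit(void)
+ *	{
+ *		unregister_chrdev(example_major, "example");
+ *	}
+ */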
+
+/* fs/block_dev.c */
+#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
+
+#ifdef CONFIG_BLOCK
+#define BLKDEV_MAJOR_HASH_SIZE 255
+extern const char *__bdevname(dev_t, char *buffer);
+extern const char *bdevname(struct block_device *bdev, char *buffer);
+extern struct block_device *lookup_bdev(const char *);
+extern void blkdev_show(struct seq_file *,off_t);
+
+#else
+#define BLKDEV_MAJOR_HASH_SIZE 0
+#endif
+
+extern void init_special_inode(struct inode *, umode_t, dev_t);
+
+/* Invalid inode operations -- fs/bad_inode.c */
+extern void make_bad_inode(struct inode *);
+extern int is_bad_inode(struct inode *);
+
+#ifdef CONFIG_BLOCK
+/*
+ * return READ, READA, or WRITE
+ */
+#define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK))
+
+/*
+ * return data direction, READ or WRITE
+ */
+#define bio_data_dir(bio) ((bio)->bi_rw & 1)
+
+extern void check_disk_size_change(struct gendisk *disk,
+ struct block_device *bdev);
+extern int revalidate_disk(struct gendisk *);
+extern int check_disk_change(struct block_device *);
+extern int __invalidate_device(struct block_device *, bool);
+extern int invalidate_partition(struct gendisk *, int);
+#endif
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+
+static inline void invalidate_remote_inode(struct inode *inode)
+{
+ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+ S_ISLNK(inode->i_mode))
+ invalidate_mapping_pages(inode->i_mapping, 0, -1);
+}
+extern int invalidate_inode_pages2(struct address_space *mapping);
+extern int invalidate_inode_pages2_range(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+extern int write_inode_now(struct inode *, int);
+extern int filemap_fdatawrite(struct address_space *);
+extern int filemap_flush(struct address_space *);
+extern int filemap_fdatawait(struct address_space *);
+extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
+ loff_t lend);
+extern int filemap_write_and_wait(struct address_space *mapping);
+extern int filemap_write_and_wait_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend);
+extern int __filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end, int sync_mode);
+extern int filemap_fdatawrite_range(struct address_space *mapping,
+ loff_t start, loff_t end);
+
+extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
+ int datasync);
+extern int vfs_fsync(struct file *file, int datasync);
+static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+{
+ if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
+ return 0;
+ return vfs_fsync_range(file, pos, pos + count - 1,
+ (file->f_flags & __O_SYNC) ? 0 : 1);
+}
+extern void emergency_sync(void);
+extern void emergency_remount(void);
+#ifdef CONFIG_BLOCK
+extern sector_t bmap(struct inode *, sector_t);
+#endif
+extern int notify_change(struct dentry *, struct iattr *, struct inode **);
+extern int inode_permission(struct inode *, int);
+extern int __inode_permission(struct inode *, int);
+extern int generic_permission(struct inode *, int);
+extern int __check_sticky(struct inode *dir, struct inode *inode);
+
+static inline bool execute_ok(struct inode *inode)
+{
+ return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
+}
+
+static inline void file_start_write(struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return;
+ __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+}
+
+static inline bool file_start_write_trylock(struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return true;
+ return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
+}
+
+static inline void file_end_write(struct file *file)
+{
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return;
+ __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+}
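+/*
+ * Illustrative sketch (editorial, not part of the original header): a
+ * hypothetical ->write handler bracketing its work with file_start_write()
+ * and file_end_write() so that a superblock freeze (SB_FREEZE_WRITE) waits
+ * for the write to finish; foo_do_write() is a made-up helper.
+ *
+ *	static ssize_t foo_write(struct file *file, const char __user *buf,
+ *				 size_t len, loff_t *ppos)
+ *	{
+ *		ssize_t ret;
+ *
+ *		file_start_write(file);
+ *		ret = foo_do_write(file, buf, len, ppos);
+ *		file_end_write(file);
+ *		return ret;
+ *	}
+ */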
+
+/*
+ * get_write_access() gets write permission for a file.
+ * put_write_access() releases this write permission.
+ * This is used for regular files.
+ * We cannot support write (and maybe mmap read-write shared) accesses and
+ * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode
+ * can have the following values:
+ * 0: no writers, no VM_DENYWRITE mappings
+ * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist
+ * > 0: (i_writecount) users are writing to the file.
+ *
+ * Normally we operate on that counter with atomic_{inc,dec} and it's safe
+ * except for the cases where we don't hold i_writecount yet. Then we need to
+ * use {get,deny}_write_access() - these functions check the sign and refuse
+ * to make the change if the sign is wrong; see the sketch below the helpers.
+ */
+static inline int get_write_access(struct inode *inode)
+{
+ return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY;
+}
+static inline int deny_write_access(struct file *file)
+{
+ struct inode *inode = file_inode(file);
+ return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
+}
+static inline void put_write_access(struct inode * inode)
+{
+ atomic_dec(&inode->i_writecount);
+}
+static inline void allow_write_access(struct file *file)
+{
+ if (file)
+ atomic_inc(&file_inode(file)->i_writecount);
+}
+static inline bool inode_is_open_for_write(const struct inode *inode)
+{
+ return atomic_read(&inode->i_writecount) > 0;
+}
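+/*
+ * Illustrative sketch (editorial, not part of the original header): typical
+ * pairing of the helpers above. An exec-like path denies writers while it
+ * uses the file and re-allows them afterwards; deny_write_access() fails
+ * with -ETXTBSY if the file is already open for writing. foo_load() and
+ * do_the_load() are made-up names.
+ *
+ *	static int foo_load(struct file *file)
+ *	{
+ *		int err = deny_write_access(file);
+ *
+ *		if (err)
+ *			return err;
+ *		err = do_the_load(file);
+ *		allow_write_access(file);
+ *		return err;
+ *	}
+ */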
+
+#ifdef CONFIG_IMA
+static inline void i_readcount_dec(struct inode *inode)
+{
+ BUG_ON(!atomic_read(&inode->i_readcount));
+ atomic_dec(&inode->i_readcount);
+}
+static inline void i_readcount_inc(struct inode *inode)
+{
+ atomic_inc(&inode->i_readcount);
+}
+#else
+static inline void i_readcount_dec(struct inode *inode)
+{
+ return;
+}
+static inline void i_readcount_inc(struct inode *inode)
+{
+ return;
+}
+#endif
+extern int do_pipe_flags(int *, int);
+
+extern int kernel_read(struct file *, loff_t, char *, unsigned long);
+extern ssize_t kernel_write(struct file *, const char *, size_t, loff_t);
+extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
+extern struct file * open_exec(const char *);
+
+/* fs/dcache.c -- generic fs support functions */
+extern int is_subdir(struct dentry *, struct dentry *);
+extern int path_is_under(struct path *, struct path *);
+
+#include <linux/err.h>
+
+/* needed for stackable file system support */
+extern loff_t default_llseek(struct file *file, loff_t offset, int whence);
+
+extern loff_t vfs_llseek(struct file *file, loff_t offset, int whence);
+
+extern int inode_init_always(struct super_block *, struct inode *);
+extern void inode_init_once(struct inode *);
+extern void address_space_init_once(struct address_space *mapping);
+extern struct inode * igrab(struct inode *);
+extern ino_t iunique(struct super_block *, ino_t);
+extern int inode_needs_sync(struct inode *inode);
+extern int generic_delete_inode(struct inode *inode);
+static inline int generic_drop_inode(struct inode *inode)
+{
+ return !inode->i_nlink || inode_unhashed(inode);
+}
+
+extern struct inode *ilookup5_nowait(struct super_block *sb,
+ unsigned long hashval, int (*test)(struct inode *, void *),
+ void *data);
+extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *), void *data);
+extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
+
+extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
+extern struct inode * iget_locked(struct super_block *, unsigned long);
+extern struct inode *find_inode_nowait(struct super_block *,
+ unsigned long,
+ int (*match)(struct inode *,
+ unsigned long, void *),
+ void *data);
+extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *);
+extern int insert_inode_locked(struct inode *);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
+#else
+static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
+#endif
+extern void unlock_new_inode(struct inode *);
+extern unsigned int get_next_ino(void);
+
+extern void __iget(struct inode * inode);
+extern void iget_failed(struct inode *);
+extern void clear_inode(struct inode *);
+extern void __destroy_inode(struct inode *);
+extern struct inode *new_inode_pseudo(struct super_block *sb);
+extern struct inode *new_inode(struct super_block *sb);
+extern void free_inode_nonrcu(struct inode *inode);
+extern int should_remove_suid(struct dentry *);
+extern int file_remove_suid(struct file *);
+
+extern void __insert_inode_hash(struct inode *, unsigned long hashval);
+static inline void insert_inode_hash(struct inode *inode)
+{
+ __insert_inode_hash(inode, inode->i_ino);
+}
+
+extern void __remove_inode_hash(struct inode *);
+static inline void remove_inode_hash(struct inode *inode)
+{
+ if (!inode_unhashed(inode))
+ __remove_inode_hash(inode);
+}
+
+extern void inode_sb_list_add(struct inode *inode);
+
+#ifdef CONFIG_BLOCK
+extern void submit_bio(int, struct bio *);
+extern int bdev_read_only(struct block_device *);
+#endif
+extern int set_blocksize(struct block_device *, int);
+extern int sb_set_blocksize(struct super_block *, int);
+extern int sb_min_blocksize(struct super_block *, int);
+
+extern int generic_file_mmap(struct file *, struct vm_area_struct *);
+extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
+extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
+extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
+extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
+extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
+
+ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos);
+ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos);
+
+/* fs/block_dev.c */
+extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
+extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
+extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
+ int datasync);
+extern void block_sync_page(struct page *page);
+
+/* fs/splice.c */
+extern ssize_t generic_file_splice_read(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
+ struct file *, loff_t *, size_t, unsigned int);
+extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
+ struct file *out, loff_t *, size_t len, unsigned int flags);
+extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+ loff_t *opos, size_t len, unsigned int flags);
+
+
+extern void
+file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
+extern loff_t noop_llseek(struct file *file, loff_t offset, int whence);
+extern loff_t no_llseek(struct file *file, loff_t offset, int whence);
+extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize);
+extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence);
+extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
+ int whence, loff_t maxsize, loff_t eof);
+extern loff_t fixed_size_llseek(struct file *file, loff_t offset,
+ int whence, loff_t size);
+extern int generic_file_open(struct inode * inode, struct file * filp);
+extern int nonseekable_open(struct inode * inode, struct file * filp);
+
+ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ get_block_t, dio_iodone_t, int flags);
+int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
+int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
+
+#ifdef CONFIG_BLOCK
+typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
+ loff_t file_offset);
+
+enum {
+ /* need locking between buffered and direct access */
+ DIO_LOCKING = 0x01,
+
+ /* filesystem does not support filling holes */
+ DIO_SKIP_HOLES = 0x02,
+
+ /* filesystem can handle aio writes beyond i_size */
+ DIO_ASYNC_EXTEND = 0x04,
+
+ /* inode/fs/bdev does not need truncate protection */
+ DIO_SKIP_DIO_COUNT = 0x08,
+};
+
+void dio_end_io(struct bio *bio, int error);
+
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block,
+ dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags);
+
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+ struct inode *inode,
+ struct iov_iter *iter, loff_t offset,
+ get_block_t get_block)
+{
+ return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+ offset, get_block, NULL, NULL,
+ DIO_LOCKING | DIO_SKIP_HOLES);
+}
+#endif
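+/*
+ * Illustrative sketch (editorial, not part of the original header): a
+ * filesystem's ->direct_IO address_space operation commonly just forwards
+ * to blockdev_direct_IO() with its own get_block routine; foo_get_block()
+ * is a made-up name.
+ *
+ *	static ssize_t foo_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ *				     loff_t offset)
+ *	{
+ *		struct inode *inode = file_inode(iocb->ki_filp);
+ *
+ *		return blockdev_direct_IO(iocb, inode, iter, offset,
+ *					  foo_get_block);
+ *	}
+ */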
+
+void inode_dio_wait(struct inode *inode);
+
+/*
+ * inode_dio_begin - signal start of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called before starting a direct I/O request on the inode and pairs
+ * with inode_dio_end(); the count it maintains lets inode_dio_wait() wait for
+ * all outstanding direct I/O on the inode to complete.
+ */
+static inline void inode_dio_begin(struct inode *inode)
+{
+ atomic_inc(&inode->i_dio_count);
+}
+
+/*
+ * inode_dio_end - signal finish of a direct I/O request
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+static inline void inode_dio_end(struct inode *inode)
+{
+ if (atomic_dec_and_test(&inode->i_dio_count))
+ wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
+
+extern void inode_set_flags(struct inode *inode, unsigned int flags,
+ unsigned int mask);
+
+extern const struct file_operations generic_ro_fops;
+
+#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
+
+extern int readlink_copy(char __user *, int, const char *);
+extern int page_readlink(struct dentry *, char __user *, int);
+extern void *page_follow_link_light(struct dentry *, struct nameidata *);
+extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern int __page_symlink(struct inode *inode, const char *symname, int len,
+ int nofs);
+extern int page_symlink(struct inode *inode, const char *symname, int len);
+extern const struct inode_operations page_symlink_inode_operations;
+extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
+extern int generic_readlink(struct dentry *, char __user *, int);
+extern void generic_fillattr(struct inode *, struct kstat *);
+int vfs_getattr_nosec(struct path *path, struct kstat *stat);
+extern int vfs_getattr(struct path *, struct kstat *);
+void __inode_add_bytes(struct inode *inode, loff_t bytes);
+void inode_add_bytes(struct inode *inode, loff_t bytes);
+void __inode_sub_bytes(struct inode *inode, loff_t bytes);
+void inode_sub_bytes(struct inode *inode, loff_t bytes);
+loff_t inode_get_bytes(struct inode *inode);
+void inode_set_bytes(struct inode *inode, loff_t bytes);
+
+extern int iterate_dir(struct file *, struct dir_context *);
+
+extern int vfs_stat(const char __user *, struct kstat *);
+extern int vfs_lstat(const char __user *, struct kstat *);
+extern int vfs_fstat(unsigned int, struct kstat *);
+extern int vfs_fstatat(int , const char __user *, struct kstat *, int);
+
+extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
+ unsigned long arg);
+extern int __generic_block_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo,
+ loff_t start, loff_t len,
+ get_block_t *get_block);
+extern int generic_block_fiemap(struct inode *inode,
+ struct fiemap_extent_info *fieinfo, u64 start,
+ u64 len, get_block_t *get_block);
+
+extern void get_filesystem(struct file_system_type *fs);
+extern void put_filesystem(struct file_system_type *fs);
+extern struct file_system_type *get_fs_type(const char *name);
+extern struct super_block *get_super(struct block_device *);
+extern struct super_block *get_super_thawed(struct block_device *);
+extern struct super_block *get_active_super(struct block_device *bdev);
+extern void drop_super(struct super_block *sb);
+extern void iterate_supers(void (*)(struct super_block *, void *), void *);
+extern void iterate_supers_type(struct file_system_type *,
+ void (*)(struct super_block *, void *), void *);
+
+extern int dcache_dir_open(struct inode *, struct file *);
+extern int dcache_dir_close(struct inode *, struct file *);
+extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
+extern int dcache_readdir(struct file *, struct dir_context *);
+extern int simple_setattr(struct dentry *, struct iattr *);
+extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+extern int simple_statfs(struct dentry *, struct kstatfs *);
+extern int simple_open(struct inode *inode, struct file *file);
+extern int simple_link(struct dentry *, struct inode *, struct dentry *);
+extern int simple_unlink(struct inode *, struct dentry *);
+extern int simple_rmdir(struct inode *, struct dentry *);
+extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int noop_fsync(struct file *, loff_t, loff_t, int);
+extern int simple_empty(struct dentry *);
+extern int simple_readpage(struct file *file, struct page *page);
+extern int simple_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata);
+extern int simple_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata);
+extern int always_delete_dentry(const struct dentry *);
+extern struct inode *alloc_anon_inode(struct super_block *);
+extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
+extern const struct dentry_operations simple_dentry_operations;
+
+extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
+extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
+extern const struct file_operations simple_dir_operations;
+extern const struct inode_operations simple_dir_inode_operations;
+extern void make_empty_dir_inode(struct inode *inode);
+extern bool is_empty_dir_inode(struct inode *inode);
+struct tree_descr { char *name; const struct file_operations *ops; int mode; };
+struct dentry *d_alloc_name(struct dentry *, const char *);
+extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
+extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
+extern void simple_release_fs(struct vfsmount **mount, int *count);
+
+extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
+ loff_t *ppos, const void *from, size_t available);
+extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
+ const void __user *from, size_t count);
+
+extern int __generic_file_fsync(struct file *, loff_t, loff_t, int);
+extern int generic_file_fsync(struct file *, loff_t, loff_t, int);
+
+extern int generic_check_addressable(unsigned, u64);
+
+#ifdef CONFIG_MIGRATION
+extern int buffer_migrate_page(struct address_space *,
+ struct page *, struct page *,
+ enum migrate_mode);
+#else
+#define buffer_migrate_page NULL
+#endif
+
+extern int inode_change_ok(const struct inode *, struct iattr *);
+extern int inode_newsize_ok(const struct inode *, loff_t offset);
+extern void setattr_copy(struct inode *inode, const struct iattr *attr);
+
+extern int file_update_time(struct file *file);
+
+extern int generic_show_options(struct seq_file *m, struct dentry *root);
+extern void save_mount_options(struct super_block *sb, char *options);
+extern void replace_mount_options(struct super_block *sb, char *options);
+
+static inline bool io_is_direct(struct file *filp)
+{
+ return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+}
+
+static inline int iocb_flags(struct file *file)
+{
+ int res = 0;
+ if (file->f_flags & O_APPEND)
+ res |= IOCB_APPEND;
+ if (io_is_direct(file))
+ res |= IOCB_DIRECT;
+ return res;
+}
+
+static inline ino_t parent_ino(struct dentry *dentry)
+{
+ ino_t res;
+
+ /*
+ * Don't strictly need d_lock here? If the parent ino could change
+ * then surely we'd have a deeper race in the caller?
+ */
+ spin_lock(&dentry->d_lock);
+ res = dentry->d_parent->d_inode->i_ino;
+ spin_unlock(&dentry->d_lock);
+ return res;
+}
+
+/* Transaction based IO helpers */
+
+/*
+ * An argresp is stored in an allocated page and holds the
+ * size of the argument or response, along with its content
+ */
+struct simple_transaction_argresp {
+ ssize_t size;
+ char data[0];
+};
+
+#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
+
+char *simple_transaction_get(struct file *file, const char __user *buf,
+ size_t size);
+ssize_t simple_transaction_read(struct file *file, char __user *buf,
+ size_t size, loff_t *pos);
+int simple_transaction_release(struct inode *inode, struct file *file);
+
+void simple_transaction_set(struct file *file, size_t n);
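+/*
+ * Illustrative sketch (editorial, not part of the original header): how the
+ * transaction helpers above are typically wired up. A write copies the
+ * argument in with simple_transaction_get(), the handler builds its reply in
+ * the same page and records the reply length with simple_transaction_set(),
+ * and subsequent reads return the reply through simple_transaction_read();
+ * foo_handle_cmd() is a made-up name.
+ *
+ *	static ssize_t foo_write(struct file *file, const char __user *buf,
+ *				 size_t size, loff_t *pos)
+ *	{
+ *		char *data = simple_transaction_get(file, buf, size);
+ *		ssize_t ret;
+ *
+ *		if (IS_ERR(data))
+ *			return PTR_ERR(data);
+ *		ret = foo_handle_cmd(data, size);
+ *		if (ret < 0)
+ *			return ret;
+ *		simple_transaction_set(file, ret);
+ *		return size;
+ *	}
+ */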
+
+/*
+ * simple attribute files
+ *
+ * These attributes behave similar to those in sysfs:
+ *
+ * Writing to an attribute immediately sets a value; an open file can be
+ * written to multiple times.
+ *
+ * Reading from an attribute creates a buffer from the value, which may then
+ * be consumed over multiple read calls. Once the attribute has been read
+ * completely, no further read calls are possible until the file is opened
+ * again.
+ *
+ * All attributes contain a text representation of a numeric value
+ * that is accessed with the get() and set() functions.
+ */
+#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+ __simple_attr_check_format(__fmt, 0ull); \
+ return simple_attr_open(inode, file, __get, __set, __fmt); \
+} \
+static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = simple_attr_read, \
+ .write = simple_attr_write, \
+ .llseek = generic_file_llseek, \
+}
+
+static inline __printf(1, 2)
+void __simple_attr_check_format(const char *fmt, ...)
+{
+ /* don't do anything, just let the compiler check the arguments; */
+}
+
+int simple_attr_open(struct inode *inode, struct file *file,
+ int (*get)(void *, u64 *), int (*set)(void *, u64),
+ const char *fmt);
+int simple_attr_release(struct inode *inode, struct file *file);
+ssize_t simple_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+ssize_t simple_attr_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos);
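+/*
+ * Illustrative sketch (editorial, not part of the original header): defining
+ * a simple attribute around a pair of get/set callbacks; the resulting
+ * foo_fops can then be handed to something like debugfs_create_file().
+ * The foo_* names are made up.
+ *
+ *	static int foo_get(void *data, u64 *val)
+ *	{
+ *		*val = *(u64 *)data;
+ *		return 0;
+ *	}
+ *
+ *	static int foo_set(void *data, u64 val)
+ *	{
+ *		*(u64 *)data = val;
+ *		return 0;
+ *	}
+ *	DEFINE_SIMPLE_ATTRIBUTE(foo_fops, foo_get, foo_set, "%llu\n");
+ */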
+
+struct ctl_table;
+int proc_nr_files(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+int proc_nr_dentry(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+int proc_nr_inodes(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+int __init get_filesystem_list(char *buf);
+
+#define __FMODE_EXEC ((__force int) FMODE_EXEC)
+#define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY)
+
+#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
+#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
+ (flag & __FMODE_NONOTIFY)))
+
+static inline int is_sxid(umode_t mode)
+{
+ return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
+}
+
+static inline int check_sticky(struct inode *dir, struct inode *inode)
+{
+ if (!(dir->i_mode & S_ISVTX))
+ return 0;
+
+ return __check_sticky(dir, inode);
+}
+
+static inline void inode_has_no_xattr(struct inode *inode)
+{
+ if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
+ inode->i_flags |= S_NOSEC;
+}
+
+static inline bool is_root_inode(struct inode *inode)
+{
+ return inode == inode->i_sb->s_root->d_inode;
+}
+
+static inline bool dir_emit(struct dir_context *ctx,
+ const char *name, int namelen,
+ u64 ino, unsigned type)
+{
+ return ctx->actor(ctx, name, namelen, ctx->pos, ino, type) == 0;
+}
+static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx)
+{
+ return ctx->actor(ctx, ".", 1, ctx->pos,
+ file->f_path.dentry->d_inode->i_ino, DT_DIR) == 0;
+}
+static inline bool dir_emit_dotdot(struct file *file, struct dir_context *ctx)
+{
+ return ctx->actor(ctx, "..", 2, ctx->pos,
+ parent_ino(file->f_path.dentry), DT_DIR) == 0;
+}
+static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
+{
+ if (ctx->pos == 0) {
+ if (!dir_emit_dot(file, ctx))
+ return false;
+ ctx->pos = 1;
+ }
+ if (ctx->pos == 1) {
+ if (!dir_emit_dotdot(file, ctx))
+ return false;
+ ctx->pos = 2;
+ }
+ return true;
+}
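+/*
+ * Illustrative sketch (editorial, not part of the original header): the
+ * shape of an ->iterate directory handler built on the emit helpers above;
+ * foo_next_entry() and the struct foo_dirent fields are made-up names.
+ *
+ *	static int foo_iterate(struct file *file, struct dir_context *ctx)
+ *	{
+ *		struct foo_dirent *de;
+ *
+ *		if (!dir_emit_dots(file, ctx))
+ *			return 0;
+ *		while ((de = foo_next_entry(file, ctx->pos)) != NULL) {
+ *			if (!dir_emit(ctx, de->name, de->namelen,
+ *				      de->ino, de->type))
+ *				return 0;
+ *			ctx->pos++;
+ *		}
+ *		return 0;
+ *	}
+ */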
+static inline bool dir_relax(struct inode *inode)
+{
+ mutex_unlock(&inode->i_mutex);
+ mutex_lock(&inode->i_mutex);
+ return !IS_DEADDIR(inode);
+}
+
+#endif /* _LINUX_FS_H */
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
new file mode 100644
index 000000000..77d783f71
--- /dev/null
+++ b/include/linux/fs_enet_pd.h
@@ -0,0 +1,165 @@
+/*
+ * Platform information definitions for the
+ * universal Freescale Ethernet driver.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ * by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef FS_ENET_PD_H
+#define FS_ENET_PD_H
+
+#include <linux/clk.h>
+#include <linux/string.h>
+#include <linux/of_mdio.h>
+#include <linux/if_ether.h>
+#include <asm/types.h>
+
+#define FS_ENET_NAME "fs_enet"
+
+enum fs_id {
+ fsid_fec1,
+ fsid_fec2,
+ fsid_fcc1,
+ fsid_fcc2,
+ fsid_fcc3,
+ fsid_scc1,
+ fsid_scc2,
+ fsid_scc3,
+ fsid_scc4,
+};
+
+#define FS_MAX_INDEX 9
+
+static inline int fs_get_fec_index(enum fs_id id)
+{
+ if (id >= fsid_fec1 && id <= fsid_fec2)
+ return id - fsid_fec1;
+ return -1;
+}
+
+static inline int fs_get_fcc_index(enum fs_id id)
+{
+ if (id >= fsid_fcc1 && id <= fsid_fcc3)
+ return id - fsid_fcc1;
+ return -1;
+}
+
+static inline int fs_get_scc_index(enum fs_id id)
+{
+ if (id >= fsid_scc1 && id <= fsid_scc4)
+ return id - fsid_scc1;
+ return -1;
+}
+
+static inline int fs_fec_index2id(int index)
+{
+ int id = fsid_fec1 + index - 1;
+ if (id >= fsid_fec1 && id <= fsid_fec2)
+ return id;
+ return FS_MAX_INDEX;
+}
+
+static inline int fs_fcc_index2id(int index)
+{
+ int id = fsid_fcc1 + index - 1;
+ if (id >= fsid_fcc1 && id <= fsid_fcc3)
+ return id;
+ return FS_MAX_INDEX;
+}
+
+static inline int fs_scc_index2id(int index)
+{
+ int id = fsid_scc1 + index - 1;
+ if (id >= fsid_scc1 && id <= fsid_scc4)
+ return id;
+ return FS_MAX_INDEX;
+}
+
+enum fs_mii_method {
+ fsmii_fixed,
+ fsmii_fec,
+ fsmii_bitbang,
+};
+
+enum fs_ioport {
+ fsiop_porta,
+ fsiop_portb,
+ fsiop_portc,
+ fsiop_portd,
+ fsiop_porte,
+};
+
+struct fs_mii_bit {
+ u32 offset;
+ u8 bit;
+ u8 polarity;
+};
+struct fs_mii_bb_platform_info {
+ struct fs_mii_bit mdio_dir;
+ struct fs_mii_bit mdio_dat;
+ struct fs_mii_bit mdc_dat;
+ int delay; /* delay in us */
+ int irq[32]; /* IRQs, one per PHY */
+};
+
+struct fs_platform_info {
+
+ void(*init_ioports)(struct fs_platform_info *);
+ /* device specific information */
+ int fs_no; /* controller index */
+ char fs_type[4]; /* controller type */
+
+ u32 cp_page; /* CPM page */
+ u32 cp_block; /* CPM sblock */
+ u32 cp_command; /* CPM page/sblock/mcn */
+
+ u32 clk_trx; /* some stuff for pins & mux configuration*/
+ u32 clk_rx;
+ u32 clk_tx;
+ u32 clk_route;
+ u32 clk_mask;
+
+ u32 mem_offset;
+ u32 dpram_offset;
+ u32 fcc_regs_c;
+
+ u32 device_flags;
+
+ struct device_node *phy_node;
+ const struct fs_mii_bus_info *bus_info;
+
+ int rx_ring, tx_ring; /* number of rx and tx buffers */
+ __u8 macaddr[ETH_ALEN]; /* MAC address */
+ int rx_copybreak; /* copy frames smaller than this limit */
+ int napi_weight; /* NAPI weight */
+
+ int use_rmii; /* use RMII mode */
+ int has_phy; /* set if the controller also contains the PHY */
+
+ struct clk *clk_per; /* 'per' clock for register access */
+};
+struct fs_mii_fec_platform_info {
+ u32 irq[32];
+ u32 mii_speed;
+};
+
+static inline int fs_get_id(struct fs_platform_info *fpi)
+{
+ if(strstr(fpi->fs_type, "SCC"))
+ return fs_scc_index2id(fpi->fs_no);
+ if(strstr(fpi->fs_type, "FCC"))
+ return fs_fcc_index2id(fpi->fs_no);
+ if(strstr(fpi->fs_type, "FEC"))
+ return fs_fec_index2id(fpi->fs_no);
+ return fpi->fs_no;
+}
+
+#endif
diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h
new file mode 100644
index 000000000..3886b3bff
--- /dev/null
+++ b/include/linux/fs_pin.h
@@ -0,0 +1,24 @@
+#include <linux/wait.h>
+
+struct fs_pin {
+ wait_queue_head_t wait;
+ int done;
+ struct hlist_node s_list;
+ struct hlist_node m_list;
+ void (*kill)(struct fs_pin *);
+};
+
+struct vfsmount;
+
+static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *))
+{
+ init_waitqueue_head(&p->wait);
+ INIT_HLIST_NODE(&p->s_list);
+ INIT_HLIST_NODE(&p->m_list);
+ p->kill = kill;
+}
+
+void pin_remove(struct fs_pin *);
+void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *);
+void pin_insert(struct fs_pin *, struct vfsmount *);
+void pin_kill(struct fs_pin *);
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
new file mode 100644
index 000000000..da317c716
--- /dev/null
+++ b/include/linux/fs_stack.h
@@ -0,0 +1,29 @@
+#ifndef _LINUX_FS_STACK_H
+#define _LINUX_FS_STACK_H
+
+/* This file defines generic functions used primarily by stackable
+ * filesystems; none of these functions require i_mutex to be held.
+ */
+
+#include <linux/fs.h>
+
+/* externs for fs/stack.c */
+extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src);
+extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
+
+/* inlines */
+static inline void fsstack_copy_attr_atime(struct inode *dest,
+ const struct inode *src)
+{
+ dest->i_atime = src->i_atime;
+}
+
+static inline void fsstack_copy_attr_times(struct inode *dest,
+ const struct inode *src)
+{
+ dest->i_atime = src->i_atime;
+ dest->i_mtime = src->i_mtime;
+ dest->i_ctime = src->i_ctime;
+}
+
+#endif /* _LINUX_FS_STACK_H */
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
new file mode 100644
index 000000000..0efc3e628
--- /dev/null
+++ b/include/linux/fs_struct.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_FS_STRUCT_H
+#define _LINUX_FS_STRUCT_H
+
+#include <linux/path.h>
+#include <linux/spinlock.h>
+#include <linux/seqlock.h>
+
+struct fs_struct {
+ int users;
+ spinlock_t lock;
+ seqcount_t seq;
+ int umask;
+ int in_exec;
+ struct path root, pwd;
+};
+
+extern struct kmem_cache *fs_cachep;
+
+extern void exit_fs(struct task_struct *);
+extern void set_fs_root(struct fs_struct *, const struct path *);
+extern void set_fs_pwd(struct fs_struct *, const struct path *);
+extern struct fs_struct *copy_fs_struct(struct fs_struct *);
+extern void free_fs_struct(struct fs_struct *);
+extern int unshare_fs_struct(void);
+
+static inline void get_fs_root(struct fs_struct *fs, struct path *root)
+{
+ spin_lock(&fs->lock);
+ *root = fs->root;
+ path_get(root);
+ spin_unlock(&fs->lock);
+}
+
+static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd)
+{
+ spin_lock(&fs->lock);
+ *pwd = fs->pwd;
+ path_get(pwd);
+ spin_unlock(&fs->lock);
+}
+
+extern bool current_chrooted(void);
+
+#endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h
new file mode 100644
index 000000000..36b61ff39
--- /dev/null
+++ b/include/linux/fs_uart_pd.h
@@ -0,0 +1,71 @@
+/*
+ * Platform information definitions for the CPM Uart driver.
+ *
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef FS_UART_PD_H
+#define FS_UART_PD_H
+
+#include <linux/string.h>
+#include <asm/types.h>
+
+enum fs_uart_id {
+ fsid_smc1_uart,
+ fsid_smc2_uart,
+ fsid_scc1_uart,
+ fsid_scc2_uart,
+ fsid_scc3_uart,
+ fsid_scc4_uart,
+ fs_uart_nr,
+};
+
+static inline int fs_uart_id_scc2fsid(int id)
+{
+ return fsid_scc1_uart + id - 1;
+}
+
+static inline int fs_uart_id_fsid2scc(int id)
+{
+ return id - fsid_scc1_uart + 1;
+}
+
+static inline int fs_uart_id_smc2fsid(int id)
+{
+ return fsid_smc1_uart + id - 1;
+}
+
+static inline int fs_uart_id_fsid2smc(int id)
+{
+ return id - fsid_smc1_uart + 1;
+}
+
+struct fs_uart_platform_info {
+ void(*init_ioports)(struct fs_uart_platform_info *);
+ /* device specific information */
+ int fs_no; /* controller index */
+ char fs_type[4]; /* controller type */
+ u32 uart_clk;
+ u8 tx_num_fifo;
+ u8 tx_buf_size;
+ u8 rx_num_fifo;
+ u8 rx_buf_size;
+ u8 brg;
+ u8 clk_rx;
+ u8 clk_tx;
+};
+
+static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi)
+{
+ if(strstr(fpi->fs_type, "SMC"))
+ return fs_uart_id_smc2fsid(fpi->fs_no);
+ if(strstr(fpi->fs_type, "SCC"))
+ return fs_uart_id_scc2fsid(fpi->fs_no);
+ return fpi->fs_no;
+}
+
+#endif
diff --git a/include/linux/fs_uuid.h b/include/linux/fs_uuid.h
new file mode 100644
index 000000000..3234135b5
--- /dev/null
+++ b/include/linux/fs_uuid.h
@@ -0,0 +1,19 @@
+#include <linux/device.h>
+
+struct hd_struct;
+struct block_device;
+
+struct fs_info {
+ char uuid[16];
+ dev_t dev_t;
+ char *last_mount;
+ int last_mount_size;
+};
+
+int part_matches_fs_info(struct hd_struct *part, struct fs_info *seek);
+dev_t blk_lookup_fs_info(struct fs_info *seek);
+struct fs_info *fs_info_from_block_dev(struct block_device *bdev);
+void free_fs_info(struct fs_info *fs_info);
+int bdev_matches_key(struct block_device *bdev, const char *key);
+struct block_device *next_bdev_of_type(struct block_device *last,
+ const char *key);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
new file mode 100644
index 000000000..771484993
--- /dev/null
+++ b/include/linux/fscache-cache.h
@@ -0,0 +1,554 @@
+/* General filesystem caching backing cache interface
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NOTE!!! See:
+ *
+ * Documentation/filesystems/caching/backend-api.txt
+ *
+ * for a description of the cache backend interface declared here.
+ */
+
+#ifndef _LINUX_FSCACHE_CACHE_H
+#define _LINUX_FSCACHE_CACHE_H
+
+#include <linux/fscache.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#define NR_MAXCACHES BITS_PER_LONG
+
+struct fscache_cache;
+struct fscache_cache_ops;
+struct fscache_object;
+struct fscache_operation;
+
+/*
+ * cache tag definition
+ */
+struct fscache_cache_tag {
+ struct list_head link;
+ struct fscache_cache *cache; /* cache referred to by this tag */
+ unsigned long flags;
+#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
+ atomic_t usage;
+ char name[0]; /* tag name */
+};
+
+/*
+ * cache definition
+ */
+struct fscache_cache {
+ const struct fscache_cache_ops *ops;
+ struct fscache_cache_tag *tag; /* tag representing this cache */
+ struct kobject *kobj; /* system representation of this cache */
+ struct list_head link; /* link in list of caches */
+ size_t max_index_size; /* maximum size of index data */
+ char identifier[36]; /* cache label */
+
+ /* node management */
+ struct work_struct op_gc; /* operation garbage collector */
+ struct list_head object_list; /* list of data/index objects */
+ struct list_head op_gc_list; /* list of ops to be deleted */
+ spinlock_t object_list_lock;
+ spinlock_t op_gc_list_lock;
+ atomic_t object_count; /* no. of live objects in this cache */
+ struct fscache_object *fsdef; /* object for the fsdef index */
+ unsigned long flags;
+#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
+#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
+};
+
+extern wait_queue_head_t fscache_cache_cleared_wq;
+
+/*
+ * operation to be applied to a cache object
+ * - retrieval initiation operations are done in the context of the process
+ * that issued them, and not in an async thread pool
+ */
+typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
+typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
+
+enum fscache_operation_state {
+ FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
+ FSCACHE_OP_ST_INITIALISED, /* Op is initialised */
+ FSCACHE_OP_ST_PENDING, /* Op is blocked from running */
+ FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */
+ FSCACHE_OP_ST_COMPLETE, /* Op is complete */
+ FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */
+ FSCACHE_OP_ST_DEAD /* Op is now dead */
+};
+
+struct fscache_operation {
+ struct work_struct work; /* record for async ops */
+ struct list_head pend_link; /* link in object->pending_ops */
+ struct fscache_object *object; /* object to be operated upon */
+
+ unsigned long flags;
+#define FSCACHE_OP_TYPE 0x000f /* operation type */
+#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
+#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done by the issuing thread, not the pool */
+#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
+#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
+#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
+#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */
+#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */
+
+ enum fscache_operation_state state;
+ atomic_t usage;
+ unsigned debug_id; /* debugging ID */
+
+ /* operation processor callback
+ * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
+ * the op in a non-pool thread */
+ fscache_operation_processor_t processor;
+
+ /* operation releaser */
+ fscache_operation_release_t release;
+};
+
+extern atomic_t fscache_op_debug_id;
+extern void fscache_op_work_func(struct work_struct *work);
+
+extern void fscache_enqueue_operation(struct fscache_operation *);
+extern void fscache_op_complete(struct fscache_operation *, bool);
+extern void fscache_put_operation(struct fscache_operation *);
+
+/**
+ * fscache_operation_init - Do basic initialisation of an operation
+ * @op: The operation to initialise
+ * @processor: The processor callback to assign
+ * @release: The release function to assign
+ *
+ * Do basic initialisation of an operation. The caller must still set the
+ * flags and the object as needed.
+ */
+static inline void fscache_operation_init(struct fscache_operation *op,
+ fscache_operation_processor_t processor,
+ fscache_operation_release_t release)
+{
+ INIT_WORK(&op->work, fscache_op_work_func);
+ atomic_set(&op->usage, 1);
+ op->state = FSCACHE_OP_ST_INITIALISED;
+ op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+ op->processor = processor;
+ op->release = release;
+ INIT_LIST_HEAD(&op->pend_link);
+}
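+/*
+ * Illustrative sketch (editorial, not part of the original header): a cache
+ * backend setting up an asynchronous operation before submitting it.
+ * foo_op_processor(), foo_op_release() and foo_alloc_op() are made-up names,
+ * and kzalloc() assumes <linux/slab.h>.
+ *
+ *	static struct fscache_operation *foo_alloc_op(void)
+ *	{
+ *		struct fscache_operation *op;
+ *
+ *		op = kzalloc(sizeof(*op), GFP_KERNEL);
+ *		if (!op)
+ *			return NULL;
+ *		fscache_operation_init(op, foo_op_processor, foo_op_release);
+ *		op->flags = FSCACHE_OP_ASYNC;
+ *		return op;
+ *	}
+ */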
+
+/*
+ * data read operation
+ */
+struct fscache_retrieval {
+ struct fscache_operation op;
+ struct address_space *mapping; /* netfs pages */
+ fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
+ void *context; /* netfs read context (pinned) */
+ struct list_head to_do; /* list of things to be done by the backend */
+ unsigned long start_time; /* time at which retrieval started */
+ atomic_t n_pages; /* number of pages to be retrieved */
+};
+
+typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
+ struct page *page,
+ gfp_t gfp);
+
+typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
+ struct list_head *pages,
+ unsigned *nr_pages,
+ gfp_t gfp);
+
+/**
+ * fscache_get_retrieval - Get an extra reference on a retrieval operation
+ * @op: The retrieval operation to get a reference on
+ *
+ * Get an extra reference on a retrieval operation.
+ */
+static inline
+struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
+{
+ atomic_inc(&op->op.usage);
+ return op;
+}
+
+/**
+ * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
+ * @op: The retrieval operation affected
+ *
+ * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
+ */
+static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
+{
+ fscache_enqueue_operation(&op->op);
+}
+
+/**
+ * fscache_retrieval_complete - Record (partial) completion of a retrieval
+ * @op: The retrieval operation affected
+ * @n_pages: The number of pages to account for
+ */
+static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
+ int n_pages)
+{
+ atomic_sub(n_pages, &op->n_pages);
+ if (atomic_read(&op->n_pages) <= 0)
+ fscache_op_complete(&op->op, true);
+}
+
+/**
+ * fscache_put_retrieval - Drop a reference to a retrieval operation
+ * @op: The retrieval operation affected
+ *
+ * Drop a reference to a retrieval operation.
+ */
+static inline void fscache_put_retrieval(struct fscache_retrieval *op)
+{
+ fscache_put_operation(&op->op);
+}
+
+/*
+ * cached page storage work item
+ * - used to do three things:
+ * - batch writes to the cache
+ * - do cache writes asynchronously
+ * - defer writes until cache object lookup completion
+ */
+struct fscache_storage {
+ struct fscache_operation op;
+ pgoff_t store_limit; /* don't write more than this */
+};
+
+/*
+ * cache operations
+ */
+struct fscache_cache_ops {
+ /* name of cache provider */
+ const char *name;
+
+ /* allocate an object record for a cookie */
+ struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
+ struct fscache_cookie *cookie);
+
+ /* look up the object for a cookie
+ * - return -ETIMEDOUT to be requeued
+ */
+ int (*lookup_object)(struct fscache_object *object);
+
+ /* finished looking up */
+ void (*lookup_complete)(struct fscache_object *object);
+
+ /* increment the usage count on this object (may fail if unmounting) */
+ struct fscache_object *(*grab_object)(struct fscache_object *object);
+
+ /* pin an object in the cache */
+ int (*pin_object)(struct fscache_object *object);
+
+ /* unpin an object in the cache */
+ void (*unpin_object)(struct fscache_object *object);
+
+ /* check the consistency between the backing cache and the FS-Cache
+ * cookie */
+ bool (*check_consistency)(struct fscache_operation *op);
+
+ /* store the updated auxiliary data on an object */
+ void (*update_object)(struct fscache_object *object);
+
+ /* Invalidate an object */
+ void (*invalidate_object)(struct fscache_operation *op);
+
+ /* discard the resources pinned by an object and effect retirement if
+ * necessary */
+ void (*drop_object)(struct fscache_object *object);
+
+ /* dispose of a reference to an object */
+ void (*put_object)(struct fscache_object *object);
+
+ /* sync a cache */
+ void (*sync_cache)(struct fscache_cache *cache);
+
+ /* notification that the attributes (such as i_size) of a non-index
+ * object have changed */
+ int (*attr_changed)(struct fscache_object *object);
+
+ /* reserve space for an object's data and associated metadata */
+ int (*reserve_space)(struct fscache_object *object, loff_t i_size);
+
+ /* request a backing block for a page be read or allocated in the
+ * cache */
+ fscache_page_retrieval_func_t read_or_alloc_page;
+
+ /* request backing blocks for a list of pages be read or allocated in
+ * the cache */
+ fscache_pages_retrieval_func_t read_or_alloc_pages;
+
+ /* request a backing block for a page be allocated in the cache so that
+ * it can be written directly */
+ fscache_page_retrieval_func_t allocate_page;
+
+ /* request backing blocks for pages be allocated in the cache so that
+ * they can be written directly */
+ fscache_pages_retrieval_func_t allocate_pages;
+
+ /* write a page to its backing block in the cache */
+ int (*write_page)(struct fscache_storage *op, struct page *page);
+
+ /* detach backing block from a page (optional)
+ * - must release the cookie lock before returning
+ * - may sleep
+ */
+ void (*uncache_page)(struct fscache_object *object,
+ struct page *page);
+
+ /* dissociate a cache from all the pages it was backing */
+ void (*dissociate_pages)(struct fscache_cache *cache);
+};
+
+extern struct fscache_cookie fscache_fsdef_index;
+
+/*
+ * Event list for fscache_object::{event_mask,events}
+ */
+enum {
+ FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */
+ FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */
+ FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */
+ FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
+ FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */
+ FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */
+ FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */
+ NR_FSCACHE_OBJECT_EVENTS
+};
+
+#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
+
+/*
+ * States for object state machine.
+ */
+struct fscache_transition {
+ unsigned long events;
+ const struct fscache_state *transit_to;
+};
+
+struct fscache_state {
+ char name[24];
+ char short_name[8];
+ const struct fscache_state *(*work)(struct fscache_object *object,
+ int event);
+ const struct fscache_transition transitions[];
+};
+
+/*
+ * on-disk cache file or index handle
+ */
+struct fscache_object {
+ const struct fscache_state *state; /* Object state machine state */
+ const struct fscache_transition *oob_table; /* OOB state transition table */
+ int debug_id; /* debugging ID */
+ int n_children; /* number of child objects */
+ int n_ops; /* number of extant ops on object */
+ int n_obj_ops; /* number of object ops outstanding on object */
+ int n_in_progress; /* number of ops in progress */
+ int n_exclusive; /* number of exclusive ops queued or in progress */
+ atomic_t n_reads; /* number of read ops in progress */
+ spinlock_t lock; /* state and operations lock */
+
+ unsigned long lookup_jif; /* time at which lookup started */
+ unsigned long oob_event_mask; /* OOB events this object is interested in */
+ unsigned long event_mask; /* events this object is interested in */
+ unsigned long events; /* events to be processed by this object
+ * (order is important - using fls) */
+
+ unsigned long flags;
+#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
+#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
+#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
+#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
+#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
+#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
+#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
+
+ struct list_head cache_link; /* link in cache->object_list */
+ struct hlist_node cookie_link; /* link in cookie->backing_objects */
+ struct fscache_cache *cache; /* cache that supplied this object */
+ struct fscache_cookie *cookie; /* netfs's file/index object */
+ struct fscache_object *parent; /* parent object */
+ struct work_struct work; /* attention scheduling record */
+ struct list_head dependents; /* FIFO of dependent objects */
+ struct list_head dep_link; /* link in parent's dependents list */
+ struct list_head pending_ops; /* unstarted operations on this object */
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+ struct rb_node objlist_link; /* link in global object list */
+#endif
+ pgoff_t store_limit; /* current storage limit */
+ loff_t store_limit_l; /* current storage limit */
+};
+
+extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
+ struct fscache_cache *);
+extern void fscache_object_destroy(struct fscache_object *);
+
+extern void fscache_object_lookup_negative(struct fscache_object *object);
+extern void fscache_obtained_object(struct fscache_object *object);
+
+static inline bool fscache_object_is_live(struct fscache_object *object)
+{
+ return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
+}
+
+static inline bool fscache_object_is_dying(struct fscache_object *object)
+{
+ return !fscache_object_is_live(object);
+}
+
+static inline bool fscache_object_is_available(struct fscache_object *object)
+{
+ return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
+}
+
+static inline bool fscache_object_is_active(struct fscache_object *object)
+{
+ return fscache_object_is_available(object) &&
+ fscache_object_is_live(object) &&
+ !test_bit(FSCACHE_IOERROR, &object->cache->flags);
+}
+
+static inline bool fscache_object_is_dead(struct fscache_object *object)
+{
+ return fscache_object_is_dying(object) &&
+ test_bit(FSCACHE_IOERROR, &object->cache->flags);
+}
+
+/**
+ * fscache_object_destroyed - Note destruction of an object in a cache
+ * @cache: The cache from which the object came
+ *
+ * Note the destruction and deallocation of an object record in a cache.
+ */
+static inline void fscache_object_destroyed(struct fscache_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->object_count))
+ wake_up_all(&fscache_cache_cleared_wq);
+}
+
+/**
+ * fscache_object_lookup_error - Note an object encountered an error
+ * @object: The object on which the error was encountered
+ *
+ * Note that an object encountered a fatal error (usually an I/O error) and
+ * that it should be withdrawn as soon as possible.
+ */
+static inline void fscache_object_lookup_error(struct fscache_object *object)
+{
+ set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+}
+
+/**
+ * fscache_set_store_limit - Set the maximum size to be stored in an object
+ * @object: The object to set the maximum on
+ * @i_size: The limit to set in bytes
+ *
+ * Set the maximum size an object is permitted to reach, implying the highest
+ * byte that may be written. Intended to be called by the attr_changed() op.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+{
+ object->store_limit_l = i_size;
+ object->store_limit = i_size >> PAGE_SHIFT;
+ if (i_size & ~PAGE_MASK)
+ object->store_limit++;
+}
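+/*
+ * For example (assuming 4KiB pages): an i_size of 10000 bytes results in
+ * store_limit_l = 10000 and store_limit = 3, since the partially filled
+ * third page must still be storable.
+ */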
+
+/**
+ * fscache_end_io - End a retrieval operation on a page
+ * @op: The FS-Cache operation covering the retrieval
+ * @page: The page that was to be fetched
+ * @error: The error code (0 if successful)
+ *
+ * Note the end of an operation to retrieve a page, as covered by a particular
+ * operation record.
+ */
+static inline void fscache_end_io(struct fscache_retrieval *op,
+ struct page *page, int error)
+{
+ op->end_io_func(page, op->context, error);
+}
+
+static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
+{
+ atomic_inc(&cookie->n_active);
+}
+
+/**
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @object: Object description
+ *
+ * Request usage of the cookie attached to an object. Returns false if the
+ * cookie's active-user count has already been reduced to 0 (i.e. the cookie
+ * is being relinquished), true otherwise.
+ */
+static inline bool fscache_use_cookie(struct fscache_object *object)
+{
+ struct fscache_cookie *cookie = object->cookie;
+ return atomic_inc_not_zero(&cookie->n_active) != 0;
+}
+
+static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+{
+ return atomic_dec_and_test(&cookie->n_active);
+}
+
+static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
+{
+ wake_up_atomic_t(&cookie->n_active);
+}
+
+/**
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @object: Object description
+ *
+ * Cease usage of the cookie attached to an object. When the users count
+ * reaches zero then the cookie relinquishment will be permitted to proceed.
+ */
+static inline void fscache_unuse_cookie(struct fscache_object *object)
+{
+ struct fscache_cookie *cookie = object->cookie;
+ if (__fscache_unuse_cookie(cookie))
+ __fscache_wake_unused_cookie(cookie);
+}
+
+/*
+ * out-of-line cache backend functions
+ */
+extern __printf(3, 4)
+void fscache_init_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ const char *idfmt, ...);
+
+extern int fscache_add_cache(struct fscache_cache *cache,
+ struct fscache_object *fsdef,
+ const char *tagname);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+
+extern void fscache_io_error(struct fscache_cache *cache);
+
+extern void fscache_mark_page_cached(struct fscache_retrieval *op,
+ struct page *page);
+
+extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
+ struct pagevec *pagevec);
+
+extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
+
+extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+ const void *data,
+ uint16_t datalen);
+
+#endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
new file mode 100644
index 000000000..115bb8191
--- /dev/null
+++ b/include/linux/fscache.h
@@ -0,0 +1,832 @@
+/* General filesystem caching interface
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * NOTE!!! See:
+ *
+ * Documentation/filesystems/caching/netfs-api.txt
+ *
+ * for a description of the network filesystem interface declared here.
+ */
+
+#ifndef _LINUX_FSCACHE_H
+#define _LINUX_FSCACHE_H
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+
+#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define fscache_available() (1)
+#define fscache_cookie_valid(cookie) (cookie)
+#else
+#define fscache_available() (0)
+#define fscache_cookie_valid(cookie) (0)
+#endif
+
+
+/*
+ * overload PG_private_2 to give us PG_fscache - this is used to indicate that
+ * a page is currently backed by a local disk cache
+ */
+#define PageFsCache(page) PagePrivate2((page))
+#define SetPageFsCache(page) SetPagePrivate2((page))
+#define ClearPageFsCache(page) ClearPagePrivate2((page))
+#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
+#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
+
+/* pattern used to fill dead space in an index entry */
+#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
+
+struct pagevec;
+struct fscache_cache_tag;
+struct fscache_cookie;
+struct fscache_netfs;
+
+typedef void (*fscache_rw_complete_t)(struct page *page,
+ void *context,
+ int error);
+
+/* result of index entry consultation */
+enum fscache_checkaux {
+ FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
+ FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
+ FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
+};
+
+/*
+ * fscache cookie definition
+ */
+struct fscache_cookie_def {
+ /* name of cookie type */
+ char name[16];
+
+ /* cookie type */
+ uint8_t type;
+#define FSCACHE_COOKIE_TYPE_INDEX 0
+#define FSCACHE_COOKIE_TYPE_DATAFILE 1
+
+ /* select the cache into which to insert an entry in this index
+ * - optional
+ * - should return a cache identifier or NULL to cause the cache to be
+ * inherited from the parent if possible or the first cache picked
+ * for a non-index file if not
+ */
+ struct fscache_cache_tag *(*select_cache)(
+ const void *parent_netfs_data,
+ const void *cookie_netfs_data);
+
+ /* get an index key
+ * - should store the key data in the buffer
+ * - should return the amount of data stored
+ * - not permitted to return an error
+ * - the netfs data from the cookie being used as the source is
+ * presented
+ */
+ uint16_t (*get_key)(const void *cookie_netfs_data,
+ void *buffer,
+ uint16_t bufmax);
+
+ /* get certain file attributes from the netfs data
+ * - this function can be absent for an index
+ * - not permitted to return an error
+ * - the netfs data from the cookie being used as the source is
+ * presented
+ */
+ void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
+
+ /* get the auxiliary data from netfs data
+ * - this function can be absent if the index carries no state data
+ * - should store the auxiliary data in the buffer
+ * - should return the amount of data stored
+ * - not permitted to return an error
+ * - the netfs data from the cookie being used as the source is
+ * presented
+ */
+ uint16_t (*get_aux)(const void *cookie_netfs_data,
+ void *buffer,
+ uint16_t bufmax);
+
+ /* consult the netfs about the state of an object
+ * - this function can be absent if the index carries no state data
+ * - the netfs data from the cookie being used as the target is
+ * presented, as is the auxiliary data
+ */
+ enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
+ const void *data,
+ uint16_t datalen);
+
+ /* get an extra reference on a read context
+ * - this function can be absent if the completion function doesn't
+ * require a context
+ */
+ void (*get_context)(void *cookie_netfs_data, void *context);
+
+ /* release an extra reference on a read context
+ * - this function can be absent if the completion function doesn't
+ * require a context
+ */
+ void (*put_context)(void *cookie_netfs_data, void *context);
+
+ /* indicate a page that now has cache metadata retained
+ * - this function should mark the specified page as now being cached
+ * - the page will have been marked with PG_fscache before this is
+ * called, so this is optional
+ */
+ void (*mark_page_cached)(void *cookie_netfs_data,
+ struct address_space *mapping,
+ struct page *page);
+
+ /* indicate the cookie is no longer cached
+ * - this function is called when the backing store currently caching
+ * a cookie is removed
+ * - the netfs should use this to clean up any markers indicating
+ * cached pages
+ * - this is mandatory for any object that may have data
+ */
+ void (*now_uncached)(void *cookie_netfs_data);
+};
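+/*
+ * Illustrative sketch (editorial, not part of the original header): the
+ * shape of a netfs cookie definition for a data file; the foo_* callbacks
+ * are made-up names.
+ *
+ *	static const struct fscache_cookie_def foo_file_cookie_def = {
+ *		.name		= "FOO.data",
+ *		.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
+ *		.get_key	= foo_get_key,
+ *		.get_attr	= foo_get_attr,
+ *		.get_aux	= foo_get_aux,
+ *		.check_aux	= foo_check_aux,
+ *		.now_uncached	= foo_now_uncached,
+ *	};
+ */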
+
+/*
+ * fscache cached network filesystem type
+ * - name, version and ops must be filled in before registration
+ * - all other fields will be set during registration
+ */
+struct fscache_netfs {
+ uint32_t version; /* indexing version */
+ const char *name; /* filesystem name */
+ struct fscache_cookie *primary_index;
+ struct list_head link; /* internal link */
+};
+
+/*
+ * data file or index object cookie
+ * - a file will only appear in one cache
+ * - a request to cache a file may or may not be honoured, subject to
+ * constraints such as disk space
+ * - indices are created on disk just-in-time
+ */
+struct fscache_cookie {
+ atomic_t usage; /* number of users of this cookie */
+ atomic_t n_children; /* number of children of this cookie */
+ atomic_t n_active; /* number of active users of netfs ptrs */
+ spinlock_t lock;
+ spinlock_t stores_lock; /* lock on page store tree */
+ struct hlist_head backing_objects; /* object(s) backing this file/index */
+ const struct fscache_cookie_def *def; /* definition */
+ struct fscache_cookie *parent; /* parent of this entry */
+ void *netfs_data; /* back pointer to netfs */
+ struct radix_tree_root stores; /* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
+
+ unsigned long flags;
+#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
+#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
+#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
+};
+
+static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
+{
+ return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+}
+
+/*
+ * slow-path functions for when there is actually caching available, and the
+ * netfs does actually have a valid token
+ * - these are not to be called directly
+ * - these are undefined symbols when FS-Cache is not configured and the
+ * optimiser takes care of not using them
+ */
+extern int __fscache_register_netfs(struct fscache_netfs *);
+extern void __fscache_unregister_netfs(struct fscache_netfs *);
+extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
+extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+
+extern struct fscache_cookie *__fscache_acquire_cookie(
+ struct fscache_cookie *,
+ const struct fscache_cookie_def *,
+ void *, bool);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
+extern int __fscache_check_consistency(struct fscache_cookie *);
+extern void __fscache_update_cookie(struct fscache_cookie *);
+extern int __fscache_attr_changed(struct fscache_cookie *);
+extern void __fscache_invalidate(struct fscache_cookie *);
+extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
+extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
+ struct page *,
+ fscache_rw_complete_t,
+ void *,
+ gfp_t);
+extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
+ struct address_space *,
+ struct list_head *,
+ unsigned *,
+ fscache_rw_complete_t,
+ void *,
+ gfp_t);
+extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
+extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
+extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
+extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
+extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
+extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
+ gfp_t);
+extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
+ struct inode *);
+extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
+ struct list_head *pages);
+extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
+extern void __fscache_enable_cookie(struct fscache_cookie *,
+ bool (*)(void *), void *);
+
+/**
+ * fscache_register_netfs - Register a filesystem as desiring caching services
+ * @netfs: The description of the filesystem
+ *
+ * Register a filesystem as desiring caching services if they're available.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_register_netfs(struct fscache_netfs *netfs)
+{
+ if (fscache_available())
+ return __fscache_register_netfs(netfs);
+ else
+ return 0;
+}
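+
+/*
+ * Illustrative sketch of registration at module init time; "my_fs" and
+ * "my_fs_cache_netfs" are hypothetical names, not part of this API.
+ *
+ *   static struct fscache_netfs my_fs_cache_netfs = {
+ *       .name    = "my_fs",
+ *       .version = 0,
+ *   };
+ *
+ *   ret = fscache_register_netfs(&my_fs_cache_netfs);
+ *   if (ret < 0)
+ *       goto error;
+ *   ...
+ *   fscache_unregister_netfs(&my_fs_cache_netfs);
+ */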
+
+/**
+ * fscache_unregister_netfs - Indicate that a filesystem no longer desires
+ * caching services
+ * @netfs: The description of the filesystem
+ *
+ * Indicate that a filesystem no longer desires caching services for the
+ * moment.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_unregister_netfs(struct fscache_netfs *netfs)
+{
+ if (fscache_available())
+ __fscache_unregister_netfs(netfs);
+}
+
+/**
+ * fscache_lookup_cache_tag - Look up a cache tag
+ * @name: The name of the tag to search for
+ *
+ * Acquire a specific cache referral tag that can be used to select a specific
+ * cache in which to cache an index.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+{
+ if (fscache_available())
+ return __fscache_lookup_cache_tag(name);
+ else
+ return NULL;
+}
+
+/**
+ * fscache_release_cache_tag - Release a cache tag
+ * @tag: The tag to release
+ *
+ * Release a reference to a cache referral tag previously looked up.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+{
+ if (fscache_available())
+ __fscache_release_cache_tag(tag);
+}
+
+/**
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @parent: The cookie that's to be the parent of this one
+ * @def: A description of the cache object, including callback operations
+ * @netfs_data: An arbitrary piece of data to be kept in the cookie to
+ * represent the cache object to the netfs
+ * @enable: Whether or not to enable a data cookie immediately
+ *
+ * This function is used to inform FS-Cache about part of an index hierarchy
+ * that can be used to locate files. This is done by requesting a cookie for
+ * each index in the path to the file.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+struct fscache_cookie *fscache_acquire_cookie(
+ struct fscache_cookie *parent,
+ const struct fscache_cookie_def *def,
+ void *netfs_data,
+ bool enable)
+{
+ if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
+ return __fscache_acquire_cookie(parent, def, netfs_data,
+ enable);
+ else
+ return NULL;
+}
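+
+/*
+ * Illustrative sketch: acquiring a data cookie beneath the netfs's primary
+ * index. "my_fs_cache_netfs", "my_fs_file_def" and "my_inode" are
+ * hypothetical; netfs_data is whatever back pointer the netfs wants handed
+ * to its fscache_cookie_def callbacks. A NULL return simply means the file
+ * is not being cached.
+ *
+ *   cookie = fscache_acquire_cookie(my_fs_cache_netfs.primary_index,
+ *                                   &my_fs_file_def, my_inode, true);
+ */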
+
+/**
+ * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
+ * it
+ * @cookie: The cookie being returned
+ * @retire: True if the cache object the cookie represents is to be discarded
+ *
+ * This function returns a cookie to the cache, forcibly discarding the
+ * associated cache object if retire is set to true.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_relinquish_cookie(cookie, retire);
+}
+
+/**
+ * fscache_check_consistency - Request a consistency check of a cache object
+ * @cookie: The cookie representing the cache object
+ *
+ * Request a consistency check from fscache, which passes the request
+ * to the backing cache.
+ *
+ * Returns 0 if consistent and -ESTALE if inconsistent. May also
+ * return -ENOMEM and -ERESTARTSYS.
+ */
+static inline
+int fscache_check_consistency(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ return __fscache_check_consistency(cookie);
+ else
+ return 0;
+}
+
+/**
+ * fscache_update_cookie - Request that a cache object be updated
+ * @cookie: The cookie representing the cache object
+ *
+ * Request an update of the index data for the cache object associated with the
+ * cookie.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_update_cookie(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ __fscache_update_cookie(cookie);
+}
+
+/**
+ * fscache_pin_cookie - Pin a data-storage cache object in its cache
+ * @cookie: The cookie representing the cache object
+ *
+ * Permit data-storage cache objects to be pinned in the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_pin_cookie(struct fscache_cookie *cookie)
+{
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_unpin_cookie - Unpin a data-storage cache object in its cache
+ * @cookie: The cookie representing the cache object
+ *
+ * Permit data-storage cache objects to be unpinned from the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_unpin_cookie(struct fscache_cookie *cookie)
+{
+}
+
+/**
+ * fscache_attr_changed - Notify cache that an object's attributes changed
+ * @cookie: The cookie representing the cache object
+ *
+ * Send a notification to the cache indicating that an object's attributes have
+ * changed. This includes the data size. These attributes will be obtained
+ * through the get_attr() cookie definition op.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_attr_changed(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ return __fscache_attr_changed(cookie);
+ else
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_invalidate - Notify cache that an object needs invalidation
+ * @cookie: The cookie representing the cache object
+ *
+ * Notify the cache that an object needs to be invalidated and that it
+ * should abort any retrievals or stores it is doing on the cache. The object
+ * is then marked non-caching until such time as the invalidation is complete.
+ *
+ * This can be called with spinlocks held.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_invalidate(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ __fscache_invalidate(cookie);
+}
+
+/**
+ * fscache_wait_on_invalidate - Wait for invalidation to complete
+ * @cookie: The cookie representing the cache object
+ *
+ * Wait for the invalidation of an object to complete.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_wait_on_invalidate(cookie);
+}
+
+/**
+ * fscache_reserve_space - Reserve data space for a cached object
+ * @cookie: The cookie representing the cache object
+ * @size: The amount of space to be reserved
+ *
+ * Reserve an amount of space in the cache for the cache object attached to a
+ * cookie so that a write to that object within the space can always be
+ * honoured.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+{
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
+ * in which to store it
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to fill if possible
+ * @end_io_func: The callback to invoke when and if the page is filled
+ * @context: An arbitrary piece of data to pass on to end_io_func()
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Read a page from the cache, or if that's not possible make a potential
+ * one-block reservation in the cache into which the page may be stored once
+ * fetched from the server.
+ *
+ * If the page is not backed by the cache object, or if there's some reason
+ * it can't be, -ENOBUFS will be returned and nothing more will be done for
+ * that page.
+ *
+ * Else, if that page is backed by the cache, a read will be initiated directly
+ * to the netfs's page and 0 will be returned by this function. The
+ * end_io_func() callback will be invoked when the operation terminates on a
+ * completion or failure. Note that the callback may be invoked before the
+ * return.
+ *
+ * Else, if the page is unbacked, -ENODATA is returned and a block may have
+ * been allocated in the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ struct page *page,
+ fscache_rw_complete_t end_io_func,
+ void *context,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ return __fscache_read_or_alloc_page(cookie, page, end_io_func,
+ context, gfp);
+ else
+ return -ENOBUFS;
+}
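+
+/*
+ * Illustrative sketch of the read path in a netfs ->readpage(), keyed off
+ * the return codes documented above. "my_fs_readpage_done" and
+ * "my_fs_read_from_server" are hypothetical helpers; on -ENODATA the page
+ * fetched from the server may later be handed back with
+ * fscache_write_page().
+ *
+ *   ret = fscache_read_or_alloc_page(cookie, page, my_fs_readpage_done,
+ *                                    NULL, GFP_KERNEL);
+ *   if (ret == 0)
+ *       return 0;
+ *   if (ret == -ENODATA || ret == -ENOBUFS)
+ *       ret = my_fs_read_from_server(page);
+ */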
+
+/**
+ * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
+ * blocks in which to store them
+ * @cookie: The cookie representing the cache object
+ * @mapping: The netfs inode mapping to which the pages will be attached
+ * @pages: A list of potential netfs pages to be filled
+ * @nr_pages: Number of pages to be read and/or allocated
+ * @end_io_func: The callback to invoke when and if each page is filled
+ * @context: An arbitrary piece of data to pass on to end_io_func()
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Read a set of pages from the cache, or if that's not possible, attempt to
+ * make a potential one-block reservation for each page in the cache into which
+ * that page may be stored once fetched from the server.
+ *
+ * If some pages are not backed by the cache object, or if there's some
+ * reason they can't be, -ENOBUFS will be returned and nothing more will be
+ * done for those pages.
+ *
+ * Else, if some of the pages are backed by the cache, a read will be initiated
+ * directly to the netfs's page and 0 will be returned by this function. The
+ * end_io_func() callback will be invoked when the operation terminates on a
+ * completion or failure. Note that the callback may be invoked before the
+ * return.
+ *
+ * Else, if a page is unbacked, -ENODATA is returned and a block may have
+ * been allocated in the cache.
+ *
+ * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
+ * regard to different pages, the return values are prioritised in that order.
+ * Any pages submitted for reading are removed from the pages list.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages,
+ fscache_rw_complete_t end_io_func,
+ void *context,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ return __fscache_read_or_alloc_pages(cookie, mapping, pages,
+ nr_pages, end_io_func,
+ context, gfp);
+ else
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_alloc_page - Allocate a block in which to store a page
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to allocate a page for
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Request allocation of a block in the cache in which to store a netfs page
+ * without retrieving any contents from the cache.
+ *
+ * If the page is not backed by a file then -ENOBUFS will be returned and
+ * nothing more will be done, and no reservation will be made.
+ *
+ * Else, a block will be allocated if one wasn't already, and 0 will be
+ * returned.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_alloc_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ return __fscache_alloc_page(cookie, page, gfp);
+ else
+ return -ENOBUFS;
+}
+
+/**
+ * fscache_readpages_cancel - Cancel read/alloc on pages
+ * @cookie: The cookie representing the inode's cache object.
+ * @pages: The netfs pages that we cancelled the read/alloc on in readpages()
+ *
+ * Uncache/unreserve the pages reserved earlier in readpages() via
+ * fscache_read_or_alloc_pages() and similar. In most successful caches in
+ * readpages() this doesn't do anything. In cases when the underlying netfs's
+ * readahead failed we need to clean up the pagelist (unmark and uncache).
+ *
+ * This function may sleep as it may have to clean up disk state.
+ */
+static inline
+void fscache_readpages_cancel(struct fscache_cookie *cookie,
+ struct list_head *pages)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_readpages_cancel(cookie, pages);
+}
+
+/**
+ * fscache_write_page - Request storage of a page in the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page to store
+ * @gfp: The conditions under which memory allocation should be made
+ *
+ * Request the contents of the netfs page be written into the cache. This
+ * request may be ignored if no cache block is currently allocated, in which
+ * case it will return -ENOBUFS.
+ *
+ * If a cache block was already allocated, a write will be initiated and 0 will
+ * be returned. The PG_fscache_write page bit is set immediately and will then
+ * be cleared at the completion of the write to indicate the success or failure
+ * of the operation. Note that the completion may happen before the return.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+int fscache_write_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ return __fscache_write_page(cookie, page, gfp);
+ else
+ return -ENOBUFS;
+}
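+
+/*
+ * Illustrative sketch (a hedged example only, not a prescribed sequence):
+ * pushing a freshly fetched netfs page into the cache and dropping the
+ * cache mark if the store cannot be queued.
+ *
+ *   ret = fscache_write_page(cookie, page, GFP_KERNEL);
+ *   if (ret == -ENOBUFS)
+ *       fscache_uncache_page(cookie, page);
+ */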
+
+/**
+ * fscache_uncache_page - Indicate that caching is no longer required on a page
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that was being cached.
+ *
+ * Tell the cache that we no longer want a page to be cached and that it should
+ * remove any knowledge of the netfs page it may have.
+ *
+ * Note that this cannot cancel any outstanding I/O operations between this
+ * page and the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_uncache_page(struct fscache_cookie *cookie,
+ struct page *page)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_uncache_page(cookie, page);
+}
+
+/**
+ * fscache_check_page_write - Ask if a page is being written to the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ *
+ * Ask the cache if a page is being written to the cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+bool fscache_check_page_write(struct fscache_cookie *cookie,
+ struct page *page)
+{
+ if (fscache_cookie_valid(cookie))
+ return __fscache_check_page_write(cookie, page);
+ return false;
+}
+
+/**
+ * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ *
+ * Ask the cache to wake us up when a page is no longer being written to the
+ * cache.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_wait_on_page_write(struct fscache_cookie *cookie,
+ struct page *page)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_wait_on_page_write(cookie, page);
+}
+
+/**
+ * fscache_maybe_release_page - Consider releasing a page, cancelling a store
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ * @gfp: The gfp flags passed to releasepage()
+ *
+ * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
+ * releasepage() call. A storage request on the page may be cancelled if it is
+ * not currently being processed.
+ *
+ * The function returns true if the page no longer has a storage request on it,
+ * and false if a storage request is left in place. If true is returned, the
+ * page will have been passed to fscache_uncache_page(). If false is returned
+ * the page cannot be freed yet.
+ */
+static inline
+bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
+{
+ if (fscache_cookie_valid(cookie) && PageFsCache(page))
+ return __fscache_maybe_release_page(cookie, page, gfp);
+ return false;
+}
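+
+/*
+ * Illustrative sketch of a netfs ->releasepage() deferring to the cache;
+ * "my_fs_page_cookie" is a hypothetical way of getting at the inode's
+ * cookie.
+ *
+ *   static int my_fs_releasepage(struct page *page, gfp_t gfp)
+ *   {
+ *       struct fscache_cookie *cookie = my_fs_page_cookie(page);
+ *
+ *       if (PageFsCache(page) &&
+ *           !fscache_maybe_release_page(cookie, page, gfp))
+ *           return 0;
+ *       return 1;
+ *   }
+ */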
+
+/**
+ * fscache_uncache_all_inode_pages - Uncache all an inode's pages
+ * @cookie: The cookie representing the inode's cache object.
+ * @inode: The inode to uncache pages from.
+ *
+ * Uncache all the pages in an inode that are marked PG_fscache, assuming them
+ * to be associated with the given cookie.
+ *
+ * This function may sleep. It will wait for pages that are being written out
+ * and will wait whilst the PG_fscache mark is removed by the cache.
+ */
+static inline
+void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+ struct inode *inode)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_uncache_all_inode_pages(cookie, inode);
+}
+
+/**
+ * fscache_disable_cookie - Disable a cookie
+ * @cookie: The cookie representing the cache object
+ * @invalidate: Invalidate the backing object
+ *
+ * Disable a cookie from accepting further alloc, read, write, invalidate,
+ * update or acquire operations. Outstanding operations can still be waited
+ * upon and pages can still be uncached and the cookie relinquished.
+ *
+ * This will not return until all outstanding operations have completed.
+ *
+ * If @invalidate is set, then the backing object will be invalidated and
+ * detached, otherwise it will just be detached.
+ */
+static inline
+void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+{
+ if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+ __fscache_disable_cookie(cookie, invalidate);
+}
+
+/**
+ * fscache_enable_cookie - Reenable a cookie
+ * @cookie: The cookie representing the cache object
+ * @can_enable: A function to permit enablement once lock is held
+ * @data: Data for can_enable()
+ *
+ * Reenable a previously disabled cookie, allowing it to accept further alloc,
+ * read, write, invalidate, update or acquire operations. An attempt will be
+ * made to immediately reattach the cookie to a backing object.
+ *
+ * The can_enable() function is called (if not NULL) once the enablement lock
+ * is held to rule on whether enablement is still permitted to go ahead.
+ */
+static inline
+void fscache_enable_cookie(struct fscache_cookie *cookie,
+ bool (*can_enable)(void *data),
+ void *data)
+{
+ if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
+ __fscache_enable_cookie(cookie, can_enable, data);
+}
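+
+/*
+ * Illustrative sketch: taking a cookie out of service and later re-enabling
+ * it unconditionally. Passing NULL for can_enable() skips the extra
+ * enablement check.
+ *
+ *   fscache_disable_cookie(cookie, true);
+ *   ...
+ *   fscache_enable_cookie(cookie, NULL, NULL);
+ */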
+
+#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fsl-diu-fb.h b/include/linux/fsl-diu-fb.h
new file mode 100644
index 000000000..a1e827712
--- /dev/null
+++ b/include/linux/fsl-diu-fb.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Freescale DIU Frame Buffer device driver
+ *
+ * Authors: Hongjun Chen <hong-jun.chen@freescale.com>
+ * Paul Widmer <paul.widmer@freescale.com>
+ * Srikanth Srinivasan <srikanth.srinivasan@freescale.com>
+ * York Sun <yorksun@freescale.com>
+ *
+ * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __FSL_DIU_FB_H__
+#define __FSL_DIU_FB_H__
+
+#include <linux/types.h>
+
+struct mfb_chroma_key {
+ int enable;
+ __u8 red_max;
+ __u8 green_max;
+ __u8 blue_max;
+ __u8 red_min;
+ __u8 green_min;
+ __u8 blue_min;
+};
+
+struct aoi_display_offset {
+ __s32 x_aoi_d;
+ __s32 y_aoi_d;
+};
+
+#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key)
+#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8)
+#define MFB_SET_ALPHA _IOW('M', 0, __u8)
+#define MFB_GET_ALPHA _IOR('M', 0, __u8)
+#define MFB_SET_AOID _IOW('M', 4, struct aoi_display_offset)
+#define MFB_GET_AOID _IOR('M', 4, struct aoi_display_offset)
+#define MFB_SET_PIXFMT _IOW('M', 8, __u32)
+#define MFB_GET_PIXFMT _IOR('M', 8, __u32)
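+
+/*
+ * Illustrative user-space sketch (hypothetical device node; the usual
+ * <fcntl.h>/<sys/ioctl.h> calls are assumed): setting the blending level
+ * via MFB_SET_ALPHA.
+ *
+ *   __u8 alpha = 128;
+ *   int fd = open("/dev/fb1", O_RDWR);
+ *
+ *   if (fd >= 0 && ioctl(fd, MFB_SET_ALPHA, &alpha) < 0)
+ *       perror("MFB_SET_ALPHA");
+ */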
+
+/*
+ * The MPC5121 BSP comes with a gamma_set utility that initializes the
+ * gamma table. Unfortunately, it uses bad values for the IOCTL commands,
+ * but there's nothing we can do about it now. These ioctls are only
+ * supported on the MPC5121.
+ */
+#define MFB_SET_GAMMA _IOW('M', 1, __u8)
+#define MFB_GET_GAMMA _IOR('M', 1, __u8)
+
+/*
+ * The original definitions of MFB_SET_PIXFMT and MFB_GET_PIXFMT used the
+ * wrong value for the 'size' field of the ioctl. The current macros above use the
+ * right size, but we still need to provide backwards compatibility, at least
+ * for a while.
+ */
+#define MFB_SET_PIXFMT_OLD 0x80014d08
+#define MFB_GET_PIXFMT_OLD 0x40014d08
+
+#ifdef __KERNEL__
+
+/*
+ * These are the fields of the area descriptor (in DDR memory) for every plane
+ */
+struct diu_ad {
+ /* Word 0(32-bit) in DDR memory */
+/* __u16 comp; */
+/* __u16 pixel_s:2; */
+/* __u16 pallete:1; */
+/* __u16 red_c:2; */
+/* __u16 green_c:2; */
+/* __u16 blue_c:2; */
+/* __u16 alpha_c:3; */
+/* __u16 byte_f:1; */
+/* __u16 res0:3; */
+
+ __be32 pix_fmt; /* hard coding pixel format */
+
+ /* Word 1(32-bit) in DDR memory */
+ __le32 addr;
+
+ /* Word 2(32-bit) in DDR memory */
+/* __u32 delta_xs:11; */
+/* __u32 res1:1; */
+/* __u32 delta_ys:11; */
+/* __u32 res2:1; */
+/* __u32 g_alpha:8; */
+ __le32 src_size_g_alpha;
+
+ /* Word 3(32-bit) in DDR memory */
+/* __u32 delta_xi:11; */
+/* __u32 res3:5; */
+/* __u32 delta_yi:11; */
+/* __u32 res4:3; */
+/* __u32 flip:2; */
+ __le32 aoi_size;
+
+ /* Word 4(32-bit) in DDR memory */
+ /*__u32 offset_xi:11;
+ __u32 res5:5;
+ __u32 offset_yi:11;
+ __u32 res6:5;
+ */
+ __le32 offset_xyi;
+
+ /* Word 5(32-bit) in DDR memory */
+ /*__u32 offset_xd:11;
+ __u32 res7:5;
+ __u32 offset_yd:11;
+ __u32 res8:5; */
+ __le32 offset_xyd;
+
+
+ /* Word 6(32-bit) in DDR memory */
+ __u8 ckmax_r;
+ __u8 ckmax_g;
+ __u8 ckmax_b;
+ __u8 res9;
+
+ /* Word 7(32-bit) in DDR memory */
+ __u8 ckmin_r;
+ __u8 ckmin_g;
+ __u8 ckmin_b;
+ __u8 res10;
+/* __u32 res10:8; */
+
+ /* Word 8(32-bit) in DDR memory */
+ __le32 next_ad;
+
+ /* Word 9(32-bit) in DDR memory, just for 64-bit alignment */
+ __u32 paddr;
+} __attribute__ ((packed));
+
+/* DIU register map */
+struct diu {
+ __be32 desc[3];
+ __be32 gamma;
+ __be32 pallete;
+ __be32 cursor;
+ __be32 curs_pos;
+ __be32 diu_mode;
+ __be32 bgnd;
+ __be32 bgnd_wb;
+ __be32 disp_size;
+ __be32 wb_size;
+ __be32 wb_mem_addr;
+ __be32 hsyn_para;
+ __be32 vsyn_para;
+ __be32 syn_pol;
+ __be32 thresholds;
+ __be32 int_status;
+ __be32 int_mask;
+ __be32 colorbar[8];
+ __be32 filling;
+ __be32 plut;
+} __attribute__ ((packed));
+
+/*
+ * Modes of operation of DIU. The DIU supports five different modes, but
+ * the driver only supports modes 0 and 1.
+ */
+#define MFB_MODE0 0 /* DIU off */
+#define MFB_MODE1 1 /* All three planes output to display */
+
+#endif /* __KERNEL__ */
+#endif /* __FSL_DIU_FB_H__ */
diff --git a/include/linux/fsl/bestcomm/ata.h b/include/linux/fsl/bestcomm/ata.h
new file mode 100644
index 000000000..0b2371811
--- /dev/null
+++ b/include/linux/fsl/bestcomm/ata.h
@@ -0,0 +1,30 @@
+/*
+ * Header for Bestcomm ATA task driver
+ *
+ *
+ * Copyright (C) 2006 Freescale - John Rigby
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __BESTCOMM_ATA_H__
+#define __BESTCOMM_ATA_H__
+
+
+struct bcom_ata_bd {
+ u32 status;
+ u32 src_pa;
+ u32 dst_pa;
+};
+
+extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize);
+extern void bcom_ata_rx_prepare(struct bcom_task *tsk);
+extern void bcom_ata_tx_prepare(struct bcom_task *tsk);
+extern void bcom_ata_reset_bd(struct bcom_task *tsk);
+extern void bcom_ata_release(struct bcom_task *tsk);
+
+#endif /* __BESTCOMM_ATA_H__ */
+
diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h
new file mode 100644
index 000000000..a0e2e6b19
--- /dev/null
+++ b/include/linux/fsl/bestcomm/bestcomm.h
@@ -0,0 +1,213 @@
+/*
+ * Public header for the MPC52xx processor BestComm driver
+ *
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2005 Varma Electronics Oy,
+ * ( by Andrey Volkov <avolkov@varma-el.com> )
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __BESTCOMM_H__
+#define __BESTCOMM_H__
+
+/**
+ * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
+ * @status: The current status of this buffer. Exact meaning depends on the
+ * task type
+ * @data: An array of u32 extra data. Size of array is task dependent.
+ *
+ * Note: Don't dereference a bcom_bd pointer as an array. The size of the
+ * bcom_bd is variable. Use bcom_get_bd() instead.
+ */
+struct bcom_bd {
+ u32 status;
+ u32 data[0]; /* variable payload size */
+};
+
+/* ======================================================================== */
+/* Generic task management */
+/* ======================================================================== */
+
+/**
+ * struct bcom_task - Structure describing a loaded BestComm task
+ *
+ * This structure is never built by the driver itself. It's built and
+ * filled by the intermediate layer of the BestComm API, the task-dependent
+ * support code.
+ *
+ * Most likely you don't need to poke around inside this structure. The
+ * fields are exposed in the header just for the sake of the inline functions.
+ */
+struct bcom_task {
+ unsigned int tasknum;
+ unsigned int flags;
+ int irq;
+
+ struct bcom_bd *bd;
+ phys_addr_t bd_pa;
+ void **cookie;
+ unsigned short index;
+ unsigned short outdex;
+ unsigned int num_bd;
+ unsigned int bd_size;
+
+ void* priv;
+};
+
+#define BCOM_FLAGS_NONE 0x00000000ul
+#define BCOM_FLAGS_ENABLE_TASK (1ul << 0)
+
+/**
+ * bcom_enable - Enable a BestComm task
+ * @tsk: The BestComm task structure
+ *
+ * This function makes sure the given task is enabled and can be run
+ * by the BestComm engine as needed
+ */
+extern void bcom_enable(struct bcom_task *tsk);
+
+/**
+ * bcom_disable - Disable a BestComm task
+ * @tsk: The BestComm task structure
+ *
+ * This function disables a given task, making sure it's not executed
+ * by the BestComm engine.
+ */
+extern void bcom_disable(struct bcom_task *tsk);
+
+
+/**
+ * bcom_get_task_irq - Returns the irq number of a BestComm task
+ * @tsk: The BestComm task structure
+ */
+static inline int
+bcom_get_task_irq(struct bcom_task *tsk) {
+ return tsk->irq;
+}
+
+/* ======================================================================== */
+/* BD based tasks helpers */
+/* ======================================================================== */
+
+#define BCOM_BD_READY 0x40000000ul
+
+/** _bcom_next_index - Get next input index.
+ * @tsk: pointer to task structure
+ *
+ * Support function; Device drivers should not call this
+ */
+static inline int
+_bcom_next_index(struct bcom_task *tsk)
+{
+ return ((tsk->index + 1) == tsk->num_bd) ? 0 : tsk->index + 1;
+}
+
+/** _bcom_next_outdex - Get next output index.
+ * @tsk: pointer to task structure
+ *
+ * Support function; Device drivers should not call this
+ */
+static inline int
+_bcom_next_outdex(struct bcom_task *tsk)
+{
+ return ((tsk->outdex + 1) == tsk->num_bd) ? 0 : tsk->outdex + 1;
+}
+
+/**
+ * bcom_queue_empty - Checks if a BestComm task BD queue is empty
+ * @tsk: The BestComm task structure
+ */
+static inline int
+bcom_queue_empty(struct bcom_task *tsk)
+{
+ return tsk->index == tsk->outdex;
+}
+
+/**
+ * bcom_queue_full - Checks if a BestComm task BD queue is full
+ * @tsk: The BestComm task structure
+ */
+static inline int
+bcom_queue_full(struct bcom_task *tsk)
+{
+ return tsk->outdex == _bcom_next_index(tsk);
+}
+
+/**
+ * bcom_get_bd - Get a BD from the queue
+ * @tsk: The BestComm task structure
+ * @index: Index of the BD to fetch
+ */
+static inline struct bcom_bd
+*bcom_get_bd(struct bcom_task *tsk, unsigned int index)
+{
+ /* A cast to (void*) so the address can be incremented by the
+ * real size instead of by sizeof(struct bcom_bd) */
+ return ((void *)tsk->bd) + (index * tsk->bd_size);
+}
+
+/**
+ * bcom_buffer_done - Checks if the next buffer of a BestComm task is done
+ * @tsk: The BestComm task structure
+ */
+static inline int
+bcom_buffer_done(struct bcom_task *tsk)
+{
+ struct bcom_bd *bd;
+ if (bcom_queue_empty(tsk))
+ return 0;
+
+ bd = bcom_get_bd(tsk, tsk->outdex);
+ return !(bd->status & BCOM_BD_READY);
+}
+
+/**
+ * bcom_prepare_next_buffer - clear status of next available buffer.
+ * @tsk: The BestComm task structure
+ *
+ * Returns pointer to next buffer descriptor
+ */
+static inline struct bcom_bd *
+bcom_prepare_next_buffer(struct bcom_task *tsk)
+{
+ struct bcom_bd *bd;
+
+ bd = bcom_get_bd(tsk, tsk->index);
+ bd->status = 0; /* cleanup last status */
+ return bd;
+}
+
+static inline void
+bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie)
+{
+ struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index);
+
+ tsk->cookie[tsk->index] = cookie;
+ mb(); /* ensure the bd is really up-to-date */
+ bd->status |= BCOM_BD_READY;
+ tsk->index = _bcom_next_index(tsk);
+ if (tsk->flags & BCOM_FLAGS_ENABLE_TASK)
+ bcom_enable(tsk);
+}
+
+static inline void *
+bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd)
+{
+ void *cookie = tsk->cookie[tsk->outdex];
+ struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex);
+
+ if (p_status)
+ *p_status = bd->status;
+ if (p_bd)
+ *p_bd = bd;
+ tsk->outdex = _bcom_next_outdex(tsk);
+ return cookie;
+}
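+
+/*
+ * Illustrative sketch of the usual produce/consume pattern built from the
+ * helpers above. How the buffer descriptor and cookie are filled in is
+ * task-specific; "buf_pa", "skb" and "my_complete_fn" are hypothetical.
+ *
+ *   if (!bcom_queue_full(tsk)) {
+ *       struct bcom_bd *bd = bcom_prepare_next_buffer(tsk);
+ *
+ *       bd->data[0] = buf_pa;
+ *       bcom_submit_next_buffer(tsk, skb);
+ *   }
+ *
+ *   while (bcom_buffer_done(tsk)) {
+ *       u32 status;
+ *       void *cookie = bcom_retrieve_buffer(tsk, &status, NULL);
+ *
+ *       my_complete_fn(cookie, status);
+ *   }
+ */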
+
+#endif /* __BESTCOMM_H__ */
diff --git a/include/linux/fsl/bestcomm/bestcomm_priv.h b/include/linux/fsl/bestcomm/bestcomm_priv.h
new file mode 100644
index 000000000..3b52f3ffb
--- /dev/null
+++ b/include/linux/fsl/bestcomm/bestcomm_priv.h
@@ -0,0 +1,350 @@
+/*
+ * Private header for the MPC52xx processor BestComm driver
+ *
+ * By private, we mean that drivers should not use it directly. It's meant
+ * to be used by the BestComm engine driver itself and by the intermediate
+ * layer between the core and the drivers.
+ *
+ * Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2005 Varma Electronics Oy,
+ * ( by Andrey Volkov <avolkov@varma-el.com> )
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __BESTCOMM_PRIV_H__
+#define __BESTCOMM_PRIV_H__
+
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <asm/io.h>
+#include <asm/mpc52xx.h>
+
+#include "sram.h"
+
+
+/* ======================================================================== */
+/* Engine related stuff */
+/* ======================================================================== */
+
+/* Zones sizes and needed alignments */
+#define BCOM_MAX_TASKS 16
+#define BCOM_MAX_VAR 24
+#define BCOM_MAX_INC 8
+#define BCOM_MAX_FDT 64
+#define BCOM_MAX_CTX 20
+#define BCOM_CTX_SIZE (BCOM_MAX_CTX * sizeof(u32))
+#define BCOM_CTX_ALIGN 0x100
+#define BCOM_VAR_SIZE (BCOM_MAX_VAR * sizeof(u32))
+#define BCOM_INC_SIZE (BCOM_MAX_INC * sizeof(u32))
+#define BCOM_VAR_ALIGN 0x80
+#define BCOM_FDT_SIZE (BCOM_MAX_FDT * sizeof(u32))
+#define BCOM_FDT_ALIGN 0x100
+
+/**
+ * struct bcom_tdt - Task Descriptor Table Entry
+ *
+ */
+struct bcom_tdt {
+ u32 start;
+ u32 stop;
+ u32 var;
+ u32 fdt;
+ u32 exec_status; /* used internally by BestComm engine */
+ u32 mvtp; /* used internally by BestComm engine */
+ u32 context;
+ u32 litbase;
+};
+
+/**
+ * struct bcom_engine
+ *
+ * This holds all the info needed globally to handle the engine
+ */
+struct bcom_engine {
+ struct device_node *ofnode;
+ struct mpc52xx_sdma __iomem *regs;
+ phys_addr_t regs_base;
+
+ struct bcom_tdt *tdt;
+ u32 *ctx;
+ u32 *var;
+ u32 *fdt;
+
+ spinlock_t lock;
+};
+
+extern struct bcom_engine *bcom_eng;
+
+
+/* ======================================================================== */
+/* Tasks related stuff */
+/* ======================================================================== */
+
+/* Tasks image header */
+#define BCOM_TASK_MAGIC 0x4243544B /* 'BCTK' */
+
+struct bcom_task_header {
+ u32 magic;
+ u8 desc_size; /* the size fields */
+ u8 var_size; /* are given in number */
+ u8 inc_size; /* of 32-bit words */
+ u8 first_var;
+ u8 reserved[8];
+};
+
+/* Descriptors structure & co */
+#define BCOM_DESC_NOP 0x000001f8
+#define BCOM_LCD_MASK 0x80000000
+#define BCOM_DRD_EXTENDED 0x40000000
+#define BCOM_DRD_INITIATOR_SHIFT 21
+
+/* Tasks pragma */
+#define BCOM_PRAGMA_BIT_RSV 7 /* reserved pragma bit */
+#define BCOM_PRAGMA_BIT_PRECISE_INC 6 /* increment 0=when possible, */
+ /* 1=iter end */
+#define BCOM_PRAGMA_BIT_RST_ERROR_NO 5 /* don't reset errors on */
+ /* task enable */
+#define BCOM_PRAGMA_BIT_PACK 4 /* pack data enable */
+#define BCOM_PRAGMA_BIT_INTEGER 3 /* data alignment */
+ /* 0=frac(msb), 1=int(lsb) */
+#define BCOM_PRAGMA_BIT_SPECREAD 2 /* XLB speculative read */
+#define BCOM_PRAGMA_BIT_CW 1 /* write line buffer enable */
+#define BCOM_PRAGMA_BIT_RL 0 /* read line buffer enable */
+
+ /* Looks like XLB speculative read generates XLB errors when a buffer
+ * is at the end of the physical memory. i.e. when accessing the
+ * last words, the engine tries to prefetch the next but there is no
+ * next ...
+ */
+#define BCOM_STD_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \
+ (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \
+ (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \
+ (0 << BCOM_PRAGMA_BIT_PACK) | \
+ (0 << BCOM_PRAGMA_BIT_INTEGER) | \
+ (0 << BCOM_PRAGMA_BIT_SPECREAD) | \
+ (1 << BCOM_PRAGMA_BIT_CW) | \
+ (1 << BCOM_PRAGMA_BIT_RL))
+
+#define BCOM_PCI_PRAGMA ((0 << BCOM_PRAGMA_BIT_RSV) | \
+ (0 << BCOM_PRAGMA_BIT_PRECISE_INC) | \
+ (0 << BCOM_PRAGMA_BIT_RST_ERROR_NO) | \
+ (0 << BCOM_PRAGMA_BIT_PACK) | \
+ (1 << BCOM_PRAGMA_BIT_INTEGER) | \
+ (0 << BCOM_PRAGMA_BIT_SPECREAD) | \
+ (1 << BCOM_PRAGMA_BIT_CW) | \
+ (1 << BCOM_PRAGMA_BIT_RL))
+
+#define BCOM_ATA_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_CRC16_DP_0_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_CRC16_DP_1_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_FEC_RX_BD_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_FEC_TX_BD_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_DP_0_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_DP_1_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_DP_2_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_DP_3_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_DP_BD_0_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_DP_BD_1_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_RX_BD_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_TX_BD_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_GEN_LPC_PRAGMA BCOM_STD_PRAGMA
+#define BCOM_PCI_RX_PRAGMA BCOM_PCI_PRAGMA
+#define BCOM_PCI_TX_PRAGMA BCOM_PCI_PRAGMA
+
+/* Initiators number */
+#define BCOM_INITIATOR_ALWAYS 0
+#define BCOM_INITIATOR_SCTMR_0 1
+#define BCOM_INITIATOR_SCTMR_1 2
+#define BCOM_INITIATOR_FEC_RX 3
+#define BCOM_INITIATOR_FEC_TX 4
+#define BCOM_INITIATOR_ATA_RX 5
+#define BCOM_INITIATOR_ATA_TX 6
+#define BCOM_INITIATOR_SCPCI_RX 7
+#define BCOM_INITIATOR_SCPCI_TX 8
+#define BCOM_INITIATOR_PSC3_RX 9
+#define BCOM_INITIATOR_PSC3_TX 10
+#define BCOM_INITIATOR_PSC2_RX 11
+#define BCOM_INITIATOR_PSC2_TX 12
+#define BCOM_INITIATOR_PSC1_RX 13
+#define BCOM_INITIATOR_PSC1_TX 14
+#define BCOM_INITIATOR_SCTMR_2 15
+#define BCOM_INITIATOR_SCLPC 16
+#define BCOM_INITIATOR_PSC5_RX 17
+#define BCOM_INITIATOR_PSC5_TX 18
+#define BCOM_INITIATOR_PSC4_RX 19
+#define BCOM_INITIATOR_PSC4_TX 20
+#define BCOM_INITIATOR_I2C2_RX 21
+#define BCOM_INITIATOR_I2C2_TX 22
+#define BCOM_INITIATOR_I2C1_RX 23
+#define BCOM_INITIATOR_I2C1_TX 24
+#define BCOM_INITIATOR_PSC6_RX 25
+#define BCOM_INITIATOR_PSC6_TX 26
+#define BCOM_INITIATOR_IRDA_RX 25
+#define BCOM_INITIATOR_IRDA_TX 26
+#define BCOM_INITIATOR_SCTMR_3 27
+#define BCOM_INITIATOR_SCTMR_4 28
+#define BCOM_INITIATOR_SCTMR_5 29
+#define BCOM_INITIATOR_SCTMR_6 30
+#define BCOM_INITIATOR_SCTMR_7 31
+
+/* Initiators priorities */
+#define BCOM_IPR_ALWAYS 7
+#define BCOM_IPR_SCTMR_0 2
+#define BCOM_IPR_SCTMR_1 2
+#define BCOM_IPR_FEC_RX 6
+#define BCOM_IPR_FEC_TX 5
+#define BCOM_IPR_ATA_RX 7
+#define BCOM_IPR_ATA_TX 7
+#define BCOM_IPR_SCPCI_RX 2
+#define BCOM_IPR_SCPCI_TX 2
+#define BCOM_IPR_PSC3_RX 2
+#define BCOM_IPR_PSC3_TX 2
+#define BCOM_IPR_PSC2_RX 2
+#define BCOM_IPR_PSC2_TX 2
+#define BCOM_IPR_PSC1_RX 2
+#define BCOM_IPR_PSC1_TX 2
+#define BCOM_IPR_SCTMR_2 2
+#define BCOM_IPR_SCLPC 2
+#define BCOM_IPR_PSC5_RX 2
+#define BCOM_IPR_PSC5_TX 2
+#define BCOM_IPR_PSC4_RX 2
+#define BCOM_IPR_PSC4_TX 2
+#define BCOM_IPR_I2C2_RX 2
+#define BCOM_IPR_I2C2_TX 2
+#define BCOM_IPR_I2C1_RX 2
+#define BCOM_IPR_I2C1_TX 2
+#define BCOM_IPR_PSC6_RX 2
+#define BCOM_IPR_PSC6_TX 2
+#define BCOM_IPR_IRDA_RX 2
+#define BCOM_IPR_IRDA_TX 2
+#define BCOM_IPR_SCTMR_3 2
+#define BCOM_IPR_SCTMR_4 2
+#define BCOM_IPR_SCTMR_5 2
+#define BCOM_IPR_SCTMR_6 2
+#define BCOM_IPR_SCTMR_7 2
+
+
+/* ======================================================================== */
+/* API */
+/* ======================================================================== */
+
+extern struct bcom_task *bcom_task_alloc(int bd_count, int bd_size, int priv_size);
+extern void bcom_task_free(struct bcom_task *tsk);
+extern int bcom_load_image(int task, u32 *task_image);
+extern void bcom_set_initiator(int task, int initiator);
+
+
+#define TASK_ENABLE 0x8000
+
+/**
+ * bcom_disable_prefetch - Hook to disable bus prefetching
+ *
+ * ATA DMA and the original MPC5200 need this due to silicon bugs. At the
+ * moment disabling prefetch is a one-way street. There is no mechanism
+ * in place to turn prefetch back on after it has been disabled. There is
+ * no reason it couldn't be done; it would just be more complex to implement.
+ */
+static inline void bcom_disable_prefetch(void)
+{
+ u16 regval;
+
+ regval = in_be16(&bcom_eng->regs->PtdCntrl);
+ out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
+}
+
+static inline void
+bcom_enable_task(int task)
+{
+ u16 reg;
+ reg = in_be16(&bcom_eng->regs->tcr[task]);
+ out_be16(&bcom_eng->regs->tcr[task], reg | TASK_ENABLE);
+}
+
+static inline void
+bcom_disable_task(int task)
+{
+ u16 reg = in_be16(&bcom_eng->regs->tcr[task]);
+ out_be16(&bcom_eng->regs->tcr[task], reg & ~TASK_ENABLE);
+}
+
+
+static inline u32 *
+bcom_task_desc(int task)
+{
+ return bcom_sram_pa2va(bcom_eng->tdt[task].start);
+}
+
+static inline int
+bcom_task_num_descs(int task)
+{
+ return (bcom_eng->tdt[task].stop - bcom_eng->tdt[task].start)/sizeof(u32) + 1;
+}
+
+static inline u32 *
+bcom_task_var(int task)
+{
+ return bcom_sram_pa2va(bcom_eng->tdt[task].var);
+}
+
+static inline u32 *
+bcom_task_inc(int task)
+{
+ return &bcom_task_var(task)[BCOM_MAX_VAR];
+}
+
+
+static inline int
+bcom_drd_is_extended(u32 desc)
+{
+ return (desc) & BCOM_DRD_EXTENDED;
+}
+
+static inline int
+bcom_desc_is_drd(u32 desc)
+{
+ return !(desc & BCOM_LCD_MASK) && desc != BCOM_DESC_NOP;
+}
+
+static inline int
+bcom_desc_initiator(u32 desc)
+{
+ return (desc >> BCOM_DRD_INITIATOR_SHIFT) & 0x1f;
+}
+
+static inline void
+bcom_set_desc_initiator(u32 *desc, int initiator)
+{
+ *desc = (*desc & ~(0x1f << BCOM_DRD_INITIATOR_SHIFT)) |
+ ((initiator & 0x1f) << BCOM_DRD_INITIATOR_SHIFT);
+}
+
+
+static inline void
+bcom_set_task_pragma(int task, int pragma)
+{
+ u32 *fdt = &bcom_eng->tdt[task].fdt;
+ *fdt = (*fdt & ~0xff) | pragma;
+}
+
+static inline void
+bcom_set_task_auto_start(int task, int next_task)
+{
+ u16 __iomem *tcr = &bcom_eng->regs->tcr[task];
+ out_be16(tcr, (in_be16(tcr) & ~0xff) | 0x00c0 | next_task);
+}
+
+static inline void
+bcom_set_tcr_initiator(int task, int initiator)
+{
+ u16 __iomem *tcr = &bcom_eng->regs->tcr[task];
+ out_be16(tcr, (in_be16(tcr) & ~0x1f00) | ((initiator & 0x1f) << 8));
+}
+
+
+#endif /* __BESTCOMM_PRIV_H__ */
+
diff --git a/include/linux/fsl/bestcomm/fec.h b/include/linux/fsl/bestcomm/fec.h
new file mode 100644
index 000000000..ee565d94d
--- /dev/null
+++ b/include/linux/fsl/bestcomm/fec.h
@@ -0,0 +1,61 @@
+/*
+ * Header for Bestcomm FEC tasks driver
+ *
+ *
+ * Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003-2004 MontaVista, Software, Inc.
+ * ( by Dale Farnsworth <dfarnsworth@mvista.com> )
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __BESTCOMM_FEC_H__
+#define __BESTCOMM_FEC_H__
+
+
+struct bcom_fec_bd {
+ u32 status;
+ u32 skb_pa;
+};
+
+#define BCOM_FEC_TX_BD_TFD 0x08000000ul /* transmit frame done */
+#define BCOM_FEC_TX_BD_TC 0x04000000ul /* transmit CRC */
+#define BCOM_FEC_TX_BD_ABC 0x02000000ul /* append bad CRC */
+
+#define BCOM_FEC_RX_BD_L 0x08000000ul /* buffer is last in frame */
+#define BCOM_FEC_RX_BD_BC 0x00800000ul /* DA is broadcast */
+#define BCOM_FEC_RX_BD_MC 0x00400000ul /* DA is multicast and not broadcast */
+#define BCOM_FEC_RX_BD_LG 0x00200000ul /* Rx frame length violation */
+#define BCOM_FEC_RX_BD_NO 0x00100000ul /* Rx non-octet aligned frame */
+#define BCOM_FEC_RX_BD_CR 0x00040000ul /* Rx CRC error */
+#define BCOM_FEC_RX_BD_OV 0x00020000ul /* overrun */
+#define BCOM_FEC_RX_BD_TR 0x00010000ul /* Rx frame truncated */
+#define BCOM_FEC_RX_BD_LEN_MASK 0x000007fful /* mask for length of received frame */
+#define BCOM_FEC_RX_BD_ERRORS (BCOM_FEC_RX_BD_LG | BCOM_FEC_RX_BD_NO | \
+ BCOM_FEC_RX_BD_CR | BCOM_FEC_RX_BD_OV | BCOM_FEC_RX_BD_TR)
+
+
+extern struct bcom_task *
+bcom_fec_rx_init(int queue_len, phys_addr_t fifo, int maxbufsize);
+
+extern int
+bcom_fec_rx_reset(struct bcom_task *tsk);
+
+extern void
+bcom_fec_rx_release(struct bcom_task *tsk);
+
+
+extern struct bcom_task *
+bcom_fec_tx_init(int queue_len, phys_addr_t fifo);
+
+extern int
+bcom_fec_tx_reset(struct bcom_task *tsk);
+
+extern void
+bcom_fec_tx_release(struct bcom_task *tsk);
+
+
+#endif /* __BESTCOMM_FEC_H__ */
+
diff --git a/include/linux/fsl/bestcomm/gen_bd.h b/include/linux/fsl/bestcomm/gen_bd.h
new file mode 100644
index 000000000..de47260e6
--- /dev/null
+++ b/include/linux/fsl/bestcomm/gen_bd.h
@@ -0,0 +1,53 @@
+/*
+ * Header for Bestcomm General Buffer Descriptor tasks driver
+ *
+ *
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2006 AppSpec Computer Technologies Corp.
+ * Jeff Gibbons <jeff.gibbons@appspec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ *
+ */
+
+#ifndef __BESTCOMM_GEN_BD_H__
+#define __BESTCOMM_GEN_BD_H__
+
+struct bcom_gen_bd {
+ u32 status;
+ u32 buf_pa;
+};
+
+
+extern struct bcom_task *
+bcom_gen_bd_rx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr, int maxbufsize);
+
+extern int
+bcom_gen_bd_rx_reset(struct bcom_task *tsk);
+
+extern void
+bcom_gen_bd_rx_release(struct bcom_task *tsk);
+
+
+extern struct bcom_task *
+bcom_gen_bd_tx_init(int queue_len, phys_addr_t fifo,
+ int initiator, int ipr);
+
+extern int
+bcom_gen_bd_tx_reset(struct bcom_task *tsk);
+
+extern void
+bcom_gen_bd_tx_release(struct bcom_task *tsk);
+
+
+/* PSC support utility wrappers */
+struct bcom_task * bcom_psc_gen_bd_rx_init(unsigned psc_num, int queue_len,
+ phys_addr_t fifo, int maxbufsize);
+struct bcom_task * bcom_psc_gen_bd_tx_init(unsigned psc_num, int queue_len,
+ phys_addr_t fifo);
+#endif /* __BESTCOMM_GEN_BD_H__ */
+
diff --git a/include/linux/fsl/bestcomm/sram.h b/include/linux/fsl/bestcomm/sram.h
new file mode 100644
index 000000000..b6d668963
--- /dev/null
+++ b/include/linux/fsl/bestcomm/sram.h
@@ -0,0 +1,54 @@
+/*
+ * Handling of a sram zone for bestcomm
+ *
+ *
+ * Copyright (C) 2007 Sylvain Munaut <tnt@246tNt.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __BESTCOMM_SRAM_H__
+#define __BESTCOMM_SRAM_H__
+
+#include <asm/rheap.h>
+#include <asm/mmu.h>
+#include <linux/spinlock.h>
+
+
+/* Structure used internally */
+ /* The internals are here for the inline functions'
+ * sake, certainly not for the user to mess with!
+ */
+struct bcom_sram {
+ phys_addr_t base_phys;
+ void *base_virt;
+ unsigned int size;
+ rh_info_t *rh;
+ spinlock_t lock;
+};
+
+extern struct bcom_sram *bcom_sram;
+
+
+/* Public API */
+extern int bcom_sram_init(struct device_node *sram_node, char *owner);
+extern void bcom_sram_cleanup(void);
+
+extern void* bcom_sram_alloc(int size, int align, phys_addr_t *phys);
+extern void bcom_sram_free(void *ptr);
+
+static inline phys_addr_t bcom_sram_va2pa(void *va) {
+ return bcom_sram->base_phys +
+ (unsigned long)(va - bcom_sram->base_virt);
+}
+
+static inline void *bcom_sram_pa2va(phys_addr_t pa) {
+ return bcom_sram->base_virt +
+ (unsigned long)(pa - bcom_sram->base_phys);
+}
+
+
+#endif /* __BESTCOMM_SRAM_H__ */
+
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
new file mode 100644
index 000000000..a82296af4
--- /dev/null
+++ b/include/linux/fsl_devices.h
@@ -0,0 +1,147 @@
+/*
+ * include/linux/fsl_devices.h
+ *
+ * Definitions for any platform device related flags or structures for
+ * Freescale processor devices
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * Copyright 2004,2012 Freescale Semiconductor, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _FSL_DEVICE_H_
+#define _FSL_DEVICE_H_
+
+#define FSL_UTMI_PHY_DLY 10 /* As per P1010RM, delay for UTMI
+ PHY CLK to become stable - 10ms */
+#define FSL_USB_PHY_CLK_TIMEOUT 10000 /* uSec */
+#define FSL_USB_VER_OLD 0
+#define FSL_USB_VER_1_6 1
+#define FSL_USB_VER_2_2 2
+#define FSL_USB_VER_2_4 3
+
+#include <linux/types.h>
+
+/*
+ * Some conventions on how we handle peripherals on Freescale chips
+ *
+ * unique device: a platform_device entry in fsl_plat_devs[] plus
+ * associated device information in its platform_data structure.
+ *
+ * A chip is described by a set of unique devices.
+ *
+ * Each sub-arch has its own master list of unique devices and
+ * enumerates them by enum fsl_devices in a sub-arch specific header
+ *
+ * The platform data structure is broken into two parts. The
+ * first is device-specific information that helps identify any
+ * unique features of a peripheral. The second is any
+ * information that may be defined by the board or how the device
+ * is connected externally to the chip.
+ *
+ * naming conventions:
+ * - platform data structures: <driver>_platform_data
+ * - platform data device flags: FSL_<driver>_DEV_<FLAG>
+ * - platform data board flags: FSL_<driver>_BRD_<FLAG>
+ *
+ */
+
+enum fsl_usb2_operating_modes {
+ FSL_USB2_MPH_HOST,
+ FSL_USB2_DR_HOST,
+ FSL_USB2_DR_DEVICE,
+ FSL_USB2_DR_OTG,
+};
+
+enum fsl_usb2_phy_modes {
+ FSL_USB2_PHY_NONE,
+ FSL_USB2_PHY_ULPI,
+ FSL_USB2_PHY_UTMI,
+ FSL_USB2_PHY_UTMI_WIDE,
+ FSL_USB2_PHY_SERIAL,
+};
+
+struct clk;
+struct platform_device;
+
+struct fsl_usb2_platform_data {
+ /* board specific information */
+ int controller_ver;
+ enum fsl_usb2_operating_modes operating_mode;
+ enum fsl_usb2_phy_modes phy_mode;
+ unsigned int port_enables;
+ unsigned int workaround;
+
+ int (*init)(struct platform_device *);
+ void (*exit)(struct platform_device *);
+ void __iomem *regs; /* ioremap'd register base */
+ struct clk *clk;
+ unsigned power_budget; /* hcd->power_budget */
+ unsigned big_endian_mmio:1;
+ unsigned big_endian_desc:1;
+ unsigned es:1; /* need USBMODE:ES */
+ unsigned le_setup_buf:1;
+ unsigned have_sysif_regs:1;
+ unsigned invert_drvvbus:1;
+ unsigned invert_pwr_fault:1;
+
+ unsigned suspended:1;
+ unsigned already_suspended:1;
+
+ /* register save area for suspend/resume */
+ u32 pm_command;
+ u32 pm_status;
+ u32 pm_intr_enable;
+ u32 pm_frame_index;
+ u32 pm_segment;
+ u32 pm_frame_list;
+ u32 pm_async_next;
+ u32 pm_configured_flag;
+ u32 pm_portsc;
+ u32 pm_usbgenctrl;
+};
+
+/* Flags in fsl_usb2_mph_platform_data */
+#define FSL_USB2_PORT0_ENABLED 0x00000001
+#define FSL_USB2_PORT1_ENABLED 0x00000002
+
+#define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0)
+
+struct spi_device;
+
+struct fsl_spi_platform_data {
+ u32 initial_spmode; /* initial SPMODE value */
+ s16 bus_num;
+ unsigned int flags;
+#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */
+#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */
+#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */
+#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */
+#define SPI_QE (1 << 4) /* SPI unit is in QE block */
+ /* board specific information */
+ u16 max_chipselect;
+ void (*cs_control)(struct spi_device *spi, bool on);
+ u32 sysclk;
+};
+
+struct mpc8xx_pcmcia_ops {
+ void(*hw_ctrl)(int slot, int enable);
+ int(*voltage_set)(int slot, int vcc, int vpp);
+};
+
+/* Returns non-zero if the current suspend operation would
+ * lead to a deep sleep (i.e. power removed from the core,
+ * instead of just the clock).
+ */
+#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND)
+int fsl_deep_sleep(void);
+#else
+static inline int fsl_deep_sleep(void) { return 0; }
+#endif
+
+#endif /* _FSL_DEVICE_H_ */
diff --git a/include/linux/fsl_hypervisor.h b/include/linux/fsl_hypervisor.h
new file mode 100644
index 000000000..2a707d7fb
--- /dev/null
+++ b/include/linux/fsl_hypervisor.h
@@ -0,0 +1,63 @@
+/*
+ * Freescale hypervisor ioctl and kernel interface
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * This software is provided by Freescale Semiconductor "as is" and any
+ * express or implied warranties, including, but not limited to, the implied
+ * warranties of merchantability and fitness for a particular purpose are
+ * disclaimed. In no event shall Freescale Semiconductor be liable for any
+ * direct, indirect, incidental, special, exemplary, or consequential damages
+ * (including, but not limited to, procurement of substitute goods or services;
+ * loss of use, data, or profits; or business interruption) however caused and
+ * on any theory of liability, whether in contract, strict liability, or tort
+ * (including negligence or otherwise) arising in any way out of the use of this
+ * software, even if advised of the possibility of such damage.
+ *
+ * This file is used by the Freescale hypervisor management driver. It can
+ * also be included by applications that need to communicate with the driver
+ * via the ioctl interface.
+ */
+#ifndef FSL_HYPERVISOR_H
+#define FSL_HYPERVISOR_H
+
+#include <uapi/linux/fsl_hypervisor.h>
+
+
+/**
+ * fsl_hv_failover_register() - register a callback for failover events
+ * @nb: pointer to caller-supplied notifier_block structure
+ *
+ * This function is called by device drivers to register their callback
+ * functions for fail-over events.
+ *
+ * The caller should allocate a notifier_block object and initialize the
+ * 'priority' and 'notifier_call' fields.
+ */
+int fsl_hv_failover_register(struct notifier_block *nb);
+
+/**
+ * fsl_hv_failover_unregister() - unregister a callback for failover events
+ * @nb: the same 'nb' used in the previous fsl_hv_failover_register() call
+ */
+int fsl_hv_failover_unregister(struct notifier_block *nb);
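+
+/*
+ * Illustrative sketch (editorial addition, not part of the original header):
+ * registering a failover callback. Only notifier_block and the two
+ * fsl_hv_failover_* calls come from this interface; the my_* names and the
+ * callback body are hypothetical.
+ *
+ *	static int my_failover_cb(struct notifier_block *nb,
+ *				  unsigned long event, void *data)
+ *	{
+ *		pr_info("failover event %lu\n", event);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_failover_nb = {
+ *		.notifier_call	= my_failover_cb,
+ *		.priority	= 0,
+ *	};
+ *
+ *	int ret = fsl_hv_failover_register(&my_failover_nb);
+ */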
+
+#endif
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
new file mode 100644
index 000000000..bf0321eab
--- /dev/null
+++ b/include/linux/fsl_ifc.h
@@ -0,0 +1,849 @@
+/* Freescale Integrated Flash Controller
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <dipen.dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_FSL_IFC_H
+#define __ASM_FSL_IFC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+
+/*
+ * The actual number of banks implemented depends on the IFC version
+ * - IFC version 1.0 implements 4 banks.
+ * - IFC version 1.1 onward implements 8 banks.
+ */
+#define FSL_IFC_BANK_COUNT 8
+
+#define FSL_IFC_VERSION_MASK 0x0F0F0000
+#define FSL_IFC_VERSION_1_0_0 0x01000000
+#define FSL_IFC_VERSION_1_1_0 0x01010000
+
+/*
+ * CSPR - Chip Select Property Register
+ */
+#define CSPR_BA 0xFFFF0000
+#define CSPR_BA_SHIFT 16
+#define CSPR_PORT_SIZE 0x00000180
+#define CSPR_PORT_SIZE_SHIFT 7
+/* Port Size 8 bit */
+#define CSPR_PORT_SIZE_8 0x00000080
+/* Port Size 16 bit */
+#define CSPR_PORT_SIZE_16 0x00000100
+/* Port Size 32 bit */
+#define CSPR_PORT_SIZE_32 0x00000180
+/* Write Protect */
+#define CSPR_WP 0x00000040
+#define CSPR_WP_SHIFT 6
+/* Machine Select */
+#define CSPR_MSEL 0x00000006
+#define CSPR_MSEL_SHIFT 1
+/* NOR */
+#define CSPR_MSEL_NOR 0x00000000
+/* NAND */
+#define CSPR_MSEL_NAND 0x00000002
+/* GPCM */
+#define CSPR_MSEL_GPCM 0x00000004
+/* Bank Valid */
+#define CSPR_V 0x00000001
+#define CSPR_V_SHIFT 0
+
+/*
+ * Address Mask Register
+ */
+#define IFC_AMASK_MASK 0xFFFF0000
+#define IFC_AMASK_SHIFT 16
+#define IFC_AMASK(n) (IFC_AMASK_MASK << \
+ (__ilog2(n) - IFC_AMASK_SHIFT))
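+
+/*
+ * Worked example (editorial addition, not from the original header): for a
+ * 64 KiB bank, __ilog2(0x10000) = 16, so IFC_AMASK(0x10000) is
+ * 0xFFFF0000 << 0 = 0xFFFF0000; for a 64 MiB bank, __ilog2(0x4000000) = 26,
+ * giving 0xFFFF0000 << 10 = 0xFC000000.
+ */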
+
+/*
+ * Chip Select Option Register IFC_NAND Machine
+ */
+/* Enable ECC Encoder */
+#define CSOR_NAND_ECC_ENC_EN 0x80000000
+#define CSOR_NAND_ECC_MODE_MASK 0x30000000
+/* 4 bit correction per 520 Byte sector */
+#define CSOR_NAND_ECC_MODE_4 0x00000000
+/* 8 bit correction per 528 Byte sector */
+#define CSOR_NAND_ECC_MODE_8 0x10000000
+/* Enable ECC Decoder */
+#define CSOR_NAND_ECC_DEC_EN 0x04000000
+/* Row Address Length */
+#define CSOR_NAND_RAL_MASK 0x01800000
+#define CSOR_NAND_RAL_SHIFT 20
+#define CSOR_NAND_RAL_1 0x00000000
+#define CSOR_NAND_RAL_2 0x00800000
+#define CSOR_NAND_RAL_3 0x01000000
+#define CSOR_NAND_RAL_4 0x01800000
+/* Page Size 512b, 2k, 4k */
+#define CSOR_NAND_PGS_MASK 0x00180000
+#define CSOR_NAND_PGS_SHIFT 16
+#define CSOR_NAND_PGS_512 0x00000000
+#define CSOR_NAND_PGS_2K 0x00080000
+#define CSOR_NAND_PGS_4K 0x00100000
+#define CSOR_NAND_PGS_8K 0x00180000
+/* Spare region Size */
+#define CSOR_NAND_SPRZ_MASK 0x0000E000
+#define CSOR_NAND_SPRZ_SHIFT 13
+#define CSOR_NAND_SPRZ_16 0x00000000
+#define CSOR_NAND_SPRZ_64 0x00002000
+#define CSOR_NAND_SPRZ_128 0x00004000
+#define CSOR_NAND_SPRZ_210 0x00006000
+#define CSOR_NAND_SPRZ_218 0x00008000
+#define CSOR_NAND_SPRZ_224 0x0000A000
+#define CSOR_NAND_SPRZ_CSOR_EXT 0x0000C000
+/* Pages Per Block */
+#define CSOR_NAND_PB_MASK 0x00000700
+#define CSOR_NAND_PB_SHIFT 8
+#define CSOR_NAND_PB(n) ((__ilog2(n) - 5) << CSOR_NAND_PB_SHIFT)
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_NAND_TRHZ_MASK 0x0000001C
+#define CSOR_NAND_TRHZ_SHIFT 2
+#define CSOR_NAND_TRHZ_20 0x00000000
+#define CSOR_NAND_TRHZ_40 0x00000004
+#define CSOR_NAND_TRHZ_60 0x00000008
+#define CSOR_NAND_TRHZ_80 0x0000000C
+#define CSOR_NAND_TRHZ_100 0x00000010
+/* Buffer control disable */
+#define CSOR_NAND_BCTLD 0x00000001
+
+/*
+ * Chip Select Option Register - NOR Flash Mode
+ */
+/* Enable Address shift Mode */
+#define CSOR_NOR_ADM_SHFT_MODE_EN 0x80000000
+/* Page Read Enable from NOR device */
+#define CSOR_NOR_PGRD_EN 0x10000000
+/* AVD Toggle Enable during Burst Program */
+#define CSOR_NOR_AVD_TGL_PGM_EN 0x01000000
+/* Address Data Multiplexing Shift */
+#define CSOR_NOR_ADM_MASK 0x0003E000
+#define CSOR_NOR_ADM_SHIFT_SHIFT 13
+#define CSOR_NOR_ADM_SHIFT(n) ((n) << CSOR_NOR_ADM_SHIFT_SHIFT)
+/* Type of the NOR device hooked */
+#define CSOR_NOR_NOR_MODE_AYSNC_NOR 0x00000000
+#define CSOR_NOR_NOR_MODE_AVD_NOR 0x00000020
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_NOR_TRHZ_MASK 0x0000001C
+#define CSOR_NOR_TRHZ_SHIFT 2
+#define CSOR_NOR_TRHZ_20 0x00000000
+#define CSOR_NOR_TRHZ_40 0x00000004
+#define CSOR_NOR_TRHZ_60 0x00000008
+#define CSOR_NOR_TRHZ_80 0x0000000C
+#define CSOR_NOR_TRHZ_100 0x00000010
+/* Buffer control disable */
+#define CSOR_NOR_BCTLD 0x00000001
+
+/*
+ * Chip Select Option Register - GPCM Mode
+ */
+/* GPCM Mode - Normal */
+#define CSOR_GPCM_GPMODE_NORMAL 0x00000000
+/* GPCM Mode - Generic ASIC */
+#define CSOR_GPCM_GPMODE_ASIC 0x80000000
+/* Parity Mode odd/even */
+#define CSOR_GPCM_PARITY_EVEN 0x40000000
+/* Parity Checking enable/disable */
+#define CSOR_GPCM_PAR_EN 0x20000000
+/* GPCM Timeout Count */
+#define CSOR_GPCM_GPTO_MASK 0x0F000000
+#define CSOR_GPCM_GPTO_SHIFT 24
+#define CSOR_GPCM_GPTO(n) ((__ilog2(n) - 8) << CSOR_GPCM_GPTO_SHIFT)
+/* GPCM External Access Termination mode for read access */
+#define CSOR_GPCM_RGETA_EXT 0x00080000
+/* GPCM External Access Termination mode for write access */
+#define CSOR_GPCM_WGETA_EXT 0x00040000
+/* Address Data Multiplexing Shift */
+#define CSOR_GPCM_ADM_MASK 0x0003E000
+#define CSOR_GPCM_ADM_SHIFT_SHIFT 13
+#define CSOR_GPCM_ADM_SHIFT(n) ((n) << CSOR_GPCM_ADM_SHIFT_SHIFT)
+/* Generic ASIC Parity error indication delay */
+#define CSOR_GPCM_GAPERRD_MASK 0x00000180
+#define CSOR_GPCM_GAPERRD_SHIFT 7
+#define CSOR_GPCM_GAPERRD(n) (((n) - 1) << CSOR_GPCM_GAPERRD_SHIFT)
+/* Time for Read Enable High to Output High Impedance */
+#define CSOR_GPCM_TRHZ_MASK 0x0000001C
+#define CSOR_GPCM_TRHZ_20 0x00000000
+#define CSOR_GPCM_TRHZ_40 0x00000004
+#define CSOR_GPCM_TRHZ_60 0x00000008
+#define CSOR_GPCM_TRHZ_80 0x0000000C
+#define CSOR_GPCM_TRHZ_100 0x00000010
+/* Buffer control disable */
+#define CSOR_GPCM_BCTLD 0x00000001
+
+/*
+ * Ready Busy Status Register (RB_STAT)
+ */
+/* CSn is READY */
+#define IFC_RB_STAT_READY_CS0 0x80000000
+#define IFC_RB_STAT_READY_CS1 0x40000000
+#define IFC_RB_STAT_READY_CS2 0x20000000
+#define IFC_RB_STAT_READY_CS3 0x10000000
+
+/*
+ * General Control Register (GCR)
+ */
+#define IFC_GCR_MASK 0x8000F800
+/* reset all IFC hardware */
+#define IFC_GCR_SOFT_RST_ALL 0x80000000
+/* Turnaround Time of external buffer */
+#define IFC_GCR_TBCTL_TRN_TIME 0x0000F800
+#define IFC_GCR_TBCTL_TRN_TIME_SHIFT 11
+
+/*
+ * Common Event and Error Status Register (CM_EVTER_STAT)
+ */
+/* Chip select error */
+#define IFC_CM_EVTER_STAT_CSER 0x80000000
+
+/*
+ * Common Event and Error Enable Register (CM_EVTER_EN)
+ */
+/* Chip select error checking enable */
+#define IFC_CM_EVTER_EN_CSEREN 0x80000000
+
+/*
+ * Common Event and Error Interrupt Enable Register (CM_EVTER_INTR_EN)
+ */
+/* Chip select error interrupt enable */
+#define IFC_CM_EVTER_INTR_EN_CSERIREN 0x80000000
+
+/*
+ * Common Transfer Error Attribute Register-0 (CM_ERATTR0)
+ */
+/* transaction type of error Read/Write */
+#define IFC_CM_ERATTR0_ERTYP_READ 0x80000000
+#define IFC_CM_ERATTR0_ERAID 0x0FF00000
+#define IFC_CM_ERATTR0_ERAID_SHIFT 20
+#define IFC_CM_ERATTR0_ESRCID 0x0000FF00
+#define IFC_CM_ERATTR0_ESRCID_SHIFT 8
+
+/*
+ * Clock Control Register (CCR)
+ */
+#define IFC_CCR_MASK 0x0F0F8800
+/* Clock division ratio */
+#define IFC_CCR_CLK_DIV_MASK 0x0F000000
+#define IFC_CCR_CLK_DIV_SHIFT 24
+#define IFC_CCR_CLK_DIV(n) ((n-1) << IFC_CCR_CLK_DIV_SHIFT)
+/* IFC Clock Delay */
+#define IFC_CCR_CLK_DLY_MASK 0x000F0000
+#define IFC_CCR_CLK_DLY_SHIFT 16
+#define IFC_CCR_CLK_DLY(n) ((n) << IFC_CCR_CLK_DLY_SHIFT)
+/* Invert IFC clock before sending out */
+#define IFC_CCR_INV_CLK_EN 0x00008000
+/* Feedback IFC Clock */
+#define IFC_CCR_FB_IFC_CLK_SEL 0x00000800
+
+/*
+ * Clock Status Register (CSR)
+ */
+/* Clk is stable */
+#define IFC_CSR_CLK_STAT_STABLE 0x80000000
+
+/*
+ * IFC_NAND Machine Specific Registers
+ */
+/*
+ * NAND Configuration Register (NCFGR)
+ */
+/* Auto Boot Mode */
+#define IFC_NAND_NCFGR_BOOT 0x80000000
+/* Addressing Mode-ROW0+n/COL0 */
+#define IFC_NAND_NCFGR_ADDR_MODE_RC0 0x00000000
+/* Addressing Mode-ROW0+n/COL0+n */
+#define IFC_NAND_NCFGR_ADDR_MODE_RC1 0x00400000
+/* Number of loop iterations of FIR sequences for multi page operations */
+#define IFC_NAND_NCFGR_NUM_LOOP_MASK 0x0000F000
+#define IFC_NAND_NCFGR_NUM_LOOP_SHIFT 12
+#define IFC_NAND_NCFGR_NUM_LOOP(n) ((n) << IFC_NAND_NCFGR_NUM_LOOP_SHIFT)
+/* Number of wait cycles */
+#define IFC_NAND_NCFGR_NUM_WAIT_MASK 0x000000FF
+#define IFC_NAND_NCFGR_NUM_WAIT_SHIFT 0
+
+/*
+ * NAND Flash Command Registers (NAND_FCR0/NAND_FCR1)
+ */
+/* General purpose FCM flash command bytes CMD0-CMD7 */
+#define IFC_NAND_FCR0_CMD0 0xFF000000
+#define IFC_NAND_FCR0_CMD0_SHIFT 24
+#define IFC_NAND_FCR0_CMD1 0x00FF0000
+#define IFC_NAND_FCR0_CMD1_SHIFT 16
+#define IFC_NAND_FCR0_CMD2 0x0000FF00
+#define IFC_NAND_FCR0_CMD2_SHIFT 8
+#define IFC_NAND_FCR0_CMD3 0x000000FF
+#define IFC_NAND_FCR0_CMD3_SHIFT 0
+#define IFC_NAND_FCR1_CMD4 0xFF000000
+#define IFC_NAND_FCR1_CMD4_SHIFT 24
+#define IFC_NAND_FCR1_CMD5 0x00FF0000
+#define IFC_NAND_FCR1_CMD5_SHIFT 16
+#define IFC_NAND_FCR1_CMD6 0x0000FF00
+#define IFC_NAND_FCR1_CMD6_SHIFT 8
+#define IFC_NAND_FCR1_CMD7 0x000000FF
+#define IFC_NAND_FCR1_CMD7_SHIFT 0
+
+/*
+ * Flash ROW and COL Address Register (ROWn, COLn)
+ */
+/* Main/spare region locator */
+#define IFC_NAND_COL_MS 0x80000000
+/* Column Address */
+#define IFC_NAND_COL_CA_MASK 0x00000FFF
+
+/*
+ * NAND Flash Byte Count Register (NAND_BC)
+ */
+/* Byte Count for read/Write */
+#define IFC_NAND_BC 0x000001FF
+
+/*
+ * NAND Flash Instruction Registers (NAND_FIR0/NAND_FIR1/NAND_FIR2)
+ */
+/* NAND Machine specific opcodes OP0-OP14 */
+#define IFC_NAND_FIR0_OP0 0xFC000000
+#define IFC_NAND_FIR0_OP0_SHIFT 26
+#define IFC_NAND_FIR0_OP1 0x03F00000
+#define IFC_NAND_FIR0_OP1_SHIFT 20
+#define IFC_NAND_FIR0_OP2 0x000FC000
+#define IFC_NAND_FIR0_OP2_SHIFT 14
+#define IFC_NAND_FIR0_OP3 0x00003F00
+#define IFC_NAND_FIR0_OP3_SHIFT 8
+#define IFC_NAND_FIR0_OP4 0x000000FC
+#define IFC_NAND_FIR0_OP4_SHIFT 2
+#define IFC_NAND_FIR1_OP5 0xFC000000
+#define IFC_NAND_FIR1_OP5_SHIFT 26
+#define IFC_NAND_FIR1_OP6 0x03F00000
+#define IFC_NAND_FIR1_OP6_SHIFT 20
+#define IFC_NAND_FIR1_OP7 0x000FC000
+#define IFC_NAND_FIR1_OP7_SHIFT 14
+#define IFC_NAND_FIR1_OP8 0x00003F00
+#define IFC_NAND_FIR1_OP8_SHIFT 8
+#define IFC_NAND_FIR1_OP9 0x000000FC
+#define IFC_NAND_FIR1_OP9_SHIFT 2
+#define IFC_NAND_FIR2_OP10 0xFC000000
+#define IFC_NAND_FIR2_OP10_SHIFT 26
+#define IFC_NAND_FIR2_OP11 0x03F00000
+#define IFC_NAND_FIR2_OP11_SHIFT 20
+#define IFC_NAND_FIR2_OP12 0x000FC000
+#define IFC_NAND_FIR2_OP12_SHIFT 14
+#define IFC_NAND_FIR2_OP13 0x00003F00
+#define IFC_NAND_FIR2_OP13_SHIFT 8
+#define IFC_NAND_FIR2_OP14 0x000000FC
+#define IFC_NAND_FIR2_OP14_SHIFT 2
+
+/*
+ * Instruction opcodes to be programmed
+ * in FIR registers- 6bits
+ */
+enum ifc_nand_fir_opcodes {
+ IFC_FIR_OP_NOP,
+ IFC_FIR_OP_CA0,
+ IFC_FIR_OP_CA1,
+ IFC_FIR_OP_CA2,
+ IFC_FIR_OP_CA3,
+ IFC_FIR_OP_RA0,
+ IFC_FIR_OP_RA1,
+ IFC_FIR_OP_RA2,
+ IFC_FIR_OP_RA3,
+ IFC_FIR_OP_CMD0,
+ IFC_FIR_OP_CMD1,
+ IFC_FIR_OP_CMD2,
+ IFC_FIR_OP_CMD3,
+ IFC_FIR_OP_CMD4,
+ IFC_FIR_OP_CMD5,
+ IFC_FIR_OP_CMD6,
+ IFC_FIR_OP_CMD7,
+ IFC_FIR_OP_CW0,
+ IFC_FIR_OP_CW1,
+ IFC_FIR_OP_CW2,
+ IFC_FIR_OP_CW3,
+ IFC_FIR_OP_CW4,
+ IFC_FIR_OP_CW5,
+ IFC_FIR_OP_CW6,
+ IFC_FIR_OP_CW7,
+ IFC_FIR_OP_WBCD,
+ IFC_FIR_OP_RBCD,
+ IFC_FIR_OP_BTRD,
+ IFC_FIR_OP_RDSTAT,
+ IFC_FIR_OP_NWAIT,
+ IFC_FIR_OP_WFR,
+ IFC_FIR_OP_SBRD,
+ IFC_FIR_OP_UA,
+ IFC_FIR_OP_RB,
+};
+
+/*
+ * NAND Chip Select Register (NAND_CSEL)
+ */
+#define IFC_NAND_CSEL 0x0C000000
+#define IFC_NAND_CSEL_SHIFT 26
+#define IFC_NAND_CSEL_CS0 0x00000000
+#define IFC_NAND_CSEL_CS1 0x04000000
+#define IFC_NAND_CSEL_CS2 0x08000000
+#define IFC_NAND_CSEL_CS3 0x0C000000
+
+/*
+ * NAND Operation Sequence Start (NANDSEQ_STRT)
+ */
+/* NAND Flash Operation Start */
+#define IFC_NAND_SEQ_STRT_FIR_STRT 0x80000000
+/* Automatic Erase */
+#define IFC_NAND_SEQ_STRT_AUTO_ERS 0x00800000
+/* Automatic Program */
+#define IFC_NAND_SEQ_STRT_AUTO_PGM 0x00100000
+/* Automatic Copyback */
+#define IFC_NAND_SEQ_STRT_AUTO_CPB 0x00020000
+/* Automatic Read Operation */
+#define IFC_NAND_SEQ_STRT_AUTO_RD 0x00004000
+/* Automatic Status Read */
+#define IFC_NAND_SEQ_STRT_AUTO_STAT_RD 0x00000800
+
+/*
+ * NAND Event and Error Status Register (NAND_EVTER_STAT)
+ */
+/* Operation Complete */
+#define IFC_NAND_EVTER_STAT_OPC 0x80000000
+/* Flash Timeout Error */
+#define IFC_NAND_EVTER_STAT_FTOER 0x08000000
+/* Write Protect Error */
+#define IFC_NAND_EVTER_STAT_WPER 0x04000000
+/* ECC Error */
+#define IFC_NAND_EVTER_STAT_ECCER 0x02000000
+/* RCW Load Done */
+#define IFC_NAND_EVTER_STAT_RCW_DN 0x00008000
+/* Boot Loader Done */
+#define IFC_NAND_EVTER_STAT_BOOT_DN 0x00004000
+/* Bad Block Indicator search select */
+#define IFC_NAND_EVTER_STAT_BBI_SRCH_SE 0x00000800
+
+/*
+ * NAND Flash Page Read Completion Event Status Register
+ * (PGRDCMPL_EVT_STAT)
+ */
+#define PGRDCMPL_EVT_STAT_MASK 0xFFFF0000
+/* Small Page 0-15 Done */
+#define PGRDCMPL_EVT_STAT_SECTION_SP(n) (1 << (31 - (n)))
+/* Large Page(2K) 0-3 Done */
+#define PGRDCMPL_EVT_STAT_LP_2K(n) (0xF << (28 - (n)*4))
+/* Large Page(4K) 0-1 Done */
+#define PGRDCMPL_EVT_STAT_LP_4K(n) (0xFF << (24 - (n)*8))
+
+/*
+ * NAND Event and Error Enable Register (NAND_EVTER_EN)
+ */
+/* Operation complete event enable */
+#define IFC_NAND_EVTER_EN_OPC_EN 0x80000000
+/* Page read complete event enable */
+#define IFC_NAND_EVTER_EN_PGRDCMPL_EN 0x20000000
+/* Flash Timeout error enable */
+#define IFC_NAND_EVTER_EN_FTOER_EN 0x08000000
+/* Write Protect error enable */
+#define IFC_NAND_EVTER_EN_WPER_EN 0x04000000
+/* ECC error logging enable */
+#define IFC_NAND_EVTER_EN_ECCER_EN 0x02000000
+
+/*
+ * NAND Event and Error Interrupt Enable Register (NAND_EVTER_INTR_EN)
+ */
+/* Enable interrupt for operation complete */
+#define IFC_NAND_EVTER_INTR_OPCIR_EN 0x80000000
+/* Enable interrupt for Page read complete */
+#define IFC_NAND_EVTER_INTR_PGRDCMPLIR_EN 0x20000000
+/* Enable interrupt for Flash timeout error */
+#define IFC_NAND_EVTER_INTR_FTOERIR_EN 0x08000000
+/* Enable interrupt for Write protect error */
+#define IFC_NAND_EVTER_INTR_WPERIR_EN 0x04000000
+/* Enable interrupt for ECC error */
+#define IFC_NAND_EVTER_INTR_ECCERIR_EN 0x02000000
+
+/*
+ * NAND Transfer Error Attribute Register-0 (NAND_ERATTR0)
+ */
+#define IFC_NAND_ERATTR0_MASK 0x0C080000
+/* Error on CS0-3 for NAND */
+#define IFC_NAND_ERATTR0_ERCS_CS0 0x00000000
+#define IFC_NAND_ERATTR0_ERCS_CS1 0x04000000
+#define IFC_NAND_ERATTR0_ERCS_CS2 0x08000000
+#define IFC_NAND_ERATTR0_ERCS_CS3 0x0C000000
+/* Transaction type of error Read/Write */
+#define IFC_NAND_ERATTR0_ERTTYPE_READ 0x00080000
+
+/*
+ * NAND Flash Status Register (NAND_FSR)
+ */
+/* First byte of data read from read status op */
+#define IFC_NAND_NFSR_RS0 0xFF000000
+/* Second byte of data read from read status op */
+#define IFC_NAND_NFSR_RS1 0x00FF0000
+
+/*
+ * ECC Error Status Registers (ECCSTAT0-ECCSTAT3)
+ */
+/* Number of ECC errors on sector n (n = 0-15) */
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_MASK 0x0F000000
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR0_SHIFT 24
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_MASK 0x000F0000
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR1_SHIFT 16
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_MASK 0x00000F00
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR2_SHIFT 8
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_MASK 0x0000000F
+#define IFC_NAND_ECCSTAT0_ERRCNT_SECTOR3_SHIFT 0
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_MASK 0x0F000000
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR4_SHIFT 24
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_MASK 0x000F0000
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR5_SHIFT 16
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_MASK 0x00000F00
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR6_SHIFT 8
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_MASK 0x0000000F
+#define IFC_NAND_ECCSTAT1_ERRCNT_SECTOR7_SHIFT 0
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_MASK 0x0F000000
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR8_SHIFT 24
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_MASK 0x000F0000
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR9_SHIFT 16
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_MASK 0x00000F00
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR10_SHIFT 8
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_MASK 0x0000000F
+#define IFC_NAND_ECCSTAT2_ERRCNT_SECTOR11_SHIFT 0
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_MASK 0x0F000000
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR12_SHIFT 24
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_MASK 0x000F0000
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR13_SHIFT 16
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_MASK 0x00000F00
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR14_SHIFT 8
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_MASK 0x0000000F
+#define IFC_NAND_ECCSTAT3_ERRCNT_SECTOR15_SHIFT 0
+
+/*
+ * NAND Control Register (NANDCR)
+ */
+#define IFC_NAND_NCR_FTOCNT_MASK 0x1E000000
+#define IFC_NAND_NCR_FTOCNT_SHIFT 25
+#define IFC_NAND_NCR_FTOCNT(n) ((__ilog2(n) - 8) << IFC_NAND_NCR_FTOCNT_SHIFT)
+
+/*
+ * NAND_AUTOBOOT_TRGR
+ */
+/* Trigger RCW load */
+#define IFC_NAND_AUTOBOOT_TRGR_RCW_LD 0x80000000
+/* Trigger Auto Boot */
+#define IFC_NAND_AUTOBOOT_TRGR_BOOT_LD 0x20000000
+
+/*
+ * NAND_MDR
+ */
+/* 1st read data byte when opcode SBRD */
+#define IFC_NAND_MDR_RDATA0 0xFF000000
+/* 2nd read data byte when opcode SBRD */
+#define IFC_NAND_MDR_RDATA1 0x00FF0000
+
+/*
+ * NOR Machine Specific Registers
+ */
+/*
+ * NOR Event and Error Status Register (NOR_EVTER_STAT)
+ */
+/* NOR Command Sequence Operation Complete */
+#define IFC_NOR_EVTER_STAT_OPC_NOR 0x80000000
+/* Write Protect Error */
+#define IFC_NOR_EVTER_STAT_WPER 0x04000000
+/* Command Sequence Timeout Error */
+#define IFC_NOR_EVTER_STAT_STOER 0x01000000
+
+/*
+ * NOR Event and Error Enable Register (NOR_EVTER_EN)
+ */
+/* NOR Command Seq complete event enable */
+#define IFC_NOR_EVTER_EN_OPCEN_NOR 0x80000000
+/* Write Protect Error Checking Enable */
+#define IFC_NOR_EVTER_EN_WPEREN 0x04000000
+/* Timeout Error Enable */
+#define IFC_NOR_EVTER_EN_STOEREN 0x01000000
+
+/*
+ * NOR Event and Error Interrupt Enable Register (NOR_EVTER_INTR_EN)
+ */
+/* Enable interrupt for OPC complete */
+#define IFC_NOR_EVTER_INTR_OPCEN_NOR 0x80000000
+/* Enable interrupt for write protect error */
+#define IFC_NOR_EVTER_INTR_WPEREN 0x04000000
+/* Enable interrupt for timeout error */
+#define IFC_NOR_EVTER_INTR_STOEREN 0x01000000
+
+/*
+ * NOR Transfer Error Attribute Register-0 (NOR_ERATTR0)
+ */
+/* Source ID for error transaction */
+#define IFC_NOR_ERATTR0_ERSRCID 0xFF000000
+/* AXI ID for error transaction */
+#define IFC_NOR_ERATTR0_ERAID 0x000FF000
+/* Chip select corresponds to NOR error */
+#define IFC_NOR_ERATTR0_ERCS_CS0 0x00000000
+#define IFC_NOR_ERATTR0_ERCS_CS1 0x00000010
+#define IFC_NOR_ERATTR0_ERCS_CS2 0x00000020
+#define IFC_NOR_ERATTR0_ERCS_CS3 0x00000030
+/* Type of transaction read/write */
+#define IFC_NOR_ERATTR0_ERTYPE_READ 0x00000001
+
+/*
+ * NOR Transfer Error Attribute Register-2 (NOR_ERATTR2)
+ */
+#define IFC_NOR_ERATTR2_ER_NUM_PHASE_EXP 0x000F0000
+#define IFC_NOR_ERATTR2_ER_NUM_PHASE_PER 0x00000F00
+
+/*
+ * NOR Control Register (NORCR)
+ */
+#define IFC_NORCR_MASK 0x0F0F0000
+/* No. of Address/Data Phase */
+#define IFC_NORCR_NUM_PHASE_MASK 0x0F000000
+#define IFC_NORCR_NUM_PHASE_SHIFT 24
+#define IFC_NORCR_NUM_PHASE(n) ((n-1) << IFC_NORCR_NUM_PHASE_SHIFT)
+/* Sequence Timeout Count */
+#define IFC_NORCR_STOCNT_MASK 0x000F0000
+#define IFC_NORCR_STOCNT_SHIFT 16
+#define IFC_NORCR_STOCNT(n) ((__ilog2(n) - 8) << IFC_NORCR_STOCNT_SHIFT)
+
+/*
+ * GPCM Machine specific registers
+ */
+/*
+ * GPCM Event and Error Status Register (GPCM_EVTER_STAT)
+ */
+/* Timeout error */
+#define IFC_GPCM_EVTER_STAT_TOER 0x04000000
+/* Parity error */
+#define IFC_GPCM_EVTER_STAT_PER 0x01000000
+
+/*
+ * GPCM Event and Error Enable Register (GPCM_EVTER_EN)
+ */
+/* Timeout error enable */
+#define IFC_GPCM_EVTER_EN_TOER_EN 0x04000000
+/* Parity error enable */
+#define IFC_GPCM_EVTER_EN_PER_EN 0x01000000
+
+/*
+ * GPCM Event and Error Interrupt Enable Register (GPCM_EVTER_INTR_EN)
+ */
+/* Enable Interrupt for timeout error */
+#define IFC_GPCM_EEIER_TOERIR_EN 0x04000000
+/* Enable Interrupt for Parity error */
+#define IFC_GPCM_EEIER_PERIR_EN 0x01000000
+
+/*
+ * GPCM Transfer Error Attribute Register-0 (GPCM_ERATTR0)
+ */
+/* Source ID for error transaction */
+#define IFC_GPCM_ERATTR0_ERSRCID 0xFF000000
+/* AXI ID for error transaction */
+#define IFC_GPCM_ERATTR0_ERAID 0x000FF000
+/* Chip select corresponds to GPCM error */
+#define IFC_GPCM_ERATTR0_ERCS_CS0 0x00000000
+#define IFC_GPCM_ERATTR0_ERCS_CS1 0x00000040
+#define IFC_GPCM_ERATTR0_ERCS_CS2 0x00000080
+#define IFC_GPCM_ERATTR0_ERCS_CS3 0x000000C0
+/* Type of transaction read/Write */
+#define IFC_GPCM_ERATTR0_ERTYPE_READ 0x00000001
+
+/*
+ * GPCM Transfer Error Attribute Register-2 (GPCM_ERATTR2)
+ */
+/* On which beat of address/data parity error is observed */
+#define IFC_GPCM_ERATTR2_PERR_BEAT 0x00000C00
+/* Parity Error on byte */
+#define IFC_GPCM_ERATTR2_PERR_BYTE 0x000000F0
+/* Parity Error reported in addr or data phase */
+#define IFC_GPCM_ERATTR2_PERR_DATA_PHASE 0x00000001
+
+/*
+ * GPCM Status Register (GPCM_STAT)
+ */
+#define IFC_GPCM_STAT_BSY 0x80000000 /* GPCM is busy */
+
+/*
+ * IFC Controller NAND Machine registers
+ */
+struct fsl_ifc_nand {
+ __be32 ncfgr;
+ u32 res1[0x4];
+ __be32 nand_fcr0;
+ __be32 nand_fcr1;
+ u32 res2[0x8];
+ __be32 row0;
+ u32 res3;
+ __be32 col0;
+ u32 res4;
+ __be32 row1;
+ u32 res5;
+ __be32 col1;
+ u32 res6;
+ __be32 row2;
+ u32 res7;
+ __be32 col2;
+ u32 res8;
+ __be32 row3;
+ u32 res9;
+ __be32 col3;
+ u32 res10[0x24];
+ __be32 nand_fbcr;
+ u32 res11;
+ __be32 nand_fir0;
+ __be32 nand_fir1;
+ __be32 nand_fir2;
+ u32 res12[0x10];
+ __be32 nand_csel;
+ u32 res13;
+ __be32 nandseq_strt;
+ u32 res14;
+ __be32 nand_evter_stat;
+ u32 res15;
+ __be32 pgrdcmpl_evt_stat;
+ u32 res16[0x2];
+ __be32 nand_evter_en;
+ u32 res17[0x2];
+ __be32 nand_evter_intr_en;
+ u32 res18[0x2];
+ __be32 nand_erattr0;
+ __be32 nand_erattr1;
+ u32 res19[0x10];
+ __be32 nand_fsr;
+ u32 res20;
+ __be32 nand_eccstat[4];
+ u32 res21[0x20];
+ __be32 nanndcr;
+ u32 res22[0x2];
+ __be32 nand_autoboot_trgr;
+ u32 res23;
+ __be32 nand_mdr;
+ u32 res24[0x5C];
+};
+
+/*
+ * IFC controller NOR Machine registers
+ */
+struct fsl_ifc_nor {
+ __be32 nor_evter_stat;
+ u32 res1[0x2];
+ __be32 nor_evter_en;
+ u32 res2[0x2];
+ __be32 nor_evter_intr_en;
+ u32 res3[0x2];
+ __be32 nor_erattr0;
+ __be32 nor_erattr1;
+ __be32 nor_erattr2;
+ u32 res4[0x4];
+ __be32 norcr;
+ u32 res5[0xEF];
+};
+
+/*
+ * IFC controller GPCM Machine registers
+ */
+struct fsl_ifc_gpcm {
+ __be32 gpcm_evter_stat;
+ u32 res1[0x2];
+ __be32 gpcm_evter_en;
+ u32 res2[0x2];
+ __be32 gpcm_evter_intr_en;
+ u32 res3[0x2];
+ __be32 gpcm_erattr0;
+ __be32 gpcm_erattr1;
+ __be32 gpcm_erattr2;
+ __be32 gpcm_stat;
+ u32 res4[0x1F3];
+};
+
+/*
+ * IFC Controller Registers
+ */
+struct fsl_ifc_regs {
+ __be32 ifc_rev;
+ u32 res1[0x2];
+ struct {
+ __be32 cspr_ext;
+ __be32 cspr;
+ u32 res2;
+ } cspr_cs[FSL_IFC_BANK_COUNT];
+ u32 res3[0xd];
+ struct {
+ __be32 amask;
+ u32 res4[0x2];
+ } amask_cs[FSL_IFC_BANK_COUNT];
+ u32 res5[0xc];
+ struct {
+ __be32 csor;
+ __be32 csor_ext;
+ u32 res6;
+ } csor_cs[FSL_IFC_BANK_COUNT];
+ u32 res7[0xc];
+ struct {
+ __be32 ftim[4];
+ u32 res8[0x8];
+ } ftim_cs[FSL_IFC_BANK_COUNT];
+ u32 res9[0x30];
+ __be32 rb_stat;
+ u32 res10[0x2];
+ __be32 ifc_gcr;
+ u32 res11[0x2];
+ __be32 cm_evter_stat;
+ u32 res12[0x2];
+ __be32 cm_evter_en;
+ u32 res13[0x2];
+ __be32 cm_evter_intr_en;
+ u32 res14[0x2];
+ __be32 cm_erattr0;
+ __be32 cm_erattr1;
+ u32 res15[0x2];
+ __be32 ifc_ccr;
+ __be32 ifc_csr;
+ u32 res16[0x2EB];
+ struct fsl_ifc_nand ifc_nand;
+ struct fsl_ifc_nor ifc_nor;
+ struct fsl_ifc_gpcm ifc_gpcm;
+};
+
+extern unsigned int convert_ifc_address(phys_addr_t addr_base);
+extern int fsl_ifc_find(phys_addr_t addr_base);
+
+/* Overview of the FSL IFC controller */
+
+struct fsl_ifc_ctrl {
+ /* device info */
+ struct device *dev;
+ struct fsl_ifc_regs __iomem *regs;
+ int irq;
+ int nand_irq;
+ spinlock_t lock;
+ void *nand;
+ int version;
+ int banks;
+
+ u32 nand_stat;
+ wait_queue_head_t nand_wait;
+};
+
+extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
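+
+/*
+ * Illustrative sketch (editorial addition, not part of the original header):
+ * code that has been handed the controller instance can read the big-endian,
+ * memory-mapped registers through the regs pointer; ioread32be() is the usual
+ * accessor for these __be32 registers. The local variable names are
+ * hypothetical.
+ *
+ *	struct fsl_ifc_ctrl *ctrl = fsl_ifc_ctrl_dev;
+ *	u32 rev = ioread32be(&ctrl->regs->ifc_rev) & FSL_IFC_VERSION_MASK;
+ *
+ *	if (rev >= FSL_IFC_VERSION_1_1_0)
+ *		ctrl->banks = 8;
+ *	else
+ *		ctrl->banks = 4;
+ */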
+
+
+#endif /* __ASM_FSL_IFC_H */
diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h
new file mode 100644
index 000000000..b213c0296
--- /dev/null
+++ b/include/linux/fsldma.h
@@ -0,0 +1,13 @@
+/*
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef FSL_DMA_H
+#define FSL_DMA_H
+/* FSL DMA API for external start */
+int fsl_dma_external_start(struct dma_chan *dchan, int enable);
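+
+/*
+ * Illustrative sketch (editorial addition, not part of the original header):
+ * a client that already owns a channel backed by the fsldma driver could
+ * enable external start on it; "chan" and "dev" are assumed to exist.
+ *
+ *	int ret = fsl_dma_external_start(chan, 1);
+ *
+ *	if (ret)
+ *		dev_warn(dev, "external start not enabled: %d\n", ret);
+ */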
+
+#endif
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
new file mode 100644
index 000000000..7ee1774ed
--- /dev/null
+++ b/include/linux/fsnotify.h
@@ -0,0 +1,344 @@
+#ifndef _LINUX_FS_NOTIFY_H
+#define _LINUX_FS_NOTIFY_H
+
+/*
+ * include/linux/fsnotify.h - generic hooks for filesystem notification, to
+ * reduce in-source duplication from both dnotify and inotify.
+ *
+ * We don't compile any of this away in some complicated menagerie of ifdefs.
+ * Instead, we rely on the code inside to optimize away as needed.
+ *
+ * (C) Copyright 2005 Robert Love
+ */
+
+#include <linux/fsnotify_backend.h>
+#include <linux/audit.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+
+/*
+ * fsnotify_d_instantiate - instantiate a dentry for inode
+ */
+static inline void fsnotify_d_instantiate(struct dentry *dentry,
+ struct inode *inode)
+{
+ __fsnotify_d_instantiate(dentry, inode);
+}
+
+/* Notify this dentry's parent about a child's events. */
+static inline int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
+{
+ if (!dentry)
+ dentry = path->dentry;
+
+ return __fsnotify_parent(path, dentry, mask);
+}
+
+/* simple call site for access decisions */
+static inline int fsnotify_perm(struct file *file, int mask)
+{
+ struct path *path = &file->f_path;
+ struct inode *inode = file_inode(file);
+ __u32 fsnotify_mask = 0;
+ int ret;
+
+ if (file->f_mode & FMODE_NONOTIFY)
+ return 0;
+ if (!(mask & (MAY_READ | MAY_OPEN)))
+ return 0;
+ if (mask & MAY_OPEN)
+ fsnotify_mask = FS_OPEN_PERM;
+ else if (mask & MAY_READ)
+ fsnotify_mask = FS_ACCESS_PERM;
+ else
+ BUG();
+
+ ret = fsnotify_parent(path, NULL, fsnotify_mask);
+ if (ret)
+ return ret;
+
+ return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+}
+
+/*
+ * fsnotify_d_move - dentry has been moved
+ */
+static inline void fsnotify_d_move(struct dentry *dentry)
+{
+ /*
+ * On move we need to update dentry->d_flags to indicate if the new parent
+ * cares about events from this dentry.
+ */
+ __fsnotify_update_dcache_flags(dentry);
+}
+
+/*
+ * fsnotify_link_count - inode's link count changed
+ */
+static inline void fsnotify_link_count(struct inode *inode)
+{
+ fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+}
+
+/*
+ * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
+ */
+static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
+ const unsigned char *old_name,
+ int isdir, struct inode *target, struct dentry *moved)
+{
+ struct inode *source = moved->d_inode;
+ u32 fs_cookie = fsnotify_get_cookie();
+ __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
+ __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
+ const unsigned char *new_name = moved->d_name.name;
+
+ if (old_dir == new_dir)
+ old_dir_mask |= FS_DN_RENAME;
+
+ if (isdir) {
+ old_dir_mask |= FS_ISDIR;
+ new_dir_mask |= FS_ISDIR;
+ }
+
+ fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
+ fs_cookie);
+ fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
+ fs_cookie);
+
+ if (target)
+ fsnotify_link_count(target);
+
+ if (source)
+ fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE);
+}
+
+/*
+ * fsnotify_inode_delete - an inode is being evicted from the cache; cleanup is needed
+ */
+static inline void fsnotify_inode_delete(struct inode *inode)
+{
+ __fsnotify_inode_delete(inode);
+}
+
+/*
+ * fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed
+ */
+static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{
+ __fsnotify_vfsmount_delete(mnt);
+}
+
+/*
+ * fsnotify_nameremove - a filename was removed from a directory
+ */
+static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
+{
+ __u32 mask = FS_DELETE;
+
+ if (isdir)
+ mask |= FS_ISDIR;
+
+ fsnotify_parent(NULL, dentry, mask);
+}
+
+/*
+ * fsnotify_inoderemove - an inode is going away
+ */
+static inline void fsnotify_inoderemove(struct inode *inode)
+{
+ fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ __fsnotify_inode_delete(inode);
+}
+
+/*
+ * fsnotify_create - 'name' was linked in
+ */
+static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
+{
+ audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+
+ fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
+}
+
+/*
+ * fsnotify_link - new hardlink in 'inode' directory
+ * Note: We also have to pass the linked inode pointer, as some filesystems
+ * leave new_dentry->d_inode NULL and instantiate the inode pointer later
+ */
+static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
+{
+ fsnotify_link_count(inode);
+ audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
+
+ fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
+}
+
+/*
+ * fsnotify_mkdir - directory 'name' was created
+ */
+static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
+{
+ __u32 mask = (FS_CREATE | FS_ISDIR);
+ struct inode *d_inode = dentry->d_inode;
+
+ audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE);
+
+ fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
+}
+
+/*
+ * fsnotify_access - file was read
+ */
+static inline void fsnotify_access(struct file *file)
+{
+ struct path *path = &file->f_path;
+ struct inode *inode = file_inode(file);
+ __u32 mask = FS_ACCESS;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
+}
+
+/*
+ * fsnotify_modify - file was modified
+ */
+static inline void fsnotify_modify(struct file *file)
+{
+ struct path *path = &file->f_path;
+ struct inode *inode = file_inode(file);
+ __u32 mask = FS_MODIFY;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
+}
+
+/*
+ * fsnotify_open - file was opened
+ */
+static inline void fsnotify_open(struct file *file)
+{
+ struct path *path = &file->f_path;
+ struct inode *inode = file_inode(file);
+ __u32 mask = FS_OPEN;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+}
+
+/*
+ * fsnotify_close - file was closed
+ */
+static inline void fsnotify_close(struct file *file)
+{
+ struct path *path = &file->f_path;
+ struct inode *inode = file_inode(file);
+ fmode_t mode = file->f_mode;
+ __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
+}
+
+/*
+ * fsnotify_xattr - extended attributes were changed
+ */
+static inline void fsnotify_xattr(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ __u32 mask = FS_ATTRIB;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_parent(NULL, dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+}
+
+/*
+ * fsnotify_change - notify_change event. file was modified and/or metadata
+ * was changed.
+ */
+static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
+{
+ struct inode *inode = dentry->d_inode;
+ __u32 mask = 0;
+
+ if (ia_valid & ATTR_UID)
+ mask |= FS_ATTRIB;
+ if (ia_valid & ATTR_GID)
+ mask |= FS_ATTRIB;
+ if (ia_valid & ATTR_SIZE)
+ mask |= FS_MODIFY;
+
+ /* both times implies a utime(s) call */
+ if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME))
+ mask |= FS_ATTRIB;
+ else if (ia_valid & ATTR_ATIME)
+ mask |= FS_ACCESS;
+ else if (ia_valid & ATTR_MTIME)
+ mask |= FS_MODIFY;
+
+ if (ia_valid & ATTR_MODE)
+ mask |= FS_ATTRIB;
+
+ if (mask) {
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_parent(NULL, dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ }
+}
+
+#if defined(CONFIG_FSNOTIFY) /* notify helpers */
+
+/*
+ * fsnotify_oldname_init - save off the old filename before we change it
+ */
+static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
+{
+ return kstrdup(name, GFP_KERNEL);
+}
+
+/*
+ * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
+ */
+static inline void fsnotify_oldname_free(const unsigned char *old_name)
+{
+ kfree(old_name);
+}
+
+#else /* CONFIG_FSNOTIFY */
+
+static inline const char *fsnotify_oldname_init(const unsigned char *name)
+{
+ return NULL;
+}
+
+static inline void fsnotify_oldname_free(const unsigned char *old_name)
+{
+}
+
+#endif /* CONFIG_FSNOTIFY */
+
+#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
new file mode 100644
index 000000000..0f313f93c
--- /dev/null
+++ b/include/linux/fsnotify_backend.h
@@ -0,0 +1,405 @@
+/*
+ * Filesystem access notification for Linux
+ *
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ */
+
+#ifndef __LINUX_FSNOTIFY_BACKEND_H
+#define __LINUX_FSNOTIFY_BACKEND_H
+
+#ifdef __KERNEL__
+
+#include <linux/idr.h> /* inotify uses this */
+#include <linux/fs.h> /* struct inode */
+#include <linux/list.h>
+#include <linux/path.h> /* struct path */
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/atomic.h>
+
+/*
+ * IN_* from inotify.h lines up EXACTLY with FS_*; this is so we can easily
+ * convert between them. dnotify only needs conversion at watch creation
+ * so there is no perf loss there. fanotify isn't defined yet, so it can use
+ * the holes if it needs more events.
+ */
+#define FS_ACCESS 0x00000001 /* File was accessed */
+#define FS_MODIFY 0x00000002 /* File was modified */
+#define FS_ATTRIB 0x00000004 /* Metadata changed */
+#define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */
+#define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
+#define FS_OPEN 0x00000020 /* File was opened */
+#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
+#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
+#define FS_CREATE 0x00000100 /* Subfile was created */
+#define FS_DELETE 0x00000200 /* Subfile was deleted */
+#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
+#define FS_MOVE_SELF 0x00000800 /* Self was moved */
+
+#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
+#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
+
+#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
+#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
+
+#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
+#define FS_ISDIR 0x40000000 /* event occurred against dir */
+#define FS_IN_ONESHOT 0x80000000 /* only send event once */
+
+#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
+
+/* This inode cares about things that happen to its children. Always set for
+ * dnotify and inotify. */
+#define FS_EVENT_ON_CHILD 0x08000000
+
+/* This is a list of all events that may get sent to a parent based on fs events
+ * happening to inodes inside that directory */
+#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
+ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
+ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
+ FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
+
+#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
+
+#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
+
+#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
+ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \
+ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
+ FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
+ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
+ FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \
+ FS_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \
+ FS_DN_MULTISHOT | FS_EVENT_ON_CHILD)
+
+struct fsnotify_group;
+struct fsnotify_event;
+struct fsnotify_mark;
+struct fsnotify_event_private_data;
+struct fsnotify_fname;
+
+/*
+ * Each group must define these ops. The fsnotify infrastructure will call
+ * these operations for each relevant group.
+ *
+ * should_send_event - given a group, inode, and mask this function determines
+ * if the group is interested in this event.
+ * handle_event - main call for a group to handle an fs event
+ * free_group_priv - called when a group refcnt hits 0 to clean up the private union
+ * freeing_mark - called when a mark is being destroyed for some reason. The group
+ * MUST be holding a reference on each mark and that reference must be
+ * dropped in this function. inotify uses this function to send
+ * userspace messages that marks have been removed.
+ */
+struct fsnotify_ops {
+ int (*handle_event)(struct fsnotify_group *group,
+ struct inode *inode,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+ const unsigned char *file_name, u32 cookie);
+ void (*free_group_priv)(struct fsnotify_group *group);
+ void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
+ void (*free_event)(struct fsnotify_event *event);
+};
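+
+/*
+ * Illustrative sketch (editorial addition, not part of the original header):
+ * a minimal in-kernel listener supplies at least handle_event() with the
+ * signature above and hands the ops to fsnotify_alloc_group() (declared later
+ * in this header). The my_* names are hypothetical.
+ *
+ *	static int my_handle_event(struct fsnotify_group *group,
+ *				   struct inode *inode,
+ *				   struct fsnotify_mark *inode_mark,
+ *				   struct fsnotify_mark *vfsmount_mark,
+ *				   u32 mask, void *data, int data_type,
+ *				   const unsigned char *file_name, u32 cookie)
+ *	{
+ *		pr_debug("event 0x%x on inode %p\n", mask, inode);
+ *		return 0;
+ *	}
+ *
+ *	static const struct fsnotify_ops my_fsnotify_ops = {
+ *		.handle_event	= my_handle_event,
+ *	};
+ *
+ *	group = fsnotify_alloc_group(&my_fsnotify_ops);
+ */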
+
+/*
+ * All of the information about the original object that we now want to send
+ * to a group. If you want to carry more info from the accessing task to the
+ * listener, this structure is where you need to add fields.
+ */
+struct fsnotify_event {
+ struct list_head list;
+ /* inode may ONLY be dereferenced during handle_event(). */
+ struct inode *inode; /* either the inode the event happened to or its parent */
+ u32 mask; /* the type of access, bitwise OR for FS_* event types */
+};
+
+/*
+ * A group is a "thing" that wants to receive notification about filesystem
+ * events. The mask holds the subset of event types this group cares about.
+ * refcnt on a group is up to the implementor and at any moment if it goes 0
+ * everything will be cleaned up.
+ */
+struct fsnotify_group {
+ /*
+ * How the refcnt is used is up to each group. When the refcnt hits 0
+ * fsnotify will clean up all of the resources associated with this group.
+ * As an example, the dnotify group will always have a refcnt=1 and that
+ * will never change. Inotify, on the other hand, has a group per
+ * inotify_init() and the refcnt will hit 0 only when that fd has been
+ * closed.
+ */
+ atomic_t refcnt; /* things with interest in this group */
+
+ const struct fsnotify_ops *ops; /* how this group handles things */
+
+ /* needed to send notification to userspace */
+ struct mutex notification_mutex; /* protect the notification_list */
+ struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
+ wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
+ unsigned int q_len; /* events on the queue */
+ unsigned int max_events; /* maximum events allowed on the list */
+ /*
+ * Valid fsnotify group priorities. Events are sent in order from highest
+ * priority to lowest priority. We default to the lowest priority.
+ */
+ #define FS_PRIO_0 0 /* normal notifiers, no permissions */
+ #define FS_PRIO_1 1 /* fanotify content based access control */
+ #define FS_PRIO_2 2 /* fanotify pre-content access */
+ unsigned int priority;
+
+ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
+ struct mutex mark_mutex; /* protect marks_list */
+ atomic_t num_marks; /* 1 for each mark and 1 for not being
+ * past the point of no return when freeing
+ * a group */
+ struct list_head marks_list; /* all inode marks for this group */
+
+ struct fasync_struct *fsn_fa; /* async notification */
+
+ struct fsnotify_event *overflow_event; /* Event we queue when the
+ * notification list is too
+ * full */
+
+ /* groups can define private fields here or use the void *private */
+ union {
+ void *private;
+#ifdef CONFIG_INOTIFY_USER
+ struct inotify_group_private_data {
+ spinlock_t idr_lock;
+ struct idr idr;
+ struct user_struct *user;
+ } inotify_data;
+#endif
+#ifdef CONFIG_FANOTIFY
+ struct fanotify_group_private_data {
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ /* allows a group to block waiting for a userspace response */
+ spinlock_t access_lock;
+ struct list_head access_list;
+ wait_queue_head_t access_waitq;
+ atomic_t bypass_perm;
+#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
+ int f_flags;
+ unsigned int max_marks;
+ struct user_struct *user;
+ } fanotify_data;
+#endif /* CONFIG_FANOTIFY */
+ };
+};
+
+/* when calling fsnotify tell it if the data is a path or inode */
+#define FSNOTIFY_EVENT_NONE 0
+#define FSNOTIFY_EVENT_PATH 1
+#define FSNOTIFY_EVENT_INODE 2
+
+/*
+ * a mark is simply an object attached to an in core inode which allows an
+ * fsnotify listener to indicate they are either no longer interested in events
+ * of a type matching mask or only interested in those events.
+ *
+ * these are flushed when an inode is evicted from core and may be flushed
+ * when the inode is modified (as seen by fsnotify_access). Some fsnotify users
+ * (such as dnotify) will flush these when the open fd is closed and not at
+ * inode eviction or modification.
+ */
+struct fsnotify_mark {
+ __u32 mask; /* mask this mark is for */
+ /* We hold a ref for each i_list and g_list, and also one ref for each
+ * 'thing' in the kernel that found and may be using this mark. */
+ atomic_t refcnt; /* active things looking at this mark */
+ struct fsnotify_group *group; /* group this mark is for */
+ struct list_head g_list; /* list of marks by group->i_fsnotify_marks
+ * Also reused for queueing mark into
+ * destroy_list when it's waiting for
+ * the end of SRCU period before it can
+ * be freed */
+ spinlock_t lock; /* protect group and inode */
+ struct hlist_node obj_list; /* list of marks for inode / vfsmount */
+ struct list_head free_list; /* tmp list used when freeing this mark */
+ union {
+ struct inode *inode; /* inode this mark is associated with */
+ struct vfsmount *mnt; /* vfsmount this mark is associated with */
+ };
+ __u32 ignored_mask; /* event types to ignore */
+#define FSNOTIFY_MARK_FLAG_INODE 0x01
+#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
+#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
+#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
+ unsigned int flags; /* vfsmount or inode mark? */
+ void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
+};
+
+#ifdef CONFIG_FSNOTIFY
+
+/* called from the vfs helpers */
+
+/* main fsnotify call to send events */
+extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const unsigned char *name, u32 cookie);
+extern int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
+extern void __fsnotify_inode_delete(struct inode *inode);
+extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
+extern u32 fsnotify_get_cookie(void);
+
+static inline int fsnotify_inode_watches_children(struct inode *inode)
+{
+ /* FS_EVENT_ON_CHILD is set if the inode may care */
+ if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
+ return 0;
+ /* this inode might care about child events, does it care about the
+ * specific set of events that can happen on a child? */
+ return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
+}
+
+/*
+ * Update the dentry with a flag indicating the interest of its parent to
+ * receive filesystem events when those events happen to this dentry->d_inode.
+ */
+static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
+{
+ struct dentry *parent;
+
+ assert_spin_locked(&dentry->d_lock);
+
+ /*
+ * Serialisation of setting PARENT_WATCHED on the dentries is provided
+ * by d_lock. If inotify_inode_watched changes after we have taken
+ * d_lock, the following __fsnotify_update_child_dentry_flags call will
+ * find our entry, so it will spin until we complete here, and update
+ * us with the new state.
+ */
+ parent = dentry->d_parent;
+ if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
+ dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+ else
+ dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
+}
+
+/*
+ * fsnotify_d_instantiate - instantiate a dentry for inode
+ */
+static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+{
+ if (!inode)
+ return;
+
+ spin_lock(&dentry->d_lock);
+ __fsnotify_update_dcache_flags(dentry);
+ spin_unlock(&dentry->d_lock);
+}
+
+/* called from fsnotify listeners, such as fanotify or dnotify */
+
+/* create a new group */
+extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
+/* get reference to a group */
+extern void fsnotify_get_group(struct fsnotify_group *group);
+/* drop reference on a group from fsnotify_alloc_group */
+extern void fsnotify_put_group(struct fsnotify_group *group);
+/* destroy group */
+extern void fsnotify_destroy_group(struct fsnotify_group *group);
+/* fasync handler function */
+extern int fsnotify_fasync(int fd, struct file *file, int on);
+/* Free event from memory */
+extern void fsnotify_destroy_event(struct fsnotify_group *group,
+ struct fsnotify_event *event);
+/* attach the event to the group notification queue */
+extern int fsnotify_add_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ int (*merge)(struct list_head *,
+ struct fsnotify_event *));
+/* Remove passed event from groups notification queue */
+extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
+/* true if the group notification queue is empty */
+extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
+/* return, but do not dequeue the first event on the notification queue */
+extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group);
+/* return AND dequeue the first event on the notification queue */
+extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group);
+
+/* functions used to manipulate the marks attached to inodes */
+
+/* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */
+extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt);
+/* run all marks associated with an inode and update inode->i_fsnotify_mask */
+extern void fsnotify_recalc_inode_mask(struct inode *inode);
+extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark));
+/* find (and take a reference to) a mark associated with group and inode */
+extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
+/* find (and take a reference to) a mark associated with group and vfsmount */
+extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
+/* copy the values from old into new */
+extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
+/* set the ignored_mask of a mark */
+extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
+/* set the mask of a mark (might pin the object into memory) */
+extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask);
+/* attach the mark to both the group and the inode */
+extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
+ struct inode *inode, struct vfsmount *mnt, int allow_dups);
+extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group,
+ struct inode *inode, struct vfsmount *mnt, int allow_dups);
+/* given a group and a mark, flag mark to be freed when all references are dropped */
+extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group);
+extern void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
+ struct fsnotify_group *group);
+/* run all the marks in a group, and clear all of the vfsmount marks */
+extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
+/* run all the marks in a group, and clear all of the inode marks */
+extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
+/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true */
+extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
+/* run all the marks in a group, and flag them to be freed */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
+extern void fsnotify_get_mark(struct fsnotify_mark *mark);
+extern void fsnotify_put_mark(struct fsnotify_mark *mark);
+extern void fsnotify_unmount_inodes(struct list_head *list);
+
+/* put here because inotify does some weird stuff when destroying watches */
+extern void fsnotify_init_event(struct fsnotify_event *event,
+ struct inode *to_tell, u32 mask);
+
+#else
+
+static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const unsigned char *name, u32 cookie)
+{
+ return 0;
+}
+
+static inline int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
+{
+ return 0;
+}
+
+static inline void __fsnotify_inode_delete(struct inode *inode)
+{}
+
+static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{}
+
+static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
+{}
+
+static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+{}
+
+static inline u32 fsnotify_get_cookie(void)
+{
+ return 0;
+}
+
+static inline void fsnotify_unmount_inodes(struct list_head *list)
+{}
+
+#endif /* CONFIG_FSNOTIFY */
+
+#endif /* __KERNEL__ */
+
+#endif /* __LINUX_FSNOTIFY_BACKEND_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
new file mode 100644
index 000000000..1da602982
--- /dev/null
+++ b/include/linux/ftrace.h
@@ -0,0 +1,905 @@
+/*
+ * Ftrace header. For implementation details beyond the random comments
+ * scattered below, see: Documentation/trace/ftrace-design.txt
+ */
+
+#ifndef _LINUX_FTRACE_H
+#define _LINUX_FTRACE_H
+
+#include <linux/trace_clock.h>
+#include <linux/kallsyms.h>
+#include <linux/linkage.h>
+#include <linux/bitops.h>
+#include <linux/ptrace.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include <asm/ftrace.h>
+
+/*
+ * If the arch supports passing the variable contents of
+ * function_trace_op as the third parameter back from the
+ * mcount call, then the arch should define this as 1.
+ */
+#ifndef ARCH_SUPPORTS_FTRACE_OPS
+#define ARCH_SUPPORTS_FTRACE_OPS 0
+#endif
+
+/*
+ * If the arch's mcount caller does not support all of ftrace's
+ * features, then it must call an indirect function that does, or at least
+ * does enough to prevent any unwelcome side effects.
+ */
+#if !ARCH_SUPPORTS_FTRACE_OPS
+# define FTRACE_FORCE_LIST_FUNC 1
+#else
+# define FTRACE_FORCE_LIST_FUNC 0
+#endif
+
+/* Main tracing buffer and events set up */
+#ifdef CONFIG_TRACING
+void trace_init(void);
+#else
+static inline void trace_init(void) { }
+#endif
+
+struct module;
+struct ftrace_hash;
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+extern int ftrace_enabled;
+extern int
+ftrace_enable_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+struct ftrace_ops;
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs);
+
+ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
+
+/*
+ * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
+ * set in the flags member.
+ * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
+ * IPMODIFY are attribute flags which can be set only before registering
+ * the ftrace_ops, and cannot be modified while registered.
+ * Changing those attribute flags after registering the ftrace_ops will
+ * cause unexpected results.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * DYNAMIC - set when ftrace_ops is registered to denote dynamically
+ * allocated ftrace_ops which need special care
+ * CONTROL - set manually by the ftrace_ops user to denote that the ftrace_ops
+ *           could be controlled by the following calls:
+ * ftrace_function_local_enable
+ * ftrace_function_local_disable
+ * SAVE_REGS - The ftrace_ops wants regs saved at each function called
+ * and passed to the callback. If this flag is set, but the
+ * architecture does not support passing regs
+ * (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
+ * ftrace_ops will fail to register, unless the next flag
+ * is set.
+ * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
+ * handler can handle an arch that does not save regs
+ * (the handler tests if regs == NULL), then it can set
+ * this flag instead. It will not fail registering the ftrace_ops
+ * but, the regs field will be NULL if the arch does not support
+ * passing regs to the handler.
+ * Note, if this flag is set, the SAVE_REGS flag will automatically
+ * get set upon registering the ftrace_ops, if the arch supports it.
+ * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
+ * that the call back has its own recursion protection. If it does
+ * not set this, then the ftrace infrastructure will add recursion
+ * protection for the caller.
+ * STUB - The ftrace_ops is just a placeholder.
+ * INITIALIZED - The ftrace_ops has already been initialized (the first time
+ *            register_ftrace_function() is called, it will initialize the ops)
+ * DELETED - The ops are being deleted, do not let them be registered again.
+ * ADDING - The ops is in the process of being added.
+ * REMOVING - The ops is in the process of being removed.
+ * MODIFYING - The ops is in the process of changing its filter functions.
+ * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
+ * The arch specific code sets this flag when it allocated a
+ * trampoline. This lets the arch know that it can update the
+ * trampoline in case the callback function changes.
+ * The ftrace_ops trampoline can be set by the ftrace users, and
+ * in such cases the arch must not modify it. Only the arch ftrace
+ * core code should set this flag.
+ * IPMODIFY - The ops can modify the IP register. This can only be set with
+ * SAVE_REGS. If another ops with this flag set is already registered
+ * for any of the functions that this ops will be registered for, then
+ * this ops will fail to register or set_filter_ip.
+ */
+enum {
+ FTRACE_OPS_FL_ENABLED = 1 << 0,
+ FTRACE_OPS_FL_DYNAMIC = 1 << 1,
+ FTRACE_OPS_FL_CONTROL = 1 << 2,
+ FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
+ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
+ FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
+ FTRACE_OPS_FL_STUB = 1 << 6,
+ FTRACE_OPS_FL_INITIALIZED = 1 << 7,
+ FTRACE_OPS_FL_DELETED = 1 << 8,
+ FTRACE_OPS_FL_ADDING = 1 << 9,
+ FTRACE_OPS_FL_REMOVING = 1 << 10,
+ FTRACE_OPS_FL_MODIFYING = 1 << 11,
+ FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
+ FTRACE_OPS_FL_IPMODIFY = 1 << 13,
+};
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* The hash used to know what functions callbacks trace */
+struct ftrace_ops_hash {
+ struct ftrace_hash *notrace_hash;
+ struct ftrace_hash *filter_hash;
+ struct mutex regex_lock;
+};
+#endif
+
+/*
+ * Note, ftrace_ops can be referenced outside of RCU protection.
+ * (Although, for perf, the control ops prevent that). If ftrace_ops is
+ * allocated and not part of kernel core data, the unregistering of it will
+ * perform a schedule_on_each_cpu() to make sure that there are no more users.
+ * Depending on the load of the system that may take a bit of time.
+ *
+ * Any private data added must also take care not to be freed and if private
+ * data is added to a ftrace_ops that is in core code, the user of the
+ * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
+ */
+struct ftrace_ops {
+ ftrace_func_t func;
+ struct ftrace_ops *next;
+ unsigned long flags;
+ void *private;
+ int __percpu *disabled;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ int nr_trampolines;
+ struct ftrace_ops_hash local_hash;
+ struct ftrace_ops_hash *func_hash;
+ struct ftrace_ops_hash old_hash;
+ unsigned long trampoline;
+ unsigned long trampoline_size;
+#endif
+};
+
+/*
+ * Type of the current tracing.
+ */
+enum ftrace_tracing_type_t {
+ FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
+ FTRACE_TYPE_RETURN, /* Hook the return of the function */
+};
+
+/* Current tracing type, default is FTRACE_TYPE_ENTER */
+extern enum ftrace_tracing_type_t ftrace_tracing_type;
+
+/*
+ * The ftrace_ops must be static and should also
+ * be read_mostly. These functions do modify read_mostly variables
+ * so use them sparingly. Never free an ftrace_ops or modify the
+ * next pointer after it has been registered. Even after unregistering
+ * it, the next pointer may still be used internally.
+ */
+int register_ftrace_function(struct ftrace_ops *ops);
+int unregister_ftrace_function(struct ftrace_ops *ops);
+void clear_ftrace_function(void);
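+
+/*
+ * Illustrative sketch (not part of the original header): a made-up callback
+ * and ftrace_ops, registered and unregistered as a user of this API would.
+ *
+ *	static void my_callback(unsigned long ip, unsigned long parent_ip,
+ *				struct ftrace_ops *op, struct pt_regs *regs)
+ *	{
+ *		// handle a hit on the function at 'ip', called from 'parent_ip'
+ *	}
+ *
+ *	static struct ftrace_ops my_ops __read_mostly = {
+ *		.func	= my_callback,
+ *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
+ *	};
+ *
+ *	// init:	register_ftrace_function(&my_ops);
+ *	// teardown:	unregister_ftrace_function(&my_ops);
+ */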
+
+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on current cpu by decreasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+ return;
+
+ (*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on current cpu by increasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+ if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+ return;
+
+ (*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ * on current cpu
+ *
+ * This function returns value of ftrace_ops::disabled on current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+ WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+ return *this_cpu_ptr(ops->disabled);
+}
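+
+/*
+ * Illustrative sketch (not part of the original header): toggling a made-up
+ * FTRACE_OPS_FL_CONTROL ops on the local CPU, with the preemption handling
+ * required by the comments above.
+ *
+ *	preempt_disable();
+ *	ftrace_function_local_disable(&my_control_ops);
+ *	// ... section that must not be traced on this CPU ...
+ *	ftrace_function_local_enable(&my_control_ops);
+ *	preempt_enable();
+ */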
+
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+ struct ftrace_ops *op, struct pt_regs *regs);
+
+#else /* !CONFIG_FUNCTION_TRACER */
+/*
+ * (un)register_ftrace_function must be a macro since the ops parameter
+ * must not be evaluated.
+ */
+#define register_ftrace_function(ops) ({ 0; })
+#define unregister_ftrace_function(ops) ({ 0; })
+static inline int ftrace_nr_registered_ops(void)
+{
+ return 0;
+}
+static inline void clear_ftrace_function(void) { }
+static inline void ftrace_kill(void) { }
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_STACK_TRACER
+extern int stack_tracer_enabled;
+int
+stack_trace_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+#endif
+
+struct ftrace_func_command {
+ struct list_head list;
+ char *name;
+ int (*func)(struct ftrace_hash *hash,
+ char *func, char *cmd,
+ char *params, int enable);
+};
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+int ftrace_arch_code_modify_prepare(void);
+int ftrace_arch_code_modify_post_process(void);
+
+struct dyn_ftrace;
+
+void ftrace_bug(int err, struct dyn_ftrace *rec);
+
+struct seq_file;
+
+struct ftrace_probe_ops {
+ void (*func)(unsigned long ip,
+ unsigned long parent_ip,
+ void **data);
+ int (*init)(struct ftrace_probe_ops *ops,
+ unsigned long ip, void **data);
+ void (*free)(struct ftrace_probe_ops *ops,
+ unsigned long ip, void **data);
+ int (*print)(struct seq_file *m,
+ unsigned long ip,
+ struct ftrace_probe_ops *ops,
+ void *data);
+};
+
+extern int
+register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data);
+extern void
+unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data);
+extern void
+unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
+extern void unregister_ftrace_function_probe_all(char *glob);
+
+extern int ftrace_text_reserved(const void *start, const void *end);
+
+extern int ftrace_nr_registered_ops(void);
+
+bool is_ftrace_trampoline(unsigned long addr);
+
+/*
+ * The dyn_ftrace record's flags field is split into two parts.
+ * The first part, which is '0-FTRACE_REF_MAX', is a counter of
+ * the number of callbacks that have registered the function that
+ * the dyn_ftrace descriptor represents.
+ *
+ * The second part is a mask:
+ * ENABLED - the function is being traced
+ * REGS - the record wants the function to save regs
+ * REGS_EN - the function is set up to save regs.
+ * IPMODIFY - the record allows for the IP address to be changed.
+ *
+ * When a new ftrace_ops is registered and wants a function to save
+ * pt_regs, the REGS flag is set in rec->flags. When the function has been
+ * set up to save regs, the REGS_EN flag is set. Once a function
+ * starts saving regs it will do so until all ftrace_ops are removed
+ * from tracing that function.
+ */
+enum {
+ FTRACE_FL_ENABLED = (1UL << 31),
+ FTRACE_FL_REGS = (1UL << 30),
+ FTRACE_FL_REGS_EN = (1UL << 29),
+ FTRACE_FL_TRAMP = (1UL << 28),
+ FTRACE_FL_TRAMP_EN = (1UL << 27),
+ FTRACE_FL_IPMODIFY = (1UL << 26),
+};
+
+#define FTRACE_REF_MAX_SHIFT 26
+#define FTRACE_FL_BITS 6
+#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
+#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
+#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
+
+#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK)
+
+struct dyn_ftrace {
+ unsigned long ip; /* address of mcount call-site */
+ unsigned long flags;
+ struct dyn_arch_ftrace arch;
+};
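+
+/*
+ * Illustrative sketch (not part of the original header): reading the split
+ * flags field of a record obtained elsewhere (e.g. from the record iterator).
+ *
+ *	if (rec->flags & FTRACE_FL_ENABLED)
+ *		pr_debug("%ps is traced by %lu callback(s)\n",
+ *			 (void *)rec->ip, ftrace_rec_count(rec));
+ */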
+
+int ftrace_force_update(void);
+int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
+ int remove, int reset);
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+ int len, int reset);
+void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
+void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
+void ftrace_free_filter(struct ftrace_ops *ops);
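+
+/*
+ * Illustrative sketch (not part of the original header): restricting a
+ * made-up ops to a set of functions before registering it.
+ *
+ *	char buf[] = "kmalloc*";
+ *
+ *	ftrace_set_filter(&my_ops, (unsigned char *)buf, strlen(buf), 1);
+ *	register_ftrace_function(&my_ops);
+ *	...
+ *	unregister_ftrace_function(&my_ops);
+ *	ftrace_free_filter(&my_ops);
+ */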
+
+int register_ftrace_command(struct ftrace_func_command *cmd);
+int unregister_ftrace_command(struct ftrace_func_command *cmd);
+
+enum {
+ FTRACE_UPDATE_CALLS = (1 << 0),
+ FTRACE_DISABLE_CALLS = (1 << 1),
+ FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+ FTRACE_START_FUNC_RET = (1 << 3),
+ FTRACE_STOP_FUNC_RET = (1 << 4),
+};
+
+/*
+ * The FTRACE_UPDATE_* enum is used to pass information back
+ * from the ftrace_update_record() and ftrace_test_record()
+ * functions. These are called by the code update routines
+ * to find out what is to be done for a given function.
+ *
+ * IGNORE - The function is already what we want it to be
+ * MAKE_CALL - Start tracing the function
+ * MODIFY_CALL - The call needs to be changed to use a different trampoline
+ * (for example, to start or stop saving regs)
+ * MAKE_NOP - Stop tracing the function
+ */
+enum {
+ FTRACE_UPDATE_IGNORE,
+ FTRACE_UPDATE_MAKE_CALL,
+ FTRACE_UPDATE_MODIFY_CALL,
+ FTRACE_UPDATE_MAKE_NOP,
+};
+
+enum {
+ FTRACE_ITER_FILTER = (1 << 0),
+ FTRACE_ITER_NOTRACE = (1 << 1),
+ FTRACE_ITER_PRINTALL = (1 << 2),
+ FTRACE_ITER_DO_HASH = (1 << 3),
+ FTRACE_ITER_HASH = (1 << 4),
+ FTRACE_ITER_ENABLED = (1 << 5),
+};
+
+void arch_ftrace_update_code(int command);
+
+struct ftrace_rec_iter;
+
+struct ftrace_rec_iter *ftrace_rec_iter_start(void);
+struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
+struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
+
+#define for_ftrace_rec_iter(iter) \
+ for (iter = ftrace_rec_iter_start(); \
+ iter; \
+ iter = ftrace_rec_iter_next(iter))
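+
+/*
+ * Illustrative sketch (not part of the original header): walking all mcount
+ * records, as arch code-modification routines typically do.
+ *
+ *	struct ftrace_rec_iter *iter;
+ *	struct dyn_ftrace *rec;
+ *
+ *	for_ftrace_rec_iter(iter) {
+ *		rec = ftrace_rec_iter_record(iter);
+ *		// inspect or patch the call site at rec->ip
+ *	}
+ */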
+
+
+int ftrace_update_record(struct dyn_ftrace *rec, int enable);
+int ftrace_test_record(struct dyn_ftrace *rec, int enable);
+void ftrace_run_stop_machine(int command);
+unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
+unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
+
+extern ftrace_func_t ftrace_trace_function;
+
+int ftrace_regex_open(struct ftrace_ops *ops, int flag,
+ struct inode *inode, struct file *file);
+ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+int ftrace_regex_release(struct inode *inode, struct file *file);
+
+void __init
+ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);
+
+/* defined in arch */
+extern int ftrace_ip_converted(unsigned long ip);
+extern int ftrace_dyn_arch_init(void);
+extern void ftrace_replace_code(int enable);
+extern int ftrace_update_ftrace_func(ftrace_func_t func);
+extern void ftrace_caller(void);
+extern void ftrace_regs_caller(void);
+extern void ftrace_call(void);
+extern void ftrace_regs_call(void);
+extern void mcount_call(void);
+
+void ftrace_modify_all_code(int command);
+
+#ifndef FTRACE_ADDR
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
+#endif
+
+#ifndef FTRACE_GRAPH_ADDR
+#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
+#endif
+
+#ifndef FTRACE_REGS_ADDR
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
+#else
+# define FTRACE_REGS_ADDR FTRACE_ADDR
+#endif
+#endif
+
+/*
+ * If an arch would like functions that are only traced
+ * by the function graph tracer to jump directly to its own
+ * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
+ * to be that address to jump to.
+ */
+#ifndef FTRACE_GRAPH_TRAMP_ADDR
+#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern void ftrace_graph_caller(void);
+extern int ftrace_enable_ftrace_graph_caller(void);
+extern int ftrace_disable_ftrace_graph_caller(void);
+#else
+static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
+static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
+#endif
+
+/**
+ * ftrace_make_nop - convert code into nop
+ * @mod: module structure if called by module load initialization
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should be calling
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a caller to @addr
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr);
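+
+/*
+ * Illustrative sketch only, not taken from any architecture: the usual
+ * compare-then-write shape of an ftrace_make_nop() implementation.
+ * "expected", "nop", "arch_write_insn()" and MCOUNT_INSN_SIZE stand in for
+ * whatever the architecture actually provides; real implementations patch
+ * the instruction with much more care.
+ *
+ *	unsigned char cur[MCOUNT_INSN_SIZE];
+ *
+ *	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
+ *		return -EFAULT;
+ *	if (memcmp(cur, expected, MCOUNT_INSN_SIZE))
+ *		return -EINVAL;
+ *	if (arch_write_insn((void *)rec->ip, nop, MCOUNT_INSN_SIZE))
+ *		return -EPERM;
+ *	return 0;
+ */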
+
+/**
+ * ftrace_make_call - convert a nop call site into a call to addr
+ * @rec: the mcount call site record
+ * @addr: the address that the call site should call
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a nop
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+/**
+ * ftrace_modify_call - convert from one addr to another (no nop)
+ * @rec: the mcount call site record
+ * @old_addr: the address expected to be currently called to
+ * @addr: the address to change to
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should be a caller to @old_addr
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr);
+#else
+/* Should never be called */
+static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ return -EINVAL;
+}
+#endif
+
+/* May be defined in arch */
+extern int ftrace_arch_read_dyn_info(char *buf, int size);
+
+extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
+
+extern void ftrace_disable_daemon(void);
+extern void ftrace_enable_daemon(void);
+#else /* CONFIG_DYNAMIC_FTRACE */
+static inline int skip_trace(unsigned long ip) { return 0; }
+static inline int ftrace_force_update(void) { return 0; }
+static inline void ftrace_disable_daemon(void) { }
+static inline void ftrace_enable_daemon(void) { }
+static inline void ftrace_release_mod(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) {}
+static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
+{
+ return -EINVAL;
+}
+static inline __init int unregister_ftrace_command(char *cmd_name)
+{
+ return -EINVAL;
+}
+static inline int ftrace_text_reserved(const void *start, const void *end)
+{
+ return 0;
+}
+static inline unsigned long ftrace_location(unsigned long ip)
+{
+ return 0;
+}
+
+/*
+ * Again users of functions that have ftrace_ops may not
+ * have them defined when ftrace is not enabled, but these
+ * functions may still be called. Use a macro instead of inline.
+ */
+#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
+#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
+#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_free_filter(ops) do { } while (0)
+
+static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos) { return -ENODEV; }
+static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
+ size_t cnt, loff_t *ppos) { return -ENODEV; }
+static inline int
+ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
+
+static inline bool is_ftrace_trampoline(unsigned long addr)
+{
+ return false;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/* totally disable ftrace - can not re-enable after this */
+void ftrace_kill(void);
+
+static inline void tracer_disable(void)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ ftrace_enabled = 0;
+#endif
+}
+
+/*
+ * Ftrace disable/restore without lock. Some synchronization mechanism
+ * must be used to prevent ftrace_enabled to be changed between
+ * disable/restore.
+ */
+static inline int __ftrace_enabled_save(void)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ int saved_ftrace_enabled = ftrace_enabled;
+ ftrace_enabled = 0;
+ return saved_ftrace_enabled;
+#else
+ return 0;
+#endif
+}
+
+static inline void __ftrace_enabled_restore(int enabled)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+ ftrace_enabled = enabled;
+#endif
+}
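+
+/*
+ * Illustrative sketch (not part of the original header): the save/restore
+ * pair brackets a region, with the caller providing its own serialization
+ * as noted above.
+ *
+ *	int saved = __ftrace_enabled_save();
+ *	// ... work done with the function tracer disabled ...
+ *	__ftrace_enabled_restore(saved);
+ */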
+
+/* All archs should have this, but we define it for consistency */
+#ifndef ftrace_return_address0
+# define ftrace_return_address0 __builtin_return_address(0)
+#endif
+
+/* Archs may use other ways for ADDR1 and beyond */
+#ifndef ftrace_return_address
+# ifdef CONFIG_FRAME_POINTER
+# define ftrace_return_address(n) __builtin_return_address(n)
+# else
+# define ftrace_return_address(n) 0UL
+# endif
+#endif
+
+#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
+#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
+#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
+#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
+#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
+#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+
+#ifdef CONFIG_IRQSOFF_TRACER
+ extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
+ extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
+#else
+ static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
+ static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
+#endif
+
+#ifdef CONFIG_PREEMPT_TRACER
+ extern void trace_preempt_on(unsigned long a0, unsigned long a1);
+ extern void trace_preempt_off(unsigned long a0, unsigned long a1);
+#else
+/*
+ * Use defines instead of static inlines because some arches will still generate
+ * code for the CALLER_ADDR arguments, when we really want these to be a real nop.
+ */
+# define trace_preempt_on(a0, a1) do { } while (0)
+# define trace_preempt_off(a0, a1) do { } while (0)
+#endif
+
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+extern void ftrace_init(void);
+#else
+static inline void ftrace_init(void) { }
+#endif
+
+/*
+ * Structure that defines an entry function trace.
+ */
+struct ftrace_graph_ent {
+ unsigned long func; /* Current function */
+ int depth;
+};
+
+/*
+ * Structure that defines a return function trace.
+ */
+struct ftrace_graph_ret {
+ unsigned long func; /* Current function */
+ unsigned long long calltime;
+ unsigned long long rettime;
+ /* Number of functions that overran the depth limit for current task */
+ unsigned long overrun;
+ int depth;
+};
+
+/* Type of the callback handlers for tracing function graph*/
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* for init task */
+#define INIT_FTRACE_GRAPH .ret_stack = NULL,
+
+/*
+ * Stack of return addresses for functions
+ * of a thread.
+ * Used in struct thread_info
+ */
+struct ftrace_ret_stack {
+ unsigned long ret;
+ unsigned long func;
+ unsigned long long calltime;
+ unsigned long long subtime;
+ unsigned long fp;
+};
+
+/*
+ * Primary handler of a function return.
+ * It relies on ftrace_return_to_handler.
+ * Defined in entry_32/64.S
+ */
+extern void return_to_handler(void);
+
+extern int
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+ unsigned long frame_pointer);
+
+/*
+ * Sometimes we don't want to trace a function with the function
+ * graph tracer, but we still want it to be traced by the normal function
+ * tracer when the function graph tracer is not configured.
+ */
+#define __notrace_funcgraph notrace
+
+/*
+ * We want to know which functions are hardirq entry points.
+ * That will help us annotate them in the trace output.
+ */
+#define __irq_entry __attribute__((__section__(".irqentry.text")))
+
+/* Limits of hardirq entrypoints */
+extern char __irqentry_text_start[];
+extern char __irqentry_text_end[];
+
+#define FTRACE_NOTRACE_DEPTH 65536
+#define FTRACE_RETFUNC_DEPTH 50
+#define FTRACE_RETSTACK_ALLOC_SIZE 32
+extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc);
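+
+/*
+ * Illustrative sketch (not part of the original header): a made-up pair of
+ * graph callbacks wired up with register_ftrace_graph().
+ *
+ *	static int my_graph_entry(struct ftrace_graph_ent *ent)
+ *	{
+ *		return 1;	// non-zero: trace this function
+ *	}
+ *
+ *	static void my_graph_return(struct ftrace_graph_ret *ret)
+ *	{
+ *		// ret->rettime - ret->calltime spans entry to return of ret->func
+ *	}
+ *
+ *	register_ftrace_graph(my_graph_return, my_graph_entry);
+ *	...
+ *	unregister_ftrace_graph();
+ */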
+
+extern bool ftrace_graph_is_dead(void);
+extern void ftrace_graph_stop(void);
+
+/* The current handlers in use */
+extern trace_func_graph_ret_t ftrace_graph_return;
+extern trace_func_graph_ent_t ftrace_graph_entry;
+
+extern void unregister_ftrace_graph(void);
+
+extern void ftrace_graph_init_task(struct task_struct *t);
+extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
+
+static inline int task_curr_ret_stack(struct task_struct *t)
+{
+ return t->curr_ret_stack;
+}
+
+static inline void pause_graph_tracing(void)
+{
+ atomic_inc(&current->tracing_graph_pause);
+}
+
+static inline void unpause_graph_tracing(void)
+{
+ atomic_dec(&current->tracing_graph_pause);
+}
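+
+/*
+ * Illustrative sketch (not part of the original header): keeping the graph
+ * tracer out of a section of the current task.
+ *
+ *	pause_graph_tracing();
+ *	// ... code whose callees should not show up in the graph trace ...
+ *	unpause_graph_tracing();
+ */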
+#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
+
+#define __notrace_funcgraph
+#define __irq_entry
+#define INIT_FTRACE_GRAPH
+
+static inline void ftrace_graph_init_task(struct task_struct *t) { }
+static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
+
+static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+ trace_func_graph_ent_t entryfunc)
+{
+ return -1;
+}
+static inline void unregister_ftrace_graph(void) { }
+
+static inline int task_curr_ret_stack(struct task_struct *tsk)
+{
+ return -1;
+}
+
+static inline void pause_graph_tracing(void) { }
+static inline void unpause_graph_tracing(void) { }
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_TRACING
+
+/* flags for current->trace */
+enum {
+ TSK_TRACE_FL_TRACE_BIT = 0,
+ TSK_TRACE_FL_GRAPH_BIT = 1,
+};
+enum {
+ TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
+ TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
+};
+
+static inline void set_tsk_trace_trace(struct task_struct *tsk)
+{
+ set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
+}
+
+static inline void clear_tsk_trace_trace(struct task_struct *tsk)
+{
+ clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
+}
+
+static inline int test_tsk_trace_trace(struct task_struct *tsk)
+{
+ return tsk->trace & TSK_TRACE_FL_TRACE;
+}
+
+static inline void set_tsk_trace_graph(struct task_struct *tsk)
+{
+ set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
+}
+
+static inline void clear_tsk_trace_graph(struct task_struct *tsk)
+{
+ clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
+}
+
+static inline int test_tsk_trace_graph(struct task_struct *tsk)
+{
+ return tsk->trace & TSK_TRACE_FL_GRAPH;
+}
+
+enum ftrace_dump_mode;
+
+extern enum ftrace_dump_mode ftrace_dump_on_oops;
+extern int tracepoint_printk;
+
+extern void disable_trace_on_warning(void);
+extern int __disable_trace_on_warning;
+
+#ifdef CONFIG_PREEMPT
+#define INIT_TRACE_RECURSION .trace_recursion = 0,
+#endif
+
+#else /* CONFIG_TRACING */
+static inline void disable_trace_on_warning(void) { }
+#endif /* CONFIG_TRACING */
+
+#ifndef INIT_TRACE_RECURSION
+#define INIT_TRACE_RECURSION
+#endif
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+unsigned long arch_syscall_addr(int nr);
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
+
+#endif /* _LINUX_FTRACE_H */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
new file mode 100644
index 000000000..f9ecf63d4
--- /dev/null
+++ b/include/linux/ftrace_event.h
@@ -0,0 +1,627 @@
+
+#ifndef _LINUX_FTRACE_EVENT_H
+#define _LINUX_FTRACE_EVENT_H
+
+#include <linux/ring_buffer.h>
+#include <linux/trace_seq.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/perf_event.h>
+#include <linux/tracepoint.h>
+
+struct trace_array;
+struct trace_buffer;
+struct tracer;
+struct dentry;
+struct bpf_prog;
+
+struct trace_print_flags {
+ unsigned long mask;
+ const char *name;
+};
+
+struct trace_print_flags_u64 {
+ unsigned long long mask;
+ const char *name;
+};
+
+const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
+ unsigned long flags,
+ const struct trace_print_flags *flag_array);
+
+const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+ const struct trace_print_flags *symbol_array);
+
+#if BITS_PER_LONG == 32
+const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
+ unsigned long long val,
+ const struct trace_print_flags_u64
+ *symbol_array);
+#endif
+
+const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+ unsigned int bitmask_size);
+
+const char *ftrace_print_hex_seq(struct trace_seq *p,
+ const unsigned char *buf, int len);
+
+const char *ftrace_print_array_seq(struct trace_seq *p,
+ const void *buf, int count,
+ size_t el_size);
+
+struct trace_iterator;
+struct trace_event;
+
+int ftrace_raw_output_prep(struct trace_iterator *iter,
+ struct trace_event *event);
+
+/*
+ * The trace entry - the most basic unit of tracing. This is what
+ * is printed in the end as a single line in the trace output, such as:
+ *
+ * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
+ */
+struct trace_entry {
+ unsigned short type;
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
+};
+
+#define FTRACE_MAX_EVENT \
+ ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
+
+/*
+ * Trace iterator - used by the printout routines that present trace
+ * results to users; those routines might sleep, etc.:
+ */
+struct trace_iterator {
+ struct trace_array *tr;
+ struct tracer *trace;
+ struct trace_buffer *trace_buffer;
+ void *private;
+ int cpu_file;
+ struct mutex mutex;
+ struct ring_buffer_iter **buffer_iter;
+ unsigned long iter_flags;
+
+ /* trace_seq for __print_flags() and __print_symbolic() etc. */
+ struct trace_seq tmp_seq;
+
+ cpumask_var_t started;
+
+ /* true when the currently open file is a snapshot */
+ bool snapshot;
+
+ /* The below is zeroed out in pipe_read */
+ struct trace_seq seq;
+ struct trace_entry *ent;
+ unsigned long lost_events;
+ int leftover;
+ int ent_size;
+ int cpu;
+ u64 ts;
+
+ loff_t pos;
+ long idx;
+
+ /* All new fields here will be zeroed out in pipe_read */
+};
+
+enum trace_iter_flags {
+ TRACE_FILE_LAT_FMT = 1,
+ TRACE_FILE_ANNOTATE = 2,
+ TRACE_FILE_TIME_IN_NS = 4,
+};
+
+
+typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
+ int flags, struct trace_event *event);
+
+struct trace_event_functions {
+ trace_print_func trace;
+ trace_print_func raw;
+ trace_print_func hex;
+ trace_print_func binary;
+};
+
+struct trace_event {
+ struct hlist_node node;
+ struct list_head list;
+ int type;
+ struct trace_event_functions *funcs;
+};
+
+extern int register_ftrace_event(struct trace_event *event);
+extern int unregister_ftrace_event(struct trace_event *event);
+
+/* Return values for print_line callback */
+enum print_line_t {
+ TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
+ TRACE_TYPE_HANDLED = 1,
+ TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
+ TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
+};
+
+/*
+ * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
+ * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
+ * simplifies those functions and keeps them in sync.
+ */
+static inline enum print_line_t trace_handle_return(struct trace_seq *s)
+{
+ return trace_seq_has_overflowed(s) ?
+ TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
+}
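+
+/*
+ * Illustrative sketch (not part of the original header): a made-up output
+ * callback built on trace_handle_return().
+ *
+ *	static enum print_line_t my_event_trace(struct trace_iterator *iter,
+ *						int flags,
+ *						struct trace_event *event)
+ *	{
+ *		trace_seq_printf(&iter->seq, "my event\n");
+ *		return trace_handle_return(&iter->seq);
+ *	}
+ */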
+
+void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned long flags,
+ int pc);
+struct ftrace_event_file;
+
+struct ring_buffer_event *
+trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
+ struct ftrace_event_file *ftrace_file,
+ int type, unsigned long len,
+ unsigned long flags, int pc);
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
+ int type, unsigned long len,
+ unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ unsigned long flags, int pc,
+ struct pt_regs *regs);
+void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+
+void tracing_record_cmdline(struct task_struct *tsk);
+
+int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);
+
+struct event_filter;
+
+enum trace_reg {
+ TRACE_REG_REGISTER,
+ TRACE_REG_UNREGISTER,
+#ifdef CONFIG_PERF_EVENTS
+ TRACE_REG_PERF_REGISTER,
+ TRACE_REG_PERF_UNREGISTER,
+ TRACE_REG_PERF_OPEN,
+ TRACE_REG_PERF_CLOSE,
+ TRACE_REG_PERF_ADD,
+ TRACE_REG_PERF_DEL,
+#endif
+};
+
+struct ftrace_event_call;
+
+struct ftrace_event_class {
+ const char *system;
+ void *probe;
+#ifdef CONFIG_PERF_EVENTS
+ void *perf_probe;
+#endif
+ int (*reg)(struct ftrace_event_call *event,
+ enum trace_reg type, void *data);
+ int (*define_fields)(struct ftrace_event_call *);
+ struct list_head *(*get_fields)(struct ftrace_event_call *);
+ struct list_head fields;
+ int (*raw_init)(struct ftrace_event_call *);
+};
+
+extern int ftrace_event_reg(struct ftrace_event_call *event,
+ enum trace_reg type, void *data);
+
+int ftrace_output_event(struct trace_iterator *iter, struct ftrace_event_call *event,
+ char *fmt, ...);
+
+int ftrace_event_define_field(struct ftrace_event_call *call,
+ char *type, int len, char *item, int offset,
+ int field_size, int sign, int filter);
+
+struct ftrace_event_buffer {
+ struct ring_buffer *buffer;
+ struct ring_buffer_event *event;
+ struct ftrace_event_file *ftrace_file;
+ void *entry;
+ unsigned long flags;
+ int pc;
+};
+
+void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer,
+ struct ftrace_event_file *ftrace_file,
+ unsigned long len);
+
+void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer);
+
+enum {
+ TRACE_EVENT_FL_FILTERED_BIT,
+ TRACE_EVENT_FL_CAP_ANY_BIT,
+ TRACE_EVENT_FL_NO_SET_FILTER_BIT,
+ TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
+ TRACE_EVENT_FL_WAS_ENABLED_BIT,
+ TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
+ TRACE_EVENT_FL_TRACEPOINT_BIT,
+ TRACE_EVENT_FL_KPROBE_BIT,
+};
+
+/*
+ * Event flags:
+ * FILTERED - The event has a filter attached
+ * CAP_ANY - Any user can enable for perf
+ * NO_SET_FILTER - Set when filter has error and is to be ignored
+ * IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
+ * WAS_ENABLED - Set and stays set when an event was ever enabled
+ * (used for module unloading, if a module event is enabled,
+ * it is best to clear the buffers that used it).
+ * USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ * TRACEPOINT - Event is a tracepoint
+ * KPROBE - Event is a kprobe
+ */
+enum {
+ TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+ TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+ TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
+ TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
+ TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
+ TRACE_EVENT_FL_USE_CALL_FILTER = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
+ TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
+ TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT),
+};
+
+struct ftrace_event_call {
+ struct list_head list;
+ struct ftrace_event_class *class;
+ union {
+ char *name;
+ /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
+ struct tracepoint *tp;
+ };
+ struct trace_event event;
+ char *print_fmt;
+ struct event_filter *filter;
+ void *mod;
+ void *data;
+ /*
+ * bit 0: filter_active
+ * bit 1: allow trace by non root (cap any)
+ * bit 2: failed to apply filter
+ * bit 3: ftrace internal event (do not enable)
+ * bit 4: Event was enabled by module
+ * bit 5: use call filter rather than file filter
+ * bit 6: Event is a tracepoint
+ */
+ int flags; /* static flags of different events */
+
+#ifdef CONFIG_PERF_EVENTS
+ int perf_refcount;
+ struct hlist_head __percpu *perf_events;
+ struct bpf_prog *prog;
+
+ int (*perf_perm)(struct ftrace_event_call *,
+ struct perf_event *);
+#endif
+};
+
+static inline const char *
+ftrace_event_name(struct ftrace_event_call *call)
+{
+ if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
+ return call->tp ? call->tp->name : NULL;
+ else
+ return call->name;
+}
+
+struct trace_array;
+struct ftrace_subsystem_dir;
+
+enum {
+ FTRACE_EVENT_FL_ENABLED_BIT,
+ FTRACE_EVENT_FL_RECORDED_CMD_BIT,
+ FTRACE_EVENT_FL_FILTERED_BIT,
+ FTRACE_EVENT_FL_NO_SET_FILTER_BIT,
+ FTRACE_EVENT_FL_SOFT_MODE_BIT,
+ FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
+ FTRACE_EVENT_FL_TRIGGER_MODE_BIT,
+ FTRACE_EVENT_FL_TRIGGER_COND_BIT,
+};
+
+/*
+ * Ftrace event file flags:
+ * ENABLED - The event is enabled
+ * RECORDED_CMD - The comms should be recorded at sched_switch
+ * FILTERED - The event has a filter attached
+ * NO_SET_FILTER - Set when filter has error and is to be ignored
+ * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
+ * SOFT_DISABLED - When set, do not trace the event (even though its
+ * tracepoint may be enabled)
+ * TRIGGER_MODE - When set, invoke the triggers associated with the event
+ * TRIGGER_COND - When set, one or more triggers has an associated filter
+ */
+enum {
+ FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
+ FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
+ FTRACE_EVENT_FL_FILTERED = (1 << FTRACE_EVENT_FL_FILTERED_BIT),
+ FTRACE_EVENT_FL_NO_SET_FILTER = (1 << FTRACE_EVENT_FL_NO_SET_FILTER_BIT),
+ FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
+ FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
+ FTRACE_EVENT_FL_TRIGGER_MODE = (1 << FTRACE_EVENT_FL_TRIGGER_MODE_BIT),
+ FTRACE_EVENT_FL_TRIGGER_COND = (1 << FTRACE_EVENT_FL_TRIGGER_COND_BIT),
+};
+
+struct ftrace_event_file {
+ struct list_head list;
+ struct ftrace_event_call *event_call;
+ struct event_filter *filter;
+ struct dentry *dir;
+ struct trace_array *tr;
+ struct ftrace_subsystem_dir *system;
+ struct list_head triggers;
+
+ /*
+ * 32 bit flags:
+ * bit 0: enabled
+ * bit 1: enabled cmd record
+ * bit 2: enable/disable with the soft disable bit
+ * bit 3: soft disabled
+ * bit 4: trigger enabled
+ *
+ * Note: The bits must be set atomically to prevent races
+ * from other writers. Reads of flags do not need to be in
+ * sync as they occur in critical sections. But the way flags
+ * is currently used, these changes do not affect the code
+ * except that when a change is made, it may have a slight
+ * delay in propagating the changes to other CPUs due to
+ * caching and such. Which is mostly OK ;-)
+ */
+ unsigned long flags;
+ atomic_t sm_ref; /* soft-mode reference counter */
+ atomic_t tm_ref; /* trigger-mode reference counter */
+};
+
+#define __TRACE_EVENT_FLAGS(name, value) \
+ static int __init trace_init_flags_##name(void) \
+ { \
+ event_##name.flags |= value; \
+ return 0; \
+ } \
+ early_initcall(trace_init_flags_##name);
+
+#define __TRACE_EVENT_PERF_PERM(name, expr...) \
+ static int perf_perm_##name(struct ftrace_event_call *tp_event, \
+ struct perf_event *p_event) \
+ { \
+ return ({ expr; }); \
+ } \
+ static int __init trace_init_perf_perm_##name(void) \
+ { \
+ event_##name.perf_perm = &perf_perm_##name; \
+ return 0; \
+ } \
+ early_initcall(trace_init_perf_perm_##name);
+
+#define PERF_MAX_TRACE_SIZE 2048
+
+#define MAX_FILTER_STR_VAL 256 /* Should handle KSYM_SYMBOL_LEN */
+
+enum event_trigger_type {
+ ETT_NONE = (0),
+ ETT_TRACE_ONOFF = (1 << 0),
+ ETT_SNAPSHOT = (1 << 1),
+ ETT_STACKTRACE = (1 << 2),
+ ETT_EVENT_ENABLE = (1 << 3),
+};
+
+extern int filter_match_preds(struct event_filter *filter, void *rec);
+
+extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+extern int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+extern enum event_trigger_type event_triggers_call(struct ftrace_event_file *file,
+ void *rec);
+extern void event_triggers_post_call(struct ftrace_event_file *file,
+ enum event_trigger_type tt);
+
+/**
+ * ftrace_trigger_soft_disabled - do triggers and test if soft disabled
+ * @file: The file pointer of the event to test
+ *
+ * If any triggers without filters are attached to this event, they
+ * will be called here. If the event is soft disabled and has no
+ * triggers that require testing the fields, it will return true,
+ * otherwise false.
+ */
+static inline bool
+ftrace_trigger_soft_disabled(struct ftrace_event_file *file)
+{
+ unsigned long eflags = file->flags;
+
+ if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
+ if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
+ event_triggers_call(file, NULL);
+ if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Helper function for event_trigger_unlock_commit{_regs}().
+ * If there are event triggers attached to this event that require
+ * filtering against its fields, then they will be called as the
+ * entry already holds the field information of the current event.
+ *
+ * It also checks if the event should be discarded or not.
+ * It is to be discarded if the event is soft disabled and the
+ * event was only recorded to process triggers, or if the event
+ * filter is active and this event did not match the filters.
+ *
+ * Returns true if the event is discarded, false otherwise.
+ */
+static inline bool
+__event_trigger_test_discard(struct ftrace_event_file *file,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ void *entry,
+ enum event_trigger_type *tt)
+{
+ unsigned long eflags = file->flags;
+
+ if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
+ *tt = event_triggers_call(file, entry);
+
+ if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags))
+ ring_buffer_discard_commit(buffer, event);
+ else if (!filter_check_discard(file, entry, buffer, event))
+ return false;
+
+ return true;
+}
+
+/**
+ * event_trigger_unlock_commit - handle triggers and finish event commit
+ * @file: The file pointer associated with the event
+ * @buffer: The ring buffer that the event is being written to
+ * @event: The event meta data in the ring buffer
+ * @entry: The event itself
+ * @irq_flags: The state of the interrupts at the start of the event
+ * @pc: The state of the preempt count at the start of the event.
+ *
+ * This is a helper function to handle triggers that require data
+ * from the event itself. It also tests the event against filters and
+ * if the event is soft disabled and should be discarded.
+ */
+static inline void
+event_trigger_unlock_commit(struct ftrace_event_file *file,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ void *entry, unsigned long irq_flags, int pc)
+{
+ enum event_trigger_type tt = ETT_NONE;
+
+ if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+ trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+
+ if (tt)
+ event_triggers_post_call(file, tt);
+}
+
+/**
+ * event_trigger_unlock_commit_regs - handle triggers and finish event commit
+ * @file: The file pointer associated with the event
+ * @buffer: The ring buffer that the event is being written to
+ * @event: The event meta data in the ring buffer
+ * @entry: The event itself
+ * @irq_flags: The state of the interrupts at the start of the event
+ * @pc: The state of the preempt count at the start of the event.
+ *
+ * This is a helper function to handle triggers that require data
+ * from the event itself. It also tests the event against filters and
+ * if the event is soft disabled and should be discarded.
+ *
+ * Same as event_trigger_unlock_commit() but calls
+ * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
+ */
+static inline void
+event_trigger_unlock_commit_regs(struct ftrace_event_file *file,
+ struct ring_buffer *buffer,
+ struct ring_buffer_event *event,
+ void *entry, unsigned long irq_flags, int pc,
+ struct pt_regs *regs)
+{
+ enum event_trigger_type tt = ETT_NONE;
+
+ if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+ trace_buffer_unlock_commit_regs(buffer, event,
+ irq_flags, pc, regs);
+
+ if (tt)
+ event_triggers_post_call(file, tt);
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
+#else
+static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
+{
+ return 1;
+}
+#endif
+
+enum {
+ FILTER_OTHER = 0,
+ FILTER_STATIC_STRING,
+ FILTER_DYN_STRING,
+ FILTER_PTR_STRING,
+ FILTER_TRACE_FN,
+};
+
+extern int trace_event_raw_init(struct ftrace_event_call *call);
+extern int trace_define_field(struct ftrace_event_call *call, const char *type,
+ const char *name, int offset, int size,
+ int is_signed, int filter_type);
+extern int trace_add_event_call(struct ftrace_event_call *call);
+extern int trace_remove_event_call(struct ftrace_event_call *call);
+
+#define is_signed_type(type) (((type)(-1)) < (type)1)
+
+int trace_set_clr_event(const char *system, const char *event, int set);
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to initialize the static variable with fmt when it is not a
+ * constant, even with the outer if statement optimized out.
+ */
+#define event_trace_printk(ip, fmt, args...) \
+do { \
+ __trace_printk_check_format(fmt, ##args); \
+ tracing_record_cmdline(current); \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_bprintk(ip, trace_printk_fmt, ##args); \
+ } else \
+ __trace_printk(ip, fmt, ##args); \
+} while (0)
+
+#ifdef CONFIG_PERF_EVENTS
+struct perf_event;
+
+DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
+
+extern int perf_trace_init(struct perf_event *event);
+extern void perf_trace_destroy(struct perf_event *event);
+extern int perf_trace_add(struct perf_event *event, int flags);
+extern void perf_trace_del(struct perf_event *event, int flags);
+extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
+ char *filter_str);
+extern void ftrace_profile_free_filter(struct perf_event *event);
+extern void *perf_trace_buf_prepare(int size, unsigned short type,
+ struct pt_regs **regs, int *rctxp);
+
+static inline void
+perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
+ u64 count, struct pt_regs *regs, void *head,
+ struct task_struct *task)
+{
+ perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task);
+}
+#endif
+
+#endif /* _LINUX_FTRACE_EVENT_H */
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
new file mode 100644
index 000000000..dca7bf8cf
--- /dev/null
+++ b/include/linux/ftrace_irq.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_FTRACE_IRQ_H
+#define _LINUX_FTRACE_IRQ_H
+
+
+#ifdef CONFIG_FTRACE_NMI_ENTER
+extern void ftrace_nmi_enter(void);
+extern void ftrace_nmi_exit(void);
+#else
+static inline void ftrace_nmi_enter(void) { }
+static inline void ftrace_nmi_exit(void) { }
+#endif
+
+#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/futex.h b/include/linux/futex.h
new file mode 100644
index 000000000..6435f46d6
--- /dev/null
+++ b/include/linux/futex.h
@@ -0,0 +1,71 @@
+#ifndef _LINUX_FUTEX_H
+#define _LINUX_FUTEX_H
+
+#include <uapi/linux/futex.h>
+
+struct inode;
+struct mm_struct;
+struct task_struct;
+union ktime;
+
+long do_futex(u32 __user *uaddr, int op, u32 val, union ktime *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
+
+extern int
+handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
+
+/*
+ * Futexes are matched on equal values of this key.
+ * The key type depends on whether it's a shared or private mapping.
+ * Don't rearrange members without looking at hash_futex().
+ *
+ * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
+ * We use the two low order bits of offset to tell what kind of key it is:
+ * 00 : Private process futex (PTHREAD_PROCESS_PRIVATE)
+ * (no reference on an inode or mm)
+ * 01 : Shared futex (PTHREAD_PROCESS_SHARED)
+ * mapped on a file (reference on the underlying inode)
+ * 10 : Shared futex (PTHREAD_PROCESS_SHARED)
+ * (but private mapping on an mm, and reference taken on it)
+ */
+
+#define FUT_OFF_INODE 1 /* We set bit 0 if key has a reference on inode */
+#define FUT_OFF_MMSHARED 2 /* We set bit 1 if key has a reference on mm */
+
+union futex_key {
+ struct {
+ unsigned long pgoff;
+ struct inode *inode;
+ int offset;
+ } shared;
+ struct {
+ unsigned long address;
+ struct mm_struct *mm;
+ int offset;
+ } private;
+ struct {
+ unsigned long word;
+ void *ptr;
+ int offset;
+ } both;
+};
+
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+
+#ifdef CONFIG_FUTEX
+extern void exit_robust_list(struct task_struct *curr);
+extern void exit_pi_state_list(struct task_struct *curr);
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+#define futex_cmpxchg_enabled 1
+#else
+extern int futex_cmpxchg_enabled;
+#endif
+#else
+static inline void exit_robust_list(struct task_struct *curr)
+{
+}
+static inline void exit_pi_state_list(struct task_struct *curr)
+{
+}
+#endif
+#endif
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
new file mode 100644
index 000000000..0408545bc
--- /dev/null
+++ b/include/linux/fwnode.h
@@ -0,0 +1,27 @@
+/*
+ * fwnode.h - Firmware device node object handle type definition.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_FWNODE_H_
+#define _LINUX_FWNODE_H_
+
+enum fwnode_type {
+ FWNODE_INVALID = 0,
+ FWNODE_OF,
+ FWNODE_ACPI,
+ FWNODE_PDATA,
+};
+
+struct fwnode_handle {
+ enum fwnode_type type;
+ struct fwnode_handle *secondary;
+};
+
+#endif
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
new file mode 100644
index 000000000..bb7de09e8
--- /dev/null
+++ b/include/linux/gameport.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 1999-2002 Vojtech Pavlik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _GAMEPORT_H
+#define _GAMEPORT_H
+
+#include <asm/io.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <uapi/linux/gameport.h>
+
+struct gameport {
+
+ void *port_data; /* Private pointer for gameport drivers */
+ char name[32];
+ char phys[32];
+
+ int io;
+ int speed;
+ int fuzz;
+
+ void (*trigger)(struct gameport *);
+ unsigned char (*read)(struct gameport *);
+ int (*cooked_read)(struct gameport *, int *, int *);
+ int (*calibrate)(struct gameport *, int *, int *);
+ int (*open)(struct gameport *, int);
+ void (*close)(struct gameport *);
+
+ struct timer_list poll_timer;
+ unsigned int poll_interval; /* in msecs */
+ spinlock_t timer_lock;
+ unsigned int poll_cnt;
+ void (*poll_handler)(struct gameport *);
+
+ struct gameport *parent, *child;
+
+ struct gameport_driver *drv;
+ struct mutex drv_mutex; /* protects gameport->drv so attributes can pin the driver */
+
+ struct device dev;
+
+ struct list_head node;
+};
+#define to_gameport_port(d) container_of(d, struct gameport, dev)
+
+struct gameport_driver {
+ const char *description;
+
+ int (*connect)(struct gameport *, struct gameport_driver *drv);
+ int (*reconnect)(struct gameport *);
+ void (*disconnect)(struct gameport *);
+
+ struct device_driver driver;
+
+ bool ignore;
+};
+#define to_gameport_driver(d) container_of(d, struct gameport_driver, driver)
+
+int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
+void gameport_close(struct gameport *gameport);
+
+#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+
+void __gameport_register_port(struct gameport *gameport, struct module *owner);
+/* use a define to avoid include chaining to get THIS_MODULE */
+#define gameport_register_port(gameport) \
+ __gameport_register_port(gameport, THIS_MODULE)
+
+void gameport_unregister_port(struct gameport *gameport);
+
+__printf(2, 3)
+void gameport_set_phys(struct gameport *gameport, const char *fmt, ...);
+
+#else
+
+static inline void gameport_register_port(struct gameport *gameport)
+{
+ return;
+}
+
+static inline void gameport_unregister_port(struct gameport *gameport)
+{
+ return;
+}
+
+static inline __printf(2, 3)
+void gameport_set_phys(struct gameport *gameport, const char *fmt, ...)
+{
+ return;
+}
+
+#endif
+
+static inline struct gameport *gameport_allocate_port(void)
+{
+ struct gameport *gameport = kzalloc(sizeof(struct gameport), GFP_KERNEL);
+
+ return gameport;
+}
+
+static inline void gameport_free_port(struct gameport *gameport)
+{
+ kfree(gameport);
+}
+
+static inline void gameport_set_name(struct gameport *gameport, const char *name)
+{
+ strlcpy(gameport->name, name, sizeof(gameport->name));
+}
+
+/*
+ * Use the following functions to manipulate gameport's per-port
+ * driver-specific data.
+ */
+static inline void *gameport_get_drvdata(struct gameport *gameport)
+{
+ return dev_get_drvdata(&gameport->dev);
+}
+
+static inline void gameport_set_drvdata(struct gameport *gameport, void *data)
+{
+ dev_set_drvdata(&gameport->dev, data);
+}
+
+/*
+ * Use the following functions to pin gameport's driver in process context
+ */
+static inline int gameport_pin_driver(struct gameport *gameport)
+{
+ return mutex_lock_interruptible(&gameport->drv_mutex);
+}
+
+static inline void gameport_unpin_driver(struct gameport *gameport)
+{
+ mutex_unlock(&gameport->drv_mutex);
+}
+
+int __must_check __gameport_register_driver(struct gameport_driver *drv,
+ struct module *owner, const char *mod_name);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define gameport_register_driver(drv) \
+ __gameport_register_driver(drv, THIS_MODULE, KBUILD_MODNAME)
+
+void gameport_unregister_driver(struct gameport_driver *drv);
+
+/**
+ * module_gameport_driver() - Helper macro for registering a gameport driver
+ * @__gameport_driver: gameport_driver struct
+ *
+ * Helper macro for gameport drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module may
+ * only use this macro once, and calling it replaces module_init() and
+ * module_exit().
+ */
+#define module_gameport_driver(__gameport_driver) \
+ module_driver(__gameport_driver, gameport_register_driver, \
+ gameport_unregister_driver)
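+
+/*
+ * Illustrative sketch (not part of the original header): a minimal, made-up
+ * driver registered with module_gameport_driver().
+ *
+ *	static int my_connect(struct gameport *gameport,
+ *			      struct gameport_driver *drv)
+ *	{
+ *		return gameport_open(gameport, drv, GAMEPORT_MODE_RAW);
+ *	}
+ *
+ *	static void my_disconnect(struct gameport *gameport)
+ *	{
+ *		gameport_close(gameport);
+ *	}
+ *
+ *	static struct gameport_driver my_drv = {
+ *		.driver		= { .name = "my_gameport_drv" },
+ *		.description	= "Example gameport driver",
+ *		.connect	= my_connect,
+ *		.disconnect	= my_disconnect,
+ *	};
+ *
+ *	module_gameport_driver(my_drv);
+ */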
+
+
+static inline void gameport_trigger(struct gameport *gameport)
+{
+ if (gameport->trigger)
+ gameport->trigger(gameport);
+ else
+ outb(0xff, gameport->io);
+}
+
+static inline unsigned char gameport_read(struct gameport *gameport)
+{
+ if (gameport->read)
+ return gameport->read(gameport);
+ else
+ return inb(gameport->io);
+}
+
+static inline int gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
+{
+ if (gameport->cooked_read)
+ return gameport->cooked_read(gameport, axes, buttons);
+ else
+ return -1;
+}
+
+static inline int gameport_calibrate(struct gameport *gameport, int *axes, int *max)
+{
+ if (gameport->calibrate)
+ return gameport->calibrate(gameport, axes, max);
+ else
+ return -1;
+}
+
+static inline int gameport_time(struct gameport *gameport, int time)
+{
+ return (time * gameport->speed) / 1000;
+}
+
+static inline void gameport_set_poll_handler(struct gameport *gameport, void (*handler)(struct gameport *))
+{
+ gameport->poll_handler = handler;
+}
+
+static inline void gameport_set_poll_interval(struct gameport *gameport, unsigned int msecs)
+{
+ gameport->poll_interval = msecs;
+}
+
+void gameport_start_polling(struct gameport *gameport);
+void gameport_stop_polling(struct gameport *gameport);
+
+#endif
diff --git a/include/linux/gcd.h b/include/linux/gcd.h
new file mode 100644
index 000000000..69f5e8a01
--- /dev/null
+++ b/include/linux/gcd.h
@@ -0,0 +1,8 @@
+#ifndef _GCD_H
+#define _GCD_H
+
+#include <linux/compiler.h>
+
+unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__;
+
+#endif /* _GCD_H */
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
new file mode 100644
index 000000000..1ccaab44a
--- /dev/null
+++ b/include/linux/genalloc.h
@@ -0,0 +1,137 @@
+/*
+ * Basic general purpose allocator for managing special purpose
+ * memory, for example, memory that is not managed by the regular
+ * kmalloc/kfree interface. Uses for this include on-device special
+ * memory, uncached memory, etc.
+ *
+ * It is safe to use the allocator in NMI handlers and other special
+ * unblockable contexts that could otherwise deadlock on locks. This
+ * is implemented by using atomic operations and retries on any
+ * conflicts. The disadvantage is that there may be livelocks in
+ * extreme cases. For better scalability, one allocator can be used
+ * for each CPU.
+ *
+ * The lockless operation only works if there is enough memory
+ * available. If new memory is added to the pool, a lock still has to
+ * be taken. So any user relying on locklessness has to ensure
+ * that sufficient memory is preallocated.
+ *
+ * The basic atomic operation of this allocator is cmpxchg on long.
+ * On architectures that don't have an NMI-safe cmpxchg implementation,
+ * the allocator can NOT be used in NMI handlers. So code that uses the
+ * allocator in an NMI handler should depend on
+ * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+
+#ifndef __GENALLOC_H__
+#define __GENALLOC_H__
+
+#include <linux/spinlock_types.h>
+
+struct device;
+struct device_node;
+
+/**
+ * Allocation callback function type definition
+ * @map: Pointer to bitmap
+ * @size: The bitmap size in bits
+ * @start: The bit number to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: optional additional data used by @genpool_algo_t
+ */
+typedef unsigned long (*genpool_algo_t)(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ void *data);
+
+/*
+ * General purpose special memory pool descriptor.
+ */
+struct gen_pool {
+ spinlock_t lock;
+ struct list_head chunks; /* list of chunks in this pool */
+ int min_alloc_order; /* minimum allocation order */
+
+ genpool_algo_t algo; /* allocation function */
+ void *data;
+};
+
+/*
+ * General purpose special memory pool chunk descriptor.
+ */
+struct gen_pool_chunk {
+ struct list_head next_chunk; /* next chunk in pool */
+ atomic_t avail;
+ phys_addr_t phys_addr; /* physical starting address of memory chunk */
+ unsigned long start_addr; /* start address of memory chunk */
+ unsigned long end_addr; /* end address of memory chunk (inclusive) */
+ unsigned long bits[0]; /* bitmap for allocating memory chunk */
+};
+
+extern struct gen_pool *gen_pool_create(int, int);
+extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
+extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
+ size_t, int);
+/**
+ * gen_pool_add - add a new chunk of special memory to the pool
+ * @pool: pool to add new memory chunk to
+ * @addr: starting address of memory chunk to add to pool
+ * @size: size in bytes of the memory chunk to add to pool
+ * @nid: node id of the node the chunk structure and bitmap should be
+ * allocated on, or -1
+ *
+ * Add a new chunk of special memory to the specified pool.
+ *
+ * Returns 0 on success or a -ve errno on failure.
+ */
+static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
+ size_t size, int nid)
+{
+ return gen_pool_add_virt(pool, addr, -1, size, nid);
+}
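+
+/*
+ * Usage sketch (illustrative only): typical pool life cycle, assuming 'vaddr'
+ * points at 64 KiB of driver-owned special memory (SZ_64K from
+ * <linux/sizes.h>) and a 256-byte minimum allocation size:
+ *
+ *	struct gen_pool *pool = gen_pool_create(8, -1);
+ *
+ *	if (!pool || gen_pool_add(pool, (unsigned long)vaddr, SZ_64K, -1))
+ *		goto fail;
+ *	addr = gen_pool_alloc(pool, 512);
+ *	if (addr) {
+ *		...
+ *		gen_pool_free(pool, addr, 512);
+ *	}
+ *	gen_pool_destroy(pool);
+ */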
+extern void gen_pool_destroy(struct gen_pool *);
+extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
+ dma_addr_t *dma);
+extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
+extern void gen_pool_for_each_chunk(struct gen_pool *,
+ void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
+extern size_t gen_pool_avail(struct gen_pool *);
+extern size_t gen_pool_size(struct gen_pool *);
+
+extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
+ void *data);
+
+extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data);
+
+extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+ unsigned long size, unsigned long start, unsigned int nr,
+ void *data);
+
+extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr, void *data);
+
+extern struct gen_pool *devm_gen_pool_create(struct device *dev,
+ int min_alloc_order, int nid);
+extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+ size_t size);
+
+#ifdef CONFIG_OF
+extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+ const char *propname, int index);
+#else
+static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+ const char *propname, int index)
+{
+ return NULL;
+}
+#endif
+#endif /* __GENALLOC_H__ */
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
new file mode 100644
index 000000000..09460d6d6
--- /dev/null
+++ b/include/linux/genetlink.h
@@ -0,0 +1,42 @@
+#ifndef __LINUX_GENERIC_NETLINK_H
+#define __LINUX_GENERIC_NETLINK_H
+
+#include <uapi/linux/genetlink.h>
+
+
+/* All generic netlink requests are serialized by a global lock. */
+extern void genl_lock(void);
+extern void genl_unlock(void);
+#ifdef CONFIG_LOCKDEP
+extern int lockdep_genl_is_held(void);
+#endif
+
+/* for synchronisation between af_netlink and genetlink */
+extern atomic_t genl_sk_destructing_cnt;
+extern wait_queue_head_t genl_sk_destructing_waitq;
+
+/**
+ * rcu_dereference_genl - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check that the caller either holds
+ * rcu_read_lock() or the genl mutex. Note: please prefer genl_dereference()
+ * or rcu_dereference().
+ */
+#define rcu_dereference_genl(p) \
+ rcu_dereference_check(p, lockdep_genl_is_held())
+
+/**
+ * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * the caller holds the genl mutex.
+ */
+#define genl_dereference(p) \
+ rcu_dereference_protected(p, lockdep_genl_is_held())
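+
+/*
+ * Usage sketch (illustrative only), for a hypothetical __rcu pointer
+ * 'gbl_conf' that is only ever updated with the genl mutex held:
+ *
+ *	rcu_read_lock();
+ *	cfg = rcu_dereference_genl(gbl_conf);	(reader side)
+ *	rcu_read_unlock();
+ *
+ *	genl_lock();
+ *	cfg = genl_dereference(gbl_conf);	(updater side, genl mutex held)
+ *	genl_unlock();
+ */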
+
+#define MODULE_ALIAS_GENL_FAMILY(family)\
+ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
+
+#endif /* __LINUX_GENERIC_NETLINK_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
new file mode 100644
index 000000000..ec274e0f4
--- /dev/null
+++ b/include/linux/genhd.h
@@ -0,0 +1,733 @@
+#ifndef _LINUX_GENHD_H
+#define _LINUX_GENHD_H
+
+/*
+ * genhd.h Copyright (C) 1992 Drew Eckhardt
+ * Generic hard disk header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_BLOCK
+
+#define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev)
+#define dev_to_part(device) container_of((device), struct hd_struct, __dev)
+#define disk_to_dev(disk) (&(disk)->part0.__dev)
+#define part_to_dev(part) (&((part)->__dev))
+
+extern struct device_type part_type;
+extern struct kobject *block_depr;
+extern struct class block_class;
+
+enum {
+/* These three have identical behaviour; use the second one if DOS FDISK gets
+ confused about extended/logical partitions starting past cylinder 1023. */
+ DOS_EXTENDED_PARTITION = 5,
+ LINUX_EXTENDED_PARTITION = 0x85,
+ WIN98_EXTENDED_PARTITION = 0x0f,
+
+ SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION,
+
+ LINUX_SWAP_PARTITION = 0x82,
+ LINUX_DATA_PARTITION = 0x83,
+ LINUX_LVM_PARTITION = 0x8e,
+ LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
+
+ SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION,
+ NEW_SOLARIS_X86_PARTITION = 0xbf,
+
+ DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
+ DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
+ DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
+ EZD_PARTITION = 0x55, /* EZ-DRIVE */
+
+ FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
+ OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
+ NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
+ BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
+ MINIX_PARTITION = 0x81, /* Minix Partition ID */
+ UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
+};
+
+#define DISK_MAX_PARTS 256
+#define DISK_NAME_LEN 32
+
+#include <linux/major.h>
+#include <linux/device.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/workqueue.h>
+
+struct partition {
+ unsigned char boot_ind; /* 0x80 - active */
+ unsigned char head; /* starting head */
+ unsigned char sector; /* starting sector */
+ unsigned char cyl; /* starting cylinder */
+ unsigned char sys_ind; /* What partition type */
+ unsigned char end_head; /* end head */
+ unsigned char end_sector; /* end sector */
+ unsigned char end_cyl; /* end cylinder */
+ __le32 start_sect; /* starting sector counting from 0 */
+ __le32 nr_sects; /* nr of sectors in partition */
+} __attribute__((packed));
+
+struct disk_stats {
+ unsigned long sectors[2]; /* READs and WRITEs */
+ unsigned long ios[2];
+ unsigned long merges[2];
+ unsigned long ticks[2];
+ unsigned long io_ticks;
+ unsigned long time_in_queue;
+};
+
+#define PARTITION_META_INFO_VOLNAMELTH 64
+/*
+ * Enough for the string representation of any kind of UUID plus a trailing NUL.
+ * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
+ */
+#define PARTITION_META_INFO_UUIDLTH 37
+
+struct partition_meta_info {
+ char uuid[PARTITION_META_INFO_UUIDLTH];
+ u8 volname[PARTITION_META_INFO_VOLNAMELTH];
+};
+
+struct hd_struct {
+ sector_t start_sect;
+ /*
+	 * nr_sects is protected by a sequence counter. One might extend a
+	 * partition while IO is happening to it, and the update of nr_sects
+	 * can be non-atomic on 32-bit machines with a 64-bit sector_t.
+ */
+ sector_t nr_sects;
+ seqcount_t nr_sects_seq;
+ sector_t alignment_offset;
+ unsigned int discard_alignment;
+ struct device __dev;
+ struct kobject *holder_dir;
+ int policy, partno;
+ struct partition_meta_info *info;
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ int make_it_fail;
+#endif
+ unsigned long stamp;
+ atomic_t in_flight[2];
+#ifdef CONFIG_SMP
+ struct disk_stats __percpu *dkstats;
+#else
+ struct disk_stats dkstats;
+#endif
+ atomic_t ref;
+ struct rcu_head rcu_head;
+};
+
+#define GENHD_FL_REMOVABLE 1
+/* 2 is unused */
+#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4
+#define GENHD_FL_CD 8
+#define GENHD_FL_UP 16
+#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
+#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
+#define GENHD_FL_NATIVE_CAPACITY 128
+#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
+#define GENHD_FL_NO_PART_SCAN 512
+
+enum {
+ DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
+ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
+};
+
+#define BLK_SCSI_MAX_CMDS (256)
+#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+
+struct blk_scsi_cmd_filter {
+ unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
+ unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
+ struct kobject kobj;
+};
+
+struct disk_part_tbl {
+ struct rcu_head rcu_head;
+ int len;
+ struct hd_struct __rcu *last_lookup;
+ struct hd_struct __rcu *part[];
+};
+
+struct disk_events;
+
+struct gendisk {
+ /* major, first_minor and minors are input parameters only,
+ * don't use directly. Use disk_devt() and disk_max_parts().
+ */
+ int major; /* major number of driver */
+ int first_minor;
+ int minors; /* maximum number of minors, =1 for
+ * disks that can't be partitioned. */
+
+ char disk_name[DISK_NAME_LEN]; /* name of major driver */
+ char *(*devnode)(struct gendisk *gd, umode_t *mode);
+
+ unsigned int events; /* supported events */
+ unsigned int async_events; /* async events, subset of all */
+
+ /* Array of pointers to partitions indexed by partno.
+ * Protected with matching bdev lock but stat and other
+ * non-critical accesses use RCU. Always access through
+ * helpers.
+ */
+ struct disk_part_tbl __rcu *part_tbl;
+ struct hd_struct part0;
+
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
+
+ int flags;
+	struct device *driverfs_dev;	/* FIXME: remove */
+ struct kobject *slave_dir;
+
+ struct timer_rand_state *random;
+ atomic_t sync_io; /* RAID */
+ struct disk_events *ev;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ struct blk_integrity *integrity;
+#endif
+ int node_id;
+};
+
+static inline struct gendisk *part_to_disk(struct hd_struct *part)
+{
+ if (likely(part)) {
+ if (part->partno)
+ return dev_to_disk(part_to_dev(part)->parent);
+ else
+ return dev_to_disk(part_to_dev(part));
+ }
+ return NULL;
+}
+
+static inline void part_pack_uuid(const u8 *uuid_str, u8 *to)
+{
+ int i;
+ for (i = 0; i < 16; ++i) {
+ *to++ = (hex_to_bin(*uuid_str) << 4) |
+ (hex_to_bin(*(uuid_str + 1)));
+ uuid_str += 2;
+ switch (i) {
+ case 3:
+ case 5:
+ case 7:
+ case 9:
+ uuid_str++;
+ continue;
+ }
+ }
+}
+
+static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
+{
+ part_pack_uuid(uuid_str, to);
+ return 0;
+}
+
+static inline int disk_max_parts(struct gendisk *disk)
+{
+ if (disk->flags & GENHD_FL_EXT_DEVT)
+ return DISK_MAX_PARTS;
+ return disk->minors;
+}
+
+static inline bool disk_part_scan_enabled(struct gendisk *disk)
+{
+ return disk_max_parts(disk) > 1 &&
+ !(disk->flags & GENHD_FL_NO_PART_SCAN);
+}
+
+static inline dev_t disk_devt(struct gendisk *disk)
+{
+ return disk_to_dev(disk)->devt;
+}
+
+static inline dev_t part_devt(struct hd_struct *part)
+{
+ return part_to_dev(part)->devt;
+}
+
+extern struct hd_struct *disk_get_part(struct gendisk *disk, int partno);
+
+static inline void disk_put_part(struct hd_struct *part)
+{
+ if (likely(part))
+ put_device(part_to_dev(part));
+}
+
+/*
+ * Smarter partition iterator without context limits.
+ */
+#define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */
+#define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */
+#define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */
+#define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */
+
+struct disk_part_iter {
+ struct gendisk *disk;
+ struct hd_struct *part;
+ int idx;
+ unsigned int flags;
+};
+
+extern void disk_part_iter_init(struct disk_part_iter *piter,
+ struct gendisk *disk, unsigned int flags);
+extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
+extern void disk_part_iter_exit(struct disk_part_iter *piter);
+
+extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
+ sector_t sector);
+
+/*
+ * Macros to operate on percpu disk statistics:
+ *
+ * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
+ * and should be called between disk_stat_lock() and
+ * disk_stat_unlock().
+ *
+ * part_stat_read() can be called at any time.
+ *
+ * part_stat_{add|set_all}() and {init|free}_part_stats are for
+ * internal use only.
+ */
+#ifdef CONFIG_SMP
+#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
+#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
+
+#define __part_stat_add(cpu, part, field, addnd) \
+ (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
+
+#define part_stat_read(part, field) \
+({ \
+ typeof((part)->dkstats->field) res = 0; \
+ unsigned int _cpu; \
+ for_each_possible_cpu(_cpu) \
+ res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
+ res; \
+})
+
+static inline void part_stat_set_all(struct hd_struct *part, int value)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ memset(per_cpu_ptr(part->dkstats, i), value,
+ sizeof(struct disk_stats));
+}
+
+static inline int init_part_stats(struct hd_struct *part)
+{
+ part->dkstats = alloc_percpu(struct disk_stats);
+ if (!part->dkstats)
+ return 0;
+ return 1;
+}
+
+static inline void free_part_stats(struct hd_struct *part)
+{
+ free_percpu(part->dkstats);
+}
+
+#else /* !CONFIG_SMP */
+#define part_stat_lock() ({ rcu_read_lock(); 0; })
+#define part_stat_unlock() rcu_read_unlock()
+
+#define __part_stat_add(cpu, part, field, addnd) \
+ ((part)->dkstats.field += addnd)
+
+#define part_stat_read(part, field) ((part)->dkstats.field)
+
+static inline void part_stat_set_all(struct hd_struct *part, int value)
+{
+ memset(&part->dkstats, value, sizeof(struct disk_stats));
+}
+
+static inline int init_part_stats(struct hd_struct *part)
+{
+ return 1;
+}
+
+static inline void free_part_stats(struct hd_struct *part)
+{
+}
+
+#endif /* CONFIG_SMP */
+
+#define part_stat_add(cpu, part, field, addnd) do { \
+ __part_stat_add((cpu), (part), field, addnd); \
+ if ((part)->partno) \
+ __part_stat_add((cpu), &part_to_disk((part))->part0, \
+ field, addnd); \
+} while (0)
+
+#define part_stat_dec(cpu, gendiskp, field) \
+ part_stat_add(cpu, gendiskp, field, -1)
+#define part_stat_inc(cpu, gendiskp, field) \
+ part_stat_add(cpu, gendiskp, field, 1)
+#define part_stat_sub(cpu, gendiskp, field, subnd) \
+ part_stat_add(cpu, gendiskp, field, -subnd)
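+
+/*
+ * Usage sketch (illustrative only): accounting one write of 'nr_sectors'
+ * sectors against 'part' with the macros above (WRITE == 1):
+ *
+ *	cpu = part_stat_lock();
+ *	part_stat_inc(cpu, part, ios[WRITE]);
+ *	part_stat_add(cpu, part, sectors[WRITE], nr_sectors);
+ *	part_stat_unlock();
+ */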
+
+static inline void part_inc_in_flight(struct hd_struct *part, int rw)
+{
+ atomic_inc(&part->in_flight[rw]);
+ if (part->partno)
+ atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
+}
+
+static inline void part_dec_in_flight(struct hd_struct *part, int rw)
+{
+ atomic_dec(&part->in_flight[rw]);
+ if (part->partno)
+ atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
+}
+
+static inline int part_in_flight(struct hd_struct *part)
+{
+ return atomic_read(&part->in_flight[0]) + atomic_read(&part->in_flight[1]);
+}
+
+static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
+{
+ if (disk)
+ return kzalloc_node(sizeof(struct partition_meta_info),
+ GFP_KERNEL, disk->node_id);
+ return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL);
+}
+
+static inline void free_part_info(struct hd_struct *part)
+{
+ kfree(part->info);
+}
+
+/* block/blk-core.c */
+extern void part_round_stats(int cpu, struct hd_struct *part);
+
+/* block/genhd.c */
+extern void add_disk(struct gendisk *disk);
+extern void del_gendisk(struct gendisk *gp);
+extern struct gendisk *get_gendisk(dev_t dev, int *partno);
+extern struct block_device *bdget_disk(struct gendisk *disk, int partno);
+
+extern void set_device_ro(struct block_device *bdev, int flag);
+extern void set_disk_ro(struct gendisk *disk, int flag);
+
+static inline int get_disk_ro(struct gendisk *disk)
+{
+ return disk->part0.policy;
+}
+
+extern void disk_block_events(struct gendisk *disk);
+extern void disk_unblock_events(struct gendisk *disk);
+extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
+extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
+
+/* drivers/char/random.c */
+extern void add_disk_randomness(struct gendisk *disk);
+extern void rand_initialize_disk(struct gendisk *disk);
+
+static inline sector_t get_start_sect(struct block_device *bdev)
+{
+ return bdev->bd_part->start_sect;
+}
+static inline sector_t get_capacity(struct gendisk *disk)
+{
+ return disk->part0.nr_sects;
+}
+static inline void set_capacity(struct gendisk *disk, sector_t size)
+{
+ disk->part0.nr_sects = size;
+}
+
+#ifdef CONFIG_SOLARIS_X86_PARTITION
+
+#define SOLARIS_X86_NUMSLICE 16
+#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL)
+
+struct solaris_x86_slice {
+ __le16 s_tag; /* ID tag of partition */
+ __le16 s_flag; /* permission flags */
+ __le32 s_start; /* start sector no of partition */
+ __le32 s_size; /* # of blocks in partition */
+};
+
+struct solaris_x86_vtoc {
+ unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */
+ __le32 v_sanity; /* to verify vtoc sanity */
+ __le32 v_version; /* layout version */
+ char v_volume[8]; /* volume name */
+ __le16 v_sectorsz; /* sector size in bytes */
+ __le16 v_nparts; /* number of partitions */
+ unsigned int v_reserved[10]; /* free space */
+ struct solaris_x86_slice
+ v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */
+ unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */
+ char v_asciilabel[128]; /* for compatibility */
+};
+
+#endif /* CONFIG_SOLARIS_X86_PARTITION */
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
+ * updated by Marc Espie <Marc.Espie@openbsd.org>
+ */
+
+/* check against BSD src/sys/sys/disklabel.h for consistency */
+
+#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
+#define BSD_MAXPARTITIONS 16
+#define OPENBSD_MAXPARTITIONS 16
+#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
+struct bsd_disklabel {
+ __le32 d_magic; /* the magic number */
+ __s16 d_type; /* drive type */
+ __s16 d_subtype; /* controller/d_type specific */
+ char d_typename[16]; /* type name, e.g. "eagle" */
+ char d_packname[16]; /* pack identifier */
+ __u32 d_secsize; /* # of bytes per sector */
+ __u32 d_nsectors; /* # of data sectors per track */
+ __u32 d_ntracks; /* # of tracks per cylinder */
+ __u32 d_ncylinders; /* # of data cylinders per unit */
+ __u32 d_secpercyl; /* # of data sectors per cylinder */
+ __u32 d_secperunit; /* # of data sectors per unit */
+ __u16 d_sparespertrack; /* # of spare sectors per track */
+ __u16 d_sparespercyl; /* # of spare sectors per cylinder */
+ __u32 d_acylinders; /* # of alt. cylinders per unit */
+ __u16 d_rpm; /* rotational speed */
+ __u16 d_interleave; /* hardware sector interleave */
+ __u16 d_trackskew; /* sector 0 skew, per track */
+ __u16 d_cylskew; /* sector 0 skew, per cylinder */
+ __u32 d_headswitch; /* head switch time, usec */
+ __u32 d_trkseek; /* track-to-track seek, usec */
+ __u32 d_flags; /* generic flags */
+#define NDDATA 5
+ __u32 d_drivedata[NDDATA]; /* drive-type specific information */
+#define NSPARE 5
+ __u32 d_spare[NSPARE]; /* reserved for future use */
+ __le32 d_magic2; /* the magic number (again) */
+ __le16 d_checksum; /* xor of data incl. partitions */
+
+ /* filesystem and partition information: */
+ __le16 d_npartitions; /* number of partitions in following */
+ __le32 d_bbsize; /* size of boot area at sn0, bytes */
+ __le32 d_sbsize; /* max size of fs superblock, bytes */
+ struct bsd_partition { /* the partition table */
+ __le32 p_size; /* number of sectors in partition */
+ __le32 p_offset; /* starting sector */
+ __le32 p_fsize; /* filesystem basic fragment size */
+ __u8 p_fstype; /* filesystem type, see below */
+ __u8 p_frag; /* filesystem fragments per block */
+ __le16 p_cpg; /* filesystem cylinders per group */
+ } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
+};
+
+#endif /* CONFIG_BSD_DISKLABEL */
+
+#ifdef CONFIG_UNIXWARE_DISKLABEL
+/*
+ * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
+ * and Krzysztof G. Baranowski <kgb@knm.org.pl>
+ */
+
+#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */
+#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */
+#define UNIXWARE_NUMSLICE 16
+#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */
+
+struct unixware_slice {
+ __le16 s_label; /* label */
+ __le16 s_flags; /* permission flags */
+ __le32 start_sect; /* starting sector */
+ __le32 nr_sects; /* number of sectors in slice */
+};
+
+struct unixware_disklabel {
+ __le32 d_type; /* drive type */
+ __le32 d_magic; /* the magic number */
+ __le32 d_version; /* version number */
+ char d_serial[12]; /* serial number of the device */
+ __le32 d_ncylinders; /* # of data cylinders per device */
+ __le32 d_ntracks; /* # of tracks per cylinder */
+ __le32 d_nsectors; /* # of data sectors per track */
+ __le32 d_secsize; /* # of bytes per sector */
+ __le32 d_part_start; /* # of first sector of this partition */
+ __le32 d_unknown1[12]; /* ? */
+ __le32 d_alt_tbl; /* byte offset of alternate table */
+ __le32 d_alt_len; /* byte length of alternate table */
+ __le32 d_phys_cyl; /* # of physical cylinders per device */
+ __le32 d_phys_trk; /* # of physical tracks per cylinder */
+ __le32 d_phys_sec; /* # of physical sectors per track */
+ __le32 d_phys_bytes; /* # of physical bytes per sector */
+ __le32 d_unknown2; /* ? */
+ __le32 d_unknown3; /* ? */
+ __le32 d_pad[8]; /* pad */
+
+ struct unixware_vtoc {
+ __le32 v_magic; /* the magic number */
+ __le32 v_version; /* version number */
+ char v_name[8]; /* volume name */
+ __le16 v_nslices; /* # of slices */
+ __le16 v_unknown1; /* ? */
+ __le32 v_reserved[10]; /* reserved */
+ struct unixware_slice
+ v_slice[UNIXWARE_NUMSLICE]; /* slice headers */
+ } vtoc;
+
+}; /* 408 */
+
+#endif /* CONFIG_UNIXWARE_DISKLABEL */
+
+#ifdef CONFIG_MINIX_SUBPARTITION
+# define MINIX_NR_SUBPARTITIONS 4
+#endif /* CONFIG_MINIX_SUBPARTITION */
+
+#define ADDPART_FLAG_NONE 0
+#define ADDPART_FLAG_RAID 1
+#define ADDPART_FLAG_WHOLEDISK 2
+
+extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
+extern void blk_free_devt(dev_t devt);
+extern dev_t blk_lookup_devt(const char *name, int partno);
+extern char *disk_name (struct gendisk *hd, int partno, char *buf);
+
+extern int disk_expand_part_tbl(struct gendisk *disk, int target);
+extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
+extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
+extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
+ int partno, sector_t start,
+ sector_t len, int flags,
+ struct partition_meta_info
+ *info);
+extern void __delete_partition(struct hd_struct *);
+extern void delete_partition(struct gendisk *, int);
+extern void printk_all_partitions(void);
+
+extern struct gendisk *alloc_disk_node(int minors, int node_id);
+extern struct gendisk *alloc_disk(int minors);
+extern struct kobject *get_disk(struct gendisk *disk);
+extern void put_disk(struct gendisk *disk);
+extern void blk_register_region(dev_t devt, unsigned long range,
+ struct module *module,
+ struct kobject *(*probe)(dev_t, int *, void *),
+ int (*lock)(dev_t, void *),
+ void *data);
+extern void blk_unregister_region(dev_t devt, unsigned long range);
+
+extern ssize_t part_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t part_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t part_inflight_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+extern ssize_t part_fail_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t part_fail_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
+
+static inline void hd_ref_init(struct hd_struct *part)
+{
+ atomic_set(&part->ref, 1);
+ smp_mb();
+}
+
+static inline void hd_struct_get(struct hd_struct *part)
+{
+ atomic_inc(&part->ref);
+ smp_mb__after_atomic();
+}
+
+static inline int hd_struct_try_get(struct hd_struct *part)
+{
+ return atomic_inc_not_zero(&part->ref);
+}
+
+static inline void hd_struct_put(struct hd_struct *part)
+{
+ if (atomic_dec_and_test(&part->ref))
+ __delete_partition(part);
+}
+
+/*
+ * Any access to part->nr_sects that is not protected by the partition
+ * bd_mutex or the gendisk bdev bd_mutex should be done using this
+ * accessor function.
+ *
+ * Code written along the lines of i_size_read() and i_size_write().
+ * The CONFIG_PREEMPT case optimizes the case of a UP kernel with
+ * preemption on.
+ */
+static inline sector_t part_nr_sects_read(struct hd_struct *part)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+ sector_t nr_sects;
+ unsigned seq;
+ do {
+ seq = read_seqcount_begin(&part->nr_sects_seq);
+ nr_sects = part->nr_sects;
+ } while (read_seqcount_retry(&part->nr_sects_seq, seq));
+ return nr_sects;
+#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+ sector_t nr_sects;
+
+ preempt_disable();
+ nr_sects = part->nr_sects;
+ preempt_enable();
+ return nr_sects;
+#else
+ return part->nr_sects;
+#endif
+}
+
+/*
+ * Should be called with the partition's mutex (typically bd_mutex) held
+ * to provide mutual exclusion among writers; otherwise the seqcount might
+ * be left in a wrong state, leaving the readers spinning infinitely.
+ */
+static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+ write_seqcount_begin(&part->nr_sects_seq);
+ part->nr_sects = size;
+ write_seqcount_end(&part->nr_sects_seq);
+#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+ preempt_disable();
+ part->nr_sects = size;
+ preempt_enable();
+#else
+ part->nr_sects = size;
+#endif
+}
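+
+/*
+ * Usage sketch (illustrative only): a writer resizing a partition under the
+ * block device mutex, and a lockless reader using the accessor:
+ *
+ *	mutex_lock(&bdev->bd_mutex);
+ *	part_nr_sects_write(part, new_nr_sects);
+ *	mutex_unlock(&bdev->bd_mutex);
+ *
+ *	nr_sects = part_nr_sects_read(part);
+ */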
+
+#else /* CONFIG_BLOCK */
+
+static inline void printk_all_partitions(void) { }
+
+static inline dev_t blk_lookup_devt(const char *name, int partno)
+{
+ dev_t devt = MKDEV(0, 0);
+ return devt;
+}
+
+static inline int blk_part_pack_uuid(const u8 *uuid_str, u8 *to)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_BLOCK */
+
+#endif /* _LINUX_GENHD_H */
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
new file mode 100644
index 000000000..667c31101
--- /dev/null
+++ b/include/linux/genl_magic_func.h
@@ -0,0 +1,413 @@
+#ifndef GENL_MAGIC_FUNC_H
+#define GENL_MAGIC_FUNC_H
+
+#include <linux/genl_magic_struct.h>
+
+/*
+ * Magic: declare tla policy {{{1
+ * Magic: declare nested policies
+ * {{{2
+ */
+#undef GENL_mc_group
+#define GENL_mc_group(group)
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list)
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list)
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+ [tag_name] = { .type = NLA_NESTED },
+
+static struct nla_policy CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+static struct nla_policy s_name ## _nl_policy[] __read_mostly = \
+{ s_fields };
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, _type, __get, \
+ __put, __is_signed) \
+ [attr_nr] = { .type = nla_type },
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, _type, maxlen, \
+ __get, __put, __is_signed) \
+ [attr_nr] = { .type = nla_type, \
+ .len = maxlen - (nla_type == NLA_NUL_STRING) },
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#ifndef __KERNEL__
+#ifndef pr_info
+#define pr_info(args...) fprintf(stderr, args);
+#endif
+#endif
+
+#ifdef GENL_MAGIC_DEBUG
+static void dprint_field(const char *dir, int nla_type,
+ const char *name, void *valp)
+{
+ __u64 val = valp ? *(__u32 *)valp : 1;
+ switch (nla_type) {
+ case NLA_U8: val = (__u8)val;
+ case NLA_U16: val = (__u16)val;
+ case NLA_U32: val = (__u32)val;
+ pr_info("%s attr %s: %d 0x%08x\n", dir,
+ name, (int)val, (unsigned)val);
+ break;
+ case NLA_U64:
+ val = *(__u64*)valp;
+ pr_info("%s attr %s: %lld 0x%08llx\n", dir,
+ name, (long long)val, (unsigned long long)val);
+ break;
+ case NLA_FLAG:
+ if (val)
+ pr_info("%s attr %s: set\n", dir, name);
+ break;
+ }
+}
+
+static void dprint_array(const char *dir, int nla_type,
+ const char *name, const char *val, unsigned len)
+{
+ switch (nla_type) {
+ case NLA_NUL_STRING:
+ if (len && val[len-1] == '\0')
+ len--;
+ pr_info("%s attr %s: [len:%u] '%s'\n", dir, name, len, val);
+ break;
+ default:
+		/* we can always show 4 bytes;
+		 * that's what nlattrs are aligned to. */
+ pr_info("%s attr %s: [len:%u] %02x%02x%02x%02x ...\n",
+ dir, name, len, val[0], val[1], val[2], val[3]);
+ }
+}
+
+#define DPRINT_TLA(a, op, b) pr_info("%s %s %s\n", a, op, b);
+
+/* Name is a member field name of the struct s.
+ * If s is NULL (only parsing, no copy requested in *_from_attrs()),
+ * nla is supposed to point to the attribute containing the information
+ * corresponding to that struct member. */
+#define DPRINT_FIELD(dir, nla_type, name, s, nla) \
+ do { \
+ if (s) \
+ dprint_field(dir, nla_type, #name, &s->name); \
+ else if (nla) \
+ dprint_field(dir, nla_type, #name, \
+ (nla_type == NLA_FLAG) ? NULL \
+ : nla_data(nla)); \
+ } while (0)
+
+#define DPRINT_ARRAY(dir, nla_type, name, s, nla) \
+ do { \
+ if (s) \
+ dprint_array(dir, nla_type, #name, \
+ s->name, s->name ## _len); \
+ else if (nla) \
+ dprint_array(dir, nla_type, #name, \
+ nla_data(nla), nla_len(nla)); \
+ } while (0)
+#else
+#define DPRINT_TLA(a, op, b) do {} while (0)
+#define DPRINT_FIELD(dir, nla_type, name, s, nla) do {} while (0)
+#define DPRINT_ARRAY(dir, nla_type, name, s, nla) do {} while (0)
+#endif
+
+/*
+ * Magic: provide conversion functions {{{1
+ * populate struct from attribute table:
+ * {{{2
+ */
+
+/* Processing of generic netlink messages is serialized.
+ * Use one static buffer for parsing nested attributes. */
+static struct nlattr *nested_attr_tb[128];
+
+#ifndef BUILD_BUG_ON
+/* Force a compilation error if condition is true */
+#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
+/* Force a compilation error if condition is true, but also produce a
+ result (of value 0 and type size_t), so the expression can be used
+ e.g. in a structure initializer (or where-ever else comma expressions
+   e.g. in a structure initializer (or wherever else comma expressions
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
+#endif
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+/* *_from_attrs functions are static, but potentially unused */ \
+static int __ ## s_name ## _from_attrs(struct s_name *s, \
+ struct genl_info *info, bool exclude_invariants) \
+{ \
+ const int maxtype = ARRAY_SIZE(s_name ## _nl_policy)-1; \
+ struct nlattr *tla = info->attrs[tag_number]; \
+ struct nlattr **ntb = nested_attr_tb; \
+ struct nlattr *nla; \
+ int err; \
+ BUILD_BUG_ON(ARRAY_SIZE(s_name ## _nl_policy) > ARRAY_SIZE(nested_attr_tb)); \
+ if (!tla) \
+ return -ENOMSG; \
+ DPRINT_TLA(#s_name, "<=-", #tag_name); \
+ err = drbd_nla_parse_nested(ntb, maxtype, tla, s_name ## _nl_policy); \
+ if (err) \
+ return err; \
+ \
+ s_fields \
+ return 0; \
+} __attribute__((unused)) \
+static int s_name ## _from_attrs(struct s_name *s, \
+ struct genl_info *info) \
+{ \
+ return __ ## s_name ## _from_attrs(s, info, false); \
+} __attribute__((unused)) \
+static int s_name ## _from_attrs_for_change(struct s_name *s, \
+ struct genl_info *info) \
+{ \
+ return __ ## s_name ## _from_attrs(s, info, true); \
+} __attribute__((unused)) \
+
+#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \
+ nla = ntb[attr_nr]; \
+ if (nla) { \
+ if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
+ pr_info("<< must not change invariant attr: %s\n", #name); \
+ return -EEXIST; \
+ } \
+ assignment; \
+ } else if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
+ /* attribute missing from payload, */ \
+ /* which was expected */ \
+ } else if ((attr_flag) & DRBD_F_REQUIRED) { \
+ pr_info("<< missing attr: %s\n", #name); \
+ return -ENOMSG; \
+ }
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed) \
+ __assign(attr_nr, attr_flag, name, nla_type, type, \
+ if (s) \
+ s->name = __get(nla); \
+ DPRINT_FIELD("<<", nla_type, name, s, nla))
+
+/* validate_nla() already checked nla_len <= maxlen appropriately. */
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed) \
+ __assign(attr_nr, attr_flag, name, nla_type, type, \
+ if (s) \
+ s->name ## _len = \
+ __get(s->name, nla, maxlen); \
+ DPRINT_ARRAY("<<", nla_type, name, s, nla))
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields)
+
+/*
+ * Magic: define op number to op name mapping {{{1
+ * {{{2
+ */
+const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+{
+ switch (cmd) {
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list) \
+ case op_num: return #op_name;
+#include GENL_MAGIC_INCLUDE_FILE
+ default:
+ return "unknown";
+ }
+}
+
+#ifdef __KERNEL__
+#include <linux/stringify.h>
+/*
+ * Magic: define genl_ops {{{1
+ * {{{2
+ */
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list) \
+{ \
+ handler \
+ .cmd = op_name, \
+ .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy), \
+},
+
+#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list)
+
+/*
+ * Define the genl_family, multicast groups, {{{1
+ * and provide register/unregister functions.
+ * {{{2
+ */
+#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
+static struct genl_family ZZZ_genl_family __read_mostly = {
+ .id = GENL_ID_GENERATE,
+ .name = __stringify(GENL_MAGIC_FAMILY),
+ .version = GENL_MAGIC_VERSION,
+#ifdef GENL_MAGIC_FAMILY_HDRSZ
+ .hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
+#endif
+ .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
+};
+
+/*
+ * Magic: define multicast groups
+ * Magic: define multicast group registration helper
+ */
+#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
+static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
+#undef GENL_mc_group
+#define GENL_mc_group(group) { .name = #group, },
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
+#undef GENL_mc_group
+#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_mc_group
+#define GENL_mc_group(group) \
+static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
+ struct sk_buff *skb, gfp_t flags) \
+{ \
+ unsigned int group_id = \
+ CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
+ return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
+ group_id, flags); \
+}
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#undef GENL_mc_group
+#define GENL_mc_group(group)
+
+int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
+{
+	return genl_register_family_with_ops_groups(&ZZZ_genl_family,
+						     ZZZ_genl_ops,
+						     ZZZ_genl_mcgrps);
+}
+
+void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
+{
+ genl_unregister_family(&ZZZ_genl_family);
+}
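+
+/*
+ * Usage sketch (illustrative only): with GENL_MAGIC_FAMILY defined as drbd,
+ * the generated functions would typically be wired into module init/exit
+ * roughly like this:
+ *
+ *	err = drbd_genl_register();
+ *	if (err)
+ *		goto fail;
+ *	...
+ *	drbd_genl_unregister();
+ */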
+
+/*
+ * Magic: provide conversion functions {{{1
+ * populate skb from struct.
+ * {{{2
+ */
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list)
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+static int s_name ## _to_skb(struct sk_buff *skb, struct s_name *s, \
+ const bool exclude_sensitive) \
+{ \
+ struct nlattr *tla = nla_nest_start(skb, tag_number); \
+ if (!tla) \
+ goto nla_put_failure; \
+ DPRINT_TLA(#s_name, "-=>", #tag_name); \
+ s_fields \
+ nla_nest_end(skb, tla); \
+ return 0; \
+ \
+nla_put_failure: \
+ if (tla) \
+ nla_nest_cancel(skb, tla); \
+ return -EMSGSIZE; \
+} \
+static inline int s_name ## _to_priv_skb(struct sk_buff *skb, \
+ struct s_name *s) \
+{ \
+ return s_name ## _to_skb(skb, s, 0); \
+} \
+static inline int s_name ## _to_unpriv_skb(struct sk_buff *skb, \
+ struct s_name *s) \
+{ \
+ return s_name ## _to_skb(skb, s, 1); \
+}
+
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed) \
+ if (!exclude_sensitive || !((attr_flag) & DRBD_F_SENSITIVE)) { \
+ DPRINT_FIELD(">>", nla_type, name, s, NULL); \
+ if (__put(skb, attr_nr, s->name)) \
+ goto nla_put_failure; \
+ }
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed) \
+ if (!exclude_sensitive || !((attr_flag) & DRBD_F_SENSITIVE)) { \
+ DPRINT_ARRAY(">>",nla_type, name, s, NULL); \
+ if (__put(skb, attr_nr, min_t(int, maxlen, \
+ s->name ## _len + (nla_type == NLA_NUL_STRING)),\
+ s->name)) \
+ goto nla_put_failure; \
+ }
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+
+/* Functions for initializing structs to default values. */
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed)
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed)
+#undef __u32_field_def
+#define __u32_field_def(attr_nr, attr_flag, name, default) \
+ x->name = default;
+#undef __s32_field_def
+#define __s32_field_def(attr_nr, attr_flag, name, default) \
+ x->name = default;
+#undef __flg_field_def
+#define __flg_field_def(attr_nr, attr_flag, name, default) \
+ x->name = default;
+#undef __str_field_def
+#define __str_field_def(attr_nr, attr_flag, name, maxlen) \
+ memset(x->name, 0, sizeof(x->name)); \
+ x->name ## _len = 0;
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+static void set_ ## s_name ## _defaults(struct s_name *x) __attribute__((unused)); \
+static void set_ ## s_name ## _defaults(struct s_name *x) { \
+s_fields \
+}
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#endif /* __KERNEL__ */
+
+/* }}}1 */
+#endif /* GENL_MAGIC_FUNC_H */
+/* vim: set foldmethod=marker foldlevel=1 nofoldenable : */
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
new file mode 100644
index 000000000..eecd19b37
--- /dev/null
+++ b/include/linux/genl_magic_struct.h
@@ -0,0 +1,277 @@
+#ifndef GENL_MAGIC_STRUCT_H
+#define GENL_MAGIC_STRUCT_H
+
+#ifndef GENL_MAGIC_FAMILY
+# error "you need to define GENL_MAGIC_FAMILY before inclusion"
+#endif
+
+#ifndef GENL_MAGIC_VERSION
+# error "you need to define GENL_MAGIC_VERSION before inclusion"
+#endif
+
+#ifndef GENL_MAGIC_INCLUDE_FILE
+# error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion"
+#endif
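+
+/*
+ * Usage sketch (illustrative only): roughly how a user such as drbd would
+ * satisfy the checks above before pulling in the magic headers:
+ *
+ *	#define GENL_MAGIC_FAMILY	drbd
+ *	#define GENL_MAGIC_VERSION	1
+ *	#define GENL_MAGIC_INCLUDE_FILE	<linux/drbd_genl.h>
+ *	#include <linux/genl_magic_struct.h>
+ */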
+
+#include <linux/genetlink.h>
+#include <linux/types.h>
+
+#define CONCAT__(a,b) a ## b
+#define CONCAT_(a,b) CONCAT__(a,b)
+
+extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void);
+extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
+
+/*
+ * Extension of genl attribute validation policies {{{2
+ */
+
+/*
+ * @DRBD_GENLA_F_MANDATORY: By default, netlink ignores attributes it does not
+ * know about. This flag can be set in nlattr->nla_type to indicate that this
+ * attribute must not be ignored.
+ *
+ * We check and remove this flag in drbd_nla_check_mandatory() before
+ * validating the attribute types and lengths via nla_parse_nested().
+ */
+#define DRBD_GENLA_F_MANDATORY (1 << 14)
+
+/*
+ * Flags specific to drbd and not visible at the netlink layer, used in
+ * <struct>_from_attrs and <struct>_to_skb:
+ *
+ * @DRBD_F_REQUIRED: Attribute is required; a request without this attribute is
+ * invalid.
+ *
+ * @DRBD_F_SENSITIVE: Attribute includes sensitive information and must not be
+ * included in unprivileged get requests or broadcasts.
+ *
+ * @DRBD_F_INVARIANT: Attribute is set when an object is initially created, but
+ * cannot subsequently be changed.
+ */
+#define DRBD_F_REQUIRED (1 << 0)
+#define DRBD_F_SENSITIVE (1 << 1)
+#define DRBD_F_INVARIANT (1 << 2)
+
+#define __nla_type(x) ((__u16)((x) & NLA_TYPE_MASK & ~DRBD_GENLA_F_MANDATORY))
+
+/* }}}1
+ * MAGIC
+ * multi-include macro expansion magic starts here
+ */
+
+/* MAGIC helpers {{{2 */
+
+/* possible field types */
+#define __flg_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U8, char, \
+ nla_get_u8, nla_put_u8, false)
+#define __u8_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U8, unsigned char, \
+ nla_get_u8, nla_put_u8, false)
+#define __u16_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U16, __u16, \
+ nla_get_u16, nla_put_u16, false)
+#define __u32_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U32, __u32, \
+ nla_get_u32, nla_put_u32, false)
+#define __s32_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U32, __s32, \
+ nla_get_u32, nla_put_u32, true)
+#define __u64_field(attr_nr, attr_flag, name) \
+ __field(attr_nr, attr_flag, name, NLA_U64, __u64, \
+ nla_get_u64, nla_put_u64, false)
+#define __str_field(attr_nr, attr_flag, name, maxlen) \
+ __array(attr_nr, attr_flag, name, NLA_NUL_STRING, char, maxlen, \
+ nla_strlcpy, nla_put, false)
+#define __bin_field(attr_nr, attr_flag, name, maxlen) \
+ __array(attr_nr, attr_flag, name, NLA_BINARY, char, maxlen, \
+ nla_memcpy, nla_put, false)
+
+/* fields with default values */
+#define __flg_field_def(attr_nr, attr_flag, name, default) \
+ __flg_field(attr_nr, attr_flag, name)
+#define __u32_field_def(attr_nr, attr_flag, name, default) \
+ __u32_field(attr_nr, attr_flag, name)
+#define __s32_field_def(attr_nr, attr_flag, name, default) \
+ __s32_field(attr_nr, attr_flag, name)
+#define __str_field_def(attr_nr, attr_flag, name, maxlen) \
+ __str_field(attr_nr, attr_flag, name, maxlen)
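+
+/*
+ * Usage sketch (illustrative only): a hypothetical GENL_MAGIC_INCLUDE_FILE
+ * entry built from the field helpers above (tag and attribute numbers are
+ * made up):
+ *
+ *	GENL_struct(EXAMPLE_NLA_CONF, 3, example_conf,
+ *		__u32_field(1, DRBD_F_REQUIRED, minor)
+ *		__str_field(2, DRBD_GENLA_F_MANDATORY, name, 32)
+ *		__flg_field_def(3, 0, enabled, 1)
+ *	)
+ */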
+
+#define GENL_op_init(args...) args
+#define GENL_doit(handler) \
+ .doit = handler, \
+ .flags = GENL_ADMIN_PERM,
+#define GENL_dumpit(handler) \
+ .dumpit = handler, \
+ .flags = GENL_ADMIN_PERM,
+
+/* }}}1
+ * Magic: define the enum symbols for genl_ops
+ * Magic: define the enum symbols for top level attributes
+ * Magic: define the enum symbols for nested attributes
+ * {{{2
+ */
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields)
+
+#undef GENL_mc_group
+#define GENL_mc_group(group)
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list) \
+ op_name = op_num,
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, tla_list) \
+ op_name = op_num,
+
+enum {
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list)
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, attr_list)
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+ tag_name = tag_number,
+
+enum {
+#include GENL_MAGIC_INCLUDE_FILE
+};
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+enum { \
+ s_fields \
+};
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, \
+ __get, __put, __is_signed) \
+ T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)),
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, \
+ maxlen, __get, __put, __is_signed) \
+ T_ ## name = (__u16)(attr_nr | ((attr_flag) & DRBD_GENLA_F_MANDATORY)),
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+/* }}}1
+ * Magic: compile time assert unique numbers for operations
+ * Magic: -"- unique numbers for top level attributes
+ * Magic: -"- unique numbers for nested attributes
+ * {{{2
+ */
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields)
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, attr_list) \
+ case op_name:
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list) \
+ case op_name:
+
+static inline void ct_assert_unique_operations(void)
+{
+ switch (0) {
+#include GENL_MAGIC_INCLUDE_FILE
+ ;
+ }
+}
+
+#undef GENL_op
+#define GENL_op(op_name, op_num, handler, attr_list)
+
+#undef GENL_notification
+#define GENL_notification(op_name, op_num, mcast_group, tla_list)
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+ case tag_number:
+
+static inline void ct_assert_unique_top_level_attributes(void)
+{
+ switch (0) {
+#include GENL_MAGIC_INCLUDE_FILE
+ ;
+ }
+}
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
+{ \
+ switch (0) { \
+ s_fields \
+ ; \
+ } \
+}
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed) \
+ case attr_nr:
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed) \
+ case attr_nr:
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+/* }}}1
+ * Magic: declare structs
+ * struct <name> {
+ * fields
+ * };
+ * {{{2
+ */
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+struct s_name { s_fields };
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ __is_signed) \
+ type name;
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, __is_signed) \
+ type name[maxlen]; \
+ __u32 name ## _len;
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+#undef GENL_struct
+#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
+enum { \
+ s_fields \
+};
+
+#undef __field
+#define __field(attr_nr, attr_flag, name, nla_type, type, __get, __put, \
+ is_signed) \
+ F_ ## name ## _IS_SIGNED = is_signed,
+
+#undef __array
+#define __array(attr_nr, attr_flag, name, nla_type, type, maxlen, \
+ __get, __put, is_signed) \
+ F_ ## name ## _IS_SIGNED = is_signed,
+
+#include GENL_MAGIC_INCLUDE_FILE
+
+/* }}}1 */
+#endif /* GENL_MAGIC_STRUCT_H */
+/* vim: set foldmethod=marker nofoldenable : */
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
new file mode 100644
index 000000000..c7372d7a9
--- /dev/null
+++ b/include/linux/getcpu.h
@@ -0,0 +1,18 @@
+#ifndef _LINUX_GETCPU_H
+#define _LINUX_GETCPU_H 1
+
+/* Cache for getcpu() to speed it up. Results might be a short time
+ out of date, but will be faster.
+
+ User programs should not refer to the contents of this structure.
+   I repeat, they should not refer to it. If they do, they will break
+ in future kernels.
+
+ It is only a private cache for vgetcpu(). It will change in future kernels.
+   The user program must store this information per thread (__thread).
+   If you want 100% accurate information, pass NULL instead. */
+struct getcpu_cache {
+ unsigned long blob[128 / sizeof(long)];
+};
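+
+/*
+ * Usage sketch (illustrative only): from userspace the cache is normally left
+ * to the vDSO; a plain syscall simply passes NULL for it:
+ *
+ *	unsigned int cpu, node;
+ *	syscall(__NR_getcpu, &cpu, &node, NULL);
+ */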
+
+#endif
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
new file mode 100644
index 000000000..a444b84fb
--- /dev/null
+++ b/include/linux/gfp.h
@@ -0,0 +1,420 @@
+#ifndef __LINUX_GFP_H
+#define __LINUX_GFP_H
+
+#include <linux/mmdebug.h>
+#include <linux/mmzone.h>
+#include <linux/stddef.h>
+#include <linux/linkage.h>
+#include <linux/topology.h>
+
+struct vm_area_struct;
+
+/* Plain integer GFP bitmasks. Do not use this directly. */
+#define ___GFP_DMA 0x01u
+#define ___GFP_HIGHMEM 0x02u
+#define ___GFP_DMA32 0x04u
+#define ___GFP_MOVABLE 0x08u
+#define ___GFP_WAIT 0x10u
+#define ___GFP_HIGH 0x20u
+#define ___GFP_IO 0x40u
+#define ___GFP_FS 0x80u
+#define ___GFP_COLD 0x100u
+#define ___GFP_NOWARN 0x200u
+#define ___GFP_REPEAT 0x400u
+#define ___GFP_NOFAIL 0x800u
+#define ___GFP_NORETRY 0x1000u
+#define ___GFP_MEMALLOC 0x2000u
+#define ___GFP_COMP 0x4000u
+#define ___GFP_ZERO 0x8000u
+#define ___GFP_NOMEMALLOC 0x10000u
+#define ___GFP_HARDWALL 0x20000u
+#define ___GFP_THISNODE 0x40000u
+#define ___GFP_RECLAIMABLE 0x80000u
+#define ___GFP_NOACCOUNT 0x100000u
+#define ___GFP_NOTRACK 0x200000u
+#define ___GFP_NO_KSWAPD 0x400000u
+#define ___GFP_OTHER_NODE 0x800000u
+#define ___GFP_WRITE 0x1000000u
+#define ___GFP_TOI_NOTRACK 0x2000000u
+/* If the above are modified, __GFP_BITS_SHIFT may need updating */
+
+/*
+ * GFP bitmasks..
+ *
+ * Zone modifiers (see linux/mmzone.h - low three bits)
+ *
+ * Do not put any conditional on these. If necessary modify the definitions
+ * without the underscores and use them consistently. The definitions here may
+ * be used in bit comparisons.
+ */
+#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
+#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
+#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
+#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
+/*
+ * Action modifiers - doesn't change the zoning
+ *
+ * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
+ * _might_ fail. This depends upon the particular VM implementation.
+ *
+ * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. New users should be evaluated carefully
+ * (and the flag should be used only when there is no reasonable failure policy)
+ * but it is definitely preferable to use the flag rather than opencode endless
+ * loop around allocator.
+ *
+ * __GFP_NORETRY: The VM implementation must not retry indefinitely.
+ *
+ * __GFP_MOVABLE: Flag that this page will be movable by the page migration
+ * mechanism or reclaimed
+ */
+#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
+#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
+#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
+#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
+#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
+#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
+#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
+#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
+#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
+#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
+#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
+#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
+#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
+ * This takes precedence over the
+ * __GFP_MEMALLOC flag if both are
+ * set
+ */
+#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
+#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
+#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
+#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
+
+#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
+#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
+#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
+#define __GFP_TOI_NOTRACK ((__force gfp_t)___GFP_TOI_NOTRACK) /* Allocator wants page untracked by TOI */
+
+/*
+ * This may seem redundant, but it's a way of annotating false positives vs.
+ * allocations that simply cannot be supported (e.g. page tables).
+ */
+#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+
+#define __GFP_BITS_SHIFT 26	/* Room for 26 __GFP_FOO bits */
+#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
+
+/* This equals 0, but use constants in case they ever change */
+#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
+/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
+#define GFP_ATOMIC (__GFP_HIGH)
+#define GFP_NOIO (__GFP_WAIT)
+#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
+#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
+ __GFP_RECLAIMABLE)
+#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
+#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
+#define GFP_IOFS (__GFP_IO | __GFP_FS)
+#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
+ __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
+ __GFP_NO_KSWAPD)
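+
+/*
+ * Note (illustrative only): the usual choice is GFP_KERNEL for allocations
+ * made in process context that may sleep, and GFP_ATOMIC in interrupt or
+ * other atomic context, e.g.:
+ *
+ *	buf = kmalloc(len, GFP_KERNEL);		(may sleep)
+ *	buf = kmalloc(len, GFP_ATOMIC);		(never sleeps)
+ */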
+
+/* This mask makes up all the page movable related flags */
+#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
+
+/* Control page allocator reclaim behavior */
+#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+ __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
+ __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+
+/* Control slab gfp mask during early boot */
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
+
+/* Control allocation constraints */
+#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
+
+/* Do not use these with a slab allocator */
+#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+   platforms, used as appropriate on others. */
+
+#define GFP_DMA __GFP_DMA
+
+/* 4GB DMA on some platforms */
+#define GFP_DMA32 __GFP_DMA32
+
+/* Convert GFP flags to their corresponding migrate type */
+static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
+{
+ WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+ if (unlikely(page_group_by_mobility_disabled))
+ return MIGRATE_UNMOVABLE;
+
+ /* Group based on mobility */
+ return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+ ((gfp_flags & __GFP_RECLAIMABLE) != 0);
+}
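+
+/*
+ * Worked example (illustrative only) of the mapping above: GFP_KERNEL sets
+ * neither mobility bit and yields MIGRATE_UNMOVABLE (0), __GFP_RECLAIMABLE
+ * yields MIGRATE_RECLAIMABLE (1), and __GFP_MOVABLE yields MIGRATE_MOVABLE (2).
+ */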
+
+#ifdef CONFIG_HIGHMEM
+#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
+#else
+#define OPT_ZONE_HIGHMEM ZONE_NORMAL
+#endif
+
+#ifdef CONFIG_ZONE_DMA
+#define OPT_ZONE_DMA ZONE_DMA
+#else
+#define OPT_ZONE_DMA ZONE_NORMAL
+#endif
+
+#ifdef CONFIG_ZONE_DMA32
+#define OPT_ZONE_DMA32 ZONE_DMA32
+#else
+#define OPT_ZONE_DMA32 ZONE_NORMAL
+#endif
+
+/*
+ * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
+ * zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT long
+ * and there are 16 of them to cover all possible combinations of
+ * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
+ *
+ * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
+ * But __GFP_MOVABLE is not only a zone specifier but also an allocation
+ * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
+ * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
+ *
+ * bit result
+ * =================
+ * 0x0 => NORMAL
+ * 0x1 => DMA or NORMAL
+ * 0x2 => HIGHMEM or NORMAL
+ * 0x3 => BAD (DMA+HIGHMEM)
+ * 0x4 => DMA32 or DMA or NORMAL
+ * 0x5 => BAD (DMA+DMA32)
+ * 0x6 => BAD (HIGHMEM+DMA32)
+ * 0x7 => BAD (HIGHMEM+DMA32+DMA)
+ * 0x8 => NORMAL (MOVABLE+0)
+ * 0x9 => DMA or NORMAL (MOVABLE+DMA)
+ * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
+ * 0xb => BAD (MOVABLE+HIGHMEM+DMA)
+ * 0xc => DMA32 (MOVABLE+DMA32)
+ * 0xd => BAD (MOVABLE+DMA32+DMA)
+ * 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
+ * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
+ *
+ * ZONES_SHIFT must be <= 2 on 32 bit platforms.
+ */
+
+#if 16 * ZONES_SHIFT > BITS_PER_LONG
+#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
+#endif
+
+#define GFP_ZONE_TABLE ( \
+ (ZONE_NORMAL << 0 * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \
+ | (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \
+ | (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \
+ | (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \
+ | (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \
+)
+
+/*
+ * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32,
+ * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
+ * entry starting with bit 0. Bit is set if the combination is not
+ * allowed.
+ */
+#define GFP_ZONE_BAD ( \
+ 1 << (___GFP_DMA | ___GFP_HIGHMEM) \
+ | 1 << (___GFP_DMA | ___GFP_DMA32) \
+ | 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
+ | 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
+ | 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
+ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
+ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
+ | 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
+)
+
+static inline enum zone_type gfp_zone(gfp_t flags)
+{
+ enum zone_type z;
+ int bit = (__force int) (flags & GFP_ZONEMASK);
+
+ z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
+ ((1 << ZONES_SHIFT) - 1);
+ VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+ return z;
+}
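+
+/*
+ * Editor's worked example (not in the original source): GFP_KERNEL has none
+ * of the zone bits set, so bit == 0x0 and gfp_zone() returns ZONE_NORMAL.
+ * GFP_HIGHUSER_MOVABLE sets __GFP_HIGHMEM | __GFP_MOVABLE, so bit == 0xa and
+ * the GFP_ZONE_TABLE lookup yields ZONE_MOVABLE, matching the table above.
+ */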
+
+/*
+ * There is only one page-allocator function, and two main namespaces to
+ * it. The alloc_page*() variants return 'struct page *' and as such
+ * can allocate highmem pages, while the *get*page*() variants return
+ * virtual kernel addresses to the allocated page(s).
+ */
+
+static inline int gfp_zonelist(gfp_t flags)
+{
+ if (IS_ENABLED(CONFIG_NUMA) && unlikely(flags & __GFP_THISNODE))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * We get the zone list from the current node and the gfp_mask.
+ * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
+ * There are two zonelists per node, one for all zones with memory and
+ * one containing just zones from the node the zonelist belongs to.
+ *
+ * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
+ * optimized to &contig_page_data at compile-time.
+ */
+static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
+{
+ return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
+}
+
+#ifndef HAVE_ARCH_FREE_PAGE
+static inline void arch_free_page(struct page *page, int order) { }
+#endif
+#ifndef HAVE_ARCH_ALLOC_PAGE
+static inline void arch_alloc_page(struct page *page, int order) { }
+#endif
+
+struct page *
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, nodemask_t *nodemask);
+
+static inline struct page *
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist)
+{
+ return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
+}
+
+static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
+ unsigned int order)
+{
+ /* Unknown node is current node */
+ if (nid < 0)
+ nid = numa_node_id();
+
+ return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
+static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
+ unsigned int order)
+{
+ VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES || !node_online(nid));
+
+ return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
+}
+
+#ifdef CONFIG_NUMA
+extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
+
+static inline struct page *
+alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages_current(gfp_mask, order);
+}
+extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+ struct vm_area_struct *vma, unsigned long addr,
+ int node, bool hugepage);
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+#else
+#define alloc_pages(gfp_mask, order) \
+ alloc_pages_node(numa_node_id(), gfp_mask, order)
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node, hugepage)\
+ alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+ alloc_pages(gfp_mask, order)
+#endif
+#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+#define alloc_page_vma(gfp_mask, vma, addr) \
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+
+extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
+extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
+ unsigned int order);
+
+extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
+extern unsigned long get_zeroed_page(gfp_t gfp_mask);
+
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
+void free_pages_exact(void *virt, size_t size);
+/* Note: this is not the same as alloc_pages_exact_node() */
+void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
+
+#define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask), 0)
+
+#define __get_dma_pages(gfp_mask, order) \
+ __get_free_pages((gfp_mask) | GFP_DMA, (order))
+
+extern void __free_pages(struct page *page, unsigned int order);
+extern void free_pages(unsigned long addr, unsigned int order);
+extern void free_hot_cold_page(struct page *page, bool cold);
+extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+
+extern void __free_kmem_pages(struct page *page, unsigned int order);
+extern void free_kmem_pages(unsigned long addr, unsigned int order);
+
+#define __free_page(page) __free_pages((page), 0)
+#define free_page(addr) free_pages((addr), 0)
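+
+/*
+ * Editor's usage sketch (illustration only): allocations from the two
+ * namespaces above must be released with the matching free helper, e.g.
+ *
+ *   struct page *page = alloc_pages(GFP_KERNEL, 2);      2^2 contiguous pages
+ *   ...
+ *   __free_pages(page, 2);
+ *
+ *   unsigned long addr = __get_free_page(GFP_KERNEL);    kernel virtual addr
+ *   ...
+ *   free_page(addr);
+ *
+ * The orders and flags are arbitrary example values.
+ */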
+
+void page_alloc_init(void);
+void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
+void drain_all_pages(struct zone *zone);
+void drain_local_pages(struct zone *zone);
+
+/*
+ * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
+ * GFP flags are used before interrupts are enabled. Once interrupts are
+ * enabled, it is set to __GFP_BITS_MASK while the system is running. During
+ * hibernation, it is used by PM to avoid I/O during memory allocation while
+ * devices are suspended.
+ */
+extern gfp_t gfp_allowed_mask;
+
+/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
+bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);
+
+extern void pm_restrict_gfp_mask(void);
+extern void pm_restore_gfp_mask(void);
+
+#ifdef CONFIG_PM_SLEEP
+extern bool pm_suspended_storage(void);
+#else
+static inline bool pm_suspended_storage(void)
+{
+ return false;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_CMA
+
+/* The below functions must be run on a range from a single zone. */
+extern int alloc_contig_range(unsigned long start, unsigned long end,
+ unsigned migratetype);
+extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+
+/* CMA reserved pageblock initialization */
+extern void init_cma_reserved_pageblock(struct page *page);
+
+#endif
+
+#endif /* __LINUX_GFP_H */
diff --git a/include/linux/glob.h b/include/linux/glob.h
new file mode 100644
index 000000000..861d8347d
--- /dev/null
+++ b/include/linux/glob.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_GLOB_H
+#define _LINUX_GLOB_H
+
+#include <linux/types.h> /* For bool */
+#include <linux/compiler.h> /* For __pure */
+
+bool __pure glob_match(char const *pat, char const *str);
+
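+/*
+ * Editor's usage sketch (not part of the original header): glob_match()
+ * returns true when the shell-style pattern matches the whole string, e.g.
+ *
+ *   glob_match("*.bin", "firmware.bin")   -> true
+ *   glob_match("eth?", "eth10")           -> false ('?' matches one char)
+ *
+ * The exact pattern syntax is implemented in lib/glob.c.
+ */
+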
+#endif /* _LINUX_GLOB_H */
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
new file mode 100644
index 000000000..569236e6b
--- /dev/null
+++ b/include/linux/goldfish.h
@@ -0,0 +1,15 @@
+#ifndef __LINUX_GOLDFISH_H
+#define __LINUX_GOLDFISH_H
+
+/* Helpers for Goldfish virtual platform */
+
+static inline void gf_write64(unsigned long data,
+ void __iomem *portl, void __iomem *porth)
+{
+ writel((u32)data, portl);
+#ifdef CONFIG_64BIT
+ writel(data>>32, porth);
+#endif
+}
+
+#endif /* __LINUX_GOLDFISH_H */
diff --git a/include/linux/gpio-fan.h b/include/linux/gpio-fan.h
new file mode 100644
index 000000000..096659169
--- /dev/null
+++ b/include/linux/gpio-fan.h
@@ -0,0 +1,36 @@
+/*
+ * include/linux/gpio-fan.h
+ *
+ * Platform data structure for GPIO fan driver
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LINUX_GPIO_FAN_H
+#define __LINUX_GPIO_FAN_H
+
+struct gpio_fan_alarm {
+ unsigned gpio;
+ unsigned active_low;
+};
+
+struct gpio_fan_speed {
+ int rpm;
+ int ctrl_val;
+};
+
+struct gpio_fan_platform_data {
+ int num_ctrl;
+ unsigned *ctrl; /* fan control GPIOs. */
+ struct gpio_fan_alarm *alarm; /* fan alarm GPIO. */
+ /*
+ * Speed conversion array: rpm from/to GPIO bit field.
+ * This array _must_ be sorted in ascending rpm order.
+ */
+ int num_speed;
+ struct gpio_fan_speed *speed;
+};
+
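+/*
+ * Editor's example (hypothetical board code, illustration only); note that
+ * the speed table is sorted by ascending rpm as required above:
+ *
+ *   static struct gpio_fan_speed foo_fan_speeds[] = {
+ *           { .rpm = 1500, .ctrl_val = 1 },
+ *           { .rpm = 3000, .ctrl_val = 3 },
+ *   };
+ *   static unsigned foo_fan_ctrl[] = { 100, 101 };
+ *   static struct gpio_fan_platform_data foo_fan_data = {
+ *           .num_ctrl  = ARRAY_SIZE(foo_fan_ctrl),
+ *           .ctrl      = foo_fan_ctrl,
+ *           .num_speed = ARRAY_SIZE(foo_fan_speeds),
+ *           .speed     = foo_fan_speeds,
+ *   };
+ */
+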
+#endif /* __LINUX_GPIO_FAN_H */
diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h
new file mode 100644
index 000000000..d90ebbe02
--- /dev/null
+++ b/include/linux/gpio-pxa.h
@@ -0,0 +1,21 @@
+#ifndef __GPIO_PXA_H
+#define __GPIO_PXA_H
+
+#define GPIO_bit(x) (1 << ((x) & 0x1f))
+
+#define gpio_to_bank(gpio) ((gpio) >> 5)
+
+/* NOTE: some PXAs have fewer on-chip GPIOs (like PXA255, with 85).
+ * Those cases currently cause holes in the GPIO number space, the
+ * actual number of the last GPIO is recorded by 'pxa_last_gpio'.
+ */
+extern int pxa_last_gpio;
+
+extern int pxa_irq_to_gpio(int irq);
+
+struct pxa_gpio_platform_data {
+ int irq_base;
+ int (*gpio_set_wake)(unsigned int gpio, unsigned int on);
+};
+
+#endif /* __GPIO_PXA_H */
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
new file mode 100644
index 000000000..ab81339a8
--- /dev/null
+++ b/include/linux/gpio.h
@@ -0,0 +1,284 @@
+#ifndef __LINUX_GPIO_H
+#define __LINUX_GPIO_H
+
+#include <linux/errno.h>
+
+/* see Documentation/gpio/gpio-legacy.txt */
+
+/* make these flag values available regardless of GPIO kconfig options */
+#define GPIOF_DIR_OUT (0 << 0)
+#define GPIOF_DIR_IN (1 << 0)
+
+#define GPIOF_INIT_LOW (0 << 1)
+#define GPIOF_INIT_HIGH (1 << 1)
+
+#define GPIOF_IN (GPIOF_DIR_IN)
+#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
+#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
+
+/* Gpio pin is active-low */
+#define GPIOF_ACTIVE_LOW (1 << 2)
+
+/* Gpio pin is open drain */
+#define GPIOF_OPEN_DRAIN (1 << 3)
+
+/* Gpio pin is open source */
+#define GPIOF_OPEN_SOURCE (1 << 4)
+
+#define GPIOF_EXPORT (1 << 5)
+#define GPIOF_EXPORT_CHANGEABLE (1 << 6)
+#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
+#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
+
+/**
+ * struct gpio - a structure describing a GPIO with configuration
+ * @gpio: the GPIO number
+ * @flags: GPIO configuration as specified by GPIOF_*
+ * @label: a literal description string of this GPIO
+ */
+struct gpio {
+ unsigned gpio;
+ unsigned long flags;
+ const char *label;
+};
+
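+/*
+ * Editor's usage sketch (hypothetical GPIO numbers and labels): a table of
+ * struct gpio entries can be claimed and configured in one call with
+ * gpio_request_array() and released with gpio_free_array():
+ *
+ *   static const struct gpio foo_leds[] = {
+ *           { 42, GPIOF_OUT_INIT_LOW,  "foo:green" },
+ *           { 43, GPIOF_OUT_INIT_HIGH, "foo:red"   },
+ *   };
+ *
+ *   err = gpio_request_array(foo_leds, ARRAY_SIZE(foo_leds));
+ *   ...
+ *   gpio_free_array(foo_leds, ARRAY_SIZE(foo_leds));
+ */
+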
+#ifdef CONFIG_GPIOLIB
+
+#ifdef CONFIG_ARCH_HAVE_CUSTOM_GPIO_H
+#include <asm/gpio.h>
+#else
+
+#include <asm-generic/gpio.h>
+
+static inline int gpio_get_value(unsigned int gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return __gpio_to_irq(gpio);
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ return -EINVAL;
+}
+
+#endif /* ! CONFIG_ARCH_HAVE_CUSTOM_GPIO_H */
+
+/* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */
+
+struct device;
+
+int devm_gpio_request(struct device *dev, unsigned gpio, const char *label);
+int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label);
+void devm_gpio_free(struct device *dev, unsigned int gpio);
+
+#else /* ! CONFIG_GPIOLIB */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/pinctrl/pinctrl.h>
+
+struct device;
+struct gpio_chip;
+
+static inline bool gpio_is_valid(int number)
+{
+ return false;
+}
+
+static inline int gpio_request(unsigned gpio, const char *label)
+{
+ return -ENOSYS;
+}
+
+static inline int gpio_request_one(unsigned gpio,
+ unsigned long flags, const char *label)
+{
+ return -ENOSYS;
+}
+
+static inline int gpio_request_array(const struct gpio *array, size_t num)
+{
+ return -ENOSYS;
+}
+
+static inline void gpio_free(unsigned gpio)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline void gpio_free_array(const struct gpio *array, size_t num)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+ return -ENOSYS;
+}
+
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+ return -ENOSYS;
+}
+
+static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
+{
+ return -ENOSYS;
+}
+
+static inline int gpio_get_value(unsigned gpio)
+{
+ /* GPIO can never have been requested or set as {in,out}put */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ /* GPIO can never have been requested or set as output */
+ WARN_ON(1);
+}
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+ /* GPIO can never have been requested or set as {in,out}put */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ /* GPIO can never have been requested or set as {in,out}put */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ /* GPIO can never have been requested or set as output */
+ WARN_ON(1);
+}
+
+static inline int gpio_export(unsigned gpio, bool direction_may_change)
+{
+ /* GPIO can never have been requested or set as {in,out}put */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio)
+{
+ /* GPIO can never have been exported */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline void gpio_unexport(unsigned gpio)
+{
+ /* GPIO can never have been exported */
+ WARN_ON(1);
+}
+
+static inline int gpio_to_irq(unsigned gpio)
+{
+ /* GPIO can never have been requested or set as input */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ WARN_ON(1);
+}
+
+static inline int irq_to_gpio(unsigned irq)
+{
+ /* irq can never have been returned from gpio_to_irq() */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline int
+gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline void
+gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+{
+ WARN_ON(1);
+}
+
+static inline int devm_gpio_request(struct device *dev, unsigned gpio,
+ const char *label)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline int devm_gpio_request_one(struct device *dev, unsigned gpio,
+ unsigned long flags, const char *label)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline void devm_gpio_free(struct device *dev, unsigned int gpio)
+{
+ WARN_ON(1);
+}
+
+#endif /* ! CONFIG_GPIOLIB */
+
+#endif /* __LINUX_GPIO_H */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
new file mode 100644
index 000000000..da042657d
--- /dev/null
+++ b/include/linux/gpio/consumer.h
@@ -0,0 +1,495 @@
+#ifndef __LINUX_GPIO_CONSUMER_H
+#define __LINUX_GPIO_CONSUMER_H
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+struct device;
+
+/**
+ * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
+ * preferable to the old integer-based handles.
+ *
+ * Unlike GPIO numbers, a pointer to a gpio_desc is guaranteed to be valid
+ * until the GPIO is released.
+ */
+struct gpio_desc;
+
+/**
+ * Struct containing an array of descriptors that can be obtained using
+ * gpiod_get_array().
+ */
+struct gpio_descs {
+ unsigned int ndescs;
+ struct gpio_desc *desc[];
+};
+
+#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
+#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
+#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
+
+/**
+ * Optional flags that can be passed to one of gpiod_* to configure direction
+ * and output value. These values cannot be OR'd.
+ */
+enum gpiod_flags {
+ GPIOD_ASIS = 0,
+ GPIOD_IN = GPIOD_FLAGS_BIT_DIR_SET,
+ GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
+ GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
+ GPIOD_FLAGS_BIT_DIR_VAL,
+};
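+
+/*
+ * Editor's usage sketch (hypothetical device and con_id): a consumer driver
+ * typically obtains a descriptor with an initial direction/value and then
+ * drives it through the gpiod_* accessors below, e.g.
+ *
+ *   struct gpio_desc *reset;
+ *
+ *   reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ *   if (IS_ERR(reset))
+ *           return PTR_ERR(reset);
+ *   gpiod_set_value(reset, 1);
+ */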
+
+#ifdef CONFIG_GPIOLIB
+
+/* Return the number of GPIOs associated with a device / function */
+int gpiod_count(struct device *dev, const char *con_id);
+
+/* Acquire and dispose GPIOs */
+struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags);
+struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+ const char *con_id,
+ unsigned int index,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+void gpiod_put(struct gpio_desc *desc);
+void gpiod_put_array(struct gpio_descs *descs);
+
+struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags);
+struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_desc *__must_check
+__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags);
+struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags);
+struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags);
+void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
+
+int gpiod_get_direction(struct gpio_desc *desc);
+int gpiod_direction_input(struct gpio_desc *desc);
+int gpiod_direction_output(struct gpio_desc *desc, int value);
+int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
+
+/* Value get/set from non-sleeping context */
+int gpiod_get_value(const struct gpio_desc *desc);
+void gpiod_set_value(struct gpio_desc *desc, int value);
+void gpiod_set_array(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
+int gpiod_get_raw_value(const struct gpio_desc *desc);
+void gpiod_set_raw_value(struct gpio_desc *desc, int value);
+void gpiod_set_raw_array(unsigned int array_size,
+ struct gpio_desc **desc_array, int *value_array);
+
+/* Value get/set from sleeping context */
+int gpiod_get_value_cansleep(const struct gpio_desc *desc);
+void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
+void gpiod_set_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
+int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
+void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
+void gpiod_set_raw_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
+
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
+
+int gpiod_is_active_low(const struct gpio_desc *desc);
+int gpiod_cansleep(const struct gpio_desc *desc);
+
+int gpiod_to_irq(const struct gpio_desc *desc);
+
+/* Convert between the old gpio_ and new gpiod_ interfaces */
+struct gpio_desc *gpio_to_desc(unsigned gpio);
+int desc_to_gpio(const struct gpio_desc *desc);
+
+/* Child properties interface */
+struct fwnode_handle;
+
+struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
+ const char *propname);
+struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
+ const char *con_id,
+ struct fwnode_handle *child);
+#else /* CONFIG_GPIOLIB */
+
+static inline int gpiod_count(struct device *dev, const char *con_id)
+{
+ return 0;
+}
+
+static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline struct gpio_desc *__must_check
+__gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *__must_check
+__gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *__must_check
+__gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+gpiod_get_array(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void gpiod_put(struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline void gpiod_put_array(struct gpio_descs *descs)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline struct gpio_desc *__must_check
+__devm_gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline
+struct gpio_desc *__must_check
+__devm_gpiod_get_index(struct device *dev,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *__must_check
+__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *__must_check
+__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+ unsigned int index, enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+devm_gpiod_get_array(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_descs *__must_check
+devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline void devm_gpiod_put_array(struct device *dev,
+ struct gpio_descs *descs)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+
+static inline int gpiod_get_direction(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+static inline int gpiod_direction_input(struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
+
+static inline int gpiod_get_value(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline void gpiod_set_value(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+static inline void gpiod_set_array(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+static inline void gpiod_set_raw_array(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+static inline void gpiod_set_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
+ int value)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+static inline void gpiod_set_raw_array_cansleep(unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
+static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -ENOSYS;
+}
+
+static inline int gpiod_is_active_low(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+static inline int gpiod_cansleep(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int gpiod_to_irq(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline int desc_to_gpio(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+/* Child properties interface */
+struct fwnode_handle;
+
+static inline struct gpio_desc *fwnode_get_named_gpiod(
+ struct fwnode_handle *fwnode, const char *propname)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct gpio_desc *devm_get_gpiod_from_child(
+ struct device *dev, const char *con_id, struct fwnode_handle *child)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+/*
+ * Vararg hacks: these exist to transition the kernel to always passing
+ * the flags argument to the functions above. During the transition
+ * phase these vararg macros make both old- and new-style callers compile,
+ * but once all calls to the older API are removed, they should go away
+ * and the __gpiod_get() etc. functions above should be renamed to just
+ * gpiod_get() etc.
+ */
+#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
+#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
+#define __gpiod_get_index(dev, con_id, index, flags, ...) \
+ __gpiod_get_index(dev, con_id, index, flags)
+#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
+#define __gpiod_get_optional(dev, con_id, flags, ...) \
+ __gpiod_get_optional(dev, con_id, flags)
+#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
+#define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \
+ __gpiod_get_index_optional(dev, con_id, index, flags)
+#define gpiod_get_index_optional(varargs...) \
+ __gpiod_get_index_optional(varargs, GPIOD_ASIS)
+#define __devm_gpiod_get(dev, con_id, flags, ...) \
+ __devm_gpiod_get(dev, con_id, flags)
+#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
+#define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \
+ __devm_gpiod_get_index(dev, con_id, index, flags)
+#define devm_gpiod_get_index(varargs...) \
+ __devm_gpiod_get_index(varargs, GPIOD_ASIS)
+#define __devm_gpiod_get_optional(dev, con_id, flags, ...) \
+ __devm_gpiod_get_optional(dev, con_id, flags)
+#define devm_gpiod_get_optional(varargs...) \
+ __devm_gpiod_get_optional(varargs, GPIOD_ASIS)
+#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \
+ __devm_gpiod_get_index_optional(dev, con_id, index, flags)
+#define devm_gpiod_get_index_optional(varargs...) \
+ __devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
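+
+/*
+ * Editor's note (illustration only): during the transition both call styles
+ * expand to the same helper, e.g.
+ *
+ *   desc = gpiod_get(dev, "power");                  old style, GPIOD_ASIS
+ *   desc = gpiod_get(dev, "power", GPIOD_OUT_HIGH);  new style, explicit flags
+ */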
+
+#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
+
+int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
+int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc);
+int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
+void gpiod_unexport(struct gpio_desc *desc);
+
+#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+
+static inline int gpiod_export(struct gpio_desc *desc,
+ bool direction_may_change)
+{
+ return -ENOSYS;
+}
+
+static inline int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc)
+{
+ return -ENOSYS;
+}
+
+static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
+{
+ return -ENOSYS;
+}
+
+static inline void gpiod_unexport(struct gpio_desc *desc)
+{
+}
+
+#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+
+#endif
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
new file mode 100644
index 000000000..f1b36593e
--- /dev/null
+++ b/include/linux/gpio/driver.h
@@ -0,0 +1,239 @@
+#ifndef __LINUX_GPIO_DRIVER_H
+#define __LINUX_GPIO_DRIVER_H
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pinctrl/pinctrl.h>
+
+struct device;
+struct gpio_desc;
+struct of_phandle_args;
+struct device_node;
+struct seq_file;
+
+#ifdef CONFIG_GPIOLIB
+
+/**
+ * struct gpio_chip - abstract a GPIO controller
+ * @label: for diagnostics
+ * @dev: optional device providing the GPIOs
+ * @owner: helps prevent removal of modules exporting active GPIOs
+ * @list: links gpio_chips together for traversal
+ * @request: optional hook for chip-specific activation, such as
+ * enabling module power and clock; may sleep
+ * @free: optional hook for chip-specific deactivation, such as
+ * disabling module power and clock; may sleep
+ * @get_direction: returns direction for signal "offset", 0=out, 1=in,
+ * (same as GPIOF_DIR_XXX), or negative error
+ * @direction_input: configures signal "offset" as input, or returns error
+ * @direction_output: configures signal "offset" as output, or returns error
+ * @get: returns value for signal "offset"; for output signals this
+ * returns either the value actually sensed, or zero
+ * @set: assigns output value for signal "offset"
+ * @set_multiple: assigns output values for multiple signals defined by "mask"
+ * @set_debounce: optional hook for setting debounce time for specified gpio in
+ * interrupt triggered gpio chips
+ * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ * implementation may not sleep
+ * @dbg_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra
+ * state (such as pullup/pulldown configuration).
+ * @base: identifies the first GPIO number handled by this chip; or, if
+ * negative during registration, requests dynamic ID allocation.
+ * @ngpio: the number of GPIOs handled by this controller; the last GPIO
+ * handled is (base + ngpio - 1).
+ * @desc: array of ngpio descriptors. Private.
+ * @names: if set, must be an array of strings to use as alternative
+ * names for the GPIOs in this chip. Any entry in the array
+ * may be NULL if there is no alias for the GPIO; however, the
+ * array must be @ngpio entries long. A name can include a single printk
+ * format specifier for an unsigned int. It is substituted by the actual
+ * number of the gpio.
+ * @can_sleep: flag must be set iff get()/set() methods sleep, as they
+ * must while accessing GPIO expander chips over I2C or SPI. This
+ * implies that if the chip supports IRQs, these IRQs need to be threaded
+ * as the chip access may sleep when e.g. reading out the IRQ status
+ * registers.
+ * @exported: flags if the gpiochip is exported for use from sysfs. Private.
+ * @irq_not_threaded: flag must be set if @can_sleep is set but the
+ * IRQs don't need to be threaded
+ *
+ * A gpio_chip can help platforms abstract various sources of GPIOs so
+ * they can all be accessed through a common programming interface.
+ * Example sources would be SoC controllers, FPGAs, multifunction
+ * chips, dedicated GPIO expanders, and so on.
+ *
+ * Each chip controls a number of signals, identified in method calls
+ * by "offset" values in the range 0..(@ngpio - 1). When those signals
+ * are referenced through calls like gpio_get_value(gpio), the offset
+ * is calculated by subtracting @base from the gpio number.
+ */
+struct gpio_chip {
+ const char *label;
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+
+ int (*request)(struct gpio_chip *chip,
+ unsigned offset);
+ void (*free)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*get_direction)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*direction_input)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*direction_output)(struct gpio_chip *chip,
+ unsigned offset, int value);
+ int (*get)(struct gpio_chip *chip,
+ unsigned offset);
+ void (*set)(struct gpio_chip *chip,
+ unsigned offset, int value);
+ void (*set_multiple)(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits);
+ int (*set_debounce)(struct gpio_chip *chip,
+ unsigned offset,
+ unsigned debounce);
+
+ int (*to_irq)(struct gpio_chip *chip,
+ unsigned offset);
+
+ void (*dbg_show)(struct seq_file *s,
+ struct gpio_chip *chip);
+ int base;
+ u16 ngpio;
+ struct gpio_desc *desc;
+ const char *const *names;
+ bool can_sleep;
+ bool irq_not_threaded;
+ bool exported;
+
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+ /*
+ * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
+ * to handle IRQs for most practical cases.
+ */
+ struct irq_chip *irqchip;
+ struct irq_domain *irqdomain;
+ unsigned int irq_base;
+ irq_flow_handler_t irq_handler;
+ unsigned int irq_default_type;
+#endif
+
+#if defined(CONFIG_OF_GPIO)
+ /*
+ * If CONFIG_OF is enabled, all GPIO controllers described in the
+ * device tree may automatically have an OF translation.
+ */
+ struct device_node *of_node;
+ int of_gpio_n_cells;
+ int (*of_xlate)(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags);
+#endif
+#ifdef CONFIG_PINCTRL
+ /*
+ * If CONFIG_PINCTRL is enabled, then gpio controllers can optionally
+ * describe the actual pin range which they serve in an SoC. This
+ * information would be used by pinctrl subsystem to configure
+ * corresponding pins for gpio usage.
+ */
+ struct list_head pin_ranges;
+#endif
+};
+
+extern const char *gpiochip_is_requested(struct gpio_chip *chip,
+ unsigned offset);
+
+/* add/remove chips */
+extern int gpiochip_add(struct gpio_chip *chip);
+extern void gpiochip_remove(struct gpio_chip *chip);
+extern struct gpio_chip *gpiochip_find(void *data,
+ int (*match)(struct gpio_chip *chip, void *data));
+
+/* lock/unlock as IRQ */
+int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
+void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
+
+struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+
+void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ int parent_irq,
+ irq_flow_handler_t parent_handler);
+
+int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type);
+
+#endif /* CONFIG_GPIOLIB_IRQCHIP */
+
+#ifdef CONFIG_PINCTRL
+
+/**
+ * struct gpio_pin_range - pin range controlled by a gpio chip
+ * @node: list for maintaining set of pin ranges, used internally
+ * @pctldev: pinctrl device which handles corresponding pins
+ * @range: actual range of pins controlled by a gpio controller
+ */
+
+struct gpio_pin_range {
+ struct list_head node;
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range range;
+};
+
+int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins);
+int gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group);
+void gpiochip_remove_pin_ranges(struct gpio_chip *chip);
+
+#else
+
+static inline int
+gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name,
+ unsigned int gpio_offset, unsigned int pin_offset,
+ unsigned int npins)
+{
+ return 0;
+}
+static inline int
+gpiochip_add_pingroup_range(struct gpio_chip *chip,
+ struct pinctrl_dev *pctldev,
+ unsigned int gpio_offset, const char *pin_group)
+{
+ return 0;
+}
+
+static inline void
+gpiochip_remove_pin_ranges(struct gpio_chip *chip)
+{
+}
+
+#endif /* CONFIG_PINCTRL */
+
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+ const char *label);
+void gpiochip_free_own_desc(struct gpio_desc *desc);
+
+#else /* CONFIG_GPIOLIB */
+
+static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+ return ERR_PTR(-ENODEV);
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+#endif
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
new file mode 100644
index 000000000..e2706140e
--- /dev/null
+++ b/include/linux/gpio/machine.h
@@ -0,0 +1,61 @@
+#ifndef __LINUX_GPIO_MACHINE_H
+#define __LINUX_GPIO_MACHINE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+enum gpio_lookup_flags {
+ GPIO_ACTIVE_HIGH = (0 << 0),
+ GPIO_ACTIVE_LOW = (1 << 0),
+ GPIO_OPEN_DRAIN = (1 << 1),
+ GPIO_OPEN_SOURCE = (1 << 2),
+};
+
+/**
+ * struct gpiod_lookup - lookup table
+ * @chip_label: name of the chip the GPIO belongs to
+ * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
+ * @con_id: name of the GPIO from the device's point of view
+ * @idx: index of the GPIO in case several GPIOs share the same name
+ * @flags: mask of GPIO_* values
+ *
+ * gpiod_lookup is a lookup table for associating GPIOs to specific devices and
+ * functions using platform data.
+ */
+struct gpiod_lookup {
+ const char *chip_label;
+ u16 chip_hwnum;
+ const char *con_id;
+ unsigned int idx;
+ enum gpio_lookup_flags flags;
+};
+
+struct gpiod_lookup_table {
+ struct list_head list;
+ const char *dev_id;
+ struct gpiod_lookup table[];
+};
+
+/*
+ * Simple definition of a single GPIO under a con_id
+ */
+#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \
+ GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags)
+
+/*
+ * Use this macro if you need to have several GPIOs under the same con_id.
+ * Each GPIO needs to use a different index and can be accessed using
+ * gpiod_get_index()
+ */
+#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \
+{ \
+ .chip_label = _chip_label, \
+ .chip_hwnum = _chip_hwnum, \
+ .con_id = _con_id, \
+ .idx = _idx, \
+ .flags = _flags, \
+}
+
+void gpiod_add_lookup_table(struct gpiod_lookup_table *table);
+
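+/*
+ * Editor's example (hypothetical chip label and device name): a board file
+ * builds a lookup table, terminated by an empty entry because the flexible
+ * table[] array carries no explicit length, and registers it so the matching
+ * device can later resolve its GPIOs via gpiod_get():
+ *
+ *   static struct gpiod_lookup_table foo_gpios_table = {
+ *           .dev_id = "foo.0",
+ *           .table = {
+ *                   GPIO_LOOKUP("gpio-bank0", 17, "enable", GPIO_ACTIVE_HIGH),
+ *                   GPIO_LOOKUP_IDX("gpio-bank0", 18, "data", 0, GPIO_ACTIVE_LOW),
+ *                   { },
+ *           },
+ *   };
+ *
+ *   gpiod_add_lookup_table(&foo_gpios_table);
+ */
+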
+#endif /* __LINUX_GPIO_MACHINE_H */
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
new file mode 100644
index 000000000..ee2d8c6f9
--- /dev/null
+++ b/include/linux/gpio_keys.h
@@ -0,0 +1,58 @@
+#ifndef _GPIO_KEYS_H
+#define _GPIO_KEYS_H
+
+struct device;
+struct gpio_desc;
+
+/**
+ * struct gpio_keys_button - configuration parameters
+ * @code: input event code (KEY_*, SW_*)
+ * @gpio: %-1 if this key does not support gpio
+ * @active_low: %true indicates that button is considered
+ * depressed when gpio is low
+ * @desc: label that will be attached to button's gpio
+ * @type: input event type (%EV_KEY, %EV_SW, %EV_ABS)
+ * @wakeup: configure the button as a wake-up source
+ * @debounce_interval: debounce interval in msecs
+ * @can_disable: %true indicates that userspace is allowed to
+ * disable button via sysfs
+ * @value: axis value for %EV_ABS
+ * @irq: IRQ number in case of interrupt keys
+ * @gpiod: GPIO descriptor
+ */
+struct gpio_keys_button {
+ unsigned int code;
+ int gpio;
+ int active_low;
+ const char *desc;
+ unsigned int type;
+ int wakeup;
+ int debounce_interval;
+ bool can_disable;
+ int value;
+ unsigned int irq;
+ struct gpio_desc *gpiod;
+};
+
+/**
+ * struct gpio_keys_platform_data - platform data for gpio_keys driver
+ * @buttons: pointer to array of &gpio_keys_button structures
+ * describing buttons attached to the device
+ * @nbuttons: number of elements in @buttons array
+ * @poll_interval: polling interval in msecs - for polling driver only
+ * @rep: enable input subsystem auto repeat
+ * @enable: platform hook for enabling the device
+ * @disable: platform hook for disabling the device
+ * @name: input device name
+ */
+struct gpio_keys_platform_data {
+ struct gpio_keys_button *buttons;
+ int nbuttons;
+ unsigned int poll_interval;
+ unsigned int rep:1;
+ int (*enable)(struct device *dev);
+ void (*disable)(struct device *dev);
+ const char *name;
+};
+
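+/*
+ * Editor's example (hypothetical board data; KEY_POWER and EV_KEY come from
+ * <linux/input.h>):
+ *
+ *   static struct gpio_keys_button foo_buttons[] = {
+ *           {
+ *                   .code = KEY_POWER,
+ *                   .gpio = 12,
+ *                   .active_low = 1,
+ *                   .desc = "power",
+ *                   .type = EV_KEY,
+ *                   .wakeup = 1,
+ *                   .debounce_interval = 20,
+ *           },
+ *   };
+ *
+ *   static struct gpio_keys_platform_data foo_button_data = {
+ *           .buttons  = foo_buttons,
+ *           .nbuttons = ARRAY_SIZE(foo_buttons),
+ *   };
+ */
+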
+#endif
diff --git a/include/linux/gpio_mouse.h b/include/linux/gpio_mouse.h
new file mode 100644
index 000000000..44ed7aa14
--- /dev/null
+++ b/include/linux/gpio_mouse.h
@@ -0,0 +1,61 @@
+/*
+ * Driver for simulating a mouse on GPIO lines.
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _GPIO_MOUSE_H
+#define _GPIO_MOUSE_H
+
+#define GPIO_MOUSE_POLARITY_ACT_HIGH 0x00
+#define GPIO_MOUSE_POLARITY_ACT_LOW 0x01
+
+#define GPIO_MOUSE_PIN_UP 0
+#define GPIO_MOUSE_PIN_DOWN 1
+#define GPIO_MOUSE_PIN_LEFT 2
+#define GPIO_MOUSE_PIN_RIGHT 3
+#define GPIO_MOUSE_PIN_BLEFT 4
+#define GPIO_MOUSE_PIN_BMIDDLE 5
+#define GPIO_MOUSE_PIN_BRIGHT 6
+#define GPIO_MOUSE_PIN_MAX 7
+
+/**
+ * struct gpio_mouse_platform_data
+ * @scan_ms: integer in ms specifying the scan period.
+ * @polarity: Pin polarity, active high or low.
+ * @up: GPIO line for up value.
+ * @down: GPIO line for down value.
+ * @left: GPIO line for left value.
+ * @right: GPIO line for right value.
+ * @bleft: GPIO line for left button.
+ * @bmiddle: GPIO line for middle button.
+ * @bright: GPIO line for right button.
+ *
+ * This struct must be added to the platform_device in the board code.
+ * It is used by the gpio_mouse driver to setup GPIO lines and to
+ * calculate mouse movement.
+ */
+struct gpio_mouse_platform_data {
+ int scan_ms;
+ int polarity;
+
+ union {
+ struct {
+ int up;
+ int down;
+ int left;
+ int right;
+
+ int bleft;
+ int bmiddle;
+ int bright;
+ };
+ int pins[GPIO_MOUSE_PIN_MAX];
+ };
+};
+
+#endif /* _GPIO_MOUSE_H */
diff --git a/include/linux/gsmmux.h b/include/linux/gsmmux.h
new file mode 100644
index 000000000..c25e9477f
--- /dev/null
+++ b/include/linux/gsmmux.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_GSMMUX_H
+#define _LINUX_GSMMUX_H
+
+#include <linux/if.h>		/* IFNAMSIZ */
+#include <linux/ioctl.h>	/* _IO/_IOR/_IOW */
+#include <linux/types.h>	/* __u8 */
+
+struct gsm_config
+{
+ unsigned int adaption;
+ unsigned int encapsulation;
+ unsigned int initiator;
+ unsigned int t1;
+ unsigned int t2;
+ unsigned int t3;
+ unsigned int n2;
+ unsigned int mru;
+ unsigned int mtu;
+ unsigned int k;
+ unsigned int i;
+ unsigned int unused[8]; /* Padding for expansion without
+ breaking stuff */
+};
+
+#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
+#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config)
+
+struct gsm_netconfig {
+ unsigned int adaption; /* Adaption to use in network mode */
+ unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */
+ unsigned short unused2;
+ char if_name[IFNAMSIZ]; /* interface name format string */
+ __u8 unused[28]; /* For future use */
+};
+
+#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
+#define GSMIOC_DISABLE_NET _IO('G', 3)
+
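+/*
+ * Editor's usage sketch (userspace side, illustration only): the usual
+ * pattern is read-modify-write of the configuration on a tty that has the
+ * n_gsm line discipline attached:
+ *
+ *   struct gsm_config c;
+ *
+ *   ioctl(fd, GSMIOC_GETCONF, &c);
+ *   c.initiator = 1;
+ *   c.encapsulation = 0;
+ *   ioctl(fd, GSMIOC_SETCONF, &c);
+ */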
+
+#endif
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
new file mode 100644
index 000000000..f4af03404
--- /dev/null
+++ b/include/linux/hardirq.h
@@ -0,0 +1,82 @@
+#ifndef LINUX_HARDIRQ_H
+#define LINUX_HARDIRQ_H
+
+#include <linux/preempt_mask.h>
+#include <linux/lockdep.h>
+#include <linux/ftrace_irq.h>
+#include <linux/vtime.h>
+#include <asm/hardirq.h>
+
+
+extern void synchronize_irq(unsigned int irq);
+extern bool synchronize_hardirq(unsigned int irq);
+
+#if defined(CONFIG_TINY_RCU)
+
+static inline void rcu_nmi_enter(void)
+{
+}
+
+static inline void rcu_nmi_exit(void)
+{
+}
+
+#else
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+#endif
+
+/*
+ * It is safe to do non-atomic ops on ->hardirq_context,
+ * because NMI handlers may not preempt and the ops are
+ * always balanced, so the interrupted value of ->hardirq_context
+ * will always be restored.
+ */
+#define __irq_enter() \
+ do { \
+ account_irq_enter_time(current); \
+ preempt_count_add(HARDIRQ_OFFSET); \
+ trace_hardirq_enter(); \
+ } while (0)
+
+/*
+ * Enter irq context (on NO_HZ, update jiffies):
+ */
+extern void irq_enter(void);
+
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit() \
+ do { \
+ trace_hardirq_exit(); \
+ account_irq_exit_time(current); \
+ preempt_count_sub(HARDIRQ_OFFSET); \
+ } while (0)
+
+/*
+ * Exit irq context and process softirqs if needed:
+ */
+extern void irq_exit(void);
+
+#define nmi_enter() \
+ do { \
+ lockdep_off(); \
+ ftrace_nmi_enter(); \
+ BUG_ON(in_nmi()); \
+ preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+ rcu_nmi_enter(); \
+ trace_hardirq_enter(); \
+ } while (0)
+
+#define nmi_exit() \
+ do { \
+ trace_hardirq_exit(); \
+ rcu_nmi_exit(); \
+ BUG_ON(!in_nmi()); \
+ preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+ ftrace_nmi_exit(); \
+ lockdep_on(); \
+ } while (0)
+
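+/*
+ * Editor's note (illustration only): architecture NMI handlers are expected
+ * to bracket their work with these helpers, e.g.
+ *
+ *   nmi_enter();
+ *   ... handle the NMI ...
+ *   nmi_exit();
+ */
+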
+#endif /* LINUX_HARDIRQ_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
new file mode 100644
index 000000000..1afde47e1
--- /dev/null
+++ b/include/linux/hash.h
@@ -0,0 +1,86 @@
+#ifndef _LINUX_HASH_H
+#define _LINUX_HASH_H
+/* Fast hashing routine for ints, longs and pointers.
+ (C) 2002 Nadia Yvette Chambers, IBM */
+
+/*
+ * Knuth recommends primes in approximately golden ratio to the maximum
+ * integer representable by a machine word for multiplicative hashing.
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * These primes are chosen to be bit-sparse, that is, operations on
+ * them can use shifts and additions instead of multiplications for
+ * machines where multiplications are slow.
+ */
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
+#else
+#error Wordsize not 32 or 64
+#endif
+
+static __always_inline u64 hash_64(u64 val, unsigned int bits)
+{
+ u64 hash = val;
+
+#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+ hash = hash * GOLDEN_RATIO_PRIME_64;
+#else
+ /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ u64 n = hash;
+ n <<= 18;
+ hash -= n;
+ n <<= 33;
+ hash -= n;
+ n <<= 3;
+ hash += n;
+ n <<= 3;
+ hash -= n;
+ n <<= 4;
+ hash += n;
+ n <<= 2;
+ hash += n;
+#endif
+
+ /* High bits are more random, so use them. */
+ return hash >> (64 - bits);
+}
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
+ /* On some cpus multiply is faster, on others gcc will do shifts */
+ u32 hash = val * GOLDEN_RATIO_PRIME_32;
+
+ /* High bits are more random, so use them. */
+ return hash >> (32 - bits);
+}
+
+static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
+{
+ return hash_long((unsigned long)ptr, bits);
+}
+
+static inline u32 hash32_ptr(const void *ptr)
+{
+ unsigned long val = (unsigned long)ptr;
+
+#if BITS_PER_LONG == 64
+ val ^= (val >> 32);
+#endif
+ return (u32)val;
+}
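+
+/*
+ * Editor's usage sketch (illustration only): callers fold a value into an
+ * N-bit bucket index, e.g.
+ *
+ *   u32 bkt  = hash_ptr(ptr, 10);           index in 0 .. 1023
+ *   u32 bkt2 = hash_32(some_u32_key, 8);    index in 0 .. 255
+ */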
+
+#endif /* _LINUX_HASH_H */
diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h
new file mode 100644
index 000000000..519b6e2d7
--- /dev/null
+++ b/include/linux/hashtable.h
@@ -0,0 +1,205 @@
+/*
+ * Statically sized hash table implementation
+ * (C) 2012 Sasha Levin <levinsasha928@gmail.com>
+ */
+
+#ifndef _LINUX_HASHTABLE_H
+#define _LINUX_HASHTABLE_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/hash.h>
+#include <linux/rculist.h>
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32-bit hashing in 64-bit kernels. */
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+/**
+ * hash_init - initialize a hash table
+ * @hashtable: hashtable to be initialized
+ *
+ * Calculates the size of the hashtable from the given parameter and
+ * initializes every bucket to an empty hlist.
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_add - add an object to a hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_add_rcu - add an object to a rcu enabled hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add_rcu(hashtable, node, key) \
+ hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_hashed - check whether an object is in any hashtable
+ * @node: the &struct hlist_node of the object to be checked
+ */
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * hash_empty - check whether a hashtable is empty
+ * @hashtable: hashtable to check
+ *
+ * This has to be a macro since HASH_BITS() will not work on pointers since
+ * it calculates the size during preprocessing.
+ */
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_del - remove an object from a hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+
+/**
+ * hash_del_rcu - remove an object from a rcu enabled hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del_rcu(struct hlist_node *node)
+{
+ hlist_del_init_rcu(node);
+}
+
+/**
+ * hash_for_each - iterate over a hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry(obj, &name[bkt], member)
+
+/**
+ * hash_for_each_rcu - iterate over a rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_rcu(name, bkt, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_rcu(obj, &name[bkt], member)
+
+/**
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
+ * hash entry
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @tmp: a &struct used for temporary storage
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_safe(name, bkt, tmp, obj, member) \
+ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
+ (bkt)++)\
+ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
+
+/**
+ * hash_for_each_possible - iterate over all possible objects hashing to the
+ * same bucket
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
+ * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
+ * same bucket in an rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_rcu(name, obj, member, key) \
+ hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
+ member)
+
+/**
+ * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
+ * to the same bucket in an rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ *
+ * This is the same as hash_for_each_possible_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
+ hlist_for_each_entry_rcu_notrace(obj, \
+ &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
+ * hash_for_each_possible_safe - iterate over all possible objects hashing to the
+ * same bucket safe against removals
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @tmp: a &struct used for temporary storage
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
+ hlist_for_each_entry_safe(obj, tmp,\
+ &name[hash_min(key, HASH_BITS(name))], member)
+
+
+#endif
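
A minimal usage sketch of the iteration helpers above (illustrative only, not
part of the patch; struct my_obj, my_table and the 8-bucket size are
assumptions):

	#include <linux/hashtable.h>
	#include <linux/printk.h>

	struct my_obj {
		int key;
		int value;
		struct hlist_node node;		/* the @member argument below */
	};

	static DEFINE_HASHTABLE(my_table, 3);	/* 2^3 = 8 buckets */

	static void my_demo(struct my_obj *obj)
	{
		struct my_obj *cur;
		struct hlist_node *tmp;
		int bkt;

		/* insert under the object's key */
		hash_add(my_table, &obj->node, obj->key);

		/* walk every entry in every bucket */
		hash_for_each(my_table, bkt, cur, node)
			pr_info("key=%d value=%d\n", cur->key, cur->value);

		/* walk only the bucket that obj->key hashes to */
		hash_for_each_possible(my_table, cur, node, obj->key)
			if (cur->key == obj->key)
				pr_info("found %d\n", cur->value);

		/* removal during iteration needs the _safe variant */
		hash_for_each_safe(my_table, bkt, tmp, cur, node)
			if (cur->value < 0)
				hash_del(&cur->node);
	}
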
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
new file mode 100644
index 000000000..1acb1445e
--- /dev/null
+++ b/include/linux/hdlc.h
@@ -0,0 +1,120 @@
+/*
+ * Generic HDLC support routines for Linux
+ *
+ * Copyright (C) 1999-2005 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+#ifndef __HDLC_H
+#define __HDLC_H
+
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/hdlc/ioctl.h>
+#include <uapi/linux/hdlc.h>
+
+/* This structure is a private property of HDLC protocols.
+ Hardware drivers have no interest here */
+
+struct hdlc_proto {
+ int (*open)(struct net_device *dev);
+ void (*close)(struct net_device *dev);
+ void (*start)(struct net_device *dev); /* if open & DCD */
+ void (*stop)(struct net_device *dev); /* if open & !DCD */
+ void (*detach)(struct net_device *dev);
+ int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
+ __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
+ int (*netif_rx)(struct sk_buff *skb);
+ netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+ struct module *module;
+ struct hdlc_proto *next; /* next protocol in the list */
+};
+
+
+/* Pointed to by netdev_priv(dev) */
+typedef struct hdlc_device {
+ /* used by HDLC layer to take control over HDLC device from hw driver*/
+ int (*attach)(struct net_device *dev,
+ unsigned short encoding, unsigned short parity);
+
+ /* hardware driver must handle this instead of dev->hard_start_xmit */
+ netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+
+ /* Things below are for HDLC layer internal use only */
+ const struct hdlc_proto *proto;
+ int carrier;
+ int open;
+ spinlock_t state_lock;
+ void *state;
+ void *priv;
+} hdlc_device;
+
+
+
+/* Exported from hdlc module */
+
+/* Called by hardware driver when a user requests HDLC service */
+int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+/* Must be used by hardware driver on module startup/exit */
+#define register_hdlc_device(dev) register_netdev(dev)
+void unregister_hdlc_device(struct net_device *dev);
+
+
+void register_hdlc_protocol(struct hdlc_proto *proto);
+void unregister_hdlc_protocol(struct hdlc_proto *proto);
+
+struct net_device *alloc_hdlcdev(void *priv);
+
+static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static __inline__ void debug_frame(const struct sk_buff *skb)
+{
+ int i;
+
+ for (i=0; i < skb->len; i++) {
+ if (i == 100) {
+ printk("...\n");
+ return;
+ }
+ printk(" %02X", skb->data[i]);
+ }
+ printk("\n");
+}
+
+
+/* Must be called by hardware driver when HDLC device is being opened */
+int hdlc_open(struct net_device *dev);
+/* Must be called by hardware driver when HDLC device is being closed */
+void hdlc_close(struct net_device *dev);
+/* May be used by hardware driver */
+int hdlc_change_mtu(struct net_device *dev, int new_mtu);
+/* Must be pointed to by hw driver's dev->netdev_ops->ndo_start_xmit */
+netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
+ size_t size);
+/* May be used by hardware driver to gain control over HDLC device */
+void detach_hdlc_protocol(struct net_device *dev);
+
+static __inline__ __be16 hdlc_type_trans(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ hdlc_device *hdlc = dev_to_hdlc(dev);
+
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+
+ if (hdlc->proto->type_trans)
+ return hdlc->proto->type_trans(skb, dev);
+ else
+ return htons(ETH_P_HDLC);
+}
+
+#endif /* __HDLC_H */
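
A rough sketch of how a hardware driver is expected to wire itself to the API
above (my_attach, my_hw_xmit, my_open, my_netdev_ops and my_probe are
hypothetical names, not part of the patch):

	#include <linux/errno.h>
	#include <linux/hdlc.h>

	static int my_attach(struct net_device *dev, unsigned short encoding,
			     unsigned short parity)
	{
		return 0;			/* program the hardware framer here */
	}

	static netdev_tx_t my_hw_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		dev_kfree_skb(skb);		/* a real driver queues to hardware here */
		return NETDEV_TX_OK;
	}

	static int my_open(struct net_device *dev)
	{
		/* bring the hardware up first, then hand control to the HDLC layer */
		return hdlc_open(dev);
	}

	static const struct net_device_ops my_netdev_ops = {
		.ndo_open	= my_open,
		.ndo_start_xmit	= hdlc_start_xmit,	/* as required above */
		.ndo_do_ioctl	= hdlc_ioctl,
		.ndo_change_mtu	= hdlc_change_mtu,
	};

	static int my_probe(void *priv)
	{
		struct net_device *dev = alloc_hdlcdev(priv);
		hdlc_device *hdlc;

		if (!dev)
			return -ENOMEM;

		hdlc = dev_to_hdlc(dev);
		hdlc->attach = my_attach;	/* encoding/parity setup */
		hdlc->xmit = my_hw_xmit;	/* the real hardware transmit */
		dev->netdev_ops = &my_netdev_ops;

		return register_hdlc_device(dev);
	}
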
diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h
new file mode 100644
index 000000000..be3be25bb
--- /dev/null
+++ b/include/linux/hdlcdrv.h
@@ -0,0 +1,275 @@
+/*
+ * hdlcdrv.h -- HDLC packet radio network driver.
+ * The Linux soundcard driver for 1200 baud and 9600 baud packet radio
+ * (C) 1996-1998 by Thomas Sailer, HB9JNX/AE4WA
+ */
+#ifndef _HDLCDRV_H
+#define _HDLCDRV_H
+
+
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/spinlock.h>
+#include <uapi/linux/hdlcdrv.h>
+
+#define HDLCDRV_MAGIC 0x5ac6e778
+#define HDLCDRV_HDLCBUFFER 32 /* should be a power of 2 for speed reasons */
+#define HDLCDRV_BITBUFFER 256 /* should be a power of 2 for speed reasons */
+#undef HDLCDRV_LOOPBACK /* define for HDLC debugging purposes */
+#define HDLCDRV_DEBUG
+
+/* maximum packet length, excluding CRC */
+#define HDLCDRV_MAXFLEN 400
+
+
+struct hdlcdrv_hdlcbuffer {
+ spinlock_t lock;
+ unsigned rd, wr;
+ unsigned short buf[HDLCDRV_HDLCBUFFER];
+};
+
+#ifdef HDLCDRV_DEBUG
+struct hdlcdrv_bitbuffer {
+ unsigned int rd;
+ unsigned int wr;
+ unsigned int shreg;
+ unsigned char buffer[HDLCDRV_BITBUFFER];
+};
+
+static inline void hdlcdrv_add_bitbuffer(struct hdlcdrv_bitbuffer *buf,
+ unsigned int bit)
+{
+ unsigned char new;
+
+ new = buf->shreg & 1;
+ buf->shreg >>= 1;
+ buf->shreg |= (!!bit) << 7;
+ if (new) {
+ buf->buffer[buf->wr] = buf->shreg;
+ buf->wr = (buf->wr+1) % sizeof(buf->buffer);
+ buf->shreg = 0x80;
+ }
+}
+
+static inline void hdlcdrv_add_bitbuffer_word(struct hdlcdrv_bitbuffer *buf,
+ unsigned int bits)
+{
+ buf->buffer[buf->wr] = bits & 0xff;
+ buf->wr = (buf->wr+1) % sizeof(buf->buffer);
+ buf->buffer[buf->wr] = (bits >> 8) & 0xff;
+ buf->wr = (buf->wr+1) % sizeof(buf->buffer);
+
+}
+#endif /* HDLCDRV_DEBUG */
+
+/* -------------------------------------------------------------------- */
+/*
+ * Information that needs to be kept for each driver.
+ */
+
+struct hdlcdrv_ops {
+ /*
+ * first some information needed by the hdlcdrv routines
+ */
+ const char *drvname;
+ const char *drvinfo;
+ /*
+ * the routines called by the hdlcdrv routines
+ */
+ int (*open)(struct net_device *);
+ int (*close)(struct net_device *);
+ int (*ioctl)(struct net_device *, struct ifreq *,
+ struct hdlcdrv_ioctl *, int);
+};
+
+struct hdlcdrv_state {
+ int magic;
+ int opened;
+
+ const struct hdlcdrv_ops *ops;
+
+ struct {
+ int bitrate;
+ } par;
+
+ struct hdlcdrv_pttoutput {
+ int dma2;
+ int seriobase;
+ int pariobase;
+ int midiiobase;
+ unsigned int flags;
+ } ptt_out;
+
+ struct hdlcdrv_channel_params ch_params;
+
+ struct hdlcdrv_hdlcrx {
+ struct hdlcdrv_hdlcbuffer hbuf;
+ unsigned long in_hdlc_rx;
+ /* 0 = sync hunt, != 0 receiving */
+ int rx_state;
+ unsigned int bitstream;
+ unsigned int bitbuf;
+ int numbits;
+ unsigned char dcd;
+
+ int len;
+ unsigned char *bp;
+ unsigned char buffer[HDLCDRV_MAXFLEN+2];
+ } hdlcrx;
+
+ struct hdlcdrv_hdlctx {
+ struct hdlcdrv_hdlcbuffer hbuf;
+ unsigned long in_hdlc_tx;
+ /*
+ * 0 = send flags
+ * 1 = send txtail (flags)
+ * 2 = send packet
+ */
+ int tx_state;
+ int numflags;
+ unsigned int bitstream;
+ unsigned char ptt;
+ int calibrate;
+ int slotcnt;
+
+ unsigned int bitbuf;
+ int numbits;
+
+ int len;
+ unsigned char *bp;
+ unsigned char buffer[HDLCDRV_MAXFLEN+2];
+ } hdlctx;
+
+#ifdef HDLCDRV_DEBUG
+ struct hdlcdrv_bitbuffer bitbuf_channel;
+ struct hdlcdrv_bitbuffer bitbuf_hdlc;
+#endif /* HDLCDRV_DEBUG */
+
+ int ptt_keyed;
+
+ /* queued skb for transmission */
+ struct sk_buff *skb;
+};
+
+
+/* -------------------------------------------------------------------- */
+
+static inline int hdlcdrv_hbuf_full(struct hdlcdrv_hdlcbuffer *hb)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hb->lock, flags);
+ ret = !((HDLCDRV_HDLCBUFFER - 1 + hb->rd - hb->wr) % HDLCDRV_HDLCBUFFER);
+ spin_unlock_irqrestore(&hb->lock, flags);
+ return ret;
+}
+
+/* -------------------------------------------------------------------- */
+
+static inline int hdlcdrv_hbuf_empty(struct hdlcdrv_hdlcbuffer *hb)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hb->lock, flags);
+ ret = (hb->rd == hb->wr);
+ spin_unlock_irqrestore(&hb->lock, flags);
+ return ret;
+}
+
+/* -------------------------------------------------------------------- */
+
+static inline unsigned short hdlcdrv_hbuf_get(struct hdlcdrv_hdlcbuffer *hb)
+{
+ unsigned long flags;
+ unsigned short val;
+ unsigned newr;
+
+ spin_lock_irqsave(&hb->lock, flags);
+ if (hb->rd == hb->wr)
+ val = 0;
+ else {
+ newr = (hb->rd+1) % HDLCDRV_HDLCBUFFER;
+ val = hb->buf[hb->rd];
+ hb->rd = newr;
+ }
+ spin_unlock_irqrestore(&hb->lock, flags);
+ return val;
+}
+
+/* -------------------------------------------------------------------- */
+
+static inline void hdlcdrv_hbuf_put(struct hdlcdrv_hdlcbuffer *hb,
+ unsigned short val)
+{
+ unsigned newp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hb->lock, flags);
+ newp = (hb->wr+1) % HDLCDRV_HDLCBUFFER;
+ if (newp != hb->rd) {
+ hb->buf[hb->wr] = val & 0xffff;
+ hb->wr = newp;
+ }
+ spin_unlock_irqrestore(&hb->lock, flags);
+}
+
+/* -------------------------------------------------------------------- */
+
+static inline void hdlcdrv_putbits(struct hdlcdrv_state *s, unsigned int bits)
+{
+ hdlcdrv_hbuf_put(&s->hdlcrx.hbuf, bits);
+}
+
+static inline unsigned int hdlcdrv_getbits(struct hdlcdrv_state *s)
+{
+ unsigned int ret;
+
+ if (hdlcdrv_hbuf_empty(&s->hdlctx.hbuf)) {
+ if (s->hdlctx.calibrate > 0)
+ s->hdlctx.calibrate--;
+ else
+ s->hdlctx.ptt = 0;
+ ret = 0;
+ } else
+ ret = hdlcdrv_hbuf_get(&s->hdlctx.hbuf);
+#ifdef HDLCDRV_LOOPBACK
+ hdlcdrv_hbuf_put(&s->hdlcrx.hbuf, ret);
+#endif /* HDLCDRV_LOOPBACK */
+ return ret;
+}
+
+static inline void hdlcdrv_channelbit(struct hdlcdrv_state *s, unsigned int bit)
+{
+#ifdef HDLCDRV_DEBUG
+ hdlcdrv_add_bitbuffer(&s->bitbuf_channel, bit);
+#endif /* HDLCDRV_DEBUG */
+}
+
+static inline void hdlcdrv_setdcd(struct hdlcdrv_state *s, int dcd)
+{
+ s->hdlcrx.dcd = !!dcd;
+}
+
+static inline int hdlcdrv_ptt(struct hdlcdrv_state *s)
+{
+ return s->hdlctx.ptt || (s->hdlctx.calibrate > 0);
+}
+
+/* -------------------------------------------------------------------- */
+
+void hdlcdrv_receiver(struct net_device *, struct hdlcdrv_state *);
+void hdlcdrv_transmitter(struct net_device *, struct hdlcdrv_state *);
+void hdlcdrv_arbitrate(struct net_device *, struct hdlcdrv_state *);
+struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops,
+ unsigned int privsize, const char *ifname,
+ unsigned int baseaddr, unsigned int irq,
+ unsigned int dma);
+void hdlcdrv_unregister(struct net_device *dev);
+
+/* -------------------------------------------------------------------- */
+
+
+
+#endif /* _HDLCDRV_H */
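
A bare-bones registration sketch for a modem driver built on these helpers
(my_modem_state, my_ops and the "mymodem0" name are assumptions; base address,
IRQ and DMA are left at 0 for brevity):

	#include <linux/hdlcdrv.h>

	struct my_modem_state {
		struct hdlcdrv_state hdrv;	/* hdlcdrv state must come first */
		/* modem-specific fields follow */
	};

	static int my_open(struct net_device *dev)  { return 0; }
	static int my_close(struct net_device *dev) { return 0; }

	static const struct hdlcdrv_ops my_ops = {
		.drvname = "mymodem",
		.drvinfo = "example modem sketch",
		.open	 = my_open,
		.close	 = my_close,
	};

	static struct net_device *my_register(void)
	{
		return hdlcdrv_register(&my_ops, sizeof(struct my_modem_state),
					"mymodem0", 0, 0, 0);
	}
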
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
new file mode 100644
index 000000000..e9744202f
--- /dev/null
+++ b/include/linux/hdmi.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_HDMI_H_
+#define __LINUX_HDMI_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum hdmi_infoframe_type {
+ HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
+ HDMI_INFOFRAME_TYPE_AVI = 0x82,
+ HDMI_INFOFRAME_TYPE_SPD = 0x83,
+ HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
+};
+
+#define HDMI_IEEE_OUI 0x000c03
+#define HDMI_INFOFRAME_HEADER_SIZE 4
+#define HDMI_AVI_INFOFRAME_SIZE 13
+#define HDMI_SPD_INFOFRAME_SIZE 25
+#define HDMI_AUDIO_INFOFRAME_SIZE 10
+
+#define HDMI_INFOFRAME_SIZE(type) \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
+
+struct hdmi_any_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+};
+
+enum hdmi_colorspace {
+ HDMI_COLORSPACE_RGB,
+ HDMI_COLORSPACE_YUV422,
+ HDMI_COLORSPACE_YUV444,
+ HDMI_COLORSPACE_YUV420,
+ HDMI_COLORSPACE_RESERVED4,
+ HDMI_COLORSPACE_RESERVED5,
+ HDMI_COLORSPACE_RESERVED6,
+ HDMI_COLORSPACE_IDO_DEFINED,
+};
+
+enum hdmi_scan_mode {
+ HDMI_SCAN_MODE_NONE,
+ HDMI_SCAN_MODE_OVERSCAN,
+ HDMI_SCAN_MODE_UNDERSCAN,
+ HDMI_SCAN_MODE_RESERVED,
+};
+
+enum hdmi_colorimetry {
+ HDMI_COLORIMETRY_NONE,
+ HDMI_COLORIMETRY_ITU_601,
+ HDMI_COLORIMETRY_ITU_709,
+ HDMI_COLORIMETRY_EXTENDED,
+};
+
+enum hdmi_picture_aspect {
+ HDMI_PICTURE_ASPECT_NONE,
+ HDMI_PICTURE_ASPECT_4_3,
+ HDMI_PICTURE_ASPECT_16_9,
+ HDMI_PICTURE_ASPECT_RESERVED,
+};
+
+enum hdmi_active_aspect {
+ HDMI_ACTIVE_ASPECT_16_9_TOP = 2,
+ HDMI_ACTIVE_ASPECT_14_9_TOP = 3,
+ HDMI_ACTIVE_ASPECT_16_9_CENTER = 4,
+ HDMI_ACTIVE_ASPECT_PICTURE = 8,
+ HDMI_ACTIVE_ASPECT_4_3 = 9,
+ HDMI_ACTIVE_ASPECT_16_9 = 10,
+ HDMI_ACTIVE_ASPECT_14_9 = 11,
+ HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13,
+ HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14,
+ HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15,
+};
+
+enum hdmi_extended_colorimetry {
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
+ HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
+ HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
+
+ /* The following EC values are only defined in CEA-861-F. */
+ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
+ HDMI_EXTENDED_COLORIMETRY_BT2020,
+ HDMI_EXTENDED_COLORIMETRY_RESERVED,
+};
+
+enum hdmi_quantization_range {
+ HDMI_QUANTIZATION_RANGE_DEFAULT,
+ HDMI_QUANTIZATION_RANGE_LIMITED,
+ HDMI_QUANTIZATION_RANGE_FULL,
+ HDMI_QUANTIZATION_RANGE_RESERVED,
+};
+
+/* non-uniform picture scaling */
+enum hdmi_nups {
+ HDMI_NUPS_UNKNOWN,
+ HDMI_NUPS_HORIZONTAL,
+ HDMI_NUPS_VERTICAL,
+ HDMI_NUPS_BOTH,
+};
+
+enum hdmi_ycc_quantization_range {
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED,
+ HDMI_YCC_QUANTIZATION_RANGE_FULL,
+};
+
+enum hdmi_content_type {
+ HDMI_CONTENT_TYPE_GRAPHICS,
+ HDMI_CONTENT_TYPE_PHOTO,
+ HDMI_CONTENT_TYPE_CINEMA,
+ HDMI_CONTENT_TYPE_GAME,
+};
+
+struct hdmi_avi_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ enum hdmi_colorspace colorspace;
+ enum hdmi_scan_mode scan_mode;
+ enum hdmi_colorimetry colorimetry;
+ enum hdmi_picture_aspect picture_aspect;
+ enum hdmi_active_aspect active_aspect;
+ bool itc;
+ enum hdmi_extended_colorimetry extended_colorimetry;
+ enum hdmi_quantization_range quantization_range;
+ enum hdmi_nups nups;
+ unsigned char video_code;
+ enum hdmi_ycc_quantization_range ycc_quantization_range;
+ enum hdmi_content_type content_type;
+ unsigned char pixel_repeat;
+ unsigned short top_bar;
+ unsigned short bottom_bar;
+ unsigned short left_bar;
+ unsigned short right_bar;
+};
+
+int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
+ size_t size);
+
+enum hdmi_spd_sdi {
+ HDMI_SPD_SDI_UNKNOWN,
+ HDMI_SPD_SDI_DSTB,
+ HDMI_SPD_SDI_DVDP,
+ HDMI_SPD_SDI_DVHS,
+ HDMI_SPD_SDI_HDDVR,
+ HDMI_SPD_SDI_DVC,
+ HDMI_SPD_SDI_DSC,
+ HDMI_SPD_SDI_VCD,
+ HDMI_SPD_SDI_GAME,
+ HDMI_SPD_SDI_PC,
+ HDMI_SPD_SDI_BD,
+ HDMI_SPD_SDI_SACD,
+ HDMI_SPD_SDI_HDDVD,
+ HDMI_SPD_SDI_PMP,
+};
+
+struct hdmi_spd_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ char vendor[8];
+ char product[16];
+ enum hdmi_spd_sdi sdi;
+};
+
+int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
+ const char *vendor, const char *product);
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
+ size_t size);
+
+enum hdmi_audio_coding_type {
+ HDMI_AUDIO_CODING_TYPE_STREAM,
+ HDMI_AUDIO_CODING_TYPE_PCM,
+ HDMI_AUDIO_CODING_TYPE_AC3,
+ HDMI_AUDIO_CODING_TYPE_MPEG1,
+ HDMI_AUDIO_CODING_TYPE_MP3,
+ HDMI_AUDIO_CODING_TYPE_MPEG2,
+ HDMI_AUDIO_CODING_TYPE_AAC_LC,
+ HDMI_AUDIO_CODING_TYPE_DTS,
+ HDMI_AUDIO_CODING_TYPE_ATRAC,
+ HDMI_AUDIO_CODING_TYPE_DSD,
+ HDMI_AUDIO_CODING_TYPE_EAC3,
+ HDMI_AUDIO_CODING_TYPE_DTS_HD,
+ HDMI_AUDIO_CODING_TYPE_MLP,
+ HDMI_AUDIO_CODING_TYPE_DST,
+ HDMI_AUDIO_CODING_TYPE_WMA_PRO,
+ HDMI_AUDIO_CODING_TYPE_CXT,
+};
+
+enum hdmi_audio_sample_size {
+ HDMI_AUDIO_SAMPLE_SIZE_STREAM,
+ HDMI_AUDIO_SAMPLE_SIZE_16,
+ HDMI_AUDIO_SAMPLE_SIZE_20,
+ HDMI_AUDIO_SAMPLE_SIZE_24,
+};
+
+enum hdmi_audio_sample_frequency {
+ HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_32000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_44100,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_48000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_88200,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_96000,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_176400,
+ HDMI_AUDIO_SAMPLE_FREQUENCY_192000,
+};
+
+enum hdmi_audio_coding_type_ext {
+ /* Refer to Audio Coding Type (CT) field in Data Byte 1 */
+ HDMI_AUDIO_CODING_TYPE_EXT_CT,
+
+ /*
+ * The next three CXT values are defined in CEA-861-E only.
+ * They do not exist in older versions, and in CEA-861-F they are
+ * defined as 'Not in use'.
+ */
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
+
+ /* The following CXT values are only defined in CEA-861-F. */
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC,
+ HDMI_AUDIO_CODING_TYPE_EXT_DRA,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND,
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
+};
+
+struct hdmi_audio_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned char channels;
+ enum hdmi_audio_coding_type coding_type;
+ enum hdmi_audio_sample_size sample_size;
+ enum hdmi_audio_sample_frequency sample_frequency;
+ enum hdmi_audio_coding_type_ext coding_type_ext;
+ unsigned char channel_allocation;
+ unsigned char level_shift_value;
+ bool downmix_inhibit;
+
+};
+
+int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+
+enum hdmi_3d_structure {
+ HDMI_3D_STRUCTURE_INVALID = -1,
+ HDMI_3D_STRUCTURE_FRAME_PACKING = 0,
+ HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE,
+ HDMI_3D_STRUCTURE_LINE_ALTERNATIVE,
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL,
+ HDMI_3D_STRUCTURE_L_DEPTH,
+ HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH,
+ HDMI_3D_STRUCTURE_TOP_AND_BOTTOM,
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8,
+};
+
+
+struct hdmi_vendor_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned int oui;
+ u8 vic;
+ enum hdmi_3d_structure s3d_struct;
+ unsigned int s3d_ext_data;
+};
+
+int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+
+union hdmi_vendor_any_infoframe {
+ struct {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned int oui;
+ } any;
+ struct hdmi_vendor_infoframe hdmi;
+};
+
+/**
+ * union hdmi_infoframe - overall union of all abstract infoframe representations
+ * @any: generic infoframe
+ * @avi: avi infoframe
+ * @spd: spd infoframe
+ * @vendor: union of all vendor infoframes
+ * @audio: audio infoframe
+ *
+ * This is used by the generic pack function. This works since all infoframes
+ * have the same header which also indicates which type of infoframe should be
+ * packed.
+ */
+union hdmi_infoframe {
+ struct hdmi_any_infoframe any;
+ struct hdmi_avi_infoframe avi;
+ struct hdmi_spd_infoframe spd;
+ union hdmi_vendor_any_infoframe vendor;
+ struct hdmi_audio_infoframe audio;
+};
+
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+void hdmi_infoframe_log(const char *level, struct device *dev,
+ union hdmi_infoframe *frame);
+
+#endif /* __LINUX_HDMI_H_ */
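
A short sketch of packing an AVI infoframe with the helpers declared above
(my_pack_avi is hypothetical and the video code is a placeholder):

	#include <linux/hdmi.h>

	static ssize_t my_pack_avi(void *buf, size_t len)
	{
		struct hdmi_avi_infoframe frame;
		int ret;

		ret = hdmi_avi_infoframe_init(&frame);
		if (ret < 0)
			return ret;

		frame.colorspace = HDMI_COLORSPACE_RGB;
		frame.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
		frame.video_code = 16;		/* placeholder VIC */

		/* len must be at least HDMI_INFOFRAME_SIZE(AVI) bytes */
		return hdmi_avi_infoframe_pack(&frame, buf, len);
	}
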
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
new file mode 100644
index 000000000..8663f216c
--- /dev/null
+++ b/include/linux/hid-debug.h
@@ -0,0 +1,67 @@
+#ifndef __HID_DEBUG_H
+#define __HID_DEBUG_H
+
+/*
+ * Copyright (c) 2007-2009 Jiri Kosina
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#define HID_DEBUG_BUFSIZE 512
+
+void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
+void hid_dump_report(struct hid_device *, int , u8 *, int);
+void hid_dump_device(struct hid_device *, struct seq_file *);
+void hid_dump_field(struct hid_field *, int, struct seq_file *);
+char *hid_resolv_usage(unsigned, struct seq_file *);
+void hid_debug_register(struct hid_device *, const char *);
+void hid_debug_unregister(struct hid_device *);
+void hid_debug_init(void);
+void hid_debug_exit(void);
+void hid_debug_event(struct hid_device *, char *);
+
+
+struct hid_debug_list {
+ char *hid_debug_buf;
+ int head;
+ int tail;
+ struct fasync_struct *fasync;
+ struct hid_device *hdev;
+ struct list_head node;
+ struct mutex read_mutex;
+};
+
+#else
+
+#define hid_dump_input(a,b,c) do { } while (0)
+#define hid_dump_report(a,b,c,d) do { } while (0)
+#define hid_dump_device(a,b) do { } while (0)
+#define hid_dump_field(a,b,c) do { } while (0)
+#define hid_resolv_usage(a,b) do { } while (0)
+#define hid_debug_register(a, b) do { } while (0)
+#define hid_debug_unregister(a) do { } while (0)
+#define hid_debug_init() do { } while (0)
+#define hid_debug_exit() do { } while (0)
+#define hid_debug_event(a,b) do { } while (0)
+
+#endif
+
+#endif
+
diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h
new file mode 100644
index 000000000..24e1ca01f
--- /dev/null
+++ b/include/linux/hid-roccat.h
@@ -0,0 +1,29 @@
+#ifndef __HID_ROCCAT_H
+#define __HID_ROCCAT_H
+
+/*
+ * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/hid.h>
+#include <linux/types.h>
+
+#define ROCCATIOCGREPSIZE _IOR('H', 0xf1, int)
+
+#ifdef __KERNEL__
+
+int roccat_connect(struct class *klass, struct hid_device *hid,
+ int report_size);
+void roccat_disconnect(int minor);
+int roccat_report_event(int minor, u8 const *data);
+
+#endif
+
+#endif
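
Typical use from a Roccat device driver looks roughly like this (my_class, the
64-byte report size and the my_* helpers are assumptions):

	#include <linux/hid-roccat.h>

	static int my_minor = -1;

	static int my_init_chardev(struct class *my_class, struct hid_device *hdev)
	{
		int retval = roccat_connect(my_class, hdev, 64);

		if (retval < 0)
			return retval;	/* chardev unavailable */
		my_minor = retval;
		return 0;
	}

	static void my_forward_report(u8 const *data)
	{
		if (my_minor >= 0)
			roccat_report_event(my_minor, data);
	}

	static void my_remove_chardev(void)
	{
		if (my_minor >= 0)
			roccat_disconnect(my_minor);
	}
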
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
new file mode 100644
index 000000000..c02b5ce6c
--- /dev/null
+++ b/include/linux/hid-sensor-hub.h
@@ -0,0 +1,273 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _HID_SENSORS_HUB_H
+#define _HID_SENSORS_HUB_H
+
+#include <linux/hid.h>
+#include <linux/hid-sensor-ids.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+
+/**
+ * struct hid_sensor_hub_attribute_info - Attribute info
+ * @usage_id: Parent usage id of a physical device.
+ * @attrib_id: Attribute id for this attribute.
+ * @report_id: Report id in which this information resides.
+ * @index: Field index in the report.
+ * @units: Measurement unit for this attribute.
+ * @unit_expo: Exponent used in the data.
+ * @size: Size of the attribute data, in bytes.
+ * @logical_minimum: Logical minimum value for this attribute.
+ * @logical_maximum: Logical maximum value for this attribute.
+ */
+struct hid_sensor_hub_attribute_info {
+ u32 usage_id;
+ u32 attrib_id;
+ s32 report_id;
+ s32 index;
+ s32 units;
+ s32 unit_expo;
+ s32 size;
+ s32 logical_minimum;
+ s32 logical_maximum;
+};
+
+/**
+ * struct sensor_hub_pending - Synchronous read pending information
+ * @status: Pending status true/false.
+ * @ready: Completion synchronization data.
+ * @usage_id: Usage id for physical device, E.g. Gyro usage id.
+ * @attr_usage_id: Usage Id of a field, E.g. X-AXIS for a gyro.
+ * @raw_size: Response size for a read request.
+ * @raw_data: Place holder for received response.
+ */
+struct sensor_hub_pending {
+ bool status;
+ struct completion ready;
+ u32 usage_id;
+ u32 attr_usage_id;
+ int raw_size;
+ u8 *raw_data;
+};
+
+/**
+ * struct hid_sensor_hub_device - Stores the hub instance data
+ * @hdev: Stores the hid instance.
+ * @vendor_id: Vendor id of hub device.
+ * @product_id: Product id of hub device.
+ * @usage: Usage id for this hub device instance.
+ * @start_collection_index: Starting index for a phy type collection
+ * @end_collection_index: Last index for a phy type collection
+ * @mutex_ptr: synchronizing mutex pointer.
+ * @pending: Holds information of pending sync read request.
+ */
+struct hid_sensor_hub_device {
+ struct hid_device *hdev;
+ u32 vendor_id;
+ u32 product_id;
+ u32 usage;
+ int start_collection_index;
+ int end_collection_index;
+ struct mutex *mutex_ptr;
+ struct sensor_hub_pending pending;
+};
+
+/**
+ * struct hid_sensor_hub_callbacks - Client callback functions
+ * @pdev: Platform device instance of the client driver.
+ * @suspend: Suspend callback.
+ * @resume: Resume callback.
+ * @capture_sample: Callback to get a sample.
+ * @send_event: Callback to notify that all samples have been
+ * captured; process them and send the event.
+ */
+struct hid_sensor_hub_callbacks {
+ struct platform_device *pdev;
+ int (*suspend)(struct hid_sensor_hub_device *hsdev, void *priv);
+ int (*resume)(struct hid_sensor_hub_device *hsdev, void *priv);
+ int (*capture_sample)(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id, size_t raw_len, char *raw_data,
+ void *priv);
+ int (*send_event)(struct hid_sensor_hub_device *hsdev, u32 usage_id,
+ void *priv);
+};
+
+/**
+* sensor_hub_device_open() - Open hub device
+* @hsdev: Hub device instance.
+*
+* Used to open hid device for sensor hub.
+*/
+int sensor_hub_device_open(struct hid_sensor_hub_device *hsdev);
+
+/**
+* sensor_hub_device_close() - Close hub device
+* @hsdev: Hub device instance.
+*
+* Used to close hid device for sensor hub.
+*/
+void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev);
+
+/* Registration functions */
+
+/**
+* sensor_hub_register_callback() - Register client callbacks
+* @hsdev: Hub device instance.
+* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro).
+* @usage_callback: Callback function storage
+*
+* Used by client processing drivers to register callbacks. The sensor
+* hub core driver will call these callbacks to offload processing
+* of data streams and notifications.
+*/
+int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id,
+ struct hid_sensor_hub_callbacks *usage_callback);
+
+/**
+* sensor_hub_remove_callback() - Remove client callbacks
+* @hsdev: Hub device instance.
+* @usage_id: Usage id of the client (E.g. 0x200076 for Gyro).
+*
+* If there is a callback registered, this call will remove it,
+* stopping data and event notifications.
+*/
+int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id);
+
+
+/* Hid sensor hub core interfaces */
+
+/**
+* sensor_hub_input_get_attribute_info() - Get attribute information
+* @hsdev: Hub device instance.
+* @type: Type of this attribute, input/output/feature
+* @usage_id: Attribute usage id of parent physical device as per spec
+* @attr_usage_id: Attribute usage id as per spec
+* @info: return information about attribute after parsing report
+*
+* Parses the report and returns attribute information such as report id,
+* field index, units and exponent.
+*/
+int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
+ u8 type,
+ u32 usage_id, u32 attr_usage_id,
+ struct hid_sensor_hub_attribute_info *info);
+
+/**
+* sensor_hub_input_attr_get_raw_value() - Synchronous read request
+* @hsdev: Hub device instance.
+* @usage_id: Attribute usage id of parent physical device as per spec
+* @attr_usage_id: Attribute usage id as per spec
+* @report_id: Report id to look for
+* @flag: Synchronous or asynchronous read
+*
+* Issues a synchronous or asynchronous read request for an input attribute.
+* Returns data up to 32 bits.
+*/
+
+enum sensor_hub_read_flags {
+ SENSOR_HUB_SYNC,
+ SENSOR_HUB_ASYNC,
+};
+
+int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id,
+ u32 attr_usage_id, u32 report_id,
+ enum sensor_hub_read_flags flag
+);
+
+/**
+* sensor_hub_set_feature() - Feature set request
+* @hsdev: Hub device instance.
+* @report_id: Report id to look for
+* @field_index: Field index inside a report
+* @buffer_size: size of the buffer
+* @buffer: buffer to use in the feature set
+*
+* Used to set a field in a feature report. For example, this can set the
+* polling interval, sensitivity, or activate/deactivate state.
+*/
+int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
+ u32 field_index, int buffer_size, void *buffer);
+
+/**
+* sensor_hub_get_feature() - Feature get request
+* @hsdev: Hub device instance.
+* @report_id: Report id to look for
+* @field_index: Field index inside a report
+* @buffer_size: size of the buffer
+* @buffer: buffer to copy output
+*
+* Used to get a field from a feature report. For example, this can get the
+* polling interval, sensitivity, or activate/deactivate state. On success it
+* returns the number of bytes copied to the buffer. On failure it returns a
+* value < 0.
+*/
+int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
+ u32 field_index, int buffer_size, void *buffer);
+
+/* hid-sensor-attributes */
+
+/* Common hid sensor iio structure */
+struct hid_sensor_common {
+ struct hid_sensor_hub_device *hsdev;
+ struct platform_device *pdev;
+ unsigned usage_id;
+ atomic_t data_ready;
+ atomic_t user_requested_state;
+ struct iio_trigger *trigger;
+ struct hid_sensor_hub_attribute_info poll;
+ struct hid_sensor_hub_attribute_info report_state;
+ struct hid_sensor_hub_attribute_info power_state;
+ struct hid_sensor_hub_attribute_info sensitivity;
+};
+
+/* Convert from hid unit expo to regular exponent */
+static inline int hid_sensor_convert_exponent(int unit_expo)
+{
+ if (unit_expo < 0x08)
+ return unit_expo;
+ else if (unit_expo <= 0x0f)
+ return -(0x0f-unit_expo+1);
+ else
+ return 0;
+}
+
+int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
+ u32 usage_id,
+ struct hid_sensor_common *st);
+int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
+ int val1, int val2);
+int hid_sensor_read_raw_hyst_value(struct hid_sensor_common *st,
+ int *val1, int *val2);
+int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
+ int val1, int val2);
+int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
+ int *val1, int *val2);
+
+int hid_sensor_get_usage_index(struct hid_sensor_hub_device *hsdev,
+ u32 report_id, int field_index, u32 usage_id);
+
+int hid_sensor_format_scale(u32 usage_id,
+ struct hid_sensor_hub_attribute_info *attr_info,
+ int *val0, int *val1);
+
+s32 hid_sensor_read_poll_value(struct hid_sensor_common *st);
+
+#endif
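
A condensed sketch of the client-driver flow documented above: open the hub,
register callbacks for one usage id, and consume samples (the my_accel_*
names are hypothetical; HID_USAGE_SENSOR_ACCEL_3D comes from
hid-sensor-ids.h):

	#include <linux/hid-sensor-hub.h>
	#include <linux/hid-sensor-ids.h>

	static int my_accel_capture(struct hid_sensor_hub_device *hsdev,
				    u32 usage_id, size_t raw_len,
				    char *raw_data, void *priv)
	{
		/* one field of a sample arrives here; stash it in priv */
		return 0;
	}

	static int my_accel_event(struct hid_sensor_hub_device *hsdev,
				  u32 usage_id, void *priv)
	{
		/* the sample is now complete; push it to the subsystem */
		return 0;
	}

	static struct hid_sensor_hub_callbacks my_accel_callbacks = {
		.capture_sample	= my_accel_capture,
		.send_event	= my_accel_event,
	};

	static int my_accel_init(struct hid_sensor_hub_device *hsdev)
	{
		int ret = sensor_hub_device_open(hsdev);

		if (ret)
			return ret;

		return sensor_hub_register_callback(hsdev,
						    HID_USAGE_SENSOR_ACCEL_3D,
						    &my_accel_callbacks);
	}
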
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
new file mode 100644
index 000000000..f2ee90aed
--- /dev/null
+++ b/include/linux/hid-sensor-ids.h
@@ -0,0 +1,157 @@
+/*
+ * HID Sensors Driver
+ * Copyright (c) 2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#ifndef _HID_SENSORS_IDS_H
+#define _HID_SENSORS_IDS_H
+
+#define HID_MAX_PHY_DEVICES 0xFF
+
+#define HID_USAGE_SENSOR_COLLECTION 0x200001
+
+/* Accel 3D (200073) */
+#define HID_USAGE_SENSOR_ACCEL_3D 0x200073
+#define HID_USAGE_SENSOR_DATA_ACCELERATION 0x200452
+#define HID_USAGE_SENSOR_ACCEL_X_AXIS 0x200453
+#define HID_USAGE_SENSOR_ACCEL_Y_AXIS 0x200454
+#define HID_USAGE_SENSOR_ACCEL_Z_AXIS 0x200455
+
+/* ALS (200041) */
+#define HID_USAGE_SENSOR_ALS 0x200041
+#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
+#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
+
+/* PROX (200011) */
+#define HID_USAGE_SENSOR_PROX 0x200011
+#define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0
+#define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1
+
+/* Pressure (200031) */
+#define HID_USAGE_SENSOR_PRESSURE 0x200031
+#define HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE 0x200430
+#define HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE 0x200431
+
+/* Gyro 3D: (200076) */
+#define HID_USAGE_SENSOR_GYRO_3D 0x200076
+#define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456
+#define HID_USAGE_SENSOR_ANGL_VELOCITY_X_AXIS 0x200457
+#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458
+#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459
+
+/* ORIENTATION: Compass 3D: (200083) */
+#define HID_USAGE_SENSOR_COMPASS_3D 0x200083
+#define HID_USAGE_SENSOR_DATA_ORIENTATION 0x200470
+#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING 0x200471
+#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_X 0x200472
+#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Y 0x200473
+#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_Z 0x200474
+
+#define HID_USAGE_SENSOR_ORIENT_COMP_MAGN_NORTH 0x200475
+#define HID_USAGE_SENSOR_ORIENT_COMP_TRUE_NORTH 0x200476
+#define HID_USAGE_SENSOR_ORIENT_MAGN_NORTH 0x200477
+#define HID_USAGE_SENSOR_ORIENT_TRUE_NORTH 0x200478
+
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE 0x200479
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE_X 0x20047A
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE_Y 0x20047B
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE_Z 0x20047C
+#define HID_USAGE_SENSOR_ORIENT_DISTANCE_OUT_OF_RANGE 0x20047D
+
+/* ORIENTATION: Inclinometer 3D: (200086) */
+#define HID_USAGE_SENSOR_INCLINOMETER_3D 0x200086
+#define HID_USAGE_SENSOR_ORIENT_TILT 0x20047E
+#define HID_USAGE_SENSOR_ORIENT_TILT_X 0x20047F
+#define HID_USAGE_SENSOR_ORIENT_TILT_Y 0x200480
+#define HID_USAGE_SENSOR_ORIENT_TILT_Z 0x200481
+
+#define HID_USAGE_SENSOR_DEVICE_ORIENTATION 0x20008A
+#define HID_USAGE_SENSOR_ORIENT_ROTATION_MATRIX 0x200482
+#define HID_USAGE_SENSOR_ORIENT_QUATERNION 0x200483
+#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX 0x200484
+
+#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_X_AXIS 0x200485
+#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Y_AXIS 0x200486
+#define HID_USAGE_SENSOR_ORIENT_MAGN_FLUX_Z_AXIS 0x200487
+
+/* Time (2000a0) */
+#define HID_USAGE_SENSOR_TIME 0x2000a0
+#define HID_USAGE_SENSOR_TIME_YEAR 0x200521
+#define HID_USAGE_SENSOR_TIME_MONTH 0x200522
+#define HID_USAGE_SENSOR_TIME_DAY 0x200523
+#define HID_USAGE_SENSOR_TIME_HOUR 0x200525
+#define HID_USAGE_SENSOR_TIME_MINUTE 0x200526
+#define HID_USAGE_SENSOR_TIME_SECOND 0x200527
+
+/* Units */
+#define HID_USAGE_SENSOR_UNITS_NOT_SPECIFIED 0x00
+#define HID_USAGE_SENSOR_UNITS_LUX 0x01
+#define HID_USAGE_SENSOR_UNITS_KELVIN 0x01000100
+#define HID_USAGE_SENSOR_UNITS_FAHRENHEIT 0x03000100
+#define HID_USAGE_SENSOR_UNITS_PASCAL 0xF1E1
+#define HID_USAGE_SENSOR_UNITS_NEWTON 0x11E1
+#define HID_USAGE_SENSOR_UNITS_METERS_PER_SECOND 0x11F0
+#define HID_USAGE_SENSOR_UNITS_METERS_PER_SEC_SQRD 0x11E0
+#define HID_USAGE_SENSOR_UNITS_FARAD 0xE14F2000
+#define HID_USAGE_SENSOR_UNITS_AMPERE 0x01001000
+#define HID_USAGE_SENSOR_UNITS_WATT 0x21d1
+#define HID_USAGE_SENSOR_UNITS_HENRY 0x21E1E000
+#define HID_USAGE_SENSOR_UNITS_OHM 0x21D1E000
+#define HID_USAGE_SENSOR_UNITS_VOLT 0x21D1F000
+#define HID_USAGE_SENSOR_UNITS_HERTZ 0x01F0
+#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SEC_SQRD 0x14E0
+#define HID_USAGE_SENSOR_UNITS_RADIANS 0x12
+#define HID_USAGE_SENSOR_UNITS_RADIANS_PER_SECOND 0x12F0
+#define HID_USAGE_SENSOR_UNITS_RADIANS_PER_SEC_SQRD 0x12E0
+#define HID_USAGE_SENSOR_UNITS_SECOND 0x0110
+#define HID_USAGE_SENSOR_UNITS_GAUSS 0x01E1F000
+#define HID_USAGE_SENSOR_UNITS_GRAM 0x0101
+#define HID_USAGE_SENSOR_UNITS_CENTIMETER 0x11
+#define HID_USAGE_SENSOR_UNITS_G 0x1A
+#define HID_USAGE_SENSOR_UNITS_MILLISECOND 0x19
+#define HID_USAGE_SENSOR_UNITS_PERCENT 0x17
+#define HID_USAGE_SENSOR_UNITS_DEGREES 0x14
+#define HID_USAGE_SENSOR_UNITS_DEGREES_PER_SECOND 0x15
+
+/* Common selectors */
+#define HID_USAGE_SENSOR_PROP_REPORT_INTERVAL 0x20030E
+#define HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS 0x20030F
+#define HID_USAGE_SENSOR_PROP_SENSITIVITY_RANGE_PCT 0x200310
+#define HID_USAGE_SENSOR_PROP_SENSITIVITY_REL_PCT 0x200311
+#define HID_USAGE_SENSOR_PROP_ACCURACY 0x200312
+#define HID_USAGE_SENSOR_PROP_RESOLUTION 0x200313
+#define HID_USAGE_SENSOR_PROP_RANGE_MAXIMUM 0x200314
+#define HID_USAGE_SENSOR_PROP_RANGE_MINIMUM 0x200315
+#define HID_USAGE_SENSOR_PROP_REPORT_STATE 0x200316
+#define HID_USAGE_SENSOR_PROY_POWER_STATE 0x200319
+
+/* Per data field properties */
+#define HID_USAGE_SENSOR_DATA_MOD_NONE 0x00
+#define HID_USAGE_SENSOR_DATA_MOD_CHANGE_SENSITIVITY_ABS 0x1000
+
+/* Power state enumerations */
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_UNDEFINED_ENUM 0x200850
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D0_FULL_POWER_ENUM 0x200851
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D1_LOW_POWER_ENUM 0x200852
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D2_STANDBY_WITH_WAKE_ENUM 0x200853
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D3_SLEEP_WITH_WAKE_ENUM 0x200854
+#define HID_USAGE_SENSOR_PROP_POWER_STATE_D4_POWER_OFF_ENUM 0x200855
+
+/* Report State enumerations */
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_NO_EVENTS_ENUM 0x200840
+#define HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM 0x200841
+
+#endif
diff --git a/include/linux/hid.h b/include/linux/hid.h
new file mode 100644
index 000000000..176b43670
--- /dev/null
+++ b/include/linux/hid.h
@@ -0,0 +1,1144 @@
+/*
+ * Copyright (c) 1999 Andreas Gal
+ * Copyright (c) 2000-2001 Vojtech Pavlik
+ * Copyright (c) 2006-2007 Jiri Kosina
+ */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Should you need to contact me, the author, you can do so either by
+ * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
+ * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
+ */
+#ifndef __HID_H
+#define __HID_H
+
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h> /* hid_device_id */
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/input.h>
+#include <linux/semaphore.h>
+#include <linux/power_supply.h>
+#include <uapi/linux/hid.h>
+
+/*
+ * We parse each description item into this structure. Short item data
+ * values are expanded to a 32-bit signed int; long items contain a pointer
+ * into the data area.
+ */
+
+struct hid_item {
+ unsigned format;
+ __u8 size;
+ __u8 type;
+ __u8 tag;
+ union {
+ __u8 u8;
+ __s8 s8;
+ __u16 u16;
+ __s16 s16;
+ __u32 u32;
+ __s32 s32;
+ __u8 *longdata;
+ } data;
+};
+
+/*
+ * HID report item format
+ */
+
+#define HID_ITEM_FORMAT_SHORT 0
+#define HID_ITEM_FORMAT_LONG 1
+
+/*
+ * Special tag indicating long items
+ */
+
+#define HID_ITEM_TAG_LONG 15
+
+/*
+ * HID report descriptor item type (prefix bit 2,3)
+ */
+
+#define HID_ITEM_TYPE_MAIN 0
+#define HID_ITEM_TYPE_GLOBAL 1
+#define HID_ITEM_TYPE_LOCAL 2
+#define HID_ITEM_TYPE_RESERVED 3
+
+/*
+ * HID report descriptor main item tags
+ */
+
+#define HID_MAIN_ITEM_TAG_INPUT 8
+#define HID_MAIN_ITEM_TAG_OUTPUT 9
+#define HID_MAIN_ITEM_TAG_FEATURE 11
+#define HID_MAIN_ITEM_TAG_BEGIN_COLLECTION 10
+#define HID_MAIN_ITEM_TAG_END_COLLECTION 12
+
+/*
+ * HID report descriptor main item contents
+ */
+
+#define HID_MAIN_ITEM_CONSTANT 0x001
+#define HID_MAIN_ITEM_VARIABLE 0x002
+#define HID_MAIN_ITEM_RELATIVE 0x004
+#define HID_MAIN_ITEM_WRAP 0x008
+#define HID_MAIN_ITEM_NONLINEAR 0x010
+#define HID_MAIN_ITEM_NO_PREFERRED 0x020
+#define HID_MAIN_ITEM_NULL_STATE 0x040
+#define HID_MAIN_ITEM_VOLATILE 0x080
+#define HID_MAIN_ITEM_BUFFERED_BYTE 0x100
+
+/*
+ * HID report descriptor collection item types
+ */
+
+#define HID_COLLECTION_PHYSICAL 0
+#define HID_COLLECTION_APPLICATION 1
+#define HID_COLLECTION_LOGICAL 2
+
+/*
+ * HID report descriptor global item tags
+ */
+
+#define HID_GLOBAL_ITEM_TAG_USAGE_PAGE 0
+#define HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM 1
+#define HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM 2
+#define HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM 3
+#define HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM 4
+#define HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT 5
+#define HID_GLOBAL_ITEM_TAG_UNIT 6
+#define HID_GLOBAL_ITEM_TAG_REPORT_SIZE 7
+#define HID_GLOBAL_ITEM_TAG_REPORT_ID 8
+#define HID_GLOBAL_ITEM_TAG_REPORT_COUNT 9
+#define HID_GLOBAL_ITEM_TAG_PUSH 10
+#define HID_GLOBAL_ITEM_TAG_POP 11
+
+/*
+ * HID report descriptor local item tags
+ */
+
+#define HID_LOCAL_ITEM_TAG_USAGE 0
+#define HID_LOCAL_ITEM_TAG_USAGE_MINIMUM 1
+#define HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM 2
+#define HID_LOCAL_ITEM_TAG_DESIGNATOR_INDEX 3
+#define HID_LOCAL_ITEM_TAG_DESIGNATOR_MINIMUM 4
+#define HID_LOCAL_ITEM_TAG_DESIGNATOR_MAXIMUM 5
+#define HID_LOCAL_ITEM_TAG_STRING_INDEX 7
+#define HID_LOCAL_ITEM_TAG_STRING_MINIMUM 8
+#define HID_LOCAL_ITEM_TAG_STRING_MAXIMUM 9
+#define HID_LOCAL_ITEM_TAG_DELIMITER 10
+
+/*
+ * HID usage tables
+ */
+
+#define HID_USAGE_PAGE 0xffff0000
+
+#define HID_UP_UNDEFINED 0x00000000
+#define HID_UP_GENDESK 0x00010000
+#define HID_UP_SIMULATION 0x00020000
+#define HID_UP_GENDEVCTRLS 0x00060000
+#define HID_UP_KEYBOARD 0x00070000
+#define HID_UP_LED 0x00080000
+#define HID_UP_BUTTON 0x00090000
+#define HID_UP_ORDINAL 0x000a0000
+#define HID_UP_TELEPHONY 0x000b0000
+#define HID_UP_CONSUMER 0x000c0000
+#define HID_UP_DIGITIZER 0x000d0000
+#define HID_UP_PID 0x000f0000
+#define HID_UP_HPVENDOR 0xff7f0000
+#define HID_UP_HPVENDOR2 0xff010000
+#define HID_UP_MSVENDOR 0xff000000
+#define HID_UP_CUSTOM 0x00ff0000
+#define HID_UP_LOGIVENDOR 0xffbc0000
+#define HID_UP_LNVENDOR 0xffa00000
+#define HID_UP_SENSOR 0x00200000
+
+#define HID_USAGE 0x0000ffff
+
+#define HID_GD_POINTER 0x00010001
+#define HID_GD_MOUSE 0x00010002
+#define HID_GD_JOYSTICK 0x00010004
+#define HID_GD_GAMEPAD 0x00010005
+#define HID_GD_KEYBOARD 0x00010006
+#define HID_GD_KEYPAD 0x00010007
+#define HID_GD_MULTIAXIS 0x00010008
+#define HID_GD_X 0x00010030
+#define HID_GD_Y 0x00010031
+#define HID_GD_Z 0x00010032
+#define HID_GD_RX 0x00010033
+#define HID_GD_RY 0x00010034
+#define HID_GD_RZ 0x00010035
+#define HID_GD_SLIDER 0x00010036
+#define HID_GD_DIAL 0x00010037
+#define HID_GD_WHEEL 0x00010038
+#define HID_GD_HATSWITCH 0x00010039
+#define HID_GD_BUFFER 0x0001003a
+#define HID_GD_BYTECOUNT 0x0001003b
+#define HID_GD_MOTION 0x0001003c
+#define HID_GD_START 0x0001003d
+#define HID_GD_SELECT 0x0001003e
+#define HID_GD_VX 0x00010040
+#define HID_GD_VY 0x00010041
+#define HID_GD_VZ 0x00010042
+#define HID_GD_VBRX 0x00010043
+#define HID_GD_VBRY 0x00010044
+#define HID_GD_VBRZ 0x00010045
+#define HID_GD_VNO 0x00010046
+#define HID_GD_FEATURE 0x00010047
+#define HID_GD_SYSTEM_CONTROL 0x00010080
+#define HID_GD_UP 0x00010090
+#define HID_GD_DOWN 0x00010091
+#define HID_GD_RIGHT 0x00010092
+#define HID_GD_LEFT 0x00010093
+
+#define HID_DC_BATTERYSTRENGTH 0x00060020
+
+#define HID_CP_CONSUMER_CONTROL 0x000c0001
+
+#define HID_DG_DIGITIZER 0x000d0001
+#define HID_DG_PEN 0x000d0002
+#define HID_DG_LIGHTPEN 0x000d0003
+#define HID_DG_TOUCHSCREEN 0x000d0004
+#define HID_DG_TOUCHPAD 0x000d0005
+#define HID_DG_STYLUS 0x000d0020
+#define HID_DG_PUCK 0x000d0021
+#define HID_DG_FINGER 0x000d0022
+#define HID_DG_TIPPRESSURE 0x000d0030
+#define HID_DG_BARRELPRESSURE 0x000d0031
+#define HID_DG_INRANGE 0x000d0032
+#define HID_DG_TOUCH 0x000d0033
+#define HID_DG_UNTOUCH 0x000d0034
+#define HID_DG_TAP 0x000d0035
+#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
+#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
+#define HID_DG_INVERT 0x000d003c
+#define HID_DG_TIPSWITCH 0x000d0042
+#define HID_DG_TIPSWITCH2 0x000d0043
+#define HID_DG_BARRELSWITCH 0x000d0044
+#define HID_DG_ERASER 0x000d0045
+#define HID_DG_TABLETPICK 0x000d0046
+
+#define HID_CP_CONSUMERCONTROL 0x000c0001
+#define HID_CP_NUMERICKEYPAD 0x000c0002
+#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003
+#define HID_CP_MICROPHONE 0x000c0004
+#define HID_CP_HEADPHONE 0x000c0005
+#define HID_CP_GRAPHICEQUALIZER 0x000c0006
+#define HID_CP_FUNCTIONBUTTONS 0x000c0036
+#define HID_CP_SELECTION 0x000c0080
+#define HID_CP_MEDIASELECTION 0x000c0087
+#define HID_CP_SELECTDISC 0x000c00ba
+#define HID_CP_PLAYBACKSPEED 0x000c00f1
+#define HID_CP_PROXIMITY 0x000c0109
+#define HID_CP_SPEAKERSYSTEM 0x000c0160
+#define HID_CP_CHANNELLEFT 0x000c0161
+#define HID_CP_CHANNELRIGHT 0x000c0162
+#define HID_CP_CHANNELCENTER 0x000c0163
+#define HID_CP_CHANNELFRONT 0x000c0164
+#define HID_CP_CHANNELCENTERFRONT 0x000c0165
+#define HID_CP_CHANNELSIDE 0x000c0166
+#define HID_CP_CHANNELSURROUND 0x000c0167
+#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168
+#define HID_CP_CHANNELTOP 0x000c0169
+#define HID_CP_CHANNELUNKNOWN 0x000c016a
+#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180
+#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200
+
+#define HID_DG_CONFIDENCE 0x000d0047
+#define HID_DG_WIDTH 0x000d0048
+#define HID_DG_HEIGHT 0x000d0049
+#define HID_DG_CONTACTID 0x000d0051
+#define HID_DG_INPUTMODE 0x000d0052
+#define HID_DG_DEVICEINDEX 0x000d0053
+#define HID_DG_CONTACTCOUNT 0x000d0054
+#define HID_DG_CONTACTMAX 0x000d0055
+#define HID_DG_BUTTONTYPE 0x000d0059
+#define HID_DG_BARRELSWITCH2 0x000d005a
+#define HID_DG_TOOLSERIALNUMBER 0x000d005b
+
+/*
+ * HID report types --- Ouch! HID spec says 1 2 3!
+ */
+
+#define HID_INPUT_REPORT 0
+#define HID_OUTPUT_REPORT 1
+#define HID_FEATURE_REPORT 2
+
+#define HID_REPORT_TYPES 3
+
+/*
+ * HID connect requests
+ */
+
+#define HID_CONNECT_HIDINPUT 0x01
+#define HID_CONNECT_HIDINPUT_FORCE 0x02
+#define HID_CONNECT_HIDRAW 0x04
+#define HID_CONNECT_HIDDEV 0x08
+#define HID_CONNECT_HIDDEV_FORCE 0x10
+#define HID_CONNECT_FF 0x20
+#define HID_CONNECT_DRIVER 0x40
+#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \
+ HID_CONNECT_HIDDEV|HID_CONNECT_FF)
+
+/*
+ * HID device quirks.
+ */
+
+/*
+ * Increase this if you need to configure more HID quirks at module load time
+ */
+#define MAX_USBHID_BOOT_QUIRKS 4
+
+#define HID_QUIRK_INVERT 0x00000001
+#define HID_QUIRK_NOTOUCH 0x00000002
+#define HID_QUIRK_IGNORE 0x00000004
+#define HID_QUIRK_NOGET 0x00000008
+#define HID_QUIRK_HIDDEV_FORCE 0x00000010
+#define HID_QUIRK_BADPAD 0x00000020
+#define HID_QUIRK_MULTI_INPUT 0x00000040
+#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
+#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
+#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
+#define HID_QUIRK_ALWAYS_POLL 0x00000400
+#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
+#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
+#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
+#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
+#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
+#define HID_QUIRK_NO_IGNORE 0x40000000
+#define HID_QUIRK_NO_INPUT_SYNC 0x80000000
+
+/*
+ * HID device groups
+ *
+ * Note: HID_GROUP_ANY is declared in linux/mod_devicetable.h
+ * and has a value of 0x0000
+ */
+#define HID_GROUP_GENERIC 0x0001
+#define HID_GROUP_MULTITOUCH 0x0002
+#define HID_GROUP_SENSOR_HUB 0x0003
+#define HID_GROUP_MULTITOUCH_WIN_8 0x0004
+
+/*
+ * Vendor specific HID device groups
+ */
+#define HID_GROUP_RMI 0x0100
+#define HID_GROUP_WACOM 0x0101
+#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
+
+/*
+ * This is the global environment of the parser. This information is
+ * persistent for main-items. The global environment can be saved and
+ * restored with PUSH/POP statements.
+ */
+
+struct hid_global {
+ unsigned usage_page;
+ __s32 logical_minimum;
+ __s32 logical_maximum;
+ __s32 physical_minimum;
+ __s32 physical_maximum;
+ __s32 unit_exponent;
+ unsigned unit;
+ unsigned report_id;
+ unsigned report_size;
+ unsigned report_count;
+};
+
+/*
+ * This is the local environment. It is persistent up to the next main-item.
+ */
+
+#define HID_MAX_USAGES 12288
+#define HID_DEFAULT_NUM_COLLECTIONS 16
+
+struct hid_local {
+ unsigned usage[HID_MAX_USAGES]; /* usage array */
+ unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
+ unsigned usage_index;
+ unsigned usage_minimum;
+ unsigned delimiter_depth;
+ unsigned delimiter_branch;
+};
+
+/*
+ * This is the collection stack. We climb up the stack to determine
+ * application and function of each field.
+ */
+
+struct hid_collection {
+ unsigned type;
+ unsigned usage;
+ unsigned level;
+};
+
+struct hid_usage {
+ unsigned hid; /* hid usage code */
+ unsigned collection_index; /* index into collection array */
+ unsigned usage_index; /* index into usage array */
+ /* hidinput data */
+ __u16 code; /* input driver code */
+ __u8 type; /* input driver type */
+ __s8 hat_min; /* hat switch fun */
+ __s8 hat_max; /* ditto */
+ __s8 hat_dir; /* ditto */
+};
+
+struct hid_input;
+
+struct hid_field {
+ unsigned physical; /* physical usage for this field */
+ unsigned logical; /* logical usage for this field */
+ unsigned application; /* application usage for this field */
+ struct hid_usage *usage; /* usage table for this function */
+ unsigned maxusage; /* maximum usage index */
+ unsigned flags; /* main-item flags (i.e. volatile,array,constant) */
+ unsigned report_offset; /* bit offset in the report */
+ unsigned report_size; /* size of this field in the report */
+ unsigned report_count; /* number of this field in the report */
+ unsigned report_type; /* (input,output,feature) */
+ __s32 *value; /* last known value(s) */
+ __s32 logical_minimum;
+ __s32 logical_maximum;
+ __s32 physical_minimum;
+ __s32 physical_maximum;
+ __s32 unit_exponent;
+ unsigned unit;
+ struct hid_report *report; /* associated report */
+ unsigned index; /* index into report->field[] */
+ /* hidinput data */
+ struct hid_input *hidinput; /* associated input structure */
+ __u16 dpad; /* dpad input code */
+};
+
+#define HID_MAX_FIELDS 256
+
+struct hid_report {
+ struct list_head list;
+ unsigned id; /* id of this report */
+ unsigned type; /* report type */
+ struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
+ unsigned maxfield; /* maximum valid field index */
+ unsigned size; /* size of the report (bits) */
+ struct hid_device *device; /* associated device */
+};
+
+#define HID_MAX_IDS 256
+
+struct hid_report_enum {
+ unsigned numbered;
+ struct list_head report_list;
+ struct hid_report *report_id_hash[HID_MAX_IDS];
+};
+
+#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
+#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
+#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
+#define HID_OUTPUT_FIFO_SIZE 64
+
+struct hid_control_fifo {
+ unsigned char dir;
+ struct hid_report *report;
+ char *raw_report;
+};
+
+struct hid_output_fifo {
+ struct hid_report *report;
+ char *raw_report;
+};
+
+#define HID_CLAIMED_INPUT 1
+#define HID_CLAIMED_HIDDEV 2
+#define HID_CLAIMED_HIDRAW 4
+#define HID_CLAIMED_DRIVER 8
+
+#define HID_STAT_ADDED 1
+#define HID_STAT_PARSED 2
+
+struct hid_input {
+ struct list_head list;
+ struct hid_report *report;
+ struct input_dev *input;
+};
+
+enum hid_type {
+ HID_TYPE_OTHER = 0,
+ HID_TYPE_USBMOUSE,
+ HID_TYPE_USBNONE
+};
+
+struct hid_driver;
+struct hid_ll_driver;
+
+struct hid_device { /* device report descriptor */
+ __u8 *dev_rdesc;
+ unsigned dev_rsize;
+ __u8 *rdesc;
+ unsigned rsize;
+ struct hid_collection *collection; /* List of HID collections */
+ unsigned collection_size; /* Number of allocated hid_collections */
+ unsigned maxcollection; /* Number of parsed collections */
+ unsigned maxapplication; /* Number of applications */
+ __u16 bus; /* BUS ID */
+ __u16 group; /* Report group */
+ __u32 vendor; /* Vendor ID */
+ __u32 product; /* Product ID */
+ __u32 version; /* HID version */
+ enum hid_type type; /* device type (mouse, kbd, ...) */
+ unsigned country; /* HID country */
+ struct hid_report_enum report_enum[HID_REPORT_TYPES];
+ struct work_struct led_work; /* delayed LED worker */
+
+ struct semaphore driver_lock; /* protects the current driver, except during input */
+ struct semaphore driver_input_lock; /* protects the current driver */
+ struct device dev; /* device */
+ struct hid_driver *driver;
+ struct hid_ll_driver *ll_driver;
+
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+ /*
+ * Power supply information for HID devices which report
+ * battery strength. power_supply was successfully registered if
+ * battery is non-NULL.
+ */
+ struct power_supply *battery;
+ __s32 battery_min;
+ __s32 battery_max;
+ __s32 battery_report_type;
+ __s32 battery_report_id;
+#endif
+
+ unsigned int status; /* see STAT flags above */
+ unsigned claimed; /* Claimed by hidinput, hiddev? */
+ unsigned quirks; /* Various quirks the device can pull on us */
+ bool io_started; /* Protected by driver_lock. If IO has started */
+
+ struct list_head inputs; /* The list of inputs */
+ void *hiddev; /* The hiddev structure */
+ void *hidraw;
+ int minor; /* Hiddev minor number */
+
+ int open; /* is the device open by anyone? */
+ char name[128]; /* Device name */
+ char phys[64]; /* Device physical location */
+ char uniq[64]; /* Device unique identifier (serial #) */
+
+ void *driver_data;
+
+ /* temporary hid_ff handling (until moved to the drivers) */
+ int (*ff_init)(struct hid_device *);
+
+ /* hiddev event handler */
+ int (*hiddev_connect)(struct hid_device *, unsigned int);
+ void (*hiddev_disconnect)(struct hid_device *);
+ void (*hiddev_hid_event) (struct hid_device *, struct hid_field *field,
+ struct hid_usage *, __s32);
+ void (*hiddev_report_event) (struct hid_device *, struct hid_report *);
+
+ /* debugging support via debugfs */
+ unsigned short debug;
+ struct dentry *debug_dir;
+ struct dentry *debug_rdesc;
+ struct dentry *debug_events;
+ struct list_head debug_list;
+ spinlock_t debug_list_lock;
+ wait_queue_head_t debug_wait;
+};
+
+static inline void *hid_get_drvdata(struct hid_device *hdev)
+{
+ return dev_get_drvdata(&hdev->dev);
+}
+
+static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
+{
+ dev_set_drvdata(&hdev->dev, data);
+}
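
A minimal sketch of the intended drvdata pattern (struct foo_priv and foo_raw_event are hypothetical; the state would be allocated in probe and stored with hid_set_drvdata()):

struct foo_priv {				/* hypothetical per-device state */
	int packets_seen;
};

static int foo_raw_event(struct hid_device *hdev, struct hid_report *report,
			 u8 *data, int size)
{
	struct foo_priv *priv = hid_get_drvdata(hdev);	/* stored in probe via hid_set_drvdata() */

	priv->packets_seen++;
	return 0;					/* continue with generic processing */
}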
+
+#define HID_GLOBAL_STACK_SIZE 4
+#define HID_COLLECTION_STACK_SIZE 4
+
+#define HID_SCAN_FLAG_MT_WIN_8 BIT(0)
+#define HID_SCAN_FLAG_VENDOR_SPECIFIC BIT(1)
+#define HID_SCAN_FLAG_GD_POINTER BIT(2)
+
+struct hid_parser {
+ struct hid_global global;
+ struct hid_global global_stack[HID_GLOBAL_STACK_SIZE];
+ unsigned global_stack_ptr;
+ struct hid_local local;
+ unsigned collection_stack[HID_COLLECTION_STACK_SIZE];
+ unsigned collection_stack_ptr;
+ struct hid_device *device;
+ unsigned scan_flags;
+};
+
+struct hid_class_descriptor {
+ __u8 bDescriptorType;
+ __le16 wDescriptorLength;
+} __attribute__ ((packed));
+
+struct hid_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __le16 bcdHID;
+ __u8 bCountryCode;
+ __u8 bNumDescriptors;
+
+ struct hid_class_descriptor desc[1];
+} __attribute__ ((packed));
+
+#define HID_DEVICE(b, g, ven, prod) \
+ .bus = (b), .group = (g), .vendor = (ven), .product = (prod)
+#define HID_USB_DEVICE(ven, prod) \
+ .bus = BUS_USB, .vendor = (ven), .product = (prod)
+#define HID_BLUETOOTH_DEVICE(ven, prod) \
+ .bus = BUS_BLUETOOTH, .vendor = (ven), .product = (prod)
+#define HID_I2C_DEVICE(ven, prod) \
+ .bus = BUS_I2C, .vendor = (ven), .product = (prod)
+
+#define HID_REPORT_ID(rep) \
+ .report_type = (rep)
+#define HID_USAGE_ID(uhid, utype, ucode) \
+ .usage_hid = (uhid), .usage_type = (utype), .usage_code = (ucode)
+/* we don't want to catch types and codes equal to 0 */
+#define HID_TERMINATOR (HID_ANY_ID - 1)
+
+struct hid_report_id {
+ __u32 report_type;
+};
+struct hid_usage_id {
+ __u32 usage_hid;
+ __u32 usage_type;
+ __u32 usage_code;
+};
+
+/**
+ * struct hid_driver
+ * @name: driver name (e.g. "Footech_bar-wheel")
+ * @id_table: which devices this driver is for (must be non-NULL for probe
+ * to be called)
+ * @dyn_list: list of dynamically added device ids
+ * @dyn_lock: lock protecting @dyn_list
+ * @probe: new device inserted
+ * @remove: device removed (NULL if not a hot-plug capable driver)
+ * @report_table: on which reports to call raw_event (NULL means all)
+ * @raw_event: if report in report_table, this hook is called (NULL means nop)
+ * @usage_table: on which events to call event (NULL means all)
+ * @event: if usage in usage_table, this hook is called (NULL means nop)
+ * @report: this hook is called after parsing a report (NULL means nop)
+ * @report_fixup: called before report descriptor parsing (NULL means nop)
+ * @input_mapping: invoked on input registering before mapping a usage
+ * @input_mapped: invoked on input registering after mapping a usage
+ * @input_configured: invoked just before the device is registered
+ * @feature_mapping: invoked on feature registering
+ * @suspend: invoked on suspend (NULL means nop)
+ * @resume: invoked on resume if device was not reset (NULL means nop)
+ * @reset_resume: invoked on resume if device was reset (NULL means nop)
+ *
+ * probe should return -errno on error, or 0 on success. During probe,
+ * input will not be passed to raw_event unless hid_device_io_start is
+ * called.
+ *
+ * raw_event and event should return 0 when no action was performed, 1 when no
+ * further processing should be done, and a negative value on error.
+ *
+ * input_mapping shall return a negative value to completely ignore this usage
+ * (e.g. a doubled or invalid usage), zero to continue with generic parsing of
+ * this usage (no special handling needed), or a positive value to skip generic
+ * parsing (the hook has already done the special handling that was needed).
+ * input_mapped shall return a negative value to inform the layer that this
+ * usage should not be considered for further processing, or zero to indicate
+ * that no processing was performed and generic handling should be applied.
+ * Both of these hooks may be NULL, which is equivalent to returning zero from
+ * them.
+ */
+struct hid_driver {
+ char *name;
+ const struct hid_device_id *id_table;
+
+ struct list_head dyn_list;
+ spinlock_t dyn_lock;
+
+ int (*probe)(struct hid_device *dev, const struct hid_device_id *id);
+ void (*remove)(struct hid_device *dev);
+
+ const struct hid_report_id *report_table;
+ int (*raw_event)(struct hid_device *hdev, struct hid_report *report,
+ u8 *data, int size);
+ const struct hid_usage_id *usage_table;
+ int (*event)(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value);
+ void (*report)(struct hid_device *hdev, struct hid_report *report);
+
+ __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf,
+ unsigned int *size);
+
+ int (*input_mapping)(struct hid_device *hdev,
+ struct hid_input *hidinput, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max);
+ int (*input_mapped)(struct hid_device *hdev,
+ struct hid_input *hidinput, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max);
+ void (*input_configured)(struct hid_device *hdev,
+ struct hid_input *hidinput);
+ void (*feature_mapping)(struct hid_device *hdev,
+ struct hid_field *field,
+ struct hid_usage *usage);
+#ifdef CONFIG_PM
+ int (*suspend)(struct hid_device *hdev, pm_message_t message);
+ int (*resume)(struct hid_device *hdev);
+ int (*reset_resume)(struct hid_device *hdev);
+#endif
+/* private: */
+ struct device_driver driver;
+};
+
+/**
+ * hid_ll_driver - low level driver callbacks
+ * @start: called on probe to start the device
+ * @stop: called on remove
+ * @open: called by input layer on open
+ * @close: called by input layer on close
+ * @parse: this method is called only once to parse the device data;
+ * it should not allocate anything, so as not to leak memory
+ * @request: send report request to device (e.g. feature report)
+ * @wait: wait for buffered io to complete (send/recv reports)
+ * @raw_request: send raw report request to device (e.g. feature report)
+ * @output_report: send output report to device
+ * @idle: send idle request to device
+ */
+struct hid_ll_driver {
+ int (*start)(struct hid_device *hdev);
+ void (*stop)(struct hid_device *hdev);
+
+ int (*open)(struct hid_device *hdev);
+ void (*close)(struct hid_device *hdev);
+
+ int (*power)(struct hid_device *hdev, int level);
+
+ int (*parse)(struct hid_device *hdev);
+
+ void (*request)(struct hid_device *hdev,
+ struct hid_report *report, int reqtype);
+
+ int (*wait)(struct hid_device *hdev);
+
+ int (*raw_request) (struct hid_device *hdev, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype);
+
+ int (*output_report) (struct hid_device *hdev, __u8 *buf, size_t len);
+
+ int (*idle)(struct hid_device *hdev, int report, int idle, int reqtype);
+};
+
+#define PM_HINT_FULLON (1<<5)
+#define PM_HINT_NORMAL (1<<1)
+
+/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
+/* We ignore a few input applications that are not widely used */
+#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006)))
+
+/* HID core API */
+
+extern int hid_debug;
+
+extern bool hid_ignore(struct hid_device *);
+extern int hid_add_device(struct hid_device *);
+extern void hid_destroy_device(struct hid_device *);
+
+extern int __must_check __hid_register_driver(struct hid_driver *,
+ struct module *, const char *mod_name);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define hid_register_driver(driver) \
+ __hid_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+
+extern void hid_unregister_driver(struct hid_driver *);
+
+/**
+ * module_hid_driver() - Helper macro for registering a HID driver
+ * @__hid_driver: hid_driver struct
+ *
+ * Helper macro for HID drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_hid_driver(__hid_driver) \
+ module_driver(__hid_driver, hid_register_driver, \
+ hid_unregister_driver)
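
As a hedged illustration of the boilerplate this macro removes (the vendor/product IDs and the "foo" names below are made up), a driver with no special module init/exit reduces to an id table, a struct hid_driver, and one macro invocation:

static const struct hid_device_id foo_devices[] = {
	{ HID_USB_DEVICE(0x1234, 0x5678) },	/* hypothetical VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(hid, foo_devices);

static struct hid_driver foo_driver = {
	.name		= "foo-hid",
	.id_table	= foo_devices,
	/* .probe/.remove/.raw_event hooks are added here as needed */
};
module_hid_driver(foo_driver);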
+
+extern void hidinput_hid_event(struct hid_device *, struct hid_field *, struct hid_usage *, __s32);
+extern void hidinput_report_event(struct hid_device *hid, struct hid_report *report);
+extern int hidinput_connect(struct hid_device *hid, unsigned int force);
+extern void hidinput_disconnect(struct hid_device *);
+
+int hid_set_field(struct hid_field *, unsigned, __s32);
+int hid_input_report(struct hid_device *, int type, u8 *, int, int);
+int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+struct hid_field *hidinput_get_led_field(struct hid_device *hid);
+unsigned int hidinput_count_leds(struct hid_device *hid);
+__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
+void hid_output_report(struct hid_report *report, __u8 *data);
+void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
+struct hid_device *hid_allocate_device(void);
+struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
+int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
+struct hid_report *hid_validate_values(struct hid_device *hid,
+ unsigned int type, unsigned int id,
+ unsigned int field_index,
+ unsigned int report_counts);
+int hid_open_report(struct hid_device *device);
+int hid_check_keys_pressed(struct hid_device *hid);
+int hid_connect(struct hid_device *hid, unsigned int connect_mask);
+void hid_disconnect(struct hid_device *hid);
+const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+ const struct hid_device_id *id);
+s32 hid_snto32(__u32 value, unsigned n);
+
+/**
+ * hid_device_io_start - enable HID input during probe or remove
+ *
+ * @hid: the device
+ *
+ * This should only be called during probe or remove, and only by
+ * the thread calling probe or remove. It allows incoming packets
+ * to be delivered to the driver.
+ */
+static inline void hid_device_io_start(struct hid_device *hid) {
+ if (hid->io_started) {
+ dev_warn(&hid->dev, "io already started");
+ return;
+ }
+ hid->io_started = true;
+ up(&hid->driver_input_lock);
+}
+
+/**
+ * hid_device_io_stop - disable HID input during probe or remove
+ *
+ * @hid: the device
+ *
+ * Should only be called after hid_device_io_start. It prevents
+ * incoming packets from reaching the driver for the remainder of
+ * probe or remove. If called during probe, packets will still reach
+ * the driver once probe is complete. This function should only be
+ * called by the thread calling probe or remove.
+ */
+static inline void hid_device_io_stop(struct hid_device *hid) {
+ if (!hid->io_started) {
+ dev_warn(&hid->dev, "io already stopped");
+ return;
+ }
+ hid->io_started = false;
+ down(&hid->driver_input_lock);
+}
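
A hedged sketch of the intended use (foo_init_device is hypothetical and would be called from probe after hid_hw_start() has succeeded; a full probe sequence is sketched further below, after hid_hw_stop()):

static void foo_init_device(struct hid_device *hdev)
{
	hid_device_io_start(hdev);	/* raw_event/event may now run during probe */
	/* ... send requests and wait for the device's replies here ... */
	hid_device_io_stop(hdev);	/* back to the default "no input during probe" state */
}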
+
+/**
+ * hid_map_usage - map usage input bits
+ *
+ * @hidinput: hidinput which we are interested in
+ * @usage: usage to fill in
+ * @bit: pointer to the matching input->*bit map (out parameter)
+ * @max: maximal valid usage->code to consider later (out parameter)
+ * @type: input event type (EV_KEY, EV_REL, ...)
+ * @c: code which corresponds to this usage and type
+ */
+static inline void hid_map_usage(struct hid_input *hidinput,
+ struct hid_usage *usage, unsigned long **bit, int *max,
+ __u8 type, __u16 c)
+{
+ struct input_dev *input = hidinput->input;
+
+ usage->type = type;
+ usage->code = c;
+
+ switch (type) {
+ case EV_ABS:
+ *bit = input->absbit;
+ *max = ABS_MAX;
+ break;
+ case EV_REL:
+ *bit = input->relbit;
+ *max = REL_MAX;
+ break;
+ case EV_KEY:
+ *bit = input->keybit;
+ *max = KEY_MAX;
+ break;
+ case EV_LED:
+ *bit = input->ledbit;
+ *max = LED_MAX;
+ break;
+ }
+}
+
+/**
+ * hid_map_usage_clear - map usage input bits and clear the input bit
+ *
+ * The same as hid_map_usage, except the @c bit is also cleared in supported
+ * bits (@bit).
+ */
+static inline void hid_map_usage_clear(struct hid_input *hidinput,
+ struct hid_usage *usage, unsigned long **bit, int *max,
+ __u8 type, __u16 c)
+{
+ hid_map_usage(hidinput, usage, bit, max, type, c);
+ clear_bit(c, *bit);
+}
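
For illustration only (the consumer usage checked below is hypothetical), an input_mapping hook typically ends in hid_map_usage_clear() and returns 1 so the generic code skips the usage:

static int foo_input_mapping(struct hid_device *hdev, struct hid_input *hi,
			     struct hid_field *field, struct hid_usage *usage,
			     unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER &&
	    (usage->hid & HID_USAGE) == 0x0001) {		/* hypothetical usage */
		hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_PROG1);
		return 1;	/* mapped here, skip generic parsing */
	}
	return 0;		/* fall back to the generic mapping */
}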
+
+/**
+ * hid_parse - parse HW reports
+ *
+ * @hdev: hid device
+ *
+ * Call this from probe after you set up the device (if needed). Your
+ * report_fixup will be called (if non-NULL) after reading raw report from
+ * device before passing it to hid layer for real parsing.
+ */
+static inline int __must_check hid_parse(struct hid_device *hdev)
+{
+ return hid_open_report(hdev);
+}
+
+/**
+ * hid_hw_start - start underlying HW
+ *
+ * @hdev: hid device
+ * @connect_mask: which outputs to connect, see HID_CONNECT_*
+ *
+ * Call this in the probe function *after* hid_parse. This will set up HW
+ * buffers and start the device (if not deferred to device open). hid_hw_stop
+ * must be called if this was successful.
+ */
+static inline int __must_check hid_hw_start(struct hid_device *hdev,
+ unsigned int connect_mask)
+{
+ int ret = hdev->ll_driver->start(hdev);
+ if (ret || !connect_mask)
+ return ret;
+ ret = hid_connect(hdev, connect_mask);
+ if (ret)
+ hdev->ll_driver->stop(hdev);
+ return ret;
+}
+
+/**
+ * hid_hw_stop - stop underlying HW
+ *
+ * @hdev: hid device
+ *
+ * This is usually called from the remove function, or from probe when
+ * something failed after hid_hw_start had already been called.
+ */
+static inline void hid_hw_stop(struct hid_device *hdev)
+{
+ hid_disconnect(hdev);
+ hdev->ll_driver->stop(hdev);
+}
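
Putting the pieces together, a hedged sketch of the usual probe/remove pairing (bar_probe/bar_remove are hypothetical names that would be wired into a struct hid_driver via .probe/.remove):

static int bar_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);				/* parse the report descriptor */
	if (ret)
		return ret;

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);	/* buffers + hidinput/hidraw/hiddev */
}

static void bar_remove(struct hid_device *hdev)
{
	hid_hw_stop(hdev);				/* undoes a successful hid_hw_start() */
}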
+
+/**
+ * hid_hw_open - signal underlying HW to start delivering events
+ *
+ * @hdev: hid device
+ *
+ * Tell underlying HW to start delivering events from the device.
+ * This function should be called sometime after successful call
+ * to hid_hw_start().
+ */
+static inline int __must_check hid_hw_open(struct hid_device *hdev)
+{
+ return hdev->ll_driver->open(hdev);
+}
+
+/**
+ * hid_hw_close - signal underlying HW to stop delivering events
+ *
+ * @hdev: hid device
+ *
+ * This function indicates that we are not interested in the events
+ * from this device anymore. Delivery of events may or may not stop,
+ * depending on the number of users still outstanding.
+ */
+static inline void hid_hw_close(struct hid_device *hdev)
+{
+ hdev->ll_driver->close(hdev);
+}
+
+/**
+ * hid_hw_power - requests underlying HW to go into given power mode
+ *
+ * @hdev: hid device
+ * @level: requested power level (one of %PM_HINT_* defines)
+ *
+ * This function requests underlying hardware to enter requested power
+ * mode.
+ */
+
+static inline int hid_hw_power(struct hid_device *hdev, int level)
+{
+ return hdev->ll_driver->power ? hdev->ll_driver->power(hdev, level) : 0;
+}
+
+
+/**
+ * hid_hw_request - send report request to device
+ *
+ * @hdev: hid device
+ * @report: report to send
+ * @reqtype: hid request type
+ */
+static inline void hid_hw_request(struct hid_device *hdev,
+ struct hid_report *report, int reqtype)
+{
+ if (hdev->ll_driver->request)
+ return hdev->ll_driver->request(hdev, report, reqtype);
+
+ __hid_request(hdev, report, reqtype);
+}
+
+/**
+ * hid_hw_raw_request - send report request to device
+ *
+ * @hdev: hid device
+ * @reportnum: report ID
+ * @buf: in/out data to transfer
+ * @len: length of buf
+ * @rtype: HID report type
+ * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
+ *
+ * @return: number of bytes transferred, or a negative error code
+ *
+ * Same behavior as hid_hw_request, but with raw buffers instead.
+ */
+static inline int hid_hw_raw_request(struct hid_device *hdev,
+ unsigned char reportnum, __u8 *buf,
+ size_t len, unsigned char rtype, int reqtype)
+{
+ if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+ return -EINVAL;
+
+ return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
+ rtype, reqtype);
+}
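
A hedged sketch of reading a feature report (the report ID and length are invented; a heap buffer is used because the transfer may need a DMA-able buffer, and <linux/slab.h> is assumed for kzalloc/kfree):

static int foo_get_feature(struct hid_device *hdev)
{
	u8 *buf;
	int ret;

	buf = kzalloc(8, GFP_KERNEL);			/* hypothetical 8-byte report */
	if (!buf)
		return -ENOMEM;

	buf[0] = 0x02;					/* hypothetical report ID */
	ret = hid_hw_raw_request(hdev, buf[0], buf, 8,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	if (ret < 0)
		hid_err(hdev, "failed to read feature report: %d\n", ret);

	kfree(buf);
	return ret < 0 ? ret : 0;
}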
+
+/**
+ * hid_hw_output_report - send output report to device
+ *
+ * @hdev: hid device
+ * @buf: raw data to transfer
+ * @len: length of buf
+ *
+ * @return: number of bytes transferred, or a negative error code
+ */
+static inline int hid_hw_output_report(struct hid_device *hdev, __u8 *buf,
+ size_t len)
+{
+ if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
+ return -EINVAL;
+
+ if (hdev->ll_driver->output_report)
+ return hdev->ll_driver->output_report(hdev, buf, len);
+
+ return -ENOSYS;
+}
+
+/**
+ * hid_hw_idle - send idle request to device
+ *
+ * @hdev: hid device
+ * @report: report to control
+ * @idle: idle state
+ * @reqtype: hid request type
+ */
+static inline int hid_hw_idle(struct hid_device *hdev, int report, int idle,
+ int reqtype)
+{
+ if (hdev->ll_driver->idle)
+ return hdev->ll_driver->idle(hdev, report, idle, reqtype);
+
+ return 0;
+}
+
+/**
+ * hid_hw_wait - wait for buffered io to complete
+ *
+ * @hdev: hid device
+ */
+static inline void hid_hw_wait(struct hid_device *hdev)
+{
+ if (hdev->ll_driver->wait)
+ hdev->ll_driver->wait(hdev);
+}
+
+/**
+ * hid_report_len - calculate the report length
+ *
+ * @report: the report whose length we want to know
+ */
+static inline int hid_report_len(struct hid_report *report)
+{
+ /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+ return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+}
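
For example (sketch only; error handling trimmed, <linux/slab.h> assumed for kfree), a driver that needs a scratch buffer for a report would normally pair hid_report_len() with hid_alloc_report_buf():

static int foo_send_report(struct hid_device *hdev, struct hid_report *report)
{
	u8 *buf = hid_alloc_report_buf(report, GFP_KERNEL);
	int len = hid_report_len(report);

	if (!buf)
		return -ENOMEM;

	hid_output_report(report, buf);			/* serialize the current field values */
	hid_hw_output_report(hdev, buf, len);		/* best effort; may return -ENOSYS */
	kfree(buf);
	return 0;
}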
+
+int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
+ int interrupt);
+
+/* HID quirks API */
+u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
+int usbhid_quirks_init(char **quirks_param);
+void usbhid_quirks_exit(void);
+
+#ifdef CONFIG_HID_PID
+int hid_pidff_init(struct hid_device *hid);
+#else
+#define hid_pidff_init NULL
+#endif
+
+#define dbg_hid(format, arg...) \
+do { \
+ if (hid_debug) \
+ printk(KERN_DEBUG "%s: " format, __FILE__, ##arg); \
+} while (0)
+
+#define hid_printk(level, hid, fmt, arg...) \
+ dev_printk(level, &(hid)->dev, fmt, ##arg)
+#define hid_emerg(hid, fmt, arg...) \
+ dev_emerg(&(hid)->dev, fmt, ##arg)
+#define hid_crit(hid, fmt, arg...) \
+ dev_crit(&(hid)->dev, fmt, ##arg)
+#define hid_alert(hid, fmt, arg...) \
+ dev_alert(&(hid)->dev, fmt, ##arg)
+#define hid_err(hid, fmt, arg...) \
+ dev_err(&(hid)->dev, fmt, ##arg)
+#define hid_notice(hid, fmt, arg...) \
+ dev_notice(&(hid)->dev, fmt, ##arg)
+#define hid_warn(hid, fmt, arg...) \
+ dev_warn(&(hid)->dev, fmt, ##arg)
+#define hid_info(hid, fmt, arg...) \
+ dev_info(&(hid)->dev, fmt, ##arg)
+#define hid_dbg(hid, fmt, arg...) \
+ dev_dbg(&(hid)->dev, fmt, ##arg)
+
+#endif
diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h
new file mode 100644
index 000000000..a5dd81486
--- /dev/null
+++ b/include/linux/hiddev.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 1999-2000 Vojtech Pavlik
+ *
+ * Sponsored by SuSE
+ */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Should you need to contact me, the author, you can do so either by
+ * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
+ * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
+ */
+#ifndef _HIDDEV_H
+#define _HIDDEV_H
+
+#include <uapi/linux/hiddev.h>
+
+
+/*
+ * In-kernel definitions.
+ */
+
+struct hid_device;
+struct hid_usage;
+struct hid_field;
+struct hid_report;
+
+#ifdef CONFIG_USB_HIDDEV
+int hiddev_connect(struct hid_device *hid, unsigned int force);
+void hiddev_disconnect(struct hid_device *);
+void hiddev_hid_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value);
+void hiddev_report_event(struct hid_device *hid, struct hid_report *report);
+#else
+static inline int hiddev_connect(struct hid_device *hid,
+ unsigned int force)
+{ return -1; }
+static inline void hiddev_disconnect(struct hid_device *hid) { }
+static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value) { }
+static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { }
+#endif
+
+#endif
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
new file mode 100644
index 000000000..ddf52612e
--- /dev/null
+++ b/include/linux/hidraw.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2007 Jiri Kosina
+ */
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef _HIDRAW_H
+#define _HIDRAW_H
+
+#include <uapi/linux/hidraw.h>
+
+
+struct hidraw {
+ unsigned int minor;
+ int exist;
+ int open;
+ wait_queue_head_t wait;
+ struct hid_device *hid;
+ struct device *dev;
+ spinlock_t list_lock;
+ struct list_head list;
+};
+
+struct hidraw_report {
+ __u8 *value;
+ int len;
+};
+
+struct hidraw_list {
+ struct hidraw_report buffer[HIDRAW_BUFFER_SIZE];
+ int head;
+ int tail;
+ struct fasync_struct *fasync;
+ struct hidraw *hidraw;
+ struct list_head node;
+ struct mutex read_mutex;
+};
+
+#ifdef CONFIG_HIDRAW
+int hidraw_init(void);
+void hidraw_exit(void);
+int hidraw_report_event(struct hid_device *, u8 *, int);
+int hidraw_connect(struct hid_device *);
+void hidraw_disconnect(struct hid_device *);
+#else
+static inline int hidraw_init(void) { return 0; }
+static inline void hidraw_exit(void) { }
+static inline int hidraw_report_event(struct hid_device *hid, u8 *data, int len) { return 0; }
+static inline int hidraw_connect(struct hid_device *hid) { return -1; }
+static inline void hidraw_disconnect(struct hid_device *hid) { }
+#endif
+
+#endif
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
new file mode 100644
index 000000000..9286a46b7
--- /dev/null
+++ b/include/linux/highmem.h
@@ -0,0 +1,249 @@
+#ifndef _LINUX_HIGHMEM_H
+#define _LINUX_HIGHMEM_H
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+
+#include <asm/cacheflush.h>
+
+#ifndef ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+}
+#endif
+
+#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+}
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+}
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+}
+#endif
+
+#include <asm/kmap_types.h>
+
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+
+/* declarations for linux/mm/highmem.c */
+unsigned int nr_free_highpages(void);
+extern unsigned long totalhigh_pages;
+
+void kmap_flush_unused(void);
+
+struct page *kmap_to_page(void *addr);
+
+#else /* CONFIG_HIGHMEM */
+
+static inline unsigned int nr_free_highpages(void) { return 0; }
+
+static inline struct page *kmap_to_page(void *addr)
+{
+ return virt_to_page(addr);
+}
+
+#define totalhigh_pages 0UL
+
+#ifndef ARCH_HAS_KMAP
+static inline void *kmap(struct page *page)
+{
+ might_sleep();
+ return page_address(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+}
+
+static inline void *kmap_atomic(struct page *page)
+{
+ pagefault_disable();
+ return page_address(page);
+}
+#define kmap_atomic_prot(page, prot) kmap_atomic(page)
+
+static inline void __kunmap_atomic(void *addr)
+{
+ pagefault_enable();
+}
+
+#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
+#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
+
+#define kmap_flush_unused() do {} while(0)
+#endif
+
+#endif /* CONFIG_HIGHMEM */
+
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
+DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
+static inline int kmap_atomic_idx_push(void)
+{
+ int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+ WARN_ON_ONCE(in_irq() && !irqs_disabled());
+ BUG_ON(idx >= KM_TYPE_NR);
+#endif
+ return idx;
+}
+
+static inline int kmap_atomic_idx(void)
+{
+ return __this_cpu_read(__kmap_atomic_idx) - 1;
+}
+
+static inline void kmap_atomic_idx_pop(void)
+{
+#ifdef CONFIG_DEBUG_HIGHMEM
+ int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
+ BUG_ON(idx < 0);
+#else
+ __this_cpu_dec(__kmap_atomic_idx);
+#endif
+}
+
+#endif
+
+/*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+#define kunmap_atomic(addr) \
+do { \
+ BUILD_BUG_ON(__same_type((addr), struct page *)); \
+ __kunmap_atomic(addr); \
+} while (0)
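
A hedged sketch of the pairing this macro enforces: map, touch the page while the atomic mapping is held (no sleeping in that window), then pass the returned address back to kunmap_atomic():

static void fill_page_with(struct page *page, int byte)
{
	void *kaddr = kmap_atomic(page);	/* temporary mapping when the page is in highmem */

	memset(kaddr, byte, PAGE_SIZE);		/* must not sleep while the mapping is held */
	kunmap_atomic(kaddr);			/* takes the returned address, not the struct page */
}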
+
+
+/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+#ifndef clear_user_highpage
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+ void *addr = kmap_atomic(page);
+ clear_user_page(addr, vaddr, page);
+ kunmap_atomic(addr);
+}
+#endif
+
+#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+/**
+ * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
+ * @movableflags: The GFP flags related to the page's future ability to move, such as __GFP_MOVABLE
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA but the caller is expected
+ * to specify via movableflags whether the page will be movable in the
+ * future or not
+ *
+ * An architecture may override this function by defining
+ * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
+ * implementation.
+ */
+static inline struct page *
+__alloc_zeroed_user_highpage(gfp_t movableflags,
+ struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
+ vma, vaddr);
+
+ if (page)
+ clear_user_highpage(page, vaddr);
+
+ return page;
+}
+#endif
+
+/**
+ * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
+ * @vma: The VMA the page is to be allocated for
+ * @vaddr: The virtual address the page will be inserted into
+ *
+ * This function will allocate a page for a VMA that the caller knows will
+ * be able to migrate in the future using move_pages() or be reclaimed
+ */
+static inline struct page *
+alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+}
+
+static inline void clear_highpage(struct page *page)
+{
+ void *kaddr = kmap_atomic(page);
+ clear_page(kaddr);
+ kunmap_atomic(kaddr);
+}
+
+static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+{
+ void *kaddr = kmap_atomic(page);
+
+ BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+
+ if (end1 > start1)
+ memset(kaddr + start1, 0, end1 - start1);
+
+ if (end2 > start2)
+ memset(kaddr + start2, 0, end2 - start2);
+
+ kunmap_atomic(kaddr);
+ flush_dcache_page(page);
+}
+
+static inline void zero_user_segment(struct page *page,
+ unsigned start, unsigned end)
+{
+ zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline void zero_user(struct page *page,
+ unsigned start, unsigned size)
+{
+ zero_user_segments(page, start, start + size, 0, 0);
+}
+
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
+
+static inline void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_atomic(from);
+ vto = kmap_atomic(to);
+ copy_user_page(vto, vfrom, vaddr, to);
+ kunmap_atomic(vto);
+ kunmap_atomic(vfrom);
+}
+
+#endif
+
+static inline void copy_highpage(struct page *to, struct page *from)
+{
+ char *vfrom, *vto;
+
+ vfrom = kmap_atomic(from);
+ vto = kmap_atomic(to);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vto);
+ kunmap_atomic(vfrom);
+}
+
+#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/highuid.h b/include/linux/highuid.h
new file mode 100644
index 000000000..434e56246
--- /dev/null
+++ b/include/linux/highuid.h
@@ -0,0 +1,97 @@
+#ifndef _LINUX_HIGHUID_H
+#define _LINUX_HIGHUID_H
+
+#include <linux/types.h>
+
+/*
+ * general notes:
+ *
+ * CONFIG_UID16 is defined if the given architecture needs to
+ * support backwards compatibility for old system calls.
+ *
+ * kernel code should use uid_t and gid_t at all times when dealing with
+ * kernel-private data.
+ *
+ * old_uid_t and old_gid_t should only be different if CONFIG_UID16 is
+ * defined, else the platform should provide dummy typedefs for them
+ * such that they are equivalent to __kernel_{u,g}id_t.
+ *
+ * uid16_t and gid16_t are used on all architectures (when dealing
+ * with structures hard-coded to 16 bits, such as in filesystems).
+ */
+
+
+/*
+ * This is the "overflow" UID and GID. They are used to signify uid/gid
+ * overflow to old programs when they request uid/gid information but are
+ * using the old 16 bit interfaces.
+ * When you run a libc5 program, it will think that all highuid files or
+ * processes are owned by this uid/gid.
+ * The idea is that it's better to do so than possibly return 0 in lieu of
+ * 65536, etc.
+ */
+
+extern int overflowuid;
+extern int overflowgid;
+
+extern void __bad_uid(void);
+extern void __bad_gid(void);
+
+#define DEFAULT_OVERFLOWUID 65534
+#define DEFAULT_OVERFLOWGID 65534
+
+#ifdef CONFIG_UID16
+
+/* prevent uid mod 65536 effect by returning a default value for high UIDs */
+#define high2lowuid(uid) ((uid) & ~0xFFFF ? (old_uid_t)overflowuid : (old_uid_t)(uid))
+#define high2lowgid(gid) ((gid) & ~0xFFFF ? (old_gid_t)overflowgid : (old_gid_t)(gid))
+/*
+ * -1 is different in 16 bits than it is in 32 bits
+ * these macros are used by chown(), setreuid(), ...,
+ */
+#define low2highuid(uid) ((uid) == (old_uid_t)-1 ? (uid_t)-1 : (uid_t)(uid))
+#define low2highgid(gid) ((gid) == (old_gid_t)-1 ? (gid_t)-1 : (gid_t)(gid))
+
+#define __convert_uid(size, uid) \
+ (size >= sizeof(uid) ? (uid) : high2lowuid(uid))
+#define __convert_gid(size, gid) \
+ (size >= sizeof(gid) ? (gid) : high2lowgid(gid))
+
+
+#else
+
+#define __convert_uid(size, uid) (uid)
+#define __convert_gid(size, gid) (gid)
+
+#endif /* !CONFIG_UID16 */
+
+/* uid/gid input should be always 32bit uid_t */
+#define SET_UID(var, uid) do { (var) = __convert_uid(sizeof(var), (uid)); } while (0)
+#define SET_GID(var, gid) do { (var) = __convert_gid(sizeof(var), (gid)); } while (0)
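
As a hedged example of the intent (struct legacy_ids is hypothetical), a 16-bit compatibility path fills its fields with SET_UID()/SET_GID(), which substitute overflowuid/overflowgid when CONFIG_UID16 is set and the value does not fit:

struct legacy_ids {		/* hypothetical 16-bit on-wire structure */
	uid16_t uid;
	gid16_t gid;
};

static inline void fill_legacy_ids(struct legacy_ids *out, uid_t uid, gid_t gid)
{
	SET_UID(out->uid, uid);	/* overflowuid if CONFIG_UID16 and uid needs more than 16 bits */
	SET_GID(out->gid, gid);
}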
+
+/*
+ * Everything below this line is needed on all architectures, to deal with
+ * filesystems that only store 16 bits of the UID/GID, etc.
+ */
+
+/*
+ * This is the UID and GID that will get written to disk if a filesystem
+ * only supports 16-bit UIDs and the kernel has a high UID/GID to write
+ */
+extern int fs_overflowuid;
+extern int fs_overflowgid;
+
+#define DEFAULT_FS_OVERFLOWUID 65534
+#define DEFAULT_FS_OVERFLOWGID 65534
+
+/*
+ * Since these macros are used in architectures that only need limited
+ * 16-bit UID back compatibility, we won't use old_uid_t and old_gid_t
+ */
+#define fs_high2lowuid(uid) ((uid) & ~0xFFFF ? (uid16_t)fs_overflowuid : (uid16_t)(uid))
+#define fs_high2lowgid(gid) ((gid) & ~0xFFFF ? (gid16_t)fs_overflowgid : (gid16_t)(gid))
+
+#define low_16_bits(x) ((x) & 0xFFFF)
+#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16)
+
+#endif /* _LINUX_HIGHUID_H */
diff --git a/include/linux/hil.h b/include/linux/hil.h
new file mode 100644
index 000000000..523785a9d
--- /dev/null
+++ b/include/linux/hil.h
@@ -0,0 +1,483 @@
+#ifndef _HIL_H_
+#define _HIL_H_
+
+/*
+ * Hewlett Packard Human Interface Loop (HP-HIL) Protocol -- header.
+ *
+ * Copyright (c) 2001 Brian S. Julin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ *
+ * References:
+ * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A
+ *
+ * A note of thanks to HP for providing and shipping reference materials
+ * free of charge to help in the development of HIL support for Linux.
+ *
+ */
+
+#include <asm/types.h>
+
+/* Physical constants relevant to raw loop/device timing.
+ */
+
+#define HIL_CLOCK 8MHZ
+#define HIL_EK1_CLOCK 30HZ
+#define HIL_EK2_CLOCK 60HZ
+
+#define HIL_TIMEOUT_DEV 5 /* ms */
+#define HIL_TIMEOUT_DEVS 10 /* ms */
+#define HIL_TIMEOUT_NORESP 10 /* ms */
+#define HIL_TIMEOUT_DEVS_DATA 16 /* ms */
+#define HIL_TIMEOUT_SELFTEST 200 /* ms */
+
+
+/* Actual wire line coding. These will only be useful if someone is
+ * implementing a software MLC to run HIL devices on a non-parisc machine.
+ */
+
+#define HIL_WIRE_PACKET_LEN 15
+enum hil_wire_bitpos {
+ HIL_WIRE_START = 0,
+ HIL_WIRE_ADDR2,
+ HIL_WIRE_ADDR1,
+ HIL_WIRE_ADDR0,
+ HIL_WIRE_COMMAND,
+ HIL_WIRE_DATA7,
+ HIL_WIRE_DATA6,
+ HIL_WIRE_DATA5,
+ HIL_WIRE_DATA4,
+ HIL_WIRE_DATA3,
+ HIL_WIRE_DATA2,
+ HIL_WIRE_DATA1,
+ HIL_WIRE_DATA0,
+ HIL_WIRE_PARITY,
+ HIL_WIRE_STOP
+};
+
+/* HP documentation uses these bit positions to refer to commands;
+ * we will call these "packets".
+ */
+enum hil_pkt_bitpos {
+ HIL_PKT_CMD = 0x00000800,
+ HIL_PKT_ADDR2 = 0x00000400,
+ HIL_PKT_ADDR1 = 0x00000200,
+ HIL_PKT_ADDR0 = 0x00000100,
+ HIL_PKT_ADDR_MASK = 0x00000700,
+ HIL_PKT_ADDR_SHIFT = 8,
+ HIL_PKT_DATA7 = 0x00000080,
+ HIL_PKT_DATA6 = 0x00000040,
+ HIL_PKT_DATA5 = 0x00000020,
+ HIL_PKT_DATA4 = 0x00000010,
+ HIL_PKT_DATA3 = 0x00000008,
+ HIL_PKT_DATA2 = 0x00000004,
+ HIL_PKT_DATA1 = 0x00000002,
+ HIL_PKT_DATA0 = 0x00000001,
+ HIL_PKT_DATA_MASK = 0x000000FF,
+ HIL_PKT_DATA_SHIFT = 0
+};
+
+/* The HIL MLC also has several error/status/control bits. We extend the
+ * "packet" to include these when direct access to the MLC is available,
+ * or emulate them in cases where they are not available.
+ *
+ * This way the device driver knows that the underlying MLC driver
+ * has had to deal with loop errors.
+ */
+enum hil_error_bitpos {
+ HIL_ERR_OB = 0x00000800, /* MLC is busy sending an auto-poll,
+ or we have filled up the output
+ buffer and must wait. */
+ HIL_ERR_INT = 0x00010000, /* A normal interrupt has occurred. */
+ HIL_ERR_NMI = 0x00020000, /* An NMI has occurred. */
+ HIL_ERR_LERR = 0x00040000, /* A poll didn't come back. */
+ HIL_ERR_PERR = 0x01000000, /* There was a Parity Error. */
+ HIL_ERR_FERR = 0x02000000, /* There was a Framing Error. */
+ HIL_ERR_FOF = 0x04000000 /* Input FIFO Overflowed. */
+};
+
+enum hil_control_bitpos {
+ HIL_CTRL_TEST = 0x00010000,
+ HIL_CTRL_IPF = 0x00040000,
+ HIL_CTRL_APE = 0x02000000
+};
+
+/* Bits 30,31 are unused, we use them to control write behavior. */
+#define HIL_DO_ALTER_CTRL 0x40000000 /* Write MSW of packet to control
+ before writing LSW to loop */
+#define HIL_CTRL_ONLY 0xc0000000 /* *Only* alter the control registers */
+
+/* This gives us a 32-bit "packet"
+ */
+typedef u32 hil_packet;
+
+
+/* HIL Loop commands
+ */
+enum hil_command {
+ HIL_CMD_IFC = 0x00, /* Interface Clear */
+ HIL_CMD_EPT = 0x01, /* Enter Pass-Thru Mode */
+ HIL_CMD_ELB = 0x02, /* Enter Loop-Back Mode */
+ HIL_CMD_IDD = 0x03, /* Identify and Describe */
+ HIL_CMD_DSR = 0x04, /* Device Soft Reset */
+ HIL_CMD_PST = 0x05, /* Perform Self Test */
+ HIL_CMD_RRG = 0x06, /* Read Register */
+ HIL_CMD_WRG = 0x07, /* Write Register */
+ HIL_CMD_ACF = 0x08, /* Auto Configure */
+ HIL_CMDID_ACF = 0x07, /* Auto Configure bits with incremented ID */
+ HIL_CMD_POL = 0x10, /* Poll */
+ HIL_CMDCT_POL = 0x0f, /* Poll command bits with item count */
+ HIL_CMD_RPL = 0x20, /* RePoll */
+ HIL_CMDCT_RPL = 0x0f, /* RePoll command bits with item count */
+ HIL_CMD_RNM = 0x30, /* Report Name */
+ HIL_CMD_RST = 0x31, /* Report Status */
+ HIL_CMD_EXD = 0x32, /* Extended Describe */
+ HIL_CMD_RSC = 0x33, /* Report Security Code */
+
+ /* 0x34 to 0x3c reserved for future use */
+
+ HIL_CMD_DKA = 0x3d, /* Disable Keyswitch Autorepeat */
+ HIL_CMD_EK1 = 0x3e, /* Enable Keyswitch Autorepeat 1 */
+ HIL_CMD_EK2 = 0x3f, /* Enable Keyswitch Autorepeat 2 */
+ HIL_CMD_PR1 = 0x40, /* Prompt1 */
+ HIL_CMD_PR2 = 0x41, /* Prompt2 */
+ HIL_CMD_PR3 = 0x42, /* Prompt3 */
+ HIL_CMD_PR4 = 0x43, /* Prompt4 */
+ HIL_CMD_PR5 = 0x44, /* Prompt5 */
+ HIL_CMD_PR6 = 0x45, /* Prompt6 */
+ HIL_CMD_PR7 = 0x46, /* Prompt7 */
+ HIL_CMD_PRM = 0x47, /* Prompt (General Purpose) */
+ HIL_CMD_AK1 = 0x48, /* Acknowledge1 */
+ HIL_CMD_AK2 = 0x49, /* Acknowledge2 */
+ HIL_CMD_AK3 = 0x4a, /* Acknowledge3 */
+ HIL_CMD_AK4 = 0x4b, /* Acknowledge4 */
+ HIL_CMD_AK5 = 0x4c, /* Acknowledge5 */
+ HIL_CMD_AK6 = 0x4d, /* Acknowledge6 */
+ HIL_CMD_AK7 = 0x4e, /* Acknowledge7 */
+ HIL_CMD_ACK = 0x4f, /* Acknowledge (General Purpose) */
+
+ /* 0x50 to 0x78 reserved for future use */
+ /* 0x80 to 0xEF device-specific commands */
+ /* 0xf0 to 0xf9 reserved for future use */
+
+ HIL_CMD_RIO = 0xfa, /* Register I/O Error */
+ HIL_CMD_SHR = 0xfb, /* System Hard Reset */
+ HIL_CMD_TER = 0xfc, /* Transmission Error */
+ HIL_CMD_CAE = 0xfd, /* Configuration Address Error */
+ HIL_CMD_DHR = 0xfe, /* Device Hard Reset */
+
+ /* 0xff is prohibited from use. */
+};
+
+
+/*
+ * Response "records" to HIL commands
+ */
+
+/* Device ID byte
+ */
+#define HIL_IDD_DID_TYPE_MASK 0xe0 /* Primary type bits */
+#define HIL_IDD_DID_TYPE_KB_INTEGRAL 0xa0 /* Integral keyboard */
+#define HIL_IDD_DID_TYPE_KB_ITF 0xc0 /* ITF keyboard */
+#define HIL_IDD_DID_TYPE_KB_RSVD 0xe0 /* Reserved keyboard type */
+#define HIL_IDD_DID_TYPE_KB_LANG_MASK 0x1f /* Keyboard locale bits */
+#define HIL_IDD_DID_KBLANG_USE_ESD 0x00 /* Use ESD Locale instead */
+#define HIL_IDD_DID_TYPE_ABS 0x80 /* Absolute Positioners */
+#define HIL_IDD_DID_ABS_RSVD1_MASK 0xf8 /* Reserved */
+#define HIL_IDD_DID_ABS_RSVD1 0x98
+#define HIL_IDD_DID_ABS_TABLET_MASK 0xf8 /* Tablets and digitizers */
+#define HIL_IDD_DID_ABS_TABLET 0x90
+#define HIL_IDD_DID_ABS_TSCREEN_MASK 0xfc /* Touch screens */
+#define HIL_IDD_DID_ABS_TSCREEN 0x8c
+#define HIL_IDD_DID_ABS_RSVD2_MASK 0xfc /* Reserved */
+#define HIL_IDD_DID_ABS_RSVD2 0x88
+#define HIL_IDD_DID_ABS_RSVD3_MASK 0xfc /* Reserved */
+#define HIL_IDD_DID_ABS_RSVD3 0x80
+#define HIL_IDD_DID_TYPE_REL 0x60 /* Relative Positioners */
+#define HIL_IDD_DID_REL_RSVD1_MASK 0xf0 /* Reserved */
+#define HIL_IDD_DID_REL_RSVD1 0x70
+#define HIL_IDD_DID_REL_RSVD2_MASK 0xfc /* Reserved */
+#define HIL_IDD_DID_REL_RSVD2 0x6c
+#define HIL_IDD_DID_REL_MOUSE_MASK 0xfc /* Mouse */
+#define HIL_IDD_DID_REL_MOUSE 0x68
+#define HIL_IDD_DID_REL_QUAD_MASK 0xf8 /* Other Quadrature Devices */
+#define HIL_IDD_DID_REL_QUAD 0x60
+#define HIL_IDD_DID_TYPE_CHAR 0x40 /* Character Entry */
+#define HIL_IDD_DID_CHAR_BARCODE_MASK 0xfc /* Barcode Reader */
+#define HIL_IDD_DID_CHAR_BARCODE 0x5c
+#define HIL_IDD_DID_CHAR_RSVD1_MASK 0xfc /* Reserved */
+#define HIL_IDD_DID_CHAR_RSVD1 0x58
+#define HIL_IDD_DID_CHAR_RSVD2_MASK 0xf8 /* Reserved */
+#define HIL_IDD_DID_CHAR_RSVD2 0x50
+#define HIL_IDD_DID_CHAR_RSVD3_MASK 0xf0 /* Reserved */
+#define HIL_IDD_DID_CHAR_RSVD3 0x40
+#define HIL_IDD_DID_TYPE_OTHER 0x20 /* Miscellaneous */
+#define HIL_IDD_DID_OTHER_RSVD1_MASK 0xf0 /* Reserved */
+#define HIL_IDD_DID_OTHER_RSVD1 0x30
+#define HIL_IDD_DID_OTHER_BARCODE_MASK 0xfc /* Tone Generator */
+#define HIL_IDD_DID_OTHER_BARCODE 0x2c
+#define HIL_IDD_DID_OTHER_RSVD2_MASK 0xfc /* Reserved */
+#define HIL_IDD_DID_OTHER_RSVD2 0x28
+#define HIL_IDD_DID_OTHER_RSVD3_MASK 0xf8 /* Reserved */
+#define HIL_IDD_DID_OTHER_RSVD3 0x20
+#define HIL_IDD_DID_TYPE_KEYPAD 0x00 /* Vectra Keyboard */
+
+/* IDD record header
+ */
+#define HIL_IDD_HEADER_AXSET_MASK 0x03 /* Number of axis in a set */
+#define HIL_IDD_HEADER_RSC 0x04 /* Supports RSC command */
+#define HIL_IDD_HEADER_EXD 0x08 /* Supports EXD command */
+#define HIL_IDD_HEADER_IOD 0x10 /* IOD byte to follow */
+#define HIL_IDD_HEADER_16BIT 0x20 /* 16 (vs. 8) bit resolution */
+#define HIL_IDD_HEADER_ABS 0x40 /* Reports Absolute Position */
+#define HIL_IDD_HEADER_2X_AXIS 0x80 /* Two sets of 1-3 axis */
+
+/* I/O Descriptor
+ */
+#define HIL_IDD_IOD_NBUTTON_MASK 0x07 /* Number of buttons */
+#define HIL_IDD_IOD_PROXIMITY 0x08 /* Proximity in/out events */
+#define HIL_IDD_IOD_PROMPT_MASK 0x70 /* Number of prompts/acks */
+#define HIL_IDD_IOD_PROMPT_SHIFT 4
+#define HIL_IDD_IOD_PROMPT 0x80 /* Generic prompt/ack */
+
+#define HIL_IDD_NUM_AXES_PER_SET(header_packet) \
+((header_packet) & HIL_IDD_HEADER_AXSET_MASK)
+
+#define HIL_IDD_NUM_AXSETS(header_packet) \
+(2 - !((header_packet) & HIL_IDD_HEADER_2X_AXIS))
+
+#define HIL_IDD_LEN(header_packet) \
+((4 - !(header_packet & HIL_IDD_HEADER_IOD) - \
+ 2 * !(HIL_IDD_NUM_AXES_PER_SET(header_packet))) + \
+ 2 * HIL_IDD_NUM_AXES_PER_SET(header_packet) * \
+ !!((header_packet) & HIL_IDD_HEADER_ABS))
+
+/* The following HIL_IDD_* macros assume you have an array of
+ * packets and/or unpacked 8-bit data in the order that they
+ * were received.
+ */
+
+#define HIL_IDD_AXIS_COUNTS_PER_M(header_ptr) \
+(!(HIL_IDD_NUM_AXSETS(*(header_ptr))) ? -1 : \
+(((*(header_ptr + 1) & HIL_PKT_DATA_MASK) + \
+ ((*(header_ptr + 2) & HIL_PKT_DATA_MASK)) << 8) \
+* ((*(header_ptr) & HIL_IDD_HEADER_16BIT) ? 100 : 1)))
+
+#define HIL_IDD_AXIS_MAX(header_ptr, __axnum) \
+((!(*(header_ptr) & HIL_IDD_HEADER_ABS) || \
+ (HIL_IDD_NUM_AXES_PER_SET(*(header_ptr)) <= __axnum)) ? 0 : \
+ ((HIL_PKT_DATA_MASK & *((header_ptr) + 3 + 2 * __axnum)) + \
+ ((HIL_PKT_DATA_MASK & *((header_ptr) + 4 + 2 * __axnum)) << 8)))
+
+#define HIL_IDD_IOD(header_ptr) \
+(*(header_ptr + HIL_IDD_LEN((*header_ptr)) - 1))
+
+#define HIL_IDD_HAS_GEN_PROMPT(header_ptr) \
+((*header_ptr & HIL_IDD_HEADER_IOD) && \
+ (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_PROMPT))
+
+#define HIL_IDD_HAS_GEN_PROXIMITY(header_ptr) \
+((*header_ptr & HIL_IDD_HEADER_IOD) && \
+ (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_PROXIMITY))
+
+#define HIL_IDD_NUM_BUTTONS(header_ptr) \
+((*header_ptr & HIL_IDD_HEADER_IOD) ? \
+ (HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_NBUTTON_MASK) : 0)
+
+#define HIL_IDD_NUM_PROMPTS(header_ptr) \
+((*header_ptr & HIL_IDD_HEADER_IOD) ? \
+ ((HIL_IDD_IOD(header_ptr) & HIL_IDD_IOD_PROMPT_MASK) \
+ >> HIL_IDD_IOD_PROMPT_SHIFT) : 0)
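
A hedged sketch of applying these accessors to a received IDD record (as in struct hil_mlc_devinfo below, idd[0] is assumed to be the Device ID byte and idd[1] the record header):

static void foo_hil_describe(const uint8_t *idd)
{
	const uint8_t *header = idd + 1;	/* skip the Device ID byte */

	printk(KERN_DEBUG "hil: %d axis set(s), %d axes per set, %d button(s)\n",
	       HIL_IDD_NUM_AXSETS(*header),
	       HIL_IDD_NUM_AXES_PER_SET(*header),
	       HIL_IDD_NUM_BUTTONS(header));
}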
+
+/* The response to HIL EXD commands -- the "extended describe record" */
+#define HIL_EXD_HEADER_WRG 0x03 /* Supports type2 WRG */
+#define HIL_EXD_HEADER_WRG_TYPE1 0x01 /* Supports type1 WRG */
+#define HIL_EXD_HEADER_WRG_TYPE2 0x02 /* Supports type2 WRG */
+#define HIL_EXD_HEADER_RRG 0x04 /* Supports RRG command */
+#define HIL_EXD_HEADER_RNM 0x10 /* Supports RNM command */
+#define HIL_EXD_HEADER_RST 0x20 /* Supports RST command */
+#define HIL_EXD_HEADER_LOCALE 0x40 /* Contains locale code */
+
+#define HIL_EXD_NUM_RRG(header_ptr) \
+((*header_ptr & HIL_EXD_HEADER_RRG) ? \
+ (*(header_ptr + 1) & HIL_PKT_DATA_MASK) : 0)
+
+#define HIL_EXD_NUM_WWG(header_ptr) \
+((*header_ptr & HIL_EXD_HEADER_WRG) ? \
+ (*(header_ptr + 2 - !(*header_ptr & HIL_EXD_HEADER_RRG)) & \
+ HIL_PKT_DATA_MASK) : 0)
+
+#define HIL_EXD_LEN(header_ptr) \
+(!!(*header_ptr & HIL_EXD_HEADER_RRG) + \
+ !!(*header_ptr & HIL_EXD_HEADER_WRG) + \
+ !!(*header_ptr & HIL_EXD_HEADER_LOCALE) + \
+ 2 * !!(*header_ptr & HIL_EXD_HEADER_WRG_TYPE2) + 1)
+
+#define HIL_EXD_LOCALE(header_ptr) \
+(!(*header_ptr & HIL_EXD_HEADER_LOCALE) ? -1 : \
+ (*(header_ptr + HIL_EXD_LEN(header_ptr) - 1) & HIL_PKT_DATA_MASK))
+
+#define HIL_EXD_WRG_TYPE2_LEN(header_ptr) \
+(!(*header_ptr & HIL_EXD_HEADER_WRG_TYPE2) ? -1 : \
+ (*(header_ptr + HIL_EXD_LEN(header_ptr) - 2 - \
+ !!(*header_ptr & HIL_EXD_HEADER_LOCALE)) & HIL_PKT_DATA_MASK) + \
+ ((*(header_ptr + HIL_EXD_LEN(header_ptr) - 1 - \
+ !!(*header_ptr & HIL_EXD_HEADER_LOCALE)) & HIL_PKT_DATA_MASK) << 8))
+
+/* Device locale codes. */
+
+/* Last defined locale code. Everything above this is "Reserved",
+ and note that this same table applies to the Device ID Byte where
+ keyboards may have a nationality code which is only 5 bits. */
+#define HIL_LOCALE_MAX 0x1f
+
+/* Map to hopefully useful strings. I was trying to make these look
+ like locale.aliases strings do; maybe that isn't the right table to
+ emulate. In either case, I didn't have much to work on. */
+#define HIL_LOCALE_MAP \
+"", /* 0x00 Reserved */ \
+"", /* 0x01 Reserved */ \
+"", /* 0x02 Reserved */ \
+"swiss.french", /* 0x03 Swiss/French */ \
+"portuguese", /* 0x04 Portuguese */ \
+"arabic", /* 0x05 Arabic */ \
+"hebrew", /* 0x06 Hebrew */ \
+"english.canadian", /* 0x07 Canadian English */ \
+"turkish", /* 0x08 Turkish */ \
+"greek", /* 0x09 Greek */ \
+"thai", /* 0x0a Thai (Thailand) */ \
+"italian", /* 0x0b Italian */ \
+"korean", /* 0x0c Hangul (Korea) */ \
+"dutch", /* 0x0d Dutch */ \
+"swedish", /* 0x0e Swedish */ \
+"german", /* 0x0f German */ \
+"chinese", /* 0x10 Chinese-PRC */ \
+"chinese", /* 0x11 Chinese-ROC */ \
+"swiss.french", /* 0x12 Swiss/French II */ \
+"spanish", /* 0x13 Spanish */ \
+"swiss.german", /* 0x14 Swiss/German II */ \
+"flemish", /* 0x15 Belgian (Flemish) */ \
+"finnish", /* 0x16 Finnish */ \
+"english.uk", /* 0x17 United Kingdom */ \
+"french.canadian", /* 0x18 French/Canadian */ \
+"swiss.german", /* 0x19 Swiss/German */ \
+"norwegian", /* 0x1a Norwegian */ \
+"french", /* 0x1b French */ \
+"danish", /* 0x1c Danish */ \
+"japanese", /* 0x1d Katakana */ \
+"spanish", /* 0x1e Latin American/Spanish*/\
+"english.us" /* 0x1f United States */ \
+
+
+/* HIL keycodes */
+#define HIL_KEYCODES_SET1_TBLSIZE 128
+#define HIL_KEYCODES_SET1 \
+ KEY_5, KEY_RESERVED, KEY_RIGHTALT, KEY_LEFTALT, \
+ KEY_RIGHTSHIFT, KEY_LEFTSHIFT, KEY_LEFTCTRL, KEY_SYSRQ, \
+ KEY_KP4, KEY_KP8, KEY_KP5, KEY_KP9, \
+ KEY_KP6, KEY_KP7, KEY_KPCOMMA, KEY_KPENTER, \
+ KEY_KP1, KEY_KPSLASH, KEY_KP2, KEY_KPPLUS, \
+ KEY_KP3, KEY_KPASTERISK, KEY_KP0, KEY_KPMINUS, \
+ KEY_B, KEY_V, KEY_C, KEY_X, \
+ KEY_Z, KEY_RESERVED, KEY_RESERVED, KEY_ESC, \
+ KEY_6, KEY_F10, KEY_3, KEY_F11, \
+ KEY_KPDOT, KEY_F9, KEY_TAB /*KP*/, KEY_F12, \
+ KEY_H, KEY_G, KEY_F, KEY_D, \
+ KEY_S, KEY_A, KEY_RESERVED, KEY_CAPSLOCK, \
+ KEY_U, KEY_Y, KEY_T, KEY_R, \
+ KEY_E, KEY_W, KEY_Q, KEY_TAB, \
+ KEY_7, KEY_6, KEY_5, KEY_4, \
+ KEY_3, KEY_2, KEY_1, KEY_GRAVE, \
+ KEY_F13, KEY_F14, KEY_F15, KEY_F16, \
+ KEY_F17, KEY_F18, KEY_F19, KEY_F20, \
+ KEY_MENU, KEY_F4, KEY_F3, KEY_F2, \
+ KEY_F1, KEY_VOLUMEUP, KEY_STOP, KEY_SENDFILE, \
+ KEY_SYSRQ, KEY_F5, KEY_F6, KEY_F7, \
+ KEY_F8, KEY_VOLUMEDOWN, KEY_DEL_EOL, KEY_DEL_EOS, \
+ KEY_8, KEY_9, KEY_0, KEY_MINUS, \
+ KEY_EQUAL, KEY_BACKSPACE, KEY_INS_LINE, KEY_DEL_LINE, \
+ KEY_I, KEY_O, KEY_P, KEY_LEFTBRACE, \
+ KEY_RIGHTBRACE, KEY_BACKSLASH, KEY_INSERT, KEY_DELETE, \
+ KEY_J, KEY_K, KEY_L, KEY_SEMICOLON, \
+ KEY_APOSTROPHE, KEY_ENTER, KEY_HOME, KEY_PAGEUP, \
+ KEY_M, KEY_COMMA, KEY_DOT, KEY_SLASH, \
+ KEY_BACKSLASH, KEY_SELECT, KEY_102ND, KEY_PAGEDOWN, \
+ KEY_N, KEY_SPACE, KEY_NEXT, KEY_RESERVED, \
+ KEY_LEFT, KEY_DOWN, KEY_UP, KEY_RIGHT
+
+
+#define HIL_KEYCODES_SET3_TBLSIZE 128
+#define HIL_KEYCODES_SET3 \
+ KEY_RESERVED, KEY_ESC, KEY_1, KEY_2, \
+ KEY_3, KEY_4, KEY_5, KEY_6, \
+ KEY_7, KEY_8, KEY_9, KEY_0, \
+ KEY_MINUS, KEY_EQUAL, KEY_BACKSPACE, KEY_TAB, \
+ KEY_Q, KEY_W, KEY_E, KEY_R, \
+ KEY_T, KEY_Y, KEY_U, KEY_I, \
+ KEY_O, KEY_P, KEY_LEFTBRACE, KEY_RIGHTBRACE, \
+ KEY_ENTER, KEY_LEFTCTRL, KEY_A, KEY_S, \
+ KEY_D, KEY_F, KEY_G, KEY_H, \
+ KEY_J, KEY_K, KEY_L, KEY_SEMICOLON, \
+ KEY_APOSTROPHE,KEY_GRAVE, KEY_LEFTSHIFT, KEY_BACKSLASH, \
+ KEY_Z, KEY_X, KEY_C, KEY_V, \
+ KEY_B, KEY_N, KEY_M, KEY_COMMA, \
+ KEY_DOT, KEY_SLASH, KEY_RIGHTSHIFT, KEY_KPASTERISK, \
+ KEY_LEFTALT, KEY_SPACE, KEY_CAPSLOCK, KEY_F1, \
+ KEY_F2, KEY_F3, KEY_F4, KEY_F5, \
+ KEY_F6, KEY_F7, KEY_F8, KEY_F9, \
+ KEY_F10, KEY_NUMLOCK, KEY_SCROLLLOCK, KEY_KP7, \
+ KEY_KP8, KEY_KP9, KEY_KPMINUS, KEY_KP4, \
+ KEY_KP5, KEY_KP6, KEY_KPPLUS, KEY_KP1, \
+ KEY_KP2, KEY_KP3, KEY_KP0, KEY_KPDOT, \
+ KEY_SYSRQ, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \
+ KEY_UP, KEY_LEFT, KEY_DOWN, KEY_RIGHT, \
+ KEY_HOME, KEY_PAGEUP, KEY_END, KEY_PAGEDOWN, \
+ KEY_INSERT, KEY_DELETE, KEY_102ND, KEY_RESERVED, \
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \
+ KEY_F1, KEY_F2, KEY_F3, KEY_F4, \
+ KEY_F5, KEY_F6, KEY_F7, KEY_F8, \
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, \
+ KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED
+
+
+/* Response to POL command, the "poll record header" */
+
+#define HIL_POL_NUM_AXES_MASK 0x03 /* Number of axis reported */
+#define HIL_POL_CTS 0x04 /* Device ready to receive data */
+#define HIL_POL_STATUS_PENDING 0x08 /* Device has status to report */
+#define HIL_POL_CHARTYPE_MASK 0x70 /* Type of character data to follow */
+#define HIL_POL_CHARTYPE_NONE 0x00 /* No character data to follow */
+#define HIL_POL_CHARTYPE_RSVD1 0x10 /* Reserved Set 1 */
+#define HIL_POL_CHARTYPE_ASCII 0x20 /* U.S. ASCII */
+#define HIL_POL_CHARTYPE_BINARY 0x30 /* Binary data */
+#define HIL_POL_CHARTYPE_SET1 0x40 /* Keycode Set 1 */
+#define HIL_POL_CHARTYPE_RSVD2 0x50 /* Reserved Set 2 */
+#define HIL_POL_CHARTYPE_SET2 0x60 /* Keycode Set 2 */
+#define HIL_POL_CHARTYPE_SET3 0x70 /* Keycode Set 3 */
+#define HIL_POL_AXIS_ALT 0x80 /* Data is from axis set 2 */
+
+
+#endif /* _HIL_H_ */
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h
new file mode 100644
index 000000000..394a8405d
--- /dev/null
+++ b/include/linux/hil_mlc.h
@@ -0,0 +1,168 @@
+/*
+ * HP Human Interface Loop Master Link Controller driver.
+ *
+ * Copyright (c) 2001 Brian S. Julin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ *
+ * References:
+ * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A
+ *
+ */
+
+#include <linux/hil.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/serio.h>
+#include <linux/list.h>
+
+typedef struct hil_mlc hil_mlc;
+
+/* The HIL has a complicated state engine.
+ * We define the structure of nodes in the state engine here.
+ */
+enum hilse_act {
+ /* HILSE_OUT prepares to receive input if the next node
+ * is an IN or EXPECT, and then sends the given packet.
+ */
+ HILSE_OUT = 0,
+
+ /* HILSE_CTS checks if the loop is busy. */
+ HILSE_CTS,
+
+ /* HILSE_OUT_LAST sends the given command packet to
+ * the last configured/running device on the loop.
+ */
+ HILSE_OUT_LAST,
+
+ /* HILSE_OUT_DISC sends the given command packet to
+ * the next device past the last configured/running one.
+ */
+ HILSE_OUT_DISC,
+
+ /* HILSE_FUNC runs a callback function with given arguments.
+ * A positive return value causes the "ugly" branch to be taken.
+ */
+ HILSE_FUNC,
+
+ /* HILSE_IN simply expects any non-errored packet to arrive
+ * within arg usecs.
+ */
+ HILSE_IN = 0x100,
+
+ /* HILSE_EXPECT expects a particular packet to arrive
+ * within arg usecs, any other packet is considered an error.
+ */
+ HILSE_EXPECT,
+
+ /* HILSE_EXPECT_LAST as above, but the dev field should be the last
+ * discovered/operational device.
+ */
+ HILSE_EXPECT_LAST,
+
+ /* HILSE_EXPECT_DISC as above, but the dev field should be the first
+ * undiscovered/non-operational device.
+ */
+ HILSE_EXPECT_DISC
+};
+
+typedef int (hilse_func) (hil_mlc *mlc, int arg);
+struct hilse_node {
+ enum hilse_act act; /* How to process this node */
+ union {
+ hilse_func *func; /* Function to call if HILSE_FUNC */
+ hil_packet packet; /* Packet to send or to compare */
+ } object;
+ int arg; /* Timeout in usec or parm for func */
+ int good; /* Node to jump to on success */
+ int bad; /* Node to jump to on error */
+ int ugly; /* Node to jump to on timeout */
+};
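
A hedged fragment showing the shape of a node table (the indices, timeout and branch targets below are invented; the real table lives in the MLC driver):

/* Send IFC on the loop, then expect any clean packet within 20 ms. */
static const struct hilse_node example_nodes[] = {
	{ HILSE_OUT, { .packet = HIL_PKT_CMD | HIL_CMD_IFC },      0, 1, 0, 0 },
	{ HILSE_IN,  { .packet = 0 },                          20000, 2, 0, 0 },
	/* ... */
};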
+
+/* Methods for back-end drivers, e.g. hp_sdc_mlc */
+typedef int (hil_mlc_cts) (hil_mlc *mlc);
+typedef void (hil_mlc_out) (hil_mlc *mlc);
+typedef int (hil_mlc_in) (hil_mlc *mlc, suseconds_t timeout);
+
+struct hil_mlc_devinfo {
+ uint8_t idd[16]; /* Device ID Byte and Describe Record */
+ uint8_t rsc[16]; /* Security Code Header and Record */
+ uint8_t exd[16]; /* Extended Describe Record */
+ uint8_t rnm[16]; /* Device name as returned by RNM command */
+};
+
+struct hil_mlc_serio_map {
+ hil_mlc *mlc;
+ int di_revmap;
+ int didx;
+};
+
+/* How many (possibly old/detached) devices we try to keep track of */
+#define HIL_MLC_DEVMEM 16
+
+struct hil_mlc {
+ struct list_head list; /* hil_mlc is organized as linked list */
+
+ rwlock_t lock;
+
+ void *priv; /* Data specific to a particular type of MLC */
+
+ int seidx; /* Current node in state engine */
+ int istarted, ostarted;
+
+ hil_mlc_cts *cts;
+ struct semaphore csem; /* Raised when loop idle */
+
+ hil_mlc_out *out;
+ struct semaphore osem; /* Raised when outpacket dispatched */
+ hil_packet opacket;
+
+ hil_mlc_in *in;
+ struct semaphore isem; /* Raised when a packet arrives */
+ hil_packet ipacket[16];
+ hil_packet imatch;
+ int icount;
+ struct timeval instart;
+ suseconds_t intimeout;
+
+ int ddi; /* Last operational device id */
+ int lcv; /* LCV to throttle loops */
+ struct timeval lcv_tv; /* Time loop was started */
+
+ int di_map[7]; /* Maps below items to live devs */
+ struct hil_mlc_devinfo di[HIL_MLC_DEVMEM];
+ struct serio *serio[HIL_MLC_DEVMEM];
+ struct hil_mlc_serio_map serio_map[HIL_MLC_DEVMEM];
+ hil_packet serio_opacket[HIL_MLC_DEVMEM];
+ int serio_oidx[HIL_MLC_DEVMEM];
+ struct hil_mlc_devinfo di_scratch; /* Temporary area */
+
+ int opercnt;
+
+ struct tasklet_struct *tasklet;
+};
+
+int hil_mlc_register(hil_mlc *mlc);
+int hil_mlc_unregister(hil_mlc *mlc);
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
new file mode 100644
index 000000000..8ec23fb0b
--- /dev/null
+++ b/include/linux/hippidevice.h
@@ -0,0 +1,41 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the HIPPI handlers.
+ *
+ * Version: @(#)hippidevice.h 1.0.0 05/26/97
+ *
+ * Author: Jes Sorensen, <Jes.Sorensen@cern.ch>
+ *
+ * hippidevice.h is based on previous fddidevice.h work by
+ * Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_HIPPIDEVICE_H
+#define _LINUX_HIPPIDEVICE_H
+
+#include <linux/if_hippi.h>
+
+#ifdef __KERNEL__
+
+struct hippi_cb {
+ __u32 ifield;
+};
+
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int hippi_change_mtu(struct net_device *dev, int new_mtu);
+int hippi_mac_addr(struct net_device *dev, void *p);
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
+struct net_device *alloc_hippi_dev(int sizeof_priv);
+#endif
+
+#endif /* _LINUX_HIPPIDEVICE_H */
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
new file mode 100644
index 000000000..d2ba7d334
--- /dev/null
+++ b/include/linux/host1x.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+enum host1x_class {
+ HOST1X_CLASS_HOST1X = 0x1,
+ HOST1X_CLASS_GR2D = 0x51,
+ HOST1X_CLASS_GR2D_SB = 0x52,
+ HOST1X_CLASS_GR3D = 0x60,
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+ int (*init)(struct host1x_client *client);
+ int (*exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+ struct list_head list;
+ struct device *parent;
+ struct device *dev;
+
+ const struct host1x_client_ops *ops;
+
+ enum host1x_class class;
+ struct host1x_channel *channel;
+
+ struct host1x_syncpt **syncpts;
+ unsigned int num_syncpts;
+};
+
+/*
+ * host1x buffer objects
+ */
+
+struct host1x_bo;
+struct sg_table;
+
+struct host1x_bo_ops {
+ struct host1x_bo *(*get)(struct host1x_bo *bo);
+ void (*put)(struct host1x_bo *bo);
+ dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+ void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ void *(*mmap)(struct host1x_bo *bo);
+ void (*munmap)(struct host1x_bo *bo, void *addr);
+ void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+ void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+ const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+ const struct host1x_bo_ops *ops)
+{
+ bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+ return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+ bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+ struct sg_table **sgt)
+{
+ return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+ bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+ return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+ bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+ return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+ unsigned int pagenum, void *addr)
+{
+ bo->ops->kunmap(bo, pagenum, addr);
+}
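+
+/* Rough sketch (not part of the API above): a driver embeds struct host1x_bo
+ * in its own buffer object and points it at an ops table.  "example_bo",
+ * its helpers and "obj" are hypothetical names:
+ *
+ *	struct example_bo {
+ *		struct host1x_bo base;
+ *		void *vaddr;
+ *	};
+ *
+ *	static void *example_bo_mmap(struct host1x_bo *bo)
+ *	{
+ *		return container_of(bo, struct example_bo, base)->vaddr;
+ *	}
+ *
+ *	static const struct host1x_bo_ops example_bo_ops = {
+ *		.mmap = example_bo_mmap,
+ *		(plus .get, .put, .pin, .unpin, .munmap, .kmap and .kunmap
+ *		 filled in likewise)
+ *	};
+ *
+ *	host1x_bo_init(&obj->base, &example_bo_ops);
+ */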
+
+/*
+ * host1x syncpoints
+ */
+
+#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
+#define HOST1X_SYNCPT_HAS_BASE (1 << 1)
+
+struct host1x_syncpt_base;
+struct host1x_syncpt;
+struct host1x;
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read(struct host1x_syncpt *sp);
+int host1x_syncpt_incr(struct host1x_syncpt *sp);
+u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+ u32 *value);
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+ unsigned long flags);
+void host1x_syncpt_free(struct host1x_syncpt *sp);
+
+struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
+u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
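+
+/* Usage sketch (illustrative, error handling omitted): request a syncpoint,
+ * reserve one increment and wait for it.  "dev" and "timeout" are assumed
+ * to exist; the timeout units follow the host1x core's convention.
+ *
+ *	struct host1x_syncpt *sp = host1x_syncpt_request(dev, 0);
+ *	u32 thresh = host1x_syncpt_incr_max(sp, 1);
+ *	u32 value;
+ *
+ * then, after submitting work that performs the single increment:
+ *
+ *	host1x_syncpt_wait(sp, thresh, timeout, &value);
+ *	host1x_syncpt_free(sp);
+ */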
+
+/*
+ * host1x channel
+ */
+
+struct host1x_channel;
+struct host1x_job;
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+/*
+ * host1x job
+ */
+
+struct host1x_reloc {
+ struct {
+ struct host1x_bo *bo;
+ unsigned long offset;
+ } cmdbuf;
+ struct {
+ struct host1x_bo *bo;
+ unsigned long offset;
+ } target;
+ unsigned long shift;
+};
+
+struct host1x_job {
+ /* When refcount goes to zero, job can be freed */
+ struct kref ref;
+
+ /* List entry */
+ struct list_head list;
+
+ /* Channel where job is submitted to */
+ struct host1x_channel *channel;
+
+ u32 client;
+
+ /* Gathers and their memory */
+ struct host1x_job_gather *gathers;
+ unsigned int num_gathers;
+
+ /* Wait checks to be processed at submit time */
+ struct host1x_waitchk *waitchk;
+ unsigned int num_waitchk;
+ u32 waitchk_mask;
+
+ /* Array of handles to be pinned & unpinned */
+ struct host1x_reloc *relocarray;
+ unsigned int num_relocs;
+ struct host1x_job_unpin_data *unpins;
+ unsigned int num_unpins;
+
+ dma_addr_t *addr_phys;
+ dma_addr_t *gather_addr_phys;
+ dma_addr_t *reloc_addr_phys;
+
+ /* Sync point id, number of increments and end related to the submit */
+ u32 syncpt_id;
+ u32 syncpt_incrs;
+ u32 syncpt_end;
+
+ /* Maximum time to wait for this job */
+ unsigned int timeout;
+
+ /* Index and number of slots used in the push buffer */
+ unsigned int first_get;
+ unsigned int num_slots;
+
+ /* Copy of gathers */
+ size_t gather_copy_size;
+ dma_addr_t gather_copy;
+ u8 *gather_copy_mapped;
+
+ /* Check if register is marked as an address reg */
+ int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
+
+ /* Request a SETCLASS to this class */
+ u32 class;
+
+ /* Add a channel wait for previous ops to complete */
+ bool serialize;
+};
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+ u32 num_cmdbufs, u32 num_relocs,
+ u32 num_waitchks);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+ u32 words, u32 offset);
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+void host1x_job_put(struct host1x_job *job);
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+void host1x_job_unpin(struct host1x_job *job);
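+
+/* Rough submission sketch (illustrative only; error handling omitted and
+ * "client", "bo" and "num_words" are assumed to exist, with the client
+ * holding at least one syncpoint).  Unpinning is typically handled by the
+ * core when the job completes, so the submitter only drops its reference:
+ *
+ *	struct host1x_job *job;
+ *
+ *	job = host1x_job_alloc(client->channel, 1, 0, 0);
+ *	job->syncpt_id = host1x_syncpt_id(client->syncpts[0]);
+ *	job->syncpt_incrs = 1;
+ *	host1x_job_add_gather(job, bo, num_words, 0);
+ *	host1x_job_pin(job, client->dev);
+ *	host1x_job_submit(job);
+ *	host1x_job_put(job);
+ */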
+
+/*
+ * subdevice probe infrastructure
+ */
+
+struct host1x_device;
+
+struct host1x_driver {
+ struct device_driver driver;
+
+ const struct of_device_id *subdevs;
+ struct list_head list;
+
+ int (*probe)(struct host1x_device *device);
+ int (*remove)(struct host1x_device *device);
+ void (*shutdown)(struct host1x_device *device);
+};
+
+static inline struct host1x_driver *
+to_host1x_driver(struct device_driver *driver)
+{
+ return container_of(driver, struct host1x_driver, driver);
+}
+
+int host1x_driver_register_full(struct host1x_driver *driver,
+ struct module *owner);
+void host1x_driver_unregister(struct host1x_driver *driver);
+
+#define host1x_driver_register(driver) \
+ host1x_driver_register_full(driver, THIS_MODULE)
+
+struct host1x_device {
+ struct host1x_driver *driver;
+ struct list_head list;
+ struct device dev;
+
+ struct mutex subdevs_lock;
+ struct list_head subdevs;
+ struct list_head active;
+
+ struct mutex clients_lock;
+ struct list_head clients;
+
+ bool registered;
+};
+
+static inline struct host1x_device *to_host1x_device(struct device *dev)
+{
+ return container_of(dev, struct host1x_device, dev);
+}
+
+int host1x_device_init(struct host1x_device *device);
+int host1x_device_exit(struct host1x_device *device);
+
+int host1x_client_register(struct host1x_client *client);
+int host1x_client_unregister(struct host1x_client *client);
+
+struct tegra_mipi_device;
+
+struct tegra_mipi_device *tegra_mipi_request(struct device *device);
+void tegra_mipi_free(struct tegra_mipi_device *device);
+int tegra_mipi_calibrate(struct tegra_mipi_device *device);
+
+#endif
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
new file mode 100644
index 000000000..d392975d8
--- /dev/null
+++ b/include/linux/hp_sdc.h
@@ -0,0 +1,301 @@
+/*
+ * HP i8042 System Device Controller -- header
+ *
+ * Copyright (c) 2001 Brian S. Julin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL").
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ *
+ * References:
+ *
+ * HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A
+ *
+ * System Device Controller Microprocessor Firmware Theory of Operation
+ * for Part Number 1820-4784 Revision B. Dwg No. A-1820-4784-2
+ *
+ */
+
+#ifndef _LINUX_HP_SDC_H
+#define _LINUX_HP_SDC_H
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#if defined(__hppa__)
+#include <asm/hardware.h>
+#endif
+
+
+/* No 4X status reads take longer than this (in usec).
+ */
+#define HP_SDC_MAX_REG_DELAY 20000
+
+typedef void (hp_sdc_irqhook) (int irq, void *dev_id,
+ uint8_t status, uint8_t data);
+
+int hp_sdc_request_timer_irq(hp_sdc_irqhook *callback);
+int hp_sdc_request_hil_irq(hp_sdc_irqhook *callback);
+int hp_sdc_request_cooked_irq(hp_sdc_irqhook *callback);
+int hp_sdc_release_timer_irq(hp_sdc_irqhook *callback);
+int hp_sdc_release_hil_irq(hp_sdc_irqhook *callback);
+int hp_sdc_release_cooked_irq(hp_sdc_irqhook *callback);
+
+typedef struct {
+ int actidx; /* Start of act. Acts are atomic WRT I/O to SDC */
+ int idx; /* Index within the act */
+ int endidx; /* transaction is over and done if idx == endidx */
+ uint8_t *seq; /* commands/data for the transaction */
+ union {
+ hp_sdc_irqhook *irqhook; /* Callback, isr or tasklet context */
+ struct semaphore *semaphore; /* Semaphore to sleep on. */
+ } act;
+} hp_sdc_transaction;
+int __hp_sdc_enqueue_transaction(hp_sdc_transaction *this);
+int hp_sdc_enqueue_transaction(hp_sdc_transaction *this);
+int hp_sdc_dequeue_transaction(hp_sdc_transaction *this);
+
+/* The HP_SDC_ACT* values are peculiar to this driver.
+ * Nuance: never HP_SDC_ACT_DATAIN | HP_SDC_ACT_DEALLOC, use another
+ * act to perform the dealloc.
+ */
+#define HP_SDC_ACT_PRECMD 0x01 /* Send a command first */
+#define HP_SDC_ACT_DATAREG 0x02 /* Set data registers */
+#define HP_SDC_ACT_DATAOUT 0x04 /* Send data bytes */
+#define HP_SDC_ACT_POSTCMD 0x08 /* Send command after */
+#define HP_SDC_ACT_DATAIN 0x10 /* Collect data after */
+#define HP_SDC_ACT_DURING 0x1f
+#define HP_SDC_ACT_SEMAPHORE 0x20 /* Raise semaphore after */
+#define HP_SDC_ACT_CALLBACK 0x40 /* Pass data to IRQ handler */
+#define HP_SDC_ACT_DEALLOC 0x80 /* Destroy transaction after */
+#define HP_SDC_ACT_AFTER 0xe0
+#define HP_SDC_ACT_DEAD 0x60 /* Act timed out. */
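+
+/* Illustrative sketch (not from the original header): a caller builds a
+ * seq buffer whose first byte is the act flags and enqueues the
+ * transaction; the hook runs when the data byte has been collected.  The
+ * exact seq layout below is an assumption modelled on in-tree hp_sdc users:
+ *
+ *	static void example_hook(int irq, void *dev_id,
+ *				 uint8_t status, uint8_t data)
+ *	{
+ *		(data is the byte read back by HP_SDC_CMD_READ_KCC)
+ *	}
+ *
+ *	static uint8_t tseq[4] = {
+ *		HP_SDC_ACT_PRECMD | HP_SDC_ACT_DATAIN | HP_SDC_ACT_CALLBACK,
+ *		HP_SDC_CMD_READ_KCC, 1, 0
+ *	};
+ *	static hp_sdc_transaction t = {
+ *		.actidx = 0, .idx = 1, .endidx = 4,
+ *		.seq = tseq, .act.irqhook = example_hook,
+ *	};
+ *
+ *	hp_sdc_enqueue_transaction(&t);
+ */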
+
+/* The rest of the flags are a straightforward representation of the SDC interface */
+#define HP_SDC_STATUS_IBF 0x02 /* Input buffer full */
+
+#define HP_SDC_STATUS_IRQMASK 0xf0 /* Bits containing "level 1" irq */
+#define HP_SDC_STATUS_PERIODIC 0x10 /* Periodic 10ms timer */
+#define HP_SDC_STATUS_USERTIMER 0x20 /* "Special purpose" timer */
+#define HP_SDC_STATUS_TIMER 0x30 /* Both PERIODIC and USERTIMER */
+#define HP_SDC_STATUS_REG 0x40 /* Data from an i8042 register */
+#define HP_SDC_STATUS_HILCMD 0x50 /* Command from HIL MLC */
+#define HP_SDC_STATUS_HILDATA 0x60 /* Data from HIL MLC */
+#define HP_SDC_STATUS_PUP 0x70 /* Successful power-up self test */
+#define HP_SDC_STATUS_KCOOKED 0x80 /* Key from cooked kbd */
+#define HP_SDC_STATUS_KRPG 0xc0 /* Key from Repeat Gen */
+#define HP_SDC_STATUS_KMOD_SUP 0x10 /* Shift key is up */
+#define HP_SDC_STATUS_KMOD_CUP 0x20 /* Control key is up */
+
+#define HP_SDC_NMISTATUS_FHS 0x40 /* NMI is a fast handshake irq */
+
+/* Internal i8042 registers (there are more, but they are not too useful). */
+
+#define HP_SDC_USE 0x02 /* Resource usage (including OB bit) */
+#define HP_SDC_IM 0x04 /* Interrupt mask */
+#define HP_SDC_CFG 0x11 /* Configuration register */
+#define HP_SDC_KBLANGUAGE 0x12 /* Keyboard language */
+
+#define HP_SDC_D0 0x70 /* General purpose data buffer 0 */
+#define HP_SDC_D1 0x71 /* General purpose data buffer 1 */
+#define HP_SDC_D2 0x72 /* General purpose data buffer 2 */
+#define HP_SDC_D3 0x73 /* General purpose data buffer 3 */
+#define HP_SDC_VT1 0x74 /* Timer for voice 1 */
+#define HP_SDC_VT2 0x75 /* Timer for voice 2 */
+#define HP_SDC_VT3 0x76 /* Timer for voice 3 */
+#define HP_SDC_VT4 0x77 /* Timer for voice 4 */
+#define HP_SDC_KBN 0x78 /* Which HIL devs are Nimitz */
+#define HP_SDC_KBC 0x79 /* Which HIL devs are cooked kbds */
+#define HP_SDC_LPS 0x7a /* i8042's view of HIL status */
+#define HP_SDC_LPC 0x7b /* i8042's view of HIL "control" */
+#define HP_SDC_RSV 0x7c /* Reserved "for testing" */
+#define HP_SDC_LPR 0x7d /* i8042 count of HIL reconfigs */
+#define HP_SDC_XTD 0x7e /* "Extended Configuration" register */
+#define HP_SDC_STR 0x7f /* i8042 self-test result */
+
+/* Bitfields for above registers */
+#define HP_SDC_USE_LOOP 0x04 /* Command is currently on the loop. */
+
+#define HP_SDC_IM_MASK 0x1f /* these bits not part of cmd/status */
+#define HP_SDC_IM_FH 0x10 /* Mask the fast handshake irq */
+#define HP_SDC_IM_PT 0x08 /* Mask the periodic timer irq */
+#define HP_SDC_IM_TIMERS 0x04 /* Mask the MT/DT/CT irq */
+#define HP_SDC_IM_RESET 0x02 /* Mask the reset key irq */
+#define HP_SDC_IM_HIL 0x01 /* Mask the HIL MLC irq */
+
+#define HP_SDC_CFG_ROLLOVER 0x08 /* N-key rollover (exact use unclear) */
+#define HP_SDC_CFG_KBD 0x10 /* There is a keyboard */
+#define HP_SDC_CFG_NEW 0x20 /* Supports/uses HIL MLC */
+#define HP_SDC_CFG_KBD_OLD 0x03 /* keyboard code for non-HIL */
+#define HP_SDC_CFG_KBD_NEW 0x07 /* keyboard code from HIL autoconfig */
+#define HP_SDC_CFG_REV 0x40 /* Code revision bit */
+#define HP_SDC_CFG_IDPROM 0x80 /* IDPROM present in kbd (not HIL) */
+
+#define HP_SDC_LPS_NDEV 0x07 /* # devices autoconfigured on HIL */
+#define HP_SDC_LPS_ACSUCC 0x08 /* loop autoconfigured successfully */
+#define HP_SDC_LPS_ACFAIL 0x80 /* last loop autoconfigure failed */
+
+#define HP_SDC_LPC_APE_IPF 0x01 /* HIL MLC APE/IPF (autopoll) set */
+#define HP_SDC_LPC_ARCONERR 0x02 /* i8042 autoreconfigs loop on err */
+#define HP_SDC_LPC_ARCQUIET 0x03 /* i8042 doesn't report autoreconfigs */
+#define HP_SDC_LPC_COOK 0x10 /* i8042 cooks devices in _KBN */
+#define HP_SDC_LPC_RC 0x80 /* causes autoreconfig */
+
+#define HP_SDC_XTD_REV 0x07 /* contains revision code */
+#define HP_SDC_XTD_REV_STRINGS(val, str) \
+switch (val) { \
+ case 0x1: str = "1820-3712"; break; \
+ case 0x2: str = "1820-4379"; break; \
+ case 0x3: str = "1820-4784"; break; \
+ default: str = "unknown"; \
+};
+#define HP_SDC_XTD_BEEPER 0x08 /* TI SN76494 beeper available */
+#define HP_SDC_XTD_BBRTC 0x20 /* OKI MSM-58321 BBRTC present */
+
+#define HP_SDC_CMD_LOAD_RT 0x31 /* Load real time (from 8042) */
+#define HP_SDC_CMD_LOAD_FHS 0x36 /* Load the fast handshake timer */
+#define HP_SDC_CMD_LOAD_MT 0x38 /* Load the match timer */
+#define HP_SDC_CMD_LOAD_DT 0x3B /* Load the delay timer */
+#define HP_SDC_CMD_LOAD_CT 0x3E /* Load the cycle timer */
+
+#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */
+
+/* The documents provided do not explicitly state that all registers between
+ * 0x01 and 0x1f inclusive can be read by sending their register index as a
+ * command, but this is implied and appears to be the case.
+ */
+#define HP_SDC_CMD_READ_RAM 0x00 /* Load from i8042 RAM (autoinc) */
+#define HP_SDC_CMD_READ_USE 0x02 /* Undocumented! Load from usage reg */
+#define HP_SDC_CMD_READ_IM 0x04 /* Load current interrupt mask */
+#define HP_SDC_CMD_READ_KCC 0x11 /* Load primary kbd config code */
+#define HP_SDC_CMD_READ_KLC 0x12 /* Load primary kbd language code */
+#define HP_SDC_CMD_READ_T1 0x13 /* Load timer output buffer byte 1 */
+#define HP_SDC_CMD_READ_T2 0x14 /* Load timer output buffer byte 2 */
+#define HP_SDC_CMD_READ_T3 0x15 /* Load timer output buffer byte 3 */
+#define HP_SDC_CMD_READ_T4 0x16 /* Load timer output buffer byte 4 */
+#define HP_SDC_CMD_READ_T5 0x17 /* Load timer output buffer byte 5 */
+#define HP_SDC_CMD_READ_D0 0xf0 /* Load from i8042 RAM location 0x70 */
+#define HP_SDC_CMD_READ_D1 0xf1 /* Load from i8042 RAM location 0x71 */
+#define HP_SDC_CMD_READ_D2 0xf2 /* Load from i8042 RAM location 0x72 */
+#define HP_SDC_CMD_READ_D3 0xf3 /* Load from i8042 RAM location 0x73 */
+#define HP_SDC_CMD_READ_VT1 0xf4 /* Load from i8042 RAM location 0x74 */
+#define HP_SDC_CMD_READ_VT2 0xf5 /* Load from i8042 RAM location 0x75 */
+#define HP_SDC_CMD_READ_VT3 0xf6 /* Load from i8042 RAM location 0x76 */
+#define HP_SDC_CMD_READ_VT4 0xf7 /* Load from i8042 RAM location 0x77 */
+#define HP_SDC_CMD_READ_KBN 0xf8 /* Load from i8042 RAM location 0x78 */
+#define HP_SDC_CMD_READ_KBC 0xf9 /* Load from i8042 RAM location 0x79 */
+#define HP_SDC_CMD_READ_LPS 0xfa /* Load from i8042 RAM location 0x7a */
+#define HP_SDC_CMD_READ_LPC 0xfb /* Load from i8042 RAM location 0x7b */
+#define HP_SDC_CMD_READ_RSV 0xfc /* Load from i8042 RAM location 0x7c */
+#define HP_SDC_CMD_READ_LPR 0xfd /* Load from i8042 RAM location 0x7d */
+#define HP_SDC_CMD_READ_XTD 0xfe /* Load from i8042 RAM location 0x7e */
+#define HP_SDC_CMD_READ_STR 0xff /* Load from i8042 RAM location 0x7f */
+
+#define HP_SDC_CMD_SET_ARD 0xA0 /* Set emulated autorepeat delay */
+#define HP_SDC_CMD_SET_ARR 0xA2 /* Set emulated autorepeat rate */
+#define HP_SDC_CMD_SET_BELL 0xA3 /* Set voice 3 params for "beep" cmd */
+#define HP_SDC_CMD_SET_RPGR 0xA6 /* Set "RPG" irq rate (doesn't work) */
+#define HP_SDC_CMD_SET_RTMS 0xAD /* Set the RTC time (milliseconds) */
+#define HP_SDC_CMD_SET_RTD 0xAF /* Set the RTC time (days) */
+#define HP_SDC_CMD_SET_FHS 0xB2 /* Set fast handshake timer */
+#define HP_SDC_CMD_SET_MT 0xB4 /* Set match timer */
+#define HP_SDC_CMD_SET_DT 0xB7 /* Set delay timer */
+#define HP_SDC_CMD_SET_CT 0xBA /* Set cycle timer */
+#define HP_SDC_CMD_SET_RAMP 0xC1 /* Reset READ_RAM autoinc counter */
+#define HP_SDC_CMD_SET_D0 0xe0 /* Load to i8042 RAM location 0x70 */
+#define HP_SDC_CMD_SET_D1 0xe1 /* Load to i8042 RAM location 0x71 */
+#define HP_SDC_CMD_SET_D2 0xe2 /* Load to i8042 RAM location 0x72 */
+#define HP_SDC_CMD_SET_D3 0xe3 /* Load to i8042 RAM location 0x73 */
+#define HP_SDC_CMD_SET_VT1 0xe4 /* Load to i8042 RAM location 0x74 */
+#define HP_SDC_CMD_SET_VT2 0xe5 /* Load to i8042 RAM location 0x75 */
+#define HP_SDC_CMD_SET_VT3 0xe6 /* Load to i8042 RAM location 0x76 */
+#define HP_SDC_CMD_SET_VT4 0xe7 /* Load to i8042 RAM location 0x77 */
+#define HP_SDC_CMD_SET_KBN 0xe8 /* Load to i8042 RAM location 0x78 */
+#define HP_SDC_CMD_SET_KBC 0xe9 /* Load to i8042 RAM location 0x79 */
+#define HP_SDC_CMD_SET_LPS 0xea /* Load to i8042 RAM location 0x7a */
+#define HP_SDC_CMD_SET_LPC 0xeb /* Load to i8042 RAM location 0x7b */
+#define HP_SDC_CMD_SET_RSV 0xec /* Load to i8042 RAM location 0x7c */
+#define HP_SDC_CMD_SET_LPR 0xed /* Load to i8042 RAM location 0x7d */
+#define HP_SDC_CMD_SET_XTD 0xee /* Load to i8042 RAM location 0x7e */
+#define HP_SDC_CMD_SET_STR 0xef /* Load to i8042 RAM location 0x7f */
+
+#define HP_SDC_CMD_DO_RTCW 0xc2 /* i8042 RAM 0x70 --> RTC */
+#define HP_SDC_CMD_DO_RTCR 0xc3 /* RTC[0x70 0:3] --> irq/status/data */
+#define HP_SDC_CMD_DO_BEEP 0xc4 /* i8042 RAM 0x70-74 --> beeper,VT3 */
+#define HP_SDC_CMD_DO_HIL 0xc5 /* i8042 RAM 0x70-73 -->
+ HIL MLC R0,R1 i8042 HIL watchdog */
+
+/* Values used to (de)mangle input/output to/from the HIL MLC */
+#define HP_SDC_DATA 0x40 /* Data from an 8042 register */
+#define HP_SDC_HIL_CMD 0x50 /* Data from HIL MLC R1/8042 */
+#define HP_SDC_HIL_R1MASK 0x0f /* Contents of HIL MLC R1 0:3 */
+#define HP_SDC_HIL_AUTO 0x10 /* Set if POL results from i8042 */
+#define HP_SDC_HIL_ISERR 0x80 /* Has meaning as in next 4 values */
+#define HP_SDC_HIL_RC_DONE 0x80 /* i8042 auto-configured loop */
+#define HP_SDC_HIL_ERR 0x81 /* HIL MLC R2 had a bit set */
+#define HP_SDC_HIL_TO 0x82 /* i8042 HIL watchdog expired */
+#define HP_SDC_HIL_RC 0x84 /* i8042 is auto-configuring loop */
+#define HP_SDC_HIL_DAT 0x60 /* Data from HIL MLC R0 */
+
+
+typedef struct {
+ rwlock_t ibf_lock;
+ rwlock_t lock; /* user/tasklet lock */
+ rwlock_t rtq_lock; /* isr/tasklet lock */
+ rwlock_t hook_lock; /* isr/user lock for handler add/del */
+
+ unsigned int irq, nmi; /* Our IRQ lines */
+ unsigned long base_io, status_io, data_io; /* Our IO ports */
+
+ uint8_t im; /* Interrupt mask */
+ int set_im; /* Interrupt mask needs to be set. */
+
+ int ibf; /* Last known status of IBF flag */
+ uint8_t wi; /* current i8042 write index */
+ uint8_t r7[4]; /* current i8042[0x70 - 0x74] values */
+ uint8_t r11, r7e; /* Values from version/revision regs */
+
+ hp_sdc_irqhook *timer, *reg, *hil, *pup, *cooked;
+
+#define HP_SDC_QUEUE_LEN 16
+ hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */
+
+ int rcurr, rqty; /* Current read transact in process */
+ struct timeval rtv; /* Time when current read started */
+ int wcurr; /* Current write transact in process */
+
+ int dev_err; /* carries status from registration */
+#if defined(__hppa__)
+ struct parisc_device *dev;
+#elif defined(__mc68000__)
+ void *dev;
+#else
+#error No support for device registration on this arch yet.
+#endif
+
+ struct timer_list kicker; /* Keeps below task alive */
+ struct tasklet_struct task;
+
+} hp_i8042_sdc;
+
+#endif /* _LINUX_HP_SDC_H */
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
new file mode 100644
index 000000000..9427ab4e0
--- /dev/null
+++ b/include/linux/hpet.h
@@ -0,0 +1,110 @@
+#ifndef __HPET__
+#define __HPET__ 1
+
+#include <uapi/linux/hpet.h>
+
+
+/*
+ * Offsets into HPET Registers
+ */
+
+struct hpet {
+ u64 hpet_cap; /* capabilities */
+ u64 res0; /* reserved */
+ u64 hpet_config; /* configuration */
+ u64 res1; /* reserved */
+ u64 hpet_isr; /* interrupt status reg */
+ u64 res2[25]; /* reserved */
+ union { /* main counter */
+ u64 _hpet_mc64;
+ u32 _hpet_mc32;
+ unsigned long _hpet_mc;
+ } _u0;
+ u64 res3; /* reserved */
+ struct hpet_timer {
+ u64 hpet_config; /* configuration/cap */
+ union { /* timer compare register */
+ u64 _hpet_hc64;
+ u32 _hpet_hc32;
+ unsigned long _hpet_compare;
+ } _u1;
+ u64 hpet_fsb[2]; /* FSB route */
+ } hpet_timers[1];
+};
+
+#define hpet_mc _u0._hpet_mc
+#define hpet_compare _u1._hpet_compare
+
+#define HPET_MAX_TIMERS (32)
+#define HPET_MAX_IRQ (32)
+
+/*
+ * HPET general capabilities register
+ */
+
+#define HPET_COUNTER_CLK_PERIOD_MASK (0xffffffff00000000ULL)
+#define HPET_COUNTER_CLK_PERIOD_SHIFT (32UL)
+#define HPET_VENDOR_ID_MASK (0x00000000ffff0000ULL)
+#define HPET_VENDOR_ID_SHIFT (16ULL)
+#define HPET_LEG_RT_CAP_MASK (0x8000)
+#define HPET_COUNTER_SIZE_MASK (0x2000)
+#define HPET_NUM_TIM_CAP_MASK (0x1f00)
+#define HPET_NUM_TIM_CAP_SHIFT (8ULL)
+
+/*
+ * HPET general configuration register
+ */
+
+#define HPET_LEG_RT_CNF_MASK (2UL)
+#define HPET_ENABLE_CNF_MASK (1UL)
+
+
+/*
+ * Timer configuration register
+ */
+
+#define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL)
+#define Tn_INT_ROUTE_CAP_SHIFT (32UL)
+#define Tn_FSB_INT_DELCAP_MASK (0x8000UL)
+#define Tn_FSB_INT_DELCAP_SHIFT (15)
+#define Tn_FSB_EN_CNF_MASK (0x4000UL)
+#define Tn_FSB_EN_CNF_SHIFT (14)
+#define Tn_INT_ROUTE_CNF_MASK (0x3e00UL)
+#define Tn_INT_ROUTE_CNF_SHIFT (9)
+#define Tn_32MODE_CNF_MASK (0x0100UL)
+#define Tn_VAL_SET_CNF_MASK (0x0040UL)
+#define Tn_SIZE_CAP_MASK (0x0020UL)
+#define Tn_PER_INT_CAP_MASK (0x0010UL)
+#define Tn_TYPE_CNF_MASK (0x0008UL)
+#define Tn_INT_ENB_CNF_MASK (0x0004UL)
+#define Tn_INT_TYPE_CNF_MASK (0x0002UL)
+
+/*
+ * Timer FSB Interrupt Route Register
+ */
+
+#define Tn_FSB_INT_ADDR_MASK (0xffffffff00000000ULL)
+#define Tn_FSB_INT_ADDR_SHIFT (32UL)
+#define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL)
+
+/*
+ * exported interfaces
+ */
+
+struct hpet_data {
+ unsigned long hd_phys_address;
+ void __iomem *hd_address;
+ unsigned short hd_nirqs;
+ unsigned int hd_state; /* timer allocated */
+ unsigned int hd_irq[HPET_MAX_TIMERS];
+};
+
+static inline void hpet_reserve_timer(struct hpet_data *hd, int timer)
+{
+ hd->hd_state |= (1 << timer);
+ return;
+}
+
+int hpet_alloc(struct hpet_data *);
+
+#endif /* !__HPET__ */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
new file mode 100644
index 000000000..05f6df1fd
--- /dev/null
+++ b/include/linux/hrtimer.h
@@ -0,0 +1,454 @@
+/*
+ * include/linux/hrtimer.h
+ *
+ * hrtimers - High-resolution kernel timers
+ *
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
+ *
+ * data type definitions, declarations, prototypes
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_HRTIMER_H
+#define _LINUX_HRTIMER_H
+
+#include <linux/rbtree.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/percpu.h>
+#include <linux/timer.h>
+#include <linux/timerqueue.h>
+
+struct hrtimer_clock_base;
+struct hrtimer_cpu_base;
+
+/*
+ * Mode arguments of xxx_hrtimer functions:
+ */
+enum hrtimer_mode {
+ HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
+ HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
+ HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
+ HRTIMER_MODE_ABS_PINNED = 0x02,
+ HRTIMER_MODE_REL_PINNED = 0x03,
+};
+
+/*
+ * Return values for the callback function
+ */
+enum hrtimer_restart {
+ HRTIMER_NORESTART, /* Timer is not restarted */
+ HRTIMER_RESTART, /* Timer must be restarted */
+};
+
+/*
+ * Values to track state of the timer
+ *
+ * Possible states:
+ *
+ * 0x00 inactive
+ * 0x01 enqueued into rbtree
+ * 0x02 callback function running
+ * 0x04 timer is migrated to another cpu
+ *
+ * Special cases:
+ * 0x03 callback function running and enqueued
+ * (was requeued on another CPU)
+ * 0x05 timer was migrated on CPU hotunplug
+ *
+ * The "callback function running and enqueued" status is only possible on
+ * SMP. It happens for example when a posix timer expired and the callback
+ * queued a signal. Between dropping the lock which protects the posix timer
+ * and reacquiring the base lock of the hrtimer, another CPU can deliver the
+ * signal and rearm the timer. We have to preserve the callback running state,
+ * as otherwise the timer could be removed before the softirq code finishes the
+ * handling of the timer.
+ *
+ * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
+ * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
+ * also affects HRTIMER_STATE_MIGRATE where the preservation is not
+ * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
+ * enqueued on the new cpu.
+ *
+ * All state transitions are protected by cpu_base->lock.
+ */
+#define HRTIMER_STATE_INACTIVE 0x00
+#define HRTIMER_STATE_ENQUEUED 0x01
+#define HRTIMER_STATE_CALLBACK 0x02
+#define HRTIMER_STATE_MIGRATE 0x04
+
+/**
+ * struct hrtimer - the basic hrtimer structure
+ * @node: timerqueue node, which also manages node.expires,
+ * the absolute expiry time in the hrtimers internal
+ * representation. The time is related to the clock on
+ * which the timer is based. Is setup by adding
+ * slack to the _softexpires value. For non range timers
+ * identical to _softexpires.
+ * @_softexpires: the absolute earliest expiry time of the hrtimer.
+ * The time which was given as expiry time when the timer
+ * was armed.
+ * @function: timer expiry callback function
+ * @base: pointer to the timer base (per cpu and per clock)
+ * @state: state information (See bit values above)
+ * @start_pid: timer statistics field to store the pid of the task which
+ * started the timer
+ * @start_site: timer statistics field to store the site where the timer
+ * was started
+ * @start_comm: timer statistics field to store the name of the process which
+ * started the timer
+ *
+ * The hrtimer structure must be initialized by hrtimer_init()
+ */
+struct hrtimer {
+ struct timerqueue_node node;
+ ktime_t _softexpires;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ unsigned long state;
+#ifdef CONFIG_TIMER_STATS
+ int start_pid;
+ void *start_site;
+ char start_comm[16];
+#endif
+};
+
+/**
+ * struct hrtimer_sleeper - simple sleeper structure
+ * @timer: embedded timer structure
+ * @task: task to wake up
+ *
+ * task is set to NULL when the timer expires.
+ */
+struct hrtimer_sleeper {
+ struct hrtimer timer;
+ struct task_struct *task;
+};
+
+/**
+ * struct hrtimer_clock_base - the timer base for a specific clock
+ * @cpu_base: per cpu clock base
+ * @index: clock type index for per_cpu support when moving a
+ * timer to a base on another cpu.
+ * @clockid: clock id for per_cpu support
+ * @active: red black tree root node for the active timers
+ * @resolution: the resolution of the clock, in nanoseconds
+ * @get_time: function to retrieve the current time of the clock
+ * @softirq_time: the time when running the hrtimer queue in the softirq
+ * @offset: offset of this clock to the monotonic base
+ */
+struct hrtimer_clock_base {
+ struct hrtimer_cpu_base *cpu_base;
+ int index;
+ clockid_t clockid;
+ struct timerqueue_head active;
+ ktime_t resolution;
+ ktime_t (*get_time)(void);
+ ktime_t softirq_time;
+ ktime_t offset;
+};
+
+enum hrtimer_base_type {
+ HRTIMER_BASE_MONOTONIC,
+ HRTIMER_BASE_REALTIME,
+ HRTIMER_BASE_BOOTTIME,
+ HRTIMER_BASE_TAI,
+ HRTIMER_MAX_CLOCK_BASES,
+};
+
+/*
+ * struct hrtimer_cpu_base - the per cpu clock bases
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set: Indicates that clock was set from irq context.
+ * @expires_next: absolute time of the next event which was scheduled
+ * via clock_set_next_event()
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hres_active: State of high resolution mode
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @clock_base: array of clock bases for this cpu
+ */
+struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t expires_next;
+ int in_hrtirq;
+ int hres_active;
+ int hang_detected;
+ unsigned long nr_events;
+ unsigned long nr_retries;
+ unsigned long nr_hangs;
+ ktime_t max_hang_time;
+#endif
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+};
+
+static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
+{
+ timer->node.expires = time;
+ timer->_softexpires = time;
+}
+
+static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
+{
+ timer->_softexpires = time;
+ timer->node.expires = ktime_add_safe(time, delta);
+}
+
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+{
+ timer->_softexpires = time;
+ timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
+}
+
+static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
+{
+ timer->node.expires.tv64 = tv64;
+ timer->_softexpires.tv64 = tv64;
+}
+
+static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
+{
+ timer->node.expires = ktime_add_safe(timer->node.expires, time);
+ timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
+}
+
+static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
+{
+ timer->node.expires = ktime_add_ns(timer->node.expires, ns);
+ timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
+}
+
+static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
+{
+ return timer->node.expires;
+}
+
+static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
+{
+ return timer->_softexpires;
+}
+
+static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
+{
+ return timer->node.expires.tv64;
+}
+static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
+{
+ return timer->_softexpires.tv64;
+}
+
+static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
+{
+ return ktime_to_ns(timer->node.expires);
+}
+
+static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
+{
+ return ktime_sub(timer->node.expires, timer->base->get_time());
+}
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
+
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
+/*
+ * In high resolution mode the time reference must be read accurately
+ */
+static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+{
+ return timer->base->get_time();
+}
+
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+ return timer->base->cpu_base->hres_active;
+}
+
+extern void hrtimer_peek_ahead_timers(void);
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution value.
+ */
+# define HIGH_RES_NSEC 1
+# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC }
+# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
+# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+
+extern void clock_was_set_delayed(void);
+
+#else
+
+# define MONOTONIC_RES_NSEC LOW_RES_NSEC
+# define KTIME_MONOTONIC_RES KTIME_LOW_RES
+
+static inline void hrtimer_peek_ahead_timers(void) { }
+
+/*
+ * In non-high-resolution mode the time reference is taken from
+ * the base softirq time variable.
+ */
+static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
+{
+ return timer->base->softirq_time;
+}
+
+static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+{
+ return 0;
+}
+
+static inline void clock_was_set_delayed(void) { }
+
+#endif
+
+extern void clock_was_set(void);
+#ifdef CONFIG_TIMERFD
+extern void timerfd_clock_was_set(void);
+#else
+static inline void timerfd_clock_was_set(void) { }
+#endif
+extern void hrtimers_resume(void);
+
+DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+
+
+/* Exported timer functions: */
+
+/* Initialize timers: */
+extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
+ enum hrtimer_mode mode);
+
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
+ enum hrtimer_mode mode);
+
+extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
+#else
+static inline void hrtimer_init_on_stack(struct hrtimer *timer,
+ clockid_t which_clock,
+ enum hrtimer_mode mode)
+{
+ hrtimer_init(timer, which_clock, mode);
+}
+static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
+#endif
+
+/* Basic timer operations: */
+extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode);
+extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long range_ns, const enum hrtimer_mode mode);
+extern int
+__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ unsigned long delta_ns,
+ const enum hrtimer_mode mode, int wakeup);
+
+extern int hrtimer_cancel(struct hrtimer *timer);
+extern int hrtimer_try_to_cancel(struct hrtimer *timer);
+
+static inline int hrtimer_start_expires(struct hrtimer *timer,
+ enum hrtimer_mode mode)
+{
+ unsigned long delta;
+ ktime_t soft, hard;
+ soft = hrtimer_get_softexpires(timer);
+ hard = hrtimer_get_expires(timer);
+ delta = ktime_to_ns(ktime_sub(hard, soft));
+ return hrtimer_start_range_ns(timer, soft, delta, mode);
+}
+
+static inline int hrtimer_restart(struct hrtimer *timer)
+{
+ return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+}
+
+/* Query timers: */
+extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
+
+extern ktime_t hrtimer_get_next_event(void);
+
+/*
+ * A timer is active when it is enqueued into the rbtree or the
+ * callback function is running or it's in the state of being migrated
+ * to another cpu.
+ */
+static inline int hrtimer_active(const struct hrtimer *timer)
+{
+ return timer->state != HRTIMER_STATE_INACTIVE;
+}
+
+/*
+ * Helper function to check whether the timer is on one of the queues
+ */
+static inline int hrtimer_is_queued(struct hrtimer *timer)
+{
+ return timer->state & HRTIMER_STATE_ENQUEUED;
+}
+
+/*
+ * Helper function to check whether the timer is running the callback
+ * function
+ */
+static inline int hrtimer_callback_running(struct hrtimer *timer)
+{
+ return timer->state & HRTIMER_STATE_CALLBACK;
+}
+
+/* Forward a hrtimer so it expires after now: */
+extern u64
+hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
+
+/* Forward a hrtimer so it expires after the hrtimer's current now */
+static inline u64 hrtimer_forward_now(struct hrtimer *timer,
+ ktime_t interval)
+{
+ return hrtimer_forward(timer, timer->base->get_time(), interval);
+}
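+
+/* Minimal usage sketch (illustrative, not part of the API above): arm a
+ * relative one-second timer whose callback re-arms itself, giving a
+ * periodic tick.  "my_timer" is assumed to live as long as the timer runs.
+ *
+ *	static struct hrtimer my_timer;
+ *
+ *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
+ *	{
+ *		hrtimer_forward_now(t, ktime_set(1, 0));
+ *		return HRTIMER_RESTART;
+ *	}
+ *
+ *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ *	my_timer.function = my_timer_fn;
+ *	hrtimer_start(&my_timer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ */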
+
+/* Precise sleep: */
+extern long hrtimer_nanosleep(struct timespec *rqtp,
+ struct timespec __user *rmtp,
+ const enum hrtimer_mode mode,
+ const clockid_t clockid);
+extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
+
+extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
+ struct task_struct *tsk);
+
+extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+ const enum hrtimer_mode mode);
+extern int schedule_hrtimeout_range_clock(ktime_t *expires,
+ unsigned long delta, const enum hrtimer_mode mode, int clock);
+extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
+
+/* Soft interrupt function to run the hrtimer queues: */
+extern void hrtimer_run_queues(void);
+extern void hrtimer_run_pending(void);
+
+/* Bootup initialization: */
+extern void __init hrtimers_init(void);
+
+/* Show pending timers: */
+extern void sysrq_timer_list_show(void);
+
+#endif
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
new file mode 100644
index 000000000..5dd60c2e1
--- /dev/null
+++ b/include/linux/hsi/hsi.h
@@ -0,0 +1,444 @@
+/*
+ * HSI core header file.
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_HSI_H__
+#define __LINUX_HSI_H__
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+/* HSI message ttype */
+#define HSI_MSG_READ 0
+#define HSI_MSG_WRITE 1
+
+/* HSI configuration values */
+enum {
+ HSI_MODE_STREAM = 1,
+ HSI_MODE_FRAME,
+};
+
+enum {
+ HSI_FLOW_SYNC, /* Synchronized flow */
+ HSI_FLOW_PIPE, /* Pipelined flow */
+};
+
+enum {
+ HSI_ARB_RR, /* Round-robin arbitration */
+ HSI_ARB_PRIO, /* Channel priority arbitration */
+};
+
+#define HSI_MAX_CHANNELS 16
+
+/* HSI message status codes */
+enum {
+ HSI_STATUS_COMPLETED, /* Message transfer is completed */
+ HSI_STATUS_PENDING, /* Message pending to be read/write (POLL) */
+ HSI_STATUS_PROCEEDING, /* Message transfer is ongoing */
+ HSI_STATUS_QUEUED, /* Message waiting to be served */
+ HSI_STATUS_ERROR, /* Error when message transfer was ongoing */
+};
+
+/* HSI port event codes */
+enum {
+ HSI_EVENT_START_RX,
+ HSI_EVENT_STOP_RX,
+};
+
+/**
+ * struct hsi_channel - channel resource used by the hsi clients
+ * @id: Channel number
+ * @name: Channel name
+ */
+struct hsi_channel {
+ unsigned int id;
+ const char *name;
+};
+
+/**
+ * struct hsi_config - Configuration for RX/TX HSI modules
+ * @mode: Bit transmission mode (STREAM or FRAME)
+ * @channels: Channel resources used by the client
+ * @num_channels: Number of channel resources
+ * @num_hw_channels: Number of channels the transceiver is configured for [1..16]
+ * @speed: Max bit transmission speed (Kbit/s)
+ * @flow: RX flow type (SYNCHRONIZED or PIPELINE)
+ * @arb_mode: Arbitration mode for TX frame (Round robin, priority)
+ */
+struct hsi_config {
+ unsigned int mode;
+ struct hsi_channel *channels;
+ unsigned int num_channels;
+ unsigned int num_hw_channels;
+ unsigned int speed;
+ union {
+ unsigned int flow; /* RX only */
+ unsigned int arb_mode; /* TX only */
+ };
+};
+
+/**
+ * struct hsi_board_info - HSI client board info
+ * @name: Name for the HSI device
+ * @hsi_id: HSI controller id where the client sits
+ * @port: Port number in the controller where the client sits
+ * @tx_cfg: HSI TX configuration
+ * @rx_cfg: HSI RX configuration
+ * @platform_data: Platform related data
+ * @archdata: Architecture-dependent device data
+ */
+struct hsi_board_info {
+ const char *name;
+ unsigned int hsi_id;
+ unsigned int port;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ void *platform_data;
+ struct dev_archdata *archdata;
+};
+
+#ifdef CONFIG_HSI_BOARDINFO
+extern int hsi_register_board_info(struct hsi_board_info const *info,
+ unsigned int len);
+#else
+static inline int hsi_register_board_info(struct hsi_board_info const *info,
+ unsigned int len)
+{
+ return 0;
+}
+#endif /* CONFIG_HSI_BOARDINFO */
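+
+/* Illustrative sketch (not from the original header): board code would
+ * describe a client and hand it to hsi_register_board_info(); every value
+ * below, including the client name, is hypothetical.
+ *
+ *	static struct hsi_board_info example_hsi_devices[] = {
+ *		{
+ *			.name = "example-modem",
+ *			.hsi_id = 0,
+ *			.port = 0,
+ *			.tx_cfg = {
+ *				.mode = HSI_MODE_FRAME,
+ *				.speed = 100000,
+ *				.arb_mode = HSI_ARB_RR,
+ *			},
+ *			.rx_cfg = {
+ *				.mode = HSI_MODE_FRAME,
+ *				.speed = 100000,
+ *				.flow = HSI_FLOW_SYNC,
+ *			},
+ *		},
+ *	};
+ *
+ *	hsi_register_board_info(example_hsi_devices,
+ *				ARRAY_SIZE(example_hsi_devices));
+ */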
+
+/**
+ * struct hsi_client - HSI client attached to an HSI port
+ * @device: Driver model representation of the device
+ * @tx_cfg: HSI TX configuration
+ * @rx_cfg: HSI RX configuration
+ * @ehandler: Callback for handling port events (RX Wake High/Low)
+ * @pclaimed: Keeps track of whether the client claimed its associated HSI port
+ * @nb: Notifier block for port events
+ */
+struct hsi_client {
+ struct device device;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ /* private: */
+ void (*ehandler)(struct hsi_client *, unsigned long);
+ unsigned int pclaimed:1;
+ struct notifier_block nb;
+};
+
+#define to_hsi_client(dev) container_of(dev, struct hsi_client, device)
+
+static inline void hsi_client_set_drvdata(struct hsi_client *cl, void *data)
+{
+ dev_set_drvdata(&cl->device, data);
+}
+
+static inline void *hsi_client_drvdata(struct hsi_client *cl)
+{
+ return dev_get_drvdata(&cl->device);
+}
+
+int hsi_register_port_event(struct hsi_client *cl,
+ void (*handler)(struct hsi_client *, unsigned long));
+int hsi_unregister_port_event(struct hsi_client *cl);
+
+/**
+ * struct hsi_client_driver - Driver associated to an HSI client
+ * @driver: Driver model representation of the driver
+ */
+struct hsi_client_driver {
+ struct device_driver driver;
+};
+
+#define to_hsi_client_driver(drv) container_of(drv, struct hsi_client_driver,\
+ driver)
+
+int hsi_register_client_driver(struct hsi_client_driver *drv);
+
+static inline void hsi_unregister_client_driver(struct hsi_client_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+
+/**
+ * struct hsi_msg - HSI message descriptor
+ * @link: Free to use by the current descriptor owner
+ * @cl: HSI device client that issues the transfer
+ * @sgt: Head of the scatterlist array
+ * @context: Client context data associated to the transfer
+ * @complete: Transfer completion callback
+ * @destructor: Destructor to free resources when flushing
+ * @status: Status of the transfer when completed
+ * @actual_len: Actual length of data transferred on completion
+ * @channel: Channel on which to TX/RX the message
+ * @ttype: Transfer type (TX if set, RX otherwise)
+ * @break_frame: If true, HSI will send/receive a break frame. Data buffers are
+ * ignored in the request.
+ */
+struct hsi_msg {
+ struct list_head link;
+ struct hsi_client *cl;
+ struct sg_table sgt;
+ void *context;
+
+ void (*complete)(struct hsi_msg *msg);
+ void (*destructor)(struct hsi_msg *msg);
+
+ int status;
+ unsigned int actual_len;
+ unsigned int channel;
+ unsigned int ttype:1;
+ unsigned int break_frame:1;
+};
+
+struct hsi_msg *hsi_alloc_msg(unsigned int n_frag, gfp_t flags);
+void hsi_free_msg(struct hsi_msg *msg);
+
+/**
+ * struct hsi_port - HSI port device
+ * @device: Driver model representation of the device
+ * @tx_cfg: Current TX path configuration
+ * @rx_cfg: Current RX path configuration
+ * @num: Port number
+ * @shared: Set when port can be shared by different clients
+ * @claimed: Reference count of clients which claimed the port
+ * @lock: Serialize port claim
+ * @async: Asynchronous transfer callback
+ * @setup: Callback to set the HSI client configuration
+ * @flush: Callback to clean the HW state and destroy all pending transfers
+ * @start_tx: Callback to inform that a client wants to TX data
+ * @stop_tx: Callback to inform that a client no longer wishes to TX data
+ * @release: Callback to inform that a client no longer uses the port
+ * @n_head: Notifier chain for signaling port events to the clients.
+ */
+struct hsi_port {
+ struct device device;
+ struct hsi_config tx_cfg;
+ struct hsi_config rx_cfg;
+ unsigned int num;
+ unsigned int shared:1;
+ int claimed;
+ struct mutex lock;
+ int (*async)(struct hsi_msg *msg);
+ int (*setup)(struct hsi_client *cl);
+ int (*flush)(struct hsi_client *cl);
+ int (*start_tx)(struct hsi_client *cl);
+ int (*stop_tx)(struct hsi_client *cl);
+ int (*release)(struct hsi_client *cl);
+ /* private */
+ struct atomic_notifier_head n_head;
+};
+
+#define to_hsi_port(dev) container_of(dev, struct hsi_port, device)
+#define hsi_get_port(cl) to_hsi_port((cl)->device.parent)
+
+int hsi_event(struct hsi_port *port, unsigned long event);
+int hsi_claim_port(struct hsi_client *cl, unsigned int share);
+void hsi_release_port(struct hsi_client *cl);
+
+static inline int hsi_port_claimed(struct hsi_client *cl)
+{
+ return cl->pclaimed;
+}
+
+static inline void hsi_port_set_drvdata(struct hsi_port *port, void *data)
+{
+ dev_set_drvdata(&port->device, data);
+}
+
+static inline void *hsi_port_drvdata(struct hsi_port *port)
+{
+ return dev_get_drvdata(&port->device);
+}
+
+/**
+ * struct hsi_controller - HSI controller device
+ * @device: Driver model representation of the device
+ * @owner: Pointer to the module owning the controller
+ * @id: HSI controller ID
+ * @num_ports: Number of ports in the HSI controller
+ * @port: Array of HSI ports
+ */
+struct hsi_controller {
+ struct device device;
+ struct module *owner;
+ unsigned int id;
+ unsigned int num_ports;
+ struct hsi_port **port;
+};
+
+#define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device)
+
+struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags);
+void hsi_put_controller(struct hsi_controller *hsi);
+int hsi_register_controller(struct hsi_controller *hsi);
+void hsi_unregister_controller(struct hsi_controller *hsi);
+struct hsi_client *hsi_new_client(struct hsi_port *port,
+ struct hsi_board_info *info);
+int hsi_remove_client(struct device *dev, void *data);
+void hsi_port_unregister_clients(struct hsi_port *port);
+
+#ifdef CONFIG_OF
+void hsi_add_clients_from_dt(struct hsi_port *port,
+ struct device_node *clients);
+#else
+static inline void hsi_add_clients_from_dt(struct hsi_port *port,
+ struct device_node *clients)
+{
+ return;
+}
+#endif
+
+static inline void hsi_controller_set_drvdata(struct hsi_controller *hsi,
+ void *data)
+{
+ dev_set_drvdata(&hsi->device, data);
+}
+
+static inline void *hsi_controller_drvdata(struct hsi_controller *hsi)
+{
+ return dev_get_drvdata(&hsi->device);
+}
+
+static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi,
+ unsigned int num)
+{
+ return (num < hsi->num_ports) ? hsi->port[num] : NULL;
+}
+
+/*
+ * API for HSI clients
+ */
+int hsi_async(struct hsi_client *cl, struct hsi_msg *msg);
+
+int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name);
+
+/**
+ * hsi_id - Get HSI controller ID associated to a client
+ * @cl: Pointer to a HSI client
+ *
+ * Return the id of the controller the client is attached to
+ */
+static inline unsigned int hsi_id(struct hsi_client *cl)
+{
+ return to_hsi_controller(cl->device.parent->parent)->id;
+}
+
+/**
+ * hsi_port_id - Gets the port number a client is attached to
+ * @cl: Pointer to HSI client
+ *
+ * Return the port number associated to the client
+ */
+static inline unsigned int hsi_port_id(struct hsi_client *cl)
+{
+ return to_hsi_port(cl->device.parent)->num;
+}
+
+/**
+ * hsi_setup - Configure the client's port
+ * @cl: Pointer to the HSI client
+ *
+ * When sharing ports, clients should either rely on a single
+ * client setup or have the same setup for all of them.
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_setup(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->setup(cl);
+}
+
+/**
+ * hsi_flush - Flush all pending transactions on the client's port
+ * @cl: Pointer to the HSI client
+ *
+ * This function will destroy all pending hsi_msg in the port and reset
+ * the HW port so it is ready to receive and transmit from a clean state.
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_flush(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->flush(cl);
+}
+
+/**
+ * hsi_async_read - Submit a read transfer
+ * @cl: Pointer to the HSI client
+ * @msg: HSI message descriptor of the transfer
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_async_read(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ msg->ttype = HSI_MSG_READ;
+ return hsi_async(cl, msg);
+}
+
+/**
+ * hsi_async_write - Submit a write transfer
+ * @cl: Pointer to the HSI client
+ * @msg: HSI message descriptor of the transfer
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_async_write(struct hsi_client *cl, struct hsi_msg *msg)
+{
+ msg->ttype = HSI_MSG_WRITE;
+ return hsi_async(cl, msg);
+}
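+
+/* Minimal write sketch (illustrative; error handling omitted and "cl",
+ * "buf" and "len" are assumed to exist, with the port already claimed and
+ * set up).  The completion callback runs once the controller has finished
+ * with the message:
+ *
+ *	static void example_write_done(struct hsi_msg *msg)
+ *	{
+ *		(inspect msg->status and msg->actual_len here)
+ *		hsi_free_msg(msg);
+ *	}
+ *
+ *	struct hsi_msg *msg = hsi_alloc_msg(1, GFP_KERNEL);
+ *
+ *	sg_init_one(msg->sgt.sgl, buf, len);
+ *	msg->complete = example_write_done;
+ *	msg->channel = 0;
+ *	hsi_async_write(cl, msg);
+ */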
+
+/**
+ * hsi_start_tx - Signal the port that the client wants to start a TX
+ * @cl: Pointer to the HSI client
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_start_tx(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->start_tx(cl);
+}
+
+/**
+ * hsi_stop_tx - Signal the port that the client no longer wants to transmit
+ * @cl: Pointer to the HSI client
+ *
+ * Return -errno on failure, 0 on success
+ */
+static inline int hsi_stop_tx(struct hsi_client *cl)
+{
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ return hsi_get_port(cl)->stop_tx(cl);
+}
+#endif /* __LINUX_HSI_H__ */
diff --git a/include/linux/hsi/ssi_protocol.h b/include/linux/hsi/ssi_protocol.h
new file mode 100644
index 000000000..1433651be
--- /dev/null
+++ b/include/linux/hsi/ssi_protocol.h
@@ -0,0 +1,42 @@
+/*
+ * ssip_slave.h
+ *
+ * SSIP slave support header file
+ *
+ * Copyright (C) 2010 Nokia Corporation. All rights reserved.
+ *
+ * Contact: Carlos Chinea <carlos.chinea@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_SSIP_SLAVE_H__
+#define __LINUX_SSIP_SLAVE_H__
+
+#include <linux/hsi/hsi.h>
+
+static inline void ssip_slave_put_master(struct hsi_client *master)
+{
+}
+
+struct hsi_client *ssip_slave_get_master(struct hsi_client *slave);
+int ssip_slave_start_tx(struct hsi_client *master);
+int ssip_slave_stop_tx(struct hsi_client *master);
+void ssip_reset_event(struct hsi_client *master);
+
+int ssip_slave_running(struct hsi_client *master);
+
+#endif /* __LINUX_SSIP_SLAVE_H__ */
+
diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h
new file mode 100644
index 000000000..ab3f6cb4d
--- /dev/null
+++ b/include/linux/htcpld.h
@@ -0,0 +1,24 @@
+#ifndef __LINUX_HTCPLD_H
+#define __LINUX_HTCPLD_H
+
+struct htcpld_chip_platform_data {
+ unsigned int addr;
+ unsigned int reset;
+ unsigned int num_gpios;
+ unsigned int gpio_out_base;
+ unsigned int gpio_in_base;
+ unsigned int irq_base;
+ unsigned int num_irqs;
+};
+
+struct htcpld_core_platform_data {
+ unsigned int int_reset_gpio_hi;
+ unsigned int int_reset_gpio_lo;
+ unsigned int i2c_adapter_id;
+
+ struct htcpld_chip_platform_data *chip;
+ unsigned int num_chip;
+};
+
+#endif /* __LINUX_HTCPLD_H */
+
diff --git a/include/linux/htirq.h b/include/linux/htirq.h
new file mode 100644
index 000000000..70a1dbbf2
--- /dev/null
+++ b/include/linux/htirq.h
@@ -0,0 +1,24 @@
+#ifndef LINUX_HTIRQ_H
+#define LINUX_HTIRQ_H
+
+struct ht_irq_msg {
+ u32 address_lo; /* low 32 bits of the ht irq message */
+ u32 address_hi; /* high 32 bits of the it irq message */
+};
+
+/* Helper functions.. */
+void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
+void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
+struct irq_data;
+void mask_ht_irq(struct irq_data *data);
+void unmask_ht_irq(struct irq_data *data);
+
+/* The arch hook for getting things started */
+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
+
+/* For drivers of buggy hardware */
+typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
+ struct ht_irq_msg *msg);
+int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
+
+#endif /* LINUX_HTIRQ_H */
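A sketch of the "buggy hardware" hook: the update callback may rewrite the HT IRQ message before it is written to the device. The quirk shown, clearing address_hi, is invented purely for illustration.

#include <linux/pci.h>
#include <linux/htirq.h>

static void my_ht_irq_fixup(struct pci_dev *dev, int irq,
			    struct ht_irq_msg *msg)
{
	/* Hypothetical workaround: the device decodes only the low 32 bits. */
	msg->address_hi = 0;
}

static int my_setup_ht_irq(struct pci_dev *dev, int idx)
{
	return __ht_create_irq(dev, idx, my_ht_irq_fixup);
}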
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
new file mode 100644
index 000000000..f10b20f05
--- /dev/null
+++ b/include/linux/huge_mm.h
@@ -0,0 +1,223 @@
+#ifndef _LINUX_HUGE_MM_H
+#define _LINUX_HUGE_MM_H
+
+extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ unsigned int flags);
+extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+ pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+ struct vm_area_struct *vma);
+extern void huge_pmd_set_accessed(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ pmd_t orig_pmd, int dirty);
+extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd,
+ pmd_t orig_pmd);
+extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ unsigned long addr,
+ pmd_t *pmd,
+ unsigned int flags);
+extern int zap_huge_pmd(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long addr);
+extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ unsigned char *vec);
+extern int move_huge_pmd(struct vm_area_struct *vma,
+ struct vm_area_struct *new_vma,
+ unsigned long old_addr,
+ unsigned long new_addr, unsigned long old_end,
+ pmd_t *old_pmd, pmd_t *new_pmd);
+extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, pgprot_t newprot,
+ int prot_numa);
+
+enum transparent_hugepage_flag {
+ TRANSPARENT_HUGEPAGE_FLAG,
+ TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+ TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
+ TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
+ TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
+ TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
+#ifdef CONFIG_DEBUG_VM
+ TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
+#endif
+};
+
+enum page_check_address_pmd_flag {
+ PAGE_CHECK_ADDRESS_PMD_FLAG,
+ PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
+ PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
+};
+extern pmd_t *page_check_address_pmd(struct page *page,
+ struct mm_struct *mm,
+ unsigned long address,
+ enum page_check_address_pmd_flag flag,
+ spinlock_t **ptl);
+
+#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
+#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define HPAGE_PMD_SHIFT PMD_SHIFT
+#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
+#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
+
+extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
+
+#define transparent_hugepage_enabled(__vma) \
+ ((transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_FLAG) || \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
+ ((__vma)->vm_flags & VM_HUGEPAGE))) && \
+ !((__vma)->vm_flags & VM_NOHUGEPAGE) && \
+ !is_vma_temporary_stack(__vma))
+#define transparent_hugepage_defrag(__vma) \
+ ((transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) && \
+ (__vma)->vm_flags & VM_HUGEPAGE))
+#define transparent_hugepage_use_zero_page() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
+#ifdef CONFIG_DEBUG_VM
+#define transparent_hugepage_debug_cow() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
+#else /* CONFIG_DEBUG_VM */
+#define transparent_hugepage_debug_cow() 0
+#endif /* CONFIG_DEBUG_VM */
+
+extern unsigned long transparent_hugepage_flags;
+extern int split_huge_page_to_list(struct page *page, struct list_head *list);
+static inline int split_huge_page(struct page *page)
+{
+ return split_huge_page_to_list(page, NULL);
+}
+extern void __split_huge_page_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd);
+#define split_huge_page_pmd(__vma, __address, __pmd) \
+ do { \
+ pmd_t *____pmd = (__pmd); \
+ if (unlikely(pmd_trans_huge(*____pmd))) \
+ __split_huge_page_pmd(__vma, __address, \
+ ____pmd); \
+ } while (0)
+#define wait_split_huge_page(__anon_vma, __pmd) \
+ do { \
+ pmd_t *____pmd = (__pmd); \
+ anon_vma_lock_write(__anon_vma); \
+ anon_vma_unlock_write(__anon_vma); \
+ BUG_ON(pmd_trans_splitting(*____pmd) || \
+ pmd_trans_huge(*____pmd)); \
+ } while (0)
+extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd);
+#if HPAGE_PMD_ORDER >= MAX_ORDER
+#error "hugepages can't be allocated by the buddy allocator"
+#endif
+extern int hugepage_madvise(struct vm_area_struct *vma,
+ unsigned long *vm_flags, int advice);
+extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ long adjust_next);
+extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+ spinlock_t **ptl);
+/* mmap_sem must be held on entry */
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+ spinlock_t **ptl)
+{
+ VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
+ if (pmd_trans_huge(*pmd))
+ return __pmd_trans_huge_lock(pmd, vma, ptl);
+ else
+ return 0;
+}
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ long adjust_next)
+{
+ if (!vma->anon_vma || vma->vm_ops)
+ return;
+ __vma_adjust_trans_huge(vma, start, end, adjust_next);
+}
+static inline int hpage_nr_pages(struct page *page)
+{
+ if (unlikely(PageTransHuge(page)))
+ return HPAGE_PMD_NR;
+ return 1;
+}
+
+extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd, pmd_t *pmdp);
+
+extern struct page *huge_zero_page;
+
+static inline bool is_huge_zero_page(struct page *page)
+{
+ return ACCESS_ONCE(huge_zero_page) == page;
+}
+
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
+#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
+#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
+
+#define hpage_nr_pages(x) 1
+
+#define transparent_hugepage_enabled(__vma) 0
+
+#define transparent_hugepage_flags 0UL
+static inline int
+split_huge_page_to_list(struct page *page, struct list_head *list)
+{
+ return 0;
+}
+static inline int split_huge_page(struct page *page)
+{
+ return 0;
+}
+#define split_huge_page_pmd(__vma, __address, __pmd) \
+ do { } while (0)
+#define wait_split_huge_page(__anon_vma, __pmd) \
+ do { } while (0)
+#define split_huge_page_pmd_mm(__mm, __address, __pmd) \
+ do { } while (0)
+static inline int hugepage_madvise(struct vm_area_struct *vma,
+ unsigned long *vm_flags, int advice)
+{
+ BUG();
+ return 0;
+}
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ long adjust_next)
+{
+}
+static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+ spinlock_t **ptl)
+{
+ return 0;
+}
+
+static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd, pmd_t *pmdp)
+{
+ return 0;
+}
+
+static inline bool is_huge_zero_page(struct page *page)
+{
+ return false;
+}
+
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* _LINUX_HUGE_MM_H */
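As an illustration of how the helpers compose (the walker and its name are hypothetical): a page-table walker can split a transparent huge pmd up front so the rest of the walk only ever sees regular entries.

#include <linux/mm.h>
#include <linux/huge_mm.h>

static void my_walk_prepare(struct vm_area_struct *vma,
			    unsigned long addr, pmd_t *pmd)
{
	if (!transparent_hugepage_enabled(vma))
		return;
	/* Expands to a no-op unless *pmd really is a transparent huge pmd. */
	split_huge_page_pmd(vma, addr, pmd);
}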
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
new file mode 100644
index 000000000..205026175
--- /dev/null
+++ b/include/linux/hugetlb.h
@@ -0,0 +1,520 @@
+#ifndef _LINUX_HUGETLB_H
+#define _LINUX_HUGETLB_H
+
+#include <linux/mm_types.h>
+#include <linux/mmdebug.h>
+#include <linux/fs.h>
+#include <linux/hugetlb_inline.h>
+#include <linux/cgroup.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+struct ctl_table;
+struct user_struct;
+struct mmu_gather;
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#include <linux/mempolicy.h>
+#include <linux/shm.h>
+#include <asm/tlbflush.h>
+
+struct hugepage_subpool {
+ spinlock_t lock;
+ long count;
+ long max_hpages; /* Maximum huge pages or -1 if no maximum. */
+ long used_hpages; /* Used count against maximum, includes */
+ /* both alloced and reserved pages. */
+ struct hstate *hstate;
+ long min_hpages; /* Minimum huge pages or -1 if no minimum. */
+ long rsv_hpages; /* Pages reserved against global pool to */
+ /* satisfy minimum size. */
+};
+
+struct resv_map {
+ struct kref refs;
+ spinlock_t lock;
+ struct list_head regions;
+};
+extern struct resv_map *resv_map_alloc(void);
+void resv_map_release(struct kref *ref);
+
+extern spinlock_t hugetlb_lock;
+extern int hugetlb_max_hstate __read_mostly;
+#define for_each_hstate(h) \
+ for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
+
+struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
+ long min_hpages);
+void hugepage_put_subpool(struct hugepage_subpool *spool);
+
+void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+
+#ifdef CONFIG_NUMA
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+#endif
+
+int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
+long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+ struct page **, struct vm_area_struct **,
+ unsigned long *, unsigned long *, long, unsigned int);
+void unmap_hugepage_range(struct vm_area_struct *,
+ unsigned long, unsigned long, struct page *);
+void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page);
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page);
+void hugetlb_report_meminfo(struct seq_file *);
+int hugetlb_report_node_meminfo(int, char *);
+void hugetlb_show_meminfo(void);
+unsigned long hugetlb_total_pages(void);
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags);
+int hugetlb_reserve_pages(struct inode *inode, long from, long to,
+ struct vm_area_struct *vma,
+ vm_flags_t vm_flags);
+void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
+int dequeue_hwpoisoned_huge_page(struct page *page);
+bool isolate_huge_page(struct page *page, struct list_head *list);
+void putback_active_hugepage(struct page *page);
+void free_huge_page(struct page *page);
+
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+#endif
+
+extern int hugepages_treat_as_movable;
+extern int sysctl_hugetlb_shm_group;
+extern struct list_head huge_boot_pages;
+
+/* arch callbacks */
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz);
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ int write);
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int flags);
+struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int flags);
+int pmd_huge(pmd_t pmd);
+int pud_huge(pud_t pud);
+unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot);
+
+#else /* !CONFIG_HUGETLB_PAGE */
+
+static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+{
+}
+
+static inline unsigned long hugetlb_total_pages(void)
+{
+ return 0;
+}
+
+#define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; })
+#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
+#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+static inline void hugetlb_report_meminfo(struct seq_file *m)
+{
+}
+#define hugetlb_report_node_meminfo(n, buf) 0
+static inline void hugetlb_show_meminfo(void)
+{
+}
+#define follow_huge_pmd(mm, addr, pmd, flags) NULL
+#define follow_huge_pud(mm, addr, pud, flags) NULL
+#define prepare_hugepage_range(file, addr, len) (-EINVAL)
+#define pmd_huge(x) 0
+#define pud_huge(x) 0
+#define is_hugepage_only_range(mm, addr, len) 0
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
+#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
+#define huge_pte_offset(mm, address) 0
+static inline int dequeue_hwpoisoned_huge_page(struct page *page)
+{
+ return 0;
+}
+
+static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+{
+ return false;
+}
+#define putback_active_hugepage(p) do {} while (0)
+
+static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+ unsigned long address, unsigned long end, pgprot_t newprot)
+{
+ return 0;
+}
+
+static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+{
+ BUG();
+}
+
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, struct page *ref_page)
+{
+ BUG();
+}
+
+#endif /* !CONFIG_HUGETLB_PAGE */
+/*
+ * hugepages at the page global directory. If an arch supports
+ * hugepages at the pgd level, it needs to define this.
+ */
+#ifndef pgd_huge
+#define pgd_huge(x) 0
+#endif
+
+#ifndef pgd_write
+static inline int pgd_write(pgd_t pgd)
+{
+ BUG();
+ return 0;
+}
+#endif
+
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif
+
+#ifndef is_hugepd
+/*
+ * Some architectures require a hugepage directory format in order to
+ * support multiple hugepage sizes. For example,
+ * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
+ * introduced such a format on powerpc. This allows for a more flexible hugepage
+ * pagetable layout.
+ */
+typedef struct { unsigned long pd; } hugepd_t;
+#define is_hugepd(hugepd) (0)
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ return 0;
+}
+#else
+extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
+ unsigned pdshift, unsigned long end,
+ int write, struct page **pages, int *nr);
+#endif
+
+#define HUGETLB_ANON_FILE "anon_hugepage"
+
+enum {
+ /*
+ * The file will be used as an shm file so shmfs accounting rules
+ * apply
+ */
+ HUGETLB_SHMFS_INODE = 1,
+ /*
+ * The file is being created on the internal vfs mount and shmfs
+ * accounting rules do not apply
+ */
+ HUGETLB_ANONHUGE_INODE = 2,
+};
+
+#ifdef CONFIG_HUGETLBFS
+struct hugetlbfs_sb_info {
+ long max_inodes; /* inodes allowed */
+ long free_inodes; /* inodes free */
+ spinlock_t stat_lock;
+ struct hstate *hstate;
+ struct hugepage_subpool *spool;
+};
+
+static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+extern const struct file_operations hugetlbfs_file_operations;
+extern const struct vm_operations_struct hugetlb_vm_ops;
+struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
+ struct user_struct **user, int creat_flags,
+ int page_size_log);
+
+static inline int is_file_hugepages(struct file *file)
+{
+ if (file->f_op == &hugetlbfs_file_operations)
+ return 1;
+ if (is_file_shm_hugepages(file))
+ return 1;
+
+ return 0;
+}
+
+
+#else /* !CONFIG_HUGETLBFS */
+
+#define is_file_hugepages(file) 0
+static inline struct file *
+hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
+ struct user_struct **user, int creat_flags,
+ int page_size_log)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+#endif /* !CONFIG_HUGETLBFS */
+
+#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define HSTATE_NAME_LEN 32
+/* Defines one hugetlb page size */
+struct hstate {
+ int next_nid_to_alloc;
+ int next_nid_to_free;
+ unsigned int order;
+ unsigned long mask;
+ unsigned long max_huge_pages;
+ unsigned long nr_huge_pages;
+ unsigned long free_huge_pages;
+ unsigned long resv_huge_pages;
+ unsigned long surplus_huge_pages;
+ unsigned long nr_overcommit_huge_pages;
+ struct list_head hugepage_activelist;
+ struct list_head hugepage_freelists[MAX_NUMNODES];
+ unsigned int nr_huge_pages_node[MAX_NUMNODES];
+ unsigned int free_huge_pages_node[MAX_NUMNODES];
+ unsigned int surplus_huge_pages_node[MAX_NUMNODES];
+#ifdef CONFIG_CGROUP_HUGETLB
+ /* cgroup control files */
+ struct cftype cgroup_files[5];
+#endif
+ char name[HSTATE_NAME_LEN];
+};
+
+struct huge_bootmem_page {
+ struct list_head list;
+ struct hstate *hstate;
+#ifdef CONFIG_HIGHMEM
+ phys_addr_t phys;
+#endif
+};
+
+struct page *alloc_huge_page_node(struct hstate *h, int nid);
+struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
+ unsigned long addr, int avoid_reserve);
+
+/* arch callback */
+int __init alloc_bootmem_huge_page(struct hstate *h);
+
+void __init hugetlb_add_hstate(unsigned order);
+struct hstate *size_to_hstate(unsigned long size);
+
+#ifndef HUGE_MAX_HSTATE
+#define HUGE_MAX_HSTATE 1
+#endif
+
+extern struct hstate hstates[HUGE_MAX_HSTATE];
+extern unsigned int default_hstate_idx;
+
+#define default_hstate (hstates[default_hstate_idx])
+
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+ struct hugetlbfs_sb_info *hsb;
+ hsb = HUGETLBFS_SB(i->i_sb);
+ return hsb->hstate;
+}
+
+static inline struct hstate *hstate_file(struct file *f)
+{
+ return hstate_inode(file_inode(f));
+}
+
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+ if (!page_size_log)
+ return &default_hstate;
+
+ return size_to_hstate(1UL << page_size_log);
+}
+
+static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+{
+ return hstate_file(vma->vm_file);
+}
+
+static inline unsigned long huge_page_size(struct hstate *h)
+{
+ return (unsigned long)PAGE_SIZE << h->order;
+}
+
+extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
+
+extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
+
+static inline unsigned long huge_page_mask(struct hstate *h)
+{
+ return h->mask;
+}
+
+static inline unsigned int huge_page_order(struct hstate *h)
+{
+ return h->order;
+}
+
+static inline unsigned huge_page_shift(struct hstate *h)
+{
+ return h->order + PAGE_SHIFT;
+}
+
+static inline bool hstate_is_gigantic(struct hstate *h)
+{
+ return huge_page_order(h) >= MAX_ORDER;
+}
+
+static inline unsigned int pages_per_huge_page(struct hstate *h)
+{
+ return 1 << h->order;
+}
+
+static inline unsigned int blocks_per_huge_page(struct hstate *h)
+{
+ return huge_page_size(h) / 512;
+}
+
+#include <asm/hugetlb.h>
+
+#ifndef arch_make_huge_pte
+static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+ struct page *page, int writable)
+{
+ return entry;
+}
+#endif
+
+static inline struct hstate *page_hstate(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHuge(page), page);
+ return size_to_hstate(PAGE_SIZE << compound_order(page));
+}
+
+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+ return hstates[index].order + PAGE_SHIFT;
+}
+
+static inline int hstate_index(struct hstate *h)
+{
+ return h - hstates;
+}
+
+pgoff_t __basepage_index(struct page *page);
+
+/* Return page->index in PAGE_SIZE units */
+static inline pgoff_t basepage_index(struct page *page)
+{
+ if (!PageCompound(page))
+ return page->index;
+
+ return __basepage_index(page);
+}
+
+extern void dissolve_free_huge_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
+static inline int hugepage_migration_supported(struct hstate *h)
+{
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+ return huge_page_shift(h) == PMD_SHIFT;
+#else
+ return 0;
+#endif
+}
+
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+ struct mm_struct *mm, pte_t *pte)
+{
+ if (huge_page_size(h) == PMD_SIZE)
+ return pmd_lockptr(mm, (pmd_t *) pte);
+ VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
+ return &mm->page_table_lock;
+}
+
+static inline bool hugepages_supported(void)
+{
+ /*
+ * Some platforms decide at boot time whether they support huge
+ * pages. On those, such as powerpc, HPAGE_SHIFT is set to 0 when
+ * there is no such support.
+ */
+ return HPAGE_SHIFT != 0;
+}
+
+#else /* CONFIG_HUGETLB_PAGE */
+struct hstate {};
+#define alloc_huge_page_node(h, nid) NULL
+#define alloc_huge_page_noerr(v, a, r) NULL
+#define alloc_bootmem_huge_page(h) NULL
+#define hstate_file(f) NULL
+#define hstate_sizelog(s) NULL
+#define hstate_vma(v) NULL
+#define hstate_inode(i) NULL
+#define page_hstate(page) NULL
+#define huge_page_size(h) PAGE_SIZE
+#define huge_page_mask(h) PAGE_MASK
+#define vma_kernel_pagesize(v) PAGE_SIZE
+#define vma_mmu_pagesize(v) PAGE_SIZE
+#define huge_page_order(h) 0
+#define huge_page_shift(h) PAGE_SHIFT
+static inline unsigned int pages_per_huge_page(struct hstate *h)
+{
+ return 1;
+}
+#define hstate_index_to_shift(index) 0
+#define hstate_index(h) 0
+
+static inline pgoff_t basepage_index(struct page *page)
+{
+ return page->index;
+}
+#define dissolve_free_huge_pages(s, e) do {} while (0)
+#define hugepage_migration_supported(h) 0
+
+static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+ struct mm_struct *mm, pte_t *pte)
+{
+ return &mm->page_table_lock;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
+static inline spinlock_t *huge_pte_lock(struct hstate *h,
+ struct mm_struct *mm, pte_t *pte)
+{
+ spinlock_t *ptl;
+
+ ptl = huge_pte_lockptr(h, mm, pte);
+ spin_lock(ptl);
+ return ptl;
+}
+
+#endif /* _LINUX_HUGETLB_H */
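A sketch tying the hstate helpers to the pte lock: look up the hstate of a hugetlb VMA, report its page size, and take the matching page-table lock around an update. The function is hypothetical and assumes the caller already found ptep, for example via huge_pte_offset().

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

static void my_touch_huge_pte(struct vm_area_struct *vma,
			      struct mm_struct *mm, pte_t *ptep)
{
	struct hstate *h = hstate_vma(vma);
	spinlock_t *ptl;

	pr_debug("huge page size: %lu\n", huge_page_size(h));

	/* PMD-sized pages use the split pmd lock, others mm->page_table_lock. */
	ptl = huge_pte_lock(h, mm, ptep);
	/* ... inspect or modify *ptep here ... */
	spin_unlock(ptl);
}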
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
new file mode 100644
index 000000000..bcc853ecc
--- /dev/null
+++ b/include/linux/hugetlb_cgroup.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright IBM Corporation, 2012
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#ifndef _LINUX_HUGETLB_CGROUP_H
+#define _LINUX_HUGETLB_CGROUP_H
+
+#include <linux/mmdebug.h>
+
+struct hugetlb_cgroup;
+/*
+ * Minimum page order trackable by hugetlb cgroup.
+ * At least 3 pages are necessary for all the tracking information.
+ */
+#define HUGETLB_CGROUP_MIN_ORDER 2
+
+#ifdef CONFIG_CGROUP_HUGETLB
+
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHuge(page), page);
+
+ if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+ return NULL;
+ return (struct hugetlb_cgroup *)page[2].lru.next;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+ VM_BUG_ON_PAGE(!PageHuge(page), page);
+
+ if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
+ return -1;
+ page[2].lru.next = (void *)h_cg;
+ return 0;
+}
+
+static inline bool hugetlb_cgroup_disabled(void)
+{
+ if (hugetlb_cgrp_subsys.disabled)
+ return true;
+ return false;
+}
+
+extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr);
+extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page);
+extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
+ struct page *page);
+extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg);
+extern void hugetlb_cgroup_file_init(void) __init;
+extern void hugetlb_cgroup_migrate(struct page *oldhpage,
+ struct page *newhpage);
+
+#else
+static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
+{
+ return NULL;
+}
+
+static inline
+int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
+{
+ return 0;
+}
+
+static inline bool hugetlb_cgroup_disabled(void)
+{
+ return true;
+}
+
+static inline int
+hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup **ptr)
+{
+ return 0;
+}
+
+static inline void
+hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg,
+ struct page *page)
+{
+ return;
+}
+
+static inline void
+hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page)
+{
+ return;
+}
+
+static inline void
+hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
+ struct hugetlb_cgroup *h_cg)
+{
+ return;
+}
+
+static inline void hugetlb_cgroup_file_init(void)
+{
+}
+
+static inline void hugetlb_cgroup_migrate(struct page *oldhpage,
+ struct page *newhpage)
+{
+ return;
+}
+
+#endif /* CONFIG_CGROUP_HUGETLB */
+#endif /* _LINUX_HUGETLB_CGROUP_H */
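A sketch of the two-phase accounting pattern the charge/commit/uncharge trio implies; in the real kernel this ordering lives inside the hugetlb allocator itself, and the allocator call and NUMA node choice below are placeholders.

#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/topology.h>

static struct page *my_alloc_accounted_hpage(int idx, unsigned long nr_pages)
{
	struct hugetlb_cgroup *h_cg;
	struct page *page;

	/* Reserve the cgroup quota before committing to an allocation. */
	if (hugetlb_cgroup_charge_cgroup(idx, nr_pages, &h_cg))
		return NULL;

	page = alloc_huge_page_node(&hstates[idx], numa_node_id());
	if (!page) {
		hugetlb_cgroup_uncharge_cgroup(idx, nr_pages, h_cg);
		return NULL;
	}

	hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page);
	return page;
}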
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h
new file mode 100644
index 000000000..2bb681fbe
--- /dev/null
+++ b/include/linux/hugetlb_inline.h
@@ -0,0 +1,22 @@
+#ifndef _LINUX_HUGETLB_INLINE_H
+#define _LINUX_HUGETLB_INLINE_H
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#include <linux/mm.h>
+
+static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+{
+ return !!(vma->vm_flags & VM_HUGETLB);
+}
+
+#else
+
+static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
new file mode 100644
index 000000000..0464c85e6
--- /dev/null
+++ b/include/linux/hw_breakpoint.h
@@ -0,0 +1,127 @@
+#ifndef _LINUX_HW_BREAKPOINT_H
+#define _LINUX_HW_BREAKPOINT_H
+
+#include <linux/perf_event.h>
+#include <uapi/linux/hw_breakpoint.h>
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+extern int __init init_hw_breakpoint(void);
+
+static inline void hw_breakpoint_init(struct perf_event_attr *attr)
+{
+ memset(attr, 0, sizeof(*attr));
+
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(*attr);
+ /*
+ * As it's for in-kernel or ptrace use, we want it to be pinned
+ * and to have its callback called on every hit.
+ */
+ attr->pinned = 1;
+ attr->sample_period = 1;
+}
+
+static inline void ptrace_breakpoint_init(struct perf_event_attr *attr)
+{
+ hw_breakpoint_init(attr);
+ attr->exclude_kernel = 1;
+}
+
+static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
+{
+ return bp->attr.bp_addr;
+}
+
+static inline int hw_breakpoint_type(struct perf_event *bp)
+{
+ return bp->attr.bp_type;
+}
+
+static inline unsigned long hw_breakpoint_len(struct perf_event *bp)
+{
+ return bp->attr.bp_len;
+}
+
+extern struct perf_event *
+register_user_hw_breakpoint(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered,
+ void *context,
+ struct task_struct *tsk);
+
+/* FIXME: only change from the attr, and don't unregister */
+extern int
+modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
+
+/*
+ * Kernel breakpoints are not associated with any particular thread.
+ */
+extern struct perf_event *
+register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered,
+ void *context,
+ int cpu);
+
+extern struct perf_event * __percpu *
+register_wide_hw_breakpoint(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered,
+ void *context);
+
+extern int register_perf_hw_breakpoint(struct perf_event *bp);
+extern int __register_perf_hw_breakpoint(struct perf_event *bp);
+extern void unregister_hw_breakpoint(struct perf_event *bp);
+extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events);
+
+extern int dbg_reserve_bp_slot(struct perf_event *bp);
+extern int dbg_release_bp_slot(struct perf_event *bp);
+extern int reserve_bp_slot(struct perf_event *bp);
+extern void release_bp_slot(struct perf_event *bp);
+
+extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
+
+static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
+{
+ return &bp->hw.info;
+}
+
+#else /* !CONFIG_HAVE_HW_BREAKPOINT */
+
+static inline int __init init_hw_breakpoint(void) { return 0; }
+
+static inline struct perf_event *
+register_user_hw_breakpoint(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered,
+ void *context,
+ struct task_struct *tsk) { return NULL; }
+static inline int
+modify_user_hw_breakpoint(struct perf_event *bp,
+ struct perf_event_attr *attr) { return -ENOSYS; }
+static inline struct perf_event *
+register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered,
+ void *context,
+ int cpu) { return NULL; }
+static inline struct perf_event * __percpu *
+register_wide_hw_breakpoint(struct perf_event_attr *attr,
+ perf_overflow_handler_t triggered,
+ void *context) { return NULL; }
+static inline int
+register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
+static inline int
+__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
+static inline void unregister_hw_breakpoint(struct perf_event *bp) { }
+static inline void
+unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { }
+static inline int
+reserve_bp_slot(struct perf_event *bp) {return -ENOSYS; }
+static inline void release_bp_slot(struct perf_event *bp) { }
+
+static inline void flush_ptrace_hw_breakpoint(struct task_struct *tsk) { }
+
+static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* _LINUX_HW_BREAKPOINT_H */
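A sketch of the registration flow, assuming CONFIG_HAVE_HW_BREAKPOINT: seed the attr with hw_breakpoint_init(), describe the watchpoint, and hand it to register_user_hw_breakpoint(). The wrapper is hypothetical; the returned event is ERR_PTR-encoded on failure and is released with unregister_hw_breakpoint().

#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Arm a 4-byte write watchpoint on 'addr' for task 'tsk'. */
static struct perf_event *my_watch(unsigned long addr, struct task_struct *tsk,
				   perf_overflow_handler_t handler)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);		/* pinned, sample_period = 1 */
	attr.bp_addr = addr;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, handler, NULL, tsk);
}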
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
new file mode 100644
index 000000000..4f7d8f4b1
--- /dev/null
+++ b/include/linux/hw_random.h
@@ -0,0 +1,64 @@
+/*
+ Hardware Random Number Generator
+
+ Please read Documentation/hw_random.txt for details on use.
+
+ ----------------------------------------------------------
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ */
+
+#ifndef LINUX_HWRANDOM_H_
+#define LINUX_HWRANDOM_H_
+
+#include <linux/completion.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kref.h>
+
+/**
+ * struct hwrng - Hardware Random Number Generator driver
+ * @name: Unique RNG name.
+ * @init: Initialization callback (can be NULL).
+ * @cleanup: Cleanup callback (can be NULL).
+ * @data_present: Callback to determine if data is available
+ * on the RNG. If NULL, it is assumed that
+ * there is always data available. *OBSOLETE*
+ * @data_read: Read data from the RNG device.
+ * Returns the number of low-order random bytes placed in "data".
+ * Must not be NULL. *OBSOLETE*
+ * @read: New API. drivers can fill up to max bytes of data
+ * into the buffer. The buffer is aligned for any type.
+ * @priv: Private data, for use by the RNG driver.
+ * @quality: Estimation of true entropy in RNG's bitstream
+ * (per mill).
+ */
+struct hwrng {
+ const char *name;
+ int (*init)(struct hwrng *rng);
+ void (*cleanup)(struct hwrng *rng);
+ int (*data_present)(struct hwrng *rng, int wait);
+ int (*data_read)(struct hwrng *rng, u32 *data);
+ int (*read)(struct hwrng *rng, void *data, size_t max, bool wait);
+ unsigned long priv;
+ unsigned short quality;
+
+ /* internal. */
+ struct list_head list;
+ struct kref ref;
+ struct completion cleanup_done;
+};
+
+struct device;
+
+/** Register a new Hardware Random Number Generator driver. */
+extern int hwrng_register(struct hwrng *rng);
+extern int devm_hwrng_register(struct device *dev, struct hwrng *rng);
+/** Unregister a Hardware Random Number Generator driver. */
+extern void hwrng_unregister(struct hwrng *rng);
+extern void devm_hwrng_unregister(struct device *dev, struct hwrng *rng);
+/** Feed random bits into the pool. */
+extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy);
+
+#endif /* LINUX_HWRANDOM_H_ */
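A skeleton of the newer ".read" interface: the driver fills the caller's buffer and returns how many bytes it produced. The constant fill pattern is obviously not random; it only marks where real hardware access would go.

#include <linux/module.h>
#include <linux/string.h>
#include <linux/hw_random.h>

static int my_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	memset(data, 0xa5, max);	/* placeholder for reading the hardware FIFO */
	return max;
}

static struct hwrng my_rng = {
	.name		= "my-rng",
	.read		= my_rng_read,
	.quality	= 0,		/* credit no entropy to the input pool */
};

static int __init my_rng_init(void)
{
	return hwrng_register(&my_rng);
}

static void __exit my_rng_exit(void)
{
	hwrng_unregister(&my_rng);
}

module_init(my_rng_init);
module_exit(my_rng_exit);
MODULE_LICENSE("GPL");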
diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
new file mode 100644
index 000000000..1c7b89ae6
--- /dev/null
+++ b/include/linux/hwmon-sysfs.h
@@ -0,0 +1,57 @@
+/*
+ * hwmon-sysfs.h - hardware monitoring chip driver sysfs defines
+ *
+ * Copyright (C) 2005 Yani Ioannou <yani.ioannou@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _LINUX_HWMON_SYSFS_H
+#define _LINUX_HWMON_SYSFS_H
+
+#include <linux/device.h>
+
+struct sensor_device_attribute{
+ struct device_attribute dev_attr;
+ int index;
+};
+#define to_sensor_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
+
+#define SENSOR_ATTR(_name, _mode, _show, _store, _index) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .index = _index }
+
+#define SENSOR_DEVICE_ATTR(_name, _mode, _show, _store, _index) \
+struct sensor_device_attribute sensor_dev_attr_##_name \
+ = SENSOR_ATTR(_name, _mode, _show, _store, _index)
+
+struct sensor_device_attribute_2 {
+ struct device_attribute dev_attr;
+ u8 index;
+ u8 nr;
+};
+#define to_sensor_dev_attr_2(_dev_attr) \
+ container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
+
+#define SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .index = _index, \
+ .nr = _nr }
+
+#define SENSOR_DEVICE_ATTR_2(_name,_mode,_show,_store,_nr,_index) \
+struct sensor_device_attribute_2 sensor_dev_attr_##_name \
+ = SENSOR_ATTR_2(_name, _mode, _show, _store, _nr, _index)
+
+#endif /* _LINUX_HWMON_SYSFS_H */
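A sketch of how SENSOR_DEVICE_ATTR() is typically consumed: one show routine serves several sysfs files and tells them apart via the packed index. The temperature values are fabricated; the attribute array would normally feed a group passed to hwmon registration (see the hwmon.h sketch further down).

#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/hwmon-sysfs.h>

static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);

	/* Hypothetical reading for channel 'sattr->index', in millidegrees C. */
	return sprintf(buf, "%d\n", 42000 + sattr->index);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);

static struct attribute *my_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	NULL
};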
diff --git a/include/linux/hwmon-vid.h b/include/linux/hwmon-vid.h
new file mode 100644
index 000000000..da0a680e2
--- /dev/null
+++ b/include/linux/hwmon-vid.h
@@ -0,0 +1,45 @@
+/*
+ hwmon-vid.h - VID/VRM/VRD voltage conversions
+
+ Originally part of lm_sensors
+ Copyright (c) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
+ With assistance from Trent Piepho <xyzzy@speakeasy.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef _LINUX_HWMON_VID_H
+#define _LINUX_HWMON_VID_H
+
+int vid_from_reg(int val, u8 vrm);
+u8 vid_which_vrm(void);
+
+/* vrm is the VRM/VRD document version multiplied by 10.
+ val is in mV to avoid floating point in the kernel.
+ Returned value is the 4-, 5- or 6-bit VID code.
+ Note that only VRM 9.x is supported for now. */
+static inline int vid_to_reg(int val, u8 vrm)
+{
+ switch (vrm) {
+ case 91: /* VRM 9.1 */
+ case 90: /* VRM 9.0 */
+ return ((val >= 1100) && (val <= 1850) ?
+ ((18499 - val * 10) / 25 + 5) / 10 : -1);
+ default:
+ return -EINVAL;
+ }
+}
+
+#endif /* _LINUX_HWMON_VID_H */
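A small usage sketch: detect the VRM/VRD table once, then translate raw VID codes read from a monitoring chip into millivolts. Names are illustrative.

#include <linux/types.h>
#include <linux/hwmon-vid.h>

static u8 my_vrm;

static void my_vid_setup(void)
{
	my_vrm = vid_which_vrm();	/* VRM version detected from the CPU */
}

static int my_vcore_mv(u8 reg_val)
{
	return vid_from_reg(reg_val, my_vrm);	/* millivolts per that VRM table */
}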
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
new file mode 100644
index 000000000..09354f6c1
--- /dev/null
+++ b/include/linux/hwmon.h
@@ -0,0 +1,33 @@
+/*
+ hwmon.h - part of lm_sensors, Linux kernel modules for hardware monitoring
+
+ This file declares helper functions for the sysfs class "hwmon",
+ for use by sensors drivers.
+
+ Copyright (C) 2005 Mark M. Hoffman <mhoffman@lightlink.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+*/
+
+#ifndef _HWMON_H_
+#define _HWMON_H_
+
+struct device;
+struct attribute_group;
+
+struct device *hwmon_device_register(struct device *dev);
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups);
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+ void *drvdata,
+ const struct attribute_group **groups);
+
+void hwmon_device_unregister(struct device *dev);
+void devm_hwmon_device_unregister(struct device *dev);
+
+#endif
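A probe-time sketch: register a hwmon device whose attributes come from groups like the one built in the hwmon-sysfs.h example; the devm_ variant is unregistered automatically when the parent device goes away. The "mychip" name and the wrapper are placeholders.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>

static int my_hwmon_attach(struct device *dev, void *drvdata,
			   const struct attribute_group **groups)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "mychip",
							   drvdata, groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}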
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
new file mode 100644
index 000000000..3343298e4
--- /dev/null
+++ b/include/linux/hwspinlock.h
@@ -0,0 +1,313 @@
+/*
+ * Hardware spinlock public header
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Contact: Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_HWSPINLOCK_H
+#define __LINUX_HWSPINLOCK_H
+
+#include <linux/err.h>
+#include <linux/sched.h>
+
+/* hwspinlock mode argument */
+#define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */
+#define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */
+
+struct device;
+struct hwspinlock;
+struct hwspinlock_device;
+struct hwspinlock_ops;
+
+/**
+ * struct hwspinlock_pdata - platform data for hwspinlock drivers
+ * @base_id: base id for this hwspinlock device
+ *
+ * hwspinlock devices provide system-wide hardware locks that are used
+ * by remote processors that have no other way to achieve synchronization.
+ *
+ * To achieve that, each physical lock must have a system-wide id number
+ * that is agreed upon, otherwise remote processors can't possibly assume
+ * they're using the same hardware lock.
+ *
+ * Usually boards have a single hwspinlock device, which provides several
+ * hwspinlocks, and in this case, they can be trivially numbered 0 to
+ * (num-of-locks - 1).
+ *
+ * In case boards have several hwspinlocks devices, a different base id
+ * should be used for each hwspinlock device (they can't all use 0 as
+ * a starting id!).
+ *
+ * This platform data structure should be used to provide the base id
+ * for each device (which is trivially 0 when only a single hwspinlock
+ * device exists). It can be shared between different platforms, hence
+ * its location.
+ */
+struct hwspinlock_pdata {
+ int base_id;
+};
+
+#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
+
+int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+ const struct hwspinlock_ops *ops, int base_id, int num_locks);
+int hwspin_lock_unregister(struct hwspinlock_device *bank);
+struct hwspinlock *hwspin_lock_request(void);
+struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
+int hwspin_lock_free(struct hwspinlock *hwlock);
+int hwspin_lock_get_id(struct hwspinlock *hwlock);
+int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
+ unsigned long *);
+int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
+void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+
+#else /* !CONFIG_HWSPINLOCK */
+
+/*
+ * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
+ * enabled. We prefer to silently succeed in this case, and let the
+ * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
+ * required on a given setup, its users will still build and run.
+ *
+ * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
+ * we _do_ want users to fail (no point in registering hwspinlock instances if
+ * the framework is not available).
+ *
+ * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
+ * users. Others, which care, can still check this with IS_ERR.
+ */
+static inline struct hwspinlock *hwspin_lock_request(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int hwspin_lock_free(struct hwspinlock *hwlock)
+{
+ return 0;
+}
+
+static inline
+int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
+ int mode, unsigned long *flags)
+{
+ return 0;
+}
+
+static inline
+int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+ return 0;
+}
+
+static inline
+void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
+{
+}
+
+static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_HWSPINLOCK */
+
+/**
+ * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
+ * @hwlock: an hwspinlock which we want to trylock
+ * @flags: a pointer to where the caller's interrupt state will be saved at
+ *
+ * This function attempts to lock the underlying hwspinlock, and will
+ * immediately fail if the hwspinlock is already locked.
+ *
+ * Upon a successful return from this function, preemption and local
+ * interrupts are disabled (previous interrupts state is saved at @flags),
+ * so the caller must not sleep, and is advised to release the hwspinlock
+ * as soon as possible.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline
+int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock the underlying hwspinlock, and will
+ * immediately fail if the hwspinlock is already locked.
+ *
+ * Upon a successful return from this function, preemption and local
+ * interrupts are disabled, so the caller must not sleep, and is advised
+ * to release the hwspinlock as soon as possible.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_trylock() - attempt to lock a specific hwspinlock
+ * @hwlock: an hwspinlock which we want to trylock
+ *
+ * This function attempts to lock an hwspinlock, and will immediately fail
+ * if the hwspinlock is already taken.
+ *
+ * Upon a successful return from this function, preemption is disabled,
+ * so the caller must not sleep, and is advised to release the hwspinlock
+ * as soon as possible. This is required in order to minimize remote cores
+ * polling on the hardware interconnect.
+ *
+ * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
+ * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
+ */
+static inline int hwspin_trylock(struct hwspinlock *hwlock)
+{
+ return __hwspin_trylock(hwlock, 0, NULL);
+}
+
+/**
+ * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ * @flags: a pointer to where the caller's interrupt state will be saved at
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption and local interrupts
+ * are disabled (plus previous interrupt state is saved), so the caller must
+ * not sleep, and is advised to release the hwspinlock as soon as possible.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
+ unsigned int to, unsigned long *flags)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption and local interrupts
+ * are disabled so the caller must not sleep, and is advised to release the
+ * hwspinlock as soon as possible.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
+ * @hwlock: the hwspinlock to be locked
+ * @to: timeout value in msecs
+ *
+ * This function locks the underlying @hwlock. If the @hwlock
+ * is already taken, the function will busy loop waiting for it to
+ * be released, but give up when @to msecs have elapsed.
+ *
+ * Upon a successful return from this function, preemption is disabled
+ * so the caller must not sleep, and is advised to release the hwspinlock
+ * as soon as possible.
+ * This is required in order to minimize remote cores polling on the
+ * hardware interconnect.
+ *
+ * Returns 0 when the @hwlock was successfully taken, and an appropriate
+ * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
+ * busy after @to msecs). The function will never sleep.
+ */
+static inline
+int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
+{
+ return __hwspin_lock_timeout(hwlock, to, 0, NULL);
+}
+
+/**
+ * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ * @flags: previous caller's interrupt state to restore
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * restore the previous state of the local interrupts. It should be used
+ * to undo, e.g., hwspin_trylock_irqsave().
+ *
+ * @hwlock must be already locked before calling this function: it is a bug
+ * to call unlock on a @hwlock that is already unlocked.
+ */
+static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
+ unsigned long *flags)
+{
+ __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
+}
+
+/**
+ * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock, enable preemption and
+ * enable local interrupts. Should be used to undo hwspin_lock_irq().
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
+ * calling this function: it is a bug to call unlock on a @hwlock that is
+ * already unlocked.
+ */
+static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
+}
+
+/**
+ * hwspin_unlock() - unlock hwspinlock
+ * @hwlock: a previously-acquired hwspinlock which we want to unlock
+ *
+ * This function will unlock a specific hwspinlock and enable preemption
+ * back.
+ *
+ * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
+ * this function: it is a bug to call unlock on a @hwlock that is already
+ * unlocked.
+ */
+static inline void hwspin_unlock(struct hwspinlock *hwlock)
+{
+ __hwspin_unlock(hwlock, 0, NULL);
+}
+
+#endif /* __LINUX_HWSPINLOCK_H */
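A sketch of the intended flow: request a lock id agreed upon with the remote processor at init time, then take it with a timeout (interrupt state saved) around accesses to shared state. Lock id 7 and the 100 ms timeout are arbitrary; per the stub comment above, NULL-checking the request keeps the no-CONFIG_HWSPINLOCK case working.

#include <linux/hwspinlock.h>

static struct hwspinlock *my_hwlock;

static int my_hwlock_init(void)
{
	my_hwlock = hwspin_lock_request_specific(7);
	if (!my_hwlock)
		return -EBUSY;	/* lock missing or already owned by another driver */
	return 0;
}

static int my_update_shared_state(void)
{
	unsigned long flags;
	int ret;

	ret = hwspin_lock_timeout_irqsave(my_hwlock, 100, &flags);
	if (ret)
		return ret;	/* e.g. -ETIMEDOUT */

	/* ... touch the state shared with the remote processor ... */

	hwspin_unlock_irqrestore(my_hwlock, &flags);
	return 0;
}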
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
new file mode 100644
index 000000000..902c37aef
--- /dev/null
+++ b/include/linux/hyperv.h
@@ -0,0 +1,1256 @@
+/*
+ *
+ * Copyright (c) 2011, Microsoft Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ * Haiyang Zhang <haiyangz@microsoft.com>
+ * Hank Janssen <hjanssen@microsoft.com>
+ * K. Y. Srinivasan <kys@microsoft.com>
+ *
+ */
+
+#ifndef _HYPERV_H
+#define _HYPERV_H
+
+#include <uapi/linux/hyperv.h>
+
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+
+#define MAX_PAGE_BUFFER_COUNT 32
+#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
+
+#pragma pack(push, 1)
+
+/* Single-page buffer */
+struct hv_page_buffer {
+ u32 len;
+ u32 offset;
+ u64 pfn;
+};
+
+/* Multiple-page buffer */
+struct hv_multipage_buffer {
+ /* Length and Offset determine the # of pfns in the array */
+ u32 len;
+ u32 offset;
+ u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
+};
+
+/*
+ * Multiple-page buffer array; the pfn array is variable size:
+ * The number of entries in the PFN array is determined by
+ * "len" and "offset".
+ */
+struct hv_mpb_array {
+ /* Length and Offset determine the # of pfns in the array */
+ u32 len;
+ u32 offset;
+ u64 pfn_array[];
+};
+
+/* 0x18 includes the proprietary packet header */
+#define MAX_PAGE_BUFFER_PACKET (0x18 + \
+ (sizeof(struct hv_page_buffer) * \
+ MAX_PAGE_BUFFER_COUNT))
+#define MAX_MULTIPAGE_BUFFER_PACKET (0x18 + \
+ sizeof(struct hv_multipage_buffer))
+
+
+#pragma pack(pop)
+
+struct hv_ring_buffer {
+ /* Offset in bytes from the start of ring data below */
+ u32 write_index;
+
+ /* Offset in bytes from the start of ring data below */
+ u32 read_index;
+
+ u32 interrupt_mask;
+
+ /*
+ * Win8 uses some of the reserved bits to implement
+ * interrupt driven flow management. On the send side
+ * we can request that the receiver interrupt the sender
+ * when the ring transitions from being full to being able
+ * to handle a message of size "pending_send_sz".
+ *
+ * Add necessary state for this enhancement.
+ */
+ u32 pending_send_sz;
+
+ u32 reserved1[12];
+
+ union {
+ struct {
+ u32 feat_pending_send_sz:1;
+ };
+ u32 value;
+ } feature_bits;
+
+ /* Pad it to PAGE_SIZE so that data starts on page boundary */
+ u8 reserved2[4028];
+
+ /*
+ * Ring data starts here + RingDataStartOffset
+ * !!! DO NOT place any fields below this !!!
+ */
+ u8 buffer[0];
+} __packed;
+
+struct hv_ring_buffer_info {
+ struct hv_ring_buffer *ring_buffer;
+ u32 ring_size; /* Include the shared header */
+ spinlock_t ring_lock;
+
+ u32 ring_datasize; /* < ring_size */
+ u32 ring_data_startoffset;
+};
+
+/*
+ *
+ * hv_get_ringbuffer_availbytes()
+ *
+ * Get number of bytes available to read and to write to
+ * for the specified ring buffer
+ */
+static inline void
+hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
+ u32 *read, u32 *write)
+{
+ u32 read_loc, write_loc, dsize;
+
+ smp_read_barrier_depends();
+
+ /* Capture the read/write indices before they changed */
+ read_loc = rbi->ring_buffer->read_index;
+ write_loc = rbi->ring_buffer->write_index;
+ dsize = rbi->ring_datasize;
+
+ *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+ read_loc - write_loc;
+ *read = dsize - *write;
+}
+
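An illustrative caller of the helper above: check for enough write space before queueing a packet. Real vmbus code performs this check inside the ring-buffer layer, so the wrapper is hypothetical.

#include <linux/hyperv.h>

static bool my_ring_has_room(struct hv_ring_buffer_info *rbi, u32 needed)
{
	u32 avail_read, avail_write;

	hv_get_ringbuffer_availbytes(rbi, &avail_read, &avail_write);
	return avail_write >= needed;
}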
+/*
+ * The VMBUS version is a 32 bit entity broken up into
+ * two 16 bit quantities: major_number.minor_number.
+ *
+ * 0 . 13 (Windows Server 2008)
+ * 1 . 1 (Windows 7)
+ * 2 . 4 (Windows 8)
+ * 3 . 0 (Windows 8 R2)
+ */
+
+#define VERSION_WS2008 ((0 << 16) | (13))
+#define VERSION_WIN7 ((1 << 16) | (1))
+#define VERSION_WIN8 ((2 << 16) | (4))
+#define VERSION_WIN8_1 ((3 << 16) | (0))
+
+#define VERSION_INVAL -1
+
+#define VERSION_CURRENT VERSION_WIN8_1
+
+/* Make maximum size of pipe payload of 16K */
+#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
+
+/* Define PipeMode values. */
+#define VMBUS_PIPE_TYPE_BYTE 0x00000000
+#define VMBUS_PIPE_TYPE_MESSAGE 0x00000004
+
+/* The size of the user defined data buffer for non-pipe offers. */
+#define MAX_USER_DEFINED_BYTES 120
+
+/* The size of the user defined data buffer for pipe offers. */
+#define MAX_PIPE_USER_DEFINED_BYTES 116
+
+/*
+ * At the center of the Channel Management library is the Channel Offer. This
+ * struct contains the fundamental information about an offer.
+ */
+struct vmbus_channel_offer {
+ uuid_le if_type;
+ uuid_le if_instance;
+
+ /*
+ * These two fields are not currently used.
+ */
+ u64 reserved1;
+ u64 reserved2;
+
+ u16 chn_flags;
+ u16 mmio_megabytes; /* in bytes * 1024 * 1024 */
+
+ union {
+ /* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
+ struct {
+ unsigned char user_def[MAX_USER_DEFINED_BYTES];
+ } std;
+
+ /*
+ * Pipes:
+ * The following structure is an integrated pipe protocol, which
+ * is implemented on top of standard user-defined data. Pipe
+ * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
+ * use.
+ */
+ struct {
+ u32 pipe_mode;
+ unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
+ } pipe;
+ } u;
+ /*
+ * The sub_channel_index is defined in win8.
+ */
+ u16 sub_channel_index;
+ u16 reserved3;
+} __packed;
+
+/* Server Flags */
+#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
+#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
+#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
+#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
+#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
+#define VMBUS_CHANNEL_PARENT_OFFER 0x200
+#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
+
+struct vmpacket_descriptor {
+ u16 type;
+ u16 offset8;
+ u16 len8;
+ u16 flags;
+ u64 trans_id;
+} __packed;
+
+struct vmpacket_header {
+ u32 prev_pkt_start_offset;
+ struct vmpacket_descriptor descriptor;
+} __packed;
+
+struct vmtransfer_page_range {
+ u32 byte_count;
+ u32 byte_offset;
+} __packed;
+
+struct vmtransfer_page_packet_header {
+ struct vmpacket_descriptor d;
+ u16 xfer_pageset_id;
+ u8 sender_owns_set;
+ u8 reserved;
+ u32 range_cnt;
+ struct vmtransfer_page_range ranges[1];
+} __packed;
+
+struct vmgpadl_packet_header {
+ struct vmpacket_descriptor d;
+ u32 gpadl;
+ u32 reserved;
+} __packed;
+
+struct vmadd_remove_transfer_page_set {
+ struct vmpacket_descriptor d;
+ u32 gpadl;
+ u16 xfer_pageset_id;
+ u16 reserved;
+} __packed;
+
+/*
+ * This structure defines a range in guest physical space that can be made to
+ * look virtually contiguous.
+ */
+struct gpa_range {
+ u32 byte_count;
+ u32 byte_offset;
+ u64 pfn_array[0];
+};
+
+/*
+ * This is the format for an Establish Gpadl packet, which contains a handle by
+ * which this GPADL will be known and a set of GPA ranges associated with it.
+ * This can be converted to a MDL by the guest OS. If there are multiple GPA
+ * ranges, then the resulting MDL will be "chained," representing multiple VA
+ * ranges.
+ */
+struct vmestablish_gpadl {
+ struct vmpacket_descriptor d;
+ u32 gpadl;
+ u32 range_cnt;
+ struct gpa_range range[1];
+} __packed;
+
+/*
+ * This is the format for a Teardown Gpadl packet, which indicates that the
+ * GPADL handle in the Establish Gpadl packet will never be referenced again.
+ */
+struct vmteardown_gpadl {
+ struct vmpacket_descriptor d;
+ u32 gpadl;
+ u32 reserved; /* for alignment to an 8-byte boundary */
+} __packed;
+
+/*
+ * This is the format for a GPA-Direct packet, which contains a set of GPA
+ * ranges, in addition to commands and/or data.
+ */
+struct vmdata_gpa_direct {
+ struct vmpacket_descriptor d;
+ u32 reserved;
+ u32 range_cnt;
+ struct gpa_range range[1];
+} __packed;
+
+/* This is the format for an Additional Data Packet. */
+struct vmadditional_data {
+ struct vmpacket_descriptor d;
+ u64 total_bytes;
+ u32 offset;
+ u32 byte_cnt;
+ unsigned char data[1];
+} __packed;
+
+union vmpacket_largest_possible_header {
+ struct vmpacket_descriptor simple_hdr;
+ struct vmtransfer_page_packet_header xfer_page_hdr;
+ struct vmgpadl_packet_header gpadl_hdr;
+ struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
+ struct vmestablish_gpadl establish_gpadl_hdr;
+ struct vmteardown_gpadl teardown_gpadl_hdr;
+ struct vmdata_gpa_direct data_gpa_direct_hdr;
+};
+
+#define VMPACKET_DATA_START_ADDRESS(__packet) \
+ (void *)(((unsigned char *)__packet) + \
+ ((struct vmpacket_descriptor *)__packet)->offset8 * 8)
+
+#define VMPACKET_DATA_LENGTH(__packet) \
+ ((((struct vmpacket_descriptor *)__packet)->len8 - \
+ ((struct vmpacket_descriptor *)__packet)->offset8) * 8)
+
+#define VMPACKET_TRANSFER_MODE(__packet) \
+ (((struct vmpacket_descriptor *)__packet)->type)
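+
+/*
+ * Illustrative sketch (not part of this header): given a pointer to a
+ * received packet descriptor, the macros above locate the payload, which
+ * starts offset8 * 8 bytes into the packet and runs for
+ * (len8 - offset8) * 8 bytes.  The helper name below is hypothetical.
+ *
+ *	static void hv_example_locate_payload(struct vmpacket_descriptor *desc,
+ *					      void **data, u32 *data_len)
+ *	{
+ *		*data = VMPACKET_DATA_START_ADDRESS(desc);	// offset8 * 8 bytes in
+ *		*data_len = VMPACKET_DATA_LENGTH(desc);		// (len8 - offset8) * 8
+ *	}
+ */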
+
+enum vmbus_packet_type {
+ VM_PKT_INVALID = 0x0,
+ VM_PKT_SYNCH = 0x1,
+ VM_PKT_ADD_XFER_PAGESET = 0x2,
+ VM_PKT_RM_XFER_PAGESET = 0x3,
+ VM_PKT_ESTABLISH_GPADL = 0x4,
+ VM_PKT_TEARDOWN_GPADL = 0x5,
+ VM_PKT_DATA_INBAND = 0x6,
+ VM_PKT_DATA_USING_XFER_PAGES = 0x7,
+ VM_PKT_DATA_USING_GPADL = 0x8,
+ VM_PKT_DATA_USING_GPA_DIRECT = 0x9,
+ VM_PKT_CANCEL_REQUEST = 0xa,
+ VM_PKT_COMP = 0xb,
+ VM_PKT_DATA_USING_ADDITIONAL_PKT = 0xc,
+ VM_PKT_ADDITIONAL_DATA = 0xd
+};
+
+#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED 1
+
+
+/* Version 1 messages */
+enum vmbus_channel_message_type {
+ CHANNELMSG_INVALID = 0,
+ CHANNELMSG_OFFERCHANNEL = 1,
+ CHANNELMSG_RESCIND_CHANNELOFFER = 2,
+ CHANNELMSG_REQUESTOFFERS = 3,
+ CHANNELMSG_ALLOFFERS_DELIVERED = 4,
+ CHANNELMSG_OPENCHANNEL = 5,
+ CHANNELMSG_OPENCHANNEL_RESULT = 6,
+ CHANNELMSG_CLOSECHANNEL = 7,
+ CHANNELMSG_GPADL_HEADER = 8,
+ CHANNELMSG_GPADL_BODY = 9,
+ CHANNELMSG_GPADL_CREATED = 10,
+ CHANNELMSG_GPADL_TEARDOWN = 11,
+ CHANNELMSG_GPADL_TORNDOWN = 12,
+ CHANNELMSG_RELID_RELEASED = 13,
+ CHANNELMSG_INITIATE_CONTACT = 14,
+ CHANNELMSG_VERSION_RESPONSE = 15,
+ CHANNELMSG_UNLOAD = 16,
+#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
+ CHANNELMSG_VIEWRANGE_ADD = 17,
+ CHANNELMSG_VIEWRANGE_REMOVE = 18,
+#endif
+ CHANNELMSG_COUNT
+};
+
+struct vmbus_channel_message_header {
+ enum vmbus_channel_message_type msgtype;
+ u32 padding;
+} __packed;
+
+/* Query VMBus Version parameters */
+struct vmbus_channel_query_vmbus_version {
+ struct vmbus_channel_message_header header;
+ u32 version;
+} __packed;
+
+/* VMBus Version Supported parameters */
+struct vmbus_channel_version_supported {
+ struct vmbus_channel_message_header header;
+ u8 version_supported;
+} __packed;
+
+/* Offer Channel parameters */
+struct vmbus_channel_offer_channel {
+ struct vmbus_channel_message_header header;
+ struct vmbus_channel_offer offer;
+ u32 child_relid;
+ u8 monitorid;
+ /*
+ * win7 and beyond split this field into a bit field.
+ */
+ u8 monitor_allocated:1;
+ u8 reserved:7;
+ /*
+ * These are new fields added in win7 and later.
+ * Do not access these fields without checking the
+ * negotiated protocol.
+ *
+ * If "is_dedicated_interrupt" is set, we must not set the
+ * associated bit in the channel bitmap while sending the
+ * interrupt to the host.
+ *
+ * connection_id is to be used in signaling the host.
+ */
+ u16 is_dedicated_interrupt:1;
+ u16 reserved1:15;
+ u32 connection_id;
+} __packed;
+
+/* Rescind Offer parameters */
+struct vmbus_channel_rescind_offer {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+} __packed;
+
+/*
+ * Request Offer -- no parameters, SynIC message contains the partition ID
+ * Set Snoop -- no parameters, SynIC message contains the partition ID
+ * Clear Snoop -- no parameters, SynIC message contains the partition ID
+ * All Offers Delivered -- no parameters, SynIC message contains the partition
+ * ID
+ * Flush Client -- no parameters, SynIC message contains the partition ID
+ */
+
+/* Open Channel parameters */
+struct vmbus_channel_open_channel {
+ struct vmbus_channel_message_header header;
+
+ /* Identifies the specific VMBus channel that is being opened. */
+ u32 child_relid;
+
+ /* ID making a particular open request at a channel offer unique. */
+ u32 openid;
+
+ /* GPADL for the channel's ring buffer. */
+ u32 ringbuffer_gpadlhandle;
+
+ /*
+ * Starting with win8, this field will be used to specify
+ * the target virtual processor on which to deliver the interrupt for
+ * the host to guest communication.
+ * Prior to win8, incoming channel interrupts would only
+ * be delivered on cpu 0. Setting this value to 0 would
+ * preserve the earlier behavior.
+ */
+ u32 target_vp;
+
+ /*
+ * The upstream ring buffer begins at offset zero in the memory
+ * described by RingBufferGpadlHandle. The downstream ring buffer
+ * follows it at this offset (in pages).
+ */
+ u32 downstream_ringbuffer_pageoffset;
+
+ /* User-specific data to be passed along to the server endpoint. */
+ unsigned char userdata[MAX_USER_DEFINED_BYTES];
+} __packed;
+
+/* Open Channel Result parameters */
+struct vmbus_channel_open_result {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 openid;
+ u32 status;
+} __packed;
+
+/* Close Channel parameters */
+struct vmbus_channel_close_channel {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+} __packed;
+
+/* Channel Message GPADL */
+#define GPADL_TYPE_RING_BUFFER 1
+#define GPADL_TYPE_SERVER_SAVE_AREA 2
+#define GPADL_TYPE_TRANSACTION 8
+
+/*
+ * The number of PFNs in a GPADL message is defined by the number of
+ * pages that would be spanned by ByteCount and ByteOffset. If the
+ * implied number of PFNs won't fit in this packet, there will be a
+ * follow-up packet that contains more.
+ */
+struct vmbus_channel_gpadl_header {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
+ u16 range_buflen;
+ u16 rangecount;
+ struct gpa_range range[0];
+} __packed;
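+
+/*
+ * Illustrative sketch (not part of this header): the number of PFNs implied
+ * by a gpa_range is the number of pages spanned by byte_offset + byte_count,
+ * assuming the usual PAGE_SIZE/PAGE_SHIFT helpers.  Any PFNs that do not fit
+ * in the header are carried by the body packet defined below.
+ *
+ *	static u32 gpa_range_pfn_count(const struct gpa_range *range)
+ *	{
+ *		u64 span = (u64)range->byte_offset + range->byte_count;
+ *
+ *		return (u32)((span + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ *	}
+ */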
+
+/* This is the followup packet that contains more PFNs. */
+struct vmbus_channel_gpadl_body {
+ struct vmbus_channel_message_header header;
+ u32 msgnumber;
+ u32 gpadl;
+ u64 pfn[0];
+} __packed;
+
+struct vmbus_channel_gpadl_created {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
+ u32 creation_status;
+} __packed;
+
+struct vmbus_channel_gpadl_teardown {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 gpadl;
+} __packed;
+
+struct vmbus_channel_gpadl_torndown {
+ struct vmbus_channel_message_header header;
+ u32 gpadl;
+} __packed;
+
+#ifdef VMBUS_FEATURE_PARENT_OR_PEER_MEMORY_MAPPED_INTO_A_CHILD
+struct vmbus_channel_view_range_add {
+ struct vmbus_channel_message_header header;
+ PHYSICAL_ADDRESS viewrange_base;
+ u64 viewrange_length;
+ u32 child_relid;
+} __packed;
+
+struct vmbus_channel_view_range_remove {
+ struct vmbus_channel_message_header header;
+ PHYSICAL_ADDRESS viewrange_base;
+ u32 child_relid;
+} __packed;
+#endif
+
+struct vmbus_channel_relid_released {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+} __packed;
+
+struct vmbus_channel_initiate_contact {
+ struct vmbus_channel_message_header header;
+ u32 vmbus_version_requested;
+ u32 target_vcpu; /* The VCPU the host should respond to */
+ u64 interrupt_page;
+ u64 monitor_page1;
+ u64 monitor_page2;
+} __packed;
+
+struct vmbus_channel_version_response {
+ struct vmbus_channel_message_header header;
+ u8 version_supported;
+} __packed;
+
+enum vmbus_channel_state {
+ CHANNEL_OFFER_STATE,
+ CHANNEL_OPENING_STATE,
+ CHANNEL_OPEN_STATE,
+ CHANNEL_OPENED_STATE,
+};
+
+/*
+ * Represents each channel msg on the vmbus connection. This is a
+ * variable-size data structure depending on the msg type itself.
+ */
+struct vmbus_channel_msginfo {
+ /* Bookkeeping stuff */
+ struct list_head msglistentry;
+
+ /* So far, this is only used to handle gpadl body message */
+ struct list_head submsglist;
+
+ /* Synchronize the request/response if needed */
+ struct completion waitevent;
+ union {
+ struct vmbus_channel_version_supported version_supported;
+ struct vmbus_channel_open_result open_result;
+ struct vmbus_channel_gpadl_torndown gpadl_torndown;
+ struct vmbus_channel_gpadl_created gpadl_created;
+ struct vmbus_channel_version_response version_response;
+ } response;
+
+ u32 msgsize;
+ /*
+ * The channel message that goes out on the "wire".
+ * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
+ */
+ unsigned char msg[0];
+};
+
+struct vmbus_close_msg {
+ struct vmbus_channel_msginfo info;
+ struct vmbus_channel_close_channel msg;
+};
+
+/* Define connection identifier type. */
+union hv_connection_id {
+ u32 asu32;
+ struct {
+ u32 id:24;
+ u32 reserved:8;
+ } u;
+};
+
+/* Definition of the hv_signal_event hypercall input structure. */
+struct hv_input_signal_event {
+ union hv_connection_id connectionid;
+ u16 flag_number;
+ u16 rsvdz;
+};
+
+struct hv_input_signal_event_buffer {
+ u64 align8;
+ struct hv_input_signal_event event;
+};
+
+struct vmbus_channel {
+ /* Unique channel id */
+ int id;
+
+ struct list_head listentry;
+
+ struct hv_device *device_obj;
+
+ enum vmbus_channel_state state;
+
+ struct vmbus_channel_offer_channel offermsg;
+ /*
+ * These are based on the OfferMsg.MonitorId.
+ * Save it here for easy access.
+ */
+ u8 monitor_grp;
+ u8 monitor_bit;
+
+ bool rescind; /* got rescind msg */
+
+ u32 ringbuffer_gpadlhandle;
+
+ /* Allocated memory for ring buffer */
+ void *ringbuffer_pages;
+ u32 ringbuffer_pagecount;
+ struct hv_ring_buffer_info outbound; /* send to parent */
+ struct hv_ring_buffer_info inbound; /* receive from parent */
+ spinlock_t inbound_lock;
+
+ struct vmbus_close_msg close_msg;
+
+ /* Channel callbacks are invoked in this workqueue context */
+ /* HANDLE dataWorkQueue; */
+
+ void (*onchannel_callback)(void *context);
+ void *channel_callback_context;
+
+ /*
+ * A channel can be marked for efficient (batched)
+ * reading:
+ * If batched_reading is set to "true", we read until the
+ * channel is empty and hold off interrupts from the host
+ * during the entire read process.
+ * If batched_reading is set to "false", the client is not
+ * going to perform batched reading.
+ *
+ * By default we will enable batched reading; specific
+ * drivers that don't want this behavior can turn it off.
+ */
+
+ bool batched_reading;
+
+ bool is_dedicated_interrupt;
+ struct hv_input_signal_event_buffer sig_buf;
+ struct hv_input_signal_event *sig_event;
+
+ /*
+ * Starting with win8, this field will be used to specify
+ * the target virtual processor on which to deliver the interrupt for
+ * the host to guest communication.
+ * Prior to win8, incoming channel interrupts would only
+ * be delivered on cpu 0. Setting this value to 0 would
+ * preserve the earlier behavior.
+ */
+ u32 target_vp;
+ /* The corresponding CPUID in the guest */
+ u32 target_cpu;
+ /*
+ * Support for sub-channels. For high performance devices,
+ * it will be useful to have multiple sub-channels to support
+ * a scalable communication infrastructure with the host.
+ * The support for sub-channels is implemented as an extension
+ * to the current infrastructure.
+ * The initial offer is considered the primary channel and this
+ * offer message will indicate if the host supports sub-channels.
+ * The guest is free to ask for sub-channels to be offered and can
+ * open these sub-channels as a normal "primary" channel. However,
+ * all sub-channels will have the same type and instance guids as the
+ * primary channel. Requests sent on a given channel will result in a
+ * response on the same channel.
+ */
+
+ /*
+ * Sub-channel creation callback. This callback will be called in
+ * process context when a sub-channel offer is received from the host.
+ * The guest can open the sub-channel in the context of this callback.
+ */
+ void (*sc_creation_callback)(struct vmbus_channel *new_sc);
+
+ /*
+ * The spinlock to protect the structure. It is being used to protect
+ * test-and-set access to various attributes of the structure as well
+ * as all sc_list operations.
+ */
+ spinlock_t lock;
+ /*
+ * All Sub-channels of a primary channel are linked here.
+ */
+ struct list_head sc_list;
+ /*
+ * The primary channel this sub-channel belongs to.
+ * This will be NULL for the primary channel.
+ */
+ struct vmbus_channel *primary_channel;
+ /*
+ * Support per-channel state for use by vmbus drivers.
+ */
+ void *per_channel_state;
+ /*
+ * To support per-cpu lookup mapping of relid to channel,
+ * link up channels based on their CPU affinity.
+ */
+ struct list_head percpu_list;
+
+ int num_sc;
+ int next_oc;
+};
+
+static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
+{
+ c->batched_reading = state;
+}
+
+static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
+{
+ c->per_channel_state = s;
+}
+
+static inline void *get_per_channel_state(struct vmbus_channel *c)
+{
+ return c->per_channel_state;
+}
+
+void vmbus_onmessage(void *context);
+
+int vmbus_request_offers(void);
+
+/*
+ * APIs for managing sub-channels.
+ */
+
+void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
+ void (*sc_cr_cb)(struct vmbus_channel *new_sc));
+
+/*
+ * Retrieve the (sub) channel on which to send an outgoing request.
+ * When a primary channel has multiple sub-channels, we choose a
+ * channel whose VCPU binding is closest to the VCPU on which
+ * this call is being made.
+ */
+struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
+
+/*
+ * Check if sub-channels have already been offered. This API will be useful
+ * when the driver is unloaded after establishing sub-channels. In this case,
+ * when the driver is re-loaded, the driver would have to check if the
+ * subchannels have already been established before attempting to request
+ * the creation of sub-channels.
+ * This function returns TRUE to indicate that subchannels have already been
+ * created.
+ * This function should be invoked after setting the callback function for
+ * sub-channel creation.
+ */
+bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
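+
+/*
+ * Illustrative sketch (not part of this header): a typical primary-channel
+ * setup registers the sub-channel creation callback first and then checks
+ * whether sub-channels were already offered, e.g. after a driver reload.
+ * The function names below are hypothetical.
+ *
+ *	static void example_sc_create(struct vmbus_channel *new_sc)
+ *	{
+ *		// open new_sc here, just like a "primary" channel
+ *	}
+ *
+ *	static void example_setup_subchannels(struct vmbus_channel *primary)
+ *	{
+ *		vmbus_set_sc_create_callback(primary, example_sc_create);
+ *		if (vmbus_are_subchannels_present(primary))
+ *			return;		// sub-channels already established
+ *		// otherwise ask the host to offer sub-channels here
+ *	}
+ */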
+
+/* The format must be the same as struct vmdata_gpa_direct */
+struct vmbus_channel_packet_page_buffer {
+ u16 type;
+ u16 dataoffset8;
+ u16 length8;
+ u16 flags;
+ u64 transactionid;
+ u32 reserved;
+ u32 rangecount;
+ struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
+} __packed;
+
+/* The format must be the same as struct vmdata_gpa_direct */
+struct vmbus_channel_packet_multipage_buffer {
+ u16 type;
+ u16 dataoffset8;
+ u16 length8;
+ u16 flags;
+ u64 transactionid;
+ u32 reserved;
+ u32 rangecount; /* Always 1 in this case */
+ struct hv_multipage_buffer range;
+} __packed;
+
+/* The format must be the same as struct vmdata_gpa_direct */
+struct vmbus_packet_mpb_array {
+ u16 type;
+ u16 dataoffset8;
+ u16 length8;
+ u16 flags;
+ u64 transactionid;
+ u32 reserved;
+ u32 rangecount; /* Always 1 in this case */
+ struct hv_mpb_array range;
+} __packed;
+
+
+extern int vmbus_open(struct vmbus_channel *channel,
+ u32 send_ringbuffersize,
+ u32 recv_ringbuffersize,
+ void *userdata,
+ u32 userdatalen,
+ void(*onchannel_callback)(void *context),
+ void *context);
+
+extern void vmbus_close(struct vmbus_channel *channel);
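+
+/*
+ * Illustrative sketch (not part of this header): opening a channel with
+ * 4-page send/receive ring buffers and a driver-private context.  The ring
+ * sizes, callback and helper names below are arbitrary example values.
+ *
+ *	static void example_onchannel(void *context)
+ *	{
+ *		struct hv_device *dev = context;
+ *		// read packets from dev->channel here
+ *	}
+ *
+ *	static int example_open(struct hv_device *dev)
+ *	{
+ *		int ret;
+ *
+ *		ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
+ *				 NULL, 0, example_onchannel, dev);
+ *		if (ret)
+ *			return ret;
+ *		// ... use the channel; call vmbus_close(dev->channel) on teardown
+ *		return 0;
+ *	}
+ */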
+
+extern int vmbus_sendpacket(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ enum vmbus_packet_type type,
+ u32 flags);
+
+extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferLen,
+ u64 requestid,
+ enum vmbus_packet_type type,
+ u32 flags,
+ bool kick_q);
+
+extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid);
+
+extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
+ struct hv_page_buffer pagebuffers[],
+ u32 pagecount,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid,
+ u32 flags,
+ bool kick_q);
+
+extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
+ struct hv_multipage_buffer *mpb,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid);
+
+extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
+ struct vmbus_packet_mpb_array *mpb,
+ u32 desc_size,
+ void *buffer,
+ u32 bufferlen,
+ u64 requestid);
+
+extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
+ void *kbuffer,
+ u32 size,
+ u32 *gpadl_handle);
+
+extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
+ u32 gpadl_handle);
+
+extern int vmbus_recvpacket(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferlen,
+ u32 *buffer_actual_len,
+ u64 *requestid);
+
+extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
+ void *buffer,
+ u32 bufferlen,
+ u32 *buffer_actual_len,
+ u64 *requestid);
+
+
+extern void vmbus_ontimer(unsigned long data);
+
+/* Base driver object */
+struct hv_driver {
+ const char *name;
+
+ /* the device type supported by this driver */
+ uuid_le dev_type;
+ const struct hv_vmbus_device_id *id_table;
+
+ struct device_driver driver;
+
+ int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
+ int (*remove)(struct hv_device *);
+ void (*shutdown)(struct hv_device *);
+
+};
+
+/* Base device object */
+struct hv_device {
+ /* the device type id of this device */
+ uuid_le dev_type;
+
+ /* the device instance id of this device */
+ uuid_le dev_instance;
+
+ struct device device;
+
+ struct vmbus_channel *channel;
+};
+
+
+static inline struct hv_device *device_to_hv_device(struct device *d)
+{
+ return container_of(d, struct hv_device, device);
+}
+
+static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
+{
+ return container_of(d, struct hv_driver, driver);
+}
+
+static inline void hv_set_drvdata(struct hv_device *dev, void *data)
+{
+ dev_set_drvdata(&dev->device, data);
+}
+
+static inline void *hv_get_drvdata(struct hv_device *dev)
+{
+ return dev_get_drvdata(&dev->device);
+}
+
+/* Vmbus interface */
+#define vmbus_driver_register(driver) \
+ __vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
+int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
+ struct module *owner,
+ const char *mod_name);
+void vmbus_driver_unregister(struct hv_driver *hv_driver);
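+
+/*
+ * Illustrative sketch (not part of this header): a minimal VMBus driver
+ * fills in an hv_driver and registers it from module init.  All names are
+ * hypothetical; error handling is elided.
+ *
+ *	static int example_probe(struct hv_device *dev,
+ *				 const struct hv_vmbus_device_id *id)
+ *	{
+ *		return 0;	// set up the device here
+ *	}
+ *
+ *	static int example_remove(struct hv_device *dev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct hv_driver example_drv = {
+ *		.name		= "example",
+ *		.id_table	= example_id_table,	// built with VMBUS_DEVICE(), see below
+ *		.probe		= example_probe,
+ *		.remove		= example_remove,
+ *	};
+ *
+ *	// in module init/exit:
+ *	//	vmbus_driver_register(&example_drv);
+ *	//	vmbus_driver_unregister(&example_drv);
+ */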
+
+/**
+ * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
+ *
+ * This macro is used to create a struct hv_vmbus_device_id that matches a
+ * specific device.
+ */
+#define VMBUS_DEVICE(g0, g1, g2, g3, g4, g5, g6, g7, \
+ g8, g9, ga, gb, gc, gd, ge, gf) \
+ .guid = { g0, g1, g2, g3, g4, g5, g6, g7, \
+ g8, g9, ga, gb, gc, gd, ge, gf },
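+
+/*
+ * Illustrative sketch (not part of this header): an id table built with
+ * VMBUS_DEVICE(), listing the interface-type GUID bytes the driver binds to
+ * (here the network GUID defined below) and terminated by an empty entry.
+ *
+ *	static const struct hv_vmbus_device_id example_id_table[] = {
+ *		{ VMBUS_DEVICE(0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46,
+ *			       0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) },
+ *		{ },
+ *	};
+ */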
+
+/*
+ * GUID definitions of various offer types - services offered to the guest.
+ */
+
+/*
+ * Network GUID
+ * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
+ */
+#define HV_NIC_GUID \
+ .guid = { \
+ 0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \
+ 0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \
+ }
+
+/*
+ * IDE GUID
+ * {32412632-86cb-44a2-9b5c-50d1417354f5}
+ */
+#define HV_IDE_GUID \
+ .guid = { \
+ 0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \
+ 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \
+ }
+
+/*
+ * SCSI GUID
+ * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
+ */
+#define HV_SCSI_GUID \
+ .guid = { \
+ 0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \
+ 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \
+ }
+
+/*
+ * Shutdown GUID
+ * {0e0b6031-5213-4934-818b-38d90ced39db}
+ */
+#define HV_SHUTDOWN_GUID \
+ .guid = { \
+ 0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \
+ 0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \
+ }
+
+/*
+ * Time Synch GUID
+ * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
+ */
+#define HV_TS_GUID \
+ .guid = { \
+ 0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \
+ 0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \
+ }
+
+/*
+ * Heartbeat GUID
+ * {57164f39-9115-4e78-ab55-382f3bd5422d}
+ */
+#define HV_HEART_BEAT_GUID \
+ .guid = { \
+ 0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \
+ 0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \
+ }
+
+/*
+ * KVP GUID
+ * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
+ */
+#define HV_KVP_GUID \
+ .guid = { \
+ 0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \
+ 0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6 \
+ }
+
+/*
+ * Dynamic memory GUID
+ * {525074dc-8985-46e2-8057-a307dc18a502}
+ */
+#define HV_DM_GUID \
+ .guid = { \
+ 0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \
+ 0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \
+ }
+
+/*
+ * Mouse GUID
+ * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
+ */
+#define HV_MOUSE_GUID \
+ .guid = { \
+ 0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \
+ 0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \
+ }
+
+/*
+ * VSS (Backup/Restore) GUID
+ */
+#define HV_VSS_GUID \
+ .guid = { \
+ 0x29, 0x2e, 0xfa, 0x35, 0x23, 0xea, 0x36, 0x42, \
+ 0x96, 0xae, 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40 \
+ }
+/*
+ * Synthetic Video GUID
+ * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
+ */
+#define HV_SYNTHVID_GUID \
+ .guid = { \
+ 0x02, 0x78, 0x0a, 0xda, 0x77, 0xe3, 0xac, 0x4a, \
+ 0x8e, 0x77, 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8 \
+ }
+
+/*
+ * Synthetic FC GUID
+ * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
+ */
+#define HV_SYNTHFC_GUID \
+ .guid = { \
+ 0x4A, 0xCC, 0x9B, 0x2F, 0x69, 0x00, 0xF3, 0x4A, \
+ 0xB7, 0x6B, 0x6F, 0xD0, 0xBE, 0x52, 0x8C, 0xDA \
+ }
+
+/*
+ * Guest File Copy Service
+ * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
+ */
+
+#define HV_FCOPY_GUID \
+ .guid = { \
+ 0xE3, 0x4B, 0xD1, 0x34, 0xE4, 0xDE, 0xC8, 0x41, \
+ 0x9A, 0xE7, 0x6B, 0x17, 0x49, 0x77, 0xC1, 0x92 \
+ }
+
+/*
+ * NetworkDirect. This is the guest RDMA service.
+ * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
+ */
+#define HV_ND_GUID \
+ .guid = { \
+ 0x3d, 0xaf, 0x2e, 0x8c, 0xa7, 0x32, 0x09, 0x4b, \
+ 0xab, 0x99, 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01 \
+ }
+
+/*
+ * Common header for Hyper-V ICs
+ */
+
+#define ICMSGTYPE_NEGOTIATE 0
+#define ICMSGTYPE_HEARTBEAT 1
+#define ICMSGTYPE_KVPEXCHANGE 2
+#define ICMSGTYPE_SHUTDOWN 3
+#define ICMSGTYPE_TIMESYNC 4
+#define ICMSGTYPE_VSS 5
+
+#define ICMSGHDRFLAG_TRANSACTION 1
+#define ICMSGHDRFLAG_REQUEST 2
+#define ICMSGHDRFLAG_RESPONSE 4
+
+
+/*
+ * While we want to handle util services as regular devices,
+ * there is only one instance of each of these services; so
+ * we statically allocate the service specific state.
+ */
+
+struct hv_util_service {
+ u8 *recv_buffer;
+ void (*util_cb)(void *);
+ int (*util_init)(struct hv_util_service *);
+ void (*util_deinit)(void);
+};
+
+struct vmbuspipe_hdr {
+ u32 flags;
+ u32 msgsize;
+} __packed;
+
+struct ic_version {
+ u16 major;
+ u16 minor;
+} __packed;
+
+struct icmsg_hdr {
+ struct ic_version icverframe;
+ u16 icmsgtype;
+ struct ic_version icvermsg;
+ u16 icmsgsize;
+ u32 status;
+ u8 ictransaction_id;
+ u8 icflags;
+ u8 reserved[2];
+} __packed;
+
+struct icmsg_negotiate {
+ u16 icframe_vercnt;
+ u16 icmsg_vercnt;
+ u32 reserved;
+ struct ic_version icversion_data[1]; /* any size array */
+} __packed;
+
+struct shutdown_msg_data {
+ u32 reason_code;
+ u32 timeout_seconds;
+ u32 flags;
+ u8 display_message[2048];
+} __packed;
+
+struct heartbeat_msg_data {
+ u64 seq_num;
+ u32 reserved[8];
+} __packed;
+
+/* Time Sync IC defs */
+#define ICTIMESYNCFLAG_PROBE 0
+#define ICTIMESYNCFLAG_SYNC 1
+#define ICTIMESYNCFLAG_SAMPLE 2
+
+#ifdef __x86_64__
+#define WLTIMEDELTA 116444736000000000L /* in 100ns unit */
+#else
+#define WLTIMEDELTA 116444736000000000LL
+#endif
+
+struct ictimesync_data {
+ u64 parenttime;
+ u64 childtime;
+ u64 roundtriptime;
+ u8 flags;
+} __packed;
+
+struct hyperv_service_callback {
+ u8 msg_type;
+ char *log_msg;
+ uuid_le data;
+ struct vmbus_channel *channel;
+ void (*callback) (void *context);
+};
+
+#define MAX_SRV_VER 0x7ffffff
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
+ struct icmsg_negotiate *, u8 *, int,
+ int);
+
+int hv_kvp_init(struct hv_util_service *);
+void hv_kvp_deinit(void);
+void hv_kvp_onchannelcallback(void *);
+
+int hv_vss_init(struct hv_util_service *);
+void hv_vss_deinit(void);
+void hv_vss_onchannelcallback(void *);
+void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
+
+extern struct resource hyperv_mmio;
+
+/*
+ * Negotiated version with the Host.
+ */
+
+extern __u32 vmbus_proto_version;
+
+#endif /* _HYPERV_H */
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h
new file mode 100644
index 000000000..63904ba68
--- /dev/null
+++ b/include/linux/i2c-algo-bit.h
@@ -0,0 +1,55 @@
+/* ------------------------------------------------------------------------- */
+/* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */
+/* ------------------------------------------------------------------------- */
+/* Copyright (C) 1995-99 Simon G. Vogl
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301 USA. */
+/* ------------------------------------------------------------------------- */
+
+/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
+ Frodo Looijaard <frodol@dds.nl> */
+
+#ifndef _LINUX_I2C_ALGO_BIT_H
+#define _LINUX_I2C_ALGO_BIT_H
+
+/* --- Defines for bit-adapters --------------------------------------- */
+/*
+ * This struct contains the hw-dependent functions of bit-style adapters to
+ * manipulate the line states, and to init any hw-specific features. This is
+ * only used if you have more than one hw-type of adapter running.
+ */
+struct i2c_algo_bit_data {
+ void *data; /* private data for lowlevel routines */
+ void (*setsda) (void *data, int state);
+ void (*setscl) (void *data, int state);
+ int (*getsda) (void *data);
+ int (*getscl) (void *data);
+ int (*pre_xfer) (struct i2c_adapter *);
+ void (*post_xfer) (struct i2c_adapter *);
+
+ /* local settings */
+ int udelay; /* half clock cycle time in us,
+ minimum 2 us for fast-mode I2C,
+ minimum 5 us for standard-mode I2C and SMBus,
+ maximum 50 us for SMBus */
+ int timeout; /* in jiffies */
+};
+
+int i2c_bit_add_bus(struct i2c_adapter *);
+int i2c_bit_add_numbered_bus(struct i2c_adapter *);
+extern const struct i2c_algorithm i2c_bit_algo;
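+
+/*
+ * Illustrative sketch (not part of this header): an adapter driver supplies
+ * the line-manipulation callbacks and registers the bus through
+ * i2c_bit_add_bus().  The helper functions named below are hypothetical.
+ *
+ *	static struct i2c_algo_bit_data example_bit_data = {
+ *		.setsda	= example_setsda,	// drive or release SDA
+ *		.setscl	= example_setscl,	// drive or release SCL
+ *		.getsda	= example_getsda,	// sample SDA
+ *		.getscl	= example_getscl,	// sample SCL
+ *		.udelay	= 5,			// 5 us half cycle, ~100 kHz
+ *		.timeout = HZ / 10,
+ *	};
+ *
+ *	static struct i2c_adapter example_adap = {
+ *		.owner		= THIS_MODULE,
+ *		.name		= "example bit-bang bus",
+ *		.algo_data	= &example_bit_data,
+ *	};
+ *
+ *	// then: i2c_bit_add_bus(&example_adap);
+ */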
+
+#endif /* _LINUX_I2C_ALGO_BIT_H */
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
new file mode 100644
index 000000000..a3c3ecd59
--- /dev/null
+++ b/include/linux/i2c-algo-pca.h
@@ -0,0 +1,71 @@
+#ifndef _LINUX_I2C_ALGO_PCA_H
+#define _LINUX_I2C_ALGO_PCA_H
+
+/* Chips known to the pca algo */
+#define I2C_PCA_CHIP_9564 0x00
+#define I2C_PCA_CHIP_9665 0x01
+
+/* Internal period for PCA9665 oscillator */
+#define I2C_PCA_OSC_PER 3 /* in units of 10^-8 s */
+
+/* Clock speeds for the bus for PCA9564 */
+#define I2C_PCA_CON_330kHz 0x00
+#define I2C_PCA_CON_288kHz 0x01
+#define I2C_PCA_CON_217kHz 0x02
+#define I2C_PCA_CON_146kHz 0x03
+#define I2C_PCA_CON_88kHz 0x04
+#define I2C_PCA_CON_59kHz 0x05
+#define I2C_PCA_CON_44kHz 0x06
+#define I2C_PCA_CON_36kHz 0x07
+
+/* PCA9564 registers */
+#define I2C_PCA_STA 0x00 /* STATUS Read Only */
+#define I2C_PCA_TO 0x00 /* TIMEOUT Write Only */
+#define I2C_PCA_DAT 0x01 /* DATA Read/Write */
+#define I2C_PCA_ADR 0x02 /* OWN ADR Read/Write */
+#define I2C_PCA_CON 0x03 /* CONTROL Read/Write */
+
+/* PCA9665 registers */
+#define I2C_PCA_INDPTR 0x00 /* INDIRECT Pointer Write Only */
+#define I2C_PCA_IND 0x02 /* INDIRECT Read/Write */
+
+/* PCA9665 indirect registers */
+#define I2C_PCA_ICOUNT 0x00 /* Byte Count for buffered mode */
+#define I2C_PCA_IADR 0x01 /* OWN ADR */
+#define I2C_PCA_ISCLL 0x02 /* SCL LOW period */
+#define I2C_PCA_ISCLH 0x03 /* SCL HIGH period */
+#define I2C_PCA_ITO 0x04 /* TIMEOUT */
+#define I2C_PCA_IPRESET 0x05 /* Parallel bus reset */
+#define I2C_PCA_IMODE 0x06 /* I2C Bus mode */
+
+/* PCA9665 I2C bus mode */
+#define I2C_PCA_MODE_STD 0x00 /* Standard mode */
+#define I2C_PCA_MODE_FAST 0x01 /* Fast mode */
+#define I2C_PCA_MODE_FASTP 0x02 /* Fast Plus mode */
+#define I2C_PCA_MODE_TURBO 0x03 /* Turbo mode */
+
+
+#define I2C_PCA_CON_AA 0x80 /* Assert Acknowledge */
+#define I2C_PCA_CON_ENSIO 0x40 /* Enable */
+#define I2C_PCA_CON_STA 0x20 /* Start */
+#define I2C_PCA_CON_STO 0x10 /* Stop */
+#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
+#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
+
+struct i2c_algo_pca_data {
+ void *data; /* private low level data */
+ void (*write_byte) (void *data, int reg, int val);
+ int (*read_byte) (void *data, int reg);
+ int (*wait_for_completion) (void *data);
+ void (*reset_chip) (void *data);
+ /* For PCA9564, use one of the predefined frequencies:
+ * 330000, 288000, 217000, 146000, 88000, 59000, 44000, 36000
+ * For PCA9665, use the frequency you want here. */
+ unsigned int i2c_clock;
+ unsigned int chip;
+};
+
+int i2c_pca_add_bus(struct i2c_adapter *);
+int i2c_pca_add_numbered_bus(struct i2c_adapter *);
+
+#endif /* _LINUX_I2C_ALGO_PCA_H */
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
new file mode 100644
index 000000000..538e8f41a
--- /dev/null
+++ b/include/linux/i2c-algo-pcf.h
@@ -0,0 +1,49 @@
+/* ------------------------------------------------------------------------- */
+/* adap-pcf.h i2c driver algorithms for PCF8584 adapters */
+/* ------------------------------------------------------------------------- */
+/* Copyright (C) 1995-97 Simon G. Vogl
+ 1998-99 Hans Berglund
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301 USA. */
+/* ------------------------------------------------------------------------- */
+
+/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
+ Frodo Looijaard <frodol@dds.nl> */
+
+#ifndef _LINUX_I2C_ALGO_PCF_H
+#define _LINUX_I2C_ALGO_PCF_H
+
+struct i2c_algo_pcf_data {
+ void *data; /* private data for low-level routines */
+ void (*setpcf) (void *data, int ctl, int val);
+ int (*getpcf) (void *data, int ctl);
+ int (*getown) (void *data);
+ int (*getclock) (void *data);
+ void (*waitforpin) (void *data);
+
+ void (*xfer_begin) (void *data);
+ void (*xfer_end) (void *data);
+
+ /* Multi-master lost arbitration back-off delay (msecs)
+ * This should be set by the bus adapter or a knowledgeable client
+ * if the bus is multi-mastered, else zero
+ */
+ unsigned long lab_mdelay;
+};
+
+int i2c_pcf_add_bus(struct i2c_adapter *);
+
+#endif /* _LINUX_I2C_ALGO_PCF_H */
diff --git a/include/linux/i2c-dev.h b/include/linux/i2c-dev.h
new file mode 100644
index 000000000..79727144c
--- /dev/null
+++ b/include/linux/i2c-dev.h
@@ -0,0 +1,28 @@
+/*
+ i2c-dev.h - i2c-bus driver, char device interface
+
+ Copyright (C) 1995-97 Simon G. Vogl
+ Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301 USA.
+*/
+#ifndef _LINUX_I2C_DEV_H
+#define _LINUX_I2C_DEV_H
+
+#include <uapi/linux/i2c-dev.h>
+
+#define I2C_MAJOR 89 /* Device major number */
+#endif /* _LINUX_I2C_DEV_H */
diff --git a/include/linux/i2c-gpio.h b/include/linux/i2c-gpio.h
new file mode 100644
index 000000000..c1bcb1f1d
--- /dev/null
+++ b/include/linux/i2c-gpio.h
@@ -0,0 +1,38 @@
+/*
+ * i2c-gpio interface to platform code
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _LINUX_I2C_GPIO_H
+#define _LINUX_I2C_GPIO_H
+
+/**
+ * struct i2c_gpio_platform_data - Platform-dependent data for i2c-gpio
+ * @sda_pin: GPIO pin ID to use for SDA
+ * @scl_pin: GPIO pin ID to use for SCL
+ * @udelay: signal toggle delay. SCL frequency is (500 / udelay) kHz
+ * @timeout: clock stretching timeout in jiffies. If the slave keeps
+ * SCL low for longer than this, the transfer will time out.
+ * @sda_is_open_drain: SDA is configured as open drain, i.e. the pin
+ * isn't actively driven high when setting the output value high.
+ * gpio_get_value() must return the actual pin state even if the
+ * pin is configured as an output.
+ * @scl_is_open_drain: SCL is set up as open drain. Same requirements
+ * as for sda_is_open_drain apply.
+ * @scl_is_output_only: SCL output drivers cannot be turned off.
+ */
+struct i2c_gpio_platform_data {
+ unsigned int sda_pin;
+ unsigned int scl_pin;
+ int udelay;
+ int timeout;
+ unsigned int sda_is_open_drain:1;
+ unsigned int scl_is_open_drain:1;
+ unsigned int scl_is_output_only:1;
+};
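+
+/*
+ * Illustrative sketch (not part of this header): board code describes a
+ * bit-banged bus on two GPIOs and hands it to the "i2c-gpio" platform
+ * driver.  The pin numbers are arbitrary example values.
+ *
+ *	static struct i2c_gpio_platform_data example_i2c_gpio_data = {
+ *		.sda_pin		= 23,
+ *		.scl_pin		= 24,
+ *		.udelay			= 5,	// ~100 kHz
+ *		.sda_is_open_drain	= 1,
+ *		.scl_is_open_drain	= 1,
+ *	};
+ *
+ *	static struct platform_device example_i2c_gpio_device = {
+ *		.name	= "i2c-gpio",
+ *		.id	= 0,
+ *		.dev	= {
+ *			.platform_data = &example_i2c_gpio_data,
+ *		},
+ *	};
+ */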
+
+#endif /* _LINUX_I2C_GPIO_H */
diff --git a/include/linux/i2c-mux-gpio.h b/include/linux/i2c-mux-gpio.h
new file mode 100644
index 000000000..440610820
--- /dev/null
+++ b/include/linux/i2c-mux-gpio.h
@@ -0,0 +1,43 @@
+/*
+ * i2c-mux-gpio interface to platform code
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_I2C_MUX_GPIO_H
+#define _LINUX_I2C_MUX_GPIO_H
+
+/* MUX has no specific idle mode */
+#define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1)
+
+/**
+ * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of bitmasks of GPIO settings (low/high) for each
+ * position
+ * @n_values: Number of multiplexer positions (busses to instantiate)
+ * @classes: Optional I2C auto-detection classes
+ * @gpio_chip: Optional GPIO chip name; if set, GPIO pin numbers are given
+ * relative to the base GPIO number of that chip
+ * @gpios: Array of GPIO numbers used to control MUX
+ * @n_gpios: Number of GPIOs used to control MUX
+ * @idle: Bitmask to write to MUX when idle or I2C_MUX_GPIO_NO_IDLE if not used
+ */
+struct i2c_mux_gpio_platform_data {
+ int parent;
+ int base_nr;
+ const unsigned *values;
+ int n_values;
+ const unsigned *classes;
+ char *gpio_chip;
+ const unsigned *gpios;
+ int n_gpios;
+ unsigned idle;
+};
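+
+/*
+ * Illustrative sketch (not part of this header): a 4-way mux driven by two
+ * GPIOs hanging off parent adapter 1, for the "i2c-mux-gpio" platform
+ * driver.  Pin numbers and bus numbers are arbitrary example values.
+ *
+ *	static const unsigned example_mux_values[] = { 0, 1, 2, 3 };
+ *	static const unsigned example_mux_gpios[] = { 58, 59 };
+ *
+ *	static struct i2c_mux_gpio_platform_data example_mux_data = {
+ *		.parent		= 1,
+ *		.base_nr	= 0,		// dynamic child bus numbers
+ *		.values		= example_mux_values,
+ *		.n_values	= ARRAY_SIZE(example_mux_values),
+ *		.gpios		= example_mux_gpios,
+ *		.n_gpios	= ARRAY_SIZE(example_mux_gpios),
+ *		.idle		= I2C_MUX_GPIO_NO_IDLE,
+ *	};
+ */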
+
+#endif /* _LINUX_I2C_MUX_GPIO_H */
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
new file mode 100644
index 000000000..a65c86429
--- /dev/null
+++ b/include/linux/i2c-mux-pinctrl.h
@@ -0,0 +1,41 @@
+/*
+ * i2c-mux-pinctrl platform data
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_I2C_MUX_PINCTRL_H
+#define _LINUX_I2C_MUX_PINCTRL_H
+
+/**
+ * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
+ * @parent_bus_num: Parent I2C bus number
+ * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
+ * @bus_count: Number of child busses. Also the number of elements in
+ * @pinctrl_states
+ * @pinctrl_states: The names of the pinctrl state to select for each child bus
+ * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
+ * accessed. If NULL, the most recently used pinctrl state will be left
+ * selected.
+ */
+struct i2c_mux_pinctrl_platform_data {
+ int parent_bus_num;
+ int base_bus_num;
+ int bus_count;
+ const char **pinctrl_states;
+ const char *pinctrl_state_idle;
+};
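+
+/*
+ * Illustrative sketch (not part of this header): two child busses selected
+ * by pinctrl states named "i2c0" and "i2c1".  The state names and bus
+ * numbers are arbitrary example values.
+ *
+ *	static const char *example_pinctrl_states[] = { "i2c0", "i2c1" };
+ *
+ *	static struct i2c_mux_pinctrl_platform_data example_mux_pinctrl_data = {
+ *		.parent_bus_num	= 0,
+ *		.base_bus_num	= 0,		// dynamic numbering
+ *		.bus_count	= 2,
+ *		.pinctrl_states	= example_pinctrl_states,
+ *	};
+ */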
+
+#endif
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
new file mode 100644
index 000000000..b5f9a007a
--- /dev/null
+++ b/include/linux/i2c-mux.h
@@ -0,0 +1,49 @@
+/*
+ *
+ * i2c-mux.h - functions for the i2c-bus mux support
+ *
+ * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
+ * Michael Lawnick <michael.lawnick.ext@nsn.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA.
+ */
+
+#ifndef _LINUX_I2C_MUX_H
+#define _LINUX_I2C_MUX_H
+
+#ifdef __KERNEL__
+
+/*
+ * Called to create an i2c bus on a multiplexed bus segment.
+ * The mux_dev and chan_id parameters are passed to the select
+ * and deselect callback functions to perform hardware-specific
+ * mux control.
+ */
+struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
+ struct device *mux_dev,
+ void *mux_priv, u32 force_nr, u32 chan_id,
+ unsigned int class,
+ int (*select) (struct i2c_adapter *,
+ void *mux_dev, u32 chan_id),
+ int (*deselect) (struct i2c_adapter *,
+ void *mux_dev, u32 chan_id));
+
+void i2c_del_mux_adapter(struct i2c_adapter *adap);
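+
+/*
+ * Illustrative sketch (not part of this header): a mux driver creates one
+ * child adapter per downstream segment.  The select callback and the
+ * device/priv pointers below are hypothetical.
+ *
+ *	static int example_mux_select(struct i2c_adapter *parent,
+ *				      void *mux_dev, u32 chan_id)
+ *	{
+ *		// switch the hardware mux to segment chan_id
+ *		return 0;
+ *	}
+ *
+ *	// for each channel:
+ *	//	child = i2c_add_mux_adapter(parent, dev, priv, 0, chan, 0,
+ *	//				    example_mux_select, NULL);
+ *	// and on removal: i2c_del_mux_adapter(child);
+ */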
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_I2C_MUX_H */
diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h
new file mode 100644
index 000000000..1c06b5c7c
--- /dev/null
+++ b/include/linux/i2c-ocores.h
@@ -0,0 +1,22 @@
+/*
+ * i2c-ocores.h - definitions for the i2c-ocores interface
+ *
+ * Peter Korsgaard <jacmet@sunsite.dk>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _LINUX_I2C_OCORES_H
+#define _LINUX_I2C_OCORES_H
+
+struct ocores_i2c_platform_data {
+ u32 reg_shift; /* register offset shift value */
+ u32 reg_io_width; /* register io read/write width */
+ u32 clock_khz; /* input clock in kHz */
+ u8 num_devices; /* number of devices in the devices list */
+ struct i2c_board_info const *devices; /* devices connected to the bus */
+};
+
+#endif /* _LINUX_I2C_OCORES_H */
diff --git a/include/linux/i2c-omap.h b/include/linux/i2c-omap.h
new file mode 100644
index 000000000..babe0cf6d
--- /dev/null
+++ b/include/linux/i2c-omap.h
@@ -0,0 +1,38 @@
+#ifndef __I2C_OMAP_H__
+#define __I2C_OMAP_H__
+
+#include <linux/platform_device.h>
+
+/*
+ * Version 2 of the I2C peripheral unit has a different register
+ * layout and extra registers. The ID register in the V2 peripheral
+ * unit on the OMAP4430 reports the same ID as the V1 peripheral
+ * unit on the OMAP3530, so we must inform the driver which IP
+ * version we know it is running on from platform / cpu-specific
+ * code using these constants in the hwmod class definition.
+ */
+
+#define OMAP_I2C_IP_VERSION_1 1
+#define OMAP_I2C_IP_VERSION_2 2
+
+/* struct omap_i2c_bus_platform_data .flags meanings */
+
+#define OMAP_I2C_FLAG_NO_FIFO BIT(0)
+#define OMAP_I2C_FLAG_SIMPLE_CLOCK BIT(1)
+#define OMAP_I2C_FLAG_16BIT_DATA_REG BIT(2)
+#define OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK BIT(5)
+#define OMAP_I2C_FLAG_FORCE_19200_INT_CLK BIT(6)
+/* how the CPU address bus must be translated for I2C unit access */
+#define OMAP_I2C_FLAG_BUS_SHIFT_NONE 0
+#define OMAP_I2C_FLAG_BUS_SHIFT_1 BIT(7)
+#define OMAP_I2C_FLAG_BUS_SHIFT_2 BIT(8)
+#define OMAP_I2C_FLAG_BUS_SHIFT__SHIFT 7
+
+struct omap_i2c_bus_platform_data {
+ u32 clkrate;
+ u32 rev;
+ u32 flags;
+ void (*set_mpu_wkup_lat)(struct device *dev, long set);
+};
+
+#endif
diff --git a/include/linux/i2c-pca-platform.h b/include/linux/i2c-pca-platform.h
new file mode 100644
index 000000000..aba33759d
--- /dev/null
+++ b/include/linux/i2c-pca-platform.h
@@ -0,0 +1,12 @@
+#ifndef I2C_PCA9564_PLATFORM_H
+#define I2C_PCA9564_PLATFORM_H
+
+struct i2c_pca9564_pf_platform_data {
+ int gpio; /* pin used to reset the chip. The driver will work when
+ * it is not supplied (negative value), but it
+ * cannot recover from some error conditions then */
+ int i2c_clock_speed; /* values are defined in linux/i2c-algo-pca.h */
+ int timeout; /* timeout in jiffies */
+};
+
+#endif /* I2C_PCA9564_PLATFORM_H */
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
new file mode 100644
index 000000000..5388326fb
--- /dev/null
+++ b/include/linux/i2c-pnx.h
@@ -0,0 +1,38 @@
+/*
+ * Header file for I2C support on PNX010x/4008.
+ *
+ * Author: Dennis Kovalev <dkovalev@ru.mvista.com>
+ *
+ * 2004-2006 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifndef __I2C_PNX_H__
+#define __I2C_PNX_H__
+
+struct platform_device;
+struct clk;
+
+struct i2c_pnx_mif {
+ int ret; /* Return value */
+ int mode; /* Interface mode */
+ struct completion complete; /* I/O completion */
+ struct timer_list timer; /* Timeout */
+ u8 * buf; /* Data buffer */
+ int len; /* Length of data buffer */
+ int order; /* RX Bytes to order via TX */
+};
+
+struct i2c_pnx_algo_data {
+ void __iomem *ioaddr;
+ struct i2c_pnx_mif mif;
+ int last;
+ struct clk *clk;
+ struct i2c_adapter adapter;
+ int irq;
+ u32 timeout;
+};
+
+#endif /* __I2C_PNX_H__ */
diff --git a/include/linux/i2c-pxa.h b/include/linux/i2c-pxa.h
new file mode 100644
index 000000000..41dcdfe7f
--- /dev/null
+++ b/include/linux/i2c-pxa.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_I2C_ALGO_PXA_H
+#define _LINUX_I2C_ALGO_PXA_H
+
+typedef enum i2c_slave_event_e {
+ I2C_SLAVE_EVENT_START_READ,
+ I2C_SLAVE_EVENT_START_WRITE,
+ I2C_SLAVE_EVENT_STOP
+} i2c_slave_event_t;
+
+struct i2c_slave_client {
+ void *data;
+ void (*event)(void *ptr, i2c_slave_event_t event);
+ int (*read) (void *ptr);
+ void (*write)(void *ptr, unsigned int val);
+};
+
+#endif /* _LINUX_I2C_ALGO_PXA_H */
diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h
new file mode 100644
index 000000000..8f1b086ca
--- /dev/null
+++ b/include/linux/i2c-smbus.h
@@ -0,0 +1,51 @@
+/*
+ * i2c-smbus.h - SMBus extensions to the I2C protocol
+ *
+ * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA.
+ */
+
+#ifndef _LINUX_I2C_SMBUS_H
+#define _LINUX_I2C_SMBUS_H
+
+#include <linux/i2c.h>
+
+
+/**
+ * i2c_smbus_alert_setup - platform data for the smbus_alert i2c client
+ * @alert_edge_triggered: whether the alert interrupt is edge (1) or level (0)
+ * triggered
+ * @irq: IRQ number, if the smbus_alert driver should take care of interrupt
+ * handling
+ *
+ * If irq is not specified, the smbus_alert driver doesn't take care of
+ * interrupt handling. In that case it is up to the I2C bus driver to either
+ * handle the interrupts or to poll for alerts.
+ *
+ * If irq is specified then it is crucial that alert_edge_triggered is
+ * properly set.
+ */
+struct i2c_smbus_alert_setup {
+ unsigned int alert_edge_triggered:1;
+ int irq;
+};
+
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup);
+int i2c_handle_smbus_alert(struct i2c_client *ara);
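+
+/*
+ * Illustrative sketch (not part of this header): a bus driver with a
+ * dedicated, level-triggered SMBALERT# interrupt instantiates the
+ * smbus_alert client once the adapter is registered.  The irq value is an
+ * arbitrary example.
+ *
+ *	static struct i2c_smbus_alert_setup example_alert_setup = {
+ *		.alert_edge_triggered	= 0,	// level triggered
+ *		.irq			= 17,
+ *	};
+ *
+ *	// ara = i2c_setup_smbus_alert(adapter, &example_alert_setup);
+ *	// bus drivers that handle the interrupt themselves instead call
+ *	// i2c_handle_smbus_alert(ara) from their handler.
+ */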
+
+#endif /* _LINUX_I2C_SMBUS_H */
diff --git a/include/linux/i2c-xiic.h b/include/linux/i2c-xiic.h
new file mode 100644
index 000000000..4f9f2256a
--- /dev/null
+++ b/include/linux/i2c-xiic.h
@@ -0,0 +1,43 @@
+/*
+ * i2c-xiic.h
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Xilinx IIC
+ */
+
+#ifndef _LINUX_I2C_XIIC_H
+#define _LINUX_I2C_XIIC_H
+
+/**
+ * struct xiic_i2c_platform_data - Platform data of the Xilinx I2C driver
+ * @num_devices: Number of devices that shall be added when the driver
+ * is probed.
+ * @devices: The actual devices to add.
+ *
+ * The purpose of this platform data struct is to be able to provide a number
+ * of devices that should be added to the I2C bus. The reason is that sometimes
+ * the I2C board info is not enough; a new PCI board can, for instance, be
+ * plugged into a standard PC, and the bus number might be unknown at
+ * early init time.
+ */
+struct xiic_i2c_platform_data {
+ u8 num_devices;
+ struct i2c_board_info const *devices;
+};
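+
+/*
+ * Illustrative sketch (not part of this header): pre-declaring two devices
+ * to be instantiated when the adapter probes.  Device types and addresses
+ * are arbitrary example values.
+ *
+ *	static struct i2c_board_info const example_xiic_devices[] = {
+ *		{ .type = "example-sensor", .addr = 0x48 },
+ *		{ .type = "example-eeprom", .addr = 0x50 },
+ *	};
+ *
+ *	static struct xiic_i2c_platform_data example_xiic_data = {
+ *		.num_devices	= ARRAY_SIZE(example_xiic_devices),
+ *		.devices	= example_xiic_devices,
+ *	};
+ */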
+
+#endif /* _LINUX_I2C_XIIC_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
new file mode 100644
index 000000000..e83a738a3
--- /dev/null
+++ b/include/linux/i2c.h
@@ -0,0 +1,654 @@
+/* ------------------------------------------------------------------------- */
+/* */
+/* i2c.h - definitions for the i2c-bus interface */
+/* */
+/* ------------------------------------------------------------------------- */
+/* Copyright (C) 1995-2000 Simon G. Vogl
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ MA 02110-1301 USA. */
+/* ------------------------------------------------------------------------- */
+
+/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
+ Frodo Looijaard <frodol@dds.nl> */
+#ifndef _LINUX_I2C_H
+#define _LINUX_I2C_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/device.h> /* for struct device */
+#include <linux/sched.h> /* for completion */
+#include <linux/mutex.h>
+#include <linux/of.h> /* for struct device_node */
+#include <linux/swab.h> /* for swab16 */
+#include <uapi/linux/i2c.h>
+
+extern struct bus_type i2c_bus_type;
+extern struct device_type i2c_adapter_type;
+
+/* --- General options ------------------------------------------------ */
+
+struct i2c_msg;
+struct i2c_algorithm;
+struct i2c_adapter;
+struct i2c_client;
+struct i2c_driver;
+union i2c_smbus_data;
+struct i2c_board_info;
+enum i2c_slave_event;
+typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
+
+struct module;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+/*
+ * The master routines are the ones normally used to transmit data to devices
+ * on a bus (or read from them). Apart from two basic transfer functions to
+ * transmit one message at a time, a more complex version can be used to
+ * transmit an arbitrary number of messages without interruption.
+ * @count must be less than 64k since msg.len is u16.
+ */
+extern int i2c_master_send(const struct i2c_client *client, const char *buf,
+ int count);
+extern int i2c_master_recv(const struct i2c_client *client, char *buf,
+ int count);
+
+/* Transfer num messages.
+ */
+extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+/* Unlocked flavor */
+extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+
+/* This is the very generalized SMBus access routine. You probably do not
+ want to use this, though; one of the functions below may be much easier,
+ and probably just as fast.
+ Note that we use i2c_adapter here, because you do not need a specific
+ smbus adapter to call this function. */
+extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data);
+
+/* Now follow the 'nice' access routines. These also document the calling
+ conventions of i2c_smbus_xfer. */
+
+extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
+extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
+extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
+ u8 command);
+extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
+ u8 command, u8 value);
+extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
+ u8 command);
+extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
+ u8 command, u16 value);
+
+static inline s32
+i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command)
+{
+ s32 value = i2c_smbus_read_word_data(client, command);
+
+ return (value < 0) ? value : swab16(value);
+}
+
+static inline s32
+i2c_smbus_write_word_swapped(const struct i2c_client *client,
+ u8 command, u16 value)
+{
+ return i2c_smbus_write_word_data(client, command, swab16(value));
+}
+
+/* Returns the number of read bytes */
+extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
+ u8 command, u8 *values);
+extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
+ u8 command, u8 length, const u8 *values);
+/* Returns the number of read bytes */
+extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values);
+extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
+ u8 command, u8 length,
+ const u8 *values);
+#endif /* I2C */
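+
+/*
+ * Illustrative sketch (not part of this header): reading and then writing a
+ * byte-sized register over SMBus.  The register offset and helper name are
+ * arbitrary example values.
+ *
+ *	static int example_set_reg_bit(const struct i2c_client *client)
+ *	{
+ *		s32 val = i2c_smbus_read_byte_data(client, 0x01);
+ *
+ *		if (val < 0)
+ *			return val;		// negative errno
+ *		return i2c_smbus_write_byte_data(client, 0x01, val | 0x80);
+ *	}
+ */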
+
+/**
+ * struct i2c_driver - represent an I2C device driver
+ * @class: What kind of i2c device we instantiate (for detect)
+ * @attach_adapter: Callback for bus addition (deprecated)
+ * @probe: Callback for device binding
+ * @remove: Callback for device unbinding
+ * @shutdown: Callback for device shutdown
+ * @alert: Alert callback, for example for the SMBus alert protocol
+ * @command: Callback for bus-wide signaling (optional)
+ * @driver: Device driver model driver
+ * @id_table: List of I2C devices supported by this driver
+ * @detect: Callback for device detection
+ * @address_list: The I2C addresses to probe (for detect)
+ * @clients: List of detected clients we created (for i2c-core use only)
+ *
+ * The driver.owner field should be set to the module owner of this driver.
+ * The driver.name field should be set to the name of this driver.
+ *
+ * For automatic device detection, both @detect and @address_list must
+ * be defined. @class should also be set, otherwise only devices forced
+ * with module parameters will be created. The detect function must
+ * fill at least the name field of the i2c_board_info structure it is
+ * handed upon successful detection, and possibly also the flags field.
+ *
+ * If @detect is missing, the driver will still work fine for enumerated
+ * devices. Detected devices simply won't be supported. This is expected
+ * for the many I2C/SMBus devices which can't be detected reliably, and
+ * the ones which can always be enumerated in practice.
+ *
+ * The i2c_client structure which is handed to the @detect callback is
+ * not a real i2c_client. It is initialized just enough so that you can
+ * call i2c_smbus_read_byte_data and friends on it. Don't do anything
+ * else with it. In particular, calling dev_dbg and friends on it is
+ * not allowed.
+ */
+struct i2c_driver {
+ unsigned int class;
+
+ /* Notifies the driver that a new bus has appeared. You should avoid
+ * using this; it will be removed in the near future.
+ */
+ int (*attach_adapter)(struct i2c_adapter *) __deprecated;
+
+ /* Standard driver model interfaces */
+ int (*probe)(struct i2c_client *, const struct i2c_device_id *);
+ int (*remove)(struct i2c_client *);
+
+ /* driver model interfaces that don't relate to enumeration */
+ void (*shutdown)(struct i2c_client *);
+
+ /* Alert callback, for example for the SMBus alert protocol.
+ * The format and meaning of the data value depends on the protocol.
+ * For the SMBus alert protocol, there is a single bit of data passed
+ * as the alert response's low bit ("event flag").
+ */
+ void (*alert)(struct i2c_client *, unsigned int data);
+
+ /* an ioctl-like command that can be used to perform specific functions
+ * with the device.
+ */
+ int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
+
+ struct device_driver driver;
+ const struct i2c_device_id *id_table;
+
+ /* Device detection callback for automatic device creation */
+ int (*detect)(struct i2c_client *, struct i2c_board_info *);
+ const unsigned short *address_list;
+ struct list_head clients;
+};
+#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
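+
+/*
+ * Illustrative sketch (not part of this header): a minimal driver for
+ * enumerated devices only (no detect support).  All names below are
+ * hypothetical; registration is typically a one-line module_i2c_driver()
+ * invocation.
+ *
+ *	static int example_probe(struct i2c_client *client,
+ *				 const struct i2c_device_id *id)
+ *	{
+ *		return 0;	// set up the device here
+ *	}
+ *
+ *	static int example_remove(struct i2c_client *client)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static const struct i2c_device_id example_ids[] = {
+ *		{ "example-chip", 0 },
+ *		{ }
+ *	};
+ *
+ *	static struct i2c_driver example_driver = {
+ *		.driver		= { .name = "example-chip" },
+ *		.probe		= example_probe,
+ *		.remove		= example_remove,
+ *		.id_table	= example_ids,
+ *	};
+ */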
+
+/**
+ * struct i2c_client - represent an I2C slave device
+ * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address;
+ * I2C_CLIENT_PEC indicates it uses SMBus Packet Error Checking
+ * @addr: Address used on the I2C bus connected to the parent adapter.
+ * @name: Indicates the type of the device, usually a chip name that's
+ * generic enough to hide second-sourcing and compatible revisions.
+ * @adapter: manages the bus segment hosting this I2C device
+ * @dev: Driver model device node for the slave.
+ * @irq: indicates the IRQ generated by this device (if any)
+ * @detected: member of an i2c_driver.clients list or i2c-core's
+ * userspace_devices list
+ * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
+ * calls it to pass on slave events to the slave driver.
+ *
+ * An i2c_client identifies a single device (i.e. chip) connected to an
+ * i2c bus. The behaviour exposed to Linux is defined by the driver
+ * managing the device.
+ */
+struct i2c_client {
+ unsigned short flags; /* div., see below */
+ unsigned short addr; /* chip address - NOTE: 7bit */
+ /* addresses are stored in the */
+ /* _LOWER_ 7 bits */
+ char name[I2C_NAME_SIZE];
+ struct i2c_adapter *adapter; /* the adapter we sit on */
+ struct device dev; /* the device structure */
+ int irq; /* irq issued by device */
+ struct list_head detected;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ i2c_slave_cb_t slave_cb; /* callback for slave mode */
+#endif
+};
+#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
+
+extern struct i2c_client *i2c_verify_client(struct device *dev);
+extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
+
+static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
+{
+ struct device * const dev = container_of(kobj, struct device, kobj);
+ return to_i2c_client(dev);
+}
+
+static inline void *i2c_get_clientdata(const struct i2c_client *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
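+
+/*
+ * Typical use of the clientdata helpers above, sketched for a hypothetical
+ * "foo" driver: stash per-device state in probe() and fetch it back from any
+ * other callback that only receives the i2c_client. The register offset 0x00
+ * written in shutdown() is made up.
+ *
+ *	struct foo_data {
+ *		u8 config;
+ *	};
+ *
+ *	static int foo_probe(struct i2c_client *client,
+ *			     const struct i2c_device_id *id)
+ *	{
+ *		struct foo_data *data;
+ *
+ *		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ *		if (!data)
+ *			return -ENOMEM;
+ *		i2c_set_clientdata(client, data);
+ *		return 0;
+ *	}
+ *
+ *	static void foo_shutdown(struct i2c_client *client)
+ *	{
+ *		struct foo_data *data = i2c_get_clientdata(client);
+ *
+ *		i2c_smbus_write_byte_data(client, 0x00, data->config);
+ *	}
+ */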
+
+/* I2C slave support */
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+enum i2c_slave_event {
+ I2C_SLAVE_READ_REQUESTED,
+ I2C_SLAVE_WRITE_REQUESTED,
+ I2C_SLAVE_READ_PROCESSED,
+ I2C_SLAVE_WRITE_RECEIVED,
+ I2C_SLAVE_STOP,
+};
+
+extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
+extern int i2c_slave_unregister(struct i2c_client *client);
+
+static inline int i2c_slave_event(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ return client->slave_cb(client, event, val);
+}
+#endif
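+
+/*
+ * Sketch of a slave backend using the hooks above. It behaves like a tiny
+ * memory; the "foo_slave" state with its buf[] and pos fields is
+ * hypothetical.
+ *
+ *	static int foo_slave_cb(struct i2c_client *client,
+ *				enum i2c_slave_event event, u8 *val)
+ *	{
+ *		struct foo_slave *priv = i2c_get_clientdata(client);
+ *
+ *		switch (event) {
+ *		case I2C_SLAVE_WRITE_RECEIVED:
+ *			priv->buf[priv->pos++ % sizeof(priv->buf)] = *val;
+ *			break;
+ *		case I2C_SLAVE_READ_REQUESTED:
+ *		case I2C_SLAVE_READ_PROCESSED:
+ *			*val = priv->buf[priv->pos++ % sizeof(priv->buf)];
+ *			break;
+ *		case I2C_SLAVE_WRITE_REQUESTED:
+ *		case I2C_SLAVE_STOP:
+ *			priv->pos = 0;
+ *			break;
+ *		}
+ *		return 0;
+ *	}
+ *
+ * The backend's probe() then calls i2c_slave_register(client, foo_slave_cb)
+ * and its remove() calls i2c_slave_unregister(client).
+ */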
+
+/**
+ * struct i2c_board_info - template for device creation
+ * @type: chip type, to initialize i2c_client.name
+ * @flags: to initialize i2c_client.flags
+ * @addr: stored in i2c_client.addr
+ * @platform_data: stored in i2c_client.dev.platform_data
+ * @archdata: copied into i2c_client.dev.archdata
+ * @of_node: pointer to OpenFirmware device node
+ * @fwnode: device node supplied by the platform firmware
+ * @irq: stored in i2c_client.irq
+ *
+ * I2C doesn't actually support hardware probing, although controllers and
+ * devices may be able to use I2C_SMBUS_QUICK to tell whether or not there's
+ * a device at a given address. Drivers commonly need more information than
+ * that, such as chip type, configuration, associated IRQ, and so on.
+ *
+ * i2c_board_info is used to build tables of information listing I2C devices
+ * that are present. This information is used to grow the driver model tree.
+ * For mainboards this is done statically using i2c_register_board_info();
+ * bus numbers identify adapters that aren't yet available. For add-on boards,
+ * i2c_new_device() does this dynamically with the adapter already known.
+ */
+struct i2c_board_info {
+ char type[I2C_NAME_SIZE];
+ unsigned short flags;
+ unsigned short addr;
+ void *platform_data;
+ struct dev_archdata *archdata;
+ struct device_node *of_node;
+ struct fwnode_handle *fwnode;
+ int irq;
+};
+
+/**
+ * I2C_BOARD_INFO - macro used to list an i2c device and its address
+ * @dev_type: identifies the device type
+ * @dev_addr: the device's address on the bus.
+ *
+ * This macro initializes essential fields of a struct i2c_board_info,
+ * declaring what has been provided on a particular board. Optional
+ * fields (such as associated irq, or device-specific platform_data)
+ * are provided using conventional syntax.
+ */
+#define I2C_BOARD_INFO(dev_type, dev_addr) \
+ .type = dev_type, .addr = (dev_addr)
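+
+/*
+ * Example of a board info table built with this macro; the device names,
+ * addresses and IRQ below are made up and must match whatever the client
+ * drivers expect:
+ *
+ *	static struct i2c_board_info mainboard_i2c_devs[] __initdata = {
+ *		{
+ *			I2C_BOARD_INFO("foo_rtc", 0x51),
+ *		},
+ *		{
+ *			I2C_BOARD_INFO("foo_sensor", 0x48),
+ *			.irq = 42,
+ *		},
+ *	};
+ *
+ * Mainboard setup code would pass this table to i2c_register_board_info()
+ * together with the bus number and ARRAY_SIZE(mainboard_i2c_devs).
+ */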
+
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+/* Add-on boards should register/unregister their devices; e.g. a board
+ * with integrated I2C, a config eeprom, sensors, and a codec that's
+ * used in conjunction with the primary hardware.
+ */
+extern struct i2c_client *
+i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
+
+/* If you don't know the exact address of an I2C device, use this variant
+ * instead, which can probe for device presence in a list of possible
+ * addresses. The "probe" callback function is optional. If it is provided,
+ * it must return 1 on successful probe, 0 otherwise. If it is not provided,
+ * a default probing method is used.
+ */
+extern struct i2c_client *
+i2c_new_probed_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *, unsigned short addr));
+
+/* Common custom probe functions */
+extern int i2c_probe_func_quick_read(struct i2c_adapter *, unsigned short addr);
+
+/* For devices that use several addresses, use i2c_new_dummy() to make
+ * client handles for the extra addresses.
+ */
+extern struct i2c_client *
+i2c_new_dummy(struct i2c_adapter *adap, u16 address);
+
+extern void i2c_unregister_device(struct i2c_client *);
+#endif /* I2C */
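+
+/*
+ * Add-on board sketch using the calls above; the candidate addresses and the
+ * "foo" chip name are assumptions made for the example:
+ *
+ *	static const unsigned short foo_addrs[] = { 0x2c, 0x2d, I2C_CLIENT_END };
+ *
+ *	static struct i2c_board_info foo_info = {
+ *		I2C_BOARD_INFO("foo", 0),
+ *	};
+ *
+ *	struct i2c_client *foo_setup(struct i2c_adapter *adap)
+ *	{
+ *		return i2c_new_probed_device(adap, &foo_info, foo_addrs,
+ *					     i2c_probe_func_quick_read);
+ *	}
+ *
+ * i2c_new_probed_device() fills in the address that actually answered, and
+ * i2c_unregister_device() undoes the whole thing on teardown.
+ */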
+
+/* Mainboard arch_initcall() code should register all its I2C devices.
+ * This is done at arch_initcall time, before declaring any i2c adapters.
+ * Modules for add-on boards must use other calls.
+ */
+#ifdef CONFIG_I2C_BOARDINFO
+extern int
+i2c_register_board_info(int busnum, struct i2c_board_info const *info,
+ unsigned n);
+#else
+static inline int
+i2c_register_board_info(int busnum, struct i2c_board_info const *info,
+ unsigned n)
+{
+ return 0;
+}
+#endif /* I2C_BOARDINFO */
+
+/**
+ * struct i2c_algorithm - represent I2C transfer method
+ * @master_xfer: Issue a set of i2c transactions to the given I2C adapter
+ * defined by the msgs array, with num messages available to transfer via
+ * the adapter specified by adap.
+ * @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
+ * is not present, then the bus layer will try to convert the SMBus calls
+ * into I2C transfers instead.
+ * @functionality: Return the flags that this algorithm/adapter pair supports
+ * from the I2C_FUNC_* flags.
+ * @reg_slave: Register given client to I2C slave mode of this adapter
+ * @unreg_slave: Unregister given client from I2C slave mode of this adapter
+ *
+ * The following structs are for those who like to implement new bus drivers:
+ * i2c_algorithm is the interface to a class of hardware solutions which can
+ * be addressed using the same bus algorithms, e.g. bit-banging or the PCF8584
+ * to name two of the most common.
+ *
+ * The return codes from the @master_xfer field should indicate the type of
+ * error that occurred during the transfer, as documented in
+ * Documentation/i2c/fault-codes.
+ */
+struct i2c_algorithm {
+ /* If an adapter algorithm can't do I2C-level access, set master_xfer
+ to NULL. If an adapter algorithm can do SMBus access, set
+ smbus_xfer. If set to NULL, the SMBus protocol is simulated
+ using common I2C messages */
+ /* master_xfer should return the number of messages successfully
+ processed, or a negative value on error */
+ int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num);
+ int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
+
+ /* To determine what the adapter supports */
+ u32 (*functionality) (struct i2c_adapter *);
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ int (*reg_slave)(struct i2c_client *client);
+ int (*unreg_slave)(struct i2c_client *client);
+#endif
+};
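+
+/*
+ * Skeleton of an algorithm implementation for a bus driver. The "foo"
+ * controller, its private struct and foo_do_one_msg() are hypothetical; only
+ * the callback signatures and the I2C_FUNC_* flags come from this header.
+ * foo_do_one_msg() is expected to return 0 or a negative fault code.
+ *
+ *	static int foo_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ *			    int num)
+ *	{
+ *		struct foo_i2c *bus = i2c_get_adapdata(adap);
+ *		int i, ret;
+ *
+ *		for (i = 0; i < num; i++) {
+ *			ret = foo_do_one_msg(bus, &msgs[i]);
+ *			if (ret < 0)
+ *				return ret;
+ *		}
+ *		return num;
+ *	}
+ *
+ *	static u32 foo_func(struct i2c_adapter *adap)
+ *	{
+ *		return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ *	}
+ *
+ *	static const struct i2c_algorithm foo_algo = {
+ *		.master_xfer	= foo_xfer,
+ *		.functionality	= foo_func,
+ *	};
+ */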
+
+/**
+ * struct i2c_bus_recovery_info - I2C bus recovery information
+ * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or
+ * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery().
+ * @get_scl: This gets current value of SCL line. Mandatory for generic SCL
+ * recovery. Used internally for generic GPIO recovery.
+ * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used
+ * internally for generic GPIO recovery.
+ * @get_sda: This gets current value of SDA line. Optional for generic SCL
+ * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO
+ * recovery.
+ * @prepare_recovery: This will be called before starting recovery. Platform may
+ * configure padmux here for SDA/SCL line or something else they want.
+ * @unprepare_recovery: This will be called after completing recovery. Platform
+ * may configure padmux here for SDA/SCL line or something else they want.
+ * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery.
+ * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery.
+ */
+struct i2c_bus_recovery_info {
+ int (*recover_bus)(struct i2c_adapter *);
+
+ int (*get_scl)(struct i2c_adapter *);
+ void (*set_scl)(struct i2c_adapter *, int val);
+ int (*get_sda)(struct i2c_adapter *);
+
+ void (*prepare_recovery)(struct i2c_adapter *);
+ void (*unprepare_recovery)(struct i2c_adapter *);
+
+ /* gpio recovery */
+ int scl_gpio;
+ int sda_gpio;
+};
+
+int i2c_recover_bus(struct i2c_adapter *adap);
+
+/* Generic recovery routines */
+int i2c_generic_gpio_recovery(struct i2c_adapter *adap);
+int i2c_generic_scl_recovery(struct i2c_adapter *adap);
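+
+/*
+ * Sketch of wiring up generic SCL recovery for a bus driver; foo_get_scl()
+ * and foo_set_scl() stand for whatever register accesses the controller
+ * needs and are not defined here.
+ *
+ *	static struct i2c_bus_recovery_info foo_recovery_info = {
+ *		.recover_bus	= i2c_generic_scl_recovery,
+ *		.get_scl	= foo_get_scl,
+ *		.set_scl	= foo_set_scl,
+ *	};
+ *
+ * The driver points adap->bus_recovery_info at this before registering the
+ * adapter and calls i2c_recover_bus(adap) when it detects a stuck bus.
+ */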
+
+/**
+ * struct i2c_adapter_quirks - describe flaws of an i2c adapter
+ * @flags: see I2C_AQ_* for possible flags and read below
+ * @max_num_msgs: maximum number of messages per transfer
+ * @max_write_len: maximum length of a write message
+ * @max_read_len: maximum length of a read message
+ * @max_comb_1st_msg_len: maximum length of the first msg in a combined message
+ * @max_comb_2nd_msg_len: maximum length of the second msg in a combined message
+ *
+ * Note about combined messages: Some I2C controllers can only send one message
+ * per transfer, plus something called combined message or write-then-read.
+ * This is (usually) a small write message followed by a read message and
+ * barely enough to access register based devices like EEPROMs. There is a flag
+ * to support this mode. It implies max_num_msgs = 2 and does the length checks
+ * with max_comb_*_len because combined message mode usually has its own
+ * limitations. Because of HW implementations, some controllers can actually do
+ * write-then-anything or other variants. To support that, write-then-read has
+ * been broken out into smaller bits like write-first and read-second which can
+ * be combined as needed.
+ */
+
+struct i2c_adapter_quirks {
+ u64 flags;
+ int max_num_msgs;
+ u16 max_write_len;
+ u16 max_read_len;
+ u16 max_comb_1st_msg_len;
+ u16 max_comb_2nd_msg_len;
+};
+
+/* enforce max_num_msgs = 2 and use max_comb_*_len for length checks */
+#define I2C_AQ_COMB BIT(0)
+/* first combined message must be write */
+#define I2C_AQ_COMB_WRITE_FIRST BIT(1)
+/* second combined message must be read */
+#define I2C_AQ_COMB_READ_SECOND BIT(2)
+/* both combined messages must have the same target address */
+#define I2C_AQ_COMB_SAME_ADDR BIT(3)
+/* convenience macro for the typical write-then-read case */
+#define I2C_AQ_COMB_WRITE_THEN_READ (I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | \
+ I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
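+
+/*
+ * Example quirk description for a controller that can only do a short
+ * write followed by a read (the length limits are invented):
+ *
+ *	static const struct i2c_adapter_quirks foo_quirks = {
+ *		.flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ *		.max_comb_1st_msg_len = 2,
+ *		.max_comb_2nd_msg_len = 32,
+ *	};
+ *
+ * A bus driver sets adap->quirks = &foo_quirks before registration so the
+ * core can reject transfers the hardware cannot do.
+ */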
+
+/*
+ * i2c_adapter is the structure used to identify a physical i2c bus along
+ * with the access algorithms necessary to access it.
+ */
+struct i2c_adapter {
+ struct module *owner;
+ unsigned int class; /* classes to allow probing for */
+ const struct i2c_algorithm *algo; /* the algorithm to access the bus */
+ void *algo_data;
+
+ /* data fields that are valid for all devices */
+ struct rt_mutex bus_lock;
+
+ int timeout; /* in jiffies */
+ int retries;
+ struct device dev; /* the adapter device */
+
+ int nr;
+ char name[48];
+ struct completion dev_released;
+
+ struct mutex userspace_clients_lock;
+ struct list_head userspace_clients;
+
+ struct i2c_bus_recovery_info *bus_recovery_info;
+ const struct i2c_adapter_quirks *quirks;
+};
+#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
+
+static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
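+
+/*
+ * Putting the pieces together for a bus driver: fill in the adapter and
+ * register it. The "foo" names and the parent device are assumptions carried
+ * over from the earlier sketches.
+ *
+ *	static int foo_add_adapter(struct foo_i2c *bus, struct device *parent)
+ *	{
+ *		struct i2c_adapter *adap = &bus->adap;
+ *
+ *		adap->owner = THIS_MODULE;
+ *		adap->algo = &foo_algo;
+ *		adap->dev.parent = parent;
+ *		strlcpy(adap->name, "foo i2c adapter", sizeof(adap->name));
+ *		i2c_set_adapdata(adap, bus);
+ *
+ *		return i2c_add_adapter(adap);
+ *	}
+ */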
+
+static inline struct i2c_adapter *
+i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
+{
+#if IS_ENABLED(CONFIG_I2C_MUX)
+ struct device *parent = adapter->dev.parent;
+
+ if (parent != NULL && parent->type == &i2c_adapter_type)
+ return to_i2c_adapter(parent);
+ else
+#endif
+ return NULL;
+}
+
+int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *));
+
+/* Adapter locking functions, exported for shared pin cases */
+void i2c_lock_adapter(struct i2c_adapter *);
+void i2c_unlock_adapter(struct i2c_adapter *);
+
+/* flags for the client struct: */
+#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
+#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
+ /* Must equal I2C_M_TEN below */
+#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
+#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
+ /* Must match I2C_M_STOP|IGNORE_NAK */
+
+/* i2c adapter classes (bitmask) */
+#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
+#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
+#define I2C_CLASS_SPD (1<<7) /* Memory modules */
+#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
+
+/* Internal numbers to terminate lists */
+#define I2C_CLIENT_END 0xfffeU
+
+/* Construct an I2C_CLIENT_END-terminated array of i2c addresses */
+#define I2C_ADDRS(addr, addrs...) \
+ ((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
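+
+/*
+ * I2C_ADDRS() is convenient when the terminated array is only needed as a
+ * function argument, for instance (the address values are illustrative):
+ *
+ *	client = i2c_new_probed_device(adap, &info,
+ *				       I2C_ADDRS(0x2c, 0x2d), NULL);
+ *
+ * For a driver's .address_list the open-coded equivalent is a static
+ * "const unsigned short" array terminated with I2C_CLIENT_END.
+ */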
+
+
+/* ----- functions exported by i2c.o */
+
+/* administration...
+ */
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+extern int i2c_add_adapter(struct i2c_adapter *);
+extern void i2c_del_adapter(struct i2c_adapter *);
+extern int i2c_add_numbered_adapter(struct i2c_adapter *);
+
+extern int i2c_register_driver(struct module *, struct i2c_driver *);
+extern void i2c_del_driver(struct i2c_driver *);
+
+/* use a define to avoid include chaining to get THIS_MODULE */
+#define i2c_add_driver(driver) \
+ i2c_register_driver(THIS_MODULE, driver)
+
+extern struct i2c_client *i2c_use_client(struct i2c_client *client);
+extern void i2c_release_client(struct i2c_client *client);
+
+/* call the ->command() callback of the drivers of all attached clients with
+ * the given arguments */
+extern void i2c_clients_command(struct i2c_adapter *adap,
+ unsigned int cmd, void *arg);
+
+extern struct i2c_adapter *i2c_get_adapter(int nr);
+extern void i2c_put_adapter(struct i2c_adapter *adap);
+
+
+/* Return the functionality mask */
+static inline u32 i2c_get_functionality(struct i2c_adapter *adap)
+{
+ return adap->algo->functionality(adap);
+}
+
+/* Return 1 if adapter supports everything we need, 0 if not. */
+static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func)
+{
+ return (func & i2c_get_functionality(adap)) == func;
+}
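+
+/*
+ * Client drivers commonly gate their probe() on the adapter's capabilities,
+ * e.g. for a hypothetical chip that only needs SMBus byte data access:
+ *
+ *	if (!i2c_check_functionality(client->adapter,
+ *				     I2C_FUNC_SMBUS_BYTE_DATA))
+ *		return -ENODEV;
+ */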
+
+/* Return the adapter number for a specific adapter */
+static inline int i2c_adapter_id(struct i2c_adapter *adap)
+{
+ return adap->nr;
+}
+
+/**
+ * module_i2c_driver() - Helper macro for registering an I2C driver
+ * @__i2c_driver: i2c_driver struct
+ *
+ * Helper macro for I2C drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_i2c_driver(__i2c_driver) \
+ module_driver(__i2c_driver, i2c_add_driver, \
+ i2c_del_driver)
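+
+/*
+ * With a driver like the foo_driver sketched earlier, the whole module
+ * init/exit boilerplate collapses to a single line:
+ *
+ *	module_i2c_driver(foo_driver);
+ */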
+
+#endif /* I2C */
+
+#if IS_ENABLED(CONFIG_OF)
+/* must call put_device() when done with returned i2c_client device */
+extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+
+/* must call put_device() when done with returned i2c_adapter device */
+extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+
+#else
+
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
new file mode 100644
index 000000000..c2153049c
--- /dev/null
+++ b/include/linux/i2c/adp5588.h
@@ -0,0 +1,172 @@
+/*
+ * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
+ *
+ * Copyright 2009-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _ADP5588_H
+#define _ADP5588_H
+
+#define DEV_ID 0x00 /* Device ID */
+#define CFG 0x01 /* Configuration Register1 */
+#define INT_STAT 0x02 /* Interrupt Status Register */
+#define KEY_LCK_EC_STAT 0x03 /* Key Lock and Event Counter Register */
+#define Key_EVENTA 0x04 /* Key Event Register A */
+#define Key_EVENTB 0x05 /* Key Event Register B */
+#define Key_EVENTC 0x06 /* Key Event Register C */
+#define Key_EVENTD 0x07 /* Key Event Register D */
+#define Key_EVENTE 0x08 /* Key Event Register E */
+#define Key_EVENTF 0x09 /* Key Event Register F */
+#define Key_EVENTG 0x0A /* Key Event Register G */
+#define Key_EVENTH 0x0B /* Key Event Register H */
+#define Key_EVENTI 0x0C /* Key Event Register I */
+#define Key_EVENTJ 0x0D /* Key Event Register J */
+#define KP_LCK_TMR 0x0E /* Keypad Lock1 to Lock2 Timer */
+#define UNLOCK1 0x0F /* Unlock Key1 */
+#define UNLOCK2 0x10 /* Unlock Key2 */
+#define GPIO_INT_STAT1 0x11 /* GPIO Interrupt Status */
+#define GPIO_INT_STAT2 0x12 /* GPIO Interrupt Status */
+#define GPIO_INT_STAT3 0x13 /* GPIO Interrupt Status */
+#define GPIO_DAT_STAT1 0x14 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_STAT2 0x15 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_STAT3 0x16 /* GPIO Data Status, Read twice to clear */
+#define GPIO_DAT_OUT1 0x17 /* GPIO DATA OUT */
+#define GPIO_DAT_OUT2 0x18 /* GPIO DATA OUT */
+#define GPIO_DAT_OUT3 0x19 /* GPIO DATA OUT */
+#define GPIO_INT_EN1 0x1A /* GPIO Interrupt Enable */
+#define GPIO_INT_EN2 0x1B /* GPIO Interrupt Enable */
+#define GPIO_INT_EN3 0x1C /* GPIO Interrupt Enable */
+#define KP_GPIO1 0x1D /* Keypad or GPIO Selection */
+#define KP_GPIO2 0x1E /* Keypad or GPIO Selection */
+#define KP_GPIO3 0x1F /* Keypad or GPIO Selection */
+#define GPI_EM1 0x20 /* GPI Event Mode 1 */
+#define GPI_EM2 0x21 /* GPI Event Mode 2 */
+#define GPI_EM3 0x22 /* GPI Event Mode 3 */
+#define GPIO_DIR1 0x23 /* GPIO Data Direction */
+#define GPIO_DIR2 0x24 /* GPIO Data Direction */
+#define GPIO_DIR3 0x25 /* GPIO Data Direction */
+#define GPIO_INT_LVL1 0x26 /* GPIO Edge/Level Detect */
+#define GPIO_INT_LVL2 0x27 /* GPIO Edge/Level Detect */
+#define GPIO_INT_LVL3 0x28 /* GPIO Edge/Level Detect */
+#define Debounce_DIS1 0x29 /* Debounce Disable */
+#define Debounce_DIS2 0x2A /* Debounce Disable */
+#define Debounce_DIS3 0x2B /* Debounce Disable */
+#define GPIO_PULL1 0x2C /* GPIO Pull Disable */
+#define GPIO_PULL2 0x2D /* GPIO Pull Disable */
+#define GPIO_PULL3 0x2E /* GPIO Pull Disable */
+#define CMP_CFG_STAT 0x30 /* Comparator Configuration and Status Register */
+#define CMP_CONFG_SENS1 0x31 /* Sensor1 Comparator Configuration Register */
+#define CMP_CONFG_SENS2 0x32 /* L2 Light Sensor Reference Level, Output Falling for Sensor 1 */
+#define CMP1_LVL2_TRIP 0x33 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 1 */
+#define CMP1_LVL2_HYS 0x34 /* L3 Light Sensor Reference Level, Output Falling For Sensor 1 */
+#define CMP1_LVL3_TRIP 0x35 /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 1 */
+#define CMP1_LVL3_HYS 0x36 /* Sensor 2 Comparator Configuration Register */
+#define CMP2_LVL2_TRIP 0x37 /* L2 Light Sensor Reference Level, Output Falling for Sensor 2 */
+#define CMP2_LVL2_HYS 0x38 /* L2 Light Sensor Hysteresis (Active when Output Rising) for Sensor 2 */
+#define CMP2_LVL3_TRIP 0x39 /* L3 Light Sensor Reference Level, Output Falling For Sensor 2 */
+#define CMP2_LVL3_HYS 0x3A /* L3 Light Sensor Hysteresis (Active when Output Rising) For Sensor 2 */
+#define CMP1_ADC_DAT_R1 0x3B /* Comparator 1 ADC data Register1 */
+#define CMP1_ADC_DAT_R2 0x3C /* Comparator 1 ADC data Register2 */
+#define CMP2_ADC_DAT_R1 0x3D /* Comparator 2 ADC data Register1 */
+#define CMP2_ADC_DAT_R2 0x3E /* Comparator 2 ADC data Register2 */
+
+#define ADP5588_DEVICE_ID_MASK 0xF
+
+ /* Configuration Register1 */
+#define ADP5588_AUTO_INC (1 << 7)
+#define ADP5588_GPIEM_CFG (1 << 6)
+#define ADP5588_OVR_FLOW_M (1 << 5)
+#define ADP5588_INT_CFG (1 << 4)
+#define ADP5588_OVR_FLOW_IEN (1 << 3)
+#define ADP5588_K_LCK_IM (1 << 2)
+#define ADP5588_GPI_IEN (1 << 1)
+#define ADP5588_KE_IEN (1 << 0)
+
+/* Interrupt Status Register */
+#define ADP5588_CMP2_INT (1 << 5)
+#define ADP5588_CMP1_INT (1 << 4)
+#define ADP5588_OVR_FLOW_INT (1 << 3)
+#define ADP5588_K_LCK_INT (1 << 2)
+#define ADP5588_GPI_INT (1 << 1)
+#define ADP5588_KE_INT (1 << 0)
+
+/* Key Lock and Event Counter Register */
+#define ADP5588_K_LCK_EN (1 << 6)
+#define ADP5588_LCK21 0x30
+#define ADP5588_KEC 0xF
+
+#define ADP5588_MAXGPIO 18
+#define ADP5588_BANK(offs) ((offs) >> 3)
+#define ADP5588_BIT(offs) (1u << ((offs) & 0x7))
+
+/* Put one of these structures in i2c_board_info platform_data */
+
+#define ADP5588_KEYMAPSIZE 80
+
+#define GPI_PIN_ROW0 97
+#define GPI_PIN_ROW1 98
+#define GPI_PIN_ROW2 99
+#define GPI_PIN_ROW3 100
+#define GPI_PIN_ROW4 101
+#define GPI_PIN_ROW5 102
+#define GPI_PIN_ROW6 103
+#define GPI_PIN_ROW7 104
+#define GPI_PIN_COL0 105
+#define GPI_PIN_COL1 106
+#define GPI_PIN_COL2 107
+#define GPI_PIN_COL3 108
+#define GPI_PIN_COL4 109
+#define GPI_PIN_COL5 110
+#define GPI_PIN_COL6 111
+#define GPI_PIN_COL7 112
+#define GPI_PIN_COL8 113
+#define GPI_PIN_COL9 114
+
+#define GPI_PIN_ROW_BASE GPI_PIN_ROW0
+#define GPI_PIN_ROW_END GPI_PIN_ROW7
+#define GPI_PIN_COL_BASE GPI_PIN_COL0
+#define GPI_PIN_COL_END GPI_PIN_COL9
+
+#define GPI_PIN_BASE GPI_PIN_ROW_BASE
+#define GPI_PIN_END GPI_PIN_COL_END
+
+#define ADP5588_GPIMAPSIZE_MAX (GPI_PIN_END - GPI_PIN_BASE + 1)
+
+struct adp5588_gpi_map {
+ unsigned short pin;
+ unsigned short sw_evt;
+};
+
+struct adp5588_kpad_platform_data {
+ int rows; /* Number of rows */
+ int cols; /* Number of columns */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ unsigned repeat:1; /* Enable key repeat */
+ unsigned en_keylock:1; /* Enable Key Lock feature */
+ unsigned short unlock_key1; /* Unlock Key 1 */
+ unsigned short unlock_key2; /* Unlock Key 2 */
+ const struct adp5588_gpi_map *gpimap;
+ unsigned short gpimapsize;
+ const struct adp5588_gpio_platform_data *gpio_data;
+};
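+
+/*
+ * Example board hookup (keymap contents, geometry and the 0x34 address are
+ * made up; the device name string must match the client driver's id table):
+ *
+ *	static const unsigned short board_keymap[ADP5588_KEYMAPSIZE] = {
+ *		[0] = KEY_A, [1] = KEY_B,
+ *	};
+ *
+ *	static struct adp5588_kpad_platform_data board_kpad_data = {
+ *		.rows		= 8,
+ *		.cols		= 10,
+ *		.keymap		= board_keymap,
+ *		.keymapsize	= ARRAY_SIZE(board_keymap),
+ *	};
+ *
+ *	static struct i2c_board_info board_i2c_devs[] __initdata = {
+ *		{
+ *			I2C_BOARD_INFO("adp5588-keys", 0x34),
+ *			.platform_data = &board_kpad_data,
+ *		},
+ *	};
+ */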
+
+struct i2c_client; /* forward declaration */
+
+struct adp5588_gpio_platform_data {
+ int gpio_start; /* GPIO Chip base # */
+ const char *const *names;
+ unsigned irq_base; /* interrupt base # */
+ unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
+ int (*setup)(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio,
+ void *context);
+ void *context;
+};
+
+#endif
diff --git a/include/linux/i2c/adp8860.h b/include/linux/i2c/adp8860.h
new file mode 100644
index 000000000..0b4d39855
--- /dev/null
+++ b/include/linux/i2c/adp8860.h
@@ -0,0 +1,154 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * Backlight drivers ADP8860
+ *
+ * Copyright 2009-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_I2C_ADP8860_H
+#define __LINUX_I2C_ADP8860_H
+
+#include <linux/leds.h>
+#include <linux/types.h>
+
+#define ID_ADP8860 8860
+
+#define ADP8860_MAX_BRIGHTNESS 0x7F
+#define FLAG_OFFT_SHIFT 8
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define ADP8860_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT)
+#define ADP8860_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT)
+#define ADP8860_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT)
+#define ADP8860_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT)
+
+#define ADP8860_LED_ONT_200ms 0
+#define ADP8860_LED_ONT_600ms 1
+#define ADP8860_LED_ONT_800ms 2
+#define ADP8860_LED_ONT_1200ms 3
+
+#define ADP8860_LED_D7 (7)
+#define ADP8860_LED_D6 (6)
+#define ADP8860_LED_D5 (5)
+#define ADP8860_LED_D4 (4)
+#define ADP8860_LED_D3 (3)
+#define ADP8860_LED_D2 (2)
+#define ADP8860_LED_D1 (1)
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP8860_BL_D7 (1 << 6)
+#define ADP8860_BL_D6 (1 << 5)
+#define ADP8860_BL_D5 (1 << 4)
+#define ADP8860_BL_D4 (1 << 3)
+#define ADP8860_BL_D3 (1 << 2)
+#define ADP8860_BL_D2 (1 << 1)
+#define ADP8860_BL_D1 (1 << 0)
+
+#define ADP8860_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP8860_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP8860_FADE_T_600ms 2
+#define ADP8860_FADE_T_900ms 3
+#define ADP8860_FADE_T_1200ms 4
+#define ADP8860_FADE_T_1500ms 5
+#define ADP8860_FADE_T_1800ms 6
+#define ADP8860_FADE_T_2100ms 7
+#define ADP8860_FADE_T_2400ms 8
+#define ADP8860_FADE_T_2700ms 9
+#define ADP8860_FADE_T_3000ms 10
+#define ADP8860_FADE_T_3500ms 11
+#define ADP8860_FADE_T_4000ms 12
+#define ADP8860_FADE_T_4500ms 13
+#define ADP8860_FADE_T_5000ms 14
+#define ADP8860_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP8860_FADE_LAW_LINEAR 0
+#define ADP8860_FADE_LAW_SQUARE 1
+#define ADP8860_FADE_LAW_CUBIC1 2
+#define ADP8860_FADE_LAW_CUBIC2 3
+
+#define ADP8860_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP8860_BL_AMBL_FILT_160ms 1
+#define ADP8860_BL_AMBL_FILT_320ms 2
+#define ADP8860_BL_AMBL_FILT_640ms 3
+#define ADP8860_BL_AMBL_FILT_1280ms 4
+#define ADP8860_BL_AMBL_FILT_2560ms 5
+#define ADP8860_BL_AMBL_FILT_5120ms 6
+#define ADP8860_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+/*
+ * Backlight current 0..30mA
+ */
+#define ADP8860_BL_CUR_mA(I) ((I * 127) / 30)
+
+/*
+ * L2 comparator current 0..1106uA
+ */
+#define ADP8860_L2_COMP_CURR_uA(I) ((I * 255) / 1106)
+
+/*
+ * L3 comparator current 0..138uA
+ */
+#define ADP8860_L3_COMP_CURR_uA(I) ((I * 255) / 138)
+
+struct adp8860_backlight_platform_data {
+ u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */
+
+ u8 bl_fade_in; /* Backlight Fade-In Timer */
+ u8 bl_fade_out; /* Backlight Fade-Out Timer */
+ u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+	u8 l3_dark_trip;	/* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+	u8 l3_dark_hyst;	/* use L3_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+
+ /**
+ * Independent Current Sinks / LEDS
+ * Sinks not assigned to the Backlight can be exposed to
+ * user space using the LEDS CLASS interface
+ */
+
+ int num_leds;
+ struct led_info *leds;
+ u8 led_fade_in; /* LED Fade-In Timer */
+ u8 led_fade_out; /* LED Fade-Out Timer */
+ u8 led_fade_law; /* fade-on/fade-off transfer characteristic */
+ u8 led_on_time;
+
+ /**
+ * Gain down disable. Setting this option does not allow the
+ * charge pump to switch to lower gains. NOT AVAILABLE on ADP8860
+ * 1 = the charge pump doesn't switch down in gain until all LEDs are 0.
+ * The charge pump switches up in gain as needed. This feature is
+ * useful if the ADP8863 charge pump is used to drive an external load.
+ * This feature must be used when utilizing small fly capacitors
+ * (0402 or smaller).
+ * 0 = the charge pump automatically switches up and down in gain.
+ * This provides optimal efficiency, but is not suitable for driving
+ * loads that are not connected through the ADP8863 diode drivers.
+ * Additionally, the charge pump fly capacitors should be low ESR
+ * and sized 0603 or greater.
+ */
+
+ u8 gdwn_dis;
+};
+
+#endif /* __LINUX_I2C_ADP8860_H */
diff --git a/include/linux/i2c/adp8870.h b/include/linux/i2c/adp8870.h
new file mode 100644
index 000000000..624dceccb
--- /dev/null
+++ b/include/linux/i2c/adp8870.h
@@ -0,0 +1,153 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * Backlight drivers ADP8870
+ *
+ * Copyright 2009-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_I2C_ADP8870_H
+#define __LINUX_I2C_ADP8870_H
+
+#define ID_ADP8870 8870
+
+#define ADP8870_MAX_BRIGHTNESS 0x7F
+#define FLAG_OFFT_SHIFT 8
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT)
+#define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT)
+
+#define ADP8870_LED_ONT_200ms 0
+#define ADP8870_LED_ONT_600ms 1
+#define ADP8870_LED_ONT_800ms 2
+#define ADP8870_LED_ONT_1200ms 3
+
+#define ADP8870_LED_D7 (7)
+#define ADP8870_LED_D6 (6)
+#define ADP8870_LED_D5 (5)
+#define ADP8870_LED_D4 (4)
+#define ADP8870_LED_D3 (3)
+#define ADP8870_LED_D2 (2)
+#define ADP8870_LED_D1 (1)
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP8870_BL_D7 (1 << 6)
+#define ADP8870_BL_D6 (1 << 5)
+#define ADP8870_BL_D5 (1 << 4)
+#define ADP8870_BL_D4 (1 << 3)
+#define ADP8870_BL_D3 (1 << 2)
+#define ADP8870_BL_D2 (1 << 1)
+#define ADP8870_BL_D1 (1 << 0)
+
+#define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP8870_FADE_T_600ms 2
+#define ADP8870_FADE_T_900ms 3
+#define ADP8870_FADE_T_1200ms 4
+#define ADP8870_FADE_T_1500ms 5
+#define ADP8870_FADE_T_1800ms 6
+#define ADP8870_FADE_T_2100ms 7
+#define ADP8870_FADE_T_2400ms 8
+#define ADP8870_FADE_T_2700ms 9
+#define ADP8870_FADE_T_3000ms 10
+#define ADP8870_FADE_T_3500ms 11
+#define ADP8870_FADE_T_4000ms 12
+#define ADP8870_FADE_T_4500ms 13
+#define ADP8870_FADE_T_5000ms 14
+#define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP8870_FADE_LAW_LINEAR 0
+#define ADP8870_FADE_LAW_SQUARE 1
+#define ADP8870_FADE_LAW_CUBIC1 2
+#define ADP8870_FADE_LAW_CUBIC2 3
+
+#define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP8870_BL_AMBL_FILT_160ms 1
+#define ADP8870_BL_AMBL_FILT_320ms 2
+#define ADP8870_BL_AMBL_FILT_640ms 3
+#define ADP8870_BL_AMBL_FILT_1280ms 4
+#define ADP8870_BL_AMBL_FILT_2560ms 5
+#define ADP8870_BL_AMBL_FILT_5120ms 6
+#define ADP8870_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+/*
+ * Backlight current 0..30mA
+ */
+#define ADP8870_BL_CUR_mA(I) ((I * 127) / 30)
+
+/*
+ * L2 comparator current 0..1106uA
+ */
+#define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106)
+
+/*
+ * L3 comparator current 0..551uA
+ */
+#define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551)
+
+/*
+ * L4 comparator current 0..275uA
+ */
+#define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275)
+
+/*
+ * L5 comparator current 0..138uA
+ */
+#define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138)
+
+struct adp8870_backlight_platform_data {
+ u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */
+ u8 pwm_assign; /* 1 = Enables PWM mode */
+
+ u8 bl_fade_in; /* Backlight Fade-In Timer */
+ u8 bl_fade_out; /* Backlight Fade-Out Timer */
+ u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l4_indor_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
+ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
+ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
+ u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
+ u8 l4_hyst; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
+ u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+	u8 l5_hyst;		/* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
+
+ /**
+ * Independent Current Sinks / LEDS
+ * Sinks not assigned to the Backlight can be exposed to
+ * user space using the LEDS CLASS interface
+ */
+
+ int num_leds;
+ struct led_info *leds;
+ u8 led_fade_in; /* LED Fade-In Timer */
+ u8 led_fade_out; /* LED Fade-Out Timer */
+ u8 led_fade_law; /* fade-on/fade-off transfer characteristic */
+ u8 led_on_time;
+};
+
+#endif /* __LINUX_I2C_ADP8870_H */
diff --git a/include/linux/i2c/ads1015.h b/include/linux/i2c/ads1015.h
new file mode 100644
index 000000000..d5aa2a045
--- /dev/null
+++ b/include/linux/i2c/ads1015.h
@@ -0,0 +1,36 @@
+/*
+ * Platform Data for ADS1015 12-bit 4-input ADC
+ * (C) Copyright 2010
+ * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_ADS1015_H
+#define LINUX_ADS1015_H
+
+#define ADS1015_CHANNELS 8
+
+struct ads1015_channel_data {
+ bool enabled;
+ unsigned int pga;
+ unsigned int data_rate;
+};
+
+struct ads1015_platform_data {
+ struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+};
+
+#endif /* LINUX_ADS1015_H */
diff --git a/include/linux/i2c/apds990x.h b/include/linux/i2c/apds990x.h
new file mode 100644
index 000000000..d186fcc5d
--- /dev/null
+++ b/include/linux/i2c/apds990x.h
@@ -0,0 +1,79 @@
+/*
+ * This file is part of the APDS990x sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __APDS990X_H__
+#define __APDS990X_H__
+
+
+#define APDS_IRLED_CURR_12mA 0x3
+#define APDS_IRLED_CURR_25mA 0x2
+#define APDS_IRLED_CURR_50mA 0x1
+#define APDS_IRLED_CURR_100mA 0x0
+
+/**
+ * struct apds990x_chip_factors - defines effect of the cover window
+ * @ga: Total glass attenuation
+ * @cf1: clear channel factor 1 for raw to lux conversion
+ * @irf1: IR channel factor 1 for raw to lux conversion
+ * @cf2: clear channel factor 2 for raw to lux conversion
+ * @irf2: IR channel factor 2 for raw to lux conversion
+ * @df: device factor for conversion formulas
+ *
+ * Structure for tuning ALS calculation to match with environment.
+ * Values depend on the material above the sensor and the sensor
+ * itself. If @ga is zero, the driver will use uncovered-sensor default values.
+ * Format: decimal value * APDS_PARAM_SCALE, except @df which is a plain integer.
+ */
+#define APDS_PARAM_SCALE 4096
+struct apds990x_chip_factors {
+ int ga;
+ int cf1;
+ int irf1;
+ int cf2;
+ int irf2;
+ int df;
+};
+
+/**
+ * struct apds990x_platform_data - platform data for the apds990x.c driver
+ * @cf: chip factor data
+ * @pdrive: IR-led driving current
+ * @ppcount: number of IR pulses used for proximity estimation
+ * @setup_resources: interrupt line setup call back function
+ * @release_resources: interrupt line release call back function
+ *
+ * Proximity detection result depends heavily on correct ppcount, pdrive
+ * and cover window.
+ *
+ */
+
+struct apds990x_platform_data {
+ struct apds990x_chip_factors cf;
+ u8 pdrive;
+ u8 ppcount;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+};
+
+#endif
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h
new file mode 100644
index 000000000..02bf6ea31
--- /dev/null
+++ b/include/linux/i2c/atmel_mxt_ts.h
@@ -0,0 +1,25 @@
+/*
+ * Atmel maXTouch Touchscreen driver
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_ATMEL_MXT_TS_H
+#define __LINUX_ATMEL_MXT_TS_H
+
+#include <linux/types.h>
+
+/* The platform data for the Atmel maXTouch touchscreen driver */
+struct mxt_platform_data {
+ unsigned long irqflags;
+ u8 t19_num_keys;
+ const unsigned int *t19_keymap;
+};
+
+#endif /* __LINUX_ATMEL_MXT_TS_H */
diff --git a/include/linux/i2c/bfin_twi.h b/include/linux/i2c/bfin_twi.h
new file mode 100644
index 000000000..135a4e087
--- /dev/null
+++ b/include/linux/i2c/bfin_twi.h
@@ -0,0 +1,145 @@
+/*
+ * i2c-bfin-twi.h - interface to ADI TWI controller
+ *
+ * Copyright 2005-2014 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __I2C_BFIN_TWI_H__
+#define __I2C_BFIN_TWI_H__
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+/*
+ * ADI twi registers layout
+ */
+struct bfin_twi_regs {
+ u16 clkdiv;
+ u16 dummy1;
+ u16 control;
+ u16 dummy2;
+ u16 slave_ctl;
+ u16 dummy3;
+ u16 slave_stat;
+ u16 dummy4;
+ u16 slave_addr;
+ u16 dummy5;
+ u16 master_ctl;
+ u16 dummy6;
+ u16 master_stat;
+ u16 dummy7;
+ u16 master_addr;
+ u16 dummy8;
+ u16 int_stat;
+ u16 dummy9;
+ u16 int_mask;
+ u16 dummy10;
+ u16 fifo_ctl;
+ u16 dummy11;
+ u16 fifo_stat;
+ u16 dummy12;
+ u32 __pad[20];
+ u16 xmt_data8;
+ u16 dummy13;
+ u16 xmt_data16;
+ u16 dummy14;
+ u16 rcv_data8;
+ u16 dummy15;
+ u16 rcv_data16;
+ u16 dummy16;
+};
+
+struct bfin_twi_iface {
+ int irq;
+ spinlock_t lock;
+ char read_write;
+ u8 command;
+ u8 *transPtr;
+ int readNum;
+ int writeNum;
+ int cur_mode;
+ int manual_stop;
+ int result;
+ struct i2c_adapter adap;
+ struct completion complete;
+ struct i2c_msg *pmsg;
+ int msg_num;
+ int cur_msg;
+ u16 saved_clkdiv;
+ u16 saved_control;
+ struct bfin_twi_regs __iomem *regs_base;
+};
+
+/* ******************** TWO-WIRE INTERFACE (TWI) MASKS ********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y); ) */
+#define CLKLOW(x) ((x) & 0xFF) /* Periods Clock Is Held Low */
+#define CLKHI(y) (((y)&0xFF)<<0x8) /* Periods Before New Clock Low */
+
+/* TWI_PRESCALE Masks */
+#define PRESCALE 0x007F /* SCLKs Per Internal Time Reference (10MHz) */
+#define TWI_ENA 0x0080 /* TWI Enable */
+#define SCCB 0x0200 /* SCCB Compatibility Enable */
+
+/* TWI_SLAVE_CTL Masks */
+#define SEN 0x0001 /* Slave Enable */
+#define SADD_LEN 0x0002 /* Slave Address Length */
+#define STDVAL 0x0004 /* Slave Transmit Data Valid */
+#define NAK 0x0008 /* NAK Generated At Conclusion Of Transfer */
+#define GEN 0x0010 /* General Call Address Matching Enabled */
+
+/* TWI_SLAVE_STAT Masks */
+#define SDIR 0x0001 /* Slave Transfer Direction (RX/TX*) */
+#define GCALL 0x0002 /* General Call Indicator */
+
+/* TWI_MASTER_CTL Masks */
+#define MEN 0x0001 /* Master Mode Enable */
+#define MADD_LEN 0x0002 /* Master Address Length */
+#define MDIR 0x0004 /* Master Transmit Direction (RX/TX*) */
+#define FAST 0x0008 /* Use Fast Mode Timing Specs */
+#define STOP 0x0010 /* Issue Stop Condition */
+#define RSTART 0x0020 /* Repeat Start or Stop* At End Of Transfer */
+#define DCNT 0x3FC0 /* Data Bytes To Transfer */
+#define SDAOVR 0x4000 /* Serial Data Override */
+#define SCLOVR 0x8000 /* Serial Clock Override */
+
+/* TWI_MASTER_STAT Masks */
+#define MPROG 0x0001 /* Master Transfer In Progress */
+#define LOSTARB 0x0002 /* Lost Arbitration Indicator (Xfer Aborted) */
+#define ANAK 0x0004 /* Address Not Acknowledged */
+#define DNAK 0x0008 /* Data Not Acknowledged */
+#define BUFRDERR 0x0010 /* Buffer Read Error */
+#define BUFWRERR 0x0020 /* Buffer Write Error */
+#define SDASEN 0x0040 /* Serial Data Sense */
+#define SCLSEN 0x0080 /* Serial Clock Sense */
+#define BUSBUSY 0x0100 /* Bus Busy Indicator */
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks */
+#define SINIT 0x0001 /* Slave Transfer Initiated */
+#define SCOMP 0x0002 /* Slave Transfer Complete */
+#define SERR 0x0004 /* Slave Transfer Error */
+#define SOVF 0x0008 /* Slave Overflow */
+#define MCOMP 0x0010 /* Master Transfer Complete */
+#define MERR 0x0020 /* Master Transfer Error */
+#define XMTSERV 0x0040 /* Transmit FIFO Service */
+#define RCVSERV 0x0080 /* Receive FIFO Service */
+
+/* TWI_FIFO_CTRL Masks */
+#define XMTFLUSH 0x0001 /* Transmit Buffer Flush */
+#define RCVFLUSH 0x0002 /* Receive Buffer Flush */
+#define XMTINTLEN 0x0004 /* Transmit Buffer Interrupt Length */
+#define RCVINTLEN 0x0008 /* Receive Buffer Interrupt Length */
+
+/* TWI_FIFO_STAT Masks */
+#define XMTSTAT 0x0003 /* Transmit FIFO Status */
+#define XMT_EMPTY 0x0000 /* Transmit FIFO Empty */
+#define XMT_HALF 0x0001 /* Transmit FIFO Has 1 Byte To Write */
+#define XMT_FULL 0x0003 /* Transmit FIFO Full (2 Bytes To Write) */
+
+#define RCVSTAT 0x000C /* Receive FIFO Status */
+#define RCV_EMPTY 0x0000 /* Receive FIFO Empty */
+#define RCV_HALF 0x0004 /* Receive FIFO Has 1 Byte To Read */
+#define RCV_FULL 0x000C /* Receive FIFO Full (2 Bytes To Read) */
+
+#endif
diff --git a/include/linux/i2c/bh1770glc.h b/include/linux/i2c/bh1770glc.h
new file mode 100644
index 000000000..8b5e2df36
--- /dev/null
+++ b/include/linux/i2c/bh1770glc.h
@@ -0,0 +1,53 @@
+/*
+ * This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
+ * Chip is combined proximity and ambient light sensor.
+ *
+ * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __BH1770_H__
+#define __BH1770_H__
+
+/**
+ * struct bh1770_platform_data - platform data for bh1770glc driver
+ * @led_def_curr: IR led driving current.
+ * @glass_attenuation: Attenuation factor for covering window.
+ * @setup_resources: Callback to set up the interrupt line
+ * @release_resources: Callback to release the interrupt line
+ *
+ * Example of glass attenuation: 16384 * 385 / 100 means attenuation factor
+ * of 3.85. i.e. light_above_sensor = light_above_cover_window / 3.85
+ */
+
+struct bh1770_platform_data {
+#define BH1770_LED_5mA 0
+#define BH1770_LED_10mA 1
+#define BH1770_LED_20mA 2
+#define BH1770_LED_50mA 3
+#define BH1770_LED_100mA 4
+#define BH1770_LED_150mA 5
+#define BH1770_LED_200mA 6
+ __u8 led_def_curr;
+#define BH1770_NEUTRAL_GA 16384 /* 16384 / 16384 = 1 */
+ __u32 glass_attenuation;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+};
+#endif
diff --git a/include/linux/i2c/dm355evm_msp.h b/include/linux/i2c/dm355evm_msp.h
new file mode 100644
index 000000000..372470350
--- /dev/null
+++ b/include/linux/i2c/dm355evm_msp.h
@@ -0,0 +1,79 @@
+/*
+ * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board
+ */
+#ifndef __LINUX_I2C_DM355EVM_MSP
+#define __LINUX_I2C_DM355EVM_MSP
+
+/*
+ * Written against Spectrum's writeup for the A4 firmware revision,
+ * and tweaked to match source and rev D2 schematics by removing CPLD
+ * and NOR flash hooks (which were last appropriate in rev B boards).
+ *
+ * Note that the firmware supports a flavor of write posting ... to be
+ * sure a write completes, issue another read or write.
+ */
+
+/* utilities to access "registers" emulated by msp430 firmware */
+extern int dm355evm_msp_write(u8 value, u8 reg);
+extern int dm355evm_msp_read(u8 reg);
+
+
+/* command/control registers */
+#define DM355EVM_MSP_COMMAND 0x00
+# define MSP_COMMAND_NULL 0
+# define MSP_COMMAND_RESET_COLD 1
+# define MSP_COMMAND_RESET_WARM 2
+# define MSP_COMMAND_RESET_WARM_I 3
+# define MSP_COMMAND_POWEROFF 4
+# define MSP_COMMAND_IR_REINIT 5
+#define DM355EVM_MSP_STATUS 0x01
+# define MSP_STATUS_BAD_OFFSET BIT(0)
+# define MSP_STATUS_BAD_COMMAND BIT(1)
+# define MSP_STATUS_POWER_ERROR BIT(2)
+# define MSP_STATUS_RXBUF_OVERRUN BIT(3)
+#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */
+# define MSP_RESET_DC5 BIT(0)
+# define MSP_RESET_TVP5154 BIT(2)
+# define MSP_RESET_IMAGER BIT(3)
+# define MSP_RESET_ETHERNET BIT(4)
+# define MSP_RESET_SYS BIT(5)
+# define MSP_RESET_AIC33 BIT(7)
+
+/* GPIO registers ... bit patterns mostly match the source MSP ports */
+#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */
+#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */
+# define MSP_SWITCH1_SW6_1 BIT(0)
+# define MSP_SWITCH1_SW6_2 BIT(1)
+# define MSP_SWITCH1_SW6_3 BIT(2)
+# define MSP_SWITCH1_SW6_4 BIT(3)
+# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */
+# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */
+#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */
+# define MSP_SWITCH2_SW10 BIT(3)
+# define MSP_SWITCH2_SW11 BIT(4)
+# define MSP_SWITCH2_SW12 BIT(5)
+# define MSP_SWITCH2_SW13 BIT(6)
+# define MSP_SWITCH2_SW14 BIT(7)
+#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */
+# define MSP_SDMMC_0_WP BIT(1)
+# define MSP_SDMMC_0_CD BIT(2) /* active low */
+# define MSP_SDMMC_1_WP BIT(3)
+# define MSP_SDMMC_1_CD BIT(4) /* active low */
+#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */
+#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */
+# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */
+
+/* power supply registers are currently omitted */
+
+/* RTC registers */
+#define DM355EVM_MSP_RTC_0 0x12 /* LSB */
+#define DM355EVM_MSP_RTC_1 0x13
+#define DM355EVM_MSP_RTC_2 0x14
+#define DM355EVM_MSP_RTC_3 0x15 /* MSB */
+
+/* input event queue registers; code == ((HIGH << 8) | LOW) */
+#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */
+#define DM355EVM_MSP_INPUT_HIGH 0x17
+#define DM355EVM_MSP_INPUT_LOW 0x18
+
+#endif /* __LINUX_I2C_DM355EVM_MSP */
diff --git a/include/linux/i2c/ds620.h b/include/linux/i2c/ds620.h
new file mode 100644
index 000000000..736bb87ac
--- /dev/null
+++ b/include/linux/i2c/ds620.h
@@ -0,0 +1,21 @@
+#ifndef _LINUX_DS620_H
+#define _LINUX_DS620_H
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+/* platform data for the DS620 temperature sensor and thermostat */
+
+struct ds620_platform_data {
+ /*
+ * Thermostat output pin PO mode:
+ * 0 = always low (default)
+ * 1 = PO_LOW
+ * 2 = PO_HIGH
+ *
+ * (see Documentation/hwmon/ds620)
+ */
+ int pomode;
+};
+
+#endif /* _LINUX_DS620_H */
diff --git a/include/linux/i2c/i2c-hid.h b/include/linux/i2c/i2c-hid.h
new file mode 100644
index 000000000..7aa901d92
--- /dev/null
+++ b/include/linux/i2c/i2c-hid.h
@@ -0,0 +1,36 @@
+/*
+ * HID over I2C protocol implementation
+ *
+ * Copyright (c) 2012 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ * Copyright (c) 2012 Ecole Nationale de l'Aviation Civile, France
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#ifndef __LINUX_I2C_HID_H
+#define __LINUX_I2C_HID_H
+
+#include <linux/types.h>
+
+/**
+ * struct i2c_hid_platform_data - used by hid over i2c implementation.
+ * @hid_descriptor_address: i2c register where the HID descriptor is stored.
+ *
+ * Note that it is the responsibility of the platform driver (or the acpi 5.0
+ * driver, or the flattened device tree) to set up the irq related to the gpio in
+ * the struct i2c_board_info.
+ * The platform driver should also set up the gpio according to the device:
+ *
+ * A typical example is the following:
+ * irq = gpio_to_irq(intr_gpio);
+ * hkdk4412_i2c_devs5[0].irq = irq; // store the irq in i2c_board_info
+ * gpio_request(intr_gpio, "elan-irq");
+ * s3c_gpio_setpull(intr_gpio, S3C_GPIO_PULL_UP);
+ */
+struct i2c_hid_platform_data {
+ u16 hid_descriptor_address;
+};
+
+#endif /* __LINUX_I2C_HID_H */
diff --git a/include/linux/i2c/i2c-rcar.h b/include/linux/i2c/i2c-rcar.h
new file mode 100644
index 000000000..496f5c2b2
--- /dev/null
+++ b/include/linux/i2c/i2c-rcar.h
@@ -0,0 +1,10 @@
+#ifndef __I2C_R_CAR_H__
+#define __I2C_R_CAR_H__
+
+#include <linux/platform_device.h>
+
+struct i2c_rcar_platform_data {
+ u32 bus_speed;
+};
+
+#endif /* __I2C_R_CAR_H__ */
diff --git a/include/linux/i2c/i2c-sh_mobile.h b/include/linux/i2c/i2c-sh_mobile.h
new file mode 100644
index 000000000..06e308979
--- /dev/null
+++ b/include/linux/i2c/i2c-sh_mobile.h
@@ -0,0 +1,11 @@
+#ifndef __I2C_SH_MOBILE_H__
+#define __I2C_SH_MOBILE_H__
+
+#include <linux/platform_device.h>
+
+struct i2c_sh_mobile_platform_data {
+ unsigned long bus_speed;
+ unsigned int clks_per_count;
+};
+
+#endif /* __I2C_SH_MOBILE_H__ */
diff --git a/include/linux/i2c/lm8323.h b/include/linux/i2c/lm8323.h
new file mode 100644
index 000000000..478d668bc
--- /dev/null
+++ b/include/linux/i2c/lm8323.h
@@ -0,0 +1,46 @@
+/*
+ * lm8323.h - Configuration for LM8323 keypad driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation (version 2 of the License only).
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_LM8323_H
+#define __LINUX_LM8323_H
+
+#include <linux/types.h>
+
+/*
+ * Largest keycode that the chip can send, plus one,
+ * so keys can be mapped directly at the index of the
+ * LM8323 keycode instead of subtracting one.
+ */
+#define LM8323_KEYMAP_SIZE (0x7f + 1)
+
+#define LM8323_NUM_PWMS 3
+
+struct lm8323_platform_data {
+ int debounce_time; /* Time to watch for key bouncing, in ms. */
+ int active_time; /* Idle time until sleep, in ms. */
+
+ int size_x;
+ int size_y;
+ bool repeat;
+ const unsigned short *keymap;
+
+ const char *pwm_names[LM8323_NUM_PWMS];
+
+ const char *name; /* Device name. */
+};
+
+#endif /* __LINUX_LM8323_H */
diff --git a/include/linux/i2c/ltc4245.h b/include/linux/i2c/ltc4245.h
new file mode 100644
index 000000000..56bda4be0
--- /dev/null
+++ b/include/linux/i2c/ltc4245.h
@@ -0,0 +1,21 @@
+/*
+ * Platform Data for LTC4245 hardware monitor chip
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef LINUX_LTC4245_H
+#define LINUX_LTC4245_H
+
+#include <linux/types.h>
+
+struct ltc4245_platform_data {
+ bool use_extra_gpios;
+};
+
+#endif /* LINUX_LTC4245_H */
diff --git a/include/linux/i2c/max6639.h b/include/linux/i2c/max6639.h
new file mode 100644
index 000000000..6011c4203
--- /dev/null
+++ b/include/linux/i2c/max6639.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_MAX6639_H
+#define _LINUX_MAX6639_H
+
+#include <linux/types.h>
+
+/* platform data for the MAX6639 temperature sensor and fan control */
+
+struct max6639_platform_data {
+ bool pwm_polarity; /* Polarity low (0) or high (1, default) */
+ int ppr; /* Pulses per rotation 1..4 (default == 2) */
+ int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */
+};
+
+#endif /* _LINUX_MAX6639_H */
diff --git a/include/linux/i2c/max732x.h b/include/linux/i2c/max732x.h
new file mode 100644
index 000000000..c04bac8bf
--- /dev/null
+++ b/include/linux/i2c/max732x.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_I2C_MAX732X_H
+#define __LINUX_I2C_MAX732X_H
+
+/* platform data for the MAX732x 8/16-bit I/O expander driver */
+
+struct max732x_platform_data {
+ /* number of the first GPIO */
+ unsigned gpio_base;
+
+ /* interrupt base */
+ int irq_base;
+
+ void *context; /* param to setup/teardown */
+
+ int (*setup)(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio,
+ void *context);
+};
+#endif /* __LINUX_I2C_MAX732X_H */
diff --git a/include/linux/i2c/mcs.h b/include/linux/i2c/mcs.h
new file mode 100644
index 000000000..61bb18a4f
--- /dev/null
+++ b/include/linux/i2c/mcs.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MCS_H
+#define __LINUX_MCS_H
+
+#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff))
+#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff)
+#define MCS_KEY_CODE(v) ((v) & 0xffff)
+
+struct mcs_platform_data {
+ void (*poweron)(bool);
+ void (*cfg_pin)(void);
+
+ /* touchscreen */
+ unsigned int x_size;
+ unsigned int y_size;
+
+ /* touchkey */
+ const u32 *keymap;
+ unsigned int keymap_size;
+ unsigned int key_maxval;
+ bool no_autorepeat;
+};
+
+#endif /* __LINUX_MCS_H */
diff --git a/include/linux/i2c/mms114.h b/include/linux/i2c/mms114.h
new file mode 100644
index 000000000..5722ebfb2
--- /dev/null
+++ b/include/linux/i2c/mms114.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MMS114_H
+#define __LINUX_MMS114_H
+
+struct mms114_platform_data {
+ unsigned int x_size;
+ unsigned int y_size;
+ unsigned int contact_threshold;
+ unsigned int moving_threshold;
+ bool x_invert;
+ bool y_invert;
+
+ void (*cfg_pin)(bool);
+};
+
+#endif /* __LINUX_MMS114_H */
diff --git a/include/linux/i2c/mpr121_touchkey.h b/include/linux/i2c/mpr121_touchkey.h
new file mode 100644
index 000000000..f0bcc38bb
--- /dev/null
+++ b/include/linux/i2c/mpr121_touchkey.h
@@ -0,0 +1,20 @@
+/* Header file for Freescale MPR121 Capacitive Touch Sensor */
+
+#ifndef _MPR121_TOUCHKEY_H
+#define _MPR121_TOUCHKEY_H
+
+/**
+ * struct mpr121_platform_data - platform data for mpr121 sensor
+ * @keymap: pointer to array of KEY_* values representing keymap
+ * @keymap_size: size of the keymap
+ * @wakeup: configure the button as a wake-up source
+ * @vdd_uv: VDD voltage in uV
+ */
+struct mpr121_platform_data {
+ const unsigned short *keymap;
+ unsigned int keymap_size;
+ bool wakeup;
+ int vdd_uv;
+};
+
+#endif /* _MPR121_TOUCHKEY_H */
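
To make the keymap fields above concrete, here is a hedged example of a platform data definition for an assumed four-electrode layout; none of the values come from the header.

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/i2c/mpr121_touchkey.h>

/* hypothetical electrode-to-keycode mapping */
static const unsigned short example_mpr121_keymap[] = {
	KEY_HOME, KEY_MENU, KEY_BACK, KEY_SEARCH,
};

static struct mpr121_platform_data example_mpr121_pdata = {
	.keymap		= example_mpr121_keymap,
	.keymap_size	= ARRAY_SIZE(example_mpr121_keymap),
	.wakeup		= false,
	.vdd_uv		= 3300000,	/* assumed 3.3 V supply */
};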
diff --git a/include/linux/i2c/pca954x.h b/include/linux/i2c/pca954x.h
new file mode 100644
index 000000000..1712677d5
--- /dev/null
+++ b/include/linux/i2c/pca954x.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * pca954x.h - I2C multiplexer/switch support
+ *
+ * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
+ * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
+ * Michael Lawnick <michael.lawnick.ext@nsn.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#ifndef _LINUX_I2C_PCA954X_H
+#define _LINUX_I2C_PCA954X_H
+
+/* Platform data for the PCA954x I2C multiplexers */
+
+/* Per channel initialisation data:
+ * @adap_id: bus number for the adapter. 0 = don't care
+ * @deselect_on_exit: set to 1 if your hardware needs this channel
+ *	deselected after each transaction.
+ */
+struct pca954x_platform_mode {
+ int adap_id;
+ unsigned int deselect_on_exit:1;
+ unsigned int class;
+};
+
+/* Per mux/switch data, used with i2c_register_board_info */
+struct pca954x_platform_data {
+ struct pca954x_platform_mode *modes;
+ int num_modes;
+};
+
+#endif /* _LINUX_I2C_PCA954X_H */
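
Since the comment above says this data is used with i2c_register_board_info(), here is an illustrative sketch for an assumed four-channel mux at address 0x70 whose downstream channels should appear as buses 2..5; the device name, address and bus numbers are assumptions.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/i2c/pca954x.h>

static struct pca954x_platform_mode example_pca954x_modes[] = {
	{ .adap_id = 2, .deselect_on_exit = 1 },
	{ .adap_id = 3, .deselect_on_exit = 1 },
	{ .adap_id = 4 },
	{ .adap_id = 5 },
};

static struct pca954x_platform_data example_pca954x_pdata = {
	.modes		= example_pca954x_modes,
	.num_modes	= ARRAY_SIZE(example_pca954x_modes),
};

static struct i2c_board_info example_mux_info __initdata = {
	I2C_BOARD_INFO("pca9544", 0x70),
	.platform_data = &example_pca954x_pdata,
};

/* registered from board init code, e.g.:
 *	i2c_register_board_info(1, &example_mux_info, 1);
 */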
diff --git a/include/linux/i2c/pcf857x.h b/include/linux/i2c/pcf857x.h
new file mode 100644
index 000000000..0767a2a6b
--- /dev/null
+++ b/include/linux/i2c/pcf857x.h
@@ -0,0 +1,44 @@
+#ifndef __LINUX_PCF857X_H
+#define __LINUX_PCF857X_H
+
+/**
+ * struct pcf857x_platform_data - data to set up pcf857x driver
+ * @gpio_base: number of the chip's first GPIO
+ * @n_latch: optional bit-inverse of initial register value; if
+ * you leave this initialized to zero the driver will act
+ * like the chip was just reset
+ * @setup: optional callback issued once the GPIOs are valid
+ * @teardown: optional callback issued before the GPIOs are invalidated
+ * @context: optional parameter passed to setup() and teardown()
+ *
+ * In addition to the I2C_BOARD_INFO() state appropriate to each chip,
+ * the i2c_board_info used with the pcf857x driver must provide its
+ * platform_data (pointer to one of these structures) with at least
+ * the gpio_base value initialized.
+ *
+ * The @setup callback may be used with the kind of board-specific glue
+ * which hands the (now-valid) GPIOs to other drivers, or which puts
+ * devices in their initial states using these GPIOs.
+ *
+ * These GPIO chips are only "quasi-bidirectional"; read the chip specs
+ * to understand the behavior. They don't have separate registers to
+ * record which pins are used for input or output, record which output
+ * values are driven, or provide access to input values. That must be
+ * inferred by reading the chip's value and knowing the last value written
+ * to it. If you leave n_latch initialized to zero, that last written
+ * value is presumed to be all ones (as if the chip were just reset).
+ */
+struct pcf857x_platform_data {
+ unsigned gpio_base;
+ unsigned n_latch;
+
+ int (*setup)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ void *context;
+};
+
+#endif /* __LINUX_PCF857X_H */
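
The kernel-doc above says the i2c_board_info must carry this platform data with at least gpio_base set; a hedged sketch of that wiring, with a trivial setup() hook, follows. The chip name, address and GPIO base are assumptions.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>

static int example_pcf857x_setup(struct i2c_client *client,
				 int gpio, unsigned ngpio, void *context)
{
	/* board glue would hand the now-valid GPIOs to other drivers here */
	return 0;
}

static struct pcf857x_platform_data example_pcf857x_pdata = {
	.gpio_base	= 160,	/* expander lines start at GPIO 160 */
	.setup		= example_pcf857x_setup,
};

static struct i2c_board_info example_expander_info __initdata = {
	I2C_BOARD_INFO("pcf8574", 0x20),
	.platform_data = &example_pcf857x_pdata,
};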
diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h
new file mode 100644
index 000000000..ee3c2aba2
--- /dev/null
+++ b/include/linux/i2c/pmbus.h
@@ -0,0 +1,49 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PMBUS_H_
+#define _PMBUS_H_
+
+/* flags */
+
+/*
+ * PMBUS_SKIP_STATUS_CHECK
+ *
+ * During register detection, skip checking the status register for
+ * communication or command errors.
+ *
+ * Some PMBus chips respond with valid data when trying to read an unsupported
+ * register. For such chips, checking the status register is mandatory when
+ * trying to determine if a chip register exists or not.
+ * Other PMBus chips don't support the STATUS_CML register, or report
+ * communication errors for no explicable reason. For such chips, checking
+ * the status register must be disabled.
+ */
+#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+
+struct pmbus_platform_data {
+ u32 flags; /* Device specific flags */
+
+ /* regulator support */
+ int num_regulators;
+ struct regulator_init_data *reg_init_data;
+};
+
+#endif /* _PMBUS_H_ */
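
As a brief, hedged illustration of the flag described above, board code for a chip whose status register cannot be relied on during detection might pass platform data like this; the device name and address are assumptions.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pmbus.h>

static struct pmbus_platform_data example_pmbus_pdata = {
	.flags = PMBUS_SKIP_STATUS_CHECK,
};

static struct i2c_board_info example_pmbus_info __initdata = {
	I2C_BOARD_INFO("pmbus", 0x58),
	.platform_data = &example_pmbus_pdata,
};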
diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/i2c/pxa-i2c.h
new file mode 100644
index 000000000..53aab243c
--- /dev/null
+++ b/include/linux/i2c/pxa-i2c.h
@@ -0,0 +1,85 @@
+/*
+ * i2c_pxa.h
+ *
+ * Copyright (C) 2002 Intrinsyc Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef _I2C_PXA_H_
+#define _I2C_PXA_H_
+
+#if 0
+#define DEF_TIMEOUT 3
+#else
+/* Use a longer timeout, since we may well be operating in a
+ * multi-master environment.
+ */
+#define DEF_TIMEOUT 32
+#endif
+
+#define BUS_ERROR (-EREMOTEIO)
+#define XFER_NAKED (-ECONNREFUSED)
+#define I2C_RETRY (-2000) /* an error has occurred; retry transmit */
+
+/* ICR initialize bit values
+*
+* 15. FM 0 (100 kHz operation)
+* 14. UR 0 (No unit reset)
+* 13. SADIE 0 (Disables the unit from interrupting on slave addresses
+* matching its slave address)
+* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration
+* in master mode)
+* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode)
+* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent)
+* 9. IRFIE 1 (Enable interrupts from full buffer received)
+* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty)
+* 7. GCD 1 (Disables i2c unit response to general call messages as a slave)
+* 6. IUE 0 (Disable unit until we change settings)
+* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL))
+* 4. MA 0 (Only send stop with the ICR stop bit)
+* 3. TB 0 (We are not transmitting a byte initially)
+* 2. ACKNAK 0 (Send an ACK after the unit receives a byte)
+* 1. STOP 0 (Do not send a STOP)
+* 0. START 0 (Do not send a START)
+*
+*/
+#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE)
+
+/* I2C status register init values
+ *
+ * 10. BED 1 (Clear bus error detected)
+ * 9. SAD 1 (Clear slave address detected)
+ * 7. IRF 1 (Clear IDBR Receive Full)
+ * 6. ITE 1 (Clear IDBR Transmit Empty)
+ * 5. ALD 1 (Clear Arbitration Loss Detected)
+ * 4. SSD 1 (Clear Slave Stop Detected)
+ */
+#define I2C_ISR_INIT 0x7FF /* status register init */
+
+struct i2c_slave_client;
+
+struct i2c_pxa_platform_data {
+ unsigned int slave_addr;
+ struct i2c_slave_client *slave;
+ unsigned int class;
+ unsigned int use_pio :1;
+ unsigned int fast_mode :1;
+ unsigned int high_mode:1;
+ unsigned char master_code;
+ unsigned long rate;
+};
+
+extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
+
+#ifdef CONFIG_PXA27x
+extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info);
+#endif
+
+#ifdef CONFIG_PXA3xx
+extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info);
+#endif
+
+#endif
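
A minimal sketch of how a PXA board file might use the declarations above; the chosen options are arbitrary examples.

#include <linux/i2c/pxa-i2c.h>

/* hypothetical configuration: interrupt-driven transfers in fast mode */
static struct i2c_pxa_platform_data example_pxa_i2c_pdata = {
	.use_pio	= 0,
	.fast_mode	= 1,	/* 400 kHz */
};

/* typically called from the board's init code:
 *	pxa_set_i2c_info(&example_pxa_i2c_pdata);
 */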
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h
new file mode 100644
index 000000000..52baa79d6
--- /dev/null
+++ b/include/linux/i2c/sx150x.h
@@ -0,0 +1,82 @@
+/*
+ * Driver for the Semtech SX150x I2C GPIO Expanders
+ *
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+#ifndef __LINUX_I2C_SX150X_H
+#define __LINUX_I2C_SX150X_H
+
+/**
+ * struct sx150x_platform_data - config data for SX150x driver
+ * @gpio_base: The index number of the first GPIO assigned to this
+ * GPIO expander. The expander will create a block of
+ * consecutively numbered gpios beginning at the given base,
+ * with the size of the block depending on the model of the
+ * expander chip.
+ * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
+ * instead of as an oscillator, increasing the size of the
+ * GP(I)O pool created by this expander by one. The
+ * output-only GPO pin will be added at the end of the block.
+ * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
+ * for each IO line in the expander. Setting the bit at
+ * position n will enable the pull-up for the IO at
+ * the corresponding offset. For chips with fewer than
+ * 16 IO pins, high-end bits are ignored.
+ * @io_pulldn_ena: A bit-mask which enables or disables the pull-down
+ * resistor for each IO line in the expander. Setting the
+ * bit at position n will enable the pull-down for the IO at
+ * the corresponding offset. For chips with fewer than
+ * 16 IO pins, high-end bits are ignored.
+ * @io_open_drain_ena: A bit-mask which enables or disables open-drain
+ * operation for each IO line in the expander. Setting the
+ * bit at position n enables open-drain operation for
+ * the IO at the corresponding offset. Clearing the bit
+ * enables regular push-pull operation for that IO.
+ * For chips with fewer than 16 IO pins, high-end bits
+ * are ignored.
+ * @io_polarity: A bit-mask which enables polarity inversion for each IO line
+ * in the expander. Setting the bit at position n inverts
+ * the polarity of that IO line, while clearing it results
+ * in normal polarity. For chips with fewer than 16 IO pins,
+ * high-end bits are ignored.
+ * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
+ * is connected, via which it reports interrupt events
+ * across all GPIO lines. This must be a real,
+ * pre-existing IRQ line.
+ * Setting this value < 0 disables the irq_chip functionality
+ * of the driver.
+ * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
+ * IRQ lines will appear. Similarly to gpio_base, the expander
+ * will create a block of irqs beginning at this number.
+ * This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ * reset of the chip at the beginning of the probe
+ * in order to place it in a known state.
+ */
+struct sx150x_platform_data {
+ unsigned gpio_base;
+ bool oscio_is_gpo;
+ u16 io_pullup_ena;
+ u16 io_pulldn_ena;
+ u16 io_open_drain_ena;
+ u16 io_polarity;
+ int irq_summary;
+ unsigned irq_base;
+ bool reset_during_probe;
+};
+
+#endif /* __LINUX_I2C_SX150X_H */
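
To make the bit-mask semantics described above concrete, here is a hedged example configuration for an assumed 16-line expander; every value (GPIO base, IRQ numbers, masks) is an illustration, not a recommendation.

#include <linux/i2c/sx150x.h>

static struct sx150x_platform_data example_sx150x_pdata = {
	.gpio_base		= 300,		/* expander GPIOs become 300..315 */
	.oscio_is_gpo		= false,
	.io_pullup_ena		= 0x000f,	/* pull-ups on lines 0..3 */
	.io_pulldn_ena		= 0x0000,
	.io_open_drain_ena	= 0x0080,	/* line 7 is open-drain */
	.io_polarity		= 0x0000,	/* no inversion */
	.irq_summary		= 115,		/* INT wired to this existing IRQ */
	.irq_base		= 400,		/* virtual IRQs 400..415 */
	.reset_during_probe	= true,
};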
diff --git a/include/linux/i2c/tc35876x.h b/include/linux/i2c/tc35876x.h
new file mode 100644
index 000000000..cd6a51c71
--- /dev/null
+++ b/include/linux/i2c/tc35876x.h
@@ -0,0 +1,11 @@
+
+#ifndef _TC35876X_H
+#define _TC35876X_H
+
+struct tc35876x_platform_data {
+ int gpio_bridge_reset;
+ int gpio_panel_bl_en;
+ int gpio_panel_vadd;
+};
+
+#endif /* _TC35876X_H */
diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h
new file mode 100644
index 000000000..08aa92278
--- /dev/null
+++ b/include/linux/i2c/tps65010.h
@@ -0,0 +1,205 @@
+/* linux/i2c/tps65010.h
+ *
+ * Functions to access TPS65010 power management device.
+ *
+ * Copyright (C) 2004 Dirk Behme <dirk.behme@de.bosch.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_I2C_TPS65010_H
+#define __LINUX_I2C_TPS65010_H
+
+/*
+ * ----------------------------------------------------------------------------
+ * Registers, all 8 bits
+ * ----------------------------------------------------------------------------
+ */
+
+#define TPS_CHGSTATUS 0x01
+# define TPS_CHG_USB (1 << 7)
+# define TPS_CHG_AC (1 << 6)
+# define TPS_CHG_THERM (1 << 5)
+# define TPS_CHG_TERM (1 << 4)
+# define TPS_CHG_TAPER_TMO (1 << 3)
+# define TPS_CHG_CHG_TMO (1 << 2)
+# define TPS_CHG_PRECHG_TMO (1 << 1)
+# define TPS_CHG_TEMP_ERR (1 << 0)
+#define TPS_REGSTATUS 0x02
+# define TPS_REG_ONOFF (1 << 7)
+# define TPS_REG_COVER (1 << 6)
+# define TPS_REG_UVLO (1 << 5)
+# define TPS_REG_NO_CHG (1 << 4) /* tps65013 */
+# define TPS_REG_PG_LD02 (1 << 3)
+# define TPS_REG_PG_LD01 (1 << 2)
+# define TPS_REG_PG_MAIN (1 << 1)
+# define TPS_REG_PG_CORE (1 << 0)
+#define TPS_MASK1 0x03
+#define TPS_MASK2 0x04
+#define TPS_ACKINT1 0x05
+#define TPS_ACKINT2 0x06
+#define TPS_CHGCONFIG 0x07
+# define TPS_CHARGE_POR (1 << 7) /* 65010/65012 */
+# define TPS65013_AUA (1 << 7) /* 65011/65013 */
+# define TPS_CHARGE_RESET (1 << 6)
+# define TPS_CHARGE_FAST (1 << 5)
+# define TPS_CHARGE_CURRENT (3 << 3)
+# define TPS_VBUS_500MA (1 << 2)
+# define TPS_VBUS_CHARGING (1 << 1)
+# define TPS_CHARGE_ENABLE (1 << 0)
+#define TPS_LED1_ON 0x08
+#define TPS_LED1_PER 0x09
+#define TPS_LED2_ON 0x0a
+#define TPS_LED2_PER 0x0b
+#define TPS_VDCDC1 0x0c
+# define TPS_ENABLE_LP (1 << 3)
+#define TPS_VDCDC2 0x0d
+# define TPS_LP_COREOFF (1 << 7)
+# define TPS_VCORE_1_8V (7 << 4)
+# define TPS_VCORE_1_5V (6 << 4)
+# define TPS_VCORE_1_4V (5 << 4)
+# define TPS_VCORE_1_3V (4 << 4)
+# define TPS_VCORE_1_2V (3 << 4)
+# define TPS_VCORE_1_1V (2 << 4)
+# define TPS_VCORE_1_0V (1 << 4)
+# define TPS_VCORE_0_85V (0 << 4)
+# define TPS_VCORE_LP_1_2V (3 << 2)
+# define TPS_VCORE_LP_1_1V (2 << 2)
+# define TPS_VCORE_LP_1_0V (1 << 2)
+# define TPS_VCORE_LP_0_85V (0 << 2)
+# define TPS_VIB (1 << 1)
+# define TPS_VCORE_DISCH (1 << 0)
+#define TPS_VREGS1 0x0e
+# define TPS_LDO2_ENABLE (1 << 7)
+# define TPS_LDO2_OFF (1 << 6)
+# define TPS_VLDO2_3_0V (3 << 4)
+# define TPS_VLDO2_2_75V (2 << 4)
+# define TPS_VLDO2_2_5V (1 << 4)
+# define TPS_VLDO2_1_8V (0 << 4)
+# define TPS_LDO1_ENABLE (1 << 3)
+# define TPS_LDO1_OFF (1 << 2)
+# define TPS_VLDO1_3_0V (3 << 0)
+# define TPS_VLDO1_2_75V (2 << 0)
+# define TPS_VLDO1_2_5V (1 << 0)
+# define TPS_VLDO1_ADJ (0 << 0)
+#define TPS_MASK3 0x0f
+#define TPS_DEFGPIO 0x10
+
+/*
+ * ----------------------------------------------------------------------------
+ * Macros used by exported functions
+ * ----------------------------------------------------------------------------
+ */
+
+#define LED1 1
+#define LED2 2
+#define OFF 0
+#define ON 1
+#define BLINK 2
+#define GPIO1 1
+#define GPIO2 2
+#define GPIO3 3
+#define GPIO4 4
+#define LOW 0
+#define HIGH 1
+
+/*
+ * ----------------------------------------------------------------------------
+ * Exported functions
+ * ----------------------------------------------------------------------------
+ */
+
+/* Draw from VBUS:
+ * 0 mA -- DON'T DRAW (might supply power instead)
+ * 100 mA -- usb unit load (slowest charge rate)
+ * 500 mA -- usb high power (fast battery charge)
+ */
+extern int tps65010_set_vbus_draw(unsigned mA);
+
+/* tps65010_set_gpio_out_value parameter:
+ * gpio: GPIO1, GPIO2, GPIO3 or GPIO4
+ * value: LOW or HIGH
+ */
+extern int tps65010_set_gpio_out_value(unsigned gpio, unsigned value);
+
+/* tps65010_set_led parameter:
+ * led: LED1 or LED2
+ * mode: ON, OFF or BLINK
+ */
+extern int tps65010_set_led(unsigned led, unsigned mode);
+
+/* tps65010_set_vib parameter:
+ * value: ON or OFF
+ */
+extern int tps65010_set_vib(unsigned value);
+
+/* tps65010_set_low_pwr parameter:
+ * mode: ON or OFF
+ */
+extern int tps65010_set_low_pwr(unsigned mode);
+
+/* tps65010_config_vregs1 parameter:
+ * value to be written to VREGS1 register
+ * Note: The complete register is written; set all bits you need.
+ */
+extern int tps65010_config_vregs1(unsigned value);
+
+/* tps65013_set_low_pwr parameter:
+ * mode: ON or OFF
+ */
+extern int tps65013_set_low_pwr(unsigned mode);
+
+/* tps65010_config_vdcdc2 parameter:
+ * value to be written to VDCDC2
+ */
+extern int tps65010_config_vdcdc2(unsigned value);
+
+struct i2c_client;
+
+/**
+ * struct tps65010_board - packages GPIO and LED lines
+ * @base: the GPIO number to assign to GPIO-1
+ * @outmask: bit (N-1) is set to allow GPIO-N to be used as an
+ * (open drain) output
+ * @setup: optional callback issued once the GPIOs are valid
+ * @teardown: optional callback issued before the GPIOs are invalidated
+ * @context: optional parameter passed to setup() and teardown()
+ *
+ * Board data may be used to package the GPIO (and LED) lines for use
+ * by the generic GPIO and LED frameworks. The first four GPIOs
+ * starting at gpio_base are GPIO1..GPIO4. The next two are LED1/nPG
+ * and LED2 (with hardware blinking capability, not currently exposed).
+ *
+ * The @setup callback may be used with the kind of board-specific glue
+ * which hands the (now-valid) GPIOs to other drivers, or which puts
+ * devices in their initial states using these GPIOs.
+ */
+struct tps65010_board {
+ int base;
+ unsigned outmask;
+
+ int (*setup)(struct i2c_client *client, void *context);
+ int (*teardown)(struct i2c_client *client, void *context);
+ void *context;
+};
+
+#endif /* __LINUX_I2C_TPS65010_H */
+
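
A short, hedged sketch of how a consumer might call the exported helpers declared above, using the LED/GPIO macros from this header; the particular choices are arbitrary.

#include <linux/i2c/tps65010.h>

static void example_tps65010_usage(void)
{
	/* limit VBUS draw to one USB unit load (100 mA) */
	tps65010_set_vbus_draw(100);

	/* blink LED1 and drive GPIO2 high */
	tps65010_set_led(LED1, BLINK);
	tps65010_set_gpio_out_value(GPIO2, HIGH);
}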
diff --git a/include/linux/i2c/tsc2007.h b/include/linux/i2c/tsc2007.h
new file mode 100644
index 000000000..4f35b6ad3
--- /dev/null
+++ b/include/linux/i2c/tsc2007.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_I2C_TSC2007_H
+#define __LINUX_I2C_TSC2007_H
+
+/* linux/i2c/tsc2007.h */
+
+struct tsc2007_platform_data {
+ u16 model; /* 2007. */
+ u16 x_plate_ohms; /* must be non-zero value */
+ u16 max_rt; /* max. resistance above which samples are ignored */
+ unsigned long poll_period; /* time (in ms) between samples */
+ int fuzzx; /* fuzz factor for X, Y and pressure axes */
+ int fuzzy;
+ int fuzzz;
+
+ int (*get_pendown_state)(struct device *);
+ /* If needed, clear 2nd level interrupt source */
+ void (*clear_penirq)(void);
+ int (*init_platform_hw)(void);
+ void (*exit_platform_hw)(void);
+};
+
+#endif
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
new file mode 100644
index 000000000..0bc03f100
--- /dev/null
+++ b/include/linux/i2c/twl.h
@@ -0,0 +1,875 @@
+/*
+ * twl4030.h - header for TWL4030 PM and audio CODEC device
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * Based on tlv320aic23.c:
+ * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __TWL_H_
+#define __TWL_H_
+
+#include <linux/types.h>
+#include <linux/input/matrix_keypad.h>
+
+/*
+ * Using the twl4030 core we address registers using a pair
+ * { module id, relative register offset }
+ * which that core then maps to the relevant
+ * { i2c slave, absolute register address }
+ *
+ * The module IDs are meaningful only to the twl4030 core code,
+ * which uses them as array indices to look up the first register
+ * address each module uses within a given i2c slave.
+ */
+
+/* Module IDs for similar functionalities found in twl4030/twl6030 */
+enum twl_module_ids {
+ TWL_MODULE_USB,
+ TWL_MODULE_PIH,
+ TWL_MODULE_MAIN_CHARGE,
+ TWL_MODULE_PM_MASTER,
+ TWL_MODULE_PM_RECEIVER,
+
+ TWL_MODULE_RTC,
+ TWL_MODULE_PWM,
+ TWL_MODULE_LED,
+ TWL_MODULE_SECURED_REG,
+
+ TWL_MODULE_LAST,
+};
+
+/* Modules only available in twl4030 series */
+enum twl4030_module_ids {
+ TWL4030_MODULE_AUDIO_VOICE = TWL_MODULE_LAST,
+ TWL4030_MODULE_GPIO,
+ TWL4030_MODULE_INTBR,
+ TWL4030_MODULE_TEST,
+ TWL4030_MODULE_KEYPAD,
+
+ TWL4030_MODULE_MADC,
+ TWL4030_MODULE_INTERRUPTS,
+ TWL4030_MODULE_PRECHARGE,
+ TWL4030_MODULE_BACKUP,
+ TWL4030_MODULE_INT,
+
+ TWL5031_MODULE_ACCESSORY,
+ TWL5031_MODULE_INTERRUPTS,
+
+ TWL4030_MODULE_LAST,
+};
+
+/* Modules only available in twl6030 series */
+enum twl6030_module_ids {
+ TWL6030_MODULE_ID0 = TWL_MODULE_LAST,
+ TWL6030_MODULE_ID1,
+ TWL6030_MODULE_ID2,
+ TWL6030_MODULE_GPADC,
+ TWL6030_MODULE_GASGAUGE,
+
+ TWL6030_MODULE_LAST,
+};
+
+/* Until the clients have been converted to use TWL_MODULE_LED */
+#define TWL4030_MODULE_LED TWL_MODULE_LED
+
+#define GPIO_INTR_OFFSET 0
+#define KEYPAD_INTR_OFFSET 1
+#define BCI_INTR_OFFSET 2
+#define MADC_INTR_OFFSET 3
+#define USB_INTR_OFFSET 4
+#define CHARGERFAULT_INTR_OFFSET 5
+#define BCI_PRES_INTR_OFFSET 9
+#define USB_PRES_INTR_OFFSET 10
+#define RTC_INTR_OFFSET 11
+
+/*
+ * Offset from TWL6030_IRQ_BASE / pdata->irq_base
+ */
+#define PWR_INTR_OFFSET 0
+#define HOTDIE_INTR_OFFSET 12
+#define SMPSLDO_INTR_OFFSET 13
+#define BATDETECT_INTR_OFFSET 14
+#define SIMDETECT_INTR_OFFSET 15
+#define MMCDETECT_INTR_OFFSET 16
+#define GASGAUGE_INTR_OFFSET 17
+#define USBOTG_INTR_OFFSET 4
+#define CHARGER_INTR_OFFSET 2
+#define RSV_INTR_OFFSET 0
+
+/* INT register offsets */
+#define REG_INT_STS_A 0x00
+#define REG_INT_STS_B 0x01
+#define REG_INT_STS_C 0x02
+
+#define REG_INT_MSK_LINE_A 0x03
+#define REG_INT_MSK_LINE_B 0x04
+#define REG_INT_MSK_LINE_C 0x05
+
+#define REG_INT_MSK_STS_A 0x06
+#define REG_INT_MSK_STS_B 0x07
+#define REG_INT_MSK_STS_C 0x08
+
+/* MASK INT REG GROUP A */
+#define TWL6030_PWR_INT_MASK 0x07
+#define TWL6030_RTC_INT_MASK 0x18
+#define TWL6030_HOTDIE_INT_MASK 0x20
+#define TWL6030_SMPSLDOA_INT_MASK 0xC0
+
+/* MASK INT REG GROUP B */
+#define TWL6030_SMPSLDOB_INT_MASK 0x01
+#define TWL6030_BATDETECT_INT_MASK 0x02
+#define TWL6030_SIMDETECT_INT_MASK 0x04
+#define TWL6030_MMCDETECT_INT_MASK 0x08
+#define TWL6030_GPADC_INT_MASK 0x60
+#define TWL6030_GASGAUGE_INT_MASK 0x80
+
+/* MASK INT REG GROUP C */
+#define TWL6030_USBOTG_INT_MASK 0x0F
+#define TWL6030_CHARGER_CTRL_INT_MASK 0x10
+#define TWL6030_CHARGER_FAULT_INT_MASK 0x60
+
+#define TWL6030_MMCCTRL 0xEE
+#define VMMC_AUTO_OFF (0x1 << 3)
+#define SW_FC (0x1 << 2)
+#define STS_MMC 0x1
+
+#define TWL6030_CFG_INPUT_PUPD3 0xF2
+#define MMC_PU (0x1 << 3)
+#define MMC_PD (0x1 << 2)
+
+#define TWL_SIL_TYPE(rev) ((rev) & 0x00FFFFFF)
+#define TWL_SIL_REV(rev) ((rev) >> 24)
+#define TWL_SIL_5030 0x09002F
+#define TWL5030_REV_1_0 0x00
+#define TWL5030_REV_1_1 0x10
+#define TWL5030_REV_1_2 0x30
+
+#define TWL4030_CLASS_ID 0x4030
+#define TWL6030_CLASS_ID 0x6030
+unsigned int twl_rev(void);
+#define GET_TWL_REV (twl_rev())
+#define TWL_CLASS_IS(class, id) \
+static inline int twl_class_is_ ##class(void) \
+{ \
+ return ((id) == (GET_TWL_REV)) ? 1 : 0; \
+}
+
+TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
+TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
+
+/* Set the regcache bypass for the regmap associated with the module */
+int twl_set_regcache_bypass(u8 mod_no, bool enable);
+
+/*
+ * Read and write several 8-bit registers at once.
+ */
+int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
+
+/*
+ * Read and write single 8-bit registers
+ */
+static inline int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg) {
+ return twl_i2c_write(mod_no, &val, reg, 1);
+}
+
+static inline int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg) {
+ return twl_i2c_read(mod_no, val, reg, 1);
+}
+
+static inline int twl_i2c_write_u16(u8 mod_no, u16 val, u8 reg) {
+ val = cpu_to_le16(val);
+ return twl_i2c_write(mod_no, (u8*) &val, reg, 2);
+}
+
+static inline int twl_i2c_read_u16(u8 mod_no, u16 *val, u8 reg) {
+ int ret;
+ ret = twl_i2c_read(mod_no, (u8*) val, reg, 2);
+ *val = le16_to_cpu(*val);
+ return ret;
+}
+
+int twl_get_type(void);
+int twl_get_version(void);
+int twl_get_hfclk_rate(void);
+
+int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
+int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
+
+/* Card detect Configuration for MMC1 Controller on OMAP4 */
+#ifdef CONFIG_TWL4030_CORE
+int twl6030_mmc_card_detect_config(void);
+#else
+static inline int twl6030_mmc_card_detect_config(void)
+{
+ pr_debug("twl6030_mmc_card_detect_config not supported\n");
+ return 0;
+}
+#endif
+
+/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */
+#ifdef CONFIG_TWL4030_CORE
+int twl6030_mmc_card_detect(struct device *dev, int slot);
+#else
+static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
+{
+ pr_debug("Call back twl6030_mmc_card_detect not supported\n");
+ return -EIO;
+}
+#endif
+/*----------------------------------------------------------------------*/
+
+/*
+ * NOTE: at up to 1024 registers, this is a big chip.
+ *
+ * Avoid putting register declarations in this file; put them in a
+ * driver-private file instead, unless some of the registers in a block
+ * need to be shared with other drivers. One example is blocks that
+ * have Secondary IRQ Handler (SIH) registers.
+ */
+
+#define TWL4030_SIH_CTRL_EXCLEN_MASK BIT(0)
+#define TWL4030_SIH_CTRL_PENDDIS_MASK BIT(1)
+#define TWL4030_SIH_CTRL_COR_MASK BIT(2)
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * GPIO Block Register offsets (use TWL4030_MODULE_GPIO)
+ */
+
+#define REG_GPIODATAIN1 0x0
+#define REG_GPIODATAIN2 0x1
+#define REG_GPIODATAIN3 0x2
+#define REG_GPIODATADIR1 0x3
+#define REG_GPIODATADIR2 0x4
+#define REG_GPIODATADIR3 0x5
+#define REG_GPIODATAOUT1 0x6
+#define REG_GPIODATAOUT2 0x7
+#define REG_GPIODATAOUT3 0x8
+#define REG_CLEARGPIODATAOUT1 0x9
+#define REG_CLEARGPIODATAOUT2 0xA
+#define REG_CLEARGPIODATAOUT3 0xB
+#define REG_SETGPIODATAOUT1 0xC
+#define REG_SETGPIODATAOUT2 0xD
+#define REG_SETGPIODATAOUT3 0xE
+#define REG_GPIO_DEBEN1 0xF
+#define REG_GPIO_DEBEN2 0x10
+#define REG_GPIO_DEBEN3 0x11
+#define REG_GPIO_CTRL 0x12
+#define REG_GPIOPUPDCTR1 0x13
+#define REG_GPIOPUPDCTR2 0x14
+#define REG_GPIOPUPDCTR3 0x15
+#define REG_GPIOPUPDCTR4 0x16
+#define REG_GPIOPUPDCTR5 0x17
+#define REG_GPIO_ISR1A 0x19
+#define REG_GPIO_ISR2A 0x1A
+#define REG_GPIO_ISR3A 0x1B
+#define REG_GPIO_IMR1A 0x1C
+#define REG_GPIO_IMR2A 0x1D
+#define REG_GPIO_IMR3A 0x1E
+#define REG_GPIO_ISR1B 0x1F
+#define REG_GPIO_ISR2B 0x20
+#define REG_GPIO_ISR3B 0x21
+#define REG_GPIO_IMR1B 0x22
+#define REG_GPIO_IMR2B 0x23
+#define REG_GPIO_IMR3B 0x24
+#define REG_GPIO_EDR1 0x28
+#define REG_GPIO_EDR2 0x29
+#define REG_GPIO_EDR3 0x2A
+#define REG_GPIO_EDR4 0x2B
+#define REG_GPIO_EDR5 0x2C
+#define REG_GPIO_SIH_CTRL 0x2D
+
+/* Up to 18 signals are available as GPIOs, when their
+ * pins are not assigned to another use (such as ULPI/USB).
+ */
+#define TWL4030_GPIO_MAX 18
+
+/*----------------------------------------------------------------------*/
+
+/* Interface Bit Register (INTBR) offsets
+ * (Use TWL4030_MODULE_INTBR)
+ */
+
+#define REG_IDCODE_7_0 0x00
+#define REG_IDCODE_15_8 0x01
+#define REG_IDCODE_16_23 0x02
+#define REG_IDCODE_31_24 0x03
+#define REG_GPPUPDCTR1 0x0F
+#define REG_UNLOCK_TEST_REG 0x12
+
+/* I2C1 and I2C4(SR) SDA/SCL pull-up control bits */
+
+#define I2C_SCL_CTRL_PU BIT(0)
+#define I2C_SDA_CTRL_PU BIT(2)
+#define SR_I2C_SCL_CTRL_PU BIT(4)
+#define SR_I2C_SDA_CTRL_PU BIT(6)
+
+#define TWL_EEPROM_R_UNLOCK 0x49
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Keypad register offsets (use TWL4030_MODULE_KEYPAD)
+ * ... SIH/interrupt only
+ */
+
+#define TWL4030_KEYPAD_KEYP_ISR1 0x11
+#define TWL4030_KEYPAD_KEYP_IMR1 0x12
+#define TWL4030_KEYPAD_KEYP_ISR2 0x13
+#define TWL4030_KEYPAD_KEYP_IMR2 0x14
+#define TWL4030_KEYPAD_KEYP_SIR 0x15 /* test register */
+#define TWL4030_KEYPAD_KEYP_EDR 0x16
+#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Multichannel ADC register offsets (use TWL4030_MODULE_MADC)
+ * ... SIH/interrupt only
+ */
+
+#define TWL4030_MADC_ISR1 0x61
+#define TWL4030_MADC_IMR1 0x62
+#define TWL4030_MADC_ISR2 0x63
+#define TWL4030_MADC_IMR2 0x64
+#define TWL4030_MADC_SIR 0x65 /* test register */
+#define TWL4030_MADC_EDR 0x66
+#define TWL4030_MADC_SIH_CTRL 0x67
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS)
+ */
+
+#define TWL4030_INTERRUPTS_BCIISR1A 0x0
+#define TWL4030_INTERRUPTS_BCIISR2A 0x1
+#define TWL4030_INTERRUPTS_BCIIMR1A 0x2
+#define TWL4030_INTERRUPTS_BCIIMR2A 0x3
+#define TWL4030_INTERRUPTS_BCIISR1B 0x4
+#define TWL4030_INTERRUPTS_BCIISR2B 0x5
+#define TWL4030_INTERRUPTS_BCIIMR1B 0x6
+#define TWL4030_INTERRUPTS_BCIIMR2B 0x7
+#define TWL4030_INTERRUPTS_BCISIR1 0x8 /* test register */
+#define TWL4030_INTERRUPTS_BCISIR2 0x9 /* test register */
+#define TWL4030_INTERRUPTS_BCIEDR1 0xa
+#define TWL4030_INTERRUPTS_BCIEDR2 0xb
+#define TWL4030_INTERRUPTS_BCIEDR3 0xc
+#define TWL4030_INTERRUPTS_BCISIHCTRL 0xd
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Power Interrupt block register offsets (use TWL4030_MODULE_INT)
+ */
+
+#define TWL4030_INT_PWR_ISR1 0x0
+#define TWL4030_INT_PWR_IMR1 0x1
+#define TWL4030_INT_PWR_ISR2 0x2
+#define TWL4030_INT_PWR_IMR2 0x3
+#define TWL4030_INT_PWR_SIR 0x4 /* test register */
+#define TWL4030_INT_PWR_EDR1 0x5
+#define TWL4030_INT_PWR_EDR2 0x6
+#define TWL4030_INT_PWR_SIH_CTRL 0x7
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Accessory Interrupts
+ */
+#define TWL5031_ACIIMR_LSB 0x05
+#define TWL5031_ACIIMR_MSB 0x06
+#define TWL5031_ACIIDR_LSB 0x07
+#define TWL5031_ACIIDR_MSB 0x08
+#define TWL5031_ACCISR1 0x0F
+#define TWL5031_ACCIMR1 0x10
+#define TWL5031_ACCISR2 0x11
+#define TWL5031_ACCIMR2 0x12
+#define TWL5031_ACCSIR 0x13
+#define TWL5031_ACCEDR1 0x14
+#define TWL5031_ACCSIHCTRL 0x15
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Battery Charger Controller
+ */
+
+#define TWL5031_INTERRUPTS_BCIISR1 0x0
+#define TWL5031_INTERRUPTS_BCIIMR1 0x1
+#define TWL5031_INTERRUPTS_BCIISR2 0x2
+#define TWL5031_INTERRUPTS_BCIIMR2 0x3
+#define TWL5031_INTERRUPTS_BCISIR 0x4
+#define TWL5031_INTERRUPTS_BCIEDR1 0x5
+#define TWL5031_INTERRUPTS_BCIEDR2 0x6
+#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * PM Master module register offsets (use TWL4030_MODULE_PM_MASTER)
+ */
+
+#define TWL4030_PM_MASTER_CFG_P1_TRANSITION 0x00
+#define TWL4030_PM_MASTER_CFG_P2_TRANSITION 0x01
+#define TWL4030_PM_MASTER_CFG_P3_TRANSITION 0x02
+#define TWL4030_PM_MASTER_CFG_P123_TRANSITION 0x03
+#define TWL4030_PM_MASTER_STS_BOOT 0x04
+#define TWL4030_PM_MASTER_CFG_BOOT 0x05
+#define TWL4030_PM_MASTER_SHUNDAN 0x06
+#define TWL4030_PM_MASTER_BOOT_BCI 0x07
+#define TWL4030_PM_MASTER_CFG_PWRANA1 0x08
+#define TWL4030_PM_MASTER_CFG_PWRANA2 0x09
+#define TWL4030_PM_MASTER_BACKUP_MISC_STS 0x0b
+#define TWL4030_PM_MASTER_BACKUP_MISC_CFG 0x0c
+#define TWL4030_PM_MASTER_BACKUP_MISC_TST 0x0d
+#define TWL4030_PM_MASTER_PROTECT_KEY 0x0e
+#define TWL4030_PM_MASTER_STS_HW_CONDITIONS 0x0f
+#define TWL4030_PM_MASTER_P1_SW_EVENTS 0x10
+#define TWL4030_PM_MASTER_P2_SW_EVENTS 0x11
+#define TWL4030_PM_MASTER_P3_SW_EVENTS 0x12
+#define TWL4030_PM_MASTER_STS_P123_STATE 0x13
+#define TWL4030_PM_MASTER_PB_CFG 0x14
+#define TWL4030_PM_MASTER_PB_WORD_MSB 0x15
+#define TWL4030_PM_MASTER_PB_WORD_LSB 0x16
+#define TWL4030_PM_MASTER_SEQ_ADD_W2P 0x1c
+#define TWL4030_PM_MASTER_SEQ_ADD_P2A 0x1d
+#define TWL4030_PM_MASTER_SEQ_ADD_A2W 0x1e
+#define TWL4030_PM_MASTER_SEQ_ADD_A2S 0x1f
+#define TWL4030_PM_MASTER_SEQ_ADD_S2A12 0x20
+#define TWL4030_PM_MASTER_SEQ_ADD_S2A3 0x21
+#define TWL4030_PM_MASTER_SEQ_ADD_WARM 0x22
+#define TWL4030_PM_MASTER_MEMORY_ADDRESS 0x23
+#define TWL4030_PM_MASTER_MEMORY_DATA 0x24
+
+#define TWL4030_PM_MASTER_KEY_CFG1 0xc0
+#define TWL4030_PM_MASTER_KEY_CFG2 0x0c
+
+#define TWL4030_PM_MASTER_KEY_TST1 0xe0
+#define TWL4030_PM_MASTER_KEY_TST2 0x0e
+
+#define TWL4030_PM_MASTER_GLOBAL_TST 0xb6
+
+/*----------------------------------------------------------------------*/
+
+/* Power bus message definitions */
+
+/* The TWL4030/5030 splits its power-management resources (the various
+ * regulators, clock and reset lines) into 3 processor groups - P1, P2 and
+ * P3. These groups can then be configured to transition between sleep, wait-on
+ * and active states by sending messages to the power bus. See Section 5.4.2
+ * Power Resources of TWL4030 TRM
+ */
+
+/* Processor groups */
+#define DEV_GRP_NULL 0x0
+#define DEV_GRP_P1 0x1 /* P1: all OMAP devices */
+#define DEV_GRP_P2 0x2 /* P2: all Modem devices */
+#define DEV_GRP_P3 0x4 /* P3: all peripheral devices */
+
+/* Resource groups */
+#define RES_GRP_RES 0x0 /* Reserved */
+#define RES_GRP_PP 0x1 /* Power providers */
+#define RES_GRP_RC 0x2 /* Reset and control */
+#define RES_GRP_PP_RC 0x3
+#define RES_GRP_PR 0x4 /* Power references */
+#define RES_GRP_PP_PR 0x5
+#define RES_GRP_RC_PR 0x6
+#define RES_GRP_ALL 0x7 /* All resource groups */
+
+#define RES_TYPE2_R0 0x0
+#define RES_TYPE2_R1 0x1
+#define RES_TYPE2_R2 0x2
+
+#define RES_TYPE_R0 0x0
+#define RES_TYPE_ALL 0x7
+
+/* Resource states */
+#define RES_STATE_WRST 0xF
+#define RES_STATE_ACTIVE 0xE
+#define RES_STATE_SLEEP 0x8
+#define RES_STATE_OFF 0x0
+
+/* Power resources */
+
+/* Power providers */
+#define RES_VAUX1 1
+#define RES_VAUX2 2
+#define RES_VAUX3 3
+#define RES_VAUX4 4
+#define RES_VMMC1 5
+#define RES_VMMC2 6
+#define RES_VPLL1 7
+#define RES_VPLL2 8
+#define RES_VSIM 9
+#define RES_VDAC 10
+#define RES_VINTANA1 11
+#define RES_VINTANA2 12
+#define RES_VINTDIG 13
+#define RES_VIO 14
+#define RES_VDD1 15
+#define RES_VDD2 16
+#define RES_VUSB_1V5 17
+#define RES_VUSB_1V8 18
+#define RES_VUSB_3V1 19
+#define RES_VUSBCP 20
+#define RES_REGEN 21
+/* Reset and control */
+#define RES_NRES_PWRON 22
+#define RES_CLKEN 23
+#define RES_SYSEN 24
+#define RES_HFCLKOUT 25
+#define RES_32KCLKOUT 26
+#define RES_RESET 27
+/* Power Reference */
+#define RES_MAIN_REF 28
+
+#define TOTAL_RESOURCES 28
+/*
+ * Power Bus Message Format ... these can be sent individually by Linux,
+ * but are usually part of downloaded scripts that are run when various
+ * power events are triggered.
+ *
+ * Broadcast Message (16 Bits):
+ * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4]
+ * RES_STATE[3:0]
+ *
+ * Singular Message (16 Bits):
+ * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0]
+ */
+
+#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
+ ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
+ | (type) << 4 | (state))
+
+#define MSG_SINGULAR(devgrp, id, state) \
+ ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
+
+#define MSG_BROADCAST_ALL(devgrp, state) \
+ ((devgrp) << 5 | (state))
+
+#define MSG_BROADCAST_REF MSG_BROADCAST_ALL
+#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL
+#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL
+/*----------------------------------------------------------------------*/
+
+struct twl4030_clock_init_data {
+ bool ck32k_lowpwr_enable;
+};
+
+struct twl4030_bci_platform_data {
+ int *battery_tmp_tbl;
+ unsigned int tblsize;
+ int bb_uvolt; /* voltage to charge backup battery */
+ int bb_uamp; /* current for backup battery charging */
+};
+
+/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */
+struct twl4030_gpio_platform_data {
+ /* package the two LED signals as output-only GPIOs? */
+ bool use_leds;
+
+ /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */
+ u8 mmc_cd;
+
+ /* if BIT(N) is set, or VMMC(n+1) is linked, debounce GPIO-N */
+ u32 debounce;
+
+ /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup
+ * should be enabled. Else, if that bit is set in "pulldowns",
+ * that pulldown is enabled. Don't waste power by letting any
+ * digital inputs float...
+ */
+ u32 pullups;
+ u32 pulldowns;
+
+ int (*setup)(struct device *dev,
+ unsigned gpio, unsigned ngpio);
+ int (*teardown)(struct device *dev,
+ unsigned gpio, unsigned ngpio);
+};
+
+struct twl4030_madc_platform_data {
+ int irq_line;
+};
+
+/* Boards have unique mappings of {row, col} --> keycode.
+ * Column and row are 8 bits each, but range only from 0..7.
+ * A PERSISTENT_KEY is "always on" and never reported.
+ */
+#define PERSISTENT_KEY(r, c) KEY((r), (c), KEY_RESERVED)
+
+struct twl4030_keypad_data {
+ const struct matrix_keymap_data *keymap_data;
+ unsigned rows;
+ unsigned cols;
+ bool rep;
+};
+
+enum twl4030_usb_mode {
+ T2_USB_MODE_ULPI = 1,
+ T2_USB_MODE_CEA2011_3PIN = 2,
+};
+
+struct twl4030_usb_data {
+ enum twl4030_usb_mode usb_mode;
+ unsigned long features;
+
+ int (*phy_init)(struct device *dev);
+ int (*phy_exit)(struct device *dev);
+ /* Power on/off the PHY */
+ int (*phy_power)(struct device *dev, int iD, int on);
+ /* enable/disable phy clocks */
+ int (*phy_set_clock)(struct device *dev, int on);
+ /* suspend/resume of phy */
+ int (*phy_suspend)(struct device *dev, int suspend);
+};
+
+struct twl4030_ins {
+ u16 pmb_message;
+ u8 delay;
+};
+
+struct twl4030_script {
+ struct twl4030_ins *script;
+ unsigned size;
+ u8 flags;
+#define TWL4030_WRST_SCRIPT (1<<0)
+#define TWL4030_WAKEUP12_SCRIPT (1<<1)
+#define TWL4030_WAKEUP3_SCRIPT (1<<2)
+#define TWL4030_SLEEP_SCRIPT (1<<3)
+};
+
+struct twl4030_resconfig {
+ u8 resource;
+ u8 devgroup; /* Processor group that Power resource belongs to */
+ u8 type; /* Power resource addressed, 6 / broadcast message */
+ u8 type2; /* Power resource addressed, 3 / broadcast message */
+ u8 remap_off; /* off state remapping */
+ u8 remap_sleep; /* sleep state remapping */
+};
+
+struct twl4030_power_data {
+ struct twl4030_script **scripts;
+ unsigned num;
+ struct twl4030_resconfig *resource_config;
+ struct twl4030_resconfig *board_config;
+#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
+ bool use_poweroff; /* Board is wired for TWL poweroff */
+};
+
+extern int twl4030_remove_script(u8 flags);
+extern void twl4030_power_off(void);
+
+struct twl4030_codec_data {
+ unsigned int digimic_delay; /* in ms */
+ unsigned int ramp_delay_value;
+ unsigned int offset_cncl_path;
+ unsigned int hs_extmute:1;
+ int hs_extmute_gpio;
+};
+
+struct twl4030_vibra_data {
+ unsigned int coexist;
+};
+
+struct twl4030_audio_data {
+ unsigned int audio_mclk;
+ struct twl4030_codec_data *codec;
+ struct twl4030_vibra_data *vibra;
+
+ /* twl6040 */
+ int audpwron_gpio; /* audio power-on gpio */
+ int naudint_irq; /* audio interrupt */
+ unsigned int irq_base;
+};
+
+struct twl4030_platform_data {
+ struct twl4030_clock_init_data *clock;
+ struct twl4030_bci_platform_data *bci;
+ struct twl4030_gpio_platform_data *gpio;
+ struct twl4030_madc_platform_data *madc;
+ struct twl4030_keypad_data *keypad;
+ struct twl4030_usb_data *usb;
+ struct twl4030_power_data *power;
+ struct twl4030_audio_data *audio;
+
+ /* Common LDO regulators for TWL4030/TWL6030 */
+ struct regulator_init_data *vdac;
+ struct regulator_init_data *vaux1;
+ struct regulator_init_data *vaux2;
+ struct regulator_init_data *vaux3;
+ struct regulator_init_data *vdd1;
+ struct regulator_init_data *vdd2;
+ struct regulator_init_data *vdd3;
+ /* TWL4030 LDO regulators */
+ struct regulator_init_data *vpll1;
+ struct regulator_init_data *vpll2;
+ struct regulator_init_data *vmmc1;
+ struct regulator_init_data *vmmc2;
+ struct regulator_init_data *vsim;
+ struct regulator_init_data *vaux4;
+ struct regulator_init_data *vio;
+ struct regulator_init_data *vintana1;
+ struct regulator_init_data *vintana2;
+ struct regulator_init_data *vintdig;
+ /* TWL6030 LDO regulators */
+ struct regulator_init_data *vmmc;
+ struct regulator_init_data *vpp;
+ struct regulator_init_data *vusim;
+ struct regulator_init_data *vana;
+ struct regulator_init_data *vcxio;
+ struct regulator_init_data *vusb;
+ struct regulator_init_data *clk32kg;
+ struct regulator_init_data *v1v8;
+ struct regulator_init_data *v2v1;
+ /* TWL6032 LDO regulators */
+ struct regulator_init_data *ldo1;
+ struct regulator_init_data *ldo2;
+ struct regulator_init_data *ldo3;
+ struct regulator_init_data *ldo4;
+ struct regulator_init_data *ldo5;
+ struct regulator_init_data *ldo6;
+ struct regulator_init_data *ldo7;
+ struct regulator_init_data *ldoln;
+ struct regulator_init_data *ldousb;
+ /* TWL6032 DCDC regulators */
+ struct regulator_init_data *smps3;
+ struct regulator_init_data *smps4;
+ struct regulator_init_data *vio6025;
+};
+
+struct twl_regulator_driver_data {
+ int (*set_voltage)(void *data, int target_uV);
+ int (*get_voltage)(void *data);
+ void *data;
+ unsigned long features;
+};
+/* chip-specific feature flags, for twl_regulator_driver_data.features */
+#define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */
+#define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */
+#define TWL5031 BIT(2) /* twl5031 has different registers */
+#define TWL6030_CLASS BIT(3) /* TWL6030 class */
+#define TWL6032_SUBCLASS BIT(4) /* TWL6032 has changed registers */
+#define TWL4030_ALLOW_UNSUPPORTED BIT(5) /* Some voltages are possible
+ * but not officially supported.
+ * This flag is necessary to
+ * enable them.
+ */
+
+/*----------------------------------------------------------------------*/
+
+int twl4030_sih_setup(struct device *dev, int module, int irq_base);
+
+/* Offsets to Power Registers */
+#define TWL4030_VDAC_DEV_GRP 0x3B
+#define TWL4030_VDAC_DEDICATED 0x3E
+#define TWL4030_VAUX1_DEV_GRP 0x17
+#define TWL4030_VAUX1_DEDICATED 0x1A
+#define TWL4030_VAUX2_DEV_GRP 0x1B
+#define TWL4030_VAUX2_DEDICATED 0x1E
+#define TWL4030_VAUX3_DEV_GRP 0x1F
+#define TWL4030_VAUX3_DEDICATED 0x22
+
+static inline int twl4030charger_usb_en(int enable) { return 0; }
+
+/*----------------------------------------------------------------------*/
+
+/* Linux-specific regulator identifiers ... for now, we only support
+ * the LDOs, and leave the three buck converters alone. VDD1 and VDD2
+ * need to tie into hardware based voltage scaling (cpufreq etc), while
+ * VIO is generally fixed.
+ */
+
+/* TWL4030 SMPS/LDO's */
+/* EXTERNAL dc-to-dc buck converters */
+#define TWL4030_REG_VDD1 0
+#define TWL4030_REG_VDD2 1
+#define TWL4030_REG_VIO 2
+
+/* EXTERNAL LDOs */
+#define TWL4030_REG_VDAC 3
+#define TWL4030_REG_VPLL1 4
+#define TWL4030_REG_VPLL2 5 /* not on all chips */
+#define TWL4030_REG_VMMC1 6
+#define TWL4030_REG_VMMC2 7 /* not on all chips */
+#define TWL4030_REG_VSIM 8 /* not on all chips */
+#define TWL4030_REG_VAUX1 9 /* not on all chips */
+#define TWL4030_REG_VAUX2_4030 10 /* (twl4030-specific) */
+#define TWL4030_REG_VAUX2 11 /* (twl5030 and newer) */
+#define TWL4030_REG_VAUX3 12 /* not on all chips */
+#define TWL4030_REG_VAUX4 13 /* not on all chips */
+
+/* INTERNAL LDOs */
+#define TWL4030_REG_VINTANA1 14
+#define TWL4030_REG_VINTANA2 15
+#define TWL4030_REG_VINTDIG 16
+#define TWL4030_REG_VUSB1V5 17
+#define TWL4030_REG_VUSB1V8 18
+#define TWL4030_REG_VUSB3V1 19
+
+/* TWL6030 SMPS/LDO's */
+/* EXTERNAL dc-to-dc buck convertor controllable via SR */
+#define TWL6030_REG_VDD1 30
+#define TWL6030_REG_VDD2 31
+#define TWL6030_REG_VDD3 32
+
+/* Non SR compliant dc-to-dc buck convertors */
+#define TWL6030_REG_VMEM 33
+#define TWL6030_REG_V2V1 34
+#define TWL6030_REG_V1V29 35
+#define TWL6030_REG_V1V8 36
+
+/* EXTERNAL LDOs */
+#define TWL6030_REG_VAUX1_6030 37
+#define TWL6030_REG_VAUX2_6030 38
+#define TWL6030_REG_VAUX3_6030 39
+#define TWL6030_REG_VMMC 40
+#define TWL6030_REG_VPP 41
+#define TWL6030_REG_VUSIM 42
+#define TWL6030_REG_VANA 43
+#define TWL6030_REG_VCXIO 44
+#define TWL6030_REG_VDAC 45
+#define TWL6030_REG_VUSB 46
+
+/* INTERNAL LDOs */
+#define TWL6030_REG_VRTC 47
+#define TWL6030_REG_CLK32KG 48
+
+/* LDOs on 6025 have different names */
+#define TWL6032_REG_LDO2 49
+#define TWL6032_REG_LDO4 50
+#define TWL6032_REG_LDO3 51
+#define TWL6032_REG_LDO5 52
+#define TWL6032_REG_LDO1 53
+#define TWL6032_REG_LDO7 54
+#define TWL6032_REG_LDO6 55
+#define TWL6032_REG_LDOLN 56
+#define TWL6032_REG_LDOUSB 57
+
+/* 6025 DCDC supplies */
+#define TWL6032_REG_SMPS3 58
+#define TWL6032_REG_SMPS4 59
+#define TWL6032_REG_VIO 60
+
+
+#endif /* End of __TWL_H_ */
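
As a hedged illustration of the power bus message format documented above, a board file could build a wakeup script from MSG_SINGULAR() entries along these lines; the resource, delay and script type are assumptions.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/i2c/twl.h>

/* hypothetical wakeup sequence: switch VPLL2 to ACTIVE for the P1 group,
 * then wait 2 ticks before the next instruction
 */
static struct twl4030_ins example_wakeup_seq[] __initdata = {
	{ MSG_SINGULAR(DEV_GRP_P1, RES_VPLL2, RES_STATE_ACTIVE), 2 },
};

static struct twl4030_script example_wakeup_script __initdata = {
	.script	= example_wakeup_seq,
	.size	= ARRAY_SIZE(example_wakeup_seq),
	.flags	= TWL4030_WAKEUP12_SCRIPT,
};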
diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h
new file mode 100644
index 000000000..1c0134dd3
--- /dev/null
+++ b/include/linux/i2c/twl4030-madc.h
@@ -0,0 +1,147 @@
+/*
+ * twl4030_madc.h - Header for TWL4030 MADC
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * J Keerthy <j-keerthy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef _TWL4030_MADC_H
+#define _TWL4030_MADC_H
+
+struct twl4030_madc_conversion_method {
+ u8 sel;
+ u8 avg;
+ u8 rbase;
+ u8 ctrl;
+};
+
+#define TWL4030_MADC_MAX_CHANNELS 16
+
+
+/*
+ * struct twl4030_madc_request - MADC request packet for channel conversion
+ * @channels: 16 bit bitmap for individual channels
+ * @do_avg: sample the input channel for 4 consecutive cycles
+ * @method: RT, SW1, SW2
+ * @type: Polling or interrupt based method
+ * @raw: Return raw value, do not convert it
+ */
+
+struct twl4030_madc_request {
+ unsigned long channels;
+ bool do_avg;
+ u16 method;
+ u16 type;
+ bool active;
+ bool result_pending;
+ bool raw;
+ int rbuf[TWL4030_MADC_MAX_CHANNELS];
+ void (*func_cb)(int len, int channels, int *buf);
+};
+
+enum conversion_methods {
+ TWL4030_MADC_RT,
+ TWL4030_MADC_SW1,
+ TWL4030_MADC_SW2,
+ TWL4030_MADC_NUM_METHODS
+};
+
+enum sample_type {
+ TWL4030_MADC_WAIT,
+ TWL4030_MADC_IRQ_ONESHOT,
+ TWL4030_MADC_IRQ_REARM
+};
+
+#define TWL4030_MADC_CTRL1 0x00
+#define TWL4030_MADC_CTRL2 0x01
+
+#define TWL4030_MADC_RTSELECT_LSB 0x02
+#define TWL4030_MADC_SW1SELECT_LSB 0x06
+#define TWL4030_MADC_SW2SELECT_LSB 0x0A
+
+#define TWL4030_MADC_RTAVERAGE_LSB 0x04
+#define TWL4030_MADC_SW1AVERAGE_LSB 0x08
+#define TWL4030_MADC_SW2AVERAGE_LSB 0x0C
+
+#define TWL4030_MADC_CTRL_SW1 0x12
+#define TWL4030_MADC_CTRL_SW2 0x13
+
+#define TWL4030_MADC_RTCH0_LSB 0x17
+#define TWL4030_MADC_GPCH0_LSB 0x37
+
+#define TWL4030_MADC_MADCON (1 << 0) /* MADC power on */
+#define TWL4030_MADC_BUSY (1 << 0) /* MADC busy */
+/* MADC conversion completion */
+#define TWL4030_MADC_EOC_SW (1 << 1)
+/* MADC SWx start conversion */
+#define TWL4030_MADC_SW_START (1 << 5)
+#define TWL4030_MADC_ADCIN0 (1 << 0)
+#define TWL4030_MADC_ADCIN1 (1 << 1)
+#define TWL4030_MADC_ADCIN2 (1 << 2)
+#define TWL4030_MADC_ADCIN3 (1 << 3)
+#define TWL4030_MADC_ADCIN4 (1 << 4)
+#define TWL4030_MADC_ADCIN5 (1 << 5)
+#define TWL4030_MADC_ADCIN6 (1 << 6)
+#define TWL4030_MADC_ADCIN7 (1 << 7)
+#define TWL4030_MADC_ADCIN8 (1 << 8)
+#define TWL4030_MADC_ADCIN9 (1 << 9)
+#define TWL4030_MADC_ADCIN10 (1 << 10)
+#define TWL4030_MADC_ADCIN11 (1 << 11)
+#define TWL4030_MADC_ADCIN12 (1 << 12)
+#define TWL4030_MADC_ADCIN13 (1 << 13)
+#define TWL4030_MADC_ADCIN14 (1 << 14)
+#define TWL4030_MADC_ADCIN15 (1 << 15)
+
+/* Fixed channels */
+#define TWL4030_MADC_BTEMP TWL4030_MADC_ADCIN1
+#define TWL4030_MADC_VBUS TWL4030_MADC_ADCIN8
+#define TWL4030_MADC_VBKB TWL4030_MADC_ADCIN9
+#define TWL4030_MADC_ICHG TWL4030_MADC_ADCIN10
+#define TWL4030_MADC_VCHG TWL4030_MADC_ADCIN11
+#define TWL4030_MADC_VBAT TWL4030_MADC_ADCIN12
+
+/* Step size and prescaler ratio */
+#define TEMP_STEP_SIZE 147
+#define TEMP_PSR_R 100
+#define CURR_STEP_SIZE 147
+#define CURR_PSR_R1 44
+#define CURR_PSR_R2 88
+
+#define TWL4030_BCI_BCICTL1 0x23
+#define TWL4030_BCI_CGAIN 0x020
+#define TWL4030_BCI_MESBAT (1 << 1)
+#define TWL4030_BCI_TYPEN (1 << 4)
+#define TWL4030_BCI_ITHEN (1 << 3)
+
+#define REG_BCICTL2 0x024
+#define TWL4030_BCI_ITHSENS 0x007
+
+/* Register and bits for GPBR1 register */
+#define TWL4030_REG_GPBR1 0x0c
+#define TWL4030_GPBR1_MADC_HFCLK_EN (1 << 7)
+
+struct twl4030_madc_user_parms {
+ int channel;
+ int average;
+ int status;
+ u16 result;
+};
+
+int twl4030_madc_conversion(struct twl4030_madc_request *conv);
+int twl4030_get_madc_conversion(int channel_no);
+#endif
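
A hedged sketch of a one-shot software conversion using the request structure and fixed-channel defines above; error handling is minimal and the choice of channel and method is only an example.

#include <linux/i2c/twl4030-madc.h>

static int example_read_vbat(void)
{
	struct twl4030_madc_request req = {
		.channels	= TWL4030_MADC_VBAT,	/* channel 12 */
		.do_avg		= true,
		.method		= TWL4030_MADC_SW2,
		.type		= TWL4030_MADC_WAIT,
		.raw		= false,
	};
	int ret;

	ret = twl4030_madc_conversion(&req);
	if (ret < 0)
		return ret;

	/* converted result for channel 12 (VBAT) */
	return req.rbuf[12];
}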
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
new file mode 100644
index 000000000..1587b7dec
--- /dev/null
+++ b/include/linux/i7300_idle.h
@@ -0,0 +1,83 @@
+
+#ifndef I7300_IDLE_H
+#define I7300_IDLE_H
+
+#include <linux/pci.h>
+
+/*
+ * I/O AT controls (PCI bus 0 device 8 function 0)
+ * DIMM controls (PCI bus 0 device 16 function 1)
+ */
+#define IOAT_BUS 0
+#define IOAT_DEVFN PCI_DEVFN(8, 0)
+#define MEMCTL_BUS 0
+#define MEMCTL_DEVFN PCI_DEVFN(16, 1)
+
+struct fbd_ioat {
+ unsigned int vendor;
+ unsigned int ioat_dev;
+ unsigned int enabled;
+};
+
+/*
+ * The i5000 chip-set has the same hooks as the i7300
+ * but it is not enabled by default and must be manually
+ * enabled with "forceload=1" because it is
+ * only lightly validated.
+ */
+
+static const struct fbd_ioat fbd_ioat_list[] = {
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1},
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0},
+ {0, 0}
+};
+
+/* table of devices that work with this driver */
+static const struct pci_device_id pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
+ { } /* Terminating entry */
+};
+
+/* Check for known platforms with I/O-AT */
+static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
+ struct pci_dev **ioat_dev,
+ int enable_all)
+{
+ int i;
+ struct pci_dev *memdev, *dmadev;
+
+ memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
+ if (!memdev)
+ return -ENODEV;
+
+ for (i = 0; pci_tbl[i].vendor != 0; i++) {
+ if (memdev->vendor == pci_tbl[i].vendor &&
+ memdev->device == pci_tbl[i].device) {
+ break;
+ }
+ }
+ if (pci_tbl[i].vendor == 0)
+ return -ENODEV;
+
+ dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
+ if (!dmadev)
+ return -ENODEV;
+
+ for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
+ if (dmadev->vendor == fbd_ioat_list[i].vendor &&
+ dmadev->device == fbd_ioat_list[i].ioat_dev) {
+ if (!(fbd_ioat_list[i].enabled || enable_all))
+ continue;
+ if (fbd_dev)
+ *fbd_dev = memdev;
+ if (ioat_dev)
+ *ioat_dev = dmadev;
+
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+#endif
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
new file mode 100644
index 000000000..0f9bafa17
--- /dev/null
+++ b/include/linux/i8042.h
@@ -0,0 +1,105 @@
+#ifndef _LINUX_I8042_H
+#define _LINUX_I8042_H
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+
+/*
+ * Standard commands.
+ */
+
+#define I8042_CMD_CTL_RCTR 0x0120
+#define I8042_CMD_CTL_WCTR 0x1060
+#define I8042_CMD_CTL_TEST 0x01aa
+
+#define I8042_CMD_KBD_DISABLE 0x00ad
+#define I8042_CMD_KBD_ENABLE 0x00ae
+#define I8042_CMD_KBD_TEST 0x01ab
+#define I8042_CMD_KBD_LOOP 0x11d2
+
+#define I8042_CMD_AUX_DISABLE 0x00a7
+#define I8042_CMD_AUX_ENABLE 0x00a8
+#define I8042_CMD_AUX_TEST 0x01a9
+#define I8042_CMD_AUX_SEND 0x10d4
+#define I8042_CMD_AUX_LOOP 0x11d3
+
+#define I8042_CMD_MUX_PFX 0x0090
+#define I8042_CMD_MUX_SEND 0x1090
+
+/*
+ * Status register bits.
+ */
+
+#define I8042_STR_PARITY 0x80
+#define I8042_STR_TIMEOUT 0x40
+#define I8042_STR_AUXDATA 0x20
+#define I8042_STR_KEYLOCK 0x10
+#define I8042_STR_CMDDAT 0x08
+#define I8042_STR_MUXERR 0x04
+#define I8042_STR_IBF 0x02
+#define I8042_STR_OBF 0x01
+
+/*
+ * Control register bits.
+ */
+
+#define I8042_CTR_KBDINT 0x01
+#define I8042_CTR_AUXINT 0x02
+#define I8042_CTR_IGNKEYLOCK 0x08
+#define I8042_CTR_KBDDIS 0x10
+#define I8042_CTR_AUXDIS 0x20
+#define I8042_CTR_XLATE 0x40
+
+struct serio;
+
+#if defined(CONFIG_SERIO_I8042) || defined(CONFIG_SERIO_I8042_MODULE)
+
+void i8042_lock_chip(void);
+void i8042_unlock_chip(void);
+int i8042_command(unsigned char *param, int command);
+bool i8042_check_port_owner(const struct serio *);
+int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio));
+int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio));
+
+#else
+
+static inline void i8042_lock_chip(void)
+{
+}
+
+static inline void i8042_unlock_chip(void)
+{
+}
+
+static inline int i8042_command(unsigned char *param, int command)
+{
+ return -ENODEV;
+}
+
+static inline bool i8042_check_port_owner(const struct serio *serio)
+{
+ return false;
+}
+
+static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ return -ENODEV;
+}
+
+static inline int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+ struct serio *serio))
+{
+ return -ENODEV;
+}
+
+#endif
+
+#endif
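
To illustrate the filter hooks declared above, here is a hedged sketch of a platform driver installing a filter that swallows one assumed hotkey scancode; the scancode value is made up.

#include <linux/i8042.h>
#include <linux/serio.h>

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *serio)
{
	if (str & I8042_STR_AUXDATA)
		return false;		/* ignore AUX (mouse) traffic */

	if (data == 0x5e) {		/* assumed vendor hotkey scancode */
		/* handle the hotkey here */
		return true;		/* eat the byte */
	}

	return false;			/* pass everything else through */
}

/* from the driver's probe():	i8042_install_filter(example_i8042_filter);
 * and from remove():		i8042_remove_filter(example_i8042_filter);
 */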
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
new file mode 100644
index 000000000..e6bb36a97
--- /dev/null
+++ b/include/linux/i8253.h
@@ -0,0 +1,29 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Machine-specific I/O port address definitions for the generic i8253 PIT.
+ * Written by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef __LINUX_I8253_H
+#define __LINUX_I8253_H
+
+#include <linux/param.h>
+#include <linux/spinlock.h>
+#include <linux/timex.h>
+
+/* i8253A PIT registers */
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+#define PIT_CH2 0x42
+
+#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
+
+extern raw_spinlock_t i8253_lock;
+extern struct clock_event_device i8253_clockevent;
+extern void clockevent_i8253_init(bool oneshot);
+
+extern void setup_pit_timer(void);
+
+#endif /* __LINUX_I8253_H */
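
A quick aside on PIT_LATCH: it is PIT_TICK_RATE/HZ rounded to the nearest integer, e.g. with the classic 1193182 Hz input clock and HZ=1000 it works out to (1193182 + 500) / 1000 = 1193. The sketch below is illustrative only (the example_* function is hypothetical; real kernels program the PIT via clockevent_i8253_init() or setup_pit_timer()) and shows the classic mode-2 sequence for channel 0 using the registers and lock declared above.

#include <linux/i8253.h>
#include <linux/io.h>

/*
 * Illustrative only: put channel 0 into periodic (mode 2) operation
 * with the PIT_LATCH reload value, under i8253_lock.
 */
static void example_program_pit_periodic(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&i8253_lock, flags);
        outb_p(0x34, PIT_MODE);                 /* ch 0, LSB/MSB access, mode 2, binary */
        outb_p(PIT_LATCH & 0xff, PIT_CH0);      /* low byte of the reload value */
        outb_p(PIT_LATCH >> 8, PIT_CH0);        /* high byte of the reload value */
        raw_spin_unlock_irqrestore(&i8253_lock, flags);
}
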
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
new file mode 100644
index 000000000..efc184906
--- /dev/null
+++ b/include/linux/icmp.h
@@ -0,0 +1,27 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the ICMP protocol.
+ *
+ * Version: @(#)icmp.h 1.0.3 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_ICMP_H
+#define _LINUX_ICMP_H
+
+#include <linux/skbuff.h>
+#include <uapi/linux/icmp.h>
+
+static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
+{
+ return (struct icmphdr *)skb_transport_header(skb);
+}
+#endif /* _LINUX_ICMP_H */
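
For illustration (the helper name is hypothetical): the accessor above is typically used on a packet whose transport header has already been set by the IPv4 receive path, and ICMP_ECHO comes from the uapi header included above.

#include <linux/icmp.h>

/*
 * Hypothetical helper: true if @skb carries an ICMPv4 echo request.
 * Assumes the receive path has already set the transport header
 * offset, which is what makes icmp_hdr() valid.
 */
static bool example_is_echo_request(const struct sk_buff *skb)
{
        const struct icmphdr *ih = icmp_hdr(skb);

        return ih->type == ICMP_ECHO && ih->code == 0;
}
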
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
new file mode 100644
index 000000000..630f45335
--- /dev/null
+++ b/include/linux/icmpv6.h
@@ -0,0 +1,45 @@
+#ifndef _LINUX_ICMPV6_H
+#define _LINUX_ICMPV6_H
+
+#include <linux/skbuff.h>
+#include <uapi/linux/icmpv6.h>
+
+static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+{
+ return (struct icmp6hdr *)skb_transport_header(skb);
+}
+
+#include <linux/netdevice.h>
+
+#if IS_ENABLED(CONFIG_IPV6)
+extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
+
+typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info);
+extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
+extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+
+#else
+
+static inline void icmpv6_send(struct sk_buff *skb,
+ u8 type, u8 code, __u32 info)
+{
+
+}
+#endif
+
+extern int icmpv6_init(void);
+extern int icmpv6_err_convert(u8 type, u8 code,
+ int *err);
+extern void icmpv6_cleanup(void);
+extern void icmpv6_param_prob(struct sk_buff *skb,
+ u8 code, int pos);
+
+struct flowi6;
+struct in6_addr;
+extern void icmpv6_flow_init(struct sock *sk,
+ struct flowi6 *fl6,
+ u8 type,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ int oif);
+#endif
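
A hedged sketch of the most common call into this API (the function name is hypothetical; the ICMPV6_* constants come from the uapi header included above): generating a destination-unreachable error for a packet. With CONFIG_IPV6 disabled, the inline stub above makes this a no-op.

#include <linux/icmpv6.h>

/* Hypothetical helper: send a "port unreachable" error for @skb. */
static void example_reject_ipv6_port(struct sk_buff *skb)
{
        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
}
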
diff --git a/include/linux/ide.h b/include/linux/ide.h
new file mode 100644
index 000000000..93b5ca754
--- /dev/null
+++ b/include/linux/ide.h
@@ -0,0 +1,1554 @@
+#ifndef _IDE_H
+#define _IDE_H
+/*
+ * linux/include/linux/ide.h
+ *
+ * Copyright (C) 1994-2002 Linus Torvalds & authors
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/ata.h>
+#include <linux/blkdev.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/bio.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/pm.h>
+#include <linux/mutex.h>
+/* for request_sense */
+#include <linux/cdrom.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+
+#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
+# define SUPPORT_VLB_SYNC 0
+#else
+# define SUPPORT_VLB_SYNC 1
+#endif
+
+/*
+ * Probably not wise to fiddle with these
+ */
+#define IDE_DEFAULT_MAX_FAILURES 1
+#define ERROR_MAX 8 /* Max read/write errors per sector */
+#define ERROR_RESET 3 /* Reset controller every 4th retry */
+#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
+
+struct device;
+
+/* Error codes returned in rq->errors to the higher part of the driver. */
+enum {
+ IDE_DRV_ERROR_GENERAL = 101,
+ IDE_DRV_ERROR_FILEMARK = 102,
+ IDE_DRV_ERROR_EOD = 103,
+};
+
+/*
+ * Definitions for accessing IDE controller registers
+ */
+#define IDE_NR_PORTS (10)
+
+struct ide_io_ports {
+ unsigned long data_addr;
+
+ union {
+ unsigned long error_addr; /* read: error */
+ unsigned long feature_addr; /* write: feature */
+ };
+
+ unsigned long nsect_addr;
+ unsigned long lbal_addr;
+ unsigned long lbam_addr;
+ unsigned long lbah_addr;
+
+ unsigned long device_addr;
+
+ union {
+ unsigned long status_addr; /*  read: status  */
+ unsigned long command_addr; /* write: command */
+ };
+
+ unsigned long ctl_addr;
+
+ unsigned long irq_addr;
+};
+
+#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
+
+#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
+#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
+#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
+#define DRIVE_READY (ATA_DRDY | ATA_DSC)
+
+#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
+
+#define SATA_NR_PORTS (3) /* 16 possible ?? */
+
+#define SATA_STATUS_OFFSET (0)
+#define SATA_ERROR_OFFSET (1)
+#define SATA_CONTROL_OFFSET (2)
+
+/*
+ * Our Physical Region Descriptor (PRD) table should be large enough
+ * to handle the biggest I/O request we are likely to see. Since requests
+ * can have no more than 256 sectors, and since the typical blocksize is
+ * two or more sectors, we could get by with a limit of 128 entries here for
+ * the usual worst case. Most requests seem to include some contiguous blocks,
+ * further reducing the number of table entries required.
+ *
+ * The driver reverts to PIO mode for individual requests that exceed
+ * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
+ * 100% of all crazy scenarios here is not necessary.
+ *
+ * As it turns out though, we must allocate a full 4KB page for this,
+ * so the two PRD tables (ide0 & ide1) will each get half of that,
+ * allowing each to have about 256 entries (8 bytes each) from this.
+ */
+#define PRD_BYTES 8
+#define PRD_ENTRIES 256
+
+/*
+ * Some more useful definitions
+ */
+#define PARTN_BITS 6 /* number of minor dev bits for partitions */
+#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
+#define SECTOR_SIZE 512
+
+/*
+ * Timeouts for various operations:
+ */
+enum {
+ /* spec allows up to 20ms, but CF cards and SSD drives need more */
+ WAIT_DRQ = 1 * HZ, /* 1s */
+ /* some laptops are very slow */
+ WAIT_READY = 5 * HZ, /* 5s */
+ /* should be less than 3ms (?), if all ATAPI CD drives are closed at boot */
+ WAIT_PIDENTIFY = 10 * HZ, /* 10s */
+ /* worst case when spinning up */
+ WAIT_WORSTCASE = 30 * HZ, /* 30s */
+ /* maximum wait for an IRQ to happen */
+ WAIT_CMD = 10 * HZ, /* 10s */
+ /* Some drives require a longer IRQ timeout. */
+ WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
+ /*
+ * Some drives (for example, Seagate STT3401A Travan) require a very
+ * long timeout, because they don't return an interrupt or clear their
+ * BSY bit until after the command completes (even retension commands).
+ */
+ WAIT_TAPE_CMD = 900 * HZ, /* 900s */
+ /* minimum sleep time */
+ WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
+};
+
+/*
+ * Op codes for special requests to be handled by ide_special_rq().
+ * Values should be in the range of 0x20 to 0x3f.
+ */
+#define REQ_DRIVE_RESET 0x20
+#define REQ_DEVSET_EXEC 0x21
+#define REQ_PARK_HEADS 0x22
+#define REQ_UNPARK_HEADS 0x23
+
+/*
+ * hwif_chipset_t is used to keep track of the specific hardware
+ * chipset used by each IDE interface, if known.
+ */
+enum { ide_unknown, ide_generic, ide_pci,
+ ide_cmd640, ide_dtc2278, ide_ali14xx,
+ ide_qd65xx, ide_umc8672, ide_ht6560b,
+ ide_4drives, ide_pmac, ide_acorn,
+ ide_au1xxx, ide_palm3710
+};
+
+typedef u8 hwif_chipset_t;
+
+/*
+ * Structure to hold all information about the location of this port
+ */
+struct ide_hw {
+ union {
+ struct ide_io_ports io_ports;
+ unsigned long io_ports_array[IDE_NR_PORTS];
+ };
+
+ int irq; /* our irq number */
+ struct device *dev, *parent;
+ unsigned long config;
+};
+
+static inline void ide_std_init_ports(struct ide_hw *hw,
+ unsigned long io_addr,
+ unsigned long ctl_addr)
+{
+ unsigned int i;
+
+ for (i = 0; i <= 7; i++)
+ hw->io_ports_array[i] = io_addr++;
+
+ hw->io_ports.ctl_addr = ctl_addr;
+}
+
+#define MAX_HWIFS 10
+
+/*
+ * Now for the data we need to maintain per-drive: ide_drive_t
+ */
+
+#define ide_scsi 0x21
+#define ide_disk 0x20
+#define ide_optical 0x7
+#define ide_cdrom 0x5
+#define ide_tape 0x1
+#define ide_floppy 0x0
+
+/*
+ * Special Driver Flags
+ */
+enum {
+ IDE_SFLAG_SET_GEOMETRY = (1 << 0),
+ IDE_SFLAG_RECALIBRATE = (1 << 1),
+ IDE_SFLAG_SET_MULTMODE = (1 << 2),
+};
+
+/*
+ * Status returned from various ide_ functions
+ */
+typedef enum {
+ ide_stopped, /* no drive operation was started */
+ ide_started, /* a drive operation was started, handler was set */
+} ide_startstop_t;
+
+enum {
+ IDE_VALID_ERROR = (1 << 1),
+ IDE_VALID_FEATURE = IDE_VALID_ERROR,
+ IDE_VALID_NSECT = (1 << 2),
+ IDE_VALID_LBAL = (1 << 3),
+ IDE_VALID_LBAM = (1 << 4),
+ IDE_VALID_LBAH = (1 << 5),
+ IDE_VALID_DEVICE = (1 << 6),
+ IDE_VALID_LBA = IDE_VALID_LBAL |
+ IDE_VALID_LBAM |
+ IDE_VALID_LBAH,
+ IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
+ IDE_VALID_NSECT |
+ IDE_VALID_LBA,
+ IDE_VALID_IN_TF = IDE_VALID_NSECT |
+ IDE_VALID_LBA,
+ IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
+ IDE_VALID_IN_HOB = IDE_VALID_ERROR |
+ IDE_VALID_NSECT |
+ IDE_VALID_LBA,
+};
+
+enum {
+ IDE_TFLAG_LBA48 = (1 << 0),
+ IDE_TFLAG_WRITE = (1 << 1),
+ IDE_TFLAG_CUSTOM_HANDLER = (1 << 2),
+ IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3),
+ /* force 16-bit I/O operations */
+ IDE_TFLAG_IO_16BIT = (1 << 4),
+ /* struct ide_cmd was allocated using kmalloc() */
+ IDE_TFLAG_DYN = (1 << 5),
+ IDE_TFLAG_FS = (1 << 6),
+ IDE_TFLAG_MULTI_PIO = (1 << 7),
+ IDE_TFLAG_SET_XFER = (1 << 8),
+};
+
+enum {
+ IDE_FTFLAG_FLAGGED = (1 << 0),
+ IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
+ IDE_FTFLAG_OUT_DATA = (1 << 2),
+ IDE_FTFLAG_IN_DATA = (1 << 3),
+};
+
+struct ide_taskfile {
+ u8 data; /* 0: data byte (for TASKFILE ioctl) */
+ union { /* 1: */
+ u8 error; /* read: error */
+ u8 feature; /* write: feature */
+ };
+ u8 nsect; /* 2: number of sectors */
+ u8 lbal; /* 3: LBA low */
+ u8 lbam; /* 4: LBA mid */
+ u8 lbah; /* 5: LBA high */
+ u8 device; /* 6: device select */
+ union { /* 7: */
+ u8 status; /* read: status */
+ u8 command; /* write: command */
+ };
+};
+
+struct ide_cmd {
+ struct ide_taskfile tf;
+ struct ide_taskfile hob;
+ struct {
+ struct {
+ u8 tf;
+ u8 hob;
+ } out, in;
+ } valid;
+
+ u16 tf_flags;
+ u8 ftf_flags; /* for TASKFILE ioctl */
+ int protocol;
+
+ int sg_nents; /* number of sg entries */
+ int orig_sg_nents;
+ int sg_dma_direction; /* DMA transfer direction */
+
+ unsigned int nbytes;
+ unsigned int nleft;
+ unsigned int last_xfer_len;
+
+ struct scatterlist *cursg;
+ unsigned int cursg_ofs;
+
+ struct request *rq; /* copy of request */
+};
+
+/* ATAPI packet command flags */
+enum {
+ /* set when an error is considered normal - no retry (ide-tape) */
+ PC_FLAG_ABORT = (1 << 0),
+ PC_FLAG_SUPPRESS_ERROR = (1 << 1),
+ PC_FLAG_WAIT_FOR_DSC = (1 << 2),
+ PC_FLAG_DMA_OK = (1 << 3),
+ PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
+ PC_FLAG_DMA_ERROR = (1 << 5),
+ PC_FLAG_WRITING = (1 << 6),
+};
+
+#define ATAPI_WAIT_PC (60 * HZ)
+
+struct ide_atapi_pc {
+ /* actual packet bytes */
+ u8 c[12];
+ /* incremented on each retry */
+ int retries;
+ int error;
+
+ /* bytes to transfer */
+ int req_xfer;
+
+ /* the corresponding request */
+ struct request *rq;
+
+ unsigned long flags;
+
+ /*
+ * those are more or less driver-specific and some of them are subject
+ * to change/removal later.
+ */
+ unsigned long timeout;
+};
+
+struct ide_devset;
+struct ide_driver;
+
+#ifdef CONFIG_BLK_DEV_IDEACPI
+struct ide_acpi_drive_link;
+struct ide_acpi_hwif_link;
+#endif
+
+struct ide_drive_s;
+
+struct ide_disk_ops {
+ int (*check)(struct ide_drive_s *, const char *);
+ int (*get_capacity)(struct ide_drive_s *);
+ void (*unlock_native_capacity)(struct ide_drive_s *);
+ void (*setup)(struct ide_drive_s *);
+ void (*flush)(struct ide_drive_s *);
+ int (*init_media)(struct ide_drive_s *, struct gendisk *);
+ int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
+ int);
+ ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
+ sector_t);
+ int (*ioctl)(struct ide_drive_s *, struct block_device *,
+ fmode_t, unsigned int, unsigned long);
+};
+
+/* ATAPI device flags */
+enum {
+ IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
+
+ /* ide-cd */
+ /* Drive cannot eject the disc. */
+ IDE_AFLAG_NO_EJECT = (1 << 1),
+ /* Drive is a pre ATAPI 1.2 drive. */
+ IDE_AFLAG_PRE_ATAPI12 = (1 << 2),
+ /* TOC addresses are in BCD. */
+ IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
+ /* TOC track numbers are in BCD. */
+ IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
+ /* Saved TOC information is current. */
+ IDE_AFLAG_TOC_VALID = (1 << 6),
+ /* We think that the drive door is locked. */
+ IDE_AFLAG_DOOR_LOCKED = (1 << 7),
+ /* SET_CD_SPEED command is unsupported. */
+ IDE_AFLAG_NO_SPEED_SELECT = (1 << 8),
+ IDE_AFLAG_VERTOS_300_SSD = (1 << 9),
+ IDE_AFLAG_VERTOS_600_ESD = (1 << 10),
+ IDE_AFLAG_SANYO_3CD = (1 << 11),
+ IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12),
+ IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13),
+ IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14),
+
+ /* ide-floppy */
+ /* Avoid commands not supported in Clik drive */
+ IDE_AFLAG_CLIK_DRIVE = (1 << 15),
+ /* Requires BH algorithm for packets */
+ IDE_AFLAG_ZIP_DRIVE = (1 << 16),
+ /* Supports format progress report */
+ IDE_AFLAG_SRFP = (1 << 17),
+
+ /* ide-tape */
+ IDE_AFLAG_IGNORE_DSC = (1 << 18),
+ /* 0 When the tape position is unknown */
+ IDE_AFLAG_ADDRESS_VALID = (1 << 19),
+ /* Device already opened */
+ IDE_AFLAG_BUSY = (1 << 20),
+ /* Attempt to auto-detect the current user block size */
+ IDE_AFLAG_DETECT_BS = (1 << 21),
+ /* Currently on a filemark */
+ IDE_AFLAG_FILEMARK = (1 << 22),
+ /* 0 = no tape is loaded, so we don't rewind after ejecting */
+ IDE_AFLAG_MEDIUM_PRESENT = (1 << 23),
+
+ IDE_AFLAG_NO_AUTOCLOSE = (1 << 24),
+};
+
+/* device flags */
+enum {
+ /* restore settings after device reset */
+ IDE_DFLAG_KEEP_SETTINGS = (1 << 0),
+ /* device is using DMA for read/write */
+ IDE_DFLAG_USING_DMA = (1 << 1),
+ /* okay to unmask other IRQs */
+ IDE_DFLAG_UNMASK = (1 << 2),
+ /* don't attempt flushes */
+ IDE_DFLAG_NOFLUSH = (1 << 3),
+ /* DSC overlap */
+ IDE_DFLAG_DSC_OVERLAP = (1 << 4),
+ /* give potential excess bandwidth */
+ IDE_DFLAG_NICE1 = (1 << 5),
+ /* device is physically present */
+ IDE_DFLAG_PRESENT = (1 << 6),
+ /* disable Host Protected Area */
+ IDE_DFLAG_NOHPA = (1 << 7),
+ /* id read from device (synthetic if not set) */
+ IDE_DFLAG_ID_READ = (1 << 8),
+ IDE_DFLAG_NOPROBE = (1 << 9),
+ /* need to do check_media_change() */
+ IDE_DFLAG_REMOVABLE = (1 << 10),
+ /* needed for removable devices */
+ IDE_DFLAG_ATTACH = (1 << 11),
+ IDE_DFLAG_FORCED_GEOM = (1 << 12),
+ /* disallow setting unmask bit */
+ IDE_DFLAG_NO_UNMASK = (1 << 13),
+ /* disallow enabling 32-bit I/O */
+ IDE_DFLAG_NO_IO_32BIT = (1 << 14),
+ /* for removable only: door lock/unlock works */
+ IDE_DFLAG_DOORLOCKING = (1 << 15),
+ /* disallow DMA */
+ IDE_DFLAG_NODMA = (1 << 16),
+ /* powermanagement told us not to do anything, so sleep nicely */
+ IDE_DFLAG_BLOCKED = (1 << 17),
+ /* sleeping & sleep field valid */
+ IDE_DFLAG_SLEEPING = (1 << 18),
+ IDE_DFLAG_POST_RESET = (1 << 19),
+ IDE_DFLAG_UDMA33_WARNED = (1 << 20),
+ IDE_DFLAG_LBA48 = (1 << 21),
+ /* status of write cache */
+ IDE_DFLAG_WCACHE = (1 << 22),
+ /* used for ignoring ATA_DF */
+ IDE_DFLAG_NOWERR = (1 << 23),
+ /* retrying in PIO */
+ IDE_DFLAG_DMA_PIO_RETRY = (1 << 24),
+ IDE_DFLAG_LBA = (1 << 25),
+ /* don't unload heads */
+ IDE_DFLAG_NO_UNLOAD = (1 << 26),
+ /* heads unloaded, please don't reset port */
+ IDE_DFLAG_PARKED = (1 << 27),
+ IDE_DFLAG_MEDIA_CHANGED = (1 << 28),
+ /* write protect */
+ IDE_DFLAG_WP = (1 << 29),
+ IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
+ IDE_DFLAG_NIEN_QUIRK = (1 << 31),
+};
+
+struct ide_drive_s {
+ char name[4]; /* drive name, such as "hda" */
+ char driver_req[10]; /* requests specific driver */
+
+ struct request_queue *queue; /* request queue */
+
+ struct request *rq; /* current request */
+ void *driver_data; /* extra driver data */
+ u16 *id; /* identification info */
+#ifdef CONFIG_IDE_PROC_FS
+ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
+ const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
+#endif
+ struct hwif_s *hwif; /* actually (ide_hwif_t *) */
+
+ const struct ide_disk_ops *disk_ops;
+
+ unsigned long dev_flags;
+
+ unsigned long sleep; /* sleep until this time */
+ unsigned long timeout; /* max time to wait for irq */
+
+ u8 special_flags; /* special action flags */
+
+ u8 select; /* basic drive/head select reg value */
+ u8 retry_pio; /* retrying dma capable host in pio */
+ u8 waiting_for_dma; /* dma currently in progress */
+ u8 dma; /* atapi dma flag */
+
+ u8 init_speed; /* transfer rate set at boot */
+ u8 current_speed; /* current transfer rate set */
+ u8 desired_speed; /* desired transfer rate set */
+ u8 pio_mode; /* for ->set_pio_mode _only_ */
+ u8 dma_mode; /* for ->set_dma_mode _only_ */
+ u8 dn; /* now in widespread use */
+ u8 acoustic; /* acoustic management */
+ u8 media; /* disk, cdrom, tape, floppy, ... */
+ u8 ready_stat; /* min status value for drive ready */
+ u8 mult_count; /* current multiple sector setting */
+ u8 mult_req; /* requested multiple sector setting */
+ u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
+ u8 bad_wstat; /* used for ignoring ATA_DF */
+ u8 head; /* "real" number of heads */
+ u8 sect; /* "real" sectors per track */
+ u8 bios_head; /* BIOS/fdisk/LILO number of heads */
+ u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
+
+ /* delay this long before sending packet command */
+ u8 pc_delay;
+
+ unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
+ unsigned int cyl; /* "real" number of cyls */
+ void *drive_data; /* used by set_pio_mode/dev_select() */
+ unsigned int failures; /* current failure count */
+ unsigned int max_failures; /* maximum allowed failure count */
+ u64 probed_capacity;/* initial/native media capacity */
+ u64 capacity64; /* total number of sectors */
+
+ int lun; /* logical unit */
+ int crc_count; /* crc counter to reduce drive speed */
+
+ unsigned long debug_mask; /* debugging levels switch */
+
+#ifdef CONFIG_BLK_DEV_IDEACPI
+ struct ide_acpi_drive_link *acpidata;
+#endif
+ struct list_head list;
+ struct device gendev;
+ struct completion gendev_rel_comp; /* to deal with device release() */
+
+ /* current packet command */
+ struct ide_atapi_pc *pc;
+
+ /* last failed packet command */
+ struct ide_atapi_pc *failed_pc;
+
+ /* callback for packet commands */
+ int (*pc_callback)(struct ide_drive_s *, int);
+
+ ide_startstop_t (*irq_handler)(struct ide_drive_s *);
+
+ unsigned long atapi_flags;
+
+ struct ide_atapi_pc request_sense_pc;
+
+ /* current sense rq and buffer */
+ bool sense_rq_armed;
+ struct request sense_rq;
+ struct request_sense sense_data;
+};
+
+typedef struct ide_drive_s ide_drive_t;
+
+#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
+
+#define to_ide_drv(obj, cont_type) \
+ container_of(obj, struct cont_type, dev)
+
+#define ide_drv_g(disk, cont_type) \
+ container_of((disk)->private_data, struct cont_type, driver)
+
+struct ide_port_info;
+
+struct ide_tp_ops {
+ void (*exec_command)(struct hwif_s *, u8);
+ u8 (*read_status)(struct hwif_s *);
+ u8 (*read_altstatus)(struct hwif_s *);
+ void (*write_devctl)(struct hwif_s *, u8);
+
+ void (*dev_select)(ide_drive_t *);
+ void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
+ void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
+
+ void (*input_data)(ide_drive_t *, struct ide_cmd *,
+ void *, unsigned int);
+ void (*output_data)(ide_drive_t *, struct ide_cmd *,
+ void *, unsigned int);
+};
+
+extern const struct ide_tp_ops default_tp_ops;
+
+/**
+ * struct ide_port_ops - IDE port operations
+ *
+ * @init_dev: host specific initialization of a device
+ * @set_pio_mode: routine to program host for PIO mode
+ * @set_dma_mode: routine to program host for DMA mode
+ * @reset_poll: chipset polling based on hba specifics
+ * @pre_reset: chipset specific changes to default for device-hba resets
+ * @resetproc: routine to reset controller after a disk reset
+ * @maskproc: special host masking for drive selection
+ * @quirkproc: check host's drive quirk list
+ * @clear_irq: clear IRQ
+ *
+ * @mdma_filter: filter MDMA modes
+ * @udma_filter: filter UDMA modes
+ *
+ * @cable_detect: detect cable type
+ */
+struct ide_port_ops {
+ void (*init_dev)(ide_drive_t *);
+ void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
+ void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
+ int (*reset_poll)(ide_drive_t *);
+ void (*pre_reset)(ide_drive_t *);
+ void (*resetproc)(ide_drive_t *);
+ void (*maskproc)(ide_drive_t *, int);
+ void (*quirkproc)(ide_drive_t *);
+ void (*clear_irq)(ide_drive_t *);
+ int (*test_irq)(struct hwif_s *);
+
+ u8 (*mdma_filter)(ide_drive_t *);
+ u8 (*udma_filter)(ide_drive_t *);
+
+ u8 (*cable_detect)(struct hwif_s *);
+};
+
+struct ide_dma_ops {
+ void (*dma_host_set)(struct ide_drive_s *, int);
+ int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
+ void (*dma_start)(struct ide_drive_s *);
+ int (*dma_end)(struct ide_drive_s *);
+ int (*dma_test_irq)(struct ide_drive_s *);
+ void (*dma_lost_irq)(struct ide_drive_s *);
+ /* below ones are optional */
+ int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
+ int (*dma_timer_expiry)(struct ide_drive_s *);
+ void (*dma_clear)(struct ide_drive_s *);
+ /*
+ * The following method is optional and only required to be
+ * implemented for the SFF-8038i compatible controllers.
+ */
+ u8 (*dma_sff_read_status)(struct hwif_s *);
+};
+
+enum {
+ IDE_PFLAG_PROBING = (1 << 0),
+};
+
+struct ide_host;
+
+typedef struct hwif_s {
+ struct hwif_s *mate; /* other hwif from same PCI chip */
+ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
+
+ struct ide_host *host;
+
+ char name[6]; /* name of interface, eg. "ide0" */
+
+ struct ide_io_ports io_ports;
+
+ unsigned long sata_scr[SATA_NR_PORTS];
+
+ ide_drive_t *devices[MAX_DRIVES + 1];
+
+ unsigned long port_flags;
+
+ u8 major; /* our major number */
+ u8 index; /* 0 for ide0; 1 for ide1; ... */
+ u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
+
+ u32 host_flags;
+
+ u8 pio_mask;
+
+ u8 ultra_mask;
+ u8 mwdma_mask;
+ u8 swdma_mask;
+
+ u8 cbl; /* cable type */
+
+ hwif_chipset_t chipset; /* sub-module for tuning.. */
+
+ struct device *dev;
+
+ void (*rw_disk)(ide_drive_t *, struct request *);
+
+ const struct ide_tp_ops *tp_ops;
+ const struct ide_port_ops *port_ops;
+ const struct ide_dma_ops *dma_ops;
+
+ /* dma physical region descriptor table (cpu view) */
+ unsigned int *dmatable_cpu;
+ /* dma physical region descriptor table (dma view) */
+ dma_addr_t dmatable_dma;
+
+ /* maximum number of PRD table entries */
+ int prd_max_nents;
+ /* PRD entry size in bytes */
+ int prd_ent_size;
+
+ /* Scatter-gather list used to build the above */
+ struct scatterlist *sg_table;
+ int sg_max_nents; /* Maximum number of entries in it */
+
+ struct ide_cmd cmd; /* current command */
+
+ int rqsize; /* max sectors per request */
+ int irq; /* our irq number */
+
+ unsigned long dma_base; /* base addr for dma ports */
+
+ unsigned long config_data; /* for use by chipset-specific code */
+ unsigned long select_data; /* for use by chipset-specific code */
+
+ unsigned long extra_base; /* extra addr for dma ports */
+ unsigned extra_ports; /* number of extra dma ports */
+
+ unsigned present : 1; /* this interface exists */
+ unsigned busy : 1; /* serializes devices on a port */
+
+ struct device gendev;
+ struct device *portdev;
+
+ struct completion gendev_rel_comp; /* To deal with device release() */
+
+ void *hwif_data; /* extra hwif data */
+
+#ifdef CONFIG_BLK_DEV_IDEACPI
+ struct ide_acpi_hwif_link *acpidata;
+#endif
+
+ /* IRQ handler, if active */
+ ide_startstop_t (*handler)(ide_drive_t *);
+
+ /* BOOL: polling active & poll_timeout field valid */
+ unsigned int polling : 1;
+
+ /* current drive */
+ ide_drive_t *cur_dev;
+
+ /* current request */
+ struct request *rq;
+
+ /* failsafe timer */
+ struct timer_list timer;
+ /* timeout value during long polls */
+ unsigned long poll_timeout;
+ /* queried upon timeouts */
+ int (*expiry)(ide_drive_t *);
+
+ int req_gen;
+ int req_gen_timer;
+
+ spinlock_t lock;
+} ____cacheline_internodealigned_in_smp ide_hwif_t;
+
+#define MAX_HOST_PORTS 4
+
+struct ide_host {
+ ide_hwif_t *ports[MAX_HOST_PORTS + 1];
+ unsigned int n_ports;
+ struct device *dev[2];
+
+ int (*init_chipset)(struct pci_dev *);
+
+ void (*get_lock)(irq_handler_t, void *);
+ void (*release_lock)(void);
+
+ irq_handler_t irq_handler;
+
+ unsigned long host_flags;
+
+ int irq_flags;
+
+ void *host_priv;
+ ide_hwif_t *cur_port; /* for hosts requiring serialization */
+
+ /* used for hosts requiring serialization */
+ volatile unsigned long host_busy;
+};
+
+#define IDE_HOST_BUSY 0
+
+/*
+ * internal ide interrupt handler type
+ */
+typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
+typedef int (ide_expiry_t)(ide_drive_t *);
+
+/* used by ide-cd, ide-floppy, etc. */
+typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
+
+extern struct mutex ide_setting_mtx;
+
+/*
+ * configurable drive settings
+ */
+
+#define DS_SYNC (1 << 0)
+
+struct ide_devset {
+ int (*get)(ide_drive_t *);
+ int (*set)(ide_drive_t *, int);
+ unsigned int flags;
+};
+
+#define __DEVSET(_flags, _get, _set) { \
+ .flags = _flags, \
+ .get = _get, \
+ .set = _set, \
+}
+
+#define ide_devset_get(name, field) \
+static int get_##name(ide_drive_t *drive) \
+{ \
+ return drive->field; \
+}
+
+#define ide_devset_set(name, field) \
+static int set_##name(ide_drive_t *drive, int arg) \
+{ \
+ drive->field = arg; \
+ return 0; \
+}
+
+#define ide_devset_get_flag(name, flag) \
+static int get_##name(ide_drive_t *drive) \
+{ \
+ return !!(drive->dev_flags & flag); \
+}
+
+#define ide_devset_set_flag(name, flag) \
+static int set_##name(ide_drive_t *drive, int arg) \
+{ \
+ if (arg) \
+ drive->dev_flags |= flag; \
+ else \
+ drive->dev_flags &= ~flag; \
+ return 0; \
+}
+
+#define __IDE_DEVSET(_name, _flags, _get, _set) \
+const struct ide_devset ide_devset_##_name = \
+ __DEVSET(_flags, _get, _set)
+
+#define IDE_DEVSET(_name, _flags, _get, _set) \
+static __IDE_DEVSET(_name, _flags, _get, _set)
+
+#define ide_devset_rw(_name, _func) \
+IDE_DEVSET(_name, 0, get_##_func, set_##_func)
+
+#define ide_devset_w(_name, _func) \
+IDE_DEVSET(_name, 0, NULL, set_##_func)
+
+#define ide_ext_devset_rw(_name, _func) \
+__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
+
+#define ide_ext_devset_rw_sync(_name, _func) \
+__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
+
+#define ide_decl_devset(_name) \
+extern const struct ide_devset ide_devset_##_name
+
+ide_decl_devset(io_32bit);
+ide_decl_devset(keepsettings);
+ide_decl_devset(pio_mode);
+ide_decl_devset(unmaskirq);
+ide_decl_devset(using_dma);
+
+#ifdef CONFIG_IDE_PROC_FS
+/*
+ * /proc/ide interface
+ */
+
+#define ide_devset_rw_field(_name, _field) \
+ide_devset_get(_name, _field); \
+ide_devset_set(_name, _field); \
+IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
+
+#define ide_devset_rw_flag(_name, _field) \
+ide_devset_get_flag(_name, _field); \
+ide_devset_set_flag(_name, _field); \
+IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
+
+struct ide_proc_devset {
+ const char *name;
+ const struct ide_devset *setting;
+ int min, max;
+ int (*mulf)(ide_drive_t *);
+ int (*divf)(ide_drive_t *);
+};
+
+#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
+ .name = __stringify(_name), \
+ .setting = &ide_devset_##_name, \
+ .min = _min, \
+ .max = _max, \
+ .mulf = _mulf, \
+ .divf = _divf, \
+}
+
+#define IDE_PROC_DEVSET(_name, _min, _max) \
+__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
+
+typedef struct {
+ const char *name;
+ umode_t mode;
+ const struct file_operations *proc_fops;
+} ide_proc_entry_t;
+
+void proc_ide_create(void);
+void proc_ide_destroy(void);
+void ide_proc_register_port(ide_hwif_t *);
+void ide_proc_port_register_devices(ide_hwif_t *);
+void ide_proc_unregister_device(ide_drive_t *);
+void ide_proc_unregister_port(ide_hwif_t *);
+void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
+void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
+
+extern const struct file_operations ide_capacity_proc_fops;
+extern const struct file_operations ide_geometry_proc_fops;
+#else
+static inline void proc_ide_create(void) { ; }
+static inline void proc_ide_destroy(void) { ; }
+static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
+static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
+static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
+static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
+static inline void ide_proc_register_driver(ide_drive_t *drive,
+ struct ide_driver *driver) { ; }
+static inline void ide_proc_unregister_driver(ide_drive_t *drive,
+ struct ide_driver *driver) { ; }
+#endif
+
+enum {
+ /* enter/exit functions */
+ IDE_DBG_FUNC = (1 << 0),
+ /* sense key/asc handling */
+ IDE_DBG_SENSE = (1 << 1),
+ /* packet commands handling */
+ IDE_DBG_PC = (1 << 2),
+ /* request handling */
+ IDE_DBG_RQ = (1 << 3),
+ /* driver probing/setup */
+ IDE_DBG_PROBE = (1 << 4),
+};
+
+/* DRV_NAME has to be defined in the driver before using the macro below */
+#define __ide_debug_log(lvl, fmt, args...) \
+{ \
+ if (unlikely(drive->debug_mask & lvl)) \
+ printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
+ __func__, ## args); \
+}
+
+/*
+ * Power Management state machine (rq->pm->pm_step).
+ *
+ * For each step, the core calls ide_start_power_step() first.
+ * This can return:
+ * - ide_stopped : In this case, the core calls us back again unless
+ * the step has been set to IDE_PM_COMPLETED.
+ * - ide_started : In this case, the channel is left busy until an
+ * async event (interrupt) occurs.
+ * Typically, ide_start_power_step() will issue a taskfile request with
+ * do_rw_taskfile().
+ *
+ * Upon reception of the interrupt, the core will call ide_complete_power_step()
+ * with the error code if any. This routine should update the step value
+ * and return. It should not start a new request. The core will call
+ * ide_start_power_step() for the new step value, unless the step has
+ * been set to IDE_PM_COMPLETED.
+ */
+enum {
+ IDE_PM_START_SUSPEND,
+ IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
+ IDE_PM_STANDBY,
+
+ IDE_PM_START_RESUME,
+ IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
+ IDE_PM_IDLE,
+ IDE_PM_RESTORE_DMA,
+
+ IDE_PM_COMPLETED,
+};
+
+int generic_ide_suspend(struct device *, pm_message_t);
+int generic_ide_resume(struct device *);
+
+void ide_complete_power_step(ide_drive_t *, struct request *);
+ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
+void ide_complete_pm_rq(ide_drive_t *, struct request *);
+void ide_check_pm_state(ide_drive_t *, struct request *);
+
+/*
+ * Subdrivers support.
+ *
+ * The gendriver.owner field should be set to the module owner of this driver.
+ * The gendriver.name field should be set to the name of this driver
+ */
+struct ide_driver {
+ const char *version;
+ ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
+ struct device_driver gen_driver;
+ int (*probe)(ide_drive_t *);
+ void (*remove)(ide_drive_t *);
+ void (*resume)(ide_drive_t *);
+ void (*shutdown)(ide_drive_t *);
+#ifdef CONFIG_IDE_PROC_FS
+ ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
+ const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
+#endif
+};
+
+#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
+
+int ide_device_get(ide_drive_t *);
+void ide_device_put(ide_drive_t *);
+
+struct ide_ioctl_devset {
+ unsigned int get_ioctl;
+ unsigned int set_ioctl;
+ const struct ide_devset *setting;
+};
+
+int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
+ unsigned long, const struct ide_ioctl_devset *);
+
+int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
+
+extern int ide_vlb_clk;
+extern int ide_pci_clk;
+
+int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
+void ide_kill_rq(ide_drive_t *, struct request *);
+
+void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
+void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
+
+void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
+ unsigned int);
+
+void ide_pad_transfer(ide_drive_t *, int, int);
+
+ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
+
+void ide_fix_driveid(u16 *);
+
+extern void ide_fixstring(u8 *, const int, const int);
+
+int ide_busy_sleep(ide_drive_t *, unsigned long, int);
+
+int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
+int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
+
+ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
+ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
+
+extern ide_startstop_t ide_do_reset (ide_drive_t *);
+
+extern int ide_devset_execute(ide_drive_t *drive,
+ const struct ide_devset *setting, int arg);
+
+void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
+int ide_complete_rq(ide_drive_t *, int, unsigned int);
+
+void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
+void ide_tf_dump(const char *, struct ide_cmd *);
+
+void ide_exec_command(ide_hwif_t *, u8);
+u8 ide_read_status(ide_hwif_t *);
+u8 ide_read_altstatus(ide_hwif_t *);
+void ide_write_devctl(ide_hwif_t *, u8);
+
+void ide_dev_select(ide_drive_t *);
+void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
+void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
+
+void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
+void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
+
+void SELECT_MASK(ide_drive_t *, int);
+
+u8 ide_read_error(ide_drive_t *);
+void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
+
+int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
+
+int ide_check_atapi_device(ide_drive_t *, const char *);
+
+void ide_init_pc(struct ide_atapi_pc *);
+
+/* Disk head parking */
+extern wait_queue_head_t ide_park_wq;
+ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len);
+
+/*
+ * Special requests for ide-tape block device strategy routine.
+ *
+ * In order to service a character device command, we add special requests to
+ * the tail of our block device request queue and wait for their completion.
+ */
+enum {
+ REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
+ REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
+ REQ_IDETAPE_READ = (1 << 2),
+ REQ_IDETAPE_WRITE = (1 << 3),
+};
+
+int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
+ void *, unsigned int);
+
+int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
+int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
+int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
+void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
+void ide_retry_pc(ide_drive_t *drive);
+
+void ide_prep_sense(ide_drive_t *drive, struct request *rq);
+int ide_queue_sense_rq(ide_drive_t *drive, void *special);
+
+int ide_cd_expiry(ide_drive_t *);
+
+int ide_cd_get_xferlen(struct request *);
+
+ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
+
+ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
+
+void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
+
+void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
+
+int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
+int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
+
+int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
+
+int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
+
+extern int ide_driveid_update(ide_drive_t *);
+extern int ide_config_drive_speed(ide_drive_t *, u8);
+extern u8 eighty_ninty_three (ide_drive_t *);
+extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
+
+extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
+
+extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
+
+extern void ide_timer_expiry(unsigned long);
+extern irqreturn_t ide_intr(int irq, void *dev_id);
+extern void do_ide_request(struct request_queue *);
+extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
+
+void ide_init_disk(struct gendisk *, ide_drive_t *);
+
+#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
+#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
+#else
+#define ide_pci_register_driver(d) pci_register_driver(d)
+#endif
+
+static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
+ return 1;
+ return 0;
+}
+
+void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
+ struct ide_hw *, struct ide_hw **);
+void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+int ide_pci_set_master(struct pci_dev *, const char *);
+unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
+int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
+int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
+#else
+static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
+ const struct ide_port_info *d)
+{
+ return -EINVAL;
+}
+#endif
+
+struct ide_pci_enablebit {
+ u8 reg; /* byte pci reg holding the enable-bit */
+ u8 mask; /* mask to isolate the enable-bit */
+ u8 val; /* value of masked reg when "enabled" */
+};
+
+enum {
+ /* Uses ISA control ports not PCI ones. */
+ IDE_HFLAG_ISA_PORTS = (1 << 0),
+ /* single port device */
+ IDE_HFLAG_SINGLE = (1 << 1),
+ /* don't use legacy PIO blacklist */
+ IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
+ /* set for the second port of QD65xx */
+ IDE_HFLAG_QD_2ND_PORT = (1 << 3),
+ /* use PIO8/9 for prefetch off/on */
+ IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
+ /* use PIO6/7 for fast-devsel off/on */
+ IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5),
+ /* use 100-102 and 200-202 PIO values to set DMA modes */
+ IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6),
+ /*
+ * keep DMA setting when programming PIO mode, may be used only
+ * for hosts which have separate PIO and DMA timings (ie. PMAC)
+ */
+ IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7),
+ /* program host for the transfer mode after programming device */
+ IDE_HFLAG_POST_SET_MODE = (1 << 8),
+ /* don't program host/device for the transfer mode ("smart" hosts) */
+ IDE_HFLAG_NO_SET_MODE = (1 << 9),
+ /* trust BIOS for programming chipset/device for DMA */
+ IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10),
+ /* host is CS5510/CS5520 */
+ IDE_HFLAG_CS5520 = (1 << 11),
+ /* ATAPI DMA is unsupported */
+ IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
+ /* set if host is a "non-bootable" controller */
+ IDE_HFLAG_NON_BOOTABLE = (1 << 13),
+ /* host doesn't support DMA */
+ IDE_HFLAG_NO_DMA = (1 << 14),
+ /* check if host is PCI IDE device before allowing DMA */
+ IDE_HFLAG_NO_AUTODMA = (1 << 15),
+ /* host uses MMIO */
+ IDE_HFLAG_MMIO = (1 << 16),
+ /* no LBA48 */
+ IDE_HFLAG_NO_LBA48 = (1 << 17),
+ /* no LBA48 DMA */
+ IDE_HFLAG_NO_LBA48_DMA = (1 << 18),
+ /* data FIFO is cleared by an error */
+ IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
+ /* serialize ports */
+ IDE_HFLAG_SERIALIZE = (1 << 20),
+ /* host is DTC2278 */
+ IDE_HFLAG_DTC2278 = (1 << 21),
+ /* 4 devices on a single set of I/O ports */
+ IDE_HFLAG_4DRIVES = (1 << 22),
+ /* host is TRM290 */
+ IDE_HFLAG_TRM290 = (1 << 23),
+ /* use 32-bit I/O ops */
+ IDE_HFLAG_IO_32BIT = (1 << 24),
+ /* unmask IRQs */
+ IDE_HFLAG_UNMASK_IRQS = (1 << 25),
+ IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26),
+ /* serialize ports if DMA is possible (for sl82c105) */
+ IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
+ /* force host out of "simplex" mode */
+ IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
+ /* DSC overlap is unsupported */
+ IDE_HFLAG_NO_DSC = (1 << 29),
+ /* never use 32-bit I/O ops */
+ IDE_HFLAG_NO_IO_32BIT = (1 << 30),
+ /* never unmask IRQs */
+ IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31),
+};
+
+#ifdef CONFIG_BLK_DEV_OFFBOARD
+# define IDE_HFLAG_OFF_BOARD 0
+#else
+# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
+#endif
+
+struct ide_port_info {
+ char *name;
+
+ int (*init_chipset)(struct pci_dev *);
+
+ void (*get_lock)(irq_handler_t, void *);
+ void (*release_lock)(void);
+
+ void (*init_iops)(ide_hwif_t *);
+ void (*init_hwif)(ide_hwif_t *);
+ int (*init_dma)(ide_hwif_t *,
+ const struct ide_port_info *);
+
+ const struct ide_tp_ops *tp_ops;
+ const struct ide_port_ops *port_ops;
+ const struct ide_dma_ops *dma_ops;
+
+ struct ide_pci_enablebit enablebits[2];
+
+ hwif_chipset_t chipset;
+
+ u16 max_sectors; /* if < than the default one */
+
+ u32 host_flags;
+
+ int irq_flags;
+
+ u8 pio_mask;
+ u8 swdma_mask;
+ u8 mwdma_mask;
+ u8 udma_mask;
+};
+
+int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
+int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
+ const struct ide_port_info *, void *);
+void ide_pci_remove(struct pci_dev *);
+
+#ifdef CONFIG_PM
+int ide_pci_suspend(struct pci_dev *, pm_message_t);
+int ide_pci_resume(struct pci_dev *);
+#else
+#define ide_pci_suspend NULL
+#define ide_pci_resume NULL
+#endif
+
+void ide_map_sg(ide_drive_t *, struct ide_cmd *);
+void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
+
+#define BAD_DMA_DRIVE 0
+#define GOOD_DMA_DRIVE 1
+
+struct drive_list_entry {
+ const char *id_model;
+ const char *id_firmware;
+};
+
+int ide_in_drive_list(u16 *, const struct drive_list_entry *);
+
+#ifdef CONFIG_BLK_DEV_IDEDMA
+int ide_dma_good_drive(ide_drive_t *);
+int __ide_dma_bad_drive(ide_drive_t *);
+
+u8 ide_find_dma_mode(ide_drive_t *, u8);
+
+static inline u8 ide_max_dma_mode(ide_drive_t *drive)
+{
+ return ide_find_dma_mode(drive, XFER_UDMA_6);
+}
+
+void ide_dma_off_quietly(ide_drive_t *);
+void ide_dma_off(ide_drive_t *);
+void ide_dma_on(ide_drive_t *);
+int ide_set_dma(ide_drive_t *);
+void ide_check_dma_crc(ide_drive_t *);
+ide_startstop_t ide_dma_intr(ide_drive_t *);
+
+int ide_allocate_dma_engine(ide_hwif_t *);
+void ide_release_dma_engine(ide_hwif_t *);
+
+int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
+void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
+int config_drive_for_dma(ide_drive_t *);
+int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
+void ide_dma_host_set(ide_drive_t *, int);
+int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
+extern void ide_dma_start(ide_drive_t *);
+int ide_dma_end(ide_drive_t *);
+int ide_dma_test_irq(ide_drive_t *);
+int ide_dma_sff_timer_expiry(ide_drive_t *);
+u8 ide_dma_sff_read_status(ide_hwif_t *);
+extern const struct ide_dma_ops sff_dma_ops;
+#else
+static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
+
+void ide_dma_lost_irq(ide_drive_t *);
+ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
+
+#else
+static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
+static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
+static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
+static inline void ide_dma_off(ide_drive_t *drive) { ; }
+static inline void ide_dma_on(ide_drive_t *drive) { ; }
+static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
+static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
+static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
+static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
+static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
+static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
+static inline int ide_dma_prepare(ide_drive_t *drive,
+ struct ide_cmd *cmd) { return 1; }
+static inline void ide_dma_unmap_sg(ide_drive_t *drive,
+ struct ide_cmd *cmd) { ; }
+#endif /* CONFIG_BLK_DEV_IDEDMA */
+
+#ifdef CONFIG_BLK_DEV_IDEACPI
+int ide_acpi_init(void);
+bool ide_port_acpi(ide_hwif_t *hwif);
+extern int ide_acpi_exec_tfs(ide_drive_t *drive);
+extern void ide_acpi_get_timing(ide_hwif_t *hwif);
+extern void ide_acpi_push_timing(ide_hwif_t *hwif);
+void ide_acpi_init_port(ide_hwif_t *);
+void ide_acpi_port_init_devices(ide_hwif_t *);
+extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
+#else
+static inline int ide_acpi_init(void) { return 0; }
+static inline bool ide_port_acpi(ide_hwif_t *hwif) { return false; }
+static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
+static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
+#endif
+
+void ide_register_region(struct gendisk *);
+void ide_unregister_region(struct gendisk *);
+
+void ide_check_nien_quirk_list(ide_drive_t *);
+void ide_undecoded_slave(ide_drive_t *);
+
+void ide_port_apply_params(ide_hwif_t *);
+int ide_sysfs_register_port(ide_hwif_t *);
+
+struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
+ unsigned int);
+void ide_host_free(struct ide_host *);
+int ide_host_register(struct ide_host *, const struct ide_port_info *,
+ struct ide_hw **);
+int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
+ struct ide_host **);
+void ide_host_remove(struct ide_host *);
+int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
+void ide_port_unregister_devices(ide_hwif_t *);
+void ide_port_scan(ide_hwif_t *);
+
+static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
+{
+ return hwif->hwif_data;
+}
+
+static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
+{
+ hwif->hwif_data = data;
+}
+
+extern void ide_toggle_bounce(ide_drive_t *drive, int on);
+
+u64 ide_get_lba_addr(struct ide_cmd *, int);
+u8 ide_dump_status(ide_drive_t *, const char *, u8);
+
+struct ide_timing {
+ u8 mode;
+ u8 setup; /* t1 */
+ u16 act8b; /* t2 for 8-bit io */
+ u16 rec8b; /* t2i for 8-bit io */
+ u16 cyc8b; /* t0 for 8-bit io */
+ u16 active; /* t2 or tD */
+ u16 recover; /* t2i or tK */
+ u16 cycle; /* t0 */
+ u16 udma; /* t2CYCTYP/2 */
+};
+
+enum {
+ IDE_TIMING_SETUP = (1 << 0),
+ IDE_TIMING_ACT8B = (1 << 1),
+ IDE_TIMING_REC8B = (1 << 2),
+ IDE_TIMING_CYC8B = (1 << 3),
+ IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
+ IDE_TIMING_CYC8B,
+ IDE_TIMING_ACTIVE = (1 << 4),
+ IDE_TIMING_RECOVER = (1 << 5),
+ IDE_TIMING_CYCLE = (1 << 6),
+ IDE_TIMING_UDMA = (1 << 7),
+ IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
+ IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
+ IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
+};
+
+struct ide_timing *ide_timing_find_mode(u8);
+u16 ide_pio_cycle_time(ide_drive_t *, u8);
+void ide_timing_merge(struct ide_timing *, struct ide_timing *,
+ struct ide_timing *, unsigned int);
+int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
+
+#ifdef CONFIG_IDE_XFER_MODE
+int ide_scan_pio_blacklist(char *);
+const char *ide_xfer_verbose(u8);
+int ide_pio_need_iordy(ide_drive_t *, const u8);
+int ide_set_pio_mode(ide_drive_t *, u8);
+int ide_set_dma_mode(ide_drive_t *, u8);
+void ide_set_pio(ide_drive_t *, u8);
+int ide_set_xfer_rate(ide_drive_t *, u8);
+#else
+static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
+static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
+#endif
+
+static inline void ide_set_max_pio(ide_drive_t *drive)
+{
+ ide_set_pio(drive, 255);
+}
+
+char *ide_media_string(ide_drive_t *);
+
+extern const struct attribute_group *ide_dev_groups[];
+extern struct bus_type ide_bus_type;
+extern struct class *ide_port_class;
+
+static inline void ide_dump_identify(u8 *id)
+{
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
+}
+
+static inline int hwif_to_node(ide_hwif_t *hwif)
+{
+ return hwif->dev ? dev_to_node(hwif->dev) : -1;
+}
+
+static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
+{
+ ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
+
+ return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
+}
+
+static inline void *ide_get_drivedata(ide_drive_t *drive)
+{
+ return drive->drive_data;
+}
+
+static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
+{
+ drive->drive_data = data;
+}
+
+#define ide_port_for_each_dev(i, dev, port) \
+ for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
+
+#define ide_port_for_each_present_dev(i, dev, port) \
+ for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
+ if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
+
+#define ide_host_for_each_port(i, port, host) \
+ for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
+
+#endif /* _IDE_H */
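
As an illustrative aside (the example_wcache name is made up; a minimal sketch assuming a sub-driver context that includes <linux/ide.h>): the devset macros above are meant to be stamped out by sub-drivers roughly like this, and the generated struct can then be driven through ide_devset_execute(). Real drivers follow the same pattern for settings such as unmaskirq or using_dma.

#include <linux/ide.h>

/* Generate a getter and a setter backed by the IDE_DFLAG_WCACHE flag. */
ide_devset_get_flag(example_wcache, IDE_DFLAG_WCACHE);
ide_devset_set_flag(example_wcache, IDE_DFLAG_WCACHE);
IDE_DEVSET(example_wcache, DS_SYNC, get_example_wcache, set_example_wcache);

/* Hypothetical: turn the flag on via the generic execute path. */
static int example_enable_wcache(ide_drive_t *drive)
{
        return ide_devset_execute(drive, &ide_devset_example_wcache, 1);
}
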
diff --git a/include/linux/idr.h b/include/linux/idr.h
new file mode 100644
index 000000000..013fd9bc4
--- /dev/null
+++ b/include/linux/idr.h
@@ -0,0 +1,186 @@
+/*
+ * include/linux/idr.h
+ *
+ * 2002-10-18 written by Jim Houston jim.houston@ccur.com
+ * Copyright (C) 2002 by Concurrent Computer Corporation
+ * Distributed under the GNU GPL license version 2.
+ *
+ * Small id to pointer translation service avoiding fixed sized
+ * tables.
+ */
+
+#ifndef __IDR_H__
+#define __IDR_H__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/rcupdate.h>
+
+/*
+ * We want shallower trees and thus more bits covered at each layer. 8
+ * bits gives us a large enough first layer for most use cases and a
+ * maximum tree depth of 4. Each idr_layer is slightly larger than 2k on
+ * 64bit and 1k on 32bit.
+ */
+#define IDR_BITS 8
+#define IDR_SIZE (1 << IDR_BITS)
+#define IDR_MASK ((1 << IDR_BITS)-1)
+
+struct idr_layer {
+ int prefix; /* the ID prefix of this idr_layer */
+ int layer; /* distance from leaf */
+ struct idr_layer __rcu *ary[1<<IDR_BITS];
+ int count; /* When zero, we can release it */
+ union {
+ /* A zero bit means "space here" */
+ DECLARE_BITMAP(bitmap, IDR_SIZE);
+ struct rcu_head rcu_head;
+ };
+};
+
+struct idr {
+ struct idr_layer __rcu *hint; /* the last layer allocated from */
+ struct idr_layer __rcu *top;
+ int layers; /* only valid w/o concurrent changes */
+ int cur; /* current pos for cyclic allocation */
+ spinlock_t lock;
+ int id_free_cnt;
+ struct idr_layer *id_free;
+};
+
+#define IDR_INIT(name) \
+{ \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+}
+#define DEFINE_IDR(name) struct idr name = IDR_INIT(name)
+
+/**
+ * DOC: idr sync
+ * idr synchronization (stolen from radix-tree.h)
+ *
+ * idr_find() is able to be called locklessly, using RCU. The caller must
+ * ensure calls to this function are made within rcu_read_lock() regions.
+ * Other readers (lock-free or otherwise) and modifications may be running
+ * concurrently.
+ *
+ * It is still required that the caller manage the synchronization and
+ * lifetimes of the items. So if RCU lock-free lookups are used, typically
+ * this would mean that the items have their own locks, or are amenable to
+ * lock-free access; and that the items are freed by RCU (or only freed after
+ * having been deleted from the idr tree *and* a synchronize_rcu() grace
+ * period).
+ */
+
+/*
+ * This is what we export.
+ */
+
+void *idr_find_slowpath(struct idr *idp, int id);
+void idr_preload(gfp_t gfp_mask);
+int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
+int idr_for_each(struct idr *idp,
+ int (*fn)(int id, void *p, void *data), void *data);
+void *idr_get_next(struct idr *idp, int *nextid);
+void *idr_replace(struct idr *idp, void *ptr, int id);
+void idr_remove(struct idr *idp, int id);
+void idr_destroy(struct idr *idp);
+void idr_init(struct idr *idp);
+bool idr_is_empty(struct idr *idp);
+
+/**
+ * idr_preload_end - end preload section started with idr_preload()
+ *
+ * Each idr_preload() should be matched with an invocation of this
+ * function. See idr_preload() for details.
+ */
+static inline void idr_preload_end(void)
+{
+ preempt_enable();
+}
+
+/**
+ * idr_find - return pointer for given id
+ * @idr: idr handle
+ * @id: lookup key
+ *
+ * Return the pointer given the id it has been registered with. A %NULL
+ * return indicates that @id is not valid or you passed %NULL in
+ * idr_get_new().
+ *
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers lifetimes are correctly managed.
+ */
+static inline void *idr_find(struct idr *idr, int id)
+{
+ struct idr_layer *hint = rcu_dereference_raw(idr->hint);
+
+ if (hint && (id & ~IDR_MASK) == hint->prefix)
+ return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
+
+ return idr_find_slowpath(idr, id);
+}
+
+/**
+ * idr_for_each_entry - iterate over an idr's elements of a given type
+ * @idp: idr handle
+ * @entry: the type * to use as cursor
+ * @id: id entry's key
+ *
+ * @entry and @id do not need to be initialized before the loop, and
+ * after normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
+ */
+#define idr_for_each_entry(idp, entry, id) \
+ for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
+
+/*
+ * IDA - IDR based id allocator, use when translation from id to
+ * pointer isn't necessary.
+ *
+ * IDA_BITMAP_LONGS is calculated to be one less to accommodate
+ * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
+ */
+#define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */
+#define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1)
+#define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8)
+
+struct ida_bitmap {
+ long nr_busy;
+ unsigned long bitmap[IDA_BITMAP_LONGS];
+};
+
+struct ida {
+ struct idr idr;
+ struct ida_bitmap *free_bitmap;
+};
+
+#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
+#define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
+
+int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
+int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
+void ida_remove(struct ida *ida, int id);
+void ida_destroy(struct ida *ida);
+void ida_init(struct ida *ida);
+
+int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
+ gfp_t gfp_mask);
+void ida_simple_remove(struct ida *ida, unsigned int id);
+
+/**
+ * ida_get_new - allocate new ID
+ * @ida: idr handle
+ * @p_id: pointer to the allocated handle
+ *
+ * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
+ */
+static inline int ida_get_new(struct ida *ida, int *p_id)
+{
+ return ida_get_new_above(ida, 0, p_id);
+}
+
+void __init idr_init_cache(void);
+
+#endif /* __IDR_H__ */
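
To make the "idr sync" rules above concrete, here is a minimal sketch (the example_* names are hypothetical) of the usual pattern: preload and allocate under a spinlock, then look up locklessly under rcu_read_lock().

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_idr_lock);

/* Allocate an id >= 1 for @ptr; returns the id or a negative errno. */
static int example_register(void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);                /* may sleep; disables preemption */
        spin_lock(&example_idr_lock);
        /* end == 0 means "no upper limit"; GFP_NOWAIT because we hold a lock */
        id = idr_alloc(&example_idr, ptr, 1, 0, GFP_NOWAIT);
        spin_unlock(&example_idr_lock);
        idr_preload_end();

        return id;
}

/* Lock-free lookup, per the "idr sync" rules documented above. */
static void *example_lookup(int id)
{
        void *ptr;

        rcu_read_lock();
        ptr = idr_find(&example_idr, id);
        rcu_read_unlock();

        return ptr;
}
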
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
new file mode 100644
index 000000000..b9c7897dc
--- /dev/null
+++ b/include/linux/ieee80211.h
@@ -0,0 +1,2548 @@
+/*
+ * IEEE 802.11 defines
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2005, Devicescape Software, Inc.
+ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
+ * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef LINUX_IEEE80211_H
+#define LINUX_IEEE80211_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+/*
+ * DS bit usage
+ *
+ * TA = transmitter address
+ * RA = receiver address
+ * DA = destination address
+ * SA = source address
+ *
+ * ToDS  FromDS  A1(RA)  A2(TA)  A3      A4      Use
+ * -----------------------------------------------------------------
+ *  0     0      DA      SA      BSSID   -       IBSS/DLS
+ *  0     1      DA      BSSID   SA      -       AP -> STA
+ *  1     0      BSSID   SA      DA      -       AP <- STA
+ *  1     1      RA      TA      DA      SA      unspecified (WDS)
+ */
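+
+/*
+ * Example (illustrative sketch; "hdr", "da" and "sa" are placeholders):
+ * picking DA/SA out of a received header according to the table above,
+ * using the ieee80211_has_*() helpers defined further down.
+ *
+ *	if (ieee80211_has_a4(hdr->frame_control)) {
+ *		da = hdr->addr3; sa = hdr->addr4;
+ *	} else if (ieee80211_has_fromds(hdr->frame_control)) {
+ *		da = hdr->addr1; sa = hdr->addr3;
+ *	} else if (ieee80211_has_tods(hdr->frame_control)) {
+ *		da = hdr->addr3; sa = hdr->addr2;
+ *	} else {
+ *		da = hdr->addr1; sa = hdr->addr2;
+ *	}
+ */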
+
+#define FCS_LEN 4
+
+#define IEEE80211_FCTL_VERS 0x0003
+#define IEEE80211_FCTL_FTYPE 0x000c
+#define IEEE80211_FCTL_STYPE 0x00f0
+#define IEEE80211_FCTL_TODS 0x0100
+#define IEEE80211_FCTL_FROMDS 0x0200
+#define IEEE80211_FCTL_MOREFRAGS 0x0400
+#define IEEE80211_FCTL_RETRY 0x0800
+#define IEEE80211_FCTL_PM 0x1000
+#define IEEE80211_FCTL_MOREDATA 0x2000
+#define IEEE80211_FCTL_PROTECTED 0x4000
+#define IEEE80211_FCTL_ORDER 0x8000
+#define IEEE80211_FCTL_CTL_EXT 0x0f00
+
+#define IEEE80211_SCTL_FRAG 0x000F
+#define IEEE80211_SCTL_SEQ 0xFFF0
+
+#define IEEE80211_FTYPE_MGMT 0x0000
+#define IEEE80211_FTYPE_CTL 0x0004
+#define IEEE80211_FTYPE_DATA 0x0008
+#define IEEE80211_FTYPE_EXT 0x000c
+
+/* management */
+#define IEEE80211_STYPE_ASSOC_REQ 0x0000
+#define IEEE80211_STYPE_ASSOC_RESP 0x0010
+#define IEEE80211_STYPE_REASSOC_REQ 0x0020
+#define IEEE80211_STYPE_REASSOC_RESP 0x0030
+#define IEEE80211_STYPE_PROBE_REQ 0x0040
+#define IEEE80211_STYPE_PROBE_RESP 0x0050
+#define IEEE80211_STYPE_BEACON 0x0080
+#define IEEE80211_STYPE_ATIM 0x0090
+#define IEEE80211_STYPE_DISASSOC 0x00A0
+#define IEEE80211_STYPE_AUTH 0x00B0
+#define IEEE80211_STYPE_DEAUTH 0x00C0
+#define IEEE80211_STYPE_ACTION 0x00D0
+
+/* control */
+#define IEEE80211_STYPE_CTL_EXT 0x0060
+#define IEEE80211_STYPE_BACK_REQ 0x0080
+#define IEEE80211_STYPE_BACK 0x0090
+#define IEEE80211_STYPE_PSPOLL 0x00A0
+#define IEEE80211_STYPE_RTS 0x00B0
+#define IEEE80211_STYPE_CTS 0x00C0
+#define IEEE80211_STYPE_ACK 0x00D0
+#define IEEE80211_STYPE_CFEND 0x00E0
+#define IEEE80211_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define IEEE80211_STYPE_DATA 0x0000
+#define IEEE80211_STYPE_DATA_CFACK 0x0010
+#define IEEE80211_STYPE_DATA_CFPOLL 0x0020
+#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
+#define IEEE80211_STYPE_NULLFUNC 0x0040
+#define IEEE80211_STYPE_CFACK 0x0050
+#define IEEE80211_STYPE_CFPOLL 0x0060
+#define IEEE80211_STYPE_CFACKPOLL 0x0070
+#define IEEE80211_STYPE_QOS_DATA 0x0080
+#define IEEE80211_STYPE_QOS_DATA_CFACK 0x0090
+#define IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0
+#define IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0
+#define IEEE80211_STYPE_QOS_NULLFUNC 0x00C0
+#define IEEE80211_STYPE_QOS_CFACK 0x00D0
+#define IEEE80211_STYPE_QOS_CFPOLL 0x00E0
+#define IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0
+
+/* extension, added by 802.11ad */
+#define IEEE80211_STYPE_DMG_BEACON 0x0000
+
+/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
+#define IEEE80211_CTL_EXT_POLL 0x2000
+#define IEEE80211_CTL_EXT_SPR 0x3000
+#define IEEE80211_CTL_EXT_GRANT 0x4000
+#define IEEE80211_CTL_EXT_DMG_CTS 0x5000
+#define IEEE80211_CTL_EXT_DMG_DTS 0x6000
+#define IEEE80211_CTL_EXT_SSW 0x8000
+#define IEEE80211_CTL_EXT_SSW_FBACK 0x9000
+#define IEEE80211_CTL_EXT_SSW_ACK 0xa000
+
+
+#define IEEE80211_SN_MASK ((IEEE80211_SCTL_SEQ) >> 4)
+#define IEEE80211_MAX_SN IEEE80211_SN_MASK
+#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
+
+static inline int ieee80211_sn_less(u16 sn1, u16 sn2)
+{
+ return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
+}
+
+static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2)
+{
+ return (sn1 + sn2) & IEEE80211_SN_MASK;
+}
+
+static inline u16 ieee80211_sn_inc(u16 sn)
+{
+ return ieee80211_sn_add(sn, 1);
+}
+
+static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
+{
+ return (sn1 - sn2) & IEEE80211_SN_MASK;
+}
+
+#define IEEE80211_SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define IEEE80211_SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
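+
+/*
+ * Worked example: all of the helpers above operate modulo
+ * IEEE80211_SN_MODULO (4096).  ieee80211_sn_inc(4095) == 0,
+ * ieee80211_sn_sub(2, 4090) == 8, and ieee80211_sn_less(4090, 5) is true
+ * because (4090 - 5) & 0xfff == 4085 > 2048, i.e. 4090 is behind 5 once
+ * wrap-around is taken into account.
+ */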
+
+/* miscellaneous IEEE 802.11 constants */
+#define IEEE80211_MAX_FRAG_THRESHOLD 2352
+#define IEEE80211_MAX_RTS_THRESHOLD 2353
+#define IEEE80211_MAX_AID 2007
+#define IEEE80211_MAX_TIM_LEN 251
+#define IEEE80211_MAX_MESH_PEERINGS 63
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+ 6.2.1.1.2.
+
+ 802.11e clarifies the figure in section 7.1.2. The frame body is
+ up to 2304 octets long (maximum MSDU size) plus any crypt overhead. */
+#define IEEE80211_MAX_DATA_LEN 2304
+/* 802.11ad extends maximum MSDU size for DMG (freq > 40 GHz) networks
+ * to 7920 bytes, see 8.2.3 General frame format
+ */
+#define IEEE80211_MAX_DATA_LEN_DMG 7920
+/* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
+#define IEEE80211_MAX_FRAME_LEN 2352
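+
+/*
+ * Worked example: 30 + 2 + 2304 + 12 + 4 = 2352, which is also where the
+ * fragmentation threshold above comes from.
+ */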
+
+#define IEEE80211_MAX_SSID_LEN 32
+
+#define IEEE80211_MAX_MESH_ID_LEN 32
+
+#define IEEE80211_FIRST_TSPEC_TSID 8
+#define IEEE80211_NUM_TIDS 16
+
+/* number of user priorities 802.11 uses */
+#define IEEE80211_NUM_UPS 8
+
+#define IEEE80211_QOS_CTL_LEN 2
+/* 1d tag mask */
+#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
+/* TID mask */
+#define IEEE80211_QOS_CTL_TID_MASK 0x000f
+/* EOSP */
+#define IEEE80211_QOS_CTL_EOSP 0x0010
+/* ACK policy */
+#define IEEE80211_QOS_CTL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK 0x0020
+#define IEEE80211_QOS_CTL_ACK_POLICY_NO_EXPL 0x0040
+#define IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK 0x0060
+#define IEEE80211_QOS_CTL_ACK_POLICY_MASK 0x0060
+/* A-MSDU 802.11n */
+#define IEEE80211_QOS_CTL_A_MSDU_PRESENT 0x0080
+/* Mesh Control 802.11s */
+#define IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT 0x0100
+
+/* Mesh Power Save Level */
+#define IEEE80211_QOS_CTL_MESH_PS_LEVEL 0x0200
+/* Mesh Receiver Service Period Initiated */
+#define IEEE80211_QOS_CTL_RSPI 0x0400
+
+/* U-APSD queue for WMM IEs sent by AP */
+#define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7)
+#define IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK 0x0f
+
+/* U-APSD queues for WMM IEs sent by STA */
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VO (1<<0)
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VI (1<<1)
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BK (1<<2)
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BE (1<<3)
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK 0x0f
+
+/* U-APSD max SP length for WMM IEs sent by STA */
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 0x00
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_2 0x01
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_4 0x02
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_6 0x03
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5
+
+#define IEEE80211_HT_CTL_LEN 4
+
+struct ieee80211_hdr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+} __packed __aligned(2);
+
+struct ieee80211_hdr_3addr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+} __packed __aligned(2);
+
+struct ieee80211_qos_hdr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ __le16 qos_ctrl;
+} __packed __aligned(2);
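+
+/*
+ * Example (illustrative sketch; "hdr" is a placeholder struct
+ * ieee80211_qos_hdr pointer): extracting the TID of a QoS data frame.
+ *
+ *	u8 tid = le16_to_cpu(hdr->qos_ctrl) & IEEE80211_QOS_CTL_TID_MASK;
+ */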
+
+/**
+ * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_tods(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_TODS)) != 0;
+}
+
+/**
+ * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_fromds(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FROMDS)) != 0;
+}
+
+/**
+ * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_a4(__le16 fc)
+{
+ __le16 tmp = cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS);
+ return (fc & tmp) == tmp;
+}
+
+/**
+ * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_morefrags(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) != 0;
+}
+
+/**
+ * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_retry(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_RETRY)) != 0;
+}
+
+/**
+ * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_pm(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_PM)) != 0;
+}
+
+/**
+ * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_moredata(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) != 0;
+}
+
+/**
+ * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_protected(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_PROTECTED)) != 0;
+}
+
+/**
+ * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_has_order(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_ORDER)) != 0;
+}
+
+/**
+ * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_mgmt(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT);
+}
+
+/**
+ * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_ctl(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL);
+}
+
+/**
+ * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_data(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA);
+}
+
+/**
+ * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_data_qos(__le16 fc)
+{
+ /*
+ * mask with QOS_DATA rather than IEEE80211_FCTL_STYPE as we just need
+ * to check the one bit
+ */
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_STYPE_QOS_DATA)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA);
+}
+
+/**
+ * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_data_present(__le16 fc)
+{
+ /*
+ * mask with 0x40 and test that that bit is clear to only return true
+ * for the data-containing subtypes.
+ */
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | 0x40)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA);
+}
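+
+/*
+ * Worked example: the "no data" subtypes (NULLFUNC 0x40, CFACK 0x50,
+ * CFPOLL 0x60, CFACKPOLL 0x70 and their QoS counterparts 0xC0-0xF0) all
+ * have bit 0x40 set, while the data-carrying subtypes (0x00-0x30 and
+ * 0x80-0xB0) have it clear, which is why testing that single bit is
+ * enough here.
+ */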
+
+/**
+ * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_assoc_req(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_REQ);
+}
+
+/**
+ * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_assoc_resp(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ASSOC_RESP);
+}
+
+/**
+ * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_reassoc_req(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_REQ);
+}
+
+/**
+ * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_reassoc_resp(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_REASSOC_RESP);
+}
+
+/**
+ * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_probe_req(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ);
+}
+
+/**
+ * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_probe_resp(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP);
+}
+
+/**
+ * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_beacon(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
+}
+
+/**
+ * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_atim(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ATIM);
+}
+
+/**
+ * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_disassoc(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DISASSOC);
+}
+
+/**
+ * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_auth(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
+}
+
+/**
+ * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_deauth(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
+}
+
+/**
+ * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_action(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION);
+}
+
+/**
+ * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_back_req(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
+}
+
+/**
+ * ieee80211_is_back - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_back(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK);
+}
+
+/**
+ * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_pspoll(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+}
+
+/**
+ * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_rts(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+}
+
+/**
+ * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_cts(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
+}
+
+/**
+ * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_ack(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK);
+}
+
+/**
+ * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_cfend(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFEND);
+}
+
+/**
+ * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_cfendack(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CFENDACK);
+}
+
+/**
+ * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_nullfunc(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC);
+}
+
+/**
+ * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_qos_nullfunc(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+}
+
+/**
+ * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * @fc: frame control field in little-endian byteorder
+ */
+static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
+{
+ /* IEEE 802.11-2012, definition of "bufferable management frame";
+ * note that this ignores the IBSS special case. */
+ return ieee80211_is_mgmt(fc) &&
+ (ieee80211_is_action(fc) ||
+ ieee80211_is_disassoc(fc) ||
+ ieee80211_is_deauth(fc));
+}
+
+/**
+ * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
+ * @seq_ctrl: frame sequence control bytes in little-endian byteorder
+ */
+static inline int ieee80211_is_first_frag(__le16 seq_ctrl)
+{
+ return (seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0;
+}
+
+struct ieee80211s_hdr {
+ u8 flags;
+ u8 ttl;
+ __le32 seqnum;
+ u8 eaddr1[ETH_ALEN];
+ u8 eaddr2[ETH_ALEN];
+} __packed __aligned(2);
+
+/* Mesh flags */
+#define MESH_FLAGS_AE_A4 0x1
+#define MESH_FLAGS_AE_A5_A6 0x2
+#define MESH_FLAGS_AE 0x3
+#define MESH_FLAGS_PS_DEEP 0x4
+
+/**
+ * enum ieee80211_preq_flags - mesh PREQ element flags
+ *
+ * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
+ */
+enum ieee80211_preq_flags {
+ IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
+};
+
+/**
+ * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
+ *
+ * @IEEE80211_PREQ_TO_FLAG: target only subfield
+ * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
+ */
+enum ieee80211_preq_target_flags {
+ IEEE80211_PREQ_TO_FLAG = 1<<0,
+ IEEE80211_PREQ_USN_FLAG = 1<<2,
+};
+
+/**
+ * struct ieee80211_quiet_ie
+ *
+ * This structure refers to "Quiet information element"
+ */
+struct ieee80211_quiet_ie {
+ u8 count;
+ u8 period;
+ __le16 duration;
+ __le16 offset;
+} __packed;
+
+/**
+ * struct ieee80211_msrment_ie
+ *
+ * This structure refers to "Measurement Request/Report information element"
+ */
+struct ieee80211_msrment_ie {
+ u8 token;
+ u8 mode;
+ u8 type;
+ u8 request[0];
+} __packed;
+
+/**
+ * struct ieee80211_channel_sw_ie
+ *
+ * This structure refers to "Channel Switch Announcement information element"
+ */
+struct ieee80211_channel_sw_ie {
+ u8 mode;
+ u8 new_ch_num;
+ u8 count;
+} __packed;
+
+/**
+ * struct ieee80211_ext_chansw_ie
+ *
+ * This structure represents the "Extended Channel Switch Announcement element"
+ */
+struct ieee80211_ext_chansw_ie {
+ u8 mode;
+ u8 new_operating_class;
+ u8 new_ch_num;
+ u8 count;
+} __packed;
+
+/**
+ * struct ieee80211_sec_chan_offs_ie - secondary channel offset IE
+ * @sec_chan_offs: secondary channel offset, uses IEEE80211_HT_PARAM_CHA_SEC_*
+ * values here
+ * This structure represents the "Secondary Channel Offset element"
+ */
+struct ieee80211_sec_chan_offs_ie {
+ u8 sec_chan_offs;
+} __packed;
+
+/**
+ * struct ieee80211_mesh_chansw_params_ie - mesh channel switch parameters IE
+ *
+ * This structure represents the "Mesh Channel Switch Paramters element"
+ */
+struct ieee80211_mesh_chansw_params_ie {
+ u8 mesh_ttl;
+ u8 mesh_flags;
+ __le16 mesh_reason;
+ __le16 mesh_pre_value;
+} __packed;
+
+/**
+ * struct ieee80211_wide_bw_chansw_ie - wide bandwidth channel switch IE
+ */
+struct ieee80211_wide_bw_chansw_ie {
+ u8 new_channel_width;
+ u8 new_center_freq_seg0, new_center_freq_seg1;
+} __packed;
+
+/**
+ * struct ieee80211_tim_ie
+ *
+ * This structure refers to "Traffic Indication Map information element"
+ */
+struct ieee80211_tim_ie {
+ u8 dtim_count;
+ u8 dtim_period;
+ u8 bitmap_ctrl;
+ /* variable size: 1 - 251 bytes */
+ u8 virtual_map[1];
+} __packed;
+
+/**
+ * struct ieee80211_meshconf_ie
+ *
+ * This structure refers to "Mesh Configuration information element"
+ */
+struct ieee80211_meshconf_ie {
+ u8 meshconf_psel;
+ u8 meshconf_pmetric;
+ u8 meshconf_congest;
+ u8 meshconf_synch;
+ u8 meshconf_auth;
+ u8 meshconf_form;
+ u8 meshconf_cap;
+} __packed;
+
+/**
+ * enum mesh_config_capab_flags - Mesh Configuration IE capability field flags
+ *
+ * @IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
+ * additional mesh peerings with other mesh STAs
+ * @IEEE80211_MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
+ * @IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure
+ * is ongoing
+ * @IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL: STA is in deep sleep mode or has
+ * neighbors in deep sleep mode
+ */
+enum mesh_config_capab_flags {
+ IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS = 0x01,
+ IEEE80211_MESHCONF_CAPAB_FORWARDING = 0x08,
+ IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING = 0x20,
+ IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
+};
+
+/**
+ * Mesh Channel Switch Parameters element's flag indicator
+ */
+#define WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT BIT(0)
+#define WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR BIT(1)
+#define WLAN_EID_CHAN_SWITCH_PARAM_REASON BIT(2)
+
+/**
+ * struct ieee80211_rann_ie
+ *
+ * This structure refers to "Root Announcement information element"
+ */
+struct ieee80211_rann_ie {
+ u8 rann_flags;
+ u8 rann_hopcount;
+ u8 rann_ttl;
+ u8 rann_addr[ETH_ALEN];
+ __le32 rann_seq;
+ __le32 rann_interval;
+ __le32 rann_metric;
+} __packed;
+
+enum ieee80211_rann_flags {
+ RANN_FLAG_IS_GATE = 1 << 0,
+};
+
+enum ieee80211_ht_chanwidth_values {
+ IEEE80211_HT_CHANWIDTH_20MHZ = 0,
+ IEEE80211_HT_CHANWIDTH_ANY = 1,
+};
+
+/**
+ * enum ieee80211_vht_opmode_bits - VHT operating mode field bits
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK: channel width mask
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: 20 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask
+ * (the NSS value is the value of this field + 1)
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift
+ * @IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF: indicates streams in SU-MIMO PPDU
+ * using a beamforming steering matrix
+ */
+enum ieee80211_vht_opmode_bits {
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2,
+ IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4,
+ IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
+};
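+
+/*
+ * Example (illustrative sketch; "opmode" is a placeholder u8 holding the
+ * operating mode field): decoding bandwidth and RX NSS.
+ *
+ *	u8 bw  = opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK;
+ *	u8 nss = ((opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK) >>
+ *		  IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT) + 1;
+ */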
+
+#define WLAN_SA_QUERY_TR_ID_LEN 2
+
+/**
+ * struct ieee80211_tpc_report_ie
+ *
+ * This structure refers to "TPC Report element"
+ */
+struct ieee80211_tpc_report_ie {
+ u8 tx_power;
+ u8 link_margin;
+} __packed;
+
+struct ieee80211_mgmt {
+ __le16 frame_control;
+ __le16 duration;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ __le16 seq_ctrl;
+ union {
+ struct {
+ __le16 auth_alg;
+ __le16 auth_transaction;
+ __le16 status_code;
+ /* possibly followed by Challenge text */
+ u8 variable[0];
+ } __packed auth;
+ struct {
+ __le16 reason_code;
+ } __packed deauth;
+ struct {
+ __le16 capab_info;
+ __le16 listen_interval;
+ /* followed by SSID and Supported rates */
+ u8 variable[0];
+ } __packed assoc_req;
+ struct {
+ __le16 capab_info;
+ __le16 status_code;
+ __le16 aid;
+ /* followed by Supported rates */
+ u8 variable[0];
+ } __packed assoc_resp, reassoc_resp;
+ struct {
+ __le16 capab_info;
+ __le16 listen_interval;
+ u8 current_ap[ETH_ALEN];
+ /* followed by SSID and Supported rates */
+ u8 variable[0];
+ } __packed reassoc_req;
+ struct {
+ __le16 reason_code;
+ } __packed disassoc;
+ struct {
+ __le64 timestamp;
+ __le16 beacon_int;
+ __le16 capab_info;
+ /* followed by some of SSID, Supported rates,
+ * FH Params, DS Params, CF Params, IBSS Params, TIM */
+ u8 variable[0];
+ } __packed beacon;
+ struct {
+ /* only variable items: SSID, Supported rates */
+ u8 variable[0];
+ } __packed probe_req;
+ struct {
+ __le64 timestamp;
+ __le16 beacon_int;
+ __le16 capab_info;
+ /* followed by some of SSID, Supported rates,
+ * FH Params, DS Params, CF Params, IBSS Params */
+ u8 variable[0];
+ } __packed probe_resp;
+ struct {
+ u8 category;
+ union {
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 status_code;
+ u8 variable[0];
+ } __packed wme_action;
+ struct{
+ u8 action_code;
+ u8 variable[0];
+ } __packed chan_switch;
+ struct{
+ u8 action_code;
+ struct ieee80211_ext_chansw_ie data;
+ u8 variable[0];
+ } __packed ext_chan_switch;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ u8 element_id;
+ u8 length;
+ struct ieee80211_msrment_ie msr_elem;
+ } __packed measurement;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ __le16 capab;
+ __le16 timeout;
+ __le16 start_seq_num;
+ } __packed addba_req;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status;
+ __le16 capab;
+ __le16 timeout;
+ } __packed addba_resp;
+ struct{
+ u8 action_code;
+ __le16 params;
+ __le16 reason_code;
+ } __packed delba;
+ struct {
+ u8 action_code;
+ u8 variable[0];
+ } __packed self_prot;
+ struct{
+ u8 action_code;
+ u8 variable[0];
+ } __packed mesh_action;
+ struct {
+ u8 action;
+ u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN];
+ } __packed sa_query;
+ struct {
+ u8 action;
+ u8 smps_control;
+ } __packed ht_smps;
+ struct {
+ u8 action_code;
+ u8 chanwidth;
+ } __packed ht_notify_cw;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ __le16 capability;
+ u8 variable[0];
+ } __packed tdls_discover_resp;
+ struct {
+ u8 action_code;
+ u8 operating_mode;
+ } __packed vht_opmode_notif;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 tpc_elem_id;
+ u8 tpc_elem_length;
+ struct ieee80211_tpc_report_ie tpc;
+ } __packed tpc_report;
+ } u;
+ } __packed action;
+ } u;
+} __packed __aligned(2);
+
+/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
+#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
+
+/* mgmt header + 1 byte category code */
+#define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
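+
+/*
+ * Worked example: the fixed part of struct ieee80211_mgmt is
+ * 2 + 2 + 6 + 6 + 6 + 2 = 24 bytes, so with the one-byte category code
+ * IEEE80211_MIN_ACTION_SIZE evaluates to 25.
+ */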
+
+
+/* Management MIC information element (IEEE 802.11w) */
+struct ieee80211_mmie {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[8];
+} __packed;
+
+/* Management MIC information element (IEEE 802.11w) for GMAC and CMAC-256 */
+struct ieee80211_mmie_16 {
+ u8 element_id;
+ u8 length;
+ __le16 key_id;
+ u8 sequence_number[6];
+ u8 mic[16];
+} __packed;
+
+struct ieee80211_vendor_ie {
+ u8 element_id;
+ u8 len;
+ u8 oui[3];
+ u8 oui_type;
+} __packed;
+
+struct ieee80211_wmm_ac_param {
+ u8 aci_aifsn; /* AIFSN, ACM, ACI */
+ u8 cw; /* ECWmin, ECWmax (CW = 2^ECW - 1) */
+ __le16 txop_limit;
+} __packed;
+
+struct ieee80211_wmm_param_ie {
+ u8 element_id; /* Element ID: 221 (0xdd); */
+ u8 len; /* Length: 24 */
+ /* required fields for WMM version 1 */
+ u8 oui[3]; /* 00:50:f2 */
+ u8 oui_type; /* 2 */
+ u8 oui_subtype; /* 1 */
+ u8 version; /* 1 for WMM version 1.0 */
+ u8 qos_info; /* AP/STA specific QoS info */
+ u8 reserved; /* 0 */
+ /* AC_BE, AC_BK, AC_VI, AC_VO */
+ struct ieee80211_wmm_ac_param ac[4];
+} __packed;
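+
+/*
+ * Example (illustrative sketch; "ac" is a placeholder struct
+ * ieee80211_wmm_ac_param pointer; nibble layout per the WMM
+ * specification, low nibble ECWmin / high nibble ECWmax):
+ *
+ *	u8  aifsn  = ac->aci_aifsn & 0x0f;
+ *	u16 cw_min = (1 << (ac->cw & 0x0f)) - 1;	ECWmin 4 -> CWmin 15
+ *	u16 cw_max = (1 << (ac->cw >> 4)) - 1;
+ */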
+
+/* Control frames */
+struct ieee80211_rts {
+ __le16 frame_control;
+ __le16 duration;
+ u8 ra[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+} __packed __aligned(2);
+
+struct ieee80211_cts {
+ __le16 frame_control;
+ __le16 duration;
+ u8 ra[ETH_ALEN];
+} __packed __aligned(2);
+
+struct ieee80211_pspoll {
+ __le16 frame_control;
+ __le16 aid;
+ u8 bssid[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+} __packed __aligned(2);
+
+/* TDLS */
+
+/* Channel switch timing */
+struct ieee80211_ch_switch_timing {
+ __le16 switch_time;
+ __le16 switch_timeout;
+} __packed;
+
+/* Link-id information element */
+struct ieee80211_tdls_lnkie {
+ u8 ie_type; /* Link Identifier IE */
+ u8 ie_len;
+ u8 bssid[ETH_ALEN];
+ u8 init_sta[ETH_ALEN];
+ u8 resp_sta[ETH_ALEN];
+} __packed;
+
+struct ieee80211_tdls_data {
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ __be16 ether_type;
+ u8 payload_type;
+ u8 category;
+ u8 action_code;
+ union {
+ struct {
+ u8 dialog_token;
+ __le16 capability;
+ u8 variable[0];
+ } __packed setup_req;
+ struct {
+ __le16 status_code;
+ u8 dialog_token;
+ __le16 capability;
+ u8 variable[0];
+ } __packed setup_resp;
+ struct {
+ __le16 status_code;
+ u8 dialog_token;
+ u8 variable[0];
+ } __packed setup_cfm;
+ struct {
+ __le16 reason_code;
+ u8 variable[0];
+ } __packed teardown;
+ struct {
+ u8 dialog_token;
+ u8 variable[0];
+ } __packed discover_req;
+ struct {
+ u8 target_channel;
+ u8 oper_class;
+ u8 variable[0];
+ } __packed chan_switch_req;
+ struct {
+ __le16 status_code;
+ u8 variable[0];
+ } __packed chan_switch_resp;
+ } u;
+} __packed;
+
+/*
+ * Peer-to-Peer IE attribute related definitions.
+ */
+/**
+ * enum ieee80211_p2p_attr_id - identifies type of peer-to-peer attribute.
+ */
+enum ieee80211_p2p_attr_id {
+ IEEE80211_P2P_ATTR_STATUS = 0,
+ IEEE80211_P2P_ATTR_MINOR_REASON,
+ IEEE80211_P2P_ATTR_CAPABILITY,
+ IEEE80211_P2P_ATTR_DEVICE_ID,
+ IEEE80211_P2P_ATTR_GO_INTENT,
+ IEEE80211_P2P_ATTR_GO_CONFIG_TIMEOUT,
+ IEEE80211_P2P_ATTR_LISTEN_CHANNEL,
+ IEEE80211_P2P_ATTR_GROUP_BSSID,
+ IEEE80211_P2P_ATTR_EXT_LISTEN_TIMING,
+ IEEE80211_P2P_ATTR_INTENDED_IFACE_ADDR,
+ IEEE80211_P2P_ATTR_MANAGABILITY,
+ IEEE80211_P2P_ATTR_CHANNEL_LIST,
+ IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
+ IEEE80211_P2P_ATTR_DEVICE_INFO,
+ IEEE80211_P2P_ATTR_GROUP_INFO,
+ IEEE80211_P2P_ATTR_GROUP_ID,
+ IEEE80211_P2P_ATTR_INTERFACE,
+ IEEE80211_P2P_ATTR_OPER_CHANNEL,
+ IEEE80211_P2P_ATTR_INVITE_FLAGS,
+ /* 19 - 220: Reserved */
+ IEEE80211_P2P_ATTR_VENDOR_SPECIFIC = 221,
+
+ IEEE80211_P2P_ATTR_MAX
+};
+
+/* Notice of Absence attribute - described in P2P spec 4.1.14 */
+/* Typical max value used here */
+#define IEEE80211_P2P_NOA_DESC_MAX 4
+
+struct ieee80211_p2p_noa_desc {
+ u8 count;
+ __le32 duration;
+ __le32 interval;
+ __le32 start_time;
+} __packed;
+
+struct ieee80211_p2p_noa_attr {
+ u8 index;
+ u8 oppps_ctwindow;
+ struct ieee80211_p2p_noa_desc desc[IEEE80211_P2P_NOA_DESC_MAX];
+} __packed;
+
+#define IEEE80211_P2P_OPPPS_ENABLE_BIT BIT(7)
+#define IEEE80211_P2P_OPPPS_CTWINDOW_MASK 0x7F
+
+/**
+ * struct ieee80211_bar - HT Block Ack Request
+ *
+ * This structure refers to "HT BlockAckReq" as
+ * described in 802.11n draft section 7.2.1.7.1
+ */
+struct ieee80211_bar {
+ __le16 frame_control;
+ __le16 duration;
+ __u8 ra[ETH_ALEN];
+ __u8 ta[ETH_ALEN];
+ __le16 control;
+ __le16 start_seq_num;
+} __packed;
+
+/* 802.11 BAR control masks */
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_MULTI_TID 0x0002
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+#define IEEE80211_BAR_CTRL_TID_INFO_MASK 0xf000
+#define IEEE80211_BAR_CTRL_TID_INFO_SHIFT 12
+
+#define IEEE80211_HT_MCS_MASK_LEN 10
+
+/**
+ * struct ieee80211_mcs_info - MCS information
+ * @rx_mask: RX mask
+ * @rx_highest: highest supported RX rate. If set represents
+ * the highest supported RX data rate in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest RX data rate supported.
+ * @tx_params: TX parameters
+ */
+struct ieee80211_mcs_info {
+ u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
+ __le16 rx_highest;
+ u8 tx_params;
+ u8 reserved[3];
+} __packed;
+
+/* 802.11n HT capability MCS set */
+#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
+#define IEEE80211_HT_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
+/* value 0 == 1 stream etc */
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
+#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
+#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
+
+/*
+ * 802.11n D5.0 20.3.5 / 20.6 says:
+ * - indices 0 to 7 and 32 are single spatial stream
+ * - 8 to 31 are multiple spatial streams using equal modulation
+ * [8..15 for two streams, 16..23 for three and 24..31 for four]
+ * - remainder are multiple spatial streams using unequal modulation
+ */
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
+#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
+ (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
+
+/**
+ * struct ieee80211_ht_cap - HT capabilities
+ *
+ * This structure is the "HT capabilities element" as
+ * described in 802.11n D5.0 7.3.2.57
+ */
+struct ieee80211_ht_cap {
+ __le16 cap_info;
+ u8 ampdu_params_info;
+
+ /* 16 bytes MCS information */
+ struct ieee80211_mcs_info mcs;
+
+ __le16 extended_ht_cap_info;
+ __le32 tx_BF_cap_info;
+ u8 antenna_selection_info;
+} __packed;
+
+/* 802.11n HT capabilities masks (for cap_info) */
+#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
+#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
+#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
+#define IEEE80211_HT_CAP_GRN_FLD 0x0010
+#define IEEE80211_HT_CAP_SGI_20 0x0020
+#define IEEE80211_HT_CAP_SGI_40 0x0040
+#define IEEE80211_HT_CAP_TX_STBC 0x0080
+#define IEEE80211_HT_CAP_RX_STBC 0x0300
+#define IEEE80211_HT_CAP_RX_STBC_SHIFT 8
+#define IEEE80211_HT_CAP_DELAY_BA 0x0400
+#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+#define IEEE80211_HT_CAP_RESERVED 0x2000
+#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
+#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
+
+/* 802.11n HT extended capabilities masks (for extended_ht_cap_info) */
+#define IEEE80211_HT_EXT_CAP_PCO 0x0001
+#define IEEE80211_HT_EXT_CAP_PCO_TIME 0x0006
+#define IEEE80211_HT_EXT_CAP_PCO_TIME_SHIFT 1
+#define IEEE80211_HT_EXT_CAP_MCS_FB 0x0300
+#define IEEE80211_HT_EXT_CAP_MCS_FB_SHIFT 8
+#define IEEE80211_HT_EXT_CAP_HTC_SUP 0x0400
+#define IEEE80211_HT_EXT_CAP_RD_RESPONDER 0x0800
+
+/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
+#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
+#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
+#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
+
+/*
+ * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_max_ampdu_length_exp {
+ IEEE80211_HT_MAX_AMPDU_8K = 0,
+ IEEE80211_HT_MAX_AMPDU_16K = 1,
+ IEEE80211_HT_MAX_AMPDU_32K = 2,
+ IEEE80211_HT_MAX_AMPDU_64K = 3
+};
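+
+/*
+ * Worked example: max_ampdu_length_exp == 3 gives 2^(13 + 3) - 1 = 65535
+ * octets, i.e. the "64K" case above.
+ */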
+
+/*
+ * Maximum length of AMPDU that the STA can receive in VHT.
+ * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
+ */
+enum ieee80211_vht_max_ampdu_length_exp {
+ IEEE80211_VHT_MAX_AMPDU_8K = 0,
+ IEEE80211_VHT_MAX_AMPDU_16K = 1,
+ IEEE80211_VHT_MAX_AMPDU_32K = 2,
+ IEEE80211_VHT_MAX_AMPDU_64K = 3,
+ IEEE80211_VHT_MAX_AMPDU_128K = 4,
+ IEEE80211_VHT_MAX_AMPDU_256K = 5,
+ IEEE80211_VHT_MAX_AMPDU_512K = 6,
+ IEEE80211_VHT_MAX_AMPDU_1024K = 7
+};
+
+#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
+
+/* Minimum MPDU start spacing */
+enum ieee80211_min_mpdu_spacing {
+ IEEE80211_HT_MPDU_DENSITY_NONE = 0, /* No restriction */
+ IEEE80211_HT_MPDU_DENSITY_0_25 = 1, /* 1/4 usec */
+ IEEE80211_HT_MPDU_DENSITY_0_5 = 2, /* 1/2 usec */
+ IEEE80211_HT_MPDU_DENSITY_1 = 3, /* 1 usec */
+ IEEE80211_HT_MPDU_DENSITY_2 = 4, /* 2 usec */
+ IEEE80211_HT_MPDU_DENSITY_4 = 5, /* 4 usec */
+ IEEE80211_HT_MPDU_DENSITY_8 = 6, /* 8 usec */
+ IEEE80211_HT_MPDU_DENSITY_16 = 7 /* 16 usec */
+};
+
+/**
+ * struct ieee80211_ht_operation - HT operation IE
+ *
+ * This structure is the "HT operation element" as
+ * described in 802.11n-2009 7.3.2.57
+ */
+struct ieee80211_ht_operation {
+ u8 primary_chan;
+ u8 ht_param;
+ __le16 operation_mode;
+ __le16 stbc_param;
+ u8 basic_set[16];
+} __packed;
+
+/* for ht_param */
+#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
+#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
+#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
+
+/* for operation_mode */
+#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
+#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
+#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
+#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
+#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
+
+/* for stbc_param */
+#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
+#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
+#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
+#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
+#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
+#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
+
+
+/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
+#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
+#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
+#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
+#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
+
+/*
+ * A-MPDU buffer sizes
+ * According to the IEEE 802.11n spec, the size varies from 8K to 64K (in powers of 2)
+ */
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF 0x40
+
+
+/* Spatial Multiplexing Power Save Modes (for capability) */
+#define WLAN_HT_CAP_SM_PS_STATIC 0
+#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
+#define WLAN_HT_CAP_SM_PS_INVALID 2
+#define WLAN_HT_CAP_SM_PS_DISABLED 3
+
+/* for SM power control field lower two bits */
+#define WLAN_HT_SMPS_CONTROL_DISABLED 0
+#define WLAN_HT_SMPS_CONTROL_STATIC 1
+#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
+
+/**
+ * struct ieee80211_vht_mcs_info - VHT MCS information
+ * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
+ * @rx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can receive. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest RX data rate supported.
+ * The top 3 bits of this field are reserved.
+ * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
+ * @tx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can transmit. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest TX data rate supported.
+ * The top 3 bits of this field are reserved.
+ */
+struct ieee80211_vht_mcs_info {
+ __le16 rx_mcs_map;
+ __le16 rx_highest;
+ __le16 tx_mcs_map;
+ __le16 tx_highest;
+} __packed;
+
+/**
+ * enum ieee80211_vht_mcs_support - VHT MCS support definitions
+ * @IEEE80211_VHT_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_VHT_MCS_SUPPORT_0_8: MCSes 0-8 are supported
+ * @IEEE80211_VHT_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_VHT_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the @rx_mcs_map
+ * and @tx_mcs_map fields of &struct ieee80211_vht_mcs_info, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_vht_mcs_support {
+ IEEE80211_VHT_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_VHT_MCS_SUPPORT_0_8 = 1,
+ IEEE80211_VHT_MCS_SUPPORT_0_9 = 2,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED = 3,
+};
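+
+/*
+ * Example (illustrative sketch; "map" is a placeholder __le16 rx_mcs_map
+ * or tx_mcs_map value, "nss" is 1..8): reading one 2-bit subfield.
+ *
+ *	u8 supp = (le16_to_cpu(map) >> (2 * (nss - 1))) & 3;
+ *	if (supp == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ *		...this stream count is not supported...
+ */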
+
+/**
+ * struct ieee80211_vht_cap - VHT capabilities
+ *
+ * This structure is the "VHT capabilities element" as
+ * described in 802.11ac D3.0 8.4.2.160
+ * @vht_cap_info: VHT capability info
+ * @supp_mcs: VHT MCS supported rates
+ */
+struct ieee80211_vht_cap {
+ __le32 vht_cap_info;
+ struct ieee80211_vht_mcs_info supp_mcs;
+} __packed;
+
+/**
+ * enum ieee80211_vht_chanwidth - VHT channel width
+ * @IEEE80211_VHT_CHANWIDTH_USE_HT: use the HT operation IE to
+ * determine the channel width (20 or 40 MHz)
+ * @IEEE80211_VHT_CHANWIDTH_80MHZ: 80 MHz bandwidth
+ * @IEEE80211_VHT_CHANWIDTH_160MHZ: 160 MHz bandwidth
+ * @IEEE80211_VHT_CHANWIDTH_80P80MHZ: 80+80 MHz bandwidth
+ */
+enum ieee80211_vht_chanwidth {
+ IEEE80211_VHT_CHANWIDTH_USE_HT = 0,
+ IEEE80211_VHT_CHANWIDTH_80MHZ = 1,
+ IEEE80211_VHT_CHANWIDTH_160MHZ = 2,
+ IEEE80211_VHT_CHANWIDTH_80P80MHZ = 3,
+};
+
+/**
+ * struct ieee80211_vht_operation - VHT operation IE
+ *
+ * This structure is the "VHT operation element" as
+ * described in 802.11ac D3.0 8.4.2.161
+ * @chan_width: Operating channel width
+ * @center_freq_seg1_idx: center freq segment 1 index
+ * @center_freq_seg2_idx: center freq segment 2 index
+ * @basic_mcs_set: VHT Basic MCS rate set
+ */
+struct ieee80211_vht_operation {
+ u8 chan_width;
+ u8 center_freq_seg1_idx;
+ u8 center_freq_seg2_idx;
+ __le16 basic_mcs_set;
+} __packed;
+
+
+/* 802.11ac VHT Capabilities */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK 0x0000000C
+#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
+#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
+#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
+#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
+#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
+#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
+#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
+#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
+#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
+#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
+#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT 13
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK \
+ (7 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT)
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT 16
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK \
+ (7 << IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT)
+#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
+#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
+#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
+#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT 23
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK \
+ (7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT)
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
+#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
+#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
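+
+/*
+ * Example (illustrative sketch; "info" is a placeholder host-order value,
+ * e.g. le32_to_cpu(vht_cap->vht_cap_info)): extracting a multi-bit field.
+ *
+ *	u8 exp = (info & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ *		 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ */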
+
+/* Authentication algorithms */
+#define WLAN_AUTH_OPEN 0
+#define WLAN_AUTH_SHARED_KEY 1
+#define WLAN_AUTH_FT 2
+#define WLAN_AUTH_SAE 3
+#define WLAN_AUTH_LEAP 128
+
+#define WLAN_AUTH_CHALLENGE_LEN 128
+
+#define WLAN_CAPABILITY_ESS (1<<0)
+#define WLAN_CAPABILITY_IBSS (1<<1)
+
+/*
+ * A mesh STA sets the ESS and IBSS capability bits to zero.
+ * However, this holds true for P2P probe responses (in the p2p_find
+ * phase) as well.
+ */
+#define WLAN_CAPABILITY_IS_STA_BSS(cap) \
+ (!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)))
+
+#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
+#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
+#define WLAN_CAPABILITY_PRIVACY (1<<4)
+#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
+#define WLAN_CAPABILITY_PBCC (1<<6)
+#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
+
+/* 802.11h */
+#define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8)
+#define WLAN_CAPABILITY_QOS (1<<9)
+#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
+#define WLAN_CAPABILITY_APSD (1<<11)
+#define WLAN_CAPABILITY_RADIO_MEASURE (1<<12)
+#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
+#define WLAN_CAPABILITY_DEL_BACK (1<<14)
+#define WLAN_CAPABILITY_IMM_BACK (1<<15)
+
+/* DMG (60 GHz) 802.11ad */
+/* type - bits 0..1 */
+#define WLAN_CAPABILITY_DMG_TYPE_MASK (3<<0)
+#define WLAN_CAPABILITY_DMG_TYPE_IBSS (1<<0) /* Tx by: STA */
+#define WLAN_CAPABILITY_DMG_TYPE_PBSS (2<<0) /* Tx by: PCP */
+#define WLAN_CAPABILITY_DMG_TYPE_AP (3<<0) /* Tx by: AP */
+
+#define WLAN_CAPABILITY_DMG_CBAP_ONLY (1<<2)
+#define WLAN_CAPABILITY_DMG_CBAP_SOURCE (1<<3)
+#define WLAN_CAPABILITY_DMG_PRIVACY (1<<4)
+#define WLAN_CAPABILITY_DMG_ECPAC (1<<5)
+
+#define WLAN_CAPABILITY_DMG_SPECTRUM_MGMT (1<<8)
+#define WLAN_CAPABILITY_DMG_RADIO_MEASURE (1<<12)
+
+/* measurement */
+#define IEEE80211_SPCT_MSR_RPRT_MODE_LATE (1<<0)
+#define IEEE80211_SPCT_MSR_RPRT_MODE_INCAPABLE (1<<1)
+#define IEEE80211_SPCT_MSR_RPRT_MODE_REFUSED (1<<2)
+
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1
+#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2
+
+/* 802.11g ERP information element */
+#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
+#define WLAN_ERP_USE_PROTECTION (1<<1)
+#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
+
+/* WLAN_ERP_BARKER_PREAMBLE values */
+enum {
+ WLAN_ERP_PREAMBLE_SHORT = 0,
+ WLAN_ERP_PREAMBLE_LONG = 1,
+};
+
+/* Band ID, 802.11ad #8.4.1.45 */
+enum {
+ IEEE80211_BANDID_TV_WS = 0, /* TV white spaces */
+ IEEE80211_BANDID_SUB1 = 1, /* Sub-1 GHz (excluding TV white spaces) */
+ IEEE80211_BANDID_2G = 2, /* 2.4 GHz */
+ IEEE80211_BANDID_3G = 3, /* 3.6 GHz */
+ IEEE80211_BANDID_5G = 4, /* 4.9 and 5 GHz */
+ IEEE80211_BANDID_60G = 5, /* 60 GHz */
+};
+
+/* Status codes */
+enum ieee80211_statuscode {
+ WLAN_STATUS_SUCCESS = 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE = 1,
+ WLAN_STATUS_CAPS_UNSUPPORTED = 10,
+ WLAN_STATUS_REASSOC_NO_ASSOC = 11,
+ WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12,
+ WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13,
+ WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14,
+ WLAN_STATUS_CHALLENGE_FAIL = 15,
+ WLAN_STATUS_AUTH_TIMEOUT = 16,
+ WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17,
+ WLAN_STATUS_ASSOC_DENIED_RATES = 18,
+ /* 802.11b */
+ WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19,
+ WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20,
+ WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21,
+ /* 802.11h */
+ WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22,
+ WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23,
+ WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24,
+ /* 802.11g */
+ WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
+ WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
+ /* 802.11w */
+ WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY = 30,
+ WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION = 31,
+ /* 802.11i */
+ WLAN_STATUS_INVALID_IE = 40,
+ WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
+ WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42,
+ WLAN_STATUS_INVALID_AKMP = 43,
+ WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
+ WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
+ WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
+ /* 802.11e */
+ WLAN_STATUS_UNSPECIFIED_QOS = 32,
+ WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33,
+ WLAN_STATUS_ASSOC_DENIED_LOWACK = 34,
+ WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35,
+ WLAN_STATUS_REQUEST_DECLINED = 37,
+ WLAN_STATUS_INVALID_QOS_PARAM = 38,
+ WLAN_STATUS_CHANGE_TSPEC = 39,
+ WLAN_STATUS_WAIT_TS_DELAY = 47,
+ WLAN_STATUS_NO_DIRECT_LINK = 48,
+ WLAN_STATUS_STA_NOT_PRESENT = 49,
+ WLAN_STATUS_STA_NOT_QSTA = 50,
+ /* 802.11s */
+ WLAN_STATUS_ANTI_CLOG_REQUIRED = 76,
+ WLAN_STATUS_FCG_NOT_SUPP = 78,
+ WLAN_STATUS_STA_NO_TBTT = 78,
+ /* 802.11ad */
+ WLAN_STATUS_REJECTED_WITH_SUGGESTED_CHANGES = 39,
+ WLAN_STATUS_REJECTED_FOR_DELAY_PERIOD = 47,
+ WLAN_STATUS_REJECT_WITH_SCHEDULE = 83,
+ WLAN_STATUS_PENDING_ADMITTING_FST_SESSION = 86,
+ WLAN_STATUS_PERFORMING_FST_NOW = 87,
+ WLAN_STATUS_PENDING_GAP_IN_BA_WINDOW = 88,
+ WLAN_STATUS_REJECT_U_PID_SETTING = 89,
+ WLAN_STATUS_REJECT_DSE_BAND = 96,
+ WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
+ WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+};
+
+
+/* Reason codes */
+enum ieee80211_reasoncode {
+ WLAN_REASON_UNSPECIFIED = 1,
+ WLAN_REASON_PREV_AUTH_NOT_VALID = 2,
+ WLAN_REASON_DEAUTH_LEAVING = 3,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4,
+ WLAN_REASON_DISASSOC_AP_BUSY = 5,
+ WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6,
+ WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7,
+ WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8,
+ WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9,
+ /* 802.11h */
+ WLAN_REASON_DISASSOC_BAD_POWER = 10,
+ WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11,
+ /* 802.11i */
+ WLAN_REASON_INVALID_IE = 13,
+ WLAN_REASON_MIC_FAILURE = 14,
+ WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
+ WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16,
+ WLAN_REASON_IE_DIFFERENT = 17,
+ WLAN_REASON_INVALID_GROUP_CIPHER = 18,
+ WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19,
+ WLAN_REASON_INVALID_AKMP = 20,
+ WLAN_REASON_UNSUPP_RSN_VERSION = 21,
+ WLAN_REASON_INVALID_RSN_IE_CAP = 22,
+ WLAN_REASON_IEEE8021X_FAILED = 23,
+ WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
+ /* TDLS (802.11z) */
+ WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE = 25,
+ WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED = 26,
+ /* 802.11e */
+ WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32,
+ WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33,
+ WLAN_REASON_DISASSOC_LOW_ACK = 34,
+ WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35,
+ WLAN_REASON_QSTA_LEAVE_QBSS = 36,
+ WLAN_REASON_QSTA_NOT_USE = 37,
+ WLAN_REASON_QSTA_REQUIRE_SETUP = 38,
+ WLAN_REASON_QSTA_TIMEOUT = 39,
+ WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45,
+ /* 802.11s */
+ WLAN_REASON_MESH_PEER_CANCELED = 52,
+ WLAN_REASON_MESH_MAX_PEERS = 53,
+ WLAN_REASON_MESH_CONFIG = 54,
+ WLAN_REASON_MESH_CLOSE = 55,
+ WLAN_REASON_MESH_MAX_RETRIES = 56,
+ WLAN_REASON_MESH_CONFIRM_TIMEOUT = 57,
+ WLAN_REASON_MESH_INVALID_GTK = 58,
+ WLAN_REASON_MESH_INCONSISTENT_PARAM = 59,
+ WLAN_REASON_MESH_INVALID_SECURITY = 60,
+ WLAN_REASON_MESH_PATH_ERROR = 61,
+ WLAN_REASON_MESH_PATH_NOFORWARD = 62,
+ WLAN_REASON_MESH_PATH_DEST_UNREACHABLE = 63,
+ WLAN_REASON_MAC_EXISTS_IN_MBSS = 64,
+ WLAN_REASON_MESH_CHAN_REGULATORY = 65,
+ WLAN_REASON_MESH_CHAN = 66,
+};
+
+
+/* Information Element IDs */
+enum ieee80211_eid {
+ WLAN_EID_SSID = 0,
+ WLAN_EID_SUPP_RATES = 1,
+ WLAN_EID_FH_PARAMS = 2, /* reserved now */
+ WLAN_EID_DS_PARAMS = 3,
+ WLAN_EID_CF_PARAMS = 4,
+ WLAN_EID_TIM = 5,
+ WLAN_EID_IBSS_PARAMS = 6,
+ WLAN_EID_COUNTRY = 7,
+ WLAN_EID_HP_PARAMS = 8,
+ WLAN_EID_HP_TABLE = 9,
+ WLAN_EID_REQUEST = 10,
+ WLAN_EID_QBSS_LOAD = 11,
+ WLAN_EID_EDCA_PARAM_SET = 12,
+ WLAN_EID_TSPEC = 13,
+ WLAN_EID_TCLAS = 14,
+ WLAN_EID_SCHEDULE = 15,
+ WLAN_EID_CHALLENGE = 16,
+ /* 17-31 reserved for challenge text extension */
+ WLAN_EID_PWR_CONSTRAINT = 32,
+ WLAN_EID_PWR_CAPABILITY = 33,
+ WLAN_EID_TPC_REQUEST = 34,
+ WLAN_EID_TPC_REPORT = 35,
+ WLAN_EID_SUPPORTED_CHANNELS = 36,
+ WLAN_EID_CHANNEL_SWITCH = 37,
+ WLAN_EID_MEASURE_REQUEST = 38,
+ WLAN_EID_MEASURE_REPORT = 39,
+ WLAN_EID_QUIET = 40,
+ WLAN_EID_IBSS_DFS = 41,
+ WLAN_EID_ERP_INFO = 42,
+ WLAN_EID_TS_DELAY = 43,
+ WLAN_EID_TCLAS_PROCESSING = 44,
+ WLAN_EID_HT_CAPABILITY = 45,
+ WLAN_EID_QOS_CAPA = 46,
+ /* 47 reserved for Broadcom */
+ WLAN_EID_RSN = 48,
+ WLAN_EID_802_15_COEX = 49,
+ WLAN_EID_EXT_SUPP_RATES = 50,
+ WLAN_EID_AP_CHAN_REPORT = 51,
+ WLAN_EID_NEIGHBOR_REPORT = 52,
+ WLAN_EID_RCPI = 53,
+ WLAN_EID_MOBILITY_DOMAIN = 54,
+ WLAN_EID_FAST_BSS_TRANSITION = 55,
+ WLAN_EID_TIMEOUT_INTERVAL = 56,
+ WLAN_EID_RIC_DATA = 57,
+ WLAN_EID_DSE_REGISTERED_LOCATION = 58,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
+ WLAN_EID_EXT_CHANSWITCH_ANN = 60,
+ WLAN_EID_HT_OPERATION = 61,
+ WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62,
+ WLAN_EID_BSS_AVG_ACCESS_DELAY = 63,
+ WLAN_EID_ANTENNA_INFO = 64,
+ WLAN_EID_RSNI = 65,
+ WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66,
+ WLAN_EID_BSS_AVAILABLE_CAPACITY = 67,
+ WLAN_EID_BSS_AC_ACCESS_DELAY = 68,
+ WLAN_EID_TIME_ADVERTISEMENT = 69,
+ WLAN_EID_RRM_ENABLED_CAPABILITIES = 70,
+ WLAN_EID_MULTIPLE_BSSID = 71,
+ WLAN_EID_BSS_COEX_2040 = 72,
+ WLAN_EID_BSS_INTOLERANT_CHL_REPORT = 73,
+ WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74,
+ WLAN_EID_RIC_DESCRIPTOR = 75,
+ WLAN_EID_MMIE = 76,
+ WLAN_EID_ASSOC_COMEBACK_TIME = 77,
+ WLAN_EID_EVENT_REQUEST = 78,
+ WLAN_EID_EVENT_REPORT = 79,
+ WLAN_EID_DIAGNOSTIC_REQUEST = 80,
+ WLAN_EID_DIAGNOSTIC_REPORT = 81,
+ WLAN_EID_LOCATION_PARAMS = 82,
+ WLAN_EID_NON_TX_BSSID_CAP = 83,
+ WLAN_EID_SSID_LIST = 84,
+ WLAN_EID_MULTI_BSSID_IDX = 85,
+ WLAN_EID_FMS_DESCRIPTOR = 86,
+ WLAN_EID_FMS_REQUEST = 87,
+ WLAN_EID_FMS_RESPONSE = 88,
+ WLAN_EID_QOS_TRAFFIC_CAPA = 89,
+ WLAN_EID_BSS_MAX_IDLE_PERIOD = 90,
+ WLAN_EID_TSF_REQUEST = 91,
+ WLAN_EID_TSF_RESPOSNE = 92,
+ WLAN_EID_WNM_SLEEP_MODE = 93,
+ WLAN_EID_TIM_BCAST_REQ = 94,
+ WLAN_EID_TIM_BCAST_RESP = 95,
+ WLAN_EID_COLL_IF_REPORT = 96,
+ WLAN_EID_CHANNEL_USAGE = 97,
+ WLAN_EID_TIME_ZONE = 98,
+ WLAN_EID_DMS_REQUEST = 99,
+ WLAN_EID_DMS_RESPONSE = 100,
+ WLAN_EID_LINK_ID = 101,
+ WLAN_EID_WAKEUP_SCHEDUL = 102,
+ /* 103 reserved */
+ WLAN_EID_CHAN_SWITCH_TIMING = 104,
+ WLAN_EID_PTI_CONTROL = 105,
+ WLAN_EID_PU_BUFFER_STATUS = 106,
+ WLAN_EID_INTERWORKING = 107,
+ WLAN_EID_ADVERTISEMENT_PROTOCOL = 108,
+ WLAN_EID_EXPEDITED_BW_REQ = 109,
+ WLAN_EID_QOS_MAP_SET = 110,
+ WLAN_EID_ROAMING_CONSORTIUM = 111,
+ WLAN_EID_EMERGENCY_ALERT = 112,
+ WLAN_EID_MESH_CONFIG = 113,
+ WLAN_EID_MESH_ID = 114,
+ WLAN_EID_LINK_METRIC_REPORT = 115,
+ WLAN_EID_CONGESTION_NOTIFICATION = 116,
+ WLAN_EID_PEER_MGMT = 117,
+ WLAN_EID_CHAN_SWITCH_PARAM = 118,
+ WLAN_EID_MESH_AWAKE_WINDOW = 119,
+ WLAN_EID_BEACON_TIMING = 120,
+ WLAN_EID_MCCAOP_SETUP_REQ = 121,
+ WLAN_EID_MCCAOP_SETUP_RESP = 122,
+ WLAN_EID_MCCAOP_ADVERT = 123,
+ WLAN_EID_MCCAOP_TEARDOWN = 124,
+ WLAN_EID_GANN = 125,
+ WLAN_EID_RANN = 126,
+ WLAN_EID_EXT_CAPABILITY = 127,
+ /* 128, 129 reserved for Agere */
+ WLAN_EID_PREQ = 130,
+ WLAN_EID_PREP = 131,
+ WLAN_EID_PERR = 132,
+ /* 133-136 reserved for Cisco */
+ WLAN_EID_PXU = 137,
+ WLAN_EID_PXUC = 138,
+ WLAN_EID_AUTH_MESH_PEER_EXCH = 139,
+ WLAN_EID_MIC = 140,
+ WLAN_EID_DESTINATION_URI = 141,
+ WLAN_EID_UAPSD_COEX = 142,
+ WLAN_EID_WAKEUP_SCHEDULE = 143,
+ WLAN_EID_EXT_SCHEDULE = 144,
+ WLAN_EID_STA_AVAILABILITY = 145,
+ WLAN_EID_DMG_TSPEC = 146,
+ WLAN_EID_DMG_AT = 147,
+ WLAN_EID_DMG_CAP = 148,
+ /* 149 reserved for Cisco */
+ WLAN_EID_CISCO_VENDOR_SPECIFIC = 150,
+ WLAN_EID_DMG_OPERATION = 151,
+ WLAN_EID_DMG_BSS_PARAM_CHANGE = 152,
+ WLAN_EID_DMG_BEAM_REFINEMENT = 153,
+ WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154,
+ /* 155-156 reserved for Cisco */
+ WLAN_EID_AWAKE_WINDOW = 157,
+ WLAN_EID_MULTI_BAND = 158,
+ WLAN_EID_ADDBA_EXT = 159,
+ WLAN_EID_NEXT_PCP_LIST = 160,
+ WLAN_EID_PCP_HANDOVER = 161,
+ WLAN_EID_DMG_LINK_MARGIN = 162,
+ WLAN_EID_SWITCHING_STREAM = 163,
+ WLAN_EID_SESSION_TRANSITION = 164,
+ WLAN_EID_DYN_TONE_PAIRING_REPORT = 165,
+ WLAN_EID_CLUSTER_REPORT = 166,
+ WLAN_EID_RELAY_CAP = 167,
+ WLAN_EID_RELAY_XFER_PARAM_SET = 168,
+ WLAN_EID_BEAM_LINK_MAINT = 169,
+ WLAN_EID_MULTIPLE_MAC_ADDR = 170,
+ WLAN_EID_U_PID = 171,
+ WLAN_EID_DMG_LINK_ADAPT_ACK = 172,
+ /* 173 reserved for Symbol */
+ WLAN_EID_MCCAOP_ADV_OVERVIEW = 174,
+ WLAN_EID_QUIET_PERIOD_REQ = 175,
+ /* 176 reserved for Symbol */
+ WLAN_EID_QUIET_PERIOD_RESP = 177,
+ /* 178-179 reserved for Symbol */
+ /* 180 reserved for ISO/IEC 20011 */
+ WLAN_EID_EPAC_POLICY = 182,
+ WLAN_EID_CLISTER_TIME_OFF = 183,
+ WLAN_EID_INTER_AC_PRIO = 184,
+ WLAN_EID_SCS_DESCRIPTOR = 185,
+ WLAN_EID_QLOAD_REPORT = 186,
+ WLAN_EID_HCCA_TXOP_UPDATE_COUNT = 187,
+ WLAN_EID_HL_STREAM_ID = 188,
+ WLAN_EID_GCR_GROUP_ADDR = 189,
+ WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190,
+ WLAN_EID_VHT_CAPABILITY = 191,
+ WLAN_EID_VHT_OPERATION = 192,
+ WLAN_EID_EXTENDED_BSS_LOAD = 193,
+ WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
+ WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
+ WLAN_EID_AID = 197,
+ WLAN_EID_QUIET_CHANNEL = 198,
+ WLAN_EID_OPMODE_NOTIF = 199,
+
+ WLAN_EID_VENDOR_SPECIFIC = 221,
+ WLAN_EID_QOS_PARAMETER = 222,
+};
+
+/* Action category code */
+enum ieee80211_category {
+ WLAN_CATEGORY_SPECTRUM_MGMT = 0,
+ WLAN_CATEGORY_QOS = 1,
+ WLAN_CATEGORY_DLS = 2,
+ WLAN_CATEGORY_BACK = 3,
+ WLAN_CATEGORY_PUBLIC = 4,
+ WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ WLAN_CATEGORY_HT = 7,
+ WLAN_CATEGORY_SA_QUERY = 8,
+ WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
+ WLAN_CATEGORY_TDLS = 12,
+ WLAN_CATEGORY_MESH_ACTION = 13,
+ WLAN_CATEGORY_MULTIHOP_ACTION = 14,
+ WLAN_CATEGORY_SELF_PROTECTED = 15,
+ WLAN_CATEGORY_DMG = 16,
+ WLAN_CATEGORY_WMM = 17,
+ WLAN_CATEGORY_FST = 18,
+ WLAN_CATEGORY_UNPROT_DMG = 20,
+ WLAN_CATEGORY_VHT = 21,
+ WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
+ WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
+};
+
+/* SPECTRUM_MGMT action code */
+enum ieee80211_spectrum_mgmt_actioncode {
+ WLAN_ACTION_SPCT_MSR_REQ = 0,
+ WLAN_ACTION_SPCT_MSR_RPRT = 1,
+ WLAN_ACTION_SPCT_TPC_REQ = 2,
+ WLAN_ACTION_SPCT_TPC_RPRT = 3,
+ WLAN_ACTION_SPCT_CHL_SWITCH = 4,
+};
+
+/* HT action codes */
+enum ieee80211_ht_actioncode {
+ WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
+ WLAN_HT_ACTION_SMPS = 1,
+ WLAN_HT_ACTION_PSMP = 2,
+ WLAN_HT_ACTION_PCO_PHASE = 3,
+ WLAN_HT_ACTION_CSI = 4,
+ WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
+ WLAN_HT_ACTION_COMPRESSED_BF = 6,
+ WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
+};
+
+/* VHT action codes */
+enum ieee80211_vht_actioncode {
+ WLAN_VHT_ACTION_COMPRESSED_BF = 0,
+ WLAN_VHT_ACTION_GROUPID_MGMT = 1,
+ WLAN_VHT_ACTION_OPMODE_NOTIF = 2,
+};
+
+/* Self Protected Action codes */
+enum ieee80211_self_protected_actioncode {
+ WLAN_SP_RESERVED = 0,
+ WLAN_SP_MESH_PEERING_OPEN = 1,
+ WLAN_SP_MESH_PEERING_CONFIRM = 2,
+ WLAN_SP_MESH_PEERING_CLOSE = 3,
+ WLAN_SP_MGK_INFORM = 4,
+ WLAN_SP_MGK_ACK = 5,
+};
+
+/* Mesh action codes */
+enum ieee80211_mesh_actioncode {
+ WLAN_MESH_ACTION_LINK_METRIC_REPORT,
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION,
+ WLAN_MESH_ACTION_GATE_ANNOUNCEMENT,
+ WLAN_MESH_ACTION_CONGESTION_CONTROL_NOTIFICATION,
+ WLAN_MESH_ACTION_MCCA_SETUP_REQUEST,
+ WLAN_MESH_ACTION_MCCA_SETUP_REPLY,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT_REQUEST,
+ WLAN_MESH_ACTION_MCCA_ADVERTISEMENT,
+ WLAN_MESH_ACTION_MCCA_TEARDOWN,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_REQUEST,
+ WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE,
+};
+
+/* Security key length */
+enum ieee80211_key_len {
+ WLAN_KEY_LEN_WEP40 = 5,
+ WLAN_KEY_LEN_WEP104 = 13,
+ WLAN_KEY_LEN_CCMP = 16,
+ WLAN_KEY_LEN_CCMP_256 = 32,
+ WLAN_KEY_LEN_TKIP = 32,
+ WLAN_KEY_LEN_AES_CMAC = 16,
+ WLAN_KEY_LEN_SMS4 = 32,
+ WLAN_KEY_LEN_GCMP = 16,
+ WLAN_KEY_LEN_GCMP_256 = 32,
+ WLAN_KEY_LEN_BIP_CMAC_256 = 32,
+ WLAN_KEY_LEN_BIP_GMAC_128 = 16,
+ WLAN_KEY_LEN_BIP_GMAC_256 = 32,
+};
+
+#define IEEE80211_WEP_IV_LEN 4
+#define IEEE80211_WEP_ICV_LEN 4
+#define IEEE80211_CCMP_HDR_LEN 8
+#define IEEE80211_CCMP_MIC_LEN 8
+#define IEEE80211_CCMP_PN_LEN 6
+#define IEEE80211_CCMP_256_HDR_LEN 8
+#define IEEE80211_CCMP_256_MIC_LEN 16
+#define IEEE80211_CCMP_256_PN_LEN 6
+#define IEEE80211_TKIP_IV_LEN 8
+#define IEEE80211_TKIP_ICV_LEN 4
+#define IEEE80211_CMAC_PN_LEN 6
+#define IEEE80211_GMAC_PN_LEN 6
+#define IEEE80211_GCMP_HDR_LEN 8
+#define IEEE80211_GCMP_MIC_LEN 16
+#define IEEE80211_GCMP_PN_LEN 6
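These constants fix the per-frame overhead of each cipher: a security header is inserted ahead of the payload and a MIC is appended after it. A standalone sketch, with the values inlined from the defines above and a hypothetical struct name used only for illustration:

#include <stdio.h>

struct cipher_overhead {
	const char *name;
	unsigned int hdr_len;	/* security header before the payload */
	unsigned int mic_len;	/* integrity tag appended after it */
};

int main(void)
{
	/* values taken from the IEEE80211_*_HDR_LEN / *_MIC_LEN defines */
	const struct cipher_overhead ciphers[] = {
		{ "CCMP",     8,  8 },
		{ "CCMP-256", 8, 16 },
		{ "GCMP",     8, 16 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(ciphers) / sizeof(ciphers[0]); i++)
		printf("%-8s expands each MPDU by %u bytes\n",
		       ciphers[i].name,
		       ciphers[i].hdr_len + ciphers[i].mic_len);
	return 0;
}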
+
+/* Public action codes */
+enum ieee80211_pub_actioncode {
+ WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES = 14,
+};
+
+/* TDLS action codes */
+enum ieee80211_tdls_actioncode {
+ WLAN_TDLS_SETUP_REQUEST = 0,
+ WLAN_TDLS_SETUP_RESPONSE = 1,
+ WLAN_TDLS_SETUP_CONFIRM = 2,
+ WLAN_TDLS_TEARDOWN = 3,
+ WLAN_TDLS_PEER_TRAFFIC_INDICATION = 4,
+ WLAN_TDLS_CHANNEL_SWITCH_REQUEST = 5,
+ WLAN_TDLS_CHANNEL_SWITCH_RESPONSE = 6,
+ WLAN_TDLS_PEER_PSM_REQUEST = 7,
+ WLAN_TDLS_PEER_PSM_RESPONSE = 8,
+ WLAN_TDLS_PEER_TRAFFIC_RESPONSE = 9,
+ WLAN_TDLS_DISCOVERY_REQUEST = 10,
+};
+
+/* Extended Channel Switching capability to be set in the 1st byte of
+ * the @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
+
+/* TDLS capabilities in the 4th byte of @WLAN_EID_EXT_CAPABILITY */
+#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
+#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
+#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
+
+/* Interworking capabilities are set in the 7th bit of the 4th byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA4_INTERWORKING_ENABLED BIT(7)
+
+/*
+ * TDLS capabilities to be enabled in the 5th byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element
+ */
+#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5)
+#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
+#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
+
+#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
+
+/* TDLS specific payload type in the LLC/SNAP header */
+#define WLAN_TDLS_SNAP_RFTYPE 0x2
+
+/* BSS Coex IE information field bits */
+#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
+
+/**
+ * enum - mesh synchronization method identifier
+ *
+ * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
+ * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
+ * that will be specified in a vendor specific information element
+ */
+enum {
+ IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
+ IEEE80211_SYNC_METHOD_VENDOR = 255,
+};
+
+/**
+ * enum - mesh path selection protocol identifier
+ *
+ * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
+ * @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
+ * be specified in a vendor specific information element
+ */
+enum {
+ IEEE80211_PATH_PROTOCOL_HWMP = 1,
+ IEEE80211_PATH_PROTOCOL_VENDOR = 255,
+};
+
+/**
+ * enum - mesh path selection metric identifier
+ *
+ * @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
+ * @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
+ * specified in a vendor specific information element
+ */
+enum {
+ IEEE80211_PATH_METRIC_AIRTIME = 1,
+ IEEE80211_PATH_METRIC_VENDOR = 255,
+};
+
+/**
+ * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
+ *
+ * These attributes are used by dot11MeshHWMPRootMode to set root mesh STA mode
+ *
+ * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
+ * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if greater than
+ * this value
+ * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA that
+ * supports the proactive PREQ with the proactive PREP subfield set to 0
+ * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA that
+ * supports the proactive PREQ with the proactive PREP subfield set to 1
+ * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA that supports
+ * the proactive RANN
+ */
+enum ieee80211_root_mode_identifier {
+ IEEE80211_ROOTMODE_NO_ROOT = 0,
+ IEEE80211_ROOTMODE_ROOT = 1,
+ IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
+ IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
+ IEEE80211_PROACTIVE_RANN = 4,
+};
+
+/*
+ * IEEE 802.11-2007 7.3.2.9 Country information element
+ *
+ * Minimum length is 8 octets, i.e., len must be evenly
+ * divisible by 2
+ */
+
+/* Although the spec says 8 I'm seeing 6 in practice */
+#define IEEE80211_COUNTRY_IE_MIN_LEN 6
+
+/* The Country String field of the element shall be 3 octets in length */
+#define IEEE80211_COUNTRY_STRING_LEN 3
+
+/*
+ * For regulatory extension stuff see IEEE 802.11-2007
+ * Annex I (page 1141) and Annex J (page 1147). Also
+ * review 7.3.2.9.
+ *
+ * When dot11RegulatoryClassesRequired is true and the
+ * first_channel/reg_extension_id is >= 201 then the IE
+ * comprises the 'ext' struct represented below:
+ *
+ * - Regulatory extension ID - when generating the IE this just needs
+ *   to be monotonically increasing for each triplet passed in
+ *   the IE
+ * - Regulatory class - index into set of rules
+ * - Coverage class - index into air propagation time (Table 7-27),
+ *   in microseconds; you can compute the air propagation time from
+ *   the index by multiplying by 3, so index 10 yields a propagation
+ *   of 30 us. Valid values are 0-31; values 32-255 are not defined
+ *   yet. A value of 0 indicates air propagation of <= 1 us.
+ *
+ * See also Table I.2 for Emission limit sets and table
+ * I.3 for Behavior limit sets. Table J.1 indicates how to map
+ * a reg_class to an emission limit set and behavior limit set.
+ */
+#define IEEE80211_COUNTRY_EXTENSION_ID 201
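Since the coverage-class arithmetic above is easy to misread, here is a standalone sketch of the index-to-microseconds conversion; the helper name is made up for illustration:

#include <stdio.h>

/* Coverage class index -> air propagation time: 3 us per step,
 * index 0 meaning <= 1 us, and only indices 0-31 being defined. */
static int coverage_class_to_us(unsigned int index)
{
	if (index > 31)
		return -1;		/* 32-255: not defined yet */
	return index ? index * 3 : 1;	/* report the <= 1 us case as 1 */
}

int main(void)
{
	printf("index 10 -> %d us\n", coverage_class_to_us(10));	/* 30 us */
	return 0;
}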
+
+/*
+ * Channels numbers in the IE must be monotonically increasing
+ * if dot11RegulatoryClassesRequired is not true.
+ *
+ * If dot11RegulatoryClassesRequired is true consecutive
+ * subband triplets following a regulatory triplet shall
+ * have monotonically increasing first_channel number fields.
+ *
+ * Channel numbers shall not overlap.
+ *
+ * Note that max_power is signed.
+ */
+struct ieee80211_country_ie_triplet {
+ union {
+ struct {
+ u8 first_channel;
+ u8 num_channels;
+ s8 max_power;
+ } __packed chans;
+ struct {
+ u8 reg_extension_id;
+ u8 reg_class;
+ u8 coverage_class;
+ } __packed ext;
+ };
+} __packed;
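A minimal kernel-context sketch of how a driver might walk these triplets inside the body of a country element; the function name is illustrative and error handling is omitted:

static void walk_country_triplets(const u8 *body, u8 body_len)
{
	const u8 *pos = body + IEEE80211_COUNTRY_STRING_LEN;
	const u8 *end = body + body_len;

	while (pos + sizeof(struct ieee80211_country_ie_triplet) <= end) {
		const struct ieee80211_country_ie_triplet *trip =
			(const void *)pos;

		/* a first octet >= 201 selects the 'ext' interpretation */
		if (trip->ext.reg_extension_id >= IEEE80211_COUNTRY_EXTENSION_ID)
			pr_debug("reg class %u, coverage class %u\n",
				 trip->ext.reg_class, trip->ext.coverage_class);
		else
			pr_debug("chan %u, %u channels, max %d dBm\n",
				 trip->chans.first_channel,
				 trip->chans.num_channels,
				 trip->chans.max_power);
		pos += sizeof(*trip);
	}
}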
+
+enum ieee80211_timeout_interval_type {
+ WLAN_TIMEOUT_REASSOC_DEADLINE = 1 /* 802.11r */,
+ WLAN_TIMEOUT_KEY_LIFETIME = 2 /* 802.11r */,
+ WLAN_TIMEOUT_ASSOC_COMEBACK = 3 /* 802.11w */,
+};
+
+/**
+ * struct ieee80211_timeout_interval_ie - Timeout Interval element
+ * @type: type, see &enum ieee80211_timeout_interval_type
+ * @value: timeout interval value
+ */
+struct ieee80211_timeout_interval_ie {
+ u8 type;
+ __le32 value;
+} __packed;
+
+/* BACK action code */
+enum ieee80211_back_actioncode {
+ WLAN_ACTION_ADDBA_REQ = 0,
+ WLAN_ACTION_ADDBA_RESP = 1,
+ WLAN_ACTION_DELBA = 2,
+};
+
+/* BACK (block-ack) parties */
+enum ieee80211_back_parties {
+ WLAN_BACK_RECIPIENT = 0,
+ WLAN_BACK_INITIATOR = 1,
+};
+
+/* SA Query action */
+enum ieee80211_sa_query_action {
+ WLAN_ACTION_SA_QUERY_REQUEST = 0,
+ WLAN_ACTION_SA_QUERY_RESPONSE = 1,
+};
+
+
+/* cipher suite selectors */
+#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00
+#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01
+#define WLAN_CIPHER_SUITE_TKIP 0x000FAC02
+/* reserved: 0x000FAC03 */
+#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
+#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
+#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
+#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
+#define WLAN_CIPHER_SUITE_GCMP_256 0x000FAC09
+#define WLAN_CIPHER_SUITE_CCMP_256 0x000FAC0A
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
+
+#define WLAN_CIPHER_SUITE_SMS4 0x00147201
+
+/* AKM suite selectors */
+#define WLAN_AKM_SUITE_8021X 0x000FAC01
+#define WLAN_AKM_SUITE_PSK 0x000FAC02
+#define WLAN_AKM_SUITE_8021X_SHA256 0x000FAC05
+#define WLAN_AKM_SUITE_PSK_SHA256 0x000FAC06
+#define WLAN_AKM_SUITE_TDLS 0x000FAC07
+#define WLAN_AKM_SUITE_SAE 0x000FAC08
+#define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09
+
+#define WLAN_MAX_KEY_LEN 32
+
+#define WLAN_PMKID_LEN 16
+
+#define WLAN_OUI_WFA 0x506f9a
+#define WLAN_OUI_TYPE_WFA_P2P 9
+#define WLAN_OUI_MICROSOFT 0x0050f2
+#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
+#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
+#define WLAN_OUI_TYPE_MICROSOFT_WPS 4
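Each cipher/AKM suite selector above packs a 24-bit OUI above an 8-bit suite type (on the wire it is carried as the three OUI octets followed by the type octet). A standalone sketch of that packing; the helper is illustrative, not part of this header:

#include <stdint.h>
#include <stdio.h>

static uint32_t make_suite(uint32_t oui, uint8_t suite_type)
{
	return (oui << 8) | suite_type;
}

int main(void)
{
	/* 00-0F-AC:4 -> WLAN_CIPHER_SUITE_CCMP, 00-0F-AC:2 -> WLAN_AKM_SUITE_PSK */
	printf("CCMP 0x%08x, PSK AKM 0x%08x, SMS4 0x%08x\n",
	       (unsigned int)make_suite(0x000fac, 4),
	       (unsigned int)make_suite(0x000fac, 2),
	       (unsigned int)make_suite(0x001472, 1));	/* matches 0x00147201 above */
	return 0;
}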
+
+/*
+ * WMM/802.11e Tspec Element
+ */
+#define IEEE80211_WMM_IE_TSPEC_TID_MASK 0x0F
+#define IEEE80211_WMM_IE_TSPEC_TID_SHIFT 1
+
+enum ieee80211_tspec_status_code {
+ IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED = 0,
+ IEEE80211_TSPEC_STATUS_ADDTS_INVAL_PARAMS = 0x1,
+};
+
+struct ieee80211_tspec_ie {
+ u8 element_id;
+ u8 len;
+ u8 oui[3];
+ u8 oui_type;
+ u8 oui_subtype;
+ u8 version;
+ __le16 tsinfo;
+ u8 tsinfo_resvd;
+ __le16 nominal_msdu;
+ __le16 max_msdu;
+ __le32 min_service_int;
+ __le32 max_service_int;
+ __le32 inactivity_int;
+ __le32 suspension_int;
+ __le32 service_start_time;
+ __le32 min_data_rate;
+ __le32 mean_data_rate;
+ __le32 peak_data_rate;
+ __le32 max_burst_size;
+ __le32 delay_bound;
+ __le32 min_phy_rate;
+ __le16 sba;
+ __le16 medium_time;
+} __packed;
+
+/**
+ * ieee80211_get_qos_ctl - get pointer to qos control bytes
+ * @hdr: the frame
+ *
+ * The qos ctrl bytes come after the frame_control, duration, seq_num
+ * and 3 or 4 addresses of length ETH_ALEN.
+ * 3 addr: 2 + 2 + 2 + 3*6 = 24
+ * 4 addr: 2 + 2 + 2 + 4*6 = 30
+ */
+static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_has_a4(hdr->frame_control))
+ return (u8 *)hdr + 30;
+ else
+ return (u8 *)hdr + 24;
+}
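A standalone restatement of the offset arithmetic from the comment above; the helper name is illustrative:

#include <stdbool.h>
#include <stdio.h>

/* frame_control(2) + duration(2) + seq(2) + 3 or 4 six-byte addresses */
static unsigned int qos_ctl_offset(bool has_addr4)
{
	return 2 + 2 + 2 + (has_addr4 ? 4 : 3) * 6;
}

int main(void)
{
	printf("3-address: %u, 4-address: %u\n",
	       qos_ctl_offset(false), qos_ctl_offset(true));	/* 24 and 30 */
	return 0;
}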
+
+/**
+ * ieee80211_get_SA - get pointer to SA
+ * @hdr: the frame
+ *
+ * Given an 802.11 frame, this function returns a pointer
+ * to the source address (SA). It does not verify that the
+ * header is long enough to contain the address, and the
+ * header must be long enough to contain the frame control
+ * field.
+ */
+static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_has_a4(hdr->frame_control))
+ return hdr->addr4;
+ if (ieee80211_has_fromds(hdr->frame_control))
+ return hdr->addr3;
+ return hdr->addr2;
+}
+
+/**
+ * ieee80211_get_DA - get pointer to DA
+ * @hdr: the frame
+ *
+ * Given an 802.11 frame, this function returns a pointer
+ * to the destination address (DA). It does not verify that
+ * the header is long enough to contain the address, and the
+ * header must be long enough to contain the frame control
+ * field.
+ */
+static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_has_tods(hdr->frame_control))
+ return hdr->addr3;
+ else
+ return hdr->addr1;
+}
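The two helpers above encode the standard 802.11 address-field layout, summarized here together with a minimal kernel-context usage sketch; the function name is illustrative:

/*
 * To-DS From-DS   addr1   addr2   addr3   addr4
 *   0      0       DA      SA     BSSID    -      (IBSS)
 *   0      1       DA     BSSID    SA      -      (AP -> STA)
 *   1      0      BSSID    SA      DA      -      (STA -> AP)
 *   1      1       RA      TA      DA      SA     (4-address / WDS)
 */
static void log_frame_endpoints(struct ieee80211_hdr *hdr)
{
	pr_debug("DA %pM SA %pM\n",
		 ieee80211_get_DA(hdr), ieee80211_get_SA(hdr));
}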
+
+/**
+ * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
+ * @hdr: the frame (buffer must include at least the first octet of payload)
+ */
+static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
+{
+ if (ieee80211_is_disassoc(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control))
+ return true;
+
+ if (ieee80211_is_action(hdr->frame_control)) {
+ u8 *category;
+
+ /*
+ * Action frames, excluding Public Action frames, are Robust
+ * Management Frames. However, if we are looking at a Protected
+ * frame, skip the check since the data may be encrypted and
+ * the frame has already been found to be a Robust Management
+ * Frame (by the other end).
+ */
+ if (ieee80211_has_protected(hdr->frame_control))
+ return true;
+ category = ((u8 *) hdr) + 24;
+ return *category != WLAN_CATEGORY_PUBLIC &&
+ *category != WLAN_CATEGORY_HT &&
+ *category != WLAN_CATEGORY_SELF_PROTECTED &&
+ *category != WLAN_CATEGORY_VENDOR_SPECIFIC;
+ }
+
+ return false;
+}
+
+/**
+ * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame
+ * @skb: the skb containing the frame, length will be checked
+ */
+static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
+{
+ if (skb->len < 25)
+ return false;
+ return _ieee80211_is_robust_mgmt_frame((void *)skb->data);
+}
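One plausible rx-path use of the helper above: once management frame protection has been negotiated, a robust management frame that arrives unprotected should be dropped. This is a hedged sketch; the function and flag names are illustrative, not part of this header:

static bool rx_mgmt_needs_drop(struct sk_buff *skb, bool mfp_negotiated)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;

	/* robust frames must carry the Protected bit once MFP is in use */
	return mfp_negotiated &&
	       ieee80211_is_robust_mgmt_frame(skb) &&
	       !ieee80211_has_protected(hdr->frame_control);
}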
+
+/**
+ * ieee80211_is_public_action - check if frame is a public action frame
+ * @hdr: the frame
+ * @len: length of the frame
+ */
+static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
+ size_t len)
+{
+ struct ieee80211_mgmt *mgmt = (void *)hdr;
+
+ if (len < IEEE80211_MIN_ACTION_SIZE)
+ return false;
+ if (!ieee80211_is_action(hdr->frame_control))
+ return false;
+ return mgmt->u.action.category == WLAN_CATEGORY_PUBLIC;
+}
+
+/**
+ * ieee80211_tu_to_usec - convert time units (TU) to microseconds
+ * @tu: the TUs
+ */
+static inline unsigned long ieee80211_tu_to_usec(unsigned long tu)
+{
+ return 1024 * tu;
+}
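A TU (time unit) is exactly 1024 microseconds, which is all the helper above encodes; for example, the common beacon interval of 100 TU works out as follows:

#include <stdio.h>

int main(void)
{
	unsigned long beacon_int_tu = 100;		/* typical beacon interval */
	unsigned long usec = 1024 * beacon_int_tu;	/* as ieee80211_tu_to_usec() */

	printf("%lu TU = %lu us (~%lu ms)\n",
	       beacon_int_tu, usec, usec / 1000);	/* 102400 us, ~102 ms */
	return 0;
}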
+
+/**
+ * ieee80211_check_tim - check if AID bit is set in TIM
+ * @tim: the TIM IE
+ * @tim_len: length of the TIM IE
+ * @aid: the AID to look for
+ */
+static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
+ u8 tim_len, u16 aid)
+{
+ u8 mask;
+ u8 index, indexn1, indexn2;
+
+ if (unlikely(!tim || tim_len < sizeof(*tim)))
+ return false;
+
+ aid &= 0x3fff;
+ index = aid / 8;
+ mask = 1 << (aid & 7);
+
+ indexn1 = tim->bitmap_ctrl & 0xfe;
+ indexn2 = tim_len + indexn1 - 4;
+
+ if (index < indexn1 || index > indexn2)
+ return false;
+
+ index -= indexn1;
+
+ return !!(tim->virtual_map[index] & mask);
+}
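A minimal kernel-context sketch of the usual caller: after parsing a beacon, check whether the AP's TIM element announces buffered frames for our association ID. The wrapper name is illustrative:

static bool ap_has_buffered_frames(const struct ieee80211_tim_ie *tim,
				   u8 tim_len, u16 our_aid)
{
	/*
	 * ieee80211_check_tim() handles the partial virtual bitmap:
	 * bits 7:1 of bitmap_ctrl give the offset (N1) of the first
	 * transmitted octet, and aid/8 is tested relative to that.
	 */
	return ieee80211_check_tim(tim, tim_len, our_aid);
}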
+
+/**
+ * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
+ * @skb: the skb containing the frame, length will not be checked
+ * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
+ *
+ * This function assumes the frame is a data frame, and that the network header
+ * is in the correct place.
+ */
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+{
+ if (!skb_is_nonlinear(skb) &&
+ skb->len > (skb_network_offset(skb) + 2)) {
+ /* Point to where the indication of TDLS should start */
+ const u8 *tdls_data = skb_network_header(skb) - 2;
+
+ if (get_unaligned_be16(tdls_data) == ETH_P_TDLS &&
+ tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE &&
+ tdls_data[3] == WLAN_CATEGORY_TDLS)
+ return tdls_data[4];
+ }
+
+ return -1;
+}
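A hedged rx sketch showing how the helper above might be used to special-case TDLS setup frames that arrive encapsulated in data frames; the function name is illustrative:

static bool rx_is_tdls_setup(struct sk_buff *skb, u32 hdr_size)
{
	int action = ieee80211_get_tdls_action(skb, hdr_size);

	return action == WLAN_TDLS_SETUP_REQUEST ||
	       action == WLAN_TDLS_SETUP_RESPONSE ||
	       action == WLAN_TDLS_SETUP_CONFIRM;
}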
+
+/* convert time units */
+#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
+#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+
+/**
+ * ieee80211_action_contains_tpc - checks if the frame contains TPC element
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * This function checks whether the frame is either a TPC Report action frame
+ * or a Link Measurement Report action frame, as defined in IEEE Std 802.11-2012
+ * 8.5.2.5 and 8.5.7.5 respectively.
+ */
+static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return false;
+
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE +
+ sizeof(mgmt->u.action.u.tpc_report))
+ return false;
+
+ /*
+ * TPC report - check that:
+ * category = 0 (Spectrum Management) or 5 (Radio Measurement)
+ * spectrum management action = 3 (TPC/Link Measurement report)
+ * TPC report EID = 35
+ * TPC report element length = 2
+ *
+ * The spectrum management's tpc_report struct is used here both for
+ * parsing tpc_report and radio measurement's link measurement report
+ * frame, since the relevant part is identical in both frames.
+ */
+ if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT &&
+ mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT)
+ return false;
+
+ /* both spectrum mgmt and link measurement have same action code */
+ if (mgmt->u.action.u.tpc_report.action_code !=
+ WLAN_ACTION_SPCT_TPC_RPRT)
+ return false;
+
+ if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT ||
+ mgmt->u.action.u.tpc_report.tpc_elem_length !=
+ sizeof(struct ieee80211_tpc_report_ie))
+ return false;
+
+ return true;
+}
+
+#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
new file mode 100644
index 000000000..8872ca103
--- /dev/null
+++ b/include/linux/ieee802154.h
@@ -0,0 +1,252 @@
+/*
+ * IEEE802.15.4-2003 specification
+ *
+ * Copyright (C) 2007, 2008 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Maxim Osipov <maxim.osipov@siemens.com>
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#ifndef LINUX_IEEE802154_H
+#define LINUX_IEEE802154_H
+
+#include <linux/types.h>
+#include <linux/random.h>
+#include <asm/byteorder.h>
+
+#define IEEE802154_MTU 127
+#define IEEE802154_ACK_PSDU_LEN 5
+#define IEEE802154_MIN_PSDU_LEN 9
+#define IEEE802154_FCS_LEN 2
+
+#define IEEE802154_PAN_ID_BROADCAST 0xffff
+#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
+#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe
+
+#define IEEE802154_EXTENDED_ADDR_LEN 8
+
+#define IEEE802154_LIFS_PERIOD 40
+#define IEEE802154_SIFS_PERIOD 12
+#define IEEE802154_MAX_SIFS_FRAME_SIZE 18
+
+#define IEEE802154_MAX_CHANNEL 26
+#define IEEE802154_MAX_PAGE 31
+
+#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
+#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
+#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */
+#define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */
+
+#define IEEE802154_FC_TYPE_SHIFT 0
+#define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1)
+#define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT)
+#define IEEE802154_FC_SET_TYPE(v, x) do { \
+ v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \
+ (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \
+ } while (0)
+
+#define IEEE802154_FC_SECEN_SHIFT 3
+#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT)
+#define IEEE802154_FC_FRPEND_SHIFT 4
+#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT)
+#define IEEE802154_FC_ACK_REQ_SHIFT 5
+#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT)
+#define IEEE802154_FC_INTRA_PAN_SHIFT 6
+#define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT)
+
+#define IEEE802154_FC_SAMODE_SHIFT 14
+#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT)
+#define IEEE802154_FC_DAMODE_SHIFT 10
+#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT)
+
+#define IEEE802154_FC_VERSION_SHIFT 12
+#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT)
+#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT)
+
+#define IEEE802154_FC_SAMODE(x) \
+ (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT)
+
+#define IEEE802154_FC_DAMODE(x) \
+ (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
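A standalone sketch that mirrors IEEE802154_FC_TYPE()/IEEE802154_FC_SET_TYPE() with the shifts and masks written out, to make the bit layout of the frame-control field concrete:

#include <stdio.h>

int main(void)
{
	/* shifts and masks copied from the defines above */
	const unsigned int type_mask = (1 << 3) - 1;	/* frame type, bits 2:0 */
	const unsigned int ack_req = 1 << 5;		/* IEEE802154_FC_ACK_REQ */
	const unsigned int intra_pan = 1 << 6;		/* IEEE802154_FC_INTRA_PAN */
	unsigned int fc = 0;

	fc = (fc & ~type_mask) | (0x1 /* data */ & type_mask);
	fc |= ack_req | intra_pan;

	printf("fc = 0x%04x, type = %u\n", fc, fc & type_mask);
	/* prints: fc = 0x0061, type = 1 */
	return 0;
}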
+
+#define IEEE802154_SCF_SECLEVEL_MASK 7
+#define IEEE802154_SCF_SECLEVEL_SHIFT 0
+#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK)
+#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3
+#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT)
+#define IEEE802154_SCF_KEY_ID_MODE(x) \
+ ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT)
+
+#define IEEE802154_SCF_KEY_IMPLICIT 0
+#define IEEE802154_SCF_KEY_INDEX 1
+#define IEEE802154_SCF_KEY_SHORT_INDEX 2
+#define IEEE802154_SCF_KEY_HW_INDEX 3
+
+#define IEEE802154_SCF_SECLEVEL_NONE 0
+#define IEEE802154_SCF_SECLEVEL_MIC32 1
+#define IEEE802154_SCF_SECLEVEL_MIC64 2
+#define IEEE802154_SCF_SECLEVEL_MIC128 3
+#define IEEE802154_SCF_SECLEVEL_ENC 4
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6
+#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7
+
+/* MAC footer size */
+#define IEEE802154_MFR_SIZE 2 /* 2 octets */
+
+/* MAC's Command Frames Identifiers */
+#define IEEE802154_CMD_ASSOCIATION_REQ 0x01
+#define IEEE802154_CMD_ASSOCIATION_RESP 0x02
+#define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03
+#define IEEE802154_CMD_DATA_REQ 0x04
+#define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05
+#define IEEE802154_CMD_ORPHAN_NOTIFY 0x06
+#define IEEE802154_CMD_BEACON_REQ 0x07
+#define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08
+#define IEEE802154_CMD_GTS_REQ 0x09
+
+/*
+ * The return values of MAC operations
+ */
+enum {
+ /*
+ * The requested operation was completed successfully.
+ * For a transmission request, this value indicates
+ * a successful transmission.
+ */
+ IEEE802154_SUCCESS = 0x0,
+
+ /* The beacon was lost following a synchronization request. */
+ IEEE802154_BEACON_LOSS = 0xe0,
+ /*
+ * A transmission could not take place due to activity on the
+ * channel, i.e., the CSMA-CA mechanism has failed.
+ */
+ IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
+ /* The GTS request has been denied by the PAN coordinator. */
+ IEEE802154_DENINED = 0xe2,
+ /* The attempt to disable the transceiver has failed. */
+ IEEE802154_DISABLE_TRX_FAIL = 0xe3,
+ /*
+ * The received frame induces a failed security check according to
+ * the security suite.
+ */
+ IEEE802154_FAILED_SECURITY_CHECK = 0xe4,
+ /*
+ * The frame resulting from secure processing has a length that is
+ * greater than aMACMaxFrameSize.
+ */
+ IEEE802154_FRAME_TOO_LONG = 0xe5,
+ /*
+ * The requested GTS transmission failed because the specified GTS
+ * either did not have a transmit GTS direction or was not defined.
+ */
+ IEEE802154_INVALID_GTS = 0xe6,
+ /*
+ * A request to purge an MSDU from the transaction queue was made using
+ * an MSDU handle that was not found in the transaction table.
+ */
+ IEEE802154_INVALID_HANDLE = 0xe7,
+ /* A parameter in the primitive is out of the valid range.*/
+ IEEE802154_INVALID_PARAMETER = 0xe8,
+ /* No acknowledgment was received after aMaxFrameRetries. */
+ IEEE802154_NO_ACK = 0xe9,
+ /* A scan operation failed to find any network beacons.*/
+ IEEE802154_NO_BEACON = 0xea,
+ /* No response data were available following a request. */
+ IEEE802154_NO_DATA = 0xeb,
+ /* The operation failed because a short address was not allocated. */
+ IEEE802154_NO_SHORT_ADDRESS = 0xec,
+ /*
+ * A receiver enable request was unsuccessful because it could not be
+ * completed within the CAP.
+ */
+ IEEE802154_OUT_OF_CAP = 0xed,
+ /*
+ * A PAN identifier conflict has been detected and communicated to the
+ * PAN coordinator.
+ */
+ IEEE802154_PANID_CONFLICT = 0xee,
+ /* A coordinator realignment command has been received. */
+ IEEE802154_REALIGMENT = 0xef,
+ /* The transaction has expired and its information discarded. */
+ IEEE802154_TRANSACTION_EXPIRED = 0xf0,
+ /* There is no capacity to store the transaction. */
+ IEEE802154_TRANSACTION_OVERFLOW = 0xf1,
+ /*
+ * The transceiver was in the transmitter enabled state when the
+ * receiver was requested to be enabled.
+ */
+ IEEE802154_TX_ACTIVE = 0xf2,
+ /* The appropriate key is not available in the ACL. */
+ IEEE802154_UNAVAILABLE_KEY = 0xf3,
+ /*
+ * A SET/GET request was issued with the identifier of a PIB attribute
+ * that is not supported.
+ */
+ IEEE802154_UNSUPPORTED_ATTR = 0xf4,
+ /*
+ * A request to perform a scan operation failed because the MLME was
+ * in the process of performing a previously initiated scan operation.
+ */
+ IEEE802154_SCAN_IN_PROGRESS = 0xfc,
+};
+
+/**
+ * ieee802154_is_valid_psdu_len - check if psdu len is valid
+ * available lengths:
+ * 0-4 Reserved
+ * 5 MPDU (Acknowledgment)
+ * 6-8 Reserved
+ * 9-127 MPDU
+ *
+ * @len: psdu length (MHR + payload + MFR)
+ */
+static inline bool ieee802154_is_valid_psdu_len(const u8 len)
+{
+ return (len == IEEE802154_ACK_PSDU_LEN ||
+ (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU));
+}
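A quick standalone check of the boundary cases listed in the kernel-doc above, with the constants inlined:

#include <stdbool.h>
#include <stdio.h>

static bool is_valid_psdu_len(unsigned int len)
{
	return len == 5 || (len >= 9 && len <= 127);	/* ACK, or 9..MTU */
}

int main(void)
{
	const unsigned int lens[] = { 4, 5, 8, 9, 127, 128 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %3u -> %s\n", lens[i],
		       is_valid_psdu_len(lens[i]) ? "valid" : "reserved");
	return 0;
}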
+
+/**
+ * ieee802154_is_valid_extended_addr - check if extended addr is valid
+ * @addr: extended addr to check
+ */
+static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
+{
+ /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
+ * is used internally as extended to short address broadcast mapping.
+ * This is currently a workaround because neighbor discovery can't
+ * deal with short address types right now.
+ */
+ return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
+ (addr != cpu_to_le64(0xffffffffffffffffULL)));
+}
+
+/**
+ * ieee802154_random_extended_addr - generates a random extended address
+ * @addr: extended addr pointer to place the random address
+ */
+static inline void ieee802154_random_extended_addr(__le64 *addr)
+{
+ get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
+
+ /* toggle some bit if we hit an invalid extended addr */
+ if (!ieee802154_is_valid_extended_addr(*addr))
+ ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
+}
+
+#endif /* LINUX_IEEE802154_H */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
new file mode 100644
index 000000000..f563907ed
--- /dev/null
+++ b/include/linux/if_arp.h
@@ -0,0 +1,47 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the ARP (RFC 826) protocol.
+ *
+ * Version: @(#)if_arp.h 1.0.1 04/16/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1986-1988
+ * Portions taken from the KA9Q/NOS (v2.00m PA0GRI) source.
+ * Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche,
+ * Jonathan Layes <layes@loran.com>
+ * Arnaldo Carvalho de Melo <acme@conectiva.com.br> ARPHRD_HWX25
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_ARP_H
+#define _LINUX_IF_ARP_H
+
+#include <linux/skbuff.h>
+#include <uapi/linux/if_arp.h>
+
+static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
+{
+ return (struct arphdr *)skb_network_header(skb);
+}
+
+static inline int arp_hdr_len(struct net_device *dev)
+{
+ switch (dev->type) {
+#if IS_ENABLED(CONFIG_FIREWIRE_NET)
+ case ARPHRD_IEEE1394:
+ /* ARP header, device address and 2 IP addresses */
+ return sizeof(struct arphdr) + dev->addr_len + sizeof(u32) * 2;
+#endif
+ default:
+ /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
+ return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
+ }
+}
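For the common Ethernet/IPv4 case the arithmetic above works out to 28 bytes; a standalone sketch, with the 8-byte on-wire arphdr size inlined as an assumption:

#include <stdio.h>

int main(void)
{
	const unsigned int arphdr_len = 8;	/* ar_hrd+ar_pro+ar_hln+ar_pln+ar_op */
	const unsigned int eth_alen = 6, ipv4_alen = 4;

	printf("Ethernet ARP body: %u bytes\n",
	       arphdr_len + (eth_alen + ipv4_alen) * 2);	/* 28 */
	return 0;
}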
+#endif /* _LINUX_IF_ARP_H */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
new file mode 100644
index 000000000..dad8b00be
--- /dev/null
+++ b/include/linux/if_bridge.h
@@ -0,0 +1,77 @@
+/*
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_BRIDGE_H
+#define _LINUX_IF_BRIDGE_H
+
+
+#include <linux/netdevice.h>
+#include <uapi/linux/if_bridge.h>
+#include <linux/bitops.h>
+
+struct br_ip {
+ union {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+ } u;
+ __be16 proto;
+ __u16 vid;
+};
+
+struct br_ip_list {
+ struct list_head list;
+ struct br_ip addr;
+};
+
+#define BR_HAIRPIN_MODE BIT(0)
+#define BR_BPDU_GUARD BIT(1)
+#define BR_ROOT_BLOCK BIT(2)
+#define BR_MULTICAST_FAST_LEAVE BIT(3)
+#define BR_ADMIN_COST BIT(4)
+#define BR_LEARNING BIT(5)
+#define BR_FLOOD BIT(6)
+#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
+#define BR_PROMISC BIT(7)
+#define BR_PROXYARP BIT(8)
+#define BR_LEARNING_SYNC BIT(9)
+#define BR_PROXYARP_WIFI BIT(10)
+
+extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+
+typedef int br_should_route_hook_t(struct sk_buff *skb);
+extern br_should_route_hook_t __rcu *br_should_route_hook;
+
+#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
+int br_multicast_list_adjacent(struct net_device *dev,
+ struct list_head *br_ip_list);
+bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
+bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+#else
+static inline int br_multicast_list_adjacent(struct net_device *dev,
+ struct list_head *br_ip_list)
+{
+ return 0;
+}
+static inline bool br_multicast_has_querier_anywhere(struct net_device *dev,
+ int proto)
+{
+ return false;
+}
+static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
+ int proto)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h
new file mode 100644
index 000000000..d984694c3
--- /dev/null
+++ b/include/linux/if_eql.h
@@ -0,0 +1,49 @@
+/*
+ * Equalizer Load-balancer for serial network interfaces.
+ *
+ * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * The author may be reached as simon@ncm.com, or C/O
+ * NCM
+ * Attn: Simon Janes
+ * 6803 Whittier Ave
+ * McLean VA 22101
+ * Phone: 1-703-847-0040 ext 103
+ */
+#ifndef _LINUX_IF_EQL_H
+#define _LINUX_IF_EQL_H
+
+
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <uapi/linux/if_eql.h>
+
+typedef struct slave {
+ struct list_head list;
+ struct net_device *dev;
+ long priority;
+ long priority_bps;
+ long priority_Bps;
+ long bytes_queued;
+} slave_t;
+
+typedef struct slave_queue {
+ spinlock_t lock;
+ struct list_head all_slaves;
+ int num_slaves;
+ struct net_device *master_dev;
+} slave_queue_t;
+
+typedef struct equalizer {
+ slave_queue_t queue;
+ int min_slaves;
+ int max_slaves;
+ struct timer_list timer;
+} equalizer_t;
+
+#endif /* _LINUX_EQL_H */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
new file mode 100644
index 000000000..d5569734f
--- /dev/null
+++ b/include/linux/if_ether.h
@@ -0,0 +1,35 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Ethernet IEEE 802.3 interface.
+ *
+ * Version: @(#)if_ether.h 1.0.1a 02/08/94
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <alan@lxorguk.ukuu.org.uk>
+ * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_ETHER_H
+#define _LINUX_IF_ETHER_H
+
+#include <linux/skbuff.h>
+#include <uapi/linux/if_ether.h>
+
+static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb_mac_header(skb);
+}
+
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+
+extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
+
+#endif /* _LINUX_IF_ETHER_H */
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h
new file mode 100644
index 000000000..f5550b3ee
--- /dev/null
+++ b/include/linux/if_fddi.h
@@ -0,0 +1,121 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the ANSI FDDI interface.
+ *
+ * Version: @(#)if_fddi.h 1.0.2 Sep 29 2004
+ *
+ * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * if_fddi.h is based on previous if_ether.h and if_tr.h work by
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Donald Becker, <becker@super.org>
+ * Alan Cox, <alan@lxorguk.ukuu.org.uk>
+ * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk>
+ * Peter De Schrijver, <stud11@cc4.kuleuven.ac.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_FDDI_H
+#define _LINUX_IF_FDDI_H
+
+#include <linux/netdevice.h>
+#include <uapi/linux/if_fddi.h>
+
+/* Define FDDI statistics structure */
+struct fddi_statistics {
+
+ /* Generic statistics. */
+
+ struct net_device_stats gen;
+
+ /* Detailed FDDI statistics. Adopted from RFC 1512 */
+
+ __u8 smt_station_id[8];
+ __u32 smt_op_version_id;
+ __u32 smt_hi_version_id;
+ __u32 smt_lo_version_id;
+ __u8 smt_user_data[32];
+ __u32 smt_mib_version_id;
+ __u32 smt_mac_cts;
+ __u32 smt_non_master_cts;
+ __u32 smt_master_cts;
+ __u32 smt_available_paths;
+ __u32 smt_config_capabilities;
+ __u32 smt_config_policy;
+ __u32 smt_connection_policy;
+ __u32 smt_t_notify;
+ __u32 smt_stat_rpt_policy;
+ __u32 smt_trace_max_expiration;
+ __u32 smt_bypass_present;
+ __u32 smt_ecm_state;
+ __u32 smt_cf_state;
+ __u32 smt_remote_disconnect_flag;
+ __u32 smt_station_status;
+ __u32 smt_peer_wrap_flag;
+ __u32 smt_time_stamp;
+ __u32 smt_transition_time_stamp;
+ __u32 mac_frame_status_functions;
+ __u32 mac_t_max_capability;
+ __u32 mac_tvx_capability;
+ __u32 mac_available_paths;
+ __u32 mac_current_path;
+ __u8 mac_upstream_nbr[FDDI_K_ALEN];
+ __u8 mac_downstream_nbr[FDDI_K_ALEN];
+ __u8 mac_old_upstream_nbr[FDDI_K_ALEN];
+ __u8 mac_old_downstream_nbr[FDDI_K_ALEN];
+ __u32 mac_dup_address_test;
+ __u32 mac_requested_paths;
+ __u32 mac_downstream_port_type;
+ __u8 mac_smt_address[FDDI_K_ALEN];
+ __u32 mac_t_req;
+ __u32 mac_t_neg;
+ __u32 mac_t_max;
+ __u32 mac_tvx_value;
+ __u32 mac_frame_cts;
+ __u32 mac_copied_cts;
+ __u32 mac_transmit_cts;
+ __u32 mac_error_cts;
+ __u32 mac_lost_cts;
+ __u32 mac_frame_error_threshold;
+ __u32 mac_frame_error_ratio;
+ __u32 mac_rmt_state;
+ __u32 mac_da_flag;
+ __u32 mac_una_da_flag;
+ __u32 mac_frame_error_flag;
+ __u32 mac_ma_unitdata_available;
+ __u32 mac_hardware_present;
+ __u32 mac_ma_unitdata_enable;
+ __u32 path_tvx_lower_bound;
+ __u32 path_t_max_lower_bound;
+ __u32 path_max_t_req;
+ __u32 path_configuration[8];
+ __u32 port_my_type[2];
+ __u32 port_neighbor_type[2];
+ __u32 port_connection_policies[2];
+ __u32 port_mac_indicated[2];
+ __u32 port_current_path[2];
+ __u8 port_requested_paths[3*2];
+ __u32 port_mac_placement[2];
+ __u32 port_available_paths[2];
+ __u32 port_pmd_class[2];
+ __u32 port_connection_capabilities[2];
+ __u32 port_bs_flag[2];
+ __u32 port_lct_fail_cts[2];
+ __u32 port_ler_estimate[2];
+ __u32 port_lem_reject_cts[2];
+ __u32 port_lem_cts[2];
+ __u32 port_ler_cutoff[2];
+ __u32 port_ler_alarm[2];
+ __u32 port_connect_state[2];
+ __u32 port_pcm_state[2];
+ __u32 port_pc_withhold[2];
+ __u32 port_ler_flag[2];
+ __u32 port_hardware_present[2];
+};
+#endif /* _LINUX_IF_FDDI_H */
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
new file mode 100644
index 000000000..4316aa173
--- /dev/null
+++ b/include/linux/if_frad.h
@@ -0,0 +1,97 @@
+/*
+ * DLCI/FRAD Definitions for Frame Relay Access Devices. DLCI devices are
+ * created for each DLCI associated with a FRAD. The FRAD driver
+ * is not truly a network device, but the lower level device
+ * handler. This allows other FRAD manufacturers to use the DLCI
+ * code, including its RFC1490 encapsulation alongside the current
+ * implementation for the Sangoma cards.
+ *
+ * Version: @(#)if_ifrad.h 0.15 31 Mar 96
+ *
+ * Author: Mike McLagan <mike.mclagan@linux.org>
+ *
+ * Changes:
+ * 0.15 Mike McLagan changed structure defs (packed)
+ * re-arranged flags
+ * added DLCI_RET vars
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _FRAD_H_
+#define _FRAD_H_
+
+#include <uapi/linux/if_frad.h>
+
+
+#if defined(CONFIG_DLCI) || defined(CONFIG_DLCI_MODULE)
+
+/* these are the fields of an RFC 1490 header */
+struct frhdr
+{
+ unsigned char control;
+
+ /* for IP packets, this can be the NLPID */
+ unsigned char pad;
+
+ unsigned char NLPID;
+ unsigned char OUI[3];
+ __be16 PID;
+
+#define IP_NLPID pad
+} __packed;
+
+/* see RFC 1490 for the definition of the following */
+#define FRAD_I_UI 0x03
+
+#define FRAD_P_PADDING 0x00
+#define FRAD_P_Q933 0x08
+#define FRAD_P_SNAP 0x80
+#define FRAD_P_CLNP 0x81
+#define FRAD_P_IP 0xCC
+
+struct dlci_local
+{
+ struct net_device *master;
+ struct net_device *slave;
+ struct dlci_conf config;
+ int configured;
+ struct list_head list;
+
+ /* callback function */
+ void (*receive)(struct sk_buff *skb, struct net_device *);
+};
+
+struct frad_local
+{
+ struct net_device_stats stats;
+
+ /* devices which this FRAD is slaved to */
+ struct net_device *master[CONFIG_DLCI_MAX];
+ short dlci[CONFIG_DLCI_MAX];
+
+ struct frad_conf config;
+ int configured; /* has this device been configured */
+ int initialized; /* mem_start, port, irq set ? */
+
+ /* callback functions */
+ int (*activate)(struct net_device *, struct net_device *);
+ int (*deactivate)(struct net_device *, struct net_device *);
+ int (*assoc)(struct net_device *, struct net_device *);
+ int (*deassoc)(struct net_device *, struct net_device *);
+ int (*dlci_conf)(struct net_device *, struct net_device *, int get);
+
+ /* fields that are used by the Sangoma SDLA cards */
+ struct timer_list timer;
+ int type; /* adapter type */
+ int state; /* state of the S502/8 control latch */
+ int buffer; /* current buffer for S508 firmware */
+};
+
+#endif /* CONFIG_DLCI || CONFIG_DLCI_MODULE */
+
+extern void dlci_ioctl_set(int (*hook)(unsigned int, void __user *));
+
+#endif
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
new file mode 100644
index 000000000..da4929927
--- /dev/null
+++ b/include/linux/if_link.h
@@ -0,0 +1,19 @@
+#ifndef _LINUX_IF_LINK_H
+#define _LINUX_IF_LINK_H
+
+#include <uapi/linux/if_link.h>
+
+
+/* We don't want this structure exposed to user space */
+struct ifla_vf_info {
+ __u32 vf;
+ __u8 mac[32];
+ __u32 vlan;
+ __u32 qos;
+ __u32 spoofchk;
+ __u32 linkstate;
+ __u32 min_tx_rate;
+ __u32 max_tx_rate;
+ __u32 rss_query_en;
+};
+#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h
new file mode 100644
index 000000000..81e434c50
--- /dev/null
+++ b/include/linux/if_ltalk.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_LTALK_H
+#define __LINUX_LTALK_H
+
+#include <uapi/linux/if_ltalk.h>
+
+extern struct net_device *alloc_ltalkdev(int sizeof_priv);
+#endif
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
new file mode 100644
index 000000000..6f6929ea8
--- /dev/null
+++ b/include/linux/if_macvlan.h
@@ -0,0 +1,115 @@
+#ifndef _LINUX_IF_MACVLAN_H
+#define _LINUX_IF_MACVLAN_H
+
+#include <linux/if_link.h>
+#include <linux/if_vlan.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/netlink.h>
+#include <net/netlink.h>
+#include <linux/u64_stats_sync.h>
+
+#if IS_ENABLED(CONFIG_MACVTAP)
+struct socket *macvtap_get_socket(struct file *);
+#else
+#include <linux/err.h>
+#include <linux/errno.h>
+struct file;
+struct socket;
+static inline struct socket *macvtap_get_socket(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_MACVTAP */
+
+struct macvlan_port;
+struct macvtap_queue;
+
+/*
+ * Maximum times a macvtap device can be opened. This can be used to
+ * configure the number of receive queues, e.g. for multiqueue virtio.
+ */
+#define MAX_MACVTAP_QUEUES 16
+
+#define MACVLAN_MC_FILTER_BITS 8
+#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
+
+struct macvlan_dev {
+ struct net_device *dev;
+ struct list_head list;
+ struct hlist_node hlist;
+ struct macvlan_port *port;
+ struct net_device *lowerdev;
+ void *fwd_priv;
+ struct vlan_pcpu_stats __percpu *pcpu_stats;
+
+ DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
+
+ netdev_features_t set_features;
+ enum macvlan_mode mode;
+ u16 flags;
+ /* This array tracks active taps. */
+ struct macvtap_queue __rcu *taps[MAX_MACVTAP_QUEUES];
+ /* This list tracks all taps (both enabled and disabled) */
+ struct list_head queue_list;
+ int numvtaps;
+ int numqueues;
+ netdev_features_t tap_features;
+ int minor;
+ int nest_level;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
+ unsigned int macaddr_count;
+};
+
+static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
+ unsigned int len, bool success,
+ bool multicast)
+{
+ if (likely(success)) {
+ struct vlan_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += len;
+ if (multicast)
+ pcpu_stats->rx_multicast++;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(vlan->pcpu_stats->rx_errors);
+ }
+}
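A hedged kernel-context sketch of where a macvlan-style receive path would call the accounting helper above; the length and multicast flag are captured before the skb is handed off, and the function name is illustrative:

static void demo_macvlan_rx(struct macvlan_dev *vlan, struct sk_buff *skb)
{
	unsigned int len = skb->len + ETH_HLEN;
	bool multicast = is_multicast_ether_addr(eth_hdr(skb)->h_dest);
	int ret = netif_rx(skb);	/* skb ownership passes here */

	/* updates the lockless per-cpu counters under u64_stats_update_*() */
	macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, multicast);
}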
+
+extern void macvlan_common_setup(struct net_device *dev);
+
+extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[]);
+
+extern void macvlan_count_rx(const struct macvlan_dev *vlan,
+ unsigned int len, bool success,
+ bool multicast);
+
+extern void macvlan_dellink(struct net_device *dev, struct list_head *head);
+
+extern int macvlan_link_register(struct rtnl_link_ops *ops);
+
+#if IS_ENABLED(CONFIG_MACVLAN)
+static inline struct net_device *
+macvlan_dev_real_dev(const struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->lowerdev;
+}
+#else
+static inline struct net_device *
+macvlan_dev_real_dev(const struct net_device *dev)
+{
+ BUG();
+ return NULL;
+}
+#endif
+
+#endif /* _LINUX_IF_MACVLAN_H */
diff --git a/include/linux/if_phonet.h b/include/linux/if_phonet.h
new file mode 100644
index 000000000..bbcdb0a76
--- /dev/null
+++ b/include/linux/if_phonet.h
@@ -0,0 +1,14 @@
+/*
+ * File: if_phonet.h
+ *
+ * Phonet interface kernel definitions
+ *
+ * Copyright (C) 2008 Nokia Corporation. All rights reserved.
+ */
+#ifndef LINUX_IF_PHONET_H
+#define LINUX_IF_PHONET_H
+
+#include <uapi/linux/if_phonet.h>
+
+extern struct header_ops phonet_header_ops;
+#endif
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
new file mode 100644
index 000000000..0fb71e532
--- /dev/null
+++ b/include/linux/if_pppol2tp.h
@@ -0,0 +1,21 @@
+/***************************************************************************
+ * Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661)
+ *
+ * This file supplies definitions required by the PPP over L2TP driver
+ * (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
+ *
+ * License:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef __LINUX_IF_PPPOL2TP_H
+#define __LINUX_IF_PPPOL2TP_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <uapi/linux/if_pppol2tp.h>
+
+#endif
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
new file mode 100644
index 000000000..66a7d7600
--- /dev/null
+++ b/include/linux/if_pppox.h
@@ -0,0 +1,98 @@
+/***************************************************************************
+ * Linux PPP over X - Generic PPP transport layer sockets
+ * Linux PPP over Ethernet (PPPoE) Socket Implementation (RFC 2516)
+ *
+ * This file supplies definitions required by the PPP over Ethernet driver
+ * (pppox.c). All version information wrt this file is located in pppox.c
+ *
+ * License:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef __LINUX_IF_PPPOX_H
+#define __LINUX_IF_PPPOX_H
+
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/ppp_channel.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/if_pppox.h>
+
+static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb)
+{
+ return (struct pppoe_hdr *)skb_network_header(skb);
+}
+
+struct pppoe_opt {
+ struct net_device *dev; /* device associated with socket*/
+ int ifindex; /* ifindex of device associated with socket */
+ struct pppoe_addr pa; /* what this socket is bound to*/
+ struct sockaddr_pppox relay; /* what socket data will be
+ relayed to (PPPoE relaying) */
+ struct work_struct padt_work;/* Work item for handling PADT */
+};
+
+struct pptp_opt {
+ struct pptp_addr src_addr;
+ struct pptp_addr dst_addr;
+ u32 ack_sent, ack_recv;
+ u32 seq_sent, seq_recv;
+ int ppp_flags;
+};
+#include <net/sock.h>
+
+struct pppox_sock {
+ /* struct sock must be the first member of pppox_sock */
+ struct sock sk;
+ struct ppp_channel chan;
+ struct pppox_sock *next; /* for hash table */
+ union {
+ struct pppoe_opt pppoe;
+ struct pptp_opt pptp;
+ } proto;
+ __be16 num;
+};
+#define pppoe_dev proto.pppoe.dev
+#define pppoe_ifindex proto.pppoe.ifindex
+#define pppoe_pa proto.pppoe.pa
+#define pppoe_relay proto.pppoe.relay
+
+static inline struct pppox_sock *pppox_sk(struct sock *sk)
+{
+ return (struct pppox_sock *)sk;
+}
+
+static inline struct sock *sk_pppox(struct pppox_sock *po)
+{
+ return (struct sock *)po;
+}
+
+struct module;
+
+struct pppox_proto {
+ int (*create)(struct net *net, struct socket *sock);
+ int (*ioctl)(struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+ struct module *owner;
+};
+
+extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
+extern void unregister_pppox_proto(int proto_num);
+extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
+extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+/* PPPoX socket states */
+enum {
+ PPPOX_NONE = 0, /* initial state */
+ PPPOX_CONNECTED = 1, /* connection established ==TCP_ESTABLISHED */
+ PPPOX_BOUND = 2, /* bound to ppp device */
+ PPPOX_RELAY = 4, /* forwarding is enabled */
+ PPPOX_ZOMBIE = 8, /* dead, but still bound to ppp device */
+ PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
+};
+
+#endif /* !(__LINUX_IF_PPPOX_H) */
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
new file mode 100644
index 000000000..a6aa97075
--- /dev/null
+++ b/include/linux/if_team.h
@@ -0,0 +1,299 @@
+/*
+ * include/linux/if_team.h - Network team device driver header
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _LINUX_IF_TEAM_H_
+#define _LINUX_IF_TEAM_H_
+
+#include <linux/netpoll.h>
+#include <net/sch_generic.h>
+#include <linux/types.h>
+#include <uapi/linux/if_team.h>
+
+struct team_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_multicast;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_dropped;
+ u32 tx_dropped;
+};
+
+struct team;
+
+struct team_port {
+ struct net_device *dev;
+ struct hlist_node hlist; /* node in enabled ports hash list */
+ struct list_head list; /* node in ordinary list */
+ struct team *team;
+ int index; /* index of enabled port. If disabled, it's set to -1 */
+
+ bool linkup; /* either state.linkup or user.linkup */
+
+ struct {
+ bool linkup;
+ u32 speed;
+ u8 duplex;
+ } state;
+
+ /* Values set by userspace */
+ struct {
+ bool linkup;
+ bool linkup_enabled;
+ } user;
+
+ /* Custom gennetlink interface related flags */
+ bool changed;
+ bool removed;
+
+ /*
+ * A place for storing original values of the device before it
+ * becomes a port.
+ */
+ struct {
+ unsigned char dev_addr[MAX_ADDR_LEN];
+ unsigned int mtu;
+ } orig;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+#endif
+
+ s32 priority; /* lower number ~ higher priority */
+ u16 queue_id;
+ struct list_head qom_list; /* node in queue override mapping list */
+ struct rcu_head rcu;
+ long mode_priv[0];
+};
+
+static inline bool team_port_enabled(struct team_port *port)
+{
+ return port->index != -1;
+}
+
+static inline bool team_port_txable(struct team_port *port)
+{
+ return port->linkup && team_port_enabled(port);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static inline void team_netpoll_send_skb(struct team_port *port,
+ struct sk_buff *skb)
+{
+ struct netpoll *np = port->np;
+
+ if (np)
+ netpoll_send_skb(np, skb);
+}
+#else
+static inline void team_netpoll_send_skb(struct team_port *port,
+ struct sk_buff *skb)
+{
+}
+#endif
+
+struct team_mode_ops {
+ int (*init)(struct team *team);
+ void (*exit)(struct team *team);
+ rx_handler_result_t (*receive)(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb);
+ bool (*transmit)(struct team *team, struct sk_buff *skb);
+ int (*port_enter)(struct team *team, struct team_port *port);
+ void (*port_leave)(struct team *team, struct team_port *port);
+ void (*port_change_dev_addr)(struct team *team, struct team_port *port);
+ void (*port_enabled)(struct team *team, struct team_port *port);
+ void (*port_disabled)(struct team *team, struct team_port *port);
+};
+
+extern int team_modeop_port_enter(struct team *team, struct team_port *port);
+extern void team_modeop_port_change_dev_addr(struct team *team,
+ struct team_port *port);
+
+enum team_option_type {
+ TEAM_OPTION_TYPE_U32,
+ TEAM_OPTION_TYPE_STRING,
+ TEAM_OPTION_TYPE_BINARY,
+ TEAM_OPTION_TYPE_BOOL,
+ TEAM_OPTION_TYPE_S32,
+};
+
+struct team_option_inst_info {
+ u32 array_index;
+ struct team_port *port; /* != NULL if per-port */
+};
+
+struct team_gsetter_ctx {
+ union {
+ u32 u32_val;
+ const char *str_val;
+ struct {
+ const void *ptr;
+ u32 len;
+ } bin_val;
+ bool bool_val;
+ s32 s32_val;
+ } data;
+ struct team_option_inst_info *info;
+};
+
+struct team_option {
+ struct list_head list;
+ const char *name;
+ bool per_port;
+ unsigned int array_size; /* != 0 means the option is array */
+ enum team_option_type type;
+ int (*init)(struct team *team, struct team_option_inst_info *info);
+ int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
+ int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
+};
+
+extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
+extern void team_options_change_check(struct team *team);
+
+struct team_mode {
+ const char *kind;
+ struct module *owner;
+ size_t priv_size;
+ size_t port_priv_size;
+ const struct team_mode_ops *ops;
+};
+
+#define TEAM_PORT_HASHBITS 4
+#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)
+
+#define TEAM_MODE_PRIV_LONGS 4
+#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
+
+struct team {
+ struct net_device *dev; /* associated netdevice */
+ struct team_pcpu_stats __percpu *pcpu_stats;
+
+ struct mutex lock; /* used for overall locking, e.g. port lists write */
+
+ /*
+ * List of enabled ports and their count
+ */
+ int en_port_count;
+ struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];
+
+ struct list_head port_list; /* list of all ports */
+
+ struct list_head option_list;
+ struct list_head option_inst_list; /* list of option instances */
+
+ const struct team_mode *mode;
+ struct team_mode_ops ops;
+ bool user_carrier_enabled;
+ bool queue_override_enabled;
+ struct list_head *qom_lists; /* array of queue override mapping lists */
+ bool port_mtu_change_allowed;
+ struct {
+ unsigned int count;
+ unsigned int interval; /* in ms */
+ atomic_t count_pending;
+ struct delayed_work dw;
+ } notify_peers;
+ struct {
+ unsigned int count;
+ unsigned int interval; /* in ms */
+ atomic_t count_pending;
+ struct delayed_work dw;
+ } mcast_rejoin;
+ long mode_priv[TEAM_MODE_PRIV_LONGS];
+};
+
+static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+ sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+ skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
+ skb->dev = port->dev;
+ if (unlikely(netpoll_tx_running(team->dev))) {
+ team_netpoll_send_skb(port, skb);
+ return 0;
+ }
+ return dev_queue_xmit(skb);
+}
+
+static inline struct hlist_head *team_port_index_hash(struct team *team,
+ int port_index)
+{
+ return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
+}
+
+static inline struct team_port *team_get_port_by_index(struct team *team,
+ int port_index)
+{
+ struct team_port *port;
+ struct hlist_head *head = team_port_index_hash(team, port_index);
+
+ hlist_for_each_entry(port, head, hlist)
+ if (port->index == port_index)
+ return port;
+ return NULL;
+}
+
+static inline int team_num_to_port_index(struct team *team, int num)
+{
+ int en_port_count = ACCESS_ONCE(team->en_port_count);
+
+ if (unlikely(!en_port_count))
+ return 0;
+ return num % en_port_count;
+}
+
+static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
+ int port_index)
+{
+ struct team_port *port;
+ struct hlist_head *head = team_port_index_hash(team, port_index);
+
+ hlist_for_each_entry_rcu(port, head, hlist)
+ if (port->index == port_index)
+ return port;
+ return NULL;
+}
+
+static inline struct team_port *
+team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
+{
+ struct team_port *cur;
+
+ if (likely(team_port_txable(port)))
+ return port;
+ cur = port;
+ list_for_each_entry_continue_rcu(cur, &team->port_list, list)
+ if (team_port_txable(cur))
+ return cur;
+ list_for_each_entry_rcu(cur, &team->port_list, list) {
+ if (cur == port)
+ break;
+ if (team_port_txable(cur))
+ return cur;
+ }
+ return NULL;
+}
+
+extern int team_options_register(struct team *team,
+ const struct team_option *option,
+ size_t option_count);
+extern void team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count);
+extern int team_mode_register(const struct team_mode *mode);
+extern void team_mode_unregister(const struct team_mode *mode);
+
+#define TEAM_DEFAULT_NUM_TX_QUEUES 16
+#define TEAM_DEFAULT_NUM_RX_QUEUES 16
+
+#endif /* _LINUX_IF_TEAM_H_ */
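
As a usage sketch only (hypothetical mode code, not part of this header): a team mode could expose a single global u32 option through the getter/setter callbacks declared above. The option name "prio", the use of mode_priv[] as storage and the example_* helpers are all illustrative assumptions.

static int example_prio_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	/* assumes the mode keeps a u32 in the first mode_priv slot */
	ctx->data.u32_val = *(u32 *)&team->mode_priv[0];
	return 0;
}

static int example_prio_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	*(u32 *)&team->mode_priv[0] = ctx->data.u32_val;
	return 0;
}

static const struct team_option example_options[] = {
	{
		.name	= "prio",
		.type	= TEAM_OPTION_TYPE_U32,
		.getter	= example_prio_get,
		.setter	= example_prio_set,
	},
};

static int example_mode_init(struct team *team)
{
	/* typically called from the mode's team_mode_ops .init callback */
	return team_options_register(team, example_options,
				     ARRAY_SIZE(example_options));
}
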
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
new file mode 100644
index 000000000..ed6da2e6d
--- /dev/null
+++ b/include/linux/if_tun.h
@@ -0,0 +1,32 @@
+/*
+ * Universal TUN/TAP device driver.
+ * Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __IF_TUN_H
+#define __IF_TUN_H
+
+#include <uapi/linux/if_tun.h>
+
+#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
+struct socket *tun_get_socket(struct file *);
+#else
+#include <linux/err.h>
+#include <linux/errno.h>
+struct file;
+struct socket;
+static inline struct socket *tun_get_socket(struct file *f)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_TUN */
+#endif /* __IF_TUN_H */
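
A minimal sketch of how a caller might resolve the socket behind a TUN/TAP file descriptor with tun_get_socket(); the helper name is hypothetical, and fget()/fput() come from <linux/file.h>.

static struct socket *example_get_tun_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (IS_ERR(sock))
		fput(file);	/* not a TUN/TAP file, drop the reference */
	return sock;
}
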
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
new file mode 100644
index 000000000..712710bc0
--- /dev/null
+++ b/include/linux/if_tunnel.h
@@ -0,0 +1,16 @@
+#ifndef _IF_TUNNEL_H_
+#define _IF_TUNNEL_H_
+
+#include <linux/ip.h>
+#include <linux/in6.h>
+#include <uapi/linux/if_tunnel.h>
+#include <linux/u64_stats_sync.h>
+
+/*
+ * Locking : hash tables are protected by RCU and RTNL
+ */
+
+#define for_each_ip_tunnel_rcu(pos, start) \
+ for (pos = rcu_dereference(start); pos; pos = rcu_dereference(pos->next))
+
+#endif /* _IF_TUNNEL_H_ */
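
A sketch of the intended use of for_each_ip_tunnel_rcu(): walking one RCU-protected hash chain under rcu_read_lock(). struct ip_tunnel and its parms field live elsewhere (net/ip_tunnels.h), and the lookup helper below is purely illustrative.

static struct ip_tunnel *example_tunnel_lookup(struct ip_tunnel __rcu **chain,
					       __be32 remote)
{
	struct ip_tunnel *t;

	/* caller holds rcu_read_lock(), per the locking comment above */
	for_each_ip_tunnel_rcu(t, *chain)
		if (t->parms.iph.daddr == remote)
			return t;
	return NULL;
}
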
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
new file mode 100644
index 000000000..920e4457c
--- /dev/null
+++ b/include/linux/if_vlan.h
@@ -0,0 +1,631 @@
+/*
+ * VLAN An implementation of 802.1Q VLAN tagging.
+ *
+ * Authors: Ben Greear <greearb@candelatech.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _LINUX_IF_VLAN_H_
+#define _LINUX_IF_VLAN_H_
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/bug.h>
+#include <uapi/linux/if_vlan.h>
+
+#define VLAN_HLEN 4 /* The additional bytes required by VLAN
+ * (in addition to the Ethernet header)
+ */
+#define VLAN_ETH_HLEN 18 /* Total octets in header. */
+#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */
+
+/*
+ * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
+ */
+#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */
+
+/*
+ * struct vlan_hdr - vlan header
+ * @h_vlan_TCI: priority and VLAN ID
+ * @h_vlan_encapsulated_proto: packet type ID or len
+ */
+struct vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+/**
+ * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
+ * @h_dest: destination ethernet address
+ * @h_source: source ethernet address
+ * @h_vlan_proto: ethernet protocol
+ * @h_vlan_TCI: priority and VLAN ID
+ * @h_vlan_encapsulated_proto: packet type ID or len
+ */
+struct vlan_ethhdr {
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+#include <linux/skbuff.h>
+
+static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct vlan_ethhdr *)skb_mac_header(skb);
+}
+
+#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
+#define VLAN_PRIO_SHIFT 13
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
+#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
+#define VLAN_N_VID 4096
+
+/* found in socket.c */
+extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
+
+static inline bool is_vlan_dev(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_802_1Q_VLAN;
+}
+
+#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
+
+/**
+ * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
+ * @rx_packets: number of received packets
+ * @rx_bytes: number of received bytes
+ * @rx_multicast: number of received multicast packets
+ * @tx_packets: number of transmitted packets
+ * @tx_bytes: number of transmitted bytes
+ * @syncp: synchronization point for 64bit counters
+ * @rx_errors: number of rx errors
+ * @tx_dropped: number of tx drops
+ */
+struct vlan_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_multicast;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_errors;
+ u32 tx_dropped;
+};
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+
+extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
+ __be16 vlan_proto, u16 vlan_id);
+extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern u16 vlan_dev_vlan_id(const struct net_device *dev);
+extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
+
+/**
+ * struct vlan_priority_tci_mapping - vlan egress priority mappings
+ * @priority: skb priority
+ * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+ * @next: pointer to next struct
+ */
+struct vlan_priority_tci_mapping {
+ u32 priority;
+ u16 vlan_qos;
+ struct vlan_priority_tci_mapping *next;
+};
+
+struct proc_dir_entry;
+struct netpoll;
+
+/**
+ * struct vlan_dev_priv - VLAN private device data
+ * @nr_ingress_mappings: number of ingress priority mappings
+ * @ingress_priority_map: ingress priority mappings
+ * @nr_egress_mappings: number of egress priority mappings
+ * @egress_priority_map: hash of egress priority mappings
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_id: VLAN identifier
+ * @flags: device flags
+ * @real_dev: underlying netdevice
+ * @real_dev_addr: address of underlying netdevice
+ * @dent: proc dir entry
+ * @vlan_pcpu_stats: ptr to percpu rx stats
+ */
+struct vlan_dev_priv {
+ unsigned int nr_ingress_mappings;
+ u32 ingress_priority_map[8];
+ unsigned int nr_egress_mappings;
+ struct vlan_priority_tci_mapping *egress_priority_map[16];
+
+ __be16 vlan_proto;
+ u16 vlan_id;
+ u16 flags;
+
+ struct net_device *real_dev;
+ unsigned char real_dev_addr[ETH_ALEN];
+
+ struct proc_dir_entry *dent;
+ struct vlan_pcpu_stats __percpu *vlan_pcpu_stats;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *netpoll;
+#endif
+ unsigned int nest_level;
+};
+
+static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static inline u16
+vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
+{
+ struct vlan_priority_tci_mapping *mp;
+
+ smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
+
+ mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
+ while (mp) {
+ if (mp->priority == skprio) {
+ return mp->vlan_qos; /* This should already be shifted
+ * to mask correctly with the
+ * VLAN's TCI */
+ }
+ mp = mp->next;
+ }
+ return 0;
+}
+
+extern bool vlan_do_receive(struct sk_buff **skb);
+
+extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
+extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);
+
+extern int vlan_vids_add_by_dev(struct net_device *dev,
+ const struct net_device *by_dev);
+extern void vlan_vids_del_by_dev(struct net_device *dev,
+ const struct net_device *by_dev);
+
+extern bool vlan_uses_dev(const struct net_device *dev);
+
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+ BUG_ON(!is_vlan_dev(dev));
+ return vlan_dev_priv(dev)->nest_level;
+}
+#else
+static inline struct net_device *
+__vlan_find_dev_deep_rcu(struct net_device *real_dev,
+ __be16 vlan_proto, u16 vlan_id)
+{
+ return NULL;
+}
+
+static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
+{
+ BUG();
+ return NULL;
+}
+
+static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
+{
+ BUG();
+ return 0;
+}
+
+static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
+{
+ BUG();
+ return 0;
+}
+
+static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
+ u32 skprio)
+{
+ return 0;
+}
+
+static inline bool vlan_do_receive(struct sk_buff **skb)
+{
+ return false;
+}
+
+static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
+{
+ return 0;
+}
+
+static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
+{
+}
+
+static inline int vlan_vids_add_by_dev(struct net_device *dev,
+ const struct net_device *by_dev)
+{
+ return 0;
+}
+
+static inline void vlan_vids_del_by_dev(struct net_device *dev,
+ const struct net_device *by_dev)
+{
+}
+
+static inline bool vlan_uses_dev(const struct net_device *dev)
+{
+ return false;
+}
+static inline int vlan_get_encap_level(struct net_device *dev)
+{
+ BUG();
+ return 0;
+}
+#endif
+
+static inline bool vlan_hw_offload_capable(netdev_features_t features,
+ __be16 proto)
+{
+ if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
+ return true;
+ if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
+ return true;
+ return false;
+}
+
+/**
+ * __vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns error if skb_cow_head fails.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline int __vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ struct vlan_ethhdr *veth;
+
+ if (skb_cow_head(skb, VLAN_HLEN) < 0)
+ return -ENOMEM;
+
+ veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+
+ /* Move the mac addresses to the beginning of the new header. */
+ memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
+ skb->mac_header -= VLAN_HLEN;
+
+ /* first, the ethernet type */
+ veth->h_vlan_proto = vlan_proto;
+
+ /* now, the TCI */
+ veth->h_vlan_TCI = htons(vlan_tci);
+
+ return 0;
+}
+
+/**
+ * vlan_insert_tag - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ *
+ * Does not change skb->protocol so this function can be used during receive.
+ */
+static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ int err;
+
+ err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ return skb;
+}
+
+/**
+ * vlan_insert_tag_set_proto - regular VLAN tag inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Inserts the VLAN tag into @skb as part of the payload
+ * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
+ __be16 vlan_proto,
+ u16 vlan_tci)
+{
+ skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
+ if (skb)
+ skb->protocol = vlan_proto;
+ return skb;
+}
+
+/*
+ * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+ skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
+ skb_vlan_tag_get(skb));
+ if (likely(skb))
+ skb->vlan_tci = 0;
+ return skb;
+}
+/*
+ * vlan_hwaccel_push_inside - pushes vlan tag to the payload
+ * @skb: skbuff to tag
+ *
+ * Checks if a tag is present in @skb->vlan_tci and, if it is, pushes the
+ * VLAN tag from @skb->vlan_tci inside to the payload.
+ *
+ * Following the skb_unshare() example, in case of error, the calling function
+ * doesn't have to worry about freeing the original skb.
+ */
+static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
+{
+ if (skb_vlan_tag_present(skb))
+ skb = __vlan_hwaccel_push_inside(skb);
+ return skb;
+}
+
+/**
+ * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
+ * @skb: skbuff to tag
+ * @vlan_proto: VLAN encapsulation protocol
+ * @vlan_tci: VLAN TCI to insert
+ *
+ * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
+ */
+static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
+ __be16 vlan_proto, u16 vlan_tci)
+{
+ skb->vlan_proto = vlan_proto;
+ skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+}
+
+/**
+ * __vlan_get_tag - get the VLAN ID that is part of the payload
+ * @skb: skbuff to query
+ * @vlan_tci: buffer to store value
+ *
+ * Returns error if the skb is not of VLAN type
+ */
+static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
+{
+ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
+
+ if (veth->h_vlan_proto != htons(ETH_P_8021Q) &&
+ veth->h_vlan_proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ *vlan_tci = ntohs(veth->h_vlan_TCI);
+ return 0;
+}
+
+/**
+ * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->cb[]
+ * @skb: skbuff to query
+ * @vlan_tci: buffer to store value
+ *
+ * Returns error if @skb->vlan_tci is not set correctly
+ */
+static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
+ u16 *vlan_tci)
+{
+ if (skb_vlan_tag_present(skb)) {
+ *vlan_tci = skb_vlan_tag_get(skb);
+ return 0;
+ } else {
+ *vlan_tci = 0;
+ return -EINVAL;
+ }
+}
+
+#define HAVE_VLAN_GET_TAG
+
+/**
+ * vlan_get_tag - get the VLAN ID from the skb
+ * @skb: skbuff to query
+ * @vlan_tci: buffer to store value
+ *
+ * Returns error if the skb is not VLAN tagged
+ */
+static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
+{
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
+ return __vlan_hwaccel_get_tag(skb, vlan_tci);
+ } else {
+ return __vlan_get_tag(skb, vlan_tci);
+ }
+}
+
+/**
+ * __vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ * @type: first vlan protocol
+ * @depth: buffer to store length of eth and vlan tags in bytes
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+ int *depth)
+{
+ unsigned int vlan_depth = skb->mac_len;
+
+ /* if type is 802.1Q/AD then the header should already be
+ * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+ * ETH_HLEN otherwise
+ */
+ if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+ if (vlan_depth) {
+ if (WARN_ON(vlan_depth < VLAN_HLEN))
+ return 0;
+ vlan_depth -= VLAN_HLEN;
+ } else {
+ vlan_depth = ETH_HLEN;
+ }
+ do {
+ struct vlan_hdr *vh;
+
+ if (unlikely(!pskb_may_pull(skb,
+ vlan_depth + VLAN_HLEN)))
+ return 0;
+
+ vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+ type = vh->h_vlan_encapsulated_proto;
+ vlan_depth += VLAN_HLEN;
+ } while (type == htons(ETH_P_8021Q) ||
+ type == htons(ETH_P_8021AD));
+ }
+
+ if (depth)
+ *depth = vlan_depth;
+
+ return type;
+}
+
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+{
+ return __vlan_get_protocol(skb, skb->protocol, NULL);
+}
+
+static inline void vlan_set_encap_proto(struct sk_buff *skb,
+ struct vlan_hdr *vhdr)
+{
+ __be16 proto;
+ unsigned short *rawp;
+
+ /*
+ * Was a VLAN packet, grab the encapsulated protocol, which the layer
+ * three protocols care about.
+ */
+
+ proto = vhdr->h_vlan_encapsulated_proto;
+ if (ntohs(proto) >= ETH_P_802_3_MIN) {
+ skb->protocol = proto;
+ return;
+ }
+
+ rawp = (unsigned short *)(vhdr + 1);
+ if (*rawp == 0xFFFF)
+ /*
+ * This is a magic hack to spot IPX packets. Older Novell
+ * breaks the protocol design and runs IPX over 802.3 without
+ * an 802.2 LLC layer. We look for FFFF which isn't a used
+ * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+ * but does for the rest.
+ */
+ skb->protocol = htons(ETH_P_802_3);
+ else
+ /*
+ * Real 802.2 LLC
+ */
+ skb->protocol = htons(ETH_P_802_2);
+}
+
+/**
+ * skb_vlan_tagged - check if skb is vlan tagged.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged, regardless of whether it is hardware
+ * accelerated or not.
+ */
+static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+{
+ if (!skb_vlan_tag_present(skb) &&
+ likely(skb->protocol != htons(ETH_P_8021Q) &&
+ skb->protocol != htons(ETH_P_8021AD)))
+ return false;
+
+ return true;
+}
+
+/**
+ * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
+ * @skb: skbuff to query
+ *
+ * Returns true if the skb is tagged with multiple vlan headers, regardless
+ * of whether it is hardware accelerated or not.
+ */
+static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+
+ if (!skb_vlan_tag_present(skb)) {
+ struct vlan_ethhdr *veh;
+
+ if (likely(protocol != htons(ETH_P_8021Q) &&
+ protocol != htons(ETH_P_8021AD)))
+ return false;
+
+ veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ }
+
+ if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD))
+ return false;
+
+ return true;
+}
+
+/**
+ * vlan_features_check - drop unsafe features for skb with multiple tags.
+ * @skb: skbuff to query
+ * @features: features to be checked
+ *
+ * Returns features without unsafe ones if the skb has multiple tags.
+ */
+static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (skb_vlan_tagged_multi(skb))
+ features = netdev_intersect_features(features,
+ NETIF_F_SG |
+ NETIF_F_HIGHDMA |
+ NETIF_F_FRAGLIST |
+ NETIF_F_GEN_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
+
+ return features;
+}
+
+#endif /* !(_LINUX_IF_VLAN_H_) */
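
A hedged transmit-path sketch tying several of the helpers above together: when the device can offload tag insertion for the given protocol, only the skb metadata is set; otherwise the tag is pushed into the payload in software. The wrapper name is hypothetical.

static struct sk_buff *example_vlan_tag_for_xmit(struct sk_buff *skb,
						 struct net_device *dev,
						 __be16 vlan_proto, u16 vlan_tci)
{
	if (vlan_hw_offload_capable(dev->features, vlan_proto)) {
		/* hardware inserts the tag, just record it in the skb */
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
		return skb;
	}
	/* software path; on error the skb is already freed and NULL returned */
	return vlan_insert_tag_set_proto(skb, vlan_proto, vlan_tci);
}
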
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
new file mode 100644
index 000000000..2c677afee
--- /dev/null
+++ b/include/linux/igmp.h
@@ -0,0 +1,134 @@
+/*
+ * Linux NET3: Internet Group Management Protocol [IGMP]
+ *
+ * Authors:
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Extended to talk the BSD extended IGMP protocol of mrouted 3.6
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IGMP_H
+#define _LINUX_IGMP_H
+
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/in.h>
+#include <uapi/linux/igmp.h>
+
+static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
+{
+ return (struct igmphdr *)skb_transport_header(skb);
+}
+
+static inline struct igmpv3_report *
+ igmpv3_report_hdr(const struct sk_buff *skb)
+{
+ return (struct igmpv3_report *)skb_transport_header(skb);
+}
+
+static inline struct igmpv3_query *
+ igmpv3_query_hdr(const struct sk_buff *skb)
+{
+ return (struct igmpv3_query *)skb_transport_header(skb);
+}
+
+extern int sysctl_igmp_max_memberships;
+extern int sysctl_igmp_max_msf;
+extern int sysctl_igmp_qrv;
+
+struct ip_sf_socklist {
+ unsigned int sl_max;
+ unsigned int sl_count;
+ struct rcu_head rcu;
+ __be32 sl_addr[0];
+};
+
+#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \
+ (count) * sizeof(__be32))
+
+#define IP_SFBLOCK 10 /* allocate this many at once */
+
+/* ip_mc_socklist is real list now. Speed is not argument;
+ this list never used in fast path code
+ */
+
+struct ip_mc_socklist {
+ struct ip_mc_socklist __rcu *next_rcu;
+ struct ip_mreqn multi;
+ unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */
+ struct ip_sf_socklist __rcu *sflist;
+ struct rcu_head rcu;
+};
+
+struct ip_sf_list {
+ struct ip_sf_list *sf_next;
+ __be32 sf_inaddr;
+ unsigned long sf_count[2]; /* include/exclude counts */
+ unsigned char sf_gsresp; /* include in g & s response? */
+ unsigned char sf_oldin; /* change state */
+ unsigned char sf_crcount; /* retrans. left to send */
+};
+
+struct ip_mc_list {
+ struct in_device *interface;
+ __be32 multiaddr;
+ unsigned int sfmode;
+ struct ip_sf_list *sources;
+ struct ip_sf_list *tomb;
+ unsigned long sfcount[2];
+ union {
+ struct ip_mc_list *next;
+ struct ip_mc_list __rcu *next_rcu;
+ };
+ struct ip_mc_list __rcu *next_hash;
+ struct timer_list timer;
+ int users;
+ atomic_t refcnt;
+ spinlock_t lock;
+ char tm_running;
+ char reporter;
+ char unsolicit_count;
+ char loaded;
+ unsigned char gsquery; /* check source marks? */
+ unsigned char crcount;
+ struct rcu_head rcu;
+};
+
+/* V3 exponential field decoding */
+#define IGMPV3_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
+#define IGMPV3_EXP(thresh, nbmant, nbexp, value) \
+ ((value) < (thresh) ? (value) : \
+ ((IGMPV3_MASK(value, nbmant) | (1<<(nbmant))) << \
+ (IGMPV3_MASK((value) >> (nbmant), nbexp) + (nbexp))))
+
+#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
+#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
+
+extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u16 proto);
+extern int igmp_rcv(struct sk_buff *);
+extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
+extern void ip_mc_drop_socket(struct sock *sk);
+extern int ip_mc_source(int add, int omode, struct sock *sk,
+ struct ip_mreq_source *mreqs, int ifindex);
+extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
+extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
+ struct ip_msfilter __user *optval, int __user *optlen);
+extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
+ struct group_filter __user *optval, int __user *optlen);
+extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif);
+extern void ip_mc_init_dev(struct in_device *);
+extern void ip_mc_destroy_dev(struct in_device *);
+extern void ip_mc_up(struct in_device *);
+extern void ip_mc_down(struct in_device *);
+extern void ip_mc_unmap(struct in_device *);
+extern void ip_mc_remap(struct in_device *);
+extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
+extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
+
+#endif
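
A compile-time worked example of the exponential decoding above (illustrative only, assuming <linux/bug.h> is reachable here): below the 0x80 threshold the code decodes to itself; above it the low nibble is the mantissa and bits 4-6 the exponent.

static inline void example_igmpv3_exp_decoding(void)
{
	/* linear range: the code is taken as-is */
	BUILD_BUG_ON(IGMPV3_MRC(0x40) != 0x40);
	/* exponential range: 0xE5 -> mant = 5, exp = 6,
	 * (5 | 0x10) << (6 + 3) = 10752 (units of 1/10 second) */
	BUILD_BUG_ON(IGMPV3_MRC(0xE5) != 10752);
}
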
diff --git a/include/linux/ihex.h b/include/linux/ihex.h
new file mode 100644
index 000000000..86713b058
--- /dev/null
+++ b/include/linux/ihex.h
@@ -0,0 +1,89 @@
+/*
+ * Compact binary representation of ihex records. Some devices need their
+ * firmware loaded in strange orders rather than a single big blob, but
+ * actually parsing ihex-as-text within the kernel seems silly. Thus,...
+ */
+
+#ifndef __LINUX_IHEX_H__
+#define __LINUX_IHEX_H__
+
+#include <linux/types.h>
+#include <linux/firmware.h>
+#include <linux/device.h>
+
+/* Intel HEX files actually limit the length to 256 bytes, but we have
+ drivers which would benefit from using separate records which are
+ longer than that, so we extend to 16 bits of length */
+struct ihex_binrec {
+ __be32 addr;
+ __be16 len;
+ uint8_t data[0];
+} __attribute__((packed));
+
+/* Find the next record, taking into account the 4-byte alignment */
+static inline const struct ihex_binrec *
+ihex_next_binrec(const struct ihex_binrec *rec)
+{
+ int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2;
+ rec = (void *)&rec->data[next];
+
+ return be16_to_cpu(rec->len) ? rec : NULL;
+}
+
+/* Check that ihex_next_binrec() won't take us off the end of the image... */
+static inline int ihex_validate_fw(const struct firmware *fw)
+{
+ const struct ihex_binrec *rec;
+ size_t ofs = 0;
+
+ while (ofs <= fw->size - sizeof(*rec)) {
+ rec = (void *)&fw->data[ofs];
+
+ /* Zero length marks end of records */
+ if (!be16_to_cpu(rec->len))
+ return 0;
+
+ /* Point to next record... */
+ ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3;
+ }
+ return -EINVAL;
+}
+
+/* Request firmware and validate it so that we can trust we won't
+ * run off the end while reading records... */
+static inline int request_ihex_firmware(const struct firmware **fw,
+ const char *fw_name,
+ struct device *dev)
+{
+ const struct firmware *lfw;
+ int ret;
+
+ ret = request_firmware(&lfw, fw_name, dev);
+ if (ret)
+ return ret;
+ ret = ihex_validate_fw(lfw);
+ if (ret) {
+ dev_err(dev, "Firmware \"%s\" not valid IHEX records\n",
+ fw_name);
+ release_firmware(lfw);
+ return ret;
+ }
+ *fw = lfw;
+ return 0;
+}
+#ifndef _LINUX_LIBRE_IHEX_H
+#define _LINUX_LIBRE_IHEX_H
+
+static inline int
+maybe_reject_ihex_firmware(const struct firmware **fw,
+ const char *name, struct device *device)
+{
+ if (strstr (name, NONFREE_FIRMWARE))
+ return reject_firmware(fw, name, device);
+ else
+ return request_ihex_firmware(fw, name, device);
+}
+
+#endif /* _LINUX_LIBRE_IHEX_H */
+
+#endif /* __LINUX_IHEX_H__ */
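
A sketch of the intended record-walking pattern, assuming the header above is included; the function name is illustrative and the device-specific write step is left as a placeholder.

static int example_load_ihex(struct device *dev, const char *fw_name)
{
	const struct firmware *fw;
	const struct ihex_binrec *rec;
	int ret;

	ret = request_ihex_firmware(&fw, fw_name, dev);
	if (ret)
		return ret;

	/* validated above, so the iteration cannot run off the end */
	for (rec = (const struct ihex_binrec *)fw->data; rec;
	     rec = ihex_next_binrec(rec)) {
		/* write be16_to_cpu(rec->len) bytes of rec->data to the
		 * device address be32_to_cpu(rec->addr) here */
	}

	release_firmware(fw);
	return 0;
}
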
diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h
new file mode 100644
index 000000000..fd1d540ea
--- /dev/null
+++ b/include/linux/iio/accel/kxcjk_1013.h
@@ -0,0 +1,22 @@
+/*
+ * KXCJK-1013 3-axis accelerometer Interface
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __IIO_KXCJK_1013_H__
+#define __IIO_KXCJK_1013_H__
+
+struct kxcjk_1013_platform_data {
+ bool active_high_intr;
+};
+
+#endif
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
new file mode 100644
index 000000000..e7fdec4db
--- /dev/null
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -0,0 +1,173 @@
+/*
+ * Support code for Analog Devices Sigma-Delta ADCs
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef __AD_SIGMA_DELTA_H__
+#define __AD_SIGMA_DELTA_H__
+
+enum ad_sigma_delta_mode {
+ AD_SD_MODE_CONTINUOUS = 0,
+ AD_SD_MODE_SINGLE = 1,
+ AD_SD_MODE_IDLE = 2,
+ AD_SD_MODE_POWERDOWN = 3,
+};
+
+/**
+ * struct ad_sigma_delta_calib_data - Calibration data for Sigma Delta devices
+ * @mode: Calibration mode.
+ * @channel: Calibration channel.
+ */
+struct ad_sd_calib_data {
+ unsigned int mode;
+ unsigned int channel;
+};
+
+struct ad_sigma_delta;
+struct iio_dev;
+
+/**
+ * struct ad_sigma_delta_info - Sigma Delta driver specific callbacks and options
+ * @set_channel: Will be called to select the current channel, may be NULL.
+ * @set_mode: Will be called to select the current mode, may be NULL.
+ * @postprocess_sample: Is called for each sampled data word, can be used to
+ * modify or drop the sample data. May be NULL.
+ * @has_registers: true if the device has writable and readable registers, false
+ * if there is just one read-only sample data shift register.
+ * @addr_shift: Shift of the register address in the communications register.
+ * @read_mask: Mask for the communications register having the read bit set.
+ */
+struct ad_sigma_delta_info {
+ int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
+ int (*set_mode)(struct ad_sigma_delta *, enum ad_sigma_delta_mode mode);
+ int (*postprocess_sample)(struct ad_sigma_delta *, unsigned int raw_sample);
+ bool has_registers;
+ unsigned int addr_shift;
+ unsigned int read_mask;
+};
+
+/**
+ * struct ad_sigma_delta - Sigma Delta device struct
+ * @spi: The spi device associated with the Sigma Delta device.
+ * @trig: The IIO trigger associated with the Sigma Delta device.
+ *
+ * Most of the fields are private to the sigma delta library code and should not
+ * be accessed by individual drivers.
+ */
+struct ad_sigma_delta {
+ struct spi_device *spi;
+ struct iio_trigger *trig;
+
+/* private: */
+ struct completion completion;
+ bool irq_dis;
+
+ bool bus_locked;
+
+ uint8_t comm;
+
+ const struct ad_sigma_delta_info *info;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ uint8_t data[4] ____cacheline_aligned;
+};
+
+static inline int ad_sigma_delta_set_channel(struct ad_sigma_delta *sd,
+ unsigned int channel)
+{
+ if (sd->info->set_channel)
+ return sd->info->set_channel(sd, channel);
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_set_mode(struct ad_sigma_delta *sd,
+ unsigned int mode)
+{
+ if (sd->info->set_mode)
+ return sd->info->set_mode(sd, mode);
+
+ return 0;
+}
+
+static inline int ad_sigma_delta_postprocess_sample(struct ad_sigma_delta *sd,
+ unsigned int raw_sample)
+{
+ if (sd->info->postprocess_sample)
+ return sd->info->postprocess_sample(sd, raw_sample);
+
+ return 0;
+}
+
+void ad_sd_set_comm(struct ad_sigma_delta *sigma_delta, uint8_t comm);
+int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ unsigned int size, unsigned int val);
+int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ unsigned int size, unsigned int *val);
+
+int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val);
+int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
+ const struct ad_sd_calib_data *cd, unsigned int n);
+int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
+ struct spi_device *spi, const struct ad_sigma_delta_info *info);
+
+int ad_sd_setup_buffer_and_trigger(struct iio_dev *indio_dev);
+void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev);
+
+int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig);
+
+#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, _extend_name, _type) \
+ { \
+ .type = (_type), \
+ .differential = (_channel2 == -1 ? 0 : 1), \
+ .indexed = 1, \
+ .channel = (_channel1), \
+ .channel2 = (_channel2), \
+ .address = (_address), \
+ .extend_name = (_extend_name), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = (_storagebits), \
+ .shift = (_shift), \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE)
+
+#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \
+ _storagebits, _shift, "shorted", IIO_VOLTAGE)
+
+#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \
+ _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_VOLTAGE)
+
+#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \
+ __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \
+ _storagebits, _shift, NULL, IIO_TEMP)
+
+#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \
+ _shift) \
+ __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \
+ _storagebits, _shift, "supply", IIO_VOLTAGE)
+
+#endif
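
A sketch of how a driver might describe its inputs with the channel macros above; the register addresses, bit widths and channel layout are invented for illustration, and IIO_CHAN_SOFT_TIMESTAMP() comes from <linux/iio/iio.h>.

static const struct iio_chan_spec example_sd_channels[] = {
	/* one differential pair and two single-ended 24-bit inputs */
	AD_SD_DIFF_CHANNEL(0, 0, 1, 0x0, 24, 32, 0),
	AD_SD_CHANNEL(1, 2, 0x2, 24, 32, 0),
	AD_SD_CHANNEL(2, 3, 0x3, 24, 32, 0),
	/* internal temperature sensor */
	AD_SD_TEMP_CHANNEL(3, 0x4, 24, 32, 0),
	IIO_CHAN_SOFT_TIMESTAMP(4),
};
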
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
new file mode 100644
index 000000000..eb8622b78
--- /dev/null
+++ b/include/linux/iio/buffer.h
@@ -0,0 +1,180 @@
+/* The industrial I/O core - generic buffer interfaces.
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_BUFFER_GENERIC_H_
+#define _IIO_BUFFER_GENERIC_H_
+#include <linux/sysfs.h>
+#include <linux/iio/iio.h>
+#include <linux/kref.h>
+
+#ifdef CONFIG_IIO_BUFFER
+
+struct iio_buffer;
+
+/**
+ * struct iio_buffer_access_funcs - access functions for buffers.
+ * @store_to: actually store stuff to the buffer
+ * @read_first_n: try to get a specified number of bytes (must exist)
+ * @data_available: indicates how much data is available for reading from
+ * the buffer.
+ * @request_update: if a parameter change has been marked, update underlying
+ * storage.
+ * @set_bytes_per_datum: set number of bytes per datum
+ * @set_length: set number of datums in buffer
+ * @release: called when the last reference to the buffer is dropped,
+ * should free all resources allocated by the buffer.
+ *
+ * The purpose of this structure is to make the buffer element
+ * modular, as even for a given driver different use cases may require
+ * different buffer designs (space efficiency vs speed for example).
+ *
+ * It is worth noting that a given buffer implementation may only support a
+ * small proportion of these functions. The core code 'should' cope fine with
+ * any of them not existing.
+ **/
+struct iio_buffer_access_funcs {
+ int (*store_to)(struct iio_buffer *buffer, const void *data);
+ int (*read_first_n)(struct iio_buffer *buffer,
+ size_t n,
+ char __user *buf);
+ size_t (*data_available)(struct iio_buffer *buffer);
+
+ int (*request_update)(struct iio_buffer *buffer);
+
+ int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
+ int (*set_length)(struct iio_buffer *buffer, int length);
+
+ void (*release)(struct iio_buffer *buffer);
+};
+
+/**
+ * struct iio_buffer - general buffer structure
+ * @length: [DEVICE] number of datums in buffer
+ * @bytes_per_datum: [DEVICE] size of individual datum including timestamp
+ * @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
+ * control method is used
+ * @scan_mask: [INTERN] bitmask used in masking scan mode elements
+ * @scan_timestamp: [INTERN] does the scan mode include a timestamp
+ * @access: [DRIVER] buffer access functions associated with the
+ * implementation.
+ * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
+ * @scan_el_group: [DRIVER] attribute group for those attributes not
+ * created from the iio_chan_info array.
+ * @pollq: [INTERN] wait queue to allow for polling on the buffer.
+ * @stufftoread: [INTERN] flag to indicate new data.
+ * @demux_list: [INTERN] list of operations required to demux the scan.
+ * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
+ * @buffer_list: [INTERN] entry in the devices list of current buffers.
+ * @ref: [INTERN] reference count of the buffer.
+ * @watermark: [INTERN] number of datums to wait for poll/read.
+ */
+struct iio_buffer {
+ int length;
+ int bytes_per_datum;
+ struct attribute_group *scan_el_attrs;
+ long *scan_mask;
+ bool scan_timestamp;
+ const struct iio_buffer_access_funcs *access;
+ struct list_head scan_el_dev_attr_list;
+ struct attribute_group buffer_group;
+ struct attribute_group scan_el_group;
+ wait_queue_head_t pollq;
+ bool stufftoread;
+ const struct attribute **attrs;
+ struct list_head demux_list;
+ void *demux_bounce;
+ struct list_head buffer_list;
+ struct kref ref;
+ unsigned int watermark;
+};
+
+/**
+ * iio_update_buffers() - add or remove buffer from active list
+ * @indio_dev: device to add buffer to
+ * @insert_buffer: buffer to insert
+ * @remove_buffer: buffer_to_remove
+ *
+ * Note this will tear down all the buffering and build it up again
+ */
+int iio_update_buffers(struct iio_dev *indio_dev,
+ struct iio_buffer *insert_buffer,
+ struct iio_buffer *remove_buffer);
+
+/**
+ * iio_buffer_init() - Initialize the buffer structure
+ * @buffer: buffer to be initialized
+ **/
+void iio_buffer_init(struct iio_buffer *buffer);
+
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
+
+/**
+ * iio_push_to_buffers() - push to a registered buffer.
+ * @indio_dev: iio_dev structure for device.
+ * @data: Full scan.
+ */
+int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data);
+
+/*
+ * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers
+ * @indio_dev: iio_dev structure for device.
+ * @data: sample data
+ * @timestamp: timestamp for the sample data
+ *
+ * Pushes data to the IIO device's buffers. If timestamps are enabled for the
+ * device the function will store the supplied timestamp as the last element in
+ * the sample data buffer before pushing it to the device buffers. The sample
+ * data buffer needs to be large enough to hold the additional timestamp
+ * (usually the buffer should be indio_dev->scan_bytes bytes large).
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev,
+ void *data, int64_t timestamp)
+{
+ if (indio_dev->scan_timestamp) {
+ size_t ts_offset = indio_dev->scan_bytes / sizeof(int64_t) - 1;
+ ((int64_t *)data)[ts_offset] = timestamp;
+ }
+
+ return iio_push_to_buffers(indio_dev, data);
+}
+
+int iio_update_demux(struct iio_dev *indio_dev);
+
+bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
+ const unsigned long *mask);
+
+struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer);
+void iio_buffer_put(struct iio_buffer *buffer);
+
+/**
+ * iio_device_attach_buffer - Attach a buffer to a IIO device
+ * @indio_dev: The device the buffer should be attached to
+ * @buffer: The buffer to attach to the device
+ *
+ * This function attaches a buffer to an IIO device. The buffer stays attached to
+ * the device until the device is freed. The function should only be called at
+ * most once per device.
+ */
+static inline void iio_device_attach_buffer(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ indio_dev->buffer = iio_buffer_get(buffer);
+}
+
+#else /* CONFIG_IIO_BUFFER */
+
+static inline void iio_buffer_get(struct iio_buffer *buffer) {}
+static inline void iio_buffer_put(struct iio_buffer *buffer) {}
+
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* _IIO_BUFFER_GENERIC_H_ */
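
A sketch of the usual producer side, assuming a triggered-buffer setup where iio_pollfunc_store_time() is the pollfunc top half (so pf->timestamp is valid) and <linux/iio/trigger_consumer.h> is included; the buffer sizing and the hardware read are placeholders.

static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	/* must be at least indio_dev->scan_bytes and 8-byte aligned so the
	 * trailing s64 timestamp fits; 32 bytes suffices for this example */
	u8 buf[32] __aligned(8);

	/* read the enabled scan channels from the hardware into buf here */

	iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);

	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
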
diff --git a/include/linux/iio/common/ssp_sensors.h b/include/linux/iio/common/ssp_sensors.h
new file mode 100644
index 000000000..f4d1b0edb
--- /dev/null
+++ b/include/linux/iio/common/ssp_sensors.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2014, Samsung Electronics Co. Ltd. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _SSP_SENSORS_H_
+#define _SSP_SENSORS_H_
+
+#include <linux/iio/iio.h>
+
+#define SSP_TIME_SIZE 4
+#define SSP_ACCELEROMETER_SIZE 6
+#define SSP_GYROSCOPE_SIZE 6
+#define SSP_BIO_HRM_RAW_SIZE 8
+#define SSP_BIO_HRM_RAW_FAC_SIZE 36
+#define SSP_BIO_HRM_LIB_SIZE 8
+
+/**
+ * enum ssp_sensor_type - SSP sensor type
+ */
+enum ssp_sensor_type {
+ SSP_ACCELEROMETER_SENSOR = 0,
+ SSP_GYROSCOPE_SENSOR,
+ SSP_GEOMAGNETIC_UNCALIB_SENSOR,
+ SSP_GEOMAGNETIC_RAW,
+ SSP_GEOMAGNETIC_SENSOR,
+ SSP_PRESSURE_SENSOR,
+ SSP_GESTURE_SENSOR,
+ SSP_PROXIMITY_SENSOR,
+ SSP_TEMPERATURE_HUMIDITY_SENSOR,
+ SSP_LIGHT_SENSOR,
+ SSP_PROXIMITY_RAW,
+ SSP_ORIENTATION_SENSOR,
+ SSP_STEP_DETECTOR,
+ SSP_SIG_MOTION_SENSOR,
+ SSP_GYRO_UNCALIB_SENSOR,
+ SSP_GAME_ROTATION_VECTOR,
+ SSP_ROTATION_VECTOR,
+ SSP_STEP_COUNTER,
+ SSP_BIO_HRM_RAW,
+ SSP_BIO_HRM_RAW_FAC,
+ SSP_BIO_HRM_LIB,
+ SSP_SENSOR_MAX,
+};
+
+struct ssp_data;
+
+/**
+ * struct ssp_sensor_data - Sensor object
+ * @process_data: Callback to feed sensor data.
+ * @type: Used sensor type.
+ * @buffer: Received data buffer.
+ */
+struct ssp_sensor_data {
+ int (*process_data)(struct iio_dev *indio_dev, void *buf,
+ int64_t timestamp);
+ enum ssp_sensor_type type;
+ u8 *buffer;
+};
+
+void ssp_register_consumer(struct iio_dev *indio_dev,
+ enum ssp_sensor_type type);
+
+int ssp_enable_sensor(struct ssp_data *data, enum ssp_sensor_type type,
+ u32 delay);
+
+int ssp_disable_sensor(struct ssp_data *data, enum ssp_sensor_type type);
+
+u32 ssp_get_sensor_delay(struct ssp_data *data, enum ssp_sensor_type);
+
+int ssp_change_delay(struct ssp_data *data, enum ssp_sensor_type type,
+ u32 delay);
+#endif /* _SSP_SENSORS_H_ */
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
new file mode 100644
index 000000000..2c476acb8
--- /dev/null
+++ b/include/linux/iio/common/st_sensors.h
@@ -0,0 +1,290 @@
+/*
+ * STMicroelectronics sensors library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_SENSORS_H
+#define ST_SENSORS_H
+
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/irqreturn.h>
+#include <linux/iio/trigger.h>
+#include <linux/bitops.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/platform_data/st_sensors_pdata.h>
+
+#define ST_SENSORS_TX_MAX_LENGTH 2
+#define ST_SENSORS_RX_MAX_LENGTH 6
+
+#define ST_SENSORS_ODR_LIST_MAX 10
+#define ST_SENSORS_FULLSCALE_AVL_MAX 10
+
+#define ST_SENSORS_NUMBER_ALL_CHANNELS 4
+#define ST_SENSORS_ENABLE_ALL_AXIS 0x07
+#define ST_SENSORS_SCAN_X 0
+#define ST_SENSORS_SCAN_Y 1
+#define ST_SENSORS_SCAN_Z 2
+#define ST_SENSORS_DEFAULT_POWER_ON_VALUE 0x01
+#define ST_SENSORS_DEFAULT_POWER_OFF_VALUE 0x00
+#define ST_SENSORS_DEFAULT_WAI_ADDRESS 0x0f
+#define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20
+#define ST_SENSORS_DEFAULT_AXIS_MASK 0x07
+#define ST_SENSORS_DEFAULT_AXIS_N_BIT 3
+
+#define ST_SENSORS_MAX_NAME 17
+#define ST_SENSORS_MAX_4WAI 7
+
+#define ST_SENSORS_LSM_CHANNELS(device_type, mask, index, mod, \
+ ch2, s, endian, rbits, sbits, addr) \
+{ \
+ .type = device_type, \
+ .modified = mod, \
+ .info_mask_separate = mask, \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = index, \
+ .channel2 = ch2, \
+ .address = addr, \
+ .scan_type = { \
+ .sign = s, \
+ .realbits = rbits, \
+ .shift = sbits - rbits, \
+ .storagebits = sbits, \
+ .endianness = endian, \
+ }, \
+}
+
+#define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \
+ IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \
+ st_sensors_sysfs_sampling_frequency_avail)
+
+#define ST_SENSORS_DEV_ATTR_SCALE_AVAIL(name) \
+ IIO_DEVICE_ATTR(name, S_IRUGO, \
+ st_sensors_sysfs_scale_avail, NULL , 0);
+
+struct st_sensor_odr_avl {
+ unsigned int hz;
+ u8 value;
+};
+
+struct st_sensor_odr {
+ u8 addr;
+ u8 mask;
+ struct st_sensor_odr_avl odr_avl[ST_SENSORS_ODR_LIST_MAX];
+};
+
+struct st_sensor_power {
+ u8 addr;
+ u8 mask;
+ u8 value_off;
+ u8 value_on;
+};
+
+struct st_sensor_axis {
+ u8 addr;
+ u8 mask;
+};
+
+struct st_sensor_fullscale_avl {
+ unsigned int num;
+ u8 value;
+ unsigned int gain;
+ unsigned int gain2;
+};
+
+struct st_sensor_fullscale {
+ u8 addr;
+ u8 mask;
+ struct st_sensor_fullscale_avl fs_avl[ST_SENSORS_FULLSCALE_AVL_MAX];
+};
+
+/**
+ * struct st_sensor_bdu - ST sensor device block data update
+ * @addr: address of the register.
+ * @mask: mask to write the block data update flag.
+ */
+struct st_sensor_bdu {
+ u8 addr;
+ u8 mask;
+};
+
+/**
+ * struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
+ * @addr: address of the register.
+ * @mask_int1: mask to enable/disable IRQ on INT1 pin.
+ * @mask_int2: mask to enable/disable IRQ on INT2 pin.
+ * struct ig1 - represents the Interrupt Generator 1 of sensors.
+ * @en_addr: address of the enable ig1 register.
+ * @en_mask: mask to write the on/off value for enable.
+ */
+struct st_sensor_data_ready_irq {
+ u8 addr;
+ u8 mask_int1;
+ u8 mask_int2;
+ struct {
+ u8 en_addr;
+ u8 en_mask;
+ } ig1;
+};
+
+/**
+ * struct st_sensor_transfer_buffer - ST sensor device I/O buffer
+ * @buf_lock: Mutex to protect rx and tx buffers.
+ * @tx_buf: Buffer used by SPI transfer function to send data to the sensors.
+ * This buffer is used to avoid DMA not-aligned issue.
+ * @rx_buf: Buffer used by SPI transfer to receive data from sensors.
+ * This buffer is used to avoid DMA not-aligned issue.
+ */
+struct st_sensor_transfer_buffer {
+ struct mutex buf_lock;
+ u8 rx_buf[ST_SENSORS_RX_MAX_LENGTH];
+ u8 tx_buf[ST_SENSORS_TX_MAX_LENGTH] ____cacheline_aligned;
+};
+
+/**
+ * struct st_sensor_transfer_function - ST sensor device I/O function
+ * @read_byte: Function used to read one byte.
+ * @write_byte: Function used to write one byte.
+ * @read_multiple_byte: Function used to read multiple bytes.
+ */
+struct st_sensor_transfer_function {
+ int (*read_byte) (struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 *res_byte);
+ int (*write_byte) (struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, u8 data);
+ int (*read_multiple_byte) (struct st_sensor_transfer_buffer *tb,
+ struct device *dev, u8 reg_addr, int len, u8 *data,
+ bool multiread_bit);
+};
+
+/**
+ * struct st_sensor_settings - ST specific sensor settings
+ * @wai: Contents of WhoAmI register.
+ * @sensors_supported: List of sensors supported by this settings struct.
+ * @ch: IIO channels for the sensor.
+ * @num_ch: Number of IIO channels for the sensor.
+ * @odr: Output data rate register and ODR list available.
+ * @pw: Power register of the sensor.
+ * @enable_axis: Enable one or more axis of the sensor.
+ * @fs: Full scale register and full scale list available.
+ * @bdu: Block data update register.
+ * @drdy_irq: Data ready register of the sensor.
+ * @multi_read_bit: Whether a particular bit must be set for [I2C/SPI] multi-read.
+ * @bootime: Samples to discard when the sensor passes from power-down to power-up.
+ */
+struct st_sensor_settings {
+ u8 wai;
+ char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
+ struct iio_chan_spec *ch;
+ int num_ch;
+ struct st_sensor_odr odr;
+ struct st_sensor_power pw;
+ struct st_sensor_axis enable_axis;
+ struct st_sensor_fullscale fs;
+ struct st_sensor_bdu bdu;
+ struct st_sensor_data_ready_irq drdy_irq;
+ bool multi_read_bit;
+ unsigned int bootime;
+};
+
+/**
+ * struct st_sensor_data - ST sensor device status
+ * @dev: Pointer to instance of struct device (I2C or SPI).
+ * @trig: The trigger in use by the core driver.
+ * @sensor_settings: Pointer to the specific sensor settings in use.
+ * @current_fullscale: Maximum range of measure by the sensor.
+ * @vdd: Pointer to sensor's Vdd power supply
+ * @vdd_io: Pointer to sensor's Vdd-IO power supply
+ * @enabled: Status of the sensor (false->off, true->on).
+ * @multiread_bit: Whether a particular bit must be set for [I2C/SPI] multi-read.
+ * @buffer_data: Data used by buffer part.
+ * @odr: Output data rate of the sensor [Hz].
+ * @num_data_channels: Number of data channels used in buffer.
+ * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
+ * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
+ * @tf: Transfer function structure used by I/O operations.
+ * @tb: Transfer buffers and mutex used by I/O operations.
+ */
+struct st_sensor_data {
+ struct device *dev;
+ struct iio_trigger *trig;
+ struct st_sensor_settings *sensor_settings;
+ struct st_sensor_fullscale_avl *current_fullscale;
+ struct regulator *vdd;
+ struct regulator *vdd_io;
+
+ bool enabled;
+ bool multiread_bit;
+
+ char *buffer_data;
+
+ unsigned int odr;
+ unsigned int num_data_channels;
+
+ u8 drdy_int_pin;
+
+ unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
+
+ const struct st_sensor_transfer_function *tf;
+ struct st_sensor_transfer_buffer tb;
+};
+
+#ifdef CONFIG_IIO_BUFFER
+irqreturn_t st_sensors_trigger_handler(int irq, void *p);
+
+int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf);
+#endif
+
+#ifdef CONFIG_IIO_TRIGGER
+int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ const struct iio_trigger_ops *trigger_ops);
+
+void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
+
+#else
+static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
+ const struct iio_trigger_ops *trigger_ops)
+{
+ return 0;
+}
+static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
+{
+ return;
+}
+#endif
+
+int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata);
+
+int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable);
+
+int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
+
+void st_sensors_power_enable(struct iio_dev *indio_dev);
+
+void st_sensors_power_disable(struct iio_dev *indio_dev);
+
+int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr);
+
+int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable);
+
+int st_sensors_set_fullscale_by_gain(struct iio_dev *indio_dev, int scale);
+
+int st_sensors_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *ch, int *val);
+
+int st_sensors_check_device_support(struct iio_dev *indio_dev,
+ int num_sensors_list, const struct st_sensor_settings *sensor_settings);
+
+ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+#endif /* ST_SENSORS_H */
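
A sketch of a single X-axis channel described with ST_SENSORS_LSM_CHANNELS(); the 0x28 output register address and the 16-bit little-endian format are illustrative values, and IIO_ACCEL/IIO_MOD_X/IIO_CHAN_SOFT_TIMESTAMP() come from the IIO core headers.

static const struct iio_chan_spec example_accel_x_channel[] = {
	ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
			ST_SENSORS_SCAN_X, 1, IIO_MOD_X,
			's', IIO_LE, 16, 16, 0x28),
	IIO_CHAN_SOFT_TIMESTAMP(1),
};
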
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
new file mode 100644
index 000000000..1796af093
--- /dev/null
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -0,0 +1,31 @@
+/*
+ * STMicroelectronics sensors i2c library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_SENSORS_I2C_H
+#define ST_SENSORS_I2C_H
+
+#include <linux/i2c.h>
+#include <linux/iio/common/st_sensors.h>
+#include <linux/of.h>
+
+void st_sensors_i2c_configure(struct iio_dev *indio_dev,
+ struct i2c_client *client, struct st_sensor_data *sdata);
+
+#ifdef CONFIG_OF
+void st_sensors_of_i2c_probe(struct i2c_client *client,
+ const struct of_device_id *match);
+#else
+static inline void st_sensors_of_i2c_probe(struct i2c_client *client,
+ const struct of_device_id *match)
+{
+}
+#endif
+
+#endif /* ST_SENSORS_I2C_H */
diff --git a/include/linux/iio/common/st_sensors_spi.h b/include/linux/iio/common/st_sensors_spi.h
new file mode 100644
index 000000000..d964a3563
--- /dev/null
+++ b/include/linux/iio/common/st_sensors_spi.h
@@ -0,0 +1,20 @@
+/*
+ * STMicroelectronics sensors spi library driver
+ *
+ * Copyright 2012-2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_SENSORS_SPI_H
+#define ST_SENSORS_SPI_H
+
+#include <linux/spi/spi.h>
+#include <linux/iio/common/st_sensors.h>
+
+void st_sensors_spi_configure(struct iio_dev *indio_dev,
+ struct spi_device *spi, struct st_sensor_data *sdata);
+
+#endif /* ST_SENSORS_SPI_H */
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
new file mode 100644
index 000000000..26fb8f634
--- /dev/null
+++ b/include/linux/iio/consumer.h
@@ -0,0 +1,209 @@
+/*
+ * Industrial I/O in kernel consumer interface
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_INKERN_CONSUMER_H_
+#define _IIO_INKERN_CONSUMER_H_
+
+#include <linux/types.h>
+#include <linux/iio/types.h>
+
+struct iio_dev;
+struct iio_chan_spec;
+struct device;
+
+/**
+ * struct iio_channel - everything needed for a consumer to use a channel
+ * @indio_dev: Device on which the channel exists.
+ * @channel: Full description of the channel.
+ * @data: Data about the channel used by consumer.
+ */
+struct iio_channel {
+ struct iio_dev *indio_dev;
+ const struct iio_chan_spec *channel;
+ void *data;
+};
+
+/**
+ * iio_channel_get() - get description of all that is needed to access channel.
+ * @dev: Pointer to consumer device. Device name must match
+ * the name of the device as provided in the iio_map
+ * with which the desired provider to consumer mapping
+ * was registered.
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channels use within
+ * the consumer. E.g. 'battery_voltage'
+ */
+struct iio_channel *iio_channel_get(struct device *dev,
+ const char *consumer_channel);
+
+/**
+ * iio_channel_release() - release channels obtained via iio_channel_get
+ * @chan: The channel to be released.
+ */
+void iio_channel_release(struct iio_channel *chan);
+
+/**
+ * iio_channel_get_all() - get all channels associated with a client
+ * @dev: Pointer to consumer device.
+ *
+ * Returns an array of iio_channel structures terminated with one with
+ * null iio_dev pointer.
+ * This function is used by fairly generic consumers to get all the
+ * channels registered as having this consumer.
+ */
+struct iio_channel *iio_channel_get_all(struct device *dev);
+
+/**
+ * iio_channel_release_all() - reverse iio_channel_get_all
+ * @chan: Array of channels to be released.
+ */
+void iio_channel_release_all(struct iio_channel *chan);
+
+struct iio_cb_buffer;
+/**
+ * iio_channel_get_all_cb() - register callback for triggered capture
+ * @dev: Pointer to client device.
+ * @cb: Callback function.
+ * @private: Private data passed to callback.
+ *
+ * NB right now we have no ability to mux data from multiple devices.
+ * So if the channels requested come from different devices this will
+ * fail.
+ */
+struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
+ int (*cb)(const void *data,
+ void *private),
+ void *private);
+/**
+ * iio_channel_release_all_cb() - release and unregister the callback.
+ * @cb_buffer: The callback buffer that was allocated.
+ */
+void iio_channel_release_all_cb(struct iio_cb_buffer *cb_buffer);
+
+/**
+ * iio_channel_start_all_cb() - start the flow of data through callback.
+ * @cb_buff: The callback buffer we are starting.
+ */
+int iio_channel_start_all_cb(struct iio_cb_buffer *cb_buff);
+
+/**
+ * iio_channel_stop_all_cb() - stop the flow of data through the callback.
+ * @cb_buff: The callback buffer we are stopping.
+ */
+void iio_channel_stop_all_cb(struct iio_cb_buffer *cb_buff);
+
+/**
+ * iio_channel_cb_get_channels() - get access to the underlying channels.
+ * @cb_buff: The callback buffer from whom we want the channel
+ * information.
+ *
+ * This function allows one to obtain information about the channels.
+ * Whilst this may allow direct reading if all buffers are disabled, the
+ * primary aim is to allow drivers that are consuming a channel to query
+ * things like scaling of the channel.
+ */
+struct iio_channel
+*iio_channel_cb_get_channels(const struct iio_cb_buffer *cb_buffer);
+
+/**
+ * iio_read_channel_raw() - read from a given channel
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note raw reads from iio channels are in adc counts and hence
+ * scale will need to be applied if standard units are required.
+ */
+int iio_read_channel_raw(struct iio_channel *chan,
+ int *val);
+
+/**
+ * iio_read_channel_average_raw() - read from a given channel
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note raw reads from iio channels are in adc counts and hence
+ * scale will need to be applied if standard units are required.
+ *
+ * In contrast to the normal iio_read_channel_raw, this function
+ * returns the average of multiple reads.
+ */
+int iio_read_channel_average_raw(struct iio_channel *chan, int *val);
+
+/**
+ * iio_read_channel_processed() - read processed value from a given channel
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Returns an error code or 0.
+ *
+ * This function will read a processed value from a channel. A processed value
+ * means that this value will have the correct unit and not some device internal
+ * representation. If the device does not support reporting a processed value
+ * the function will query the raw value and the channel's scale and offset and
+ * do the appropriate transformation.
+ */
+int iio_read_channel_processed(struct iio_channel *chan, int *val);
+
+/**
+ * iio_write_channel_raw() - write to a given channel
+ * @chan: The channel being queried.
+ * @val: Value being written.
+ *
+ * Note raw writes to iio channels are in dac counts and hence
+ * scale will need to be applied if standard units are required.
+ */
+int iio_write_channel_raw(struct iio_channel *chan, int val);
+
+/**
+ * iio_get_channel_type() - get the type of a channel
+ * @channel: The channel being queried.
+ * @type: The type of the channel.
+ *
+ * Returns the enum iio_chan_type of the channel.
+ */
+int iio_get_channel_type(struct iio_channel *channel,
+ enum iio_chan_type *type);
+
+/**
+ * iio_read_channel_scale() - read the scale value for a channel
+ * @chan: The channel being queried.
+ * @val: First part of value read back.
+ * @val2: Second part of value read back.
+ *
+ * Note returns a description of what is in val and val2, such
+ * as IIO_VAL_INT_PLUS_MICRO telling us we have a value of val
+ * + val2/1e6
+ */
+int iio_read_channel_scale(struct iio_channel *chan, int *val,
+ int *val2);
+
+/**
+ * iio_convert_raw_to_processed() - Converts a raw value to a processed value
+ * @chan: The channel being queried
+ * @raw: The raw IIO value to convert
+ * @processed: The result of the conversion
+ * @scale: Scale factor to apply during the conversion
+ *
+ * Returns an error code or 0.
+ *
+ * This function converts a raw value to a processed value for a specific
+ * channel. A raw value is the device internal representation of a sample,
+ * i.e. the value returned by iio_read_channel_raw, so its unit is device
+ * dependent. A processed value, on the other hand, has a normalized unit
+ * according to the IIO specification.
+ *
+ * The scale factor allows one to increase the precision of the returned value.
+ * For a scale factor of 1 the function will return the result in the normal IIO
+ * unit for the channel type, e.g. millivolts for voltage channels; if you want
+ * nanovolts instead, pass 1000000 as the scale factor.
+ */
+int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
+ int *processed, unsigned int scale);
+
+#endif
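
As a quick orientation to the consumer interface declared above, here is a minimal sketch of reading one mapped channel as a processed value; the device pointer, the "battery_voltage" mapping name and the millivolt interpretation are assumptions for illustration, not taken from this header.

/* Minimal consumer sketch, assuming a "battery_voltage" iio_map exists. */
#include <linux/err.h>
#include <linux/device.h>
#include <linux/iio/consumer.h>

static int example_read_battery_mv(struct device *dev, int *mv)
{
	struct iio_channel *chan;
	int ret;

	/* Name must match the consumer_channel of a registered iio_map. */
	chan = iio_channel_get(dev, "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Processed reads already have scale and offset applied. */
	ret = iio_read_channel_processed(chan, mv);

	iio_channel_release(chan);
	return ret;
}
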
diff --git a/include/linux/iio/dac/ad5421.h b/include/linux/iio/dac/ad5421.h
new file mode 100644
index 000000000..8fd8f057a
--- /dev/null
+++ b/include/linux/iio/dac/ad5421.h
@@ -0,0 +1,28 @@
+#ifndef __IIO_DAC_AD5421_H__
+#define __IIO_DAC_AD5421_H__
+
+/**
+ * enum ad5421_current_range - Current range the AD5421 is configured for.
+ * @AD5421_CURRENT_RANGE_4mA_20mA: 4 mA to 20 mA (RANGE1,0 pins = 00)
+ * @AD5421_CURRENT_RANGE_3mA8_21mA: 3.8 mA to 21 mA (RANGE1,0 pins = x1)
+ * @AD5421_CURRENT_RANGE_3mA2_24mA: 3.2 mA to 24 mA (RANGE1,0 pins = 10)
+ */
+
+enum ad5421_current_range {
+ AD5421_CURRENT_RANGE_4mA_20mA,
+ AD5421_CURRENT_RANGE_3mA8_21mA,
+ AD5421_CURRENT_RANGE_3mA2_24mA,
+};
+
+/**
+ * struct ad5421_platform_data - AD5421 DAC driver platform data
+ * @external_vref: whether an external reference voltage is used or not
+ * @current_range: Current range the AD5421 is configured for
+ */
+
+struct ad5421_platform_data {
+ bool external_vref;
+ enum ad5421_current_range current_range;
+};
+
+#endif
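
For illustration only, a board file could supply this platform data roughly as follows; the reference choice and range below are assumptions for the sketch, not recommended settings.

/* Hypothetical board-file platform data for an AD5421. */
#include <linux/iio/dac/ad5421.h>

static struct ad5421_platform_data example_ad5421_pdata = {
	.external_vref = false,
	.current_range = AD5421_CURRENT_RANGE_4mA_20mA,
};
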
diff --git a/include/linux/iio/dac/ad5504.h b/include/linux/iio/dac/ad5504.h
new file mode 100644
index 000000000..43895376a
--- /dev/null
+++ b/include/linux/iio/dac/ad5504.h
@@ -0,0 +1,16 @@
+/*
+ * AD5504 SPI DAC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef SPI_AD5504_H_
+#define SPI_AD5504_H_
+
+struct ad5504_platform_data {
+ u16 vref_mv;
+};
+
+#endif /* SPI_AD5504_H_ */
diff --git a/include/linux/iio/dac/ad5791.h b/include/linux/iio/dac/ad5791.h
new file mode 100644
index 000000000..45ee281c6
--- /dev/null
+++ b/include/linux/iio/dac/ad5791.h
@@ -0,0 +1,25 @@
+/*
+ * AD5791 SPI DAC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef SPI_AD5791_H_
+#define SPI_AD5791_H_
+
+/**
+ * struct ad5791_platform_data - platform specific information
+ * @vref_pos_mv: Vdd Positive Analog Supply Voltage (mV)
+ * @vref_neg_mv: Vdd Negative Analog Supply Voltage (mV)
+ * @use_rbuf_gain2: ext. amplifier connected in gain of two configuration
+ */
+
+struct ad5791_platform_data {
+ u16 vref_pos_mv;
+ u16 vref_neg_mv;
+ bool use_rbuf_gain2;
+};
+
+#endif /* SPI_AD5791_H_ */
diff --git a/include/linux/iio/dac/max517.h b/include/linux/iio/dac/max517.h
new file mode 100644
index 000000000..7668716cd
--- /dev/null
+++ b/include/linux/iio/dac/max517.h
@@ -0,0 +1,15 @@
+/*
+ * MAX517 DAC driver
+ *
+ * Copyright 2011 Roland Stigge <stigge@antcom.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_DAC_MAX517_H_
+#define IIO_DAC_MAX517_H_
+
+struct max517_platform_data {
+ u16 vref_mv[8];
+};
+
+#endif /* IIO_DAC_MAX517_H_ */
diff --git a/include/linux/iio/dac/mcp4725.h b/include/linux/iio/dac/mcp4725.h
new file mode 100644
index 000000000..91530e661
--- /dev/null
+++ b/include/linux/iio/dac/mcp4725.h
@@ -0,0 +1,16 @@
+/*
+ * MCP4725 DAC driver
+ *
+ * Copyright (C) 2012 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef IIO_DAC_MCP4725_H_
+#define IIO_DAC_MCP4725_H_
+
+struct mcp4725_platform_data {
+ u16 vref_mv;
+};
+
+#endif /* IIO_DAC_MCP4725_H_ */
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
new file mode 100644
index 000000000..7dfb10ee2
--- /dev/null
+++ b/include/linux/iio/driver.h
@@ -0,0 +1,31 @@
+/*
+ * Industrial I/O in kernel access map interface.
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_INKERN_H_
+#define _IIO_INKERN_H_
+
+struct iio_map;
+
+/**
+ * iio_map_array_register() - tell the core about inkernel consumers
+ * @indio_dev: provider device
+ * @map: array of mappings specifying association of channel with client
+ */
+int iio_map_array_register(struct iio_dev *indio_dev,
+ struct iio_map *map);
+
+/**
+ * iio_map_array_unregister() - tell the core to remove consumer mappings for
+ * the given provider device
+ * @indio_dev: provider device
+ */
+int iio_map_array_unregister(struct iio_dev *indio_dev);
+
+#endif
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
new file mode 100644
index 000000000..8ad87d1c5
--- /dev/null
+++ b/include/linux/iio/events.h
@@ -0,0 +1,59 @@
+/* The industrial I/O - event passing to userspace
+ *
+ * Copyright (c) 2008-2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_EVENTS_H_
+#define _IIO_EVENTS_H_
+
+#include <linux/iio/types.h>
+#include <uapi/linux/iio/events.h>
+
+/**
+ * IIO_EVENT_CODE() - create event identifier
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @diff: Whether the event is for a differential channel or not.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @chan: Channel number for non-differential channels.
+ * @chan1: First channel number for differential channels.
+ * @chan2: Second channel number for differential channels.
+ */
+
+#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
+ (((u64)type << 56) | ((u64)diff << 55) | \
+ ((u64)direction << 48) | ((u64)modifier << 40) | \
+ ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \
+ ((u16)chan))
+
+
+/**
+ * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \
+ type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
+
+/**
+ * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+
+#endif
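
To make the encoding concrete, a driver's interrupt handler could report a rising threshold event on voltage channel 0 roughly as sketched below; the channel type, number and direction are assumptions for illustration.

/* Sketch: pushing a rising threshold event for voltage channel 0. */
#include <linux/iio/iio.h>
#include <linux/iio/events.h>

static void example_report_threshold(struct iio_dev *indio_dev)
{
	iio_push_event(indio_dev,
		       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
					    IIO_EV_TYPE_THRESH,
					    IIO_EV_DIR_RISING),
		       iio_get_time_ns());
}
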
diff --git a/include/linux/iio/frequency/ad9523.h b/include/linux/iio/frequency/ad9523.h
new file mode 100644
index 000000000..12ce3ee42
--- /dev/null
+++ b/include/linux/iio/frequency/ad9523.h
@@ -0,0 +1,195 @@
+/*
+ * AD9523 SPI Low Jitter Clock Generator
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef IIO_FREQUENCY_AD9523_H_
+#define IIO_FREQUENCY_AD9523_H_
+
+enum outp_drv_mode {
+ TRISTATE,
+ LVPECL_8mA,
+ LVDS_4mA,
+ LVDS_7mA,
+ HSTL0_16mA,
+ HSTL1_8mA,
+ CMOS_CONF1,
+ CMOS_CONF2,
+ CMOS_CONF3,
+ CMOS_CONF4,
+ CMOS_CONF5,
+ CMOS_CONF6,
+ CMOS_CONF7,
+ CMOS_CONF8,
+ CMOS_CONF9
+};
+
+enum ref_sel_mode {
+ NONEREVERTIVE_STAY_ON_REFB,
+ REVERT_TO_REFA,
+ SELECT_REFA,
+ SELECT_REFB,
+ EXT_REF_SEL
+};
+
+/**
+ * struct ad9523_channel_spec - Output channel configuration
+ *
+ * @channel_num: Output channel number.
+ * @divider_output_invert_en: Invert the polarity of the output clock.
+ * @sync_ignore_en: Ignore chip-level SYNC signal.
+ * @low_power_mode_en: Reduce power used in the differential output modes.
+ * @use_alt_clock_src: Channel divider uses alternative clk source.
+ * @output_dis: Disables and powers down the entire channel.
+ * @driver_mode: Output driver mode (logic level family).
+ * @divider_phase: Divider initial phase after a SYNC. Range 0..63
+ * LSB = 1/2 of a period of the divider input clock.
+ * @channel_divider: 10-bit channel divider.
+ * @extended_name: Optional descriptive channel name.
+ */
+
+struct ad9523_channel_spec {
+ unsigned channel_num;
+ bool divider_output_invert_en;
+ bool sync_ignore_en;
+ bool low_power_mode_en;
+ /* CH0..CH3 VCXO, CH4..CH9 VCO2 */
+ bool use_alt_clock_src;
+ bool output_dis;
+ enum outp_drv_mode driver_mode;
+ unsigned char divider_phase;
+ unsigned short channel_divider;
+ char extended_name[16];
+};
+
+enum pll1_rzero_resistor {
+ RZERO_883_OHM,
+ RZERO_677_OHM,
+ RZERO_341_OHM,
+ RZERO_135_OHM,
+ RZERO_10_OHM,
+ RZERO_USE_EXT_RES = 8,
+};
+
+enum rpole2_resistor {
+ RPOLE2_900_OHM,
+ RPOLE2_450_OHM,
+ RPOLE2_300_OHM,
+ RPOLE2_225_OHM,
+};
+
+enum rzero_resistor {
+ RZERO_3250_OHM,
+ RZERO_2750_OHM,
+ RZERO_2250_OHM,
+ RZERO_2100_OHM,
+ RZERO_3000_OHM,
+ RZERO_2500_OHM,
+ RZERO_2000_OHM,
+ RZERO_1850_OHM,
+};
+
+enum cpole1_capacitor {
+ CPOLE1_0_PF,
+ CPOLE1_8_PF,
+ CPOLE1_16_PF,
+ CPOLE1_24_PF,
+ _CPOLE1_24_PF, /* place holder */
+ CPOLE1_32_PF,
+ CPOLE1_40_PF,
+ CPOLE1_48_PF,
+};
+
+/**
+ * struct ad9523_platform_data - platform specific information
+ *
+ * @vcxo_freq: External VCXO frequency in Hz
+ * @refa_diff_rcv_en: REFA differential/single-ended input selection.
+ * @refb_diff_rcv_en: REFB differential/single-ended input selection.
+ * @zd_in_diff_en: Zero Delay differential/single-ended input selection.
+ * @osc_in_diff_en: OSC differential/ single-ended input selection.
+ * @refa_cmos_neg_inp_en: REFA single-ended neg./pos. input enable.
+ * @refb_cmos_neg_inp_en: REFB single-ended neg./pos. input enable.
+ * @zd_in_cmos_neg_inp_en: Zero Delay single-ended neg./pos. input enable.
+ * @osc_in_cmos_neg_inp_en: OSC single-ended neg./pos. input enable.
+ * @refa_r_div: PLL1 10-bit REFA R divider.
+ * @refb_r_div: PLL1 10-bit REFB R divider.
+ * @pll1_feedback_div: PLL1 10-bit Feedback N divider.
+ * @pll1_charge_pump_current_nA: Magnitude of PLL1 charge pump current (nA).
+ * @zero_delay_mode_internal_en: Internal, external Zero Delay mode selection.
+ * @osc_in_feedback_en: PLL1 feedback path, local feedback from
+ * the OSC_IN receiver or zero delay mode
+ * @pll1_loop_filter_rzero: PLL1 Loop Filter Zero Resistor selection.
+ * @ref_mode: Reference selection mode.
+ * @pll2_charge_pump_current_nA: Magnitude of PLL2 charge pump current (nA).
+ * @pll2_ndiv_a_cnt: PLL2 Feedback N-divider, A Counter, range 0..4.
+ * @pll2_ndiv_b_cnt: PLL2 Feedback N-divider, B Counter, range 0..63.
+ * @pll2_freq_doubler_en: PLL2 frequency doubler enable.
+ * @pll2_r2_div: PLL2 R2 divider, range 0..31.
+ * @pll2_vco_diff_m1: VCO1 divider, range 3..5.
+ * @pll2_vco_diff_m2: VCO2 divider, range 3..5.
+ * @rpole2: PLL2 loop filter Rpole resistor value.
+ * @rzero: PLL2 loop filter Rzero resistor value.
+ * @cpole1: PLL2 loop filter Cpole capacitor value.
+ * @rzero_bypass_en: PLL2 loop filter Rzero bypass enable.
+ * @num_channels: Array size of struct ad9523_channel_spec.
+ * @channels: Pointer to channel array.
+ * @name: Optional alternative iio device name.
+ */
+
+struct ad9523_platform_data {
+ unsigned long vcxo_freq;
+
+ /* Differential/ Single-Ended Input Configuration */
+ bool refa_diff_rcv_en;
+ bool refb_diff_rcv_en;
+ bool zd_in_diff_en;
+ bool osc_in_diff_en;
+
+ /*
+ * Valid if differential input disabled
+ * if false defaults to pos input
+ */
+ bool refa_cmos_neg_inp_en;
+ bool refb_cmos_neg_inp_en;
+ bool zd_in_cmos_neg_inp_en;
+ bool osc_in_cmos_neg_inp_en;
+
+ /* PLL1 Setting */
+ unsigned short refa_r_div;
+ unsigned short refb_r_div;
+ unsigned short pll1_feedback_div;
+ unsigned short pll1_charge_pump_current_nA;
+ bool zero_delay_mode_internal_en;
+ bool osc_in_feedback_en;
+ enum pll1_rzero_resistor pll1_loop_filter_rzero;
+
+ /* Reference */
+ enum ref_sel_mode ref_mode;
+
+ /* PLL2 Setting */
+ unsigned int pll2_charge_pump_current_nA;
+ unsigned char pll2_ndiv_a_cnt;
+ unsigned char pll2_ndiv_b_cnt;
+ bool pll2_freq_doubler_en;
+ unsigned char pll2_r2_div;
+ unsigned char pll2_vco_diff_m1; /* 3..5 */
+ unsigned char pll2_vco_diff_m2; /* 3..5 */
+
+ /* Loop Filter PLL2 */
+ enum rpole2_resistor rpole2;
+ enum rzero_resistor rzero;
+ enum cpole1_capacitor cpole1;
+ bool rzero_bypass_en;
+
+ /* Output Channel Configuration */
+ int num_channels;
+ struct ad9523_channel_spec *channels;
+
+ char name[SPI_NAME_SIZE];
+};
+
+#endif /* IIO_FREQUENCY_AD9523_H_ */
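
Purely as a sketch of how this platform data fits together, a board file might describe a single LVDS output as below; the clock rates, divider values and names are placeholder assumptions, not recommended settings.

/* Placeholder AD9523 board configuration with one output channel. */
#include <linux/kernel.h>
#include <linux/spi/spi.h>	/* SPI_NAME_SIZE used by the header */
#include <linux/iio/frequency/ad9523.h>

static struct ad9523_channel_spec example_ad9523_channels[] = {
	{
		.channel_num = 0,
		.driver_mode = LVDS_4mA,
		.channel_divider = 10,
		.extended_name = "adc_clk",
	},
};

static struct ad9523_platform_data example_ad9523_pdata = {
	.vcxo_freq = 122880000,
	.refa_diff_rcv_en = true,
	.refa_r_div = 1,
	.pll1_feedback_div = 4,
	.pll1_charge_pump_current_nA = 2000,
	.ref_mode = REVERT_TO_REFA,
	.num_channels = ARRAY_SIZE(example_ad9523_channels),
	.channels = example_ad9523_channels,
	.name = "ad9523-example",
};
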
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
new file mode 100644
index 000000000..ffd8c8f90
--- /dev/null
+++ b/include/linux/iio/frequency/adf4350.h
@@ -0,0 +1,128 @@
+/*
+ * ADF4350/ADF4351 SPI PLL driver
+ *
+ * Copyright 2012-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef IIO_PLL_ADF4350_H_
+#define IIO_PLL_ADF4350_H_
+
+/* Registers */
+#define ADF4350_REG0 0
+#define ADF4350_REG1 1
+#define ADF4350_REG2 2
+#define ADF4350_REG3 3
+#define ADF4350_REG4 4
+#define ADF4350_REG5 5
+
+/* REG0 Bit Definitions */
+#define ADF4350_REG0_FRACT(x) (((x) & 0xFFF) << 3)
+#define ADF4350_REG0_INT(x) (((x) & 0xFFFF) << 15)
+
+/* REG1 Bit Definitions */
+#define ADF4350_REG1_MOD(x) (((x) & 0xFFF) << 3)
+#define ADF4350_REG1_PHASE(x) (((x) & 0xFFF) << 15)
+#define ADF4350_REG1_PRESCALER (1 << 27)
+
+/* REG2 Bit Definitions */
+#define ADF4350_REG2_COUNTER_RESET_EN (1 << 3)
+#define ADF4350_REG2_CP_THREESTATE_EN (1 << 4)
+#define ADF4350_REG2_POWER_DOWN_EN (1 << 5)
+#define ADF4350_REG2_PD_POLARITY_POS (1 << 6)
+#define ADF4350_REG2_LDP_6ns (1 << 7)
+#define ADF4350_REG2_LDP_10ns (0 << 7)
+#define ADF4350_REG2_LDF_FRACT_N (0 << 8)
+#define ADF4350_REG2_LDF_INT_N (1 << 8)
+#define ADF4350_REG2_CHARGE_PUMP_CURR_uA(x) (((((x)-312) / 312) & 0xF) << 9)
+#define ADF4350_REG2_DOUBLE_BUFF_EN (1 << 13)
+#define ADF4350_REG2_10BIT_R_CNT(x) ((x) << 14)
+#define ADF4350_REG2_RDIV2_EN (1 << 24)
+#define ADF4350_REG2_RMULT2_EN (1 << 25)
+#define ADF4350_REG2_MUXOUT(x) ((x) << 26)
+#define ADF4350_REG2_NOISE_MODE(x) (((unsigned)(x)) << 29)
+#define ADF4350_MUXOUT_THREESTATE 0
+#define ADF4350_MUXOUT_DVDD 1
+#define ADF4350_MUXOUT_GND 2
+#define ADF4350_MUXOUT_R_DIV_OUT 3
+#define ADF4350_MUXOUT_N_DIV_OUT 4
+#define ADF4350_MUXOUT_ANALOG_LOCK_DETECT 5
+#define ADF4350_MUXOUT_DIGITAL_LOCK_DETECT 6
+
+/* REG3 Bit Definitions */
+#define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3)
+#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16)
+#define ADF4350_REG3_12BIT_CSR_EN (1 << 18)
+#define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21)
+#define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22)
+#define ADF4351_REG3_BAND_SEL_CLOCK_MODE_HIGH (1 << 23)
+
+/* REG4 Bit Definitions */
+#define ADF4350_REG4_OUTPUT_PWR(x) ((x) << 3)
+#define ADF4350_REG4_RF_OUT_EN (1 << 5)
+#define ADF4350_REG4_AUX_OUTPUT_PWR(x) ((x) << 6)
+#define ADF4350_REG4_AUX_OUTPUT_EN (1 << 8)
+#define ADF4350_REG4_AUX_OUTPUT_FUND (1 << 9)
+#define ADF4350_REG4_AUX_OUTPUT_DIV (0 << 9)
+#define ADF4350_REG4_MUTE_TILL_LOCK_EN (1 << 10)
+#define ADF4350_REG4_VCO_PWRDOWN_EN (1 << 11)
+#define ADF4350_REG4_8BIT_BAND_SEL_CLKDIV(x) ((x) << 12)
+#define ADF4350_REG4_RF_DIV_SEL(x) ((x) << 20)
+#define ADF4350_REG4_FEEDBACK_DIVIDED (0 << 23)
+#define ADF4350_REG4_FEEDBACK_FUND (1 << 23)
+
+/* REG5 Bit Definitions */
+#define ADF4350_REG5_LD_PIN_MODE_LOW (0 << 22)
+#define ADF4350_REG5_LD_PIN_MODE_DIGITAL (1 << 22)
+#define ADF4350_REG5_LD_PIN_MODE_HIGH (3 << 22)
+
+/* Specifications */
+#define ADF4350_MAX_OUT_FREQ 4400000000ULL /* Hz */
+#define ADF4350_MIN_OUT_FREQ 137500000 /* Hz */
+#define ADF4351_MIN_OUT_FREQ 34375000 /* Hz */
+#define ADF4350_MIN_VCO_FREQ 2200000000ULL /* Hz */
+#define ADF4350_MAX_FREQ_45_PRESC 3000000000ULL /* Hz */
+#define ADF4350_MAX_FREQ_PFD 32000000 /* Hz */
+#define ADF4350_MAX_BANDSEL_CLK 125000 /* Hz */
+#define ADF4350_MAX_FREQ_REFIN 250000000 /* Hz */
+#define ADF4350_MAX_MODULUS 4095
+#define ADF4350_MAX_R_CNT 1023
+
+
+/**
+ * struct adf4350_platform_data - platform specific information
+ * @name: Optional device name.
+ * @clkin: REFin frequency in Hz.
+ * @channel_spacing: Channel spacing in Hz (influences MODULUS).
+ * @power_up_frequency: Optional, if set (in Hz) the PLL tunes to the desired
+ * frequency on probe.
+ * @ref_div_factor: Optional, if set the driver skips dynamic calculation
+ * and uses this default value instead.
+ * @ref_doubler_en: Enables reference doubler.
+ * @ref_div2_en: Enables reference divider.
+ * @r2_user_settings: User defined settings for ADF4350/1 REGISTER_2.
+ * @r3_user_settings: User defined settings for ADF4350/1 REGISTER_3.
+ * @r4_user_settings: User defined settings for ADF4350/1 REGISTER_4.
+ * @gpio_lock_detect: Optional, if set to a valid GPIO number,
+ * the PLL lock state is tested upon read.
+ * If not used, set to -1.
+ */
+
+struct adf4350_platform_data {
+ char name[32];
+ unsigned long clkin;
+ unsigned long channel_spacing;
+ unsigned long long power_up_frequency;
+
+ unsigned short ref_div_factor; /* 10-bit R counter */
+ bool ref_doubler_en;
+ bool ref_div2_en;
+
+ unsigned r2_user_settings;
+ unsigned r3_user_settings;
+ unsigned r4_user_settings;
+ int gpio_lock_detect;
+};
+
+#endif /* IIO_PLL_ADF4350_H_ */
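
By way of illustration, the structure could be filled in from a board file as below; the reference clock, channel spacing and register settings are placeholder assumptions rather than recommended values.

/* Placeholder ADF4350 board configuration. */
#include <linux/iio/frequency/adf4350.h>

static struct adf4350_platform_data example_adf4350_pdata = {
	.name = "adf4350-example",
	.clkin = 25000000,		/* 25 MHz REFin */
	.channel_spacing = 10000,	/* 10 kHz */
	.power_up_frequency = 2400000000ULL,
	.ref_div_factor = 0,		/* let the driver pick R */
	.r2_user_settings = ADF4350_REG2_PD_POLARITY_POS |
			    ADF4350_REG2_CHARGE_PUMP_CURR_uA(2500),
	.r4_user_settings = ADF4350_REG4_OUTPUT_PWR(3) |
			    ADF4350_REG4_RF_OUT_EN |
			    ADF4350_REG4_MUTE_TILL_LOCK_EN,
	.gpio_lock_detect = -1,		/* lock-detect GPIO not wired */
};
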
diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h
new file mode 100644
index 000000000..2a820850f
--- /dev/null
+++ b/include/linux/iio/gyro/itg3200.h
@@ -0,0 +1,154 @@
+/*
+ * itg3200.h -- support InvenSense ITG3200
+ * Digital 3-Axis Gyroscope driver
+ *
+ * Copyright (c) 2011 Christian Strobel <christian.strobel@iis.fraunhofer.de>
+ * Copyright (c) 2011 Manuel Stahl <manuel.stahl@iis.fraunhofer.de>
+ * Copyright (c) 2012 Thorsten Nowak <thorsten.nowak@iis.fraunhofer.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef I2C_ITG3200_H_
+#define I2C_ITG3200_H_
+
+#include <linux/iio/iio.h>
+
+/* Register with I2C address (34h) */
+#define ITG3200_REG_ADDRESS 0x00
+
+/* Sample rate divider
+ * Range: 0 to 255
+ * Default value: 0x00 */
+#define ITG3200_REG_SAMPLE_RATE_DIV 0x15
+
+/* Digital low pass filter settings */
+#define ITG3200_REG_DLPF 0x16
+/* DLPF full scale range */
+#define ITG3200_DLPF_FS_SEL_2000 0x18
+/* Bandwidth (Hz) and internal sample rate
+ * (kHz) of DLPF */
+#define ITG3200_DLPF_256_8 0x00
+#define ITG3200_DLPF_188_1 0x01
+#define ITG3200_DLPF_98_1 0x02
+#define ITG3200_DLPF_42_1 0x03
+#define ITG3200_DLPF_20_1 0x04
+#define ITG3200_DLPF_10_1 0x05
+#define ITG3200_DLPF_5_1 0x06
+
+#define ITG3200_DLPF_CFG_MASK 0x07
+
+/* Configuration for interrupt operations */
+#define ITG3200_REG_IRQ_CONFIG 0x17
+/* Logic level */
+#define ITG3200_IRQ_ACTIVE_LOW 0x80
+#define ITG3200_IRQ_ACTIVE_HIGH 0x00
+/* Drive type */
+#define ITG3200_IRQ_OPEN_DRAIN 0x40
+#define ITG3200_IRQ_PUSH_PULL 0x00
+/* Latch mode */
+#define ITG3200_IRQ_LATCH_UNTIL_CLEARED 0x20
+#define ITG3200_IRQ_LATCH_50US_PULSE 0x00
+/* Latch clear method */
+#define ITG3200_IRQ_LATCH_CLEAR_ANY 0x10
+#define ITG3200_IRQ_LATCH_CLEAR_STATUS 0x00
+/* Enable interrupt when device is ready */
+#define ITG3200_IRQ_DEVICE_RDY_ENABLE 0x04
+/* Enable interrupt when data is available */
+#define ITG3200_IRQ_DATA_RDY_ENABLE 0x01
+
+/* Determine the status of ITG-3200 interrupts */
+#define ITG3200_REG_IRQ_STATUS 0x1A
+/* Status of 'device is ready'-interrupt */
+#define ITG3200_IRQ_DEVICE_RDY_STATUS 0x04
+/* Status of 'data is available'-interrupt */
+#define ITG3200_IRQ_DATA_RDY_STATUS 0x01
+
+/* Sensor registers */
+#define ITG3200_REG_TEMP_OUT_H 0x1B
+#define ITG3200_REG_TEMP_OUT_L 0x1C
+#define ITG3200_REG_GYRO_XOUT_H 0x1D
+#define ITG3200_REG_GYRO_XOUT_L 0x1E
+#define ITG3200_REG_GYRO_YOUT_H 0x1F
+#define ITG3200_REG_GYRO_YOUT_L 0x20
+#define ITG3200_REG_GYRO_ZOUT_H 0x21
+#define ITG3200_REG_GYRO_ZOUT_L 0x22
+
+/* Power management */
+#define ITG3200_REG_POWER_MANAGEMENT 0x3E
+/* Reset device and internal registers to the
+ * power-up-default settings */
+#define ITG3200_RESET 0x80
+/* Enable low power sleep mode */
+#define ITG3200_SLEEP 0x40
+/* Put according gyroscope in standby mode */
+#define ITG3200_STANDBY_GYRO_X 0x20
+#define ITG3200_STANDBY_GYRO_Y 0x10
+#define ITG3200_STANDBY_GYRO_Z 0x08
+/* Determine the device clock source */
+#define ITG3200_CLK_INTERNAL 0x00
+#define ITG3200_CLK_GYRO_X 0x01
+#define ITG3200_CLK_GYRO_Y 0x02
+#define ITG3200_CLK_GYRO_Z 0x03
+#define ITG3200_CLK_EXT_32K 0x04
+#define ITG3200_CLK_EXT_19M 0x05
+
+
+/**
+ * struct itg3200 - device instance specific data
+ * @i2c: actual i2c_client
+ * @trig: data ready trigger from itg3200 pin
+ **/
+struct itg3200 {
+ struct i2c_client *i2c;
+ struct iio_trigger *trig;
+};
+
+enum ITG3200_SCAN_INDEX {
+ ITG3200_SCAN_TEMP,
+ ITG3200_SCAN_GYRO_X,
+ ITG3200_SCAN_GYRO_Y,
+ ITG3200_SCAN_GYRO_Z,
+ ITG3200_SCAN_ELEMENTS,
+};
+
+int itg3200_write_reg_8(struct iio_dev *indio_dev,
+ u8 reg_address, u8 val);
+
+int itg3200_read_reg_8(struct iio_dev *indio_dev,
+ u8 reg_address, u8 *val);
+
+
+#ifdef CONFIG_IIO_BUFFER
+
+void itg3200_remove_trigger(struct iio_dev *indio_dev);
+int itg3200_probe_trigger(struct iio_dev *indio_dev);
+
+int itg3200_buffer_configure(struct iio_dev *indio_dev);
+void itg3200_buffer_unconfigure(struct iio_dev *indio_dev);
+
+#else /* CONFIG_IIO_BUFFER */
+
+static inline void itg3200_remove_trigger(struct iio_dev *indio_dev)
+{
+}
+
+static inline int itg3200_probe_trigger(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline int itg3200_buffer_configure(struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void itg3200_buffer_unconfigure(struct iio_dev *indio_dev)
+{
+}
+
+#endif /* CONFIG_IIO_BUFFER */
+
+#endif /* I2C_ITG3200_H_ */
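
A small usage sketch of the register helpers declared above, assuming a caller that already holds the iio_dev; error handling is reduced to a boolean for brevity.

/* Sketch: polling the ITG-3200 data-ready interrupt status. */
#include <linux/iio/gyro/itg3200.h>

static bool example_itg3200_data_ready(struct iio_dev *indio_dev)
{
	u8 status = 0;

	if (itg3200_read_reg_8(indio_dev, ITG3200_REG_IRQ_STATUS, &status))
		return false;

	return status & ITG3200_IRQ_DATA_RDY_STATUS;
}
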
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
new file mode 100644
index 000000000..d86b753e9
--- /dev/null
+++ b/include/linux/iio/iio.h
@@ -0,0 +1,652 @@
+
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _INDUSTRIAL_IO_H_
+#define _INDUSTRIAL_IO_H_
+
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/iio/types.h>
+#include <linux/of.h>
+/* IIO TODO LIST */
+/*
+ * Provide means of adjusting timer accuracy.
+ * Currently assumes nano seconds.
+ */
+
+enum iio_chan_info_enum {
+ IIO_CHAN_INFO_RAW = 0,
+ IIO_CHAN_INFO_PROCESSED,
+ IIO_CHAN_INFO_SCALE,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
+ IIO_CHAN_INFO_SAMP_FREQ,
+ IIO_CHAN_INFO_FREQUENCY,
+ IIO_CHAN_INFO_PHASE,
+ IIO_CHAN_INFO_HARDWAREGAIN,
+ IIO_CHAN_INFO_HYSTERESIS,
+ IIO_CHAN_INFO_INT_TIME,
+ IIO_CHAN_INFO_ENABLE,
+ IIO_CHAN_INFO_CALIBHEIGHT,
+ IIO_CHAN_INFO_CALIBWEIGHT,
+ IIO_CHAN_INFO_DEBOUNCE_COUNT,
+ IIO_CHAN_INFO_DEBOUNCE_TIME,
+};
+
+enum iio_shared_by {
+ IIO_SEPARATE,
+ IIO_SHARED_BY_TYPE,
+ IIO_SHARED_BY_DIR,
+ IIO_SHARED_BY_ALL
+};
+
+enum iio_endian {
+ IIO_CPU,
+ IIO_BE,
+ IIO_LE,
+};
+
+struct iio_chan_spec;
+struct iio_dev;
+
+/**
+ * struct iio_chan_spec_ext_info - Extended channel info attribute
+ * @name: Info attribute name
+ * @shared: Whether this attribute is shared between all channels.
+ * @read: Read callback for this info attribute, may be NULL.
+ * @write: Write callback for this info attribute, may be NULL.
+ * @private: Data private to the driver.
+ */
+struct iio_chan_spec_ext_info {
+ const char *name;
+ enum iio_shared_by shared;
+ ssize_t (*read)(struct iio_dev *, uintptr_t private,
+ struct iio_chan_spec const *, char *buf);
+ ssize_t (*write)(struct iio_dev *, uintptr_t private,
+ struct iio_chan_spec const *, const char *buf,
+ size_t len);
+ uintptr_t private;
+};
+
+/**
+ * struct iio_enum - Enum channel info attribute
+ * @items: An array of strings.
+ * @num_items: Length of the item array.
+ * @set: Set callback function, may be NULL.
+ * @get: Get callback function, may be NULL.
+ *
+ * The iio_enum struct can be used to implement enum style channel attributes.
+ * Enum style attributes are those which have a set of strings which map to
+ * unsigned integer values. The IIO enum helper code takes care of mapping
+ * between value and string as well as generating a "_available" file which
+ * contains a list of all available items. The set callback will be called when
+ * the attribute is updated. The last parameter is the index to the newly
+ * activated item. The get callback will be used to query the currently active
+ * item and is supposed to return the index for it.
+ */
+struct iio_enum {
+ const char * const *items;
+ unsigned int num_items;
+ int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
+ int (*get)(struct iio_dev *, const struct iio_chan_spec *);
+};
+
+ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
+ uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
+ssize_t iio_enum_read(struct iio_dev *indio_dev,
+ uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
+ssize_t iio_enum_write(struct iio_dev *indio_dev,
+ uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
+ size_t len);
+
+/**
+ * IIO_ENUM() - Initialize enum extended channel attribute
+ * @_name: Attribute name
+ * @_shared: Whether the attribute is shared between all channels
+ * @_e: Pointer to an iio_enum struct
+ *
+ * This should usually be used together with IIO_ENUM_AVAILABLE()
+ */
+#define IIO_ENUM(_name, _shared, _e) \
+{ \
+ .name = (_name), \
+ .shared = (_shared), \
+ .read = iio_enum_read, \
+ .write = iio_enum_write, \
+ .private = (uintptr_t)(_e), \
+}
+
+/**
+ * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
+ * @_name: Attribute name ("_available" will be appended to the name)
+ * @_e: Pointer to an iio_enum struct
+ *
+ * Creates a read only attribute which lists all the available enum items in a
+ * space separated list. This should usually be used together with IIO_ENUM()
+ */
+#define IIO_ENUM_AVAILABLE(_name, _e) \
+{ \
+ .name = (_name "_available"), \
+ .shared = IIO_SHARED_BY_TYPE, \
+ .read = iio_enum_available_read, \
+ .private = (uintptr_t)(_e), \
+}
+
+/**
+ * struct iio_event_spec - specification for a channel event
+ * @type: Type of the event
+ * @dir: Direction of the event
+ * @mask_separate: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be registered per channel.
+ * @mask_shared_by_type: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by channel type.
+ * @mask_shared_by_dir: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by channel type and
+ * direction.
+ * @mask_shared_by_all: Bit mask of enum iio_event_info values. Attributes
+ * set in this mask will be shared by all channels.
+ */
+struct iio_event_spec {
+ enum iio_event_type type;
+ enum iio_event_direction dir;
+ unsigned long mask_separate;
+ unsigned long mask_shared_by_type;
+ unsigned long mask_shared_by_dir;
+ unsigned long mask_shared_by_all;
+};
+
+/**
+ * struct iio_chan_spec - specification of a single channel
+ * @type: What type of measurement is the channel making.
+ * @channel: What number do we wish to assign the channel.
+ * @channel2: If there is a second number for a differential
+ * channel then this is it. If modified is set then the
+ * value here specifies the modifier.
+ * @address: Driver specific identifier.
+ * @scan_index: Monotonic index to give ordering in scans when read
+ * from a buffer.
+ * @scan_type: Sign: 's' or 'u' to specify signed or unsigned
+ * realbits: Number of valid bits of data
+ * storage_bits: Realbits + padding
+ * shift: Shift right by this before masking out
+ * realbits.
+ * endianness: little or big endian
+ * repeat: Number of times real/storage bits
+ * repeats. When the repeat element is
+ * more than 1, then the type element in
+ * sysfs will show a repeat value.
+ * Otherwise, the number of repetitions is
+ * omitted.
+ * @info_mask_separate: What information is to be exported that is specific to
+ * this channel.
+ * @info_mask_shared_by_type: What information is to be exported that is shared
+ * by all channels of the same type.
+ * @info_mask_shared_by_dir: What information is to be exported that is shared
+ * by all channels of the same direction.
+ * @info_mask_shared_by_all: What information is to be exported that is shared
+ * by all channels.
+ * @event_spec: Array of events which should be registered for this
+ * channel.
+ * @num_event_specs: Size of the event_spec array.
+ * @ext_info: Array of extended info attributes for this channel.
+ * The array is NULL terminated, the last element should
+ * have its name field set to NULL.
+ * @extend_name: Allows labeling of channel attributes with an
+ * informative name. Note this has no effect on event codes,
+ * unlike modifiers.
+ * @datasheet_name: A name used in in-kernel mapping of channels. It should
+ * correspond to the first name that the channel is referred
+ * to by in the datasheet (e.g. IND), or the nearest
+ * possible compound name (e.g. IND-INC).
+ * @modified: Does a modifier apply to this channel. What these are
+ * depends on the channel type. Modifier is set in
+ * channel2. Examples are IIO_MOD_X for axial sensors about
+ * the 'x' axis.
+ * @indexed: Specify the channel has a numerical index. If not,
+ * the channel index number will be suppressed for sysfs
+ * attributes but not for event codes.
+ * @output: Channel is output.
+ * @differential: Channel is differential.
+ */
+struct iio_chan_spec {
+ enum iio_chan_type type;
+ int channel;
+ int channel2;
+ unsigned long address;
+ int scan_index;
+ struct {
+ char sign;
+ u8 realbits;
+ u8 storagebits;
+ u8 shift;
+ u8 repeat;
+ enum iio_endian endianness;
+ } scan_type;
+ long info_mask_separate;
+ long info_mask_shared_by_type;
+ long info_mask_shared_by_dir;
+ long info_mask_shared_by_all;
+ const struct iio_event_spec *event_spec;
+ unsigned int num_event_specs;
+ const struct iio_chan_spec_ext_info *ext_info;
+ const char *extend_name;
+ const char *datasheet_name;
+ unsigned modified:1;
+ unsigned indexed:1;
+ unsigned output:1;
+ unsigned differential:1;
+};
+
+
+/**
+ * iio_channel_has_info() - Checks whether a channel supports a info attribute
+ * @chan: The channel to be queried
+ * @type: Type of the info attribute to be checked
+ *
+ * Returns true if the channels supports reporting values for the given info
+ * attribute type, false otherwise.
+ */
+static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
+ enum iio_chan_info_enum type)
+{
+ return (chan->info_mask_separate & BIT(type)) |
+ (chan->info_mask_shared_by_type & BIT(type)) |
+ (chan->info_mask_shared_by_dir & BIT(type)) |
+ (chan->info_mask_shared_by_all & BIT(type));
+}
+
+#define IIO_CHAN_SOFT_TIMESTAMP(_si) { \
+ .type = IIO_TIMESTAMP, \
+ .channel = -1, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 64, \
+ .storagebits = 64, \
+ }, \
+}
+
+/**
+ * iio_get_time_ns() - utility function to get a time stamp for events etc
+ **/
+static inline s64 iio_get_time_ns(void)
+{
+ return ktime_get_real_ns();
+}
+
+/* Device operating modes */
+#define INDIO_DIRECT_MODE 0x01
+#define INDIO_BUFFER_TRIGGERED 0x02
+#define INDIO_BUFFER_SOFTWARE 0x04
+#define INDIO_BUFFER_HARDWARE 0x08
+
+#define INDIO_ALL_BUFFER_MODES \
+ (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)
+
+#define INDIO_MAX_RAW_ELEMENTS 4
+
+struct iio_trigger; /* forward declaration */
+struct iio_dev;
+
+/**
+ * struct iio_info - constant information about device
+ * @driver_module: module structure used to ensure correct
+ * ownership of chrdevs etc
+ * @event_attrs: event control attributes
+ * @attrs: general purpose device attributes
+ * @read_raw: function to request a value from the device.
+ * mask specifies which value. Note 0 means a reading of
+ * the channel in question. Return value will specify the
+ * type of value returned by the device. val and val2 will
+ * contain the elements making up the returned value.
+ * @read_raw_multi: function to return values from the device.
+ * mask specifies which value. Note 0 means a reading of
+ * the channel in question. Return value will specify the
+ * type of value returned by the device. vals pointer
+ * contain the elements making up the returned value.
+ * max_len specifies maximum number of elements
+ * vals pointer can contain. val_len is used to return
+ * length of valid elements in vals.
+ * @write_raw: function to write a value to the device.
+ * Parameters are the same as for read_raw.
+ * @write_raw_get_fmt: callback function to query the expected
+ * format/precision. If not set by the driver, write_raw
+ * returns IIO_VAL_INT_PLUS_MICRO.
+ * @read_event_config: find out if the event is enabled.
+ * @write_event_config: set if the event is enabled.
+ * @read_event_value: read a configuration value associated with the event.
+ * @write_event_value: write a configuration value for the event.
+ * @validate_trigger: function to validate the trigger when the
+ * current trigger gets changed.
+ * @update_scan_mode: function to configure device and scan buffer when
+ * channels have changed
+ * @debugfs_reg_access: function to read or write register value of device
+ * @of_xlate: function pointer to obtain channel specifier index.
+ * When #iio-cells is greater than '0', the driver could
+ * provide a custom of_xlate function that reads the
+ * *args* and returns the appropriate index in registered
+ * IIO channels array.
+ * @hwfifo_set_watermark: function pointer to set the current hardware
+ * fifo watermark level; see hwfifo_* entries in
+ * Documentation/ABI/testing/sysfs-bus-iio for details on
+ * how the hardware fifo operates
+ * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
+ * in the hardware fifo to the device buffer. The driver
+ * should not flush more than count samples. The function
+ * must return the number of samples flushed, 0 if no
+ * samples were flushed or a negative integer if no samples
+ * were flushed and there was an error.
+ **/
+struct iio_info {
+ struct module *driver_module;
+ struct attribute_group *event_attrs;
+ const struct attribute_group *attrs;
+
+ int (*read_raw)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask);
+
+ int (*read_raw_multi)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int max_len,
+ int *vals,
+ int *val_len,
+ long mask);
+
+ int (*write_raw)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask);
+
+ int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask);
+
+ int (*read_event_config)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir);
+
+ int (*write_event_config)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state);
+
+ int (*read_event_value)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2);
+
+ int (*write_event_value)(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2);
+
+ int (*validate_trigger)(struct iio_dev *indio_dev,
+ struct iio_trigger *trig);
+ int (*update_scan_mode)(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+ int (*debugfs_reg_access)(struct iio_dev *indio_dev,
+ unsigned reg, unsigned writeval,
+ unsigned *readval);
+ int (*of_xlate)(struct iio_dev *indio_dev,
+ const struct of_phandle_args *iiospec);
+ int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
+ int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
+ unsigned count);
+};
+
+/**
+ * struct iio_buffer_setup_ops - buffer setup related callbacks
+ * @preenable: [DRIVER] function to run prior to marking buffer enabled
+ * @postenable: [DRIVER] function to run after marking buffer enabled
+ * @predisable: [DRIVER] function to run prior to marking buffer
+ * disabled
+ * @postdisable: [DRIVER] function to run after marking buffer disabled
+ * @validate_scan_mask: [DRIVER] function callback to check whether a given
+ * scan mask is valid for the device.
+ */
+struct iio_buffer_setup_ops {
+ int (*preenable)(struct iio_dev *);
+ int (*postenable)(struct iio_dev *);
+ int (*predisable)(struct iio_dev *);
+ int (*postdisable)(struct iio_dev *);
+ bool (*validate_scan_mask)(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+};
+
+/**
+ * struct iio_dev - industrial I/O device
+ * @id: [INTERN] used to identify device internally
+ * @modes: [DRIVER] operating modes supported by device
+ * @currentmode: [DRIVER] current operating mode
+ * @dev: [DRIVER] device structure, should be assigned a parent
+ * and owner
+ * @event_interface: [INTERN] event chrdevs associated with interrupt lines
+ * @buffer: [DRIVER] any buffer present
+ * @buffer_list: [INTERN] list of all buffers currently attached
+ * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
+ * @mlock: [INTERN] lock used to prevent simultaneous device state
+ * changes
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
+ * @masklength: [INTERN] the length of the mask established from
+ * channels
+ * @active_scan_mask: [INTERN] union of all scan masks requested by buffers
+ * @scan_timestamp: [INTERN] set if any buffers have requested timestamp
+ * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
+ * @trig: [INTERN] current device trigger (buffer modes)
+ * @pollfunc: [DRIVER] function run on trigger being received
+ * @channels: [DRIVER] channel specification structure table
+ * @num_channels: [DRIVER] number of channels specified in @channels.
+ * @channel_attr_list: [INTERN] keep track of automatically created channel
+ * attributes
+ * @chan_attr_group: [INTERN] group for all attrs in base directory
+ * @name: [DRIVER] name of the device.
+ * @info: [DRIVER] callbacks and constant info from driver
+ * @info_exist_lock: [INTERN] lock to prevent use during removal
+ * @setup_ops: [DRIVER] callbacks to call before and after buffer
+ * enable/disable
+ * @chrdev: [INTERN] associated character device
+ * @groups: [INTERN] attribute groups
+ * @groupcounter: [INTERN] index of next attribute group
+ * @flags: [INTERN] file ops related flags including busy flag.
+ * @debugfs_dentry: [INTERN] device specific debugfs dentry.
+ * @cached_reg_addr: [INTERN] cached register address for debugfs reads.
+ */
+struct iio_dev {
+ int id;
+
+ int modes;
+ int currentmode;
+ struct device dev;
+
+ struct iio_event_interface *event_interface;
+
+ struct iio_buffer *buffer;
+ struct list_head buffer_list;
+ int scan_bytes;
+ struct mutex mlock;
+
+ const unsigned long *available_scan_masks;
+ unsigned masklength;
+ const unsigned long *active_scan_mask;
+ bool scan_timestamp;
+ unsigned scan_index_timestamp;
+ struct iio_trigger *trig;
+ struct iio_poll_func *pollfunc;
+
+ struct iio_chan_spec const *channels;
+ int num_channels;
+
+ struct list_head channel_attr_list;
+ struct attribute_group chan_attr_group;
+ const char *name;
+ const struct iio_info *info;
+ struct mutex info_exist_lock;
+ const struct iio_buffer_setup_ops *setup_ops;
+ struct cdev chrdev;
+#define IIO_MAX_GROUPS 6
+ const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
+ int groupcounter;
+
+ unsigned long flags;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_dentry;
+ unsigned cached_reg_addr;
+#endif
+};
+
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
+int iio_device_register(struct iio_dev *indio_dev);
+void iio_device_unregister(struct iio_dev *indio_dev);
+int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
+void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
+int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
+
+extern struct bus_type iio_bus_type;
+
+/**
+ * iio_device_put() - reference counted deallocation of struct device
+ * @indio_dev: IIO device structure containing the device
+ **/
+static inline void iio_device_put(struct iio_dev *indio_dev)
+{
+ if (indio_dev)
+ put_device(&indio_dev->dev);
+}
+
+/**
+ * dev_to_iio_dev() - Get IIO device struct from a device struct
+ * @dev: The device embedded in the IIO device
+ *
+ * Note: The device must be a IIO device, otherwise the result is undefined.
+ */
+static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
+{
+ return container_of(dev, struct iio_dev, dev);
+}
+
+/**
+ * iio_device_get() - increment reference count for the device
+ * @indio_dev: IIO device structure
+ *
+ * Returns: The passed IIO device
+ **/
+static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
+{
+ return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
+}
+
+
+/**
+ * iio_device_set_drvdata() - Set device driver data
+ * @indio_dev: IIO device structure
+ * @data: Driver specific data
+ *
+ * Allows attaching an arbitrary pointer to an IIO device, which can later be
+ * retrieved by iio_device_get_drvdata().
+ */
+static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
+{
+ dev_set_drvdata(&indio_dev->dev, data);
+}
+
+/**
+ * iio_device_get_drvdata() - Get device driver data
+ * @indio_dev: IIO device structure
+ *
+ * Returns the data previously set with iio_device_set_drvdata()
+ */
+static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev)
+{
+ return dev_get_drvdata(&indio_dev->dev);
+}
+
+/* Can we make this smaller? */
+#define IIO_ALIGN L1_CACHE_BYTES
+struct iio_dev *iio_device_alloc(int sizeof_priv);
+
+static inline void *iio_priv(const struct iio_dev *indio_dev)
+{
+ return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN);
+}
+
+static inline struct iio_dev *iio_priv_to_dev(void *priv)
+{
+ return (struct iio_dev *)((char *)priv -
+ ALIGN(sizeof(struct iio_dev), IIO_ALIGN));
+}
+
+void iio_device_free(struct iio_dev *indio_dev);
+struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
+void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
+struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
+ const char *fmt, ...);
+void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
+
+/**
+ * iio_buffer_enabled() - helper function to test if the buffer is enabled
+ * @indio_dev: IIO device structure for device
+ **/
+static inline bool iio_buffer_enabled(struct iio_dev *indio_dev)
+{
+ return indio_dev->currentmode
+ & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
+ INDIO_BUFFER_SOFTWARE);
+}
+
+/**
+ * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
+ * @indio_dev: IIO device structure for device
+ **/
+#if defined(CONFIG_DEBUG_FS)
+static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+{
+ return indio_dev->debugfs_dentry;
+}
+#else
+static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
+{
+ return NULL;
+}
+#endif
+
+int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
+ int *fract);
+
+/**
+ * IIO_DEGREE_TO_RAD() - Convert degree to rad
+ * @deg: A value in degree
+ *
+ * Returns the given value converted from degree to rad
+ */
+#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
+
+/**
+ * IIO_G_TO_M_S_2() - Convert g to meter / second**2
+ * @g: A value in g
+ *
+ * Returns the given value converted from g to meter / second**2
+ */
+#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
+
+#endif /* _INDUSTRIAL_IO_H_ */
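
To tie the main pieces of this header together, the following is a deliberately minimal driver skeleton: allocate with devm_iio_device_alloc(), keep state in iio_priv(), describe one channel, and register. The device name, channel layout, scale values and probe signature are assumptions for illustration only.

/* Minimal IIO driver skeleton built on the interfaces above. */
#include <linux/module.h>
#include <linux/iio/iio.h>

struct example_state {
	int last_raw;				/* fake cached sample */
};

static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
	},
};

static int example_read_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask)
{
	struct example_state *st = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		*val = st->last_raw;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		*val = 1;			/* placeholder: 1.5 units/count */
		*val2 = 500000;
		return IIO_VAL_INT_PLUS_MICRO;
	default:
		return -EINVAL;
	}
}

static const struct iio_info example_info = {
	.driver_module = THIS_MODULE,
	.read_raw = example_read_raw,
};

static int example_probe(struct device *dev)
{
	struct iio_dev *indio_dev;

	indio_dev = devm_iio_device_alloc(dev, sizeof(struct example_state));
	if (!indio_dev)
		return -ENOMEM;

	indio_dev->dev.parent = dev;
	indio_dev->name = "iio-example";
	indio_dev->info = &example_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = example_channels;
	indio_dev->num_channels = ARRAY_SIZE(example_channels);

	return devm_iio_device_register(dev, indio_dev);
}
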
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
new file mode 100644
index 000000000..fa2d01ef8
--- /dev/null
+++ b/include/linux/iio/imu/adis.h
@@ -0,0 +1,283 @@
+/*
+ * Common library for ADIS16XXX devices
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __IIO_ADIS_H__
+#define __IIO_ADIS_H__
+
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/iio/types.h>
+
+#define ADIS_WRITE_REG(reg) ((0x80 | (reg)))
+#define ADIS_READ_REG(reg) ((reg) & 0x7f)
+
+#define ADIS_PAGE_SIZE 0x80
+#define ADIS_REG_PAGE_ID 0x00
+
+struct adis;
+
+/**
+ * struct adis_data - ADIS chip variant specific data
+ * @read_delay: SPI delay for read operations in us
+ * @write_delay: SPI delay for write operations in us
+ * @glob_cmd_reg: Register address of the GLOB_CMD register
+ * @msc_ctrl_reg: Register address of the MSC_CTRL register
+ * @diag_stat_reg: Register address of the DIAG_STAT register
+ * @status_error_msgs: Array of error messages
+ * @status_error_mask: Bit mask of DIAG_STAT bits that indicate an error condition
+ */
+struct adis_data {
+ unsigned int read_delay;
+ unsigned int write_delay;
+
+ unsigned int glob_cmd_reg;
+ unsigned int msc_ctrl_reg;
+ unsigned int diag_stat_reg;
+
+ unsigned int self_test_mask;
+ unsigned int startup_delay;
+
+ const char * const *status_error_msgs;
+ unsigned int status_error_mask;
+
+ int (*enable_irq)(struct adis *adis, bool enable);
+
+ bool has_paging;
+};
+
+struct adis {
+ struct spi_device *spi;
+ struct iio_trigger *trig;
+
+ const struct adis_data *data;
+
+ struct mutex txrx_lock;
+ struct spi_message msg;
+ struct spi_transfer *xfer;
+ unsigned int current_page;
+ void *buffer;
+
+ uint8_t tx[10] ____cacheline_aligned;
+ uint8_t rx[4];
+};
+
+int adis_init(struct adis *adis, struct iio_dev *indio_dev,
+ struct spi_device *spi, const struct adis_data *data);
+int adis_reset(struct adis *adis);
+
+int adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int val, unsigned int size);
+int adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size);
+
+/**
+ * adis_write_reg_8() - Write single byte to a register
+ * @adis: The adis device
+ * @reg: The address of the register to be written
+ * @val: The value to write
+ */
+static inline int adis_write_reg_8(struct adis *adis, unsigned int reg,
+ uint8_t val)
+{
+ return adis_write_reg(adis, reg, val, 1);
+}
+
+/**
+ * adis_write_reg_16() - Write 2 bytes to a pair of registers
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: Value to be written
+ */
+static inline int adis_write_reg_16(struct adis *adis, unsigned int reg,
+ uint16_t val)
+{
+ return adis_write_reg(adis, reg, val, 2);
+}
+
+/**
+ * adis_write_reg_32() - write 4 bytes to four registers
+ * @adis: The adis device
+ * @reg: The address of the lowest of the four registers
+ * @val: Value to be written
+ */
+static inline int adis_write_reg_32(struct adis *adis, unsigned int reg,
+ uint32_t val)
+{
+ return adis_write_reg(adis, reg, val, 4);
+}
+
+/**
+ * adis_read_reg_16() - read 2 bytes from a 16-bit register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
+ uint16_t *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = adis_read_reg(adis, reg, &tmp, 2);
+ *val = tmp;
+
+ return ret;
+}
+
+/**
+ * adis_read_reg_32() - read 4 bytes from a 32-bit register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
+ uint32_t *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = adis_read_reg(adis, reg, &tmp, 4);
+ *val = tmp;
+
+ return ret;
+}
+
+int adis_enable_irq(struct adis *adis, bool enable);
+int adis_check_status(struct adis *adis);
+
+int adis_initial_startup(struct adis *adis);
+
+int adis_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int error_mask,
+ int *val);
+
+#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (chan), \
+ .extend_name = name, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = info_all, \
+ .address = (addr), \
+ .scan_index = (si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS_SUPPLY_CHAN(addr, si, info_all, bits) \
+ ADIS_VOLTAGE_CHAN(addr, si, 0, "supply", info_all, bits)
+
+#define ADIS_AUX_ADC_CHAN(addr, si, info_all, bits) \
+ ADIS_VOLTAGE_CHAN(addr, si, 1, NULL, info_all, bits)
+
+#define ADIS_TEMP_CHAN(addr, si, info_all, bits) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = info_all, \
+ .address = (addr), \
+ .scan_index = (si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS_MOD_CHAN(_type, mod, addr, si, info_sep, info_all, bits) { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## mod, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ info_sep, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = info_all, \
+ .address = (addr), \
+ .scan_index = (si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADIS_ACCEL_CHAN(mod, addr, si, info_sep, info_all, bits) \
+ ADIS_MOD_CHAN(IIO_ACCEL, mod, addr, si, info_sep, info_all, bits)
+
+#define ADIS_GYRO_CHAN(mod, addr, si, info_sep, info_all, bits) \
+ ADIS_MOD_CHAN(IIO_ANGL_VEL, mod, addr, si, info_sep, info_all, bits)
+
+#define ADIS_INCLI_CHAN(mod, addr, si, info_sep, info_all, bits) \
+ ADIS_MOD_CHAN(IIO_INCLI, mod, addr, si, info_sep, info_all, bits)
+
+#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \
+ ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits)
+
+#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
+
+int adis_setup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
+void adis_cleanup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev);
+
+int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev);
+void adis_remove_trigger(struct adis *adis);
+
+int adis_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+
+#else /* CONFIG_IIO_ADIS_LIB_BUFFER */
+
+static inline int adis_setup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *))
+{
+ return 0;
+}
+
+static inline void adis_cleanup_buffer_and_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
+{
+}
+
+static inline int adis_probe_trigger(struct adis *adis,
+ struct iio_dev *indio_dev)
+{
+ return 0;
+}
+
+static inline void adis_remove_trigger(struct adis *adis)
+{
+}
+
+#define adis_update_scan_mode NULL
+
+#endif /* CONFIG_IIO_ADIS_LIB_BUFFER */
+
+#ifdef CONFIG_DEBUG_FS
+
+int adis_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval, unsigned int *readval);
+
+#else
+
+#define adis_debugfs_reg_access NULL
+
+#endif
+
+#endif
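
For orientation, a device driver built on this library might wrap one of the 16-bit read helpers as below; the register address and the 14-bit sign extension are assumptions for the sketch, not values from this header.

/* Sketch: reading a hypothetical 16-bit output register via the library. */
#include <linux/bitops.h>
#include <linux/iio/imu/adis.h>

#define EXAMPLE_REG_XGYRO_OUT	0x04	/* hypothetical register address */

static int example_read_xgyro(struct adis *adis, int *out)
{
	uint16_t raw;
	int ret;

	ret = adis_read_reg_16(adis, EXAMPLE_REG_XGYRO_OUT, &raw);
	if (ret)
		return ret;

	/* Assume a 14-bit two's-complement field for this sketch. */
	*out = sign_extend32(raw, 13);
	return 0;
}
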
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h
new file mode 100644
index 000000000..1683bc710
--- /dev/null
+++ b/include/linux/iio/kfifo_buf.h
@@ -0,0 +1,14 @@
+#ifndef __LINUX_IIO_KFIFO_BUF_H__
+#define __LINUX_IIO_KFIFO_BUF_H__
+
+#include <linux/kfifo.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+
+struct iio_buffer *iio_kfifo_allocate(void);
+void iio_kfifo_free(struct iio_buffer *r);
+
+struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev);
+void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r);
+
+#endif
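
A typical use of these helpers in a probe path might look like the sketch below; it assumes iio_device_attach_buffer() from <linux/iio/buffer.h> is available with this signature in the tree.

/* Sketch: attaching a kfifo buffer to an IIO device during probe. */
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/buffer.h>

static int example_setup_fifo(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_kfifo_allocate(dev);
	if (!buffer)
		return -ENOMEM;

	iio_device_attach_buffer(indio_dev, buffer);
	indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
	return 0;
}
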
diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h
new file mode 100644
index 000000000..1601a2a63
--- /dev/null
+++ b/include/linux/iio/machine.h
@@ -0,0 +1,31 @@
+/*
+ * Industrial I/O in kernel access map definitions for board files.
+ *
+ * Copyright (c) 2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IIO_MACHINE_H__
+#define __LINUX_IIO_MACHINE_H__
+
+/**
+ * struct iio_map - description of link between consumer and device channels
+ * @adc_channel_label: Label used to identify the channel on the provider.
+ * This is matched against the datasheet_name element
+ * of struct iio_chan_spec.
+ * @consumer_dev_name: Name to uniquely identify the consumer device.
+ * @consumer_channel: Unique name used to identify the channel on the
+ * consumer side.
+ * @consumer_data: Data about the channel for use by the consumer driver.
+ */
+struct iio_map {
+ const char *adc_channel_label;
+ const char *consumer_dev_name;
+ const char *consumer_channel;
+ void *consumer_data;
+};
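+
+/*
+ * Usage sketch: a board file describing which ADC channel a consumer driver
+ * should see. The device and channel names are hypothetical; the array is
+ * normally handed to the ADC driver, which registers it with the IIO core
+ * (typically via iio_map_array_register()).
+ */
+#if 0 /* illustrative only */
+static struct iio_map example_adc_maps[] = {
+	{
+		.adc_channel_label = "channel_0",
+		.consumer_dev_name = "example-battery",
+		.consumer_channel  = "voltage",
+	},
+	{ /* sentinel */ }
+};
+#endif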
+
+#endif
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
new file mode 100644
index 000000000..8a1d18640
--- /dev/null
+++ b/include/linux/iio/sysfs.h
@@ -0,0 +1,127 @@
+/* The industrial I/O core
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * General attributes
+ */
+
+#ifndef _INDUSTRIAL_IO_SYSFS_H_
+#define _INDUSTRIAL_IO_SYSFS_H_
+
+struct iio_chan_spec;
+
+/**
+ * struct iio_dev_attr - iio specific device attribute
+ * @dev_attr: underlying device attribute
+ * @address: associated register address
+ * @l: list head for maintaining list of dynamically created attrs.
+ */
+struct iio_dev_attr {
+ struct device_attribute dev_attr;
+ u64 address;
+ struct list_head l;
+ struct iio_chan_spec const *c;
+};
+
+#define to_iio_dev_attr(_dev_attr) \
+ container_of(_dev_attr, struct iio_dev_attr, dev_attr)
+
+ssize_t iio_read_const_attr(struct device *dev,
+ struct device_attribute *attr,
+ char *len);
+
+/**
+ * struct iio_const_attr - constant device specific attribute
+ * often used for things like available modes
+ * @string: attribute string
+ * @dev_attr: underlying device attribute
+ */
+struct iio_const_attr {
+ const char *string;
+ struct device_attribute dev_attr;
+};
+
+#define to_iio_const_attr(_dev_attr) \
+ container_of(_dev_attr, struct iio_const_attr, dev_attr)
+
+/* Some attributes will be hard coded (device dependent) and not require an
+   address; in these cases pass a negative value as the address. */
+#define IIO_ATTR(_name, _mode, _show, _store, _addr) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .address = _addr }
+
+#define IIO_DEVICE_ATTR(_name, _mode, _show, _store, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_name \
+ = IIO_ATTR(_name, _mode, _show, _store, _addr)
+
+#define IIO_DEVICE_ATTR_NAMED(_vname, _name, _mode, _show, _store, _addr) \
+ struct iio_dev_attr iio_dev_attr_##_vname \
+ = IIO_ATTR(_name, _mode, _show, _store, _addr)
+
+#define IIO_CONST_ATTR(_name, _string) \
+ struct iio_const_attr iio_const_attr_##_name \
+ = { .string = _string, \
+ .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
+
+#define IIO_CONST_ATTR_NAMED(_vname, _name, _string) \
+ struct iio_const_attr iio_const_attr_##_vname \
+ = { .string = _string, \
+ .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
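+
+/*
+ * Usage sketch: defining sysfs attributes with the helpers above. The
+ * attribute names, register address and show/store callbacks are
+ * hypothetical and assumed to be defined elsewhere in the driver.
+ */
+#if 0 /* illustrative only */
+static IIO_DEVICE_ATTR(filter_low_pass_3db_frequency, S_IRUGO | S_IWUSR,
+		       example_show_filter, example_store_filter, 0x38);
+static IIO_CONST_ATTR(sampling_frequency_available, "100 200 400 800");
+
+static struct attribute *example_attributes[] = {
+	&iio_dev_attr_filter_low_pass_3db_frequency.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	NULL,
+};
+#endif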
+
+/* Generic attributes of one type or another */
+
+/**
+ * IIO_DEV_ATTR_SAMP_FREQ - sets any internal clock frequency
+ * @_mode: sysfs file mode/permissions
+ * @_show: output method for the attribute
+ * @_store: input method for the attribute
+ **/
+#define IIO_DEV_ATTR_SAMP_FREQ(_mode, _show, _store) \
+ IIO_DEVICE_ATTR(sampling_frequency, _mode, _show, _store, 0)
+
+/**
+ * IIO_DEV_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies
+ * @_show: output method for the attribute
+ *
+ * May be mode dependent on some devices
+ **/
+#define IIO_DEV_ATTR_SAMP_FREQ_AVAIL(_show) \
+ IIO_DEVICE_ATTR(sampling_frequency_available, S_IRUGO, _show, NULL, 0)
+/**
+ * IIO_CONST_ATTR_SAMP_FREQ_AVAIL - list available sampling frequencies
+ * @_string: frequency string for the attribute
+ *
+ * Constant version
+ **/
+#define IIO_CONST_ATTR_SAMP_FREQ_AVAIL(_string) \
+ IIO_CONST_ATTR(sampling_frequency_available, _string)
+
+/**
+ * IIO_DEV_ATTR_INT_TIME_AVAIL - list available integration times
+ * @_show: output method for the attribute
+ **/
+#define IIO_DEV_ATTR_INT_TIME_AVAIL(_show) \
+ IIO_DEVICE_ATTR(integration_time_available, S_IRUGO, _show, NULL, 0)
+/**
+ * IIO_CONST_ATTR_INT_TIME_AVAIL - list available integration times
+ * @_string: integration time string for the attribute
+ *
+ * Constant version
+ **/
+#define IIO_CONST_ATTR_INT_TIME_AVAIL(_string) \
+ IIO_CONST_ATTR(integration_time_available, _string)
+
+#define IIO_DEV_ATTR_TEMP_RAW(_show) \
+ IIO_DEVICE_ATTR(in_temp_raw, S_IRUGO, _show, NULL, 0)
+
+#define IIO_CONST_ATTR_TEMP_OFFSET(_string) \
+ IIO_CONST_ATTR(in_temp_offset, _string)
+
+#define IIO_CONST_ATTR_TEMP_SCALE(_string) \
+ IIO_CONST_ATTR(in_temp_scale, _string)
+
+#endif /* _INDUSTRIAL_IO_SYSFS_H_ */
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
new file mode 100644
index 000000000..fa76c79a5
--- /dev/null
+++ b/include/linux/iio/trigger.h
@@ -0,0 +1,149 @@
+/* The industrial I/O core, trigger handling functions
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+
+#ifndef _IIO_TRIGGER_H_
+#define _IIO_TRIGGER_H_
+
+#ifdef CONFIG_IIO_TRIGGER
+struct iio_subirq {
+ bool enabled;
+};
+
+/**
+ * struct iio_trigger_ops - operations structure for an iio_trigger.
+ * @owner: used to monitor usage count of the trigger.
+ * @set_trigger_state: switch on/off the trigger on demand
+ * @try_reenable: function to reenable the trigger when the
+ * use count is zero (may be NULL)
+ * @validate_device: function to validate the device when the
+ * current trigger gets changed.
+ *
+ * This is typically static const within a driver and shared by
+ * instances of a given device.
+ **/
+struct iio_trigger_ops {
+ struct module *owner;
+ int (*set_trigger_state)(struct iio_trigger *trig, bool state);
+ int (*try_reenable)(struct iio_trigger *trig);
+ int (*validate_device)(struct iio_trigger *trig,
+ struct iio_dev *indio_dev);
+};
+
+
+/**
+ * struct iio_trigger - industrial I/O trigger device
+ * @ops: [DRIVER] operations structure
+ * @id: [INTERN] unique id number
+ * @name: [DRIVER] unique name
+ * @dev: [DRIVER] associated device (if relevant)
+ * @list: [INTERN] used in maintenance of global trigger list
+ * @alloc_list: [DRIVER] used for driver specific trigger list
+ * @use_count: use count for the trigger
+ * @subirq_chip: [INTERN] associated 'virtual' irq chip.
+ * @subirq_base: [INTERN] base number for irqs provided by trigger.
+ * @subirqs: [INTERN] information about the 'child' irqs.
+ * @pool: [INTERN] bitmap of irqs currently in use.
+ * @pool_lock: [INTERN] protection of the irq pool.
+ **/
+struct iio_trigger {
+ const struct iio_trigger_ops *ops;
+ int id;
+ const char *name;
+ struct device dev;
+
+ struct list_head list;
+ struct list_head alloc_list;
+ atomic_t use_count;
+
+ struct irq_chip subirq_chip;
+ int subirq_base;
+
+ struct iio_subirq subirqs[CONFIG_IIO_CONSUMERS_PER_TRIGGER];
+ unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
+ struct mutex pool_lock;
+};
+
+
+static inline struct iio_trigger *to_iio_trigger(struct device *d)
+{
+ return container_of(d, struct iio_trigger, dev);
+}
+
+static inline void iio_trigger_put(struct iio_trigger *trig)
+{
+ module_put(trig->ops->owner);
+ put_device(&trig->dev);
+}
+
+static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig)
+{
+ get_device(&trig->dev);
+ __module_get(trig->ops->owner);
+
+ return trig;
+}
+
+/**
+ * iio_trigger_set_drvdata() - Set trigger driver data
+ * @trig: IIO trigger structure
+ * @data: Driver specific data
+ *
+ * Allows an arbitrary pointer to be attached to an IIO trigger, which can
+ * later be retrieved by iio_trigger_get_drvdata().
+ */
+static inline void iio_trigger_set_drvdata(struct iio_trigger *trig, void *data)
+{
+ dev_set_drvdata(&trig->dev, data);
+}
+
+/**
+ * iio_trigger_get_drvdata() - Get trigger driver data
+ * @trig: IIO trigger structure
+ *
+ * Returns the data previously set with iio_trigger_set_drvdata()
+ */
+static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig)
+{
+ return dev_get_drvdata(&trig->dev);
+}
+
+/**
+ * iio_trigger_register() - register a trigger with the IIO core
+ * @trig_info: trigger to be registered
+ **/
+int iio_trigger_register(struct iio_trigger *trig_info);
+
+/**
+ * iio_trigger_unregister() - unregister a trigger from the core
+ * @trig_info: trigger to be unregistered
+ **/
+void iio_trigger_unregister(struct iio_trigger *trig_info);
+
+/**
+ * iio_trigger_poll() - called on a trigger occurring
+ * @trig: trigger which occurred
+ *
+ * Typically called in the relevant hardware interrupt handler.
+ **/
+void iio_trigger_poll(struct iio_trigger *trig);
+void iio_trigger_poll_chained(struct iio_trigger *trig);
+
+irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private);
+
+__printf(1, 2) struct iio_trigger *iio_trigger_alloc(const char *fmt, ...);
+void iio_trigger_free(struct iio_trigger *trig);
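+
+/*
+ * Usage sketch: allocating and registering a trigger from a driver. The
+ * set_trigger_state callback is a hypothetical stub; a real driver would
+ * arm or disarm its data-ready interrupt there.
+ */
+#if 0 /* illustrative only */
+static int example_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+	/* enable/disable the device's data-ready interrupt here */
+	return 0;
+}
+
+static const struct iio_trigger_ops example_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = example_set_trigger_state,
+};
+
+static struct iio_trigger *example_register_trigger(struct iio_dev *indio_dev)
+{
+	struct iio_trigger *trig;
+	int ret;
+
+	trig = iio_trigger_alloc("%s-dev%d", indio_dev->name, indio_dev->id);
+	if (!trig)
+		return ERR_PTR(-ENOMEM);
+
+	trig->ops = &example_trigger_ops;
+	iio_trigger_set_drvdata(trig, indio_dev);
+
+	ret = iio_trigger_register(trig);
+	if (ret) {
+		iio_trigger_free(trig);
+		return ERR_PTR(ret);
+	}
+
+	return trig;
+}
+#endif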
+
+#else
+struct iio_trigger;
+struct iio_trigger_ops;
+#endif
+#endif /* _IIO_TRIGGER_H_ */
diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h
new file mode 100644
index 000000000..c4f8c7409
--- /dev/null
+++ b/include/linux/iio/trigger_consumer.h
@@ -0,0 +1,63 @@
+/* The industrial I/O core, trigger consumer functions
+ *
+ * Copyright (c) 2008-2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IIO_TRIGGER_CONSUMER_H__
+#define __LINUX_IIO_TRIGGER_CONSUMER_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+struct iio_dev;
+struct iio_trigger;
+
+/**
+ * struct iio_poll_func - poll function pair
+ *
+ * @indio_dev: data specific to device (passed into poll func)
+ * @h: the function that is actually run on trigger
+ * @thread: threaded interrupt part
+ * @type: the type of interrupt (basically if oneshot)
+ * @name: name used to identify the trigger consumer.
+ * @irq: the corresponding irq as allocated from the
+ * trigger pool
+ * @timestamp: some devices need a timestamp grabbed as soon
+ * as possible after the trigger - hence the handler
+ * passes it via this field.
+ **/
+struct iio_poll_func {
+ struct iio_dev *indio_dev;
+ irqreturn_t (*h)(int irq, void *p);
+ irqreturn_t (*thread)(int irq, void *p);
+ int type;
+ char *name;
+ int irq;
+ s64 timestamp;
+};
+
+
+struct iio_poll_func
+*iio_alloc_pollfunc(irqreturn_t (*h)(int irq, void *p),
+ irqreturn_t (*thread)(int irq, void *p),
+ int type,
+ struct iio_dev *indio_dev,
+ const char *fmt,
+ ...);
+void iio_dealloc_pollfunc(struct iio_poll_func *pf);
+irqreturn_t iio_pollfunc_store_time(int irq, void *p);
+
+void iio_trigger_notify_done(struct iio_trigger *trig);
+
+/*
+ * Two functions for the common case where all that happens is a pollfunc
+ * is attached to and detached from a trigger
+ */
+int iio_triggered_buffer_postenable(struct iio_dev *indio_dev);
+int iio_triggered_buffer_predisable(struct iio_dev *indio_dev);
+
+#endif
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
new file mode 100644
index 000000000..c378ebec6
--- /dev/null
+++ b/include/linux/iio/triggered_buffer.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_
+#define _LINUX_IIO_TRIGGERED_BUFFER_H_
+
+#include <linux/interrupt.h>
+
+struct iio_dev;
+struct iio_buffer_setup_ops;
+
+int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
+ irqreturn_t (*pollfunc_bh)(int irq, void *p),
+ irqreturn_t (*pollfunc_th)(int irq, void *p),
+ const struct iio_buffer_setup_ops *setup_ops);
+void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
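+
+/*
+ * Usage sketch: the common pattern of pairing the core timestamp helper
+ * iio_pollfunc_store_time() (declared in trigger_consumer.h) with a
+ * driver-supplied threaded handler. example_trigger_handler is a
+ * hypothetical function that reads the device and pushes data to the buffer.
+ */
+#if 0 /* illustrative only */
+static int example_setup_triggered_buffer(struct iio_dev *indio_dev)
+{
+	return iio_triggered_buffer_setup(indio_dev,
+					  iio_pollfunc_store_time,
+					  example_trigger_handler,
+					  NULL);
+}
+#endif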
+
+#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
new file mode 100644
index 000000000..942b6de68
--- /dev/null
+++ b/include/linux/iio/types.h
@@ -0,0 +1,30 @@
+/* industrial I/O data types needed both in and out of kernel
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_TYPES_H_
+#define _IIO_TYPES_H_
+
+#include <uapi/linux/iio/types.h>
+
+enum iio_event_info {
+ IIO_EV_INFO_ENABLE,
+ IIO_EV_INFO_VALUE,
+ IIO_EV_INFO_HYSTERESIS,
+ IIO_EV_INFO_PERIOD,
+};
+
+#define IIO_VAL_INT 1
+#define IIO_VAL_INT_PLUS_MICRO 2
+#define IIO_VAL_INT_PLUS_NANO 3
+#define IIO_VAL_INT_PLUS_MICRO_DB 4
+#define IIO_VAL_INT_MULTIPLE 5
+#define IIO_VAL_FRACTIONAL 10
+#define IIO_VAL_FRACTIONAL_LOG2 11
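+
+/*
+ * Usage sketch: the IIO_VAL_* codes tell the core how to format the value
+ * pair returned by a read_raw() callback. The scale factor below is
+ * hypothetical.
+ */
+#if 0 /* illustrative only */
+static int example_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan,
+			    int *val, int *val2, long mask)
+{
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		/* 0.000598 units per LSB, reported to sysfs as "0.000598" */
+		*val = 0;
+		*val2 = 598;
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+}
+#endif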
+
+#endif /* _IIO_TYPES_H_ */
diff --git a/include/linux/ima.h b/include/linux/ima.h
new file mode 100644
index 000000000..120ccc53f
--- /dev/null
+++ b/include/linux/ima.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _LINUX_IMA_H
+#define _LINUX_IMA_H
+
+#include <linux/fs.h>
+struct linux_binprm;
+
+#ifdef CONFIG_IMA
+extern int ima_bprm_check(struct linux_binprm *bprm);
+extern int ima_file_check(struct file *file, int mask, int opened);
+extern void ima_file_free(struct file *file);
+extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_module_check(struct file *file);
+extern int ima_fw_from_file(struct file *file, char *buf, size_t size);
+
+#else
+static inline int ima_bprm_check(struct linux_binprm *bprm)
+{
+ return 0;
+}
+
+static inline int ima_file_check(struct file *file, int mask, int opened)
+{
+ return 0;
+}
+
+static inline void ima_file_free(struct file *file)
+{
+ return;
+}
+
+static inline int ima_file_mmap(struct file *file, unsigned long prot)
+{
+ return 0;
+}
+
+static inline int ima_module_check(struct file *file)
+{
+ return 0;
+}
+
+static inline int ima_fw_from_file(struct file *file, char *buf, size_t size)
+{
+ return 0;
+}
+
+#endif /* CONFIG_IMA */
+
+#ifdef CONFIG_IMA_APPRAISE
+extern void ima_inode_post_setattr(struct dentry *dentry);
+extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len);
+extern int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name);
+#else
+static inline void ima_inode_post_setattr(struct dentry *dentry)
+{
+ return;
+}
+
+static inline int ima_inode_setxattr(struct dentry *dentry,
+ const char *xattr_name,
+ const void *xattr_value,
+ size_t xattr_value_len)
+{
+ return 0;
+}
+
+static inline int ima_inode_removexattr(struct dentry *dentry,
+ const char *xattr_name)
+{
+ return 0;
+}
+#endif /* CONFIG_IMA_APPRAISE */
+#endif /* _LINUX_IMA_H */
diff --git a/include/linux/in.h b/include/linux/in.h
new file mode 100644
index 000000000..31b493734
--- /dev/null
+++ b/include/linux/in.h
@@ -0,0 +1,104 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions of the Internet Protocol.
+ *
+ * Version: @(#)in.h 1.0.1 04/21/93
+ *
+ * Authors: Original taken from the GNU Project <netinet/in.h> file.
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IN_H
+#define _LINUX_IN_H
+
+
+#include <linux/errno.h>
+#include <uapi/linux/in.h>
+
+static inline int proto_ports_offset(int proto)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP: /* SPI */
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
+ return 0;
+ case IPPROTO_AH: /* SPI */
+ return 4;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline bool ipv4_is_loopback(__be32 addr)
+{
+ return (addr & htonl(0xff000000)) == htonl(0x7f000000);
+}
+
+static inline bool ipv4_is_multicast(__be32 addr)
+{
+ return (addr & htonl(0xf0000000)) == htonl(0xe0000000);
+}
+
+static inline bool ipv4_is_local_multicast(__be32 addr)
+{
+ return (addr & htonl(0xffffff00)) == htonl(0xe0000000);
+}
+
+static inline bool ipv4_is_lbcast(__be32 addr)
+{
+ /* limited broadcast */
+ return addr == htonl(INADDR_BROADCAST);
+}
+
+static inline bool ipv4_is_zeronet(__be32 addr)
+{
+ return (addr & htonl(0xff000000)) == htonl(0x00000000);
+}
+
+/* Special-Use IPv4 Addresses (RFC3330) */
+
+static inline bool ipv4_is_private_10(__be32 addr)
+{
+ return (addr & htonl(0xff000000)) == htonl(0x0a000000);
+}
+
+static inline bool ipv4_is_private_172(__be32 addr)
+{
+ return (addr & htonl(0xfff00000)) == htonl(0xac100000);
+}
+
+static inline bool ipv4_is_private_192(__be32 addr)
+{
+ return (addr & htonl(0xffff0000)) == htonl(0xc0a80000);
+}
+
+static inline bool ipv4_is_linklocal_169(__be32 addr)
+{
+ return (addr & htonl(0xffff0000)) == htonl(0xa9fe0000);
+}
+
+static inline bool ipv4_is_anycast_6to4(__be32 addr)
+{
+ return (addr & htonl(0xffffff00)) == htonl(0xc0586300);
+}
+
+static inline bool ipv4_is_test_192(__be32 addr)
+{
+ return (addr & htonl(0xffffff00)) == htonl(0xc0000200);
+}
+
+static inline bool ipv4_is_test_198(__be32 addr)
+{
+ return (addr & htonl(0xfffe0000)) == htonl(0xc6120000);
+}
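+
+/*
+ * Usage sketch: the helpers above compose naturally; for example, an RFC1918
+ * (private range) check can be built from the three private-range tests.
+ */
+#if 0 /* illustrative only */
+static bool example_ipv4_is_rfc1918(__be32 addr)
+{
+	return ipv4_is_private_10(addr) ||
+	       ipv4_is_private_172(addr) ||
+	       ipv4_is_private_192(addr);
+}
+#endif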
+#endif /* _LINUX_IN_H */
diff --git a/include/linux/in6.h b/include/linux/in6.h
new file mode 100644
index 000000000..34edf1f6c
--- /dev/null
+++ b/include/linux/in6.h
@@ -0,0 +1,48 @@
+/*
+ * Types and definitions for AF_INET6
+ * Linux INET6 implementation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ *
+ * Sources:
+ * IPv6 Program Interfaces for BSD Systems
+ * <draft-ietf-ipngwg-bsd-api-05.txt>
+ *
+ * Advanced Sockets API for IPv6
+ * <draft-stevens-advanced-api-00.txt>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IN6_H
+#define _LINUX_IN6_H
+
+#include <uapi/linux/in6.h>
+
+/* IPv6 Wildcard Address (::) and Loopback Address (::1) defined in RFC2553
+ * NOTE: Be aware that the IN6ADDR_* constants and in6addr_* externals are
+ * defined in network byte order, not in host byte order as are the IPv4
+ * equivalents
+ */
+extern const struct in6_addr in6addr_any;
+#define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }
+extern const struct in6_addr in6addr_loopback;
+#define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+extern const struct in6_addr in6addr_linklocal_allnodes;
+#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \
+ { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+extern const struct in6_addr in6addr_linklocal_allrouters;
+#define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \
+ { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } }
+extern const struct in6_addr in6addr_interfacelocal_allnodes;
+#define IN6ADDR_INTERFACELOCAL_ALLNODES_INIT \
+ { { { 0xff,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
+extern const struct in6_addr in6addr_interfacelocal_allrouters;
+#define IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT \
+ { { { 0xff,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } }
+extern const struct in6_addr in6addr_sitelocal_allrouters;
+#define IN6ADDR_SITELOCAL_ALLROUTERS_INIT \
+ { { { 0xff,5,0,0,0,0,0,0,0,0,0,0,0,0,0,2 } } }
+#endif
diff --git a/include/linux/inet.h b/include/linux/inet.h
new file mode 100644
index 000000000..4cca05c96
--- /dev/null
+++ b/include/linux/inet.h
@@ -0,0 +1,57 @@
+/*
+ * Swansea University Computer Society NET3
+ *
+ * This work is derived from NET2Debugged, which is in turn derived
+ * from NET2D which was written by:
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This work was derived from Ross Biro's inspirational work
+ * for the LINUX operating system. His version numbers were:
+ *
+ * $Id: Space.c,v 0.8.4.5 1992/12/12 19:25:04 bir7 Exp $
+ * $Id: arp.c,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $
+ * $Id: arp.h,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $
+ * $Id: dev.c,v 0.8.4.13 1993/01/23 18:00:11 bir7 Exp $
+ * $Id: dev.h,v 0.8.4.7 1993/01/23 18:00:11 bir7 Exp $
+ * $Id: eth.c,v 0.8.4.4 1993/01/22 23:21:38 bir7 Exp $
+ * $Id: eth.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $
+ * $Id: icmp.c,v 0.8.4.9 1993/01/23 18:00:11 bir7 Exp $
+ * $Id: icmp.h,v 0.8.4.2 1992/11/15 14:55:30 bir7 Exp $
+ * $Id: ip.c,v 0.8.4.8 1992/12/12 19:25:04 bir7 Exp $
+ * $Id: ip.h,v 0.8.4.2 1993/01/23 18:00:11 bir7 Exp $
+ * $Id: loopback.c,v 0.8.4.8 1993/01/23 18:00:11 bir7 Exp $
+ * $Id: packet.c,v 0.8.4.7 1993/01/26 22:04:00 bir7 Exp $
+ * $Id: protocols.c,v 0.8.4.3 1992/11/15 14:55:30 bir7 Exp $
+ * $Id: raw.c,v 0.8.4.12 1993/01/26 22:04:00 bir7 Exp $
+ * $Id: sock.c,v 0.8.4.6 1993/01/28 22:30:00 bir7 Exp $
+ * $Id: sock.h,v 0.8.4.7 1993/01/26 22:04:00 bir7 Exp $
+ * $Id: tcp.c,v 0.8.4.16 1993/01/26 22:04:00 bir7 Exp $
+ * $Id: tcp.h,v 0.8.4.7 1993/01/22 22:58:08 bir7 Exp $
+ * $Id: timer.c,v 0.8.4.8 1993/01/23 18:00:11 bir7 Exp $
+ * $Id: timer.h,v 0.8.4.2 1993/01/23 18:00:11 bir7 Exp $
+ * $Id: udp.c,v 0.8.4.12 1993/01/26 22:04:00 bir7 Exp $
+ * $Id: udp.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $
+ * $Id: we.c,v 0.8.4.10 1993/01/23 18:00:11 bir7 Exp $
+ * $Id: wereg.h,v 0.8.4.1 1992/11/10 00:17:18 bir7 Exp $
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_INET_H
+#define _LINUX_INET_H
+
+#include <linux/types.h>
+
+/*
+ * These mimic similar macros defined in user-space for inet_ntop(3).
+ * See /usr/include/netinet/in.h.
+ */
+#define INET_ADDRSTRLEN (16)
+#define INET6_ADDRSTRLEN (48)
+
+extern __be32 in_aton(const char *str);
+extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
+extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
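+
+/*
+ * Usage sketch: parsing a dotted-quad string with in4_pton(). A negative
+ * srclen means the string is NUL-terminated and a negative delim means no
+ * terminating delimiter is expected; the function returns 1 on success.
+ */
+#if 0 /* illustrative only */
+static int example_parse_ipv4(const char *str, __be32 *addr)
+{
+	if (!in4_pton(str, -1, (u8 *)addr, -1, NULL))
+		return -EINVAL;
+	return 0;
+}
+#endif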
+#endif /* _LINUX_INET_H */
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
new file mode 100644
index 000000000..ac48b10c9
--- /dev/null
+++ b/include/linux/inet_diag.h
@@ -0,0 +1,47 @@
+#ifndef _INET_DIAG_H_
+#define _INET_DIAG_H_ 1
+
+#include <uapi/linux/inet_diag.h>
+
+struct sock;
+struct inet_hashinfo;
+struct nlattr;
+struct nlmsghdr;
+struct sk_buff;
+struct netlink_callback;
+
+struct inet_diag_handler {
+ void (*dump)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc);
+
+ int (*dump_one)(struct sk_buff *in_skb,
+ const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req);
+
+ void (*idiag_get_info)(struct sock *sk,
+ struct inet_diag_msg *r,
+ void *info);
+ __u16 idiag_type;
+};
+
+struct inet_connection_sock;
+int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
+ struct sk_buff *skb, const struct inet_diag_req_v2 *req,
+ struct user_namespace *user_ns,
+ u32 pid, u32 seq, u16 nlmsg_flags,
+ const struct nlmsghdr *unlh);
+void inet_diag_dump_icsk(struct inet_hashinfo *h, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ const struct inet_diag_req_v2 *r,
+ struct nlattr *bc);
+int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
+ struct sk_buff *in_skb, const struct nlmsghdr *nlh,
+ const struct inet_diag_req_v2 *req);
+
+int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
+
+extern int inet_diag_register(const struct inet_diag_handler *handler);
+extern void inet_diag_unregister(const struct inet_diag_handler *handler);
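+
+/*
+ * Usage sketch: a protocol module fills in an inet_diag_handler and registers
+ * it so that socket-diag dump requests for its protocol reach the callbacks.
+ * The callback names and protocol number below are hypothetical placeholders
+ * that must match the prototypes in the handler structure above.
+ */
+#if 0 /* illustrative only */
+static const struct inet_diag_handler example_diag_handler = {
+	.dump		= example_diag_dump,
+	.dump_one	= example_diag_dump_one,
+	.idiag_get_info	= example_diag_get_info,
+	.idiag_type	= IPPROTO_UDP,
+};
+
+static int __init example_diag_init(void)
+{
+	return inet_diag_register(&example_diag_handler);
+}
+
+static void __exit example_diag_exit(void)
+{
+	inet_diag_unregister(&example_diag_handler);
+}
+#endif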
+#endif /* _INET_DIAG_H_ */
diff --git a/include/linux/inet_lro.h b/include/linux/inet_lro.h
new file mode 100644
index 000000000..9a715cfa1
--- /dev/null
+++ b/include/linux/inet_lro.h
@@ -0,0 +1,142 @@
+/*
+ * linux/include/linux/inet_lro.h
+ *
+ * Large Receive Offload (ipv4 / tcp)
+ *
+ * (C) Copyright IBM Corp. 2007
+ *
+ * Authors:
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __INET_LRO_H_
+#define __INET_LRO_H_
+
+#include <net/ip.h>
+#include <net/tcp.h>
+
+/*
+ * LRO statistics
+ */
+
+struct net_lro_stats {
+ unsigned long aggregated;
+ unsigned long flushed;
+ unsigned long no_desc;
+};
+
+/*
+ * LRO descriptor for a tcp session
+ */
+struct net_lro_desc {
+ struct sk_buff *parent;
+ struct sk_buff *last_skb;
+ struct skb_frag_struct *next_frag;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ __wsum data_csum;
+ __be32 tcp_rcv_tsecr;
+ __be32 tcp_rcv_tsval;
+ __be32 tcp_ack;
+ u32 tcp_next_seq;
+ u32 skb_tot_frags_len;
+ u16 ip_tot_len;
+ u16 tcp_saw_tstamp; /* timestamps enabled */
+ __be16 tcp_window;
+ int pkt_aggr_cnt; /* counts aggregated packets */
+ int vlan_packet;
+ int mss;
+ int active;
+};
+
+/*
+ * Large Receive Offload (LRO) Manager
+ *
+ * Fields must be set by driver
+ */
+
+struct net_lro_mgr {
+ struct net_device *dev;
+ struct net_lro_stats stats;
+
+ /* LRO features */
+ unsigned long features;
+#define LRO_F_NAPI 1 /* Pass packets to stack via NAPI */
+#define LRO_F_EXTRACT_VLAN_ID 2 /* Set flag if VLAN IDs are extracted
+ from received packets and eth protocol
+ is still ETH_P_8021Q */
+
+ /*
+ * Set for generated SKBs that are not added to
+ * the frag list in fragmented mode
+ */
+ u32 ip_summed;
+ u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
+ * or CHECKSUM_NONE */
+
+ int max_desc; /* Max number of LRO descriptors */
+ int max_aggr; /* Max number of LRO packets to be aggregated */
+
+ int frag_align_pad; /* Padding required to properly align layer 3
+ * headers in generated skb when using frags */
+
+ struct net_lro_desc *lro_arr; /* Array of LRO descriptors */
+
+ /*
+ * Optimized driver functions
+ *
+ * get_skb_header: returns tcp and ip header for packet in SKB
+ */
+ int (*get_skb_header)(struct sk_buff *skb, void **ip_hdr,
+ void **tcpudp_hdr, u64 *hdr_flags, void *priv);
+
+ /* hdr_flags: */
+#define LRO_IPV4 1 /* ip_hdr is IPv4 header */
+#define LRO_TCP 2 /* tcpudp_hdr is TCP header */
+
+ /*
+ * get_frag_header: returns mac, tcp and ip header for packet in SKB
+ *
+ * @hdr_flags: Indicate what kind of LRO has to be done
+ * (IPv4/IPv6/TCP/UDP)
+ */
+ int (*get_frag_header)(struct skb_frag_struct *frag, void **mac_hdr,
+ void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
+ void *priv);
+};
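+
+/*
+ * Usage sketch: a driver embeds a net_lro_mgr in its adapter structure and
+ * fills in the fields above before use; the receive path then feeds packets
+ * to lro_receive_skb() and flushes with lro_flush_all() at the end of a NAPI
+ * poll (both declared below). The adapter structure, descriptor array and
+ * get_skb_header callback are hypothetical.
+ */
+#if 0 /* illustrative only */
+#define EXAMPLE_LRO_MAX_DESC	8
+
+static void example_lro_init(struct example_adapter *adapter)
+{
+	struct net_lro_mgr *mgr = &adapter->lro_mgr;
+
+	mgr->dev		= adapter->netdev;
+	mgr->features		= LRO_F_NAPI;
+	mgr->ip_summed		= CHECKSUM_UNNECESSARY;
+	mgr->ip_summed_aggr	= CHECKSUM_UNNECESSARY;
+	mgr->max_desc		= EXAMPLE_LRO_MAX_DESC;
+	mgr->max_aggr		= 32;
+	mgr->lro_arr		= adapter->lro_desc;
+	mgr->get_skb_header	= example_get_skb_header;
+}
+#endif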
+
+/*
+ * Processes a SKB
+ *
+ * @lro_mgr: LRO manager to use
+ * @skb: SKB to aggregate
+ * @priv: Private data that may be used by driver functions
+ * (for example get_tcp_ip_hdr)
+ */
+
+void lro_receive_skb(struct net_lro_mgr *lro_mgr,
+ struct sk_buff *skb,
+ void *priv);
+/*
+ * Forward all aggregated SKBs held by lro_mgr to network stack
+ */
+
+void lro_flush_all(struct net_lro_mgr *lro_mgr);
+
+#endif
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
new file mode 100644
index 000000000..0a21fbefd
--- /dev/null
+++ b/include/linux/inetdevice.h
@@ -0,0 +1,258 @@
+#ifndef _LINUX_INETDEVICE_H
+#define _LINUX_INETDEVICE_H
+
+#ifdef __KERNEL__
+
+#include <linux/bitmap.h>
+#include <linux/if.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include <linux/timer.h>
+#include <linux/sysctl.h>
+#include <linux/rtnetlink.h>
+
+struct ipv4_devconf {
+ void *sysctl;
+ int data[IPV4_DEVCONF_MAX];
+ DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
+};
+
+#define MC_HASH_SZ_LOG 9
+
+struct in_device {
+ struct net_device *dev;
+ atomic_t refcnt;
+ int dead;
+ struct in_ifaddr *ifa_list; /* IP ifaddr chain */
+
+ struct ip_mc_list __rcu *mc_list; /* IP multicast filter chain */
+ struct ip_mc_list __rcu * __rcu *mc_hash;
+
+ int mc_count; /* Number of installed mcasts */
+ spinlock_t mc_tomb_lock;
+ struct ip_mc_list *mc_tomb;
+ unsigned long mr_v1_seen;
+ unsigned long mr_v2_seen;
+ unsigned long mr_maxdelay;
+ unsigned char mr_qrv;
+ unsigned char mr_gq_running;
+ unsigned char mr_ifc_count;
+ struct timer_list mr_gq_timer; /* general query timer */
+ struct timer_list mr_ifc_timer; /* interface change timer */
+
+ struct neigh_parms *arp_parms;
+ struct ipv4_devconf cnf;
+ struct rcu_head rcu_head;
+};
+
+#define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
+#define IPV4_DEVCONF_ALL(net, attr) \
+ IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
+
+static inline int ipv4_devconf_get(struct in_device *in_dev, int index)
+{
+ index--;
+ return in_dev->cnf.data[index];
+}
+
+static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
+ int val)
+{
+ index--;
+ set_bit(index, in_dev->cnf.state);
+ in_dev->cnf.data[index] = val;
+}
+
+static inline void ipv4_devconf_setall(struct in_device *in_dev)
+{
+ bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX);
+}
+
+#define IN_DEV_CONF_GET(in_dev, attr) \
+ ipv4_devconf_get((in_dev), IPV4_DEVCONF_ ## attr)
+#define IN_DEV_CONF_SET(in_dev, attr, val) \
+ ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))
+
+#define IN_DEV_ANDCONF(in_dev, attr) \
+ (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
+ IN_DEV_CONF_GET((in_dev), attr))
+
+#define IN_DEV_NET_ORCONF(in_dev, net, attr) \
+ (IPV4_DEVCONF_ALL(net, attr) || \
+ IN_DEV_CONF_GET((in_dev), attr))
+
+#define IN_DEV_ORCONF(in_dev, attr) \
+ IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
+
+#define IN_DEV_MAXCONF(in_dev, attr) \
+ (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
+ IN_DEV_CONF_GET((in_dev), attr)))
+
+#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
+#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
+#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
+#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
+#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
+ ACCEPT_SOURCE_ROUTE)
+#define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)
+#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
+
+#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
+#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
+#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
+#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
+#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
+#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
+ SECURE_REDIRECTS)
+#define IN_DEV_IDTAG(in_dev) IN_DEV_CONF_GET(in_dev, TAG)
+#define IN_DEV_MEDIUM_ID(in_dev) IN_DEV_CONF_GET(in_dev, MEDIUM_ID)
+#define IN_DEV_PROMOTE_SECONDARIES(in_dev) \
+ IN_DEV_ORCONF((in_dev), \
+ PROMOTE_SECONDARIES)
+#define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
+#define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net) \
+ IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET)
+
+#define IN_DEV_RX_REDIRECTS(in_dev) \
+ ((IN_DEV_FORWARD(in_dev) && \
+ IN_DEV_ANDCONF((in_dev), ACCEPT_REDIRECTS)) \
+ || (!IN_DEV_FORWARD(in_dev) && \
+ IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
+
+#define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER)
+#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
+#define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
+#define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
+#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
+
+struct in_ifaddr {
+ struct hlist_node hash;
+ struct in_ifaddr *ifa_next;
+ struct in_device *ifa_dev;
+ struct rcu_head rcu_head;
+ __be32 ifa_local;
+ __be32 ifa_address;
+ __be32 ifa_mask;
+ __be32 ifa_broadcast;
+ unsigned char ifa_scope;
+ unsigned char ifa_prefixlen;
+ __u32 ifa_flags;
+ char ifa_label[IFNAMSIZ];
+
+ /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
+ __u32 ifa_valid_lft;
+ __u32 ifa_preferred_lft;
+ unsigned long ifa_cstamp; /* created timestamp */
+ unsigned long ifa_tstamp; /* updated timestamp */
+};
+
+int register_inetaddr_notifier(struct notifier_block *nb);
+int unregister_inetaddr_notifier(struct notifier_block *nb);
+
+void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+ struct ipv4_devconf *devconf);
+
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
+static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
+{
+ return __ip_dev_find(net, addr, true);
+}
+
+int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+void devinet_init(void);
+struct in_device *inetdev_by_index(struct net *, int);
+__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev, __be32 dst,
+ __be32 local, int scope);
+struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+ __be32 mask);
+static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
+{
+ return !((addr^ifa->ifa_address)&ifa->ifa_mask);
+}
+
+/*
+ * Check if a mask is acceptable.
+ */
+
+static __inline__ int bad_mask(__be32 mask, __be32 addr)
+{
+ __u32 hmask;
+ if (addr & (mask = ~mask))
+ return 1;
+ hmask = ntohl(mask);
+ if (hmask & (hmask+1))
+ return 1;
+ return 0;
+}
+
+#define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \
+ for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next)
+
+#define for_ifa(in_dev) { struct in_ifaddr *ifa; \
+ for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next)
+
+
+#define endfor_ifa(in_dev) }
+
+static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->ip_ptr);
+}
+
+static inline struct in_device *in_dev_get(const struct net_device *dev)
+{
+ struct in_device *in_dev;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(dev);
+ if (in_dev)
+ atomic_inc(&in_dev->refcnt);
+ rcu_read_unlock();
+ return in_dev;
+}
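+
+/*
+ * Usage sketch: walking a device's primary IPv4 addresses under RCU using the
+ * iterator macros above. Returns the first primary address or 0 if none.
+ */
+#if 0 /* illustrative only */
+static __be32 example_first_primary_addr(const struct net_device *dev)
+{
+	struct in_device *in_dev;
+	__be32 addr = 0;
+
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(dev);
+	if (in_dev) {
+		for_primary_ifa(in_dev) {
+			addr = ifa->ifa_local;
+			break;
+		} endfor_ifa(in_dev);
+	}
+	rcu_read_unlock();
+
+	return addr;
+}
+#endif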
+
+static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
+{
+ return rtnl_dereference(dev->ip_ptr);
+}
+
+static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev)
+{
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+
+ return in_dev ? in_dev->arp_parms : NULL;
+}
+
+void in_dev_finish_destroy(struct in_device *idev);
+
+static inline void in_dev_put(struct in_device *idev)
+{
+ if (atomic_dec_and_test(&idev->refcnt))
+ in_dev_finish_destroy(idev);
+}
+
+#define __in_dev_put(idev) atomic_dec(&(idev)->refcnt)
+#define in_dev_hold(idev) atomic_inc(&(idev)->refcnt)
+
+#endif /* __KERNEL__ */
+
+static __inline__ __be32 inet_make_mask(int logmask)
+{
+ if (logmask)
+ return htonl(~((1U<<(32-logmask))-1));
+ return 0;
+}
+
+static __inline__ int inet_mask_len(__be32 mask)
+{
+ __u32 hmask = ntohl(mask);
+ if (!hmask)
+ return 0;
+ return 32 - ffz(~hmask);
+}
+
+
+#endif /* _LINUX_INETDEVICE_H */
diff --git a/include/linux/init.h b/include/linux/init.h
new file mode 100644
index 000000000..21b6d768e
--- /dev/null
+++ b/include/linux/init.h
@@ -0,0 +1,389 @@
+#ifndef _LINUX_INIT_H
+#define _LINUX_INIT_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/* These macros are used to mark some functions or
+ * initialized data (doesn't apply to uninitialized data)
+ * as `initialization' functions. The kernel can take this
+ * as a hint that the function is used only during the initialization
+ * phase and free up the memory resources used by it afterwards.
+ *
+ * Usage:
+ * For functions:
+ *
+ * You should add __init immediately before the function name, like:
+ *
+ * static void __init initme(int x, int y)
+ * {
+ * extern int z; z = x * y;
+ * }
+ *
+ * If the function has a prototype somewhere, you can also add
+ * __init between closing brace of the prototype and semicolon:
+ *
+ * extern int initialize_foobar_device(int, int, int) __init;
+ *
+ * For initialized data:
+ * You should insert __initdata or __initconst between the variable name
+ * and equal sign followed by value, e.g.:
+ *
+ * static int init_variable __initdata = 0;
+ * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
+ *
+ * Don't forget to initialize data not at file scope, i.e. within a function,
+ * as gcc otherwise puts the data into the bss section and not into the init
+ * section.
+ */
+
+/* These are for everybody (although not all archs will actually
+ discard it in modules) */
+#define __init __section(.init.text) __cold notrace
+#define __initdata __section(.init.data)
+#define __initconst __constsection(.init.rodata)
+#define __exitdata __section(.exit.data)
+#define __exit_call __used __section(.exitcall.exit)
+
+/*
+ * Some architectures have toolchains which do not handle rodata attributes
+ * correctly. For those, disable the special sections for const, so that other
+ * architectures can annotate correctly.
+ */
+#ifdef CONFIG_BROKEN_RODATA
+#define __constsection(x)
+#else
+#define __constsection(x) __section(x)
+#endif
+
+/*
+ * modpost checks for section mismatches during the kernel build.
+ * A section mismatch happens when there are references from a
+ * code or data section to an init section (both code or data).
+ * The init sections are (for most archs) discarded by the kernel
+ * when early init has completed so all such references are potential bugs.
+ * For exit sections the same issue exists.
+ *
+ * The following markers are used for the cases where the reference to
+ * the *init / *exit section (code or data) is valid and will teach
+ * modpost not to issue a warning. The intended semantics are that code or
+ * data tagged __ref* can reference code or data from an init section without
+ * producing a warning (of course, no warning does not mean the code is
+ * correct, so optimally document why the __ref is needed and why it's OK).
+ *
+ * The markers follow the same syntax rules as __init / __initdata.
+ */
+#define __ref __section(.ref.text) noinline
+#define __refdata __section(.ref.data)
+#define __refconst __constsection(.ref.rodata)
+
+/* compatibility defines */
+#define __init_refok __ref
+#define __initdata_refok __refdata
+#define __exit_refok __ref
+
+
+#ifdef MODULE
+#define __exitused
+#else
+#define __exitused __used
+#endif
+
+#define __exit __section(.exit.text) __exitused __cold notrace
+
+/* temporary, until all users are removed */
+#define __cpuinit
+#define __cpuinitdata
+#define __cpuinitconst
+#define __cpuexit
+#define __cpuexitdata
+#define __cpuexitconst
+
+/* Used for MEMORY_HOTPLUG */
+#define __meminit __section(.meminit.text) __cold notrace
+#define __meminitdata __section(.meminit.data)
+#define __meminitconst __constsection(.meminit.rodata)
+#define __memexit __section(.memexit.text) __exitused __cold notrace
+#define __memexitdata __section(.memexit.data)
+#define __memexitconst __constsection(.memexit.rodata)
+
+/* For assembly routines */
+#define __HEAD .section ".head.text","ax"
+#define __INIT .section ".init.text","ax"
+#define __FINIT .previous
+
+#define __INITDATA .section ".init.data","aw",%progbits
+#define __INITRODATA .section ".init.rodata","a",%progbits
+#define __FINITDATA .previous
+
+/* temporary, until all users are removed */
+#define __CPUINIT
+
+#define __MEMINIT .section ".meminit.text", "ax"
+#define __MEMINITDATA .section ".meminit.data", "aw"
+#define __MEMINITRODATA .section ".meminit.rodata", "a"
+
+/* silence warnings when references are OK */
+#define __REF .section ".ref.text", "ax"
+#define __REFDATA .section ".ref.data", "aw"
+#define __REFCONST .section ".ref.rodata", "a"
+
+#ifndef __ASSEMBLY__
+/*
+ * Used for initialization calls..
+ */
+typedef int (*initcall_t)(void);
+typedef void (*exitcall_t)(void);
+
+extern initcall_t __con_initcall_start[], __con_initcall_end[];
+extern initcall_t __security_initcall_start[], __security_initcall_end[];
+
+/* Used for constructor calls. */
+typedef void (*ctor_fn_t)(void);
+
+/* Defined in init/main.c */
+extern int do_one_initcall(initcall_t fn);
+extern char __initdata boot_command_line[];
+extern char *saved_command_line;
+extern unsigned int reset_devices;
+
+/* used by init/main.c */
+void setup_arch(char **);
+void prepare_namespace(void);
+void __init load_default_modules(void);
+int __init init_rootfs(void);
+
+extern void (*late_time_init)(void);
+
+extern bool initcall_debug;
+
+#endif
+
+#ifndef MODULE
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_LTO
+/* Work around an LTO gcc problem: when there is no reference to a variable
+ * in a module it will be moved to the end of the program. This causes
+ * reordering of initcalls which the kernel does not like.
+ * Add a dummy reference function to avoid this. The function is
+ * deleted by the linker.
+ */
+#define LTO_REFERENCE_INITCALL(x) \
+ ; /* yes this is needed */ \
+ static __used __exit void *reference_##x(void) \
+ { \
+ return &x; \
+ }
+#else
+#define LTO_REFERENCE_INITCALL(x)
+#endif
+
+/* initcalls are now grouped by functionality into separate
+ * subsections. Ordering inside the subsections is determined
+ * by link order.
+ * For backwards compatibility, initcall() puts the call in
+ * the device init subsection.
+ *
+ * The `id' arg to __define_initcall() is needed so that multiple initcalls
+ * can point at the same handler without causing duplicate-symbol build errors.
+ */
+
+#define __define_initcall(fn, id) \
+ static initcall_t __initcall_##fn##id __used \
+ __attribute__((__section__(".initcall" #id ".init"))) = fn; \
+ LTO_REFERENCE_INITCALL(__initcall_##fn##id)
+
+/*
+ * Early initcalls run before initializing SMP.
+ *
+ * Only for built-in code, not modules.
+ */
+#define early_initcall(fn) __define_initcall(fn, early)
+
+/*
+ * A "pure" initcall has no dependencies on anything else, and purely
+ * initializes variables that couldn't be statically initialized.
+ *
+ * This only exists for built-in code, not for modules.
+ * Keep main.c:initcall_level_names[] in sync.
+ */
+#define pure_initcall(fn) __define_initcall(fn, 0)
+
+#define core_initcall(fn) __define_initcall(fn, 1)
+#define core_initcall_sync(fn) __define_initcall(fn, 1s)
+#define postcore_initcall(fn) __define_initcall(fn, 2)
+#define postcore_initcall_sync(fn) __define_initcall(fn, 2s)
+#define arch_initcall(fn) __define_initcall(fn, 3)
+#define arch_initcall_sync(fn) __define_initcall(fn, 3s)
+#define subsys_initcall(fn) __define_initcall(fn, 4)
+#define subsys_initcall_sync(fn) __define_initcall(fn, 4s)
+#define fs_initcall(fn) __define_initcall(fn, 5)
+#define fs_initcall_sync(fn) __define_initcall(fn, 5s)
+#define rootfs_initcall(fn) __define_initcall(fn, rootfs)
+#define device_initcall(fn) __define_initcall(fn, 6)
+#define device_initcall_sync(fn) __define_initcall(fn, 6s)
+#define late_initcall(fn) __define_initcall(fn, 7)
+#define late_initcall_sync(fn) __define_initcall(fn, 7s)
+
+#define __initcall(fn) device_initcall(fn)
+
+#define __exitcall(fn) \
+ static exitcall_t __exitcall_##fn __exit_call = fn
+
+#define console_initcall(fn) \
+ static initcall_t __initcall_##fn \
+ __used __section(.con_initcall.init) = fn
+
+#define security_initcall(fn) \
+ static initcall_t __initcall_##fn \
+ __used __section(.security_initcall.init) = fn
+
+struct obs_kernel_param {
+ const char *str;
+ int (*setup_func)(char *);
+ int early;
+};
+
+/*
+ * Only for really core code. See moduleparam.h for the normal way.
+ *
+ * Force the alignment so the compiler doesn't space elements of the
+ * obs_kernel_param "array" too far apart in .init.setup.
+ */
+#define __setup_param(str, unique_id, fn, early) \
+ static const char __setup_str_##unique_id[] __initconst \
+ __aligned(1) = str; \
+ static struct obs_kernel_param __setup_##unique_id \
+ __used __section(.init.setup) \
+ __attribute__((aligned((sizeof(long))))) \
+ = { __setup_str_##unique_id, fn, early }
+
+#define __setup(str, fn) \
+ __setup_param(str, fn, fn, 0)
+
+/*
+ * NOTE: fn is as per module_param, not __setup!
+ * Emits a warning if fn returns non-zero.
+ */
+#define early_param(str, fn) \
+ __setup_param(str, fn, fn, 1)
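+
+/*
+ * Usage sketch: handling a hypothetical "example_debug" boot parameter with
+ * early_param(). The handler must be __init and should return 0 on success.
+ */
+#if 0 /* illustrative only */
+static bool example_debug;
+
+static int __init example_debug_setup(char *str)
+{
+	example_debug = true;
+	return 0;
+}
+early_param("example_debug", example_debug_setup);
+#endif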
+
+#define early_param_on_off(str_on, str_off, var, config) \
+ \
+ int var = IS_ENABLED(config); \
+ \
+ static int __init parse_##var##_on(char *arg) \
+ { \
+ var = 1; \
+ return 0; \
+ } \
+ __setup_param(str_on, parse_##var##_on, parse_##var##_on, 1); \
+ \
+ static int __init parse_##var##_off(char *arg) \
+ { \
+ var = 0; \
+ return 0; \
+ } \
+ __setup_param(str_off, parse_##var##_off, parse_##var##_off, 1)
+
+/* Relies on boot_command_line being set */
+void __init parse_early_param(void);
+void __init parse_early_options(char *cmdline);
+#endif /* __ASSEMBLY__ */
+
+/**
+ * module_init() - driver initialization entry point
+ * @x: function to be run at kernel boot time or module insertion
+ *
+ * module_init() will either be called during do_initcalls() (if
+ * builtin) or at module insertion time (if a module). There can only
+ * be one per module.
+ */
+#define module_init(x) __initcall(x);
+
+/**
+ * module_exit() - driver exit entry point
+ * @x: function to be run when driver is removed
+ *
+ * module_exit() will wrap the driver clean-up code
+ * with cleanup_module() when used with rmmod when
+ * the driver is a module. If the driver is statically
+ * compiled into the kernel, module_exit() has no effect.
+ * There can only be one per module.
+ */
+#define module_exit(x) __exitcall(x);
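+
+/*
+ * Usage sketch: a minimal driver using module_init()/module_exit(). The
+ * functions are marked __init/__exit so the init text can be discarded after
+ * boot (and the exit text dropped entirely for built-in code).
+ */
+#if 0 /* illustrative only */
+static int __init example_init(void)
+{
+	pr_info("example: loaded\n");
+	return 0;
+}
+module_init(example_init);
+
+static void __exit example_exit(void)
+{
+	pr_info("example: unloaded\n");
+}
+module_exit(example_exit);
+#endif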
+
+#else /* MODULE */
+
+/*
+ * In most cases loadable modules do not need custom
+ * initcall levels. There are still some valid cases where
+ * a driver may be needed early if built in, but does not
+ * matter when built as a loadable module, such as bus
+ * snooping debug drivers.
+ */
+#define early_initcall(fn) module_init(fn)
+#define core_initcall(fn) module_init(fn)
+#define core_initcall_sync(fn) module_init(fn)
+#define postcore_initcall(fn) module_init(fn)
+#define postcore_initcall_sync(fn) module_init(fn)
+#define arch_initcall(fn) module_init(fn)
+#define subsys_initcall(fn) module_init(fn)
+#define subsys_initcall_sync(fn) module_init(fn)
+#define fs_initcall(fn) module_init(fn)
+#define fs_initcall_sync(fn) module_init(fn)
+#define rootfs_initcall(fn) module_init(fn)
+#define device_initcall(fn) module_init(fn)
+#define device_initcall_sync(fn) module_init(fn)
+#define late_initcall(fn) module_init(fn)
+#define late_initcall_sync(fn) module_init(fn)
+
+#define console_initcall(fn) module_init(fn)
+#define security_initcall(fn) module_init(fn)
+
+/* Each module must use one module_init(). */
+#define module_init(initfn) \
+ static inline initcall_t __inittest(void) \
+ { return initfn; } \
+ int init_module(void) __attribute__((alias(#initfn)));
+
+/* This is only required if you want to be unloadable. */
+#define module_exit(exitfn) \
+ static inline exitcall_t __exittest(void) \
+ { return exitfn; } \
+ void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+#define __setup_param(str, unique_id, fn) /* nothing */
+#define __setup(str, func) /* nothing */
+#endif
+
+/* Data marked not to be saved by software suspend */
+#define __nosavedata __section(.data..nosave)
+
+/* This means "can be init if no module support, otherwise module load
+ may call it." */
+#ifdef CONFIG_MODULES
+#define __init_or_module
+#define __initdata_or_module
+#define __initconst_or_module
+#define __INIT_OR_MODULE .text
+#define __INITDATA_OR_MODULE .data
+#define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits
+#else
+#define __init_or_module __init
+#define __initdata_or_module __initdata
+#define __initconst_or_module __initconst
+#define __INIT_OR_MODULE __INIT
+#define __INITDATA_OR_MODULE __INITDATA
+#define __INITRODATA_OR_MODULE __INITRODATA
+#endif /*CONFIG_MODULES*/
+
+#ifdef MODULE
+#define __exit_p(x) x
+#else
+#define __exit_p(x) NULL
+#endif
+
+#endif /* _LINUX_INIT_H */
diff --git a/include/linux/init_ohci1394_dma.h b/include/linux/init_ohci1394_dma.h
new file mode 100644
index 000000000..3c03a4bba
--- /dev/null
+++ b/include/linux/init_ohci1394_dma.h
@@ -0,0 +1,4 @@
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+extern int __initdata init_ohci1394_dma_early;
+extern void __init init_ohci1394_dma_on_all_controllers(void);
+#endif
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
new file mode 100644
index 000000000..5c0e90dd6
--- /dev/null
+++ b/include/linux/init_task.h
@@ -0,0 +1,339 @@
+#ifndef _LINUX__INIT_TASK_H
+#define _LINUX__INIT_TASK_H
+
+#include <linux/rcupdate.h>
+#include <linux/irqflags.h>
+#include <linux/utsname.h>
+#include <linux/lockdep.h>
+#include <linux/ftrace.h>
+#include <linux/ipc.h>
+#include <linux/pid_namespace.h>
+#include <linux/user_namespace.h>
+#include <linux/securebits.h>
+#include <linux/seqlock.h>
+#include <linux/rbtree.h>
+#include <net/net_namespace.h>
+#include <linux/sched/rt.h>
+
+#ifdef CONFIG_SMP
+# define INIT_PUSHABLE_TASKS(tsk) \
+ .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
+#else
+# define INIT_PUSHABLE_TASKS(tsk)
+#endif
+
+extern struct files_struct init_files;
+extern struct fs_struct init_fs;
+
+#ifdef CONFIG_CGROUPS
+#define INIT_GROUP_RWSEM(sig) \
+ .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
+#else
+#define INIT_GROUP_RWSEM(sig)
+#endif
+
+#ifdef CONFIG_CPUSETS
+#define INIT_CPUSET_SEQ(tsk) \
+ .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
+#else
+#define INIT_CPUSET_SEQ(tsk)
+#endif
+
+#define INIT_SIGNALS(sig) { \
+ .nr_threads = 1, \
+ .thread_head = LIST_HEAD_INIT(init_task.thread_node), \
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
+ .shared_pending = { \
+ .list = LIST_HEAD_INIT(sig.shared_pending.list), \
+ .signal = {{0}}}, \
+ .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
+ .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
+ .rlim = INIT_RLIMITS, \
+ .cputimer = { \
+ .cputime = INIT_CPUTIME, \
+ .running = 0, \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ }, \
+ .cred_guard_mutex = \
+ __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
+ INIT_GROUP_RWSEM(sig) \
+}
+
+extern struct nsproxy init_nsproxy;
+
+#define INIT_SIGHAND(sighand) { \
+ .count = ATOMIC_INIT(1), \
+ .action = { { { .sa_handler = SIG_DFL, } }, }, \
+ .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
+ .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
+}
+
+extern struct group_info init_groups;
+
+#define INIT_STRUCT_PID { \
+ .count = ATOMIC_INIT(1), \
+ .tasks = { \
+ { .first = NULL }, \
+ { .first = NULL }, \
+ { .first = NULL }, \
+ }, \
+ .level = 0, \
+ .numbers = { { \
+ .nr = 0, \
+ .ns = &init_pid_ns, \
+ .pid_chain = { .next = NULL, .pprev = NULL }, \
+ }, } \
+}
+
+#define INIT_PID_LINK(type) \
+{ \
+ .node = { \
+ .next = NULL, \
+ .pprev = NULL, \
+ }, \
+ .pid = &init_struct_pid, \
+}
+
+#ifdef CONFIG_AUDITSYSCALL
+#define INIT_IDS \
+ .loginuid = INVALID_UID, \
+ .sessionid = (unsigned int)-1,
+#else
+#define INIT_IDS
+#endif
+
+#ifdef CONFIG_PREEMPT_RCU
+#define INIT_TASK_RCU_TREE_PREEMPT() \
+ .rcu_blocked_node = NULL,
+#else
+#define INIT_TASK_RCU_TREE_PREEMPT(tsk)
+#endif
+#ifdef CONFIG_PREEMPT_RCU
+#define INIT_TASK_RCU_PREEMPT(tsk) \
+ .rcu_read_lock_nesting = 0, \
+ .rcu_read_unlock_special.s = 0, \
+ .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
+ INIT_TASK_RCU_TREE_PREEMPT()
+#else
+#define INIT_TASK_RCU_PREEMPT(tsk)
+#endif
+#ifdef CONFIG_TASKS_RCU
+#define INIT_TASK_RCU_TASKS(tsk) \
+ .rcu_tasks_holdout = false, \
+ .rcu_tasks_holdout_list = \
+ LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
+ .rcu_tasks_idle_cpu = -1,
+#else
+#define INIT_TASK_RCU_TASKS(tsk)
+#endif
+
+extern struct cred init_cred;
+
+extern struct task_group root_task_group;
+
+#ifdef CONFIG_CGROUP_SCHED
+# define INIT_CGROUP_SCHED(tsk) \
+ .sched_task_group = &root_task_group,
+#else
+# define INIT_CGROUP_SCHED(tsk)
+#endif
+
+#ifdef CONFIG_PERF_EVENTS
+# define INIT_PERF_EVENTS(tsk) \
+ .perf_event_mutex = \
+ __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
+ .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
+#else
+# define INIT_PERF_EVENTS(tsk)
+#endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+# define INIT_VTIME(tsk) \
+ .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
+ .vtime_snap = 0, \
+ .vtime_snap_whence = VTIME_SYS,
+#else
+# define INIT_VTIME(tsk)
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+# define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = RB_ROOT, \
+ .pi_waiters_leftmost = NULL,
+#else
+# define INIT_RT_MUTEXES(tsk)
+#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+# define INIT_NUMA_BALANCING(tsk) \
+ .numa_preferred_nid = -1, \
+ .numa_group = NULL, \
+ .numa_faults = NULL,
+#else
+# define INIT_NUMA_BALANCING(tsk)
+#endif
+
+#ifdef CONFIG_KASAN
+# define INIT_KASAN(tsk) \
+ .kasan_depth = 1,
+#else
+# define INIT_KASAN(tsk)
+#endif
+
+/*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk! Base=0, limit=0x1fffff (=2MB)
+ */
+#ifdef CONFIG_SCHED_BFS
+#define INIT_TASK_COMM "BFS"
+#define INIT_TASK(tsk) \
+{ \
+ .state = 0, \
+ .stack = &init_thread_info, \
+ .usage = ATOMIC_INIT(2), \
+ .flags = PF_KTHREAD, \
+ .prio = NORMAL_PRIO, \
+ .static_prio = MAX_PRIO-20, \
+ .normal_prio = NORMAL_PRIO, \
+ .deadline = 0, \
+ .policy = SCHED_NORMAL, \
+ .cpus_allowed = CPU_MASK_ALL, \
+ .mm = NULL, \
+ .active_mm = &init_mm, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .run_list = LIST_HEAD_INIT(tsk.run_list), \
+ .time_slice = HZ, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ INIT_PUSHABLE_TASKS(tsk) \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+ .parent = &tsk, \
+ .children = LIST_HEAD_INIT(tsk.children), \
+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
+ .group_leader = &tsk, \
+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
+ RCU_POINTER_INITIALIZER(cred, &init_cred), \
+ .comm = INIT_TASK_COMM, \
+ .thread = INIT_THREAD, \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+ .sighand = &init_sighand, \
+ .nsproxy = &init_nsproxy, \
+ .pending = { \
+ .list = LIST_HEAD_INIT(tsk.pending.list), \
+ .signal = {{0}}}, \
+ .blocked = {{0}}, \
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
+ .journal_info = NULL, \
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
+ }, \
+ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
+ INIT_IDS \
+ INIT_PERF_EVENTS(tsk) \
+ INIT_TRACE_IRQFLAGS \
+ INIT_LOCKDEP \
+ INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
+ INIT_KASAN(tsk) \
+}
+#else /* CONFIG_SCHED_BFS */
+#define INIT_TASK_COMM "swapper"
+#define INIT_TASK(tsk) \
+{ \
+ .state = 0, \
+ .stack = &init_thread_info, \
+ .usage = ATOMIC_INIT(2), \
+ .flags = PF_KTHREAD, \
+ .prio = MAX_PRIO-20, \
+ .static_prio = MAX_PRIO-20, \
+ .normal_prio = MAX_PRIO-20, \
+ .policy = SCHED_NORMAL, \
+ .cpus_allowed = CPU_MASK_ALL, \
+ .nr_cpus_allowed= NR_CPUS, \
+ .mm = NULL, \
+ .active_mm = &init_mm, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+ .se = { \
+ .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
+ }, \
+ .rt = { \
+ .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
+ .time_slice = RR_TIMESLICE, \
+ }, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ INIT_PUSHABLE_TASKS(tsk) \
+ INIT_CGROUP_SCHED(tsk) \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+ .parent = &tsk, \
+ .children = LIST_HEAD_INIT(tsk.children), \
+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
+ .group_leader = &tsk, \
+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
+ RCU_POINTER_INITIALIZER(cred, &init_cred), \
+ .comm = INIT_TASK_COMM, \
+ .thread = INIT_THREAD, \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+ .sighand = &init_sighand, \
+ .nsproxy = &init_nsproxy, \
+ .pending = { \
+ .list = LIST_HEAD_INIT(tsk.pending.list), \
+ .signal = {{0}}}, \
+ .blocked = {{0}}, \
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
+ .journal_info = NULL, \
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
+ }, \
+ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \
+ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \
+ INIT_IDS \
+ INIT_PERF_EVENTS(tsk) \
+ INIT_TRACE_IRQFLAGS \
+ INIT_LOCKDEP \
+ INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
+ INIT_TASK_RCU_PREEMPT(tsk) \
+ INIT_TASK_RCU_TASKS(tsk) \
+ INIT_CPUSET_SEQ(tsk) \
+ INIT_RT_MUTEXES(tsk) \
+ INIT_VTIME(tsk) \
+ INIT_NUMA_BALANCING(tsk) \
+ INIT_KASAN(tsk) \
+}
+#endif /* CONFIG_SCHED_BFS */
+
+#define INIT_CPU_TIMERS(cpu_timers) \
+{ \
+ LIST_HEAD_INIT(cpu_timers[0]), \
+ LIST_HEAD_INIT(cpu_timers[1]), \
+ LIST_HEAD_INIT(cpu_timers[2]), \
+}
+
+/* Attach to the init_task data structure for proper alignment */
+#define __init_task_data __attribute__((__section__(".data..init_task")))
+
+
+#endif
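[Example -- not part of this patch] A condensed sketch of how INIT_TASK() and
__init_task_data are typically consumed, in the spirit of the kernel's
init/init_task.c; INIT_SIGNALS, INIT_SIGHAND and INIT_THREAD_INFO live outside
this header and are assumed here:

        static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
        static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

        /* the very first task, statically initialised at build time */
        struct task_struct init_task = INIT_TASK(init_task);
        EXPORT_SYMBOL(init_task);

        /* initial stack/thread_info, placed in .data..init_task for alignment */
        union thread_union init_thread_union __init_task_data = {
                INIT_THREAD_INFO(init_task)
        };
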
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
new file mode 100644
index 000000000..55289d261
--- /dev/null
+++ b/include/linux/initrd.h
@@ -0,0 +1,20 @@
+
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
+
+/* 1 = load ramdisk, 0 = don't load */
+extern int rd_doload;
+
+/* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_prompt;
+
+/* starting block # of image */
+extern int rd_image_start;
+
+/* 1 if it is not an error if initrd_start < memory_start */
+extern int initrd_below_start_ok;
+
+/* free_initrd_mem always gets called with the next two as arguments.. */
+extern unsigned long initrd_start, initrd_end;
+extern void free_initrd_mem(unsigned long, unsigned long);
+
+extern unsigned int real_root_dev;
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
new file mode 100644
index 000000000..23aede0b5
--- /dev/null
+++ b/include/linux/inotify.h
@@ -0,0 +1,22 @@
+/*
+ * Inode based directory notification for Linux
+ *
+ * Copyright (C) 2005 John McCutchan
+ */
+#ifndef _LINUX_INOTIFY_H
+#define _LINUX_INOTIFY_H
+
+#include <linux/sysctl.h>
+#include <uapi/linux/inotify.h>
+
+extern struct ctl_table inotify_table[]; /* for sysctl */
+
+#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
+ IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
+ IN_MOVED_TO | IN_CREATE | IN_DELETE | \
+ IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \
+ IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \
+ IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \
+ IN_ISDIR | IN_ONESHOT)
+
+#endif /* _LINUX_INOTIFY_H */
diff --git a/include/linux/input-polldev.h b/include/linux/input-polldev.h
new file mode 100644
index 000000000..246518267
--- /dev/null
+++ b/include/linux/input-polldev.h
@@ -0,0 +1,61 @@
+#ifndef _INPUT_POLLDEV_H
+#define _INPUT_POLLDEV_H
+
+/*
+ * Copyright (c) 2007 Dmitry Torokhov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/input.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct input_polled_dev - simple polled input device
+ * @private: private driver data.
+ * @open: driver-supplied method that prepares device for polling
+ * (enables the device and maybe flushes device state).
+ * @close: driver-supplied method that is called when device is no
+ * longer being polled. Used to put device into low power mode.
+ * @poll: driver-supplied method that polls the device and posts
+ * input events (mandatory).
+ * @poll_interval: specifies how often the poll() method should be called.
+ * Defaults to 500 msec unless overridden when registering the device.
+ * @poll_interval_max: specifies upper bound for the poll interval.
+ * Defaults to the initial value of @poll_interval.
+ * @poll_interval_min: specifies lower bound for the poll interval.
+ * Defaults to 0.
+ * @input: input device structure associated with the polled device.
+ * Must be properly initialized by the driver (id, name, phys, bits).
+ *
+ * Polled input device provides a skeleton for supporting simple input
+ * devices that do not raise interrupts but have to be periodically
+ * scanned or polled to detect changes in their state.
+ */
+struct input_polled_dev {
+ void *private;
+
+ void (*open)(struct input_polled_dev *dev);
+ void (*close)(struct input_polled_dev *dev);
+ void (*poll)(struct input_polled_dev *dev);
+ unsigned int poll_interval; /* msec */
+ unsigned int poll_interval_max; /* msec */
+ unsigned int poll_interval_min; /* msec */
+
+ struct input_dev *input;
+
+/* private: */
+ struct delayed_work work;
+
+ bool devres_managed;
+};
+
+struct input_polled_dev *input_allocate_polled_device(void);
+struct input_polled_dev *devm_input_allocate_polled_device(struct device *dev);
+void input_free_polled_device(struct input_polled_dev *dev);
+int input_register_polled_device(struct input_polled_dev *dev);
+void input_unregister_polled_device(struct input_polled_dev *dev);
+
+#endif
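[Example -- not part of this patch] A minimal sketch of a driver using the
polled-device API above; the foo_* names and the hardware read are assumptions:

        /* needs <linux/input-polldev.h> and <linux/platform_device.h> */
        static void foo_poll(struct input_polled_dev *poll_dev)
        {
                int x = foo_read_x_from_hw();   /* hypothetical hardware access */

                input_report_abs(poll_dev->input, ABS_X, x);
                input_sync(poll_dev->input);
        }

        static int foo_probe(struct platform_device *pdev)
        {
                struct input_polled_dev *poll_dev;

                poll_dev = devm_input_allocate_polled_device(&pdev->dev);
                if (!poll_dev)
                        return -ENOMEM;

                poll_dev->poll = foo_poll;
                poll_dev->poll_interval = 100;          /* msec */

                poll_dev->input->name = "foo accelerometer";
                __set_bit(EV_ABS, poll_dev->input->evbit);
                input_set_abs_params(poll_dev->input, ABS_X, -128, 127, 0, 0);

                return input_register_polled_device(poll_dev);
        }

With devm_input_allocate_polled_device() the polled device is released
automatically on driver detach, so no explicit unregister/free is needed.
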
diff --git a/include/linux/input.h b/include/linux/input.h
new file mode 100644
index 000000000..82ce323b9
--- /dev/null
+++ b/include/linux/input.h
@@ -0,0 +1,534 @@
+/*
+ * Copyright (c) 1999-2002 Vojtech Pavlik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _INPUT_H
+#define _INPUT_H
+
+#include <linux/time.h>
+#include <linux/list.h>
+#include <uapi/linux/input.h>
+/* Implementation details, userspace should not care about these */
+#define ABS_MT_FIRST ABS_MT_TOUCH_MAJOR
+#define ABS_MT_LAST ABS_MT_TOOL_Y
+
+/*
+ * In-kernel definitions.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/mod_devicetable.h>
+
+/**
+ * struct input_value - input value representation
+ * @type: type of value (EV_KEY, EV_ABS, etc)
+ * @code: the value code
+ * @value: the value
+ */
+struct input_value {
+ __u16 type;
+ __u16 code;
+ __s32 value;
+};
+
+/**
+ * struct input_dev - represents an input device
+ * @name: name of the device
+ * @phys: physical path to the device in the system hierarchy
+ * @uniq: unique identification code for the device (if device has it)
+ * @id: id of the device (struct input_id)
+ * @propbit: bitmap of device properties and quirks
+ * @evbit: bitmap of types of events supported by the device (EV_KEY,
+ * EV_REL, etc.)
+ * @keybit: bitmap of keys/buttons this device has
+ * @relbit: bitmap of relative axes for the device
+ * @absbit: bitmap of absolute axes for the device
+ * @mscbit: bitmap of miscellaneous events supported by the device
+ * @ledbit: bitmap of leds present on the device
+ * @sndbit: bitmap of sound effects supported by the device
+ * @ffbit: bitmap of force feedback effects supported by the device
+ * @swbit: bitmap of switches present on the device
+ * @hint_events_per_packet: average number of events generated by the
+ * device in a packet (between EV_SYN/SYN_REPORT events). Used by
+ * event handlers to estimate size of the buffer needed to hold
+ * events.
+ * @keycodemax: size of keycode table
+ * @keycodesize: size of elements in keycode table
+ * @keycode: map of scancodes to keycodes for this device
+ * @getkeycode: optional legacy method to retrieve current keymap.
+ * @setkeycode: optional method to alter current keymap, used to implement
+ * sparse keymaps. If not supplied, the default mechanism will be used.
+ * The method is called while holding event_lock and thus must
+ * not sleep
+ * @ff: force feedback structure associated with the device if device
+ * supports force feedback effects
+ * @repeat_key: stores key code of the last key pressed; used to implement
+ * software autorepeat
+ * @timer: timer for software autorepeat
+ * @rep: current values for autorepeat parameters (delay, rate)
+ * @mt: pointer to multitouch state
+ * @absinfo: array of &struct input_absinfo elements holding information
+ * about absolute axes (current value, min, max, flat, fuzz,
+ * resolution)
+ * @key: reflects current state of device's keys/buttons
+ * @led: reflects current state of device's LEDs
+ * @snd: reflects current state of sound effects
+ * @sw: reflects current state of device's switches
+ * @open: this method is called when the very first user calls
+ * input_open_device(). The driver must prepare the device
+ * to start generating events (start polling thread,
+ * request an IRQ, submit URB, etc.)
+ * @close: this method is called when the very last user calls
+ * input_close_device().
+ * @flush: purges the device. Most commonly used to get rid of force
+ * feedback effects loaded into the device when disconnecting
+ * from it
+ * @event: event handler for events sent _to_ the device, like EV_LED
+ * or EV_SND. The device is expected to carry out the requested
+ * action (turn on a LED, play sound, etc.) The call is protected
+ * by @event_lock and must not sleep
+ * @grab: input handle that currently has the device grabbed (via
+ * EVIOCGRAB ioctl). When a handle grabs a device it becomes the sole
+ * recipient for all input events coming from the device
+ * @event_lock: this spinlock is taken when input core receives
+ * and processes a new event for the device (in input_event()).
+ * Code that accesses and/or modifies parameters of a device
+ * (such as keymap or absmin, absmax, absfuzz, etc.) after device
+ * has been registered with input core must take this lock.
+ * @mutex: serializes calls to open(), close() and flush() methods
+ * @users: stores number of users (input handlers) that opened this
+ * device. It is used by input_open_device() and input_close_device()
+ * to make sure that dev->open() is only called when the first
+ * user opens device and dev->close() is called when the very
+ * last user closes the device
+ * @going_away: marks devices that are in the middle of unregistering and
+ * causes input_open_device*() to fail with -ENODEV.
+ * @dev: driver model's view of this device
+ * @h_list: list of input handles associated with the device. When
+ * accessing the list dev->mutex must be held
+ * @node: used to place the device onto input_dev_list
+ * @num_vals: number of values queued in the current frame
+ * @max_vals: maximum number of values queued in a frame
+ * @vals: array of values queued in the current frame
+ * @devres_managed: indicates that the device is managed with the devres
+ * framework and need not be explicitly unregistered or freed.
+ */
+struct input_dev {
+ const char *name;
+ const char *phys;
+ const char *uniq;
+ struct input_id id;
+
+ unsigned long propbit[BITS_TO_LONGS(INPUT_PROP_CNT)];
+
+ unsigned long evbit[BITS_TO_LONGS(EV_CNT)];
+ unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+ unsigned long relbit[BITS_TO_LONGS(REL_CNT)];
+ unsigned long absbit[BITS_TO_LONGS(ABS_CNT)];
+ unsigned long mscbit[BITS_TO_LONGS(MSC_CNT)];
+ unsigned long ledbit[BITS_TO_LONGS(LED_CNT)];
+ unsigned long sndbit[BITS_TO_LONGS(SND_CNT)];
+ unsigned long ffbit[BITS_TO_LONGS(FF_CNT)];
+ unsigned long swbit[BITS_TO_LONGS(SW_CNT)];
+
+ unsigned int hint_events_per_packet;
+
+ unsigned int keycodemax;
+ unsigned int keycodesize;
+ void *keycode;
+
+ int (*setkeycode)(struct input_dev *dev,
+ const struct input_keymap_entry *ke,
+ unsigned int *old_keycode);
+ int (*getkeycode)(struct input_dev *dev,
+ struct input_keymap_entry *ke);
+
+ struct ff_device *ff;
+
+ unsigned int repeat_key;
+ struct timer_list timer;
+
+ int rep[REP_CNT];
+
+ struct input_mt *mt;
+
+ struct input_absinfo *absinfo;
+
+ unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+ unsigned long led[BITS_TO_LONGS(LED_CNT)];
+ unsigned long snd[BITS_TO_LONGS(SND_CNT)];
+ unsigned long sw[BITS_TO_LONGS(SW_CNT)];
+
+ int (*open)(struct input_dev *dev);
+ void (*close)(struct input_dev *dev);
+ int (*flush)(struct input_dev *dev, struct file *file);
+ int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
+
+ struct input_handle __rcu *grab;
+
+ spinlock_t event_lock;
+ struct mutex mutex;
+
+ unsigned int users;
+ bool going_away;
+
+ struct device dev;
+
+ struct list_head h_list;
+ struct list_head node;
+
+ unsigned int num_vals;
+ unsigned int max_vals;
+ struct input_value *vals;
+
+ bool devres_managed;
+};
+#define to_input_dev(d) container_of(d, struct input_dev, dev)
+
+/*
+ * Verify that we are in sync with input_device_id mod_devicetable.h #defines
+ */
+
+#if EV_MAX != INPUT_DEVICE_ID_EV_MAX
+#error "EV_MAX and INPUT_DEVICE_ID_EV_MAX do not match"
+#endif
+
+#if KEY_MIN_INTERESTING != INPUT_DEVICE_ID_KEY_MIN_INTERESTING
+#error "KEY_MIN_INTERESTING and INPUT_DEVICE_ID_KEY_MIN_INTERESTING do not match"
+#endif
+
+#if KEY_MAX != INPUT_DEVICE_ID_KEY_MAX
+#error "KEY_MAX and INPUT_DEVICE_ID_KEY_MAX do not match"
+#endif
+
+#if REL_MAX != INPUT_DEVICE_ID_REL_MAX
+#error "REL_MAX and INPUT_DEVICE_ID_REL_MAX do not match"
+#endif
+
+#if ABS_MAX != INPUT_DEVICE_ID_ABS_MAX
+#error "ABS_MAX and INPUT_DEVICE_ID_ABS_MAX do not match"
+#endif
+
+#if MSC_MAX != INPUT_DEVICE_ID_MSC_MAX
+#error "MSC_MAX and INPUT_DEVICE_ID_MSC_MAX do not match"
+#endif
+
+#if LED_MAX != INPUT_DEVICE_ID_LED_MAX
+#error "LED_MAX and INPUT_DEVICE_ID_LED_MAX do not match"
+#endif
+
+#if SND_MAX != INPUT_DEVICE_ID_SND_MAX
+#error "SND_MAX and INPUT_DEVICE_ID_SND_MAX do not match"
+#endif
+
+#if FF_MAX != INPUT_DEVICE_ID_FF_MAX
+#error "FF_MAX and INPUT_DEVICE_ID_FF_MAX do not match"
+#endif
+
+#if SW_MAX != INPUT_DEVICE_ID_SW_MAX
+#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
+#endif
+
+#define INPUT_DEVICE_ID_MATCH_DEVICE \
+ (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
+#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
+ (INPUT_DEVICE_ID_MATCH_DEVICE | INPUT_DEVICE_ID_MATCH_VERSION)
+
+struct input_handle;
+
+/**
+ * struct input_handler - implements one of interfaces for input devices
+ * @private: driver-specific data
+ * @event: event handler. This method is being called by input core with
+ * interrupts disabled and dev->event_lock spinlock held and so
+ * it may not sleep
+ * @events: event sequence handler. This method is being called by
+ * input core with interrupts disabled and dev->event_lock
+ * spinlock held and so it may not sleep
+ * @filter: similar to @event; separates normal event handlers from
+ * "filters".
+ * @match: called after comparing device's id with handler's id_table
+ * to perform fine-grained matching between device and handler
+ * @connect: called when attaching a handler to an input device
+ * @disconnect: disconnects a handler from input device
+ * @start: starts handler for given handle. This function is called by
+ * input core right after connect() method and also when a process
+ * that "grabbed" a device releases it
+ * @legacy_minors: set to %true by drivers using legacy minor ranges
+ * @minor: beginning of range of 32 legacy minors for devices this driver
+ * can provide
+ * @name: name of the handler, to be shown in /proc/bus/input/handlers
+ * @id_table: pointer to a table of input_device_ids this driver can
+ * handle
+ * @h_list: list of input handles associated with the handler
+ * @node: for placing the driver onto input_handler_list
+ *
+ * Input handlers attach to input devices and create input handles. There
+ * are likely several handlers attached to any given input device at the
+ * same time. All of them will get their copy of the input events generated by
+ * the device.
+ *
+ * The very same structure is used to implement input filters. Input core
+ * allows filters to run first and will not pass the event to regular handlers
+ * if any of the filters indicate that the event should be filtered (by
+ * returning %true from their filter() method).
+ *
+ * Note that input core serializes calls to connect() and disconnect()
+ * methods.
+ */
+struct input_handler {
+
+ void *private;
+
+ void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
+ void (*events)(struct input_handle *handle,
+ const struct input_value *vals, unsigned int count);
+ bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value);
+ bool (*match)(struct input_handler *handler, struct input_dev *dev);
+ int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id);
+ void (*disconnect)(struct input_handle *handle);
+ void (*start)(struct input_handle *handle);
+
+ bool legacy_minors;
+ int minor;
+ const char *name;
+
+ const struct input_device_id *id_table;
+
+ struct list_head h_list;
+ struct list_head node;
+};
+
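[Example -- not part of this patch] A skeleton handler in the spirit of
drivers/input/evbug.c, showing the connect()/disconnect() pattern; the bug_*
names are illustrative:

        /* needs <linux/input.h>, <linux/slab.h> and <linux/module.h> */
        static void bug_event(struct input_handle *handle, unsigned int type,
                              unsigned int code, int value)
        {
                pr_debug("event: type %u, code %u, value %d\n", type, code, value);
        }

        static int bug_connect(struct input_handler *handler, struct input_dev *dev,
                               const struct input_device_id *id)
        {
                struct input_handle *handle;
                int error;

                handle = kzalloc(sizeof(*handle), GFP_KERNEL);
                if (!handle)
                        return -ENOMEM;

                handle->dev = dev;
                handle->handler = handler;
                handle->name = "bug";

                error = input_register_handle(handle);
                if (error)
                        goto err_free;

                error = input_open_device(handle);      /* start receiving events */
                if (error)
                        goto err_unregister;

                return 0;

        err_unregister:
                input_unregister_handle(handle);
        err_free:
                kfree(handle);
                return error;
        }

        static void bug_disconnect(struct input_handle *handle)
        {
                input_close_device(handle);
                input_unregister_handle(handle);
                kfree(handle);
        }

        static const struct input_device_id bug_ids[] = {
                { .driver_info = 1 },   /* matches all devices */
                { },                    /* terminating zero entry */
        };

        static struct input_handler bug_handler = {
                .event          = bug_event,
                .connect        = bug_connect,
                .disconnect     = bug_disconnect,
                .name           = "bug",
                .id_table       = bug_ids,
        };

The handler is activated with input_register_handler(&bug_handler), typically
from the module's init function.
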
+/**
+ * struct input_handle - links input device with an input handler
+ * @private: handler-specific data
+ * @open: counter showing whether the handle is 'open', i.e. should deliver
+ * events from its device
+ * @name: name given to the handle by handler that created it
+ * @dev: input device the handle is attached to
+ * @handler: handler that works with the device through this handle
+ * @d_node: used to put the handle on device's list of attached handles
+ * @h_node: used to put the handle on handler's list of handles from which
+ * it gets events
+ */
+struct input_handle {
+
+ void *private;
+
+ int open;
+ const char *name;
+
+ struct input_dev *dev;
+ struct input_handler *handler;
+
+ struct list_head d_node;
+ struct list_head h_node;
+};
+
+struct input_dev __must_check *input_allocate_device(void);
+struct input_dev __must_check *devm_input_allocate_device(struct device *);
+void input_free_device(struct input_dev *dev);
+
+static inline struct input_dev *input_get_device(struct input_dev *dev)
+{
+ return dev ? to_input_dev(get_device(&dev->dev)) : NULL;
+}
+
+static inline void input_put_device(struct input_dev *dev)
+{
+ if (dev)
+ put_device(&dev->dev);
+}
+
+static inline void *input_get_drvdata(struct input_dev *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+static inline void input_set_drvdata(struct input_dev *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
+
+int __must_check input_register_device(struct input_dev *);
+void input_unregister_device(struct input_dev *);
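[Example -- not part of this patch] A minimal sketch of allocating and
registering an input device from a driver's probe(); the foo_* names are
illustrative:

        /* needs <linux/input.h> and <linux/platform_device.h> */
        static int foo_probe(struct platform_device *pdev)
        {
                struct input_dev *input;
                int error;

                input = devm_input_allocate_device(&pdev->dev);
                if (!input)
                        return -ENOMEM;

                input->name = "foo buttons";
                input->phys = "foo/input0";
                input->id.bustype = BUS_HOST;

                /* declare capabilities before registration */
                input_set_capability(input, EV_KEY, KEY_POWER);

                error = input_register_device(input);
                if (error)
                        return error;

                platform_set_drvdata(pdev, input);
                return 0;
        }

Because the device was allocated with devm_input_allocate_device(), it is
unregistered and freed automatically when the driver is detached.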
+
+void input_reset_device(struct input_dev *);
+
+int __must_check input_register_handler(struct input_handler *);
+void input_unregister_handler(struct input_handler *);
+
+int __must_check input_get_new_minor(int legacy_base, unsigned int legacy_num,
+ bool allow_dynamic);
+void input_free_minor(unsigned int minor);
+
+int input_handler_for_each_handle(struct input_handler *, void *data,
+ int (*fn)(struct input_handle *, void *));
+
+int input_register_handle(struct input_handle *);
+void input_unregister_handle(struct input_handle *);
+
+int input_grab_device(struct input_handle *);
+void input_release_device(struct input_handle *);
+
+int input_open_device(struct input_handle *);
+void input_close_device(struct input_handle *);
+
+int input_flush_device(struct input_handle *handle, struct file *file);
+
+void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
+void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
+
+static inline void input_report_key(struct input_dev *dev, unsigned int code, int value)
+{
+ input_event(dev, EV_KEY, code, !!value);
+}
+
+static inline void input_report_rel(struct input_dev *dev, unsigned int code, int value)
+{
+ input_event(dev, EV_REL, code, value);
+}
+
+static inline void input_report_abs(struct input_dev *dev, unsigned int code, int value)
+{
+ input_event(dev, EV_ABS, code, value);
+}
+
+static inline void input_report_ff_status(struct input_dev *dev, unsigned int code, int value)
+{
+ input_event(dev, EV_FF_STATUS, code, value);
+}
+
+static inline void input_report_switch(struct input_dev *dev, unsigned int code, int value)
+{
+ input_event(dev, EV_SW, code, !!value);
+}
+
+static inline void input_sync(struct input_dev *dev)
+{
+ input_event(dev, EV_SYN, SYN_REPORT, 0);
+}
+
+static inline void input_mt_sync(struct input_dev *dev)
+{
+ input_event(dev, EV_SYN, SYN_MT_REPORT, 0);
+}
+
+void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code);
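[Example -- not part of this patch] Reporting events from an interrupt handler
using the helpers above; foo_read_button() is a hypothetical hardware read:

        /* needs <linux/input.h> and <linux/interrupt.h> */
        static irqreturn_t foo_irq(int irq, void *dev_id)
        {
                struct input_dev *input = dev_id;
                bool pressed = foo_read_button();

                /* queue the key event, then close the frame with EV_SYN/SYN_REPORT */
                input_report_key(input, KEY_POWER, pressed);
                input_sync(input);

                return IRQ_HANDLED;
        }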
+
+/**
+ * input_set_events_per_packet - tell handlers about the driver event rate
+ * @dev: the input device used by the driver
+ * @n_events: the average number of events between calls to input_sync()
+ *
+ * If the event rate sent from a device is unusually large, use this
+ * function to set the expected event rate. This will allow handlers
+ * to set up an appropriate buffer size for the event stream, in order
+ * to minimize information loss.
+ */
+static inline void input_set_events_per_packet(struct input_dev *dev, int n_events)
+{
+ dev->hint_events_per_packet = n_events;
+}
+
+void input_alloc_absinfo(struct input_dev *dev);
+void input_set_abs_params(struct input_dev *dev, unsigned int axis,
+ int min, int max, int fuzz, int flat);
+
+#define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \
+static inline int input_abs_get_##_suffix(struct input_dev *dev, \
+ unsigned int axis) \
+{ \
+ return dev->absinfo ? dev->absinfo[axis]._item : 0; \
+} \
+ \
+static inline void input_abs_set_##_suffix(struct input_dev *dev, \
+ unsigned int axis, int val) \
+{ \
+ input_alloc_absinfo(dev); \
+ if (dev->absinfo) \
+ dev->absinfo[axis]._item = val; \
+}
+
+INPUT_GENERATE_ABS_ACCESSORS(val, value)
+INPUT_GENERATE_ABS_ACCESSORS(min, minimum)
+INPUT_GENERATE_ABS_ACCESSORS(max, maximum)
+INPUT_GENERATE_ABS_ACCESSORS(fuzz, fuzz)
+INPUT_GENERATE_ABS_ACCESSORS(flat, flat)
+INPUT_GENERATE_ABS_ACCESSORS(res, resolution)
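[Example -- not part of this patch] Setting up an absolute axis and reading it
back through the generated accessors; the ranges shown are arbitrary:

        static void foo_setup_abs(struct input_dev *input)
        {
                __set_bit(EV_ABS, input->evbit);
                input_set_abs_params(input, ABS_X, 0, 1023, 4, 8);  /* min, max, fuzz, flat */
                input_abs_set_res(input, ABS_X, 10);                /* resolution, e.g. units/mm */
        }

        /* a handler could later query the range: input_abs_get_max(input, ABS_X) */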
+
+int input_scancode_to_scalar(const struct input_keymap_entry *ke,
+ unsigned int *scancode);
+
+int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
+int input_set_keycode(struct input_dev *dev,
+ const struct input_keymap_entry *ke);
+
+extern struct class input_class;
+
+/**
+ * struct ff_device - force-feedback part of an input device
+ * @upload: Called to upload a new effect into the device
+ * @erase: Called to erase an effect from device
+ * @playback: Called to request device to start playing specified effect
+ * @set_gain: Called to set specified gain
+ * @set_autocenter: Called to auto-center device
+ * @destroy: called by input core when parent input device is being
+ * destroyed
+ * @private: driver-specific data, will be freed automatically
+ * @ffbit: bitmap of force feedback capabilities truly supported by
+ * device (not emulated like ones in input_dev->ffbit)
+ * @mutex: mutex for serializing access to the device
+ * @max_effects: maximum number of effects supported by device
+ * @effects: pointer to an array of effects currently loaded into device
+ * @effect_owners: array of effect owners; when file handle owning
+ * an effect gets closed the effect is automatically erased
+ *
+ * Every force-feedback device must implement upload() and playback()
+ * methods; erase() is optional. set_gain() and set_autocenter() need
+ * only be implemented if driver sets up FF_GAIN and FF_AUTOCENTER
+ * bits.
+ *
+ * Note that playback(), set_gain() and set_autocenter() are called with
+ * dev->event_lock spinlock held and interrupts off and thus may not
+ * sleep.
+ */
+struct ff_device {
+ int (*upload)(struct input_dev *dev, struct ff_effect *effect,
+ struct ff_effect *old);
+ int (*erase)(struct input_dev *dev, int effect_id);
+
+ int (*playback)(struct input_dev *dev, int effect_id, int value);
+ void (*set_gain)(struct input_dev *dev, u16 gain);
+ void (*set_autocenter)(struct input_dev *dev, u16 magnitude);
+
+ void (*destroy)(struct ff_device *);
+
+ void *private;
+
+ unsigned long ffbit[BITS_TO_LONGS(FF_CNT)];
+
+ struct mutex mutex;
+
+ int max_effects;
+ struct ff_effect *effects;
+ struct file *effect_owners[];
+};
+
+int input_ff_create(struct input_dev *dev, unsigned int max_effects);
+void input_ff_destroy(struct input_dev *dev);
+
+int input_ff_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
+
+int input_ff_upload(struct input_dev *dev, struct ff_effect *effect, struct file *file);
+int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file);
+
+int input_ff_create_memless(struct input_dev *dev, void *data,
+ int (*play_effect)(struct input_dev *, void *, struct ff_effect *));
+
+#endif
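[Example -- not part of this patch] A sketch of wiring up memless force
feedback for a rumble-capable device; foo_set_motors() and struct foo_priv are
assumptions:

        static int foo_play_effect(struct input_dev *dev, void *data,
                                   struct ff_effect *effect)
        {
                struct foo_priv *priv = data;

                return foo_set_motors(priv,
                                      effect->u.rumble.strong_magnitude,
                                      effect->u.rumble.weak_magnitude);
        }

        static int foo_init_ff(struct input_dev *input, struct foo_priv *priv)
        {
                input_set_capability(input, EV_FF, FF_RUMBLE);
                return input_ff_create_memless(input, priv, foo_play_effect);
        }
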
diff --git a/include/linux/input/ad714x.h b/include/linux/input/ad714x.h
new file mode 100644
index 000000000..d388d857b
--- /dev/null
+++ b/include/linux/input/ad714x.h
@@ -0,0 +1,64 @@
+/*
+ * include/linux/input/ad714x.h
+ *
+ * AD714x is very flexible; it can be used as buttons, scrollwheel,
+ * slider and touchpad at the same time, depending on the board.
+ * The platform_data for the device's "struct device" holds this
+ * information.
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_INPUT_AD714X_H__
+#define __LINUX_INPUT_AD714X_H__
+
+#define STAGE_NUM 12
+#define STAGE_CFGREG_NUM 8
+#define SYS_CFGREG_NUM 8
+
+/* board information which need be initialized in arch/mach... */
+struct ad714x_slider_plat {
+ int start_stage;
+ int end_stage;
+ int max_coord;
+};
+
+struct ad714x_wheel_plat {
+ int start_stage;
+ int end_stage;
+ int max_coord;
+};
+
+struct ad714x_touchpad_plat {
+ int x_start_stage;
+ int x_end_stage;
+ int x_max_coord;
+
+ int y_start_stage;
+ int y_end_stage;
+ int y_max_coord;
+};
+
+struct ad714x_button_plat {
+ int keycode;
+ unsigned short l_mask;
+ unsigned short h_mask;
+};
+
+struct ad714x_platform_data {
+ int slider_num;
+ int wheel_num;
+ int touchpad_num;
+ int button_num;
+ struct ad714x_slider_plat *slider;
+ struct ad714x_wheel_plat *wheel;
+ struct ad714x_touchpad_plat *touchpad;
+ struct ad714x_button_plat *button;
+ unsigned short stage_cfg_reg[STAGE_NUM][STAGE_CFGREG_NUM];
+ unsigned short sys_cfg_reg[SYS_CFGREG_NUM];
+ unsigned long irqflags;
+};
+
+#endif
diff --git a/include/linux/input/adp5589.h b/include/linux/input/adp5589.h
new file mode 100644
index 000000000..1a05eee15
--- /dev/null
+++ b/include/linux/input/adp5589.h
@@ -0,0 +1,188 @@
+/*
+ * Analog Devices ADP5589/ADP5585 I/O Expander and QWERTY Keypad Controller
+ *
+ * Copyright 2010-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef _ADP5589_H
+#define _ADP5589_H
+
+/*
+ * ADP5589 specific GPI and Keymap defines
+ */
+
+#define ADP5589_KEYMAPSIZE 88
+
+#define ADP5589_GPI_PIN_ROW0 97
+#define ADP5589_GPI_PIN_ROW1 98
+#define ADP5589_GPI_PIN_ROW2 99
+#define ADP5589_GPI_PIN_ROW3 100
+#define ADP5589_GPI_PIN_ROW4 101
+#define ADP5589_GPI_PIN_ROW5 102
+#define ADP5589_GPI_PIN_ROW6 103
+#define ADP5589_GPI_PIN_ROW7 104
+#define ADP5589_GPI_PIN_COL0 105
+#define ADP5589_GPI_PIN_COL1 106
+#define ADP5589_GPI_PIN_COL2 107
+#define ADP5589_GPI_PIN_COL3 108
+#define ADP5589_GPI_PIN_COL4 109
+#define ADP5589_GPI_PIN_COL5 110
+#define ADP5589_GPI_PIN_COL6 111
+#define ADP5589_GPI_PIN_COL7 112
+#define ADP5589_GPI_PIN_COL8 113
+#define ADP5589_GPI_PIN_COL9 114
+#define ADP5589_GPI_PIN_COL10 115
+#define GPI_LOGIC1 116
+#define GPI_LOGIC2 117
+
+#define ADP5589_GPI_PIN_ROW_BASE ADP5589_GPI_PIN_ROW0
+#define ADP5589_GPI_PIN_ROW_END ADP5589_GPI_PIN_ROW7
+#define ADP5589_GPI_PIN_COL_BASE ADP5589_GPI_PIN_COL0
+#define ADP5589_GPI_PIN_COL_END ADP5589_GPI_PIN_COL10
+
+#define ADP5589_GPI_PIN_BASE ADP5589_GPI_PIN_ROW_BASE
+#define ADP5589_GPI_PIN_END ADP5589_GPI_PIN_COL_END
+
+#define ADP5589_GPIMAPSIZE_MAX (ADP5589_GPI_PIN_END - ADP5589_GPI_PIN_BASE + 1)
+
+/*
+ * ADP5585 specific GPI and Keymap defines
+ */
+
+#define ADP5585_KEYMAPSIZE 30
+
+#define ADP5585_GPI_PIN_ROW0 37
+#define ADP5585_GPI_PIN_ROW1 38
+#define ADP5585_GPI_PIN_ROW2 39
+#define ADP5585_GPI_PIN_ROW3 40
+#define ADP5585_GPI_PIN_ROW4 41
+#define ADP5585_GPI_PIN_ROW5 42
+#define ADP5585_GPI_PIN_COL0 43
+#define ADP5585_GPI_PIN_COL1 44
+#define ADP5585_GPI_PIN_COL2 45
+#define ADP5585_GPI_PIN_COL3 46
+#define ADP5585_GPI_PIN_COL4 47
+#define GPI_LOGIC 48
+
+#define ADP5585_GPI_PIN_ROW_BASE ADP5585_GPI_PIN_ROW0
+#define ADP5585_GPI_PIN_ROW_END ADP5585_GPI_PIN_ROW5
+#define ADP5585_GPI_PIN_COL_BASE ADP5585_GPI_PIN_COL0
+#define ADP5585_GPI_PIN_COL_END ADP5585_GPI_PIN_COL4
+
+#define ADP5585_GPI_PIN_BASE ADP5585_GPI_PIN_ROW_BASE
+#define ADP5585_GPI_PIN_END ADP5585_GPI_PIN_COL_END
+
+#define ADP5585_GPIMAPSIZE_MAX (ADP5585_GPI_PIN_END - ADP5585_GPI_PIN_BASE + 1)
+
+struct adp5589_gpi_map {
+ unsigned short pin;
+ unsigned short sw_evt;
+};
+
+/* scan_cycle_time */
+#define ADP5589_SCAN_CYCLE_10ms 0
+#define ADP5589_SCAN_CYCLE_20ms 1
+#define ADP5589_SCAN_CYCLE_30ms 2
+#define ADP5589_SCAN_CYCLE_40ms 3
+
+/* RESET_CFG */
+#define RESET_PULSE_WIDTH_500us 0
+#define RESET_PULSE_WIDTH_1ms 1
+#define RESET_PULSE_WIDTH_2ms 2
+#define RESET_PULSE_WIDTH_10ms 3
+
+#define RESET_TRIG_TIME_0ms (0 << 2)
+#define RESET_TRIG_TIME_1000ms (1 << 2)
+#define RESET_TRIG_TIME_1500ms (2 << 2)
+#define RESET_TRIG_TIME_2000ms (3 << 2)
+#define RESET_TRIG_TIME_2500ms (4 << 2)
+#define RESET_TRIG_TIME_3000ms (5 << 2)
+#define RESET_TRIG_TIME_3500ms (6 << 2)
+#define RESET_TRIG_TIME_4000ms (7 << 2)
+
+#define RESET_PASSTHRU_EN (1 << 5)
+#define RESET1_POL_HIGH (1 << 6)
+#define RESET1_POL_LOW (0 << 6)
+#define RESET2_POL_HIGH (1 << 7)
+#define RESET2_POL_LOW (0 << 7)
+
+/* ADP5589 Mask Bits:
+ * C C C C C C C C C C C | R R R R R R R R
+ * 1 9 8 7 6 5 4 3 2 1 0 | 7 6 5 4 3 2 1 0
+ * 0
+ * ---------------- BIT ------------------
+ * 1 1 1 1 1 1 1 1 1 0 0 | 0 0 0 0 0 0 0 0
+ * 8 7 6 5 4 3 2 1 0 9 8 | 7 6 5 4 3 2 1 0
+ */
+
+#define ADP_ROW(x) (1 << (x))
+#define ADP_COL(x) (1 << (x + 8))
+#define ADP5589_ROW_MASK 0xFF
+#define ADP5589_COL_MASK 0xFF
+#define ADP5589_COL_SHIFT 8
+#define ADP5589_MAX_ROW_NUM 7
+#define ADP5589_MAX_COL_NUM 10
+
+/* ADP5585 Mask Bits:
+ * C C C C C | R R R R R R
+ * 4 3 2 1 0 | 5 4 3 2 1 0
+ *
+ * ---- BIT -- -----------
+ * 1 0 0 0 0 | 0 0 0 0 0 0
+ * 0 9 8 7 6 | 5 4 3 2 1 0
+ */
+
+#define ADP5585_ROW_MASK 0x3F
+#define ADP5585_COL_MASK 0x1F
+#define ADP5585_ROW_SHIFT 0
+#define ADP5585_COL_SHIFT 6
+#define ADP5585_MAX_ROW_NUM 5
+#define ADP5585_MAX_COL_NUM 4
+
+#define ADP5585_ROW(x) (1 << ((x) & ADP5585_ROW_MASK))
+#define ADP5585_COL(x) (1 << (((x) & ADP5585_COL_MASK) + ADP5585_COL_SHIFT))
+
+/* Put one of these structures in i2c_board_info platform_data */
+
+struct adp5589_kpad_platform_data {
+ unsigned keypad_en_mask; /* Keypad (Rows/Columns) enable mask */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ bool repeat; /* Enable key repeat */
+ bool en_keylock; /* Enable key lock feature (ADP5589 only)*/
+ unsigned char unlock_key1; /* Unlock Key 1 (ADP5589 only) */
+ unsigned char unlock_key2; /* Unlock Key 2 (ADP5589 only) */
+ unsigned char unlock_timer; /* Time in seconds [0..7] between the two unlock keys 0=disable (ADP5589 only) */
+ unsigned char scan_cycle_time; /* Time between consecutive scan cycles */
+ unsigned char reset_cfg; /* Reset config */
+ unsigned short reset1_key_1; /* Reset Key 1 */
+ unsigned short reset1_key_2; /* Reset Key 2 */
+ unsigned short reset1_key_3; /* Reset Key 3 */
+ unsigned short reset2_key_1; /* Reset Key 1 */
+ unsigned short reset2_key_2; /* Reset Key 2 */
+ unsigned debounce_dis_mask; /* Disable debounce mask */
+ unsigned pull_dis_mask; /* Disable all pull resistors mask */
+ unsigned pullup_en_100k; /* Pull-Up 100k Enable Mask */
+ unsigned pullup_en_300k; /* Pull-Up 300k Enable Mask */
+ unsigned pulldown_en_300k; /* Pull-Down 300k Enable Mask */
+ const struct adp5589_gpi_map *gpimap;
+ unsigned short gpimapsize;
+ const struct adp5589_gpio_platform_data *gpio_data;
+};
+
+struct i2c_client; /* forward declaration */
+
+struct adp5589_gpio_platform_data {
+ int gpio_start; /* GPIO Chip base # */
+ int (*setup)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ void *context;
+};
+
+#endif
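[Example -- not part of this patch] A sketch of board code filling in the
platform data above for a small key matrix; the keycodes and enabled rows and
columns are arbitrary:

        static const unsigned short foo_adp5589_keymap[ADP5589_KEYMAPSIZE] = {
                [0] = KEY_1, [1] = KEY_2, [2] = KEY_3,
                /* remaining positions default to 0 (no key) */
        };

        static struct adp5589_kpad_platform_data foo_adp5589_kpad_data = {
                /* enable rows 0-1 and columns 0-1 of the matrix */
                .keypad_en_mask  = ADP_ROW(0) | ADP_ROW(1) | ADP_COL(0) | ADP_COL(1),
                .keymap          = foo_adp5589_keymap,
                .keymapsize      = ARRAY_SIZE(foo_adp5589_keymap),
                .repeat          = false,
                .scan_cycle_time = ADP5589_SCAN_CYCLE_10ms,
        };
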
diff --git a/include/linux/input/adxl34x.h b/include/linux/input/adxl34x.h
new file mode 100644
index 000000000..010d98175
--- /dev/null
+++ b/include/linux/input/adxl34x.h
@@ -0,0 +1,358 @@
+/*
+ * include/linux/input/adxl34x.h
+ *
+ * Digital Accelerometer characteristics are highly application specific
+ * and may vary between boards and models. The platform_data for the
+ * device's "struct device" holds this information.
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_INPUT_ADXL34X_H__
+#define __LINUX_INPUT_ADXL34X_H__
+
+#include <linux/input.h>
+
+struct adxl34x_platform_data {
+
+ /*
+ * X,Y,Z Axis Offset:
+ * offers user offset adjustments in two's complement
+ * form with a scale factor of 15.6 mg/LSB (i.e. 0x7F = +2 g)
+ */
+
+ s8 x_axis_offset;
+ s8 y_axis_offset;
+ s8 z_axis_offset;
+
+ /*
+ * TAP_X/Y/Z Enable: Setting TAP_X, Y, or Z Enable enables X,
+ * Y, or Z participation in Tap detection. A '0' excludes the
+ * selected axis from participation in Tap detection.
+ * Setting the SUPPRESS bit suppresses Double Tap detection if
+ * acceleration greater than tap_threshold is present during the
+ * tap_latency period, i.e. after the first tap but before the
+ * opening of the second tap window.
+ */
+
+#define ADXL_SUPPRESS (1 << 3)
+#define ADXL_TAP_X_EN (1 << 2)
+#define ADXL_TAP_Y_EN (1 << 1)
+#define ADXL_TAP_Z_EN (1 << 0)
+
+ u8 tap_axis_control;
+
+ /*
+ * tap_threshold:
+ * holds the threshold value for tap detection/interrupts.
+ * The data format is unsigned. The scale factor is 62.5 mg/LSB
+ * (i.e. 0xFF = +16 g). A zero value may result in undesirable
+ * behavior if Tap/Double Tap is enabled.
+ */
+
+ u8 tap_threshold;
+
+ /*
+ * tap_duration:
+ * is an unsigned time value representing the maximum
+ * time that an event must be above the tap_threshold threshold
+ * to qualify as a tap event. The scale factor is 625 us/LSB. A zero
+ * value will prevent Tap/Double Tap functions from working.
+ */
+
+ u8 tap_duration;
+
+ /*
+ * tap_latency:
+ * is an unsigned time value representing the wait time
+ * from the detection of a tap event to the opening of the time
+ * window tap_window for a possible second tap event. The scale
+ * factor is 1.25 ms/LSB. A zero value will disable the Double Tap
+ * function.
+ */
+
+ u8 tap_latency;
+
+ /*
+ * tap_window:
+ * is an unsigned time value representing the amount
+ * of time after the expiration of tap_latency during which a second
+ * tap can begin. The scale factor is 1.25 ms/LSB. A zero value will
+ * disable the Double Tap function.
+ */
+
+ u8 tap_window;
+
+ /*
+ * act_axis_control:
+ * X/Y/Z Enable: A '1' enables X, Y, or Z participation in activity
+ * or inactivity detection. A '0' excludes the selected axis from
+ * participation. If all of the axes are excluded, the function is
+ * disabled.
+ * AC/DC: A '0' = DC coupled operation and a '1' = AC coupled
+ * operation. In DC coupled operation, the current acceleration is
+ * compared with activity_threshold and inactivity_threshold directly
+ * to determine whether activity or inactivity is detected. In AC
+ * coupled operation for activity detection, the acceleration value
+ * at the start of activity detection is taken as a reference value.
+ * New samples of acceleration are then compared to this
+ * reference value and if the magnitude of the difference exceeds
+ * activity_threshold the device will trigger an activity interrupt. In
+ * AC coupled operation for inactivity detection, a reference value
+ * is used again for comparison and is updated whenever the
+ * device exceeds the inactivity threshold. Once the reference
+ * value is selected, the device compares the magnitude of the
+ * difference between the reference value and the current
+ * acceleration with inactivity_threshold. If the difference is below
+ * inactivity_threshold for a total of inactivity_time, the device is
+ * considered inactive and the inactivity interrupt is triggered.
+ */
+
+#define ADXL_ACT_ACDC (1 << 7)
+#define ADXL_ACT_X_EN (1 << 6)
+#define ADXL_ACT_Y_EN (1 << 5)
+#define ADXL_ACT_Z_EN (1 << 4)
+#define ADXL_INACT_ACDC (1 << 3)
+#define ADXL_INACT_X_EN (1 << 2)
+#define ADXL_INACT_Y_EN (1 << 1)
+#define ADXL_INACT_Z_EN (1 << 0)
+
+ u8 act_axis_control;
+
+ /*
+ * activity_threshold:
+ * holds the threshold value for activity detection.
+ * The data format is unsigned. The scale factor is
+ * 62.5 mg/LSB. A zero value may result in undesirable behavior if
+ * Activity interrupt is enabled.
+ */
+
+ u8 activity_threshold;
+
+ /*
+ * inactivity_threshold:
+ * holds the threshold value for inactivity
+ * detection. The data format is unsigned. The scale
+ * factor is 62.5 mg/LSB. A zero value may result in undesirable
+ * behavior if Inactivity interrupt is enabled.
+ */
+
+ u8 inactivity_threshold;
+
+ /*
+ * inactivity_time:
+ * is an unsigned time value representing the
+ * amount of time that acceleration must be below the value in
+ * inactivity_threshold for inactivity to be declared. The scale factor
+ * is 1 second/LSB. Unlike the other interrupt functions, which
+ * operate on unfiltered data, the inactivity function operates on the
+ * filtered output data. At least one output sample must be
+ * generated for the inactivity interrupt to be triggered. This will
+ * result in the function appearing unresponsive if the
+ * inactivity_time register is set with a value less than the time
+ * constant of the Output Data Rate. A zero value will result in an
+ * interrupt when the output data is below inactivity_threshold.
+ */
+
+ u8 inactivity_time;
+
+ /*
+ * free_fall_threshold:
+ * holds the threshold value for Free-Fall detection.
+ * The data format is unsigned. The root-sum-square(RSS) value
+ * of all axes is calculated and compared to the value in
+ * free_fall_threshold to determine if a free fall event may be
+ * occurring. The scale factor is 62.5 mg/LSB. A zero value may
+ * result in undesirable behavior if Free-Fall interrupt is
+ * enabled. Values between 300 and 600 mg (0x05 to 0x09) are
+ * recommended.
+ */
+
+ u8 free_fall_threshold;
+
+ /*
+ * free_fall_time:
+ * is an unsigned time value representing the minimum
+ * time that the RSS value of all axes must be less than
+ * free_fall_threshold to generate a Free-Fall interrupt. The
+ * scale factor is 5 ms/LSB. A zero value may result in
+ * undesirable behavior if Free-Fall interrupt is enabled.
+ * Values between 100 to 350 ms (0x14 to 0x46) are recommended.
+ */
+
+ u8 free_fall_time;
+
+ /*
+ * data_rate:
+ * Selects device bandwidth and output data rate.
+ * RATE = 3200 Hz / (2^(15 - x)). Default value is 0x0A, or 100 Hz
+ * Output Data Rate. An Output Data Rate should be selected that
+ * is appropriate for the communication protocol and frequency
+ * selected. Selecting too high of an Output Data Rate with a low
+ * communication speed will result in samples being discarded.
+ */
+
+ u8 data_rate;
+
+ /*
+ * data_range:
+ * FULL_RES: When this bit is set, the device is
+ * in Full-Resolution Mode, where the output resolution increases
+ * with RANGE to maintain a 4 mg/LSB scale factor. When this
+ * bit is cleared, the device is in 10-bit Mode and RANGE determines the
+ * maximum g-Range and scale factor.
+ */
+
+#define ADXL_FULL_RES (1 << 3)
+#define ADXL_RANGE_PM_2g 0
+#define ADXL_RANGE_PM_4g 1
+#define ADXL_RANGE_PM_8g 2
+#define ADXL_RANGE_PM_16g 3
+
+ u8 data_range;
+
+ /*
+ * low_power_mode:
+ * A '0' = Normal operation and a '1' = Reduced
+ * power operation with somewhat higher noise.
+ */
+
+ u8 low_power_mode;
+
+ /*
+ * power_mode:
+ * LINK: A '1' with both the activity and inactivity functions
+ * enabled will delay the start of the activity function until
+ * inactivity is detected. Once activity is detected, inactivity
+ * detection will begin and prevent the detection of activity. This
+ * bit serially links the activity and inactivity functions. When '0'
+ * the inactivity and activity functions are concurrent. Additional
+ * information can be found in the ADXL34x datasheet's Application
+ * section under Link Mode.
+ * AUTO_SLEEP: A '1' sets the ADXL34x to switch to Sleep Mode
+ * when inactivity (acceleration has been below inactivity_threshold
+ * for at least inactivity_time) is detected and the LINK bit is set.
+ * A '0' disables automatic switching to Sleep Mode. See the
+ * Sleep Bit section of the ADXL34x datasheet for more information.
+ */
+
+#define ADXL_LINK (1 << 5)
+#define ADXL_AUTO_SLEEP (1 << 4)
+
+ u8 power_mode;
+
+ /*
+ * fifo_mode:
+ * BYPASS The FIFO is bypassed
+ * FIFO FIFO collects up to 32 values then stops collecting data
+ * STREAM FIFO holds the last 32 data values. Once full, the FIFO's
+ * oldest data is lost as it is replaced with newer data
+ *
+ * DEFAULT should be ADXL_FIFO_STREAM
+ */
+
+#define ADXL_FIFO_BYPASS 0
+#define ADXL_FIFO_FIFO 1
+#define ADXL_FIFO_STREAM 2
+
+ u8 fifo_mode;
+
+ /*
+ * watermark:
+ * The Watermark feature can be used to reduce the interrupt load
+ * of the system. The FIFO fills up to the value stored in watermark
+ * [1..32] and then generates an interrupt.
+ * A '0' disables the watermark feature.
+ */
+
+ u8 watermark;
+
+ /*
+ * When acceleration measurements are received from the ADXL34x
+ * events are sent to the event subsystem. The following settings
+ * select the event type and event code for new x, y and z axis data
+ * respectively.
+ */
+ u32 ev_type; /* EV_ABS or EV_REL */
+
+ u32 ev_code_x; /* ABS_X,Y,Z or REL_X,Y,Z */
+ u32 ev_code_y; /* ABS_X,Y,Z or REL_X,Y,Z */
+ u32 ev_code_z; /* ABS_X,Y,Z or REL_X,Y,Z */
+
+ /*
+ * A valid BTN or KEY Code; use tap_axis_control to disable
+ * event reporting
+ */
+
+ u32 ev_code_tap[3]; /* EV_KEY {X-Axis, Y-Axis, Z-Axis} */
+
+ /*
+ * A valid BTN or KEY Code for Free-Fall or Activity enables
+ * input event reporting. A '0' disables the Free-Fall or
+ * Activity reporting.
+ */
+
+ u32 ev_code_ff; /* EV_KEY */
+ u32 ev_code_act_inactivity; /* EV_KEY */
+
+ /*
+ * Use ADXL34x INT2 pin instead of INT1 pin for interrupt output
+ */
+ u8 use_int2;
+
+ /*
+ * ADXL346 only ORIENTATION SENSING feature
+ * The orientation function of the ADXL346 reports both 2-D and
+ * 3-D orientation concurrently.
+ */
+
+#define ADXL_EN_ORIENTATION_2D 1
+#define ADXL_EN_ORIENTATION_3D 2
+#define ADXL_EN_ORIENTATION_2D_3D 3
+
+ u8 orientation_enable;
+
+ /*
+ * The width of the deadzone region between two or more
+ * orientation positions is determined by setting the Deadzone
+ * value. The deadzone region size can be specified with a
+ * resolution of 3.6deg. The deadzone angle represents the total
+ * angle where the orientation is considered invalid.
+ */
+
+#define ADXL_DEADZONE_ANGLE_0p0 0 /* !!!0.0 [deg] */
+#define ADXL_DEADZONE_ANGLE_3p6 1 /* 3.6 [deg] */
+#define ADXL_DEADZONE_ANGLE_7p2 2 /* 7.2 [deg] */
+#define ADXL_DEADZONE_ANGLE_10p8 3 /* 10.8 [deg] */
+#define ADXL_DEADZONE_ANGLE_14p4 4 /* 14.4 [deg] */
+#define ADXL_DEADZONE_ANGLE_18p0 5 /* 18.0 [deg] */
+#define ADXL_DEADZONE_ANGLE_21p6 6 /* 21.6 [deg] */
+#define ADXL_DEADZONE_ANGLE_25p2 7 /* 25.2 [deg] */
+
+ u8 deadzone_angle;
+
+ /*
+ * To eliminate most human motion such as walking or shaking,
+ * a Divisor value should be selected to effectively limit the
+ * orientation bandwidth. Set the depth of the filter used to
+ * low-pass filter the measured acceleration for stable
+ * orientation sensing
+ */
+
+#define ADXL_LP_FILTER_DIVISOR_2 0
+#define ADXL_LP_FILTER_DIVISOR_4 1
+#define ADXL_LP_FILTER_DIVISOR_8 2
+#define ADXL_LP_FILTER_DIVISOR_16 3
+#define ADXL_LP_FILTER_DIVISOR_32 4
+#define ADXL_LP_FILTER_DIVISOR_64 5
+#define ADXL_LP_FILTER_DIVISOR_128 6
+#define ADXL_LP_FILTER_DIVISOR_256 7
+
+ u8 divisor_length;
+
+ u32 ev_codes_orient_2d[4]; /* EV_KEY {+X, -X, +Y, -Y} */
+ u32 ev_codes_orient_3d[6]; /* EV_KEY {+Z, +Y, +X, -X, -Y, -Z} */
+};
+#endif
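[Example -- not part of this patch] Representative (not datasheet-derived)
platform data for an ADXL34x reporting tap and axis data as an input device;
all values here are illustrative only:

        static struct adxl34x_platform_data foo_adxl34x_info = {
                .tap_threshold          = 0x31,
                .tap_duration           = 0x10,
                .tap_latency            = 0x60,
                .tap_window             = 0xF0,
                .tap_axis_control       = ADXL_TAP_X_EN | ADXL_TAP_Y_EN | ADXL_TAP_Z_EN,
                .data_rate              = 0x8,          /* 3200 Hz / 2^(15-8) = 25 Hz */
                .data_range             = ADXL_FULL_RES,
                .fifo_mode              = ADXL_FIFO_STREAM,
                .watermark              = 0,

                .ev_type                = EV_ABS,
                .ev_code_x              = ABS_X,
                .ev_code_y              = ABS_Y,
                .ev_code_z              = ABS_Z,
                .ev_code_tap            = { BTN_TOUCH, BTN_TOUCH, BTN_TOUCH },
        };
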
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
new file mode 100644
index 000000000..1affd0ddf
--- /dev/null
+++ b/include/linux/input/as5011.h
@@ -0,0 +1,20 @@
+#ifndef _AS5011_H
+#define _AS5011_H
+
+/*
+ * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+struct as5011_platform_data {
+ unsigned int button_gpio;
+ unsigned int axis_irq; /* irq number */
+ unsigned long axis_irqflags;
+ char xp, xn; /* threshold for x axis */
+ char yp, yn; /* threshold for y axis */
+};
+
+#endif /* _AS5011_H */
diff --git a/include/linux/input/auo-pixcir-ts.h b/include/linux/input/auo-pixcir-ts.h
new file mode 100644
index 000000000..5049f2192
--- /dev/null
+++ b/include/linux/input/auo-pixcir-ts.h
@@ -0,0 +1,54 @@
+/*
+ * Driver for AUO in-cell touchscreens
+ *
+ * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on auo_touch.h from Dell Streak kernel
+ *
+ * Copyright (c) 2008 QUALCOMM Incorporated.
+ * Copyright (c) 2008 QUALCOMM USA, INC.
+ *
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __AUO_PIXCIR_TS_H__
+#define __AUO_PIXCIR_TS_H__
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * compare coordinates: interrupt is asserted when coordinates change
+ * indicate touch: interrupt is asserted during touch
+ */
+#define AUO_PIXCIR_INT_PERIODICAL 0x00
+#define AUO_PIXCIR_INT_COMP_COORD 0x01
+#define AUO_PIXCIR_INT_TOUCH_IND 0x02
+
+/*
+ * @gpio_int interrupt gpio
+ * @gpio_rst reset gpio
+ * @int_setting one of AUO_PIXCIR_INT_*
+ * @x_max x-resolution
+ * @y_max y-resolution
+ */
+struct auo_pixcir_ts_platdata {
+ int gpio_int;
+ int gpio_rst;
+
+ int int_setting;
+
+ unsigned int x_max;
+ unsigned int y_max;
+};
+
+#endif
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
new file mode 100644
index 000000000..6230d76bd
--- /dev/null
+++ b/include/linux/input/bu21013.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Naveen Kumar G <naveen.gaddipati@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef _BU21013_H
+#define _BU21013_H
+
+/**
+ * struct bu21013_platform_device - Handle the platform data
+ * @touch_x_max: touch x max
+ * @touch_y_max: touch y max
+ * @cs_pin: chip select pin
+ * @touch_pin: touch gpio pin
+ * @ext_clk: external clock flag
+ * @x_flip: x flip flag
+ * @y_flip: y flip flag
+ * @wakeup: wakeup flag
+ *
+ * This is used to handle the platform data
+ */
+struct bu21013_platform_device {
+ int touch_x_max;
+ int touch_y_max;
+ unsigned int cs_pin;
+ unsigned int touch_pin;
+ bool ext_clk;
+ bool x_flip;
+ bool y_flip;
+ bool wakeup;
+};
+
+#endif
diff --git a/include/linux/input/cma3000.h b/include/linux/input/cma3000.h
new file mode 100644
index 000000000..cbbaac27d
--- /dev/null
+++ b/include/linux/input/cma3000.h
@@ -0,0 +1,59 @@
+/*
+ * VTI CMA3000_Dxx Accelerometer driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_CMA3000_H
+#define _LINUX_CMA3000_H
+
+#define CMAMODE_DEFAULT 0
+#define CMAMODE_MEAS100 1
+#define CMAMODE_MEAS400 2
+#define CMAMODE_MEAS40 3
+#define CMAMODE_MOTDET 4
+#define CMAMODE_FF100 5
+#define CMAMODE_FF400 6
+#define CMAMODE_POFF 7
+
+#define CMARANGE_2G 2000
+#define CMARANGE_8G 8000
+
+/**
+ * struct cma3000_i2c_platform_data - CMA3000 Platform data
+ * @fuzz_x: Noise on X Axis
+ * @fuzz_y: Noise on Y Axis
+ * @fuzz_z: Noise on Z Axis
+ * @g_range: G range in milli g i.e 2000 or 8000
+ * @mode: Operating mode
+ * @mdthr: Motion detect threshold value
+ * @mdfftmr: Motion detect and free fall time value
+ * @ffthr: Free fall threshold value
+ */
+
+struct cma3000_platform_data {
+ int fuzz_x;
+ int fuzz_y;
+ int fuzz_z;
+ int g_range;
+ uint8_t mode;
+ uint8_t mdthr;
+ uint8_t mdfftmr;
+ uint8_t ffthr;
+ unsigned long irqflags;
+};
+
+#endif
diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h
new file mode 100644
index 000000000..09522cb59
--- /dev/null
+++ b/include/linux/input/cy8ctmg110_pdata.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_CY8CTMG110_PDATA_H
+#define _LINUX_CY8CTMG110_PDATA_H
+
+struct cy8ctmg110_pdata
+{
+ int reset_pin; /* Reset pin is wired to this GPIO (optional) */
+ int irq_pin; /* IRQ pin is wired to this GPIO */
+};
+
+#endif
diff --git a/include/linux/input/cyttsp.h b/include/linux/input/cyttsp.h
new file mode 100644
index 000000000..5af7c66f1
--- /dev/null
+++ b/include/linux/input/cyttsp.h
@@ -0,0 +1,58 @@
+/*
+ * Header file for:
+ * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
+ * For use with Cypress Txx3xx parts.
+ * Supported parts include:
+ * CY8CTST341
+ * CY8CTMA340
+ *
+ * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
+ * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
+ *
+ */
+#ifndef _CYTTSP_H_
+#define _CYTTSP_H_
+
+#define CY_SPI_NAME "cyttsp-spi"
+#define CY_I2C_NAME "cyttsp-i2c"
+/* Active Power state scanning/processing refresh interval */
+#define CY_ACT_INTRVL_DFLT 0x00 /* ms */
+/* touch timeout for the Active power */
+#define CY_TCH_TMOUT_DFLT 0xFF /* ms */
+/* Low Power state scanning/processing refresh interval */
+#define CY_LP_INTRVL_DFLT 0x0A /* ms */
+/* Active distance in pixels for a gesture to be reported */
+#define CY_ACT_DIST_DFLT 0xF8 /* pixels */
+
+struct cyttsp_platform_data {
+ u32 maxx;
+ u32 maxy;
+ bool use_hndshk;
+ u8 act_dist; /* Active distance */
+ u8 act_intrvl; /* Active refresh interval; ms */
+ u8 tch_tmout; /* Active touch timeout; ms */
+ u8 lp_intrvl; /* Low power refresh interval; ms */
+ int (*init)(void);
+ void (*exit)(void);
+ char *name;
+ s16 irq_gpio;
+ u8 *bl_keys;
+};
+
+#endif /* _CYTTSP_H_ */
diff --git a/include/linux/input/edt-ft5x06.h b/include/linux/input/edt-ft5x06.h
new file mode 100644
index 000000000..8a1e0d1a0
--- /dev/null
+++ b/include/linux/input/edt-ft5x06.h
@@ -0,0 +1,24 @@
+#ifndef _EDT_FT5X06_H
+#define _EDT_FT5X06_H
+
+/*
+ * Copyright (c) 2012 Simon Budig, <simon.budig@kernelconcepts.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+struct edt_ft5x06_platform_data {
+ int irq_pin;
+ int reset_pin;
+
+ /* startup defaults for operational parameters */
+ bool use_parameters;
+ u8 gain;
+ u8 threshold;
+ u8 offset;
+ u8 report_rate;
+};
+
+#endif /* _EDT_FT5X06_H */
diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h
new file mode 100644
index 000000000..16625d799
--- /dev/null
+++ b/include/linux/input/eeti_ts.h
@@ -0,0 +1,10 @@
+#ifndef LINUX_INPUT_EETI_TS_H
+#define LINUX_INPUT_EETI_TS_H
+
+struct eeti_ts_platform_data {
+ int irq_gpio;
+ unsigned int irq_active_high;
+};
+
+#endif /* LINUX_INPUT_EETI_TS_H */
+
diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h
new file mode 100644
index 000000000..aad2fd44a
--- /dev/null
+++ b/include/linux/input/gp2ap002a00f.h
@@ -0,0 +1,22 @@
+#ifndef _GP2AP002A00F_H_
+#define _GP2AP002A00F_H_
+
+#include <linux/i2c.h>
+
+#define GP2A_I2C_NAME "gp2ap002a00f"
+
+/**
+ * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data
+ * @vout_gpio: The gpio connected to the object detected pin (VOUT)
+ * @wakeup: Set to true if the proximity can wake the device from suspend
+ * @hw_setup: Callback for setting up hardware such as gpios and vregs
+ * @hw_shutdown: Callback for properly shutting down hardware
+ */
+struct gp2a_platform_data {
+ int vout_gpio;
+ bool wakeup;
+ int (*hw_setup)(struct i2c_client *client);
+ int (*hw_shutdown)(struct i2c_client *client);
+};
+
+#endif
diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h
new file mode 100644
index 000000000..c1cc52d38
--- /dev/null
+++ b/include/linux/input/gpio_tilt.h
@@ -0,0 +1,73 @@
+#ifndef _INPUT_GPIO_TILT_H
+#define _INPUT_GPIO_TILT_H
+
+/**
+ * struct gpio_tilt_axis - Axis used by the tilt switch
+ * @axis: Constant describing the axis, e.g. ABS_X
+ * @min: minimum value for abs_param
+ * @max: maximum value for abs_param
+ * @fuzz: fuzz value for abs_param
+ * @flat: flat value for abs_param
+ */
+struct gpio_tilt_axis {
+ int axis;
+ int min;
+ int max;
+ int fuzz;
+ int flat;
+};
+
+/**
+ * struct gpio_tilt_state - state description
+ * @gpios: bitfield of gpio target-states for the value
+ * @axes: array containing the axes settings for the gpio state
+ * The array indices must correspond to the axes defined
+ * in platform_data
+ *
+ * This structure describes one supported set of axis settings
+ * and the gpio state which represents it.
+ *
+ * The n-th bit in the bitfield describes the state of the n-th GPIO
+ * from the gpios-array defined in gpio_regulator_config below.
+ */
+struct gpio_tilt_state {
+ int gpios;
+ int *axes;
+};
+
+/**
+ * struct gpio_tilt_platform_data
+ * @gpios: Array containing the gpios determining the tilt state
+ * @nr_gpios: Number of gpios
+ * @axes: Array of gpio_tilt_axis descriptions
+ * @nr_axes: Number of axes
+ * @states: Array of gpio_tilt_state entries describing
+ * the gpio state for specific tilts
+ * @nr_states: Number of states available
+ * @debounce_interval: debounce ticks interval in msecs
+ * @poll_interval: polling interval in msecs - for polling driver only
+ * @enable: callback to enable the tilt switch
+ * @disable: callback to disable the tilt switch
+ *
+ * This structure contains gpio-tilt-switch configuration
+ * information that must be passed by platform code to the
+ * gpio-tilt input driver.
+ */
+struct gpio_tilt_platform_data {
+ struct gpio *gpios;
+ int nr_gpios;
+
+ struct gpio_tilt_axis *axes;
+ int nr_axes;
+
+ struct gpio_tilt_state *states;
+ int nr_states;
+
+ int debounce_interval;
+
+ unsigned int poll_interval;
+ int (*enable)(struct device *dev);
+ void (*disable)(struct device *dev);
+};
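+
+/*
+ * Illustrative board-code sketch (not part of the original header): a
+ * two-GPIO tilt switch reporting a single ABS_X axis with three distinct
+ * positions.  All "demo_*" names and GPIO numbers are invented for the
+ * example.
+ *
+ *	static struct gpio demo_tilt_gpios[] = {
+ *		{ .gpio = 40, .flags = GPIOF_IN, .label = "tilt 0" },
+ *		{ .gpio = 41, .flags = GPIOF_IN, .label = "tilt 1" },
+ *	};
+ *
+ *	static struct gpio_tilt_axis demo_tilt_axes[] = {
+ *		{ .axis = ABS_X, .min = 0, .max = 2, .fuzz = 0, .flat = 0 },
+ *	};
+ *
+ *	static int demo_pos0[] = { 0 }, demo_pos1[] = { 1 }, demo_pos2[] = { 2 };
+ *
+ *	static struct gpio_tilt_state demo_tilt_states[] = {
+ *		{ .gpios = 0x00, .axes = demo_pos0 },
+ *		{ .gpios = 0x01, .axes = demo_pos1 },
+ *		{ .gpios = 0x03, .axes = demo_pos2 },
+ *	};
+ *
+ *	static struct gpio_tilt_platform_data demo_tilt_pdata = {
+ *		.gpios		= demo_tilt_gpios,
+ *		.nr_gpios	= ARRAY_SIZE(demo_tilt_gpios),
+ *		.axes		= demo_tilt_axes,
+ *		.nr_axes	= ARRAY_SIZE(demo_tilt_axes),
+ *		.states		= demo_tilt_states,
+ *		.nr_states	= ARRAY_SIZE(demo_tilt_states),
+ *		.poll_interval	= 100,
+ *	};
+ */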
+
+#endif
diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h
new file mode 100644
index 000000000..a5471245a
--- /dev/null
+++ b/include/linux/input/ili210x.h
@@ -0,0 +1,10 @@
+#ifndef _ILI210X_H
+#define _ILI210X_H
+
+struct ili210x_platform_data {
+ unsigned long irq_flags;
+ unsigned int poll_period;
+ bool (*get_pendown_state)(void);
+};
+
+#endif
diff --git a/include/linux/input/kxtj9.h b/include/linux/input/kxtj9.h
new file mode 100644
index 000000000..d415579b5
--- /dev/null
+++ b/include/linux/input/kxtj9.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2011 Kionix, Inc.
+ * Written by Chris Hudson <chudson@kionix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __KXTJ9_H__
+#define __KXTJ9_H__
+
+#define KXTJ9_I2C_ADDR 0x0F
+
+struct kxtj9_platform_data {
+ unsigned int min_interval; /* minimum poll interval (in milli-seconds) */
+ unsigned int init_interval; /* initial poll interval (in milli-seconds) */
+
+ /*
+ * By default, x is axis 0, y is axis 1, z is axis 2; these can be
+ * changed to account for sensor orientation within the host device.
+ */
+ u8 axis_map_x;
+ u8 axis_map_y;
+ u8 axis_map_z;
+
+ /*
+ * Each axis can be negated to account for sensor orientation within
+ * the host device.
+ */
+ bool negate_x;
+ bool negate_y;
+ bool negate_z;
+
+ /* CTRL_REG1: set resolution, g-range, data ready enable */
+ /* Output resolution: 8-bit valid or 12-bit valid */
+ #define RES_8BIT 0
+ #define RES_12BIT (1 << 6)
+ u8 res_12bit;
+ /* Output g-range: +/-2g, 4g, or 8g */
+ #define KXTJ9_G_2G 0
+ #define KXTJ9_G_4G (1 << 3)
+ #define KXTJ9_G_8G (1 << 4)
+ u8 g_range;
+
+ int (*init)(void);
+ void (*exit)(void);
+ int (*power_on)(void);
+ int (*power_off)(void);
+};
+#endif /* __KXTJ9_H__ */
diff --git a/include/linux/input/lm8333.h b/include/linux/input/lm8333.h
new file mode 100644
index 000000000..79f918c6e
--- /dev/null
+++ b/include/linux/input/lm8333.h
@@ -0,0 +1,24 @@
+/*
+ * public include for LM8333 keypad driver - same license as driver
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ */
+
+#ifndef _LM8333_H
+#define _LM8333_H
+
+struct lm8333;
+
+struct lm8333_platform_data {
+ /* Keymap data */
+ const struct matrix_keymap_data *matrix_data;
+ /* Active timeout before entering HALT mode, in microseconds */
+ unsigned active_time;
+ /* Debounce interval in microseconds */
+ unsigned debounce_time;
+};
+
+extern int lm8333_read8(struct lm8333 *lm8333, u8 cmd);
+extern int lm8333_write8(struct lm8333 *lm8333, u8 cmd, u8 val);
+extern int lm8333_read_block(struct lm8333 *lm8333, u8 cmd, u8 len, u8 *buf);
+
+#endif /* _LM8333_H */
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
new file mode 100644
index 000000000..27e06acc5
--- /dev/null
+++ b/include/linux/input/matrix_keypad.h
@@ -0,0 +1,103 @@
+#ifndef _MATRIX_KEYPAD_H
+#define _MATRIX_KEYPAD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/of.h>
+
+#define MATRIX_MAX_ROWS 32
+#define MATRIX_MAX_COLS 32
+
+#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
+ (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
+ ((val) & 0xffff))
+
+#define KEY_ROW(k) (((k) >> 24) & 0xff)
+#define KEY_COL(k) (((k) >> 16) & 0xff)
+#define KEY_VAL(k) ((k) & 0xffff)
+
+#define MATRIX_SCAN_CODE(row, col, row_shift) (((row) << (row_shift)) + (col))
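+
+/*
+ * Worked example (illustrative, not from the original header):
+ * KEY(2, 3, KEY_ENTER) packs the row into bits 31..24, the column into
+ * bits 23..16 and the keycode into bits 15..0, so KEY_ROW(), KEY_COL()
+ * and KEY_VAL() recover the three fields again.  With row_shift = 3
+ * (i.e. up to 8 columns), MATRIX_SCAN_CODE(2, 3, 3) = (2 << 3) + 3 = 19.
+ */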
+
+/**
+ * struct matrix_keymap_data - keymap for matrix keyboards
+ * @keymap: pointer to array of uint32 values encoded with KEY() macro
+ * representing keymap
+ * @keymap_size: number of entries (initialized) in this keymap
+ *
+ * This structure is supposed to be used by platform code to supply
+ * keymaps to drivers that implement matrix-like keypads/keyboards.
+ */
+struct matrix_keymap_data {
+ const uint32_t *keymap;
+ unsigned int keymap_size;
+};
+
+/**
+ * struct matrix_keypad_platform_data - platform-dependent keypad data
+ * @keymap_data: pointer to &matrix_keymap_data
+ * @row_gpios: pointer to array of gpio numbers representing rows
+ * @col_gpios: pointer to array of gpio numbers representing columns
+ * @num_row_gpios: actual number of row gpios used by device
+ * @num_col_gpios: actual number of col gpios used by device
+ * @col_scan_delay_us: delay, measured in microseconds, that is
+ * needed before we can scan the keypad after activating a column gpio
+ * @debounce_ms: debounce interval in milliseconds
+ * @clustered_irq: may be specified if interrupts of all row/column GPIOs
+ * are bundled into a single irq
+ * @clustered_irq_flags: flags that are needed for the clustered irq
+ * @active_low: gpio polarity
+ * @wakeup: controls whether the device should be set up as wakeup
+ * source
+ * @no_autorepeat: disable key autorepeat
+ *
+ * This structure represents platform-specific data that is used by the
+ * matrix_keypad driver to perform proper initialization.
+ */
+struct matrix_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+
+ const unsigned int *row_gpios;
+ const unsigned int *col_gpios;
+
+ unsigned int num_row_gpios;
+ unsigned int num_col_gpios;
+
+ unsigned int col_scan_delay_us;
+
+ /* key debounce interval in milliseconds */
+ unsigned int debounce_ms;
+
+ unsigned int clustered_irq;
+ unsigned int clustered_irq_flags;
+
+ bool active_low;
+ bool wakeup;
+ bool no_autorepeat;
+};
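+
+/*
+ * Illustrative board-code sketch (not part of the original header): a
+ * 2x2 GPIO-driven matrix keypad.  All "demo_*" names and GPIO numbers
+ * are invented for the example.
+ *
+ *	static const uint32_t demo_keymap[] = {
+ *		KEY(0, 0, KEY_UP),	KEY(0, 1, KEY_DOWN),
+ *		KEY(1, 0, KEY_LEFT),	KEY(1, 1, KEY_RIGHT),
+ *	};
+ *
+ *	static const struct matrix_keymap_data demo_keymap_data = {
+ *		.keymap		= demo_keymap,
+ *		.keymap_size	= ARRAY_SIZE(demo_keymap),
+ *	};
+ *
+ *	static const unsigned int demo_row_gpios[] = { 16, 17 };
+ *	static const unsigned int demo_col_gpios[] = { 18, 19 };
+ *
+ *	static struct matrix_keypad_platform_data demo_keypad_pdata = {
+ *		.keymap_data	= &demo_keymap_data,
+ *		.row_gpios	= demo_row_gpios,
+ *		.col_gpios	= demo_col_gpios,
+ *		.num_row_gpios	= ARRAY_SIZE(demo_row_gpios),
+ *		.num_col_gpios	= ARRAY_SIZE(demo_col_gpios),
+ *		.col_scan_delay_us = 10,
+ *		.debounce_ms	= 20,
+ *		.active_low	= true,
+ *	};
+ */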
+
+int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
+ const char *keymap_name,
+ unsigned int rows, unsigned int cols,
+ unsigned short *keymap,
+ struct input_dev *input_dev);
+
+#ifdef CONFIG_OF
+/**
+ * matrix_keypad_parse_of_params() - Read parameters from matrix-keypad node
+ *
+ * @dev: Device containing of_node
+ * @rows: Returns number of matrix rows
+ * @cols: Returns number of matrix columns
+ * @return 0 if OK, <0 on error
+ */
+int matrix_keypad_parse_of_params(struct device *dev,
+ unsigned int *rows, unsigned int *cols);
+#else
+static inline int matrix_keypad_parse_of_params(struct device *dev,
+ unsigned int *rows, unsigned int *cols)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_OF */
+
+#endif /* _MATRIX_KEYPAD_H */
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
new file mode 100644
index 000000000..d7188de4d
--- /dev/null
+++ b/include/linux/input/mt.h
@@ -0,0 +1,127 @@
+#ifndef _INPUT_MT_H
+#define _INPUT_MT_H
+
+/*
+ * Input Multitouch Library
+ *
+ * Copyright (c) 2010 Henrik Rydberg
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/input.h>
+
+#define TRKID_MAX 0xffff
+
+#define INPUT_MT_POINTER 0x0001 /* pointer device, e.g. trackpad */
+#define INPUT_MT_DIRECT 0x0002 /* direct device, e.g. touchscreen */
+#define INPUT_MT_DROP_UNUSED 0x0004 /* drop contacts not seen in frame */
+#define INPUT_MT_TRACK 0x0008 /* use in-kernel tracking */
+#define INPUT_MT_SEMI_MT 0x0010 /* semi-mt device, finger count handled manually */
+
+/**
+ * struct input_mt_slot - represents the state of an input MT slot
+ * @abs: holds current values of ABS_MT axes for this slot
+ * @frame: last frame at which input_mt_report_slot_state() was called
+ * @key: optional driver designation of this slot
+ */
+struct input_mt_slot {
+ int abs[ABS_MT_LAST - ABS_MT_FIRST + 1];
+ unsigned int frame;
+ unsigned int key;
+};
+
+/**
+ * struct input_mt - state of tracked contacts
+ * @trkid: stores MT tracking ID for the next contact
+ * @num_slots: number of MT slots the device uses
+ * @slot: MT slot currently being transmitted
+ * @flags: input_mt operation flags
+ * @frame: increases every time input_mt_sync_frame() is called
+ * @red: reduced cost matrix for in-kernel tracking
+ * @slots: array of slots holding current values of tracked contacts
+ */
+struct input_mt {
+ int trkid;
+ int num_slots;
+ int slot;
+ unsigned int flags;
+ unsigned int frame;
+ int *red;
+ struct input_mt_slot slots[];
+};
+
+static inline void input_mt_set_value(struct input_mt_slot *slot,
+ unsigned code, int value)
+{
+ slot->abs[code - ABS_MT_FIRST] = value;
+}
+
+static inline int input_mt_get_value(const struct input_mt_slot *slot,
+ unsigned code)
+{
+ return slot->abs[code - ABS_MT_FIRST];
+}
+
+static inline bool input_mt_is_active(const struct input_mt_slot *slot)
+{
+ return input_mt_get_value(slot, ABS_MT_TRACKING_ID) >= 0;
+}
+
+static inline bool input_mt_is_used(const struct input_mt *mt,
+ const struct input_mt_slot *slot)
+{
+ return slot->frame == mt->frame;
+}
+
+int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots,
+ unsigned int flags);
+void input_mt_destroy_slots(struct input_dev *dev);
+
+static inline int input_mt_new_trkid(struct input_mt *mt)
+{
+ return mt->trkid++ & TRKID_MAX;
+}
+
+static inline void input_mt_slot(struct input_dev *dev, int slot)
+{
+ input_event(dev, EV_ABS, ABS_MT_SLOT, slot);
+}
+
+static inline bool input_is_mt_value(int axis)
+{
+ return axis >= ABS_MT_FIRST && axis <= ABS_MT_LAST;
+}
+
+static inline bool input_is_mt_axis(int axis)
+{
+ return axis == ABS_MT_SLOT || input_is_mt_value(axis);
+}
+
+void input_mt_report_slot_state(struct input_dev *dev,
+ unsigned int tool_type, bool active);
+
+void input_mt_report_finger_count(struct input_dev *dev, int count);
+void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
+void input_mt_drop_unused(struct input_dev *dev);
+
+void input_mt_sync_frame(struct input_dev *dev);
+
+/**
+ * struct input_mt_pos - contact position
+ * @x: horizontal coordinate
+ * @y: vertical coordinate
+ */
+struct input_mt_pos {
+ s16 x, y;
+};
+
+int input_mt_assign_slots(struct input_dev *dev, int *slots,
+ const struct input_mt_pos *pos, int num_pos,
+ int dmax);
+
+int input_mt_get_slot_by_key(struct input_dev *dev, int key);
+
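+/*
+ * Illustrative usage sketch (not part of the original header): a typical
+ * slot-based report path for one decoded contact, assuming the driver
+ * already called input_mt_init_slots() with INPUT_MT_DIRECT at setup time.
+ * "demo_report_contact" is an invented helper name.
+ *
+ *	static void demo_report_contact(struct input_dev *dev, int slot,
+ *					bool active, int x, int y)
+ *	{
+ *		input_mt_slot(dev, slot);
+ *		input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+ *		if (active) {
+ *			input_report_abs(dev, ABS_MT_POSITION_X, x);
+ *			input_report_abs(dev, ABS_MT_POSITION_Y, y);
+ *		}
+ *	}
+ *
+ * Once every contact in the frame has been reported, the driver calls
+ * input_mt_sync_frame(dev) followed by input_sync(dev).
+ */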
+#endif
diff --git a/include/linux/input/navpoint.h b/include/linux/input/navpoint.h
new file mode 100644
index 000000000..45050eb34
--- /dev/null
+++ b/include/linux/input/navpoint.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) 2012 Paul Parsons <lost.distance@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct navpoint_platform_data {
+ int port; /* PXA SSP port for pxa_ssp_request() */
+ int gpio; /* GPIO for power on/off */
+};
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/input/pixcir_ts.h
new file mode 100644
index 000000000..7bae83b7c
--- /dev/null
+++ b/include/linux/input/pixcir_ts.h
@@ -0,0 +1,64 @@
+#ifndef _PIXCIR_I2C_TS_H
+#define _PIXCIR_I2C_TS_H
+
+/*
+ * Register map
+ */
+#define PIXCIR_REG_POWER_MODE 51
+#define PIXCIR_REG_INT_MODE 52
+
+/*
+ * Power modes:
+ * active: max scan speed
+ * idle: lower scan speed with automatic transition to active on touch
+ * halt: datasheet says sleep but this is more like halt as the chip
+ * clocks are cut and it can only be brought out of this mode
+ * using the RESET pin.
+ */
+enum pixcir_power_mode {
+ PIXCIR_POWER_ACTIVE,
+ PIXCIR_POWER_IDLE,
+ PIXCIR_POWER_HALT,
+};
+
+#define PIXCIR_POWER_MODE_MASK 0x03
+#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * diff coordinates: interrupt is asserted when coordinates change
+ * level on touch: interrupt level asserted during touch
+ * pulse on touch: interrupt pulse asserted during touch
+ *
+ */
+enum pixcir_int_mode {
+ PIXCIR_INT_PERIODICAL,
+ PIXCIR_INT_DIFF_COORD,
+ PIXCIR_INT_LEVEL_TOUCH,
+ PIXCIR_INT_PULSE_TOUCH,
+};
+
+#define PIXCIR_INT_MODE_MASK 0x03
+#define PIXCIR_INT_ENABLE (1UL << 3)
+#define PIXCIR_INT_POL_HIGH (1UL << 2)
+
+/**
+ * struct pixcir_i2c_chip_data - chip related data
+ * @max_fingers: Max number of fingers reported simultaneously by h/w
+ * @has_hw_ids: Hardware supports finger tracking IDs
+ *
+ */
+struct pixcir_i2c_chip_data {
+ u8 max_fingers;
+ bool has_hw_ids;
+};
+
+struct pixcir_ts_platform_data {
+ int x_max;
+ int y_max;
+ int gpio_attb; /* GPIO connected to ATTB line */
+ struct pixcir_i2c_chip_data chip;
+};
+
+#endif
diff --git a/include/linux/input/samsung-keypad.h b/include/linux/input/samsung-keypad.h
new file mode 100644
index 000000000..f25619bfd
--- /dev/null
+++ b/include/linux/input/samsung-keypad.h
@@ -0,0 +1,43 @@
+/*
+ * Samsung Keypad platform data definitions
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SAMSUNG_KEYPAD_H
+#define __SAMSUNG_KEYPAD_H
+
+#include <linux/input/matrix_keypad.h>
+
+#define SAMSUNG_MAX_ROWS 8
+#define SAMSUNG_MAX_COLS 8
+
+/**
+ * struct samsung_keypad_platdata - Platform device data for Samsung Keypad.
+ * @keymap_data: pointer to &matrix_keymap_data.
+ * @rows: number of keypad rows supported.
+ * @cols: number of keypad columns supported.
+ * @no_autorepeat: disable key autorepeat.
+ * @wakeup: controls whether the device should be set up as wakeup source.
+ * @cfg_gpio: configure the GPIO.
+ *
+ * Initialisation data specific to either the machine or the platform
+ * for the device driver to use or call back into when configuring the gpio.
+ */
+struct samsung_keypad_platdata {
+ const struct matrix_keymap_data *keymap_data;
+ unsigned int rows;
+ unsigned int cols;
+ bool no_autorepeat;
+ bool wakeup;
+
+ void (*cfg_gpio)(unsigned int rows, unsigned int cols);
+};
+
+#endif /* __SAMSUNG_KEYPAD_H */
diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h
new file mode 100644
index 000000000..5d253cd93
--- /dev/null
+++ b/include/linux/input/sh_keysc.h
@@ -0,0 +1,15 @@
+#ifndef __SH_KEYSC_H__
+#define __SH_KEYSC_H__
+
+#define SH_KEYSC_MAXKEYS 64
+
+struct sh_keysc_info {
+ enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3,
+ SH_KEYSC_MODE_4, SH_KEYSC_MODE_5, SH_KEYSC_MODE_6 } mode;
+ int scan_timing; /* 0 -> 7, see KYCR1, SCN[2:0] */
+ int delay;
+ int kycr2_delay;
+ int keycodes[SH_KEYSC_MAXKEYS]; /* KEYIN * KEYOUT */
+};
+
+#endif /* __SH_KEYSC_H__ */
diff --git a/include/linux/input/sparse-keymap.h b/include/linux/input/sparse-keymap.h
new file mode 100644
index 000000000..52db62064
--- /dev/null
+++ b/include/linux/input/sparse-keymap.h
@@ -0,0 +1,62 @@
+#ifndef _SPARSE_KEYMAP_H
+#define _SPARSE_KEYMAP_H
+
+/*
+ * Copyright (c) 2009 Dmitry Torokhov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#define KE_END 0 /* Indicates end of keymap */
+#define KE_KEY 1 /* Ordinary key/button */
+#define KE_SW 2 /* Switch (predetermined value) */
+#define KE_VSW 3 /* Switch (value supplied at runtime) */
+#define KE_IGNORE 4 /* Known entry that should be ignored */
+#define KE_LAST KE_IGNORE
+
+/**
+ * struct key_entry - keymap entry for use in sparse keymap
+ * @type: Type of the key entry (KE_KEY, KE_SW, KE_VSW, KE_END);
+ * drivers are allowed to extend the list with their own
+ * private definitions.
+ * @code: Device-specific data identifying the button/switch
+ * @keycode: KEY_* code assigned to a key/button
+ * @sw.code: SW_* code assigned to a switch
+ * @sw.value: Value that should be sent in an input event when a KE_SW
+ * switch is toggled. KE_VSW switches ignore this field and
+ * expect the driver to supply the value for the event.
+ *
+ * This structure defines an entry in a sparse keymap used by some
+ * input devices for which a traditional table-based approach is not
+ * suitable.
+ */
+struct key_entry {
+ int type; /* See KE_* above */
+ u32 code;
+ union {
+ u16 keycode; /* For KE_KEY */
+ struct { /* For KE_SW, KE_VSW */
+ u8 code;
+ u8 value; /* For KE_SW, ignored by KE_VSW */
+ } sw;
+ };
+};
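+
+/*
+ * Illustrative keymap sketch (not part of the original header): a small
+ * table a platform driver could pass to sparse_keymap_setup().  The scan
+ * codes are invented for the example.
+ *
+ *	static const struct key_entry demo_keymap[] = {
+ *		{ KE_KEY,    0x31, { KEY_VOLUMEUP } },
+ *		{ KE_KEY,    0x32, { KEY_VOLUMEDOWN } },
+ *		{ KE_SW,     0x35, { .sw = { SW_LID, 1 } } },
+ *		{ KE_IGNORE, 0x82 },
+ *		{ KE_END,    0 }
+ *	};
+ */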
+
+struct key_entry *sparse_keymap_entry_from_scancode(struct input_dev *dev,
+ unsigned int code);
+struct key_entry *sparse_keymap_entry_from_keycode(struct input_dev *dev,
+ unsigned int code);
+int sparse_keymap_setup(struct input_dev *dev,
+ const struct key_entry *keymap,
+ int (*setup)(struct input_dev *, struct key_entry *));
+void sparse_keymap_free(struct input_dev *dev);
+
+void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *ke,
+ unsigned int value, bool autorelease);
+
+bool sparse_keymap_report_event(struct input_dev *dev, unsigned int code,
+ unsigned int value, bool autorelease);
+
+#endif /* _SPARSE_KEYMAP_H */
diff --git a/include/linux/input/tca8418_keypad.h b/include/linux/input/tca8418_keypad.h
new file mode 100644
index 000000000..e71a85dc2
--- /dev/null
+++ b/include/linux/input/tca8418_keypad.h
@@ -0,0 +1,44 @@
+/*
+ * TCA8418 keypad platform support
+ *
+ * Copyright (C) 2011 Fuel7, Inc. All rights reserved.
+ *
+ * Author: Kyle Manna <kyle.manna@fuel7.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ *
+ * If you can't comply with GPLv2, alternative licensing terms may be
+ * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary
+ * alternative licensing inquiries.
+ */
+
+#ifndef _TCA8418_KEYPAD_H
+#define _TCA8418_KEYPAD_H
+
+#include <linux/types.h>
+#include <linux/input/matrix_keypad.h>
+
+#define TCA8418_I2C_ADDR 0x34
+#define TCA8418_NAME "tca8418_keypad"
+
+struct tca8418_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+ unsigned rows;
+ unsigned cols;
+ bool rep;
+ bool irq_is_gpio;
+};
+
+#endif
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
new file mode 100644
index 000000000..08a5ef6e8
--- /dev/null
+++ b/include/linux/input/touchscreen.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014 Sebastian Reichel <sre@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _TOUCHSCREEN_H
+#define _TOUCHSCREEN_H
+
+#include <linux/input.h>
+
+#ifdef CONFIG_OF
+void touchscreen_parse_of_params(struct input_dev *dev);
+#else
+static inline void touchscreen_parse_of_params(struct input_dev *dev)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/input/tps6507x-ts.h b/include/linux/input/tps6507x-ts.h
new file mode 100644
index 000000000..b433df801
--- /dev/null
+++ b/include/linux/input/tps6507x-ts.h
@@ -0,0 +1,23 @@
+/* linux/input/tps6507x-ts.h
+ *
+ * Functions to access the TPS65070 touch screen chip.
+ *
+ * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+
+#ifndef __LINUX_I2C_TPS6507X_TS_H
+#define __LINUX_I2C_TPS6507X_TS_H
+
+/* Board specific touch screen initial values */
+struct touchscreen_init_data {
+ int poll_period; /* ms */
+ __u16 min_pressure; /* min reading to be treated as a touch */
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+};
+
+#endif /* __LINUX_I2C_TPS6507X_TS_H */
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
new file mode 100644
index 000000000..c2d6082a1
--- /dev/null
+++ b/include/linux/integrity.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2009 IBM Corporation
+ * Author: Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef _LINUX_INTEGRITY_H
+#define _LINUX_INTEGRITY_H
+
+#include <linux/fs.h>
+
+enum integrity_status {
+ INTEGRITY_PASS = 0,
+ INTEGRITY_FAIL,
+ INTEGRITY_NOLABEL,
+ INTEGRITY_NOXATTRS,
+ INTEGRITY_UNKNOWN,
+};
+
+/* List of EVM protected security xattrs */
+#ifdef CONFIG_INTEGRITY
+extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode);
+extern void integrity_inode_free(struct inode *inode);
+extern void __init integrity_load_keys(void);
+
+#else
+static inline struct integrity_iint_cache *
+ integrity_inode_get(struct inode *inode)
+{
+ return NULL;
+}
+
+static inline void integrity_inode_free(struct inode *inode)
+{
+ return;
+}
+
+static inline void integrity_load_keys(void)
+{
+}
+#endif /* CONFIG_INTEGRITY */
+
+#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
new file mode 100644
index 000000000..a240e61a7
--- /dev/null
+++ b/include/linux/intel-iommu.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Ashok Raj <ashok.raj@intel.com>
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ */
+
+#ifndef _INTEL_IOMMU_H_
+#define _INTEL_IOMMU_H_
+
+#include <linux/types.h>
+#include <linux/iova.h>
+#include <linux/io.h>
+#include <linux/dma_remapping.h>
+#include <asm/cacheflush.h>
+#include <asm/iommu.h>
+
+/*
+ * Intel IOMMU register specification per version 1.0 public spec.
+ */
+
+#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
+#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
+#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
+#define DMAR_GCMD_REG 0x18 /* Global command register */
+#define DMAR_GSTS_REG 0x1c /* Global status register */
+#define DMAR_RTADDR_REG 0x20 /* Root entry table */
+#define DMAR_CCMD_REG 0x28 /* Context command reg */
+#define DMAR_FSTS_REG 0x34 /* Fault Status register */
+#define DMAR_FECTL_REG 0x38 /* Fault control register */
+#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
+#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
+#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
+#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
+#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
+#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
+#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
+#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
+#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
+#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
+#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
+#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
+#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
+#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
+
+#define OFFSET_STRIDE (9)
+/*
+#define dmar_readl(dmar, reg) readl(dmar + reg)
+#define dmar_readq(dmar, reg) ({ \
+ u32 lo, hi; \
+ lo = readl(dmar + reg); \
+ hi = readl(dmar + reg + 4); \
+ (((u64) hi) << 32) + lo; })
+*/
+static inline u64 dmar_readq(void __iomem *addr)
+{
+ u32 lo, hi;
+ lo = readl(addr);
+ hi = readl(addr + 4);
+ return (((u64) hi) << 32) + lo;
+}
+
+static inline void dmar_writeq(void __iomem *addr, u64 val)
+{
+ writel((u32)val, addr);
+ writel((u32)(val >> 32), addr + 4);
+}
+
+#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
+#define DMAR_VER_MINOR(v) ((v) & 0x0f)
+
+/*
+ * Decoding Capability Register
+ */
+#define cap_read_drain(c) (((c) >> 55) & 1)
+#define cap_write_drain(c) (((c) >> 54) & 1)
+#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
+#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
+#define cap_pgsel_inv(c) (((c) >> 39) & 1)
+
+#define cap_super_page_val(c) (((c) >> 34) & 0xf)
+#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
+ * OFFSET_STRIDE) + 21)
+
+#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
+#define cap_max_fault_reg_offset(c) \
+ (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
+
+#define cap_zlr(c) (((c) >> 22) & 1)
+#define cap_isoch(c) (((c) >> 23) & 1)
+#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
+#define cap_sagaw(c) (((c) >> 8) & 0x1f)
+#define cap_caching_mode(c) (((c) >> 7) & 1)
+#define cap_phmr(c) (((c) >> 6) & 1)
+#define cap_plmr(c) (((c) >> 5) & 1)
+#define cap_rwbf(c) (((c) >> 4) & 1)
+#define cap_afl(c) (((c) >> 3) & 1)
+#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
+/*
+ * Extended Capability Register
+ */
+
+#define ecap_pasid(e) ((e >> 40) & 0x1)
+#define ecap_pss(e) ((e >> 35) & 0x1f)
+#define ecap_eafs(e) ((e >> 34) & 0x1)
+#define ecap_nwfs(e) ((e >> 33) & 0x1)
+#define ecap_srs(e) ((e >> 31) & 0x1)
+#define ecap_ers(e) ((e >> 30) & 0x1)
+#define ecap_prs(e) ((e >> 29) & 0x1)
+/* PASID support used to be on bit 28 */
+#define ecap_dis(e) ((e >> 27) & 0x1)
+#define ecap_nest(e) ((e >> 26) & 0x1)
+#define ecap_mts(e) ((e >> 25) & 0x1)
+#define ecap_ecs(e) ((e >> 24) & 0x1)
+#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
+#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
+#define ecap_coherent(e) ((e) & 0x1)
+#define ecap_qis(e) ((e) & 0x2)
+#define ecap_pass_through(e) ((e >> 6) & 0x1)
+#define ecap_eim_support(e) ((e >> 4) & 0x1)
+#define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
+#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
+#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
+
+/* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET 60
+#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
+#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
+#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
+#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
+#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
+#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
+#define DMA_TLB_IVT (((u64)1) << 63)
+#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_TLB_MAX_SIZE (0x3f)
+
+/* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET 61
+#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
+#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
+#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
+#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
+#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
+#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
+#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_ID_TLB_ADDR(addr) (addr)
+#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
+
+/* PMEN_REG */
+#define DMA_PMEN_EPM (((u32)1)<<31)
+#define DMA_PMEN_PRS (((u32)1)<<0)
+
+/* GCMD_REG */
+#define DMA_GCMD_TE (((u32)1) << 31)
+#define DMA_GCMD_SRTP (((u32)1) << 30)
+#define DMA_GCMD_SFL (((u32)1) << 29)
+#define DMA_GCMD_EAFL (((u32)1) << 28)
+#define DMA_GCMD_WBF (((u32)1) << 27)
+#define DMA_GCMD_QIE (((u32)1) << 26)
+#define DMA_GCMD_SIRTP (((u32)1) << 24)
+#define DMA_GCMD_IRE (((u32) 1) << 25)
+#define DMA_GCMD_CFI (((u32) 1) << 23)
+
+/* GSTS_REG */
+#define DMA_GSTS_TES (((u32)1) << 31)
+#define DMA_GSTS_RTPS (((u32)1) << 30)
+#define DMA_GSTS_FLS (((u32)1) << 29)
+#define DMA_GSTS_AFLS (((u32)1) << 28)
+#define DMA_GSTS_WBFS (((u32)1) << 27)
+#define DMA_GSTS_QIES (((u32)1) << 26)
+#define DMA_GSTS_IRTPS (((u32)1) << 24)
+#define DMA_GSTS_IRES (((u32)1) << 25)
+#define DMA_GSTS_CFIS (((u32)1) << 23)
+
+/* DMA_RTADDR_REG */
+#define DMA_RTADDR_RTT (((u64)1) << 11)
+
+/* CCMD_REG */
+#define DMA_CCMD_ICC (((u64)1) << 63)
+#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
+#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
+#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
+#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
+#define DMA_CCMD_MASK_NOBIT 0
+#define DMA_CCMD_MASK_1BIT 1
+#define DMA_CCMD_MASK_2BIT 2
+#define DMA_CCMD_MASK_3BIT 3
+#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
+#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
+
+/* FECTL_REG */
+#define DMA_FECTL_IM (((u32)1) << 31)
+
+/* FSTS_REG */
+#define DMA_FSTS_PPF ((u32)2)
+#define DMA_FSTS_PFO ((u32)1)
+#define DMA_FSTS_IQE (1 << 4)
+#define DMA_FSTS_ICE (1 << 5)
+#define DMA_FSTS_ITE (1 << 6)
+#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
+
+/* FRCD_REG, 32 bits access */
+#define DMA_FRCD_F (((u32)1) << 31)
+#define dma_frcd_type(d) ((d >> 30) & 1)
+#define dma_frcd_fault_reason(c) (c & 0xff)
+#define dma_frcd_source_id(c) (c & 0xffff)
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+do { \
+ cycles_t start_time = get_cycles(); \
+ while (1) { \
+ sts = op(iommu->reg + offset); \
+ if (cond) \
+ break; \
+ if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
+ panic("DMAR hardware is malfunctioning\n"); \
+ cpu_relax(); \
+ } \
+} while (0)
+
+#define QI_LENGTH 256 /* queue length */
+
+enum {
+ QI_FREE,
+ QI_IN_USE,
+ QI_DONE,
+ QI_ABORT
+};
+
+#define QI_CC_TYPE 0x1
+#define QI_IOTLB_TYPE 0x2
+#define QI_DIOTLB_TYPE 0x3
+#define QI_IEC_TYPE 0x4
+#define QI_IWD_TYPE 0x5
+
+#define QI_IEC_SELECTIVE (((u64)1) << 4)
+#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
+#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
+
+#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
+#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
+
+#define QI_IOTLB_DID(did) (((u64)did) << 16)
+#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
+#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
+#define QI_IOTLB_AM(am) (((u8)am))
+
+#define QI_CC_FM(fm) (((u64)fm) << 48)
+#define QI_CC_SID(sid) (((u64)sid) << 32)
+#define QI_CC_DID(did) (((u64)did) << 16)
+#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
+#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_SIZE 1
+#define QI_DEV_IOTLB_MAX_INVS 32
+
+struct qi_desc {
+ u64 low, high;
+};
+
+struct q_inval {
+ raw_spinlock_t q_lock;
+ struct qi_desc *desc; /* invalidation queue */
+ int *desc_status; /* desc status */
+ int free_head; /* first free entry */
+ int free_tail; /* last free entry */
+ int free_cnt;
+};
+
+#ifdef CONFIG_IRQ_REMAP
+/* 1MB - maximum possible interrupt remapping table size */
+#define INTR_REMAP_PAGE_ORDER 8
+#define INTR_REMAP_TABLE_REG_SIZE 0xf
+
+#define INTR_REMAP_TABLE_ENTRIES 65536
+
+struct ir_table {
+ struct irte *base;
+ unsigned long *bitmap;
+};
+#endif
+
+struct iommu_flush {
+ void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+ void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+};
+
+enum {
+ SR_DMAR_FECTL_REG,
+ SR_DMAR_FEDATA_REG,
+ SR_DMAR_FEADDR_REG,
+ SR_DMAR_FEUADDR_REG,
+ MAX_SR_DMAR_REGS
+};
+
+struct intel_iommu {
+ void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+ u64 reg_phys; /* physical address of hw register set */
+ u64 reg_size; /* size of hw register set */
+ u64 cap;
+ u64 ecap;
+ u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
+ raw_spinlock_t register_lock; /* protect register handling */
+ int seq_id; /* sequence id of the iommu */
+ int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
+ unsigned int irq;
+ u16 segment; /* PCI segment# */
+ unsigned char name[13]; /* Device Name */
+
+#ifdef CONFIG_INTEL_IOMMU
+ unsigned long *domain_ids; /* bitmap of domains */
+ struct dmar_domain **domains; /* ptr to domains */
+ spinlock_t lock; /* protect context, domain ids */
+ struct root_entry *root_entry; /* virtual address */
+
+ struct iommu_flush flush;
+#endif
+ struct q_inval *qi; /* Queued invalidation info */
+ u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+
+#ifdef CONFIG_IRQ_REMAP
+ struct ir_table *ir_table; /* Interrupt remapping info */
+#endif
+ struct device *iommu_dev; /* IOMMU-sysfs device */
+ int node;
+};
+
+static inline void __iommu_flush_cache(
+ struct intel_iommu *iommu, void *addr, int size)
+{
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(addr, size);
+}
+
+extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
+
+extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void dmar_disable_qi(struct intel_iommu *iommu);
+extern int dmar_reenable_qi(struct intel_iommu *iommu);
+extern void qi_global_iec(struct intel_iommu *iommu);
+
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ u64 addr, unsigned mask);
+
+extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+extern int dmar_ir_support(void);
+
+extern const struct attribute_group *intel_iommu_groups[];
+
+#endif
diff --git a/include/linux/intel_pmic_gpio.h b/include/linux/intel_pmic_gpio.h
new file mode 100644
index 000000000..920109a29
--- /dev/null
+++ b/include/linux/intel_pmic_gpio.h
@@ -0,0 +1,15 @@
+#ifndef LINUX_INTEL_PMIC_H
+#define LINUX_INTEL_PMIC_H
+
+struct intel_pmic_gpio_platform_data {
+ /* the first IRQ of the chip */
+ unsigned irq_base;
+ /* number assigned to the first GPIO */
+ unsigned gpio_base;
+ /* SRAM address of the gpiointr register; the Langwell chip maps
+ * the PMIC SPI GPIO expander's GPIOINTR register in SRAM.
+ */
+ unsigned gpiointr;
+};
+
+#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
new file mode 100644
index 000000000..950ae4501
--- /dev/null
+++ b/include/linux/interrupt.h
@@ -0,0 +1,672 @@
+/* interrupt.h */
+#ifndef _LINUX_INTERRUPT_H
+#define _LINUX_INTERRUPT_H
+
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/bitops.h>
+#include <linux/preempt.h>
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+#include <linux/irqnr.h>
+#include <linux/hardirq.h>
+#include <linux/irqflags.h>
+#include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+
+#include <linux/atomic.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+
+/*
+ * These correspond to the IORESOURCE_IRQ_* defines in
+ * linux/ioport.h to select the interrupt line behaviour. When
+ * requesting an interrupt without specifying an IRQF_TRIGGER, the
+ * setting should be assumed to be "as already configured", which
+ * may be as per machine or firmware initialisation.
+ */
+#define IRQF_TRIGGER_NONE 0x00000000
+#define IRQF_TRIGGER_RISING 0x00000001
+#define IRQF_TRIGGER_FALLING 0x00000002
+#define IRQF_TRIGGER_HIGH 0x00000004
+#define IRQF_TRIGGER_LOW 0x00000008
+#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
+#define IRQF_TRIGGER_PROBE 0x00000010
+
+/*
+ * These flags used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * IRQF_SHARED - allow sharing the irq among several devices
+ * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
+ * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
+ * IRQF_PERCPU - Interrupt is per cpu
+ * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
+ * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
+ * registered first in a shared interrupt is considered for
+ * performance reasons)
+ * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has finished.
+ * Used by threaded interrupts which need to keep the
+ * irq line disabled until the threaded handler has been run.
+ * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
+ * that this interrupt will wake the system from a suspended
+ * state. See Documentation/power/suspend-and-interrupts.txt
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
+ * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
+ * resume time.
+ * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
+ * interrupt handler after suspending interrupts. For system
+ * wakeup devices users need to implement wakeup detection in
+ * their interrupt handlers.
+ */
+#define IRQF_SHARED 0x00000080
+#define IRQF_PROBE_SHARED 0x00000100
+#define __IRQF_TIMER 0x00000200
+#define IRQF_PERCPU 0x00000400
+#define IRQF_NOBALANCING 0x00000800
+#define IRQF_IRQPOLL 0x00001000
+#define IRQF_ONESHOT 0x00002000
+#define IRQF_NO_SUSPEND 0x00004000
+#define IRQF_FORCE_RESUME 0x00008000
+#define IRQF_NO_THREAD 0x00010000
+#define IRQF_EARLY_RESUME 0x00020000
+#define IRQF_COND_SUSPEND 0x00040000
+
+#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
+
+/*
+ * These values can be returned by request_any_context_irq() and
+ * describe the context the interrupt will be run in.
+ *
+ * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
+ * IRQC_IS_NESTED - interrupt runs in a nested threaded context
+ */
+enum {
+ IRQC_IS_HARDIRQ = 0,
+ IRQC_IS_NESTED,
+};
+
+typedef irqreturn_t (*irq_handler_t)(int, void *);
+
+/**
+ * struct irqaction - per interrupt action descriptor
+ * @handler: interrupt handler function
+ * @name: name of the device
+ * @dev_id: cookie to identify the device
+ * @percpu_dev_id: cookie to identify the device
+ * @next: pointer to the next irqaction for shared interrupts
+ * @irq: interrupt number
+ * @flags: flags (see IRQF_* above)
+ * @thread_fn: interrupt handler function for threaded interrupts
+ * @thread: thread pointer for threaded interrupts
+ * @thread_flags: flags related to @thread
+ * @thread_mask: bitmask for keeping track of @thread activity
+ * @dir: pointer to the proc/irq/NN/name entry
+ */
+struct irqaction {
+ irq_handler_t handler;
+ void *dev_id;
+ void __percpu *percpu_dev_id;
+ struct irqaction *next;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
+ unsigned int irq;
+ unsigned int flags;
+ unsigned long thread_flags;
+ unsigned long thread_mask;
+ const char *name;
+ struct proc_dir_entry *dir;
+} ____cacheline_internodealigned_in_smp;
+
+extern irqreturn_t no_action(int cpl, void *dev_id);
+
+extern int __must_check
+request_threaded_irq(unsigned int irq, irq_handler_t handler,
+ irq_handler_t thread_fn,
+ unsigned long flags, const char *name, void *dev);
+
+static inline int __must_check
+request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev)
+{
+ return request_threaded_irq(irq, handler, NULL, flags, name, dev);
+}
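+
+/*
+ * Illustrative usage sketch (not part of the original header): requesting
+ * a shared, falling-edge interrupt from a driver probe routine.  The
+ * "demo_*" names are invented for the example.
+ *
+ *	static irqreturn_t demo_irq_handler(int irq, void *dev_id)
+ *	{
+ *		struct demo_dev *demo = dev_id;
+ *
+ *		if (!demo_irq_pending(demo))	// hypothetical status check
+ *			return IRQ_NONE;	// not ours (shared line)
+ *
+ *		demo_ack_irq(demo);		// hypothetical acknowledge
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	err = request_irq(irq, demo_irq_handler,
+ *			  IRQF_SHARED | IRQF_TRIGGER_FALLING,
+ *			  "demo", demo);
+ *	if (err)
+ *		return err;
+ */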
+
+extern int __must_check
+request_any_context_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, const char *name, void *dev_id);
+
+extern int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *percpu_dev_id);
+
+extern void free_irq(unsigned int, void *);
+extern void free_percpu_irq(unsigned int, void __percpu *);
+
+struct device;
+
+extern int __must_check
+devm_request_threaded_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, irq_handler_t thread_fn,
+ unsigned long irqflags, const char *devname,
+ void *dev_id);
+
+static inline int __must_check
+devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
+ unsigned long irqflags, const char *devname, void *dev_id)
+{
+ return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
+ devname, dev_id);
+}
+
+extern int __must_check
+devm_request_any_context_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id);
+
+extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
+
+/*
+ * On lockdep we don't want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we don't seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq() do { } while (0)
+#else
+# define local_irq_enable_in_hardirq() local_irq_enable()
+#endif
+
+extern void disable_irq_nosync(unsigned int irq);
+extern bool disable_hardirq(unsigned int irq);
+extern void disable_irq(unsigned int irq);
+extern void disable_percpu_irq(unsigned int irq);
+extern void enable_irq(unsigned int irq);
+extern void enable_percpu_irq(unsigned int irq, unsigned int type);
+extern void irq_wake_thread(unsigned int irq, void *dev_id);
+
+/* The following three functions are for the core kernel use only. */
+extern void suspend_device_irqs(void);
+extern void resume_device_irqs(void);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq: Interrupt to which notification applies
+ * @kref: Reference count, for internal use
+ * @work: Work item, for internal use
+ * @notify: Function to be called on change. This will be
+ * called in process context.
+ * @release: Function to be called on release. This will be
+ * called in process context. Once registered, the
+ * structure must only be freed when this function is
+ * called or later.
+ */
+struct irq_affinity_notify {
+ unsigned int irq;
+ struct kref kref;
+ struct work_struct work;
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+ void (*release)(struct kref *ref);
+};
+
+#if defined(CONFIG_SMP)
+
+extern cpumask_var_t irq_default_affinity;
+
+/* Internal implementation. Use the helpers below */
+extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
+ bool force);
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
+ *
+ * Fails if cpumask does not contain an online CPU
+ */
+static inline int
+irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return __irq_set_affinity(irq, cpumask, false);
+}
+
+/**
+ * irq_force_affinity - Force the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
+ *
+ * Same as irq_set_affinity, but without checking the mask against
+ * online cpus.
+ *
+ * Solely for low level cpu hotplug code, where we need to make per
+ * cpu interrupts affine before the cpu becomes online.
+ */
+static inline int
+irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return __irq_set_affinity(irq, cpumask, true);
+}
+
+extern int irq_can_set_affinity(unsigned int irq);
+extern int irq_select_affinity(unsigned int irq);
+
+extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+#else /* CONFIG_SMP */
+
+static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+ return 0;
+}
+
+static inline int irq_can_set_affinity(unsigned int irq)
+{
+ return 0;
+}
+
+static inline int irq_select_affinity(unsigned int irq) { return 0; }
+
+static inline int irq_set_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+ return 0;
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used for locking constructs which
+ * know that a particular irq context is disabled and is
+ * the only irq-context user of a lock, so that it is
+ * safe to take the lock in the irq-disabled section
+ * without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
+ */
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+ local_irq_disable();
+#endif
+}
+
+static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
+{
+ disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+ local_irq_save(*flags);
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+ disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+ local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
+ local_irq_enable();
+#endif
+ enable_irq(irq);
+}
+
+static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
+{
+#ifdef CONFIG_LOCKDEP
+ local_irq_restore(*flags);
+#endif
+ enable_irq(irq);
+}
+
+/* IRQ wakeup (PM) control: */
+extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
+
+static inline int enable_irq_wake(unsigned int irq)
+{
+ return irq_set_irq_wake(irq, 1);
+}
+
+static inline int disable_irq_wake(unsigned int irq)
+{
+ return irq_set_irq_wake(irq, 0);
+}
+
+/*
+ * irq_get_irqchip_state/irq_set_irqchip_state specific flags
+ */
+enum irqchip_irq_state {
+ IRQCHIP_STATE_PENDING, /* Is interrupt pending? */
+ IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */
+ IRQCHIP_STATE_MASKED, /* Is interrupt masked? */
+ IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? */
+};
+
+extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+ bool *state);
+extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+ bool state);
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads (0)
+#endif
+
+#ifndef __ARCH_SET_SOFTIRQ_PENDING
+#define set_softirq_pending(x) (local_softirq_pending() = (x))
+#define or_softirq_pending(x) (local_softirq_pending() |= (x))
+#endif
+
+/* Some architectures might implement lazy enabling/disabling of
+ * interrupts. In some cases, such as stop_machine, we might want
+ * to ensure that after a local_irq_disable(), interrupts have
+ * really been disabled in hardware. Such architectures need to
+ * implement the following hook.
+ */
+#ifndef hard_irq_disable
+#define hard_irq_disable() do { } while(0)
+#endif
+
+/* PLEASE avoid allocating new softirqs unless you really need high
+ frequency threaded job scheduling. For almost all purposes
+ tasklets are more than enough. E.g. all serial device BHs et
+ al. should be converted to tasklets, not to softirqs.
+ */
+
+enum
+{
+ HI_SOFTIRQ=0,
+ TIMER_SOFTIRQ,
+ NET_TX_SOFTIRQ,
+ NET_RX_SOFTIRQ,
+ BLOCK_SOFTIRQ,
+ BLOCK_IOPOLL_SOFTIRQ,
+ TASKLET_SOFTIRQ,
+ SCHED_SOFTIRQ,
+ HRTIMER_SOFTIRQ,
+ RCU_SOFTIRQ, /* Preferably RCU should always be the last softirq */
+
+ NR_SOFTIRQS
+};
+
+#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+
+/* map softirq index to softirq name. update 'softirq_to_name' in
+ * kernel/softirq.c when adding a new softirq.
+ */
+extern const char * const softirq_to_name[NR_SOFTIRQS];
+
+/* softirq mask and active fields moved to irq_cpustat_t in
+ * asm/hardirq.h to get better cache usage. KAO
+ */
+
+struct softirq_action
+{
+ void (*action)(struct softirq_action *);
+};
+
+asmlinkage void do_softirq(void);
+asmlinkage void __do_softirq(void);
+
+#ifdef __ARCH_HAS_DO_SOFTIRQ
+void do_softirq_own_stack(void);
+#else
+static inline void do_softirq_own_stack(void)
+{
+ __do_softirq();
+}
+#endif
+
+extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+extern void softirq_init(void);
+extern void __raise_softirq_irqoff(unsigned int nr);
+
+extern void raise_softirq_irqoff(unsigned int nr);
+extern void raise_softirq(unsigned int nr);
+
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+ return this_cpu_read(ksoftirqd);
+}
+
+/* Tasklets --- multithreaded analogue of BHs.
+
+ Main feature distinguishing them from generic softirqs: a tasklet
+ runs on only one CPU at a time.
+
+ Main feature distinguishing them from BHs: different tasklets
+ may be run simultaneously on different CPUs.
+
+ Properties:
+ * If tasklet_schedule() is called, then the tasklet is guaranteed
+ to be executed on some cpu at least once after this.
+ * If the tasklet is already scheduled, but its execution is still not
+ started, it will be executed only once.
+ * If this tasklet is already running on another CPU (or schedule is called
+ from the tasklet itself), it is rescheduled for later.
+ * A tasklet is strictly serialized wrt itself, but not
+ wrt other tasklets. If a client needs some inter-task synchronization,
+ it must provide it with spinlocks.
+ */
+
+struct tasklet_struct
+{
+ struct tasklet_struct *next;
+ unsigned long state;
+ atomic_t count;
+ void (*func)(unsigned long);
+ unsigned long data;
+};
+
+#define DECLARE_TASKLET(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+
+#define DECLARE_TASKLET_DISABLED(name, func, data) \
+struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+
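+/*
+ * Illustrative usage sketch (not part of the original header): declaring a
+ * tasklet and scheduling it from a hardirq handler.  The "demo_*" names
+ * are invented for the example.
+ *
+ *	static void demo_tasklet_fn(unsigned long data)
+ *	{
+ *		struct demo_dev *demo = (struct demo_dev *)data;
+ *
+ *		demo_process_events(demo);	// hypothetical deferred work
+ *	}
+ *
+ *	DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn,
+ *			(unsigned long)&demo_device);
+ *
+ *	// from the interrupt handler:
+ *	tasklet_schedule(&demo_tasklet);
+ */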
+
+enum
+{
+ TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+};
+
+#ifdef CONFIG_SMP
+static inline int tasklet_trylock(struct tasklet_struct *t)
+{
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic();
+ clear_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
+#else
+#define tasklet_trylock(t) 1
+#define tasklet_unlock_wait(t) do { } while (0)
+#define tasklet_unlock(t) do { } while (0)
+#endif
+
+extern void __tasklet_schedule(struct tasklet_struct *t);
+
+static inline void tasklet_schedule(struct tasklet_struct *t)
+{
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+ __tasklet_schedule(t);
+}
+
+extern void __tasklet_hi_schedule(struct tasklet_struct *t);
+
+static inline void tasklet_hi_schedule(struct tasklet_struct *t)
+{
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+ __tasklet_hi_schedule(t);
+}
+
+extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
+
+/*
+ * This version avoids touching any other tasklets. Needed for kmemcheck
+ * in order not to take any page faults while enqueueing this tasklet;
+ * consider VERY carefully whether you really need this or
+ * tasklet_hi_schedule()...
+ */
+static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
+{
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+ __tasklet_hi_schedule_first(t);
+}
+
+
+static inline void tasklet_disable_nosync(struct tasklet_struct *t)
+{
+ atomic_inc(&t->count);
+ smp_mb__after_atomic();
+}
+
+static inline void tasklet_disable(struct tasklet_struct *t)
+{
+ tasklet_disable_nosync(t);
+ tasklet_unlock_wait(t);
+ smp_mb();
+}
+
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic();
+ atomic_dec(&t->count);
+}
+
+extern void tasklet_kill(struct tasklet_struct *t);
+extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+extern void tasklet_init(struct tasklet_struct *t,
+ void (*func)(unsigned long), unsigned long data);
+
+struct tasklet_hrtimer {
+ struct hrtimer timer;
+ struct tasklet_struct tasklet;
+ enum hrtimer_restart (*function)(struct hrtimer *);
+};
+
+extern void
+tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+ enum hrtimer_restart (*function)(struct hrtimer *),
+ clockid_t which_clock, enum hrtimer_mode mode);
+
+static inline
+int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+ const enum hrtimer_mode mode)
+{
+ return hrtimer_start(&ttimer->timer, time, mode);
+}
+
+static inline
+void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
+{
+ hrtimer_cancel(&ttimer->timer);
+ tasklet_kill(&ttimer->tasklet);
+}
+
+/*
+ * Autoprobing for irqs:
+ *
+ * probe_irq_on() and probe_irq_off() provide robust primitives
+ * for accurate IRQ probing during kernel initialization. They are
+ * reasonably simple to use, are not "fooled" by spurious interrupts,
+ * and, unlike other attempts at IRQ probing, they do not get hung on
+ * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
+ *
+ * For reasonably foolproof probing, use them as follows:
+ *
+ * 1. clear and/or mask the device's internal interrupt.
+ * 2. sti();
+ * 3. irqs = probe_irq_on(); // "take over" all unassigned idle IRQs
+ * 4. enable the device and cause it to trigger an interrupt.
+ * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
+ * 6. irq = probe_irq_off(irqs); // get IRQ number, 0=none, negative=multiple
+ * 7. service the device to clear its pending interrupt.
+ * 8. loop again if paranoia is required.
+ *
+ * probe_irq_on() returns a mask of allocated irq's.
+ *
+ * probe_irq_off() takes the mask as a parameter,
+ * and returns the irq number which occurred,
+ * or zero if none occurred, or a negative irq number
+ * if more than one irq occurred.
+ */
+
+#if !defined(CONFIG_GENERIC_IRQ_PROBE)
+static inline unsigned long probe_irq_on(void)
+{
+ return 0;
+}
+static inline int probe_irq_off(unsigned long val)
+{
+ return 0;
+}
+static inline unsigned int probe_irq_mask(unsigned long val)
+{
+ return 0;
+}
+#else
+extern unsigned long probe_irq_on(void); /* returns 0 on failure */
+extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
+extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */
+#endif
+
+#ifdef CONFIG_PROC_FS
+/* Initialize /proc/irq/ */
+extern void init_irq_proc(void);
+#else
+static inline void init_irq_proc(void)
+{
+}
+#endif
+
+struct seq_file;
+int show_interrupts(struct seq_file *p, void *v);
+int arch_show_interrupts(struct seq_file *p, int prec);
+
+extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
+extern int arch_early_irq_init(void);
+
+#endif
diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h
new file mode 100644
index 000000000..724556aa3
--- /dev/null
+++ b/include/linux/interval_tree.h
@@ -0,0 +1,27 @@
+#ifndef _LINUX_INTERVAL_TREE_H
+#define _LINUX_INTERVAL_TREE_H
+
+#include <linux/rbtree.h>
+
+struct interval_tree_node {
+ struct rb_node rb;
+ unsigned long start; /* Start of interval */
+ unsigned long last; /* Last location _in_ interval */
+ unsigned long __subtree_last;
+};
+
+extern void
+interval_tree_insert(struct interval_tree_node *node, struct rb_root *root);
+
+extern void
+interval_tree_remove(struct interval_tree_node *node, struct rb_root *root);
+
+extern struct interval_tree_node *
+interval_tree_iter_first(struct rb_root *root,
+ unsigned long start, unsigned long last);
+
+extern struct interval_tree_node *
+interval_tree_iter_next(struct interval_tree_node *node,
+ unsigned long start, unsigned long last);
+
+#endif /* _LINUX_INTERVAL_TREE_H */
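
For orientation, a minimal sketch of walking every node that overlaps [0x1000, 0x1fff] with the interface above; building the rb_root and inserting nodes is assumed to happen elsewhere:

static void dump_overlaps(struct rb_root *root)
{
	struct interval_tree_node *it;

	for (it = interval_tree_iter_first(root, 0x1000, 0x1fff);
	     it;
	     it = interval_tree_iter_next(it, 0x1000, 0x1fff))
		pr_info("overlap: [%lx, %lx]\n", it->start, it->last);
}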
diff --git a/include/linux/interval_tree_generic.h b/include/linux/interval_tree_generic.h
new file mode 100644
index 000000000..58370e186
--- /dev/null
+++ b/include/linux/interval_tree_generic.h
@@ -0,0 +1,191 @@
+/*
+ Interval Trees
+ (C) 2012 Michel Lespinasse <walken@google.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ include/linux/interval_tree_generic.h
+*/
+
+#include <linux/rbtree_augmented.h>
+
+/*
+ * Template for implementing interval trees
+ *
+ * ITSTRUCT: struct type of the interval tree nodes
+ * ITRB: name of struct rb_node field within ITSTRUCT
+ * ITTYPE: type of the interval endpoints
+ * ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree
+ * ITSTART(n): start endpoint of ITSTRUCT node n
+ * ITLAST(n): last endpoint of ITSTRUCT node n
+ * ITSTATIC: 'static' or empty
+ * ITPREFIX: prefix to use for the inline tree definitions
+ *
+ * Note - before using this, please consider whether the non-generic
+ * version (interval_tree.h) would work for you...
+ */
+
+#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \
+ ITSTART, ITLAST, ITSTATIC, ITPREFIX) \
+ \
+/* Callbacks for augmented rbtree insert and remove */ \
+ \
+static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
+{ \
+ ITTYPE max = ITLAST(node), subtree_last; \
+ if (node->ITRB.rb_left) { \
+ subtree_last = rb_entry(node->ITRB.rb_left, \
+ ITSTRUCT, ITRB)->ITSUBTREE; \
+ if (max < subtree_last) \
+ max = subtree_last; \
+ } \
+ if (node->ITRB.rb_right) { \
+ subtree_last = rb_entry(node->ITRB.rb_right, \
+ ITSTRUCT, ITRB)->ITSUBTREE; \
+ if (max < subtree_last) \
+ max = subtree_last; \
+ } \
+ return max; \
+} \
+ \
+RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
+ ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
+ \
+/* Insert / remove interval nodes from the tree */ \
+ \
+ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
+{ \
+ struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
+ ITTYPE start = ITSTART(node), last = ITLAST(node); \
+ ITSTRUCT *parent; \
+ \
+ while (*link) { \
+ rb_parent = *link; \
+ parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
+ if (parent->ITSUBTREE < last) \
+ parent->ITSUBTREE = last; \
+ if (start < ITSTART(parent)) \
+ link = &parent->ITRB.rb_left; \
+ else \
+ link = &parent->ITRB.rb_right; \
+ } \
+ \
+ node->ITSUBTREE = last; \
+ rb_link_node(&node->ITRB, rb_parent, link); \
+ rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
+} \
+ \
+ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
+{ \
+ rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
+} \
+ \
+/* \
+ * Iterate over intervals intersecting [start;last] \
+ * \
+ * Note that a node's interval intersects [start;last] iff: \
+ * Cond1: ITSTART(node) <= last \
+ * and \
+ * Cond2: start <= ITLAST(node) \
+ */ \
+ \
+static ITSTRUCT * \
+ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
+{ \
+ while (true) { \
+ /* \
+ * Loop invariant: start <= node->ITSUBTREE \
+ * (Cond2 is satisfied by one of the subtree nodes) \
+ */ \
+ if (node->ITRB.rb_left) { \
+ ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
+ ITSTRUCT, ITRB); \
+ if (start <= left->ITSUBTREE) { \
+ /* \
+ * Some nodes in left subtree satisfy Cond2. \
+ * Iterate to find the leftmost such node N. \
+ * If it also satisfies Cond1, that's the \
+ * match we are looking for. Otherwise, there \
+ * is no matching interval as nodes to the \
+ * right of N can't satisfy Cond1 either. \
+ */ \
+ node = left; \
+ continue; \
+ } \
+ } \
+ if (ITSTART(node) <= last) { /* Cond1 */ \
+ if (start <= ITLAST(node)) /* Cond2 */ \
+ return node; /* node is leftmost match */ \
+ if (node->ITRB.rb_right) { \
+ node = rb_entry(node->ITRB.rb_right, \
+ ITSTRUCT, ITRB); \
+ if (start <= node->ITSUBTREE) \
+ continue; \
+ } \
+ } \
+ return NULL; /* No match */ \
+ } \
+} \
+ \
+ITSTATIC ITSTRUCT * \
+ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \
+{ \
+ ITSTRUCT *node; \
+ \
+ if (!root->rb_node) \
+ return NULL; \
+ node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
+ if (node->ITSUBTREE < start) \
+ return NULL; \
+ return ITPREFIX ## _subtree_search(node, start, last); \
+} \
+ \
+ITSTATIC ITSTRUCT * \
+ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
+{ \
+ struct rb_node *rb = node->ITRB.rb_right, *prev; \
+ \
+ while (true) { \
+ /* \
+ * Loop invariants: \
+ * Cond1: ITSTART(node) <= last \
+ * rb == node->ITRB.rb_right \
+ * \
+ * First, search right subtree if suitable \
+ */ \
+ if (rb) { \
+ ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
+ if (start <= right->ITSUBTREE) \
+ return ITPREFIX ## _subtree_search(right, \
+ start, last); \
+ } \
+ \
+ /* Move up the tree until we come from a node's left child */ \
+ do { \
+ rb = rb_parent(&node->ITRB); \
+ if (!rb) \
+ return NULL; \
+ prev = &node->ITRB; \
+ node = rb_entry(rb, ITSTRUCT, ITRB); \
+ rb = node->ITRB.rb_right; \
+ } while (prev == rb); \
+ \
+ /* Check if the node intersects [start;last] */ \
+ if (last < ITSTART(node)) /* !Cond1 */ \
+ return NULL; \
+ else if (start <= ITLAST(node)) /* Cond2 */ \
+ return node; \
+ } \
+}
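
A hypothetical instantiation of the template: struct my_range, the MR_* accessors and the my_range_it prefix are illustrative. The expansion produces static my_range_it_insert(), my_range_it_remove(), my_range_it_iter_first() and my_range_it_iter_next() operating on the embedded rb_node:

struct my_range {
	struct rb_node rb;		/* ITRB */
	unsigned long start;		/* ITSTART */
	unsigned long last;		/* ITLAST */
	unsigned long subtree_last;	/* ITSUBTREE */
};

#define MR_START(n)	((n)->start)
#define MR_LAST(n)	((n)->last)

INTERVAL_TREE_DEFINE(struct my_range, rb, unsigned long, subtree_last,
		     MR_START, MR_LAST, static, my_range_it)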
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
new file mode 100644
index 000000000..657fab4ef
--- /dev/null
+++ b/include/linux/io-mapping.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright © 2008 Keith Packard <keithp@keithp.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_IO_MAPPING_H
+#define _LINUX_IO_MAPPING_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+/*
+ * The io_mapping mechanism provides an abstraction for mapping
+ * individual pages from an io device to the CPU in an efficient fashion.
+ *
+ * See Documentation/io-mapping.txt
+ */
+
+#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+
+#include <asm/iomap.h>
+
+struct io_mapping {
+ resource_size_t base;
+ unsigned long size;
+ pgprot_t prot;
+};
+
+/*
+ * For small address space machines, mapping large objects
+ * into the kernel virtual space isn't practical. Where
+ * available, use fixmap support to dynamically map pages
+ * of the object at run time.
+ */
+
+static inline struct io_mapping *
+io_mapping_create_wc(resource_size_t base, unsigned long size)
+{
+ struct io_mapping *iomap;
+ pgprot_t prot;
+
+ iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
+ if (!iomap)
+ goto out_err;
+
+ if (iomap_create_wc(base, size, &prot))
+ goto out_free;
+
+ iomap->base = base;
+ iomap->size = size;
+ iomap->prot = prot;
+ return iomap;
+
+out_free:
+ kfree(iomap);
+out_err:
+ return NULL;
+}
+
+static inline void
+io_mapping_free(struct io_mapping *mapping)
+{
+ iomap_free(mapping->base, mapping->size);
+ kfree(mapping);
+}
+
+/* Atomic map/unmap */
+static inline void __iomem *
+io_mapping_map_atomic_wc(struct io_mapping *mapping,
+ unsigned long offset)
+{
+ resource_size_t phys_addr;
+ unsigned long pfn;
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+ pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
+ return iomap_atomic_prot_pfn(pfn, mapping->prot);
+}
+
+static inline void
+io_mapping_unmap_atomic(void __iomem *vaddr)
+{
+ iounmap_atomic(vaddr);
+}
+
+static inline void __iomem *
+io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ resource_size_t phys_addr;
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+
+ return ioremap_wc(phys_addr, PAGE_SIZE);
+}
+
+static inline void
+io_mapping_unmap(void __iomem *vaddr)
+{
+ iounmap(vaddr);
+}
+
+#else
+
+#include <linux/uaccess.h>
+
+/* this struct isn't actually defined anywhere */
+struct io_mapping;
+
+/* Create the io_mapping object */
+static inline struct io_mapping *
+io_mapping_create_wc(resource_size_t base, unsigned long size)
+{
+ return (struct io_mapping __force *) ioremap_wc(base, size);
+}
+
+static inline void
+io_mapping_free(struct io_mapping *mapping)
+{
+ iounmap((void __force __iomem *) mapping);
+}
+
+/* Atomic map/unmap */
+static inline void __iomem *
+io_mapping_map_atomic_wc(struct io_mapping *mapping,
+ unsigned long offset)
+{
+ pagefault_disable();
+ return ((char __force __iomem *) mapping) + offset;
+}
+
+static inline void
+io_mapping_unmap_atomic(void __iomem *vaddr)
+{
+ pagefault_enable();
+}
+
+/* Non-atomic map/unmap */
+static inline void __iomem *
+io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
+{
+ return ((char __force __iomem *) mapping) + offset;
+}
+
+static inline void
+io_mapping_unmap(void __iomem *vaddr)
+{
+}
+
+#endif /* HAVE_ATOMIC_IOMAP */
+
+#endif /* _LINUX_IO_MAPPING_H */
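
A hedged sketch of the intended calling pattern; the BAR base/size and the register write are illustrative, not tied to any real device:

static void poke_first_page(resource_size_t bar_base, unsigned long bar_size)
{
	struct io_mapping *map;
	void __iomem *p;

	map = io_mapping_create_wc(bar_base, bar_size);
	if (!map)
		return;

	p = io_mapping_map_atomic_wc(map, 0);	/* offset 0 into the aperture */
	writel(0x1, p);				/* device-specific poke */
	io_mapping_unmap_atomic(p);

	io_mapping_free(map);
}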
diff --git a/include/linux/io.h b/include/linux/io.h
new file mode 100644
index 000000000..986f2bffe
--- /dev/null
+++ b/include/linux/io.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2006 PathScale, Inc. All Rights Reserved.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_IO_H
+#define _LINUX_IO_H
+
+#include <linux/types.h>
+#include <asm/io.h>
+#include <asm/page.h>
+
+struct device;
+
+__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
+
+#ifdef CONFIG_MMU
+int ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot);
+#else
+static inline int ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+void __init ioremap_huge_init(void);
+int arch_ioremap_pud_supported(void);
+int arch_ioremap_pmd_supported(void);
+#else
+static inline void ioremap_huge_init(void) { }
+#endif
+
+/*
+ * Managed iomap interface
+ */
+#ifdef CONFIG_HAS_IOPORT_MAP
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+ unsigned int nr);
+void devm_ioport_unmap(struct device *dev, void __iomem *addr);
+#else
+static inline void __iomem *devm_ioport_map(struct device *dev,
+ unsigned long port,
+ unsigned int nr)
+{
+ return NULL;
+}
+
+static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+}
+#endif
+
+#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
+
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+ resource_size_t size);
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
+ resource_size_t size);
+void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
+ resource_size_t size);
+void devm_iounmap(struct device *dev, void __iomem *addr);
+int check_signature(const volatile void __iomem *io_addr,
+ const unsigned char *signature, int length);
+void devm_ioremap_release(struct device *dev, void *res);
+
+/*
+ * Some systems do not have legacy ISA devices.
+ * /dev/port is not a valid interface on these systems.
+ * So for those archs, <asm/io.h> should define the following symbol.
+ */
+#ifndef arch_has_dev_port
+#define arch_has_dev_port() (1)
+#endif
+
+/*
+ * Some systems (x86 without PAT) have a somewhat reliable way to mark a
+ * physical address range such that uncached mappings will actually
+ * end up write-combining. This facility should be used in conjunction
+ * with pgprot_writecombine, ioremap_wc, or set_memory_wc, since it has
+ * no effect if the per-page mechanisms are functional.
+ * (On x86 without PAT, these functions manipulate MTRRs.)
+ *
+ * arch_phys_wc_del(0) or arch_phys_wc_del(any error code) is guaranteed
+ * to have no effect.
+ */
+#ifndef arch_phys_wc_add
+static inline int __must_check arch_phys_wc_add(unsigned long base,
+ unsigned long size)
+{
+ return 0; /* It worked (i.e. did nothing). */
+}
+
+static inline void arch_phys_wc_del(int handle)
+{
+}
+
+#define arch_phys_wc_add arch_phys_wc_add
+#endif
+
+#endif /* _LINUX_IO_H */
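
A small sketch of the add/del contract described above; my_wc_handle and the wrapper functions are illustrative:

static int my_wc_handle;

static void my_enable_wc(resource_size_t base, unsigned long size)
{
	/* Returns a handle; values <= 0 are safe to pass to arch_phys_wc_del(). */
	my_wc_handle = arch_phys_wc_add(base, size);
}

static void my_disable_wc(void)
{
	arch_phys_wc_del(my_wc_handle);	/* no-op for 0 or error handles */
}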
diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h
new file mode 100644
index 000000000..38b286e9a
--- /dev/null
+++ b/include/linux/ioc3.h
@@ -0,0 +1,93 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org>
+ */
+
+#ifndef _LINUX_IOC3_H
+#define _LINUX_IOC3_H
+
+#include <asm/sn/ioc3.h>
+
+#define IOC3_MAX_SUBMODULES 32
+
+#define IOC3_CLASS_NONE 0
+#define IOC3_CLASS_BASE_IP27 1
+#define IOC3_CLASS_BASE_IP30 2
+#define IOC3_CLASS_MENET_123 3
+#define IOC3_CLASS_MENET_4 4
+#define IOC3_CLASS_CADDUO 5
+#define IOC3_CLASS_SERIAL 6
+
+/* One of these per IOC3 */
+struct ioc3_driver_data {
+ struct list_head list;
+ int id; /* IOC3 sequence number */
+ /* PCI mapping */
+ unsigned long pma; /* physical address */
+ struct ioc3 __iomem *vma; /* pointer to registers */
+ struct pci_dev *pdev; /* PCI device */
+ /* IRQ stuff */
+ int dual_irq; /* set if separate IRQs are used */
+ int irq_io, irq_eth; /* IRQ numbers */
+ /* GPIO magic */
+ spinlock_t gpio_lock;
+ unsigned int gpdr_shadow;
+ /* NIC identifiers */
+ char nic_part[32];
+ char nic_serial[16];
+ char nic_mac[6];
+ /* submodule set */
+ int class;
+ void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */
+ int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */
+	/* ir_lock must be held while
+ * modifying sio_ie values, so
+ * we can be sure that sio_ie is
+ * not changing when we read it
+ * along with sio_ir.
+ */
+ spinlock_t ir_lock; /* SIO_IE[SC] mod lock */
+};
+
+/* One per submodule */
+struct ioc3_submodule {
+ char *name; /* descriptive submodule name */
+ struct module *owner; /* owning kernel module */
+ int ethernet; /* set for ethernet drivers */
+ int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *);
+ int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *);
+ int id; /* assigned by IOC3, index for the "data" array */
+ /* IRQ stuff */
+ unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */
+ int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */
+ int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
+ /* private submodule data */
+ void *data; /* assigned by submodule */
+};
+
+/**********************************
+ * Functions needed by submodules *
+ **********************************/
+
+#define IOC3_W_IES 0
+#define IOC3_W_IEC 1
+
+/* registers a submodule for all existing and future IOC3 chips */
+extern int ioc3_register_submodule(struct ioc3_submodule *);
+/* unregisters a submodule */
+extern void ioc3_unregister_submodule(struct ioc3_submodule *);
+/* enables IRQs indicated by irq_mask for a specified IOC3 chip */
+extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
+/* acknowledges specified IRQs */
+extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
+/* disables IRQs indicated by irq_mask for a specified IOC3 chip */
+extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
+/* atomically sets GPCR bits */
+extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int);
+/* general ireg writer */
+extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg);
+
+#endif
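
A minimal, hypothetical submodule skeleton against the hooks above; "my_ioc3" and the empty probe/remove bodies are placeholders:

static int my_ioc3_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
{
	return 0;	/* success; idd->data[is->id] is available for private state */
}

static int my_ioc3_remove(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
{
	return 0;
}

static struct ioc3_submodule my_ioc3_submodule = {
	.name	= "my_ioc3",
	.owner	= THIS_MODULE,
	.probe	= my_ioc3_probe,
	.remove	= my_ioc3_remove,
};

/* Typically called from module_init()/module_exit():
 *	ioc3_register_submodule(&my_ioc3_submodule);
 *	ioc3_unregister_submodule(&my_ioc3_submodule);
 */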
diff --git a/include/linux/ioc4.h b/include/linux/ioc4.h
new file mode 100644
index 000000000..51e2b9fb6
--- /dev/null
+++ b/include/linux/ioc4.h
@@ -0,0 +1,184 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#ifndef _LINUX_IOC4_H
+#define _LINUX_IOC4_H
+
+#include <linux/interrupt.h>
+
+/***************
+ * Definitions *
+ ***************/
+
+/* Miscellaneous values inherent to hardware */
+
+#define IOC4_EXTINT_COUNT_DIVISOR 520 /* PCI clocks per COUNT tick */
+
+/***********************************
+ * Structures needed by subdrivers *
+ ***********************************/
+
+/* This structure fully describes the IOC4 miscellaneous registers which
+ * appear at bar[0]+0x00000 through bar[0]+0x0005c. The corresponding
+ * PCI resource is managed by the main IOC4 driver because it contains
+ * registers of interest to many different IOC4 subdrivers.
+ */
+struct ioc4_misc_regs {
+ /* Miscellaneous IOC4 registers */
+ union ioc4_pci_err_addr_l {
+ uint32_t raw;
+ struct {
+ uint32_t valid:1; /* Address captured */
+ uint32_t master_id:4; /* Unit causing error
+ * 0/1: Serial port 0 TX/RX
+ * 2/3: Serial port 1 TX/RX
+ * 4/5: Serial port 2 TX/RX
+ * 6/7: Serial port 3 TX/RX
+ * 8: ATA/ATAPI
+ * 9-15: Undefined
+ */
+ uint32_t mul_err:1; /* Multiple errors occurred */
+ uint32_t addr:26; /* Bits 31-6 of error addr */
+ } fields;
+ } pci_err_addr_l;
+ uint32_t pci_err_addr_h; /* Bits 63-32 of error addr */
+ union ioc4_sio_int {
+ uint32_t raw;
+ struct {
+ uint8_t tx_mt:1; /* TX ring buffer empty */
+ uint8_t rx_full:1; /* RX ring buffer full */
+ uint8_t rx_high:1; /* RX high-water exceeded */
+ uint8_t rx_timer:1; /* RX timer has triggered */
+ uint8_t delta_dcd:1; /* DELTA_DCD seen */
+ uint8_t delta_cts:1; /* DELTA_CTS seen */
+ uint8_t intr_pass:1; /* Interrupt pass-through */
+ uint8_t tx_explicit:1; /* TX, MCW, or delay complete */
+ } fields[4];
+ } sio_ir; /* Serial interrupt state */
+ union ioc4_other_int {
+ uint32_t raw;
+ struct {
+ uint32_t ata_int:1; /* ATA port passthru */
+ uint32_t ata_memerr:1; /* ATA halted by mem error */
+ uint32_t memerr:4; /* Serial halted by mem err */
+ uint32_t kbd_int:1; /* kbd/mouse intr asserted */
+ uint32_t reserved:16; /* zero */
+ uint32_t rt_int:1; /* INT_OUT section latch */
+ uint32_t gen_int:8; /* Intr. from generic pins */
+ } fields;
+ } other_ir; /* Other interrupt state */
+ union ioc4_sio_int sio_ies; /* Serial interrupt enable set */
+ union ioc4_other_int other_ies; /* Other interrupt enable set */
+ union ioc4_sio_int sio_iec; /* Serial interrupt enable clear */
+ union ioc4_other_int other_iec; /* Other interrupt enable clear */
+ union ioc4_sio_cr {
+ uint32_t raw;
+ struct {
+ uint32_t cmd_pulse:4; /* Bytebus strobe width */
+ uint32_t arb_diag:3; /* PCI bus requester */
+ uint32_t sio_diag_idle:1; /* Active ser req? */
+ uint32_t ata_diag_idle:1; /* Active ATA req? */
+ uint32_t ata_diag_active:1; /* ATA req is winner */
+ uint32_t reserved:22; /* zero */
+ } fields;
+ } sio_cr;
+ uint32_t unused1;
+ union ioc4_int_out {
+ uint32_t raw;
+ struct {
+ uint32_t count:16; /* Period control */
+ uint32_t mode:3; /* Output signal shape */
+ uint32_t reserved:11; /* zero */
+ uint32_t diag:1; /* Timebase control */
+ uint32_t int_out:1; /* Current value */
+ } fields;
+ } int_out; /* External interrupt output control */
+ uint32_t unused2;
+ union ioc4_gpcr {
+ uint32_t raw;
+ struct {
+ uint32_t dir:8; /* Pin direction */
+ uint32_t edge:8; /* Edge/level mode */
+ uint32_t reserved1:4; /* zero */
+ uint32_t int_out_en:1; /* INT_OUT enable */
+ uint32_t reserved2:11; /* zero */
+ } fields;
+ } gpcr_s; /* Generic PIO control set */
+ union ioc4_gpcr gpcr_c; /* Generic PIO control clear */
+ union ioc4_gpdr {
+ uint32_t raw;
+ struct {
+ uint32_t gen_pin:8; /* State of pins */
+ uint32_t reserved:24;
+ } fields;
+ } gpdr; /* Generic PIO data */
+ uint32_t unused3;
+ union ioc4_gppr {
+ uint32_t raw;
+ struct {
+ uint32_t gen_pin:1; /* Single pin state */
+ uint32_t reserved:31;
+ } fields;
+ } gppr[8]; /* Generic PIO pins */
+};
+
+/* Masks for GPCR DIR pins */
+#define IOC4_GPCR_DIR_0 0x01 /* External interrupt output */
+#define IOC4_GPCR_DIR_1 0x02 /* External interrupt input */
+#define IOC4_GPCR_DIR_2 0x04
+#define IOC4_GPCR_DIR_3 0x08 /* Keyboard/mouse presence */
+#define IOC4_GPCR_DIR_4 0x10 /* Ser. port 0 xcvr select (0=232, 1=422) */
+#define IOC4_GPCR_DIR_5 0x20 /* Ser. port 1 xcvr select (0=232, 1=422) */
+#define IOC4_GPCR_DIR_6 0x40 /* Ser. port 2 xcvr select (0=232, 1=422) */
+#define IOC4_GPCR_DIR_7 0x80 /* Ser. port 3 xcvr select (0=232, 1=422) */
+
+/* Masks for GPCR EDGE pins */
+#define IOC4_GPCR_EDGE_0 0x01
+#define IOC4_GPCR_EDGE_1 0x02 /* External interrupt input */
+#define IOC4_GPCR_EDGE_2 0x04
+#define IOC4_GPCR_EDGE_3 0x08
+#define IOC4_GPCR_EDGE_4 0x10
+#define IOC4_GPCR_EDGE_5 0x20
+#define IOC4_GPCR_EDGE_6 0x40
+#define IOC4_GPCR_EDGE_7 0x80
+
+#define IOC4_VARIANT_IO9 0x0900
+#define IOC4_VARIANT_PCI_RT 0x0901
+#define IOC4_VARIANT_IO10 0x1000
+
+/* One of these per IOC4 */
+struct ioc4_driver_data {
+ struct list_head idd_list;
+ unsigned long idd_bar0;
+ struct pci_dev *idd_pdev;
+ const struct pci_device_id *idd_pci_id;
+ struct ioc4_misc_regs __iomem *idd_misc_regs;
+ unsigned long count_period;
+ void *idd_serial_data;
+ unsigned int idd_variant;
+};
+
+/* One per submodule */
+struct ioc4_submodule {
+ struct list_head is_list;
+ char *is_name;
+ struct module *is_owner;
+ int (*is_probe) (struct ioc4_driver_data *);
+ int (*is_remove) (struct ioc4_driver_data *);
+};
+
+#define IOC4_NUM_CARDS 8 /* max cards per partition */
+
+/**********************************
+ * Functions needed by submodules *
+ **********************************/
+
+extern int ioc4_register_submodule(struct ioc4_submodule *);
+extern void ioc4_unregister_submodule(struct ioc4_submodule *);
+
+#endif /* _LINUX_IOC4_H */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
new file mode 100644
index 000000000..df38db2ef
--- /dev/null
+++ b/include/linux/iocontext.h
@@ -0,0 +1,157 @@
+#ifndef IOCONTEXT_H
+#define IOCONTEXT_H
+
+#include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
+
+enum {
+ ICQ_EXITED = 1 << 2,
+};
+
+/*
+ * An io_cq (icq) is an association between an io_context (ioc) and a
+ * request_queue (q). This is used by elevators which need to track
+ * information per ioc - q pair.
+ *
+ * Elevator can request use of icq by setting elevator_type->icq_size and
+ * ->icq_align. Both size and align must be larger than that of struct
+ * io_cq and elevator can use the tail area for private information. The
+ * recommended way to do this is defining a struct which contains io_cq as
+ * the first member followed by private members and using its size and
+ * align. For example,
+ *
+ * struct snail_io_cq {
+ * struct io_cq icq;
+ * int poke_snail;
+ * int feed_snail;
+ * };
+ *
+ * struct elevator_type snail_elv_type {
+ * .ops = { ... },
+ * .icq_size = sizeof(struct snail_io_cq),
+ * .icq_align = __alignof__(struct snail_io_cq),
+ * ...
+ * };
+ *
+ * If icq_size is set, block core will manage icq's.  All requests will
+ * have their ->elv.icq field set before elevator_ops->elevator_set_req_fn()
+ * is called and will hold a reference to the associated io_context.
+ *
+ * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
+ * called and, on destruction, ->elevator_exit_icq_fn(). Both functions
+ * are called with both the associated io_context and queue locks held.
+ *
+ * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding
+ * queue lock but the returned icq is valid only until the queue lock is
+ * released. Elevators can not and should not try to create or destroy
+ * icq's.
+ *
+ * As icq's are linked from both ioc and q, the locking rules are a bit
+ * complex.
+ *
+ * - ioc lock nests inside q lock.
+ *
+ * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
+ * q->icq_list and icq->q_node by q lock.
+ *
+ * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
+ * itself is protected by q lock. However, both the indexes and icq
+ * itself are also RCU managed and lookup can be performed holding only
+ * the q lock.
+ *
+ * - icq's are not reference counted. They are destroyed when either the
+ * ioc or q goes away. Each request with icq set holds an extra
+ * reference to ioc to ensure it stays until the request is completed.
+ *
+ * - Linking and unlinking icq's are performed while holding both ioc and q
+ * locks. Due to the lock ordering, q exit is simple but ioc exit
+ * requires reverse-order double lock dance.
+ */
+struct io_cq {
+ struct request_queue *q;
+ struct io_context *ioc;
+
+ /*
+ * q_node and ioc_node link io_cq through icq_list of q and ioc
+ * respectively. Both fields are unused once ioc_exit_icq() is
+ * called and shared with __rcu_icq_cache and __rcu_head which are
+ * used for RCU free of io_cq.
+ */
+ union {
+ struct list_head q_node;
+ struct kmem_cache *__rcu_icq_cache;
+ };
+ union {
+ struct hlist_node ioc_node;
+ struct rcu_head __rcu_head;
+ };
+
+ unsigned int flags;
+};
+
+/*
+ * I/O subsystem state of the associated processes. It is refcounted
+ * and kmalloc'ed. These could be shared between processes.
+ */
+struct io_context {
+ atomic_long_t refcount;
+ atomic_t active_ref;
+ atomic_t nr_tasks;
+
+ /* all the fields below are protected by this lock */
+ spinlock_t lock;
+
+ unsigned short ioprio;
+
+ /*
+ * For request batching
+ */
+ int nr_batch_requests; /* Number of requests left in the batch */
+ unsigned long last_waited; /* Time last woken after wait for request */
+
+ struct radix_tree_root icq_tree;
+ struct io_cq __rcu *icq_hint;
+ struct hlist_head icq_list;
+
+ struct work_struct release_work;
+};
+
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with an active reference can issue new IOs.  This function
+ * acquires an active reference on @ioc. The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
+{
+ WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
+ WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
+ atomic_long_inc(&ioc->refcount);
+ atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+ get_io_context_active(ioc);
+
+ WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+ atomic_inc(&ioc->nr_tasks);
+}
+
+struct task_struct;
+#ifdef CONFIG_BLOCK
+void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
+void exit_io_context(struct task_struct *task);
+struct io_context *get_task_io_context(struct task_struct *task,
+ gfp_t gfp_flags, int node);
+#else
+struct io_context;
+static inline void put_io_context(struct io_context *ioc) { }
+static inline void exit_io_context(struct task_struct *task) { }
+#endif
+
+#endif
diff --git a/include/linux/iommu-common.h b/include/linux/iommu-common.h
new file mode 100644
index 000000000..bbced83b3
--- /dev/null
+++ b/include/linux/iommu-common.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_IOMMU_COMMON_H
+#define _LINUX_IOMMU_COMMON_H
+
+#include <linux/spinlock_types.h>
+#include <linux/device.h>
+#include <asm/page.h>
+
+#define IOMMU_POOL_HASHBITS 4
+#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+ unsigned long start;
+ unsigned long end;
+ unsigned long hint;
+ spinlock_t lock;
+};
+
+struct iommu_map_table {
+ unsigned long table_map_base;
+ unsigned long table_shift;
+ unsigned long nr_pools;
+ void (*lazy_flush)(struct iommu_map_table *);
+ unsigned long poolsize;
+ struct iommu_pool pools[IOMMU_NR_POOLS];
+ u32 flags;
+#define IOMMU_HAS_LARGE_POOL 0x00000001
+#define IOMMU_NO_SPAN_BOUND 0x00000002
+#define IOMMU_NEED_FLUSH 0x00000004
+ struct iommu_pool large_pool;
+ unsigned long *map;
+};
+
+extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+ unsigned long num_entries,
+ u32 table_shift,
+ void (*lazy_flush)(struct iommu_map_table *),
+ bool large_pool, u32 npools,
+ bool skip_span_boundary_check);
+
+extern unsigned long iommu_tbl_range_alloc(struct device *dev,
+ struct iommu_map_table *iommu,
+ unsigned long npages,
+ unsigned long *handle,
+ unsigned long mask,
+ unsigned int align_order);
+
+extern void iommu_tbl_range_free(struct iommu_map_table *iommu,
+ u64 dma_addr, unsigned long npages,
+ unsigned long entry);
+
+#endif
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
new file mode 100644
index 000000000..86bdeffe4
--- /dev/null
+++ b/include/linux/iommu-helper.h
@@ -0,0 +1,34 @@
+#ifndef _LINUX_IOMMU_HELPER_H
+#define _LINUX_IOMMU_HELPER_H
+
+#include <linux/kernel.h>
+
+static inline unsigned long iommu_device_max_index(unsigned long size,
+ unsigned long offset,
+ u64 dma_mask)
+{
+ if (size + offset > dma_mask)
+ return dma_mask - offset + 1;
+ else
+ return size;
+}
+
+extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+ unsigned long shift,
+ unsigned long boundary_size);
+extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr,
+ unsigned long shift,
+ unsigned long boundary_size,
+ unsigned long align_mask);
+
+static inline unsigned long iommu_num_pages(unsigned long addr,
+ unsigned long len,
+ unsigned long io_page_size)
+{
+ unsigned long size = (addr & (io_page_size - 1)) + len;
+
+ return DIV_ROUND_UP(size, io_page_size);
+}
+
+#endif
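
Worked example: for addr = 0x1234, len = 0x2000 and io_page_size = 0x1000, the in-page offset is 0x234, so size = 0x2234 and iommu_num_pages() returns DIV_ROUND_UP(0x2234, 0x1000) = 3.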
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
new file mode 100644
index 000000000..0546b8710
--- /dev/null
+++ b/include/linux/iommu.h
@@ -0,0 +1,487 @@
+/*
+ * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_IOMMU_H
+#define __LINUX_IOMMU_H
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <trace/events/iommu.h>
+
+#define IOMMU_READ (1 << 0)
+#define IOMMU_WRITE (1 << 1)
+#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
+#define IOMMU_NOEXEC (1 << 3)
+
+struct iommu_ops;
+struct iommu_group;
+struct bus_type;
+struct device;
+struct iommu_domain;
+struct notifier_block;
+
+/* iommu fault flags */
+#define IOMMU_FAULT_READ 0x0
+#define IOMMU_FAULT_WRITE 0x1
+
+typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
+ struct device *, unsigned long, int, void *);
+
+struct iommu_domain_geometry {
+ dma_addr_t aperture_start; /* First address that can be mapped */
+ dma_addr_t aperture_end; /* Last address that can be mapped */
+ bool force_aperture; /* DMA only allowed in mappable range? */
+};
+
+/* Domain feature flags */
+#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
+#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
+ implementation */
+#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
+
+/*
+ * These are the possible domain types
+ *
+ * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
+ * devices
+ * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
+ * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
+ * for VMs
+ * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
+ * This flag allows IOMMU drivers to implement
+ * certain optimizations for these domains
+ */
+#define IOMMU_DOMAIN_BLOCKED (0U)
+#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
+#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
+#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
+ __IOMMU_DOMAIN_DMA_API)
+
+struct iommu_domain {
+ unsigned type;
+ const struct iommu_ops *ops;
+ iommu_fault_handler_t handler;
+ void *handler_token;
+ struct iommu_domain_geometry geometry;
+};
+
+enum iommu_cap {
+ IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
+ transactions */
+ IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
+ IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
+};
+
+/*
+ * The following constraints are specific to FSL_PAMUV1:
+ * - the aperture must be a power of 2, and naturally aligned
+ * - the number of windows must be a power of 2, and the address space size
+ *   of each window is determined by aperture size / # of windows
+ * - the actual size of the mapped region of a window must be a power
+ *   of 2 starting with 4KB, and the physical address must be naturally
+ *   aligned.
+ * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above-mentioned constraints.
+ * The caller can invoke iommu_domain_get_attr to check if the underlying
+ * iommu implementation supports these constraints.
+ */
+
+enum iommu_attr {
+ DOMAIN_ATTR_GEOMETRY,
+ DOMAIN_ATTR_PAGING,
+ DOMAIN_ATTR_WINDOWS,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ DOMAIN_ATTR_FSL_PAMUV1,
+ DOMAIN_ATTR_NESTING, /* two stages of translation */
+ DOMAIN_ATTR_MAX,
+};
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * struct iommu_ops - iommu ops and capabilities
+ * @domain_init: init iommu domain
+ * @domain_destroy: destroy iommu domain
+ * @attach_dev: attach device to an iommu domain
+ * @detach_dev: detach device from an iommu domain
+ * @map: map a physically contiguous memory region to an iommu domain
+ * @unmap: unmap a physically contiguous memory region from an iommu domain
+ * @map_sg: map a scatter-gather list of physically contiguous memory chunks
+ * to an iommu domain
+ * @iova_to_phys: translate iova to physical address
+ * @add_device: add device to iommu grouping
+ * @remove_device: remove device from iommu grouping
+ * @domain_get_attr: Query domain attributes
+ * @domain_set_attr: Change domain attributes
+ * @of_xlate: add OF master IDs to iommu grouping
+ * @pgsize_bitmap: bitmap of supported page sizes
+ * @priv: per-instance data private to the iommu driver
+ */
+struct iommu_ops {
+ bool (*capable)(enum iommu_cap);
+
+ /* Domain allocation and freeing by the iommu driver */
+ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+ void (*domain_free)(struct iommu_domain *);
+
+ int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
+ void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
+ int (*map)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+ size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+ size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot);
+ phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
+ int (*add_device)(struct device *dev);
+ void (*remove_device)(struct device *dev);
+ int (*device_group)(struct device *dev, unsigned int *groupid);
+ int (*domain_get_attr)(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data);
+ int (*domain_set_attr)(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data);
+
+ /* Window handling functions */
+ int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
+ phys_addr_t paddr, u64 size, int prot);
+ void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
+	/* Set the number of windows per domain */
+ int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
+	/* Get the number of windows per domain */
+ u32 (*domain_get_windows)(struct iommu_domain *domain);
+
+#ifdef CONFIG_OF_IOMMU
+ int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
+#endif
+
+ unsigned long pgsize_bitmap;
+ void *priv;
+};
+
+#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
+#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
+#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
+#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
+#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
+#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
+
+extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
+extern bool iommu_present(struct bus_type *bus);
+extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
+extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
+extern struct iommu_group *iommu_group_get_by_id(int id);
+extern void iommu_domain_free(struct iommu_domain *domain);
+extern int iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev);
+extern void iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev);
+extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
+extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+ size_t size);
+extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			struct scatterlist *sg, unsigned int nents,
+ int prot);
+extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
+extern void iommu_set_fault_handler(struct iommu_domain *domain,
+ iommu_fault_handler_t handler, void *token);
+
+extern int iommu_attach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+extern void iommu_detach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+extern struct iommu_group *iommu_group_alloc(void);
+extern void *iommu_group_get_iommudata(struct iommu_group *group);
+extern void iommu_group_set_iommudata(struct iommu_group *group,
+ void *iommu_data,
+ void (*release)(void *iommu_data));
+extern int iommu_group_set_name(struct iommu_group *group, const char *name);
+extern int iommu_group_add_device(struct iommu_group *group,
+ struct device *dev);
+extern void iommu_group_remove_device(struct device *dev);
+extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+ int (*fn)(struct device *, void *));
+extern struct iommu_group *iommu_group_get(struct device *dev);
+extern void iommu_group_put(struct iommu_group *group);
+extern int iommu_group_register_notifier(struct iommu_group *group,
+ struct notifier_block *nb);
+extern int iommu_group_unregister_notifier(struct iommu_group *group,
+ struct notifier_block *nb);
+extern int iommu_group_id(struct iommu_group *group);
+extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
+
+extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
+ void *data);
+extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
+ void *data);
+struct device *iommu_device_create(struct device *parent, void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...);
+void iommu_device_destroy(struct device *dev);
+int iommu_device_link(struct device *dev, struct device *link);
+void iommu_device_unlink(struct device *dev, struct device *link);
+
+/* Window handling function prototypes */
+extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
+ phys_addr_t offset, u64 size,
+ int prot);
+extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
+/**
+ * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
+ * @domain: the iommu domain where the fault has happened
+ * @dev: the device where the fault has happened
+ * @iova: the faulting address
+ * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
+ *
+ * This function should be called by the low-level IOMMU implementations
+ * whenever IOMMU faults happen, to allow high-level users that are
+ * interested in such events to know about them.
+ *
+ * This event may be useful for several possible use cases:
+ * - mere logging of the event
+ * - dynamic TLB/PTE loading
+ * - restarting the faulting device, if that is required
+ *
+ * Returns 0 on success and an appropriate error code otherwise (if dynamic
+ * PTE/TLB loading will one day be supported, implementations will be able
+ * to tell whether it succeeded or not according to this return value).
+ *
+ * Specifically, -ENOSYS is returned if a fault handler isn't installed
+ * (though fault handlers can also return -ENOSYS, in case they want to
+ * elicit the default behavior of the IOMMU drivers).
+ */
+static inline int report_iommu_fault(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags)
+{
+ int ret = -ENOSYS;
+
+ /*
+ * if upper layers showed interest and installed a fault handler,
+ * invoke it.
+ */
+ if (domain->handler)
+ ret = domain->handler(domain, dev, iova, flags,
+ domain->handler_token);
+
+ trace_io_page_fault(dev, iova, flags);
+ return ret;
+}
+
+static inline size_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return domain->ops->map_sg(domain, iova, sg, nents, prot);
+}
+
+#else /* CONFIG_IOMMU_API */
+
+struct iommu_ops {};
+struct iommu_group {};
+
+static inline bool iommu_present(struct bus_type *bus)
+{
+ return false;
+}
+
+static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
+{
+ return false;
+}
+
+static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
+{
+ return NULL;
+}
+
+static inline struct iommu_group *iommu_group_get_by_id(int id)
+{
+ return NULL;
+}
+
+static inline void iommu_domain_free(struct iommu_domain *domain)
+{
+}
+
+static inline int iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+}
+
+static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
+			    phys_addr_t paddr, size_t size, int prot)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
+			      size_t size)
+{
+	return -ENODEV;
+}
+
+static inline size_t iommu_map_sg(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_domain_window_enable(struct iommu_domain *domain,
+ u32 wnd_nr, phys_addr_t paddr,
+ u64 size, int prot)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_domain_window_disable(struct iommu_domain *domain,
+ u32 wnd_nr)
+{
+}
+
+static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
+{
+ return 0;
+}
+
+static inline void iommu_set_fault_handler(struct iommu_domain *domain,
+ iommu_fault_handler_t handler, void *token)
+{
+}
+
+static inline int iommu_attach_group(struct iommu_domain *domain,
+ struct iommu_group *group)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_detach_group(struct iommu_domain *domain,
+ struct iommu_group *group)
+{
+}
+
+static inline struct iommu_group *iommu_group_alloc(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void *iommu_group_get_iommudata(struct iommu_group *group)
+{
+ return NULL;
+}
+
+static inline void iommu_group_set_iommudata(struct iommu_group *group,
+ void *iommu_data,
+ void (*release)(void *iommu_data))
+{
+}
+
+static inline int iommu_group_set_name(struct iommu_group *group,
+ const char *name)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_group_add_device(struct iommu_group *group,
+ struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void iommu_group_remove_device(struct device *dev)
+{
+}
+
+static inline int iommu_group_for_each_dev(struct iommu_group *group,
+ void *data,
+ int (*fn)(struct device *, void *))
+{
+ return -ENODEV;
+}
+
+static inline struct iommu_group *iommu_group_get(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void iommu_group_put(struct iommu_group *group)
+{
+}
+
+static inline int iommu_group_register_notifier(struct iommu_group *group,
+ struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_group_unregister_notifier(struct iommu_group *group,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int iommu_group_id(struct iommu_group *group)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ return -EINVAL;
+}
+
+static inline int iommu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ return -EINVAL;
+}
+
+static inline struct device *iommu_device_create(struct device *parent,
+ void *drvdata,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void iommu_device_destroy(struct device *dev)
+{
+}
+
+static inline int iommu_device_link(struct device *dev, struct device *link)
+{
+ return -EINVAL;
+}
+
+static inline void iommu_device_unlink(struct device *dev, struct device *link)
+{
+}
+
+#endif /* CONFIG_IOMMU_API */
+
+#endif /* __LINUX_IOMMU_H */
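
A hedged sketch of the basic IOMMU-API call sequence built from the declarations above; the function name, addresses and single-page size are illustrative:

static int iommu_smoke_test(struct bus_type *bus, struct device *dev,
			    phys_addr_t paddr, dma_addr_t iova)
{
	struct iommu_domain *dom;
	int ret;

	if (!iommu_present(bus))
		return -ENODEV;

	dom = iommu_domain_alloc(bus);
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);
	if (ret)
		goto out_free;

	/* Map one page at @iova, then tear everything down again. */
	ret = iommu_map(dom, iova, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		iommu_unmap(dom, iova, PAGE_SIZE);

	iommu_detach_device(dom, dev);
out_free:
	iommu_domain_free(dom);
	return ret;
}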
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
new file mode 100644
index 000000000..1c30014ed
--- /dev/null
+++ b/include/linux/iopoll.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_IOPOLL_H
+#define _LINUX_IOPOLL_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/hrtimer.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+/**
+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0 means
+ * tight-loop).  Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ for (;;) { \
+ (val) = op(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ (val) = op(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+/**
+ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
+ * @op: accessor function (takes @addr as its only argument)
+ * @addr: Address to poll
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 means tight-loop).
+ * Should be less than ~10us since udelay is used (see
+ * Documentation/timers/timers-howto.txt).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @addr is stored in @val.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
+({ \
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
+ for (;;) { \
+ (val) = op(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
+ (val) = op(addr); \
+ break; \
+ } \
+ if (delay_us) \
+ udelay(delay_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
+
+
+#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
+
+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
+
+#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)
+
+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
+
+#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)
+
+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
+
+#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)
+
+#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)
+
+#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
+
+#endif /* _LINUX_IOPOLL_H */
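
A short usage sketch; the 0x10 status offset, the ready bit and the 100 us / 10 ms timing are illustrative values:

static int wait_for_ready(void __iomem *regs)
{
	u32 status;

	/* Sleep ~100 us between reads of the (hypothetical) status register,
	 * giving up after 10 ms; @status holds the last value read either way.
	 */
	return readl_poll_timeout(regs + 0x10, status, status & BIT(0),
				  100, 10000);
}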
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
new file mode 100644
index 000000000..388e3ae94
--- /dev/null
+++ b/include/linux/ioport.h
@@ -0,0 +1,252 @@
+/*
+ * ioport.h Definitions of routines for detecting, reserving and
+ * allocating system resources.
+ *
+ * Authors: Linus Torvalds
+ */
+
+#ifndef _LINUX_IOPORT_H
+#define _LINUX_IOPORT_H
+
+#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <linux/types.h>
+/*
+ * Resources are tree-like, allowing
+ * nesting etc..
+ */
+struct resource {
+ resource_size_t start;
+ resource_size_t end;
+ const char *name;
+ unsigned long flags;
+ struct resource *parent, *sibling, *child;
+};
+
+/*
+ * IO resources have these defined flags.
+ */
+#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
+
+#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */
+#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */
+#define IORESOURCE_MEM 0x00000200
+#define IORESOURCE_REG 0x00000300 /* Register offsets */
+#define IORESOURCE_IRQ 0x00000400
+#define IORESOURCE_DMA 0x00000800
+#define IORESOURCE_BUS 0x00001000
+
+#define IORESOURCE_PREFETCH 0x00002000 /* No side effects */
+#define IORESOURCE_READONLY 0x00004000
+#define IORESOURCE_CACHEABLE 0x00008000
+#define IORESOURCE_RANGELENGTH 0x00010000
+#define IORESOURCE_SHADOWABLE 0x00020000
+
+#define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */
+#define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */
+
+#define IORESOURCE_MEM_64 0x00100000
+#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
+#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
+
+#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
+#define IORESOURCE_DISABLED 0x10000000
+#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
+#define IORESOURCE_AUTO 0x40000000
+#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
+
+/* PnP IRQ specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
+#define IORESOURCE_IRQ_LOWEDGE (1<<1)
+#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
+#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
+#define IORESOURCE_IRQ_SHAREABLE (1<<4)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+
+/* PnP DMA specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_DMA_TYPE_MASK (3<<0)
+#define IORESOURCE_DMA_8BIT (0<<0)
+#define IORESOURCE_DMA_8AND16BIT (1<<0)
+#define IORESOURCE_DMA_16BIT (2<<0)
+
+#define IORESOURCE_DMA_MASTER (1<<2)
+#define IORESOURCE_DMA_BYTE (1<<3)
+#define IORESOURCE_DMA_WORD (1<<4)
+
+#define IORESOURCE_DMA_SPEED_MASK (3<<6)
+#define IORESOURCE_DMA_COMPATIBLE (0<<6)
+#define IORESOURCE_DMA_TYPEA (1<<6)
+#define IORESOURCE_DMA_TYPEB (2<<6)
+#define IORESOURCE_DMA_TYPEF (3<<6)
+
+/* PnP memory I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
+#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
+#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
+#define IORESOURCE_MEM_TYPE_MASK (3<<3)
+#define IORESOURCE_MEM_8BIT (0<<3)
+#define IORESOURCE_MEM_16BIT (1<<3)
+#define IORESOURCE_MEM_8AND16BIT (2<<3)
+#define IORESOURCE_MEM_32BIT (3<<3)
+#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
+#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+
+/* PnP I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IO_16BIT_ADDR (1<<0)
+#define IORESOURCE_IO_FIXED (1<<1)
+
+/* PCI ROM control bits (IORESOURCE_BITS) */
+#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
+#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
+#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
+#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
+
+/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
+#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
+
+
+/* helpers to define resources */
+#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
+ { \
+ .start = (_start), \
+ .end = (_start) + (_size) - 1, \
+ .name = (_name), \
+ .flags = (_flags), \
+ }
+
+#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
+ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
+#define DEFINE_RES_IO(_start, _size) \
+ DEFINE_RES_IO_NAMED((_start), (_size), NULL)
+
+#define DEFINE_RES_MEM_NAMED(_start, _size, _name) \
+ DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM)
+#define DEFINE_RES_MEM(_start, _size) \
+ DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
+
+#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
+ DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
+#define DEFINE_RES_IRQ(_irq) \
+ DEFINE_RES_IRQ_NAMED((_irq), NULL)
+
+#define DEFINE_RES_DMA_NAMED(_dma, _name) \
+ DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA)
+#define DEFINE_RES_DMA(_dma) \
+ DEFINE_RES_DMA_NAMED((_dma), NULL)
+
+/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
+extern struct resource ioport_resource;
+extern struct resource iomem_resource;
+
+extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
+extern int request_resource(struct resource *root, struct resource *new);
+extern int release_resource(struct resource *new);
+void release_child_resources(struct resource *new);
+extern void reserve_region_with_split(struct resource *root,
+ resource_size_t start, resource_size_t end,
+ const char *name);
+extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
+extern int insert_resource(struct resource *parent, struct resource *new);
+extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
+extern void arch_remove_reservations(struct resource *avail);
+extern int allocate_resource(struct resource *root, struct resource *new,
+ resource_size_t size, resource_size_t min,
+ resource_size_t max, resource_size_t align,
+ resource_size_t (*alignf)(void *,
+ const struct resource *,
+ resource_size_t,
+ resource_size_t),
+ void *alignf_data);
+struct resource *lookup_resource(struct resource *root, resource_size_t start);
+int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+resource_size_t resource_alignment(struct resource *res);
+static inline resource_size_t resource_size(const struct resource *res)
+{
+ return res->end - res->start + 1;
+}
+static inline unsigned long resource_type(const struct resource *res)
+{
+ return res->flags & IORESOURCE_TYPE_BITS;
+}
+/* True iff r1 completely contains r2 */
+static inline bool resource_contains(struct resource *r1, struct resource *r2)
+{
+ if (resource_type(r1) != resource_type(r2))
+ return false;
+ if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET)
+ return false;
+ return r1->start <= r2->start && r1->end >= r2->end;
+}
+
+
+/* Convenience shorthand with allocation */
+#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
+#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
+#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
+#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
+#define request_mem_region_exclusive(start,n,name) \
+ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
+#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
+
+extern struct resource * __request_region(struct resource *,
+ resource_size_t start,
+ resource_size_t n,
+ const char *name, int flags);
+
+/* Compatibility cruft */
+#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
+#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
+
+extern void __release_region(struct resource *, resource_size_t,
+ resource_size_t);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern int release_mem_region_adjustable(struct resource *, resource_size_t,
+ resource_size_t);
+#endif
+
+/* Wrappers for managed devices */
+struct device;
+
+extern int devm_request_resource(struct device *dev, struct resource *root,
+ struct resource *new);
+extern void devm_release_resource(struct device *dev, struct resource *new);
+
+#define devm_request_region(dev,start,n,name) \
+ __devm_request_region(dev, &ioport_resource, (start), (n), (name))
+#define devm_request_mem_region(dev,start,n,name) \
+ __devm_request_region(dev, &iomem_resource, (start), (n), (name))
+
+extern struct resource * __devm_request_region(struct device *dev,
+ struct resource *parent, resource_size_t start,
+ resource_size_t n, const char *name);
+
+#define devm_release_region(dev, start, n) \
+ __devm_release_region(dev, &ioport_resource, (start), (n))
+#define devm_release_mem_region(dev, start, n) \
+ __devm_release_region(dev, &iomem_resource, (start), (n))
+
+extern void __devm_release_region(struct device *dev, struct resource *parent,
+ resource_size_t start, resource_size_t n);
+extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
+extern int iomem_is_exclusive(u64 addr);
+
+extern int
+walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg, int (*func)(unsigned long, unsigned long, void *));
+extern int
+walk_system_ram_res(u64 start, u64 end, void *arg,
+ int (*func)(u64, u64, void *));
+extern int
+walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg,
+ int (*func)(u64, u64, void *));
+
+/* True if any part of r1 overlaps r2 */
+static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+{
+ return (r1->start <= r2->end && r1->end >= r2->start);
+}
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_IOPORT_H */
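A minimal usage sketch (illustrative only, not part of the header): how a driver might describe and claim a fixed MMIO window with DEFINE_RES_MEM_NAMED(), request_mem_region() and resource_size() declared above. The base address, size and "example-dev" name are invented.

#include <linux/ioport.h>
#include <linux/errno.h>

#define EXAMPLE_MMIO_BASE	0x10000000UL	/* hypothetical bus address */
#define EXAMPLE_MMIO_SIZE	0x1000UL	/* hypothetical window size */

/* Static description of the window; .end is computed as start + size - 1. */
static struct resource example_res =
	DEFINE_RES_MEM_NAMED(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE, "example-dev");

static int example_claim_window(void)
{
	/* Mark the range busy in the global iomem_resource tree. */
	if (!request_mem_region(example_res.start, resource_size(&example_res),
				"example-dev"))
		return -EBUSY;

	/* ... ioremap() and program the device here ... */

	release_mem_region(example_res.start, resource_size(&example_res));
	return 0;
}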
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
new file mode 100644
index 000000000..ce2fc3c74
--- /dev/null
+++ b/include/linux/ioprio.h
@@ -0,0 +1,81 @@
+#ifndef IOPRIO_H
+#define IOPRIO_H
+
+#include <linux/sched.h>
+#include <linux/iocontext.h>
+
+/*
+ * Gives us 8 prio classes with 13 bits of data for each class
+ */
+#define IOPRIO_BITS (16)
+#define IOPRIO_CLASS_SHIFT (13)
+#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1)
+
+#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT)
+#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK)
+#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data)
+
+#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE)
+
+/*
+ * These are the io priority groups as implemented by CFQ. RT is the realtime
+ * class, it always gets premium service. BE is the best-effort scheduling
+ * class, the default for any process. IDLE is the idle scheduling class, it
+ * is only served when no one else is using the disk.
+ */
+enum {
+ IOPRIO_CLASS_NONE,
+ IOPRIO_CLASS_RT,
+ IOPRIO_CLASS_BE,
+ IOPRIO_CLASS_IDLE,
+};
+
+/*
+ * 8 best effort priority levels are supported
+ */
+#define IOPRIO_BE_NR (8)
+
+enum {
+ IOPRIO_WHO_PROCESS = 1,
+ IOPRIO_WHO_PGRP,
+ IOPRIO_WHO_USER,
+};
+
+/*
+ * Fallback BE priority
+ */
+#define IOPRIO_NORM (4)
+
+/*
+ * if process has set io priority explicitly, use that. if not, convert
+ * the cpu scheduler nice value to an io priority
+ */
+static inline int task_nice_ioprio(struct task_struct *task)
+{
+ if (iso_task(task))
+ return 0;
+ return (task_nice(task) + 20) / 5;
+}
+
+/*
+ * This is for the case where the task hasn't asked for a specific IO class.
+ * Check for idle and rt tasks, and return the appropriate IO class.
+ */
+static inline int task_nice_ioclass(struct task_struct *task)
+{
+ if (task->policy == SCHED_IDLE)
+ return IOPRIO_CLASS_IDLE;
+ else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR)
+ return IOPRIO_CLASS_RT;
+ else
+ return IOPRIO_CLASS_BE;
+}
+
+/*
+ * For inheritance, return the highest of the two given priorities
+ */
+extern int ioprio_best(unsigned short aprio, unsigned short bprio);
+
+extern int set_task_ioprio(struct task_struct *task, int ioprio);
+
+#endif
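A small sketch (illustrative, not from the header): packing a priority value with the macros above and handing it to set_task_ioprio(); the helper name and the chosen level are arbitrary.

#include <linux/ioprio.h>
#include <linux/sched.h>
#include <linux/errno.h>

/* Hypothetical helper: give the calling task best-effort priority level 2. */
static int example_set_be_prio(void)
{
	int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 2);

	/* IOPRIO_PRIO_CLASS(prio) == IOPRIO_CLASS_BE,
	 * IOPRIO_PRIO_DATA(prio)  == 2 */
	if (!ioprio_valid(prio))
		return -EINVAL;

	return set_task_ioprio(current, prio);
}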
diff --git a/include/linux/iova.h b/include/linux/iova.h
new file mode 100644
index 000000000..3920a19d8
--- /dev/null
+++ b/include/linux/iova.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
+ *
+ */
+
+#ifndef _IOVA_H_
+#define _IOVA_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/rbtree.h>
+#include <linux/dma-mapping.h>
+
+/* iova structure */
+struct iova {
+ struct rb_node node;
+ unsigned long pfn_hi; /* highest pfn handed out by the IOMMU */
+ unsigned long pfn_lo; /* lowest pfn handed out by the IOMMU */
+};
+
+/* holds all the iova translations for a domain */
+struct iova_domain {
+ spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
+ struct rb_root rbroot; /* iova domain rbtree root */
+ struct rb_node *cached32_node; /* Save last alloced node */
+ unsigned long granule; /* pfn granularity for this domain */
+ unsigned long start_pfn; /* Lower limit for this domain */
+ unsigned long dma_32bit_pfn;
+};
+
+static inline unsigned long iova_size(struct iova *iova)
+{
+ return iova->pfn_hi - iova->pfn_lo + 1;
+}
+
+static inline unsigned long iova_shift(struct iova_domain *iovad)
+{
+ return __ffs(iovad->granule);
+}
+
+static inline unsigned long iova_mask(struct iova_domain *iovad)
+{
+ return iovad->granule - 1;
+}
+
+static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
+{
+ return iova & iova_mask(iovad);
+}
+
+static inline size_t iova_align(struct iova_domain *iovad, size_t size)
+{
+ return ALIGN(size, iovad->granule);
+}
+
+static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
+{
+ return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
+}
+
+static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
+{
+ return iova >> iova_shift(iovad);
+}
+
+int iommu_iova_cache_init(void);
+void iommu_iova_cache_destroy(void);
+
+struct iova *alloc_iova_mem(void);
+void free_iova_mem(struct iova *iova);
+void free_iova(struct iova_domain *iovad, unsigned long pfn);
+void __free_iova(struct iova_domain *iovad, struct iova *iova);
+struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn,
+ bool size_aligned);
+struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
+ unsigned long pfn_hi);
+void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
+void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+ unsigned long start_pfn, unsigned long pfn_32bit);
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
+void put_iova_domain(struct iova_domain *iovad);
+struct iova *split_and_remove_iova(struct iova_domain *iovad,
+ struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
+
+#endif
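A sketch of the allocator in use (illustrative only): the 4 KiB granule, start pfn and 4 GiB limit are assumptions, real IOMMU drivers derive them from hardware capabilities.

#include <linux/iova.h>

static struct iova_domain example_iovad;

static struct iova *example_setup_and_alloc(void)
{
	unsigned long limit_pfn = 0x100000UL;	/* 4 GiB / 4 KiB pages */

	init_iova_domain(&example_iovad, 4096, 1, limit_pfn);

	/* Reserve one pfn the hardware already owns... */
	reserve_iova(&example_iovad, 0x10, 0x10);

	/* ...then hand out 16 contiguous, size-aligned pfns below the limit. */
	return alloc_iova(&example_iovad, 16, limit_pfn, true);
}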
diff --git a/include/linux/ip.h b/include/linux/ip.h
new file mode 100644
index 000000000..492bc6513
--- /dev/null
+++ b/include/linux/ip.h
@@ -0,0 +1,37 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the IP protocol.
+ *
+ * Version: @(#)ip.h 1.0.2 04/28/93
+ *
+ * Authors: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IP_H
+#define _LINUX_IP_H
+
+#include <linux/skbuff.h>
+#include <uapi/linux/ip.h>
+
+static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
+{
+ return (struct iphdr *)skb_network_header(skb);
+}
+
+static inline struct iphdr *inner_ip_hdr(const struct sk_buff *skb)
+{
+ return (struct iphdr *)skb_inner_network_header(skb);
+}
+
+static inline struct iphdr *ipip_hdr(const struct sk_buff *skb)
+{
+ return (struct iphdr *)skb_transport_header(skb);
+}
+#endif /* _LINUX_IP_H */
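A brief sketch of the accessor in use (helper name invented): once skb_network_header() points at the IP header, ip_hdr() gives a typed view of the packet.

#include <linux/ip.h>
#include <linux/in.h>		/* IPPROTO_TCP */

/* Hypothetical helper: true if the skb carries an IPv4 TCP segment. */
static bool example_skb_is_ipv4_tcp(const struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	return iph->version == 4 && iph->protocol == IPPROTO_TCP;
}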
diff --git a/include/linux/ipack.h b/include/linux/ipack.h
new file mode 100644
index 000000000..8bddc3fbd
--- /dev/null
+++ b/include/linux/ipack.h
@@ -0,0 +1,289 @@
+/*
+ * Industry-pack bus.
+ *
+ * Copyright (C) 2011-2012 CERN (www.cern.ch)
+ * Author: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+
+#define IPACK_IDPROM_OFFSET_I 0x01
+#define IPACK_IDPROM_OFFSET_P 0x03
+#define IPACK_IDPROM_OFFSET_A 0x05
+#define IPACK_IDPROM_OFFSET_C 0x07
+#define IPACK_IDPROM_OFFSET_MANUFACTURER_ID 0x09
+#define IPACK_IDPROM_OFFSET_MODEL 0x0B
+#define IPACK_IDPROM_OFFSET_REVISION 0x0D
+#define IPACK_IDPROM_OFFSET_RESERVED 0x0F
+#define IPACK_IDPROM_OFFSET_DRIVER_ID_L 0x11
+#define IPACK_IDPROM_OFFSET_DRIVER_ID_H 0x13
+#define IPACK_IDPROM_OFFSET_NUM_BYTES 0x15
+#define IPACK_IDPROM_OFFSET_CRC 0x17
+
+/*
+ * IndustryPack Format, Vendor and Device IDs.
+ */
+
+/* ID section format versions */
+#define IPACK_ID_VERSION_INVALID 0x00
+#define IPACK_ID_VERSION_1 0x01
+#define IPACK_ID_VERSION_2 0x02
+
+/* Vendors and devices. Sort key: vendor first, device next. */
+#define IPACK1_VENDOR_ID_RESERVED1 0x00
+#define IPACK1_VENDOR_ID_RESERVED2 0xFF
+#define IPACK1_VENDOR_ID_UNREGISTRED01 0x01
+#define IPACK1_VENDOR_ID_UNREGISTRED02 0x02
+#define IPACK1_VENDOR_ID_UNREGISTRED03 0x03
+#define IPACK1_VENDOR_ID_UNREGISTRED04 0x04
+#define IPACK1_VENDOR_ID_UNREGISTRED05 0x05
+#define IPACK1_VENDOR_ID_UNREGISTRED06 0x06
+#define IPACK1_VENDOR_ID_UNREGISTRED07 0x07
+#define IPACK1_VENDOR_ID_UNREGISTRED08 0x08
+#define IPACK1_VENDOR_ID_UNREGISTRED09 0x09
+#define IPACK1_VENDOR_ID_UNREGISTRED10 0x0A
+#define IPACK1_VENDOR_ID_UNREGISTRED11 0x0B
+#define IPACK1_VENDOR_ID_UNREGISTRED12 0x0C
+#define IPACK1_VENDOR_ID_UNREGISTRED13 0x0D
+#define IPACK1_VENDOR_ID_UNREGISTRED14 0x0E
+#define IPACK1_VENDOR_ID_UNREGISTRED15 0x0F
+
+#define IPACK1_VENDOR_ID_SBS 0xF0
+#define IPACK1_DEVICE_ID_SBS_OCTAL_232 0x22
+#define IPACK1_DEVICE_ID_SBS_OCTAL_422 0x2A
+#define IPACK1_DEVICE_ID_SBS_OCTAL_485 0x48
+
+struct ipack_bus_ops;
+struct ipack_driver;
+
+enum ipack_space {
+ IPACK_IO_SPACE = 0,
+ IPACK_ID_SPACE,
+ IPACK_INT_SPACE,
+ IPACK_MEM8_SPACE,
+ IPACK_MEM16_SPACE,
+ /* Dummy for counting the number of entries. Must remain the last
+ * entry */
+ IPACK_SPACE_COUNT,
+};
+
+/* Physical address range of one IndustryPack address space: start is the
+ * bus address of the region and size its length in bytes. */
+struct ipack_region {
+ phys_addr_t start;
+ size_t size;
+};
+
+/**
+ * struct ipack_device
+ *
+ * @slot: Slot where the device is plugged in the carrier board
+ * @bus: ipack_bus_device where the device is plugged to.
+ * @region: Physical address range of each IndustryPack address space
+ *          (IO, ID, INT, MEM8, MEM16), indexed by enum ipack_space.
+ * @release: Optional callback invoked when the device is released.
+ * @dev: device in kernel representation.
+ *
+ * Warning: Direct access to the mapped memory is possible, but the
+ * endianness is not the same on a PCI carrier as on a VME carrier. The
+ * endianness is managed by the carrier board through bus->ops.
+ */
+struct ipack_device {
+ unsigned int slot;
+ struct ipack_bus_device *bus;
+ struct device dev;
+ void (*release) (struct ipack_device *dev);
+ struct ipack_region region[IPACK_SPACE_COUNT];
+ u8 *id;
+ size_t id_avail;
+ u32 id_vendor;
+ u32 id_device;
+ u8 id_format;
+ unsigned int id_crc_correct:1;
+ unsigned int speed_8mhz:1;
+ unsigned int speed_32mhz:1;
+};
+
+/**
+ * struct ipack_driver_ops -- Callbacks to IPack device driver
+ *
+ * @probe: Probe function
+ * @remove: Prepare imminent removal of the device. Services provided by the
+ * device should be revoked.
+ */
+
+struct ipack_driver_ops {
+ int (*probe) (struct ipack_device *dev);
+ void (*remove) (struct ipack_device *dev);
+};
+
+/**
+ * struct ipack_driver -- Specific data to each ipack device driver
+ *
+ * @driver: Device driver kernel representation
+ * @ops: Callbacks provided by the IPack device driver
+ */
+struct ipack_driver {
+ struct device_driver driver;
+ const struct ipack_device_id *id_table;
+ const struct ipack_driver_ops *ops;
+};
+
+/**
+ * struct ipack_bus_ops - available operations on a bridge module
+ *
+ * @map_space: map IP address space
+ * @unmap_space: unmap IP address space
+ * @request_irq: request IRQ
+ * @free_irq: free IRQ
+ * @get_clockrate: Returns the clockrate the carrier is currently
+ * communicating with the device at.
+ * @set_clockrate: Sets the clock-rate for carrier / module communication.
+ * Should return -EINVAL if the requested speed is not supported.
+ * @get_error: Returns the error state for the slot the device is attached
+ * to.
+ * @get_timeout: Returns 1 if the communication with the device has
+ * previously timed out.
+ * @reset_timeout: Resets the state returned by get_timeout.
+ */
+struct ipack_bus_ops {
+ int (*request_irq) (struct ipack_device *dev,
+ irqreturn_t (*handler)(void *), void *arg);
+ int (*free_irq) (struct ipack_device *dev);
+ int (*get_clockrate) (struct ipack_device *dev);
+ int (*set_clockrate) (struct ipack_device *dev, int mherz);
+ int (*get_error) (struct ipack_device *dev);
+ int (*get_timeout) (struct ipack_device *dev);
+ int (*reset_timeout) (struct ipack_device *dev);
+};
+
+/**
+ * struct ipack_bus_device
+ *
+ * @dev: pointer to carrier device
+ * @slots: number of slots available
+ * @bus_nr: ipack bus number
+ * @ops: bus operations for the mezzanine drivers
+ */
+struct ipack_bus_device {
+ struct module *owner;
+ struct device *parent;
+ int slots;
+ int bus_nr;
+ const struct ipack_bus_ops *ops;
+};
+
+/**
+ * ipack_bus_register -- register a new ipack bus
+ *
+ * @parent: pointer to the parent device, if any.
+ * @slots: number of slots available in the bus device.
+ * @ops: bus operations for the mezzanine drivers.
+ *
+ * The carrier board device should call this function to register itself as
+ * an available bus device in ipack.
+ */
+struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
+ const struct ipack_bus_ops *ops,
+ struct module *owner);
+
+/**
+ * ipack_bus_unregister -- unregister an ipack bus
+ */
+int ipack_bus_unregister(struct ipack_bus_device *bus);
+
+/**
+ * ipack_driver_register -- Register a new ipack device driver
+ *
+ * Called by an ipack driver to register itself as a driver
+ * that can manage ipack devices.
+ */
+int ipack_driver_register(struct ipack_driver *edrv, struct module *owner,
+ const char *name);
+void ipack_driver_unregister(struct ipack_driver *edrv);
+
+/**
+ * ipack_device_init -- initialize an IPack device
+ * @dev: the new device to initialize.
+ *
+ * Initialize a new IPack device ("module" in IndustryPack jargon). The call
+ * is done by the carrier driver. The carrier should populate the fields
+ * bus and slot as well as the region array of @dev prior to calling this
+ * function. The rest of the fields will be allocated and populated
+ * during initialization.
+ *
+ * Return zero on success or error code on failure.
+ *
+ * NOTE: _Never_ directly free @dev after calling this function, even
+ * if it returned an error! Always use ipack_put_device() to give up the
+ * reference initialized in this function instead.
+ */
+int ipack_device_init(struct ipack_device *dev);
+
+/**
+ * ipack_device_add -- Add an IPack device
+ * @dev: the new device to add.
+ *
+ * Add a new IPack device. The call is done by the carrier driver
+ * after calling ipack_device_init().
+ *
+ * Return zero on success or error code on failure.
+ *
+ * NOTE: _Never_ directly free @dev after calling this function, even
+ * if it returned an error! Always use ipack_put_device() to give up the
+ * reference initialized in this function instead.
+ */
+int ipack_device_add(struct ipack_device *dev);
+void ipack_device_del(struct ipack_device *dev);
+
+void ipack_get_device(struct ipack_device *dev);
+void ipack_put_device(struct ipack_device *dev);
+
+/**
+ * DEFINE_IPACK_DEVICE_TABLE - macro used to describe a IndustryPack table
+ * @_table: device table name
+ *
+ * This macro is used to create a struct ipack_device_id array (a device table)
+ * in a generic manner.
+ */
+#define DEFINE_IPACK_DEVICE_TABLE(_table) \
+ const struct ipack_device_id _table[]
+/**
+ * IPACK_DEVICE - macro used to describe a specific IndustryPack device
+ * @_format: the format version (currently either 1 or 2, 8 bit value)
+ * @vend: the 8 or 24 bit IndustryPack Vendor ID
+ * @dev: the 8 or 16 bit IndustryPack Device ID
+ *
+ * This macro is used to create a struct ipack_device_id that matches a specific
+ * device.
+ */
+#define IPACK_DEVICE(_format, vend, dev) \
+ .format = (_format), \
+ .vendor = (vend), \
+ .device = (dev)
+
+/**
+ * ipack_get_carrier - increase the reference counter of
+ * the carrier module
+ * @dev: mezzanine device which wants to get the carrier
+ */
+static inline int ipack_get_carrier(struct ipack_device *dev)
+{
+ return try_module_get(dev->bus->owner);
+}
+
+/**
+ * ipack_put_carrier - decrease the reference counter of
+ * the carrier module
+ * @dev: mezzanine device which wants to release the carrier
+ */
+static inline void ipack_put_carrier(struct ipack_device *dev)
+{
+ module_put(dev->bus->owner);
+}
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
new file mode 100644
index 000000000..9d84942ae
--- /dev/null
+++ b/include/linux/ipc.h
@@ -0,0 +1,26 @@
+#ifndef _LINUX_IPC_H
+#define _LINUX_IPC_H
+
+#include <linux/spinlock.h>
+#include <linux/uidgid.h>
+#include <uapi/linux/ipc.h>
+
+#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
+
+/* used by in-kernel data structures */
+struct kern_ipc_perm
+{
+ spinlock_t lock;
+ bool deleted;
+ int id;
+ key_t key;
+ kuid_t uid;
+ kgid_t gid;
+ kuid_t cuid;
+ kgid_t cgid;
+ umode_t mode;
+ unsigned long seq;
+ void *security;
+};
+
+#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
new file mode 100644
index 000000000..1eee6bcfc
--- /dev/null
+++ b/include/linux/ipc_namespace.h
@@ -0,0 +1,160 @@
+#ifndef __IPC_NAMESPACE_H__
+#define __IPC_NAMESPACE_H__
+
+#include <linux/err.h>
+#include <linux/idr.h>
+#include <linux/rwsem.h>
+#include <linux/notifier.h>
+#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
+
+struct user_namespace;
+
+struct ipc_ids {
+ int in_use;
+ unsigned short seq;
+ struct rw_semaphore rwsem;
+ struct idr ipcs_idr;
+ int next_id;
+};
+
+struct ipc_namespace {
+ atomic_t count;
+ struct ipc_ids ids[3];
+
+ int sem_ctls[4];
+ int used_sems;
+
+ unsigned int msg_ctlmax;
+ unsigned int msg_ctlmnb;
+ unsigned int msg_ctlmni;
+ atomic_t msg_bytes;
+ atomic_t msg_hdrs;
+
+ size_t shm_ctlmax;
+ size_t shm_ctlall;
+ unsigned long shm_tot;
+ int shm_ctlmni;
+ /*
+ * Defines whether IPC_RMID is forced for _all_ shm segments regardless
+ * of shmctl()
+ */
+ int shm_rmid_forced;
+
+ struct notifier_block ipcns_nb;
+
+ /* The kern_mount of the mqueuefs sb. We take a ref on it */
+ struct vfsmount *mq_mnt;
+
+ /* # queues in this ns, protected by mq_lock */
+ unsigned int mq_queues_count;
+
+ /* next fields are set through sysctl */
+ unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */
+ unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */
+ unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */
+ unsigned int mq_msg_default;
+ unsigned int mq_msgsize_default;
+
+ /* user_ns which owns the ipc ns */
+ struct user_namespace *user_ns;
+
+ struct ns_common ns;
+};
+
+extern struct ipc_namespace init_ipc_ns;
+extern atomic_t nr_ipc_ns;
+
+extern spinlock_t mq_lock;
+
+#ifdef CONFIG_SYSVIPC
+extern void shm_destroy_orphaned(struct ipc_namespace *ns);
+#else /* CONFIG_SYSVIPC */
+static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
+#endif /* CONFIG_SYSVIPC */
+
+#ifdef CONFIG_POSIX_MQUEUE
+extern int mq_init_ns(struct ipc_namespace *ns);
+/*
+ * POSIX Message Queue default values:
+ *
+ * MIN_*: Lowest value an admin can set the maximum unprivileged limit to
+ * DFLT_*MAX: Default values for the maximum unprivileged limits
+ * DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply
+ * an attribute to the open call and the queue must be created
+ * HARD_*: Highest value the maximums can be set to. These are enforced
+ * on CAP_SYS_RESOURCE apps as well making them inviolate (so make them
+ * suitably high)
+ *
+ * POSIX Requirements:
+ * Per app minimum openable message queues - 8. This does not map well
+ * to the fact that we limit the number of queues on a per namespace
+ * basis instead of a per app basis. So, make the default high enough
+ * that no given app should have a hard time opening 8 queues.
+ * Minimum maximum for HARD_MSGMAX - 32767. I bumped this to 65536.
+ * Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this. However,
+ * we have run into a situation where running applications in the wild
+ * require this to be at least 5MB, and preferably 10MB, so I set the
+ * value to 16MB in hopes that this user is the worst of the bunch and
+ * the new maximum will handle anyone else. I may have to revisit this
+ * in the future.
+ */
+#define DFLT_QUEUESMAX 256
+#define MIN_MSGMAX 1
+#define DFLT_MSG 10U
+#define DFLT_MSGMAX 10
+#define HARD_MSGMAX 65536
+#define MIN_MSGSIZEMAX 128
+#define DFLT_MSGSIZE 8192U
+#define DFLT_MSGSIZEMAX 8192
+#define HARD_MSGSIZEMAX (16*1024*1024)
+#else
+static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
+#endif
+
+#if defined(CONFIG_IPC_NS)
+extern struct ipc_namespace *copy_ipcs(unsigned long flags,
+ struct user_namespace *user_ns, struct ipc_namespace *ns);
+
+static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
+{
+ if (ns)
+ atomic_inc(&ns->count);
+ return ns;
+}
+
+extern void put_ipc_ns(struct ipc_namespace *ns);
+#else
+static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
+ struct user_namespace *user_ns, struct ipc_namespace *ns)
+{
+ if (flags & CLONE_NEWIPC)
+ return ERR_PTR(-EINVAL);
+
+ return ns;
+}
+
+static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
+{
+ return ns;
+}
+
+static inline void put_ipc_ns(struct ipc_namespace *ns)
+{
+}
+#endif
+
+#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
+
+struct ctl_table_header;
+extern struct ctl_table_header *mq_register_sysctl_table(void);
+
+#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
+
+static inline struct ctl_table_header *mq_register_sysctl_table(void)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
+#endif
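A short sketch of the reference-counting contract (helper invented): code that inspects a namespace it does not own pins it with get_ipc_ns() and drops it with put_ipc_ns(); both collapse to no-ops when CONFIG_IPC_NS is disabled.

#include <linux/ipc_namespace.h>

static unsigned int example_mq_msg_max(struct ipc_namespace *ns)
{
	unsigned int max;

	ns = get_ipc_ns(ns);
	max = ns->mq_msg_max;
	put_ipc_ns(ns);

	return max;
}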
diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h
new file mode 100644
index 000000000..4d3a76380
--- /dev/null
+++ b/include/linux/ipmi-fru.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2012 CERN (www.cern.ch)
+ * Author: Alessandro Rubini <rubini@gnudd.com>
+ *
+ * Released according to the GNU GPL, version 2 or any later version.
+ *
+ * This work is part of the White Rabbit project, a research effort led
+ * by CERN, the European Institute for Nuclear Research.
+ */
+#ifndef __LINUX_IPMI_FRU_H__
+#define __LINUX_IPMI_FRU_H__
+#ifdef __KERNEL__
+# include <linux/types.h>
+# include <linux/string.h>
+#else
+# include <stdint.h>
+# include <string.h>
+#endif
+
+/*
+ * These structures match the unaligned crap we have in FRU1011.pdf
+ * (http://download.intel.com/design/servers/ipmi/FRU1011.pdf)
+ */
+
+/* chapter 8, page 5 */
+struct fru_common_header {
+ uint8_t format; /* 0x01 */
+ uint8_t internal_use_off; /* multiple of 8 bytes */
+ uint8_t chassis_info_off; /* multiple of 8 bytes */
+ uint8_t board_area_off; /* multiple of 8 bytes */
+ uint8_t product_area_off; /* multiple of 8 bytes */
+ uint8_t multirecord_off; /* multiple of 8 bytes */
+ uint8_t pad; /* must be 0 */
+ uint8_t checksum; /* sum modulo 256 must be 0 */
+};
+
+/* chapter 9, page 5 -- internal_use: not used by us */
+
+/* chapter 10, page 6 -- chassis info: not used by us */
+
+/* chapter 13, page 9 -- used by board_info_area below */
+struct fru_type_length {
+ uint8_t type_length;
+ uint8_t data[0];
+};
+
+/* chapter 11, page 7 */
+struct fru_board_info_area {
+ uint8_t format; /* 0x01 */
+ uint8_t area_len; /* multiple of 8 bytes */
+ uint8_t language; /* I hope it's 0 */
+ uint8_t mfg_date[3]; /* LSB, minutes since 1996-01-01 */
+ struct fru_type_length tl[0]; /* type-length stuff follows */
+
+ /*
+ * the TL there are in order:
+ * Board Manufacturer
+ * Board Product Name
+ * Board Serial Number
+ * Board Part Number
+ * FRU File ID (may be null)
+ * more manufacturer-specific stuff
+ * 0xc1 as a terminator
+ * 0x00 pad to a multiple of 8 bytes - 1
+ * checksum (sum of all of the above modulo 256 must be zero)
+ */
+};
+
+enum fru_type {
+ FRU_TYPE_BINARY = 0x00,
+ FRU_TYPE_BCDPLUS = 0x40,
+ FRU_TYPE_ASCII6 = 0x80,
+ FRU_TYPE_ASCII = 0xc0, /* not ascii: depends on language */
+};
+
+/*
+ * some helpers
+ */
+static inline struct fru_board_info_area *fru_get_board_area(
+ const struct fru_common_header *header)
+{
+ /* we know for sure that the header is 8 bytes in size */
+ return (struct fru_board_info_area *)(header + header->board_area_off);
+}
+
+static inline int fru_type(struct fru_type_length *tl)
+{
+ return tl->type_length & 0xc0;
+}
+
+static inline int fru_length(struct fru_type_length *tl)
+{
+ return (tl->type_length & 0x3f) + 1; /* len of whole record */
+}
+
+/* assume ascii-latin1 encoding */
+static inline int fru_strlen(struct fru_type_length *tl)
+{
+ return fru_length(tl) - 1;
+}
+
+static inline char *fru_strcpy(char *dest, struct fru_type_length *tl)
+{
+ int len = fru_strlen(tl);
+ memcpy(dest, tl->data, len);
+ dest[len] = '\0';
+ return dest;
+}
+
+static inline struct fru_type_length *fru_next_tl(struct fru_type_length *tl)
+{
+ return tl + fru_length(tl);
+}
+
+static inline int fru_is_eof(struct fru_type_length *tl)
+{
+ return tl->type_length == 0xc1;
+}
+
+/*
+ * External functions defined in fru-parse.c.
+ */
+extern int fru_header_cksum_ok(struct fru_common_header *header);
+extern int fru_bia_cksum_ok(struct fru_board_info_area *bia);
+
+/* All these 4 return allocated strings by calling fru_alloc() */
+extern char *fru_get_board_manufacturer(struct fru_common_header *header);
+extern char *fru_get_product_name(struct fru_common_header *header);
+extern char *fru_get_serial_number(struct fru_common_header *header);
+extern char *fru_get_part_number(struct fru_common_header *header);
+
+/* This must be defined by the caller of the above functions */
+extern void *fru_alloc(size_t size);
+
+#endif /* __LINUX_IPMI_FRU_H__ */
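A sketch of walking the type/length records of a board info area (checksums assumed already verified with fru_header_cksum_ok()/fru_bia_cksum_ok()); the helper name and buffer handling are illustrative.

#include <linux/ipmi-fru.h>

/* Hypothetical helper: copy the board serial number into buf. Records in
 * the board area are ordered manufacturer, product name, serial number. */
static char *example_board_serial(struct fru_common_header *header, char *buf)
{
	struct fru_board_info_area *bia = fru_get_board_area(header);
	struct fru_type_length *tl = bia->tl;
	int i;

	for (i = 0; i < 2; i++) {		/* skip manufacturer and name */
		if (fru_is_eof(tl))
			return NULL;
		tl = fru_next_tl(tl);
	}
	if (fru_is_eof(tl))
		return NULL;

	return fru_strcpy(buf, tl);		/* buf must hold fru_strlen(tl) + 1 */
}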
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
new file mode 100644
index 000000000..838dbfa3c
--- /dev/null
+++ b/include/linux/ipmi.h
@@ -0,0 +1,316 @@
+/*
+ * ipmi.h
+ *
+ * MontaVista IPMI interface
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __LINUX_IPMI_H
+#define __LINUX_IPMI_H
+
+#include <uapi/linux/ipmi.h>
+
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/acpi.h> /* For acpi_handle */
+
+struct module;
+struct device;
+
+/* Opaque type for an IPMI message user. One of these is needed to
+ send and receive messages. */
+typedef struct ipmi_user *ipmi_user_t;
+
+/*
+ * Stuff coming from the receive interface comes as one of these.
+ * They are allocated, the receiver must free them with
+ * ipmi_free_recv_msg() when done with the message. The link is not
+ * used after the message is delivered, so the upper layer may use the
+ * link to build a linked list, if it likes.
+ */
+struct ipmi_recv_msg {
+ struct list_head link;
+
+ /* The type of message as defined in the "Receive Types"
+ defines above. */
+ int recv_type;
+
+ ipmi_user_t user;
+ struct ipmi_addr addr;
+ long msgid;
+ struct kernel_ipmi_msg msg;
+
+ /* The user_msg_data is the data supplied when a message was
+ sent, if this is a response to a sent message. If this is
+ not a response to a sent message, then user_msg_data will
+ be NULL. If the user above is NULL, then this will be the
+ intf. */
+ void *user_msg_data;
+
+ /* Call this when done with the message. It will presumably free
+ the message and do any other necessary cleanup. */
+ void (*done)(struct ipmi_recv_msg *msg);
+
+ /* Place-holder for the data, don't make any assumptions about
+ the size or existence of this, since it may change. */
+ unsigned char msg_data[IPMI_MAX_MSG_LENGTH];
+};
+
+/* Allocate and free the receive message. */
+void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
+
+struct ipmi_user_hndl {
+ /* Routine type to call when a message needs to be routed to
+ the upper layer. This will be called with some locks held,
+ the only IPMI routines that can be called are ipmi_request
+ and the alloc/free operations. The handler_data is the
+ variable supplied when the receive handler was registered. */
+ void (*ipmi_recv_hndl)(struct ipmi_recv_msg *msg,
+ void *user_msg_data);
+
+ /* Called when the interface detects a watchdog pre-timeout. If
+ this is NULL, it will be ignored for the user. */
+ void (*ipmi_watchdog_pretimeout)(void *handler_data);
+};
+
+/* Create a new user of the IPMI layer on the given interface number. */
+int ipmi_create_user(unsigned int if_num,
+ struct ipmi_user_hndl *handler,
+ void *handler_data,
+ ipmi_user_t *user);
+
+/* Destroy the given user of the IPMI layer. Note that after this
+ function returns, the system is guaranteed to not call any
+ callbacks for the user. Thus as long as you destroy all the users
+ before you unload a module, you will be safe. And if you destroy
+ the users before you destroy the callback structures, it should be
+ safe, too. */
+int ipmi_destroy_user(ipmi_user_t user);
+
+/* Get the IPMI version of the BMC we are talking to. */
+void ipmi_get_version(ipmi_user_t user,
+ unsigned char *major,
+ unsigned char *minor);
+
+/* Set and get the slave address and LUN that we will use for our
+ source messages. Note that this affects the interface, not just
+ this user, so it will affect all users of this interface. This is
+ so some initialization code can come in and do the OEM-specific
+ things it takes to determine your address (if not the BMC) and set
+ it for everyone else. Note that each channel can have its own address. */
+int ipmi_set_my_address(ipmi_user_t user,
+ unsigned int channel,
+ unsigned char address);
+int ipmi_get_my_address(ipmi_user_t user,
+ unsigned int channel,
+ unsigned char *address);
+int ipmi_set_my_LUN(ipmi_user_t user,
+ unsigned int channel,
+ unsigned char LUN);
+int ipmi_get_my_LUN(ipmi_user_t user,
+ unsigned int channel,
+ unsigned char *LUN);
+
+/*
+ * Like ipmi_request, but lets you specify the number of retries and
+ * the retry time. The retries is the number of times the message
+ * will be resent if no reply is received. If set to -1, the default
+ * value will be used. The retry time is the time in milliseconds
+ * between retries. If set to zero, the default value will be
+ * used.
+ *
+ * Don't use this unless you *really* have to. It's primarily for the
+ * IPMI over LAN converter; since the LAN stuff does its own retries,
+ * it makes no sense to do it here. However, this can be used if you
+ * have unusual requirements.
+ */
+int ipmi_request_settime(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ int priority,
+ int max_retries,
+ unsigned int retry_time_ms);
+
+/*
+ * Like ipmi_request, but with messages supplied. This will not
+ * allocate any memory, and the messages may be statically allocated
+ * (just make sure to do the "done" handling on them). Note that this
+ * is primarily for the watchdog timer, since it should be able to
+ * send messages even if no memory is available. This is subject to
+ * change as the system changes, so don't use it unless you REALLY
+ * have to.
+ */
+int ipmi_request_supply_msgs(ipmi_user_t user,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ void *user_msg_data,
+ void *supplied_smi,
+ struct ipmi_recv_msg *supplied_recv,
+ int priority);
+
+/*
+ * Poll the IPMI interface for the user. This causes the IPMI code to
+ * do an immediate check for information from the driver and handle
+ * anything that is immediately pending. This will not block in any
+ * way. This is useful if you need to spin waiting for something to
+ * happen in the IPMI driver.
+ */
+void ipmi_poll_interface(ipmi_user_t user);
+
+/*
+ * When commands come in to the SMS, the user can register to receive
+ * them. Only one user can be listening on a specific netfn/cmd/chan tuple
+ * at a time, you will get an EBUSY error if the command is already
+ * registered. If a command is received that does not have a user
+ * registered, the driver will automatically return the proper
+ * error. Channels are specified as a bitfield, use IPMI_CHAN_ALL to
+ * mean all channels.
+ */
+int ipmi_register_for_cmd(ipmi_user_t user,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans);
+int ipmi_unregister_for_cmd(ipmi_user_t user,
+ unsigned char netfn,
+ unsigned char cmd,
+ unsigned int chans);
+
+/*
+ * Go into a mode where the driver will not autonomously attempt to do
+ * things with the interface. It will still respond to attentions and
+ * interrupts, and it will expect that commands will complete. It
+ * will not automatically check for flags, events, or things of that
+ * nature.
+ *
+ * This is primarily used for firmware upgrades. The idea is that
+ * when you go into firmware upgrade mode, you do this operation
+ * and the driver will not attempt to do anything but what you tell
+ * it or what the BMC asks for.
+ *
+ * Note that if you send a command that resets the BMC, the driver
+ * will still expect a response from that command. So the BMC should
+ * reset itself *after* the response is sent. Resetting before the
+ * response is just silly.
+ *
+ * If in auto maintenance mode, the driver will automatically go into
+ * maintenance mode for 30 seconds if it sees a cold reset, a warm
+ * reset, or a firmware NetFN. This means that code that uses only
+ * firmware NetFN commands to do upgrades will work automatically
+ * without change, assuming it sends a message every 30 seconds or
+ * less.
+ *
+ * See the IPMI_MAINTENANCE_MODE_xxx defines for what the mode means.
+ */
+int ipmi_get_maintenance_mode(ipmi_user_t user);
+int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
+
+/*
+ * When the user is created, it will not receive IPMI events by
+ * default. The user must set this to TRUE to get incoming events.
+ * The first user that sets this to TRUE will receive all events that
+ * have been queued while no one was waiting for events.
+ */
+int ipmi_set_gets_events(ipmi_user_t user, bool val);
+
+/*
+ * Called when a new SMI is registered. This will also be called on
+ * every existing interface when a new watcher is registered with
+ * ipmi_smi_watcher_register().
+ */
+struct ipmi_smi_watcher {
+ struct list_head link;
+
+ /* You must set the owner to the current module, if you are in
+ a module (generally just set it to "THIS_MODULE"). */
+ struct module *owner;
+
+ /* These two are called with read locks held for the interface
+ the watcher list. So you can add and remove users from the
+ IPMI interface, send messages, etc., but you cannot add
+ or remove SMI watchers or SMI interfaces. */
+ void (*new_smi)(int if_num, struct device *dev);
+ void (*smi_gone)(int if_num);
+};
+
+int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher);
+int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher);
+
+/* The following are various helper functions for dealing with IPMI
+ addresses. */
+
+/* Return the maximum length of an IPMI address given its type. */
+unsigned int ipmi_addr_length(int addr_type);
+
+/* Validate that the given IPMI address is valid. */
+int ipmi_validate_addr(struct ipmi_addr *addr, int len);
+
+/*
+ * How did the IPMI driver find out about the device?
+ */
+enum ipmi_addr_src {
+ SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
+ SI_PCI, SI_DEVICETREE, SI_DEFAULT
+};
+const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
+
+union ipmi_smi_info_union {
+#ifdef CONFIG_ACPI
+ /*
+ * the acpi_info element is defined for the SI_ACPI
+ * address type
+ */
+ struct {
+ acpi_handle acpi_handle;
+ } acpi_info;
+#endif
+};
+
+struct ipmi_smi_info {
+ enum ipmi_addr_src addr_src;
+
+ /*
+ * Base device for the interface. Don't forget to put this when
+ * you are done.
+ */
+ struct device *dev;
+
+ /*
+ * The addr_info provides more detailed info for some IPMI
+ * devices, depending on the addr_src. Currently only SI_ACPI
+ * info is provided.
+ */
+ union ipmi_smi_info_union addr_info;
+};
+
+/* This is to get the private info of ipmi_smi_t */
+extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data);
+
+#endif /* __LINUX_IPMI_H */
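A sketch of a minimal in-kernel IPMI user (illustrative only): register against interface 0 and free every received message; all example_* names are invented.

#include <linux/ipmi.h>

static void example_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	/* Inspect msg->recv_type / msg->msg here if needed. */
	ipmi_free_recv_msg(msg);
}

static struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl = example_recv,
};

static ipmi_user_t example_user;

static int example_attach(void)
{
	/* Bind to interface number 0; handler_data is unused in this sketch. */
	return ipmi_create_user(0, &example_hndl, NULL, &example_user);
}

static void example_detach(void)
{
	ipmi_destroy_user(example_user);
}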
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
new file mode 100644
index 000000000..0b1e569f5
--- /dev/null
+++ b/include/linux/ipmi_smi.h
@@ -0,0 +1,247 @@
+/*
+ * ipmi_smi.h
+ *
+ * MontaVista IPMI system management interface
+ *
+ * Author: MontaVista Software, Inc.
+ * Corey Minyard <minyard@mvista.com>
+ * source@mvista.com
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_IPMI_SMI_H
+#define __LINUX_IPMI_SMI_H
+
+#include <linux/ipmi_msgdefs.h>
+#include <linux/proc_fs.h>
+#include <linux/platform_device.h>
+#include <linux/ipmi.h>
+
+struct device;
+
+/* This file describes the interface for IPMI system management interface
+ drivers to bind into the IPMI message handler. */
+
+/* Structure for the low-level drivers. */
+typedef struct ipmi_smi *ipmi_smi_t;
+
+/*
+ * Messages to/from the lower layer. The smi interface will take one
+ * of these to send. After the send has occurred and a response has
+ * been received, it will report this same data structure back up to
+ * the upper layer. If an error occurs, it should fill in the
+ * response with an error code in the completion code location. When
+ * asynchronous data is received, one of these is allocated, the
+ * data_size is set to zero and the response holds the data from the
+ * get message or get event command that the interface initiated.
+ * Note that it is the interface's responsibility to detect
+ * asynchronous data and messages and request them from the
+ * interface.
+ */
+struct ipmi_smi_msg {
+ struct list_head link;
+
+ long msgid;
+ void *user_data;
+
+ int data_size;
+ unsigned char data[IPMI_MAX_MSG_LENGTH];
+
+ int rsp_size;
+ unsigned char rsp[IPMI_MAX_MSG_LENGTH];
+
+ /* Will be called when the system is done with the message
+ (presumably to free it). */
+ void (*done)(struct ipmi_smi_msg *msg);
+};
+
+struct ipmi_smi_handlers {
+ struct module *owner;
+
+ /* The low-level interface cannot start sending messages to
+ the upper layer until this function is called. This may
+ not be NULL; the lower layer must take the interface from
+ this call. */
+ int (*start_processing)(void *send_info,
+ ipmi_smi_t new_intf);
+
+ /*
+ * Get the detailed private info of the low level interface and store
+ * it into the ipmi_smi_info structure. For example: the
+ * ACPI device handle will be returned for the pnp_acpi IPMI device.
+ */
+ int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data);
+
+ /* Called to enqueue an SMI message to be sent. This
+ operation is not allowed to fail. If an error occurs, it
+ should report back the error in a received message. It may
+ do this in the current call context, since no write locks
+ are held when this is run. Messages are delivered one at
+ a time by the message handler; a new message will not be
+ delivered until the previous message is returned. */
+ void (*sender)(void *send_info,
+ struct ipmi_smi_msg *msg);
+
+ /* Called by the upper layer to request that we try to get
+ events from the BMC we are attached to. */
+ void (*request_events)(void *send_info);
+
+ /* Called by the upper layer when some user requires that the
+ interface watch for events, received messages, watchdog
+ pretimeouts, or not. Used by the SMI to know if it should
+ watch for these. This may be NULL if the SMI does not
+ implement it. */
+ void (*set_need_watch)(void *send_info, bool enable);
+
+ /* Called when the interface should go into "run to
+ completion" mode. If this call sets the value to true, the
+ interface should make sure that all messages are flushed
+ out and that none are pending, and any new requests are run
+ to completion immediately. */
+ void (*set_run_to_completion)(void *send_info, bool run_to_completion);
+
+ /* Called to poll for work to do. This is so upper layers can
+ poll for operations during things like crash dumps. */
+ void (*poll)(void *send_info);
+
+ /* Enable/disable firmware maintenance mode. Note that this
+ is *not* the modes defined, this is simply an on/off
+ setting. The message handler does the mode handling. Note
+ that this is called from interrupt context, so it cannot
+ block. */
+ void (*set_maintenance_mode)(void *send_info, bool enable);
+
+ /* Tell the handler that we are using it/not using it. The
+ message handler gets the modules that this handler belongs
+ to; this function lets the SMI claim any modules that it
+ uses. These may be NULL if this is not required. */
+ int (*inc_usecount)(void *send_info);
+ void (*dec_usecount)(void *send_info);
+};
+
+struct ipmi_device_id {
+ unsigned char device_id;
+ unsigned char device_revision;
+ unsigned char firmware_revision_1;
+ unsigned char firmware_revision_2;
+ unsigned char ipmi_version;
+ unsigned char additional_device_support;
+ unsigned int manufacturer_id;
+ unsigned int product_id;
+ unsigned char aux_firmware_revision[4];
+ unsigned int aux_firmware_revision_set : 1;
+};
+
+#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
+#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
+
+/* Take a pointer to a raw data buffer and a length and extract device
+ id information from it. The first byte of data must point to the
+ netfn << 2, the data should be of the format:
+ netfn << 2, cmd, completion code, data
+ as normally comes from a device interface. */
+static inline int ipmi_demangle_device_id(const unsigned char *data,
+ unsigned int data_len,
+ struct ipmi_device_id *id)
+{
+ if (data_len < 9)
+ return -EINVAL;
+ if (data[0] != IPMI_NETFN_APP_RESPONSE << 2 ||
+ data[1] != IPMI_GET_DEVICE_ID_CMD)
+ /* Strange, didn't get the response we expected. */
+ return -EINVAL;
+ if (data[2] != 0)
+ /* That's odd, it shouldn't be able to fail. */
+ return -EINVAL;
+
+ data += 3;
+ data_len -= 3;
+ id->device_id = data[0];
+ id->device_revision = data[1];
+ id->firmware_revision_1 = data[2];
+ id->firmware_revision_2 = data[3];
+ id->ipmi_version = data[4];
+ id->additional_device_support = data[5];
+ if (data_len >= 11) {
+ id->manufacturer_id = (data[6] | (data[7] << 8) |
+ (data[8] << 16));
+ id->product_id = data[9] | (data[10] << 8);
+ } else {
+ id->manufacturer_id = 0;
+ id->product_id = 0;
+ }
+ if (data_len >= 15) {
+ memcpy(id->aux_firmware_revision, data+11, 4);
+ id->aux_firmware_revision_set = 1;
+ } else
+ id->aux_firmware_revision_set = 0;
+
+ return 0;
+}
+
+/* Add a low-level interface to the IPMI driver. Note that if the
+ interface doesn't know its slave address, it should pass in zero.
+ The low-level interface should not deliver any messages to the
+ upper layer until the start_processing() function in the handlers
+ is called, and the lower layer must get the interface from that
+ call. */
+int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct ipmi_device_id *device_id,
+ struct device *dev,
+ unsigned char slave_addr);
+
+/*
+ * Remove a low-level interface from the IPMI driver. This will
+ * return an error if the interface is still in use by a user.
+ */
+int ipmi_unregister_smi(ipmi_smi_t intf);
+
+/*
+ * The lower layer reports received messages through this interface.
+ * The data_size should be zero if this is an asynchronous message. If
+ * the lower layer gets an error sending a message, it should format
+ * an error response in the message response.
+ */
+void ipmi_smi_msg_received(ipmi_smi_t intf,
+ struct ipmi_smi_msg *msg);
+
+/* The lower layer received a watchdog pre-timeout on interface. */
+void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf);
+
+struct ipmi_smi_msg *ipmi_alloc_smi_msg(void);
+static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
+{
+ msg->done(msg);
+}
+
+/* Allow the lower layer to add things to the proc filesystem
+ directory for this interface. Note that the entry will
+ automatically be destroyed when the interface is destroyed. */
+int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
+ const struct file_operations *proc_ops,
+ void *data);
+
+#endif /* __LINUX_IPMI_SMI_H */
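A sketch of ipmi_demangle_device_id() on a hand-built Get Device ID response (illustrative only; the payload bytes after the three-byte header are made up).

#include <linux/ipmi_smi.h>

static int example_decode_device_id(struct ipmi_device_id *id)
{
	unsigned char rsp[] = {
		IPMI_NETFN_APP_RESPONSE << 2, IPMI_GET_DEVICE_ID_CMD, 0x00,
		0x20, 0x01, 0x00, 0x02, 0x51, 0x00, 0x00, 0x00, 0x00,
	};

	/* On success, ipmi_version_major(id)/ipmi_version_minor(id) are usable. */
	return ipmi_demangle_device_id(rsp, sizeof(rsp), id);
}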
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
new file mode 100644
index 000000000..82806c60a
--- /dev/null
+++ b/include/linux/ipv6.h
@@ -0,0 +1,327 @@
+#ifndef _IPV6_H
+#define _IPV6_H
+
+#include <uapi/linux/ipv6.h>
+
+#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
+#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
+/*
+ * This structure contains configuration options per IPv6 link.
+ */
+struct ipv6_devconf {
+ __s32 forwarding;
+ __s32 hop_limit;
+ __s32 mtu6;
+ __s32 accept_ra;
+ __s32 accept_redirects;
+ __s32 autoconf;
+ __s32 dad_transmits;
+ __s32 rtr_solicits;
+ __s32 rtr_solicit_interval;
+ __s32 rtr_solicit_delay;
+ __s32 force_mld_version;
+ __s32 mldv1_unsolicited_report_interval;
+ __s32 mldv2_unsolicited_report_interval;
+ __s32 use_tempaddr;
+ __s32 temp_valid_lft;
+ __s32 temp_prefered_lft;
+ __s32 regen_max_retry;
+ __s32 max_desync_factor;
+ __s32 max_addresses;
+ __s32 accept_ra_defrtr;
+ __s32 accept_ra_pinfo;
+#ifdef CONFIG_IPV6_ROUTER_PREF
+ __s32 accept_ra_rtr_pref;
+ __s32 rtr_probe_interval;
+#ifdef CONFIG_IPV6_ROUTE_INFO
+ __s32 accept_ra_rt_info_max_plen;
+#endif
+#endif
+ __s32 proxy_ndp;
+ __s32 accept_source_route;
+ __s32 accept_ra_from_local;
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+ __s32 optimistic_dad;
+ __s32 use_optimistic;
+#endif
+#ifdef CONFIG_IPV6_MROUTE
+ __s32 mc_forwarding;
+#endif
+ __s32 disable_ipv6;
+ __s32 accept_dad;
+ __s32 force_tllao;
+ __s32 ndisc_notify;
+ __s32 suppress_frag_ndisc;
+ __s32 accept_ra_mtu;
+ struct ipv6_stable_secret {
+ bool initialized;
+ struct in6_addr secret;
+ } stable_secret;
+ void *sysctl;
+};
+
+struct ipv6_params {
+ __s32 disable_ipv6;
+ __s32 autoconf;
+};
+extern struct ipv6_params ipv6_defaults;
+#include <linux/icmpv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+#include <net/inet_sock.h>
+
+static inline struct ipv6hdr *ipv6_hdr(const struct sk_buff *skb)
+{
+ return (struct ipv6hdr *)skb_network_header(skb);
+}
+
+static inline struct ipv6hdr *inner_ipv6_hdr(const struct sk_buff *skb)
+{
+ return (struct ipv6hdr *)skb_inner_network_header(skb);
+}
+
+static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
+{
+ return (struct ipv6hdr *)skb_transport_header(skb);
+}
+
+/*
+ This structure contains results of exthdrs parsing
+ as offsets from skb->nh.
+ */
+
+struct inet6_skb_parm {
+ int iif;
+ __be16 ra;
+ __u16 hop;
+ __u16 dst0;
+ __u16 srcrt;
+ __u16 dst1;
+ __u16 lastopt;
+ __u16 nhoff;
+ __u16 flags;
+#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+ __u16 dsthao;
+#endif
+ __u16 frag_max_size;
+
+#define IP6SKB_XFRM_TRANSFORMED 1
+#define IP6SKB_FORWARDED 2
+#define IP6SKB_REROUTED 4
+#define IP6SKB_ROUTERALERT 8
+#define IP6SKB_FRAGMENTED 16
+};
+
+#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
+#define IP6CBMTU(skb) ((struct ip6_mtuinfo *)((skb)->cb))
+
+static inline int inet6_iif(const struct sk_buff *skb)
+{
+ return IP6CB(skb)->iif;
+}
+
+struct tcp6_request_sock {
+ struct tcp_request_sock tcp6rsk_tcp;
+};
+
+struct ipv6_mc_socklist;
+struct ipv6_ac_socklist;
+struct ipv6_fl_socklist;
+
+struct inet6_cork {
+ struct ipv6_txoptions *opt;
+ u8 hop_limit;
+ u8 tclass;
+};
+
+/**
+ * struct ipv6_pinfo - ipv6 private area
+ *
+ * In the struct sock hierarchy (tcp6_sock, udp6_sock, etc)
+ * this _must_ be the last member, so that inet6_sk_generic
+ * is able to calculate its offset from the base struct sock
+ * by using the struct proto->slab_obj_size member. -acme
+ */
+struct ipv6_pinfo {
+ struct in6_addr saddr;
+ struct in6_pktinfo sticky_pktinfo;
+ const struct in6_addr *daddr_cache;
+#ifdef CONFIG_IPV6_SUBTREES
+ const struct in6_addr *saddr_cache;
+#endif
+
+ __be32 flow_label;
+ __u32 frag_size;
+
+ /*
+ * Packed in 16bits.
+ * Omit one shift by putting the signed field at MSB.
+ */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ __s16 hop_limit:9;
+ __u16 __unused_1:7;
+#else
+ __u16 __unused_1:7;
+ __s16 hop_limit:9;
+#endif
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /* Packed in 16bits. */
+ __s16 mcast_hops:9;
+ __u16 __unused_2:6,
+ mc_loop:1;
+#else
+ __u16 mc_loop:1,
+ __unused_2:6;
+ __s16 mcast_hops:9;
+#endif
+ int ucast_oif;
+ int mcast_oif;
+
+ /* pktoption flags */
+ union {
+ struct {
+ __u16 srcrt:1,
+ osrcrt:1,
+ rxinfo:1,
+ rxoinfo:1,
+ rxhlim:1,
+ rxohlim:1,
+ hopopts:1,
+ ohopopts:1,
+ dstopts:1,
+ odstopts:1,
+ rxflow:1,
+ rxtclass:1,
+ rxpmtu:1,
+ rxorigdstaddr:1;
+ /* 2 bits hole */
+ } bits;
+ __u16 all;
+ } rxopt;
+
+ /* sockopt flags */
+ __u16 recverr:1,
+ sndflow:1,
+ repflow:1,
+ pmtudisc:3,
+ padding:1, /* 1 bit hole */
+ srcprefs:3, /* 001: prefer temporary address
+ * 010: prefer public address
+ * 100: prefer care-of address
+ */
+ dontfrag:1,
+ autoflowlabel:1;
+ __u8 min_hopcount;
+ __u8 tclass;
+ __be32 rcv_flowinfo;
+
+ __u32 dst_cookie;
+ __u32 rx_dst_cookie;
+
+ struct ipv6_mc_socklist __rcu *ipv6_mc_list;
+ struct ipv6_ac_socklist *ipv6_ac_list;
+ struct ipv6_fl_socklist __rcu *ipv6_fl_list;
+
+ struct ipv6_txoptions *opt;
+ struct sk_buff *pktoptions;
+ struct sk_buff *rxpmtu;
+ struct inet6_cork cork;
+};
+
+/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
+struct raw6_sock {
+ /* inet_sock has to be the first member of raw6_sock */
+ struct inet_sock inet;
+ __u32 checksum; /* perform checksum */
+ __u32 offset; /* checksum offset */
+ struct icmp6_filter filter;
+ __u32 ip6mr_table;
+ /* ipv6_pinfo has to be the last member of raw6_sock, see inet6_sk_generic */
+ struct ipv6_pinfo inet6;
+};
+
+struct udp6_sock {
+ struct udp_sock udp;
+ /* ipv6_pinfo has to be the last member of udp6_sock, see inet6_sk_generic */
+ struct ipv6_pinfo inet6;
+};
+
+struct tcp6_sock {
+ struct tcp_sock tcp;
+ /* ipv6_pinfo has to be the last member of tcp6_sock, see inet6_sk_generic */
+ struct ipv6_pinfo inet6;
+};
+
+extern int inet6_sk_rebuild_header(struct sock *sk);
+
+struct tcp6_timewait_sock {
+ struct tcp_timewait_sock tcp6tw_tcp;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
+{
+ return inet_sk(__sk)->pinet6;
+}
+
+static inline struct raw6_sock *raw6_sk(const struct sock *sk)
+{
+ return (struct raw6_sock *)sk;
+}
+
+static inline void inet_sk_copy_descendant(struct sock *sk_to,
+ const struct sock *sk_from)
+{
+ int ancestor_size = sizeof(struct inet_sock);
+
+ if (sk_from->sk_family == PF_INET6)
+ ancestor_size += sizeof(struct ipv6_pinfo);
+
+ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
+}
+
+#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
+#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
+#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
+ inet6_sk(sk)->rxopt.bits.rxinfo)
+
+static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
+{
+ if (sk->sk_family == AF_INET6)
+ return &sk->sk_v6_rcv_saddr;
+ return NULL;
+}
+
+static inline int inet_v6_ipv6only(const struct sock *sk)
+{
+ /* ipv6only field is at same position for timewait and other sockets */
+ return ipv6_only_sock(sk);
+}
+#else
+#define __ipv6_only_sock(sk) 0
+#define ipv6_only_sock(sk) 0
+#define ipv6_sk_rxinfo(sk) 0
+
+static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
+{
+ return NULL;
+}
+
+static inline struct inet6_request_sock *
+ inet6_rsk(const struct request_sock *rsk)
+{
+ return NULL;
+}
+
+static inline struct raw6_sock *raw6_sk(const struct sock *sk)
+{
+ return NULL;
+}
+
+#define inet6_rcv_saddr(__sk) NULL
+#define tcp_twsk_ipv6only(__sk) 0
+#define inet_v6_ipv6only(__sk) 0
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+#endif /* _IPV6_H */
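
A minimal usage sketch of the accessors declared above (illustrative only: the function name and the pr_info() reporting are hypothetical, and linux/printk.h plus linux/errno.h are assumed to be available):

    static int example_dump_hop_limits(const struct sk_buff *skb,
                                       const struct sock *sk)
    {
            const struct ipv6hdr *hdr = ipv6_hdr(skb);      /* network header */
            const struct ipv6_pinfo *np = inet6_sk(sk);     /* NULL when IPv6 is disabled */

            if (!np)
                    return -EAFNOSUPPORT;

            /* Per-packet hop limit vs. the socket's configured unicast hop limit. */
            pr_info("pkt hop_limit=%d sock hop_limit=%d ipv6only=%d\n",
                    hdr->hop_limit, np->hop_limit, ipv6_only_sock(sk));
            return 0;
    }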
diff --git a/include/linux/ipv6_route.h b/include/linux/ipv6_route.h
new file mode 100644
index 000000000..25b5f1f5e
--- /dev/null
+++ b/include/linux/ipv6_route.h
@@ -0,0 +1,19 @@
+/*
+ * Linux INET6 implementation
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IPV6_ROUTE_H
+#define _LINUX_IPV6_ROUTE_H
+
+#include <uapi/linux/ipv6_route.h>
+
+#define IPV6_EXTRACT_PREF(flag) (((flag) & RTF_PREF_MASK) >> 27)
+#define IPV6_DECODE_PREF(pref) ((pref) ^ 2) /* 1:low,2:med,3:high */
+#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
new file mode 100644
index 000000000..62c6901ca
--- /dev/null
+++ b/include/linux/irq.h
@@ -0,0 +1,872 @@
+#ifndef _LINUX_IRQ_H
+#define _LINUX_IRQ_H
+
+/*
+ * Please do not include this file in generic code. There is currently
+ * no requirement for any architecture to implement anything held
+ * within this file.
+ *
+ * Thanks. --rmk
+ */
+
+#include <linux/smp.h>
+#include <linux/linkage.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/irqhandler.h>
+#include <linux/irqreturn.h>
+#include <linux/irqnr.h>
+#include <linux/errno.h>
+#include <linux/topology.h>
+#include <linux/wait.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/ptrace.h>
+#include <asm/irq_regs.h>
+
+struct seq_file;
+struct module;
+struct msi_msg;
+enum irqchip_irq_state;
+
+/*
+ * IRQ line status.
+ *
+ * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h
+ *
+ * IRQ_TYPE_NONE - default, unspecified type
+ * IRQ_TYPE_EDGE_RISING - rising edge triggered
+ * IRQ_TYPE_EDGE_FALLING - falling edge triggered
+ * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered
+ * IRQ_TYPE_LEVEL_HIGH - high level triggered
+ * IRQ_TYPE_LEVEL_LOW - low level triggered
+ * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits
+ * IRQ_TYPE_SENSE_MASK - Mask for all the above bits
+ * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type
+ * to set up the HW to a sane default (used
+ * by irqdomain map() callbacks to synchronize
+ * the HW state and SW flags for a newly
+ * allocated descriptor).
+ *
+ * IRQ_TYPE_PROBE - Special flag for probing in progress
+ *
+ * Bits which can be modified via irq_set/clear/modify_status_flags()
+ * IRQ_LEVEL - Interrupt is level type. Will also be
+ * updated in the code when the above trigger
+ * bits are modified via irq_set_irq_type()
+ * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect
+ * it from affinity setting
+ * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing
+ * IRQ_NOREQUEST - Interrupt cannot be requested via
+ * request_irq()
+ * IRQ_NOTHREAD - Interrupt cannot be threaded
+ * IRQ_NOAUTOEN - Interrupt is not automatically enabled in
+ * request/setup_irq()
+ * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
+ * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
+ * IRQ_NESTED_THREAD - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
+ * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
+ * it from the spurious interrupt detection
+ * mechanism and from core side polling.
+ */
+enum {
+ IRQ_TYPE_NONE = 0x00000000,
+ IRQ_TYPE_EDGE_RISING = 0x00000001,
+ IRQ_TYPE_EDGE_FALLING = 0x00000002,
+ IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING),
+ IRQ_TYPE_LEVEL_HIGH = 0x00000004,
+ IRQ_TYPE_LEVEL_LOW = 0x00000008,
+ IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH),
+ IRQ_TYPE_SENSE_MASK = 0x0000000f,
+ IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK,
+
+ IRQ_TYPE_PROBE = 0x00000010,
+
+ IRQ_LEVEL = (1 << 8),
+ IRQ_PER_CPU = (1 << 9),
+ IRQ_NOPROBE = (1 << 10),
+ IRQ_NOREQUEST = (1 << 11),
+ IRQ_NOAUTOEN = (1 << 12),
+ IRQ_NO_BALANCING = (1 << 13),
+ IRQ_MOVE_PCNTXT = (1 << 14),
+ IRQ_NESTED_THREAD = (1 << 15),
+ IRQ_NOTHREAD = (1 << 16),
+ IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_IS_POLLED = (1 << 18),
+};
+
+#define IRQF_MODIFY_MASK \
+ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
+ IRQ_IS_POLLED)
+
+#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+/*
+ * Return value for chip->irq_set_affinity()
+ *
+ * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
+ * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity
+ * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
+ * support stacked irqchips, which indicates skipping
+ * all descendant irqchips.
+ */
+enum {
+ IRQ_SET_MASK_OK = 0,
+ IRQ_SET_MASK_OK_NOCOPY,
+ IRQ_SET_MASK_OK_DONE,
+};
+
+struct msi_desc;
+struct irq_domain;
+
+/**
+ * struct irq_data - per irq and irq chip data passed down to chip functions
+ * @mask: precomputed bitmask for accessing the chip registers
+ * @irq: interrupt number
+ * @hwirq: hardware interrupt number, local to the interrupt domain
+ * @node: node index useful for balancing
+ * @state_use_accessors: status information for irq chip functions.
+ * Use accessor functions to deal with it
+ * @chip: low level interrupt hardware access
+ * @domain: Interrupt translation domain; responsible for mapping
+ * between hwirq number and linux irq number.
+ * @parent_data: pointer to parent struct irq_data to support hierarchy
+ * irq_domain
+ * @handler_data: per-IRQ data for the irq_chip methods
+ * @chip_data: platform-specific per-chip private data for the chip
+ * methods, to allow shared chip implementations
+ * @msi_desc: MSI descriptor
+ * @affinity: IRQ affinity on SMP
+ *
+ * The fields here need to overlay the ones in irq_desc until we
+ * have cleaned up the direct references and switched everything over to
+ * irq_data.
+ */
+struct irq_data {
+ u32 mask;
+ unsigned int irq;
+ unsigned long hwirq;
+ unsigned int node;
+ unsigned int state_use_accessors;
+ struct irq_chip *chip;
+ struct irq_domain *domain;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ struct irq_data *parent_data;
+#endif
+ void *handler_data;
+ void *chip_data;
+ struct msi_desc *msi_desc;
+ cpumask_var_t affinity;
+};
+
+/*
+ * Bit masks for irq_data.state
+ *
+ * IRQD_TRIGGER_MASK - Mask for the trigger type bits
+ * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
+ * IRQD_NO_BALANCING - Balancing disabled for this IRQ
+ * IRQD_PER_CPU - Interrupt is per cpu
+ * IRQD_AFFINITY_SET - Interrupt affinity was set
+ * IRQD_LEVEL - Interrupt is level triggered
+ * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
+ * from suspend
+ * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
+ * context
+ * IRQD_IRQ_DISABLED - Disabled state of the interrupt
+ * IRQD_IRQ_MASKED - Masked state of the interrupt
+ * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
+ * IRQD_WAKEUP_ARMED - Wakeup mode armed
+ */
+enum {
+ IRQD_TRIGGER_MASK = 0xf,
+ IRQD_SETAFFINITY_PENDING = (1 << 8),
+ IRQD_NO_BALANCING = (1 << 10),
+ IRQD_PER_CPU = (1 << 11),
+ IRQD_AFFINITY_SET = (1 << 12),
+ IRQD_LEVEL = (1 << 13),
+ IRQD_WAKEUP_STATE = (1 << 14),
+ IRQD_MOVE_PCNTXT = (1 << 15),
+ IRQD_IRQ_DISABLED = (1 << 16),
+ IRQD_IRQ_MASKED = (1 << 17),
+ IRQD_IRQ_INPROGRESS = (1 << 18),
+ IRQD_WAKEUP_ARMED = (1 << 19),
+};
+
+static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
+}
+
+static inline bool irqd_is_per_cpu(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_PER_CPU;
+}
+
+static inline bool irqd_can_balance(struct irq_data *d)
+{
+ return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+}
+
+static inline bool irqd_affinity_was_set(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_AFFINITY_SET;
+}
+
+static inline void irqd_mark_affinity_was_set(struct irq_data *d)
+{
+ d->state_use_accessors |= IRQD_AFFINITY_SET;
+}
+
+static inline u32 irqd_get_trigger_type(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_TRIGGER_MASK;
+}
+
+/*
+ * Must only be called inside irq_chip.irq_set_type() functions.
+ */
+static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
+{
+ d->state_use_accessors &= ~IRQD_TRIGGER_MASK;
+ d->state_use_accessors |= type & IRQD_TRIGGER_MASK;
+}
+
+static inline bool irqd_is_level_type(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_LEVEL;
+}
+
+static inline bool irqd_is_wakeup_set(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_WAKEUP_STATE;
+}
+
+static inline bool irqd_can_move_in_process_context(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_MOVE_PCNTXT;
+}
+
+static inline bool irqd_irq_disabled(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_IRQ_DISABLED;
+}
+
+static inline bool irqd_irq_masked(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_IRQ_MASKED;
+}
+
+static inline bool irqd_irq_inprogress(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
+}
+
+static inline bool irqd_is_wakeup_armed(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+}
+
+
+/*
+ * Functions for chained handlers which can be enabled/disabled by the
+ * standard disable_irq/enable_irq calls. Must be called with
+ * irq_desc->lock held.
+ */
+static inline void irqd_set_chained_irq_inprogress(struct irq_data *d)
+{
+ d->state_use_accessors |= IRQD_IRQ_INPROGRESS;
+}
+
+static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
+{
+ d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
+}
+
+static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+/**
+ * struct irq_chip - hardware interrupt chip descriptor
+ *
+ * @name: name for /proc/interrupts
+ * @irq_startup: start up the interrupt (defaults to ->enable if NULL)
+ * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
+ * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL)
+ * @irq_disable: disable the interrupt
+ * @irq_ack: start of a new interrupt
+ * @irq_mask: mask an interrupt source
+ * @irq_mask_ack: ack and mask an interrupt source
+ * @irq_unmask: unmask an interrupt source
+ * @irq_eoi: end of interrupt
+ * @irq_set_affinity: set the CPU affinity on SMP machines
+ * @irq_retrigger: resend an IRQ to the CPU
+ * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
+ * @irq_set_wake: enable/disable power-management wake-on of an IRQ
+ * @irq_bus_lock: function to lock access to slow bus (i2c) chips
+ * @irq_bus_sync_unlock: function to sync and unlock slow bus (i2c) chips
+ * @irq_cpu_online: configure an interrupt source for a secondary CPU
+ * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU
+ * @irq_suspend: function called from core code on suspend once per chip
+ * @irq_resume: function called from core code on resume once per chip
+ * @irq_pm_shutdown: function called from core code on shutdown once per chip
+ * @irq_calc_mask: Optional function to set irq_data.mask for special cases
+ * @irq_print_chip: optional to print special chip info in show_interrupts
+ * @irq_request_resources: optional to request resources before calling
+ * any other callback related to this irq
+ * @irq_release_resources: optional to release resources acquired with
+ * irq_request_resources
+ * @irq_compose_msi_msg: optional to compose message content for MSI
+ * @irq_write_msi_msg: optional to write message content for MSI
+ * @irq_get_irqchip_state: return the internal state of an interrupt
+ * @irq_set_irqchip_state: set the internal state of an interrupt
+ * @flags: chip specific flags
+ */
+struct irq_chip {
+ const char *name;
+ unsigned int (*irq_startup)(struct irq_data *data);
+ void (*irq_shutdown)(struct irq_data *data);
+ void (*irq_enable)(struct irq_data *data);
+ void (*irq_disable)(struct irq_data *data);
+
+ void (*irq_ack)(struct irq_data *data);
+ void (*irq_mask)(struct irq_data *data);
+ void (*irq_mask_ack)(struct irq_data *data);
+ void (*irq_unmask)(struct irq_data *data);
+ void (*irq_eoi)(struct irq_data *data);
+
+ int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+ int (*irq_retrigger)(struct irq_data *data);
+ int (*irq_set_type)(struct irq_data *data, unsigned int flow_type);
+ int (*irq_set_wake)(struct irq_data *data, unsigned int on);
+
+ void (*irq_bus_lock)(struct irq_data *data);
+ void (*irq_bus_sync_unlock)(struct irq_data *data);
+
+ void (*irq_cpu_online)(struct irq_data *data);
+ void (*irq_cpu_offline)(struct irq_data *data);
+
+ void (*irq_suspend)(struct irq_data *data);
+ void (*irq_resume)(struct irq_data *data);
+ void (*irq_pm_shutdown)(struct irq_data *data);
+
+ void (*irq_calc_mask)(struct irq_data *data);
+
+ void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
+ int (*irq_request_resources)(struct irq_data *data);
+ void (*irq_release_resources)(struct irq_data *data);
+
+ void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
+ void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
+
+ int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
+ int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
+
+ unsigned long flags;
+};
+
+/*
+ * irq_chip specific flags
+ *
+ * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type()
+ * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled
+ * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
+ * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
+ * when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
+ * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
+ * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ */
+enum {
+ IRQCHIP_SET_TYPE_MASKED = (1 << 0),
+ IRQCHIP_EOI_IF_HANDLED = (1 << 1),
+ IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
+ IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+ IRQCHIP_SKIP_SET_WAKE = (1 << 4),
+ IRQCHIP_ONESHOT_SAFE = (1 << 5),
+ IRQCHIP_EOI_THREADED = (1 << 6),
+};
+
+/* This include will go away once we isolated irq_desc usage to core code */
+#include <linux/irqdesc.h>
+
+/*
+ * Pick up the arch-dependent methods:
+ */
+#include <asm/hw_irq.h>
+
+#ifndef NR_IRQS_LEGACY
+# define NR_IRQS_LEGACY 0
+#endif
+
+#ifndef ARCH_IRQ_INIT_FLAGS
+# define ARCH_IRQ_INIT_FLAGS 0
+#endif
+
+#define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS
+
+struct irqaction;
+extern int setup_irq(unsigned int irq, struct irqaction *new);
+extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
+
+extern void irq_cpu_online(void);
+extern void irq_cpu_offline(void);
+extern int irq_set_affinity_locked(struct irq_data *data,
+ const struct cpumask *cpumask, bool force);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
+void irq_move_irq(struct irq_data *data);
+void irq_move_masked_irq(struct irq_data *data);
+#else
+static inline void irq_move_irq(struct irq_data *data) { }
+static inline void irq_move_masked_irq(struct irq_data *data) { }
+#endif
+
+extern int no_irq_affinity;
+
+#ifdef CONFIG_HARDIRQS_SW_RESEND
+int irq_set_parent(int irq, int parent_irq);
+#else
+static inline int irq_set_parent(int irq, int parent_irq)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Built-in IRQ handlers for various IRQ types,
+ * callable via desc->handle_irq()
+ */
+extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_nested_irq(unsigned int irq);
+
+extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void irq_chip_ack_parent(struct irq_data *data);
+extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
+extern void irq_chip_mask_parent(struct irq_data *data);
+extern void irq_chip_unmask_parent(struct irq_data *data);
+extern void irq_chip_eoi_parent(struct irq_data *data);
+extern int irq_chip_set_affinity_parent(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force);
+extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
+#endif
+
+/* Handling of unhandled and spurious interrupts: */
+extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret);
+
+
+/* Enable/disable irq debugging output: */
+extern int noirqdebug_setup(char *str);
+
+/* Checks whether the interrupt can be requested by request_irq(): */
+extern int can_request_irq(unsigned int irq, unsigned long irqflags);
+
+/* Dummy irq-chip implementations: */
+extern struct irq_chip no_irq_chip;
+extern struct irq_chip dummy_irq_chip;
+
+extern void
+irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handle, const char *name);
+
+static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handle)
+{
+ irq_set_chip_and_handler_name(irq, chip, handle, NULL);
+}
+
+extern int irq_set_percpu_devid(unsigned int irq);
+
+extern void
+__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+ const char *name);
+
+static inline void
+irq_set_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+ __irq_set_handler(irq, handle, 0, NULL);
+}
+
+/*
+ * Set a high-level chained flow handler for a given IRQ.
+ * (a chained handler is automatically enabled and set to
+ * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD)
+ */
+static inline void
+irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle)
+{
+ __irq_set_handler(irq, handle, 1, NULL);
+}
+
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);
+
+static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
+{
+ irq_modify_status(irq, 0, set);
+}
+
+static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr)
+{
+ irq_modify_status(irq, clr, 0);
+}
+
+static inline void irq_set_noprobe(unsigned int irq)
+{
+ irq_modify_status(irq, 0, IRQ_NOPROBE);
+}
+
+static inline void irq_set_probe(unsigned int irq)
+{
+ irq_modify_status(irq, IRQ_NOPROBE, 0);
+}
+
+static inline void irq_set_nothread(unsigned int irq)
+{
+ irq_modify_status(irq, 0, IRQ_NOTHREAD);
+}
+
+static inline void irq_set_thread(unsigned int irq)
+{
+ irq_modify_status(irq, IRQ_NOTHREAD, 0);
+}
+
+static inline void irq_set_nested_thread(unsigned int irq, bool nest)
+{
+ if (nest)
+ irq_set_status_flags(irq, IRQ_NESTED_THREAD);
+ else
+ irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
+}
+
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+ irq_set_status_flags(irq,
+ IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+ IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
+/* Set/get chip/data for an IRQ: */
+extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
+extern int irq_set_handler_data(unsigned int irq, void *data);
+extern int irq_set_chip_data(unsigned int irq, void *data);
+extern int irq_set_irq_type(unsigned int irq, unsigned int type);
+extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
+extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
+ struct msi_desc *entry);
+extern struct irq_data *irq_get_irq_data(unsigned int irq);
+
+static inline struct irq_chip *irq_get_chip(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ return d ? d->chip : NULL;
+}
+
+static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d)
+{
+ return d->chip;
+}
+
+static inline void *irq_get_chip_data(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ return d ? d->chip_data : NULL;
+}
+
+static inline void *irq_data_get_irq_chip_data(struct irq_data *d)
+{
+ return d->chip_data;
+}
+
+static inline void *irq_get_handler_data(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ return d ? d->handler_data : NULL;
+}
+
+static inline void *irq_data_get_irq_handler_data(struct irq_data *d)
+{
+ return d->handler_data;
+}
+
+static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ return d ? d->msi_desc : NULL;
+}
+
+static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+{
+ return d->msi_desc;
+}
+
+static inline u32 irq_get_trigger_type(unsigned int irq)
+{
+ struct irq_data *d = irq_get_irq_data(irq);
+ return d ? irqd_get_trigger_type(d) : 0;
+}
+
+unsigned int arch_dynirq_lower_bound(unsigned int from);
+
+int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
+ struct module *owner);
+
+/* use macros to avoid needing export.h for THIS_MODULE */
+#define irq_alloc_descs(irq, from, cnt, node) \
+ __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)
+
+#define irq_alloc_desc(node) \
+ irq_alloc_descs(-1, 0, 1, node)
+
+#define irq_alloc_desc_at(at, node) \
+ irq_alloc_descs(at, at, 1, node)
+
+#define irq_alloc_desc_from(from, node) \
+ irq_alloc_descs(-1, from, 1, node)
+
+#define irq_alloc_descs_from(from, cnt, node) \
+ irq_alloc_descs(-1, from, cnt, node)
+
+void irq_free_descs(unsigned int irq, unsigned int cnt);
+static inline void irq_free_desc(unsigned int irq)
+{
+ irq_free_descs(irq, 1);
+}
+
+#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
+unsigned int irq_alloc_hwirqs(int cnt, int node);
+static inline unsigned int irq_alloc_hwirq(int node)
+{
+ return irq_alloc_hwirqs(1, node);
+}
+void irq_free_hwirqs(unsigned int from, int cnt);
+static inline void irq_free_hwirq(unsigned int irq)
+{
+ return irq_free_hwirqs(irq, 1);
+}
+int arch_setup_hwirq(unsigned int irq, int node);
+void arch_teardown_hwirq(unsigned int irq);
+#endif
+
+#ifdef CONFIG_GENERIC_IRQ_LEGACY
+void irq_init_desc(unsigned int irq);
+#endif
+
+/**
+ * struct irq_chip_regs - register offsets for struct irq_chip_generic
+ * @enable: Enable register offset to reg_base
+ * @disable: Disable register offset to reg_base
+ * @mask: Mask register offset to reg_base
+ * @ack: Ack register offset to reg_base
+ * @eoi: Eoi register offset to reg_base
+ * @type: Type configuration register offset to reg_base
+ * @polarity: Polarity configuration register offset to reg_base
+ */
+struct irq_chip_regs {
+ unsigned long enable;
+ unsigned long disable;
+ unsigned long mask;
+ unsigned long ack;
+ unsigned long eoi;
+ unsigned long type;
+ unsigned long polarity;
+};
+
+/**
+ * struct irq_chip_type - Generic interrupt chip instance for a flow type
+ * @chip: The real interrupt chip which provides the callbacks
+ * @regs: Register offsets for this chip
+ * @handler: Flow handler associated with this chip
+ * @type: Chip can handle these flow types
+ * @mask_cache_priv: Cached mask register private to the chip type
+ * @mask_cache: Pointer to cached mask register
+ *
+ * An irq_chip_generic can have several instances of irq_chip_type when
+ * it requires different functions and register offsets for different
+ * flow types.
+ */
+struct irq_chip_type {
+ struct irq_chip chip;
+ struct irq_chip_regs regs;
+ irq_flow_handler_t handler;
+ u32 type;
+ u32 mask_cache_priv;
+ u32 *mask_cache;
+};
+
+/**
+ * struct irq_chip_generic - Generic irq chip data structure
+ * @lock: Lock to protect register and cache data access
+ * @reg_base: Register base address (virtual)
+ * @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
+ * @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
+ * @irq_base: Interrupt base nr for this chip
+ * @irq_cnt: Number of interrupts handled by this chip
+ * @mask_cache: Cached mask register shared between all chip types
+ * @type_cache: Cached type register
+ * @polarity_cache: Cached polarity register
+ * @wake_enabled: Interrupt can wakeup from suspend
+ * @wake_active: Interrupt is marked as a wakeup-from-suspend source
+ * @num_ct: Number of available irq_chip_type instances (usually 1)
+ * @private: Private data for non generic chip callbacks
+ * @installed: bitfield to denote installed interrupts
+ * @unused: bitfield to denote unused interrupts
+ * @domain: irq domain pointer
+ * @list: List head for keeping track of instances
+ * @chip_types: Array of interrupt irq_chip_types
+ *
+ * Note that irq_chip_generic can have multiple irq_chip_type
+ * implementations which can be associated with a particular irq line of
+ * an irq_chip_generic instance. That allows sharing and protecting
+ * state in an irq_chip_generic instance when different flow
+ * mechanisms (level/edge) need to be implemented for it.
+ */
+struct irq_chip_generic {
+ raw_spinlock_t lock;
+ void __iomem *reg_base;
+ u32 (*reg_readl)(void __iomem *addr);
+ void (*reg_writel)(u32 val, void __iomem *addr);
+ unsigned int irq_base;
+ unsigned int irq_cnt;
+ u32 mask_cache;
+ u32 type_cache;
+ u32 polarity_cache;
+ u32 wake_enabled;
+ u32 wake_active;
+ unsigned int num_ct;
+ void *private;
+ unsigned long installed;
+ unsigned long unused;
+ struct irq_domain *domain;
+ struct list_head list;
+ struct irq_chip_type chip_types[0];
+};
+
+/**
+ * enum irq_gc_flags - Initialization flags for generic irq chips
+ * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg
+ * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for
+ * irq chips which need to call irq_set_wake() on
+ * the parent irq. Usually GPIO implementations
+ * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private
+ * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask
+ * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE)
+ */
+enum irq_gc_flags {
+ IRQ_GC_INIT_MASK_CACHE = 1 << 0,
+ IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
+ IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
+ IRQ_GC_NO_MASK = 1 << 3,
+ IRQ_GC_BE_IO = 1 << 4,
+};
+
+/**
+ * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains
+ * @irqs_per_chip: Number of interrupts per chip
+ * @num_chips: Number of chips
+ * @irq_flags_to_set: IRQ* flags to set on irq setup
+ * @irq_flags_to_clear: IRQ* flags to clear on irq setup
+ * @gc_flags: Generic chip specific setup flags
+ * @gc: Array of pointers to generic interrupt chips
+ */
+struct irq_domain_chip_generic {
+ unsigned int irqs_per_chip;
+ unsigned int num_chips;
+ unsigned int irq_flags_to_clear;
+ unsigned int irq_flags_to_set;
+ enum irq_gc_flags gc_flags;
+ struct irq_chip_generic *gc[0];
+};
+
+/* Generic chip callback functions */
+void irq_gc_noop(struct irq_data *d);
+void irq_gc_mask_disable_reg(struct irq_data *d);
+void irq_gc_mask_set_bit(struct irq_data *d);
+void irq_gc_mask_clr_bit(struct irq_data *d);
+void irq_gc_unmask_enable_reg(struct irq_data *d);
+void irq_gc_ack_set_bit(struct irq_data *d);
+void irq_gc_ack_clr_bit(struct irq_data *d);
+void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_eoi(struct irq_data *d);
+int irq_gc_set_wake(struct irq_data *d, unsigned int on);
+
+/* Setup functions for irq_chip_generic */
+int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw_irq);
+struct irq_chip_generic *
+irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base,
+ void __iomem *reg_base, irq_flow_handler_t handler);
+void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ enum irq_gc_flags flags, unsigned int clr,
+ unsigned int set);
+int irq_setup_alt_chip(struct irq_data *d, unsigned int type);
+void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
+ unsigned int clr, unsigned int set);
+
+struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
+int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
+ int num_ct, const char *name,
+ irq_flow_handler_t handler,
+ unsigned int clr, unsigned int set,
+ enum irq_gc_flags flags);
+
+
+static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
+{
+ return container_of(d->chip, struct irq_chip_type, chip);
+}
+
+#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
+
+#ifdef CONFIG_SMP
+static inline void irq_gc_lock(struct irq_chip_generic *gc)
+{
+ raw_spin_lock(&gc->lock);
+}
+
+static inline void irq_gc_unlock(struct irq_chip_generic *gc)
+{
+ raw_spin_unlock(&gc->lock);
+}
+#else
+static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
+static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
+#endif
+
+static inline void irq_reg_writel(struct irq_chip_generic *gc,
+ u32 val, int reg_offset)
+{
+ if (gc->reg_writel)
+ gc->reg_writel(val, gc->reg_base + reg_offset);
+ else
+ writel(val, gc->reg_base + reg_offset);
+}
+
+static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
+ int reg_offset)
+{
+ if (gc->reg_readl)
+ return gc->reg_readl(gc->reg_base + reg_offset);
+ else
+ return readl(gc->reg_base + reg_offset);
+}
+
+#endif /* _LINUX_IRQ_H */
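
As a rough sketch of how the generic-chip helpers above fit together (assumption-heavy: the chip name "example", the register offsets, and the init function are all hypothetical; only APIs declared in this header are used):

    static void example_gc_init(void __iomem *regs, unsigned int irq_base)
    {
            struct irq_chip_generic *gc;
            struct irq_chip_type *ct;

            /* One chip type, 32 interrupts starting at irq_base, level flow. */
            gc = irq_alloc_generic_chip("example", 1, irq_base, regs,
                                        handle_level_irq);
            if (!gc)
                    return;

            ct = gc->chip_types;
            ct->regs.mask = 0x10;                   /* hypothetical offsets */
            ct->regs.ack = 0x14;
            ct->chip.irq_mask = irq_gc_mask_set_bit;
            ct->chip.irq_unmask = irq_gc_mask_clr_bit;
            ct->chip.irq_ack = irq_gc_ack_set_bit;

            /* Initialize the mask cache and clear IRQ_NOREQUEST on the descriptors. */
            irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
                                   IRQ_NOREQUEST, 0);
    }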
diff --git a/include/linux/irq_cpustat.h b/include/linux/irq_cpustat.h
new file mode 100644
index 000000000..77e4bac29
--- /dev/null
+++ b/include/linux/irq_cpustat.h
@@ -0,0 +1,31 @@
+#ifndef __irq_cpustat_h
+#define __irq_cpustat_h
+
+/*
+ * Contains default mappings for irq_cpustat_t, used by almost every
+ * architecture. Some architectures (like s390) have per-CPU hardware
+ * pages and define their own mappings for irq_stat.
+ *
+ * Keith Owens <kaos@ocs.com.au> July 2000.
+ */
+
+
+/*
+ * Simple wrappers reducing source bloat. Define all irq_stat fields
+ * here, even ones that are arch dependent. That way we get common
+ * definitions instead of differing sets for each arch.
+ */
+
+#ifndef __ARCH_IRQ_STAT
+extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */
+#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
+#endif
+
+ /* arch independent irq_stat fields */
+#define local_softirq_pending() \
+ __IRQ_STAT(smp_processor_id(), __softirq_pending)
+
+ /* arch dependent irq_stat fields */
+#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */
+
+#endif /* __irq_cpustat_h */
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
new file mode 100644
index 000000000..47b9ebd4a
--- /dev/null
+++ b/include/linux/irq_work.h
@@ -0,0 +1,54 @@
+#ifndef _LINUX_IRQ_WORK_H
+#define _LINUX_IRQ_WORK_H
+
+#include <linux/llist.h>
+
+/*
+ * An entry can be in one of four states:
+ *
+ * free NULL, 0 -> {claimed} : free to be used
+ * claimed NULL, 3 -> {pending} : claimed to be enqueued
+ * pending next, 3 -> {busy} : queued, pending callback
+ * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed
+ */
+
+#define IRQ_WORK_PENDING 1UL
+#define IRQ_WORK_BUSY 2UL
+#define IRQ_WORK_FLAGS 3UL
+#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
+
+struct irq_work {
+ unsigned long flags;
+ struct llist_node llnode;
+ void (*func)(struct irq_work *);
+};
+
+static inline
+void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
+{
+ work->flags = 0;
+ work->func = func;
+}
+
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+
+bool irq_work_queue(struct irq_work *work);
+
+#ifdef CONFIG_SMP
+bool irq_work_queue_on(struct irq_work *work, int cpu);
+#endif
+
+void irq_work_tick(void);
+void irq_work_sync(struct irq_work *work);
+
+#ifdef CONFIG_IRQ_WORK
+#include <asm/irq_work.h>
+
+void irq_work_run(void);
+bool irq_work_needs_cpu(void);
+#else
+static inline bool irq_work_needs_cpu(void) { return false; }
+static inline void irq_work_run(void) { }
+#endif
+
+#endif /* _LINUX_IRQ_WORK_H */
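
A small sketch of the intended usage (the callback, its message, and the wrapper function are illustrative only; pr_info() from linux/printk.h is assumed):

    static void example_irq_work_fn(struct irq_work *work)
    {
            /* Runs later from the irq_work interrupt (or the tick for lazy work). */
            pr_info("deferred from NMI/hard-irq context\n");
    }

    static DEFINE_IRQ_WORK(example_work, example_irq_work_fn);

    static void example_raise_from_hardirq(void)
    {
            /* Safe from NMI or hard-irq context; returns false if already pending. */
            irq_work_queue(&example_work);
    }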
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
new file mode 100644
index 000000000..14d79131f
--- /dev/null
+++ b/include/linux/irqchip.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2012 Thomas Petazzoni
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_IRQCHIP_H
+#define _LINUX_IRQCHIP_H
+
+#ifdef CONFIG_IRQCHIP
+void irqchip_init(void);
+#else
+static inline void irqchip_init(void) {}
+#endif
+
+#endif
diff --git a/include/linux/irqchip/arm-gic-acpi.h b/include/linux/irqchip/arm-gic-acpi.h
new file mode 100644
index 000000000..de3419ed3
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-acpi.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014, Linaro Ltd.
+ * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARM_GIC_ACPI_H_
+#define ARM_GIC_ACPI_H_
+
+#ifdef CONFIG_ACPI
+
+/*
+ * These sizes are hard-coded because the MADT (unlike FDT) does not
+ * provide the memory size. That is fine, since the sizes can be
+ * inferred from the GIC specification.
+ */
+#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
+#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
+
+struct acpi_table_header;
+
+int gic_v2_acpi_init(struct acpi_table_header *table);
+void acpi_gic_init(void);
+#else
+static inline void acpi_gic_init(void) { }
+#endif
+
+#endif /* ARM_GIC_ACPI_H_ */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
new file mode 100644
index 000000000..ffbc034c8
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_H
+
+#include <asm/sysreg.h>
+
+/*
+ * Distributor registers. We assume we're running non-secure, with ARE
+ * being set. Secure-only and non-ARE registers are not described.
+ */
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IIDR 0x0008
+#define GICD_STATUSR 0x0010
+#define GICD_SETSPI_NSR 0x0040
+#define GICD_CLRSPI_NSR 0x0048
+#define GICD_SETSPI_SR 0x0050
+#define GICD_CLRSPI_SR 0x0058
+#define GICD_SEIR 0x0068
+#define GICD_IGROUPR 0x0080
+#define GICD_ISENABLER 0x0100
+#define GICD_ICENABLER 0x0180
+#define GICD_ISPENDR 0x0200
+#define GICD_ICPENDR 0x0280
+#define GICD_ISACTIVER 0x0300
+#define GICD_ICACTIVER 0x0380
+#define GICD_IPRIORITYR 0x0400
+#define GICD_ICFGR 0x0C00
+#define GICD_IGRPMODR 0x0D00
+#define GICD_NSACR 0x0E00
+#define GICD_IROUTER 0x6000
+#define GICD_IDREGS 0xFFD0
+#define GICD_PIDR2 0xFFE8
+
+/*
+ * Those registers are actually from GICv2, but the spec demands that they
+ * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3).
+ */
+#define GICD_ITARGETSR 0x0800
+#define GICD_SGIR 0x0F00
+#define GICD_CPENDSGIR 0x0F10
+#define GICD_SPENDSGIR 0x0F20
+
+#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_DS (1U << 6)
+#define GICD_CTLR_ARE_NS (1U << 4)
+#define GICD_CTLR_ENABLE_G1A (1U << 1)
+#define GICD_CTLR_ENABLE_G1 (1U << 0)
+
+/*
+ * In systems with a single security state (what we emulate in KVM)
+ * the meaning of the interrupt group enable bits is slightly different
+ */
+#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
+#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
+
+#define GICD_TYPER_LPIS (1U << 17)
+#define GICD_TYPER_MBIS (1U << 16)
+
+#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_TYPER_LPIS (1U << 17)
+
+#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
+#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
+
+#define GIC_PIDR2_ARCH_MASK 0xf0
+#define GIC_PIDR2_ARCH_GICv3 0x30
+#define GIC_PIDR2_ARCH_GICv4 0x40
+
+#define GIC_V3_DIST_SIZE 0x10000
+
+/*
+ * Re-Distributor registers, offsets from RD_base
+ */
+#define GICR_CTLR GICD_CTLR
+#define GICR_IIDR 0x0004
+#define GICR_TYPER 0x0008
+#define GICR_STATUSR GICD_STATUSR
+#define GICR_WAKER 0x0014
+#define GICR_SETLPIR 0x0040
+#define GICR_CLRLPIR 0x0048
+#define GICR_SEIR GICD_SEIR
+#define GICR_PROPBASER 0x0070
+#define GICR_PENDBASER 0x0078
+#define GICR_INVLPIR 0x00A0
+#define GICR_INVALLR 0x00B0
+#define GICR_SYNCR 0x00C0
+#define GICR_MOVLPIR 0x0100
+#define GICR_MOVALLR 0x0110
+#define GICR_IDREGS GICD_IDREGS
+#define GICR_PIDR2 GICD_PIDR2
+
+#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
+
+#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
+
+#define GICR_WAKER_ProcessorSleep (1U << 1)
+#define GICR_WAKER_ChildrenAsleep (1U << 2)
+
+#define GICR_PROPBASER_NonShareable (0U << 10)
+#define GICR_PROPBASER_InnerShareable (1U << 10)
+#define GICR_PROPBASER_OuterShareable (2U << 10)
+#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PROPBASER_nCnB (0U << 7)
+#define GICR_PROPBASER_nC (1U << 7)
+#define GICR_PROPBASER_RaWt (2U << 7)
+#define GICR_PROPBASER_RaWb (3U << 7)
+#define GICR_PROPBASER_WaWt (4U << 7)
+#define GICR_PROPBASER_WaWb (5U << 7)
+#define GICR_PROPBASER_RaWaWt (6U << 7)
+#define GICR_PROPBASER_RaWaWb (7U << 7)
+#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
+#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+
+#define GICR_PENDBASER_NonShareable (0U << 10)
+#define GICR_PENDBASER_InnerShareable (1U << 10)
+#define GICR_PENDBASER_OuterShareable (2U << 10)
+#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
+#define GICR_PENDBASER_nCnB (0U << 7)
+#define GICR_PENDBASER_nC (1U << 7)
+#define GICR_PENDBASER_RaWt (2U << 7)
+#define GICR_PENDBASER_RaWb (3U << 7)
+#define GICR_PENDBASER_WaWt (4U << 7)
+#define GICR_PENDBASER_WaWb (5U << 7)
+#define GICR_PENDBASER_RaWaWt (6U << 7)
+#define GICR_PENDBASER_RaWaWb (7U << 7)
+#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
+
+/*
+ * Re-Distributor registers, offsets from SGI_base
+ */
+#define GICR_IGROUPR0 GICD_IGROUPR
+#define GICR_ISENABLER0 GICD_ISENABLER
+#define GICR_ICENABLER0 GICD_ICENABLER
+#define GICR_ISPENDR0 GICD_ISPENDR
+#define GICR_ICPENDR0 GICD_ICPENDR
+#define GICR_ISACTIVER0 GICD_ISACTIVER
+#define GICR_ICACTIVER0 GICD_ICACTIVER
+#define GICR_IPRIORITYR0 GICD_IPRIORITYR
+#define GICR_ICFGR0 GICD_ICFGR
+#define GICR_IGRPMODR0 GICD_IGRPMODR
+#define GICR_NSACR GICD_NSACR
+
+#define GICR_TYPER_PLPIS (1U << 0)
+#define GICR_TYPER_VLPIS (1U << 1)
+#define GICR_TYPER_LAST (1U << 4)
+
+#define GIC_V3_REDIST_SIZE 0x20000
+
+#define LPI_PROP_GROUP1 (1 << 1)
+#define LPI_PROP_ENABLED (1 << 0)
+
+/*
+ * ITS registers, offsets from ITS_base
+ */
+#define GITS_CTLR 0x0000
+#define GITS_IIDR 0x0004
+#define GITS_TYPER 0x0008
+#define GITS_CBASER 0x0080
+#define GITS_CWRITER 0x0088
+#define GITS_CREADR 0x0090
+#define GITS_BASER 0x0100
+#define GITS_PIDR2 GICR_PIDR2
+
+#define GITS_TRANSLATER 0x10040
+
+#define GITS_CTLR_ENABLE (1U << 0)
+#define GITS_CTLR_QUIESCENT (1U << 31)
+
+#define GITS_TYPER_DEVBITS_SHIFT 13
+#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_PTA (1UL << 19)
+
+#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_nCnB (0UL << 59)
+#define GITS_CBASER_nC (1UL << 59)
+#define GITS_CBASER_RaWt (2UL << 59)
+#define GITS_CBASER_RaWb (3UL << 59)
+#define GITS_CBASER_WaWt (4UL << 59)
+#define GITS_CBASER_WaWb (5UL << 59)
+#define GITS_CBASER_RaWaWt (6UL << 59)
+#define GITS_CBASER_RaWaWb (7UL << 59)
+#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
+#define GITS_CBASER_NonShareable (0UL << 10)
+#define GITS_CBASER_InnerShareable (1UL << 10)
+#define GITS_CBASER_OuterShareable (2UL << 10)
+#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
+
+#define GITS_BASER_NR_REGS 8
+
+#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_nCnB (0UL << 59)
+#define GITS_BASER_nC (1UL << 59)
+#define GITS_BASER_RaWt (2UL << 59)
+#define GITS_BASER_RaWb (3UL << 59)
+#define GITS_BASER_WaWt (4UL << 59)
+#define GITS_BASER_WaWb (5UL << 59)
+#define GITS_BASER_RaWaWt (6UL << 59)
+#define GITS_BASER_RaWaWb (7UL << 59)
+#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
+#define GITS_BASER_TYPE_SHIFT (56)
+#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_NonShareable (0UL << 10)
+#define GITS_BASER_InnerShareable (1UL << 10)
+#define GITS_BASER_OuterShareable (2UL << 10)
+#define GITS_BASER_SHAREABILITY_SHIFT (10)
+#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
+#define GITS_BASER_PAGE_SIZE_SHIFT (8)
+#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
+
+#define GITS_BASER_TYPE_NONE 0
+#define GITS_BASER_TYPE_DEVICE 1
+#define GITS_BASER_TYPE_VCPU 2
+#define GITS_BASER_TYPE_CPU 3
+#define GITS_BASER_TYPE_COLLECTION 4
+#define GITS_BASER_TYPE_RESERVED5 5
+#define GITS_BASER_TYPE_RESERVED6 6
+#define GITS_BASER_TYPE_RESERVED7 7
+
+/*
+ * ITS commands
+ */
+#define GITS_CMD_MAPD 0x08
+#define GITS_CMD_MAPC 0x09
+#define GITS_CMD_MAPVI 0x0a
+#define GITS_CMD_MOVI 0x01
+#define GITS_CMD_DISCARD 0x0f
+#define GITS_CMD_INV 0x0c
+#define GITS_CMD_MOVALL 0x0e
+#define GITS_CMD_INVALL 0x0d
+#define GITS_CMD_INT 0x03
+#define GITS_CMD_CLEAR 0x04
+#define GITS_CMD_SYNC 0x05
+
+/*
+ * CPU interface registers
+ */
+#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
+#define ICC_CTLR_EL1_EOImode_drop (1U << 1)
+#define ICC_SRE_EL1_SRE (1U << 0)
+
+/*
+ * Hypervisor interface registers (SRE only)
+ */
+#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)
+
+#define ICH_LR_EOI (1UL << 41)
+#define ICH_LR_GROUP (1UL << 60)
+#define ICH_LR_STATE (3UL << 62)
+#define ICH_LR_PENDING_BIT (1UL << 62)
+#define ICH_LR_ACTIVE_BIT (1UL << 63)
+
+#define ICH_MISR_EOI (1 << 0)
+#define ICH_MISR_U (1 << 1)
+
+#define ICH_HCR_EN (1 << 0)
+#define ICH_HCR_UIE (1 << 1)
+
+#define ICH_VMCR_CTLR_SHIFT 0
+#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT 18
+#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT 21
+#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT 24
+#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
+
+#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
+#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
+#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
+#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
+#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+
+#define ICC_IAR1_EL1_SPURIOUS 0x3ff
+
+#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
+
+#define ICC_SRE_EL2_SRE (1 << 0)
+#define ICC_SRE_EL2_ENABLE (1 << 3)
+
+#define ICC_SGI1R_TARGET_LIST_SHIFT 0
+#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
+#define ICC_SGI1R_AFFINITY_1_SHIFT 16
+#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
+#define ICC_SGI1R_SGI_ID_SHIFT 24
+#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
+#define ICC_SGI1R_AFFINITY_2_SHIFT 32
+#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
+#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
+#define ICC_SGI1R_AFFINITY_3_SHIFT 48
+#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
+
+/*
+ * System register definitions
+ */
+#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
+#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
+#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
+#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
+#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
+#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
+#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
+
+#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
+#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
+
+#define ICH_LR0_EL2 __LR0_EL2(0)
+#define ICH_LR1_EL2 __LR0_EL2(1)
+#define ICH_LR2_EL2 __LR0_EL2(2)
+#define ICH_LR3_EL2 __LR0_EL2(3)
+#define ICH_LR4_EL2 __LR0_EL2(4)
+#define ICH_LR5_EL2 __LR0_EL2(5)
+#define ICH_LR6_EL2 __LR0_EL2(6)
+#define ICH_LR7_EL2 __LR0_EL2(7)
+#define ICH_LR8_EL2 __LR8_EL2(0)
+#define ICH_LR9_EL2 __LR8_EL2(1)
+#define ICH_LR10_EL2 __LR8_EL2(2)
+#define ICH_LR11_EL2 __LR8_EL2(3)
+#define ICH_LR12_EL2 __LR8_EL2(4)
+#define ICH_LR13_EL2 __LR8_EL2(5)
+#define ICH_LR14_EL2 __LR8_EL2(6)
+#define ICH_LR15_EL2 __LR8_EL2(7)
+
+#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
+#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
+#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
+#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
+#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
+
+#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
+#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
+#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
+#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
+#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+/*
+ * We need a value to serve as an irq-type for LPIs. Choose one that will
+ * hopefully pique the interest of the reviewer.
+ */
+#define GIC_IRQ_TYPE_LPI 0xa110c8ed
+
+struct rdists {
+ struct {
+ void __iomem *rd_base;
+ struct page *pend_page;
+ phys_addr_t phys_base;
+ } __percpu *rdist;
+ struct page *prop_page;
+ int id_bits;
+ u64 flags;
+};
+
+static inline void gic_write_eoir(u64 irq)
+{
+ asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
+ isb();
+}
+
+struct irq_domain;
+int its_cpu_init(void);
+int its_init(struct device_node *node, struct rdists *rdists,
+ struct irq_domain *domain);
+
+#endif
+
+#endif
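
For illustration, a sketch of how the GICD_TYPER decode macros above are typically applied to an already-mapped distributor (the mapping and the function name are hypothetical; readl_relaxed() from linux/io.h and pr_info() are assumed):

    static void example_report_dist(void __iomem *dist_base)
    {
            u32 typer = readl_relaxed(dist_base + GICD_TYPER);

            pr_info("GICv3: %u interrupt IDs, %u ID bits, LPIs %ssupported\n",
                    GICD_TYPER_IRQS(typer), GICD_TYPER_ID_BITS(typer),
                    (typer & GICD_TYPER_LPIS) ? "" : "not ");
    }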
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
new file mode 100644
index 000000000..9de976b4f
--- /dev/null
+++ b/include/linux/irqchip/arm-gic.h
@@ -0,0 +1,118 @@
+/*
+ * include/linux/irqchip/arm-gic.h
+ *
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_H
+#define __LINUX_IRQCHIP_ARM_GIC_H
+
+#define GIC_CPU_CTRL 0x00
+#define GIC_CPU_PRIMASK 0x04
+#define GIC_CPU_BINPOINT 0x08
+#define GIC_CPU_INTACK 0x0c
+#define GIC_CPU_EOI 0x10
+#define GIC_CPU_RUNNINGPRI 0x14
+#define GIC_CPU_HIGHPRI 0x18
+#define GIC_CPU_ALIAS_BINPOINT 0x1c
+#define GIC_CPU_ACTIVEPRIO 0xd0
+#define GIC_CPU_IDENT 0xfc
+
+#define GICC_ENABLE 0x1
+#define GICC_INT_PRI_THRESHOLD 0xf0
+#define GICC_IAR_INT_ID_MASK 0x3ff
+#define GICC_INT_SPURIOUS 1023
+#define GICC_DIS_BYPASS_MASK 0x1e0
+
+#define GIC_DIST_CTRL 0x000
+#define GIC_DIST_CTR 0x004
+#define GIC_DIST_IGROUP 0x080
+#define GIC_DIST_ENABLE_SET 0x100
+#define GIC_DIST_ENABLE_CLEAR 0x180
+#define GIC_DIST_PENDING_SET 0x200
+#define GIC_DIST_PENDING_CLEAR 0x280
+#define GIC_DIST_ACTIVE_SET 0x300
+#define GIC_DIST_ACTIVE_CLEAR 0x380
+#define GIC_DIST_PRI 0x400
+#define GIC_DIST_TARGET 0x800
+#define GIC_DIST_CONFIG 0xc00
+#define GIC_DIST_SOFTINT 0xf00
+#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
+#define GIC_DIST_SGI_PENDING_SET 0xf20
+
+#define GICD_ENABLE 0x1
+#define GICD_DISABLE 0x0
+#define GICD_INT_ACTLOW_LVLTRIG 0x0
+#define GICD_INT_EN_CLR_X32 0xffffffff
+#define GICD_INT_EN_SET_SGI 0x0000ffff
+#define GICD_INT_EN_CLR_PPI 0xffff0000
+#define GICD_INT_DEF_PRI 0xa0
+#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
+ (GICD_INT_DEF_PRI << 16) |\
+ (GICD_INT_DEF_PRI << 8) |\
+ GICD_INT_DEF_PRI)
+
+#define GICH_HCR 0x0
+#define GICH_VTR 0x4
+#define GICH_VMCR 0x8
+#define GICH_MISR 0x10
+#define GICH_EISR0 0x20
+#define GICH_EISR1 0x24
+#define GICH_ELRSR0 0x30
+#define GICH_ELRSR1 0x34
+#define GICH_APR 0xf0
+#define GICH_LR0 0x100
+
+#define GICH_HCR_EN (1 << 0)
+#define GICH_HCR_UIE (1 << 1)
+
+#define GICH_LR_VIRTUALID (0x3ff << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT (10)
+#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_STATE (3 << 28)
+#define GICH_LR_PENDING_BIT (1 << 28)
+#define GICH_LR_ACTIVE_BIT (1 << 29)
+#define GICH_LR_EOI (1 << 19)
+
+#define GICH_VMCR_CTRL_SHIFT 0
+#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_PRIMASK_SHIFT 27
+#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT)
+#define GICH_VMCR_BINPOINT_SHIFT 21
+#define GICH_VMCR_BINPOINT_MASK (0x7 << GICH_VMCR_BINPOINT_SHIFT)
+#define GICH_VMCR_ALIAS_BINPOINT_SHIFT 18
+#define GICH_VMCR_ALIAS_BINPOINT_MASK (0x7 << GICH_VMCR_ALIAS_BINPOINT_SHIFT)
+
+#define GICH_MISR_EOI (1 << 0)
+#define GICH_MISR_U (1 << 1)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/irqdomain.h>
+
+struct device_node;
+
+void gic_set_irqchip_flags(unsigned long flags);
+void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
+ u32 offset, struct device_node *);
+void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
+void gic_cpu_if_down(void);
+
+static inline void gic_init(unsigned int nr, int start,
+ void __iomem *dist , void __iomem *cpu)
+{
+ gic_init_bases(nr, start, dist, cpu, 0, NULL);
+}
+
+int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
+
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
+int gic_get_cpu_id(unsigned int cpu);
+void gic_migrate_target(unsigned int new_cpu_id);
+unsigned long gic_get_sgir_physaddr(void);
+
+#endif /* __ASSEMBLY__ */
+#endif
diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h
new file mode 100644
index 000000000..ba46c794b
--- /dev/null
+++ b/include/linux/irqchip/arm-vic.h
@@ -0,0 +1,38 @@
+/*
+ * arch/arm/include/asm/hardware/vic.h
+ *
+ * Copyright (c) ARM Limited 2003. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __ASM_ARM_HARDWARE_VIC_H
+#define __ASM_ARM_HARDWARE_VIC_H
+
+#include <linux/types.h>
+
+#define VIC_RAW_STATUS 0x08
+#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */
+#define VIC_INT_ENABLE_CLEAR 0x14
+
+struct device_node;
+struct pt_regs;
+
+void __vic_init(void __iomem *base, int parent_irq, int irq_start,
+ u32 vic_sources, u32 resume_sources, struct device_node *node);
+void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
+int vic_init_cascaded(void __iomem *base, unsigned int parent_irq,
+ u32 vic_sources, u32 resume_sources);
+
+#endif
diff --git a/include/linux/irqchip/chained_irq.h b/include/linux/irqchip/chained_irq.h
new file mode 100644
index 000000000..adf4c30f3
--- /dev/null
+++ b/include/linux/irqchip/chained_irq.h
@@ -0,0 +1,52 @@
+/*
+ * Chained IRQ handlers support.
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __IRQCHIP_CHAINED_IRQ_H
+#define __IRQCHIP_CHAINED_IRQ_H
+
+#include <linux/irq.h>
+
+/*
+ * Entry/exit functions for chained handlers where the primary IRQ chip
+ * may implement either fasteoi or level-trigger flow control.
+ */
+static inline void chained_irq_enter(struct irq_chip *chip,
+ struct irq_desc *desc)
+{
+ /* FastEOI controllers require no action on entry. */
+ if (chip->irq_eoi)
+ return;
+
+ if (chip->irq_mask_ack) {
+ chip->irq_mask_ack(&desc->irq_data);
+ } else {
+ chip->irq_mask(&desc->irq_data);
+ if (chip->irq_ack)
+ chip->irq_ack(&desc->irq_data);
+ }
+}
+
+static inline void chained_irq_exit(struct irq_chip *chip,
+ struct irq_desc *desc)
+{
+ if (chip->irq_eoi)
+ chip->irq_eoi(&desc->irq_data);
+ else
+ chip->irq_unmask(&desc->irq_data);
+}
+
+#endif /* __IRQCHIP_CHAINED_IRQ_H */
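
A sketch of a typical chained demultiplex handler built on the two helpers above (the mux structure, register offset, and handler name are hypothetical; irq_desc_get_chip(), irq_desc_get_handler_data(), generic_handle_irq() and irq_find_mapping() are assumed from linux/irq.h, linux/irqdesc.h and linux/irqdomain.h):

    struct example_mux {
            void __iomem *base;
            struct irq_domain *domain;
    };

    #define EXAMPLE_PENDING 0x10    /* hypothetical pending register */

    static void example_mux_handler(unsigned int irq, struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);
            struct example_mux *mux = irq_desc_get_handler_data(desc);
            unsigned long pending;
            int hwirq;

            chained_irq_enter(chip, desc);

            pending = readl_relaxed(mux->base + EXAMPLE_PENDING);
            for_each_set_bit(hwirq, &pending, 32)
                    generic_handle_irq(irq_find_mapping(mux->domain, hwirq));

            chained_irq_exit(chip, desc);
    }

    /* Registered with irq_set_handler_data() plus irq_set_chained_handler(). */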
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
new file mode 100644
index 000000000..2e3d1afeb
--- /dev/null
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -0,0 +1,30 @@
+/**
+ * irq-omap-intc.h - INTC Idle Functions
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
+#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
+
+void omap3_init_irq(void);
+
+int omap_irq_pending(void);
+void omap_intc_save_context(void);
+void omap_intc_restore_context(void);
+void omap3_intc_suspend(void);
+void omap3_intc_prepare_idle(void);
+void omap3_intc_resume_idle(void);
+
+#endif /* __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H */
diff --git a/include/linux/irqchip/metag-ext.h b/include/linux/irqchip/metag-ext.h
new file mode 100644
index 000000000..697af0fe7
--- /dev/null
+++ b/include/linux/irqchip/metag-ext.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012 Imagination Technologies
+ */
+
+#ifndef _LINUX_IRQCHIP_METAG_EXT_H_
+#define _LINUX_IRQCHIP_METAG_EXT_H_
+
+struct irq_data;
+struct platform_device;
+
+/* called from core irq code at init */
+int init_external_IRQ(void);
+
+/*
+ * called from SoC init_irq() callback to dynamically indicate the lack of
+ * HWMASKEXT registers.
+ */
+void meta_intc_no_mask(void);
+
+/*
+ * These allow SoCs to specialise the interrupt controller from their init_irq
+ * callbacks.
+ */
+
+extern struct irq_chip meta_intc_edge_chip;
+extern struct irq_chip meta_intc_level_chip;
+
+/* this should be called in the mask callback */
+void meta_intc_mask_irq_simple(struct irq_data *data);
+/* this should be called in the unmask callback */
+void meta_intc_unmask_irq_simple(struct irq_data *data);
+
+#endif /* _LINUX_IRQCHIP_METAG_EXT_H_ */
diff --git a/include/linux/irqchip/metag.h b/include/linux/irqchip/metag.h
new file mode 100644
index 000000000..4ebdfb310
--- /dev/null
+++ b/include/linux/irqchip/metag.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2011 Imagination Technologies
+ */
+
+#ifndef _LINUX_IRQCHIP_METAG_H_
+#define _LINUX_IRQCHIP_METAG_H_
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_METAG_PERFCOUNTER_IRQS
+extern int init_internal_IRQ(void);
+extern int internal_irq_map(unsigned int hw);
+#else
+static inline int init_internal_IRQ(void)
+{
+ return 0;
+}
+static inline int internal_irq_map(unsigned int hw)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* _LINUX_IRQCHIP_METAG_H_ */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
new file mode 100644
index 000000000..9b1ad3734
--- /dev/null
+++ b/include/linux/irqchip/mips-gic.h
@@ -0,0 +1,257 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 07 MIPS Technologies, Inc.
+ */
+#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
+#define __LINUX_IRQCHIP_MIPS_GIC_H
+
+#include <linux/clocksource.h>
+
+#define GIC_MAX_INTRS 256
+
+/* Constants */
+#define GIC_POL_POS 1
+#define GIC_POL_NEG 0
+#define GIC_TRIG_EDGE 1
+#define GIC_TRIG_LEVEL 0
+#define GIC_TRIG_DUAL_ENABLE 1
+#define GIC_TRIG_DUAL_DISABLE 0
+
+#define MSK(n) ((1 << (n)) - 1)
+
+/* Accessors */
+#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
+
+/* GIC Address Space */
+#define SHARED_SECTION_OFS 0x0000
+#define SHARED_SECTION_SIZE 0x8000
+#define VPE_LOCAL_SECTION_OFS 0x8000
+#define VPE_LOCAL_SECTION_SIZE 0x4000
+#define VPE_OTHER_SECTION_OFS 0xc000
+#define VPE_OTHER_SECTION_SIZE 0x4000
+#define USM_VISIBLE_SECTION_OFS 0x10000
+#define USM_VISIBLE_SECTION_SIZE 0x10000
+
+/* Register Map for Shared Section */
+
+#define GIC_SH_CONFIG_OFS 0x0000
+
+/* Shared Global Counter */
+#define GIC_SH_COUNTER_31_00_OFS 0x0010
+#define GIC_SH_COUNTER_63_32_OFS 0x0014
+#define GIC_SH_REVISIONID_OFS 0x0020
+
+/* Convert an interrupt number to a byte offset/bit for multi-word registers */
+#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
+#define GIC_INTR_BIT(intr) ((intr) % 32)
+
+/* Polarity : Reset Value is always 0 */
+#define GIC_SH_SET_POLARITY_OFS 0x0100
+
+/* Triggering : Reset Value is always 0 */
+#define GIC_SH_SET_TRIGGER_OFS 0x0180
+
+/* Dual edge triggering : Reset Value is always 0 */
+#define GIC_SH_SET_DUAL_OFS 0x0200
+
+/* Set/Clear corresponding bit in Edge Detect Register */
+#define GIC_SH_WEDGE_OFS 0x0280
+
+/* Mask manipulation */
+#define GIC_SH_RMASK_OFS 0x0300
+#define GIC_SH_SMASK_OFS 0x0380
+
+/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
+#define GIC_SH_MASK_OFS 0x0400
+
+/* Pending Global Interrupts (RO) */
+#define GIC_SH_PEND_OFS 0x0480
+
+/* Maps Interrupt X to a Pin */
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
+#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr))
+
+/* Maps Interrupt X to a VPE */
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
+#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
+ ((32 * (intr)) + (((vpe) / 32) * 4))
+#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
+
+/* Register Map for Local Section */
+#define GIC_VPE_CTL_OFS 0x0000
+#define GIC_VPE_PEND_OFS 0x0004
+#define GIC_VPE_MASK_OFS 0x0008
+#define GIC_VPE_RMASK_OFS 0x000c
+#define GIC_VPE_SMASK_OFS 0x0010
+#define GIC_VPE_WD_MAP_OFS 0x0040
+#define GIC_VPE_COMPARE_MAP_OFS 0x0044
+#define GIC_VPE_TIMER_MAP_OFS 0x0048
+#define GIC_VPE_FDC_MAP_OFS 0x004c
+#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
+#define GIC_VPE_SWINT0_MAP_OFS 0x0054
+#define GIC_VPE_SWINT1_MAP_OFS 0x0058
+#define GIC_VPE_OTHER_ADDR_OFS 0x0080
+#define GIC_VPE_WD_CONFIG0_OFS 0x0090
+#define GIC_VPE_WD_COUNT0_OFS 0x0094
+#define GIC_VPE_WD_INITIAL0_OFS 0x0098
+#define GIC_VPE_COMPARE_LO_OFS 0x00a0
+#define GIC_VPE_COMPARE_HI_OFS 0x00a4
+
+#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
+#define GIC_VPE_EIC_SS(intr) (4 * (intr))
+
+#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800
+#define GIC_VPE_EIC_VEC(intr) (4 * (intr))
+
+#define GIC_VPE_TENABLE_NMI_OFS 0x1000
+#define GIC_VPE_TENABLE_YQ_OFS 0x1004
+#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
+#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
+
+/* User Mode Visible Section Register Map */
+#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
+#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
+
+/* Masks */
+#define GIC_SH_CONFIG_COUNTSTOP_SHF 28
+#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
+
+#define GIC_SH_CONFIG_COUNTBITS_SHF 24
+#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
+
+#define GIC_SH_CONFIG_NUMINTRS_SHF 16
+#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
+
+#define GIC_SH_CONFIG_NUMVPES_SHF 0
+#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
+
+#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31))
+#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31))
+
+#define GIC_MAP_TO_PIN_SHF 31
+#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
+#define GIC_MAP_TO_NMI_SHF 30
+#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF)
+#define GIC_MAP_TO_YQ_SHF 29
+#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF)
+#define GIC_MAP_SHF 0
+#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
+
+/* GIC_VPE_CTL Masks */
+#define GIC_VPE_CTL_FDC_RTBL_SHF 4
+#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
+#define GIC_VPE_CTL_SWINT_RTBL_SHF 3
+#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
+#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
+#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
+#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
+#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
+#define GIC_VPE_CTL_EIC_MODE_SHF 0
+#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
+
+/* GIC_VPE_PEND Masks */
+#define GIC_VPE_PEND_WD_SHF 0
+#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF)
+#define GIC_VPE_PEND_CMP_SHF 1
+#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF)
+#define GIC_VPE_PEND_TIMER_SHF 2
+#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
+#define GIC_VPE_PEND_PERFCOUNT_SHF 3
+#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
+#define GIC_VPE_PEND_SWINT0_SHF 4
+#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
+#define GIC_VPE_PEND_SWINT1_SHF 5
+#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
+#define GIC_VPE_PEND_FDC_SHF 6
+#define GIC_VPE_PEND_FDC_MSK (MSK(1) << GIC_VPE_PEND_FDC_SHF)
+
+/* GIC_VPE_RMASK Masks */
+#define GIC_VPE_RMASK_WD_SHF 0
+#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF)
+#define GIC_VPE_RMASK_CMP_SHF 1
+#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
+#define GIC_VPE_RMASK_TIMER_SHF 2
+#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
+#define GIC_VPE_RMASK_PERFCNT_SHF 3
+#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
+#define GIC_VPE_RMASK_SWINT0_SHF 4
+#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
+#define GIC_VPE_RMASK_SWINT1_SHF 5
+#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
+#define GIC_VPE_RMASK_FDC_SHF 6
+#define GIC_VPE_RMASK_FDC_MSK (MSK(1) << GIC_VPE_RMASK_FDC_SHF)
+
+/* GIC_VPE_SMASK Masks */
+#define GIC_VPE_SMASK_WD_SHF 0
+#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF)
+#define GIC_VPE_SMASK_CMP_SHF 1
+#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
+#define GIC_VPE_SMASK_TIMER_SHF 2
+#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
+#define GIC_VPE_SMASK_PERFCNT_SHF 3
+#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
+#define GIC_VPE_SMASK_SWINT0_SHF 4
+#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
+#define GIC_VPE_SMASK_SWINT1_SHF 5
+#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
+#define GIC_VPE_SMASK_FDC_SHF 6
+#define GIC_VPE_SMASK_FDC_MSK (MSK(1) << GIC_VPE_SMASK_FDC_SHF)
+
+/* GIC nomenclature for Core Interrupt Pins. */
+#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
+#define GIC_CPU_INT1 1 /* . */
+#define GIC_CPU_INT2 2 /* . */
+#define GIC_CPU_INT3 3 /* . */
+#define GIC_CPU_INT4 4 /* . */
+#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
+
+/* Add 2 to convert GIC CPU pin to core interrupt */
+#define GIC_CPU_PIN_OFFSET 2
+
+/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
+#define GIC_CPU_TO_VEC_OFFSET 2
+
+/* When an interrupt is mapped to pin X, the GIC generates vector (X+1). */
+#define GIC_PIN_TO_VEC_OFFSET 1
+
+/* Local GIC interrupts. */
+#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
+#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */
+#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */
+#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */
+#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */
+#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */
+#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */
+#define GIC_NUM_LOCAL_INTRS 7
+
+/* Convert between local/shared IRQ number and GIC HW IRQ number. */
+#define GIC_LOCAL_HWIRQ_BASE 0
+#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
+#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
+#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
+
+extern unsigned int gic_present;
+
+extern void gic_init(unsigned long gic_base_addr,
+ unsigned long gic_addrspace_size, unsigned int cpu_vec,
+ unsigned int irqbase);
+extern void gic_clocksource_init(unsigned int);
+extern cycle_t gic_read_count(void);
+extern unsigned int gic_get_count_width(void);
+extern cycle_t gic_read_compare(void);
+extern void gic_write_compare(cycle_t cnt);
+extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
+extern void gic_start_count(void);
+extern void gic_stop_count(void);
+extern void gic_send_ipi(unsigned int intr);
+extern unsigned int plat_ipi_call_int_xlate(unsigned int);
+extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
+extern int gic_get_c0_compare_int(void);
+extern int gic_get_c0_perfcount_int(void);
+extern int gic_get_c0_fdc_int(void);
+#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
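
To make the multi-word register helpers above concrete, here is an illustrative (not in-tree) sketch: unmasking shared interrupt N means writing bit GIC_INTR_BIT(N) to the SMASK word at GIC_SH_SMASK_OFS + GIC_INTR_OFS(N) inside the shared section. The gic_base pointer is a hypothetical ioremapped GIC window.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>

static void example_unmask_shared(void __iomem *gic_base, unsigned int intr)
{
	void __iomem *reg = gic_base + SHARED_SECTION_OFS +
			    GIC_SH_SMASK_OFS + GIC_INTR_OFS(intr);

	/* e.g. intr == 45: word offset 0x380 + 4, bit 45 % 32 == 13 */
	writel(BIT(GIC_INTR_BIT(intr)), reg);
}
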
diff --git a/include/linux/irqchip/mmp.h b/include/linux/irqchip/mmp.h
new file mode 100644
index 000000000..c78a89211
--- /dev/null
+++ b/include/linux/irqchip/mmp.h
@@ -0,0 +1,6 @@
+#ifndef __IRQCHIP_MMP_H
+#define __IRQCHIP_MMP_H
+
+extern struct irq_chip icu_irq_chip;
+
+#endif /* __IRQCHIP_MMP_H */
diff --git a/include/linux/irqchip/mxs.h b/include/linux/irqchip/mxs.h
new file mode 100644
index 000000000..9039a538a
--- /dev/null
+++ b/include/linux/irqchip/mxs.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IRQCHIP_MXS_H
+#define __LINUX_IRQCHIP_MXS_H
+
+extern void icoll_handle_irq(struct pt_regs *);
+
+#endif
diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h
new file mode 100644
index 000000000..1fac9651d
--- /dev/null
+++ b/include/linux/irqchip/versatile-fpga.h
@@ -0,0 +1,13 @@
+#ifndef PLAT_FPGA_IRQ_H
+#define PLAT_FPGA_IRQ_H
+
+struct device_node;
+struct pt_regs;
+
+void fpga_handle_irq(struct pt_regs *regs);
+void fpga_irq_init(void __iomem *, const char *, int, int, u32,
+ struct device_node *node);
+int fpga_irq_of_init(struct device_node *node,
+ struct device_node *parent);
+
+#endif
diff --git a/include/linux/irqchip/xtensa-mx.h b/include/linux/irqchip/xtensa-mx.h
new file mode 100644
index 000000000..9c3b6ecc8
--- /dev/null
+++ b/include/linux/irqchip/xtensa-mx.h
@@ -0,0 +1,17 @@
+/*
+ * Xtensa MX interrupt distributor
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __LINUX_IRQCHIP_XTENSA_MX_H
+#define __LINUX_IRQCHIP_XTENSA_MX_H
+
+struct device_node;
+int xtensa_mx_init_legacy(struct device_node *interrupt_parent);
+
+#endif /* __LINUX_IRQCHIP_XTENSA_MX_H */
diff --git a/include/linux/irqchip/xtensa-pic.h b/include/linux/irqchip/xtensa-pic.h
new file mode 100644
index 000000000..48718ae5a
--- /dev/null
+++ b/include/linux/irqchip/xtensa-pic.h
@@ -0,0 +1,18 @@
+/*
+ * Xtensa built-in interrupt controller
+ *
+ * Copyright (C) 2002 - 2013 Tensilica, Inc.
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef __LINUX_IRQCHIP_XTENSA_PIC_H
+#define __LINUX_IRQCHIP_XTENSA_PIC_H
+
+struct device_node;
+int xtensa_pic_init_legacy(struct device_node *interrupt_parent);
+
+#endif /* __LINUX_IRQCHIP_XTENSA_PIC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
new file mode 100644
index 000000000..dd1109fb2
--- /dev/null
+++ b/include/linux/irqdesc.h
@@ -0,0 +1,217 @@
+#ifndef _LINUX_IRQDESC_H
+#define _LINUX_IRQDESC_H
+
+/*
+ * Core internal functions to deal with irq descriptors
+ *
+ * This include will move to kernel/irq once we have cleaned up the tree.
+ * For now it's included from <linux/irq.h>
+ */
+
+struct irq_affinity_notify;
+struct proc_dir_entry;
+struct module;
+struct irq_desc;
+struct irq_domain;
+struct pt_regs;
+
+/**
+ * struct irq_desc - interrupt descriptor
+ * @irq_data: per irq and chip data passed down to chip functions
+ * @kstat_irqs: irq stats per cpu
+ * @handle_irq: highlevel irq-events handler
+ * @preflow_handler: handler called before the flow handler (currently used by sparc)
+ * @action: the irq action chain
+ * @status: status information
+ * @core_internal_state__do_not_mess_with_it: core internal status information
+ * @depth: disable-depth, for nested irq_disable() calls
+ * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
+ * @irq_count: stats field to detect stalled irqs
+ * @last_unhandled: aging timer for unhandled count
+ * @irqs_unhandled: stats field for spurious unhandled interrupts
+ * @threads_handled: stats field for deferred spurious detection of threaded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
+ * @lock: locking for SMP
+ * @affinity_hint: hint to user space for preferred irq affinity
+ * @affinity_notify: context for notification of affinity changes
+ * @pending_mask: pending rebalanced interrupts
+ * @threads_oneshot: bitfield to handle shared oneshot threads
+ * @threads_active: number of irqaction threads currently running
+ * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
+ * @nr_actions: number of installed actions on this descriptor
+ * @no_suspend_depth:	number of irqactions on an irq descriptor with
+ *			IRQF_NO_SUSPEND set
+ * @force_resume_depth:	number of irqactions on an irq descriptor with
+ *			IRQF_FORCE_RESUME set
+ * @dir: /proc/irq/ procfs entry
+ * @name: flow handler name for /proc/interrupts output
+ */
+struct irq_desc {
+ struct irq_data irq_data;
+ unsigned int __percpu *kstat_irqs;
+ irq_flow_handler_t handle_irq;
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+ irq_preflow_handler_t preflow_handler;
+#endif
+ struct irqaction *action; /* IRQ action list */
+ unsigned int status_use_accessors;
+ unsigned int core_internal_state__do_not_mess_with_it;
+ unsigned int depth; /* nested irq disables */
+ unsigned int wake_depth; /* nested wake enables */
+ unsigned int irq_count; /* For detecting broken IRQs */
+ unsigned long last_unhandled; /* Aging timer for unhandled count */
+ unsigned int irqs_unhandled;
+ atomic_t threads_handled;
+ int threads_handled_last;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+#ifdef CONFIG_SMP
+ const struct cpumask *affinity_hint;
+ struct irq_affinity_notify *affinity_notify;
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_var_t pending_mask;
+#endif
+#endif
+ unsigned long threads_oneshot;
+ atomic_t threads_active;
+ wait_queue_head_t wait_for_threads;
+#ifdef CONFIG_PM_SLEEP
+ unsigned int nr_actions;
+ unsigned int no_suspend_depth;
+ unsigned int cond_suspend_depth;
+ unsigned int force_resume_depth;
+#endif
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *dir;
+#endif
+ int parent_irq;
+ struct module *owner;
+ const char *name;
+} ____cacheline_internodealigned_in_smp;
+
+#ifndef CONFIG_SPARSE_IRQ
+extern struct irq_desc irq_desc[NR_IRQS];
+#endif
+
+static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
+{
+ return &desc->irq_data;
+}
+
+static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
+{
+ return desc->irq_data.chip;
+}
+
+static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
+{
+ return desc->irq_data.chip_data;
+}
+
+static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
+{
+ return desc->irq_data.handler_data;
+}
+
+static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
+{
+ return desc->irq_data.msi_desc;
+}
+
+/*
+ * Architectures call this to let the generic IRQ layer
+ * handle an interrupt. If the descriptor is attached to an
+ * irqchip-style controller then we call the ->handle_irq() handler,
+ * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
+ */
+static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+ desc->handle_irq(irq, desc);
+}
+
+int generic_handle_irq(unsigned int irq);
+
+#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+/*
+ * Convert a HW interrupt number to a logical one using an IRQ domain,
+ * and handle the resulting interrupt number. Return -EINVAL if
+ * conversion failed. Providing a NULL domain indicates that the
+ * conversion has already been done.
+ */
+int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
+ bool lookup, struct pt_regs *regs);
+
+static inline int handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs)
+{
+ return __handle_domain_irq(domain, hwirq, true, regs);
+}
+#endif
+
+/* Test to see if a driver has successfully requested an irq */
+static inline int irq_has_action(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ return desc->action != NULL;
+}
+
+/* caller has locked the irq_desc and both params are valid */
+static inline void __irq_set_handler_locked(unsigned int irq,
+ irq_flow_handler_t handler)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->handle_irq = handler;
+}
+
+/* caller has locked the irq_desc and both params are valid */
+static inline void
+__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
+ irq_flow_handler_t handler, const char *name)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ irq_desc_get_irq_data(desc)->chip = chip;
+ desc->handle_irq = handler;
+ desc->name = name;
+}
+
+static inline int irq_balancing_disabled(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
+}
+
+static inline int irq_is_percpu(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status_use_accessors & IRQ_PER_CPU;
+}
+
+static inline void
+irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc)
+ lockdep_set_class(&desc->lock, class);
+}
+
+#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
+static inline void
+__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ desc->preflow_handler = handler;
+}
+#endif
+
+#endif
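
One common consumer of the locked helpers above (an illustrative sketch, not part of the patch) is an irq_chip ->irq_set_type() callback that switches the flow handler between level and edge handling; the hardware programming step is omitted.

#include <linux/irq.h>

static int example_irq_set_type(struct irq_data *d, unsigned int type)
{
	/* The core holds the descriptor lock while calling irq_set_type(). */
	if (type & IRQ_TYPE_LEVEL_MASK)
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	/* ... program the trigger type into the hypothetical hardware ... */
	return 0;
}
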
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
new file mode 100644
index 000000000..676d7306a
--- /dev/null
+++ b/include/linux/irqdomain.h
@@ -0,0 +1,328 @@
+/*
+ * irq_domain - IRQ translation domains
+ *
+ * Translation infrastructure between hw and linux irq numbers. This is
+ * helpful for interrupt controllers to implement mapping between hardware
+ * irq numbers and the Linux irq number space.
+ *
+ * irq_domains also have a hook for translating device tree interrupt
+ * representation into a hardware irq number that can be mapped back to a
+ * Linux irq number without any extra platform support code.
+ *
+ * Interrupt controller "domain" data structure. This could be defined as a
+ * irq domain controller. That is, it handles the mapping between hardware
+ * and virtual interrupt numbers for a given interrupt domain. The domain
+ * structure is generally created by the PIC code for a given PIC instance
+ * (though a domain can cover more than one PIC if they have a flat number
+ * model). It's the domain callbacks that are responsible for setting the
+ * irq_chip on a given irq_desc after it's been mapped.
+ *
+ * The host code and data structures are agnostic to whether or not
+ * we use an open firmware device-tree. We do have references to struct
+ * device_node in two places: in irq_find_host() to find the host matching
+ * a given interrupt controller node, and of course as an argument to its
+ * counterpart domain->ops->match() callback. However, those are treated as
+ * generic pointers by the core and the fact that it's actually a device-node
+ * pointer is purely a convention between callers and implementation. This
+ * code could thus be used on other architectures by replacing those two
+ * by some sort of arch-specific void * "token" used to identify interrupt
+ * controllers.
+ */
+
+#ifndef _LINUX_IRQDOMAIN_H
+#define _LINUX_IRQDOMAIN_H
+
+#include <linux/types.h>
+#include <linux/irqhandler.h>
+#include <linux/radix-tree.h>
+
+struct device_node;
+struct irq_domain;
+struct of_device_id;
+struct irq_chip;
+struct irq_data;
+
+/* Number of irqs reserved for a legacy isa controller */
+#define NUM_ISA_INTERRUPTS 16
+
+/**
+ * struct irq_domain_ops - Methods for irq_domain objects
+ * @match: Match an interrupt controller device node to a host, returns
+ * 1 on a match
+ * @map: Create or update a mapping between a virtual irq number and a hw
+ * irq number. This is called only once for a given mapping.
+ * @unmap: Dispose of such a mapping
+ * @xlate: Given a device tree node and interrupt specifier, decode
+ * the hardware irq number and linux irq type value.
+ *
+ * Functions below are provided by the driver and called whenever a new mapping
+ * is created or an old mapping is disposed. The driver can then proceed with
+ * whatever internal data structure management is required. It also needs
+ * to set up the irq_desc when returning from map().
+ */
+struct irq_domain_ops {
+ int (*match)(struct irq_domain *d, struct device_node *node);
+ int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
+ void (*unmap)(struct irq_domain *d, unsigned int virq);
+ int (*xlate)(struct irq_domain *d, struct device_node *node,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq, unsigned int *out_type);
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ /* extended V2 interfaces to support hierarchy irq_domains */
+ int (*alloc)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs, void *arg);
+ void (*free)(struct irq_domain *d, unsigned int virq,
+ unsigned int nr_irqs);
+ void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
+#endif
+};
+
+extern struct irq_domain_ops irq_generic_chip_ops;
+
+struct irq_domain_chip_generic;
+
+/**
+ * struct irq_domain - Hardware interrupt number translation object
+ * @link: Element in global irq_domain list.
+ * @name: Name of interrupt domain
+ * @ops: pointer to irq_domain methods
+ * @host_data: private data pointer for use by owner. Not touched by irq_domain
+ * core code.
+ * @flags: host per irq_domain flags
+ *
+ * Optional elements
+ * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
+ * when decoding device tree interrupt specifiers.
+ * @gc: Pointer to a list of generic chips. There is a helper function for
+ *	setting up one or more generic chips for interrupt controller
+ *	drivers using the generic chip library, which uses this pointer.
+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
+ *
+ * Revmap data, used internally by irq_domain
+ * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
+ * support direct mapping
+ * @revmap_size: Size of the linear map table @linear_revmap[]
+ * @revmap_tree: Radix map tree for hwirqs that don't fit in the linear map
+ * @linear_revmap: Linear table of hwirq->virq reverse mappings
+ */
+struct irq_domain {
+ struct list_head link;
+ const char *name;
+ const struct irq_domain_ops *ops;
+ void *host_data;
+ unsigned int flags;
+
+ /* Optional data */
+ struct device_node *of_node;
+ struct irq_domain_chip_generic *gc;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ struct irq_domain *parent;
+#endif
+
+ /* reverse map data. The linear map gets appended to the irq_domain */
+ irq_hw_number_t hwirq_max;
+ unsigned int revmap_direct_max_irq;
+ unsigned int revmap_size;
+ struct radix_tree_root revmap_tree;
+ unsigned int linear_revmap[];
+};
+
+/* Irq domain flags */
+enum {
+ /* Irq domain is hierarchical */
+ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
+
+	/* Core calls alloc/free recursively through the domain hierarchy. */
+ IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
+
+ /*
+ * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
+ * for implementation specific purposes and ignored by the
+ * core code.
+ */
+ IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
+};
+
+#ifdef CONFIG_IRQ_DOMAIN
+struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
+ irq_hw_number_t hwirq_max, int direct_max,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+ unsigned int size,
+ unsigned int first_irq,
+ irq_hw_number_t first_hwirq,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+extern struct irq_domain *irq_find_host(struct device_node *node);
+extern void irq_set_default_host(struct irq_domain *host);
+
+/**
+ * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @size: Number of interrupts in the domain.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ */
+static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+ unsigned int size,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return __irq_domain_add(of_node, size, size, 0, ops, host_data);
+}
+static inline struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+ unsigned int max_irq,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return __irq_domain_add(of_node, 0, max_irq, max_irq, ops, host_data);
+}
+static inline struct irq_domain *irq_domain_add_legacy_isa(
+ struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
+ host_data);
+}
+static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+ const struct irq_domain_ops *ops,
+ void *host_data)
+{
+ return __irq_domain_add(of_node, 0, ~0, 0, ops, host_data);
+}
+
+extern void irq_domain_remove(struct irq_domain *host);
+
+extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq);
+extern void irq_domain_associate_many(struct irq_domain *domain,
+ unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count);
+extern void irq_domain_disassociate(struct irq_domain *domain,
+ unsigned int irq);
+
+extern unsigned int irq_create_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq);
+extern void irq_dispose_mapping(unsigned int virq);
+
+/**
+ * irq_linear_revmap() - Find a linux irq from a hw irq number.
+ * @domain: domain owning this hardware interrupt
+ * @hwirq: hardware irq number in that domain space
+ *
+ * This is a fast path alternative to irq_find_mapping() that can be
+ * called directly by irq controller code to save a handful of
+ * instructions. It is always safe to call, but won't find irqs mapped
+ * using the radix tree.
+ */
+static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
+ irq_hw_number_t hwirq)
+{
+ return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0;
+}
+extern unsigned int irq_find_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq);
+extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
+extern int irq_create_strict_mappings(struct irq_domain *domain,
+ unsigned int irq_base,
+ irq_hw_number_t hwirq_base, int count);
+
+static inline int irq_create_identity_mapping(struct irq_domain *host,
+ irq_hw_number_t hwirq)
+{
+ return irq_create_strict_mappings(host, hwirq, hwirq, 1);
+}
+
+extern const struct irq_domain_ops irq_domain_simple_ops;
+
+/* stock xlate functions */
+int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type);
+
+/* V2 interfaces to support hierarchy IRQ domains. */
+extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
+ unsigned int virq);
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
+ unsigned int flags, unsigned int size,
+ struct device_node *node,
+ const struct irq_domain_ops *ops, void *host_data);
+extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
+ unsigned int nr_irqs, int node, void *arg,
+ bool realloc);
+extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
+extern void irq_domain_activate_irq(struct irq_data *irq_data);
+extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
+
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
+ unsigned int nr_irqs, int node, void *arg)
+{
+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
+}
+
+extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq,
+ struct irq_chip *chip,
+ void *chip_data);
+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq, struct irq_chip *chip,
+ void *chip_data, irq_flow_handler_t handler,
+ void *handler_data, const char *handler_name);
+extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
+extern void irq_domain_free_irqs_common(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs);
+extern void irq_domain_free_irqs_top(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs);
+
+extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs, void *arg);
+
+extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
+ unsigned int irq_base,
+ unsigned int nr_irqs);
+
+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
+{
+ return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
+}
+#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+static inline void irq_domain_activate_irq(struct irq_data *data) { }
+static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
+static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
+ unsigned int nr_irqs, int node, void *arg)
+{
+ return -1;
+}
+
+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
+{
+ return false;
+}
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#else /* CONFIG_IRQ_DOMAIN */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+static inline void irq_domain_activate_irq(struct irq_data *data) { }
+static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
+#endif /* !CONFIG_IRQ_DOMAIN */
+
+#endif /* _LINUX_IRQDOMAIN_H */
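
A minimal linear-domain sketch (illustrative only; the chip, node and sizes are placeholders): the ->map() callback installs the per-IRQ chip and flow handler as described above, and irq_create_mapping() then hands out Linux IRQ numbers for hardware IRQs on demand.

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_chip example_chip;	/* hypothetical chip, callbacks omitted */

static int example_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops example_ops = {
	.map	= example_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int example_setup_domain(struct device_node *np, void *priv)
{
	struct irq_domain *domain;

	domain = irq_domain_add_linear(np, 32, &example_ops, priv);
	if (!domain)
		return -ENOMEM;

	/* Create (or look up) the Linux IRQ backing hardware IRQ 3. */
	return irq_create_mapping(domain, 3) ? 0 : -EINVAL;
}
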
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
new file mode 100644
index 000000000..5dd1272d1
--- /dev/null
+++ b/include/linux/irqflags.h
@@ -0,0 +1,151 @@
+/*
+ * include/linux/irqflags.h
+ *
+ * IRQ flags tracing: follow the state of the hardirq and softirq flags and
+ * provide callbacks for transitions between ON and OFF states.
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _LINUX_TRACE_IRQFLAGS_H
+#define _LINUX_TRACE_IRQFLAGS_H
+
+#include <linux/typecheck.h>
+#include <asm/irqflags.h>
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ extern void trace_softirqs_on(unsigned long ip);
+ extern void trace_softirqs_off(unsigned long ip);
+ extern void trace_hardirqs_on(void);
+ extern void trace_hardirqs_off(void);
+# define trace_hardirq_context(p) ((p)->hardirq_context)
+# define trace_softirq_context(p) ((p)->softirq_context)
+# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
+# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
+# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
+# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+#else
+# define trace_hardirqs_on() do { } while (0)
+# define trace_hardirqs_off() do { } while (0)
+# define trace_softirqs_on(ip) do { } while (0)
+# define trace_softirqs_off(ip) do { } while (0)
+# define trace_hardirq_context(p) 0
+# define trace_softirq_context(p) 0
+# define trace_hardirqs_enabled(p) 0
+# define trace_softirqs_enabled(p) 0
+# define trace_hardirq_enter() do { } while (0)
+# define trace_hardirq_exit() do { } while (0)
+# define lockdep_softirq_enter() do { } while (0)
+# define lockdep_softirq_exit() do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+#endif
+
+#if defined(CONFIG_IRQSOFF_TRACER) || \
+ defined(CONFIG_PREEMPT_TRACER)
+ extern void stop_critical_timings(void);
+ extern void start_critical_timings(void);
+#else
+# define stop_critical_timings() do { } while (0)
+# define start_critical_timings() do { } while (0)
+#endif
+
+/*
+ * Wrap the arch provided IRQ routines to provide appropriate checks.
+ */
+#define raw_local_irq_disable() arch_local_irq_disable()
+#define raw_local_irq_enable() arch_local_irq_enable()
+#define raw_local_irq_save(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = arch_local_irq_save(); \
+ } while (0)
+#define raw_local_irq_restore(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ arch_local_irq_restore(flags); \
+ } while (0)
+#define raw_local_save_flags(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = arch_local_save_flags(); \
+ } while (0)
+#define raw_irqs_disabled_flags(flags) \
+ ({ \
+ typecheck(unsigned long, flags); \
+ arch_irqs_disabled_flags(flags); \
+ })
+#define raw_irqs_disabled() (arch_irqs_disabled())
+#define raw_safe_halt() arch_safe_halt()
+
+/*
+ * The local_irq_*() APIs are equal to the raw_local_irq*()
+ * if !TRACE_IRQFLAGS.
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+#define local_irq_enable() \
+ do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
+#define local_irq_disable() \
+ do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
+#define local_irq_save(flags) \
+ do { \
+ raw_local_irq_save(flags); \
+ trace_hardirqs_off(); \
+ } while (0)
+
+
+#define local_irq_restore(flags) \
+ do { \
+ if (raw_irqs_disabled_flags(flags)) { \
+ raw_local_irq_restore(flags); \
+ trace_hardirqs_off(); \
+ } else { \
+ trace_hardirqs_on(); \
+ raw_local_irq_restore(flags); \
+ } \
+ } while (0)
+
+#define safe_halt() \
+ do { \
+ trace_hardirqs_on(); \
+ raw_safe_halt(); \
+ } while (0)
+
+
+#else /* !CONFIG_TRACE_IRQFLAGS */
+
+#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
+#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
+#define local_irq_save(flags) \
+ do { \
+ raw_local_irq_save(flags); \
+ } while (0)
+#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
+#define safe_halt() do { raw_safe_halt(); } while (0)
+
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
+#define local_save_flags(flags) raw_local_save_flags(flags)
+
+/*
+ * Some architectures don't define arch_irqs_disabled(), so even if either
+ * definition would be fine we need to use different ones for the time being
+ * to avoid build issues.
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+#define irqs_disabled() \
+ ({ \
+ unsigned long _flags; \
+ raw_local_save_flags(_flags); \
+ raw_irqs_disabled_flags(_flags); \
+ })
+#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
+#define irqs_disabled() raw_irqs_disabled()
+#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
+#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
+
+#endif
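
A short usage sketch (illustrative): the flags variable passed to local_irq_save()/local_irq_restore() must be an unsigned long, which the typecheck() in the raw_ wrappers above enforces at build time. The counter being protected is hypothetical.

#include <linux/irqflags.h>

static unsigned int example_counter;	/* hypothetical state touched in IRQ context */

static void example_update(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable IRQs, trace if enabled */
	example_counter++;		/* cannot be interrupted on this CPU */
	local_irq_restore(flags);	/* restore the previous IRQ state */
}
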
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
new file mode 100644
index 000000000..62d543004
--- /dev/null
+++ b/include/linux/irqhandler.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_IRQHANDLER_H
+#define _LINUX_IRQHANDLER_H
+
+/*
+ * Interrupt flow handler typedefs are defined here to avoid circular
+ * include dependencies.
+ */
+
+struct irq_desc;
+struct irq_data;
+typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
+typedef void (*irq_preflow_handler_t)(struct irq_data *data);
+
+#endif
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
new file mode 100644
index 000000000..fdd5cc16c
--- /dev/null
+++ b/include/linux/irqnr.h
@@ -0,0 +1,39 @@
+#ifndef _LINUX_IRQNR_H
+#define _LINUX_IRQNR_H
+
+#include <uapi/linux/irqnr.h>
+
+
+extern int nr_irqs;
+extern struct irq_desc *irq_to_desc(unsigned int irq);
+unsigned int irq_get_next_irq(unsigned int offset);
+
+# define for_each_irq_desc(irq, desc) \
+ for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
+ irq++, desc = irq_to_desc(irq)) \
+ if (!desc) \
+ ; \
+ else
+
+
+# define for_each_irq_desc_reverse(irq, desc) \
+ for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
+ irq--, desc = irq_to_desc(irq)) \
+ if (!desc) \
+ ; \
+ else
+
+#ifdef CONFIG_SMP
+#define irq_node(irq) (irq_get_irq_data(irq)->node)
+#else
+#define irq_node(irq) 0
+#endif
+
+# define for_each_active_irq(irq) \
+ for (irq = irq_get_next_irq(0); irq < nr_irqs; \
+ irq = irq_get_next_irq(irq + 1))
+
+#define for_each_irq_nr(irq) \
+ for (irq = 0; irq < nr_irqs; irq++)
+
+#endif
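
An illustrative walk over the descriptors with the iterators above (not part of the patch); note that for_each_irq_desc() already skips holes where irq_to_desc() returns NULL.

#include <linux/irq.h>
#include <linux/kernel.h>

static void example_dump_requested_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_irq_desc(irq, desc) {
		if (desc->action)
			pr_info("irq %u has a handler installed\n", irq);
	}
}
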
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
new file mode 100644
index 000000000..e374e369f
--- /dev/null
+++ b/include/linux/irqreturn.h
@@ -0,0 +1,19 @@
+#ifndef _LINUX_IRQRETURN_H
+#define _LINUX_IRQRETURN_H
+
+/**
+ * enum irqreturn
+ * @IRQ_NONE interrupt was not from this device
+ * @IRQ_HANDLED interrupt was handled by this device
+ * @IRQ_WAKE_THREAD handler requests to wake the handler thread
+ */
+enum irqreturn {
+ IRQ_NONE = (0 << 0),
+ IRQ_HANDLED = (1 << 0),
+ IRQ_WAKE_THREAD = (1 << 1),
+};
+
+typedef enum irqreturn irqreturn_t;
+#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
+
+#endif
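
A sketch of a handler using these return values (illustrative; the device structure and register offsets are hypothetical): on a shared line, returning IRQ_NONE for foreign interrupts keeps spurious-IRQ detection meaningful.

#include <linux/interrupt.h>
#include <linux/io.h>

struct example_dev {
	void __iomem *regs;	/* hypothetical MMIO window */
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;
	u32 pending = readl(ed->regs + 0x04);	/* hypothetical status register */

	if (!pending)
		return IRQ_NONE;		/* shared line: not our device */

	writel(pending, ed->regs + 0x04);	/* hypothetical write-one-to-clear */
	return IRQ_HANDLED;
}
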
diff --git a/include/linux/isa.h b/include/linux/isa.h
new file mode 100644
index 000000000..b0270e381
--- /dev/null
+++ b/include/linux/isa.h
@@ -0,0 +1,39 @@
+/*
+ * ISA bus.
+ */
+
+#ifndef __LINUX_ISA_H
+#define __LINUX_ISA_H
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+struct isa_driver {
+ int (*match)(struct device *, unsigned int);
+ int (*probe)(struct device *, unsigned int);
+ int (*remove)(struct device *, unsigned int);
+ void (*shutdown)(struct device *, unsigned int);
+ int (*suspend)(struct device *, unsigned int, pm_message_t);
+ int (*resume)(struct device *, unsigned int);
+
+ struct device_driver driver;
+ struct device *devices;
+};
+
+#define to_isa_driver(x) container_of((x), struct isa_driver, driver)
+
+#ifdef CONFIG_ISA
+int isa_register_driver(struct isa_driver *, unsigned int);
+void isa_unregister_driver(struct isa_driver *);
+#else
+static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
+{
+ return 0;
+}
+
+static inline void isa_unregister_driver(struct isa_driver *d)
+{
+}
+#endif
+
+#endif /* __LINUX_ISA_H */
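
A minimal registration sketch (illustrative; the driver name and unit count are placeholders): isa_register_driver() is asked to probe two unit indices, and ->match() decides per index whether a device is really present.

#include <linux/device.h>
#include <linux/isa.h>
#include <linux/module.h>

static int example_isa_match(struct device *dev, unsigned int id)
{
	return 1;	/* real code would probe I/O ports for unit 'id' */
}

static int example_isa_probe(struct device *dev, unsigned int id)
{
	dev_info(dev, "example ISA unit %u probed\n", id);
	return 0;
}

static struct isa_driver example_isa_driver = {
	.match	= example_isa_match,
	.probe	= example_isa_probe,
	.driver	= {
		.name	= "example_isa",	/* hypothetical driver name */
	},
};

static int __init example_isa_init(void)
{
	return isa_register_driver(&example_isa_driver, 2);
}

static void __exit example_isa_exit(void)
{
	isa_unregister_driver(&example_isa_driver);
}

module_init(example_isa_init);
module_exit(example_isa_exit);
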
diff --git a/include/linux/isapnp.h b/include/linux/isapnp.h
new file mode 100644
index 000000000..3c77bf9b1
--- /dev/null
+++ b/include/linux/isapnp.h
@@ -0,0 +1,121 @@
+/*
+ * ISA Plug & Play support
+ * Copyright (c) by Jaroslav Kysela <perex@suse.cz>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef LINUX_ISAPNP_H
+#define LINUX_ISAPNP_H
+
+#include <linux/errno.h>
+#include <linux/pnp.h>
+
+/*
+ *
+ */
+
+#define ISAPNP_VENDOR(a,b,c) (((((a)-'A'+1)&0x3f)<<2)|\
+ ((((b)-'A'+1)&0x18)>>3)|((((b)-'A'+1)&7)<<13)|\
+ ((((c)-'A'+1)&0x1f)<<8))
+#define ISAPNP_DEVICE(x) ((((x)&0xf000)>>8)|\
+ (((x)&0x0f00)>>8)|\
+ (((x)&0x00f0)<<8)|\
+ (((x)&0x000f)<<8))
+#define ISAPNP_FUNCTION(x) ISAPNP_DEVICE(x)
+
+/*
+ *
+ */
+
+#ifdef __KERNEL__
+#include <linux/mod_devicetable.h>
+
+#define DEVICE_COUNT_COMPATIBLE 4
+
+#define ISAPNP_CARD_DEVS 8
+
+#define ISAPNP_CARD_ID(_va, _vb, _vc, _device) \
+ .card_vendor = ISAPNP_VENDOR(_va, _vb, _vc), .card_device = ISAPNP_DEVICE(_device)
+#define ISAPNP_CARD_END \
+ .card_vendor = 0, .card_device = 0
+#define ISAPNP_DEVICE_ID(_va, _vb, _vc, _function) \
+ { .vendor = ISAPNP_VENDOR(_va, _vb, _vc), .function = ISAPNP_FUNCTION(_function) }
+
+struct isapnp_card_id {
+ unsigned long driver_data; /* data private to the driver */
+ unsigned short card_vendor, card_device;
+ struct {
+ unsigned short vendor, function;
+ } devs[ISAPNP_CARD_DEVS]; /* logical devices */
+};
+
+#define ISAPNP_DEVICE_SINGLE(_cva, _cvb, _cvc, _cdevice, _dva, _dvb, _dvc, _dfunction) \
+ .card_vendor = ISAPNP_VENDOR(_cva, _cvb, _cvc), .card_device = ISAPNP_DEVICE(_cdevice), \
+ .vendor = ISAPNP_VENDOR(_dva, _dvb, _dvc), .function = ISAPNP_FUNCTION(_dfunction)
+#define ISAPNP_DEVICE_SINGLE_END \
+ .card_vendor = 0, .card_device = 0
+
+#if defined(CONFIG_ISAPNP) || (defined(CONFIG_ISAPNP_MODULE) && defined(MODULE))
+
+#define __ISAPNP__
+
+/* lowlevel configuration */
+int isapnp_present(void);
+int isapnp_cfg_begin(int csn, int device);
+int isapnp_cfg_end(void);
+unsigned char isapnp_read_byte(unsigned char idx);
+void isapnp_write_byte(unsigned char idx, unsigned char val);
+
+#ifdef CONFIG_PROC_FS
+int isapnp_proc_init(void);
+int isapnp_proc_done(void);
+#else
+static inline int isapnp_proc_init(void) { return 0; }
+static inline int isapnp_proc_done(void) { return 0; }
+#endif
+
+/* compat */
+struct pnp_card *pnp_find_card(unsigned short vendor,
+ unsigned short device,
+ struct pnp_card *from);
+struct pnp_dev *pnp_find_dev(struct pnp_card *card,
+ unsigned short vendor,
+ unsigned short function,
+ struct pnp_dev *from);
+
+#else /* !CONFIG_ISAPNP */
+
+/* lowlevel configuration */
+static inline int isapnp_present(void) { return 0; }
+static inline int isapnp_cfg_begin(int csn, int device) { return -ENODEV; }
+static inline int isapnp_cfg_end(void) { return -ENODEV; }
+static inline unsigned char isapnp_read_byte(unsigned char idx) { return 0xff; }
+static inline void isapnp_write_byte(unsigned char idx, unsigned char val) { ; }
+
+static inline struct pnp_card *pnp_find_card(unsigned short vendor,
+ unsigned short device,
+ struct pnp_card *from) { return NULL; }
+static inline struct pnp_dev *pnp_find_dev(struct pnp_card *card,
+ unsigned short vendor,
+ unsigned short function,
+ struct pnp_dev *from) { return NULL; }
+
+#endif /* CONFIG_ISAPNP */
+
+#endif /* __KERNEL__ */
+#endif /* LINUX_ISAPNP_H */
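
An illustrative lookup using the compat helpers and ID macros above (the vendor letters and device/function numbers are placeholders): the EISA-style ID "ABC1234" is built with ISAPNP_VENDOR()/ISAPNP_DEVICE(), and pnp_find_card()/pnp_find_dev() walk the detected cards.

#include <linux/isapnp.h>

static struct pnp_dev *example_find_logical_dev(void)
{
	struct pnp_card *card = NULL;
	struct pnp_dev *dev;

	while ((card = pnp_find_card(ISAPNP_VENDOR('A', 'B', 'C'),
				     ISAPNP_DEVICE(0x1234), card))) {
		dev = pnp_find_dev(card, ISAPNP_VENDOR('A', 'B', 'C'),
				   ISAPNP_FUNCTION(0x0001), NULL);
		if (dev)
			return dev;	/* first matching logical device */
	}

	return NULL;
}
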
diff --git a/include/linux/iscsi_boot_sysfs.h b/include/linux/iscsi_boot_sysfs.h
new file mode 100644
index 000000000..2a8b1659b
--- /dev/null
+++ b/include/linux/iscsi_boot_sysfs.h
@@ -0,0 +1,133 @@
+/*
+ * Export the iSCSI boot info to userland via sysfs.
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ISCSI_BOOT_SYSFS_
+#define _ISCSI_BOOT_SYSFS_
+
+/*
+ * The text attribute names for each of the kobjects.
+ */
+enum iscsi_boot_eth_properties_enum {
+ ISCSI_BOOT_ETH_INDEX,
+ ISCSI_BOOT_ETH_FLAGS,
+ ISCSI_BOOT_ETH_IP_ADDR,
+ ISCSI_BOOT_ETH_SUBNET_MASK,
+ ISCSI_BOOT_ETH_ORIGIN,
+ ISCSI_BOOT_ETH_GATEWAY,
+ ISCSI_BOOT_ETH_PRIMARY_DNS,
+ ISCSI_BOOT_ETH_SECONDARY_DNS,
+ ISCSI_BOOT_ETH_DHCP,
+ ISCSI_BOOT_ETH_VLAN,
+ ISCSI_BOOT_ETH_MAC,
+ /* eth_pci_bdf - this is replaced by link to the device itself. */
+ ISCSI_BOOT_ETH_HOSTNAME,
+ ISCSI_BOOT_ETH_END_MARKER,
+};
+
+enum iscsi_boot_tgt_properties_enum {
+ ISCSI_BOOT_TGT_INDEX,
+ ISCSI_BOOT_TGT_FLAGS,
+ ISCSI_BOOT_TGT_IP_ADDR,
+ ISCSI_BOOT_TGT_PORT,
+ ISCSI_BOOT_TGT_LUN,
+ ISCSI_BOOT_TGT_CHAP_TYPE,
+ ISCSI_BOOT_TGT_NIC_ASSOC,
+ ISCSI_BOOT_TGT_NAME,
+ ISCSI_BOOT_TGT_CHAP_NAME,
+ ISCSI_BOOT_TGT_CHAP_SECRET,
+ ISCSI_BOOT_TGT_REV_CHAP_NAME,
+ ISCSI_BOOT_TGT_REV_CHAP_SECRET,
+ ISCSI_BOOT_TGT_END_MARKER,
+};
+
+enum iscsi_boot_initiator_properties_enum {
+ ISCSI_BOOT_INI_INDEX,
+ ISCSI_BOOT_INI_FLAGS,
+ ISCSI_BOOT_INI_ISNS_SERVER,
+ ISCSI_BOOT_INI_SLP_SERVER,
+ ISCSI_BOOT_INI_PRI_RADIUS_SERVER,
+ ISCSI_BOOT_INI_SEC_RADIUS_SERVER,
+ ISCSI_BOOT_INI_INITIATOR_NAME,
+ ISCSI_BOOT_INI_END_MARKER,
+};
+
+struct attribute_group;
+
+struct iscsi_boot_kobj {
+ struct kobject kobj;
+ struct attribute_group *attr_group;
+ struct list_head list;
+
+ /*
+ * Pointer to store driver specific info. If set this will
+ * be freed for the LLD when the kobj release function is called.
+ */
+ void *data;
+ /*
+ * Driver specific show function.
+ *
+ * The enum of the type. This can be any value of the above
+ * properties.
+ */
+ ssize_t (*show) (void *data, int type, char *buf);
+
+ /*
+	 * Driver specific visibility function.
+	 * The function should return whether the attr should be readable,
+	 * writable, or not shown at all.
+ *
+ * The enum of the type. This can be any value of the above
+ * properties.
+ */
+ umode_t (*is_visible) (void *data, int type);
+
+ /*
+ * Driver specific release function.
+ *
+ * The function should free the data passed in.
+ */
+ void (*release) (void *data);
+};
+
+struct iscsi_boot_kset {
+ struct list_head kobj_list;
+ struct kset *kset;
+};
+
+struct iscsi_boot_kobj *
+iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ umode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data));
+
+struct iscsi_boot_kobj *
+iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ umode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data));
+struct iscsi_boot_kobj *
+iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show) (void *data, int type, char *buf),
+ umode_t (*is_visible) (void *data, int type),
+ void (*release) (void *data));
+
+struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name);
+struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno);
+void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset);
+
+#endif
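
A sketch of how a firmware driver might publish one target entry with the API above (illustrative; the data structure, target name and attribute selection are made up): the release callback frees the driver data when the kobject goes away.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/iscsi_boot_sysfs.h>

struct example_tgt {
	char name[64];			/* hypothetical backing data */
};

static ssize_t example_tgt_show(void *data, int type, char *buf)
{
	struct example_tgt *tgt = data;

	if (type == ISCSI_BOOT_TGT_NAME)
		return sprintf(buf, "%s\n", tgt->name);
	return 0;
}

static umode_t example_tgt_is_visible(void *data, int type)
{
	return type == ISCSI_BOOT_TGT_NAME ? S_IRUGO : 0;
}

static void example_tgt_release(void *data)
{
	kfree(data);			/* the kobj owns the data once created */
}

static int example_publish_target(struct iscsi_boot_kset *boot_kset)
{
	struct example_tgt *tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);

	if (!tgt)
		return -ENOMEM;
	strlcpy(tgt->name, "iqn.2015-01.org.example:tgt0", sizeof(tgt->name));

	if (!iscsi_boot_create_target(boot_kset, 0, tgt, example_tgt_show,
				      example_tgt_is_visible,
				      example_tgt_release)) {
		kfree(tgt);
		return -ENOMEM;
	}
	return 0;
}
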
diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h
new file mode 100644
index 000000000..605cc5c33
--- /dev/null
+++ b/include/linux/iscsi_ibft.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2007 Red Hat, Inc.
+ * by Peter Jones <pjones@redhat.com>
+ * Copyright 2007 IBM, Inc.
+ * by Konrad Rzeszutek <konradr@linux.vnet.ibm.com>
+ * Copyright 2008
+ * by Konrad Rzeszutek <ketuzsezr@darnok.org>
+ *
+ * This code exposes the iSCSI Boot Format Table to userland via sysfs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License v2.0 as published by
+ * the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ISCSI_IBFT_H
+#define ISCSI_IBFT_H
+
+#include <linux/acpi.h>
+
+/*
+ * Logical location of iSCSI Boot Format Table.
+ * If the value is NULL there is no iBFT on the machine.
+ */
+extern struct acpi_table_ibft *ibft_addr;
+
+/*
+ * Routine used to find and reserve the iSCSI Boot Format Table. The
+ * mapped address is set in the ibft_addr variable.
+ */
+#ifdef CONFIG_ISCSI_IBFT_FIND
+unsigned long find_ibft_region(unsigned long *sizep);
+#else
+static inline unsigned long find_ibft_region(unsigned long *sizep)
+{
+ *sizep = 0;
+ return 0;
+}
+#endif
+
+#endif /* ISCSI_IBFT_H */
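
A sketch of the expected early-boot usage (illustrative; reserving via memblock is one plausible choice, not something the interface mandates): find the table, then keep the region from being handed to the page allocator.

#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>

static void __init example_reserve_ibft(void)
{
	unsigned long size = 0;
	unsigned long addr = find_ibft_region(&size);

	if (addr && size)
		memblock_reserve(addr, size);	/* keep the iBFT around */
}
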
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
new file mode 100644
index 000000000..1e9a0f2a8
--- /dev/null
+++ b/include/linux/isdn.h
@@ -0,0 +1,472 @@
+/* $Id: isdn.h,v 1.125.2.3 2004/02/10 01:07:14 keil Exp $
+ *
+ * Main header for the Linux ISDN subsystem (linklevel).
+ *
+ * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de)
+ * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg
+ * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+#ifndef __ISDN_H__
+#define __ISDN_H__
+
+
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <asm/io.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/fcntl.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/mutex.h>
+#include <uapi/linux/isdn.h>
+
+#define ISDN_TTY_MAJOR 43
+#define ISDN_TTYAUX_MAJOR 44
+#define ISDN_MAJOR 45
+
+/* The minor device numbers for Channel 0 and 1 are used as arguments for
+ * physical Channel-Mapping, so they MUST NOT be changed without changing
+ * the corresponding code in isdn.c
+ */
+
+#define ISDN_MINOR_B 0
+#define ISDN_MINOR_BMAX (ISDN_MAX_CHANNELS-1)
+#define ISDN_MINOR_CTRL 64
+#define ISDN_MINOR_CTRLMAX (64 + (ISDN_MAX_CHANNELS-1))
+#define ISDN_MINOR_PPP 128
+#define ISDN_MINOR_PPPMAX (128 + (ISDN_MAX_CHANNELS-1))
+#define ISDN_MINOR_STATUS 255
+
+#ifdef CONFIG_ISDN_PPP
+
+#ifdef CONFIG_ISDN_PPP_VJ
+# include <net/slhc_vj.h>
+#endif
+
+#include <linux/ppp_defs.h>
+#include <linux/ppp-ioctl.h>
+
+#include <linux/isdn_ppp.h>
+#endif
+
+#ifdef CONFIG_ISDN_X25
+# include <linux/concap.h>
+#endif
+
+#include <linux/isdnif.h>
+
+#define ISDN_DRVIOCTL_MASK 0x7f /* Mask for Device-ioctl */
+
+/* Until now unused */
+#define ISDN_SERVICE_VOICE 1
+#define ISDN_SERVICE_AB 1<<1
+#define ISDN_SERVICE_X21 1<<2
+#define ISDN_SERVICE_G4 1<<3
+#define ISDN_SERVICE_BTX 1<<4
+#define ISDN_SERVICE_DFUE 1<<5
+#define ISDN_SERVICE_X25 1<<6
+#define ISDN_SERVICE_TTX 1<<7
+#define ISDN_SERVICE_MIXED 1<<8
+#define ISDN_SERVICE_FW 1<<9
+#define ISDN_SERVICE_GTEL 1<<10
+#define ISDN_SERVICE_BTXN 1<<11
+#define ISDN_SERVICE_BTEL 1<<12
+
+/* Macros checking plain usage */
+#define USG_NONE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NONE)
+#define USG_RAW(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_RAW)
+#define USG_MODEM(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM)
+#define USG_VOICE(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE)
+#define USG_NET(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_NET)
+#define USG_FAX(x) ((x & ISDN_USAGE_MASK)==ISDN_USAGE_FAX)
+#define USG_OUTGOING(x) ((x & ISDN_USAGE_OUTGOING)==ISDN_USAGE_OUTGOING)
+#define USG_MODEMORVOICE(x) (((x & ISDN_USAGE_MASK)==ISDN_USAGE_MODEM) || \
+ ((x & ISDN_USAGE_MASK)==ISDN_USAGE_VOICE) )
+
+/* Timer-delays and scheduling-flags */
+#define ISDN_TIMER_RES 4 /* Main Timer-Resolution */
+#define ISDN_TIMER_02SEC (HZ/ISDN_TIMER_RES/5) /* Slow-Timer1 .2 sec */
+#define ISDN_TIMER_1SEC (HZ/ISDN_TIMER_RES) /* Slow-Timer2 1 sec */
+#define ISDN_TIMER_RINGING 5 /* tty RINGs = ISDN_TIMER_1SEC * this factor */
+#define ISDN_TIMER_KEEPINT 10 /* Cisco-Keepalive = ISDN_TIMER_1SEC * this factor */
+#define ISDN_TIMER_MODEMREAD 1
+#define ISDN_TIMER_MODEMPLUS 2
+#define ISDN_TIMER_MODEMRING 4
+#define ISDN_TIMER_MODEMXMIT 8
+#define ISDN_TIMER_NETDIAL 16
+#define ISDN_TIMER_NETHANGUP 32
+#define ISDN_TIMER_CARRIER 256 /* Wait for Carrier */
+#define ISDN_TIMER_FAST (ISDN_TIMER_MODEMREAD | ISDN_TIMER_MODEMPLUS | \
+ ISDN_TIMER_MODEMXMIT)
+#define ISDN_TIMER_SLOW (ISDN_TIMER_MODEMRING | ISDN_TIMER_NETHANGUP | \
+ ISDN_TIMER_NETDIAL | ISDN_TIMER_CARRIER)
+
+/* Timeout-Values for isdn_net_dial() */
+#define ISDN_TIMER_DTIMEOUT10 (10*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
+#define ISDN_TIMER_DTIMEOUT15 (15*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
+#define ISDN_TIMER_DTIMEOUT60 (60*HZ/(ISDN_TIMER_02SEC*(ISDN_TIMER_RES+1)))
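+
+/*
+ * Worked example (illustrative, assuming HZ == 100): ISDN_TIMER_02SEC is
+ * 100/4/5 = 5 ticks of the main timer and ISDN_TIMER_1SEC is 25 ticks, so
+ * ISDN_TIMER_DTIMEOUT10 evaluates to 10*100 / (5 * (4+1)) = 40; the exact
+ * unit depends on how isdn_net_dial() advances its dtimer counter.
+ */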
+
+/* GLOBAL_FLAGS */
+#define ISDN_GLOBAL_STOPPED 1
+
+/*=================== Start of ip-over-ISDN stuff =========================*/
+
+/* Feature- and status-flags for a net-interface */
+#define ISDN_NET_CONNECTED 0x01 /* Bound to ISDN-Channel */
+#define ISDN_NET_SECURE 0x02 /* Accept calls from phonelist only */
+#define ISDN_NET_CALLBACK 0x04 /* activate callback */
+#define ISDN_NET_CBHUP 0x08 /* hangup before callback */
+#define ISDN_NET_CBOUT 0x10 /* remote machine does callback */
+
+#define ISDN_NET_MAGIC 0x49344C02 /* for paranoia-checking */
+
+/* Phone-list-element */
+typedef struct {
+ void *next;
+ char num[ISDN_MSNLEN];
+} isdn_net_phone;
+
+/*
+ Principles when extending structures for generic encapsulation protocol
+ ("concap") support:
+ - Stuff which is hardware specific (here i4l-specific) goes in
+ the netdev -> local structure (here: isdn_net_local)
+ - Stuff which is encapsulation protocol specific goes in the structure
+ which holds the linux device structure (here: isdn_net_device)
+*/
+
+/* Local interface-data */
+typedef struct isdn_net_local_s {
+ ulong magic;
+ struct net_device_stats stats; /* Ethernet Statistics */
+ int isdn_device; /* Index to isdn-device */
+ int isdn_channel; /* Index to isdn-channel */
+ int ppp_slot; /* PPPD device slot number */
+ int pre_device; /* Preselected isdn-device */
+ int pre_channel; /* Preselected isdn-channel */
+ int exclusive; /* If non-zero idx to reserved chan.*/
+ int flags; /* Connection-flags */
+ int dialretry; /* Counter for Dialout-retries */
+ int dialmax; /* Max. Number of Dial-retries */
+ int cbdelay; /* Delay before Callback starts */
+ int dtimer; /* Timeout-counter for dialing */
+ char msn[ISDN_MSNLEN]; /* MSNs/EAZs for this interface */
+ u_char cbhup; /* Flag: Reject Call before Callback*/
+ u_char dialstate; /* State for dialing */
+ u_char p_encap; /* Packet encapsulation */
+ /* 0 = Ethernet over ISDN */
+ /* 1 = RAW-IP */
+ /* 2 = IP with type field */
+ u_char l2_proto; /* Layer-2-protocol */
+ /* See ISDN_PROTO_L2..-constants in */
+ /* isdnif.h */
+ /* 0 = X75/LAPB with I-Frames */
+ /* 1 = X75/LAPB with UI-Frames */
+ /* 2 = X75/LAPB with BUI-Frames */
+ /* 3 = HDLC */
+ u_char l3_proto; /* Layer-3-protocol */
+ /* See ISDN_PROTO_L3..-constants in */
+ /* isdnif.h */
+ /* 0 = Transparent */
+ int huptimer; /* Timeout-counter for auto-hangup */
+ int charge; /* Counter for charging units */
+ ulong chargetime; /* Timer for Charging info */
+ int hupflags; /* Flags for charge-unit-hangup: */
+ /* bit0: chargeint is invalid */
+ /* bit1: Getting charge-interval */
+ /* bit2: Do charge-unit-hangup */
+ /* bit3: Do hangup even on incoming */
+ int outgoing; /* Flag: outgoing call */
+ int onhtime; /* Time to keep link up */
+ int chargeint; /* Interval between charge-infos */
+ int onum; /* Flag: at least 1 outgoing number */
+ int cps; /* current speed of this interface */
+ int transcount; /* byte-counter for cps-calculation */
+ int sqfull; /* Flag: netdev-queue overloaded */
+ ulong sqfull_stamp; /* Start-Time of overload */
+ ulong slavedelay; /* Dynamic bundling delaytime */
+ int triggercps; /* BogoCPS needed for trigger slave */
+ isdn_net_phone *phone[2]; /* List of remote-phonenumbers */
+ /* phone[0] = Incoming Numbers */
+ /* phone[1] = Outgoing Numbers */
+ isdn_net_phone *dial; /* Pointer to dialed number */
+ struct net_device *master; /* Ptr to Master device for slaves */
+ struct net_device *slave; /* Ptr to Slave device for masters */
+ struct isdn_net_local_s *next; /* Ptr to next link in bundle */
+ struct isdn_net_local_s *last; /* Ptr to last link in bundle */
+ struct isdn_net_dev_s *netdev; /* Ptr to netdev */
+ struct sk_buff_head super_tx_queue; /* List of supervisory frames to */
+ /* be transmitted asap */
+ atomic_t frame_cnt; /* number of frames currently */
+ /* queued in HL driver */
+ /* Ptr to orig. hard_header_cache */
+ spinlock_t xmit_lock; /* used to protect the xmit path of */
+ /* a particular channel (including */
+ /* the frame_cnt) */
+
+ int pppbind; /* ippp device for bindings */
+ int dialtimeout; /* How long shall we try on dialing? (jiffies) */
+ int dialwait; /* How long shall we wait after failed attempt? (jiffies) */
+ ulong dialstarted; /* jiffies of first dialing-attempt */
+ ulong dialwait_timer; /* jiffies of earliest next dialing-attempt */
+ int huptimeout; /* How long will the connection be up? (seconds) */
+#ifdef CONFIG_ISDN_X25
+ struct concap_device_ops *dops; /* callbacks used by encapsulator */
+#endif
+ /* use a separate struct for that in later versions */
+ ulong cisco_myseq; /* Local keepalive seq. for Cisco */
+ ulong cisco_mineseen; /* returned keepalive seq. from remote */
+ ulong cisco_yourseq; /* Remote keepalive seq. for Cisco */
+ int cisco_keepalive_period; /* keepalive period */
+ ulong cisco_last_slarp_in; /* jiffie of last keepalive packet we received */
+ char cisco_line_state; /* state of line according to keepalive packets */
+ char cisco_debserint; /* debugging flag of cisco hdlc with slarp */
+ struct timer_list cisco_timer;
+ struct work_struct tqueue;
+} isdn_net_local;
+
+/* the interface itself */
+typedef struct isdn_net_dev_s {
+ isdn_net_local *local;
+ isdn_net_local *queue; /* circular list of all bundled
+ channels, which are currently
+ online */
+ spinlock_t queue_lock; /* lock to protect queue */
+ void *next; /* Pointer to next isdn-interface */
+ struct net_device *dev; /* interface to upper levels */
+#ifdef CONFIG_ISDN_PPP
+ ippp_bundle * pb; /* pointer to the common bundle structure
+ * with the per-bundle data */
+#endif
+#ifdef CONFIG_ISDN_X25
+ struct concap_proto *cprot; /* connection oriented encapsulation protocol */
+#endif
+
+} isdn_net_dev;
+
+/*===================== End of ip-over-ISDN stuff ===========================*/
+
+/*======================= Start of ISDN-tty stuff ===========================*/
+
+#define ISDN_ASYNC_MAGIC 0x49344C01 /* for paranoia-checking */
+#define ISDN_SERIAL_XMIT_SIZE 1024 /* Default bufsize for write */
+#define ISDN_SERIAL_XMIT_MAX 4000 /* Maximum bufsize for write */
+
+#ifdef CONFIG_ISDN_AUDIO
+/* For using sk_buffs with audio we need some private variables
+ * within each sk_buff. For this purpose, we declare a struct here,
+ * and always put it in the private skb->cb data array. A few macros help
+ * accessing the variables.
+ */
+typedef struct _isdn_audio_data {
+ unsigned short dle_count;
+ unsigned char lock;
+} isdn_audio_data_t;
+
+#define ISDN_AUDIO_SKB_DLECOUNT(skb) (((isdn_audio_data_t *)&skb->cb[0])->dle_count)
+#define ISDN_AUDIO_SKB_LOCK(skb) (((isdn_audio_data_t *)&skb->cb[0])->lock)
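+
+/*
+ * Example (illustrative sketch, hypothetical helper): resetting the per-skb
+ * audio bookkeeping that the accessors above overlay onto skb->cb[].
+ */
+static inline void isdn_audio_skb_init_sketch(struct sk_buff *skb)
+{
+        ISDN_AUDIO_SKB_DLECOUNT(skb) = 0;       /* no DLE chars counted yet */
+        ISDN_AUDIO_SKB_LOCK(skb) = 0;           /* buffer not locked */
+}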
+#endif
+
+/* Private data of AT-command-interpreter */
+typedef struct atemu {
+ u_char profile[ISDN_MODEM_NUMREG]; /* Modem-Regs. Profile 0 */
+ u_char mdmreg[ISDN_MODEM_NUMREG]; /* Modem-Registers */
+ char pmsn[ISDN_MSNLEN]; /* EAZ/MSNs Profile 0 */
+ char msn[ISDN_MSNLEN]; /* EAZ/MSN */
+ char plmsn[ISDN_LMSNLEN]; /* Listening MSNs Profile 0 */
+ char lmsn[ISDN_LMSNLEN]; /* Listening MSNs */
+ char cpn[ISDN_MSNLEN]; /* CalledPartyNumber on incoming call */
+ char connmsg[ISDN_CMSGLEN]; /* CONNECT-Msg from HL-Driver */
+#ifdef CONFIG_ISDN_AUDIO
+ u_char vpar[10]; /* Voice-parameters */
+ int lastDLE; /* Flag for voice-coding: DLE seen */
+#endif
+ int mdmcmdl; /* Length of Modem-Commandbuffer */
+ int pluscount; /* Counter for +++ sequence */
+ u_long lastplus; /* Timestamp of last + */
+ int carrierwait; /* Seconds of carrier waiting */
+ char mdmcmd[255]; /* Modem-Commandbuffer */
+ unsigned int charge; /* Charge units of current connection */
+} atemu;
+
+/* Private data (similar to async_struct in <linux/serial.h>) */
+typedef struct modem_info {
+ int magic;
+ struct tty_port port;
+ int x_char; /* xon/xoff character */
+ int mcr; /* Modem control register */
+ int msr; /* Modem status register */
+ int lsr; /* Line status register */
+ int line;
+ int online; /* 1 = B-Channel is up, drop data */
+ /* 2 = B-Channel is up, deliver d.*/
+ int dialing; /* Dial in progress or ATA */
+ int rcvsched; /* Receive needs schedule */
+ int isdn_driver; /* Index to isdn-driver */
+ int isdn_channel; /* Index to isdn-channel */
+ int drv_index; /* Index to dev->usage */
+ int ncarrier; /* Flag: schedule NO CARRIER */
+ unsigned char last_cause[8]; /* Last cause message */
+ unsigned char last_num[ISDN_MSNLEN];
+ /* Last phone-number */
+ unsigned char last_l2; /* Last layer-2 protocol */
+ unsigned char last_si; /* Last service */
+ unsigned char last_lhup; /* Last hangup local? */
+ unsigned char last_dir; /* Last direction (in or out) */
+ struct timer_list nc_timer; /* Timer for delayed NO CARRIER */
+ int send_outstanding;/* # of outstanding send-requests */
+ int xmit_size; /* max. # of chars in xmit_buf */
+ int xmit_count; /* # of chars in xmit_buf */
+ struct sk_buff_head xmit_queue; /* transmit queue */
+ atomic_t xmit_lock; /* Semaphore for isdn_tty_write */
+#ifdef CONFIG_ISDN_AUDIO
+ int vonline; /* Voice-channel status */
+ /* Bit 0 = recording */
+ /* Bit 1 = playback */
+ /* Bit 2 = playback, DLE-ETX seen */
+ struct sk_buff_head dtmf_queue; /* queue for dtmf results */
+ void *adpcms; /* state for adpcm decompression */
+ void *adpcmr; /* state for adpcm compression */
+ void *dtmf_state; /* state for dtmf decoder */
+ void *silence_state; /* state for silence detection */
+#endif
+#ifdef CONFIG_ISDN_TTY_FAX
+ struct T30_s *fax; /* T30 Fax Group 3 data/interface */
+ int faxonline; /* Fax-channel status */
+#endif
+ atemu emu; /* AT-emulator data */
+ spinlock_t readlock;
+} modem_info;
+
+#define ISDN_MODEM_WINSIZE 8
+
+/* Description of one ISDN-tty */
+typedef struct _isdn_modem {
+ int refcount; /* Number of opens */
+ struct tty_driver *tty_modem; /* tty-device */
+ struct tty_struct *modem_table[ISDN_MAX_CHANNELS]; /* ?? copied from Orig */
+ struct ktermios *modem_termios[ISDN_MAX_CHANNELS];
+ struct ktermios *modem_termios_locked[ISDN_MAX_CHANNELS];
+ modem_info info[ISDN_MAX_CHANNELS]; /* Private data */
+} isdn_modem_t;
+
+/*======================= End of ISDN-tty stuff ============================*/
+
+/*======================== Start of V.110 stuff ============================*/
+#define V110_BUFSIZE 1024
+
+typedef struct {
+ int nbytes; /* 1 Matrixbyte -> nbytes in stream */
+ int nbits; /* Number of used bits in streambyte */
+ unsigned char key; /* Bitmask in stream eg. 11 (nbits=2) */
+ int decodelen; /* Amount of data in decodebuf */
+ int SyncInit; /* Number of sync frames to send */
+ unsigned char *OnlineFrame; /* Precalculated V110 idle frame */
+ unsigned char *OfflineFrame; /* Precalculated V110 sync Frame */
+ int framelen; /* Length of frames */
+ int skbuser; /* Number of unacked userdata skbs */
+ int skbidle; /* Number of unacked idle/sync skbs */
+ int introducer; /* Local vars for decoder */
+ int dbit;
+ unsigned char b;
+ int skbres; /* space to reserve in outgoing skb */
+ int maxsize; /* maxbufsize of lowlevel driver */
+ unsigned char *encodebuf; /* temporary buffer for encoding */
+ unsigned char decodebuf[V110_BUFSIZE]; /* incomplete V110 matrices */
+} isdn_v110_stream;
+
+/*========================= End of V.110 stuff =============================*/
+
+/*======================= Start of general stuff ===========================*/
+
+typedef struct {
+ char *next;
+ char *private;
+} infostruct;
+
+#define DRV_FLAG_RUNNING 1
+#define DRV_FLAG_REJBUS 2
+#define DRV_FLAG_LOADED 4
+
+/* Description of hardware-level-driver */
+typedef struct _isdn_driver {
+ ulong online; /* Channel-Online flags */
+ ulong flags; /* Misc driver Flags */
+ int locks; /* Number of locks for this driver */
+ int channels; /* Number of channels */
+ wait_queue_head_t st_waitq; /* Wait-Queue for status-read's */
+ int maxbufsize; /* Maximum Buffersize supported */
+ unsigned long pktcount; /* Until now: unused */
+ int stavail; /* Chars avail on Status-device */
+ isdn_if *interface; /* Interface to driver */
+ int *rcverr; /* Error-counters for B-Ch.-receive */
+ int *rcvcount; /* Byte-counters for B-Ch.-receive */
+#ifdef CONFIG_ISDN_AUDIO
+ unsigned long DLEflag; /* Flags: Insert DLE at next read */
+#endif
+ struct sk_buff_head *rpqueue; /* Pointers to start of Rcv-Queue */
+ wait_queue_head_t *rcv_waitq; /* Wait-Queues for B-Channel-Reads */
+ wait_queue_head_t *snd_waitq; /* Wait-Queue for B-Channel-Send's */
+ char msn2eaz[10][ISDN_MSNLEN]; /* Mapping-Table MSN->EAZ */
+} isdn_driver_t;
+
+/* Main driver-data */
+typedef struct isdn_devt {
+ struct module *owner;
+ spinlock_t lock;
+ unsigned short flags; /* Bitmapped Flags: */
+ int drivers; /* Current number of drivers */
+ int channels; /* Current number of channels */
+ int net_verbose; /* Verbose-Flag */
+ int modempoll; /* Flag: tty-read active */
+ spinlock_t timerlock;
+ int tflags; /* Timer-Flags: */
+ /* see ISDN_TIMER_..defines */
+ int global_flags;
+ infostruct *infochain; /* List of open info-devs. */
+ wait_queue_head_t info_waitq; /* Wait-Queue for isdninfo */
+ struct timer_list timer; /* Misc.-function Timer */
+ int chanmap[ISDN_MAX_CHANNELS]; /* Map minor->device-channel */
+ int drvmap[ISDN_MAX_CHANNELS]; /* Map minor->driver-index */
+ int usage[ISDN_MAX_CHANNELS]; /* Used by tty/ip/voice */
+ char num[ISDN_MAX_CHANNELS][ISDN_MSNLEN];
+ /* Remote number of active ch.*/
+ int m_idx[ISDN_MAX_CHANNELS]; /* Index for mdm.... */
+ isdn_driver_t *drv[ISDN_MAX_DRIVERS]; /* Array of drivers */
+ isdn_net_dev *netdev; /* Linked list of net-if's */
+ char drvid[ISDN_MAX_DRIVERS][20];/* Driver-ID */
+ struct task_struct *profd; /* For iprofd */
+ isdn_modem_t mdm; /* tty-driver-data */
+ isdn_net_dev *rx_netdev[ISDN_MAX_CHANNELS]; /* rx netdev-pointers */
+ isdn_net_dev *st_netdev[ISDN_MAX_CHANNELS]; /* stat netdev-pointers */
+ ulong ibytes[ISDN_MAX_CHANNELS]; /* Statistics incoming bytes */
+ ulong obytes[ISDN_MAX_CHANNELS]; /* Statistics outgoing bytes */
+ int v110emu[ISDN_MAX_CHANNELS]; /* V.110 emulator-mode 0=none */
+ atomic_t v110use[ISDN_MAX_CHANNELS]; /* Usage-Semaphore for stream */
+ isdn_v110_stream *v110[ISDN_MAX_CHANNELS]; /* V.110 private data */
+ struct mutex mtx; /* serialize list access*/
+ unsigned long global_features;
+} isdn_dev;
+
+extern isdn_dev *dev;
+
+
+#endif /* __ISDN_H__ */
diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h
new file mode 100644
index 000000000..11b57c485
--- /dev/null
+++ b/include/linux/isdn/capilli.h
@@ -0,0 +1,113 @@
+/* $Id: capilli.h,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
+ *
+ * Kernel CAPI 2.0 Driver Interface for Linux
+ *
+ * Copyright 1999 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef __CAPILLI_H__
+#define __CAPILLI_H__
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/capi.h>
+#include <linux/kernelcapi.h>
+
+typedef struct capiloaddatapart {
+ int user; /* data in userspace ? */
+ int len;
+ unsigned char *data;
+} capiloaddatapart;
+
+typedef struct capiloaddata {
+ capiloaddatapart firmware;
+ capiloaddatapart configuration;
+} capiloaddata;
+
+typedef struct capicardparams {
+ unsigned int port;
+ unsigned irq;
+ int cardtype;
+ int cardnr;
+ unsigned int membase;
+} capicardparams;
+
+struct capi_ctr {
+ /* filled in before calling attach_capi_ctr */
+ struct module *owner;
+ void *driverdata; /* driver specific */
+ char name[32]; /* name of controller */
+ char *driver_name; /* name of driver */
+ int (*load_firmware)(struct capi_ctr *, capiloaddata *);
+ void (*reset_ctr)(struct capi_ctr *);
+ void (*register_appl)(struct capi_ctr *, u16 appl,
+ capi_register_params *);
+ void (*release_appl)(struct capi_ctr *, u16 appl);
+ u16 (*send_message)(struct capi_ctr *, struct sk_buff *skb);
+
+ char *(*procinfo)(struct capi_ctr *);
+ const struct file_operations *proc_fops;
+
+ /* filled in before calling ready callback */
+ u8 manu[CAPI_MANUFACTURER_LEN]; /* CAPI_GET_MANUFACTURER */
+ capi_version version; /* CAPI_GET_VERSION */
+ capi_profile profile; /* CAPI_GET_PROFILE */
+ u8 serial[CAPI_SERIAL_LEN]; /* CAPI_GET_SERIAL */
+
+ /* management information for kcapi */
+
+ unsigned long nrecvctlpkt;
+ unsigned long nrecvdatapkt;
+ unsigned long nsentctlpkt;
+ unsigned long nsentdatapkt;
+
+ int cnr; /* controller number */
+ unsigned short state; /* controller state */
+ int blocked; /* output blocked */
+ int traceflag; /* capi trace */
+ wait_queue_head_t state_wait_queue;
+
+ struct proc_dir_entry *procent;
+ char procfn[128];
+};
+
+int attach_capi_ctr(struct capi_ctr *);
+int detach_capi_ctr(struct capi_ctr *);
+
+void capi_ctr_ready(struct capi_ctr * card);
+void capi_ctr_down(struct capi_ctr * card);
+void capi_ctr_suspend_output(struct capi_ctr * card);
+void capi_ctr_resume_output(struct capi_ctr * card);
+void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb);
+
+// ---------------------------------------------------------------------------
+// needed for AVM capi drivers
+
+struct capi_driver {
+ char name[32]; /* driver name */
+ char revision[32];
+
+ int (*add_card)(struct capi_driver *driver, capicardparams *data);
+
+ /* management information for kcapi */
+ struct list_head list;
+};
+
+void register_capi_driver(struct capi_driver *driver);
+void unregister_capi_driver(struct capi_driver *driver);
+
+// ---------------------------------------------------------------------------
+// library functions for use by hardware controller drivers
+
+void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize);
+void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci);
+void capilib_release_appl(struct list_head *head, u16 applid);
+void capilib_release(struct list_head *head);
+void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
+u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
+
+#endif /* __CAPILLI_H__ */
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h
new file mode 100644
index 000000000..44bd6046e
--- /dev/null
+++ b/include/linux/isdn/capiutil.h
@@ -0,0 +1,516 @@
+/* $Id: capiutil.h,v 1.5.6.2 2001/09/23 22:24:33 kai Exp $
+ *
+ * CAPI 2.0 defines & types
+ *
+ * From CAPI 2.0 Development Kit AVM 1995 (msg.c)
+ * Rewritten for Linux 1996 by Carsten Paeth <calle@calle.de>
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef __CAPIUTIL_H__
+#define __CAPIUTIL_H__
+
+#include <asm/types.h>
+
+#define CAPIMSG_BASELEN 8
+#define CAPIMSG_U8(m, off) (m[off])
+#define CAPIMSG_U16(m, off) (m[off]|(m[(off)+1]<<8))
+#define CAPIMSG_U32(m, off) (m[off]|(m[(off)+1]<<8)|(m[(off)+2]<<16)|(m[(off)+3]<<24))
+#define CAPIMSG_LEN(m) CAPIMSG_U16(m,0)
+#define CAPIMSG_APPID(m) CAPIMSG_U16(m,2)
+#define CAPIMSG_COMMAND(m) CAPIMSG_U8(m,4)
+#define CAPIMSG_SUBCOMMAND(m) CAPIMSG_U8(m,5)
+#define CAPIMSG_CMD(m) (((m[4])<<8)|(m[5]))
+#define CAPIMSG_MSGID(m) CAPIMSG_U16(m,6)
+#define CAPIMSG_CONTROLLER(m) (m[8] & 0x7f)
+#define CAPIMSG_CONTROL(m) CAPIMSG_U32(m, 8)
+#define CAPIMSG_NCCI(m) CAPIMSG_CONTROL(m)
+#define CAPIMSG_DATALEN(m) CAPIMSG_U16(m,16) /* DATA_B3_REQ */
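+
+/*
+ * Example (illustrative sketch, hypothetical helper): the accessors above
+ * read the little-endian wire format directly, e.g. to spot a DATA_B3_REQ
+ * (command 0x86, subcommand 0x80) in a raw message.
+ */
+static inline int capimsg_is_data_b3_req_sketch(const __u8 *m)
+{
+        return CAPIMSG_COMMAND(m) == 0x86 && CAPIMSG_SUBCOMMAND(m) == 0x80;
+}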
+
+static inline void capimsg_setu8(void *m, int off, __u8 val)
+{
+ ((__u8 *)m)[off] = val;
+}
+
+static inline void capimsg_setu16(void *m, int off, __u16 val)
+{
+ ((__u8 *)m)[off] = val & 0xff;
+ ((__u8 *)m)[off+1] = (val >> 8) & 0xff;
+}
+
+static inline void capimsg_setu32(void *m, int off, __u32 val)
+{
+ ((__u8 *)m)[off] = val & 0xff;
+ ((__u8 *)m)[off+1] = (val >> 8) & 0xff;
+ ((__u8 *)m)[off+2] = (val >> 16) & 0xff;
+ ((__u8 *)m)[off+3] = (val >> 24) & 0xff;
+}
+
+#define CAPIMSG_SETLEN(m, len) capimsg_setu16(m, 0, len)
+#define CAPIMSG_SETAPPID(m, applid) capimsg_setu16(m, 2, applid)
+#define CAPIMSG_SETCOMMAND(m,cmd) capimsg_setu8(m, 4, cmd)
+#define CAPIMSG_SETSUBCOMMAND(m, cmd) capimsg_setu8(m, 5, cmd)
+#define CAPIMSG_SETMSGID(m, msgid) capimsg_setu16(m, 6, msgid)
+#define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr)
+#define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len)
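+
+/*
+ * Example (illustrative sketch, hypothetical helper): filling the fixed
+ * 12-byte header (8 byte base + 32 bit address) of a raw CAPI message with
+ * the setters above.
+ */
+static inline void capimsg_build_header_sketch(void *m, __u16 applid, __u8 cmd,
+                                               __u8 subcmd, __u16 msgid,
+                                               __u32 contr)
+{
+        CAPIMSG_SETLEN(m, CAPIMSG_BASELEN + 4);
+        CAPIMSG_SETAPPID(m, applid);
+        CAPIMSG_SETCOMMAND(m, cmd);
+        CAPIMSG_SETSUBCOMMAND(m, subcmd);
+        CAPIMSG_SETMSGID(m, msgid);
+        CAPIMSG_SETCONTROL(m, contr);
+}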
+
+/*----- basic-type definitions -----*/
+
+typedef __u8 *_cstruct;
+
+typedef enum {
+ CAPI_COMPOSE,
+ CAPI_DEFAULT
+} _cmstruct;
+
+/*
+ The _cmsg structure contains all possible CAPI 2.0 parameters.
+ All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE
+ assembles the parameters and builds CAPI 2.0 conformant messages.
+ CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the
+ parameters in the _cmsg structure.
+ */
+
+typedef struct {
+ /* Header */
+ __u16 ApplId;
+ __u8 Command;
+ __u8 Subcommand;
+ __u16 Messagenumber;
+
+ /* Parameter */
+ union {
+ __u32 adrController;
+ __u32 adrPLCI;
+ __u32 adrNCCI;
+ } adr;
+
+ _cmstruct AdditionalInfo;
+ _cstruct B1configuration;
+ __u16 B1protocol;
+ _cstruct B2configuration;
+ __u16 B2protocol;
+ _cstruct B3configuration;
+ __u16 B3protocol;
+ _cstruct BC;
+ _cstruct BChannelinformation;
+ _cmstruct BProtocol;
+ _cstruct CalledPartyNumber;
+ _cstruct CalledPartySubaddress;
+ _cstruct CallingPartyNumber;
+ _cstruct CallingPartySubaddress;
+ __u32 CIPmask;
+ __u32 CIPmask2;
+ __u16 CIPValue;
+ __u32 Class;
+ _cstruct ConnectedNumber;
+ _cstruct ConnectedSubaddress;
+ __u32 Data;
+ __u16 DataHandle;
+ __u16 DataLength;
+ _cstruct FacilityConfirmationParameter;
+ _cstruct Facilitydataarray;
+ _cstruct FacilityIndicationParameter;
+ _cstruct FacilityRequestParameter;
+ __u16 FacilitySelector;
+ __u16 Flags;
+ __u32 Function;
+ _cstruct HLC;
+ __u16 Info;
+ _cstruct InfoElement;
+ __u32 InfoMask;
+ __u16 InfoNumber;
+ _cstruct Keypadfacility;
+ _cstruct LLC;
+ _cstruct ManuData;
+ __u32 ManuID;
+ _cstruct NCPI;
+ __u16 Reason;
+ __u16 Reason_B3;
+ __u16 Reject;
+ _cstruct Useruserdata;
+
+ /* intern */
+ unsigned l, p;
+ unsigned char *par;
+ __u8 *m;
+
+ /* buffer to construct message */
+ __u8 buf[180];
+
+} _cmsg;
+
+/*
+ * capi_cmsg2message() assembles the parameters from _cmsg into a CAPI 2.0
+ * conformant message
+ */
+unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg);
+
+/*
+ * capi_message2cmsg() disassembles a CAPI message and writes the parameters
+ * into _cmsg for easy access
+ */
+unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg);
+
+/*
+ * capi_cmsg_header() fills the _cmsg structure with default values, so only
+ * parameters with non-default values need to be changed before sending the
+ * message.
+ */
+unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId,
+ __u8 _Command, __u8 _Subcommand,
+ __u16 _Messagenumber, __u32 _Controller);
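+
+/*
+ * Example (illustrative sketch, hypothetical helper): a typical encode flow,
+ * using the LISTEN_REQ command/subcommand values (0x05/0x80) that also appear
+ * in capi_fill_LISTEN_REQ() further down.
+ */
+static inline unsigned capi_encode_listen_req_sketch(_cmsg *cmsg, __u8 *buf,
+                                                     __u16 applid, __u16 msgid,
+                                                     __u32 contr, __u32 cipmask)
+{
+        capi_cmsg_header(cmsg, applid, 0x05, 0x80, msgid, contr);
+        cmsg->CIPmask = cipmask;                /* only non-default fields set */
+        return capi_cmsg2message(cmsg, buf);    /* build the wire message */
+}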
+
+/*-----------------------------------------------------------------------*/
+
+/*
+ * Debugging / Tracing functions
+ */
+
+char *capi_cmd2str(__u8 cmd, __u8 subcmd);
+
+typedef struct {
+ u_char *buf;
+ u_char *p;
+ size_t size;
+ size_t pos;
+} _cdebbuf;
+
+#define CDEBUG_SIZE 1024
+#define CDEBUG_GSIZE 4096
+
+void cdebbuf_free(_cdebbuf *cdb);
+int cdebug_init(void);
+void cdebug_exit(void);
+
+_cdebbuf *capi_cmsg2str(_cmsg *cmsg);
+_cdebbuf *capi_message2str(__u8 *msg);
+
+/*-----------------------------------------------------------------------*/
+
+static inline void capi_cmsg_answer(_cmsg * cmsg)
+{
+ cmsg->Subcommand |= 0x01;
+}
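+
+/*
+ * Worked example (illustrative): CAPI subcommands are REQ=0x80, CONF=0x81,
+ * IND=0x82 and RESP=0x83, so OR-ing in 0x01 above turns a received IND
+ * (e.g. 0x82) into the matching RESP (0x83) before it is sent back.
+ */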
+
+/*-----------------------------------------------------------------------*/
+
+static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ _cstruct NCPI)
+{
+ capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr);
+ cmsg->NCPI = NCPI;
+}
+
+static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u16 FacilitySelector,
+ _cstruct FacilityRequestParameter)
+{
+ capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr);
+ cmsg->FacilitySelector = FacilitySelector;
+ cmsg->FacilityRequestParameter = FacilityRequestParameter;
+}
+
+static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ _cstruct CalledPartyNumber,
+ _cstruct BChannelinformation,
+ _cstruct Keypadfacility,
+ _cstruct Useruserdata,
+ _cstruct Facilitydataarray)
+{
+ capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr);
+ cmsg->CalledPartyNumber = CalledPartyNumber;
+ cmsg->BChannelinformation = BChannelinformation;
+ cmsg->Keypadfacility = Keypadfacility;
+ cmsg->Useruserdata = Useruserdata;
+ cmsg->Facilitydataarray = Facilitydataarray;
+}
+
+static inline void capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u32 InfoMask,
+ __u32 CIPmask,
+ __u32 CIPmask2,
+ _cstruct CallingPartyNumber,
+ _cstruct CallingPartySubaddress)
+{
+ capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr);
+ cmsg->InfoMask = InfoMask;
+ cmsg->CIPmask = CIPmask;
+ cmsg->CIPmask2 = CIPmask2;
+ cmsg->CallingPartyNumber = CallingPartyNumber;
+ cmsg->CallingPartySubaddress = CallingPartySubaddress;
+}
+
+static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ _cstruct BChannelinformation,
+ _cstruct Keypadfacility,
+ _cstruct Useruserdata,
+ _cstruct Facilitydataarray)
+{
+ capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr);
+ cmsg->BChannelinformation = BChannelinformation;
+ cmsg->Keypadfacility = Keypadfacility;
+ cmsg->Useruserdata = Useruserdata;
+ cmsg->Facilitydataarray = Facilitydataarray;
+}
+
+static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u16 CIPValue,
+ _cstruct CalledPartyNumber,
+ _cstruct CallingPartyNumber,
+ _cstruct CalledPartySubaddress,
+ _cstruct CallingPartySubaddress,
+ __u16 B1protocol,
+ __u16 B2protocol,
+ __u16 B3protocol,
+ _cstruct B1configuration,
+ _cstruct B2configuration,
+ _cstruct B3configuration,
+ _cstruct BC,
+ _cstruct LLC,
+ _cstruct HLC,
+ _cstruct BChannelinformation,
+ _cstruct Keypadfacility,
+ _cstruct Useruserdata,
+ _cstruct Facilitydataarray)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, adr);
+ cmsg->CIPValue = CIPValue;
+ cmsg->CalledPartyNumber = CalledPartyNumber;
+ cmsg->CallingPartyNumber = CallingPartyNumber;
+ cmsg->CalledPartySubaddress = CalledPartySubaddress;
+ cmsg->CallingPartySubaddress = CallingPartySubaddress;
+ cmsg->B1protocol = B1protocol;
+ cmsg->B2protocol = B2protocol;
+ cmsg->B3protocol = B3protocol;
+ cmsg->B1configuration = B1configuration;
+ cmsg->B2configuration = B2configuration;
+ cmsg->B3configuration = B3configuration;
+ cmsg->BC = BC;
+ cmsg->LLC = LLC;
+ cmsg->HLC = HLC;
+ cmsg->BChannelinformation = BChannelinformation;
+ cmsg->Keypadfacility = Keypadfacility;
+ cmsg->Useruserdata = Useruserdata;
+ cmsg->Facilitydataarray = Facilitydataarray;
+}
+
+static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u32 Data,
+ __u16 DataLength,
+ __u16 DataHandle,
+ __u16 Flags)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr);
+ cmsg->Data = Data;
+ cmsg->DataLength = DataLength;
+ cmsg->DataHandle = DataHandle;
+ cmsg->Flags = Flags;
+}
+
+static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ _cstruct BChannelinformation,
+ _cstruct Keypadfacility,
+ _cstruct Useruserdata,
+ _cstruct Facilitydataarray)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr);
+ cmsg->BChannelinformation = BChannelinformation;
+ cmsg->Keypadfacility = Keypadfacility;
+ cmsg->Useruserdata = Useruserdata;
+ cmsg->Facilitydataarray = Facilitydataarray;
+}
+
+static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ _cstruct NCPI)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr);
+ cmsg->NCPI = NCPI;
+}
+
+static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u32 ManuID,
+ __u32 Class,
+ __u32 Function,
+ _cstruct ManuData)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr);
+ cmsg->ManuID = ManuID;
+ cmsg->Class = Class;
+ cmsg->Function = Function;
+ cmsg->ManuData = ManuData;
+}
+
+static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ _cstruct NCPI)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr);
+ cmsg->NCPI = NCPI;
+}
+
+static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u16 B1protocol,
+ __u16 B2protocol,
+ __u16 B3protocol,
+ _cstruct B1configuration,
+ _cstruct B2configuration,
+ _cstruct B3configuration)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr);
+ cmsg->B1protocol = B1protocol;
+ cmsg->B2protocol = B2protocol;
+ cmsg->B3protocol = B3protocol;
+ cmsg->B1configuration = B1configuration;
+ cmsg->B2configuration = B2configuration;
+ cmsg->B3configuration = B3configuration;
+}
+
+static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u16 Reject,
+ __u16 B1protocol,
+ __u16 B2protocol,
+ __u16 B3protocol,
+ _cstruct B1configuration,
+ _cstruct B2configuration,
+ _cstruct B3configuration,
+ _cstruct ConnectedNumber,
+ _cstruct ConnectedSubaddress,
+ _cstruct LLC,
+ _cstruct BChannelinformation,
+ _cstruct Keypadfacility,
+ _cstruct Useruserdata,
+ _cstruct Facilitydataarray)
+{
+ capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr);
+ cmsg->Reject = Reject;
+ cmsg->B1protocol = B1protocol;
+ cmsg->B2protocol = B2protocol;
+ cmsg->B3protocol = B3protocol;
+ cmsg->B1configuration = B1configuration;
+ cmsg->B2configuration = B2configuration;
+ cmsg->B3configuration = B3configuration;
+ cmsg->ConnectedNumber = ConnectedNumber;
+ cmsg->ConnectedSubaddress = ConnectedSubaddress;
+ cmsg->LLC = LLC;
+ cmsg->BChannelinformation = BChannelinformation;
+ cmsg->Keypadfacility = Keypadfacility;
+ cmsg->Useruserdata = Useruserdata;
+ cmsg->Facilitydataarray = Facilitydataarray;
+}
+
+static inline void capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr);
+}
+
+static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr);
+}
+
+static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u16 Reject,
+ _cstruct NCPI)
+{
+ capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr);
+ cmsg->Reject = Reject;
+ cmsg->NCPI = NCPI;
+}
+
+static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr);
+}
+
+static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u16 DataHandle)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr);
+ cmsg->DataHandle = DataHandle;
+}
+
+static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr);
+}
+
+static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr);
+}
+
+static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u16 FacilitySelector)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr);
+ cmsg->FacilitySelector = FacilitySelector;
+}
+
+static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr);
+}
+
+static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr,
+ __u32 ManuID,
+ __u32 Class,
+ __u32 Function,
+ _cstruct ManuData)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr);
+ cmsg->ManuID = ManuID;
+ cmsg->Class = Class;
+ cmsg->Function = Function;
+ cmsg->ManuData = ManuData;
+}
+
+static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
+ __u32 adr)
+{
+
+ capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr);
+}
+
+#endif /* __CAPIUTIL_H__ */
diff --git a/include/linux/isdn/hdlc.h b/include/linux/isdn/hdlc.h
new file mode 100644
index 000000000..96521370c
--- /dev/null
+++ b/include/linux/isdn/hdlc.h
@@ -0,0 +1,82 @@
+/*
+ * hdlc.h -- General purpose ISDN HDLC decoder.
+ *
+ * Implementation of an HDLC decoder/encoder in software.
+ * Necessary because some ISDN devices don't have HDLC
+ * controllers.
+ *
+ * Copyright (C)
+ * 2009 Karsten Keil <keil@b1-systems.de>
+ * 2002 Wolfgang Mües <wolfgang@iksw-muees.de>
+ * 2001 Frode Isaksen <fisaksen@bewan.com>
+ * 2001 Kai Germaschewski <kai.germaschewski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ISDNHDLC_H__
+#define __ISDNHDLC_H__
+
+struct isdnhdlc_vars {
+ int bit_shift;
+ int hdlc_bits1;
+ int data_bits;
+ int ffbit_shift; /* encoding only */
+ int state;
+ int dstpos;
+
+ u16 crc;
+
+ u8 cbin;
+ u8 shift_reg;
+ u8 ffvalue;
+
+ /* set if transferring data */
+ u32 data_received:1;
+ /* set if D channel (send idle instead of flags) */
+ u32 dchannel:1;
+ /* set if 56K adaptation */
+ u32 do_adapt56:1;
+ /* set if in closing phase (need to send CRC + flag) */
+ u32 do_closing:1;
+ /* set if data is bitreverse */
+ u32 do_bitreverse:1;
+};
+
+/* Feature Flags */
+#define HDLC_56KBIT 0x01
+#define HDLC_DCHANNEL 0x02
+#define HDLC_BITREVERSE 0x04
+
+/*
+ The return value from isdnhdlc_decode is
+ the frame length, 0 if no complete frame was decoded,
+ or a negative error number
+*/
+#define HDLC_FRAMING_ERROR 1
+#define HDLC_CRC_ERROR 2
+#define HDLC_LENGTH_ERROR 3
+
+extern void isdnhdlc_rcv_init(struct isdnhdlc_vars *hdlc, u32 features);
+
+extern int isdnhdlc_decode(struct isdnhdlc_vars *hdlc, const u8 *src,
+ int slen, int *count, u8 *dst, int dsize);
+
+extern void isdnhdlc_out_init(struct isdnhdlc_vars *hdlc, u32 features);
+
+extern int isdnhdlc_encode(struct isdnhdlc_vars *hdlc, const u8 *src,
+ u16 slen, int *count, u8 *dst, int dsize);
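+
+/*
+ * Example (illustrative sketch, hypothetical helper): draining a received
+ * buffer through the decoder; see the return-value comment above. It assumes
+ * that *count reports the number of source bytes consumed per call.
+ */
+static inline void isdnhdlc_rx_sketch(struct isdnhdlc_vars *hdlc,
+                                      const u8 *src, int slen,
+                                      u8 *frame, int fsize,
+                                      void (*deliver)(u8 *, int))
+{
+        int count, flen;
+
+        while (slen > 0) {
+                count = 0;
+                flen = isdnhdlc_decode(hdlc, src, slen, &count, frame, fsize);
+                if (flen > 0)
+                        deliver(frame, flen);   /* one complete frame */
+                /* flen == 0: frame incomplete, flen < 0: HDLC_*_ERROR */
+                if (count <= 0)
+                        break;
+                src += count;
+                slen -= count;
+        }
+}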
+
+#endif /* __ISDNHDLC_H__ */
diff --git a/include/linux/isdn_divertif.h b/include/linux/isdn_divertif.h
new file mode 100644
index 000000000..19ab361f9
--- /dev/null
+++ b/include/linux/isdn_divertif.h
@@ -0,0 +1,35 @@
+/* $Id: isdn_divertif.h,v 1.4.6.1 2001/09/23 22:25:05 kai Exp $
+ *
+ * Header for the diversion supplementary interface for i4l.
+ *
+ * Author Werner Cornelius (werner@titro.de)
+ * Copyright by Werner Cornelius (werner@titro.de)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+#ifndef _LINUX_ISDN_DIVERTIF_H
+#define _LINUX_ISDN_DIVERTIF_H
+
+#include <linux/isdnif.h>
+#include <linux/types.h>
+#include <uapi/linux/isdn_divertif.h>
+
+/***************************************************************/
+/* structure exchanging data between isdn hl and divert module */
+/***************************************************************/
+typedef struct
+ { ulong if_magic; /* magic info and version */
+ int cmd; /* command */
+ int (*stat_callback)(isdn_ctrl *); /* supplied by divert module when calling */
+ int (*ll_cmd)(isdn_ctrl *); /* supplied by hl on return */
+ char * (*drv_to_name)(int); /* map a driver id to name, supplied by hl */
+ int (*name_to_drv)(char *); /* map a driver name to id, supplied by hl */
+ } isdn_divert_if;
+
+/*********************/
+/* function register */
+/*********************/
+extern int DIVERT_REG_NAME(isdn_divert_if *);
+#endif /* _LINUX_ISDN_DIVERTIF_H */
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
new file mode 100644
index 000000000..a0070c6df
--- /dev/null
+++ b/include/linux/isdn_ppp.h
@@ -0,0 +1,194 @@
+/* Linux ISDN subsystem, sync PPP, interface to ipppd
+ *
+ * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de)
+ * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg
+ * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de)
+ * Copyright 2000-2002 by Kai Germaschewski (kai@germaschewski.name)
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+#ifndef _LINUX_ISDN_PPP_H
+#define _LINUX_ISDN_PPP_H
+
+
+
+
+#ifdef CONFIG_IPPP_FILTER
+#include <linux/filter.h>
+#endif
+#include <uapi/linux/isdn_ppp.h>
+
+#define DECOMP_ERR_NOMEM (-10)
+
+#define MP_END_FRAG 0x40
+#define MP_BEGIN_FRAG 0x80
+
+#define MP_MAX_QUEUE_LEN 16
+
+/*
+ * We need a way for the decompressor to influence the generation of CCP
+ * Reset-Requests in a variety of ways. The decompressor is already returning
+ * a lot of information (generated skb length, error conditions) so we use
+ * another parameter. This parameter is a pointer to a structure which is
+ * to be marked valid by the decompressor and only in this case is ever used.
+ * Furthermore, the only case where this data is used is when the decom-
+ * pressor returns DECOMP_ERROR.
+ *
+ * We use this same struct for the reset entry of the compressor to commu-
+ * nicate to its caller how to deal with sending of a Reset Ack. In this
+ * case, expra is not used, but other options still apply (suppressing
+ * sending with rsend, appending arbitrary data, etc).
+ */
+
+#define IPPP_RESET_MAXDATABYTES 32
+
+struct isdn_ppp_resetparams {
+ unsigned char valid:1; /* rw Is this structure filled at all ? */
+ unsigned char rsend:1; /* rw Should we send one at all ? */
+ unsigned char idval:1; /* rw Is the id field valid ? */
+ unsigned char dtval:1; /* rw Is the data field valid ? */
+ unsigned char expra:1; /* rw Is an Ack expected for this Req ? */
+ unsigned char id; /* wo Send CCP ResetReq with this id */
+ unsigned short maxdlen; /* ro Max bytes to be stored in data field */
+ unsigned short dlen; /* rw Bytes stored in data field */
+ unsigned char *data; /* wo Data for ResetReq info field */
+};
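+
+/*
+ * Example (illustrative sketch, hypothetical helper): how a decompressor
+ * might request a CCP Reset-Request from inside its decompress() hook when it
+ * hits an error. Field usage follows the comments above.
+ */
+static inline void ipppd_request_reset_sketch(struct isdn_ppp_resetparams *rs)
+{
+        rs->valid = 1;          /* structure has been filled in */
+        rs->rsend = 1;          /* please send a Reset-Request */
+        rs->idval = 0;          /* let the engine pick the id */
+        rs->dtval = 0;          /* no extra data appended */
+        rs->expra = 1;          /* we expect a Reset-Ack back */
+}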
+
+/*
+ * this is an 'old friend' from ppp-comp.h under a new name
+ * check the original include for more information
+ */
+struct isdn_ppp_compressor {
+ struct isdn_ppp_compressor *next, *prev;
+ struct module *owner;
+ int num; /* CCP compression protocol number */
+
+ void *(*alloc) (struct isdn_ppp_comp_data *);
+ void (*free) (void *state);
+ int (*init) (void *state, struct isdn_ppp_comp_data *,
+ int unit,int debug);
+
+ /* The reset entry needs to get more exact information about the
+ ResetReq or ResetAck it was called with. The parameters are
+ obvious. If reset is called without a Req or Ack frame which
+ could be handed into it, code MUST be set to 0. Using rsparm,
+ the reset entry can control if and how a ResetAck is returned. */
+
+ void (*reset) (void *state, unsigned char code, unsigned char id,
+ unsigned char *data, unsigned len,
+ struct isdn_ppp_resetparams *rsparm);
+
+ int (*compress) (void *state, struct sk_buff *in,
+ struct sk_buff *skb_out, int proto);
+
+ int (*decompress) (void *state,struct sk_buff *in,
+ struct sk_buff *skb_out,
+ struct isdn_ppp_resetparams *rsparm);
+
+ void (*incomp) (void *state, struct sk_buff *in,int proto);
+ void (*stat) (void *state, struct compstat *stats);
+};
+
+extern int isdn_ppp_register_compressor(struct isdn_ppp_compressor *);
+extern int isdn_ppp_unregister_compressor(struct isdn_ppp_compressor *);
+extern int isdn_ppp_dial_slave(char *);
+extern int isdn_ppp_hangup_slave(char *);
+
+typedef struct {
+ unsigned long seqerrs;
+ unsigned long frame_drops;
+ unsigned long overflows;
+ unsigned long max_queue_len;
+} isdn_mppp_stats;
+
+typedef struct {
+ int mp_mrru; /* unused */
+ struct sk_buff * frags; /* fragments sl list -- use skb->next */
+ long frames; /* number of frames in the frame list */
+ unsigned int seq; /* last processed packet seq #: any packets
+ * with smaller seq # will be dropped
+ * unconditionally */
+ spinlock_t lock;
+ int ref_ct;
+ /* statistics */
+ isdn_mppp_stats stats;
+} ippp_bundle;
+
+#define NUM_RCV_BUFFS 64
+
+struct ippp_buf_queue {
+ struct ippp_buf_queue *next;
+ struct ippp_buf_queue *last;
+ char *buf; /* NULL here indicates end of queue */
+ int len;
+};
+
+/* The data structure for one CCP reset transaction */
+enum ippp_ccp_reset_states {
+ CCPResetIdle,
+ CCPResetSentReq,
+ CCPResetRcvdReq,
+ CCPResetSentAck,
+ CCPResetRcvdAck
+};
+
+struct ippp_ccp_reset_state {
+ enum ippp_ccp_reset_states state; /* State of this transaction */
+ struct ippp_struct *is; /* Backlink to device stuff */
+ unsigned char id; /* Backlink id index */
+ unsigned char ta:1; /* The timer is active (flag) */
+ unsigned char expra:1; /* We expect a ResetAck at all */
+ int dlen; /* Databytes stored in data */
+ struct timer_list timer; /* For timeouts/retries */
+ /* This is a hack but seems sufficient for the moment. We do not want
+ to have this be yet another allocation for some bytes, it is more
+ memory management overhead than the whole mess is worth. */
+ unsigned char data[IPPP_RESET_MAXDATABYTES];
+};
+
+/* The data structure keeping track of the currently outstanding CCP Reset
+ transactions. */
+struct ippp_ccp_reset {
+ struct ippp_ccp_reset_state *rs[256]; /* One per possible id */
+ unsigned char lastid; /* Last id allocated by the engine */
+};
+
+struct ippp_struct {
+ struct ippp_struct *next_link;
+ int state;
+ spinlock_t buflock;
+ struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */
+ struct ippp_buf_queue *first; /* pointer to (current) first packet */
+ struct ippp_buf_queue *last; /* pointer to (current) last used packet in queue */
+ wait_queue_head_t wq;
+ struct task_struct *tk;
+ unsigned int mpppcfg;
+ unsigned int pppcfg;
+ unsigned int mru;
+ unsigned int mpmru;
+ unsigned int mpmtu;
+ unsigned int maxcid;
+ struct isdn_net_local_s *lp;
+ int unit;
+ int minor;
+ unsigned int last_link_seqno;
+ long mp_seqno;
+#ifdef CONFIG_ISDN_PPP_VJ
+ unsigned char *cbuf;
+ struct slcompress *slcomp;
+#endif
+#ifdef CONFIG_IPPP_FILTER
+ struct bpf_prog *pass_filter; /* filter for packets to pass */
+ struct bpf_prog *active_filter; /* filter for pkts to reset idle */
+#endif
+ unsigned long debug;
+ struct isdn_ppp_compressor *compressor,*decompressor;
+ struct isdn_ppp_compressor *link_compressor,*link_decompressor;
+ void *decomp_stat,*comp_stat,*link_decomp_stat,*link_comp_stat;
+ struct ippp_ccp_reset *reset; /* Allocated on demand, may never be needed */
+ unsigned long compflags;
+};
+
+#endif /* _LINUX_ISDN_PPP_H */
diff --git a/include/linux/isdnif.h b/include/linux/isdnif.h
new file mode 100644
index 000000000..0fc6ff276
--- /dev/null
+++ b/include/linux/isdnif.h
@@ -0,0 +1,505 @@
+/* $Id: isdnif.h,v 1.43.2.2 2004/01/12 23:08:35 keil Exp $
+ *
+ * Linux ISDN subsystem
+ * Definition of the interface between the subsystem and its low-level drivers.
+ *
+ * Copyright 1994,95,96 by Fritz Elfert (fritz@isdn4linux.de)
+ * Copyright 1995,96 Thinking Objects Software GmbH Wuerzburg
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ */
+#ifndef __ISDNIF_H__
+#define __ISDNIF_H__
+
+
+#include <linux/skbuff.h>
+#include <uapi/linux/isdnif.h>
+
+/***************************************************************************/
+/* Extensions made by Werner Cornelius (werner@ikt.de) */
+/* */
+/* The proceed command holds an incoming call in a state to leave processes */
+/* enough time to check whether it should be accepted. */
+/* The PROT_IO Command extends the interface to make protocol dependent */
+/* features available (call diversion, call waiting...). */
+/* */
+/* The PROT_IO Command is executed with the desired driver id and the arg */
+/* parameter coded as follows: */
+/* The lower 8 bits of arg contain the desired protocol from ISDN_PTYPE */
+/* definitions. The upper 24 bits represent the protocol specific cmd/stat.*/
+/* Any additional data is protocol and command specific. */
+/* This mechanism also applies to the statcallb callback STAT_PROT. */
+/* */
+/* This suggested extension permits an easy expansion of protocol specific */
+/* handling. Extensions may be added at any time without changing the HL */
+/* driver code and without risking conflicts or requiring certification. */
+/* The well known CAPI 2.0 interface handles such extensions in a similar */
+/* way. Perhaps a protocol specific module may be added and separately */
+/* loaded and linked to the basic isdn module for handling. */
+/***************************************************************************/
+
+/*****************/
+/* DSS1 commands */
+/*****************/
+#define DSS1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_EURO) /* invoke a supplementary service */
+#define DSS1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_EURO) /* abort an invoke cmd */
+
+/*******************************/
+/* DSS1 Status callback values */
+/*******************************/
+#define DSS1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_EURO) /* Result for invocation */
+#define DSS1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_EURO) /* Error Return for invocation */
+#define DSS1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_EURO) /* Deliver invoke broadcast info */
+
+
+/*********************************************************************/
+/* structures for DSS1 commands and callback */
+/* */
+/* An action is invoked by sending a DSS1_CMD_INVOKE. The ll_id, proc*/
+/* timeout, datalen and data fields must be set before calling. */
+/* */
+/* The return value is a positive hl_id value also delivered in the */
+/* hl_id field. A value of zero signals that no free hl_id is left. */
+/* A negative return value signals errors in LL. So if the return */
+/* value is <= 0 no action in LL will be taken -> request ignored */
+/* */
+/* The timeout field must be filled with a positive value specifying */
+/* the amount of time the INVOKED process waits for a reaction from */
+/* the network. */
+/* If a response (either error or result) is received during this */
+/* interval, a reporting callback is initiated, the process will */
+/* be deleted and the hl identifier will be freed. */
+/* If no response is received during the specified interval, an error */
+/* callback is initiated with timeout set to -1 and datalen set */
+/* to 0. */
+/* If timeout is set to a value <= 0 during INVOCATION the process is*/
+/* immediately deleted after sending the data. No callback occurs ! */
+/* */
+/* A currently waiting process may be aborted with INVOKE_ABORT. No */
+/* callback will occur when a process has been aborted. */
+/* */
+/* Broadcast invoke frames from the network are reported via the */
+/* STAT_INVOKE_BRD callback. The ll_id is set to 0, the other fields */
+/* are supplied by the network and not by the HL. */
+/*********************************************************************/
+
+/*****************/
+/* NI1 commands */
+/*****************/
+#define NI1_CMD_INVOKE ((0x00 << 8) | ISDN_PTYPE_NI1) /* invoke a supplementary service */
+#define NI1_CMD_INVOKE_ABORT ((0x01 << 8) | ISDN_PTYPE_NI1) /* abort an invoke cmd */
+
+/*******************************/
+/* NI1 Status callback values */
+/*******************************/
+#define NI1_STAT_INVOKE_RES ((0x80 << 8) | ISDN_PTYPE_NI1) /* Result for invocation */
+#define NI1_STAT_INVOKE_ERR ((0x81 << 8) | ISDN_PTYPE_NI1) /* Error Return for invocation */
+#define NI1_STAT_INVOKE_BRD ((0x82 << 8) | ISDN_PTYPE_NI1) /* Deliver invoke broadcast info */
+
+typedef struct
+ { ulong ll_id; /* ID supplied by LL when executing */
+ /* a command and returned by HL for */
+ /* INVOKE_RES and INVOKE_ERR */
+ int hl_id; /* ID supplied by HL when called */
+ /* for executing a cmd and delivered */
+ /* for results and errors */
+ /* must be supplied by LL when aborting*/
+ int proc; /* invoke procedure used by CMD_INVOKE */
+ /* returned by callback and broadcast */
+ int timeout; /* timeout for INVOKE CMD in ms */
+ /* -1 in stat callback when timed out */
+ /* error value when error callback */
+ int datalen; /* length of cmd or stat data */
+ u_char *data;/* pointer to data delivered or send */
+ } isdn_cmd_stat;
+
+/*
+ * Commands from linklevel to lowlevel
+ *
+ */
+#define ISDN_CMD_IOCTL 0 /* Perform ioctl */
+#define ISDN_CMD_DIAL 1 /* Dial out */
+#define ISDN_CMD_ACCEPTD 2 /* Accept an incoming call on D-Chan. */
+#define ISDN_CMD_ACCEPTB 3 /* Request B-Channel connect. */
+#define ISDN_CMD_HANGUP 4 /* Hangup */
+#define ISDN_CMD_CLREAZ 5 /* Clear EAZ(s) of channel */
+#define ISDN_CMD_SETEAZ 6 /* Set EAZ(s) of channel */
+#define ISDN_CMD_GETEAZ 7 /* Get EAZ(s) of channel */
+#define ISDN_CMD_SETSIL 8 /* Set Service-Indicator-List of channel */
+#define ISDN_CMD_GETSIL 9 /* Get Service-Indicator-List of channel */
+#define ISDN_CMD_SETL2 10 /* Set B-Chan. Layer2-Parameter */
+#define ISDN_CMD_GETL2 11 /* Get B-Chan. Layer2-Parameter */
+#define ISDN_CMD_SETL3 12 /* Set B-Chan. Layer3-Parameter */
+#define ISDN_CMD_GETL3 13 /* Get B-Chan. Layer3-Parameter */
+// #define ISDN_CMD_LOCK 14 /* Signal usage by upper levels */
+// #define ISDN_CMD_UNLOCK 15 /* Release usage-lock */
+#define ISDN_CMD_SUSPEND 16 /* Suspend connection */
+#define ISDN_CMD_RESUME 17 /* Resume connection */
+#define ISDN_CMD_PROCEED 18 /* Proceed with call establishment */
+#define ISDN_CMD_ALERT 19 /* Alert after Proceeding */
+#define ISDN_CMD_REDIR 20 /* Redirect an incoming call */
+#define ISDN_CMD_PROT_IO 21 /* Protocol specific commands */
+#define CAPI_PUT_MESSAGE 22 /* CAPI message send down or up */
+#define ISDN_CMD_FAXCMD 23 /* FAX commands to HL-driver */
+#define ISDN_CMD_AUDIO 24 /* DSP, DTMF, ... settings */
+
+/*
+ * Status-Values delivered from lowlevel to linklevel via
+ * statcallb().
+ *
+ */
+#define ISDN_STAT_STAVAIL 256 /* Raw status-data available */
+#define ISDN_STAT_ICALL 257 /* Incoming call detected */
+#define ISDN_STAT_RUN 258 /* Signal protocol-code is running */
+#define ISDN_STAT_STOP 259 /* Signal halt of protocol-code */
+#define ISDN_STAT_DCONN 260 /* Signal D-Channel connect */
+#define ISDN_STAT_BCONN 261 /* Signal B-Channel connect */
+#define ISDN_STAT_DHUP 262 /* Signal D-Channel disconnect */
+#define ISDN_STAT_BHUP 263 /* Signal B-Channel disconnect */
+#define ISDN_STAT_CINF 264 /* Charge-Info */
+#define ISDN_STAT_LOAD 265 /* Signal new lowlevel-driver is loaded */
+#define ISDN_STAT_UNLOAD 266 /* Signal unload of lowlevel-driver */
+#define ISDN_STAT_BSENT 267 /* Signal packet sent */
+#define ISDN_STAT_NODCH 268 /* Signal no D-Channel */
+#define ISDN_STAT_ADDCH 269 /* Add more Channels */
+#define ISDN_STAT_CAUSE 270 /* Cause-Message */
+#define ISDN_STAT_ICALLW 271 /* Incoming call without B-chan waiting */
+#define ISDN_STAT_REDIR 272 /* Redir result */
+#define ISDN_STAT_PROT 273 /* protocol IO specific callback */
+#define ISDN_STAT_DISPLAY 274 /* deliver a received display message */
+#define ISDN_STAT_L1ERR 275 /* Signal Layer-1 Error */
+#define ISDN_STAT_FAXIND 276 /* FAX indications from HL-driver */
+#define ISDN_STAT_AUDIO 277 /* DTMF, DSP indications */
+#define ISDN_STAT_DISCH 278 /* Disable/Enable channel usage */
+
+/*
+ * Audio commands
+ */
+#define ISDN_AUDIO_SETDD 0 /* Set DTMF detection */
+#define ISDN_AUDIO_DTMF 1 /* Rx/Tx DTMF */
+
+/*
+ * Values for errcode field
+ */
+#define ISDN_STAT_L1ERR_SEND 1
+#define ISDN_STAT_L1ERR_RECV 2
+
+/*
+ * Values for feature-field of interface-struct.
+ */
+/* Layer 2 */
+#define ISDN_FEATURE_L2_X75I (0x0001 << ISDN_PROTO_L2_X75I)
+#define ISDN_FEATURE_L2_X75UI (0x0001 << ISDN_PROTO_L2_X75UI)
+#define ISDN_FEATURE_L2_X75BUI (0x0001 << ISDN_PROTO_L2_X75BUI)
+#define ISDN_FEATURE_L2_HDLC (0x0001 << ISDN_PROTO_L2_HDLC)
+#define ISDN_FEATURE_L2_TRANS (0x0001 << ISDN_PROTO_L2_TRANS)
+#define ISDN_FEATURE_L2_X25DTE (0x0001 << ISDN_PROTO_L2_X25DTE)
+#define ISDN_FEATURE_L2_X25DCE (0x0001 << ISDN_PROTO_L2_X25DCE)
+#define ISDN_FEATURE_L2_V11096 (0x0001 << ISDN_PROTO_L2_V11096)
+#define ISDN_FEATURE_L2_V11019 (0x0001 << ISDN_PROTO_L2_V11019)
+#define ISDN_FEATURE_L2_V11038 (0x0001 << ISDN_PROTO_L2_V11038)
+#define ISDN_FEATURE_L2_MODEM (0x0001 << ISDN_PROTO_L2_MODEM)
+#define ISDN_FEATURE_L2_FAX (0x0001 << ISDN_PROTO_L2_FAX)
+#define ISDN_FEATURE_L2_HDLC_56K (0x0001 << ISDN_PROTO_L2_HDLC_56K)
+
+#define ISDN_FEATURE_L2_MASK (0x0FFFF) /* Max. 16 protocols */
+#define ISDN_FEATURE_L2_SHIFT (0)
+
+/* Layer 3 */
+#define ISDN_FEATURE_L3_TRANS (0x10000 << ISDN_PROTO_L3_TRANS)
+#define ISDN_FEATURE_L3_TRANSDSP (0x10000 << ISDN_PROTO_L3_TRANSDSP)
+#define ISDN_FEATURE_L3_FCLASS2 (0x10000 << ISDN_PROTO_L3_FCLASS2)
+#define ISDN_FEATURE_L3_FCLASS1 (0x10000 << ISDN_PROTO_L3_FCLASS1)
+
+#define ISDN_FEATURE_L3_MASK (0x0FF0000) /* Max. 8 Protocols */
+#define ISDN_FEATURE_L3_SHIFT (16)
+
+/* Signaling */
+#define ISDN_FEATURE_P_UNKNOWN (0x1000000 << ISDN_PTYPE_UNKNOWN)
+#define ISDN_FEATURE_P_1TR6 (0x1000000 << ISDN_PTYPE_1TR6)
+#define ISDN_FEATURE_P_EURO (0x1000000 << ISDN_PTYPE_EURO)
+#define ISDN_FEATURE_P_NI1 (0x1000000 << ISDN_PTYPE_NI1)
+
+#define ISDN_FEATURE_P_MASK (0x0FF000000) /* Max. 8 Protocols */
+#define ISDN_FEATURE_P_SHIFT (24)
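+
+/*
+ * Worked example (illustrative): a driver supporting transparent and HDLC
+ * layer 2, transparent layer 3 and Euro-ISDN signalling would announce
+ *      features = ISDN_FEATURE_L2_TRANS | ISDN_FEATURE_L2_HDLC |
+ *                 ISDN_FEATURE_L3_TRANS | ISDN_FEATURE_P_EURO;
+ * i.e. one bit per protocol in the L2 (bits 0-15), L3 (bits 16-23) and
+ * signalling (bits 24-31) fields described above.
+ */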
+
+typedef struct setup_parm {
+ unsigned char phone[32]; /* Remote Phone-Number */
+ unsigned char eazmsn[32]; /* Local EAZ or MSN */
+ unsigned char si1; /* Service Indicator 1 */
+ unsigned char si2; /* Service Indicator 2 */
+ unsigned char plan; /* Numbering plan */
+ unsigned char screen; /* Screening info */
+} setup_parm;
+
+
+#ifdef CONFIG_ISDN_TTY_FAX
+/* T.30 Fax G3 */
+
+#define FAXIDLEN 21
+
+typedef struct T30_s {
+ /* session parameters */
+ __u8 resolution;
+ __u8 rate;
+ __u8 width;
+ __u8 length;
+ __u8 compression;
+ __u8 ecm;
+ __u8 binary;
+ __u8 scantime;
+ __u8 id[FAXIDLEN];
+ /* additional parameters */
+ __u8 phase;
+ __u8 direction;
+ __u8 code;
+ __u8 badlin;
+ __u8 badmul;
+ __u8 bor;
+ __u8 fet;
+ __u8 pollid[FAXIDLEN];
+ __u8 cq;
+ __u8 cr;
+ __u8 ctcrty;
+ __u8 minsp;
+ __u8 phcto;
+ __u8 rel;
+ __u8 nbc;
+ /* remote station parameters */
+ __u8 r_resolution;
+ __u8 r_rate;
+ __u8 r_width;
+ __u8 r_length;
+ __u8 r_compression;
+ __u8 r_ecm;
+ __u8 r_binary;
+ __u8 r_scantime;
+ __u8 r_id[FAXIDLEN];
+ __u8 r_code;
+} __packed T30_s;
+
+#define ISDN_TTY_FAX_CONN_IN 0
+#define ISDN_TTY_FAX_CONN_OUT 1
+
+#define ISDN_TTY_FAX_FCON 0
+#define ISDN_TTY_FAX_DIS 1
+#define ISDN_TTY_FAX_FTT 2
+#define ISDN_TTY_FAX_MCF 3
+#define ISDN_TTY_FAX_DCS 4
+#define ISDN_TTY_FAX_TRAIN_OK 5
+#define ISDN_TTY_FAX_EOP 6
+#define ISDN_TTY_FAX_EOM 7
+#define ISDN_TTY_FAX_MPS 8
+#define ISDN_TTY_FAX_DTC 9
+#define ISDN_TTY_FAX_RID 10
+#define ISDN_TTY_FAX_HNG 11
+#define ISDN_TTY_FAX_DT 12
+#define ISDN_TTY_FAX_FCON_I 13
+#define ISDN_TTY_FAX_DR 14
+#define ISDN_TTY_FAX_ET 15
+#define ISDN_TTY_FAX_CFR 16
+#define ISDN_TTY_FAX_PTS 17
+#define ISDN_TTY_FAX_SENT 18
+
+#define ISDN_FAX_PHASE_IDLE 0
+#define ISDN_FAX_PHASE_A 1
+#define ISDN_FAX_PHASE_B 2
+#define ISDN_FAX_PHASE_C 3
+#define ISDN_FAX_PHASE_D 4
+#define ISDN_FAX_PHASE_E 5
+
+#endif /* TTY_FAX */
+
+#define ISDN_FAX_CLASS1_FAE 0
+#define ISDN_FAX_CLASS1_FTS 1
+#define ISDN_FAX_CLASS1_FRS 2
+#define ISDN_FAX_CLASS1_FTM 3
+#define ISDN_FAX_CLASS1_FRM 4
+#define ISDN_FAX_CLASS1_FTH 5
+#define ISDN_FAX_CLASS1_FRH 6
+#define ISDN_FAX_CLASS1_CTRL 7
+
+#define ISDN_FAX_CLASS1_OK 0
+#define ISDN_FAX_CLASS1_CONNECT 1
+#define ISDN_FAX_CLASS1_NOCARR 2
+#define ISDN_FAX_CLASS1_ERROR 3
+#define ISDN_FAX_CLASS1_FCERROR 4
+#define ISDN_FAX_CLASS1_QUERY 5
+
+typedef struct {
+ __u8 cmd;
+ __u8 subcmd;
+ __u8 para[50];
+} aux_s;
+
+#define AT_COMMAND 0
+#define AT_EQ_VALUE 1
+#define AT_QUERY 2
+#define AT_EQ_QUERY 3
+
+/* CAPI structs */
+
+/* this is compatible to the old union size */
+#define MAX_CAPI_PARA_LEN 50
+
+typedef struct {
+ /* Header */
+ __u16 Length;
+ __u16 ApplId;
+ __u8 Command;
+ __u8 Subcommand;
+ __u16 Messagenumber;
+
+ /* Parameter */
+ union {
+ __u32 Controller;
+ __u32 PLCI;
+ __u32 NCCI;
+ } adr;
+ __u8 para[MAX_CAPI_PARA_LEN];
+} capi_msg;
+
+/*
+ * Structure for exchanging the information described above
+ *
+ */
+typedef struct {
+ int driver; /* Lowlevel-Driver-ID */
+ int command; /* Command or Status (see above) */
+ ulong arg; /* Additional Data */
+ union {
+ ulong errcode; /* Type of error with STAT_L1ERR */
+ int length; /* Amount of bytes sent with STAT_BSENT */
+ u_char num[50]; /* Additional Data */
+ setup_parm setup;/* For SETUP msg */
+ capi_msg cmsg; /* For CAPI like messages */
+ char display[85];/* display message data */
+ isdn_cmd_stat isdn_io; /* ISDN IO-parameter/result */
+ aux_s aux; /* for modem commands/indications */
+#ifdef CONFIG_ISDN_TTY_FAX
+ T30_s *fax; /* Pointer to ttys fax struct */
+#endif
+ ulong userdata; /* User Data */
+ } parm;
+} isdn_ctrl;
+
+#define dss1_io isdn_io
+#define ni1_io isdn_io
+
+/*
+ * The interface-struct itself (initialized at load-time of lowlevel-driver)
+ *
+ * See Documentation/isdn/INTERFACE for a description, how the communication
+ * between the ISDN subsystem and its drivers is done.
+ *
+ */
+typedef struct {
+ struct module *owner;
+
+ /* Number of channels supported by this driver
+ */
+ int channels;
+
+ /*
+ * Maximum Size of transmit/receive-buffer this driver supports.
+ */
+ int maxbufsize;
+
+ /* Feature-Flags for this driver.
+ * See defines ISDN_FEATURE_... for Values
+ */
+ unsigned long features;
+
+ /*
+ * Needed for calculating
+ * dev->hard_header_len = linklayer header + hl_hdrlen;
+	 * Drivers not supporting sk_buff's should set this to 0.
+ */
+ unsigned short hl_hdrlen;
+
+ /*
+ * Receive-Callback using sk_buff's
+ * Parameters:
+ * int Driver-ID
+ * int local channel-number (0 ...)
+ * struct sk_buff *skb received Data
+ */
+ void (*rcvcallb_skb)(int, int, struct sk_buff *);
+
+ /* Status-Callback
+ * Parameters:
+ * isdn_ctrl*
+ * driver = Driver ID.
+ * command = One of above ISDN_STAT_... constants.
+ * arg = depending on status-type.
+ * num = depending on status-type.
+ */
+ int (*statcallb)(isdn_ctrl*);
+
+ /* Send command
+ * Parameters:
+ * isdn_ctrl*
+ * driver = Driver ID.
+ * command = One of above ISDN_CMD_... constants.
+ * arg = depending on command.
+ * num = depending on command.
+ */
+ int (*command)(isdn_ctrl*);
+
+ /*
+ * Send data using sk_buff's
+ * Parameters:
+ * int driverId
+ * int local channel-number (0...)
+ * int Flag: Need ACK for this packet.
+ * struct sk_buff *skb Data to send
+ */
+ int (*writebuf_skb) (int, int, int, struct sk_buff *);
+
+ /* Send raw D-Channel-Commands
+ * Parameters:
+ * u_char pointer data
+ * int length of data
+ * int driverId
+ * int local channel-number (0 ...)
+ */
+ int (*writecmd)(const u_char __user *, int, int, int);
+
+ /* Read raw Status replies
+ * u_char pointer data (volatile)
+ * int length of buffer
+ * int driverId
+ * int local channel-number (0 ...)
+ */
+ int (*readstat)(u_char __user *, int, int, int);
+
+ char id[20];
+} isdn_if;
+
+/*
+ * Function which must be called by lowlevel-driver at loadtime with
+ * the following fields of above struct set:
+ *
+ * channels Number of channels that will be supported.
+ *   hl_hdrlen    Space to preserve in sk_buff's when sending. Drivers not
+ *                supporting sk_buff's should set this to 0.
+ * command Address of Command-Handler.
+ * features Bitwise coded Features of this driver. (use ISDN_FEATURE_...)
+ * writebuf_skb Address of Skbuff-Send-Handler.
+ *   writecmd     Address of the D-Channel handler which accepts raw D-Channel commands.
+ *   readstat     Address of the D-Channel handler which delivers raw status data.
+ *
+ * The linklevel-driver fills the following fields:
+ *
+ *   channels     Driver-ID assigned to this driver. (Must be used on all
+ *                subsequent callbacks.)
+ * rcvcallb_skb Address of handler for received Skbuff's.
+ *   statcallb    Address of the handler for status-changes.
+ *
+ */
+extern int register_isdn(isdn_if*);
+#include <asm/uaccess.h>
+
+#endif /* __ISDNIF_H__ */
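The comment block above spells out the registration contract, so a concrete sketch may help. The following is an illustrative, hypothetical low-level driver skeleton (the my_* names, the channel count and the chosen feature bits are examples, not part of the header); it only uses fields and constants declared above, and relies on the documented behaviour that the link level overwrites the channels field with the assigned driver ID. The return-value convention of register_isdn() (nonzero on success) is an assumption noted in the code.

/* Illustrative sketch only -- not part of isdnif.h. */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/isdnif.h>

static int my_driver_id;

static int my_command(isdn_ctrl *c)
{
	/* Dispatch on c->command (one of the ISDN_CMD_... constants). */
	return 0;
}

static int my_writebuf_skb(int driver, int channel, int ack,
			   struct sk_buff *skb)
{
	/* Queue skb for transmission on 'channel'; return bytes accepted. */
	return skb->len;
}

static int my_writecmd(const u_char __user *buf, int len,
		       int driver, int channel)
{
	return len;	/* pretend the raw D-channel command was consumed */
}

static int my_readstat(u_char __user *buf, int len,
		       int driver, int channel)
{
	return 0;	/* no raw status data pending */
}

static isdn_if my_iface = {
	.channels     = 2,		/* e.g. two B-channels */
	.maxbufsize   = 4000,
	.features     = ISDN_FEATURE_L2_TRANS | ISDN_FEATURE_L3_TRANS |
			ISDN_FEATURE_P_EURO,
	.hl_hdrlen    = 0,		/* no extra sk_buff headroom needed */
	.command      = my_command,
	.writebuf_skb = my_writebuf_skb,
	.writecmd     = my_writecmd,
	.readstat     = my_readstat,
	.id           = "mydrv",
};

static int my_driver_init(void)
{
	my_iface.owner = THIS_MODULE;
	if (!register_isdn(&my_iface))	/* assumed: nonzero return == success */
		return -EIO;
	/* Per the comment above, 'channels' now holds the assigned driver ID,
	 * and rcvcallb_skb/statcallb have been filled in by the link level. */
	my_driver_id = my_iface.channels;
	return 0;
}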
diff --git a/include/linux/isicom.h b/include/linux/isicom.h
new file mode 100644
index 000000000..b92e05650
--- /dev/null
+++ b/include/linux/isicom.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_ISICOM_H
+#define _LINUX_ISICOM_H
+
+#define YES 1
+#define NO 0
+
+/*
+ * ISICOM Driver definitions ...
+ *
+ */
+
+#define ISICOM_NAME "ISICom"
+
+/*
+ * PCI definitions
+ */
+
+#define DEVID_COUNT 9
+#define VENDOR_ID 0x10b5
+
+/*
+ * These are now officially allocated numbers
+ */
+
+#define ISICOM_NMAJOR 112 /* normal */
+#define ISICOM_CMAJOR 113 /* callout */
+#define ISICOM_MAGIC (('M' << 8) | 'T')
+
+#define WAKEUP_CHARS 256 /* hard coded for now */
+#define TX_SIZE 254
+
+#define BOARD_COUNT 4
+#define PORT_COUNT (BOARD_COUNT*16)
+
+/* character sizes */
+
+#define ISICOM_CS5 0x0000
+#define ISICOM_CS6 0x0001
+#define ISICOM_CS7 0x0002
+#define ISICOM_CS8 0x0003
+
+/* stop bits */
+
+#define ISICOM_1SB 0x0000
+#define ISICOM_2SB 0x0004
+
+/* parity */
+
+#define ISICOM_NOPAR 0x0000
+#define ISICOM_ODPAR 0x0008
+#define ISICOM_EVPAR 0x0018
+
+/* flow control */
+
+#define ISICOM_CTSRTS 0x03
+#define ISICOM_INITIATE_XONXOFF 0x04
+#define ISICOM_RESPOND_XONXOFF 0x08
+
+#define BOARD(line) (((line) >> 4) & 0x3)
+
+ /* isi kill queue bitmap */
+
+#define ISICOM_KILLTX 0x01
+#define ISICOM_KILLRX 0x02
+
+ /* isi_board status bitmap */
+
+#define FIRMWARE_LOADED 0x0001
+#define BOARD_ACTIVE 0x0002
+#define BOARD_INIT 0x0004
+
+ /* isi_port status bitmap */
+
+#define ISI_CTS 0x1000
+#define ISI_DSR 0x2000
+#define ISI_RI 0x4000
+#define ISI_DCD 0x8000
+#define ISI_DTR 0x0100
+#define ISI_RTS 0x0200
+
+
+#define ISI_TXOK 0x0001
+
+#endif /* _LINUX_ISICOM_H */
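Since the values above are plain bit masks, a short hypothetical sketch of how they compose may be useful (the function and variable names are illustrative; only the macros come from this header, and the "low 4 bits select the port" detail is inferred from PORT_COUNT rather than stated explicitly).

/* Illustrative sketch only -- not part of isicom.h. */
#include <linux/isicom.h>

static void isi_example(int line)
{
	/* 8 data bits, 1 stop bit, even parity, OR-ed into one setup word. */
	unsigned short setup = ISICOM_CS8 | ISICOM_1SB | ISICOM_EVPAR;
	unsigned char flow = ISICOM_CTSRTS;	/* hardware flow control */

	/* BOARD() picks one of the BOARD_COUNT (4) boards from a line number;
	 * with 16 ports per board, the low 4 bits select the port. */
	int board = BOARD(line);
	int port  = line & 0x0f;

	(void)setup; (void)flow; (void)board; (void)port;
}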
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
new file mode 100644
index 000000000..d32615280
--- /dev/null
+++ b/include/linux/jbd.h
@@ -0,0 +1,1047 @@
+/*
+ * linux/include/linux/jbd.h
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>
+ *
+ * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Definitions for transaction data structures for the buffer cache
+ * filesystem journaling support.
+ */
+
+#ifndef _LINUX_JBD_H
+#define _LINUX_JBD_H
+
+/* Allow this file to be included directly into e2fsprogs */
+#ifndef __KERNEL__
+#include "jfs_compat.h"
+#define JFS_DEBUG
+#define jfs_debug jbd_debug
+#else
+
+#include <linux/types.h>
+#include <linux/buffer_head.h>
+#include <linux/journal-head.h>
+#include <linux/stddef.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/lockdep.h>
+#include <linux/slab.h>
+
+#define journal_oom_retry 1
+
+/*
+ * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
+ * certain classes of error which can occur due to failed IOs. Under
+ * normal use we want ext3 to continue after such errors, because
+ * hardware _can_ fail, but for debugging purposes when running tests on
+ * known-good hardware we may want to trap these errors.
+ */
+#undef JBD_PARANOID_IOFAIL
+
+/*
+ * The default maximum commit age, in seconds.
+ */
+#define JBD_DEFAULT_MAX_COMMIT_AGE 5
+
+#ifdef CONFIG_JBD_DEBUG
+/*
+ * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
+ * consistency checks. By default we don't do this unless
+ * CONFIG_JBD_DEBUG is on.
+ */
+#define JBD_EXPENSIVE_CHECKING
+extern u8 journal_enable_debug;
+
+void __jbd_debug(int level, const char *file, const char *func,
+ unsigned int line, const char *fmt, ...);
+
+#define jbd_debug(n, fmt, a...) \
+ __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
+#else
+#define jbd_debug(n, fmt, a...) /**/
+#endif
+
+static inline void *jbd_alloc(size_t size, gfp_t flags)
+{
+ return (void *)__get_free_pages(flags, get_order(size));
+}
+
+static inline void jbd_free(void *ptr, size_t size)
+{
+ free_pages((unsigned long)ptr, get_order(size));
+}
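A minimal sketch of how these two helpers pair up (the caller names and the GFP flag are illustrative, not taken from jbd's real callers): jbd_alloc() rounds the request up to whole pages via get_order(), so the same size must be handed back to jbd_free().

/* Illustrative sketch only. */
static void *example_get_block_buffer(journal_t *journal)
{
	/* Rounded up to a power-of-two number of pages internally. */
	return jbd_alloc(journal->j_blocksize, GFP_NOFS);
}

static void example_put_block_buffer(journal_t *journal, void *buf)
{
	jbd_free(buf, journal->j_blocksize);	/* must match the alloc size */
}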
+
+#define JFS_MIN_JOURNAL_BLOCKS 1024
+
+
+/**
+ * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
+ *
+ * All filesystem modifications made by the process go
+ * through this handle. Recursive operations (such as quota operations)
+ * are gathered into a single update.
+ *
+ * The buffer credits field is used to account for journaled buffers
+ * being modified by the running process. To ensure that there is
+ * enough log space for all outstanding operations, we need to limit the
+ * number of outstanding buffers possible at any time. When the
+ * operation completes, any buffer credits not used are credited back to
+ * the transaction, so that at all times we know how many buffers the
+ * outstanding updates on a transaction might possibly touch.
+ *
+ * This is an opaque datatype.
+ **/
+typedef struct handle_s handle_t; /* Atomic operation type */
+
+
+/**
+ * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
+ *
+ * journal_t is linked to from the fs superblock structure.
+ *
+ * We use the journal_t to keep track of all outstanding transaction
+ * activity on the filesystem, and to manage the state of the log
+ * writing process.
+ *
+ * This is an opaque datatype.
+ **/
+typedef struct journal_s journal_t; /* Journal control structure */
+#endif
+
+/*
+ * Internal structures used by the logging mechanism:
+ */
+
+#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
+
+/*
+ * On-disk structures
+ */
+
+/*
+ * Descriptor block types:
+ */
+
+#define JFS_DESCRIPTOR_BLOCK 1
+#define JFS_COMMIT_BLOCK 2
+#define JFS_SUPERBLOCK_V1 3
+#define JFS_SUPERBLOCK_V2 4
+#define JFS_REVOKE_BLOCK 5
+
+/*
+ * Standard header for all descriptor blocks:
+ */
+typedef struct journal_header_s
+{
+ __be32 h_magic;
+ __be32 h_blocktype;
+ __be32 h_sequence;
+} journal_header_t;
+
+
+/*
+ * The block tag: used to describe a single buffer in the journal
+ */
+typedef struct journal_block_tag_s
+{
+ __be32 t_blocknr; /* The on-disk block number */
+ __be32 t_flags; /* See below */
+} journal_block_tag_t;
+
+/*
+ * The revoke descriptor: used on disk to describe a series of blocks to
+ * be revoked from the log
+ */
+typedef struct journal_revoke_header_s
+{
+ journal_header_t r_header;
+ __be32 r_count; /* Count of bytes used in the block */
+} journal_revoke_header_t;
+
+
+/* Definitions for the journal tag flags word: */
+#define JFS_FLAG_ESCAPE 1 /* on-disk block is escaped */
+#define JFS_FLAG_SAME_UUID 2 /* block has same uuid as previous */
+#define JFS_FLAG_DELETED 4 /* block deleted by this transaction */
+#define JFS_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
+
+
+/*
+ * The journal superblock. All fields are in big-endian byte order.
+ */
+typedef struct journal_superblock_s
+{
+/* 0x0000 */
+ journal_header_t s_header;
+
+/* 0x000C */
+ /* Static information describing the journal */
+ __be32 s_blocksize; /* journal device blocksize */
+ __be32 s_maxlen; /* total blocks in journal file */
+ __be32 s_first; /* first block of log information */
+
+/* 0x0018 */
+ /* Dynamic information describing the current state of the log */
+ __be32 s_sequence; /* first commit ID expected in log */
+ __be32 s_start; /* blocknr of start of log */
+
+/* 0x0020 */
+ /* Error value, as set by journal_abort(). */
+ __be32 s_errno;
+
+/* 0x0024 */
+ /* Remaining fields are only valid in a version-2 superblock */
+ __be32 s_feature_compat; /* compatible feature set */
+ __be32 s_feature_incompat; /* incompatible feature set */
+ __be32 s_feature_ro_compat; /* readonly-compatible feature set */
+/* 0x0030 */
+ __u8 s_uuid[16]; /* 128-bit uuid for journal */
+
+/* 0x0040 */
+ __be32 s_nr_users; /* Nr of filesystems sharing log */
+
+ __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/
+
+/* 0x0048 */
+ __be32 s_max_transaction; /* Limit of journal blocks per trans.*/
+ __be32 s_max_trans_data; /* Limit of data blocks per trans. */
+
+/* 0x0050 */
+ __u32 s_padding[44];
+
+/* 0x0100 */
+ __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
+/* 0x0400 */
+} journal_superblock_t;
+
+#define JFS_HAS_COMPAT_FEATURE(j,mask) \
+ ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
+#define JFS_HAS_RO_COMPAT_FEATURE(j,mask) \
+ ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
+#define JFS_HAS_INCOMPAT_FEATURE(j,mask) \
+ ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
+
+#define JFS_FEATURE_INCOMPAT_REVOKE 0x00000001
+
+/* Features known to this kernel version: */
+#define JFS_KNOWN_COMPAT_FEATURES 0
+#define JFS_KNOWN_ROCOMPAT_FEATURES 0
+#define JFS_KNOWN_INCOMPAT_FEATURES JFS_FEATURE_INCOMPAT_REVOKE
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+
+enum jbd_state_bits {
+ BH_JBD /* Has an attached ext3 journal_head */
+ = BH_PrivateStart,
+ BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
+ BH_Freed, /* Has been freed (truncated) */
+ BH_Revoked, /* Has been revoked from the log */
+ BH_RevokeValid, /* Revoked flag is valid */
+ BH_JBDDirty, /* Is dirty but journaled */
+ BH_State, /* Pins most journal_head state */
+ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
+ BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
+ BH_JBDPrivateStart, /* First bit available for private use by FS */
+};
+
+BUFFER_FNS(JBD, jbd)
+BUFFER_FNS(JWrite, jwrite)
+BUFFER_FNS(JBDDirty, jbddirty)
+TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Revoked, revoked)
+TAS_BUFFER_FNS(Revoked, revoked)
+BUFFER_FNS(RevokeValid, revokevalid)
+TAS_BUFFER_FNS(RevokeValid, revokevalid)
+BUFFER_FNS(Freed, freed)
+
+#include <linux/jbd_common.h>
+
+#define J_ASSERT(assert) BUG_ON(!(assert))
+
+#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
+#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
+
+#if defined(JBD_PARANOID_IOFAIL)
+#define J_EXPECT(expr, why...) J_ASSERT(expr)
+#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
+#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
+#else
+#define __journal_expect(expr, why...) \
+ ({ \
+ int val = (expr); \
+ if (!val) { \
+ printk(KERN_ERR \
+ "EXT3-fs unexpected failure: %s;\n",# expr); \
+ printk(KERN_ERR why "\n"); \
+ } \
+ val; \
+ })
+#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
+#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
+#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
+#endif
+
+struct jbd_revoke_table_s;
+
+/**
+ * struct handle_s - this is the concrete type associated with handle_t.
+ * @h_transaction: Which compound transaction is this update a part of?
+ * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
+ * @h_ref: Reference count on this handle
+ * @h_err: Field for caller's use to track errors through large fs operations
+ * @h_sync: flag for sync-on-close
+ * @h_jdata: flag to force data journaling
+ * @h_aborted: flag indicating fatal error on handle
+ * @h_lockdep_map: lockdep info for debugging lock problems
+ */
+struct handle_s
+{
+ /* Which compound transaction is this update a part of? */
+ transaction_t *h_transaction;
+
+ /* Number of remaining buffers we are allowed to dirty: */
+ int h_buffer_credits;
+
+ /* Reference count on this handle */
+ int h_ref;
+
+ /* Field for caller's use to track errors through large fs */
+ /* operations */
+ int h_err;
+
+ /* Flags [no locking] */
+ unsigned int h_sync: 1; /* sync-on-close */
+ unsigned int h_jdata: 1; /* force data journaling */
+ unsigned int h_aborted: 1; /* fatal error on handle */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map h_lockdep_map;
+#endif
+};
+
+
+/* The transaction_t type is the guts of the journaling mechanism. It
+ * tracks a compound transaction through its various states:
+ *
+ * RUNNING: accepting new updates
+ * LOCKED: Updates still running but we don't accept new ones
+ * RUNDOWN: Updates are tidying up but have finished requesting
+ * new buffers to modify (state not used for now)
+ * FLUSH: All updates complete, but we are still writing to disk
+ * COMMIT: All data on disk, writing commit record
+ * FINISHED: We still have to keep the transaction for checkpointing.
+ *
+ * The transaction keeps track of all of the buffers modified by a
+ * running transaction, and all of the buffers committed but not yet
+ * flushed to home for finished transactions.
+ */
+
+/*
+ * Lock ranking:
+ *
+ * j_list_lock
+ * ->jbd_lock_bh_journal_head() (This is "innermost")
+ *
+ * j_state_lock
+ * ->jbd_lock_bh_state()
+ *
+ * jbd_lock_bh_state()
+ * ->j_list_lock
+ *
+ * j_state_lock
+ * ->t_handle_lock
+ *
+ * j_state_lock
+ * ->j_list_lock (journal_unmap_buffer)
+ *
+ */
+
+struct transaction_s
+{
+ /* Pointer to the journal for this transaction. [no locking] */
+ journal_t *t_journal;
+
+ /* Sequence number for this transaction [no locking] */
+ tid_t t_tid;
+
+ /*
+ * Transaction's current state
+ * [no locking - only kjournald alters this]
+ * [j_list_lock] guards transition of a transaction into T_FINISHED
+ * state and subsequent call of __journal_drop_transaction()
+ * FIXME: needs barriers
+ * KLUDGE: [use j_state_lock]
+ */
+ enum {
+ T_RUNNING,
+ T_LOCKED,
+ T_FLUSH,
+ T_COMMIT,
+ T_COMMIT_RECORD,
+ T_FINISHED
+ } t_state;
+
+ /*
+ * Where in the log does this transaction's commit start? [no locking]
+ */
+ unsigned int t_log_start;
+
+ /* Number of buffers on the t_buffers list [j_list_lock] */
+ int t_nr_buffers;
+
+ /*
+ * Doubly-linked circular list of all buffers reserved but not yet
+ * modified by this transaction [j_list_lock]
+ */
+ struct journal_head *t_reserved_list;
+
+ /*
+ * Doubly-linked circular list of all buffers under writeout during
+ * commit [j_list_lock]
+ */
+ struct journal_head *t_locked_list;
+
+ /*
+ * Doubly-linked circular list of all metadata buffers owned by this
+ * transaction [j_list_lock]
+ */
+ struct journal_head *t_buffers;
+
+ /*
+ * Doubly-linked circular list of all data buffers still to be
+ * flushed before this transaction can be committed [j_list_lock]
+ */
+ struct journal_head *t_sync_datalist;
+
+ /*
+ * Doubly-linked circular list of all forget buffers (superseded
+ * buffers which we can un-checkpoint once this transaction commits)
+ * [j_list_lock]
+ */
+ struct journal_head *t_forget;
+
+ /*
+ * Doubly-linked circular list of all buffers still to be flushed before
+ * this transaction can be checkpointed. [j_list_lock]
+ */
+ struct journal_head *t_checkpoint_list;
+
+ /*
+ * Doubly-linked circular list of all buffers submitted for IO while
+ * checkpointing. [j_list_lock]
+ */
+ struct journal_head *t_checkpoint_io_list;
+
+ /*
+ * Doubly-linked circular list of temporary buffers currently undergoing
+ * IO in the log [j_list_lock]
+ */
+ struct journal_head *t_iobuf_list;
+
+ /*
+ * Doubly-linked circular list of metadata buffers being shadowed by log
+ * IO. The IO buffers on the iobuf list and the shadow buffers on this
+ * list match each other one for one at all times. [j_list_lock]
+ */
+ struct journal_head *t_shadow_list;
+
+ /*
+ * Doubly-linked circular list of control buffers being written to the
+ * log. [j_list_lock]
+ */
+ struct journal_head *t_log_list;
+
+ /*
+ * Protects info related to handles
+ */
+ spinlock_t t_handle_lock;
+
+ /*
+ * Number of outstanding updates running on this transaction
+ * [t_handle_lock]
+ */
+ int t_updates;
+
+ /*
+ * Number of buffers reserved for use by all handles in this transaction
+ * handle but not yet modified. [t_handle_lock]
+ */
+ int t_outstanding_credits;
+
+ /*
+ * Forward and backward links for the circular list of all transactions
+ * awaiting checkpoint. [j_list_lock]
+ */
+ transaction_t *t_cpnext, *t_cpprev;
+
+ /*
+ * When will the transaction expire (become due for commit), in jiffies?
+ * [no locking]
+ */
+ unsigned long t_expires;
+
+ /*
+ * When this transaction started, in nanoseconds [no locking]
+ */
+ ktime_t t_start_time;
+
+ /*
+ * How many handles used this transaction? [t_handle_lock]
+ */
+ int t_handle_count;
+};
+
+/**
+ * struct journal_s - this is the concrete type associated with journal_t.
+ * @j_flags: General journaling state flags
+ * @j_errno: Is there an outstanding uncleared error on the journal (from a
+ * prior abort)?
+ * @j_sb_buffer: Buffer head holding the on-disk journal superblock
+ * @j_superblock: Pointer to the superblock data within j_sb_buffer
+ * @j_format_version: Version of the superblock format
+ * @j_state_lock: Protect the various scalars in the journal
+ * @j_barrier_count: Number of processes waiting to create a barrier lock
+ * @j_running_transaction: The current running transaction.
+ * @j_committing_transaction: the transaction we are pushing to disk
+ * @j_checkpoint_transactions: a linked circular list of all transactions
+ * waiting for checkpointing
+ * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
+ * to start committing, or for a barrier lock to be released
+ * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
+ * @j_wait_done_commit: Wait queue for waiting for commit to complete
+ * @j_wait_checkpoint: Wait queue to trigger checkpointing
+ * @j_wait_commit: Wait queue to trigger commit
+ * @j_wait_updates: Wait queue to wait for updates to complete
+ * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
+ * @j_head: Journal head - identifies the first unused block in the journal
+ * @j_tail: Journal tail - identifies the oldest still-used block in the
+ * journal.
+ * @j_free: Journal free - how many free blocks are there in the journal?
+ * @j_first: The block number of the first usable block
+ * @j_last: The block number one beyond the last usable block
+ * @j_dev: Device where we store the journal
+ * @j_blocksize: blocksize for the location where we store the journal.
+ * @j_blk_offset: starting block offset into the device where we store the
+ * journal
+ * @j_fs_dev: Device which holds the client fs. For internal journal this will
+ * be equal to j_dev
+ * @j_maxlen: Total maximum capacity of the journal region on disk.
+ * @j_list_lock: Protects the buffer lists and internal buffer state.
+ * @j_inode: Optional inode where we store the journal. If present, all journal
+ * block numbers are mapped into this inode via bmap().
+ * @j_tail_sequence: Sequence number of the oldest transaction in the log
+ * @j_transaction_sequence: Sequence number of the next transaction to grant
+ * @j_commit_sequence: Sequence number of the most recently committed
+ * transaction
+ * @j_commit_request: Sequence number of the most recent transaction wanting
+ * commit
+ * @j_commit_waited: Sequence number of the most recent transaction someone
+ * is waiting for to commit.
+ * @j_uuid: Uuid of client object.
+ * @j_task: Pointer to the current commit thread for this journal
+ * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
+ * single compound commit transaction
+ * @j_commit_interval: What is the maximum transaction lifetime before we begin
+ * a commit?
+ * @j_commit_timer: The timer used to wakeup the commit thread
+ * @j_revoke_lock: Protect the revoke table
+ * @j_revoke: The revoke table - maintains the list of revoked blocks in the
+ * current transaction.
+ * @j_revoke_table: alternate revoke tables for j_revoke
+ * @j_wbuf: array of buffer_heads for journal_commit_transaction
+ * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
+ * number that will fit in j_blocksize
+ * @j_last_sync_writer: most recent pid which did a synchronous write
+ * @j_average_commit_time: the average amount of time in nanoseconds it
+ * takes to commit a transaction to the disk.
+ * @j_private: An opaque pointer to fs-private information.
+ */
+
+struct journal_s
+{
+ /* General journaling state flags [j_state_lock] */
+ unsigned long j_flags;
+
+ /*
+ * Is there an outstanding uncleared error on the journal (from a prior
+ * abort)? [j_state_lock]
+ */
+ int j_errno;
+
+ /* The superblock buffer */
+ struct buffer_head *j_sb_buffer;
+ journal_superblock_t *j_superblock;
+
+ /* Version of the superblock format */
+ int j_format_version;
+
+ /*
+ * Protect the various scalars in the journal
+ */
+ spinlock_t j_state_lock;
+
+ /*
+ * Number of processes waiting to create a barrier lock [j_state_lock]
+ */
+ int j_barrier_count;
+
+ /*
+ * Transactions: The current running transaction...
+ * [j_state_lock] [caller holding open handle]
+ */
+ transaction_t *j_running_transaction;
+
+ /*
+ * the transaction we are pushing to disk
+ * [j_state_lock] [caller holding open handle]
+ */
+ transaction_t *j_committing_transaction;
+
+ /*
+ * ... and a linked circular list of all transactions waiting for
+ * checkpointing. [j_list_lock]
+ */
+ transaction_t *j_checkpoint_transactions;
+
+ /*
+ * Wait queue for waiting for a locked transaction to start committing,
+ * or for a barrier lock to be released
+ */
+ wait_queue_head_t j_wait_transaction_locked;
+
+ /* Wait queue for waiting for checkpointing to complete */
+ wait_queue_head_t j_wait_logspace;
+
+ /* Wait queue for waiting for commit to complete */
+ wait_queue_head_t j_wait_done_commit;
+
+ /* Wait queue to trigger checkpointing */
+ wait_queue_head_t j_wait_checkpoint;
+
+ /* Wait queue to trigger commit */
+ wait_queue_head_t j_wait_commit;
+
+ /* Wait queue to wait for updates to complete */
+ wait_queue_head_t j_wait_updates;
+
+	/* Mutex for locking against concurrent checkpoints */
+ struct mutex j_checkpoint_mutex;
+
+ /*
+ * Journal head: identifies the first unused block in the journal.
+ * [j_state_lock]
+ */
+ unsigned int j_head;
+
+ /*
+ * Journal tail: identifies the oldest still-used block in the journal.
+ * [j_state_lock]
+ */
+ unsigned int j_tail;
+
+ /*
+ * Journal free: how many free blocks are there in the journal?
+ * [j_state_lock]
+ */
+ unsigned int j_free;
+
+ /*
+ * Journal start and end: the block numbers of the first usable block
+ * and one beyond the last usable block in the journal. [j_state_lock]
+ */
+ unsigned int j_first;
+ unsigned int j_last;
+
+ /*
+ * Device, blocksize and starting block offset for the location where we
+ * store the journal.
+ */
+ struct block_device *j_dev;
+ int j_blocksize;
+ unsigned int j_blk_offset;
+
+ /*
+ * Device which holds the client fs. For internal journal this will be
+ * equal to j_dev.
+ */
+ struct block_device *j_fs_dev;
+
+ /* Total maximum capacity of the journal region on disk. */
+ unsigned int j_maxlen;
+
+ /*
+ * Protects the buffer lists and internal buffer state.
+ */
+ spinlock_t j_list_lock;
+
+ /* Optional inode where we store the journal. If present, all */
+ /* journal block numbers are mapped into this inode via */
+ /* bmap(). */
+ struct inode *j_inode;
+
+ /*
+ * Sequence number of the oldest transaction in the log [j_state_lock]
+ */
+ tid_t j_tail_sequence;
+
+ /*
+ * Sequence number of the next transaction to grant [j_state_lock]
+ */
+ tid_t j_transaction_sequence;
+
+ /*
+ * Sequence number of the most recently committed transaction
+ * [j_state_lock].
+ */
+ tid_t j_commit_sequence;
+
+ /*
+ * Sequence number of the most recent transaction wanting commit
+ * [j_state_lock]
+ */
+ tid_t j_commit_request;
+
+ /*
+ * Sequence number of the most recent transaction someone is waiting
+ * for to commit.
+ * [j_state_lock]
+ */
+ tid_t j_commit_waited;
+
+ /*
+ * Journal uuid: identifies the object (filesystem, LVM volume etc)
+ * backed by this journal. This will eventually be replaced by an array
+ * of uuids, allowing us to index multiple devices within a single
+ * journal and to perform atomic updates across them.
+ */
+ __u8 j_uuid[16];
+
+ /* Pointer to the current commit thread for this journal */
+ struct task_struct *j_task;
+
+ /*
+ * Maximum number of metadata buffers to allow in a single compound
+ * commit transaction
+ */
+ int j_max_transaction_buffers;
+
+ /*
+ * What is the maximum transaction lifetime before we begin a commit?
+ */
+ unsigned long j_commit_interval;
+
+ /* The timer used to wakeup the commit thread: */
+ struct timer_list j_commit_timer;
+
+ /*
+ * The revoke table: maintains the list of revoked blocks in the
+ * current transaction. [j_revoke_lock]
+ */
+ spinlock_t j_revoke_lock;
+ struct jbd_revoke_table_s *j_revoke;
+ struct jbd_revoke_table_s *j_revoke_table[2];
+
+ /*
+ * array of bhs for journal_commit_transaction
+ */
+ struct buffer_head **j_wbuf;
+ int j_wbufsize;
+
+ /*
+ * this is the pid of the last person to run a synchronous operation
+ * through the journal.
+ */
+ pid_t j_last_sync_writer;
+
+ /*
+ * the average amount of time in nanoseconds it takes to commit a
+ * transaction to the disk. [j_state_lock]
+ */
+ u64 j_average_commit_time;
+
+ /*
+ * An opaque pointer to fs-private information. ext3 puts its
+ * superblock pointer here
+ */
+ void *j_private;
+};
+
+/*
+ * Journal flag definitions
+ */
+#define JFS_UNMOUNT 0x001 /* Journal thread is being destroyed */
+#define JFS_ABORT 0x002 /* Journaling has been aborted for errors. */
+#define JFS_ACK_ERR 0x004 /* The errno in the sb has been acked */
+#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
+#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
+#define JFS_BARRIER 0x020 /* Use IDE barriers */
+#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
+ * data write error in ordered
+ * mode */
+
+/*
+ * Function declarations for the journaling transaction and buffer
+ * management
+ */
+
+/* Filing buffers */
+extern void journal_unfile_buffer(journal_t *, struct journal_head *);
+extern void __journal_unfile_buffer(struct journal_head *);
+extern void __journal_refile_buffer(struct journal_head *);
+extern void journal_refile_buffer(journal_t *, struct journal_head *);
+extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void __journal_free_buffer(struct journal_head *bh);
+extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void __journal_clean_data_list(transaction_t *transaction);
+
+/* Log buffer allocation */
+extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
+int journal_next_log_block(journal_t *, unsigned int *);
+
+/* Commit management */
+extern void journal_commit_transaction(journal_t *);
+
+/* Checkpoint list management */
+int __journal_clean_checkpoint_list(journal_t *journal);
+int __journal_remove_checkpoint(struct journal_head *);
+void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
+
+/* Buffer IO */
+extern int
+journal_write_metadata_buffer(transaction_t *transaction,
+ struct journal_head *jh_in,
+ struct journal_head **jh_out,
+ unsigned int blocknr);
+
+/* Transaction locking */
+extern void __wait_on_journal (journal_t *);
+
+/*
+ * Journal locking.
+ *
+ * We need to lock the journal during transaction state changes so that nobody
+ * ever tries to take a handle on the running transaction while we are in the
+ * middle of moving it to the commit phase. j_state_lock does this.
+ *
+ * Note that the locking is completely interrupt unsafe. We never touch
+ * journal structures from interrupts.
+ */
+
+static inline handle_t *journal_current_handle(void)
+{
+ return current->journal_info;
+}
+
+/* The journaling code user interface:
+ *
+ * Create and destroy handles
+ * Register buffer modifications against the current transaction.
+ */
+
+extern handle_t *journal_start(journal_t *, int nblocks);
+extern int journal_restart (handle_t *, int nblocks);
+extern int journal_extend (handle_t *, int nblocks);
+extern int journal_get_write_access(handle_t *, struct buffer_head *);
+extern int journal_get_create_access (handle_t *, struct buffer_head *);
+extern int journal_get_undo_access(handle_t *, struct buffer_head *);
+extern int journal_dirty_data (handle_t *, struct buffer_head *);
+extern int journal_dirty_metadata (handle_t *, struct buffer_head *);
+extern void journal_release_buffer (handle_t *, struct buffer_head *);
+extern int journal_forget (handle_t *, struct buffer_head *);
+extern void journal_sync_buffer (struct buffer_head *);
+extern void journal_invalidatepage(journal_t *,
+ struct page *, unsigned int, unsigned int);
+extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern int journal_stop(handle_t *);
+extern int journal_flush (journal_t *);
+extern void journal_lock_updates (journal_t *);
+extern void journal_unlock_updates (journal_t *);
+
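The declarations above cover the whole per-update cycle: open a handle with enough buffer credits, get write access to each buffer you will touch, dirty it, then close the handle. Below is a hedged sketch of that lifecycle as a client filesystem might use it; the function name and the "modify the buffer" step are placeholders, and it assumes <linux/err.h> for IS_ERR()/PTR_ERR(), which journal_start() is expected to use for error returns.

/* Illustrative sketch only -- typical handle lifecycle. */
static int example_update(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	handle = journal_start(journal, 1);	/* reserve 1 buffer credit */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = journal_get_write_access(handle, bh);
	if (err)
		goto out;

	/* ... modify bh->b_data under the journal's protection ... */

	err = journal_dirty_metadata(handle, bh);
out:
	journal_stop(handle);	/* unused credits go back to the transaction */
	return err;
}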
+extern journal_t * journal_init_dev(struct block_device *bdev,
+ struct block_device *fs_dev,
+ int start, int len, int bsize);
+extern journal_t * journal_init_inode (struct inode *);
+extern int journal_update_format (journal_t *);
+extern int journal_check_used_features
+ (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int journal_check_available_features
+ (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int journal_set_features
+ (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int journal_create (journal_t *);
+extern int journal_load (journal_t *journal);
+extern int journal_destroy (journal_t *);
+extern int journal_recover (journal_t *journal);
+extern int journal_wipe (journal_t *, int);
+extern int journal_skip_recovery (journal_t *);
+extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
+ int);
+extern void journal_abort (journal_t *, int);
+extern int journal_errno (journal_t *);
+extern void journal_ack_err (journal_t *);
+extern int journal_clear_err (journal_t *);
+extern int journal_bmap(journal_t *, unsigned int, unsigned int *);
+extern int journal_force_commit(journal_t *);
+
+/*
+ * journal_head management
+ */
+struct journal_head *journal_add_journal_head(struct buffer_head *bh);
+struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
+void journal_put_journal_head(struct journal_head *jh);
+
+/*
+ * handle management
+ */
+extern struct kmem_cache *jbd_handle_cache;
+
+static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
+{
+ return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
+}
+
+static inline void jbd_free_handle(handle_t *handle)
+{
+ kmem_cache_free(jbd_handle_cache, handle);
+}
+
+/* Primary revoke support */
+#define JOURNAL_REVOKE_DEFAULT_HASH 256
+extern int journal_init_revoke(journal_t *, int);
+extern void journal_destroy_revoke_caches(void);
+extern int journal_init_revoke_caches(void);
+
+extern void journal_destroy_revoke(journal_t *);
+extern int journal_revoke (handle_t *,
+ unsigned int, struct buffer_head *);
+extern int journal_cancel_revoke(handle_t *, struct journal_head *);
+extern void journal_write_revoke_records(journal_t *,
+ transaction_t *, int);
+
+/* Recovery revoke support */
+extern int journal_set_revoke(journal_t *, unsigned int, tid_t);
+extern int journal_test_revoke(journal_t *, unsigned int, tid_t);
+extern void journal_clear_revoke(journal_t *);
+extern void journal_switch_revoke_table(journal_t *journal);
+extern void journal_clear_buffer_revoked_flags(journal_t *journal);
+
+/*
+ * The log thread user interface:
+ *
+ * Request space in the current transaction, and force transaction commit
+ * transitions on demand.
+ */
+
+int __log_space_left(journal_t *); /* Called with journal locked */
+int log_start_commit(journal_t *journal, tid_t tid);
+int __log_start_commit(journal_t *journal, tid_t tid);
+int journal_start_commit(journal_t *journal, tid_t *tid);
+int journal_force_commit_nested(journal_t *journal);
+int log_wait_commit(journal_t *journal, tid_t tid);
+int log_do_checkpoint(journal_t *journal);
+int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
+
+void __log_wait_for_space(journal_t *journal);
+extern void __journal_drop_transaction(journal_t *, transaction_t *);
+extern int cleanup_journal_tail(journal_t *);
+
+/*
+ * is_journal_aborted
+ *
+ * Simple test wrapper function to test the JFS_ABORT state flag. This
+ * bit, when set, indicates that we have had a fatal error somewhere,
+ * either inside the journaling layer or indicated to us by the client
+ * (e.g. ext3), and that we should not commit any further
+ * transactions.
+ */
+
+static inline int is_journal_aborted(journal_t *journal)
+{
+ return journal->j_flags & JFS_ABORT;
+}
+
+static inline int is_handle_aborted(handle_t *handle)
+{
+ if (handle->h_aborted)
+ return 1;
+ return is_journal_aborted(handle->h_transaction->t_journal);
+}
+
+static inline void journal_abort_handle(handle_t *handle)
+{
+ handle->h_aborted = 1;
+}
+
+#endif /* __KERNEL__ */
+
+/* Comparison functions for transaction IDs: perform comparisons using
+ * modulo arithmetic so that they work over sequence number wraps. */
+
+static inline int tid_gt(tid_t x, tid_t y)
+{
+ int difference = (x - y);
+ return (difference > 0);
+}
+
+static inline int tid_geq(tid_t x, tid_t y)
+{
+ int difference = (x - y);
+ return (difference >= 0);
+}
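Since tid_t is an unsigned 32-bit sequence number, the subtraction above wraps modulo 2^32 and the result is then read as a signed int, which is what lets the comparison survive a wrap. A small worked example (values chosen to straddle the wrap point):

/* Illustrative sketch: comparison across a sequence-number wrap. */
tid_t old_tid = 0xfffffffeU;	/* just before the counter wraps */
tid_t new_tid = 0x00000003U;	/* five transactions later */

/* (new_tid - old_tid) is 5 modulo 2^32, i.e. a positive signed int,
 * so tid_gt(new_tid, old_tid) == 1 even though new_tid is numerically
 * smaller than old_tid. */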
+
+extern int journal_blocks_per_page(struct inode *inode);
+
+/*
+ * Return the minimum number of blocks which must be free in the journal
+ * before a new transaction may be started. Must be called under j_state_lock.
+ */
+static inline int jbd_space_needed(journal_t *journal)
+{
+ int nblocks = journal->j_max_transaction_buffers;
+ if (journal->j_committing_transaction)
+ nblocks += journal->j_committing_transaction->
+ t_outstanding_credits;
+ return nblocks;
+}
+
+/*
+ * Definitions which augment the buffer_head layer
+ */
+
+/* journaling buffer types */
+#define BJ_None 0 /* Not journaled */
+#define BJ_SyncData 1 /* Normal data: flush before commit */
+#define BJ_Metadata 2 /* Normal journaled metadata */
+#define BJ_Forget 3 /* Buffer superseded by this transaction */
+#define BJ_IO 4 /* Buffer is for temporary IO use */
+#define BJ_Shadow 5 /* Buffer contents being shadowed to the log */
+#define BJ_LogCtl 6 /* Buffer contains log descriptors */
+#define BJ_Reserved 7 /* Buffer is reserved for access by journal */
+#define BJ_Locked 8 /* Locked for I/O during commit */
+#define BJ_Types 9
+
+extern int jbd_blocks_per_page(struct inode *inode);
+
+#ifdef __KERNEL__
+
+#define buffer_trace_init(bh) do {} while (0)
+#define print_buffer_fields(bh) do {} while (0)
+#define print_buffer_trace(bh) do {} while (0)
+#define BUFFER_TRACE(bh, info) do {} while (0)
+#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
+#define JBUFFER_TRACE(jh, info) do {} while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_JBD_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
new file mode 100644
index 000000000..edb640ae9
--- /dev/null
+++ b/include/linux/jbd2.h
@@ -0,0 +1,1407 @@
+/*
+ * linux/include/linux/jbd2.h
+ *
+ * Written by Stephen C. Tweedie <sct@redhat.com>
+ *
+ * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ *
+ * Definitions for transaction data structures for the buffer cache
+ * filesystem journaling support.
+ */
+
+#ifndef _LINUX_JBD2_H
+#define _LINUX_JBD2_H
+
+/* Allow this file to be included directly into e2fsprogs */
+#ifndef __KERNEL__
+#include "jfs_compat.h"
+#define JBD2_DEBUG
+#else
+
+#include <linux/types.h>
+#include <linux/buffer_head.h>
+#include <linux/journal-head.h>
+#include <linux/stddef.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <crypto/hash.h>
+#endif
+
+#define journal_oom_retry 1
+
+/*
+ * Define JBD2_PARANOID_IOFAIL to cause a kernel BUG() if ext4 finds
+ * certain classes of error which can occur due to failed IOs. Under
+ * normal use we want ext4 to continue after such errors, because
+ * hardware _can_ fail, but for debugging purposes when running tests on
+ * known-good hardware we may want to trap these errors.
+ */
+#undef JBD2_PARANOID_IOFAIL
+
+/*
+ * The default maximum commit age, in seconds.
+ */
+#define JBD2_DEFAULT_MAX_COMMIT_AGE 5
+
+#ifdef CONFIG_JBD2_DEBUG
+/*
+ * Define JBD2_EXPENSIVE_CHECKING to enable more expensive internal
+ * consistency checks. By default we don't do this unless
+ * CONFIG_JBD2_DEBUG is on.
+ */
+#define JBD2_EXPENSIVE_CHECKING
+extern ushort jbd2_journal_enable_debug;
+void __jbd2_debug(int level, const char *file, const char *func,
+ unsigned int line, const char *fmt, ...);
+
+#define jbd_debug(n, fmt, a...) \
+ __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
+#else
+#define jbd_debug(n, fmt, a...) /**/
+#endif
+
+extern void *jbd2_alloc(size_t size, gfp_t flags);
+extern void jbd2_free(void *ptr, size_t size);
+
+#define JBD2_MIN_JOURNAL_BLOCKS 1024
+
+#ifdef __KERNEL__
+
+/**
+ * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
+ *
+ * All filesystem modifications made by the process go
+ * through this handle. Recursive operations (such as quota operations)
+ * are gathered into a single update.
+ *
+ * The buffer credits field is used to account for journaled buffers
+ * being modified by the running process. To ensure that there is
+ * enough log space for all outstanding operations, we need to limit the
+ * number of outstanding buffers possible at any time. When the
+ * operation completes, any buffer credits not used are credited back to
+ * the transaction, so that at all times we know how many buffers the
+ * outstanding updates on a transaction might possibly touch.
+ *
+ * This is an opaque datatype.
+ **/
+typedef struct jbd2_journal_handle handle_t; /* Atomic operation type */
+
+
+/**
+ * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
+ *
+ * journal_t is linked to from the fs superblock structure.
+ *
+ * We use the journal_t to keep track of all outstanding transaction
+ * activity on the filesystem, and to manage the state of the log
+ * writing process.
+ *
+ * This is an opaque datatype.
+ **/
+typedef struct journal_s journal_t; /* Journal control structure */
+#endif
+
+/*
+ * Internal structures used by the logging mechanism:
+ */
+
+#define JBD2_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
+
+/*
+ * On-disk structures
+ */
+
+/*
+ * Descriptor block types:
+ */
+
+#define JBD2_DESCRIPTOR_BLOCK 1
+#define JBD2_COMMIT_BLOCK 2
+#define JBD2_SUPERBLOCK_V1 3
+#define JBD2_SUPERBLOCK_V2 4
+#define JBD2_REVOKE_BLOCK 5
+
+/*
+ * Standard header for all descriptor blocks:
+ */
+typedef struct journal_header_s
+{
+ __be32 h_magic;
+ __be32 h_blocktype;
+ __be32 h_sequence;
+} journal_header_t;
+
+/*
+ * Checksum types.
+ */
+#define JBD2_CRC32_CHKSUM 1
+#define JBD2_MD5_CHKSUM 2
+#define JBD2_SHA1_CHKSUM 3
+#define JBD2_CRC32C_CHKSUM 4
+
+#define JBD2_CRC32_CHKSUM_SIZE 4
+
+#define JBD2_CHECKSUM_BYTES (32 / sizeof(u32))
+/*
+ * Commit block header for storing transactional checksums:
+ *
+ * NOTE: If FEATURE_COMPAT_CHECKSUM (checksum v1) is set, the h_chksum*
+ * fields are used to store a checksum of the descriptor and data blocks.
+ *
+ * If FEATURE_INCOMPAT_CSUM_V2 (checksum v2) is set, then the h_chksum
+ * field is used to store crc32c(uuid+commit_block). Each journal metadata
+ * block gets its own checksum, and data block checksums are stored in
+ * journal_block_tag (in the descriptor). The other h_chksum* fields are
+ * not used.
+ *
+ * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses
+ * journal_block_tag3_t to store a full 32-bit checksum. Everything else
+ * is the same as v2.
+ *
+ * Checksum v1, v2, and v3 are mutually exclusive features.
+ */
+struct commit_header {
+ __be32 h_magic;
+ __be32 h_blocktype;
+ __be32 h_sequence;
+ unsigned char h_chksum_type;
+ unsigned char h_chksum_size;
+ unsigned char h_padding[2];
+ __be32 h_chksum[JBD2_CHECKSUM_BYTES];
+ __be64 h_commit_sec;
+ __be32 h_commit_nsec;
+};
+
+/*
+ * The block tag: used to describe a single buffer in the journal.
+ * t_blocknr_high is only used if INCOMPAT_64BIT is set, so this
+ * raw struct shouldn't be used for pointer math or sizeof() - use
+ * journal_tag_bytes(journal) instead to compute this.
+ */
+typedef struct journal_block_tag3_s
+{
+ __be32 t_blocknr; /* The on-disk block number */
+ __be32 t_flags; /* See below */
+ __be32 t_blocknr_high; /* most-significant high 32bits. */
+ __be32 t_checksum; /* crc32c(uuid+seq+block) */
+} journal_block_tag3_t;
+
+typedef struct journal_block_tag_s
+{
+ __be32 t_blocknr; /* The on-disk block number */
+ __be16 t_checksum; /* truncated crc32c(uuid+seq+block) */
+ __be16 t_flags; /* See below */
+ __be32 t_blocknr_high; /* most-significant high 32bits. */
+} journal_block_tag_t;
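The warning above exists because the on-disk tag size depends on the journal's incompatible features. Here is a hedged sketch of the sizing rule implied by these definitions; it is a reconstruction from the comments and feature flags in this header, not a quote of jbd2's journal_tag_bytes(), and the helper name is hypothetical.

/* Illustrative sketch only. */
static size_t example_tag_bytes(journal_t *journal)
{
	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
		return sizeof(journal_block_tag3_t);		/* 16 bytes */
	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
		return sizeof(journal_block_tag_t);		/* 12 bytes */
	/* Without 64-bit block numbers, t_blocknr_high is not written. */
	return sizeof(journal_block_tag_t) - sizeof(__be32);	/* 8 bytes */
}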
+
+/* Tail of descriptor block, for checksumming */
+struct jbd2_journal_block_tail {
+ __be32 t_checksum; /* crc32c(uuid+descr_block) */
+};
+
+/*
+ * The revoke descriptor: used on disk to describe a series of blocks to
+ * be revoked from the log
+ */
+typedef struct jbd2_journal_revoke_header_s
+{
+ journal_header_t r_header;
+ __be32 r_count; /* Count of bytes used in the block */
+} jbd2_journal_revoke_header_t;
+
+/* Tail of revoke block, for checksumming */
+struct jbd2_journal_revoke_tail {
+ __be32 r_checksum; /* crc32c(uuid+revoke_block) */
+};
+
+/* Definitions for the journal tag flags word: */
+#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */
+#define JBD2_FLAG_SAME_UUID 2 /* block has same uuid as previous */
+#define JBD2_FLAG_DELETED 4 /* block deleted by this transaction */
+#define JBD2_FLAG_LAST_TAG 8 /* last tag in this descriptor block */
+
+
+/*
+ * The journal superblock. All fields are in big-endian byte order.
+ */
+typedef struct journal_superblock_s
+{
+/* 0x0000 */
+ journal_header_t s_header;
+
+/* 0x000C */
+ /* Static information describing the journal */
+ __be32 s_blocksize; /* journal device blocksize */
+ __be32 s_maxlen; /* total blocks in journal file */
+ __be32 s_first; /* first block of log information */
+
+/* 0x0018 */
+ /* Dynamic information describing the current state of the log */
+ __be32 s_sequence; /* first commit ID expected in log */
+ __be32 s_start; /* blocknr of start of log */
+
+/* 0x0020 */
+ /* Error value, as set by jbd2_journal_abort(). */
+ __be32 s_errno;
+
+/* 0x0024 */
+ /* Remaining fields are only valid in a version-2 superblock */
+ __be32 s_feature_compat; /* compatible feature set */
+ __be32 s_feature_incompat; /* incompatible feature set */
+ __be32 s_feature_ro_compat; /* readonly-compatible feature set */
+/* 0x0030 */
+ __u8 s_uuid[16]; /* 128-bit uuid for journal */
+
+/* 0x0040 */
+ __be32 s_nr_users; /* Nr of filesystems sharing log */
+
+ __be32 s_dynsuper; /* Blocknr of dynamic superblock copy*/
+
+/* 0x0048 */
+ __be32 s_max_transaction; /* Limit of journal blocks per trans.*/
+ __be32 s_max_trans_data; /* Limit of data blocks per trans. */
+
+/* 0x0050 */
+ __u8 s_checksum_type; /* checksum type */
+ __u8 s_padding2[3];
+ __u32 s_padding[42];
+ __be32 s_checksum; /* crc32c(superblock) */
+
+/* 0x0100 */
+ __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
+/* 0x0400 */
+} journal_superblock_t;
+
+#define JBD2_HAS_COMPAT_FEATURE(j,mask) \
+ ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
+#define JBD2_HAS_RO_COMPAT_FEATURE(j,mask) \
+ ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
+#define JBD2_HAS_INCOMPAT_FEATURE(j,mask) \
+ ((j)->j_format_version >= 2 && \
+ ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
+
+#define JBD2_FEATURE_COMPAT_CHECKSUM 0x00000001
+
+#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
+#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
+#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
+#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
+#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010
+
+/* Features known to this kernel version: */
+#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
+#define JBD2_KNOWN_ROCOMPAT_FEATURES 0
+#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \
+ JBD2_FEATURE_INCOMPAT_64BIT | \
+ JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
+ JBD2_FEATURE_INCOMPAT_CSUM_V2 | \
+ JBD2_FEATURE_INCOMPAT_CSUM_V3)
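The commit-block comment earlier notes that the three checksum schemes are mutually exclusive; the flags above are what select them. An illustrative helper mapping the flags onto the scheme described there (the function name and the description strings are hypothetical):

/* Illustrative sketch only. */
static const char *example_commit_csum_scheme(journal_t *journal)
{
	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
		return "v3: crc32c per block, 32-bit tag checksums";
	if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		return "v2: crc32c per block, truncated tag checksums";
	if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM))
		return "v1: single checksum over descriptor and data blocks";
	return "no commit-block checksumming";
}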
+
+#ifdef __KERNEL__
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+
+enum jbd_state_bits {
+ BH_JBD /* Has an attached ext3 journal_head */
+ = BH_PrivateStart,
+ BH_JWrite, /* Being written to log (@@@ DEBUGGING) */
+ BH_Freed, /* Has been freed (truncated) */
+ BH_Revoked, /* Has been revoked from the log */
+ BH_RevokeValid, /* Revoked flag is valid */
+ BH_JBDDirty, /* Is dirty but journaled */
+ BH_State, /* Pins most journal_head state */
+ BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
+ BH_Shadow, /* IO on shadow buffer is running */
+ BH_Verified, /* Metadata block has been verified ok */
+ BH_JBDPrivateStart, /* First bit available for private use by FS */
+};
+
+BUFFER_FNS(JBD, jbd)
+BUFFER_FNS(JWrite, jwrite)
+BUFFER_FNS(JBDDirty, jbddirty)
+TAS_BUFFER_FNS(JBDDirty, jbddirty)
+BUFFER_FNS(Revoked, revoked)
+TAS_BUFFER_FNS(Revoked, revoked)
+BUFFER_FNS(RevokeValid, revokevalid)
+TAS_BUFFER_FNS(RevokeValid, revokevalid)
+BUFFER_FNS(Freed, freed)
+BUFFER_FNS(Shadow, shadow)
+BUFFER_FNS(Verified, verified)
+
+#include <linux/jbd_common.h>
+
+#define J_ASSERT(assert) BUG_ON(!(assert))
+
+#define J_ASSERT_BH(bh, expr) J_ASSERT(expr)
+#define J_ASSERT_JH(jh, expr) J_ASSERT(expr)
+
+#if defined(JBD2_PARANOID_IOFAIL)
+#define J_EXPECT(expr, why...) J_ASSERT(expr)
+#define J_EXPECT_BH(bh, expr, why...) J_ASSERT_BH(bh, expr)
+#define J_EXPECT_JH(jh, expr, why...) J_ASSERT_JH(jh, expr)
+#else
+#define __journal_expect(expr, why...) \
+ ({ \
+ int val = (expr); \
+ if (!val) { \
+ printk(KERN_ERR \
+ "JBD2 unexpected failure: %s: %s;\n", \
+ __func__, #expr); \
+ printk(KERN_ERR why "\n"); \
+ } \
+ val; \
+ })
+#define J_EXPECT(expr, why...) __journal_expect(expr, ## why)
+#define J_EXPECT_BH(bh, expr, why...) __journal_expect(expr, ## why)
+#define J_EXPECT_JH(jh, expr, why...) __journal_expect(expr, ## why)
+#endif
+
+/* Flags in jbd2_inode->i_flags */
+#define __JI_COMMIT_RUNNING 0
+/* Commit of the inode data is in progress. We use this flag to protect us from
+ * concurrent deletion of the inode. We cannot use a reference to the inode for
+ * this since we cannot afford doing the last iput() on behalf of kjournald.
+ */
+#define JI_COMMIT_RUNNING (1 << __JI_COMMIT_RUNNING)
+
+/**
+ * struct jbd2_inode - links an inode into a transaction in data=ordered mode
+ * so that its data can be synced during commit.
+ */
+struct jbd2_inode {
+ /* Which transaction does this inode belong to? Either the running
+ * transaction or the committing one. [j_list_lock] */
+ transaction_t *i_transaction;
+
+ /* Pointer to the running transaction modifying inode's data in case
+ * there is already a committing transaction touching it. [j_list_lock] */
+ transaction_t *i_next_transaction;
+
+ /* List of inodes in the i_transaction [j_list_lock] */
+ struct list_head i_list;
+
+ /* VFS inode this inode belongs to [constant during the lifetime
+ * of the structure] */
+ struct inode *i_vfs_inode;
+
+ /* Flags of inode [j_list_lock] */
+ unsigned long i_flags;
+};
+
+struct jbd2_revoke_table_s;
+
+/**
+ * struct handle_s - The handle_s type is the concrete type associated with
+ * handle_t.
+ * @h_transaction: Which compound transaction is this update a part of?
+ * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
+ * @h_ref: Reference count on this handle
+ * @h_err: Field for caller's use to track errors through large fs operations
+ * @h_sync: flag for sync-on-close
+ * @h_jdata: flag to force data journaling
+ * @h_aborted: flag indicating fatal error on handle
+ **/
+
+/* Docbook can't yet cope with the bit fields, but we will leave the documentation
+ * in so it can be fixed later.
+ */
+
+struct jbd2_journal_handle
+{
+ union {
+ /* Which compound transaction is this update a part of? */
+ transaction_t *h_transaction;
+		/* Which journal this handle belongs to - used iff h_reserved is set */
+ journal_t *h_journal;
+ };
+
+ /* Handle reserved for finishing the logical operation */
+ handle_t *h_rsv_handle;
+
+ /* Number of remaining buffers we are allowed to dirty: */
+ int h_buffer_credits;
+
+ /* Reference count on this handle */
+ int h_ref;
+
+ /* Field for caller's use to track errors through large fs */
+ /* operations */
+ int h_err;
+
+ /* Flags [no locking] */
+ unsigned int h_sync: 1; /* sync-on-close */
+ unsigned int h_jdata: 1; /* force data journaling */
+ unsigned int h_reserved: 1; /* handle with reserved credits */
+ unsigned int h_aborted: 1; /* fatal error on handle */
+ unsigned int h_type: 8; /* for handle statistics */
+ unsigned int h_line_no: 16; /* for handle statistics */
+
+ unsigned long h_start_jiffies;
+ unsigned int h_requested_credits;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map h_lockdep_map;
+#endif
+};
+
+
+/*
+ * Some stats for checkpoint phase
+ */
+struct transaction_chp_stats_s {
+ unsigned long cs_chp_time;
+ __u32 cs_forced_to_close;
+ __u32 cs_written;
+ __u32 cs_dropped;
+};
+
+/* The transaction_t type is the guts of the journaling mechanism. It
+ * tracks a compound transaction through its various states:
+ *
+ * RUNNING: accepting new updates
+ * LOCKED: Updates still running but we don't accept new ones
+ * RUNDOWN: Updates are tidying up but have finished requesting
+ * new buffers to modify (state not used for now)
+ * FLUSH: All updates complete, but we are still writing to disk
+ * COMMIT: All data on disk, writing commit record
+ * FINISHED: We still have to keep the transaction for checkpointing.
+ *
+ * The transaction keeps track of all of the buffers modified by a
+ * running transaction, and all of the buffers committed but not yet
+ * flushed to home for finished transactions.
+ */
+
+/*
+ * Lock ranking:
+ *
+ * j_list_lock
+ * ->jbd_lock_bh_journal_head() (This is "innermost")
+ *
+ * j_state_lock
+ * ->jbd_lock_bh_state()
+ *
+ * jbd_lock_bh_state()
+ * ->j_list_lock
+ *
+ * j_state_lock
+ * ->t_handle_lock
+ *
+ * j_state_lock
+ * ->j_list_lock (journal_unmap_buffer)
+ *
+ */
+
+struct transaction_s
+{
+ /* Pointer to the journal for this transaction. [no locking] */
+ journal_t *t_journal;
+
+ /* Sequence number for this transaction [no locking] */
+ tid_t t_tid;
+
+ /*
+ * Transaction's current state
+ * [no locking - only kjournald2 alters this]
+ * [j_list_lock] guards transition of a transaction into T_FINISHED
+ * state and subsequent call of __jbd2_journal_drop_transaction()
+ * FIXME: needs barriers
+ * KLUDGE: [use j_state_lock]
+ */
+ enum {
+ T_RUNNING,
+ T_LOCKED,
+ T_FLUSH,
+ T_COMMIT,
+ T_COMMIT_DFLUSH,
+ T_COMMIT_JFLUSH,
+ T_COMMIT_CALLBACK,
+ T_FINISHED
+ } t_state;
+
+ /*
+ * Where in the log does this transaction's commit start? [no locking]
+ */
+ unsigned long t_log_start;
+
+ /* Number of buffers on the t_buffers list [j_list_lock] */
+ int t_nr_buffers;
+
+ /*
+ * Doubly-linked circular list of all buffers reserved but not yet
+ * modified by this transaction [j_list_lock]
+ */
+ struct journal_head *t_reserved_list;
+
+ /*
+ * Doubly-linked circular list of all metadata buffers owned by this
+ * transaction [j_list_lock]
+ */
+ struct journal_head *t_buffers;
+
+ /*
+ * Doubly-linked circular list of all forget buffers (superseded
+ * buffers which we can un-checkpoint once this transaction commits)
+ * [j_list_lock]
+ */
+ struct journal_head *t_forget;
+
+ /*
+ * Doubly-linked circular list of all buffers still to be flushed before
+ * this transaction can be checkpointed. [j_list_lock]
+ */
+ struct journal_head *t_checkpoint_list;
+
+ /*
+ * Doubly-linked circular list of all buffers submitted for IO while
+ * checkpointing. [j_list_lock]
+ */
+ struct journal_head *t_checkpoint_io_list;
+
+ /*
+ * Doubly-linked circular list of metadata buffers being shadowed by log
+ * IO. The IO buffers on the iobuf list and the shadow buffers on this
+ * list match each other one for one at all times. [j_list_lock]
+ */
+ struct journal_head *t_shadow_list;
+
+ /*
+ * List of inodes whose data we've modified in data=ordered mode.
+ * [j_list_lock]
+ */
+ struct list_head t_inode_list;
+
+ /*
+ * Protects info related to handles
+ */
+ spinlock_t t_handle_lock;
+
+ /*
+ * Longest time some handle had to wait for running transaction
+ */
+ unsigned long t_max_wait;
+
+ /*
+ * When transaction started
+ */
+ unsigned long t_start;
+
+ /*
+ * When commit was requested
+ */
+ unsigned long t_requested;
+
+ /*
+ * Checkpointing stats [j_checkpoint_sem]
+ */
+ struct transaction_chp_stats_s t_chp_stats;
+
+ /*
+ * Number of outstanding updates running on this transaction
+ * [t_handle_lock]
+ */
+ atomic_t t_updates;
+
+ /*
+	 * Number of buffers reserved for use by all handles in this
+	 * transaction but not yet modified. [t_handle_lock]
+ */
+ atomic_t t_outstanding_credits;
+
+ /*
+ * Forward and backward links for the circular list of all transactions
+ * awaiting checkpoint. [j_list_lock]
+ */
+ transaction_t *t_cpnext, *t_cpprev;
+
+ /*
+ * When will the transaction expire (become due for commit), in jiffies?
+ * [no locking]
+ */
+ unsigned long t_expires;
+
+ /*
+ * When this transaction started, in nanoseconds [no locking]
+ */
+ ktime_t t_start_time;
+
+ /*
+ * How many handles used this transaction? [t_handle_lock]
+ */
+ atomic_t t_handle_count;
+
+ /*
+ * This transaction is being forced and some process is
+ * waiting for it to finish.
+ */
+ unsigned int t_synchronous_commit:1;
+
+ /* Disk flush needs to be sent to fs partition [no locking] */
+ int t_need_data_flush;
+
+ /*
+ * For use by the filesystem to store fs-specific data
+ * structures associated with the transaction
+ */
+ struct list_head t_private_list;
+};
+
+struct transaction_run_stats_s {
+ unsigned long rs_wait;
+ unsigned long rs_request_delay;
+ unsigned long rs_running;
+ unsigned long rs_locked;
+ unsigned long rs_flushing;
+ unsigned long rs_logging;
+
+ __u32 rs_handle_count;
+ __u32 rs_blocks;
+ __u32 rs_blocks_logged;
+};
+
+struct transaction_stats_s {
+ unsigned long ts_tid;
+ unsigned long ts_requested;
+ struct transaction_run_stats_s run;
+};
+
+static inline unsigned long
+jbd2_time_diff(unsigned long start, unsigned long end)
+{
+ if (end >= start)
+ return end - start;
+
+ return end + (MAX_JIFFY_OFFSET - start);
+}
+
+#define JBD2_NR_BATCH 64
+
+/**
+ * struct journal_s - The journal_s type is the concrete type associated with
+ * journal_t.
+ * @j_flags: General journaling state flags
+ * @j_errno: Is there an outstanding uncleared error on the journal (from a
+ * prior abort)?
+ * @j_sb_buffer: First part of superblock buffer
+ * @j_superblock: Second part of superblock buffer
+ * @j_format_version: Version of the superblock format
+ * @j_state_lock: Protect the various scalars in the journal
+ * @j_barrier_count: Number of processes waiting to create a barrier lock
+ * @j_barrier: The barrier lock itself
+ * @j_running_transaction: The current running transaction..
+ * @j_committing_transaction: the transaction we are pushing to disk
+ * @j_checkpoint_transactions: a linked circular list of all transactions
+ * waiting for checkpointing
+ * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
+ * to start committing, or for a barrier lock to be released
+ * @j_wait_done_commit: Wait queue for waiting for commit to complete
+ * @j_wait_commit: Wait queue to trigger commit
+ * @j_wait_updates: Wait queue to wait for updates to complete
+ * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop
+ * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
+ * @j_head: Journal head - identifies the first unused block in the journal
+ * @j_tail: Journal tail - identifies the oldest still-used block in the
+ * journal.
+ * @j_free: Journal free - how many free blocks are there in the journal?
+ * @j_first: The block number of the first usable block
+ * @j_last: The block number one beyond the last usable block
+ * @j_dev: Device where we store the journal
+ * @j_blocksize: blocksize for the location where we store the journal.
+ * @j_blk_offset: starting block offset into the device where we store the
+ * journal
+ * @j_fs_dev: Device which holds the client fs. For internal journal this will
+ * be equal to j_dev
+ * @j_reserved_credits: Number of buffers reserved from the running transaction
+ * @j_maxlen: Total maximum capacity of the journal region on disk.
+ * @j_list_lock: Protects the buffer lists and internal buffer state.
+ * @j_inode: Optional inode where we store the journal. If present, all journal
+ * block numbers are mapped into this inode via bmap().
+ * @j_tail_sequence: Sequence number of the oldest transaction in the log
+ * @j_transaction_sequence: Sequence number of the next transaction to grant
+ * @j_commit_sequence: Sequence number of the most recently committed
+ * transaction
+ * @j_commit_request: Sequence number of the most recent transaction wanting
+ * commit
+ * @j_uuid: Uuid of client object.
+ * @j_task: Pointer to the current commit thread for this journal
+ * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
+ * single compound commit transaction
+ * @j_commit_interval: What is the maximum transaction lifetime before we begin
+ * a commit?
+ * @j_commit_timer: The timer used to wakeup the commit thread
+ * @j_revoke_lock: Protect the revoke table
+ * @j_revoke: The revoke table - maintains the list of revoked blocks in the
+ * current transaction.
+ * @j_revoke_table: alternate revoke tables for j_revoke
+ * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction
+ * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
+ * number that will fit in j_blocksize
+ * @j_last_sync_writer: most recent pid which did a synchronous write
+ * @j_history: Buffer storing the transactions statistics history
+ * @j_history_max: Maximum number of transactions in the statistics history
+ * @j_history_cur: Current number of transactions in the statistics history
+ * @j_history_lock: Protect the transactions statistics history
+ * @j_proc_entry: procfs entry for the jbd statistics directory
+ * @j_stats: Overall statistics
+ * @j_private: An opaque pointer to fs-private information.
+ */
+
+struct journal_s
+{
+ /* General journaling state flags [j_state_lock] */
+ unsigned long j_flags;
+
+ /*
+ * Is there an outstanding uncleared error on the journal (from a prior
+ * abort)? [j_state_lock]
+ */
+ int j_errno;
+
+ /* The superblock buffer */
+ struct buffer_head *j_sb_buffer;
+ journal_superblock_t *j_superblock;
+
+ /* Version of the superblock format */
+ int j_format_version;
+
+ /*
+ * Protect the various scalars in the journal
+ */
+ rwlock_t j_state_lock;
+
+ /*
+ * Number of processes waiting to create a barrier lock [j_state_lock]
+ */
+ int j_barrier_count;
+
+ /* The barrier lock itself */
+ struct mutex j_barrier;
+
+ /*
+ * Transactions: The current running transaction...
+ * [j_state_lock] [caller holding open handle]
+ */
+ transaction_t *j_running_transaction;
+
+ /*
+ * the transaction we are pushing to disk
+ * [j_state_lock] [caller holding open handle]
+ */
+ transaction_t *j_committing_transaction;
+
+ /*
+ * ... and a linked circular list of all transactions waiting for
+ * checkpointing. [j_list_lock]
+ */
+ transaction_t *j_checkpoint_transactions;
+
+ /*
+ * Wait queue for waiting for a locked transaction to start committing,
+ * or for a barrier lock to be released
+ */
+ wait_queue_head_t j_wait_transaction_locked;
+
+ /* Wait queue for waiting for commit to complete */
+ wait_queue_head_t j_wait_done_commit;
+
+ /* Wait queue to trigger commit */
+ wait_queue_head_t j_wait_commit;
+
+ /* Wait queue to wait for updates to complete */
+ wait_queue_head_t j_wait_updates;
+
+ /* Wait queue to wait for reserved buffer credits to drop */
+ wait_queue_head_t j_wait_reserved;
+
+ /* Semaphore for locking against concurrent checkpoints */
+ struct mutex j_checkpoint_mutex;
+
+ /*
+ * List of buffer heads used by the checkpoint routine. This
+ * was moved from jbd2_log_do_checkpoint() to reduce stack
+ * usage. Access to this array is controlled by the
+ * j_checkpoint_mutex. [j_checkpoint_mutex]
+ */
+ struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH];
+
+ /*
+ * Journal head: identifies the first unused block in the journal.
+ * [j_state_lock]
+ */
+ unsigned long j_head;
+
+ /*
+ * Journal tail: identifies the oldest still-used block in the journal.
+ * [j_state_lock]
+ */
+ unsigned long j_tail;
+
+ /*
+ * Journal free: how many free blocks are there in the journal?
+ * [j_state_lock]
+ */
+ unsigned long j_free;
+
+ /*
+ * Journal start and end: the block numbers of the first usable block
+ * and one beyond the last usable block in the journal. [j_state_lock]
+ */
+ unsigned long j_first;
+ unsigned long j_last;
+
+ /*
+ * Device, blocksize and starting block offset for the location where we
+ * store the journal.
+ */
+ struct block_device *j_dev;
+ int j_blocksize;
+ unsigned long long j_blk_offset;
+ char j_devname[BDEVNAME_SIZE+24];
+
+ /*
+ * Device which holds the client fs. For internal journal this will be
+ * equal to j_dev.
+ */
+ struct block_device *j_fs_dev;
+
+ /* Total maximum capacity of the journal region on disk. */
+ unsigned int j_maxlen;
+
+ /* Number of buffers reserved from the running transaction */
+ atomic_t j_reserved_credits;
+
+ /*
+ * Protects the buffer lists and internal buffer state.
+ */
+ spinlock_t j_list_lock;
+
+ /* Optional inode where we store the journal. If present, all */
+ /* journal block numbers are mapped into this inode via */
+ /* bmap(). */
+ struct inode *j_inode;
+
+ /*
+ * Sequence number of the oldest transaction in the log [j_state_lock]
+ */
+ tid_t j_tail_sequence;
+
+ /*
+ * Sequence number of the next transaction to grant [j_state_lock]
+ */
+ tid_t j_transaction_sequence;
+
+ /*
+ * Sequence number of the most recently committed transaction
+ * [j_state_lock].
+ */
+ tid_t j_commit_sequence;
+
+ /*
+ * Sequence number of the most recent transaction wanting commit
+ * [j_state_lock]
+ */
+ tid_t j_commit_request;
+
+ /*
+ * Journal uuid: identifies the object (filesystem, LVM volume etc)
+ * backed by this journal. This will eventually be replaced by an array
+ * of uuids, allowing us to index multiple devices within a single
+ * journal and to perform atomic updates across them.
+ */
+ __u8 j_uuid[16];
+
+ /* Pointer to the current commit thread for this journal */
+ struct task_struct *j_task;
+
+ /*
+ * Maximum number of metadata buffers to allow in a single compound
+ * commit transaction
+ */
+ int j_max_transaction_buffers;
+
+ /*
+ * What is the maximum transaction lifetime before we begin a commit?
+ */
+ unsigned long j_commit_interval;
+
+ /* The timer used to wakeup the commit thread: */
+ struct timer_list j_commit_timer;
+
+ /*
+ * The revoke table: maintains the list of revoked blocks in the
+ * current transaction. [j_revoke_lock]
+ */
+ spinlock_t j_revoke_lock;
+ struct jbd2_revoke_table_s *j_revoke;
+ struct jbd2_revoke_table_s *j_revoke_table[2];
+
+ /*
+ * array of bhs for jbd2_journal_commit_transaction
+ */
+ struct buffer_head **j_wbuf;
+ int j_wbufsize;
+
+ /*
+	 * this is the pid of the last person to run a synchronous operation
+ * through the journal
+ */
+ pid_t j_last_sync_writer;
+
+ /*
+ * the average amount of time in nanoseconds it takes to commit a
+ * transaction to disk. [j_state_lock]
+ */
+ u64 j_average_commit_time;
+
+ /*
+ * minimum and maximum times that we should wait for
+ * additional filesystem operations to get batched into a
+ * synchronous handle in microseconds
+ */
+ u32 j_min_batch_time;
+ u32 j_max_batch_time;
+
+ /* This function is called when a transaction is closed */
+ void (*j_commit_callback)(journal_t *,
+ transaction_t *);
+
+ /*
+ * Journal statistics
+ */
+ spinlock_t j_history_lock;
+ struct proc_dir_entry *j_proc_entry;
+ struct transaction_stats_s j_stats;
+
+ /* Failed journal commit ID */
+ unsigned int j_failed_commit;
+
+ /*
+ * An opaque pointer to fs-private information. ext3 puts its
+ * superblock pointer here
+ */
+ void *j_private;
+
+ /* Reference to checksum algorithm driver via cryptoapi */
+ struct crypto_shash *j_chksum_driver;
+
+ /* Precomputed journal UUID checksum for seeding other checksums */
+ __u32 j_csum_seed;
+};
+
+/*
+ * Journal flag definitions
+ */
+#define JBD2_UNMOUNT 0x001 /* Journal thread is being destroyed */
+#define JBD2_ABORT 0x002 /* Journaling has been aborted for errors. */
+#define JBD2_ACK_ERR 0x004 /* The errno in the sb has been acked */
+#define JBD2_FLUSHED 0x008 /* The journal superblock has been flushed */
+#define JBD2_LOADED 0x010 /* The journal superblock has been loaded */
+#define JBD2_BARRIER 0x020 /* Use IDE barriers */
+#define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
+ * data write error in ordered
+ * mode */
+
+/*
+ * Function declarations for the journaling transaction and buffer
+ * management
+ */
+
+/* Filing buffers */
+extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
+extern void __jbd2_journal_refile_buffer(struct journal_head *);
+extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
+extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void __journal_free_buffer(struct journal_head *bh);
+extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
+extern void __journal_clean_data_list(transaction_t *transaction);
+static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
+{
+ list_add_tail(&bh->b_assoc_buffers, head);
+}
+static inline void jbd2_unfile_log_bh(struct buffer_head *bh)
+{
+ list_del_init(&bh->b_assoc_buffers);
+}
+
+/* Log buffer allocation */
+struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
+int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
+int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
+ unsigned long *block);
+int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+
+/* Commit management */
+extern void jbd2_journal_commit_transaction(journal_t *);
+
+/* Checkpoint list management */
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
+
+
+/*
+ * Triggers
+ */
+
+struct jbd2_buffer_trigger_type {
+ /*
+	 * Fired at the moment data to write to the journal are known to be
+ * stable - so either at the moment b_frozen_data is created or just
+ * before a buffer is written to the journal. mapped_data is a mapped
+ * buffer that is the frozen data for commit.
+ */
+ void (*t_frozen)(struct jbd2_buffer_trigger_type *type,
+ struct buffer_head *bh, void *mapped_data,
+ size_t size);
+
+ /*
+ * Fired during journal abort for dirty buffers that will not be
+ * committed.
+ */
+ void (*t_abort)(struct jbd2_buffer_trigger_type *type,
+ struct buffer_head *bh);
+};
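+
+/*
+ * Illustrative sketch only: a client filesystem that stores a checksum in
+ * each metadata block could recompute it once the frozen image is stable
+ * (my_frozen and my_triggers are hypothetical names):
+ *
+ *	static void my_frozen(struct jbd2_buffer_trigger_type *type,
+ *			      struct buffer_head *bh, void *mapped_data,
+ *			      size_t size)
+ *	{
+ *		... recompute the checksum stored inside mapped_data ...
+ *	}
+ *
+ *	static struct jbd2_buffer_trigger_type my_triggers = {
+ *		.t_frozen = my_frozen,
+ *	};
+ *
+ * and attach it with jbd2_journal_set_triggers(bh, &my_triggers) before
+ * the buffer is dirtied.
+ */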
+
+extern void jbd2_buffer_frozen_trigger(struct journal_head *jh,
+ void *mapped_data,
+ struct jbd2_buffer_trigger_type *triggers);
+extern void jbd2_buffer_abort_trigger(struct journal_head *jh,
+ struct jbd2_buffer_trigger_type *triggers);
+
+/* Buffer IO */
+extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
+ struct journal_head *jh_in,
+ struct buffer_head **bh_out,
+ sector_t blocknr);
+
+/* Transaction locking */
+extern void __wait_on_journal (journal_t *);
+
+/* Transaction cache support */
+extern void jbd2_journal_destroy_transaction_cache(void);
+extern int jbd2_journal_init_transaction_cache(void);
+extern void jbd2_journal_free_transaction(transaction_t *);
+
+/*
+ * Journal locking.
+ *
+ * We need to lock the journal during transaction state changes so that nobody
+ * ever tries to take a handle on the running transaction while we are in the
+ * middle of moving it to the commit phase. j_state_lock does this.
+ *
+ * Note that the locking is completely interrupt unsafe. We never touch
+ * journal structures from interrupts.
+ */
+
+static inline handle_t *journal_current_handle(void)
+{
+ return current->journal_info;
+}
+
+/* The journaling code user interface:
+ *
+ * Create and destroy handles
+ * Register buffer modifications against the current transaction.
+ */
+
+extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
+extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks,
+ gfp_t gfp_mask, unsigned int type,
+ unsigned int line_no);
+extern int jbd2_journal_restart(handle_t *, int nblocks);
+extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
+extern int jbd2_journal_start_reserved(handle_t *handle,
+ unsigned int type, unsigned int line_no);
+extern void jbd2_journal_free_reserved(handle_t *handle);
+extern int jbd2_journal_extend (handle_t *, int nblocks);
+extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
+extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
+extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
+void jbd2_journal_set_triggers(struct buffer_head *,
+ struct jbd2_buffer_trigger_type *type);
+extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *);
+extern int jbd2_journal_forget (handle_t *, struct buffer_head *);
+extern void journal_sync_buffer (struct buffer_head *);
+extern int jbd2_journal_invalidatepage(journal_t *,
+ struct page *, unsigned int, unsigned int);
+extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
+extern int jbd2_journal_stop(handle_t *);
+extern int jbd2_journal_flush (journal_t *);
+extern void jbd2_journal_lock_updates (journal_t *);
+extern void jbd2_journal_unlock_updates (journal_t *);
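+
+/*
+ * Minimal sketch of a typical handle life cycle (illustrative only; the
+ * journal and the buffer_head "bh" are assumed to come from the caller,
+ * and most error handling is omitted):
+ *
+ *	handle_t *handle;
+ *	int err;
+ *
+ *	handle = jbd2_journal_start(journal, 1);
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ *	err = jbd2_journal_get_write_access(handle, bh);
+ *	if (!err) {
+ *		... modify the buffer contents ...
+ *		err = jbd2_journal_dirty_metadata(handle, bh);
+ *	}
+ *	jbd2_journal_stop(handle);
+ */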
+
+extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
+ struct block_device *fs_dev,
+ unsigned long long start, int len, int bsize);
+extern journal_t * jbd2_journal_init_inode (struct inode *);
+extern int jbd2_journal_update_format (journal_t *);
+extern int jbd2_journal_check_used_features
+ (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int jbd2_journal_check_available_features
+ (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int jbd2_journal_set_features
+ (journal_t *, unsigned long, unsigned long, unsigned long);
+extern void jbd2_journal_clear_features
+ (journal_t *, unsigned long, unsigned long, unsigned long);
+extern int jbd2_journal_load (journal_t *journal);
+extern int jbd2_journal_destroy (journal_t *);
+extern int jbd2_journal_recover (journal_t *journal);
+extern int jbd2_journal_wipe (journal_t *, int);
+extern int jbd2_journal_skip_recovery (journal_t *);
+extern void jbd2_journal_update_sb_errno(journal_t *);
+extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
+ unsigned long, int);
+extern void __jbd2_journal_abort_hard (journal_t *);
+extern void jbd2_journal_abort (journal_t *, int);
+extern int jbd2_journal_errno (journal_t *);
+extern void jbd2_journal_ack_err (journal_t *);
+extern int jbd2_journal_clear_err (journal_t *);
+extern int jbd2_journal_bmap(journal_t *, unsigned long, unsigned long long *);
+extern int jbd2_journal_force_commit(journal_t *);
+extern int jbd2_journal_force_commit_nested(journal_t *);
+extern int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *inode);
+extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
+ struct jbd2_inode *inode, loff_t new_size);
+extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
+extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_inode *jinode);
+
+/*
+ * journal_head management
+ */
+struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh);
+struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh);
+void jbd2_journal_put_journal_head(struct journal_head *jh);
+
+/*
+ * handle management
+ */
+extern struct kmem_cache *jbd2_handle_cache;
+
+static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
+{
+ return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags);
+}
+
+static inline void jbd2_free_handle(handle_t *handle)
+{
+ kmem_cache_free(jbd2_handle_cache, handle);
+}
+
+/*
+ * jbd2_inode management (optional, for those file systems that want to use
+ * dynamically allocated jbd2_inode structures)
+ */
+extern struct kmem_cache *jbd2_inode_cache;
+
+static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
+{
+ return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
+}
+
+static inline void jbd2_free_inode(struct jbd2_inode *jinode)
+{
+ kmem_cache_free(jbd2_inode_cache, jinode);
+}
+
+/* Primary revoke support */
+#define JOURNAL_REVOKE_DEFAULT_HASH 256
+extern int jbd2_journal_init_revoke(journal_t *, int);
+extern void jbd2_journal_destroy_revoke_caches(void);
+extern int jbd2_journal_init_revoke_caches(void);
+
+extern void jbd2_journal_destroy_revoke(journal_t *);
+extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);
+extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *);
+extern void jbd2_journal_write_revoke_records(journal_t *journal,
+ transaction_t *transaction,
+ struct list_head *log_bufs,
+ int write_op);
+
+/* Recovery revoke support */
+extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t);
+extern int jbd2_journal_test_revoke(journal_t *, unsigned long long, tid_t);
+extern void jbd2_journal_clear_revoke(journal_t *);
+extern void jbd2_journal_switch_revoke_table(journal_t *journal);
+extern void jbd2_clear_buffer_revoked_flags(journal_t *journal);
+
+/*
+ * The log thread user interface:
+ *
+ * Request space in the current transaction, and force transaction commit
+ * transitions on demand.
+ */
+
+int jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int __jbd2_log_start_commit(journal_t *journal, tid_t tid);
+int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
+int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
+int jbd2_complete_transaction(journal_t *journal, tid_t tid);
+int jbd2_log_do_checkpoint(journal_t *journal);
+int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
+
+void __jbd2_log_wait_for_space(journal_t *journal);
+extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
+extern int jbd2_cleanup_journal_tail(journal_t *);
+
+/*
+ * is_journal_aborted
+ *
+ * Simple test wrapper function to test the JBD2_ABORT state flag. This
+ * bit, when set, indicates that we have had a fatal error somewhere,
+ * either inside the journaling layer or indicated to us by the client
+ * (e.g. ext3), and that we should not commit any further
+ * transactions.
+ */
+
+static inline int is_journal_aborted(journal_t *journal)
+{
+ return journal->j_flags & JBD2_ABORT;
+}
+
+static inline int is_handle_aborted(handle_t *handle)
+{
+ if (handle->h_aborted || !handle->h_transaction)
+ return 1;
+ return is_journal_aborted(handle->h_transaction->t_journal);
+}
+
+static inline void jbd2_journal_abort_handle(handle_t *handle)
+{
+ handle->h_aborted = 1;
+}
+
+#endif /* __KERNEL__ */
+
+/* Comparison functions for transaction IDs: perform comparisons using
+ * modulo arithmetic so that they work over sequence number wraps. */
+
+static inline int tid_gt(tid_t x, tid_t y)
+{
+ int difference = (x - y);
+ return (difference > 0);
+}
+
+static inline int tid_geq(tid_t x, tid_t y)
+{
+ int difference = (x - y);
+ return (difference >= 0);
+}
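+
+/*
+ * For example, with 32-bit tids, tid_gt(1, 0xfffffff0) is true: the
+ * unsigned subtraction wraps to a small positive value, so a transaction
+ * started just after the sequence number wraps still compares as newer
+ * than one started just before the wrap.
+ */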
+
+extern int jbd2_journal_blocks_per_page(struct inode *inode);
+extern size_t journal_tag_bytes(journal_t *journal);
+
+static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
+{
+ if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) ||
+ JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
+ * transaction control blocks.
+ */
+#define JBD2_CONTROL_BLOCKS_SHIFT 5
+
+/*
+ * Return the minimum number of blocks which must be free in the journal
+ * before a new transaction may be started. Must be called under j_state_lock.
+ */
+static inline int jbd2_space_needed(journal_t *journal)
+{
+ int nblocks = journal->j_max_transaction_buffers;
+ return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
+}
+
+/*
+ * Return number of free blocks in the log. Must be called under j_state_lock.
+ */
+static inline unsigned long jbd2_log_space_left(journal_t *journal)
+{
+ /* Allow for rounding errors */
+ unsigned long free = journal->j_free - 32;
+
+ if (journal->j_committing_transaction) {
+ unsigned long committing = atomic_read(&journal->
+ j_committing_transaction->t_outstanding_credits);
+
+ /* Transaction + control blocks */
+ free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
+ }
+ return free;
+}
+
+/*
+ * Definitions which augment the buffer_head layer
+ */
+
+/* journaling buffer types */
+#define BJ_None 0 /* Not journaled */
+#define BJ_Metadata 1 /* Normal journaled metadata */
+#define BJ_Forget 2 /* Buffer superseded by this transaction */
+#define BJ_Shadow 3 /* Buffer contents being shadowed to the log */
+#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
+#define BJ_Types 5
+
+extern int jbd_blocks_per_page(struct inode *inode);
+
+/* JBD uses a CRC32 checksum */
+#define JBD_MAX_CHECKSUM_SIZE 4
+
+static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
+ const void *address, unsigned int length)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[JBD_MAX_CHECKSUM_SIZE];
+ } desc;
+ int err;
+
+ BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) >
+ JBD_MAX_CHECKSUM_SIZE);
+
+ desc.shash.tfm = journal->j_chksum_driver;
+ desc.shash.flags = 0;
+ *(u32 *)desc.ctx = crc;
+
+ err = crypto_shash_update(&desc.shash, address, length);
+ BUG_ON(err);
+
+ return *(u32 *)desc.ctx;
+}
+
+/* Return most recent uncommitted transaction */
+static inline tid_t jbd2_get_latest_transaction(journal_t *journal)
+{
+ tid_t tid;
+
+ read_lock(&journal->j_state_lock);
+ tid = journal->j_commit_request;
+ if (journal->j_running_transaction)
+ tid = journal->j_running_transaction->t_tid;
+ read_unlock(&journal->j_state_lock);
+ return tid;
+}
+
+#ifdef __KERNEL__
+
+#define buffer_trace_init(bh) do {} while (0)
+#define print_buffer_fields(bh) do {} while (0)
+#define print_buffer_trace(bh) do {} while (0)
+#define BUFFER_TRACE(bh, info) do {} while (0)
+#define BUFFER_TRACE2(bh, bh2, info) do {} while (0)
+#define JBUFFER_TRACE(jh, info) do {} while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_JBD2_H */
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
new file mode 100644
index 000000000..3dc534323
--- /dev/null
+++ b/include/linux/jbd_common.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_JBD_STATE_H
+#define _LINUX_JBD_STATE_H
+
+#include <linux/bit_spinlock.h>
+
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+ return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+ return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+ bit_spin_lock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+ return bit_spin_trylock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+ return bit_spin_is_locked(BH_State, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+ bit_spin_unlock(BH_State, &bh->b_state);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+ bit_spin_lock(BH_JournalHead, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
+}
+
+#endif
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
new file mode 100644
index 000000000..348c6f47e
--- /dev/null
+++ b/include/linux/jhash.h
@@ -0,0 +1,175 @@
+#ifndef _LINUX_JHASH_H
+#define _LINUX_JHASH_H
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup.
+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+ * are externally useful functions. Routines to test the hash are included
+ * if SELF_TEST is defined. You can use this free for any purpose. It's in
+ * the public domain. It has no warranty.
+ *
+ * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
+ *
+ * I've modified Bob's hash to be useful in the Linux kernel, and
+ * any bugs present are my fault.
+ * Jozsef
+ */
+#include <linux/bitops.h>
+#include <linux/unaligned/packed_struct.h>
+
+/* Best hash sizes are of power of two */
+#define jhash_size(n) ((u32)1<<(n))
+/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */
+#define jhash_mask(n) (jhash_size(n)-1)
+
+/* __jhash_mix -- mix 3 32-bit values reversibly. */
+#define __jhash_mix(a, b, c) \
+{ \
+ a -= c; a ^= rol32(c, 4); c += b; \
+ b -= a; b ^= rol32(a, 6); a += c; \
+ c -= b; c ^= rol32(b, 8); b += a; \
+ a -= c; a ^= rol32(c, 16); c += b; \
+ b -= a; b ^= rol32(a, 19); a += c; \
+ c -= b; c ^= rol32(b, 4); b += a; \
+}
+
+/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
+#define __jhash_final(a, b, c) \
+{ \
+ c ^= b; c -= rol32(b, 14); \
+ a ^= c; a -= rol32(c, 11); \
+ b ^= a; b -= rol32(a, 25); \
+ c ^= b; c -= rol32(b, 16); \
+ a ^= c; a -= rol32(c, 4); \
+ b ^= a; b -= rol32(a, 14); \
+ c ^= b; c -= rol32(b, 24); \
+}
+
+/* An arbitrary initial parameter */
+#define JHASH_INITVAL 0xdeadbeef
+
+/* jhash - hash an arbitrary key
+ * @key: sequence of bytes as key
+ * @length: the length of the key
+ * @initval: the previous hash, or an arbitrary value
+ *
+ * The generic version, hashes an arbitrary sequence of bytes.
+ * No alignment or length assumptions are made about the input key.
+ *
+ * Returns the hash value of the key. The result depends on endianness.
+ */
+static inline u32 jhash(const void *key, u32 length, u32 initval)
+{
+ u32 a, b, c;
+ const u8 *k = key;
+
+ /* Set up the internal state */
+ a = b = c = JHASH_INITVAL + length + initval;
+
+ /* All but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12) {
+ a += __get_unaligned_cpu32(k);
+ b += __get_unaligned_cpu32(k + 4);
+ c += __get_unaligned_cpu32(k + 8);
+ __jhash_mix(a, b, c);
+ length -= 12;
+ k += 12;
+ }
+ /* Last block: affect all 32 bits of (c) */
+ /* All the case statements fall through */
+ switch (length) {
+ case 12: c += (u32)k[11]<<24;
+ case 11: c += (u32)k[10]<<16;
+ case 10: c += (u32)k[9]<<8;
+ case 9: c += k[8];
+ case 8: b += (u32)k[7]<<24;
+ case 7: b += (u32)k[6]<<16;
+ case 6: b += (u32)k[5]<<8;
+ case 5: b += k[4];
+ case 4: a += (u32)k[3]<<24;
+ case 3: a += (u32)k[2]<<16;
+ case 2: a += (u32)k[1]<<8;
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
+
+/* jhash2 - hash an array of u32's
+ * @k: the key which must be an array of u32's
+ * @length: the number of u32's in the key
+ * @initval: the previous hash, or an arbitrary value
+ *
+ * Returns the hash value of the key.
+ */
+static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
+{
+ u32 a, b, c;
+
+ /* Set up the internal state */
+ a = b = c = JHASH_INITVAL + (length<<2) + initval;
+
+ /* Handle most of the key */
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ __jhash_mix(a, b, c);
+ length -= 3;
+ k += 3;
+ }
+
+ /* Handle the last 3 u32's: all the case statements fall through */
+ switch (length) {
+ case 3: c += k[2];
+ case 2: b += k[1];
+ case 1: a += k[0];
+ __jhash_final(a, b, c);
+ case 0: /* Nothing left to add */
+ break;
+ }
+
+ return c;
+}
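+
+/*
+ * Illustrative sketch only: hashing an arbitrary key into one of 2^bits
+ * buckets with the helpers above ("flow" and "bits" are hypothetical):
+ *
+ *	u32 hash = jhash(&flow, sizeof(flow), JHASH_INITVAL);
+ *	unsigned int bucket = hash & jhash_mask(bits);
+ *
+ * Keys that are naturally arrays of u32 can use jhash2() instead and skip
+ * the byte-at-a-time tail handling.
+ */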
+
+
+/* __jhash_nwords - hash exactly 3, 2 or 1 word(s) */
+static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+{
+ a += initval;
+ b += initval;
+ c += initval;
+
+ __jhash_final(a, b, c);
+
+ return c;
+}
+
+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
+{
+ return __jhash_nwords(a, b, c, initval + JHASH_INITVAL + (3 << 2));
+}
+
+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
+{
+ return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
+}
+
+static inline u32 jhash_1word(u32 a, u32 initval)
+{
+ return __jhash_nwords(a, 0, 0, initval + JHASH_INITVAL + (1 << 2));
+}
+
+#endif /* _LINUX_JHASH_H */
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
new file mode 100644
index 000000000..55feba7a5
--- /dev/null
+++ b/include/linux/jiffies.h
@@ -0,0 +1,314 @@
+#ifndef _LINUX_JIFFIES_H
+#define _LINUX_JIFFIES_H
+
+#include <linux/math64.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <asm/param.h> /* for HZ */
+
+/*
+ * The following defines establish the engineering parameters of the PLL
+ * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
+ * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
+ * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
+ * nearest power of two in order to avoid hardware multiply operations.
+ */
+#if HZ >= 12 && HZ < 24
+# define SHIFT_HZ 4
+#elif HZ >= 24 && HZ < 48
+# define SHIFT_HZ 5
+#elif HZ >= 48 && HZ < 96
+# define SHIFT_HZ 6
+#elif HZ >= 96 && HZ < 192
+# define SHIFT_HZ 7
+#elif HZ >= 192 && HZ < 384
+# define SHIFT_HZ 8
+#elif HZ >= 384 && HZ < 768
+# define SHIFT_HZ 9
+#elif HZ >= 768 && HZ < 1536
+# define SHIFT_HZ 10
+#elif HZ >= 1536 && HZ < 3072
+# define SHIFT_HZ 11
+#elif HZ >= 3072 && HZ < 6144
+# define SHIFT_HZ 12
+#elif HZ >= 6144 && HZ < 12288
+# define SHIFT_HZ 13
+#else
+# error Invalid value of HZ.
+#endif
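+
+/*
+ * For example, HZ = 1000 falls in the [768, 1536) range above, so
+ * SHIFT_HZ is 10 (2^10 = 1024 being the nearest power of two).
+ */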
+
+/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
+ * improve accuracy by shifting LSH bits, hence calculating:
+ * (NOM << LSH) / DEN
+ * This however means trouble for large NOM, because (NOM << LSH) may no
+ * longer fit in 32 bits. The following way of calculating this gives us
+ * some slack, under the following conditions:
+ * - (NOM / DEN) fits in (32 - LSH) bits.
+ * - (NOM % DEN) fits in (32 - LSH) bits.
+ */
+#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
+ + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
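+
+/*
+ * For example, SH_DIV(10, 3, 2) evaluates to
+ *	((10 / 3) << 2) + (((10 % 3) << 2) + 3 / 2) / 3 = 12 + 1 = 13,
+ * which is the rounded value of (10 << 2) / 3.  NOM itself is never
+ * shifted, only NOM / DEN and NOM % DEN, each of which must fit in
+ * (32 - LSH) bits as noted above.
+ */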
+
+/* LATCH is used in the interval timer and ftape setup. */
+#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
+
+extern int register_refined_jiffies(long clock_tick_rate);
+
+/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
+
+/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+
+/* some arch's have a small-data section that can be accessed register-relative
+ * but that can only take up to, say, 4-byte variables. jiffies being part of
+ * an 8-byte variable may not be correctly accessed unless we force the issue
+ */
+#define __jiffy_data __attribute__((section(".data")))
+
+/*
+ * The 64-bit value is not atomic - you MUST NOT read it
+ * without sampling the sequence number in jiffies_lock.
+ * get_jiffies_64() will do this for you as appropriate.
+ */
+extern u64 __jiffy_data jiffies_64;
+extern unsigned long volatile __jiffy_data jiffies;
+
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void);
+#else
+static inline u64 get_jiffies_64(void)
+{
+ return (u64)jiffies;
+}
+#endif
+
+/*
+ * These inlines deal with timer wrapping correctly. You are
+ * strongly encouraged to use them
+ * 1. Because people otherwise forget
+ * 2. Because if the timer wrap changes in future you won't have to
+ * alter your driver code.
+ *
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a,b) \
+ (typecheck(unsigned long, a) && \
+ typecheck(unsigned long, b) && \
+ ((long)((b) - (a)) < 0))
+#define time_before(a,b) time_after(b,a)
+
+#define time_after_eq(a,b) \
+ (typecheck(unsigned long, a) && \
+ typecheck(unsigned long, b) && \
+ ((long)((a) - (b)) >= 0))
+#define time_before_eq(a,b) time_after_eq(b,a)
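+
+/*
+ * Typical usage sketch (illustrative only; "condition_met" is a
+ * hypothetical predicate): poll for a condition with a one second
+ * deadline.
+ *
+ *	unsigned long timeout = jiffies + HZ;
+ *
+ *	while (!condition_met()) {
+ *		if (time_after(jiffies, timeout))
+ *			return -ETIMEDOUT;
+ *		cpu_relax();
+ *	}
+ *
+ * The comparison stays correct even if jiffies wraps during the wait.
+ */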
+
+/*
+ * Calculate whether a is in the range of [b, c].
+ */
+#define time_in_range(a,b,c) \
+ (time_after_eq(a,b) && \
+ time_before_eq(a,c))
+
+/*
+ * Calculate whether a is in the range of [b, c).
+ */
+#define time_in_range_open(a,b,c) \
+ (time_after_eq(a,b) && \
+ time_before(a,c))
+
+/* Same as above, but does so with platform independent 64bit types.
+ * These must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64() */
+#define time_after64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)((b) - (a)) < 0))
+#define time_before64(a,b) time_after64(b,a)
+
+#define time_after_eq64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)((a) - (b)) >= 0))
+#define time_before_eq64(a,b) time_after_eq64(b,a)
+
+#define time_in_range64(a, b, c) \
+ (time_after_eq64(a, b) && \
+ time_before_eq64(a, c))
+
+/*
+ * These four macros compare jiffies and 'a' for convenience.
+ */
+
+/* time_is_before_jiffies(a) return true if a is before jiffies */
+#define time_is_before_jiffies(a) time_after(jiffies, a)
+
+/* time_is_after_jiffies(a) return true if a is after jiffies */
+#define time_is_after_jiffies(a) time_before(jiffies, a)
+
+/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
+#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+
+/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
+#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+
+/*
+ * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * so jiffies wrap bugs show up earlier.
+ */
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ))
+
+/*
+ * Change timeval to jiffies, trying to avoid the
+ * most obvious overflows..
+ *
+ * And some not so obvious.
+ *
+ * Note that we don't want to return LONG_MAX, because
+ * for various timeout reasons we often end up having
+ * to wait "jiffies+1" in order to guarantee that we wait
+ * at _least_ "jiffies" - so "jiffies+1" had better still
+ * be positive.
+ */
+#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1)
+
+extern unsigned long preset_lpj;
+
+/*
+ * We want to do realistic conversions of time so we need to use the same
+ * values the update wall clock code uses as the jiffies size. This value
+ * is: TICK_NSEC (which is defined in timex.h). This
+ * is a constant and is in nanoseconds. We will use scaled math
+ * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
+ * NSEC_JIFFIE_SC. Note that these defines contain nothing but
+ * constants and so are computed at compile time. SHIFT_HZ (computed in
+ * timex.h) adjusts the scaling for different HZ values.
+
+ * Scaled math??? What is that?
+ *
+ * Scaled math is a way to do integer math on values that would,
+ * otherwise, either overflow, underflow, or cause undesired div
+ * instructions to appear in the execution path. In short, we "scale"
+ * up the operands so they take more bits (more precision, less
+ * underflow), do the desired operation and then "scale" the result back
+ * by the same amount. If we do the scaling by shifting we avoid the
+ * costly mpy and the dastardly div instructions.
+
+ * Suppose, for example, we want to convert from seconds to jiffies
+ * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The
+ * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
+ * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
+ * might calculate at compile time, however, the result will only have
+ * about 3-4 bits of precision (less for smaller values of HZ).
+ *
+ * So, we scale as follows:
+ * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
+ * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
+ * Then we make SCALE a power of two so:
+ * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
+ * Now we define:
+ * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
+ * jiff = (sec * SEC_CONV) >> SCALE;
+ *
+ * Often the math we use will expand beyond 32-bits so we tell C how to
+ * do this and pass the 64-bit result of the mpy through the ">> SCALE"
+ * which should take the result back to 32-bits. We want this expansion
+ * to capture as much precision as possible. At the same time we don't
+ * want to overflow so we pick the SCALE to avoid this. In this file,
+ * that means using a different scale for each range of HZ values (as
+ * defined in timex.h).
+ *
+ * For those who want to know, gcc will give a 64-bit result from a "*"
+ * operator if the result is a long long AND at least one of the
+ * operands is cast to long long (usually just prior to the "*" so as
+ * not to confuse it into thinking it really has a 64-bit operand,
+ * which, by the way, it can do, but it takes more code and at least 2
+ * mpys).
+
+ * We also need to be aware that one second in nanoseconds is only a
+ * couple of bits away from overflowing a 32-bit word, so we MUST use
+ * 64-bits to get the full range time in nanoseconds.
+
+ */
+
+/*
+ * Here are the scales we will use. One for seconds, nanoseconds and
+ * microseconds.
+ *
+ * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
+ * check if the sign bit is set. If not, we bump the shift count by 1.
+ * (Gets an extra bit of precision where we can use it.)
+ * We know it is set for HZ = 1024 and HZ = 100, not for 1000.
+ * Haven't tested others.
+
+ * The limits of cpp (for #if expressions) allow only long (no long long),
+ * but then we only need the most significant bit.
+ */
+
+#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
+#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
+#undef SEC_JIFFIE_SC
+#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
+#endif
+#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
+#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+
+#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+/*
+ * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * into seconds. The 64-bit case will overflow if we are not careful,
+ * so use the messy SH_DIV macro to do it. Still all constants.
+ */
+#if BITS_PER_LONG < 64
+# define MAX_SEC_IN_JIFFIES \
+ (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
+#else /* take care of overflow on 64 bits machines */
+# define MAX_SEC_IN_JIFFIES \
+ (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
+
+#endif
+
+/*
+ * Convert various time units to each other:
+ */
+extern unsigned int jiffies_to_msecs(const unsigned long j);
+extern unsigned int jiffies_to_usecs(const unsigned long j);
+
+static inline u64 jiffies_to_nsecs(const unsigned long j)
+{
+ return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
+}
+
+extern unsigned long msecs_to_jiffies(const unsigned int m);
+extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long timespec_to_jiffies(const struct timespec *value);
+extern void jiffies_to_timespec(const unsigned long jiffies,
+ struct timespec *value);
+extern unsigned long timeval_to_jiffies(const struct timeval *value);
+extern void jiffies_to_timeval(const unsigned long jiffies,
+ struct timeval *value);
+
+extern clock_t jiffies_to_clock_t(unsigned long x);
+static inline clock_t jiffies_delta_to_clock_t(long delta)
+{
+ return jiffies_to_clock_t(max(0L, delta));
+}
+
+extern unsigned long clock_t_to_jiffies(unsigned long x);
+extern u64 jiffies_64_to_clock_t(u64 x);
+extern u64 nsec_to_clock_t(u64 x);
+extern u64 nsecs_to_jiffies64(u64 n);
+extern unsigned long nsecs_to_jiffies(u64 n);
+
+#define TIMESTAMP_SIZE 30
+
+#endif
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
new file mode 100644
index 000000000..98cd41bb3
--- /dev/null
+++ b/include/linux/journal-head.h
@@ -0,0 +1,106 @@
+/*
+ * include/linux/journal-head.h
+ *
+ * buffer_head fields for JBD
+ *
+ * 27 May 2001 Andrew Morton
+ * Created - pulled out of fs.h
+ */
+
+#ifndef JOURNAL_HEAD_H_INCLUDED
+#define JOURNAL_HEAD_H_INCLUDED
+
+typedef unsigned int tid_t; /* Unique transaction ID */
+typedef struct transaction_s transaction_t; /* Compound transaction type */
+
+
+struct buffer_head;
+
+struct journal_head {
+ /*
+ * Points back to our buffer_head. [jbd_lock_bh_journal_head()]
+ */
+ struct buffer_head *b_bh;
+
+ /*
+ * Reference count - see description in journal.c
+ * [jbd_lock_bh_journal_head()]
+ */
+ int b_jcount;
+
+ /*
+ * Journalling list for this buffer [jbd_lock_bh_state()]
+ * NOTE: We *cannot* combine this with b_modified into a bitfield
+ * as gcc would then (which the C standard allows but which is
+	 * very unhelpful) make 64-bit accesses to the bitfield and clobber
+ * b_jcount if its update races with bitfield modification.
+ */
+ unsigned b_jlist;
+
+ /*
+ * This flag signals the buffer has been modified by
+ * the currently running transaction
+ * [jbd_lock_bh_state()]
+ */
+ unsigned b_modified;
+
+ /*
+ * Copy of the buffer data frozen for writing to the log.
+ * [jbd_lock_bh_state()]
+ */
+ char *b_frozen_data;
+
+ /*
+ * Pointer to a saved copy of the buffer containing no uncommitted
+ * deallocation references, so that allocations can avoid overwriting
+ * uncommitted deletes. [jbd_lock_bh_state()]
+ */
+ char *b_committed_data;
+
+ /*
+ * Pointer to the compound transaction which owns this buffer's
+ * metadata: either the running transaction or the committing
+ * transaction (if there is one). Only applies to buffers on a
+ * transaction's data or metadata journaling list.
+ * [j_list_lock] [jbd_lock_bh_state()]
+ * Either of these locks is enough for reading, both are needed for
+ * changes.
+ */
+ transaction_t *b_transaction;
+
+ /*
+ * Pointer to the running compound transaction which is currently
+ * modifying the buffer's metadata, if there was already a transaction
+ * committing it when the new transaction touched it.
+ * [t_list_lock] [jbd_lock_bh_state()]
+ */
+ transaction_t *b_next_transaction;
+
+ /*
+ * Doubly-linked list of buffers on a transaction's data, metadata or
+ * forget queue. [t_list_lock] [jbd_lock_bh_state()]
+ */
+ struct journal_head *b_tnext, *b_tprev;
+
+ /*
+ * Pointer to the compound transaction against which this buffer
+ * is checkpointed. Only dirty buffers can be checkpointed.
+ * [j_list_lock]
+ */
+ transaction_t *b_cp_transaction;
+
+ /*
+ * Doubly-linked list of buffers still remaining to be flushed
+ * before an old transaction can be checkpointed.
+ * [j_list_lock]
+ */
+ struct journal_head *b_cpnext, *b_cpprev;
+
+ /* Trigger type */
+ struct jbd2_buffer_trigger_type *b_triggers;
+
+ /* Trigger type for the committing transaction's frozen data */
+ struct jbd2_buffer_trigger_type *b_frozen_triggers;
+};
+
+#endif /* JOURNAL_HEAD_H_INCLUDED */
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
new file mode 100644
index 000000000..cbf2aa9e9
--- /dev/null
+++ b/include/linux/joystick.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 1996-2000 Vojtech Pavlik
+ *
+ * Sponsored by SuSE
+ */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Should you need to contact me, the author, you can do so either by
+ * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
+ * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
+ */
+#ifndef _LINUX_JOYSTICK_H
+#define _LINUX_JOYSTICK_H
+
+#include <uapi/linux/joystick.h>
+
+#if BITS_PER_LONG == 64
+#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_64
+#elif BITS_PER_LONG == 32
+#define JS_DATA_SAVE_TYPE JS_DATA_SAVE_TYPE_32
+#else
+#error Unexpected BITS_PER_LONG
+#endif
+#endif /* _LINUX_JOYSTICK_H */
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
new file mode 100644
index 000000000..f4de473f2
--- /dev/null
+++ b/include/linux/jump_label.h
@@ -0,0 +1,218 @@
+#ifndef _LINUX_JUMP_LABEL_H
+#define _LINUX_JUMP_LABEL_H
+
+/*
+ * Jump label support
+ *
+ * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
+ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Jump labels provide an interface to generate dynamic branches using
+ * self-modifying code. Assuming toolchain and architecture support, the result
+ * of an "if (static_key_false(&key))" statement is an unconditional branch (which
+ * defaults to false - and the true block is placed out of line).
+ *
+ * However at runtime we can change the branch target using
+ * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
+ * object, and for as long as there are references all branches referring to
+ * that particular key will point to the (out of line) true block.
+ *
+ * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * must be considered absolute slow paths (machine wide synchronization etc.).
+ * OTOH, since the affected branches are unconditional, their runtime overhead
+ * will be absolutely minimal, esp. in the default (off) case where the total
+ * effect is a single NOP of appropriate size. The on case will patch in a jump
+ * to the out-of-line block.
+ *
+ * When the control is directly exposed to userspace, it is prudent to delay the
+ * decrement to avoid high frequency code modifications which can (and do)
+ * cause significant performance degradation. Struct static_key_deferred and
+ * static_key_slow_dec_deferred() provide for this.
+ *
+ * Lacking toolchain and/or architecture support, jump labels fall back to a simple
+ * conditional branch.
+ *
+ * struct static_key my_key = STATIC_KEY_INIT_TRUE;
+ *
+ * if (static_key_true(&my_key)) {
+ * }
+ *
+ * will result in the true case being in-line and starts the key with a single
+ * reference. Mixing static_key_true() and static_key_false() on the same key is not
+ * allowed.
+ *
+ * Not initializing the key (static data is initialized to 0s anyway) is the
+ * same as using STATIC_KEY_INIT_FALSE.
+ */
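+
+/*
+ * Runtime control is done by reference count (illustrative only): taking a
+ * reference with
+ *
+ *	static_key_slow_inc(&my_key);
+ *
+ * makes every branch on my_key take the true side, and the branches are
+ * only patched back after a matching static_key_slow_dec(&my_key) drops
+ * the last reference.
+ */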
+
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+# define HAVE_JUMP_LABEL
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/bug.h>
+
+extern bool static_key_initialized;
+
+#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized, \
+ "%s used before call to jump_label_init", \
+ __func__)
+
+#ifdef HAVE_JUMP_LABEL
+
+struct static_key {
+ atomic_t enabled;
+/* Set lsb bit to 1 if branch is default true, 0 otherwise */
+ struct jump_entry *entries;
+#ifdef CONFIG_MODULES
+ struct static_key_mod *next;
+#endif
+};
+
+#else
+struct static_key {
+ atomic_t enabled;
+};
+#endif /* HAVE_JUMP_LABEL */
+#endif /* __ASSEMBLY__ */
+
+#ifdef HAVE_JUMP_LABEL
+#include <asm/jump_label.h>
+#endif
+
+#ifndef __ASSEMBLY__
+
+enum jump_label_type {
+ JUMP_LABEL_DISABLE = 0,
+ JUMP_LABEL_ENABLE,
+};
+
+struct module;
+
+#include <linux/atomic.h>
+
+static inline int static_key_count(struct static_key *key)
+{
+ return atomic_read(&key->enabled);
+}
+
+#ifdef HAVE_JUMP_LABEL
+
+#define JUMP_LABEL_TYPE_FALSE_BRANCH 0UL
+#define JUMP_LABEL_TYPE_TRUE_BRANCH 1UL
+#define JUMP_LABEL_TYPE_MASK 1UL
+
+static
+inline struct jump_entry *jump_label_get_entries(struct static_key *key)
+{
+ return (struct jump_entry *)((unsigned long)key->entries
+ & ~JUMP_LABEL_TYPE_MASK);
+}
+
+static inline bool jump_label_get_branch_default(struct static_key *key)
+{
+ if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
+ JUMP_LABEL_TYPE_TRUE_BRANCH)
+ return true;
+ return false;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+ return arch_static_branch(key);
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
+{
+ return !static_key_false(key);
+}
+
+extern struct jump_entry __start___jump_table[];
+extern struct jump_entry __stop___jump_table[];
+
+extern void jump_label_init(void);
+extern void jump_label_lock(void);
+extern void jump_label_unlock(void);
+extern void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type);
+extern void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type);
+extern int jump_label_text_reserved(void *start, void *end);
+extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_dec(struct static_key *key);
+extern void jump_label_apply_nops(struct module *mod);
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(1), \
+ .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(0), \
+ .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+
+#else /* !HAVE_JUMP_LABEL */
+
+static __always_inline void jump_label_init(void)
+{
+ static_key_initialized = true;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+ if (unlikely(static_key_count(key) > 0))
+ return true;
+ return false;
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
+{
+ if (likely(static_key_count(key) > 0))
+ return true;
+ return false;
+}
+
+static inline void static_key_slow_inc(struct static_key *key)
+{
+ STATIC_KEY_CHECK_USE();
+ atomic_inc(&key->enabled);
+}
+
+static inline void static_key_slow_dec(struct static_key *key)
+{
+ STATIC_KEY_CHECK_USE();
+ atomic_dec(&key->enabled);
+}
+
+static inline int jump_label_text_reserved(void *start, void *end)
+{
+ return 0;
+}
+
+static inline void jump_label_lock(void) {}
+static inline void jump_label_unlock(void) {}
+
+static inline int jump_label_apply_nops(struct module *mod)
+{
+ return 0;
+}
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(1) })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+ { .enabled = ATOMIC_INIT(0) })
+
+#endif /* HAVE_JUMP_LABEL */
+
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
+static inline bool static_key_enabled(struct static_key *key)
+{
+ return static_key_count(key) > 0;
+}
+
+#endif /* _LINUX_JUMP_LABEL_H */
+
+#endif /* __ASSEMBLY__ */
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
new file mode 100644
index 000000000..089f70f83
--- /dev/null
+++ b/include/linux/jump_label_ratelimit.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_JUMP_LABEL_RATELIMIT_H
+#define _LINUX_JUMP_LABEL_RATELIMIT_H
+
+#include <linux/jump_label.h>
+#include <linux/workqueue.h>
+
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+struct static_key_deferred {
+ struct static_key key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+#endif
+
+#ifdef HAVE_JUMP_LABEL
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#else /* !HAVE_JUMP_LABEL */
+struct static_key_deferred {
+ struct static_key key;
+};
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+ static_key_slow_dec(&key->key);
+}
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
+ unsigned long rl)
+{
+ STATIC_KEY_CHECK_USE();
+}
+#endif /* HAVE_JUMP_LABEL */
+#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
diff --git a/include/linux/jz4740-adc.h b/include/linux/jz4740-adc.h
new file mode 100644
index 000000000..8184578fb
--- /dev/null
+++ b/include/linux/jz4740-adc.h
@@ -0,0 +1,32 @@
+
+#ifndef __LINUX_JZ4740_ADC
+#define __LINUX_JZ4740_ADC
+
+struct device;
+
+/**
+ * jz4740_adc_set_config - Configure a JZ4740 adc device
+ * @dev: Pointer to a jz4740-adc device
+ * @mask: Mask for the config value to be set
+ * @val: Value to be set
+ *
+ * This function can be used by the JZ4740 ADC mfd cells to configure their
+ * options in the shared config register.
+ */
+int jz4740_adc_set_config(struct device *dev, uint32_t mask, uint32_t val);
+
+#define JZ_ADC_CONFIG_SPZZ BIT(31)
+#define JZ_ADC_CONFIG_EX_IN BIT(30)
+#define JZ_ADC_CONFIG_DNUM_MASK (0x7 << 16)
+#define JZ_ADC_CONFIG_DMA_ENABLE BIT(15)
+#define JZ_ADC_CONFIG_XYZ_MASK (0x2 << 13)
+#define JZ_ADC_CONFIG_SAMPLE_NUM_MASK (0x7 << 10)
+#define JZ_ADC_CONFIG_CLKDIV_MASK (0xf << 5)
+#define JZ_ADC_CONFIG_BAT_MB BIT(4)
+
+#define JZ_ADC_CONFIG_DNUM(dnum) ((dnum) << 16)
+#define JZ_ADC_CONFIG_XYZ_OFFSET(xyz) ((xyz) << 13)
+#define JZ_ADC_CONFIG_SAMPLE_NUM(x) ((x) << 10)
+#define JZ_ADC_CONFIG_CLKDIV(div) ((div) << 5)
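As an illustration only (the wrapper function and divider value are made up, and the device pointer is assumed to be the shared jz4740-adc device), a cell driver would pass a mask/value pair like this:

    /* Sketch: program the shared clock divider field. */
    static int example_set_clkdiv(struct device *adc)
    {
            return jz4740_adc_set_config(adc,
                                         JZ_ADC_CONFIG_CLKDIV_MASK,
                                         JZ_ADC_CONFIG_CLKDIV(7));
    }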
+
+#endif
diff --git a/include/linux/jz4780-nemc.h b/include/linux/jz4780-nemc.h
new file mode 100644
index 000000000..e7f1cc7a2
--- /dev/null
+++ b/include/linux/jz4780-nemc.h
@@ -0,0 +1,43 @@
+/*
+ * JZ4780 NAND/external memory controller (NEMC)
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex@alex-smith.me.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_JZ4780_NEMC_H__
+#define __LINUX_JZ4780_NEMC_H__
+
+#include <linux/types.h>
+
+struct device;
+
+/*
+ * Number of NEMC banks. Note that there are actually 6, but they are numbered
+ * from 1.
+ */
+#define JZ4780_NEMC_NUM_BANKS 7
+
+/**
+ * enum jz4780_nemc_bank_type - device types which can be connected to a bank
+ * @JZ4780_NEMC_BANK_SRAM: SRAM
+ * @JZ4780_NEMC_BANK_NAND: NAND
+ */
+enum jz4780_nemc_bank_type {
+ JZ4780_NEMC_BANK_SRAM,
+ JZ4780_NEMC_BANK_NAND,
+};
+
+extern unsigned int jz4780_nemc_num_banks(struct device *dev);
+
+extern void jz4780_nemc_set_type(struct device *dev, unsigned int bank,
+ enum jz4780_nemc_bank_type type);
+extern void jz4780_nemc_assert(struct device *dev, unsigned int bank,
+ bool assert);
+
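A hedged sketch of how a client driver might use these helpers (the bank number is arbitrary, and the device argument is whatever the NEMC driver expects from its callers):

    /* Sketch: mark bank 1 as NAND and assert its chip select. */
    static void example_setup_nand(struct device *dev)
    {
            unsigned int bank = 1;  /* banks are numbered 1..6 */

            jz4780_nemc_set_type(dev, bank, JZ4780_NEMC_BANK_NAND);
            jz4780_nemc_assert(dev, bank, true);
    }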
+#endif /* __LINUX_JZ4780_NEMC_H__ */
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
new file mode 100644
index 000000000..6883e197a
--- /dev/null
+++ b/include/linux/kallsyms.h
@@ -0,0 +1,128 @@
+/* Rewritten and vastly simplified by Rusty Russell for in-kernel
+ * module loader:
+ * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ */
+#ifndef _LINUX_KALLSYMS_H
+#define _LINUX_KALLSYMS_H
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+#define KSYM_NAME_LEN 128
+#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
+ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
+
+struct module;
+
+#ifdef CONFIG_KALLSYMS
+/* Look up the address for a symbol. Returns 0 if not found. */
+unsigned long kallsyms_lookup_name(const char *name);
+
+/* Call a function on each kallsyms symbol in the core kernel */
+int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
+ unsigned long),
+ void *data);
+
+extern int kallsyms_lookup_size_offset(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset);
+
+/* Look up an address. modname is set to NULL if it's in the kernel. */
+const char *kallsyms_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, char *namebuf);
+
+/* Look up a kernel symbol and return it in a text buffer. */
+extern int sprint_symbol(char *buffer, unsigned long address);
+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
+extern int sprint_backtrace(char *buffer, unsigned long address);
+
+/* Look up a kernel symbol and print it to the kernel messages. */
+extern void __print_symbol(const char *fmt, unsigned long address);
+
+int lookup_symbol_name(unsigned long addr, char *symname);
+int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
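A brief usage sketch (the reporting function is hypothetical): resolve an arbitrary kernel address into "name+offset/size [module]" form using the buffer size defined above.

    static void example_report(unsigned long addr)
    {
            char sym[KSYM_SYMBOL_LEN];

            sprint_symbol(sym, addr);
            pr_info("address 0x%lx is %s\n", addr, sym);
    }

In many cases printk's %pS specifier is enough on its own, as print_ip_sym() at the bottom of this header shows.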
+
+#else /* !CONFIG_KALLSYMS */
+
+static inline unsigned long kallsyms_lookup_name(const char *name)
+{
+ return 0;
+}
+
+static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *,
+ unsigned long),
+ void *data)
+{
+ return 0;
+}
+
+static inline int kallsyms_lookup_size_offset(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset)
+{
+ return 0;
+}
+
+static inline const char *kallsyms_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname, char *namebuf)
+{
+ return NULL;
+}
+
+static inline int sprint_symbol(char *buffer, unsigned long addr)
+{
+ *buffer = '\0';
+ return 0;
+}
+
+static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
+{
+ *buffer = '\0';
+ return 0;
+}
+
+static inline int sprint_backtrace(char *buffer, unsigned long addr)
+{
+ *buffer = '\0';
+ return 0;
+}
+
+static inline int lookup_symbol_name(unsigned long addr, char *symname)
+{
+ return -ERANGE;
+}
+
+static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
+{
+ return -ERANGE;
+}
+
+/* Stupid that this does nothing, but I didn't create this mess. */
+#define __print_symbol(fmt, addr)
+#endif /*CONFIG_KALLSYMS*/
+
+/* This macro allows us to keep printk typechecking */
+static __printf(1, 2)
+void __check_printsym_format(const char *fmt, ...)
+{
+}
+
+static inline void print_symbol(const char *fmt, unsigned long addr)
+{
+ __check_printsym_format(fmt, "");
+ __print_symbol(fmt, (unsigned long)
+ __builtin_extract_return_addr((void *)addr));
+}
+
+static inline void print_ip_sym(unsigned long ip)
+{
+ printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
+}
+
+#endif /*_LINUX_KALLSYMS_H*/
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
new file mode 100644
index 000000000..5486d777b
--- /dev/null
+++ b/include/linux/kasan.h
@@ -0,0 +1,88 @@
+#ifndef _LINUX_KASAN_H
+#define _LINUX_KASAN_H
+
+#include <linux/types.h>
+
+struct kmem_cache;
+struct page;
+struct vm_struct;
+
+#ifdef CONFIG_KASAN
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+#include <asm/kasan.h>
+#include <linux/sched.h>
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ + KASAN_SHADOW_OFFSET;
+}
+
+/* Enable reporting bugs after kasan_disable_current() */
+static inline void kasan_enable_current(void)
+{
+ current->kasan_depth++;
+}
+
+/* Disable reporting bugs for current task */
+static inline void kasan_disable_current(void)
+{
+ current->kasan_depth--;
+}
+
+void kasan_unpoison_shadow(const void *address, size_t size);
+
+void kasan_alloc_pages(struct page *page, unsigned int order);
+void kasan_free_pages(struct page *page, unsigned int order);
+
+void kasan_poison_slab(struct page *page);
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+
+void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kfree_large(const void *ptr);
+void kasan_kfree(void *ptr);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
+void kasan_krealloc(const void *object, size_t new_size);
+
+void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_free(struct kmem_cache *s, void *object);
+
+int kasan_module_alloc(void *addr, size_t size);
+void kasan_free_shadow(const struct vm_struct *vm);
+
+#else /* CONFIG_KASAN */
+
+static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
+
+static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
+static inline void kasan_free_pages(struct page *page, unsigned int order) {}
+
+static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+ void *object) {}
+static inline void kasan_poison_object_data(struct kmem_cache *cache,
+ void *object) {}
+
+static inline void kasan_kmalloc_large(const void *ptr, size_t size) {}
+static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
+ size_t size) {}
+static inline void kasan_krealloc(const void *object, size_t new_size) {}
+
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+
+#endif /* CONFIG_KASAN */
+
+#endif /* _LINUX_KASAN_H */
diff --git a/include/linux/kbd_diacr.h b/include/linux/kbd_diacr.h
new file mode 100644
index 000000000..7274ec68c
--- /dev/null
+++ b/include/linux/kbd_diacr.h
@@ -0,0 +1,8 @@
+#ifndef _DIACR_H
+#define _DIACR_H
+#include <linux/kd.h>
+
+extern struct kbdiacruc accent_table[];
+extern unsigned int accent_table_size;
+
+#endif /* _DIACR_H */
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
new file mode 100644
index 000000000..cbfb171bb
--- /dev/null
+++ b/include/linux/kbd_kern.h
@@ -0,0 +1,146 @@
+#ifndef _KBD_KERN_H
+#define _KBD_KERN_H
+
+#include <linux/tty.h>
+#include <linux/interrupt.h>
+#include <linux/keyboard.h>
+
+extern struct tasklet_struct keyboard_tasklet;
+
+extern char *func_table[MAX_NR_FUNC];
+extern char func_buf[];
+extern char *funcbufptr;
+extern int funcbufsize, funcbufleft;
+
+/*
+ * kbd->xxx contains the VC-local things (flag settings etc.)
+ *
+ * Note: externally visible are LED_SCR, LED_NUM, LED_CAP defined in kd.h.
+ * The code in KDGETLED / KDSETLED depends on the internal and
+ * external order being the same.
+ *
+ * Note: lockstate is used as index in the array key_map.
+ */
+struct kbd_struct {
+
+ unsigned char lockstate;
+/* 8 modifiers - the names do not have any meaning at all;
+   they can be associated with arbitrarily chosen keys */
+#define VC_SHIFTLOCK KG_SHIFT /* shift lock mode */
+#define VC_ALTGRLOCK KG_ALTGR /* altgr lock mode */
+#define VC_CTRLLOCK KG_CTRL /* control lock mode */
+#define VC_ALTLOCK KG_ALT /* alt lock mode */
+#define VC_SHIFTLLOCK KG_SHIFTL /* shiftl lock mode */
+#define VC_SHIFTRLOCK KG_SHIFTR /* shiftr lock mode */
+#define VC_CTRLLLOCK KG_CTRLL /* ctrll lock mode */
+#define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */
+ unsigned char slockstate; /* for `sticky' Shift, Ctrl, etc. */
+
+ unsigned char ledmode:1;
+#define LED_SHOW_FLAGS 0 /* traditional state */
+#define LED_SHOW_IOCTL 1 /* only change leds upon ioctl */
+
+ unsigned char ledflagstate:4; /* flags, not lights */
+ unsigned char default_ledflagstate:4;
+#define VC_SCROLLOCK 0 /* scroll-lock mode */
+#define VC_NUMLOCK 1 /* numeric lock mode */
+#define VC_CAPSLOCK 2 /* capslock mode */
+#define VC_KANALOCK 3 /* kanalock mode */
+
+ unsigned char kbdmode:3; /* one 3-bit value */
+#define VC_XLATE 0 /* translate keycodes using keymap */
+#define VC_MEDIUMRAW 1 /* medium raw (keycode) mode */
+#define VC_RAW 2 /* raw (scancode) mode */
+#define VC_UNICODE 3 /* Unicode mode */
+#define VC_OFF 4 /* disabled mode */
+
+ unsigned char modeflags:5;
+#define VC_APPLIC 0 /* application key mode */
+#define VC_CKMODE 1 /* cursor key mode */
+#define VC_REPEAT 2 /* keyboard repeat */
+#define VC_CRLF 3 /* 0 - enter sends CR, 1 - enter sends CRLF */
+#define VC_META 4 /* 0 - meta, 1 - meta=prefix with ESC */
+};
+
+extern int kbd_init(void);
+
+extern void setledstate(struct kbd_struct *kbd, unsigned int led);
+
+extern int do_poke_blanked_console;
+
+extern void (*kbd_ledfunc)(unsigned int led);
+
+extern int set_console(int nr);
+extern void schedule_console_callback(void);
+
+/* FIXME: review locking for vt.c callers */
+static inline void set_leds(void)
+{
+ tasklet_schedule(&keyboard_tasklet);
+}
+
+static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag)
+{
+ return ((kbd->modeflags >> flag) & 1);
+}
+
+static inline int vc_kbd_led(struct kbd_struct * kbd, int flag)
+{
+ return ((kbd->ledflagstate >> flag) & 1);
+}
+
+static inline void set_vc_kbd_mode(struct kbd_struct * kbd, int flag)
+{
+ kbd->modeflags |= 1 << flag;
+}
+
+static inline void set_vc_kbd_led(struct kbd_struct * kbd, int flag)
+{
+ kbd->ledflagstate |= 1 << flag;
+}
+
+static inline void clr_vc_kbd_mode(struct kbd_struct * kbd, int flag)
+{
+ kbd->modeflags &= ~(1 << flag);
+}
+
+static inline void clr_vc_kbd_led(struct kbd_struct * kbd, int flag)
+{
+ kbd->ledflagstate &= ~(1 << flag);
+}
+
+static inline void chg_vc_kbd_lock(struct kbd_struct * kbd, int flag)
+{
+ kbd->lockstate ^= 1 << flag;
+}
+
+static inline void chg_vc_kbd_slock(struct kbd_struct * kbd, int flag)
+{
+ kbd->slockstate ^= 1 << flag;
+}
+
+static inline void chg_vc_kbd_mode(struct kbd_struct * kbd, int flag)
+{
+ kbd->modeflags ^= 1 << flag;
+}
+
+static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag)
+{
+ kbd->ledflagstate ^= 1 << flag;
+}
+
+#define U(x) ((x) ^ 0xf000)
+
+#define BRL_UC_ROW 0x2800
+
+/* keyboard.c */
+
+struct console;
+
+void compute_shiftstate(void);
+
+/* defkeymap.c */
+
+extern unsigned int keymap_count;
+
+#endif
diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h
new file mode 100644
index 000000000..22a72198c
--- /dev/null
+++ b/include/linux/kbuild.h
@@ -0,0 +1,15 @@
+#ifndef __LINUX_KBUILD_H
+#define __LINUX_KBUILD_H
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+#define OFFSET(sym, str, mem) \
+ DEFINE(sym, offsetof(struct str, mem))
+
+#define COMMENT(x) \
+ asm volatile("\n->#" x)
+
+#endif
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
new file mode 100644
index 000000000..b33c7797e
--- /dev/null
+++ b/include/linux/kconfig.h
@@ -0,0 +1,54 @@
+#ifndef __LINUX_KCONFIG_H
+#define __LINUX_KCONFIG_H
+
+#include <generated/autoconf.h>
+
+/*
+ * Helper macros to use CONFIG_ options in C/CPP expressions. Note that
+ * these only work with boolean and tristate options.
+ */
+
+/*
+ * Getting something that works in C and CPP for an arg that may or may
+ * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1"
+ * we match on the placeholder define, insert the "0," for arg1 and generate
+ * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one).
+ * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
+ * the last step cherry picks the 2nd arg, we get a zero.
+ */
+#define __ARG_PLACEHOLDER_1 0,
+#define config_enabled(cfg) _config_enabled(cfg)
+#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+#define ___config_enabled(__ignored, val, ...) val
+
+/*
+ * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
+ * otherwise. For boolean options, this is equivalent to
+ * IS_ENABLED(CONFIG_FOO).
+ */
+#define IS_BUILTIN(option) config_enabled(option)
+
+/*
+ * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
+ * otherwise.
+ */
+#define IS_MODULE(option) config_enabled(option##_MODULE)
+
+/*
+ * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
+ * code can call a function defined in code compiled based on CONFIG_FOO.
+ * This is similar to IS_ENABLED(), but returns false when invoked from
+ * built-in code when CONFIG_FOO is set to 'm'.
+ */
+#define IS_REACHABLE(option) (config_enabled(option) || \
+ (config_enabled(option##_MODULE) && config_enabled(MODULE)))
+
+/*
+ * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
+ * 0 otherwise.
+ */
+#define IS_ENABLED(option) \
+ (IS_BUILTIN(option) || IS_MODULE(option))
+
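To make the cherry-picking trick above concrete, a short sketch of how these helpers are consumed (CONFIG_FOO is a stand-in option and the function is hypothetical, with pr_info() assumed available):

    static void example_init(void)
    {
            if (IS_ENABLED(CONFIG_FOO))     /* true for =y or =m */
                    pr_info("FOO support is compiled in\n");

            if (IS_BUILTIN(CONFIG_FOO))     /* true only for =y */
                    pr_info("FOO is built in, not a module\n");
    }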
+#endif /* __LINUX_KCONFIG_H */
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
new file mode 100644
index 000000000..d92762286
--- /dev/null
+++ b/include/linux/kcore.h
@@ -0,0 +1,38 @@
+/*
+ * /proc/kcore definitions
+ */
+#ifndef _LINUX_KCORE_H
+#define _LINUX_KCORE_H
+
+enum kcore_type {
+ KCORE_TEXT,
+ KCORE_VMALLOC,
+ KCORE_RAM,
+ KCORE_VMEMMAP,
+ KCORE_OTHER,
+};
+
+struct kcore_list {
+ struct list_head list;
+ unsigned long addr;
+ size_t size;
+ int type;
+};
+
+struct vmcore {
+ struct list_head list;
+ unsigned long long paddr;
+ unsigned long long size;
+ loff_t offset;
+};
+
+#ifdef CONFIG_PROC_KCORE
+extern void kclist_add(struct kcore_list *, void *, size_t, int type);
+#else
+static inline
+void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
+{
+}
+#endif
+
+#endif /* _LINUX_KCORE_H */
diff --git a/include/linux/kd.h b/include/linux/kd.h
new file mode 100644
index 000000000..25bd17fad
--- /dev/null
+++ b/include/linux/kd.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_KD_H
+#define _LINUX_KD_H
+
+#include <uapi/linux/kd.h>
+
+#define KD_FONT_FLAG_OLD 0x80000000 /* Invoked via old interface [compat] */
+#endif /* _LINUX_KD_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
new file mode 100644
index 000000000..a19bcf9e7
--- /dev/null
+++ b/include/linux/kdb.h
@@ -0,0 +1,221 @@
+#ifndef _KDB_H
+#define _KDB_H
+
+/*
+ * Kernel Debugger Architecture Independent Global Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
+ */
+
+/* Shifted versions of the command enable bits are used if the command
+ * has no arguments (see kdb_check_flags). This allows commands, such as
+ * go, to have different permissions depending upon whether they are
+ * called with an argument.
+ */
+#define KDB_ENABLE_NO_ARGS_SHIFT 10
+
+typedef enum {
+ KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
+ KDB_ENABLE_MEM_READ = (1 << 1),
+ KDB_ENABLE_MEM_WRITE = (1 << 2),
+ KDB_ENABLE_REG_READ = (1 << 3),
+ KDB_ENABLE_REG_WRITE = (1 << 4),
+ KDB_ENABLE_INSPECT = (1 << 5),
+ KDB_ENABLE_FLOW_CTRL = (1 << 6),
+ KDB_ENABLE_SIGNAL = (1 << 7),
+ KDB_ENABLE_REBOOT = (1 << 8),
+	/* User-exposed values stop here; all remaining flags are
+	 * used exclusively to describe a command's behaviour.
+ */
+
+ KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
+ KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
+
+ KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
+
+ KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
+ KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
+} kdb_cmdflags_t;
+
+typedef int (*kdb_func_t)(int, const char **);
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+
+#define KDB_POLL_FUNC_MAX 5
+extern int kdb_poll_idx;
+
+/*
+ * kdb_initial_cpu is initialized to -1, and is set to the cpu
+ * number whenever the kernel debugger is entered.
+ */
+extern int kdb_initial_cpu;
+extern atomic_t kdb_event;
+
+/* Types and messages used for dynamically added kdb shell commands */
+
+#define KDB_MAXARGS 16 /* Maximum number of arguments to a function */
+
+/* KDB return codes from a command or internal kdb function */
+#define KDB_NOTFOUND (-1)
+#define KDB_ARGCOUNT (-2)
+#define KDB_BADWIDTH (-3)
+#define KDB_BADRADIX (-4)
+#define KDB_NOTENV (-5)
+#define KDB_NOENVVALUE (-6)
+#define KDB_NOTIMP (-7)
+#define KDB_ENVFULL (-8)
+#define KDB_ENVBUFFULL (-9)
+#define KDB_TOOMANYBPT (-10)
+#define KDB_TOOMANYDBREGS (-11)
+#define KDB_DUPBPT (-12)
+#define KDB_BPTNOTFOUND (-13)
+#define KDB_BADMODE (-14)
+#define KDB_BADINT (-15)
+#define KDB_INVADDRFMT (-16)
+#define KDB_BADREG (-17)
+#define KDB_BADCPUNUM (-18)
+#define KDB_BADLENGTH (-19)
+#define KDB_NOBP (-20)
+#define KDB_BADADDR (-21)
+#define KDB_NOPERM (-22)
+
+/*
+ * kdb_diemsg
+ *
+ * Contains a pointer to the last string supplied to the
+ * kernel 'die' panic function.
+ */
+extern const char *kdb_diemsg;
+
+#define KDB_FLAG_EARLYKDB (1 << 0) /* set from boot parameter kdb=early */
+#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */
+#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
+#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */
+#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available,
+ * kdb is disabled */
+#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do
+ * not use keyboard */
+#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do
+ * not use keyboard */
+
+extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */
+
+extern void kdb_save_flags(void);
+extern void kdb_restore_flags(void);
+
+#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag)
+#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag))
+#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag))
+
+/*
+ * External entry point for the kernel debugger. The pt_regs
+ * at the time of entry are supplied along with the reason for
+ * entry to the kernel debugger.
+ */
+
+typedef enum {
+ KDB_REASON_ENTER = 1, /* KDB_ENTER() trap/fault - regs valid */
+ KDB_REASON_ENTER_SLAVE, /* KDB_ENTER_SLAVE() trap/fault - regs valid */
+ KDB_REASON_BREAK, /* Breakpoint inst. - regs valid */
+ KDB_REASON_DEBUG, /* Debug Fault - regs valid */
+ KDB_REASON_OOPS, /* Kernel Oops - regs valid */
+ KDB_REASON_SWITCH, /* CPU switch - regs valid*/
+ KDB_REASON_KEYBOARD, /* Keyboard entry - regs valid */
+ KDB_REASON_NMI, /* Non-maskable interrupt; regs valid */
+ KDB_REASON_RECURSE, /* Recursive entry to kdb;
+ * regs probably valid */
+ KDB_REASON_SSTEP, /* Single Step trap. - regs valid */
+ KDB_REASON_SYSTEM_NMI, /* In NMI due to SYSTEM cmd; regs valid */
+} kdb_reason_t;
+
+enum kdb_msgsrc {
+ KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */
+ KDB_MSGSRC_PRINTK, /* trapped from printk() */
+};
+
+extern int kdb_trap_printk;
+extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
+ va_list args);
+extern __printf(1, 2) int kdb_printf(const char *, ...);
+typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
+
+extern void kdb_init(int level);
+
+/* Access to kdb specific polling devices */
+typedef int (*get_char_func)(void);
+extern get_char_func kdb_poll_funcs[];
+extern int kdb_get_kbd_char(void);
+
+static inline
+int kdb_process_cpu(const struct task_struct *p)
+{
+ unsigned int cpu = task_thread_info(p)->cpu;
+ if (cpu > num_possible_cpus())
+ cpu = 0;
+ return cpu;
+}
+
+/* kdb access to register set for stack dumping */
+extern struct pt_regs *kdb_current_regs;
+#ifdef CONFIG_KALLSYMS
+extern const char *kdb_walk_kallsyms(loff_t *pos);
+#else /* ! CONFIG_KALLSYMS */
+static inline const char *kdb_walk_kallsyms(loff_t *pos)
+{
+ return NULL;
+}
+#endif /* ! CONFIG_KALLSYMS */
+
+/* Dynamic kdb shell command registration */
+extern int kdb_register(char *, kdb_func_t, char *, char *, short);
+extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+ short, kdb_cmdflags_t);
+extern int kdb_unregister(char *);
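A sketch of what registration might look like from a debugger extension (the command name, handler and help strings are illustrative only):

    /* Sketch: a minimal handler matching kdb_func_t. */
    static int example_kdb_hello(int argc, const char **argv)
    {
            kdb_printf("hello from kdb\n");
            return 0;
    }

    static void example_register(void)
    {
            kdb_register_flags("hello", example_kdb_hello, "",
                               "Print a greeting", 0,
                               KDB_ENABLE_ALWAYS_SAFE);
    }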
+#else /* ! CONFIG_KGDB_KDB */
+static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
+static inline void kdb_init(int level) {}
+static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
+ char *help, short minlen) { return 0; }
+static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
+ char *help, short minlen,
+ kdb_cmdflags_t flags) { return 0; }
+static inline int kdb_unregister(char *cmd) { return 0; }
+#endif /* CONFIG_KGDB_KDB */
+enum {
+ KDB_NOT_INITIALIZED,
+ KDB_INIT_EARLY,
+ KDB_INIT_FULL,
+};
+
+extern int kdbgetintenv(const char *, int *);
+extern int kdb_set(int, const char **);
+
+#endif /* !_KDB_H */
diff --git a/include/linux/kdebug.h b/include/linux/kdebug.h
new file mode 100644
index 000000000..ed815090b
--- /dev/null
+++ b/include/linux/kdebug.h
@@ -0,0 +1,22 @@
+#ifndef _LINUX_KDEBUG_H
+#define _LINUX_KDEBUG_H
+
+#include <asm/kdebug.h>
+
+struct notifier_block;
+
+struct die_args {
+ struct pt_regs *regs;
+ const char *str;
+ long err;
+ int trapnr;
+ int signr;
+};
+
+int register_die_notifier(struct notifier_block *nb);
+int unregister_die_notifier(struct notifier_block *nb);
+
+int notify_die(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
+
+#endif /* _LINUX_KDEBUG_H */
diff --git a/include/linux/kdev_t.h b/include/linux/kdev_t.h
new file mode 100644
index 000000000..c838abe3e
--- /dev/null
+++ b/include/linux/kdev_t.h
@@ -0,0 +1,92 @@
+#ifndef _LINUX_KDEV_T_H
+#define _LINUX_KDEV_T_H
+
+#include <uapi/linux/kdev_t.h>
+
+#define MINORBITS 20
+#define MINORMASK ((1U << MINORBITS) - 1)
+
+#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS))
+#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK))
+#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi))
+
+#define print_dev_t(buffer, dev) \
+ sprintf((buffer), "%u:%u\n", MAJOR(dev), MINOR(dev))
+
+#define format_dev_t(buffer, dev) \
+ ({ \
+ sprintf(buffer, "%u:%u", MAJOR(dev), MINOR(dev)); \
+ buffer; \
+ })
+
+/* acceptable for old filesystems */
+static inline int old_valid_dev(dev_t dev)
+{
+ return MAJOR(dev) < 256 && MINOR(dev) < 256;
+}
+
+static inline u16 old_encode_dev(dev_t dev)
+{
+ return (MAJOR(dev) << 8) | MINOR(dev);
+}
+
+static inline dev_t old_decode_dev(u16 val)
+{
+ return MKDEV((val >> 8) & 255, val & 255);
+}
+
+static inline int new_valid_dev(dev_t dev)
+{
+ return 1;
+}
+
+static inline u32 new_encode_dev(dev_t dev)
+{
+ unsigned major = MAJOR(dev);
+ unsigned minor = MINOR(dev);
+ return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
+}
+
+static inline dev_t new_decode_dev(u32 dev)
+{
+ unsigned major = (dev & 0xfff00) >> 8;
+ unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
+ return MKDEV(major, minor);
+}
+
+static inline int huge_valid_dev(dev_t dev)
+{
+ return 1;
+}
+
+static inline u64 huge_encode_dev(dev_t dev)
+{
+ return new_encode_dev(dev);
+}
+
+static inline dev_t huge_decode_dev(u64 dev)
+{
+ return new_decode_dev(dev);
+}
+
+static inline int sysv_valid_dev(dev_t dev)
+{
+ return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
+}
+
+static inline u32 sysv_encode_dev(dev_t dev)
+{
+ return MINOR(dev) | (MAJOR(dev) << 18);
+}
+
+static inline unsigned sysv_major(u32 dev)
+{
+ return (dev >> 18) & 0x3fff;
+}
+
+static inline unsigned sysv_minor(u32 dev)
+{
+ return dev & 0x3ffff;
+}
+
+#endif
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
new file mode 100644
index 000000000..c2ce155d8
--- /dev/null
+++ b/include/linux/kern_levels.h
@@ -0,0 +1,38 @@
+#ifndef __KERN_LEVELS_H__
+#define __KERN_LEVELS_H__
+
+#define KERN_SOH "\001" /* ASCII Start Of Header */
+#define KERN_SOH_ASCII '\001'
+
+#define KERN_EMERG KERN_SOH "0" /* system is unusable */
+#define KERN_ALERT KERN_SOH "1" /* action must be taken immediately */
+#define KERN_CRIT KERN_SOH "2" /* critical conditions */
+#define KERN_ERR KERN_SOH "3" /* error conditions */
+#define KERN_WARNING KERN_SOH "4" /* warning conditions */
+#define KERN_NOTICE KERN_SOH "5" /* normal but significant condition */
+#define KERN_INFO KERN_SOH "6" /* informational */
+#define KERN_DEBUG KERN_SOH "7" /* debug-level messages */
+
+#define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */
+
+/*
+ * Annotation for a "continued" line of log printout (only done after a
+ * line that had no enclosing \n). Only to be used by core/arch code
+ * during early bootup (a continued line is not SMP-safe otherwise).
+ */
+#define KERN_CONT ""
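For orientation, a hedged example of how a level prefix and KERN_CONT are combined by printk() callers (the messages are made up):

    static void example_log(void)
    {
            printk(KERN_INFO "example: probing device...");
            printk(KERN_CONT " done\n");    /* continues the line above */
    }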
+
+/* integer equivalents of KERN_<LEVEL> */
+#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code
+ * are set to this special level */
+#define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */
+#define LOGLEVEL_EMERG 0 /* system is unusable */
+#define LOGLEVEL_ALERT 1 /* action must be taken immediately */
+#define LOGLEVEL_CRIT 2 /* critical conditions */
+#define LOGLEVEL_ERR 3 /* error conditions */
+#define LOGLEVEL_WARNING 4 /* warning conditions */
+#define LOGLEVEL_NOTICE 5 /* normal but significant condition */
+#define LOGLEVEL_INFO 6 /* informational */
+#define LOGLEVEL_DEBUG 7 /* debug-level messages */
+
+#endif
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
new file mode 100644
index 000000000..f65ce0978
--- /dev/null
+++ b/include/linux/kernel-page-flags.h
@@ -0,0 +1,20 @@
+#ifndef LINUX_KERNEL_PAGE_FLAGS_H
+#define LINUX_KERNEL_PAGE_FLAGS_H
+
+#include <uapi/linux/kernel-page-flags.h>
+
+
+/* kernel hacking assistance
+ * WARNING: subject to change, never rely on them!
+ */
+#define KPF_RESERVED 32
+#define KPF_MLOCKED 33
+#define KPF_MAPPEDTODISK 34
+#define KPF_PRIVATE 35
+#define KPF_PRIVATE_2 36
+#define KPF_OWNER_PRIVATE 37
+#define KPF_ARCH 38
+#define KPF_UNCACHED 39
+#define KPF_SOFTDIRTY 40
+
+#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
new file mode 100644
index 000000000..3a5b48e52
--- /dev/null
+++ b/include/linux/kernel.h
@@ -0,0 +1,830 @@
+#ifndef _LINUX_KERNEL_H
+#define _LINUX_KERNEL_H
+
+
+#include <stdarg.h>
+#include <linux/linkage.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/typecheck.h>
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
+#include <asm/byteorder.h>
+#include <uapi/linux/kernel.h>
+
+#define USHRT_MAX ((u16)(~0U))
+#define SHRT_MAX ((s16)(USHRT_MAX>>1))
+#define SHRT_MIN ((s16)(-SHRT_MAX - 1))
+#define INT_MAX ((int)(~0U>>1))
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define LONG_MIN (-LONG_MAX - 1)
+#define ULONG_MAX (~0UL)
+#define LLONG_MAX ((long long)(~0ULL>>1))
+#define LLONG_MIN (-LLONG_MAX - 1)
+#define ULLONG_MAX (~0ULL)
+#define SIZE_MAX (~(size_t)0)
+
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX>>1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX>>1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define S32_MAX ((s32)(U32_MAX>>1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX>>1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP_ULL(ll,d) \
+ ({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
+
+#if BITS_PER_LONG == 32
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)
+#else
+# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
+#endif
+
+/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
+#define roundup(x, y) ( \
+{ \
+ const typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+#define rounddown(x, y) ( \
+{ \
+ typeof(x) __x = (x); \
+ __x - (__x % (y)); \
+} \
+)
+
+/*
+ * Divide positive or negative dividend by positive divisor and round
+ * to closest integer. Result is undefined for negative divisors and
+ * for negative dividends if the divisor variable type is unsigned.
+ */
+#define DIV_ROUND_CLOSEST(x, divisor)( \
+{ \
+ typeof(x) __x = x; \
+ typeof(divisor) __d = divisor; \
+ (((typeof(x))-1) > 0 || \
+ ((typeof(divisor))-1) > 0 || (__x) > 0) ? \
+ (((__x) + ((__d) / 2)) / (__d)) : \
+ (((__x) - ((__d) / 2)) / (__d)); \
+} \
+)
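A few worked values, following directly from the definition above (plain truncating division after adding or subtracting half the divisor):

    /*
     * DIV_ROUND_CLOSEST(17, 5)  == (17 + 2) / 5  ==  3
     * DIV_ROUND_CLOSEST(18, 5)  == (18 + 2) / 5  ==  4
     * DIV_ROUND_CLOSEST(-17, 5) == (-17 - 2) / 5 == -3
     *
     * A negative dividend with an unsigned divisor is undefined, as noted.
     */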
+/*
+ * Same as above but for u64 dividends. divisor must be a 32-bit
+ * number.
+ */
+#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
+{ \
+ typeof(divisor) __d = divisor; \
+ unsigned long long _tmp = (x) + (__d) / 2; \
+ do_div(_tmp, __d); \
+ _tmp; \
+} \
+)
+
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)( \
+{ \
+ typeof(x) quot = (x) / (denom); \
+ typeof(x) rem = (x) % (denom); \
+ (quot * (numer)) + ((rem * (numer)) / (denom)); \
+} \
+)
+
+
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#ifdef CONFIG_LBDAF
+# include <asm/div64.h>
+# define sector_div(a, b) do_div(a, b)
+#else
+# define sector_div(n, b)( \
+{ \
+ int _res; \
+ _res = (n) % (b); \
+ (n) /= (b); \
+ _res; \
+} \
+)
+#endif
+
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32-bits.
+ */
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((u32)(n))
+
+struct completion;
+struct pt_regs;
+struct user;
+
+#ifdef CONFIG_PREEMPT_VOLUNTARY
+extern int _cond_resched(void);
+# define might_resched() _cond_resched()
+#else
+# define might_resched() do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ void ___might_sleep(const char *file, int line, int preempt_offset);
+ void __might_sleep(const char *file, int line, int preempt_offset);
+/**
+ * might_sleep - annotation for functions that can sleep
+ *
+ * This macro will print a stack trace if it is executed in an atomic
+ * context (spinlock, irq-handler, ...).
+ *
+ * This is a useful debugging aid for catching problems early rather than
+ * being bitten later when the calling function happens to sleep when it is
+ * not supposed to.
+ */
+# define might_sleep() \
+ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+# define sched_annotate_sleep() (current->task_state_change = 0)
+#else
+ static inline void ___might_sleep(const char *file, int line,
+ int preempt_offset) { }
+ static inline void __might_sleep(const char *file, int line,
+ int preempt_offset) { }
+# define might_sleep() do { might_resched(); } while (0)
+# define sched_annotate_sleep() do { } while (0)
+#endif
+
+#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
+
+/*
+ * abs() handles unsigned and signed longs, ints, shorts and chars. For all
+ * input types abs() returns a signed long.
+ * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
+ * for those.
+ */
+#define abs(x) ({ \
+ long ret; \
+ if (sizeof(x) == sizeof(long)) { \
+ long __x = (x); \
+ ret = (__x < 0) ? -__x : __x; \
+ } else { \
+ int __x = (x); \
+ ret = (__x < 0) ? -__x : __x; \
+ } \
+ ret; \
+ })
+
+#define abs64(x) ({ \
+ s64 __x = (x); \
+ (__x < 0) ? -__x : __x; \
+ })
+
+/**
+ * reciprocal_scale - "scale" a value into range [0, ep_ro)
+ * @val: value
+ * @ep_ro: right open interval endpoint
+ *
+ * Perform a "reciprocal multiplication" in order to "scale" a value into
+ * range [0, ep_ro), where the upper interval endpoint is right-open.
+ * This is useful, e.g., for accessing an index of an array containing
+ * ep_ro elements. Think of it as a sort of modulus, only that the
+ * result isn't that of modulo. ;) Note that if the initial input is a
+ * small value, then the result will be 0.
+ *
+ * Return: a result based on val in interval [0, ep_ro).
+ */
+static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
+{
+ return (u32)(((u64) val * ep_ro) >> 32);
+}
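A typical use, sketched with an arbitrary table size: folding a full 32-bit hash into a bucket index without a division.

    static unsigned int example_bucket(u32 hash)
    {
            const u32 nr_buckets = 64;

            return reciprocal_scale(hash, nr_buckets);      /* in [0, 64) */
    }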
+
+#if defined(CONFIG_MMU) && \
+ (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
+void might_fault(void);
+#else
+static inline void might_fault(void) { }
+#endif
+
+extern struct atomic_notifier_head panic_notifier_list;
+extern long (*panic_blink)(int state);
+__printf(1, 2)
+void panic(const char *fmt, ...)
+ __noreturn __cold;
+extern void oops_enter(void);
+extern void oops_exit(void);
+void print_oops_end_marker(void);
+extern int oops_may_print(void);
+void do_exit(long error_code)
+ __noreturn;
+void complete_and_exit(struct completion *, long)
+ __noreturn;
+
+/* Internal, do not use. */
+int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
+int __must_check _kstrtol(const char *s, unsigned int base, long *res);
+
+int __must_check kstrtoull(const char *s, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
+
+/**
+ * kstrtoul - convert a string to an unsigned long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign, but not a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtoul. Return code must
+ * be checked.
+ */
+static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(unsigned long, unsigned long long) = 0.
+ */
+ if (sizeof(unsigned long) == sizeof(unsigned long long) &&
+ __alignof__(unsigned long) == __alignof__(unsigned long long))
+ return kstrtoull(s, base, (unsigned long long *)res);
+ else
+ return _kstrtoul(s, base, res);
+}
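A usage sketch (the wrapper is hypothetical; a sysfs store handler is a typical caller): the return code must be checked, as the comment above says.

    static int example_parse(const char *buf, unsigned long *out)
    {
            int ret;

            ret = kstrtoul(buf, 10, out);   /* 0, -ERANGE or -EINVAL */
            if (ret)
                    return ret;

            return 0;
    }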
+
+/**
+ * kstrtol - convert a string to a long
+ * @s: The start of the string. The string must be null-terminated, and may also
+ * include a single newline before its terminating null. The first character
+ * may also be a plus sign or a minus sign.
+ * @base: The number base to use. The maximum supported base is 16. If base is
+ * given as 0, then the base of the string is automatically detected with the
+ * conventional semantics - If it begins with 0x the number will be parsed as a
+ * hexadecimal (case insensitive), if it otherwise begins with 0, it will be
+ * parsed as an octal number. Otherwise it will be parsed as a decimal.
+ * @res: Where to write the result of the conversion on success.
+ *
+ * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
+ * Used as a replacement for the obsolete simple_strtol. Return code must
+ * be checked.
+ */
+static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
+{
+ /*
+ * We want to shortcut function call, but
+ * __builtin_types_compatible_p(long, long long) = 0.
+ */
+ if (sizeof(long) == sizeof(long long) &&
+ __alignof__(long) == __alignof__(long long))
+ return kstrtoll(s, base, (long long *)res);
+ else
+ return _kstrtol(s, base, res);
+}
+
+int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
+int __must_check kstrtoint(const char *s, unsigned int base, int *res);
+
+static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
+{
+ return kstrtoull(s, base, res);
+}
+
+static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
+{
+ return kstrtoll(s, base, res);
+}
+
+static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
+{
+ return kstrtouint(s, base, res);
+}
+
+static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
+{
+ return kstrtoint(s, base, res);
+}
+
+int __must_check kstrtou16(const char *s, unsigned int base, u16 *res);
+int __must_check kstrtos16(const char *s, unsigned int base, s16 *res);
+int __must_check kstrtou8(const char *s, unsigned int base, u8 *res);
+int __must_check kstrtos8(const char *s, unsigned int base, s8 *res);
+
+int __must_check kstrtoull_from_user(const char __user *s, size_t count, unsigned int base, unsigned long long *res);
+int __must_check kstrtoll_from_user(const char __user *s, size_t count, unsigned int base, long long *res);
+int __must_check kstrtoul_from_user(const char __user *s, size_t count, unsigned int base, unsigned long *res);
+int __must_check kstrtol_from_user(const char __user *s, size_t count, unsigned int base, long *res);
+int __must_check kstrtouint_from_user(const char __user *s, size_t count, unsigned int base, unsigned int *res);
+int __must_check kstrtoint_from_user(const char __user *s, size_t count, unsigned int base, int *res);
+int __must_check kstrtou16_from_user(const char __user *s, size_t count, unsigned int base, u16 *res);
+int __must_check kstrtos16_from_user(const char __user *s, size_t count, unsigned int base, s16 *res);
+int __must_check kstrtou8_from_user(const char __user *s, size_t count, unsigned int base, u8 *res);
+int __must_check kstrtos8_from_user(const char __user *s, size_t count, unsigned int base, s8 *res);
+
+static inline int __must_check kstrtou64_from_user(const char __user *s, size_t count, unsigned int base, u64 *res)
+{
+ return kstrtoull_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos64_from_user(const char __user *s, size_t count, unsigned int base, s64 *res)
+{
+ return kstrtoll_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtou32_from_user(const char __user *s, size_t count, unsigned int base, u32 *res)
+{
+ return kstrtouint_from_user(s, count, base, res);
+}
+
+static inline int __must_check kstrtos32_from_user(const char __user *s, size_t count, unsigned int base, s32 *res)
+{
+ return kstrtoint_from_user(s, count, base, res);
+}
+
+/* Obsolete, do not use. Use kstrto<foo> instead */
+
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
+
+extern int num_to_str(char *buf, int size, unsigned long long num);
+
+/* lib/printf utilities */
+
+extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
+extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
+extern __printf(3, 4)
+int snprintf(char *buf, size_t size, const char *fmt, ...);
+extern __printf(3, 0)
+int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+extern __printf(3, 4)
+int scnprintf(char *buf, size_t size, const char *fmt, ...);
+extern __printf(3, 0)
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+extern __printf(2, 3)
+char *kasprintf(gfp_t gfp, const char *fmt, ...);
+extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+
+extern __scanf(2, 3)
+int sscanf(const char *, const char *, ...);
+extern __scanf(2, 0)
+int vsscanf(const char *, const char *, va_list);
+
+extern int get_option(char **str, int *pint);
+extern char *get_options(const char *str, int nints, int *ints);
+extern unsigned long long memparse(const char *ptr, char **retptr);
+extern bool parse_option_str(const char *str, const char *option);
+
+extern int core_kernel_text(unsigned long addr);
+extern int core_kernel_data(unsigned long addr);
+extern int __kernel_text_address(unsigned long addr);
+extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
+unsigned long int_sqrt(unsigned long);
+
+extern void bust_spinlocks(int yes);
+extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
+extern int panic_timeout;
+extern int panic_on_oops;
+extern int panic_on_unrecovered_nmi;
+extern int panic_on_io_nmi;
+extern int panic_on_warn;
+extern int sysctl_panic_on_stackoverflow;
+/*
+ * Only to be used by arch init code. If the user overrode the default
+ * CONFIG_PANIC_TIMEOUT, honor it.
+ */
+static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout)
+{
+ if (panic_timeout == arch_default_timeout)
+ panic_timeout = timeout;
+}
+extern const char *print_tainted(void);
+enum lockdep_ok {
+ LOCKDEP_STILL_OK,
+ LOCKDEP_NOW_UNRELIABLE
+};
+extern void add_taint(unsigned flag, enum lockdep_ok);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
+extern int root_mountflags;
+
+extern bool early_boot_irqs_disabled;
+
+/* Values used for system_state */
+extern enum system_states {
+ SYSTEM_BOOTING,
+ SYSTEM_RUNNING,
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
+ SYSTEM_RESTART,
+} system_state;
+
+#define TAINT_PROPRIETARY_MODULE 0
+#define TAINT_FORCED_MODULE 1
+#define TAINT_CPU_OUT_OF_SPEC 2
+#define TAINT_FORCED_RMMOD 3
+#define TAINT_MACHINE_CHECK 4
+#define TAINT_BAD_PAGE 5
+#define TAINT_USER 6
+#define TAINT_DIE 7
+#define TAINT_OVERRIDDEN_ACPI_TABLE 8
+#define TAINT_WARN 9
+#define TAINT_CRAP 10
+#define TAINT_FIRMWARE_WORKAROUND 11
+#define TAINT_OOT_MODULE 12
+#define TAINT_UNSIGNED_MODULE 13
+#define TAINT_SOFTLOCKUP 14
+#define TAINT_LIVEPATCH 15
+
+extern const char hex_asc[];
+#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
+#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_hi(byte);
+ *buf++ = hex_asc_lo(byte);
+ return buf;
+}
+
+extern const char hex_asc_upper[];
+#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)]
+#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4]
+
+static inline char *hex_byte_pack_upper(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_upper_hi(byte);
+ *buf++ = hex_asc_upper_lo(byte);
+ return buf;
+}
+
+extern int hex_to_bin(char ch);
+extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
+extern char *bin2hex(char *dst, const void *src, size_t count);
+
+bool mac_pton(const char *s, u8 *mac);
+
+/*
+ * General tracing-related utility functions - trace_printk(),
+ * tracing_on()/tracing_off() and tracing_start()/tracing_stop()
+ *
+ * Use tracing_on/tracing_off when you want to quickly turn on or off
+ * tracing. It simply enables or disables the recording of the trace events.
+ * This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on
+ * file, which gives a means for the kernel and userspace to interact.
+ * Place a tracing_off() in the kernel where you want tracing to end.
+ * From user space, examine the trace, and then echo 1 > tracing_on
+ * to continue tracing.
+ *
+ * tracing_stop()/tracing_start() have slightly more overhead. They are used
+ * by things like suspend to RAM, where disabling the recording of the
+ * trace is not enough, but tracing must actually stop because things
+ * like calling smp_processor_id() may crash the system.
+ *
+ * Most likely, you want to use tracing_on/tracing_off.
+ */
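A sketch of the pattern described above (the status check is invented): stop recording the moment a suspected condition is seen, so the ring buffer still holds the lead-up.

    static void example_check(int status)
    {
            if (status < 0) {
                    tracing_off();  /* buffer keeps the history up to here */
                    pr_warn("example: bad status %d, tracing stopped\n", status);
            }
    }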
+#ifdef CONFIG_RING_BUFFER
+/* tracing_off_permanent stops recording with no way to bring it back */
+void tracing_off_permanent(void);
+#else
+static inline void tracing_off_permanent(void) { }
+#endif
+
+enum ftrace_dump_mode {
+ DUMP_NONE,
+ DUMP_ALL,
+ DUMP_ORIG,
+};
+
+#ifdef CONFIG_TRACING
+void tracing_on(void);
+void tracing_off(void);
+int tracing_is_on(void);
+void tracing_snapshot(void);
+void tracing_snapshot_alloc(void);
+
+extern void tracing_start(void);
+extern void tracing_stop(void);
+
+static inline __printf(1, 2)
+void ____trace_printk_check_format(const char *fmt, ...)
+{
+}
+#define __trace_printk_check_format(fmt, args...) \
+do { \
+ if (0) \
+ ____trace_printk_check_format(fmt, ##args); \
+} while (0)
+
+/**
+ * trace_printk - printf formatting in the ftrace buffer
+ * @fmt: the printf format for printing
+ *
+ * Note: __trace_printk is an internal function for trace_printk and
+ * the @ip is passed in via the trace_printk macro.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_printks scattered around in
+ * your code. (Extra memory is used for special buffers that are
+ * allocated when trace_printk() is used)
+ *
+ * A little optimization trick is done here. If there's only one
+ * argument, there's no need to scan the string for printf formats.
+ * The trace_puts() will suffice. But how can we take advantage of
+ * using trace_puts() when trace_printk() has only one argument?
+ * By stringifying the args and checking the size we can tell
+ * whether or not there are args. __stringify((__VA_ARGS__)) will
+ * turn into "()\0" with a size of 3 when there are no args, anything
+ * else will be bigger. All we need to do is define a string to this,
+ * and then take its size and compare to 3. If it's bigger, use
+ * do_trace_printk() otherwise, optimize it to trace_puts(). Then just
+ * let gcc optimize the rest.
+ */
+
+#define trace_printk(fmt, ...) \
+do { \
+ char _______STR[] = __stringify((__VA_ARGS__)); \
+ if (sizeof(_______STR) > 3) \
+ do_trace_printk(fmt, ##__VA_ARGS__); \
+ else \
+ trace_puts(fmt); \
+} while (0)
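For illustration (the function and values are invented), trace_printk() is used exactly like printk() in a hot path; the single-argument call below is the case the size trick turns into trace_puts():

    static void example_hot_path(int cpu, u64 delta)
    {
            trace_printk("cpu %d took %llu ns\n", cpu, delta);
            trace_printk("reached checkpoint\n");   /* no args: trace_puts() */
    }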
+
+#define do_trace_printk(fmt, args...) \
+do { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_printk_check_format(fmt, ##args); \
+ \
+ if (__builtin_constant_p(fmt)) \
+ __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
+ else \
+ __trace_printk(_THIS_IP_, fmt, ##args); \
+} while (0)
+
+extern __printf(2, 3)
+int __trace_bprintk(unsigned long ip, const char *fmt, ...);
+
+extern __printf(2, 3)
+int __trace_printk(unsigned long ip, const char *fmt, ...);
+
+/**
+ * trace_puts - write a string into the ftrace buffer
+ * @str: the string to record
+ *
+ * Note: __trace_bputs is an internal function for trace_puts and
+ * the @ip is passed in via the trace_puts macro.
+ *
+ * This is similar to trace_printk() but is made for those really fast
+ * paths where a developer wants the least amount of "Heisenbug" effects,
+ * and where the processing of the print format is still too much.
+ *
+ * This function allows a kernel developer to debug fast path sections
+ * that printk is not appropriate for. By scattering in various
+ * printk-like tracing in the code, a developer can quickly see
+ * where problems are occurring.
+ *
+ * This is intended as a debugging tool for the developer only.
+ * Please refrain from leaving trace_puts scattered around in
+ * your code. (Extra memory is used for special buffers that are
+ * allocated when trace_puts() is used)
+ *
+ * Returns: 0 if nothing was written, positive # if string was.
+ * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
+ */
+
+#define trace_puts(str) ({ \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(str) ? str : NULL; \
+ \
+ if (__builtin_constant_p(str)) \
+ __trace_bputs(_THIS_IP_, trace_printk_fmt); \
+ else \
+ __trace_puts(_THIS_IP_, str, strlen(str)); \
+})
+extern int __trace_bputs(unsigned long ip, const char *str);
+extern int __trace_puts(unsigned long ip, const char *str, int size);
+
+extern void trace_dump_stack(int skip);
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to allocate the static variable to fmt if it is not a
+ * constant, even with the outer if statement.
+ */
+#define ftrace_vprintk(fmt, vargs) \
+do { \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
+ } else \
+ __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
+} while (0)
+
+extern int
+__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern int
+__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
+
+extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
+#else
+static inline void tracing_start(void) { }
+static inline void tracing_stop(void) { }
+static inline void trace_dump_stack(int skip) { }
+
+static inline void tracing_on(void) { }
+static inline void tracing_off(void) { }
+static inline int tracing_is_on(void) { return 0; }
+static inline void tracing_snapshot(void) { }
+static inline void tracing_snapshot_alloc(void) { }
+
+static inline __printf(1, 2)
+int trace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+static inline int
+ftrace_vprintk(const char *fmt, va_list ap)
+{
+ return 0;
+}
+static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
+#endif /* CONFIG_TRACING */
+
+/*
+ * min()/max()/clamp() macros that also do
+ * strict type-checking. See the
+ * "unnecessary" pointer comparison.
+ */
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+
+#define max(x, y) ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ (void) (&_max1 == &_max2); \
+ _max1 > _max2 ? _max1 : _max2; })
+
+#define min3(x, y, z) min((typeof(x))min(x, y), z)
+#define max3(x, y, z) max((typeof(x))max(x, y), z)
+
+/**
+ * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+ * @x: value1
+ * @y: value2
+ */
+#define min_not_zero(x, y) ({ \
+ typeof(x) __x = (x); \
+ typeof(y) __y = (y); \
+ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+
+/**
+ * clamp - return a value clamped to a given range with strict typechecking
+ * @val: current value
+ * @lo: lowest allowable value
+ * @hi: highest allowable value
+ *
+ * This macro does strict typechecking of lo/hi to make sure they are of the
+ * same type as val. See the unnecessary pointer comparisons.
+ */
+#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
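A small sketch; because of the strict typechecking, the bounds must have the same type as the value (all three are int here):

    static int example_brightness(int requested)
    {
            return clamp(requested, 0, 255);
    }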
+
+/*
+ * ..and if you can't take the strict
+ * types, you can specify one yourself.
+ *
+ * Or not use min/max/clamp at all, of course.
+ */
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1: __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1: __max2; })
+
+/**
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * 'type' to make all the comparisons.
+ */
+#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
+
+/**
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument 'val' is. This is useful when 'val' is an unsigned
+ * type and 'lo' and 'hi' are literals that would otherwise be given a signed
+ * integer type.
+ */
+#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
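+
+/*
+ * For example (a minimal sketch), with an unsigned variable and literal
+ * bounds:
+ *
+ *	u8 duty = 250;
+ *
+ *	clamp(duty, 16, 200);		warns: 16 and 200 are plain int
+ *	clamp_val(duty, 16, 200);	compares everything as u8, yields 200
+ */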
+
+
+/*
+ * swap - swap the values of @a and @b
+ */
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
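+
+/*
+ * For example (a minimal sketch; the structure is illustrative), given
+ *
+ *	struct my_port {
+ *		int		id;
+ *		struct device	dev;
+ *	};
+ *
+ * and a "struct device *d" that points at &port->dev, the containing
+ * structure is recovered with:
+ *
+ *	struct my_port *port = container_of(d, struct my_port, dev);
+ */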
+
+/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+#endif
+
+/* Permissions on a sysfs file: you didn't miss the 0 prefix, did you? */
+#define VERIFY_OCTAL_PERMISSIONS(perms) \
+ (BUILD_BUG_ON_ZERO((perms) < 0) + \
+ BUILD_BUG_ON_ZERO((perms) > 0777) + \
+ /* User perms >= group perms >= other perms */ \
+ BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
+ BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
+ /* Other writable? Generally considered a bad idea. */ \
+ BUILD_BUG_ON_ZERO((perms) & 2) + \
+ (perms))
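+
+/*
+ * For example (a minimal sketch):
+ *
+ *	VERIFY_OCTAL_PERMISSIONS(0644)	evaluates to 0644
+ *	VERIFY_OCTAL_PERMISSIONS(0666)	build error: other-writable
+ *	VERIFY_OCTAL_PERMISSIONS(644)	build error: decimal 644 exceeds 0777
+ */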
+#endif
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
new file mode 100644
index 000000000..25a822f6f
--- /dev/null
+++ b/include/linux/kernel_stat.h
@@ -0,0 +1,98 @@
+#ifndef _LINUX_KERNEL_STAT_H
+#define _LINUX_KERNEL_STAT_H
+
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/vtime.h>
+#include <asm/irq.h>
+#include <linux/cputime.h>
+
+/*
+ * 'kernel_stat.h' contains the definitions needed for doing
+ * some kernel statistics (CPU usage, context switches ...),
+ * used by rstatd/perfmeter
+ */
+
+enum cpu_usage_stat {
+ CPUTIME_USER,
+ CPUTIME_NICE,
+ CPUTIME_SYSTEM,
+ CPUTIME_SOFTIRQ,
+ CPUTIME_IRQ,
+ CPUTIME_IDLE,
+ CPUTIME_IOWAIT,
+ CPUTIME_STEAL,
+ CPUTIME_GUEST,
+ CPUTIME_GUEST_NICE,
+ NR_STATS,
+};
+
+struct kernel_cpustat {
+ u64 cpustat[NR_STATS];
+};
+
+struct kernel_stat {
+ unsigned long irqs_sum;
+ unsigned int softirqs[NR_SOFTIRQS];
+};
+
+DECLARE_PER_CPU(struct kernel_stat, kstat);
+DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
+
+/* Must have preemption disabled for this to be meaningful. */
+#define kstat_this_cpu this_cpu_ptr(&kstat)
+#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
+#define kstat_cpu(cpu) per_cpu(kstat, cpu)
+#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
+
+extern unsigned long long nr_context_switches(void);
+
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+extern void kstat_incr_irq_this_cpu(unsigned int irq);
+
+static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
+{
+ __this_cpu_inc(kstat.softirqs[irq]);
+}
+
+static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
+{
+ return kstat_cpu(cpu).softirqs[irq];
+}
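+
+/*
+ * For example (a minimal sketch), the system-wide count of timer softirqs
+ * handled so far can be obtained by summing the per-cpu counters:
+ *
+ *	unsigned int cpu, total = 0;
+ *
+ *	for_each_possible_cpu(cpu)
+ *		total += kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu);
+ */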
+
+/*
+ * Number of interrupts per specific IRQ source, since bootup
+ */
+extern unsigned int kstat_irqs(unsigned int irq);
+extern unsigned int kstat_irqs_usr(unsigned int irq);
+
+/*
+ * Number of interrupts per cpu, since bootup
+ */
+static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
+{
+ return kstat_cpu(cpu).irqs_sum;
+}
+
+extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
+extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
+extern void account_steal_time(cputime_t);
+extern void account_idle_time(cputime_t);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static inline void account_process_tick(struct task_struct *tsk, int user)
+{
+ vtime_account_user(tsk);
+}
+#else
+extern void account_process_tick(struct task_struct *, int user);
+#endif
+
+extern void account_steal_ticks(unsigned long ticks);
+extern void account_idle_ticks(unsigned long ticks);
+
+#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
new file mode 100644
index 000000000..e985ba679
--- /dev/null
+++ b/include/linux/kernelcapi.h
@@ -0,0 +1,119 @@
+/*
+ * $Id: kernelcapi.h,v 1.8.6.2 2001/02/07 11:31:31 kai Exp $
+ *
+ * Kernel CAPI 2.0 Interface for Linux
+ *
+ * (c) Copyright 1997 by Carsten Paeth (calle@calle.in-berlin.de)
+ *
+ */
+#ifndef __KERNELCAPI_H__
+#define __KERNELCAPI_H__
+
+
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/notifier.h>
+#include <uapi/linux/kernelcapi.h>
+
+struct capi20_appl {
+ u16 applid;
+ capi_register_params rparam;
+ void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
+ void *private;
+
+ /* internal to kernelcapi.o */
+ unsigned long nrecvctlpkt;
+ unsigned long nrecvdatapkt;
+ unsigned long nsentctlpkt;
+ unsigned long nsentdatapkt;
+ struct mutex recv_mtx;
+ struct sk_buff_head recv_queue;
+ struct work_struct recv_work;
+ int release_in_progress;
+};
+
+u16 capi20_isinstalled(void);
+u16 capi20_register(struct capi20_appl *ap);
+u16 capi20_release(struct capi20_appl *ap);
+u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
+u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
+u16 capi20_get_version(u32 contr, struct capi_version *verp);
+u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
+u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
+int capi20_manufacturer(unsigned long cmd, void __user *data);
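+
+/*
+ * A minimal registration sketch (the names and rparam values are
+ * illustrative, error handling is omitted):
+ *
+ *	static void my_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
+ *	{
+ *		... decode and handle the CAPI message ...
+ *		kfree_skb(skb);
+ *	}
+ *
+ *	static struct capi20_appl my_appl = {
+ *		.rparam = {
+ *			.level3cnt  = 2,
+ *			.datablkcnt = 8,
+ *			.datablklen = 2048,
+ *		},
+ *		.recv_message = my_recv_message,
+ *	};
+ *
+ *	if (capi20_isinstalled() == CAPI_NOERROR &&
+ *	    capi20_register(&my_appl) == CAPI_NOERROR)
+ *		... my_appl.applid now identifies the application ...
+ */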
+
+#define CAPICTR_UP 0
+#define CAPICTR_DOWN 1
+
+int register_capictr_notifier(struct notifier_block *nb);
+int unregister_capictr_notifier(struct notifier_block *nb);
+
+#define CAPI_NOERROR 0x0000
+
+#define CAPI_TOOMANYAPPLS 0x1001
+#define CAPI_LOGBLKSIZETOSMALL 0x1002
+#define CAPI_BUFFEXECEEDS64K 0x1003
+#define CAPI_MSGBUFSIZETOOSMALL 0x1004
+#define CAPI_ANZLOGCONNNOTSUPPORTED 0x1005
+#define CAPI_REGRESERVED 0x1006
+#define CAPI_REGBUSY 0x1007
+#define CAPI_REGOSRESOURCEERR 0x1008
+#define CAPI_REGNOTINSTALLED 0x1009
+#define CAPI_REGCTRLERNOTSUPPORTEXTEQUIP 0x100a
+#define CAPI_REGCTRLERONLYSUPPORTEXTEQUIP 0x100b
+
+#define CAPI_ILLAPPNR 0x1101
+#define CAPI_ILLCMDORSUBCMDORMSGTOSMALL 0x1102
+#define CAPI_SENDQUEUEFULL 0x1103
+#define CAPI_RECEIVEQUEUEEMPTY 0x1104
+#define CAPI_RECEIVEOVERFLOW 0x1105
+#define CAPI_UNKNOWNNOTPAR 0x1106
+#define CAPI_MSGBUSY 0x1107
+#define CAPI_MSGOSRESOURCEERR 0x1108
+#define CAPI_MSGNOTINSTALLED 0x1109
+#define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP 0x110a
+#define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b
+
+typedef enum {
+ CapiMessageNotSupportedInCurrentState = 0x2001,
+ CapiIllContrPlciNcci = 0x2002,
+ CapiNoPlciAvailable = 0x2003,
+ CapiNoNcciAvailable = 0x2004,
+ CapiNoListenResourcesAvailable = 0x2005,
+ CapiNoFaxResourcesAvailable = 0x2006,
+ CapiIllMessageParmCoding = 0x2007,
+} RESOURCE_CODING_PROBLEM;
+
+typedef enum {
+ CapiB1ProtocolNotSupported = 0x3001,
+ CapiB2ProtocolNotSupported = 0x3002,
+ CapiB3ProtocolNotSupported = 0x3003,
+ CapiB1ProtocolParameterNotSupported = 0x3004,
+ CapiB2ProtocolParameterNotSupported = 0x3005,
+ CapiB3ProtocolParameterNotSupported = 0x3006,
+ CapiBProtocolCombinationNotSupported = 0x3007,
+ CapiNcpiNotSupported = 0x3008,
+ CapiCipValueUnknown = 0x3009,
+ CapiFlagsNotSupported = 0x300a,
+ CapiFacilityNotSupported = 0x300b,
+ CapiDataLengthNotSupportedByCurrentProtocol = 0x300c,
+ CapiResetProcedureNotSupportedByCurrentProtocol = 0x300d,
+ CapiTeiAssignmentFailed = 0x300e,
+} REQUESTED_SERVICES_PROBLEM;
+
+typedef enum {
+ CapiSuccess = 0x0000,
+ CapiSupplementaryServiceNotSupported = 0x300e,
+ CapiRequestNotAllowedInThisState = 0x3010,
+} SUPPLEMENTARY_SERVICE_INFO;
+
+typedef enum {
+ CapiProtocolErrorLayer1 = 0x3301,
+ CapiProtocolErrorLayer2 = 0x3302,
+ CapiProtocolErrorLayer3 = 0x3303,
+ CapiTimeOut = 0x3303, // SuppServiceReason
+ CapiCallGivenToOtherApplication = 0x3304,
+} CAPI_REASON;
+
+#endif /* __KERNELCAPI_H__ */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
new file mode 100644
index 000000000..29d1896c3
--- /dev/null
+++ b/include/linux/kernfs.h
@@ -0,0 +1,474 @@
+/*
+ * kernfs.h - pseudo filesystem decoupled from vfs locking
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef __LINUX_KERNFS_H
+#define __LINUX_KERNFS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/idr.h>
+#include <linux/lockdep.h>
+#include <linux/rbtree.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+
+struct file;
+struct dentry;
+struct iattr;
+struct seq_file;
+struct vm_area_struct;
+struct super_block;
+struct file_system_type;
+
+struct kernfs_open_node;
+struct kernfs_iattrs;
+
+enum kernfs_node_type {
+ KERNFS_DIR = 0x0001,
+ KERNFS_FILE = 0x0002,
+ KERNFS_LINK = 0x0004,
+};
+
+#define KERNFS_TYPE_MASK 0x000f
+#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
+
+enum kernfs_node_flag {
+ KERNFS_ACTIVATED = 0x0010,
+ KERNFS_NS = 0x0020,
+ KERNFS_HAS_SEQ_SHOW = 0x0040,
+ KERNFS_HAS_MMAP = 0x0080,
+ KERNFS_LOCKDEP = 0x0100,
+ KERNFS_SUICIDAL = 0x0400,
+ KERNFS_SUICIDED = 0x0800,
+ KERNFS_EMPTY_DIR = 0x1000,
+};
+
+/* @flags for kernfs_create_root() */
+enum kernfs_root_flag {
+ /*
+ * kernfs_nodes are created in the deactivated state and invisible.
+ * They require explicit kernfs_activate() to become visible. This
+ * can be used to make related nodes become visible atomically
+ * after all nodes are created successfully.
+ */
+ KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
+
+ /*
+	 * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
+ * succeeds regardless of the RW permissions. sysfs had an extra
+ * layer of enforcement where open(2) fails with -EACCES regardless
+ * of CAP_DAC_OVERRIDE if the permission doesn't have the
+ * respective read or write access at all (none of S_IRUGO or
+ * S_IWUGO) or the respective operation isn't implemented. The
+ * following flag enables that behavior.
+ */
+ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,
+};
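+
+/*
+ * A typical use of KERNFS_ROOT_CREATE_DEACTIVATED (a sketch; the variable
+ * names are illustrative and error handling is omitted):
+ *
+ *	root = kernfs_create_root(&my_scops, KERNFS_ROOT_CREATE_DEACTIVATED,
+ *				  priv);
+ *	dir = kernfs_create_dir(root->kn, "mydir", 0755, priv);
+ *	... create files under dir ...
+ *	kernfs_activate(root->kn);	the whole tree becomes visible at once
+ */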
+
+/* type-specific structures for kernfs_node union members */
+struct kernfs_elem_dir {
+ unsigned long subdirs;
+ /* children rbtree starts here and goes through kn->rb */
+ struct rb_root children;
+
+ /*
+ * The kernfs hierarchy this directory belongs to. This fits
+ * better directly in kernfs_node but is here to save space.
+ */
+ struct kernfs_root *root;
+};
+
+struct kernfs_elem_symlink {
+ struct kernfs_node *target_kn;
+};
+
+struct kernfs_elem_attr {
+ const struct kernfs_ops *ops;
+ struct kernfs_open_node *open;
+ loff_t size;
+ struct kernfs_node *notify_next; /* for kernfs_notify() */
+};
+
+/*
+ * kernfs_node - the building block of the kernfs hierarchy. Each and every
+ * kernfs node is represented by a single kernfs_node. Most fields are
+ * private to kernfs and shouldn't be accessed directly by kernfs users.
+ *
+ * As long as s_count reference is held, the kernfs_node itself is
+ * accessible. Dereferencing elem or any other outer entity requires
+ * active reference.
+ */
+struct kernfs_node {
+ atomic_t count;
+ atomic_t active;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+ /*
+ * Use kernfs_get_parent() and kernfs_name/path() instead of
+ * accessing the following two fields directly. If the node is
+ * never moved to a different parent, it is safe to access the
+ * parent directly.
+ */
+ struct kernfs_node *parent;
+ const char *name;
+
+ struct rb_node rb;
+
+ const void *ns; /* namespace tag */
+ unsigned int hash; /* ns + name hash */
+ union {
+ struct kernfs_elem_dir dir;
+ struct kernfs_elem_symlink symlink;
+ struct kernfs_elem_attr attr;
+ };
+
+ void *priv;
+
+ unsigned short flags;
+ umode_t mode;
+ unsigned int ino;
+ struct kernfs_iattrs *iattr;
+};
+
+/*
+ * kernfs_syscall_ops may be specified on kernfs_create_root() to support
+ * syscalls. These optional callbacks are invoked on the matching syscalls
+ * and can perform any kernfs operations which don't necessarily have to be
+ * the exact operation requested. An active reference is held for each
+ * kernfs_node parameter.
+ */
+struct kernfs_syscall_ops {
+ int (*remount_fs)(struct kernfs_root *root, int *flags, char *data);
+ int (*show_options)(struct seq_file *sf, struct kernfs_root *root);
+
+ int (*mkdir)(struct kernfs_node *parent, const char *name,
+ umode_t mode);
+ int (*rmdir)(struct kernfs_node *kn);
+ int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
+ const char *new_name);
+};
+
+struct kernfs_root {
+ /* published fields */
+ struct kernfs_node *kn;
+ unsigned int flags; /* KERNFS_ROOT_* flags */
+
+ /* private fields, do not use outside kernfs proper */
+ struct ida ino_ida;
+ struct kernfs_syscall_ops *syscall_ops;
+
+ /* list of kernfs_super_info of this root, protected by kernfs_mutex */
+ struct list_head supers;
+
+ wait_queue_head_t deactivate_waitq;
+};
+
+struct kernfs_open_file {
+ /* published fields */
+ struct kernfs_node *kn;
+ struct file *file;
+ void *priv;
+
+ /* private fields, do not use outside kernfs proper */
+ struct mutex mutex;
+ int event;
+ struct list_head list;
+ char *prealloc_buf;
+
+ size_t atomic_write_len;
+ bool mmapped;
+ const struct vm_operations_struct *vm_ops;
+};
+
+struct kernfs_ops {
+ /*
+ * Read is handled by either seq_file or raw_read().
+ *
+ * If seq_show() is present, seq_file path is active. Other seq
+ * operations are optional and if not implemented, the behavior is
+ * equivalent to single_open(). @sf->private points to the
+ * associated kernfs_open_file.
+ *
+	 * read() is bounced through a kernel buffer and a read larger than
+	 * PAGE_SIZE results in a partial operation of PAGE_SIZE.
+ */
+ int (*seq_show)(struct seq_file *sf, void *v);
+
+ void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
+ void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
+ void (*seq_stop)(struct seq_file *sf, void *v);
+
+ ssize_t (*read)(struct kernfs_open_file *of, char *buf, size_t bytes,
+ loff_t off);
+
+ /*
+	 * write() is bounced through a kernel buffer. If atomic_write_len
+ * is not set, a write larger than PAGE_SIZE results in partial
+ * operations of PAGE_SIZE chunks. If atomic_write_len is set,
+	 * writes up to the specified size are executed atomically but
+ * larger ones are rejected with -E2BIG.
+ */
+ size_t atomic_write_len;
+ /*
+ * "prealloc" causes a buffer to be allocated at open for
+ * all read/write requests. As ->seq_show uses seq_read()
+ * which does its own allocation, it is incompatible with
+ * ->prealloc. Provide ->read and ->write with ->prealloc.
+ */
+ bool prealloc;
+ ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
+ loff_t off);
+
+ int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lockdep_key;
+#endif
+};
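+
+/*
+ * A minimal read-only file backed by seq_show (a sketch; the names are
+ * illustrative):
+ *
+ *	static char foo_msg[] = "hello world\n";
+ *
+ *	static int foo_seq_show(struct seq_file *sf, void *v)
+ *	{
+ *		struct kernfs_open_file *of = sf->private;
+ *
+ *		seq_puts(sf, (char *)of->kn->priv);
+ *		return 0;
+ *	}
+ *
+ *	static const struct kernfs_ops foo_ops = {
+ *		.seq_show = foo_seq_show,
+ *	};
+ *
+ *	kn = kernfs_create_file(parent, "foo", 0444, 0, &foo_ops, foo_msg);
+ */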
+
+#ifdef CONFIG_KERNFS
+
+static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
+{
+ return kn->flags & KERNFS_TYPE_MASK;
+}
+
+/**
+ * kernfs_enable_ns - enable namespace under a directory
+ * @kn: directory of interest, should be empty
+ *
+ * This is to be called right after @kn is created to enable namespace
+ * under it. All children of @kn must have non-NULL namespace tags and
+ * only the ones which match the super_block's tag will be visible.
+ */
+static inline void kernfs_enable_ns(struct kernfs_node *kn)
+{
+ WARN_ON_ONCE(kernfs_type(kn) != KERNFS_DIR);
+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&kn->dir.children));
+ kn->flags |= KERNFS_NS;
+}
+
+/**
+ * kernfs_ns_enabled - test whether namespace is enabled
+ * @kn: the node to test
+ *
+ * Test whether namespace filtering is enabled for the children of @kn.
+ */
+static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
+{
+ return kn->flags & KERNFS_NS;
+}
+
+int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen);
+char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
+ size_t buflen);
+void pr_cont_kernfs_name(struct kernfs_node *kn);
+void pr_cont_kernfs_path(struct kernfs_node *kn);
+struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn);
+struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
+ const char *name, const void *ns);
+void kernfs_get(struct kernfs_node *kn);
+void kernfs_put(struct kernfs_node *kn);
+
+struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry);
+struct kernfs_root *kernfs_root_from_sb(struct super_block *sb);
+
+struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
+ unsigned int flags, void *priv);
+void kernfs_destroy_root(struct kernfs_root *root);
+
+struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
+ const char *name, umode_t mode,
+ void *priv, const void *ns);
+struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
+ const char *name);
+struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
+ const char *name,
+ umode_t mode, loff_t size,
+ const struct kernfs_ops *ops,
+ void *priv, const void *ns,
+ struct lock_class_key *key);
+struct kernfs_node *kernfs_create_link(struct kernfs_node *parent,
+ const char *name,
+ struct kernfs_node *target);
+void kernfs_activate(struct kernfs_node *kn);
+void kernfs_remove(struct kernfs_node *kn);
+void kernfs_break_active_protection(struct kernfs_node *kn);
+void kernfs_unbreak_active_protection(struct kernfs_node *kn);
+bool kernfs_remove_self(struct kernfs_node *kn);
+int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
+ const void *ns);
+int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
+ const char *new_name, const void *new_ns);
+int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+void kernfs_notify(struct kernfs_node *kn);
+
+const void *kernfs_super_ns(struct super_block *sb);
+struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
+ struct kernfs_root *root, unsigned long magic,
+ bool *new_sb_created, const void *ns);
+void kernfs_kill_sb(struct super_block *sb);
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
+
+void kernfs_init(void);
+
+#else /* CONFIG_KERNFS */
+
+static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
+{ return 0; } /* whatever */
+
+static inline void kernfs_enable_ns(struct kernfs_node *kn) { }
+
+static inline bool kernfs_ns_enabled(struct kernfs_node *kn)
+{ return false; }
+
+static inline int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
+{ return -ENOSYS; }
+
+static inline char * __must_check kernfs_path(struct kernfs_node *kn, char *buf,
+ size_t buflen)
+{ return NULL; }
+
+static inline void pr_cont_kernfs_name(struct kernfs_node *kn) { }
+static inline void pr_cont_kernfs_path(struct kernfs_node *kn) { }
+
+static inline struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
+{ return NULL; }
+
+static inline struct kernfs_node *
+kernfs_find_and_get_ns(struct kernfs_node *parent, const char *name,
+ const void *ns)
+{ return NULL; }
+
+static inline void kernfs_get(struct kernfs_node *kn) { }
+static inline void kernfs_put(struct kernfs_node *kn) { }
+
+static inline struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
+{ return NULL; }
+
+static inline struct kernfs_root *kernfs_root_from_sb(struct super_block *sb)
+{ return NULL; }
+
+static inline struct kernfs_root *
+kernfs_create_root(struct kernfs_syscall_ops *scops, unsigned int flags,
+ void *priv)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline void kernfs_destroy_root(struct kernfs_root *root) { }
+
+static inline struct kernfs_node *
+kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
+ umode_t mode, void *priv, const void *ns)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline struct kernfs_node *
+__kernfs_create_file(struct kernfs_node *parent, const char *name,
+ umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ void *priv, const void *ns, struct lock_class_key *key)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline struct kernfs_node *
+kernfs_create_link(struct kernfs_node *parent, const char *name,
+ struct kernfs_node *target)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline void kernfs_activate(struct kernfs_node *kn) { }
+
+static inline void kernfs_remove(struct kernfs_node *kn) { }
+
+static inline bool kernfs_remove_self(struct kernfs_node *kn)
+{ return false; }
+
+static inline int kernfs_remove_by_name_ns(struct kernfs_node *kn,
+ const char *name, const void *ns)
+{ return -ENOSYS; }
+
+static inline int kernfs_rename_ns(struct kernfs_node *kn,
+ struct kernfs_node *new_parent,
+ const char *new_name, const void *new_ns)
+{ return -ENOSYS; }
+
+static inline int kernfs_setattr(struct kernfs_node *kn,
+ const struct iattr *iattr)
+{ return -ENOSYS; }
+
+static inline void kernfs_notify(struct kernfs_node *kn) { }
+
+static inline const void *kernfs_super_ns(struct super_block *sb)
+{ return NULL; }
+
+static inline struct dentry *
+kernfs_mount_ns(struct file_system_type *fs_type, int flags,
+ struct kernfs_root *root, unsigned long magic,
+ bool *new_sb_created, const void *ns)
+{ return ERR_PTR(-ENOSYS); }
+
+static inline void kernfs_kill_sb(struct super_block *sb) { }
+
+static inline void kernfs_init(void) { }
+
+#endif /* CONFIG_KERNFS */
+
+static inline struct kernfs_node *
+kernfs_find_and_get(struct kernfs_node *kn, const char *name)
+{
+ return kernfs_find_and_get_ns(kn, name, NULL);
+}
+
+static inline struct kernfs_node *
+kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
+ void *priv)
+{
+ return kernfs_create_dir_ns(parent, name, mode, priv, NULL);
+}
+
+static inline struct kernfs_node *
+kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
+ umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ void *priv, const void *ns)
+{
+ struct lock_class_key *key = NULL;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ key = (struct lock_class_key *)&ops->lockdep_key;
+#endif
+ return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
+ key);
+}
+
+static inline struct kernfs_node *
+kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
+ loff_t size, const struct kernfs_ops *ops, void *priv)
+{
+ return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL);
+}
+
+static inline int kernfs_remove_by_name(struct kernfs_node *parent,
+ const char *name)
+{
+ return kernfs_remove_by_name_ns(parent, name, NULL);
+}
+
+static inline int kernfs_rename(struct kernfs_node *kn,
+ struct kernfs_node *new_parent,
+ const char *new_name)
+{
+ return kernfs_rename_ns(kn, new_parent, new_name, NULL);
+}
+
+static inline struct dentry *
+kernfs_mount(struct file_system_type *fs_type, int flags,
+ struct kernfs_root *root, unsigned long magic,
+ bool *new_sb_created)
+{
+ return kernfs_mount_ns(fs_type, flags, root,
+ magic, new_sb_created, NULL);
+}
+
+#endif /* __LINUX_KERNFS_H */
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
new file mode 100644
index 000000000..e804306ef
--- /dev/null
+++ b/include/linux/kexec.h
@@ -0,0 +1,330 @@
+#ifndef LINUX_KEXEC_H
+#define LINUX_KEXEC_H
+
+#define IND_DESTINATION_BIT 0
+#define IND_INDIRECTION_BIT 1
+#define IND_DONE_BIT 2
+#define IND_SOURCE_BIT 3
+
+#define IND_DESTINATION (1 << IND_DESTINATION_BIT)
+#define IND_INDIRECTION (1 << IND_INDIRECTION_BIT)
+#define IND_DONE (1 << IND_DONE_BIT)
+#define IND_SOURCE (1 << IND_SOURCE_BIT)
+#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)
+
+#if !defined(__ASSEMBLY__)
+
+#include <uapi/linux/kexec.h>
+
+#ifdef CONFIG_KEXEC
+#include <linux/list.h>
+#include <linux/linkage.h>
+#include <linux/compat.h>
+#include <linux/ioport.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
+#include <linux/module.h>
+#include <asm/kexec.h>
+
+/* Verify architecture specific macros are defined */
+
+#ifndef KEXEC_SOURCE_MEMORY_LIMIT
+#error KEXEC_SOURCE_MEMORY_LIMIT not defined
+#endif
+
+#ifndef KEXEC_DESTINATION_MEMORY_LIMIT
+#error KEXEC_DESTINATION_MEMORY_LIMIT not defined
+#endif
+
+#ifndef KEXEC_CONTROL_MEMORY_LIMIT
+#error KEXEC_CONTROL_MEMORY_LIMIT not defined
+#endif
+
+#ifndef KEXEC_CONTROL_MEMORY_GFP
+#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#endif
+
+#ifndef KEXEC_CONTROL_PAGE_SIZE
+#error KEXEC_CONTROL_PAGE_SIZE not defined
+#endif
+
+#ifndef KEXEC_ARCH
+#error KEXEC_ARCH not defined
+#endif
+
+#ifndef KEXEC_CRASH_CONTROL_MEMORY_LIMIT
+#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT KEXEC_CONTROL_MEMORY_LIMIT
+#endif
+
+#ifndef KEXEC_CRASH_MEM_ALIGN
+#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
+#endif
+
+#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
+#define KEXEC_CORE_NOTE_NAME "CORE"
+#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
+#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+/*
+ * The per-cpu notes area is a list of notes terminated by a "NULL"
+ * note header. For kdump, the code in vmcore.c runs in the context
+ * of the second kernel to combine them into one note.
+ */
+#ifndef KEXEC_NOTE_BYTES
+#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \
+ KEXEC_CORE_NOTE_NAME_BYTES + \
+ KEXEC_CORE_NOTE_DESC_BYTES )
+#endif
+
+/*
+ * This structure is used to hold the arguments that are used when loading
+ * kernel binaries.
+ */
+
+typedef unsigned long kimage_entry_t;
+
+struct kexec_segment {
+ /*
+ * This pointer can point to user memory if kexec_load() system
+ * call is used or will point to kernel memory if
+ * kexec_file_load() system call is used.
+ *
+ * Use ->buf when expecting to deal with user memory and use ->kbuf
+ * when expecting to deal with kernel memory.
+ */
+ union {
+ void __user *buf;
+ void *kbuf;
+ };
+ size_t bufsz;
+ unsigned long mem;
+ size_t memsz;
+};
+
+#ifdef CONFIG_COMPAT
+struct compat_kexec_segment {
+ compat_uptr_t buf;
+ compat_size_t bufsz;
+ compat_ulong_t mem; /* User space sees this as a (void *) ... */
+ compat_size_t memsz;
+};
+#endif
+
+struct kexec_sha_region {
+ unsigned long start;
+ unsigned long len;
+};
+
+struct purgatory_info {
+ /* Pointer to elf header of read only purgatory */
+ Elf_Ehdr *ehdr;
+
+ /* Pointer to purgatory sechdrs which are modifiable */
+ Elf_Shdr *sechdrs;
+ /*
+	 * Temporary buffer location where purgatory is loaded and relocated.
+	 * This memory can be freed once the image has been loaded.
+ */
+ void *purgatory_buf;
+
+ /* Address where purgatory is finally loaded and is executed from */
+ unsigned long purgatory_load_addr;
+};
+
+struct kimage {
+ kimage_entry_t head;
+ kimage_entry_t *entry;
+ kimage_entry_t *last_entry;
+
+ unsigned long start;
+ struct page *control_code_page;
+ struct page *swap_page;
+
+ unsigned long nr_segments;
+ struct kexec_segment segment[KEXEC_SEGMENT_MAX];
+
+ struct list_head control_pages;
+ struct list_head dest_pages;
+ struct list_head unusable_pages;
+
+ /* Address of next control page to allocate for crash kernels. */
+ unsigned long control_page;
+
+ /* Flags to indicate special processing */
+ unsigned int type : 1;
+#define KEXEC_TYPE_DEFAULT 0
+#define KEXEC_TYPE_CRASH 1
+ unsigned int preserve_context : 1;
+ /* If set, we are using file mode kexec syscall */
+ unsigned int file_mode:1;
+
+#ifdef ARCH_HAS_KIMAGE_ARCH
+ struct kimage_arch arch;
+#endif
+
+ /* Additional fields for file based kexec syscall */
+ void *kernel_buf;
+ unsigned long kernel_buf_len;
+
+ void *initrd_buf;
+ unsigned long initrd_buf_len;
+
+ char *cmdline_buf;
+ unsigned long cmdline_buf_len;
+
+ /* File operations provided by image loader */
+ struct kexec_file_ops *fops;
+
+ /* Image loader handling the kernel can store a pointer here */
+ void *image_loader_data;
+
+ /* Information for loading purgatory */
+ struct purgatory_info purgatory_info;
+};
+
+/*
+ * Keeps track of the buffer parameters provided by the caller when
+ * requesting memory placement for a buffer.
+ */
+struct kexec_buf {
+ struct kimage *image;
+ char *buffer;
+ unsigned long bufsz;
+ unsigned long mem;
+ unsigned long memsz;
+ unsigned long buf_align;
+ unsigned long buf_min;
+ unsigned long buf_max;
+ bool top_down; /* allocate from top of memory hole */
+};
+
+typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
+typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
+ unsigned long kernel_len, char *initrd,
+ unsigned long initrd_len, char *cmdline,
+ unsigned long cmdline_len);
+typedef int (kexec_cleanup_t)(void *loader_data);
+typedef int (kexec_verify_sig_t)(const char *kernel_buf,
+ unsigned long kernel_len);
+
+struct kexec_file_ops {
+ kexec_probe_t *probe;
+ kexec_load_t *load;
+ kexec_cleanup_t *cleanup;
+ kexec_verify_sig_t *verify_sig;
+};
+
+/* kexec interface functions */
+extern void machine_kexec(struct kimage *image);
+extern int machine_kexec_prepare(struct kimage *image);
+extern void machine_kexec_cleanup(struct kimage *image);
+extern asmlinkage long sys_kexec_load(unsigned long entry,
+ unsigned long nr_segments,
+ struct kexec_segment __user *segments,
+ unsigned long flags);
+extern int kernel_kexec(void);
+extern int kexec_add_buffer(struct kimage *image, char *buffer,
+ unsigned long bufsz, unsigned long memsz,
+ unsigned long buf_align, unsigned long buf_min,
+ unsigned long buf_max, bool top_down,
+ unsigned long *load_addr);
+extern struct page *kimage_alloc_control_pages(struct kimage *image,
+ unsigned int order);
+extern int kexec_load_purgatory(struct kimage *image, unsigned long min,
+ unsigned long max, int top_down,
+ unsigned long *load_addr);
+extern int kexec_purgatory_get_set_symbol(struct kimage *image,
+ const char *name, void *buf,
+ unsigned int size, bool get_value);
+extern void *kexec_purgatory_get_symbol_addr(struct kimage *image,
+ const char *name);
+extern void crash_kexec(struct pt_regs *);
+int kexec_should_crash(struct task_struct *);
+void crash_save_cpu(struct pt_regs *regs, int cpu);
+void crash_save_vmcoreinfo(void);
+void crash_map_reserved_pages(void);
+void crash_unmap_reserved_pages(void);
+void arch_crash_save_vmcoreinfo(void);
+__printf(1, 2)
+void vmcoreinfo_append_str(const char *fmt, ...);
+unsigned long paddr_vmcoreinfo_note(void);
+
+#define VMCOREINFO_OSRELEASE(value) \
+ vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_PAGESIZE(value) \
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
+#define VMCOREINFO_SYMBOL(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(name))
+#define VMCOREINFO_STRUCT_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(struct name))
+#define VMCOREINFO_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(struct name, field))
+#define VMCOREINFO_LENGTH(name, value) \
+ vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
+#define VMCOREINFO_NUMBER(name) \
+ vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
+#define VMCOREINFO_CONFIG(name) \
+ vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
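+
+/*
+ * The VMCOREINFO_* helpers are typically called from the architecture's
+ * arch_crash_save_vmcoreinfo() to export symbols and layout information to
+ * the kdump kernel. A sketch (the exported items are illustrative):
+ *
+ *	void arch_crash_save_vmcoreinfo(void)
+ *	{
+ *		VMCOREINFO_SYMBOL(node_data);
+ *		VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+ *		VMCOREINFO_CONFIG(NUMA);
+ *	}
+ */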
+
+extern struct kimage *kexec_image;
+extern struct kimage *kexec_crash_image;
+extern int kexec_load_disabled;
+
+#ifndef kexec_flush_icache_page
+#define kexec_flush_icache_page(page)
+#endif
+
+/* List of defined/legal kexec flags */
+#ifndef CONFIG_KEXEC_JUMP
+#define KEXEC_FLAGS KEXEC_ON_CRASH
+#else
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#endif
+
+/* List of defined/legal kexec file flags */
+#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
+ KEXEC_FILE_NO_INITRAMFS)
+
+#define VMCOREINFO_BYTES (4096)
+#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
+#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
+#define VMCOREINFO_NOTE_SIZE (KEXEC_NOTE_HEAD_BYTES*2 + VMCOREINFO_BYTES \
+ + VMCOREINFO_NOTE_NAME_BYTES)
+
+/* Location of a reserved region to hold the crash kernel.
+ */
+extern struct resource crashk_res;
+extern struct resource crashk_low_res;
+typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
+extern note_buf_t __percpu *crash_notes;
+extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+extern size_t vmcoreinfo_size;
+extern size_t vmcoreinfo_max_size;
+
+/* flag to track if kexec reboot is in progress */
+extern bool kexec_in_progress;
+
+int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int crash_shrink_memory(unsigned long new_size);
+size_t crash_get_memory_size(void);
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+
+#else /* !CONFIG_KEXEC */
+struct pt_regs;
+struct task_struct;
+static inline void crash_kexec(struct pt_regs *regs) { }
+static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+#endif /* CONFIG_KEXEC */
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif /* LINUX_KEXEC_H */
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
new file mode 100644
index 000000000..ff9f1d394
--- /dev/null
+++ b/include/linux/key-type.h
@@ -0,0 +1,186 @@
+/* Definitions for key type implementations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_KEY_TYPE_H
+#define _LINUX_KEY_TYPE_H
+
+#include <linux/key.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_KEYS
+
+/*
+ * key under-construction record
+ * - passed to the request_key actor if supplied
+ */
+struct key_construction {
+ struct key *key; /* key being constructed */
+ struct key *authkey;/* authorisation for key being constructed */
+};
+
+/*
+ * Pre-parsed payload, used by key add, update and instantiate.
+ *
+ * This struct will be cleared, data and datalen will be set from the data
+ * and length parameters supplied by the caller, and quotalen will be set from
+ * the key type's def_datalen. Then, if the preparse() op is provided by the
+ * key type, that will be called. Then the struct will be passed to the
+ * instantiate() or the update() op.
+ *
+ * If the preparse() op is given, the free_preparse() op will be called to
+ * clear the contents.
+ */
+struct key_preparsed_payload {
+ char *description; /* Proposed key description (or NULL) */
+ void *type_data[2]; /* Private key-type data */
+ void *payload[2]; /* Proposed payload */
+ const void *data; /* Raw data */
+ size_t datalen; /* Raw datalen */
+ size_t quotalen; /* Quota length for proposed payload */
+ time_t expiry; /* Expiry time of key */
+ bool trusted; /* True if key is trusted */
+};
+
+typedef int (*request_key_actor_t)(struct key_construction *key,
+ const char *op, void *aux);
+
+/*
+ * Preparsed matching criterion.
+ */
+struct key_match_data {
+ /* Comparison function, defaults to exact description match, but can be
+ * overridden by type->match_preparse(). Should return true if a match
+ * is found and false if not.
+ */
+ bool (*cmp)(const struct key *key,
+ const struct key_match_data *match_data);
+
+ const void *raw_data; /* Raw match data */
+ void *preparsed; /* For ->match_preparse() to stash stuff */
+ unsigned lookup_type; /* Type of lookup for this search. */
+#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
+#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
+};
+
+/*
+ * kernel managed key type definition
+ */
+struct key_type {
+ /* name of the type */
+ const char *name;
+
+ /* default payload length for quota precalculation (optional)
+ * - this can be used instead of calling key_payload_reserve(), that
+ * function only needs to be called if the real datalen is different
+ */
+ size_t def_datalen;
+
+ /* vet a description */
+ int (*vet_description)(const char *description);
+
+ /* Preparse the data blob from userspace that is to be the payload,
+ * generating a proposed description and payload that will be handed to
+ * the instantiate() and update() ops.
+ */
+ int (*preparse)(struct key_preparsed_payload *prep);
+
+ /* Free a preparse data structure.
+ */
+ void (*free_preparse)(struct key_preparsed_payload *prep);
+
+ /* instantiate a key of this type
+ * - this method should call key_payload_reserve() to determine if the
+ * user's quota will hold the payload
+ */
+ int (*instantiate)(struct key *key, struct key_preparsed_payload *prep);
+
+ /* update a key of this type (optional)
+ * - this method should call key_payload_reserve() to recalculate the
+ * quota consumption
+ * - the key must be locked against read when modifying
+ */
+ int (*update)(struct key *key, struct key_preparsed_payload *prep);
+
+ /* Preparse the data supplied to ->match() (optional). The
+ * data to be preparsed can be found in match_data->raw_data.
+ * The lookup type can also be set by this function.
+ */
+ int (*match_preparse)(struct key_match_data *match_data);
+
+	/* Free preparsed match data (optional). This should be supplied if
+ * ->match_preparse() is supplied. */
+ void (*match_free)(struct key_match_data *match_data);
+
+	/* clear some of the data from a key on revocation (optional)
+ * - the key's semaphore will be write-locked by the caller
+ */
+ void (*revoke)(struct key *key);
+
+ /* clear the data from a key (optional) */
+ void (*destroy)(struct key *key);
+
+ /* describe a key */
+ void (*describe)(const struct key *key, struct seq_file *p);
+
+ /* read a key's data (optional)
+ * - permission checks will be done by the caller
+ * - the key's semaphore will be readlocked by the caller
+ * - should return the amount of data that could be read, no matter how
+ * much is copied into the buffer
+ * - shouldn't do the copy if the buffer is NULL
+ */
+ long (*read)(const struct key *key, char __user *buffer, size_t buflen);
+
+ /* handle request_key() for this type instead of invoking
+ * /sbin/request-key (optional)
+ * - key is the key to instantiate
+ * - authkey is the authority to assume when instantiating this key
+ * - op is the operation to be done, usually "create"
+ * - the call must not return until the instantiation process has run
+ * its course
+ */
+ request_key_actor_t request_key;
+
+ /* internal fields */
+ struct list_head link; /* link in types list */
+ struct lock_class_key lock_class; /* key->sem lock class */
+};
+
+extern struct key_type key_type_keyring;
+
+extern int register_key_type(struct key_type *ktype);
+extern void unregister_key_type(struct key_type *ktype);
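+
+/*
+ * A minimal key type (a sketch; the names are illustrative and only a
+ * commonly implemented subset of the ops is shown):
+ *
+ *	static struct key_type key_type_mykey = {
+ *		.name		= "mykey",
+ *		.preparse	= mykey_preparse,
+ *		.free_preparse	= mykey_free_preparse,
+ *		.instantiate	= generic_key_instantiate,
+ *		.destroy	= mykey_destroy,
+ *		.describe	= mykey_describe,
+ *		.read		= mykey_read,
+ *	};
+ *
+ *	err = register_key_type(&key_type_mykey);
+ *	...
+ *	unregister_key_type(&key_type_mykey);
+ */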
+
+extern int key_payload_reserve(struct key *key, size_t datalen);
+extern int key_instantiate_and_link(struct key *key,
+ const void *data,
+ size_t datalen,
+ struct key *keyring,
+ struct key *instkey);
+extern int key_reject_and_link(struct key *key,
+ unsigned timeout,
+ unsigned error,
+ struct key *keyring,
+ struct key *instkey);
+extern void complete_request_key(struct key_construction *cons, int error);
+
+static inline int key_negate_and_link(struct key *key,
+ unsigned timeout,
+ struct key *keyring,
+ struct key *instkey)
+{
+ return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey);
+}
+
+extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep);
+
+#endif /* CONFIG_KEYS */
+#endif /* _LINUX_KEY_TYPE_H */
diff --git a/include/linux/key.h b/include/linux/key.h
new file mode 100644
index 000000000..e1d4715f3
--- /dev/null
+++ b/include/linux/key.h
@@ -0,0 +1,376 @@
+/* Authentication token and access key management
+ *
+ * Copyright (C) 2004, 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * See Documentation/security/keys.txt for information on keys/keyrings.
+ */
+
+#ifndef _LINUX_KEY_H
+#define _LINUX_KEY_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+#include <linux/sysctl.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/assoc_array.h>
+
+#ifdef __KERNEL__
+#include <linux/uidgid.h>
+
+/* key handle serial number */
+typedef int32_t key_serial_t;
+
+/* key handle permissions mask */
+typedef uint32_t key_perm_t;
+
+struct key;
+
+#ifdef CONFIG_KEYS
+
+#undef KEY_DEBUGGING
+
+#define KEY_POS_VIEW 0x01000000 /* possessor can view a key's attributes */
+#define KEY_POS_READ 0x02000000 /* possessor can read key payload / view keyring */
+#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */
+#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */
+#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */
+#define KEY_POS_SETATTR 0x20000000 /* possessor can set key attributes */
+#define KEY_POS_ALL 0x3f000000
+
+#define KEY_USR_VIEW 0x00010000 /* user permissions... */
+#define KEY_USR_READ 0x00020000
+#define KEY_USR_WRITE 0x00040000
+#define KEY_USR_SEARCH 0x00080000
+#define KEY_USR_LINK 0x00100000
+#define KEY_USR_SETATTR 0x00200000
+#define KEY_USR_ALL 0x003f0000
+
+#define KEY_GRP_VIEW 0x00000100 /* group permissions... */
+#define KEY_GRP_READ 0x00000200
+#define KEY_GRP_WRITE 0x00000400
+#define KEY_GRP_SEARCH 0x00000800
+#define KEY_GRP_LINK 0x00001000
+#define KEY_GRP_SETATTR 0x00002000
+#define KEY_GRP_ALL 0x00003f00
+
+#define KEY_OTH_VIEW 0x00000001 /* third party permissions... */
+#define KEY_OTH_READ 0x00000002
+#define KEY_OTH_WRITE 0x00000004
+#define KEY_OTH_SEARCH 0x00000008
+#define KEY_OTH_LINK 0x00000010
+#define KEY_OTH_SETATTR 0x00000020
+#define KEY_OTH_ALL 0x0000003f
+
+#define KEY_PERM_UNDEF 0xffffffff
+
+struct seq_file;
+struct user_struct;
+struct signal_struct;
+struct cred;
+
+struct key_type;
+struct key_owner;
+struct keyring_list;
+struct keyring_name;
+
+struct keyring_index_key {
+ struct key_type *type;
+ const char *description;
+ size_t desc_len;
+};
+
+/*****************************************************************************/
+/*
+ * key reference with possession attribute handling
+ *
+ * NOTE! key_ref_t is a typedef'd pointer to a type that is not actually
+ * defined. This is because we abuse the bottom bit of the reference to carry a
+ * flag to indicate whether the calling process possesses that key in one of
+ * its keyrings.
+ *
+ * the key_ref_t has been made a separate type so that the compiler can reject
+ * attempts to dereference it without proper conversion.
+ *
+ * the three functions are used to assemble and disassemble references
+ */
+typedef struct __key_reference_with_attributes *key_ref_t;
+
+static inline key_ref_t make_key_ref(const struct key *key,
+ bool possession)
+{
+ return (key_ref_t) ((unsigned long) key | possession);
+}
+
+static inline struct key *key_ref_to_ptr(const key_ref_t key_ref)
+{
+ return (struct key *) ((unsigned long) key_ref & ~1UL);
+}
+
+static inline bool is_key_possessed(const key_ref_t key_ref)
+{
+ return (unsigned long) key_ref & 1UL;
+}
+
+/*****************************************************************************/
+/*
+ * authentication token / access credential / keyring
+ * - types of key include:
+ * - keyrings
+ * - disk encryption IDs
+ * - Kerberos TGTs and tickets
+ */
+struct key {
+ atomic_t usage; /* number of references */
+ key_serial_t serial; /* key serial number */
+ union {
+ struct list_head graveyard_link;
+ struct rb_node serial_node;
+ };
+ struct rw_semaphore sem; /* change vs change sem */
+ struct key_user *user; /* owner of this key */
+ void *security; /* security data for this key */
+ union {
+ time_t expiry; /* time at which key expires (or 0) */
+ time_t revoked_at; /* time at which key was revoked */
+ };
+ time_t last_used_at; /* last time used for LRU keyring discard */
+ kuid_t uid;
+ kgid_t gid;
+ key_perm_t perm; /* access permissions */
+ unsigned short quotalen; /* length added to quota */
+ unsigned short datalen; /* payload data length
+ * - may not match RCU dereferenced payload
+ * - payload should contain own length
+ */
+
+#ifdef KEY_DEBUGGING
+ unsigned magic;
+#define KEY_DEBUG_MAGIC 0x18273645u
+#define KEY_DEBUG_MAGIC_X 0xf8e9dacbu
+#endif
+
+ unsigned long flags; /* status flags (change with bitops) */
+#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */
+#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */
+#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
+#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
+#define KEY_FLAG_TRUSTED 8 /* set if key is trusted */
+#define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */
+#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */
+#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */
+
+ /* the key type and key description string
+ * - the desc is used to match a key against search criteria
+ * - it should be a printable string
+ * - eg: for krb5 AFS, this might be "afs@REDHAT.COM"
+ */
+ union {
+ struct keyring_index_key index_key;
+ struct {
+ struct key_type *type; /* type of key */
+ char *description;
+ };
+ };
+
+ /* type specific data
+ * - this is used by the keyring type to index the name
+ */
+ union {
+ struct list_head link;
+ unsigned long x[2];
+ void *p[2];
+ int reject_error;
+ } type_data;
+
+ /* key data
+ * - this is used to hold the data actually used in cryptography or
+ * whatever
+ */
+ union {
+ union {
+ unsigned long value;
+ void __rcu *rcudata;
+ void *data;
+ void *data2[2];
+ } payload;
+ struct assoc_array keys;
+ };
+};
+
+extern struct key *key_alloc(struct key_type *type,
+ const char *desc,
+ kuid_t uid, kgid_t gid,
+ const struct cred *cred,
+ key_perm_t perm,
+ unsigned long flags);
+
+
+#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
+#define KEY_ALLOC_QUOTA_OVERRUN 0x0001 /* add to quota, permit even if overrun */
+#define KEY_ALLOC_NOT_IN_QUOTA 0x0002 /* not in quota */
+#define KEY_ALLOC_TRUSTED 0x0004 /* Key should be flagged as trusted */
+
+extern void key_revoke(struct key *key);
+extern void key_invalidate(struct key *key);
+extern void key_put(struct key *key);
+
+static inline struct key *__key_get(struct key *key)
+{
+ atomic_inc(&key->usage);
+ return key;
+}
+
+static inline struct key *key_get(struct key *key)
+{
+ return key ? __key_get(key) : key;
+}
+
+static inline void key_ref_put(key_ref_t key_ref)
+{
+ key_put(key_ref_to_ptr(key_ref));
+}
+
+extern struct key *request_key(struct key_type *type,
+ const char *description,
+ const char *callout_info);
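+
+/*
+ * For example (a sketch, assuming the "user" key type key_type_user
+ * declared in keys/user-type.h):
+ *
+ *	struct key *key;
+ *
+ *	key = request_key(&key_type_user, "mydesc", NULL);
+ *	if (IS_ERR(key))
+ *		return PTR_ERR(key);
+ *	... use the key under key->sem or RCU ...
+ *	key_put(key);
+ */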
+
+extern struct key *request_key_with_auxdata(struct key_type *type,
+ const char *description,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux);
+
+extern struct key *request_key_async(struct key_type *type,
+ const char *description,
+ const void *callout_info,
+ size_t callout_len);
+
+extern struct key *request_key_async_with_auxdata(struct key_type *type,
+ const char *description,
+ const void *callout_info,
+ size_t callout_len,
+ void *aux);
+
+extern int wait_for_key_construction(struct key *key, bool intr);
+
+extern int key_validate(const struct key *key);
+
+extern key_ref_t key_create_or_update(key_ref_t keyring,
+ const char *type,
+ const char *description,
+ const void *payload,
+ size_t plen,
+ key_perm_t perm,
+ unsigned long flags);
+
+extern int key_update(key_ref_t key,
+ const void *payload,
+ size_t plen);
+
+extern int key_link(struct key *keyring,
+ struct key *key);
+
+extern int key_unlink(struct key *keyring,
+ struct key *key);
+
+extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
+ const struct cred *cred,
+ key_perm_t perm,
+ unsigned long flags,
+ struct key *dest);
+
+extern int keyring_clear(struct key *keyring);
+
+extern key_ref_t keyring_search(key_ref_t keyring,
+ struct key_type *type,
+ const char *description);
+
+extern int keyring_add_key(struct key *keyring,
+ struct key *key);
+
+extern struct key *key_lookup(key_serial_t id);
+
+static inline key_serial_t key_serial(const struct key *key)
+{
+ return key ? key->serial : 0;
+}
+
+extern void key_set_timeout(struct key *, unsigned);
+
+/*
+ * The permissions required on a key that we're looking up.
+ */
+#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */
+#define KEY_NEED_READ 0x02 /* Require permission to read content */
+#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */
+#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */
+#define KEY_NEED_LINK 0x10 /* Require permission to link */
+#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
+#define KEY_NEED_ALL 0x3f /* All the above permissions */
+
+/**
+ * key_is_instantiated - Determine if a key has been positively instantiated
+ * @key: The key to check.
+ *
+ * Return true if the specified key has been positively instantiated, false
+ * otherwise.
+ */
+static inline bool key_is_instantiated(const struct key *key)
+{
+ return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+ !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+}
+
+#define rcu_dereference_key(KEY) \
+ (rcu_dereference_protected((KEY)->payload.rcudata, \
+ rwsem_is_locked(&((struct key *)(KEY))->sem)))
+
+#define rcu_assign_keypointer(KEY, PAYLOAD) \
+do { \
+ rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \
+} while (0)
+
+#ifdef CONFIG_SYSCTL
+extern struct ctl_table key_sysctls[];
+#endif
+/*
+ * the userspace interface
+ */
+extern int install_thread_keyring_to_cred(struct cred *cred);
+extern void key_fsuid_changed(struct task_struct *tsk);
+extern void key_fsgid_changed(struct task_struct *tsk);
+extern void key_init(void);
+
+#else /* CONFIG_KEYS */
+
+#define key_validate(k) 0
+#define key_serial(k) 0
+#define key_get(k) ({ NULL; })
+#define key_revoke(k) do { } while(0)
+#define key_invalidate(k) do { } while(0)
+#define key_put(k) do { } while(0)
+#define key_ref_put(k) do { } while(0)
+#define make_key_ref(k, p) NULL
+#define key_ref_to_ptr(k) NULL
+#define is_key_possessed(k) 0
+#define key_fsuid_changed(t) do { } while(0)
+#define key_fsgid_changed(t) do { } while(0)
+#define key_init() do { } while(0)
+
+#endif /* CONFIG_KEYS */
+#endif /* __KERNEL__ */
+#endif /* _LINUX_KEY_H */
diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h
new file mode 100644
index 000000000..131ed5146
--- /dev/null
+++ b/include/linux/keyboard.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_KEYBOARD_H
+#define __LINUX_KEYBOARD_H
+
+#include <uapi/linux/keyboard.h>
+
+struct notifier_block;
+extern unsigned short *key_maps[MAX_NR_KEYMAPS];
+extern unsigned short plain_map[NR_KEYS];
+
+struct keyboard_notifier_param {
+ struct vc_data *vc; /* VC on which the keyboard press was done */
+ int down; /* Pressure of the key? */
+ int shift; /* Current shift mask */
+ int ledstate; /* Current led state */
+ unsigned int value; /* keycode, unicode value or keysym */
+};
+
+extern int register_keyboard_notifier(struct notifier_block *nb);
+extern int unregister_keyboard_notifier(struct notifier_block *nb);
+#endif
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
new file mode 100644
index 000000000..473b43678
--- /dev/null
+++ b/include/linux/kfifo.h
@@ -0,0 +1,833 @@
+/*
+ * A generic kernel FIFO implementation
+ *
+ * Copyright (C) 2013 Stefani Seibold <stefani@seibold.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef _LINUX_KFIFO_H
+#define _LINUX_KFIFO_H
+
+/*
+ * How to port drivers to the new generic FIFO API:
+ *
+ * - Modify the declaration of the "struct kfifo *" object into an
+ *   in-place "struct kfifo" object
+ * - Init the in-place object with kfifo_alloc() or kfifo_init()
+ *   Note: The address of the in-place "struct kfifo" object must be
+ *   passed as the first argument to these functions
+ * - Replace the use of __kfifo_put with kfifo_in and __kfifo_get
+ *   with kfifo_out
+ * - Replace the use of kfifo_put with kfifo_in_spinlocked and kfifo_get
+ *   with kfifo_out_spinlocked
+ * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc
+ * must be passed now to the kfifo_in_spinlocked and kfifo_out_spinlocked
+ * as the last parameter
+ * - The formerly __kfifo_* functions are renamed into kfifo_*
+ */
+
+/*
+ * Note about locking: There is no locking required as long as there is only
+ * one reader and one writer using the fifo and no kfifo_reset() is called.
+ * kfifo_reset_out() can be safely used as long as it is only called
+ * from the reader thread.
+ * For multiple writers and one reader there is only a need to lock the writer.
+ * And vice versa: for only one writer and multiple readers there is only a
+ * need to lock the reader.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/scatterlist.h>
+
+struct __kfifo {
+ unsigned int in;
+ unsigned int out;
+ unsigned int mask;
+ unsigned int esize;
+ void *data;
+};
+
+#define __STRUCT_KFIFO_COMMON(datatype, recsize, ptrtype) \
+ union { \
+ struct __kfifo kfifo; \
+ datatype *type; \
+ const datatype *const_type; \
+ char (*rectype)[recsize]; \
+ ptrtype *ptr; \
+ ptrtype const *ptr_const; \
+ }
+
+#define __STRUCT_KFIFO(type, size, recsize, ptrtype) \
+{ \
+ __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \
+ type buf[((size < 2) || (size & (size - 1))) ? -1 : size]; \
+}
+
+#define STRUCT_KFIFO(type, size) \
+ struct __STRUCT_KFIFO(type, size, 0, type)
+
+#define __STRUCT_KFIFO_PTR(type, recsize, ptrtype) \
+{ \
+ __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \
+ type buf[0]; \
+}
+
+#define STRUCT_KFIFO_PTR(type) \
+ struct __STRUCT_KFIFO_PTR(type, 0, type)
+
+/*
+ * define compatibility "struct kfifo" for dynamic allocated fifos
+ */
+struct kfifo __STRUCT_KFIFO_PTR(unsigned char, 0, void);
+
+#define STRUCT_KFIFO_REC_1(size) \
+ struct __STRUCT_KFIFO(unsigned char, size, 1, void)
+
+#define STRUCT_KFIFO_REC_2(size) \
+ struct __STRUCT_KFIFO(unsigned char, size, 2, void)
+
+/*
+ * define kfifo_rec types
+ */
+struct kfifo_rec_ptr_1 __STRUCT_KFIFO_PTR(unsigned char, 1, void);
+struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void);
+
+/*
+ * helper macro to distinguish between a real in-place fifo, where the fifo
+ * array is a part of the structure, and the fifo type where the array is
+ * outside of the fifo structure.
+ */
+#define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo))
+
+/**
+ * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
+ * @fifo: name of the declared fifo
+ * @type: type of the fifo elements
+ */
+#define DECLARE_KFIFO_PTR(fifo, type) STRUCT_KFIFO_PTR(type) fifo
+
+/**
+ * DECLARE_KFIFO - macro to declare a fifo object
+ * @fifo: name of the declared fifo
+ * @type: type of the fifo elements
+ * @size: the number of elements in the fifo, this must be a power of 2
+ */
+#define DECLARE_KFIFO(fifo, type, size) STRUCT_KFIFO(type, size) fifo
+
+/**
+ * INIT_KFIFO - Initialize a fifo declared by DECLARE_KFIFO
+ * @fifo: name of the declared fifo datatype
+ */
+#define INIT_KFIFO(fifo) \
+(void)({ \
+ typeof(&(fifo)) __tmp = &(fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ __kfifo->in = 0; \
+ __kfifo->out = 0; \
+ __kfifo->mask = __is_kfifo_ptr(__tmp) ? 0 : ARRAY_SIZE(__tmp->buf) - 1;\
+ __kfifo->esize = sizeof(*__tmp->buf); \
+ __kfifo->data = __is_kfifo_ptr(__tmp) ? NULL : __tmp->buf; \
+})
+
+/**
+ * DEFINE_KFIFO - macro to define and initialize a fifo
+ * @fifo: name of the declared fifo datatype
+ * @type: type of the fifo elements
+ * @size: the number of elements in the fifo, this must be a power of 2
+ *
+ * Note: the macro can be used for global and local fifo data type variables.
+ */
+#define DEFINE_KFIFO(fifo, type, size) \
+ DECLARE_KFIFO(fifo, type, size) = \
+ (typeof(fifo)) { \
+ { \
+ { \
+ .in = 0, \
+ .out = 0, \
+ .mask = __is_kfifo_ptr(&(fifo)) ? \
+ 0 : \
+ ARRAY_SIZE((fifo).buf) - 1, \
+ .esize = sizeof(*(fifo).buf), \
+ .data = __is_kfifo_ptr(&(fifo)) ? \
+ NULL : \
+ (fifo).buf, \
+ } \
+ } \
+ }
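+
+/*
+ * Example (illustrative, hypothetical names): an in-place fifo can either be
+ * embedded in a driver structure and set up at runtime, or defined directly
+ * at file scope:
+ *
+ *	struct my_drvdata {
+ *		DECLARE_KFIFO(events, int, 16);
+ *	};
+ *
+ *	INIT_KFIFO(drvdata->events);
+ *
+ *	static DEFINE_KFIFO(log_fifo, unsigned char, 64);
+ */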
+
+
+static inline unsigned int __must_check
+__kfifo_uint_must_check_helper(unsigned int val)
+{
+ return val;
+}
+
+static inline int __must_check
+__kfifo_int_must_check_helper(int val)
+{
+ return val;
+}
+
+/**
+ * kfifo_initialized - Check if the fifo is initialized
+ * @fifo: address of the fifo to check
+ *
+ * Return %true if fifo is initialized, otherwise %false.
+ * Assumes the fifo was 0 before.
+ */
+#define kfifo_initialized(fifo) ((fifo)->kfifo.mask)
+
+/**
+ * kfifo_esize - returns the size of the element managed by the fifo
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_esize(fifo) ((fifo)->kfifo.esize)
+
+/**
+ * kfifo_recsize - returns the size of the record length field
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_recsize(fifo) (sizeof(*(fifo)->rectype))
+
+/**
+ * kfifo_size - returns the size of the fifo in elements
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_size(fifo) ((fifo)->kfifo.mask + 1)
+
+/**
+ * kfifo_reset - removes the entire fifo content
+ * @fifo: address of the fifo to be used
+ *
+ * Note: usage of kfifo_reset() is dangerous. It should only be called when the
+ * fifo is exclusively locked or when it is ensured that no other thread is
+ * accessing the fifo.
+ */
+#define kfifo_reset(fifo) \
+(void)({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ __tmp->kfifo.in = __tmp->kfifo.out = 0; \
+})
+
+/**
+ * kfifo_reset_out - skip fifo content
+ * @fifo: address of the fifo to be used
+ *
+ * Note: The usage of kfifo_reset_out() is safe as long as it is only called
+ * from the reader thread and there is only one concurrent reader. Otherwise
+ * it is dangerous and must be handled in the same way as kfifo_reset().
+ */
+#define kfifo_reset_out(fifo) \
+(void)({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ __tmp->kfifo.out = __tmp->kfifo.in; \
+})
+
+/**
+ * kfifo_len - returns the number of used elements in the fifo
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_len(fifo) \
+({ \
+ typeof((fifo) + 1) __tmpl = (fifo); \
+ __tmpl->kfifo.in - __tmpl->kfifo.out; \
+})
+
+/**
+ * kfifo_is_empty - returns true if the fifo is empty
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_is_empty(fifo) \
+({ \
+ typeof((fifo) + 1) __tmpq = (fifo); \
+ __tmpq->kfifo.in == __tmpq->kfifo.out; \
+})
+
+/**
+ * kfifo_is_full - returns true if the fifo is full
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_is_full(fifo) \
+({ \
+ typeof((fifo) + 1) __tmpq = (fifo); \
+ kfifo_len(__tmpq) > __tmpq->kfifo.mask; \
+})
+
+/**
+ * kfifo_avail - returns the number of unused elements in the fifo
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_avail(fifo) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmpq = (fifo); \
+ const size_t __recsize = sizeof(*__tmpq->rectype); \
+ unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \
+ (__recsize) ? ((__avail <= __recsize) ? 0 : \
+ __kfifo_max_r(__avail - __recsize, __recsize)) : \
+ __avail; \
+}) \
+)
+
+/**
+ * kfifo_skip - skip output data
+ * @fifo: address of the fifo to be used
+ */
+#define kfifo_skip(fifo) \
+(void)({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ if (__recsize) \
+ __kfifo_skip_r(__kfifo, __recsize); \
+ else \
+ __kfifo->out++; \
+})
+
+/**
+ * kfifo_peek_len - gets the size of the next fifo record
+ * @fifo: address of the fifo to be used
+ *
+ * This function returns the size of the next fifo record in number of bytes.
+ */
+#define kfifo_peek_len(fifo) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \
+ __kfifo_len_r(__kfifo, __recsize); \
+}) \
+)
+
+/**
+ * kfifo_alloc - dynamically allocates a new fifo buffer
+ * @fifo: pointer to the fifo
+ * @size: the number of elements in the fifo, this must be a power of 2
+ * @gfp_mask: get_free_pages mask, passed to kmalloc()
+ *
+ * This macro dynamically allocates a new fifo buffer.
+ *
+ * The number of elements will be rounded up to a power of 2.
+ * The fifo must be released with kfifo_free().
+ * Return 0 if no error, otherwise an error code.
+ */
+#define kfifo_alloc(fifo, size, gfp_mask) \
+__kfifo_int_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ __is_kfifo_ptr(__tmp) ? \
+ __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \
+ -EINVAL; \
+}) \
+)
+
+/**
+ * kfifo_free - frees the fifo
+ * @fifo: the fifo to be freed
+ */
+#define kfifo_free(fifo) \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ if (__is_kfifo_ptr(__tmp)) \
+ __kfifo_free(__kfifo); \
+})
+
+/**
+ * kfifo_init - initialize a fifo using a preallocated buffer
+ * @fifo: the fifo to assign the buffer
+ * @buffer: the preallocated buffer to be used
+ * @size: the size of the internal buffer, this has to be a power of 2
+ *
+ * This macro initializes a fifo using a preallocated buffer.
+ *
+ * The number of elements will be rounded up to a power of 2.
+ * Return 0 if no error, otherwise an error code.
+ */
+#define kfifo_init(fifo, buffer, size) \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ __is_kfifo_ptr(__tmp) ? \
+ __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \
+ -EINVAL; \
+})
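+
+/*
+ * Example (illustrative sketch): attaching a preallocated, power-of-2 sized
+ * buffer to a fifo pointer object declared with DECLARE_KFIFO_PTR():
+ *
+ *	static DECLARE_KFIFO_PTR(fifo, unsigned char);
+ *	static unsigned char buffer[4096];
+ *
+ *	ret = kfifo_init(&fifo, buffer, sizeof(buffer));
+ */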
+
+/**
+ * kfifo_put - put data into the fifo
+ * @fifo: address of the fifo to be used
+ * @val: the data to be added
+ *
+ * This macro copies the given value into the fifo.
+ * It returns 0 if the fifo was full. Otherwise it returns the number
+ * of processed elements.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_put(fifo, val) \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof(*__tmp->const_type) __val = (val); \
+ unsigned int __ret; \
+ size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ if (__recsize) \
+ __ret = __kfifo_in_r(__kfifo, &__val, sizeof(__val), \
+ __recsize); \
+ else { \
+ __ret = !kfifo_is_full(__tmp); \
+ if (__ret) { \
+ (__is_kfifo_ptr(__tmp) ? \
+ ((typeof(__tmp->type))__kfifo->data) : \
+ (__tmp->buf) \
+ )[__kfifo->in & __tmp->kfifo.mask] = \
+ (typeof(*__tmp->type))__val; \
+ smp_wmb(); \
+ __kfifo->in++; \
+ } \
+ } \
+ __ret; \
+})
+
+/**
+ * kfifo_get - get data from the fifo
+ * @fifo: address of the fifo to be used
+ * @val: address where to store the data
+ *
+ * This macro reads the data from the fifo.
+ * It returns 0 if the fifo was empty. Otherwise it returns the number
+ * of processed elements.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_get(fifo, val) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof(__tmp->ptr) __val = (val); \
+ unsigned int __ret; \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ if (__recsize) \
+ __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \
+ __recsize); \
+ else { \
+ __ret = !kfifo_is_empty(__tmp); \
+ if (__ret) { \
+ *(typeof(__tmp->type))__val = \
+ (__is_kfifo_ptr(__tmp) ? \
+ ((typeof(__tmp->type))__kfifo->data) : \
+ (__tmp->buf) \
+ )[__kfifo->out & __tmp->kfifo.mask]; \
+ smp_wmb(); \
+ __kfifo->out++; \
+ } \
+ } \
+ __ret; \
+}) \
+)
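+
+/*
+ * Example (illustrative): single-element produce/consume on a fifo declared
+ * as DECLARE_KFIFO(fifo, int, 16) and already initialized:
+ *
+ *	int val;
+ *
+ *	if (!kfifo_put(&fifo, 123))
+ *		pr_warn("fifo full, element dropped\n");
+ *
+ *	if (kfifo_get(&fifo, &val))
+ *		pr_info("read %d\n", val);
+ */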
+
+/**
+ * kfifo_peek - get data from the fifo without removing
+ * @fifo: address of the fifo to be used
+ * @val: address where to store the data
+ *
+ * This reads the data from the fifo without removing it from the fifo.
+ * It returns 0 if the fifo was empty. Otherwise it returns the number
+ * of processed elements.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_peek(fifo, val) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof(__tmp->ptr) __val = (val); \
+ unsigned int __ret; \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ if (__recsize) \
+ __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \
+ __recsize); \
+ else { \
+ __ret = !kfifo_is_empty(__tmp); \
+ if (__ret) { \
+ *(typeof(__tmp->type))__val = \
+ (__is_kfifo_ptr(__tmp) ? \
+ ((typeof(__tmp->type))__kfifo->data) : \
+ (__tmp->buf) \
+ )[__kfifo->out & __tmp->kfifo.mask]; \
+ smp_wmb(); \
+ } \
+ } \
+ __ret; \
+}) \
+)
+
+/**
+ * kfifo_in - put data into the fifo
+ * @fifo: address of the fifo to be used
+ * @buf: the data to be added
+ * @n: number of elements to be added
+ *
+ * This macro copies the given buffer into the fifo and returns the
+ * number of copied elements.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_in(fifo, buf, n) \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof(__tmp->ptr_const) __buf = (buf); \
+ unsigned long __n = (n); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ?\
+ __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \
+ __kfifo_in(__kfifo, __buf, __n); \
+})
+
+/**
+ * kfifo_in_spinlocked - put data into the fifo using a spinlock for locking
+ * @fifo: address of the fifo to be used
+ * @buf: the data to be added
+ * @n: number of elements to be added
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This macro copies the given buffer into the fifo and returns the
+ * number of copied elements.
+ */
+#define kfifo_in_spinlocked(fifo, buf, n, lock) \
+({ \
+ unsigned long __flags; \
+ unsigned int __ret; \
+ spin_lock_irqsave(lock, __flags); \
+ __ret = kfifo_in(fifo, buf, n); \
+ spin_unlock_irqrestore(lock, __flags); \
+ __ret; \
+})
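+
+/*
+ * Example (illustrative): with several producers and a single consumer only
+ * the producer side needs the lock:
+ *
+ *	static DEFINE_KFIFO(fifo, unsigned char, 128);
+ *	static DEFINE_SPINLOCK(fifo_lock);
+ *
+ * In a producer (may run concurrently on several CPUs):
+ *
+ *	kfifo_in_spinlocked(&fifo, buf, len, &fifo_lock);
+ *
+ * In the single consumer no extra locking is needed:
+ *
+ *	copied = kfifo_out(&fifo, buf, len);
+ */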
+
+/* alias for kfifo_in_spinlocked, will be removed in a future release */
+#define kfifo_in_locked(fifo, buf, n, lock) \
+ kfifo_in_spinlocked(fifo, buf, n, lock)
+
+/**
+ * kfifo_out - get data from the fifo
+ * @fifo: address of the fifo to be used
+ * @buf: pointer to the storage buffer
+ * @n: max. number of elements to get
+ *
+ * This macro gets some data from the fifo and returns the number of elements
+ * copied.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_out(fifo, buf, n) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof(__tmp->ptr) __buf = (buf); \
+ unsigned long __n = (n); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ?\
+ __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \
+ __kfifo_out(__kfifo, __buf, __n); \
+}) \
+)
+
+/**
+ * kfifo_out_spinlocked - get data from the fifo using a spinlock for locking
+ * @fifo: address of the fifo to be used
+ * @buf: pointer to the storage buffer
+ * @n: max. number of elements to get
+ * @lock: pointer to the spinlock to use for locking
+ *
+ * This macro gets the data from the fifo and returns the number of elements
+ * copied.
+ */
+#define kfifo_out_spinlocked(fifo, buf, n, lock) \
+__kfifo_uint_must_check_helper( \
+({ \
+ unsigned long __flags; \
+ unsigned int __ret; \
+ spin_lock_irqsave(lock, __flags); \
+ __ret = kfifo_out(fifo, buf, n); \
+ spin_unlock_irqrestore(lock, __flags); \
+ __ret; \
+}) \
+)
+
+/* alias for kfifo_out_spinlocked, will be removed in a future release */
+#define kfifo_out_locked(fifo, buf, n, lock) \
+ kfifo_out_spinlocked(fifo, buf, n, lock)
+
+/**
+ * kfifo_from_user - puts some data from user space into the fifo
+ * @fifo: address of the fifo to be used
+ * @from: pointer to the data to be added
+ * @len: the length of the data to be added
+ * @copied: pointer to output variable to store the number of copied bytes
+ *
+ * This macro copies at most @len bytes from @from into the
+ * fifo, depending on the available space, and returns -EFAULT/0.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_from_user(fifo, from, len, copied) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ const void __user *__from = (from); \
+ unsigned int __len = (len); \
+ unsigned int *__copied = (copied); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ? \
+ __kfifo_from_user_r(__kfifo, __from, __len, __copied, __recsize) : \
+ __kfifo_from_user(__kfifo, __from, __len, __copied); \
+}) \
+)
+
+/**
+ * kfifo_to_user - copies data from the fifo into user space
+ * @fifo: address of the fifo to be used
+ * @to: where the data must be copied
+ * @len: the size of the destination buffer
+ * @copied: pointer to output variable to store the number of copied bytes
+ *
+ * This macro copies at most @len bytes from the fifo into the
+ * @to buffer and returns -EFAULT/0.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_to_user(fifo, to, len, copied) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ void __user *__to = (to); \
+ unsigned int __len = (len); \
+ unsigned int *__copied = (copied); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ? \
+ __kfifo_to_user_r(__kfifo, __to, __len, __copied, __recsize) : \
+ __kfifo_to_user(__kfifo, __to, __len, __copied); \
+}) \
+)
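+
+/*
+ * Example (illustrative sketch of a hypothetical character-device read):
+ *
+ *	static ssize_t my_read(struct file *file, char __user *to,
+ *			       size_t len, loff_t *ppos)
+ *	{
+ *		unsigned int copied;
+ *		int ret;
+ *
+ *		ret = kfifo_to_user(&fifo, to, len, &copied);
+ *
+ *		return ret ? ret : copied;
+ *	}
+ *
+ * kfifo_from_user() is used the same way in the corresponding write path.
+ */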
+
+/**
+ * kfifo_dma_in_prepare - setup a scatterlist for DMA input
+ * @fifo: address of the fifo to be used
+ * @sgl: pointer to the scatterlist array
+ * @nents: number of entries in the scatterlist array
+ * @len: number of elements to transfer
+ *
+ * This macro fills a scatterlist for DMA input.
+ * It returns the number of entries in the scatterlist array.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct scatterlist *__sgl = (sgl); \
+ int __nents = (nents); \
+ unsigned int __len = (len); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ? \
+ __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
+ __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \
+})
+
+/**
+ * kfifo_dma_in_finish - finish a DMA IN operation
+ * @fifo: address of the fifo to be used
+ * @len: number of bytes received
+ *
+ * This macro finishes a DMA IN operation. The in counter will be updated by
+ * the len parameter. No error checking will be done.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+#define kfifo_dma_in_finish(fifo, len) \
+(void)({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ unsigned int __len = (len); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ if (__recsize) \
+ __kfifo_dma_in_finish_r(__kfifo, __len, __recsize); \
+ else \
+ __kfifo->in += __len / sizeof(*__tmp->type); \
+})
+
+/**
+ * kfifo_dma_out_prepare - setup a scatterlist for DMA output
+ * @fifo: address of the fifo to be used
+ * @sgl: pointer to the scatterlist array
+ * @nents: number of entries in the scatterlist array
+ * @len: number of elements to transfer
+ *
+ * This macro fills a scatterlist for DMA output of at most @len bytes
+ * to transfer.
+ * It returns the number of entries in the scatterlist array.
+ * A zero means there is no space available and the scatterlist is not filled.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ struct scatterlist *__sgl = (sgl); \
+ int __nents = (nents); \
+ unsigned int __len = (len); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ? \
+ __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \
+ __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \
+})
+
+/**
+ * kfifo_dma_out_finish - finish a DMA OUT operation
+ * @fifo: address of the fifo to be used
+ * @len: number of bytes transferred
+ *
+ * This macro finishes a DMA OUT operation. The out counter will be updated by
+ * the len parameter. No error checking will be done.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+#define kfifo_dma_out_finish(fifo, len) \
+(void)({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ unsigned int __len = (len); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ if (__recsize) \
+ __kfifo_dma_out_finish_r(__kfifo, __recsize); \
+ else \
+ __kfifo->out += __len / sizeof(*__tmp->type); \
+})
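+
+/*
+ * Example (illustrative outline, hardware-specific steps omitted): the DMA
+ * helpers are used in prepare/finish pairs around the actual transfer:
+ *
+ *	struct scatterlist sg[2];
+ *	unsigned int nents;
+ *
+ *	nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), len);
+ *	if (nents) {
+ *		... start the DMA transfer on sg[0..nents-1] ...
+ *		kfifo_dma_out_finish(&fifo, transferred);
+ *	}
+ *
+ * The DMA IN direction pairs kfifo_dma_in_prepare() with
+ * kfifo_dma_in_finish() in the same way.
+ */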
+
+/**
+ * kfifo_out_peek - gets some data from the fifo
+ * @fifo: address of the fifo to be used
+ * @buf: pointer to the storage buffer
+ * @n: max. number of elements to get
+ *
+ * This macro gets the data from the fifo and returns the number of elements
+ * copied. The data is not removed from the fifo.
+ *
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use this macro.
+ */
+#define kfifo_out_peek(fifo, buf, n) \
+__kfifo_uint_must_check_helper( \
+({ \
+ typeof((fifo) + 1) __tmp = (fifo); \
+ typeof(__tmp->ptr) __buf = (buf); \
+ unsigned long __n = (n); \
+ const size_t __recsize = sizeof(*__tmp->rectype); \
+ struct __kfifo *__kfifo = &__tmp->kfifo; \
+ (__recsize) ? \
+ __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \
+ __kfifo_out_peek(__kfifo, __buf, __n); \
+}) \
+)
+
+extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
+ size_t esize, gfp_t gfp_mask);
+
+extern void __kfifo_free(struct __kfifo *fifo);
+
+extern int __kfifo_init(struct __kfifo *fifo, void *buffer,
+ unsigned int size, size_t esize);
+
+extern unsigned int __kfifo_in(struct __kfifo *fifo,
+ const void *buf, unsigned int len);
+
+extern unsigned int __kfifo_out(struct __kfifo *fifo,
+ void *buf, unsigned int len);
+
+extern int __kfifo_from_user(struct __kfifo *fifo,
+ const void __user *from, unsigned long len, unsigned int *copied);
+
+extern int __kfifo_to_user(struct __kfifo *fifo,
+ void __user *to, unsigned long len, unsigned int *copied);
+
+extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len);
+
+extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len);
+
+extern unsigned int __kfifo_out_peek(struct __kfifo *fifo,
+ void *buf, unsigned int len);
+
+extern unsigned int __kfifo_in_r(struct __kfifo *fifo,
+ const void *buf, unsigned int len, size_t recsize);
+
+extern unsigned int __kfifo_out_r(struct __kfifo *fifo,
+ void *buf, unsigned int len, size_t recsize);
+
+extern int __kfifo_from_user_r(struct __kfifo *fifo,
+ const void __user *from, unsigned long len, unsigned int *copied,
+ size_t recsize);
+
+extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
+ unsigned long len, unsigned int *copied, size_t recsize);
+
+extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
+
+extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
+ unsigned int len, size_t recsize);
+
+extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize);
+
+extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize);
+
+extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize);
+
+extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize);
+
+extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo,
+ void *buf, unsigned int len, size_t recsize);
+
+extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize);
+
+#endif
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
new file mode 100644
index 000000000..e465bb159
--- /dev/null
+++ b/include/linux/kgdb.h
@@ -0,0 +1,327 @@
+/*
+ * This provides the callbacks and functions that KGDB needs to share between
+ * the core, I/O and arch-specific portions.
+ *
+ * Author: Amit Kale <amitkale@linsyssoft.com> and
+ * Tom Rini <trini@kernel.crashing.org>
+ *
+ * 2001-2004 (c) Amit S. Kale and 2003-2005 (c) MontaVista Software, Inc.
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef _KGDB_H_
+#define _KGDB_H_
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/atomic.h>
+#ifdef CONFIG_HAVE_ARCH_KGDB
+#include <asm/kgdb.h>
+#endif
+
+#ifdef CONFIG_KGDB
+struct pt_regs;
+
+/**
+ * kgdb_skipexception - (optional) exit kgdb_handle_exception early
+ * @exception: Exception vector number
+ * @regs: Current &struct pt_regs.
+ *
+ * On some architectures it is required to skip a breakpoint
+ * exception when it occurs after a breakpoint has been removed.
+ * This can be implemented in the architecture specific portion of kgdb.
+ */
+extern int kgdb_skipexception(int exception, struct pt_regs *regs);
+
+struct tasklet_struct;
+struct task_struct;
+struct uart_port;
+
+/**
+ * kgdb_breakpoint - compiled in breakpoint
+ *
+ * This will be implemented as a static inline per architecture. This
+ * function is called by the kgdb core to execute an architecture
+ * specific trap to cause kgdb to enter the exception processing.
+ *
+ */
+void kgdb_breakpoint(void);
+
+extern int kgdb_connected;
+extern int kgdb_io_module_registered;
+
+extern atomic_t kgdb_setting_breakpoint;
+extern atomic_t kgdb_cpu_doing_single_step;
+
+extern struct task_struct *kgdb_usethread;
+extern struct task_struct *kgdb_contthread;
+
+enum kgdb_bptype {
+ BP_BREAKPOINT = 0,
+ BP_HARDWARE_BREAKPOINT,
+ BP_WRITE_WATCHPOINT,
+ BP_READ_WATCHPOINT,
+ BP_ACCESS_WATCHPOINT,
+ BP_POKE_BREAKPOINT,
+};
+
+enum kgdb_bpstate {
+ BP_UNDEFINED = 0,
+ BP_REMOVED,
+ BP_SET,
+ BP_ACTIVE
+};
+
+struct kgdb_bkpt {
+ unsigned long bpt_addr;
+ unsigned char saved_instr[BREAK_INSTR_SIZE];
+ enum kgdb_bptype type;
+ enum kgdb_bpstate state;
+};
+
+struct dbg_reg_def_t {
+ char *name;
+ int size;
+ int offset;
+};
+
+#ifndef DBG_MAX_REG_NUM
+#define DBG_MAX_REG_NUM 0
+#else
+extern struct dbg_reg_def_t dbg_reg_def[];
+extern char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs);
+extern int dbg_set_reg(int regno, void *mem, struct pt_regs *regs);
+#endif
+#ifndef KGDB_MAX_BREAKPOINTS
+# define KGDB_MAX_BREAKPOINTS 1000
+#endif
+
+#define KGDB_HW_BREAKPOINT 1
+
+/*
+ * Functions each KGDB-supporting architecture must provide:
+ */
+
+/**
+ * kgdb_arch_init - Perform any architecture specific initialization.
+ *
+ * This function will handle the initialization of any architecture
+ * specific callbacks.
+ */
+extern int kgdb_arch_init(void);
+
+/**
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
+ *
+ * This function will handle the uninitialization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+extern void kgdb_arch_exit(void);
+
+/**
+ * pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
+ * @gdb_regs: A pointer to hold the registers in the order GDB wants.
+ * @regs: The &struct pt_regs of the current process.
+ *
+ * Convert the pt_regs in @regs into the format for registers that
+ * GDB expects, stored in @gdb_regs.
+ */
+extern void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs);
+
+/**
+ * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
+ * @gdb_regs: A pointer to hold the registers in the order GDB wants.
+ * @p: The &struct task_struct of the desired process.
+ *
+ * Convert the register values of the sleeping process in @p to
+ * the format that GDB expects.
+ * This function is called when kgdb does not have access to the
+ * &struct pt_regs and therefore it should fill the gdb registers
+ * @gdb_regs with what has been saved in &struct thread_struct
+ * thread field during switch_to.
+ */
+extern void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p);
+
+/**
+ * gdb_regs_to_pt_regs - Convert GDB regs to ptrace regs.
+ * @gdb_regs: A pointer to hold the registers we've received from GDB.
+ * @regs: A pointer to a &struct pt_regs to hold these values in.
+ *
+ * Convert the GDB regs in @gdb_regs into the pt_regs, and store them
+ * in @regs.
+ */
+extern void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs);
+
+/**
+ * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
+ * @vector: The error vector of the exception that happened.
+ * @signo: The signal number of the exception that happened.
+ * @err_code: The error code of the exception that happened.
+ * @remcom_in_buffer: The buffer of the packet we have read.
+ * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
+ * @regs: The &struct pt_regs of the current process.
+ *
+ * This function MUST handle the 'c' and 's' command packets,
+ * as well as packets to set / remove a hardware breakpoint, if used.
+ * If there are additional packets which the hardware needs to handle,
+ * they are handled here. The code should return -1 if it wants to
+ * process more packets, and a %0 or %1 if it wants to exit from the
+ * kgdb callback.
+ */
+extern int
+kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer,
+ char *remcom_out_buffer,
+ struct pt_regs *regs);
+
+/**
+ * kgdb_roundup_cpus - Get other CPUs into a holding pattern
+ * @flags: Current IRQ state
+ *
+ * On SMP systems, we need to get the attention of the other CPUs
+ * and get them into a known state. This should do what is needed
+ * to get the other CPUs to call kgdb_wait(). Note that on some arches,
+ * the NMI approach is not used for rounding up all the CPUs. For example,
+ * in case of MIPS, smp_call_function() is used to roundup CPUs. In
+ * this case, we have to make sure that interrupts are enabled before
+ * calling smp_call_function(). The argument to this function is
+ * the flags that will be used when restoring the interrupts. There is a
+ * local_irq_save() call before kgdb_roundup_cpus().
+ *
+ * On non-SMP systems, this is not called.
+ */
+extern void kgdb_roundup_cpus(unsigned long flags);
+
+/**
+ * kgdb_arch_set_pc - Generic callback to set the program counter
+ * @regs: Current &struct pt_regs.
+ * @pc: The new value for the program counter
+ *
+ * This function handles updating the program counter and requires an
+ * architecture specific implementation.
+ */
+extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc);
+
+
+/* Optional functions. */
+extern int kgdb_validate_break_address(unsigned long addr);
+extern int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt);
+extern int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt);
+
+/**
+ * kgdb_arch_late - Perform any architecture specific initialization.
+ *
+ * This function will handle the late initialization of any
+ * architecture specific callbacks. This is an optional function for
+ * handling things like late initialization of hw breakpoints. The
+ * default implementation does nothing.
+ */
+extern void kgdb_arch_late(void);
+
+
+/**
+ * struct kgdb_arch - Describe architecture specific values.
+ * @gdb_bpt_instr: The instruction to trigger a breakpoint.
+ * @flags: Flags for the breakpoint, currently just %KGDB_HW_BREAKPOINT.
+ * @set_breakpoint: Allow an architecture to specify how to set a software
+ * breakpoint.
+ * @remove_breakpoint: Allow an architecture to specify how to remove a
+ * software breakpoint.
+ * @set_hw_breakpoint: Allow an architecture to specify how to set a hardware
+ * breakpoint.
+ * @remove_hw_breakpoint: Allow an architecture to specify how to remove a
+ * hardware breakpoint.
+ * @disable_hw_break: Allow an architecture to specify how to disable
+ * hardware breakpoints for a single cpu.
+ * @remove_all_hw_break: Allow an architecture to specify how to remove all
+ * hardware breakpoints.
+ * @correct_hw_break: Allow an architecture to specify how to correct the
+ * hardware debug registers.
+ * @enable_nmi: Manage NMI-triggered entry to KGDB
+ */
+struct kgdb_arch {
+ unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
+ unsigned long flags;
+
+ int (*set_breakpoint)(unsigned long, char *);
+ int (*remove_breakpoint)(unsigned long, char *);
+ int (*set_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
+ int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
+ void (*disable_hw_break)(struct pt_regs *regs);
+ void (*remove_all_hw_break)(void);
+ void (*correct_hw_break)(void);
+
+ void (*enable_nmi)(bool on);
+};
+
+/**
+ * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
+ * @name: Name of the I/O driver.
+ * @read_char: Pointer to a function that will return one char.
+ * @write_char: Pointer to a function that will write one char.
+ * @flush: Pointer to a function that will flush any pending writes.
+ * @init: Pointer to a function that will initialize the device.
+ * @pre_exception: Pointer to a function that will do any prep work for
+ * the I/O driver.
+ * @post_exception: Pointer to a function that will do any cleanup work
+ * for the I/O driver.
+ * @is_console: 1 if the end device is a console, 0 if the I/O device is
+ * not a console
+ */
+struct kgdb_io {
+ const char *name;
+ int (*read_char) (void);
+ void (*write_char) (u8);
+ void (*flush) (void);
+ int (*init) (void);
+ void (*pre_exception) (void);
+ void (*post_exception) (void);
+ int is_console;
+};
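+
+/*
+ * Example (illustrative sketch, hypothetical polled driver): only the
+ * character read/write hooks are mandatory:
+ *
+ *	static struct kgdb_io my_kgdb_io_ops = {
+ *		.name		= "my_kgdbio",
+ *		.read_char	= my_get_char,
+ *		.write_char	= my_put_char,
+ *	};
+ *
+ *	err = kgdb_register_io_module(&my_kgdb_io_ops);
+ *	...
+ *	kgdb_unregister_io_module(&my_kgdb_io_ops);
+ */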
+
+extern struct kgdb_arch arch_kgdb_ops;
+
+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
+
+#ifdef CONFIG_SERIAL_KGDB_NMI
+extern int kgdb_register_nmi_console(void);
+extern int kgdb_unregister_nmi_console(void);
+extern bool kgdb_nmi_poll_knock(void);
+#else
+static inline int kgdb_register_nmi_console(void) { return 0; }
+static inline int kgdb_unregister_nmi_console(void) { return 0; }
+static inline bool kgdb_nmi_poll_knock(void) { return 1; }
+#endif
+
+extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
+extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
+extern struct kgdb_io *dbg_io_ops;
+
+extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
+extern char *kgdb_mem2hex(char *mem, char *buf, int count);
+extern int kgdb_hex2mem(char *buf, char *mem, int count);
+
+extern int kgdb_isremovedbreak(unsigned long addr);
+extern void kgdb_schedule_breakpoint(void);
+
+extern int
+kgdb_handle_exception(int ex_vector, int signo, int err_code,
+ struct pt_regs *regs);
+extern int kgdb_nmicallback(int cpu, void *regs);
+extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
+ atomic_t *snd_rdy);
+extern void gdbstub_exit(int status);
+
+extern int kgdb_single_step;
+extern atomic_t kgdb_active;
+#define in_dbg_master() \
+ (raw_smp_processor_id() == atomic_read(&kgdb_active))
+extern bool dbg_is_early;
+extern void __init dbg_late_init(void);
+#else /* ! CONFIG_KGDB */
+#define in_dbg_master() (0)
+#define dbg_late_init()
+#endif /* ! CONFIG_KGDB */
+#endif /* _KGDB_H_ */
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
new file mode 100644
index 000000000..eeb307985
--- /dev/null
+++ b/include/linux/khugepaged.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_KHUGEPAGED_H
+#define _LINUX_KHUGEPAGED_H
+
+#include <linux/sched.h> /* MMF_VM_HUGEPAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern int __khugepaged_enter(struct mm_struct *mm);
+extern void __khugepaged_exit(struct mm_struct *mm);
+extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ unsigned long vm_flags);
+
+#define khugepaged_enabled() \
+ (transparent_hugepage_flags & \
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
+#define khugepaged_always() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_FLAG))
+#define khugepaged_req_madv() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
+#define khugepaged_defrag() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
+
+static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
+ return __khugepaged_enter(mm);
+ return 0;
+}
+
+static inline void khugepaged_exit(struct mm_struct *mm)
+{
+ if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
+ __khugepaged_exit(mm);
+}
+
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+ unsigned long vm_flags)
+{
+ if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
+ if ((khugepaged_always() ||
+ (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
+ !(vm_flags & VM_NOHUGEPAGE))
+ if (__khugepaged_enter(vma->vm_mm))
+ return -ENOMEM;
+ return 0;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ return 0;
+}
+static inline void khugepaged_exit(struct mm_struct *mm)
+{
+}
+static inline int khugepaged_enter(struct vm_area_struct *vma,
+ unsigned long vm_flags)
+{
+ return 0;
+}
+static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ unsigned long vm_flags)
+{
+ return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* _LINUX_KHUGEPAGED_H */
diff --git a/include/linux/klist.h b/include/linux/klist.h
new file mode 100644
index 000000000..61e5b723a
--- /dev/null
+++ b/include/linux/klist.h
@@ -0,0 +1,68 @@
+/*
+ * klist.h - Some generic list helpers, extending struct list_head a bit.
+ *
+ * Implementations are found in lib/klist.c
+ *
+ *
+ * Copyright (C) 2005 Patrick Mochel
+ *
+ * This file is released under the GPL v2.
+ */
+
+#ifndef _LINUX_KLIST_H
+#define _LINUX_KLIST_H
+
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+
+struct klist_node;
+struct klist {
+ spinlock_t k_lock;
+ struct list_head k_list;
+ void (*get)(struct klist_node *);
+ void (*put)(struct klist_node *);
+} __attribute__ ((aligned (sizeof(void *))));
+
+#define KLIST_INIT(_name, _get, _put) \
+ { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \
+ .k_list = LIST_HEAD_INIT(_name.k_list), \
+ .get = _get, \
+ .put = _put, }
+
+#define DEFINE_KLIST(_name, _get, _put) \
+ struct klist _name = KLIST_INIT(_name, _get, _put)
+
+extern void klist_init(struct klist *k, void (*get)(struct klist_node *),
+ void (*put)(struct klist_node *));
+
+struct klist_node {
+ void *n_klist; /* never access directly */
+ struct list_head n_node;
+ struct kref n_ref;
+};
+
+extern void klist_add_tail(struct klist_node *n, struct klist *k);
+extern void klist_add_head(struct klist_node *n, struct klist *k);
+extern void klist_add_behind(struct klist_node *n, struct klist_node *pos);
+extern void klist_add_before(struct klist_node *n, struct klist_node *pos);
+
+extern void klist_del(struct klist_node *n);
+extern void klist_remove(struct klist_node *n);
+
+extern int klist_node_attached(struct klist_node *n);
+
+
+struct klist_iter {
+ struct klist *i_klist;
+ struct klist_node *i_cur;
+};
+
+
+extern void klist_iter_init(struct klist *k, struct klist_iter *i);
+extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+ struct klist_node *n);
+extern void klist_iter_exit(struct klist_iter *i);
+extern struct klist_node *klist_next(struct klist_iter *i);
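+
+/*
+ * Example (illustrative): walking a klist with the iterator API; the
+ * embedded klist_node is usually converted back to its owner with
+ * container_of():
+ *
+ *	struct klist_iter iter;
+ *	struct klist_node *n;
+ *
+ *	klist_iter_init(&my_klist, &iter);
+ *	while ((n = klist_next(&iter)))
+ *		handle(container_of(n, struct my_obj, node));
+ *	klist_iter_exit(&iter);
+ */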
+
+#endif
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
new file mode 100644
index 000000000..39f845323
--- /dev/null
+++ b/include/linux/kmemcheck.h
@@ -0,0 +1,171 @@
+#ifndef LINUX_KMEMCHECK_H
+#define LINUX_KMEMCHECK_H
+
+#include <linux/mm_types.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_KMEMCHECK
+extern int kmemcheck_enabled;
+
+/* The slab-related functions. */
+void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node);
+void kmemcheck_free_shadow(struct page *page, int order);
+void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+ size_t size);
+void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
+
+void kmemcheck_pagealloc_alloc(struct page *p, unsigned int order,
+ gfp_t gfpflags);
+
+void kmemcheck_show_pages(struct page *p, unsigned int n);
+void kmemcheck_hide_pages(struct page *p, unsigned int n);
+
+bool kmemcheck_page_is_tracked(struct page *p);
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n);
+void kmemcheck_mark_uninitialized(void *address, unsigned int n);
+void kmemcheck_mark_initialized(void *address, unsigned int n);
+void kmemcheck_mark_freed(void *address, unsigned int n);
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n);
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n);
+
+int kmemcheck_show_addr(unsigned long address);
+int kmemcheck_hide_addr(unsigned long address);
+
+bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ * struct a {
+ * int x:8, y:8;
+ * };
+ *
+ * then this should be rewritten as
+ *
+ * struct a {
+ * kmemcheck_bitfield_begin(flags);
+ * int x:8, y:8;
+ * kmemcheck_bitfield_end(flags);
+ * };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ * kmemcheck_annotate_bitfield(a, flags);
+ */
+#define kmemcheck_bitfield_begin(name) \
+ int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name) \
+ int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do { \
+ int _n; \
+ \
+ if (!ptr) \
+ break; \
+ \
+ _n = (long) &((ptr)->name##_end) \
+ - (long) &((ptr)->name##_begin); \
+ BUILD_BUG_ON(_n < 0); \
+ \
+ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+ } while (0)
+
+#define kmemcheck_annotate_variable(var) \
+ do { \
+ kmemcheck_mark_initialized(&(var), sizeof(var)); \
+ } while (0) \
+
+#else
+#define kmemcheck_enabled 0
+
+static inline void
+kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
+{
+}
+
+static inline void
+kmemcheck_free_shadow(struct page *page, int order)
+{
+}
+
+static inline void
+kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
+ size_t size)
+{
+}
+
+static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
+ size_t size)
+{
+}
+
+static inline void kmemcheck_pagealloc_alloc(struct page *p,
+ unsigned int order, gfp_t gfpflags)
+{
+}
+
+static inline bool kmemcheck_page_is_tracked(struct page *p)
+{
+ return false;
+}
+
+static inline void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_unallocated_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_uninitialized_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+static inline void kmemcheck_mark_initialized_pages(struct page *p,
+ unsigned int n)
+{
+}
+
+static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
+{
+ return true;
+}
+
+#define kmemcheck_bitfield_begin(name)
+#define kmemcheck_bitfield_end(name)
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do { \
+ } while (0)
+
+#define kmemcheck_annotate_variable(var) \
+ do { \
+ } while (0)
+
+#endif /* CONFIG_KMEMCHECK */
+
+#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
new file mode 100644
index 000000000..d0a1f99e2
--- /dev/null
+++ b/include/linux/kmemleak.h
@@ -0,0 +1,112 @@
+/*
+ * include/linux/kmemleak.h
+ *
+ * Copyright (C) 2008 ARM Limited
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __KMEMLEAK_H
+#define __KMEMLEAK_H
+
+#include <linux/slab.h>
+
+#ifdef CONFIG_DEBUG_KMEMLEAK
+
+extern void kmemleak_init(void) __ref;
+extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp) __ref;
+extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp) __ref;
+extern void kmemleak_free(const void *ptr) __ref;
+extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
+extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
+extern void kmemleak_update_trace(const void *ptr) __ref;
+extern void kmemleak_not_leak(const void *ptr) __ref;
+extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
+extern void kmemleak_no_scan(const void *ptr) __ref;
+
+static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
+ int min_count, unsigned long flags,
+ gfp_t gfp)
+{
+ if (!(flags & SLAB_NOLEAKTRACE))
+ kmemleak_alloc(ptr, size, min_count, gfp);
+}
+
+static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+{
+ if (!(flags & SLAB_NOLEAKTRACE))
+ kmemleak_free(ptr);
+}
+
+static inline void kmemleak_erase(void **ptr)
+{
+ *ptr = NULL;
+}
+
+#else
+
+static inline void kmemleak_init(void)
+{
+}
+static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp)
+{
+}
+static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
+ int min_count, unsigned long flags,
+ gfp_t gfp)
+{
+}
+static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
+ gfp_t gfp)
+{
+}
+static inline void kmemleak_free(const void *ptr)
+{
+}
+static inline void kmemleak_free_part(const void *ptr, size_t size)
+{
+}
+static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+{
+}
+static inline void kmemleak_free_percpu(const void __percpu *ptr)
+{
+}
+static inline void kmemleak_update_trace(const void *ptr)
+{
+}
+static inline void kmemleak_not_leak(const void *ptr)
+{
+}
+static inline void kmemleak_ignore(const void *ptr)
+{
+}
+static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
+{
+}
+static inline void kmemleak_erase(void **ptr)
+{
+}
+static inline void kmemleak_no_scan(const void *ptr)
+{
+}
+
+#endif /* CONFIG_DEBUG_KMEMLEAK */
+
+#endif /* __KMEMLEAK_H */
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
new file mode 100644
index 000000000..0555cc66a
--- /dev/null
+++ b/include/linux/kmod.h
@@ -0,0 +1,107 @@
+#ifndef __LINUX_KMOD_H__
+#define __LINUX_KMOD_H__
+
+/*
+ * include/linux/kmod.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/gfp.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/workqueue.h>
+#include <linux/sysctl.h>
+
+#define KMOD_PATH_LEN 256
+
+#ifdef CONFIG_MODULES
+extern char modprobe_path[]; /* for sysctl */
+/* modprobe exit status on success, negative errno on error. The return
+ * value is usually not very useful though. */
+extern __printf(2, 3)
+int __request_module(bool wait, const char *name, ...);
+#define request_module(mod...) __request_module(true, mod)
+#define request_module_nowait(mod...) __request_module(false, mod)
+#define try_then_request_module(x, mod...) \
+ ((x) ?: (__request_module(true, mod), (x)))
+#else
+static inline int request_module(const char *name, ...) { return -ENOSYS; }
+static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; }
+#define try_then_request_module(x, mod...) (x)
+#endif
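+
+/*
+ * Example (illustrative): loading a protocol module on demand and degrading
+ * gracefully when it cannot be loaded:
+ *
+ *	if (request_module("my-proto-%d", protocol) < 0)
+ *		pr_warn("protocol %d handler not available\n", protocol);
+ */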
+
+
+struct cred;
+struct file;
+
+#define UMH_NO_WAIT 0 /* don't wait at all */
+#define UMH_WAIT_EXEC 1 /* wait for the exec, but not the process */
+#define UMH_WAIT_PROC 2 /* wait for the process to complete */
+#define UMH_KILLABLE 4 /* wait for EXEC/PROC killable */
+
+struct subprocess_info {
+ struct work_struct work;
+ struct completion *complete;
+ char *path;
+ char **argv;
+ char **envp;
+ int wait;
+ int retval;
+ int (*init)(struct subprocess_info *info, struct cred *new);
+ void (*cleanup)(struct subprocess_info *info);
+ void *data;
+};
+
+extern int
+call_usermodehelper(char *path, char **argv, char **envp, int wait);
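+
+/*
+ * Example (illustrative): synchronously running a helper program and waiting
+ * for it to complete:
+ *
+ *	char *argv[] = { "/sbin/my-helper", "--event", NULL };
+ *	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
+ *
+ *	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ */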
+
+extern struct subprocess_info *
+call_usermodehelper_setup(char *path, char **argv, char **envp, gfp_t gfp_mask,
+ int (*init)(struct subprocess_info *info, struct cred *new),
+ void (*cleanup)(struct subprocess_info *), void *data);
+
+extern int
+call_usermodehelper_exec(struct subprocess_info *info, int wait);
+
+extern struct ctl_table usermodehelper_table[];
+
+enum umh_disable_depth {
+ UMH_ENABLED = 0,
+ UMH_FREEZING,
+ UMH_DISABLED,
+};
+
+extern void usermodehelper_init(void);
+
+extern int __usermodehelper_disable(enum umh_disable_depth depth);
+extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
+
+static inline int usermodehelper_disable(void)
+{
+ return __usermodehelper_disable(UMH_DISABLED);
+}
+
+static inline void usermodehelper_enable(void)
+{
+ __usermodehelper_set_disable_depth(UMH_ENABLED);
+}
+
+extern int usermodehelper_read_trylock(void);
+extern long usermodehelper_read_lock_wait(long timeout);
+extern void usermodehelper_read_unlock(void);
+
+#endif /* __LINUX_KMOD_H__ */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
new file mode 100644
index 000000000..2e7a1e032
--- /dev/null
+++ b/include/linux/kmsg_dump.h
@@ -0,0 +1,117 @@
+/*
+ * linux/include/kmsg_dump.h
+ *
+ * Copyright (C) 2009 Net Insight AB
+ *
+ * Author: Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+#ifndef _LINUX_KMSG_DUMP_H
+#define _LINUX_KMSG_DUMP_H
+
+#include <linux/errno.h>
+#include <linux/list.h>
+
+/*
+ * Keep this list arranged in rough order of priority. Anything listed after
+ * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump
+ * is passed to the kernel.
+ */
+enum kmsg_dump_reason {
+ KMSG_DUMP_UNDEF,
+ KMSG_DUMP_PANIC,
+ KMSG_DUMP_OOPS,
+ KMSG_DUMP_EMERG,
+ KMSG_DUMP_RESTART,
+ KMSG_DUMP_HALT,
+ KMSG_DUMP_POWEROFF,
+};
+
+/**
+ * struct kmsg_dumper - kernel crash message dumper structure
+ * @list: Entry in the dumper list (private)
+ * @dump: Call into dumping code which will retrieve the data
+ * through the record iterator
+ * @max_reason: filter for highest reason number that should be dumped
+ * @active: Flag that specifies if this dumper is currently dumping
+ * @registered: Flag that specifies if this is already registered
+ */
+struct kmsg_dumper {
+ struct list_head list;
+ void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
+ enum kmsg_dump_reason max_reason;
+ bool active;
+ bool registered;
+
+ /* private state of the kmsg iterator */
+ u32 cur_idx;
+ u32 next_idx;
+ u64 cur_seq;
+ u64 next_seq;
+};
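+
+/*
+ * Example (illustrative sketch of a hypothetical dumper): copy the last
+ * kernel messages to persistent storage on a panic or oops:
+ *
+ *	static void my_dump(struct kmsg_dumper *dumper,
+ *			    enum kmsg_dump_reason reason)
+ *	{
+ *		static char line[1024];
+ *		size_t len;
+ *
+ *		while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
+ *			... write len bytes of line to the backing store ...
+ *	}
+ *
+ *	static struct kmsg_dumper my_dumper = { .dump = my_dump };
+ *
+ *	err = kmsg_dump_register(&my_dumper);
+ */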
+
+#ifdef CONFIG_PRINTK
+void kmsg_dump(enum kmsg_dump_reason reason);
+
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+ char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+ char *line, size_t size, size_t *len);
+
+bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ char *buf, size_t size, size_t *len);
+
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+
+void kmsg_dump_rewind(struct kmsg_dumper *dumper);
+
+int kmsg_dump_register(struct kmsg_dumper *dumper);
+
+int kmsg_dump_unregister(struct kmsg_dumper *dumper);
+#else
+static inline void kmsg_dump(enum kmsg_dump_reason reason)
+{
+}
+
+static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
+ bool syslog, const char *line,
+ size_t size, size_t *len)
+{
+ return false;
+}
+
+static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+ const char *line, size_t size, size_t *len)
+{
+ return false;
+}
+
+static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ char *buf, size_t size, size_t *len)
+{
+ return false;
+}
+
+static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+}
+
+static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
+{
+}
+
+static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
+{
+ return -EINVAL;
+}
+
+static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* _LINUX_KMSG_DUMP_H */
diff --git a/include/linux/kobj_map.h b/include/linux/kobj_map.h
new file mode 100644
index 000000000..18ca75ffc
--- /dev/null
+++ b/include/linux/kobj_map.h
@@ -0,0 +1,19 @@
+/*
+ * kobj_map.h
+ */
+
+#ifndef _KOBJ_MAP_H_
+#define _KOBJ_MAP_H_
+
+#include <linux/mutex.h>
+
+typedef struct kobject *kobj_probe_t(dev_t, int *, void *);
+struct kobj_map;
+
+int kobj_map(struct kobj_map *, dev_t, unsigned long, struct module *,
+ kobj_probe_t *, int (*)(dev_t, void *), void *);
+void kobj_unmap(struct kobj_map *, dev_t, unsigned long);
+struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *);
+struct kobj_map *kobj_map_init(kobj_probe_t *, struct mutex *);
+
+#endif /* _KOBJ_MAP_H_ */
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
new file mode 100644
index 000000000..2d61b909f
--- /dev/null
+++ b/include/linux/kobject.h
@@ -0,0 +1,224 @@
+/*
+ * kobject.h - generic kernel object infrastructure.
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2006-2008 Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please read Documentation/kobject.txt before using the kobject
+ * interface, ESPECIALLY the parts about reference counts and object
+ * destructors.
+ */
+
+#ifndef _KOBJECT_H_
+#define _KOBJECT_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/sysfs.h>
+#include <linux/compiler.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <linux/kobject_ns.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+
+#define UEVENT_HELPER_PATH_LEN 256
+#define UEVENT_NUM_ENVP 32 /* number of env pointers */
+#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
+
+#ifdef CONFIG_UEVENT_HELPER
+/* path to the userspace helper executed on an event */
+extern char uevent_helper[];
+#endif
+
+/* counter to tag the uevent, read only except for the kobject core */
+extern u64 uevent_seqnum;
+
+/*
+ * The actions here must match the index to the string array
+ * in lib/kobject_uevent.c
+ *
+ * Do not add new actions here without checking with the driver-core
+ * maintainers. Action strings are not meant to express subsystem
+ * or device specific properties. In most cases you want to send a
+ * kobject_uevent_env(kobj, KOBJ_CHANGE, env) with additional event
+ * specific variables added to the event environment.
+ */
+enum kobject_action {
+ KOBJ_ADD,
+ KOBJ_REMOVE,
+ KOBJ_CHANGE,
+ KOBJ_MOVE,
+ KOBJ_ONLINE,
+ KOBJ_OFFLINE,
+ KOBJ_MAX
+};
+
+struct kobject {
+ const char *name;
+ struct list_head entry;
+ struct kobject *parent;
+ struct kset *kset;
+ struct kobj_type *ktype;
+ struct kernfs_node *sd;
+ struct kref kref;
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ struct delayed_work release;
+#endif
+ unsigned int state_initialized:1;
+ unsigned int state_in_sysfs:1;
+ unsigned int state_add_uevent_sent:1;
+ unsigned int state_remove_uevent_sent:1;
+ unsigned int uevent_suppress:1;
+};
+
+extern __printf(2, 3)
+int kobject_set_name(struct kobject *kobj, const char *name, ...);
+extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+ va_list vargs);
+
+static inline const char *kobject_name(const struct kobject *kobj)
+{
+ return kobj->name;
+}
+
+extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
+extern __printf(3, 4) __must_check
+int kobject_add(struct kobject *kobj, struct kobject *parent,
+ const char *fmt, ...);
+extern __printf(4, 5) __must_check
+int kobject_init_and_add(struct kobject *kobj,
+ struct kobj_type *ktype, struct kobject *parent,
+ const char *fmt, ...);
+
+extern void kobject_del(struct kobject *kobj);
+
+extern struct kobject * __must_check kobject_create(void);
+extern struct kobject * __must_check kobject_create_and_add(const char *name,
+ struct kobject *parent);
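+
+/*
+ * Example (illustrative): creating a simple directory under /sys/kernel/ and
+ * dropping the reference again when it is no longer needed:
+ *
+ *	struct kobject *my_kobj;
+ *
+ *	my_kobj = kobject_create_and_add("my_subsys", kernel_kobj);
+ *	if (!my_kobj)
+ *		return -ENOMEM;
+ *	...
+ *	kobject_put(my_kobj);
+ */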
+
+extern int __must_check kobject_rename(struct kobject *, const char *new_name);
+extern int __must_check kobject_move(struct kobject *, struct kobject *);
+
+extern struct kobject *kobject_get(struct kobject *kobj);
+extern void kobject_put(struct kobject *kobj);
+
+extern const void *kobject_namespace(struct kobject *kobj);
+extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+
+struct kobj_type {
+ void (*release)(struct kobject *kobj);
+ const struct sysfs_ops *sysfs_ops;
+ struct attribute **default_attrs;
+ const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
+ const void *(*namespace)(struct kobject *kobj);
+};
+
+struct kobj_uevent_env {
+ char *argv[3];
+ char *envp[UEVENT_NUM_ENVP];
+ int envp_idx;
+ char buf[UEVENT_BUFFER_SIZE];
+ int buflen;
+};
+
+struct kset_uevent_ops {
+ int (* const filter)(struct kset *kset, struct kobject *kobj);
+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
+ struct kobj_uevent_env *env);
+};
+
+struct kobj_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count);
+};
+
+extern const struct sysfs_ops kobj_sysfs_ops;
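
A minimal usage sketch (an editor's illustration in the style of samples/kobject/kobject-example.c, not part of this header): expose a module-local integer as /sys/kernel/kobject_example/foo. All names are arbitrary, and the usual module includes (<linux/module.h>, <linux/sysfs.h>) are assumed.

static int foo;

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", foo);
}

static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int ret = kstrtoint(buf, 10, &foo);

	return ret ? ret : count;
}

static struct kobj_attribute foo_attr = __ATTR(foo, 0644, foo_show, foo_store);
static struct kobject *example_kobj;

static int __init example_init(void)
{
	/* creates /sys/kernel/kobject_example/ with a "foo" file in it */
	example_kobj = kobject_create_and_add("kobject_example", kernel_kobj);
	if (!example_kobj)
		return -ENOMEM;
	return sysfs_create_file(example_kobj, &foo_attr.attr);
}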
+
+struct sock;
+
+/**
+ * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
+ *
+ * A kset defines a group of kobjects. They can be individually
+ * different "types" but overall these kobjects all want to be grouped
+ * together and operated on in the same manner. ksets are used to
+ * define the attribute callbacks and other common events that happen to
+ * a kobject.
+ *
+ * @list: the list of all kobjects for this kset
+ * @list_lock: a lock for iterating over the kobjects
+ * @kobj: the embedded kobject for this kset (recursion, isn't it fun...)
+ * @uevent_ops: the set of uevent operations for this kset. These are
+ * called whenever a kobject has something happen to it so that the kset
+ * can add new environment variables, or filter out the uevents if so
+ * desired.
+ */
+struct kset {
+ struct list_head list;
+ spinlock_t list_lock;
+ struct kobject kobj;
+ const struct kset_uevent_ops *uevent_ops;
+};
+
+extern void kset_init(struct kset *kset);
+extern int __must_check kset_register(struct kset *kset);
+extern void kset_unregister(struct kset *kset);
+extern struct kset * __must_check kset_create_and_add(const char *name,
+ const struct kset_uevent_ops *u,
+ struct kobject *parent_kobj);
+
+static inline struct kset *to_kset(struct kobject *kobj)
+{
+ return kobj ? container_of(kobj, struct kset, kobj) : NULL;
+}
+
+static inline struct kset *kset_get(struct kset *k)
+{
+ return k ? to_kset(kobject_get(&k->kobj)) : NULL;
+}
+
+static inline void kset_put(struct kset *k)
+{
+ kobject_put(&k->kobj);
+}
+
+static inline struct kobj_type *get_ktype(struct kobject *kobj)
+{
+ return kobj->ktype;
+}
+
+extern struct kobject *kset_find_obj(struct kset *, const char *);
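
A hedged sketch of the grouping pattern described in the kset comment above (after the fashion of samples/kobject/kset-example.c). struct my_obj and my_ktype are hypothetical stand-ins for a real object with an embedded kobject and its kobj_type; kfree() comes from <linux/slab.h>.

struct my_obj {
	struct kobject kobj;
	const char *name;
};

static void my_obj_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct my_obj, kobj));
}

static struct kobj_type my_ktype = {
	.release	= my_obj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
};

static struct kset *example_kset;

static int __init example_kset_init(void)
{
	/* creates /sys/kernel/example_kset/ */
	example_kset = kset_create_and_add("example_kset", NULL, kernel_kobj);
	return example_kset ? 0 : -ENOMEM;
}

static int example_add_member(struct my_obj *obj)
{
	/* the kset must be set before kobject_init_and_add() */
	obj->kobj.kset = example_kset;
	return kobject_init_and_add(&obj->kobj, &my_ktype, NULL, "%s", obj->name);
}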
+
+/* The global /sys/kernel/ kobject for people to chain off of */
+extern struct kobject *kernel_kobj;
+/* The global /sys/kernel/mm/ kobject for people to chain off of */
+extern struct kobject *mm_kobj;
+/* The global /sys/hypervisor/ kobject for people to chain off of */
+extern struct kobject *hypervisor_kobj;
+/* The global /sys/power/ kobject for people to chain off of */
+extern struct kobject *power_kobj;
+/* The global /sys/firmware/ kobject for people to chain off of */
+extern struct kobject *firmware_kobj;
+
+int kobject_uevent(struct kobject *kobj, enum kobject_action action);
+int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+ char *envp[]);
+
+__printf(2, 3)
+int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...);
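
Following the advice in the comment above enum kobject_action, a subsystem-specific state change is reported as a KOBJ_CHANGE event with an extra environment variable rather than a new action. A minimal sketch (the variable is illustrative):

static int report_threshold_crossed(struct kobject *kobj)
{
	char *envp[] = { "EVENT=threshold_crossed", NULL };

	return kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
}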
+
+int kobject_action_type(const char *buf, size_t count,
+ enum kobject_action *type);
+
+#endif /* _KOBJECT_H_ */
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
new file mode 100644
index 000000000..df32d2508
--- /dev/null
+++ b/include/linux/kobject_ns.h
@@ -0,0 +1,60 @@
+/* Kernel object name space definitions
+ *
+ * Copyright (c) 2002-2003 Patrick Mochel
+ * Copyright (c) 2002-2003 Open Source Development Labs
+ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2006-2008 Novell Inc.
+ *
+ * Split from kobject.h by David Howells (dhowells@redhat.com)
+ *
+ * This file is released under the GPLv2.
+ *
+ * Please read Documentation/kobject.txt before using the kobject
+ * interface, ESPECIALLY the parts about reference counts and object
+ * destructors.
+ */
+
+#ifndef _LINUX_KOBJECT_NS_H
+#define _LINUX_KOBJECT_NS_H
+
+struct sock;
+struct kobject;
+
+/*
+ * Namespace types which are used to tag kobjects and sysfs entries.
+ * Network namespace will likely be the first.
+ */
+enum kobj_ns_type {
+ KOBJ_NS_TYPE_NONE = 0,
+ KOBJ_NS_TYPE_NET,
+ KOBJ_NS_TYPES
+};
+
+/*
+ * Callbacks so sysfs can determine namespaces
+ * @grab_current_ns: return a new reference to the calling task's namespace
+ * @netlink_ns: return the namespace to which a socket belongs
+ * @initial_ns: return the initial namespace (i.e. init_net_ns)
+ * @drop_ns: drop a reference to the namespace
+ */
+struct kobj_ns_type_operations {
+ enum kobj_ns_type type;
+ bool (*current_may_mount)(void);
+ void *(*grab_current_ns)(void);
+ const void *(*netlink_ns)(struct sock *sk);
+ const void *(*initial_ns)(void);
+ void (*drop_ns)(void *);
+};
+
+int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
+int kobj_ns_type_registered(enum kobj_ns_type type);
+const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
+const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
+
+bool kobj_ns_current_may_mount(enum kobj_ns_type type);
+void *kobj_ns_grab_current(enum kobj_ns_type type);
+const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
+const void *kobj_ns_initial(enum kobj_ns_type type);
+void kobj_ns_drop(enum kobj_ns_type type, void *ns);
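
For orientation, a sketch of the shape of a namespace-type registration; the real in-tree user is the network namespace (net_ns_type_operations in net/core/net_namespace.c), and every callback body below is a placeholder.

static bool example_current_may_mount(void)
{
	return true;		/* e.g. a capability check in the current ns */
}

static void *example_grab_current_ns(void)
{
	return NULL;		/* a real hook returns a counted ns reference */
}

static const void *example_netlink_ns(struct sock *sk)
{
	return NULL;		/* the namespace owning the netlink socket */
}

static const void *example_initial_ns(void)
{
	return NULL;		/* e.g. &init_net for the network namespace */
}

static void example_drop_ns(void *ns)
{
}

static const struct kobj_ns_type_operations example_ns_ops = {
	.type			= KOBJ_NS_TYPE_NET,
	.current_may_mount	= example_current_may_mount,
	.grab_current_ns	= example_grab_current_ns,
	.netlink_ns		= example_netlink_ns,
	.initial_ns		= example_initial_ns,
	.drop_ns		= example_drop_ns,
};

/* registered once, early: kobj_ns_type_register(&example_ns_ops); */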
+
+#endif /* _LINUX_KOBJECT_NS_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
new file mode 100644
index 000000000..1ab54754a
--- /dev/null
+++ b/include/linux/kprobes.h
@@ -0,0 +1,496 @@
+#ifndef _LINUX_KPROBES_H
+#define _LINUX_KPROBES_H
+/*
+ * Kernel Probes (KProbes)
+ * include/linux/kprobes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ *
+ * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+ * Probes initial implementation ( includes suggestions from
+ * Rusty Russell).
+ * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
+ * interface to access function arguments.
+ * 2005-May Hien Nguyen <hien@us.ibm.com> and Jim Keniston
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
+ */
+#include <linux/compiler.h> /* for __kprobes */
+#include <linux/linkage.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/smp.h>
+#include <linux/bug.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_KPROBES
+#include <asm/kprobes.h>
+
+/* kprobe_status settings */
+#define KPROBE_HIT_ACTIVE 0x00000001
+#define KPROBE_HIT_SS 0x00000002
+#define KPROBE_REENTER 0x00000004
+#define KPROBE_HIT_SSDONE 0x00000008
+
+#else /* CONFIG_KPROBES */
+typedef int kprobe_opcode_t;
+struct arch_specific_insn {
+ int dummy;
+};
+#endif /* CONFIG_KPROBES */
+
+struct kprobe;
+struct pt_regs;
+struct kretprobe;
+struct kretprobe_instance;
+typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
+typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
+typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
+ unsigned long flags);
+typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
+ int trapnr);
+typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
+ struct pt_regs *);
+
+struct kprobe {
+ struct hlist_node hlist;
+
+ /* list of kprobes for multi-handler support */
+ struct list_head list;
+
+ /* count the number of times this probe was temporarily disarmed */
+ unsigned long nmissed;
+
+ /* location of the probe point */
+ kprobe_opcode_t *addr;
+
+ /* Allow user to indicate symbol name of the probe point */
+ const char *symbol_name;
+
+ /* Offset into the symbol */
+ unsigned int offset;
+
+ /* Called before addr is executed. */
+ kprobe_pre_handler_t pre_handler;
+
+ /* Called after addr is executed, unless... */
+ kprobe_post_handler_t post_handler;
+
+ /*
+ * ... called if executing addr causes a fault (eg. page fault).
+ * Return 1 if it handled fault, otherwise kernel will see it.
+ */
+ kprobe_fault_handler_t fault_handler;
+
+ /*
+ * ... called if breakpoint trap occurs in probe handler.
+ * Return 1 if it handled break, otherwise kernel will see it.
+ */
+ kprobe_break_handler_t break_handler;
+
+ /* Saved opcode (which has been replaced with breakpoint) */
+ kprobe_opcode_t opcode;
+
+ /* copy of the original instruction */
+ struct arch_specific_insn ainsn;
+
+ /*
+ * Indicates various status flags.
+ * Protected by kprobe_mutex after this kprobe is registered.
+ */
+ u32 flags;
+};
+
+/* Kprobe status flags */
+#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */
+#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */
+#define KPROBE_FLAG_OPTIMIZED 4 /*
+ * probe is really optimized.
+ * NOTE:
+ * this flag is only for optimized_kprobe.
+ */
+#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
+
+/* Has this kprobe gone? */
+static inline int kprobe_gone(struct kprobe *p)
+{
+ return p->flags & KPROBE_FLAG_GONE;
+}
+
+/* Is this kprobe disabled? */
+static inline int kprobe_disabled(struct kprobe *p)
+{
+ return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
+}
+
+/* Is this kprobe really running the optimized path? */
+static inline int kprobe_optimized(struct kprobe *p)
+{
+ return p->flags & KPROBE_FLAG_OPTIMIZED;
+}
+
+/* Does this kprobe use ftrace? */
+static inline int kprobe_ftrace(struct kprobe *p)
+{
+ return p->flags & KPROBE_FLAG_FTRACE;
+}
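
A minimal registration sketch in the spirit of samples/kprobes/kprobe_example.c; the probed symbol is only an example and must exist on the running kernel.

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p (%s+0x%x)\n",
		p->addr, p->symbol_name, p->offset);
	return 0;	/* 0: let the probed instruction execute normally */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* illustrative target */
	.pre_handler	= handler_pre,
};

/*
 * module init/exit:
 *	ret = register_kprobe(&kp);	negative on failure
 *	...
 *	unregister_kprobe(&kp);
 */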
+
+/*
+ * Special probe type that uses setjmp-longjmp type tricks to resume
+ * execution at a specified entry with a matching prototype corresponding
+ * to the probed function - a trick to enable arguments to become
+ * accessible seamlessly by probe handling logic.
+ * Note:
+ * Because of the way compilers allocate stack space for local variables
+ * etc upfront, regardless of sub-scopes within a function, this mirroring
+ * principle currently works only for probes placed on function entry points.
+ */
+struct jprobe {
+ struct kprobe kp;
+ void *entry; /* probe handling code to jump to */
+};
+
+/* For backward compatibility with old code using JPROBE_ENTRY() */
+#define JPROBE_ENTRY(handler) (handler)
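
A sketch of the mirroring trick described above (cf. samples/kprobes/jprobe_example.c): the entry handler copies the probed function's prototype, inspects the arguments, and must finish with jprobe_return(). The target function and its signature are illustrative.

/* Same prototype as the (illustrative) probed function. */
static long j_example_target(unsigned long arg0, unsigned long arg1)
{
	pr_info("jprobe: arg0=%lx arg1=%lx\n", arg0, arg1);
	jprobe_return();	/* mandatory; never returns normally */
	return 0;		/* not reached */
}

static struct jprobe my_jprobe = {
	.entry	= j_example_target,
	.kp	= {
		.symbol_name = "example_target",	/* illustrative */
	},
};

/* register_jprobe(&my_jprobe); ... unregister_jprobe(&my_jprobe); */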
+
+/*
+ * Function-return probe -
+ * Note:
+ * User needs to provide a handler function, and initialize maxactive.
+ * maxactive - The maximum number of instances of the probed function that
+ * can be active concurrently.
+ * nmissed - tracks the number of times the probed function's return was
+ * ignored, due to maxactive being too low.
+ *
+ */
+struct kretprobe {
+ struct kprobe kp;
+ kretprobe_handler_t handler;
+ kretprobe_handler_t entry_handler;
+ int maxactive;
+ int nmissed;
+ size_t data_size;
+ struct hlist_head free_instances;
+ raw_spinlock_t lock;
+};
+
+struct kretprobe_instance {
+ struct hlist_node hlist;
+ struct kretprobe *rp;
+ kprobe_opcode_t *ret_addr;
+ struct task_struct *task;
+ char data[0];
+};
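
A return-probe sketch matching the description above (cf. samples/kprobes/kretprobe_example.c); regs_return_value() comes from <asm/ptrace.h> and the probed symbol is illustrative.

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
		regs_return_value(regs));
	return 0;
}

static struct kretprobe my_kretprobe = {
	.kp.symbol_name	= "do_fork",	/* illustrative */
	.handler	= ret_handler,
	.maxactive	= 20,		/* instances that may run concurrently */
};

/* register_kretprobe(&my_kretprobe); ... unregister_kretprobe(&my_kretprobe); */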
+
+struct kretprobe_blackpoint {
+ const char *name;
+ void *addr;
+};
+
+struct kprobe_blacklist_entry {
+ struct list_head list;
+ unsigned long start_addr;
+ unsigned long end_addr;
+};
+
+#ifdef CONFIG_KPROBES
+DECLARE_PER_CPU(struct kprobe *, current_kprobe);
+DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+/*
+ * For #ifdef avoidance:
+ */
+static inline int kprobes_built_in(void)
+{
+ return 1;
+}
+
+#ifdef CONFIG_KRETPROBES
+extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs);
+extern int arch_trampoline_kprobe(struct kprobe *p);
+#else /* CONFIG_KRETPROBES */
+static inline void arch_prepare_kretprobe(struct kretprobe *rp,
+ struct pt_regs *regs)
+{
+}
+static inline int arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+#endif /* CONFIG_KRETPROBES */
+
+extern struct kretprobe_blackpoint kretprobe_blacklist[];
+
+static inline void kretprobe_assert(struct kretprobe_instance *ri,
+ unsigned long orig_ret_address, unsigned long trampoline_address)
+{
+ if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
+ printk("kretprobe BUG!: Processing kretprobe %p @ %p\n",
+ ri->rp, ri->rp->kp.addr);
+ BUG();
+ }
+}
+
+#ifdef CONFIG_KPROBES_SANITY_TEST
+extern int init_test_probes(void);
+#else
+static inline int init_test_probes(void)
+{
+ return 0;
+}
+#endif /* CONFIG_KPROBES_SANITY_TEST */
+
+extern int arch_prepare_kprobe(struct kprobe *p);
+extern void arch_arm_kprobe(struct kprobe *p);
+extern void arch_disarm_kprobe(struct kprobe *p);
+extern int arch_init_kprobes(void);
+extern void show_registers(struct pt_regs *regs);
+extern void kprobes_inc_nmissed_count(struct kprobe *p);
+extern bool arch_within_kprobe_blacklist(unsigned long addr);
+
+struct kprobe_insn_cache {
+ struct mutex mutex;
+ void *(*alloc)(void); /* allocate insn page */
+ void (*free)(void *); /* free insn page */
+ struct list_head pages; /* list of kprobe_insn_page */
+ size_t insn_size; /* size of instruction slot */
+ int nr_garbage;
+};
+
+extern kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c);
+extern void __free_insn_slot(struct kprobe_insn_cache *c,
+ kprobe_opcode_t *slot, int dirty);
+
+#define DEFINE_INSN_CACHE_OPS(__name) \
+extern struct kprobe_insn_cache kprobe_##__name##_slots; \
+ \
+static inline kprobe_opcode_t *get_##__name##_slot(void) \
+{ \
+ return __get_insn_slot(&kprobe_##__name##_slots); \
+} \
+ \
+static inline void free_##__name##_slot(kprobe_opcode_t *slot, int dirty)\
+{ \
+ __free_insn_slot(&kprobe_##__name##_slots, slot, dirty); \
+} \
+
+DEFINE_INSN_CACHE_OPS(insn);
+
+#ifdef CONFIG_OPTPROBES
+/*
+ * Internal structure for direct jump optimized probe
+ */
+struct optimized_kprobe {
+ struct kprobe kp;
+ struct list_head list; /* list for optimizing queue */
+ struct arch_optimized_insn optinsn;
+};
+
+/* Architecture dependent functions for direct jump optimization */
+extern int arch_prepared_optinsn(struct arch_optimized_insn *optinsn);
+extern int arch_check_optimized_kprobe(struct optimized_kprobe *op);
+extern int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
+ struct kprobe *orig);
+extern void arch_remove_optimized_kprobe(struct optimized_kprobe *op);
+extern void arch_optimize_kprobes(struct list_head *oplist);
+extern void arch_unoptimize_kprobes(struct list_head *oplist,
+ struct list_head *done_list);
+extern void arch_unoptimize_kprobe(struct optimized_kprobe *op);
+extern int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+ unsigned long addr);
+
+extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+DEFINE_INSN_CACHE_OPS(optinsn);
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_kprobes_optimization;
+extern int proc_kprobes_optimization_handler(struct ctl_table *table,
+ int write, void __user *buffer,
+ size_t *length, loff_t *ppos);
+#endif
+
+#endif /* CONFIG_OPTPROBES */
+#ifdef CONFIG_KPROBES_ON_FTRACE
+extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs);
+extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+#endif
+
+int arch_check_ftrace_location(struct kprobe *p);
+
+/* Get the kprobe at this addr (if any) - called with preemption disabled */
+struct kprobe *get_kprobe(void *addr);
+void kretprobe_hash_lock(struct task_struct *tsk,
+ struct hlist_head **head, unsigned long *flags);
+void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags);
+struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
+
+/* kprobe_running() will just return the current_kprobe on this CPU */
+static inline struct kprobe *kprobe_running(void)
+{
+ return (__this_cpu_read(current_kprobe));
+}
+
+static inline void reset_current_kprobe(void)
+{
+ __this_cpu_write(current_kprobe, NULL);
+}
+
+static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
+{
+ return this_cpu_ptr(&kprobe_ctlblk);
+}
+
+int register_kprobe(struct kprobe *p);
+void unregister_kprobe(struct kprobe *p);
+int register_kprobes(struct kprobe **kps, int num);
+void unregister_kprobes(struct kprobe **kps, int num);
+int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
+int longjmp_break_handler(struct kprobe *, struct pt_regs *);
+int register_jprobe(struct jprobe *p);
+void unregister_jprobe(struct jprobe *p);
+int register_jprobes(struct jprobe **jps, int num);
+void unregister_jprobes(struct jprobe **jps, int num);
+void jprobe_return(void);
+unsigned long arch_deref_entry_point(void *);
+
+int register_kretprobe(struct kretprobe *rp);
+void unregister_kretprobe(struct kretprobe *rp);
+int register_kretprobes(struct kretprobe **rps, int num);
+void unregister_kretprobes(struct kretprobe **rps, int num);
+
+void kprobe_flush_task(struct task_struct *tk);
+void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
+
+int disable_kprobe(struct kprobe *kp);
+int enable_kprobe(struct kprobe *kp);
+
+void dump_kprobe(struct kprobe *kp);
+
+#else /* !CONFIG_KPROBES: */
+
+static inline int kprobes_built_in(void)
+{
+ return 0;
+}
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ return 0;
+}
+static inline struct kprobe *get_kprobe(void *addr)
+{
+ return NULL;
+}
+static inline struct kprobe *kprobe_running(void)
+{
+ return NULL;
+}
+static inline int register_kprobe(struct kprobe *p)
+{
+ return -ENOSYS;
+}
+static inline int register_kprobes(struct kprobe **kps, int num)
+{
+ return -ENOSYS;
+}
+static inline void unregister_kprobe(struct kprobe *p)
+{
+}
+static inline void unregister_kprobes(struct kprobe **kps, int num)
+{
+}
+static inline int register_jprobe(struct jprobe *p)
+{
+ return -ENOSYS;
+}
+static inline int register_jprobes(struct jprobe **jps, int num)
+{
+ return -ENOSYS;
+}
+static inline void unregister_jprobe(struct jprobe *p)
+{
+}
+static inline void unregister_jprobes(struct jprobe **jps, int num)
+{
+}
+static inline void jprobe_return(void)
+{
+}
+static inline int register_kretprobe(struct kretprobe *rp)
+{
+ return -ENOSYS;
+}
+static inline int register_kretprobes(struct kretprobe **rps, int num)
+{
+ return -ENOSYS;
+}
+static inline void unregister_kretprobe(struct kretprobe *rp)
+{
+}
+static inline void unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
+static inline void kprobe_flush_task(struct task_struct *tk)
+{
+}
+static inline int disable_kprobe(struct kprobe *kp)
+{
+ return -ENOSYS;
+}
+static inline int enable_kprobe(struct kprobe *kp)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_KPROBES */
+static inline int disable_kretprobe(struct kretprobe *rp)
+{
+ return disable_kprobe(&rp->kp);
+}
+static inline int enable_kretprobe(struct kretprobe *rp)
+{
+ return enable_kprobe(&rp->kp);
+}
+static inline int disable_jprobe(struct jprobe *jp)
+{
+ return disable_kprobe(&jp->kp);
+}
+static inline int enable_jprobe(struct jprobe *jp)
+{
+ return enable_kprobe(&jp->kp);
+}
+
+#ifdef CONFIG_KPROBES
+/*
+ * Blacklist generating macro. Use this macro to specify functions
+ * which must not be probed.
+ */
+#define __NOKPROBE_SYMBOL(fname) \
+static unsigned long __used \
+ __attribute__((section("_kprobe_blacklist"))) \
+ _kbl_addr_##fname = (unsigned long)fname;
+#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname)
+#else
+#define NOKPROBE_SYMBOL(fname)
+#endif
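
Typical use, placed right after the function that must never be probed (the function itself is illustrative):

static int sensitive_exception_helper(struct pt_regs *regs)
{
	/* runs in a context where taking a kprobe trap would recurse */
	return 0;
}
NOKPROBE_SYMBOL(sensitive_exception_helper);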
+
+#endif /* _LINUX_KPROBES_H */
diff --git a/include/linux/kref.h b/include/linux/kref.h
new file mode 100644
index 000000000..484604d18
--- /dev/null
+++ b/include/linux/kref.h
@@ -0,0 +1,171 @@
+/*
+ * kref.h - library routines for handling generic reference counted objects
+ *
+ * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2004 IBM Corp.
+ *
+ * based on kobject.h which was:
+ * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (C) 2002-2003 Open Source Development Labs
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#ifndef _KREF_H_
+#define _KREF_H_
+
+#include <linux/bug.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+struct kref {
+ atomic_t refcount;
+};
+
+/**
+ * kref_init - initialize object.
+ * @kref: object in question.
+ */
+static inline void kref_init(struct kref *kref)
+{
+ atomic_set(&kref->refcount, 1);
+}
+
+/**
+ * kref_get - increment refcount for object.
+ * @kref: object.
+ */
+static inline void kref_get(struct kref *kref)
+{
+ /* If the refcount was 0 before incrementing, then we have a race
+ * condition: this kref is being freed by some other thread right now.
+ * In that case one should use kref_get_unless_zero() instead.
+ */
+ WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
+}
+
+/**
+ * kref_sub - subtract a number of refcounts for object.
+ * @kref: object.
+ * @count: Number of refcounts to subtract.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function. If the caller does pass kfree to this
+ * function, you will be publicly mocked mercilessly by the kref
+ * maintainer, and anyone else who happens to notice it. You have
+ * been warned.
+ *
+ * Subtract @count from the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0. Beware, if this
+ * function returns 0, you still cannot count on the kref remaining in
+ * memory. Only use the return value to tell whether the kref is now gone,
+ * not whether it is still present.
+ */
+static inline int kref_sub(struct kref *kref, unsigned int count,
+ void (*release)(struct kref *kref))
+{
+ WARN_ON(release == NULL);
+
+ if (atomic_sub_and_test((int) count, &kref->refcount)) {
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * kref_put - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function. If the caller does pass kfree to this
+ * function, you will be publicly mocked mercilessly by the kref
+ * maintainer, and anyone else who happens to notice it. You have
+ * been warned.
+ *
+ * Decrement the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0. Beware, if this
+ * function returns 0, you still cannot count on the kref remaining in
+ * memory. Only use the return value to tell whether the kref is now gone,
+ * not whether it is still present.
+ */
+static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
+{
+ return kref_sub(kref, 1, release);
+}
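
A hedged sketch of the usual embedding pattern: the kref lives inside the refcounted object and the release callback frees the container (kzalloc()/kfree() come from <linux/slab.h>; the type is illustrative).

struct my_obj {
	struct kref refcount;
	/* ... payload ... */
};

static void my_obj_release(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, refcount);

	kfree(obj);
}

/*
 * Typical lifetime:
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	kref_init(&obj->refcount);		   initial reference
 *	kref_get(&obj->refcount);		   another user takes a reference
 *	kref_put(&obj->refcount, my_obj_release);  last put frees the object
 */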
+
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ * last reference to the object is released.
+ * This pointer is required, and it is not acceptable to pass kfree
+ * in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identically to kref_put() with one exception: if the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count. The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+ void (*release)(struct kref *kref),
+ spinlock_t *lock)
+{
+ unsigned long flags;
+
+ WARN_ON(release == NULL);
+ if (atomic_add_unless(&kref->refcount, -1, 1))
+ return 0;
+ spin_lock_irqsave(lock, flags);
+ if (atomic_dec_and_test(&kref->refcount)) {
+ release(kref);
+ local_irq_restore(flags);
+ return 1;
+ }
+ spin_unlock_irqrestore(lock, flags);
+ return 0;
+}
+
+static inline int kref_put_mutex(struct kref *kref,
+ void (*release)(struct kref *kref),
+ struct mutex *lock)
+{
+ WARN_ON(release == NULL);
+ if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
+ mutex_lock(lock);
+ if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
+ mutex_unlock(lock);
+ return 0;
+ }
+ release(kref);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * kref_get_unless_zero - Increment refcount for object unless it is zero.
+ * @kref: object.
+ *
+ * Return non-zero if the increment succeeded. Otherwise return 0.
+ *
+ * This function is intended to simplify locking around refcounting for
+ * objects that can be looked up from a lookup structure, and which are
+ * removed from that lookup structure in the object destructor.
+ * Operations on such objects require at least a read lock around
+ * lookup + kref_get, and a write lock around kref_put + remove from lookup
+ * structure. Furthermore, RCU implementations become extremely tricky.
+ * With a lookup followed by a kref_get_unless_zero *with return value check*
+ * locking in the kref_put path can be deferred to the actual removal from
+ * the lookup structure and RCU lookups become trivial.
+ */
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+ return atomic_add_unless(&kref->refcount, 1, 0);
+}
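
A sketch of the lookup pattern described above, using a hypothetical struct my_item with a list_head, an integer id and an embedded kref; the final kref_put() and the list_del() are assumed to happen under the same lock elsewhere.

struct my_item {
	struct list_head node;
	int id;
	struct kref refcount;
};

static struct my_item *my_item_lookup(struct list_head *head,
				      spinlock_t *lock, int id)
{
	struct my_item *item, *found = NULL;

	spin_lock(lock);
	list_for_each_entry(item, head, node) {
		if (item->id == id && kref_get_unless_zero(&item->refcount)) {
			/* refcount was non-zero, so the item stays alive */
			found = item;
			break;
		}
	}
	spin_unlock(lock);

	return found;
}
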
+#endif /* _KREF_H_ */
diff --git a/include/linux/ks0108.h b/include/linux/ks0108.h
new file mode 100644
index 000000000..cb311798e
--- /dev/null
+++ b/include/linux/ks0108.h
@@ -0,0 +1,49 @@
+/*
+ * Filename: ks0108.h
+ * Version: 0.1.0
+ * Description: ks0108 LCD Controller driver header
+ * License: GPLv2
+ *
+ * Author: Copyright (C) Miguel Ojeda Sandonis
+ * Date: 2006-10-31
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef _KS0108_H_
+#define _KS0108_H_
+
+/* Write a byte to the data port */
+extern void ks0108_writedata(unsigned char byte);
+
+/* Write a byte to the control port */
+extern void ks0108_writecontrol(unsigned char byte);
+
+/* Set the controller's current display state (0..1) */
+extern void ks0108_displaystate(unsigned char state);
+
+/* Set the controller's current startline (0..63) */
+extern void ks0108_startline(unsigned char startline);
+
+/* Set the controller's current address (0..63) */
+extern void ks0108_address(unsigned char address);
+
+/* Set the controller's current page (0..7) */
+extern void ks0108_page(unsigned char page);
+
+/* Is the module initialized? */
+extern unsigned char ks0108_isinited(void);
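
These hooks are exported by the ks0108 parallel-port driver for LCD drivers stacked on top of it (cfag12864b in-tree). A hedged init sketch:

static void example_lcd_home(void)
{
	if (!ks0108_isinited())
		return;

	ks0108_displaystate(1);		/* display on */
	ks0108_startline(0);		/* scroll to the top */
	ks0108_page(0);			/* first 8-pixel row band */
	ks0108_address(0);		/* leftmost column */
	ks0108_writedata(0x00);		/* clear one byte of the frame */
}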
+
+#endif /* _KS0108_H_ */
diff --git a/include/linux/ks8842.h b/include/linux/ks8842.h
new file mode 100644
index 000000000..14ba44522
--- /dev/null
+++ b/include/linux/ks8842.h
@@ -0,0 +1,38 @@
+/*
+ * ks8842.h KS8842 platform data struct definition
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_KS8842_H
+#define _LINUX_KS8842_H
+
+#include <linux/if_ether.h>
+
+/**
+ * struct ks8842_platform_data - Platform data of the KS8842 network driver
+ * @macaddr: The MAC address of the device; set to all zeros to use the one
+ * stored in the chip.
+ * @rx_dma_channel: The DMA channel to use for RX, -1 for none.
+ * @tx_dma_channel: The DMA channel to use for TX, -1 for none.
+ *
+ */
+struct ks8842_platform_data {
+ u8 macaddr[ETH_ALEN];
+ int rx_dma_channel;
+ int tx_dma_channel;
+};
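
A board-file sketch with illustrative values; an all-zero macaddr would make the driver use the address stored in the chip instead.

static struct ks8842_platform_data example_ks8842_pdata = {
	.macaddr	= { 0x00, 0x10, 0xa1, 0x00, 0x00, 0x01 },
	.rx_dma_channel	= -1,	/* PIO only */
	.tx_dma_channel	= -1,
};

/* hung off the board's platform device as .dev.platform_data */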
+
+#endif
diff --git a/include/linux/ks8851_mll.h b/include/linux/ks8851_mll.h
new file mode 100644
index 000000000..e9ccfb59e
--- /dev/null
+++ b/include/linux/ks8851_mll.h
@@ -0,0 +1,33 @@
+/*
+ * ks8851_mll platform data struct definition
+ * Copyright (c) 2012 BTicino S.p.A.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_KS8851_MLL_H
+#define _LINUX_KS8851_MLL_H
+
+#include <linux/if_ether.h>
+
+/**
+ * struct ks8851_mll_platform_data - Platform data of the KS8851_MLL network driver
+ * @macaddr: The MAC address of the device; set to all zeros to use the one
+ * stored in the chip.
+ */
+struct ks8851_mll_platform_data {
+ u8 mac_addr[ETH_ALEN];
+};
+
+#endif
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
new file mode 100644
index 000000000..06861d8ef
--- /dev/null
+++ b/include/linux/ksm.h
@@ -0,0 +1,123 @@
+#ifndef __LINUX_KSM_H
+#define __LINUX_KSM_H
+/*
+ * Memory merging support.
+ *
+ * This code enables dynamic sharing of identical pages found in different
+ * memory areas, even if they are not shared by fork().
+ */
+
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/sched.h>
+
+struct stable_node;
+struct mem_cgroup;
+
+#ifdef CONFIG_KSM
+int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, unsigned long *vm_flags);
+
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+ return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+ struct stable_node *stable_node)
+{
+ page->mapping = (void *)stable_node +
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
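
ksm_madvise(), declared above, is the kernel-side hook behind madvise(2): userspace nominates an area with MADV_MERGEABLE and ksmd merges identical pages later. A userspace-side sketch:

#include <sys/mman.h>

/* Ask KSM to consider an anonymous buffer for merging. */
static int make_mergeable(void *buf, size_t len)
{
	return madvise(buf, len, MADV_MERGEABLE);
}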
+
+/*
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma). do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
+ */
+struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address);
+
+int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
+#ifdef CONFIG_KSM_LEGACY
+int __ksm_enter(struct mm_struct *mm);
+void __ksm_exit(struct mm_struct *mm);
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+ return __ksm_enter(mm);
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+ if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ __ksm_exit(mm);
+}
+
+#elif defined(CONFIG_UKSM)
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+#endif /* !CONFIG_UKSM */
+
+#else /* !CONFIG_KSM */
+
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+ return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+
+#ifdef CONFIG_MMU
+static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, int advice, unsigned long *vm_flags)
+{
+ return 0;
+}
+
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+ struct mem_cgroup *memcg, unsigned long *vm_flags)
+{
+ return 0;
+}
+
+static inline int rmap_walk_ksm(struct page *page,
+ struct rmap_walk_control *rwc)
+{
+ return 0;
+}
+
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
+#endif /* CONFIG_MMU */
+#endif /* !CONFIG_KSM */
+
+#include <linux/uksm.h>
+
+#endif /* __LINUX_KSM_H */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
new file mode 100644
index 000000000..13d55206c
--- /dev/null
+++ b/include/linux/kthread.h
@@ -0,0 +1,131 @@
+#ifndef _LINUX_KTHREAD_H
+#define _LINUX_KTHREAD_H
+/* Simple interface for creating and stopping kernel threads without mess. */
+#include <linux/err.h>
+#include <linux/sched.h>
+
+__printf(4, 5)
+struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
+ void *data,
+ int node,
+ const char namefmt[], ...);
+
+#define kthread_create(threadfn, data, namefmt, arg...) \
+ kthread_create_on_node(threadfn, data, -1, namefmt, ##arg)
+
+
+struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
+ void *data,
+ unsigned int cpu,
+ const char *namefmt);
+
+/**
+ * kthread_run - create and wake a thread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: Convenient wrapper for kthread_create() followed by
+ * wake_up_process(). Returns the kthread or ERR_PTR(-ENOMEM).
+ */
+#define kthread_run(threadfn, data, namefmt, ...) \
+({ \
+ struct task_struct *__k \
+ = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
+ if (!IS_ERR(__k)) \
+ wake_up_process(__k); \
+ __k; \
+})
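
A minimal sketch of the create/stop lifecycle; the body and sleep interval are placeholders, and the usual module includes are assumed.

static int example_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);	/* sleep about 1s */
	}
	return 0;
}

/*
 *	task = kthread_run(example_thread_fn, NULL, "example_kthread");
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	...
 *	kthread_stop(task);	makes kthread_should_stop() return true
 *				and waits for the thread to exit
 */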
+
+void kthread_bind(struct task_struct *k, unsigned int cpu);
+int kthread_stop(struct task_struct *k);
+bool kthread_should_stop(void);
+bool kthread_should_park(void);
+bool kthread_freezable_should_stop(bool *was_frozen);
+void *kthread_data(struct task_struct *k);
+void *probe_kthread_data(struct task_struct *k);
+int kthread_park(struct task_struct *k);
+void kthread_unpark(struct task_struct *k);
+void kthread_parkme(void);
+
+int kthreadd(void *unused);
+extern struct task_struct *kthreadd_task;
+extern int tsk_fork_get_node(struct task_struct *tsk);
+
+/*
+ * Simple work processor based on kthread.
+ *
+ * This provides an easier way to make use of kthreads. A kthread_work
+ * can be queued and flushed using queue/flush_kthread_work()
+ * respectively. Queued kthread_works are processed by a kthread
+ * running kthread_worker_fn().
+ */
+struct kthread_work;
+typedef void (*kthread_work_func_t)(struct kthread_work *work);
+
+struct kthread_worker {
+ spinlock_t lock;
+ struct list_head work_list;
+ struct task_struct *task;
+ struct kthread_work *current_work;
+};
+
+struct kthread_work {
+ struct list_head node;
+ kthread_work_func_t func;
+ struct kthread_worker *worker;
+};
+
+#define KTHREAD_WORKER_INIT(worker) { \
+ .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \
+ .work_list = LIST_HEAD_INIT((worker).work_list), \
+ }
+
+#define KTHREAD_WORK_INIT(work, fn) { \
+ .node = LIST_HEAD_INIT((work).node), \
+ .func = (fn), \
+ }
+
+#define DEFINE_KTHREAD_WORKER(worker) \
+ struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
+
+#define DEFINE_KTHREAD_WORK(work, fn) \
+ struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
+
+/*
+ * kthread_worker.lock needs its own lockdep class key when defined on
+ * stack with lockdep enabled. Use the following macros in such cases.
+ */
+#ifdef CONFIG_LOCKDEP
+# define KTHREAD_WORKER_INIT_ONSTACK(worker) \
+ ({ init_kthread_worker(&worker); worker; })
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \
+ struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
+#else
+# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
+#endif
+
+extern void __init_kthread_worker(struct kthread_worker *worker,
+ const char *name, struct lock_class_key *key);
+
+#define init_kthread_worker(worker) \
+ do { \
+ static struct lock_class_key __key; \
+ __init_kthread_worker((worker), "("#worker")->lock", &__key); \
+ } while (0)
+
+#define init_kthread_work(work, fn) \
+ do { \
+ memset((work), 0, sizeof(struct kthread_work)); \
+ INIT_LIST_HEAD(&(work)->node); \
+ (work)->func = (fn); \
+ } while (0)
+
+int kthread_worker_fn(void *worker_ptr);
+
+bool queue_kthread_work(struct kthread_worker *worker,
+ struct kthread_work *work);
+void flush_kthread_work(struct kthread_work *work);
+void flush_kthread_worker(struct kthread_worker *worker);
+
+#endif /* _LINUX_KTHREAD_H */
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
new file mode 100644
index 000000000..2b6a204bd
--- /dev/null
+++ b/include/linux/ktime.h
@@ -0,0 +1,299 @@
+/*
+ * include/linux/ktime.h
+ *
+ * ktime_t - nanosecond-resolution time format.
+ *
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
+ *
+ * data type definitions, declarations, prototypes and macros.
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * Credits:
+ *
+ * Roman Zippel provided the ideas and primary code snippets of
+ * the ktime_t union and further simplifications of the original
+ * code.
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_KTIME_H
+#define _LINUX_KTIME_H
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+
+/*
+ * ktime_t:
+ *
+ * A single 64-bit variable is used to store the hrtimers
+ * internal representation of time values in scalar nanoseconds. The
+ * design plays out best on 64-bit CPUs, where most conversions are
+ * NOPs and most arithmetic ktime_t operations are plain arithmetic
+ * operations.
+ *
+ */
+union ktime {
+ s64 tv64;
+};
+
+typedef union ktime ktime_t; /* Kill this */
+
+/**
+ * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
+ * @secs: seconds to set
+ * @nsecs: nanoseconds to set
+ *
+ * Return: The ktime_t representation of the value.
+ */
+static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
+{
+ if (unlikely(secs >= KTIME_SEC_MAX))
+ return (ktime_t){ .tv64 = KTIME_MAX };
+
+ return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
+}
+
+/* Subtract two ktime_t variables. rem = lhs -rhs: */
+#define ktime_sub(lhs, rhs) \
+ ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
+
+/* Add two ktime_t variables. res = lhs + rhs: */
+#define ktime_add(lhs, rhs) \
+ ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
+
+/*
+ * Add a ktime_t variable and a scalar nanosecond value.
+ * res = kt + nsval:
+ */
+#define ktime_add_ns(kt, nsval) \
+ ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
+
+/*
+ * Subtract a scalar nanosecond value from a ktime_t variable
+ * res = kt - nsval:
+ */
+#define ktime_sub_ns(kt, nsval) \
+ ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })
+
+/* convert a timespec to ktime_t format: */
+static inline ktime_t timespec_to_ktime(struct timespec ts)
+{
+ return ktime_set(ts.tv_sec, ts.tv_nsec);
+}
+
+/* convert a timespec64 to ktime_t format: */
+static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
+{
+ return ktime_set(ts.tv_sec, ts.tv_nsec);
+}
+
+/* convert a timeval to ktime_t format: */
+static inline ktime_t timeval_to_ktime(struct timeval tv)
+{
+ return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
+}
+
+/* Map the ktime_t to timespec conversion to ns_to_timespec function */
+#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
+
+/* Map the ktime_t to timespec64 conversion to ns_to_timespec64 function */
+#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64)
+
+/* Map the ktime_t to timeval conversion to ns_to_timeval function */
+#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
+
+/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
+#define ktime_to_ns(kt) ((kt).tv64)
+
+
+/**
+ * ktime_equal - Compares two ktime_t variables to see if they are equal
+ * @cmp1: comparable1
+ * @cmp2: comparable2
+ *
+ * Compare two ktime_t variables.
+ *
+ * Return: 1 if equal.
+ */
+static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
+{
+ return cmp1.tv64 == cmp2.tv64;
+}
+
+/**
+ * ktime_compare - Compares two ktime_t variables for less, greater or equal
+ * @cmp1: comparable1
+ * @cmp2: comparable2
+ *
+ * Return: ...
+ * cmp1 < cmp2: return <0
+ * cmp1 == cmp2: return 0
+ * cmp1 > cmp2: return >0
+ */
+static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
+{
+ if (cmp1.tv64 < cmp2.tv64)
+ return -1;
+ if (cmp1.tv64 > cmp2.tv64)
+ return 1;
+ return 0;
+}
+
+/**
+ * ktime_after - Compare if a ktime_t value is bigger than another one.
+ * @cmp1: comparable1
+ * @cmp2: comparable2
+ *
+ * Return: true if cmp1 happened after cmp2.
+ */
+static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
+{
+ return ktime_compare(cmp1, cmp2) > 0;
+}
+
+/**
+ * ktime_before - Compare if a ktime_t value is smaller than another one.
+ * @cmp1: comparable1
+ * @cmp2: comparable2
+ *
+ * Return: true if cmp1 happened before cmp2.
+ */
+static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
+{
+ return ktime_compare(cmp1, cmp2) < 0;
+}
+
+#if BITS_PER_LONG < 64
+extern s64 __ktime_divns(const ktime_t kt, s64 div);
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
+{
+ /*
+ * Negative divisors could cause an infinite loop,
+ * so bug out here.
+ */
+ BUG_ON(div < 0);
+ if (__builtin_constant_p(div) && !(div >> 32)) {
+ s64 ns = kt.tv64;
+ u64 tmp = ns < 0 ? -ns : ns;
+
+ do_div(tmp, div);
+ return ns < 0 ? -tmp : tmp;
+ } else {
+ return __ktime_divns(kt, div);
+ }
+}
+#else /* BITS_PER_LONG < 64 */
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
+{
+ /*
+ * The 32-bit implementation cannot handle negative divisors,
+ * so catch them on 64-bit as well.
+ */
+ WARN_ON(div < 0);
+ return kt.tv64 / div;
+}
+#endif
+
+static inline s64 ktime_to_us(const ktime_t kt)
+{
+ return ktime_divns(kt, NSEC_PER_USEC);
+}
+
+static inline s64 ktime_to_ms(const ktime_t kt)
+{
+ return ktime_divns(kt, NSEC_PER_MSEC);
+}
+
+static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
+{
+ return ktime_to_us(ktime_sub(later, earlier));
+}
+
+static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
+{
+ return ktime_to_ms(ktime_sub(later, earlier));
+}
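
A small sketch of the delta helpers; ktime_get() comes from <linux/timekeeping.h>, which this header includes at the end.

static s64 example_measure_ms(void (*fn)(void))
{
	ktime_t start = ktime_get();

	fn();
	return ktime_ms_delta(ktime_get(), start);	/* elapsed milliseconds */
}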
+
+static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
+{
+ return ktime_add_ns(kt, usec * NSEC_PER_USEC);
+}
+
+static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
+{
+ return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
+}
+
+static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
+{
+ return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
+}
+
+extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
+
+/**
+ * ktime_to_timespec_cond - convert a ktime_t variable to timespec
+ * format only if the variable contains data
+ * @kt: the ktime_t variable to convert
+ * @ts: the timespec variable to store the result in
+ *
+ * Return: %true if there was a successful conversion, %false if kt was 0.
+ */
+static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
+ struct timespec *ts)
+{
+ if (kt.tv64) {
+ *ts = ktime_to_timespec(kt);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/**
+ * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
+ * format only if the variable contains data
+ * @kt: the ktime_t variable to convert
+ * @ts: the timespec variable to store the result in
+ *
+ * Return: %true if there was a successful conversion, %false if kt was 0.
+ */
+static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
+ struct timespec64 *ts)
+{
+ if (kt.tv64) {
+ *ts = ktime_to_timespec64(kt);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution value.
+ */
+#define LOW_RES_NSEC TICK_NSEC
+#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
+
+static inline ktime_t ns_to_ktime(u64 ns)
+{
+ static const ktime_t ktime_zero = { .tv64 = 0 };
+
+ return ktime_add_ns(ktime_zero, ns);
+}
+
+static inline ktime_t ms_to_ktime(u64 ms)
+{
+ static const ktime_t ktime_zero = { .tv64 = 0 };
+
+ return ktime_add_ms(ktime_zero, ms);
+}
+
+# include <linux/timekeeping.h>
+
+#endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
new file mode 100644
index 000000000..ad4505430
--- /dev/null
+++ b/include/linux/kvm_host.h
@@ -0,0 +1,1083 @@
+#ifndef __KVM_HOST_H
+#define __KVM_HOST_H
+
+/*
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/types.h>
+#include <linux/hardirq.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/mmu_notifier.h>
+#include <linux/preempt.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/ratelimit.h>
+#include <linux/err.h>
+#include <linux/irqflags.h>
+#include <linux/context_tracking.h>
+#include <asm/signal.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+
+#include <linux/kvm_types.h>
+
+#include <asm/kvm_host.h>
+
+/*
+ * Bits 16 ~ 31 of kvm_memory_region::flags are used internally
+ * in kvm; the other bits are visible to userspace and are defined in
+ * include/uapi/linux/kvm.h.
+ */
+#define KVM_MEMSLOT_INVALID (1UL << 16)
+#define KVM_MEMSLOT_INCOHERENT (1UL << 17)
+
+/* Two fragments for cross MMIO pages. */
+#define KVM_MAX_MMIO_FRAGMENTS 2
+
+/*
+ * For the normal pfn, the highest 12 bits should be zero,
+ * so we can mask bit 62 ~ bit 52 to indicate the error pfn,
+ * mask bit 63 to indicate the noslot pfn.
+ */
+#define KVM_PFN_ERR_MASK (0x7ffULL << 52)
+#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
+#define KVM_PFN_NOSLOT (0x1ULL << 63)
+
+#define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK)
+#define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1)
+#define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2)
+
+/*
+ * Error pfns indicate that the gfn is in a slot but failed to
+ * be translated to a pfn on the host.
+ */
+static inline bool is_error_pfn(pfn_t pfn)
+{
+ return !!(pfn & KVM_PFN_ERR_MASK);
+}
+
+/*
+ * error_noslot pfns indicate that the gfn cannot be
+ * translated to a pfn - either it is not in a slot or it
+ * failed to translate to a pfn.
+ */
+static inline bool is_error_noslot_pfn(pfn_t pfn)
+{
+ return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
+}
+
+/* noslot pfn indicates that the gfn is not in slot. */
+static inline bool is_noslot_pfn(pfn_t pfn)
+{
+ return pfn == KVM_PFN_NOSLOT;
+}
+
+/*
+ * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
+ * provide their own defines and kvm_is_error_hva()
+ */
+#ifndef KVM_HVA_ERR_BAD
+
+#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
+#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+ return addr >= PAGE_OFFSET;
+}
+
+#endif
+
+#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
+
+static inline bool is_error_page(struct page *page)
+{
+ return IS_ERR(page);
+}
+
+/*
+ * vcpu->requests bit members
+ */
+#define KVM_REQ_TLB_FLUSH 0
+#define KVM_REQ_MIGRATE_TIMER 1
+#define KVM_REQ_REPORT_TPR_ACCESS 2
+#define KVM_REQ_MMU_RELOAD 3
+#define KVM_REQ_TRIPLE_FAULT 4
+#define KVM_REQ_PENDING_TIMER 5
+#define KVM_REQ_UNHALT 6
+#define KVM_REQ_MMU_SYNC 7
+#define KVM_REQ_CLOCK_UPDATE 8
+#define KVM_REQ_KICK 9
+#define KVM_REQ_DEACTIVATE_FPU 10
+#define KVM_REQ_EVENT 11
+#define KVM_REQ_APF_HALT 12
+#define KVM_REQ_STEAL_UPDATE 13
+#define KVM_REQ_NMI 14
+#define KVM_REQ_PMU 15
+#define KVM_REQ_PMI 16
+#define KVM_REQ_WATCHDOG 17
+#define KVM_REQ_MASTERCLOCK_UPDATE 18
+#define KVM_REQ_MCLOCK_INPROGRESS 19
+#define KVM_REQ_EPR_EXIT 20
+#define KVM_REQ_SCAN_IOAPIC 21
+#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
+#define KVM_REQ_ENABLE_IBS 23
+#define KVM_REQ_DISABLE_IBS 24
+#define KVM_REQ_APIC_PAGE_RELOAD 25
+
+#define KVM_USERSPACE_IRQ_SOURCE_ID 0
+#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
+
+extern struct kmem_cache *kvm_vcpu_cache;
+
+extern spinlock_t kvm_lock;
+extern struct list_head vm_list;
+
+struct kvm_io_range {
+ gpa_t addr;
+ int len;
+ struct kvm_io_device *dev;
+};
+
+#define NR_IOBUS_DEVS 1000
+
+struct kvm_io_bus {
+ int dev_count;
+ int ioeventfd_count;
+ struct kvm_io_range range[];
+};
+
+enum kvm_bus {
+ KVM_MMIO_BUS,
+ KVM_PIO_BUS,
+ KVM_VIRTIO_CCW_NOTIFY_BUS,
+ KVM_FAST_MMIO_BUS,
+ KVM_NR_BUSES
+};
+
+int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ int len, const void *val);
+int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
+ gpa_t addr, int len, const void *val, long cookie);
+int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ int len, void *val);
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
+
+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_async_pf {
+ struct work_struct work;
+ struct list_head link;
+ struct list_head queue;
+ struct kvm_vcpu *vcpu;
+ struct mm_struct *mm;
+ gva_t gva;
+ unsigned long addr;
+ struct kvm_arch_async_pf arch;
+ bool wakeup_all;
+};
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
+ struct kvm_arch_async_pf *arch);
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
+#endif
+
+enum {
+ OUTSIDE_GUEST_MODE,
+ IN_GUEST_MODE,
+ EXITING_GUEST_MODE,
+ READING_SHADOW_PAGE_TABLES,
+};
+
+/*
+ * Sometimes a large or cross-page mmio needs to be broken up into separate
+ * exits for userspace servicing.
+ */
+struct kvm_mmio_fragment {
+ gpa_t gpa;
+ void *data;
+ unsigned len;
+};
+
+struct kvm_vcpu {
+ struct kvm *kvm;
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ struct preempt_notifier preempt_notifier;
+#endif
+ int cpu;
+ int vcpu_id;
+ int srcu_idx;
+ int mode;
+ unsigned long requests;
+ unsigned long guest_debug;
+
+ struct mutex mutex;
+ struct kvm_run *run;
+
+ int fpu_active;
+ int guest_fpu_loaded, guest_xcr0_loaded;
+ wait_queue_head_t wq;
+ struct pid *pid;
+ int sigset_active;
+ sigset_t sigset;
+ struct kvm_vcpu_stat stat;
+
+#ifdef CONFIG_HAS_IOMEM
+ int mmio_needed;
+ int mmio_read_completed;
+ int mmio_is_write;
+ int mmio_cur_fragment;
+ int mmio_nr_fragments;
+ struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
+#endif
+
+#ifdef CONFIG_KVM_ASYNC_PF
+ struct {
+ u32 queued;
+ struct list_head queue;
+ struct list_head done;
+ spinlock_t lock;
+ } async_pf;
+#endif
+
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+ /*
+ * CPU relax intercept or pause loop exit optimization.
+ * in_spin_loop: set when a vcpu does a pause loop exit
+ *  or a cpu relax is intercepted.
+ * dy_eligible: indicates whether vcpu is eligible for directed yield.
+ */
+ struct {
+ bool in_spin_loop;
+ bool dy_eligible;
+ } spin_loop;
+#endif
+ bool preempted;
+ struct kvm_vcpu_arch arch;
+};
+
+static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
+{
+ return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
+}
+
+/*
+ * Some of the bitops functions do not support too long bitmaps.
+ * This number must be determined not to exceed such limits.
+ */
+#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+
+struct kvm_memory_slot {
+ gfn_t base_gfn;
+ unsigned long npages;
+ unsigned long *dirty_bitmap;
+ struct kvm_arch_memory_slot arch;
+ unsigned long userspace_addr;
+ u32 flags;
+ short id;
+};
+
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+ return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
+struct kvm_s390_adapter_int {
+ u64 ind_addr;
+ u64 summary_addr;
+ u64 ind_offset;
+ u32 summary_offset;
+ u32 adapter_id;
+};
+
+struct kvm_kernel_irq_routing_entry {
+ u32 gsi;
+ u32 type;
+ int (*set)(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id, int level,
+ bool line_status);
+ union {
+ struct {
+ unsigned irqchip;
+ unsigned pin;
+ } irqchip;
+ struct msi_msg msi;
+ struct kvm_s390_adapter_int adapter;
+ };
+ struct hlist_node link;
+};
+
+#ifndef KVM_PRIVATE_MEM_SLOTS
+#define KVM_PRIVATE_MEM_SLOTS 0
+#endif
+
+#ifndef KVM_MEM_SLOTS_NUM
+#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
+#endif
+
+/*
+ * Note:
+ * memslots are not sorted by id anymore, please use id_to_memslot()
+ * to get the memslot by its id.
+ */
+struct kvm_memslots {
+ u64 generation;
+ struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
+ /* The mapping table from slot id to the index in memslots[]. */
+ short id_to_index[KVM_MEM_SLOTS_NUM];
+ atomic_t lru_slot;
+ int used_slots;
+};
+
+struct kvm {
+ spinlock_t mmu_lock;
+ struct mutex slots_lock;
+ struct mm_struct *mm; /* userspace tied to this vm */
+ struct kvm_memslots *memslots;
+ struct srcu_struct srcu;
+ struct srcu_struct irq_srcu;
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+ u32 bsp_vcpu_id;
+#endif
+ struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ atomic_t online_vcpus;
+ int last_boosted_vcpu;
+ struct list_head vm_list;
+ struct mutex lock;
+ struct kvm_io_bus *buses[KVM_NR_BUSES];
+#ifdef CONFIG_HAVE_KVM_EVENTFD
+ struct {
+ spinlock_t lock;
+ struct list_head items;
+ struct list_head resampler_list;
+ struct mutex resampler_lock;
+ } irqfds;
+ struct list_head ioeventfds;
+#endif
+ struct kvm_vm_stat stat;
+ struct kvm_arch arch;
+ atomic_t users_count;
+#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+ struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+ spinlock_t ring_lock;
+ struct list_head coalesced_zones;
+#endif
+
+ struct mutex irq_lock;
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+ /*
+ * Update side is protected by irq_lock.
+ */
+ struct kvm_irq_routing_table __rcu *irq_routing;
+#endif
+#ifdef CONFIG_HAVE_KVM_IRQFD
+ struct hlist_head irq_ack_notifier_list;
+#endif
+
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ struct mmu_notifier mmu_notifier;
+ unsigned long mmu_notifier_seq;
+ long mmu_notifier_count;
+#endif
+ long tlbs_dirty;
+ struct list_head devices;
+};
+
+#define kvm_err(fmt, ...) \
+ pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_info(fmt, ...) \
+ pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_debug(fmt, ...) \
+ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_pr_unimpl(fmt, ...) \
+ pr_err_ratelimited("kvm [%i]: " fmt, \
+ task_tgid_nr(current), ## __VA_ARGS__)
+
+/* The guest did something we don't support. */
+#define vcpu_unimpl(vcpu, fmt, ...) \
+ kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+
+static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+{
+ smp_rmb();
+ return kvm->vcpus[i];
+}
+
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ for (idx = 0; \
+ idx < atomic_read(&kvm->online_vcpus) && \
+ (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
+ idx++)
+
+#define kvm_for_each_memslot(memslot, slots) \
+ for (memslot = &slots->memslots[0]; \
+ memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
+ memslot++)
+
+int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
+void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
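
A sketch of the kvm_for_each_vcpu() iterator defined above; kvm_make_request() and kvm_vcpu_kick() are declared further down in this header, and the particular request is only an example.

static void example_request_clock_update(struct kvm *kvm)
{
	int idx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(idx, vcpu, kvm) {
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		kvm_vcpu_kick(vcpu);
	}
}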
+
+int __must_check vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_put(struct kvm_vcpu *vcpu);
+
+#ifdef __KVM_HAVE_IOAPIC
+void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+#else
+static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+{
+}
+#endif
+
+#ifdef CONFIG_HAVE_KVM_IRQFD
+int kvm_irqfd_init(void);
+void kvm_irqfd_exit(void);
+#else
+static inline int kvm_irqfd_init(void)
+{
+ return 0;
+}
+
+static inline void kvm_irqfd_exit(void)
+{
+}
+#endif
+int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
+ struct module *module);
+void kvm_exit(void);
+
+void kvm_get_kvm(struct kvm *kvm);
+void kvm_put_kvm(struct kvm *kvm);
+
+static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
+{
+ return rcu_dereference_check(kvm->memslots,
+ srcu_read_lock_held(&kvm->srcu)
+ || lockdep_is_held(&kvm->slots_lock));
+}
+
+static inline struct kvm_memory_slot *
+id_to_memslot(struct kvm_memslots *slots, int id)
+{
+ int index = slots->id_to_index[id];
+ struct kvm_memory_slot *slot;
+
+ slot = &slots->memslots[index];
+
+ WARN_ON(slot->id != id);
+ return slot;
+}
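+
+/*
+ * Usage sketch (illustrative only; the caller must hold kvm->srcu for read
+ * or kvm->slots_lock, as checked by kvm_memslots() above):
+ *
+ *	struct kvm_memslots *slots = kvm_memslots(kvm);
+ *	struct kvm_memory_slot *slot = id_to_memslot(slots, id);
+ */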
+
+/*
+ * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
+ * - create a new memory slot
+ * - delete an existing memory slot
+ * - modify an existing memory slot
+ * -- move it in the guest physical memory space
+ * -- just change its flags
+ *
+ * Since flags can be changed by some of these operations, the following
+ * differentiation is the best we can do for __kvm_set_memory_region():
+ */
+enum kvm_mr_change {
+ KVM_MR_CREATE,
+ KVM_MR_DELETE,
+ KVM_MR_MOVE,
+ KVM_MR_FLAGS_ONLY,
+};
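+
+/*
+ * Roughly, a request is classified as follows (sketch of the semantics
+ * described above; the authoritative checks live in
+ * __kvm_set_memory_region() in virt/kvm/kvm_main.c):
+ *
+ *	if (mem->memory_size && !old->npages)
+ *		change = KVM_MR_CREATE;
+ *	else if (!mem->memory_size)
+ *		change = KVM_MR_DELETE;
+ *	else if (mem->guest_phys_addr >> PAGE_SHIFT != old->base_gfn)
+ *		change = KVM_MR_MOVE;
+ *	else
+ *		change = KVM_MR_FLAGS_ONLY;
+ */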
+
+int kvm_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+int __kvm_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem);
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont);
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+ unsigned long npages);
+void kvm_arch_memslots_updated(struct kvm *kvm);
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem,
+ enum kvm_mr_change change);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ enum kvm_mr_change change);
+bool kvm_largepages_enabled(void);
+void kvm_disable_largepages(void);
+/* flush all memory translations */
+void kvm_arch_flush_shadow_all(struct kvm *kvm);
+/* flush memory translations pointing to 'slot' */
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot);
+
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
+ int nr_pages);
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
+unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
+ bool *writable);
+void kvm_release_page_clean(struct page *page);
+void kvm_release_page_dirty(struct page *page);
+void kvm_set_page_accessed(struct page *page);
+
+pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+ bool write_fault, bool *writable);
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+ bool *writable);
+pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
+
+void kvm_release_pfn_clean(pfn_t pfn);
+void kvm_set_pfn_dirty(pfn_t pfn);
+void kvm_set_pfn_accessed(pfn_t pfn);
+void kvm_get_pfn(pfn_t pfn);
+
+int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+ int len);
+int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+ unsigned long len);
+int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
+ int offset, int len);
+int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+ unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ gpa_t gpa, unsigned long len);
+int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
+int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+
+void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+int kvm_vcpu_yield_to(struct kvm_vcpu *target);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
+void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
+void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
+void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_reload_remote_mmus(struct kvm *kvm);
+void kvm_make_mclock_inprogress_request(struct kvm *kvm);
+void kvm_make_scan_ioapic_request(struct kvm *kvm);
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);
+
+int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
+
+int kvm_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log, int *is_dirty);
+
+int kvm_get_dirty_log_protect(struct kvm *kvm,
+ struct kvm_dirty_log *log, bool *is_dirty);
+
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn_offset,
+ unsigned long mask);
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log);
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+ bool line_status);
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr);
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs);
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs);
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state);
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state);
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg);
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+
+int kvm_arch_init(void *opaque);
+void kvm_arch_exit(void);
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
+
+void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
+
+int kvm_arch_hardware_enable(void);
+void kvm_arch_hardware_disable(void);
+int kvm_arch_hardware_setup(void);
+void kvm_arch_hardware_unsetup(void);
+void kvm_arch_check_processor_compat(void *rtn);
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+
+void *kvm_kvzalloc(unsigned long size);
+
+#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+ return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+ kfree(kvm);
+}
+#endif
+
+#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
+void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
+void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
+bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
+#else
+static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
+{
+}
+
+static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
+{
+ return false;
+}
+#endif
+
+static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+{
+#ifdef __KVM_HAVE_ARCH_WQP
+ return vcpu->arch.wqp;
+#else
+ return &vcpu->wq;
+#endif
+}
+
+#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
+/*
+ * Returns true if the virtual interrupt controller is initialized and
+ * ready to accept virtual IRQs. On some architectures the virtual
+ * interrupt controller is dynamically instantiated, so this is not
+ * always the case.
+ */
+bool kvm_arch_intc_initialized(struct kvm *kvm);
+#else
+static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
+{
+ return true;
+}
+#endif
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
+void kvm_arch_destroy_vm(struct kvm *kvm);
+void kvm_arch_sync_events(struct kvm *kvm);
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
+
+bool kvm_is_reserved_pfn(pfn_t pfn);
+
+struct kvm_irq_ack_notifier {
+ struct hlist_node link;
+ unsigned gsi;
+ void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
+};
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *entries, int gsi);
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+ bool line_status);
+int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
+ int irq_source_id, int level, bool line_status);
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
+void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
+void kvm_register_irq_ack_notifier(struct kvm *kvm,
+ struct kvm_irq_ack_notifier *kian);
+void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
+ struct kvm_irq_ack_notifier *kian);
+int kvm_request_irq_source_id(struct kvm *kvm);
+void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+
+#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
+#else
+static inline int kvm_iommu_map_pages(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+ return 0;
+}
+
+static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+}
+#endif
+
+static inline void kvm_guest_enter(void)
+{
+ unsigned long flags;
+
+ BUG_ON(preemptible());
+
+ local_irq_save(flags);
+ guest_enter();
+ local_irq_restore(flags);
+
+	/* KVM does not hold any references to RCU-protected data when it
+	 * switches the CPU into guest mode. In fact, switching to guest mode
+	 * is very similar to exiting to userspace from an RCU point of view.
+	 * In addition, the CPU may stay in guest mode for quite a long time
+	 * (up to one time slice). Let's treat guest mode as a quiescent
+	 * state, just like we do with user-mode execution.
+ */
+ if (!context_tracking_cpu_is_enabled())
+ rcu_virt_note_context_switch(smp_processor_id());
+}
+
+static inline void kvm_guest_exit(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ guest_exit();
+ local_irq_restore(flags);
+}
+
+/*
+ * search_memslots() and __gfn_to_memslot() are here because they are
+ * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
+ * gfn_to_memslot() itself isn't here as an inline because that would
+ * bloat other code too much.
+ */
+static inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+{
+ int start = 0, end = slots->used_slots;
+ int slot = atomic_read(&slots->lru_slot);
+ struct kvm_memory_slot *memslots = slots->memslots;
+
+ if (gfn >= memslots[slot].base_gfn &&
+ gfn < memslots[slot].base_gfn + memslots[slot].npages)
+ return &memslots[slot];
+
+ while (start < end) {
+ slot = start + (end - start) / 2;
+
+ if (gfn >= memslots[slot].base_gfn)
+ end = slot;
+ else
+ start = slot + 1;
+ }
+
+ if (gfn >= memslots[start].base_gfn &&
+ gfn < memslots[start].base_gfn + memslots[start].npages) {
+ atomic_set(&slots->lru_slot, start);
+ return &memslots[start];
+ }
+
+ return NULL;
+}
+
+static inline struct kvm_memory_slot *
+__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+{
+ return search_memslots(slots, gfn);
+}
+
+static inline unsigned long
+__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
+}
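+
+/*
+ * Translation sketch (illustrative only; the caller must keep the memslots
+ * stable, e.g. by holding kvm->srcu for read):
+ *
+ *	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+ *	unsigned long hva;
+ *
+ *	if (slot)
+ *		hva = __gfn_to_hva_memslot(slot, gfn);
+ */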
+
+static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn_to_memslot(kvm, gfn)->id;
+}
+
+static inline gfn_t
+hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
+{
+ gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;
+
+ return slot->base_gfn + gfn_offset;
+}
+
+static inline gpa_t gfn_to_gpa(gfn_t gfn)
+{
+ return (gpa_t)gfn << PAGE_SHIFT;
+}
+
+static inline gfn_t gpa_to_gfn(gpa_t gpa)
+{
+ return (gfn_t)(gpa >> PAGE_SHIFT);
+}
+
+static inline hpa_t pfn_to_hpa(pfn_t pfn)
+{
+ return (hpa_t)pfn << PAGE_SHIFT;
+}
+
+static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+{
+ unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+
+ return kvm_is_error_hva(hva);
+}
+
+static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
+{
+ set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
+}
+
+enum kvm_stat_kind {
+ KVM_STAT_VM,
+ KVM_STAT_VCPU,
+};
+
+struct kvm_stats_debugfs_item {
+ const char *name;
+ int offset;
+ enum kvm_stat_kind kind;
+ struct dentry *dentry;
+};
+extern struct kvm_stats_debugfs_item debugfs_entries[];
+extern struct dentry *kvm_debugfs_dir;
+
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+{
+ if (unlikely(kvm->mmu_notifier_count))
+ return 1;
+ /*
+ * Ensure the read of mmu_notifier_count happens before the read
+ * of mmu_notifier_seq. This interacts with the smp_wmb() in
+ * mmu_notifier_invalidate_range_end to make sure that the caller
+ * either sees the old (non-zero) value of mmu_notifier_count or
+ * the new (incremented) value of mmu_notifier_seq.
+ * PowerPC Book3s HV KVM calls this under a per-page lock
+ * rather than under kvm->mmu_lock, for scalability, so
+	 * it cannot rely on kvm->mmu_lock to keep things ordered.
+ */
+ smp_rmb();
+ if (kvm->mmu_notifier_seq != mmu_seq)
+ return 1;
+ return 0;
+}
+#endif
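+
+/*
+ * Typical caller pattern (sketch of how arch page-fault code is expected to
+ * pair the sequence number with mmu_lock; names of the surrounding function
+ * are hypothetical):
+ *
+ *	mmu_seq = kvm->mmu_notifier_seq;
+ *	smp_rmb();
+ *	pfn = gfn_to_pfn(kvm, gfn);	(may sleep)
+ *
+ *	spin_lock(&kvm->mmu_lock);
+ *	if (mmu_notifier_retry(kvm, mmu_seq))
+ *		goto retry;		(an invalidation ran; start over)
+ *	... install the mapping ...
+ *	spin_unlock(&kvm->mmu_lock);
+ */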
+
+#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
+
+#ifdef CONFIG_S390
+#define KVM_MAX_IRQ_ROUTES 4096 /* FIXME: we can have more than that... */
+#else
+#define KVM_MAX_IRQ_ROUTES 1024
+#endif
+
+int kvm_setup_default_irq_routing(struct kvm *kvm);
+int kvm_set_irq_routing(struct kvm *kvm,
+ const struct kvm_irq_routing_entry *entries,
+ unsigned nr,
+ unsigned flags);
+int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue);
+void kvm_free_irq_routing(struct kvm *kvm);
+
+#else
+
+static inline void kvm_free_irq_routing(struct kvm *kvm) {}
+
+#endif
+
+int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
+
+#ifdef CONFIG_HAVE_KVM_EVENTFD
+
+void kvm_eventfd_init(struct kvm *kvm);
+int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
+
+#ifdef CONFIG_HAVE_KVM_IRQFD
+int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
+void kvm_irqfd_release(struct kvm *kvm);
+void kvm_irq_routing_update(struct kvm *);
+#else
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+{
+ return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+#endif
+
+#else
+
+static inline void kvm_eventfd_init(struct kvm *kvm) {}
+
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
+{
+ return -EINVAL;
+}
+
+static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+static inline void kvm_irq_routing_update(struct kvm *kvm)
+{
+}
+#endif
+
+static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_HAVE_KVM_EVENTFD */
+
+#ifdef CONFIG_KVM_APIC_ARCHITECTURE
+static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
+{
+ return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
+}
+
+static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
+}
+
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
+
+#else
+
+static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
+
+#endif
+
+static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
+{
+ set_bit(req, &vcpu->requests);
+}
+
+static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
+{
+ if (test_bit(req, &vcpu->requests)) {
+ clear_bit(req, &vcpu->requests);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+extern bool kvm_rebooting;
+
+struct kvm_device {
+ struct kvm_device_ops *ops;
+ struct kvm *kvm;
+ void *private;
+ struct list_head vm_node;
+};
+
+/* create, destroy, and name are mandatory */
+struct kvm_device_ops {
+ const char *name;
+ int (*create)(struct kvm_device *dev, u32 type);
+
+ /*
+ * Destroy is responsible for freeing dev.
+ *
+ * Destroy may be called before or after destructors are called
+ * on emulated I/O regions, depending on whether a reference is
+ * held by a vcpu or other kvm component that gets destroyed
+ * after the emulated I/O.
+ */
+ void (*destroy)(struct kvm_device *dev);
+
+ int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
+ int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
+ int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
+ long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
+ unsigned long arg);
+};
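+
+/*
+ * Minimal backend sketch (the my_* names are hypothetical); create, destroy
+ * and name are the mandatory fields noted above, and the ops are made
+ * available via kvm_register_device_ops():
+ *
+ *	static int my_dev_create(struct kvm_device *dev, u32 type)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static void my_dev_destroy(struct kvm_device *dev)
+ *	{
+ *		kfree(dev);
+ *	}
+ *
+ *	static struct kvm_device_ops my_dev_ops = {
+ *		.name    = "my-dev",
+ *		.create  = my_dev_create,
+ *		.destroy = my_dev_destroy,
+ *	};
+ */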
+
+void kvm_device_get(struct kvm_device *dev);
+void kvm_device_put(struct kvm_device *dev);
+struct kvm_device *kvm_device_from_filp(struct file *filp);
+int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
+void kvm_unregister_device_ops(u32 type);
+
+extern struct kvm_device_ops kvm_mpic_ops;
+extern struct kvm_device_ops kvm_xics_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
+extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
+
+#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+ vcpu->spin_loop.in_spin_loop = val;
+}
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+ vcpu->spin_loop.dy_eligible = val;
+}
+
+#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+
+static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
+{
+}
+
+static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
+{
+}
+#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+#endif
+
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
new file mode 100644
index 000000000..00a97bb90
--- /dev/null
+++ b/include/linux/kvm_para.h
@@ -0,0 +1,13 @@
+#ifndef __LINUX_KVM_PARA_H
+#define __LINUX_KVM_PARA_H
+
+#include <uapi/linux/kvm_para.h>
+
+
+static inline int kvm_para_has_feature(unsigned int feature)
+{
+ if (kvm_arch_para_features() & (1UL << feature))
+ return 1;
+ return 0;
+}
+#endif /* __LINUX_KVM_PARA_H */
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
new file mode 100644
index 000000000..931da7e91
--- /dev/null
+++ b/include/linux/kvm_types.h
@@ -0,0 +1,65 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __KVM_TYPES_H__
+#define __KVM_TYPES_H__
+
+struct kvm;
+struct kvm_async_pf;
+struct kvm_device_ops;
+struct kvm_interrupt;
+struct kvm_irq_routing_table;
+struct kvm_memory_slot;
+struct kvm_one_reg;
+struct kvm_run;
+struct kvm_userspace_memory_region;
+struct kvm_vcpu;
+struct kvm_vcpu_init;
+
+enum kvm_mr_change;
+
+#include <asm/types.h>
+
+/*
+ * Address types:
+ *
+ * gva - guest virtual address
+ * gpa - guest physical address
+ * gfn - guest frame number
+ * hva - host virtual address
+ * hpa - host physical address
+ * hfn - host frame number
+ */
+
+typedef unsigned long gva_t;
+typedef u64 gpa_t;
+typedef u64 gfn_t;
+
+typedef unsigned long hva_t;
+typedef u64 hpa_t;
+typedef u64 hfn_t;
+
+typedef hfn_t pfn_t;
+
+struct gfn_to_hva_cache {
+ u64 generation;
+ gpa_t gpa;
+ unsigned long hva;
+ unsigned long len;
+ struct kvm_memory_slot *memslot;
+};
+
+#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h
new file mode 100644
index 000000000..bffdb962f
--- /dev/null
+++ b/include/linux/l2tp.h
@@ -0,0 +1,13 @@
+/*
+ * L2TP-over-IP socket for L2TPv3.
+ *
+ * Author: James Chapman <jchapman@katalix.com>
+ */
+#ifndef _LINUX_L2TP_H_
+#define _LINUX_L2TP_H_
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <uapi/linux/l2tp.h>
+
+#endif
diff --git a/include/linux/lapb.h b/include/linux/lapb.h
new file mode 100644
index 000000000..873c1eb63
--- /dev/null
+++ b/include/linux/lapb.h
@@ -0,0 +1,57 @@
+/*
+ * These are the public elements of the Linux LAPB module.
+ */
+
+#ifndef LAPB_KERNEL_H
+#define LAPB_KERNEL_H
+
+#define LAPB_OK 0
+#define LAPB_BADTOKEN 1
+#define LAPB_INVALUE 2
+#define LAPB_CONNECTED 3
+#define LAPB_NOTCONNECTED 4
+#define LAPB_REFUSED 5
+#define LAPB_TIMEDOUT 6
+#define LAPB_NOMEM 7
+
+#define LAPB_STANDARD 0x00
+#define LAPB_EXTENDED 0x01
+
+#define LAPB_SLP 0x00
+#define LAPB_MLP 0x02
+
+#define LAPB_DTE 0x00
+#define LAPB_DCE 0x04
+
+struct lapb_register_struct {
+ void (*connect_confirmation)(struct net_device *dev, int reason);
+ void (*connect_indication)(struct net_device *dev, int reason);
+ void (*disconnect_confirmation)(struct net_device *dev, int reason);
+ void (*disconnect_indication)(struct net_device *dev, int reason);
+ int (*data_indication)(struct net_device *dev, struct sk_buff *skb);
+ void (*data_transmit)(struct net_device *dev, struct sk_buff *skb);
+};
+
+struct lapb_parms_struct {
+ unsigned int t1;
+ unsigned int t1timer;
+ unsigned int t2;
+ unsigned int t2timer;
+ unsigned int n2;
+ unsigned int n2count;
+ unsigned int window;
+ unsigned int state;
+ unsigned int mode;
+};
+
+extern int lapb_register(struct net_device *dev,
+ const struct lapb_register_struct *callbacks);
+extern int lapb_unregister(struct net_device *dev);
+extern int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms);
+extern int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms);
+extern int lapb_connect_request(struct net_device *dev);
+extern int lapb_disconnect_request(struct net_device *dev);
+extern int lapb_data_request(struct net_device *dev, struct sk_buff *skb);
+extern int lapb_data_received(struct net_device *dev, struct sk_buff *skb);
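+
+/*
+ * Registration sketch (illustrative only; the my_* callbacks are
+ * hypothetical):
+ *
+ *	static const struct lapb_register_struct my_callbacks = {
+ *		.connect_indication    = my_connect_ind,
+ *		.disconnect_indication = my_disconnect_ind,
+ *		.data_indication       = my_data_ind,
+ *		.data_transmit         = my_data_tx,
+ *	};
+ *
+ *	if (lapb_register(dev, &my_callbacks) == LAPB_OK)
+ *		lapb_connect_request(dev);
+ */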
+
+#endif
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
new file mode 100644
index 000000000..e23121f9d
--- /dev/null
+++ b/include/linux/latencytop.h
@@ -0,0 +1,53 @@
+/*
+ * latencytop.h: Infrastructure for displaying latency
+ *
+ * (C) Copyright 2008 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ */
+
+#ifndef _INCLUDE_GUARD_LATENCYTOP_H_
+#define _INCLUDE_GUARD_LATENCYTOP_H_
+
+#include <linux/compiler.h>
+struct task_struct;
+
+#ifdef CONFIG_LATENCYTOP
+
+#define LT_SAVECOUNT 32
+#define LT_BACKTRACEDEPTH 12
+
+struct latency_record {
+ unsigned long backtrace[LT_BACKTRACEDEPTH];
+ unsigned int count;
+ unsigned long time;
+ unsigned long max;
+};
+
+
+
+extern int latencytop_enabled;
+void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+static inline void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+ if (unlikely(latencytop_enabled))
+ __account_scheduler_latency(task, usecs, inter);
+}
+
+void clear_all_latency_tracing(struct task_struct *p);
+
+#else
+
+static inline void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+}
+
+static inline void clear_all_latency_tracing(struct task_struct *p)
+{
+}
+
+#endif
+
+#endif
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
new file mode 100644
index 000000000..504f6246f
--- /dev/null
+++ b/include/linux/lcd.h
@@ -0,0 +1,130 @@
+/*
+ * LCD Lowlevel Control Abstraction
+ *
+ * Copyright (C) 2003,2004 Hewlett-Packard Company
+ *
+ */
+
+#ifndef _LINUX_LCD_H
+#define _LINUX_LCD_H
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/fb.h>
+
+/* Notes on locking:
+ *
+ * lcd_device->ops_lock is an internal lock protecting the ops
+ * field; no code outside the core should need to touch it.
+ *
+ * Access to set_power() is serialised by the update_lock mutex since
+ * most drivers seem to need this and historically get it wrong.
+ *
+ * Most drivers don't need locking on their get_power() method.
+ * If yours does, you need to implement it in the driver. You can use the
+ * update_lock mutex if appropriate.
+ *
+ * Any other use of the locks below is probably wrong.
+ */
+
+struct lcd_device;
+struct fb_info;
+
+struct lcd_properties {
+ /* The maximum value for contrast (read-only) */
+ int max_contrast;
+};
+
+struct lcd_ops {
+ /* Get the LCD panel power status (0: full on, 1..3: controller
+ power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
+ int (*get_power)(struct lcd_device *);
+ /*
+	 * Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX).
+	 * This callback is called prior to the fb driver's callback.
+	 *
+	 * Note that if early_set_power is not NULL, the early fb notifier
+	 * is registered.
+ */
+ int (*early_set_power)(struct lcd_device *, int power);
+ /* revert the effects of the early blank event. */
+ int (*r_early_set_power)(struct lcd_device *, int power);
+ /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
+ int (*set_power)(struct lcd_device *, int power);
+ /* Get the current contrast setting (0-max_contrast) */
+ int (*get_contrast)(struct lcd_device *);
+ /* Set LCD panel contrast */
+ int (*set_contrast)(struct lcd_device *, int contrast);
+ /* Set LCD panel mode (resolutions ...) */
+ int (*set_mode)(struct lcd_device *, struct fb_videomode *);
+ /* Check if given framebuffer device is the one LCD is bound to;
+ return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */
+ int (*check_fb)(struct lcd_device *, struct fb_info *);
+};
+
+struct lcd_device {
+ struct lcd_properties props;
+ /* This protects the 'ops' field. If 'ops' is NULL, the driver that
+ registered this device has been unloaded, and if class_get_devdata()
+ points to something in the body of that driver, it is also invalid. */
+ struct mutex ops_lock;
+ /* If this is NULL, the backing module is unloaded */
+ struct lcd_ops *ops;
+ /* Serialise access to set_power method */
+ struct mutex update_lock;
+ /* The framebuffer notifier block */
+ struct notifier_block fb_notif;
+
+ struct device dev;
+};
+
+struct lcd_platform_data {
+ /* reset lcd panel device. */
+ int (*reset)(struct lcd_device *ld);
+	/* power the lcd panel on or off: 'enable' of 0 powers the
+	   panel off, 1 powers it on. */
+ int (*power_on)(struct lcd_device *ld, int enable);
+
+	/* indicates whether the lcd panel was enabled
+	   by the bootloader. */
+	int lcd_enabled;
+	/* settle delay after toggling the reset gpio, low to high
+	   or high to low depending on whether the reset gpio is
+	   active low or active high. */
+	unsigned int reset_delay;
+	/* settle time needed for lcd power on. */
+	unsigned int power_on_delay;
+	/* settle time needed for lcd power off. */
+	unsigned int power_off_delay;
+
+ /* it could be used for any purpose. */
+ void *pdata;
+};
+
+static inline void lcd_set_power(struct lcd_device *ld, int power)
+{
+ mutex_lock(&ld->update_lock);
+ if (ld->ops && ld->ops->set_power)
+ ld->ops->set_power(ld, power);
+ mutex_unlock(&ld->update_lock);
+}
+
+extern struct lcd_device *lcd_device_register(const char *name,
+ struct device *parent, void *devdata, struct lcd_ops *ops);
+extern struct lcd_device *devm_lcd_device_register(struct device *dev,
+ const char *name, struct device *parent,
+ void *devdata, struct lcd_ops *ops);
+extern void lcd_device_unregister(struct lcd_device *ld);
+extern void devm_lcd_device_unregister(struct device *dev,
+ struct lcd_device *ld);
+
+#define to_lcd_device(obj) container_of(obj, struct lcd_device, dev)
+
+static inline void * lcd_get_data(struct lcd_device *ld_dev)
+{
+ return dev_get_drvdata(&ld_dev->dev);
+}
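+
+/*
+ * Driver registration sketch (illustrative only; pdev, priv and the my_*
+ * names are hypothetical):
+ *
+ *	static struct lcd_ops my_lcd_ops = {
+ *		.get_power = my_get_power,
+ *		.set_power = my_set_power,
+ *	};
+ *
+ *	ld = devm_lcd_device_register(&pdev->dev, "my-lcd", &pdev->dev,
+ *				      priv, &my_lcd_ops);
+ *	if (IS_ERR(ld))
+ *		return PTR_ERR(ld);
+ */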
+
+
+#endif
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
new file mode 100644
index 000000000..1ce79a7f1
--- /dev/null
+++ b/include/linux/lcm.h
@@ -0,0 +1,9 @@
+#ifndef _LCM_H
+#define _LCM_H
+
+#include <linux/compiler.h>
+
+unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
+unsigned long lcm_not_zero(unsigned long a, unsigned long b) __attribute_const__;
+
+#endif /* _LCM_H */
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
new file mode 100644
index 000000000..e97966d1f
--- /dev/null
+++ b/include/linux/led-class-flash.h
@@ -0,0 +1,192 @@
+/*
+ * LED Flash class interface
+ *
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ * Author: Jacek Anaszewski <j.anaszewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_FLASH_LEDS_H_INCLUDED
+#define __LINUX_FLASH_LEDS_H_INCLUDED
+
+#include <linux/leds.h>
+
+struct device_node;
+struct led_classdev_flash;
+
+/*
+ * Supported LED fault bits - must be kept in sync
+ * with the V4L2_FLASH_FAULT bits.
+ */
+#define LED_FAULT_OVER_VOLTAGE (1 << 0)
+#define LED_FAULT_TIMEOUT (1 << 1)
+#define LED_FAULT_OVER_TEMPERATURE (1 << 2)
+#define LED_FAULT_SHORT_CIRCUIT (1 << 3)
+#define LED_FAULT_OVER_CURRENT (1 << 4)
+#define LED_FAULT_INDICATOR (1 << 5)
+#define LED_FAULT_UNDER_VOLTAGE (1 << 6)
+#define LED_FAULT_INPUT_VOLTAGE (1 << 7)
+#define LED_FAULT_LED_OVER_TEMPERATURE (1 << 8)
+#define LED_NUM_FLASH_FAULTS 9
+
+#define LED_FLASH_SYSFS_GROUPS_SIZE 5
+
+struct led_flash_ops {
+ /* set flash brightness */
+ int (*flash_brightness_set)(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
+ /* get flash brightness */
+ int (*flash_brightness_get)(struct led_classdev_flash *fled_cdev,
+ u32 *brightness);
+ /* set flash strobe state */
+ int (*strobe_set)(struct led_classdev_flash *fled_cdev, bool state);
+ /* get flash strobe state */
+ int (*strobe_get)(struct led_classdev_flash *fled_cdev, bool *state);
+ /* set flash timeout */
+ int (*timeout_set)(struct led_classdev_flash *fled_cdev, u32 timeout);
+ /* get the flash LED fault */
+ int (*fault_get)(struct led_classdev_flash *fled_cdev, u32 *fault);
+};
+
+/*
+ * Current value of a flash setting along
+ * with its constraints.
+ */
+struct led_flash_setting {
+	/* minimum allowed value */
+ u32 min;
+ /* maximum allowed value */
+ u32 max;
+ /* step value */
+ u32 step;
+ /* current value */
+ u32 val;
+};
+
+struct led_classdev_flash {
+ /* led class device */
+ struct led_classdev led_cdev;
+
+ /* flash led specific ops */
+ const struct led_flash_ops *ops;
+
+ /* flash brightness value in microamperes along with its constraints */
+ struct led_flash_setting brightness;
+
+ /* flash timeout value in microseconds along with its constraints */
+ struct led_flash_setting timeout;
+
+ /* LED Flash class sysfs groups */
+ const struct attribute_group *sysfs_groups[LED_FLASH_SYSFS_GROUPS_SIZE];
+};
+
+static inline struct led_classdev_flash *lcdev_to_flcdev(
+ struct led_classdev *lcdev)
+{
+ return container_of(lcdev, struct led_classdev_flash, led_cdev);
+}
+
+/**
+ * led_classdev_flash_register - register a new object of led_classdev class
+ * with support for flash LEDs
+ * @parent: the parent device of the flash LED
+ * @fled_cdev: the led_classdev_flash structure for this device
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_classdev_flash_unregister - unregisters an object of led_classdev class
+ * with support for flash LEDs
+ * @fled_cdev: the flash LED to unregister
+ *
+ * Unregisters an object previously registered via led_classdev_flash_register.
+ */
+extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_set_flash_strobe - setup flash strobe
+ * @fled_cdev: the flash LED to set strobe on
+ * @state: 1 - strobe flash, 0 - stop flash strobe
+ *
+ * Strobe the flash LED.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev,
+ bool state)
+{
+ return fled_cdev->ops->strobe_set(fled_cdev, state);
+}
+
+/**
+ * led_get_flash_strobe - get flash strobe status
+ * @fled_cdev: the flash LED to query
+ * @state: 1 - flash is strobing, 0 - flash is off
+ *
+ * Check whether the flash is strobing at the moment.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
+ bool *state)
+{
+ if (fled_cdev->ops->strobe_get)
+ return fled_cdev->ops->strobe_get(fled_cdev, state);
+
+ return -EINVAL;
+}
+
+/**
+ * led_set_flash_brightness - set flash LED brightness
+ * @fled_cdev: the flash LED to set
+ * @brightness: the brightness to set it to
+ *
+ * Set a flash LED's brightness.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
+
+/**
+ * led_update_flash_brightness - update flash LED brightness
+ * @fled_cdev: the flash LED to query
+ *
+ * Get a flash LED's current brightness and update the fled_cdev->brightness
+ * member with the obtained value.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
+
+/**
+ * led_set_flash_timeout - set flash LED timeout
+ * @fled_cdev: the flash LED to set
+ * @timeout: the flash timeout to set it to
+ *
+ * Set the flash strobe duration.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
+ u32 timeout);
+
+/**
+ * led_get_flash_fault - get the flash LED fault
+ * @fled_cdev: the flash LED to query
+ * @fault: bitmask containing flash faults
+ *
+ * Get the flash LED fault.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev,
+ u32 *fault);
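+
+/*
+ * Typical strobe sequence (sketch; fled_cdev is obtained elsewhere and the
+ * chosen values are placeholders):
+ *
+ *	u32 fault;
+ *
+ *	led_set_flash_timeout(fled_cdev, fled_cdev->timeout.max);
+ *	led_set_flash_brightness(fled_cdev, fled_cdev->brightness.max);
+ *	led_set_flash_strobe(fled_cdev, true);
+ *	...
+ *	led_get_flash_fault(fled_cdev, &fault);
+ */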
+
+#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
new file mode 100644
index 000000000..4b133479d
--- /dev/null
+++ b/include/linux/led-lm3530.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA.
+ * Copyright (C) 2009 Motorola, Inc.
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Simple driver for the National Semiconductor LM3530 backlight driver chip
+ *
+ * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>
+ * based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com>
+ */
+
+#ifndef _LINUX_LED_LM3530_H__
+#define _LINUX_LED_LM3530_H__
+
+#define LM3530_FS_CURR_5mA (0) /* Full Scale Current */
+#define LM3530_FS_CURR_8mA (1)
+#define LM3530_FS_CURR_12mA (2)
+#define LM3530_FS_CURR_15mA (3)
+#define LM3530_FS_CURR_19mA (4)
+#define LM3530_FS_CURR_22mA (5)
+#define LM3530_FS_CURR_26mA (6)
+#define LM3530_FS_CURR_29mA (7)
+
+#define LM3530_ALS_AVRG_TIME_32ms (0) /* ALS Averaging Time */
+#define LM3530_ALS_AVRG_TIME_64ms (1)
+#define LM3530_ALS_AVRG_TIME_128ms (2)
+#define LM3530_ALS_AVRG_TIME_256ms (3)
+#define LM3530_ALS_AVRG_TIME_512ms (4)
+#define LM3530_ALS_AVRG_TIME_1024ms (5)
+#define LM3530_ALS_AVRG_TIME_2048ms (6)
+#define LM3530_ALS_AVRG_TIME_4096ms (7)
+
+#define LM3530_RAMP_TIME_1ms (0) /* Brightness Ramp Time */
+#define LM3530_RAMP_TIME_130ms (1) /* Max to 0 and vice versa */
+#define LM3530_RAMP_TIME_260ms (2)
+#define LM3530_RAMP_TIME_520ms (3)
+#define LM3530_RAMP_TIME_1s (4)
+#define LM3530_RAMP_TIME_2s (5)
+#define LM3530_RAMP_TIME_4s (6)
+#define LM3530_RAMP_TIME_8s (7)
+
+/* ALS Resistor Select */
+#define LM3530_ALS_IMPD_Z (0x00) /* ALS Impedance */
+#define LM3530_ALS_IMPD_13_53kOhm (0x01)
+#define LM3530_ALS_IMPD_9_01kOhm (0x02)
+#define LM3530_ALS_IMPD_5_41kOhm (0x03)
+#define LM3530_ALS_IMPD_2_27kOhm (0x04)
+#define LM3530_ALS_IMPD_1_94kOhm (0x05)
+#define LM3530_ALS_IMPD_1_81kOhm (0x06)
+#define LM3530_ALS_IMPD_1_6kOhm (0x07)
+#define LM3530_ALS_IMPD_1_138kOhm (0x08)
+#define LM3530_ALS_IMPD_1_05kOhm (0x09)
+#define LM3530_ALS_IMPD_1_011kOhm (0x0A)
+#define LM3530_ALS_IMPD_941Ohm (0x0B)
+#define LM3530_ALS_IMPD_759Ohm (0x0C)
+#define LM3530_ALS_IMPD_719Ohm (0x0D)
+#define LM3530_ALS_IMPD_700Ohm (0x0E)
+#define LM3530_ALS_IMPD_667Ohm (0x0F)
+
+enum lm3530_mode {
+ LM3530_BL_MODE_MANUAL = 0, /* "man" */
+ LM3530_BL_MODE_ALS, /* "als" */
+ LM3530_BL_MODE_PWM, /* "pwm" */
+};
+
+/* ALS input select */
+enum lm3530_als_mode {
+ LM3530_INPUT_AVRG = 0, /* ALS1 and ALS2 input average */
+ LM3530_INPUT_ALS1, /* ALS1 Input */
+ LM3530_INPUT_ALS2, /* ALS2 Input */
+ LM3530_INPUT_CEIL, /* Max of ALS1 and ALS2 */
+};
+
+/* PWM Platform Specific Data */
+struct lm3530_pwm_data {
+ void (*pwm_set_intensity) (int brightness, int max_brightness);
+ int (*pwm_get_intensity) (int max_brightness);
+};
+
+/**
+ * struct lm3530_platform_data
+ * @mode: mode of operation i.e. Manual, ALS or PWM
+ * @als_input_mode: select source of ALS input - ALS1/2 or average
+ * @max_current: full scale LED current
+ * @pwm_pol_hi: PWM input polarity - active high/active low
+ * @als_avrg_time: ALS input averaging time
+ * @brt_ramp_law: brightness mapping mode - exponential/linear
+ * @brt_ramp_fall: rate of fall of led current
+ * @brt_ramp_rise: rate of rise of led current
+ * @als1_resistor_sel: internal resistance from ALS1 input to ground
+ * @als2_resistor_sel: internal resistance from ALS2 input to ground
+ * @als_vmin: als input voltage calibrated for max brightness in mV
+ * @als_vmax: als input voltage calibrated for min brightness in mV
+ * @brt_val: brightness value (0-127)
+ * @pwm_data: PWM control functions (only valid when the mode is PWM)
+ */
+struct lm3530_platform_data {
+ enum lm3530_mode mode;
+ enum lm3530_als_mode als_input_mode;
+
+ u8 max_current;
+ bool pwm_pol_hi;
+ u8 als_avrg_time;
+
+ bool brt_ramp_law;
+ u8 brt_ramp_fall;
+ u8 brt_ramp_rise;
+
+ u8 als1_resistor_sel;
+ u8 als2_resistor_sel;
+
+ u32 als_vmin;
+ u32 als_vmax;
+
+ u8 brt_val;
+
+ struct lm3530_pwm_data pwm_data;
+};
+
+#endif /* _LINUX_LED_LM3530_H__ */
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
new file mode 100644
index 000000000..42f854a1a
--- /dev/null
+++ b/include/linux/leds-bd2802.h
@@ -0,0 +1,26 @@
+/*
+ * leds-bd2802.h - RGB LED Driver
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Kim Kyuwon <q1.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf
+ *
+ */
+#ifndef _LEDS_BD2802_H_
+#define _LEDS_BD2802_H_
+
+struct bd2802_led_platform_data{
+ int reset_gpio;
+ u8 rgb_time;
+};
+
+#define RGB_TIME(slopedown, slopeup, waveform) \
+ ((slopedown) << 6 | (slopeup) << 4 | (waveform))
+
+#endif /* _LEDS_BD2802_H_ */
+
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
new file mode 100644
index 000000000..2618aa906
--- /dev/null
+++ b/include/linux/leds-lp3944.h
@@ -0,0 +1,50 @@
+/*
+ * leds-lp3944.h - platform data structure for lp3944 led controller
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_LEDS_LP3944_H
+#define __LINUX_LEDS_LP3944_H
+
+#define LP3944_LED0 0
+#define LP3944_LED1 1
+#define LP3944_LED2 2
+#define LP3944_LED3 3
+#define LP3944_LED4 4
+#define LP3944_LED5 5
+#define LP3944_LED6 6
+#define LP3944_LED7 7
+#define LP3944_LEDS_MAX 8
+
+#define LP3944_LED_STATUS_MASK 0x03
+enum lp3944_status {
+ LP3944_LED_STATUS_OFF = 0x0,
+ LP3944_LED_STATUS_ON = 0x1,
+ LP3944_LED_STATUS_DIM0 = 0x2,
+ LP3944_LED_STATUS_DIM1 = 0x3
+};
+
+enum lp3944_type {
+ LP3944_LED_TYPE_NONE,
+ LP3944_LED_TYPE_LED,
+ LP3944_LED_TYPE_LED_INVERTED,
+};
+
+struct lp3944_led {
+ char *name;
+ enum lp3944_type type;
+ enum lp3944_status status;
+};
+
+struct lp3944_platform_data {
+ struct lp3944_led leds[LP3944_LEDS_MAX];
+ u8 leds_size;
+};
+
+#endif /* __LINUX_LEDS_LP3944_H */
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
new file mode 100644
index 000000000..b8d6fffed
--- /dev/null
+++ b/include/linux/leds-pca9532.h
@@ -0,0 +1,48 @@
+/*
+ * pca9532.h - platform data structure for pca9532 led controller
+ *
+ * Copyright (C) 2008 Riku Voipio <riku.voipio@movial.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf
+ *
+ */
+
+#ifndef __LINUX_PCA9532_H
+#define __LINUX_PCA9532_H
+
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+
+enum pca9532_state {
+ PCA9532_OFF = 0x0,
+ PCA9532_ON = 0x1,
+ PCA9532_PWM0 = 0x2,
+ PCA9532_PWM1 = 0x3
+};
+
+enum pca9532_type { PCA9532_TYPE_NONE, PCA9532_TYPE_LED,
+ PCA9532_TYPE_N2100_BEEP, PCA9532_TYPE_GPIO };
+
+struct pca9532_led {
+ u8 id;
+ struct i2c_client *client;
+ char *name;
+ struct led_classdev ldev;
+ struct work_struct work;
+ enum pca9532_type type;
+ enum pca9532_state state;
+};
+
+struct pca9532_platform_data {
+ struct pca9532_led leds[16];
+ u8 pwm[2];
+ u8 psc[2];
+ int gpio_base;
+};
+
+#endif /* __LINUX_PCA9532_H */
+
diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h
new file mode 100644
index 000000000..e2337a8c9
--- /dev/null
+++ b/include/linux/leds-regulator.h
@@ -0,0 +1,46 @@
+/*
+ * leds-regulator.h - platform data structure for regulator driven LEDs.
+ *
+ * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_LEDS_REGULATOR_H
+#define __LINUX_LEDS_REGULATOR_H
+
+/*
+ * Use "vled" as supply id when declaring the regulator consumer:
+ *
+ * static struct regulator_consumer_supply pcap_regulator_VVIB_consumers [] = {
+ * { .dev_name = "leds-regulator.0", .supply = "vled" },
+ * };
+ *
+ * If you have several regulator driven LEDs, you can append a numerical id to
+ * .dev_name as done above, and use the same id when declaring the platform
+ * device:
+ *
+ * static struct led_regulator_platform_data a780_vibrator_data = {
+ * .name = "a780::vibrator",
+ * };
+ *
+ * static struct platform_device a780_vibrator = {
+ * .name = "leds-regulator",
+ * .id = 0,
+ * .dev = {
+ * .platform_data = &a780_vibrator_data,
+ * },
+ * };
+ */
+
+#include <linux/leds.h>
+
+struct led_regulator_platform_data {
+ char *name; /* LED name as expected by LED class */
+ enum led_brightness brightness; /* initial brightness value */
+};
+
+#endif /* __LINUX_LEDS_REGULATOR_H */
diff --git a/include/linux/leds-tca6507.h b/include/linux/leds-tca6507.h
new file mode 100644
index 000000000..dcabf4fa2
--- /dev/null
+++ b/include/linux/leds-tca6507.h
@@ -0,0 +1,34 @@
+/*
+ * TCA6507 LED chip driver.
+ *
+ * Copyright (C) 2011 Neil Brown <neil@brown.name>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_TCA6507_H
+#define __LINUX_TCA6507_H
+#include <linux/leds.h>
+
+struct tca6507_platform_data {
+ struct led_platform_data leds;
+#ifdef CONFIG_GPIOLIB
+ int gpio_base;
+ void (*setup)(unsigned gpio_base, unsigned ngpio);
+#endif
+};
+
+#define TCA6507_MAKE_GPIO 1
+#endif /* __LINUX_TCA6507_H*/
diff --git a/include/linux/leds.h b/include/linux/leds.h
new file mode 100644
index 000000000..9a2b00009
--- /dev/null
+++ b/include/linux/leds.h
@@ -0,0 +1,347 @@
+/*
+ * Driver model for leds and led triggers
+ *
+ * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
+ * Copyright (C) 2005 Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_LEDS_H_INCLUDED
+#define __LINUX_LEDS_H_INCLUDED
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+struct device;
+/*
+ * LED Core
+ */
+
+enum led_brightness {
+ LED_OFF = 0,
+ LED_HALF = 127,
+ LED_FULL = 255,
+};
+
+struct led_classdev {
+ const char *name;
+ enum led_brightness brightness;
+ enum led_brightness max_brightness;
+ int flags;
+
+ /* Lower 16 bits reflect status */
+#define LED_SUSPENDED (1 << 0)
+ /* Upper 16 bits reflect control information */
+#define LED_CORE_SUSPENDRESUME (1 << 16)
+#define LED_BLINK_ONESHOT (1 << 17)
+#define LED_BLINK_ONESHOT_STOP (1 << 18)
+#define LED_BLINK_INVERT (1 << 19)
+#define LED_SYSFS_DISABLE (1 << 20)
+#define SET_BRIGHTNESS_ASYNC (1 << 21)
+#define SET_BRIGHTNESS_SYNC (1 << 22)
+#define LED_DEV_CAP_FLASH (1 << 23)
+
+ /* Set LED brightness level */
+ /* Must not sleep, use a workqueue if needed */
+ void (*brightness_set)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+ /*
+ * Set LED brightness level immediately - it can block the caller for
+ * the time required for accessing a LED device register.
+ */
+ int (*brightness_set_sync)(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+ /* Get LED brightness level */
+ enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
+
+ /*
+	 * Activate hardware-accelerated blinking. Delays are in milliseconds;
+	 * if both are zero, a sensible default should be chosen. The call
+	 * should adjust the timings in that case, and also when it cannot
+	 * match the values specified exactly.
+ * Deactivate blinking again when the brightness is set to a fixed
+ * value via the brightness_set() callback.
+ */
+ int (*blink_set)(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+
+ struct device *dev;
+ const struct attribute_group **groups;
+
+ struct list_head node; /* LED Device list */
+ const char *default_trigger; /* Trigger to use */
+
+ unsigned long blink_delay_on, blink_delay_off;
+ struct timer_list blink_timer;
+ int blink_brightness;
+ void (*flash_resume)(struct led_classdev *led_cdev);
+
+ struct work_struct set_brightness_work;
+ int delayed_set_value;
+
+#ifdef CONFIG_LEDS_TRIGGERS
+ /* Protects the trigger data below */
+ struct rw_semaphore trigger_lock;
+
+ struct led_trigger *trigger;
+ struct list_head trig_list;
+ void *trigger_data;
+ /* true if activated - deactivate routine uses it to do cleanup */
+ bool activated;
+#endif
+
+ /* Ensures consistent access to the LED Flash Class device */
+ struct mutex led_access;
+};
+
+extern int led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev);
+extern int devm_led_classdev_register(struct device *parent,
+ struct led_classdev *led_cdev);
+extern void led_classdev_unregister(struct led_classdev *led_cdev);
+extern void devm_led_classdev_unregister(struct device *parent,
+ struct led_classdev *led_cdev);
+extern void led_classdev_suspend(struct led_classdev *led_cdev);
+extern void led_classdev_resume(struct led_classdev *led_cdev);
+
+/**
+ * led_blink_set - set blinking with software fallback
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink, attempting to use the
+ * hardware acceleration if possible, but falling back to
+ * software blinking if there is no hardware blinking or if
+ * the LED refuses the passed values.
+ *
+ * Note that if software blinking is active, simply calling
+ * led_cdev->brightness_set() will not stop the blinking,
+ * use led_set_brightness() instead.
+ */
+extern void led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+/**
+ * led_blink_set_oneshot - do a oneshot software blink
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ * @invert: blink off, then on, leaving the led on
+ *
+ * This function makes the LED blink one time for delay_on +
+ * delay_off time, ignoring the request if another one-shot
+ * blink is already in progress.
+ *
+ * If invert is set, the led blinks for delay_off first, then for
+ * delay_on, and is left on after the on-off cycle.
+ */
+extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert);
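+
+/*
+ * Usage sketch: request a 500ms on / 500ms off blink; the core falls back to
+ * software blinking if the driver provides no blink_set():
+ *
+ *	unsigned long delay_on = 500, delay_off = 500;
+ *
+ *	led_blink_set(led_cdev, &delay_on, &delay_off);
+ */
+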
+/**
+ * led_set_brightness - set LED brightness
+ * @led_cdev: the LED to set
+ * @brightness: the brightness to set it to
+ *
+ * Set an LED's brightness, and, if necessary, cancel the
+ * software blink timer that implements blinking when the
+ * hardware doesn't.
+ */
+extern void led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
+/**
+ * led_update_brightness - update LED brightness
+ * @led_cdev: the LED to query
+ *
+ * Get an LED's current brightness and update led_cdev->brightness
+ * member with the obtained value.
+ *
+ * Returns: 0 on success or negative error value on failure
+ */
+extern int led_update_brightness(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_disable - disable LED sysfs interface
+ * @led_cdev: the LED to set
+ *
+ * Disable the led_cdev's sysfs interface.
+ */
+extern void led_sysfs_disable(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_enable - enable LED sysfs interface
+ * @led_cdev: the LED to set
+ *
+ * Enable the led_cdev's sysfs interface.
+ */
+extern void led_sysfs_enable(struct led_classdev *led_cdev);
+
+/**
+ * led_sysfs_is_disabled - check if LED sysfs interface is disabled
+ * @led_cdev: the LED to query
+ *
+ * Returns: true if the led_cdev's sysfs interface is disabled.
+ */
+static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
+{
+ return led_cdev->flags & LED_SYSFS_DISABLE;
+}
+
+/*
+ * LED Triggers
+ */
+/* Registration functions for simple triggers */
+#define DEFINE_LED_TRIGGER(x) static struct led_trigger *x;
+#define DEFINE_LED_TRIGGER_GLOBAL(x) struct led_trigger *x;
+
+#ifdef CONFIG_LEDS_TRIGGERS
+
+#define TRIG_NAME_MAX 50
+
+struct led_trigger {
+ /* Trigger Properties */
+ const char *name;
+ void (*activate)(struct led_classdev *led_cdev);
+ void (*deactivate)(struct led_classdev *led_cdev);
+
+ /* LEDs under control by this trigger (for simple triggers) */
+ rwlock_t leddev_list_lock;
+ struct list_head led_cdevs;
+
+ /* Link to next registered trigger */
+ struct list_head next_trig;
+};
+
+/* Registration functions for complex triggers */
+extern int led_trigger_register(struct led_trigger *trigger);
+extern void led_trigger_unregister(struct led_trigger *trigger);
+
+extern void led_trigger_register_simple(const char *name,
+ struct led_trigger **trigger);
+extern void led_trigger_unregister_simple(struct led_trigger *trigger);
+extern void led_trigger_event(struct led_trigger *trigger,
+ enum led_brightness event);
+extern void led_trigger_blink(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert);
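+
+/*
+ * Simple-trigger usage sketch (the trigger and event names are hypothetical):
+ *
+ *	DEFINE_LED_TRIGGER(my_trigger);
+ *
+ *	led_trigger_register_simple("my-event", &my_trigger);
+ *	led_trigger_event(my_trigger, LED_FULL);
+ *	led_trigger_unregister_simple(my_trigger);
+ */
+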
+/**
+ * led_trigger_rename_static - rename a trigger
+ * @name: the new trigger name
+ * @trig: the LED trigger to rename
+ *
+ * Change an LED trigger's name by copying the string passed in
+ * @name into the current trigger name, which MUST be large
+ * enough to hold the new string.
+ *
+ * Note that name must NOT point to the same string used
+ * during LED registration, as that could lead to races.
+ *
+ * This is meant to be used on triggers with statically
+ * allocated name.
+ */
+extern void led_trigger_rename_static(const char *name,
+ struct led_trigger *trig);
+
+#else
+
+/* Trigger has no members */
+struct led_trigger {};
+
+/* Trigger inline empty functions */
+static inline void led_trigger_register_simple(const char *name,
+ struct led_trigger **trigger) {}
+static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {}
+static inline void led_trigger_event(struct led_trigger *trigger,
+ enum led_brightness event) {}
+#endif /* CONFIG_LEDS_TRIGGERS */
+
+/* Trigger specific functions */
+#ifdef CONFIG_LEDS_TRIGGER_IDE_DISK
+extern void ledtrig_ide_activity(void);
+#else
+static inline void ledtrig_ide_activity(void) {}
+#endif
+
+#if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE)
+extern void ledtrig_flash_ctrl(bool on);
+extern void ledtrig_torch_ctrl(bool on);
+#else
+static inline void ledtrig_flash_ctrl(bool on) {}
+static inline void ledtrig_torch_ctrl(bool on) {}
+#endif
+
+/*
+ * Generic LED platform data for describing LED names and default triggers.
+ */
+struct led_info {
+ const char *name;
+ const char *default_trigger;
+ int flags;
+};
+
+struct led_platform_data {
+ int num_leds;
+ struct led_info *leds;
+};
+
+/* For the leds-gpio driver */
+struct gpio_led {
+ const char *name;
+ const char *default_trigger;
+ unsigned gpio;
+ unsigned active_low : 1;
+ unsigned retain_state_suspended : 1;
+ unsigned default_state : 2;
+ /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
+ struct gpio_desc *gpiod;
+};
+#define LEDS_GPIO_DEFSTATE_OFF 0
+#define LEDS_GPIO_DEFSTATE_ON 1
+#define LEDS_GPIO_DEFSTATE_KEEP 2
+
+struct gpio_led_platform_data {
+ int num_leds;
+ const struct gpio_led *leds;
+
+#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */
+#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */
+#define GPIO_LED_BLINK 2 /* Please, blink */
+ int (*gpio_blink_set)(struct gpio_desc *desc, int state,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+};
+
+struct platform_device *gpio_led_register_device(
+ int id, const struct gpio_led_platform_data *pdata);
+
+enum cpu_led_event {
+ CPU_LED_IDLE_START, /* CPU enters idle */
+ CPU_LED_IDLE_END, /* CPU idle ends */
+ CPU_LED_START, /* Machine starts, especially resume */
+ CPU_LED_STOP, /* Machine stops, especially suspend */
+ CPU_LED_HALTED, /* Machine shutdown */
+};
+#ifdef CONFIG_LEDS_TRIGGER_CPU
+extern void ledtrig_cpu(enum cpu_led_event evt);
+#else
+static inline void ledtrig_cpu(enum cpu_led_event evt)
+{
+ return;
+}
+#endif
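+
+/*
+ * Illustrative sketch (not part of this header): architecture idle code
+ * can report CPU state transitions to the CPU trigger, for example
+ *
+ *	ledtrig_cpu(CPU_LED_IDLE_START);
+ *	cpu_do_idle();
+ *	ledtrig_cpu(CPU_LED_IDLE_END);
+ *
+ * where cpu_do_idle() stands in for whatever the architecture uses to
+ * enter idle.
+ */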
+
+#endif /* __LINUX_LEDS_H_INCLUDED */
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
new file mode 100644
index 000000000..a65e9646e
--- /dev/null
+++ b/include/linux/leds_pwm.h
@@ -0,0 +1,21 @@
+/*
+ * PWM LED driver data - see drivers/leds/leds-pwm.c
+ */
+#ifndef __LINUX_LEDS_PWM_H
+#define __LINUX_LEDS_PWM_H
+
+struct led_pwm {
+ const char *name;
+ const char *default_trigger;
+ unsigned pwm_id __deprecated;
+ u8 active_low;
+ unsigned max_brightness;
+ unsigned pwm_period_ns;
+};
+
+struct led_pwm_platform_data {
+ int num_leds;
+ struct led_pwm *leds;
+};
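+
+/*
+ * Illustrative sketch (not part of this header): platform code lists each
+ * PWM-driven LED; the leds-pwm driver scales brightness into the PWM duty
+ * cycle.  The name and period below are hypothetical.
+ *
+ *	static struct led_pwm board_pwm_leds[] = {
+ *		{ .name = "board:red:alert", .max_brightness = 255,
+ *		  .pwm_period_ns = 1000000 },
+ *	};
+ *
+ *	static struct led_pwm_platform_data board_pwm_led_data = {
+ *		.num_leds = ARRAY_SIZE(board_pwm_leds),
+ *		.leds = board_pwm_leds,
+ *	};
+ */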
+
+#endif
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
new file mode 100644
index 000000000..0081f000e
--- /dev/null
+++ b/include/linux/lglock.h
@@ -0,0 +1,76 @@
+/*
+ * Specialised local-global spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * "local/global locks" (lglocks) can be used to:
+ *
+ * - Provide fast exclusive access to per-CPU data, with exclusive access to
+ * another CPU's data allowed but possibly subject to contention, and to
+ * provide very slow exclusive access to all per-CPU data.
+ * - Or to provide very fast and scalable read serialisation, and to provide
+ * very slow exclusive serialisation of data (not necessarily per-CPU data).
+ *
+ * Brlocks are also implemented as a short-hand notation for the latter use
+ * case.
+ *
+ * Copyright 2009, 2010, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_LGLOCK_H
+#define __LINUX_LGLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/lockdep.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+
+#ifdef CONFIG_SMP
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define LOCKDEP_INIT_MAP lockdep_init_map
+#else
+#define LOCKDEP_INIT_MAP(a, b, c, d)
+#endif
+
+struct lglock {
+ arch_spinlock_t __percpu *lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lock_key;
+ struct lockdep_map lock_dep_map;
+#endif
+};
+
+#define DEFINE_LGLOCK(name) \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ struct lglock name = { .lock = &name ## _lock }
+
+#define DEFINE_STATIC_LGLOCK(name) \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ static struct lglock name = { .lock = &name ## _lock }
+
+void lg_lock_init(struct lglock *lg, char *name);
+void lg_local_lock(struct lglock *lg);
+void lg_local_unlock(struct lglock *lg);
+void lg_local_lock_cpu(struct lglock *lg, int cpu);
+void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+void lg_global_lock(struct lglock *lg);
+void lg_global_unlock(struct lglock *lg);
+
+#else
+/* When !CONFIG_SMP, map lglock to spinlock */
+#define lglock spinlock
+#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name)
+#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name)
+#define lg_lock_init(lg, name) spin_lock_init(lg)
+#define lg_local_lock spin_lock
+#define lg_local_unlock spin_unlock
+#define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
+#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
+#define lg_global_lock spin_lock
+#define lg_global_unlock spin_unlock
+#endif
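+
+/*
+ * Illustrative sketch (not part of this header): the common pattern is a
+ * fast path that only takes the local (per-CPU) lock and a rare slow path
+ * that takes every CPU's lock.  The lock name is hypothetical.
+ *
+ *	DEFINE_STATIC_LGLOCK(my_lglock);
+ *
+ *	lg_local_lock(&my_lglock);
+ *	... touch this CPU's data ...
+ *	lg_local_unlock(&my_lglock);
+ *
+ *	lg_global_lock(&my_lglock);
+ *	... walk every CPU's data ...
+ *	lg_global_unlock(&my_lglock);
+ */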
+
+#endif
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
new file mode 100644
index 000000000..6db19f35f
--- /dev/null
+++ b/include/linux/lguest.h
@@ -0,0 +1,73 @@
+/*
+ * Things the lguest guest needs to know. Note: like all lguest interfaces,
+ * this is subject to wild and random change between versions.
+ */
+#ifndef _LINUX_LGUEST_H
+#define _LINUX_LGUEST_H
+
+#ifndef __ASSEMBLY__
+#include <linux/time.h>
+#include <asm/irq.h>
+#include <asm/lguest_hcall.h>
+
+#define LG_CLOCK_MIN_DELTA 100UL
+#define LG_CLOCK_MAX_DELTA ULONG_MAX
+
+/*G:031
+ * The second method of communicating with the Host is via "struct
+ * lguest_data". Once the Guest's initialization hypercall tells the Host where
+ * this is, the Guest and Host both publish information in it.
+:*/
+struct lguest_data {
+ /*
+ * 512 == enabled (same as eflags in normal hardware). The Guest
+ * changes interrupts so often that a hypercall is too slow.
+ */
+ unsigned int irq_enabled;
+ /* Fine-grained interrupt disabling by the Guest */
+ DECLARE_BITMAP(blocked_interrupts, LGUEST_IRQS);
+
+ /*
+ * The Host writes the virtual address of the last page fault here,
+ * which saves the Guest a hypercall. CR2 is the native register where
+ * this address would normally be found.
+ */
+ unsigned long cr2;
+
+ /* Wallclock time set by the Host. */
+ struct timespec time;
+
+ /*
+ * Interrupt pending set by the Host. The Guest should do a hypercall
+ * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF).
+ */
+ int irq_pending;
+
+ /*
+ * Async hypercall ring. Instead of directly making hypercalls, we can
+ * place them in here for processing the next time the Host wants.
+ * This batching can be quite efficient.
+ */
+
+ /* 0xFF == done (set by Host), 0 == pending (set by Guest). */
+ u8 hcall_status[LHCALL_RING_SIZE];
+ /* The actual registers for the hypercalls. */
+ struct hcall_args hcalls[LHCALL_RING_SIZE];
+
+/* Fields initialized by the Host at boot: */
+ /* Memory not to try to access */
+ unsigned long reserve_mem;
+ /* KHz for the TSC clock. */
+ u32 tsc_khz;
+
+/* Fields initialized by the Guest at boot: */
+ /* Instruction to suppress interrupts even if enabled */
+ unsigned long noirq_iret;
+ /* Address above which page tables are all identical. */
+ unsigned long kernel_address;
+ /* The vector to try to use for system calls (0x40 or 0x80). */
+ unsigned int syscall_vec;
+};
+extern struct lguest_data lguest_data;
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_LGUEST_H */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
new file mode 100644
index 000000000..acd5b1256
--- /dev/null
+++ b/include/linux/lguest_launcher.h
@@ -0,0 +1,44 @@
+#ifndef _LINUX_LGUEST_LAUNCHER
+#define _LINUX_LGUEST_LAUNCHER
+/* Everything the "lguest" userspace program needs to know. */
+#include <linux/types.h>
+
+/*D:010
+ * Drivers
+ *
+ * The Guest needs devices to do anything useful. Since we don't let it touch
+ * real devices (think of the damage it could do!) we provide virtual devices.
+ * We emulate a PCI bus with virtio devices on it; we used to have our own
+ * lguest bus which was far simpler, but this tests the virtio 1.0 standard.
+ *
+ * Virtio devices are also used by kvm, so we can simply reuse their optimized
+ * device drivers. And one day when everyone uses virtio, my plan will be
+ * complete. Bwahahahah!
+ */
+
+/* Write command first word is a request. */
+enum lguest_req
+{
+ LHREQ_INITIALIZE, /* + base, pfnlimit, start */
+ LHREQ_GETDMA, /* No longer used */
+ LHREQ_IRQ, /* + irq */
+ LHREQ_BREAK, /* No longer used */
+ LHREQ_EVENTFD, /* No longer used. */
+ LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */
+ LHREQ_SETREG, /* + offset within struct pt_regs, value. */
+ LHREQ_TRAP, /* + trap number to deliver to guest. */
+};
+
+/*
+ * This is what read() of the lguest fd populates. trap ==
+ * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the
+ * argument), 14 for a page fault in the MMIO region (addr is
+ * the trap address, insn is the instruction), or 13 for a GPF
+ * (insn is the instruction).
+ */
+struct lguest_pending {
+ __u8 trap;
+ __u8 insn[7];
+ __u32 addr;
+};
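+
+/*
+ * Illustrative sketch (not part of this header): the Launcher finds out
+ * why the Guest stopped by reading one of these records from the lguest
+ * fd.  lguest_fd and emulate_mmio() below are hypothetical names.
+ *
+ *	struct lguest_pending pending;
+ *
+ *	if (read(lguest_fd, &pending, sizeof(pending)) == sizeof(pending)
+ *	    && pending.trap == 14)
+ *		emulate_mmio(pending.addr, pending.insn);
+ */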
+#endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libata.h b/include/linux/libata.h
new file mode 100644
index 000000000..c27a346a1
--- /dev/null
+++ b/include/linux/libata.h
@@ -0,0 +1,1964 @@
+/*
+ * Copyright 2003-2005 Red Hat, Inc. All rights reserved.
+ * Copyright 2003-2005 Jeff Garzik
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ */
+
+#ifndef __LINUX_LIBATA_H__
+#define __LINUX_LIBATA_H__
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/ata.h>
+#include <linux/workqueue.h>
+#include <scsi/scsi_host.h>
+#include <linux/acpi.h>
+#include <linux/cdrom.h>
+#include <linux/sched.h>
+
+/*
+ * Define if arch has non-standard setup. This is a _PCI_ standard,
+ * not a legacy or ISA standard.
+ */
+#ifdef CONFIG_ATA_NONSTANDARD
+#include <asm/libata-portmap.h>
+#else
+#include <asm-generic/libata-portmap.h>
+#endif
+
+/*
+ * compile-time options: to be removed as soon as all the drivers are
+ * converted to the new debugging mechanism
+ */
+#undef ATA_DEBUG /* debugging output */
+#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
+#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
+#undef ATA_NDEBUG /* define to disable quick runtime checks */
+
+
+/* note: prints function name for you */
+#ifdef ATA_DEBUG
+#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
+#ifdef ATA_VERBOSE_DEBUG
+#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
+#else
+#define VPRINTK(fmt, args...)
+#endif /* ATA_VERBOSE_DEBUG */
+#else
+#define DPRINTK(fmt, args...)
+#define VPRINTK(fmt, args...)
+#endif /* ATA_DEBUG */
+
+#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
+
+#define ata_print_version_once(dev, version) \
+({ \
+ static bool __print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ ata_print_version(dev, version); \
+ } \
+})
+
+/* NEW: debug levels */
+#define HAVE_LIBATA_MSG 1
+
+enum {
+ ATA_MSG_DRV = 0x0001,
+ ATA_MSG_INFO = 0x0002,
+ ATA_MSG_PROBE = 0x0004,
+ ATA_MSG_WARN = 0x0008,
+ ATA_MSG_MALLOC = 0x0010,
+ ATA_MSG_CTL = 0x0020,
+ ATA_MSG_INTR = 0x0040,
+ ATA_MSG_ERR = 0x0080,
+};
+
+#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
+#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
+#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
+#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
+#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
+#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
+#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
+#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
+
+static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
+{
+ if (dval < 0 || dval >= (sizeof(u32) * 8))
+ return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
+ if (!dval)
+ return 0;
+ return (1 << dval) - 1;
+}
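+
+/*
+ * For example, ata_msg_init(3, ATA_MSG_DRV) returns 0x7 (DRV, INFO and
+ * PROBE messages enabled), while a negative or out-of-range dval falls
+ * back to the default mask passed in.
+ */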
+
+/* defines only for the constants which don't work well as enums */
+#define ATA_TAG_POISON 0xfafbfcfdU
+
+enum {
+ /* various global constants */
+ LIBATA_MAX_PRD = ATA_MAX_PRD / 2,
+ LIBATA_DUMB_MAX_PRD = ATA_MAX_PRD / 4, /* Worst case */
+ ATA_DEF_QUEUE = 1,
+ /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
+ ATA_MAX_QUEUE = 32,
+ ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
+ ATA_SHORT_PAUSE = 16,
+
+ ATAPI_MAX_DRAIN = 16 << 10,
+
+ ATA_ALL_DEVICES = (1 << ATA_MAX_DEVICES) - 1,
+
+ ATA_SHT_EMULATED = 1,
+ ATA_SHT_CMD_PER_LUN = 1,
+ ATA_SHT_THIS_ID = -1,
+ ATA_SHT_USE_CLUSTERING = 1,
+
+ /* struct ata_taskfile flags */
+ ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
+ ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
+ ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
+ ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
+ ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
+ ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
+ ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
+
+ /* protocol flags */
+ ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */
+ ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */
+ ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
+ ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */
+ ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */
+
+ /* struct ata_device stuff */
+ ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
+ ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
+ ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
+ ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */
+ ATA_DFLAG_FLUSH_EXT = (1 << 4), /* do FLUSH_EXT instead of FLUSH */
+ ATA_DFLAG_ACPI_PENDING = (1 << 5), /* ACPI resume action pending */
+ ATA_DFLAG_ACPI_FAILED = (1 << 6), /* ACPI on devcfg has failed */
+ ATA_DFLAG_AN = (1 << 7), /* AN configured */
+ ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */
+ ATA_DFLAG_CFG_MASK = (1 << 12) - 1,
+
+ ATA_DFLAG_PIO = (1 << 12), /* device limited to PIO mode */
+ ATA_DFLAG_NCQ_OFF = (1 << 13), /* device limited to non-NCQ mode */
+ ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */
+ ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
+ ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
+ ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
+ ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
+ ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
+
+ ATA_DFLAG_DETACH = (1 << 24),
+ ATA_DFLAG_DETACHED = (1 << 25),
+
+ ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
+ ATA_DFLAG_DEVSLP = (1 << 27), /* device supports Device Sleep */
+ ATA_DFLAG_ACPI_DISABLED = (1 << 28), /* ACPI for the device is disabled */
+
+ ATA_DEV_UNKNOWN = 0, /* unknown device */
+ ATA_DEV_ATA = 1, /* ATA device */
+ ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */
+ ATA_DEV_ATAPI = 3, /* ATAPI device */
+ ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */
+ ATA_DEV_PMP = 5, /* SATA port multiplier */
+ ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */
+ ATA_DEV_SEMB = 7, /* SEMB */
+ ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */
+ ATA_DEV_ZAC = 9, /* ZAC device */
+ ATA_DEV_NONE = 10, /* no device */
+
+ /* struct ata_link flags */
+ ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
+ ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */
+ ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */
+ ATA_LFLAG_ASSUME_SEMB = (1 << 4), /* assume SEMB class */
+ ATA_LFLAG_ASSUME_CLASS = ATA_LFLAG_ASSUME_ATA | ATA_LFLAG_ASSUME_SEMB,
+ ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */
+ ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */
+ ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */
+ ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
+ ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
+ ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
+
+ /* struct ata_port flags */
+ ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
+ /* (doesn't imply presence) */
+ ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
+ ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
+ ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
+ ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD
+ * doesn't handle PIO interrupts */
+ ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */
+ ATA_FLAG_NO_POWEROFF_SPINDOWN = (1 << 11), /* don't spindown before poweroff */
+ ATA_FLAG_NO_HIBERNATE_SPINDOWN = (1 << 12), /* don't spindown before hibernation */
+ ATA_FLAG_DEBUGMSG = (1 << 13),
+ ATA_FLAG_FPDMA_AA = (1 << 14), /* driver supports Auto-Activate */
+ ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */
+ ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */
+ ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
+ ATA_FLAG_AN = (1 << 18), /* controller supports AN */
+ ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
+ ATA_FLAG_FPDMA_AUX = (1 << 20), /* controller supports H2DFIS aux field */
+ ATA_FLAG_EM = (1 << 21), /* driver supports enclosure
+ * management */
+ ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
+ * led */
+ ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
+ ATA_FLAG_SAS_HOST = (1 << 24), /* SAS host */
+
+ /* bits 24:31 of ap->flags are reserved for LLD specific flags */
+
+
+ /* struct ata_port pflags */
+ ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */
+ ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
+ ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
+ ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
+ ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
+ ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
+ ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */
+ ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */
+ ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
+ ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+
+ ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
+ ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
+ ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
+
+ ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */
+ ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */
+
+ /* struct ata_queued_cmd flags */
+	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi layer */
+ ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */
+ ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
+ ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */
+ ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */
+ ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */
+ ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */
+
+ ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */
+ ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */
+ ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
+
+ /* host set flags */
+ ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
+ ATA_HOST_STARTED = (1 << 1), /* Host started */
+ ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
+ ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+
+ /* bits 24:31 of host->flags are reserved for LLD specific flags */
+
+ /* various lengths of time */
+ ATA_TMOUT_BOOT = 30000, /* heuristic */
+ ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */
+ ATA_TMOUT_INTERNAL_QUICK = 5000,
+ ATA_TMOUT_MAX_PARK = 30000,
+
+ /*
+ * GoVault needs 2s and iVDR disk HHD424020F7SV00 800ms. 2s
+ * is too much without parallel probing. Use 2s if parallel
+ * probing is available, 800ms otherwise.
+ */
+ ATA_TMOUT_FF_WAIT_LONG = 2000,
+ ATA_TMOUT_FF_WAIT = 800,
+
+	/* The spec mandates waiting ">= 2ms" before checking status
+	 * after reset.  We wait 150ms, because that was the magic
+	 * delay used for ATAPI devices in Hale Landis's ATADRVR for
+	 * the period between writing the ATA command register and
+	 * checking status.  Because waiting "a while" before checking
+	 * status is fine post-SRST, we perform this magic delay here
+	 * as well.
+	 *
+	 * Old drivers/ide used the 2 ms rule and then waited for ready.
+ */
+ ATA_WAIT_AFTER_RESET = 150,
+
+ /* If PMP is supported, we have to do follow-up SRST. As some
+ * PMPs don't send D2H Reg FIS after hardreset, LLDs are
+ * advised to wait only for the following duration before
+ * doing SRST.
+ */
+ ATA_TMOUT_PMP_SRST_WAIT = 5000,
+
+ /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
+ * be a spurious PHY event, so ignore the first PHY event that
+ * occurs within 10s after the policy change.
+ */
+ ATA_TMOUT_SPURIOUS_PHY = 10000,
+
+ /* ATA bus states */
+ BUS_UNKNOWN = 0,
+ BUS_DMA = 1,
+ BUS_IDLE = 2,
+ BUS_NOINTR = 3,
+ BUS_NODATA = 4,
+ BUS_TIMER = 5,
+ BUS_PIO = 6,
+ BUS_EDD = 7,
+ BUS_IDENTIFY = 8,
+ BUS_PACKET = 9,
+
+ /* SATA port states */
+ PORT_UNKNOWN = 0,
+ PORT_ENABLED = 1,
+ PORT_DISABLED = 2,
+
+ /* encoding various smaller bitmaps into a single
+ * unsigned long bitmap
+ */
+ ATA_NR_PIO_MODES = 7,
+ ATA_NR_MWDMA_MODES = 5,
+ ATA_NR_UDMA_MODES = 8,
+
+ ATA_SHIFT_PIO = 0,
+ ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_NR_PIO_MODES,
+ ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_NR_MWDMA_MODES,
+
+ /* size of buffer to pad xfers ending on unaligned boundaries */
+ ATA_DMA_PAD_SZ = 4,
+
+ /* ering size */
+ ATA_ERING_SIZE = 32,
+
+ /* return values for ->qc_defer */
+ ATA_DEFER_LINK = 1,
+ ATA_DEFER_PORT = 2,
+
+ /* desc_len for ata_eh_info and context */
+ ATA_EH_DESC_LEN = 80,
+
+ /* reset / recovery action types */
+ ATA_EH_REVALIDATE = (1 << 0),
+ ATA_EH_SOFTRESET = (1 << 1), /* meaningful only in ->prereset */
+ ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */
+ ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
+ ATA_EH_ENABLE_LINK = (1 << 3),
+ ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
+
+ ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK,
+ ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
+ ATA_EH_ENABLE_LINK,
+
+ /* ata_eh_info->flags */
+ ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
+ ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
+ ATA_EHI_QUIET = (1 << 3), /* be quiet */
+ ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
+
+ ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
+	ATA_EHI_DID_HARDRESET	= (1 << 17), /* already hard-reset this port */
+ ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */
+ ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */
+ ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */
+
+ ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
+
+ /* mask of flags to transfer *to* the slave link */
+ ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
+
+ /* max tries if error condition is still set after ->error_handler */
+ ATA_EH_MAX_TRIES = 5,
+
+ /* sometimes resuming a link requires several retries */
+ ATA_LINK_RESUME_TRIES = 5,
+
+ /* how hard are we gonna try to probe/recover devices */
+ ATA_PROBE_MAX_TRIES = 3,
+ ATA_EH_DEV_TRIES = 3,
+ ATA_EH_PMP_TRIES = 5,
+ ATA_EH_PMP_LINK_TRIES = 3,
+
+ SATA_PMP_RW_TIMEOUT = 3000, /* PMP read/write timeout */
+
+ /* This should match the actual table size of
+ * ata_eh_cmd_timeout_table in libata-eh.c.
+ */
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6,
+
+ /* Horkage types. May be set by libata or controller on drives
+	   (some horkage may be drive/controller pair dependent) */
+
+ ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
+ ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */
+ ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */
+ ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */
+ ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */
+ ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */
+ ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */
+ ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
+ ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
+ ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
+ ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
+ not multiple of 16 bytes */
+ ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
+ ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
+ ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */
+ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
+ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
+ ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
+ ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
+ ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
+ ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
+ ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
+ ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
+ ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
+ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
+ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
+
+ /* DMA mask for user DMA control: User visible values; DO NOT
+ renumber */
+ ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */
+ ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */
+ ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */
+
+ /* ATAPI command types */
+ ATAPI_READ = 0, /* READs */
+ ATAPI_WRITE = 1, /* WRITEs */
+ ATAPI_READ_CD = 2, /* READ CD [MSF] */
+ ATAPI_PASS_THRU = 3, /* SAT pass-thru */
+ ATAPI_MISC = 4, /* the rest */
+
+ /* Timing constants */
+ ATA_TIMING_SETUP = (1 << 0),
+ ATA_TIMING_ACT8B = (1 << 1),
+ ATA_TIMING_REC8B = (1 << 2),
+ ATA_TIMING_CYC8B = (1 << 3),
+ ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
+ ATA_TIMING_CYC8B,
+ ATA_TIMING_ACTIVE = (1 << 4),
+ ATA_TIMING_RECOVER = (1 << 5),
+ ATA_TIMING_DMACK_HOLD = (1 << 6),
+ ATA_TIMING_CYCLE = (1 << 7),
+ ATA_TIMING_UDMA = (1 << 8),
+ ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
+ ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
+ ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
+ ATA_TIMING_DMACK_HOLD | ATA_TIMING_CYCLE |
+ ATA_TIMING_UDMA,
+
+ /* ACPI constants */
+ ATA_ACPI_FILTER_SETXFER = 1 << 0,
+ ATA_ACPI_FILTER_LOCK = 1 << 1,
+ ATA_ACPI_FILTER_DIPM = 1 << 2,
+ ATA_ACPI_FILTER_FPDMA_OFFSET = 1 << 3, /* FPDMA non-zero offset */
+ ATA_ACPI_FILTER_FPDMA_AA = 1 << 4, /* FPDMA auto activate */
+
+ ATA_ACPI_FILTER_DEFAULT = ATA_ACPI_FILTER_SETXFER |
+ ATA_ACPI_FILTER_LOCK |
+ ATA_ACPI_FILTER_DIPM,
+};
+
+enum ata_xfer_mask {
+ ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1)
+ << ATA_SHIFT_PIO,
+ ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1)
+ << ATA_SHIFT_MWDMA,
+ ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1)
+ << ATA_SHIFT_UDMA,
+};
+
+enum hsm_task_states {
+	HSM_ST_IDLE,		/* no command ongoing */
+	HSM_ST_FIRST,		/* (waiting for the device to)
+				   write CDB or first data block */
+	HSM_ST,			/* (waiting for the device to) transfer data */
+	HSM_ST_LAST,		/* (waiting for the device to) complete command */
+ HSM_ST_ERR, /* error */
+};
+
+enum ata_completion_errors {
+ AC_ERR_DEV = (1 << 0), /* device reported error */
+ AC_ERR_HSM = (1 << 1), /* host state machine violation */
+ AC_ERR_TIMEOUT = (1 << 2), /* timeout */
+ AC_ERR_MEDIA = (1 << 3), /* media error */
+ AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */
+ AC_ERR_HOST_BUS = (1 << 5), /* host bus error */
+ AC_ERR_SYSTEM = (1 << 6), /* system error */
+ AC_ERR_INVALID = (1 << 7), /* invalid argument */
+ AC_ERR_OTHER = (1 << 8), /* unknown */
+ AC_ERR_NODEV_HINT = (1 << 9), /* polling device detection hint */
+ AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */
+};
+
+/*
+ * Link power management policy: If you alter this, you also need to
+ * alter libata-scsi.c (for the ascii descriptions)
+ */
+enum ata_lpm_policy {
+ ATA_LPM_UNKNOWN,
+ ATA_LPM_MAX_POWER,
+ ATA_LPM_FIRMWARE_DEFAULTS,
+ ATA_LPM_MED_POWER,
+ ATA_LPM_MIN_POWER,
+};
+
+enum ata_lpm_hints {
+ ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */
+ ATA_LPM_HIPM = (1 << 1), /* may use HIPM */
+};
+
+/* forward declarations */
+struct scsi_device;
+struct ata_port_operations;
+struct ata_port;
+struct ata_link;
+struct ata_queued_cmd;
+
+/* typedefs */
+typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
+typedef int (*ata_prereset_fn_t)(struct ata_link *link, unsigned long deadline);
+typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline);
+typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
+
+extern struct device_attribute dev_attr_link_power_management_policy;
+extern struct device_attribute dev_attr_unload_heads;
+extern struct device_attribute dev_attr_em_message_type;
+extern struct device_attribute dev_attr_em_message;
+extern struct device_attribute dev_attr_sw_activity;
+
+enum sw_activity {
+ OFF,
+ BLINK_ON,
+ BLINK_OFF,
+};
+
+struct ata_taskfile {
+ unsigned long flags; /* ATA_TFLAG_xxx */
+ u8 protocol; /* ATA_PROT_xxx */
+
+ u8 ctl; /* control reg */
+
+ u8 hob_feature; /* additional data */
+ u8 hob_nsect; /* to support LBA48 */
+ u8 hob_lbal;
+ u8 hob_lbam;
+ u8 hob_lbah;
+
+ u8 feature;
+ u8 nsect;
+ u8 lbal;
+ u8 lbam;
+ u8 lbah;
+
+ u8 device;
+
+ u8 command; /* IO operation */
+
+ u32 auxiliary; /* auxiliary field */
+ /* from SATA 3.1 and */
+ /* ATA-8 ACS-3 */
+};
+
+#ifdef CONFIG_ATA_SFF
+struct ata_ioports {
+ void __iomem *cmd_addr;
+ void __iomem *data_addr;
+ void __iomem *error_addr;
+ void __iomem *feature_addr;
+ void __iomem *nsect_addr;
+ void __iomem *lbal_addr;
+ void __iomem *lbam_addr;
+ void __iomem *lbah_addr;
+ void __iomem *device_addr;
+ void __iomem *status_addr;
+ void __iomem *command_addr;
+ void __iomem *altstatus_addr;
+ void __iomem *ctl_addr;
+#ifdef CONFIG_ATA_BMDMA
+ void __iomem *bmdma_addr;
+#endif /* CONFIG_ATA_BMDMA */
+ void __iomem *scr_addr;
+};
+#endif /* CONFIG_ATA_SFF */
+
+struct ata_host {
+ spinlock_t lock;
+ struct device *dev;
+ void __iomem * const *iomap;
+ unsigned int n_ports;
+ unsigned int n_tags; /* nr of NCQ tags */
+ void *private_data;
+ struct ata_port_operations *ops;
+ unsigned long flags;
+
+ struct mutex eh_mutex;
+ struct task_struct *eh_owner;
+
+ struct ata_port *simplex_claimed; /* channel owning the DMA */
+ struct ata_port *ports[0];
+};
+
+struct ata_queued_cmd {
+ struct ata_port *ap;
+ struct ata_device *dev;
+
+ struct scsi_cmnd *scsicmd;
+ void (*scsidone)(struct scsi_cmnd *);
+
+ struct ata_taskfile tf;
+ u8 cdb[ATAPI_CDB_LEN];
+
+ unsigned long flags; /* ATA_QCFLAG_xxx */
+ unsigned int tag;
+ unsigned int n_elem;
+ unsigned int orig_n_elem;
+
+ int dma_dir;
+
+ unsigned int sect_size;
+
+ unsigned int nbytes;
+ unsigned int extrabytes;
+ unsigned int curbytes;
+
+ struct scatterlist sgent;
+
+ struct scatterlist *sg;
+
+ struct scatterlist *cursg;
+ unsigned int cursg_ofs;
+
+ unsigned int err_mask;
+ struct ata_taskfile result_tf;
+ ata_qc_cb_t complete_fn;
+
+ void *private_data;
+ void *lldd_task;
+};
+
+struct ata_port_stats {
+ unsigned long unhandled_irq;
+ unsigned long idle_irq;
+ unsigned long rw_reqbuf;
+};
+
+struct ata_ering_entry {
+ unsigned int eflags;
+ unsigned int err_mask;
+ u64 timestamp;
+};
+
+struct ata_ering {
+ int cursor;
+ struct ata_ering_entry ring[ATA_ERING_SIZE];
+};
+
+struct ata_device {
+ struct ata_link *link;
+ unsigned int devno; /* 0 or 1 */
+ unsigned int horkage; /* List of broken features */
+ unsigned long flags; /* ATA_DFLAG_xxx */
+ struct scsi_device *sdev; /* attached SCSI device */
+ void *private_data;
+#ifdef CONFIG_ATA_ACPI
+ union acpi_object *gtf_cache;
+ unsigned int gtf_filter;
+#endif
+#ifdef CONFIG_SATA_ZPODD
+ void *zpodd;
+#endif
+ struct device tdev;
+	/* n_sectors is CLEAR_BEGIN, read the comment above CLEAR_BEGIN */
+ u64 n_sectors; /* size of device, if ATA */
+ u64 n_native_sectors; /* native size, if ATA */
+ unsigned int class; /* ATA_DEV_xxx */
+ unsigned long unpark_deadline;
+
+ u8 pio_mode;
+ u8 dma_mode;
+ u8 xfer_mode;
+ unsigned int xfer_shift; /* ATA_SHIFT_xxx */
+
+ unsigned int multi_count; /* sectors count for
+ READ/WRITE MULTIPLE */
+ unsigned int max_sectors; /* per-device max sectors */
+ unsigned int cdb_len;
+
+ /* per-dev xfer mask */
+ unsigned long pio_mask;
+ unsigned long mwdma_mask;
+ unsigned long udma_mask;
+
+ /* for CHS addressing */
+ u16 cylinders; /* Number of cylinders */
+ u16 heads; /* Number of heads */
+ u16 sectors; /* Number of sectors per track */
+
+ union {
+ u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+ u32 gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
+ };
+
+ /* DEVSLP Timing Variables from Identify Device Data Log */
+ u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
+
+ /* NCQ send and receive log subcommand support */
+ u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE];
+
+ /* error history */
+ int spdn_cnt;
+ /* ering is CLEAR_END, read comment above CLEAR_END */
+ struct ata_ering ering;
+ /* Initial DIPM configuration */
+ bool init_dipm;
+};
+
+/* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are
+ * cleared to zero on ata_dev_init().
+ */
+#define ATA_DEVICE_CLEAR_BEGIN offsetof(struct ata_device, n_sectors)
+#define ATA_DEVICE_CLEAR_END offsetof(struct ata_device, ering)
+
+struct ata_eh_info {
+ struct ata_device *dev; /* offending device */
+ u32 serror; /* SError from LLDD */
+ unsigned int err_mask; /* port-wide err_mask */
+ unsigned int action; /* ATA_EH_* action mask */
+ unsigned int dev_action[ATA_MAX_DEVICES]; /* dev EH action */
+ unsigned int flags; /* ATA_EHI_* flags */
+
+ unsigned int probe_mask;
+
+ char desc[ATA_EH_DESC_LEN];
+ int desc_len;
+};
+
+struct ata_eh_context {
+ struct ata_eh_info i;
+ int tries[ATA_MAX_DEVICES];
+ int cmd_timeout_idx[ATA_MAX_DEVICES]
+ [ATA_EH_CMD_TIMEOUT_TABLE_SIZE];
+ unsigned int classes[ATA_MAX_DEVICES];
+ unsigned int did_probe_mask;
+ unsigned int unloaded_mask;
+ unsigned int saved_ncq_enabled;
+ u8 saved_xfer_mode[ATA_MAX_DEVICES];
+ /* timestamp for the last reset attempt or success */
+ unsigned long last_reset;
+};
+
+struct ata_acpi_drive
+{
+ u32 pio;
+ u32 dma;
+} __packed;
+
+struct ata_acpi_gtm {
+ struct ata_acpi_drive drive[2];
+ u32 flags;
+} __packed;
+
+struct ata_link {
+ struct ata_port *ap;
+ int pmp; /* port multiplier port # */
+
+ struct device tdev;
+ unsigned int active_tag; /* active tag on this link */
+ u32 sactive; /* active NCQ commands */
+
+ unsigned int flags; /* ATA_LFLAG_xxx */
+
+ u32 saved_scontrol; /* SControl on probe */
+ unsigned int hw_sata_spd_limit;
+ unsigned int sata_spd_limit;
+ unsigned int sata_spd; /* current SATA PHY speed */
+ enum ata_lpm_policy lpm_policy;
+
+ /* record runtime error info, protected by host_set lock */
+ struct ata_eh_info eh_info;
+ /* EH context */
+ struct ata_eh_context eh_context;
+
+ struct ata_device device[ATA_MAX_DEVICES];
+
+ u8 init_lpm; /* initial lpm configuration */
+ unsigned long last_lpm_change; /* when last LPM change happened */
+};
+#define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag)
+#define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0])
+
+struct ata_port {
+ struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
+ struct ata_port_operations *ops;
+ spinlock_t *lock;
+ /* Flags owned by the EH context. Only EH should touch these once the
+ port is active */
+ unsigned long flags; /* ATA_FLAG_xxx */
+ /* Flags that change dynamically, protected by ap->lock */
+ unsigned int pflags; /* ATA_PFLAG_xxx */
+ unsigned int print_id; /* user visible unique port ID */
+ unsigned int local_port_no; /* host local port num */
+ unsigned int port_no; /* 0 based port no. inside the host */
+
+#ifdef CONFIG_ATA_SFF
+ struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */
+ u8 ctl; /* cache of ATA control register */
+ u8 last_ctl; /* Cache last written value */
+ struct ata_link* sff_pio_task_link; /* link currently used */
+ struct delayed_work sff_pio_task;
+#ifdef CONFIG_ATA_BMDMA
+ struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */
+ dma_addr_t bmdma_prd_dma; /* and its DMA mapping */
+#endif /* CONFIG_ATA_BMDMA */
+#endif /* CONFIG_ATA_SFF */
+
+ unsigned int pio_mask;
+ unsigned int mwdma_mask;
+ unsigned int udma_mask;
+ unsigned int cbl; /* cable type; ATA_CBL_xxx */
+
+ struct ata_queued_cmd qcmd[ATA_MAX_QUEUE];
+ unsigned long sas_tag_allocated; /* for sas tag allocation only */
+ unsigned int qc_active;
+ int nr_active_links; /* #links with active qcs */
+ unsigned int sas_last_tag; /* track next tag hw expects */
+
+ struct ata_link link; /* host default link */
+ struct ata_link *slave_link; /* see ata_slave_link_init() */
+
+ int nr_pmp_links; /* nr of available PMP links */
+ struct ata_link *pmp_link; /* array of PMP links */
+ struct ata_link *excl_link; /* for PMP qc exclusion */
+
+ struct ata_port_stats stats;
+ struct ata_host *host;
+ struct device *dev;
+ struct device tdev;
+
+ struct mutex scsi_scan_mutex;
+ struct delayed_work hotplug_task;
+ struct work_struct scsi_rescan_task;
+
+ unsigned int hsm_task_state;
+
+ u32 msg_enable;
+ struct list_head eh_done_q;
+ wait_queue_head_t eh_wait_q;
+ int eh_tries;
+ struct completion park_req_pending;
+
+ pm_message_t pm_mesg;
+ enum ata_lpm_policy target_lpm_policy;
+
+ struct timer_list fastdrain_timer;
+ unsigned long fastdrain_cnt;
+
+ int em_message_type;
+ void *private_data;
+
+#ifdef CONFIG_ATA_ACPI
+ struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */
+#endif
+ /* owned by EH */
+ u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
+};
+
+/* The following initializer overrides a method to NULL whether one of
+ * its parents has the method defined or not. This is equivalent to
+ * ERR_PTR(-ENOENT). Unfortunately, ERR_PTR doesn't yield a constant
+ * expression and thus can't be used as an initializer.
+ */
+#define ATA_OP_NULL (void *)(unsigned long)(-ENOENT)
+
+struct ata_port_operations {
+ /*
+ * Command execution
+ */
+ int (*qc_defer)(struct ata_queued_cmd *qc);
+ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+ void (*qc_prep)(struct ata_queued_cmd *qc);
+ unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
+ bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
+
+ /*
+ * Configuration and exception handling
+ */
+ int (*cable_detect)(struct ata_port *ap);
+ unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask);
+ void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
+ void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
+ int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
+ unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
+
+ void (*dev_config)(struct ata_device *dev);
+
+ void (*freeze)(struct ata_port *ap);
+ void (*thaw)(struct ata_port *ap);
+ ata_prereset_fn_t prereset;
+ ata_reset_fn_t softreset;
+ ata_reset_fn_t hardreset;
+ ata_postreset_fn_t postreset;
+ ata_prereset_fn_t pmp_prereset;
+ ata_reset_fn_t pmp_softreset;
+ ata_reset_fn_t pmp_hardreset;
+ ata_postreset_fn_t pmp_postreset;
+ void (*error_handler)(struct ata_port *ap);
+ void (*lost_interrupt)(struct ata_port *ap);
+ void (*post_internal_cmd)(struct ata_queued_cmd *qc);
+ void (*sched_eh)(struct ata_port *ap);
+ void (*end_eh)(struct ata_port *ap);
+
+ /*
+ * Optional features
+ */
+ int (*scr_read)(struct ata_link *link, unsigned int sc_reg, u32 *val);
+ int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val);
+ void (*pmp_attach)(struct ata_port *ap);
+ void (*pmp_detach)(struct ata_port *ap);
+ int (*set_lpm)(struct ata_link *link, enum ata_lpm_policy policy,
+ unsigned hints);
+
+ /*
+ * Start, stop, suspend and resume
+ */
+ int (*port_suspend)(struct ata_port *ap, pm_message_t mesg);
+ int (*port_resume)(struct ata_port *ap);
+ int (*port_start)(struct ata_port *ap);
+ void (*port_stop)(struct ata_port *ap);
+ void (*host_stop)(struct ata_host *host);
+
+#ifdef CONFIG_ATA_SFF
+ /*
+ * SFF / taskfile oriented ops
+ */
+ void (*sff_dev_select)(struct ata_port *ap, unsigned int device);
+ void (*sff_set_devctl)(struct ata_port *ap, u8 ctl);
+ u8 (*sff_check_status)(struct ata_port *ap);
+ u8 (*sff_check_altstatus)(struct ata_port *ap);
+ void (*sff_tf_load)(struct ata_port *ap, const struct ata_taskfile *tf);
+ void (*sff_tf_read)(struct ata_port *ap, struct ata_taskfile *tf);
+ void (*sff_exec_command)(struct ata_port *ap,
+ const struct ata_taskfile *tf);
+ unsigned int (*sff_data_xfer)(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw);
+ void (*sff_irq_on)(struct ata_port *);
+ bool (*sff_irq_check)(struct ata_port *);
+ void (*sff_irq_clear)(struct ata_port *);
+ void (*sff_drain_fifo)(struct ata_queued_cmd *qc);
+
+#ifdef CONFIG_ATA_BMDMA
+ void (*bmdma_setup)(struct ata_queued_cmd *qc);
+ void (*bmdma_start)(struct ata_queued_cmd *qc);
+ void (*bmdma_stop)(struct ata_queued_cmd *qc);
+ u8 (*bmdma_status)(struct ata_port *ap);
+#endif /* CONFIG_ATA_BMDMA */
+#endif /* CONFIG_ATA_SFF */
+
+ ssize_t (*em_show)(struct ata_port *ap, char *buf);
+ ssize_t (*em_store)(struct ata_port *ap, const char *message,
+ size_t size);
+ ssize_t (*sw_activity_show)(struct ata_device *dev, char *buf);
+ ssize_t (*sw_activity_store)(struct ata_device *dev,
+ enum sw_activity val);
+ ssize_t (*transmit_led_message)(struct ata_port *ap, u32 state,
+ ssize_t size);
+
+ /*
+ * Obsolete
+ */
+ void (*phy_reset)(struct ata_port *ap);
+ void (*eng_timeout)(struct ata_port *ap);
+
+ /*
+ * ->inherits must be the last field and all the preceding
+ * fields must be pointers.
+ */
+ const struct ata_port_operations *inherits;
+};
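+
+/*
+ * Illustrative sketch (not part of this header): a low-level driver builds
+ * its ops by inheriting from one of the base tables below and overriding
+ * (or, via ATA_OP_NULL, explicitly dropping) individual methods.  The
+ * my_* names are hypothetical.
+ *
+ *	static struct ata_port_operations my_sata_ops = {
+ *		.inherits	= &sata_port_ops,
+ *		.qc_issue	= my_qc_issue,
+ *		.hardreset	= my_hardreset,
+ *		.softreset	= ATA_OP_NULL,	(drop the inherited method)
+ *	};
+ */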
+
+struct ata_port_info {
+ unsigned long flags;
+ unsigned long link_flags;
+ unsigned long pio_mask;
+ unsigned long mwdma_mask;
+ unsigned long udma_mask;
+ struct ata_port_operations *port_ops;
+ void *private_data;
+};
+
+struct ata_timing {
+ unsigned short mode; /* ATA mode */
+ unsigned short setup; /* t1 */
+ unsigned short act8b; /* t2 for 8-bit I/O */
+ unsigned short rec8b; /* t2i for 8-bit I/O */
+ unsigned short cyc8b; /* t0 for 8-bit I/O */
+ unsigned short active; /* t2 or tD */
+ unsigned short recover; /* t2i or tK */
+ unsigned short dmack_hold; /* tj */
+ unsigned short cycle; /* t0 */
+ unsigned short udma; /* t2CYCTYP/2 */
+};
+
+/*
+ * Core layer - drivers/ata/libata-core.c
+ */
+extern const unsigned long sata_deb_timing_normal[];
+extern const unsigned long sata_deb_timing_hotplug[];
+extern const unsigned long sata_deb_timing_long[];
+
+extern struct ata_port_operations ata_dummy_port_ops;
+extern const struct ata_port_info ata_dummy_port_info;
+
+/*
+ * protocol tests
+ */
+static inline unsigned int ata_prot_flags(u8 prot)
+{
+ switch (prot) {
+ case ATA_PROT_NODATA:
+ return 0;
+ case ATA_PROT_PIO:
+ return ATA_PROT_FLAG_PIO;
+ case ATA_PROT_DMA:
+ return ATA_PROT_FLAG_DMA;
+ case ATA_PROT_NCQ:
+ return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
+ case ATAPI_PROT_NODATA:
+ return ATA_PROT_FLAG_ATAPI;
+ case ATAPI_PROT_PIO:
+ return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
+ case ATAPI_PROT_DMA:
+ return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
+ }
+ return 0;
+}
+
+static inline int ata_is_atapi(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
+}
+
+static inline int ata_is_nodata(u8 prot)
+{
+ return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
+}
+
+static inline int ata_is_pio(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
+}
+
+static inline int ata_is_dma(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
+}
+
+static inline int ata_is_ncq(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
+}
+
+static inline int ata_is_data(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
+}
+
+static inline int is_multi_taskfile(struct ata_taskfile *tf)
+{
+ return (tf->command == ATA_CMD_READ_MULTI) ||
+ (tf->command == ATA_CMD_WRITE_MULTI) ||
+ (tf->command == ATA_CMD_READ_MULTI_EXT) ||
+ (tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
+ (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
+}
+
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
+ return sata_deb_timing_hotplug;
+ else
+ return sata_deb_timing_normal;
+}
+
+static inline int ata_port_is_dummy(struct ata_port *ap)
+{
+ return ap->ops == &ata_dummy_port_ops;
+}
+
+extern int sata_set_spd(struct ata_link *link);
+extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
+extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link));
+extern int sata_link_debounce(struct ata_link *link,
+ const unsigned long *params, unsigned long deadline);
+extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+ unsigned long deadline);
+extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ bool spm_wakeup);
+extern int sata_link_hardreset(struct ata_link *link,
+ const unsigned long *timing, unsigned long deadline,
+ bool *online, int (*check_ready)(struct ata_link *));
+extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
+
+extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
+extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
+ const struct ata_port_info * const * ppi, int n_ports);
+extern int ata_slave_link_init(struct ata_port *ap);
+extern int ata_host_start(struct ata_host *host);
+extern int ata_host_register(struct ata_host *host,
+ struct scsi_host_template *sht);
+extern int ata_host_activate(struct ata_host *host, int irq,
+ irq_handler_t irq_handler, unsigned long irq_flags,
+ struct scsi_host_template *sht);
+extern void ata_host_detach(struct ata_host *host);
+extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *);
+extern int ata_scsi_detect(struct scsi_host_template *sht);
+extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
+ int cmd, void __user *arg);
+extern void ata_sas_port_destroy(struct ata_port *);
+extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
+ struct ata_port_info *, struct Scsi_Host *);
+extern void ata_sas_async_probe(struct ata_port *ap);
+extern int ata_sas_sync_probe(struct ata_port *ap);
+extern int ata_sas_port_init(struct ata_port *);
+extern int ata_sas_port_start(struct ata_port *ap);
+extern void ata_sas_port_stop(struct ata_port *ap);
+extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
+extern int sata_scr_valid(struct ata_link *link);
+extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+extern bool ata_link_online(struct ata_link *link);
+extern bool ata_link_offline(struct ata_link *link);
+#ifdef CONFIG_PM
+extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
+extern void ata_host_resume(struct ata_host *host);
+extern void ata_sas_port_suspend(struct ata_port *ap);
+extern void ata_sas_port_resume(struct ata_port *ap);
+#else
+static inline void ata_sas_port_suspend(struct ata_port *ap)
+{
+}
+static inline void ata_sas_port_resume(struct ata_port *ap)
+{
+}
+#endif
+extern int ata_ratelimit(void);
+extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
+extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
+ u32 val, unsigned long interval, unsigned long timeout);
+extern int atapi_cmd_type(u8 opcode);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf,
+ u8 pmp, int is_cmd, u8 *fis);
+extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
+extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
+ unsigned long mwdma_mask, unsigned long udma_mask);
+extern void ata_unpack_xfermask(unsigned long xfer_mask,
+ unsigned long *pio_mask, unsigned long *mwdma_mask,
+ unsigned long *udma_mask);
+extern u8 ata_xfer_mask2mode(unsigned long xfer_mask);
+extern unsigned long ata_xfer_mode2mask(u8 xfer_mode);
+extern int ata_xfer_mode2shift(unsigned long xfer_mode);
+extern const char *ata_mode_string(unsigned long xfer_mask);
+extern unsigned long ata_id_xfermask(const u16 *id);
+extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
+extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
+extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
+ unsigned int n_elem);
+extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern void ata_dev_disable(struct ata_device *adev);
+extern void ata_id_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len);
+extern void ata_id_c_string(const u16 *id, unsigned char *s,
+ unsigned int ofs, unsigned int len);
+extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
+ struct ata_taskfile *tf, u16 *id);
+extern void ata_qc_complete(struct ata_queued_cmd *qc);
+extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
+extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
+extern int ata_std_bios_param(struct scsi_device *sdev,
+ struct block_device *bdev,
+ sector_t capacity, int geom[]);
+extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
+extern int ata_scsi_slave_config(struct scsi_device *sdev);
+extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
+extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
+ int queue_depth);
+extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+ int queue_depth);
+extern struct ata_device *ata_dev_pair(struct ata_device *adev);
+extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
+extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
+extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
+
+extern int ata_cable_40wire(struct ata_port *ap);
+extern int ata_cable_80wire(struct ata_port *ap);
+extern int ata_cable_sata(struct ata_port *ap);
+extern int ata_cable_ignore(struct ata_port *ap);
+extern int ata_cable_unknown(struct ata_port *ap);
+
+/* Timing helpers */
+extern unsigned int ata_pio_need_iordy(const struct ata_device *);
+extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
+extern int ata_timing_compute(struct ata_device *, unsigned short,
+ struct ata_timing *, int, int);
+extern void ata_timing_merge(const struct ata_timing *,
+ const struct ata_timing *, struct ata_timing *,
+ unsigned int);
+extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
+
+/* PCI */
+#ifdef CONFIG_PCI
+struct pci_dev;
+
+struct pci_bits {
+ unsigned int reg; /* PCI config register to read */
+ unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
+ unsigned long mask;
+ unsigned long val;
+};
+
+extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_remove_one(struct pci_dev *pdev);
+
+#ifdef CONFIG_PM
+extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
+extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
+extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
+extern int ata_pci_device_resume(struct pci_dev *pdev);
+#endif /* CONFIG_PM */
+#endif /* CONFIG_PCI */
+
+struct platform_device;
+
+extern int ata_platform_remove_one(struct platform_device *pdev);
+
+/*
+ * ACPI - drivers/ata/libata-acpi.c
+ */
+#ifdef CONFIG_ATA_ACPI
+static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
+{
+ if (ap->pflags & ATA_PFLAG_INIT_GTM_VALID)
+ return &ap->__acpi_init_gtm;
+ return NULL;
+}
+int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
+int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
+unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm);
+int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
+#else
+static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
+{
+ return NULL;
+}
+
+static inline int ata_acpi_stm(const struct ata_port *ap,
+ struct ata_acpi_gtm *stm)
+{
+ return -ENOSYS;
+}
+
+static inline int ata_acpi_gtm(const struct ata_port *ap,
+ struct ata_acpi_gtm *stm)
+{
+ return -ENOSYS;
+}
+
+static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
+ const struct ata_acpi_gtm *gtm)
+{
+ return 0;
+}
+
+static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
+ const struct ata_acpi_gtm *gtm)
+{
+ return 0;
+}
+#endif
+
+/*
+ * EH - drivers/ata/libata-eh.c
+ */
+extern void ata_port_schedule_eh(struct ata_port *ap);
+extern void ata_port_wait_eh(struct ata_port *ap);
+extern int ata_link_abort(struct ata_link *link);
+extern int ata_port_abort(struct ata_port *ap);
+extern int ata_port_freeze(struct ata_port *ap);
+extern int sata_async_notification(struct ata_port *ap);
+
+extern void ata_eh_freeze_port(struct ata_port *ap);
+extern void ata_eh_thaw_port(struct ata_port *ap);
+
+extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
+extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
+extern void ata_eh_analyze_ncq_error(struct ata_link *link);
+
+extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
+ ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
+ ata_postreset_fn_t postreset);
+extern void ata_std_error_handler(struct ata_port *ap);
+extern void ata_std_sched_eh(struct ata_port *ap);
+extern void ata_std_end_eh(struct ata_port *ap);
+extern int ata_link_nr_enabled(struct ata_link *link);
+
+/*
+ * Base operations to inherit from and initializers for sht
+ *
+ * Operations
+ *
+ * base : Common to all libata drivers.
+ * sata : SATA controllers w/ native interface.
+ * pmp : SATA controllers w/ PMP support.
+ * sff : SFF ATA controllers w/o BMDMA support.
+ * bmdma : SFF ATA controllers w/ BMDMA support.
+ *
+ * sht initializers
+ *
+ * BASE : Common to all libata drivers. The user must set
+ * sg_tablesize and dma_boundary.
+ * PIO : SFF ATA controllers w/ only PIO support.
+ * BMDMA : SFF ATA controllers w/ BMDMA support. sg_tablesize and
+ * dma_boundary are set to BMDMA limits.
+ * NCQ : SATA controllers supporting NCQ. The user must set
+ * sg_tablesize, dma_boundary and can_queue.
+ */
+extern const struct ata_port_operations ata_base_port_ops;
+extern const struct ata_port_operations sata_port_ops;
+extern struct device_attribute *ata_common_sdev_attrs[];
+
+/*
+ * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated
+ * by the edge drivers, because the 'module' field of sht must be the
+ * edge driver's module reference; otherwise the driver can be unloaded
+ * even while the scsi_device is being accessed.
+ */
+#define ATA_BASE_SHT(drv_name) \
+ .module = THIS_MODULE, \
+ .name = drv_name, \
+ .ioctl = ata_scsi_ioctl, \
+ .queuecommand = ata_scsi_queuecmd, \
+ .can_queue = ATA_DEF_QUEUE, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .this_id = ATA_SHT_THIS_ID, \
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN, \
+ .emulated = ATA_SHT_EMULATED, \
+ .use_clustering = ATA_SHT_USE_CLUSTERING, \
+ .proc_name = drv_name, \
+ .slave_configure = ata_scsi_slave_config, \
+ .slave_destroy = ata_scsi_slave_destroy, \
+ .bios_param = ata_std_bios_param, \
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
+ .sdev_attrs = ata_common_sdev_attrs
+
+#define ATA_NCQ_SHT(drv_name) \
+ ATA_BASE_SHT(drv_name), \
+ .change_queue_depth = ata_scsi_change_queue_depth
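+
+/*
+ * Illustrative sketch (not part of this header): an NCQ-capable edge
+ * driver instantiates the template and then supplies the fields the
+ * initializer leaves to the user.  The values below are hypothetical.
+ *
+ *	static struct scsi_host_template my_sht = {
+ *		ATA_NCQ_SHT("my_sata"),
+ *		.can_queue	= 32,
+ *		.sg_tablesize	= 128,
+ *		.dma_boundary	= 0xffffffffUL,
+ *	};
+ */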
+
+/*
+ * PMP helpers
+ */
+#ifdef CONFIG_SATA_PMP
+static inline bool sata_pmp_supported(struct ata_port *ap)
+{
+ return ap->flags & ATA_FLAG_PMP;
+}
+
+static inline bool sata_pmp_attached(struct ata_port *ap)
+{
+ return ap->nr_pmp_links != 0;
+}
+
+static inline int ata_is_host_link(const struct ata_link *link)
+{
+ return link == &link->ap->link || link == link->ap->slave_link;
+}
+#else /* CONFIG_SATA_PMP */
+static inline bool sata_pmp_supported(struct ata_port *ap)
+{
+ return false;
+}
+
+static inline bool sata_pmp_attached(struct ata_port *ap)
+{
+ return false;
+}
+
+static inline int ata_is_host_link(const struct ata_link *link)
+{
+ return 1;
+}
+#endif /* CONFIG_SATA_PMP */
+
+static inline int sata_srst_pmp(struct ata_link *link)
+{
+ if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
+ return SATA_PMP_CTRL_PORT;
+ return link->pmp;
+}
+
+/*
+ * printk helpers
+ */
+__printf(3, 4)
+void ata_port_printk(const struct ata_port *ap, const char *level,
+ const char *fmt, ...);
+__printf(3, 4)
+void ata_link_printk(const struct ata_link *link, const char *level,
+ const char *fmt, ...);
+__printf(3, 4)
+void ata_dev_printk(const struct ata_device *dev, const char *level,
+ const char *fmt, ...);
+
+#define ata_port_err(ap, fmt, ...) \
+ ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
+#define ata_port_warn(ap, fmt, ...) \
+ ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__)
+#define ata_port_notice(ap, fmt, ...) \
+ ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__)
+#define ata_port_info(ap, fmt, ...) \
+ ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__)
+#define ata_port_dbg(ap, fmt, ...) \
+ ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__)
+
+#define ata_link_err(link, fmt, ...) \
+ ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__)
+#define ata_link_warn(link, fmt, ...) \
+ ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__)
+#define ata_link_notice(link, fmt, ...) \
+ ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__)
+#define ata_link_info(link, fmt, ...) \
+ ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__)
+#define ata_link_dbg(link, fmt, ...) \
+ ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__)
+
+#define ata_dev_err(dev, fmt, ...) \
+ ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
+#define ata_dev_warn(dev, fmt, ...) \
+ ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
+#define ata_dev_notice(dev, fmt, ...) \
+ ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__)
+#define ata_dev_info(dev, fmt, ...) \
+ ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+#define ata_dev_dbg(dev, fmt, ...) \
+ ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__)
+
+void ata_print_version(const struct device *dev, const char *version);
+
+/*
+ * ata_eh_info helpers
+ */
+extern __printf(2, 3)
+void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...);
+extern __printf(2, 3)
+void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...);
+extern void ata_ehi_clear_desc(struct ata_eh_info *ehi);
+
+static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
+{
+ ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
+ ehi->flags |= ATA_EHI_HOTPLUGGED;
+ ehi->action |= ATA_EH_RESET | ATA_EH_ENABLE_LINK;
+ ehi->err_mask |= AC_ERR_ATA_BUS;
+}
+
+/*
+ * port description helpers
+ */
+extern __printf(2, 3)
+void ata_port_desc(struct ata_port *ap, const char *fmt, ...);
+#ifdef CONFIG_PCI
+extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
+ const char *name);
+#endif
+
+static inline unsigned int ata_tag_valid(unsigned int tag)
+{
+ return (tag < ATA_MAX_QUEUE) ? 1 : 0;
+}
+
+static inline unsigned int ata_tag_internal(unsigned int tag)
+{
+ return tag == ATA_TAG_INTERNAL;
+}
+
+/*
+ * device helpers
+ */
+static inline unsigned int ata_class_enabled(unsigned int class)
+{
+ return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
+ class == ATA_DEV_PMP || class == ATA_DEV_SEMB ||
+ class == ATA_DEV_ZAC;
+}
+
+static inline unsigned int ata_class_disabled(unsigned int class)
+{
+ return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP ||
+ class == ATA_DEV_PMP_UNSUP || class == ATA_DEV_SEMB_UNSUP;
+}
+
+static inline unsigned int ata_class_absent(unsigned int class)
+{
+ return !ata_class_enabled(class) && !ata_class_disabled(class);
+}
+
+static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
+{
+ return ata_class_enabled(dev->class);
+}
+
+static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
+{
+ return ata_class_disabled(dev->class);
+}
+
+static inline unsigned int ata_dev_absent(const struct ata_device *dev)
+{
+ return ata_class_absent(dev->class);
+}
+
+/*
+ * link helpers
+ */
+static inline int ata_link_max_devices(const struct ata_link *link)
+{
+ if (ata_is_host_link(link) && link->ap->flags & ATA_FLAG_SLAVE_POSS)
+ return 2;
+ return 1;
+}
+
+static inline int ata_link_active(struct ata_link *link)
+{
+ return ata_tag_valid(link->active_tag) || link->sactive;
+}
+
+/*
+ * Iterators
+ *
+ * ATA_LITER_* constants are used to select link iteration mode and
+ * ATA_DITER_* device iteration mode.
+ *
+ * For a custom iteration directly using ata_{link|dev}_next(), if
+ * @link or @dev, respectively, is NULL, the first element is
+ * returned. @dev and @link can be any valid device or link and the
+ * next element according to the iteration mode will be returned.
+ * After the last element, NULL is returned.
+ */
+enum ata_link_iter_mode {
+ ATA_LITER_EDGE, /* if present, PMP links only; otherwise,
+ * host link. no slave link */
+ ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */
+ ATA_LITER_PMP_FIRST, /* PMP links followed by host link,
+ * slave link still comes after host link */
+};
+
+enum ata_dev_iter_mode {
+ ATA_DITER_ENABLED,
+ ATA_DITER_ENABLED_REVERSE,
+ ATA_DITER_ALL,
+ ATA_DITER_ALL_REVERSE,
+};
+
+extern struct ata_link *ata_link_next(struct ata_link *link,
+ struct ata_port *ap,
+ enum ata_link_iter_mode mode);
+
+extern struct ata_device *ata_dev_next(struct ata_device *dev,
+ struct ata_link *link,
+ enum ata_dev_iter_mode mode);
+
+/*
+ * Shortcut notation for iterations
+ *
+ * ata_for_each_link() iterates over each link of @ap according to
+ * @mode. @link points to the current link in the loop. @link is
+ * NULL after loop termination. ata_for_each_dev() works the same way
+ * except that it iterates over each device of @link.
+ *
+ * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be
+ * specified when using the following shorthand notations. Only the
+ * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be
+ * specified. This not only increases brevity but also makes it
+ * impossible to use ATA_LITER_* for device iteration or vice-versa.
+ */
+#define ata_for_each_link(link, ap, mode) \
+ for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \
+ (link) = ata_link_next((link), (ap), ATA_LITER_##mode))
+
+#define ata_for_each_dev(dev, link, mode) \
+ for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \
+ (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
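+
+/*
+ * Usage sketch (illustrative only): walk every enabled device on every
+ * link of a port and print it. Note that the iteration modes are given
+ * without their ATA_{L|D}ITER_ prefixes, as described above.
+ *
+ *	struct ata_link *link;
+ *	struct ata_device *dev;
+ *
+ *	ata_for_each_link(link, ap, EDGE)
+ *		ata_for_each_dev(dev, link, ENABLED)
+ *			ata_dev_info(dev, "enabled device found\n");
+ */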
+
+/**
+ * ata_ncq_enabled - Test whether NCQ is enabled
+ * @dev: ATA device to test for
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * 1 if NCQ is enabled for @dev, 0 otherwise.
+ */
+static inline int ata_ncq_enabled(struct ata_device *dev)
+{
+ return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
+ ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
+}
+
+static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
+{
+ return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) &&
+ (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &
+ ATA_LOG_NCQ_SEND_RECV_DSM_TRIM);
+}
+
+static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
+{
+ qc->tf.ctl |= ATA_NIEN;
+}
+
+static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
+ unsigned int tag)
+{
+ if (likely(ata_tag_valid(tag)))
+ return &ap->qcmd[tag];
+ return NULL;
+}
+
+static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
+ unsigned int tag)
+{
+ struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+
+ if (unlikely(!qc) || !ap->ops->error_handler)
+ return qc;
+
+ if ((qc->flags & (ATA_QCFLAG_ACTIVE |
+ ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
+ return qc;
+
+ return NULL;
+}
+
+static inline unsigned int ata_qc_raw_nbytes(struct ata_queued_cmd *qc)
+{
+ return qc->nbytes - min(qc->extrabytes, qc->nbytes);
+}
+
+static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
+{
+ memset(tf, 0, sizeof(*tf));
+
+#ifdef CONFIG_ATA_SFF
+ tf->ctl = dev->link->ap->ctl;
+#else
+ tf->ctl = ATA_DEVCTL_OBS;
+#endif
+ if (dev->devno == 0)
+ tf->device = ATA_DEVICE_OBS;
+ else
+ tf->device = ATA_DEVICE_OBS | ATA_DEV1;
+}
+
+static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
+{
+ qc->dma_dir = DMA_NONE;
+ qc->sg = NULL;
+ qc->flags = 0;
+ qc->cursg = NULL;
+ qc->cursg_ofs = 0;
+ qc->nbytes = qc->extrabytes = qc->curbytes = 0;
+ qc->n_elem = 0;
+ qc->err_mask = 0;
+ qc->sect_size = ATA_SECT_SIZE;
+
+ ata_tf_init(qc->dev, &qc->tf);
+
+ /* init result_tf such that it indicates normal completion */
+ qc->result_tf.command = ATA_DRDY;
+ qc->result_tf.feature = 0;
+}
+
+static inline int ata_try_flush_cache(const struct ata_device *dev)
+{
+ return ata_id_wcache_enabled(dev->id) ||
+ ata_id_has_flush(dev->id) ||
+ ata_id_has_flush_ext(dev->id);
+}
+
+static inline unsigned int ac_err_mask(u8 status)
+{
+ if (status & (ATA_BUSY | ATA_DRQ))
+ return AC_ERR_HSM;
+ if (status & (ATA_ERR | ATA_DF))
+ return AC_ERR_DEV;
+ return 0;
+}
+
+static inline unsigned int __ac_err_mask(u8 status)
+{
+ unsigned int mask = ac_err_mask(status);
+ if (mask == 0)
+ return AC_ERR_OTHER;
+ return mask;
+}
+
+static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
+{
+ return *(struct ata_port **)&host->hostdata[0];
+}
+
+static inline int ata_check_ready(u8 status)
+{
+ if (!(status & ATA_BUSY))
+ return 1;
+
+ /* 0xff indicates either no device or device not ready */
+ if (status == 0xff)
+ return -ENODEV;
+
+ return 0;
+}
+
+static inline unsigned long ata_deadline(unsigned long from_jiffies,
+ unsigned long timeout_msecs)
+{
+ return from_jiffies + msecs_to_jiffies(timeout_msecs);
+}
+
+/* Don't open code these in drivers as there are traps. Firstly, the range may
+   change in future hardware and specs; secondly, 0xFF means 'no DMA' but is
+   > UDMA_0. Here be dragons. */
+
+static inline int ata_using_mwdma(struct ata_device *adev)
+{
+ if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4)
+ return 1;
+ return 0;
+}
+
+static inline int ata_using_udma(struct ata_device *adev)
+{
+ if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7)
+ return 1;
+ return 0;
+}
+
+static inline int ata_dma_enabled(struct ata_device *adev)
+{
+ return (adev->dma_mode == 0xFF ? 0 : 1);
+}
+
+/**************************************************************************
+ * PMP - drivers/ata/libata-pmp.c
+ */
+#ifdef CONFIG_SATA_PMP
+
+extern const struct ata_port_operations sata_pmp_port_ops;
+
+extern int sata_pmp_qc_defer_cmd_switch(struct ata_queued_cmd *qc);
+extern void sata_pmp_error_handler(struct ata_port *ap);
+
+#else /* CONFIG_SATA_PMP */
+
+#define sata_pmp_port_ops sata_port_ops
+#define sata_pmp_qc_defer_cmd_switch ata_std_qc_defer
+#define sata_pmp_error_handler ata_std_error_handler
+
+#endif /* CONFIG_SATA_PMP */
+
+
+/**************************************************************************
+ * SFF - drivers/ata/libata-sff.c
+ */
+#ifdef CONFIG_ATA_SFF
+
+extern const struct ata_port_operations ata_sff_port_ops;
+extern const struct ata_port_operations ata_bmdma32_port_ops;
+
+/* PIO only, sg_tablesize and dma_boundary limits can be removed */
+#define ATA_PIO_SHT(drv_name) \
+ ATA_BASE_SHT(drv_name), \
+ .sg_tablesize = LIBATA_MAX_PRD, \
+ .dma_boundary = ATA_DMA_BOUNDARY
+
+extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
+extern u8 ata_sff_check_status(struct ata_port *ap);
+extern void ata_sff_pause(struct ata_port *ap);
+extern void ata_sff_dma_pause(struct ata_port *ap);
+extern int ata_sff_busy_sleep(struct ata_port *ap,
+ unsigned long timeout_pat, unsigned long timeout);
+extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
+extern void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
+extern void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+extern void ata_sff_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf);
+extern unsigned int ata_sff_data_xfer(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw);
+extern unsigned int ata_sff_data_xfer32(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw);
+extern unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw);
+extern void ata_sff_irq_on(struct ata_port *ap);
+extern void ata_sff_irq_clear(struct ata_port *ap);
+extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u8 status, int in_wq);
+extern void ata_sff_queue_work(struct work_struct *work);
+extern void ata_sff_queue_delayed_work(struct delayed_work *dwork,
+ unsigned long delay);
+extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay);
+extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc);
+extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc);
+extern unsigned int ata_sff_port_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc);
+extern irqreturn_t ata_sff_interrupt(int irq, void *dev_instance);
+extern void ata_sff_lost_interrupt(struct ata_port *ap);
+extern void ata_sff_freeze(struct ata_port *ap);
+extern void ata_sff_thaw(struct ata_port *ap);
+extern int ata_sff_prereset(struct ata_link *link, unsigned long deadline);
+extern unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
+ u8 *r_err);
+extern int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
+ unsigned long deadline);
+extern int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline);
+extern int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+extern void ata_sff_postreset(struct ata_link *link, unsigned int *classes);
+extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc);
+extern void ata_sff_error_handler(struct ata_port *ap);
+extern void ata_sff_std_ports(struct ata_ioports *ioaddr);
+#ifdef CONFIG_PCI
+extern int ata_pci_sff_init_host(struct ata_host *host);
+extern int ata_pci_sff_prepare_host(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct ata_host **r_host);
+extern int ata_pci_sff_activate_host(struct ata_host *host,
+ irq_handler_t irq_handler,
+ struct scsi_host_template *sht);
+extern int ata_pci_sff_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct scsi_host_template *sht, void *host_priv, int hflags);
+#endif /* CONFIG_PCI */
+
+#ifdef CONFIG_ATA_BMDMA
+
+extern const struct ata_port_operations ata_bmdma_port_ops;
+
+#define ATA_BMDMA_SHT(drv_name) \
+ ATA_BASE_SHT(drv_name), \
+ .sg_tablesize = LIBATA_MAX_PRD, \
+ .dma_boundary = ATA_DMA_BOUNDARY
+
+extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
+extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
+ struct ata_queued_cmd *qc);
+extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
+extern void ata_bmdma_error_handler(struct ata_port *ap);
+extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
+extern void ata_bmdma_irq_clear(struct ata_port *ap);
+extern void ata_bmdma_setup(struct ata_queued_cmd *qc);
+extern void ata_bmdma_start(struct ata_queued_cmd *qc);
+extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
+extern u8 ata_bmdma_status(struct ata_port *ap);
+extern int ata_bmdma_port_start(struct ata_port *ap);
+extern int ata_bmdma_port_start32(struct ata_port *ap);
+
+#ifdef CONFIG_PCI
+extern int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev);
+extern void ata_pci_bmdma_init(struct ata_host *host);
+extern int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct ata_host **r_host);
+extern int ata_pci_bmdma_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const * ppi,
+ struct scsi_host_template *sht,
+ void *host_priv, int hflags);
+#endif /* CONFIG_PCI */
+#endif /* CONFIG_ATA_BMDMA */
+
+/**
+ * ata_sff_busy_wait - Wait for a port status register
+ * @ap: Port to wait for.
+ * @bits: bits that must be clear
+ * @max: number of 10uS waits to perform
+ *
+ * Waits up to max*10 microseconds for the selected bits in the port's
+ * status register to be cleared.
+ * Returns final value of status register.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static inline u8 ata_sff_busy_wait(struct ata_port *ap, unsigned int bits,
+ unsigned int max)
+{
+ u8 status;
+
+ do {
+ udelay(10);
+ status = ap->ops->sff_check_status(ap);
+ max--;
+ } while (status != 0xff && (status & bits) && (max > 0));
+
+ return status;
+}
+
+/**
+ * ata_wait_idle - Wait for a port to be idle.
+ * @ap: Port to wait for.
+ *
+ * Waits up to 10ms for port's BUSY and DRQ signals to clear.
+ * Returns final value of status register.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static inline u8 ata_wait_idle(struct ata_port *ap)
+{
+ u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
+
+#ifdef ATA_DEBUG
+ if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
+ ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
+ status);
+#endif
+
+ return status;
+}
+#endif /* CONFIG_ATA_SFF */
+
+#endif /* __LINUX_LIBATA_H__ */
diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h
new file mode 100644
index 000000000..4c0306c69
--- /dev/null
+++ b/include/linux/libfdt.h
@@ -0,0 +1,8 @@
+#ifndef _INCLUDE_LIBFDT_H_
+#define _INCLUDE_LIBFDT_H_
+
+#include <linux/libfdt_env.h>
+#include "../../scripts/dtc/libfdt/fdt.h"
+#include "../../scripts/dtc/libfdt/libfdt.h"
+
+#endif /* _INCLUDE_LIBFDT_H_ */
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
new file mode 100644
index 000000000..01508c7b8
--- /dev/null
+++ b/include/linux/libfdt_env.h
@@ -0,0 +1,13 @@
+#ifndef _LIBFDT_ENV_H
+#define _LIBFDT_ENV_H
+
+#include <linux/string.h>
+
+#include <asm/byteorder.h>
+
+#define fdt32_to_cpu(x) be32_to_cpu(x)
+#define cpu_to_fdt32(x) cpu_to_be32(x)
+#define fdt64_to_cpu(x) be64_to_cpu(x)
+#define cpu_to_fdt64(x) cpu_to_be64(x)
+
+#endif /* _LIBFDT_ENV_H */
diff --git a/include/linux/libps2.h b/include/linux/libps2.h
new file mode 100644
index 000000000..4ad06e824
--- /dev/null
+++ b/include/linux/libps2.h
@@ -0,0 +1,56 @@
+#ifndef _LIBPS2_H
+#define _LIBPS2_H
+
+/*
+ * Copyright (C) 1999-2002 Vojtech Pavlik
+ * Copyright (C) 2004 Dmitry Torokhov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+
+#define PS2_CMD_GETID 0x02f2
+#define PS2_CMD_RESET_BAT 0x02ff
+
+#define PS2_RET_BAT 0xaa
+#define PS2_RET_ID 0x00
+#define PS2_RET_ACK 0xfa
+#define PS2_RET_NAK 0xfe
+#define PS2_RET_ERR 0xfc
+
+#define PS2_FLAG_ACK 1 /* Waiting for ACK/NAK */
+#define PS2_FLAG_CMD 2 /* Waiting for command to finish */
+#define PS2_FLAG_CMD1 4 /* Waiting for the first byte of command response */
+#define PS2_FLAG_WAITID 8 /* Command being executed is GET ID */
+#define PS2_FLAG_NAK 16 /* Last transmission was NAKed */
+
+struct ps2dev {
+ struct serio *serio;
+
+ /* Ensures that only one command is executing at a time */
+ struct mutex cmd_mutex;
+
+ /* Used to signal completion from interrupt handler */
+ wait_queue_head_t wait;
+
+ unsigned long flags;
+ unsigned char cmdbuf[8];
+ unsigned char cmdcnt;
+ unsigned char nak;
+};
+
+void ps2_init(struct ps2dev *ps2dev, struct serio *serio);
+int ps2_sendbyte(struct ps2dev *ps2dev, unsigned char byte, int timeout);
+void ps2_drain(struct ps2dev *ps2dev, int maxbytes, int timeout);
+void ps2_begin_command(struct ps2dev *ps2dev);
+void ps2_end_command(struct ps2dev *ps2dev);
+int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
+int ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command);
+int ps2_handle_ack(struct ps2dev *ps2dev, unsigned char data);
+int ps2_handle_response(struct ps2dev *ps2dev, unsigned char data);
+void ps2_cmd_aborted(struct ps2dev *ps2dev);
+int ps2_is_keyboard_id(char id);
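+
+/*
+ * Usage sketch (illustrative only): querying a device ID. The command word
+ * encodes the number of parameter bytes sent and received, so PS2_CMD_GETID
+ * receives two bytes into param[] here; the error handling is hypothetical.
+ *
+ *	unsigned char param[2];
+ *
+ *	if (ps2_command(ps2dev, param, PS2_CMD_GETID))
+ *		return -EIO;
+ */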
+
+#endif /* _LIBPS2_H */
diff --git a/include/linux/license.h b/include/linux/license.h
new file mode 100644
index 000000000..decdbf43c
--- /dev/null
+++ b/include/linux/license.h
@@ -0,0 +1,14 @@
+#ifndef __LICENSE_H
+#define __LICENSE_H
+
+static inline int license_is_gpl_compatible(const char *license)
+{
+ return (strcmp(license, "GPL") == 0
+ || strcmp(license, "GPL v2") == 0
+ || strcmp(license, "GPL and additional rights") == 0
+ || strcmp(license, "Dual BSD/GPL") == 0
+ || strcmp(license, "Dual MIT/GPL") == 0
+ || strcmp(license, "Dual MPL/GPL") == 0);
+}
+
+#endif
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
new file mode 100644
index 000000000..a6a42dd02
--- /dev/null
+++ b/include/linux/linkage.h
@@ -0,0 +1,112 @@
+#ifndef _LINUX_LINKAGE_H
+#define _LINUX_LINKAGE_H
+
+#include <linux/compiler.h>
+#include <linux/stringify.h>
+#include <linux/export.h>
+#include <asm/linkage.h>
+
+/* Some toolchains use other characters (e.g. '`') to mark new line in macro */
+#ifndef ASM_NL
+#define ASM_NL ;
+#endif
+
+#ifdef __cplusplus
+#define CPP_ASMLINKAGE extern "C"
+#else
+#define CPP_ASMLINKAGE
+#endif
+
+#ifndef asmlinkage
+#define asmlinkage CPP_ASMLINKAGE
+#endif
+
+#ifndef cond_syscall
+#define cond_syscall(x) asm( \
+ ".weak " VMLINUX_SYMBOL_STR(x) "\n\t" \
+ ".set " VMLINUX_SYMBOL_STR(x) "," \
+ VMLINUX_SYMBOL_STR(sys_ni_syscall))
+#endif
+
+#ifndef SYSCALL_ALIAS
+#define SYSCALL_ALIAS(alias, name) asm( \
+ ".globl " VMLINUX_SYMBOL_STR(alias) "\n\t" \
+ ".set " VMLINUX_SYMBOL_STR(alias) "," \
+ VMLINUX_SYMBOL_STR(name))
+#endif
+
+#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
+#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
+
+/*
+ * For assembly routines.
+ *
+ * Note when using these that you must specify the appropriate
+ * alignment directives yourself
+ */
+#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw"
+#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw"
+
+/*
+ * This is used by architectures to keep arguments on the stack
+ * untouched by the compiler by keeping them live until the end.
+ * The argument stack may be owned by the assembly-language
+ * caller, not the callee, and gcc doesn't always understand
+ * that.
+ *
+ * We have the return value, and a maximum of six arguments.
+ *
+ * This should always be followed by a "return ret" for the
+ * protection to work (ie no more work that the compiler might
+ * end up needing stack temporaries for).
+ */
+/* Assembly files may be compiled with -traditional .. */
+#ifndef __ASSEMBLY__
+#ifndef asmlinkage_protect
+# define asmlinkage_protect(n, ret, args...) do { } while (0)
+#endif
+#endif
+
+#ifndef __ALIGN
+#define __ALIGN .align 4,0x90
+#define __ALIGN_STR ".align 4,0x90"
+#endif
+
+#ifdef __ASSEMBLY__
+
+#ifndef LINKER_SCRIPT
+#define ALIGN __ALIGN
+#define ALIGN_STR __ALIGN_STR
+
+#ifndef ENTRY
+#define ENTRY(name) \
+ .globl name ASM_NL \
+ ALIGN ASM_NL \
+ name:
+#endif
+#endif /* LINKER_SCRIPT */
+
+#ifndef WEAK
+#define WEAK(name) \
+ .weak name ASM_NL \
+ name:
+#endif
+
+#ifndef END
+#define END(name) \
+ .size name, .-name
+#endif
+
+/* If symbol 'name' is treated as a subroutine (gets called, and returns)
+ * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of
+ * static analysis tools such as stack depth analyzer.
+ */
+#ifndef ENDPROC
+#define ENDPROC(name) \
+ .type name, @function ASM_NL \
+ END(name)
+#endif
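+
+/* Usage sketch (illustrative only): a typical assembly routine wrapped with
+ * these helpers; the routine name and the x86 "ret" instruction are
+ * hypothetical.
+ *
+ *	ENTRY(my_asm_helper)
+ *		ret
+ *	ENDPROC(my_asm_helper)
+ */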
+
+#endif
+
+#endif
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
new file mode 100644
index 000000000..ca5bd91d1
--- /dev/null
+++ b/include/linux/linux_logo.h
@@ -0,0 +1,61 @@
+#ifndef _LINUX_LINUX_LOGO_H
+#define _LINUX_LINUX_LOGO_H
+
+/*
+ * Linux logo to be displayed on boot
+ *
+ * Copyright (C) 1996 Larry Ewing (lewing@isc.tamu.edu)
+ * Copyright (C) 1996,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 2001 Greg Banks <gnb@alphalink.com.au>
+ * Copyright (C) 2001 Jan-Benedict Glaw <jbglaw@lug-owl.de>
+ * Copyright (C) 2003 Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * The serial console ASCII image can be any size,
+ * but should contain %s to display the version.
+ */
+
+#include <linux/init.h>
+
+
+#define LINUX_LOGO_MONO 1 /* monochrome black/white */
+#define LINUX_LOGO_VGA16 2 /* 16 colors VGA text palette */
+#define LINUX_LOGO_CLUT224 3 /* 224 colors */
+#define LINUX_LOGO_GRAY256 4 /* 256 levels grayscale */
+
+
+struct linux_logo {
+ int type; /* one of LINUX_LOGO_* */
+ unsigned int width;
+ unsigned int height;
+ unsigned int clutsize; /* LINUX_LOGO_CLUT224 only */
+ const unsigned char *clut; /* LINUX_LOGO_CLUT224 only */
+ const unsigned char *data;
+};
+
+extern const struct linux_logo logo_linux_mono;
+extern const struct linux_logo logo_linux_vga16;
+extern const struct linux_logo logo_linux_clut224;
+extern const struct linux_logo logo_blackfin_vga16;
+extern const struct linux_logo logo_blackfin_clut224;
+extern const struct linux_logo logo_dec_clut224;
+extern const struct linux_logo logo_mac_clut224;
+extern const struct linux_logo logo_parisc_clut224;
+extern const struct linux_logo logo_sgi_clut224;
+extern const struct linux_logo logo_sun_clut224;
+extern const struct linux_logo logo_superh_mono;
+extern const struct linux_logo logo_superh_vga16;
+extern const struct linux_logo logo_superh_clut224;
+extern const struct linux_logo logo_m32r_clut224;
+extern const struct linux_logo logo_spe_clut224;
+
+extern const struct linux_logo *fb_find_logo(int depth);
+#ifdef CONFIG_FB_LOGO_EXTRA
+extern void fb_append_extra_logo(const struct linux_logo *logo,
+ unsigned int n);
+#else
+static inline void fb_append_extra_logo(const struct linux_logo *logo,
+ unsigned int n)
+{}
+#endif
+
+#endif /* _LINUX_LINUX_LOGO_H */
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
new file mode 100644
index 000000000..f1664c636
--- /dev/null
+++ b/include/linux/lis3lv02d.h
@@ -0,0 +1,127 @@
+#ifndef __LIS3LV02D_H_
+#define __LIS3LV02D_H_
+
+/**
+ * struct lis3lv02d_platform_data - lis3 chip family platform data
+ * @click_flags: Click detection unit configuration
+ * @click_thresh_x: Click detection unit x axis threshold
+ * @click_thresh_y: Click detection unit y axis threshold
+ * @click_thresh_z: Click detection unit z axis threshold
+ * @click_time_limit: Click detection unit time parameter
+ * @click_latency: Click detection unit latency parameter
+ * @click_window: Click detection unit window parameter
+ * @irq_cfg: On chip irq source and type configuration (click /
+ * data available / wake up, open drain, polarity)
+ * @irq_flags1: Additional irq triggering flags for irq channel 0
+ * @irq_flags2: Additional irq triggering flags for irq channel 1
+ * @duration1: Wake up unit 1 duration parameter
+ * @duration2: Wake up unit 2 duration parameter
+ * @wakeup_flags: Wake up unit 1 flags
+ * @wakeup_thresh: Wake up unit 1 threshold value
+ * @wakeup_flags2: Wake up unit 2 flags
+ * @wakeup_thresh2: Wake up unit 2 threshold value
+ * @hipass_ctrl: High pass filter control (enable / disable, cut off
+ * frequency)
+ * @axis_x: Sensor orientation remapping for x-axis
+ * @axis_y: Sensor orientation remapping for y-axis
+ * @axis_z: Sensor orientation remapping for z-axis
+ * @driver_features: Enable bits for different features. Disabled by default
+ * @default_rate: Default sampling rate. 0 means reset default
+ * @setup_resources: Interrupt line setup call back function
+ * @release_resources: Interrupt line release call back function
+ * @st_min_limits[3]: Selftest acceptance minimum values
+ * @st_max_limits[3]: Selftest acceptance maximum values
+ * @irq2: Irq line 2 number
+ *
+ * Platform data is used to set up the sensor chip. The meaning of the
+ * different chip features can be found in the data sheet, which is publicly
+ * available on the www.st.com web pages. Currently the platform data is used
+ * only for the 8 bit device. The 8 bit device has two wake up / free fall
+ * detection units and a click detection unit. There are plenty of ways to
+ * configure the chip, which makes it quite hard to explain the deeper meaning
+ * of the fields here. Behaviour of the detection blocks varies heavily
+ * depending on the configuration. For example, the interrupt detection block
+ * can use high pass filtered data, which makes it react to changes in the
+ * acceleration. Irq_flags can be used to enable interrupt detection on both
+ * edges. With proper chip configuration this produces an interrupt when a
+ * trigger condition starts and when it goes away.
+
+struct lis3lv02d_platform_data {
+ /* please note: the 'click' feature is only supported for
+ * LIS[32]02DL variants of the chip and will be ignored for
+ * others */
+#define LIS3_CLICK_SINGLE_X (1 << 0)
+#define LIS3_CLICK_DOUBLE_X (1 << 1)
+#define LIS3_CLICK_SINGLE_Y (1 << 2)
+#define LIS3_CLICK_DOUBLE_Y (1 << 3)
+#define LIS3_CLICK_SINGLE_Z (1 << 4)
+#define LIS3_CLICK_DOUBLE_Z (1 << 5)
+ unsigned char click_flags;
+ unsigned char click_thresh_x;
+ unsigned char click_thresh_y;
+ unsigned char click_thresh_z;
+ unsigned char click_time_limit;
+ unsigned char click_latency;
+ unsigned char click_window;
+
+#define LIS3_IRQ1_DISABLE (0 << 0)
+#define LIS3_IRQ1_FF_WU_1 (1 << 0)
+#define LIS3_IRQ1_FF_WU_2 (2 << 0)
+#define LIS3_IRQ1_FF_WU_12 (3 << 0)
+#define LIS3_IRQ1_DATA_READY (4 << 0)
+#define LIS3_IRQ1_CLICK (7 << 0)
+#define LIS3_IRQ1_MASK (7 << 0)
+#define LIS3_IRQ2_DISABLE (0 << 3)
+#define LIS3_IRQ2_FF_WU_1 (1 << 3)
+#define LIS3_IRQ2_FF_WU_2 (2 << 3)
+#define LIS3_IRQ2_FF_WU_12 (3 << 3)
+#define LIS3_IRQ2_DATA_READY (4 << 3)
+#define LIS3_IRQ2_CLICK (7 << 3)
+#define LIS3_IRQ2_MASK (7 << 3)
+#define LIS3_IRQ_OPEN_DRAIN (1 << 6)
+#define LIS3_IRQ_ACTIVE_LOW (1 << 7)
+ unsigned char irq_cfg;
+ unsigned char irq_flags1; /* Additional irq edge / level flags */
+ unsigned char irq_flags2; /* Additional irq edge / level flags */
+ unsigned char duration1;
+ unsigned char duration2;
+#define LIS3_WAKEUP_X_LO (1 << 0)
+#define LIS3_WAKEUP_X_HI (1 << 1)
+#define LIS3_WAKEUP_Y_LO (1 << 2)
+#define LIS3_WAKEUP_Y_HI (1 << 3)
+#define LIS3_WAKEUP_Z_LO (1 << 4)
+#define LIS3_WAKEUP_Z_HI (1 << 5)
+ unsigned char wakeup_flags;
+ unsigned char wakeup_thresh;
+ unsigned char wakeup_flags2;
+ unsigned char wakeup_thresh2;
+#define LIS3_HIPASS_CUTFF_8HZ 0
+#define LIS3_HIPASS_CUTFF_4HZ 1
+#define LIS3_HIPASS_CUTFF_2HZ 2
+#define LIS3_HIPASS_CUTFF_1HZ 3
+#define LIS3_HIPASS1_DISABLE (1 << 2)
+#define LIS3_HIPASS2_DISABLE (1 << 3)
+ unsigned char hipass_ctrl;
+#define LIS3_NO_MAP 0
+#define LIS3_DEV_X 1
+#define LIS3_DEV_Y 2
+#define LIS3_DEV_Z 3
+#define LIS3_INV_DEV_X -1
+#define LIS3_INV_DEV_Y -2
+#define LIS3_INV_DEV_Z -3
+ s8 axis_x;
+ s8 axis_y;
+ s8 axis_z;
+#define LIS3_USE_BLOCK_READ 0x02
+ u16 driver_features;
+ int default_rate;
+ int (*setup_resources)(void);
+ int (*release_resources)(void);
+ /* Limits for selftest are specified in chip data sheet */
+ s16 st_min_limits[3]; /* min pass limit x, y, z */
+ s16 st_max_limits[3]; /* max pass limit x, y, z */
+ int irq2;
+};
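+
+/*
+ * Usage sketch (illustrative only): minimal board code platform data that
+ * enables single-click detection on the X axis and routes the click event
+ * to IRQ line 1. The structure name and threshold value are hypothetical.
+ *
+ *	static struct lis3lv02d_platform_data board_lis3_pdata = {
+ *		.click_flags	= LIS3_CLICK_SINGLE_X,
+ *		.click_thresh_x	= 10,
+ *		.irq_cfg	= LIS3_IRQ1_CLICK,
+ *	};
+ */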
+
+#endif /* __LIS3LV02D_H_ */
diff --git a/include/linux/list.h b/include/linux/list.h
new file mode 100644
index 000000000..feb773c76
--- /dev/null
+++ b/include/linux/list.h
@@ -0,0 +1,744 @@
+#ifndef _LINUX_LIST_H
+#define _LINUX_LIST_H
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/poison.h>
+#include <linux/const.h>
+#include <linux/kernel.h>
+
+/*
+ * Simple doubly linked list implementation.
+ *
+ * Some of the internal functions ("__xxx") are useful when
+ * manipulating whole lists rather than single entries, as
+ * sometimes we already know the next/prev entries and we can
+ * generate better code by using them directly rather than
+ * using the generic single-entry routines.
+ */
+
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list;
+ list->prev = list;
+}
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+#else
+extern void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+#endif
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+
+
+/**
+ * list_add_tail - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void list_add_tail(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head->prev, head);
+}
+
+/*
+ * Delete a list entry by making the prev/next entries
+ * point to each other.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+static inline void __list_del(struct list_head * prev, struct list_head * next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void __list_del_entry(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+#else
+extern void __list_del_entry(struct list_head *entry);
+extern void list_del(struct list_head *entry);
+#endif
+
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
+static inline void list_replace(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->next->prev = new;
+ new->prev = old->prev;
+ new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+ struct list_head *new)
+{
+ list_replace(old, new);
+ INIT_LIST_HEAD(old);
+}
+
+/**
+ * list_del_init - deletes entry from list and reinitialize it.
+ * @entry: the element to delete from the list.
+ */
+static inline void list_del_init(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ INIT_LIST_HEAD(entry);
+}
+
+/**
+ * list_move - delete from one list and add as another's head
+ * @list: the entry to move
+ * @head: the head that will precede our entry
+ */
+static inline void list_move(struct list_head *list, struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add(list, head);
+}
+
+/**
+ * list_move_tail - delete from one list and add as another's tail
+ * @list: the entry to move
+ * @head: the head that will follow our entry
+ */
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del_entry(list);
+ list_add_tail(list, head);
+}
+
+/**
+ * list_is_last - tests whether @list is the last entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_last(const struct list_head *list,
+ const struct list_head *head)
+{
+ return list->next == head;
+}
+
+/**
+ * list_empty - tests whether a list is empty
+ * @head: the list to test.
+ */
+static inline int list_empty(const struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * list_empty_careful - tests whether a list is empty and not being modified
+ * @head: the list to test
+ *
+ * Description:
+ * tests whether a list is empty _and_ checks that no other CPU might be
+ * in the process of modifying either member (next or prev)
+ *
+ * NOTE: using list_empty_careful() without synchronization
+ * can only be safe if the only activity that can happen
+ * to the list entry is list_del_init(). Eg. it cannot be used
+ * if another CPU could re-list_add() it.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+ struct list_head *next = head->next;
+ return (next == head) && (next == head->prev);
+}
+
+/**
+ * list_rotate_left - rotate the list to the left
+ * @head: the head of the list
+ */
+static inline void list_rotate_left(struct list_head *head)
+{
+ struct list_head *first;
+
+ if (!list_empty(head)) {
+ first = head->next;
+ list_move_tail(first, head);
+ }
+}
+
+/**
+ * list_is_singular - tests whether a list has just one entry.
+ * @head: the list to test.
+ */
+static inline int list_is_singular(const struct list_head *head)
+{
+ return !list_empty(head) && (head->next == head->prev);
+}
+
+static inline void __list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ struct list_head *new_first = entry->next;
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry;
+ entry->next = list;
+ head->next = new_first;
+ new_first->prev = head;
+}
+
+/**
+ * list_cut_position - cut a list into two
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ * and if so we won't cut the list
+ *
+ * This helper moves the initial part of @head, up to and
+ * including @entry, from @head to @list. You should
+ * pass on @entry an element you know is on @head. @list
+ * should be an empty list or a list you do not care about
+ * losing its data.
+ *
+ */
+static inline void list_cut_position(struct list_head *list,
+ struct list_head *head, struct list_head *entry)
+{
+ if (list_empty(head))
+ return;
+ if (list_is_singular(head) &&
+ (head->next != entry && head != entry))
+ return;
+ if (entry == head)
+ INIT_LIST_HEAD(list);
+ else
+ __list_cut_position(list, head, entry);
+}
+
+static inline void __list_splice(const struct list_head *list,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+/**
+ * list_splice - join two lists, this is designed for stacks
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice(const struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head, head->next);
+}
+
+/**
+ * list_splice_tail - join two lists, each list being a queue
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ */
+static inline void list_splice_tail(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list))
+ __list_splice(list, head->prev, head);
+}
+
+/**
+ * list_splice_init - join two lists and reinitialise the emptied list.
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head, head->next);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_splice_tail_init - join two lists and reinitialise the emptied list
+ * @list: the new list to add.
+ * @head: the place to add it in the first list.
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised
+ */
+static inline void list_splice_tail_init(struct list_head *list,
+ struct list_head *head)
+{
+ if (!list_empty(list)) {
+ __list_splice(list, head->prev, head);
+ INIT_LIST_HEAD(list);
+ }
+}
+
+/**
+ * list_entry - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * list_first_entry - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+/**
+ * list_last_entry - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+/**
+ * list_first_entry_or_null - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_first_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+
+/**
+ * list_next_entry - get the next element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_next_entry(pos, member) \
+ list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * list_prev_entry - get the prev element in list
+ * @pos: the type * to cursor
+ * @member: the name of the list_head within the struct.
+ */
+#define list_prev_entry(pos, member) \
+ list_entry((pos)->member.prev, typeof(*(pos)), member)
+
+/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * list_for_each_prev - iterate over a list backwards
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_prev(pos, head) \
+ for (pos = (head)->prev; pos != (head); pos = pos->prev)
+
+/**
+ * list_for_each_safe - iterate over a list safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_safe(pos, n, head) \
+ for (pos = (head)->next, n = pos->next; pos != (head); \
+ pos = n, n = pos->next)
+
+/**
+ * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @n: another &struct list_head to use as temporary storage
+ * @head: the head for your list.
+ */
+#define list_for_each_prev_safe(pos, n, head) \
+ for (pos = (head)->prev, n = pos->prev; \
+ pos != (head); \
+ pos = n, n = pos->prev)
+
+/**
+ * list_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = list_first_entry(head, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_next_entry(pos, member))
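+
+/*
+ * Usage sketch (illustrative only): a structure embedding a list_head and a
+ * walk over all instances linked on a list. "struct foo", "foo_list" and the
+ * pr_info() call are hypothetical.
+ *
+ *	struct foo {
+ *		int value;
+ *		struct list_head node;
+ *	};
+ *
+ *	LIST_HEAD(foo_list);
+ *
+ *	struct foo *f;
+ *
+ *	list_for_each_entry(f, &foo_list, node)
+ *		pr_info("value=%d\n", f->value);
+ */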
+
+/**
+ * list_for_each_entry_reverse - iterate backwards over list of given type.
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = list_last_entry(head, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_prev_entry(pos, member))
+
+/**
+ * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
+ * @pos: the type * to use as a start point
+ * @head: the head of the list
+ * @member: the name of the list_head within the struct.
+ *
+ * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
+ */
+#define list_prepare_entry(pos, head, member) \
+ ((pos) ? : list_entry(head, typeof(*pos), member))
+
+/**
+ * list_for_each_entry_continue - continue iteration over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue(pos, head, member) \
+ for (pos = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = list_next_entry(pos, member))
+
+/**
+ * list_for_each_entry_continue_reverse - iterate backwards from the given point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Start to iterate over list of given type backwards, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = list_prev_entry(pos, member); \
+ &pos->member != (head); \
+ pos = list_prev_entry(pos, member))
+
+/**
+ * list_for_each_entry_from - iterate over list of given type from the current point
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type, continuing from current position.
+ */
+#define list_for_each_entry_from(pos, head, member) \
+ for (; &pos->member != (head); \
+ pos = list_next_entry(pos, member))
+
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ */
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_first_entry(head, typeof(*pos), member), \
+ n = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = n, n = list_next_entry(n, member))
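+
+/*
+ * Usage sketch (illustrative only): freeing every entry while walking the
+ * list, which is exactly the case that needs the _safe variant. Types are
+ * as in the sketch above; freeing the containing object with kfree() is
+ * hypothetical.
+ *
+ *	struct foo *f, *tmp;
+ *
+ *	list_for_each_entry_safe(f, tmp, &foo_list, node) {
+ *		list_del(&f->node);
+ *		kfree(f);
+ *	}
+ */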
+
+/**
+ * list_for_each_entry_safe_continue - continue list iteration safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type, continuing after current point,
+ * safe against removal of list entry.
+ */
+#define list_for_each_entry_safe_continue(pos, n, head, member) \
+ for (pos = list_next_entry(pos, member), \
+ n = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = n, n = list_next_entry(n, member))
+
+/**
+ * list_for_each_entry_safe_from - iterate over list from current point safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate over list of given type from current point, safe against
+ * removal of list entry.
+ */
+#define list_for_each_entry_safe_from(pos, n, head, member) \
+ for (n = list_next_entry(pos, member); \
+ &pos->member != (head); \
+ pos = n, n = list_next_entry(n, member))
+
+/**
+ * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, safe against removal
+ * of list entry.
+ */
+#define list_for_each_entry_safe_reverse(pos, n, head, member) \
+ for (pos = list_last_entry(head, typeof(*pos), member), \
+ n = list_prev_entry(pos, member); \
+ &pos->member != (head); \
+ pos = n, n = list_prev_entry(n, member))
+
+/**
+ * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
+ * @pos: the loop cursor used in the list_for_each_entry_safe loop
+ * @n: temporary storage used in list_for_each_entry_safe
+ * @member: the name of the list_head within the struct.
+ *
+ * list_safe_reset_next is not safe to use in general if the list may be
+ * modified concurrently (eg. the lock is dropped in the loop body). An
+ * exception to this is if the cursor element (pos) is pinned in the list,
+ * and list_safe_reset_next is called after re-taking the lock and before
+ * completing the current iteration of the loop body.
+ */
+#define list_safe_reset_next(pos, n, member) \
+ n = list_next_entry(pos, member)
+
+/*
+ * Doubly linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+ return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+ struct hlist_node *next = n->next;
+ struct hlist_node **pprev = n->pprev;
+ *pprev = next;
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ INIT_HLIST_NODE(n);
+ }
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ h->first = n;
+ n->pprev = &h->first;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ next->pprev = &n->next;
+ *(n->pprev) = n;
+}
+
+static inline void hlist_add_behind(struct hlist_node *n,
+ struct hlist_node *prev)
+{
+ n->next = prev->next;
+ prev->next = n;
+ n->pprev = &prev->next;
+
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
+/* after that we'll appear to be on some hlist and hlist_del will work */
+static inline void hlist_add_fake(struct hlist_node *n)
+{
+ n->pprev = &n->next;
+}
+
+/*
+ * Move a list from one list head to another. Fixup the pprev
+ * reference of the first entry if it exists.
+ */
+static inline void hlist_move_list(struct hlist_head *old,
+ struct hlist_head *new)
+{
+ new->first = old->first;
+ if (new->first)
+ new->first->pprev = &new->first;
+ old->first = NULL;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each(pos, head) \
+ for (pos = (head)->first; pos ; pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+ for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+ pos = n)
+
+#define hlist_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * hlist_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(pos, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
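+
+/*
+ * Usage sketch (illustrative only): a small open-hashing table built on
+ * hlist heads. "struct item", "table" and the hash expression are
+ * hypothetical.
+ *
+ *	struct item {
+ *		unsigned long key;
+ *		struct hlist_node hash_node;
+ *	};
+ *
+ *	struct hlist_head table[16];
+ *	struct item *it;
+ *
+ *	hlist_for_each_entry(it, &table[key & 15], hash_node)
+ *		if (it->key == key)
+ *			return it;
+ */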
+
+/**
+ * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(pos, member) \
+ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
+ pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_from - iterate over a hlist continuing from current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another &struct hlist_node to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(pos, n, head, member) \
+ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
+ pos && ({ n = pos->member.next; 1; }); \
+ pos = hlist_entry_safe(n, typeof(*pos), member))
+
+#endif
diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
new file mode 100644
index 000000000..2eb88556c
--- /dev/null
+++ b/include/linux/list_bl.h
@@ -0,0 +1,161 @@
+#ifndef _LINUX_LIST_BL_H
+#define _LINUX_LIST_BL_H
+
+#include <linux/list.h>
+#include <linux/bit_spinlock.h>
+
+/*
+ * Special version of lists, where head of the list has a lock in the lowest
+ * bit. This is useful for scalable hash tables without increasing memory
+ * footprint overhead.
+ *
+ * For modification operations, the 0 bit of hlist_bl_head->first
+ * pointer must be set.
+ *
+ * With some small modifications, this can easily be adapted to store several
+ * arbitrary bits (not just a single lock bit), if the need arises to store
+ * some fast and compact auxiliary data.
+ */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#define LIST_BL_LOCKMASK 1UL
+#else
+#define LIST_BL_LOCKMASK 0UL
+#endif
+
+#ifdef CONFIG_DEBUG_LIST
+#define LIST_BL_BUG_ON(x) BUG_ON(x)
+#else
+#define LIST_BL_BUG_ON(x)
+#endif
+
+
+struct hlist_bl_head {
+ struct hlist_bl_node *first;
+};
+
+struct hlist_bl_node {
+ struct hlist_bl_node *next, **pprev;
+};
+#define INIT_HLIST_BL_HEAD(ptr) \
+ ((ptr)->first = NULL)
+
+static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
+{
+ h->next = NULL;
+ h->pprev = NULL;
+}
+
+#define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member)
+
+static inline int hlist_bl_unhashed(const struct hlist_bl_node *h)
+{
+ return !h->pprev;
+}
+
+static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h)
+{
+ return (struct hlist_bl_node *)
+ ((unsigned long)h->first & ~LIST_BL_LOCKMASK);
+}
+
+static inline void hlist_bl_set_first(struct hlist_bl_head *h,
+ struct hlist_bl_node *n)
+{
+ LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
+ LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
+ LIST_BL_LOCKMASK);
+ h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
+}
+
+static inline int hlist_bl_empty(const struct hlist_bl_head *h)
+{
+ return !((unsigned long)h->first & ~LIST_BL_LOCKMASK);
+}
+
+static inline void hlist_bl_add_head(struct hlist_bl_node *n,
+ struct hlist_bl_head *h)
+{
+ struct hlist_bl_node *first = hlist_bl_first(h);
+
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ n->pprev = &h->first;
+ hlist_bl_set_first(h, n);
+}
+
+static inline void __hlist_bl_del(struct hlist_bl_node *n)
+{
+ struct hlist_bl_node *next = n->next;
+ struct hlist_bl_node **pprev = n->pprev;
+
+ LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
+
+ /* pprev may be `first`, so be careful not to lose the lock bit */
+ *pprev = (struct hlist_bl_node *)
+ ((unsigned long)next |
+ ((unsigned long)*pprev & LIST_BL_LOCKMASK));
+ if (next)
+ next->pprev = pprev;
+}
+
+static inline void hlist_bl_del(struct hlist_bl_node *n)
+{
+ __hlist_bl_del(n);
+ n->next = LIST_POISON1;
+ n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_bl_del_init(struct hlist_bl_node *n)
+{
+ if (!hlist_bl_unhashed(n)) {
+ __hlist_bl_del(n);
+ INIT_HLIST_BL_NODE(n);
+ }
+}
+
+static inline void hlist_bl_lock(struct hlist_bl_head *b)
+{
+ bit_spin_lock(0, (unsigned long *)b);
+}
+
+static inline void hlist_bl_unlock(struct hlist_bl_head *b)
+{
+ __bit_spin_unlock(0, (unsigned long *)b);
+}
+
+static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
+{
+ return bit_spin_is_locked(0, (unsigned long *)b);
+}
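+
+/*
+ * Usage sketch (illustrative only): insertion under the bit lock stored in
+ * bit 0 of the head pointer, as described above. "head" and "n" are
+ * hypothetical.
+ *
+ *	hlist_bl_lock(head);
+ *	hlist_bl_add_head(n, head);
+ *	hlist_bl_unlock(head);
+ */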
+
+/**
+ * hlist_bl_for_each_entry - iterate over list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ */
+#define hlist_bl_for_each_entry(tpos, pos, head, member) \
+ for (pos = hlist_bl_first(head); \
+ pos && \
+ ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+/**
+ * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_bl_node to use as a loop cursor.
+ * @n: another &struct hlist_bl_node to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the hlist_bl_node within the struct.
+ */
+#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member) \
+ for (pos = hlist_bl_first(head); \
+ pos && ({ n = pos->next; 1; }) && \
+ ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = n)
+
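+/*
+ * A minimal usage sketch (not taken from the kernel sources): the head
+ * itself carries the lock bit, so writers bit-spin-lock the bucket around
+ * modifications. "struct foo" and "table" are hypothetical names.
+ *
+ *    struct foo { int key; struct hlist_bl_node node; };
+ *    static struct hlist_bl_head table[64];
+ *
+ *    static void foo_insert(struct foo *f)
+ *    {
+ *        struct hlist_bl_head *b = &table[f->key & 63];
+ *
+ *        hlist_bl_lock(b);
+ *        hlist_bl_add_head(&f->node, b);
+ *        hlist_bl_unlock(b);
+ *    }
+ */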
+#endif
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
new file mode 100644
index 000000000..2a6b9947a
--- /dev/null
+++ b/include/linux/list_lru.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
+ * Authors: David Chinner and Glauber Costa
+ *
+ * Generic LRU infrastructure
+ */
+#ifndef _LRU_LIST_H
+#define _LRU_LIST_H
+
+#include <linux/list.h>
+#include <linux/nodemask.h>
+#include <linux/shrinker.h>
+
+struct mem_cgroup;
+
+/* list_lru_walk_cb must always return one of these values */
+enum lru_status {
+ LRU_REMOVED, /* item removed from list */
+ LRU_REMOVED_RETRY, /* item removed, but lock has been
+ dropped and reacquired */
+ LRU_ROTATE, /* item referenced, give another pass */
+ LRU_SKIP, /* item cannot be locked, skip */
+ LRU_RETRY, /* item not freeable. May drop the lock
+ internally, but has to return locked. */
+};
+
+struct list_lru_one {
+ struct list_head list;
+ /* may become negative during memcg reparenting */
+ long nr_items;
+};
+
+struct list_lru_memcg {
+ /* array of per cgroup lists, indexed by memcg_cache_id */
+ struct list_lru_one *lru[0];
+};
+
+struct list_lru_node {
+ /* protects all lists on the node, including per cgroup */
+ spinlock_t lock;
+ /* global list, used for the root cgroup in cgroup aware lrus */
+ struct list_lru_one lru;
+#ifdef CONFIG_MEMCG_KMEM
+ /* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
+ struct list_lru_memcg *memcg_lrus;
+#endif
+} ____cacheline_aligned_in_smp;
+
+struct list_lru {
+ struct list_lru_node *node;
+#ifdef CONFIG_MEMCG_KMEM
+ struct list_head list;
+#endif
+};
+
+void list_lru_destroy(struct list_lru *lru);
+int __list_lru_init(struct list_lru *lru, bool memcg_aware,
+ struct lock_class_key *key);
+
+#define list_lru_init(lru) __list_lru_init((lru), false, NULL)
+#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key))
+#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL)
+
+int memcg_update_all_list_lrus(int num_memcgs);
+void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
+
+/**
+ * list_lru_add: add an element to the lru list's tail
+ * @lru: the lru pointer
+ * @item: the item to be added.
+ *
+ * If the element is already part of a list, this function returns without
+ * doing anything. The caller therefore does not need to track whether the
+ * element already belongs to the list and may update it lazily. Note,
+ * however, that this holds for *a* list, not *this* list: if the caller's
+ * elements can live on more than one type of list, it is up to the caller
+ * to fully remove the item from the previous list (with list_lru_del(),
+ * for instance) before moving it to @lru.
+ *
+ * Return value: true if the list was updated, false otherwise
+ */
+bool list_lru_add(struct list_lru *lru, struct list_head *item);
+
+/**
+ * list_lru_del: delete an element from the lru list
+ * @lru: the lru pointer
+ * @item: the item to be deleted.
+ *
+ * This function works analogously to list_lru_add() in terms of list
+ * manipulation. The comments about an element already belonging to
+ * a list are also valid for list_lru_del().
+ *
+ * Return value: true if the list was updated, false otherwise
+ */
+bool list_lru_del(struct list_lru *lru, struct list_head *item);
+
+/**
+ * list_lru_count_one: return the number of objects held by @lru on one node/memcg
+ * @lru: the lru pointer.
+ * @nid: the node id to count from.
+ * @memcg: the cgroup to count from.
+ *
+ * Always returns a non-negative number, 0 for empty lists. There is no
+ * guarantee that the list is not updated while the count is being computed.
+ * Callers that want such a guarantee need to provide an outer lock.
+ */
+unsigned long list_lru_count_one(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg);
+unsigned long list_lru_count_node(struct list_lru *lru, int nid);
+
+static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
+ struct shrink_control *sc)
+{
+ return list_lru_count_one(lru, sc->nid, sc->memcg);
+}
+
+static inline unsigned long list_lru_count(struct list_lru *lru)
+{
+ long count = 0;
+ int nid;
+
+ for_each_node_state(nid, N_NORMAL_MEMORY)
+ count += list_lru_count_node(lru, nid);
+
+ return count;
+}
+
+void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
+void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
+ struct list_head *head);
+
+typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
+ struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+
+/**
+ * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
+ * @isolate: callback function that is responsible for deciding what to do
+ * with the item currently being scanned
+ * @cb_arg: opaque argument that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * This function will scan all elements in a particular list_lru, calling the
+ * @isolate callback for each of those items, along with the current list
+ * spinlock and the caller-provided opaque argument. The @isolate callback can
+ * choose to drop the lock internally, but *must* return with the lock held.
+ * The callback returns an enum lru_status telling the list_lru infrastructure
+ * what to do with the object being scanned.
+ *
+ * Please note that @nr_to_walk does not mean how many objects will be freed,
+ * just how many objects will be scanned.
+ *
+ * Return value: the number of objects effectively removed from the LRU.
+ */
+unsigned long list_lru_walk_one(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
+unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
+
+static inline unsigned long
+list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
+ list_lru_walk_cb isolate, void *cb_arg)
+{
+ return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
+ &sc->nr_to_scan);
+}
+
+static inline unsigned long
+list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
+ void *cb_arg, unsigned long nr_to_walk)
+{
+ long isolated = 0;
+ int nid;
+
+ for_each_node_state(nid, N_NORMAL_MEMORY) {
+ isolated += list_lru_walk_node(lru, nid, isolate,
+ cb_arg, &nr_to_walk);
+ if (nr_to_walk <= 0)
+ break;
+ }
+ return isolated;
+}
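+/*
+ * A minimal sketch of an isolate callback as it might be used from a
+ * shrinker's scan_objects hook (not taken from the kernel sources; the
+ * per-object locking real callbacks need is omitted, and my_lru,
+ * my_isolate() and my_shrink_scan() are hypothetical names):
+ *
+ *    static enum lru_status my_isolate(struct list_head *item,
+ *                                      struct list_lru_one *list,
+ *                                      spinlock_t *lock, void *cb_arg)
+ *    {
+ *        struct list_head *freeable = cb_arg;
+ *
+ *        list_lru_isolate_move(list, item, freeable);
+ *        return LRU_REMOVED;
+ *    }
+ *
+ *    static unsigned long my_shrink_scan(struct shrinker *s,
+ *                                        struct shrink_control *sc)
+ *    {
+ *        LIST_HEAD(freeable);
+ *        unsigned long freed;
+ *
+ *        freed = list_lru_shrink_walk(&my_lru, sc, my_isolate, &freeable);
+ *        ...dispose of everything left on &freeable here...
+ *        return freed;
+ *    }
+ */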
+#endif /* _LRU_LIST_H */
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
new file mode 100644
index 000000000..f266661d2
--- /dev/null
+++ b/include/linux/list_nulls.h
@@ -0,0 +1,116 @@
+#ifndef _LINUX_LIST_NULLS_H
+#define _LINUX_LIST_NULLS_H
+
+#include <linux/poison.h>
+#include <linux/const.h>
+
+/*
+ * Special version of lists, where end of list is not a NULL pointer,
+ * but a 'nulls' marker, which can have many different values.
+ * (up to 2^31 different values guaranteed on all platforms)
+ *
+ * In the standard hlist, termination of a list is the NULL pointer.
+ * In this special 'nulls' variant, we use the fact that objects stored in
+ * a list are word aligned (4- or 8-byte alignment).
+ * We therefore use the least significant bit of 'ptr':
+ * Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1)
+ * Set to 0 : This is a pointer to some object (ptr)
+ */
+
+struct hlist_nulls_head {
+ struct hlist_nulls_node *first;
+};
+
+struct hlist_nulls_node {
+ struct hlist_nulls_node *next, **pprev;
+};
+#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
+#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
+ ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
+
+#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
+/**
+ * is_a_nulls - Test if a ptr is a 'nulls' end-of-list marker
+ * @ptr: ptr to be tested
+ */
+static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
+{
+ return ((unsigned long)ptr & 1);
+}
+
+/**
+ * get_nulls_value - Get the 'nulls' value of the end of chain
+ * @ptr: end of chain
+ *
+ * Should be called only if is_a_nulls(ptr) is true.
+ */
+static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
+{
+ return ((unsigned long)ptr) >> 1;
+}
+
+static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
+{
+ return !h->pprev;
+}
+
+static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
+{
+ return is_a_nulls(h->first);
+}
+
+static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *first = h->first;
+
+ n->next = first;
+ n->pprev = &h->first;
+ h->first = n;
+ if (!is_a_nulls(first))
+ first->pprev = &n->next;
+}
+
+static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
+{
+ struct hlist_nulls_node *next = n->next;
+ struct hlist_nulls_node **pprev = n->pprev;
+ *pprev = next;
+ if (!is_a_nulls(next))
+ next->pprev = pprev;
+}
+
+static inline void hlist_nulls_del(struct hlist_nulls_node *n)
+{
+ __hlist_nulls_del(n);
+ n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_nulls_for_each_entry - iterate over list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_nulls_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ *
+ */
+#define hlist_nulls_for_each_entry(tpos, pos, head, member) \
+ for (pos = (head)->first; \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
+/**
+ * hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_nulls_node to use as a loop cursor.
+ * @member: the name of the hlist_nulls_node within the struct.
+ *
+ */
+#define hlist_nulls_for_each_entry_from(tpos, pos, member) \
+ for (; (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
+ pos = pos->next)
+
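+/*
+ * A minimal usage sketch (not taken from the kernel sources): if each
+ * bucket's end-of-list marker encodes the bucket index, a lookup that runs
+ * off the end of a chain can tell whether it finished on the chain it
+ * started on - useful for lockless/RCU-style lookups where an object may
+ * be reused and moved to another chain mid-walk. "hash", "obj", "slot" and
+ * "key" are hypothetical names.
+ *
+ *    for (i = 0; i < 64; i++)
+ *        INIT_HLIST_NULLS_HEAD(&hash[i], i);
+ *
+ * restart:
+ *    hlist_nulls_for_each_entry(obj, pos, &hash[slot], node)
+ *        if (obj->key == key)
+ *            return obj;
+ *    if (get_nulls_value(pos) != slot)
+ *        goto restart;
+ */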
+#endif
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
new file mode 100644
index 000000000..1a2df2efb
--- /dev/null
+++ b/include/linux/list_sort.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_LIST_SORT_H
+#define _LINUX_LIST_SORT_H
+
+#include <linux/types.h>
+
+struct list_head;
+
+void list_sort(void *priv, struct list_head *head,
+ int (*cmp)(void *priv, struct list_head *a,
+ struct list_head *b));
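+/*
+ * A minimal sketch of a comparison callback (not taken from the kernel
+ * sources; "struct item" and item_cmp() are hypothetical): cmp() should
+ * return a negative value if @a sorts before @b, a positive value if @a
+ * sorts after @b, and 0 if they are equal (the sort is stable).
+ *
+ *    struct item { int val; struct list_head list; };
+ *
+ *    static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
+ *    {
+ *        struct item *ia = list_entry(a, struct item, list);
+ *        struct item *ib = list_entry(b, struct item, list);
+ *
+ *        return (ia->val > ib->val) - (ia->val < ib->val);
+ *    }
+ *
+ *    list_sort(NULL, &my_list, item_cmp);
+ */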
+#endif
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
new file mode 100644
index 000000000..ee6dbb39a
--- /dev/null
+++ b/include/linux/livepatch.h
@@ -0,0 +1,133 @@
+/*
+ * livepatch.h - Kernel Live Patching Core
+ *
+ * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+ * Copyright (C) 2014 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _LINUX_LIVEPATCH_H_
+#define _LINUX_LIVEPATCH_H_
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+#if IS_ENABLED(CONFIG_LIVEPATCH)
+
+#include <asm/livepatch.h>
+
+enum klp_state {
+ KLP_DISABLED,
+ KLP_ENABLED
+};
+
+/**
+ * struct klp_func - function structure for live patching
+ * @old_name: name of the function to be patched
+ * @new_func: pointer to the patched function code
+ * @old_addr: a hint conveying at what address the old function
+ * can be found (optional, vmlinux patches only)
+ * @kobj: kobject for sysfs resources
+ * @state: tracks function-level patch application state
+ * @stack_node: list node for klp_ops func_stack list
+ */
+struct klp_func {
+ /* external */
+ const char *old_name;
+ void *new_func;
+ /*
+ * The old_addr field is optional and can be used to resolve
+ * duplicate symbol names in the vmlinux object. If this
+ * information is not present, the symbol is located by name
+ * with kallsyms. If the name is not unique and old_addr is
+ * not provided, the patch application fails as there is no
+ * way to resolve the ambiguity.
+ */
+ unsigned long old_addr;
+
+ /* internal */
+ struct kobject kobj;
+ enum klp_state state;
+ struct list_head stack_node;
+};
+
+/**
+ * struct klp_reloc - relocation structure for live patching
+ * @loc: address where the relocation will be written
+ * @val: address of the referenced symbol (optional,
+ * vmlinux patches only)
+ * @type: ELF relocation type
+ * @name: name of the referenced symbol (for lookup/verification)
+ * @addend: offset from the referenced symbol
+ * @external: symbol is either exported or within the live patch module itself
+ */
+struct klp_reloc {
+ unsigned long loc;
+ unsigned long val;
+ unsigned long type;
+ const char *name;
+ int addend;
+ int external;
+};
+
+/**
+ * struct klp_object - kernel object structure for live patching
+ * @name: module name (or NULL for vmlinux)
+ * @relocs: relocation entries to be applied at load time
+ * @funcs: function entries for functions to be patched in the object
+ * @kobj: kobject for sysfs resources
+ * @mod: kernel module associated with the patched object
+ * (NULL for vmlinux)
+ * @state: tracks object-level patch application state
+ */
+struct klp_object {
+ /* external */
+ const char *name;
+ struct klp_reloc *relocs;
+ struct klp_func *funcs;
+
+ /* internal */
+ struct kobject kobj;
+ struct module *mod;
+ enum klp_state state;
+};
+
+/**
+ * struct klp_patch - patch structure for live patching
+ * @mod: reference to the live patch module
+ * @objs: object entries for kernel objects to be patched
+ * @list: list node for global list of registered patches
+ * @kobj: kobject for sysfs resources
+ * @state: tracks patch-level application state
+ */
+struct klp_patch {
+ /* external */
+ struct module *mod;
+ struct klp_object *objs;
+
+ /* internal */
+ struct list_head list;
+ struct kobject kobj;
+ enum klp_state state;
+};
+
+int klp_register_patch(struct klp_patch *);
+int klp_unregister_patch(struct klp_patch *);
+int klp_enable_patch(struct klp_patch *);
+int klp_disable_patch(struct klp_patch *);
+
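+/*
+ * A minimal sketch of how a patch module wires these structures together,
+ * loosely modelled on the livepatch sample module; the replacement function
+ * livepatch_cmdline_proc_show() is illustrative only, and leaving
+ * klp_object.name NULL means the patched object is vmlinux.
+ *
+ *    static struct klp_func funcs[] = {
+ *        {
+ *            .old_name = "cmdline_proc_show",
+ *            .new_func = livepatch_cmdline_proc_show,
+ *        }, { }
+ *    };
+ *
+ *    static struct klp_object objs[] = {
+ *        {
+ *            .funcs = funcs,
+ *        }, { }
+ *    };
+ *
+ *    static struct klp_patch patch = {
+ *        .mod = THIS_MODULE,
+ *        .objs = objs,
+ *    };
+ *
+ *    ret = klp_register_patch(&patch);
+ *    if (!ret)
+ *        ret = klp_enable_patch(&patch);
+ */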
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _LINUX_LIVEPATCH_H_ */
diff --git a/include/linux/llc.h b/include/linux/llc.h
new file mode 100644
index 000000000..b965314d0
--- /dev/null
+++ b/include/linux/llc.h
@@ -0,0 +1,23 @@
+/*
+ * IEEE 802.2 User Interface SAPs for Linux, data structures and indicators.
+ *
+ * Copyright (c) 2001 by Jay Schulist <jschlst@samba.org>
+ *
+ * This program can be redistributed or modified under the terms of the
+ * GNU General Public License as published by the Free Software Foundation.
+ * This program is distributed without any warranty or implied warranty
+ * of merchantability or fitness for a particular purpose.
+ *
+ * See the GNU General Public License for more details.
+ */
+#ifndef __LINUX_LLC_H
+#define __LINUX_LLC_H
+
+#include <uapi/linux/llc.h>
+
+#define LLC_SAP_DYN_START 0xC0
+#define LLC_SAP_DYN_STOP 0xDE
+#define LLC_SAP_DYN_TRIES 4
+
+#define llc_ui_skb_cb(__skb) ((struct sockaddr_llc *)&((__skb)->cb[0]))
+#endif /* __LINUX_LLC_H */
diff --git a/include/linux/llist.h b/include/linux/llist.h
new file mode 100644
index 000000000..fbf10a0bc
--- /dev/null
+++ b/include/linux/llist.h
@@ -0,0 +1,200 @@
+#ifndef LLIST_H
+#define LLIST_H
+/*
+ * Lock-less NULL terminated single linked list
+ *
+ * If there are multiple producers and multiple consumers, llist_add
+ * can be used in producers and llist_del_all can be used in
+ * consumers. They can work simultaneously without a lock, but
+ * llist_del_first cannot be used here, because llist_del_first
+ * depends on list->first->next not changing while list->first is
+ * unchanged during its operation, and a llist_del_first, llist_add,
+ * llist_add (or llist_del_all, llist_add, llist_add) sequence in
+ * another consumer may violate that.
+ *
+ * If there are multiple producers and one consumer, llist_add can be
+ * used in producers and llist_del_all or llist_del_first can be used
+ * in the consumer.
+ *
+ * This can be summarized as follows:
+ *
+ * | add | del_first | del_all
+ * add | - | - | -
+ * del_first | | L | L
+ * del_all | | | -
+ *
+ * Where "-" means that no lock is needed, while "L" means that a lock
+ * is needed.
+ *
+ * The list entries deleted via llist_del_all can be traversed with
+ * traversal functions such as llist_for_each(). But list entries
+ * cannot be traversed safely before being deleted from the list.
+ * The order of deleted entries is from the newest to the oldest added
+ * one. If you want to traverse from the oldest to the newest, you
+ * must reverse the order yourself before traversing.
+ *
+ * The basic atomic operation of this list is cmpxchg on long. On
+ * architectures that don't have NMI-safe cmpxchg implementation, the
+ * list can NOT be used in NMI handlers. So code that uses the list in
+ * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
+ *
+ * Copyright 2010,2011 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <asm/cmpxchg.h>
+
+struct llist_head {
+ struct llist_node *first;
+};
+
+struct llist_node {
+ struct llist_node *next;
+};
+
+#define LLIST_HEAD_INIT(name) { NULL }
+#define LLIST_HEAD(name) struct llist_head name = LLIST_HEAD_INIT(name)
+
+/**
+ * init_llist_head - initialize lock-less list head
+ * @list: the lock-less list to be initialized
+ */
+static inline void init_llist_head(struct llist_head *list)
+{
+ list->first = NULL;
+}
+
+/**
+ * llist_entry - get the struct of this entry
+ * @ptr: the &struct llist_node pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the llist_node within the struct.
+ */
+#define llist_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * llist_for_each - iterate over some deleted entries of a lock-less list
+ * @pos: the &struct llist_node to use as a loop cursor
+ * @node: the first entry of deleted list entries
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being deleted from list, so start with an entry
+ * instead of list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each(pos, node) \
+ for ((pos) = (node); pos; (pos) = (pos)->next)
+
+/**
+ * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @node: the first entry of deleted list entries.
+ * @member: the name of the llist_node within the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry(pos, node, member) \
+ for ((pos) = llist_entry((node), typeof(*(pos)), member); \
+ &(pos)->member != NULL; \
+ (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
+
+/**
+ * llist_for_each_entry_safe - iterate over some deleted entries of lock-less list of given type
+ * safe against removal of list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @node: the first entry of deleted list entries.
+ * @member: the name of the llist_node within the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from list, so start with an entry
+ * instead of list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry. If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_entry_safe(pos, n, node, member) \
+ for (pos = llist_entry((node), typeof(*pos), member); \
+ &pos->member != NULL && \
+ (n = llist_entry(pos->member.next, typeof(*n), member), true); \
+ pos = n)
+
+/**
+ * llist_empty - tests whether a lock-less list is empty
+ * @head: the list to test
+ *
+ * Not guaranteed to be accurate or up to date. Just a quick way to
+ * test whether the list is empty without deleting something from the
+ * list.
+ */
+static inline bool llist_empty(const struct llist_head *head)
+{
+ return ACCESS_ONCE(head->first) == NULL;
+}
+
+static inline struct llist_node *llist_next(struct llist_node *node)
+{
+ return node->next;
+}
+
+extern bool llist_add_batch(struct llist_node *new_first,
+ struct llist_node *new_last,
+ struct llist_head *head);
+/**
+ * llist_add - add a new entry
+ * @new: new entry to be added
+ * @head: the head for your lock-less list
+ *
+ * Returns true if the list was empty prior to adding this entry.
+ */
+static inline bool llist_add(struct llist_node *new, struct llist_head *head)
+{
+ return llist_add_batch(new, new, head);
+}
+
+/**
+ * llist_del_all - delete all entries from lock-less list
+ * @head: the head of lock-less list to delete all entries
+ *
+ * If list is empty, return NULL, otherwise, delete all entries and
+ * return the pointer to the first entry. The order of entries
+ * deleted is from the newest to the oldest added one.
+ */
+static inline struct llist_node *llist_del_all(struct llist_head *head)
+{
+ return xchg(&head->first, NULL);
+}
+
+extern struct llist_node *llist_del_first(struct llist_head *head);
+
+struct llist_node *llist_reverse_order(struct llist_node *head);
+
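+/*
+ * A minimal multi-producer / single-consumer sketch (not taken from the
+ * kernel sources; "struct work_item", pending and process() are
+ * hypothetical). Producers may run concurrently, even from IRQ context;
+ * the consumer takes the whole list at once and reverses it because
+ * llist_del_all() hands back entries newest-first.
+ *
+ *    struct work_item { struct llist_node node; };
+ *    static LLIST_HEAD(pending);
+ *
+ *    // producer
+ *    llist_add(&item->node, &pending);
+ *
+ *    // consumer
+ *    struct llist_node *batch = llist_del_all(&pending);
+ *    struct work_item *pos, *tmp;
+ *
+ *    batch = llist_reverse_order(batch);
+ *    llist_for_each_entry_safe(pos, tmp, batch, node)
+ *        process(pos);
+ */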
+#endif /* LLIST_H */
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
new file mode 100644
index 000000000..4d24d6457
--- /dev/null
+++ b/include/linux/lockd/bind.h
@@ -0,0 +1,60 @@
+/*
+ * linux/include/linux/lockd/bind.h
+ *
+ * This is the part of lockd visible to nfsd and the nfs client.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef LINUX_LOCKD_BIND_H
+#define LINUX_LOCKD_BIND_H
+
+#include <linux/lockd/nlm.h>
+/* need xdr-encoded error codes too, so... */
+#include <linux/lockd/xdr.h>
+#ifdef CONFIG_LOCKD_V4
+#include <linux/lockd/xdr4.h>
+#endif
+
+/* Dummy declarations */
+struct svc_rqst;
+
+/*
+ * This is the set of functions for lockd->nfsd communication
+ */
+struct nlmsvc_binding {
+ __be32 (*fopen)(struct svc_rqst *,
+ struct nfs_fh *,
+ struct file **);
+ void (*fclose)(struct file *);
+};
+
+extern struct nlmsvc_binding * nlmsvc_ops;
+
+/*
+ * Similar to nfs_client_initdata, but without the NFS-specific
+ * rpc_ops field.
+ */
+struct nlmclnt_initdata {
+ const char *hostname;
+ const struct sockaddr *address;
+ size_t addrlen;
+ unsigned short protocol;
+ u32 nfs_version;
+ int noresvport;
+ struct net *net;
+};
+
+/*
+ * Functions exported by the lockd module
+ */
+
+extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
+extern void nlmclnt_done(struct nlm_host *host);
+
+extern int nlmclnt_proc(struct nlm_host *host, int cmd,
+ struct file_lock *fl);
+extern int lockd_up(struct net *net);
+extern void lockd_down(struct net *net);
+
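+/*
+ * A minimal sketch of the client-side handshake (not taken from the kernel
+ * sources; the field values are placeholders): fill in nlmclnt_initdata for
+ * the server being mounted, keep the returned nlm_host for later lock
+ * requests, and drop it with nlmclnt_done() at unmount time.
+ *
+ *    struct nlmclnt_initdata nlm_init = {
+ *        .hostname    = server_name,
+ *        .address     = (struct sockaddr *)&server_addr,
+ *        .addrlen     = server_addrlen,
+ *        .protocol    = IPPROTO_TCP,
+ *        .nfs_version = 3,
+ *        .net         = net,
+ *    };
+ *    struct nlm_host *host = nlmclnt_init(&nlm_init);
+ *
+ *    if (IS_ERR(host))
+ *        return PTR_ERR(host);
+ *    ...
+ *    nlmclnt_done(host);
+ */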
+#endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
new file mode 100644
index 000000000..0ca810993
--- /dev/null
+++ b/include/linux/lockd/debug.h
@@ -0,0 +1,43 @@
+/*
+ * linux/include/linux/lockd/debug.h
+ *
+ * Debugging stuff.
+ *
+ * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef LINUX_LOCKD_DEBUG_H
+#define LINUX_LOCKD_DEBUG_H
+
+#ifdef __KERNEL__
+
+#include <linux/sunrpc/debug.h>
+
+/*
+ * Enable lockd debugging.
+ * Requires RPC_DEBUG.
+ */
+#undef ifdebug
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag))
+#else
+# define ifdebug(flag) if (0)
+#endif
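+/*
+ * A usage sketch (not taken from the kernel sources): ifdebug() guards
+ * optional debugging work on one of the NLMDBG_* flags below, e.g.
+ *
+ *    ifdebug(HOSTCACHE) {
+ *        // only runs when nlm_debug has NLMDBG_HOSTCACHE set
+ *        dump_host_cache();    // hypothetical helper
+ *    }
+ */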
+
+#endif /* __KERNEL__ */
+
+/*
+ * Debug flags
+ */
+#define NLMDBG_SVC 0x0001
+#define NLMDBG_CLIENT 0x0002
+#define NLMDBG_CLNTLOCK 0x0004
+#define NLMDBG_SVCLOCK 0x0008
+#define NLMDBG_MONITOR 0x0010
+#define NLMDBG_CLNTSUBS 0x0020
+#define NLMDBG_SVCSUBS 0x0040
+#define NLMDBG_HOSTCACHE 0x0080
+#define NLMDBG_XDR 0x0100
+#define NLMDBG_ALL 0x7fff
+
+#endif /* LINUX_LOCKD_DEBUG_H */
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
new file mode 100644
index 000000000..ff82a3287
--- /dev/null
+++ b/include/linux/lockd/lockd.h
@@ -0,0 +1,365 @@
+/*
+ * linux/include/linux/lockd/lockd.h
+ *
+ * General-purpose lockd include file.
+ *
+ * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef LINUX_LOCKD_LOCKD_H
+#define LINUX_LOCKD_LOCKD_H
+
+#ifdef __KERNEL__
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/ipv6.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/utsname.h>
+#include <linux/lockd/bind.h>
+#include <linux/lockd/xdr.h>
+#ifdef CONFIG_LOCKD_V4
+#include <linux/lockd/xdr4.h>
+#endif
+#include <linux/lockd/debug.h>
+#include <linux/sunrpc/svc.h>
+
+/*
+ * Version string
+ */
+#define LOCKD_VERSION "0.5"
+
+/*
+ * Default timeout for RPC calls (seconds)
+ */
+#define LOCKD_DFLT_TIMEO 10
+
+/*
+ * Lockd host handle (used both by the client and server personality).
+ */
+struct nlm_host {
+ struct hlist_node h_hash; /* doubly linked list */
+ struct sockaddr_storage h_addr; /* peer address */
+ size_t h_addrlen;
+ struct sockaddr_storage h_srcaddr; /* our address (optional) */
+ size_t h_srcaddrlen;
+ struct rpc_clnt *h_rpcclnt; /* RPC client to talk to peer */
+ char *h_name; /* remote hostname */
+ u32 h_version; /* interface version */
+ unsigned short h_proto; /* transport proto */
+ unsigned short h_reclaiming : 1,
+ h_server : 1, /* server side, not client side */
+ h_noresvport : 1,
+ h_inuse : 1;
+ wait_queue_head_t h_gracewait; /* wait while reclaiming */
+ struct rw_semaphore h_rwsem; /* Reboot recovery lock */
+ u32 h_state; /* pseudo-state counter */
+ u32 h_nsmstate; /* true remote NSM state */
+ u32 h_pidcount; /* Pseudopids */
+ atomic_t h_count; /* reference count */
+ struct mutex h_mutex; /* mutex for pmap binding */
+ unsigned long h_nextrebind; /* next portmap call */
+ unsigned long h_expires; /* eligible for GC */
+ struct list_head h_lockowners; /* Lockowners for the client */
+ spinlock_t h_lock;
+ struct list_head h_granted; /* Locks in GRANTED state */
+ struct list_head h_reclaim; /* Locks in RECLAIM state */
+ struct nsm_handle *h_nsmhandle; /* NSM status handle */
+ char *h_addrbuf; /* address eyecatcher */
+ struct net *net; /* host net */
+};
+
+/*
+ * The largest string sm_addrbuf should hold is a full-size IPv6 address
+ * (no "::" anywhere) with a scope ID. The buffer size is computed to
+ * hold eight groups of colon-separated four-hex-digit numbers, a
+ * percent sign, a scope id (at most 32 bits, in decimal), and NUL.
+ */
+#define NSM_ADDRBUF ((8 * 4 + 7) + (1 + 10) + 1)
+
+struct nsm_handle {
+ struct list_head sm_link;
+ atomic_t sm_count;
+ char *sm_mon_name;
+ char *sm_name;
+ struct sockaddr_storage sm_addr;
+ size_t sm_addrlen;
+ unsigned int sm_monitored : 1,
+ sm_sticky : 1; /* don't unmonitor */
+ struct nsm_private sm_priv;
+ char sm_addrbuf[NSM_ADDRBUF];
+};
+
+/*
+ * Rigorous type checking on sockaddr type conversions
+ */
+static inline struct sockaddr_in *nlm_addr_in(const struct nlm_host *host)
+{
+ return (struct sockaddr_in *)&host->h_addr;
+}
+
+static inline struct sockaddr *nlm_addr(const struct nlm_host *host)
+{
+ return (struct sockaddr *)&host->h_addr;
+}
+
+static inline struct sockaddr_in *nlm_srcaddr_in(const struct nlm_host *host)
+{
+ return (struct sockaddr_in *)&host->h_srcaddr;
+}
+
+static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host)
+{
+ return (struct sockaddr *)&host->h_srcaddr;
+}
+
+/*
+ * Map an fl_owner_t into a unique 32-bit "pid"
+ */
+struct nlm_lockowner {
+ struct list_head list;
+ atomic_t count;
+
+ struct nlm_host *host;
+ fl_owner_t owner;
+ uint32_t pid;
+};
+
+struct nlm_wait;
+
+/*
+ * Memory chunk for NLM client RPC request.
+ */
+#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
+struct nlm_rqst {
+ atomic_t a_count;
+ unsigned int a_flags; /* initial RPC task flags */
+ struct nlm_host * a_host; /* host handle */
+ struct nlm_args a_args; /* arguments */
+ struct nlm_res a_res; /* result */
+ struct nlm_block * a_block;
+ unsigned int a_retries; /* Retry count */
+ u8 a_owner[NLMCLNT_OHSIZE];
+};
+
+/*
+ * This struct describes a file held open by lockd on behalf of
+ * an NFS client.
+ */
+struct nlm_file {
+ struct hlist_node f_list; /* linked list */
+ struct nfs_fh f_handle; /* NFS file handle */
+ struct file * f_file; /* VFS file pointer */
+ struct nlm_share * f_shares; /* DOS shares */
+ struct list_head f_blocks; /* blocked locks */
+ unsigned int f_locks; /* guesstimate # of locks */
+ unsigned int f_count; /* reference count */
+ struct mutex f_mutex; /* avoid concurrent access */
+};
+
+/*
+ * This is a server block (i.e. a lock requested by some client which
+ * couldn't be granted because of a conflicting lock).
+ */
+#define NLM_NEVER (~(unsigned long) 0)
+/* timeout on non-blocking call: */
+#define NLM_TIMEOUT (7 * HZ)
+
+struct nlm_block {
+ struct kref b_count; /* Reference count */
+ struct list_head b_list; /* linked list of all blocks */
+ struct list_head b_flist; /* linked list (per file) */
+ struct nlm_rqst * b_call; /* RPC args & callback info */
+ struct svc_serv * b_daemon; /* NLM service */
+ struct nlm_host * b_host; /* host handle for RPC clnt */
+ unsigned long b_when; /* next re-xmit */
+ unsigned int b_id; /* block id */
+ unsigned char b_granted; /* VFS granted lock */
+ struct nlm_file * b_file; /* file in question */
+ struct cache_req * b_cache_req; /* deferred request handling */
+ struct cache_deferred_req * b_deferred_req;
+ unsigned int b_flags; /* block flags */
+#define B_QUEUED 1 /* lock queued */
+#define B_GOT_CALLBACK 2 /* got lock or conflicting lock */
+#define B_TIMED_OUT 4 /* filesystem too slow to respond */
+};
+
+/*
+ * Global variables
+ */
+extern const struct rpc_program nlm_program;
+extern struct svc_procedure nlmsvc_procedures[];
+#ifdef CONFIG_LOCKD_V4
+extern struct svc_procedure nlmsvc_procedures4[];
+#endif
+extern int nlmsvc_grace_period;
+extern unsigned long nlmsvc_timeout;
+extern bool nsm_use_hostnames;
+extern u32 nsm_local_state;
+
+/*
+ * Lockd client functions
+ */
+struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
+int nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
+int nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
+void nlmclnt_release_call(struct nlm_rqst *);
+struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
+void nlmclnt_finish_block(struct nlm_wait *block);
+int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
+__be32 nlmclnt_grant(const struct sockaddr *addr,
+ const struct nlm_lock *lock);
+void nlmclnt_recovery(struct nlm_host *);
+int nlmclnt_reclaim(struct nlm_host *, struct file_lock *,
+ struct nlm_rqst *);
+void nlmclnt_next_cookie(struct nlm_cookie *);
+
+/*
+ * Host cache
+ */
+struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
+ const size_t salen,
+ const unsigned short protocol,
+ const u32 version,
+ const char *hostname,
+ int noresvport,
+ struct net *net);
+void nlmclnt_release_host(struct nlm_host *);
+struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
+ const char *hostname,
+ const size_t hostname_len);
+void nlmsvc_release_host(struct nlm_host *);
+struct rpc_clnt * nlm_bind_host(struct nlm_host *);
+void nlm_rebind_host(struct nlm_host *);
+struct nlm_host * nlm_get_host(struct nlm_host *);
+void nlm_shutdown_hosts(void);
+void nlm_shutdown_hosts_net(struct net *net);
+void nlm_host_rebooted(const struct nlm_reboot *);
+
+/*
+ * Host monitoring
+ */
+int nsm_monitor(const struct nlm_host *host);
+void nsm_unmonitor(const struct nlm_host *host);
+
+struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
+ const size_t salen,
+ const char *hostname,
+ const size_t hostname_len);
+struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info);
+void nsm_release(struct nsm_handle *nsm);
+
+/*
+ * This is used in garbage collection and resource reclaim
+ * A return value != 0 means destroy the lock/block/share
+ */
+typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref);
+
+/*
+ * Server-side lock handling
+ */
+__be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
+ struct nlm_host *, struct nlm_lock *, int,
+ struct nlm_cookie *, int);
+__be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *);
+__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
+ struct nlm_host *, struct nlm_lock *,
+ struct nlm_lock *, struct nlm_cookie *);
+__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
+unsigned long nlmsvc_retry_blocked(void);
+void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
+ nlm_host_match_fn_t match);
+void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
+void nlmsvc_release_call(struct nlm_rqst *);
+
+/*
+ * File handling for the server personality
+ */
+__be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
+ struct nfs_fh *);
+void nlm_release_file(struct nlm_file *);
+void nlmsvc_mark_resources(struct net *);
+void nlmsvc_free_host_resources(struct nlm_host *);
+void nlmsvc_invalidate_all(void);
+
+/*
+ * Cluster failover support
+ */
+int nlmsvc_unlock_all_by_sb(struct super_block *sb);
+int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+
+static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
+{
+ return file_inode(file->f_file);
+}
+
+static inline int __nlm_privileged_request4(const struct sockaddr *sap)
+{
+ const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+
+ if (ntohs(sin->sin_port) > 1023)
+ return 0;
+
+ return ipv4_is_loopback(sin->sin_addr.s_addr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int __nlm_privileged_request6(const struct sockaddr *sap)
+{
+ const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+
+ if (ntohs(sin6->sin6_port) > 1023)
+ return 0;
+
+ if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_MAPPED)
+ return ipv4_is_loopback(sin6->sin6_addr.s6_addr32[3]);
+
+ return ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LOOPBACK;
+}
+#else /* IS_ENABLED(CONFIG_IPV6) */
+static inline int __nlm_privileged_request6(const struct sockaddr *sap)
+{
+ return 0;
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
+/*
+ * Ensure incoming requests are from local privileged callers.
+ *
+ * Return TRUE if sender is local and is connecting via a privileged port;
+ * otherwise return FALSE.
+ */
+static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
+{
+ const struct sockaddr *sap = svc_addr(rqstp);
+
+ switch (sap->sa_family) {
+ case AF_INET:
+ return __nlm_privileged_request4(sap);
+ case AF_INET6:
+ return __nlm_privileged_request6(sap);
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Compare two NLM locks.
+ * When the second lock is of type F_UNLCK, this acts like a wildcard.
+ */
+static inline int nlm_compare_locks(const struct file_lock *fl1,
+ const struct file_lock *fl2)
+{
+ return fl1->fl_pid == fl2->fl_pid
+ && fl1->fl_owner == fl2->fl_owner
+ && fl1->fl_start == fl2->fl_start
+ && fl1->fl_end == fl2->fl_end
+ && (fl1->fl_type == fl2->fl_type || fl2->fl_type == F_UNLCK);
+}
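+/*
+ * A brief sketch (not taken from the kernel sources): because an F_UNLCK
+ * "lock" in fl2 matches any lock type, an unlock request can be compared
+ * directly against every lock a client holds on a file. "existing" and
+ * "unlock_req" are hypothetical variables.
+ *
+ *    if (nlm_compare_locks(&existing->fl, &unlock_req->fl))
+ *        remove_the_existing_lock();    // same owner, pid and range
+ */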
+
+extern const struct lock_manager_operations nlmsvc_lock_operations;
+
+#endif /* __KERNEL__ */
+
+#endif /* LINUX_LOCKD_LOCKD_H */
diff --git a/include/linux/lockd/nlm.h b/include/linux/lockd/nlm.h
new file mode 100644
index 000000000..d9d46e442
--- /dev/null
+++ b/include/linux/lockd/nlm.h
@@ -0,0 +1,57 @@
+/*
+ * linux/include/linux/lockd/nlm.h
+ *
+ * Declarations for the Network Lock Manager protocol.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef LINUX_LOCKD_NLM_H
+#define LINUX_LOCKD_NLM_H
+
+
+/* Maximum file offset in file_lock.fl_end */
+# define NLM_OFFSET_MAX ((s32) 0x7fffffff)
+# define NLM4_OFFSET_MAX ((s64) ((~(u64)0) >> 1))
+
+/* Return states for NLM */
+enum {
+ NLM_LCK_GRANTED = 0,
+ NLM_LCK_DENIED = 1,
+ NLM_LCK_DENIED_NOLOCKS = 2,
+ NLM_LCK_BLOCKED = 3,
+ NLM_LCK_DENIED_GRACE_PERIOD = 4,
+#ifdef CONFIG_LOCKD_V4
+ NLM_DEADLCK = 5,
+ NLM_ROFS = 6,
+ NLM_STALE_FH = 7,
+ NLM_FBIG = 8,
+ NLM_FAILED = 9,
+#endif
+};
+
+#define NLM_PROGRAM 100021
+
+#define NLMPROC_NULL 0
+#define NLMPROC_TEST 1
+#define NLMPROC_LOCK 2
+#define NLMPROC_CANCEL 3
+#define NLMPROC_UNLOCK 4
+#define NLMPROC_GRANTED 5
+#define NLMPROC_TEST_MSG 6
+#define NLMPROC_LOCK_MSG 7
+#define NLMPROC_CANCEL_MSG 8
+#define NLMPROC_UNLOCK_MSG 9
+#define NLMPROC_GRANTED_MSG 10
+#define NLMPROC_TEST_RES 11
+#define NLMPROC_LOCK_RES 12
+#define NLMPROC_CANCEL_RES 13
+#define NLMPROC_UNLOCK_RES 14
+#define NLMPROC_GRANTED_RES 15
+#define NLMPROC_NSM_NOTIFY 16 /* statd callback */
+#define NLMPROC_SHARE 20
+#define NLMPROC_UNSHARE 21
+#define NLMPROC_NM_LOCK 22
+#define NLMPROC_FREE_ALL 23
+
+#endif /* LINUX_LOCKD_NLM_H */
diff --git a/include/linux/lockd/share.h b/include/linux/lockd/share.h
new file mode 100644
index 000000000..630c5bf69
--- /dev/null
+++ b/include/linux/lockd/share.h
@@ -0,0 +1,31 @@
+/*
+ * linux/include/linux/lockd/share.h
+ *
+ * DOS share management for lockd.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef LINUX_LOCKD_SHARE_H
+#define LINUX_LOCKD_SHARE_H
+
+/*
+ * DOS share for a specific file
+ */
+struct nlm_share {
+ struct nlm_share * s_next; /* linked list */
+ struct nlm_host * s_host; /* client host */
+ struct nlm_file * s_file; /* shared file */
+ struct xdr_netobj s_owner; /* owner handle */
+ u32 s_access; /* access mode */
+ u32 s_mode; /* deny mode */
+};
+
+__be32 nlmsvc_share_file(struct nlm_host *, struct nlm_file *,
+ struct nlm_args *);
+__be32 nlmsvc_unshare_file(struct nlm_host *, struct nlm_file *,
+ struct nlm_args *);
+void nlmsvc_traverse_shares(struct nlm_host *, struct nlm_file *,
+ nlm_host_match_fn_t);
+
+#endif /* LINUX_LOCKD_SHARE_H */
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
new file mode 100644
index 000000000..d39ed1cc5
--- /dev/null
+++ b/include/linux/lockd/xdr.h
@@ -0,0 +1,118 @@
+/*
+ * linux/include/linux/lockd/xdr.h
+ *
+ * XDR types for the NLM protocol
+ *
+ * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef LOCKD_XDR_H
+#define LOCKD_XDR_H
+
+#include <linux/fs.h>
+#include <linux/nfs.h>
+#include <linux/sunrpc/xdr.h>
+
+#define SM_MAXSTRLEN 1024
+#define SM_PRIV_SIZE 16
+
+struct nsm_private {
+ unsigned char data[SM_PRIV_SIZE];
+};
+
+struct svc_rqst;
+
+#define NLM_MAXCOOKIELEN 32
+#define NLM_MAXSTRLEN 1024
+
+#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED)
+#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED)
+#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS)
+#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED)
+#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD)
+
+#define nlm_drop_reply cpu_to_be32(30000)
+
+/* Lock info passed via NLM */
+struct nlm_lock {
+ char * caller;
+ unsigned int len; /* length of "caller" */
+ struct nfs_fh fh;
+ struct xdr_netobj oh;
+ u32 svid;
+ struct file_lock fl;
+};
+
+/*
+ * NLM cookies. Technically they can be 1K, but Linux only uses 8 bytes.
+ * FreeBSD uses 16, Apple Mac OS X 10.3 uses 20. Therefore we set it to
+ * 32 bytes.
+ */
+
+struct nlm_cookie
+{
+ unsigned char data[NLM_MAXCOOKIELEN];
+ unsigned int len;
+};
+
+/*
+ * Generic lockd arguments for all but sm_notify
+ */
+struct nlm_args {
+ struct nlm_cookie cookie;
+ struct nlm_lock lock;
+ u32 block;
+ u32 reclaim;
+ u32 state;
+ u32 monitor;
+ u32 fsm_access;
+ u32 fsm_mode;
+};
+
+typedef struct nlm_args nlm_args;
+
+/*
+ * Generic lockd result
+ */
+struct nlm_res {
+ struct nlm_cookie cookie;
+ __be32 status;
+ struct nlm_lock lock;
+};
+
+/*
+ * statd callback when client has rebooted
+ */
+struct nlm_reboot {
+ char *mon;
+ unsigned int len;
+ u32 state;
+ struct nsm_private priv;
+};
+
+/*
+ * Contents of statd callback when monitored host rebooted
+ */
+#define NLMSVC_XDRSIZE sizeof(struct nlm_args)
+
+int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlmsvc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *);
+int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlmsvc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
+int nlmsvc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
+int nlmsvc_encode_void(struct svc_rqst *, __be32 *, void *);
+int nlmsvc_decode_void(struct svc_rqst *, __be32 *, void *);
+int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *);
+int nlmsvc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *);
+/*
+int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+ */
+
+#endif /* LOCKD_XDR_H */
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
new file mode 100644
index 000000000..e58c88b52
--- /dev/null
+++ b/include/linux/lockd/xdr4.h
@@ -0,0 +1,47 @@
+/*
+ * linux/include/linux/lockd/xdr4.h
+ *
+ * XDR types for the NLM protocol
+ *
+ * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef LOCKD_XDR4_H
+#define LOCKD_XDR4_H
+
+#include <linux/fs.h>
+#include <linux/nfs.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/lockd/xdr.h>
+
+/* error codes new to NLMv4 */
+#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK)
+#define nlm4_rofs cpu_to_be32(NLM_ROFS)
+#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH)
+#define nlm4_fbig cpu_to_be32(NLM_FBIG)
+#define nlm4_failed cpu_to_be32(NLM_FAILED)
+
+
+
+int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlm4svc_encode_testres(struct svc_rqst *, __be32 *, struct nlm_res *);
+int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlm4svc_encode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
+int nlm4svc_decode_res(struct svc_rqst *, __be32 *, struct nlm_res *);
+int nlm4svc_encode_void(struct svc_rqst *, __be32 *, void *);
+int nlm4svc_decode_void(struct svc_rqst *, __be32 *, void *);
+int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *, struct nlm_res *);
+int nlm4svc_decode_notify(struct svc_rqst *, __be32 *, struct nlm_args *);
+int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *, struct nlm_reboot *);
+/*
+int nlmclt_encode_testargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+int nlmclt_encode_lockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+int nlmclt_encode_cancargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+int nlmclt_encode_unlockargs(struct rpc_rqst *, u32 *, struct nlm_args *);
+ */
+extern const struct rpc_version nlm_version4;
+
+#endif /* LOCKD_XDR4_H */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
new file mode 100644
index 000000000..066ba4157
--- /dev/null
+++ b/include/linux/lockdep.h
@@ -0,0 +1,543 @@
+/*
+ * Runtime locking correctness validator
+ *
+ * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * see Documentation/locking/lockdep-design.txt for more details.
+ */
+#ifndef __LINUX_LOCKDEP_H
+#define __LINUX_LOCKDEP_H
+
+struct task_struct;
+struct lockdep_map;
+
+/* for sysctl */
+extern int prove_locking;
+extern int lock_stat;
+
+#ifdef CONFIG_LOCKDEP
+
+#include <linux/linkage.h>
+#include <linux/list.h>
+#include <linux/debug_locks.h>
+#include <linux/stacktrace.h>
+
+/*
+ * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
+ * the total number of states... :-(
+ */
+#define XXX_LOCK_USAGE_STATES (1+3*4)
+
+#define MAX_LOCKDEP_SUBCLASSES 8UL
+
+/*
+ * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
+ * cached in the instance of lockdep_map
+ *
+ * Currently the main class (subclass == 0) and the single-depth subclass
+ * are cached in lockdep_map. This optimization mainly targets rq->lock:
+ * double_rq_lock() acquires it with a single-depth subclass, and that
+ * path is highly contended.
+ */
+#define NR_LOCKDEP_CACHING_CLASSES 2
+
+/*
+ * Lock-classes are keyed via unique addresses, by embedding the
+ * lockclass-key into the kernel (or module) .data section. (For
+ * static locks we use the lock address itself as the key.)
+ */
+struct lockdep_subclass_key {
+ char __one_byte;
+} __attribute__ ((__packed__));
+
+struct lock_class_key {
+ struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
+};
+
+extern struct lock_class_key __lockdep_no_validate__;
+
+#define LOCKSTAT_POINTS 4
+
+/*
+ * The lock-class itself:
+ */
+struct lock_class {
+ /*
+ * class-hash:
+ */
+ struct list_head hash_entry;
+
+ /*
+ * global list of all lock-classes:
+ */
+ struct list_head lock_entry;
+
+ struct lockdep_subclass_key *key;
+ unsigned int subclass;
+ unsigned int dep_gen_id;
+
+ /*
+ * IRQ/softirq usage tracking bits:
+ */
+ unsigned long usage_mask;
+ struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
+
+ /*
+ * These fields represent a directed graph of lock dependencies,
+ * to every node we attach a list of "forward" and a list of
+ * "backward" graph nodes.
+ */
+ struct list_head locks_after, locks_before;
+
+ /*
+ * Generation counter, when doing certain classes of graph walking,
+ * to ensure that we check one node only once:
+ */
+ unsigned int version;
+
+ /*
+ * Statistics counter:
+ */
+ unsigned long ops;
+
+ const char *name;
+ int name_version;
+
+#ifdef CONFIG_LOCK_STAT
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+#endif
+};
+
+#ifdef CONFIG_LOCK_STAT
+struct lock_time {
+ s64 min;
+ s64 max;
+ s64 total;
+ unsigned long nr;
+};
+
+enum bounce_type {
+ bounce_acquired_write,
+ bounce_acquired_read,
+ bounce_contended_write,
+ bounce_contended_read,
+ nr_bounce_types,
+
+ bounce_acquired = bounce_acquired_write,
+ bounce_contended = bounce_contended_write,
+};
+
+struct lock_class_stats {
+ unsigned long contention_point[LOCKSTAT_POINTS];
+ unsigned long contending_point[LOCKSTAT_POINTS];
+ struct lock_time read_waittime;
+ struct lock_time write_waittime;
+ struct lock_time read_holdtime;
+ struct lock_time write_holdtime;
+ unsigned long bounces[nr_bounce_types];
+};
+
+struct lock_class_stats lock_stats(struct lock_class *class);
+void clear_lock_stats(struct lock_class *class);
+#endif
+
+/*
+ * Map the lock object (the lock instance) to the lock-class object.
+ * This is embedded into specific lock instances:
+ */
+struct lockdep_map {
+ struct lock_class_key *key;
+ struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
+ const char *name;
+#ifdef CONFIG_LOCK_STAT
+ int cpu;
+ unsigned long ip;
+#endif
+};
+
+static inline void lockdep_copy_map(struct lockdep_map *to,
+ struct lockdep_map *from)
+{
+ int i;
+
+ *to = *from;
+ /*
+ * Since the class cache can be modified concurrently we could observe
+ * half pointers (64bit arch using 32bit copy insns). Therefore clear
+ * the caches and take the performance hit.
+ *
+ * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
+ * that relies on cache abuse.
+ */
+ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+ to->class_cache[i] = NULL;
+}
+
+/*
+ * Every lock has a list of other locks that were taken after it.
+ * We only grow the list, never remove from it:
+ */
+struct lock_list {
+ struct list_head entry;
+ struct lock_class *class;
+ struct stack_trace trace;
+ int distance;
+
+ /*
+ * The parent field is used to implement breadth-first search, and the
+ * bit 0 is reused to indicate if the lock has been accessed in BFS.
+ */
+ struct lock_list *parent;
+};
+
+/*
+ * We record lock dependency chains, so that we can cache them:
+ */
+struct lock_chain {
+ u8 irq_context;
+ u8 depth;
+ u16 base;
+ struct list_head entry;
+ u64 chain_key;
+};
+
+#define MAX_LOCKDEP_KEYS_BITS 13
+/*
+ * Subtract one because we offset hlock->class_idx by 1 in order
+ * to make 0 mean no class. This avoids overflowing the class_idx
+ * bitfield and hitting the BUG in hlock_class().
+ */
+#define MAX_LOCKDEP_KEYS ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+
+struct held_lock {
+ /*
+ * One-way hash of the dependency chain up to this point. We
+ * hash the hashes step by step as the dependency chain grows.
+ *
+ * We use it for dependency-caching and we skip detection
+ * passes and dependency-updates if there is a cache-hit, so
+ * it is absolutely critical for 100% coverage of the validator
+ * to have a unique key value for every unique dependency path
+ * that can occur in the system, to make a unique hash value
+ * as likely as possible - hence the 64-bit width.
+ *
+ * The task struct holds the current hash value (initialized
+ * with zero), here we store the previous hash value:
+ */
+ u64 prev_chain_key;
+ unsigned long acquire_ip;
+ struct lockdep_map *instance;
+ struct lockdep_map *nest_lock;
+#ifdef CONFIG_LOCK_STAT
+ u64 waittime_stamp;
+ u64 holdtime_stamp;
+#endif
+ unsigned int class_idx:MAX_LOCKDEP_KEYS_BITS;
+ /*
+ * The lock-stack is unified in that the lock chains of interrupt
+ * contexts nest on top of process context chains, but we 'separate'
+ * the hashes by starting with 0 if we cross into an interrupt
+ * context, and we also do not add cross-context lock
+ * dependencies - the lock usage graph walking covers that area
+ * anyway, and we'd just unnecessarily increase the number of
+ * dependencies otherwise. [Note: hardirq and softirq contexts
+ * are separated from each other too.]
+ *
+ * The following field is used to detect when we cross into an
+ * interrupt context:
+ */
+ unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+ unsigned int trylock:1; /* 16 bits */
+
+ unsigned int read:2; /* see lock_acquire() comment */
+ unsigned int check:1; /* see lock_acquire() comment */
+ unsigned int hardirqs_off:1;
+ unsigned int references:12; /* 32 bits */
+};
+
+/*
+ * Initialization, self-test and debugging-output methods:
+ */
+extern void lockdep_init(void);
+extern void lockdep_info(void);
+extern void lockdep_reset(void);
+extern void lockdep_reset_lock(struct lockdep_map *lock);
+extern void lockdep_free_key_range(void *start, unsigned long size);
+extern asmlinkage void lockdep_sys_exit(void);
+
+extern void lockdep_off(void);
+extern void lockdep_on(void);
+
+/*
+ * These methods are used by specific locking variants (spinlocks,
+ * rwlocks, mutexes and rwsems) to pass init/acquire/release events
+ * to lockdep:
+ */
+
+extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass);
+
+/*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+ { .name = (_name), .key = (void *)(_key), }
+
+/*
+ * Reinitialize a lock key - for cases where there is special locking or
+ * special initialization of locks so that the validator gets the scope
+ * of dependencies wrong: they are either too broad (they need a class-split)
+ * or they are too narrow (they suffer from a false class-split):
+ */
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map(&(lock)->dep_map, #key, key, 0)
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map(&(lock)->dep_map, name, key, 0)
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ lockdep_init_map(&(lock)->dep_map, #key, key, sub)
+#define lockdep_set_subclass(lock, sub) \
+ lockdep_init_map(&(lock)->dep_map, #lock, \
+ (lock)->dep_map.key, sub)
+
+#define lockdep_set_novalidate_class(lock) \
+ lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
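+/*
+ * A minimal sketch of a class split (not taken from the kernel sources;
+ * "struct foo" and foo_init() are hypothetical): override the class
+ * assigned by spin_lock_init() with a dedicated key, which is the usual
+ * way to fix a class that is too broad or too narrow.
+ *
+ *    static struct lock_class_key foo_lock_key;
+ *
+ *    static void foo_init(struct foo *foo)
+ *    {
+ *        spin_lock_init(&foo->lock);
+ *        lockdep_set_class(&foo->lock, &foo_lock_key);
+ *    }
+ */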
+/*
+ * Compare locking classes
+ */
+#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
+
+static inline int lockdep_match_key(struct lockdep_map *lock,
+ struct lock_class_key *key)
+{
+ return lock->key == key;
+}
+
+/*
+ * Acquire a lock.
+ *
+ * Values for "read":
+ *
+ * 0: exclusive (write) acquire
+ * 1: read-acquire (no recursion allowed)
+ * 2: read-acquire with same-instance recursion allowed
+ *
+ * Values for check:
+ *
+ * 0: simple checks (freeing, held-at-exit-time, etc.)
+ * 1: full validation
+ */
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check,
+ struct lockdep_map *nest_lock, unsigned long ip);
+
+extern void lock_release(struct lockdep_map *lock, int nested,
+ unsigned long ip);
+
+#define lockdep_is_held(lock) lock_is_held(&(lock)->dep_map)
+
+extern int lock_is_held(struct lockdep_map *lock);
+
+extern void lock_set_class(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, unsigned int subclass,
+ unsigned long ip);
+
+static inline void lock_set_subclass(struct lockdep_map *lock,
+ unsigned int subclass, unsigned long ip)
+{
+ lock_set_class(lock, lock->name, lock->key, subclass, ip);
+}
+
+extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
+extern void lockdep_clear_current_reclaim_state(void);
+extern void lockdep_trace_alloc(gfp_t mask);
+
+# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
+
+#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
+
+#define lockdep_assert_held(l) do { \
+ WARN_ON(debug_locks && !lockdep_is_held(l)); \
+ } while (0)
+
+#define lockdep_assert_held_once(l) do { \
+ WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
+ } while (0)
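+/*
+ * A minimal sketch (not taken from the kernel sources; my_update() and
+ * foo->lock are hypothetical): state a locking requirement next to the
+ * code that relies on it and let lockdep verify it at runtime.
+ *
+ *    static void my_update(struct foo *foo)
+ *    {
+ *        lockdep_assert_held(&foo->lock);    // caller must hold foo->lock
+ *        ...
+ *    }
+ */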
+
+#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
+
+#else /* !CONFIG_LOCKDEP */
+
+static inline void lockdep_off(void)
+{
+}
+
+static inline void lockdep_on(void)
+{
+}
+
+# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
+# define lock_release(l, n, i) do { } while (0)
+# define lock_set_class(l, n, k, s, i) do { } while (0)
+# define lock_set_subclass(l, s, i) do { } while (0)
+# define lockdep_set_current_reclaim_state(g) do { } while (0)
+# define lockdep_clear_current_reclaim_state() do { } while (0)
+# define lockdep_trace_alloc(g) do { } while (0)
+# define lockdep_init() do { } while (0)
+# define lockdep_info() do { } while (0)
+# define lockdep_init_map(lock, name, key, sub) \
+ do { (void)(name); (void)(key); } while (0)
+# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
+# define lockdep_set_class_and_name(lock, key, name) \
+ do { (void)(key); (void)(name); } while (0)
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ do { (void)(key); } while (0)
+#define lockdep_set_subclass(lock, sub) do { } while (0)
+
+#define lockdep_set_novalidate_class(lock) do { } while (0)
+
+/*
+ * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
+ * case since the result is not well defined and the caller should rather
+ * #ifdef the call himself.
+ */
+
+# define INIT_LOCKDEP
+# define lockdep_reset() do { debug_locks = 1; } while (0)
+# define lockdep_free_key_range(start, size) do { } while (0)
+# define lockdep_sys_exit() do { } while (0)
+/*
+ * The class key takes no space if lockdep is disabled:
+ */
+struct lock_class_key { };
+
+#define lockdep_depth(tsk) (0)
+
+#define lockdep_assert_held(l) do { (void)(l); } while (0)
+#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
+
+#define lockdep_recursing(tsk) (0)
+
+#endif /* CONFIG_LOCKDEP */
+
+#ifdef CONFIG_LOCK_STAT
+
+extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
+extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+do { \
+ if (!try(_lock)) { \
+ lock_contended(&(_lock)->dep_map, _RET_IP_); \
+ lock(_lock); \
+ } \
+ lock_acquired(&(_lock)->dep_map, _RET_IP_); \
+} while (0)
+
+#else /* CONFIG_LOCK_STAT */
+
+#define lock_contended(lockdep_map, ip) do {} while (0)
+#define lock_acquired(lockdep_map, ip) do {} while (0)
+
+#define LOCK_CONTENDED(_lock, try, lock) \
+ lock(_lock)
+
+#endif /* CONFIG_LOCK_STAT */
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * With lockdep we don't want the hand-coded irq-enable of
+ * _raw_*_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+ LOCK_CONTENDED((_lock), (try), (lock))
+
+#else /* CONFIG_LOCKDEP */
+
+#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
+ lockfl((_lock), (flags))
+
+#endif /* CONFIG_LOCKDEP */
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+extern void print_irqtrace_events(struct task_struct *curr);
+#else
+static inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+/*
+ * For trivial one-depth nesting of a lock-class, the following
+ * global define can be used. (Subsystems with multiple levels
+ * of nesting should define their own lock-nesting subclasses.)
+ */
+#define SINGLE_DEPTH_NESTING 1
+
+/*
+ * Map the dependency ops to NOP or to real lockdep ops, depending
+ * on the per lock-class debug mode:
+ */
+
+#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
+#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
+#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
+
+#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define spin_release(l, n, i) lock_release(l, n, i)
+
+#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_release(l, n, i) lock_release(l, n, i)
+
+#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define seqcount_release(l, n, i) lock_release(l, n, i)
+
+#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define mutex_release(l, n, i) lock_release(l, n, i)
+
+#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
+#define rwsem_release(l, n, i) lock_release(l, n, i)
+
+#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+
+#ifdef CONFIG_PROVE_LOCKING
+# define might_lock(lock) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+} while (0)
+# define might_lock_read(lock) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+} while (0)
+#else
+# define might_lock(lock) do { } while (0)
+# define might_lock_read(lock) do { } while (0)
+#endif
+
+#ifdef CONFIG_LOCKDEP
+void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
+#else
+static inline void
+lockdep_rcu_suspicious(const char *file, const int line, const char *s)
+{
+}
+#endif
+
+#endif /* __LINUX_LOCKDEP_H */
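Illustrative usage sketch (hypothetical driver, not from the tree): how a lock user might consume the annotation API declared in this header. The structure, the class-key name and the locking rule are assumptions; only lockdep_set_class() and lockdep_assert_held() come from the header, and both compile away when CONFIG_LOCKDEP is off.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct hypo_dev {
	spinlock_t lock;
	int state;
};

/* Give hypo_dev locks their own lockdep class, e.g. when a generic init
 * helper would otherwise assign them a shared, misleading class. */
static struct lock_class_key hypo_dev_lock_key;

static void hypo_dev_init(struct hypo_dev *dev)
{
	spin_lock_init(&dev->lock);
	lockdep_set_class(&dev->lock, &hypo_dev_lock_key);
}

static void hypo_dev_update(struct hypo_dev *dev)
{
	/* Document and, under lockdep, verify the locking contract. */
	lockdep_assert_held(&dev->lock);
	dev->state++;
}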
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000..b10b122dd
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,51 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+#include <generated/bounds.h>
+
+#define USE_CMPXCHG_LOCKREF \
+ (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
+ IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
+
+struct lockref {
+ union {
+#if USE_CMPXCHG_LOCKREF
+ aligned_u64 lock_count;
+#endif
+ struct {
+ spinlock_t lock;
+ int count;
+ };
+ };
+};
+
+extern void lockref_get(struct lockref *);
+extern int lockref_put_return(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
+
+extern void lockref_mark_dead(struct lockref *);
+extern int lockref_get_not_dead(struct lockref *);
+
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+ return ((int)l->count < 0);
+}
+
+#endif /* __LINUX_LOCKREF_H */
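A hedged sketch of the intended usage pattern: the lockref replaces a separate spinlock plus reference count, and the fast paths may avoid the lock entirely on architectures enabling the cmpxchg variant. The object, function names and teardown policy below are invented for illustration.

#include <linux/lockref.h>
#include <linux/slab.h>

struct hypo_obj {
	struct lockref ref;	/* lock + count, possibly in one word */
	/* ... payload ... */
};

static struct hypo_obj *hypo_obj_tryget(struct hypo_obj *obj)
{
	/* Fast path: may increment without taking the spinlock. */
	if (obj && lockref_get_not_dead(&obj->ref))
		return obj;
	return NULL;
}

static void hypo_obj_put(struct hypo_obj *obj)
{
	/* Nonzero return: count was dropped without taking the lock. */
	if (lockref_put_or_lock(&obj->ref))
		return;
	/* Otherwise we hold obj->ref.lock and this was the last reference. */
	lockref_mark_dead(&obj->ref);
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}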
diff --git a/include/linux/log2.h b/include/linux/log2.h
new file mode 100644
index 000000000..fd7ff3d91
--- /dev/null
+++ b/include/linux/log2.h
@@ -0,0 +1,208 @@
+/* Integer base 2 logarithm calculation
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_LOG2_H
+#define _LINUX_LOG2_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+/*
+ * deal with unrepresentable constant logarithms
+ */
+extern __attribute__((const, noreturn))
+int ____ilog2_NaN(void);
+
+/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ * more efficiently than using fls() and fls64()
+ * - the arch is not required to handle n==0 if implementing the fallback
+ */
+#ifndef CONFIG_ARCH_HAS_ILOG2_U32
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+ return fls(n) - 1;
+}
+#endif
+
+#ifndef CONFIG_ARCH_HAS_ILOG2_U64
+static inline __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+ return fls64(n) - 1;
+}
+#endif
+
+/*
+ * Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+/*
+ * round up to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __roundup_pow_of_two(unsigned long n)
+{
+ return 1UL << fls_long(n - 1);
+}
+
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+ return 1UL << (fls_long(n) - 1);
+}
+
+/**
+ * ilog2 - log base 2 of a 32-bit or a 64-bit unsigned value
+ * @n: parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ * the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ (n) < 1 ? ____ilog2_NaN() : \
+ (n) & (1ULL << 63) ? 63 : \
+ (n) & (1ULL << 62) ? 62 : \
+ (n) & (1ULL << 61) ? 61 : \
+ (n) & (1ULL << 60) ? 60 : \
+ (n) & (1ULL << 59) ? 59 : \
+ (n) & (1ULL << 58) ? 58 : \
+ (n) & (1ULL << 57) ? 57 : \
+ (n) & (1ULL << 56) ? 56 : \
+ (n) & (1ULL << 55) ? 55 : \
+ (n) & (1ULL << 54) ? 54 : \
+ (n) & (1ULL << 53) ? 53 : \
+ (n) & (1ULL << 52) ? 52 : \
+ (n) & (1ULL << 51) ? 51 : \
+ (n) & (1ULL << 50) ? 50 : \
+ (n) & (1ULL << 49) ? 49 : \
+ (n) & (1ULL << 48) ? 48 : \
+ (n) & (1ULL << 47) ? 47 : \
+ (n) & (1ULL << 46) ? 46 : \
+ (n) & (1ULL << 45) ? 45 : \
+ (n) & (1ULL << 44) ? 44 : \
+ (n) & (1ULL << 43) ? 43 : \
+ (n) & (1ULL << 42) ? 42 : \
+ (n) & (1ULL << 41) ? 41 : \
+ (n) & (1ULL << 40) ? 40 : \
+ (n) & (1ULL << 39) ? 39 : \
+ (n) & (1ULL << 38) ? 38 : \
+ (n) & (1ULL << 37) ? 37 : \
+ (n) & (1ULL << 36) ? 36 : \
+ (n) & (1ULL << 35) ? 35 : \
+ (n) & (1ULL << 34) ? 34 : \
+ (n) & (1ULL << 33) ? 33 : \
+ (n) & (1ULL << 32) ? 32 : \
+ (n) & (1ULL << 31) ? 31 : \
+ (n) & (1ULL << 30) ? 30 : \
+ (n) & (1ULL << 29) ? 29 : \
+ (n) & (1ULL << 28) ? 28 : \
+ (n) & (1ULL << 27) ? 27 : \
+ (n) & (1ULL << 26) ? 26 : \
+ (n) & (1ULL << 25) ? 25 : \
+ (n) & (1ULL << 24) ? 24 : \
+ (n) & (1ULL << 23) ? 23 : \
+ (n) & (1ULL << 22) ? 22 : \
+ (n) & (1ULL << 21) ? 21 : \
+ (n) & (1ULL << 20) ? 20 : \
+ (n) & (1ULL << 19) ? 19 : \
+ (n) & (1ULL << 18) ? 18 : \
+ (n) & (1ULL << 17) ? 17 : \
+ (n) & (1ULL << 16) ? 16 : \
+ (n) & (1ULL << 15) ? 15 : \
+ (n) & (1ULL << 14) ? 14 : \
+ (n) & (1ULL << 13) ? 13 : \
+ (n) & (1ULL << 12) ? 12 : \
+ (n) & (1ULL << 11) ? 11 : \
+ (n) & (1ULL << 10) ? 10 : \
+ (n) & (1ULL << 9) ? 9 : \
+ (n) & (1ULL << 8) ? 8 : \
+ (n) & (1ULL << 7) ? 7 : \
+ (n) & (1ULL << 6) ? 6 : \
+ (n) & (1ULL << 5) ? 5 : \
+ (n) & (1ULL << 4) ? 4 : \
+ (n) & (1ULL << 3) ? 3 : \
+ (n) & (1ULL << 2) ? 2 : \
+ (n) & (1ULL << 1) ? 1 : \
+ (n) & (1ULL << 0) ? 0 : \
+ ____ilog2_NaN() \
+ ) : \
+ (sizeof(n) <= 4) ? \
+ __ilog2_u32(n) : \
+ __ilog2_u64(n) \
+ )
+
+/**
+ * roundup_pow_of_two - round the given value up to nearest power of two
+ * @n: parameter
+ *
+ * round the given value up to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define roundup_pow_of_two(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ (n == 1) ? 1 : \
+ (1UL << (ilog2((n) - 1) + 1)) \
+ ) : \
+ __roundup_pow_of_two(n) \
+ )
+
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n: parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ (1UL << ilog2(n))) : \
+ __rounddown_pow_of_two(n) \
+ )
+
+/**
+ * order_base_2 - calculate the (rounded up) base 2 order of the argument
+ * @n: parameter
+ *
+ * The first few values calculated by this routine:
+ * ob2(0) = 0
+ * ob2(1) = 0
+ * ob2(2) = 1
+ * ob2(3) = 2
+ * ob2(4) = 2
+ * ob2(5) = 3
+ * ... and so on.
+ */
+
+#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+
+#endif /* _LINUX_LOG2_H */
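A short, hedged example of the helpers above; the hash-table sizing scenario and names are made up. It shows the constant-folding property (usable in a static initializer) next to the runtime fallback.

#include <linux/log2.h>

#define HYPO_MIN_BUCKETS	100
/* Constant path: 100 rounds up to 128, so the order folds to 7. */
static const unsigned int hypo_hash_order =
	ilog2(roundup_pow_of_two(HYPO_MIN_BUCKETS));

/* Assumes requested > 0; the result is undefined for 0, as documented above. */
static unsigned int hypo_hash_buckets(unsigned int requested)
{
	/* Non-constant argument: falls back to the fls()-based helpers. */
	if (!is_power_of_2(requested))
		requested = roundup_pow_of_two(requested);
	return requested;
}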
diff --git a/include/linux/lp.h b/include/linux/lp.h
new file mode 100644
index 000000000..0dd276af9
--- /dev/null
+++ b/include/linux/lp.h
@@ -0,0 +1,100 @@
+/*
+ * usr/include/linux/lp.h c.1991-1992 James Wiegand
+ * many modifications copyright (C) 1992 Michael K. Johnson
+ * Interrupt support added 1993 Nigel Gamble
+ * Removed 8255 status defines from inside __KERNEL__ Marcelo Tosatti
+ */
+#ifndef _LINUX_LP_H
+#define _LINUX_LP_H
+
+
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <uapi/linux/lp.h>
+
+/* Magic numbers for defining port-device mappings */
+#define LP_PARPORT_UNSPEC -4
+#define LP_PARPORT_AUTO -3
+#define LP_PARPORT_OFF -2
+#define LP_PARPORT_NONE -1
+
+#define LP_F(minor) lp_table[(minor)].flags /* flags for busy, etc. */
+#define LP_CHAR(minor) lp_table[(minor)].chars /* busy timeout */
+#define LP_TIME(minor) lp_table[(minor)].time /* wait time */
+#define LP_WAIT(minor) lp_table[(minor)].wait /* strobe wait */
+#define LP_IRQ(minor) lp_table[(minor)].dev->port->irq /* interrupt # */
+ /* PARPORT_IRQ_NONE means polled */
+#ifdef LP_STATS
+#define LP_STAT(minor) lp_table[(minor)].stats /* statistics area */
+#endif
+#define LP_BUFFER_SIZE PAGE_SIZE
+
+#define LP_BASE(x) lp_table[(x)].dev->port->base
+
+#ifdef LP_STATS
+struct lp_stats {
+ unsigned long chars;
+ unsigned long sleeps;
+ unsigned int maxrun;
+ unsigned int maxwait;
+ unsigned int meanwait;
+ unsigned int mdev;
+};
+#endif
+
+struct lp_struct {
+ struct pardevice *dev;
+ unsigned long flags;
+ unsigned int chars;
+ unsigned int time;
+ unsigned int wait;
+ char *lp_buffer;
+#ifdef LP_STATS
+ unsigned int lastcall;
+ unsigned int runchars;
+ struct lp_stats stats;
+#endif
+ wait_queue_head_t waitq;
+ unsigned int last_error;
+ struct mutex port_mutex;
+ wait_queue_head_t dataq;
+ long timeout;
+ unsigned int best_mode;
+ unsigned int current_mode;
+ unsigned long bits;
+};
+
+/*
+ * The following constants describe the various signals of the printer port
+ * hardware. Note that the hardware inverts some signals and that some
+ * signals are active low. An example is LP_STROBE, which must be programmed
+ * with 1 for being active and 0 for being inactive, because the strobe signal
+ * gets inverted, but it is also active low.
+ */
+
+
+/*
+ * defines for 8255 control port
+ * base + 2
+ * accessed with LP_C(minor)
+ */
+#define LP_PINTEN 0x10 /* high to read data in or-ed with data out */
+#define LP_PSELECP 0x08 /* inverted output, active low */
+#define LP_PINITP 0x04 /* unchanged output, active low */
+#define LP_PAUTOLF 0x02 /* inverted output, active low */
+#define LP_PSTROBE 0x01 /* short high output on rising edge */
+
+/*
+ * the value written to ports to test existence. PC-style ports will
+ * return the value written. AT-style ports will return 0. so why not
+ * make them the same ?
+ */
+#define LP_DUMMY 0x00
+
+/*
+ * This is the port delay time, in microseconds.
+ * It is used only in the lp_init() and lp_reset() routine.
+ */
+#define LP_DELAY 50
+
+#endif
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
new file mode 100644
index 000000000..46262284d
--- /dev/null
+++ b/include/linux/lru_cache.h
@@ -0,0 +1,314 @@
+/*
+ lru_cache.h
+
+ This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+ Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
+ Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+ Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+ drbd is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ drbd is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with drbd; see the file COPYING. If not, write to
+ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+#ifndef LRU_CACHE_H
+#define LRU_CACHE_H
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/string.h> /* for memset */
+#include <linux/seq_file.h>
+
+/*
+This header file (and its .c file; see there for kernel-doc of the functions)
+ defines a helper framework to easily keep track of index:label associations,
+ and changes to an "active set" of objects, as well as pending transactions,
+ to persistently record those changes.
+
+ We use an LRU policy if it is necessary to "cool down" a region currently in
+ the active set before we can "heat" a previously unused region.
+
+ Because of this latter property, it is called "lru_cache".
+ As it actually Tracks Objects in an Active SeT, we could also call it
+ toast (incidentally that is what may happen to the data on the
+ backend storage upon next resync, if we don't get it right).
+
+What for?
+
+We replicate IO (more or less synchronously) to local and remote disk.
+
+For crash recovery after replication node failure,
+ we need to resync all regions that have been target of in-flight WRITE IO
+ (in use, or "hot", regions), as we don't know whether or not those WRITEs
+ have made it to stable storage.
+
+ To avoid a "full resync", we need to persistently track these regions.
+
+ This is known as "write intent log", and can be implemented as on-disk
+ (coarse or fine grained) bitmap, or other meta data.
+
+ To avoid the overhead of frequent extra writes to this meta data area,
+ usually the condition is softened to regions that _may_ have been target of
+ in-flight WRITE IO, e.g. by only lazily clearing the on-disk write-intent
+ bitmap, trading frequency of meta data transactions against amount of
+ (possibly unnecessary) resync traffic.
+
+ If we set a hard limit on the area that may be "hot" at any given time, we
+ limit the amount of resync traffic needed for crash recovery.
+
+For recovery after replication link failure,
+ we need to resync all blocks that have been changed on the other replica
+ in the meantime, or, if both replicas have been changed independently [*],
+ all blocks that have been changed on either replica in the meantime.
+ [*] usually as a result of a cluster split-brain and insufficient protection.
+ but there are valid use cases to do this on purpose.
+
+ Tracking those blocks can be implemented as "dirty bitmap".
+ Having it fine-grained reduces the amount of resync traffic.
+ It should also be persistent, to allow for reboots (or crashes)
+ while the replication link is down.
+
+There are various possible implementations for persistently storing
+write intent log information, three of which are mentioned here.
+
+"Chunk dirtying"
+ The on-disk "dirty bitmap" may be re-used as "write-intent" bitmap as well.
+ To reduce the frequency of bitmap updates for write-intent log purposes,
+ one could dirty "chunks" (of some size) at a time of the (fine grained)
+ on-disk bitmap, while keeping the in-memory "dirty" bitmap as clean as
+ possible, flushing it to disk again when a previously "hot" (and on-disk
+ dirtied as full chunk) area "cools down" again (no IO in flight anymore,
+ and none expected in the near future either).
+
+"Explicit (coarse) write intent bitmap"
+ Another implementation could choose a (probably coarse) explicit bitmap
+ for write-intent log purposes, in addition to the fine-grained dirty bitmap.
+
+"Activity log"
+ Yet another implementation may keep track of the hot regions by starting
+ with an empty set, and writing down a journal of region numbers that have
+ become "hot", or have "cooled down" again.
+
+ To be able to use a ring buffer for this journal of changes to the active
+ set, we not only record the actual changes to that set, but also record the
+ unchanged members of the set in a round-robin fashion. To do so, we use a
+ fixed (but configurable) number of slots which we can identify by index, and
+ associate region numbers (labels) with these indices.
+ For each transaction recording a change to the active set, we record the
+ change itself (index: -old_label, +new_label), and which index is associated
+ with which label (index: current_label) within a certain sliding window that
+ is moved further over the available indices with each such transaction.
+
+ Thus, for crash recovery, if the ringbuffer is sufficiently large, we can
+ accurately reconstruct the active set.
+
+ Sufficiently large depends only on maximum number of active objects, and the
+ size of the sliding window recording "index: current_label" associations within
+ each transaction.
+
+ This is what we call the "activity log".
+
+ Currently we need one activity log transaction per single label change, which
+ does not give much benefit over the "dirty chunks of bitmap" approach, other
+ than potentially less seeks.
+
+ We plan to change the transaction format to support multiple changes per
+ transaction, which then would reduce several (disjoint, "random") updates to
+ the bitmap into one transaction to the activity log ring buffer.
+*/
+
+/* this defines an element in a tracked set
+ * .colision is for hash table lookup.
+ * When we process a new IO request, we know its sector, thus can deduce the
+ * region number (label) easily. To do the label -> object lookup without a
+ * full list walk, we use a simple hash table.
+ *
+ * .list is on one of three lists:
+ * in_use: currently in use (refcnt > 0, lc_number != LC_FREE)
+ * lru: unused but ready to be reused or recycled
+ * (lc_refcnt == 0, lc_number != LC_FREE),
+ * free: unused but ready to be recycled
+ * (lc_refcnt == 0, lc_number == LC_FREE),
+ *
+ * an element is said to be "in the active set",
+ * if either on "in_use" or "lru", i.e. lc_number != LC_FREE.
+ *
+ * DRBD currently (May 2009) only uses 61 elements on the resync lru_cache
+ * (total memory usage 2 pages), and up to 3833 elements on the act_log
+ * lru_cache, totalling ~215 kB for 64bit architecture, ~53 pages.
+ *
+ * We usually do not actually free these objects again, but only "recycle"
+ * them, as the change "index: -old_label, +LC_FREE" would need a transaction
+ * as well. Which also means that using a kmem_cache to allocate the objects
+ * from wastes some resources.
+ * But it avoids high order page allocations in kmalloc.
+ */
+struct lc_element {
+ struct hlist_node colision;
+ struct list_head list; /* LRU list or free list */
+ unsigned refcnt;
+ /* back "pointer" into lc_cache->element[index],
+ * for paranoia, and for "lc_element_to_index" */
+ unsigned lc_index;
+ /* if we want to track a larger set of objects,
+ * it needs to become an arch-independent u64 */
+ unsigned lc_number;
+ /* special label when on free list */
+#define LC_FREE (~0U)
+
+ /* for pending changes */
+ unsigned lc_new_number;
+};
+
+struct lru_cache {
+ /* the least recently used item is kept at lru->prev */
+ struct list_head lru;
+ struct list_head free;
+ struct list_head in_use;
+ struct list_head to_be_changed;
+
+ /* the pre-created kmem cache to allocate the objects from */
+ struct kmem_cache *lc_cache;
+
+ /* size of tracked objects, used to memset(,0,) them in lc_reset */
+ size_t element_size;
+ /* offset of struct lc_element member in the tracked object */
+ size_t element_off;
+
+ /* number of elements (indices) */
+ unsigned int nr_elements;
+ /* Arbitrary limit on maximum tracked objects. Practical limit is much
+ * lower due to allocation failures, probably. For typical use cases,
+ * nr_elements should be a few thousand at most.
+ * This also limits the maximum value of lc_element.lc_index, allowing the
+ * 8 high bits of .lc_index to be overloaded with flags in the future. */
+#define LC_MAX_ACTIVE (1<<24)
+
+ /* allow to accumulate a few (index:label) changes,
+ * but no more than max_pending_changes */
+ unsigned int max_pending_changes;
+ /* number of elements currently on to_be_changed list */
+ unsigned int pending_changes;
+
+ /* statistics */
+ unsigned used; /* number of elements currently on in_use list */
+ unsigned long hits, misses, starving, locked, changed;
+
+ /* see below: flag-bits for lru_cache */
+ unsigned long flags;
+
+
+ void *lc_private;
+ const char *name;
+
+ /* nr_elements there */
+ struct hlist_head *lc_slot;
+ struct lc_element **lc_element;
+};
+
+
+/* flag-bits for lru_cache */
+enum {
+ /* debugging aid, to catch concurrent access early.
+ * user needs to guarantee exclusive access by proper locking! */
+ __LC_PARANOIA,
+
+ /* annotate that the set is "dirty", possibly accumulating further
+ * changes, until a transaction is finally triggered */
+ __LC_DIRTY,
+
+ /* Locked, no further changes allowed.
+ * Also used to serialize changing transactions. */
+ __LC_LOCKED,
+
+ /* if we need to change the set, but currently there is no free nor
+ * unused element available, we are "starving", and must not give out
+ * further references, to guarantee that eventually some refcnt will
+ * drop to zero and we will be able to make progress again, changing
+ * the set, writing the transaction.
+ * if the statistics say we are frequently starving,
+ * nr_elements is too small. */
+ __LC_STARVING,
+};
+#define LC_PARANOIA (1<<__LC_PARANOIA)
+#define LC_DIRTY (1<<__LC_DIRTY)
+#define LC_LOCKED (1<<__LC_LOCKED)
+#define LC_STARVING (1<<__LC_STARVING)
+
+extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
+ unsigned max_pending_changes,
+ unsigned e_count, size_t e_size, size_t e_off);
+extern void lc_reset(struct lru_cache *lc);
+extern void lc_destroy(struct lru_cache *lc);
+extern void lc_set(struct lru_cache *lc, unsigned int enr, int index);
+extern void lc_del(struct lru_cache *lc, struct lc_element *element);
+
+extern struct lc_element *lc_get_cumulative(struct lru_cache *lc, unsigned int enr);
+extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
+extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
+extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
+extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
+extern void lc_committed(struct lru_cache *lc);
+
+struct seq_file;
+extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
+
+extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
+ void (*detail) (struct seq_file *, struct lc_element *));
+
+/**
+ * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set
+ * @lc: the lru cache to operate on
+ *
+ * Allows (expects) the set to be "dirty". Note that the reference counts and
+ * order on the active and lru lists may still change. Used to serialize
+ * changing transactions. Returns true if we acquired the lock.
+ */
+static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
+{
+ return !test_and_set_bit(__LC_LOCKED, &lc->flags);
+}
+
+/**
+ * lc_try_lock - variant to stop lc_get() from changing the tracked set
+ * @lc: the lru cache to operate on
+ *
+ * Note that the reference counts and order on the active and lru lists may
+ * still change. Only works on a "clean" set. Returns true if we acquired the
+ * lock, which means there are no pending changes, and any further attempt to
+ * change the set will not succeed until the next lc_unlock().
+ */
+extern int lc_try_lock(struct lru_cache *lc);
+
+/**
+ * lc_unlock - unlock @lc, allow lc_get() to change the set again
+ * @lc: the lru cache to operate on
+ */
+static inline void lc_unlock(struct lru_cache *lc)
+{
+ clear_bit(__LC_DIRTY, &lc->flags);
+ clear_bit_unlock(__LC_LOCKED, &lc->flags);
+}
+
+extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
+
+#define lc_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+extern struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i);
+extern unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e);
+
+#endif
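A rough usage sketch modeled on the activity-log description above. The extent object, the slot counts and the point at which the pending transaction would be written out are all assumptions; the authoritative contract is the kernel-doc in lib/lru_cache.c.

#include <linux/lru_cache.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/errno.h>

struct hypo_extent {
	struct lc_element lce;	/* must be embedded in the tracked object */
	unsigned long flags;
};

static struct kmem_cache *hypo_ext_cache;
static struct lru_cache *hypo_lc;

static int hypo_lc_init(void)
{
	hypo_ext_cache = KMEM_CACHE(hypo_extent, 0);
	if (!hypo_ext_cache)
		return -ENOMEM;
	/* 64 slots, up to 8 pending (index:label) changes per transaction. */
	hypo_lc = lc_create("hypo_al", hypo_ext_cache, 8, 64,
			    sizeof(struct hypo_extent),
			    offsetof(struct hypo_extent, lce));
	return hypo_lc ? 0 : -ENOMEM;
}

static void hypo_mark_hot(unsigned int region_nr)
{
	struct lc_element *e = lc_get(hypo_lc, region_nr);

	if (!e)
		return;		/* starving or locked: caller retries later */
	if (e->lc_number != e->lc_new_number) {
		/* ... write the pending transaction to stable storage ... */
		lc_committed(hypo_lc);
	}
	/* lc_entry(e, struct hypo_extent, lce) gives the container; then
	 * drop the reference once the IO covering this region completed. */
	lc_put(hypo_lc, e);
}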
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
new file mode 100644
index 000000000..1cc89e9df
--- /dev/null
+++ b/include/linux/lsm_audit.h
@@ -0,0 +1,99 @@
+/*
+ * Common LSM logging functions
+ * Heavily borrowed from selinux/avc.h
+ *
+ * Author : Etienne BASSET <etienne.basset@ensta.org>
+ *
+ * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil>
+ * All BUGS to : Etienne BASSET <etienne.basset@ensta.org>
+ */
+#ifndef _LSM_COMMON_LOGGING_
+#define _LSM_COMMON_LOGGING_
+
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/audit.h>
+#include <linux/in6.h>
+#include <linux/path.h>
+#include <linux/key.h>
+#include <linux/skbuff.h>
+
+struct lsm_network_audit {
+ int netif;
+ struct sock *sk;
+ u16 family;
+ __be16 dport;
+ __be16 sport;
+ union {
+ struct {
+ __be32 daddr;
+ __be32 saddr;
+ } v4;
+ struct {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ } v6;
+ } fam;
+};
+
+/* Auxiliary data to use in generating the audit record. */
+struct common_audit_data {
+ char type;
+#define LSM_AUDIT_DATA_PATH 1
+#define LSM_AUDIT_DATA_NET 2
+#define LSM_AUDIT_DATA_CAP 3
+#define LSM_AUDIT_DATA_IPC 4
+#define LSM_AUDIT_DATA_TASK 5
+#define LSM_AUDIT_DATA_KEY 6
+#define LSM_AUDIT_DATA_NONE 7
+#define LSM_AUDIT_DATA_KMOD 8
+#define LSM_AUDIT_DATA_INODE 9
+#define LSM_AUDIT_DATA_DENTRY 10
+ union {
+ struct path path;
+ struct dentry *dentry;
+ struct inode *inode;
+ struct lsm_network_audit *net;
+ int cap;
+ int ipc_id;
+ struct task_struct *tsk;
+#ifdef CONFIG_KEYS
+ struct {
+ key_serial_t key;
+ char *key_desc;
+ } key_struct;
+#endif
+ char *kmod_name;
+ } u;
+ /* this union contains LSM specific data */
+ union {
+#ifdef CONFIG_SECURITY_SMACK
+ struct smack_audit_data *smack_audit_data;
+#endif
+#ifdef CONFIG_SECURITY_SELINUX
+ struct selinux_audit_data *selinux_audit_data;
+#endif
+#ifdef CONFIG_SECURITY_APPARMOR
+ struct apparmor_audit_data *apparmor_audit_data;
+#endif
+ }; /* per LSM data pointer union */
+};
+
+#define v4info fam.v4
+#define v6info fam.v6
+
+int ipv4_skb_to_auditdata(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto);
+
+int ipv6_skb_to_auditdata(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto);
+
+void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *));
+
+#endif
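A hedged sketch of how an LSM might drive common_lsm_audit(); the callback contents, the "hypo" prefix and the denial scenario are illustrative only. audit_log_format() is assumed from <linux/audit.h>, which this header already includes.

#include <linux/lsm_audit.h>

static void hypo_pre_audit(struct audit_buffer *ab, void *a)
{
	audit_log_format(ab, "lsm=hypo op=open");
}

static void hypo_post_audit(struct audit_buffer *ab, void *a)
{
	audit_log_format(ab, " result=denied");
}

static void hypo_audit_denial(struct dentry *dentry)
{
	struct common_audit_data ad;

	/* 'type' selects which member of the union 'u' the common code dumps. */
	ad.type = LSM_AUDIT_DATA_DENTRY;
	ad.u.dentry = dentry;
	common_lsm_audit(&ad, hypo_pre_audit, hypo_post_audit);
}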
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
new file mode 100644
index 000000000..4356686b0
--- /dev/null
+++ b/include/linux/lz4.h
@@ -0,0 +1,87 @@
+#ifndef __LZ4_H__
+#define __LZ4_H__
+/*
+ * LZ4 Kernel Interface
+ *
+ * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define LZ4_MEM_COMPRESS (4096 * sizeof(unsigned char *))
+#define LZ4HC_MEM_COMPRESS (65538 * sizeof(unsigned char *))
+
+/*
+ * lz4_compressbound()
+ * Provides the maximum size that LZ4 may output in a "worst case" scenario
+ * (input data not compressible)
+ */
+static inline size_t lz4_compressbound(size_t isize)
+{
+ return isize + (isize / 255) + 16;
+}
+
+/*
+ * lz4_compress()
+ * src : source address of the original data
+ * src_len : size of the original data
+ * dst : output buffer address of the compressed data
+ * This requires 'dst' of size LZ4_COMPRESSBOUND.
+ * dst_len : is the output size, which is returned after compression is done
+ * wrkmem : address of the working memory.
+ * This requires 'wrkmem' of size LZ4_MEM_COMPRESS.
+ * return : Success if return 0
+ * Error if return (< 0)
+ * note : Destination buffer and wrkmem must be already allocated with
+ * the defined size.
+ */
+int lz4_compress(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
+ /*
+ * lz4hc_compress()
+ * src : source address of the original data
+ * src_len : size of the original data
+ * dst : output buffer address of the compressed data
+ * This requires 'dst' of size LZ4_COMPRESSBOUND.
+ * dst_len : is the output size, which is returned after compression is done
+ * wrkmem : address of the working memory.
+ * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS.
+ * return : Success if return 0
+ * Error if return (< 0)
+ * note : Destination buffer and wrkmem must be already allocated with
+ * the defined size.
+ */
+int lz4hc_compress(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
+/*
+ * lz4_decompress()
+ * src : source address of the compressed data
+ * src_len : is the input size, which is returned after decompression is done
+ * dest : output buffer address of the decompressed data
+ * actual_dest_len: is the size of uncompressed data, supposing it's known
+ * return : Success if return 0
+ * Error if return (< 0)
+ * note : Destination buffer must be already allocated.
+ * slightly faster than lz4_decompress_unknownoutputsize()
+ */
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+ unsigned char *dest, size_t actual_dest_len);
+
+/*
+ * lz4_decompress_unknownoutputsize()
+ * src : source address of the compressed data
+ * src_len : is the input size, therefore the compressed size
+ * dest : output buffer address of the decompressed data
+ * dest_len: is the max size of the destination buffer, which is
+ * returned with actual size of decompressed data after
+ * decompress done
+ * return : Success if return 0
+ * Error if return (< 0)
+ * note : Destination buffer must be already allocated.
+ */
+int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
+ unsigned char *dest, size_t *dest_len);
+#endif
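A minimal, hedged sketch of driving lz4_compress() as documented above. The buffer management, GFP flags and error policy are assumptions; the hard requirements shown are the lz4_compressbound()-sized destination and the LZ4_MEM_COMPRESS-sized work memory.

#include <linux/lz4.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static int hypo_lz4_pack(const unsigned char *src, size_t src_len,
			 unsigned char **out, size_t *out_len)
{
	void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
	unsigned char *dst = kmalloc(lz4_compressbound(src_len), GFP_KERNEL);
	int ret = -ENOMEM;

	if (!wrkmem || !dst)
		goto out;
	ret = lz4_compress(src, src_len, dst, out_len, wrkmem);
	if (!ret)
		*out = dst;	/* *out_len now holds the compressed size */
out:
	if (ret)
		kfree(dst);
	vfree(wrkmem);
	return ret;
}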
diff --git a/include/linux/lzo.h b/include/linux/lzo.h
new file mode 100644
index 000000000..a0848d937
--- /dev/null
+++ b/include/linux/lzo.h
@@ -0,0 +1,45 @@
+#ifndef __LZO_H__
+#define __LZO_H__
+/*
+ * LZO Public Kernel Interface
+ * A mini subset of the LZO real-time data compression library
+ *
+ * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
+ *
+ * The full LZO package can be found at:
+ * http://www.oberhumer.com/opensource/lzo/
+ *
+ * Changed for Linux kernel use by:
+ * Nitin Gupta <nitingupta910@gmail.com>
+ * Richard Purdie <rpurdie@openedhand.com>
+ */
+
+#define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short))
+#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS
+
+#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
+
+/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
+int lzo1x_1_compress(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem);
+
+/* safe decompression with overrun testing */
+int lzo1x_decompress_safe(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len);
+
+/*
+ * Return values (< 0 = Error)
+ */
+#define LZO_E_OK 0
+#define LZO_E_ERROR (-1)
+#define LZO_E_OUT_OF_MEMORY (-2)
+#define LZO_E_NOT_COMPRESSIBLE (-3)
+#define LZO_E_INPUT_OVERRUN (-4)
+#define LZO_E_OUTPUT_OVERRUN (-5)
+#define LZO_E_LOOKBEHIND_OVERRUN (-6)
+#define LZO_E_EOF_NOT_FOUND (-7)
+#define LZO_E_INPUT_NOT_CONSUMED (-8)
+#define LZO_E_NOT_YET_IMPLEMENTED (-9)
+#define LZO_E_INVALID_ARGUMENT (-10)
+
+#endif
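For symmetry with the LZ4 sketch, a hedged example of the safe decompression path above; the wrapper name and the -EINVAL mapping are invented. *dst_len carries the capacity of 'dst' on entry and the decompressed size on return.

#include <linux/lzo.h>
#include <linux/errno.h>

static int hypo_lzo_unpack(const unsigned char *src, size_t src_len,
			   unsigned char *dst, size_t *dst_len)
{
	/* lzo1x_decompress_safe() checks for input and output overruns. */
	int ret = lzo1x_decompress_safe(src, src_len, dst, dst_len);

	return ret == LZO_E_OK ? 0 : -EINVAL;
}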
diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h
new file mode 100644
index 000000000..915d6b4f0
--- /dev/null
+++ b/include/linux/m48t86.h
@@ -0,0 +1,16 @@
+/*
+ * ST M48T86 / Dallas DS12887 RTC driver
+ * Copyright (c) 2006 Tower Technologies
+ *
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct m48t86_ops
+{
+ void (*writebyte)(unsigned char value, unsigned long addr);
+ unsigned char (*readbyte)(unsigned long addr);
+};
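A sketch of the glue a board file might supply through these ops; the MMIO base, the index/data register layout and all names are hypothetical, since the header only defines the callback pair.

#include <linux/io.h>
#include <linux/m48t86.h>

#define HYPO_RTC_INDEX	0x00	/* register-select latch (assumed layout) */
#define HYPO_RTC_DATA	0x04	/* data window (assumed layout) */

static void __iomem *hypo_rtc_base;

static unsigned char hypo_rtc_readbyte(unsigned long addr)
{
	writeb(addr, hypo_rtc_base + HYPO_RTC_INDEX);
	return readb(hypo_rtc_base + HYPO_RTC_DATA);
}

static void hypo_rtc_writebyte(unsigned char value, unsigned long addr)
{
	writeb(addr, hypo_rtc_base + HYPO_RTC_INDEX);
	writeb(value, hypo_rtc_base + HYPO_RTC_DATA);
}

static struct m48t86_ops hypo_rtc_ops = {
	.readbyte	= hypo_rtc_readbyte,
	.writebyte	= hypo_rtc_writebyte,
};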
diff --git a/include/linux/mISDNdsp.h b/include/linux/mISDNdsp.h
new file mode 100644
index 000000000..41d1eeb9b
--- /dev/null
+++ b/include/linux/mISDNdsp.h
@@ -0,0 +1,39 @@
+#ifndef __mISDNdsp_H__
+#define __mISDNdsp_H__
+
+struct mISDN_dsp_element_arg {
+ char *name;
+ char *def;
+ char *desc;
+};
+
+struct mISDN_dsp_element {
+ char *name;
+ void *(*new)(const char *arg);
+ void (*free)(void *p);
+ void (*process_tx)(void *p, unsigned char *data, int len);
+ void (*process_rx)(void *p, unsigned char *data, int len,
+ unsigned int txlen);
+ int num_args;
+ struct mISDN_dsp_element_arg
+ *args;
+};
+
+extern int mISDN_dsp_element_register(struct mISDN_dsp_element *elem);
+extern void mISDN_dsp_element_unregister(struct mISDN_dsp_element *elem);
+
+struct dsp_features {
+ int hfc_id; /* unique id to identify the chip (or -1) */
+ int hfc_dtmf; /* set if HFCmulti card supports dtmf */
+ int hfc_conf; /* set if HFCmulti card supports conferences */
+ int hfc_loops; /* set if card supports tone loops */
+ int hfc_echocanhw; /* set if card supports echo cancellation */
+ int pcm_id; /* unique id to identify the pcm bus (or -1) */
+ int pcm_slots; /* number of slots on the pcm bus */
+ int pcm_banks; /* number of IO banks of pcm bus */
+ int unclocked; /* data is not clocked (has jitter/loss) */
+ int unordered; /* data is unordered (packets have index) */
+};
+
+#endif
+
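A hypothetical pipeline element registered against the interface above; the element's state, its processing and the module boilerplate are assumptions (real elements also describe their parameters via num_args/args).

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mISDNdsp.h>

static void *hypo_elem_new(const char *arg)
{
	/* per-instance state; layout is entirely up to the element */
	return kzalloc(16, GFP_ATOMIC);
}

static void hypo_elem_free(void *p)
{
	kfree(p);
}

static void hypo_elem_tx(void *p, unsigned char *data, int len)
{
	/* transform outgoing audio samples in place */
}

static struct mISDN_dsp_element hypo_elem = {
	.name		= "hypo",
	.new		= hypo_elem_new,
	.free		= hypo_elem_free,
	.process_tx	= hypo_elem_tx,
};

static int __init hypo_elem_init(void)
{
	return mISDN_dsp_element_register(&hypo_elem);
}
module_init(hypo_elem_init);

static void __exit hypo_elem_exit(void)
{
	mISDN_dsp_element_unregister(&hypo_elem);
}
module_exit(hypo_elem_exit);
MODULE_LICENSE("GPL");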
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h
new file mode 100644
index 000000000..9d96d5d4d
--- /dev/null
+++ b/include/linux/mISDNhw.h
@@ -0,0 +1,201 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Basic declarations for the mISDN HW channels
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef MISDNHW_H
+#define MISDNHW_H
+#include <linux/mISDNif.h>
+#include <linux/timer.h>
+
+/*
+ * HW DEBUG 0xHHHHGGGG
+ * H - hardware driver specific bits
+ * G - for all drivers
+ */
+
+#define DEBUG_HW 0x00000001
+#define DEBUG_HW_OPEN 0x00000002
+#define DEBUG_HW_DCHANNEL 0x00000100
+#define DEBUG_HW_DFIFO 0x00000200
+#define DEBUG_HW_BCHANNEL 0x00001000
+#define DEBUG_HW_BFIFO 0x00002000
+
+#define MAX_DFRAME_LEN_L1 300
+#define MAX_MON_FRAME 32
+#define MAX_LOG_SPACE 2048
+#define MISDN_COPY_SIZE 32
+
+/* channel->Flags bit field */
+#define FLG_TX_BUSY 0 /* tx_buf in use */
+#define FLG_TX_NEXT 1 /* next_skb in use */
+#define FLG_L1_BUSY 2 /* L1 is permanent busy */
+#define FLG_L2_ACTIVATED 3 /* activated from L2 */
+#define FLG_OPEN 5 /* channel is in use */
+#define FLG_ACTIVE 6 /* channel is activated */
+#define FLG_BUSY_TIMER 7
+/* channel type */
+#define FLG_DCHANNEL 8 /* channel is D-channel */
+#define FLG_BCHANNEL 9 /* channel is B-channel */
+#define FLG_ECHANNEL 10 /* channel is E-channel */
+#define FLG_TRANSPARENT 12 /* channel uses transparent data */
+#define FLG_HDLC 13 /* channel uses HDLC data */
+#define FLG_L2DATA 14 /* channel uses L2 DATA primitives */
+#define FLG_ORIGIN 15 /* channel is on origin site */
+/* channel specific stuff */
+#define FLG_FILLEMPTY 16 /* fill fifo on first frame (empty) */
+/* arcofi specific */
+#define FLG_ARCOFI_TIMER 17
+#define FLG_ARCOFI_ERROR 18
+/* isar specific */
+#define FLG_INITIALIZED 17
+#define FLG_DLEETX 18
+#define FLG_LASTDLE 19
+#define FLG_FIRST 20
+#define FLG_LASTDATA 21
+#define FLG_NMD_DATA 22
+#define FLG_FTI_RUN 23
+#define FLG_LL_OK 24
+#define FLG_LL_CONN 25
+#define FLG_DTMFSEND 26
+#define FLG_TX_EMPTY 27
+/* stop sending received data upstream */
+#define FLG_RX_OFF 28
+/* workq events */
+#define FLG_RECVQUEUE 30
+#define FLG_PHCHANGE 31
+
+#define schedule_event(s, ev) do { \
+ test_and_set_bit(ev, &((s)->Flags)); \
+ schedule_work(&((s)->workq)); \
+ } while (0)
+
+struct dchannel {
+ struct mISDNdevice dev;
+ u_long Flags;
+ struct work_struct workq;
+ void (*phfunc) (struct dchannel *);
+ u_int state;
+ void *l1;
+ void *hw;
+ int slot; /* multiport card channel slot */
+ struct timer_list timer;
+ /* receive data */
+ struct sk_buff *rx_skb;
+ int maxlen;
+ /* send data */
+ struct sk_buff_head squeue;
+ struct sk_buff_head rqueue;
+ struct sk_buff *tx_skb;
+ int tx_idx;
+ int debug;
+ /* statistics */
+ int err_crc;
+ int err_tx;
+ int err_rx;
+};
+
+typedef int (dchannel_l1callback)(struct dchannel *, u_int);
+extern int create_l1(struct dchannel *, dchannel_l1callback *);
+
+/* private L1 commands */
+#define INFO0 0x8002
+#define INFO1 0x8102
+#define INFO2 0x8202
+#define INFO3_P8 0x8302
+#define INFO3_P10 0x8402
+#define INFO4_P8 0x8502
+#define INFO4_P10 0x8602
+#define LOSTFRAMING 0x8702
+#define ANYSIGNAL 0x8802
+#define HW_POWERDOWN 0x8902
+#define HW_RESET_REQ 0x8a02
+#define HW_POWERUP_REQ 0x8b02
+#define HW_DEACT_REQ 0x8c02
+#define HW_ACTIVATE_REQ 0x8e02
+#define HW_D_NOBLOCKED 0x8f02
+#define HW_RESET_IND 0x9002
+#define HW_POWERUP_IND 0x9102
+#define HW_DEACT_IND 0x9202
+#define HW_ACTIVATE_IND 0x9302
+#define HW_DEACT_CNF 0x9402
+#define HW_TESTLOOP 0x9502
+#define HW_TESTRX_RAW 0x9602
+#define HW_TESTRX_HDLC 0x9702
+#define HW_TESTRX_OFF 0x9802
+#define HW_TIMER3_IND 0x9902
+#define HW_TIMER3_VALUE 0x9a00
+#define HW_TIMER3_VMASK 0x00FF
+
+struct layer1;
+extern int l1_event(struct layer1 *, u_int);
+
+#define MISDN_BCH_FILL_SIZE 4
+
+struct bchannel {
+ struct mISDNchannel ch;
+ int nr;
+ u_long Flags;
+ struct work_struct workq;
+ u_int state;
+ void *hw;
+ int slot; /* multiport card channel slot */
+ struct timer_list timer;
+ /* receive data */
+ u8 fill[MISDN_BCH_FILL_SIZE];
+ struct sk_buff *rx_skb;
+ unsigned short maxlen;
+ unsigned short init_maxlen; /* initial value */
+ unsigned short next_maxlen; /* pending value */
+ unsigned short minlen; /* for transparent data */
+ unsigned short init_minlen; /* initial value */
+ unsigned short next_minlen; /* pending value */
+ /* send data */
+ struct sk_buff *next_skb;
+ struct sk_buff *tx_skb;
+ struct sk_buff_head rqueue;
+ int rcount;
+ int tx_idx;
+ int debug;
+ /* statistics */
+ int err_crc;
+ int err_tx;
+ int err_rx;
+ int dropcnt;
+};
+
+extern int mISDN_initdchannel(struct dchannel *, int, void *);
+extern int mISDN_initbchannel(struct bchannel *, unsigned short,
+ unsigned short);
+extern int mISDN_freedchannel(struct dchannel *);
+extern void mISDN_clear_bchannel(struct bchannel *);
+extern void mISDN_freebchannel(struct bchannel *);
+extern int mISDN_ctrl_bchannel(struct bchannel *, struct mISDN_ctrl_req *);
+extern void queue_ch_frame(struct mISDNchannel *, u_int,
+ int, struct sk_buff *);
+extern int dchannel_senddata(struct dchannel *, struct sk_buff *);
+extern int bchannel_senddata(struct bchannel *, struct sk_buff *);
+extern int bchannel_get_rxbuf(struct bchannel *, int);
+extern void recv_Dchannel(struct dchannel *);
+extern void recv_Echannel(struct dchannel *, struct dchannel *);
+extern void recv_Bchannel(struct bchannel *, unsigned int, bool);
+extern void recv_Dchannel_skb(struct dchannel *, struct sk_buff *);
+extern void recv_Bchannel_skb(struct bchannel *, struct sk_buff *);
+extern int get_next_bframe(struct bchannel *);
+extern int get_next_dframe(struct dchannel *);
+
+#endif
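A hedged sketch of the receive hand-off a card driver performs with the helpers above; the interrupt-path function and the init parameters are invented.

#include <linux/mISDNhw.h>

static int hypo_setup_dchannel(struct dchannel *dch)
{
	/* 300-byte L1 frame limit, no private ph-state callback. */
	return mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, NULL);
}

/* Called from the card's interrupt handler once a complete D-channel
 * frame sits in dch->rx_skb. */
static void hypo_dchannel_rx_done(struct dchannel *dch)
{
	if (dch->rx_skb)
		recv_Dchannel(dch);	/* queues to rqueue, schedules workq */
}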
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
new file mode 100644
index 000000000..246a3529e
--- /dev/null
+++ b/include/linux/mISDNif.h
@@ -0,0 +1,604 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE
+ * version 2.1 as published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU LESSER GENERAL PUBLIC LICENSE for more details.
+ *
+ */
+
+#ifndef mISDNIF_H
+#define mISDNIF_H
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+
+/*
+ * ABI Version 32 bit
+ *
+ * <8 bit> Major version
+ * - changed if any interface becomes backwards incompatible
+ *
+ * <8 bit> Minor version
+ * - changed if any interface is extended but backwards compatible
+ *
+ * <16 bit> Release number
+ * - should be incremented on every checkin
+ */
+#define MISDN_MAJOR_VERSION 1
+#define MISDN_MINOR_VERSION 1
+#define MISDN_RELEASE 29
+
+/* primitives for information exchange
+ * general format
+ * <16 bit 0 >
+ * <8 bit command>
+ * BIT 8 = 1 LAYER private
+ * BIT 7 = 1 answer
+ * BIT 6 = 1 DATA
+ * <8 bit target layer mask>
+ *
+ * Layer = 00 is reserved for general commands
+ * Layer = 01 L2 -> HW
+ * Layer = 02 HW -> L2
+ * Layer = 04 L3 -> L2
+ * Layer = 08 L2 -> L3
+ * Layer = FF is reserved for broadcast commands
+ */
+
+#define MISDN_CMDMASK 0xff00
+#define MISDN_LAYERMASK 0x00ff
+
+/* general commands */
+#define OPEN_CHANNEL 0x0100
+#define CLOSE_CHANNEL 0x0200
+#define CONTROL_CHANNEL 0x0300
+#define CHECK_DATA 0x0400
+
+/* layer 2 -> layer 1 */
+#define PH_ACTIVATE_REQ 0x0101
+#define PH_DEACTIVATE_REQ 0x0201
+#define PH_DATA_REQ 0x2001
+#define MPH_ACTIVATE_REQ 0x0501
+#define MPH_DEACTIVATE_REQ 0x0601
+#define MPH_INFORMATION_REQ 0x0701
+#define PH_CONTROL_REQ 0x0801
+
+/* layer 1 -> layer 2 */
+#define PH_ACTIVATE_IND 0x0102
+#define PH_ACTIVATE_CNF 0x4102
+#define PH_DEACTIVATE_IND 0x0202
+#define PH_DEACTIVATE_CNF 0x4202
+#define PH_DATA_IND 0x2002
+#define PH_DATA_E_IND 0x3002
+#define MPH_ACTIVATE_IND 0x0502
+#define MPH_DEACTIVATE_IND 0x0602
+#define MPH_INFORMATION_IND 0x0702
+#define PH_DATA_CNF 0x6002
+#define PH_CONTROL_IND 0x0802
+#define PH_CONTROL_CNF 0x4802
+
+/* layer 3 -> layer 2 */
+#define DL_ESTABLISH_REQ 0x1004
+#define DL_RELEASE_REQ 0x1104
+#define DL_DATA_REQ 0x3004
+#define DL_UNITDATA_REQ 0x3104
+#define DL_INFORMATION_REQ 0x0004
+
+/* layer 2 -> layer 3 */
+#define DL_ESTABLISH_IND 0x1008
+#define DL_ESTABLISH_CNF 0x5008
+#define DL_RELEASE_IND 0x1108
+#define DL_RELEASE_CNF 0x5108
+#define DL_DATA_IND 0x3008
+#define DL_UNITDATA_IND 0x3108
+#define DL_INFORMATION_IND 0x0008
+
+/* intern layer 2 management */
+#define MDL_ASSIGN_REQ 0x1804
+#define MDL_ASSIGN_IND 0x1904
+#define MDL_REMOVE_REQ 0x1A04
+#define MDL_REMOVE_IND 0x1B04
+#define MDL_STATUS_UP_IND 0x1C04
+#define MDL_STATUS_DOWN_IND 0x1D04
+#define MDL_STATUS_UI_IND 0x1E04
+#define MDL_ERROR_IND 0x1F04
+#define MDL_ERROR_RSP 0x5F04
+
+/* intern layer 2 */
+#define DL_TIMER200_IND 0x7004
+#define DL_TIMER203_IND 0x7304
+#define DL_INTERN_MSG 0x7804
+
+/* DL_INFORMATION_IND types */
+#define DL_INFO_L2_CONNECT 0x0001
+#define DL_INFO_L2_REMOVED 0x0002
+
+/* PH_CONTROL types */
+/* TOUCH TONE IS 0x20XX XX "0"..."9", "A","B","C","D","*","#" */
+#define DTMF_TONE_VAL 0x2000
+#define DTMF_TONE_MASK 0x007F
+#define DTMF_TONE_START 0x2100
+#define DTMF_TONE_STOP 0x2200
+#define DTMF_HFC_COEF 0x4000
+#define DSP_CONF_JOIN 0x2403
+#define DSP_CONF_SPLIT 0x2404
+#define DSP_RECEIVE_OFF 0x2405
+#define DSP_RECEIVE_ON 0x2406
+#define DSP_ECHO_ON 0x2407
+#define DSP_ECHO_OFF 0x2408
+#define DSP_MIX_ON 0x2409
+#define DSP_MIX_OFF 0x240a
+#define DSP_DELAY 0x240b
+#define DSP_JITTER 0x240c
+#define DSP_TXDATA_ON 0x240d
+#define DSP_TXDATA_OFF 0x240e
+#define DSP_TX_DEJITTER 0x240f
+#define DSP_TX_DEJ_OFF 0x2410
+#define DSP_TONE_PATT_ON 0x2411
+#define DSP_TONE_PATT_OFF 0x2412
+#define DSP_VOL_CHANGE_TX 0x2413
+#define DSP_VOL_CHANGE_RX 0x2414
+#define DSP_BF_ENABLE_KEY 0x2415
+#define DSP_BF_DISABLE 0x2416
+#define DSP_BF_ACCEPT 0x2416
+#define DSP_BF_REJECT 0x2417
+#define DSP_PIPELINE_CFG 0x2418
+#define HFC_VOL_CHANGE_TX 0x2601
+#define HFC_VOL_CHANGE_RX 0x2602
+#define HFC_SPL_LOOP_ON 0x2603
+#define HFC_SPL_LOOP_OFF 0x2604
+/* for T30 FAX and analog modem */
+#define HW_MOD_FRM 0x4000
+#define HW_MOD_FRH 0x4001
+#define HW_MOD_FTM 0x4002
+#define HW_MOD_FTH 0x4003
+#define HW_MOD_FTS 0x4004
+#define HW_MOD_CONNECT 0x4010
+#define HW_MOD_OK 0x4011
+#define HW_MOD_NOCARR 0x4012
+#define HW_MOD_FCERROR 0x4013
+#define HW_MOD_READY 0x4014
+#define HW_MOD_LASTDATA 0x4015
+
+/* DSP_TONE_PATT_ON parameter */
+#define TONE_OFF 0x0000
+#define TONE_GERMAN_DIALTONE 0x0001
+#define TONE_GERMAN_OLDDIALTONE 0x0002
+#define TONE_AMERICAN_DIALTONE 0x0003
+#define TONE_GERMAN_DIALPBX 0x0004
+#define TONE_GERMAN_OLDDIALPBX 0x0005
+#define TONE_AMERICAN_DIALPBX 0x0006
+#define TONE_GERMAN_RINGING 0x0007
+#define TONE_GERMAN_OLDRINGING 0x0008
+#define TONE_AMERICAN_RINGPBX 0x000b
+#define TONE_GERMAN_RINGPBX 0x000c
+#define TONE_GERMAN_OLDRINGPBX 0x000d
+#define TONE_AMERICAN_RINGING 0x000e
+#define TONE_GERMAN_BUSY 0x000f
+#define TONE_GERMAN_OLDBUSY 0x0010
+#define TONE_AMERICAN_BUSY 0x0011
+#define TONE_GERMAN_HANGUP 0x0012
+#define TONE_GERMAN_OLDHANGUP 0x0013
+#define TONE_AMERICAN_HANGUP 0x0014
+#define TONE_SPECIAL_INFO 0x0015
+#define TONE_GERMAN_GASSENBESETZT 0x0016
+#define TONE_GERMAN_AUFSCHALTTON 0x0016
+
+/* MPH_INFORMATION_IND */
+#define L1_SIGNAL_LOS_OFF 0x0010
+#define L1_SIGNAL_LOS_ON 0x0011
+#define L1_SIGNAL_AIS_OFF 0x0012
+#define L1_SIGNAL_AIS_ON 0x0013
+#define L1_SIGNAL_RDI_OFF 0x0014
+#define L1_SIGNAL_RDI_ON 0x0015
+#define L1_SIGNAL_SLIP_RX 0x0020
+#define L1_SIGNAL_SLIP_TX 0x0021
+
+/*
+ * protocol ids
+ * D channel 1-31
+ * B channel 33 - 63
+ */
+
+#define ISDN_P_NONE 0
+#define ISDN_P_BASE 0
+#define ISDN_P_TE_S0 0x01
+#define ISDN_P_NT_S0 0x02
+#define ISDN_P_TE_E1 0x03
+#define ISDN_P_NT_E1 0x04
+#define ISDN_P_TE_UP0 0x05
+#define ISDN_P_NT_UP0 0x06
+
+#define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \
+ (p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE))
+#define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \
+ (p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT))
+#define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0))
+#define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1))
+#define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0))
+
+
+#define ISDN_P_LAPD_TE 0x10
+#define ISDN_P_LAPD_NT 0x11
+
+#define ISDN_P_B_MASK 0x1f
+#define ISDN_P_B_START 0x20
+
+#define ISDN_P_B_RAW 0x21
+#define ISDN_P_B_HDLC 0x22
+#define ISDN_P_B_X75SLP 0x23
+#define ISDN_P_B_L2DTMF 0x24
+#define ISDN_P_B_L2DSP 0x25
+#define ISDN_P_B_L2DSPHDLC 0x26
+#define ISDN_P_B_T30_FAX 0x27
+#define ISDN_P_B_MODEM_ASYNC 0x28
+
+#define OPTION_L2_PMX 1
+#define OPTION_L2_PTP 2
+#define OPTION_L2_FIXEDTEI 3
+#define OPTION_L2_CLEANUP 4
+#define OPTION_L1_HOLD 5
+
+/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */
+#define MISDN_MAX_IDLEN 20
+
+struct mISDNhead {
+ unsigned int prim;
+ unsigned int id;
+} __packed;
+
+#define MISDN_HEADER_LEN sizeof(struct mISDNhead)
+#define MAX_DATA_SIZE 2048
+#define MAX_DATA_MEM (MAX_DATA_SIZE + MISDN_HEADER_LEN)
+#define MAX_DFRAME_LEN 260
+
+#define MISDN_ID_ADDR_MASK 0xFFFF
+#define MISDN_ID_TEI_MASK 0xFF00
+#define MISDN_ID_SAPI_MASK 0x00FF
+#define MISDN_ID_TEI_ANY 0x7F00
+
+#define MISDN_ID_ANY 0xFFFF
+#define MISDN_ID_NONE 0xFFFE
+
+#define GROUP_TEI 127
+#define TEI_SAPI 63
+#define CTRL_SAPI 0
+
+#define MISDN_MAX_CHANNEL 127
+#define MISDN_CHMAP_SIZE ((MISDN_MAX_CHANNEL + 1) >> 3)
+
+#define SOL_MISDN 0
+
+struct sockaddr_mISDN {
+ sa_family_t family;
+ unsigned char dev;
+ unsigned char channel;
+ unsigned char sapi;
+ unsigned char tei;
+};
+
+struct mISDNversion {
+ unsigned char major;
+ unsigned char minor;
+ unsigned short release;
+};
+
+struct mISDN_devinfo {
+ u_int id;
+ u_int Dprotocols;
+ u_int Bprotocols;
+ u_int protocol;
+ u_char channelmap[MISDN_CHMAP_SIZE];
+ u_int nrbchan;
+ char name[MISDN_MAX_IDLEN];
+};
+
+struct mISDN_devrename {
+ u_int id;
+ char name[MISDN_MAX_IDLEN]; /* new name */
+};
+
+/* MPH_INFORMATION_REQ payload */
+struct ph_info_ch {
+ __u32 protocol;
+ __u64 Flags;
+};
+
+struct ph_info_dch {
+ struct ph_info_ch ch;
+ __u16 state;
+ __u16 num_bch;
+};
+
+struct ph_info {
+ struct ph_info_dch dch;
+ struct ph_info_ch bch[];
+};
+
+/* timer device ioctl */
+#define IMADDTIMER _IOR('I', 64, int)
+#define IMDELTIMER _IOR('I', 65, int)
+
+/* socket ioctls */
+#define IMGETVERSION _IOR('I', 66, int)
+#define IMGETCOUNT _IOR('I', 67, int)
+#define IMGETDEVINFO _IOR('I', 68, int)
+#define IMCTRLREQ _IOR('I', 69, int)
+#define IMCLEAR_L2 _IOR('I', 70, int)
+#define IMSETDEVNAME _IOR('I', 71, struct mISDN_devrename)
+#define IMHOLD_L1 _IOR('I', 72, int)
+
+static inline int
+test_channelmap(u_int nr, u_char *map)
+{
+ if (nr <= MISDN_MAX_CHANNEL)
+ return map[nr >> 3] & (1 << (nr & 7));
+ else
+ return 0;
+}
+
+static inline void
+set_channelmap(u_int nr, u_char *map)
+{
+ map[nr >> 3] |= (1 << (nr & 7));
+}
+
+static inline void
+clear_channelmap(u_int nr, u_char *map)
+{
+ map[nr >> 3] &= ~(1 << (nr & 7));
+}
+
+/* CONTROL_CHANNEL parameters */
+#define MISDN_CTRL_GETOP 0x0000
+#define MISDN_CTRL_LOOP 0x0001
+#define MISDN_CTRL_CONNECT 0x0002
+#define MISDN_CTRL_DISCONNECT 0x0004
+#define MISDN_CTRL_RX_BUFFER 0x0008
+#define MISDN_CTRL_PCMCONNECT 0x0010
+#define MISDN_CTRL_PCMDISCONNECT 0x0020
+#define MISDN_CTRL_SETPEER 0x0040
+#define MISDN_CTRL_UNSETPEER 0x0080
+#define MISDN_CTRL_RX_OFF 0x0100
+#define MISDN_CTRL_FILL_EMPTY 0x0200
+#define MISDN_CTRL_GETPEER 0x0400
+#define MISDN_CTRL_L1_TIMER3 0x0800
+#define MISDN_CTRL_HW_FEATURES_OP 0x2000
+#define MISDN_CTRL_HW_FEATURES 0x2001
+#define MISDN_CTRL_HFC_OP 0x4000
+#define MISDN_CTRL_HFC_PCM_CONN 0x4001
+#define MISDN_CTRL_HFC_PCM_DISC 0x4002
+#define MISDN_CTRL_HFC_CONF_JOIN 0x4003
+#define MISDN_CTRL_HFC_CONF_SPLIT 0x4004
+#define MISDN_CTRL_HFC_RECEIVE_OFF 0x4005
+#define MISDN_CTRL_HFC_RECEIVE_ON 0x4006
+#define MISDN_CTRL_HFC_ECHOCAN_ON 0x4007
+#define MISDN_CTRL_HFC_ECHOCAN_OFF 0x4008
+#define MISDN_CTRL_HFC_WD_INIT 0x4009
+#define MISDN_CTRL_HFC_WD_RESET 0x400A
+
+/* special RX buffer value for MISDN_CTRL_RX_BUFFER: request.p1 is the minimum
+ * buffer size, request.p2 the maximum. Using MISDN_CTRL_RX_SIZE_IGNORE will
+ * not change the value, but still read back the actual setting.
+ */
+#define MISDN_CTRL_RX_SIZE_IGNORE -1
+
+/* socket options */
+#define MISDN_TIME_STAMP 0x0001
+
+struct mISDN_ctrl_req {
+ int op;
+ int channel;
+ int p1;
+ int p2;
+};
+
+/* muxer options */
+#define MISDN_OPT_ALL 1
+#define MISDN_OPT_TEIMGR 2
+
+#ifdef __KERNEL__
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <net/sock.h>
+#include <linux/completion.h>
+
+#define DEBUG_CORE 0x000000ff
+#define DEBUG_CORE_FUNC 0x00000002
+#define DEBUG_SOCKET 0x00000004
+#define DEBUG_MANAGER 0x00000008
+#define DEBUG_SEND_ERR 0x00000010
+#define DEBUG_MSG_THREAD 0x00000020
+#define DEBUG_QUEUE_FUNC 0x00000040
+#define DEBUG_L1 0x0000ff00
+#define DEBUG_L1_FSM 0x00000200
+#define DEBUG_L2 0x00ff0000
+#define DEBUG_L2_FSM 0x00020000
+#define DEBUG_L2_CTRL 0x00040000
+#define DEBUG_L2_RECV 0x00080000
+#define DEBUG_L2_TEI 0x00100000
+#define DEBUG_L2_TEIFSM 0x00200000
+#define DEBUG_TIMER 0x01000000
+#define DEBUG_CLOCK 0x02000000
+
+#define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0])
+#define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim)
+#define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id)
+
+/* socket states */
+#define MISDN_OPEN 1
+#define MISDN_BOUND 2
+#define MISDN_CLOSED 3
+
+struct mISDNchannel;
+struct mISDNdevice;
+struct mISDNstack;
+struct mISDNclock;
+
+struct channel_req {
+ u_int protocol;
+ struct sockaddr_mISDN adr;
+ struct mISDNchannel *ch;
+};
+
+typedef int (ctrl_func_t)(struct mISDNchannel *, u_int, void *);
+typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *);
+typedef int (create_func_t)(struct channel_req *);
+
+struct Bprotocol {
+ struct list_head list;
+ char *name;
+ u_int Bprotocols;
+ create_func_t *create;
+};
+
+struct mISDNchannel {
+ struct list_head list;
+ u_int protocol;
+ u_int nr;
+ u_long opt;
+ u_int addr;
+ struct mISDNstack *st;
+ struct mISDNchannel *peer;
+ send_func_t *send;
+ send_func_t *recv;
+ ctrl_func_t *ctrl;
+};
+
+struct mISDN_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+};
+
+struct mISDN_sock {
+ struct sock sk;
+ struct mISDNchannel ch;
+ u_int cmask;
+ struct mISDNdevice *dev;
+};
+
+
+
+struct mISDNdevice {
+ struct mISDNchannel D;
+ u_int id;
+ u_int Dprotocols;
+ u_int Bprotocols;
+ u_int nrbchan;
+ u_char channelmap[MISDN_CHMAP_SIZE];
+ struct list_head bchannels;
+ struct mISDNchannel *teimgr;
+ struct device dev;
+};
+
+struct mISDNstack {
+ u_long status;
+ struct mISDNdevice *dev;
+ struct task_struct *thread;
+ struct completion *notify;
+ wait_queue_head_t workq;
+ struct sk_buff_head msgq;
+ struct list_head layer2;
+ struct mISDNchannel *layer1;
+ struct mISDNchannel own;
+ struct mutex lmutex; /* protect lists */
+ struct mISDN_sock_list l1sock;
+#ifdef MISDN_MSG_STATS
+ u_int msg_cnt;
+ u_int sleep_cnt;
+ u_int stopped_cnt;
+#endif
+};
+
+typedef int (clockctl_func_t)(void *, int);
+
+struct mISDNclock {
+ struct list_head list;
+ char name[64];
+ int pri;
+ clockctl_func_t *ctl;
+ void *priv;
+};
+
+/* global alloc/queue functions */
+
+static inline struct sk_buff *
+mI_alloc_skb(unsigned int len, gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask);
+ if (likely(skb))
+ skb_reserve(skb, MISDN_HEADER_LEN);
+ return skb;
+}
+
+static inline struct sk_buff *
+_alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask)
+{
+ struct sk_buff *skb = mI_alloc_skb(len, gfp_mask);
+ struct mISDNhead *hh;
+
+ if (!skb)
+ return NULL;
+ if (len)
+ memcpy(skb_put(skb, len), dp, len);
+ hh = mISDN_HEAD_P(skb);
+ hh->prim = prim;
+ hh->id = id;
+ return skb;
+}
+
+static inline void
+_queue_data(struct mISDNchannel *ch, u_int prim,
+ u_int id, u_int len, void *dp, gfp_t gfp_mask)
+{
+ struct sk_buff *skb;
+
+ if (!ch->peer)
+ return;
+ skb = _alloc_mISDN_skb(prim, id, len, dp, gfp_mask);
+ if (!skb)
+ return;
+ if (ch->recv(ch->peer, skb))
+ dev_kfree_skb(skb);
+}
+
+/* global register/unregister functions */
+
+extern int mISDN_register_device(struct mISDNdevice *,
+ struct device *parent, char *name);
+extern void mISDN_unregister_device(struct mISDNdevice *);
+extern int mISDN_register_Bprotocol(struct Bprotocol *);
+extern void mISDN_unregister_Bprotocol(struct Bprotocol *);
+extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *,
+ void *);
+extern void mISDN_unregister_clock(struct mISDNclock *);
+
+static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
+{
+ if (dev)
+ return dev_get_drvdata(dev);
+ else
+ return NULL;
+}
+
+extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
+extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
+extern unsigned short mISDN_clock_get(void);
+extern const char *mISDNDevName4ch(struct mISDNchannel *);
+
+#endif /* __KERNEL__ */
+#endif /* mISDNIF_H */
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
new file mode 100644
index 000000000..1726ccbd8
--- /dev/null
+++ b/include/linux/mailbox_client.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013-2014 Linaro Ltd.
+ * Author: Jassi Brar <jassisinghbrar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_CLIENT_H
+#define __MAILBOX_CLIENT_H
+
+#include <linux/of.h>
+#include <linux/device.h>
+
+struct mbox_chan;
+
+/**
+ * struct mbox_client - User of a mailbox
+ * @dev: The client device
+ * @tx_block: If mbox_send_message() should block until the data is
+ *		transmitted.
+ * @tx_tout: Max block period in ms before TX is assumed to have failed
+ * @knows_txdone: If the client can run the TX state machine, usually
+ *		because it receives some ACK packet for each transmission.
+ *		Unused if the controller already has a TX_Done/RTR IRQ.
+ * @rx_callback: Atomic callback to provide client the data received
+ * @tx_prepare: Atomic callback to ask client to prepare the payload
+ * before initiating the transmission if required.
+ * @tx_done: Atomic callback to tell client of data transmission
+ */
+struct mbox_client {
+ struct device *dev;
+ bool tx_block;
+ unsigned long tx_tout;
+ bool knows_txdone;
+
+ void (*rx_callback)(struct mbox_client *cl, void *mssg);
+ void (*tx_prepare)(struct mbox_client *cl, void *mssg);
+ void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
+};
+
+struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
+int mbox_send_message(struct mbox_chan *chan, void *mssg);
+void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
+bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
+void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
+
+#endif /* __MAILBOX_CLIENT_H */
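
A minimal sketch of how a consumer of this client API fits together, assuming a platform driver's probe path; the demo_ names, payload value, and channel index are illustrative only, and mbox_request_channel() is assumed to return an ERR_PTR() on failure:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/mailbox_client.h>

    static void demo_rx_callback(struct mbox_client *cl, void *mssg)
    {
            dev_info(cl->dev, "mailbox message received\n");
    }

    static int demo_mbox_probe(struct platform_device *pdev)
    {
            struct mbox_client *cl;
            struct mbox_chan *chan;
            u32 msg = 0x12345678;           /* illustrative payload */
            int ret;

            cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
            if (!cl)
                    return -ENOMEM;

            cl->dev = &pdev->dev;
            cl->rx_callback = demo_rx_callback;
            cl->tx_block = true;            /* mbox_send_message() waits for TX done */
            cl->tx_tout = 500;              /* give up after 500 ms */
            cl->knows_txdone = false;       /* the controller signals TX done */

            chan = mbox_request_channel(cl, 0);     /* first channel in DT 'mboxes' */
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            ret = mbox_send_message(chan, &msg);
            mbox_free_channel(chan);
            return ret < 0 ? ret : 0;
    }

Because tx_block is set, the payload can live on the stack; with tx_block false the buffer must stay valid until tx_done() fires.
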
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
new file mode 100644
index 000000000..d4cf96f07
--- /dev/null
+++ b/include/linux/mailbox_controller.h
@@ -0,0 +1,133 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAILBOX_CONTROLLER_H
+#define __MAILBOX_CONTROLLER_H
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+
+struct mbox_chan;
+
+/**
+ * struct mbox_chan_ops - methods to control mailbox channels
+ * @send_data:	The API asks the MBOX controller driver, in atomic
+ *		context, to try to transmit a message on the bus. Returns 0 if
+ *		the data is accepted for transmission, or -EBUSY if the remote
+ *		hasn't yet read the last data sent. Actual transmission of the
+ *		data is reported by the controller via mbox_chan_txdone (if it
+ *		has some TX ACK irq). It must not sleep.
+ * @startup:	Called when a client requests the chan. The controller
+ *		could ask clients for additional parameters of communication
+ *		to be provided via the client's chan_data. This call may
+ *		block. After this call the controller must forward any
+ *		data received on the chan by calling mbox_chan_received_data.
+ *		The controller may do work that needs to sleep.
+ * @shutdown:	Called when a client relinquishes control of a chan.
+ *		This call may block too. The controller must not forward
+ *		any received data anymore.
+ *		The controller may do work that needs to sleep.
+ * @last_tx_done: If the controller sets 'txdone_poll', the API calls
+ *		  this to poll the status of the last TX. The controller must
+ *		  give priority to the IRQ method over polling and never
+ *		  set both txdone_poll and txdone_irq. Only in polling
+ *		  mode is 'send_data' expected to return -EBUSY.
+ *		  The controller may do work that needs to sleep/block.
+ *		  Used only if txdone_poll:=true && txdone_irq:=false
+ * @peek_data: Atomic check for any received data. Return true if controller
+ * has some data to push to the client. False otherwise.
+ */
+struct mbox_chan_ops {
+ int (*send_data)(struct mbox_chan *chan, void *data);
+ int (*startup)(struct mbox_chan *chan);
+ void (*shutdown)(struct mbox_chan *chan);
+ bool (*last_tx_done)(struct mbox_chan *chan);
+ bool (*peek_data)(struct mbox_chan *chan);
+};
+
+/**
+ * struct mbox_controller - Controller of a class of communication channels
+ * @dev: Device backing this controller
+ * @ops: Operators that work on each communication chan
+ * @chans: Array of channels
+ * @num_chans: Number of channels in the 'chans' array.
+ * @txdone_irq: Indicates if the controller can report to API when
+ * the last transmitted data was read by the remote.
+ * Eg, if it has some TX ACK irq.
+ * @txdone_poll:	If the controller can read, but not report, the TX
+ *			done. E.g. some register shows the TX status but
+ *			no interrupt is raised. Ignored if 'txdone_irq' is set.
+ * @txpoll_period:	If 'txdone_poll' is in effect, the API polls for
+ *			the last TX's status after this many milliseconds
+ * @of_xlate: Controller driver specific mapping of channel via DT
+ * @poll: API private. Used to poll for TXDONE on all channels.
+ * @node: API private. To hook into list of controllers.
+ */
+struct mbox_controller {
+ struct device *dev;
+ struct mbox_chan_ops *ops;
+ struct mbox_chan *chans;
+ int num_chans;
+ bool txdone_irq;
+ bool txdone_poll;
+ unsigned txpoll_period;
+ struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp);
+ /* Internal to API */
+ struct timer_list poll;
+ struct list_head node;
+};
+
+/*
+ * The length of the circular buffer for queuing messages from a client.
+ * 'msg_count' tracks the number of buffered messages while 'msg_free'
+ * is the index where the next message would be buffered.
+ * It does not need to be large because every transfer is interrupt
+ * triggered, and if we have lots of data to transfer, the interrupt
+ * latencies are going to be the bottleneck, not the buffer length.
+ * Besides, mbox_send_message could be called from atomic context and
+ * the client could also queue another message from the 'tx_done' notifier
+ * of the last completed transfer.
+ * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
+ * print, this should be made a config option or similar.
+ */
+#define MBOX_TX_QUEUE_LEN 20
+
+/**
+ * struct mbox_chan - s/w representation of a communication chan
+ * @mbox: Pointer to the parent/provider of this channel
+ * @txdone_method: Way to detect TXDone chosen by the API
+ * @cl: Pointer to the current owner of this channel
+ * @tx_complete: Transmission completion
+ * @active_req: Currently active request hook
+ * @msg_count: No. of mssg currently queued
+ * @msg_free: Index of next available mssg slot
+ * @msg_data: Hook for data packet
+ * @lock: Serialise access to the channel
+ * @con_priv: Hook for controller driver to attach private data
+ */
+struct mbox_chan {
+ struct mbox_controller *mbox;
+ unsigned txdone_method;
+ struct mbox_client *cl;
+ struct completion tx_complete;
+ void *active_req;
+ unsigned msg_count, msg_free;
+ void *msg_data[MBOX_TX_QUEUE_LEN];
+ spinlock_t lock; /* Serialise access to the channel */
+ void *con_priv;
+};
+
+int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */
+void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
+void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
+void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
+
+#endif /* __MAILBOX_CONTROLLER_H */
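
On the provider side, a controller driver fills a set of mbox_chan_ops and a struct mbox_controller, then registers it. A minimal single-channel sketch, with an invented MMIO register layout and assuming the hardware raises a TX ACK interrupt:

    #include <linux/io.h>
    #include <linux/mailbox_controller.h>

    struct demo_mbox {
            void __iomem *regs;             /* assumed MMIO region */
            struct mbox_chan chan[1];
            struct mbox_controller controller;
    };

    static int demo_send_data(struct mbox_chan *chan, void *data)
    {
            struct demo_mbox *mb =
                    container_of(chan->mbox, struct demo_mbox, controller);

            writel(*(u32 *)data, mb->regs + 0x0);   /* hypothetical TX register */
            return 0;       /* accepted; the ACK irq later calls mbox_chan_txdone() */
    }

    static struct mbox_chan_ops demo_ops = {
            .send_data = demo_send_data,
    };

    static int demo_mbox_register(struct device *dev, struct demo_mbox *mb)
    {
            mb->controller.dev = dev;
            mb->controller.ops = &demo_ops;
            mb->controller.chans = mb->chan;
            mb->controller.num_chans = ARRAY_SIZE(mb->chan);
            mb->controller.txdone_irq = true;       /* assumed TX ACK interrupt */
            return mbox_controller_register(&mb->controller);
    }

The matching interrupt handler would call mbox_chan_txdone(&mb->chan[0], 0) for the ACK and mbox_chan_received_data() when an incoming message is latched.
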
diff --git a/include/linux/maple.h b/include/linux/maple.h
new file mode 100644
index 000000000..c37288b23
--- /dev/null
+++ b/include/linux/maple.h
@@ -0,0 +1,105 @@
+#ifndef __LINUX_MAPLE_H
+#define __LINUX_MAPLE_H
+
+#include <mach/maple.h>
+
+struct device;
+extern struct bus_type maple_bus_type;
+
+/* Maple Bus command and response codes */
+enum maple_code {
+ MAPLE_RESPONSE_FILEERR = -5,
+ MAPLE_RESPONSE_AGAIN, /* retransmit */
+ MAPLE_RESPONSE_BADCMD,
+ MAPLE_RESPONSE_BADFUNC,
+	MAPLE_RESPONSE_NONE,	/* unit didn't respond */
+ MAPLE_COMMAND_DEVINFO = 1,
+ MAPLE_COMMAND_ALLINFO,
+ MAPLE_COMMAND_RESET,
+ MAPLE_COMMAND_KILL,
+ MAPLE_RESPONSE_DEVINFO,
+ MAPLE_RESPONSE_ALLINFO,
+ MAPLE_RESPONSE_OK,
+ MAPLE_RESPONSE_DATATRF,
+ MAPLE_COMMAND_GETCOND,
+ MAPLE_COMMAND_GETMINFO,
+ MAPLE_COMMAND_BREAD,
+ MAPLE_COMMAND_BWRITE,
+ MAPLE_COMMAND_BSYNC,
+ MAPLE_COMMAND_SETCOND,
+ MAPLE_COMMAND_MICCONTROL
+};
+
+enum maple_file_errors {
+ MAPLE_FILEERR_INVALID_PARTITION = 0x01000000,
+ MAPLE_FILEERR_PHASE_ERROR = 0x02000000,
+ MAPLE_FILEERR_INVALID_BLOCK = 0x04000000,
+ MAPLE_FILEERR_WRITE_ERROR = 0x08000000,
+ MAPLE_FILEERR_INVALID_WRITE_LENGTH = 0x10000000,
+ MAPLE_FILEERR_BAD_CRC = 0x20000000
+};
+
+struct maple_buffer {
+ char bufx[0x400];
+ void *buf;
+};
+
+struct mapleq {
+ struct list_head list;
+ struct maple_device *dev;
+ struct maple_buffer *recvbuf;
+ void *sendbuf, *recvbuf_p2;
+ unsigned char length;
+ enum maple_code command;
+};
+
+struct maple_devinfo {
+ unsigned long function;
+ unsigned long function_data[3];
+ unsigned char area_code;
+ unsigned char connector_direction;
+ char product_name[31];
+ char product_licence[61];
+ unsigned short standby_power;
+ unsigned short max_power;
+};
+
+struct maple_device {
+ struct maple_driver *driver;
+ struct mapleq *mq;
+ void (*callback) (struct mapleq * mq);
+ void (*fileerr_handler)(struct maple_device *mdev, void *recvbuf);
+ int (*can_unload)(struct maple_device *mdev);
+ unsigned long when, interval, function;
+ struct maple_devinfo devinfo;
+ unsigned char port, unit;
+ char product_name[32];
+ char product_licence[64];
+ atomic_t busy;
+ wait_queue_head_t maple_wait;
+ struct device dev;
+};
+
+struct maple_driver {
+ unsigned long function;
+ struct device_driver drv;
+};
+
+void maple_getcond_callback(struct maple_device *dev,
+ void (*callback) (struct mapleq * mq),
+ unsigned long interval,
+ unsigned long function);
+int maple_driver_register(struct maple_driver *);
+void maple_driver_unregister(struct maple_driver *);
+
+int maple_add_packet(struct maple_device *mdev, u32 function,
+ u32 command, u32 length, void *data);
+void maple_clear_dev(struct maple_device *mdev);
+
+#define to_maple_dev(n) container_of(n, struct maple_device, dev)
+#define to_maple_driver(n) container_of(n, struct maple_driver, drv)
+
+#define maple_get_drvdata(d) dev_get_drvdata(&(d)->dev)
+#define maple_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
+
+#endif /* __LINUX_MAPLE_H */
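
A hedged skeleton of a Maple bus driver using the registration API above; the function bit and all demo_ names are illustrative only (real drivers take the function code from <mach/maple.h>):

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/maple.h>

    static int demo_maple_probe(struct device *dev)
    {
            struct maple_device *mdev = to_maple_dev(dev);

            dev_info(dev, "found %s on port %d, unit %d\n",
                     mdev->product_name, mdev->port, mdev->unit);
            return 0;
    }

    static struct maple_driver demo_maple_driver = {
            .function = 1 << 24,    /* assumed function bit, illustrative only */
            .drv = {
                    .name   = "demo_maple",
                    .probe  = demo_maple_probe,
            },
    };

    static int __init demo_maple_init(void)
    {
            return maple_driver_register(&demo_maple_driver);
    }

    static void __exit demo_maple_exit(void)
    {
            maple_driver_unregister(&demo_maple_driver);
    }

    module_init(demo_maple_init);
    module_exit(demo_maple_exit);
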
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
new file mode 100644
index 000000000..e6982ac32
--- /dev/null
+++ b/include/linux/marvell_phy.h
@@ -0,0 +1,25 @@
+#ifndef _MARVELL_PHY_H
+#define _MARVELL_PHY_H
+
+/* Mask used for ID comparisons */
+#define MARVELL_PHY_ID_MASK 0xfffffff0
+
+/* Known PHY IDs */
+#define MARVELL_PHY_ID_88E1101 0x01410c60
+#define MARVELL_PHY_ID_88E1112 0x01410c90
+#define MARVELL_PHY_ID_88E1111 0x01410cc0
+#define MARVELL_PHY_ID_88E1118 0x01410e10
+#define MARVELL_PHY_ID_88E1121R 0x01410cb0
+#define MARVELL_PHY_ID_88E1145 0x01410cd0
+#define MARVELL_PHY_ID_88E1149R 0x01410e50
+#define MARVELL_PHY_ID_88E1240 0x01410e30
+#define MARVELL_PHY_ID_88E1318S 0x01410e90
+#define MARVELL_PHY_ID_88E1116R 0x01410e40
+#define MARVELL_PHY_ID_88E1510 0x01410dd0
+#define MARVELL_PHY_ID_88E3016 0x01410e60
+
+/* struct phy_device dev_flags definitions */
+#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
+#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
+
+#endif /* _MARVELL_PHY_H */
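
The ID/mask pair is intended for comparisons of this shape; a minimal sketch, where struct phy_device and its phy_id field come from <linux/phy.h>:

    #include <linux/phy.h>
    #include <linux/marvell_phy.h>

    /* True if the PHY is an 88E1111, regardless of the silicon revision nibble. */
    static bool is_marvell_88e1111(struct phy_device *phydev)
    {
            return (phydev->phy_id & MARVELL_PHY_ID_MASK) ==
                   (MARVELL_PHY_ID_88E1111 & MARVELL_PHY_ID_MASK);
    }
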
diff --git a/include/linux/math64.h b/include/linux/math64.h
new file mode 100644
index 000000000..c45c089bf
--- /dev/null
+++ b/include/linux/math64.h
@@ -0,0 +1,166 @@
+#ifndef _LINUX_MATH64_H
+#define _LINUX_MATH64_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+
+#if BITS_PER_LONG == 64
+
+#define div64_long(x, y) div64_s64((x), (y))
+#define div64_ul(x, y) div64_u64((x), (y))
+
+/**
+ * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ *
+ * 32bit archs commonly provide an optimized version of this 64bit
+ * divide.
+ */
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+/**
+ * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ */
+static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+/**
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
+ */
+static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ */
+static inline u64 div64_u64(u64 dividend, u64 divisor)
+{
+ return dividend / divisor;
+}
+
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ */
+static inline s64 div64_s64(s64 dividend, s64 divisor)
+{
+ return dividend / divisor;
+}
+
+#elif BITS_PER_LONG == 32
+
+#define div64_long(x, y) div_s64((x), (y))
+#define div64_ul(x, y) div_u64((x), (y))
+
+#ifndef div_u64_rem
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+ *remainder = do_div(dividend, divisor);
+ return dividend;
+}
+#endif
+
+#ifndef div_s64_rem
+extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
+#endif
+
+#ifndef div64_u64_rem
+extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
+#endif
+
+#ifndef div64_u64
+extern u64 div64_u64(u64 dividend, u64 divisor);
+#endif
+
+#ifndef div64_s64
+extern s64 div64_s64(s64 dividend, s64 divisor);
+#endif
+
+#endif /* BITS_PER_LONG */
+
+/**
+ * div_u64 - unsigned 64bit divide with 32bit divisor
+ *
+ * This is the most common 64bit divide and should be used if possible,
+ * as many 32bit archs can optimize this variant better than a full 64bit
+ * divide.
+ */
+#ifndef div_u64
+static inline u64 div_u64(u64 dividend, u32 divisor)
+{
+ u32 remainder;
+ return div_u64_rem(dividend, divisor, &remainder);
+}
+#endif
+
+/**
+ * div_s64 - signed 64bit divide with 32bit divisor
+ */
+#ifndef div_s64
+static inline s64 div_s64(s64 dividend, s32 divisor)
+{
+ s32 remainder;
+ return div_s64_rem(dividend, divisor, &remainder);
+}
+#endif
+
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
+
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+ u32 ret = 0;
+
+ while (dividend >= divisor) {
+ /* The following asm() prevents the compiler from
+ optimising this loop into a modulo operation. */
+ asm("" : "+rm"(dividend));
+
+ dividend -= divisor;
+ ret++;
+ }
+
+ *remainder = dividend;
+
+ return ret;
+}
+
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+ u32 ah, al;
+ u64 ret;
+
+ al = a;
+ ah = a >> 32;
+
+ ret = ((u64)al * mul) >> shift;
+ if (ah)
+ ret += ((u64)ah * mul) << (32 - shift);
+
+ return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
+#endif /* _LINUX_MATH64_H */
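
These helpers exist because a plain 64-by-32 '/' does not link on 32-bit builds (it would pull in a libgcc routine). A small usage sketch with an invented rate calculation; note the multiplication can overflow for extreme inputs, this is only a sketch:

    #include <linux/math64.h>

    /* Average bytes per second over an interval given in milliseconds. */
    static u64 demo_bytes_per_sec(u64 total_bytes, u32 elapsed_ms)
    {
            if (!elapsed_ms)
                    return 0;
            /* div_u64(): 64-bit dividend, 32-bit divisor */
            return div_u64(total_bytes * 1000, elapsed_ms);
    }

    /* Split a 64-bit nanosecond count into whole seconds plus the remainder. */
    static u64 demo_ns_to_sec(u64 ns, u32 *rem_ns)
    {
            return div_u64_rem(ns, 1000000000U, rem_ns);
    }
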
diff --git a/include/linux/max17040_battery.h b/include/linux/max17040_battery.h
new file mode 100644
index 000000000..ad97b06cf
--- /dev/null
+++ b/include/linux/max17040_battery.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2009 Samsung Electronics
+ * Minkyu Kang <mk7.kang@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MAX17040_BATTERY_H_
+#define __MAX17040_BATTERY_H_
+
+struct max17040_platform_data {
+ int (*battery_online)(void);
+ int (*charger_online)(void);
+ int (*charger_enable)(void);
+};
+
+#endif
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
new file mode 100644
index 000000000..6a392e7a7
--- /dev/null
+++ b/include/linux/mbcache.h
@@ -0,0 +1,55 @@
+/*
+ File: linux/mbcache.h
+
+ (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+*/
+struct mb_cache_entry {
+ struct list_head e_lru_list;
+ struct mb_cache *e_cache;
+ unsigned short e_used;
+ unsigned short e_queued;
+ atomic_t e_refcnt;
+ struct block_device *e_bdev;
+ sector_t e_block;
+ struct hlist_bl_node e_block_list;
+ struct {
+ struct hlist_bl_node o_list;
+ unsigned int o_key;
+ } e_index;
+ struct hlist_bl_head *e_block_hash_p;
+ struct hlist_bl_head *e_index_hash_p;
+};
+
+struct mb_cache {
+ struct list_head c_cache_list;
+ const char *c_name;
+ atomic_t c_entry_count;
+ int c_max_entries;
+ int c_bucket_bits;
+ struct kmem_cache *c_entry_cache;
+ struct hlist_bl_head *c_block_hash;
+ struct hlist_bl_head *c_index_hash;
+};
+
+/* Functions on caches */
+
+struct mb_cache *mb_cache_create(const char *, int);
+void mb_cache_shrink(struct block_device *);
+void mb_cache_destroy(struct mb_cache *);
+
+/* Functions on cache entries */
+
+struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
+int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
+ sector_t, unsigned int);
+void mb_cache_entry_release(struct mb_cache_entry *);
+void mb_cache_entry_free(struct mb_cache_entry *);
+struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *,
+ struct block_device *,
+ sector_t);
+struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
+ struct block_device *,
+ unsigned int);
+struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *,
+ struct block_device *,
+ unsigned int);
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
new file mode 100644
index 000000000..611b69fa8
--- /dev/null
+++ b/include/linux/mbus.h
@@ -0,0 +1,79 @@
+/*
+ * Marvell MBUS common definitions.
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LINUX_MBUS_H
+#define __LINUX_MBUS_H
+
+struct resource;
+
+struct mbus_dram_target_info
+{
+ /*
+ * The 4-bit MBUS target ID of the DRAM controller.
+ */
+ u8 mbus_dram_target_id;
+
+ /*
+ * The base address, size, and MBUS attribute ID for each
+ * of the possible DRAM chip selects. Peripherals are
+ * required to support at least 4 decode windows.
+ */
+ int num_cs;
+ struct mbus_dram_window {
+ u8 cs_index;
+ u8 mbus_attr;
+ u32 base;
+ u32 size;
+ } cs[4];
+};
+
+/* Flags for PCI/PCIe address decoding regions */
+#define MVEBU_MBUS_PCI_IO 0x1
+#define MVEBU_MBUS_PCI_MEM 0x2
+#define MVEBU_MBUS_PCI_WA 0x3
+
+/*
+ * Magic value indicating that we don't need a remapping-capable
+ * address decoding window.
+ */
+#define MVEBU_MBUS_NO_REMAP (0xffffffff)
+
+/* Maximum size of a mbus window name */
+#define MVEBU_MBUS_MAX_WINNAME_SZ 32
+
+/*
+ * The Marvell mbus is to be found only on SOCs from the Orion family
+ * at the moment. Provide a dummy stub for other architectures.
+ */
+#ifdef CONFIG_PLAT_ORION
+extern const struct mbus_dram_target_info *mv_mbus_dram_info(void);
+#else
+static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
+{
+ return NULL;
+}
+#endif
+
+int mvebu_mbus_save_cpu_target(u32 *store_addr);
+void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
+void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
+int mvebu_mbus_add_window_remap_by_id(unsigned int target,
+ unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap);
+int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size);
+int mvebu_mbus_del_window(phys_addr_t base, size_t size);
+int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
+ size_t mbus_size, phys_addr_t sdram_phys_base,
+ size_t sdram_size);
+int mvebu_mbus_dt_init(bool is_coherent);
+
+#endif /* __LINUX_MBUS_H */
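
A typical consumer walks the DRAM windows returned by mv_mbus_dram_info() and programs its own address-decode registers. A sketch with invented register offsets and bit layout:

    #include <linux/io.h>
    #include <linux/mbus.h>

    static void demo_setup_dram_windows(void __iomem *regs)
    {
            const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
            int i;

            if (!dram)
                    return;         /* not an Orion-family SoC */

            for (i = 0; i < dram->num_cs; i++) {
                    const struct mbus_dram_window *cs = &dram->cs[i];

                    /* hypothetical per-window base and size/attr registers */
                    writel(cs->base, regs + 0x100 + i * 8);
                    writel(((cs->size - 1) & 0xffff0000) |
                           (cs->mbus_attr << 8) |
                           (dram->mbus_dram_target_id << 4) | 1,
                           regs + 0x104 + i * 8);
            }
    }
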
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
new file mode 100644
index 000000000..433e0c74d
--- /dev/null
+++ b/include/linux/mc146818rtc.h
@@ -0,0 +1,123 @@
+/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM
+ * Copyright Torsten Duwe <duwe@informatik.uni-erlangen.de> 1993
+ * derived from Data Sheet, Copyright Motorola 1984 (!).
+ * It was written to be part of the Linux operating system.
+ */
+/* permission is hereby granted to copy, modify and redistribute this code
+ * in terms of the GNU Library General Public License, Version 2 or later,
+ * at your option.
+ */
+
+#ifndef _MC146818RTC_H
+#define _MC146818RTC_H
+
+#include <asm/io.h>
+#include <linux/rtc.h> /* get the user-level API */
+#include <asm/mc146818rtc.h> /* register access macros */
+
+#ifdef __KERNEL__
+#include <linux/spinlock.h> /* spinlock_t */
+extern spinlock_t rtc_lock; /* serialize CMOS RAM access */
+
+/* Some RTCs extend the mc146818 register set to support alarms of more
+ * than 24 hours in the future; or dates that include a century code.
+ * This platform_data structure can pass this information to the driver.
+ *
+ * Also, some platforms need suspend()/resume() hooks to kick in special
+ * handling of wake alarms, e.g. activating ACPI BIOS hooks or setting up
+ * a separate wakeup alarm used by some almost-clone chips.
+ */
+struct cmos_rtc_board_info {
+ void (*wake_on)(struct device *dev);
+ void (*wake_off)(struct device *dev);
+
+ u32 flags;
+#define CMOS_RTC_FLAGS_NOFREQ (1 << 0)
+ int address_space;
+
+ u8 rtc_day_alarm; /* zero, or register index */
+ u8 rtc_mon_alarm; /* zero, or register index */
+ u8 rtc_century; /* zero, or register index */
+};
+#endif
+
+/**********************************************************************
+ * register summary
+ **********************************************************************/
+#define RTC_SECONDS 0
+#define RTC_SECONDS_ALARM 1
+#define RTC_MINUTES 2
+#define RTC_MINUTES_ALARM 3
+#define RTC_HOURS 4
+#define RTC_HOURS_ALARM 5
+/* RTC_*_alarm is always true if 2 MSBs are set */
+# define RTC_ALARM_DONT_CARE 0xC0
+
+#define RTC_DAY_OF_WEEK 6
+#define RTC_DAY_OF_MONTH 7
+#define RTC_MONTH 8
+#define RTC_YEAR 9
+
+/* control registers - Moto names
+ */
+#define RTC_REG_A 10
+#define RTC_REG_B 11
+#define RTC_REG_C 12
+#define RTC_REG_D 13
+
+/**********************************************************************
+ * register details
+ **********************************************************************/
+#define RTC_FREQ_SELECT RTC_REG_A
+
+/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus,
+ * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete,
+ * totalling to a max high interval of 2.228 ms.
+ */
+# define RTC_UIP 0x80
+# define RTC_DIV_CTL 0x70
+ /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */
+# define RTC_REF_CLCK_4MHZ 0x00
+# define RTC_REF_CLCK_1MHZ 0x10
+# define RTC_REF_CLCK_32KHZ 0x20
+ /* 2 values for divider stage reset, others for "testing purposes only" */
+# define RTC_DIV_RESET1 0x60
+# define RTC_DIV_RESET2 0x70
+ /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */
+# define RTC_RATE_SELECT 0x0F
+
+/**********************************************************************/
+#define RTC_CONTROL RTC_REG_B
+# define RTC_SET 0x80 /* disable updates for clock setting */
+# define RTC_PIE 0x40 /* periodic interrupt enable */
+# define RTC_AIE 0x20 /* alarm interrupt enable */
+# define RTC_UIE 0x10 /* update-finished interrupt enable */
+# define RTC_SQWE 0x08 /* enable square-wave output */
+# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+# define RTC_DST_EN 0x01	/* auto switch DST - works for USA only */
+
+/**********************************************************************/
+#define RTC_INTR_FLAGS RTC_REG_C
+/* caution - cleared by read */
+# define RTC_IRQF 0x80 /* any of the following 3 is active */
+# define RTC_PF 0x40
+# define RTC_AF 0x20
+# define RTC_UF 0x10
+
+/**********************************************************************/
+#define RTC_VALID RTC_REG_D
+# define RTC_VRT 0x80 /* valid RAM and time */
+/**********************************************************************/
+
+#ifndef ARCH_RTC_LOCATION /* Override by <asm/mc146818rtc.h>? */
+
+#define RTC_IO_EXTENT 0x8
+#define RTC_IO_EXTENT_USED 0x2
+#define RTC_IOMAPPED 1 /* Default to I/O mapping. */
+
+#else
+#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
+#endif /* ARCH_RTC_LOCATION */
+
+#endif /* _MC146818RTC_H */
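
A minimal read sketch tying the flags together: CMOS_READ() comes from <asm/mc146818rtc.h>, every access must hold rtc_lock, and the RTC_UIP window should be waited out before reading time registers. A real driver bounds the wait; this sketch keeps it simple:

    #include <linux/spinlock.h>
    #include <linux/mc146818rtc.h>

    static unsigned char demo_read_rtc_seconds(void)
    {
            unsigned long flags;
            unsigned char sec;

            spin_lock_irqsave(&rtc_lock, flags);
            while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
                    cpu_relax();    /* update in progress (max ~2.2 ms) */
            sec = CMOS_READ(RTC_SECONDS);
            spin_unlock_irqrestore(&rtc_lock, flags);

            return sec;     /* BCD unless RTC_DM_BINARY is set in RTC_CONTROL */
    }
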
diff --git a/include/linux/mc6821.h b/include/linux/mc6821.h
new file mode 100644
index 000000000..28e301e29
--- /dev/null
+++ b/include/linux/mc6821.h
@@ -0,0 +1,51 @@
+#ifndef _MC6821_H_
+#define _MC6821_H_
+
+/*
+ * This file describes the memory mapping of the MC6821 PIA.
+ * The unions describe overlaid registers. Which of them is used is
+ * determined by bit 2 of the corresponding control register.
+ * This file expects PIA_REG_PADWIDTH to be defined as the numeric
+ * value of the register spacing.
+ *
+ * Data came from MFC-31-Developer Kit (from Ralph Seidel,
+ * zodiac@darkness.gun.de) and Motorola Data Sheet (from
+ * Richard Hirst, srh@gpt.co.uk)
+ *
+ * 6.11.95 copyright Joerg Dorchain (dorchain@mpi-sb.mpg.de)
+ *
+ */
+
+#ifndef PIA_REG_PADWIDTH
+#define PIA_REG_PADWIDTH 255
+#endif
+
+struct pia {
+ union {
+ volatile u_char pra;
+ volatile u_char ddra;
+ } ua;
+ u_char pad1[PIA_REG_PADWIDTH];
+ volatile u_char cra;
+ u_char pad2[PIA_REG_PADWIDTH];
+ union {
+ volatile u_char prb;
+ volatile u_char ddrb;
+ } ub;
+ u_char pad3[PIA_REG_PADWIDTH];
+ volatile u_char crb;
+ u_char pad4[PIA_REG_PADWIDTH];
+};
+
+#define ppra ua.pra
+#define pddra ua.ddra
+#define pprb ub.prb
+#define pddrb ub.ddrb
+
+#define PIA_C1_ENABLE_IRQ (1<<0)
+#define PIA_C1_LOW_TO_HIGH (1<<1)
+#define PIA_DDR (1<<2)
+#define PIA_IRQ2 (1<<6)
+#define PIA_IRQ1 (1<<7)
+
+#endif
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
new file mode 100644
index 000000000..ed06e15a3
--- /dev/null
+++ b/include/linux/mcb.h
@@ -0,0 +1,123 @@
+/*
+ * MEN Chameleon Bus.
+ *
+ * Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
+ * Author: Johannes Thumshirn <johannes.thumshirn@men.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; version 2 of the License.
+ */
+#ifndef _LINUX_MCB_H
+#define _LINUX_MCB_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+
+struct mcb_driver;
+struct mcb_device;
+
+/**
+ * struct mcb_bus - MEN Chameleon Bus
+ *
+ * @children: the child busses
+ * @dev: bus device
+ * @carrier: pointer to the carrier device
+ * @bus_nr: mcb bus number
+ * @get_irq: callback to get IRQ number
+ */
+struct mcb_bus {
+ struct list_head children;
+ struct device dev;
+ struct device *carrier;
+ int bus_nr;
+ int (*get_irq)(struct mcb_device *dev);
+};
+#define to_mcb_bus(b) container_of((b), struct mcb_bus, dev)
+
+/**
+ * struct mcb_device - MEN Chameleon Bus device
+ *
+ * @bus_list: internal list handling for bus code
+ * @dev: device in kernel representation
+ * @bus: mcb bus the device is plugged to
+ * @subordinate: subordinate MCBus in case of bridge
+ * @is_added: flag to check if device is added to bus
+ * @driver: associated mcb_driver
+ * @id: mcb device id
+ * @inst: instance in Chameleon table
+ * @group: group in Chameleon table
+ * @var: variant in Chameleon table
+ * @bar: BAR in Chameleon table
+ * @rev: revision in Chameleon table
+ * @irq: IRQ resource
+ * @memory: memory resource
+ */
+struct mcb_device {
+ struct list_head bus_list;
+ struct device dev;
+ struct mcb_bus *bus;
+ struct mcb_bus *subordinate;
+ bool is_added;
+ struct mcb_driver *driver;
+ u16 id;
+ int inst;
+ int group;
+ int var;
+ int bar;
+ int rev;
+ struct resource irq;
+ struct resource mem;
+};
+#define to_mcb_device(x) container_of((x), struct mcb_device, dev)
+
+/**
+ * struct mcb_driver - MEN Chameleon Bus device driver
+ *
+ * @driver: device_driver
+ * @id_table: mcb id table
+ * @probe: probe callback
+ * @remove: remove callback
+ * @shutdown: shutdown callback
+ */
+struct mcb_driver {
+ struct device_driver driver;
+ const struct mcb_device_id *id_table;
+ int (*probe)(struct mcb_device *mdev, const struct mcb_device_id *id);
+ void (*remove)(struct mcb_device *mdev);
+ void (*shutdown)(struct mcb_device *mdev);
+};
+#define to_mcb_driver(x) container_of((x), struct mcb_driver, driver)
+
+static inline void *mcb_get_drvdata(struct mcb_device *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+static inline void mcb_set_drvdata(struct mcb_device *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
+
+extern int __must_check __mcb_register_driver(struct mcb_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+#define mcb_register_driver(driver) \
+ __mcb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+extern void mcb_unregister_driver(struct mcb_driver *driver);
+#define module_mcb_driver(__mcb_driver) \
+ module_driver(__mcb_driver, mcb_register_driver, mcb_unregister_driver);
+extern void mcb_bus_add_devices(const struct mcb_bus *bus);
+extern int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev);
+extern struct mcb_bus *mcb_alloc_bus(struct device *carrier);
+extern struct mcb_bus *mcb_bus_get(struct mcb_bus *bus);
+extern void mcb_bus_put(struct mcb_bus *bus);
+extern struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus);
+extern void mcb_free_dev(struct mcb_device *dev);
+extern void mcb_release_bus(struct mcb_bus *bus);
+extern struct resource *mcb_request_mem(struct mcb_device *dev,
+ const char *name);
+extern void mcb_release_mem(struct resource *mem);
+extern int mcb_get_irq(struct mcb_device *dev);
+
+#endif /* _LINUX_MCB_H */
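
A skeleton MCB driver built from the pieces above; the Chameleon module ID is hypothetical, and mcb_request_mem() is assumed to return an ERR_PTR() on failure:

    #include <linux/module.h>
    #include <linux/mcb.h>

    static const struct mcb_device_id demo_mcb_ids[] = {
            { .device = 0x123 },    /* hypothetical Chameleon module ID */
            { }
    };
    MODULE_DEVICE_TABLE(mcb, demo_mcb_ids);

    static int demo_mcb_probe(struct mcb_device *mdev,
                              const struct mcb_device_id *id)
    {
            struct resource *mem;
            int irq;

            mem = mcb_request_mem(mdev, "demo_mcb");
            if (IS_ERR(mem))
                    return PTR_ERR(mem);

            irq = mcb_get_irq(mdev);
            if (irq < 0) {
                    mcb_release_mem(mem);
                    return irq;
            }

            mcb_set_drvdata(mdev, mem);
            return 0;
    }

    static void demo_mcb_remove(struct mcb_device *mdev)
    {
            mcb_release_mem(mcb_get_drvdata(mdev));
    }

    static struct mcb_driver demo_mcb_driver = {
            .driver = {
                    .name = "demo_mcb",
            },
            .probe = demo_mcb_probe,
            .remove = demo_mcb_remove,
            .id_table = demo_mcb_ids,
    };
    module_mcb_driver(demo_mcb_driver);
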
diff --git a/include/linux/mdio-bitbang.h b/include/linux/mdio-bitbang.h
new file mode 100644
index 000000000..76f52bbbb
--- /dev/null
+++ b/include/linux/mdio-bitbang.h
@@ -0,0 +1,45 @@
+#ifndef __LINUX_MDIO_BITBANG_H
+#define __LINUX_MDIO_BITBANG_H
+
+#include <linux/phy.h>
+
+struct module;
+
+struct mdiobb_ctrl;
+
+struct mdiobb_ops {
+ struct module *owner;
+
+ /* Set the Management Data Clock high if level is one,
+ * low if level is zero.
+ */
+ void (*set_mdc)(struct mdiobb_ctrl *ctrl, int level);
+
+ /* Configure the Management Data I/O pin as an input if
+ * "output" is zero, or an output if "output" is one.
+ */
+ void (*set_mdio_dir)(struct mdiobb_ctrl *ctrl, int output);
+
+ /* Set the Management Data I/O pin high if value is one,
+ * low if "value" is zero. This may only be called
+ * when the MDIO pin is configured as an output.
+ */
+ void (*set_mdio_data)(struct mdiobb_ctrl *ctrl, int value);
+
+	/* Retrieve the state of the Management Data I/O pin. */
+ int (*get_mdio_data)(struct mdiobb_ctrl *ctrl);
+};
+
+struct mdiobb_ctrl {
+ const struct mdiobb_ops *ops;
+ /* reset callback */
+ int (*reset)(struct mii_bus *bus);
+};
+
+/* The returned bus is not yet registered with the phy layer. */
+struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
+
+/* The bus must already have been unregistered. */
+void free_mdio_bitbang(struct mii_bus *bus);
+
+#endif
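
The four ops map directly onto pin operations. A hedged sketch using the legacy gpio_* API; the structure, pin numbers, and demo_ names are assumptions, real drivers take the pins from platform data or DT:

    #include <linux/module.h>
    #include <linux/gpio.h>
    #include <linux/mdio-bitbang.h>

    struct demo_bb {
            struct mdiobb_ctrl ctrl;
            int mdc_gpio;
            int mdio_gpio;
    };

    #define to_demo_bb(c) container_of(c, struct demo_bb, ctrl)

    static void demo_set_mdc(struct mdiobb_ctrl *ctrl, int level)
    {
            gpio_set_value(to_demo_bb(ctrl)->mdc_gpio, level);
    }

    static void demo_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
    {
            struct demo_bb *bb = to_demo_bb(ctrl);

            if (output)
                    gpio_direction_output(bb->mdio_gpio, 1);
            else
                    gpio_direction_input(bb->mdio_gpio);
    }

    static void demo_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
    {
            gpio_set_value(to_demo_bb(ctrl)->mdio_gpio, value);
    }

    static int demo_get_mdio_data(struct mdiobb_ctrl *ctrl)
    {
            return gpio_get_value(to_demo_bb(ctrl)->mdio_gpio);
    }

    static const struct mdiobb_ops demo_bb_ops = {
            .owner          = THIS_MODULE,
            .set_mdc        = demo_set_mdc,
            .set_mdio_dir   = demo_set_mdio_dir,
            .set_mdio_data  = demo_set_mdio_data,
            .get_mdio_data  = demo_get_mdio_data,
    };

With ctrl.ops pointed at demo_bb_ops, alloc_mdio_bitbang(&bb->ctrl) returns a struct mii_bus that can then be registered with the PHY layer.
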
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
new file mode 100644
index 000000000..66c30a763
--- /dev/null
+++ b/include/linux/mdio-gpio.h
@@ -0,0 +1,32 @@
+/*
+ * MDIO-GPIO bus platform data structures
+ *
+ * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __LINUX_MDIO_GPIO_H
+#define __LINUX_MDIO_GPIO_H
+
+#include <linux/mdio-bitbang.h>
+
+struct mdio_gpio_platform_data {
+ /* GPIO numbers for bus pins */
+ unsigned int mdc;
+ unsigned int mdio;
+ unsigned int mdo;
+
+ bool mdc_active_low;
+ bool mdio_active_low;
+ bool mdo_active_low;
+
+ unsigned int phy_mask;
+ int irqs[PHY_MAX_ADDR];
+ /* reset callback */
+ int (*reset)(struct mii_bus *bus);
+};
+
+#endif /* __LINUX_MDIO_GPIO_H */
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
new file mode 100644
index 000000000..a243dbba8
--- /dev/null
+++ b/include/linux/mdio-mux.h
@@ -0,0 +1,21 @@
+/*
+ * MDIO bus multiplexer framework.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+#ifndef __LINUX_MDIO_MUX_H
+#define __LINUX_MDIO_MUX_H
+#include <linux/device.h>
+
+int mdio_mux_init(struct device *dev,
+ int (*switch_fn) (int cur, int desired, void *data),
+ void **mux_handle,
+ void *data);
+
+void mdio_mux_uninit(void *mux_handle);
+
+#endif /* __LINUX_MDIO_MUX_H */
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
new file mode 100644
index 000000000..b42963bc8
--- /dev/null
+++ b/include/linux/mdio.h
@@ -0,0 +1,176 @@
+/*
+ * linux/mdio.h: definitions for MDIO (clause 45) transceivers
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef __LINUX_MDIO_H__
+#define __LINUX_MDIO_H__
+
+#include <uapi/linux/mdio.h>
+
+
+static inline bool mdio_phy_id_is_c45(int phy_id)
+{
+ return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK);
+}
+
+static inline __u16 mdio_phy_id_prtad(int phy_id)
+{
+ return (phy_id & MDIO_PHY_ID_PRTAD) >> 5;
+}
+
+static inline __u16 mdio_phy_id_devad(int phy_id)
+{
+ return phy_id & MDIO_PHY_ID_DEVAD;
+}
+
+/**
+ * struct mdio_if_info - Ethernet controller MDIO interface
+ * @prtad: PRTAD of the PHY (%MDIO_PRTAD_NONE if not present/unknown)
+ * @mmds: Mask of MMDs expected to be present in the PHY. This must be
+ * non-zero unless @prtad = %MDIO_PRTAD_NONE.
+ * @mode_support: MDIO modes supported. If %MDIO_SUPPORTS_C22 is set then
+ * MII register access will be passed through with @devad =
+ * %MDIO_DEVAD_NONE. If %MDIO_EMULATE_C22 is set then access to
+ * commonly used clause 22 registers will be translated into
+ * clause 45 registers.
+ * @dev: Net device structure
+ * @mdio_read: Register read function; returns value or negative error code
+ * @mdio_write: Register write function; returns 0 or negative error code
+ */
+struct mdio_if_info {
+ int prtad;
+ u32 mmds;
+ unsigned mode_support;
+
+ struct net_device *dev;
+ int (*mdio_read)(struct net_device *dev, int prtad, int devad,
+ u16 addr);
+ int (*mdio_write)(struct net_device *dev, int prtad, int devad,
+ u16 addr, u16 val);
+};
+
+#define MDIO_PRTAD_NONE (-1)
+#define MDIO_DEVAD_NONE (-1)
+#define MDIO_SUPPORTS_C22 1
+#define MDIO_SUPPORTS_C45 2
+#define MDIO_EMULATE_C22 4
+
+struct ethtool_cmd;
+struct ethtool_pauseparam;
+extern int mdio45_probe(struct mdio_if_info *mdio, int prtad);
+extern int mdio_set_flag(const struct mdio_if_info *mdio,
+ int prtad, int devad, u16 addr, int mask,
+ bool sense);
+extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
+extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
+extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd,
+ u32 npage_adv, u32 npage_lpa);
+
+/**
+ * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
+ * @mdio: MDIO interface
+ * @ecmd: Ethtool request structure
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them. Use
+ * mdio45_ethtool_gset_npage() to specify advertisement bits from next
+ * pages.
+ */
+static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
+}
+
+extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
+ struct mii_ioctl_data *mii_data, int cmd);
+
+/**
+ * mmd_eee_cap_to_ethtool_sup_t
+ * @eee_cap: value of the MMD EEE Capability register
+ *
+ * A small helper function that translates MMD EEE Capability (3.20) bits
+ * to ethtool supported settings.
+ */
+static inline u32 mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
+{
+ u32 supported = 0;
+
+ if (eee_cap & MDIO_EEE_100TX)
+ supported |= SUPPORTED_100baseT_Full;
+ if (eee_cap & MDIO_EEE_1000T)
+ supported |= SUPPORTED_1000baseT_Full;
+ if (eee_cap & MDIO_EEE_10GT)
+ supported |= SUPPORTED_10000baseT_Full;
+ if (eee_cap & MDIO_EEE_1000KX)
+ supported |= SUPPORTED_1000baseKX_Full;
+ if (eee_cap & MDIO_EEE_10GKX4)
+ supported |= SUPPORTED_10000baseKX4_Full;
+ if (eee_cap & MDIO_EEE_10GKR)
+ supported |= SUPPORTED_10000baseKR_Full;
+
+ return supported;
+}
+
+/**
+ * mmd_eee_adv_to_ethtool_adv_t
+ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
+ *
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
+ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
+ * settings.
+ */
+static inline u32 mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
+{
+ u32 adv = 0;
+
+ if (eee_adv & MDIO_EEE_100TX)
+ adv |= ADVERTISED_100baseT_Full;
+ if (eee_adv & MDIO_EEE_1000T)
+ adv |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & MDIO_EEE_10GT)
+ adv |= ADVERTISED_10000baseT_Full;
+ if (eee_adv & MDIO_EEE_1000KX)
+ adv |= ADVERTISED_1000baseKX_Full;
+ if (eee_adv & MDIO_EEE_10GKX4)
+ adv |= ADVERTISED_10000baseKX4_Full;
+ if (eee_adv & MDIO_EEE_10GKR)
+ adv |= ADVERTISED_10000baseKR_Full;
+
+ return adv;
+}
+
+/**
+ * ethtool_adv_to_mmd_eee_adv_t
+ * @adv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement settings
+ * to EEE advertisements for the MMD EEE Advertisement (7.60) and
+ * MMD EEE Link Partner Ability (7.61) registers.
+ */
+static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv)
+{
+ u16 reg = 0;
+
+ if (adv & ADVERTISED_100baseT_Full)
+ reg |= MDIO_EEE_100TX;
+ if (adv & ADVERTISED_1000baseT_Full)
+ reg |= MDIO_EEE_1000T;
+ if (adv & ADVERTISED_10000baseT_Full)
+ reg |= MDIO_EEE_10GT;
+ if (adv & ADVERTISED_1000baseKX_Full)
+ reg |= MDIO_EEE_1000KX;
+ if (adv & ADVERTISED_10000baseKX4_Full)
+ reg |= MDIO_EEE_10GKX4;
+ if (adv & ADVERTISED_10000baseKR_Full)
+ reg |= MDIO_EEE_10GKR;
+
+ return reg;
+}
+
+#endif /* __LINUX_MDIO_H__ */
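
How a MAC driver typically wires up struct mdio_if_info during probe; the demo_ register accessors are stand-ins for the hardware-specific MDIO routines, and the description of mdio45_probe() filling in prtad/mmds reflects its usual behaviour rather than anything stated in this header:

    #include <linux/netdevice.h>
    #include <linux/mdio.h>

    static int demo_mdio_read(struct net_device *dev, int prtad, int devad,
                              u16 addr)
    {
            return 0xffff;  /* stand-in: would issue a clause-45 read here */
    }

    static int demo_mdio_write(struct net_device *dev, int prtad, int devad,
                               u16 addr, u16 val)
    {
            return 0;       /* stand-in: would issue a clause-45 write here */
    }

    static int demo_mdio_attach(struct net_device *netdev,
                                struct mdio_if_info *mdio, int phy_addr)
    {
            mdio->dev = netdev;
            mdio->mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
            mdio->mdio_read = demo_mdio_read;
            mdio->mdio_write = demo_mdio_write;

            /* Scans the standard MMDs at phy_addr; on success it records
             * mdio->prtad and mdio->mmds for the later helpers. */
            return mdio45_probe(mdio, phy_addr);
    }
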
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
new file mode 100644
index 000000000..0819d36a3
--- /dev/null
+++ b/include/linux/mei_cl_bus.h
@@ -0,0 +1,45 @@
+#ifndef _LINUX_MEI_CL_BUS_H
+#define _LINUX_MEI_CL_BUS_H
+
+#include <linux/device.h>
+#include <linux/uuid.h>
+#include <linux/mod_devicetable.h>
+
+struct mei_cl_device;
+
+struct mei_cl_driver {
+ struct device_driver driver;
+ const char *name;
+
+ const struct mei_cl_device_id *id_table;
+
+ int (*probe)(struct mei_cl_device *dev,
+ const struct mei_cl_device_id *id);
+ int (*remove)(struct mei_cl_device *dev);
+};
+
+int __mei_cl_driver_register(struct mei_cl_driver *driver,
+ struct module *owner);
+#define mei_cl_driver_register(driver) \
+ __mei_cl_driver_register(driver, THIS_MODULE)
+
+void mei_cl_driver_unregister(struct mei_cl_driver *driver);
+
+ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length);
+ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
+
+typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
+ u32 events, void *context);
+int mei_cl_register_event_cb(struct mei_cl_device *device,
+ mei_cl_event_cb_t read_cb, void *context);
+
+#define MEI_CL_EVENT_RX 0
+#define MEI_CL_EVENT_TX 1
+
+void *mei_cl_get_drvdata(const struct mei_cl_device *device);
+void mei_cl_set_drvdata(struct mei_cl_device *device, void *data);
+
+int mei_cl_enable_device(struct mei_cl_device *device);
+int mei_cl_disable_device(struct mei_cl_device *device);
+
+#endif /* _LINUX_MEI_CL_BUS_H */
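
A bare-bones MEI client driver against this bus API; the demo_ names and buffer size are illustrative, and the id_table matching details are left out:

    #include <linux/module.h>
    #include <linux/mei_cl_bus.h>

    static void demo_mei_event(struct mei_cl_device *dev, u32 events,
                               void *context)
    {
            u8 buf[64];

            if (events & BIT(MEI_CL_EVENT_RX))
                    mei_cl_recv(dev, buf, sizeof(buf));
    }

    static int demo_mei_probe(struct mei_cl_device *dev,
                              const struct mei_cl_device_id *id)
    {
            int ret;

            ret = mei_cl_enable_device(dev);
            if (ret)
                    return ret;

            return mei_cl_register_event_cb(dev, demo_mei_event, NULL);
    }

    static int demo_mei_remove(struct mei_cl_device *dev)
    {
            return mei_cl_disable_device(dev);
    }

    static struct mei_cl_driver demo_mei_driver = {
            .name = "demo_mei",
            .probe = demo_mei_probe,
            .remove = demo_mei_remove,
            /* .id_table would normally match the firmware client */
    };

    static int __init demo_mei_init(void)
    {
            return mei_cl_driver_register(&demo_mei_driver);
    }

    static void __exit demo_mei_exit(void)
    {
            mei_cl_driver_unregister(&demo_mei_driver);
    }

    module_init(demo_mei_init);
    module_exit(demo_mei_exit);
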
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
new file mode 100644
index 000000000..9497ec7c7
--- /dev/null
+++ b/include/linux/memblock.h
@@ -0,0 +1,386 @@
+#ifndef _LINUX_MEMBLOCK_H
+#define _LINUX_MEMBLOCK_H
+#ifdef __KERNEL__
+
+#ifdef CONFIG_HAVE_MEMBLOCK
+/*
+ * Logical memory blocks.
+ *
+ * Copyright (C) 2001 Peter Bergner, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#define INIT_MEMBLOCK_REGIONS 128
+#define INIT_PHYSMEM_REGIONS 4
+
+/* Definition of memblock flags. */
+#define MEMBLOCK_HOTPLUG 0x1 /* hotpluggable region */
+
+struct memblock_region {
+ phys_addr_t base;
+ phys_addr_t size;
+ unsigned long flags;
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ int nid;
+#endif
+};
+
+struct memblock_type {
+ unsigned long cnt; /* number of regions */
+ unsigned long max; /* size of the allocated array */
+ phys_addr_t total_size; /* size of all regions */
+ struct memblock_region *regions;
+};
+
+struct memblock {
+ bool bottom_up; /* is bottom up direction? */
+ phys_addr_t current_limit;
+ struct memblock_type memory;
+ struct memblock_type reserved;
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+ struct memblock_type physmem;
+#endif
+};
+
+extern struct memblock memblock;
+extern int memblock_debug;
+#ifdef CONFIG_MOVABLE_NODE
+/* If movable_node boot option specified */
+extern bool movable_node_enabled;
+#endif /* CONFIG_MOVABLE_NODE */
+
+#define memblock_dbg(fmt, ...) \
+ if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+
+phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
+ phys_addr_t start, phys_addr_t end,
+ int nid);
+phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
+ phys_addr_t size, phys_addr_t align);
+phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
+phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
+void memblock_allow_resize(void);
+int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
+int memblock_add(phys_addr_t base, phys_addr_t size);
+int memblock_remove(phys_addr_t base, phys_addr_t size);
+int memblock_free(phys_addr_t base, phys_addr_t size);
+int memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_trim_memory(phys_addr_t align);
+int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
+int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
+
+/* Low level functions */
+int memblock_add_range(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size,
+ int nid, unsigned long flags);
+
+int memblock_remove_range(struct memblock_type *type,
+ phys_addr_t base,
+ phys_addr_t size);
+
+void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
+ struct memblock_type *type_b, phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid);
+
+void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
+ struct memblock_type *type_b, phys_addr_t *out_start,
+ phys_addr_t *out_end, int *out_nid);
+
+/**
+ * for_each_mem_range - iterate through memblock areas from type_a and not
+ * included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @type_a: ptr to memblock_type to iterate
+ * @type_b: ptr to memblock_type which excludes from the iteration
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ */
+#define for_each_mem_range(i, type_a, type_b, nid, \
+ p_start, p_end, p_nid) \
+ for (i = 0, __next_mem_range(&i, nid, type_a, type_b, \
+ p_start, p_end, p_nid); \
+ i != (u64)ULLONG_MAX; \
+ __next_mem_range(&i, nid, type_a, type_b, \
+ p_start, p_end, p_nid))
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * type_a and not included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @type_a: ptr to memblock_type to iterate
+ * @type_b: ptr to memblock_type which excludes from the iteration
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, type_a, type_b, nid, \
+ p_start, p_end, p_nid) \
+ for (i = (u64)ULLONG_MAX, \
+ __next_mem_range_rev(&i, nid, type_a, type_b, \
+ p_start, p_end, p_nid); \
+ i != (u64)ULLONG_MAX; \
+ __next_mem_range_rev(&i, nid, type_a, type_b, \
+ p_start, p_end, p_nid))
+
+#ifdef CONFIG_MOVABLE_NODE
+static inline bool memblock_is_hotpluggable(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_HOTPLUG;
+}
+
+static inline bool movable_node_is_enabled(void)
+{
+ return movable_node_enabled;
+}
+#else
+static inline bool memblock_is_hotpluggable(struct memblock_region *m)
+{
+ return false;
+}
+static inline bool movable_node_is_enabled(void)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
+ unsigned long *end_pfn);
+void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
+ unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
+ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+/**
+ * for_each_free_mem_range - iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock. Available as
+ * soon as memblock is initialized.
+ */
+#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
+ for_each_mem_range(i, &memblock.memory, &memblock.reserved, \
+ nid, p_start, p_end, p_nid)
+
+/**
+ * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
+ * @i: u64 used as loop variable
+ * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in reverse
+ * order. Available as soon as memblock is initialized.
+ */
+#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
+ for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
+ nid, p_start, p_end, p_nid)
+
+static inline void memblock_set_region_flags(struct memblock_region *r,
+ unsigned long flags)
+{
+ r->flags |= flags;
+}
+
+static inline void memblock_clear_region_flags(struct memblock_region *r,
+ unsigned long flags)
+{
+ r->flags &= ~flags;
+}
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+int memblock_set_node(phys_addr_t base, phys_addr_t size,
+ struct memblock_type *type, int nid);
+
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+ r->nid = nid;
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+ return r->nid;
+}
+#else
+static inline void memblock_set_region_node(struct memblock_region *r, int nid)
+{
+}
+
+static inline int memblock_get_region_node(const struct memblock_region *r)
+{
+ return 0;
+}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+
+#ifdef CONFIG_MOVABLE_NODE
+/*
+ * Set the allocation direction to bottom-up or top-down.
+ */
+static inline void __init memblock_set_bottom_up(bool enable)
+{
+ memblock.bottom_up = enable;
+}
+
+/*
+ * Check if the allocation direction is bottom-up or not.
+ * If this returns true, memblock will allocate memory
+ * in the bottom-up direction.
+ */
+static inline bool memblock_bottom_up(void)
+{
+ return memblock.bottom_up;
+}
+#else
+static inline void __init memblock_set_bottom_up(bool enable) {}
+static inline bool memblock_bottom_up(void) { return false; }
+#endif
+
+/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
+#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE 0
+
+phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
+ phys_addr_t start, phys_addr_t end);
+phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+ phys_addr_t max_addr);
+phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
+ phys_addr_t max_addr);
+phys_addr_t memblock_phys_mem_size(void);
+phys_addr_t memblock_mem_size(unsigned long limit_pfn);
+phys_addr_t memblock_start_of_DRAM(void);
+phys_addr_t memblock_end_of_DRAM(void);
+void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+int memblock_is_memory(phys_addr_t addr);
+int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_is_reserved(phys_addr_t addr);
+int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+
+extern void __memblock_dump_all(void);
+
+static inline void memblock_dump_all(void)
+{
+ if (memblock_debug)
+ __memblock_dump_all();
+}
+
+/**
+ * memblock_set_current_limit - Set the current allocation limit to allow
+ * limiting allocations to what is currently
+ * accessible during boot
+ * @limit: New limit value (physical address)
+ */
+void memblock_set_current_limit(phys_addr_t limit);
+
+
+phys_addr_t memblock_get_current_limit(void);
+
+/*
+ * pfn conversion functions
+ *
+ * While the memory MEMBLOCKs should always be page aligned, the reserved
+ * MEMBLOCKs may not be. These accessors attempt to provide a very clear
+ * idea of what they return for such non-aligned MEMBLOCKs.
+ */
+
+/**
+ * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
+{
+ return PFN_UP(reg->base);
+}
+
+/**
+ * memblock_region_memory_end_pfn - Return the end_pfn of this region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
+{
+ return PFN_DOWN(reg->base + reg->size);
+}
+
+/**
+ * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
+{
+ return PFN_DOWN(reg->base);
+}
+
+/**
+ * memblock_region_reserved_end_pfn - Return the end_pfn of this region
+ * @reg: memblock_region structure
+ */
+static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
+{
+ return PFN_UP(reg->base + reg->size);
+}
+
+#define for_each_memblock(memblock_type, region) \
+ for (region = memblock.memblock_type.regions; \
+ region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+ region++)
+
+
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#define __init_memblock __meminit
+#define __initdata_memblock __meminitdata
+#else
+#define __init_memblock
+#define __initdata_memblock
+#endif
+
+#ifdef CONFIG_MEMTEST
+extern void early_memtest(phys_addr_t start, phys_addr_t end);
+#else
+static inline void early_memtest(phys_addr_t start, phys_addr_t end)
+{
+}
+#endif
+
+#else
+static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+ return 0;
+}
+
+#endif /* CONFIG_HAVE_MEMBLOCK */
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_MEMBLOCK_H */
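
Early boot code typically reserves firmware regions and then walks or allocates from what is left. A sketch of both patterns using only the interfaces above; the addresses and sizes are invented:

    #include <linux/memblock.h>
    #include <linux/numa.h>
    #include <linux/sizes.h>

    static void __init demo_memblock_setup(void)
    {
            phys_addr_t start, end;
            u64 i;

            /* Keep a hypothetical firmware carve-out away from the allocator. */
            memblock_reserve(0x80000000, SZ_16M);

            /* Walk every free (memory && !reserved) range on any node. */
            for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
                    pr_info("free range: %pa - %pa\n", &start, &end);

            /* Early, physically contiguous allocation below the current limit. */
            if (!memblock_alloc(SZ_1M, PAGE_SIZE))
                    pr_warn("demo: early allocation failed\n");
    }
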
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
new file mode 100644
index 000000000..6c8918114
--- /dev/null
+++ b/include/linux/memcontrol.h
@@ -0,0 +1,609 @@
+/* memcontrol.h - Memory Controller
+ *
+ * Copyright IBM Corporation, 2007
+ * Author Balbir Singh <balbir@linux.vnet.ibm.com>
+ *
+ * Copyright 2007 OpenVZ SWsoft Inc
+ * Author: Pavel Emelianov <xemul@openvz.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MEMCONTROL_H
+#define _LINUX_MEMCONTROL_H
+#include <linux/cgroup.h>
+#include <linux/vm_event_item.h>
+#include <linux/hardirq.h>
+#include <linux/jump_label.h>
+
+struct mem_cgroup;
+struct page;
+struct mm_struct;
+struct kmem_cache;
+
+/*
+ * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c;
+ * these two lists must be kept in sync with each other.
+ */
+enum mem_cgroup_stat_index {
+ /*
+ * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
+ */
+ MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
+ MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
+ MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
+ MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
+ MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
+ MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
+ MEM_CGROUP_STAT_NSTATS,
+};
+
+struct mem_cgroup_reclaim_cookie {
+ struct zone *zone;
+ int priority;
+ unsigned int generation;
+};
+
+enum mem_cgroup_events_index {
+ MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
+ MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
+ MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
+ MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
+ MEM_CGROUP_EVENTS_NSTATS,
+ /* default hierarchy events */
+ MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
+ MEMCG_HIGH,
+ MEMCG_MAX,
+ MEMCG_OOM,
+ MEMCG_NR_EVENTS,
+};
+
+#ifdef CONFIG_MEMCG
+void mem_cgroup_events(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_index idx,
+ unsigned int nr);
+
+bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
+
+int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp);
+void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
+ bool lrucare);
+void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
+void mem_cgroup_uncharge(struct page *page);
+void mem_cgroup_uncharge_list(struct list_head *page_list);
+
+void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
+ bool lrucare);
+
+struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
+struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
+
+bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
+ struct mem_cgroup *root);
+bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
+
+extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
+extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+
+extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
+extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
+
+static inline bool mm_match_cgroup(struct mm_struct *mm,
+ struct mem_cgroup *memcg)
+{
+ struct mem_cgroup *task_memcg;
+ bool match = false;
+
+ rcu_read_lock();
+ task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (task_memcg)
+ match = mem_cgroup_is_descendant(task_memcg, memcg);
+ rcu_read_unlock();
+ return match;
+}
+
+extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
+
+struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
+ struct mem_cgroup *,
+ struct mem_cgroup_reclaim_cookie *);
+void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
+
+/*
+ * For memory reclaim.
+ */
+int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
+bool mem_cgroup_lruvec_online(struct lruvec *lruvec);
+int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
+void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
+extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+ struct task_struct *p);
+
+static inline void mem_cgroup_oom_enable(void)
+{
+ WARN_ON(current->memcg_oom.may_oom);
+ current->memcg_oom.may_oom = 1;
+}
+
+static inline void mem_cgroup_oom_disable(void)
+{
+ WARN_ON(!current->memcg_oom.may_oom);
+ current->memcg_oom.may_oom = 0;
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return p->memcg_oom.memcg;
+}
+
+bool mem_cgroup_oom_synchronize(bool wait);
+
+#ifdef CONFIG_MEMCG_SWAP
+extern int do_swap_account;
+#endif
+
+static inline bool mem_cgroup_disabled(void)
+{
+ if (memory_cgrp_subsys.disabled)
+ return true;
+ return false;
+}
+
+struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page);
+void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx, int val);
+void mem_cgroup_end_page_stat(struct mem_cgroup *memcg);
+
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+{
+ mem_cgroup_update_page_stat(memcg, idx, 1);
+}
+
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+{
+ mem_cgroup_update_page_stat(memcg, idx, -1);
+}
+
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned);
+
+void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
+static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
+ enum vm_event_item idx)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_count_vm_event(mm, idx);
+}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void mem_cgroup_split_huge_fixup(struct page *head);
+#endif
+
+#else /* CONFIG_MEMCG */
+struct mem_cgroup;
+
+static inline void mem_cgroup_events(struct mem_cgroup *memcg,
+ enum mem_cgroup_events_index idx,
+ unsigned int nr)
+{
+}
+
+static inline bool mem_cgroup_low(struct mem_cgroup *root,
+ struct mem_cgroup *memcg)
+{
+ return false;
+}
+
+static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask,
+ struct mem_cgroup **memcgp)
+{
+ *memcgp = NULL;
+ return 0;
+}
+
+static inline void mem_cgroup_commit_charge(struct page *page,
+ struct mem_cgroup *memcg,
+ bool lrucare)
+{
+}
+
+static inline void mem_cgroup_cancel_charge(struct page *page,
+ struct mem_cgroup *memcg)
+{
+}
+
+static inline void mem_cgroup_uncharge(struct page *page)
+{
+}
+
+static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+{
+}
+
+static inline void mem_cgroup_migrate(struct page *oldpage,
+ struct page *newpage,
+ bool lrucare)
+{
+}
+
+static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
+ struct mem_cgroup *memcg)
+{
+ return &zone->lruvec;
+}
+
+static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
+ struct zone *zone)
+{
+ return &zone->lruvec;
+}
+
+static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
+{
+ return NULL;
+}
+
+static inline bool mm_match_cgroup(struct mm_struct *mm,
+ struct mem_cgroup *memcg)
+{
+ return true;
+}
+
+static inline bool task_in_mem_cgroup(struct task_struct *task,
+ const struct mem_cgroup *memcg)
+{
+ return true;
+}
+
+static inline struct cgroup_subsys_state
+ *mem_cgroup_css(struct mem_cgroup *memcg)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *
+mem_cgroup_iter(struct mem_cgroup *root,
+ struct mem_cgroup *prev,
+ struct mem_cgroup_reclaim_cookie *reclaim)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
+ struct mem_cgroup *prev)
+{
+}
+
+static inline bool mem_cgroup_disabled(void)
+{
+ return true;
+}
+
+static inline int
+mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+{
+ return 1;
+}
+
+static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+{
+ return true;
+}
+
+static inline unsigned long
+mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+{
+ return 0;
+}
+
+static inline void
+mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int increment)
+{
+}
+
+static inline void
+mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+{
+}
+
+static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
+{
+}
+
+static inline void mem_cgroup_oom_enable(void)
+{
+}
+
+static inline void mem_cgroup_oom_disable(void)
+{
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+ return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(bool wait)
+{
+ return false;
+}
+
+static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+{
+}
+
+static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
+ enum mem_cgroup_stat_index idx)
+{
+}
+
+static inline
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+ gfp_t gfp_mask,
+ unsigned long *total_scanned)
+{
+ return 0;
+}
+
+static inline void mem_cgroup_split_huge_fixup(struct page *head)
+{
+}
+
+static inline
+void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+{
+}
+#endif /* CONFIG_MEMCG */
+
+enum {
+ UNDER_LIMIT,
+ SOFT_LIMIT,
+ OVER_LIMIT,
+};
+
+struct sock;
+#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
+void sock_update_memcg(struct sock *sk);
+void sock_release_memcg(struct sock *sk);
+#else
+static inline void sock_update_memcg(struct sock *sk)
+{
+}
+static inline void sock_release_memcg(struct sock *sk)
+{
+}
+#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */
+
+#ifdef CONFIG_MEMCG_KMEM
+extern struct static_key memcg_kmem_enabled_key;
+
+extern int memcg_nr_cache_ids;
+extern void memcg_get_cache_ids(void);
+extern void memcg_put_cache_ids(void);
+
+/*
+ * Helper macro to loop through all memcg-specific caches. Callers must still
+ * check if the cache is valid (it is either valid or NULL).
+ * The slab_mutex must be held when looping through those caches; see the
+ * usage sketch below the macro.
+ */
+#define for_each_memcg_cache_index(_idx) \
+ for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
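+
+/*
+ * Illustrative usage only (not part of the original header): a hedged
+ * sketch of iterating the per-memcg children of a root cache under
+ * slab_mutex.  cache_from_memcg_idx() is a slab-internal lookup helper
+ * named here purely for illustration.
+ *
+ *	int i;
+ *
+ *	mutex_lock(&slab_mutex);
+ *	for_each_memcg_cache_index(i) {
+ *		struct kmem_cache *c = cache_from_memcg_idx(root_cache, i);
+ *		if (!c)
+ *			continue;
+ *		(operate on the per-memcg cache c)
+ *	}
+ *	mutex_unlock(&slab_mutex);
+ */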
+
+static inline bool memcg_kmem_enabled(void)
+{
+ return static_key_false(&memcg_kmem_enabled_key);
+}
+
+bool memcg_kmem_is_active(struct mem_cgroup *memcg);
+
+/*
+ * In general, we'll do everything in our power to not incur any overhead
+ * for non-memcg users for the kmem functions. Not even a function call, if we
+ * can avoid it.
+ *
+ * Therefore, we'll inline all those functions so that in the best case, we'll
+ * see that kmemcg is off for everybody and proceed quickly. If it is on,
+ * we'll still do most of the flag checking inline. We check a lot of
+ * conditions, but because they are pretty simple, they are expected to be
+ * fast.
+ */
+bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
+ int order);
+void __memcg_kmem_commit_charge(struct page *page,
+ struct mem_cgroup *memcg, int order);
+void __memcg_kmem_uncharge_pages(struct page *page, int order);
+
+int memcg_cache_id(struct mem_cgroup *memcg);
+
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
+void __memcg_kmem_put_cache(struct kmem_cache *cachep);
+
+struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
+
+int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
+ unsigned long nr_pages);
+void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
+
+/**
+ * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
+ * @gfp: the gfp allocation flags.
+ * @memcg: a pointer to the memcg this was charged against.
+ * @order: allocation order.
+ *
+ * Returns true if the memcg to which the current task belongs can hold this
+ * allocation.
+ *
+ * We return true automatically if this allocation is not to be accounted to
+ * any memcg.
+ */
+static inline bool
+memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+{
+ if (!memcg_kmem_enabled())
+ return true;
+
+ if (gfp & __GFP_NOACCOUNT)
+ return true;
+ /*
+ * __GFP_NOFAIL allocations will move on even if charging is not
+ * possible. Therefore we don't even try, and have this allocation
+ * unaccounted. We could in theory charge it forcibly, but we hope
+ * those allocations are rare, and won't be worth the trouble.
+ */
+ if (gfp & __GFP_NOFAIL)
+ return true;
+ if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+ return true;
+
+	/* If the task is dying, just let it go. */
+ if (unlikely(fatal_signal_pending(current)))
+ return true;
+
+ return __memcg_kmem_newpage_charge(gfp, memcg, order);
+}
+
+/**
+ * memcg_kmem_uncharge_pages: uncharge pages from memcg
+ * @page: pointer to struct page being freed
+ * @order: allocation order.
+ */
+static inline void
+memcg_kmem_uncharge_pages(struct page *page, int order)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_uncharge_pages(page, order);
+}
+
+/**
+ * memcg_kmem_commit_charge: embeds correct memcg in a page
+ * @page: pointer to struct page recently allocated
+ * @memcg: the memcg structure we charged against
+ * @order: allocation order.
+ *
+ * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
+ * failure of the allocation. If @page is NULL, this function will revert the
+ * charges. Otherwise, it will commit @page to @memcg.
+ */
+static inline void
+memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+{
+ if (memcg_kmem_enabled() && memcg)
+ __memcg_kmem_commit_charge(page, memcg, order);
+}
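+
+/*
+ * Illustrative usage only (not part of the original header): a minimal,
+ * hedged sketch of how a page allocation path pairs the charge helpers;
+ * error handling is elided.
+ *
+ *	struct mem_cgroup *memcg;
+ *	struct page *page;
+ *
+ *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
+ *		return NULL;
+ *	page = alloc_pages(gfp, order);
+ *	memcg_kmem_commit_charge(page, memcg, order);
+ *	(commit reverts the charge automatically when page is NULL)
+ *	...
+ *	memcg_kmem_uncharge_pages(page, order);	(when the pages are freed)
+ */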
+
+/**
+ * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
+ * @cachep: the original global kmem cache
+ * @gfp: allocation flags.
+ *
+ * All memory allocated from a per-memcg cache is charged to the owner memcg.
+ */
+static __always_inline struct kmem_cache *
+memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+{
+ if (!memcg_kmem_enabled())
+ return cachep;
+ if (gfp & __GFP_NOACCOUNT)
+ return cachep;
+ if (gfp & __GFP_NOFAIL)
+ return cachep;
+ if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
+ return cachep;
+ if (unlikely(fatal_signal_pending(current)))
+ return cachep;
+
+ return __memcg_kmem_get_cache(cachep);
+}
+
+static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
+{
+ if (memcg_kmem_enabled())
+ __memcg_kmem_put_cache(cachep);
+}
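+
+/*
+ * Illustrative usage only (not part of the original header): a hedged
+ * sketch of how the slab hot path brackets an allocation so the object
+ * is charged to the current task's memcg.
+ *
+ *	struct kmem_cache *s;
+ *
+ *	s = memcg_kmem_get_cache(cachep, gfpflags);
+ *	(allocate the object from s instead of cachep)
+ *	memcg_kmem_put_cache(s);
+ */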
+
+static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+ if (!memcg_kmem_enabled())
+ return NULL;
+ return __mem_cgroup_from_kmem(ptr);
+}
+#else
+#define for_each_memcg_cache_index(_idx) \
+ for (; NULL; )
+
+static inline bool memcg_kmem_enabled(void)
+{
+ return false;
+}
+
+static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+{
+ return false;
+}
+
+static inline bool
+memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
+{
+ return true;
+}
+
+static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
+{
+}
+
+static inline void
+memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
+{
+}
+
+static inline int memcg_cache_id(struct mem_cgroup *memcg)
+{
+ return -1;
+}
+
+static inline void memcg_get_cache_ids(void)
+{
+}
+
+static inline void memcg_put_cache_ids(void)
+{
+}
+
+static inline struct kmem_cache *
+memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
+{
+ return cachep;
+}
+
+static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
+{
+}
+
+static inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
+{
+ return NULL;
+}
+#endif /* CONFIG_MEMCG_KMEM */
+#endif /* _LINUX_MEMCONTROL_H */
+
diff --git a/include/linux/memory.h b/include/linux/memory.h
new file mode 100644
index 000000000..8b8d8d123
--- /dev/null
+++ b/include/linux/memory.h
@@ -0,0 +1,156 @@
+/*
+ * include/linux/memory.h - generic memory definition
+ *
+ * This is mainly for topological representation. We define the
+ * basic "struct memory_block" here, which can be embedded in per-arch
+ * definitions or NUMA information.
+ *
+ * Basic handling of the devices is done in drivers/base/memory.c
+ * and system devices are handled in drivers/base/sys.c.
+ *
+ * Memory blocks are exported via sysfs in the class/memory/devices/
+ * directory.
+ *
+ */
+#ifndef _LINUX_MEMORY_H_
+#define _LINUX_MEMORY_H_
+
+#include <linux/node.h>
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+
+#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS)
+
+struct memory_block {
+ unsigned long start_section_nr;
+ unsigned long end_section_nr;
+ unsigned long state; /* serialized by the dev->lock */
+ int section_count; /* serialized by mem_sysfs_mutex */
+ int online_type; /* for passing data to online routine */
+	int phys_device;		/* to which FRU does this belong? */
+ void *hw; /* optional pointer to fw/hw data */
+ int (*phys_callback)(struct memory_block *);
+ struct device dev;
+};
+
+int arch_get_memory_phys_device(unsigned long start_pfn);
+unsigned long memory_block_size_bytes(void);
+
+/* These states are exposed to userspace as text strings in sysfs */
+#define MEM_ONLINE (1<<0) /* exposed to userspace */
+#define MEM_GOING_OFFLINE (1<<1) /* exposed to userspace */
+#define MEM_OFFLINE (1<<2) /* exposed to userspace */
+#define MEM_GOING_ONLINE (1<<3)
+#define MEM_CANCEL_ONLINE (1<<4)
+#define MEM_CANCEL_OFFLINE (1<<5)
+
+struct memory_notify {
+ unsigned long start_pfn;
+ unsigned long nr_pages;
+ int status_change_nid_normal;
+ int status_change_nid_high;
+ int status_change_nid;
+};
+
+/*
+ * During pageblock isolation, count the number of pages within the
+ * range [start_pfn, start_pfn + nr_pages) which are owned by code
+ * in the notifier chain.
+ */
+#define MEM_ISOLATE_COUNT (1<<0)
+
+struct memory_isolate_notify {
+ unsigned long start_pfn; /* Start of range to check */
+ unsigned int nr_pages; /* # pages in range to check */
+ unsigned int pages_found; /* # pages owned found by callbacks */
+};
+
+struct notifier_block;
+struct mem_section;
+
+/*
+ * Priorities for the hotplug memory callback routines (stored in decreasing
+ * order in the callback chain)
+ */
+#define SLAB_CALLBACK_PRI 1
+#define IPC_CALLBACK_PRI 10
+
+#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
+static inline int memory_dev_init(void)
+{
+ return 0;
+}
+static inline int register_memory_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void unregister_memory_notifier(struct notifier_block *nb)
+{
+}
+static inline int memory_notify(unsigned long val, void *v)
+{
+ return 0;
+}
+static inline int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+}
+static inline int memory_isolate_notify(unsigned long val, void *v)
+{
+ return 0;
+}
+#else
+extern int register_memory_notifier(struct notifier_block *nb);
+extern void unregister_memory_notifier(struct notifier_block *nb);
+extern int register_memory_isolate_notifier(struct notifier_block *nb);
+extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
+extern int register_new_memory(int, struct mem_section *);
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern int unregister_memory_section(struct mem_section *);
+#endif
+extern int memory_dev_init(void);
+extern int memory_notify(unsigned long val, void *v);
+extern int memory_isolate_notify(unsigned long val, void *v);
+extern struct memory_block *find_memory_block_hinted(struct mem_section *,
+ struct memory_block *);
+extern struct memory_block *find_memory_block(struct mem_section *);
+#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
+#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define hotplug_memory_notifier(fn, pri) ({ \
+ static __meminitdata struct notifier_block fn##_mem_nb =\
+ { .notifier_call = fn, .priority = pri };\
+ register_memory_notifier(&fn##_mem_nb); \
+})
+#define register_hotmemory_notifier(nb) register_memory_notifier(nb)
+#define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb)
+#else
+#define hotplug_memory_notifier(fn, pri) ({ 0; })
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; })
+#define unregister_hotmemory_notifier(nb) ({ (void)(nb); })
+#endif
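+
+/*
+ * Illustrative usage only (not part of the original header): a hedged
+ * sketch of a subsystem reacting to hotplug events; the callback name and
+ * priority constant are hypothetical.
+ *
+ *	static int foo_mem_callback(struct notifier_block *nb,
+ *				    unsigned long action, void *arg)
+ *	{
+ *		struct memory_notify *mn = arg;
+ *
+ *		switch (action) {
+ *		case MEM_ONLINE:
+ *			(grow per-node state for mn->status_change_nid)
+ *			break;
+ *		case MEM_OFFLINE:
+ *			(shrink it again)
+ *			break;
+ *		}
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	hotplug_memory_notifier(foo_mem_callback, FOO_CALLBACK_PRI);
+ */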
+
+/*
+ * 'struct memory_accessor' is a generic interface to provide
+ * in-kernel access to persistent memory such as i2c or SPI EEPROMs
+ */
+struct memory_accessor {
+ ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset,
+ size_t count);
+ ssize_t (*write)(struct memory_accessor *, const char *buf,
+ off_t offset, size_t count);
+};
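+
+/*
+ * Illustrative usage only (not part of the original header): a hedged
+ * sketch of a consumer reading through a memory_accessor handed over by
+ * an EEPROM driver (for instance via a board setup() callback).
+ *
+ *	char serial[16];
+ *	ssize_t ret;
+ *
+ *	ret = macc->read(macc, serial, 0, sizeof(serial));
+ *	if (ret != sizeof(serial))
+ *		(handle a short read or an error code)
+ */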
+
+/*
+ * Kernel text modification mutex, used for code patching. Users of this lock
+ * can sleep.
+ */
+extern struct mutex text_mutex;
+
+#endif /* _LINUX_MEMORY_H_ */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
new file mode 100644
index 000000000..6ffa0ac7f
--- /dev/null
+++ b/include/linux/memory_hotplug.h
@@ -0,0 +1,279 @@
+#ifndef __LINUX_MEMORY_HOTPLUG_H
+#define __LINUX_MEMORY_HOTPLUG_H
+
+#include <linux/mmzone.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/bug.h>
+
+struct page;
+struct zone;
+struct pglist_data;
+struct mem_section;
+struct memory_block;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+/*
+ * Types for free bootmem stored in page->lru.next. These have to be in
+ * some random range in unsigned long space for debugging purposes.
+ */
+enum {
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
+ SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE,
+ MIX_SECTION_INFO,
+ NODE_INFO,
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = NODE_INFO,
+};
+
+/* Types to control the zone type of onlined and offlined memory */
+enum {
+ MMOP_OFFLINE = -1,
+ MMOP_ONLINE_KEEP,
+ MMOP_ONLINE_KERNEL,
+ MMOP_ONLINE_MOVABLE,
+};
+
+/*
+ * pgdat resizing functions
+ */
+static inline
+void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_lock_irqsave(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
+{
+ spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_init(struct pglist_data *pgdat)
+{
+ spin_lock_init(&pgdat->node_size_lock);
+}
+/*
+ * Zone resizing functions
+ *
+ * Note: any attempt to resize a zone should have pgdat_resize_lock() and
+ * zone_span_writelock() both held.  This ensures the size of a zone can't
+ * be changed while pgdat_resize_lock() is held.
+ */
+static inline unsigned zone_span_seqbegin(struct zone *zone)
+{
+ return read_seqbegin(&zone->span_seqlock);
+}
+static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
+{
+ return read_seqretry(&zone->span_seqlock, iv);
+}
+static inline void zone_span_writelock(struct zone *zone)
+{
+ write_seqlock(&zone->span_seqlock);
+}
+static inline void zone_span_writeunlock(struct zone *zone)
+{
+ write_sequnlock(&zone->span_seqlock);
+}
+static inline void zone_seqlock_init(struct zone *zone)
+{
+ seqlock_init(&zone->span_seqlock);
+}
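+
+/*
+ * Illustrative usage only (not part of the original header): a minimal,
+ * hedged sketch of a lockless reader sampling a zone's span with the
+ * seqlock helpers above, retrying if a concurrent resize raced with it.
+ *
+ *	unsigned long start_pfn, nr_pages;
+ *	unsigned seq;
+ *
+ *	do {
+ *		seq = zone_span_seqbegin(zone);
+ *		start_pfn = zone->zone_start_pfn;
+ *		nr_pages = zone->spanned_pages;
+ *	} while (zone_span_seqretry(zone, seq));
+ */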
+extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
+extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
+extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+/* VM interface that may be used by firmware interface */
+extern int online_pages(unsigned long, unsigned long, int);
+extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern void __offline_isolated_pages(unsigned long, unsigned long);
+
+typedef void (*online_page_callback_t)(struct page *page);
+
+extern int set_online_page_callback(online_page_callback_t callback);
+extern int restore_online_page_callback(online_page_callback_t callback);
+
+extern void __online_page_set_limits(struct page *page);
+extern void __online_page_increment_counters(struct page *page);
+extern void __online_page_free(struct page *page);
+
+extern int try_online_node(int nid);
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+extern bool is_pageblock_removable_nolock(struct page *page);
+extern int arch_remove_memory(u64 start, u64 size);
+extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages);
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+/* reasonably generic interface to expand the physical pages in a zone */
+extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages);
+
+#ifdef CONFIG_NUMA
+extern int memory_add_physaddr_to_nid(u64 start);
+#else
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
+/*
+ * For supporting node-hotadd, we have to allocate a new pgdat.
+ *
+ * If an arch has generic style NODE_DATA(),
+ * node_data[nid] = kzalloc() works well. But it depends on the architecture.
+ *
+ * In general, generic_alloc_nodedata() is used.
+ * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
+ *
+ */
+extern pg_data_t *arch_alloc_nodedata(int nid);
+extern void arch_free_nodedata(pg_data_t *pgdat);
+extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
+
+#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
+#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+
+#ifdef CONFIG_NUMA
+/*
+ * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
+ * XXX: kmalloc_node() can't be used to get the new node's memory at this time,
+ * because the pgdat for the new node is not allocated/initialized yet itself.
+ * To use the new node's memory, more consideration will be necessary.
+ */
+#define generic_alloc_nodedata(nid) \
+({ \
+ kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
+})
+/*
+ * This definition is just for error path in node hotadd.
+ * For node hotremove, we have to replace this.
+ */
+#define generic_free_nodedata(pgdat) kfree(pgdat)
+
+extern pg_data_t *node_data[];
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+ node_data[nid] = pgdat;
+}
+
+#else /* !CONFIG_NUMA */
+
+/* never called */
+static inline pg_data_t *generic_alloc_nodedata(int nid)
+{
+ BUG();
+ return NULL;
+}
+static inline void generic_free_nodedata(pg_data_t *pgdat)
+{
+}
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
+extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
+#else
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+#endif
+extern void put_page_bootmem(struct page *page);
+extern void get_page_bootmem(unsigned long info, struct page *page,
+ unsigned long type);
+
+void get_online_mems(void);
+void put_online_mems(void);
+
+void mem_hotplug_begin(void);
+void mem_hotplug_done(void);
+
+#else /* ! CONFIG_MEMORY_HOTPLUG */
+/*
+ * Stub functions for when hotplug is off
+ */
+static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
+
+static inline unsigned zone_span_seqbegin(struct zone *zone)
+{
+ return 0;
+}
+static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
+{
+ return 0;
+}
+static inline void zone_span_writelock(struct zone *zone) {}
+static inline void zone_span_writeunlock(struct zone *zone) {}
+static inline void zone_seqlock_init(struct zone *zone) {}
+
+static inline int mhp_notimplemented(const char *func)
+{
+ printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
+ dump_stack();
+ return -ENOSYS;
+}
+
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
+static inline int try_online_node(int nid)
+{
+ return 0;
+}
+
+static inline void get_online_mems(void) {}
+static inline void put_online_mems(void) {}
+
+static inline void mem_hotplug_begin(void) {}
+static inline void mem_hotplug_done(void) {}
+
+#endif /* ! CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+
+extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
+extern void try_offline_node(int nid);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
+extern void remove_memory(int nid, u64 start, u64 size);
+
+#else
+static inline int is_mem_section_removable(unsigned long pfn,
+ unsigned long nr_pages)
+{
+ return 0;
+}
+
+static inline void try_offline_node(int nid) {}
+
+static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
+{
+ return -EINVAL;
+}
+
+static inline void remove_memory(int nid, u64 start, u64 size) {}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
+ void *arg, int (*func)(struct memory_block *, void *));
+extern int add_memory(int nid, u64 start, u64 size);
+extern int zone_for_memory(int nid, u64 start, u64 size, int zone_default);
+extern int arch_add_memory(int nid, u64 start, u64 size);
+extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
+extern bool is_memblock_offlined(struct memory_block *mem);
+extern void remove_memory(int nid, u64 start, u64 size);
+extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn);
+extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
+extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
+ unsigned long pnum);
+
+#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
new file mode 100644
index 000000000..3d385c81c
--- /dev/null
+++ b/include/linux/mempolicy.h
@@ -0,0 +1,295 @@
+/*
+ * NUMA memory policies for Linux.
+ * Copyright 2003,2004 Andi Kleen SuSE Labs
+ */
+#ifndef _LINUX_MEMPOLICY_H
+#define _LINUX_MEMPOLICY_H 1
+
+
+#include <linux/mmzone.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/nodemask.h>
+#include <linux/pagemap.h>
+#include <uapi/linux/mempolicy.h>
+
+struct mm_struct;
+
+#ifdef CONFIG_NUMA
+
+/*
+ * Describe a memory policy.
+ *
+ * A mempolicy can be either associated with a process or with a VMA.
+ * For VMA related allocations the VMA policy is preferred, otherwise
+ * the process policy is used. Interrupts ignore the memory policy
+ * of the current process.
+ *
+ * Locking policy for interleave:
+ * In process context there is no locking because only the process accesses
+ * its own state. All vma manipulation is somewhat protected by a down_read on
+ * mmap_sem.
+ *
+ * Freeing policy:
+ * Mempolicy objects are reference counted. A mempolicy will be freed when
+ * mpol_put() decrements the reference count to zero.
+ *
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
+ * to the new storage. The reference count of the new object is initialized
+ * to 1, representing the caller of mpol_dup().
+ */
+struct mempolicy {
+ atomic_t refcnt;
+ unsigned short mode; /* See MPOL_* above */
+ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
+ union {
+ short preferred_node; /* preferred */
+ nodemask_t nodes; /* interleave/bind */
+ /* undefined for default */
+ } v;
+ union {
+ nodemask_t cpuset_mems_allowed; /* relative to these nodes */
+ nodemask_t user_nodemask; /* nodemask passed by user */
+ } w;
+};
+
+/*
+ * Support for managing mempolicy data objects (clone, copy, destroy)
+ * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
+ */
+
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
+{
+ if (pol)
+ __mpol_put(pol);
+}
+
+/*
+ * Does mempolicy pol need explicit unref after use?
+ * Currently only needed for shared policies.
+ */
+static inline int mpol_needs_cond_ref(struct mempolicy *pol)
+{
+ return (pol && (pol->flags & MPOL_F_SHARED));
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+ if (mpol_needs_cond_ref(pol))
+ __mpol_put(pol);
+}
+
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
+{
+ if (pol)
+ pol = __mpol_dup(pol);
+ return pol;
+}
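+
+/*
+ * Illustrative usage only (not part of the original header): a minimal,
+ * hedged sketch of the reference-counting pattern described above when a
+ * policy is copied for private use.
+ *
+ *	struct mempolicy *new;
+ *
+ *	new = mpol_dup(old);
+ *	if (IS_ERR(new))
+ *		return PTR_ERR(new);
+ *	(use new)
+ *	mpol_put(new);
+ */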
+
+#define vma_policy(vma) ((vma)->vm_policy)
+
+static inline void mpol_get(struct mempolicy *pol)
+{
+ if (pol)
+ atomic_inc(&pol->refcnt);
+}
+
+extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
+static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
+{
+ if (a == b)
+ return true;
+ return __mpol_equal(a, b);
+}
+
+/*
+ * Tree of shared policies for a shared memory region.
+ * Maintain the policies in a pseudo mm that contains vmas. The vmas
+ * carry the policy. As a special twist the pseudo mm is indexed in pages, not
+ * bytes, so that we can work with shared memory segments whose size in
+ * bytes would not fit in an unsigned long.
+ */
+
+struct sp_node {
+ struct rb_node nd;
+ unsigned long start, end;
+ struct mempolicy *policy;
+};
+
+struct shared_policy {
+ struct rb_root root;
+ spinlock_t lock;
+};
+
+int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
+void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
+int mpol_set_shared_policy(struct shared_policy *info,
+ struct vm_area_struct *vma,
+ struct mempolicy *new);
+void mpol_free_shared_policy(struct shared_policy *p);
+struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
+ unsigned long idx);
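+
+/*
+ * Illustrative usage only (not part of the original header): a hedged
+ * sketch of how a tmpfs-style filesystem keeps a per-inode shared policy
+ * tree; "info" stands for a hypothetical per-inode container.
+ *
+ *	mpol_shared_policy_init(&info->policy, mpol);		(at create time)
+ *	pol = mpol_shared_policy_lookup(&info->policy, index);	(at fault time)
+ *	...
+ *	mpol_free_shared_policy(&info->policy);			(at destroy time)
+ */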
+
+struct mempolicy *get_task_policy(struct task_struct *p);
+struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr);
+bool vma_policy_mof(struct vm_area_struct *vma);
+
+extern void numa_default_policy(void);
+extern void numa_policy_init(void);
+extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
+ enum mpol_rebind_step step);
+extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
+
+extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+ unsigned long addr, gfp_t gfp_flags,
+ struct mempolicy **mpol, nodemask_t **nodemask);
+extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
+extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
+ const nodemask_t *mask);
+extern unsigned int mempolicy_slab_node(void);
+
+extern enum zone_type policy_zone;
+
+static inline void check_highest_zone(enum zone_type k)
+{
+ if (k > policy_zone && k != ZONE_MOVABLE)
+ policy_zone = k;
+}
+
+int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags);
+
+
+#ifdef CONFIG_TMPFS
+extern int mpol_parse_str(char *str, struct mempolicy **mpol);
+#endif
+
+extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
+
+/* Check if a vma is migratable */
+static inline int vma_migratable(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ return 0;
+
+#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+ if (vma->vm_flags & VM_HUGETLB)
+ return 0;
+#endif
+
+ /*
+ * Migration allocates pages in the highest zone. If we cannot
+ * do so then migration (at least from node to node) is not
+ * possible.
+ */
+ if (vma->vm_file &&
+ gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
+ < policy_zone)
+ return 0;
+ return 1;
+}
+
+extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+
+#else
+
+struct mempolicy {};
+
+static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
+{
+ return true;
+}
+
+static inline void mpol_put(struct mempolicy *p)
+{
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+}
+
+static inline void mpol_get(struct mempolicy *pol)
+{
+}
+
+struct shared_policy {};
+
+static inline void mpol_shared_policy_init(struct shared_policy *sp,
+ struct mempolicy *mpol)
+{
+}
+
+static inline void mpol_free_shared_policy(struct shared_policy *p)
+{
+}
+
+#define vma_policy(vma) NULL
+
+static inline int
+vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
+{
+ return 0;
+}
+
+static inline void numa_policy_init(void)
+{
+}
+
+static inline void numa_default_policy(void)
+{
+}
+
+static inline void mpol_rebind_task(struct task_struct *tsk,
+ const nodemask_t *new,
+ enum mpol_rebind_step step)
+{
+}
+
+static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+}
+
+static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+ unsigned long addr, gfp_t gfp_flags,
+ struct mempolicy **mpol, nodemask_t **nodemask)
+{
+ *mpol = NULL;
+ *nodemask = NULL;
+ return node_zonelist(0, gfp_flags);
+}
+
+static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
+{
+ return false;
+}
+
+static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
+ const nodemask_t *to, int flags)
+{
+ return 0;
+}
+
+static inline void check_highest_zone(int k)
+{
+}
+
+#ifdef CONFIG_TMPFS
+static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
+{
+ return 1; /* error */
+}
+#endif
+
+static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+ unsigned long address)
+{
+ return -1; /* no node preference */
+}
+
+#endif /* CONFIG_NUMA */
+#endif
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
new file mode 100644
index 000000000..69b6951e8
--- /dev/null
+++ b/include/linux/mempool.h
@@ -0,0 +1,75 @@
+/*
+ * memory buffer pool support
+ */
+#ifndef _LINUX_MEMPOOL_H
+#define _LINUX_MEMPOOL_H
+
+#include <linux/wait.h>
+
+struct kmem_cache;
+
+typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
+typedef void (mempool_free_t)(void *element, void *pool_data);
+
+typedef struct mempool_s {
+ spinlock_t lock;
+ int min_nr; /* nr of elements at *elements */
+ int curr_nr; /* Current nr of elements at *elements */
+ void **elements;
+
+ void *pool_data;
+ mempool_alloc_t *alloc;
+ mempool_free_t *free;
+ wait_queue_head_t wait;
+} mempool_t;
+
+extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data);
+extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
+ mempool_free_t *free_fn, void *pool_data,
+ gfp_t gfp_mask, int nid);
+
+extern int mempool_resize(mempool_t *pool, int new_min_nr);
+extern void mempool_destroy(mempool_t *pool);
+extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
+extern void mempool_free(void *element, mempool_t *pool);
+
+/*
+ * A mempool_alloc_t and mempool_free_t that get the memory from
+ * a slab cache that is passed in through pool_data.
+ * Note: the slab cache must not have a ctor function.
+ */
+void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
+void mempool_free_slab(void *element, void *pool_data);
+static inline mempool_t *
+mempool_create_slab_pool(int min_nr, struct kmem_cache *kc)
+{
+ return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
+ (void *) kc);
+}
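+
+/*
+ * Illustrative usage only (not part of the original header): a minimal,
+ * hedged sketch of a driver reserving a few objects from its own slab
+ * cache so allocations in the I/O path can wait for a reserved element
+ * instead of failing; "my_cache" is a hypothetical kmem_cache.
+ *
+ *	mempool_t *pool;
+ *	void *obj;
+ *
+ *	pool = mempool_create_slab_pool(4, my_cache);
+ *	if (!pool)
+ *		return -ENOMEM;
+ *
+ *	obj = mempool_alloc(pool, GFP_NOIO);
+ *	...
+ *	mempool_free(obj, pool);
+ *	mempool_destroy(pool);
+ */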
+
+/*
+ * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the
+ * amount of memory specified by pool_data
+ */
+void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
+void mempool_kfree(void *element, void *pool_data);
+static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
+{
+ return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
+ (void *) size);
+}
+
+/*
+ * A mempool_alloc_t and mempool_free_t for a simple page allocator that
+ * allocates pages of the order specified by pool_data
+ */
+void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
+void mempool_free_pages(void *element, void *pool_data);
+static inline mempool_t *mempool_create_page_pool(int min_nr, int order)
+{
+ return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages,
+ (void *)(long)order);
+}
+
+#endif /* _LINUX_MEMPOOL_H */
diff --git a/include/linux/memstick.h b/include/linux/memstick.h
new file mode 100644
index 000000000..690c35a9d
--- /dev/null
+++ b/include/linux/memstick.h
@@ -0,0 +1,347 @@
+/*
+ * Sony MemoryStick support
+ *
+ * Copyright (C) 2007 Alex Dubov <oakad@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _MEMSTICK_H
+#define _MEMSTICK_H
+
+#include <linux/workqueue.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+
+/*** Hardware based structures ***/
+
+struct ms_status_register {
+ unsigned char reserved;
+ unsigned char interrupt;
+#define MEMSTICK_INT_CMDNAK 0x01
+#define MEMSTICK_INT_IOREQ 0x08
+#define MEMSTICK_INT_IOBREQ 0x10
+#define MEMSTICK_INT_BREQ 0x20
+#define MEMSTICK_INT_ERR 0x40
+#define MEMSTICK_INT_CED 0x80
+
+ unsigned char status0;
+#define MEMSTICK_STATUS0_WP 0x01
+#define MEMSTICK_STATUS0_SL 0x02
+#define MEMSTICK_STATUS0_BF 0x10
+#define MEMSTICK_STATUS0_BE 0x20
+#define MEMSTICK_STATUS0_FB0 0x40
+#define MEMSTICK_STATUS0_MB 0x80
+
+ unsigned char status1;
+#define MEMSTICK_STATUS1_UCFG 0x01
+#define MEMSTICK_STATUS1_FGER 0x02
+#define MEMSTICK_STATUS1_UCEX 0x04
+#define MEMSTICK_STATUS1_EXER 0x08
+#define MEMSTICK_STATUS1_UCDT 0x10
+#define MEMSTICK_STATUS1_DTER 0x20
+#define MEMSTICK_STATUS1_FB1 0x40
+#define MEMSTICK_STATUS1_MB 0x80
+} __attribute__((packed));
+
+struct ms_id_register {
+ unsigned char type;
+ unsigned char if_mode;
+ unsigned char category;
+ unsigned char class;
+} __attribute__((packed));
+
+struct ms_param_register {
+ unsigned char system;
+#define MEMSTICK_SYS_PAM 0x08
+#define MEMSTICK_SYS_BAMD 0x80
+
+ unsigned char block_address_msb;
+ unsigned short block_address;
+ unsigned char cp;
+#define MEMSTICK_CP_BLOCK 0x00
+#define MEMSTICK_CP_PAGE 0x20
+#define MEMSTICK_CP_EXTRA 0x40
+#define MEMSTICK_CP_OVERWRITE 0x80
+
+ unsigned char page_address;
+} __attribute__((packed));
+
+struct ms_extra_data_register {
+ unsigned char overwrite_flag;
+#define MEMSTICK_OVERWRITE_UDST 0x10
+#define MEMSTICK_OVERWRITE_PGST1 0x20
+#define MEMSTICK_OVERWRITE_PGST0 0x40
+#define MEMSTICK_OVERWRITE_BKST 0x80
+
+ unsigned char management_flag;
+#define MEMSTICK_MANAGEMENT_SYSFLG 0x04
+#define MEMSTICK_MANAGEMENT_ATFLG 0x08
+#define MEMSTICK_MANAGEMENT_SCMS1 0x10
+#define MEMSTICK_MANAGEMENT_SCMS0 0x20
+
+ unsigned short logical_address;
+} __attribute__((packed));
+
+struct ms_register {
+ struct ms_status_register status;
+ struct ms_id_register id;
+ unsigned char reserved[8];
+ struct ms_param_register param;
+ struct ms_extra_data_register extra_data;
+} __attribute__((packed));
+
+struct mspro_param_register {
+ unsigned char system;
+#define MEMSTICK_SYS_PAR4 0x00
+#define MEMSTICK_SYS_PAR8 0x40
+#define MEMSTICK_SYS_SERIAL 0x80
+
+ __be16 data_count;
+ __be32 data_address;
+ unsigned char tpc_param;
+} __attribute__((packed));
+
+struct mspro_io_info_register {
+ unsigned char version;
+ unsigned char io_category;
+ unsigned char current_req;
+ unsigned char card_opt_info;
+ unsigned char rdy_wait_time;
+} __attribute__((packed));
+
+struct mspro_io_func_register {
+ unsigned char func_enable;
+ unsigned char func_select;
+ unsigned char func_intmask;
+ unsigned char transfer_mode;
+} __attribute__((packed));
+
+struct mspro_io_cmd_register {
+ unsigned short tpc_param;
+ unsigned short data_count;
+ unsigned int data_address;
+} __attribute__((packed));
+
+struct mspro_register {
+ struct ms_status_register status;
+ struct ms_id_register id;
+ unsigned char reserved0[8];
+ struct mspro_param_register param;
+ unsigned char reserved1[8];
+ struct mspro_io_info_register io_info;
+ struct mspro_io_func_register io_func;
+ unsigned char reserved2[7];
+ struct mspro_io_cmd_register io_cmd;
+ unsigned char io_int;
+ unsigned char io_int_func;
+} __attribute__((packed));
+
+struct ms_register_addr {
+ unsigned char r_offset;
+ unsigned char r_length;
+ unsigned char w_offset;
+ unsigned char w_length;
+} __attribute__((packed));
+
+enum memstick_tpc {
+ MS_TPC_READ_MG_STATUS = 0x01,
+ MS_TPC_READ_LONG_DATA = 0x02,
+ MS_TPC_READ_SHORT_DATA = 0x03,
+ MS_TPC_READ_MG_DATA = 0x03,
+ MS_TPC_READ_REG = 0x04,
+ MS_TPC_READ_QUAD_DATA = 0x05,
+ MS_TPC_READ_IO_DATA = 0x05,
+ MS_TPC_GET_INT = 0x07,
+ MS_TPC_SET_RW_REG_ADRS = 0x08,
+ MS_TPC_EX_SET_CMD = 0x09,
+ MS_TPC_WRITE_QUAD_DATA = 0x0a,
+ MS_TPC_WRITE_IO_DATA = 0x0a,
+ MS_TPC_WRITE_REG = 0x0b,
+ MS_TPC_WRITE_SHORT_DATA = 0x0c,
+ MS_TPC_WRITE_MG_DATA = 0x0c,
+ MS_TPC_WRITE_LONG_DATA = 0x0d,
+ MS_TPC_SET_CMD = 0x0e
+};
+
+enum memstick_command {
+ MS_CMD_BLOCK_END = 0x33,
+ MS_CMD_RESET = 0x3c,
+ MS_CMD_BLOCK_WRITE = 0x55,
+ MS_CMD_SLEEP = 0x5a,
+ MS_CMD_BLOCK_ERASE = 0x99,
+ MS_CMD_BLOCK_READ = 0xaa,
+ MS_CMD_CLEAR_BUF = 0xc3,
+ MS_CMD_FLASH_STOP = 0xcc,
+ MS_CMD_LOAD_ID = 0x60,
+ MS_CMD_CMP_ICV = 0x7f,
+ MSPRO_CMD_FORMAT = 0x10,
+ MSPRO_CMD_SLEEP = 0x11,
+ MSPRO_CMD_WAKEUP = 0x12,
+ MSPRO_CMD_READ_DATA = 0x20,
+ MSPRO_CMD_WRITE_DATA = 0x21,
+ MSPRO_CMD_READ_ATRB = 0x24,
+ MSPRO_CMD_STOP = 0x25,
+ MSPRO_CMD_ERASE = 0x26,
+ MSPRO_CMD_READ_QUAD = 0x27,
+ MSPRO_CMD_WRITE_QUAD = 0x28,
+ MSPRO_CMD_SET_IBD = 0x46,
+ MSPRO_CMD_GET_IBD = 0x47,
+ MSPRO_CMD_IN_IO_DATA = 0xb0,
+ MSPRO_CMD_OUT_IO_DATA = 0xb1,
+ MSPRO_CMD_READ_IO_ATRB = 0xb2,
+ MSPRO_CMD_IN_IO_FIFO = 0xb3,
+ MSPRO_CMD_OUT_IO_FIFO = 0xb4,
+ MSPRO_CMD_IN_IOM = 0xb5,
+ MSPRO_CMD_OUT_IOM = 0xb6,
+};
+
+/*** Driver structures and functions ***/
+
+enum memstick_param { MEMSTICK_POWER = 1, MEMSTICK_INTERFACE };
+
+#define MEMSTICK_POWER_OFF 0
+#define MEMSTICK_POWER_ON 1
+
+#define MEMSTICK_SERIAL 0
+#define MEMSTICK_PAR4 1
+#define MEMSTICK_PAR8 2
+
+struct memstick_host;
+struct memstick_driver;
+
+struct memstick_device_id {
+ unsigned char match_flags;
+#define MEMSTICK_MATCH_ALL 0x01
+
+ unsigned char type;
+#define MEMSTICK_TYPE_LEGACY 0xff
+#define MEMSTICK_TYPE_DUO 0x00
+#define MEMSTICK_TYPE_PRO 0x01
+
+ unsigned char category;
+#define MEMSTICK_CATEGORY_STORAGE 0xff
+#define MEMSTICK_CATEGORY_STORAGE_DUO 0x00
+#define MEMSTICK_CATEGORY_IO 0x01
+#define MEMSTICK_CATEGORY_IO_PRO 0x10
+
+ unsigned char class;
+#define MEMSTICK_CLASS_FLASH 0xff
+#define MEMSTICK_CLASS_DUO 0x00
+#define MEMSTICK_CLASS_ROM 0x01
+#define MEMSTICK_CLASS_RO 0x02
+#define MEMSTICK_CLASS_WP 0x03
+};
+
+struct memstick_request {
+ unsigned char tpc;
+ unsigned char data_dir:1,
+ need_card_int:1,
+ long_data:1;
+ unsigned char int_reg;
+ int error;
+ union {
+ struct scatterlist sg;
+ struct {
+ unsigned char data_len;
+ unsigned char data[15];
+ };
+ };
+};
+
+struct memstick_dev {
+ struct memstick_device_id id;
+ struct memstick_host *host;
+ struct ms_register_addr reg_addr;
+ struct completion mrq_complete;
+ struct memstick_request current_mrq;
+
+ /* Check that media driver is still willing to operate the device. */
+ int (*check)(struct memstick_dev *card);
+ /* Get next request from the media driver. */
+ int (*next_request)(struct memstick_dev *card,
+ struct memstick_request **mrq);
+ /* Tell the media driver to stop doing things */
+ void (*stop)(struct memstick_dev *card);
+ /* Allow the media driver to continue */
+ void (*start)(struct memstick_dev *card);
+
+ struct device dev;
+};
+
+struct memstick_host {
+ struct mutex lock;
+ unsigned int id;
+ unsigned int caps;
+#define MEMSTICK_CAP_AUTO_GET_INT 1
+#define MEMSTICK_CAP_PAR4 2
+#define MEMSTICK_CAP_PAR8 4
+
+ struct work_struct media_checker;
+ struct device dev;
+
+ struct memstick_dev *card;
+ unsigned int retries;
+
+ /* Notify the host that some requests are pending. */
+ void (*request)(struct memstick_host *host);
+ /* Set host IO parameters (power, clock, etc). */
+ int (*set_param)(struct memstick_host *host,
+ enum memstick_param param,
+ int value);
+ unsigned long private[0] ____cacheline_aligned;
+};
+
+struct memstick_driver {
+ struct memstick_device_id *id_table;
+ int (*probe)(struct memstick_dev *card);
+ void (*remove)(struct memstick_dev *card);
+ int (*suspend)(struct memstick_dev *card,
+ pm_message_t state);
+ int (*resume)(struct memstick_dev *card);
+
+ struct device_driver driver;
+};
+
+int memstick_register_driver(struct memstick_driver *drv);
+void memstick_unregister_driver(struct memstick_driver *drv);
+
+struct memstick_host *memstick_alloc_host(unsigned int extra,
+ struct device *dev);
+
+int memstick_add_host(struct memstick_host *host);
+void memstick_remove_host(struct memstick_host *host);
+void memstick_free_host(struct memstick_host *host);
+void memstick_detect_change(struct memstick_host *host);
+void memstick_suspend_host(struct memstick_host *host);
+void memstick_resume_host(struct memstick_host *host);
+
+void memstick_init_req_sg(struct memstick_request *mrq, unsigned char tpc,
+ const struct scatterlist *sg);
+void memstick_init_req(struct memstick_request *mrq, unsigned char tpc,
+ const void *buf, size_t length);
+int memstick_next_req(struct memstick_host *host,
+ struct memstick_request **mrq);
+void memstick_new_req(struct memstick_host *host);
+
+int memstick_set_rw_addr(struct memstick_dev *card);
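+
+/*
+ * Illustrative usage only (not part of the original header): a hedged
+ * sketch of a media driver issuing a single TPC and waiting for the host
+ * to complete it, following the request model above.
+ *
+ *	memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+ *			  sizeof(struct ms_register));
+ *	memstick_new_req(card->host);
+ *	wait_for_completion(&card->mrq_complete);
+ *	if (card->current_mrq.error)
+ *		(handle the error)
+ */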
+
+static inline void *memstick_priv(struct memstick_host *host)
+{
+ return (void *)host->private;
+}
+
+static inline void *memstick_get_drvdata(struct memstick_dev *card)
+{
+ return dev_get_drvdata(&card->dev);
+}
+
+static inline void memstick_set_drvdata(struct memstick_dev *card, void *data)
+{
+ dev_set_drvdata(&card->dev, data);
+}
+
+#endif
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
new file mode 100644
index 000000000..97cb283cc
--- /dev/null
+++ b/include/linux/mfd/88pm80x.h
@@ -0,0 +1,372 @@
+/*
+ * Marvell 88PM80x Interface
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ * Qiao Zhou <zhouqiao@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM80X_H
+#define __LINUX_MFD_88PM80X_H
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/atomic.h>
+
+enum {
+ CHIP_INVALID = 0,
+ CHIP_PM800,
+ CHIP_PM805,
+ CHIP_MAX,
+};
+
+enum {
+ PM800_ID_BUCK1 = 0,
+ PM800_ID_BUCK2,
+ PM800_ID_BUCK3,
+ PM800_ID_BUCK4,
+ PM800_ID_BUCK5,
+
+ PM800_ID_LDO1,
+ PM800_ID_LDO2,
+ PM800_ID_LDO3,
+ PM800_ID_LDO4,
+ PM800_ID_LDO5,
+ PM800_ID_LDO6,
+ PM800_ID_LDO7,
+ PM800_ID_LDO8,
+ PM800_ID_LDO9,
+ PM800_ID_LDO10,
+ PM800_ID_LDO11,
+ PM800_ID_LDO12,
+ PM800_ID_LDO13,
+ PM800_ID_LDO14,
+ PM800_ID_LDO15,
+ PM800_ID_LDO16,
+ PM800_ID_LDO17,
+ PM800_ID_LDO18,
+ PM800_ID_LDO19,
+
+ PM800_ID_RG_MAX,
+};
+#define PM800_MAX_REGULATOR PM800_ID_RG_MAX /* 5 Bucks, 19 LDOs */
+#define PM800_NUM_BUCK (5) /*5 Bucks */
+#define PM800_NUM_LDO (19) /* 19 LDOs */
+
+/* page 0 basic: slave adder 0x60 */
+
+#define PM800_STATUS_1 (0x01)
+#define PM800_ONKEY_STS1 (1 << 0)
+#define PM800_EXTON_STS1 (1 << 1)
+#define PM800_CHG_STS1 (1 << 2)
+#define PM800_BAT_STS1 (1 << 3)
+#define PM800_VBUS_STS1 (1 << 4)
+#define PM800_LDO_PGOOD_STS1 (1 << 5)
+#define PM800_BUCK_PGOOD_STS1 (1 << 6)
+
+#define PM800_STATUS_2 (0x02)
+#define PM800_RTC_ALARM_STS2 (1 << 0)
+
+/* Wakeup Registers */
+#define PM800_WAKEUP1 (0x0D)
+
+#define PM800_WAKEUP2 (0x0E)
+#define PM800_WAKEUP2_INV_INT (1 << 0)
+#define PM800_WAKEUP2_INT_CLEAR (1 << 1)
+#define PM800_WAKEUP2_INT_MASK (1 << 2)
+
+#define PM800_POWER_UP_LOG (0x10)
+
+/* Reference and low power registers */
+#define PM800_LOW_POWER1 (0x20)
+#define PM800_LOW_POWER2 (0x21)
+#define PM800_LOW_POWER_CONFIG3 (0x22)
+#define PM800_LOW_POWER_CONFIG4 (0x23)
+
+/* GPIO register */
+#define PM800_GPIO_0_1_CNTRL (0x30)
+#define PM800_GPIO0_VAL (1 << 0)
+#define PM800_GPIO0_GPIO_MODE(x) (x << 1)
+#define PM800_GPIO1_VAL (1 << 4)
+#define PM800_GPIO1_GPIO_MODE(x) (x << 5)
+
+#define PM800_GPIO_2_3_CNTRL (0x31)
+#define PM800_GPIO2_VAL (1 << 0)
+#define PM800_GPIO2_GPIO_MODE(x) (x << 1)
+#define PM800_GPIO3_VAL (1 << 4)
+#define PM800_GPIO3_GPIO_MODE(x) (x << 5)
+#define PM800_GPIO3_MODE_MASK 0x1F
+#define PM800_GPIO3_HEADSET_MODE PM800_GPIO3_GPIO_MODE(6)
+
+#define PM800_GPIO_4_CNTRL (0x32)
+#define PM800_GPIO4_VAL (1 << 0)
+#define PM800_GPIO4_GPIO_MODE(x) (x << 1)
+
+#define PM800_HEADSET_CNTRL (0x38)
+#define PM800_HEADSET_DET_EN (1 << 7)
+#define PM800_HSDET_SLP (1 << 1)
+/* PWM register */
+#define PM800_PWM1 (0x40)
+#define PM800_PWM2 (0x41)
+#define PM800_PWM3 (0x42)
+#define PM800_PWM4 (0x43)
+
+/* RTC Registers */
+#define PM800_RTC_CONTROL (0xD0)
+#define PM800_RTC_MISC1 (0xE1)
+#define PM800_RTC_MISC2 (0xE2)
+#define PM800_RTC_MISC3 (0xE3)
+#define PM800_RTC_MISC4 (0xE4)
+#define PM800_RTC_MISC5 (0xE7)
+/* bit definitions of RTC Register 1 (0xD0) */
+#define PM800_ALARM1_EN (1 << 0)
+#define PM800_ALARM_WAKEUP (1 << 4)
+#define PM800_ALARM (1 << 5)
+#define PM800_RTC1_USE_XO (1 << 7)
+
+/* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
+
+/* buck registers */
+#define PM800_SLEEP_BUCK1 (0x30)
+
+/* BUCK Sleep Mode Register 1: BUCK[1..4] */
+#define PM800_BUCK_SLP1 (0x5A)
+#define PM800_BUCK1_SLP1_SHIFT 0
+#define PM800_BUCK1_SLP1_MASK (0x3 << PM800_BUCK1_SLP1_SHIFT)
+
+/* page 2 GPADC: slave adder 0x02 */
+#define PM800_GPADC_MEAS_EN1 (0x01)
+#define PM800_MEAS_EN1_VBAT (1 << 2)
+#define PM800_GPADC_MEAS_EN2 (0x02)
+#define PM800_MEAS_EN2_RFTMP (1 << 0)
+#define PM800_MEAS_GP0_EN (1 << 2)
+#define PM800_MEAS_GP1_EN (1 << 3)
+#define PM800_MEAS_GP2_EN (1 << 4)
+#define PM800_MEAS_GP3_EN (1 << 5)
+#define PM800_MEAS_GP4_EN (1 << 6)
+
+#define PM800_GPADC_MISC_CONFIG1 (0x05)
+#define PM800_GPADC_MISC_CONFIG2 (0x06)
+#define PM800_GPADC_MISC_GPFSM_EN (1 << 0)
+#define PM800_GPADC_SLOW_MODE(x) (x << 3)
+
+#define PM800_GPADC_MISC_CONFIG3 (0x09)
+#define PM800_GPADC_MISC_CONFIG4 (0x0A)
+
+#define PM800_GPADC_PREBIAS1 (0x0F)
+#define PM800_GPADC0_GP_PREBIAS_TIME(x) (x << 0)
+#define PM800_GPADC_PREBIAS2 (0x10)
+
+#define PM800_GP_BIAS_ENA1 (0x14)
+#define PM800_GPADC_GP_BIAS_EN0 (1 << 0)
+#define PM800_GPADC_GP_BIAS_EN1 (1 << 1)
+#define PM800_GPADC_GP_BIAS_EN2 (1 << 2)
+#define PM800_GPADC_GP_BIAS_EN3 (1 << 3)
+
+#define PM800_GP_BIAS_OUT1 (0x15)
+#define PM800_BIAS_OUT_GP0 (1 << 0)
+#define PM800_BIAS_OUT_GP1 (1 << 1)
+#define PM800_BIAS_OUT_GP2 (1 << 2)
+#define PM800_BIAS_OUT_GP3 (1 << 3)
+
+#define PM800_GPADC0_LOW_TH 0x20
+#define PM800_GPADC1_LOW_TH 0x21
+#define PM800_GPADC2_LOW_TH 0x22
+#define PM800_GPADC3_LOW_TH 0x23
+#define PM800_GPADC4_LOW_TH 0x24
+
+#define PM800_GPADC0_UPP_TH 0x30
+#define PM800_GPADC1_UPP_TH 0x31
+#define PM800_GPADC2_UPP_TH 0x32
+#define PM800_GPADC3_UPP_TH 0x33
+#define PM800_GPADC4_UPP_TH 0x34
+
+#define PM800_VBBAT_MEAS1 0x40
+#define PM800_VBBAT_MEAS2 0x41
+#define PM800_VBAT_MEAS1 0x42
+#define PM800_VBAT_MEAS2 0x43
+#define PM800_VSYS_MEAS1 0x44
+#define PM800_VSYS_MEAS2 0x45
+#define PM800_VCHG_MEAS1 0x46
+#define PM800_VCHG_MEAS2 0x47
+#define PM800_TINT_MEAS1 0x50
+#define PM800_TINT_MEAS2 0x51
+#define PM800_PMOD_MEAS1 0x52
+#define PM800_PMOD_MEAS2 0x53
+
+#define PM800_GPADC0_MEAS1 0x54
+#define PM800_GPADC0_MEAS2 0x55
+#define PM800_GPADC1_MEAS1 0x56
+#define PM800_GPADC1_MEAS2 0x57
+#define PM800_GPADC2_MEAS1 0x58
+#define PM800_GPADC2_MEAS2 0x59
+#define PM800_GPADC3_MEAS1 0x5A
+#define PM800_GPADC3_MEAS2 0x5B
+#define PM800_GPADC4_MEAS1 0x5C
+#define PM800_GPADC4_MEAS2 0x5D
+
+#define PM800_GPADC4_AVG1 0xA8
+#define PM800_GPADC4_AVG2 0xA9
+
+/* 88PM805 Registers */
+#define PM805_MAIN_POWERUP (0x01)
+#define PM805_INT_STATUS0 (0x02) /* for ena/dis all interrupts */
+
+#define PM805_STATUS0_INT_CLEAR (1 << 0)
+#define PM805_STATUS0_INV_INT (1 << 1)
+#define PM800_STATUS0_INT_MASK (1 << 2)
+
+#define PM805_INT_STATUS1 (0x03)
+
+#define PM805_INT1_HP1_SHRT (1 << 0)
+#define PM805_INT1_HP2_SHRT (1 << 1)
+#define PM805_INT1_MIC_CONFLICT (1 << 2)
+#define PM805_INT1_CLIP_FAULT (1 << 3)
+#define PM805_INT1_LDO_OFF (1 << 4)
+#define PM805_INT1_SRC_DPLL_LOCK (1 << 5)
+
+#define PM805_INT_STATUS2 (0x04)
+
+#define PM805_INT2_MIC_DET (1 << 0)
+#define PM805_INT2_SHRT_BTN_DET (1 << 1)
+#define PM805_INT2_VOLM_BTN_DET (1 << 2)
+#define PM805_INT2_VOLP_BTN_DET (1 << 3)
+#define PM805_INT2_RAW_PLL_FAULT (1 << 4)
+#define PM805_INT2_FINE_PLL_FAULT (1 << 5)
+
+#define PM805_INT_MASK1 (0x05)
+#define PM805_INT_MASK2 (0x06)
+#define PM805_SHRT_BTN_DET (1 << 1)
+
+/* number of status and int reg in a row */
+#define PM805_INT_REG_NUM (2)
+
+#define PM805_MIC_DET1 (0x07)
+#define PM805_MIC_DET_EN_MIC_DET (1 << 0)
+#define PM805_MIC_DET2 (0x08)
+#define PM805_MIC_DET_STATUS1 (0x09)
+
+#define PM805_MIC_DET_STATUS3 (0x0A)
+#define PM805_AUTO_SEQ_STATUS1 (0x0B)
+#define PM805_AUTO_SEQ_STATUS2 (0x0C)
+
+#define PM805_ADC_SETTING1 (0x10)
+#define PM805_ADC_SETTING2 (0x11)
+#define PM805_ADC_SETTING3 (0x11)
+#define PM805_ADC_GAIN1 (0x12)
+#define PM805_ADC_GAIN2 (0x13)
+#define PM805_DMIC_SETTING (0x15)
+#define PM805_DWS_SETTING (0x16)
+#define PM805_MIC_CONFLICT_STS (0x17)
+
+#define PM805_PDM_SETTING1 (0x20)
+#define PM805_PDM_SETTING2 (0x21)
+#define PM805_PDM_SETTING3 (0x22)
+#define PM805_PDM_CONTROL1 (0x23)
+#define PM805_PDM_CONTROL2 (0x24)
+#define PM805_PDM_CONTROL3 (0x25)
+
+#define PM805_HEADPHONE_SETTING (0x26)
+#define PM805_HEADPHONE_GAIN_A2A (0x27)
+#define PM805_HEADPHONE_SHORT_STATE (0x28)
+#define PM805_EARPHONE_SETTING (0x29)
+#define PM805_AUTO_SEQ_SETTING (0x2A)
+
+struct pm80x_rtc_pdata {
+ int vrtc;
+ int rtc_wakeup;
+};
+
+struct pm80x_subchip {
+ struct i2c_client *power_page; /* chip client for power page */
+ struct i2c_client *gpadc_page; /* chip client for gpadc page */
+ struct regmap *regmap_power;
+ struct regmap *regmap_gpadc;
+ unsigned short power_page_addr; /* power page I2C address */
+ unsigned short gpadc_page_addr; /* gpadc page I2C address */
+};
+
+struct pm80x_chip {
+ struct pm80x_subchip *subchip;
+ struct device *dev;
+ struct i2c_client *client;
+ struct i2c_client *companion;
+ struct regmap *regmap;
+ struct regmap_irq_chip *regmap_irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+ int type;
+ int irq;
+ int irq_mode;
+ unsigned long wu_flag;
+ spinlock_t lock;
+};
+
+struct pm80x_platform_data {
+ struct pm80x_rtc_pdata *rtc;
+ /*
+ * For any regulator that is not used, set its entry in regulators[]
+ * to NULL. num_regulators is the number of regulators that will be
+ * initialized. If no regulators are defined at all, set
+ * num_regulators to 0.
+ */
+ struct regulator_init_data *regulators[PM800_ID_RG_MAX];
+ unsigned int num_regulators;
+ int irq_mode; /* 0: clear interrupt by read; 1: clear by write */
+ int batt_det; /* battery detection enable/disable */
+ int (*plat_config)(struct pm80x_chip *chip,
+ struct pm80x_platform_data *pdata);
+};
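For illustration, a minimal board-file sketch of how this platform data might be filled in (all names and values here are hypothetical; it assumes the PM800_ID_BUCK1 regulator ID defined earlier in this header and struct regulator_init_data from <linux/regulator/machine.h>):

static struct regulator_init_data board_buck1_init = {
	.constraints = {
		.min_uV = 600000,
		.max_uV = 1800000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct pm80x_platform_data board_pm800_pdata = {
	.regulators = {
		[PM800_ID_BUCK1] = &board_buck1_init,
		/* regulators that are not used stay NULL */
	},
	.num_regulators = 1,
	.irq_mode = 0,	/* clear interrupts by read */
};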
+
+extern const struct dev_pm_ops pm80x_pm_ops;
+extern const struct regmap_config pm80x_regmap_config;
+
+static inline int pm80x_request_irq(struct pm80x_chip *pm80x, int irq,
+ irq_handler_t handler, unsigned long flags,
+ const char *name, void *data)
+{
+ if (!pm80x->irq_data)
+ return -EINVAL;
+ return request_threaded_irq(regmap_irq_get_virq(pm80x->irq_data, irq),
+ NULL, handler, flags, name, data);
+}
+
+static inline void pm80x_free_irq(struct pm80x_chip *pm80x, int irq, void *data)
+{
+ if (!pm80x->irq_data)
+ return;
+ free_irq(regmap_irq_get_virq(pm80x->irq_data, irq), data);
+}
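As a usage sketch (hypothetical handler, IRQ index and name; error handling trimmed), a sub-driver would map a chip IRQ through these wrappers instead of calling request_threaded_irq() directly:

static irqreturn_t example_event_handler(int irq, void *data)
{
	/* read and acknowledge the event source here */
	return IRQ_HANDLED;
}

static int example_attach(struct pm80x_chip *chip, int chip_irq, void *priv)
{
	/* IRQF_ONESHOT: the wrapper registers a threaded-only handler */
	return pm80x_request_irq(chip, chip_irq, example_event_handler,
				 IRQF_ONESHOT, "pm80x-example", priv);
}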
+
+#ifdef CONFIG_PM
+static inline int pm80x_dev_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ set_bit((1 << irq), &chip->wu_flag);
+
+ return 0;
+}
+
+static inline int pm80x_dev_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ int irq = platform_get_irq(pdev, 0);
+
+ if (device_may_wakeup(dev))
+ clear_bit((1 << irq), &chip->wu_flag);
+
+ return 0;
+}
+#endif
+
+extern int pm80x_init(struct i2c_client *client);
+extern int pm80x_deinit(void);
+#endif /* __LINUX_MFD_88PM80X_H */
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
new file mode 100644
index 000000000..cd9753020
--- /dev/null
+++ b/include/linux/mfd/88pm860x.h
@@ -0,0 +1,487 @@
+/*
+ * Marvell 88PM860x Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM860X_H
+#define __LINUX_MFD_88PM860X_H
+
+#include <linux/interrupt.h>
+
+#define MFD_NAME_SIZE (40)
+
+enum {
+ CHIP_INVALID = 0,
+ CHIP_PM8606,
+ CHIP_PM8607,
+ CHIP_MAX,
+};
+
+enum {
+ PM8606_ID_INVALID,
+ PM8606_ID_BACKLIGHT,
+ PM8606_ID_LED,
+ PM8606_ID_VIBRATOR,
+ PM8606_ID_TOUCH,
+ PM8606_ID_SOUND,
+ PM8606_ID_CHARGER,
+ PM8606_ID_MAX,
+};
+
+
+/* 8606 Registers */
+#define PM8606_DCM_BOOST (0x00)
+#define PM8606_PWM (0x01)
+
+#define PM8607_MISC2 (0x42)
+
+/* Power Up Log Register */
+#define PM8607_POWER_UP_LOG (0x3F)
+
+/* Charger Control Registers */
+#define PM8607_CCNT (0x47)
+#define PM8607_CHG_CTRL1 (0x48)
+#define PM8607_CHG_CTRL2 (0x49)
+#define PM8607_CHG_CTRL3 (0x4A)
+#define PM8607_CHG_CTRL4 (0x4B)
+#define PM8607_CHG_CTRL5 (0x4C)
+#define PM8607_CHG_CTRL6 (0x4D)
+#define PM8607_CHG_CTRL7 (0x4E)
+
+/* Backlight Registers */
+#define PM8606_WLED1A (0x02)
+#define PM8606_WLED1B (0x03)
+#define PM8606_WLED2A (0x04)
+#define PM8606_WLED2B (0x05)
+#define PM8606_WLED3A (0x06)
+#define PM8606_WLED3B (0x07)
+
+/* LED Registers */
+#define PM8606_RGB2A (0x08)
+#define PM8606_RGB2B (0x09)
+#define PM8606_RGB2C (0x0A)
+#define PM8606_RGB2D (0x0B)
+#define PM8606_RGB1A (0x0C)
+#define PM8606_RGB1B (0x0D)
+#define PM8606_RGB1C (0x0E)
+#define PM8606_RGB1D (0x0F)
+
+#define PM8606_PREREGULATORA (0x10)
+#define PM8606_PREREGULATORB (0x11)
+#define PM8606_VIBRATORA (0x12)
+#define PM8606_VIBRATORB (0x13)
+#define PM8606_VCHG (0x14)
+#define PM8606_VSYS (0x15)
+#define PM8606_MISC (0x16)
+#define PM8606_CHIP_ID (0x17)
+#define PM8606_STATUS (0x18)
+#define PM8606_FLAGS (0x19)
+#define PM8606_PROTECTA (0x1A)
+#define PM8606_PROTECTB (0x1B)
+#define PM8606_PROTECTC (0x1C)
+
+/* Bit definitions of PM8606 registers */
+#define PM8606_DCM_500MA (0x0) /* current limit */
+#define PM8606_DCM_750MA (0x1)
+#define PM8606_DCM_1000MA (0x2)
+#define PM8606_DCM_1250MA (0x3)
+#define PM8606_DCM_250MV (0x0 << 2)
+#define PM8606_DCM_300MV (0x1 << 2)
+#define PM8606_DCM_350MV (0x2 << 2)
+#define PM8606_DCM_400MV (0x3 << 2)
+
+#define PM8606_PWM_31200HZ (0x0)
+#define PM8606_PWM_15600HZ (0x1)
+#define PM8606_PWM_7800HZ (0x2)
+#define PM8606_PWM_3900HZ (0x3)
+#define PM8606_PWM_1950HZ (0x4)
+#define PM8606_PWM_976HZ (0x5)
+#define PM8606_PWM_488HZ (0x6)
+#define PM8606_PWM_244HZ (0x7)
+#define PM8606_PWM_FREQ_MASK (0x7)
+
+#define PM8606_WLED_ON (1 << 0)
+#define PM8606_WLED_CURRENT(x) ((x & 0x1F) << 1)
+
+#define PM8606_LED_CURRENT(x) (((x >> 2) & 0x07) << 5)
+
+#define PM8606_VSYS_EN (1 << 1)
+
+#define PM8606_MISC_OSC_EN (1 << 4)
+
+enum {
+ PM8607_ID_BUCK1 = 0,
+ PM8607_ID_BUCK2,
+ PM8607_ID_BUCK3,
+
+ PM8607_ID_LDO1,
+ PM8607_ID_LDO2,
+ PM8607_ID_LDO3,
+ PM8607_ID_LDO4,
+ PM8607_ID_LDO5,
+ PM8607_ID_LDO6,
+ PM8607_ID_LDO7,
+ PM8607_ID_LDO8,
+ PM8607_ID_LDO9,
+ PM8607_ID_LDO10,
+ PM8607_ID_LDO11,
+ PM8607_ID_LDO12,
+ PM8607_ID_LDO13,
+ PM8607_ID_LDO14,
+ PM8607_ID_LDO15,
+ PM8606_ID_PREG,
+
+ PM8607_ID_RG_MAX,
+};
+
+/* 8607 chip ID is 0x40 or 0x50 */
+#define PM8607_VERSION_MASK (0xF0) /* 8607 chip ID mask */
+
+/* Interrupt Registers */
+#define PM8607_STATUS_1 (0x01)
+#define PM8607_STATUS_2 (0x02)
+#define PM8607_INT_STATUS1 (0x03)
+#define PM8607_INT_STATUS2 (0x04)
+#define PM8607_INT_STATUS3 (0x05)
+#define PM8607_INT_MASK_1 (0x06)
+#define PM8607_INT_MASK_2 (0x07)
+#define PM8607_INT_MASK_3 (0x08)
+
+/* Regulator Control Registers */
+#define PM8607_LDO1 (0x10)
+#define PM8607_LDO2 (0x11)
+#define PM8607_LDO3 (0x12)
+#define PM8607_LDO4 (0x13)
+#define PM8607_LDO5 (0x14)
+#define PM8607_LDO6 (0x15)
+#define PM8607_LDO7 (0x16)
+#define PM8607_LDO8 (0x17)
+#define PM8607_LDO9 (0x18)
+#define PM8607_LDO10 (0x19)
+#define PM8607_LDO12 (0x1A)
+#define PM8607_LDO14 (0x1B)
+#define PM8607_SLEEP_MODE1 (0x1C)
+#define PM8607_SLEEP_MODE2 (0x1D)
+#define PM8607_SLEEP_MODE3 (0x1E)
+#define PM8607_SLEEP_MODE4 (0x1F)
+#define PM8607_GO (0x20)
+#define PM8607_SLEEP_BUCK1 (0x21)
+#define PM8607_SLEEP_BUCK2 (0x22)
+#define PM8607_SLEEP_BUCK3 (0x23)
+#define PM8607_BUCK1 (0x24)
+#define PM8607_BUCK2 (0x25)
+#define PM8607_BUCK3 (0x26)
+#define PM8607_BUCK_CONTROLS (0x27)
+#define PM8607_SUPPLIES_EN11 (0x2B)
+#define PM8607_SUPPLIES_EN12 (0x2C)
+#define PM8607_GROUP1 (0x2D)
+#define PM8607_GROUP2 (0x2E)
+#define PM8607_GROUP3 (0x2F)
+#define PM8607_GROUP4 (0x30)
+#define PM8607_GROUP5 (0x31)
+#define PM8607_GROUP6 (0x32)
+#define PM8607_SUPPLIES_EN21 (0x33)
+#define PM8607_SUPPLIES_EN22 (0x34)
+
+/* Vibrator Control Registers */
+#define PM8607_VIBRATOR_SET (0x28)
+#define PM8607_VIBRATOR_PWM (0x29)
+
+/* GPADC Registers */
+#define PM8607_GP_BIAS1 (0x4F)
+#define PM8607_MEAS_EN1 (0x50)
+#define PM8607_MEAS_EN2 (0x51)
+#define PM8607_MEAS_EN3 (0x52)
+#define PM8607_MEAS_OFF_TIME1 (0x53)
+#define PM8607_MEAS_OFF_TIME2 (0x54)
+#define PM8607_TSI_PREBIAS (0x55) /* prebias time */
+#define PM8607_PD_PREBIAS (0x56) /* prebias time */
+#define PM8607_GPADC_MISC1 (0x57)
+
+/* bit definitions of MEAS_EN1*/
+#define PM8607_MEAS_EN1_VBAT (1 << 0)
+#define PM8607_MEAS_EN1_VCHG (1 << 1)
+#define PM8607_MEAS_EN1_VSYS (1 << 2)
+#define PM8607_MEAS_EN1_TINT (1 << 3)
+#define PM8607_MEAS_EN1_RFTMP (1 << 4)
+#define PM8607_MEAS_EN1_TBAT (1 << 5)
+#define PM8607_MEAS_EN1_GPADC2 (1 << 6)
+#define PM8607_MEAS_EN1_GPADC3 (1 << 7)
+
+/* Battery Monitor Registers */
+#define PM8607_GP_BIAS2 (0x5A)
+#define PM8607_VBAT_LOWTH (0x5B)
+#define PM8607_VCHG_LOWTH (0x5C)
+#define PM8607_VSYS_LOWTH (0x5D)
+#define PM8607_TINT_LOWTH (0x5E)
+#define PM8607_GPADC0_LOWTH (0x5F)
+#define PM8607_GPADC1_LOWTH (0x60)
+#define PM8607_GPADC2_LOWTH (0x61)
+#define PM8607_GPADC3_LOWTH (0x62)
+#define PM8607_VBAT_HIGHTH (0x63)
+#define PM8607_VCHG_HIGHTH (0x64)
+#define PM8607_VSYS_HIGHTH (0x65)
+#define PM8607_TINT_HIGHTH (0x66)
+#define PM8607_GPADC0_HIGHTH (0x67)
+#define PM8607_GPADC1_HIGHTH (0x68)
+#define PM8607_GPADC2_HIGHTH (0x69)
+#define PM8607_GPADC3_HIGHTH (0x6A)
+#define PM8607_IBAT_MEAS1 (0x6B)
+#define PM8607_IBAT_MEAS2 (0x6C)
+#define PM8607_VBAT_MEAS1 (0x6D)
+#define PM8607_VBAT_MEAS2 (0x6E)
+#define PM8607_VCHG_MEAS1 (0x6F)
+#define PM8607_VCHG_MEAS2 (0x70)
+#define PM8607_VSYS_MEAS1 (0x71)
+#define PM8607_VSYS_MEAS2 (0x72)
+#define PM8607_TINT_MEAS1 (0x73)
+#define PM8607_TINT_MEAS2 (0x74)
+#define PM8607_GPADC0_MEAS1 (0x75)
+#define PM8607_GPADC0_MEAS2 (0x76)
+#define PM8607_GPADC1_MEAS1 (0x77)
+#define PM8607_GPADC1_MEAS2 (0x78)
+#define PM8607_GPADC2_MEAS1 (0x79)
+#define PM8607_GPADC2_MEAS2 (0x7A)
+#define PM8607_GPADC3_MEAS1 (0x7B)
+#define PM8607_GPADC3_MEAS2 (0x7C)
+#define PM8607_CCNT_MEAS1 (0x95)
+#define PM8607_CCNT_MEAS2 (0x96)
+#define PM8607_VBAT_AVG (0x97)
+#define PM8607_VCHG_AVG (0x98)
+#define PM8607_VSYS_AVG (0x99)
+#define PM8607_VBAT_MIN (0x9A)
+#define PM8607_VCHG_MIN (0x9B)
+#define PM8607_VSYS_MIN (0x9C)
+#define PM8607_VBAT_MAX (0x9D)
+#define PM8607_VCHG_MAX (0x9E)
+#define PM8607_VSYS_MAX (0x9F)
+
+#define PM8607_GPADC_MISC2 (0x59)
+#define PM8607_GPADC0_GP_BIAS_A0 (1 << 0)
+#define PM8607_GPADC1_GP_BIAS_A1 (1 << 1)
+#define PM8607_GPADC2_GP_BIAS_A2 (1 << 2)
+#define PM8607_GPADC3_GP_BIAS_A3 (1 << 3)
+#define PM8607_GPADC2_GP_BIAS_OUT2 (1 << 6)
+
+/* RTC Control Registers */
+#define PM8607_RTC1 (0xA0)
+#define PM8607_RTC_COUNTER1 (0xA1)
+#define PM8607_RTC_COUNTER2 (0xA2)
+#define PM8607_RTC_COUNTER3 (0xA3)
+#define PM8607_RTC_COUNTER4 (0xA4)
+#define PM8607_RTC_EXPIRE1 (0xA5)
+#define PM8607_RTC_EXPIRE2 (0xA6)
+#define PM8607_RTC_EXPIRE3 (0xA7)
+#define PM8607_RTC_EXPIRE4 (0xA8)
+#define PM8607_RTC_TRIM1 (0xA9)
+#define PM8607_RTC_TRIM2 (0xAA)
+#define PM8607_RTC_TRIM3 (0xAB)
+#define PM8607_RTC_TRIM4 (0xAC)
+#define PM8607_RTC_MISC1 (0xAD)
+#define PM8607_RTC_MISC2 (0xAE)
+#define PM8607_RTC_MISC3 (0xAF)
+
+/* Misc Registers */
+#define PM8607_CHIP_ID (0x00)
+#define PM8607_B0_MISC1 (0x0C)
+#define PM8607_LDO1 (0x10)
+#define PM8607_DVC3 (0x26)
+#define PM8607_A1_MISC1 (0x40)
+
+/* bit definitions of Status Query Interface */
+#define PM8607_STATUS_CC (1 << 3)
+#define PM8607_STATUS_PEN (1 << 4)
+#define PM8607_STATUS_HEADSET (1 << 5)
+#define PM8607_STATUS_HOOK (1 << 6)
+#define PM8607_STATUS_MICIN (1 << 7)
+#define PM8607_STATUS_ONKEY (1 << 8)
+#define PM8607_STATUS_EXTON (1 << 9)
+#define PM8607_STATUS_CHG (1 << 10)
+#define PM8607_STATUS_BAT (1 << 11)
+#define PM8607_STATUS_VBUS (1 << 12)
+#define PM8607_STATUS_OV (1 << 13)
+
+/* bit definitions of BUCK3 */
+#define PM8607_BUCK3_DOUBLE (1 << 6)
+
+/* bit definitions of Misc1 */
+#define PM8607_A1_MISC1_PI2C (1 << 0)
+#define PM8607_B0_MISC1_INV_INT (1 << 0)
+#define PM8607_B0_MISC1_INT_CLEAR (1 << 1)
+#define PM8607_B0_MISC1_INT_MASK (1 << 2)
+#define PM8607_B0_MISC1_PI2C (1 << 3)
+#define PM8607_B0_MISC1_RESET (1 << 6)
+
+/* bits definitions of GPADC */
+#define PM8607_GPADC_EN (1 << 0)
+#define PM8607_GPADC_PREBIAS_MASK (3 << 1)
+#define PM8607_GPADC_SLOT_CYCLE_MASK (3 << 3) /* slow mode */
+#define PM8607_GPADC_OFF_SCALE_MASK (3 << 5) /* GP sleep mode */
+#define PM8607_GPADC_SW_CAL_MASK (1 << 7)
+
+#define PM8607_PD_PREBIAS_MASK (0x1F << 0)
+#define PM8607_PD_PRECHG_MASK (7 << 5)
+
+#define PM8606_REF_GP_OSC_OFF 0
+#define PM8606_REF_GP_OSC_ON 1
+#define PM8606_REF_GP_OSC_UNKNOWN 2
+
+/* Clients of reference group and 8MHz oscillator in 88PM8606 */
+enum pm8606_ref_gp_and_osc_clients {
+ REF_GP_NO_CLIENTS = 0,
+ WLED1_DUTY = (1<<0), /*PF 0x02.7:0*/
+ WLED2_DUTY = (1<<1), /*PF 0x04.7:0*/
+ WLED3_DUTY = (1<<2), /*PF 0x06.7:0*/
+ RGB1_ENABLE = (1<<3), /*PF 0x07.1*/
+ RGB2_ENABLE = (1<<4), /*PF 0x07.2*/
+ LDO_VBR_EN = (1<<5), /*PF 0x12.0*/
+ REF_GP_MAX_CLIENT = 0xFFFF
+};
+
+/* Interrupt Number in 88PM8607 */
+enum {
+ PM8607_IRQ_ONKEY,
+ PM8607_IRQ_EXTON,
+ PM8607_IRQ_CHG,
+ PM8607_IRQ_BAT,
+ PM8607_IRQ_RTC,
+ PM8607_IRQ_CC,
+ PM8607_IRQ_VBAT,
+ PM8607_IRQ_VCHG,
+ PM8607_IRQ_VSYS,
+ PM8607_IRQ_TINT,
+ PM8607_IRQ_GPADC0,
+ PM8607_IRQ_GPADC1,
+ PM8607_IRQ_GPADC2,
+ PM8607_IRQ_GPADC3,
+ PM8607_IRQ_AUDIO_SHORT,
+ PM8607_IRQ_PEN,
+ PM8607_IRQ_HEADSET,
+ PM8607_IRQ_HOOK,
+ PM8607_IRQ_MICIN,
+ PM8607_IRQ_CHG_FAIL,
+ PM8607_IRQ_CHG_DONE,
+ PM8607_IRQ_CHG_FAULT,
+};
+
+enum {
+ PM8607_CHIP_A0 = 0x40,
+ PM8607_CHIP_A1 = 0x41,
+ PM8607_CHIP_B0 = 0x48,
+};
+
+struct pm860x_chip {
+ struct device *dev;
+ struct mutex irq_lock;
+ struct mutex osc_lock;
+ struct i2c_client *client;
+ struct i2c_client *companion; /* companion chip client */
+ struct regmap *regmap;
+ struct regmap *regmap_companion;
+
+ int buck3_double; /* DVC ramp slope double */
+ int companion_addr;
+ unsigned short osc_vote;
+ int id;
+ int irq_mode;
+ int irq_base;
+ int core_irq;
+ unsigned char chip_version;
+ unsigned char osc_status;
+
+ unsigned int wakeup_flag;
+};
+
+enum {
+ GI2C_PORT = 0,
+ PI2C_PORT,
+};
+
+struct pm860x_backlight_pdata {
+ int pwm;
+ int iset;
+};
+
+struct pm860x_led_pdata {
+ int iset;
+};
+
+struct pm860x_rtc_pdata {
+ int (*sync)(unsigned int ticks);
+ int vrtc;
+};
+
+struct pm860x_touch_pdata {
+ int gpadc_prebias;
+ int slot_cycle;
+ int off_scale;
+ int sw_cal;
+ int tsi_prebias; /* time, slot */
+ int pen_prebias; /* time, slot */
+ int pen_prechg; /* time, slot */
+ int res_x; /* resistor of Xplate */
+ unsigned long flags;
+};
+
+struct pm860x_power_pdata {
+ int max_capacity;
+ int resistor;
+};
+
+struct pm860x_platform_data {
+ struct pm860x_backlight_pdata *backlight;
+ struct pm860x_led_pdata *led;
+ struct pm860x_rtc_pdata *rtc;
+ struct pm860x_touch_pdata *touch;
+ struct pm860x_power_pdata *power;
+ struct regulator_init_data *buck1;
+ struct regulator_init_data *buck2;
+ struct regulator_init_data *buck3;
+ struct regulator_init_data *ldo1;
+ struct regulator_init_data *ldo2;
+ struct regulator_init_data *ldo3;
+ struct regulator_init_data *ldo4;
+ struct regulator_init_data *ldo5;
+ struct regulator_init_data *ldo6;
+ struct regulator_init_data *ldo7;
+ struct regulator_init_data *ldo8;
+ struct regulator_init_data *ldo9;
+ struct regulator_init_data *ldo10;
+ struct regulator_init_data *ldo12;
+ struct regulator_init_data *ldo_vibrator;
+ struct regulator_init_data *ldo14;
+ struct charger_desc *chg_desc;
+
+ int companion_addr; /* I2C address of companion chip */
+ int i2c_port; /* Controlled by GI2C or PI2C */
+ int irq_mode; /* Clear interrupt by read/write(0/1) */
+ int irq_base; /* IRQ base number of 88pm860x */
+ int num_leds;
+ int num_backlights;
+};
+
+extern int pm8606_osc_enable(struct pm860x_chip *, unsigned short);
+extern int pm8606_osc_disable(struct pm860x_chip *, unsigned short);
+
+extern int pm860x_reg_read(struct i2c_client *, int);
+extern int pm860x_reg_write(struct i2c_client *, int, unsigned char);
+extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
+extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
+extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
+ unsigned char);
+extern int pm860x_page_reg_read(struct i2c_client *, int);
+extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char);
+extern int pm860x_page_bulk_read(struct i2c_client *, int, int,
+ unsigned char *);
+extern int pm860x_page_bulk_write(struct i2c_client *, int, int,
+ unsigned char *);
+extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char,
+ unsigned char);
+
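A short usage sketch of the accessor API above (hypothetical helper; the client would be the 88PM8606 i2c_client held by the core driver): set PM8606_MISC_OSC_EN in the PM8606_MISC register without disturbing the other bits.

static int example_enable_osc(struct i2c_client *pm8606_client)
{
	return pm860x_set_bits(pm8606_client, PM8606_MISC,
			       PM8606_MISC_OSC_EN, PM8606_MISC_OSC_EN);
}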
+#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/aat2870.h b/include/linux/mfd/aat2870.h
new file mode 100644
index 000000000..f7316c29b
--- /dev/null
+++ b/include/linux/mfd/aat2870.h
@@ -0,0 +1,181 @@
+/*
+ * linux/include/linux/mfd/aat2870.h
+ *
+ * Copyright (c) 2011, NVIDIA Corporation.
+ * Author: Jin Park <jinyoungp@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_MFD_AAT2870_H
+#define __LINUX_MFD_AAT2870_H
+
+#include <linux/debugfs.h>
+#include <linux/i2c.h>
+
+/* Register offsets */
+#define AAT2870_BL_CH_EN 0x00
+#define AAT2870_BLM 0x01
+#define AAT2870_BLS 0x02
+#define AAT2870_BL1 0x03
+#define AAT2870_BL2 0x04
+#define AAT2870_BL3 0x05
+#define AAT2870_BL4 0x06
+#define AAT2870_BL5 0x07
+#define AAT2870_BL6 0x08
+#define AAT2870_BL7 0x09
+#define AAT2870_BL8 0x0A
+#define AAT2870_FLR 0x0B
+#define AAT2870_FM 0x0C
+#define AAT2870_FS 0x0D
+#define AAT2870_ALS_CFG0 0x0E
+#define AAT2870_ALS_CFG1 0x0F
+#define AAT2870_ALS_CFG2 0x10
+#define AAT2870_AMB 0x11
+#define AAT2870_ALS0 0x12
+#define AAT2870_ALS1 0x13
+#define AAT2870_ALS2 0x14
+#define AAT2870_ALS3 0x15
+#define AAT2870_ALS4 0x16
+#define AAT2870_ALS5 0x17
+#define AAT2870_ALS6 0x18
+#define AAT2870_ALS7 0x19
+#define AAT2870_ALS8 0x1A
+#define AAT2870_ALS9 0x1B
+#define AAT2870_ALSA 0x1C
+#define AAT2870_ALSB 0x1D
+#define AAT2870_ALSC 0x1E
+#define AAT2870_ALSD 0x1F
+#define AAT2870_ALSE 0x20
+#define AAT2870_ALSF 0x21
+#define AAT2870_SUB_SET 0x22
+#define AAT2870_SUB_CTRL 0x23
+#define AAT2870_LDO_AB 0x24
+#define AAT2870_LDO_CD 0x25
+#define AAT2870_LDO_EN 0x26
+#define AAT2870_REG_NUM 0x27
+
+/* Device IDs */
+enum aat2870_id {
+ AAT2870_ID_BL,
+ AAT2870_ID_LDOA,
+ AAT2870_ID_LDOB,
+ AAT2870_ID_LDOC,
+ AAT2870_ID_LDOD
+};
+
+/* Backlight channels */
+#define AAT2870_BL_CH1 0x01
+#define AAT2870_BL_CH2 0x02
+#define AAT2870_BL_CH3 0x04
+#define AAT2870_BL_CH4 0x08
+#define AAT2870_BL_CH5 0x10
+#define AAT2870_BL_CH6 0x20
+#define AAT2870_BL_CH7 0x40
+#define AAT2870_BL_CH8 0x80
+#define AAT2870_BL_CH_ALL 0xFF
+
+/* Backlight current magnitude (mA) */
+enum aat2870_current {
+ AAT2870_CURRENT_0_45 = 1,
+ AAT2870_CURRENT_0_90,
+ AAT2870_CURRENT_1_80,
+ AAT2870_CURRENT_2_70,
+ AAT2870_CURRENT_3_60,
+ AAT2870_CURRENT_4_50,
+ AAT2870_CURRENT_5_40,
+ AAT2870_CURRENT_6_30,
+ AAT2870_CURRENT_7_20,
+ AAT2870_CURRENT_8_10,
+ AAT2870_CURRENT_9_00,
+ AAT2870_CURRENT_9_90,
+ AAT2870_CURRENT_10_8,
+ AAT2870_CURRENT_11_7,
+ AAT2870_CURRENT_12_6,
+ AAT2870_CURRENT_13_5,
+ AAT2870_CURRENT_14_4,
+ AAT2870_CURRENT_15_3,
+ AAT2870_CURRENT_16_2,
+ AAT2870_CURRENT_17_1,
+ AAT2870_CURRENT_18_0,
+ AAT2870_CURRENT_18_9,
+ AAT2870_CURRENT_19_8,
+ AAT2870_CURRENT_20_7,
+ AAT2870_CURRENT_21_6,
+ AAT2870_CURRENT_22_5,
+ AAT2870_CURRENT_23_4,
+ AAT2870_CURRENT_24_3,
+ AAT2870_CURRENT_25_2,
+ AAT2870_CURRENT_26_1,
+ AAT2870_CURRENT_27_0,
+ AAT2870_CURRENT_27_9
+};
+
+struct aat2870_register {
+ bool readable;
+ bool writeable;
+ u8 value;
+};
+
+struct aat2870_data {
+ struct device *dev;
+ struct i2c_client *client;
+
+ struct mutex io_lock;
+ struct aat2870_register *reg_cache; /* register cache */
+ int en_pin; /* enable GPIO pin (if < 0, ignore this value) */
+ bool is_enable;
+
+ /* platform-specific init and uninit hooks */
+ int (*init)(struct aat2870_data *aat2870);
+ void (*uninit)(struct aat2870_data *aat2870);
+
+ /* I2C I/O functions */
+ int (*read)(struct aat2870_data *aat2870, u8 addr, u8 *val);
+ int (*write)(struct aat2870_data *aat2870, u8 addr, u8 val);
+ int (*update)(struct aat2870_data *aat2870, u8 addr, u8 mask, u8 val);
+
+ /* for debugfs */
+ struct dentry *dentry_root;
+ struct dentry *dentry_reg;
+};
+
+struct aat2870_subdev_info {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct aat2870_platform_data {
+ int en_pin; /* enable GPIO pin (if < 0, ignore this value) */
+
+ struct aat2870_subdev_info *subdevs;
+ int num_subdevs;
+
+ /* platform-specific init and uninit hooks */
+ int (*init)(struct aat2870_data *aat2870);
+ void (*uninit)(struct aat2870_data *aat2870);
+};
+
+struct aat2870_bl_platform_data {
+ /* backlight channels, default is AAT2870_BL_CH_ALL */
+ int channels;
+ /* backlight current magnitude, default is AAT2870_CURRENT_27_9 */
+ int max_current;
+ /* maximum brightness, default is 255 */
+ int max_brightness;
+};
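For illustration, a hypothetical board-file sketch wiring one backlight sub-device through this platform data (the "aat2870-backlight" cell name is an assumption, not defined in this header; ARRAY_SIZE comes from <linux/kernel.h>):

static struct aat2870_bl_platform_data board_aat2870_bl = {
	.channels = AAT2870_BL_CH_ALL,
	.max_current = AAT2870_CURRENT_27_9,
	.max_brightness = 255,
};

static struct aat2870_subdev_info board_aat2870_subdevs[] = {
	{
		.id = AAT2870_ID_BL,
		.name = "aat2870-backlight",
		.platform_data = &board_aat2870_bl,
	},
};

static struct aat2870_platform_data board_aat2870_pdata = {
	.en_pin = -1,	/* no enable GPIO on this board */
	.subdevs = board_aat2870_subdevs,
	.num_subdevs = ARRAY_SIZE(board_aat2870_subdevs),
};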
+
+#endif /* __LINUX_MFD_AAT2870_H */
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h
new file mode 100644
index 000000000..afd3080bd
--- /dev/null
+++ b/include/linux/mfd/ab3100.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * AB3100 core access functions
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ */
+
+#include <linux/regulator/machine.h>
+
+struct device;
+
+#ifndef MFD_AB3100_H
+#define MFD_AB3100_H
+
+
+#define AB3100_P1A 0xc0
+#define AB3100_P1B 0xc1
+#define AB3100_P1C 0xc2
+#define AB3100_P1D 0xc3
+#define AB3100_P1E 0xc4
+#define AB3100_P1F 0xc5
+#define AB3100_P1G 0xc6
+#define AB3100_R2A 0xc7
+#define AB3100_R2B 0xc8
+
+/*
+ * AB3100 EVENTA1, A2 and A3 event register flags.
+ * These are concatenated into a single 32-bit flag in the code
+ * for event notification broadcasts.
+ */
+#define AB3100_EVENTA1_ONSWA (0x01<<16)
+#define AB3100_EVENTA1_ONSWB (0x02<<16)
+#define AB3100_EVENTA1_ONSWC (0x04<<16)
+#define AB3100_EVENTA1_DCIO (0x08<<16)
+#define AB3100_EVENTA1_OVER_TEMP (0x10<<16)
+#define AB3100_EVENTA1_SIM_OFF (0x20<<16)
+#define AB3100_EVENTA1_VBUS (0x40<<16)
+#define AB3100_EVENTA1_VSET_USB (0x80<<16)
+
+#define AB3100_EVENTA2_READY_TX (0x01<<8)
+#define AB3100_EVENTA2_READY_RX (0x02<<8)
+#define AB3100_EVENTA2_OVERRUN_ERROR (0x04<<8)
+#define AB3100_EVENTA2_FRAMING_ERROR (0x08<<8)
+#define AB3100_EVENTA2_CHARG_OVERCURRENT (0x10<<8)
+#define AB3100_EVENTA2_MIDR (0x20<<8)
+#define AB3100_EVENTA2_BATTERY_REM (0x40<<8)
+#define AB3100_EVENTA2_ALARM (0x80<<8)
+
+#define AB3100_EVENTA3_ADC_TRIG5 (0x01)
+#define AB3100_EVENTA3_ADC_TRIG4 (0x02)
+#define AB3100_EVENTA3_ADC_TRIG3 (0x04)
+#define AB3100_EVENTA3_ADC_TRIG2 (0x08)
+#define AB3100_EVENTA3_ADC_TRIGVBAT (0x10)
+#define AB3100_EVENTA3_ADC_TRIGVTX (0x20)
+#define AB3100_EVENTA3_ADC_TRIG1 (0x40)
+#define AB3100_EVENTA3_ADC_TRIG0 (0x80)
+
+/* AB3100, STR register flags */
+#define AB3100_STR_ONSWA (0x01)
+#define AB3100_STR_ONSWB (0x02)
+#define AB3100_STR_ONSWC (0x04)
+#define AB3100_STR_DCIO (0x08)
+#define AB3100_STR_BOOT_MODE (0x10)
+#define AB3100_STR_SIM_OFF (0x20)
+#define AB3100_STR_BATT_REMOVAL (0x40)
+#define AB3100_STR_VBUS (0x80)
+
+/*
+ * The AB3100 contains 8 regulators, one external regulator controller
+ * and a buck converter. In addition, LDO E and the buck converter can
+ * have separate settings when they are in sleep mode; each sleep
+ * setting is modeled as a separate regulator.
+ */
+#define AB3100_NUM_REGULATORS 10
+
+/**
+ * struct ab3100
+ * @access_mutex: lock out concurrent accesses to the AB3100 registers
+ * @dev: pointer to the containing device
+ * @i2c_client: I2C client for this chip
+ * @testreg_client: secondary client for test registers
+ * @chip_name: name of this chip variant
+ * @chip_id: 8 bit chip ID for this chip variant
+ * @event_subscribers: event subscribers are listed here
+ * @startup_events: a copy of the first reading of the event registers
+ * @startup_events_read: whether the first events have been read
+ *
+ * This struct is PRIVATE and devices using it should NOT
+ * access ANY fields. It is used as a token for calling the
+ * AB3100 functions.
+ */
+struct ab3100 {
+ struct mutex access_mutex;
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct i2c_client *testreg_client;
+ char chip_name[32];
+ u8 chip_id;
+ struct blocking_notifier_head event_subscribers;
+ u8 startup_events[3];
+ bool startup_events_read;
+};
+
+/**
+ * struct ab3100_platform_data
+ * Data supplied to initialize board connections to the AB3100
+ * @reg_constraints: regulator constraints for the target board;
+ * the order of these constraints is: LDO A, C, D, E,
+ * F, G, H, K, EXT and BUCK.
+ * @reg_initvals: initial values for the regulator registers
+ * plus two sleep settings for LDO E and the BUCK converter.
+ * Exactly AB3100_NUM_REGULATORS+2 values must be passed in, in this
+ * order: LDO A, C, E, E sleep, F, G, H, K, EXT, BUCK,
+ * BUCK sleep, LDO D. (LDO D needs to be initialized last.)
+ * @external_voltage: voltage level of the external regulator.
+ */
+struct ab3100_platform_data {
+ struct regulator_init_data reg_constraints[AB3100_NUM_REGULATORS];
+ u8 reg_initvals[AB3100_NUM_REGULATORS+2];
+ int external_voltage;
+};
+
+int ab3100_event_register(struct ab3100 *ab3100,
+ struct notifier_block *nb);
+int ab3100_event_unregister(struct ab3100 *ab3100,
+ struct notifier_block *nb);
+
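A sketch of an event subscriber (hypothetical code; it assumes, per the comment on the event flags above, that the notifier's 'event' argument carries the concatenated EVENTA1/A2/A3 word):

static int example_ab3100_notify(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	if (event & AB3100_EVENTA1_VBUS)
		pr_info("ab3100: VBUS event\n");
	return NOTIFY_OK;
}

static struct notifier_block example_ab3100_nb = {
	.notifier_call = example_ab3100_notify,
};

/* later, with the struct ab3100 token obtained from the core driver:
 *	ab3100_event_register(ab3100, &example_ab3100_nb);
 */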
+#endif /* MFD_AB3100_H */
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
new file mode 100644
index 000000000..552cc1d61
--- /dev/null
+++ b/include/linux/mfd/abx500.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2007-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * ABX500 core access functions.
+ * The abx500 interface is used for the Analog Baseband chips.
+ *
+ * Author: Mattias Wallin <mattias.wallin@stericsson.com>
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#include <linux/regulator/machine.h>
+
+struct device;
+
+#ifndef MFD_ABX500_H
+#define MFD_ABX500_H
+
+/**
+ * struct abx500_init_settings
+ * Initial register values for the driver to use during setup.
+ */
+struct abx500_init_settings {
+ u8 bank;
+ u8 reg;
+ u8 setting;
+};
+
+/* Battery driver related data */
+/*
+ * ADC for the battery thermistor.
+ * When using ABx500_ADC_THERM_BATCTRL, the battery ID resistor is combined
+ * with an NTC resistor to both identify the battery and to measure its
+ * temperature. Different phone manufacturers use different techniques to
+ * identify the battery and to read its temperature.
+ */
+enum abx500_adc_therm {
+ ABx500_ADC_THERM_BATCTRL,
+ ABx500_ADC_THERM_BATTEMP,
+};
+
+/**
+ * struct abx500_res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combine the identification resistor with an
+ * NTC resistor.
+ * @temp: battery pack temperature in Celsius
+ * @resist: NTC resistor net total resistance
+ */
+struct abx500_res_to_temp {
+ int temp;
+ int resist;
+};
+
+/**
+ * struct abx500_v_to_cap - Table for translating voltage to capacity
+ * @voltage: Voltage in mV
+ * @capacity: Capacity in percent
+ */
+struct abx500_v_to_cap {
+ int voltage;
+ int capacity;
+};
+
+/* Forward declaration */
+struct abx500_fg;
+
+/**
+ * struct abx500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * if not specified
+ * @recovery_sleep_timer: Time between measurements while recovering
+ * @recovery_total_time: Total recovery time
+ * @init_timer: Measurement interval during startup
+ * @init_discard_time: Time we discard voltage measurement at startup
+ * @init_total_time: Total init time during startup
+ * @high_curr_time: Time current has to be high to go to recovery
+ * @accu_charging: FG accumulation time while charging
+ * @accu_high_curr: FG accumulation time in high current mode
+ * @high_curr_threshold: High current threshold, in mA
+ * @lowbat_threshold: Low battery threshold, in mV
+ * @overbat_threshold: Over battery threshold, in mV
+ * @battok_falling_th_sel0: Threshold in mV for battOk signal sel0.
+ * Resolution in 50 mV steps.
+ * @battok_raising_th_sel1: Threshold in mV for battOk signal sel1.
+ * Resolution in 50 mV steps.
+ * @user_cap_limit: Capacity reported from user must be within this
+ * limit to be considered sane, in percentage
+ * points.
+ * @maint_thres: This is the threshold where we stop reporting
+ * battery full while in maintenance, in percent
+ * @pcut_enable: Enable power cut feature in ab8505
+ * @pcut_max_time: Max time threshold
+ * @pcut_flag_time: Flagtime threshold
+ * @pcut_max_restart: Max number of restarts
+ * @pcut_debounce_time: Sets battery debounce time
+ */
+struct abx500_fg_parameters {
+ int recovery_sleep_timer;
+ int recovery_total_time;
+ int init_timer;
+ int init_discard_time;
+ int init_total_time;
+ int high_curr_time;
+ int accu_charging;
+ int accu_high_curr;
+ int high_curr_threshold;
+ int lowbat_threshold;
+ int overbat_threshold;
+ int battok_falling_th_sel0;
+ int battok_raising_th_sel1;
+ int user_cap_limit;
+ int maint_thres;
+ bool pcut_enable;
+ u8 pcut_max_time;
+ u8 pcut_flag_time;
+ u8 pcut_max_restart;
+ u8 pcut_debounce_time;
+};
+
+/**
+ * struct abx500_maxim_parameters - charger maximization, used by board config
+ * @ena_maxi: Enable maximization for this battery type
+ * @chg_curr: Maximum charger current allowed
+ * @wait_cycles: cycles to wait before setting charger current
+ * @charger_curr_step: delta between two charger current settings (mA)
+ */
+struct abx500_maxim_parameters {
+ bool ena_maxi;
+ int chg_curr;
+ int wait_cycles;
+ int charger_curr_step;
+};
+
+/**
+ * struct abx500_battery_type - different batteries supported
+ * @name: battery technology
+ * @resis_high: battery upper resistance limit
+ * @resis_low: battery lower resistance limit
+ * @charge_full_design: Maximum battery capacity in mAh
+ * @nominal_voltage: Nominal voltage of the battery in mV
+ * @termination_vol: max voltage up to which the battery can be charged
+ * @termination_curr: battery charging termination current in mA
+ * @recharge_cap: battery capacity limit that will trigger a new
+ * full charging cycle in the case where maintenance
+ * charging has been disabled
+ * @normal_cur_lvl: charger current in normal state in mA
+ * @normal_vol_lvl: charger voltage in normal state in mV
+ * @maint_a_cur_lvl: charger current in maintenance A state in mA
+ * @maint_a_vol_lvl: charger voltage in maintenance A state in mV
+ * @maint_a_chg_timer_h: charge time in maintenance A state
+ * @maint_b_cur_lvl: charger current in maintenance B state in mA
+ * @maint_b_vol_lvl: charger voltage in maintenance B state in mV
+ * @maint_b_chg_timer_h: charge time in maintenance B state
+ * @low_high_cur_lvl: charger current in temp low/high state in mA
+ * @low_high_vol_lvl: charger voltage in temp low/high state in mV
+ * @battery_resistance: battery inner resistance in mOhm.
+ * @n_temp_tbl_elements: number of elements in r_to_t_tbl
+ * @r_to_t_tbl: table containing resistance to temp points
+ * @n_v_cap_tbl_elements: number of elements in v_to_cap_tbl
+ * @v_to_cap_tbl: Voltage to capacity (in %) table
+ * @n_batres_tbl_elements: number of elements in the batres_tbl
+ * @batres_tbl: battery internal resistance vs temperature table
+ */
+struct abx500_battery_type {
+ int name;
+ int resis_high;
+ int resis_low;
+ int charge_full_design;
+ int nominal_voltage;
+ int termination_vol;
+ int termination_curr;
+ int recharge_cap;
+ int normal_cur_lvl;
+ int normal_vol_lvl;
+ int maint_a_cur_lvl;
+ int maint_a_vol_lvl;
+ int maint_a_chg_timer_h;
+ int maint_b_cur_lvl;
+ int maint_b_vol_lvl;
+ int maint_b_chg_timer_h;
+ int low_high_cur_lvl;
+ int low_high_vol_lvl;
+ int battery_resistance;
+ int n_temp_tbl_elements;
+ const struct abx500_res_to_temp *r_to_t_tbl;
+ int n_v_cap_tbl_elements;
+ const struct abx500_v_to_cap *v_to_cap_tbl;
+ int n_batres_tbl_elements;
+ const struct batres_vs_temp *batres_tbl;
+};
+
+/**
+ * struct abx500_bm_capacity_levels - abx500 capacity level data
+ * @critical: critical capacity level in percent
+ * @low: low capacity level in percent
+ * @normal: normal capacity level in percent
+ * @high: high capacity level in percent
+ * @full: full capacity level in percent
+ */
+struct abx500_bm_capacity_levels {
+ int critical;
+ int low;
+ int normal;
+ int high;
+ int full;
+};
+
+/**
+ * struct abx500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max: maximum allowed USB charger voltage in mV
+ * @usb_curr_max: maximum allowed USB charger current in mA
+ * @ac_volt_max: maximum allowed AC charger voltage in mV
+ * @ac_curr_max: maximum allowed AC charger current in mA
+ */
+struct abx500_bm_charger_parameters {
+ int usb_volt_max;
+ int usb_curr_max;
+ int ac_volt_max;
+ int ac_curr_max;
+};
+
+/**
+ * struct abx500_bm_data - abx500 battery management data
+ * @temp_under under this temp, charging is stopped
+ * @temp_low between this temp and temp_under charging is reduced
+ * @temp_high between this temp and temp_over charging is reduced
+ * @temp_over over this temp, charging is stopped
+ * @temp_now present battery temperature
+ * @temp_interval_chg temperature measurement interval in s when charging
+ * @temp_interval_nochg temperature measurement interval in s when not charging
+ * @main_safety_tmr_h safety timer for main charger
+ * @usb_safety_tmr_h safety timer for usb charger
+ * @bkup_bat_v voltage which we charge the backup battery with
+ * @bkup_bat_i current which we charge the backup battery with
+ * @no_maintenance indicates that maintenance charging is disabled
+ * @capacity_scaling indicates whether capacity scaling is to be used
+ * @adc_therm placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat flag to enable charging of unknown batteries
+ * @enable_overshoot flag to enable VBAT overshoot control
+ * @auto_trig flag to enable auto adc trigger
+ * @fg_res resistance of FG resistor in 0.1mOhm
+ * @n_btypes number of elements in array bat_type
+ * @batt_id index of the identified battery in array bat_type
+ * @interval_charging charge alg cycle period time when charging (sec)
+ * @interval_not_charging charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis temperature hysteresis
+ * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
+ * @n_chg_out_curr number of elements in array chg_output_curr
+ * @n_chg_in_curr number of elements in array chg_input_curr
+ * @chg_output_curr charger output current level map
+ * @chg_input_curr charger input current level map
+ * @maxi maximization parameters
+ * @cap_levels capacity in percent for the different capacity levels
+ * @bat_type table of supported battery types
+ * @chg_params charger parameters
+ * @fg_params fuel gauge parameters
+ */
+struct abx500_bm_data {
+ int temp_under;
+ int temp_low;
+ int temp_high;
+ int temp_over;
+ int temp_now;
+ int temp_interval_chg;
+ int temp_interval_nochg;
+ int main_safety_tmr_h;
+ int usb_safety_tmr_h;
+ int bkup_bat_v;
+ int bkup_bat_i;
+ bool autopower_cfg;
+ bool ac_enabled;
+ bool usb_enabled;
+ bool usb_power_path;
+ bool no_maintenance;
+ bool capacity_scaling;
+ bool chg_unknown_bat;
+ bool enable_overshoot;
+ bool auto_trig;
+ enum abx500_adc_therm adc_therm;
+ int fg_res;
+ int n_btypes;
+ int batt_id;
+ int interval_charging;
+ int interval_not_charging;
+ int temp_hysteresis;
+ int gnd_lift_resistance;
+ int n_chg_out_curr;
+ int n_chg_in_curr;
+ int *chg_output_curr;
+ int *chg_input_curr;
+ const struct abx500_maxim_parameters *maxi;
+ const struct abx500_bm_capacity_levels *cap_levels;
+ struct abx500_battery_type *bat_type;
+ const struct abx500_bm_charger_parameters *chg_params;
+ const struct abx500_fg_parameters *fg_params;
+};
+
+enum {
+ NTC_EXTERNAL = 0,
+ NTC_INTERNAL,
+};
+
+int ab8500_bm_of_probe(struct device *dev,
+ struct device_node *np,
+ struct abx500_bm_data *bm);
+
+int abx500_set_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 value);
+int abx500_get_register_interruptible(struct device *dev, u8 bank, u8 reg,
+ u8 *value);
+int abx500_get_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs);
+int abx500_set_register_page_interruptible(struct device *dev, u8 bank,
+ u8 first_reg, u8 *regvals, u8 numregs);
+/**
+ * abx500_mask_and_set_register_interruptible() - Modifies selected bits of a
+ * target register
+ *
+ * @dev: The AB sub device.
+ * @bank: The i2c bank number.
+ * @bitmask: The bit mask to use.
+ * @bitvalues: The new bit values.
+ *
+ * Updates the value of an AB register:
+ * value -> ((value & ~bitmask) | (bitvalues & bitmask))
+ */
+int abx500_mask_and_set_register_interruptible(struct device *dev, u8 bank,
+ u8 reg, u8 bitmask, u8 bitvalues);
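To make the update rule concrete, a small worked example with hypothetical values: for a current register value of 0xA6, bitmask 0x0F and bitvalues 0x05, the result is (0xA6 & ~0x0F) | (0x05 & 0x0F) = 0xA0 | 0x05 = 0xA5, so only the bits selected by the mask change. In code:

static int example_update_low_nibble(struct device *dev, u8 bank, u8 reg)
{
	/* replace only the low nibble with 0x5, e.g. 0xA6 becomes 0xA5 */
	return abx500_mask_and_set_register_interruptible(dev, bank, reg,
							  0x0F, 0x05);
}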
+int abx500_get_chip_id(struct device *dev);
+int abx500_event_registers_startup_state_get(struct device *dev, u8 *event);
+int abx500_startup_irq_enabled(struct device *dev, unsigned int irq);
+
+struct abx500_ops {
+ int (*get_chip_id) (struct device *);
+ int (*get_register) (struct device *, u8, u8, u8 *);
+ int (*set_register) (struct device *, u8, u8, u8);
+ int (*get_register_page) (struct device *, u8, u8, u8 *, u8);
+ int (*set_register_page) (struct device *, u8, u8, u8 *, u8);
+ int (*mask_and_set_register) (struct device *, u8, u8, u8, u8);
+ int (*event_registers_startup_state_get) (struct device *, u8 *);
+ int (*startup_irq_enabled) (struct device *, unsigned int);
+ void (*dump_all_banks) (struct device *);
+};
+
+int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
+void abx500_remove_ops(struct device *dev);
+#endif
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
new file mode 100644
index 000000000..12a5b3969
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -0,0 +1,478 @@
+/*
+ * Copyright ST-Ericsson 2012.
+ *
+ * Author: Arun Murthy <arun.murthy@stericsson.com>
+ * Licensed under GPLv2.
+ */
+
+#ifndef _AB8500_BM_H
+#define _AB8500_BM_H
+
+#include <linux/kernel.h>
+#include <linux/mfd/abx500.h>
+
+/*
+ * System control 2 register offsets.
+ * bank = 0x02
+ */
+#define AB8500_MAIN_WDOG_CTRL_REG 0x01
+#define AB8500_LOW_BAT_REG 0x03
+#define AB8500_BATT_OK_REG 0x04
+/*
+ * USB/ULPI register offsets
+ * Bank : 0x5
+ */
+#define AB8500_USB_LINE_STAT_REG 0x80
+#define AB8500_USB_LINE_CTRL2_REG 0x82
+#define AB8500_USB_LINK1_STAT_REG 0x94
+
+/*
+ * Charger / status register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_STATUS1_REG 0x00
+#define AB8500_CH_STATUS2_REG 0x01
+#define AB8500_CH_USBCH_STAT1_REG 0x02
+#define AB8500_CH_USBCH_STAT2_REG 0x03
+#define AB8540_CH_USBCH_STAT3_REG 0x04
+#define AB8500_CH_STAT_REG 0x05
+
+/*
+ * Charger / control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_CH_VOLT_LVL_REG 0x40
+#define AB8500_CH_VOLT_LVL_MAX_REG 0x41 /*Only in Cut2.0*/
+#define AB8500_CH_OPT_CRNTLVL_REG 0x42
+#define AB8500_CH_OPT_CRNTLVL_MAX_REG 0x43 /*Only in Cut2.0*/
+#define AB8500_CH_WD_TIMER_REG 0x50
+#define AB8500_CHARG_WD_CTRL 0x51
+#define AB8500_BTEMP_HIGH_TH 0x52
+#define AB8500_LED_INDICATOR_PWM_CTRL 0x53
+#define AB8500_LED_INDICATOR_PWM_DUTY 0x54
+#define AB8500_BATT_OVV 0x55
+#define AB8500_CHARGER_CTRL 0x56
+#define AB8500_BAT_CTRL_CURRENT_SOURCE 0x60 /*Only in Cut2.0*/
+
+/*
+ * Charger / main control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_MCH_CTRL1 0x80
+#define AB8500_MCH_CTRL2 0x81
+#define AB8500_MCH_IPT_CURLVL_REG 0x82
+#define AB8500_CH_WD_REG 0x83
+
+/*
+ * Charger / USB control register offsets
+ * Bank : 0x0B
+ */
+#define AB8500_USBCH_CTRL1_REG 0xC0
+#define AB8500_USBCH_CTRL2_REG 0xC1
+#define AB8500_USBCH_IPT_CRNTLVL_REG 0xC2
+#define AB8540_USB_PP_MODE_REG 0xC5
+#define AB8540_USB_PP_CHR_REG 0xC6
+
+/*
+ * Gas Gauge register offsets
+ * Bank : 0x0C
+ */
+#define AB8500_GASG_CC_CTRL_REG 0x00
+#define AB8500_GASG_CC_ACCU1_REG 0x01
+#define AB8500_GASG_CC_ACCU2_REG 0x02
+#define AB8500_GASG_CC_ACCU3_REG 0x03
+#define AB8500_GASG_CC_ACCU4_REG 0x04
+#define AB8500_GASG_CC_SMPL_CNTRL_REG 0x05
+#define AB8500_GASG_CC_SMPL_CNTRH_REG 0x06
+#define AB8500_GASG_CC_SMPL_CNVL_REG 0x07
+#define AB8500_GASG_CC_SMPL_CNVH_REG 0x08
+#define AB8500_GASG_CC_CNTR_AVGOFF_REG 0x09
+#define AB8500_GASG_CC_OFFSET_REG 0x0A
+#define AB8500_GASG_CC_NCOV_ACCU 0x10
+#define AB8500_GASG_CC_NCOV_ACCU_CTRL 0x11
+#define AB8500_GASG_CC_NCOV_ACCU_LOW 0x12
+#define AB8500_GASG_CC_NCOV_ACCU_MED 0x13
+#define AB8500_GASG_CC_NCOV_ACCU_HIGH 0x14
+
+/*
+ * Interrupt register offsets
+ * Bank : 0x0E
+ */
+#define AB8500_IT_SOURCE2_REG 0x01
+#define AB8500_IT_SOURCE21_REG 0x14
+
+/*
+ * RTC register offsets
+ * Bank: 0x0F
+ */
+#define AB8500_RTC_BACKUP_CHG_REG 0x0C
+#define AB8500_RTC_CC_CONF_REG 0x01
+#define AB8500_RTC_CTRL_REG 0x0B
+#define AB8500_RTC_CTRL1_REG 0x11
+
+/*
+ * OTP register offsets
+ * Bank : 0x15
+ */
+#define AB8500_OTP_CONF_15 0x0E
+
+/* GPADC constants from AB8500 spec, UM0836 */
+#define ADC_RESOLUTION 1024
+#define ADC_CH_MAIN_MIN 0
+#define ADC_CH_MAIN_MAX 20030
+#define ADC_CH_VBUS_MIN 0
+#define ADC_CH_VBUS_MAX 20030
+#define ADC_CH_VBAT_MIN 2300
+#define ADC_CH_VBAT_MAX 4800
+#define ADC_CH_BKBAT_MIN 0
+#define ADC_CH_BKBAT_MAX 3200
+
+/* Main charger input current */
+#define MAIN_CH_IP_CUR_0P9A 0x80
+#define MAIN_CH_IP_CUR_1P0A 0x90
+#define MAIN_CH_IP_CUR_1P1A 0xA0
+#define MAIN_CH_IP_CUR_1P2A 0xB0
+#define MAIN_CH_IP_CUR_1P3A 0xC0
+#define MAIN_CH_IP_CUR_1P4A 0xD0
+#define MAIN_CH_IP_CUR_1P5A 0xE0
+
+/* ChVoltLevel */
+#define CH_VOL_LVL_3P5 0x00
+#define CH_VOL_LVL_4P0 0x14
+#define CH_VOL_LVL_4P05 0x16
+#define CH_VOL_LVL_4P1 0x1B
+#define CH_VOL_LVL_4P15 0x20
+#define CH_VOL_LVL_4P2 0x25
+#define CH_VOL_LVL_4P6 0x4D
+
+/* ChOutputCurrentLevel */
+#define CH_OP_CUR_LVL_0P1 0x00
+#define CH_OP_CUR_LVL_0P2 0x01
+#define CH_OP_CUR_LVL_0P3 0x02
+#define CH_OP_CUR_LVL_0P4 0x03
+#define CH_OP_CUR_LVL_0P5 0x04
+#define CH_OP_CUR_LVL_0P6 0x05
+#define CH_OP_CUR_LVL_0P7 0x06
+#define CH_OP_CUR_LVL_0P8 0x07
+#define CH_OP_CUR_LVL_0P9 0x08
+#define CH_OP_CUR_LVL_1P4 0x0D
+#define CH_OP_CUR_LVL_1P5 0x0E
+#define CH_OP_CUR_LVL_1P6 0x0F
+#define CH_OP_CUR_LVL_2P 0x3F
+
+/* BTEMP High thermal limits */
+#define BTEMP_HIGH_TH_57_0 0x00
+#define BTEMP_HIGH_TH_52 0x01
+#define BTEMP_HIGH_TH_57_1 0x02
+#define BTEMP_HIGH_TH_62 0x03
+
+/* current is mA */
+#define USB_0P1A 100
+#define USB_0P2A 200
+#define USB_0P3A 300
+#define USB_0P4A 400
+#define USB_0P5A 500
+
+#define LOW_BAT_3P1V 0x20
+#define LOW_BAT_2P3V 0x00
+#define LOW_BAT_RESET 0x01
+#define LOW_BAT_ENABLE 0x01
+
+/* Backup battery constants */
+#define BUP_ICH_SEL_50UA 0x00
+#define BUP_ICH_SEL_150UA 0x04
+#define BUP_ICH_SEL_300UA 0x08
+#define BUP_ICH_SEL_700UA 0x0C
+
+enum bup_vch_sel {
+ BUP_VCH_SEL_2P5V,
+ BUP_VCH_SEL_2P6V,
+ BUP_VCH_SEL_2P8V,
+ BUP_VCH_SEL_3P1V,
+ /*
+ * Note that the following 5 values (2.7 V, 2.9 V, 3.0 V, 3.2 V, 3.3 V)
+ * are only available on ab8540. These 5 voltages cannot be
+ * selected on ab8500/ab8505/ab9540.
+ */
+ BUP_VCH_SEL_2P7V,
+ BUP_VCH_SEL_2P9V,
+ BUP_VCH_SEL_3P0V,
+ BUP_VCH_SEL_3P2V,
+ BUP_VCH_SEL_3P3V,
+};
+
+#define BUP_VCH_RANGE 0x02
+#define VBUP33_VRTCN 0x01
+
+/* Battery OVV constants */
+#define BATT_OVV_ENA 0x02
+#define BATT_OVV_TH_3P7 0x00
+#define BATT_OVV_TH_4P75 0x01
+
+/* A value to indicate over voltage */
+#define BATT_OVV_VALUE 4750
+
+/* VBUS OVV constants */
+#define VBUS_OVV_SELECT_MASK 0x78
+#define VBUS_OVV_SELECT_5P6V 0x00
+#define VBUS_OVV_SELECT_5P7V 0x08
+#define VBUS_OVV_SELECT_5P8V 0x10
+#define VBUS_OVV_SELECT_5P9V 0x18
+#define VBUS_OVV_SELECT_6P0V 0x20
+#define VBUS_OVV_SELECT_6P1V 0x28
+#define VBUS_OVV_SELECT_6P2V 0x30
+#define VBUS_OVV_SELECT_6P3V 0x38
+
+#define VBUS_AUTO_IN_CURR_LIM_ENA 0x04
+
+/* Fuel Gauge constants */
+#define RESET_ACCU 0x02
+#define READ_REQ 0x01
+#define CC_DEEP_SLEEP_ENA 0x02
+#define CC_PWR_UP_ENA 0x01
+#define CC_SAMPLES_40 0x28
+#define RD_NCONV_ACCU_REQ 0x01
+#define CC_CALIB 0x08
+#define CC_INTAVGOFFSET_ENA 0x10
+#define CC_MUXOFFSET 0x80
+#define CC_INT_CAL_N_AVG_MASK 0x60
+#define CC_INT_CAL_SAMPLES_16 0x40
+#define CC_INT_CAL_SAMPLES_8 0x20
+#define CC_INT_CAL_SAMPLES_4 0x00
+
+/* RTC constants */
+#define RTC_BUP_CH_ENA 0x10
+
+/* BatCtrl Current Source Constants */
+#define BAT_CTRL_7U_ENA 0x01
+#define BAT_CTRL_20U_ENA 0x02
+#define BAT_CTRL_18U_ENA 0x01
+#define BAT_CTRL_16U_ENA 0x02
+#define BAT_CTRL_60U_ENA 0x01
+#define BAT_CTRL_120U_ENA 0x02
+#define BAT_CTRL_CMP_ENA 0x04
+#define FORCE_BAT_CTRL_CMP_HIGH 0x08
+#define BAT_CTRL_PULL_UP_ENA 0x10
+
+/* Battery type */
+#define BATTERY_UNKNOWN 00
+
+/* Registers for pcut feature in ab8505 and ab9540 */
+#define AB8505_RTC_PCUT_CTL_STATUS_REG 0x12
+#define AB8505_RTC_PCUT_TIME_REG 0x13
+#define AB8505_RTC_PCUT_MAX_TIME_REG 0x14
+#define AB8505_RTC_PCUT_FLAG_TIME_REG 0x15
+#define AB8505_RTC_PCUT_RESTART_REG 0x16
+#define AB8505_RTC_PCUT_DEBOUNCE_REG 0x17
+
+/* USB Power Path constants for ab8540 */
+#define BUS_VSYS_VOL_SELECT_MASK 0x06
+#define BUS_VSYS_VOL_SELECT_3P6V 0x00
+#define BUS_VSYS_VOL_SELECT_3P325V 0x02
+#define BUS_VSYS_VOL_SELECT_3P9V 0x04
+#define BUS_VSYS_VOL_SELECT_4P3V 0x06
+#define BUS_POWER_PATH_MODE_ENA 0x01
+#define BUS_PP_PRECHG_CURRENT_MASK 0x0E
+#define BUS_POWER_PATH_PRECHG_ENA 0x01
+
+/**
+ * struct res_to_temp - defines one point in a temp to res curve. To
+ * be used in battery packs that combine the identification resistor with an
+ * NTC resistor.
+ * @temp: battery pack temperature in Celsius
+ * @resist: NTC resistor net total resistance
+ */
+struct res_to_temp {
+ int temp;
+ int resist;
+};
+
+/**
+ * struct batres_vs_temp - defines one point in a temp vs battery internal
+ * resistance curve.
+ * @temp: battery pack temperature in Celsius
+ * @resist: battery internal resistance in mOhm
+ */
+struct batres_vs_temp {
+ int temp;
+ int resist;
+};
+
+/* Forward declaration */
+struct ab8500_fg;
+
+/**
+ * struct ab8500_fg_parameters - Fuel gauge algorithm parameters, in seconds
+ * if not specified
+ * @recovery_sleep_timer: Time between measurements while recovering
+ * @recovery_total_time: Total recovery time
+ * @init_timer: Measurement interval during startup
+ * @init_discard_time: Time we discard voltage measurement at startup
+ * @init_total_time: Total init time during startup
+ * @high_curr_time: Time current has to be high to go to recovery
+ * @accu_charging: FG accumulation time while charging
+ * @accu_high_curr: FG accumulation time in high current mode
+ * @high_curr_threshold: High current threshold, in mA
+ * @lowbat_threshold: Low battery threshold, in mV
+ * @battok_falling_th_sel0: Threshold in mV for battOk signal sel0.
+ * Resolution in 50 mV steps.
+ * @battok_raising_th_sel1: Threshold in mV for battOk signal sel1.
+ * Resolution in 50 mV steps.
+ * @user_cap_limit: Capacity reported from user must be within this
+ * limit to be considered sane, in percentage
+ * points.
+ * @maint_thres: This is the threshold where we stop reporting
+ * battery full while in maintenance, in percent
+ * @pcut_enable: Enable power cut feature in ab8505
+ * @pcut_max_time: Max time threshold
+ * @pcut_flag_time: Flagtime threshold
+ * @pcut_max_restart: Max number of restarts
+ * @pcut_debunce_time: Sets battery debounce time
+ */
+struct ab8500_fg_parameters {
+ int recovery_sleep_timer;
+ int recovery_total_time;
+ int init_timer;
+ int init_discard_time;
+ int init_total_time;
+ int high_curr_time;
+ int accu_charging;
+ int accu_high_curr;
+ int high_curr_threshold;
+ int lowbat_threshold;
+ int battok_falling_th_sel0;
+ int battok_raising_th_sel1;
+ int user_cap_limit;
+ int maint_thres;
+ bool pcut_enable;
+ u8 pcut_max_time;
+ u8 pcut_flag_time;
+ u8 pcut_max_restart;
+ u8 pcut_debunce_time;
+};
+
+/**
+ * struct ab8500_maxim_parameters - charger maximization, used by board config
+ * @ena_maxi: Enable maximization for this battery type
+ * @chg_curr: Maximum charger current allowed
+ * @wait_cycles: cycles to wait before setting charger current
+ * @charger_curr_step: delta between two charger current settings (mA)
+ */
+struct ab8500_maxim_parameters {
+ bool ena_maxi;
+ int chg_curr;
+ int wait_cycles;
+ int charger_curr_step;
+};
+
+/**
+ * struct ab8500_bm_capacity_levels - ab8500 capacity level data
+ * @critical: critical capacity level in percent
+ * @low: low capacity level in percent
+ * @normal: normal capacity level in percent
+ * @high: high capacity level in percent
+ * @full: full capacity level in percent
+ */
+struct ab8500_bm_capacity_levels {
+ int critical;
+ int low;
+ int normal;
+ int high;
+ int full;
+};
+
+/**
+ * struct ab8500_bm_charger_parameters - Charger specific parameters
+ * @usb_volt_max: maximum allowed USB charger voltage in mV
+ * @usb_curr_max: maximum allowed USB charger current in mA
+ * @ac_volt_max: maximum allowed AC charger voltage in mV
+ * @ac_curr_max: maximum allowed AC charger current in mA
+ */
+struct ab8500_bm_charger_parameters {
+ int usb_volt_max;
+ int usb_curr_max;
+ int ac_volt_max;
+ int ac_curr_max;
+};
+
+/**
+ * struct ab8500_bm_data - ab8500 battery management data
+ * @temp_under under this temp, charging is stopped
+ * @temp_low between this temp and temp_under charging is reduced
+ * @temp_high between this temp and temp_over charging is reduced
+ * @temp_over over this temp, charging is stopped
+ * @temp_interval_chg temperature measurement interval in s when charging
+ * @temp_interval_nochg temperature measurement interval in s when not charging
+ * @main_safety_tmr_h safety timer for main charger
+ * @usb_safety_tmr_h safety timer for usb charger
+ * @bkup_bat_v voltage which we charge the backup battery with
+ * @bkup_bat_i current which we charge the backup battery with
+ * @no_maintenance indicates that maintenance charging is disabled
+ * @capacity_scaling indicates whether capacity scaling is to be used
+ * @adc_therm placement of thermistor, batctrl or battemp adc
+ * @chg_unknown_bat flag to enable charging of unknown batteries
+ * @enable_overshoot flag to enable VBAT overshoot control
+ * @fg_res resistance of FG resistor in 0.1mOhm
+ * @n_btypes number of elements in array bat_type
+ * @batt_id index of the identified battery in array bat_type
+ * @interval_charging charge alg cycle period time when charging (sec)
+ * @interval_not_charging charge alg cycle period time when not charging (sec)
+ * @temp_hysteresis temperature hysteresis
+ * @gnd_lift_resistance Battery ground to phone ground resistance (mOhm)
+ * @maxi: maximization parameters
+ * @cap_levels capacity in percent for the different capacity levels
+ * @bat_type table of supported battery types
+ * @chg_params charger parameters
+ * @fg_params fuel gauge parameters
+ */
+struct ab8500_bm_data {
+ int temp_under;
+ int temp_low;
+ int temp_high;
+ int temp_over;
+ int temp_interval_chg;
+ int temp_interval_nochg;
+ int main_safety_tmr_h;
+ int usb_safety_tmr_h;
+ int bkup_bat_v;
+ int bkup_bat_i;
+ bool no_maintenance;
+ bool capacity_scaling;
+ bool chg_unknown_bat;
+ bool enable_overshoot;
+ enum abx500_adc_therm adc_therm;
+ int fg_res;
+ int n_btypes;
+ int batt_id;
+ int interval_charging;
+ int interval_not_charging;
+ int temp_hysteresis;
+ int gnd_lift_resistance;
+ const struct ab8500_maxim_parameters *maxi;
+ const struct ab8500_bm_capacity_levels *cap_levels;
+ const struct ab8500_bm_charger_parameters *chg_params;
+ const struct ab8500_fg_parameters *fg_params;
+};
+
+struct ab8500_btemp;
+struct ab8500_gpadc;
+struct ab8500_fg;
+
+#ifdef CONFIG_AB8500_BM
+extern struct abx500_bm_data ab8500_bm_data;
+
+void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
+struct ab8500_btemp *ab8500_btemp_get(void);
+int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
+int ab8500_btemp_get_temp(struct ab8500_btemp *btemp);
+struct ab8500_fg *ab8500_fg_get(void);
+int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
+int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
+int ab8500_fg_inst_curr_finalize(struct ab8500_fg *di, int *res);
+int ab8500_fg_inst_curr_started(struct ab8500_fg *di);
+int ab8500_fg_inst_curr_done(struct ab8500_fg *di);
+
+#else
+static struct abx500_bm_data ab8500_bm_data;
+#endif
+#endif /* _AB8500_BM_H */
diff --git a/include/linux/mfd/abx500/ab8500-codec.h b/include/linux/mfd/abx500/ab8500-codec.h
new file mode 100644
index 000000000..d7079413d
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500-codec.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef AB8500_CORE_CODEC_H
+#define AB8500_CORE_CODEC_H
+
+/* Mic-types */
+enum amic_type {
+ AMIC_TYPE_SINGLE_ENDED,
+ AMIC_TYPE_DIFFERENTIAL
+};
+
+/* Mic-biases */
+enum amic_micbias {
+ AMIC_MICBIAS_VAMIC1,
+ AMIC_MICBIAS_VAMIC2,
+ AMIC_MICBIAS_UNKNOWN
+};
+
+/* Bias-voltage */
+enum ear_cm_voltage {
+ EAR_CMV_0_95V,
+ EAR_CMV_1_10V,
+ EAR_CMV_1_27V,
+ EAR_CMV_1_58V,
+ EAR_CMV_UNKNOWN
+};
+
+/* Analog microphone settings */
+struct amic_settings {
+ enum amic_type mic1_type;
+ enum amic_type mic2_type;
+ enum amic_micbias mic1a_micbias;
+ enum amic_micbias mic1b_micbias;
+ enum amic_micbias mic2_micbias;
+};
+
+/* Platform data structure for the audio-parts of the AB8500 */
+struct ab8500_codec_platform_data {
+ struct amic_settings amics;
+ enum ear_cm_voltage ear_cmv;
+};
+
+#endif
diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h
new file mode 100644
index 000000000..49ded0010
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500-gpadc.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2010 ST-Ericsson SA
+ * Licensed under GPLv2.
+ *
+ * Author: Arun R Murthy <arun.murthy@stericsson.com>
+ * Author: Daniel Willerud <daniel.willerud@stericsson.com>
+ * Author: M'boumba Cedric Madianga <cedric.madianga@stericsson.com>
+ */
+
+#ifndef _AB8500_GPADC_H
+#define _AB8500_GPADC_H
+
+/* GPADC source: from the datasheet (ADCSwSel[4:0] in GPADCCtrl2
+ * and ADCHwSel[4:0] in GPADCCtrl3) */
+#define BAT_CTRL 0x01
+#define BTEMP_BALL 0x02
+#define MAIN_CHARGER_V 0x03
+#define ACC_DETECT1 0x04
+#define ACC_DETECT2 0x05
+#define ADC_AUX1 0x06
+#define ADC_AUX2 0x07
+#define MAIN_BAT_V 0x08
+#define VBUS_V 0x09
+#define MAIN_CHARGER_C 0x0A
+#define USB_CHARGER_C 0x0B
+#define BK_BAT_V 0x0C
+#define DIE_TEMP 0x0D
+#define USB_ID 0x0E
+#define XTAL_TEMP 0x12
+#define VBAT_TRUE_MEAS 0x13
+#define BAT_CTRL_AND_IBAT 0x1C
+#define VBAT_MEAS_AND_IBAT 0x1D
+#define VBAT_TRUE_MEAS_AND_IBAT 0x1E
+#define BAT_TEMP_AND_IBAT 0x1F
+
+/* Virtual channel used only for ibat conversion to amperes.
+ * Battery current conversion (ibat) cannot be requested as a single conversion
+ * but is always done in combination with other input requests.
+ */
+#define IBAT_VIRTUAL_CHANNEL 0xFF
+
+#define SAMPLE_1 1
+#define SAMPLE_4 4
+#define SAMPLE_8 8
+#define SAMPLE_16 16
+#define RISING_EDGE 0
+#define FALLING_EDGE 1
+
+/* Arbitrary ADC conversion type constants */
+#define ADC_SW 0
+#define ADC_HW 1
+
+struct ab8500_gpadc;
+
+struct ab8500_gpadc *ab8500_gpadc_get(char *name);
+int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
+ u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
+static inline int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel)
+{
+ return ab8500_gpadc_sw_hw_convert(gpadc, channel,
+ SAMPLE_16, 0, 0, ADC_SW);
+}
+
+int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
+ u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
+int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
+ u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
+ int *ibat);
+int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
+ u8 channel, int ad_value);
+void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
+ u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
+ u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h);
+
+#endif /* _AB8500_GPADC_H */
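Clients look up a GPADC instance by name and then trigger conversions with ab8500_gpadc_sw_hw_convert() or the ab8500_gpadc_convert() wrapper above. A minimal sketch, assuming the conventional "ab8500-gpadc.0" instance name used on ux500 boards and that a missing instance is reported via ERR_PTR():

#include <linux/err.h>
#include <linux/mfd/abx500/ab8500-gpadc.h>

/* Illustrative only: read the main battery voltage channel. */
static int example_read_main_battery(void)
{
	struct ab8500_gpadc *gpadc;

	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");	/* assumed name */
	if (IS_ERR(gpadc))
		return PTR_ERR(gpadc);

	/* SW-triggered conversion with 16 averaged samples (see wrapper). */
	return ab8500_gpadc_convert(gpadc, MAIN_BAT_V);
}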
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
new file mode 100644
index 000000000..689312745
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> for ST Ericsson.
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __AB8500_SYSCTRL_H
+#define __AB8500_SYSCTRL_H
+
+#include <linux/bitops.h>
+
+#ifdef CONFIG_AB8500_CORE
+
+int ab8500_sysctrl_read(u16 reg, u8 *value);
+int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value);
+
+#else
+
+static inline int ab8500_sysctrl_read(u16 reg, u8 *value)
+{
+ return 0;
+}
+
+static inline int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
+{
+ return 0;
+}
+
+#endif /* CONFIG_AB8500_CORE */
+
+static inline int ab8500_sysctrl_set(u16 reg, u8 bits)
+{
+ return ab8500_sysctrl_write(reg, bits, bits);
+}
+
+static inline int ab8500_sysctrl_clear(u16 reg, u8 bits)
+{
+ return ab8500_sysctrl_write(reg, bits, 0);
+}
+
+/* Configuration data for SysClkReq1RfClkBuf - SysClkReq8RfClkBuf */
+struct ab8500_sysctrl_platform_data {
+ u8 initial_req_buf_config[8];
+ u16 (*reboot_reason_code)(const char *cmd);
+};
+
+/* Registers */
+#define AB8500_TURNONSTATUS 0x100
+#define AB8500_RESETSTATUS 0x101
+#define AB8500_PONKEY1PRESSSTATUS 0x102
+#define AB8500_SYSCLKREQSTATUS 0x142
+#define AB8500_STW4500CTRL1 0x180
+#define AB8500_STW4500CTRL2 0x181
+#define AB8500_STW4500CTRL3 0x200
+#define AB8500_MAINWDOGCTRL 0x201
+#define AB8500_MAINWDOGTIMER 0x202
+#define AB8500_LOWBAT 0x203
+#define AB8500_BATTOK 0x204
+#define AB8500_SYSCLKTIMER 0x205
+#define AB8500_SMPSCLKCTRL 0x206
+#define AB8500_SMPSCLKSEL1 0x207
+#define AB8500_SMPSCLKSEL2 0x208
+#define AB8500_SMPSCLKSEL3 0x209
+#define AB8500_SYSULPCLKCONF 0x20A
+#define AB8500_SYSULPCLKCTRL1 0x20B
+#define AB8500_SYSCLKCTRL 0x20C
+#define AB8500_SYSCLKREQ1VALID 0x20D
+#define AB8500_SYSTEMCTRLSUP 0x20F
+#define AB8500_SYSCLKREQ1RFCLKBUF 0x210
+#define AB8500_SYSCLKREQ2RFCLKBUF 0x211
+#define AB8500_SYSCLKREQ3RFCLKBUF 0x212
+#define AB8500_SYSCLKREQ4RFCLKBUF 0x213
+#define AB8500_SYSCLKREQ5RFCLKBUF 0x214
+#define AB8500_SYSCLKREQ6RFCLKBUF 0x215
+#define AB8500_SYSCLKREQ7RFCLKBUF 0x216
+#define AB8500_SYSCLKREQ8RFCLKBUF 0x217
+#define AB8500_DITHERCLKCTRL 0x220
+#define AB8500_SWATCTRL 0x230
+#define AB8500_HIQCLKCTRL 0x232
+#define AB8500_VSIMSYSCLKCTRL 0x233
+#define AB9540_SYSCLK12BUFCTRL 0x234
+#define AB9540_SYSCLK12CONFCTRL 0x235
+#define AB9540_SYSCLK12BUFCTRL2 0x236
+#define AB9540_SYSCLK12BUF1VALID 0x237
+#define AB9540_SYSCLK12BUF2VALID 0x238
+#define AB9540_SYSCLK12BUF3VALID 0x239
+#define AB9540_SYSCLK12BUF4VALID 0x23A
+
+/* Bits */
+#define AB8500_TURNONSTATUS_PORNVBAT BIT(0)
+#define AB8500_TURNONSTATUS_PONKEY1DBF BIT(1)
+#define AB8500_TURNONSTATUS_PONKEY2DBF BIT(2)
+#define AB8500_TURNONSTATUS_RTCALARM BIT(3)
+#define AB8500_TURNONSTATUS_MAINCHDET BIT(4)
+#define AB8500_TURNONSTATUS_VBUSDET BIT(5)
+#define AB8500_TURNONSTATUS_USBIDDETECT BIT(6)
+
+#define AB8500_RESETSTATUS_RESETN4500NSTATUS BIT(0)
+#define AB8500_RESETSTATUS_SWRESETN4500NSTATUS BIT(2)
+
+#define AB8500_PONKEY1PRESSSTATUS_PONKEY1PRESSTIME_MASK 0x7F
+#define AB8500_PONKEY1PRESSSTATUS_PONKEY1PRESSTIME_SHIFT 0
+
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ1STATUS BIT(0)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ2STATUS BIT(1)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ3STATUS BIT(2)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ4STATUS BIT(3)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ5STATUS BIT(4)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ6STATUS BIT(5)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ7STATUS BIT(6)
+#define AB8500_SYSCLKREQSTATUS_SYSCLKREQ8STATUS BIT(7)
+
+#define AB8500_STW4500CTRL1_SWOFF BIT(0)
+#define AB8500_STW4500CTRL1_SWRESET4500N BIT(1)
+#define AB8500_STW4500CTRL1_THDB8500SWOFF BIT(2)
+
+#define AB8500_STW4500CTRL2_RESETNVAUX1VALID BIT(0)
+#define AB8500_STW4500CTRL2_RESETNVAUX2VALID BIT(1)
+#define AB8500_STW4500CTRL2_RESETNVAUX3VALID BIT(2)
+#define AB8500_STW4500CTRL2_RESETNVMODVALID BIT(3)
+#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY1VALID BIT(4)
+#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY2VALID BIT(5)
+#define AB8500_STW4500CTRL2_RESETNVEXTSUPPLY3VALID BIT(6)
+#define AB8500_STW4500CTRL2_RESETNVSMPS1VALID BIT(7)
+
+#define AB8500_STW4500CTRL3_CLK32KOUT2DIS BIT(0)
+#define AB8500_STW4500CTRL3_RESETAUDN BIT(1)
+#define AB8500_STW4500CTRL3_RESETDENCN BIT(2)
+#define AB8500_STW4500CTRL3_THSDENA BIT(3)
+
+#define AB8500_MAINWDOGCTRL_MAINWDOGENA BIT(0)
+#define AB8500_MAINWDOGCTRL_MAINWDOGKICK BIT(1)
+#define AB8500_MAINWDOGCTRL_WDEXPTURNONVALID BIT(4)
+
+#define AB8500_MAINWDOGTIMER_MAINWDOGTIMER_MASK 0x7F
+#define AB8500_MAINWDOGTIMER_MAINWDOGTIMER_SHIFT 0
+
+#define AB8500_LOWBAT_LOWBATENA BIT(0)
+#define AB8500_LOWBAT_LOWBAT_MASK 0x7E
+#define AB8500_LOWBAT_LOWBAT_SHIFT 1
+
+#define AB8500_BATTOK_BATTOKSEL0THF_MASK 0x0F
+#define AB8500_BATTOK_BATTOKSEL0THF_SHIFT 0
+#define AB8500_BATTOK_BATTOKSEL1THF_MASK 0xF0
+#define AB8500_BATTOK_BATTOKSEL1THF_SHIFT 4
+
+#define AB8500_SYSCLKTIMER_SYSCLKTIMER_MASK 0x0F
+#define AB8500_SYSCLKTIMER_SYSCLKTIMER_SHIFT 0
+#define AB8500_SYSCLKTIMER_SYSCLKTIMERADJ_MASK 0xF0
+#define AB8500_SYSCLKTIMER_SYSCLKTIMERADJ_SHIFT 4
+
+#define AB8500_SMPSCLKCTRL_SMPSCLKINTSEL_MASK 0x03
+#define AB8500_SMPSCLKCTRL_SMPSCLKINTSEL_SHIFT 0
+#define AB8500_SMPSCLKCTRL_3M2CLKINTENA BIT(2)
+
+#define AB8500_SMPSCLKSEL1_VARMCLKSEL_MASK 0x07
+#define AB8500_SMPSCLKSEL1_VARMCLKSEL_SHIFT 0
+#define AB8500_SMPSCLKSEL1_VAPECLKSEL_MASK 0x38
+#define AB8500_SMPSCLKSEL1_VAPECLKSEL_SHIFT 3
+
+#define AB8500_SMPSCLKSEL2_VMODCLKSEL_MASK 0x07
+#define AB8500_SMPSCLKSEL2_VMODCLKSEL_SHIFT 0
+#define AB8500_SMPSCLKSEL2_VSMPS1CLKSEL_MASK 0x38
+#define AB8500_SMPSCLKSEL2_VSMPS1CLKSEL_SHIFT 3
+
+#define AB8500_SMPSCLKSEL3_VSMPS2CLKSEL_MASK 0x07
+#define AB8500_SMPSCLKSEL3_VSMPS2CLKSEL_SHIFT 0
+#define AB8500_SMPSCLKSEL3_VSMPS3CLKSEL_MASK 0x38
+#define AB8500_SMPSCLKSEL3_VSMPS3CLKSEL_SHIFT 3
+
+#define AB8500_SYSULPCLKCONF_ULPCLKCONF_MASK 0x03
+#define AB8500_SYSULPCLKCONF_ULPCLKCONF_SHIFT 0
+#define AB8500_SYSULPCLKCONF_CLK27MHZSTRE BIT(2)
+#define AB8500_SYSULPCLKCONF_TVOUTCLKDELN BIT(3)
+#define AB8500_SYSULPCLKCONF_TVOUTCLKINV BIT(4)
+#define AB8500_SYSULPCLKCONF_ULPCLKSTRE BIT(5)
+#define AB8500_SYSULPCLKCONF_CLK27MHZBUFENA BIT(6)
+#define AB8500_SYSULPCLKCONF_CLK27MHZPDENA BIT(7)
+
+#define AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_MASK 0x03
+#define AB8500_SYSULPCLKCTRL1_SYSULPCLKINTSEL_SHIFT 0
+#define AB8500_SYSULPCLKCTRL1_ULPCLKREQ BIT(2)
+#define AB8500_SYSULPCLKCTRL1_4500SYSCLKREQ BIT(3)
+#define AB8500_SYSULPCLKCTRL1_AUDIOCLKENA BIT(4)
+#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF2REQ BIT(5)
+#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF3REQ BIT(6)
+#define AB8500_SYSULPCLKCTRL1_SYSCLKBUF4REQ BIT(7)
+
+#define AB8500_SYSCLKCTRL_TVOUTPLLENA BIT(0)
+#define AB8500_SYSCLKCTRL_TVOUTCLKENA BIT(1)
+#define AB8500_SYSCLKCTRL_USBCLKENA BIT(2)
+
+#define AB8500_SYSCLKREQ1VALID_SYSCLKREQ1VALID BIT(0)
+#define AB8500_SYSCLKREQ1VALID_ULPCLKREQ1VALID BIT(1)
+#define AB8500_SYSCLKREQ1VALID_USBSYSCLKREQ1VALID BIT(2)
+
+#define AB8500_SYSTEMCTRLSUP_EXTSUP12LPNCLKSEL_MASK 0x03
+#define AB8500_SYSTEMCTRLSUP_EXTSUP12LPNCLKSEL_SHIFT 0
+#define AB8500_SYSTEMCTRLSUP_EXTSUP3LPNCLKSEL_MASK 0x0C
+#define AB8500_SYSTEMCTRLSUP_EXTSUP3LPNCLKSEL_SHIFT 2
+#define AB8500_SYSTEMCTRLSUP_INTDB8500NOD BIT(4)
+
+#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ1RFCLKBUF_SYSCLKREQ1RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ2RFCLKBUF_SYSCLKREQ2RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ3RFCLKBUF_SYSCLKREQ3RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ4RFCLKBUF_SYSCLKREQ4RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ5RFCLKBUF_SYSCLKREQ5RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ6RFCLKBUF_SYSCLKREQ6RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ7RFCLKBUF_SYSCLKREQ7RFCLKBUF4 BIT(4)
+
+#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF2 BIT(2)
+#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF3 BIT(3)
+#define AB8500_SYSCLKREQ8RFCLKBUF_SYSCLKREQ8RFCLKBUF4 BIT(4)
+
+#define AB8500_DITHERCLKCTRL_VARMDITHERENA BIT(0)
+#define AB8500_DITHERCLKCTRL_VSMPS3DITHERENA BIT(1)
+#define AB8500_DITHERCLKCTRL_VSMPS1DITHERENA BIT(2)
+#define AB8500_DITHERCLKCTRL_VSMPS2DITHERENA BIT(3)
+#define AB8500_DITHERCLKCTRL_VMODDITHERENA BIT(4)
+#define AB8500_DITHERCLKCTRL_VAPEDITHERENA BIT(5)
+#define AB8500_DITHERCLKCTRL_DITHERDEL_MASK 0xC0
+#define AB8500_DITHERCLKCTRL_DITHERDEL_SHIFT 6
+
+#define AB8500_SWATCTRL_UPDATERF BIT(0)
+#define AB8500_SWATCTRL_SWATENABLE BIT(1)
+#define AB8500_SWATCTRL_RFOFFTIMER_MASK 0x1C
+#define AB8500_SWATCTRL_RFOFFTIMER_SHIFT 2
+#define AB8500_SWATCTRL_SWATBIT5 BIT(6)
+
+#define AB8500_HIQCLKCTRL_SYSCLKREQ1HIQENAVALID BIT(0)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ2HIQENAVALID BIT(1)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ3HIQENAVALID BIT(2)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ4HIQENAVALID BIT(3)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ5HIQENAVALID BIT(4)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ6HIQENAVALID BIT(5)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ7HIQENAVALID BIT(6)
+#define AB8500_HIQCLKCTRL_SYSCLKREQ8HIQENAVALID BIT(7)
+
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ1VALID BIT(0)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ2VALID BIT(1)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ3VALID BIT(2)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ4VALID BIT(3)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ5VALID BIT(4)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ6VALID BIT(5)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ7VALID BIT(6)
+#define AB8500_VSIMSYSCLKCTRL_VSIMSYSCLKREQ8VALID BIT(7)
+
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF1ENA BIT(0)
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF2ENA BIT(1)
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF3ENA BIT(2)
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF4ENA BIT(3)
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUFENA_MASK 0x0F
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF1STRE BIT(4)
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF2STRE BIT(5)
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF3STRE BIT(6)
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUF4STRE BIT(7)
+#define AB9540_SYSCLK12BUFCTRL_SYSCLK12BUFSTRE_MASK 0xF0
+
+#define AB9540_SYSCLK12CONFCTRL_PLL26TO38ENA BIT(0)
+#define AB9540_SYSCLK12CONFCTRL_SYSCLK12USBMUXSEL BIT(1)
+#define AB9540_SYSCLK12CONFCTRL_INT384MHZMUXSEL0 BIT(2)
+#define AB9540_SYSCLK12CONFCTRL_INT384MHZMUXSEL1 BIT(3)
+#define AB9540_SYSCLK12CONFCTRL_SYSCLK12BUFMUX BIT(4)
+#define AB9540_SYSCLK12CONFCTRL_SYSCLK12PLLMUX BIT(5)
+#define AB9540_SYSCLK12CONFCTRL_SYSCLK2MUXVALID BIT(6)
+
+#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF1PDENA BIT(0)
+#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF2PDENA BIT(1)
+#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF3PDENA BIT(2)
+#define AB9540_SYSCLK12BUFCTRL2_SYSCLK12BUF4PDENA BIT(3)
+
+#define AB9540_SYSCLK12BUF1VALID_SYSCLK12BUF1VALID_MASK 0xFF
+#define AB9540_SYSCLK12BUF1VALID_SYSCLK12BUF1VALID_SHIFT 0
+
+#define AB9540_SYSCLK12BUF2VALID_SYSCLK12BUF2VALID_MASK 0xFF
+#define AB9540_SYSCLK12BUF2VALID_SYSCLK12BUF2VALID_SHIFT 0
+
+#define AB9540_SYSCLK12BUF3VALID_SYSCLK12BUF3VALID_MASK 0xFF
+#define AB9540_SYSCLK12BUF3VALID_SYSCLK12BUF3VALID_SHIFT 0
+
+#define AB9540_SYSCLK12BUF4VALID_SYSCLK12BUF4VALID_MASK 0xFF
+#define AB9540_SYSCLK12BUF4VALID_SYSCLK12BUF4VALID_SHIFT 0
+
+#define AB8500_ENABLE_WD 0x1
+#define AB8500_KICK_WD 0x2
+#define AB8500_WD_RESTART_ON_EXPIRE 0x10
+
+#endif /* __AB8500_SYSCTRL_H */
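The set/clear helpers above simply pass the same bits as both mask and value to ab8500_sysctrl_write(). A minimal sketch of how they combine with the register and bit definitions, shown here for the main watchdog (illustrative only, not the actual watchdog driver):

#include <linux/mfd/abx500/ab8500-sysctrl.h>

/* Enable the main watchdog and kick it once. */
static int example_kick_main_watchdog(void)
{
	int ret;

	ret = ab8500_sysctrl_set(AB8500_MAINWDOGCTRL,
				 AB8500_MAINWDOGCTRL_MAINWDOGENA);
	if (ret)
		return ret;

	return ab8500_sysctrl_set(AB8500_MAINWDOGCTRL,
				  AB8500_MAINWDOGCTRL_MAINWDOGKICK);
}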
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
new file mode 100644
index 000000000..9475fee2b
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
+ */
+#ifndef MFD_AB8500_H
+#define MFD_AB8500_H
+
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <linux/irqdomain.h>
+
+struct device;
+
+/*
+ * AB IC versions
+ *
+ * AB8500_VERSION_AB8500 should be 0xFF but will never be read, since that
+ * would need an unsupported multi-byte I2C access via the PRCMU. It is set
+ * to 0x00 to ease printing of the version string.
+ */
+enum ab8500_version {
+ AB8500_VERSION_AB8500 = 0x0,
+ AB8500_VERSION_AB8505 = 0x1,
+ AB8500_VERSION_AB9540 = 0x2,
+ AB8500_VERSION_AB8540 = 0x4,
+ AB8500_VERSION_UNDEFINED,
+};
+
+/* AB8500 CIDs*/
+#define AB8500_CUTEARLY 0x00
+#define AB8500_CUT1P0 0x10
+#define AB8500_CUT1P1 0x11
+#define AB8500_CUT1P2 0x12 /* Only valid for AB8540 */
+#define AB8500_CUT2P0 0x20
+#define AB8500_CUT3P0 0x30
+#define AB8500_CUT3P3 0x33
+
+/*
+ * AB8500 bank addresses
+ */
+#define AB8500_M_FSM_RANK 0x0
+#define AB8500_SYS_CTRL1_BLOCK 0x1
+#define AB8500_SYS_CTRL2_BLOCK 0x2
+#define AB8500_REGU_CTRL1 0x3
+#define AB8500_REGU_CTRL2 0x4
+#define AB8500_USB 0x5
+#define AB8500_TVOUT 0x6
+#define AB8500_DBI 0x7
+#define AB8500_ECI_AV_ACC 0x8
+#define AB8500_RESERVED 0x9
+#define AB8500_GPADC 0xA
+#define AB8500_CHARGER 0xB
+#define AB8500_GAS_GAUGE 0xC
+#define AB8500_AUDIO 0xD
+#define AB8500_INTERRUPT 0xE
+#define AB8500_RTC 0xF
+#define AB8500_MISC 0x10
+#define AB8500_DEVELOPMENT 0x11
+#define AB8500_DEBUG 0x12
+#define AB8500_PROD_TEST 0x13
+#define AB8500_STE_TEST 0x14
+#define AB8500_OTP_EMUL 0x15
+
+/*
+ * Interrupts
+ * Values used to index into array ab8500_irq_regoffset[] defined in
+ * drivers/mfd/ab8500-core.c
+ */
+/* Definitions for AB8500, AB9540 and AB8540 */
+/* ab8500_irq_regoffset[0] -> IT[Source|Latch|Mask]1 */
+#define AB8500_INT_MAIN_EXT_CH_NOT_OK 0 /* not 8505/9540 */
+#define AB8500_INT_UN_PLUG_TV_DET 1 /* not 8505/9540/8540 */
+#define AB8500_INT_PLUG_TV_DET 2 /* not 8505/9540/8540 */
+#define AB8500_INT_TEMP_WARM 3
+#define AB8500_INT_PON_KEY2DB_F 4
+#define AB8500_INT_PON_KEY2DB_R 5
+#define AB8500_INT_PON_KEY1DB_F 6
+#define AB8500_INT_PON_KEY1DB_R 7
+/* ab8500_irq_regoffset[1] -> IT[Source|Latch|Mask]2 */
+#define AB8500_INT_BATT_OVV 8
+#define AB8500_INT_MAIN_CH_UNPLUG_DET 10 /* not 8505/8540 */
+#define AB8500_INT_MAIN_CH_PLUG_DET 11 /* not 8505/8540 */
+#define AB8500_INT_VBUS_DET_F 14
+#define AB8500_INT_VBUS_DET_R 15
+/* ab8500_irq_regoffset[2] -> IT[Source|Latch|Mask]3 */
+#define AB8500_INT_VBUS_CH_DROP_END 16
+#define AB8500_INT_RTC_60S 17
+#define AB8500_INT_RTC_ALARM 18
+#define AB8540_INT_BIF_INT 19
+#define AB8500_INT_BAT_CTRL_INDB 20
+#define AB8500_INT_CH_WD_EXP 21
+#define AB8500_INT_VBUS_OVV 22
+#define AB8500_INT_MAIN_CH_DROP_END 23 /* not 8505/9540/8540 */
+/* ab8500_irq_regoffset[3] -> IT[Source|Latch|Mask]4 */
+#define AB8500_INT_CCN_CONV_ACC 24
+#define AB8500_INT_INT_AUD 25
+#define AB8500_INT_CCEOC 26
+#define AB8500_INT_CC_INT_CALIB 27
+#define AB8500_INT_LOW_BAT_F 28
+#define AB8500_INT_LOW_BAT_R 29
+#define AB8500_INT_BUP_CHG_NOT_OK 30
+#define AB8500_INT_BUP_CHG_OK 31
+/* ab8500_irq_regoffset[4] -> IT[Source|Latch|Mask]5 */
+#define AB8500_INT_GP_HW_ADC_CONV_END 32 /* not 8505/8540 */
+#define AB8500_INT_ACC_DETECT_1DB_F 33
+#define AB8500_INT_ACC_DETECT_1DB_R 34
+#define AB8500_INT_ACC_DETECT_22DB_F 35
+#define AB8500_INT_ACC_DETECT_22DB_R 36
+#define AB8500_INT_ACC_DETECT_21DB_F 37
+#define AB8500_INT_ACC_DETECT_21DB_R 38
+#define AB8500_INT_GP_SW_ADC_CONV_END 39
+/* ab8500_irq_regoffset[5] -> IT[Source|Latch|Mask]7 */
+#define AB8500_INT_GPIO6R 40 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO7R 41 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO8R 42 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO9R 43 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO10R 44 /* not 8540 */
+#define AB8500_INT_GPIO11R 45 /* not 8540 */
+#define AB8500_INT_GPIO12R 46 /* not 8505/8540 */
+#define AB8500_INT_GPIO13R 47 /* not 8540 */
+/* ab8500_irq_regoffset[6] -> IT[Source|Latch|Mask]8 */
+#define AB8500_INT_GPIO24R 48 /* not 8505/8540 */
+#define AB8500_INT_GPIO25R 49 /* not 8505/8540 */
+#define AB8500_INT_GPIO36R 50 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO37R 51 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO38R 52 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO39R 53 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO40R 54 /* not 8540 */
+#define AB8500_INT_GPIO41R 55 /* not 8540 */
+/* ab8500_irq_regoffset[7] -> IT[Source|Latch|Mask]9 */
+#define AB8500_INT_GPIO6F 56 /* not 8505/9540 */
+#define AB8500_INT_GPIO7F 57 /* not 8505/9540 */
+#define AB8500_INT_GPIO8F 58 /* not 8505/9540 */
+#define AB8500_INT_GPIO9F 59 /* not 8505/9540 */
+#define AB8500_INT_GPIO10F 60
+#define AB8500_INT_GPIO11F 61
+#define AB8500_INT_GPIO12F 62 /* not 8505 */
+#define AB8500_INT_GPIO13F 63
+/* ab8500_irq_regoffset[8] -> IT[Source|Latch|Mask]10 */
+#define AB8500_INT_GPIO24F 64 /* not 8505/8540 */
+#define AB8500_INT_GPIO25F 65 /* not 8505/8540 */
+#define AB8500_INT_GPIO36F 66 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO37F 67 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO38F 68 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO39F 69 /* not 8505/9540/8540 */
+#define AB8500_INT_GPIO40F 70 /* not 8540 */
+#define AB8500_INT_GPIO41F 71 /* not 8540 */
+/* ab8500_irq_regoffset[9] -> IT[Source|Latch|Mask]12 */
+#define AB8500_INT_ADP_SOURCE_ERROR 72
+#define AB8500_INT_ADP_SINK_ERROR 73
+#define AB8500_INT_ADP_PROBE_PLUG 74
+#define AB8500_INT_ADP_PROBE_UNPLUG 75
+#define AB8500_INT_ADP_SENSE_OFF 76
+#define AB8500_INT_USB_PHY_POWER_ERR 78
+#define AB8500_INT_USB_LINK_STATUS 79
+/* ab8500_irq_regoffset[10] -> IT[Source|Latch|Mask]19 */
+#define AB8500_INT_BTEMP_LOW 80
+#define AB8500_INT_BTEMP_LOW_MEDIUM 81
+#define AB8500_INT_BTEMP_MEDIUM_HIGH 82
+#define AB8500_INT_BTEMP_HIGH 83
+/* ab8500_irq_regoffset[11] -> IT[Source|Latch|Mask]20 */
+#define AB8500_INT_SRP_DETECT 88
+#define AB8500_INT_USB_CHARGER_NOT_OKR 89
+#define AB8500_INT_ID_WAKEUP_R 90
+#define AB8500_INT_ID_DET_PLUGR 91 /* 8505/9540 cut2.0 */
+#define AB8500_INT_ID_DET_R1R 92
+#define AB8500_INT_ID_DET_R2R 93
+#define AB8500_INT_ID_DET_R3R 94
+#define AB8500_INT_ID_DET_R4R 95
+/* ab8500_irq_regoffset[12] -> IT[Source|Latch|Mask]21 */
+#define AB8500_INT_ID_WAKEUP_F 96 /* not 8505/9540 */
+#define AB8500_INT_ID_DET_PLUGF 97 /* 8505/9540 cut2.0 */
+#define AB8500_INT_ID_DET_R1F 98 /* not 8505/9540 */
+#define AB8500_INT_ID_DET_R2F 99 /* not 8505/9540 */
+#define AB8500_INT_ID_DET_R3F 100 /* not 8505/9540 */
+#define AB8500_INT_ID_DET_R4F 101 /* not 8505/9540 */
+#define AB8500_INT_CHAUTORESTARTAFTSEC 102 /* not 8505/9540 */
+#define AB8500_INT_CHSTOPBYSEC 103
+/* ab8500_irq_regoffset[13] -> IT[Source|Latch|Mask]22 */
+#define AB8500_INT_USB_CH_TH_PROT_F 104
+#define AB8500_INT_USB_CH_TH_PROT_R 105
+#define AB8500_INT_MAIN_CH_TH_PROT_F 106 /* not 8505/9540 */
+#define AB8500_INT_MAIN_CH_TH_PROT_R 107 /* not 8505/9540 */
+#define AB8500_INT_CHCURLIMNOHSCHIRP 109
+#define AB8500_INT_CHCURLIMHSCHIRP 110
+#define AB8500_INT_XTAL32K_KO 111
+
+/* Definitions for AB9540 / AB8505 */
+/* ab8500_irq_regoffset[14] -> IT[Source|Latch|Mask]13 */
+#define AB9540_INT_GPIO50R 113 /* not 8540 */
+#define AB9540_INT_GPIO51R 114 /* not 8505/8540 */
+#define AB9540_INT_GPIO52R 115 /* not 8540 */
+#define AB9540_INT_GPIO53R 116 /* not 8540 */
+#define AB9540_INT_GPIO54R 117 /* not 8505/8540 */
+#define AB9540_INT_IEXT_CH_RF_BFN_R 118
+/* ab8500_irq_regoffset[15] -> IT[Source|Latch|Mask]14 */
+#define AB9540_INT_GPIO50F 121 /* not 8540 */
+#define AB9540_INT_GPIO51F 122 /* not 8505/8540 */
+#define AB9540_INT_GPIO52F 123 /* not 8540 */
+#define AB9540_INT_GPIO53F 124 /* not 8540 */
+#define AB9540_INT_GPIO54F 125 /* not 8505/8540 */
+#define AB9540_INT_IEXT_CH_RF_BFN_F 126
+/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */
+#define AB8505_INT_KEYSTUCK 128
+#define AB8505_INT_IKR 129
+#define AB8505_INT_IKP 130
+#define AB8505_INT_KP 131
+#define AB8505_INT_KEYDEGLITCH 132
+#define AB8505_INT_MODPWRSTATUSF 134
+#define AB8505_INT_MODPWRSTATUSR 135
+/* ab8500_irq_regoffset[17] -> IT[Source|Latch|Mask]6 */
+#define AB8500_INT_HOOK_DET_NEG_F 138
+#define AB8500_INT_HOOK_DET_NEG_R 139
+#define AB8500_INT_HOOK_DET_POS_F 140
+#define AB8500_INT_HOOK_DET_POS_R 141
+#define AB8500_INT_PLUG_DET_COMP_F 142
+#define AB8500_INT_PLUG_DET_COMP_R 143
+/* ab8500_irq_regoffset[18] -> IT[Source|Latch|Mask]23 */
+#define AB8505_INT_COLL 144
+#define AB8505_INT_RESERR 145
+#define AB8505_INT_FRAERR 146
+#define AB8505_INT_COMERR 147
+#define AB8505_INT_SPDSET 148
+#define AB8505_INT_DSENT 149
+#define AB8505_INT_DREC 150
+#define AB8505_INT_ACC_INT 151
+/* ab8500_irq_regoffset[19] -> IT[Source|Latch|Mask]24 */
+#define AB8505_INT_NOPINT 152
+/* ab8540_irq_regoffset[20] -> IT[Source|Latch|Mask]26 */
+#define AB8540_INT_IDPLUGDETCOMPF 160
+#define AB8540_INT_IDPLUGDETCOMPR 161
+#define AB8540_INT_FMDETCOMPLOF 162
+#define AB8540_INT_FMDETCOMPLOR 163
+#define AB8540_INT_FMDETCOMPHIF 164
+#define AB8540_INT_FMDETCOMPHIR 165
+#define AB8540_INT_ID5VDETCOMPF 166
+#define AB8540_INT_ID5VDETCOMPR 167
+/* ab8540_irq_regoffset[21] -> IT[Source|Latch|Mask]27 */
+#define AB8540_INT_GPIO43F 168
+#define AB8540_INT_GPIO43R 169
+#define AB8540_INT_GPIO44F 170
+#define AB8540_INT_GPIO44R 171
+#define AB8540_INT_KEYPOSDETCOMPF 172
+#define AB8540_INT_KEYPOSDETCOMPR 173
+#define AB8540_INT_KEYNEGDETCOMPF 174
+#define AB8540_INT_KEYNEGDETCOMPR 175
+/* ab8540_irq_regoffset[22] -> IT[Source|Latch|Mask]28 */
+#define AB8540_INT_GPIO1VBATF 176
+#define AB8540_INT_GPIO1VBATR 177
+#define AB8540_INT_GPIO2VBATF 178
+#define AB8540_INT_GPIO2VBATR 179
+#define AB8540_INT_GPIO3VBATF 180
+#define AB8540_INT_GPIO3VBATR 181
+#define AB8540_INT_GPIO4VBATF 182
+#define AB8540_INT_GPIO4VBATR 183
+/* ab8540_irq_regoffset[23] -> IT[Source|Latch|Mask]29 */
+#define AB8540_INT_SYSCLKREQ2F 184
+#define AB8540_INT_SYSCLKREQ2R 185
+#define AB8540_INT_SYSCLKREQ3F 186
+#define AB8540_INT_SYSCLKREQ3R 187
+#define AB8540_INT_SYSCLKREQ4F 188
+#define AB8540_INT_SYSCLKREQ4R 189
+#define AB8540_INT_SYSCLKREQ5F 190
+#define AB8540_INT_SYSCLKREQ5R 191
+/* ab8540_irq_regoffset[24] -> IT[Source|Latch|Mask]30 */
+#define AB8540_INT_PWMOUT1F 192
+#define AB8540_INT_PWMOUT1R 193
+#define AB8540_INT_PWMCTRL0F 194
+#define AB8540_INT_PWMCTRL0R 195
+#define AB8540_INT_PWMCTRL1F 196
+#define AB8540_INT_PWMCTRL1R 197
+#define AB8540_INT_SYSCLKREQ6F 198
+#define AB8540_INT_SYSCLKREQ6R 199
+/* ab8540_irq_regoffset[25] -> IT[Source|Latch|Mask]31 */
+#define AB8540_INT_PWMEXTVIBRA1F 200
+#define AB8540_INT_PWMEXTVIBRA1R 201
+#define AB8540_INT_PWMEXTVIBRA2F 202
+#define AB8540_INT_PWMEXTVIBRA2R 203
+#define AB8540_INT_PWMOUT2F 204
+#define AB8540_INT_PWMOUT2R 205
+#define AB8540_INT_PWMOUT3F 206
+#define AB8540_INT_PWMOUT3R 207
+/* ab8540_irq_regoffset[26] -> IT[Source|Latch|Mask]32 */
+#define AB8540_INT_ADDATA2F 208
+#define AB8540_INT_ADDATA2R 209
+#define AB8540_INT_DADATA2F 210
+#define AB8540_INT_DADATA2R 211
+#define AB8540_INT_FSYNC2F 212
+#define AB8540_INT_FSYNC2R 213
+#define AB8540_INT_BITCLK2F 214
+#define AB8540_INT_BITCLK2R 215
+/* ab8540_irq_regoffset[27] -> IT[Source|Latch|Mask]33 */
+#define AB8540_INT_RTC_1S 216
+
+/*
+ * AB8500_MAX_NR_IRQS is used when configuring the IRQ numbers for the
+ * entire platform. It is a compile-time constant, so it must be set to
+ * the largest possible value that may be encountered with the different
+ * AB SoCs. Of the currently supported AB devices it is the AB8540 which
+ * has the largest IRQ count.
+ */
+#define AB8500_NR_IRQS 112
+#define AB8505_NR_IRQS 153
+#define AB9540_NR_IRQS 153
+#define AB8540_NR_IRQS 216
+/* This is set to the roof of any AB8500 chip variant IRQ counts */
+#define AB8500_MAX_NR_IRQS AB8540_NR_IRQS
+
+#define AB8500_NUM_IRQ_REGS 14
+#define AB9540_NUM_IRQ_REGS 20
+#define AB8540_NUM_IRQ_REGS 27
+
+/* Turn On Status Event */
+#define AB8500_POR_ON_VBAT 0x01
+#define AB8500_POW_KEY_1_ON 0x02
+#define AB8500_POW_KEY_2_ON 0x04
+#define AB8500_RTC_ALARM 0x08
+#define AB8500_MAIN_CH_DET 0x10
+#define AB8500_VBUS_DET 0x20
+#define AB8500_USB_ID_DET 0x40
+
+/**
+ * struct ab8500 - ab8500 internal structure
+ * @dev: parent device
+ * @lock: read/write operations lock
+ * @irq_lock: genirq bus lock
+ * @transfer_ongoing: 0 if no transfer ongoing
+ * @irq: irq line
+ * @domain: irq domain
+ * @version: chip version id (e.g. ab8500 or ab9540)
+ * @chip_id: chip revision id
+ * @write: register write
+ * @write_masked: masked register write
+ * @read: register read
+ * @rx_buf: rx buf for SPI
+ * @tx_buf: tx buf for SPI
+ * @mask: cache of IRQ regs for bus lock
+ * @oldmask: cache of previous IRQ regs for bus lock
+ * @mask_size: Actual number of valid entries in mask[], oldmask[] and
+ * irq_reg_offset
+ * @irq_reg_offset: Array of offsets into IRQ registers
+ */
+struct ab8500 {
+ struct device *dev;
+ struct mutex lock;
+ struct mutex irq_lock;
+ atomic_t transfer_ongoing;
+ int irq;
+ struct irq_domain *domain;
+ enum ab8500_version version;
+ u8 chip_id;
+
+ int (*write)(struct ab8500 *ab8500, u16 addr, u8 data);
+ int (*write_masked)(struct ab8500 *ab8500, u16 addr, u8 mask, u8 data);
+ int (*read)(struct ab8500 *ab8500, u16 addr);
+
+ unsigned long tx_buf[4];
+ unsigned long rx_buf[4];
+
+ u8 *mask;
+ u8 *oldmask;
+ int mask_size;
+ const int *irq_reg_offset;
+ int it_latchhier_num;
+};
+
+struct ab8500_regulator_platform_data;
+struct ab8500_codec_platform_data;
+struct ab8500_sysctrl_platform_data;
+
+/**
+ * struct ab8500_platform_data - AB8500 platform data
+ * @init: board-specific initialization after detection of ab8500
+ * @regulator: machine-specific constraints for regulators
+ * @codec: platform data for the audio parts of the AB8500
+ * @sysctrl: platform data for the AB8500 system control block
+ */
+struct ab8500_platform_data {
+ void (*init) (struct ab8500 *);
+ struct ab8500_regulator_platform_data *regulator;
+ struct ab8500_codec_platform_data *codec;
+ struct ab8500_sysctrl_platform_data *sysctrl;
+};
+
+extern int ab8500_init(struct ab8500 *ab8500,
+ enum ab8500_version version);
+extern int ab8500_exit(struct ab8500 *ab8500);
+
+extern int ab8500_suspend(struct ab8500 *ab8500);
+
+static inline int is_ab8500(struct ab8500 *ab)
+{
+ return ab->version == AB8500_VERSION_AB8500;
+}
+
+static inline int is_ab8505(struct ab8500 *ab)
+{
+ return ab->version == AB8500_VERSION_AB8505;
+}
+
+static inline int is_ab9540(struct ab8500 *ab)
+{
+ return ab->version == AB8500_VERSION_AB9540;
+}
+
+static inline int is_ab8540(struct ab8500 *ab)
+{
+ return ab->version == AB8500_VERSION_AB8540;
+}
+
+/* exclude also ab8505, ab9540... */
+static inline int is_ab8500_1p0_or_earlier(struct ab8500 *ab)
+{
+ return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT1P0));
+}
+
+/* exclude also ab8505, ab9540... */
+static inline int is_ab8500_1p1_or_earlier(struct ab8500 *ab)
+{
+ return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT1P1));
+}
+
+/* exclude also ab8505, ab9540... */
+static inline int is_ab8500_2p0_or_earlier(struct ab8500 *ab)
+{
+ return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT2P0));
+}
+
+static inline int is_ab8500_3p3_or_earlier(struct ab8500 *ab)
+{
+ return (is_ab8500(ab) && (ab->chip_id <= AB8500_CUT3P3));
+}
+
+/* exclude also ab8505, ab9540... */
+static inline int is_ab8500_2p0(struct ab8500 *ab)
+{
+ return (is_ab8500(ab) && (ab->chip_id == AB8500_CUT2P0));
+}
+
+static inline int is_ab8505_1p0_or_earlier(struct ab8500 *ab)
+{
+ return (is_ab8505(ab) && (ab->chip_id <= AB8500_CUT1P0));
+}
+
+static inline int is_ab8505_2p0(struct ab8500 *ab)
+{
+ return (is_ab8505(ab) && (ab->chip_id == AB8500_CUT2P0));
+}
+
+static inline int is_ab9540_1p0_or_earlier(struct ab8500 *ab)
+{
+ return (is_ab9540(ab) && (ab->chip_id <= AB8500_CUT1P0));
+}
+
+static inline int is_ab9540_2p0(struct ab8500 *ab)
+{
+ return (is_ab9540(ab) && (ab->chip_id == AB8500_CUT2P0));
+}
+
+/*
+ * Be careful, the marketing name for this chip is 2.1
+ * but the value read from the chip is 3.0 (0x30)
+ */
+static inline int is_ab9540_3p0(struct ab8500 *ab)
+{
+ return (is_ab9540(ab) && (ab->chip_id == AB8500_CUT3P0));
+}
+
+static inline int is_ab8540_1p0_or_earlier(struct ab8500 *ab)
+{
+ return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P0);
+}
+
+static inline int is_ab8540_1p1_or_earlier(struct ab8500 *ab)
+{
+ return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P1);
+}
+
+static inline int is_ab8540_1p2_or_earlier(struct ab8500 *ab)
+{
+ return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT1P2);
+}
+
+static inline int is_ab8540_2p0_or_earlier(struct ab8500 *ab)
+{
+ return is_ab8540(ab) && (ab->chip_id <= AB8500_CUT2P0);
+}
+
+static inline int is_ab8540_2p0(struct ab8500 *ab)
+{
+ return is_ab8540(ab) && (ab->chip_id == AB8500_CUT2P0);
+}
+
+static inline int is_ab8505_2p0_earlier(struct ab8500 *ab)
+{
+ return (is_ab8505(ab) && (ab->chip_id < AB8500_CUT2P0));
+}
+
+static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
+{
+ return (is_ab9540(ab) && (ab->chip_id < AB8500_CUT2P0));
+}
+
+void ab8500_override_turn_on_stat(u8 mask, u8 set);
+
+#ifdef CONFIG_AB8500_DEBUG
+extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
+void ab8500_dump_all_banks(struct device *dev);
+void ab8500_debug_register_interrupt(int line);
+#else
+static inline void ab8500_dump_all_banks(struct device *dev) {}
+static inline void ab8500_debug_register_interrupt(int line) {}
+#endif
+
+#endif /* MFD_AB8500_H */
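Because the cut macros are shared between the AB variants, callers combine a variant check with a chip_id comparison through the inline helpers above. A small illustrative sketch of gating a hypothetical workaround on early silicon:

#include <linux/mfd/abx500/ab8500.h>

/* Hypothetical quirk: needed on AB8500 up to cut 1.1 and AB8505 before 2.0. */
static bool example_needs_early_cut_workaround(struct ab8500 *ab)
{
	return is_ab8500_1p1_or_earlier(ab) || is_ab8505_2p0_earlier(ab);
}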
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
new file mode 100644
index 000000000..67703f23e
--- /dev/null
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ * Author: Johan Gardsmark <johan.gardsmark@stericsson.com> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _UX500_CHARGALG_H
+#define _UX500_CHARGALG_H
+
+#include <linux/power_supply.h>
+
+/*
+ * Valid only for supplies of type:
+ * - POWER_SUPPLY_TYPE_MAINS,
+ * - POWER_SUPPLY_TYPE_USB,
+ * because only these store a pointer to struct ux500_charger as drv_data.
+ */
+#define psy_to_ux500_charger(x) power_supply_get_drvdata(x)
+
+/* Forward declaration */
+struct ux500_charger;
+
+struct ux500_charger_ops {
+ int (*enable) (struct ux500_charger *, int, int, int);
+ int (*check_enable) (struct ux500_charger *, int, int);
+ int (*kick_wd) (struct ux500_charger *);
+ int (*update_curr) (struct ux500_charger *, int);
+ int (*pp_enable) (struct ux500_charger *, bool);
+ int (*pre_chg_enable) (struct ux500_charger *, bool);
+};
+
+/**
+ * struct ux500_charger - power supply ux500 charger sub class
+ * @psy: power supply base class
+ * @ops: ux500 charger operations
+ * @max_out_volt: maximum output charger voltage in mV
+ * @max_out_curr: maximum output charger current in mA
+ * @wdt_refresh: charger watchdog refresh interval
+ * @enabled: indicates if this charger is used or not
+ * @external: external charger unit (pm2xxx)
+ * @power_path: USB power path support
+ */
+struct ux500_charger {
+ struct power_supply *psy;
+ struct ux500_charger_ops ops;
+ int max_out_volt;
+ int max_out_curr;
+ int wdt_refresh;
+ bool enabled;
+ bool external;
+ bool power_path;
+};
+
+extern struct blocking_notifier_head charger_notifier_list;
+
+#endif
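The charging algorithm only talks to charger drivers through the ux500_charger_ops callbacks embedded in struct ux500_charger. A minimal sketch of how a charger driver might populate it; the callback bodies are placeholders and the limits are example values in the units documented above:

#include <linux/mfd/abx500/ux500_chargalg.h>

static int example_enable(struct ux500_charger *ch, int enable,
			  int vset, int iset)
{
	return 0;	/* a real driver would program its hardware here */
}

static int example_kick_wd(struct ux500_charger *ch)
{
	return 0;	/* refresh the charger watchdog */
}

static struct ux500_charger example_charger = {
	.ops = {
		.enable  = example_enable,
		.kick_wd = example_kick_wd,
	},
	.max_out_volt = 4500,	/* mV, example value */
	.max_out_curr = 900,	/* mA, example value */
};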
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
new file mode 100644
index 000000000..ac37558a4
--- /dev/null
+++ b/include/linux/mfd/adp5520.h
@@ -0,0 +1,299 @@
+/*
+ * Definitions and platform data for Analog Devices
+ * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+
+#ifndef __LINUX_MFD_ADP5520_H
+#define __LINUX_MFD_ADP5520_H
+
+#define ID_ADP5520 5520
+#define ID_ADP5501 5501
+
+/*
+ * ADP5520/ADP5501 Register Map
+ */
+
+#define ADP5520_MODE_STATUS 0x00
+#define ADP5520_INTERRUPT_ENABLE 0x01
+#define ADP5520_BL_CONTROL 0x02
+#define ADP5520_BL_TIME 0x03
+#define ADP5520_BL_FADE 0x04
+#define ADP5520_DAYLIGHT_MAX 0x05
+#define ADP5520_DAYLIGHT_DIM 0x06
+#define ADP5520_OFFICE_MAX 0x07
+#define ADP5520_OFFICE_DIM 0x08
+#define ADP5520_DARK_MAX 0x09
+#define ADP5520_DARK_DIM 0x0A
+#define ADP5520_BL_VALUE 0x0B
+#define ADP5520_ALS_CMPR_CFG 0x0C
+#define ADP5520_L2_TRIP 0x0D
+#define ADP5520_L2_HYS 0x0E
+#define ADP5520_L3_TRIP 0x0F
+#define ADP5520_L3_HYS 0x10
+#define ADP5520_LED_CONTROL 0x11
+#define ADP5520_LED_TIME 0x12
+#define ADP5520_LED_FADE 0x13
+#define ADP5520_LED1_CURRENT 0x14
+#define ADP5520_LED2_CURRENT 0x15
+#define ADP5520_LED3_CURRENT 0x16
+
+/*
+ * ADP5520 Register Map
+ */
+
+#define ADP5520_GPIO_CFG_1 0x17
+#define ADP5520_GPIO_CFG_2 0x18
+#define ADP5520_GPIO_IN 0x19
+#define ADP5520_GPIO_OUT 0x1A
+#define ADP5520_GPIO_INT_EN 0x1B
+#define ADP5520_GPIO_INT_STAT 0x1C
+#define ADP5520_GPIO_INT_LVL 0x1D
+#define ADP5520_GPIO_DEBOUNCE 0x1E
+#define ADP5520_GPIO_PULLUP 0x1F
+#define ADP5520_KP_INT_STAT_1 0x20
+#define ADP5520_KP_INT_STAT_2 0x21
+#define ADP5520_KR_INT_STAT_1 0x22
+#define ADP5520_KR_INT_STAT_2 0x23
+#define ADP5520_KEY_STAT_1 0x24
+#define ADP5520_KEY_STAT_2 0x25
+
+/*
+ * MODE_STATUS bits
+ */
+
+#define ADP5520_nSTNBY (1 << 7)
+#define ADP5520_BL_EN (1 << 6)
+#define ADP5520_DIM_EN (1 << 5)
+#define ADP5520_OVP_INT (1 << 4)
+#define ADP5520_CMPR_INT (1 << 3)
+#define ADP5520_GPI_INT (1 << 2)
+#define ADP5520_KR_INT (1 << 1)
+#define ADP5520_KP_INT (1 << 0)
+
+/*
+ * INTERRUPT_ENABLE bits
+ */
+
+#define ADP5520_AUTO_LD_EN (1 << 4)
+#define ADP5520_CMPR_IEN (1 << 3)
+#define ADP5520_OVP_IEN (1 << 2)
+#define ADP5520_KR_IEN (1 << 1)
+#define ADP5520_KP_IEN (1 << 0)
+
+/*
+ * BL_CONTROL bits
+ */
+
+#define ADP5520_BL_LVL(x) ((x) << 5)
+#define ADP5520_BL_LAW(x) ((x) << 4)
+#define ADP5520_BL_AUTO_ADJ (1 << 3)
+#define ADP5520_OVP_EN (1 << 2)
+#define ADP5520_FOVR (1 << 1)
+#define ADP5520_KP_BL_EN (1 << 0)
+
+/*
+ * ALS_CMPR_CFG bits
+ */
+
+#define ADP5520_L3_OUT (1 << 3)
+#define ADP5520_L2_OUT (1 << 2)
+#define ADP5520_L3_EN (1 << 1)
+
+#define ADP5020_MAX_BRIGHTNESS 0x7F
+
+#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
+#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4))
+#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & (filt)) << 5) | (l3_en))
+
+/*
+ * LEDs subdevice bits and masks
+ */
+
+#define ADP5520_01_MAXLEDS 3
+
+#define ADP5520_FLAG_LED_MASK 0x3
+#define ADP5520_FLAG_OFFT_SHIFT 8
+#define ADP5520_FLAG_OFFT_MASK 0x3
+
+#define ADP5520_R3_MODE (1 << 5)
+#define ADP5520_C3_MODE (1 << 4)
+#define ADP5520_LED_LAW (1 << 3)
+#define ADP5520_LED3_EN (1 << 2)
+#define ADP5520_LED2_EN (1 << 1)
+#define ADP5520_LED1_EN (1 << 0)
+
+/*
+ * GPIO subdevice bits and masks
+ */
+
+#define ADP5520_MAXGPIOS 8
+
+#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_GPIO_C2 (1 << 6)
+#define ADP5520_GPIO_C1 (1 << 5)
+#define ADP5520_GPIO_C0 (1 << 4)
+#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_GPIO_R2 (1 << 2)
+#define ADP5520_GPIO_R1 (1 << 1)
+#define ADP5520_GPIO_R0 (1 << 0)
+
+struct adp5520_gpio_platform_data {
+ unsigned gpio_start;
+ u8 gpio_en_mask;
+ u8 gpio_pullup_mask;
+};
+
+/*
+ * Keypad subdevice bits and masks
+ */
+
+#define ADP5520_MAXKEYS 16
+
+#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
+#define ADP5520_COL_C2 (1 << 6)
+#define ADP5520_COL_C1 (1 << 5)
+#define ADP5520_COL_C0 (1 << 4)
+#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
+#define ADP5520_ROW_R2 (1 << 2)
+#define ADP5520_ROW_R1 (1 << 1)
+#define ADP5520_ROW_R0 (1 << 0)
+
+#define ADP5520_KEY(row, col) (col + row * 4)
+#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS
+
+struct adp5520_keys_platform_data {
+ int rows_en_mask; /* Mask of enabled rows (ADP5520_ROW_*) */
+ int cols_en_mask; /* Mask of enabled columns (ADP5520_COL_*) */
+ const unsigned short *keymap; /* Pointer to keymap */
+ unsigned short keymapsize; /* Keymap size */
+ unsigned repeat:1; /* Enable key repeat */
+};
+
+
+/*
+ * LEDs subdevice platform data
+ */
+
+#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */
+#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */
+#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */
+
+#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT)
+#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT)
+
+#define ADP5520_LED_ONT_200ms 0
+#define ADP5520_LED_ONT_600ms 1
+#define ADP5520_LED_ONT_800ms 2
+#define ADP5520_LED_ONT_1200ms 3
+
+struct adp5520_leds_platform_data {
+ int num_leds;
+ struct led_info *leds;
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 led_on_time;
+};
+
+/*
+ * Backlight subdevice platform data
+ */
+
+#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */
+#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */
+#define ADP5520_FADE_T_600ms 2
+#define ADP5520_FADE_T_900ms 3
+#define ADP5520_FADE_T_1200ms 4
+#define ADP5520_FADE_T_1500ms 5
+#define ADP5520_FADE_T_1800ms 6
+#define ADP5520_FADE_T_2100ms 7
+#define ADP5520_FADE_T_2400ms 8
+#define ADP5520_FADE_T_2700ms 9
+#define ADP5520_FADE_T_3000ms 10
+#define ADP5520_FADE_T_3500ms 11
+#define ADP5520_FADE_T_4000ms 12
+#define ADP5520_FADE_T_4500ms 13
+#define ADP5520_FADE_T_5000ms 14
+#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */
+
+#define ADP5520_BL_LAW_LINEAR 0
+#define ADP5520_BL_LAW_SQUARE 1
+#define ADP5520_BL_LAW_CUBIC1 2
+#define ADP5520_BL_LAW_CUBIC2 3
+
+#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
+#define ADP5520_BL_AMBL_FILT_160ms 1
+#define ADP5520_BL_AMBL_FILT_320ms 2
+#define ADP5520_BL_AMBL_FILT_640ms 3
+#define ADP5520_BL_AMBL_FILT_1280ms 4
+#define ADP5520_BL_AMBL_FILT_2560ms 5
+#define ADP5520_BL_AMBL_FILT_5120ms 6
+#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
+
+/*
+ * Backlight current 0..30mA
+ */
+#define ADP5520_BL_CUR_mA(I) (((I) * 127) / 30)
+
+/*
+ * L2 comparator current 0..1000uA
+ */
+#define ADP5520_L2_COMP_CURR_uA(I) (((I) * 255) / 1000)
+
+/*
+ * L3 comparator current 0..127uA
+ */
+#define ADP5520_L3_COMP_CURR_uA(I) (((I) * 255) / 127)
+
+struct adp5520_backlight_platform_data {
+ u8 fade_in; /* Backlight Fade-In Timer */
+ u8 fade_out; /* Backlight Fade-Out Timer */
+ u8 fade_led_law; /* fade-on/fade-off transfer characteristic */
+
+ u8 en_ambl_sens; /* 1 = enable ambient light sensor */
+ u8 abml_filt; /* Light sensor filter time */
+ u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
+ u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
+ u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+ u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
+};
+
+/*
+ * MFD chip platform data
+ */
+
+struct adp5520_platform_data {
+ struct adp5520_keys_platform_data *keys;
+ struct adp5520_gpio_platform_data *gpio;
+ struct adp5520_leds_platform_data *leds;
+ struct adp5520_backlight_platform_data *backlight;
+};
+
+/*
+ * MFD chip functions
+ */
+
+extern int adp5520_read(struct device *dev, int reg, uint8_t *val);
+extern int adp5520_write(struct device *dev, int reg, u8 val);
+extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+
+extern int adp5520_register_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+extern int adp5520_unregister_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+#endif /* __LINUX_MFD_ADP5520_H */
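Sub-device drivers access the chip through the parent MFD device using the accessor functions above, and the *_CUR_mA()/*_COMP_CURR_uA() macros convert physical values into register codes. A minimal sketch, assuming adp5520_dev is the parent MFD device handed to a sub-driver:

#include <linux/device.h>
#include <linux/mfd/adp5520.h>

/* Illustrative only: leave standby, enable the backlight, set a 20 mA level. */
static int example_enable_backlight(struct device *adp5520_dev)
{
	int ret;

	ret = adp5520_set_bits(adp5520_dev, ADP5520_MODE_STATUS,
			       ADP5520_nSTNBY | ADP5520_BL_EN);
	if (ret)
		return ret;

	return adp5520_write(adp5520_dev, ADP5520_DAYLIGHT_MAX,
			     ADP5520_BL_CUR_mA(20));
}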
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
new file mode 100644
index 000000000..16a498f48
--- /dev/null
+++ b/include/linux/mfd/arizona/core.h
@@ -0,0 +1,163 @@
+/*
+ * Arizona MFD internals
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM_ARIZONA_CORE_H
+#define _WM_ARIZONA_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mfd/arizona/pdata.h>
+
+#define ARIZONA_MAX_CORE_SUPPLIES 2
+
+enum arizona_type {
+ WM5102 = 1,
+ WM5110 = 2,
+ WM8997 = 3,
+ WM8280 = 4,
+};
+
+#define ARIZONA_IRQ_GP1 0
+#define ARIZONA_IRQ_GP2 1
+#define ARIZONA_IRQ_GP3 2
+#define ARIZONA_IRQ_GP4 3
+#define ARIZONA_IRQ_GP5_FALL 4
+#define ARIZONA_IRQ_GP5_RISE 5
+#define ARIZONA_IRQ_JD_FALL 6
+#define ARIZONA_IRQ_JD_RISE 7
+#define ARIZONA_IRQ_DSP1_RAM_RDY 8
+#define ARIZONA_IRQ_DSP2_RAM_RDY 9
+#define ARIZONA_IRQ_DSP3_RAM_RDY 10
+#define ARIZONA_IRQ_DSP4_RAM_RDY 11
+#define ARIZONA_IRQ_DSP_IRQ1 12
+#define ARIZONA_IRQ_DSP_IRQ2 13
+#define ARIZONA_IRQ_DSP_IRQ3 14
+#define ARIZONA_IRQ_DSP_IRQ4 15
+#define ARIZONA_IRQ_DSP_IRQ5 16
+#define ARIZONA_IRQ_DSP_IRQ6 17
+#define ARIZONA_IRQ_DSP_IRQ7 18
+#define ARIZONA_IRQ_DSP_IRQ8 19
+#define ARIZONA_IRQ_SPK_OVERHEAT_WARN 20
+#define ARIZONA_IRQ_SPK_OVERHEAT 21
+#define ARIZONA_IRQ_MICDET 22
+#define ARIZONA_IRQ_HPDET 23
+#define ARIZONA_IRQ_WSEQ_DONE 24
+#define ARIZONA_IRQ_DRC2_SIG_DET 25
+#define ARIZONA_IRQ_DRC1_SIG_DET 26
+#define ARIZONA_IRQ_ASRC2_LOCK 27
+#define ARIZONA_IRQ_ASRC1_LOCK 28
+#define ARIZONA_IRQ_UNDERCLOCKED 29
+#define ARIZONA_IRQ_OVERCLOCKED 30
+#define ARIZONA_IRQ_FLL2_LOCK 31
+#define ARIZONA_IRQ_FLL1_LOCK 32
+#define ARIZONA_IRQ_CLKGEN_ERR 33
+#define ARIZONA_IRQ_CLKGEN_ERR_ASYNC 34
+#define ARIZONA_IRQ_ASRC_CFG_ERR 35
+#define ARIZONA_IRQ_AIF3_ERR 36
+#define ARIZONA_IRQ_AIF2_ERR 37
+#define ARIZONA_IRQ_AIF1_ERR 38
+#define ARIZONA_IRQ_CTRLIF_ERR 39
+#define ARIZONA_IRQ_MIXER_DROPPED_SAMPLES 40
+#define ARIZONA_IRQ_ASYNC_CLK_ENA_LOW 41
+#define ARIZONA_IRQ_SYSCLK_ENA_LOW 42
+#define ARIZONA_IRQ_ISRC1_CFG_ERR 43
+#define ARIZONA_IRQ_ISRC2_CFG_ERR 44
+#define ARIZONA_IRQ_BOOT_DONE 45
+#define ARIZONA_IRQ_DCS_DAC_DONE 46
+#define ARIZONA_IRQ_DCS_HP_DONE 47
+#define ARIZONA_IRQ_FLL2_CLOCK_OK 48
+#define ARIZONA_IRQ_FLL1_CLOCK_OK 49
+#define ARIZONA_IRQ_MICD_CLAMP_RISE 50
+#define ARIZONA_IRQ_MICD_CLAMP_FALL 51
+#define ARIZONA_IRQ_HP3R_DONE 52
+#define ARIZONA_IRQ_HP3L_DONE 53
+#define ARIZONA_IRQ_HP2R_DONE 54
+#define ARIZONA_IRQ_HP2L_DONE 55
+#define ARIZONA_IRQ_HP1R_DONE 56
+#define ARIZONA_IRQ_HP1L_DONE 57
+#define ARIZONA_IRQ_ISRC3_CFG_ERR 58
+#define ARIZONA_IRQ_DSP_SHARED_WR_COLL 59
+#define ARIZONA_IRQ_SPK_SHUTDOWN 60
+#define ARIZONA_IRQ_SPK1R_SHORT 61
+#define ARIZONA_IRQ_SPK1L_SHORT 62
+#define ARIZONA_IRQ_HP3R_SC_NEG 63
+#define ARIZONA_IRQ_HP3R_SC_POS 64
+#define ARIZONA_IRQ_HP3L_SC_NEG 65
+#define ARIZONA_IRQ_HP3L_SC_POS 66
+#define ARIZONA_IRQ_HP2R_SC_NEG 67
+#define ARIZONA_IRQ_HP2R_SC_POS 68
+#define ARIZONA_IRQ_HP2L_SC_NEG 69
+#define ARIZONA_IRQ_HP2L_SC_POS 70
+#define ARIZONA_IRQ_HP1R_SC_NEG 71
+#define ARIZONA_IRQ_HP1R_SC_POS 72
+#define ARIZONA_IRQ_HP1L_SC_NEG 73
+#define ARIZONA_IRQ_HP1L_SC_POS 74
+
+#define ARIZONA_NUM_IRQ 75
+
+struct snd_soc_dapm_context;
+
+struct arizona {
+ struct regmap *regmap;
+ struct device *dev;
+
+ enum arizona_type type;
+ unsigned int rev;
+
+ int num_core_supplies;
+ struct regulator_bulk_data core_supplies[ARIZONA_MAX_CORE_SUPPLIES];
+ struct regulator *dcvdd;
+
+ struct arizona_pdata pdata;
+
+ unsigned int external_dcvdd:1;
+
+ int irq;
+ struct irq_domain *virq;
+ struct regmap_irq_chip_data *aod_irq_chip;
+ struct regmap_irq_chip_data *irq_chip;
+
+ bool hpdet_clamp;
+ unsigned int hp_ena;
+
+ struct mutex clk_lock;
+ int clk32k_ref;
+
+ bool ctrlif_error;
+
+ struct snd_soc_dapm_context *dapm;
+
+ int tdm_width[ARIZONA_MAX_AIF];
+ int tdm_slots[ARIZONA_MAX_AIF];
+
+ uint16_t dac_comp_coeff;
+ uint8_t dac_comp_enabled;
+ struct mutex dac_comp_lock;
+};
+
+int arizona_clk32k_enable(struct arizona *arizona);
+int arizona_clk32k_disable(struct arizona *arizona);
+
+int arizona_request_irq(struct arizona *arizona, int irq, char *name,
+ irq_handler_t handler, void *data);
+void arizona_free_irq(struct arizona *arizona, int irq, void *data);
+int arizona_set_irq_wake(struct arizona *arizona, int irq, int on);
+
+int wm5102_patch(struct arizona *arizona);
+int wm5110_patch(struct arizona *arizona);
+int wm8997_patch(struct arizona *arizona);
+
+extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
+ bool mandatory);
+
+#endif
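Child drivers do not request raw interrupt lines; they go through arizona_request_irq() with one of the ARIZONA_IRQ_* indices above, which maps into the chip's IRQ domain. A minimal sketch of hooking the boot-done interrupt (illustrative only):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/mfd/arizona/core.h>

static irqreturn_t example_boot_done(int irq, void *data)
{
	struct arizona *arizona = data;

	dev_dbg(arizona->dev, "boot sequence complete\n");
	return IRQ_HANDLED;
}

static int example_hook_boot_done(struct arizona *arizona)
{
	return arizona_request_irq(arizona, ARIZONA_IRQ_BOOT_DONE,
				   "Boot done", example_boot_done, arizona);
}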
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
new file mode 100644
index 000000000..1789cb0f4
--- /dev/null
+++ b/include/linux/mfd/arizona/pdata.h
@@ -0,0 +1,178 @@
+/*
+ * Platform data for Arizona devices
+ *
+ * Copyright 2012 Wolfson Microelectronics. PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARIZONA_PDATA_H
+#define _ARIZONA_PDATA_H
+
+#include <dt-bindings/mfd/arizona.h>
+
+#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
+#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
+#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
+#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
+#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
+#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
+#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
+#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
+#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
+#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
+#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
+#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
+#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
+#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
+#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
+#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
+#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
+#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
+#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
+#define ARIZONA_GPN_FN_MASK 0x007F /* GPN_FN - [6:0] */
+#define ARIZONA_GPN_FN_SHIFT 0 /* GPN_FN - [6:0] */
+#define ARIZONA_GPN_FN_WIDTH 7 /* GPN_FN - [6:0] */
+
+#define ARIZONA_MAX_GPIO 5
+
+#define ARIZONA_MAX_INPUT 4
+
+#define ARIZONA_MAX_MICBIAS 3
+
+#define ARIZONA_MAX_OUTPUT 6
+
+#define ARIZONA_MAX_AIF 3
+
+#define ARIZONA_HAP_ACT_ERM 0
+#define ARIZONA_HAP_ACT_LRA 2
+
+#define ARIZONA_MAX_PDM_SPK 2
+
+struct regulator_init_data;
+
+struct arizona_micbias {
+ int mV; /** Regulated voltage */
+ unsigned int ext_cap:1; /** External capacitor fitted */
+ unsigned int discharge:1; /** Actively discharge */
+ unsigned int soft_start:1; /** Disable aggressive startup ramp rate */
+ unsigned int bypass:1; /** Use bypass mode */
+};
+
+struct arizona_micd_config {
+ unsigned int src;
+ unsigned int bias;
+ bool gpio;
+};
+
+struct arizona_micd_range {
+ int max; /** Ohms */
+ int key; /** Key to report to input layer */
+};
+
+struct arizona_pdata {
+ int reset; /** GPIO controlling /RESET, if any */
+ int ldoena; /** GPIO controlling LDOENA, if any */
+
+ /** Regulator configuration for MICVDD */
+ struct regulator_init_data *micvdd;
+
+ /** Regulator configuration for LDO1 */
+ struct regulator_init_data *ldo1;
+
+ /** If a direct 32kHz clock is provided on an MCLK, specify it here */
+ int clk32k_src;
+
+ /** Mode for primary IRQ (defaults to active low) */
+ unsigned int irq_flags;
+
+ /* Base GPIO */
+ int gpio_base;
+
+ /** Pin state for GPIO pins */
+ unsigned int gpio_defaults[ARIZONA_MAX_GPIO];
+
+ /**
+ * Maximum number of channels that clocks will be generated for;
+ * useful for systems where an I2S bus with multiple data
+ * lines is mastered.
+ */
+ int max_channels_clocked[ARIZONA_MAX_AIF];
+
+ /** GPIO5 is used for jack detection */
+ bool jd_gpio5;
+
+ /** Internal pull on GPIO5 is disabled when used for jack detection */
+ bool jd_gpio5_nopull;
+
+ /** set to true if jackdet contact opens on insert */
+ bool jd_invert;
+
+ /** Use the headphone detect circuit to identify the accessory */
+ bool hpdet_acc_id;
+
+ /** Check for line output with HPDET method */
+ bool hpdet_acc_id_line;
+
+ /** GPIO used for mic isolation with HPDET */
+ int hpdet_id_gpio;
+
+ /** Extra debounce timeout used during initial mic detection (ms) */
+ int micd_detect_debounce;
+
+ /** GPIO for mic detection polarity */
+ int micd_pol_gpio;
+
+ /** Mic detect ramp rate */
+ int micd_bias_start_time;
+
+ /** Mic detect sample rate */
+ int micd_rate;
+
+ /** Mic detect debounce level */
+ int micd_dbtime;
+
+ /** Mic detect timeout (ms) */
+ int micd_timeout;
+
+ /** Force MICBIAS on for mic detect */
+ bool micd_force_micbias;
+
+ /** Mic detect level parameters */
+ const struct arizona_micd_range *micd_ranges;
+ int num_micd_ranges;
+
+ /** Headset polarity configurations */
+ struct arizona_micd_config *micd_configs;
+ int num_micd_configs;
+
+ /** Reference voltage for DMIC inputs */
+ int dmic_ref[ARIZONA_MAX_INPUT];
+
+ /** MICBIAS configurations */
+ struct arizona_micbias micbias[ARIZONA_MAX_MICBIAS];
+
+ /** Mode of input structures */
+ int inmode[ARIZONA_MAX_INPUT];
+
+ /** Mode for outputs */
+ bool out_mono[ARIZONA_MAX_OUTPUT];
+
+ /** PDM speaker mute setting */
+ unsigned int spk_mute[ARIZONA_MAX_PDM_SPK];
+
+ /** PDM speaker format */
+ unsigned int spk_fmt[ARIZONA_MAX_PDM_SPK];
+
+ /** Haptic actuator type */
+ unsigned int hap_act;
+
+ /** GPIO for primary IRQ (used for edge triggered emulation) */
+ int irq_gpio;
+};
+
+#endif
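A board that still uses platform data (rather than the dt-bindings header included above) fills in struct arizona_pdata before registering the device. A purely illustrative sketch; the GPIO numbers, button map and MICBIAS voltage are made-up example values:

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/arizona/pdata.h>

static const struct arizona_micd_range example_micd_ranges[] = {
	{ .max = 11,  .key = KEY_MEDIA },
	{ .max = 430, .key = KEY_VOLUMEUP },
};

static struct arizona_pdata example_arizona_pdata = {
	.reset           = 100,			/* GPIO driving /RESET (example) */
	.ldoena          = 101,			/* GPIO driving LDOENA (example) */
	.irq_flags       = IRQF_TRIGGER_LOW,	/* active-low primary IRQ */
	.micd_ranges     = example_micd_ranges,
	.num_micd_ranges = ARRAY_SIZE(example_micd_ranges),
	.micbias = {
		[0] = { .mV = 2800, .ext_cap = 1 },	/* MICBIAS1, example */
	},
};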
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
new file mode 100644
index 000000000..aacc10d77
--- /dev/null
+++ b/include/linux/mfd/arizona/registers.h
@@ -0,0 +1,7832 @@
+/*
+ * ARIZONA register definitions
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARIZONA_REGISTERS_H
+#define _ARIZONA_REGISTERS_H
+
+/*
+ * Register values.
+ */
+#define ARIZONA_SOFTWARE_RESET 0x00
+#define ARIZONA_DEVICE_REVISION 0x01
+#define ARIZONA_CTRL_IF_SPI_CFG_1 0x08
+#define ARIZONA_CTRL_IF_I2C1_CFG_1 0x09
+#define ARIZONA_CTRL_IF_I2C2_CFG_1 0x0A
+#define ARIZONA_CTRL_IF_I2C1_CFG_2 0x0B
+#define ARIZONA_CTRL_IF_I2C2_CFG_2 0x0C
+#define ARIZONA_CTRL_IF_STATUS_1 0x0D
+#define ARIZONA_WRITE_SEQUENCER_CTRL_0 0x16
+#define ARIZONA_WRITE_SEQUENCER_CTRL_1 0x17
+#define ARIZONA_WRITE_SEQUENCER_CTRL_2 0x18
+#define ARIZONA_WRITE_SEQUENCER_CTRL_3 0x19
+#define ARIZONA_WRITE_SEQUENCER_PROM 0x1A
+#define ARIZONA_TONE_GENERATOR_1 0x20
+#define ARIZONA_TONE_GENERATOR_2 0x21
+#define ARIZONA_TONE_GENERATOR_3 0x22
+#define ARIZONA_TONE_GENERATOR_4 0x23
+#define ARIZONA_TONE_GENERATOR_5 0x24
+#define ARIZONA_PWM_DRIVE_1 0x30
+#define ARIZONA_PWM_DRIVE_2 0x31
+#define ARIZONA_PWM_DRIVE_3 0x32
+#define ARIZONA_WAKE_CONTROL 0x40
+#define ARIZONA_SEQUENCE_CONTROL 0x41
+#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
+#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
+#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
+#define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3 0x68
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4 0x69
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5 0x6A
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6 0x6B
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7 0x6C
+#define ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8 0x6D
+#define ARIZONA_COMFORT_NOISE_GENERATOR 0x70
+#define ARIZONA_HAPTICS_CONTROL_1 0x90
+#define ARIZONA_HAPTICS_CONTROL_2 0x91
+#define ARIZONA_HAPTICS_PHASE_1_INTENSITY 0x92
+#define ARIZONA_HAPTICS_PHASE_1_DURATION 0x93
+#define ARIZONA_HAPTICS_PHASE_2_INTENSITY 0x94
+#define ARIZONA_HAPTICS_PHASE_2_DURATION 0x95
+#define ARIZONA_HAPTICS_PHASE_3_INTENSITY 0x96
+#define ARIZONA_HAPTICS_PHASE_3_DURATION 0x97
+#define ARIZONA_HAPTICS_STATUS 0x98
+#define ARIZONA_CLOCK_32K_1 0x100
+#define ARIZONA_SYSTEM_CLOCK_1 0x101
+#define ARIZONA_SAMPLE_RATE_1 0x102
+#define ARIZONA_SAMPLE_RATE_2 0x103
+#define ARIZONA_SAMPLE_RATE_3 0x104
+#define ARIZONA_SAMPLE_RATE_1_STATUS 0x10A
+#define ARIZONA_SAMPLE_RATE_2_STATUS 0x10B
+#define ARIZONA_SAMPLE_RATE_3_STATUS 0x10C
+#define ARIZONA_ASYNC_CLOCK_1 0x112
+#define ARIZONA_ASYNC_SAMPLE_RATE_1 0x113
+#define ARIZONA_ASYNC_SAMPLE_RATE_2 0x114
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C
+#define ARIZONA_OUTPUT_SYSTEM_CLOCK 0x149
+#define ARIZONA_OUTPUT_ASYNC_CLOCK 0x14A
+#define ARIZONA_RATE_ESTIMATOR_1 0x152
+#define ARIZONA_RATE_ESTIMATOR_2 0x153
+#define ARIZONA_RATE_ESTIMATOR_3 0x154
+#define ARIZONA_RATE_ESTIMATOR_4 0x155
+#define ARIZONA_RATE_ESTIMATOR_5 0x156
+#define ARIZONA_DYNAMIC_FREQUENCY_SCALING_1 0x161
+#define ARIZONA_FLL1_CONTROL_1 0x171
+#define ARIZONA_FLL1_CONTROL_2 0x172
+#define ARIZONA_FLL1_CONTROL_3 0x173
+#define ARIZONA_FLL1_CONTROL_4 0x174
+#define ARIZONA_FLL1_CONTROL_5 0x175
+#define ARIZONA_FLL1_CONTROL_6 0x176
+#define ARIZONA_FLL1_LOOP_FILTER_TEST_1 0x177
+#define ARIZONA_FLL1_NCO_TEST_0 0x178
+#define ARIZONA_FLL1_CONTROL_7 0x179
+#define ARIZONA_FLL1_SYNCHRONISER_1 0x181
+#define ARIZONA_FLL1_SYNCHRONISER_2 0x182
+#define ARIZONA_FLL1_SYNCHRONISER_3 0x183
+#define ARIZONA_FLL1_SYNCHRONISER_4 0x184
+#define ARIZONA_FLL1_SYNCHRONISER_5 0x185
+#define ARIZONA_FLL1_SYNCHRONISER_6 0x186
+#define ARIZONA_FLL1_SYNCHRONISER_7 0x187
+#define ARIZONA_FLL1_SPREAD_SPECTRUM 0x189
+#define ARIZONA_FLL1_GPIO_CLOCK 0x18A
+#define ARIZONA_FLL2_CONTROL_1 0x191
+#define ARIZONA_FLL2_CONTROL_2 0x192
+#define ARIZONA_FLL2_CONTROL_3 0x193
+#define ARIZONA_FLL2_CONTROL_4 0x194
+#define ARIZONA_FLL2_CONTROL_5 0x195
+#define ARIZONA_FLL2_CONTROL_6 0x196
+#define ARIZONA_FLL2_LOOP_FILTER_TEST_1 0x197
+#define ARIZONA_FLL2_NCO_TEST_0 0x198
+#define ARIZONA_FLL2_CONTROL_7 0x199
+#define ARIZONA_FLL2_SYNCHRONISER_1 0x1A1
+#define ARIZONA_FLL2_SYNCHRONISER_2 0x1A2
+#define ARIZONA_FLL2_SYNCHRONISER_3 0x1A3
+#define ARIZONA_FLL2_SYNCHRONISER_4 0x1A4
+#define ARIZONA_FLL2_SYNCHRONISER_5 0x1A5
+#define ARIZONA_FLL2_SYNCHRONISER_6 0x1A6
+#define ARIZONA_FLL2_SYNCHRONISER_7 0x1A7
+#define ARIZONA_FLL2_SPREAD_SPECTRUM 0x1A9
+#define ARIZONA_FLL2_GPIO_CLOCK 0x1AA
+#define ARIZONA_MIC_CHARGE_PUMP_1 0x200
+#define ARIZONA_LDO1_CONTROL_1 0x210
+#define ARIZONA_LDO1_CONTROL_2 0x212
+#define ARIZONA_LDO2_CONTROL_1 0x213
+#define ARIZONA_MIC_BIAS_CTRL_1 0x218
+#define ARIZONA_MIC_BIAS_CTRL_2 0x219
+#define ARIZONA_MIC_BIAS_CTRL_3 0x21A
+#define ARIZONA_HP_CTRL_1L 0x225
+#define ARIZONA_HP_CTRL_1R 0x226
+#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293
+#define ARIZONA_HEADPHONE_DETECT_1 0x29B
+#define ARIZONA_HEADPHONE_DETECT_2 0x29C
+#define ARIZONA_HP_DACVAL 0x29F
+#define ARIZONA_MICD_CLAMP_CONTROL 0x2A2
+#define ARIZONA_MIC_DETECT_1 0x2A3
+#define ARIZONA_MIC_DETECT_2 0x2A4
+#define ARIZONA_MIC_DETECT_3 0x2A5
+#define ARIZONA_MIC_DETECT_LEVEL_1 0x2A6
+#define ARIZONA_MIC_DETECT_LEVEL_2 0x2A7
+#define ARIZONA_MIC_DETECT_LEVEL_3 0x2A8
+#define ARIZONA_MIC_DETECT_LEVEL_4 0x2A9
+#define ARIZONA_MIC_NOISE_MIX_CONTROL_1 0x2C3
+#define ARIZONA_ISOLATION_CONTROL 0x2CB
+#define ARIZONA_JACK_DETECT_ANALOGUE 0x2D3
+#define ARIZONA_INPUT_ENABLES 0x300
+#define ARIZONA_INPUT_ENABLES_STATUS 0x301
+#define ARIZONA_INPUT_RATE 0x308
+#define ARIZONA_INPUT_VOLUME_RAMP 0x309
+#define ARIZONA_HPF_CONTROL 0x30C
+#define ARIZONA_IN1L_CONTROL 0x310
+#define ARIZONA_ADC_DIGITAL_VOLUME_1L 0x311
+#define ARIZONA_DMIC1L_CONTROL 0x312
+#define ARIZONA_IN1R_CONTROL 0x314
+#define ARIZONA_ADC_DIGITAL_VOLUME_1R 0x315
+#define ARIZONA_DMIC1R_CONTROL 0x316
+#define ARIZONA_IN2L_CONTROL 0x318
+#define ARIZONA_ADC_DIGITAL_VOLUME_2L 0x319
+#define ARIZONA_DMIC2L_CONTROL 0x31A
+#define ARIZONA_IN2R_CONTROL 0x31C
+#define ARIZONA_ADC_DIGITAL_VOLUME_2R 0x31D
+#define ARIZONA_DMIC2R_CONTROL 0x31E
+#define ARIZONA_IN3L_CONTROL 0x320
+#define ARIZONA_ADC_DIGITAL_VOLUME_3L 0x321
+#define ARIZONA_DMIC3L_CONTROL 0x322
+#define ARIZONA_IN3R_CONTROL 0x324
+#define ARIZONA_ADC_DIGITAL_VOLUME_3R 0x325
+#define ARIZONA_DMIC3R_CONTROL 0x326
+#define ARIZONA_IN4L_CONTROL 0x328
+#define ARIZONA_ADC_DIGITAL_VOLUME_4L 0x329
+#define ARIZONA_DMIC4L_CONTROL 0x32A
+#define ARIZONA_IN4R_CONTROL 0x32C
+#define ARIZONA_ADC_DIGITAL_VOLUME_4R 0x32D
+#define ARIZONA_DMIC4R_CONTROL 0x32E
+#define ARIZONA_OUTPUT_ENABLES_1 0x400
+#define ARIZONA_OUTPUT_STATUS_1 0x401
+#define ARIZONA_RAW_OUTPUT_STATUS_1 0x406
+#define ARIZONA_OUTPUT_RATE_1 0x408
+#define ARIZONA_OUTPUT_VOLUME_RAMP 0x409
+#define ARIZONA_OUTPUT_PATH_CONFIG_1L 0x410
+#define ARIZONA_DAC_DIGITAL_VOLUME_1L 0x411
+#define ARIZONA_DAC_VOLUME_LIMIT_1L 0x412
+#define ARIZONA_NOISE_GATE_SELECT_1L 0x413
+#define ARIZONA_OUTPUT_PATH_CONFIG_1R 0x414
+#define ARIZONA_DAC_DIGITAL_VOLUME_1R 0x415
+#define ARIZONA_DAC_VOLUME_LIMIT_1R 0x416
+#define ARIZONA_NOISE_GATE_SELECT_1R 0x417
+#define ARIZONA_OUTPUT_PATH_CONFIG_2L 0x418
+#define ARIZONA_DAC_DIGITAL_VOLUME_2L 0x419
+#define ARIZONA_DAC_VOLUME_LIMIT_2L 0x41A
+#define ARIZONA_NOISE_GATE_SELECT_2L 0x41B
+#define ARIZONA_OUTPUT_PATH_CONFIG_2R 0x41C
+#define ARIZONA_DAC_DIGITAL_VOLUME_2R 0x41D
+#define ARIZONA_DAC_VOLUME_LIMIT_2R 0x41E
+#define ARIZONA_NOISE_GATE_SELECT_2R 0x41F
+#define ARIZONA_OUTPUT_PATH_CONFIG_3L 0x420
+#define ARIZONA_DAC_DIGITAL_VOLUME_3L 0x421
+#define ARIZONA_DAC_VOLUME_LIMIT_3L 0x422
+#define ARIZONA_NOISE_GATE_SELECT_3L 0x423
+#define ARIZONA_OUTPUT_PATH_CONFIG_3R 0x424
+#define ARIZONA_DAC_DIGITAL_VOLUME_3R 0x425
+#define ARIZONA_DAC_VOLUME_LIMIT_3R 0x426
+#define ARIZONA_NOISE_GATE_SELECT_3R 0x427
+#define ARIZONA_OUTPUT_PATH_CONFIG_4L 0x428
+#define ARIZONA_DAC_DIGITAL_VOLUME_4L 0x429
+#define ARIZONA_OUT_VOLUME_4L 0x42A
+#define ARIZONA_NOISE_GATE_SELECT_4L 0x42B
+#define ARIZONA_OUTPUT_PATH_CONFIG_4R 0x42C
+#define ARIZONA_DAC_DIGITAL_VOLUME_4R 0x42D
+#define ARIZONA_OUT_VOLUME_4R 0x42E
+#define ARIZONA_NOISE_GATE_SELECT_4R 0x42F
+#define ARIZONA_OUTPUT_PATH_CONFIG_5L 0x430
+#define ARIZONA_DAC_DIGITAL_VOLUME_5L 0x431
+#define ARIZONA_DAC_VOLUME_LIMIT_5L 0x432
+#define ARIZONA_NOISE_GATE_SELECT_5L 0x433
+#define ARIZONA_OUTPUT_PATH_CONFIG_5R 0x434
+#define ARIZONA_DAC_DIGITAL_VOLUME_5R 0x435
+#define ARIZONA_DAC_VOLUME_LIMIT_5R 0x436
+#define ARIZONA_NOISE_GATE_SELECT_5R 0x437
+#define ARIZONA_OUTPUT_PATH_CONFIG_6L 0x438
+#define ARIZONA_DAC_DIGITAL_VOLUME_6L 0x439
+#define ARIZONA_DAC_VOLUME_LIMIT_6L 0x43A
+#define ARIZONA_NOISE_GATE_SELECT_6L 0x43B
+#define ARIZONA_OUTPUT_PATH_CONFIG_6R 0x43C
+#define ARIZONA_DAC_DIGITAL_VOLUME_6R 0x43D
+#define ARIZONA_DAC_VOLUME_LIMIT_6R 0x43E
+#define ARIZONA_NOISE_GATE_SELECT_6R 0x43F
+#define ARIZONA_DRE_ENABLE 0x440
+#define ARIZONA_DRE_CONTROL_2 0x442
+#define ARIZONA_DRE_CONTROL_3 0x443
+#define ARIZONA_DAC_AEC_CONTROL_1 0x450
+#define ARIZONA_NOISE_GATE_CONTROL 0x458
+#define ARIZONA_PDM_SPK1_CTRL_1 0x490
+#define ARIZONA_PDM_SPK1_CTRL_2 0x491
+#define ARIZONA_PDM_SPK2_CTRL_1 0x492
+#define ARIZONA_PDM_SPK2_CTRL_2 0x493
+#define ARIZONA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
+#define ARIZONA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
+#define ARIZONA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
+#define ARIZONA_SPK_CTRL_2 0x4B5
+#define ARIZONA_SPK_CTRL_3 0x4B6
+#define ARIZONA_DAC_COMP_1 0x4DC
+#define ARIZONA_DAC_COMP_2 0x4DD
+#define ARIZONA_DAC_COMP_3 0x4DE
+#define ARIZONA_DAC_COMP_4 0x4DF
+#define ARIZONA_AIF1_BCLK_CTRL 0x500
+#define ARIZONA_AIF1_TX_PIN_CTRL 0x501
+#define ARIZONA_AIF1_RX_PIN_CTRL 0x502
+#define ARIZONA_AIF1_RATE_CTRL 0x503
+#define ARIZONA_AIF1_FORMAT 0x504
+#define ARIZONA_AIF1_TX_BCLK_RATE 0x505
+#define ARIZONA_AIF1_RX_BCLK_RATE 0x506
+#define ARIZONA_AIF1_FRAME_CTRL_1 0x507
+#define ARIZONA_AIF1_FRAME_CTRL_2 0x508
+#define ARIZONA_AIF1_FRAME_CTRL_3 0x509
+#define ARIZONA_AIF1_FRAME_CTRL_4 0x50A
+#define ARIZONA_AIF1_FRAME_CTRL_5 0x50B
+#define ARIZONA_AIF1_FRAME_CTRL_6 0x50C
+#define ARIZONA_AIF1_FRAME_CTRL_7 0x50D
+#define ARIZONA_AIF1_FRAME_CTRL_8 0x50E
+#define ARIZONA_AIF1_FRAME_CTRL_9 0x50F
+#define ARIZONA_AIF1_FRAME_CTRL_10 0x510
+#define ARIZONA_AIF1_FRAME_CTRL_11 0x511
+#define ARIZONA_AIF1_FRAME_CTRL_12 0x512
+#define ARIZONA_AIF1_FRAME_CTRL_13 0x513
+#define ARIZONA_AIF1_FRAME_CTRL_14 0x514
+#define ARIZONA_AIF1_FRAME_CTRL_15 0x515
+#define ARIZONA_AIF1_FRAME_CTRL_16 0x516
+#define ARIZONA_AIF1_FRAME_CTRL_17 0x517
+#define ARIZONA_AIF1_FRAME_CTRL_18 0x518
+#define ARIZONA_AIF1_TX_ENABLES 0x519
+#define ARIZONA_AIF1_RX_ENABLES 0x51A
+#define ARIZONA_AIF1_FORCE_WRITE 0x51B
+#define ARIZONA_AIF2_BCLK_CTRL 0x540
+#define ARIZONA_AIF2_TX_PIN_CTRL 0x541
+#define ARIZONA_AIF2_RX_PIN_CTRL 0x542
+#define ARIZONA_AIF2_RATE_CTRL 0x543
+#define ARIZONA_AIF2_FORMAT 0x544
+#define ARIZONA_AIF2_TX_BCLK_RATE 0x545
+#define ARIZONA_AIF2_RX_BCLK_RATE 0x546
+#define ARIZONA_AIF2_FRAME_CTRL_1 0x547
+#define ARIZONA_AIF2_FRAME_CTRL_2 0x548
+#define ARIZONA_AIF2_FRAME_CTRL_3 0x549
+#define ARIZONA_AIF2_FRAME_CTRL_4 0x54A
+#define ARIZONA_AIF2_FRAME_CTRL_5 0x54B
+#define ARIZONA_AIF2_FRAME_CTRL_6 0x54C
+#define ARIZONA_AIF2_FRAME_CTRL_7 0x54D
+#define ARIZONA_AIF2_FRAME_CTRL_8 0x54E
+#define ARIZONA_AIF2_FRAME_CTRL_11 0x551
+#define ARIZONA_AIF2_FRAME_CTRL_12 0x552
+#define ARIZONA_AIF2_FRAME_CTRL_13 0x553
+#define ARIZONA_AIF2_FRAME_CTRL_14 0x554
+#define ARIZONA_AIF2_FRAME_CTRL_15 0x555
+#define ARIZONA_AIF2_FRAME_CTRL_16 0x556
+#define ARIZONA_AIF2_TX_ENABLES 0x559
+#define ARIZONA_AIF2_RX_ENABLES 0x55A
+#define ARIZONA_AIF2_FORCE_WRITE 0x55B
+#define ARIZONA_AIF3_BCLK_CTRL 0x580
+#define ARIZONA_AIF3_TX_PIN_CTRL 0x581
+#define ARIZONA_AIF3_RX_PIN_CTRL 0x582
+#define ARIZONA_AIF3_RATE_CTRL 0x583
+#define ARIZONA_AIF3_FORMAT 0x584
+#define ARIZONA_AIF3_TX_BCLK_RATE 0x585
+#define ARIZONA_AIF3_RX_BCLK_RATE 0x586
+#define ARIZONA_AIF3_FRAME_CTRL_1 0x587
+#define ARIZONA_AIF3_FRAME_CTRL_2 0x588
+#define ARIZONA_AIF3_FRAME_CTRL_3 0x589
+#define ARIZONA_AIF3_FRAME_CTRL_4 0x58A
+#define ARIZONA_AIF3_FRAME_CTRL_11 0x591
+#define ARIZONA_AIF3_FRAME_CTRL_12 0x592
+#define ARIZONA_AIF3_TX_ENABLES 0x599
+#define ARIZONA_AIF3_RX_ENABLES 0x59A
+#define ARIZONA_AIF3_FORCE_WRITE 0x59B
+#define ARIZONA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
+#define ARIZONA_SLIMBUS_RATES_1 0x5E5
+#define ARIZONA_SLIMBUS_RATES_2 0x5E6
+#define ARIZONA_SLIMBUS_RATES_3 0x5E7
+#define ARIZONA_SLIMBUS_RATES_4 0x5E8
+#define ARIZONA_SLIMBUS_RATES_5 0x5E9
+#define ARIZONA_SLIMBUS_RATES_6 0x5EA
+#define ARIZONA_SLIMBUS_RATES_7 0x5EB
+#define ARIZONA_SLIMBUS_RATES_8 0x5EC
+#define ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE 0x5F5
+#define ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE 0x5F6
+#define ARIZONA_SLIMBUS_RX_PORT_STATUS 0x5F7
+#define ARIZONA_SLIMBUS_TX_PORT_STATUS 0x5F8
+#define ARIZONA_PWM1MIX_INPUT_1_SOURCE 0x640
+#define ARIZONA_PWM1MIX_INPUT_1_VOLUME 0x641
+#define ARIZONA_PWM1MIX_INPUT_2_SOURCE 0x642
+#define ARIZONA_PWM1MIX_INPUT_2_VOLUME 0x643
+#define ARIZONA_PWM1MIX_INPUT_3_SOURCE 0x644
+#define ARIZONA_PWM1MIX_INPUT_3_VOLUME 0x645
+#define ARIZONA_PWM1MIX_INPUT_4_SOURCE 0x646
+#define ARIZONA_PWM1MIX_INPUT_4_VOLUME 0x647
+#define ARIZONA_PWM2MIX_INPUT_1_SOURCE 0x648
+#define ARIZONA_PWM2MIX_INPUT_1_VOLUME 0x649
+#define ARIZONA_PWM2MIX_INPUT_2_SOURCE 0x64A
+#define ARIZONA_PWM2MIX_INPUT_2_VOLUME 0x64B
+#define ARIZONA_PWM2MIX_INPUT_3_SOURCE 0x64C
+#define ARIZONA_PWM2MIX_INPUT_3_VOLUME 0x64D
+#define ARIZONA_PWM2MIX_INPUT_4_SOURCE 0x64E
+#define ARIZONA_PWM2MIX_INPUT_4_VOLUME 0x64F
+#define ARIZONA_MICMIX_INPUT_1_SOURCE 0x660
+#define ARIZONA_MICMIX_INPUT_1_VOLUME 0x661
+#define ARIZONA_MICMIX_INPUT_2_SOURCE 0x662
+#define ARIZONA_MICMIX_INPUT_2_VOLUME 0x663
+#define ARIZONA_MICMIX_INPUT_3_SOURCE 0x664
+#define ARIZONA_MICMIX_INPUT_3_VOLUME 0x665
+#define ARIZONA_MICMIX_INPUT_4_SOURCE 0x666
+#define ARIZONA_MICMIX_INPUT_4_VOLUME 0x667
+#define ARIZONA_NOISEMIX_INPUT_1_SOURCE 0x668
+#define ARIZONA_NOISEMIX_INPUT_1_VOLUME 0x669
+#define ARIZONA_NOISEMIX_INPUT_2_SOURCE 0x66A
+#define ARIZONA_NOISEMIX_INPUT_2_VOLUME 0x66B
+#define ARIZONA_NOISEMIX_INPUT_3_SOURCE 0x66C
+#define ARIZONA_NOISEMIX_INPUT_3_VOLUME 0x66D
+#define ARIZONA_NOISEMIX_INPUT_4_SOURCE 0x66E
+#define ARIZONA_NOISEMIX_INPUT_4_VOLUME 0x66F
+#define ARIZONA_OUT1LMIX_INPUT_1_SOURCE 0x680
+#define ARIZONA_OUT1LMIX_INPUT_1_VOLUME 0x681
+#define ARIZONA_OUT1LMIX_INPUT_2_SOURCE 0x682
+#define ARIZONA_OUT1LMIX_INPUT_2_VOLUME 0x683
+#define ARIZONA_OUT1LMIX_INPUT_3_SOURCE 0x684
+#define ARIZONA_OUT1LMIX_INPUT_3_VOLUME 0x685
+#define ARIZONA_OUT1LMIX_INPUT_4_SOURCE 0x686
+#define ARIZONA_OUT1LMIX_INPUT_4_VOLUME 0x687
+#define ARIZONA_OUT1RMIX_INPUT_1_SOURCE 0x688
+#define ARIZONA_OUT1RMIX_INPUT_1_VOLUME 0x689
+#define ARIZONA_OUT1RMIX_INPUT_2_SOURCE 0x68A
+#define ARIZONA_OUT1RMIX_INPUT_2_VOLUME 0x68B
+#define ARIZONA_OUT1RMIX_INPUT_3_SOURCE 0x68C
+#define ARIZONA_OUT1RMIX_INPUT_3_VOLUME 0x68D
+#define ARIZONA_OUT1RMIX_INPUT_4_SOURCE 0x68E
+#define ARIZONA_OUT1RMIX_INPUT_4_VOLUME 0x68F
+#define ARIZONA_OUT2LMIX_INPUT_1_SOURCE 0x690
+#define ARIZONA_OUT2LMIX_INPUT_1_VOLUME 0x691
+#define ARIZONA_OUT2LMIX_INPUT_2_SOURCE 0x692
+#define ARIZONA_OUT2LMIX_INPUT_2_VOLUME 0x693
+#define ARIZONA_OUT2LMIX_INPUT_3_SOURCE 0x694
+#define ARIZONA_OUT2LMIX_INPUT_3_VOLUME 0x695
+#define ARIZONA_OUT2LMIX_INPUT_4_SOURCE 0x696
+#define ARIZONA_OUT2LMIX_INPUT_4_VOLUME 0x697
+#define ARIZONA_OUT2RMIX_INPUT_1_SOURCE 0x698
+#define ARIZONA_OUT2RMIX_INPUT_1_VOLUME 0x699
+#define ARIZONA_OUT2RMIX_INPUT_2_SOURCE 0x69A
+#define ARIZONA_OUT2RMIX_INPUT_2_VOLUME 0x69B
+#define ARIZONA_OUT2RMIX_INPUT_3_SOURCE 0x69C
+#define ARIZONA_OUT2RMIX_INPUT_3_VOLUME 0x69D
+#define ARIZONA_OUT2RMIX_INPUT_4_SOURCE 0x69E
+#define ARIZONA_OUT2RMIX_INPUT_4_VOLUME 0x69F
+#define ARIZONA_OUT3LMIX_INPUT_1_SOURCE 0x6A0
+#define ARIZONA_OUT3LMIX_INPUT_1_VOLUME 0x6A1
+#define ARIZONA_OUT3LMIX_INPUT_2_SOURCE 0x6A2
+#define ARIZONA_OUT3LMIX_INPUT_2_VOLUME 0x6A3
+#define ARIZONA_OUT3LMIX_INPUT_3_SOURCE 0x6A4
+#define ARIZONA_OUT3LMIX_INPUT_3_VOLUME 0x6A5
+#define ARIZONA_OUT3LMIX_INPUT_4_SOURCE 0x6A6
+#define ARIZONA_OUT3LMIX_INPUT_4_VOLUME 0x6A7
+#define ARIZONA_OUT3RMIX_INPUT_1_SOURCE 0x6A8
+#define ARIZONA_OUT3RMIX_INPUT_1_VOLUME 0x6A9
+#define ARIZONA_OUT3RMIX_INPUT_2_SOURCE 0x6AA
+#define ARIZONA_OUT3RMIX_INPUT_2_VOLUME 0x6AB
+#define ARIZONA_OUT3RMIX_INPUT_3_SOURCE 0x6AC
+#define ARIZONA_OUT3RMIX_INPUT_3_VOLUME 0x6AD
+#define ARIZONA_OUT3RMIX_INPUT_4_SOURCE 0x6AE
+#define ARIZONA_OUT3RMIX_INPUT_4_VOLUME 0x6AF
+#define ARIZONA_OUT4LMIX_INPUT_1_SOURCE 0x6B0
+#define ARIZONA_OUT4LMIX_INPUT_1_VOLUME 0x6B1
+#define ARIZONA_OUT4LMIX_INPUT_2_SOURCE 0x6B2
+#define ARIZONA_OUT4LMIX_INPUT_2_VOLUME 0x6B3
+#define ARIZONA_OUT4LMIX_INPUT_3_SOURCE 0x6B4
+#define ARIZONA_OUT4LMIX_INPUT_3_VOLUME 0x6B5
+#define ARIZONA_OUT4LMIX_INPUT_4_SOURCE 0x6B6
+#define ARIZONA_OUT4LMIX_INPUT_4_VOLUME 0x6B7
+#define ARIZONA_OUT4RMIX_INPUT_1_SOURCE 0x6B8
+#define ARIZONA_OUT4RMIX_INPUT_1_VOLUME 0x6B9
+#define ARIZONA_OUT4RMIX_INPUT_2_SOURCE 0x6BA
+#define ARIZONA_OUT4RMIX_INPUT_2_VOLUME 0x6BB
+#define ARIZONA_OUT4RMIX_INPUT_3_SOURCE 0x6BC
+#define ARIZONA_OUT4RMIX_INPUT_3_VOLUME 0x6BD
+#define ARIZONA_OUT4RMIX_INPUT_4_SOURCE 0x6BE
+#define ARIZONA_OUT4RMIX_INPUT_4_VOLUME 0x6BF
+#define ARIZONA_OUT5LMIX_INPUT_1_SOURCE 0x6C0
+#define ARIZONA_OUT5LMIX_INPUT_1_VOLUME 0x6C1
+#define ARIZONA_OUT5LMIX_INPUT_2_SOURCE 0x6C2
+#define ARIZONA_OUT5LMIX_INPUT_2_VOLUME 0x6C3
+#define ARIZONA_OUT5LMIX_INPUT_3_SOURCE 0x6C4
+#define ARIZONA_OUT5LMIX_INPUT_3_VOLUME 0x6C5
+#define ARIZONA_OUT5LMIX_INPUT_4_SOURCE 0x6C6
+#define ARIZONA_OUT5LMIX_INPUT_4_VOLUME 0x6C7
+#define ARIZONA_OUT5RMIX_INPUT_1_SOURCE 0x6C8
+#define ARIZONA_OUT5RMIX_INPUT_1_VOLUME 0x6C9
+#define ARIZONA_OUT5RMIX_INPUT_2_SOURCE 0x6CA
+#define ARIZONA_OUT5RMIX_INPUT_2_VOLUME 0x6CB
+#define ARIZONA_OUT5RMIX_INPUT_3_SOURCE 0x6CC
+#define ARIZONA_OUT5RMIX_INPUT_3_VOLUME 0x6CD
+#define ARIZONA_OUT5RMIX_INPUT_4_SOURCE 0x6CE
+#define ARIZONA_OUT5RMIX_INPUT_4_VOLUME 0x6CF
+#define ARIZONA_OUT6LMIX_INPUT_1_SOURCE 0x6D0
+#define ARIZONA_OUT6LMIX_INPUT_1_VOLUME 0x6D1
+#define ARIZONA_OUT6LMIX_INPUT_2_SOURCE 0x6D2
+#define ARIZONA_OUT6LMIX_INPUT_2_VOLUME 0x6D3
+#define ARIZONA_OUT6LMIX_INPUT_3_SOURCE 0x6D4
+#define ARIZONA_OUT6LMIX_INPUT_3_VOLUME 0x6D5
+#define ARIZONA_OUT6LMIX_INPUT_4_SOURCE 0x6D6
+#define ARIZONA_OUT6LMIX_INPUT_4_VOLUME 0x6D7
+#define ARIZONA_OUT6RMIX_INPUT_1_SOURCE 0x6D8
+#define ARIZONA_OUT6RMIX_INPUT_1_VOLUME 0x6D9
+#define ARIZONA_OUT6RMIX_INPUT_2_SOURCE 0x6DA
+#define ARIZONA_OUT6RMIX_INPUT_2_VOLUME 0x6DB
+#define ARIZONA_OUT6RMIX_INPUT_3_SOURCE 0x6DC
+#define ARIZONA_OUT6RMIX_INPUT_3_VOLUME 0x6DD
+#define ARIZONA_OUT6RMIX_INPUT_4_SOURCE 0x6DE
+#define ARIZONA_OUT6RMIX_INPUT_4_VOLUME 0x6DF
+#define ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE 0x700
+#define ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME 0x701
+#define ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE 0x702
+#define ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME 0x703
+#define ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE 0x704
+#define ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME 0x705
+#define ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE 0x706
+#define ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME 0x707
+#define ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE 0x708
+#define ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME 0x709
+#define ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE 0x70A
+#define ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME 0x70B
+#define ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE 0x70C
+#define ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME 0x70D
+#define ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE 0x70E
+#define ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME 0x70F
+#define ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE 0x710
+#define ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME 0x711
+#define ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE 0x712
+#define ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME 0x713
+#define ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE 0x714
+#define ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME 0x715
+#define ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE 0x716
+#define ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME 0x717
+#define ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE 0x718
+#define ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME 0x719
+#define ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE 0x71A
+#define ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME 0x71B
+#define ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE 0x71C
+#define ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME 0x71D
+#define ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE 0x71E
+#define ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME 0x71F
+#define ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE 0x720
+#define ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME 0x721
+#define ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE 0x722
+#define ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME 0x723
+#define ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE 0x724
+#define ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME 0x725
+#define ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE 0x726
+#define ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME 0x727
+#define ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE 0x728
+#define ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME 0x729
+#define ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE 0x72A
+#define ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME 0x72B
+#define ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE 0x72C
+#define ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME 0x72D
+#define ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE 0x72E
+#define ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME 0x72F
+#define ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE 0x730
+#define ARIZONA_AIF1TX7MIX_INPUT_1_VOLUME 0x731
+#define ARIZONA_AIF1TX7MIX_INPUT_2_SOURCE 0x732
+#define ARIZONA_AIF1TX7MIX_INPUT_2_VOLUME 0x733
+#define ARIZONA_AIF1TX7MIX_INPUT_3_SOURCE 0x734
+#define ARIZONA_AIF1TX7MIX_INPUT_3_VOLUME 0x735
+#define ARIZONA_AIF1TX7MIX_INPUT_4_SOURCE 0x736
+#define ARIZONA_AIF1TX7MIX_INPUT_4_VOLUME 0x737
+#define ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE 0x738
+#define ARIZONA_AIF1TX8MIX_INPUT_1_VOLUME 0x739
+#define ARIZONA_AIF1TX8MIX_INPUT_2_SOURCE 0x73A
+#define ARIZONA_AIF1TX8MIX_INPUT_2_VOLUME 0x73B
+#define ARIZONA_AIF1TX8MIX_INPUT_3_SOURCE 0x73C
+#define ARIZONA_AIF1TX8MIX_INPUT_3_VOLUME 0x73D
+#define ARIZONA_AIF1TX8MIX_INPUT_4_SOURCE 0x73E
+#define ARIZONA_AIF1TX8MIX_INPUT_4_VOLUME 0x73F
+#define ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE 0x740
+#define ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME 0x741
+#define ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE 0x742
+#define ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME 0x743
+#define ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE 0x744
+#define ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME 0x745
+#define ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE 0x746
+#define ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME 0x747
+#define ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE 0x748
+#define ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME 0x749
+#define ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE 0x74A
+#define ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME 0x74B
+#define ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE 0x74C
+#define ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME 0x74D
+#define ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE 0x74E
+#define ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME 0x74F
+#define ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE 0x750
+#define ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME 0x751
+#define ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE 0x752
+#define ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME 0x753
+#define ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE 0x754
+#define ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME 0x755
+#define ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE 0x756
+#define ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME 0x757
+#define ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE 0x758
+#define ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME 0x759
+#define ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE 0x75A
+#define ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME 0x75B
+#define ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE 0x75C
+#define ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME 0x75D
+#define ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE 0x75E
+#define ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME 0x75F
+#define ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE 0x760
+#define ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME 0x761
+#define ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE 0x762
+#define ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME 0x763
+#define ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE 0x764
+#define ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME 0x765
+#define ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE 0x766
+#define ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME 0x767
+#define ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE 0x768
+#define ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME 0x769
+#define ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE 0x76A
+#define ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME 0x76B
+#define ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE 0x76C
+#define ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME 0x76D
+#define ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE 0x76E
+#define ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME 0x76F
+#define ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE 0x780
+#define ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME 0x781
+#define ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE 0x782
+#define ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME 0x783
+#define ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE 0x784
+#define ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME 0x785
+#define ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE 0x786
+#define ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME 0x787
+#define ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE 0x788
+#define ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME 0x789
+#define ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE 0x78A
+#define ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME 0x78B
+#define ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE 0x78C
+#define ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D
+#define ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E
+#define ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F
+#define ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE 0x7C0
+#define ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME 0x7C1
+#define ARIZONA_SLIMTX1MIX_INPUT_2_SOURCE 0x7C2
+#define ARIZONA_SLIMTX1MIX_INPUT_2_VOLUME 0x7C3
+#define ARIZONA_SLIMTX1MIX_INPUT_3_SOURCE 0x7C4
+#define ARIZONA_SLIMTX1MIX_INPUT_3_VOLUME 0x7C5
+#define ARIZONA_SLIMTX1MIX_INPUT_4_SOURCE 0x7C6
+#define ARIZONA_SLIMTX1MIX_INPUT_4_VOLUME 0x7C7
+#define ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE 0x7C8
+#define ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME 0x7C9
+#define ARIZONA_SLIMTX2MIX_INPUT_2_SOURCE 0x7CA
+#define ARIZONA_SLIMTX2MIX_INPUT_2_VOLUME 0x7CB
+#define ARIZONA_SLIMTX2MIX_INPUT_3_SOURCE 0x7CC
+#define ARIZONA_SLIMTX2MIX_INPUT_3_VOLUME 0x7CD
+#define ARIZONA_SLIMTX2MIX_INPUT_4_SOURCE 0x7CE
+#define ARIZONA_SLIMTX2MIX_INPUT_4_VOLUME 0x7CF
+#define ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE 0x7D0
+#define ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME 0x7D1
+#define ARIZONA_SLIMTX3MIX_INPUT_2_SOURCE 0x7D2
+#define ARIZONA_SLIMTX3MIX_INPUT_2_VOLUME 0x7D3
+#define ARIZONA_SLIMTX3MIX_INPUT_3_SOURCE 0x7D4
+#define ARIZONA_SLIMTX3MIX_INPUT_3_VOLUME 0x7D5
+#define ARIZONA_SLIMTX3MIX_INPUT_4_SOURCE 0x7D6
+#define ARIZONA_SLIMTX3MIX_INPUT_4_VOLUME 0x7D7
+#define ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE 0x7D8
+#define ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME 0x7D9
+#define ARIZONA_SLIMTX4MIX_INPUT_2_SOURCE 0x7DA
+#define ARIZONA_SLIMTX4MIX_INPUT_2_VOLUME 0x7DB
+#define ARIZONA_SLIMTX4MIX_INPUT_3_SOURCE 0x7DC
+#define ARIZONA_SLIMTX4MIX_INPUT_3_VOLUME 0x7DD
+#define ARIZONA_SLIMTX4MIX_INPUT_4_SOURCE 0x7DE
+#define ARIZONA_SLIMTX4MIX_INPUT_4_VOLUME 0x7DF
+#define ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE 0x7E0
+#define ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME 0x7E1
+#define ARIZONA_SLIMTX5MIX_INPUT_2_SOURCE 0x7E2
+#define ARIZONA_SLIMTX5MIX_INPUT_2_VOLUME 0x7E3
+#define ARIZONA_SLIMTX5MIX_INPUT_3_SOURCE 0x7E4
+#define ARIZONA_SLIMTX5MIX_INPUT_3_VOLUME 0x7E5
+#define ARIZONA_SLIMTX5MIX_INPUT_4_SOURCE 0x7E6
+#define ARIZONA_SLIMTX5MIX_INPUT_4_VOLUME 0x7E7
+#define ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE 0x7E8
+#define ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME 0x7E9
+#define ARIZONA_SLIMTX6MIX_INPUT_2_SOURCE 0x7EA
+#define ARIZONA_SLIMTX6MIX_INPUT_2_VOLUME 0x7EB
+#define ARIZONA_SLIMTX6MIX_INPUT_3_SOURCE 0x7EC
+#define ARIZONA_SLIMTX6MIX_INPUT_3_VOLUME 0x7ED
+#define ARIZONA_SLIMTX6MIX_INPUT_4_SOURCE 0x7EE
+#define ARIZONA_SLIMTX6MIX_INPUT_4_VOLUME 0x7EF
+#define ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE 0x7F0
+#define ARIZONA_SLIMTX7MIX_INPUT_1_VOLUME 0x7F1
+#define ARIZONA_SLIMTX7MIX_INPUT_2_SOURCE 0x7F2
+#define ARIZONA_SLIMTX7MIX_INPUT_2_VOLUME 0x7F3
+#define ARIZONA_SLIMTX7MIX_INPUT_3_SOURCE 0x7F4
+#define ARIZONA_SLIMTX7MIX_INPUT_3_VOLUME 0x7F5
+#define ARIZONA_SLIMTX7MIX_INPUT_4_SOURCE 0x7F6
+#define ARIZONA_SLIMTX7MIX_INPUT_4_VOLUME 0x7F7
+#define ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE 0x7F8
+#define ARIZONA_SLIMTX8MIX_INPUT_1_VOLUME 0x7F9
+#define ARIZONA_SLIMTX8MIX_INPUT_2_SOURCE 0x7FA
+#define ARIZONA_SLIMTX8MIX_INPUT_2_VOLUME 0x7FB
+#define ARIZONA_SLIMTX8MIX_INPUT_3_SOURCE 0x7FC
+#define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
+#define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
+#define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
+#define ARIZONA_EQ1MIX_INPUT_1_SOURCE 0x880
+#define ARIZONA_EQ1MIX_INPUT_1_VOLUME 0x881
+#define ARIZONA_EQ1MIX_INPUT_2_SOURCE 0x882
+#define ARIZONA_EQ1MIX_INPUT_2_VOLUME 0x883
+#define ARIZONA_EQ1MIX_INPUT_3_SOURCE 0x884
+#define ARIZONA_EQ1MIX_INPUT_3_VOLUME 0x885
+#define ARIZONA_EQ1MIX_INPUT_4_SOURCE 0x886
+#define ARIZONA_EQ1MIX_INPUT_4_VOLUME 0x887
+#define ARIZONA_EQ2MIX_INPUT_1_SOURCE 0x888
+#define ARIZONA_EQ2MIX_INPUT_1_VOLUME 0x889
+#define ARIZONA_EQ2MIX_INPUT_2_SOURCE 0x88A
+#define ARIZONA_EQ2MIX_INPUT_2_VOLUME 0x88B
+#define ARIZONA_EQ2MIX_INPUT_3_SOURCE 0x88C
+#define ARIZONA_EQ2MIX_INPUT_3_VOLUME 0x88D
+#define ARIZONA_EQ2MIX_INPUT_4_SOURCE 0x88E
+#define ARIZONA_EQ2MIX_INPUT_4_VOLUME 0x88F
+#define ARIZONA_EQ3MIX_INPUT_1_SOURCE 0x890
+#define ARIZONA_EQ3MIX_INPUT_1_VOLUME 0x891
+#define ARIZONA_EQ3MIX_INPUT_2_SOURCE 0x892
+#define ARIZONA_EQ3MIX_INPUT_2_VOLUME 0x893
+#define ARIZONA_EQ3MIX_INPUT_3_SOURCE 0x894
+#define ARIZONA_EQ3MIX_INPUT_3_VOLUME 0x895
+#define ARIZONA_EQ3MIX_INPUT_4_SOURCE 0x896
+#define ARIZONA_EQ3MIX_INPUT_4_VOLUME 0x897
+#define ARIZONA_EQ4MIX_INPUT_1_SOURCE 0x898
+#define ARIZONA_EQ4MIX_INPUT_1_VOLUME 0x899
+#define ARIZONA_EQ4MIX_INPUT_2_SOURCE 0x89A
+#define ARIZONA_EQ4MIX_INPUT_2_VOLUME 0x89B
+#define ARIZONA_EQ4MIX_INPUT_3_SOURCE 0x89C
+#define ARIZONA_EQ4MIX_INPUT_3_VOLUME 0x89D
+#define ARIZONA_EQ4MIX_INPUT_4_SOURCE 0x89E
+#define ARIZONA_EQ4MIX_INPUT_4_VOLUME 0x89F
+#define ARIZONA_DRC1LMIX_INPUT_1_SOURCE 0x8C0
+#define ARIZONA_DRC1LMIX_INPUT_1_VOLUME 0x8C1
+#define ARIZONA_DRC1LMIX_INPUT_2_SOURCE 0x8C2
+#define ARIZONA_DRC1LMIX_INPUT_2_VOLUME 0x8C3
+#define ARIZONA_DRC1LMIX_INPUT_3_SOURCE 0x8C4
+#define ARIZONA_DRC1LMIX_INPUT_3_VOLUME 0x8C5
+#define ARIZONA_DRC1LMIX_INPUT_4_SOURCE 0x8C6
+#define ARIZONA_DRC1LMIX_INPUT_4_VOLUME 0x8C7
+#define ARIZONA_DRC1RMIX_INPUT_1_SOURCE 0x8C8
+#define ARIZONA_DRC1RMIX_INPUT_1_VOLUME 0x8C9
+#define ARIZONA_DRC1RMIX_INPUT_2_SOURCE 0x8CA
+#define ARIZONA_DRC1RMIX_INPUT_2_VOLUME 0x8CB
+#define ARIZONA_DRC1RMIX_INPUT_3_SOURCE 0x8CC
+#define ARIZONA_DRC1RMIX_INPUT_3_VOLUME 0x8CD
+#define ARIZONA_DRC1RMIX_INPUT_4_SOURCE 0x8CE
+#define ARIZONA_DRC1RMIX_INPUT_4_VOLUME 0x8CF
+#define ARIZONA_DRC2LMIX_INPUT_1_SOURCE 0x8D0
+#define ARIZONA_DRC2LMIX_INPUT_1_VOLUME 0x8D1
+#define ARIZONA_DRC2LMIX_INPUT_2_SOURCE 0x8D2
+#define ARIZONA_DRC2LMIX_INPUT_2_VOLUME 0x8D3
+#define ARIZONA_DRC2LMIX_INPUT_3_SOURCE 0x8D4
+#define ARIZONA_DRC2LMIX_INPUT_3_VOLUME 0x8D5
+#define ARIZONA_DRC2LMIX_INPUT_4_SOURCE 0x8D6
+#define ARIZONA_DRC2LMIX_INPUT_4_VOLUME 0x8D7
+#define ARIZONA_DRC2RMIX_INPUT_1_SOURCE 0x8D8
+#define ARIZONA_DRC2RMIX_INPUT_1_VOLUME 0x8D9
+#define ARIZONA_DRC2RMIX_INPUT_2_SOURCE 0x8DA
+#define ARIZONA_DRC2RMIX_INPUT_2_VOLUME 0x8DB
+#define ARIZONA_DRC2RMIX_INPUT_3_SOURCE 0x8DC
+#define ARIZONA_DRC2RMIX_INPUT_3_VOLUME 0x8DD
+#define ARIZONA_DRC2RMIX_INPUT_4_SOURCE 0x8DE
+#define ARIZONA_DRC2RMIX_INPUT_4_VOLUME 0x8DF
+#define ARIZONA_HPLP1MIX_INPUT_1_SOURCE 0x900
+#define ARIZONA_HPLP1MIX_INPUT_1_VOLUME 0x901
+#define ARIZONA_HPLP1MIX_INPUT_2_SOURCE 0x902
+#define ARIZONA_HPLP1MIX_INPUT_2_VOLUME 0x903
+#define ARIZONA_HPLP1MIX_INPUT_3_SOURCE 0x904
+#define ARIZONA_HPLP1MIX_INPUT_3_VOLUME 0x905
+#define ARIZONA_HPLP1MIX_INPUT_4_SOURCE 0x906
+#define ARIZONA_HPLP1MIX_INPUT_4_VOLUME 0x907
+#define ARIZONA_HPLP2MIX_INPUT_1_SOURCE 0x908
+#define ARIZONA_HPLP2MIX_INPUT_1_VOLUME 0x909
+#define ARIZONA_HPLP2MIX_INPUT_2_SOURCE 0x90A
+#define ARIZONA_HPLP2MIX_INPUT_2_VOLUME 0x90B
+#define ARIZONA_HPLP2MIX_INPUT_3_SOURCE 0x90C
+#define ARIZONA_HPLP2MIX_INPUT_3_VOLUME 0x90D
+#define ARIZONA_HPLP2MIX_INPUT_4_SOURCE 0x90E
+#define ARIZONA_HPLP2MIX_INPUT_4_VOLUME 0x90F
+#define ARIZONA_HPLP3MIX_INPUT_1_SOURCE 0x910
+#define ARIZONA_HPLP3MIX_INPUT_1_VOLUME 0x911
+#define ARIZONA_HPLP3MIX_INPUT_2_SOURCE 0x912
+#define ARIZONA_HPLP3MIX_INPUT_2_VOLUME 0x913
+#define ARIZONA_HPLP3MIX_INPUT_3_SOURCE 0x914
+#define ARIZONA_HPLP3MIX_INPUT_3_VOLUME 0x915
+#define ARIZONA_HPLP3MIX_INPUT_4_SOURCE 0x916
+#define ARIZONA_HPLP3MIX_INPUT_4_VOLUME 0x917
+#define ARIZONA_HPLP4MIX_INPUT_1_SOURCE 0x918
+#define ARIZONA_HPLP4MIX_INPUT_1_VOLUME 0x919
+#define ARIZONA_HPLP4MIX_INPUT_2_SOURCE 0x91A
+#define ARIZONA_HPLP4MIX_INPUT_2_VOLUME 0x91B
+#define ARIZONA_HPLP4MIX_INPUT_3_SOURCE 0x91C
+#define ARIZONA_HPLP4MIX_INPUT_3_VOLUME 0x91D
+#define ARIZONA_HPLP4MIX_INPUT_4_SOURCE 0x91E
+#define ARIZONA_HPLP4MIX_INPUT_4_VOLUME 0x91F
+#define ARIZONA_DSP1LMIX_INPUT_1_SOURCE 0x940
+#define ARIZONA_DSP1LMIX_INPUT_1_VOLUME 0x941
+#define ARIZONA_DSP1LMIX_INPUT_2_SOURCE 0x942
+#define ARIZONA_DSP1LMIX_INPUT_2_VOLUME 0x943
+#define ARIZONA_DSP1LMIX_INPUT_3_SOURCE 0x944
+#define ARIZONA_DSP1LMIX_INPUT_3_VOLUME 0x945
+#define ARIZONA_DSP1LMIX_INPUT_4_SOURCE 0x946
+#define ARIZONA_DSP1LMIX_INPUT_4_VOLUME 0x947
+#define ARIZONA_DSP1RMIX_INPUT_1_SOURCE 0x948
+#define ARIZONA_DSP1RMIX_INPUT_1_VOLUME 0x949
+#define ARIZONA_DSP1RMIX_INPUT_2_SOURCE 0x94A
+#define ARIZONA_DSP1RMIX_INPUT_2_VOLUME 0x94B
+#define ARIZONA_DSP1RMIX_INPUT_3_SOURCE 0x94C
+#define ARIZONA_DSP1RMIX_INPUT_3_VOLUME 0x94D
+#define ARIZONA_DSP1RMIX_INPUT_4_SOURCE 0x94E
+#define ARIZONA_DSP1RMIX_INPUT_4_VOLUME 0x94F
+#define ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE 0x950
+#define ARIZONA_DSP1AUX2MIX_INPUT_1_SOURCE 0x958
+#define ARIZONA_DSP1AUX3MIX_INPUT_1_SOURCE 0x960
+#define ARIZONA_DSP1AUX4MIX_INPUT_1_SOURCE 0x968
+#define ARIZONA_DSP1AUX5MIX_INPUT_1_SOURCE 0x970
+#define ARIZONA_DSP1AUX6MIX_INPUT_1_SOURCE 0x978
+#define ARIZONA_DSP2LMIX_INPUT_1_SOURCE 0x980
+#define ARIZONA_DSP2LMIX_INPUT_1_VOLUME 0x981
+#define ARIZONA_DSP2LMIX_INPUT_2_SOURCE 0x982
+#define ARIZONA_DSP2LMIX_INPUT_2_VOLUME 0x983
+#define ARIZONA_DSP2LMIX_INPUT_3_SOURCE 0x984
+#define ARIZONA_DSP2LMIX_INPUT_3_VOLUME 0x985
+#define ARIZONA_DSP2LMIX_INPUT_4_SOURCE 0x986
+#define ARIZONA_DSP2LMIX_INPUT_4_VOLUME 0x987
+#define ARIZONA_DSP2RMIX_INPUT_1_SOURCE 0x988
+#define ARIZONA_DSP2RMIX_INPUT_1_VOLUME 0x989
+#define ARIZONA_DSP2RMIX_INPUT_2_SOURCE 0x98A
+#define ARIZONA_DSP2RMIX_INPUT_2_VOLUME 0x98B
+#define ARIZONA_DSP2RMIX_INPUT_3_SOURCE 0x98C
+#define ARIZONA_DSP2RMIX_INPUT_3_VOLUME 0x98D
+#define ARIZONA_DSP2RMIX_INPUT_4_SOURCE 0x98E
+#define ARIZONA_DSP2RMIX_INPUT_4_VOLUME 0x98F
+#define ARIZONA_DSP2AUX1MIX_INPUT_1_SOURCE 0x990
+#define ARIZONA_DSP2AUX2MIX_INPUT_1_SOURCE 0x998
+#define ARIZONA_DSP2AUX3MIX_INPUT_1_SOURCE 0x9A0
+#define ARIZONA_DSP2AUX4MIX_INPUT_1_SOURCE 0x9A8
+#define ARIZONA_DSP2AUX5MIX_INPUT_1_SOURCE 0x9B0
+#define ARIZONA_DSP2AUX6MIX_INPUT_1_SOURCE 0x9B8
+#define ARIZONA_DSP3LMIX_INPUT_1_SOURCE 0x9C0
+#define ARIZONA_DSP3LMIX_INPUT_1_VOLUME 0x9C1
+#define ARIZONA_DSP3LMIX_INPUT_2_SOURCE 0x9C2
+#define ARIZONA_DSP3LMIX_INPUT_2_VOLUME 0x9C3
+#define ARIZONA_DSP3LMIX_INPUT_3_SOURCE 0x9C4
+#define ARIZONA_DSP3LMIX_INPUT_3_VOLUME 0x9C5
+#define ARIZONA_DSP3LMIX_INPUT_4_SOURCE 0x9C6
+#define ARIZONA_DSP3LMIX_INPUT_4_VOLUME 0x9C7
+#define ARIZONA_DSP3RMIX_INPUT_1_SOURCE 0x9C8
+#define ARIZONA_DSP3RMIX_INPUT_1_VOLUME 0x9C9
+#define ARIZONA_DSP3RMIX_INPUT_2_SOURCE 0x9CA
+#define ARIZONA_DSP3RMIX_INPUT_2_VOLUME 0x9CB
+#define ARIZONA_DSP3RMIX_INPUT_3_SOURCE 0x9CC
+#define ARIZONA_DSP3RMIX_INPUT_3_VOLUME 0x9CD
+#define ARIZONA_DSP3RMIX_INPUT_4_SOURCE 0x9CE
+#define ARIZONA_DSP3RMIX_INPUT_4_VOLUME 0x9CF
+#define ARIZONA_DSP3AUX1MIX_INPUT_1_SOURCE 0x9D0
+#define ARIZONA_DSP3AUX2MIX_INPUT_1_SOURCE 0x9D8
+#define ARIZONA_DSP3AUX3MIX_INPUT_1_SOURCE 0x9E0
+#define ARIZONA_DSP3AUX4MIX_INPUT_1_SOURCE 0x9E8
+#define ARIZONA_DSP3AUX5MIX_INPUT_1_SOURCE 0x9F0
+#define ARIZONA_DSP3AUX6MIX_INPUT_1_SOURCE 0x9F8
+#define ARIZONA_DSP4LMIX_INPUT_1_SOURCE 0xA00
+#define ARIZONA_DSP4LMIX_INPUT_1_VOLUME 0xA01
+#define ARIZONA_DSP4LMIX_INPUT_2_SOURCE 0xA02
+#define ARIZONA_DSP4LMIX_INPUT_2_VOLUME 0xA03
+#define ARIZONA_DSP4LMIX_INPUT_3_SOURCE 0xA04
+#define ARIZONA_DSP4LMIX_INPUT_3_VOLUME 0xA05
+#define ARIZONA_DSP4LMIX_INPUT_4_SOURCE 0xA06
+#define ARIZONA_DSP4LMIX_INPUT_4_VOLUME 0xA07
+#define ARIZONA_DSP4RMIX_INPUT_1_SOURCE 0xA08
+#define ARIZONA_DSP4RMIX_INPUT_1_VOLUME 0xA09
+#define ARIZONA_DSP4RMIX_INPUT_2_SOURCE 0xA0A
+#define ARIZONA_DSP4RMIX_INPUT_2_VOLUME 0xA0B
+#define ARIZONA_DSP4RMIX_INPUT_3_SOURCE 0xA0C
+#define ARIZONA_DSP4RMIX_INPUT_3_VOLUME 0xA0D
+#define ARIZONA_DSP4RMIX_INPUT_4_SOURCE 0xA0E
+#define ARIZONA_DSP4RMIX_INPUT_4_VOLUME 0xA0F
+#define ARIZONA_DSP4AUX1MIX_INPUT_1_SOURCE 0xA10
+#define ARIZONA_DSP4AUX2MIX_INPUT_1_SOURCE 0xA18
+#define ARIZONA_DSP4AUX3MIX_INPUT_1_SOURCE 0xA20
+#define ARIZONA_DSP4AUX4MIX_INPUT_1_SOURCE 0xA28
+#define ARIZONA_DSP4AUX5MIX_INPUT_1_SOURCE 0xA30
+#define ARIZONA_DSP4AUX6MIX_INPUT_1_SOURCE 0xA38
+#define ARIZONA_ASRC1LMIX_INPUT_1_SOURCE 0xA80
+#define ARIZONA_ASRC1RMIX_INPUT_1_SOURCE 0xA88
+#define ARIZONA_ASRC2LMIX_INPUT_1_SOURCE 0xA90
+#define ARIZONA_ASRC2RMIX_INPUT_1_SOURCE 0xA98
+#define ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE 0xB00
+#define ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE 0xB08
+#define ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE 0xB10
+#define ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE 0xB18
+#define ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE 0xB20
+#define ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE 0xB28
+#define ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30
+#define ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
+#define ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
+#define ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
+#define ARIZONA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50
+#define ARIZONA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58
+#define ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
+#define ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68
+#define ARIZONA_ISRC2INT3MIX_INPUT_1_SOURCE 0xB70
+#define ARIZONA_ISRC2INT4MIX_INPUT_1_SOURCE 0xB78
+#define ARIZONA_ISRC3DEC1MIX_INPUT_1_SOURCE 0xB80
+#define ARIZONA_ISRC3DEC2MIX_INPUT_1_SOURCE 0xB88
+#define ARIZONA_ISRC3DEC3MIX_INPUT_1_SOURCE 0xB90
+#define ARIZONA_ISRC3DEC4MIX_INPUT_1_SOURCE 0xB98
+#define ARIZONA_ISRC3INT1MIX_INPUT_1_SOURCE 0xBA0
+#define ARIZONA_ISRC3INT2MIX_INPUT_1_SOURCE 0xBA8
+#define ARIZONA_ISRC3INT3MIX_INPUT_1_SOURCE 0xBB0
+#define ARIZONA_ISRC3INT4MIX_INPUT_1_SOURCE 0xBB8
+#define ARIZONA_GPIO1_CTRL 0xC00
+#define ARIZONA_GPIO2_CTRL 0xC01
+#define ARIZONA_GPIO3_CTRL 0xC02
+#define ARIZONA_GPIO4_CTRL 0xC03
+#define ARIZONA_GPIO5_CTRL 0xC04
+#define ARIZONA_IRQ_CTRL_1 0xC0F
+#define ARIZONA_GPIO_DEBOUNCE_CONFIG 0xC10
+#define ARIZONA_MISC_PAD_CTRL_1 0xC20
+#define ARIZONA_MISC_PAD_CTRL_2 0xC21
+#define ARIZONA_MISC_PAD_CTRL_3 0xC22
+#define ARIZONA_MISC_PAD_CTRL_4 0xC23
+#define ARIZONA_MISC_PAD_CTRL_5 0xC24
+#define ARIZONA_MISC_PAD_CTRL_6 0xC25
+#define ARIZONA_MISC_PAD_CTRL_7 0xC30
+#define ARIZONA_MISC_PAD_CTRL_8 0xC31
+#define ARIZONA_MISC_PAD_CTRL_9 0xC32
+#define ARIZONA_MISC_PAD_CTRL_10 0xC33
+#define ARIZONA_MISC_PAD_CTRL_11 0xC34
+#define ARIZONA_MISC_PAD_CTRL_12 0xC35
+#define ARIZONA_MISC_PAD_CTRL_13 0xC36
+#define ARIZONA_MISC_PAD_CTRL_14 0xC37
+#define ARIZONA_MISC_PAD_CTRL_15 0xC38
+#define ARIZONA_MISC_PAD_CTRL_16 0xC39
+#define ARIZONA_MISC_PAD_CTRL_17 0xC3A
+#define ARIZONA_MISC_PAD_CTRL_18 0xC3B
+#define ARIZONA_INTERRUPT_STATUS_1 0xD00
+#define ARIZONA_INTERRUPT_STATUS_2 0xD01
+#define ARIZONA_INTERRUPT_STATUS_3 0xD02
+#define ARIZONA_INTERRUPT_STATUS_4 0xD03
+#define ARIZONA_INTERRUPT_STATUS_5 0xD04
+#define ARIZONA_INTERRUPT_STATUS_6 0xD05
+#define ARIZONA_INTERRUPT_STATUS_1_MASK 0xD08
+#define ARIZONA_INTERRUPT_STATUS_2_MASK 0xD09
+#define ARIZONA_INTERRUPT_STATUS_3_MASK 0xD0A
+#define ARIZONA_INTERRUPT_STATUS_4_MASK 0xD0B
+#define ARIZONA_INTERRUPT_STATUS_5_MASK 0xD0C
+#define ARIZONA_INTERRUPT_STATUS_6_MASK 0xD0D
+#define ARIZONA_INTERRUPT_CONTROL 0xD0F
+#define ARIZONA_IRQ2_STATUS_1 0xD10
+#define ARIZONA_IRQ2_STATUS_2 0xD11
+#define ARIZONA_IRQ2_STATUS_3 0xD12
+#define ARIZONA_IRQ2_STATUS_4 0xD13
+#define ARIZONA_IRQ2_STATUS_5 0xD14
+#define ARIZONA_IRQ2_STATUS_6 0xD15
+#define ARIZONA_IRQ2_STATUS_1_MASK 0xD18
+#define ARIZONA_IRQ2_STATUS_2_MASK 0xD19
+#define ARIZONA_IRQ2_STATUS_3_MASK 0xD1A
+#define ARIZONA_IRQ2_STATUS_4_MASK 0xD1B
+#define ARIZONA_IRQ2_STATUS_5_MASK 0xD1C
+#define ARIZONA_IRQ2_STATUS_6_MASK 0xD1D
+#define ARIZONA_IRQ2_CONTROL 0xD1F
+#define ARIZONA_INTERRUPT_RAW_STATUS_2 0xD20
+#define ARIZONA_INTERRUPT_RAW_STATUS_3 0xD21
+#define ARIZONA_INTERRUPT_RAW_STATUS_4 0xD22
+#define ARIZONA_INTERRUPT_RAW_STATUS_5 0xD23
+#define ARIZONA_INTERRUPT_RAW_STATUS_6 0xD24
+#define ARIZONA_INTERRUPT_RAW_STATUS_7 0xD25
+#define ARIZONA_INTERRUPT_RAW_STATUS_8 0xD26
+#define ARIZONA_INTERRUPT_RAW_STATUS_9 0xD28
+#define ARIZONA_IRQ_PIN_STATUS 0xD40
+#define ARIZONA_ADSP2_IRQ0 0xD41
+#define ARIZONA_AOD_WKUP_AND_TRIG 0xD50
+#define ARIZONA_AOD_IRQ1 0xD51
+#define ARIZONA_AOD_IRQ2 0xD52
+#define ARIZONA_AOD_IRQ_MASK_IRQ1 0xD53
+#define ARIZONA_AOD_IRQ_MASK_IRQ2 0xD54
+#define ARIZONA_AOD_IRQ_RAW_STATUS 0xD55
+#define ARIZONA_JACK_DETECT_DEBOUNCE 0xD56
+#define ARIZONA_FX_CTRL1 0xE00
+#define ARIZONA_FX_CTRL2 0xE01
+#define ARIZONA_EQ1_1 0xE10
+#define ARIZONA_EQ1_2 0xE11
+#define ARIZONA_EQ1_3 0xE12
+#define ARIZONA_EQ1_4 0xE13
+#define ARIZONA_EQ1_5 0xE14
+#define ARIZONA_EQ1_6 0xE15
+#define ARIZONA_EQ1_7 0xE16
+#define ARIZONA_EQ1_8 0xE17
+#define ARIZONA_EQ1_9 0xE18
+#define ARIZONA_EQ1_10 0xE19
+#define ARIZONA_EQ1_11 0xE1A
+#define ARIZONA_EQ1_12 0xE1B
+#define ARIZONA_EQ1_13 0xE1C
+#define ARIZONA_EQ1_14 0xE1D
+#define ARIZONA_EQ1_15 0xE1E
+#define ARIZONA_EQ1_16 0xE1F
+#define ARIZONA_EQ1_17 0xE20
+#define ARIZONA_EQ1_18 0xE21
+#define ARIZONA_EQ1_19 0xE22
+#define ARIZONA_EQ1_20 0xE23
+#define ARIZONA_EQ1_21 0xE24
+#define ARIZONA_EQ2_1 0xE26
+#define ARIZONA_EQ2_2 0xE27
+#define ARIZONA_EQ2_3 0xE28
+#define ARIZONA_EQ2_4 0xE29
+#define ARIZONA_EQ2_5 0xE2A
+#define ARIZONA_EQ2_6 0xE2B
+#define ARIZONA_EQ2_7 0xE2C
+#define ARIZONA_EQ2_8 0xE2D
+#define ARIZONA_EQ2_9 0xE2E
+#define ARIZONA_EQ2_10 0xE2F
+#define ARIZONA_EQ2_11 0xE30
+#define ARIZONA_EQ2_12 0xE31
+#define ARIZONA_EQ2_13 0xE32
+#define ARIZONA_EQ2_14 0xE33
+#define ARIZONA_EQ2_15 0xE34
+#define ARIZONA_EQ2_16 0xE35
+#define ARIZONA_EQ2_17 0xE36
+#define ARIZONA_EQ2_18 0xE37
+#define ARIZONA_EQ2_19 0xE38
+#define ARIZONA_EQ2_20 0xE39
+#define ARIZONA_EQ2_21 0xE3A
+#define ARIZONA_EQ3_1 0xE3C
+#define ARIZONA_EQ3_2 0xE3D
+#define ARIZONA_EQ3_3 0xE3E
+#define ARIZONA_EQ3_4 0xE3F
+#define ARIZONA_EQ3_5 0xE40
+#define ARIZONA_EQ3_6 0xE41
+#define ARIZONA_EQ3_7 0xE42
+#define ARIZONA_EQ3_8 0xE43
+#define ARIZONA_EQ3_9 0xE44
+#define ARIZONA_EQ3_10 0xE45
+#define ARIZONA_EQ3_11 0xE46
+#define ARIZONA_EQ3_12 0xE47
+#define ARIZONA_EQ3_13 0xE48
+#define ARIZONA_EQ3_14 0xE49
+#define ARIZONA_EQ3_15 0xE4A
+#define ARIZONA_EQ3_16 0xE4B
+#define ARIZONA_EQ3_17 0xE4C
+#define ARIZONA_EQ3_18 0xE4D
+#define ARIZONA_EQ3_19 0xE4E
+#define ARIZONA_EQ3_20 0xE4F
+#define ARIZONA_EQ3_21 0xE50
+#define ARIZONA_EQ4_1 0xE52
+#define ARIZONA_EQ4_2 0xE53
+#define ARIZONA_EQ4_3 0xE54
+#define ARIZONA_EQ4_4 0xE55
+#define ARIZONA_EQ4_5 0xE56
+#define ARIZONA_EQ4_6 0xE57
+#define ARIZONA_EQ4_7 0xE58
+#define ARIZONA_EQ4_8 0xE59
+#define ARIZONA_EQ4_9 0xE5A
+#define ARIZONA_EQ4_10 0xE5B
+#define ARIZONA_EQ4_11 0xE5C
+#define ARIZONA_EQ4_12 0xE5D
+#define ARIZONA_EQ4_13 0xE5E
+#define ARIZONA_EQ4_14 0xE5F
+#define ARIZONA_EQ4_15 0xE60
+#define ARIZONA_EQ4_16 0xE61
+#define ARIZONA_EQ4_17 0xE62
+#define ARIZONA_EQ4_18 0xE63
+#define ARIZONA_EQ4_19 0xE64
+#define ARIZONA_EQ4_20 0xE65
+#define ARIZONA_EQ4_21 0xE66
+#define ARIZONA_DRC1_CTRL1 0xE80
+#define ARIZONA_DRC1_CTRL2 0xE81
+#define ARIZONA_DRC1_CTRL3 0xE82
+#define ARIZONA_DRC1_CTRL4 0xE83
+#define ARIZONA_DRC1_CTRL5 0xE84
+#define ARIZONA_DRC2_CTRL1 0xE89
+#define ARIZONA_DRC2_CTRL2 0xE8A
+#define ARIZONA_DRC2_CTRL3 0xE8B
+#define ARIZONA_DRC2_CTRL4 0xE8C
+#define ARIZONA_DRC2_CTRL5 0xE8D
+#define ARIZONA_HPLPF1_1 0xEC0
+#define ARIZONA_HPLPF1_2 0xEC1
+#define ARIZONA_HPLPF2_1 0xEC4
+#define ARIZONA_HPLPF2_2 0xEC5
+#define ARIZONA_HPLPF3_1 0xEC8
+#define ARIZONA_HPLPF3_2 0xEC9
+#define ARIZONA_HPLPF4_1 0xECC
+#define ARIZONA_HPLPF4_2 0xECD
+#define ARIZONA_ASRC_ENABLE 0xEE0
+#define ARIZONA_ASRC_STATUS 0xEE1
+#define ARIZONA_ASRC_RATE1 0xEE2
+#define ARIZONA_ASRC_RATE2 0xEE3
+#define ARIZONA_ISRC_1_CTRL_1 0xEF0
+#define ARIZONA_ISRC_1_CTRL_2 0xEF1
+#define ARIZONA_ISRC_1_CTRL_3 0xEF2
+#define ARIZONA_ISRC_2_CTRL_1 0xEF3
+#define ARIZONA_ISRC_2_CTRL_2 0xEF4
+#define ARIZONA_ISRC_2_CTRL_3 0xEF5
+#define ARIZONA_ISRC_3_CTRL_1 0xEF6
+#define ARIZONA_ISRC_3_CTRL_2 0xEF7
+#define ARIZONA_ISRC_3_CTRL_3 0xEF8
+#define ARIZONA_CLOCK_CONTROL 0xF00
+#define ARIZONA_ANC_SRC 0xF01
+#define ARIZONA_DSP_STATUS 0xF02
+#define ARIZONA_DSP1_CONTROL_1 0x1100
+#define ARIZONA_DSP1_CLOCKING_1 0x1101
+#define ARIZONA_DSP1_STATUS_1 0x1104
+#define ARIZONA_DSP1_STATUS_2 0x1105
+#define ARIZONA_DSP1_STATUS_3 0x1106
+#define ARIZONA_DSP1_STATUS_4 0x1107
+#define ARIZONA_DSP1_WDMA_BUFFER_1 0x1110
+#define ARIZONA_DSP1_WDMA_BUFFER_2 0x1111
+#define ARIZONA_DSP1_WDMA_BUFFER_3 0x1112
+#define ARIZONA_DSP1_WDMA_BUFFER_4 0x1113
+#define ARIZONA_DSP1_WDMA_BUFFER_5 0x1114
+#define ARIZONA_DSP1_WDMA_BUFFER_6 0x1115
+#define ARIZONA_DSP1_WDMA_BUFFER_7 0x1116
+#define ARIZONA_DSP1_WDMA_BUFFER_8 0x1117
+#define ARIZONA_DSP1_RDMA_BUFFER_1 0x1120
+#define ARIZONA_DSP1_RDMA_BUFFER_2 0x1121
+#define ARIZONA_DSP1_RDMA_BUFFER_3 0x1122
+#define ARIZONA_DSP1_RDMA_BUFFER_4 0x1123
+#define ARIZONA_DSP1_RDMA_BUFFER_5 0x1124
+#define ARIZONA_DSP1_RDMA_BUFFER_6 0x1125
+#define ARIZONA_DSP1_WDMA_CONFIG_1 0x1130
+#define ARIZONA_DSP1_WDMA_CONFIG_2 0x1131
+#define ARIZONA_DSP1_WDMA_OFFSET_1 0x1132
+#define ARIZONA_DSP1_RDMA_CONFIG_1 0x1134
+#define ARIZONA_DSP1_RDMA_OFFSET_1 0x1135
+#define ARIZONA_DSP1_EXTERNAL_START_SELECT_1 0x1138
+#define ARIZONA_DSP1_SCRATCH_0 0x1140
+#define ARIZONA_DSP1_SCRATCH_1 0x1141
+#define ARIZONA_DSP1_SCRATCH_2 0x1142
+#define ARIZONA_DSP1_SCRATCH_3 0x1143
+#define ARIZONA_DSP2_CONTROL_1 0x1200
+#define ARIZONA_DSP2_CLOCKING_1 0x1201
+#define ARIZONA_DSP2_STATUS_1 0x1204
+#define ARIZONA_DSP2_STATUS_2 0x1205
+#define ARIZONA_DSP2_STATUS_3 0x1206
+#define ARIZONA_DSP2_STATUS_4 0x1207
+#define ARIZONA_DSP2_WDMA_BUFFER_1 0x1210
+#define ARIZONA_DSP2_WDMA_BUFFER_2 0x1211
+#define ARIZONA_DSP2_WDMA_BUFFER_3 0x1212
+#define ARIZONA_DSP2_WDMA_BUFFER_4 0x1213
+#define ARIZONA_DSP2_WDMA_BUFFER_5 0x1214
+#define ARIZONA_DSP2_WDMA_BUFFER_6 0x1215
+#define ARIZONA_DSP2_WDMA_BUFFER_7 0x1216
+#define ARIZONA_DSP2_WDMA_BUFFER_8 0x1217
+#define ARIZONA_DSP2_RDMA_BUFFER_1 0x1220
+#define ARIZONA_DSP2_RDMA_BUFFER_2 0x1221
+#define ARIZONA_DSP2_RDMA_BUFFER_3 0x1222
+#define ARIZONA_DSP2_RDMA_BUFFER_4 0x1223
+#define ARIZONA_DSP2_RDMA_BUFFER_5 0x1224
+#define ARIZONA_DSP2_RDMA_BUFFER_6 0x1225
+#define ARIZONA_DSP2_WDMA_CONFIG_1 0x1230
+#define ARIZONA_DSP2_WDMA_CONFIG_2 0x1231
+#define ARIZONA_DSP2_WDMA_OFFSET_1 0x1232
+#define ARIZONA_DSP2_RDMA_CONFIG_1 0x1234
+#define ARIZONA_DSP2_RDMA_OFFSET_1 0x1235
+#define ARIZONA_DSP2_EXTERNAL_START_SELECT_1 0x1238
+#define ARIZONA_DSP2_SCRATCH_0 0x1240
+#define ARIZONA_DSP2_SCRATCH_1 0x1241
+#define ARIZONA_DSP2_SCRATCH_2 0x1242
+#define ARIZONA_DSP2_SCRATCH_3 0x1243
+#define ARIZONA_DSP3_CONTROL_1 0x1300
+#define ARIZONA_DSP3_CLOCKING_1 0x1301
+#define ARIZONA_DSP3_STATUS_1 0x1304
+#define ARIZONA_DSP3_STATUS_2 0x1305
+#define ARIZONA_DSP3_STATUS_3 0x1306
+#define ARIZONA_DSP3_STATUS_4 0x1307
+#define ARIZONA_DSP3_WDMA_BUFFER_1 0x1310
+#define ARIZONA_DSP3_WDMA_BUFFER_2 0x1311
+#define ARIZONA_DSP3_WDMA_BUFFER_3 0x1312
+#define ARIZONA_DSP3_WDMA_BUFFER_4 0x1313
+#define ARIZONA_DSP3_WDMA_BUFFER_5 0x1314
+#define ARIZONA_DSP3_WDMA_BUFFER_6 0x1315
+#define ARIZONA_DSP3_WDMA_BUFFER_7 0x1316
+#define ARIZONA_DSP3_WDMA_BUFFER_8 0x1317
+#define ARIZONA_DSP3_RDMA_BUFFER_1 0x1320
+#define ARIZONA_DSP3_RDMA_BUFFER_2 0x1321
+#define ARIZONA_DSP3_RDMA_BUFFER_3 0x1322
+#define ARIZONA_DSP3_RDMA_BUFFER_4 0x1323
+#define ARIZONA_DSP3_RDMA_BUFFER_5 0x1324
+#define ARIZONA_DSP3_RDMA_BUFFER_6 0x1325
+#define ARIZONA_DSP3_WDMA_CONFIG_1 0x1330
+#define ARIZONA_DSP3_WDMA_CONFIG_2 0x1331
+#define ARIZONA_DSP3_WDMA_OFFSET_1 0x1332
+#define ARIZONA_DSP3_RDMA_CONFIG_1 0x1334
+#define ARIZONA_DSP3_RDMA_OFFSET_1 0x1335
+#define ARIZONA_DSP3_EXTERNAL_START_SELECT_1 0x1338
+#define ARIZONA_DSP3_SCRATCH_0 0x1340
+#define ARIZONA_DSP3_SCRATCH_1 0x1341
+#define ARIZONA_DSP3_SCRATCH_2 0x1342
+#define ARIZONA_DSP3_SCRATCH_3 0x1343
+#define ARIZONA_DSP4_CONTROL_1 0x1400
+#define ARIZONA_DSP4_CLOCKING_1 0x1401
+#define ARIZONA_DSP4_STATUS_1 0x1404
+#define ARIZONA_DSP4_STATUS_2 0x1405
+#define ARIZONA_DSP4_STATUS_3 0x1406
+#define ARIZONA_DSP4_STATUS_4 0x1407
+#define ARIZONA_DSP4_WDMA_BUFFER_1 0x1410
+#define ARIZONA_DSP4_WDMA_BUFFER_2 0x1411
+#define ARIZONA_DSP4_WDMA_BUFFER_3 0x1412
+#define ARIZONA_DSP4_WDMA_BUFFER_4 0x1413
+#define ARIZONA_DSP4_WDMA_BUFFER_5 0x1414
+#define ARIZONA_DSP4_WDMA_BUFFER_6 0x1415
+#define ARIZONA_DSP4_WDMA_BUFFER_7 0x1416
+#define ARIZONA_DSP4_WDMA_BUFFER_8 0x1417
+#define ARIZONA_DSP4_RDMA_BUFFER_1 0x1420
+#define ARIZONA_DSP4_RDMA_BUFFER_2 0x1421
+#define ARIZONA_DSP4_RDMA_BUFFER_3 0x1422
+#define ARIZONA_DSP4_RDMA_BUFFER_4 0x1423
+#define ARIZONA_DSP4_RDMA_BUFFER_5 0x1424
+#define ARIZONA_DSP4_RDMA_BUFFER_6 0x1425
+#define ARIZONA_DSP4_WDMA_CONFIG_1 0x1430
+#define ARIZONA_DSP4_WDMA_CONFIG_2 0x1431
+#define ARIZONA_DSP4_WDMA_OFFSET_1 0x1432
+#define ARIZONA_DSP4_RDMA_CONFIG_1 0x1434
+#define ARIZONA_DSP4_RDMA_OFFSET_1 0x1435
+#define ARIZONA_DSP4_EXTERNAL_START_SELECT_1 0x1438
+#define ARIZONA_DSP4_SCRATCH_0 0x1440
+#define ARIZONA_DSP4_SCRATCH_1 0x1441
+#define ARIZONA_DSP4_SCRATCH_2 0x1442
+#define ARIZONA_DSP4_SCRATCH_3 0x1443
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - software reset
+ */
+#define ARIZONA_SW_RST_DEV_ID1_MASK 0xFFFF /* SW_RST_DEV_ID1 - [15:0] */
+#define ARIZONA_SW_RST_DEV_ID1_SHIFT 0 /* SW_RST_DEV_ID1 - [15:0] */
+#define ARIZONA_SW_RST_DEV_ID1_WIDTH 16 /* SW_RST_DEV_ID1 - [15:0] */
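Each field below follows a fixed convention: an optional value macro for single-bit flags, plus _MASK (the bits the field occupies), _SHIFT (the bit position of its least-significant bit) and _WIDTH (its size in bits). A minimal sketch of how such a triple is typically consumed, assuming a regmap handle obtained from the Arizona core elsewhere; the helper name and the literal register address 0x00 (R0, per the comment above) are illustrative only, not part of the patch:

#include <linux/regmap.h>

/* Hypothetical helper: read R0 (0x00) and return its DEV_ID1 field. */
static int example_read_dev_id(struct regmap *regmap, unsigned int *dev_id)
{
	unsigned int val;
	int ret;

	ret = regmap_read(regmap, 0x00, &val);
	if (ret)
		return ret;

	/* Keep bits [15:0], then shift the field down to a plain value. */
	*dev_id = (val & ARIZONA_SW_RST_DEV_ID1_MASK) >> ARIZONA_SW_RST_DEV_ID1_SHIFT;

	return 0;
}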
+
+/*
+ * R1 (0x01) - Device Revision
+ */
+#define ARIZONA_DEVICE_REVISION_MASK 0x00FF /* DEVICE_REVISION - [7:0] */
+#define ARIZONA_DEVICE_REVISION_SHIFT 0 /* DEVICE_REVISION - [7:0] */
+#define ARIZONA_DEVICE_REVISION_WIDTH 8 /* DEVICE_REVISION - [7:0] */
+
+/*
+ * R8 (0x08) - Ctrl IF SPI CFG 1
+ */
+#define ARIZONA_SPI_CFG 0x0010 /* SPI_CFG */
+#define ARIZONA_SPI_CFG_MASK 0x0010 /* SPI_CFG */
+#define ARIZONA_SPI_CFG_SHIFT 4 /* SPI_CFG */
+#define ARIZONA_SPI_CFG_WIDTH 1 /* SPI_CFG */
+#define ARIZONA_SPI_4WIRE 0x0008 /* SPI_4WIRE */
+#define ARIZONA_SPI_4WIRE_MASK 0x0008 /* SPI_4WIRE */
+#define ARIZONA_SPI_4WIRE_SHIFT 3 /* SPI_4WIRE */
+#define ARIZONA_SPI_4WIRE_WIDTH 1 /* SPI_4WIRE */
+#define ARIZONA_SPI_AUTO_INC_MASK 0x0003 /* SPI_AUTO_INC - [1:0] */
+#define ARIZONA_SPI_AUTO_INC_SHIFT 0 /* SPI_AUTO_INC - [1:0] */
+#define ARIZONA_SPI_AUTO_INC_WIDTH 2 /* SPI_AUTO_INC - [1:0] */
+
+/*
+ * R9 (0x09) - Ctrl IF I2C1 CFG 1
+ */
+#define ARIZONA_I2C1_AUTO_INC_MASK 0x0003 /* I2C1_AUTO_INC - [1:0] */
+#define ARIZONA_I2C1_AUTO_INC_SHIFT 0 /* I2C1_AUTO_INC - [1:0] */
+#define ARIZONA_I2C1_AUTO_INC_WIDTH 2 /* I2C1_AUTO_INC - [1:0] */
+
+/*
+ * R13 (0x0D) - Ctrl IF Status 1
+ */
+#define ARIZONA_I2C1_BUSY 0x0020 /* I2C1_BUSY */
+#define ARIZONA_I2C1_BUSY_MASK 0x0020 /* I2C1_BUSY */
+#define ARIZONA_I2C1_BUSY_SHIFT 5 /* I2C1_BUSY */
+#define ARIZONA_I2C1_BUSY_WIDTH 1 /* I2C1_BUSY */
+#define ARIZONA_SPI_BUSY 0x0010 /* SPI_BUSY */
+#define ARIZONA_SPI_BUSY_MASK 0x0010 /* SPI_BUSY */
+#define ARIZONA_SPI_BUSY_SHIFT 4 /* SPI_BUSY */
+#define ARIZONA_SPI_BUSY_WIDTH 1 /* SPI_BUSY */
+
+/*
+ * R22 (0x16) - Write Sequencer Ctrl 0
+ */
+#define ARIZONA_WSEQ_ABORT 0x0800 /* WSEQ_ABORT */
+#define ARIZONA_WSEQ_ABORT_MASK 0x0800 /* WSEQ_ABORT */
+#define ARIZONA_WSEQ_ABORT_SHIFT 11 /* WSEQ_ABORT */
+#define ARIZONA_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define ARIZONA_WSEQ_START 0x0400 /* WSEQ_START */
+#define ARIZONA_WSEQ_START_MASK 0x0400 /* WSEQ_START */
+#define ARIZONA_WSEQ_START_SHIFT 10 /* WSEQ_START */
+#define ARIZONA_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define ARIZONA_WSEQ_ENA 0x0200 /* WSEQ_ENA */
+#define ARIZONA_WSEQ_ENA_MASK 0x0200 /* WSEQ_ENA */
+#define ARIZONA_WSEQ_ENA_SHIFT 9 /* WSEQ_ENA */
+#define ARIZONA_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define ARIZONA_WSEQ_START_INDEX_MASK 0x01FF /* WSEQ_START_INDEX - [8:0] */
+#define ARIZONA_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [8:0] */
+#define ARIZONA_WSEQ_START_INDEX_WIDTH 9 /* WSEQ_START_INDEX - [8:0] */
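For control registers like this one, drivers commonly combine the flag macros with regmap_update_bits() so that only the named bits are modified. A hedged sketch, assuming the same illustrative regmap handle; the helper name and the literal address 0x16 (R22, per the comment above) are assumptions, not part of the patch:

#include <linux/regmap.h>

/* Hypothetical helper: enable the write sequencer and start it at @index. */
static int example_start_wseq(struct regmap *regmap, unsigned int index)
{
	unsigned int mask = ARIZONA_WSEQ_ENA_MASK | ARIZONA_WSEQ_START_MASK |
			    ARIZONA_WSEQ_START_INDEX_MASK;
	unsigned int val = ARIZONA_WSEQ_ENA | ARIZONA_WSEQ_START |
			   ((index << ARIZONA_WSEQ_START_INDEX_SHIFT) &
			    ARIZONA_WSEQ_START_INDEX_MASK);

	/* Update only the sequencer bits of R22, leaving other bits intact. */
	return regmap_update_bits(regmap, 0x16, mask, val);
}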
+
+/*
+ * R23 (0x17) - Write Sequencer Ctrl 1
+ */
+#define ARIZONA_WSEQ_BUSY 0x0200 /* WSEQ_BUSY */
+#define ARIZONA_WSEQ_BUSY_MASK 0x0200 /* WSEQ_BUSY */
+#define ARIZONA_WSEQ_BUSY_SHIFT 9 /* WSEQ_BUSY */
+#define ARIZONA_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+#define ARIZONA_WSEQ_CURRENT_INDEX_MASK 0x01FF /* WSEQ_CURRENT_INDEX - [8:0] */
+#define ARIZONA_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [8:0] */
+#define ARIZONA_WSEQ_CURRENT_INDEX_WIDTH 9 /* WSEQ_CURRENT_INDEX - [8:0] */
+
+/*
+ * R24 (0x18) - Write Sequencer Ctrl 2
+ */
+#define ARIZONA_LOAD_DEFAULTS 0x0002 /* LOAD_DEFAULTS */
+#define ARIZONA_LOAD_DEFAULTS_MASK 0x0002 /* LOAD_DEFAULTS */
+#define ARIZONA_LOAD_DEFAULTS_SHIFT 1 /* LOAD_DEFAULTS */
+#define ARIZONA_LOAD_DEFAULTS_WIDTH 1 /* LOAD_DEFAULTS */
+#define ARIZONA_WSEQ_LOAD_MEM 0x0001 /* WSEQ_LOAD_MEM */
+#define ARIZONA_WSEQ_LOAD_MEM_MASK 0x0001 /* WSEQ_LOAD_MEM */
+#define ARIZONA_WSEQ_LOAD_MEM_SHIFT 0 /* WSEQ_LOAD_MEM */
+#define ARIZONA_WSEQ_LOAD_MEM_WIDTH 1 /* WSEQ_LOAD_MEM */
+
+/*
+ * R26 (0x1A) - Write Sequencer PROM
+ */
+#define ARIZONA_WSEQ_OTP_WRITE 0x0001 /* WSEQ_OTP_WRITE */
+#define ARIZONA_WSEQ_OTP_WRITE_MASK 0x0001 /* WSEQ_OTP_WRITE */
+#define ARIZONA_WSEQ_OTP_WRITE_SHIFT 0 /* WSEQ_OTP_WRITE */
+#define ARIZONA_WSEQ_OTP_WRITE_WIDTH 1 /* WSEQ_OTP_WRITE */
+
+/*
+ * R32 (0x20) - Tone Generator 1
+ */
+#define ARIZONA_TONE_RATE_MASK 0x7800 /* TONE_RATE - [14:11] */
+#define ARIZONA_TONE_RATE_SHIFT 11 /* TONE_RATE - [14:11] */
+#define ARIZONA_TONE_RATE_WIDTH 4 /* TONE_RATE - [14:11] */
+#define ARIZONA_TONE_OFFSET_MASK 0x0300 /* TONE_OFFSET - [9:8] */
+#define ARIZONA_TONE_OFFSET_SHIFT 8 /* TONE_OFFSET - [9:8] */
+#define ARIZONA_TONE_OFFSET_WIDTH 2 /* TONE_OFFSET - [9:8] */
+#define ARIZONA_TONE2_OVD 0x0020 /* TONE2_OVD */
+#define ARIZONA_TONE2_OVD_MASK 0x0020 /* TONE2_OVD */
+#define ARIZONA_TONE2_OVD_SHIFT 5 /* TONE2_OVD */
+#define ARIZONA_TONE2_OVD_WIDTH 1 /* TONE2_OVD */
+#define ARIZONA_TONE1_OVD 0x0010 /* TONE1_OVD */
+#define ARIZONA_TONE1_OVD_MASK 0x0010 /* TONE1_OVD */
+#define ARIZONA_TONE1_OVD_SHIFT 4 /* TONE1_OVD */
+#define ARIZONA_TONE1_OVD_WIDTH 1 /* TONE1_OVD */
+#define ARIZONA_TONE2_ENA 0x0002 /* TONE2_ENA */
+#define ARIZONA_TONE2_ENA_MASK 0x0002 /* TONE2_ENA */
+#define ARIZONA_TONE2_ENA_SHIFT 1 /* TONE2_ENA */
+#define ARIZONA_TONE2_ENA_WIDTH 1 /* TONE2_ENA */
+#define ARIZONA_TONE1_ENA 0x0001 /* TONE1_ENA */
+#define ARIZONA_TONE1_ENA_MASK 0x0001 /* TONE1_ENA */
+#define ARIZONA_TONE1_ENA_SHIFT 0 /* TONE1_ENA */
+#define ARIZONA_TONE1_ENA_WIDTH 1 /* TONE1_ENA */
+
+/*
+ * R33 (0x21) - Tone Generator 2
+ */
+#define ARIZONA_TONE1_LVL_0_MASK 0xFFFF /* TONE1_LVL - [15:0] */
+#define ARIZONA_TONE1_LVL_0_SHIFT 0 /* TONE1_LVL - [15:0] */
+#define ARIZONA_TONE1_LVL_0_WIDTH 16 /* TONE1_LVL - [15:0] */
+
+/*
+ * R34 (0x22) - Tone Generator 3
+ */
+#define ARIZONA_TONE1_LVL_MASK 0x00FF /* TONE1_LVL - [7:0] */
+#define ARIZONA_TONE1_LVL_SHIFT 0 /* TONE1_LVL - [7:0] */
+#define ARIZONA_TONE1_LVL_WIDTH 8 /* TONE1_LVL - [7:0] */
+
+/*
+ * R35 (0x23) - Tone Generator 4
+ */
+#define ARIZONA_TONE2_LVL_0_MASK 0xFFFF /* TONE2_LVL - [15:0] */
+#define ARIZONA_TONE2_LVL_0_SHIFT 0 /* TONE2_LVL - [15:0] */
+#define ARIZONA_TONE2_LVL_0_WIDTH 16 /* TONE2_LVL - [15:0] */
+
+/*
+ * R36 (0x24) - Tone Generator 5
+ */
+#define ARIZONA_TONE2_LVL_MASK 0x00FF /* TONE2_LVL - [7:0] */
+#define ARIZONA_TONE2_LVL_SHIFT 0 /* TONE2_LVL - [7:0] */
+#define ARIZONA_TONE2_LVL_WIDTH 8 /* TONE2_LVL - [7:0] */
+
+/*
+ * R48 (0x30) - PWM Drive 1
+ */
+#define ARIZONA_PWM_RATE_MASK 0x7800 /* PWM_RATE - [14:11] */
+#define ARIZONA_PWM_RATE_SHIFT 11 /* PWM_RATE - [14:11] */
+#define ARIZONA_PWM_RATE_WIDTH 4 /* PWM_RATE - [14:11] */
+#define ARIZONA_PWM_CLK_SEL_MASK 0x0700 /* PWM_CLK_SEL - [10:8] */
+#define ARIZONA_PWM_CLK_SEL_SHIFT 8 /* PWM_CLK_SEL - [10:8] */
+#define ARIZONA_PWM_CLK_SEL_WIDTH 3 /* PWM_CLK_SEL - [10:8] */
+#define ARIZONA_PWM2_OVD 0x0020 /* PWM2_OVD */
+#define ARIZONA_PWM2_OVD_MASK 0x0020 /* PWM2_OVD */
+#define ARIZONA_PWM2_OVD_SHIFT 5 /* PWM2_OVD */
+#define ARIZONA_PWM2_OVD_WIDTH 1 /* PWM2_OVD */
+#define ARIZONA_PWM1_OVD 0x0010 /* PWM1_OVD */
+#define ARIZONA_PWM1_OVD_MASK 0x0010 /* PWM1_OVD */
+#define ARIZONA_PWM1_OVD_SHIFT 4 /* PWM1_OVD */
+#define ARIZONA_PWM1_OVD_WIDTH 1 /* PWM1_OVD */
+#define ARIZONA_PWM2_ENA 0x0002 /* PWM2_ENA */
+#define ARIZONA_PWM2_ENA_MASK 0x0002 /* PWM2_ENA */
+#define ARIZONA_PWM2_ENA_SHIFT 1 /* PWM2_ENA */
+#define ARIZONA_PWM2_ENA_WIDTH 1 /* PWM2_ENA */
+#define ARIZONA_PWM1_ENA 0x0001 /* PWM1_ENA */
+#define ARIZONA_PWM1_ENA_MASK 0x0001 /* PWM1_ENA */
+#define ARIZONA_PWM1_ENA_SHIFT 0 /* PWM1_ENA */
+#define ARIZONA_PWM1_ENA_WIDTH 1 /* PWM1_ENA */
+
+/*
+ * R49 (0x31) - PWM Drive 2
+ */
+#define ARIZONA_PWM1_LVL_MASK 0x03FF /* PWM1_LVL - [9:0] */
+#define ARIZONA_PWM1_LVL_SHIFT 0 /* PWM1_LVL - [9:0] */
+#define ARIZONA_PWM1_LVL_WIDTH 10 /* PWM1_LVL - [9:0] */
+
+/*
+ * R50 (0x32) - PWM Drive 3
+ */
+#define ARIZONA_PWM2_LVL_MASK 0x03FF /* PWM2_LVL - [9:0] */
+#define ARIZONA_PWM2_LVL_SHIFT 0 /* PWM2_LVL - [9:0] */
+#define ARIZONA_PWM2_LVL_WIDTH 10 /* PWM2_LVL - [9:0] */
+
+/*
+ * R64 (0x40) - Wake control
+ */
+#define ARIZONA_WKUP_MICD_CLAMP_FALL 0x0080 /* WKUP_MICD_CLAMP_FALL */
+#define ARIZONA_WKUP_MICD_CLAMP_FALL_MASK 0x0080 /* WKUP_MICD_CLAMP_FALL */
+#define ARIZONA_WKUP_MICD_CLAMP_FALL_SHIFT 7 /* WKUP_MICD_CLAMP_FALL */
+#define ARIZONA_WKUP_MICD_CLAMP_FALL_WIDTH 1 /* WKUP_MICD_CLAMP_FALL */
+#define ARIZONA_WKUP_MICD_CLAMP_RISE 0x0040 /* WKUP_MICD_CLAMP_RISE */
+#define ARIZONA_WKUP_MICD_CLAMP_RISE_MASK 0x0040 /* WKUP_MICD_CLAMP_RISE */
+#define ARIZONA_WKUP_MICD_CLAMP_RISE_SHIFT 6 /* WKUP_MICD_CLAMP_RISE */
+#define ARIZONA_WKUP_MICD_CLAMP_RISE_WIDTH 1 /* WKUP_MICD_CLAMP_RISE */
+#define ARIZONA_WKUP_GP5_FALL 0x0020 /* WKUP_GP5_FALL */
+#define ARIZONA_WKUP_GP5_FALL_MASK 0x0020 /* WKUP_GP5_FALL */
+#define ARIZONA_WKUP_GP5_FALL_SHIFT 5 /* WKUP_GP5_FALL */
+#define ARIZONA_WKUP_GP5_FALL_WIDTH 1 /* WKUP_GP5_FALL */
+#define ARIZONA_WKUP_GP5_RISE 0x0010 /* WKUP_GP5_RISE */
+#define ARIZONA_WKUP_GP5_RISE_MASK 0x0010 /* WKUP_GP5_RISE */
+#define ARIZONA_WKUP_GP5_RISE_SHIFT 4 /* WKUP_GP5_RISE */
+#define ARIZONA_WKUP_GP5_RISE_WIDTH 1 /* WKUP_GP5_RISE */
+#define ARIZONA_WKUP_JD1_FALL 0x0008 /* WKUP_JD1_FALL */
+#define ARIZONA_WKUP_JD1_FALL_MASK 0x0008 /* WKUP_JD1_FALL */
+#define ARIZONA_WKUP_JD1_FALL_SHIFT 3 /* WKUP_JD1_FALL */
+#define ARIZONA_WKUP_JD1_FALL_WIDTH 1 /* WKUP_JD1_FALL */
+#define ARIZONA_WKUP_JD1_RISE 0x0004 /* WKUP_JD1_RISE */
+#define ARIZONA_WKUP_JD1_RISE_MASK 0x0004 /* WKUP_JD1_RISE */
+#define ARIZONA_WKUP_JD1_RISE_SHIFT 2 /* WKUP_JD1_RISE */
+#define ARIZONA_WKUP_JD1_RISE_WIDTH 1 /* WKUP_JD1_RISE */
+#define ARIZONA_WKUP_JD2_FALL 0x0002 /* WKUP_JD2_FALL */
+#define ARIZONA_WKUP_JD2_FALL_MASK 0x0002 /* WKUP_JD2_FALL */
+#define ARIZONA_WKUP_JD2_FALL_SHIFT 1 /* WKUP_JD2_FALL */
+#define ARIZONA_WKUP_JD2_FALL_WIDTH 1 /* WKUP_JD2_FALL */
+#define ARIZONA_WKUP_JD2_RISE 0x0001 /* WKUP_JD2_RISE */
+#define ARIZONA_WKUP_JD2_RISE_MASK 0x0001 /* WKUP_JD2_RISE */
+#define ARIZONA_WKUP_JD2_RISE_SHIFT 0 /* WKUP_JD2_RISE */
+#define ARIZONA_WKUP_JD2_RISE_WIDTH 1 /* WKUP_JD2_RISE */
+
+/*
+ * R65 (0x41) - Sequence control
+ */
+#define ARIZONA_WSEQ_ENA_GP5_FALL 0x0020 /* WSEQ_ENA_GP5_FALL */
+#define ARIZONA_WSEQ_ENA_GP5_FALL_MASK 0x0020 /* WSEQ_ENA_GP5_FALL */
+#define ARIZONA_WSEQ_ENA_GP5_FALL_SHIFT 5 /* WSEQ_ENA_GP5_FALL */
+#define ARIZONA_WSEQ_ENA_GP5_FALL_WIDTH 1 /* WSEQ_ENA_GP5_FALL */
+#define ARIZONA_WSEQ_ENA_GP5_RISE 0x0010 /* WSEQ_ENA_GP5_RISE */
+#define ARIZONA_WSEQ_ENA_GP5_RISE_MASK 0x0010 /* WSEQ_ENA_GP5_RISE */
+#define ARIZONA_WSEQ_ENA_GP5_RISE_SHIFT 4 /* WSEQ_ENA_GP5_RISE */
+#define ARIZONA_WSEQ_ENA_GP5_RISE_WIDTH 1 /* WSEQ_ENA_GP5_RISE */
+#define ARIZONA_WSEQ_ENA_JD1_FALL 0x0008 /* WSEQ_ENA_JD1_FALL */
+#define ARIZONA_WSEQ_ENA_JD1_FALL_MASK 0x0008 /* WSEQ_ENA_JD1_FALL */
+#define ARIZONA_WSEQ_ENA_JD1_FALL_SHIFT 3 /* WSEQ_ENA_JD1_FALL */
+#define ARIZONA_WSEQ_ENA_JD1_FALL_WIDTH 1 /* WSEQ_ENA_JD1_FALL */
+#define ARIZONA_WSEQ_ENA_JD1_RISE 0x0004 /* WSEQ_ENA_JD1_RISE */
+#define ARIZONA_WSEQ_ENA_JD1_RISE_MASK 0x0004 /* WSEQ_ENA_JD1_RISE */
+#define ARIZONA_WSEQ_ENA_JD1_RISE_SHIFT 2 /* WSEQ_ENA_JD1_RISE */
+#define ARIZONA_WSEQ_ENA_JD1_RISE_WIDTH 1 /* WSEQ_ENA_JD1_RISE */
+#define ARIZONA_WSEQ_ENA_JD2_FALL 0x0002 /* WSEQ_ENA_JD2_FALL */
+#define ARIZONA_WSEQ_ENA_JD2_FALL_MASK 0x0002 /* WSEQ_ENA_JD2_FALL */
+#define ARIZONA_WSEQ_ENA_JD2_FALL_SHIFT 1 /* WSEQ_ENA_JD2_FALL */
+#define ARIZONA_WSEQ_ENA_JD2_FALL_WIDTH 1 /* WSEQ_ENA_JD2_FALL */
+#define ARIZONA_WSEQ_ENA_JD2_RISE 0x0001 /* WSEQ_ENA_JD2_RISE */
+#define ARIZONA_WSEQ_ENA_JD2_RISE_MASK 0x0001 /* WSEQ_ENA_JD2_RISE */
+#define ARIZONA_WSEQ_ENA_JD2_RISE_SHIFT 0 /* WSEQ_ENA_JD2_RISE */
+#define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH 1 /* WSEQ_ENA_JD2_RISE */
+
+/*
+ * R97 (0x61) - Sample Rate Sequence Select 1
+ */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
+
+/*
+ * R98 (0x62) - Sample Rate Sequence Select 2
+ */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_B_SEQ_ADDR - [8:0] */
+
+/*
+ * R99 (0x63) - Sample Rate Sequence Select 3
+ */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_C_SEQ_ADDR - [8:0] */
+
+/*
+ * R100 (0x64) - Sample Rate Sequence Select 4
+ */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_MASK 0x01FF /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_SHIFT 0 /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR_WIDTH 9 /* WSEQ_SAMPLE_RATE_DETECT_D_SEQ_ADDR - [8:0] */
+
+/*
+ * R104 (0x68) - Always On Triggers Sequence Select 1
+ */
+#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_GP5_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_GP5_RISE_SEQ_ADDR - [8:0] */
+
+/*
+ * R105 (0x69) - Always On Triggers Sequence Select 2
+ */
+#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_GP5_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_GP5_FALL_SEQ_ADDR - [8:0] */
+
+/*
+ * R106 (0x6A) - Always On Triggers Sequence Select 3
+ */
+#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD1_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_JD1_RISE_SEQ_ADDR - [8:0] */
+
+/*
+ * R107 (0x6B) - Always On Triggers Sequence Select 4
+ */
+#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD1_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_JD1_FALL_SEQ_ADDR - [8:0] */
+
+/*
+ * R108 (0x6C) - Always On Triggers Sequence Select 5
+ */
+#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_SHIFT 0 /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD2_RISE_SEQ_ADDR_WIDTH 9 /* WSEQ_JD2_RISE_SEQ_ADDR - [8:0] */
+
+/*
+ * R109 (0x6D) - Always On Triggers Sequence Select 6
+ */
+#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_MASK 0x01FF /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_SHIFT 0 /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */
+#define ARIZONA_WSEQ_JD2_FALL_SEQ_ADDR_WIDTH 9 /* WSEQ_JD2_FALL_SEQ_ADDR - [8:0] */
+
+/*
+ * R112 (0x70) - Comfort Noise Generator
+ */
+#define ARIZONA_NOISE_GEN_RATE_MASK 0x7800 /* NOISE_GEN_RATE - [14:11] */
+#define ARIZONA_NOISE_GEN_RATE_SHIFT 11 /* NOISE_GEN_RATE - [14:11] */
+#define ARIZONA_NOISE_GEN_RATE_WIDTH 4 /* NOISE_GEN_RATE - [14:11] */
+#define ARIZONA_NOISE_GEN_ENA 0x0020 /* NOISE_GEN_ENA */
+#define ARIZONA_NOISE_GEN_ENA_MASK 0x0020 /* NOISE_GEN_ENA */
+#define ARIZONA_NOISE_GEN_ENA_SHIFT 5 /* NOISE_GEN_ENA */
+#define ARIZONA_NOISE_GEN_ENA_WIDTH 1 /* NOISE_GEN_ENA */
+#define ARIZONA_NOISE_GEN_GAIN_MASK 0x001F /* NOISE_GEN_GAIN - [4:0] */
+#define ARIZONA_NOISE_GEN_GAIN_SHIFT 0 /* NOISE_GEN_GAIN - [4:0] */
+#define ARIZONA_NOISE_GEN_GAIN_WIDTH 5 /* NOISE_GEN_GAIN - [4:0] */
+
+/*
+ * R144 (0x90) - Haptics Control 1
+ */
+#define ARIZONA_HAP_RATE_MASK 0x7800 /* HAP_RATE - [14:11] */
+#define ARIZONA_HAP_RATE_SHIFT 11 /* HAP_RATE - [14:11] */
+#define ARIZONA_HAP_RATE_WIDTH 4 /* HAP_RATE - [14:11] */
+#define ARIZONA_ONESHOT_TRIG 0x0010 /* ONESHOT_TRIG */
+#define ARIZONA_ONESHOT_TRIG_MASK 0x0010 /* ONESHOT_TRIG */
+#define ARIZONA_ONESHOT_TRIG_SHIFT 4 /* ONESHOT_TRIG */
+#define ARIZONA_ONESHOT_TRIG_WIDTH 1 /* ONESHOT_TRIG */
+#define ARIZONA_HAP_CTRL_MASK 0x000C /* HAP_CTRL - [3:2] */
+#define ARIZONA_HAP_CTRL_SHIFT 2 /* HAP_CTRL - [3:2] */
+#define ARIZONA_HAP_CTRL_WIDTH 2 /* HAP_CTRL - [3:2] */
+#define ARIZONA_HAP_ACT 0x0002 /* HAP_ACT */
+#define ARIZONA_HAP_ACT_MASK 0x0002 /* HAP_ACT */
+#define ARIZONA_HAP_ACT_SHIFT 1 /* HAP_ACT */
+#define ARIZONA_HAP_ACT_WIDTH 1 /* HAP_ACT */
+
+/*
+ * R145 (0x91) - Haptics Control 2
+ */
+#define ARIZONA_LRA_FREQ_MASK 0x7FFF /* LRA_FREQ - [14:0] */
+#define ARIZONA_LRA_FREQ_SHIFT 0 /* LRA_FREQ - [14:0] */
+#define ARIZONA_LRA_FREQ_WIDTH 15 /* LRA_FREQ - [14:0] */
+
+/*
+ * R146 (0x92) - Haptics phase 1 intensity
+ */
+#define ARIZONA_PHASE1_INTENSITY_MASK 0x00FF /* PHASE1_INTENSITY - [7:0] */
+#define ARIZONA_PHASE1_INTENSITY_SHIFT 0 /* PHASE1_INTENSITY - [7:0] */
+#define ARIZONA_PHASE1_INTENSITY_WIDTH 8 /* PHASE1_INTENSITY - [7:0] */
+
+/*
+ * R147 (0x93) - Haptics phase 1 duration
+ */
+#define ARIZONA_PHASE1_DURATION_MASK 0x01FF /* PHASE1_DURATION - [8:0] */
+#define ARIZONA_PHASE1_DURATION_SHIFT 0 /* PHASE1_DURATION - [8:0] */
+#define ARIZONA_PHASE1_DURATION_WIDTH 9 /* PHASE1_DURATION - [8:0] */
+
+/*
+ * R148 (0x94) - Haptics phase 2 intensity
+ */
+#define ARIZONA_PHASE2_INTENSITY_MASK 0x00FF /* PHASE2_INTENSITY - [7:0] */
+#define ARIZONA_PHASE2_INTENSITY_SHIFT 0 /* PHASE2_INTENSITY - [7:0] */
+#define ARIZONA_PHASE2_INTENSITY_WIDTH 8 /* PHASE2_INTENSITY - [7:0] */
+
+/*
+ * R149 (0x95) - Haptics phase 2 duration
+ */
+#define ARIZONA_PHASE2_DURATION_MASK 0x07FF /* PHASE2_DURATION - [10:0] */
+#define ARIZONA_PHASE2_DURATION_SHIFT 0 /* PHASE2_DURATION - [10:0] */
+#define ARIZONA_PHASE2_DURATION_WIDTH 11 /* PHASE2_DURATION - [10:0] */
+
+/*
+ * R150 (0x96) - Haptics phase 3 intensity
+ */
+#define ARIZONA_PHASE3_INTENSITY_MASK 0x00FF /* PHASE3_INTENSITY - [7:0] */
+#define ARIZONA_PHASE3_INTENSITY_SHIFT 0 /* PHASE3_INTENSITY - [7:0] */
+#define ARIZONA_PHASE3_INTENSITY_WIDTH 8 /* PHASE3_INTENSITY - [7:0] */
+
+/*
+ * R151 (0x97) - Haptics phase 3 duration
+ */
+#define ARIZONA_PHASE3_DURATION_MASK 0x01FF /* PHASE3_DURATION - [8:0] */
+#define ARIZONA_PHASE3_DURATION_SHIFT 0 /* PHASE3_DURATION - [8:0] */
+#define ARIZONA_PHASE3_DURATION_WIDTH 9 /* PHASE3_DURATION - [8:0] */
+
+/*
+ * R152 (0x98) - Haptics Status
+ */
+#define ARIZONA_ONESHOT_STS 0x0001 /* ONESHOT_STS */
+#define ARIZONA_ONESHOT_STS_MASK 0x0001 /* ONESHOT_STS */
+#define ARIZONA_ONESHOT_STS_SHIFT 0 /* ONESHOT_STS */
+#define ARIZONA_ONESHOT_STS_WIDTH 1 /* ONESHOT_STS */
+
+/*
+ * R256 (0x100) - Clock 32k 1
+ */
+#define ARIZONA_CLK_32K_ENA 0x0040 /* CLK_32K_ENA */
+#define ARIZONA_CLK_32K_ENA_MASK 0x0040 /* CLK_32K_ENA */
+#define ARIZONA_CLK_32K_ENA_SHIFT 6 /* CLK_32K_ENA */
+#define ARIZONA_CLK_32K_ENA_WIDTH 1 /* CLK_32K_ENA */
+#define ARIZONA_CLK_32K_SRC_MASK 0x0003 /* CLK_32K_SRC - [1:0] */
+#define ARIZONA_CLK_32K_SRC_SHIFT 0 /* CLK_32K_SRC - [1:0] */
+#define ARIZONA_CLK_32K_SRC_WIDTH 2 /* CLK_32K_SRC - [1:0] */
+
+/*
+ * R257 (0x101) - System Clock 1
+ */
+#define ARIZONA_SYSCLK_FRAC 0x8000 /* SYSCLK_FRAC */
+#define ARIZONA_SYSCLK_FRAC_MASK 0x8000 /* SYSCLK_FRAC */
+#define ARIZONA_SYSCLK_FRAC_SHIFT 15 /* SYSCLK_FRAC */
+#define ARIZONA_SYSCLK_FRAC_WIDTH 1 /* SYSCLK_FRAC */
+#define ARIZONA_SYSCLK_FREQ_MASK 0x0700 /* SYSCLK_FREQ - [10:8] */
+#define ARIZONA_SYSCLK_FREQ_SHIFT 8 /* SYSCLK_FREQ - [10:8] */
+#define ARIZONA_SYSCLK_FREQ_WIDTH 3 /* SYSCLK_FREQ - [10:8] */
+#define ARIZONA_SYSCLK_ENA 0x0040 /* SYSCLK_ENA */
+#define ARIZONA_SYSCLK_ENA_MASK 0x0040 /* SYSCLK_ENA */
+#define ARIZONA_SYSCLK_ENA_SHIFT 6 /* SYSCLK_ENA */
+#define ARIZONA_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */
+#define ARIZONA_SYSCLK_SRC_MASK 0x000F /* SYSCLK_SRC - [3:0] */
+#define ARIZONA_SYSCLK_SRC_SHIFT 0 /* SYSCLK_SRC - [3:0] */
+#define ARIZONA_SYSCLK_SRC_WIDTH 4 /* SYSCLK_SRC - [3:0] */
+
+/*
+ * R258 (0x102) - Sample rate 1
+ */
+#define ARIZONA_SAMPLE_RATE_1_MASK 0x001F /* SAMPLE_RATE_1 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_1_SHIFT 0 /* SAMPLE_RATE_1 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_1_WIDTH 5 /* SAMPLE_RATE_1 - [4:0] */
+
+/*
+ * R259 (0x103) - Sample rate 2
+ */
+#define ARIZONA_SAMPLE_RATE_2_MASK 0x001F /* SAMPLE_RATE_2 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_2_SHIFT 0 /* SAMPLE_RATE_2 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_2_WIDTH 5 /* SAMPLE_RATE_2 - [4:0] */
+
+/*
+ * R260 (0x104) - Sample rate 3
+ */
+#define ARIZONA_SAMPLE_RATE_3_MASK 0x001F /* SAMPLE_RATE_3 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_3_SHIFT 0 /* SAMPLE_RATE_3 - [4:0] */
+#define ARIZONA_SAMPLE_RATE_3_WIDTH 5 /* SAMPLE_RATE_3 - [4:0] */
+
+/*
+ * R266 (0x10A) - Sample rate 1 status
+ */
+#define ARIZONA_SAMPLE_RATE_1_STS_MASK 0x001F /* SAMPLE_RATE_1_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_1_STS_SHIFT 0 /* SAMPLE_RATE_1_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_1_STS_WIDTH 5 /* SAMPLE_RATE_1_STS - [4:0] */
+
+/*
+ * R267 (0x10B) - Sample rate 2 status
+ */
+#define ARIZONA_SAMPLE_RATE_2_STS_MASK 0x001F /* SAMPLE_RATE_2_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_2_STS_SHIFT 0 /* SAMPLE_RATE_2_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_2_STS_WIDTH 5 /* SAMPLE_RATE_2_STS - [4:0] */
+
+/*
+ * R268 (0x10C) - Sample rate 3 status
+ */
+#define ARIZONA_SAMPLE_RATE_3_STS_MASK 0x001F /* SAMPLE_RATE_3_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_3_STS_SHIFT 0 /* SAMPLE_RATE_3_STS - [4:0] */
+#define ARIZONA_SAMPLE_RATE_3_STS_WIDTH 5 /* SAMPLE_RATE_3_STS - [4:0] */
+
+/*
+ * R274 (0x112) - Async clock 1
+ */
+#define ARIZONA_ASYNC_CLK_FREQ_MASK 0x0700 /* ASYNC_CLK_FREQ - [10:8] */
+#define ARIZONA_ASYNC_CLK_FREQ_SHIFT 8 /* ASYNC_CLK_FREQ - [10:8] */
+#define ARIZONA_ASYNC_CLK_FREQ_WIDTH 3 /* ASYNC_CLK_FREQ - [10:8] */
+#define ARIZONA_ASYNC_CLK_ENA 0x0040 /* ASYNC_CLK_ENA */
+#define ARIZONA_ASYNC_CLK_ENA_MASK 0x0040 /* ASYNC_CLK_ENA */
+#define ARIZONA_ASYNC_CLK_ENA_SHIFT 6 /* ASYNC_CLK_ENA */
+#define ARIZONA_ASYNC_CLK_ENA_WIDTH 1 /* ASYNC_CLK_ENA */
+#define ARIZONA_ASYNC_CLK_SRC_MASK 0x000F /* ASYNC_CLK_SRC - [3:0] */
+#define ARIZONA_ASYNC_CLK_SRC_SHIFT 0 /* ASYNC_CLK_SRC - [3:0] */
+#define ARIZONA_ASYNC_CLK_SRC_WIDTH 4 /* ASYNC_CLK_SRC - [3:0] */
+
+/*
+ * R275 (0x113) - Async sample rate 1
+ */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_MASK 0x001F /* ASYNC_SAMPLE_RATE_1 - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_SHIFT 0 /* ASYNC_SAMPLE_RATE_1 - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_WIDTH 5 /* ASYNC_SAMPLE_RATE_1 - [4:0] */
+
+/*
+ * R276 (0x114) - Async sample rate 2
+ */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_MASK 0x001F /* ASYNC_SAMPLE_RATE_2 - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_SHIFT 0 /* ASYNC_SAMPLE_RATE_2 - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_WIDTH 5 /* ASYNC_SAMPLE_RATE_2 - [4:0] */
+
+/*
+ * R283 (0x11B) - Async sample rate 1 status
+ */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
+
+/*
+ * R284 (0x11C) - Async sample rate 2 status
+ */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
+#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
+
+/*
+ * R329 (0x149) - Output system clock
+ */
+#define ARIZONA_OPCLK_ENA 0x8000 /* OPCLK_ENA */
+#define ARIZONA_OPCLK_ENA_MASK 0x8000 /* OPCLK_ENA */
+#define ARIZONA_OPCLK_ENA_SHIFT 15 /* OPCLK_ENA */
+#define ARIZONA_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */
+#define ARIZONA_OPCLK_DIV_MASK 0x00F8 /* OPCLK_DIV - [7:3] */
+#define ARIZONA_OPCLK_DIV_SHIFT 3 /* OPCLK_DIV - [7:3] */
+#define ARIZONA_OPCLK_DIV_WIDTH 5 /* OPCLK_DIV - [7:3] */
+#define ARIZONA_OPCLK_SEL_MASK 0x0007 /* OPCLK_SEL - [2:0] */
+#define ARIZONA_OPCLK_SEL_SHIFT 0 /* OPCLK_SEL - [2:0] */
+#define ARIZONA_OPCLK_SEL_WIDTH 3 /* OPCLK_SEL - [2:0] */
+
+/*
+ * R330 (0x14A) - Output async clock
+ */
+#define ARIZONA_OPCLK_ASYNC_ENA 0x8000 /* OPCLK_ASYNC_ENA */
+#define ARIZONA_OPCLK_ASYNC_ENA_MASK 0x8000 /* OPCLK_ASYNC_ENA */
+#define ARIZONA_OPCLK_ASYNC_ENA_SHIFT 15 /* OPCLK_ASYNC_ENA */
+#define ARIZONA_OPCLK_ASYNC_ENA_WIDTH 1 /* OPCLK_ASYNC_ENA */
+#define ARIZONA_OPCLK_ASYNC_DIV_MASK 0x00F8 /* OPCLK_ASYNC_DIV - [7:3] */
+#define ARIZONA_OPCLK_ASYNC_DIV_SHIFT 3 /* OPCLK_ASYNC_DIV - [7:3] */
+#define ARIZONA_OPCLK_ASYNC_DIV_WIDTH 5 /* OPCLK_ASYNC_DIV - [7:3] */
+#define ARIZONA_OPCLK_ASYNC_SEL_MASK 0x0007 /* OPCLK_ASYNC_SEL - [2:0] */
+#define ARIZONA_OPCLK_ASYNC_SEL_SHIFT 0 /* OPCLK_ASYNC_SEL - [2:0] */
+#define ARIZONA_OPCLK_ASYNC_SEL_WIDTH 3 /* OPCLK_ASYNC_SEL - [2:0] */
+
+/*
+ * R338 (0x152) - Rate Estimator 1
+ */
+#define ARIZONA_TRIG_ON_STARTUP 0x0010 /* TRIG_ON_STARTUP */
+#define ARIZONA_TRIG_ON_STARTUP_MASK 0x0010 /* TRIG_ON_STARTUP */
+#define ARIZONA_TRIG_ON_STARTUP_SHIFT 4 /* TRIG_ON_STARTUP */
+#define ARIZONA_TRIG_ON_STARTUP_WIDTH 1 /* TRIG_ON_STARTUP */
+#define ARIZONA_LRCLK_SRC_MASK 0x000E /* LRCLK_SRC - [3:1] */
+#define ARIZONA_LRCLK_SRC_SHIFT 1 /* LRCLK_SRC - [3:1] */
+#define ARIZONA_LRCLK_SRC_WIDTH 3 /* LRCLK_SRC - [3:1] */
+#define ARIZONA_RATE_EST_ENA 0x0001 /* RATE_EST_ENA */
+#define ARIZONA_RATE_EST_ENA_MASK 0x0001 /* RATE_EST_ENA */
+#define ARIZONA_RATE_EST_ENA_SHIFT 0 /* RATE_EST_ENA */
+#define ARIZONA_RATE_EST_ENA_WIDTH 1 /* RATE_EST_ENA */
+
+/*
+ * R339 (0x153) - Rate Estimator 2
+ */
+#define ARIZONA_SAMPLE_RATE_DETECT_A_MASK 0x001F /* SAMPLE_RATE_DETECT_A - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_A_SHIFT 0 /* SAMPLE_RATE_DETECT_A - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_A_WIDTH 5 /* SAMPLE_RATE_DETECT_A - [4:0] */
+
+/*
+ * R340 (0x154) - Rate Estimator 3
+ */
+#define ARIZONA_SAMPLE_RATE_DETECT_B_MASK 0x001F /* SAMPLE_RATE_DETECT_B - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_B_SHIFT 0 /* SAMPLE_RATE_DETECT_B - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_B_WIDTH 5 /* SAMPLE_RATE_DETECT_B - [4:0] */
+
+/*
+ * R341 (0x155) - Rate Estimator 4
+ */
+#define ARIZONA_SAMPLE_RATE_DETECT_C_MASK 0x001F /* SAMPLE_RATE_DETECT_C - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_C_SHIFT 0 /* SAMPLE_RATE_DETECT_C - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_C_WIDTH 5 /* SAMPLE_RATE_DETECT_C - [4:0] */
+
+/*
+ * R342 (0x156) - Rate Estimator 5
+ */
+#define ARIZONA_SAMPLE_RATE_DETECT_D_MASK 0x001F /* SAMPLE_RATE_DETECT_D - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_D_SHIFT 0 /* SAMPLE_RATE_DETECT_D - [4:0] */
+#define ARIZONA_SAMPLE_RATE_DETECT_D_WIDTH 5 /* SAMPLE_RATE_DETECT_D - [4:0] */
+
+/*
+ * R353 (0x161) - Dynamic Frequency Scaling 1
+ */
+#define ARIZONA_SUBSYS_MAX_FREQ 0x0001 /* SUBSYS_MAX_FREQ */
+#define ARIZONA_SUBSYS_MAX_FREQ_SHIFT 0 /* SUBSYS_MAX_FREQ */
+#define ARIZONA_SUBSYS_MAX_FREQ_WIDTH 1 /* SUBSYS_MAX_FREQ */
+
+/*
+ * R369 (0x171) - FLL1 Control 1
+ */
+#define ARIZONA_FLL1_FREERUN 0x0002 /* FLL1_FREERUN */
+#define ARIZONA_FLL1_FREERUN_MASK 0x0002 /* FLL1_FREERUN */
+#define ARIZONA_FLL1_FREERUN_SHIFT 1 /* FLL1_FREERUN */
+#define ARIZONA_FLL1_FREERUN_WIDTH 1 /* FLL1_FREERUN */
+#define ARIZONA_FLL1_ENA 0x0001 /* FLL1_ENA */
+#define ARIZONA_FLL1_ENA_MASK 0x0001 /* FLL1_ENA */
+#define ARIZONA_FLL1_ENA_SHIFT 0 /* FLL1_ENA */
+#define ARIZONA_FLL1_ENA_WIDTH 1 /* FLL1_ENA */
+
+/*
+ * R370 (0x172) - FLL1 Control 2
+ */
+#define ARIZONA_FLL1_CTRL_UPD 0x8000 /* FLL1_CTRL_UPD */
+#define ARIZONA_FLL1_CTRL_UPD_MASK 0x8000 /* FLL1_CTRL_UPD */
+#define ARIZONA_FLL1_CTRL_UPD_SHIFT 15 /* FLL1_CTRL_UPD */
+#define ARIZONA_FLL1_CTRL_UPD_WIDTH 1 /* FLL1_CTRL_UPD */
+#define ARIZONA_FLL1_N_MASK 0x03FF /* FLL1_N - [9:0] */
+#define ARIZONA_FLL1_N_SHIFT 0 /* FLL1_N - [9:0] */
+#define ARIZONA_FLL1_N_WIDTH 10 /* FLL1_N - [9:0] */
+
+/*
+ * R371 (0x173) - FLL1 Control 3
+ */
+#define ARIZONA_FLL1_THETA_MASK 0xFFFF /* FLL1_THETA - [15:0] */
+#define ARIZONA_FLL1_THETA_SHIFT 0 /* FLL1_THETA - [15:0] */
+#define ARIZONA_FLL1_THETA_WIDTH 16 /* FLL1_THETA - [15:0] */
+
+/*
+ * R372 (0x174) - FLL1 Control 4
+ */
+#define ARIZONA_FLL1_LAMBDA_MASK 0xFFFF /* FLL1_LAMBDA - [15:0] */
+#define ARIZONA_FLL1_LAMBDA_SHIFT 0 /* FLL1_LAMBDA - [15:0] */
+#define ARIZONA_FLL1_LAMBDA_WIDTH 16 /* FLL1_LAMBDA - [15:0] */
+
+/*
+ * R373 (0x175) - FLL1 Control 5
+ */
+#define ARIZONA_FLL1_FRATIO_MASK 0x0F00 /* FLL1_FRATIO - [11:8] */
+#define ARIZONA_FLL1_FRATIO_SHIFT 8 /* FLL1_FRATIO - [11:8] */
+#define ARIZONA_FLL1_FRATIO_WIDTH 4 /* FLL1_FRATIO - [11:8] */
+#define ARIZONA_FLL1_OUTDIV_MASK 0x000E /* FLL1_OUTDIV - [3:1] */
+#define ARIZONA_FLL1_OUTDIV_SHIFT 1 /* FLL1_OUTDIV - [3:1] */
+#define ARIZONA_FLL1_OUTDIV_WIDTH 3 /* FLL1_OUTDIV - [3:1] */
+
+/*
+ * R374 (0x176) - FLL1 Control 6
+ */
+#define ARIZONA_FLL1_CLK_REF_DIV_MASK 0x00C0 /* FLL1_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_REF_DIV_SHIFT 6 /* FLL1_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_REF_DIV_WIDTH 2 /* FLL1_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_REF_SRC_MASK 0x000F /* FLL1_CLK_REF_SRC - [3:0] */
+#define ARIZONA_FLL1_CLK_REF_SRC_SHIFT 0 /* FLL1_CLK_REF_SRC - [3:0] */
+#define ARIZONA_FLL1_CLK_REF_SRC_WIDTH 4 /* FLL1_CLK_REF_SRC - [3:0] */
+
+/*
+ * R375 (0x177) - FLL1 Loop Filter Test 1
+ */
+#define ARIZONA_FLL1_FRC_INTEG_UPD 0x8000 /* FLL1_FRC_INTEG_UPD */
+#define ARIZONA_FLL1_FRC_INTEG_UPD_MASK 0x8000 /* FLL1_FRC_INTEG_UPD */
+#define ARIZONA_FLL1_FRC_INTEG_UPD_SHIFT 15 /* FLL1_FRC_INTEG_UPD */
+#define ARIZONA_FLL1_FRC_INTEG_UPD_WIDTH 1 /* FLL1_FRC_INTEG_UPD */
+#define ARIZONA_FLL1_FRC_INTEG_VAL_MASK 0x0FFF /* FLL1_FRC_INTEG_VAL - [11:0] */
+#define ARIZONA_FLL1_FRC_INTEG_VAL_SHIFT 0 /* FLL1_FRC_INTEG_VAL - [11:0] */
+#define ARIZONA_FLL1_FRC_INTEG_VAL_WIDTH 12 /* FLL1_FRC_INTEG_VAL - [11:0] */
+
+/*
+ * R377 (0x179) - FLL1 Control 7
+ */
+#define ARIZONA_FLL1_GAIN_MASK 0x003C /* FLL1_GAIN - [5:2] */
+#define ARIZONA_FLL1_GAIN_SHIFT 2 /* FLL1_GAIN - [5:2] */
+#define ARIZONA_FLL1_GAIN_WIDTH 4 /* FLL1_GAIN - [5:2] */
+
+/*
+ * R385 (0x181) - FLL1 Synchroniser 1
+ */
+#define ARIZONA_FLL1_SYNC_ENA 0x0001 /* FLL1_SYNC_ENA */
+#define ARIZONA_FLL1_SYNC_ENA_MASK 0x0001 /* FLL1_SYNC_ENA */
+#define ARIZONA_FLL1_SYNC_ENA_SHIFT 0 /* FLL1_SYNC_ENA */
+#define ARIZONA_FLL1_SYNC_ENA_WIDTH 1 /* FLL1_SYNC_ENA */
+
+/*
+ * R386 (0x182) - FLL1 Synchroniser 2
+ */
+#define ARIZONA_FLL1_SYNC_N_MASK 0x03FF /* FLL1_SYNC_N - [9:0] */
+#define ARIZONA_FLL1_SYNC_N_SHIFT 0 /* FLL1_SYNC_N - [9:0] */
+#define ARIZONA_FLL1_SYNC_N_WIDTH 10 /* FLL1_SYNC_N - [9:0] */
+
+/*
+ * R387 (0x183) - FLL1 Synchroniser 3
+ */
+#define ARIZONA_FLL1_SYNC_THETA_MASK 0xFFFF /* FLL1_SYNC_THETA - [15:0] */
+#define ARIZONA_FLL1_SYNC_THETA_SHIFT 0 /* FLL1_SYNC_THETA - [15:0] */
+#define ARIZONA_FLL1_SYNC_THETA_WIDTH 16 /* FLL1_SYNC_THETA - [15:0] */
+
+/*
+ * R388 (0x184) - FLL1 Synchroniser 4
+ */
+#define ARIZONA_FLL1_SYNC_LAMBDA_MASK 0xFFFF /* FLL1_SYNC_LAMBDA - [15:0] */
+#define ARIZONA_FLL1_SYNC_LAMBDA_SHIFT 0 /* FLL1_SYNC_LAMBDA - [15:0] */
+#define ARIZONA_FLL1_SYNC_LAMBDA_WIDTH 16 /* FLL1_SYNC_LAMBDA - [15:0] */
+
+/*
+ * R389 (0x185) - FLL1 Synchroniser 5
+ */
+#define ARIZONA_FLL1_SYNC_FRATIO_MASK 0x0700 /* FLL1_SYNC_FRATIO - [10:8] */
+#define ARIZONA_FLL1_SYNC_FRATIO_SHIFT 8 /* FLL1_SYNC_FRATIO - [10:8] */
+#define ARIZONA_FLL1_SYNC_FRATIO_WIDTH 3 /* FLL1_SYNC_FRATIO - [10:8] */
+
+/*
+ * R390 (0x186) - FLL1 Synchroniser 6
+ */
+#define ARIZONA_FLL1_CLK_SYNC_DIV_MASK 0x00C0 /* FLL1_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_SYNC_DIV_SHIFT 6 /* FLL1_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_SYNC_DIV_WIDTH 2 /* FLL1_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL1_CLK_SYNC_SRC_MASK 0x000F /* FLL1_CLK_SYNC_SRC - [3:0] */
+#define ARIZONA_FLL1_CLK_SYNC_SRC_SHIFT 0 /* FLL1_CLK_SYNC_SRC - [3:0] */
+#define ARIZONA_FLL1_CLK_SYNC_SRC_WIDTH 4 /* FLL1_CLK_SYNC_SRC - [3:0] */
+
+/*
+ * R391 (0x187) - FLL1 Synchroniser 7
+ */
+#define ARIZONA_FLL1_SYNC_GAIN_MASK 0x003C /* FLL1_SYNC_GAIN - [5:2] */
+#define ARIZONA_FLL1_SYNC_GAIN_SHIFT 2 /* FLL1_SYNC_GAIN - [5:2] */
+#define ARIZONA_FLL1_SYNC_GAIN_WIDTH 4 /* FLL1_SYNC_GAIN - [5:2] */
+#define ARIZONA_FLL1_SYNC_BW 0x0001 /* FLL1_SYNC_BW */
+#define ARIZONA_FLL1_SYNC_BW_MASK 0x0001 /* FLL1_SYNC_BW */
+#define ARIZONA_FLL1_SYNC_BW_SHIFT 0 /* FLL1_SYNC_BW */
+#define ARIZONA_FLL1_SYNC_BW_WIDTH 1 /* FLL1_SYNC_BW */
+
+/*
+ * R393 (0x189) - FLL1 Spread Spectrum
+ */
+#define ARIZONA_FLL1_SS_AMPL_MASK 0x0030 /* FLL1_SS_AMPL - [5:4] */
+#define ARIZONA_FLL1_SS_AMPL_SHIFT 4 /* FLL1_SS_AMPL - [5:4] */
+#define ARIZONA_FLL1_SS_AMPL_WIDTH 2 /* FLL1_SS_AMPL - [5:4] */
+#define ARIZONA_FLL1_SS_FREQ_MASK 0x000C /* FLL1_SS_FREQ - [3:2] */
+#define ARIZONA_FLL1_SS_FREQ_SHIFT 2 /* FLL1_SS_FREQ - [3:2] */
+#define ARIZONA_FLL1_SS_FREQ_WIDTH 2 /* FLL1_SS_FREQ - [3:2] */
+#define ARIZONA_FLL1_SS_SEL_MASK 0x0003 /* FLL1_SS_SEL - [1:0] */
+#define ARIZONA_FLL1_SS_SEL_SHIFT 0 /* FLL1_SS_SEL - [1:0] */
+#define ARIZONA_FLL1_SS_SEL_WIDTH 2 /* FLL1_SS_SEL - [1:0] */
+
+/*
+ * R394 (0x18A) - FLL1 GPIO Clock
+ */
+#define ARIZONA_FLL1_GPDIV_MASK 0x00FE /* FLL1_GPDIV - [7:1] */
+#define ARIZONA_FLL1_GPDIV_SHIFT 1 /* FLL1_GPDIV - [7:1] */
+#define ARIZONA_FLL1_GPDIV_WIDTH 7 /* FLL1_GPDIV - [7:1] */
+#define ARIZONA_FLL1_GPDIV_ENA 0x0001 /* FLL1_GPDIV_ENA */
+#define ARIZONA_FLL1_GPDIV_ENA_MASK 0x0001 /* FLL1_GPDIV_ENA */
+#define ARIZONA_FLL1_GPDIV_ENA_SHIFT 0 /* FLL1_GPDIV_ENA */
+#define ARIZONA_FLL1_GPDIV_ENA_WIDTH 1 /* FLL1_GPDIV_ENA */
+
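[Editorial aside, not part of the header being imported: every field above follows the same MASK/SHIFT/WIDTH convention, so a driver writes a field by shifting the value left by the _SHIFT amount and constraining the write to the _MASK bits, conventionally via the kernel regmap API's regmap_update_bits(). A minimal sketch only, assuming the register-address macros defined earlier in this header (ARIZONA_FLL1_CONTROL_1 / ARIZONA_FLL1_CONTROL_2 for registers 0x171/0x172) and the struct arizona handle with its regmap pointer provided by the arizona MFD core; example_fll1_enable() is a hypothetical helper, not a kernel function.]

    #include <linux/regmap.h>
    #include <linux/mfd/arizona/core.h>
    #include <linux/mfd/arizona/registers.h>

    /* Illustrative only: program FLL1_N, then set FLL1_ENA, leaving all other bits untouched. */
    static int example_fll1_enable(struct arizona *arizona, unsigned int n)
    {
            int ret;

            /* FLL1_N occupies bits [9:0] of FLL1 Control 2 (0x172). */
            ret = regmap_update_bits(arizona->regmap, ARIZONA_FLL1_CONTROL_2,
                                     ARIZONA_FLL1_N_MASK,
                                     n << ARIZONA_FLL1_N_SHIFT);
            if (ret)
                    return ret;

            /* FLL1_ENA is a single bit, so the bit value doubles as its own mask. */
            return regmap_update_bits(arizona->regmap, ARIZONA_FLL1_CONTROL_1,
                                      ARIZONA_FLL1_ENA_MASK, ARIZONA_FLL1_ENA);
    }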
+/*
+ * R401 (0x191) - FLL2 Control 1
+ */
+#define ARIZONA_FLL2_FREERUN 0x0002 /* FLL2_FREERUN */
+#define ARIZONA_FLL2_FREERUN_MASK 0x0002 /* FLL2_FREERUN */
+#define ARIZONA_FLL2_FREERUN_SHIFT 1 /* FLL2_FREERUN */
+#define ARIZONA_FLL2_FREERUN_WIDTH 1 /* FLL2_FREERUN */
+#define ARIZONA_FLL2_ENA 0x0001 /* FLL2_ENA */
+#define ARIZONA_FLL2_ENA_MASK 0x0001 /* FLL2_ENA */
+#define ARIZONA_FLL2_ENA_SHIFT 0 /* FLL2_ENA */
+#define ARIZONA_FLL2_ENA_WIDTH 1 /* FLL2_ENA */
+
+/*
+ * R402 (0x192) - FLL2 Control 2
+ */
+#define ARIZONA_FLL2_CTRL_UPD 0x8000 /* FLL2_CTRL_UPD */
+#define ARIZONA_FLL2_CTRL_UPD_MASK 0x8000 /* FLL2_CTRL_UPD */
+#define ARIZONA_FLL2_CTRL_UPD_SHIFT 15 /* FLL2_CTRL_UPD */
+#define ARIZONA_FLL2_CTRL_UPD_WIDTH 1 /* FLL2_CTRL_UPD */
+#define ARIZONA_FLL2_N_MASK 0x03FF /* FLL2_N - [9:0] */
+#define ARIZONA_FLL2_N_SHIFT 0 /* FLL2_N - [9:0] */
+#define ARIZONA_FLL2_N_WIDTH 10 /* FLL2_N - [9:0] */
+
+/*
+ * R403 (0x193) - FLL2 Control 3
+ */
+#define ARIZONA_FLL2_THETA_MASK 0xFFFF /* FLL2_THETA - [15:0] */
+#define ARIZONA_FLL2_THETA_SHIFT 0 /* FLL2_THETA - [15:0] */
+#define ARIZONA_FLL2_THETA_WIDTH 16 /* FLL2_THETA - [15:0] */
+
+/*
+ * R404 (0x194) - FLL2 Control 4
+ */
+#define ARIZONA_FLL2_LAMBDA_MASK 0xFFFF /* FLL2_LAMBDA - [15:0] */
+#define ARIZONA_FLL2_LAMBDA_SHIFT 0 /* FLL2_LAMBDA - [15:0] */
+#define ARIZONA_FLL2_LAMBDA_WIDTH 16 /* FLL2_LAMBDA - [15:0] */
+
+/*
+ * R405 (0x195) - FLL2 Control 5
+ */
+#define ARIZONA_FLL2_FRATIO_MASK 0x0700 /* FLL2_FRATIO - [10:8] */
+#define ARIZONA_FLL2_FRATIO_SHIFT 8 /* FLL2_FRATIO - [10:8] */
+#define ARIZONA_FLL2_FRATIO_WIDTH 3 /* FLL2_FRATIO - [10:8] */
+#define ARIZONA_FLL2_OUTDIV_MASK 0x000E /* FLL2_OUTDIV - [3:1] */
+#define ARIZONA_FLL2_OUTDIV_SHIFT 1 /* FLL2_OUTDIV - [3:1] */
+#define ARIZONA_FLL2_OUTDIV_WIDTH 3 /* FLL2_OUTDIV - [3:1] */
+
+/*
+ * R406 (0x196) - FLL2 Control 6
+ */
+#define ARIZONA_FLL2_CLK_REF_DIV_MASK 0x00C0 /* FLL2_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_REF_DIV_SHIFT 6 /* FLL2_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_REF_DIV_WIDTH 2 /* FLL2_CLK_REF_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_REF_SRC_MASK 0x000F /* FLL2_CLK_REF_SRC - [3:0] */
+#define ARIZONA_FLL2_CLK_REF_SRC_SHIFT 0 /* FLL2_CLK_REF_SRC - [3:0] */
+#define ARIZONA_FLL2_CLK_REF_SRC_WIDTH 4 /* FLL2_CLK_REF_SRC - [3:0] */
+
+/*
+ * R407 (0x197) - FLL2 Loop Filter Test 1
+ */
+#define ARIZONA_FLL2_FRC_INTEG_UPD 0x8000 /* FLL2_FRC_INTEG_UPD */
+#define ARIZONA_FLL2_FRC_INTEG_UPD_MASK 0x8000 /* FLL2_FRC_INTEG_UPD */
+#define ARIZONA_FLL2_FRC_INTEG_UPD_SHIFT 15 /* FLL2_FRC_INTEG_UPD */
+#define ARIZONA_FLL2_FRC_INTEG_UPD_WIDTH 1 /* FLL2_FRC_INTEG_UPD */
+#define ARIZONA_FLL2_FRC_INTEG_VAL_MASK 0x0FFF /* FLL2_FRC_INTEG_VAL - [11:0] */
+#define ARIZONA_FLL2_FRC_INTEG_VAL_SHIFT 0 /* FLL2_FRC_INTEG_VAL - [11:0] */
+#define ARIZONA_FLL2_FRC_INTEG_VAL_WIDTH 12 /* FLL2_FRC_INTEG_VAL - [11:0] */
+
+/*
+ * R409 (0x199) - FLL2 Control 7
+ */
+#define ARIZONA_FLL2_GAIN_MASK 0x003C /* FLL2_GAIN - [5:2] */
+#define ARIZONA_FLL2_GAIN_SHIFT 2 /* FLL2_GAIN - [5:2] */
+#define ARIZONA_FLL2_GAIN_WIDTH 4 /* FLL2_GAIN - [5:2] */
+
+/*
+ * R417 (0x1A1) - FLL2 Synchroniser 1
+ */
+#define ARIZONA_FLL2_SYNC_ENA 0x0001 /* FLL2_SYNC_ENA */
+#define ARIZONA_FLL2_SYNC_ENA_MASK 0x0001 /* FLL2_SYNC_ENA */
+#define ARIZONA_FLL2_SYNC_ENA_SHIFT 0 /* FLL2_SYNC_ENA */
+#define ARIZONA_FLL2_SYNC_ENA_WIDTH 1 /* FLL2_SYNC_ENA */
+
+/*
+ * R418 (0x1A2) - FLL2 Synchroniser 2
+ */
+#define ARIZONA_FLL2_SYNC_N_MASK 0x03FF /* FLL2_SYNC_N - [9:0] */
+#define ARIZONA_FLL2_SYNC_N_SHIFT 0 /* FLL2_SYNC_N - [9:0] */
+#define ARIZONA_FLL2_SYNC_N_WIDTH 10 /* FLL2_SYNC_N - [9:0] */
+
+/*
+ * R419 (0x1A3) - FLL2 Synchroniser 3
+ */
+#define ARIZONA_FLL2_SYNC_THETA_MASK 0xFFFF /* FLL2_SYNC_THETA - [15:0] */
+#define ARIZONA_FLL2_SYNC_THETA_SHIFT 0 /* FLL2_SYNC_THETA - [15:0] */
+#define ARIZONA_FLL2_SYNC_THETA_WIDTH 16 /* FLL2_SYNC_THETA - [15:0] */
+
+/*
+ * R420 (0x1A4) - FLL2 Synchroniser 4
+ */
+#define ARIZONA_FLL2_SYNC_LAMBDA_MASK 0xFFFF /* FLL2_SYNC_LAMBDA - [15:0] */
+#define ARIZONA_FLL2_SYNC_LAMBDA_SHIFT 0 /* FLL2_SYNC_LAMBDA - [15:0] */
+#define ARIZONA_FLL2_SYNC_LAMBDA_WIDTH 16 /* FLL2_SYNC_LAMBDA - [15:0] */
+
+/*
+ * R421 (0x1A5) - FLL2 Synchroniser 5
+ */
+#define ARIZONA_FLL2_SYNC_FRATIO_MASK 0x0700 /* FLL2_SYNC_FRATIO - [10:8] */
+#define ARIZONA_FLL2_SYNC_FRATIO_SHIFT 8 /* FLL2_SYNC_FRATIO - [10:8] */
+#define ARIZONA_FLL2_SYNC_FRATIO_WIDTH 3 /* FLL2_SYNC_FRATIO - [10:8] */
+
+/*
+ * R422 (0x1A6) - FLL2 Synchroniser 6
+ */
+#define ARIZONA_FLL2_CLK_SYNC_DIV_MASK 0x00C0 /* FLL2_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_SYNC_DIV_SHIFT 6 /* FLL2_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_SYNC_DIV_WIDTH 2 /* FLL2_CLK_SYNC_DIV - [7:6] */
+#define ARIZONA_FLL2_CLK_SYNC_SRC_MASK 0x000F /* FLL2_CLK_SYNC_SRC - [3:0] */
+#define ARIZONA_FLL2_CLK_SYNC_SRC_SHIFT 0 /* FLL2_CLK_SYNC_SRC - [3:0] */
+#define ARIZONA_FLL2_CLK_SYNC_SRC_WIDTH 4 /* FLL2_CLK_SYNC_SRC - [3:0] */
+
+/*
+ * R423 (0x1A7) - FLL2 Synchroniser 7
+ */
+#define ARIZONA_FLL2_SYNC_GAIN_MASK 0x003C /* FLL2_SYNC_GAIN - [5:2] */
+#define ARIZONA_FLL2_SYNC_GAIN_SHIFT 2 /* FLL2_SYNC_GAIN - [5:2] */
+#define ARIZONA_FLL2_SYNC_GAIN_WIDTH 4 /* FLL2_SYNC_GAIN - [5:2] */
+#define ARIZONA_FLL2_SYNC_BW 0x0001 /* FLL2_SYNC_BW */
+#define ARIZONA_FLL2_SYNC_BW_MASK 0x0001 /* FLL2_SYNC_BW */
+#define ARIZONA_FLL2_SYNC_BW_SHIFT 0 /* FLL2_SYNC_BW */
+#define ARIZONA_FLL2_SYNC_BW_WIDTH 1 /* FLL2_SYNC_BW */
+
+/*
+ * R425 (0x1A9) - FLL2 Spread Spectrum
+ */
+#define ARIZONA_FLL2_SS_AMPL_MASK 0x0030 /* FLL2_SS_AMPL - [5:4] */
+#define ARIZONA_FLL2_SS_AMPL_SHIFT 4 /* FLL2_SS_AMPL - [5:4] */
+#define ARIZONA_FLL2_SS_AMPL_WIDTH 2 /* FLL2_SS_AMPL - [5:4] */
+#define ARIZONA_FLL2_SS_FREQ_MASK 0x000C /* FLL2_SS_FREQ - [3:2] */
+#define ARIZONA_FLL2_SS_FREQ_SHIFT 2 /* FLL2_SS_FREQ - [3:2] */
+#define ARIZONA_FLL2_SS_FREQ_WIDTH 2 /* FLL2_SS_FREQ - [3:2] */
+#define ARIZONA_FLL2_SS_SEL_MASK 0x0003 /* FLL2_SS_SEL - [1:0] */
+#define ARIZONA_FLL2_SS_SEL_SHIFT 0 /* FLL2_SS_SEL - [1:0] */
+#define ARIZONA_FLL2_SS_SEL_WIDTH 2 /* FLL2_SS_SEL - [1:0] */
+
+/*
+ * R426 (0x1AA) - FLL2 GPIO Clock
+ */
+#define ARIZONA_FLL2_GPDIV_MASK 0x00FE /* FLL2_GPDIV - [7:1] */
+#define ARIZONA_FLL2_GPDIV_SHIFT 1 /* FLL2_GPDIV - [7:1] */
+#define ARIZONA_FLL2_GPDIV_WIDTH 7 /* FLL2_GPDIV - [7:1] */
+#define ARIZONA_FLL2_GPDIV_ENA 0x0001 /* FLL2_GPDIV_ENA */
+#define ARIZONA_FLL2_GPDIV_ENA_MASK 0x0001 /* FLL2_GPDIV_ENA */
+#define ARIZONA_FLL2_GPDIV_ENA_SHIFT 0 /* FLL2_GPDIV_ENA */
+#define ARIZONA_FLL2_GPDIV_ENA_WIDTH 1 /* FLL2_GPDIV_ENA */
+
+/*
+ * R512 (0x200) - Mic Charge Pump 1
+ */
+#define ARIZONA_CPMIC_DISCH 0x0004 /* CPMIC_DISCH */
+#define ARIZONA_CPMIC_DISCH_MASK 0x0004 /* CPMIC_DISCH */
+#define ARIZONA_CPMIC_DISCH_SHIFT 2 /* CPMIC_DISCH */
+#define ARIZONA_CPMIC_DISCH_WIDTH 1 /* CPMIC_DISCH */
+#define ARIZONA_CPMIC_BYPASS 0x0002 /* CPMIC_BYPASS */
+#define ARIZONA_CPMIC_BYPASS_MASK 0x0002 /* CPMIC_BYPASS */
+#define ARIZONA_CPMIC_BYPASS_SHIFT 1 /* CPMIC_BYPASS */
+#define ARIZONA_CPMIC_BYPASS_WIDTH 1 /* CPMIC_BYPASS */
+#define ARIZONA_CPMIC_ENA 0x0001 /* CPMIC_ENA */
+#define ARIZONA_CPMIC_ENA_MASK 0x0001 /* CPMIC_ENA */
+#define ARIZONA_CPMIC_ENA_SHIFT 0 /* CPMIC_ENA */
+#define ARIZONA_CPMIC_ENA_WIDTH 1 /* CPMIC_ENA */
+
+/*
+ * R528 (0x210) - LDO1 Control 1
+ */
+#define ARIZONA_LDO1_VSEL_MASK 0x07E0 /* LDO1_VSEL - [10:5] */
+#define ARIZONA_LDO1_VSEL_SHIFT 5 /* LDO1_VSEL - [10:5] */
+#define ARIZONA_LDO1_VSEL_WIDTH 6 /* LDO1_VSEL - [10:5] */
+#define ARIZONA_LDO1_FAST 0x0010 /* LDO1_FAST */
+#define ARIZONA_LDO1_FAST_MASK 0x0010 /* LDO1_FAST */
+#define ARIZONA_LDO1_FAST_SHIFT 4 /* LDO1_FAST */
+#define ARIZONA_LDO1_FAST_WIDTH 1 /* LDO1_FAST */
+#define ARIZONA_LDO1_DISCH 0x0004 /* LDO1_DISCH */
+#define ARIZONA_LDO1_DISCH_MASK 0x0004 /* LDO1_DISCH */
+#define ARIZONA_LDO1_DISCH_SHIFT 2 /* LDO1_DISCH */
+#define ARIZONA_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */
+#define ARIZONA_LDO1_BYPASS 0x0002 /* LDO1_BYPASS */
+#define ARIZONA_LDO1_BYPASS_MASK 0x0002 /* LDO1_BYPASS */
+#define ARIZONA_LDO1_BYPASS_SHIFT 1 /* LDO1_BYPASS */
+#define ARIZONA_LDO1_BYPASS_WIDTH 1 /* LDO1_BYPASS */
+#define ARIZONA_LDO1_ENA 0x0001 /* LDO1_ENA */
+#define ARIZONA_LDO1_ENA_MASK 0x0001 /* LDO1_ENA */
+#define ARIZONA_LDO1_ENA_SHIFT 0 /* LDO1_ENA */
+#define ARIZONA_LDO1_ENA_WIDTH 1 /* LDO1_ENA */
+
+/*
+ * R530 (0x212) - LDO1 Control 2
+ */
+#define ARIZONA_LDO1_HI_PWR 0x0001 /* LDO1_HI_PWR */
+#define ARIZONA_LDO1_HI_PWR_SHIFT 0 /* LDO1_HI_PWR */
+#define ARIZONA_LDO1_HI_PWR_WIDTH 1 /* LDO1_HI_PWR */
+
+/*
+ * R531 (0x213) - LDO2 Control 1
+ */
+#define ARIZONA_LDO2_VSEL_MASK 0x07E0 /* LDO2_VSEL - [10:5] */
+#define ARIZONA_LDO2_VSEL_SHIFT 5 /* LDO2_VSEL - [10:5] */
+#define ARIZONA_LDO2_VSEL_WIDTH 6 /* LDO2_VSEL - [10:5] */
+#define ARIZONA_LDO2_FAST 0x0010 /* LDO2_FAST */
+#define ARIZONA_LDO2_FAST_MASK 0x0010 /* LDO2_FAST */
+#define ARIZONA_LDO2_FAST_SHIFT 4 /* LDO2_FAST */
+#define ARIZONA_LDO2_FAST_WIDTH 1 /* LDO2_FAST */
+#define ARIZONA_LDO2_DISCH 0x0004 /* LDO2_DISCH */
+#define ARIZONA_LDO2_DISCH_MASK 0x0004 /* LDO2_DISCH */
+#define ARIZONA_LDO2_DISCH_SHIFT 2 /* LDO2_DISCH */
+#define ARIZONA_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */
+#define ARIZONA_LDO2_BYPASS 0x0002 /* LDO2_BYPASS */
+#define ARIZONA_LDO2_BYPASS_MASK 0x0002 /* LDO2_BYPASS */
+#define ARIZONA_LDO2_BYPASS_SHIFT 1 /* LDO2_BYPASS */
+#define ARIZONA_LDO2_BYPASS_WIDTH 1 /* LDO2_BYPASS */
+#define ARIZONA_LDO2_ENA 0x0001 /* LDO2_ENA */
+#define ARIZONA_LDO2_ENA_MASK 0x0001 /* LDO2_ENA */
+#define ARIZONA_LDO2_ENA_SHIFT 0 /* LDO2_ENA */
+#define ARIZONA_LDO2_ENA_WIDTH 1 /* LDO2_ENA */
+
+/*
+ * R536 (0x218) - Mic Bias Ctrl 1
+ */
+#define ARIZONA_MICB1_EXT_CAP 0x8000 /* MICB1_EXT_CAP */
+#define ARIZONA_MICB1_EXT_CAP_MASK 0x8000 /* MICB1_EXT_CAP */
+#define ARIZONA_MICB1_EXT_CAP_SHIFT 15 /* MICB1_EXT_CAP */
+#define ARIZONA_MICB1_EXT_CAP_WIDTH 1 /* MICB1_EXT_CAP */
+#define ARIZONA_MICB1_LVL_MASK 0x01E0 /* MICB1_LVL - [8:5] */
+#define ARIZONA_MICB1_LVL_SHIFT 5 /* MICB1_LVL - [8:5] */
+#define ARIZONA_MICB1_LVL_WIDTH 4 /* MICB1_LVL - [8:5] */
+#define ARIZONA_MICB1_FAST 0x0010 /* MICB1_FAST */
+#define ARIZONA_MICB1_FAST_MASK 0x0010 /* MICB1_FAST */
+#define ARIZONA_MICB1_FAST_SHIFT 4 /* MICB1_FAST */
+#define ARIZONA_MICB1_FAST_WIDTH 1 /* MICB1_FAST */
+#define ARIZONA_MICB1_RATE 0x0008 /* MICB1_RATE */
+#define ARIZONA_MICB1_RATE_MASK 0x0008 /* MICB1_RATE */
+#define ARIZONA_MICB1_RATE_SHIFT 3 /* MICB1_RATE */
+#define ARIZONA_MICB1_RATE_WIDTH 1 /* MICB1_RATE */
+#define ARIZONA_MICB1_DISCH 0x0004 /* MICB1_DISCH */
+#define ARIZONA_MICB1_DISCH_MASK 0x0004 /* MICB1_DISCH */
+#define ARIZONA_MICB1_DISCH_SHIFT 2 /* MICB1_DISCH */
+#define ARIZONA_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */
+#define ARIZONA_MICB1_BYPASS 0x0002 /* MICB1_BYPASS */
+#define ARIZONA_MICB1_BYPASS_MASK 0x0002 /* MICB1_BYPASS */
+#define ARIZONA_MICB1_BYPASS_SHIFT 1 /* MICB1_BYPASS */
+#define ARIZONA_MICB1_BYPASS_WIDTH 1 /* MICB1_BYPASS */
+#define ARIZONA_MICB1_ENA 0x0001 /* MICB1_ENA */
+#define ARIZONA_MICB1_ENA_MASK 0x0001 /* MICB1_ENA */
+#define ARIZONA_MICB1_ENA_SHIFT 0 /* MICB1_ENA */
+#define ARIZONA_MICB1_ENA_WIDTH 1 /* MICB1_ENA */
+
+/*
+ * R537 (0x219) - Mic Bias Ctrl 2
+ */
+#define ARIZONA_MICB2_EXT_CAP 0x8000 /* MICB2_EXT_CAP */
+#define ARIZONA_MICB2_EXT_CAP_MASK 0x8000 /* MICB2_EXT_CAP */
+#define ARIZONA_MICB2_EXT_CAP_SHIFT 15 /* MICB2_EXT_CAP */
+#define ARIZONA_MICB2_EXT_CAP_WIDTH 1 /* MICB2_EXT_CAP */
+#define ARIZONA_MICB2_LVL_MASK 0x01E0 /* MICB2_LVL - [8:5] */
+#define ARIZONA_MICB2_LVL_SHIFT 5 /* MICB2_LVL - [8:5] */
+#define ARIZONA_MICB2_LVL_WIDTH 4 /* MICB2_LVL - [8:5] */
+#define ARIZONA_MICB2_FAST 0x0010 /* MICB2_FAST */
+#define ARIZONA_MICB2_FAST_MASK 0x0010 /* MICB2_FAST */
+#define ARIZONA_MICB2_FAST_SHIFT 4 /* MICB2_FAST */
+#define ARIZONA_MICB2_FAST_WIDTH 1 /* MICB2_FAST */
+#define ARIZONA_MICB2_RATE 0x0008 /* MICB2_RATE */
+#define ARIZONA_MICB2_RATE_MASK 0x0008 /* MICB2_RATE */
+#define ARIZONA_MICB2_RATE_SHIFT 3 /* MICB2_RATE */
+#define ARIZONA_MICB2_RATE_WIDTH 1 /* MICB2_RATE */
+#define ARIZONA_MICB2_DISCH 0x0004 /* MICB2_DISCH */
+#define ARIZONA_MICB2_DISCH_MASK 0x0004 /* MICB2_DISCH */
+#define ARIZONA_MICB2_DISCH_SHIFT 2 /* MICB2_DISCH */
+#define ARIZONA_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
+#define ARIZONA_MICB2_BYPASS 0x0002 /* MICB2_BYPASS */
+#define ARIZONA_MICB2_BYPASS_MASK 0x0002 /* MICB2_BYPASS */
+#define ARIZONA_MICB2_BYPASS_SHIFT 1 /* MICB2_BYPASS */
+#define ARIZONA_MICB2_BYPASS_WIDTH 1 /* MICB2_BYPASS */
+#define ARIZONA_MICB2_ENA 0x0001 /* MICB2_ENA */
+#define ARIZONA_MICB2_ENA_MASK 0x0001 /* MICB2_ENA */
+#define ARIZONA_MICB2_ENA_SHIFT 0 /* MICB2_ENA */
+#define ARIZONA_MICB2_ENA_WIDTH 1 /* MICB2_ENA */
+
+/*
+ * R538 (0x21A) - Mic Bias Ctrl 3
+ */
+#define ARIZONA_MICB3_EXT_CAP 0x8000 /* MICB3_EXT_CAP */
+#define ARIZONA_MICB3_EXT_CAP_MASK 0x8000 /* MICB3_EXT_CAP */
+#define ARIZONA_MICB3_EXT_CAP_SHIFT 15 /* MICB3_EXT_CAP */
+#define ARIZONA_MICB3_EXT_CAP_WIDTH 1 /* MICB3_EXT_CAP */
+#define ARIZONA_MICB3_LVL_MASK 0x01E0 /* MICB3_LVL - [8:5] */
+#define ARIZONA_MICB3_LVL_SHIFT 5 /* MICB3_LVL - [8:5] */
+#define ARIZONA_MICB3_LVL_WIDTH 4 /* MICB3_LVL - [8:5] */
+#define ARIZONA_MICB3_FAST 0x0010 /* MICB3_FAST */
+#define ARIZONA_MICB3_FAST_MASK 0x0010 /* MICB3_FAST */
+#define ARIZONA_MICB3_FAST_SHIFT 4 /* MICB3_FAST */
+#define ARIZONA_MICB3_FAST_WIDTH 1 /* MICB3_FAST */
+#define ARIZONA_MICB3_RATE 0x0008 /* MICB3_RATE */
+#define ARIZONA_MICB3_RATE_MASK 0x0008 /* MICB3_RATE */
+#define ARIZONA_MICB3_RATE_SHIFT 3 /* MICB3_RATE */
+#define ARIZONA_MICB3_RATE_WIDTH 1 /* MICB3_RATE */
+#define ARIZONA_MICB3_DISCH 0x0004 /* MICB3_DISCH */
+#define ARIZONA_MICB3_DISCH_MASK 0x0004 /* MICB3_DISCH */
+#define ARIZONA_MICB3_DISCH_SHIFT 2 /* MICB3_DISCH */
+#define ARIZONA_MICB3_DISCH_WIDTH 1 /* MICB3_DISCH */
+#define ARIZONA_MICB3_BYPASS 0x0002 /* MICB3_BYPASS */
+#define ARIZONA_MICB3_BYPASS_MASK 0x0002 /* MICB3_BYPASS */
+#define ARIZONA_MICB3_BYPASS_SHIFT 1 /* MICB3_BYPASS */
+#define ARIZONA_MICB3_BYPASS_WIDTH 1 /* MICB3_BYPASS */
+#define ARIZONA_MICB3_ENA 0x0001 /* MICB3_ENA */
+#define ARIZONA_MICB3_ENA_MASK 0x0001 /* MICB3_ENA */
+#define ARIZONA_MICB3_ENA_SHIFT 0 /* MICB3_ENA */
+#define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */
+
+/*
+ * R549 (0x225) - HP Ctrl 1L
+ */
+#define ARIZONA_RMV_SHRT_HP1L 0x4000 /* RMV_SHRT_HP1L */
+#define ARIZONA_RMV_SHRT_HP1L_MASK 0x4000 /* RMV_SHRT_HP1L */
+#define ARIZONA_RMV_SHRT_HP1L_SHIFT 14 /* RMV_SHRT_HP1L */
+#define ARIZONA_RMV_SHRT_HP1L_WIDTH 1 /* RMV_SHRT_HP1L */
+#define ARIZONA_HP1L_FLWR 0x0004 /* HP1L_FLWR */
+#define ARIZONA_HP1L_FLWR_MASK 0x0004 /* HP1L_FLWR */
+#define ARIZONA_HP1L_FLWR_SHIFT 2 /* HP1L_FLWR */
+#define ARIZONA_HP1L_FLWR_WIDTH 1 /* HP1L_FLWR */
+#define ARIZONA_HP1L_SHRTI 0x0002 /* HP1L_SHRTI */
+#define ARIZONA_HP1L_SHRTI_MASK 0x0002 /* HP1L_SHRTI */
+#define ARIZONA_HP1L_SHRTI_SHIFT 1 /* HP1L_SHRTI */
+#define ARIZONA_HP1L_SHRTI_WIDTH 1 /* HP1L_SHRTI */
+#define ARIZONA_HP1L_SHRTO 0x0001 /* HP1L_SHRTO */
+#define ARIZONA_HP1L_SHRTO_MASK 0x0001 /* HP1L_SHRTO */
+#define ARIZONA_HP1L_SHRTO_SHIFT 0 /* HP1L_SHRTO */
+#define ARIZONA_HP1L_SHRTO_WIDTH 1 /* HP1L_SHRTO */
+
+/*
+ * R550 (0x226) - HP Ctrl 1R
+ */
+#define ARIZONA_RMV_SHRT_HP1R 0x4000 /* RMV_SHRT_HP1R */
+#define ARIZONA_RMV_SHRT_HP1R_MASK 0x4000 /* RMV_SHRT_HP1R */
+#define ARIZONA_RMV_SHRT_HP1R_SHIFT 14 /* RMV_SHRT_HP1R */
+#define ARIZONA_RMV_SHRT_HP1R_WIDTH 1 /* RMV_SHRT_HP1R */
+#define ARIZONA_HP1R_FLWR 0x0004 /* HP1R_FLWR */
+#define ARIZONA_HP1R_FLWR_MASK 0x0004 /* HP1R_FLWR */
+#define ARIZONA_HP1R_FLWR_SHIFT 2 /* HP1R_FLWR */
+#define ARIZONA_HP1R_FLWR_WIDTH 1 /* HP1R_FLWR */
+#define ARIZONA_HP1R_SHRTI 0x0002 /* HP1R_SHRTI */
+#define ARIZONA_HP1R_SHRTI_MASK 0x0002 /* HP1R_SHRTI */
+#define ARIZONA_HP1R_SHRTI_SHIFT 1 /* HP1R_SHRTI */
+#define ARIZONA_HP1R_SHRTI_WIDTH 1 /* HP1R_SHRTI */
+#define ARIZONA_HP1R_SHRTO 0x0001 /* HP1R_SHRTO */
+#define ARIZONA_HP1R_SHRTO_MASK 0x0001 /* HP1R_SHRTO */
+#define ARIZONA_HP1R_SHRTO_SHIFT 0 /* HP1R_SHRTO */
+#define ARIZONA_HP1R_SHRTO_WIDTH 1 /* HP1R_SHRTO */
+
+/*
+ * R659 (0x293) - Accessory Detect Mode 1
+ */
+#define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */
+#define ARIZONA_ACCDET_SRC_MASK 0x2000 /* ACCDET_SRC */
+#define ARIZONA_ACCDET_SRC_SHIFT 13 /* ACCDET_SRC */
+#define ARIZONA_ACCDET_SRC_WIDTH 1 /* ACCDET_SRC */
+#define ARIZONA_ACCDET_MODE_MASK 0x0003 /* ACCDET_MODE - [1:0] */
+#define ARIZONA_ACCDET_MODE_SHIFT 0 /* ACCDET_MODE - [1:0] */
+#define ARIZONA_ACCDET_MODE_WIDTH 2 /* ACCDET_MODE - [1:0] */
+
+/*
+ * R667 (0x29B) - Headphone Detect 1
+ */
+#define ARIZONA_HP_IMPEDANCE_RANGE_MASK 0x0600 /* HP_IMPEDANCE_RANGE - [10:9] */
+#define ARIZONA_HP_IMPEDANCE_RANGE_SHIFT 9 /* HP_IMPEDANCE_RANGE - [10:9] */
+#define ARIZONA_HP_IMPEDANCE_RANGE_WIDTH 2 /* HP_IMPEDANCE_RANGE - [10:9] */
+#define ARIZONA_HP_STEP_SIZE 0x0100 /* HP_STEP_SIZE */
+#define ARIZONA_HP_STEP_SIZE_MASK 0x0100 /* HP_STEP_SIZE */
+#define ARIZONA_HP_STEP_SIZE_SHIFT 8 /* HP_STEP_SIZE */
+#define ARIZONA_HP_STEP_SIZE_WIDTH 1 /* HP_STEP_SIZE */
+#define ARIZONA_HP_HOLDTIME_MASK 0x00E0 /* HP_HOLDTIME - [7:5] */
+#define ARIZONA_HP_HOLDTIME_SHIFT 5 /* HP_HOLDTIME - [7:5] */
+#define ARIZONA_HP_HOLDTIME_WIDTH 3 /* HP_HOLDTIME - [7:5] */
+#define ARIZONA_HP_CLK_DIV_MASK 0x0018 /* HP_CLK_DIV - [4:3] */
+#define ARIZONA_HP_CLK_DIV_SHIFT 3 /* HP_CLK_DIV - [4:3] */
+#define ARIZONA_HP_CLK_DIV_WIDTH 2 /* HP_CLK_DIV - [4:3] */
+#define ARIZONA_HP_IDAC_STEER 0x0004 /* HP_IDAC_STEER */
+#define ARIZONA_HP_IDAC_STEER_MASK 0x0004 /* HP_IDAC_STEER */
+#define ARIZONA_HP_IDAC_STEER_SHIFT 2 /* HP_IDAC_STEER */
+#define ARIZONA_HP_IDAC_STEER_WIDTH 1 /* HP_IDAC_STEER */
+#define ARIZONA_HP_RATE 0x0002 /* HP_RATE */
+#define ARIZONA_HP_RATE_MASK 0x0002 /* HP_RATE */
+#define ARIZONA_HP_RATE_SHIFT 1 /* HP_RATE */
+#define ARIZONA_HP_RATE_WIDTH 1 /* HP_RATE */
+#define ARIZONA_HP_POLL 0x0001 /* HP_POLL */
+#define ARIZONA_HP_POLL_MASK 0x0001 /* HP_POLL */
+#define ARIZONA_HP_POLL_SHIFT 0 /* HP_POLL */
+#define ARIZONA_HP_POLL_WIDTH 1 /* HP_POLL */
+
+/*
+ * R668 (0x29C) - Headphone Detect 2
+ */
+#define ARIZONA_HP_DONE 0x0080 /* HP_DONE */
+#define ARIZONA_HP_DONE_MASK 0x0080 /* HP_DONE */
+#define ARIZONA_HP_DONE_SHIFT 7 /* HP_DONE */
+#define ARIZONA_HP_DONE_WIDTH 1 /* HP_DONE */
+#define ARIZONA_HP_LVL_MASK 0x007F /* HP_LVL - [6:0] */
+#define ARIZONA_HP_LVL_SHIFT 0 /* HP_LVL - [6:0] */
+#define ARIZONA_HP_LVL_WIDTH 7 /* HP_LVL - [6:0] */
+
+#define ARIZONA_HP_DONE_B 0x8000 /* HP_DONE */
+#define ARIZONA_HP_DONE_B_MASK 0x8000 /* HP_DONE */
+#define ARIZONA_HP_DONE_B_SHIFT 15 /* HP_DONE */
+#define ARIZONA_HP_DONE_B_WIDTH 1 /* HP_DONE */
+#define ARIZONA_HP_LVL_B_MASK 0x7FFF /* HP_LVL - [14:0] */
+#define ARIZONA_HP_LVL_B_SHIFT 0 /* HP_LVL - [14:0] */
+#define ARIZONA_HP_LVL_B_WIDTH 15 /* HP_LVL - [14:0] */
+
+/*
+ * R674 (0x2A2) - MICD clamp control
+ */
+#define ARIZONA_MICD_CLAMP_MODE_MASK 0x000F /* MICD_CLAMP_MODE - [3:0] */
+#define ARIZONA_MICD_CLAMP_MODE_SHIFT 0 /* MICD_CLAMP_MODE - [3:0] */
+#define ARIZONA_MICD_CLAMP_MODE_WIDTH 4 /* MICD_CLAMP_MODE - [3:0] */
+
+/*
+ * R675 (0x2A3) - Mic Detect 1
+ */
+#define ARIZONA_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */
+#define ARIZONA_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */
+#define ARIZONA_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */
+#define ARIZONA_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */
+#define ARIZONA_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */
+#define ARIZONA_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */
+#define ARIZONA_MICD_BIAS_SRC_MASK 0x0030 /* MICD_BIAS_SRC - [5:4] */
+#define ARIZONA_MICD_BIAS_SRC_SHIFT 4 /* MICD_BIAS_SRC - [5:4] */
+#define ARIZONA_MICD_BIAS_SRC_WIDTH 2 /* MICD_BIAS_SRC - [5:4] */
+#define ARIZONA_MICD_DBTIME 0x0002 /* MICD_DBTIME */
+#define ARIZONA_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */
+#define ARIZONA_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */
+#define ARIZONA_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */
+#define ARIZONA_MICD_ENA 0x0001 /* MICD_ENA */
+#define ARIZONA_MICD_ENA_MASK 0x0001 /* MICD_ENA */
+#define ARIZONA_MICD_ENA_SHIFT 0 /* MICD_ENA */
+#define ARIZONA_MICD_ENA_WIDTH 1 /* MICD_ENA */
+
+/*
+ * R676 (0x2A4) - Mic Detect 2
+ */
+#define ARIZONA_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */
+#define ARIZONA_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */
+#define ARIZONA_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */
+
+/*
+ * R677 (0x2A5) - Mic Detect 3
+ */
+#define ARIZONA_MICD_LVL_0 0x0004 /* MICD_LVL - [2] */
+#define ARIZONA_MICD_LVL_1 0x0008 /* MICD_LVL - [3] */
+#define ARIZONA_MICD_LVL_2 0x0010 /* MICD_LVL - [4] */
+#define ARIZONA_MICD_LVL_3 0x0020 /* MICD_LVL - [5] */
+#define ARIZONA_MICD_LVL_4 0x0040 /* MICD_LVL - [6] */
+#define ARIZONA_MICD_LVL_5 0x0080 /* MICD_LVL - [7] */
+#define ARIZONA_MICD_LVL_6 0x0100 /* MICD_LVL - [8] */
+#define ARIZONA_MICD_LVL_7 0x0200 /* MICD_LVL - [9] */
+#define ARIZONA_MICD_LVL_8 0x0400 /* MICD_LVL - [10] */
+#define ARIZONA_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
+#define ARIZONA_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
+#define ARIZONA_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
+#define ARIZONA_MICD_VALID 0x0002 /* MICD_VALID */
+#define ARIZONA_MICD_VALID_MASK 0x0002 /* MICD_VALID */
+#define ARIZONA_MICD_VALID_SHIFT 1 /* MICD_VALID */
+#define ARIZONA_MICD_VALID_WIDTH 1 /* MICD_VALID */
+#define ARIZONA_MICD_STS 0x0001 /* MICD_STS */
+#define ARIZONA_MICD_STS_MASK 0x0001 /* MICD_STS */
+#define ARIZONA_MICD_STS_SHIFT 0 /* MICD_STS */
+#define ARIZONA_MICD_STS_WIDTH 1 /* MICD_STS */
+
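[Editorial aside, not part of the header being imported: status fields such as MICD_LVL are read back the same way, with regmap_read() plus the field's _MASK and _SHIFT. A sketch under the same assumptions as the earlier aside (arizona MFD handle, register-address macro ARIZONA_MIC_DETECT_3 for register 0x2A5 defined earlier in this header); example_micd_level() is a hypothetical helper, not a kernel function.]

    #include <linux/errno.h>
    #include <linux/regmap.h>
    #include <linux/mfd/arizona/core.h>
    #include <linux/mfd/arizona/registers.h>

    /* Illustrative only: fetch the measured MICD_LVL once the result is flagged valid. */
    static int example_micd_level(struct arizona *arizona, unsigned int *lvl)
    {
            unsigned int val;
            int ret;

            ret = regmap_read(arizona->regmap, ARIZONA_MIC_DETECT_3, &val);
            if (ret)
                    return ret;

            if (!(val & ARIZONA_MICD_VALID))
                    return -EBUSY;  /* measurement not yet valid */

            *lvl = (val & ARIZONA_MICD_LVL_MASK) >> ARIZONA_MICD_LVL_SHIFT;
            return 0;
    }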
+/*
+ * R707 (0x2C3) - Mic noise mix control 1
+ */
+#define ARIZONA_MICMUTE_RATE_MASK 0x7800 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_RATE_SHIFT 11 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_RATE_WIDTH 4 /* MICMUTE_RATE - [14:11] */
+#define ARIZONA_MICMUTE_MIX_ENA 0x0040 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_MASK 0x0040 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_SHIFT 6 /* MICMUTE_MIX_ENA */
+#define ARIZONA_MICMUTE_MIX_ENA_WIDTH 1 /* MICMUTE_MIX_ENA */
+
+/*
+ * R715 (0x2CB) - Isolation control
+ */
+#define ARIZONA_ISOLATE_DCVDD1 0x0001 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_MASK 0x0001 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_SHIFT 0 /* ISOLATE_DCVDD1 */
+#define ARIZONA_ISOLATE_DCVDD1_WIDTH 1 /* ISOLATE_DCVDD1 */
+
+/*
+ * R723 (0x2D3) - Jack detect analogue
+ */
+#define ARIZONA_JD2_ENA 0x0002 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_MASK 0x0002 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_SHIFT 1 /* JD2_ENA */
+#define ARIZONA_JD2_ENA_WIDTH 1 /* JD2_ENA */
+#define ARIZONA_JD1_ENA 0x0001 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_MASK 0x0001 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_SHIFT 0 /* JD1_ENA */
+#define ARIZONA_JD1_ENA_WIDTH 1 /* JD1_ENA */
+
+/*
+ * R768 (0x300) - Input Enables
+ */
+#define ARIZONA_IN4L_ENA 0x0080 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_MASK 0x0080 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_SHIFT 7 /* IN4L_ENA */
+#define ARIZONA_IN4L_ENA_WIDTH 1 /* IN4L_ENA */
+#define ARIZONA_IN4R_ENA 0x0040 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_MASK 0x0040 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_SHIFT 6 /* IN4R_ENA */
+#define ARIZONA_IN4R_ENA_WIDTH 1 /* IN4R_ENA */
+#define ARIZONA_IN3L_ENA 0x0020 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_MASK 0x0020 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_SHIFT 5 /* IN3L_ENA */
+#define ARIZONA_IN3L_ENA_WIDTH 1 /* IN3L_ENA */
+#define ARIZONA_IN3R_ENA 0x0010 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_MASK 0x0010 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_SHIFT 4 /* IN3R_ENA */
+#define ARIZONA_IN3R_ENA_WIDTH 1 /* IN3R_ENA */
+#define ARIZONA_IN2L_ENA 0x0008 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_MASK 0x0008 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_SHIFT 3 /* IN2L_ENA */
+#define ARIZONA_IN2L_ENA_WIDTH 1 /* IN2L_ENA */
+#define ARIZONA_IN2R_ENA 0x0004 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_MASK 0x0004 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_SHIFT 2 /* IN2R_ENA */
+#define ARIZONA_IN2R_ENA_WIDTH 1 /* IN2R_ENA */
+#define ARIZONA_IN1L_ENA 0x0002 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_MASK 0x0002 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_SHIFT 1 /* IN1L_ENA */
+#define ARIZONA_IN1L_ENA_WIDTH 1 /* IN1L_ENA */
+#define ARIZONA_IN1R_ENA 0x0001 /* IN1R_ENA */
+#define ARIZONA_IN1R_ENA_MASK 0x0001 /* IN1R_ENA */
+#define ARIZONA_IN1R_ENA_SHIFT 0 /* IN1R_ENA */
+#define ARIZONA_IN1R_ENA_WIDTH 1 /* IN1R_ENA */
+
+/*
+ * R776 (0x308) - Input Rate
+ */
+#define ARIZONA_IN_RATE_MASK 0x7800 /* IN_RATE - [14:11] */
+#define ARIZONA_IN_RATE_SHIFT 11 /* IN_RATE - [14:11] */
+#define ARIZONA_IN_RATE_WIDTH 4 /* IN_RATE - [14:11] */
+
+/*
+ * R777 (0x309) - Input Volume Ramp
+ */
+#define ARIZONA_IN_VD_RAMP_MASK 0x0070 /* IN_VD_RAMP - [6:4] */
+#define ARIZONA_IN_VD_RAMP_SHIFT 4 /* IN_VD_RAMP - [6:4] */
+#define ARIZONA_IN_VD_RAMP_WIDTH 3 /* IN_VD_RAMP - [6:4] */
+#define ARIZONA_IN_VI_RAMP_MASK 0x0007 /* IN_VI_RAMP - [2:0] */
+#define ARIZONA_IN_VI_RAMP_SHIFT 0 /* IN_VI_RAMP - [2:0] */
+#define ARIZONA_IN_VI_RAMP_WIDTH 3 /* IN_VI_RAMP - [2:0] */
+
+/*
+ * R780 (0x30C) - HPF Control
+ */
+#define ARIZONA_IN_HPF_CUT_MASK 0x0007 /* IN_HPF_CUT - [2:0] */
+#define ARIZONA_IN_HPF_CUT_SHIFT 0 /* IN_HPF_CUT - [2:0] */
+#define ARIZONA_IN_HPF_CUT_WIDTH 3 /* IN_HPF_CUT - [2:0] */
+
+/*
+ * R784 (0x310) - IN1L Control
+ */
+#define ARIZONA_IN1L_HPF_MASK 0x8000 /* IN1L_HPF - [15] */
+#define ARIZONA_IN1L_HPF_SHIFT 15 /* IN1L_HPF - [15] */
+#define ARIZONA_IN1L_HPF_WIDTH 1 /* IN1L_HPF - [15] */
+#define ARIZONA_IN1_OSR_MASK 0x6000 /* IN1_OSR - [14:13] */
+#define ARIZONA_IN1_OSR_SHIFT 13 /* IN1_OSR - [14:13] */
+#define ARIZONA_IN1_OSR_WIDTH 2 /* IN1_OSR - [14:13] */
+#define ARIZONA_IN1_DMIC_SUP_MASK 0x1800 /* IN1_DMIC_SUP - [12:11] */
+#define ARIZONA_IN1_DMIC_SUP_SHIFT 11 /* IN1_DMIC_SUP - [12:11] */
+#define ARIZONA_IN1_DMIC_SUP_WIDTH 2 /* IN1_DMIC_SUP - [12:11] */
+#define ARIZONA_IN1_MODE_MASK 0x0600 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1_MODE_SHIFT 9 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1_MODE_WIDTH 2 /* IN1_MODE - [10:9] */
+#define ARIZONA_IN1L_PGA_VOL_MASK 0x00FE /* IN1L_PGA_VOL - [7:1] */
+#define ARIZONA_IN1L_PGA_VOL_SHIFT 1 /* IN1L_PGA_VOL - [7:1] */
+#define ARIZONA_IN1L_PGA_VOL_WIDTH 7 /* IN1L_PGA_VOL - [7:1] */
+
+/*
+ * R785 (0x311) - ADC Digital Volume 1L
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN1L_MUTE 0x0100 /* IN1L_MUTE */
+#define ARIZONA_IN1L_MUTE_MASK 0x0100 /* IN1L_MUTE */
+#define ARIZONA_IN1L_MUTE_SHIFT 8 /* IN1L_MUTE */
+#define ARIZONA_IN1L_MUTE_WIDTH 1 /* IN1L_MUTE */
+#define ARIZONA_IN1L_DIG_VOL_MASK 0x00FF /* IN1L_DIG_VOL - [7:0] */
+#define ARIZONA_IN1L_DIG_VOL_SHIFT 0 /* IN1L_DIG_VOL - [7:0] */
+#define ARIZONA_IN1L_DIG_VOL_WIDTH 8 /* IN1L_DIG_VOL - [7:0] */
+
+/*
+ * R786 (0x312) - DMIC1L Control
+ */
+#define ARIZONA_IN1_DMICL_DLY_MASK 0x003F /* IN1_DMICL_DLY - [5:0] */
+#define ARIZONA_IN1_DMICL_DLY_SHIFT 0 /* IN1_DMICL_DLY - [5:0] */
+#define ARIZONA_IN1_DMICL_DLY_WIDTH 6 /* IN1_DMICL_DLY - [5:0] */
+
+/*
+ * R788 (0x314) - IN1R Control
+ */
+#define ARIZONA_IN1R_HPF_MASK 0x8000 /* IN1R_HPF - [15] */
+#define ARIZONA_IN1R_HPF_SHIFT 15 /* IN1R_HPF - [15] */
+#define ARIZONA_IN1R_HPF_WIDTH 1 /* IN1R_HPF - [15] */
+#define ARIZONA_IN1R_PGA_VOL_MASK 0x00FE /* IN1R_PGA_VOL - [7:1] */
+#define ARIZONA_IN1R_PGA_VOL_SHIFT 1 /* IN1R_PGA_VOL - [7:1] */
+#define ARIZONA_IN1R_PGA_VOL_WIDTH 7 /* IN1R_PGA_VOL - [7:1] */
+
+/*
+ * R789 (0x315) - ADC Digital Volume 1R
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN1R_MUTE 0x0100 /* IN1R_MUTE */
+#define ARIZONA_IN1R_MUTE_MASK 0x0100 /* IN1R_MUTE */
+#define ARIZONA_IN1R_MUTE_SHIFT 8 /* IN1R_MUTE */
+#define ARIZONA_IN1R_MUTE_WIDTH 1 /* IN1R_MUTE */
+#define ARIZONA_IN1R_DIG_VOL_MASK 0x00FF /* IN1R_DIG_VOL - [7:0] */
+#define ARIZONA_IN1R_DIG_VOL_SHIFT 0 /* IN1R_DIG_VOL - [7:0] */
+#define ARIZONA_IN1R_DIG_VOL_WIDTH 8 /* IN1R_DIG_VOL - [7:0] */
+
+/*
+ * R790 (0x316) - DMIC1R Control
+ */
+#define ARIZONA_IN1_DMICR_DLY_MASK 0x003F /* IN1_DMICR_DLY - [5:0] */
+#define ARIZONA_IN1_DMICR_DLY_SHIFT 0 /* IN1_DMICR_DLY - [5:0] */
+#define ARIZONA_IN1_DMICR_DLY_WIDTH 6 /* IN1_DMICR_DLY - [5:0] */
+
+/*
+ * R792 (0x318) - IN2L Control
+ */
+#define ARIZONA_IN2L_HPF_MASK 0x8000 /* IN2L_HPF - [15] */
+#define ARIZONA_IN2L_HPF_SHIFT 15 /* IN2L_HPF - [15] */
+#define ARIZONA_IN2L_HPF_WIDTH 1 /* IN2L_HPF - [15] */
+#define ARIZONA_IN2_OSR_MASK 0x6000 /* IN2_OSR - [14:13] */
+#define ARIZONA_IN2_OSR_SHIFT 13 /* IN2_OSR - [14:13] */
+#define ARIZONA_IN2_OSR_WIDTH 2 /* IN2_OSR - [14:13] */
+#define ARIZONA_IN2_DMIC_SUP_MASK 0x1800 /* IN2_DMIC_SUP - [12:11] */
+#define ARIZONA_IN2_DMIC_SUP_SHIFT 11 /* IN2_DMIC_SUP - [12:11] */
+#define ARIZONA_IN2_DMIC_SUP_WIDTH 2 /* IN2_DMIC_SUP - [12:11] */
+#define ARIZONA_IN2_MODE_MASK 0x0600 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2_MODE_SHIFT 9 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2_MODE_WIDTH 2 /* IN2_MODE - [10:9] */
+#define ARIZONA_IN2L_PGA_VOL_MASK 0x00FE /* IN2L_PGA_VOL - [7:1] */
+#define ARIZONA_IN2L_PGA_VOL_SHIFT 1 /* IN2L_PGA_VOL - [7:1] */
+#define ARIZONA_IN2L_PGA_VOL_WIDTH 7 /* IN2L_PGA_VOL - [7:1] */
+
+/*
+ * R793 (0x319) - ADC Digital Volume 2L
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN2L_MUTE 0x0100 /* IN2L_MUTE */
+#define ARIZONA_IN2L_MUTE_MASK 0x0100 /* IN2L_MUTE */
+#define ARIZONA_IN2L_MUTE_SHIFT 8 /* IN2L_MUTE */
+#define ARIZONA_IN2L_MUTE_WIDTH 1 /* IN2L_MUTE */
+#define ARIZONA_IN2L_DIG_VOL_MASK 0x00FF /* IN2L_DIG_VOL - [7:0] */
+#define ARIZONA_IN2L_DIG_VOL_SHIFT 0 /* IN2L_DIG_VOL - [7:0] */
+#define ARIZONA_IN2L_DIG_VOL_WIDTH 8 /* IN2L_DIG_VOL - [7:0] */
+
+/*
+ * R794 (0x31A) - DMIC2L Control
+ */
+#define ARIZONA_IN2_DMICL_DLY_MASK 0x003F /* IN2_DMICL_DLY - [5:0] */
+#define ARIZONA_IN2_DMICL_DLY_SHIFT 0 /* IN2_DMICL_DLY - [5:0] */
+#define ARIZONA_IN2_DMICL_DLY_WIDTH 6 /* IN2_DMICL_DLY - [5:0] */
+
+/*
+ * R796 (0x31C) - IN2R Control
+ */
+#define ARIZONA_IN2R_HPF_MASK 0x8000 /* IN2R_HPF - [15] */
+#define ARIZONA_IN2R_HPF_SHIFT 15 /* IN2R_HPF - [15] */
+#define ARIZONA_IN2R_HPF_WIDTH 1 /* IN2R_HPF - [15] */
+#define ARIZONA_IN2R_PGA_VOL_MASK 0x00FE /* IN2R_PGA_VOL - [7:1] */
+#define ARIZONA_IN2R_PGA_VOL_SHIFT 1 /* IN2R_PGA_VOL - [7:1] */
+#define ARIZONA_IN2R_PGA_VOL_WIDTH 7 /* IN2R_PGA_VOL - [7:1] */
+
+/*
+ * R797 (0x31D) - ADC Digital Volume 2R
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN2R_MUTE 0x0100 /* IN2R_MUTE */
+#define ARIZONA_IN2R_MUTE_MASK 0x0100 /* IN2R_MUTE */
+#define ARIZONA_IN2R_MUTE_SHIFT 8 /* IN2R_MUTE */
+#define ARIZONA_IN2R_MUTE_WIDTH 1 /* IN2R_MUTE */
+#define ARIZONA_IN2R_DIG_VOL_MASK 0x00FF /* IN2R_DIG_VOL - [7:0] */
+#define ARIZONA_IN2R_DIG_VOL_SHIFT 0 /* IN2R_DIG_VOL - [7:0] */
+#define ARIZONA_IN2R_DIG_VOL_WIDTH 8 /* IN2R_DIG_VOL - [7:0] */
+
+/*
+ * R798 (0x31E) - DMIC2R Control
+ */
+#define ARIZONA_IN2_DMICR_DLY_MASK 0x003F /* IN2_DMICR_DLY - [5:0] */
+#define ARIZONA_IN2_DMICR_DLY_SHIFT 0 /* IN2_DMICR_DLY - [5:0] */
+#define ARIZONA_IN2_DMICR_DLY_WIDTH 6 /* IN2_DMICR_DLY - [5:0] */
+
+/*
+ * R800 (0x320) - IN3L Control
+ */
+#define ARIZONA_IN3L_HPF_MASK 0x8000 /* IN3L_HPF - [15] */
+#define ARIZONA_IN3L_HPF_SHIFT 15 /* IN3L_HPF - [15] */
+#define ARIZONA_IN3L_HPF_WIDTH 1 /* IN3L_HPF - [15] */
+#define ARIZONA_IN3_OSR_MASK 0x6000 /* IN3_OSR - [14:13] */
+#define ARIZONA_IN3_OSR_SHIFT 13 /* IN3_OSR - [14:13] */
+#define ARIZONA_IN3_OSR_WIDTH 2 /* IN3_OSR - [14:13] */
+#define ARIZONA_IN3_DMIC_SUP_MASK 0x1800 /* IN3_DMIC_SUP - [12:11] */
+#define ARIZONA_IN3_DMIC_SUP_SHIFT 11 /* IN3_DMIC_SUP - [12:11] */
+#define ARIZONA_IN3_DMIC_SUP_WIDTH 2 /* IN3_DMIC_SUP - [12:11] */
+#define ARIZONA_IN3_MODE_MASK 0x0600 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3_MODE_SHIFT 9 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3_MODE_WIDTH 2 /* IN3_MODE - [10:9] */
+#define ARIZONA_IN3L_PGA_VOL_MASK 0x00FE /* IN3L_PGA_VOL - [7:1] */
+#define ARIZONA_IN3L_PGA_VOL_SHIFT 1 /* IN3L_PGA_VOL - [7:1] */
+#define ARIZONA_IN3L_PGA_VOL_WIDTH 7 /* IN3L_PGA_VOL - [7:1] */
+
+/*
+ * R801 (0x321) - ADC Digital Volume 3L
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN3L_MUTE 0x0100 /* IN3L_MUTE */
+#define ARIZONA_IN3L_MUTE_MASK 0x0100 /* IN3L_MUTE */
+#define ARIZONA_IN3L_MUTE_SHIFT 8 /* IN3L_MUTE */
+#define ARIZONA_IN3L_MUTE_WIDTH 1 /* IN3L_MUTE */
+#define ARIZONA_IN3L_DIG_VOL_MASK 0x00FF /* IN3L_DIG_VOL - [7:0] */
+#define ARIZONA_IN3L_DIG_VOL_SHIFT 0 /* IN3L_DIG_VOL - [7:0] */
+#define ARIZONA_IN3L_DIG_VOL_WIDTH 8 /* IN3L_DIG_VOL - [7:0] */
+
+/*
+ * R802 (0x322) - DMIC3L Control
+ */
+#define ARIZONA_IN3_DMICL_DLY_MASK 0x003F /* IN3_DMICL_DLY - [5:0] */
+#define ARIZONA_IN3_DMICL_DLY_SHIFT 0 /* IN3_DMICL_DLY - [5:0] */
+#define ARIZONA_IN3_DMICL_DLY_WIDTH 6 /* IN3_DMICL_DLY - [5:0] */
+
+/*
+ * R804 (0x324) - IN3R Control
+ */
+#define ARIZONA_IN3R_HPF_MASK 0x8000 /* IN3R_HPF - [15] */
+#define ARIZONA_IN3R_HPF_SHIFT 15 /* IN3R_HPF - [15] */
+#define ARIZONA_IN3R_HPF_WIDTH 1 /* IN3R_HPF - [15] */
+#define ARIZONA_IN3R_PGA_VOL_MASK 0x00FE /* IN3R_PGA_VOL - [7:1] */
+#define ARIZONA_IN3R_PGA_VOL_SHIFT 1 /* IN3R_PGA_VOL - [7:1] */
+#define ARIZONA_IN3R_PGA_VOL_WIDTH 7 /* IN3R_PGA_VOL - [7:1] */
+
+/*
+ * R805 (0x325) - ADC Digital Volume 3R
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN3R_MUTE 0x0100 /* IN3R_MUTE */
+#define ARIZONA_IN3R_MUTE_MASK 0x0100 /* IN3R_MUTE */
+#define ARIZONA_IN3R_MUTE_SHIFT 8 /* IN3R_MUTE */
+#define ARIZONA_IN3R_MUTE_WIDTH 1 /* IN3R_MUTE */
+#define ARIZONA_IN3R_DIG_VOL_MASK 0x00FF /* IN3R_DIG_VOL - [7:0] */
+#define ARIZONA_IN3R_DIG_VOL_SHIFT 0 /* IN3R_DIG_VOL - [7:0] */
+#define ARIZONA_IN3R_DIG_VOL_WIDTH 8 /* IN3R_DIG_VOL - [7:0] */
+
+/*
+ * R806 (0x326) - DMIC3R Control
+ */
+#define ARIZONA_IN3_DMICR_DLY_MASK 0x003F /* IN3_DMICR_DLY - [5:0] */
+#define ARIZONA_IN3_DMICR_DLY_SHIFT 0 /* IN3_DMICR_DLY - [5:0] */
+#define ARIZONA_IN3_DMICR_DLY_WIDTH 6 /* IN3_DMICR_DLY - [5:0] */
+
+/*
+ * R808 (0x328) - IN4 Control
+ */
+#define ARIZONA_IN4L_HPF_MASK 0x8000 /* IN4L_HPF - [15] */
+#define ARIZONA_IN4L_HPF_SHIFT 15 /* IN4L_HPF - [15] */
+#define ARIZONA_IN4L_HPF_WIDTH 1 /* IN4L_HPF - [15] */
+#define ARIZONA_IN4_OSR_MASK 0x6000 /* IN4_OSR - [14:13] */
+#define ARIZONA_IN4_OSR_SHIFT 13 /* IN4_OSR - [14:13] */
+#define ARIZONA_IN4_OSR_WIDTH 2 /* IN4_OSR - [14:13] */
+#define ARIZONA_IN4_DMIC_SUP_MASK 0x1800 /* IN4_DMIC_SUP - [12:11] */
+#define ARIZONA_IN4_DMIC_SUP_SHIFT 11 /* IN4_DMIC_SUP - [12:11] */
+#define ARIZONA_IN4_DMIC_SUP_WIDTH 2 /* IN4_DMIC_SUP - [12:11] */
+
+/*
+ * R809 (0x329) - ADC Digital Volume 4L
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN4L_MUTE 0x0100 /* IN4L_MUTE */
+#define ARIZONA_IN4L_MUTE_MASK 0x0100 /* IN4L_MUTE */
+#define ARIZONA_IN4L_MUTE_SHIFT 8 /* IN4L_MUTE */
+#define ARIZONA_IN4L_MUTE_WIDTH 1 /* IN4L_MUTE */
+#define ARIZONA_IN4L_DIG_VOL_MASK 0x00FF /* IN4L_DIG_VOL - [7:0] */
+#define ARIZONA_IN4L_DIG_VOL_SHIFT 0 /* IN4L_DIG_VOL - [7:0] */
+#define ARIZONA_IN4L_DIG_VOL_WIDTH 8 /* IN4L_DIG_VOL - [7:0] */
+
+/*
+ * R810 (0x32A) - DMIC4L Control
+ */
+#define ARIZONA_IN4L_DMIC_DLY_MASK 0x003F /* IN4L_DMIC_DLY - [5:0] */
+#define ARIZONA_IN4L_DMIC_DLY_SHIFT 0 /* IN4L_DMIC_DLY - [5:0] */
+#define ARIZONA_IN4L_DMIC_DLY_WIDTH 6 /* IN4L_DMIC_DLY - [5:0] */
+
+/*
+ * R812 (0x32C) - IN4R Control
+ */
+#define ARIZONA_IN4R_HPF_MASK 0x8000 /* IN4R_HPF - [15] */
+#define ARIZONA_IN4R_HPF_SHIFT 15 /* IN4R_HPF - [15] */
+#define ARIZONA_IN4R_HPF_WIDTH 1 /* IN4R_HPF - [15] */
+
+/*
+ * R813 (0x32D) - ADC Digital Volume 4R
+ */
+#define ARIZONA_IN_VU 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_MASK 0x0200 /* IN_VU */
+#define ARIZONA_IN_VU_SHIFT 9 /* IN_VU */
+#define ARIZONA_IN_VU_WIDTH 1 /* IN_VU */
+#define ARIZONA_IN4R_MUTE 0x0100 /* IN4R_MUTE */
+#define ARIZONA_IN4R_MUTE_MASK 0x0100 /* IN4R_MUTE */
+#define ARIZONA_IN4R_MUTE_SHIFT 8 /* IN4R_MUTE */
+#define ARIZONA_IN4R_MUTE_WIDTH 1 /* IN4R_MUTE */
+#define ARIZONA_IN4R_DIG_VOL_MASK 0x00FF /* IN4R_DIG_VOL - [7:0] */
+#define ARIZONA_IN4R_DIG_VOL_SHIFT 0 /* IN4R_DIG_VOL - [7:0] */
+#define ARIZONA_IN4R_DIG_VOL_WIDTH 8 /* IN4R_DIG_VOL - [7:0] */
+
+/*
+ * R814 (0x32E) - DMIC4R Control
+ */
+#define ARIZONA_IN4R_DMIC_DLY_MASK 0x003F /* IN4R_DMIC_DLY - [5:0] */
+#define ARIZONA_IN4R_DMIC_DLY_SHIFT 0 /* IN4R_DMIC_DLY - [5:0] */
+#define ARIZONA_IN4R_DMIC_DLY_WIDTH 6 /* IN4R_DMIC_DLY - [5:0] */
+
+/*
+ * R1024 (0x400) - Output Enables 1
+ */
+#define ARIZONA_OUT6L_ENA 0x0800 /* OUT6L_ENA */
+#define ARIZONA_OUT6L_ENA_MASK 0x0800 /* OUT6L_ENA */
+#define ARIZONA_OUT6L_ENA_SHIFT 11 /* OUT6L_ENA */
+#define ARIZONA_OUT6L_ENA_WIDTH 1 /* OUT6L_ENA */
+#define ARIZONA_OUT6R_ENA 0x0400 /* OUT6R_ENA */
+#define ARIZONA_OUT6R_ENA_MASK 0x0400 /* OUT6R_ENA */
+#define ARIZONA_OUT6R_ENA_SHIFT 10 /* OUT6R_ENA */
+#define ARIZONA_OUT6R_ENA_WIDTH 1 /* OUT6R_ENA */
+#define ARIZONA_OUT5L_ENA 0x0200 /* OUT5L_ENA */
+#define ARIZONA_OUT5L_ENA_MASK 0x0200 /* OUT5L_ENA */
+#define ARIZONA_OUT5L_ENA_SHIFT 9 /* OUT5L_ENA */
+#define ARIZONA_OUT5L_ENA_WIDTH 1 /* OUT5L_ENA */
+#define ARIZONA_OUT5R_ENA 0x0100 /* OUT5R_ENA */
+#define ARIZONA_OUT5R_ENA_MASK 0x0100 /* OUT5R_ENA */
+#define ARIZONA_OUT5R_ENA_SHIFT 8 /* OUT5R_ENA */
+#define ARIZONA_OUT5R_ENA_WIDTH 1 /* OUT5R_ENA */
+#define ARIZONA_OUT4L_ENA 0x0080 /* OUT4L_ENA */
+#define ARIZONA_OUT4L_ENA_MASK 0x0080 /* OUT4L_ENA */
+#define ARIZONA_OUT4L_ENA_SHIFT 7 /* OUT4L_ENA */
+#define ARIZONA_OUT4L_ENA_WIDTH 1 /* OUT4L_ENA */
+#define ARIZONA_OUT4R_ENA 0x0040 /* OUT4R_ENA */
+#define ARIZONA_OUT4R_ENA_MASK 0x0040 /* OUT4R_ENA */
+#define ARIZONA_OUT4R_ENA_SHIFT 6 /* OUT4R_ENA */
+#define ARIZONA_OUT4R_ENA_WIDTH 1 /* OUT4R_ENA */
+#define ARIZONA_OUT3L_ENA 0x0020 /* OUT3L_ENA */
+#define ARIZONA_OUT3L_ENA_MASK 0x0020 /* OUT3L_ENA */
+#define ARIZONA_OUT3L_ENA_SHIFT 5 /* OUT3L_ENA */
+#define ARIZONA_OUT3L_ENA_WIDTH 1 /* OUT3L_ENA */
+#define ARIZONA_OUT3R_ENA 0x0010 /* OUT3R_ENA */
+#define ARIZONA_OUT3R_ENA_MASK 0x0010 /* OUT3R_ENA */
+#define ARIZONA_OUT3R_ENA_SHIFT 4 /* OUT3R_ENA */
+#define ARIZONA_OUT3R_ENA_WIDTH 1 /* OUT3R_ENA */
+#define ARIZONA_OUT2L_ENA 0x0008 /* OUT2L_ENA */
+#define ARIZONA_OUT2L_ENA_MASK 0x0008 /* OUT2L_ENA */
+#define ARIZONA_OUT2L_ENA_SHIFT 3 /* OUT2L_ENA */
+#define ARIZONA_OUT2L_ENA_WIDTH 1 /* OUT2L_ENA */
+#define ARIZONA_OUT2R_ENA 0x0004 /* OUT2R_ENA */
+#define ARIZONA_OUT2R_ENA_MASK 0x0004 /* OUT2R_ENA */
+#define ARIZONA_OUT2R_ENA_SHIFT 2 /* OUT2R_ENA */
+#define ARIZONA_OUT2R_ENA_WIDTH 1 /* OUT2R_ENA */
+#define ARIZONA_OUT1L_ENA 0x0002 /* OUT1L_ENA */
+#define ARIZONA_OUT1L_ENA_MASK 0x0002 /* OUT1L_ENA */
+#define ARIZONA_OUT1L_ENA_SHIFT 1 /* OUT1L_ENA */
+#define ARIZONA_OUT1L_ENA_WIDTH 1 /* OUT1L_ENA */
+#define ARIZONA_OUT1R_ENA 0x0001 /* OUT1R_ENA */
+#define ARIZONA_OUT1R_ENA_MASK 0x0001 /* OUT1R_ENA */
+#define ARIZONA_OUT1R_ENA_SHIFT 0 /* OUT1R_ENA */
+#define ARIZONA_OUT1R_ENA_WIDTH 1 /* OUT1R_ENA */
+
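+/*
+ * Illustrative usage (a sketch, not part of the original register map): the
+ * _MASK/_SHIFT/_WIDTH triplets above are intended for masked field updates
+ * through regmap.  Assuming a struct regmap *regmap for the device and the
+ * register address macro ARIZONA_OUTPUT_ENABLES_1 defined earlier in this
+ * header, outputs 1L and 1R could be enabled with:
+ *
+ *	regmap_update_bits(regmap, ARIZONA_OUTPUT_ENABLES_1,
+ *			   ARIZONA_OUT1L_ENA_MASK | ARIZONA_OUT1R_ENA_MASK,
+ *			   ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA);
+ *
+ * regmap_update_bits() performs a read-modify-write restricted to the masked
+ * bits, so the other OUTn_ENA fields in R1024 are left untouched.
+ */
+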
+/*
+ * R1025 (0x401) - Output Status 1
+ */
+#define ARIZONA_OUT6L_ENA_STS 0x0800 /* OUT6L_ENA_STS */
+#define ARIZONA_OUT6L_ENA_STS_MASK 0x0800 /* OUT6L_ENA_STS */
+#define ARIZONA_OUT6L_ENA_STS_SHIFT 11 /* OUT6L_ENA_STS */
+#define ARIZONA_OUT6L_ENA_STS_WIDTH 1 /* OUT6L_ENA_STS */
+#define ARIZONA_OUT6R_ENA_STS 0x0400 /* OUT6R_ENA_STS */
+#define ARIZONA_OUT6R_ENA_STS_MASK 0x0400 /* OUT6R_ENA_STS */
+#define ARIZONA_OUT6R_ENA_STS_SHIFT 10 /* OUT6R_ENA_STS */
+#define ARIZONA_OUT6R_ENA_STS_WIDTH 1 /* OUT6R_ENA_STS */
+#define ARIZONA_OUT5L_ENA_STS 0x0200 /* OUT5L_ENA_STS */
+#define ARIZONA_OUT5L_ENA_STS_MASK 0x0200 /* OUT5L_ENA_STS */
+#define ARIZONA_OUT5L_ENA_STS_SHIFT 9 /* OUT5L_ENA_STS */
+#define ARIZONA_OUT5L_ENA_STS_WIDTH 1 /* OUT5L_ENA_STS */
+#define ARIZONA_OUT5R_ENA_STS 0x0100 /* OUT5R_ENA_STS */
+#define ARIZONA_OUT5R_ENA_STS_MASK 0x0100 /* OUT5R_ENA_STS */
+#define ARIZONA_OUT5R_ENA_STS_SHIFT 8 /* OUT5R_ENA_STS */
+#define ARIZONA_OUT5R_ENA_STS_WIDTH 1 /* OUT5R_ENA_STS */
+#define ARIZONA_OUT4L_ENA_STS 0x0080 /* OUT4L_ENA_STS */
+#define ARIZONA_OUT4L_ENA_STS_MASK 0x0080 /* OUT4L_ENA_STS */
+#define ARIZONA_OUT4L_ENA_STS_SHIFT 7 /* OUT4L_ENA_STS */
+#define ARIZONA_OUT4L_ENA_STS_WIDTH 1 /* OUT4L_ENA_STS */
+#define ARIZONA_OUT4R_ENA_STS 0x0040 /* OUT4R_ENA_STS */
+#define ARIZONA_OUT4R_ENA_STS_MASK 0x0040 /* OUT4R_ENA_STS */
+#define ARIZONA_OUT4R_ENA_STS_SHIFT 6 /* OUT4R_ENA_STS */
+#define ARIZONA_OUT4R_ENA_STS_WIDTH 1 /* OUT4R_ENA_STS */
+
+/*
+ * R1032 (0x408) - Output Rate 1
+ */
+#define ARIZONA_OUT_RATE_MASK 0x7800 /* OUT_RATE - [14:11] */
+#define ARIZONA_OUT_RATE_SHIFT 11 /* OUT_RATE - [14:11] */
+#define ARIZONA_OUT_RATE_WIDTH 4 /* OUT_RATE - [14:11] */
+
+/*
+ * R1033 (0x409) - Output Volume Ramp
+ */
+#define ARIZONA_OUT_VD_RAMP_MASK 0x0070 /* OUT_VD_RAMP - [6:4] */
+#define ARIZONA_OUT_VD_RAMP_SHIFT 4 /* OUT_VD_RAMP - [6:4] */
+#define ARIZONA_OUT_VD_RAMP_WIDTH 3 /* OUT_VD_RAMP - [6:4] */
+#define ARIZONA_OUT_VI_RAMP_MASK 0x0007 /* OUT_VI_RAMP - [2:0] */
+#define ARIZONA_OUT_VI_RAMP_SHIFT 0 /* OUT_VI_RAMP - [2:0] */
+#define ARIZONA_OUT_VI_RAMP_WIDTH 3 /* OUT_VI_RAMP - [2:0] */
+
+/*
+ * R1040 (0x410) - Output Path Config 1L
+ */
+#define ARIZONA_OUT1_LP_MODE 0x8000 /* OUT1_LP_MODE */
+#define ARIZONA_OUT1_LP_MODE_MASK 0x8000 /* OUT1_LP_MODE */
+#define ARIZONA_OUT1_LP_MODE_SHIFT 15 /* OUT1_LP_MODE */
+#define ARIZONA_OUT1_LP_MODE_WIDTH 1 /* OUT1_LP_MODE */
+#define ARIZONA_OUT1_OSR 0x2000 /* OUT1_OSR */
+#define ARIZONA_OUT1_OSR_MASK 0x2000 /* OUT1_OSR */
+#define ARIZONA_OUT1_OSR_SHIFT 13 /* OUT1_OSR */
+#define ARIZONA_OUT1_OSR_WIDTH 1 /* OUT1_OSR */
+#define ARIZONA_OUT1_MONO 0x1000 /* OUT1_MONO */
+#define ARIZONA_OUT1_MONO_MASK 0x1000 /* OUT1_MONO */
+#define ARIZONA_OUT1_MONO_SHIFT 12 /* OUT1_MONO */
+#define ARIZONA_OUT1_MONO_WIDTH 1 /* OUT1_MONO */
+#define ARIZONA_OUT1L_ANC_SRC_MASK 0x0C00 /* OUT1L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1L_ANC_SRC_SHIFT 10 /* OUT1L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1L_ANC_SRC_WIDTH 2 /* OUT1L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1L_PGA_VOL_MASK 0x00FE /* OUT1L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT1L_PGA_VOL_SHIFT 1 /* OUT1L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT1L_PGA_VOL_WIDTH 7 /* OUT1L_PGA_VOL - [7:1] */
+
+/*
+ * R1041 (0x411) - DAC Digital Volume 1L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT1L_MUTE 0x0100 /* OUT1L_MUTE */
+#define ARIZONA_OUT1L_MUTE_MASK 0x0100 /* OUT1L_MUTE */
+#define ARIZONA_OUT1L_MUTE_SHIFT 8 /* OUT1L_MUTE */
+#define ARIZONA_OUT1L_MUTE_WIDTH 1 /* OUT1L_MUTE */
+#define ARIZONA_OUT1L_VOL_MASK 0x00FF /* OUT1L_VOL - [7:0] */
+#define ARIZONA_OUT1L_VOL_SHIFT 0 /* OUT1L_VOL - [7:0] */
+#define ARIZONA_OUT1L_VOL_WIDTH 8 /* OUT1L_VOL - [7:0] */
+
+/*
+ * R1042 (0x412) - DAC Volume Limit 1L
+ */
+#define ARIZONA_OUT1L_VOL_LIM_MASK 0x00FF /* OUT1L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT1L_VOL_LIM_SHIFT 0 /* OUT1L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT1L_VOL_LIM_WIDTH 8 /* OUT1L_VOL_LIM - [7:0] */
+
+/*
+ * R1043 (0x413) - Noise Gate Select 1L
+ */
+#define ARIZONA_OUT1L_NGATE_SRC_MASK 0x0FFF /* OUT1L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT1L_NGATE_SRC_SHIFT 0 /* OUT1L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT1L_NGATE_SRC_WIDTH 12 /* OUT1L_NGATE_SRC - [11:0] */
+
+/*
+ * R1044 (0x414) - Output Path Config 1R
+ */
+#define ARIZONA_OUT1R_ANC_SRC_MASK 0x0C00 /* OUT1R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1R_ANC_SRC_SHIFT 10 /* OUT1R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1R_ANC_SRC_WIDTH 2 /* OUT1R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT1R_PGA_VOL_MASK 0x00FE /* OUT1R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT1R_PGA_VOL_SHIFT 1 /* OUT1R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT1R_PGA_VOL_WIDTH 7 /* OUT1R_PGA_VOL - [7:1] */
+
+/*
+ * R1045 (0x415) - DAC Digital Volume 1R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT1R_MUTE 0x0100 /* OUT1R_MUTE */
+#define ARIZONA_OUT1R_MUTE_MASK 0x0100 /* OUT1R_MUTE */
+#define ARIZONA_OUT1R_MUTE_SHIFT 8 /* OUT1R_MUTE */
+#define ARIZONA_OUT1R_MUTE_WIDTH 1 /* OUT1R_MUTE */
+#define ARIZONA_OUT1R_VOL_MASK 0x00FF /* OUT1R_VOL - [7:0] */
+#define ARIZONA_OUT1R_VOL_SHIFT 0 /* OUT1R_VOL - [7:0] */
+#define ARIZONA_OUT1R_VOL_WIDTH 8 /* OUT1R_VOL - [7:0] */
+
+/*
+ * R1046 (0x416) - DAC Volume Limit 1R
+ */
+#define ARIZONA_OUT1R_VOL_LIM_MASK 0x00FF /* OUT1R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT1R_VOL_LIM_SHIFT 0 /* OUT1R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT1R_VOL_LIM_WIDTH 8 /* OUT1R_VOL_LIM - [7:0] */
+
+/*
+ * R1047 (0x417) - Noise Gate Select 1R
+ */
+#define ARIZONA_OUT1R_NGATE_SRC_MASK 0x0FFF /* OUT1R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT1R_NGATE_SRC_SHIFT 0 /* OUT1R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT1R_NGATE_SRC_WIDTH 12 /* OUT1R_NGATE_SRC - [11:0] */
+
+/*
+ * R1048 (0x418) - Output Path Config 2L
+ */
+#define ARIZONA_OUT2_LP_MODE 0x8000 /* OUT2_LP_MODE */
+#define ARIZONA_OUT2_LP_MODE_MASK 0x8000 /* OUT2_LP_MODE */
+#define ARIZONA_OUT2_LP_MODE_SHIFT 15 /* OUT2_LP_MODE */
+#define ARIZONA_OUT2_LP_MODE_WIDTH 1 /* OUT2_LP_MODE */
+#define ARIZONA_OUT2_OSR 0x2000 /* OUT2_OSR */
+#define ARIZONA_OUT2_OSR_MASK 0x2000 /* OUT2_OSR */
+#define ARIZONA_OUT2_OSR_SHIFT 13 /* OUT2_OSR */
+#define ARIZONA_OUT2_OSR_WIDTH 1 /* OUT2_OSR */
+#define ARIZONA_OUT2_MONO 0x1000 /* OUT2_MONO */
+#define ARIZONA_OUT2_MONO_MASK 0x1000 /* OUT2_MONO */
+#define ARIZONA_OUT2_MONO_SHIFT 12 /* OUT2_MONO */
+#define ARIZONA_OUT2_MONO_WIDTH 1 /* OUT2_MONO */
+#define ARIZONA_OUT2L_ANC_SRC_MASK 0x0C00 /* OUT2L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2L_ANC_SRC_SHIFT 10 /* OUT2L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2L_ANC_SRC_WIDTH 2 /* OUT2L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2L_PGA_VOL_MASK 0x00FE /* OUT2L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT2L_PGA_VOL_SHIFT 1 /* OUT2L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT2L_PGA_VOL_WIDTH 7 /* OUT2L_PGA_VOL - [7:1] */
+
+/*
+ * R1049 (0x419) - DAC Digital Volume 2L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT2L_MUTE 0x0100 /* OUT2L_MUTE */
+#define ARIZONA_OUT2L_MUTE_MASK 0x0100 /* OUT2L_MUTE */
+#define ARIZONA_OUT2L_MUTE_SHIFT 8 /* OUT2L_MUTE */
+#define ARIZONA_OUT2L_MUTE_WIDTH 1 /* OUT2L_MUTE */
+#define ARIZONA_OUT2L_VOL_MASK 0x00FF /* OUT2L_VOL - [7:0] */
+#define ARIZONA_OUT2L_VOL_SHIFT 0 /* OUT2L_VOL - [7:0] */
+#define ARIZONA_OUT2L_VOL_WIDTH 8 /* OUT2L_VOL - [7:0] */
+
+/*
+ * R1050 (0x41A) - DAC Volume Limit 2L
+ */
+#define ARIZONA_OUT2L_VOL_LIM_MASK 0x00FF /* OUT2L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT2L_VOL_LIM_SHIFT 0 /* OUT2L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT2L_VOL_LIM_WIDTH 8 /* OUT2L_VOL_LIM - [7:0] */
+
+/*
+ * R1051 (0x41B) - Noise Gate Select 2L
+ */
+#define ARIZONA_OUT2L_NGATE_SRC_MASK 0x0FFF /* OUT2L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT2L_NGATE_SRC_SHIFT 0 /* OUT2L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT2L_NGATE_SRC_WIDTH 12 /* OUT2L_NGATE_SRC - [11:0] */
+
+/*
+ * R1052 (0x41C) - Output Path Config 2R
+ */
+#define ARIZONA_OUT2R_ANC_SRC_MASK 0x0C00 /* OUT2R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2R_ANC_SRC_SHIFT 10 /* OUT2R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2R_ANC_SRC_WIDTH 2 /* OUT2R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT2R_PGA_VOL_MASK 0x00FE /* OUT2R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT2R_PGA_VOL_SHIFT 1 /* OUT2R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT2R_PGA_VOL_WIDTH 7 /* OUT2R_PGA_VOL - [7:1] */
+
+/*
+ * R1053 (0x41D) - DAC Digital Volume 2R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT2R_MUTE 0x0100 /* OUT2R_MUTE */
+#define ARIZONA_OUT2R_MUTE_MASK 0x0100 /* OUT2R_MUTE */
+#define ARIZONA_OUT2R_MUTE_SHIFT 8 /* OUT2R_MUTE */
+#define ARIZONA_OUT2R_MUTE_WIDTH 1 /* OUT2R_MUTE */
+#define ARIZONA_OUT2R_VOL_MASK 0x00FF /* OUT2R_VOL - [7:0] */
+#define ARIZONA_OUT2R_VOL_SHIFT 0 /* OUT2R_VOL - [7:0] */
+#define ARIZONA_OUT2R_VOL_WIDTH 8 /* OUT2R_VOL - [7:0] */
+
+/*
+ * R1054 (0x41E) - DAC Volume Limit 2R
+ */
+#define ARIZONA_OUT2R_VOL_LIM_MASK 0x00FF /* OUT2R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT2R_VOL_LIM_SHIFT 0 /* OUT2R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT2R_VOL_LIM_WIDTH 8 /* OUT2R_VOL_LIM - [7:0] */
+
+/*
+ * R1055 (0x41F) - Noise Gate Select 2R
+ */
+#define ARIZONA_OUT2R_NGATE_SRC_MASK 0x0FFF /* OUT2R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT2R_NGATE_SRC_SHIFT 0 /* OUT2R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT2R_NGATE_SRC_WIDTH 12 /* OUT2R_NGATE_SRC - [11:0] */
+
+/*
+ * R1056 (0x420) - Output Path Config 3L
+ */
+#define ARIZONA_OUT3_LP_MODE 0x8000 /* OUT3_LP_MODE */
+#define ARIZONA_OUT3_LP_MODE_MASK 0x8000 /* OUT3_LP_MODE */
+#define ARIZONA_OUT3_LP_MODE_SHIFT 15 /* OUT3_LP_MODE */
+#define ARIZONA_OUT3_LP_MODE_WIDTH 1 /* OUT3_LP_MODE */
+#define ARIZONA_OUT3_OSR 0x2000 /* OUT3_OSR */
+#define ARIZONA_OUT3_OSR_MASK 0x2000 /* OUT3_OSR */
+#define ARIZONA_OUT3_OSR_SHIFT 13 /* OUT3_OSR */
+#define ARIZONA_OUT3_OSR_WIDTH 1 /* OUT3_OSR */
+#define ARIZONA_OUT3_MONO 0x1000 /* OUT3_MONO */
+#define ARIZONA_OUT3_MONO_MASK 0x1000 /* OUT3_MONO */
+#define ARIZONA_OUT3_MONO_SHIFT 12 /* OUT3_MONO */
+#define ARIZONA_OUT3_MONO_WIDTH 1 /* OUT3_MONO */
+#define ARIZONA_OUT3L_ANC_SRC_MASK 0x0C00 /* OUT3L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3L_ANC_SRC_SHIFT 10 /* OUT3L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3L_ANC_SRC_WIDTH 2 /* OUT3L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3L_PGA_VOL_MASK 0x00FE /* OUT3L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT3L_PGA_VOL_SHIFT 1 /* OUT3L_PGA_VOL - [7:1] */
+#define ARIZONA_OUT3L_PGA_VOL_WIDTH 7 /* OUT3L_PGA_VOL - [7:1] */
+
+/*
+ * R1057 (0x421) - DAC Digital Volume 3L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT3L_MUTE 0x0100 /* OUT3L_MUTE */
+#define ARIZONA_OUT3L_MUTE_MASK 0x0100 /* OUT3L_MUTE */
+#define ARIZONA_OUT3L_MUTE_SHIFT 8 /* OUT3L_MUTE */
+#define ARIZONA_OUT3L_MUTE_WIDTH 1 /* OUT3L_MUTE */
+#define ARIZONA_OUT3L_VOL_MASK 0x00FF /* OUT3L_VOL - [7:0] */
+#define ARIZONA_OUT3L_VOL_SHIFT 0 /* OUT3L_VOL - [7:0] */
+#define ARIZONA_OUT3L_VOL_WIDTH 8 /* OUT3L_VOL - [7:0] */
+
+/*
+ * R1058 (0x422) - DAC Volume Limit 3L
+ */
+#define ARIZONA_OUT3L_VOL_LIM_MASK 0x00FF /* OUT3L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT3L_VOL_LIM_SHIFT 0 /* OUT3L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT3L_VOL_LIM_WIDTH 8 /* OUT3L_VOL_LIM - [7:0] */
+
+/*
+ * R1059 (0x423) - Noise Gate Select 3L
+ */
+#define ARIZONA_OUT3_NGATE_SRC_MASK 0x0FFF /* OUT3_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT3_NGATE_SRC_SHIFT 0 /* OUT3_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT3_NGATE_SRC_WIDTH 12 /* OUT3_NGATE_SRC - [11:0] */
+
+/*
+ * R1060 (0x424) - Output Path Config 3R
+ */
+#define ARIZONA_OUT3R_PGA_VOL_MASK 0x00FE /* OUT3R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT3R_PGA_VOL_SHIFT 1 /* OUT3R_PGA_VOL - [7:1] */
+#define ARIZONA_OUT3R_PGA_VOL_WIDTH 7 /* OUT3R_PGA_VOL - [7:1] */
+
+/*
+ * R1061 (0x425) - DAC Digital Volume 3R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT3R_MUTE 0x0100 /* OUT3R_MUTE */
+#define ARIZONA_OUT3R_MUTE_MASK 0x0100 /* OUT3R_MUTE */
+#define ARIZONA_OUT3R_MUTE_SHIFT 8 /* OUT3R_MUTE */
+#define ARIZONA_OUT3R_MUTE_WIDTH 1 /* OUT3R_MUTE */
+#define ARIZONA_OUT3R_VOL_MASK 0x00FF /* OUT3R_VOL - [7:0] */
+#define ARIZONA_OUT3R_VOL_SHIFT 0 /* OUT3R_VOL - [7:0] */
+#define ARIZONA_OUT3R_VOL_WIDTH 8 /* OUT3R_VOL - [7:0] */
+
+/*
+ * R1062 (0x426) - DAC Volume Limit 3R
+ */
+#define ARIZONA_OUT3R_ANC_SRC_MASK 0x0C00 /* OUT3R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3R_ANC_SRC_SHIFT 10 /* OUT3R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3R_ANC_SRC_WIDTH 2 /* OUT3R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT3R_VOL_LIM_MASK 0x00FF /* OUT3R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT3R_VOL_LIM_SHIFT 0 /* OUT3R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT3R_VOL_LIM_WIDTH 8 /* OUT3R_VOL_LIM - [7:0] */
+
+/*
+ * R1064 (0x428) - Output Path Config 4L
+ */
+#define ARIZONA_OUT4_OSR 0x2000 /* OUT4_OSR */
+#define ARIZONA_OUT4_OSR_MASK 0x2000 /* OUT4_OSR */
+#define ARIZONA_OUT4_OSR_SHIFT 13 /* OUT4_OSR */
+#define ARIZONA_OUT4_OSR_WIDTH 1 /* OUT4_OSR */
+#define ARIZONA_OUT4L_ANC_SRC_MASK 0x0C00 /* OUT4L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT4L_ANC_SRC_SHIFT 10 /* OUT4L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT4L_ANC_SRC_WIDTH 2 /* OUT4L_ANC_SRC - [11:10] */
+
+/*
+ * R1065 (0x429) - DAC Digital Volume 4L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT4L_MUTE 0x0100 /* OUT4L_MUTE */
+#define ARIZONA_OUT4L_MUTE_MASK 0x0100 /* OUT4L_MUTE */
+#define ARIZONA_OUT4L_MUTE_SHIFT 8 /* OUT4L_MUTE */
+#define ARIZONA_OUT4L_MUTE_WIDTH 1 /* OUT4L_MUTE */
+#define ARIZONA_OUT4L_VOL_MASK 0x00FF /* OUT4L_VOL - [7:0] */
+#define ARIZONA_OUT4L_VOL_SHIFT 0 /* OUT4L_VOL - [7:0] */
+#define ARIZONA_OUT4L_VOL_WIDTH 8 /* OUT4L_VOL - [7:0] */
+
+/*
+ * R1066 (0x42A) - Out Volume 4L
+ */
+#define ARIZONA_OUT4L_VOL_LIM_MASK 0x00FF /* OUT4L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT4L_VOL_LIM_SHIFT 0 /* OUT4L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT4L_VOL_LIM_WIDTH 8 /* OUT4L_VOL_LIM - [7:0] */
+
+/*
+ * R1067 (0x42B) - Noise Gate Select 4L
+ */
+#define ARIZONA_OUT4L_NGATE_SRC_MASK 0x0FFF /* OUT4L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT4L_NGATE_SRC_SHIFT 0 /* OUT4L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT4L_NGATE_SRC_WIDTH 12 /* OUT4L_NGATE_SRC - [11:0] */
+
+/*
+ * R1068 (0x42C) - Output Path Config 4R
+ */
+#define ARIZONA_OUT4R_ANC_SRC_MASK 0x0C00 /* OUT4R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT4R_ANC_SRC_SHIFT 10 /* OUT4R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT4R_ANC_SRC_WIDTH 2 /* OUT4R_ANC_SRC - [11:10] */
+
+/*
+ * R1069 (0x42D) - DAC Digital Volume 4R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT4R_MUTE 0x0100 /* OUT4R_MUTE */
+#define ARIZONA_OUT4R_MUTE_MASK 0x0100 /* OUT4R_MUTE */
+#define ARIZONA_OUT4R_MUTE_SHIFT 8 /* OUT4R_MUTE */
+#define ARIZONA_OUT4R_MUTE_WIDTH 1 /* OUT4R_MUTE */
+#define ARIZONA_OUT4R_VOL_MASK 0x00FF /* OUT4R_VOL - [7:0] */
+#define ARIZONA_OUT4R_VOL_SHIFT 0 /* OUT4R_VOL - [7:0] */
+#define ARIZONA_OUT4R_VOL_WIDTH 8 /* OUT4R_VOL - [7:0] */
+
+/*
+ * R1070 (0x42E) - Out Volume 4R
+ */
+#define ARIZONA_OUT4R_VOL_LIM_MASK 0x00FF /* OUT4R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT4R_VOL_LIM_SHIFT 0 /* OUT4R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT4R_VOL_LIM_WIDTH 8 /* OUT4R_VOL_LIM - [7:0] */
+
+/*
+ * R1071 (0x42F) - Noise Gate Select 4R
+ */
+#define ARIZONA_OUT4R_NGATE_SRC_MASK 0x0FFF /* OUT4R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT4R_NGATE_SRC_SHIFT 0 /* OUT4R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT4R_NGATE_SRC_WIDTH 12 /* OUT4R_NGATE_SRC - [11:0] */
+
+/*
+ * R1072 (0x430) - Output Path Config 5L
+ */
+#define ARIZONA_OUT5_OSR 0x2000 /* OUT5_OSR */
+#define ARIZONA_OUT5_OSR_MASK 0x2000 /* OUT5_OSR */
+#define ARIZONA_OUT5_OSR_SHIFT 13 /* OUT5_OSR */
+#define ARIZONA_OUT5_OSR_WIDTH 1 /* OUT5_OSR */
+#define ARIZONA_OUT5L_ANC_SRC_MASK 0x0C00 /* OUT5L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT5L_ANC_SRC_SHIFT 10 /* OUT5L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT5L_ANC_SRC_WIDTH 2 /* OUT5L_ANC_SRC - [11:10] */
+
+/*
+ * R1073 (0x431) - DAC Digital Volume 5L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT5L_MUTE 0x0100 /* OUT5L_MUTE */
+#define ARIZONA_OUT5L_MUTE_MASK 0x0100 /* OUT5L_MUTE */
+#define ARIZONA_OUT5L_MUTE_SHIFT 8 /* OUT5L_MUTE */
+#define ARIZONA_OUT5L_MUTE_WIDTH 1 /* OUT5L_MUTE */
+#define ARIZONA_OUT5L_VOL_MASK 0x00FF /* OUT5L_VOL - [7:0] */
+#define ARIZONA_OUT5L_VOL_SHIFT 0 /* OUT5L_VOL - [7:0] */
+#define ARIZONA_OUT5L_VOL_WIDTH 8 /* OUT5L_VOL - [7:0] */
+
+/*
+ * R1074 (0x432) - DAC Volume Limit 5L
+ */
+#define ARIZONA_OUT5L_VOL_LIM_MASK 0x00FF /* OUT5L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT5L_VOL_LIM_SHIFT 0 /* OUT5L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT5L_VOL_LIM_WIDTH 8 /* OUT5L_VOL_LIM - [7:0] */
+
+/*
+ * R1075 (0x433) - Noise Gate Select 5L
+ */
+#define ARIZONA_OUT5L_NGATE_SRC_MASK 0x0FFF /* OUT5L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT5L_NGATE_SRC_SHIFT 0 /* OUT5L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT5L_NGATE_SRC_WIDTH 12 /* OUT5L_NGATE_SRC - [11:0] */
+
+/*
+ * R1076 (0x434) - Output Path Config 5R
+ */
+#define ARIZONA_OUT5R_ANC_SRC_MASK 0x0C00 /* OUT5R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT5R_ANC_SRC_SHIFT 10 /* OUT5R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT5R_ANC_SRC_WIDTH 2 /* OUT5R_ANC_SRC - [11:10] */
+
+/*
+ * R1077 (0x435) - DAC Digital Volume 5R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT5R_MUTE 0x0100 /* OUT5R_MUTE */
+#define ARIZONA_OUT5R_MUTE_MASK 0x0100 /* OUT5R_MUTE */
+#define ARIZONA_OUT5R_MUTE_SHIFT 8 /* OUT5R_MUTE */
+#define ARIZONA_OUT5R_MUTE_WIDTH 1 /* OUT5R_MUTE */
+#define ARIZONA_OUT5R_VOL_MASK 0x00FF /* OUT5R_VOL - [7:0] */
+#define ARIZONA_OUT5R_VOL_SHIFT 0 /* OUT5R_VOL - [7:0] */
+#define ARIZONA_OUT5R_VOL_WIDTH 8 /* OUT5R_VOL - [7:0] */
+
+/*
+ * R1078 (0x436) - DAC Volume Limit 5R
+ */
+#define ARIZONA_OUT5R_VOL_LIM_MASK 0x00FF /* OUT5R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT5R_VOL_LIM_SHIFT 0 /* OUT5R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT5R_VOL_LIM_WIDTH 8 /* OUT5R_VOL_LIM - [7:0] */
+
+/*
+ * R1079 (0x437) - Noise Gate Select 5R
+ */
+#define ARIZONA_OUT5R_NGATE_SRC_MASK 0x0FFF /* OUT5R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT5R_NGATE_SRC_SHIFT 0 /* OUT5R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT5R_NGATE_SRC_WIDTH 12 /* OUT5R_NGATE_SRC - [11:0] */
+
+/*
+ * R1080 (0x438) - Output Path Config 6L
+ */
+#define ARIZONA_OUT6_OSR 0x2000 /* OUT6_OSR */
+#define ARIZONA_OUT6_OSR_MASK 0x2000 /* OUT6_OSR */
+#define ARIZONA_OUT6_OSR_SHIFT 13 /* OUT6_OSR */
+#define ARIZONA_OUT6_OSR_WIDTH 1 /* OUT6_OSR */
+#define ARIZONA_OUT6L_ANC_SRC_MASK 0x0C00 /* OUT6L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT6L_ANC_SRC_SHIFT 10 /* OUT6L_ANC_SRC - [11:10] */
+#define ARIZONA_OUT6L_ANC_SRC_WIDTH 2 /* OUT6L_ANC_SRC - [11:10] */
+
+/*
+ * R1081 (0x439) - DAC Digital Volume 6L
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT6L_MUTE 0x0100 /* OUT6L_MUTE */
+#define ARIZONA_OUT6L_MUTE_MASK 0x0100 /* OUT6L_MUTE */
+#define ARIZONA_OUT6L_MUTE_SHIFT 8 /* OUT6L_MUTE */
+#define ARIZONA_OUT6L_MUTE_WIDTH 1 /* OUT6L_MUTE */
+#define ARIZONA_OUT6L_VOL_MASK 0x00FF /* OUT6L_VOL - [7:0] */
+#define ARIZONA_OUT6L_VOL_SHIFT 0 /* OUT6L_VOL - [7:0] */
+#define ARIZONA_OUT6L_VOL_WIDTH 8 /* OUT6L_VOL - [7:0] */
+
+/*
+ * R1082 (0x43A) - DAC Volume Limit 6L
+ */
+#define ARIZONA_OUT6L_VOL_LIM_MASK 0x00FF /* OUT6L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT6L_VOL_LIM_SHIFT 0 /* OUT6L_VOL_LIM - [7:0] */
+#define ARIZONA_OUT6L_VOL_LIM_WIDTH 8 /* OUT6L_VOL_LIM - [7:0] */
+
+/*
+ * R1083 (0x43B) - Noise Gate Select 6L
+ */
+#define ARIZONA_OUT6L_NGATE_SRC_MASK 0x0FFF /* OUT6L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT6L_NGATE_SRC_SHIFT 0 /* OUT6L_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT6L_NGATE_SRC_WIDTH 12 /* OUT6L_NGATE_SRC - [11:0] */
+
+/*
+ * R1084 (0x43C) - Output Path Config 6R
+ */
+#define ARIZONA_OUT6R_ANC_SRC_MASK 0x0C00 /* OUT6R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT6R_ANC_SRC_SHIFT 10 /* OUT6R_ANC_SRC - [11:10] */
+#define ARIZONA_OUT6R_ANC_SRC_WIDTH 2 /* OUT6R_ANC_SRC - [11:10] */
+
+/*
+ * R1085 (0x43D) - DAC Digital Volume 6R
+ */
+#define ARIZONA_OUT_VU 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_MASK 0x0200 /* OUT_VU */
+#define ARIZONA_OUT_VU_SHIFT 9 /* OUT_VU */
+#define ARIZONA_OUT_VU_WIDTH 1 /* OUT_VU */
+#define ARIZONA_OUT6R_MUTE 0x0100 /* OUT6R_MUTE */
+#define ARIZONA_OUT6R_MUTE_MASK 0x0100 /* OUT6R_MUTE */
+#define ARIZONA_OUT6R_MUTE_SHIFT 8 /* OUT6R_MUTE */
+#define ARIZONA_OUT6R_MUTE_WIDTH 1 /* OUT6R_MUTE */
+#define ARIZONA_OUT6R_VOL_MASK 0x00FF /* OUT6R_VOL - [7:0] */
+#define ARIZONA_OUT6R_VOL_SHIFT 0 /* OUT6R_VOL - [7:0] */
+#define ARIZONA_OUT6R_VOL_WIDTH 8 /* OUT6R_VOL - [7:0] */
+
+/*
+ * R1086 (0x43E) - DAC Volume Limit 6R
+ */
+#define ARIZONA_OUT6R_VOL_LIM_MASK 0x00FF /* OUT6R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT6R_VOL_LIM_SHIFT 0 /* OUT6R_VOL_LIM - [7:0] */
+#define ARIZONA_OUT6R_VOL_LIM_WIDTH 8 /* OUT6R_VOL_LIM - [7:0] */
+
+/*
+ * R1087 (0x43F) - Noise Gate Select 6R
+ */
+#define ARIZONA_OUT6R_NGATE_SRC_MASK 0x0FFF /* OUT6R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT6R_NGATE_SRC_SHIFT 0 /* OUT6R_NGATE_SRC - [11:0] */
+#define ARIZONA_OUT6R_NGATE_SRC_WIDTH 12 /* OUT6R_NGATE_SRC - [11:0] */
+
+/*
+ * R1088 (0x440) - DRE Enable
+ */
+#define ARIZONA_DRE3R_ENA 0x0020 /* DRE3R_ENA */
+#define ARIZONA_DRE3R_ENA_MASK 0x0020 /* DRE3R_ENA */
+#define ARIZONA_DRE3R_ENA_SHIFT 5 /* DRE3R_ENA */
+#define ARIZONA_DRE3R_ENA_WIDTH 1 /* DRE3R_ENA */
+#define ARIZONA_DRE3L_ENA 0x0010 /* DRE3L_ENA */
+#define ARIZONA_DRE3L_ENA_MASK 0x0010 /* DRE3L_ENA */
+#define ARIZONA_DRE3L_ENA_SHIFT 4 /* DRE3L_ENA */
+#define ARIZONA_DRE3L_ENA_WIDTH 1 /* DRE3L_ENA */
+#define ARIZONA_DRE2R_ENA 0x0008 /* DRE2R_ENA */
+#define ARIZONA_DRE2R_ENA_MASK 0x0008 /* DRE2R_ENA */
+#define ARIZONA_DRE2R_ENA_SHIFT 3 /* DRE2R_ENA */
+#define ARIZONA_DRE2R_ENA_WIDTH 1 /* DRE2R_ENA */
+#define ARIZONA_DRE2L_ENA 0x0004 /* DRE2L_ENA */
+#define ARIZONA_DRE2L_ENA_MASK 0x0004 /* DRE2L_ENA */
+#define ARIZONA_DRE2L_ENA_SHIFT 2 /* DRE2L_ENA */
+#define ARIZONA_DRE2L_ENA_WIDTH 1 /* DRE2L_ENA */
+#define ARIZONA_DRE1R_ENA 0x0002 /* DRE1R_ENA */
+#define ARIZONA_DRE1R_ENA_MASK 0x0002 /* DRE1R_ENA */
+#define ARIZONA_DRE1R_ENA_SHIFT 1 /* DRE1R_ENA */
+#define ARIZONA_DRE1R_ENA_WIDTH 1 /* DRE1R_ENA */
+#define ARIZONA_DRE1L_ENA 0x0001 /* DRE1L_ENA */
+#define ARIZONA_DRE1L_ENA_MASK 0x0001 /* DRE1L_ENA */
+#define ARIZONA_DRE1L_ENA_SHIFT 0 /* DRE1L_ENA */
+#define ARIZONA_DRE1L_ENA_WIDTH 1 /* DRE1L_ENA */
+
+/*
+ * R1090 (0x442) - DRE Control 2
+ */
+#define ARIZONA_DRE_T_LOW_MASK 0x3F00 /* DRE_T_LOW - [13:8] */
+#define ARIZONA_DRE_T_LOW_SHIFT 8 /* DRE_T_LOW - [13:8] */
+#define ARIZONA_DRE_T_LOW_WIDTH 6 /* DRE_T_LOW - [13:8] */
+
+/*
+ * R1091 (0x443) - DRE Control 3
+ */
+#define ARIZONA_DRE_GAIN_SHIFT_MASK 0xC000 /* DRE_GAIN_SHIFT - [15:14] */
+#define ARIZONA_DRE_GAIN_SHIFT_SHIFT 14 /* DRE_GAIN_SHIFT - [15:14] */
+#define ARIZONA_DRE_GAIN_SHIFT_WIDTH 2 /* DRE_GAIN_SHIFT - [15:14] */
+#define ARIZONA_DRE_LOW_LEVEL_ABS_MASK 0x000F /* LOW_LEVEL_ABS - [3:0] */
+#define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT 0 /* LOW_LEVEL_ABS - [3:0] */
+#define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH 4 /* LOW_LEVEL_ABS - [3:0] */
+
+/*
+ * R1104 (0x450) - DAC AEC Control 1
+ */
+#define ARIZONA_AEC_LOOPBACK_SRC_MASK 0x003C /* AEC_LOOPBACK_SRC - [5:2] */
+#define ARIZONA_AEC_LOOPBACK_SRC_SHIFT 2 /* AEC_LOOPBACK_SRC - [5:2] */
+#define ARIZONA_AEC_LOOPBACK_SRC_WIDTH 4 /* AEC_LOOPBACK_SRC - [5:2] */
+#define ARIZONA_AEC_ENA_STS 0x0002 /* AEC_ENA_STS */
+#define ARIZONA_AEC_ENA_STS_MASK 0x0002 /* AEC_ENA_STS */
+#define ARIZONA_AEC_ENA_STS_SHIFT 1 /* AEC_ENA_STS */
+#define ARIZONA_AEC_ENA_STS_WIDTH 1 /* AEC_ENA_STS */
+#define ARIZONA_AEC_LOOPBACK_ENA 0x0001 /* AEC_LOOPBACK_ENA */
+#define ARIZONA_AEC_LOOPBACK_ENA_MASK 0x0001 /* AEC_LOOPBACK_ENA */
+#define ARIZONA_AEC_LOOPBACK_ENA_SHIFT 0 /* AEC_LOOPBACK_ENA */
+#define ARIZONA_AEC_LOOPBACK_ENA_WIDTH 1 /* AEC_LOOPBACK_ENA */
+
+/*
+ * R1112 (0x458) - Noise Gate Control
+ */
+#define ARIZONA_NGATE_HOLD_MASK 0x0030 /* NGATE_HOLD - [5:4] */
+#define ARIZONA_NGATE_HOLD_SHIFT 4 /* NGATE_HOLD - [5:4] */
+#define ARIZONA_NGATE_HOLD_WIDTH 2 /* NGATE_HOLD - [5:4] */
+#define ARIZONA_NGATE_THR_MASK 0x000E /* NGATE_THR - [3:1] */
+#define ARIZONA_NGATE_THR_SHIFT 1 /* NGATE_THR - [3:1] */
+#define ARIZONA_NGATE_THR_WIDTH 3 /* NGATE_THR - [3:1] */
+#define ARIZONA_NGATE_ENA 0x0001 /* NGATE_ENA */
+#define ARIZONA_NGATE_ENA_MASK 0x0001 /* NGATE_ENA */
+#define ARIZONA_NGATE_ENA_SHIFT 0 /* NGATE_ENA */
+#define ARIZONA_NGATE_ENA_WIDTH 1 /* NGATE_ENA */
+
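+/*
+ * Illustrative read-back (a sketch, not part of the original register map):
+ * the same _MASK/_SHIFT pairs decode values read from the chip.  Assuming the
+ * register address macro ARIZONA_NOISE_GATE_CONTROL is defined earlier in
+ * this header:
+ *
+ *	unsigned int val, thr;
+ *
+ *	regmap_read(regmap, ARIZONA_NOISE_GATE_CONTROL, &val);
+ *	thr = (val & ARIZONA_NGATE_THR_MASK) >> ARIZONA_NGATE_THR_SHIFT;
+ *
+ * thr then holds the 3-bit NGATE_THR field ([3:1]) as a plain integer.
+ */
+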
+/*
+ * R1168 (0x490) - PDM SPK1 CTRL 1
+ */
+#define ARIZONA_SPK1R_MUTE 0x2000 /* SPK1R_MUTE */
+#define ARIZONA_SPK1R_MUTE_MASK 0x2000 /* SPK1R_MUTE */
+#define ARIZONA_SPK1R_MUTE_SHIFT 13 /* SPK1R_MUTE */
+#define ARIZONA_SPK1R_MUTE_WIDTH 1 /* SPK1R_MUTE */
+#define ARIZONA_SPK1L_MUTE 0x1000 /* SPK1L_MUTE */
+#define ARIZONA_SPK1L_MUTE_MASK 0x1000 /* SPK1L_MUTE */
+#define ARIZONA_SPK1L_MUTE_SHIFT 12 /* SPK1L_MUTE */
+#define ARIZONA_SPK1L_MUTE_WIDTH 1 /* SPK1L_MUTE */
+#define ARIZONA_SPK1_MUTE_ENDIAN 0x0100 /* SPK1_MUTE_ENDIAN */
+#define ARIZONA_SPK1_MUTE_ENDIAN_MASK 0x0100 /* SPK1_MUTE_ENDIAN */
+#define ARIZONA_SPK1_MUTE_ENDIAN_SHIFT 8 /* SPK1_MUTE_ENDIAN */
+#define ARIZONA_SPK1_MUTE_ENDIAN_WIDTH 1 /* SPK1_MUTE_ENDIAN */
+#define ARIZONA_SPK1_MUTE_SEQ1_MASK 0x00FF /* SPK1_MUTE_SEQ1 - [7:0] */
+#define ARIZONA_SPK1_MUTE_SEQ1_SHIFT 0 /* SPK1_MUTE_SEQ1 - [7:0] */
+#define ARIZONA_SPK1_MUTE_SEQ1_WIDTH 8 /* SPK1_MUTE_SEQ1 - [7:0] */
+
+/*
+ * R1169 (0x491) - PDM SPK1 CTRL 2
+ */
+#define ARIZONA_SPK1_FMT 0x0001 /* SPK1_FMT */
+#define ARIZONA_SPK1_FMT_MASK 0x0001 /* SPK1_FMT */
+#define ARIZONA_SPK1_FMT_SHIFT 0 /* SPK1_FMT */
+#define ARIZONA_SPK1_FMT_WIDTH 1 /* SPK1_FMT */
+
+/*
+ * R1170 (0x492) - PDM SPK2 CTRL 1
+ */
+#define ARIZONA_SPK2R_MUTE 0x2000 /* SPK2R_MUTE */
+#define ARIZONA_SPK2R_MUTE_MASK 0x2000 /* SPK2R_MUTE */
+#define ARIZONA_SPK2R_MUTE_SHIFT 13 /* SPK2R_MUTE */
+#define ARIZONA_SPK2R_MUTE_WIDTH 1 /* SPK2R_MUTE */
+#define ARIZONA_SPK2L_MUTE 0x1000 /* SPK2L_MUTE */
+#define ARIZONA_SPK2L_MUTE_MASK 0x1000 /* SPK2L_MUTE */
+#define ARIZONA_SPK2L_MUTE_SHIFT 12 /* SPK2L_MUTE */
+#define ARIZONA_SPK2L_MUTE_WIDTH 1 /* SPK2L_MUTE */
+#define ARIZONA_SPK2_MUTE_ENDIAN 0x0100 /* SPK2_MUTE_ENDIAN */
+#define ARIZONA_SPK2_MUTE_ENDIAN_MASK 0x0100 /* SPK2_MUTE_ENDIAN */
+#define ARIZONA_SPK2_MUTE_ENDIAN_SHIFT 8 /* SPK2_MUTE_ENDIAN */
+#define ARIZONA_SPK2_MUTE_ENDIAN_WIDTH 1 /* SPK2_MUTE_ENDIAN */
+#define ARIZONA_SPK2_MUTE_SEQ_MASK 0x00FF /* SPK2_MUTE_SEQ - [7:0] */
+#define ARIZONA_SPK2_MUTE_SEQ_SHIFT 0 /* SPK2_MUTE_SEQ - [7:0] */
+#define ARIZONA_SPK2_MUTE_SEQ_WIDTH 8 /* SPK2_MUTE_SEQ - [7:0] */
+
+/*
+ * R1171 (0x493) - PDM SPK2 CTRL 2
+ */
+#define ARIZONA_SPK2_FMT 0x0001 /* SPK2_FMT */
+#define ARIZONA_SPK2_FMT_MASK 0x0001 /* SPK2_FMT */
+#define ARIZONA_SPK2_FMT_SHIFT 0 /* SPK2_FMT */
+#define ARIZONA_SPK2_FMT_WIDTH 1 /* SPK2_FMT */
+
+/*
+ * R1184 (0x4A0) - HP1 Short Circuit Ctrl
+ */
+#define ARIZONA_HP1_SC_ENA 0x1000 /* HP1_SC_ENA */
+#define ARIZONA_HP1_SC_ENA_MASK 0x1000 /* HP1_SC_ENA */
+#define ARIZONA_HP1_SC_ENA_SHIFT 12 /* HP1_SC_ENA */
+#define ARIZONA_HP1_SC_ENA_WIDTH 1 /* HP1_SC_ENA */
+
+/*
+ * R1185 (0x4A1) - HP2 Short Circuit Ctrl
+ */
+#define ARIZONA_HP2_SC_ENA 0x1000 /* HP2_SC_ENA */
+#define ARIZONA_HP2_SC_ENA_MASK 0x1000 /* HP2_SC_ENA */
+#define ARIZONA_HP2_SC_ENA_SHIFT 12 /* HP2_SC_ENA */
+#define ARIZONA_HP2_SC_ENA_WIDTH 1 /* HP2_SC_ENA */
+
+/*
+ * R1186 (0x4A2) - HP3 Short Circuit Ctrl
+ */
+#define ARIZONA_HP3_SC_ENA 0x1000 /* HP3_SC_ENA */
+#define ARIZONA_HP3_SC_ENA_MASK 0x1000 /* HP3_SC_ENA */
+#define ARIZONA_HP3_SC_ENA_SHIFT 12 /* HP3_SC_ENA */
+#define ARIZONA_HP3_SC_ENA_WIDTH 1 /* HP3_SC_ENA */
+
+/*
+ * R1244 (0x4DC) - DAC comp 1
+ */
+#define ARIZONA_OUT_COMP_COEFF_MASK 0xFFFF /* OUT_COMP_COEFF - [15:0] */
+#define ARIZONA_OUT_COMP_COEFF_SHIFT 0 /* OUT_COMP_COEFF - [15:0] */
+#define ARIZONA_OUT_COMP_COEFF_WIDTH 16 /* OUT_COMP_COEFF - [15:0] */
+
+/*
+ * R1245 (0x4DD) - DAC comp 2
+ */
+#define ARIZONA_OUT_COMP_COEFF_1 0x0002 /* OUT_COMP_COEFF */
+#define ARIZONA_OUT_COMP_COEFF_1_MASK 0x0002 /* OUT_COMP_COEFF */
+#define ARIZONA_OUT_COMP_COEFF_1_SHIFT 1 /* OUT_COMP_COEFF */
+#define ARIZONA_OUT_COMP_COEFF_1_WIDTH 1 /* OUT_COMP_COEFF */
+#define ARIZONA_OUT_COMP_COEFF_SEL 0x0001 /* OUT_COMP_COEFF_SEL */
+#define ARIZONA_OUT_COMP_COEFF_SEL_MASK 0x0001 /* OUT_COMP_COEFF_SEL */
+#define ARIZONA_OUT_COMP_COEFF_SEL_SHIFT 0 /* OUT_COMP_COEFF_SEL */
+#define ARIZONA_OUT_COMP_COEFF_SEL_WIDTH 1 /* OUT_COMP_COEFF_SEL */
+
+/*
+ * R1246 (0x4DE) - DAC comp 3
+ */
+#define ARIZONA_AEC_COMP_COEFF_MASK 0xFFFF /* AEC_COMP_COEFF - [15:0] */
+#define ARIZONA_AEC_COMP_COEFF_SHIFT 0 /* AEC_COMP_COEFF - [15:0] */
+#define ARIZONA_AEC_COMP_COEFF_WIDTH 16 /* AEC_COMP_COEFF - [15:0] */
+
+/*
+ * R1247 (0x4DF) - DAC comp 4
+ */
+#define ARIZONA_AEC_COMP_COEFF_1 0x0002 /* AEC_COMP_COEFF */
+#define ARIZONA_AEC_COMP_COEFF_1_MASK 0x0002 /* AEC_COMP_COEFF */
+#define ARIZONA_AEC_COMP_COEFF_1_SHIFT 1 /* AEC_COMP_COEFF */
+#define ARIZONA_AEC_COMP_COEFF_1_WIDTH 1 /* AEC_COMP_COEFF */
+#define ARIZONA_AEC_COMP_COEFF_SEL 0x0001 /* AEC_COMP_COEFF_SEL */
+#define ARIZONA_AEC_COMP_COEFF_SEL_MASK 0x0001 /* AEC_COMP_COEFF_SEL */
+#define ARIZONA_AEC_COMP_COEFF_SEL_SHIFT 0 /* AEC_COMP_COEFF_SEL */
+#define ARIZONA_AEC_COMP_COEFF_SEL_WIDTH 1 /* AEC_COMP_COEFF_SEL */
+
+/*
+ * R1280 (0x500) - AIF1 BCLK Ctrl
+ */
+#define ARIZONA_AIF1_BCLK_INV 0x0080 /* AIF1_BCLK_INV */
+#define ARIZONA_AIF1_BCLK_INV_MASK 0x0080 /* AIF1_BCLK_INV */
+#define ARIZONA_AIF1_BCLK_INV_SHIFT 7 /* AIF1_BCLK_INV */
+#define ARIZONA_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */
+#define ARIZONA_AIF1_BCLK_FRC 0x0040 /* AIF1_BCLK_FRC */
+#define ARIZONA_AIF1_BCLK_FRC_MASK 0x0040 /* AIF1_BCLK_FRC */
+#define ARIZONA_AIF1_BCLK_FRC_SHIFT 6 /* AIF1_BCLK_FRC */
+#define ARIZONA_AIF1_BCLK_FRC_WIDTH 1 /* AIF1_BCLK_FRC */
+#define ARIZONA_AIF1_BCLK_MSTR 0x0020 /* AIF1_BCLK_MSTR */
+#define ARIZONA_AIF1_BCLK_MSTR_MASK 0x0020 /* AIF1_BCLK_MSTR */
+#define ARIZONA_AIF1_BCLK_MSTR_SHIFT 5 /* AIF1_BCLK_MSTR */
+#define ARIZONA_AIF1_BCLK_MSTR_WIDTH 1 /* AIF1_BCLK_MSTR */
+#define ARIZONA_AIF1_BCLK_FREQ_MASK 0x001F /* AIF1_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF1_BCLK_FREQ_SHIFT 0 /* AIF1_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF1_BCLK_FREQ_WIDTH 5 /* AIF1_BCLK_FREQ - [4:0] */
+
+/*
+ * R1281 (0x501) - AIF1 Tx Pin Ctrl
+ */
+#define ARIZONA_AIF1TX_DAT_TRI 0x0020 /* AIF1TX_DAT_TRI */
+#define ARIZONA_AIF1TX_DAT_TRI_MASK 0x0020 /* AIF1TX_DAT_TRI */
+#define ARIZONA_AIF1TX_DAT_TRI_SHIFT 5 /* AIF1TX_DAT_TRI */
+#define ARIZONA_AIF1TX_DAT_TRI_WIDTH 1 /* AIF1TX_DAT_TRI */
+#define ARIZONA_AIF1TX_LRCLK_SRC 0x0008 /* AIF1TX_LRCLK_SRC */
+#define ARIZONA_AIF1TX_LRCLK_SRC_MASK 0x0008 /* AIF1TX_LRCLK_SRC */
+#define ARIZONA_AIF1TX_LRCLK_SRC_SHIFT 3 /* AIF1TX_LRCLK_SRC */
+#define ARIZONA_AIF1TX_LRCLK_SRC_WIDTH 1 /* AIF1TX_LRCLK_SRC */
+#define ARIZONA_AIF1TX_LRCLK_INV 0x0004 /* AIF1TX_LRCLK_INV */
+#define ARIZONA_AIF1TX_LRCLK_INV_MASK 0x0004 /* AIF1TX_LRCLK_INV */
+#define ARIZONA_AIF1TX_LRCLK_INV_SHIFT 2 /* AIF1TX_LRCLK_INV */
+#define ARIZONA_AIF1TX_LRCLK_INV_WIDTH 1 /* AIF1TX_LRCLK_INV */
+#define ARIZONA_AIF1TX_LRCLK_FRC 0x0002 /* AIF1TX_LRCLK_FRC */
+#define ARIZONA_AIF1TX_LRCLK_FRC_MASK 0x0002 /* AIF1TX_LRCLK_FRC */
+#define ARIZONA_AIF1TX_LRCLK_FRC_SHIFT 1 /* AIF1TX_LRCLK_FRC */
+#define ARIZONA_AIF1TX_LRCLK_FRC_WIDTH 1 /* AIF1TX_LRCLK_FRC */
+#define ARIZONA_AIF1TX_LRCLK_MSTR 0x0001 /* AIF1TX_LRCLK_MSTR */
+#define ARIZONA_AIF1TX_LRCLK_MSTR_MASK 0x0001 /* AIF1TX_LRCLK_MSTR */
+#define ARIZONA_AIF1TX_LRCLK_MSTR_SHIFT 0 /* AIF1TX_LRCLK_MSTR */
+#define ARIZONA_AIF1TX_LRCLK_MSTR_WIDTH 1 /* AIF1TX_LRCLK_MSTR */
+
+/*
+ * R1282 (0x502) - AIF1 Rx Pin Ctrl
+ */
+#define ARIZONA_AIF1RX_LRCLK_INV 0x0004 /* AIF1RX_LRCLK_INV */
+#define ARIZONA_AIF1RX_LRCLK_INV_MASK 0x0004 /* AIF1RX_LRCLK_INV */
+#define ARIZONA_AIF1RX_LRCLK_INV_SHIFT 2 /* AIF1RX_LRCLK_INV */
+#define ARIZONA_AIF1RX_LRCLK_INV_WIDTH 1 /* AIF1RX_LRCLK_INV */
+#define ARIZONA_AIF1RX_LRCLK_FRC 0x0002 /* AIF1RX_LRCLK_FRC */
+#define ARIZONA_AIF1RX_LRCLK_FRC_MASK 0x0002 /* AIF1RX_LRCLK_FRC */
+#define ARIZONA_AIF1RX_LRCLK_FRC_SHIFT 1 /* AIF1RX_LRCLK_FRC */
+#define ARIZONA_AIF1RX_LRCLK_FRC_WIDTH 1 /* AIF1RX_LRCLK_FRC */
+#define ARIZONA_AIF1RX_LRCLK_MSTR 0x0001 /* AIF1RX_LRCLK_MSTR */
+#define ARIZONA_AIF1RX_LRCLK_MSTR_MASK 0x0001 /* AIF1RX_LRCLK_MSTR */
+#define ARIZONA_AIF1RX_LRCLK_MSTR_SHIFT 0 /* AIF1RX_LRCLK_MSTR */
+#define ARIZONA_AIF1RX_LRCLK_MSTR_WIDTH 1 /* AIF1RX_LRCLK_MSTR */
+
+/*
+ * R1283 (0x503) - AIF1 Rate Ctrl
+ */
+#define ARIZONA_AIF1_RATE_MASK 0x7800 /* AIF1_RATE - [14:11] */
+#define ARIZONA_AIF1_RATE_SHIFT 11 /* AIF1_RATE - [14:11] */
+#define ARIZONA_AIF1_RATE_WIDTH 4 /* AIF1_RATE - [14:11] */
+#define ARIZONA_AIF1_TRI 0x0040 /* AIF1_TRI */
+#define ARIZONA_AIF1_TRI_MASK 0x0040 /* AIF1_TRI */
+#define ARIZONA_AIF1_TRI_SHIFT 6 /* AIF1_TRI */
+#define ARIZONA_AIF1_TRI_WIDTH 1 /* AIF1_TRI */
+
+/*
+ * R1284 (0x504) - AIF1 Format
+ */
+#define ARIZONA_AIF1_FMT_MASK 0x0007 /* AIF1_FMT - [2:0] */
+#define ARIZONA_AIF1_FMT_SHIFT 0 /* AIF1_FMT - [2:0] */
+#define ARIZONA_AIF1_FMT_WIDTH 3 /* AIF1_FMT - [2:0] */
+
+/*
+ * R1285 (0x505) - AIF1 Tx BCLK Rate
+ */
+#define ARIZONA_AIF1TX_BCPF_MASK 0x1FFF /* AIF1TX_BCPF - [12:0] */
+#define ARIZONA_AIF1TX_BCPF_SHIFT 0 /* AIF1TX_BCPF - [12:0] */
+#define ARIZONA_AIF1TX_BCPF_WIDTH 13 /* AIF1TX_BCPF - [12:0] */
+
+/*
+ * R1286 (0x506) - AIF1 Rx BCLK Rate
+ */
+#define ARIZONA_AIF1RX_BCPF_MASK 0x1FFF /* AIF1RX_BCPF - [12:0] */
+#define ARIZONA_AIF1RX_BCPF_SHIFT 0 /* AIF1RX_BCPF - [12:0] */
+#define ARIZONA_AIF1RX_BCPF_WIDTH 13 /* AIF1RX_BCPF - [12:0] */
+
+/*
+ * R1287 (0x507) - AIF1 Frame Ctrl 1
+ */
+#define ARIZONA_AIF1TX_WL_MASK 0x3F00 /* AIF1TX_WL - [13:8] */
+#define ARIZONA_AIF1TX_WL_SHIFT 8 /* AIF1TX_WL - [13:8] */
+#define ARIZONA_AIF1TX_WL_WIDTH 6 /* AIF1TX_WL - [13:8] */
+#define ARIZONA_AIF1TX_SLOT_LEN_MASK 0x00FF /* AIF1TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF1TX_SLOT_LEN_SHIFT 0 /* AIF1TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF1TX_SLOT_LEN_WIDTH 8 /* AIF1TX_SLOT_LEN - [7:0] */
+
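+/*
+ * Illustrative multi-field update (a sketch, not part of the original
+ * register map): R1287 packs the AIF1 TX word length and slot length into a
+ * single register, so both fields can be written in one masked update.
+ * Assuming the register address macro ARIZONA_AIF1_FRAME_CTRL_1 is defined
+ * earlier in this header, 24-bit words in 32-clock slots (example values
+ * only) would be:
+ *
+ *	regmap_update_bits(regmap, ARIZONA_AIF1_FRAME_CTRL_1,
+ *			   ARIZONA_AIF1TX_WL_MASK | ARIZONA_AIF1TX_SLOT_LEN_MASK,
+ *			   (24 << ARIZONA_AIF1TX_WL_SHIFT) |
+ *			   (32 << ARIZONA_AIF1TX_SLOT_LEN_SHIFT));
+ */
+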
+/*
+ * R1288 (0x508) - AIF1 Frame Ctrl 2
+ */
+#define ARIZONA_AIF1RX_WL_MASK 0x3F00 /* AIF1RX_WL - [13:8] */
+#define ARIZONA_AIF1RX_WL_SHIFT 8 /* AIF1RX_WL - [13:8] */
+#define ARIZONA_AIF1RX_WL_WIDTH 6 /* AIF1RX_WL - [13:8] */
+#define ARIZONA_AIF1RX_SLOT_LEN_MASK 0x00FF /* AIF1RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF1RX_SLOT_LEN_SHIFT 0 /* AIF1RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF1RX_SLOT_LEN_WIDTH 8 /* AIF1RX_SLOT_LEN - [7:0] */
+
+/*
+ * R1289 (0x509) - AIF1 Frame Ctrl 3
+ */
+#define ARIZONA_AIF1TX1_SLOT_MASK 0x003F /* AIF1TX1_SLOT - [5:0] */
+#define ARIZONA_AIF1TX1_SLOT_SHIFT 0 /* AIF1TX1_SLOT - [5:0] */
+#define ARIZONA_AIF1TX1_SLOT_WIDTH 6 /* AIF1TX1_SLOT - [5:0] */
+
+/*
+ * R1290 (0x50A) - AIF1 Frame Ctrl 4
+ */
+#define ARIZONA_AIF1TX2_SLOT_MASK 0x003F /* AIF1TX2_SLOT - [5:0] */
+#define ARIZONA_AIF1TX2_SLOT_SHIFT 0 /* AIF1TX2_SLOT - [5:0] */
+#define ARIZONA_AIF1TX2_SLOT_WIDTH 6 /* AIF1TX2_SLOT - [5:0] */
+
+/*
+ * R1291 (0x50B) - AIF1 Frame Ctrl 5
+ */
+#define ARIZONA_AIF1TX3_SLOT_MASK 0x003F /* AIF1TX3_SLOT - [5:0] */
+#define ARIZONA_AIF1TX3_SLOT_SHIFT 0 /* AIF1TX3_SLOT - [5:0] */
+#define ARIZONA_AIF1TX3_SLOT_WIDTH 6 /* AIF1TX3_SLOT - [5:0] */
+
+/*
+ * R1292 (0x50C) - AIF1 Frame Ctrl 6
+ */
+#define ARIZONA_AIF1TX4_SLOT_MASK 0x003F /* AIF1TX4_SLOT - [5:0] */
+#define ARIZONA_AIF1TX4_SLOT_SHIFT 0 /* AIF1TX4_SLOT - [5:0] */
+#define ARIZONA_AIF1TX4_SLOT_WIDTH 6 /* AIF1TX4_SLOT - [5:0] */
+
+/*
+ * R1293 (0x50D) - AIF1 Frame Ctrl 7
+ */
+#define ARIZONA_AIF1TX5_SLOT_MASK 0x003F /* AIF1TX5_SLOT - [5:0] */
+#define ARIZONA_AIF1TX5_SLOT_SHIFT 0 /* AIF1TX5_SLOT - [5:0] */
+#define ARIZONA_AIF1TX5_SLOT_WIDTH 6 /* AIF1TX5_SLOT - [5:0] */
+
+/*
+ * R1294 (0x50E) - AIF1 Frame Ctrl 8
+ */
+#define ARIZONA_AIF1TX6_SLOT_MASK 0x003F /* AIF1TX6_SLOT - [5:0] */
+#define ARIZONA_AIF1TX6_SLOT_SHIFT 0 /* AIF1TX6_SLOT - [5:0] */
+#define ARIZONA_AIF1TX6_SLOT_WIDTH 6 /* AIF1TX6_SLOT - [5:0] */
+
+/*
+ * R1295 (0x50F) - AIF1 Frame Ctrl 9
+ */
+#define ARIZONA_AIF1TX7_SLOT_MASK 0x003F /* AIF1TX7_SLOT - [5:0] */
+#define ARIZONA_AIF1TX7_SLOT_SHIFT 0 /* AIF1TX7_SLOT - [5:0] */
+#define ARIZONA_AIF1TX7_SLOT_WIDTH 6 /* AIF1TX7_SLOT - [5:0] */
+
+/*
+ * R1296 (0x510) - AIF1 Frame Ctrl 10
+ */
+#define ARIZONA_AIF1TX8_SLOT_MASK 0x003F /* AIF1TX8_SLOT - [5:0] */
+#define ARIZONA_AIF1TX8_SLOT_SHIFT 0 /* AIF1TX8_SLOT - [5:0] */
+#define ARIZONA_AIF1TX8_SLOT_WIDTH 6 /* AIF1TX8_SLOT - [5:0] */
+
+/*
+ * R1297 (0x511) - AIF1 Frame Ctrl 11
+ */
+#define ARIZONA_AIF1RX1_SLOT_MASK 0x003F /* AIF1RX1_SLOT - [5:0] */
+#define ARIZONA_AIF1RX1_SLOT_SHIFT 0 /* AIF1RX1_SLOT - [5:0] */
+#define ARIZONA_AIF1RX1_SLOT_WIDTH 6 /* AIF1RX1_SLOT - [5:0] */
+
+/*
+ * R1298 (0x512) - AIF1 Frame Ctrl 12
+ */
+#define ARIZONA_AIF1RX2_SLOT_MASK 0x003F /* AIF1RX2_SLOT - [5:0] */
+#define ARIZONA_AIF1RX2_SLOT_SHIFT 0 /* AIF1RX2_SLOT - [5:0] */
+#define ARIZONA_AIF1RX2_SLOT_WIDTH 6 /* AIF1RX2_SLOT - [5:0] */
+
+/*
+ * R1299 (0x513) - AIF1 Frame Ctrl 13
+ */
+#define ARIZONA_AIF1RX3_SLOT_MASK 0x003F /* AIF1RX3_SLOT - [5:0] */
+#define ARIZONA_AIF1RX3_SLOT_SHIFT 0 /* AIF1RX3_SLOT - [5:0] */
+#define ARIZONA_AIF1RX3_SLOT_WIDTH 6 /* AIF1RX3_SLOT - [5:0] */
+
+/*
+ * R1300 (0x514) - AIF1 Frame Ctrl 14
+ */
+#define ARIZONA_AIF1RX4_SLOT_MASK 0x003F /* AIF1RX4_SLOT - [5:0] */
+#define ARIZONA_AIF1RX4_SLOT_SHIFT 0 /* AIF1RX4_SLOT - [5:0] */
+#define ARIZONA_AIF1RX4_SLOT_WIDTH 6 /* AIF1RX4_SLOT - [5:0] */
+
+/*
+ * R1301 (0x515) - AIF1 Frame Ctrl 15
+ */
+#define ARIZONA_AIF1RX5_SLOT_MASK 0x003F /* AIF1RX5_SLOT - [5:0] */
+#define ARIZONA_AIF1RX5_SLOT_SHIFT 0 /* AIF1RX5_SLOT - [5:0] */
+#define ARIZONA_AIF1RX5_SLOT_WIDTH 6 /* AIF1RX5_SLOT - [5:0] */
+
+/*
+ * R1302 (0x516) - AIF1 Frame Ctrl 16
+ */
+#define ARIZONA_AIF1RX6_SLOT_MASK 0x003F /* AIF1RX6_SLOT - [5:0] */
+#define ARIZONA_AIF1RX6_SLOT_SHIFT 0 /* AIF1RX6_SLOT - [5:0] */
+#define ARIZONA_AIF1RX6_SLOT_WIDTH 6 /* AIF1RX6_SLOT - [5:0] */
+
+/*
+ * R1303 (0x517) - AIF1 Frame Ctrl 17
+ */
+#define ARIZONA_AIF1RX7_SLOT_MASK 0x003F /* AIF1RX7_SLOT - [5:0] */
+#define ARIZONA_AIF1RX7_SLOT_SHIFT 0 /* AIF1RX7_SLOT - [5:0] */
+#define ARIZONA_AIF1RX7_SLOT_WIDTH 6 /* AIF1RX7_SLOT - [5:0] */
+
+/*
+ * R1304 (0x518) - AIF1 Frame Ctrl 18
+ */
+#define ARIZONA_AIF1RX8_SLOT_MASK 0x003F /* AIF1RX8_SLOT - [5:0] */
+#define ARIZONA_AIF1RX8_SLOT_SHIFT 0 /* AIF1RX8_SLOT - [5:0] */
+#define ARIZONA_AIF1RX8_SLOT_WIDTH 6 /* AIF1RX8_SLOT - [5:0] */
+
+/*
+ * R1305 (0x519) - AIF1 Tx Enables
+ */
+#define ARIZONA_AIF1TX8_ENA 0x0080 /* AIF1TX8_ENA */
+#define ARIZONA_AIF1TX8_ENA_MASK 0x0080 /* AIF1TX8_ENA */
+#define ARIZONA_AIF1TX8_ENA_SHIFT 7 /* AIF1TX8_ENA */
+#define ARIZONA_AIF1TX8_ENA_WIDTH 1 /* AIF1TX8_ENA */
+#define ARIZONA_AIF1TX7_ENA 0x0040 /* AIF1TX7_ENA */
+#define ARIZONA_AIF1TX7_ENA_MASK 0x0040 /* AIF1TX7_ENA */
+#define ARIZONA_AIF1TX7_ENA_SHIFT 6 /* AIF1TX7_ENA */
+#define ARIZONA_AIF1TX7_ENA_WIDTH 1 /* AIF1TX7_ENA */
+#define ARIZONA_AIF1TX6_ENA 0x0020 /* AIF1TX6_ENA */
+#define ARIZONA_AIF1TX6_ENA_MASK 0x0020 /* AIF1TX6_ENA */
+#define ARIZONA_AIF1TX6_ENA_SHIFT 5 /* AIF1TX6_ENA */
+#define ARIZONA_AIF1TX6_ENA_WIDTH 1 /* AIF1TX6_ENA */
+#define ARIZONA_AIF1TX5_ENA 0x0010 /* AIF1TX5_ENA */
+#define ARIZONA_AIF1TX5_ENA_MASK 0x0010 /* AIF1TX5_ENA */
+#define ARIZONA_AIF1TX5_ENA_SHIFT 4 /* AIF1TX5_ENA */
+#define ARIZONA_AIF1TX5_ENA_WIDTH 1 /* AIF1TX5_ENA */
+#define ARIZONA_AIF1TX4_ENA 0x0008 /* AIF1TX4_ENA */
+#define ARIZONA_AIF1TX4_ENA_MASK 0x0008 /* AIF1TX4_ENA */
+#define ARIZONA_AIF1TX4_ENA_SHIFT 3 /* AIF1TX4_ENA */
+#define ARIZONA_AIF1TX4_ENA_WIDTH 1 /* AIF1TX4_ENA */
+#define ARIZONA_AIF1TX3_ENA 0x0004 /* AIF1TX3_ENA */
+#define ARIZONA_AIF1TX3_ENA_MASK 0x0004 /* AIF1TX3_ENA */
+#define ARIZONA_AIF1TX3_ENA_SHIFT 2 /* AIF1TX3_ENA */
+#define ARIZONA_AIF1TX3_ENA_WIDTH 1 /* AIF1TX3_ENA */
+#define ARIZONA_AIF1TX2_ENA 0x0002 /* AIF1TX2_ENA */
+#define ARIZONA_AIF1TX2_ENA_MASK 0x0002 /* AIF1TX2_ENA */
+#define ARIZONA_AIF1TX2_ENA_SHIFT 1 /* AIF1TX2_ENA */
+#define ARIZONA_AIF1TX2_ENA_WIDTH 1 /* AIF1TX2_ENA */
+#define ARIZONA_AIF1TX1_ENA 0x0001 /* AIF1TX1_ENA */
+#define ARIZONA_AIF1TX1_ENA_MASK 0x0001 /* AIF1TX1_ENA */
+#define ARIZONA_AIF1TX1_ENA_SHIFT 0 /* AIF1TX1_ENA */
+#define ARIZONA_AIF1TX1_ENA_WIDTH 1 /* AIF1TX1_ENA */
+
+/*
+ * R1306 (0x51A) - AIF1 Rx Enables
+ */
+#define ARIZONA_AIF1RX8_ENA 0x0080 /* AIF1RX8_ENA */
+#define ARIZONA_AIF1RX8_ENA_MASK 0x0080 /* AIF1RX8_ENA */
+#define ARIZONA_AIF1RX8_ENA_SHIFT 7 /* AIF1RX8_ENA */
+#define ARIZONA_AIF1RX8_ENA_WIDTH 1 /* AIF1RX8_ENA */
+#define ARIZONA_AIF1RX7_ENA 0x0040 /* AIF1RX7_ENA */
+#define ARIZONA_AIF1RX7_ENA_MASK 0x0040 /* AIF1RX7_ENA */
+#define ARIZONA_AIF1RX7_ENA_SHIFT 6 /* AIF1RX7_ENA */
+#define ARIZONA_AIF1RX7_ENA_WIDTH 1 /* AIF1RX7_ENA */
+#define ARIZONA_AIF1RX6_ENA 0x0020 /* AIF1RX6_ENA */
+#define ARIZONA_AIF1RX6_ENA_MASK 0x0020 /* AIF1RX6_ENA */
+#define ARIZONA_AIF1RX6_ENA_SHIFT 5 /* AIF1RX6_ENA */
+#define ARIZONA_AIF1RX6_ENA_WIDTH 1 /* AIF1RX6_ENA */
+#define ARIZONA_AIF1RX5_ENA 0x0010 /* AIF1RX5_ENA */
+#define ARIZONA_AIF1RX5_ENA_MASK 0x0010 /* AIF1RX5_ENA */
+#define ARIZONA_AIF1RX5_ENA_SHIFT 4 /* AIF1RX5_ENA */
+#define ARIZONA_AIF1RX5_ENA_WIDTH 1 /* AIF1RX5_ENA */
+#define ARIZONA_AIF1RX4_ENA 0x0008 /* AIF1RX4_ENA */
+#define ARIZONA_AIF1RX4_ENA_MASK 0x0008 /* AIF1RX4_ENA */
+#define ARIZONA_AIF1RX4_ENA_SHIFT 3 /* AIF1RX4_ENA */
+#define ARIZONA_AIF1RX4_ENA_WIDTH 1 /* AIF1RX4_ENA */
+#define ARIZONA_AIF1RX3_ENA 0x0004 /* AIF1RX3_ENA */
+#define ARIZONA_AIF1RX3_ENA_MASK 0x0004 /* AIF1RX3_ENA */
+#define ARIZONA_AIF1RX3_ENA_SHIFT 2 /* AIF1RX3_ENA */
+#define ARIZONA_AIF1RX3_ENA_WIDTH 1 /* AIF1RX3_ENA */
+#define ARIZONA_AIF1RX2_ENA 0x0002 /* AIF1RX2_ENA */
+#define ARIZONA_AIF1RX2_ENA_MASK 0x0002 /* AIF1RX2_ENA */
+#define ARIZONA_AIF1RX2_ENA_SHIFT 1 /* AIF1RX2_ENA */
+#define ARIZONA_AIF1RX2_ENA_WIDTH 1 /* AIF1RX2_ENA */
+#define ARIZONA_AIF1RX1_ENA 0x0001 /* AIF1RX1_ENA */
+#define ARIZONA_AIF1RX1_ENA_MASK 0x0001 /* AIF1RX1_ENA */
+#define ARIZONA_AIF1RX1_ENA_SHIFT 0 /* AIF1RX1_ENA */
+#define ARIZONA_AIF1RX1_ENA_WIDTH 1 /* AIF1RX1_ENA */
+
+/*
+ * R1307 (0x51B) - AIF1 Force Write
+ */
+#define ARIZONA_AIF1_FRC_WR 0x0001 /* AIF1_FRC_WR */
+#define ARIZONA_AIF1_FRC_WR_MASK 0x0001 /* AIF1_FRC_WR */
+#define ARIZONA_AIF1_FRC_WR_SHIFT 0 /* AIF1_FRC_WR */
+#define ARIZONA_AIF1_FRC_WR_WIDTH 1 /* AIF1_FRC_WR */
+
+/*
+ * R1344 (0x540) - AIF2 BCLK Ctrl
+ */
+#define ARIZONA_AIF2_BCLK_INV 0x0080 /* AIF2_BCLK_INV */
+#define ARIZONA_AIF2_BCLK_INV_MASK 0x0080 /* AIF2_BCLK_INV */
+#define ARIZONA_AIF2_BCLK_INV_SHIFT 7 /* AIF2_BCLK_INV */
+#define ARIZONA_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */
+#define ARIZONA_AIF2_BCLK_FRC 0x0040 /* AIF2_BCLK_FRC */
+#define ARIZONA_AIF2_BCLK_FRC_MASK 0x0040 /* AIF2_BCLK_FRC */
+#define ARIZONA_AIF2_BCLK_FRC_SHIFT 6 /* AIF2_BCLK_FRC */
+#define ARIZONA_AIF2_BCLK_FRC_WIDTH 1 /* AIF2_BCLK_FRC */
+#define ARIZONA_AIF2_BCLK_MSTR 0x0020 /* AIF2_BCLK_MSTR */
+#define ARIZONA_AIF2_BCLK_MSTR_MASK 0x0020 /* AIF2_BCLK_MSTR */
+#define ARIZONA_AIF2_BCLK_MSTR_SHIFT 5 /* AIF2_BCLK_MSTR */
+#define ARIZONA_AIF2_BCLK_MSTR_WIDTH 1 /* AIF2_BCLK_MSTR */
+#define ARIZONA_AIF2_BCLK_FREQ_MASK 0x001F /* AIF2_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF2_BCLK_FREQ_SHIFT 0 /* AIF2_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF2_BCLK_FREQ_WIDTH 5 /* AIF2_BCLK_FREQ - [4:0] */
+
+/*
+ * R1345 (0x541) - AIF2 Tx Pin Ctrl
+ */
+#define ARIZONA_AIF2TX_DAT_TRI 0x0020 /* AIF2TX_DAT_TRI */
+#define ARIZONA_AIF2TX_DAT_TRI_MASK 0x0020 /* AIF2TX_DAT_TRI */
+#define ARIZONA_AIF2TX_DAT_TRI_SHIFT 5 /* AIF2TX_DAT_TRI */
+#define ARIZONA_AIF2TX_DAT_TRI_WIDTH 1 /* AIF2TX_DAT_TRI */
+#define ARIZONA_AIF2TX_LRCLK_SRC 0x0008 /* AIF2TX_LRCLK_SRC */
+#define ARIZONA_AIF2TX_LRCLK_SRC_MASK 0x0008 /* AIF2TX_LRCLK_SRC */
+#define ARIZONA_AIF2TX_LRCLK_SRC_SHIFT 3 /* AIF2TX_LRCLK_SRC */
+#define ARIZONA_AIF2TX_LRCLK_SRC_WIDTH 1 /* AIF2TX_LRCLK_SRC */
+#define ARIZONA_AIF2TX_LRCLK_INV 0x0004 /* AIF2TX_LRCLK_INV */
+#define ARIZONA_AIF2TX_LRCLK_INV_MASK 0x0004 /* AIF2TX_LRCLK_INV */
+#define ARIZONA_AIF2TX_LRCLK_INV_SHIFT 2 /* AIF2TX_LRCLK_INV */
+#define ARIZONA_AIF2TX_LRCLK_INV_WIDTH 1 /* AIF2TX_LRCLK_INV */
+#define ARIZONA_AIF2TX_LRCLK_FRC 0x0002 /* AIF2TX_LRCLK_FRC */
+#define ARIZONA_AIF2TX_LRCLK_FRC_MASK 0x0002 /* AIF2TX_LRCLK_FRC */
+#define ARIZONA_AIF2TX_LRCLK_FRC_SHIFT 1 /* AIF2TX_LRCLK_FRC */
+#define ARIZONA_AIF2TX_LRCLK_FRC_WIDTH 1 /* AIF2TX_LRCLK_FRC */
+#define ARIZONA_AIF2TX_LRCLK_MSTR 0x0001 /* AIF2TX_LRCLK_MSTR */
+#define ARIZONA_AIF2TX_LRCLK_MSTR_MASK 0x0001 /* AIF2TX_LRCLK_MSTR */
+#define ARIZONA_AIF2TX_LRCLK_MSTR_SHIFT 0 /* AIF2TX_LRCLK_MSTR */
+#define ARIZONA_AIF2TX_LRCLK_MSTR_WIDTH 1 /* AIF2TX_LRCLK_MSTR */
+
+/*
+ * R1346 (0x542) - AIF2 Rx Pin Ctrl
+ */
+#define ARIZONA_AIF2RX_LRCLK_INV 0x0004 /* AIF2RX_LRCLK_INV */
+#define ARIZONA_AIF2RX_LRCLK_INV_MASK 0x0004 /* AIF2RX_LRCLK_INV */
+#define ARIZONA_AIF2RX_LRCLK_INV_SHIFT 2 /* AIF2RX_LRCLK_INV */
+#define ARIZONA_AIF2RX_LRCLK_INV_WIDTH 1 /* AIF2RX_LRCLK_INV */
+#define ARIZONA_AIF2RX_LRCLK_FRC 0x0002 /* AIF2RX_LRCLK_FRC */
+#define ARIZONA_AIF2RX_LRCLK_FRC_MASK 0x0002 /* AIF2RX_LRCLK_FRC */
+#define ARIZONA_AIF2RX_LRCLK_FRC_SHIFT 1 /* AIF2RX_LRCLK_FRC */
+#define ARIZONA_AIF2RX_LRCLK_FRC_WIDTH 1 /* AIF2RX_LRCLK_FRC */
+#define ARIZONA_AIF2RX_LRCLK_MSTR 0x0001 /* AIF2RX_LRCLK_MSTR */
+#define ARIZONA_AIF2RX_LRCLK_MSTR_MASK 0x0001 /* AIF2RX_LRCLK_MSTR */
+#define ARIZONA_AIF2RX_LRCLK_MSTR_SHIFT 0 /* AIF2RX_LRCLK_MSTR */
+#define ARIZONA_AIF2RX_LRCLK_MSTR_WIDTH 1 /* AIF2RX_LRCLK_MSTR */
+
+/*
+ * R1347 (0x543) - AIF2 Rate Ctrl
+ */
+#define ARIZONA_AIF2_RATE_MASK 0x7800 /* AIF2_RATE - [14:11] */
+#define ARIZONA_AIF2_RATE_SHIFT 11 /* AIF2_RATE - [14:11] */
+#define ARIZONA_AIF2_RATE_WIDTH 4 /* AIF2_RATE - [14:11] */
+#define ARIZONA_AIF2_TRI 0x0040 /* AIF2_TRI */
+#define ARIZONA_AIF2_TRI_MASK 0x0040 /* AIF2_TRI */
+#define ARIZONA_AIF2_TRI_SHIFT 6 /* AIF2_TRI */
+#define ARIZONA_AIF2_TRI_WIDTH 1 /* AIF2_TRI */
+
+/*
+ * R1348 (0x544) - AIF2 Format
+ */
+#define ARIZONA_AIF2_FMT_MASK 0x0007 /* AIF2_FMT - [2:0] */
+#define ARIZONA_AIF2_FMT_SHIFT 0 /* AIF2_FMT - [2:0] */
+#define ARIZONA_AIF2_FMT_WIDTH 3 /* AIF2_FMT - [2:0] */
+
+/*
+ * R1349 (0x545) - AIF2 Tx BCLK Rate
+ */
+#define ARIZONA_AIF2TX_BCPF_MASK 0x1FFF /* AIF2TX_BCPF - [12:0] */
+#define ARIZONA_AIF2TX_BCPF_SHIFT 0 /* AIF2TX_BCPF - [12:0] */
+#define ARIZONA_AIF2TX_BCPF_WIDTH 13 /* AIF2TX_BCPF - [12:0] */
+
+/*
+ * R1350 (0x546) - AIF2 Rx BCLK Rate
+ */
+#define ARIZONA_AIF2RX_BCPF_MASK 0x1FFF /* AIF2RX_BCPF - [12:0] */
+#define ARIZONA_AIF2RX_BCPF_SHIFT 0 /* AIF2RX_BCPF - [12:0] */
+#define ARIZONA_AIF2RX_BCPF_WIDTH 13 /* AIF2RX_BCPF - [12:0] */
+
+/*
+ * R1351 (0x547) - AIF2 Frame Ctrl 1
+ */
+#define ARIZONA_AIF2TX_WL_MASK 0x3F00 /* AIF2TX_WL - [13:8] */
+#define ARIZONA_AIF2TX_WL_SHIFT 8 /* AIF2TX_WL - [13:8] */
+#define ARIZONA_AIF2TX_WL_WIDTH 6 /* AIF2TX_WL - [13:8] */
+#define ARIZONA_AIF2TX_SLOT_LEN_MASK 0x00FF /* AIF2TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF2TX_SLOT_LEN_SHIFT 0 /* AIF2TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF2TX_SLOT_LEN_WIDTH 8 /* AIF2TX_SLOT_LEN - [7:0] */
+
+/*
+ * R1352 (0x548) - AIF2 Frame Ctrl 2
+ */
+#define ARIZONA_AIF2RX_WL_MASK 0x3F00 /* AIF2RX_WL - [13:8] */
+#define ARIZONA_AIF2RX_WL_SHIFT 8 /* AIF2RX_WL - [13:8] */
+#define ARIZONA_AIF2RX_WL_WIDTH 6 /* AIF2RX_WL - [13:8] */
+#define ARIZONA_AIF2RX_SLOT_LEN_MASK 0x00FF /* AIF2RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF2RX_SLOT_LEN_SHIFT 0 /* AIF2RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF2RX_SLOT_LEN_WIDTH 8 /* AIF2RX_SLOT_LEN - [7:0] */
+
+/*
+ * R1353 (0x549) - AIF2 Frame Ctrl 3
+ */
+#define ARIZONA_AIF2TX1_SLOT_MASK 0x003F /* AIF2TX1_SLOT - [5:0] */
+#define ARIZONA_AIF2TX1_SLOT_SHIFT 0 /* AIF2TX1_SLOT - [5:0] */
+#define ARIZONA_AIF2TX1_SLOT_WIDTH 6 /* AIF2TX1_SLOT - [5:0] */
+
+/*
+ * R1354 (0x54A) - AIF2 Frame Ctrl 4
+ */
+#define ARIZONA_AIF2TX2_SLOT_MASK 0x003F /* AIF2TX2_SLOT - [5:0] */
+#define ARIZONA_AIF2TX2_SLOT_SHIFT 0 /* AIF2TX2_SLOT - [5:0] */
+#define ARIZONA_AIF2TX2_SLOT_WIDTH 6 /* AIF2TX2_SLOT - [5:0] */
+
+/*
+ * R1355 (0x54B) - AIF2 Frame Ctrl 5
+ */
+#define ARIZONA_AIF2TX3_SLOT_MASK 0x003F /* AIF2TX3_SLOT - [5:0] */
+#define ARIZONA_AIF2TX3_SLOT_SHIFT 0 /* AIF2TX3_SLOT - [5:0] */
+#define ARIZONA_AIF2TX3_SLOT_WIDTH 6 /* AIF2TX3_SLOT - [5:0] */
+
+/*
+ * R1356 (0x54C) - AIF2 Frame Ctrl 6
+ */
+#define ARIZONA_AIF2TX4_SLOT_MASK 0x003F /* AIF2TX4_SLOT - [5:0] */
+#define ARIZONA_AIF2TX4_SLOT_SHIFT 0 /* AIF2TX4_SLOT - [5:0] */
+#define ARIZONA_AIF2TX4_SLOT_WIDTH 6 /* AIF2TX4_SLOT - [5:0] */
+
+/*
+ * R1357 (0x54D) - AIF2 Frame Ctrl 7
+ */
+#define ARIZONA_AIF2TX5_SLOT_MASK 0x003F /* AIF2TX5_SLOT - [5:0] */
+#define ARIZONA_AIF2TX5_SLOT_SHIFT 0 /* AIF2TX5_SLOT - [5:0] */
+#define ARIZONA_AIF2TX5_SLOT_WIDTH 6 /* AIF2TX5_SLOT - [5:0] */
+
+/*
+ * R1358 (0x54E) - AIF2 Frame Ctrl 8
+ */
+#define ARIZONA_AIF2TX6_SLOT_MASK 0x003F /* AIF2TX6_SLOT - [5:0] */
+#define ARIZONA_AIF2TX6_SLOT_SHIFT 0 /* AIF2TX6_SLOT - [5:0] */
+#define ARIZONA_AIF2TX6_SLOT_WIDTH 6 /* AIF2TX6_SLOT - [5:0] */
+
+/*
+ * R1361 (0x551) - AIF2 Frame Ctrl 11
+ */
+#define ARIZONA_AIF2RX1_SLOT_MASK 0x003F /* AIF2RX1_SLOT - [5:0] */
+#define ARIZONA_AIF2RX1_SLOT_SHIFT 0 /* AIF2RX1_SLOT - [5:0] */
+#define ARIZONA_AIF2RX1_SLOT_WIDTH 6 /* AIF2RX1_SLOT - [5:0] */
+
+/*
+ * R1362 (0x552) - AIF2 Frame Ctrl 12
+ */
+#define ARIZONA_AIF2RX2_SLOT_MASK 0x003F /* AIF2RX2_SLOT - [5:0] */
+#define ARIZONA_AIF2RX2_SLOT_SHIFT 0 /* AIF2RX2_SLOT - [5:0] */
+#define ARIZONA_AIF2RX2_SLOT_WIDTH 6 /* AIF2RX2_SLOT - [5:0] */
+
+/*
+ * R1363 (0x553) - AIF2 Frame Ctrl 13
+ */
+#define ARIZONA_AIF2RX3_SLOT_MASK 0x003F /* AIF2RX3_SLOT - [5:0] */
+#define ARIZONA_AIF2RX3_SLOT_SHIFT 0 /* AIF2RX3_SLOT - [5:0] */
+#define ARIZONA_AIF2RX3_SLOT_WIDTH 6 /* AIF2RX3_SLOT - [5:0] */
+
+/*
+ * R1364 (0x554) - AIF2 Frame Ctrl 14
+ */
+#define ARIZONA_AIF2RX4_SLOT_MASK 0x003F /* AIF2RX4_SLOT - [5:0] */
+#define ARIZONA_AIF2RX4_SLOT_SHIFT 0 /* AIF2RX4_SLOT - [5:0] */
+#define ARIZONA_AIF2RX4_SLOT_WIDTH 6 /* AIF2RX4_SLOT - [5:0] */
+
+/*
+ * R1365 (0x555) - AIF2 Frame Ctrl 15
+ */
+#define ARIZONA_AIF2RX5_SLOT_MASK 0x003F /* AIF2RX5_SLOT - [5:0] */
+#define ARIZONA_AIF2RX5_SLOT_SHIFT 0 /* AIF2RX5_SLOT - [5:0] */
+#define ARIZONA_AIF2RX5_SLOT_WIDTH 6 /* AIF2RX5_SLOT - [5:0] */
+
+/*
+ * R1366 (0x556) - AIF2 Frame Ctrl 16
+ */
+#define ARIZONA_AIF2RX6_SLOT_MASK 0x003F /* AIF2RX6_SLOT - [5:0] */
+#define ARIZONA_AIF2RX6_SLOT_SHIFT 0 /* AIF2RX6_SLOT - [5:0] */
+#define ARIZONA_AIF2RX6_SLOT_WIDTH 6 /* AIF2RX6_SLOT - [5:0] */
+
+/*
+ * R1369 (0x559) - AIF2 Tx Enables
+ */
+#define ARIZONA_AIF2TX6_ENA 0x0020 /* AIF2TX6_ENA */
+#define ARIZONA_AIF2TX6_ENA_MASK 0x0020 /* AIF2TX6_ENA */
+#define ARIZONA_AIF2TX6_ENA_SHIFT 5 /* AIF2TX6_ENA */
+#define ARIZONA_AIF2TX6_ENA_WIDTH 1 /* AIF2TX6_ENA */
+#define ARIZONA_AIF2TX5_ENA 0x0010 /* AIF2TX5_ENA */
+#define ARIZONA_AIF2TX5_ENA_MASK 0x0010 /* AIF2TX5_ENA */
+#define ARIZONA_AIF2TX5_ENA_SHIFT 4 /* AIF2TX5_ENA */
+#define ARIZONA_AIF2TX5_ENA_WIDTH 1 /* AIF2TX5_ENA */
+#define ARIZONA_AIF2TX4_ENA 0x0008 /* AIF2TX4_ENA */
+#define ARIZONA_AIF2TX4_ENA_MASK 0x0008 /* AIF2TX4_ENA */
+#define ARIZONA_AIF2TX4_ENA_SHIFT 3 /* AIF2TX4_ENA */
+#define ARIZONA_AIF2TX4_ENA_WIDTH 1 /* AIF2TX4_ENA */
+#define ARIZONA_AIF2TX3_ENA 0x0004 /* AIF2TX3_ENA */
+#define ARIZONA_AIF2TX3_ENA_MASK 0x0004 /* AIF2TX3_ENA */
+#define ARIZONA_AIF2TX3_ENA_SHIFT 2 /* AIF2TX3_ENA */
+#define ARIZONA_AIF2TX3_ENA_WIDTH 1 /* AIF2TX3_ENA */
+#define ARIZONA_AIF2TX2_ENA 0x0002 /* AIF2TX2_ENA */
+#define ARIZONA_AIF2TX2_ENA_MASK 0x0002 /* AIF2TX2_ENA */
+#define ARIZONA_AIF2TX2_ENA_SHIFT 1 /* AIF2TX2_ENA */
+#define ARIZONA_AIF2TX2_ENA_WIDTH 1 /* AIF2TX2_ENA */
+#define ARIZONA_AIF2TX1_ENA 0x0001 /* AIF2TX1_ENA */
+#define ARIZONA_AIF2TX1_ENA_MASK 0x0001 /* AIF2TX1_ENA */
+#define ARIZONA_AIF2TX1_ENA_SHIFT 0 /* AIF2TX1_ENA */
+#define ARIZONA_AIF2TX1_ENA_WIDTH 1 /* AIF2TX1_ENA */
+
+/*
+ * R1370 (0x55A) - AIF2 Rx Enables
+ */
+#define ARIZONA_AIF2RX6_ENA 0x0020 /* AIF2RX6_ENA */
+#define ARIZONA_AIF2RX6_ENA_MASK 0x0020 /* AIF2RX6_ENA */
+#define ARIZONA_AIF2RX6_ENA_SHIFT 5 /* AIF2RX6_ENA */
+#define ARIZONA_AIF2RX6_ENA_WIDTH 1 /* AIF2RX6_ENA */
+#define ARIZONA_AIF2RX5_ENA 0x0010 /* AIF2RX5_ENA */
+#define ARIZONA_AIF2RX5_ENA_MASK 0x0010 /* AIF2RX5_ENA */
+#define ARIZONA_AIF2RX5_ENA_SHIFT 4 /* AIF2RX5_ENA */
+#define ARIZONA_AIF2RX5_ENA_WIDTH 1 /* AIF2RX5_ENA */
+#define ARIZONA_AIF2RX4_ENA 0x0008 /* AIF2RX4_ENA */
+#define ARIZONA_AIF2RX4_ENA_MASK 0x0008 /* AIF2RX4_ENA */
+#define ARIZONA_AIF2RX4_ENA_SHIFT 3 /* AIF2RX4_ENA */
+#define ARIZONA_AIF2RX4_ENA_WIDTH 1 /* AIF2RX4_ENA */
+#define ARIZONA_AIF2RX3_ENA 0x0004 /* AIF2RX3_ENA */
+#define ARIZONA_AIF2RX3_ENA_MASK 0x0004 /* AIF2RX3_ENA */
+#define ARIZONA_AIF2RX3_ENA_SHIFT 2 /* AIF2RX3_ENA */
+#define ARIZONA_AIF2RX3_ENA_WIDTH 1 /* AIF2RX3_ENA */
+#define ARIZONA_AIF2RX2_ENA 0x0002 /* AIF2RX2_ENA */
+#define ARIZONA_AIF2RX2_ENA_MASK 0x0002 /* AIF2RX2_ENA */
+#define ARIZONA_AIF2RX2_ENA_SHIFT 1 /* AIF2RX2_ENA */
+#define ARIZONA_AIF2RX2_ENA_WIDTH 1 /* AIF2RX2_ENA */
+#define ARIZONA_AIF2RX1_ENA 0x0001 /* AIF2RX1_ENA */
+#define ARIZONA_AIF2RX1_ENA_MASK 0x0001 /* AIF2RX1_ENA */
+#define ARIZONA_AIF2RX1_ENA_SHIFT 0 /* AIF2RX1_ENA */
+#define ARIZONA_AIF2RX1_ENA_WIDTH 1 /* AIF2RX1_ENA */
+
+/*
+ * R1371 (0x55B) - AIF2 Force Write
+ */
+#define ARIZONA_AIF2_FRC_WR 0x0001 /* AIF2_FRC_WR */
+#define ARIZONA_AIF2_FRC_WR_MASK 0x0001 /* AIF2_FRC_WR */
+#define ARIZONA_AIF2_FRC_WR_SHIFT 0 /* AIF2_FRC_WR */
+#define ARIZONA_AIF2_FRC_WR_WIDTH 1 /* AIF2_FRC_WR */
+
+/*
+ * R1408 (0x580) - AIF3 BCLK Ctrl
+ */
+#define ARIZONA_AIF3_BCLK_INV 0x0080 /* AIF3_BCLK_INV */
+#define ARIZONA_AIF3_BCLK_INV_MASK 0x0080 /* AIF3_BCLK_INV */
+#define ARIZONA_AIF3_BCLK_INV_SHIFT 7 /* AIF3_BCLK_INV */
+#define ARIZONA_AIF3_BCLK_INV_WIDTH 1 /* AIF3_BCLK_INV */
+#define ARIZONA_AIF3_BCLK_FRC 0x0040 /* AIF3_BCLK_FRC */
+#define ARIZONA_AIF3_BCLK_FRC_MASK 0x0040 /* AIF3_BCLK_FRC */
+#define ARIZONA_AIF3_BCLK_FRC_SHIFT 6 /* AIF3_BCLK_FRC */
+#define ARIZONA_AIF3_BCLK_FRC_WIDTH 1 /* AIF3_BCLK_FRC */
+#define ARIZONA_AIF3_BCLK_MSTR 0x0020 /* AIF3_BCLK_MSTR */
+#define ARIZONA_AIF3_BCLK_MSTR_MASK 0x0020 /* AIF3_BCLK_MSTR */
+#define ARIZONA_AIF3_BCLK_MSTR_SHIFT 5 /* AIF3_BCLK_MSTR */
+#define ARIZONA_AIF3_BCLK_MSTR_WIDTH 1 /* AIF3_BCLK_MSTR */
+#define ARIZONA_AIF3_BCLK_FREQ_MASK 0x001F /* AIF3_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF3_BCLK_FREQ_SHIFT 0 /* AIF3_BCLK_FREQ - [4:0] */
+#define ARIZONA_AIF3_BCLK_FREQ_WIDTH 5 /* AIF3_BCLK_FREQ - [4:0] */
+
+/*
+ * R1409 (0x581) - AIF3 Tx Pin Ctrl
+ */
+#define ARIZONA_AIF3TX_DAT_TRI 0x0020 /* AIF3TX_DAT_TRI */
+#define ARIZONA_AIF3TX_DAT_TRI_MASK 0x0020 /* AIF3TX_DAT_TRI */
+#define ARIZONA_AIF3TX_DAT_TRI_SHIFT 5 /* AIF3TX_DAT_TRI */
+#define ARIZONA_AIF3TX_DAT_TRI_WIDTH 1 /* AIF3TX_DAT_TRI */
+#define ARIZONA_AIF3TX_LRCLK_SRC 0x0008 /* AIF3TX_LRCLK_SRC */
+#define ARIZONA_AIF3TX_LRCLK_SRC_MASK 0x0008 /* AIF3TX_LRCLK_SRC */
+#define ARIZONA_AIF3TX_LRCLK_SRC_SHIFT 3 /* AIF3TX_LRCLK_SRC */
+#define ARIZONA_AIF3TX_LRCLK_SRC_WIDTH 1 /* AIF3TX_LRCLK_SRC */
+#define ARIZONA_AIF3TX_LRCLK_INV 0x0004 /* AIF3TX_LRCLK_INV */
+#define ARIZONA_AIF3TX_LRCLK_INV_MASK 0x0004 /* AIF3TX_LRCLK_INV */
+#define ARIZONA_AIF3TX_LRCLK_INV_SHIFT 2 /* AIF3TX_LRCLK_INV */
+#define ARIZONA_AIF3TX_LRCLK_INV_WIDTH 1 /* AIF3TX_LRCLK_INV */
+#define ARIZONA_AIF3TX_LRCLK_FRC 0x0002 /* AIF3TX_LRCLK_FRC */
+#define ARIZONA_AIF3TX_LRCLK_FRC_MASK 0x0002 /* AIF3TX_LRCLK_FRC */
+#define ARIZONA_AIF3TX_LRCLK_FRC_SHIFT 1 /* AIF3TX_LRCLK_FRC */
+#define ARIZONA_AIF3TX_LRCLK_FRC_WIDTH 1 /* AIF3TX_LRCLK_FRC */
+#define ARIZONA_AIF3TX_LRCLK_MSTR 0x0001 /* AIF3TX_LRCLK_MSTR */
+#define ARIZONA_AIF3TX_LRCLK_MSTR_MASK 0x0001 /* AIF3TX_LRCLK_MSTR */
+#define ARIZONA_AIF3TX_LRCLK_MSTR_SHIFT 0 /* AIF3TX_LRCLK_MSTR */
+#define ARIZONA_AIF3TX_LRCLK_MSTR_WIDTH 1 /* AIF3TX_LRCLK_MSTR */
+
+/*
+ * R1410 (0x582) - AIF3 Rx Pin Ctrl
+ */
+#define ARIZONA_AIF3RX_LRCLK_INV 0x0004 /* AIF3RX_LRCLK_INV */
+#define ARIZONA_AIF3RX_LRCLK_INV_MASK 0x0004 /* AIF3RX_LRCLK_INV */
+#define ARIZONA_AIF3RX_LRCLK_INV_SHIFT 2 /* AIF3RX_LRCLK_INV */
+#define ARIZONA_AIF3RX_LRCLK_INV_WIDTH 1 /* AIF3RX_LRCLK_INV */
+#define ARIZONA_AIF3RX_LRCLK_FRC 0x0002 /* AIF3RX_LRCLK_FRC */
+#define ARIZONA_AIF3RX_LRCLK_FRC_MASK 0x0002 /* AIF3RX_LRCLK_FRC */
+#define ARIZONA_AIF3RX_LRCLK_FRC_SHIFT 1 /* AIF3RX_LRCLK_FRC */
+#define ARIZONA_AIF3RX_LRCLK_FRC_WIDTH 1 /* AIF3RX_LRCLK_FRC */
+#define ARIZONA_AIF3RX_LRCLK_MSTR 0x0001 /* AIF3RX_LRCLK_MSTR */
+#define ARIZONA_AIF3RX_LRCLK_MSTR_MASK 0x0001 /* AIF3RX_LRCLK_MSTR */
+#define ARIZONA_AIF3RX_LRCLK_MSTR_SHIFT 0 /* AIF3RX_LRCLK_MSTR */
+#define ARIZONA_AIF3RX_LRCLK_MSTR_WIDTH 1 /* AIF3RX_LRCLK_MSTR */
+
+/*
+ * R1411 (0x583) - AIF3 Rate Ctrl
+ */
+#define ARIZONA_AIF3_RATE_MASK 0x7800 /* AIF3_RATE - [14:11] */
+#define ARIZONA_AIF3_RATE_SHIFT 11 /* AIF3_RATE - [14:11] */
+#define ARIZONA_AIF3_RATE_WIDTH 4 /* AIF3_RATE - [14:11] */
+#define ARIZONA_AIF3_TRI 0x0040 /* AIF3_TRI */
+#define ARIZONA_AIF3_TRI_MASK 0x0040 /* AIF3_TRI */
+#define ARIZONA_AIF3_TRI_SHIFT 6 /* AIF3_TRI */
+#define ARIZONA_AIF3_TRI_WIDTH 1 /* AIF3_TRI */
+
+/*
+ * R1412 (0x584) - AIF3 Format
+ */
+#define ARIZONA_AIF3_FMT_MASK 0x0007 /* AIF3_FMT - [2:0] */
+#define ARIZONA_AIF3_FMT_SHIFT 0 /* AIF3_FMT - [2:0] */
+#define ARIZONA_AIF3_FMT_WIDTH 3 /* AIF3_FMT - [2:0] */
+
+/*
+ * R1413 (0x585) - AIF3 Tx BCLK Rate
+ */
+#define ARIZONA_AIF3TX_BCPF_MASK 0x1FFF /* AIF3TX_BCPF - [12:0] */
+#define ARIZONA_AIF3TX_BCPF_SHIFT 0 /* AIF3TX_BCPF - [12:0] */
+#define ARIZONA_AIF3TX_BCPF_WIDTH 13 /* AIF3TX_BCPF - [12:0] */
+
+/*
+ * R1414 (0x586) - AIF3 Rx BCLK Rate
+ */
+#define ARIZONA_AIF3RX_BCPF_MASK 0x1FFF /* AIF3RX_BCPF - [12:0] */
+#define ARIZONA_AIF3RX_BCPF_SHIFT 0 /* AIF3RX_BCPF - [12:0] */
+#define ARIZONA_AIF3RX_BCPF_WIDTH 13 /* AIF3RX_BCPF - [12:0] */
+
+/*
+ * R1415 (0x587) - AIF3 Frame Ctrl 1
+ */
+#define ARIZONA_AIF3TX_WL_MASK 0x3F00 /* AIF3TX_WL - [13:8] */
+#define ARIZONA_AIF3TX_WL_SHIFT 8 /* AIF3TX_WL - [13:8] */
+#define ARIZONA_AIF3TX_WL_WIDTH 6 /* AIF3TX_WL - [13:8] */
+#define ARIZONA_AIF3TX_SLOT_LEN_MASK 0x00FF /* AIF3TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF3TX_SLOT_LEN_SHIFT 0 /* AIF3TX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF3TX_SLOT_LEN_WIDTH 8 /* AIF3TX_SLOT_LEN - [7:0] */
+
+/*
+ * R1416 (0x588) - AIF3 Frame Ctrl 2
+ */
+#define ARIZONA_AIF3RX_WL_MASK 0x3F00 /* AIF3RX_WL - [13:8] */
+#define ARIZONA_AIF3RX_WL_SHIFT 8 /* AIF3RX_WL - [13:8] */
+#define ARIZONA_AIF3RX_WL_WIDTH 6 /* AIF3RX_WL - [13:8] */
+#define ARIZONA_AIF3RX_SLOT_LEN_MASK 0x00FF /* AIF3RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF3RX_SLOT_LEN_SHIFT 0 /* AIF3RX_SLOT_LEN - [7:0] */
+#define ARIZONA_AIF3RX_SLOT_LEN_WIDTH 8 /* AIF3RX_SLOT_LEN - [7:0] */
+
+/*
+ * R1417 (0x589) - AIF3 Frame Ctrl 3
+ */
+#define ARIZONA_AIF3TX1_SLOT_MASK 0x003F /* AIF3TX1_SLOT - [5:0] */
+#define ARIZONA_AIF3TX1_SLOT_SHIFT 0 /* AIF3TX1_SLOT - [5:0] */
+#define ARIZONA_AIF3TX1_SLOT_WIDTH 6 /* AIF3TX1_SLOT - [5:0] */
+
+/*
+ * R1418 (0x58A) - AIF3 Frame Ctrl 4
+ */
+#define ARIZONA_AIF3TX2_SLOT_MASK 0x003F /* AIF3TX2_SLOT - [5:0] */
+#define ARIZONA_AIF3TX2_SLOT_SHIFT 0 /* AIF3TX2_SLOT - [5:0] */
+#define ARIZONA_AIF3TX2_SLOT_WIDTH 6 /* AIF3TX2_SLOT - [5:0] */
+
+/*
+ * R1425 (0x591) - AIF3 Frame Ctrl 11
+ */
+#define ARIZONA_AIF3RX1_SLOT_MASK 0x003F /* AIF3RX1_SLOT - [5:0] */
+#define ARIZONA_AIF3RX1_SLOT_SHIFT 0 /* AIF3RX1_SLOT - [5:0] */
+#define ARIZONA_AIF3RX1_SLOT_WIDTH 6 /* AIF3RX1_SLOT - [5:0] */
+
+/*
+ * R1426 (0x592) - AIF3 Frame Ctrl 12
+ */
+#define ARIZONA_AIF3RX2_SLOT_MASK 0x003F /* AIF3RX2_SLOT - [5:0] */
+#define ARIZONA_AIF3RX2_SLOT_SHIFT 0 /* AIF3RX2_SLOT - [5:0] */
+#define ARIZONA_AIF3RX2_SLOT_WIDTH 6 /* AIF3RX2_SLOT - [5:0] */
+
+/*
+ * R1433 (0x599) - AIF3 Tx Enables
+ */
+#define ARIZONA_AIF3TX2_ENA 0x0002 /* AIF3TX2_ENA */
+#define ARIZONA_AIF3TX2_ENA_MASK 0x0002 /* AIF3TX2_ENA */
+#define ARIZONA_AIF3TX2_ENA_SHIFT 1 /* AIF3TX2_ENA */
+#define ARIZONA_AIF3TX2_ENA_WIDTH 1 /* AIF3TX2_ENA */
+#define ARIZONA_AIF3TX1_ENA 0x0001 /* AIF3TX1_ENA */
+#define ARIZONA_AIF3TX1_ENA_MASK 0x0001 /* AIF3TX1_ENA */
+#define ARIZONA_AIF3TX1_ENA_SHIFT 0 /* AIF3TX1_ENA */
+#define ARIZONA_AIF3TX1_ENA_WIDTH 1 /* AIF3TX1_ENA */
+
+/*
+ * R1434 (0x59A) - AIF3 Rx Enables
+ */
+#define ARIZONA_AIF3RX2_ENA 0x0002 /* AIF3RX2_ENA */
+#define ARIZONA_AIF3RX2_ENA_MASK 0x0002 /* AIF3RX2_ENA */
+#define ARIZONA_AIF3RX2_ENA_SHIFT 1 /* AIF3RX2_ENA */
+#define ARIZONA_AIF3RX2_ENA_WIDTH 1 /* AIF3RX2_ENA */
+#define ARIZONA_AIF3RX1_ENA 0x0001 /* AIF3RX1_ENA */
+#define ARIZONA_AIF3RX1_ENA_MASK 0x0001 /* AIF3RX1_ENA */
+#define ARIZONA_AIF3RX1_ENA_SHIFT 0 /* AIF3RX1_ENA */
+#define ARIZONA_AIF3RX1_ENA_WIDTH 1 /* AIF3RX1_ENA */
+
+/*
+ * R1435 (0x59B) - AIF3 Force Write
+ */
+#define ARIZONA_AIF3_FRC_WR 0x0001 /* AIF3_FRC_WR */
+#define ARIZONA_AIF3_FRC_WR_MASK 0x0001 /* AIF3_FRC_WR */
+#define ARIZONA_AIF3_FRC_WR_SHIFT 0 /* AIF3_FRC_WR */
+#define ARIZONA_AIF3_FRC_WR_WIDTH 1 /* AIF3_FRC_WR */
+
+/*
+ * R1507 (0x5E3) - SLIMbus Framer Ref Gear
+ */
+#define ARIZONA_SLIMCLK_SRC 0x0010 /* SLIMCLK_SRC */
+#define ARIZONA_SLIMCLK_SRC_MASK 0x0010 /* SLIMCLK_SRC */
+#define ARIZONA_SLIMCLK_SRC_SHIFT 4 /* SLIMCLK_SRC */
+#define ARIZONA_SLIMCLK_SRC_WIDTH 1 /* SLIMCLK_SRC */
+#define ARIZONA_FRAMER_REF_GEAR_MASK 0x000F /* FRAMER_REF_GEAR - [3:0] */
+#define ARIZONA_FRAMER_REF_GEAR_SHIFT 0 /* FRAMER_REF_GEAR - [3:0] */
+#define ARIZONA_FRAMER_REF_GEAR_WIDTH 4 /* FRAMER_REF_GEAR - [3:0] */
+
+/*
+ * R1509 (0x5E5) - SLIMbus Rates 1
+ */
+#define ARIZONA_SLIMRX2_RATE_MASK 0x7800 /* SLIMRX2_RATE - [14:11] */
+#define ARIZONA_SLIMRX2_RATE_SHIFT 11 /* SLIMRX2_RATE - [14:11] */
+#define ARIZONA_SLIMRX2_RATE_WIDTH 4 /* SLIMRX2_RATE - [14:11] */
+#define ARIZONA_SLIMRX1_RATE_MASK 0x0078 /* SLIMRX1_RATE - [6:3] */
+#define ARIZONA_SLIMRX1_RATE_SHIFT 3 /* SLIMRX1_RATE - [6:3] */
+#define ARIZONA_SLIMRX1_RATE_WIDTH 4 /* SLIMRX1_RATE - [6:3] */
+
+/*
+ * R1510 (0x5E6) - SLIMbus Rates 2
+ */
+#define ARIZONA_SLIMRX4_RATE_MASK 0x7800 /* SLIMRX4_RATE - [14:11] */
+#define ARIZONA_SLIMRX4_RATE_SHIFT 11 /* SLIMRX4_RATE - [14:11] */
+#define ARIZONA_SLIMRX4_RATE_WIDTH 4 /* SLIMRX4_RATE - [14:11] */
+#define ARIZONA_SLIMRX3_RATE_MASK 0x0078 /* SLIMRX3_RATE - [6:3] */
+#define ARIZONA_SLIMRX3_RATE_SHIFT 3 /* SLIMRX3_RATE - [6:3] */
+#define ARIZONA_SLIMRX3_RATE_WIDTH 4 /* SLIMRX3_RATE - [6:3] */
+
+/*
+ * R1511 (0x5E7) - SLIMbus Rates 3
+ */
+#define ARIZONA_SLIMRX6_RATE_MASK 0x7800 /* SLIMRX6_RATE - [14:11] */
+#define ARIZONA_SLIMRX6_RATE_SHIFT 11 /* SLIMRX6_RATE - [14:11] */
+#define ARIZONA_SLIMRX6_RATE_WIDTH 4 /* SLIMRX6_RATE - [14:11] */
+#define ARIZONA_SLIMRX5_RATE_MASK 0x0078 /* SLIMRX5_RATE - [6:3] */
+#define ARIZONA_SLIMRX5_RATE_SHIFT 3 /* SLIMRX5_RATE - [6:3] */
+#define ARIZONA_SLIMRX5_RATE_WIDTH 4 /* SLIMRX5_RATE - [6:3] */
+
+/*
+ * R1512 (0x5E8) - SLIMbus Rates 4
+ */
+#define ARIZONA_SLIMRX8_RATE_MASK 0x7800 /* SLIMRX8_RATE - [14:11] */
+#define ARIZONA_SLIMRX8_RATE_SHIFT 11 /* SLIMRX8_RATE - [14:11] */
+#define ARIZONA_SLIMRX8_RATE_WIDTH 4 /* SLIMRX8_RATE - [14:11] */
+#define ARIZONA_SLIMRX7_RATE_MASK 0x0078 /* SLIMRX7_RATE - [6:3] */
+#define ARIZONA_SLIMRX7_RATE_SHIFT 3 /* SLIMRX7_RATE - [6:3] */
+#define ARIZONA_SLIMRX7_RATE_WIDTH 4 /* SLIMRX7_RATE - [6:3] */
+
+/*
+ * R1513 (0x5E9) - SLIMbus Rates 5
+ */
+#define ARIZONA_SLIMTX2_RATE_MASK 0x7800 /* SLIMTX2_RATE - [14:11] */
+#define ARIZONA_SLIMTX2_RATE_SHIFT 11 /* SLIMTX2_RATE - [14:11] */
+#define ARIZONA_SLIMTX2_RATE_WIDTH 4 /* SLIMTX2_RATE - [14:11] */
+#define ARIZONA_SLIMTX1_RATE_MASK 0x0078 /* SLIMTX1_RATE - [6:3] */
+#define ARIZONA_SLIMTX1_RATE_SHIFT 3 /* SLIMTX1_RATE - [6:3] */
+#define ARIZONA_SLIMTX1_RATE_WIDTH 4 /* SLIMTX1_RATE - [6:3] */
+
+/*
+ * R1514 (0x5EA) - SLIMbus Rates 6
+ */
+#define ARIZONA_SLIMTX4_RATE_MASK 0x7800 /* SLIMTX4_RATE - [14:11] */
+#define ARIZONA_SLIMTX4_RATE_SHIFT 11 /* SLIMTX4_RATE - [14:11] */
+#define ARIZONA_SLIMTX4_RATE_WIDTH 4 /* SLIMTX4_RATE - [14:11] */
+#define ARIZONA_SLIMTX3_RATE_MASK 0x0078 /* SLIMTX3_RATE - [6:3] */
+#define ARIZONA_SLIMTX3_RATE_SHIFT 3 /* SLIMTX3_RATE - [6:3] */
+#define ARIZONA_SLIMTX3_RATE_WIDTH 4 /* SLIMTX3_RATE - [6:3] */
+
+/*
+ * R1515 (0x5EB) - SLIMbus Rates 7
+ */
+#define ARIZONA_SLIMTX6_RATE_MASK 0x7800 /* SLIMTX6_RATE - [14:11] */
+#define ARIZONA_SLIMTX6_RATE_SHIFT 11 /* SLIMTX6_RATE - [14:11] */
+#define ARIZONA_SLIMTX6_RATE_WIDTH 4 /* SLIMTX6_RATE - [14:11] */
+#define ARIZONA_SLIMTX5_RATE_MASK 0x0078 /* SLIMTX5_RATE - [6:3] */
+#define ARIZONA_SLIMTX5_RATE_SHIFT 3 /* SLIMTX5_RATE - [6:3] */
+#define ARIZONA_SLIMTX5_RATE_WIDTH 4 /* SLIMTX5_RATE - [6:3] */
+
+/*
+ * R1516 (0x5EC) - SLIMbus Rates 8
+ */
+#define ARIZONA_SLIMTX8_RATE_MASK 0x7800 /* SLIMTX8_RATE - [14:11] */
+#define ARIZONA_SLIMTX8_RATE_SHIFT 11 /* SLIMTX8_RATE - [14:11] */
+#define ARIZONA_SLIMTX8_RATE_WIDTH 4 /* SLIMTX8_RATE - [14:11] */
+#define ARIZONA_SLIMTX7_RATE_MASK 0x0078 /* SLIMTX7_RATE - [6:3] */
+#define ARIZONA_SLIMTX7_RATE_SHIFT 3 /* SLIMTX7_RATE - [6:3] */
+#define ARIZONA_SLIMTX7_RATE_WIDTH 4 /* SLIMTX7_RATE - [6:3] */
+
+/*
+ * R1525 (0x5F5) - SLIMbus RX Channel Enable
+ */
+#define ARIZONA_SLIMRX8_ENA 0x0080 /* SLIMRX8_ENA */
+#define ARIZONA_SLIMRX8_ENA_MASK 0x0080 /* SLIMRX8_ENA */
+#define ARIZONA_SLIMRX8_ENA_SHIFT 7 /* SLIMRX8_ENA */
+#define ARIZONA_SLIMRX8_ENA_WIDTH 1 /* SLIMRX8_ENA */
+#define ARIZONA_SLIMRX7_ENA 0x0040 /* SLIMRX7_ENA */
+#define ARIZONA_SLIMRX7_ENA_MASK 0x0040 /* SLIMRX7_ENA */
+#define ARIZONA_SLIMRX7_ENA_SHIFT 6 /* SLIMRX7_ENA */
+#define ARIZONA_SLIMRX7_ENA_WIDTH 1 /* SLIMRX7_ENA */
+#define ARIZONA_SLIMRX6_ENA 0x0020 /* SLIMRX6_ENA */
+#define ARIZONA_SLIMRX6_ENA_MASK 0x0020 /* SLIMRX6_ENA */
+#define ARIZONA_SLIMRX6_ENA_SHIFT 5 /* SLIMRX6_ENA */
+#define ARIZONA_SLIMRX6_ENA_WIDTH 1 /* SLIMRX6_ENA */
+#define ARIZONA_SLIMRX5_ENA 0x0010 /* SLIMRX5_ENA */
+#define ARIZONA_SLIMRX5_ENA_MASK 0x0010 /* SLIMRX5_ENA */
+#define ARIZONA_SLIMRX5_ENA_SHIFT 4 /* SLIMRX5_ENA */
+#define ARIZONA_SLIMRX5_ENA_WIDTH 1 /* SLIMRX5_ENA */
+#define ARIZONA_SLIMRX4_ENA 0x0008 /* SLIMRX4_ENA */
+#define ARIZONA_SLIMRX4_ENA_MASK 0x0008 /* SLIMRX4_ENA */
+#define ARIZONA_SLIMRX4_ENA_SHIFT 3 /* SLIMRX4_ENA */
+#define ARIZONA_SLIMRX4_ENA_WIDTH 1 /* SLIMRX4_ENA */
+#define ARIZONA_SLIMRX3_ENA 0x0004 /* SLIMRX3_ENA */
+#define ARIZONA_SLIMRX3_ENA_MASK 0x0004 /* SLIMRX3_ENA */
+#define ARIZONA_SLIMRX3_ENA_SHIFT 2 /* SLIMRX3_ENA */
+#define ARIZONA_SLIMRX3_ENA_WIDTH 1 /* SLIMRX3_ENA */
+#define ARIZONA_SLIMRX2_ENA 0x0002 /* SLIMRX2_ENA */
+#define ARIZONA_SLIMRX2_ENA_MASK 0x0002 /* SLIMRX2_ENA */
+#define ARIZONA_SLIMRX2_ENA_SHIFT 1 /* SLIMRX2_ENA */
+#define ARIZONA_SLIMRX2_ENA_WIDTH 1 /* SLIMRX2_ENA */
+#define ARIZONA_SLIMRX1_ENA 0x0001 /* SLIMRX1_ENA */
+#define ARIZONA_SLIMRX1_ENA_MASK 0x0001 /* SLIMRX1_ENA */
+#define ARIZONA_SLIMRX1_ENA_SHIFT 0 /* SLIMRX1_ENA */
+#define ARIZONA_SLIMRX1_ENA_WIDTH 1 /* SLIMRX1_ENA */
+
+/*
+ * R1526 (0x5F6) - SLIMbus TX Channel Enable
+ */
+#define ARIZONA_SLIMTX8_ENA 0x0080 /* SLIMTX8_ENA */
+#define ARIZONA_SLIMTX8_ENA_MASK 0x0080 /* SLIMTX8_ENA */
+#define ARIZONA_SLIMTX8_ENA_SHIFT 7 /* SLIMTX8_ENA */
+#define ARIZONA_SLIMTX8_ENA_WIDTH 1 /* SLIMTX8_ENA */
+#define ARIZONA_SLIMTX7_ENA 0x0040 /* SLIMTX7_ENA */
+#define ARIZONA_SLIMTX7_ENA_MASK 0x0040 /* SLIMTX7_ENA */
+#define ARIZONA_SLIMTX7_ENA_SHIFT 6 /* SLIMTX7_ENA */
+#define ARIZONA_SLIMTX7_ENA_WIDTH 1 /* SLIMTX7_ENA */
+#define ARIZONA_SLIMTX6_ENA 0x0020 /* SLIMTX6_ENA */
+#define ARIZONA_SLIMTX6_ENA_MASK 0x0020 /* SLIMTX6_ENA */
+#define ARIZONA_SLIMTX6_ENA_SHIFT 5 /* SLIMTX6_ENA */
+#define ARIZONA_SLIMTX6_ENA_WIDTH 1 /* SLIMTX6_ENA */
+#define ARIZONA_SLIMTX5_ENA 0x0010 /* SLIMTX5_ENA */
+#define ARIZONA_SLIMTX5_ENA_MASK 0x0010 /* SLIMTX5_ENA */
+#define ARIZONA_SLIMTX5_ENA_SHIFT 4 /* SLIMTX5_ENA */
+#define ARIZONA_SLIMTX5_ENA_WIDTH 1 /* SLIMTX5_ENA */
+#define ARIZONA_SLIMTX4_ENA 0x0008 /* SLIMTX4_ENA */
+#define ARIZONA_SLIMTX4_ENA_MASK 0x0008 /* SLIMTX4_ENA */
+#define ARIZONA_SLIMTX4_ENA_SHIFT 3 /* SLIMTX4_ENA */
+#define ARIZONA_SLIMTX4_ENA_WIDTH 1 /* SLIMTX4_ENA */
+#define ARIZONA_SLIMTX3_ENA 0x0004 /* SLIMTX3_ENA */
+#define ARIZONA_SLIMTX3_ENA_MASK 0x0004 /* SLIMTX3_ENA */
+#define ARIZONA_SLIMTX3_ENA_SHIFT 2 /* SLIMTX3_ENA */
+#define ARIZONA_SLIMTX3_ENA_WIDTH 1 /* SLIMTX3_ENA */
+#define ARIZONA_SLIMTX2_ENA 0x0002 /* SLIMTX2_ENA */
+#define ARIZONA_SLIMTX2_ENA_MASK 0x0002 /* SLIMTX2_ENA */
+#define ARIZONA_SLIMTX2_ENA_SHIFT 1 /* SLIMTX2_ENA */
+#define ARIZONA_SLIMTX2_ENA_WIDTH 1 /* SLIMTX2_ENA */
+#define ARIZONA_SLIMTX1_ENA 0x0001 /* SLIMTX1_ENA */
+#define ARIZONA_SLIMTX1_ENA_MASK 0x0001 /* SLIMTX1_ENA */
+#define ARIZONA_SLIMTX1_ENA_SHIFT 0 /* SLIMTX1_ENA */
+#define ARIZONA_SLIMTX1_ENA_WIDTH 1 /* SLIMTX1_ENA */
+
+/*
+ * R1527 (0x5F7) - SLIMbus RX Port Status
+ */
+#define ARIZONA_SLIMRX8_PORT_STS 0x0080 /* SLIMRX8_PORT_STS */
+#define ARIZONA_SLIMRX8_PORT_STS_MASK 0x0080 /* SLIMRX8_PORT_STS */
+#define ARIZONA_SLIMRX8_PORT_STS_SHIFT 7 /* SLIMRX8_PORT_STS */
+#define ARIZONA_SLIMRX8_PORT_STS_WIDTH 1 /* SLIMRX8_PORT_STS */
+#define ARIZONA_SLIMRX7_PORT_STS 0x0040 /* SLIMRX7_PORT_STS */
+#define ARIZONA_SLIMRX7_PORT_STS_MASK 0x0040 /* SLIMRX7_PORT_STS */
+#define ARIZONA_SLIMRX7_PORT_STS_SHIFT 6 /* SLIMRX7_PORT_STS */
+#define ARIZONA_SLIMRX7_PORT_STS_WIDTH 1 /* SLIMRX7_PORT_STS */
+#define ARIZONA_SLIMRX6_PORT_STS 0x0020 /* SLIMRX6_PORT_STS */
+#define ARIZONA_SLIMRX6_PORT_STS_MASK 0x0020 /* SLIMRX6_PORT_STS */
+#define ARIZONA_SLIMRX6_PORT_STS_SHIFT 5 /* SLIMRX6_PORT_STS */
+#define ARIZONA_SLIMRX6_PORT_STS_WIDTH 1 /* SLIMRX6_PORT_STS */
+#define ARIZONA_SLIMRX5_PORT_STS 0x0010 /* SLIMRX5_PORT_STS */
+#define ARIZONA_SLIMRX5_PORT_STS_MASK 0x0010 /* SLIMRX5_PORT_STS */
+#define ARIZONA_SLIMRX5_PORT_STS_SHIFT 4 /* SLIMRX5_PORT_STS */
+#define ARIZONA_SLIMRX5_PORT_STS_WIDTH 1 /* SLIMRX5_PORT_STS */
+#define ARIZONA_SLIMRX4_PORT_STS 0x0008 /* SLIMRX4_PORT_STS */
+#define ARIZONA_SLIMRX4_PORT_STS_MASK 0x0008 /* SLIMRX4_PORT_STS */
+#define ARIZONA_SLIMRX4_PORT_STS_SHIFT 3 /* SLIMRX4_PORT_STS */
+#define ARIZONA_SLIMRX4_PORT_STS_WIDTH 1 /* SLIMRX4_PORT_STS */
+#define ARIZONA_SLIMRX3_PORT_STS 0x0004 /* SLIMRX3_PORT_STS */
+#define ARIZONA_SLIMRX3_PORT_STS_MASK 0x0004 /* SLIMRX3_PORT_STS */
+#define ARIZONA_SLIMRX3_PORT_STS_SHIFT 2 /* SLIMRX3_PORT_STS */
+#define ARIZONA_SLIMRX3_PORT_STS_WIDTH 1 /* SLIMRX3_PORT_STS */
+#define ARIZONA_SLIMRX2_PORT_STS 0x0002 /* SLIMRX2_PORT_STS */
+#define ARIZONA_SLIMRX2_PORT_STS_MASK 0x0002 /* SLIMRX2_PORT_STS */
+#define ARIZONA_SLIMRX2_PORT_STS_SHIFT 1 /* SLIMRX2_PORT_STS */
+#define ARIZONA_SLIMRX2_PORT_STS_WIDTH 1 /* SLIMRX2_PORT_STS */
+#define ARIZONA_SLIMRX1_PORT_STS 0x0001 /* SLIMRX1_PORT_STS */
+#define ARIZONA_SLIMRX1_PORT_STS_MASK 0x0001 /* SLIMRX1_PORT_STS */
+#define ARIZONA_SLIMRX1_PORT_STS_SHIFT 0 /* SLIMRX1_PORT_STS */
+#define ARIZONA_SLIMRX1_PORT_STS_WIDTH 1 /* SLIMRX1_PORT_STS */
+
+/*
+ * R1528 (0x5F8) - SLIMbus TX Port Status
+ */
+#define ARIZONA_SLIMTX8_PORT_STS 0x0080 /* SLIMTX8_PORT_STS */
+#define ARIZONA_SLIMTX8_PORT_STS_MASK 0x0080 /* SLIMTX8_PORT_STS */
+#define ARIZONA_SLIMTX8_PORT_STS_SHIFT 7 /* SLIMTX8_PORT_STS */
+#define ARIZONA_SLIMTX8_PORT_STS_WIDTH 1 /* SLIMTX8_PORT_STS */
+#define ARIZONA_SLIMTX7_PORT_STS 0x0040 /* SLIMTX7_PORT_STS */
+#define ARIZONA_SLIMTX7_PORT_STS_MASK 0x0040 /* SLIMTX7_PORT_STS */
+#define ARIZONA_SLIMTX7_PORT_STS_SHIFT 6 /* SLIMTX7_PORT_STS */
+#define ARIZONA_SLIMTX7_PORT_STS_WIDTH 1 /* SLIMTX7_PORT_STS */
+#define ARIZONA_SLIMTX6_PORT_STS 0x0020 /* SLIMTX6_PORT_STS */
+#define ARIZONA_SLIMTX6_PORT_STS_MASK 0x0020 /* SLIMTX6_PORT_STS */
+#define ARIZONA_SLIMTX6_PORT_STS_SHIFT 5 /* SLIMTX6_PORT_STS */
+#define ARIZONA_SLIMTX6_PORT_STS_WIDTH 1 /* SLIMTX6_PORT_STS */
+#define ARIZONA_SLIMTX5_PORT_STS 0x0010 /* SLIMTX5_PORT_STS */
+#define ARIZONA_SLIMTX5_PORT_STS_MASK 0x0010 /* SLIMTX5_PORT_STS */
+#define ARIZONA_SLIMTX5_PORT_STS_SHIFT 4 /* SLIMTX5_PORT_STS */
+#define ARIZONA_SLIMTX5_PORT_STS_WIDTH 1 /* SLIMTX5_PORT_STS */
+#define ARIZONA_SLIMTX4_PORT_STS 0x0008 /* SLIMTX4_PORT_STS */
+#define ARIZONA_SLIMTX4_PORT_STS_MASK 0x0008 /* SLIMTX4_PORT_STS */
+#define ARIZONA_SLIMTX4_PORT_STS_SHIFT 3 /* SLIMTX4_PORT_STS */
+#define ARIZONA_SLIMTX4_PORT_STS_WIDTH 1 /* SLIMTX4_PORT_STS */
+#define ARIZONA_SLIMTX3_PORT_STS 0x0004 /* SLIMTX3_PORT_STS */
+#define ARIZONA_SLIMTX3_PORT_STS_MASK 0x0004 /* SLIMTX3_PORT_STS */
+#define ARIZONA_SLIMTX3_PORT_STS_SHIFT 2 /* SLIMTX3_PORT_STS */
+#define ARIZONA_SLIMTX3_PORT_STS_WIDTH 1 /* SLIMTX3_PORT_STS */
+#define ARIZONA_SLIMTX2_PORT_STS 0x0002 /* SLIMTX2_PORT_STS */
+#define ARIZONA_SLIMTX2_PORT_STS_MASK 0x0002 /* SLIMTX2_PORT_STS */
+#define ARIZONA_SLIMTX2_PORT_STS_SHIFT 1 /* SLIMTX2_PORT_STS */
+#define ARIZONA_SLIMTX2_PORT_STS_WIDTH 1 /* SLIMTX2_PORT_STS */
+#define ARIZONA_SLIMTX1_PORT_STS 0x0001 /* SLIMTX1_PORT_STS */
+#define ARIZONA_SLIMTX1_PORT_STS_MASK 0x0001 /* SLIMTX1_PORT_STS */
+#define ARIZONA_SLIMTX1_PORT_STS_SHIFT 0 /* SLIMTX1_PORT_STS */
+#define ARIZONA_SLIMTX1_PORT_STS_WIDTH 1 /* SLIMTX1_PORT_STS */
+
+/*
+ * R3087 (0xC0F) - IRQ CTRL 1
+ */
+#define ARIZONA_IRQ_POL 0x0400 /* IRQ_POL */
+#define ARIZONA_IRQ_POL_MASK 0x0400 /* IRQ_POL */
+#define ARIZONA_IRQ_POL_SHIFT 10 /* IRQ_POL */
+#define ARIZONA_IRQ_POL_WIDTH 1 /* IRQ_POL */
+#define ARIZONA_IRQ_OP_CFG 0x0200 /* IRQ_OP_CFG */
+#define ARIZONA_IRQ_OP_CFG_MASK 0x0200 /* IRQ_OP_CFG */
+#define ARIZONA_IRQ_OP_CFG_SHIFT 9 /* IRQ_OP_CFG */
+#define ARIZONA_IRQ_OP_CFG_WIDTH 1 /* IRQ_OP_CFG */
+
+/*
+ * R3088 (0xC10) - GPIO Debounce Config
+ */
+#define ARIZONA_GP_DBTIME_MASK 0xF000 /* GP_DBTIME - [15:12] */
+#define ARIZONA_GP_DBTIME_SHIFT 12 /* GP_DBTIME - [15:12] */
+#define ARIZONA_GP_DBTIME_WIDTH 4 /* GP_DBTIME - [15:12] */
+
+/*
+ * R3104 (0xC20) - Misc Pad Ctrl 1
+ */
+#define ARIZONA_LDO1ENA_PD 0x8000 /* LDO1ENA_PD */
+#define ARIZONA_LDO1ENA_PD_MASK 0x8000 /* LDO1ENA_PD */
+#define ARIZONA_LDO1ENA_PD_SHIFT 15 /* LDO1ENA_PD */
+#define ARIZONA_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */
+#define ARIZONA_MCLK2_PD 0x2000 /* MCLK2_PD */
+#define ARIZONA_MCLK2_PD_MASK 0x2000 /* MCLK2_PD */
+#define ARIZONA_MCLK2_PD_SHIFT 13 /* MCLK2_PD */
+#define ARIZONA_MCLK2_PD_WIDTH 1 /* MCLK2_PD */
+#define ARIZONA_RSTB_PU 0x0002 /* RSTB_PU */
+#define ARIZONA_RSTB_PU_MASK 0x0002 /* RSTB_PU */
+#define ARIZONA_RSTB_PU_SHIFT 1 /* RSTB_PU */
+#define ARIZONA_RSTB_PU_WIDTH 1 /* RSTB_PU */
+
+/*
+ * R3105 (0xC21) - Misc Pad Ctrl 2
+ */
+#define ARIZONA_MCLK1_PD 0x1000 /* MCLK1_PD */
+#define ARIZONA_MCLK1_PD_MASK 0x1000 /* MCLK1_PD */
+#define ARIZONA_MCLK1_PD_SHIFT 12 /* MCLK1_PD */
+#define ARIZONA_MCLK1_PD_WIDTH 1 /* MCLK1_PD */
+#define ARIZONA_MICD_PD 0x0100 /* MICD_PD */
+#define ARIZONA_MICD_PD_MASK 0x0100 /* MICD_PD */
+#define ARIZONA_MICD_PD_SHIFT 8 /* MICD_PD */
+#define ARIZONA_MICD_PD_WIDTH 1 /* MICD_PD */
+#define ARIZONA_ADDR_PD 0x0001 /* ADDR_PD */
+#define ARIZONA_ADDR_PD_MASK 0x0001 /* ADDR_PD */
+#define ARIZONA_ADDR_PD_SHIFT 0 /* ADDR_PD */
+#define ARIZONA_ADDR_PD_WIDTH 1 /* ADDR_PD */
+
+/*
+ * R3106 (0xC22) - Misc Pad Ctrl 3
+ */
+#define ARIZONA_DMICDAT4_PD 0x0008 /* DMICDAT4_PD */
+#define ARIZONA_DMICDAT4_PD_MASK 0x0008 /* DMICDAT4_PD */
+#define ARIZONA_DMICDAT4_PD_SHIFT 3 /* DMICDAT4_PD */
+#define ARIZONA_DMICDAT4_PD_WIDTH 1 /* DMICDAT4_PD */
+#define ARIZONA_DMICDAT3_PD 0x0004 /* DMICDAT3_PD */
+#define ARIZONA_DMICDAT3_PD_MASK 0x0004 /* DMICDAT3_PD */
+#define ARIZONA_DMICDAT3_PD_SHIFT 2 /* DMICDAT3_PD */
+#define ARIZONA_DMICDAT3_PD_WIDTH 1 /* DMICDAT3_PD */
+#define ARIZONA_DMICDAT2_PD 0x0002 /* DMICDAT2_PD */
+#define ARIZONA_DMICDAT2_PD_MASK 0x0002 /* DMICDAT2_PD */
+#define ARIZONA_DMICDAT2_PD_SHIFT 1 /* DMICDAT2_PD */
+#define ARIZONA_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */
+#define ARIZONA_DMICDAT1_PD 0x0001 /* DMICDAT1_PD */
+#define ARIZONA_DMICDAT1_PD_MASK 0x0001 /* DMICDAT1_PD */
+#define ARIZONA_DMICDAT1_PD_SHIFT 0 /* DMICDAT1_PD */
+#define ARIZONA_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */
+
+/*
+ * R3107 (0xC23) - Misc Pad Ctrl 4
+ */
+#define ARIZONA_AIF1RXLRCLK_PU 0x0020 /* AIF1RXLRCLK_PU */
+#define ARIZONA_AIF1RXLRCLK_PU_MASK 0x0020 /* AIF1RXLRCLK_PU */
+#define ARIZONA_AIF1RXLRCLK_PU_SHIFT 5 /* AIF1RXLRCLK_PU */
+#define ARIZONA_AIF1RXLRCLK_PU_WIDTH 1 /* AIF1RXLRCLK_PU */
+#define ARIZONA_AIF1RXLRCLK_PD 0x0010 /* AIF1RXLRCLK_PD */
+#define ARIZONA_AIF1RXLRCLK_PD_MASK 0x0010 /* AIF1RXLRCLK_PD */
+#define ARIZONA_AIF1RXLRCLK_PD_SHIFT 4 /* AIF1RXLRCLK_PD */
+#define ARIZONA_AIF1RXLRCLK_PD_WIDTH 1 /* AIF1RXLRCLK_PD */
+#define ARIZONA_AIF1BCLK_PU 0x0008 /* AIF1BCLK_PU */
+#define ARIZONA_AIF1BCLK_PU_MASK 0x0008 /* AIF1BCLK_PU */
+#define ARIZONA_AIF1BCLK_PU_SHIFT 3 /* AIF1BCLK_PU */
+#define ARIZONA_AIF1BCLK_PU_WIDTH 1 /* AIF1BCLK_PU */
+#define ARIZONA_AIF1BCLK_PD 0x0004 /* AIF1BCLK_PD */
+#define ARIZONA_AIF1BCLK_PD_MASK 0x0004 /* AIF1BCLK_PD */
+#define ARIZONA_AIF1BCLK_PD_SHIFT 2 /* AIF1BCLK_PD */
+#define ARIZONA_AIF1BCLK_PD_WIDTH 1 /* AIF1BCLK_PD */
+#define ARIZONA_AIF1RXDAT_PU 0x0002 /* AIF1RXDAT_PU */
+#define ARIZONA_AIF1RXDAT_PU_MASK 0x0002 /* AIF1RXDAT_PU */
+#define ARIZONA_AIF1RXDAT_PU_SHIFT 1 /* AIF1RXDAT_PU */
+#define ARIZONA_AIF1RXDAT_PU_WIDTH 1 /* AIF1RXDAT_PU */
+#define ARIZONA_AIF1RXDAT_PD 0x0001 /* AIF1RXDAT_PD */
+#define ARIZONA_AIF1RXDAT_PD_MASK 0x0001 /* AIF1RXDAT_PD */
+#define ARIZONA_AIF1RXDAT_PD_SHIFT 0 /* AIF1RXDAT_PD */
+#define ARIZONA_AIF1RXDAT_PD_WIDTH 1 /* AIF1RXDAT_PD */
+
+/*
+ * R3108 (0xC24) - Misc Pad Ctrl 5
+ */
+#define ARIZONA_AIF2RXLRCLK_PU 0x0020 /* AIF2RXLRCLK_PU */
+#define ARIZONA_AIF2RXLRCLK_PU_MASK 0x0020 /* AIF2RXLRCLK_PU */
+#define ARIZONA_AIF2RXLRCLK_PU_SHIFT 5 /* AIF2RXLRCLK_PU */
+#define ARIZONA_AIF2RXLRCLK_PU_WIDTH 1 /* AIF2RXLRCLK_PU */
+#define ARIZONA_AIF2RXLRCLK_PD 0x0010 /* AIF2RXLRCLK_PD */
+#define ARIZONA_AIF2RXLRCLK_PD_MASK 0x0010 /* AIF2RXLRCLK_PD */
+#define ARIZONA_AIF2RXLRCLK_PD_SHIFT 4 /* AIF2RXLRCLK_PD */
+#define ARIZONA_AIF2RXLRCLK_PD_WIDTH 1 /* AIF2RXLRCLK_PD */
+#define ARIZONA_AIF2BCLK_PU 0x0008 /* AIF2BCLK_PU */
+#define ARIZONA_AIF2BCLK_PU_MASK 0x0008 /* AIF2BCLK_PU */
+#define ARIZONA_AIF2BCLK_PU_SHIFT 3 /* AIF2BCLK_PU */
+#define ARIZONA_AIF2BCLK_PU_WIDTH 1 /* AIF2BCLK_PU */
+#define ARIZONA_AIF2BCLK_PD 0x0004 /* AIF2BCLK_PD */
+#define ARIZONA_AIF2BCLK_PD_MASK 0x0004 /* AIF2BCLK_PD */
+#define ARIZONA_AIF2BCLK_PD_SHIFT 2 /* AIF2BCLK_PD */
+#define ARIZONA_AIF2BCLK_PD_WIDTH 1 /* AIF2BCLK_PD */
+#define ARIZONA_AIF2RXDAT_PU 0x0002 /* AIF2RXDAT_PU */
+#define ARIZONA_AIF2RXDAT_PU_MASK 0x0002 /* AIF2RXDAT_PU */
+#define ARIZONA_AIF2RXDAT_PU_SHIFT 1 /* AIF2RXDAT_PU */
+#define ARIZONA_AIF2RXDAT_PU_WIDTH 1 /* AIF2RXDAT_PU */
+#define ARIZONA_AIF2RXDAT_PD 0x0001 /* AIF2RXDAT_PD */
+#define ARIZONA_AIF2RXDAT_PD_MASK 0x0001 /* AIF2RXDAT_PD */
+#define ARIZONA_AIF2RXDAT_PD_SHIFT 0 /* AIF2RXDAT_PD */
+#define ARIZONA_AIF2RXDAT_PD_WIDTH 1 /* AIF2RXDAT_PD */
+
+/*
+ * R3109 (0xC25) - Misc Pad Ctrl 6
+ */
+#define ARIZONA_AIF3RXLRCLK_PU 0x0020 /* AIF3RXLRCLK_PU */
+#define ARIZONA_AIF3RXLRCLK_PU_MASK 0x0020 /* AIF3RXLRCLK_PU */
+#define ARIZONA_AIF3RXLRCLK_PU_SHIFT 5 /* AIF3RXLRCLK_PU */
+#define ARIZONA_AIF3RXLRCLK_PU_WIDTH 1 /* AIF3RXLRCLK_PU */
+#define ARIZONA_AIF3RXLRCLK_PD 0x0010 /* AIF3RXLRCLK_PD */
+#define ARIZONA_AIF3RXLRCLK_PD_MASK 0x0010 /* AIF3RXLRCLK_PD */
+#define ARIZONA_AIF3RXLRCLK_PD_SHIFT 4 /* AIF3RXLRCLK_PD */
+#define ARIZONA_AIF3RXLRCLK_PD_WIDTH 1 /* AIF3RXLRCLK_PD */
+#define ARIZONA_AIF3BCLK_PU 0x0008 /* AIF3BCLK_PU */
+#define ARIZONA_AIF3BCLK_PU_MASK 0x0008 /* AIF3BCLK_PU */
+#define ARIZONA_AIF3BCLK_PU_SHIFT 3 /* AIF3BCLK_PU */
+#define ARIZONA_AIF3BCLK_PU_WIDTH 1 /* AIF3BCLK_PU */
+#define ARIZONA_AIF3BCLK_PD 0x0004 /* AIF3BCLK_PD */
+#define ARIZONA_AIF3BCLK_PD_MASK 0x0004 /* AIF3BCLK_PD */
+#define ARIZONA_AIF3BCLK_PD_SHIFT 2 /* AIF3BCLK_PD */
+#define ARIZONA_AIF3BCLK_PD_WIDTH 1 /* AIF3BCLK_PD */
+#define ARIZONA_AIF3RXDAT_PU 0x0002 /* AIF3RXDAT_PU */
+#define ARIZONA_AIF3RXDAT_PU_MASK 0x0002 /* AIF3RXDAT_PU */
+#define ARIZONA_AIF3RXDAT_PU_SHIFT 1 /* AIF3RXDAT_PU */
+#define ARIZONA_AIF3RXDAT_PU_WIDTH 1 /* AIF3RXDAT_PU */
+#define ARIZONA_AIF3RXDAT_PD 0x0001 /* AIF3RXDAT_PD */
+#define ARIZONA_AIF3RXDAT_PD_MASK 0x0001 /* AIF3RXDAT_PD */
+#define ARIZONA_AIF3RXDAT_PD_SHIFT 0 /* AIF3RXDAT_PD */
+#define ARIZONA_AIF3RXDAT_PD_WIDTH 1 /* AIF3RXDAT_PD */
+
+/*
+ * R3328 (0xD00) - Interrupt Status 1
+ */
+#define ARIZONA_GP4_EINT1 0x0008 /* GP4_EINT1 */
+#define ARIZONA_GP4_EINT1_MASK 0x0008 /* GP4_EINT1 */
+#define ARIZONA_GP4_EINT1_SHIFT 3 /* GP4_EINT1 */
+#define ARIZONA_GP4_EINT1_WIDTH 1 /* GP4_EINT1 */
+#define ARIZONA_GP3_EINT1 0x0004 /* GP3_EINT1 */
+#define ARIZONA_GP3_EINT1_MASK 0x0004 /* GP3_EINT1 */
+#define ARIZONA_GP3_EINT1_SHIFT 2 /* GP3_EINT1 */
+#define ARIZONA_GP3_EINT1_WIDTH 1 /* GP3_EINT1 */
+#define ARIZONA_GP2_EINT1 0x0002 /* GP2_EINT1 */
+#define ARIZONA_GP2_EINT1_MASK 0x0002 /* GP2_EINT1 */
+#define ARIZONA_GP2_EINT1_SHIFT 1 /* GP2_EINT1 */
+#define ARIZONA_GP2_EINT1_WIDTH 1 /* GP2_EINT1 */
+#define ARIZONA_GP1_EINT1 0x0001 /* GP1_EINT1 */
+#define ARIZONA_GP1_EINT1_MASK 0x0001 /* GP1_EINT1 */
+#define ARIZONA_GP1_EINT1_SHIFT 0 /* GP1_EINT1 */
+#define ARIZONA_GP1_EINT1_WIDTH 1 /* GP1_EINT1 */
+
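+/*
+ * Usage sketch: individual interrupt status bits are tested against the
+ * value read back from the status register.  Assuming
+ * ARIZONA_INTERRUPT_STATUS_1 is the register address macro defined
+ * earlier in this header, and that "regmap" and handle_gp1_irq() are
+ * hypothetical:
+ *
+ *   unsigned int val;
+ *
+ *   regmap_read(regmap, ARIZONA_INTERRUPT_STATUS_1, &val);
+ *   if (val & ARIZONA_GP1_EINT1)
+ *           handle_gp1_irq();
+ */
+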
+/*
+ * R3329 (0xD01) - Interrupt Status 2
+ */
+#define ARIZONA_DSP4_RAM_RDY_EINT1 0x0800 /* DSP4_RAM_RDY_EINT1 */
+#define ARIZONA_DSP4_RAM_RDY_EINT1_MASK 0x0800 /* DSP4_RAM_RDY_EINT1 */
+#define ARIZONA_DSP4_RAM_RDY_EINT1_SHIFT 11 /* DSP4_RAM_RDY_EINT1 */
+#define ARIZONA_DSP4_RAM_RDY_EINT1_WIDTH 1 /* DSP4_RAM_RDY_EINT1 */
+#define ARIZONA_DSP3_RAM_RDY_EINT1 0x0400 /* DSP3_RAM_RDY_EINT1 */
+#define ARIZONA_DSP3_RAM_RDY_EINT1_MASK 0x0400 /* DSP3_RAM_RDY_EINT1 */
+#define ARIZONA_DSP3_RAM_RDY_EINT1_SHIFT 10 /* DSP3_RAM_RDY_EINT1 */
+#define ARIZONA_DSP3_RAM_RDY_EINT1_WIDTH 1 /* DSP3_RAM_RDY_EINT1 */
+#define ARIZONA_DSP2_RAM_RDY_EINT1 0x0200 /* DSP2_RAM_RDY_EINT1 */
+#define ARIZONA_DSP2_RAM_RDY_EINT1_MASK 0x0200 /* DSP2_RAM_RDY_EINT1 */
+#define ARIZONA_DSP2_RAM_RDY_EINT1_SHIFT 9 /* DSP2_RAM_RDY_EINT1 */
+#define ARIZONA_DSP2_RAM_RDY_EINT1_WIDTH 1 /* DSP2_RAM_RDY_EINT1 */
+#define ARIZONA_DSP1_RAM_RDY_EINT1 0x0100 /* DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_DSP1_RAM_RDY_EINT1_MASK 0x0100 /* DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_DSP1_RAM_RDY_EINT1_SHIFT 8 /* DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_DSP1_RAM_RDY_EINT1_WIDTH 1 /* DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_DSP_IRQ8_EINT1 0x0080 /* DSP_IRQ8_EINT1 */
+#define ARIZONA_DSP_IRQ8_EINT1_MASK 0x0080 /* DSP_IRQ8_EINT1 */
+#define ARIZONA_DSP_IRQ8_EINT1_SHIFT 7 /* DSP_IRQ8_EINT1 */
+#define ARIZONA_DSP_IRQ8_EINT1_WIDTH 1 /* DSP_IRQ8_EINT1 */
+#define ARIZONA_DSP_IRQ7_EINT1 0x0040 /* DSP_IRQ7_EINT1 */
+#define ARIZONA_DSP_IRQ7_EINT1_MASK 0x0040 /* DSP_IRQ7_EINT1 */
+#define ARIZONA_DSP_IRQ7_EINT1_SHIFT 6 /* DSP_IRQ7_EINT1 */
+#define ARIZONA_DSP_IRQ7_EINT1_WIDTH 1 /* DSP_IRQ7_EINT1 */
+#define ARIZONA_DSP_IRQ6_EINT1 0x0020 /* DSP_IRQ6_EINT1 */
+#define ARIZONA_DSP_IRQ6_EINT1_MASK 0x0020 /* DSP_IRQ6_EINT1 */
+#define ARIZONA_DSP_IRQ6_EINT1_SHIFT 5 /* DSP_IRQ6_EINT1 */
+#define ARIZONA_DSP_IRQ6_EINT1_WIDTH 1 /* DSP_IRQ6_EINT1 */
+#define ARIZONA_DSP_IRQ5_EINT1 0x0010 /* DSP_IRQ5_EINT1 */
+#define ARIZONA_DSP_IRQ5_EINT1_MASK 0x0010 /* DSP_IRQ5_EINT1 */
+#define ARIZONA_DSP_IRQ5_EINT1_SHIFT 4 /* DSP_IRQ5_EINT1 */
+#define ARIZONA_DSP_IRQ5_EINT1_WIDTH 1 /* DSP_IRQ5_EINT1 */
+#define ARIZONA_DSP_IRQ4_EINT1 0x0008 /* DSP_IRQ4_EINT1 */
+#define ARIZONA_DSP_IRQ4_EINT1_MASK 0x0008 /* DSP_IRQ4_EINT1 */
+#define ARIZONA_DSP_IRQ4_EINT1_SHIFT 3 /* DSP_IRQ4_EINT1 */
+#define ARIZONA_DSP_IRQ4_EINT1_WIDTH 1 /* DSP_IRQ4_EINT1 */
+#define ARIZONA_DSP_IRQ3_EINT1 0x0004 /* DSP_IRQ3_EINT1 */
+#define ARIZONA_DSP_IRQ3_EINT1_MASK 0x0004 /* DSP_IRQ3_EINT1 */
+#define ARIZONA_DSP_IRQ3_EINT1_SHIFT 2 /* DSP_IRQ3_EINT1 */
+#define ARIZONA_DSP_IRQ3_EINT1_WIDTH 1 /* DSP_IRQ3_EINT1 */
+#define ARIZONA_DSP_IRQ2_EINT1 0x0002 /* DSP_IRQ2_EINT1 */
+#define ARIZONA_DSP_IRQ2_EINT1_MASK 0x0002 /* DSP_IRQ2_EINT1 */
+#define ARIZONA_DSP_IRQ2_EINT1_SHIFT 1 /* DSP_IRQ2_EINT1 */
+#define ARIZONA_DSP_IRQ2_EINT1_WIDTH 1 /* DSP_IRQ2_EINT1 */
+#define ARIZONA_DSP_IRQ1_EINT1 0x0001 /* DSP_IRQ1_EINT1 */
+#define ARIZONA_DSP_IRQ1_EINT1_MASK 0x0001 /* DSP_IRQ1_EINT1 */
+#define ARIZONA_DSP_IRQ1_EINT1_SHIFT 0 /* DSP_IRQ1_EINT1 */
+#define ARIZONA_DSP_IRQ1_EINT1_WIDTH 1 /* DSP_IRQ1_EINT1 */
+
+/*
+ * R3330 (0xD02) - Interrupt Status 3
+ */
+#define ARIZONA_SPK_OVERHEAT_WARN_EINT1 0x8000 /* SPK_OVERHEAT_WARN_EINT1 */
+#define ARIZONA_SPK_OVERHEAT_WARN_EINT1_MASK 0x8000 /* SPK_OVERHEAT_WARN_EINT1 */
+#define ARIZONA_SPK_OVERHEAT_WARN_EINT1_SHIFT 15 /* SPK_OVERHEAT_WARN_EINT1 */
+#define ARIZONA_SPK_OVERHEAT_WARN_EINT1_WIDTH 1 /* SPK_OVERHEAT_WARN_EINT1 */
+#define ARIZONA_SPK_OVERHEAT_EINT1 0x4000 /* SPK_OVERHEAT_EINT1 */
+#define ARIZONA_SPK_OVERHEAT_EINT1_MASK 0x4000 /* SPK_OVERHEAT_EINT1 */
+#define ARIZONA_SPK_OVERHEAT_EINT1_SHIFT 14 /* SPK_OVERHEAT_EINT1 */
+#define ARIZONA_SPK_OVERHEAT_EINT1_WIDTH 1 /* SPK_OVERHEAT_EINT1 */
+#define ARIZONA_HPDET_EINT1 0x2000 /* HPDET_EINT1 */
+#define ARIZONA_HPDET_EINT1_MASK 0x2000 /* HPDET_EINT1 */
+#define ARIZONA_HPDET_EINT1_SHIFT 13 /* HPDET_EINT1 */
+#define ARIZONA_HPDET_EINT1_WIDTH 1 /* HPDET_EINT1 */
+#define ARIZONA_MICDET_EINT1 0x1000 /* MICDET_EINT1 */
+#define ARIZONA_MICDET_EINT1_MASK 0x1000 /* MICDET_EINT1 */
+#define ARIZONA_MICDET_EINT1_SHIFT 12 /* MICDET_EINT1 */
+#define ARIZONA_MICDET_EINT1_WIDTH 1 /* MICDET_EINT1 */
+#define ARIZONA_WSEQ_DONE_EINT1 0x0800 /* WSEQ_DONE_EINT1 */
+#define ARIZONA_WSEQ_DONE_EINT1_MASK 0x0800 /* WSEQ_DONE_EINT1 */
+#define ARIZONA_WSEQ_DONE_EINT1_SHIFT 11 /* WSEQ_DONE_EINT1 */
+#define ARIZONA_WSEQ_DONE_EINT1_WIDTH 1 /* WSEQ_DONE_EINT1 */
+#define ARIZONA_DRC2_SIG_DET_EINT1 0x0400 /* DRC2_SIG_DET_EINT1 */
+#define ARIZONA_DRC2_SIG_DET_EINT1_MASK 0x0400 /* DRC2_SIG_DET_EINT1 */
+#define ARIZONA_DRC2_SIG_DET_EINT1_SHIFT 10 /* DRC2_SIG_DET_EINT1 */
+#define ARIZONA_DRC2_SIG_DET_EINT1_WIDTH 1 /* DRC2_SIG_DET_EINT1 */
+#define ARIZONA_DRC1_SIG_DET_EINT1 0x0200 /* DRC1_SIG_DET_EINT1 */
+#define ARIZONA_DRC1_SIG_DET_EINT1_MASK 0x0200 /* DRC1_SIG_DET_EINT1 */
+#define ARIZONA_DRC1_SIG_DET_EINT1_SHIFT 9 /* DRC1_SIG_DET_EINT1 */
+#define ARIZONA_DRC1_SIG_DET_EINT1_WIDTH 1 /* DRC1_SIG_DET_EINT1 */
+#define ARIZONA_ASRC2_LOCK_EINT1 0x0100 /* ASRC2_LOCK_EINT1 */
+#define ARIZONA_ASRC2_LOCK_EINT1_MASK 0x0100 /* ASRC2_LOCK_EINT1 */
+#define ARIZONA_ASRC2_LOCK_EINT1_SHIFT 8 /* ASRC2_LOCK_EINT1 */
+#define ARIZONA_ASRC2_LOCK_EINT1_WIDTH 1 /* ASRC2_LOCK_EINT1 */
+#define ARIZONA_ASRC1_LOCK_EINT1 0x0080 /* ASRC1_LOCK_EINT1 */
+#define ARIZONA_ASRC1_LOCK_EINT1_MASK 0x0080 /* ASRC1_LOCK_EINT1 */
+#define ARIZONA_ASRC1_LOCK_EINT1_SHIFT 7 /* ASRC1_LOCK_EINT1 */
+#define ARIZONA_ASRC1_LOCK_EINT1_WIDTH 1 /* ASRC1_LOCK_EINT1 */
+#define ARIZONA_UNDERCLOCKED_EINT1 0x0040 /* UNDERCLOCKED_EINT1 */
+#define ARIZONA_UNDERCLOCKED_EINT1_MASK 0x0040 /* UNDERCLOCKED_EINT1 */
+#define ARIZONA_UNDERCLOCKED_EINT1_SHIFT 6 /* UNDERCLOCKED_EINT1 */
+#define ARIZONA_UNDERCLOCKED_EINT1_WIDTH 1 /* UNDERCLOCKED_EINT1 */
+#define ARIZONA_OVERCLOCKED_EINT1 0x0020 /* OVERCLOCKED_EINT1 */
+#define ARIZONA_OVERCLOCKED_EINT1_MASK 0x0020 /* OVERCLOCKED_EINT1 */
+#define ARIZONA_OVERCLOCKED_EINT1_SHIFT 5 /* OVERCLOCKED_EINT1 */
+#define ARIZONA_OVERCLOCKED_EINT1_WIDTH 1 /* OVERCLOCKED_EINT1 */
+#define ARIZONA_FLL2_LOCK_EINT1 0x0008 /* FLL2_LOCK_EINT1 */
+#define ARIZONA_FLL2_LOCK_EINT1_MASK 0x0008 /* FLL2_LOCK_EINT1 */
+#define ARIZONA_FLL2_LOCK_EINT1_SHIFT 3 /* FLL2_LOCK_EINT1 */
+#define ARIZONA_FLL2_LOCK_EINT1_WIDTH 1 /* FLL2_LOCK_EINT1 */
+#define ARIZONA_FLL1_LOCK_EINT1 0x0004 /* FLL1_LOCK_EINT1 */
+#define ARIZONA_FLL1_LOCK_EINT1_MASK 0x0004 /* FLL1_LOCK_EINT1 */
+#define ARIZONA_FLL1_LOCK_EINT1_SHIFT 2 /* FLL1_LOCK_EINT1 */
+#define ARIZONA_FLL1_LOCK_EINT1_WIDTH 1 /* FLL1_LOCK_EINT1 */
+#define ARIZONA_CLKGEN_ERR_EINT1 0x0002 /* CLKGEN_ERR_EINT1 */
+#define ARIZONA_CLKGEN_ERR_EINT1_MASK 0x0002 /* CLKGEN_ERR_EINT1 */
+#define ARIZONA_CLKGEN_ERR_EINT1_SHIFT 1 /* CLKGEN_ERR_EINT1 */
+#define ARIZONA_CLKGEN_ERR_EINT1_WIDTH 1 /* CLKGEN_ERR_EINT1 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1 0x0001 /* CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_MASK 0x0001 /* CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_SHIFT 0 /* CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT1_WIDTH 1 /* CLKGEN_ERR_ASYNC_EINT1 */
+
+/*
+ * R3331 (0xD03) - Interrupt Status 4
+ */
+#define ARIZONA_ASRC_CFG_ERR_EINT1 0x8000 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_ASRC_CFG_ERR_EINT1_MASK 0x8000 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_ASRC_CFG_ERR_EINT1_SHIFT 15 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_ASRC_CFG_ERR_EINT1_WIDTH 1 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_AIF3_ERR_EINT1 0x4000 /* AIF3_ERR_EINT1 */
+#define ARIZONA_AIF3_ERR_EINT1_MASK 0x4000 /* AIF3_ERR_EINT1 */
+#define ARIZONA_AIF3_ERR_EINT1_SHIFT 14 /* AIF3_ERR_EINT1 */
+#define ARIZONA_AIF3_ERR_EINT1_WIDTH 1 /* AIF3_ERR_EINT1 */
+#define ARIZONA_AIF2_ERR_EINT1 0x2000 /* AIF2_ERR_EINT1 */
+#define ARIZONA_AIF2_ERR_EINT1_MASK 0x2000 /* AIF2_ERR_EINT1 */
+#define ARIZONA_AIF2_ERR_EINT1_SHIFT 13 /* AIF2_ERR_EINT1 */
+#define ARIZONA_AIF2_ERR_EINT1_WIDTH 1 /* AIF2_ERR_EINT1 */
+#define ARIZONA_AIF1_ERR_EINT1 0x1000 /* AIF1_ERR_EINT1 */
+#define ARIZONA_AIF1_ERR_EINT1_MASK 0x1000 /* AIF1_ERR_EINT1 */
+#define ARIZONA_AIF1_ERR_EINT1_SHIFT 12 /* AIF1_ERR_EINT1 */
+#define ARIZONA_AIF1_ERR_EINT1_WIDTH 1 /* AIF1_ERR_EINT1 */
+#define ARIZONA_CTRLIF_ERR_EINT1 0x0800 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_CTRLIF_ERR_EINT1_MASK 0x0800 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_CTRLIF_ERR_EINT1_SHIFT 11 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_CTRLIF_ERR_EINT1_WIDTH 1 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1 0x0400 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 10 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1 0x0200 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 9 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT1 0x0100 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT1_MASK 0x0100 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT1_SHIFT 8 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT1 0x0080 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT1_MASK 0x0080 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT1_SHIFT 7 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT1 0x0040 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT1_MASK 0x0040 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT1_SHIFT 6 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_HP3R_DONE_EINT1 0x0020 /* HP3R_DONE_EINT1 */
+#define ARIZONA_HP3R_DONE_EINT1_MASK 0x0020 /* HP3R_DONE_EINT1 */
+#define ARIZONA_HP3R_DONE_EINT1_SHIFT 5 /* HP3R_DONE_EINT1 */
+#define ARIZONA_HP3R_DONE_EINT1_WIDTH 1 /* HP3R_DONE_EINT1 */
+#define ARIZONA_HP3L_DONE_EINT1 0x0010 /* HP3L_DONE_EINT1 */
+#define ARIZONA_HP3L_DONE_EINT1_MASK 0x0010 /* HP3L_DONE_EINT1 */
+#define ARIZONA_HP3L_DONE_EINT1_SHIFT 4 /* HP3L_DONE_EINT1 */
+#define ARIZONA_HP3L_DONE_EINT1_WIDTH 1 /* HP3L_DONE_EINT1 */
+#define ARIZONA_HP2R_DONE_EINT1 0x0008 /* HP2R_DONE_EINT1 */
+#define ARIZONA_HP2R_DONE_EINT1_MASK 0x0008 /* HP2R_DONE_EINT1 */
+#define ARIZONA_HP2R_DONE_EINT1_SHIFT 3 /* HP2R_DONE_EINT1 */
+#define ARIZONA_HP2R_DONE_EINT1_WIDTH 1 /* HP2R_DONE_EINT1 */
+#define ARIZONA_HP2L_DONE_EINT1 0x0004 /* HP2L_DONE_EINT1 */
+#define ARIZONA_HP2L_DONE_EINT1_MASK 0x0004 /* HP2L_DONE_EINT1 */
+#define ARIZONA_HP2L_DONE_EINT1_SHIFT 2 /* HP2L_DONE_EINT1 */
+#define ARIZONA_HP2L_DONE_EINT1_WIDTH 1 /* HP2L_DONE_EINT1 */
+#define ARIZONA_HP1R_DONE_EINT1 0x0002 /* HP1R_DONE_EINT1 */
+#define ARIZONA_HP1R_DONE_EINT1_MASK 0x0002 /* HP1R_DONE_EINT1 */
+#define ARIZONA_HP1R_DONE_EINT1_SHIFT 1 /* HP1R_DONE_EINT1 */
+#define ARIZONA_HP1R_DONE_EINT1_WIDTH 1 /* HP1R_DONE_EINT1 */
+#define ARIZONA_HP1L_DONE_EINT1 0x0001 /* HP1L_DONE_EINT1 */
+#define ARIZONA_HP1L_DONE_EINT1_MASK 0x0001 /* HP1L_DONE_EINT1 */
+#define ARIZONA_HP1L_DONE_EINT1_SHIFT 0 /* HP1L_DONE_EINT1 */
+#define ARIZONA_HP1L_DONE_EINT1_WIDTH 1 /* HP1L_DONE_EINT1 */
+
+/*
+ * R3331 (0xD03) - Interrupt Status 4 (Alternate layout)
+ *
+ * Alternate layout used on later devices; only the fields that have moved
+ * are specified.
+ */
+#define ARIZONA_V2_AIF3_ERR_EINT1 0x8000 /* AIF3_ERR_EINT1 */
+#define ARIZONA_V2_AIF3_ERR_EINT1_MASK 0x8000 /* AIF3_ERR_EINT1 */
+#define ARIZONA_V2_AIF3_ERR_EINT1_SHIFT 15 /* AIF3_ERR_EINT1 */
+#define ARIZONA_V2_AIF3_ERR_EINT1_WIDTH 1 /* AIF3_ERR_EINT1 */
+#define ARIZONA_V2_AIF2_ERR_EINT1 0x4000 /* AIF2_ERR_EINT1 */
+#define ARIZONA_V2_AIF2_ERR_EINT1_MASK 0x4000 /* AIF2_ERR_EINT1 */
+#define ARIZONA_V2_AIF2_ERR_EINT1_SHIFT 14 /* AIF2_ERR_EINT1 */
+#define ARIZONA_V2_AIF2_ERR_EINT1_WIDTH 1 /* AIF2_ERR_EINT1 */
+#define ARIZONA_V2_AIF1_ERR_EINT1 0x2000 /* AIF1_ERR_EINT1 */
+#define ARIZONA_V2_AIF1_ERR_EINT1_MASK 0x2000 /* AIF1_ERR_EINT1 */
+#define ARIZONA_V2_AIF1_ERR_EINT1_SHIFT 13 /* AIF1_ERR_EINT1 */
+#define ARIZONA_V2_AIF1_ERR_EINT1_WIDTH 1 /* AIF1_ERR_EINT1 */
+#define ARIZONA_V2_CTRLIF_ERR_EINT1 0x1000 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_V2_CTRLIF_ERR_EINT1_MASK 0x1000 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_V2_CTRLIF_ERR_EINT1_SHIFT 12 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_V2_CTRLIF_ERR_EINT1_WIDTH 1 /* CTRLIF_ERR_EINT1 */
+#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1 0x0800 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0800 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 11 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1 0x0400 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0400 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 10 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT1 0x0200 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT1_MASK 0x0200 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT1_SHIFT 9 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_ISRC1_CFG_ERR_EINT1 0x0100 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC1_CFG_ERR_EINT1_MASK 0x0100 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC1_CFG_ERR_EINT1_SHIFT 8 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC2_CFG_ERR_EINT1 0x0080 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC2_CFG_ERR_EINT1_MASK 0x0080 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC2_CFG_ERR_EINT1_SHIFT 7 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC3_CFG_ERR_EINT1 0x0040 /* ISRC3_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC3_CFG_ERR_EINT1_MASK 0x0040 /* ISRC3_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC3_CFG_ERR_EINT1_SHIFT 6 /* ISRC3_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ISRC3_CFG_ERR_EINT1_WIDTH 1 /* ISRC3_CFG_ERR_EINT1 */
+
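+/*
+ * Usage sketch: on the later devices that use the alternate layout, the
+ * Interrupt Status 4 fields are decoded with the ARIZONA_V2_* definitions
+ * above; fields that are not listed keep their original positions.
+ * "later_device" is a hypothetical flag:
+ *
+ *   mask = later_device ? ARIZONA_V2_AIF3_ERR_EINT1_MASK
+ *                       : ARIZONA_AIF3_ERR_EINT1_MASK;
+ */
+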
+/*
+ * R3332 (0xD04) - Interrupt Status 5
+ */
+#define ARIZONA_BOOT_DONE_EINT1 0x0100 /* BOOT_DONE_EINT1 */
+#define ARIZONA_BOOT_DONE_EINT1_MASK 0x0100 /* BOOT_DONE_EINT1 */
+#define ARIZONA_BOOT_DONE_EINT1_SHIFT 8 /* BOOT_DONE_EINT1 */
+#define ARIZONA_BOOT_DONE_EINT1_WIDTH 1 /* BOOT_DONE_EINT1 */
+#define ARIZONA_DCS_DAC_DONE_EINT1 0x0080 /* DCS_DAC_DONE_EINT1 */
+#define ARIZONA_DCS_DAC_DONE_EINT1_MASK 0x0080 /* DCS_DAC_DONE_EINT1 */
+#define ARIZONA_DCS_DAC_DONE_EINT1_SHIFT 7 /* DCS_DAC_DONE_EINT1 */
+#define ARIZONA_DCS_DAC_DONE_EINT1_WIDTH 1 /* DCS_DAC_DONE_EINT1 */
+#define ARIZONA_DCS_HP_DONE_EINT1 0x0040 /* DCS_HP_DONE_EINT1 */
+#define ARIZONA_DCS_HP_DONE_EINT1_MASK 0x0040 /* DCS_HP_DONE_EINT1 */
+#define ARIZONA_DCS_HP_DONE_EINT1_SHIFT 6 /* DCS_HP_DONE_EINT1 */
+#define ARIZONA_DCS_HP_DONE_EINT1_WIDTH 1 /* DCS_HP_DONE_EINT1 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT1 0x0002 /* FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT1_MASK 0x0002 /* FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT1_SHIFT 1 /* FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT1_WIDTH 1 /* FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT1 0x0001 /* FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT1_MASK 0x0001 /* FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT1_SHIFT 0 /* FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT1_WIDTH 1 /* FLL1_CLOCK_OK_EINT1 */
+
+/*
+ * R3332 (0xD04) - Interrupt Status 5 (Alternate layout)
+ *
+ * Alternate layout used on later devices; only the fields that have moved
+ * are specified.
+ */
+#define ARIZONA_V2_ASRC_CFG_ERR_EINT1 0x0008 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ASRC_CFG_ERR_EINT1_MASK 0x0008 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ASRC_CFG_ERR_EINT1_SHIFT 3 /* ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_V2_ASRC_CFG_ERR_EINT1_WIDTH 1 /* ASRC_CFG_ERR_EINT1 */
+
+/*
+ * R3333 (0xD05) - Interrupt Status 6
+ */
+#define ARIZONA_DSP_SHARED_WR_COLL_EINT1 0x8000 /* DSP_SHARED_WR_COLL_EINT1 */
+#define ARIZONA_DSP_SHARED_WR_COLL_EINT1_MASK 0x8000 /* DSP_SHARED_WR_COLL_EINT1 */
+#define ARIZONA_DSP_SHARED_WR_COLL_EINT1_SHIFT 15 /* DSP_SHARED_WR_COLL_EINT1 */
+#define ARIZONA_DSP_SHARED_WR_COLL_EINT1_WIDTH 1 /* DSP_SHARED_WR_COLL_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_EINT1 0x4000 /* SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_EINT1_MASK 0x4000 /* SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_EINT1_SHIFT 14 /* SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_SPK_SHUTDOWN_EINT1_WIDTH 1 /* SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_SPK1R_SHORT_EINT1 0x2000 /* SPK1R_SHORT_EINT1 */
+#define ARIZONA_SPK1R_SHORT_EINT1_MASK 0x2000 /* SPK1R_SHORT_EINT1 */
+#define ARIZONA_SPK1R_SHORT_EINT1_SHIFT 13 /* SPK1R_SHORT_EINT1 */
+#define ARIZONA_SPK1R_SHORT_EINT1_WIDTH 1 /* SPK1R_SHORT_EINT1 */
+#define ARIZONA_SPK1L_SHORT_EINT1 0x1000 /* SPK1L_SHORT_EINT1 */
+#define ARIZONA_SPK1L_SHORT_EINT1_MASK 0x1000 /* SPK1L_SHORT_EINT1 */
+#define ARIZONA_SPK1L_SHORT_EINT1_SHIFT 12 /* SPK1L_SHORT_EINT1 */
+#define ARIZONA_SPK1L_SHORT_EINT1_WIDTH 1 /* SPK1L_SHORT_EINT1 */
+#define ARIZONA_HP3R_SC_NEG_EINT1 0x0800 /* HP3R_SC_NEG_EINT1 */
+#define ARIZONA_HP3R_SC_NEG_EINT1_MASK 0x0800 /* HP3R_SC_NEG_EINT1 */
+#define ARIZONA_HP3R_SC_NEG_EINT1_SHIFT 11 /* HP3R_SC_NEG_EINT1 */
+#define ARIZONA_HP3R_SC_NEG_EINT1_WIDTH 1 /* HP3R_SC_NEG_EINT1 */
+#define ARIZONA_HP3R_SC_POS_EINT1 0x0400 /* HP3R_SC_POS_EINT1 */
+#define ARIZONA_HP3R_SC_POS_EINT1_MASK 0x0400 /* HP3R_SC_POS_EINT1 */
+#define ARIZONA_HP3R_SC_POS_EINT1_SHIFT 10 /* HP3R_SC_POS_EINT1 */
+#define ARIZONA_HP3R_SC_POS_EINT1_WIDTH 1 /* HP3R_SC_POS_EINT1 */
+#define ARIZONA_HP3L_SC_NEG_EINT1 0x0200 /* HP3L_SC_NEG_EINT1 */
+#define ARIZONA_HP3L_SC_NEG_EINT1_MASK 0x0200 /* HP3L_SC_NEG_EINT1 */
+#define ARIZONA_HP3L_SC_NEG_EINT1_SHIFT 9 /* HP3L_SC_NEG_EINT1 */
+#define ARIZONA_HP3L_SC_NEG_EINT1_WIDTH 1 /* HP3L_SC_NEG_EINT1 */
+#define ARIZONA_HP3L_SC_POS_EINT1 0x0100 /* HP3L_SC_POS_EINT1 */
+#define ARIZONA_HP3L_SC_POS_EINT1_MASK 0x0100 /* HP3L_SC_POS_EINT1 */
+#define ARIZONA_HP3L_SC_POS_EINT1_SHIFT 8 /* HP3L_SC_POS_EINT1 */
+#define ARIZONA_HP3L_SC_POS_EINT1_WIDTH 1 /* HP3L_SC_POS_EINT1 */
+#define ARIZONA_HP2R_SC_NEG_EINT1 0x0080 /* HP2R_SC_NEG_EINT1 */
+#define ARIZONA_HP2R_SC_NEG_EINT1_MASK 0x0080 /* HP2R_SC_NEG_EINT1 */
+#define ARIZONA_HP2R_SC_NEG_EINT1_SHIFT 7 /* HP2R_SC_NEG_EINT1 */
+#define ARIZONA_HP2R_SC_NEG_EINT1_WIDTH 1 /* HP2R_SC_NEG_EINT1 */
+#define ARIZONA_HP2R_SC_POS_EINT1 0x0040 /* HP2R_SC_POS_EINT1 */
+#define ARIZONA_HP2R_SC_POS_EINT1_MASK 0x0040 /* HP2R_SC_POS_EINT1 */
+#define ARIZONA_HP2R_SC_POS_EINT1_SHIFT 6 /* HP2R_SC_POS_EINT1 */
+#define ARIZONA_HP2R_SC_POS_EINT1_WIDTH 1 /* HP2R_SC_POS_EINT1 */
+#define ARIZONA_HP2L_SC_NEG_EINT1 0x0020 /* HP2L_SC_NEG_EINT1 */
+#define ARIZONA_HP2L_SC_NEG_EINT1_MASK 0x0020 /* HP2L_SC_NEG_EINT1 */
+#define ARIZONA_HP2L_SC_NEG_EINT1_SHIFT 5 /* HP2L_SC_NEG_EINT1 */
+#define ARIZONA_HP2L_SC_NEG_EINT1_WIDTH 1 /* HP2L_SC_NEG_EINT1 */
+#define ARIZONA_HP2L_SC_POS_EINT1 0x0010 /* HP2L_SC_POS_EINT1 */
+#define ARIZONA_HP2L_SC_POS_EINT1_MASK 0x0010 /* HP2L_SC_POS_EINT1 */
+#define ARIZONA_HP2L_SC_POS_EINT1_SHIFT 4 /* HP2L_SC_POS_EINT1 */
+#define ARIZONA_HP2L_SC_POS_EINT1_WIDTH 1 /* HP2L_SC_POS_EINT1 */
+#define ARIZONA_HP1R_SC_NEG_EINT1 0x0008 /* HP1R_SC_NEG_EINT1 */
+#define ARIZONA_HP1R_SC_NEG_EINT1_MASK 0x0008 /* HP1R_SC_NEG_EINT1 */
+#define ARIZONA_HP1R_SC_NEG_EINT1_SHIFT 3 /* HP1R_SC_NEG_EINT1 */
+#define ARIZONA_HP1R_SC_NEG_EINT1_WIDTH 1 /* HP1R_SC_NEG_EINT1 */
+#define ARIZONA_HP1R_SC_POS_EINT1 0x0004 /* HP1R_SC_POS_EINT1 */
+#define ARIZONA_HP1R_SC_POS_EINT1_MASK 0x0004 /* HP1R_SC_POS_EINT1 */
+#define ARIZONA_HP1R_SC_POS_EINT1_SHIFT 2 /* HP1R_SC_POS_EINT1 */
+#define ARIZONA_HP1R_SC_POS_EINT1_WIDTH 1 /* HP1R_SC_POS_EINT1 */
+#define ARIZONA_HP1L_SC_NEG_EINT1 0x0002 /* HP1L_SC_NEG_EINT1 */
+#define ARIZONA_HP1L_SC_NEG_EINT1_MASK 0x0002 /* HP1L_SC_NEG_EINT1 */
+#define ARIZONA_HP1L_SC_NEG_EINT1_SHIFT 1 /* HP1L_SC_NEG_EINT1 */
+#define ARIZONA_HP1L_SC_NEG_EINT1_WIDTH 1 /* HP1L_SC_NEG_EINT1 */
+#define ARIZONA_HP1L_SC_POS_EINT1 0x0001 /* HP1L_SC_POS_EINT1 */
+#define ARIZONA_HP1L_SC_POS_EINT1_MASK 0x0001 /* HP1L_SC_POS_EINT1 */
+#define ARIZONA_HP1L_SC_POS_EINT1_SHIFT 0 /* HP1L_SC_POS_EINT1 */
+#define ARIZONA_HP1L_SC_POS_EINT1_WIDTH 1 /* HP1L_SC_POS_EINT1 */
+
+/*
+ * R3336 (0xD08) - Interrupt Status 1 Mask
+ */
+#define ARIZONA_IM_GP4_EINT1 0x0008 /* IM_GP4_EINT1 */
+#define ARIZONA_IM_GP4_EINT1_MASK 0x0008 /* IM_GP4_EINT1 */
+#define ARIZONA_IM_GP4_EINT1_SHIFT 3 /* IM_GP4_EINT1 */
+#define ARIZONA_IM_GP4_EINT1_WIDTH 1 /* IM_GP4_EINT1 */
+#define ARIZONA_IM_GP3_EINT1 0x0004 /* IM_GP3_EINT1 */
+#define ARIZONA_IM_GP3_EINT1_MASK 0x0004 /* IM_GP3_EINT1 */
+#define ARIZONA_IM_GP3_EINT1_SHIFT 2 /* IM_GP3_EINT1 */
+#define ARIZONA_IM_GP3_EINT1_WIDTH 1 /* IM_GP3_EINT1 */
+#define ARIZONA_IM_GP2_EINT1 0x0002 /* IM_GP2_EINT1 */
+#define ARIZONA_IM_GP2_EINT1_MASK 0x0002 /* IM_GP2_EINT1 */
+#define ARIZONA_IM_GP2_EINT1_SHIFT 1 /* IM_GP2_EINT1 */
+#define ARIZONA_IM_GP2_EINT1_WIDTH 1 /* IM_GP2_EINT1 */
+#define ARIZONA_IM_GP1_EINT1 0x0001 /* IM_GP1_EINT1 */
+#define ARIZONA_IM_GP1_EINT1_MASK 0x0001 /* IM_GP1_EINT1 */
+#define ARIZONA_IM_GP1_EINT1_SHIFT 0 /* IM_GP1_EINT1 */
+#define ARIZONA_IM_GP1_EINT1_WIDTH 1 /* IM_GP1_EINT1 */
+
+/*
+ * R3337 (0xD09) - Interrupt Status 2 Mask
+ */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1 0x0100 /* IM_DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_MASK 0x0100 /* IM_DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_SHIFT 8 /* IM_DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT1_WIDTH 1 /* IM_DSP1_RAM_RDY_EINT1 */
+#define ARIZONA_IM_DSP_IRQ2_EINT1 0x0002 /* IM_DSP_IRQ2_EINT1 */
+#define ARIZONA_IM_DSP_IRQ2_EINT1_MASK 0x0002 /* IM_DSP_IRQ2_EINT1 */
+#define ARIZONA_IM_DSP_IRQ2_EINT1_SHIFT 1 /* IM_DSP_IRQ2_EINT1 */
+#define ARIZONA_IM_DSP_IRQ2_EINT1_WIDTH 1 /* IM_DSP_IRQ2_EINT1 */
+#define ARIZONA_IM_DSP_IRQ1_EINT1 0x0001 /* IM_DSP_IRQ1_EINT1 */
+#define ARIZONA_IM_DSP_IRQ1_EINT1_MASK 0x0001 /* IM_DSP_IRQ1_EINT1 */
+#define ARIZONA_IM_DSP_IRQ1_EINT1_SHIFT 0 /* IM_DSP_IRQ1_EINT1 */
+#define ARIZONA_IM_DSP_IRQ1_EINT1_WIDTH 1 /* IM_DSP_IRQ1_EINT1 */
+
+/*
+ * R3338 (0xD0A) - Interrupt Status 3 Mask
+ */
+#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT1 0x8000 /* IM_SPK_OVERHEAT_WARN_EINT1 */
+#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x8000 /* IM_SPK_OVERHEAT_WARN_EINT1 */
+#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 15 /* IM_SPK_OVERHEAT_WARN_EINT1 */
+#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT1_WIDTH 1 /* IM_SPK_OVERHEAT_WARN_EINT1 */
+#define ARIZONA_IM_SPK_OVERHEAT_EINT1 0x4000 /* IM_SPK_OVERHEAT_EINT1 */
+#define ARIZONA_IM_SPK_OVERHEAT_EINT1_MASK 0x4000 /* IM_SPK_OVERHEAT_EINT1 */
+#define ARIZONA_IM_SPK_OVERHEAT_EINT1_SHIFT 14 /* IM_SPK_OVERHEAT_EINT1 */
+#define ARIZONA_IM_SPK_OVERHEAT_EINT1_WIDTH 1 /* IM_SPK_OVERHEAT_EINT1 */
+#define ARIZONA_IM_HPDET_EINT1 0x2000 /* IM_HPDET_EINT1 */
+#define ARIZONA_IM_HPDET_EINT1_MASK 0x2000 /* IM_HPDET_EINT1 */
+#define ARIZONA_IM_HPDET_EINT1_SHIFT 13 /* IM_HPDET_EINT1 */
+#define ARIZONA_IM_HPDET_EINT1_WIDTH 1 /* IM_HPDET_EINT1 */
+#define ARIZONA_IM_MICDET_EINT1 0x1000 /* IM_MICDET_EINT1 */
+#define ARIZONA_IM_MICDET_EINT1_MASK 0x1000 /* IM_MICDET_EINT1 */
+#define ARIZONA_IM_MICDET_EINT1_SHIFT 12 /* IM_MICDET_EINT1 */
+#define ARIZONA_IM_MICDET_EINT1_WIDTH 1 /* IM_MICDET_EINT1 */
+#define ARIZONA_IM_WSEQ_DONE_EINT1 0x0800 /* IM_WSEQ_DONE_EINT1 */
+#define ARIZONA_IM_WSEQ_DONE_EINT1_MASK 0x0800 /* IM_WSEQ_DONE_EINT1 */
+#define ARIZONA_IM_WSEQ_DONE_EINT1_SHIFT 11 /* IM_WSEQ_DONE_EINT1 */
+#define ARIZONA_IM_WSEQ_DONE_EINT1_WIDTH 1 /* IM_WSEQ_DONE_EINT1 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT1 0x0400 /* IM_DRC2_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT1_MASK 0x0400 /* IM_DRC2_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT1_SHIFT 10 /* IM_DRC2_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT1_WIDTH 1 /* IM_DRC2_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT1 0x0200 /* IM_DRC1_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT1_MASK 0x0200 /* IM_DRC1_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT1_SHIFT 9 /* IM_DRC1_SIG_DET_EINT1 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT1_WIDTH 1 /* IM_DRC1_SIG_DET_EINT1 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT1 0x0100 /* IM_ASRC2_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT1_MASK 0x0100 /* IM_ASRC2_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT1_SHIFT 8 /* IM_ASRC2_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT1_WIDTH 1 /* IM_ASRC2_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT1 0x0080 /* IM_ASRC1_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT1_MASK 0x0080 /* IM_ASRC1_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT1_SHIFT 7 /* IM_ASRC1_LOCK_EINT1 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT1_WIDTH 1 /* IM_ASRC1_LOCK_EINT1 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT1 0x0040 /* IM_UNDERCLOCKED_EINT1 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT1_MASK 0x0040 /* IM_UNDERCLOCKED_EINT1 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT1_SHIFT 6 /* IM_UNDERCLOCKED_EINT1 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT1_WIDTH 1 /* IM_UNDERCLOCKED_EINT1 */
+#define ARIZONA_IM_OVERCLOCKED_EINT1 0x0020 /* IM_OVERCLOCKED_EINT1 */
+#define ARIZONA_IM_OVERCLOCKED_EINT1_MASK 0x0020 /* IM_OVERCLOCKED_EINT1 */
+#define ARIZONA_IM_OVERCLOCKED_EINT1_SHIFT 5 /* IM_OVERCLOCKED_EINT1 */
+#define ARIZONA_IM_OVERCLOCKED_EINT1_WIDTH 1 /* IM_OVERCLOCKED_EINT1 */
+#define ARIZONA_IM_FLL2_LOCK_EINT1 0x0008 /* IM_FLL2_LOCK_EINT1 */
+#define ARIZONA_IM_FLL2_LOCK_EINT1_MASK 0x0008 /* IM_FLL2_LOCK_EINT1 */
+#define ARIZONA_IM_FLL2_LOCK_EINT1_SHIFT 3 /* IM_FLL2_LOCK_EINT1 */
+#define ARIZONA_IM_FLL2_LOCK_EINT1_WIDTH 1 /* IM_FLL2_LOCK_EINT1 */
+#define ARIZONA_IM_FLL1_LOCK_EINT1 0x0004 /* IM_FLL1_LOCK_EINT1 */
+#define ARIZONA_IM_FLL1_LOCK_EINT1_MASK 0x0004 /* IM_FLL1_LOCK_EINT1 */
+#define ARIZONA_IM_FLL1_LOCK_EINT1_SHIFT 2 /* IM_FLL1_LOCK_EINT1 */
+#define ARIZONA_IM_FLL1_LOCK_EINT1_WIDTH 1 /* IM_FLL1_LOCK_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT1 0x0002 /* IM_CLKGEN_ERR_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT1_MASK 0x0002 /* IM_CLKGEN_ERR_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT1_SHIFT 1 /* IM_CLKGEN_ERR_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT1_WIDTH 1 /* IM_CLKGEN_ERR_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_MASK 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_SHIFT 0 /* IM_CLKGEN_ERR_ASYNC_EINT1 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT1_WIDTH 1 /* IM_CLKGEN_ERR_ASYNC_EINT1 */
+
+/*
+ * R3339 (0xD0B) - Interrupt Status 4 Mask
+ */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT1 0x8000 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_MASK 0x8000 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_SHIFT 15 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT1_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_IM_AIF3_ERR_EINT1 0x4000 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_IM_AIF3_ERR_EINT1_MASK 0x4000 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_IM_AIF3_ERR_EINT1_SHIFT 14 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_IM_AIF3_ERR_EINT1_WIDTH 1 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_IM_AIF2_ERR_EINT1 0x2000 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_IM_AIF2_ERR_EINT1_MASK 0x2000 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_IM_AIF2_ERR_EINT1_SHIFT 13 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_IM_AIF2_ERR_EINT1_WIDTH 1 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_IM_AIF1_ERR_EINT1 0x1000 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_IM_AIF1_ERR_EINT1_MASK 0x1000 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_IM_AIF1_ERR_EINT1_SHIFT 12 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_IM_AIF1_ERR_EINT1_WIDTH 1 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT1 0x0800 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT1_MASK 0x0800 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT1_SHIFT 11 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT1_WIDTH 1 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 10 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 9 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1 0x0100 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_MASK 0x0100 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_SHIFT 8 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1 0x0080 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_MASK 0x0080 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_SHIFT 7 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1 0x0040 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_MASK 0x0040 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_SHIFT 6 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_IM_HP3R_DONE_EINT1 0x0020 /* IM_HP3R_DONE_EINT1 */
+#define ARIZONA_IM_HP3R_DONE_EINT1_MASK 0x0020 /* IM_HP3R_DONE_EINT1 */
+#define ARIZONA_IM_HP3R_DONE_EINT1_SHIFT 5 /* IM_HP3R_DONE_EINT1 */
+#define ARIZONA_IM_HP3R_DONE_EINT1_WIDTH 1 /* IM_HP3R_DONE_EINT1 */
+#define ARIZONA_IM_HP3L_DONE_EINT1 0x0010 /* IM_HP3L_DONE_EINT1 */
+#define ARIZONA_IM_HP3L_DONE_EINT1_MASK 0x0010 /* IM_HP3L_DONE_EINT1 */
+#define ARIZONA_IM_HP3L_DONE_EINT1_SHIFT 4 /* IM_HP3L_DONE_EINT1 */
+#define ARIZONA_IM_HP3L_DONE_EINT1_WIDTH 1 /* IM_HP3L_DONE_EINT1 */
+#define ARIZONA_IM_HP2R_DONE_EINT1 0x0008 /* IM_HP2R_DONE_EINT1 */
+#define ARIZONA_IM_HP2R_DONE_EINT1_MASK 0x0008 /* IM_HP2R_DONE_EINT1 */
+#define ARIZONA_IM_HP2R_DONE_EINT1_SHIFT 3 /* IM_HP2R_DONE_EINT1 */
+#define ARIZONA_IM_HP2R_DONE_EINT1_WIDTH 1 /* IM_HP2R_DONE_EINT1 */
+#define ARIZONA_IM_HP2L_DONE_EINT1 0x0004 /* IM_HP2L_DONE_EINT1 */
+#define ARIZONA_IM_HP2L_DONE_EINT1_MASK 0x0004 /* IM_HP2L_DONE_EINT1 */
+#define ARIZONA_IM_HP2L_DONE_EINT1_SHIFT 2 /* IM_HP2L_DONE_EINT1 */
+#define ARIZONA_IM_HP2L_DONE_EINT1_WIDTH 1 /* IM_HP2L_DONE_EINT1 */
+#define ARIZONA_IM_HP1R_DONE_EINT1 0x0002 /* IM_HP1R_DONE_EINT1 */
+#define ARIZONA_IM_HP1R_DONE_EINT1_MASK 0x0002 /* IM_HP1R_DONE_EINT1 */
+#define ARIZONA_IM_HP1R_DONE_EINT1_SHIFT 1 /* IM_HP1R_DONE_EINT1 */
+#define ARIZONA_IM_HP1R_DONE_EINT1_WIDTH 1 /* IM_HP1R_DONE_EINT1 */
+#define ARIZONA_IM_HP1L_DONE_EINT1 0x0001 /* IM_HP1L_DONE_EINT1 */
+#define ARIZONA_IM_HP1L_DONE_EINT1_MASK 0x0001 /* IM_HP1L_DONE_EINT1 */
+#define ARIZONA_IM_HP1L_DONE_EINT1_SHIFT 0 /* IM_HP1L_DONE_EINT1 */
+#define ARIZONA_IM_HP1L_DONE_EINT1_WIDTH 1 /* IM_HP1L_DONE_EINT1 */
+
+/*
+ * R3339 (0xD0B) - Interrupt Status 4 Mask (Alternate layout)
+ *
+ * Alternate layout used on later devices; note that only fields that have
+ * moved are specified. A usage sketch follows these definitions.
+ */
+#define ARIZONA_V2_IM_AIF3_ERR_EINT1 0x8000 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF3_ERR_EINT1_MASK 0x8000 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF3_ERR_EINT1_SHIFT 15 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF3_ERR_EINT1_WIDTH 1 /* IM_AIF3_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF2_ERR_EINT1 0x4000 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF2_ERR_EINT1_MASK 0x4000 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF2_ERR_EINT1_SHIFT 14 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF2_ERR_EINT1_WIDTH 1 /* IM_AIF2_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF1_ERR_EINT1 0x2000 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF1_ERR_EINT1_MASK 0x2000 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF1_ERR_EINT1_SHIFT 13 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_V2_IM_AIF1_ERR_EINT1_WIDTH 1 /* IM_AIF1_ERR_EINT1 */
+#define ARIZONA_V2_IM_CTRLIF_ERR_EINT1 0x1000 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_V2_IM_CTRLIF_ERR_EINT1_MASK 0x1000 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_V2_IM_CTRLIF_ERR_EINT1_SHIFT 12 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_V2_IM_CTRLIF_ERR_EINT1_WIDTH 1 /* IM_CTRLIF_ERR_EINT1 */
+#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT1 0x0800 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT1_MASK 0x0800 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT1_SHIFT 11 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT1_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT1 */
+#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT1 0x0400 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT1_MASK 0x0400 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT1_SHIFT 10 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT1_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT1 0x0200 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT1_MASK 0x0200 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT1_SHIFT 9 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT1_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT1 */
+#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT1 0x0100 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT1_MASK 0x0100 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT1_SHIFT 8 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT1 0x0080 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT1_MASK 0x0080 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT1_SHIFT 7 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT1 0x0040 /* IM_ISRC3_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT1_MASK 0x0040 /* IM_ISRC3_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT1_SHIFT 6 /* IM_ISRC3_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT1_WIDTH 1 /* IM_ISRC3_CFG_ERR_EINT1 */
+
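/*
 * Editor's sketch (not part of this header): how a driver might pick between
 * the base and ARIZONA_V2_* field macros above.  The regmap handle, the
 * "newer_layout" flag and the helper name are assumptions for illustration;
 * the register address 0xD0B is taken from the R3339 comment above.
 */
#include <linux/regmap.h>

static int arizona_unmask_ctrlif_err(struct regmap *regmap, bool newer_layout)
{
	unsigned int mask = newer_layout ?
			    ARIZONA_V2_IM_CTRLIF_ERR_EINT1_MASK :
			    ARIZONA_IM_CTRLIF_ERR_EINT1_MASK;

	/* Clear the interrupt-mask bit so CTRLIF_ERR can assert IRQ1 */
	return regmap_update_bits(regmap, 0xD0B, mask, 0);
}
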
+/*
+ * R3340 (0xD0C) - Interrupt Status 5 Mask
+ */
+#define ARIZONA_IM_BOOT_DONE_EINT1 0x0100 /* IM_BOOT_DONE_EINT1 */
+#define ARIZONA_IM_BOOT_DONE_EINT1_MASK 0x0100 /* IM_BOOT_DONE_EINT1 */
+#define ARIZONA_IM_BOOT_DONE_EINT1_SHIFT 8 /* IM_BOOT_DONE_EINT1 */
+#define ARIZONA_IM_BOOT_DONE_EINT1_WIDTH 1 /* IM_BOOT_DONE_EINT1 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT1 0x0080 /* IM_DCS_DAC_DONE_EINT1 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT1_MASK 0x0080 /* IM_DCS_DAC_DONE_EINT1 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT1_SHIFT 7 /* IM_DCS_DAC_DONE_EINT1 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT1_WIDTH 1 /* IM_DCS_DAC_DONE_EINT1 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT1 0x0040 /* IM_DCS_HP_DONE_EINT1 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT1_MASK 0x0040 /* IM_DCS_HP_DONE_EINT1 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT1_SHIFT 6 /* IM_DCS_HP_DONE_EINT1 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT1_WIDTH 1 /* IM_DCS_HP_DONE_EINT1 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1 0x0002 /* IM_FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_MASK 0x0002 /* IM_FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_SHIFT 1 /* IM_FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT1_WIDTH 1 /* IM_FLL2_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1 0x0001 /* IM_FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_MASK 0x0001 /* IM_FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_SHIFT 0 /* IM_FLL1_CLOCK_OK_EINT1 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT1_WIDTH 1 /* IM_FLL1_CLOCK_OK_EINT1 */
+
+/*
+ * R3340 (0xD0C) - Interrupt Status 5 Mask (Alternate layout)
+ *
+ * Alternate layout used on later devices; note that only fields that have
+ * moved are specified.
+ */
+#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT1 0x0008 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT1_MASK 0x0008 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT1_SHIFT 3 /* IM_ASRC_CFG_ERR_EINT1 */
+#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT1_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT1 */
+
+/*
+ * R3341 (0xD0D) - Interrupt Status 6 Mask
+ */
+#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT1 0x8000 /* IM_DSP_SHARED_WR_COLL_EINT1 */
+#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT1_MASK 0x8000 /* IM_DSP_SHARED_WR_COLL_EINT1 */
+#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT1_SHIFT 15 /* IM_DSP_SHARED_WR_COLL_EINT1 */
+#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT1_WIDTH 1 /* IM_DSP_SHARED_WR_COLL_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT1 0x4000 /* IM_SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_MASK 0x4000 /* IM_SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_SHIFT 14 /* IM_SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT1_WIDTH 1 /* IM_SPK_SHUTDOWN_EINT1 */
+#define ARIZONA_IM_SPK1R_SHORT_EINT1 0x2000 /* IM_SPK1R_SHORT_EINT1 */
+#define ARIZONA_IM_SPK1R_SHORT_EINT1_MASK 0x2000 /* IM_SPK1R_SHORT_EINT1 */
+#define ARIZONA_IM_SPK1R_SHORT_EINT1_SHIFT 13 /* IM_SPK1R_SHORT_EINT1 */
+#define ARIZONA_IM_SPK1R_SHORT_EINT1_WIDTH 1 /* IM_SPK1R_SHORT_EINT1 */
+#define ARIZONA_IM_SPK1L_SHORT_EINT1 0x1000 /* IM_SPK1L_SHORT_EINT1 */
+#define ARIZONA_IM_SPK1L_SHORT_EINT1_MASK 0x1000 /* IM_SPK1L_SHORT_EINT1 */
+#define ARIZONA_IM_SPK1L_SHORT_EINT1_SHIFT 12 /* IM_SPK1L_SHORT_EINT1 */
+#define ARIZONA_IM_SPK1L_SHORT_EINT1_WIDTH 1 /* IM_SPK1L_SHORT_EINT1 */
+#define ARIZONA_IM_HP3R_SC_NEG_EINT1 0x0800 /* IM_HP3R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP3R_SC_NEG_EINT1_MASK 0x0800 /* IM_HP3R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP3R_SC_NEG_EINT1_SHIFT 11 /* IM_HP3R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP3R_SC_NEG_EINT1_WIDTH 1 /* IM_HP3R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP3R_SC_POS_EINT1 0x0400 /* IM_HP3R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP3R_SC_POS_EINT1_MASK 0x0400 /* IM_HP3R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP3R_SC_POS_EINT1_SHIFT 10 /* IM_HP3R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP3R_SC_POS_EINT1_WIDTH 1 /* IM_HP3R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP3L_SC_NEG_EINT1 0x0200 /* IM_HP3L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP3L_SC_NEG_EINT1_MASK 0x0200 /* IM_HP3L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP3L_SC_NEG_EINT1_SHIFT 9 /* IM_HP3L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP3L_SC_NEG_EINT1_WIDTH 1 /* IM_HP3L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP3L_SC_POS_EINT1 0x0100 /* IM_HP3L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP3L_SC_POS_EINT1_MASK 0x0100 /* IM_HP3L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP3L_SC_POS_EINT1_SHIFT 8 /* IM_HP3L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP3L_SC_POS_EINT1_WIDTH 1 /* IM_HP3L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP2R_SC_NEG_EINT1 0x0080 /* IM_HP2R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP2R_SC_NEG_EINT1_MASK 0x0080 /* IM_HP2R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP2R_SC_NEG_EINT1_SHIFT 7 /* IM_HP2R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP2R_SC_NEG_EINT1_WIDTH 1 /* IM_HP2R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP2R_SC_POS_EINT1 0x0040 /* IM_HP2R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP2R_SC_POS_EINT1_MASK 0x0040 /* IM_HP2R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP2R_SC_POS_EINT1_SHIFT 6 /* IM_HP2R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP2R_SC_POS_EINT1_WIDTH 1 /* IM_HP2R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP2L_SC_NEG_EINT1 0x0020 /* IM_HP2L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP2L_SC_NEG_EINT1_MASK 0x0020 /* IM_HP2L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP2L_SC_NEG_EINT1_SHIFT 5 /* IM_HP2L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP2L_SC_NEG_EINT1_WIDTH 1 /* IM_HP2L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP2L_SC_POS_EINT1 0x0010 /* IM_HP2L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP2L_SC_POS_EINT1_MASK 0x0010 /* IM_HP2L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP2L_SC_POS_EINT1_SHIFT 4 /* IM_HP2L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP2L_SC_POS_EINT1_WIDTH 1 /* IM_HP2L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP1R_SC_NEG_EINT1 0x0008 /* IM_HP1R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP1R_SC_NEG_EINT1_MASK 0x0008 /* IM_HP1R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP1R_SC_NEG_EINT1_SHIFT 3 /* IM_HP1R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP1R_SC_NEG_EINT1_WIDTH 1 /* IM_HP1R_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP1R_SC_POS_EINT1 0x0004 /* IM_HP1R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP1R_SC_POS_EINT1_MASK 0x0004 /* IM_HP1R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP1R_SC_POS_EINT1_SHIFT 2 /* IM_HP1R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP1R_SC_POS_EINT1_WIDTH 1 /* IM_HP1R_SC_POS_EINT1 */
+#define ARIZONA_IM_HP1L_SC_NEG_EINT1 0x0002 /* IM_HP1L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP1L_SC_NEG_EINT1_MASK 0x0002 /* IM_HP1L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP1L_SC_NEG_EINT1_SHIFT 1 /* IM_HP1L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP1L_SC_NEG_EINT1_WIDTH 1 /* IM_HP1L_SC_NEG_EINT1 */
+#define ARIZONA_IM_HP1L_SC_POS_EINT1 0x0001 /* IM_HP1L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP1L_SC_POS_EINT1_MASK 0x0001 /* IM_HP1L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP1L_SC_POS_EINT1_SHIFT 0 /* IM_HP1L_SC_POS_EINT1 */
+#define ARIZONA_IM_HP1L_SC_POS_EINT1_WIDTH 1 /* IM_HP1L_SC_POS_EINT1 */
+
+/*
+ * R3343 (0xD0F) - Interrupt Control
+ */
+#define ARIZONA_IM_IRQ1 0x0001 /* IM_IRQ1 */
+#define ARIZONA_IM_IRQ1_MASK 0x0001 /* IM_IRQ1 */
+#define ARIZONA_IM_IRQ1_SHIFT 0 /* IM_IRQ1 */
+#define ARIZONA_IM_IRQ1_WIDTH 1 /* IM_IRQ1 */
+
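/*
 * Editor's sketch (not part of this header): the single IM_IRQ1 bit in
 * Interrupt Control (0xD0F, per the R3343 comment above) reads as a
 * top-level mask for the whole IRQ1 line.  The helper name and the regmap
 * argument are assumptions for illustration.
 */
#include <linux/regmap.h>

static int arizona_mask_irq1(struct regmap *regmap, bool mask)
{
	/* Set IM_IRQ1 to gate IRQ1, clear it to let EINT1 sources through */
	return regmap_update_bits(regmap, 0xD0F, ARIZONA_IM_IRQ1_MASK,
				  mask ? ARIZONA_IM_IRQ1 : 0);
}
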
+/*
+ * R3344 (0xD10) - IRQ2 Status 1
+ */
+#define ARIZONA_GP4_EINT2 0x0008 /* GP4_EINT2 */
+#define ARIZONA_GP4_EINT2_MASK 0x0008 /* GP4_EINT2 */
+#define ARIZONA_GP4_EINT2_SHIFT 3 /* GP4_EINT2 */
+#define ARIZONA_GP4_EINT2_WIDTH 1 /* GP4_EINT2 */
+#define ARIZONA_GP3_EINT2 0x0004 /* GP3_EINT2 */
+#define ARIZONA_GP3_EINT2_MASK 0x0004 /* GP3_EINT2 */
+#define ARIZONA_GP3_EINT2_SHIFT 2 /* GP3_EINT2 */
+#define ARIZONA_GP3_EINT2_WIDTH 1 /* GP3_EINT2 */
+#define ARIZONA_GP2_EINT2 0x0002 /* GP2_EINT2 */
+#define ARIZONA_GP2_EINT2_MASK 0x0002 /* GP2_EINT2 */
+#define ARIZONA_GP2_EINT2_SHIFT 1 /* GP2_EINT2 */
+#define ARIZONA_GP2_EINT2_WIDTH 1 /* GP2_EINT2 */
+#define ARIZONA_GP1_EINT2 0x0001 /* GP1_EINT2 */
+#define ARIZONA_GP1_EINT2_MASK 0x0001 /* GP1_EINT2 */
+#define ARIZONA_GP1_EINT2_SHIFT 0 /* GP1_EINT2 */
+#define ARIZONA_GP1_EINT2_WIDTH 1 /* GP1_EINT2 */
+
+/*
+ * R3345 (0xD11) - IRQ2 Status 2
+ */
+#define ARIZONA_DSP1_RAM_RDY_EINT2 0x0100 /* DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_DSP1_RAM_RDY_EINT2_MASK 0x0100 /* DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_DSP1_RAM_RDY_EINT2_SHIFT 8 /* DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_DSP1_RAM_RDY_EINT2_WIDTH 1 /* DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_DSP_IRQ2_EINT2 0x0002 /* DSP_IRQ2_EINT2 */
+#define ARIZONA_DSP_IRQ2_EINT2_MASK 0x0002 /* DSP_IRQ2_EINT2 */
+#define ARIZONA_DSP_IRQ2_EINT2_SHIFT 1 /* DSP_IRQ2_EINT2 */
+#define ARIZONA_DSP_IRQ2_EINT2_WIDTH 1 /* DSP_IRQ2_EINT2 */
+#define ARIZONA_DSP_IRQ1_EINT2 0x0001 /* DSP_IRQ1_EINT2 */
+#define ARIZONA_DSP_IRQ1_EINT2_MASK 0x0001 /* DSP_IRQ1_EINT2 */
+#define ARIZONA_DSP_IRQ1_EINT2_SHIFT 0 /* DSP_IRQ1_EINT2 */
+#define ARIZONA_DSP_IRQ1_EINT2_WIDTH 1 /* DSP_IRQ1_EINT2 */
+
+/*
+ * R3346 (0xD12) - IRQ2 Status 3
+ */
+#define ARIZONA_SPK_OVERHEAT_WARN_EINT2 0x8000 /* SPK_OVERHEAT_WARN_EINT2 */
+#define ARIZONA_SPK_OVERHEAT_WARN_EINT2_MASK 0x8000 /* SPK_OVERHEAT_WARN_EINT2 */
+#define ARIZONA_SPK_OVERHEAT_WARN_EINT2_SHIFT 15 /* SPK_OVERHEAT_WARN_EINT2 */
+#define ARIZONA_SPK_OVERHEAT_WARN_EINT2_WIDTH 1 /* SPK_OVERHEAT_WARN_EINT2 */
+#define ARIZONA_SPK_OVERHEAT_EINT2 0x4000 /* SPK_OVERHEAT_EINT2 */
+#define ARIZONA_SPK_OVERHEAT_EINT2_MASK 0x4000 /* SPK_OVERHEAT_EINT2 */
+#define ARIZONA_SPK_OVERHEAT_EINT2_SHIFT 14 /* SPK_OVERHEAT_EINT2 */
+#define ARIZONA_SPK_OVERHEAT_EINT2_WIDTH 1 /* SPK_OVERHEAT_EINT2 */
+#define ARIZONA_HPDET_EINT2 0x2000 /* HPDET_EINT2 */
+#define ARIZONA_HPDET_EINT2_MASK 0x2000 /* HPDET_EINT2 */
+#define ARIZONA_HPDET_EINT2_SHIFT 13 /* HPDET_EINT2 */
+#define ARIZONA_HPDET_EINT2_WIDTH 1 /* HPDET_EINT2 */
+#define ARIZONA_MICDET_EINT2 0x1000 /* MICDET_EINT2 */
+#define ARIZONA_MICDET_EINT2_MASK 0x1000 /* MICDET_EINT2 */
+#define ARIZONA_MICDET_EINT2_SHIFT 12 /* MICDET_EINT2 */
+#define ARIZONA_MICDET_EINT2_WIDTH 1 /* MICDET_EINT2 */
+#define ARIZONA_WSEQ_DONE_EINT2 0x0800 /* WSEQ_DONE_EINT2 */
+#define ARIZONA_WSEQ_DONE_EINT2_MASK 0x0800 /* WSEQ_DONE_EINT2 */
+#define ARIZONA_WSEQ_DONE_EINT2_SHIFT 11 /* WSEQ_DONE_EINT2 */
+#define ARIZONA_WSEQ_DONE_EINT2_WIDTH 1 /* WSEQ_DONE_EINT2 */
+#define ARIZONA_DRC2_SIG_DET_EINT2 0x0400 /* DRC2_SIG_DET_EINT2 */
+#define ARIZONA_DRC2_SIG_DET_EINT2_MASK 0x0400 /* DRC2_SIG_DET_EINT2 */
+#define ARIZONA_DRC2_SIG_DET_EINT2_SHIFT 10 /* DRC2_SIG_DET_EINT2 */
+#define ARIZONA_DRC2_SIG_DET_EINT2_WIDTH 1 /* DRC2_SIG_DET_EINT2 */
+#define ARIZONA_DRC1_SIG_DET_EINT2 0x0200 /* DRC1_SIG_DET_EINT2 */
+#define ARIZONA_DRC1_SIG_DET_EINT2_MASK 0x0200 /* DRC1_SIG_DET_EINT2 */
+#define ARIZONA_DRC1_SIG_DET_EINT2_SHIFT 9 /* DRC1_SIG_DET_EINT2 */
+#define ARIZONA_DRC1_SIG_DET_EINT2_WIDTH 1 /* DRC1_SIG_DET_EINT2 */
+#define ARIZONA_ASRC2_LOCK_EINT2 0x0100 /* ASRC2_LOCK_EINT2 */
+#define ARIZONA_ASRC2_LOCK_EINT2_MASK 0x0100 /* ASRC2_LOCK_EINT2 */
+#define ARIZONA_ASRC2_LOCK_EINT2_SHIFT 8 /* ASRC2_LOCK_EINT2 */
+#define ARIZONA_ASRC2_LOCK_EINT2_WIDTH 1 /* ASRC2_LOCK_EINT2 */
+#define ARIZONA_ASRC1_LOCK_EINT2 0x0080 /* ASRC1_LOCK_EINT2 */
+#define ARIZONA_ASRC1_LOCK_EINT2_MASK 0x0080 /* ASRC1_LOCK_EINT2 */
+#define ARIZONA_ASRC1_LOCK_EINT2_SHIFT 7 /* ASRC1_LOCK_EINT2 */
+#define ARIZONA_ASRC1_LOCK_EINT2_WIDTH 1 /* ASRC1_LOCK_EINT2 */
+#define ARIZONA_UNDERCLOCKED_EINT2 0x0040 /* UNDERCLOCKED_EINT2 */
+#define ARIZONA_UNDERCLOCKED_EINT2_MASK 0x0040 /* UNDERCLOCKED_EINT2 */
+#define ARIZONA_UNDERCLOCKED_EINT2_SHIFT 6 /* UNDERCLOCKED_EINT2 */
+#define ARIZONA_UNDERCLOCKED_EINT2_WIDTH 1 /* UNDERCLOCKED_EINT2 */
+#define ARIZONA_OVERCLOCKED_EINT2 0x0020 /* OVERCLOCKED_EINT2 */
+#define ARIZONA_OVERCLOCKED_EINT2_MASK 0x0020 /* OVERCLOCKED_EINT2 */
+#define ARIZONA_OVERCLOCKED_EINT2_SHIFT 5 /* OVERCLOCKED_EINT2 */
+#define ARIZONA_OVERCLOCKED_EINT2_WIDTH 1 /* OVERCLOCKED_EINT2 */
+#define ARIZONA_FLL2_LOCK_EINT2 0x0008 /* FLL2_LOCK_EINT2 */
+#define ARIZONA_FLL2_LOCK_EINT2_MASK 0x0008 /* FLL2_LOCK_EINT2 */
+#define ARIZONA_FLL2_LOCK_EINT2_SHIFT 3 /* FLL2_LOCK_EINT2 */
+#define ARIZONA_FLL2_LOCK_EINT2_WIDTH 1 /* FLL2_LOCK_EINT2 */
+#define ARIZONA_FLL1_LOCK_EINT2 0x0004 /* FLL1_LOCK_EINT2 */
+#define ARIZONA_FLL1_LOCK_EINT2_MASK 0x0004 /* FLL1_LOCK_EINT2 */
+#define ARIZONA_FLL1_LOCK_EINT2_SHIFT 2 /* FLL1_LOCK_EINT2 */
+#define ARIZONA_FLL1_LOCK_EINT2_WIDTH 1 /* FLL1_LOCK_EINT2 */
+#define ARIZONA_CLKGEN_ERR_EINT2 0x0002 /* CLKGEN_ERR_EINT2 */
+#define ARIZONA_CLKGEN_ERR_EINT2_MASK 0x0002 /* CLKGEN_ERR_EINT2 */
+#define ARIZONA_CLKGEN_ERR_EINT2_SHIFT 1 /* CLKGEN_ERR_EINT2 */
+#define ARIZONA_CLKGEN_ERR_EINT2_WIDTH 1 /* CLKGEN_ERR_EINT2 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2 0x0001 /* CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_MASK 0x0001 /* CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_SHIFT 0 /* CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_CLKGEN_ERR_ASYNC_EINT2_WIDTH 1 /* CLKGEN_ERR_ASYNC_EINT2 */
+
+/*
+ * R3347 (0xD13) - IRQ2 Status 4
+ */
+#define ARIZONA_ASRC_CFG_ERR_EINT2 0x8000 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_ASRC_CFG_ERR_EINT2_MASK 0x8000 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_ASRC_CFG_ERR_EINT2_SHIFT 15 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_ASRC_CFG_ERR_EINT2_WIDTH 1 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_AIF3_ERR_EINT2 0x4000 /* AIF3_ERR_EINT2 */
+#define ARIZONA_AIF3_ERR_EINT2_MASK 0x4000 /* AIF3_ERR_EINT2 */
+#define ARIZONA_AIF3_ERR_EINT2_SHIFT 14 /* AIF3_ERR_EINT2 */
+#define ARIZONA_AIF3_ERR_EINT2_WIDTH 1 /* AIF3_ERR_EINT2 */
+#define ARIZONA_AIF2_ERR_EINT2 0x2000 /* AIF2_ERR_EINT2 */
+#define ARIZONA_AIF2_ERR_EINT2_MASK 0x2000 /* AIF2_ERR_EINT2 */
+#define ARIZONA_AIF2_ERR_EINT2_SHIFT 13 /* AIF2_ERR_EINT2 */
+#define ARIZONA_AIF2_ERR_EINT2_WIDTH 1 /* AIF2_ERR_EINT2 */
+#define ARIZONA_AIF1_ERR_EINT2 0x1000 /* AIF1_ERR_EINT2 */
+#define ARIZONA_AIF1_ERR_EINT2_MASK 0x1000 /* AIF1_ERR_EINT2 */
+#define ARIZONA_AIF1_ERR_EINT2_SHIFT 12 /* AIF1_ERR_EINT2 */
+#define ARIZONA_AIF1_ERR_EINT2_WIDTH 1 /* AIF1_ERR_EINT2 */
+#define ARIZONA_CTRLIF_ERR_EINT2 0x0800 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_CTRLIF_ERR_EINT2_MASK 0x0800 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_CTRLIF_ERR_EINT2_SHIFT 11 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_CTRLIF_ERR_EINT2_WIDTH 1 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2 0x0400 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 10 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2 0x0200 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 9 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT2 0x0100 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT2_MASK 0x0100 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT2_SHIFT 8 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT2 0x0080 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT2_MASK 0x0080 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT2_SHIFT 7 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT2 0x0040 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT2_MASK 0x0040 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT2_SHIFT 6 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_HP3R_DONE_EINT2 0x0020 /* HP3R_DONE_EINT2 */
+#define ARIZONA_HP3R_DONE_EINT2_MASK 0x0020 /* HP3R_DONE_EINT2 */
+#define ARIZONA_HP3R_DONE_EINT2_SHIFT 5 /* HP3R_DONE_EINT2 */
+#define ARIZONA_HP3R_DONE_EINT2_WIDTH 1 /* HP3R_DONE_EINT2 */
+#define ARIZONA_HP3L_DONE_EINT2 0x0010 /* HP3L_DONE_EINT2 */
+#define ARIZONA_HP3L_DONE_EINT2_MASK 0x0010 /* HP3L_DONE_EINT2 */
+#define ARIZONA_HP3L_DONE_EINT2_SHIFT 4 /* HP3L_DONE_EINT2 */
+#define ARIZONA_HP3L_DONE_EINT2_WIDTH 1 /* HP3L_DONE_EINT2 */
+#define ARIZONA_HP2R_DONE_EINT2 0x0008 /* HP2R_DONE_EINT2 */
+#define ARIZONA_HP2R_DONE_EINT2_MASK 0x0008 /* HP2R_DONE_EINT2 */
+#define ARIZONA_HP2R_DONE_EINT2_SHIFT 3 /* HP2R_DONE_EINT2 */
+#define ARIZONA_HP2R_DONE_EINT2_WIDTH 1 /* HP2R_DONE_EINT2 */
+#define ARIZONA_HP2L_DONE_EINT2 0x0004 /* HP2L_DONE_EINT2 */
+#define ARIZONA_HP2L_DONE_EINT2_MASK 0x0004 /* HP2L_DONE_EINT2 */
+#define ARIZONA_HP2L_DONE_EINT2_SHIFT 2 /* HP2L_DONE_EINT2 */
+#define ARIZONA_HP2L_DONE_EINT2_WIDTH 1 /* HP2L_DONE_EINT2 */
+#define ARIZONA_HP1R_DONE_EINT2 0x0002 /* HP1R_DONE_EINT2 */
+#define ARIZONA_HP1R_DONE_EINT2_MASK 0x0002 /* HP1R_DONE_EINT2 */
+#define ARIZONA_HP1R_DONE_EINT2_SHIFT 1 /* HP1R_DONE_EINT2 */
+#define ARIZONA_HP1R_DONE_EINT2_WIDTH 1 /* HP1R_DONE_EINT2 */
+#define ARIZONA_HP1L_DONE_EINT2 0x0001 /* HP1L_DONE_EINT2 */
+#define ARIZONA_HP1L_DONE_EINT2_MASK 0x0001 /* HP1L_DONE_EINT2 */
+#define ARIZONA_HP1L_DONE_EINT2_SHIFT 0 /* HP1L_DONE_EINT2 */
+#define ARIZONA_HP1L_DONE_EINT2_WIDTH 1 /* HP1L_DONE_EINT2 */
+
+/*
+ * R3347 (0xD13) - IRQ2 Status 4 (Alternate layout)
+ *
+ * Alternate layout used on later devices; note that only fields that have
+ * moved are specified.
+ */
+#define ARIZONA_V2_AIF3_ERR_EINT2 0x8000 /* AIF3_ERR_EINT2 */
+#define ARIZONA_V2_AIF3_ERR_EINT2_MASK 0x8000 /* AIF3_ERR_EINT2 */
+#define ARIZONA_V2_AIF3_ERR_EINT2_SHIFT 15 /* AIF3_ERR_EINT2 */
+#define ARIZONA_V2_AIF3_ERR_EINT2_WIDTH 1 /* AIF3_ERR_EINT2 */
+#define ARIZONA_V2_AIF2_ERR_EINT2 0x4000 /* AIF2_ERR_EINT2 */
+#define ARIZONA_V2_AIF2_ERR_EINT2_MASK 0x4000 /* AIF2_ERR_EINT2 */
+#define ARIZONA_V2_AIF2_ERR_EINT2_SHIFT 14 /* AIF2_ERR_EINT2 */
+#define ARIZONA_V2_AIF2_ERR_EINT2_WIDTH 1 /* AIF2_ERR_EINT2 */
+#define ARIZONA_V2_AIF1_ERR_EINT2 0x2000 /* AIF1_ERR_EINT2 */
+#define ARIZONA_V2_AIF1_ERR_EINT2_MASK 0x2000 /* AIF1_ERR_EINT2 */
+#define ARIZONA_V2_AIF1_ERR_EINT2_SHIFT 13 /* AIF1_ERR_EINT2 */
+#define ARIZONA_V2_AIF1_ERR_EINT2_WIDTH 1 /* AIF1_ERR_EINT2 */
+#define ARIZONA_V2_CTRLIF_ERR_EINT2 0x1000 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_V2_CTRLIF_ERR_EINT2_MASK 0x1000 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_V2_CTRLIF_ERR_EINT2_SHIFT 12 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_V2_CTRLIF_ERR_EINT2_WIDTH 1 /* CTRLIF_ERR_EINT2 */
+#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT2 0x0800 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0800 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 11 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT2 0x0400 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0400 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 10 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT2 0x0200 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT2_MASK 0x0200 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT2_SHIFT 9 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_ISRC1_CFG_ERR_EINT2 0x0100 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC1_CFG_ERR_EINT2_MASK 0x0100 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC1_CFG_ERR_EINT2_SHIFT 8 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC2_CFG_ERR_EINT2 0x0080 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC2_CFG_ERR_EINT2_MASK 0x0080 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC2_CFG_ERR_EINT2_SHIFT 7 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC3_CFG_ERR_EINT2 0x0040 /* ISRC3_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC3_CFG_ERR_EINT2_MASK 0x0040 /* ISRC3_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC3_CFG_ERR_EINT2_SHIFT 6 /* ISRC3_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ISRC3_CFG_ERR_EINT2_WIDTH 1 /* ISRC3_CFG_ERR_EINT2 */
+
+/*
+ * R3348 (0xD14) - IRQ2 Status 5
+ */
+#define ARIZONA_BOOT_DONE_EINT2 0x0100 /* BOOT_DONE_EINT2 */
+#define ARIZONA_BOOT_DONE_EINT2_MASK 0x0100 /* BOOT_DONE_EINT2 */
+#define ARIZONA_BOOT_DONE_EINT2_SHIFT 8 /* BOOT_DONE_EINT2 */
+#define ARIZONA_BOOT_DONE_EINT2_WIDTH 1 /* BOOT_DONE_EINT2 */
+#define ARIZONA_DCS_DAC_DONE_EINT2 0x0080 /* DCS_DAC_DONE_EINT2 */
+#define ARIZONA_DCS_DAC_DONE_EINT2_MASK 0x0080 /* DCS_DAC_DONE_EINT2 */
+#define ARIZONA_DCS_DAC_DONE_EINT2_SHIFT 7 /* DCS_DAC_DONE_EINT2 */
+#define ARIZONA_DCS_DAC_DONE_EINT2_WIDTH 1 /* DCS_DAC_DONE_EINT2 */
+#define ARIZONA_DCS_HP_DONE_EINT2 0x0040 /* DCS_HP_DONE_EINT2 */
+#define ARIZONA_DCS_HP_DONE_EINT2_MASK 0x0040 /* DCS_HP_DONE_EINT2 */
+#define ARIZONA_DCS_HP_DONE_EINT2_SHIFT 6 /* DCS_HP_DONE_EINT2 */
+#define ARIZONA_DCS_HP_DONE_EINT2_WIDTH 1 /* DCS_HP_DONE_EINT2 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT2 0x0002 /* FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT2_MASK 0x0002 /* FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT2_SHIFT 1 /* FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL2_CLOCK_OK_EINT2_WIDTH 1 /* FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT2 0x0001 /* FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT2_MASK 0x0001 /* FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT2_SHIFT 0 /* FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_FLL1_CLOCK_OK_EINT2_WIDTH 1 /* FLL1_CLOCK_OK_EINT2 */
+
+/*
+ * R3348 (0xD14) - IRQ2 Status 5 (Alternate layout)
+ *
+ * Alternate layout used on later devices; note that only fields that have
+ * moved are specified.
+ */
+#define ARIZONA_V2_ASRC_CFG_ERR_EINT2 0x0008 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ASRC_CFG_ERR_EINT2_MASK 0x0008 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ASRC_CFG_ERR_EINT2_SHIFT 3 /* ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_V2_ASRC_CFG_ERR_EINT2_WIDTH 1 /* ASRC_CFG_ERR_EINT2 */
+
+/*
+ * R3349 (0xD15) - IRQ2 Status 6
+ */
+#define ARIZONA_DSP_SHARED_WR_COLL_EINT2 0x8000 /* DSP_SHARED_WR_COLL_EINT2 */
+#define ARIZONA_DSP_SHARED_WR_COLL_EINT2_MASK 0x8000 /* DSP_SHARED_WR_COLL_EINT2 */
+#define ARIZONA_DSP_SHARED_WR_COLL_EINT2_SHIFT 15 /* DSP_SHARED_WR_COLL_EINT2 */
+#define ARIZONA_DSP_SHARED_WR_COLL_EINT2_WIDTH 1 /* DSP_SHARED_WR_COLL_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_EINT2 0x4000 /* SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_EINT2_MASK 0x4000 /* SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_EINT2_SHIFT 14 /* SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_SPK_SHUTDOWN_EINT2_WIDTH 1 /* SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_SPK1R_SHORT_EINT2 0x2000 /* SPK1R_SHORT_EINT2 */
+#define ARIZONA_SPK1R_SHORT_EINT2_MASK 0x2000 /* SPK1R_SHORT_EINT2 */
+#define ARIZONA_SPK1R_SHORT_EINT2_SHIFT 13 /* SPK1R_SHORT_EINT2 */
+#define ARIZONA_SPK1R_SHORT_EINT2_WIDTH 1 /* SPK1R_SHORT_EINT2 */
+#define ARIZONA_SPK1L_SHORT_EINT2 0x1000 /* SPK1L_SHORT_EINT2 */
+#define ARIZONA_SPK1L_SHORT_EINT2_MASK 0x1000 /* SPK1L_SHORT_EINT2 */
+#define ARIZONA_SPK1L_SHORT_EINT2_SHIFT 12 /* SPK1L_SHORT_EINT2 */
+#define ARIZONA_SPK1L_SHORT_EINT2_WIDTH 1 /* SPK1L_SHORT_EINT2 */
+#define ARIZONA_HP3R_SC_NEG_EINT2 0x0800 /* HP3R_SC_NEG_EINT2 */
+#define ARIZONA_HP3R_SC_NEG_EINT2_MASK 0x0800 /* HP3R_SC_NEG_EINT2 */
+#define ARIZONA_HP3R_SC_NEG_EINT2_SHIFT 11 /* HP3R_SC_NEG_EINT2 */
+#define ARIZONA_HP3R_SC_NEG_EINT2_WIDTH 1 /* HP3R_SC_NEG_EINT2 */
+#define ARIZONA_HP3R_SC_POS_EINT2 0x0400 /* HP3R_SC_POS_EINT2 */
+#define ARIZONA_HP3R_SC_POS_EINT2_MASK 0x0400 /* HP3R_SC_POS_EINT2 */
+#define ARIZONA_HP3R_SC_POS_EINT2_SHIFT 10 /* HP3R_SC_POS_EINT2 */
+#define ARIZONA_HP3R_SC_POS_EINT2_WIDTH 1 /* HP3R_SC_POS_EINT2 */
+#define ARIZONA_HP3L_SC_NEG_EINT2 0x0200 /* HP3L_SC_NEG_EINT2 */
+#define ARIZONA_HP3L_SC_NEG_EINT2_MASK 0x0200 /* HP3L_SC_NEG_EINT2 */
+#define ARIZONA_HP3L_SC_NEG_EINT2_SHIFT 9 /* HP3L_SC_NEG_EINT2 */
+#define ARIZONA_HP3L_SC_NEG_EINT2_WIDTH 1 /* HP3L_SC_NEG_EINT2 */
+#define ARIZONA_HP3L_SC_POS_EINT2 0x0100 /* HP3L_SC_POS_EINT2 */
+#define ARIZONA_HP3L_SC_POS_EINT2_MASK 0x0100 /* HP3L_SC_POS_EINT2 */
+#define ARIZONA_HP3L_SC_POS_EINT2_SHIFT 8 /* HP3L_SC_POS_EINT2 */
+#define ARIZONA_HP3L_SC_POS_EINT2_WIDTH 1 /* HP3L_SC_POS_EINT2 */
+#define ARIZONA_HP2R_SC_NEG_EINT2 0x0080 /* HP2R_SC_NEG_EINT2 */
+#define ARIZONA_HP2R_SC_NEG_EINT2_MASK 0x0080 /* HP2R_SC_NEG_EINT2 */
+#define ARIZONA_HP2R_SC_NEG_EINT2_SHIFT 7 /* HP2R_SC_NEG_EINT2 */
+#define ARIZONA_HP2R_SC_NEG_EINT2_WIDTH 1 /* HP2R_SC_NEG_EINT2 */
+#define ARIZONA_HP2R_SC_POS_EINT2 0x0040 /* HP2R_SC_POS_EINT2 */
+#define ARIZONA_HP2R_SC_POS_EINT2_MASK 0x0040 /* HP2R_SC_POS_EINT2 */
+#define ARIZONA_HP2R_SC_POS_EINT2_SHIFT 6 /* HP2R_SC_POS_EINT2 */
+#define ARIZONA_HP2R_SC_POS_EINT2_WIDTH 1 /* HP2R_SC_POS_EINT2 */
+#define ARIZONA_HP2L_SC_NEG_EINT2 0x0020 /* HP2L_SC_NEG_EINT2 */
+#define ARIZONA_HP2L_SC_NEG_EINT2_MASK 0x0020 /* HP2L_SC_NEG_EINT2 */
+#define ARIZONA_HP2L_SC_NEG_EINT2_SHIFT 5 /* HP2L_SC_NEG_EINT2 */
+#define ARIZONA_HP2L_SC_NEG_EINT2_WIDTH 1 /* HP2L_SC_NEG_EINT2 */
+#define ARIZONA_HP2L_SC_POS_EINT2 0x0010 /* HP2L_SC_POS_EINT2 */
+#define ARIZONA_HP2L_SC_POS_EINT2_MASK 0x0010 /* HP2L_SC_POS_EINT2 */
+#define ARIZONA_HP2L_SC_POS_EINT2_SHIFT 4 /* HP2L_SC_POS_EINT2 */
+#define ARIZONA_HP2L_SC_POS_EINT2_WIDTH 1 /* HP2L_SC_POS_EINT2 */
+#define ARIZONA_HP1R_SC_NEG_EINT2 0x0008 /* HP1R_SC_NEG_EINT2 */
+#define ARIZONA_HP1R_SC_NEG_EINT2_MASK 0x0008 /* HP1R_SC_NEG_EINT2 */
+#define ARIZONA_HP1R_SC_NEG_EINT2_SHIFT 3 /* HP1R_SC_NEG_EINT2 */
+#define ARIZONA_HP1R_SC_NEG_EINT2_WIDTH 1 /* HP1R_SC_NEG_EINT2 */
+#define ARIZONA_HP1R_SC_POS_EINT2 0x0004 /* HP1R_SC_POS_EINT2 */
+#define ARIZONA_HP1R_SC_POS_EINT2_MASK 0x0004 /* HP1R_SC_POS_EINT2 */
+#define ARIZONA_HP1R_SC_POS_EINT2_SHIFT 2 /* HP1R_SC_POS_EINT2 */
+#define ARIZONA_HP1R_SC_POS_EINT2_WIDTH 1 /* HP1R_SC_POS_EINT2 */
+#define ARIZONA_HP1L_SC_NEG_EINT2 0x0002 /* HP1L_SC_NEG_EINT2 */
+#define ARIZONA_HP1L_SC_NEG_EINT2_MASK 0x0002 /* HP1L_SC_NEG_EINT2 */
+#define ARIZONA_HP1L_SC_NEG_EINT2_SHIFT 1 /* HP1L_SC_NEG_EINT2 */
+#define ARIZONA_HP1L_SC_NEG_EINT2_WIDTH 1 /* HP1L_SC_NEG_EINT2 */
+#define ARIZONA_HP1L_SC_POS_EINT2 0x0001 /* HP1L_SC_POS_EINT2 */
+#define ARIZONA_HP1L_SC_POS_EINT2_MASK 0x0001 /* HP1L_SC_POS_EINT2 */
+#define ARIZONA_HP1L_SC_POS_EINT2_SHIFT 0 /* HP1L_SC_POS_EINT2 */
+#define ARIZONA_HP1L_SC_POS_EINT2_WIDTH 1 /* HP1L_SC_POS_EINT2 */
+
+/*
+ * R3352 (0xD18) - IRQ2 Status 1 Mask
+ */
+#define ARIZONA_IM_GP4_EINT2 0x0008 /* IM_GP4_EINT2 */
+#define ARIZONA_IM_GP4_EINT2_MASK 0x0008 /* IM_GP4_EINT2 */
+#define ARIZONA_IM_GP4_EINT2_SHIFT 3 /* IM_GP4_EINT2 */
+#define ARIZONA_IM_GP4_EINT2_WIDTH 1 /* IM_GP4_EINT2 */
+#define ARIZONA_IM_GP3_EINT2 0x0004 /* IM_GP3_EINT2 */
+#define ARIZONA_IM_GP3_EINT2_MASK 0x0004 /* IM_GP3_EINT2 */
+#define ARIZONA_IM_GP3_EINT2_SHIFT 2 /* IM_GP3_EINT2 */
+#define ARIZONA_IM_GP3_EINT2_WIDTH 1 /* IM_GP3_EINT2 */
+#define ARIZONA_IM_GP2_EINT2 0x0002 /* IM_GP2_EINT2 */
+#define ARIZONA_IM_GP2_EINT2_MASK 0x0002 /* IM_GP2_EINT2 */
+#define ARIZONA_IM_GP2_EINT2_SHIFT 1 /* IM_GP2_EINT2 */
+#define ARIZONA_IM_GP2_EINT2_WIDTH 1 /* IM_GP2_EINT2 */
+#define ARIZONA_IM_GP1_EINT2 0x0001 /* IM_GP1_EINT2 */
+#define ARIZONA_IM_GP1_EINT2_MASK 0x0001 /* IM_GP1_EINT2 */
+#define ARIZONA_IM_GP1_EINT2_SHIFT 0 /* IM_GP1_EINT2 */
+#define ARIZONA_IM_GP1_EINT2_WIDTH 1 /* IM_GP1_EINT2 */
+
+/*
+ * R3353 (0xD19) - IRQ2 Status 2 Mask
+ */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT2 0x0100 /* IM_DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_MASK 0x0100 /* IM_DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_SHIFT 8 /* IM_DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_IM_DSP1_RAM_RDY_EINT2_WIDTH 1 /* IM_DSP1_RAM_RDY_EINT2 */
+#define ARIZONA_IM_DSP_IRQ2_EINT2 0x0002 /* IM_DSP_IRQ2_EINT2 */
+#define ARIZONA_IM_DSP_IRQ2_EINT2_MASK 0x0002 /* IM_DSP_IRQ2_EINT2 */
+#define ARIZONA_IM_DSP_IRQ2_EINT2_SHIFT 1 /* IM_DSP_IRQ2_EINT2 */
+#define ARIZONA_IM_DSP_IRQ2_EINT2_WIDTH 1 /* IM_DSP_IRQ2_EINT2 */
+#define ARIZONA_IM_DSP_IRQ1_EINT2 0x0001 /* IM_DSP_IRQ1_EINT2 */
+#define ARIZONA_IM_DSP_IRQ1_EINT2_MASK 0x0001 /* IM_DSP_IRQ1_EINT2 */
+#define ARIZONA_IM_DSP_IRQ1_EINT2_SHIFT 0 /* IM_DSP_IRQ1_EINT2 */
+#define ARIZONA_IM_DSP_IRQ1_EINT2_WIDTH 1 /* IM_DSP_IRQ1_EINT2 */
+
+/*
+ * R3354 (0xD1A) - IRQ2 Status 3 Mask
+ */
+#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT2 0x8000 /* IM_SPK_OVERHEAT_WARN_EINT2 */
+#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT2_MASK 0x8000 /* IM_SPK_OVERHEAT_WARN_EINT2 */
+#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT2_SHIFT 15 /* IM_SPK_OVERHEAT_WARN_EINT2 */
+#define ARIZONA_IM_SPK_OVERHEAT_WARN_EINT2_WIDTH 1 /* IM_SPK_OVERHEAT_WARN_EINT2 */
+#define ARIZONA_IM_SPK_OVERHEAT_EINT2 0x4000 /* IM_SPK_OVERHEAT_EINT2 */
+#define ARIZONA_IM_SPK_OVERHEAT_EINT2_MASK 0x4000 /* IM_SPK_OVERHEAT_EINT2 */
+#define ARIZONA_IM_SPK_OVERHEAT_EINT2_SHIFT 14 /* IM_SPK_OVERHEAT_EINT2 */
+#define ARIZONA_IM_SPK_OVERHEAT_EINT2_WIDTH 1 /* IM_SPK_OVERHEAT_EINT2 */
+#define ARIZONA_IM_HPDET_EINT2 0x2000 /* IM_HPDET_EINT2 */
+#define ARIZONA_IM_HPDET_EINT2_MASK 0x2000 /* IM_HPDET_EINT2 */
+#define ARIZONA_IM_HPDET_EINT2_SHIFT 13 /* IM_HPDET_EINT2 */
+#define ARIZONA_IM_HPDET_EINT2_WIDTH 1 /* IM_HPDET_EINT2 */
+#define ARIZONA_IM_MICDET_EINT2 0x1000 /* IM_MICDET_EINT2 */
+#define ARIZONA_IM_MICDET_EINT2_MASK 0x1000 /* IM_MICDET_EINT2 */
+#define ARIZONA_IM_MICDET_EINT2_SHIFT 12 /* IM_MICDET_EINT2 */
+#define ARIZONA_IM_MICDET_EINT2_WIDTH 1 /* IM_MICDET_EINT2 */
+#define ARIZONA_IM_WSEQ_DONE_EINT2 0x0800 /* IM_WSEQ_DONE_EINT2 */
+#define ARIZONA_IM_WSEQ_DONE_EINT2_MASK 0x0800 /* IM_WSEQ_DONE_EINT2 */
+#define ARIZONA_IM_WSEQ_DONE_EINT2_SHIFT 11 /* IM_WSEQ_DONE_EINT2 */
+#define ARIZONA_IM_WSEQ_DONE_EINT2_WIDTH 1 /* IM_WSEQ_DONE_EINT2 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT2 0x0400 /* IM_DRC2_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT2_MASK 0x0400 /* IM_DRC2_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT2_SHIFT 10 /* IM_DRC2_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC2_SIG_DET_EINT2_WIDTH 1 /* IM_DRC2_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT2 0x0200 /* IM_DRC1_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT2_MASK 0x0200 /* IM_DRC1_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT2_SHIFT 9 /* IM_DRC1_SIG_DET_EINT2 */
+#define ARIZONA_IM_DRC1_SIG_DET_EINT2_WIDTH 1 /* IM_DRC1_SIG_DET_EINT2 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT2 0x0100 /* IM_ASRC2_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT2_MASK 0x0100 /* IM_ASRC2_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT2_SHIFT 8 /* IM_ASRC2_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC2_LOCK_EINT2_WIDTH 1 /* IM_ASRC2_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT2 0x0080 /* IM_ASRC1_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT2_MASK 0x0080 /* IM_ASRC1_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT2_SHIFT 7 /* IM_ASRC1_LOCK_EINT2 */
+#define ARIZONA_IM_ASRC1_LOCK_EINT2_WIDTH 1 /* IM_ASRC1_LOCK_EINT2 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT2 0x0040 /* IM_UNDERCLOCKED_EINT2 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT2_MASK 0x0040 /* IM_UNDERCLOCKED_EINT2 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT2_SHIFT 6 /* IM_UNDERCLOCKED_EINT2 */
+#define ARIZONA_IM_UNDERCLOCKED_EINT2_WIDTH 1 /* IM_UNDERCLOCKED_EINT2 */
+#define ARIZONA_IM_OVERCLOCKED_EINT2 0x0020 /* IM_OVERCLOCKED_EINT2 */
+#define ARIZONA_IM_OVERCLOCKED_EINT2_MASK 0x0020 /* IM_OVERCLOCKED_EINT2 */
+#define ARIZONA_IM_OVERCLOCKED_EINT2_SHIFT 5 /* IM_OVERCLOCKED_EINT2 */
+#define ARIZONA_IM_OVERCLOCKED_EINT2_WIDTH 1 /* IM_OVERCLOCKED_EINT2 */
+#define ARIZONA_IM_FLL2_LOCK_EINT2 0x0008 /* IM_FLL2_LOCK_EINT2 */
+#define ARIZONA_IM_FLL2_LOCK_EINT2_MASK 0x0008 /* IM_FLL2_LOCK_EINT2 */
+#define ARIZONA_IM_FLL2_LOCK_EINT2_SHIFT 3 /* IM_FLL2_LOCK_EINT2 */
+#define ARIZONA_IM_FLL2_LOCK_EINT2_WIDTH 1 /* IM_FLL2_LOCK_EINT2 */
+#define ARIZONA_IM_FLL1_LOCK_EINT2 0x0004 /* IM_FLL1_LOCK_EINT2 */
+#define ARIZONA_IM_FLL1_LOCK_EINT2_MASK 0x0004 /* IM_FLL1_LOCK_EINT2 */
+#define ARIZONA_IM_FLL1_LOCK_EINT2_SHIFT 2 /* IM_FLL1_LOCK_EINT2 */
+#define ARIZONA_IM_FLL1_LOCK_EINT2_WIDTH 1 /* IM_FLL1_LOCK_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT2 0x0002 /* IM_CLKGEN_ERR_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT2_MASK 0x0002 /* IM_CLKGEN_ERR_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT2_SHIFT 1 /* IM_CLKGEN_ERR_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_EINT2_WIDTH 1 /* IM_CLKGEN_ERR_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_MASK 0x0001 /* IM_CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_SHIFT 0 /* IM_CLKGEN_ERR_ASYNC_EINT2 */
+#define ARIZONA_IM_CLKGEN_ERR_ASYNC_EINT2_WIDTH 1 /* IM_CLKGEN_ERR_ASYNC_EINT2 */
+
+/*
+ * R3355 (0xD1B) - IRQ2 Status 4 Mask
+ */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT2 0x8000 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_MASK 0x8000 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_SHIFT 15 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ASRC_CFG_ERR_EINT2_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_IM_AIF3_ERR_EINT2 0x4000 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_IM_AIF3_ERR_EINT2_MASK 0x4000 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_IM_AIF3_ERR_EINT2_SHIFT 14 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_IM_AIF3_ERR_EINT2_WIDTH 1 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_IM_AIF2_ERR_EINT2 0x2000 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_IM_AIF2_ERR_EINT2_MASK 0x2000 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_IM_AIF2_ERR_EINT2_SHIFT 13 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_IM_AIF2_ERR_EINT2_WIDTH 1 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_IM_AIF1_ERR_EINT2 0x1000 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_IM_AIF1_ERR_EINT2_MASK 0x1000 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_IM_AIF1_ERR_EINT2_SHIFT 12 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_IM_AIF1_ERR_EINT2_WIDTH 1 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT2 0x0800 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT2_MASK 0x0800 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT2_SHIFT 11 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_IM_CTRLIF_ERR_EINT2_WIDTH 1 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0400 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 10 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_IM_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0200 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 9 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2 0x0100 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_MASK 0x0100 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_SHIFT 8 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2 0x0080 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_MASK 0x0080 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_SHIFT 7 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2 0x0040 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_MASK 0x0040 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_SHIFT 6 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_IM_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_IM_HP3R_DONE_EINT2 0x0020 /* IM_HP3R_DONE_EINT2 */
+#define ARIZONA_IM_HP3R_DONE_EINT2_MASK 0x0020 /* IM_HP3R_DONE_EINT2 */
+#define ARIZONA_IM_HP3R_DONE_EINT2_SHIFT 5 /* IM_HP3R_DONE_EINT2 */
+#define ARIZONA_IM_HP3R_DONE_EINT2_WIDTH 1 /* IM_HP3R_DONE_EINT2 */
+#define ARIZONA_IM_HP3L_DONE_EINT2 0x0010 /* IM_HP3L_DONE_EINT2 */
+#define ARIZONA_IM_HP3L_DONE_EINT2_MASK 0x0010 /* IM_HP3L_DONE_EINT2 */
+#define ARIZONA_IM_HP3L_DONE_EINT2_SHIFT 4 /* IM_HP3L_DONE_EINT2 */
+#define ARIZONA_IM_HP3L_DONE_EINT2_WIDTH 1 /* IM_HP3L_DONE_EINT2 */
+#define ARIZONA_IM_HP2R_DONE_EINT2 0x0008 /* IM_HP2R_DONE_EINT2 */
+#define ARIZONA_IM_HP2R_DONE_EINT2_MASK 0x0008 /* IM_HP2R_DONE_EINT2 */
+#define ARIZONA_IM_HP2R_DONE_EINT2_SHIFT 3 /* IM_HP2R_DONE_EINT2 */
+#define ARIZONA_IM_HP2R_DONE_EINT2_WIDTH 1 /* IM_HP2R_DONE_EINT2 */
+#define ARIZONA_IM_HP2L_DONE_EINT2 0x0004 /* IM_HP2L_DONE_EINT2 */
+#define ARIZONA_IM_HP2L_DONE_EINT2_MASK 0x0004 /* IM_HP2L_DONE_EINT2 */
+#define ARIZONA_IM_HP2L_DONE_EINT2_SHIFT 2 /* IM_HP2L_DONE_EINT2 */
+#define ARIZONA_IM_HP2L_DONE_EINT2_WIDTH 1 /* IM_HP2L_DONE_EINT2 */
+#define ARIZONA_IM_HP1R_DONE_EINT2 0x0002 /* IM_HP1R_DONE_EINT2 */
+#define ARIZONA_IM_HP1R_DONE_EINT2_MASK 0x0002 /* IM_HP1R_DONE_EINT2 */
+#define ARIZONA_IM_HP1R_DONE_EINT2_SHIFT 1 /* IM_HP1R_DONE_EINT2 */
+#define ARIZONA_IM_HP1R_DONE_EINT2_WIDTH 1 /* IM_HP1R_DONE_EINT2 */
+#define ARIZONA_IM_HP1L_DONE_EINT2 0x0001 /* IM_HP1L_DONE_EINT2 */
+#define ARIZONA_IM_HP1L_DONE_EINT2_MASK 0x0001 /* IM_HP1L_DONE_EINT2 */
+#define ARIZONA_IM_HP1L_DONE_EINT2_SHIFT 0 /* IM_HP1L_DONE_EINT2 */
+#define ARIZONA_IM_HP1L_DONE_EINT2_WIDTH 1 /* IM_HP1L_DONE_EINT2 */
+
+/*
+ * R3355 (0xD1B) - IRQ2 Status 4 Mask (Alternate layout)
+ *
+ * Alternate layout used on later devices; note that only fields that have
+ * moved are specified.
+ */
+#define ARIZONA_V2_IM_AIF3_ERR_EINT2 0x8000 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF3_ERR_EINT2_MASK 0x8000 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF3_ERR_EINT2_SHIFT 15 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF3_ERR_EINT2_WIDTH 1 /* IM_AIF3_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF2_ERR_EINT2 0x4000 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF2_ERR_EINT2_MASK 0x4000 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF2_ERR_EINT2_SHIFT 14 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF2_ERR_EINT2_WIDTH 1 /* IM_AIF2_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF1_ERR_EINT2 0x2000 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF1_ERR_EINT2_MASK 0x2000 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF1_ERR_EINT2_SHIFT 13 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_V2_IM_AIF1_ERR_EINT2_WIDTH 1 /* IM_AIF1_ERR_EINT2 */
+#define ARIZONA_V2_IM_CTRLIF_ERR_EINT2 0x1000 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_V2_IM_CTRLIF_ERR_EINT2_MASK 0x1000 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_V2_IM_CTRLIF_ERR_EINT2_SHIFT 12 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_V2_IM_CTRLIF_ERR_EINT2_WIDTH 1 /* IM_CTRLIF_ERR_EINT2 */
+#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT2 0x0800 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT2_MASK 0x0800 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT2_SHIFT 11 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_V2_IM_MIXER_DROPPED_SAMPLE_EINT2_WIDTH 1 /* IM_MIXER_DROPPED_SAMPLE_EINT2 */
+#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT2 0x0400 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT2_MASK 0x0400 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT2_SHIFT 10 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_IM_ASYNC_CLK_ENA_LOW_EINT2_WIDTH 1 /* IM_ASYNC_CLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT2 0x0200 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT2_MASK 0x0200 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT2_SHIFT 9 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_IM_SYSCLK_ENA_LOW_EINT2_WIDTH 1 /* IM_SYSCLK_ENA_LOW_EINT2 */
+#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT2 0x0100 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT2_MASK 0x0100 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT2_SHIFT 8 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC1_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC1_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT2 0x0080 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT2_MASK 0x0080 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT2_SHIFT 7 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC2_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC2_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT2 0x0040 /* IM_ISRC3_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT2_MASK 0x0040 /* IM_ISRC3_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT2_SHIFT 6 /* IM_ISRC3_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ISRC3_CFG_ERR_EINT2_WIDTH 1 /* IM_ISRC3_CFG_ERR_EINT2 */
+
+/*
+ * R3356 (0xD1C) - IRQ2 Status 5 Mask
+ */
+#define ARIZONA_IM_BOOT_DONE_EINT2 0x0100 /* IM_BOOT_DONE_EINT2 */
+#define ARIZONA_IM_BOOT_DONE_EINT2_MASK 0x0100 /* IM_BOOT_DONE_EINT2 */
+#define ARIZONA_IM_BOOT_DONE_EINT2_SHIFT 8 /* IM_BOOT_DONE_EINT2 */
+#define ARIZONA_IM_BOOT_DONE_EINT2_WIDTH 1 /* IM_BOOT_DONE_EINT2 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT2 0x0080 /* IM_DCS_DAC_DONE_EINT2 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT2_MASK 0x0080 /* IM_DCS_DAC_DONE_EINT2 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT2_SHIFT 7 /* IM_DCS_DAC_DONE_EINT2 */
+#define ARIZONA_IM_DCS_DAC_DONE_EINT2_WIDTH 1 /* IM_DCS_DAC_DONE_EINT2 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT2 0x0040 /* IM_DCS_HP_DONE_EINT2 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT2_MASK 0x0040 /* IM_DCS_HP_DONE_EINT2 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT2_SHIFT 6 /* IM_DCS_HP_DONE_EINT2 */
+#define ARIZONA_IM_DCS_HP_DONE_EINT2_WIDTH 1 /* IM_DCS_HP_DONE_EINT2 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2 0x0002 /* IM_FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_MASK 0x0002 /* IM_FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_SHIFT 1 /* IM_FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL2_CLOCK_OK_EINT2_WIDTH 1 /* IM_FLL2_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2 0x0001 /* IM_FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_MASK 0x0001 /* IM_FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_SHIFT 0 /* IM_FLL1_CLOCK_OK_EINT2 */
+#define ARIZONA_IM_FLL1_CLOCK_OK_EINT2_WIDTH 1 /* IM_FLL1_CLOCK_OK_EINT2 */
+
+/*
+ * R3356 (0xD1C) - IRQ2 Status 5 Mask (Alternate layout)
+ *
+ * Alternate layout used on later devices; note that only fields that have
+ * moved are specified.
+ */
+#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT2 0x0008 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT2_MASK 0x0008 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT2_SHIFT 3 /* IM_ASRC_CFG_ERR_EINT2 */
+#define ARIZONA_V2_IM_ASRC_CFG_ERR_EINT2_WIDTH 1 /* IM_ASRC_CFG_ERR_EINT2 */
+
+/*
+ * R3357 (0xD1D) - IRQ2 Status 6 Mask
+ */
+#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT2 0x8000 /* IM_DSP_SHARED_WR_COLL_EINT2 */
+#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT2_MASK 0x8000 /* IM_DSP_SHARED_WR_COLL_EINT2 */
+#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT2_SHIFT 15 /* IM_DSP_SHARED_WR_COLL_EINT2 */
+#define ARIZONA_IM_DSP_SHARED_WR_COLL_EINT2_WIDTH 1 /* IM_DSP_SHARED_WR_COLL_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT2 0x4000 /* IM_SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_MASK 0x4000 /* IM_SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_SHIFT 14 /* IM_SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_IM_SPK_SHUTDOWN_EINT2_WIDTH 1 /* IM_SPK_SHUTDOWN_EINT2 */
+#define ARIZONA_IM_SPK1R_SHORT_EINT2 0x2000 /* IM_SPK1R_SHORT_EINT2 */
+#define ARIZONA_IM_SPK1R_SHORT_EINT2_MASK 0x2000 /* IM_SPK1R_SHORT_EINT2 */
+#define ARIZONA_IM_SPK1R_SHORT_EINT2_SHIFT 13 /* IM_SPK1R_SHORT_EINT2 */
+#define ARIZONA_IM_SPK1R_SHORT_EINT2_WIDTH 1 /* IM_SPK1R_SHORT_EINT2 */
+#define ARIZONA_IM_SPK1L_SHORT_EINT2 0x1000 /* IM_SPK1L_SHORT_EINT2 */
+#define ARIZONA_IM_SPK1L_SHORT_EINT2_MASK 0x1000 /* IM_SPK1L_SHORT_EINT2 */
+#define ARIZONA_IM_SPK1L_SHORT_EINT2_SHIFT 12 /* IM_SPK1L_SHORT_EINT2 */
+#define ARIZONA_IM_SPK1L_SHORT_EINT2_WIDTH 1 /* IM_SPK1L_SHORT_EINT2 */
+#define ARIZONA_IM_HP3R_SC_NEG_EINT2 0x0800 /* IM_HP3R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP3R_SC_NEG_EINT2_MASK 0x0800 /* IM_HP3R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP3R_SC_NEG_EINT2_SHIFT 11 /* IM_HP3R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP3R_SC_NEG_EINT2_WIDTH 1 /* IM_HP3R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP3R_SC_POS_EINT2 0x0400 /* IM_HP3R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP3R_SC_POS_EINT2_MASK 0x0400 /* IM_HP3R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP3R_SC_POS_EINT2_SHIFT 10 /* IM_HP3R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP3R_SC_POS_EINT2_WIDTH 1 /* IM_HP3R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP3L_SC_NEG_EINT2 0x0200 /* IM_HP3L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP3L_SC_NEG_EINT2_MASK 0x0200 /* IM_HP3L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP3L_SC_NEG_EINT2_SHIFT 9 /* IM_HP3L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP3L_SC_NEG_EINT2_WIDTH 1 /* IM_HP3L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP3L_SC_POS_EINT2 0x0100 /* IM_HP3L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP3L_SC_POS_EINT2_MASK 0x0100 /* IM_HP3L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP3L_SC_POS_EINT2_SHIFT 8 /* IM_HP3L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP3L_SC_POS_EINT2_WIDTH 1 /* IM_HP3L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP2R_SC_NEG_EINT2 0x0080 /* IM_HP2R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP2R_SC_NEG_EINT2_MASK 0x0080 /* IM_HP2R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP2R_SC_NEG_EINT2_SHIFT 7 /* IM_HP2R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP2R_SC_NEG_EINT2_WIDTH 1 /* IM_HP2R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP2R_SC_POS_EINT2 0x0040 /* IM_HP2R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP2R_SC_POS_EINT2_MASK 0x0040 /* IM_HP2R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP2R_SC_POS_EINT2_SHIFT 6 /* IM_HP2R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP2R_SC_POS_EINT2_WIDTH 1 /* IM_HP2R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP2L_SC_NEG_EINT2 0x0020 /* IM_HP2L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP2L_SC_NEG_EINT2_MASK 0x0020 /* IM_HP2L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP2L_SC_NEG_EINT2_SHIFT 5 /* IM_HP2L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP2L_SC_NEG_EINT2_WIDTH 1 /* IM_HP2L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP2L_SC_POS_EINT2 0x0010 /* IM_HP2L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP2L_SC_POS_EINT2_MASK 0x0010 /* IM_HP2L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP2L_SC_POS_EINT2_SHIFT 4 /* IM_HP2L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP2L_SC_POS_EINT2_WIDTH 1 /* IM_HP2L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP1R_SC_NEG_EINT2 0x0008 /* IM_HP1R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP1R_SC_NEG_EINT2_MASK 0x0008 /* IM_HP1R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP1R_SC_NEG_EINT2_SHIFT 3 /* IM_HP1R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP1R_SC_NEG_EINT2_WIDTH 1 /* IM_HP1R_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP1R_SC_POS_EINT2 0x0004 /* IM_HP1R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP1R_SC_POS_EINT2_MASK 0x0004 /* IM_HP1R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP1R_SC_POS_EINT2_SHIFT 2 /* IM_HP1R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP1R_SC_POS_EINT2_WIDTH 1 /* IM_HP1R_SC_POS_EINT2 */
+#define ARIZONA_IM_HP1L_SC_NEG_EINT2 0x0002 /* IM_HP1L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP1L_SC_NEG_EINT2_MASK 0x0002 /* IM_HP1L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP1L_SC_NEG_EINT2_SHIFT 1 /* IM_HP1L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP1L_SC_NEG_EINT2_WIDTH 1 /* IM_HP1L_SC_NEG_EINT2 */
+#define ARIZONA_IM_HP1L_SC_POS_EINT2 0x0001 /* IM_HP1L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP1L_SC_POS_EINT2_MASK 0x0001 /* IM_HP1L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP1L_SC_POS_EINT2_SHIFT 0 /* IM_HP1L_SC_POS_EINT2 */
+#define ARIZONA_IM_HP1L_SC_POS_EINT2_WIDTH 1 /* IM_HP1L_SC_POS_EINT2 */
+
+/*
+ * R3359 (0xD1F) - IRQ2 Control
+ */
+#define ARIZONA_IM_IRQ2 0x0001 /* IM_IRQ2 */
+#define ARIZONA_IM_IRQ2_MASK 0x0001 /* IM_IRQ2 */
+#define ARIZONA_IM_IRQ2_SHIFT 0 /* IM_IRQ2 */
+#define ARIZONA_IM_IRQ2_WIDTH 1 /* IM_IRQ2 */
+
+/*
+ * R3360 (0xD20) - Interrupt Raw Status 2
+ */
+#define ARIZONA_DSP1_RAM_RDY_STS 0x0100 /* DSP1_RAM_RDY_STS */
+#define ARIZONA_DSP1_RAM_RDY_STS_MASK 0x0100 /* DSP1_RAM_RDY_STS */
+#define ARIZONA_DSP1_RAM_RDY_STS_SHIFT 8 /* DSP1_RAM_RDY_STS */
+#define ARIZONA_DSP1_RAM_RDY_STS_WIDTH 1 /* DSP1_RAM_RDY_STS */
+#define ARIZONA_DSP_IRQ2_STS 0x0002 /* DSP_IRQ2_STS */
+#define ARIZONA_DSP_IRQ2_STS_MASK 0x0002 /* DSP_IRQ2_STS */
+#define ARIZONA_DSP_IRQ2_STS_SHIFT 1 /* DSP_IRQ2_STS */
+#define ARIZONA_DSP_IRQ2_STS_WIDTH 1 /* DSP_IRQ2_STS */
+#define ARIZONA_DSP_IRQ1_STS 0x0001 /* DSP_IRQ1_STS */
+#define ARIZONA_DSP_IRQ1_STS_MASK 0x0001 /* DSP_IRQ1_STS */
+#define ARIZONA_DSP_IRQ1_STS_SHIFT 0 /* DSP_IRQ1_STS */
+#define ARIZONA_DSP_IRQ1_STS_WIDTH 1 /* DSP_IRQ1_STS */
+
+/*
+ * R3361 (0xD21) - Interrupt Raw Status 3
+ */
+#define ARIZONA_SPK_OVERHEAT_WARN_STS 0x8000 /* SPK_OVERHEAT_WARN_STS */
+#define ARIZONA_SPK_OVERHEAT_WARN_STS_MASK 0x8000 /* SPK_OVERHEAT_WARN_STS */
+#define ARIZONA_SPK_OVERHEAT_WARN_STS_SHIFT 15 /* SPK_OVERHEAT_WARN_STS */
+#define ARIZONA_SPK_OVERHEAT_WARN_STS_WIDTH 1 /* SPK_OVERHEAT_WARN_STS */
+#define ARIZONA_SPK_OVERHEAT_STS 0x4000 /* SPK_OVERHEAT_STS */
+#define ARIZONA_SPK_OVERHEAT_STS_MASK 0x4000 /* SPK_OVERHEAT_STS */
+#define ARIZONA_SPK_OVERHEAT_STS_SHIFT 14 /* SPK_OVERHEAT_STS */
+#define ARIZONA_SPK_OVERHEAT_STS_WIDTH 1 /* SPK_OVERHEAT_STS */
+#define ARIZONA_HPDET_STS 0x2000 /* HPDET_STS */
+#define ARIZONA_HPDET_STS_MASK 0x2000 /* HPDET_STS */
+#define ARIZONA_HPDET_STS_SHIFT 13 /* HPDET_STS */
+#define ARIZONA_HPDET_STS_WIDTH 1 /* HPDET_STS */
+#define ARIZONA_MICDET_STS 0x1000 /* MICDET_STS */
+#define ARIZONA_MICDET_STS_MASK 0x1000 /* MICDET_STS */
+#define ARIZONA_MICDET_STS_SHIFT 12 /* MICDET_STS */
+#define ARIZONA_MICDET_STS_WIDTH 1 /* MICDET_STS */
+#define ARIZONA_WSEQ_DONE_STS 0x0800 /* WSEQ_DONE_STS */
+#define ARIZONA_WSEQ_DONE_STS_MASK 0x0800 /* WSEQ_DONE_STS */
+#define ARIZONA_WSEQ_DONE_STS_SHIFT 11 /* WSEQ_DONE_STS */
+#define ARIZONA_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */
+#define ARIZONA_DRC2_SIG_DET_STS 0x0400 /* DRC2_SIG_DET_STS */
+#define ARIZONA_DRC2_SIG_DET_STS_MASK 0x0400 /* DRC2_SIG_DET_STS */
+#define ARIZONA_DRC2_SIG_DET_STS_SHIFT 10 /* DRC2_SIG_DET_STS */
+#define ARIZONA_DRC2_SIG_DET_STS_WIDTH 1 /* DRC2_SIG_DET_STS */
+#define ARIZONA_DRC1_SIG_DET_STS 0x0200 /* DRC1_SIG_DET_STS */
+#define ARIZONA_DRC1_SIG_DET_STS_MASK 0x0200 /* DRC1_SIG_DET_STS */
+#define ARIZONA_DRC1_SIG_DET_STS_SHIFT 9 /* DRC1_SIG_DET_STS */
+#define ARIZONA_DRC1_SIG_DET_STS_WIDTH 1 /* DRC1_SIG_DET_STS */
+#define ARIZONA_ASRC2_LOCK_STS 0x0100 /* ASRC2_LOCK_STS */
+#define ARIZONA_ASRC2_LOCK_STS_MASK 0x0100 /* ASRC2_LOCK_STS */
+#define ARIZONA_ASRC2_LOCK_STS_SHIFT 8 /* ASRC2_LOCK_STS */
+#define ARIZONA_ASRC2_LOCK_STS_WIDTH 1 /* ASRC2_LOCK_STS */
+#define ARIZONA_ASRC1_LOCK_STS 0x0080 /* ASRC1_LOCK_STS */
+#define ARIZONA_ASRC1_LOCK_STS_MASK 0x0080 /* ASRC1_LOCK_STS */
+#define ARIZONA_ASRC1_LOCK_STS_SHIFT 7 /* ASRC1_LOCK_STS */
+#define ARIZONA_ASRC1_LOCK_STS_WIDTH 1 /* ASRC1_LOCK_STS */
+#define ARIZONA_UNDERCLOCKED_STS 0x0040 /* UNDERCLOCKED_STS */
+#define ARIZONA_UNDERCLOCKED_STS_MASK 0x0040 /* UNDERCLOCKED_STS */
+#define ARIZONA_UNDERCLOCKED_STS_SHIFT 6 /* UNDERCLOCKED_STS */
+#define ARIZONA_UNDERCLOCKED_STS_WIDTH 1 /* UNDERCLOCKED_STS */
+#define ARIZONA_OVERCLOCKED_STS 0x0020 /* OVERCLOCKED_STS */
+#define ARIZONA_OVERCLOCKED_STS_MASK 0x0020 /* OVERCLOCKED_STS */
+#define ARIZONA_OVERCLOCKED_STS_SHIFT 5 /* OVERCLOCKED_STS */
+#define ARIZONA_OVERCLOCKED_STS_WIDTH 1 /* OVERCLOCKED_STS */
+#define ARIZONA_FLL2_LOCK_STS 0x0008 /* FLL2_LOCK_STS */
+#define ARIZONA_FLL2_LOCK_STS_MASK 0x0008 /* FLL2_LOCK_STS */
+#define ARIZONA_FLL2_LOCK_STS_SHIFT 3 /* FLL2_LOCK_STS */
+#define ARIZONA_FLL2_LOCK_STS_WIDTH 1 /* FLL2_LOCK_STS */
+#define ARIZONA_FLL1_LOCK_STS 0x0004 /* FLL1_LOCK_STS */
+#define ARIZONA_FLL1_LOCK_STS_MASK 0x0004 /* FLL1_LOCK_STS */
+#define ARIZONA_FLL1_LOCK_STS_SHIFT 2 /* FLL1_LOCK_STS */
+#define ARIZONA_FLL1_LOCK_STS_WIDTH 1 /* FLL1_LOCK_STS */
+#define ARIZONA_CLKGEN_ERR_STS 0x0002 /* CLKGEN_ERR_STS */
+#define ARIZONA_CLKGEN_ERR_STS_MASK 0x0002 /* CLKGEN_ERR_STS */
+#define ARIZONA_CLKGEN_ERR_STS_SHIFT 1 /* CLKGEN_ERR_STS */
+#define ARIZONA_CLKGEN_ERR_STS_WIDTH 1 /* CLKGEN_ERR_STS */
+#define ARIZONA_CLKGEN_ERR_ASYNC_STS 0x0001 /* CLKGEN_ERR_ASYNC_STS */
+#define ARIZONA_CLKGEN_ERR_ASYNC_STS_MASK 0x0001 /* CLKGEN_ERR_ASYNC_STS */
+#define ARIZONA_CLKGEN_ERR_ASYNC_STS_SHIFT 0 /* CLKGEN_ERR_ASYNC_STS */
+#define ARIZONA_CLKGEN_ERR_ASYNC_STS_WIDTH 1 /* CLKGEN_ERR_ASYNC_STS */
+
+/*
+ * R3362 (0xD22) - Interrupt Raw Status 4
+ */
+#define ARIZONA_ASRC_CFG_ERR_STS 0x8000 /* ASRC_CFG_ERR_STS */
+#define ARIZONA_ASRC_CFG_ERR_STS_MASK 0x8000 /* ASRC_CFG_ERR_STS */
+#define ARIZONA_ASRC_CFG_ERR_STS_SHIFT 15 /* ASRC_CFG_ERR_STS */
+#define ARIZONA_ASRC_CFG_ERR_STS_WIDTH 1 /* ASRC_CFG_ERR_STS */
+#define ARIZONA_AIF3_ERR_STS 0x4000 /* AIF3_ERR_STS */
+#define ARIZONA_AIF3_ERR_STS_MASK 0x4000 /* AIF3_ERR_STS */
+#define ARIZONA_AIF3_ERR_STS_SHIFT 14 /* AIF3_ERR_STS */
+#define ARIZONA_AIF3_ERR_STS_WIDTH 1 /* AIF3_ERR_STS */
+#define ARIZONA_AIF2_ERR_STS 0x2000 /* AIF2_ERR_STS */
+#define ARIZONA_AIF2_ERR_STS_MASK 0x2000 /* AIF2_ERR_STS */
+#define ARIZONA_AIF2_ERR_STS_SHIFT 13 /* AIF2_ERR_STS */
+#define ARIZONA_AIF2_ERR_STS_WIDTH 1 /* AIF2_ERR_STS */
+#define ARIZONA_AIF1_ERR_STS 0x1000 /* AIF1_ERR_STS */
+#define ARIZONA_AIF1_ERR_STS_MASK 0x1000 /* AIF1_ERR_STS */
+#define ARIZONA_AIF1_ERR_STS_SHIFT 12 /* AIF1_ERR_STS */
+#define ARIZONA_AIF1_ERR_STS_WIDTH 1 /* AIF1_ERR_STS */
+#define ARIZONA_CTRLIF_ERR_STS 0x0800 /* CTRLIF_ERR_STS */
+#define ARIZONA_CTRLIF_ERR_STS_MASK 0x0800 /* CTRLIF_ERR_STS */
+#define ARIZONA_CTRLIF_ERR_STS_SHIFT 11 /* CTRLIF_ERR_STS */
+#define ARIZONA_CTRLIF_ERR_STS_WIDTH 1 /* CTRLIF_ERR_STS */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_STS 0x0400 /* MIXER_DROPPED_SAMPLE_STS */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_MASK 0x0400 /* MIXER_DROPPED_SAMPLE_STS */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_SHIFT 10 /* MIXER_DROPPED_SAMPLE_STS */
+#define ARIZONA_MIXER_DROPPED_SAMPLE_STS_WIDTH 1 /* MIXER_DROPPED_SAMPLE_STS */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_STS 0x0200 /* ASYNC_CLK_ENA_LOW_STS */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_MASK 0x0200 /* ASYNC_CLK_ENA_LOW_STS */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_SHIFT 9 /* ASYNC_CLK_ENA_LOW_STS */
+#define ARIZONA_ASYNC_CLK_ENA_LOW_STS_WIDTH 1 /* ASYNC_CLK_ENA_LOW_STS */
+#define ARIZONA_SYSCLK_ENA_LOW_STS 0x0100 /* SYSCLK_ENA_LOW_STS */
+#define ARIZONA_SYSCLK_ENA_LOW_STS_MASK 0x0100 /* SYSCLK_ENA_LOW_STS */
+#define ARIZONA_SYSCLK_ENA_LOW_STS_SHIFT 8 /* SYSCLK_ENA_LOW_STS */
+#define ARIZONA_SYSCLK_ENA_LOW_STS_WIDTH 1 /* SYSCLK_ENA_LOW_STS */
+#define ARIZONA_ISRC1_CFG_ERR_STS 0x0080 /* ISRC1_CFG_ERR_STS */
+#define ARIZONA_ISRC1_CFG_ERR_STS_MASK 0x0080 /* ISRC1_CFG_ERR_STS */
+#define ARIZONA_ISRC1_CFG_ERR_STS_SHIFT 7 /* ISRC1_CFG_ERR_STS */
+#define ARIZONA_ISRC1_CFG_ERR_STS_WIDTH 1 /* ISRC1_CFG_ERR_STS */
+#define ARIZONA_ISRC2_CFG_ERR_STS 0x0040 /* ISRC2_CFG_ERR_STS */
+#define ARIZONA_ISRC2_CFG_ERR_STS_MASK 0x0040 /* ISRC2_CFG_ERR_STS */
+#define ARIZONA_ISRC2_CFG_ERR_STS_SHIFT 6 /* ISRC2_CFG_ERR_STS */
+#define ARIZONA_ISRC2_CFG_ERR_STS_WIDTH 1 /* ISRC2_CFG_ERR_STS */
+#define ARIZONA_HP3R_DONE_STS 0x0020 /* HP3R_DONE_STS */
+#define ARIZONA_HP3R_DONE_STS_MASK 0x0020 /* HP3R_DONE_STS */
+#define ARIZONA_HP3R_DONE_STS_SHIFT 5 /* HP3R_DONE_STS */
+#define ARIZONA_HP3R_DONE_STS_WIDTH 1 /* HP3R_DONE_STS */
+#define ARIZONA_HP3L_DONE_STS 0x0010 /* HP3L_DONE_STS */
+#define ARIZONA_HP3L_DONE_STS_MASK 0x0010 /* HP3L_DONE_STS */
+#define ARIZONA_HP3L_DONE_STS_SHIFT 4 /* HP3L_DONE_STS */
+#define ARIZONA_HP3L_DONE_STS_WIDTH 1 /* HP3L_DONE_STS */
+#define ARIZONA_HP2R_DONE_STS 0x0008 /* HP2R_DONE_STS */
+#define ARIZONA_HP2R_DONE_STS_MASK 0x0008 /* HP2R_DONE_STS */
+#define ARIZONA_HP2R_DONE_STS_SHIFT 3 /* HP2R_DONE_STS */
+#define ARIZONA_HP2R_DONE_STS_WIDTH 1 /* HP2R_DONE_STS */
+#define ARIZONA_HP2L_DONE_STS 0x0004 /* HP2L_DONE_STS */
+#define ARIZONA_HP2L_DONE_STS_MASK 0x0004 /* HP2L_DONE_STS */
+#define ARIZONA_HP2L_DONE_STS_SHIFT 2 /* HP2L_DONE_STS */
+#define ARIZONA_HP2L_DONE_STS_WIDTH 1 /* HP2L_DONE_STS */
+#define ARIZONA_HP1R_DONE_STS 0x0002 /* HP1R_DONE_STS */
+#define ARIZONA_HP1R_DONE_STS_MASK 0x0002 /* HP1R_DONE_STS */
+#define ARIZONA_HP1R_DONE_STS_SHIFT 1 /* HP1R_DONE_STS */
+#define ARIZONA_HP1R_DONE_STS_WIDTH 1 /* HP1R_DONE_STS */
+#define ARIZONA_HP1L_DONE_STS 0x0001 /* HP1L_DONE_STS */
+#define ARIZONA_HP1L_DONE_STS_MASK 0x0001 /* HP1L_DONE_STS */
+#define ARIZONA_HP1L_DONE_STS_SHIFT 0 /* HP1L_DONE_STS */
+#define ARIZONA_HP1L_DONE_STS_WIDTH 1 /* HP1L_DONE_STS */
+
+/*
+ * R3363 (0xD23) - Interrupt Raw Status 5
+ */
+#define ARIZONA_BOOT_DONE_STS 0x0100 /* BOOT_DONE_STS */
+#define ARIZONA_BOOT_DONE_STS_MASK 0x0100 /* BOOT_DONE_STS */
+#define ARIZONA_BOOT_DONE_STS_SHIFT 8 /* BOOT_DONE_STS */
+#define ARIZONA_BOOT_DONE_STS_WIDTH 1 /* BOOT_DONE_STS */
+#define ARIZONA_DCS_DAC_DONE_STS 0x0080 /* DCS_DAC_DONE_STS */
+#define ARIZONA_DCS_DAC_DONE_STS_MASK 0x0080 /* DCS_DAC_DONE_STS */
+#define ARIZONA_DCS_DAC_DONE_STS_SHIFT 7 /* DCS_DAC_DONE_STS */
+#define ARIZONA_DCS_DAC_DONE_STS_WIDTH 1 /* DCS_DAC_DONE_STS */
+#define ARIZONA_DCS_HP_DONE_STS 0x0040 /* DCS_HP_DONE_STS */
+#define ARIZONA_DCS_HP_DONE_STS_MASK 0x0040 /* DCS_HP_DONE_STS */
+#define ARIZONA_DCS_HP_DONE_STS_SHIFT 6 /* DCS_HP_DONE_STS */
+#define ARIZONA_DCS_HP_DONE_STS_WIDTH 1 /* DCS_HP_DONE_STS */
+#define ARIZONA_FLL2_CLOCK_OK_STS 0x0002 /* FLL2_CLOCK_OK_STS */
+#define ARIZONA_FLL2_CLOCK_OK_STS_MASK 0x0002 /* FLL2_CLOCK_OK_STS */
+#define ARIZONA_FLL2_CLOCK_OK_STS_SHIFT 1 /* FLL2_CLOCK_OK_STS */
+#define ARIZONA_FLL2_CLOCK_OK_STS_WIDTH 1 /* FLL2_CLOCK_OK_STS */
+#define ARIZONA_FLL1_CLOCK_OK_STS 0x0001 /* FLL1_CLOCK_OK_STS */
+#define ARIZONA_FLL1_CLOCK_OK_STS_MASK 0x0001 /* FLL1_CLOCK_OK_STS */
+#define ARIZONA_FLL1_CLOCK_OK_STS_SHIFT 0 /* FLL1_CLOCK_OK_STS */
+#define ARIZONA_FLL1_CLOCK_OK_STS_WIDTH 1 /* FLL1_CLOCK_OK_STS */
+
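+/*
+ * Usage sketch (illustrative only, not part of the register map): the
+ * *_STS_MASK/_SHIFT/_WIDTH macros above are intended to be combined with
+ * regmap reads of the raw status registers.  A minimal example, assuming
+ * the register-address macro ARIZONA_INTERRUPT_RAW_STATUS_5 is defined
+ * earlier in this header and "arizona->regmap" is the device regmap:
+ *
+ *	unsigned int val;
+ *
+ *	if (!regmap_read(arizona->regmap,
+ *			 ARIZONA_INTERRUPT_RAW_STATUS_5, &val) &&
+ *	    (val & ARIZONA_FLL1_CLOCK_OK_STS_MASK))
+ *		dev_dbg(arizona->dev, "FLL1 clock OK\n");
+ */
+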
+/*
+ * R3364 (0xD24) - Interrupt Raw Status 6
+ */
+#define ARIZONA_PWM_OVERCLOCKED_STS 0x2000 /* PWM_OVERCLOCKED_STS */
+#define ARIZONA_PWM_OVERCLOCKED_STS_MASK 0x2000 /* PWM_OVERCLOCKED_STS */
+#define ARIZONA_PWM_OVERCLOCKED_STS_SHIFT 13 /* PWM_OVERCLOCKED_STS */
+#define ARIZONA_PWM_OVERCLOCKED_STS_WIDTH 1 /* PWM_OVERCLOCKED_STS */
+#define ARIZONA_FX_CORE_OVERCLOCKED_STS 0x1000 /* FX_CORE_OVERCLOCKED_STS */
+#define ARIZONA_FX_CORE_OVERCLOCKED_STS_MASK 0x1000 /* FX_CORE_OVERCLOCKED_STS */
+#define ARIZONA_FX_CORE_OVERCLOCKED_STS_SHIFT 12 /* FX_CORE_OVERCLOCKED_STS */
+#define ARIZONA_FX_CORE_OVERCLOCKED_STS_WIDTH 1 /* FX_CORE_OVERCLOCKED_STS */
+#define ARIZONA_DAC_SYS_OVERCLOCKED_STS 0x0400 /* DAC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_MASK 0x0400 /* DAC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_SHIFT 10 /* DAC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_DAC_SYS_OVERCLOCKED_STS_WIDTH 1 /* DAC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_DAC_WARP_OVERCLOCKED_STS 0x0200 /* DAC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_MASK 0x0200 /* DAC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_SHIFT 9 /* DAC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_DAC_WARP_OVERCLOCKED_STS_WIDTH 1 /* DAC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ADC_OVERCLOCKED_STS 0x0100 /* ADC_OVERCLOCKED_STS */
+#define ARIZONA_ADC_OVERCLOCKED_STS_MASK 0x0100 /* ADC_OVERCLOCKED_STS */
+#define ARIZONA_ADC_OVERCLOCKED_STS_SHIFT 8 /* ADC_OVERCLOCKED_STS */
+#define ARIZONA_ADC_OVERCLOCKED_STS_WIDTH 1 /* ADC_OVERCLOCKED_STS */
+#define ARIZONA_MIXER_OVERCLOCKED_STS 0x0080 /* MIXER_OVERCLOCKED_STS */
+#define ARIZONA_MIXER_OVERCLOCKED_STS_MASK 0x0080 /* MIXER_OVERCLOCKED_STS */
+#define ARIZONA_MIXER_OVERCLOCKED_STS_SHIFT 7 /* MIXER_OVERCLOCKED_STS */
+#define ARIZONA_MIXER_OVERCLOCKED_STS_WIDTH 1 /* MIXER_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS 0x0040 /* AIF3_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_MASK 0x0040 /* AIF3_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_SHIFT 6 /* AIF3_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF3_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS 0x0020 /* AIF2_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_MASK 0x0020 /* AIF2_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_SHIFT 5 /* AIF2_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF2_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS 0x0010 /* AIF1_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_MASK 0x0010 /* AIF1_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_SHIFT 4 /* AIF1_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF1_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS 0x0008 /* AIF3_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_MASK 0x0008 /* AIF3_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_SHIFT 3 /* AIF3_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF3_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF3_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS 0x0004 /* AIF2_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_MASK 0x0004 /* AIF2_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_SHIFT 2 /* AIF2_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF2_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF2_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS 0x0002 /* AIF1_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_MASK 0x0002 /* AIF1_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_SHIFT 1 /* AIF1_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_AIF1_SYNC_OVERCLOCKED_STS_WIDTH 1 /* AIF1_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS 0x0001 /* PAD_CTRL_OVERCLOCKED_STS */
+#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_MASK 0x0001 /* PAD_CTRL_OVERCLOCKED_STS */
+#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_SHIFT 0 /* PAD_CTRL_OVERCLOCKED_STS */
+#define ARIZONA_PAD_CTRL_OVERCLOCKED_STS_WIDTH 1 /* PAD_CTRL_OVERCLOCKED_STS */
+
+/*
+ * R3365 (0xD25) - Interrupt Raw Status 7
+ */
+#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS 0x8000 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_MASK 0x8000 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_SHIFT 15 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SUBSYS_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_SUBSYS_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS 0x4000 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_MASK 0x4000 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_SHIFT 14 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_ASYNC_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_ASYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS 0x2000 /* SLIMBUS_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_MASK 0x2000 /* SLIMBUS_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_SHIFT 13 /* SLIMBUS_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_SLIMBUS_SYNC_OVERCLOCKED_STS_WIDTH 1 /* SLIMBUS_SYNC_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS 0x1000 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_MASK 0x1000 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_SHIFT 12 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_SYS_OVERCLOCKED_STS_WIDTH 1 /* ASRC_ASYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS 0x0800 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_MASK 0x0800 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_SHIFT 11 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_ASYNC_WARP_OVERCLOCKED_STS_WIDTH 1 /* ASRC_ASYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS 0x0400 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_MASK 0x0400 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_SHIFT 10 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_SYS_OVERCLOCKED_STS_WIDTH 1 /* ASRC_SYNC_SYS_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS 0x0200 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_MASK 0x0200 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_SHIFT 9 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ASRC_SYNC_WARP_OVERCLOCKED_STS_WIDTH 1 /* ASRC_SYNC_WARP_OVERCLOCKED_STS */
+#define ARIZONA_ADSP2_1_OVERCLOCKED_STS 0x0008 /* ADSP2_1_OVERCLOCKED_STS */
+#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_MASK 0x0008 /* ADSP2_1_OVERCLOCKED_STS */
+#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_SHIFT 3 /* ADSP2_1_OVERCLOCKED_STS */
+#define ARIZONA_ADSP2_1_OVERCLOCKED_STS_WIDTH 1 /* ADSP2_1_OVERCLOCKED_STS */
+#define ARIZONA_ISRC3_OVERCLOCKED_STS 0x0004 /* ISRC3_OVERCLOCKED_STS */
+#define ARIZONA_ISRC3_OVERCLOCKED_STS_MASK 0x0004 /* ISRC3_OVERCLOCKED_STS */
+#define ARIZONA_ISRC3_OVERCLOCKED_STS_SHIFT 2 /* ISRC3_OVERCLOCKED_STS */
+#define ARIZONA_ISRC3_OVERCLOCKED_STS_WIDTH 1 /* ISRC3_OVERCLOCKED_STS */
+#define ARIZONA_ISRC2_OVERCLOCKED_STS 0x0002 /* ISRC2_OVERCLOCKED_STS */
+#define ARIZONA_ISRC2_OVERCLOCKED_STS_MASK 0x0002 /* ISRC2_OVERCLOCKED_STS */
+#define ARIZONA_ISRC2_OVERCLOCKED_STS_SHIFT 1 /* ISRC2_OVERCLOCKED_STS */
+#define ARIZONA_ISRC2_OVERCLOCKED_STS_WIDTH 1 /* ISRC2_OVERCLOCKED_STS */
+#define ARIZONA_ISRC1_OVERCLOCKED_STS 0x0001 /* ISRC1_OVERCLOCKED_STS */
+#define ARIZONA_ISRC1_OVERCLOCKED_STS_MASK 0x0001 /* ISRC1_OVERCLOCKED_STS */
+#define ARIZONA_ISRC1_OVERCLOCKED_STS_SHIFT 0 /* ISRC1_OVERCLOCKED_STS */
+#define ARIZONA_ISRC1_OVERCLOCKED_STS_WIDTH 1 /* ISRC1_OVERCLOCKED_STS */
+
+/*
+ * R3366 (0xD26) - Interrupt Raw Status 8
+ */
+#define ARIZONA_AIF3_UNDERCLOCKED_STS 0x0400 /* AIF3_UNDERCLOCKED_STS */
+#define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK 0x0400 /* AIF3_UNDERCLOCKED_STS */
+#define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT 10 /* AIF3_UNDERCLOCKED_STS */
+#define ARIZONA_AIF3_UNDERCLOCKED_STS_WIDTH 1 /* AIF3_UNDERCLOCKED_STS */
+#define ARIZONA_AIF2_UNDERCLOCKED_STS 0x0200 /* AIF2_UNDERCLOCKED_STS */
+#define ARIZONA_AIF2_UNDERCLOCKED_STS_MASK 0x0200 /* AIF2_UNDERCLOCKED_STS */
+#define ARIZONA_AIF2_UNDERCLOCKED_STS_SHIFT 9 /* AIF2_UNDERCLOCKED_STS */
+#define ARIZONA_AIF2_UNDERCLOCKED_STS_WIDTH 1 /* AIF2_UNDERCLOCKED_STS */
+#define ARIZONA_AIF1_UNDERCLOCKED_STS 0x0100 /* AIF1_UNDERCLOCKED_STS */
+#define ARIZONA_AIF1_UNDERCLOCKED_STS_MASK 0x0100 /* AIF1_UNDERCLOCKED_STS */
+#define ARIZONA_AIF1_UNDERCLOCKED_STS_SHIFT 8 /* AIF1_UNDERCLOCKED_STS */
+#define ARIZONA_AIF1_UNDERCLOCKED_STS_WIDTH 1 /* AIF1_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC3_UNDERCLOCKED_STS 0x0080 /* ISRC3_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC3_UNDERCLOCKED_STS_MASK 0x0080 /* ISRC3_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC3_UNDERCLOCKED_STS_SHIFT 7 /* ISRC3_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC3_UNDERCLOCKED_STS_WIDTH 1 /* ISRC3_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC2_UNDERCLOCKED_STS 0x0040 /* ISRC2_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC2_UNDERCLOCKED_STS_MASK 0x0040 /* ISRC2_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC2_UNDERCLOCKED_STS_SHIFT 6 /* ISRC2_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC2_UNDERCLOCKED_STS_WIDTH 1 /* ISRC2_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC1_UNDERCLOCKED_STS 0x0020 /* ISRC1_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC1_UNDERCLOCKED_STS_MASK 0x0020 /* ISRC1_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC1_UNDERCLOCKED_STS_SHIFT 5 /* ISRC1_UNDERCLOCKED_STS */
+#define ARIZONA_ISRC1_UNDERCLOCKED_STS_WIDTH 1 /* ISRC1_UNDERCLOCKED_STS */
+#define ARIZONA_FX_UNDERCLOCKED_STS 0x0010 /* FX_UNDERCLOCKED_STS */
+#define ARIZONA_FX_UNDERCLOCKED_STS_MASK 0x0010 /* FX_UNDERCLOCKED_STS */
+#define ARIZONA_FX_UNDERCLOCKED_STS_SHIFT 4 /* FX_UNDERCLOCKED_STS */
+#define ARIZONA_FX_UNDERCLOCKED_STS_WIDTH 1 /* FX_UNDERCLOCKED_STS */
+#define ARIZONA_ASRC_UNDERCLOCKED_STS 0x0008 /* ASRC_UNDERCLOCKED_STS */
+#define ARIZONA_ASRC_UNDERCLOCKED_STS_MASK 0x0008 /* ASRC_UNDERCLOCKED_STS */
+#define ARIZONA_ASRC_UNDERCLOCKED_STS_SHIFT 3 /* ASRC_UNDERCLOCKED_STS */
+#define ARIZONA_ASRC_UNDERCLOCKED_STS_WIDTH 1 /* ASRC_UNDERCLOCKED_STS */
+#define ARIZONA_DAC_UNDERCLOCKED_STS 0x0004 /* DAC_UNDERCLOCKED_STS */
+#define ARIZONA_DAC_UNDERCLOCKED_STS_MASK 0x0004 /* DAC_UNDERCLOCKED_STS */
+#define ARIZONA_DAC_UNDERCLOCKED_STS_SHIFT 2 /* DAC_UNDERCLOCKED_STS */
+#define ARIZONA_DAC_UNDERCLOCKED_STS_WIDTH 1 /* DAC_UNDERCLOCKED_STS */
+#define ARIZONA_ADC_UNDERCLOCKED_STS 0x0002 /* ADC_UNDERCLOCKED_STS */
+#define ARIZONA_ADC_UNDERCLOCKED_STS_MASK 0x0002 /* ADC_UNDERCLOCKED_STS */
+#define ARIZONA_ADC_UNDERCLOCKED_STS_SHIFT 1 /* ADC_UNDERCLOCKED_STS */
+#define ARIZONA_ADC_UNDERCLOCKED_STS_WIDTH 1 /* ADC_UNDERCLOCKED_STS */
+#define ARIZONA_MIXER_UNDERCLOCKED_STS 0x0001 /* MIXER_UNDERCLOCKED_STS */
+#define ARIZONA_MIXER_UNDERCLOCKED_STS_MASK 0x0001 /* MIXER_UNDERCLOCKED_STS */
+#define ARIZONA_MIXER_UNDERCLOCKED_STS_SHIFT 0 /* MIXER_UNDERCLOCKED_STS */
+#define ARIZONA_MIXER_UNDERCLOCKED_STS_WIDTH 1 /* MIXER_UNDERCLOCKED_STS */
+
+/*
+ * R3368 (0xD28) - Interrupt Raw Status 9
+ */
+#define ARIZONA_DSP_SHARED_WR_COLL_STS 0x8000 /* DSP_SHARED_WR_COLL_STS */
+#define ARIZONA_DSP_SHARED_WR_COLL_STS_MASK 0x8000 /* DSP_SHARED_WR_COLL_STS */
+#define ARIZONA_DSP_SHARED_WR_COLL_STS_SHIFT 15 /* DSP_SHARED_WR_COLL_STS */
+#define ARIZONA_DSP_SHARED_WR_COLL_STS_WIDTH 1 /* DSP_SHARED_WR_COLL_STS */
+#define ARIZONA_SPK_SHUTDOWN_STS 0x4000 /* SPK_SHUTDOWN_STS */
+#define ARIZONA_SPK_SHUTDOWN_STS_MASK 0x4000 /* SPK_SHUTDOWN_STS */
+#define ARIZONA_SPK_SHUTDOWN_STS_SHIFT 14 /* SPK_SHUTDOWN_STS */
+#define ARIZONA_SPK_SHUTDOWN_STS_WIDTH 1 /* SPK_SHUTDOWN_STS */
+#define ARIZONA_SPK1R_SHORT_STS 0x2000 /* SPK1R_SHORT_STS */
+#define ARIZONA_SPK1R_SHORT_STS_MASK 0x2000 /* SPK1R_SHORT_STS */
+#define ARIZONA_SPK1R_SHORT_STS_SHIFT 13 /* SPK1R_SHORT_STS */
+#define ARIZONA_SPK1R_SHORT_STS_WIDTH 1 /* SPK1R_SHORT_STS */
+#define ARIZONA_SPK1L_SHORT_STS 0x1000 /* SPK1L_SHORT_STS */
+#define ARIZONA_SPK1L_SHORT_STS_MASK 0x1000 /* SPK1L_SHORT_STS */
+#define ARIZONA_SPK1L_SHORT_STS_SHIFT 12 /* SPK1L_SHORT_STS */
+#define ARIZONA_SPK1L_SHORT_STS_WIDTH 1 /* SPK1L_SHORT_STS */
+#define ARIZONA_HP3R_SC_NEG_STS 0x0800 /* HP3R_SC_NEG_STS */
+#define ARIZONA_HP3R_SC_NEG_STS_MASK 0x0800 /* HP3R_SC_NEG_STS */
+#define ARIZONA_HP3R_SC_NEG_STS_SHIFT 11 /* HP3R_SC_NEG_STS */
+#define ARIZONA_HP3R_SC_NEG_STS_WIDTH 1 /* HP3R_SC_NEG_STS */
+#define ARIZONA_HP3R_SC_POS_STS 0x0400 /* HP3R_SC_POS_STS */
+#define ARIZONA_HP3R_SC_POS_STS_MASK 0x0400 /* HP3R_SC_POS_STS */
+#define ARIZONA_HP3R_SC_POS_STS_SHIFT 10 /* HP3R_SC_POS_STS */
+#define ARIZONA_HP3R_SC_POS_STS_WIDTH 1 /* HP3R_SC_POS_STS */
+#define ARIZONA_HP3L_SC_NEG_STS 0x0200 /* HP3L_SC_NEG_STS */
+#define ARIZONA_HP3L_SC_NEG_STS_MASK 0x0200 /* HP3L_SC_NEG_STS */
+#define ARIZONA_HP3L_SC_NEG_STS_SHIFT 9 /* HP3L_SC_NEG_STS */
+#define ARIZONA_HP3L_SC_NEG_STS_WIDTH 1 /* HP3L_SC_NEG_STS */
+#define ARIZONA_HP3L_SC_POS_STS 0x0100 /* HP3L_SC_POS_STS */
+#define ARIZONA_HP3L_SC_POS_STS_MASK 0x0100 /* HP3L_SC_POS_STS */
+#define ARIZONA_HP3L_SC_POS_STS_SHIFT 8 /* HP3L_SC_POS_STS */
+#define ARIZONA_HP3L_SC_POS_STS_WIDTH 1 /* HP3L_SC_POS_STS */
+#define ARIZONA_HP2R_SC_NEG_STS 0x0080 /* HP2R_SC_NEG_STS */
+#define ARIZONA_HP2R_SC_NEG_STS_MASK 0x0080 /* HP2R_SC_NEG_STS */
+#define ARIZONA_HP2R_SC_NEG_STS_SHIFT 7 /* HP2R_SC_NEG_STS */
+#define ARIZONA_HP2R_SC_NEG_STS_WIDTH 1 /* HP2R_SC_NEG_STS */
+#define ARIZONA_HP2R_SC_POS_STS 0x0040 /* HP2R_SC_POS_STS */
+#define ARIZONA_HP2R_SC_POS_STS_MASK 0x0040 /* HP2R_SC_POS_STS */
+#define ARIZONA_HP2R_SC_POS_STS_SHIFT 6 /* HP2R_SC_POS_STS */
+#define ARIZONA_HP2R_SC_POS_STS_WIDTH 1 /* HP2R_SC_POS_STS */
+#define ARIZONA_HP2L_SC_NEG_STS 0x0020 /* HP2L_SC_NEG_STS */
+#define ARIZONA_HP2L_SC_NEG_STS_MASK 0x0020 /* HP2L_SC_NEG_STS */
+#define ARIZONA_HP2L_SC_NEG_STS_SHIFT 5 /* HP2L_SC_NEG_STS */
+#define ARIZONA_HP2L_SC_NEG_STS_WIDTH 1 /* HP2L_SC_NEG_STS */
+#define ARIZONA_HP2L_SC_POS_STS 0x0010 /* HP2L_SC_POS_STS */
+#define ARIZONA_HP2L_SC_POS_STS_MASK 0x0010 /* HP2L_SC_POS_STS */
+#define ARIZONA_HP2L_SC_POS_STS_SHIFT 4 /* HP2L_SC_POS_STS */
+#define ARIZONA_HP2L_SC_POS_STS_WIDTH 1 /* HP2L_SC_POS_STS */
+#define ARIZONA_HP1R_SC_NEG_STS 0x0008 /* HP1R_SC_NEG_STS */
+#define ARIZONA_HP1R_SC_NEG_STS_MASK 0x0008 /* HP1R_SC_NEG_STS */
+#define ARIZONA_HP1R_SC_NEG_STS_SHIFT 3 /* HP1R_SC_NEG_STS */
+#define ARIZONA_HP1R_SC_NEG_STS_WIDTH 1 /* HP1R_SC_NEG_STS */
+#define ARIZONA_HP1R_SC_POS_STS 0x0004 /* HP1R_SC_POS_STS */
+#define ARIZONA_HP1R_SC_POS_STS_MASK 0x0004 /* HP1R_SC_POS_STS */
+#define ARIZONA_HP1R_SC_POS_STS_SHIFT 2 /* HP1R_SC_POS_STS */
+#define ARIZONA_HP1R_SC_POS_STS_WIDTH 1 /* HP1R_SC_POS_STS */
+#define ARIZONA_HP1L_SC_NEG_STS 0x0002 /* HP1L_SC_NEG_STS */
+#define ARIZONA_HP1L_SC_NEG_STS_MASK 0x0002 /* HP1L_SC_NEG_STS */
+#define ARIZONA_HP1L_SC_NEG_STS_SHIFT 1 /* HP1L_SC_NEG_STS */
+#define ARIZONA_HP1L_SC_NEG_STS_WIDTH 1 /* HP1L_SC_NEG_STS */
+#define ARIZONA_HP1L_SC_POS_STS 0x0001 /* HP1L_SC_POS_STS */
+#define ARIZONA_HP1L_SC_POS_STS_MASK 0x0001 /* HP1L_SC_POS_STS */
+#define ARIZONA_HP1L_SC_POS_STS_SHIFT 0 /* HP1L_SC_POS_STS */
+#define ARIZONA_HP1L_SC_POS_STS_WIDTH 1 /* HP1L_SC_POS_STS */
+
+/*
+ * R3392 (0xD40) - IRQ Pin Status
+ */
+#define ARIZONA_IRQ2_STS 0x0002 /* IRQ2_STS */
+#define ARIZONA_IRQ2_STS_MASK 0x0002 /* IRQ2_STS */
+#define ARIZONA_IRQ2_STS_SHIFT 1 /* IRQ2_STS */
+#define ARIZONA_IRQ2_STS_WIDTH 1 /* IRQ2_STS */
+#define ARIZONA_IRQ1_STS 0x0001 /* IRQ1_STS */
+#define ARIZONA_IRQ1_STS_MASK 0x0001 /* IRQ1_STS */
+#define ARIZONA_IRQ1_STS_SHIFT 0 /* IRQ1_STS */
+#define ARIZONA_IRQ1_STS_WIDTH 1 /* IRQ1_STS */
+
+/*
+ * R3393 (0xD41) - ADSP2 IRQ0
+ */
+#define ARIZONA_DSP_IRQ2 0x0002 /* DSP_IRQ2 */
+#define ARIZONA_DSP_IRQ2_MASK 0x0002 /* DSP_IRQ2 */
+#define ARIZONA_DSP_IRQ2_SHIFT 1 /* DSP_IRQ2 */
+#define ARIZONA_DSP_IRQ2_WIDTH 1 /* DSP_IRQ2 */
+#define ARIZONA_DSP_IRQ1 0x0001 /* DSP_IRQ1 */
+#define ARIZONA_DSP_IRQ1_MASK 0x0001 /* DSP_IRQ1 */
+#define ARIZONA_DSP_IRQ1_SHIFT 0 /* DSP_IRQ1 */
+#define ARIZONA_DSP_IRQ1_WIDTH 1 /* DSP_IRQ1 */
+
+/*
+ * R3408 (0xD50) - AOD wkup and trig
+ */
+#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS 0x0080 /* MICD_CLAMP_FALL_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_MASK 0x0080 /* MICD_CLAMP_FALL_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_SHIFT 7 /* MICD_CLAMP_FALL_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_WIDTH 1 /* MICD_CLAMP_FALL_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS 0x0040 /* MICD_CLAMP_RISE_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_MASK 0x0040 /* MICD_CLAMP_RISE_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_SHIFT 6 /* MICD_CLAMP_RISE_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_WIDTH 1 /* MICD_CLAMP_RISE_TRIG_STS */
+#define ARIZONA_GP5_FALL_TRIG_STS 0x0020 /* GP5_FALL_TRIG_STS */
+#define ARIZONA_GP5_FALL_TRIG_STS_MASK 0x0020 /* GP5_FALL_TRIG_STS */
+#define ARIZONA_GP5_FALL_TRIG_STS_SHIFT 5 /* GP5_FALL_TRIG_STS */
+#define ARIZONA_GP5_FALL_TRIG_STS_WIDTH 1 /* GP5_FALL_TRIG_STS */
+#define ARIZONA_GP5_RISE_TRIG_STS 0x0010 /* GP5_RISE_TRIG_STS */
+#define ARIZONA_GP5_RISE_TRIG_STS_MASK 0x0010 /* GP5_RISE_TRIG_STS */
+#define ARIZONA_GP5_RISE_TRIG_STS_SHIFT 4 /* GP5_RISE_TRIG_STS */
+#define ARIZONA_GP5_RISE_TRIG_STS_WIDTH 1 /* GP5_RISE_TRIG_STS */
+#define ARIZONA_JD1_FALL_TRIG_STS 0x0008 /* JD1_FALL_TRIG_STS */
+#define ARIZONA_JD1_FALL_TRIG_STS_MASK 0x0008 /* JD1_FALL_TRIG_STS */
+#define ARIZONA_JD1_FALL_TRIG_STS_SHIFT 3 /* JD1_FALL_TRIG_STS */
+#define ARIZONA_JD1_FALL_TRIG_STS_WIDTH 1 /* JD1_FALL_TRIG_STS */
+#define ARIZONA_JD1_RISE_TRIG_STS 0x0004 /* JD1_RISE_TRIG_STS */
+#define ARIZONA_JD1_RISE_TRIG_STS_MASK 0x0004 /* JD1_RISE_TRIG_STS */
+#define ARIZONA_JD1_RISE_TRIG_STS_SHIFT 2 /* JD1_RISE_TRIG_STS */
+#define ARIZONA_JD1_RISE_TRIG_STS_WIDTH 1 /* JD1_RISE_TRIG_STS */
+#define ARIZONA_JD2_FALL_TRIG_STS 0x0002 /* JD2_FALL_TRIG_STS */
+#define ARIZONA_JD2_FALL_TRIG_STS_MASK 0x0002 /* JD2_FALL_TRIG_STS */
+#define ARIZONA_JD2_FALL_TRIG_STS_SHIFT 1 /* JD2_FALL_TRIG_STS */
+#define ARIZONA_JD2_FALL_TRIG_STS_WIDTH 1 /* JD2_FALL_TRIG_STS */
+#define ARIZONA_JD2_RISE_TRIG_STS 0x0001 /* JD2_RISE_TRIG_STS */
+#define ARIZONA_JD2_RISE_TRIG_STS_MASK 0x0001 /* JD2_RISE_TRIG_STS */
+#define ARIZONA_JD2_RISE_TRIG_STS_SHIFT 0 /* JD2_RISE_TRIG_STS */
+#define ARIZONA_JD2_RISE_TRIG_STS_WIDTH 1 /* JD2_RISE_TRIG_STS */
+
+/*
+ * R3409 (0xD51) - AOD IRQ1
+ */
+#define ARIZONA_MICD_CLAMP_FALL_EINT1 0x0080 /* MICD_CLAMP_FALL_EINT1 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT1_MASK 0x0080 /* MICD_CLAMP_FALL_EINT1 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT1_SHIFT 7 /* MICD_CLAMP_FALL_EINT1 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT1_WIDTH 1 /* MICD_CLAMP_FALL_EINT1 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT1 0x0040 /* MICD_CLAMP_RISE_EINT1 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT1_MASK 0x0040 /* MICD_CLAMP_RISE_EINT1 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT1_SHIFT 6 /* MICD_CLAMP_RISE_EINT1 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT1_WIDTH 1 /* MICD_CLAMP_RISE_EINT1 */
+#define ARIZONA_GP5_FALL_EINT1 0x0020 /* GP5_FALL_EINT1 */
+#define ARIZONA_GP5_FALL_EINT1_MASK 0x0020 /* GP5_FALL_EINT1 */
+#define ARIZONA_GP5_FALL_EINT1_SHIFT 5 /* GP5_FALL_EINT1 */
+#define ARIZONA_GP5_FALL_EINT1_WIDTH 1 /* GP5_FALL_EINT1 */
+#define ARIZONA_GP5_RISE_EINT1 0x0010 /* GP5_RISE_EINT1 */
+#define ARIZONA_GP5_RISE_EINT1_MASK 0x0010 /* GP5_RISE_EINT1 */
+#define ARIZONA_GP5_RISE_EINT1_SHIFT 4 /* GP5_RISE_EINT1 */
+#define ARIZONA_GP5_RISE_EINT1_WIDTH 1 /* GP5_RISE_EINT1 */
+#define ARIZONA_JD1_FALL_EINT1 0x0008 /* JD1_FALL_EINT1 */
+#define ARIZONA_JD1_FALL_EINT1_MASK 0x0008 /* JD1_FALL_EINT1 */
+#define ARIZONA_JD1_FALL_EINT1_SHIFT 3 /* JD1_FALL_EINT1 */
+#define ARIZONA_JD1_FALL_EINT1_WIDTH 1 /* JD1_FALL_EINT1 */
+#define ARIZONA_JD1_RISE_EINT1 0x0004 /* JD1_RISE_EINT1 */
+#define ARIZONA_JD1_RISE_EINT1_MASK 0x0004 /* JD1_RISE_EINT1 */
+#define ARIZONA_JD1_RISE_EINT1_SHIFT 2 /* JD1_RISE_EINT1 */
+#define ARIZONA_JD1_RISE_EINT1_WIDTH 1 /* JD1_RISE_EINT1 */
+#define ARIZONA_JD2_FALL_EINT1 0x0002 /* JD2_FALL_EINT1 */
+#define ARIZONA_JD2_FALL_EINT1_MASK 0x0002 /* JD2_FALL_EINT1 */
+#define ARIZONA_JD2_FALL_EINT1_SHIFT 1 /* JD2_FALL_EINT1 */
+#define ARIZONA_JD2_FALL_EINT1_WIDTH 1 /* JD2_FALL_EINT1 */
+#define ARIZONA_JD2_RISE_EINT1 0x0001 /* JD2_RISE_EINT1 */
+#define ARIZONA_JD2_RISE_EINT1_MASK 0x0001 /* JD2_RISE_EINT1 */
+#define ARIZONA_JD2_RISE_EINT1_SHIFT 0 /* JD2_RISE_EINT1 */
+#define ARIZONA_JD2_RISE_EINT1_WIDTH 1 /* JD2_RISE_EINT1 */
+
+/*
+ * R3410 (0xD52) - AOD IRQ2
+ */
+#define ARIZONA_MICD_CLAMP_FALL_EINT2 0x0080 /* MICD_CLAMP_FALL_EINT2 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT2_MASK 0x0080 /* MICD_CLAMP_FALL_EINT2 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT2_SHIFT 7 /* MICD_CLAMP_FALL_EINT2 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT2_WIDTH 1 /* MICD_CLAMP_FALL_EINT2 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT2 0x0040 /* MICD_CLAMP_RISE_EINT2 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT2_MASK 0x0040 /* MICD_CLAMP_RISE_EINT2 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT2_SHIFT 6 /* MICD_CLAMP_RISE_EINT2 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT2_WIDTH 1 /* MICD_CLAMP_RISE_EINT2 */
+#define ARIZONA_GP5_FALL_EINT2 0x0020 /* GP5_FALL_EINT2 */
+#define ARIZONA_GP5_FALL_EINT2_MASK 0x0020 /* GP5_FALL_EINT2 */
+#define ARIZONA_GP5_FALL_EINT2_SHIFT 5 /* GP5_FALL_EINT2 */
+#define ARIZONA_GP5_FALL_EINT2_WIDTH 1 /* GP5_FALL_EINT2 */
+#define ARIZONA_GP5_RISE_EINT2 0x0010 /* GP5_RISE_EINT2 */
+#define ARIZONA_GP5_RISE_EINT2_MASK 0x0010 /* GP5_RISE_EINT2 */
+#define ARIZONA_GP5_RISE_EINT2_SHIFT 4 /* GP5_RISE_EINT2 */
+#define ARIZONA_GP5_RISE_EINT2_WIDTH 1 /* GP5_RISE_EINT2 */
+#define ARIZONA_JD1_FALL_EINT2 0x0008 /* JD1_FALL_EINT2 */
+#define ARIZONA_JD1_FALL_EINT2_MASK 0x0008 /* JD1_FALL_EINT2 */
+#define ARIZONA_JD1_FALL_EINT2_SHIFT 3 /* JD1_FALL_EINT2 */
+#define ARIZONA_JD1_FALL_EINT2_WIDTH 1 /* JD1_FALL_EINT2 */
+#define ARIZONA_JD1_RISE_EINT2 0x0004 /* JD1_RISE_EINT2 */
+#define ARIZONA_JD1_RISE_EINT2_MASK 0x0004 /* JD1_RISE_EINT2 */
+#define ARIZONA_JD1_RISE_EINT2_SHIFT 2 /* JD1_RISE_EINT2 */
+#define ARIZONA_JD1_RISE_EINT2_WIDTH 1 /* JD1_RISE_EINT2 */
+#define ARIZONA_JD2_FALL_EINT2 0x0002 /* JD2_FALL_EINT2 */
+#define ARIZONA_JD2_FALL_EINT2_MASK 0x0002 /* JD2_FALL_EINT2 */
+#define ARIZONA_JD2_FALL_EINT2_SHIFT 1 /* JD2_FALL_EINT2 */
+#define ARIZONA_JD2_FALL_EINT2_WIDTH 1 /* JD2_FALL_EINT2 */
+#define ARIZONA_JD2_RISE_EINT2 0x0001 /* JD2_RISE_EINT2 */
+#define ARIZONA_JD2_RISE_EINT2_MASK 0x0001 /* JD2_RISE_EINT2 */
+#define ARIZONA_JD2_RISE_EINT2_SHIFT 0 /* JD2_RISE_EINT2 */
+#define ARIZONA_JD2_RISE_EINT2_WIDTH 1 /* JD2_RISE_EINT2 */
+
+/*
+ * R3411 (0xD53) - AOD IRQ Mask IRQ1
+ */
+#define ARIZONA_IM_GP5_FALL_EINT1 0x0020 /* IM_GP5_FALL_EINT1 */
+#define ARIZONA_IM_GP5_FALL_EINT1_MASK 0x0020 /* IM_GP5_FALL_EINT1 */
+#define ARIZONA_IM_GP5_FALL_EINT1_SHIFT 5 /* IM_GP5_FALL_EINT1 */
+#define ARIZONA_IM_GP5_FALL_EINT1_WIDTH 1 /* IM_GP5_FALL_EINT1 */
+#define ARIZONA_IM_GP5_RISE_EINT1 0x0010 /* IM_GP5_RISE_EINT1 */
+#define ARIZONA_IM_GP5_RISE_EINT1_MASK 0x0010 /* IM_GP5_RISE_EINT1 */
+#define ARIZONA_IM_GP5_RISE_EINT1_SHIFT 4 /* IM_GP5_RISE_EINT1 */
+#define ARIZONA_IM_GP5_RISE_EINT1_WIDTH 1 /* IM_GP5_RISE_EINT1 */
+#define ARIZONA_IM_JD1_FALL_EINT1 0x0008 /* IM_JD1_FALL_EINT1 */
+#define ARIZONA_IM_JD1_FALL_EINT1_MASK 0x0008 /* IM_JD1_FALL_EINT1 */
+#define ARIZONA_IM_JD1_FALL_EINT1_SHIFT 3 /* IM_JD1_FALL_EINT1 */
+#define ARIZONA_IM_JD1_FALL_EINT1_WIDTH 1 /* IM_JD1_FALL_EINT1 */
+#define ARIZONA_IM_JD1_RISE_EINT1 0x0004 /* IM_JD1_RISE_EINT1 */
+#define ARIZONA_IM_JD1_RISE_EINT1_MASK 0x0004 /* IM_JD1_RISE_EINT1 */
+#define ARIZONA_IM_JD1_RISE_EINT1_SHIFT 2 /* IM_JD1_RISE_EINT1 */
+#define ARIZONA_IM_JD1_RISE_EINT1_WIDTH 1 /* IM_JD1_RISE_EINT1 */
+#define ARIZONA_IM_JD2_FALL_EINT1 0x0002 /* IM_JD2_FALL_EINT1 */
+#define ARIZONA_IM_JD2_FALL_EINT1_MASK 0x0002 /* IM_JD2_FALL_EINT1 */
+#define ARIZONA_IM_JD2_FALL_EINT1_SHIFT 1 /* IM_JD2_FALL_EINT1 */
+#define ARIZONA_IM_JD2_FALL_EINT1_WIDTH 1 /* IM_JD2_FALL_EINT1 */
+#define ARIZONA_IM_JD2_RISE_EINT1 0x0001 /* IM_JD2_RISE_EINT1 */
+#define ARIZONA_IM_JD2_RISE_EINT1_MASK 0x0001 /* IM_JD2_RISE_EINT1 */
+#define ARIZONA_IM_JD2_RISE_EINT1_SHIFT 0 /* IM_JD2_RISE_EINT1 */
+#define ARIZONA_IM_JD2_RISE_EINT1_WIDTH 1 /* IM_JD2_RISE_EINT1 */
+
+/*
+ * R3412 (0xD54) - AOD IRQ Mask IRQ2
+ */
+#define ARIZONA_IM_GP5_FALL_EINT2 0x0020 /* IM_GP5_FALL_EINT2 */
+#define ARIZONA_IM_GP5_FALL_EINT2_MASK 0x0020 /* IM_GP5_FALL_EINT2 */
+#define ARIZONA_IM_GP5_FALL_EINT2_SHIFT 5 /* IM_GP5_FALL_EINT2 */
+#define ARIZONA_IM_GP5_FALL_EINT2_WIDTH 1 /* IM_GP5_FALL_EINT2 */
+#define ARIZONA_IM_GP5_RISE_EINT2 0x0010 /* IM_GP5_RISE_EINT2 */
+#define ARIZONA_IM_GP5_RISE_EINT2_MASK 0x0010 /* IM_GP5_RISE_EINT2 */
+#define ARIZONA_IM_GP5_RISE_EINT2_SHIFT 4 /* IM_GP5_RISE_EINT2 */
+#define ARIZONA_IM_GP5_RISE_EINT2_WIDTH 1 /* IM_GP5_RISE_EINT2 */
+#define ARIZONA_IM_JD1_FALL_EINT2 0x0008 /* IM_JD1_FALL_EINT2 */
+#define ARIZONA_IM_JD1_FALL_EINT2_MASK 0x0008 /* IM_JD1_FALL_EINT2 */
+#define ARIZONA_IM_JD1_FALL_EINT2_SHIFT 3 /* IM_JD1_FALL_EINT2 */
+#define ARIZONA_IM_JD1_FALL_EINT2_WIDTH 1 /* IM_JD1_FALL_EINT2 */
+#define ARIZONA_IM_JD1_RISE_EINT2 0x0004 /* IM_JD1_RISE_EINT2 */
+#define ARIZONA_IM_JD1_RISE_EINT2_MASK 0x0004 /* IM_JD1_RISE_EINT2 */
+#define ARIZONA_IM_JD1_RISE_EINT2_SHIFT 2 /* IM_JD1_RISE_EINT2 */
+#define ARIZONA_IM_JD1_RISE_EINT2_WIDTH 1 /* IM_JD1_RISE_EINT2 */
+#define ARIZONA_IM_JD2_FALL_EINT2 0x0002 /* IM_JD2_FALL_EINT2 */
+#define ARIZONA_IM_JD2_FALL_EINT2_MASK 0x0002 /* IM_JD2_FALL_EINT2 */
+#define ARIZONA_IM_JD2_FALL_EINT2_SHIFT 1 /* IM_JD2_FALL_EINT2 */
+#define ARIZONA_IM_JD2_FALL_EINT2_WIDTH 1 /* IM_JD2_FALL_EINT2 */
+#define ARIZONA_IM_JD2_RISE_EINT2 0x0001 /* IM_JD2_RISE_EINT2 */
+#define ARIZONA_IM_JD2_RISE_EINT2_MASK 0x0001 /* IM_JD2_RISE_EINT2 */
+#define ARIZONA_IM_JD2_RISE_EINT2_SHIFT 0 /* IM_JD2_RISE_EINT2 */
+#define ARIZONA_IM_JD2_RISE_EINT2_WIDTH 1 /* IM_JD2_RISE_EINT2 */
+
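+/*
+ * Illustrative sketch: setting an IM_* bit masks the corresponding
+ * interrupt source on that IRQ output.  Assuming a register-address macro
+ * ARIZONA_AOD_IRQ_MASK_IRQ2 is defined earlier in this header, JD2 rising
+ * edge events could be masked on IRQ2 with:
+ *
+ *	regmap_update_bits(arizona->regmap, ARIZONA_AOD_IRQ_MASK_IRQ2,
+ *			   ARIZONA_IM_JD2_RISE_EINT2_MASK,
+ *			   ARIZONA_IM_JD2_RISE_EINT2);
+ */
+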
+/*
+ * R3413 (0xD55) - AOD IRQ Raw Status
+ */
+#define ARIZONA_MICD_CLAMP_STS 0x0008 /* MICD_CLAMP_STS */
+#define ARIZONA_MICD_CLAMP_STS_MASK 0x0008 /* MICD_CLAMP_STS */
+#define ARIZONA_MICD_CLAMP_STS_SHIFT 3 /* MICD_CLAMP_STS */
+#define ARIZONA_MICD_CLAMP_STS_WIDTH 1 /* MICD_CLAMP_STS */
+#define ARIZONA_GP5_STS 0x0004 /* GP5_STS */
+#define ARIZONA_GP5_STS_MASK 0x0004 /* GP5_STS */
+#define ARIZONA_GP5_STS_SHIFT 2 /* GP5_STS */
+#define ARIZONA_GP5_STS_WIDTH 1 /* GP5_STS */
+#define ARIZONA_JD2_STS 0x0002 /* JD2_STS */
+#define ARIZONA_JD2_STS_MASK 0x0002 /* JD2_STS */
+#define ARIZONA_JD2_STS_SHIFT 1 /* JD2_STS */
+#define ARIZONA_JD2_STS_WIDTH 1 /* JD2_STS */
+#define ARIZONA_JD1_STS 0x0001 /* JD1_STS */
+#define ARIZONA_JD1_STS_MASK 0x0001 /* JD1_STS */
+#define ARIZONA_JD1_STS_SHIFT 0 /* JD1_STS */
+#define ARIZONA_JD1_STS_WIDTH 1 /* JD1_STS */
+
+/*
+ * R3414 (0xD56) - Jack detect debounce
+ */
+#define ARIZONA_MICD_CLAMP_DB 0x0008 /* MICD_CLAMP_DB */
+#define ARIZONA_MICD_CLAMP_DB_MASK 0x0008 /* MICD_CLAMP_DB */
+#define ARIZONA_MICD_CLAMP_DB_SHIFT 3 /* MICD_CLAMP_DB */
+#define ARIZONA_MICD_CLAMP_DB_WIDTH 1 /* MICD_CLAMP_DB */
+#define ARIZONA_JD2_DB 0x0002 /* JD2_DB */
+#define ARIZONA_JD2_DB_MASK 0x0002 /* JD2_DB */
+#define ARIZONA_JD2_DB_SHIFT 1 /* JD2_DB */
+#define ARIZONA_JD2_DB_WIDTH 1 /* JD2_DB */
+#define ARIZONA_JD1_DB 0x0001 /* JD1_DB */
+#define ARIZONA_JD1_DB_MASK 0x0001 /* JD1_DB */
+#define ARIZONA_JD1_DB_SHIFT 0 /* JD1_DB */
+#define ARIZONA_JD1_DB_WIDTH 1 /* JD1_DB */
+
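+/*
+ * Illustrative sketch: the debounce enables are single-bit fields, so they
+ * can be set or cleared with regmap_update_bits().  Assuming a
+ * register-address macro ARIZONA_JACK_DETECT_DEBOUNCE is defined earlier
+ * in this header, JD1 debounce could be enabled with:
+ *
+ *	regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_DEBOUNCE,
+ *			   ARIZONA_JD1_DB_MASK, ARIZONA_JD1_DB);
+ */
+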
+/*
+ * R3584 (0xE00) - FX_Ctrl1
+ */
+#define ARIZONA_FX_RATE_MASK 0x7800 /* FX_RATE - [14:11] */
+#define ARIZONA_FX_RATE_SHIFT 11 /* FX_RATE - [14:11] */
+#define ARIZONA_FX_RATE_WIDTH 4 /* FX_RATE - [14:11] */
+
+/*
+ * R3585 (0xE01) - FX_Ctrl2
+ */
+#define ARIZONA_FX_STS_MASK 0xFFF0 /* FX_STS - [15:4] */
+#define ARIZONA_FX_STS_SHIFT 4 /* FX_STS - [15:4] */
+#define ARIZONA_FX_STS_WIDTH 12 /* FX_STS - [15:4] */
+
+/*
+ * R3600 (0xE10) - EQ1_1
+ */
+#define ARIZONA_EQ1_B1_GAIN_MASK 0xF800 /* EQ1_B1_GAIN - [15:11] */
+#define ARIZONA_EQ1_B1_GAIN_SHIFT 11 /* EQ1_B1_GAIN - [15:11] */
+#define ARIZONA_EQ1_B1_GAIN_WIDTH 5 /* EQ1_B1_GAIN - [15:11] */
+#define ARIZONA_EQ1_B2_GAIN_MASK 0x07C0 /* EQ1_B2_GAIN - [10:6] */
+#define ARIZONA_EQ1_B2_GAIN_SHIFT 6 /* EQ1_B2_GAIN - [10:6] */
+#define ARIZONA_EQ1_B2_GAIN_WIDTH 5 /* EQ1_B2_GAIN - [10:6] */
+#define ARIZONA_EQ1_B3_GAIN_MASK 0x003E /* EQ1_B3_GAIN - [5:1] */
+#define ARIZONA_EQ1_B3_GAIN_SHIFT 1 /* EQ1_B3_GAIN - [5:1] */
+#define ARIZONA_EQ1_B3_GAIN_WIDTH 5 /* EQ1_B3_GAIN - [5:1] */
+#define ARIZONA_EQ1_ENA 0x0001 /* EQ1_ENA */
+#define ARIZONA_EQ1_ENA_MASK 0x0001 /* EQ1_ENA */
+#define ARIZONA_EQ1_ENA_SHIFT 0 /* EQ1_ENA */
+#define ARIZONA_EQ1_ENA_WIDTH 1 /* EQ1_ENA */
+
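+/*
+ * Illustrative sketch: multi-bit fields such as EQ1_B1_GAIN are written by
+ * shifting the value into place and masking with the *_MASK macro.
+ * Assuming a register-address macro ARIZONA_EQ1_1 is defined earlier in
+ * this header and "gain" holds a 5-bit gain code:
+ *
+ *	regmap_update_bits(arizona->regmap, ARIZONA_EQ1_1,
+ *			   ARIZONA_EQ1_B1_GAIN_MASK,
+ *			   gain << ARIZONA_EQ1_B1_GAIN_SHIFT);
+ */
+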
+/*
+ * R3601 (0xE11) - EQ1_2
+ */
+#define ARIZONA_EQ1_B4_GAIN_MASK 0xF800 /* EQ1_B4_GAIN - [15:11] */
+#define ARIZONA_EQ1_B4_GAIN_SHIFT 11 /* EQ1_B4_GAIN - [15:11] */
+#define ARIZONA_EQ1_B4_GAIN_WIDTH 5 /* EQ1_B4_GAIN - [15:11] */
+#define ARIZONA_EQ1_B5_GAIN_MASK 0x07C0 /* EQ1_B5_GAIN - [10:6] */
+#define ARIZONA_EQ1_B5_GAIN_SHIFT 6 /* EQ1_B5_GAIN - [10:6] */
+#define ARIZONA_EQ1_B5_GAIN_WIDTH 5 /* EQ1_B5_GAIN - [10:6] */
+#define ARIZONA_EQ1_B1_MODE 0x0001 /* EQ1_B1_MODE */
+#define ARIZONA_EQ1_B1_MODE_MASK 0x0001 /* EQ1_B1_MODE */
+#define ARIZONA_EQ1_B1_MODE_SHIFT 0 /* EQ1_B1_MODE */
+#define ARIZONA_EQ1_B1_MODE_WIDTH 1 /* EQ1_B1_MODE */
+
+/*
+ * R3602 (0xE12) - EQ1_3
+ */
+#define ARIZONA_EQ1_B1_A_MASK 0xFFFF /* EQ1_B1_A - [15:0] */
+#define ARIZONA_EQ1_B1_A_SHIFT 0 /* EQ1_B1_A - [15:0] */
+#define ARIZONA_EQ1_B1_A_WIDTH 16 /* EQ1_B1_A - [15:0] */
+
+/*
+ * R3603 (0xE13) - EQ1_4
+ */
+#define ARIZONA_EQ1_B1_B_MASK 0xFFFF /* EQ1_B1_B - [15:0] */
+#define ARIZONA_EQ1_B1_B_SHIFT 0 /* EQ1_B1_B - [15:0] */
+#define ARIZONA_EQ1_B1_B_WIDTH 16 /* EQ1_B1_B - [15:0] */
+
+/*
+ * R3604 (0xE14) - EQ1_5
+ */
+#define ARIZONA_EQ1_B1_PG_MASK 0xFFFF /* EQ1_B1_PG - [15:0] */
+#define ARIZONA_EQ1_B1_PG_SHIFT 0 /* EQ1_B1_PG - [15:0] */
+#define ARIZONA_EQ1_B1_PG_WIDTH 16 /* EQ1_B1_PG - [15:0] */
+
+/*
+ * R3605 (0xE15) - EQ1_6
+ */
+#define ARIZONA_EQ1_B2_A_MASK 0xFFFF /* EQ1_B2_A - [15:0] */
+#define ARIZONA_EQ1_B2_A_SHIFT 0 /* EQ1_B2_A - [15:0] */
+#define ARIZONA_EQ1_B2_A_WIDTH 16 /* EQ1_B2_A - [15:0] */
+
+/*
+ * R3606 (0xE16) - EQ1_7
+ */
+#define ARIZONA_EQ1_B2_B_MASK 0xFFFF /* EQ1_B2_B - [15:0] */
+#define ARIZONA_EQ1_B2_B_SHIFT 0 /* EQ1_B2_B - [15:0] */
+#define ARIZONA_EQ1_B2_B_WIDTH 16 /* EQ1_B2_B - [15:0] */
+
+/*
+ * R3607 (0xE17) - EQ1_8
+ */
+#define ARIZONA_EQ1_B2_C_MASK 0xFFFF /* EQ1_B2_C - [15:0] */
+#define ARIZONA_EQ1_B2_C_SHIFT 0 /* EQ1_B2_C - [15:0] */
+#define ARIZONA_EQ1_B2_C_WIDTH 16 /* EQ1_B2_C - [15:0] */
+
+/*
+ * R3608 (0xE18) - EQ1_9
+ */
+#define ARIZONA_EQ1_B2_PG_MASK 0xFFFF /* EQ1_B2_PG - [15:0] */
+#define ARIZONA_EQ1_B2_PG_SHIFT 0 /* EQ1_B2_PG - [15:0] */
+#define ARIZONA_EQ1_B2_PG_WIDTH 16 /* EQ1_B2_PG - [15:0] */
+
+/*
+ * R3609 (0xE19) - EQ1_10
+ */
+#define ARIZONA_EQ1_B3_A_MASK 0xFFFF /* EQ1_B3_A - [15:0] */
+#define ARIZONA_EQ1_B3_A_SHIFT 0 /* EQ1_B3_A - [15:0] */
+#define ARIZONA_EQ1_B3_A_WIDTH 16 /* EQ1_B3_A - [15:0] */
+
+/*
+ * R3610 (0xE1A) - EQ1_11
+ */
+#define ARIZONA_EQ1_B3_B_MASK 0xFFFF /* EQ1_B3_B - [15:0] */
+#define ARIZONA_EQ1_B3_B_SHIFT 0 /* EQ1_B3_B - [15:0] */
+#define ARIZONA_EQ1_B3_B_WIDTH 16 /* EQ1_B3_B - [15:0] */
+
+/*
+ * R3611 (0xE1B) - EQ1_12
+ */
+#define ARIZONA_EQ1_B3_C_MASK 0xFFFF /* EQ1_B3_C - [15:0] */
+#define ARIZONA_EQ1_B3_C_SHIFT 0 /* EQ1_B3_C - [15:0] */
+#define ARIZONA_EQ1_B3_C_WIDTH 16 /* EQ1_B3_C - [15:0] */
+
+/*
+ * R3612 (0xE1C) - EQ1_13
+ */
+#define ARIZONA_EQ1_B3_PG_MASK 0xFFFF /* EQ1_B3_PG - [15:0] */
+#define ARIZONA_EQ1_B3_PG_SHIFT 0 /* EQ1_B3_PG - [15:0] */
+#define ARIZONA_EQ1_B3_PG_WIDTH 16 /* EQ1_B3_PG - [15:0] */
+
+/*
+ * R3613 (0xE1D) - EQ1_14
+ */
+#define ARIZONA_EQ1_B4_A_MASK 0xFFFF /* EQ1_B4_A - [15:0] */
+#define ARIZONA_EQ1_B4_A_SHIFT 0 /* EQ1_B4_A - [15:0] */
+#define ARIZONA_EQ1_B4_A_WIDTH 16 /* EQ1_B4_A - [15:0] */
+
+/*
+ * R3614 (0xE1E) - EQ1_15
+ */
+#define ARIZONA_EQ1_B4_B_MASK 0xFFFF /* EQ1_B4_B - [15:0] */
+#define ARIZONA_EQ1_B4_B_SHIFT 0 /* EQ1_B4_B - [15:0] */
+#define ARIZONA_EQ1_B4_B_WIDTH 16 /* EQ1_B4_B - [15:0] */
+
+/*
+ * R3615 (0xE1F) - EQ1_16
+ */
+#define ARIZONA_EQ1_B4_C_MASK 0xFFFF /* EQ1_B4_C - [15:0] */
+#define ARIZONA_EQ1_B4_C_SHIFT 0 /* EQ1_B4_C - [15:0] */
+#define ARIZONA_EQ1_B4_C_WIDTH 16 /* EQ1_B4_C - [15:0] */
+
+/*
+ * R3616 (0xE20) - EQ1_17
+ */
+#define ARIZONA_EQ1_B4_PG_MASK 0xFFFF /* EQ1_B4_PG - [15:0] */
+#define ARIZONA_EQ1_B4_PG_SHIFT 0 /* EQ1_B4_PG - [15:0] */
+#define ARIZONA_EQ1_B4_PG_WIDTH 16 /* EQ1_B4_PG - [15:0] */
+
+/*
+ * R3617 (0xE21) - EQ1_18
+ */
+#define ARIZONA_EQ1_B5_A_MASK 0xFFFF /* EQ1_B5_A - [15:0] */
+#define ARIZONA_EQ1_B5_A_SHIFT 0 /* EQ1_B5_A - [15:0] */
+#define ARIZONA_EQ1_B5_A_WIDTH 16 /* EQ1_B5_A - [15:0] */
+
+/*
+ * R3618 (0xE22) - EQ1_19
+ */
+#define ARIZONA_EQ1_B5_B_MASK 0xFFFF /* EQ1_B5_B - [15:0] */
+#define ARIZONA_EQ1_B5_B_SHIFT 0 /* EQ1_B5_B - [15:0] */
+#define ARIZONA_EQ1_B5_B_WIDTH 16 /* EQ1_B5_B - [15:0] */
+
+/*
+ * R3619 (0xE23) - EQ1_20
+ */
+#define ARIZONA_EQ1_B5_PG_MASK 0xFFFF /* EQ1_B5_PG - [15:0] */
+#define ARIZONA_EQ1_B5_PG_SHIFT 0 /* EQ1_B5_PG - [15:0] */
+#define ARIZONA_EQ1_B5_PG_WIDTH 16 /* EQ1_B5_PG - [15:0] */
+
+/*
+ * R3620 (0xE24) - EQ1_21
+ */
+#define ARIZONA_EQ1_B1_C_MASK 0xFFFF /* EQ1_B1_C - [15:0] */
+#define ARIZONA_EQ1_B1_C_SHIFT 0 /* EQ1_B1_C - [15:0] */
+#define ARIZONA_EQ1_B1_C_WIDTH 16 /* EQ1_B1_C - [15:0] */
+
+/*
+ * R3622 (0xE26) - EQ2_1
+ */
+#define ARIZONA_EQ2_B1_GAIN_MASK 0xF800 /* EQ2_B1_GAIN - [15:11] */
+#define ARIZONA_EQ2_B1_GAIN_SHIFT 11 /* EQ2_B1_GAIN - [15:11] */
+#define ARIZONA_EQ2_B1_GAIN_WIDTH 5 /* EQ2_B1_GAIN - [15:11] */
+#define ARIZONA_EQ2_B2_GAIN_MASK 0x07C0 /* EQ2_B2_GAIN - [10:6] */
+#define ARIZONA_EQ2_B2_GAIN_SHIFT 6 /* EQ2_B2_GAIN - [10:6] */
+#define ARIZONA_EQ2_B2_GAIN_WIDTH 5 /* EQ2_B2_GAIN - [10:6] */
+#define ARIZONA_EQ2_B3_GAIN_MASK 0x003E /* EQ2_B3_GAIN - [5:1] */
+#define ARIZONA_EQ2_B3_GAIN_SHIFT 1 /* EQ2_B3_GAIN - [5:1] */
+#define ARIZONA_EQ2_B3_GAIN_WIDTH 5 /* EQ2_B3_GAIN - [5:1] */
+#define ARIZONA_EQ2_ENA 0x0001 /* EQ2_ENA */
+#define ARIZONA_EQ2_ENA_MASK 0x0001 /* EQ2_ENA */
+#define ARIZONA_EQ2_ENA_SHIFT 0 /* EQ2_ENA */
+#define ARIZONA_EQ2_ENA_WIDTH 1 /* EQ2_ENA */
+
+/*
+ * R3623 (0xE27) - EQ2_2
+ */
+#define ARIZONA_EQ2_B4_GAIN_MASK 0xF800 /* EQ2_B4_GAIN - [15:11] */
+#define ARIZONA_EQ2_B4_GAIN_SHIFT 11 /* EQ2_B4_GAIN - [15:11] */
+#define ARIZONA_EQ2_B4_GAIN_WIDTH 5 /* EQ2_B4_GAIN - [15:11] */
+#define ARIZONA_EQ2_B5_GAIN_MASK 0x07C0 /* EQ2_B5_GAIN - [10:6] */
+#define ARIZONA_EQ2_B5_GAIN_SHIFT 6 /* EQ2_B5_GAIN - [10:6] */
+#define ARIZONA_EQ2_B5_GAIN_WIDTH 5 /* EQ2_B5_GAIN - [10:6] */
+#define ARIZONA_EQ2_B1_MODE 0x0001 /* EQ2_B1_MODE */
+#define ARIZONA_EQ2_B1_MODE_MASK 0x0001 /* EQ2_B1_MODE */
+#define ARIZONA_EQ2_B1_MODE_SHIFT 0 /* EQ2_B1_MODE */
+#define ARIZONA_EQ2_B1_MODE_WIDTH 1 /* EQ2_B1_MODE */
+
+/*
+ * R3624 (0xE28) - EQ2_3
+ */
+#define ARIZONA_EQ2_B1_A_MASK 0xFFFF /* EQ2_B1_A - [15:0] */
+#define ARIZONA_EQ2_B1_A_SHIFT 0 /* EQ2_B1_A - [15:0] */
+#define ARIZONA_EQ2_B1_A_WIDTH 16 /* EQ2_B1_A - [15:0] */
+
+/*
+ * R3625 (0xE29) - EQ2_4
+ */
+#define ARIZONA_EQ2_B1_B_MASK 0xFFFF /* EQ2_B1_B - [15:0] */
+#define ARIZONA_EQ2_B1_B_SHIFT 0 /* EQ2_B1_B - [15:0] */
+#define ARIZONA_EQ2_B1_B_WIDTH 16 /* EQ2_B1_B - [15:0] */
+
+/*
+ * R3626 (0xE2A) - EQ2_5
+ */
+#define ARIZONA_EQ2_B1_PG_MASK 0xFFFF /* EQ2_B1_PG - [15:0] */
+#define ARIZONA_EQ2_B1_PG_SHIFT 0 /* EQ2_B1_PG - [15:0] */
+#define ARIZONA_EQ2_B1_PG_WIDTH 16 /* EQ2_B1_PG - [15:0] */
+
+/*
+ * R3627 (0xE2B) - EQ2_6
+ */
+#define ARIZONA_EQ2_B2_A_MASK 0xFFFF /* EQ2_B2_A - [15:0] */
+#define ARIZONA_EQ2_B2_A_SHIFT 0 /* EQ2_B2_A - [15:0] */
+#define ARIZONA_EQ2_B2_A_WIDTH 16 /* EQ2_B2_A - [15:0] */
+
+/*
+ * R3628 (0xE2C) - EQ2_7
+ */
+#define ARIZONA_EQ2_B2_B_MASK 0xFFFF /* EQ2_B2_B - [15:0] */
+#define ARIZONA_EQ2_B2_B_SHIFT 0 /* EQ2_B2_B - [15:0] */
+#define ARIZONA_EQ2_B2_B_WIDTH 16 /* EQ2_B2_B - [15:0] */
+
+/*
+ * R3629 (0xE2D) - EQ2_8
+ */
+#define ARIZONA_EQ2_B2_C_MASK 0xFFFF /* EQ2_B2_C - [15:0] */
+#define ARIZONA_EQ2_B2_C_SHIFT 0 /* EQ2_B2_C - [15:0] */
+#define ARIZONA_EQ2_B2_C_WIDTH 16 /* EQ2_B2_C - [15:0] */
+
+/*
+ * R3630 (0xE2E) - EQ2_9
+ */
+#define ARIZONA_EQ2_B2_PG_MASK 0xFFFF /* EQ2_B2_PG - [15:0] */
+#define ARIZONA_EQ2_B2_PG_SHIFT 0 /* EQ2_B2_PG - [15:0] */
+#define ARIZONA_EQ2_B2_PG_WIDTH 16 /* EQ2_B2_PG - [15:0] */
+
+/*
+ * R3631 (0xE2F) - EQ2_10
+ */
+#define ARIZONA_EQ2_B3_A_MASK 0xFFFF /* EQ2_B3_A - [15:0] */
+#define ARIZONA_EQ2_B3_A_SHIFT 0 /* EQ2_B3_A - [15:0] */
+#define ARIZONA_EQ2_B3_A_WIDTH 16 /* EQ2_B3_A - [15:0] */
+
+/*
+ * R3632 (0xE30) - EQ2_11
+ */
+#define ARIZONA_EQ2_B3_B_MASK 0xFFFF /* EQ2_B3_B - [15:0] */
+#define ARIZONA_EQ2_B3_B_SHIFT 0 /* EQ2_B3_B - [15:0] */
+#define ARIZONA_EQ2_B3_B_WIDTH 16 /* EQ2_B3_B - [15:0] */
+
+/*
+ * R3633 (0xE31) - EQ2_12
+ */
+#define ARIZONA_EQ2_B3_C_MASK 0xFFFF /* EQ2_B3_C - [15:0] */
+#define ARIZONA_EQ2_B3_C_SHIFT 0 /* EQ2_B3_C - [15:0] */
+#define ARIZONA_EQ2_B3_C_WIDTH 16 /* EQ2_B3_C - [15:0] */
+
+/*
+ * R3634 (0xE32) - EQ2_13
+ */
+#define ARIZONA_EQ2_B3_PG_MASK 0xFFFF /* EQ2_B3_PG - [15:0] */
+#define ARIZONA_EQ2_B3_PG_SHIFT 0 /* EQ2_B3_PG - [15:0] */
+#define ARIZONA_EQ2_B3_PG_WIDTH 16 /* EQ2_B3_PG - [15:0] */
+
+/*
+ * R3635 (0xE33) - EQ2_14
+ */
+#define ARIZONA_EQ2_B4_A_MASK 0xFFFF /* EQ2_B4_A - [15:0] */
+#define ARIZONA_EQ2_B4_A_SHIFT 0 /* EQ2_B4_A - [15:0] */
+#define ARIZONA_EQ2_B4_A_WIDTH 16 /* EQ2_B4_A - [15:0] */
+
+/*
+ * R3636 (0xE34) - EQ2_15
+ */
+#define ARIZONA_EQ2_B4_B_MASK 0xFFFF /* EQ2_B4_B - [15:0] */
+#define ARIZONA_EQ2_B4_B_SHIFT 0 /* EQ2_B4_B - [15:0] */
+#define ARIZONA_EQ2_B4_B_WIDTH 16 /* EQ2_B4_B - [15:0] */
+
+/*
+ * R3637 (0xE35) - EQ2_16
+ */
+#define ARIZONA_EQ2_B4_C_MASK 0xFFFF /* EQ2_B4_C - [15:0] */
+#define ARIZONA_EQ2_B4_C_SHIFT 0 /* EQ2_B4_C - [15:0] */
+#define ARIZONA_EQ2_B4_C_WIDTH 16 /* EQ2_B4_C - [15:0] */
+
+/*
+ * R3638 (0xE36) - EQ2_17
+ */
+#define ARIZONA_EQ2_B4_PG_MASK 0xFFFF /* EQ2_B4_PG - [15:0] */
+#define ARIZONA_EQ2_B4_PG_SHIFT 0 /* EQ2_B4_PG - [15:0] */
+#define ARIZONA_EQ2_B4_PG_WIDTH 16 /* EQ2_B4_PG - [15:0] */
+
+/*
+ * R3639 (0xE37) - EQ2_18
+ */
+#define ARIZONA_EQ2_B5_A_MASK 0xFFFF /* EQ2_B5_A - [15:0] */
+#define ARIZONA_EQ2_B5_A_SHIFT 0 /* EQ2_B5_A - [15:0] */
+#define ARIZONA_EQ2_B5_A_WIDTH 16 /* EQ2_B5_A - [15:0] */
+
+/*
+ * R3640 (0xE38) - EQ2_19
+ */
+#define ARIZONA_EQ2_B5_B_MASK 0xFFFF /* EQ2_B5_B - [15:0] */
+#define ARIZONA_EQ2_B5_B_SHIFT 0 /* EQ2_B5_B - [15:0] */
+#define ARIZONA_EQ2_B5_B_WIDTH 16 /* EQ2_B5_B - [15:0] */
+
+/*
+ * R3641 (0xE39) - EQ2_20
+ */
+#define ARIZONA_EQ2_B5_PG_MASK 0xFFFF /* EQ2_B5_PG - [15:0] */
+#define ARIZONA_EQ2_B5_PG_SHIFT 0 /* EQ2_B5_PG - [15:0] */
+#define ARIZONA_EQ2_B5_PG_WIDTH 16 /* EQ2_B5_PG - [15:0] */
+
+/*
+ * R3642 (0xE3A) - EQ2_21
+ */
+#define ARIZONA_EQ2_B1_C_MASK 0xFFFF /* EQ2_B1_C - [15:0] */
+#define ARIZONA_EQ2_B1_C_SHIFT 0 /* EQ2_B1_C - [15:0] */
+#define ARIZONA_EQ2_B1_C_WIDTH 16 /* EQ2_B1_C - [15:0] */
+
+/*
+ * R3644 (0xE3C) - EQ3_1
+ */
+#define ARIZONA_EQ3_B1_GAIN_MASK 0xF800 /* EQ3_B1_GAIN - [15:11] */
+#define ARIZONA_EQ3_B1_GAIN_SHIFT 11 /* EQ3_B1_GAIN - [15:11] */
+#define ARIZONA_EQ3_B1_GAIN_WIDTH 5 /* EQ3_B1_GAIN - [15:11] */
+#define ARIZONA_EQ3_B2_GAIN_MASK 0x07C0 /* EQ3_B2_GAIN - [10:6] */
+#define ARIZONA_EQ3_B2_GAIN_SHIFT 6 /* EQ3_B2_GAIN - [10:6] */
+#define ARIZONA_EQ3_B2_GAIN_WIDTH 5 /* EQ3_B2_GAIN - [10:6] */
+#define ARIZONA_EQ3_B3_GAIN_MASK 0x003E /* EQ3_B3_GAIN - [5:1] */
+#define ARIZONA_EQ3_B3_GAIN_SHIFT 1 /* EQ3_B3_GAIN - [5:1] */
+#define ARIZONA_EQ3_B3_GAIN_WIDTH 5 /* EQ3_B3_GAIN - [5:1] */
+#define ARIZONA_EQ3_ENA 0x0001 /* EQ3_ENA */
+#define ARIZONA_EQ3_ENA_MASK 0x0001 /* EQ3_ENA */
+#define ARIZONA_EQ3_ENA_SHIFT 0 /* EQ3_ENA */
+#define ARIZONA_EQ3_ENA_WIDTH 1 /* EQ3_ENA */
+
+/*
+ * R3645 (0xE3D) - EQ3_2
+ */
+#define ARIZONA_EQ3_B4_GAIN_MASK 0xF800 /* EQ3_B4_GAIN - [15:11] */
+#define ARIZONA_EQ3_B4_GAIN_SHIFT 11 /* EQ3_B4_GAIN - [15:11] */
+#define ARIZONA_EQ3_B4_GAIN_WIDTH 5 /* EQ3_B4_GAIN - [15:11] */
+#define ARIZONA_EQ3_B5_GAIN_MASK 0x07C0 /* EQ3_B5_GAIN - [10:6] */
+#define ARIZONA_EQ3_B5_GAIN_SHIFT 6 /* EQ3_B5_GAIN - [10:6] */
+#define ARIZONA_EQ3_B5_GAIN_WIDTH 5 /* EQ3_B5_GAIN - [10:6] */
+#define ARIZONA_EQ3_B1_MODE 0x0001 /* EQ3_B1_MODE */
+#define ARIZONA_EQ3_B1_MODE_MASK 0x0001 /* EQ3_B1_MODE */
+#define ARIZONA_EQ3_B1_MODE_SHIFT 0 /* EQ3_B1_MODE */
+#define ARIZONA_EQ3_B1_MODE_WIDTH 1 /* EQ3_B1_MODE */
+
+/*
+ * R3646 (0xE3E) - EQ3_3
+ */
+#define ARIZONA_EQ3_B1_A_MASK 0xFFFF /* EQ3_B1_A - [15:0] */
+#define ARIZONA_EQ3_B1_A_SHIFT 0 /* EQ3_B1_A - [15:0] */
+#define ARIZONA_EQ3_B1_A_WIDTH 16 /* EQ3_B1_A - [15:0] */
+
+/*
+ * R3647 (0xE3F) - EQ3_4
+ */
+#define ARIZONA_EQ3_B1_B_MASK 0xFFFF /* EQ3_B1_B - [15:0] */
+#define ARIZONA_EQ3_B1_B_SHIFT 0 /* EQ3_B1_B - [15:0] */
+#define ARIZONA_EQ3_B1_B_WIDTH 16 /* EQ3_B1_B - [15:0] */
+
+/*
+ * R3648 (0xE40) - EQ3_5
+ */
+#define ARIZONA_EQ3_B1_PG_MASK 0xFFFF /* EQ3_B1_PG - [15:0] */
+#define ARIZONA_EQ3_B1_PG_SHIFT 0 /* EQ3_B1_PG - [15:0] */
+#define ARIZONA_EQ3_B1_PG_WIDTH 16 /* EQ3_B1_PG - [15:0] */
+
+/*
+ * R3649 (0xE41) - EQ3_6
+ */
+#define ARIZONA_EQ3_B2_A_MASK 0xFFFF /* EQ3_B2_A - [15:0] */
+#define ARIZONA_EQ3_B2_A_SHIFT 0 /* EQ3_B2_A - [15:0] */
+#define ARIZONA_EQ3_B2_A_WIDTH 16 /* EQ3_B2_A - [15:0] */
+
+/*
+ * R3650 (0xE42) - EQ3_7
+ */
+#define ARIZONA_EQ3_B2_B_MASK 0xFFFF /* EQ3_B2_B - [15:0] */
+#define ARIZONA_EQ3_B2_B_SHIFT 0 /* EQ3_B2_B - [15:0] */
+#define ARIZONA_EQ3_B2_B_WIDTH 16 /* EQ3_B2_B - [15:0] */
+
+/*
+ * R3651 (0xE43) - EQ3_8
+ */
+#define ARIZONA_EQ3_B2_C_MASK 0xFFFF /* EQ3_B2_C - [15:0] */
+#define ARIZONA_EQ3_B2_C_SHIFT 0 /* EQ3_B2_C - [15:0] */
+#define ARIZONA_EQ3_B2_C_WIDTH 16 /* EQ3_B2_C - [15:0] */
+
+/*
+ * R3652 (0xE44) - EQ3_9
+ */
+#define ARIZONA_EQ3_B2_PG_MASK 0xFFFF /* EQ3_B2_PG - [15:0] */
+#define ARIZONA_EQ3_B2_PG_SHIFT 0 /* EQ3_B2_PG - [15:0] */
+#define ARIZONA_EQ3_B2_PG_WIDTH 16 /* EQ3_B2_PG - [15:0] */
+
+/*
+ * R3653 (0xE45) - EQ3_10
+ */
+#define ARIZONA_EQ3_B3_A_MASK 0xFFFF /* EQ3_B3_A - [15:0] */
+#define ARIZONA_EQ3_B3_A_SHIFT 0 /* EQ3_B3_A - [15:0] */
+#define ARIZONA_EQ3_B3_A_WIDTH 16 /* EQ3_B3_A - [15:0] */
+
+/*
+ * R3654 (0xE46) - EQ3_11
+ */
+#define ARIZONA_EQ3_B3_B_MASK 0xFFFF /* EQ3_B3_B - [15:0] */
+#define ARIZONA_EQ3_B3_B_SHIFT 0 /* EQ3_B3_B - [15:0] */
+#define ARIZONA_EQ3_B3_B_WIDTH 16 /* EQ3_B3_B - [15:0] */
+
+/*
+ * R3655 (0xE47) - EQ3_12
+ */
+#define ARIZONA_EQ3_B3_C_MASK 0xFFFF /* EQ3_B3_C - [15:0] */
+#define ARIZONA_EQ3_B3_C_SHIFT 0 /* EQ3_B3_C - [15:0] */
+#define ARIZONA_EQ3_B3_C_WIDTH 16 /* EQ3_B3_C - [15:0] */
+
+/*
+ * R3656 (0xE48) - EQ3_13
+ */
+#define ARIZONA_EQ3_B3_PG_MASK 0xFFFF /* EQ3_B3_PG - [15:0] */
+#define ARIZONA_EQ3_B3_PG_SHIFT 0 /* EQ3_B3_PG - [15:0] */
+#define ARIZONA_EQ3_B3_PG_WIDTH 16 /* EQ3_B3_PG - [15:0] */
+
+/*
+ * R3657 (0xE49) - EQ3_14
+ */
+#define ARIZONA_EQ3_B4_A_MASK 0xFFFF /* EQ3_B4_A - [15:0] */
+#define ARIZONA_EQ3_B4_A_SHIFT 0 /* EQ3_B4_A - [15:0] */
+#define ARIZONA_EQ3_B4_A_WIDTH 16 /* EQ3_B4_A - [15:0] */
+
+/*
+ * R3658 (0xE4A) - EQ3_15
+ */
+#define ARIZONA_EQ3_B4_B_MASK 0xFFFF /* EQ3_B4_B - [15:0] */
+#define ARIZONA_EQ3_B4_B_SHIFT 0 /* EQ3_B4_B - [15:0] */
+#define ARIZONA_EQ3_B4_B_WIDTH 16 /* EQ3_B4_B - [15:0] */
+
+/*
+ * R3659 (0xE4B) - EQ3_16
+ */
+#define ARIZONA_EQ3_B4_C_MASK 0xFFFF /* EQ3_B4_C - [15:0] */
+#define ARIZONA_EQ3_B4_C_SHIFT 0 /* EQ3_B4_C - [15:0] */
+#define ARIZONA_EQ3_B4_C_WIDTH 16 /* EQ3_B4_C - [15:0] */
+
+/*
+ * R3660 (0xE4C) - EQ3_17
+ */
+#define ARIZONA_EQ3_B4_PG_MASK 0xFFFF /* EQ3_B4_PG - [15:0] */
+#define ARIZONA_EQ3_B4_PG_SHIFT 0 /* EQ3_B4_PG - [15:0] */
+#define ARIZONA_EQ3_B4_PG_WIDTH 16 /* EQ3_B4_PG - [15:0] */
+
+/*
+ * R3661 (0xE4D) - EQ3_18
+ */
+#define ARIZONA_EQ3_B5_A_MASK 0xFFFF /* EQ3_B5_A - [15:0] */
+#define ARIZONA_EQ3_B5_A_SHIFT 0 /* EQ3_B5_A - [15:0] */
+#define ARIZONA_EQ3_B5_A_WIDTH 16 /* EQ3_B5_A - [15:0] */
+
+/*
+ * R3662 (0xE4E) - EQ3_19
+ */
+#define ARIZONA_EQ3_B5_B_MASK 0xFFFF /* EQ3_B5_B - [15:0] */
+#define ARIZONA_EQ3_B5_B_SHIFT 0 /* EQ3_B5_B - [15:0] */
+#define ARIZONA_EQ3_B5_B_WIDTH 16 /* EQ3_B5_B - [15:0] */
+
+/*
+ * R3663 (0xE4F) - EQ3_20
+ */
+#define ARIZONA_EQ3_B5_PG_MASK 0xFFFF /* EQ3_B5_PG - [15:0] */
+#define ARIZONA_EQ3_B5_PG_SHIFT 0 /* EQ3_B5_PG - [15:0] */
+#define ARIZONA_EQ3_B5_PG_WIDTH 16 /* EQ3_B5_PG - [15:0] */
+
+/*
+ * R3664 (0xE50) - EQ3_21
+ */
+#define ARIZONA_EQ3_B1_C_MASK 0xFFFF /* EQ3_B1_C - [15:0] */
+#define ARIZONA_EQ3_B1_C_SHIFT 0 /* EQ3_B1_C - [15:0] */
+#define ARIZONA_EQ3_B1_C_WIDTH 16 /* EQ3_B1_C - [15:0] */
+
+/*
+ * R3666 (0xE52) - EQ4_1
+ */
+#define ARIZONA_EQ4_B1_GAIN_MASK 0xF800 /* EQ4_B1_GAIN - [15:11] */
+#define ARIZONA_EQ4_B1_GAIN_SHIFT 11 /* EQ4_B1_GAIN - [15:11] */
+#define ARIZONA_EQ4_B1_GAIN_WIDTH 5 /* EQ4_B1_GAIN - [15:11] */
+#define ARIZONA_EQ4_B2_GAIN_MASK 0x07C0 /* EQ4_B2_GAIN - [10:6] */
+#define ARIZONA_EQ4_B2_GAIN_SHIFT 6 /* EQ4_B2_GAIN - [10:6] */
+#define ARIZONA_EQ4_B2_GAIN_WIDTH 5 /* EQ4_B2_GAIN - [10:6] */
+#define ARIZONA_EQ4_B3_GAIN_MASK 0x003E /* EQ4_B3_GAIN - [5:1] */
+#define ARIZONA_EQ4_B3_GAIN_SHIFT 1 /* EQ4_B3_GAIN - [5:1] */
+#define ARIZONA_EQ4_B3_GAIN_WIDTH 5 /* EQ4_B3_GAIN - [5:1] */
+#define ARIZONA_EQ4_ENA 0x0001 /* EQ4_ENA */
+#define ARIZONA_EQ4_ENA_MASK 0x0001 /* EQ4_ENA */
+#define ARIZONA_EQ4_ENA_SHIFT 0 /* EQ4_ENA */
+#define ARIZONA_EQ4_ENA_WIDTH 1 /* EQ4_ENA */
+
+/*
+ * R3667 (0xE53) - EQ4_2
+ */
+#define ARIZONA_EQ4_B4_GAIN_MASK 0xF800 /* EQ4_B4_GAIN - [15:11] */
+#define ARIZONA_EQ4_B4_GAIN_SHIFT 11 /* EQ4_B4_GAIN - [15:11] */
+#define ARIZONA_EQ4_B4_GAIN_WIDTH 5 /* EQ4_B4_GAIN - [15:11] */
+#define ARIZONA_EQ4_B5_GAIN_MASK 0x07C0 /* EQ4_B5_GAIN - [10:6] */
+#define ARIZONA_EQ4_B5_GAIN_SHIFT 6 /* EQ4_B5_GAIN - [10:6] */
+#define ARIZONA_EQ4_B5_GAIN_WIDTH 5 /* EQ4_B5_GAIN - [10:6] */
+#define ARIZONA_EQ4_B1_MODE 0x0001 /* EQ4_B1_MODE */
+#define ARIZONA_EQ4_B1_MODE_MASK 0x0001 /* EQ4_B1_MODE */
+#define ARIZONA_EQ4_B1_MODE_SHIFT 0 /* EQ4_B1_MODE */
+#define ARIZONA_EQ4_B1_MODE_WIDTH 1 /* EQ4_B1_MODE */
+
+/*
+ * R3668 (0xE54) - EQ4_3
+ */
+#define ARIZONA_EQ4_B1_A_MASK 0xFFFF /* EQ4_B1_A - [15:0] */
+#define ARIZONA_EQ4_B1_A_SHIFT 0 /* EQ4_B1_A - [15:0] */
+#define ARIZONA_EQ4_B1_A_WIDTH 16 /* EQ4_B1_A - [15:0] */
+
+/*
+ * R3669 (0xE55) - EQ4_4
+ */
+#define ARIZONA_EQ4_B1_B_MASK 0xFFFF /* EQ4_B1_B - [15:0] */
+#define ARIZONA_EQ4_B1_B_SHIFT 0 /* EQ4_B1_B - [15:0] */
+#define ARIZONA_EQ4_B1_B_WIDTH 16 /* EQ4_B1_B - [15:0] */
+
+/*
+ * R3670 (0xE56) - EQ4_5
+ */
+#define ARIZONA_EQ4_B1_PG_MASK 0xFFFF /* EQ4_B1_PG - [15:0] */
+#define ARIZONA_EQ4_B1_PG_SHIFT 0 /* EQ4_B1_PG - [15:0] */
+#define ARIZONA_EQ4_B1_PG_WIDTH 16 /* EQ4_B1_PG - [15:0] */
+
+/*
+ * R3671 (0xE57) - EQ4_6
+ */
+#define ARIZONA_EQ4_B2_A_MASK 0xFFFF /* EQ4_B2_A - [15:0] */
+#define ARIZONA_EQ4_B2_A_SHIFT 0 /* EQ4_B2_A - [15:0] */
+#define ARIZONA_EQ4_B2_A_WIDTH 16 /* EQ4_B2_A - [15:0] */
+
+/*
+ * R3672 (0xE58) - EQ4_7
+ */
+#define ARIZONA_EQ4_B2_B_MASK 0xFFFF /* EQ4_B2_B - [15:0] */
+#define ARIZONA_EQ4_B2_B_SHIFT 0 /* EQ4_B2_B - [15:0] */
+#define ARIZONA_EQ4_B2_B_WIDTH 16 /* EQ4_B2_B - [15:0] */
+
+/*
+ * R3673 (0xE59) - EQ4_8
+ */
+#define ARIZONA_EQ4_B2_C_MASK 0xFFFF /* EQ4_B2_C - [15:0] */
+#define ARIZONA_EQ4_B2_C_SHIFT 0 /* EQ4_B2_C - [15:0] */
+#define ARIZONA_EQ4_B2_C_WIDTH 16 /* EQ4_B2_C - [15:0] */
+
+/*
+ * R3674 (0xE5A) - EQ4_9
+ */
+#define ARIZONA_EQ4_B2_PG_MASK 0xFFFF /* EQ4_B2_PG - [15:0] */
+#define ARIZONA_EQ4_B2_PG_SHIFT 0 /* EQ4_B2_PG - [15:0] */
+#define ARIZONA_EQ4_B2_PG_WIDTH 16 /* EQ4_B2_PG - [15:0] */
+
+/*
+ * R3675 (0xE5B) - EQ4_10
+ */
+#define ARIZONA_EQ4_B3_A_MASK 0xFFFF /* EQ4_B3_A - [15:0] */
+#define ARIZONA_EQ4_B3_A_SHIFT 0 /* EQ4_B3_A - [15:0] */
+#define ARIZONA_EQ4_B3_A_WIDTH 16 /* EQ4_B3_A - [15:0] */
+
+/*
+ * R3676 (0xE5C) - EQ4_11
+ */
+#define ARIZONA_EQ4_B3_B_MASK 0xFFFF /* EQ4_B3_B - [15:0] */
+#define ARIZONA_EQ4_B3_B_SHIFT 0 /* EQ4_B3_B - [15:0] */
+#define ARIZONA_EQ4_B3_B_WIDTH 16 /* EQ4_B3_B - [15:0] */
+
+/*
+ * R3677 (0xE5D) - EQ4_12
+ */
+#define ARIZONA_EQ4_B3_C_MASK 0xFFFF /* EQ4_B3_C - [15:0] */
+#define ARIZONA_EQ4_B3_C_SHIFT 0 /* EQ4_B3_C - [15:0] */
+#define ARIZONA_EQ4_B3_C_WIDTH 16 /* EQ4_B3_C - [15:0] */
+
+/*
+ * R3678 (0xE5E) - EQ4_13
+ */
+#define ARIZONA_EQ4_B3_PG_MASK 0xFFFF /* EQ4_B3_PG - [15:0] */
+#define ARIZONA_EQ4_B3_PG_SHIFT 0 /* EQ4_B3_PG - [15:0] */
+#define ARIZONA_EQ4_B3_PG_WIDTH 16 /* EQ4_B3_PG - [15:0] */
+
+/*
+ * R3679 (0xE5F) - EQ4_14
+ */
+#define ARIZONA_EQ4_B4_A_MASK 0xFFFF /* EQ4_B4_A - [15:0] */
+#define ARIZONA_EQ4_B4_A_SHIFT 0 /* EQ4_B4_A - [15:0] */
+#define ARIZONA_EQ4_B4_A_WIDTH 16 /* EQ4_B4_A - [15:0] */
+
+/*
+ * R3680 (0xE60) - EQ4_15
+ */
+#define ARIZONA_EQ4_B4_B_MASK 0xFFFF /* EQ4_B4_B - [15:0] */
+#define ARIZONA_EQ4_B4_B_SHIFT 0 /* EQ4_B4_B - [15:0] */
+#define ARIZONA_EQ4_B4_B_WIDTH 16 /* EQ4_B4_B - [15:0] */
+
+/*
+ * R3681 (0xE61) - EQ4_16
+ */
+#define ARIZONA_EQ4_B4_C_MASK 0xFFFF /* EQ4_B4_C - [15:0] */
+#define ARIZONA_EQ4_B4_C_SHIFT 0 /* EQ4_B4_C - [15:0] */
+#define ARIZONA_EQ4_B4_C_WIDTH 16 /* EQ4_B4_C - [15:0] */
+
+/*
+ * R3682 (0xE62) - EQ4_17
+ */
+#define ARIZONA_EQ4_B4_PG_MASK 0xFFFF /* EQ4_B4_PG - [15:0] */
+#define ARIZONA_EQ4_B4_PG_SHIFT 0 /* EQ4_B4_PG - [15:0] */
+#define ARIZONA_EQ4_B4_PG_WIDTH 16 /* EQ4_B4_PG - [15:0] */
+
+/*
+ * R3683 (0xE63) - EQ4_18
+ */
+#define ARIZONA_EQ4_B5_A_MASK 0xFFFF /* EQ4_B5_A - [15:0] */
+#define ARIZONA_EQ4_B5_A_SHIFT 0 /* EQ4_B5_A - [15:0] */
+#define ARIZONA_EQ4_B5_A_WIDTH 16 /* EQ4_B5_A - [15:0] */
+
+/*
+ * R3684 (0xE64) - EQ4_19
+ */
+#define ARIZONA_EQ4_B5_B_MASK 0xFFFF /* EQ4_B5_B - [15:0] */
+#define ARIZONA_EQ4_B5_B_SHIFT 0 /* EQ4_B5_B - [15:0] */
+#define ARIZONA_EQ4_B5_B_WIDTH 16 /* EQ4_B5_B - [15:0] */
+
+/*
+ * R3685 (0xE65) - EQ4_20
+ */
+#define ARIZONA_EQ4_B5_PG_MASK 0xFFFF /* EQ4_B5_PG - [15:0] */
+#define ARIZONA_EQ4_B5_PG_SHIFT 0 /* EQ4_B5_PG - [15:0] */
+#define ARIZONA_EQ4_B5_PG_WIDTH 16 /* EQ4_B5_PG - [15:0] */
+
+/*
+ * R3686 (0xE66) - EQ4_21
+ */
+#define ARIZONA_EQ4_B1_C_MASK 0xFFFF /* EQ4_B1_C - [15:0] */
+#define ARIZONA_EQ4_B1_C_SHIFT 0 /* EQ4_B1_C - [15:0] */
+#define ARIZONA_EQ4_B1_C_WIDTH 16 /* EQ4_B1_C - [15:0] */
+
+/*
+ * R3712 (0xE80) - DRC1 ctrl1
+ */
+#define ARIZONA_DRC1_SIG_DET_RMS_MASK 0xF800 /* DRC1_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC1_SIG_DET_RMS_SHIFT 11 /* DRC1_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC1_SIG_DET_RMS_WIDTH 5 /* DRC1_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC1_SIG_DET_PK_MASK 0x0600 /* DRC1_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC1_SIG_DET_PK_SHIFT 9 /* DRC1_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC1_SIG_DET_PK_WIDTH 2 /* DRC1_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC1_NG_ENA 0x0100 /* DRC1_NG_ENA */
+#define ARIZONA_DRC1_NG_ENA_MASK 0x0100 /* DRC1_NG_ENA */
+#define ARIZONA_DRC1_NG_ENA_SHIFT 8 /* DRC1_NG_ENA */
+#define ARIZONA_DRC1_NG_ENA_WIDTH 1 /* DRC1_NG_ENA */
+#define ARIZONA_DRC1_SIG_DET_MODE 0x0080 /* DRC1_SIG_DET_MODE */
+#define ARIZONA_DRC1_SIG_DET_MODE_MASK 0x0080 /* DRC1_SIG_DET_MODE */
+#define ARIZONA_DRC1_SIG_DET_MODE_SHIFT 7 /* DRC1_SIG_DET_MODE */
+#define ARIZONA_DRC1_SIG_DET_MODE_WIDTH 1 /* DRC1_SIG_DET_MODE */
+#define ARIZONA_DRC1_SIG_DET 0x0040 /* DRC1_SIG_DET */
+#define ARIZONA_DRC1_SIG_DET_MASK 0x0040 /* DRC1_SIG_DET */
+#define ARIZONA_DRC1_SIG_DET_SHIFT 6 /* DRC1_SIG_DET */
+#define ARIZONA_DRC1_SIG_DET_WIDTH 1 /* DRC1_SIG_DET */
+#define ARIZONA_DRC1_KNEE2_OP_ENA 0x0020 /* DRC1_KNEE2_OP_ENA */
+#define ARIZONA_DRC1_KNEE2_OP_ENA_MASK 0x0020 /* DRC1_KNEE2_OP_ENA */
+#define ARIZONA_DRC1_KNEE2_OP_ENA_SHIFT 5 /* DRC1_KNEE2_OP_ENA */
+#define ARIZONA_DRC1_KNEE2_OP_ENA_WIDTH 1 /* DRC1_KNEE2_OP_ENA */
+#define ARIZONA_DRC1_QR 0x0010 /* DRC1_QR */
+#define ARIZONA_DRC1_QR_MASK 0x0010 /* DRC1_QR */
+#define ARIZONA_DRC1_QR_SHIFT 4 /* DRC1_QR */
+#define ARIZONA_DRC1_QR_WIDTH 1 /* DRC1_QR */
+#define ARIZONA_DRC1_ANTICLIP 0x0008 /* DRC1_ANTICLIP */
+#define ARIZONA_DRC1_ANTICLIP_MASK 0x0008 /* DRC1_ANTICLIP */
+#define ARIZONA_DRC1_ANTICLIP_SHIFT 3 /* DRC1_ANTICLIP */
+#define ARIZONA_DRC1_ANTICLIP_WIDTH 1 /* DRC1_ANTICLIP */
+#define ARIZONA_DRC1L_ENA 0x0002 /* DRC1L_ENA */
+#define ARIZONA_DRC1L_ENA_MASK 0x0002 /* DRC1L_ENA */
+#define ARIZONA_DRC1L_ENA_SHIFT 1 /* DRC1L_ENA */
+#define ARIZONA_DRC1L_ENA_WIDTH 1 /* DRC1L_ENA */
+#define ARIZONA_DRC1R_ENA 0x0001 /* DRC1R_ENA */
+#define ARIZONA_DRC1R_ENA_MASK 0x0001 /* DRC1R_ENA */
+#define ARIZONA_DRC1R_ENA_SHIFT 0 /* DRC1R_ENA */
+#define ARIZONA_DRC1R_ENA_WIDTH 1 /* DRC1R_ENA */
+
+/*
+ * R3713 (0xE81) - DRC1 ctrl2
+ */
+#define ARIZONA_DRC1_ATK_MASK 0x1E00 /* DRC1_ATK - [12:9] */
+#define ARIZONA_DRC1_ATK_SHIFT 9 /* DRC1_ATK - [12:9] */
+#define ARIZONA_DRC1_ATK_WIDTH 4 /* DRC1_ATK - [12:9] */
+#define ARIZONA_DRC1_DCY_MASK 0x01E0 /* DRC1_DCY - [8:5] */
+#define ARIZONA_DRC1_DCY_SHIFT 5 /* DRC1_DCY - [8:5] */
+#define ARIZONA_DRC1_DCY_WIDTH 4 /* DRC1_DCY - [8:5] */
+#define ARIZONA_DRC1_MINGAIN_MASK 0x001C /* DRC1_MINGAIN - [4:2] */
+#define ARIZONA_DRC1_MINGAIN_SHIFT 2 /* DRC1_MINGAIN - [4:2] */
+#define ARIZONA_DRC1_MINGAIN_WIDTH 3 /* DRC1_MINGAIN - [4:2] */
+#define ARIZONA_DRC1_MAXGAIN_MASK 0x0003 /* DRC1_MAXGAIN - [1:0] */
+#define ARIZONA_DRC1_MAXGAIN_SHIFT 0 /* DRC1_MAXGAIN - [1:0] */
+#define ARIZONA_DRC1_MAXGAIN_WIDTH 2 /* DRC1_MAXGAIN - [1:0] */
+
+/*
+ * R3714 (0xE82) - DRC1 ctrl3
+ */
+#define ARIZONA_DRC1_NG_MINGAIN_MASK 0xF000 /* DRC1_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC1_NG_MINGAIN_SHIFT 12 /* DRC1_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC1_NG_MINGAIN_WIDTH 4 /* DRC1_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC1_NG_EXP_MASK 0x0C00 /* DRC1_NG_EXP - [11:10] */
+#define ARIZONA_DRC1_NG_EXP_SHIFT 10 /* DRC1_NG_EXP - [11:10] */
+#define ARIZONA_DRC1_NG_EXP_WIDTH 2 /* DRC1_NG_EXP - [11:10] */
+#define ARIZONA_DRC1_QR_THR_MASK 0x0300 /* DRC1_QR_THR - [9:8] */
+#define ARIZONA_DRC1_QR_THR_SHIFT 8 /* DRC1_QR_THR - [9:8] */
+#define ARIZONA_DRC1_QR_THR_WIDTH 2 /* DRC1_QR_THR - [9:8] */
+#define ARIZONA_DRC1_QR_DCY_MASK 0x00C0 /* DRC1_QR_DCY - [7:6] */
+#define ARIZONA_DRC1_QR_DCY_SHIFT 6 /* DRC1_QR_DCY - [7:6] */
+#define ARIZONA_DRC1_QR_DCY_WIDTH 2 /* DRC1_QR_DCY - [7:6] */
+#define ARIZONA_DRC1_HI_COMP_MASK 0x0038 /* DRC1_HI_COMP - [5:3] */
+#define ARIZONA_DRC1_HI_COMP_SHIFT 3 /* DRC1_HI_COMP - [5:3] */
+#define ARIZONA_DRC1_HI_COMP_WIDTH 3 /* DRC1_HI_COMP - [5:3] */
+#define ARIZONA_DRC1_LO_COMP_MASK 0x0007 /* DRC1_LO_COMP - [2:0] */
+#define ARIZONA_DRC1_LO_COMP_SHIFT 0 /* DRC1_LO_COMP - [2:0] */
+#define ARIZONA_DRC1_LO_COMP_WIDTH 3 /* DRC1_LO_COMP - [2:0] */
+
+/*
+ * R3715 (0xE83) - DRC1 ctrl4
+ */
+#define ARIZONA_DRC1_KNEE_IP_MASK 0x07E0 /* DRC1_KNEE_IP - [10:5] */
+#define ARIZONA_DRC1_KNEE_IP_SHIFT 5 /* DRC1_KNEE_IP - [10:5] */
+#define ARIZONA_DRC1_KNEE_IP_WIDTH 6 /* DRC1_KNEE_IP - [10:5] */
+#define ARIZONA_DRC1_KNEE_OP_MASK 0x001F /* DRC1_KNEE_OP - [4:0] */
+#define ARIZONA_DRC1_KNEE_OP_SHIFT 0 /* DRC1_KNEE_OP - [4:0] */
+#define ARIZONA_DRC1_KNEE_OP_WIDTH 5 /* DRC1_KNEE_OP - [4:0] */
+
+/*
+ * R3716 (0xE84) - DRC1 ctrl5
+ */
+#define ARIZONA_DRC1_KNEE2_IP_MASK 0x03E0 /* DRC1_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC1_KNEE2_IP_SHIFT 5 /* DRC1_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC1_KNEE2_IP_WIDTH 5 /* DRC1_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC1_KNEE2_OP_MASK 0x001F /* DRC1_KNEE2_OP - [4:0] */
+#define ARIZONA_DRC1_KNEE2_OP_SHIFT 0 /* DRC1_KNEE2_OP - [4:0] */
+#define ARIZONA_DRC1_KNEE2_OP_WIDTH 5 /* DRC1_KNEE2_OP - [4:0] */
+
+/*
+ * R3721 (0xE89) - DRC2 ctrl1
+ */
+#define ARIZONA_DRC2_SIG_DET_RMS_MASK 0xF800 /* DRC2_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC2_SIG_DET_RMS_SHIFT 11 /* DRC2_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC2_SIG_DET_RMS_WIDTH 5 /* DRC2_SIG_DET_RMS - [15:11] */
+#define ARIZONA_DRC2_SIG_DET_PK_MASK 0x0600 /* DRC2_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC2_SIG_DET_PK_SHIFT 9 /* DRC2_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC2_SIG_DET_PK_WIDTH 2 /* DRC2_SIG_DET_PK - [10:9] */
+#define ARIZONA_DRC2_NG_ENA 0x0100 /* DRC2_NG_ENA */
+#define ARIZONA_DRC2_NG_ENA_MASK 0x0100 /* DRC2_NG_ENA */
+#define ARIZONA_DRC2_NG_ENA_SHIFT 8 /* DRC2_NG_ENA */
+#define ARIZONA_DRC2_NG_ENA_WIDTH 1 /* DRC2_NG_ENA */
+#define ARIZONA_DRC2_SIG_DET_MODE 0x0080 /* DRC2_SIG_DET_MODE */
+#define ARIZONA_DRC2_SIG_DET_MODE_MASK 0x0080 /* DRC2_SIG_DET_MODE */
+#define ARIZONA_DRC2_SIG_DET_MODE_SHIFT 7 /* DRC2_SIG_DET_MODE */
+#define ARIZONA_DRC2_SIG_DET_MODE_WIDTH 1 /* DRC2_SIG_DET_MODE */
+#define ARIZONA_DRC2_SIG_DET 0x0040 /* DRC2_SIG_DET */
+#define ARIZONA_DRC2_SIG_DET_MASK 0x0040 /* DRC2_SIG_DET */
+#define ARIZONA_DRC2_SIG_DET_SHIFT 6 /* DRC2_SIG_DET */
+#define ARIZONA_DRC2_SIG_DET_WIDTH 1 /* DRC2_SIG_DET */
+#define ARIZONA_DRC2_KNEE2_OP_ENA 0x0020 /* DRC2_KNEE2_OP_ENA */
+#define ARIZONA_DRC2_KNEE2_OP_ENA_MASK 0x0020 /* DRC2_KNEE2_OP_ENA */
+#define ARIZONA_DRC2_KNEE2_OP_ENA_SHIFT 5 /* DRC2_KNEE2_OP_ENA */
+#define ARIZONA_DRC2_KNEE2_OP_ENA_WIDTH 1 /* DRC2_KNEE2_OP_ENA */
+#define ARIZONA_DRC2_QR 0x0010 /* DRC2_QR */
+#define ARIZONA_DRC2_QR_MASK 0x0010 /* DRC2_QR */
+#define ARIZONA_DRC2_QR_SHIFT 4 /* DRC2_QR */
+#define ARIZONA_DRC2_QR_WIDTH 1 /* DRC2_QR */
+#define ARIZONA_DRC2_ANTICLIP 0x0008 /* DRC2_ANTICLIP */
+#define ARIZONA_DRC2_ANTICLIP_MASK 0x0008 /* DRC2_ANTICLIP */
+#define ARIZONA_DRC2_ANTICLIP_SHIFT 3 /* DRC2_ANTICLIP */
+#define ARIZONA_DRC2_ANTICLIP_WIDTH 1 /* DRC2_ANTICLIP */
+#define ARIZONA_DRC2L_ENA 0x0002 /* DRC2L_ENA */
+#define ARIZONA_DRC2L_ENA_MASK 0x0002 /* DRC2L_ENA */
+#define ARIZONA_DRC2L_ENA_SHIFT 1 /* DRC2L_ENA */
+#define ARIZONA_DRC2L_ENA_WIDTH 1 /* DRC2L_ENA */
+#define ARIZONA_DRC2R_ENA 0x0001 /* DRC2R_ENA */
+#define ARIZONA_DRC2R_ENA_MASK 0x0001 /* DRC2R_ENA */
+#define ARIZONA_DRC2R_ENA_SHIFT 0 /* DRC2R_ENA */
+#define ARIZONA_DRC2R_ENA_WIDTH 1 /* DRC2R_ENA */
+
+/*
+ * R3722 (0xE8A) - DRC2 ctrl2
+ */
+#define ARIZONA_DRC2_ATK_MASK 0x1E00 /* DRC2_ATK - [12:9] */
+#define ARIZONA_DRC2_ATK_SHIFT 9 /* DRC2_ATK - [12:9] */
+#define ARIZONA_DRC2_ATK_WIDTH 4 /* DRC2_ATK - [12:9] */
+#define ARIZONA_DRC2_DCY_MASK 0x01E0 /* DRC2_DCY - [8:5] */
+#define ARIZONA_DRC2_DCY_SHIFT 5 /* DRC2_DCY - [8:5] */
+#define ARIZONA_DRC2_DCY_WIDTH 4 /* DRC2_DCY - [8:5] */
+#define ARIZONA_DRC2_MINGAIN_MASK 0x001C /* DRC2_MINGAIN - [4:2] */
+#define ARIZONA_DRC2_MINGAIN_SHIFT 2 /* DRC2_MINGAIN - [4:2] */
+#define ARIZONA_DRC2_MINGAIN_WIDTH 3 /* DRC2_MINGAIN - [4:2] */
+#define ARIZONA_DRC2_MAXGAIN_MASK 0x0003 /* DRC2_MAXGAIN - [1:0] */
+#define ARIZONA_DRC2_MAXGAIN_SHIFT 0 /* DRC2_MAXGAIN - [1:0] */
+#define ARIZONA_DRC2_MAXGAIN_WIDTH 2 /* DRC2_MAXGAIN - [1:0] */
+
+/*
+ * R3723 (0xE8B) - DRC2 ctrl3
+ */
+#define ARIZONA_DRC2_NG_MINGAIN_MASK 0xF000 /* DRC2_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC2_NG_MINGAIN_SHIFT 12 /* DRC2_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC2_NG_MINGAIN_WIDTH 4 /* DRC2_NG_MINGAIN - [15:12] */
+#define ARIZONA_DRC2_NG_EXP_MASK 0x0C00 /* DRC2_NG_EXP - [11:10] */
+#define ARIZONA_DRC2_NG_EXP_SHIFT 10 /* DRC2_NG_EXP - [11:10] */
+#define ARIZONA_DRC2_NG_EXP_WIDTH 2 /* DRC2_NG_EXP - [11:10] */
+#define ARIZONA_DRC2_QR_THR_MASK 0x0300 /* DRC2_QR_THR - [9:8] */
+#define ARIZONA_DRC2_QR_THR_SHIFT 8 /* DRC2_QR_THR - [9:8] */
+#define ARIZONA_DRC2_QR_THR_WIDTH 2 /* DRC2_QR_THR - [9:8] */
+#define ARIZONA_DRC2_QR_DCY_MASK 0x00C0 /* DRC2_QR_DCY - [7:6] */
+#define ARIZONA_DRC2_QR_DCY_SHIFT 6 /* DRC2_QR_DCY - [7:6] */
+#define ARIZONA_DRC2_QR_DCY_WIDTH 2 /* DRC2_QR_DCY - [7:6] */
+#define ARIZONA_DRC2_HI_COMP_MASK 0x0038 /* DRC2_HI_COMP - [5:3] */
+#define ARIZONA_DRC2_HI_COMP_SHIFT 3 /* DRC2_HI_COMP - [5:3] */
+#define ARIZONA_DRC2_HI_COMP_WIDTH 3 /* DRC2_HI_COMP - [5:3] */
+#define ARIZONA_DRC2_LO_COMP_MASK 0x0007 /* DRC2_LO_COMP - [2:0] */
+#define ARIZONA_DRC2_LO_COMP_SHIFT 0 /* DRC2_LO_COMP - [2:0] */
+#define ARIZONA_DRC2_LO_COMP_WIDTH 3 /* DRC2_LO_COMP - [2:0] */
+
+/*
+ * R3724 (0xE8C) - DRC2 ctrl4
+ */
+#define ARIZONA_DRC2_KNEE_IP_MASK 0x07E0 /* DRC2_KNEE_IP - [10:5] */
+#define ARIZONA_DRC2_KNEE_IP_SHIFT 5 /* DRC2_KNEE_IP - [10:5] */
+#define ARIZONA_DRC2_KNEE_IP_WIDTH 6 /* DRC2_KNEE_IP - [10:5] */
+#define ARIZONA_DRC2_KNEE_OP_MASK 0x001F /* DRC2_KNEE_OP - [4:0] */
+#define ARIZONA_DRC2_KNEE_OP_SHIFT 0 /* DRC2_KNEE_OP - [4:0] */
+#define ARIZONA_DRC2_KNEE_OP_WIDTH 5 /* DRC2_KNEE_OP - [4:0] */
+
+/*
+ * R3725 (0xE8D) - DRC2 ctrl5
+ */
+#define ARIZONA_DRC2_KNEE2_IP_MASK 0x03E0 /* DRC2_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC2_KNEE2_IP_SHIFT 5 /* DRC2_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC2_KNEE2_IP_WIDTH 5 /* DRC2_KNEE2_IP - [9:5] */
+#define ARIZONA_DRC2_KNEE2_OP_MASK 0x001F /* DRC2_KNEE2_OP - [4:0] */
+#define ARIZONA_DRC2_KNEE2_OP_SHIFT 0 /* DRC2_KNEE2_OP - [4:0] */
+#define ARIZONA_DRC2_KNEE2_OP_WIDTH 5 /* DRC2_KNEE2_OP - [4:0] */
+
+/*
+ * R3776 (0xEC0) - HPLPF1_1
+ */
+#define ARIZONA_LHPF1_MODE 0x0002 /* LHPF1_MODE */
+#define ARIZONA_LHPF1_MODE_MASK 0x0002 /* LHPF1_MODE */
+#define ARIZONA_LHPF1_MODE_SHIFT 1 /* LHPF1_MODE */
+#define ARIZONA_LHPF1_MODE_WIDTH 1 /* LHPF1_MODE */
+#define ARIZONA_LHPF1_ENA 0x0001 /* LHPF1_ENA */
+#define ARIZONA_LHPF1_ENA_MASK 0x0001 /* LHPF1_ENA */
+#define ARIZONA_LHPF1_ENA_SHIFT 0 /* LHPF1_ENA */
+#define ARIZONA_LHPF1_ENA_WIDTH 1 /* LHPF1_ENA */
+
+/*
+ * R3777 (0xEC1) - HPLPF1_2
+ */
+#define ARIZONA_LHPF1_COEFF_MASK 0xFFFF /* LHPF1_COEFF - [15:0] */
+#define ARIZONA_LHPF1_COEFF_SHIFT 0 /* LHPF1_COEFF - [15:0] */
+#define ARIZONA_LHPF1_COEFF_WIDTH 16 /* LHPF1_COEFF - [15:0] */
+
+/*
+ * R3780 (0xEC4) - HPLPF2_1
+ */
+#define ARIZONA_LHPF2_MODE 0x0002 /* LHPF2_MODE */
+#define ARIZONA_LHPF2_MODE_MASK 0x0002 /* LHPF2_MODE */
+#define ARIZONA_LHPF2_MODE_SHIFT 1 /* LHPF2_MODE */
+#define ARIZONA_LHPF2_MODE_WIDTH 1 /* LHPF2_MODE */
+#define ARIZONA_LHPF2_ENA 0x0001 /* LHPF2_ENA */
+#define ARIZONA_LHPF2_ENA_MASK 0x0001 /* LHPF2_ENA */
+#define ARIZONA_LHPF2_ENA_SHIFT 0 /* LHPF2_ENA */
+#define ARIZONA_LHPF2_ENA_WIDTH 1 /* LHPF2_ENA */
+
+/*
+ * R3781 (0xEC5) - HPLPF2_2
+ */
+#define ARIZONA_LHPF2_COEFF_MASK 0xFFFF /* LHPF2_COEFF - [15:0] */
+#define ARIZONA_LHPF2_COEFF_SHIFT 0 /* LHPF2_COEFF - [15:0] */
+#define ARIZONA_LHPF2_COEFF_WIDTH 16 /* LHPF2_COEFF - [15:0] */
+
+/*
+ * R3784 (0xEC8) - HPLPF3_1
+ */
+#define ARIZONA_LHPF3_MODE 0x0002 /* LHPF3_MODE */
+#define ARIZONA_LHPF3_MODE_MASK 0x0002 /* LHPF3_MODE */
+#define ARIZONA_LHPF3_MODE_SHIFT 1 /* LHPF3_MODE */
+#define ARIZONA_LHPF3_MODE_WIDTH 1 /* LHPF3_MODE */
+#define ARIZONA_LHPF3_ENA 0x0001 /* LHPF3_ENA */
+#define ARIZONA_LHPF3_ENA_MASK 0x0001 /* LHPF3_ENA */
+#define ARIZONA_LHPF3_ENA_SHIFT 0 /* LHPF3_ENA */
+#define ARIZONA_LHPF3_ENA_WIDTH 1 /* LHPF3_ENA */
+
+/*
+ * R3785 (0xEC9) - HPLPF3_2
+ */
+#define ARIZONA_LHPF3_COEFF_MASK 0xFFFF /* LHPF3_COEFF - [15:0] */
+#define ARIZONA_LHPF3_COEFF_SHIFT 0 /* LHPF3_COEFF - [15:0] */
+#define ARIZONA_LHPF3_COEFF_WIDTH 16 /* LHPF3_COEFF - [15:0] */
+
+/*
+ * R3788 (0xECC) - HPLPF4_1
+ */
+#define ARIZONA_LHPF4_MODE 0x0002 /* LHPF4_MODE */
+#define ARIZONA_LHPF4_MODE_MASK 0x0002 /* LHPF4_MODE */
+#define ARIZONA_LHPF4_MODE_SHIFT 1 /* LHPF4_MODE */
+#define ARIZONA_LHPF4_MODE_WIDTH 1 /* LHPF4_MODE */
+#define ARIZONA_LHPF4_ENA 0x0001 /* LHPF4_ENA */
+#define ARIZONA_LHPF4_ENA_MASK 0x0001 /* LHPF4_ENA */
+#define ARIZONA_LHPF4_ENA_SHIFT 0 /* LHPF4_ENA */
+#define ARIZONA_LHPF4_ENA_WIDTH 1 /* LHPF4_ENA */
+
+/*
+ * R3789 (0xECD) - HPLPF4_2
+ */
+#define ARIZONA_LHPF4_COEFF_MASK 0xFFFF /* LHPF4_COEFF - [15:0] */
+#define ARIZONA_LHPF4_COEFF_SHIFT 0 /* LHPF4_COEFF - [15:0] */
+#define ARIZONA_LHPF4_COEFF_WIDTH 16 /* LHPF4_COEFF - [15:0] */
+
+/*
+ * R3808 (0xEE0) - ASRC_ENABLE
+ */
+#define ARIZONA_ASRC2L_ENA 0x0008 /* ASRC2L_ENA */
+#define ARIZONA_ASRC2L_ENA_MASK 0x0008 /* ASRC2L_ENA */
+#define ARIZONA_ASRC2L_ENA_SHIFT 3 /* ASRC2L_ENA */
+#define ARIZONA_ASRC2L_ENA_WIDTH 1 /* ASRC2L_ENA */
+#define ARIZONA_ASRC2R_ENA 0x0004 /* ASRC2R_ENA */
+#define ARIZONA_ASRC2R_ENA_MASK 0x0004 /* ASRC2R_ENA */
+#define ARIZONA_ASRC2R_ENA_SHIFT 2 /* ASRC2R_ENA */
+#define ARIZONA_ASRC2R_ENA_WIDTH 1 /* ASRC2R_ENA */
+#define ARIZONA_ASRC1L_ENA 0x0002 /* ASRC1L_ENA */
+#define ARIZONA_ASRC1L_ENA_MASK 0x0002 /* ASRC1L_ENA */
+#define ARIZONA_ASRC1L_ENA_SHIFT 1 /* ASRC1L_ENA */
+#define ARIZONA_ASRC1L_ENA_WIDTH 1 /* ASRC1L_ENA */
+#define ARIZONA_ASRC1R_ENA 0x0001 /* ASRC1R_ENA */
+#define ARIZONA_ASRC1R_ENA_MASK 0x0001 /* ASRC1R_ENA */
+#define ARIZONA_ASRC1R_ENA_SHIFT 0 /* ASRC1R_ENA */
+#define ARIZONA_ASRC1R_ENA_WIDTH 1 /* ASRC1R_ENA */
+
+/*
+ * R3810 (0xEE2) - ASRC_RATE1
+ */
+#define ARIZONA_ASRC_RATE1_MASK 0x7800 /* ASRC_RATE1 - [14:11] */
+#define ARIZONA_ASRC_RATE1_SHIFT 11 /* ASRC_RATE1 - [14:11] */
+#define ARIZONA_ASRC_RATE1_WIDTH 4 /* ASRC_RATE1 - [14:11] */
+
+/*
+ * R3811 (0xEE3) - ASRC_RATE2
+ */
+#define ARIZONA_ASRC_RATE2_MASK 0x7800 /* ASRC_RATE2 - [14:11] */
+#define ARIZONA_ASRC_RATE2_SHIFT 11 /* ASRC_RATE2 - [14:11] */
+#define ARIZONA_ASRC_RATE2_WIDTH 4 /* ASRC_RATE2 - [14:11] */
+
+/*
+ * R3824 (0xEF0) - ISRC 1 CTRL 1
+ */
+#define ARIZONA_ISRC1_FSH_MASK 0x7800 /* ISRC1_FSH - [14:11] */
+#define ARIZONA_ISRC1_FSH_SHIFT 11 /* ISRC1_FSH - [14:11] */
+#define ARIZONA_ISRC1_FSH_WIDTH 4 /* ISRC1_FSH - [14:11] */
+#define ARIZONA_ISRC1_CLK_SEL_MASK 0x0700 /* ISRC1_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC1_CLK_SEL_SHIFT 8 /* ISRC1_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC1_CLK_SEL_WIDTH 3 /* ISRC1_CLK_SEL - [10:8] */
+
+/*
+ * R3825 (0xEF1) - ISRC 1 CTRL 2
+ */
+#define ARIZONA_ISRC1_FSL_MASK 0x7800 /* ISRC1_FSL - [14:11] */
+#define ARIZONA_ISRC1_FSL_SHIFT 11 /* ISRC1_FSL - [14:11] */
+#define ARIZONA_ISRC1_FSL_WIDTH 4 /* ISRC1_FSL - [14:11] */
+
+/*
+ * R3826 (0xEF2) - ISRC 1 CTRL 3
+ */
+#define ARIZONA_ISRC1_INT0_ENA 0x8000 /* ISRC1_INT0_ENA */
+#define ARIZONA_ISRC1_INT0_ENA_MASK 0x8000 /* ISRC1_INT0_ENA */
+#define ARIZONA_ISRC1_INT0_ENA_SHIFT 15 /* ISRC1_INT0_ENA */
+#define ARIZONA_ISRC1_INT0_ENA_WIDTH 1 /* ISRC1_INT0_ENA */
+#define ARIZONA_ISRC1_INT1_ENA 0x4000 /* ISRC1_INT1_ENA */
+#define ARIZONA_ISRC1_INT1_ENA_MASK 0x4000 /* ISRC1_INT1_ENA */
+#define ARIZONA_ISRC1_INT1_ENA_SHIFT 14 /* ISRC1_INT1_ENA */
+#define ARIZONA_ISRC1_INT1_ENA_WIDTH 1 /* ISRC1_INT1_ENA */
+#define ARIZONA_ISRC1_INT2_ENA 0x2000 /* ISRC1_INT2_ENA */
+#define ARIZONA_ISRC1_INT2_ENA_MASK 0x2000 /* ISRC1_INT2_ENA */
+#define ARIZONA_ISRC1_INT2_ENA_SHIFT 13 /* ISRC1_INT2_ENA */
+#define ARIZONA_ISRC1_INT2_ENA_WIDTH 1 /* ISRC1_INT2_ENA */
+#define ARIZONA_ISRC1_INT3_ENA 0x1000 /* ISRC1_INT3_ENA */
+#define ARIZONA_ISRC1_INT3_ENA_MASK 0x1000 /* ISRC1_INT3_ENA */
+#define ARIZONA_ISRC1_INT3_ENA_SHIFT 12 /* ISRC1_INT3_ENA */
+#define ARIZONA_ISRC1_INT3_ENA_WIDTH 1 /* ISRC1_INT3_ENA */
+#define ARIZONA_ISRC1_DEC0_ENA 0x0200 /* ISRC1_DEC0_ENA */
+#define ARIZONA_ISRC1_DEC0_ENA_MASK 0x0200 /* ISRC1_DEC0_ENA */
+#define ARIZONA_ISRC1_DEC0_ENA_SHIFT 9 /* ISRC1_DEC0_ENA */
+#define ARIZONA_ISRC1_DEC0_ENA_WIDTH 1 /* ISRC1_DEC0_ENA */
+#define ARIZONA_ISRC1_DEC1_ENA 0x0100 /* ISRC1_DEC1_ENA */
+#define ARIZONA_ISRC1_DEC1_ENA_MASK 0x0100 /* ISRC1_DEC1_ENA */
+#define ARIZONA_ISRC1_DEC1_ENA_SHIFT 8 /* ISRC1_DEC1_ENA */
+#define ARIZONA_ISRC1_DEC1_ENA_WIDTH 1 /* ISRC1_DEC1_ENA */
+#define ARIZONA_ISRC1_DEC2_ENA 0x0080 /* ISRC1_DEC2_ENA */
+#define ARIZONA_ISRC1_DEC2_ENA_MASK 0x0080 /* ISRC1_DEC2_ENA */
+#define ARIZONA_ISRC1_DEC2_ENA_SHIFT 7 /* ISRC1_DEC2_ENA */
+#define ARIZONA_ISRC1_DEC2_ENA_WIDTH 1 /* ISRC1_DEC2_ENA */
+#define ARIZONA_ISRC1_DEC3_ENA 0x0040 /* ISRC1_DEC3_ENA */
+#define ARIZONA_ISRC1_DEC3_ENA_MASK 0x0040 /* ISRC1_DEC3_ENA */
+#define ARIZONA_ISRC1_DEC3_ENA_SHIFT 6 /* ISRC1_DEC3_ENA */
+#define ARIZONA_ISRC1_DEC3_ENA_WIDTH 1 /* ISRC1_DEC3_ENA */
+#define ARIZONA_ISRC1_NOTCH_ENA 0x0001 /* ISRC1_NOTCH_ENA */
+#define ARIZONA_ISRC1_NOTCH_ENA_MASK 0x0001 /* ISRC1_NOTCH_ENA */
+#define ARIZONA_ISRC1_NOTCH_ENA_SHIFT 0 /* ISRC1_NOTCH_ENA */
+#define ARIZONA_ISRC1_NOTCH_ENA_WIDTH 1 /* ISRC1_NOTCH_ENA */
+
+/*
+ * R3827 (0xEF3) - ISRC 2 CTRL 1
+ */
+#define ARIZONA_ISRC2_FSH_MASK 0x7800 /* ISRC2_FSH - [14:11] */
+#define ARIZONA_ISRC2_FSH_SHIFT 11 /* ISRC2_FSH - [14:11] */
+#define ARIZONA_ISRC2_FSH_WIDTH 4 /* ISRC2_FSH - [14:11] */
+#define ARIZONA_ISRC2_CLK_SEL_MASK 0x0700 /* ISRC2_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC2_CLK_SEL_SHIFT 8 /* ISRC2_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC2_CLK_SEL_WIDTH 3 /* ISRC2_CLK_SEL - [10:8] */
+
+/*
+ * R3828 (0xEF4) - ISRC 2 CTRL 2
+ */
+#define ARIZONA_ISRC2_FSL_MASK 0x7800 /* ISRC2_FSL - [14:11] */
+#define ARIZONA_ISRC2_FSL_SHIFT 11 /* ISRC2_FSL - [14:11] */
+#define ARIZONA_ISRC2_FSL_WIDTH 4 /* ISRC2_FSL - [14:11] */
+
+/*
+ * R3829 (0xEF5) - ISRC 2 CTRL 3
+ */
+#define ARIZONA_ISRC2_INT0_ENA 0x8000 /* ISRC2_INT0_ENA */
+#define ARIZONA_ISRC2_INT0_ENA_MASK 0x8000 /* ISRC2_INT0_ENA */
+#define ARIZONA_ISRC2_INT0_ENA_SHIFT 15 /* ISRC2_INT0_ENA */
+#define ARIZONA_ISRC2_INT0_ENA_WIDTH 1 /* ISRC2_INT0_ENA */
+#define ARIZONA_ISRC2_INT1_ENA 0x4000 /* ISRC2_INT1_ENA */
+#define ARIZONA_ISRC2_INT1_ENA_MASK 0x4000 /* ISRC2_INT1_ENA */
+#define ARIZONA_ISRC2_INT1_ENA_SHIFT 14 /* ISRC2_INT1_ENA */
+#define ARIZONA_ISRC2_INT1_ENA_WIDTH 1 /* ISRC2_INT1_ENA */
+#define ARIZONA_ISRC2_INT2_ENA 0x2000 /* ISRC2_INT2_ENA */
+#define ARIZONA_ISRC2_INT2_ENA_MASK 0x2000 /* ISRC2_INT2_ENA */
+#define ARIZONA_ISRC2_INT2_ENA_SHIFT 13 /* ISRC2_INT2_ENA */
+#define ARIZONA_ISRC2_INT2_ENA_WIDTH 1 /* ISRC2_INT2_ENA */
+#define ARIZONA_ISRC2_INT3_ENA 0x1000 /* ISRC2_INT3_ENA */
+#define ARIZONA_ISRC2_INT3_ENA_MASK 0x1000 /* ISRC2_INT3_ENA */
+#define ARIZONA_ISRC2_INT3_ENA_SHIFT 12 /* ISRC2_INT3_ENA */
+#define ARIZONA_ISRC2_INT3_ENA_WIDTH 1 /* ISRC2_INT3_ENA */
+#define ARIZONA_ISRC2_DEC0_ENA 0x0200 /* ISRC2_DEC0_ENA */
+#define ARIZONA_ISRC2_DEC0_ENA_MASK 0x0200 /* ISRC2_DEC0_ENA */
+#define ARIZONA_ISRC2_DEC0_ENA_SHIFT 9 /* ISRC2_DEC0_ENA */
+#define ARIZONA_ISRC2_DEC0_ENA_WIDTH 1 /* ISRC2_DEC0_ENA */
+#define ARIZONA_ISRC2_DEC1_ENA 0x0100 /* ISRC2_DEC1_ENA */
+#define ARIZONA_ISRC2_DEC1_ENA_MASK 0x0100 /* ISRC2_DEC1_ENA */
+#define ARIZONA_ISRC2_DEC1_ENA_SHIFT 8 /* ISRC2_DEC1_ENA */
+#define ARIZONA_ISRC2_DEC1_ENA_WIDTH 1 /* ISRC2_DEC1_ENA */
+#define ARIZONA_ISRC2_DEC2_ENA 0x0080 /* ISRC2_DEC2_ENA */
+#define ARIZONA_ISRC2_DEC2_ENA_MASK 0x0080 /* ISRC2_DEC2_ENA */
+#define ARIZONA_ISRC2_DEC2_ENA_SHIFT 7 /* ISRC2_DEC2_ENA */
+#define ARIZONA_ISRC2_DEC2_ENA_WIDTH 1 /* ISRC2_DEC2_ENA */
+#define ARIZONA_ISRC2_DEC3_ENA 0x0040 /* ISRC2_DEC3_ENA */
+#define ARIZONA_ISRC2_DEC3_ENA_MASK 0x0040 /* ISRC2_DEC3_ENA */
+#define ARIZONA_ISRC2_DEC3_ENA_SHIFT 6 /* ISRC2_DEC3_ENA */
+#define ARIZONA_ISRC2_DEC3_ENA_WIDTH 1 /* ISRC2_DEC3_ENA */
+#define ARIZONA_ISRC2_NOTCH_ENA 0x0001 /* ISRC2_NOTCH_ENA */
+#define ARIZONA_ISRC2_NOTCH_ENA_MASK 0x0001 /* ISRC2_NOTCH_ENA */
+#define ARIZONA_ISRC2_NOTCH_ENA_SHIFT 0 /* ISRC2_NOTCH_ENA */
+#define ARIZONA_ISRC2_NOTCH_ENA_WIDTH 1 /* ISRC2_NOTCH_ENA */
+
+/*
+ * R3830 (0xEF6) - ISRC 3 CTRL 1
+ */
+#define ARIZONA_ISRC3_FSH_MASK 0x7800 /* ISRC3_FSH - [14:11] */
+#define ARIZONA_ISRC3_FSH_SHIFT 11 /* ISRC3_FSH - [14:11] */
+#define ARIZONA_ISRC3_FSH_WIDTH 4 /* ISRC3_FSH - [14:11] */
+#define ARIZONA_ISRC3_CLK_SEL_MASK 0x0700 /* ISRC3_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC3_CLK_SEL_SHIFT 8 /* ISRC3_CLK_SEL - [10:8] */
+#define ARIZONA_ISRC3_CLK_SEL_WIDTH 3 /* ISRC3_CLK_SEL - [10:8] */
+
+/*
+ * R3831 (0xEF7) - ISRC 3 CTRL 2
+ */
+#define ARIZONA_ISRC3_FSL_MASK 0x7800 /* ISRC3_FSL - [14:11] */
+#define ARIZONA_ISRC3_FSL_SHIFT 11 /* ISRC3_FSL - [14:11] */
+#define ARIZONA_ISRC3_FSL_WIDTH 4 /* ISRC3_FSL - [14:11] */
+
+/*
+ * R3832 (0xEF8) - ISRC 3 CTRL 3
+ */
+#define ARIZONA_ISRC3_INT0_ENA 0x8000 /* ISRC3_INT0_ENA */
+#define ARIZONA_ISRC3_INT0_ENA_MASK 0x8000 /* ISRC3_INT0_ENA */
+#define ARIZONA_ISRC3_INT0_ENA_SHIFT 15 /* ISRC3_INT0_ENA */
+#define ARIZONA_ISRC3_INT0_ENA_WIDTH 1 /* ISRC3_INT0_ENA */
+#define ARIZONA_ISRC3_INT1_ENA 0x4000 /* ISRC3_INT1_ENA */
+#define ARIZONA_ISRC3_INT1_ENA_MASK 0x4000 /* ISRC3_INT1_ENA */
+#define ARIZONA_ISRC3_INT1_ENA_SHIFT 14 /* ISRC3_INT1_ENA */
+#define ARIZONA_ISRC3_INT1_ENA_WIDTH 1 /* ISRC3_INT1_ENA */
+#define ARIZONA_ISRC3_INT2_ENA 0x2000 /* ISRC3_INT2_ENA */
+#define ARIZONA_ISRC3_INT2_ENA_MASK 0x2000 /* ISRC3_INT2_ENA */
+#define ARIZONA_ISRC3_INT2_ENA_SHIFT 13 /* ISRC3_INT2_ENA */
+#define ARIZONA_ISRC3_INT2_ENA_WIDTH 1 /* ISRC3_INT2_ENA */
+#define ARIZONA_ISRC3_INT3_ENA 0x1000 /* ISRC3_INT3_ENA */
+#define ARIZONA_ISRC3_INT3_ENA_MASK 0x1000 /* ISRC3_INT3_ENA */
+#define ARIZONA_ISRC3_INT3_ENA_SHIFT 12 /* ISRC3_INT3_ENA */
+#define ARIZONA_ISRC3_INT3_ENA_WIDTH 1 /* ISRC3_INT3_ENA */
+#define ARIZONA_ISRC3_DEC0_ENA 0x0200 /* ISRC3_DEC0_ENA */
+#define ARIZONA_ISRC3_DEC0_ENA_MASK 0x0200 /* ISRC3_DEC0_ENA */
+#define ARIZONA_ISRC3_DEC0_ENA_SHIFT 9 /* ISRC3_DEC0_ENA */
+#define ARIZONA_ISRC3_DEC0_ENA_WIDTH 1 /* ISRC3_DEC0_ENA */
+#define ARIZONA_ISRC3_DEC1_ENA 0x0100 /* ISRC3_DEC1_ENA */
+#define ARIZONA_ISRC3_DEC1_ENA_MASK 0x0100 /* ISRC3_DEC1_ENA */
+#define ARIZONA_ISRC3_DEC1_ENA_SHIFT 8 /* ISRC3_DEC1_ENA */
+#define ARIZONA_ISRC3_DEC1_ENA_WIDTH 1 /* ISRC3_DEC1_ENA */
+#define ARIZONA_ISRC3_DEC2_ENA 0x0080 /* ISRC3_DEC2_ENA */
+#define ARIZONA_ISRC3_DEC2_ENA_MASK 0x0080 /* ISRC3_DEC2_ENA */
+#define ARIZONA_ISRC3_DEC2_ENA_SHIFT 7 /* ISRC3_DEC2_ENA */
+#define ARIZONA_ISRC3_DEC2_ENA_WIDTH 1 /* ISRC3_DEC2_ENA */
+#define ARIZONA_ISRC3_DEC3_ENA 0x0040 /* ISRC3_DEC3_ENA */
+#define ARIZONA_ISRC3_DEC3_ENA_MASK 0x0040 /* ISRC3_DEC3_ENA */
+#define ARIZONA_ISRC3_DEC3_ENA_SHIFT 6 /* ISRC3_DEC3_ENA */
+#define ARIZONA_ISRC3_DEC3_ENA_WIDTH 1 /* ISRC3_DEC3_ENA */
+#define ARIZONA_ISRC3_NOTCH_ENA 0x0001 /* ISRC3_NOTCH_ENA */
+#define ARIZONA_ISRC3_NOTCH_ENA_MASK 0x0001 /* ISRC3_NOTCH_ENA */
+#define ARIZONA_ISRC3_NOTCH_ENA_SHIFT 0 /* ISRC3_NOTCH_ENA */
+#define ARIZONA_ISRC3_NOTCH_ENA_WIDTH 1 /* ISRC3_NOTCH_ENA */
+
+/*
+ * R4352 (0x1100) - DSP1 Control 1
+ */
+#define ARIZONA_DSP1_RATE_MASK 0x7800 /* DSP1_RATE - [14:11] */
+#define ARIZONA_DSP1_RATE_SHIFT 11 /* DSP1_RATE - [14:11] */
+#define ARIZONA_DSP1_RATE_WIDTH 4 /* DSP1_RATE - [14:11] */
+#define ARIZONA_DSP1_MEM_ENA 0x0010 /* DSP1_MEM_ENA */
+#define ARIZONA_DSP1_MEM_ENA_MASK 0x0010 /* DSP1_MEM_ENA */
+#define ARIZONA_DSP1_MEM_ENA_SHIFT 4 /* DSP1_MEM_ENA */
+#define ARIZONA_DSP1_MEM_ENA_WIDTH 1 /* DSP1_MEM_ENA */
+#define ARIZONA_DSP1_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ARIZONA_DSP1_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ARIZONA_DSP1_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ARIZONA_DSP1_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ARIZONA_DSP1_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ARIZONA_DSP1_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ARIZONA_DSP1_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ARIZONA_DSP1_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ARIZONA_DSP1_START 0x0001 /* DSP1_START */
+#define ARIZONA_DSP1_START_MASK 0x0001 /* DSP1_START */
+#define ARIZONA_DSP1_START_SHIFT 0 /* DSP1_START */
+#define ARIZONA_DSP1_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * R4353 (0x1101) - DSP1 Clocking 1
+ */
+#define ARIZONA_DSP1_CLK_SEL_MASK 0x0007 /* DSP1_CLK_SEL - [2:0] */
+#define ARIZONA_DSP1_CLK_SEL_SHIFT 0 /* DSP1_CLK_SEL - [2:0] */
+#define ARIZONA_DSP1_CLK_SEL_WIDTH 3 /* DSP1_CLK_SEL - [2:0] */
+
+/*
+ * R4356 (0x1104) - DSP1 Status 1
+ */
+#define ARIZONA_DSP1_RAM_RDY 0x0001 /* DSP1_RAM_RDY */
+#define ARIZONA_DSP1_RAM_RDY_MASK 0x0001 /* DSP1_RAM_RDY */
+#define ARIZONA_DSP1_RAM_RDY_SHIFT 0 /* DSP1_RAM_RDY */
+#define ARIZONA_DSP1_RAM_RDY_WIDTH 1 /* DSP1_RAM_RDY */
+
+/*
+ * R4357 (0x1105) - DSP1 Status 2
+ */
+#define ARIZONA_DSP1_PING_FULL 0x8000 /* DSP1_PING_FULL */
+#define ARIZONA_DSP1_PING_FULL_MASK 0x8000 /* DSP1_PING_FULL */
+#define ARIZONA_DSP1_PING_FULL_SHIFT 15 /* DSP1_PING_FULL */
+#define ARIZONA_DSP1_PING_FULL_WIDTH 1 /* DSP1_PING_FULL */
+#define ARIZONA_DSP1_PONG_FULL 0x4000 /* DSP1_PONG_FULL */
+#define ARIZONA_DSP1_PONG_FULL_MASK 0x4000 /* DSP1_PONG_FULL */
+#define ARIZONA_DSP1_PONG_FULL_SHIFT 14 /* DSP1_PONG_FULL */
+#define ARIZONA_DSP1_PONG_FULL_WIDTH 1 /* DSP1_PONG_FULL */
+#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_MASK 0x00FF /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */
+#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_SHIFT 0 /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */
+#define ARIZONA_DSP1_WDMA_ACTIVE_CHANNELS_WIDTH 8 /* DSP1_WDMA_ACTIVE_CHANNELS - [7:0] */
+
+#endif
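The _MASK/_SHIFT/_WIDTH triples above describe bit fields, not values to write directly. The sketch below is a minimal illustration, not part of the header, of how such field macros are typically paired with the kernel regmap API; the function names are made up, a valid struct regmap handle and inclusion of this header are assumed, and the register address 0xE81 is taken from the "R3713 (0xE81) - DRC1 ctrl2" comment above.

#include <linux/regmap.h>

/* Write a value into the DRC1_ATK field, bits [12:9] of DRC1 ctrl2 (0xE81). */
static int example_set_drc1_attack(struct regmap *regmap, unsigned int atk)
{
	return regmap_update_bits(regmap, 0xE81, ARIZONA_DRC1_ATK_MASK,
				  atk << ARIZONA_DRC1_ATK_SHIFT);
}

/* Read the same field back: mask it out, then shift it down to bit 0. */
static int example_get_drc1_attack(struct regmap *regmap, unsigned int *atk)
{
	unsigned int val;
	int ret = regmap_read(regmap, 0xE81, &val);

	if (ret)
		return ret;

	*atk = (val & ARIZONA_DRC1_ATK_MASK) >> ARIZONA_DRC1_ATK_SHIFT;
	return 0;
}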
diff --git a/include/linux/mfd/as3711.h b/include/linux/mfd/as3711.h
new file mode 100644
index 000000000..38452ce1e
--- /dev/null
+++ b/include/linux/mfd/as3711.h
@@ -0,0 +1,126 @@
+/*
+ * AS3711 PMIC MFD driver header
+ *
+ * Copyright (C) 2012 Renesas Electronics Corporation
+ * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation
+ */
+
+#ifndef MFD_AS3711_H
+#define MFD_AS3711_H
+
+/*
+ * Client data
+ */
+
+/* Register addresses */
+#define AS3711_SD_1_VOLTAGE 0 /* Digital Step-Down */
+#define AS3711_SD_2_VOLTAGE 1
+#define AS3711_SD_3_VOLTAGE 2
+#define AS3711_SD_4_VOLTAGE 3
+#define AS3711_LDO_1_VOLTAGE 4 /* Analog LDO */
+#define AS3711_LDO_2_VOLTAGE 5
+#define AS3711_LDO_3_VOLTAGE 6 /* Digital LDO */
+#define AS3711_LDO_4_VOLTAGE 7
+#define AS3711_LDO_5_VOLTAGE 8
+#define AS3711_LDO_6_VOLTAGE 9
+#define AS3711_LDO_7_VOLTAGE 0xa
+#define AS3711_LDO_8_VOLTAGE 0xb
+#define AS3711_SD_CONTROL 0x10
+#define AS3711_GPIO_SIGNAL_OUT 0x20
+#define AS3711_GPIO_SIGNAL_IN 0x21
+#define AS3711_SD_CONTROL_1 0x30
+#define AS3711_SD_CONTROL_2 0x31
+#define AS3711_CURR_CONTROL 0x40
+#define AS3711_CURR1_VALUE 0x43
+#define AS3711_CURR2_VALUE 0x44
+#define AS3711_CURR3_VALUE 0x45
+#define AS3711_STEPUP_CONTROL_1 0x50
+#define AS3711_STEPUP_CONTROL_2 0x51
+#define AS3711_STEPUP_CONTROL_4 0x53
+#define AS3711_STEPUP_CONTROL_5 0x54
+#define AS3711_REG_STATUS 0x73
+#define AS3711_INTERRUPT_STATUS_1 0x77
+#define AS3711_INTERRUPT_STATUS_2 0x78
+#define AS3711_INTERRUPT_STATUS_3 0x79
+#define AS3711_CHARGER_STATUS_1 0x86
+#define AS3711_CHARGER_STATUS_2 0x87
+#define AS3711_ASIC_ID_1 0x90
+#define AS3711_ASIC_ID_2 0x91
+
+#define AS3711_MAX_REGS 0x92
+
+/* Regulators */
+enum {
+ AS3711_REGULATOR_SD_1,
+ AS3711_REGULATOR_SD_2,
+ AS3711_REGULATOR_SD_3,
+ AS3711_REGULATOR_SD_4,
+ AS3711_REGULATOR_LDO_1,
+ AS3711_REGULATOR_LDO_2,
+ AS3711_REGULATOR_LDO_3,
+ AS3711_REGULATOR_LDO_4,
+ AS3711_REGULATOR_LDO_5,
+ AS3711_REGULATOR_LDO_6,
+ AS3711_REGULATOR_LDO_7,
+ AS3711_REGULATOR_LDO_8,
+
+ AS3711_REGULATOR_MAX,
+};
+
+struct device;
+struct regmap;
+
+struct as3711 {
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+#define AS3711_MAX_STEPDOWN 4
+#define AS3711_MAX_STEPUP 2
+#define AS3711_MAX_LDO 8
+
+enum as3711_su2_feedback {
+ AS3711_SU2_VOLTAGE,
+ AS3711_SU2_CURR1,
+ AS3711_SU2_CURR2,
+ AS3711_SU2_CURR3,
+ AS3711_SU2_CURR_AUTO,
+};
+
+enum as3711_su2_fbprot {
+ AS3711_SU2_LX_SD4,
+ AS3711_SU2_GPIO2,
+ AS3711_SU2_GPIO3,
+ AS3711_SU2_GPIO4,
+};
+
+/*
+ * Platform data
+ */
+
+struct as3711_regulator_pdata {
+ struct regulator_init_data *init_data[AS3711_REGULATOR_MAX];
+};
+
+struct as3711_bl_pdata {
+ const char *su1_fb;
+ int su1_max_uA;
+ const char *su2_fb;
+ int su2_max_uA;
+ enum as3711_su2_feedback su2_feedback;
+ enum as3711_su2_fbprot su2_fbprot;
+ bool su2_auto_curr1;
+ bool su2_auto_curr2;
+ bool su2_auto_curr3;
+};
+
+struct as3711_platform_data {
+ struct as3711_regulator_pdata regulator;
+ struct as3711_bl_pdata backlight;
+};
+
+#endif
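The structures above are platform data that board code would typically hand to the AS3711 driver. A hedged sketch of filling the backlight part follows; it is illustrative only, and every value, including the "sw2" feedback supply name and the current limit, is a made-up placeholder rather than anything mandated by this header.

/* Hypothetical board support sketch, not part of the header. */
static struct as3711_platform_data example_as3711_pdata = {
	.backlight = {
		.su2_fb		= "sw2",	/* placeholder feedback supply name */
		.su2_max_uA	= 36000,	/* placeholder current limit */
		.su2_feedback	= AS3711_SU2_CURR_AUTO,
		.su2_fbprot	= AS3711_SU2_GPIO4,
		.su2_auto_curr1	= true,
		.su2_auto_curr2	= true,
		.su2_auto_curr3	= true,
	},
};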
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
new file mode 100644
index 000000000..8d43e9f2a
--- /dev/null
+++ b/include/linux/mfd/as3722.h
@@ -0,0 +1,428 @@
+/*
+ * as3722 definitions
+ *
+ * Copyright (C) 2013 ams
+ * Copyright (c) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Author: Florian Lobmaier <florian.lobmaier@ams.com>
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __LINUX_MFD_AS3722_H__
+#define __LINUX_MFD_AS3722_H__
+
+#include <linux/regmap.h>
+
+/* AS3722 registers */
+#define AS3722_SD0_VOLTAGE_REG 0x00
+#define AS3722_SD1_VOLTAGE_REG 0x01
+#define AS3722_SD2_VOLTAGE_REG 0x02
+#define AS3722_SD3_VOLTAGE_REG 0x03
+#define AS3722_SD4_VOLTAGE_REG 0x04
+#define AS3722_SD5_VOLTAGE_REG 0x05
+#define AS3722_SD6_VOLTAGE_REG 0x06
+#define AS3722_GPIO0_CONTROL_REG 0x08
+#define AS3722_GPIO1_CONTROL_REG 0x09
+#define AS3722_GPIO2_CONTROL_REG 0x0A
+#define AS3722_GPIO3_CONTROL_REG 0x0B
+#define AS3722_GPIO4_CONTROL_REG 0x0C
+#define AS3722_GPIO5_CONTROL_REG 0x0D
+#define AS3722_GPIO6_CONTROL_REG 0x0E
+#define AS3722_GPIO7_CONTROL_REG 0x0F
+#define AS3722_LDO0_VOLTAGE_REG 0x10
+#define AS3722_LDO1_VOLTAGE_REG 0x11
+#define AS3722_LDO2_VOLTAGE_REG 0x12
+#define AS3722_LDO3_VOLTAGE_REG 0x13
+#define AS3722_LDO4_VOLTAGE_REG 0x14
+#define AS3722_LDO5_VOLTAGE_REG 0x15
+#define AS3722_LDO6_VOLTAGE_REG 0x16
+#define AS3722_LDO7_VOLTAGE_REG 0x17
+#define AS3722_LDO9_VOLTAGE_REG 0x19
+#define AS3722_LDO10_VOLTAGE_REG 0x1A
+#define AS3722_LDO11_VOLTAGE_REG 0x1B
+#define AS3722_GPIO_DEB1_REG 0x1E
+#define AS3722_GPIO_DEB2_REG 0x1F
+#define AS3722_GPIO_SIGNAL_OUT_REG 0x20
+#define AS3722_GPIO_SIGNAL_IN_REG 0x21
+#define AS3722_REG_SEQU_MOD1_REG 0x22
+#define AS3722_REG_SEQU_MOD2_REG 0x23
+#define AS3722_REG_SEQU_MOD3_REG 0x24
+#define AS3722_SD_PHSW_CTRL_REG 0x27
+#define AS3722_SD_PHSW_STATUS 0x28
+#define AS3722_SD0_CONTROL_REG 0x29
+#define AS3722_SD1_CONTROL_REG 0x2A
+#define AS3722_SDmph_CONTROL_REG 0x2B
+#define AS3722_SD23_CONTROL_REG 0x2C
+#define AS3722_SD4_CONTROL_REG 0x2D
+#define AS3722_SD5_CONTROL_REG 0x2E
+#define AS3722_SD6_CONTROL_REG 0x2F
+#define AS3722_SD_DVM_REG 0x30
+#define AS3722_RESET_REASON_REG 0x31
+#define AS3722_BATTERY_VOLTAGE_MONITOR_REG 0x32
+#define AS3722_STARTUP_CONTROL_REG 0x33
+#define AS3722_RESET_TIMER_REG 0x34
+#define AS3722_REFERENCE_CONTROL_REG 0x35
+#define AS3722_RESET_CONTROL_REG 0x36
+#define AS3722_OVER_TEMP_CONTROL_REG 0x37
+#define AS3722_WATCHDOG_CONTROL_REG 0x38
+#define AS3722_REG_STANDBY_MOD1_REG 0x39
+#define AS3722_REG_STANDBY_MOD2_REG 0x3A
+#define AS3722_REG_STANDBY_MOD3_REG 0x3B
+#define AS3722_ENABLE_CTRL1_REG 0x3C
+#define AS3722_ENABLE_CTRL2_REG 0x3D
+#define AS3722_ENABLE_CTRL3_REG 0x3E
+#define AS3722_ENABLE_CTRL4_REG 0x3F
+#define AS3722_ENABLE_CTRL5_REG 0x40
+#define AS3722_PWM_CONTROL_L_REG 0x41
+#define AS3722_PWM_CONTROL_H_REG 0x42
+#define AS3722_WATCHDOG_TIMER_REG 0x46
+#define AS3722_WATCHDOG_SOFTWARE_SIGNAL_REG 0x48
+#define AS3722_IOVOLTAGE_REG 0x49
+#define AS3722_BATTERY_VOLTAGE_MONITOR2_REG 0x4A
+#define AS3722_SD_CONTROL_REG 0x4D
+#define AS3722_LDOCONTROL0_REG 0x4E
+#define AS3722_LDOCONTROL1_REG 0x4F
+#define AS3722_SD0_PROTECT_REG 0x50
+#define AS3722_SD6_PROTECT_REG 0x51
+#define AS3722_PWM_VCONTROL1_REG 0x52
+#define AS3722_PWM_VCONTROL2_REG 0x53
+#define AS3722_PWM_VCONTROL3_REG 0x54
+#define AS3722_PWM_VCONTROL4_REG 0x55
+#define AS3722_BB_CHARGER_REG 0x57
+#define AS3722_CTRL_SEQU1_REG 0x58
+#define AS3722_CTRL_SEQU2_REG 0x59
+#define AS3722_OVCURRENT_REG 0x5A
+#define AS3722_OVCURRENT_DEB_REG 0x5B
+#define AS3722_SDLV_DEB_REG 0x5C
+#define AS3722_OC_PG_CTRL_REG 0x5D
+#define AS3722_OC_PG_CTRL2_REG 0x5E
+#define AS3722_CTRL_STATUS 0x5F
+#define AS3722_RTC_CONTROL_REG 0x60
+#define AS3722_RTC_SECOND_REG 0x61
+#define AS3722_RTC_MINUTE_REG 0x62
+#define AS3722_RTC_HOUR_REG 0x63
+#define AS3722_RTC_DAY_REG 0x64
+#define AS3722_RTC_MONTH_REG 0x65
+#define AS3722_RTC_YEAR_REG 0x66
+#define AS3722_RTC_ALARM_SECOND_REG 0x67
+#define AS3722_RTC_ALARM_MINUTE_REG 0x68
+#define AS3722_RTC_ALARM_HOUR_REG 0x69
+#define AS3722_RTC_ALARM_DAY_REG 0x6A
+#define AS3722_RTC_ALARM_MONTH_REG 0x6B
+#define AS3722_RTC_ALARM_YEAR_REG 0x6C
+#define AS3722_SRAM_REG 0x6D
+#define AS3722_RTC_ACCESS_REG 0x6F
+#define AS3722_RTC_STATUS_REG 0x73
+#define AS3722_INTERRUPT_MASK1_REG 0x74
+#define AS3722_INTERRUPT_MASK2_REG 0x75
+#define AS3722_INTERRUPT_MASK3_REG 0x76
+#define AS3722_INTERRUPT_MASK4_REG 0x77
+#define AS3722_INTERRUPT_STATUS1_REG 0x78
+#define AS3722_INTERRUPT_STATUS2_REG 0x79
+#define AS3722_INTERRUPT_STATUS3_REG 0x7A
+#define AS3722_INTERRUPT_STATUS4_REG 0x7B
+#define AS3722_TEMP_STATUS_REG 0x7D
+#define AS3722_ADC0_CONTROL_REG 0x80
+#define AS3722_ADC1_CONTROL_REG 0x81
+#define AS3722_ADC0_MSB_RESULT_REG 0x82
+#define AS3722_ADC0_LSB_RESULT_REG 0x83
+#define AS3722_ADC1_MSB_RESULT_REG 0x84
+#define AS3722_ADC1_LSB_RESULT_REG 0x85
+#define AS3722_ADC1_THRESHOLD_HI_MSB_REG 0x86
+#define AS3722_ADC1_THRESHOLD_HI_LSB_REG 0x87
+#define AS3722_ADC1_THRESHOLD_LO_MSB_REG 0x88
+#define AS3722_ADC1_THRESHOLD_LO_LSB_REG 0x89
+#define AS3722_ADC_CONFIGURATION_REG 0x8A
+#define AS3722_ASIC_ID1_REG 0x90
+#define AS3722_ASIC_ID2_REG 0x91
+#define AS3722_LOCK_REG 0x9E
+#define AS3722_FUSE7_REG 0xA7
+#define AS3722_MAX_REGISTER 0xF4
+
+#define AS3722_SD0_EXT_ENABLE_MASK 0x03
+#define AS3722_SD1_EXT_ENABLE_MASK 0x0C
+#define AS3722_SD2_EXT_ENABLE_MASK 0x30
+#define AS3722_SD3_EXT_ENABLE_MASK 0xC0
+#define AS3722_SD4_EXT_ENABLE_MASK 0x03
+#define AS3722_SD5_EXT_ENABLE_MASK 0x0C
+#define AS3722_SD6_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO0_EXT_ENABLE_MASK 0x03
+#define AS3722_LDO1_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO2_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO3_EXT_ENABLE_MASK 0xC0
+#define AS3722_LDO4_EXT_ENABLE_MASK 0x03
+#define AS3722_LDO5_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO6_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO7_EXT_ENABLE_MASK 0xC0
+#define AS3722_LDO9_EXT_ENABLE_MASK 0x0C
+#define AS3722_LDO10_EXT_ENABLE_MASK 0x30
+#define AS3722_LDO11_EXT_ENABLE_MASK 0xC0
+
+#define AS3722_OVCURRENT_SD0_ALARM_MASK 0x07
+#define AS3722_OVCURRENT_SD0_ALARM_SHIFT 0x01
+#define AS3722_OVCURRENT_SD0_TRIP_MASK 0x18
+#define AS3722_OVCURRENT_SD0_TRIP_SHIFT 0x03
+#define AS3722_OVCURRENT_SD1_TRIP_MASK 0x60
+#define AS3722_OVCURRENT_SD1_TRIP_SHIFT 0x05
+
+#define AS3722_OVCURRENT_SD6_ALARM_MASK 0x07
+#define AS3722_OVCURRENT_SD6_ALARM_SHIFT 0x01
+#define AS3722_OVCURRENT_SD6_TRIP_MASK 0x18
+#define AS3722_OVCURRENT_SD6_TRIP_SHIFT 0x03
+
+/* AS3722 register bits and bit masks */
+#define AS3722_LDO_ILIMIT_MASK BIT(7)
+#define AS3722_LDO_ILIMIT_BIT BIT(7)
+#define AS3722_LDO0_VSEL_MASK 0x1F
+#define AS3722_LDO0_VSEL_MIN 0x01
+#define AS3722_LDO0_VSEL_MAX 0x12
+#define AS3722_LDO0_NUM_VOLT 0x12
+#define AS3722_LDO3_VSEL_MASK 0x3F
+#define AS3722_LDO3_VSEL_MIN 0x01
+#define AS3722_LDO3_VSEL_MAX 0x2D
+#define AS3722_LDO3_NUM_VOLT 0x2D
+#define AS3722_LDO_VSEL_MASK 0x7F
+#define AS3722_LDO_VSEL_MIN 0x01
+#define AS3722_LDO_VSEL_MAX 0x7F
+#define AS3722_LDO_VSEL_DNU_MIN 0x25
+#define AS3722_LDO_VSEL_DNU_MAX 0x3F
+#define AS3722_LDO_NUM_VOLT 0x80
+
+#define AS3722_LDO0_CTRL BIT(0)
+#define AS3722_LDO1_CTRL BIT(1)
+#define AS3722_LDO2_CTRL BIT(2)
+#define AS3722_LDO3_CTRL BIT(3)
+#define AS3722_LDO4_CTRL BIT(4)
+#define AS3722_LDO5_CTRL BIT(5)
+#define AS3722_LDO6_CTRL BIT(6)
+#define AS3722_LDO7_CTRL BIT(7)
+#define AS3722_LDO9_CTRL BIT(1)
+#define AS3722_LDO10_CTRL BIT(2)
+#define AS3722_LDO11_CTRL BIT(3)
+
+#define AS3722_LDO3_MODE_MASK (3 << 6)
+#define AS3722_LDO3_MODE_VAL(n) (((n) & 0x3) << 6)
+#define AS3722_LDO3_MODE_PMOS AS3722_LDO3_MODE_VAL(0)
+#define AS3722_LDO3_MODE_PMOS_TRACKING AS3722_LDO3_MODE_VAL(1)
+#define AS3722_LDO3_MODE_NMOS AS3722_LDO3_MODE_VAL(2)
+#define AS3722_LDO3_MODE_SWITCH AS3722_LDO3_MODE_VAL(3)
+
+#define AS3722_SD_VSEL_MASK 0x7F
+#define AS3722_SD0_VSEL_MIN 0x01
+#define AS3722_SD0_VSEL_MAX 0x5A
+#define AS3722_SD0_VSEL_LOW_VOL_MAX 0x6E
+#define AS3722_SD2_VSEL_MIN 0x01
+#define AS3722_SD2_VSEL_MAX 0x7F
+
+#define AS3722_SDn_CTRL(n) BIT(n)
+
+#define AS3722_SD0_MODE_FAST BIT(4)
+#define AS3722_SD1_MODE_FAST BIT(4)
+#define AS3722_SD2_MODE_FAST BIT(2)
+#define AS3722_SD3_MODE_FAST BIT(6)
+#define AS3722_SD4_MODE_FAST BIT(2)
+#define AS3722_SD5_MODE_FAST BIT(2)
+#define AS3722_SD6_MODE_FAST BIT(4)
+
+#define AS3722_POWER_OFF BIT(1)
+
+#define AS3722_INTERRUPT_MASK1_LID BIT(0)
+#define AS3722_INTERRUPT_MASK1_ACOK BIT(1)
+#define AS3722_INTERRUPT_MASK1_ENABLE1 BIT(2)
+#define AS3722_INTERRUPT_MASK1_OCURR_ALARM_SD0 BIT(3)
+#define AS3722_INTERRUPT_MASK1_ONKEY_LONG BIT(4)
+#define AS3722_INTERRUPT_MASK1_ONKEY BIT(5)
+#define AS3722_INTERRUPT_MASK1_OVTMP BIT(6)
+#define AS3722_INTERRUPT_MASK1_LOWBAT BIT(7)
+
+#define AS3722_INTERRUPT_MASK2_SD0_LV BIT(0)
+#define AS3722_INTERRUPT_MASK2_SD1_LV BIT(1)
+#define AS3722_INTERRUPT_MASK2_SD2345_LV BIT(2)
+#define AS3722_INTERRUPT_MASK2_PWM1_OV_PROT BIT(3)
+#define AS3722_INTERRUPT_MASK2_PWM2_OV_PROT BIT(4)
+#define AS3722_INTERRUPT_MASK2_ENABLE2 BIT(5)
+#define AS3722_INTERRUPT_MASK2_SD6_LV BIT(6)
+#define AS3722_INTERRUPT_MASK2_RTC_REP BIT(7)
+
+#define AS3722_INTERRUPT_MASK3_RTC_ALARM BIT(0)
+#define AS3722_INTERRUPT_MASK3_GPIO1 BIT(1)
+#define AS3722_INTERRUPT_MASK3_GPIO2 BIT(2)
+#define AS3722_INTERRUPT_MASK3_GPIO3 BIT(3)
+#define AS3722_INTERRUPT_MASK3_GPIO4 BIT(4)
+#define AS3722_INTERRUPT_MASK3_GPIO5 BIT(5)
+#define AS3722_INTERRUPT_MASK3_WATCHDOG BIT(6)
+#define AS3722_INTERRUPT_MASK3_ENABLE3 BIT(7)
+
+#define AS3722_INTERRUPT_MASK4_TEMP_SD0_SHUTDOWN BIT(0)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD1_SHUTDOWN BIT(1)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD6_SHUTDOWN BIT(2)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD0_ALARM BIT(3)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD1_ALARM BIT(4)
+#define AS3722_INTERRUPT_MASK4_TEMP_SD6_ALARM BIT(5)
+#define AS3722_INTERRUPT_MASK4_OCCUR_ALARM_SD6 BIT(6)
+#define AS3722_INTERRUPT_MASK4_ADC BIT(7)
+
+#define AS3722_ADC1_INTERVAL_TIME BIT(0)
+#define AS3722_ADC1_INT_MODE_ON BIT(1)
+#define AS3722_ADC_BUF_ON BIT(2)
+#define AS3722_ADC1_LOW_VOLTAGE_RANGE BIT(5)
+#define AS3722_ADC1_INTEVAL_SCAN BIT(6)
+#define AS3722_ADC1_INT_MASK BIT(7)
+
+#define AS3722_ADC_MSB_VAL_MASK 0x7F
+#define AS3722_ADC_LSB_VAL_MASK 0x07
+
+#define AS3722_ADC0_CONV_START BIT(7)
+#define AS3722_ADC0_CONV_NOTREADY BIT(7)
+#define AS3722_ADC0_SOURCE_SELECT_MASK 0x1F
+
+#define AS3722_ADC1_CONV_START BIT(7)
+#define AS3722_ADC1_CONV_NOTREADY BIT(7)
+#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F
+
+/* GPIO modes */
+#define AS3722_GPIO_MODE_MASK 0x07
+#define AS3722_GPIO_MODE_INPUT 0x00
+#define AS3722_GPIO_MODE_OUTPUT_VDDH 0x01
+#define AS3722_GPIO_MODE_IO_OPEN_DRAIN 0x02
+#define AS3722_GPIO_MODE_ADC_IN 0x03
+#define AS3722_GPIO_MODE_INPUT_PULL_UP 0x04
+#define AS3722_GPIO_MODE_INPUT_PULL_DOWN 0x05
+#define AS3722_GPIO_MODE_IO_OPEN_DRAIN_PULL_UP 0x06
+#define AS3722_GPIO_MODE_OUTPUT_VDDL 0x07
+#define AS3722_GPIO_MODE_VAL(n) ((n) & AS3722_GPIO_MODE_MASK)
+
+#define AS3722_GPIO_INV BIT(7)
+#define AS3722_GPIO_IOSF_MASK 0x78
+#define AS3722_GPIO_IOSF_VAL(n) (((n) & 0xF) << 3)
+#define AS3722_GPIO_IOSF_NORMAL AS3722_GPIO_IOSF_VAL(0)
+#define AS3722_GPIO_IOSF_INTERRUPT_OUT AS3722_GPIO_IOSF_VAL(1)
+#define AS3722_GPIO_IOSF_VSUP_LOW_OUT AS3722_GPIO_IOSF_VAL(2)
+#define AS3722_GPIO_IOSF_GPIO_INTERRUPT_IN AS3722_GPIO_IOSF_VAL(3)
+#define AS3722_GPIO_IOSF_ISINK_PWM_IN AS3722_GPIO_IOSF_VAL(4)
+#define AS3722_GPIO_IOSF_VOLTAGE_STBY AS3722_GPIO_IOSF_VAL(5)
+#define AS3722_GPIO_IOSF_SD0_OUT AS3722_GPIO_IOSF_VAL(6)
+#define AS3722_GPIO_IOSF_PWR_GOOD_OUT AS3722_GPIO_IOSF_VAL(7)
+#define AS3722_GPIO_IOSF_Q32K_OUT AS3722_GPIO_IOSF_VAL(8)
+#define AS3722_GPIO_IOSF_WATCHDOG_IN AS3722_GPIO_IOSF_VAL(9)
+#define AS3722_GPIO_IOSF_SOFT_RESET_IN AS3722_GPIO_IOSF_VAL(11)
+#define AS3722_GPIO_IOSF_PWM_OUT AS3722_GPIO_IOSF_VAL(12)
+#define AS3722_GPIO_IOSF_VSUP_LOW_DEB_OUT AS3722_GPIO_IOSF_VAL(13)
+#define AS3722_GPIO_IOSF_SD6_LOW_VOLT_LOW AS3722_GPIO_IOSF_VAL(14)
+
+#define AS3722_GPIOn_SIGNAL(n) BIT(n)
+#define AS3722_GPIOn_CONTROL_REG(n) (AS3722_GPIO0_CONTROL_REG + n)
+#define AS3722_I2C_PULL_UP BIT(4)
+#define AS3722_INT_PULL_UP BIT(5)
+
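/*
 * Illustrative sketch, not part of the original header: a GPIO control
 * register value combines one MODE value (bits [2:0]) with one IOSF value
 * (bits [6:3]) from the definitions above; the choice of a VDDH output
 * carrying the power-good signal is an arbitrary example.
 */
static inline u8 example_as3722_gpio_pwr_good_ctrl(void)
{
	return AS3722_GPIO_MODE_OUTPUT_VDDH | AS3722_GPIO_IOSF_PWR_GOOD_OUT;
}
/* The result would then be written to AS3722_GPIOn_CONTROL_REG(n) via regmap. */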
+#define AS3722_RTC_REP_WAKEUP_EN BIT(0)
+#define AS3722_RTC_ALARM_WAKEUP_EN BIT(1)
+#define AS3722_RTC_ON BIT(2)
+#define AS3722_RTC_IRQMODE BIT(3)
+#define AS3722_RTC_CLK32K_OUT_EN BIT(5)
+
+#define AS3722_WATCHDOG_TIMER_MAX 0x7F
+#define AS3722_WATCHDOG_ON BIT(0)
+#define AS3722_WATCHDOG_SW_SIG BIT(0)
+
+#define AS3722_EXT_CONTROL_ENABLE1 0x1
+#define AS3722_EXT_CONTROL_ENABLE2 0x2
+#define AS3722_EXT_CONTROL_ENABLE3 0x3
+
+#define AS3722_FUSE7_SD0_LOW_VOLTAGE BIT(4)
+
+/* Interrupt IDs */
+enum as3722_irq {
+ AS3722_IRQ_LID,
+ AS3722_IRQ_ACOK,
+ AS3722_IRQ_ENABLE1,
+ AS3722_IRQ_OCCUR_ALARM_SD0,
+ AS3722_IRQ_ONKEY_LONG_PRESS,
+ AS3722_IRQ_ONKEY,
+ AS3722_IRQ_OVTMP,
+ AS3722_IRQ_LOWBAT,
+ AS3722_IRQ_SD0_LV,
+ AS3722_IRQ_SD1_LV,
+ AS3722_IRQ_SD2_LV,
+ AS3722_IRQ_PWM1_OV_PROT,
+ AS3722_IRQ_PWM2_OV_PROT,
+ AS3722_IRQ_ENABLE2,
+ AS3722_IRQ_SD6_LV,
+ AS3722_IRQ_RTC_REP,
+ AS3722_IRQ_RTC_ALARM,
+ AS3722_IRQ_GPIO1,
+ AS3722_IRQ_GPIO2,
+ AS3722_IRQ_GPIO3,
+ AS3722_IRQ_GPIO4,
+ AS3722_IRQ_GPIO5,
+ AS3722_IRQ_WATCHDOG,
+ AS3722_IRQ_ENABLE3,
+ AS3722_IRQ_TEMP_SD0_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD1_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD2_SHUTDOWN,
+ AS3722_IRQ_TEMP_SD0_ALARM,
+ AS3722_IRQ_TEMP_SD1_ALARM,
+ AS3722_IRQ_TEMP_SD6_ALARM,
+ AS3722_IRQ_OCCUR_ALARM_SD6,
+ AS3722_IRQ_ADC,
+ AS3722_IRQ_MAX,
+};
+
+struct as3722 {
+ struct device *dev;
+ struct regmap *regmap;
+ int chip_irq;
+ unsigned long irq_flags;
+ bool en_intern_int_pullup;
+ bool en_intern_i2c_pullup;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+static inline int as3722_read(struct as3722 *as3722, u32 reg, u32 *dest)
+{
+ return regmap_read(as3722->regmap, reg, dest);
+}
+
+static inline int as3722_write(struct as3722 *as3722, u32 reg, u32 value)
+{
+ return regmap_write(as3722->regmap, reg, value);
+}
+
+static inline int as3722_block_read(struct as3722 *as3722, u32 reg,
+ int count, u8 *buf)
+{
+ return regmap_bulk_read(as3722->regmap, reg, buf, count);
+}
+
+static inline int as3722_block_write(struct as3722 *as3722, u32 reg,
+ int count, u8 *data)
+{
+ return regmap_bulk_write(as3722->regmap, reg, data, count);
+}
+
+static inline int as3722_update_bits(struct as3722 *as3722, u32 reg,
+ u32 mask, u8 val)
+{
+ return regmap_update_bits(as3722->regmap, reg, mask, val);
+}
+
+static inline int as3722_irq_get_virq(struct as3722 *as3722, int irq)
+{
+ return regmap_irq_get_virq(as3722->irq_data, irq);
+}
+#endif /* __LINUX_MFD_AS3722_H__ */
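The static inline helpers above are thin wrappers around the regmap calls. The sketch below is illustrative only: the function name and the particular register choices are assumptions, and it presumes a fully initialised struct as3722 with this header included.

/* Read the ASIC ID, then set the watchdog-on bit without touching the rest. */
static int example_as3722_setup(struct as3722 *as3722)
{
	u32 id;
	int ret = as3722_read(as3722, AS3722_ASIC_ID1_REG, &id);

	if (ret < 0)
		return ret;

	return as3722_update_bits(as3722, AS3722_WATCHDOG_CONTROL_REG,
				  AS3722_WATCHDOG_ON, AS3722_WATCHDOG_ON);
}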
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
new file mode 100644
index 000000000..e1148d037
--- /dev/null
+++ b/include/linux/mfd/asic3.h
@@ -0,0 +1,316 @@
+/*
+ * include/linux/mfd/asic3.h
+ *
+ * Compaq ASIC3 headers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2001 Compaq Computer Corporation.
+ * Copyright 2007-2008 OpenedHand Ltd.
+ */
+
+#ifndef __ASIC3_H__
+#define __ASIC3_H__
+
+#include <linux/types.h>
+
+struct led_classdev;
+struct asic3_led {
+ const char *name;
+ const char *default_trigger;
+ struct led_classdev *cdev;
+};
+
+struct asic3_platform_data {
+ u16 *gpio_config;
+ unsigned int gpio_config_num;
+
+ unsigned int irq_base;
+
+ unsigned int gpio_base;
+
+ unsigned int clock_rate;
+
+ struct asic3_led *leds;
+};
+
+#define ASIC3_NUM_GPIO_BANKS 4
+#define ASIC3_GPIOS_PER_BANK 16
+#define ASIC3_NUM_GPIOS 64
+#define ASIC3_NR_IRQS (ASIC3_NUM_GPIOS + 6)
+
+#define ASIC3_IRQ_LED0 64
+#define ASIC3_IRQ_LED1 65
+#define ASIC3_IRQ_LED2 66
+#define ASIC3_IRQ_SPI 67
+#define ASIC3_IRQ_SMBUS 68
+#define ASIC3_IRQ_OWM 69
+
+#define ASIC3_TO_GPIO(gpio) (NR_BUILTIN_GPIO + (gpio))
+
+#define ASIC3_GPIO_BANK_A 0
+#define ASIC3_GPIO_BANK_B 1
+#define ASIC3_GPIO_BANK_C 2
+#define ASIC3_GPIO_BANK_D 3
+
+#define ASIC3_GPIO(bank, gpio) \
+ ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio))
+#define ASIC3_GPIO_bit(gpio) (1 << (gpio & 0xf))
+/* All offsets below are specified with this address bus shift */
+#define ASIC3_DEFAULT_ADDR_SHIFT 2
+
+#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_BASE + ASIC3_##base##_##reg)
+#define ASIC3_GPIO_OFFSET(base, reg) \
+ (ASIC3_GPIO_##base##_BASE + ASIC3_GPIO_##reg)
+
+#define ASIC3_GPIO_A_BASE 0x0000
+#define ASIC3_GPIO_B_BASE 0x0100
+#define ASIC3_GPIO_C_BASE 0x0200
+#define ASIC3_GPIO_D_BASE 0x0300
+
+#define ASIC3_GPIO_TO_BANK(gpio) ((gpio) >> 4)
+#define ASIC3_GPIO_TO_BIT(gpio) ((gpio) - \
+ (ASIC3_GPIOS_PER_BANK * ((gpio) >> 4)))
+#define ASIC3_GPIO_TO_MASK(gpio) (1 << ASIC3_GPIO_TO_BIT(gpio))
+#define ASIC3_GPIO_TO_BASE(gpio) (ASIC3_GPIO_A_BASE + (((gpio) >> 4) * 0x0100))
+#define ASIC3_BANK_TO_BASE(bank) (ASIC3_GPIO_A_BASE + ((bank) * 0x100))
+
+#define ASIC3_GPIO_MASK 0x00 /* R/W 0:don't mask */
+#define ASIC3_GPIO_DIRECTION 0x04 /* R/W 0:input */
+#define ASIC3_GPIO_OUT 0x08 /* R/W 0:output low */
+#define ASIC3_GPIO_TRIGGER_TYPE 0x0c /* R/W 0:level */
+#define ASIC3_GPIO_EDGE_TRIGGER 0x10 /* R/W 0:falling */
+#define ASIC3_GPIO_LEVEL_TRIGGER 0x14 /* R/W 0:low level detect */
+#define ASIC3_GPIO_SLEEP_MASK 0x18 /* R/W 0:don't mask in sleep mode */
+#define ASIC3_GPIO_SLEEP_OUT 0x1c /* R/W level 0:low in sleep mode */
+#define ASIC3_GPIO_BAT_FAULT_OUT 0x20 /* R/W level 0:low in batt_fault */
+#define ASIC3_GPIO_INT_STATUS 0x24 /* R/W 0:none, 1:detect */
+#define ASIC3_GPIO_ALT_FUNCTION 0x28 /* R/W 1:LED register control */
+#define ASIC3_GPIO_SLEEP_CONF 0x2c /*
+ * R/W bit 1: autosleep
+ * 0: disable gposlpout in normal mode,
+ * enable gposlpout in sleep mode.
+ */
+#define ASIC3_GPIO_STATUS 0x30 /* R Pin status */
+
+/*
+ * ASIC3 GPIO config
+ *
+ * Bits 0..6 gpio number
+ * Bits 7..13 Alternate function
+ * Bit 14 Direction
+ * Bit 15 Initial value
+ *
+ */
+#define ASIC3_CONFIG_GPIO_PIN(config) ((config) & 0x7f)
+#define ASIC3_CONFIG_GPIO_ALT(config) (((config) & (0x7f << 7)) >> 7)
+#define ASIC3_CONFIG_GPIO_DIR(config) ((config & (1 << 14)) >> 14)
+#define ASIC3_CONFIG_GPIO_INIT(config) ((config & (1 << 15)) >> 15)
+#define ASIC3_CONFIG_GPIO(gpio, alt, dir, init) (((gpio) & 0x7f) \
+ | (((alt) & 0x7f) << 7) | (((dir) & 0x1) << 14) \
+ | (((init) & 0x1) << 15))
+#define ASIC3_CONFIG_GPIO_DEFAULT(gpio, dir, init) \
+ ASIC3_CONFIG_GPIO((gpio), 0, (dir), (init))
+#define ASIC3_CONFIG_GPIO_DEFAULT_OUT(gpio, init) \
+ ASIC3_CONFIG_GPIO((gpio), 0, 1, (init))
+
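/*
 * Illustrative sketch, not part of the original header: a hypothetical helper
 * showing how the bit layout documented above is packed by ASIC3_CONFIG_GPIO()
 * and can be unpacked again with the ASIC3_CONFIG_GPIO_* accessors.
 */
static inline u16 example_asic3_pwm0_config(void)
{
	/*
	 * gpio number 11 in bits 0..6, alternate function 1 in bits 7..13,
	 * direction 1 (output) in bit 14, initial value 0 in bit 15; this is
	 * the same value (0x408b) as the ASIC3_GPIOA11_PWM0 definition below.
	 */
	return ASIC3_CONFIG_GPIO(11, 1, 1, 0);
}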
+/*
+ * Alternate functions
+ */
+#define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0)
+#define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0)
+#define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0)
+#define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0)
+#define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0)
+#define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0)
+#define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0)
+#define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0)
+#define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0)
+#define ASIC3_GPIOC5_SPI_CLK ASIC3_CONFIG_GPIO(37, 1, 1, 0)
+#define ASIC3_GPIOC5_nCIOW ASIC3_CONFIG_GPIO(37, 1, 1, 0)
+#define ASIC3_GPIOC6_nCIOR ASIC3_CONFIG_GPIO(38, 1, 1, 0)
+#define ASIC3_GPIOC7_nPCE_1 ASIC3_CONFIG_GPIO(39, 1, 0, 0)
+#define ASIC3_GPIOC8_nPCE_2 ASIC3_CONFIG_GPIO(40, 1, 0, 0)
+#define ASIC3_GPIOC9_nPOE ASIC3_CONFIG_GPIO(41, 1, 0, 0)
+#define ASIC3_GPIOC10_nPWE ASIC3_CONFIG_GPIO(42, 1, 0, 0)
+#define ASIC3_GPIOC11_PSKTSEL ASIC3_CONFIG_GPIO(43, 1, 0, 0)
+#define ASIC3_GPIOC12_nPREG ASIC3_CONFIG_GPIO(44, 1, 0, 0)
+#define ASIC3_GPIOC13_nPWAIT ASIC3_CONFIG_GPIO(45, 1, 1, 0)
+#define ASIC3_GPIOC14_nPIOIS16 ASIC3_CONFIG_GPIO(46, 1, 1, 0)
+#define ASIC3_GPIOC15_nPIOR ASIC3_CONFIG_GPIO(47, 1, 0, 0)
+#define ASIC3_GPIOD4_CF_nCD ASIC3_CONFIG_GPIO(52, 1, 0, 0)
+#define ASIC3_GPIOD11_nCIOIS16 ASIC3_CONFIG_GPIO(59, 1, 0, 0)
+#define ASIC3_GPIOD12_nCWAIT ASIC3_CONFIG_GPIO(60, 1, 0, 0)
+#define ASIC3_GPIOD15_nPIOW ASIC3_CONFIG_GPIO(63, 1, 0, 0)
+
+
+#define ASIC3_SPI_Base 0x0400
+#define ASIC3_SPI_Control 0x0000
+#define ASIC3_SPI_TxData 0x0004
+#define ASIC3_SPI_RxData 0x0008
+#define ASIC3_SPI_Int 0x000c
+#define ASIC3_SPI_Status 0x0010
+
+#define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */
+
+#define ASIC3_PWM_0_Base 0x0500
+#define ASIC3_PWM_1_Base 0x0600
+#define ASIC3_PWM_TimeBase 0x0000
+#define ASIC3_PWM_PeriodTime 0x0004
+#define ASIC3_PWM_DutyTime 0x0008
+
+#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */
+#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */
+
+#define ASIC3_NUM_LEDS 3
+#define ASIC3_LED_0_Base 0x0700
+#define ASIC3_LED_1_Base 0x0800
+#define ASIC3_LED_2_Base 0x0900
+#define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */
+#define ASIC3_LED_PeriodTime 0x0004 /* R/W 12 bits */
+#define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */
+#define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */
+
+/* LED TimeBase bits - match ASIC2 */
+#define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */
+ /* Note: max = 5 on hx4700 */
+ /* 0: maximum time base */
+ /* 1: maximum time base / 2 */
+ /* n: maximum time base / 2^n */
+
+#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
+#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */
+#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
+
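/*
 * Illustrative sketch, not part of the original header: per the LED_TBS
 * comment above, the low four bits select a divider of the maximum time
 * base; the divider value 3 (maximum / 2^3) used here is arbitrary.
 */
static inline u8 example_asic3_led_timebase(void)
{
	return LED_EN | (3 & LED_TBS);	/* LED on, auto stop off, not masked */
}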
+#define ASIC3_CLOCK_BASE 0x0A00
+#define ASIC3_CLOCK_CDEX 0x00
+#define ASIC3_CLOCK_SEL 0x04
+
+#define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */
+#define CLOCK_CDEX_SOURCE0 (1 << 0)
+#define CLOCK_CDEX_SOURCE1 (1 << 1)
+#define CLOCK_CDEX_SPI (1 << 2)
+#define CLOCK_CDEX_OWM (1 << 3)
+#define CLOCK_CDEX_PWM0 (1 << 4)
+#define CLOCK_CDEX_PWM1 (1 << 5)
+#define CLOCK_CDEX_LED0 (1 << 6)
+#define CLOCK_CDEX_LED1 (1 << 7)
+#define CLOCK_CDEX_LED2 (1 << 8)
+
+/* Clock settings: 1 for 24.576 MHz, 0 for 12.288 MHz */
+#define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */
+#define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */
+#define CLOCK_CDEX_SMBUS (1 << 11)
+#define CLOCK_CDEX_CONTROL_CX (1 << 12)
+
+#define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */
+#define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */
+
+#define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */
+#define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */
+
+/* R/W: INT clock source control (32.768 kHz) */
+#define CLOCK_SEL_CX (1 << 2)
+
+
+#define ASIC3_INTR_BASE 0x0B00
+
+#define ASIC3_INTR_INT_MASK 0x00 /* Interrupt mask control */
+#define ASIC3_INTR_P_INT_STAT 0x04 /* Peripheral interrupt status */
+#define ASIC3_INTR_INT_CPS 0x08 /* Interrupt timer clock pre-scale */
+#define ASIC3_INTR_INT_TBS 0x0c /* Interrupt timer set */
+
+#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */
+#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */
+#define ASIC3_INTMASK_MASK0 (1 << 2)
+#define ASIC3_INTMASK_MASK1 (1 << 3)
+#define ASIC3_INTMASK_MASK2 (1 << 4)
+#define ASIC3_INTMASK_MASK3 (1 << 5)
+#define ASIC3_INTMASK_MASK4 (1 << 6)
+#define ASIC3_INTMASK_MASK5 (1 << 7)
+
+#define ASIC3_INTR_PERIPHERAL_A (1 << 0)
+#define ASIC3_INTR_PERIPHERAL_B (1 << 1)
+#define ASIC3_INTR_PERIPHERAL_C (1 << 2)
+#define ASIC3_INTR_PERIPHERAL_D (1 << 3)
+#define ASIC3_INTR_LED0 (1 << 4)
+#define ASIC3_INTR_LED1 (1 << 5)
+#define ASIC3_INTR_LED2 (1 << 6)
+#define ASIC3_INTR_SPI (1 << 7)
+#define ASIC3_INTR_SMBUS (1 << 8)
+#define ASIC3_INTR_OWM (1 << 9)
+
+#define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */
+#define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */
+
+
+/* Basic control of the SD ASIC */
+#define ASIC3_SDHWCTRL_BASE 0x0E00
+#define ASIC3_SDHWCTRL_SDCONF 0x00
+
+#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */
+#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */
+#define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */
+#define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */
+
+/* SD card write protection: 0=high */
+#define ASIC3_SDHWCTRL_LEVWP (1 << 4)
+#define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */
+
+/* SD card power supply ctrl 1=enable */
+#define ASIC3_SDHWCTRL_SDPWR (1 << 6)
+
+#define ASIC3_EXTCF_BASE 0x1100
+
+#define ASIC3_EXTCF_SELECT 0x00
+#define ASIC3_EXTCF_RESET 0x04
+
+#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */
+#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */
+#define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */
+#define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */
+#define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */
+#define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ?? used by OWM and CF */
+#define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */
+#define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */
+#define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */
+#define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */
+#define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */
+#define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */
+#define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14)
+#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */
+
+/*********************************************
+ * The Onewire interface (DS1WM) is handled
+ * by the ds1wm driver.
+ *
+ *********************************************/
+
+#define ASIC3_OWM_BASE 0xC00
+
+/*****************************************************************************
+ * The SD configuration registers are at a completely different location
+ * in memory. They are divided into three sets of registers:
+ *
+ * SD_CONFIG Core configuration register
+ * SD_CTRL Control registers for SD operations
+ * SDIO_CTRL Control registers for SDIO operations
+ *
+ *****************************************************************************/
+#define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */
+#define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */
+#define ASIC3_SD_CTRL_BASE 0x1000
+#define ASIC3_SDIO_CTRL_BASE 0x1200
+
+#define ASIC3_MAP_SIZE_32BIT 0x2000
+#define ASIC3_MAP_SIZE_16BIT 0x1000
+
+/* Functions needed by leds-asic3 */
+
+struct asic3;
+extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val);
+extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg);
+
+#endif /* __ASIC3_H__ */
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h
new file mode 100644
index 000000000..1279ab164
--- /dev/null
+++ b/include/linux/mfd/atmel-hlcdc.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ * Copyright (C) 2014 Atmel
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_MFD_HLCDC_H
+#define __LINUX_MFD_HLCDC_H
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+#define ATMEL_HLCDC_CFG(i) ((i) * 0x4)
+#define ATMEL_HLCDC_SIG_CFG LCDCFG(5)
+#define ATMEL_HLCDC_HSPOL BIT(0)
+#define ATMEL_HLCDC_VSPOL BIT(1)
+#define ATMEL_HLCDC_VSPDLYS BIT(2)
+#define ATMEL_HLCDC_VSPDLYE BIT(3)
+#define ATMEL_HLCDC_DISPPOL BIT(4)
+#define ATMEL_HLCDC_DITHER BIT(6)
+#define ATMEL_HLCDC_DISPDLY BIT(7)
+#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8)
+#define ATMEL_HLCDC_PP BIT(10)
+#define ATMEL_HLCDC_VSPSU BIT(12)
+#define ATMEL_HLCDC_VSPHO BIT(13)
+#define ATMEL_HLCDC_GUARDTIME_MASK GENMASK(20, 16)
+
+#define ATMEL_HLCDC_EN 0x20
+#define ATMEL_HLCDC_DIS 0x24
+#define ATMEL_HLCDC_SR 0x28
+#define ATMEL_HLCDC_IER 0x2c
+#define ATMEL_HLCDC_IDR 0x30
+#define ATMEL_HLCDC_IMR 0x34
+#define ATMEL_HLCDC_ISR 0x38
+
+#define ATMEL_HLCDC_CLKPOL BIT(0)
+#define ATMEL_HLCDC_CLKSEL BIT(2)
+#define ATMEL_HLCDC_CLKPWMSEL BIT(3)
+#define ATMEL_HLCDC_CGDIS(i) BIT(8 + (i))
+#define ATMEL_HLCDC_CLKDIV_SHFT 16
+#define ATMEL_HLCDC_CLKDIV_MASK GENMASK(23, 16)
+#define ATMEL_HLCDC_CLKDIV(div) ((div - 2) << ATMEL_HLCDC_CLKDIV_SHFT)
+
+#define ATMEL_HLCDC_PIXEL_CLK BIT(0)
+#define ATMEL_HLCDC_SYNC BIT(1)
+#define ATMEL_HLCDC_DISP BIT(2)
+#define ATMEL_HLCDC_PWM BIT(3)
+#define ATMEL_HLCDC_SIP BIT(4)
+
+#define ATMEL_HLCDC_SOF BIT(0)
+#define ATMEL_HLCDC_SYNCDIS BIT(1)
+#define ATMEL_HLCDC_FIFOERR BIT(4)
+#define ATMEL_HLCDC_LAYER_STATUS(x) BIT((x) + 8)
+
+/**
+ * Structure shared by the MFD device and its subdevices.
+ *
+ * @regmap: register map used to access HLCDC IP registers
+ * @periph_clk: the hlcdc peripheral clock
+ * @sys_clk: the hlcdc system clock
+ * @slow_clk: the system slow clk
+ * @irq: the hlcdc irq
+ */
+struct atmel_hlcdc {
+ struct regmap *regmap;
+ struct clk *periph_clk;
+ struct clk *sys_clk;
+ struct clk *slow_clk;
+ int irq;
+};
+
+#endif /* __LINUX_MFD_HLCDC_H */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
new file mode 100644
index 000000000..dfabd6db7
--- /dev/null
+++ b/include/linux/mfd/axp20x.h
@@ -0,0 +1,278 @@
+/*
+ * Functions and registers to access AXP20X power management chip.
+ *
+ * Copyright (C) 2013, Carlo Caione <carlo@caione.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_AXP20X_H
+#define __LINUX_MFD_AXP20X_H
+
+enum {
+ AXP202_ID = 0,
+ AXP209_ID,
+ AXP288_ID,
+ NR_AXP20X_VARIANTS,
+};
+
+#define AXP20X_DATACACHE(m) (0x04 + (m))
+
+/* Power supply */
+#define AXP20X_PWR_INPUT_STATUS 0x00
+#define AXP20X_PWR_OP_MODE 0x01
+#define AXP20X_USB_OTG_STATUS 0x02
+#define AXP20X_PWR_OUT_CTRL 0x12
+#define AXP20X_DCDC2_V_OUT 0x23
+#define AXP20X_DCDC2_LDO3_V_SCAL 0x25
+#define AXP20X_DCDC3_V_OUT 0x27
+#define AXP20X_LDO24_V_OUT 0x28
+#define AXP20X_LDO3_V_OUT 0x29
+#define AXP20X_VBUS_IPSOUT_MGMT 0x30
+#define AXP20X_V_OFF 0x31
+#define AXP20X_OFF_CTRL 0x32
+#define AXP20X_CHRG_CTRL1 0x33
+#define AXP20X_CHRG_CTRL2 0x34
+#define AXP20X_CHRG_BAK_CTRL 0x35
+#define AXP20X_PEK_KEY 0x36
+#define AXP20X_DCDC_FREQ 0x37
+#define AXP20X_V_LTF_CHRG 0x38
+#define AXP20X_V_HTF_CHRG 0x39
+#define AXP20X_APS_WARN_L1 0x3a
+#define AXP20X_APS_WARN_L2 0x3b
+#define AXP20X_V_LTF_DISCHRG 0x3c
+#define AXP20X_V_HTF_DISCHRG 0x3d
+
+/* Interrupt */
+#define AXP20X_IRQ1_EN 0x40
+#define AXP20X_IRQ2_EN 0x41
+#define AXP20X_IRQ3_EN 0x42
+#define AXP20X_IRQ4_EN 0x43
+#define AXP20X_IRQ5_EN 0x44
+#define AXP20X_IRQ6_EN 0x45
+#define AXP20X_IRQ1_STATE 0x48
+#define AXP20X_IRQ2_STATE 0x49
+#define AXP20X_IRQ3_STATE 0x4a
+#define AXP20X_IRQ4_STATE 0x4b
+#define AXP20X_IRQ5_STATE 0x4c
+#define AXP20X_IRQ6_STATE 0x4d
+
+/* ADC */
+#define AXP20X_ACIN_V_ADC_H 0x56
+#define AXP20X_ACIN_V_ADC_L 0x57
+#define AXP20X_ACIN_I_ADC_H 0x58
+#define AXP20X_ACIN_I_ADC_L 0x59
+#define AXP20X_VBUS_V_ADC_H 0x5a
+#define AXP20X_VBUS_V_ADC_L 0x5b
+#define AXP20X_VBUS_I_ADC_H 0x5c
+#define AXP20X_VBUS_I_ADC_L 0x5d
+#define AXP20X_TEMP_ADC_H 0x5e
+#define AXP20X_TEMP_ADC_L 0x5f
+#define AXP20X_TS_IN_H 0x62
+#define AXP20X_TS_IN_L 0x63
+#define AXP20X_GPIO0_V_ADC_H 0x64
+#define AXP20X_GPIO0_V_ADC_L 0x65
+#define AXP20X_GPIO1_V_ADC_H 0x66
+#define AXP20X_GPIO1_V_ADC_L 0x67
+#define AXP20X_PWR_BATT_H 0x70
+#define AXP20X_PWR_BATT_M 0x71
+#define AXP20X_PWR_BATT_L 0x72
+#define AXP20X_BATT_V_H 0x78
+#define AXP20X_BATT_V_L 0x79
+#define AXP20X_BATT_CHRG_I_H 0x7a
+#define AXP20X_BATT_CHRG_I_L 0x7b
+#define AXP20X_BATT_DISCHRG_I_H 0x7c
+#define AXP20X_BATT_DISCHRG_I_L 0x7d
+#define AXP20X_IPSOUT_V_HIGH_H 0x7e
+#define AXP20X_IPSOUT_V_HIGH_L 0x7f
+
+/* Power supply */
+#define AXP20X_DCDC_MODE 0x80
+#define AXP20X_ADC_EN1 0x82
+#define AXP20X_ADC_EN2 0x83
+#define AXP20X_ADC_RATE 0x84
+#define AXP20X_GPIO10_IN_RANGE 0x85
+#define AXP20X_GPIO1_ADC_IRQ_RIS 0x86
+#define AXP20X_GPIO1_ADC_IRQ_FAL 0x87
+#define AXP20X_TIMER_CTRL 0x8a
+#define AXP20X_VBUS_MON 0x8b
+#define AXP20X_OVER_TMP 0x8f
+
+/* GPIO */
+#define AXP20X_GPIO0_CTRL 0x90
+#define AXP20X_LDO5_V_OUT 0x91
+#define AXP20X_GPIO1_CTRL 0x92
+#define AXP20X_GPIO2_CTRL 0x93
+#define AXP20X_GPIO20_SS 0x94
+#define AXP20X_GPIO3_CTRL 0x95
+
+/* Battery */
+#define AXP20X_CHRG_CC_31_24 0xb0
+#define AXP20X_CHRG_CC_23_16 0xb1
+#define AXP20X_CHRG_CC_15_8 0xb2
+#define AXP20X_CHRG_CC_7_0 0xb3
+#define AXP20X_DISCHRG_CC_31_24 0xb4
+#define AXP20X_DISCHRG_CC_23_16 0xb5
+#define AXP20X_DISCHRG_CC_15_8 0xb6
+#define AXP20X_DISCHRG_CC_7_0 0xb7
+#define AXP20X_CC_CTRL 0xb8
+#define AXP20X_FG_RES 0xb9
+
+/* AXP288 specific registers */
+#define AXP288_PMIC_ADC_H 0x56
+#define AXP288_PMIC_ADC_L 0x57
+#define AXP288_ADC_TS_PIN_CTRL 0x84
+#define AXP288_PMIC_ADC_EN 0x84
+
+/* Fuel Gauge */
+#define AXP288_FG_RDC1_REG 0xba
+#define AXP288_FG_RDC0_REG 0xbb
+#define AXP288_FG_OCVH_REG 0xbc
+#define AXP288_FG_OCVL_REG 0xbd
+#define AXP288_FG_OCV_CURVE_REG 0xc0
+#define AXP288_FG_DES_CAP1_REG 0xe0
+#define AXP288_FG_DES_CAP0_REG 0xe1
+#define AXP288_FG_CC_MTR1_REG 0xe2
+#define AXP288_FG_CC_MTR0_REG 0xe3
+#define AXP288_FG_OCV_CAP_REG 0xe4
+#define AXP288_FG_CC_CAP_REG 0xe5
+#define AXP288_FG_LOW_CAP_REG 0xe6
+#define AXP288_FG_TUNE0 0xe8
+#define AXP288_FG_TUNE1 0xe9
+#define AXP288_FG_TUNE2 0xea
+#define AXP288_FG_TUNE3 0xeb
+#define AXP288_FG_TUNE4 0xec
+#define AXP288_FG_TUNE5 0xed
+
+/* Regulators IDs */
+enum {
+ AXP20X_LDO1 = 0,
+ AXP20X_LDO2,
+ AXP20X_LDO3,
+ AXP20X_LDO4,
+ AXP20X_LDO5,
+ AXP20X_DCDC2,
+ AXP20X_DCDC3,
+ AXP20X_REG_ID_MAX,
+};
+
+/* IRQs */
+enum {
+ AXP20X_IRQ_ACIN_OVER_V = 1,
+ AXP20X_IRQ_ACIN_PLUGIN,
+ AXP20X_IRQ_ACIN_REMOVAL,
+ AXP20X_IRQ_VBUS_OVER_V,
+ AXP20X_IRQ_VBUS_PLUGIN,
+ AXP20X_IRQ_VBUS_REMOVAL,
+ AXP20X_IRQ_VBUS_V_LOW,
+ AXP20X_IRQ_BATT_PLUGIN,
+ AXP20X_IRQ_BATT_REMOVAL,
+ AXP20X_IRQ_BATT_ENT_ACT_MODE,
+ AXP20X_IRQ_BATT_EXIT_ACT_MODE,
+ AXP20X_IRQ_CHARG,
+ AXP20X_IRQ_CHARG_DONE,
+ AXP20X_IRQ_BATT_TEMP_HIGH,
+ AXP20X_IRQ_BATT_TEMP_LOW,
+ AXP20X_IRQ_DIE_TEMP_HIGH,
+ AXP20X_IRQ_CHARG_I_LOW,
+ AXP20X_IRQ_DCDC1_V_LONG,
+ AXP20X_IRQ_DCDC2_V_LONG,
+ AXP20X_IRQ_DCDC3_V_LONG,
+ AXP20X_IRQ_PEK_SHORT = 22,
+ AXP20X_IRQ_PEK_LONG,
+ AXP20X_IRQ_N_OE_PWR_ON,
+ AXP20X_IRQ_N_OE_PWR_OFF,
+ AXP20X_IRQ_VBUS_VALID,
+ AXP20X_IRQ_VBUS_NOT_VALID,
+ AXP20X_IRQ_VBUS_SESS_VALID,
+ AXP20X_IRQ_VBUS_SESS_END,
+ AXP20X_IRQ_LOW_PWR_LVL1,
+ AXP20X_IRQ_LOW_PWR_LVL2,
+ AXP20X_IRQ_TIMER,
+ AXP20X_IRQ_PEK_RIS_EDGE,
+ AXP20X_IRQ_PEK_FAL_EDGE,
+ AXP20X_IRQ_GPIO3_INPUT,
+ AXP20X_IRQ_GPIO2_INPUT,
+ AXP20X_IRQ_GPIO1_INPUT,
+ AXP20X_IRQ_GPIO0_INPUT,
+};
+
+enum axp288_irqs {
+ AXP288_IRQ_VBUS_FALL = 2,
+ AXP288_IRQ_VBUS_RISE,
+ AXP288_IRQ_OV,
+ AXP288_IRQ_FALLING_ALT,
+ AXP288_IRQ_RISING_ALT,
+ AXP288_IRQ_OV_ALT,
+ AXP288_IRQ_DONE = 10,
+ AXP288_IRQ_CHARGING,
+ AXP288_IRQ_SAFE_QUIT,
+ AXP288_IRQ_SAFE_ENTER,
+ AXP288_IRQ_ABSENT,
+ AXP288_IRQ_APPEND,
+ AXP288_IRQ_QWBTU,
+ AXP288_IRQ_WBTU,
+ AXP288_IRQ_QWBTO,
+ AXP288_IRQ_WBTO,
+ AXP288_IRQ_QCBTU,
+ AXP288_IRQ_CBTU,
+ AXP288_IRQ_QCBTO,
+ AXP288_IRQ_CBTO,
+ AXP288_IRQ_WL2,
+ AXP288_IRQ_WL1,
+ AXP288_IRQ_GPADC,
+ AXP288_IRQ_OT = 31,
+ AXP288_IRQ_GPIO0,
+ AXP288_IRQ_GPIO1,
+ AXP288_IRQ_POKO,
+ AXP288_IRQ_POKL,
+ AXP288_IRQ_POKS,
+ AXP288_IRQ_POKN,
+ AXP288_IRQ_POKP,
+ AXP288_IRQ_TIMER,
+ AXP288_IRQ_MV_CHNG,
+ AXP288_IRQ_BC_USB_CHNG,
+};
+
+#define AXP288_TS_ADC_H 0x58
+#define AXP288_TS_ADC_L 0x59
+#define AXP288_GP_ADC_H 0x5a
+#define AXP288_GP_ADC_L 0x5b
+
+struct axp20x_dev {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irqc;
+ long variant;
+ int nr_cells;
+ struct mfd_cell *cells;
+ const struct regmap_config *regmap_cfg;
+ const struct regmap_irq_chip *regmap_irq_chip;
+};
+
+#define BATTID_LEN 64
+#define OCV_CURVE_SIZE 32
+#define MAX_THERM_CURVE_SIZE 25
+#define PD_DEF_MIN_TEMP 0
+#define PD_DEF_MAX_TEMP 55
+
+struct axp20x_fg_pdata {
+ char battid[BATTID_LEN + 1];
+ int design_cap;
+ int min_volt;
+ int max_volt;
+ int max_temp;
+ int min_temp;
+ int cap1;
+ int cap0;
+ int rdc1;
+ int rdc0;
+ int ocv_curve[OCV_CURVE_SIZE];
+ int tcsz;
+ int thermistor_curve[MAX_THERM_CURVE_SIZE][2];
+};
+
+#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/bcm590xx.h b/include/linux/mfd/bcm590xx.h
new file mode 100644
index 000000000..267aedee1
--- /dev/null
+++ b/include/linux/mfd/bcm590xx.h
@@ -0,0 +1,34 @@
+/*
+ * Broadcom BCM590xx PMU
+ *
+ * Copyright 2014 Linaro Limited
+ * Author: Matt Porter <mporter@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_BCM590XX_H
+#define __LINUX_MFD_BCM590XX_H
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+/* max register address */
+#define BCM590XX_MAX_REGISTER_PRI 0xe7
+#define BCM590XX_MAX_REGISTER_SEC 0xf0
+
+struct bcm590xx {
+ struct device *dev;
+ struct i2c_client *i2c_pri;
+ struct i2c_client *i2c_sec;
+ struct regmap *regmap_pri;
+ struct regmap *regmap_sec;
+ unsigned int id;
+};
+
+#endif /* __LINUX_MFD_BCM590XX_H */
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
new file mode 100644
index 000000000..a76bc100b
--- /dev/null
+++ b/include/linux/mfd/core.h
@@ -0,0 +1,123 @@
+/*
+ * drivers/mfd/mfd-core.h
+ *
+ * core MFD support
+ * Copyright (c) 2006 Ian Molton
+ * Copyright (c) 2007 Dmitry Baryshkov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef MFD_CORE_H
+#define MFD_CORE_H
+
+#include <linux/platform_device.h>
+
+struct irq_domain;
+
+/*
+ * This struct describes the MFD part ("cell").
+ * After registration the copy of this structure will become the platform data
+ * of the resulting platform_device
+ */
+struct mfd_cell {
+ const char *name;
+ int id;
+
+ /* refcounting for multiple drivers to use a single cell */
+ atomic_t *usage_count;
+ int (*enable)(struct platform_device *dev);
+ int (*disable)(struct platform_device *dev);
+
+ int (*suspend)(struct platform_device *dev);
+ int (*resume)(struct platform_device *dev);
+
+ /* platform data passed to the sub devices drivers */
+ void *platform_data;
+ size_t pdata_size;
+ /*
+ * Device Tree compatible string
+ * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details
+ */
+ const char *of_compatible;
+
+ /* Matches ACPI PNP id, either _HID or _CID */
+ const char *acpi_pnpid;
+
+ /*
+ * These resources can be specified relative to the parent device.
+ * For accessing hardware you should use resources from the platform dev
+ */
+ int num_resources;
+ const struct resource *resources;
+
+ /* don't check for resource conflicts */
+ bool ignore_resource_conflicts;
+
+ /*
+ * Disable runtime PM callbacks for this subdevice - see
+ * pm_runtime_no_callbacks().
+ */
+ bool pm_runtime_no_callbacks;
+
+ /* A list of regulator supplies that should be mapped to the MFD
+ * device rather than the child device when requested
+ */
+ const char * const *parent_supplies;
+ int num_parent_supplies;
+};
+
+/*
+ * Convenience functions for clients using shared cells. Refcounting
+ * happens automatically, with the cell's enable/disable callbacks
+ * being called only when a device is first being enabled or no other
+ * clients are making use of it.
+ */
+extern int mfd_cell_enable(struct platform_device *pdev);
+extern int mfd_cell_disable(struct platform_device *pdev);
+
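+/*
+ * Illustrative sketch (editor's addition, not from the original header):
+ * a sub-device driver sharing a cell would typically bracket its probe
+ * path with these helpers, e.g.:
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		int ret = mfd_cell_enable(pdev);
+ *
+ *		if (ret)
+ *			return ret;
+ *		... claim resources, register with the subsystem ...
+ *		return 0;
+ *	}
+ *
+ * and call mfd_cell_disable(pdev) on the corresponding remove path.
+ * "foo" is a made-up driver name used only for this example.
+ */
+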
+/*
+ * "Clone" multiple platform devices for a single cell. This is to be used
+ * for devices that have multiple users of a cell. For example, if an mfd
+ * driver wants the cell "foo" to be used by a GPIO driver, an MTD driver,
+ * and a platform driver, the following bit of code would be used after first
+ * calling mfd_add_devices():
+ *
+ * const char *fclones[] = { "foo-gpio", "foo-mtd" };
+ * err = mfd_clone_cells("foo", fclones, ARRAY_SIZE(fclones));
+ *
+ * Each driver (MTD, GPIO, and platform driver) would then register
+ * platform_drivers for "foo-mtd", "foo-gpio", and "foo", respectively.
+ * The cell's .enable/.disable hooks should be used to deal with hardware
+ * resource contention.
+ */
+extern int mfd_clone_cell(const char *cell, const char **clones,
+ size_t n_clones);
+
+/*
+ * Given a platform device that's been created by mfd_add_devices(), fetch
+ * the mfd_cell that created it.
+ */
+static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
+{
+ return pdev->mfd_cell;
+}
+
+extern int mfd_add_devices(struct device *parent, int id,
+ const struct mfd_cell *cells, int n_devs,
+ struct resource *mem_base,
+ int irq_base, struct irq_domain *irq_domain);
+
+static inline int mfd_add_hotplug_devices(struct device *parent,
+ const struct mfd_cell *cells, int n_devs)
+{
+ return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_devs,
+ NULL, 0, NULL);
+}
+
+extern void mfd_remove_devices(struct device *parent);
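+
+/*
+ * Illustrative sketch (editor's addition): a parent MFD driver registering
+ * its sub-devices with mfd_add_devices(). The cell array and names here are
+ * hypothetical, and "dev" stands for the parent struct device:
+ *
+ *	static const struct mfd_cell foo_cells[] = {
+ *		{ .name = "foo-gpio" },
+ *		{ .name = "foo-rtc" },
+ *	};
+ *
+ *	ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, foo_cells,
+ *			      ARRAY_SIZE(foo_cells), NULL, 0, NULL);
+ *
+ * A matching mfd_remove_devices(dev) on the removal path tears the children
+ * down again.
+ */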
+
+#endif
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
new file mode 100644
index 000000000..324a34683
--- /dev/null
+++ b/include/linux/mfd/cros_ec.h
@@ -0,0 +1,201 @@
+/*
+ * ChromeOS EC multi-function device
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_CROS_EC_H
+#define __LINUX_MFD_CROS_EC_H
+
+#include <linux/cdev.h>
+#include <linux/notifier.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/mutex.h>
+
+/*
+ * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
+ */
+enum {
+ EC_MSG_TX_HEADER_BYTES = 3,
+ EC_MSG_TX_TRAILER_BYTES = 1,
+ EC_MSG_TX_PROTO_BYTES = EC_MSG_TX_HEADER_BYTES +
+ EC_MSG_TX_TRAILER_BYTES,
+ EC_MSG_RX_PROTO_BYTES = 3,
+
+ /* Max length of messages */
+ EC_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
+ EC_MSG_TX_PROTO_BYTES,
+};
+
+/*
+ * @version: Command version number (often 0)
+ * @command: Command to send (EC_CMD_...)
+ * @outsize: Outgoing length in bytes
+ * @insize: Max number of bytes to accept from EC
+ * @result: EC's response to the command (separate from communication failure)
+ * @outdata: Outgoing data to EC
+ * @indata: Where to put the incoming data from EC
+ */
+struct cros_ec_command {
+ uint32_t version;
+ uint32_t command;
+ uint32_t outsize;
+ uint32_t insize;
+ uint32_t result;
+ uint8_t outdata[EC_PROTO2_MAX_PARAM_SIZE];
+ uint8_t indata[EC_PROTO2_MAX_PARAM_SIZE];
+};
+
+/**
+ * struct cros_ec_device - Information about a ChromeOS EC device
+ *
+ * @ec_name: name of EC device (e.g. 'chromeos-ec')
+ * @phys_name: name of physical comms layer (e.g. 'i2c-4')
+ * @dev: Device pointer for physical comms device
+ * @vdev: Device pointer for virtual comms device
+ * @cdev: Character device structure for virtual comms device
+ * @was_wake_device: true if this device was set to wake the system from
+ * sleep at the last suspend
+ * @cmd_readmem: direct read of the EC memory-mapped region, if supported
+ * @offset is within EC_LPC_ADDR_MEMMAP region.
+ * @bytes: number of bytes to read. zero means "read a string" (including
+ * the trailing '\0'). At most only EC_MEMMAP_SIZE bytes can be read.
+ * Caller must ensure that the buffer is large enough for the result when
+ * reading a string.
+ *
+ * @priv: Private data
+ * @irq: Interrupt to use
+ * @din: input buffer (for data from EC)
+ * @dout: output buffer (for data to EC)
+ * \note
+ * These two buffers will always be dword-aligned and include enough
+ * space for up to 7 word-alignment bytes also, so we can ensure that
+ * the body of the message is always dword-aligned (64-bit).
+ * We use this alignment to keep ARM and x86 happy. Probably word
+ * alignment would be OK; there might be a small performance advantage
+ * to using dword.
+ * @din_size: size of din buffer to allocate (zero to use static din)
+ * @dout_size: size of dout buffer to allocate (zero to use static dout)
+ * @parent: pointer to parent device (e.g. i2c or spi device)
+ * @wake_enabled: true if this device can wake the system from sleep
+ * @cmd_xfer: send command to EC and get response
+ * Returns the number of bytes received if the communication succeeded, but
+ * that doesn't mean the EC was happy with the command. The caller
+ * should check msg.result for the EC's result code.
+ * @lock: one transaction at a time
+ */
+struct cros_ec_device {
+
+ /* These are used by other drivers that want to talk to the EC */
+ const char *ec_name;
+ const char *phys_name;
+ struct device *dev;
+ struct device *vdev;
+ struct cdev cdev;
+ bool was_wake_device;
+ struct class *cros_class;
+ int (*cmd_readmem)(struct cros_ec_device *ec, unsigned int offset,
+ unsigned int bytes, void *dest);
+
+ /* These are used to implement the platform-specific interface */
+ void *priv;
+ int irq;
+ uint8_t *din;
+ uint8_t *dout;
+ int din_size;
+ int dout_size;
+ struct device *parent;
+ bool wake_enabled;
+ int (*cmd_xfer)(struct cros_ec_device *ec,
+ struct cros_ec_command *msg);
+ struct mutex lock;
+};
+
+/**
+ * cros_ec_suspend - Handle a suspend operation for the ChromeOS EC device
+ *
+ * This can be called by drivers to handle a suspend event.
+ *
+ * @ec_dev: Device to suspend
+ * @return 0 if ok, -ve on error
+ */
+int cros_ec_suspend(struct cros_ec_device *ec_dev);
+
+/**
+ * cros_ec_resume - Handle a resume operation for the ChromeOS EC device
+ *
+ * This can be called by drivers to handle a resume event.
+ *
+ * @ec_dev: Device to resume
+ * @return 0 if ok, -ve on error
+ */
+int cros_ec_resume(struct cros_ec_device *ec_dev);
+
+/**
+ * cros_ec_prepare_tx - Prepare an outgoing message in the output buffer
+ *
+ * This is intended to be used by all ChromeOS EC drivers, but at present
+ * only SPI uses it. Once LPC uses the same protocol it can start using it.
+ * I2C could use it now, with a refactor of the existing code.
+ *
+ * @ec_dev: Device to register
+ * @msg: Message to write
+ */
+int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
+ * cros_ec_check_result - Check ec_msg->result
+ *
+ * This is used by ChromeOS EC drivers to check the ec_msg->result for
+ * errors and to warn about them.
+ *
+ * @ec_dev: EC device
+ * @msg: Message to check
+ */
+int cros_ec_check_result(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
+
+/**
+ * cros_ec_cmd_xfer - Send a command to the ChromeOS EC
+ *
+ * Call this to send a command to the ChromeOS EC. This should be used
+ * instead of calling the EC's cmd_xfer() callback directly.
+ *
+ * @ec_dev: EC device
+ * @msg: Message to write
+ */
+int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg);
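+
+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * sending the simple EC_CMD_HELLO command from cros_ec_commands.h, assuming
+ * "ec_dev" is an already-registered struct cros_ec_device:
+ *
+ *	struct cros_ec_command msg = {
+ *		.command = EC_CMD_HELLO,
+ *		.outsize = sizeof(struct ec_params_hello),
+ *		.insize = sizeof(struct ec_response_hello),
+ *	};
+ *	struct ec_params_hello *req = (void *)msg.outdata;
+ *	struct ec_response_hello *resp = (void *)msg.indata;
+ *	int ret;
+ *
+ *	req->in_data = 0xa0b0c0d0;
+ *	ret = cros_ec_cmd_xfer(ec_dev, &msg);
+ *	if (ret >= 0 && msg.result == EC_RES_SUCCESS)
+ *		handle(resp->out_data);
+ *
+ * where resp->out_data is expected to read back 0xa1b2c3d4 (in_data +
+ * 0x01020304) and handle() stands in for whatever the caller does next.
+ */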
+
+/**
+ * cros_ec_remove - Remove a ChromeOS EC
+ *
+ * Call this to deregister a ChromeOS EC, then clean up any private data.
+ *
+ * @ec_dev: Device to register
+ * @return 0 if ok, -ve on error
+ */
+int cros_ec_remove(struct cros_ec_device *ec_dev);
+
+/**
+ * cros_ec_register - Register a new ChromeOS EC, using the provided info
+ *
+ * Before calling this, allocate a pointer to a new device and then fill
+ * in all the fields up to the --private-- marker.
+ *
+ * @ec_dev: Device to register
+ * @return 0 if ok, -ve on error
+ */
+int cros_ec_register(struct cros_ec_device *ec_dev);
+
+#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
new file mode 100644
index 000000000..a49cd41fe
--- /dev/null
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -0,0 +1,2350 @@
+/*
+ * Host communication command constants for ChromeOS EC
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * The ChromeOS EC multi function device is used to mux all the requests
+ * to the EC device for its multiple features: keyboard controller,
+ * battery charging and regulator control, and firmware update.
+ *
+ * NOTE: This file is copied verbatim from the ChromeOS EC Open Source
+ * project in an attempt to make future updates easy to make.
+ */
+
+#ifndef __CROS_EC_COMMANDS_H
+#define __CROS_EC_COMMANDS_H
+
+/*
+ * Current version of this protocol
+ *
+ * TODO(crosbug.com/p/11223): This is effectively useless; protocol is
+ * determined in other ways. Remove this once the kernel code no longer
+ * depends on it.
+ */
+#define EC_PROTO_VERSION 0x00000002
+
+/* Command version mask */
+#define EC_VER_MASK(version) (1UL << (version))
+
+/* I/O addresses for ACPI commands */
+#define EC_LPC_ADDR_ACPI_DATA 0x62
+#define EC_LPC_ADDR_ACPI_CMD 0x66
+
+/* I/O addresses for host command */
+#define EC_LPC_ADDR_HOST_DATA 0x200
+#define EC_LPC_ADDR_HOST_CMD 0x204
+
+/* I/O addresses for host command args and params */
+/* Protocol version 2 */
+#define EC_LPC_ADDR_HOST_ARGS 0x800 /* And 0x801, 0x802, 0x803 */
+#define EC_LPC_ADDR_HOST_PARAM 0x804 /* For version 2 params; size is
+ * EC_PROTO2_MAX_PARAM_SIZE */
+/* Protocol version 3 */
+#define EC_LPC_ADDR_HOST_PACKET 0x800 /* Offset of version 3 packet */
+#define EC_LPC_HOST_PACKET_SIZE 0x100 /* Max size of version 3 packet */
+
+/* The actual block is 0x800-0x8ff, but some BIOSes think it's 0x880-0x8ff
+ * and they tell the kernel that, so we have to treat it as two parts. */
+#define EC_HOST_CMD_REGION0 0x800
+#define EC_HOST_CMD_REGION1 0x880
+#define EC_HOST_CMD_REGION_SIZE 0x80
+
+/* EC command register bit functions */
+#define EC_LPC_CMDR_DATA (1 << 0) /* Data ready for host to read */
+#define EC_LPC_CMDR_PENDING (1 << 1) /* Write pending to EC */
+#define EC_LPC_CMDR_BUSY (1 << 2) /* EC is busy processing a command */
+#define EC_LPC_CMDR_CMD (1 << 3) /* Last host write was a command */
+#define EC_LPC_CMDR_ACPI_BRST (1 << 4) /* Burst mode (not used) */
+#define EC_LPC_CMDR_SCI (1 << 5) /* SCI event is pending */
+#define EC_LPC_CMDR_SMI (1 << 6) /* SMI event is pending */
+
+#define EC_LPC_ADDR_MEMMAP 0x900
+#define EC_MEMMAP_SIZE 255 /* ACPI IO buffer max is 255 bytes */
+#define EC_MEMMAP_TEXT_MAX 8 /* Size of a string in the memory map */
+
+/* The offset address of each type of data in mapped memory. */
+#define EC_MEMMAP_TEMP_SENSOR 0x00 /* Temp sensors 0x00 - 0x0f */
+#define EC_MEMMAP_FAN 0x10 /* Fan speeds 0x10 - 0x17 */
+#define EC_MEMMAP_TEMP_SENSOR_B 0x18 /* More temp sensors 0x18 - 0x1f */
+#define EC_MEMMAP_ID 0x20 /* 0x20 == 'E', 0x21 == 'C' */
+#define EC_MEMMAP_ID_VERSION 0x22 /* Version of data in 0x20 - 0x2f */
+#define EC_MEMMAP_THERMAL_VERSION 0x23 /* Version of data in 0x00 - 0x1f */
+#define EC_MEMMAP_BATTERY_VERSION 0x24 /* Version of data in 0x40 - 0x7f */
+#define EC_MEMMAP_SWITCHES_VERSION 0x25 /* Version of data in 0x30 - 0x33 */
+#define EC_MEMMAP_EVENTS_VERSION 0x26 /* Version of data in 0x34 - 0x3f */
+#define EC_MEMMAP_HOST_CMD_FLAGS 0x27 /* Host cmd interface flags (8 bits) */
+/* Unused 0x28 - 0x2f */
+#define EC_MEMMAP_SWITCHES 0x30 /* 8 bits */
+/* Unused 0x31 - 0x33 */
+#define EC_MEMMAP_HOST_EVENTS 0x34 /* 32 bits */
+/* Reserve 0x38 - 0x3f for additional host event-related stuff */
+/* Battery values are all 32 bits */
+#define EC_MEMMAP_BATT_VOLT 0x40 /* Battery Present Voltage */
+#define EC_MEMMAP_BATT_RATE 0x44 /* Battery Present Rate */
+#define EC_MEMMAP_BATT_CAP 0x48 /* Battery Remaining Capacity */
+#define EC_MEMMAP_BATT_FLAG 0x4c /* Battery State, defined below */
+#define EC_MEMMAP_BATT_DCAP 0x50 /* Battery Design Capacity */
+#define EC_MEMMAP_BATT_DVLT 0x54 /* Battery Design Voltage */
+#define EC_MEMMAP_BATT_LFCC 0x58 /* Battery Last Full Charge Capacity */
+#define EC_MEMMAP_BATT_CCNT 0x5c /* Battery Cycle Count */
+/* Strings are all 8 bytes (EC_MEMMAP_TEXT_MAX) */
+#define EC_MEMMAP_BATT_MFGR 0x60 /* Battery Manufacturer String */
+#define EC_MEMMAP_BATT_MODEL 0x68 /* Battery Model Number String */
+#define EC_MEMMAP_BATT_SERIAL 0x70 /* Battery Serial Number String */
+#define EC_MEMMAP_BATT_TYPE 0x78 /* Battery Type String */
+#define EC_MEMMAP_ALS 0x80 /* ALS readings in lux (2 X 16 bits) */
+/* Unused 0x84 - 0x8f */
+#define EC_MEMMAP_ACC_STATUS 0x90 /* Accelerometer status (8 bits) */
+/* Unused 0x91 */
+#define EC_MEMMAP_ACC_DATA 0x92 /* Accelerometer data 0x92 - 0x9f */
+#define EC_MEMMAP_GYRO_DATA 0xa0 /* Gyroscope data 0xa0 - 0xa5 */
+/* Unused 0xa6 - 0xfe (remember, 0xff is NOT part of the memmap region) */
+
+
+/* Define the format of the accelerometer mapped memory status byte. */
+#define EC_MEMMAP_ACC_STATUS_SAMPLE_ID_MASK 0x0f
+#define EC_MEMMAP_ACC_STATUS_BUSY_BIT (1 << 4)
+#define EC_MEMMAP_ACC_STATUS_PRESENCE_BIT (1 << 7)
+
+/* Number of temp sensors at EC_MEMMAP_TEMP_SENSOR */
+#define EC_TEMP_SENSOR_ENTRIES 16
+/*
+ * Number of temp sensors at EC_MEMMAP_TEMP_SENSOR_B.
+ *
+ * Valid only if EC_MEMMAP_THERMAL_VERSION returns >= 2.
+ */
+#define EC_TEMP_SENSOR_B_ENTRIES 8
+
+/* Special values for mapped temperature sensors */
+#define EC_TEMP_SENSOR_NOT_PRESENT 0xff
+#define EC_TEMP_SENSOR_ERROR 0xfe
+#define EC_TEMP_SENSOR_NOT_POWERED 0xfd
+#define EC_TEMP_SENSOR_NOT_CALIBRATED 0xfc
+/*
+ * The offset of temperature value stored in mapped memory. This allows
+ * reporting a temperature range of 200K to 454K = -73C to 181C.
+ */
+#define EC_TEMP_SENSOR_OFFSET 200
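+
+/*
+ * Illustrative example (editor's addition): a raw byte read from the
+ * EC_MEMMAP_TEMP_SENSOR region is converted to Kelvin by adding the offset
+ * above, and to Celsius by then subtracting 273, where "raw" is the byte
+ * read from the sensor entry:
+ *
+ *	int kelvin = raw + EC_TEMP_SENSOR_OFFSET;
+ *	int celsius = kelvin - 273;
+ */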
+
+/*
+ * Number of ALS readings at EC_MEMMAP_ALS
+ */
+#define EC_ALS_ENTRIES 2
+
+/*
+ * The default value a temperature sensor will return when it is present but
+ * has not been read this boot. This is a reasonable number to avoid
+ * triggering alarms on the host.
+ */
+#define EC_TEMP_SENSOR_DEFAULT (296 - EC_TEMP_SENSOR_OFFSET)
+
+#define EC_FAN_SPEED_ENTRIES 4 /* Number of fans at EC_MEMMAP_FAN */
+#define EC_FAN_SPEED_NOT_PRESENT 0xffff /* Entry not present */
+#define EC_FAN_SPEED_STALLED 0xfffe /* Fan stalled */
+
+/* Battery bit flags at EC_MEMMAP_BATT_FLAG. */
+#define EC_BATT_FLAG_AC_PRESENT 0x01
+#define EC_BATT_FLAG_BATT_PRESENT 0x02
+#define EC_BATT_FLAG_DISCHARGING 0x04
+#define EC_BATT_FLAG_CHARGING 0x08
+#define EC_BATT_FLAG_LEVEL_CRITICAL 0x10
+
+/* Switch flags at EC_MEMMAP_SWITCHES */
+#define EC_SWITCH_LID_OPEN 0x01
+#define EC_SWITCH_POWER_BUTTON_PRESSED 0x02
+#define EC_SWITCH_WRITE_PROTECT_DISABLED 0x04
+/* Was recovery requested via keyboard; now unused. */
+#define EC_SWITCH_IGNORE1 0x08
+/* Recovery requested via dedicated signal (from servo board) */
+#define EC_SWITCH_DEDICATED_RECOVERY 0x10
+/* Was fake developer mode switch; now unused. Remove in next refactor. */
+#define EC_SWITCH_IGNORE0 0x20
+
+/* Host command interface flags */
+/* Host command interface supports LPC args (LPC interface only) */
+#define EC_HOST_CMD_FLAG_LPC_ARGS_SUPPORTED 0x01
+/* Host command interface supports version 3 protocol */
+#define EC_HOST_CMD_FLAG_VERSION_3 0x02
+
+/* Wireless switch flags */
+#define EC_WIRELESS_SWITCH_ALL ~0x00 /* All flags */
+#define EC_WIRELESS_SWITCH_WLAN 0x01 /* WLAN radio */
+#define EC_WIRELESS_SWITCH_BLUETOOTH 0x02 /* Bluetooth radio */
+#define EC_WIRELESS_SWITCH_WWAN 0x04 /* WWAN power */
+#define EC_WIRELESS_SWITCH_WLAN_POWER 0x08 /* WLAN power */
+
+/*
+ * This header file is used in coreboot both in C and ACPI code. The ACPI code
+ * is pre-processed to handle constants but the ASL compiler is unable to
+ * handle actual C code so keep it separate.
+ */
+#ifndef __ACPI__
+
+/*
+ * Define __packed if someone hasn't beat us to it. Linux kernel style
+ * checking prefers __packed over __attribute__((packed)).
+ */
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+
+/* LPC command status byte masks */
+/* EC has written a byte in the data register and host hasn't read it yet */
+#define EC_LPC_STATUS_TO_HOST 0x01
+/* Host has written a command/data byte and the EC hasn't read it yet */
+#define EC_LPC_STATUS_FROM_HOST 0x02
+/* EC is processing a command */
+#define EC_LPC_STATUS_PROCESSING 0x04
+/* Last write to EC was a command, not data */
+#define EC_LPC_STATUS_LAST_CMD 0x08
+/* EC is in burst mode. Unsupported by Chrome EC, so this bit is never set */
+#define EC_LPC_STATUS_BURST_MODE 0x10
+/* SCI event is pending (requesting SCI query) */
+#define EC_LPC_STATUS_SCI_PENDING 0x20
+/* SMI event is pending (requesting SMI query) */
+#define EC_LPC_STATUS_SMI_PENDING 0x40
+/* (reserved) */
+#define EC_LPC_STATUS_RESERVED 0x80
+
+/*
+ * EC is busy. This covers both the EC processing a command and the case
+ * where the host has written a new command that the EC hasn't picked up yet.
+ */
+#define EC_LPC_STATUS_BUSY_MASK \
+ (EC_LPC_STATUS_FROM_HOST | EC_LPC_STATUS_PROCESSING)
+
+/* Host command response codes */
+enum ec_status {
+ EC_RES_SUCCESS = 0,
+ EC_RES_INVALID_COMMAND = 1,
+ EC_RES_ERROR = 2,
+ EC_RES_INVALID_PARAM = 3,
+ EC_RES_ACCESS_DENIED = 4,
+ EC_RES_INVALID_RESPONSE = 5,
+ EC_RES_INVALID_VERSION = 6,
+ EC_RES_INVALID_CHECKSUM = 7,
+ EC_RES_IN_PROGRESS = 8, /* Accepted, command in progress */
+ EC_RES_UNAVAILABLE = 9, /* No response available */
+ EC_RES_TIMEOUT = 10, /* We got a timeout */
+ EC_RES_OVERFLOW = 11, /* Table / data overflow */
+ EC_RES_INVALID_HEADER = 12, /* Header contains invalid data */
+ EC_RES_REQUEST_TRUNCATED = 13, /* Didn't get the entire request */
+ EC_RES_RESPONSE_TOO_BIG = 14 /* Response was too big to handle */
+};
+
+/*
+ * Host event codes. Note these are 1-based, not 0-based, because ACPI query
+ * EC command uses code 0 to mean "no event pending". We explicitly specify
+ * each value in the enum listing so they won't change if we delete/insert an
+ * item or rearrange the list (it needs to be stable across platforms, not
+ * just within a single compiled instance).
+ */
+enum host_event_code {
+ EC_HOST_EVENT_LID_CLOSED = 1,
+ EC_HOST_EVENT_LID_OPEN = 2,
+ EC_HOST_EVENT_POWER_BUTTON = 3,
+ EC_HOST_EVENT_AC_CONNECTED = 4,
+ EC_HOST_EVENT_AC_DISCONNECTED = 5,
+ EC_HOST_EVENT_BATTERY_LOW = 6,
+ EC_HOST_EVENT_BATTERY_CRITICAL = 7,
+ EC_HOST_EVENT_BATTERY = 8,
+ EC_HOST_EVENT_THERMAL_THRESHOLD = 9,
+ EC_HOST_EVENT_THERMAL_OVERLOAD = 10,
+ EC_HOST_EVENT_THERMAL = 11,
+ EC_HOST_EVENT_USB_CHARGER = 12,
+ EC_HOST_EVENT_KEY_PRESSED = 13,
+ /*
+ * EC has finished initializing the host interface. The host can check
+ * for this event following sending a EC_CMD_REBOOT_EC command to
+ * determine when the EC is ready to accept subsequent commands.
+ */
+ EC_HOST_EVENT_INTERFACE_READY = 14,
+ /* Keyboard recovery combo has been pressed */
+ EC_HOST_EVENT_KEYBOARD_RECOVERY = 15,
+
+ /* Shutdown due to thermal overload */
+ EC_HOST_EVENT_THERMAL_SHUTDOWN = 16,
+ /* Shutdown due to battery level too low */
+ EC_HOST_EVENT_BATTERY_SHUTDOWN = 17,
+
+ /* Suggest that the AP throttle itself */
+ EC_HOST_EVENT_THROTTLE_START = 18,
+ /* Suggest that the AP resume normal speed */
+ EC_HOST_EVENT_THROTTLE_STOP = 19,
+
+ /* Hang detect logic detected a hang and host event timeout expired */
+ EC_HOST_EVENT_HANG_DETECT = 20,
+ /* Hang detect logic detected a hang and warm rebooted the AP */
+ EC_HOST_EVENT_HANG_REBOOT = 21,
+
+ /*
+ * The high bit of the event mask is not used as a host event code. If
+ * it reads back as set, then the entire event mask should be
+ * considered invalid by the host. This can happen when reading the
+ * raw event status via EC_MEMMAP_HOST_EVENTS but the LPC interface is
+ * not initialized on the EC, or improperly configured on the host.
+ */
+ EC_HOST_EVENT_INVALID = 32
+};
+/* Host event mask */
+#define EC_HOST_EVENT_MASK(event_code) (1UL << ((event_code) - 1))
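+
+/*
+ * Illustrative example (editor's addition): testing one event bit in a
+ * mask read from EC_MEMMAP_HOST_EVENTS. The macro is 1-based, so
+ * EC_HOST_EVENT_LID_OPEN (2) maps to bit 1:
+ *
+ *	if (events & EC_HOST_EVENT_MASK(EC_HOST_EVENT_LID_OPEN))
+ *		handle_lid_open();
+ *
+ * handle_lid_open() is a placeholder for whatever the host does with it.
+ */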
+
+/* Arguments at EC_LPC_ADDR_HOST_ARGS */
+struct ec_lpc_host_args {
+ uint8_t flags;
+ uint8_t command_version;
+ uint8_t data_size;
+ /*
+ * Checksum; sum of command + flags + command_version + data_size +
+ * all params/response data bytes.
+ */
+ uint8_t checksum;
+} __packed;
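+
+/*
+ * Illustrative sketch (editor's addition) of the checksum described above,
+ * where "command" is the command byte written to the command register and
+ * "params" points at the data_size parameter/response bytes:
+ *
+ *	uint8_t csum = command + args->flags + args->command_version +
+ *		       args->data_size;
+ *
+ *	for (i = 0; i < args->data_size; i++)
+ *		csum += params[i];
+ *	args->checksum = csum;
+ */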
+
+/* Flags for ec_lpc_host_args.flags */
+/*
+ * Args are from host. Data area at EC_LPC_ADDR_HOST_PARAM contains command
+ * params.
+ *
+ * If EC gets a command and this flag is not set, this is an old-style command.
+ * Command version is 0 and params from host are at EC_LPC_ADDR_OLD_PARAM with
+ * unknown length. EC must respond with an old-style response (that is,
+ * without setting EC_HOST_ARGS_FLAG_TO_HOST).
+ */
+#define EC_HOST_ARGS_FLAG_FROM_HOST 0x01
+/*
+ * Args are from EC. Data area at EC_LPC_ADDR_HOST_PARAM contains response.
+ *
+ * If EC responds to a command and this flag is not set, this is an old-style
+ * response. Command version is 0 and response data from EC is at
+ * EC_LPC_ADDR_OLD_PARAM with unknown length.
+ */
+#define EC_HOST_ARGS_FLAG_TO_HOST 0x02
+
+/*****************************************************************************/
+/*
+ * Byte codes returned by EC over SPI interface.
+ *
+ * These can be used by the AP to debug the EC interface, and to determine
+ * when the EC is not in a state where it will ever get around to responding
+ * to the AP.
+ *
+ * Example of sequence of bytes read from EC for a current good transfer:
+ * 1. - - AP asserts chip select (CS#)
+ * 2. EC_SPI_OLD_READY - AP sends first byte(s) of request
+ * 3. - - EC starts handling CS# interrupt
+ * 4. EC_SPI_RECEIVING - AP sends remaining byte(s) of request
+ * 5. EC_SPI_PROCESSING - EC starts processing request; AP is clocking in
+ * bytes looking for EC_SPI_FRAME_START
+ * 6. - - EC finishes processing and sets up response
+ * 7. EC_SPI_FRAME_START - AP reads frame byte
+ * 8. (response packet) - AP reads response packet
+ * 9. EC_SPI_PAST_END - Any additional bytes read by AP
+ * 10. - - AP deasserts chip select
+ * 11. - - EC processes CS# interrupt and sets up DMA for
+ * next request
+ *
+ * If the AP is waiting for EC_SPI_FRAME_START and sees any value other than
+ * the following byte values:
+ * EC_SPI_OLD_READY
+ * EC_SPI_RX_READY
+ * EC_SPI_RECEIVING
+ * EC_SPI_PROCESSING
+ *
+ * Then the EC found an error in the request, or was not ready for the request
+ * and lost data. The AP should give up waiting for EC_SPI_FRAME_START,
+ * because the EC is unable to tell when the AP is done sending its request.
+ */
+
+/*
+ * Framing byte which precedes a response packet from the EC. After sending a
+ * request, the AP will clock in bytes until it sees the framing byte, then
+ * clock in the response packet.
+ */
+#define EC_SPI_FRAME_START 0xec
+
+/*
+ * Padding bytes which are clocked out after the end of a response packet.
+ */
+#define EC_SPI_PAST_END 0xed
+
+/*
+ * EC is ready to receive, and has ignored the byte sent by the AP. EC expects
+ * that the AP will send a valid packet header (starting with
+ * EC_COMMAND_PROTOCOL_3) in the next 32 bytes.
+ */
+#define EC_SPI_RX_READY 0xf8
+
+/*
+ * EC has started receiving the request from the AP, but hasn't started
+ * processing it yet.
+ */
+#define EC_SPI_RECEIVING 0xf9
+
+/* EC has received the entire request from the AP and is processing it. */
+#define EC_SPI_PROCESSING 0xfa
+
+/*
+ * EC received bad data from the AP, such as a packet header with an invalid
+ * length. EC will ignore all data until chip select deasserts.
+ */
+#define EC_SPI_RX_BAD_DATA 0xfb
+
+/*
+ * EC received data from the AP before it was ready. That is, the AP asserted
+ * chip select and started clocking data before the EC was ready to receive it.
+ * EC will ignore all data until chip select deasserts.
+ */
+#define EC_SPI_NOT_READY 0xfc
+
+/*
+ * EC was ready to receive a request from the AP. EC has treated the byte sent
+ * by the AP as part of a request packet, or (for old-style ECs) is processing
+ * a fully received packet but is not ready to respond yet.
+ */
+#define EC_SPI_OLD_READY 0xfd
+
+/*****************************************************************************/
+
+/*
+ * Protocol version 2 for I2C and SPI send a request this way:
+ *
+ * 0 EC_CMD_VERSION0 + (command version)
+ * 1 Command number
+ * 2 Length of params = N
+ * 3..N+2 Params, if any
+ * N+3 8-bit checksum of bytes 0..N+2
+ *
+ * The corresponding response is:
+ *
+ * 0 Result code (EC_RES_*)
+ * 1 Length of params = M
+ * 2..M+1 Params, if any
+ * M+2 8-bit checksum of bytes 0..M+1
+ */
+#define EC_PROTO2_REQUEST_HEADER_BYTES 3
+#define EC_PROTO2_REQUEST_TRAILER_BYTES 1
+#define EC_PROTO2_REQUEST_OVERHEAD (EC_PROTO2_REQUEST_HEADER_BYTES + \
+ EC_PROTO2_REQUEST_TRAILER_BYTES)
+
+#define EC_PROTO2_RESPONSE_HEADER_BYTES 2
+#define EC_PROTO2_RESPONSE_TRAILER_BYTES 1
+#define EC_PROTO2_RESPONSE_OVERHEAD (EC_PROTO2_RESPONSE_HEADER_BYTES + \
+ EC_PROTO2_RESPONSE_TRAILER_BYTES)
+
+/* Parameter length was limited by the LPC interface */
+#define EC_PROTO2_MAX_PARAM_SIZE 0xfc
+
+/* Maximum request and response packet sizes for protocol version 2 */
+#define EC_PROTO2_MAX_REQUEST_SIZE (EC_PROTO2_REQUEST_OVERHEAD + \
+ EC_PROTO2_MAX_PARAM_SIZE)
+#define EC_PROTO2_MAX_RESPONSE_SIZE (EC_PROTO2_RESPONSE_OVERHEAD + \
+ EC_PROTO2_MAX_PARAM_SIZE)
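+
+/*
+ * Illustrative sketch (editor's addition): building the protocol version 2
+ * request frame laid out above, where "cmd", "cmd_version", "params" and
+ * "len" come from the caller and "out" has room for
+ * EC_PROTO2_MAX_REQUEST_SIZE bytes:
+ *
+ *	out[0] = EC_CMD_VERSION0 + cmd_version;
+ *	out[1] = cmd;
+ *	out[2] = len;
+ *	memcpy(&out[3], params, len);
+ *
+ *	csum = 0;
+ *	for (i = 0; i < len + EC_PROTO2_REQUEST_HEADER_BYTES; i++)
+ *		csum += out[i];
+ *	out[len + EC_PROTO2_REQUEST_HEADER_BYTES] = (uint8_t)csum;
+ */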
+
+/*****************************************************************************/
+
+/*
+ * Value written to legacy command port / prefix byte to indicate protocol
+ * 3+ structs are being used. Usage is bus-dependent.
+ */
+#define EC_COMMAND_PROTOCOL_3 0xda
+
+#define EC_HOST_REQUEST_VERSION 3
+
+/* Version 3 request from host */
+struct ec_host_request {
+ /* Struct version (=3)
+ *
+ * EC will return EC_RES_INVALID_HEADER if it receives a header with a
+ * version it doesn't know how to parse.
+ */
+ uint8_t struct_version;
+
+ /*
+ * Checksum of request and data; sum of all bytes including checksum
+ * should total to 0.
+ */
+ uint8_t checksum;
+
+ /* Command code */
+ uint16_t command;
+
+ /* Command version */
+ uint8_t command_version;
+
+ /* Unused byte in current protocol version; set to 0 */
+ uint8_t reserved;
+
+ /* Length of data which follows this header */
+ uint16_t data_len;
+} __packed;
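+
+/*
+ * Illustrative sketch (editor's addition): the checksum rule above means the
+ * field is the two's complement of the byte sum of header plus data, so that
+ * everything (checksum included) sums to zero. Assuming the data bytes
+ * immediately follow the header in memory:
+ *
+ *	uint8_t sum = 0;
+ *
+ *	rq->checksum = 0;
+ *	for (i = 0; i < sizeof(*rq) + rq->data_len; i++)
+ *		sum += ((const uint8_t *)rq)[i];
+ *	rq->checksum = (uint8_t)-sum;
+ */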
+
+#define EC_HOST_RESPONSE_VERSION 3
+
+/* Version 3 response from EC */
+struct ec_host_response {
+ /* Struct version (=3) */
+ uint8_t struct_version;
+
+ /*
+ * Checksum of response and data; sum of all bytes including checksum
+ * should total to 0.
+ */
+ uint8_t checksum;
+
+ /* Result code (EC_RES_*) */
+ uint16_t result;
+
+ /* Length of data which follows this header */
+ uint16_t data_len;
+
+ /* Unused bytes in current protocol version; set to 0 */
+ uint16_t reserved;
+} __packed;
+
+/*****************************************************************************/
+/*
+ * Notes on commands:
+ *
+ * Each command is an 8-byte command value. Commands which take params or
+ * return response data specify structs for that data. If no struct is
+ * specified, the command does not input or output data, respectively.
+ * Parameter/response length is implicit in the structs. Some underlying
+ * communication protocols (I2C, SPI) may add length or checksum headers, but
+ * those are implementation-dependent and not defined here.
+ */
+
+/*****************************************************************************/
+/* General / test commands */
+
+/*
+ * Get protocol version, used to deal with non-backward compatible protocol
+ * changes.
+ */
+#define EC_CMD_PROTO_VERSION 0x00
+
+struct ec_response_proto_version {
+ uint32_t version;
+} __packed;
+
+/*
+ * Hello. This is a simple command to test that the EC is responsive to
+ * commands.
+ */
+#define EC_CMD_HELLO 0x01
+
+struct ec_params_hello {
+ uint32_t in_data; /* Pass anything here */
+} __packed;
+
+struct ec_response_hello {
+ uint32_t out_data; /* Output will be in_data + 0x01020304 */
+} __packed;
+
+/* Get version number */
+#define EC_CMD_GET_VERSION 0x02
+
+enum ec_current_image {
+ EC_IMAGE_UNKNOWN = 0,
+ EC_IMAGE_RO,
+ EC_IMAGE_RW
+};
+
+struct ec_response_get_version {
+ /* Null-terminated version strings for RO, RW */
+ char version_string_ro[32];
+ char version_string_rw[32];
+ char reserved[32]; /* Was previously RW-B string */
+ uint32_t current_image; /* One of ec_current_image */
+} __packed;
+
+/* Read test */
+#define EC_CMD_READ_TEST 0x03
+
+struct ec_params_read_test {
+ uint32_t offset; /* Starting value for read buffer */
+ uint32_t size; /* Size to read in bytes */
+} __packed;
+
+struct ec_response_read_test {
+ uint32_t data[32];
+} __packed;
+
+/*
+ * Get build information
+ *
+ * Response is null-terminated string.
+ */
+#define EC_CMD_GET_BUILD_INFO 0x04
+
+/* Get chip info */
+#define EC_CMD_GET_CHIP_INFO 0x05
+
+struct ec_response_get_chip_info {
+ /* Null-terminated strings */
+ char vendor[32];
+ char name[32];
+ char revision[32]; /* Mask version */
+} __packed;
+
+/* Get board HW version */
+#define EC_CMD_GET_BOARD_VERSION 0x06
+
+struct ec_response_board_version {
+ uint16_t board_version; /* A monotonically increasing number. */
+} __packed;
+
+/*
+ * Read memory-mapped data.
+ *
+ * This is an alternate interface to memory-mapped data for bus protocols
+ * which don't support direct-mapped memory - I2C, SPI, etc.
+ *
+ * Response is params.size bytes of data.
+ */
+#define EC_CMD_READ_MEMMAP 0x07
+
+struct ec_params_read_memmap {
+ uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */
+ uint8_t size; /* Size to read in bytes */
+} __packed;
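+
+/*
+ * Illustrative example (editor's addition): reading the 32-bit battery
+ * voltage over a bus without direct-mapped memory, where "p" points at the
+ * ec_params_read_memmap struct above:
+ *
+ *	p->offset = EC_MEMMAP_BATT_VOLT;
+ *	p->size = sizeof(uint32_t);
+ *	... send EC_CMD_READ_MEMMAP; the response is p->size bytes of data ...
+ */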
+
+/* Read versions supported for a command */
+#define EC_CMD_GET_CMD_VERSIONS 0x08
+
+struct ec_params_get_cmd_versions {
+ uint8_t cmd; /* Command to check */
+} __packed;
+
+struct ec_response_get_cmd_versions {
+ /*
+ * Mask of supported versions; use EC_VER_MASK() to compare with a
+ * desired version.
+ */
+ uint32_t version_mask;
+} __packed;
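+
+/*
+ * Illustrative example (editor's addition): checking whether version 1 of a
+ * command is supported, where "r" points at the response struct above:
+ *
+ *	if (r->version_mask & EC_VER_MASK(1))
+ *		... version 1 of the command may be used ...
+ */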
+
+/*
+ * Check EC communications status (busy). This is needed on i2c/spi but not
+ * on lpc since it has its own out-of-band busy indicator.
+ *
+ * lpc must read the status from the command register. Attempting this on
+ * lpc will overwrite the args/parameter space and corrupt its data.
+ */
+#define EC_CMD_GET_COMMS_STATUS 0x09
+
+/* Avoid using ec_status which is for return values */
+enum ec_comms_status {
+ EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */
+};
+
+struct ec_response_get_comms_status {
+ uint32_t flags; /* Mask of enum ec_comms_status */
+} __packed;
+
+/* Fake a variety of responses, purely for testing purposes. */
+#define EC_CMD_TEST_PROTOCOL 0x0a
+
+/* Tell the EC what to send back to us. */
+struct ec_params_test_protocol {
+ uint32_t ec_result;
+ uint32_t ret_len;
+ uint8_t buf[32];
+} __packed;
+
+/* Here it comes... */
+struct ec_response_test_protocol {
+ uint8_t buf[32];
+} __packed;
+
+/* Get protocol information */
+#define EC_CMD_GET_PROTOCOL_INFO 0x0b
+
+/* Flags for ec_response_get_protocol_info.flags */
+/* EC_RES_IN_PROGRESS may be returned if a command is slow */
+#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0)
+
+struct ec_response_get_protocol_info {
+ /* Fields which exist if at least protocol version 3 supported */
+
+ /* Bitmask of protocol versions supported (1 << n means version n) */
+ uint32_t protocol_versions;
+
+ /* Maximum request packet size, in bytes */
+ uint16_t max_request_packet_size;
+
+ /* Maximum response packet size, in bytes */
+ uint16_t max_response_packet_size;
+
+ /* Flags; see EC_PROTOCOL_INFO_* */
+ uint32_t flags;
+} __packed;
+
+
+/*****************************************************************************/
+/* Get/Set miscellaneous values */
+
+/* The upper byte of .flags tells what to do (nothing means "get") */
+#define EC_GSV_SET 0x80000000
+
+/*
+ * The lower three bytes of .flags identify the parameter, if that has
+ * meaning for an individual command.
+ */
+#define EC_GSV_PARAM_MASK 0x00ffffff
+
+struct ec_params_get_set_value {
+ uint32_t flags;
+ uint32_t value;
+} __packed;
+
+struct ec_response_get_set_value {
+ uint32_t flags;
+ uint32_t value;
+} __packed;
+
+/* More than one command can use these structs to get/set parameters. */
+#define EC_CMD_GSV_PAUSE_IN_S5 0x0c
+
+
+/*****************************************************************************/
+/* Flash commands */
+
+/* Get flash info */
+#define EC_CMD_FLASH_INFO 0x10
+
+/* Version 0 returns these fields */
+struct ec_response_flash_info {
+ /* Usable flash size, in bytes */
+ uint32_t flash_size;
+ /*
+ * Write block size. Write offset and size must be a multiple
+ * of this.
+ */
+ uint32_t write_block_size;
+ /*
+ * Erase block size. Erase offset and size must be a multiple
+ * of this.
+ */
+ uint32_t erase_block_size;
+ /*
+ * Protection block size. Protection offset and size must be a
+ * multiple of this.
+ */
+ uint32_t protect_block_size;
+} __packed;
+
+/* Flags for version 1+ flash info command */
+/* EC flash erases bits to 0 instead of 1 */
+#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0)
+
+/*
+ * Version 1 returns the same initial fields as version 0, with additional
+ * fields following.
+ *
+ * gcc anonymous structs don't seem to get along with the __packed directive;
+ * if they did we'd define the version 0 struct as a sub-struct of this one.
+ */
+struct ec_response_flash_info_1 {
+ /* Version 0 fields; see above for description */
+ uint32_t flash_size;
+ uint32_t write_block_size;
+ uint32_t erase_block_size;
+ uint32_t protect_block_size;
+
+ /* Version 1 adds these fields: */
+ /*
+ * Ideal write size in bytes. Writes will be fastest if size is
+ * exactly this and offset is a multiple of this. For example, an EC
+ * may have a write buffer which can do half-page operations if data is
+ * aligned, and a slower word-at-a-time write mode.
+ */
+ uint32_t write_ideal_size;
+
+ /* Flags; see EC_FLASH_INFO_* */
+ uint32_t flags;
+} __packed;
+
+/*
+ * Read flash
+ *
+ * Response is params.size bytes of data.
+ */
+#define EC_CMD_FLASH_READ 0x11
+
+struct ec_params_flash_read {
+ uint32_t offset; /* Byte offset to read */
+ uint32_t size; /* Size to read in bytes */
+} __packed;
+
+/* Write flash */
+#define EC_CMD_FLASH_WRITE 0x12
+#define EC_VER_FLASH_WRITE 1
+
+/* Version 0 of the flash command supported only 64 bytes of data */
+#define EC_FLASH_WRITE_VER0_SIZE 64
+
+struct ec_params_flash_write {
+ uint32_t offset; /* Byte offset to write */
+ uint32_t size; /* Size to write in bytes */
+ /* Followed by data to write */
+} __packed;
+
+/* Erase flash */
+#define EC_CMD_FLASH_ERASE 0x13
+
+struct ec_params_flash_erase {
+ uint32_t offset; /* Byte offset to erase */
+ uint32_t size; /* Size to erase in bytes */
+} __packed;
+
+/*
+ * Get/set flash protection.
+ *
+ * If mask!=0, sets/clears the requested bits of flags. Depending on the
+ * firmware write protect GPIO, not all flags will take effect immediately;
+ * some flags require a subsequent hard reset to take effect. Check the
+ * returned flags bits to see what actually happened.
+ *
+ * If mask=0, simply returns the current flags state.
+ */
+#define EC_CMD_FLASH_PROTECT 0x15
+#define EC_VER_FLASH_PROTECT 1 /* Command version 1 */
+
+/* Flags for flash protection */
+/* RO flash code protected when the EC boots */
+#define EC_FLASH_PROTECT_RO_AT_BOOT (1 << 0)
+/*
+ * RO flash code protected now. If this bit is set, at-boot status cannot
+ * be changed.
+ */
+#define EC_FLASH_PROTECT_RO_NOW (1 << 1)
+/* Entire flash code protected now, until reboot. */
+#define EC_FLASH_PROTECT_ALL_NOW (1 << 2)
+/* Flash write protect GPIO is asserted now */
+#define EC_FLASH_PROTECT_GPIO_ASSERTED (1 << 3)
+/* Error - at least one bank of flash is stuck locked, and cannot be unlocked */
+#define EC_FLASH_PROTECT_ERROR_STUCK (1 << 4)
+/*
+ * Error - flash protection is in inconsistent state. At least one bank of
+ * flash which should be protected is not protected. Usually fixed by
+ * re-requesting the desired flags, or by a hard reset if that fails.
+ */
+#define EC_FLASH_PROTECT_ERROR_INCONSISTENT (1 << 5)
+/* Entire flash code protected when the EC boots */
+#define EC_FLASH_PROTECT_ALL_AT_BOOT (1 << 6)
+
+struct ec_params_flash_protect {
+ uint32_t mask; /* Bits in flags to apply */
+ uint32_t flags; /* New flags to apply */
+} __packed;
+
+struct ec_response_flash_protect {
+ /* Current value of flash protect flags */
+ uint32_t flags;
+ /*
+ * Flags which are valid on this platform. This allows the caller
+ * to distinguish between flags which aren't set vs. flags which can't
+ * be set on this platform.
+ */
+ uint32_t valid_flags;
+ /* Flags which can be changed given the current protection state */
+ uint32_t writable_flags;
+} __packed;
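+
+/*
+ * Illustrative example (editor's addition): requesting RO protection at boot
+ * and then checking what actually took effect, as described above, where "p"
+ * points at the params struct and the response is inspected afterwards:
+ *
+ *	p->mask = EC_FLASH_PROTECT_RO_AT_BOOT;
+ *	p->flags = EC_FLASH_PROTECT_RO_AT_BOOT;
+ *	... send EC_CMD_FLASH_PROTECT, then inspect flags and writable_flags
+ *	    in the response ...
+ */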
+
+/*
+ * Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash
+ * write protect. These commands may be reused with version > 0.
+ */
+
+/* Get the region offset/size */
+#define EC_CMD_FLASH_REGION_INFO 0x16
+#define EC_VER_FLASH_REGION_INFO 1
+
+enum ec_flash_region {
+ /* Region which holds read-only EC image */
+ EC_FLASH_REGION_RO = 0,
+ /* Region which holds rewritable EC image */
+ EC_FLASH_REGION_RW,
+ /*
+ * Region which should be write-protected in the factory (a superset of
+ * EC_FLASH_REGION_RO)
+ */
+ EC_FLASH_REGION_WP_RO,
+ /* Number of regions */
+ EC_FLASH_REGION_COUNT,
+};
+
+struct ec_params_flash_region_info {
+ uint32_t region; /* enum ec_flash_region */
+} __packed;
+
+struct ec_response_flash_region_info {
+ uint32_t offset;
+ uint32_t size;
+} __packed;
+
+/* Read/write VbNvContext */
+#define EC_CMD_VBNV_CONTEXT 0x17
+#define EC_VER_VBNV_CONTEXT 1
+#define EC_VBNV_BLOCK_SIZE 16
+
+enum ec_vbnvcontext_op {
+ EC_VBNV_CONTEXT_OP_READ,
+ EC_VBNV_CONTEXT_OP_WRITE,
+};
+
+struct ec_params_vbnvcontext {
+ uint32_t op;
+ uint8_t block[EC_VBNV_BLOCK_SIZE];
+} __packed;
+
+struct ec_response_vbnvcontext {
+ uint8_t block[EC_VBNV_BLOCK_SIZE];
+} __packed;
+
+/*****************************************************************************/
+/* PWM commands */
+
+/* Get fan target RPM */
+#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x20
+
+struct ec_response_pwm_get_fan_rpm {
+ uint32_t rpm;
+} __packed;
+
+/* Set target fan RPM */
+#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x21
+
+struct ec_params_pwm_set_fan_target_rpm {
+ uint32_t rpm;
+} __packed;
+
+/* Get keyboard backlight */
+#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x22
+
+struct ec_response_pwm_get_keyboard_backlight {
+ uint8_t percent;
+ uint8_t enabled;
+} __packed;
+
+/* Set keyboard backlight */
+#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x23
+
+struct ec_params_pwm_set_keyboard_backlight {
+ uint8_t percent;
+} __packed;
+
+/* Set target fan PWM duty cycle */
+#define EC_CMD_PWM_SET_FAN_DUTY 0x24
+
+struct ec_params_pwm_set_fan_duty {
+ uint32_t percent;
+} __packed;
+
+/*****************************************************************************/
+/*
+ * Lightbar commands. This looks worse than it is. Since we only use one HOST
+ * command to say "talk to the lightbar", we put the "and tell it to do X" part
+ * into a subcommand. We'll make separate structs for subcommands with
+ * different input args, so that we know how much to expect.
+ */
+#define EC_CMD_LIGHTBAR_CMD 0x28
+
+struct rgb_s {
+ uint8_t r, g, b;
+};
+
+#define LB_BATTERY_LEVELS 4
+/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
+ * host command, but the alignment is the same regardless. Keep it that way.
+ */
+struct lightbar_params {
+ /* Timing */
+ int32_t google_ramp_up;
+ int32_t google_ramp_down;
+ int32_t s3s0_ramp_up;
+ int32_t s0_tick_delay[2]; /* AC=0/1 */
+ int32_t s0a_tick_delay[2]; /* AC=0/1 */
+ int32_t s0s3_ramp_down;
+ int32_t s3_sleep_for;
+ int32_t s3_ramp_up;
+ int32_t s3_ramp_down;
+
+ /* Oscillation */
+ uint8_t new_s0;
+ uint8_t osc_min[2]; /* AC=0/1 */
+ uint8_t osc_max[2]; /* AC=0/1 */
+ uint8_t w_ofs[2]; /* AC=0/1 */
+
+ /* Brightness limits based on the backlight and AC. */
+ uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_min[2]; /* AC=0/1 */
+ uint8_t bright_bl_on_max[2]; /* AC=0/1 */
+
+ /* Battery level thresholds */
+ uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
+
+ /* Map [AC][battery_level] to color index */
+ uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
+ uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
+
+ /* Color palette */
+ struct rgb_s color[8]; /* 0-3 are Google colors */
+} __packed;
+
+struct ec_params_lightbar {
+ uint8_t cmd; /* Command (see enum lightbar_command) */
+ union {
+ struct {
+ /* no args */
+ } dump, off, on, init, get_seq, get_params, version;
+
+ struct num {
+ uint8_t num;
+ } brightness, seq, demo;
+
+ struct reg {
+ uint8_t ctrl, reg, value;
+ } reg;
+
+ struct rgb {
+ uint8_t led, red, green, blue;
+ } rgb;
+
+ struct lightbar_params set_params;
+ };
+} __packed;
+
+struct ec_response_lightbar {
+ union {
+ struct dump {
+ struct {
+ uint8_t reg;
+ uint8_t ic0;
+ uint8_t ic1;
+ } vals[23];
+ } dump;
+
+ struct get_seq {
+ uint8_t num;
+ } get_seq;
+
+ struct lightbar_params get_params;
+
+ struct version {
+ uint32_t num;
+ uint32_t flags;
+ } version;
+
+ struct {
+ /* no return params */
+ } off, on, init, brightness, seq, reg, rgb, demo, set_params;
+ };
+} __packed;
+
+/* Lightbar commands */
+enum lightbar_command {
+ LIGHTBAR_CMD_DUMP = 0,
+ LIGHTBAR_CMD_OFF = 1,
+ LIGHTBAR_CMD_ON = 2,
+ LIGHTBAR_CMD_INIT = 3,
+ LIGHTBAR_CMD_BRIGHTNESS = 4,
+ LIGHTBAR_CMD_SEQ = 5,
+ LIGHTBAR_CMD_REG = 6,
+ LIGHTBAR_CMD_RGB = 7,
+ LIGHTBAR_CMD_GET_SEQ = 8,
+ LIGHTBAR_CMD_DEMO = 9,
+ LIGHTBAR_CMD_GET_PARAMS = 10,
+ LIGHTBAR_CMD_SET_PARAMS = 11,
+ LIGHTBAR_CMD_VERSION = 12,
+ LIGHTBAR_NUM_CMDS
+};
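+
+/*
+ * Example (illustrative sketch): issuing the brightness subcommand through
+ * the hypothetical send_host_command() helper; the request layout comes
+ * from struct ec_params_lightbar above, and brightness returns no data.
+ *
+ *	struct ec_params_lightbar p = {
+ *		.cmd = LIGHTBAR_CMD_BRIGHTNESS,
+ *		.brightness = { .num = 128 },
+ *	};
+ *
+ *	send_host_command(EC_CMD_LIGHTBAR_CMD, 0, &p, sizeof(p), NULL, 0);
+ */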
+
+/*****************************************************************************/
+/* LED control commands */
+
+#define EC_CMD_LED_CONTROL 0x29
+
+enum ec_led_id {
+ /* LED to indicate battery state of charge */
+ EC_LED_ID_BATTERY_LED = 0,
+ /*
+ * LED to indicate system power state (on or in suspend).
+ * May be on power button or on C-panel.
+ */
+ EC_LED_ID_POWER_LED,
+ /* LED on power adapter or its plug */
+ EC_LED_ID_ADAPTER_LED,
+
+ EC_LED_ID_COUNT
+};
+
+/* LED control flags */
+#define EC_LED_FLAGS_QUERY (1 << 0) /* Query LED capability only */
+#define EC_LED_FLAGS_AUTO (1 << 1) /* Switch LED back to automatic control */
+
+enum ec_led_colors {
+ EC_LED_COLOR_RED = 0,
+ EC_LED_COLOR_GREEN,
+ EC_LED_COLOR_BLUE,
+ EC_LED_COLOR_YELLOW,
+ EC_LED_COLOR_WHITE,
+
+ EC_LED_COLOR_COUNT
+};
+
+struct ec_params_led_control {
+ uint8_t led_id; /* Which LED to control */
+ uint8_t flags; /* Control flags */
+
+ uint8_t brightness[EC_LED_COLOR_COUNT];
+} __packed;
+
+struct ec_response_led_control {
+ /*
+ * Available brightness value range.
+ *
+ * Range 0 means color channel not present.
+ * Range 1 means on/off control.
+ * Other values mean the LED is controlled by PWM.
+ */
+ uint8_t brightness_range[EC_LED_COLOR_COUNT];
+} __packed;
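+
+/*
+ * Example (illustrative sketch): querying what the battery LED supports
+ * without changing it, via the hypothetical send_host_command() helper
+ * (command version shown as 0 for illustration only). A zero entry in
+ * r.brightness_range means that color channel is not present.
+ *
+ *	struct ec_params_led_control p = {
+ *		.led_id = EC_LED_ID_BATTERY_LED,
+ *		.flags = EC_LED_FLAGS_QUERY,
+ *	};
+ *	struct ec_response_led_control r;
+ *
+ *	send_host_command(EC_CMD_LED_CONTROL, 0, &p, sizeof(p),
+ *			  &r, sizeof(r));
+ */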
+
+/*****************************************************************************/
+/* Verified boot commands */
+
+/*
+ * Note: command code 0x29 version 0 was VBOOT_CMD in Link EVT; it may be
+ * reused for other purposes with version > 0.
+ */
+
+/* Verified boot hash command */
+#define EC_CMD_VBOOT_HASH 0x2A
+
+struct ec_params_vboot_hash {
+ uint8_t cmd; /* enum ec_vboot_hash_cmd */
+ uint8_t hash_type; /* enum ec_vboot_hash_type */
+ uint8_t nonce_size; /* Nonce size; may be 0 */
+ uint8_t reserved0; /* Reserved; set 0 */
+ uint32_t offset; /* Offset in flash to hash */
+ uint32_t size; /* Number of bytes to hash */
+ uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */
+} __packed;
+
+struct ec_response_vboot_hash {
+ uint8_t status; /* enum ec_vboot_hash_status */
+ uint8_t hash_type; /* enum ec_vboot_hash_type */
+ uint8_t digest_size; /* Size of hash digest in bytes */
+ uint8_t reserved0; /* Ignore; will be 0 */
+ uint32_t offset; /* Offset in flash which was hashed */
+ uint32_t size; /* Number of bytes hashed */
+ uint8_t hash_digest[64]; /* Hash digest data */
+} __packed;
+
+enum ec_vboot_hash_cmd {
+ EC_VBOOT_HASH_GET = 0, /* Get current hash status */
+ EC_VBOOT_HASH_ABORT = 1, /* Abort calculating current hash */
+ EC_VBOOT_HASH_START = 2, /* Start computing a new hash */
+ EC_VBOOT_HASH_RECALC = 3, /* Synchronously compute a new hash */
+};
+
+enum ec_vboot_hash_type {
+ EC_VBOOT_HASH_TYPE_SHA256 = 0, /* SHA-256 */
+};
+
+enum ec_vboot_hash_status {
+ EC_VBOOT_HASH_STATUS_NONE = 0, /* No hash (not started, or aborted) */
+ EC_VBOOT_HASH_STATUS_DONE = 1, /* Finished computing a hash */
+ EC_VBOOT_HASH_STATUS_BUSY = 2, /* Busy computing a hash */
+};
+
+/*
+ * Special values for offset for EC_VBOOT_HASH_START and EC_VBOOT_HASH_RECALC.
+ * If one of these is specified, the EC will automatically update offset and
+ * size to the correct values for the specified image (RO or RW).
+ */
+#define EC_VBOOT_HASH_OFFSET_RO 0xfffffffe
+#define EC_VBOOT_HASH_OFFSET_RW 0xfffffffd
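+
+/*
+ * Example (illustrative sketch): starting a background SHA-256 hash of the
+ * RW image and polling until it completes, using the hypothetical
+ * send_host_command() helper. The special RW offset lets the EC fill in
+ * the real offset and size itself.
+ *
+ *	struct ec_params_vboot_hash p = {
+ *		.cmd = EC_VBOOT_HASH_START,
+ *		.hash_type = EC_VBOOT_HASH_TYPE_SHA256,
+ *		.offset = EC_VBOOT_HASH_OFFSET_RW,
+ *	};
+ *	struct ec_response_vboot_hash r;
+ *
+ *	send_host_command(EC_CMD_VBOOT_HASH, 0, &p, sizeof(p), &r, sizeof(r));
+ *	do {
+ *		p.cmd = EC_VBOOT_HASH_GET;
+ *		send_host_command(EC_CMD_VBOOT_HASH, 0, &p, sizeof(p),
+ *				  &r, sizeof(r));
+ *	} while (r.status == EC_VBOOT_HASH_STATUS_BUSY);
+ */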
+
+/*****************************************************************************/
+/*
+ * Motion sense commands. We'll make separate structs for sub-commands with
+ * different input args, so that we know how much to expect.
+ */
+#define EC_CMD_MOTION_SENSE_CMD 0x2B
+
+/* Motion sense commands */
+enum motionsense_command {
+ /*
+ * Dump command returns all motion sensor data including motion sense
+ * module flags and individual sensor flags.
+ */
+ MOTIONSENSE_CMD_DUMP = 0,
+
+ /*
+ * Info command returns data describing the details of a given sensor,
+ * including enum motionsensor_type, enum motionsensor_location, and
+ * enum motionsensor_chip.
+ */
+ MOTIONSENSE_CMD_INFO = 1,
+
+ /*
+ * EC Rate command is a setter/getter command for the EC sampling rate
+ * of all motion sensors in milliseconds.
+ */
+ MOTIONSENSE_CMD_EC_RATE = 2,
+
+ /*
+ * Sensor ODR command is a setter/getter command for the output data
+ * rate of a specific motion sensor in millihertz.
+ */
+ MOTIONSENSE_CMD_SENSOR_ODR = 3,
+
+ /*
+ * Sensor range command is a setter/getter command for the range of
+ * a specified motion sensor in +/-G's or +/- deg/s.
+ */
+ MOTIONSENSE_CMD_SENSOR_RANGE = 4,
+
+ /*
+ * Setter/getter command for the keyboard wake angle. When the lid
+ * angle is greater than this value, keyboard wake is disabled in S3,
+ * and when the lid angle drops below this value, keyboard wake is
+ * enabled. Note that the lid angle measurement is an approximate,
+ * uncalibrated value, so the wake angle isn't exact.
+ */
+ MOTIONSENSE_CMD_KB_WAKE_ANGLE = 5,
+
+ /* Number of motionsense sub-commands. */
+ MOTIONSENSE_NUM_CMDS
+};
+
+enum motionsensor_id {
+ EC_MOTION_SENSOR_ACCEL_BASE = 0,
+ EC_MOTION_SENSOR_ACCEL_LID = 1,
+ EC_MOTION_SENSOR_GYRO = 2,
+
+ /*
+ * Note, if more sensors are added and this count changes, the padding
+ * in ec_response_motion_sense dump command must be modified.
+ */
+ EC_MOTION_SENSOR_COUNT = 3
+};
+
+/* List of motion sensor types. */
+enum motionsensor_type {
+ MOTIONSENSE_TYPE_ACCEL = 0,
+ MOTIONSENSE_TYPE_GYRO = 1,
+};
+
+/* List of motion sensor locations. */
+enum motionsensor_location {
+ MOTIONSENSE_LOC_BASE = 0,
+ MOTIONSENSE_LOC_LID = 1,
+};
+
+/* List of motion sensor chips. */
+enum motionsensor_chip {
+ MOTIONSENSE_CHIP_KXCJ9 = 0,
+};
+
+/* Module flag masks used for the dump sub-command. */
+#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0)
+
+/* Sensor flag masks used for the dump sub-command. */
+#define MOTIONSENSE_SENSOR_FLAG_PRESENT (1<<0)
+
+/*
+ * Send this value for the data element to only perform a read. If you
+ * send any other value, the EC will interpret it as data to set and will
+ * return the actual value set.
+ */
+#define EC_MOTION_SENSE_NO_VALUE -1
+
+struct ec_params_motion_sense {
+ uint8_t cmd;
+ union {
+ /* Used for MOTIONSENSE_CMD_DUMP. */
+ struct {
+ /* no args */
+ } dump;
+
+ /*
+ * Used for MOTIONSENSE_CMD_EC_RATE and
+ * MOTIONSENSE_CMD_KB_WAKE_ANGLE.
+ */
+ struct {
+ /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */
+ int16_t data;
+ } ec_rate, kb_wake_angle;
+
+ /* Used for MOTIONSENSE_CMD_INFO. */
+ struct {
+ /* Should be element of enum motionsensor_id. */
+ uint8_t sensor_num;
+ } info;
+
+ /*
+ * Used for MOTIONSENSE_CMD_SENSOR_ODR and
+ * MOTIONSENSE_CMD_SENSOR_RANGE.
+ */
+ struct {
+ /* Should be element of enum motionsensor_id. */
+ uint8_t sensor_num;
+
+ /* Rounding flag, true for round-up, false for down. */
+ uint8_t roundup;
+
+ uint16_t reserved;
+
+ /* Data to set or EC_MOTION_SENSE_NO_VALUE to read. */
+ int32_t data;
+ } sensor_odr, sensor_range;
+ };
+} __packed;
+
+struct ec_response_motion_sense {
+ union {
+ /* Used for MOTIONSENSE_CMD_DUMP. */
+ struct {
+ /* Flags representing the motion sensor module. */
+ uint8_t module_flags;
+
+ /* Flags for each sensor in enum motionsensor_id. */
+ uint8_t sensor_flags[EC_MOTION_SENSOR_COUNT];
+
+ /* Array of all sensor data. Each sensor is 3-axis. */
+ int16_t data[3*EC_MOTION_SENSOR_COUNT];
+ } dump;
+
+ /* Used for MOTIONSENSE_CMD_INFO. */
+ struct {
+ /* Should be element of enum motionsensor_type. */
+ uint8_t type;
+
+ /* Should be element of enum motionsensor_location. */
+ uint8_t location;
+
+ /* Should be element of enum motionsensor_chip. */
+ uint8_t chip;
+ } info;
+
+ /*
+ * Used for MOTIONSENSE_CMD_EC_RATE, MOTIONSENSE_CMD_SENSOR_ODR,
+ * MOTIONSENSE_CMD_SENSOR_RANGE, and
+ * MOTIONSENSE_CMD_KB_WAKE_ANGLE.
+ */
+ struct {
+ /* Current value of the parameter queried. */
+ int32_t ret;
+ } ec_rate, sensor_odr, sensor_range, kb_wake_angle;
+ };
+} __packed;
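+
+/*
+ * Example (illustrative sketch): reading the current EC sampling rate by
+ * sending EC_MOTION_SENSE_NO_VALUE, then setting it to 10 ms, via the
+ * hypothetical send_host_command() helper.
+ *
+ *	struct ec_params_motion_sense p = {
+ *		.cmd = MOTIONSENSE_CMD_EC_RATE,
+ *		.ec_rate = { .data = EC_MOTION_SENSE_NO_VALUE },
+ *	};
+ *	struct ec_response_motion_sense r;
+ *
+ *	send_host_command(EC_CMD_MOTION_SENSE_CMD, 0, &p, sizeof(p),
+ *			  &r, sizeof(r));
+ *	(r.ec_rate.ret now holds the current rate in milliseconds)
+ *
+ *	p.ec_rate.data = 10;
+ *	send_host_command(EC_CMD_MOTION_SENSE_CMD, 0, &p, sizeof(p),
+ *			  &r, sizeof(r));
+ */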
+
+/*****************************************************************************/
+/* USB charging control commands */
+
+/* Set USB port charging mode */
+#define EC_CMD_USB_CHARGE_SET_MODE 0x30
+
+struct ec_params_usb_charge_set_mode {
+ uint8_t usb_port_id;
+ uint8_t mode;
+} __packed;
+
+/*****************************************************************************/
+/* Persistent storage for host */
+
+/* Maximum bytes that can be read/written in a single command */
+#define EC_PSTORE_SIZE_MAX 64
+
+/* Get persistent storage info */
+#define EC_CMD_PSTORE_INFO 0x40
+
+struct ec_response_pstore_info {
+ /* Persistent storage size, in bytes */
+ uint32_t pstore_size;
+ /* Access size; read/write offset and size must be a multiple of this */
+ uint32_t access_size;
+} __packed;
+
+/*
+ * Read persistent storage
+ *
+ * Response is params.size bytes of data.
+ */
+#define EC_CMD_PSTORE_READ 0x41
+
+struct ec_params_pstore_read {
+ uint32_t offset; /* Byte offset to read */
+ uint32_t size; /* Size to read in bytes */
+} __packed;
+
+/* Write persistent storage */
+#define EC_CMD_PSTORE_WRITE 0x42
+
+struct ec_params_pstore_write {
+ uint32_t offset; /* Byte offset to write */
+ uint32_t size; /* Size to write in bytes */
+ uint8_t data[EC_PSTORE_SIZE_MAX];
+} __packed;
+
+/*****************************************************************************/
+/* Real-time clock */
+
+/* RTC params and response structures */
+struct ec_params_rtc {
+ uint32_t time;
+} __packed;
+
+struct ec_response_rtc {
+ uint32_t time;
+} __packed;
+
+/* These use ec_response_rtc */
+#define EC_CMD_RTC_GET_VALUE 0x44
+#define EC_CMD_RTC_GET_ALARM 0x45
+
+/* These all use ec_params_rtc */
+#define EC_CMD_RTC_SET_VALUE 0x46
+#define EC_CMD_RTC_SET_ALARM 0x47
+
+/*****************************************************************************/
+/* Port80 log access */
+
+/* Get last port80 code from previous boot */
+#define EC_CMD_PORT80_LAST_BOOT 0x48
+
+struct ec_response_port80_last_boot {
+ uint16_t code;
+} __packed;
+
+/*****************************************************************************/
+/* Thermal engine commands. Note that there are two implementations. We'll
+ * reuse the command number, but the data and behavior are incompatible.
+ * Version 0 is what originally shipped on Link.
+ * Version 1 separates the CPU thermal limits from the fan control.
+ */
+
+#define EC_CMD_THERMAL_SET_THRESHOLD 0x50
+#define EC_CMD_THERMAL_GET_THRESHOLD 0x51
+
+/* The version 0 structs are opaque. You have to know what they are for
+ * the get/set commands to make any sense.
+ */
+
+/* Version 0 - set */
+struct ec_params_thermal_set_threshold {
+ uint8_t sensor_type;
+ uint8_t threshold_id;
+ uint16_t value;
+} __packed;
+
+/* Version 0 - get */
+struct ec_params_thermal_get_threshold {
+ uint8_t sensor_type;
+ uint8_t threshold_id;
+} __packed;
+
+struct ec_response_thermal_get_threshold {
+ uint16_t value;
+} __packed;
+
+
+/* The version 1 structs are visible. */
+enum ec_temp_thresholds {
+ EC_TEMP_THRESH_WARN = 0,
+ EC_TEMP_THRESH_HIGH,
+ EC_TEMP_THRESH_HALT,
+
+ EC_TEMP_THRESH_COUNT
+};
+
+/* Thermal configuration for one temperature sensor. Temps are in degrees K.
+ * Zero values will be silently ignored by the thermal task.
+ */
+struct ec_thermal_config {
+ uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */
+ uint32_t temp_fan_off; /* no active cooling needed */
+ uint32_t temp_fan_max; /* max active cooling needed */
+} __packed;
+
+/* Version 1 - get config for one sensor. */
+struct ec_params_thermal_get_threshold_v1 {
+ uint32_t sensor_num;
+} __packed;
+/* This returns a struct ec_thermal_config */
+
+/* Version 1 - set config for one sensor.
+ * Use read-modify-write for best results! */
+struct ec_params_thermal_set_threshold_v1 {
+ uint32_t sensor_num;
+ struct ec_thermal_config cfg;
+} __packed;
+/* This returns no data */
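+
+/*
+ * Example (illustrative sketch): the read-modify-write pattern suggested
+ * above, changing only temp_fan_max for sensor 0 while keeping the other
+ * thresholds intact. send_host_command() is a hypothetical helper.
+ *
+ *	struct ec_params_thermal_get_threshold_v1 g = { .sensor_num = 0 };
+ *	struct ec_thermal_config cfg;
+ *	struct ec_params_thermal_set_threshold_v1 s;
+ *
+ *	send_host_command(EC_CMD_THERMAL_GET_THRESHOLD, 1, &g, sizeof(g),
+ *			  &cfg, sizeof(cfg));
+ *	s.sensor_num = 0;
+ *	s.cfg = cfg;
+ *	s.cfg.temp_fan_max = 340;		(degrees K)
+ *	send_host_command(EC_CMD_THERMAL_SET_THRESHOLD, 1, &s, sizeof(s),
+ *			  NULL, 0);
+ */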
+
+/****************************************************************************/
+
+/* Toggle automatic fan control */
+#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x52
+
+/* Get TMP006 calibration data */
+#define EC_CMD_TMP006_GET_CALIBRATION 0x53
+
+struct ec_params_tmp006_get_calibration {
+ uint8_t index;
+} __packed;
+
+struct ec_response_tmp006_get_calibration {
+ float s0;
+ float b0;
+ float b1;
+ float b2;
+} __packed;
+
+/* Set TMP006 calibration data */
+#define EC_CMD_TMP006_SET_CALIBRATION 0x54
+
+struct ec_params_tmp006_set_calibration {
+ uint8_t index;
+ uint8_t reserved[3]; /* Reserved; set 0 */
+ float s0;
+ float b0;
+ float b1;
+ float b2;
+} __packed;
+
+/* Read raw TMP006 data */
+#define EC_CMD_TMP006_GET_RAW 0x55
+
+struct ec_params_tmp006_get_raw {
+ uint8_t index;
+} __packed;
+
+struct ec_response_tmp006_get_raw {
+ int32_t t; /* In 1/100 K */
+ int32_t v; /* In nV */
+};
+
+/*****************************************************************************/
+/* MKBP - Matrix KeyBoard Protocol */
+
+/*
+ * Read key state
+ *
+ * Returns raw data for keyboard cols; see ec_response_mkbp_info.cols for
+ * expected response size.
+ */
+#define EC_CMD_MKBP_STATE 0x60
+
+/* Provide information about the matrix : number of rows and columns */
+#define EC_CMD_MKBP_INFO 0x61
+
+struct ec_response_mkbp_info {
+ uint32_t rows;
+ uint32_t cols;
+ uint8_t switches;
+} __packed;
+
+/* Simulate key press */
+#define EC_CMD_MKBP_SIMULATE_KEY 0x62
+
+struct ec_params_mkbp_simulate_key {
+ uint8_t col;
+ uint8_t row;
+ uint8_t pressed;
+} __packed;
+
+/* Configure keyboard scanning */
+#define EC_CMD_MKBP_SET_CONFIG 0x64
+#define EC_CMD_MKBP_GET_CONFIG 0x65
+
+/* flags */
+enum mkbp_config_flags {
+ EC_MKBP_FLAGS_ENABLE = 1, /* Enable keyboard scanning */
+};
+
+enum mkbp_config_valid {
+ EC_MKBP_VALID_SCAN_PERIOD = 1 << 0,
+ EC_MKBP_VALID_POLL_TIMEOUT = 1 << 1,
+ EC_MKBP_VALID_MIN_POST_SCAN_DELAY = 1 << 3,
+ EC_MKBP_VALID_OUTPUT_SETTLE = 1 << 4,
+ EC_MKBP_VALID_DEBOUNCE_DOWN = 1 << 5,
+ EC_MKBP_VALID_DEBOUNCE_UP = 1 << 6,
+ EC_MKBP_VALID_FIFO_MAX_DEPTH = 1 << 7,
+};
+
+/* Configuration for our key scanning algorithm */
+struct ec_mkbp_config {
+ uint32_t valid_mask; /* valid fields */
+ uint8_t flags; /* some flags (enum mkbp_config_flags) */
+ uint8_t valid_flags; /* which flags are valid */
+ uint16_t scan_period_us; /* period between start of scans */
+ /* revert to interrupt mode after no activity for this long */
+ uint32_t poll_timeout_us;
+ /*
+ * minimum post-scan relax time. Once we finish a scan we check
+ * the time until we are due to start the next one. If this time is
+ * shorter than this field, we use this field instead.
+ */
+ uint16_t min_post_scan_delay_us;
+ /* delay between setting up output and waiting for it to settle */
+ uint16_t output_settle_us;
+ uint16_t debounce_down_us; /* time for debounce on key down */
+ uint16_t debounce_up_us; /* time for debounce on key up */
+ /* maximum depth to allow for fifo (0 = no keyscan output) */
+ uint8_t fifo_max_depth;
+} __packed;
+
+struct ec_params_mkbp_set_config {
+ struct ec_mkbp_config config;
+} __packed;
+
+struct ec_response_mkbp_get_config {
+ struct ec_mkbp_config config;
+} __packed;
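+
+/*
+ * Example (illustrative sketch): changing only the scan period; valid_mask
+ * tells the EC which fields to apply and everything else is left alone.
+ * send_host_command() is a hypothetical helper.
+ *
+ *	struct ec_params_mkbp_set_config p = {
+ *		.config = {
+ *			.valid_mask = EC_MKBP_VALID_SCAN_PERIOD,
+ *			.scan_period_us = 10000,
+ *		},
+ *	};
+ *
+ *	send_host_command(EC_CMD_MKBP_SET_CONFIG, 0, &p, sizeof(p), NULL, 0);
+ */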
+
+/* Run the key scan emulation */
+#define EC_CMD_KEYSCAN_SEQ_CTRL 0x66
+
+enum ec_keyscan_seq_cmd {
+ EC_KEYSCAN_SEQ_STATUS = 0, /* Get status information */
+ EC_KEYSCAN_SEQ_CLEAR = 1, /* Clear sequence */
+ EC_KEYSCAN_SEQ_ADD = 2, /* Add item to sequence */
+ EC_KEYSCAN_SEQ_START = 3, /* Start running sequence */
+ EC_KEYSCAN_SEQ_COLLECT = 4, /* Collect sequence summary data */
+};
+
+enum ec_collect_flags {
+ /*
+ * Indicates this scan was processed by the EC. Due to timing, some
+ * scans may be skipped.
+ */
+ EC_KEYSCAN_SEQ_FLAG_DONE = 1 << 0,
+};
+
+struct ec_collect_item {
+ uint8_t flags; /* some flags (enum ec_collect_flags) */
+};
+
+struct ec_params_keyscan_seq_ctrl {
+ uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */
+ union {
+ struct {
+ uint8_t active; /* still active */
+ uint8_t num_items; /* number of items */
+ /* Current item being presented */
+ uint8_t cur_item;
+ } status;
+ struct {
+ /*
+ * Absolute time for this scan, measured from the
+ * start of the sequence.
+ */
+ uint32_t time_us;
+ uint8_t scan[0]; /* keyscan data */
+ } add;
+ struct {
+ uint8_t start_item; /* First item to return */
+ uint8_t num_items; /* Number of items to return */
+ } collect;
+ };
+} __packed;
+
+struct ec_result_keyscan_seq_ctrl {
+ union {
+ struct {
+ uint8_t num_items; /* Number of items */
+ /* Data for each item */
+ struct ec_collect_item item[0];
+ } collect;
+ };
+} __packed;
+
+/*****************************************************************************/
+/* Temperature sensor commands */
+
+/* Read temperature sensor info */
+#define EC_CMD_TEMP_SENSOR_GET_INFO 0x70
+
+struct ec_params_temp_sensor_get_info {
+ uint8_t id;
+} __packed;
+
+struct ec_response_temp_sensor_get_info {
+ char sensor_name[32];
+ uint8_t sensor_type;
+} __packed;
+
+/*****************************************************************************/
+
+/*
+ * Note: host commands 0x80 - 0x87 are reserved to avoid conflict with ACPI
+ * commands accidentally sent to the wrong interface. See the ACPI section
+ * below.
+ */
+
+/*****************************************************************************/
+/* Host event commands */
+
+/*
+ * Host event mask params and response structures, shared by all of the host
+ * event commands below.
+ */
+struct ec_params_host_event_mask {
+ uint32_t mask;
+} __packed;
+
+struct ec_response_host_event_mask {
+ uint32_t mask;
+} __packed;
+
+/* These all use ec_response_host_event_mask */
+#define EC_CMD_HOST_EVENT_GET_B 0x87
+#define EC_CMD_HOST_EVENT_GET_SMI_MASK 0x88
+#define EC_CMD_HOST_EVENT_GET_SCI_MASK 0x89
+#define EC_CMD_HOST_EVENT_GET_WAKE_MASK 0x8d
+
+/* These all use ec_params_host_event_mask */
+#define EC_CMD_HOST_EVENT_SET_SMI_MASK 0x8a
+#define EC_CMD_HOST_EVENT_SET_SCI_MASK 0x8b
+#define EC_CMD_HOST_EVENT_CLEAR 0x8c
+#define EC_CMD_HOST_EVENT_SET_WAKE_MASK 0x8e
+#define EC_CMD_HOST_EVENT_CLEAR_B 0x8f
+
+/*****************************************************************************/
+/* Switch commands */
+
+/* Enable/disable LCD backlight */
+#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x90
+
+struct ec_params_switch_enable_backlight {
+ uint8_t enabled;
+} __packed;
+
+/* Enable/disable WLAN/Bluetooth */
+#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x91
+#define EC_VER_SWITCH_ENABLE_WIRELESS 1
+
+/* Version 0 params; no response */
+struct ec_params_switch_enable_wireless_v0 {
+ uint8_t enabled;
+} __packed;
+
+/* Version 1 params */
+struct ec_params_switch_enable_wireless_v1 {
+ /* Flags to enable now */
+ uint8_t now_flags;
+
+ /* Which flags to copy from now_flags */
+ uint8_t now_mask;
+
+ /*
+ * Flags to leave enabled in S3, if they're on at the S0->S3
+ * transition. (Other flags will be disabled by the S0->S3
+ * transition.)
+ */
+ uint8_t suspend_flags;
+
+ /* Which flags to copy from suspend_flags */
+ uint8_t suspend_mask;
+} __packed;
+
+/* Version 1 response */
+struct ec_response_switch_enable_wireless_v1 {
+ /* Flags to enable now */
+ uint8_t now_flags;
+
+ /* Flags to leave enabled in S3 */
+ uint8_t suspend_flags;
+} __packed;
+
+/*****************************************************************************/
+/* GPIO commands. Only available on EC if write protect has been disabled. */
+
+/* Set GPIO output value */
+#define EC_CMD_GPIO_SET 0x92
+
+struct ec_params_gpio_set {
+ char name[32];
+ uint8_t val;
+} __packed;
+
+/* Get GPIO value */
+#define EC_CMD_GPIO_GET 0x93
+
+struct ec_params_gpio_get {
+ char name[32];
+} __packed;
+struct ec_response_gpio_get {
+ uint8_t val;
+} __packed;
+
+/*****************************************************************************/
+/* I2C commands. Only available when flash write protect is unlocked. */
+
+/*
+ * TODO(crosbug.com/p/23570): These commands are deprecated, and will be
+ * removed soon. Use EC_CMD_I2C_XFER instead.
+ */
+
+/* Read I2C bus */
+#define EC_CMD_I2C_READ 0x94
+
+struct ec_params_i2c_read {
+ uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
+ uint8_t read_size; /* Either 8 or 16. */
+ uint8_t port;
+ uint8_t offset;
+} __packed;
+struct ec_response_i2c_read {
+ uint16_t data;
+} __packed;
+
+/* Write I2C bus */
+#define EC_CMD_I2C_WRITE 0x95
+
+struct ec_params_i2c_write {
+ uint16_t data;
+ uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
+ uint8_t write_size; /* Either 8 or 16. */
+ uint8_t port;
+ uint8_t offset;
+} __packed;
+
+/*****************************************************************************/
+/* Charge state commands. Only available when flash write protect unlocked. */
+
+/* Force charge state machine to stop charging the battery or force it to
+ * discharge the battery.
+ */
+#define EC_CMD_CHARGE_CONTROL 0x96
+#define EC_VER_CHARGE_CONTROL 1
+
+enum ec_charge_control_mode {
+ CHARGE_CONTROL_NORMAL = 0,
+ CHARGE_CONTROL_IDLE,
+ CHARGE_CONTROL_DISCHARGE,
+};
+
+struct ec_params_charge_control {
+	uint32_t mode; /* enum ec_charge_control_mode */
+} __packed;
+
+/*****************************************************************************/
+/* Console commands. Only available when flash write protect is unlocked. */
+
+/* Snapshot console output buffer for use by EC_CMD_CONSOLE_READ. */
+#define EC_CMD_CONSOLE_SNAPSHOT 0x97
+
+/*
+ * Read next chunk of data from saved snapshot.
+ *
+ * Response is a null-terminated string; an empty string indicates there is
+ * no more output remaining.
+ */
+#define EC_CMD_CONSOLE_READ 0x98
+
+/*****************************************************************************/
+
+/*
+ * Cut off battery power output if the battery supports it.
+ *
+ * For an unsupported battery, simply don't implement this command and let
+ * the EC return EC_RES_INVALID_COMMAND.
+ */
+#define EC_CMD_BATTERY_CUT_OFF 0x99
+
+/*****************************************************************************/
+/* USB port mux control. */
+
+/*
+ * Switch USB mux or return to automatic switching.
+ */
+#define EC_CMD_USB_MUX 0x9a
+
+struct ec_params_usb_mux {
+ uint8_t mux;
+} __packed;
+
+/*****************************************************************************/
+/* LDOs / FETs control. */
+
+enum ec_ldo_state {
+ EC_LDO_STATE_OFF = 0, /* the LDO / FET is shut down */
+ EC_LDO_STATE_ON = 1, /* the LDO / FET is ON / providing power */
+};
+
+/*
+ * Switch on/off a LDO.
+ */
+#define EC_CMD_LDO_SET 0x9b
+
+struct ec_params_ldo_set {
+ uint8_t index;
+ uint8_t state;
+} __packed;
+
+/*
+ * Get LDO state.
+ */
+#define EC_CMD_LDO_GET 0x9c
+
+struct ec_params_ldo_get {
+ uint8_t index;
+} __packed;
+
+struct ec_response_ldo_get {
+ uint8_t state;
+} __packed;
+
+/*****************************************************************************/
+/* Power info. */
+
+/*
+ * Get power info.
+ */
+#define EC_CMD_POWER_INFO 0x9d
+
+struct ec_response_power_info {
+ uint32_t usb_dev_type;
+ uint16_t voltage_ac;
+ uint16_t voltage_system;
+ uint16_t current_system;
+ uint16_t usb_current_limit;
+} __packed;
+
+/*****************************************************************************/
+/* I2C passthru command */
+
+#define EC_CMD_I2C_PASSTHRU 0x9e
+
+/* Read data; if not present, message is a write */
+#define EC_I2C_FLAG_READ (1 << 15)
+
+/* Mask for address */
+#define EC_I2C_ADDR_MASK 0x3ff
+
+#define EC_I2C_STATUS_NAK (1 << 0) /* Transfer was not acknowledged */
+#define EC_I2C_STATUS_TIMEOUT (1 << 1) /* Timeout during transfer */
+
+/* Any error */
+#define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT)
+
+struct ec_params_i2c_passthru_msg {
+ uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */
+ uint16_t len; /* Number of bytes to read or write */
+} __packed;
+
+struct ec_params_i2c_passthru {
+ uint8_t port; /* I2C port number */
+ uint8_t num_msgs; /* Number of messages */
+ struct ec_params_i2c_passthru_msg msg[];
+ /* Data to write for all messages is concatenated here */
+} __packed;
+
+struct ec_response_i2c_passthru {
+ uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */
+ uint8_t num_msgs; /* Number of messages processed */
+ uint8_t data[]; /* Data read by messages concatenated here */
+} __packed;
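+
+/*
+ * Example (illustrative sketch): a one-byte register-index write followed
+ * by a two-byte read from 7-bit address 0x50 on port 0. The request is the
+ * fixed header, then both message descriptors, then the write data
+ * concatenated at the end; buf, rsp and send_host_command() are
+ * hypothetical.
+ *
+ *	uint8_t buf[sizeof(struct ec_params_i2c_passthru) +
+ *		    2 * sizeof(struct ec_params_i2c_passthru_msg) + 1];
+ *	uint8_t rsp[sizeof(struct ec_response_i2c_passthru) + 2];
+ *	struct ec_params_i2c_passthru *p = (void *)buf;
+ *
+ *	p->port = 0;
+ *	p->num_msgs = 2;
+ *	p->msg[0].addr_flags = 0x50;			(write message)
+ *	p->msg[0].len = 1;
+ *	p->msg[1].addr_flags = 0x50 | EC_I2C_FLAG_READ;
+ *	p->msg[1].len = 2;
+ *	buf[sizeof(buf) - 1] = 0x00;			(register index)
+ *
+ *	send_host_command(EC_CMD_I2C_PASSTHRU, 0, buf, sizeof(buf),
+ *			  rsp, sizeof(rsp));
+ */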
+
+/*****************************************************************************/
+/* Power button hang detect */
+
+#define EC_CMD_HANG_DETECT 0x9f
+
+/* Reasons to start hang detection timer */
+/* Power button pressed */
+#define EC_HANG_START_ON_POWER_PRESS (1 << 0)
+
+/* Lid closed */
+#define EC_HANG_START_ON_LID_CLOSE (1 << 1)
+
+ /* Lid opened */
+#define EC_HANG_START_ON_LID_OPEN (1 << 2)
+
+/* Start of AP S3->S0 transition (booting or resuming from suspend) */
+#define EC_HANG_START_ON_RESUME (1 << 3)
+
+/* Reasons to cancel hang detection */
+
+/* Power button released */
+#define EC_HANG_STOP_ON_POWER_RELEASE (1 << 8)
+
+/* Any host command from AP received */
+#define EC_HANG_STOP_ON_HOST_COMMAND (1 << 9)
+
+/* Stop on end of AP S0->S3 transition (suspending or shutting down) */
+#define EC_HANG_STOP_ON_SUSPEND (1 << 10)
+
+/*
+ * If this flag is set, all the other fields are ignored, and the hang detect
+ * timer is started. This provides the AP a way to start the hang timer
+ * without reconfiguring any of the other hang detect settings. Note that
+ * you must previously have configured the timeouts.
+ */
+#define EC_HANG_START_NOW (1 << 30)
+
+/*
+ * If this flag is set, all the other fields are ignored (including
+ * EC_HANG_START_NOW). This provides the AP a way to stop the hang timer
+ * without reconfiguring any of the other hang detect settings.
+ */
+#define EC_HANG_STOP_NOW (1 << 31)
+
+struct ec_params_hang_detect {
+ /* Flags; see EC_HANG_* */
+ uint32_t flags;
+
+ /* Timeout in msec before generating host event, if enabled */
+ uint16_t host_event_timeout_msec;
+
+ /* Timeout in msec before generating warm reboot, if enabled */
+ uint16_t warm_reboot_timeout_msec;
+} __packed;
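+
+/*
+ * Example (illustrative sketch): arm hang detection on power-button press
+ * and on resume, cancel it on any host command, and request a warm reboot
+ * after 10 seconds (a nonzero timeout is assumed here to enable that
+ * action). send_host_command() is a hypothetical helper.
+ *
+ *	struct ec_params_hang_detect p = {
+ *		.flags = EC_HANG_START_ON_POWER_PRESS |
+ *			 EC_HANG_START_ON_RESUME |
+ *			 EC_HANG_STOP_ON_HOST_COMMAND,
+ *		.warm_reboot_timeout_msec = 10000,
+ *	};
+ *
+ *	send_host_command(EC_CMD_HANG_DETECT, 0, &p, sizeof(p), NULL, 0);
+ */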
+
+/*****************************************************************************/
+/* Commands for battery charging */
+
+/*
+ * This is the single catch-all host command to exchange data regarding the
+ * charge state machine (v2 and up).
+ */
+#define EC_CMD_CHARGE_STATE 0xa0
+
+/* Subcommands for this host command */
+enum charge_state_command {
+ CHARGE_STATE_CMD_GET_STATE,
+ CHARGE_STATE_CMD_GET_PARAM,
+ CHARGE_STATE_CMD_SET_PARAM,
+ CHARGE_STATE_NUM_CMDS
+};
+
+/*
+ * Known param numbers are defined here. Ranges are reserved for board-specific
+ * params, which are handled by the particular implementations.
+ */
+enum charge_state_params {
+ CS_PARAM_CHG_VOLTAGE, /* charger voltage limit */
+ CS_PARAM_CHG_CURRENT, /* charger current limit */
+ CS_PARAM_CHG_INPUT_CURRENT, /* charger input current limit */
+ CS_PARAM_CHG_STATUS, /* charger-specific status */
+ CS_PARAM_CHG_OPTION, /* charger-specific options */
+ /* How many so far? */
+ CS_NUM_BASE_PARAMS,
+
+ /* Range for CONFIG_CHARGER_PROFILE_OVERRIDE params */
+ CS_PARAM_CUSTOM_PROFILE_MIN = 0x10000,
+ CS_PARAM_CUSTOM_PROFILE_MAX = 0x1ffff,
+
+ /* Other custom param ranges go here... */
+};
+
+struct ec_params_charge_state {
+ uint8_t cmd; /* enum charge_state_command */
+ union {
+ struct {
+ /* no args */
+ } get_state;
+
+ struct {
+	uint32_t param; /* enum charge_state_params */
+ } get_param;
+
+ struct {
+ uint32_t param; /* param to set */
+ uint32_t value; /* value to set */
+ } set_param;
+ };
+} __packed;
+
+struct ec_response_charge_state {
+ union {
+ struct {
+ int ac;
+ int chg_voltage;
+ int chg_current;
+ int chg_input_current;
+ int batt_state_of_charge;
+ } get_state;
+
+ struct {
+ uint32_t value;
+ } get_param;
+ struct {
+ /* no return values */
+ } set_param;
+ };
+} __packed;
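+
+/*
+ * Example (illustrative sketch): reading back the charger voltage limit
+ * with the GET_PARAM subcommand, via the hypothetical send_host_command()
+ * helper.
+ *
+ *	struct ec_params_charge_state p = {
+ *		.cmd = CHARGE_STATE_CMD_GET_PARAM,
+ *		.get_param = { .param = CS_PARAM_CHG_VOLTAGE },
+ *	};
+ *	struct ec_response_charge_state r;
+ *
+ *	send_host_command(EC_CMD_CHARGE_STATE, 0, &p, sizeof(p),
+ *			  &r, sizeof(r));
+ *	(the limit is then in r.get_param.value)
+ */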
+
+
+/*
+ * Set maximum battery charging current.
+ */
+#define EC_CMD_CHARGE_CURRENT_LIMIT 0xa1
+
+struct ec_params_current_limit {
+ uint32_t limit; /* in mA */
+} __packed;
+
+/*
+ * Set maximum external power current.
+ */
+#define EC_CMD_EXT_POWER_CURRENT_LIMIT 0xa2
+
+struct ec_params_ext_power_current_limit {
+ uint32_t limit; /* in mA */
+} __packed;
+
+/*****************************************************************************/
+/* Smart battery pass-through */
+
+/* Get / Set 16-bit smart battery registers */
+#define EC_CMD_SB_READ_WORD 0xb0
+#define EC_CMD_SB_WRITE_WORD 0xb1
+
+/* Get / Set string smart battery parameters
+ * formatted as SMBUS "block".
+ */
+#define EC_CMD_SB_READ_BLOCK 0xb2
+#define EC_CMD_SB_WRITE_BLOCK 0xb3
+
+struct ec_params_sb_rd {
+ uint8_t reg;
+} __packed;
+
+struct ec_response_sb_rd_word {
+ uint16_t value;
+} __packed;
+
+struct ec_params_sb_wr_word {
+ uint8_t reg;
+ uint16_t value;
+} __packed;
+
+struct ec_response_sb_rd_block {
+ uint8_t data[32];
+} __packed;
+
+struct ec_params_sb_wr_block {
+ uint8_t reg;
+ uint16_t data[32];
+} __packed;
+
+/*****************************************************************************/
+/* System commands */
+
+/*
+ * TODO(crosbug.com/p/23747): This is a confusing name, since it doesn't
+ * necessarily reboot the EC. Rename to "image" or something similar?
+ */
+#define EC_CMD_REBOOT_EC 0xd2
+
+/* Command */
+enum ec_reboot_cmd {
+ EC_REBOOT_CANCEL = 0, /* Cancel a pending reboot */
+ EC_REBOOT_JUMP_RO = 1, /* Jump to RO without rebooting */
+ EC_REBOOT_JUMP_RW = 2, /* Jump to RW without rebooting */
+ /* (command 3 was jump to RW-B) */
+ EC_REBOOT_COLD = 4, /* Cold-reboot */
+ EC_REBOOT_DISABLE_JUMP = 5, /* Disable jump until next reboot */
+ EC_REBOOT_HIBERNATE = 6 /* Hibernate EC */
+};
+
+/* Flags for ec_params_reboot_ec.reboot_flags */
+#define EC_REBOOT_FLAG_RESERVED0 (1 << 0) /* Was recovery request */
+#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN (1 << 1) /* Reboot after AP shutdown */
+
+struct ec_params_reboot_ec {
+ uint8_t cmd; /* enum ec_reboot_cmd */
+ uint8_t flags; /* See EC_REBOOT_FLAG_* */
+} __packed;
+
+/*
+ * Get information on last EC panic.
+ *
+ * Returns variable-length platform-dependent panic information. See panic.h
+ * for details.
+ */
+#define EC_CMD_GET_PANIC_INFO 0xd3
+
+/*****************************************************************************/
+/*
+ * ACPI commands
+ *
+ * These are valid ONLY on the ACPI command/data port.
+ */
+
+/*
+ * ACPI Read Embedded Controller
+ *
+ * This reads from ACPI memory space on the EC (EC_ACPI_MEM_*).
+ *
+ * Use the following sequence:
+ *
+ * - Write EC_CMD_ACPI_READ to EC_LPC_ADDR_ACPI_CMD
+ * - Wait for EC_LPC_CMDR_PENDING bit to clear
+ * - Write address to EC_LPC_ADDR_ACPI_DATA
+ * - Wait for EC_LPC_CMDR_DATA bit to set
+ * - Read value from EC_LPC_ADDR_ACPI_DATA
+ */
+#define EC_CMD_ACPI_READ 0x80
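+
+/*
+ * Example (illustrative sketch): reading one byte of ACPI memory space
+ * following the sequence above. x86-style inb()/outb() accessors stand in
+ * for whatever port I/O the platform provides, and the status bits are
+ * assumed to be read back from the command port as in the standard ACPI
+ * EC interface.
+ *
+ *	uint8_t version;
+ *
+ *	outb(EC_CMD_ACPI_READ, EC_LPC_ADDR_ACPI_CMD);
+ *	while (inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_PENDING)
+ *		;
+ *	outb(EC_ACPI_MEM_VERSION, EC_LPC_ADDR_ACPI_DATA);
+ *	while (!(inb(EC_LPC_ADDR_ACPI_CMD) & EC_LPC_CMDR_DATA))
+ *		;
+ *	version = inb(EC_LPC_ADDR_ACPI_DATA);
+ */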
+
+/*
+ * ACPI Write Embedded Controller
+ *
+ * This writes to ACPI memory space on the EC (EC_ACPI_MEM_*).
+ *
+ * Use the following sequence:
+ *
+ * - Write EC_CMD_ACPI_WRITE to EC_LPC_ADDR_ACPI_CMD
+ * - Wait for EC_LPC_CMDR_PENDING bit to clear
+ * - Write address to EC_LPC_ADDR_ACPI_DATA
+ * - Wait for EC_LPC_CMDR_PENDING bit to clear
+ * - Write value to EC_LPC_ADDR_ACPI_DATA
+ */
+#define EC_CMD_ACPI_WRITE 0x81
+
+/*
+ * ACPI Query Embedded Controller
+ *
+ * This clears the lowest-order bit in the currently pending host events, and
+ * sets the result code to the 1-based index of the bit (event 0x00000001 = 1,
+ * event 0x80000000 = 32), or 0 if no event was pending.
+ */
+#define EC_CMD_ACPI_QUERY_EVENT 0x84
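+
+/*
+ * Example (illustrative sketch): if host events 0x00000005 are pending,
+ * the first query clears bit 0 and returns 1, leaving 0x00000004 pending;
+ * the next query clears bit 2 and returns 3; a further query returns 0.
+ */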
+
+/* Valid addresses in ACPI memory space, for read/write commands */
+
+/* Memory space version; set to EC_ACPI_MEM_VERSION_CURRENT */
+#define EC_ACPI_MEM_VERSION 0x00
+/*
+ * Test location; writing a value here updates the test complement byte to
+ * (0xff - value).
+ */
+#define EC_ACPI_MEM_TEST 0x01
+/* Test complement; writes here are ignored. */
+#define EC_ACPI_MEM_TEST_COMPLIMENT 0x02
+
+/* Keyboard backlight brightness percent (0 - 100) */
+#define EC_ACPI_MEM_KEYBOARD_BACKLIGHT 0x03
+/* DPTF Target Fan Duty (0-100, 0xff for auto/none) */
+#define EC_ACPI_MEM_FAN_DUTY 0x04
+
+/*
+ * DPTF temp thresholds. Any of the EC's temp sensors can have up to two
+ * independent thresholds attached to them. The current value of the ID
+ * register determines which sensor is affected by the THRESHOLD and COMMIT
+ * registers. The THRESHOLD register uses the same EC_TEMP_SENSOR_OFFSET scheme
+ * as the memory-mapped sensors. The COMMIT register applies those settings.
+ *
+ * The spec does not mandate any way to read back the threshold settings
+ * themselves, but when a threshold is crossed the AP needs a way to determine
+ * which sensor(s) are responsible. Each reading of the ID register clears and
+ * returns one sensor ID that has crossed one of its thresholds (in either
+ * direction) since the last read. A value of 0xFF means "no new thresholds
+ * have tripped". Setting or enabling the thresholds for a sensor will clear
+ * the unread event count for that sensor.
+ */
+#define EC_ACPI_MEM_TEMP_ID 0x05
+#define EC_ACPI_MEM_TEMP_THRESHOLD 0x06
+#define EC_ACPI_MEM_TEMP_COMMIT 0x07
+/*
+ * Here are the bits for the COMMIT register:
+ * bit 0 selects the threshold index for the chosen sensor (0/1)
+ * bit 1 enables/disables the selected threshold (0 = off, 1 = on)
+ * Each write to the commit register affects one threshold.
+ */
+#define EC_ACPI_MEM_TEMP_COMMIT_SELECT_MASK (1 << 0)
+#define EC_ACPI_MEM_TEMP_COMMIT_ENABLE_MASK (1 << 1)
+/*
+ * Example:
+ *
+ * Set the thresholds for sensor 2 to 50 C and 60 C:
+ * write 2 to [0x05] -- select temp sensor 2
+ * write 0x7b to [0x06] -- C_TO_K(50) - EC_TEMP_SENSOR_OFFSET
+ * write 0x2 to [0x07] -- enable threshold 0 with this value
+ * write 0x85 to [0x06] -- C_TO_K(60) - EC_TEMP_SENSOR_OFFSET
+ * write 0x3 to [0x07] -- enable threshold 1 with this value
+ *
+ * Disable the 60 C threshold, leaving the 50 C threshold unchanged:
+ * write 2 to [0x05] -- select temp sensor 2
+ * write 0x1 to [0x07] -- disable threshold 1
+ */
+
+/* DPTF battery charging current limit */
+#define EC_ACPI_MEM_CHARGING_LIMIT 0x08
+
+/* Charging limit is specified in 64 mA steps */
+#define EC_ACPI_MEM_CHARGING_LIMIT_STEP_MA 64
+/* Value to disable DPTF battery charging limit */
+#define EC_ACPI_MEM_CHARGING_LIMIT_DISABLED 0xff
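+
+/*
+ * Example (illustrative sketch): writing 16 to EC_ACPI_MEM_CHARGING_LIMIT
+ * requests a limit of 16 * 64 = 1024 mA; writing
+ * EC_ACPI_MEM_CHARGING_LIMIT_DISABLED (0xff) removes the limit again.
+ */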
+
+/* Current version of ACPI memory address space */
+#define EC_ACPI_MEM_VERSION_CURRENT 1
+
+
+/*****************************************************************************/
+/*
+ * Special commands
+ *
+ * These do not follow the normal rules for commands. See each command for
+ * details.
+ */
+
+/*
+ * Reboot NOW
+ *
+ * This command will work even when the EC LPC interface is busy, because the
+ * reboot command is processed at interrupt level. Note that when the EC
+ * reboots, the host will reboot too, so there is no response to this command.
+ *
+ * Use EC_CMD_REBOOT_EC to reboot the EC more politely.
+ */
+#define EC_CMD_REBOOT 0xd1 /* Think "die" */
+
+/*
+ * Resend last response (not supported on LPC).
+ *
+ * Returns EC_RES_UNAVAILABLE if there is no response available - for example,
+ * there was no previous command, or the previous command's response was too
+ * big to save.
+ */
+#define EC_CMD_RESEND_RESPONSE 0xdb
+
+/*
+ * This header byte on a command indicates version 0. Any header byte less
+ * than this means that we are talking to an old EC which doesn't support
+ * versioning. In that case, we assume version 0.
+ *
+ * Header bytes greater than this indicate a later version. For example,
+ * EC_CMD_VERSION0 + 1 means we are using version 1.
+ *
+ * The old EC interface must not use commands 0xdc or higher.
+ */
+#define EC_CMD_VERSION0 0xdc
+
+#endif /* !__ACPI__ */
+
+/*****************************************************************************/
+/*
+ * Deprecated constants. These constants have been renamed for clarity. The
+ * meaning and size has not changed. Programs that use the old names should
+ * switch to the new names soon, as the old names may not be carried forward
+ * forever.
+ */
+#define EC_HOST_PARAM_SIZE EC_PROTO2_MAX_PARAM_SIZE
+#define EC_LPC_ADDR_OLD_PARAM EC_HOST_CMD_REGION1
+#define EC_OLD_PARAM_SIZE EC_HOST_CMD_REGION_SIZE
+
+#endif /* __CROS_EC_COMMANDS_H */
diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h
new file mode 100644
index 000000000..0aa3a1a49
--- /dev/null
+++ b/include/linux/mfd/da903x.h
@@ -0,0 +1,247 @@
+#ifndef __LINUX_PMIC_DA903X_H
+#define __LINUX_PMIC_DA903X_H
+
+/* Unified sub device IDs for DA9030/DA9034/DA9035 */
+enum {
+ DA9030_ID_LED_1,
+ DA9030_ID_LED_2,
+ DA9030_ID_LED_3,
+ DA9030_ID_LED_4,
+ DA9030_ID_LED_PC,
+ DA9030_ID_VIBRA,
+ DA9030_ID_WLED,
+ DA9030_ID_BUCK1,
+ DA9030_ID_BUCK2,
+ DA9030_ID_LDO1,
+ DA9030_ID_LDO2,
+ DA9030_ID_LDO3,
+ DA9030_ID_LDO4,
+ DA9030_ID_LDO5,
+ DA9030_ID_LDO6,
+ DA9030_ID_LDO7,
+ DA9030_ID_LDO8,
+ DA9030_ID_LDO9,
+ DA9030_ID_LDO10,
+ DA9030_ID_LDO11,
+ DA9030_ID_LDO12,
+ DA9030_ID_LDO13,
+ DA9030_ID_LDO14,
+ DA9030_ID_LDO15,
+ DA9030_ID_LDO16,
+ DA9030_ID_LDO17,
+ DA9030_ID_LDO18,
+ DA9030_ID_LDO19,
+ DA9030_ID_LDO_INT, /* LDO Internal */
+ DA9030_ID_BAT, /* battery charger */
+
+ DA9034_ID_LED_1,
+ DA9034_ID_LED_2,
+ DA9034_ID_VIBRA,
+ DA9034_ID_WLED,
+ DA9034_ID_TOUCH,
+
+ DA9034_ID_BUCK1,
+ DA9034_ID_BUCK2,
+ DA9034_ID_LDO1,
+ DA9034_ID_LDO2,
+ DA9034_ID_LDO3,
+ DA9034_ID_LDO4,
+ DA9034_ID_LDO5,
+ DA9034_ID_LDO6,
+ DA9034_ID_LDO7,
+ DA9034_ID_LDO8,
+ DA9034_ID_LDO9,
+ DA9034_ID_LDO10,
+ DA9034_ID_LDO11,
+ DA9034_ID_LDO12,
+ DA9034_ID_LDO13,
+ DA9034_ID_LDO14,
+ DA9034_ID_LDO15,
+
+ DA9035_ID_BUCK3,
+};
+
+/*
+ * DA9030/DA9034 LED sub-devices use the generic "struct led_info"
+ * as the platform_data
+ */
+
+/* DA9030 flags for "struct led_info" */
+#define DA9030_LED_RATE_ON (0 << 5)
+#define DA9030_LED_RATE_052S (1 << 5)
+#define DA9030_LED_DUTY_1_16 (0 << 3)
+#define DA9030_LED_DUTY_1_8 (1 << 3)
+#define DA9030_LED_DUTY_1_4 (2 << 3)
+#define DA9030_LED_DUTY_1_2 (3 << 3)
+
+#define DA9030_VIBRA_MODE_1P3V (0 << 1)
+#define DA9030_VIBRA_MODE_2P7V (1 << 1)
+#define DA9030_VIBRA_FREQ_1HZ (0 << 2)
+#define DA9030_VIBRA_FREQ_2HZ (1 << 2)
+#define DA9030_VIBRA_FREQ_4HZ (2 << 2)
+#define DA9030_VIBRA_FREQ_8HZ (3 << 2)
+#define DA9030_VIBRA_DUTY_ON (0 << 4)
+#define DA9030_VIBRA_DUTY_75P (1 << 4)
+#define DA9030_VIBRA_DUTY_50P (2 << 4)
+#define DA9030_VIBRA_DUTY_25P (3 << 4)
+
+/* DA9034 flags for "struct led_info" */
+#define DA9034_LED_RAMP (1 << 7)
+
+/* DA9034 touch screen platform data */
+struct da9034_touch_pdata {
+ int interval_ms; /* sampling interval while pen down */
+ int x_inverted;
+ int y_inverted;
+};
+
+struct da9034_backlight_pdata {
+ int output_current; /* output current of WLED, from 0-31 (in mA) */
+};
+
+/* DA9030 battery charger data */
+struct power_supply_info;
+
+struct da9030_battery_info {
+ /* battery parameters */
+ struct power_supply_info *battery_info;
+
+ /* current and voltage to use for battery charging */
+ unsigned int charge_milliamp;
+ unsigned int charge_millivolt;
+
+ /* voltage thresholds (in millivolts) */
+ int vbat_low;
+ int vbat_crit;
+ int vbat_charge_start;
+ int vbat_charge_stop;
+ int vbat_charge_restart;
+
+ /* battery nominal minimal and maximal voltages in millivolts */
+ int vcharge_min;
+ int vcharge_max;
+
+ /* Temperature thresholds. These are DA9030 register values
+ "as is" and should be measured for each battery type */
+ int tbat_low;
+ int tbat_high;
+ int tbat_restart;
+
+
+ /* battery monitor interval (seconds) */
+ unsigned int batmon_interval;
+
+ /* platform callbacks for battery low and critical events */
+ void (*battery_low)(void);
+ void (*battery_critical)(void);
+};
+
+struct da903x_subdev_info {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct da903x_platform_data {
+ int num_subdevs;
+ struct da903x_subdev_info *subdevs;
+};
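+
+/*
+ * Example (illustrative sketch): a board file describing a single WLED
+ * backlight sub-device. The sub-device name and values below are purely
+ * illustrative; real boards list one da903x_subdev_info entry per
+ * sub-device they actually use.
+ *
+ *	static struct da9034_backlight_pdata board_backlight_pdata = {
+ *		.output_current = 10,
+ *	};
+ *
+ *	static struct da903x_subdev_info board_da9034_subdevs[] = {
+ *		{
+ *			.id = DA9034_ID_WLED,
+ *			.name = "da903x-backlight",
+ *			.platform_data = &board_backlight_pdata,
+ *		},
+ *	};
+ *
+ *	static struct da903x_platform_data board_da9034_pdata = {
+ *		.num_subdevs = ARRAY_SIZE(board_da9034_subdevs),
+ *		.subdevs = board_da9034_subdevs,
+ *	};
+ */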
+
+/* bit definitions for DA9030 events */
+#define DA9030_EVENT_ONKEY (1 << 0)
+#define DA9030_EVENT_PWREN (1 << 1)
+#define DA9030_EVENT_EXTON (1 << 2)
+#define DA9030_EVENT_CHDET (1 << 3)
+#define DA9030_EVENT_TBAT (1 << 4)
+#define DA9030_EVENT_VBATMON (1 << 5)
+#define DA9030_EVENT_VBATMON_TXON (1 << 6)
+#define DA9030_EVENT_CHIOVER (1 << 7)
+#define DA9030_EVENT_TCTO (1 << 8)
+#define DA9030_EVENT_CCTO (1 << 9)
+#define DA9030_EVENT_ADC_READY (1 << 10)
+#define DA9030_EVENT_VBUS_4P4 (1 << 11)
+#define DA9030_EVENT_VBUS_4P0 (1 << 12)
+#define DA9030_EVENT_SESS_VALID (1 << 13)
+#define DA9030_EVENT_SRP_DETECT (1 << 14)
+#define DA9030_EVENT_WATCHDOG (1 << 15)
+#define DA9030_EVENT_LDO15 (1 << 16)
+#define DA9030_EVENT_LDO16 (1 << 17)
+#define DA9030_EVENT_LDO17 (1 << 18)
+#define DA9030_EVENT_LDO18 (1 << 19)
+#define DA9030_EVENT_LDO19 (1 << 20)
+#define DA9030_EVENT_BUCK2 (1 << 21)
+
+/* bit definitions for DA9034 events */
+#define DA9034_EVENT_ONKEY (1 << 0)
+#define DA9034_EVENT_EXTON (1 << 2)
+#define DA9034_EVENT_CHDET (1 << 3)
+#define DA9034_EVENT_TBAT (1 << 4)
+#define DA9034_EVENT_VBATMON (1 << 5)
+#define DA9034_EVENT_REV_IOVER (1 << 6)
+#define DA9034_EVENT_CH_IOVER (1 << 7)
+#define DA9034_EVENT_CH_TCTO (1 << 8)
+#define DA9034_EVENT_CH_CCTO (1 << 9)
+#define DA9034_EVENT_USB_DEV (1 << 10)
+#define DA9034_EVENT_OTGCP_IOVER (1 << 11)
+#define DA9034_EVENT_VBUS_4P55 (1 << 12)
+#define DA9034_EVENT_VBUS_3P8 (1 << 13)
+#define DA9034_EVENT_SESS_1P8 (1 << 14)
+#define DA9034_EVENT_SRP_READY (1 << 15)
+#define DA9034_EVENT_ADC_MAN (1 << 16)
+#define DA9034_EVENT_ADC_AUTO4 (1 << 17)
+#define DA9034_EVENT_ADC_AUTO5 (1 << 18)
+#define DA9034_EVENT_ADC_AUTO6 (1 << 19)
+#define DA9034_EVENT_PEN_DOWN (1 << 20)
+#define DA9034_EVENT_TSI_READY (1 << 21)
+#define DA9034_EVENT_UART_TX (1 << 22)
+#define DA9034_EVENT_UART_RX (1 << 23)
+#define DA9034_EVENT_HEADSET (1 << 25)
+#define DA9034_EVENT_HOOKSWITCH (1 << 26)
+#define DA9034_EVENT_WATCHDOG (1 << 27)
+
+extern int da903x_register_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+extern int da903x_unregister_notifier(struct device *dev,
+ struct notifier_block *nb, unsigned int events);
+
+/* Status Query Interface */
+#define DA9030_STATUS_ONKEY (1 << 0)
+#define DA9030_STATUS_PWREN1 (1 << 1)
+#define DA9030_STATUS_EXTON (1 << 2)
+#define DA9030_STATUS_CHDET (1 << 3)
+#define DA9030_STATUS_TBAT (1 << 4)
+#define DA9030_STATUS_VBATMON (1 << 5)
+#define DA9030_STATUS_VBATMON_TXON (1 << 6)
+#define DA9030_STATUS_MCLKDET (1 << 7)
+
+#define DA9034_STATUS_ONKEY (1 << 0)
+#define DA9034_STATUS_EXTON (1 << 2)
+#define DA9034_STATUS_CHDET (1 << 3)
+#define DA9034_STATUS_TBAT (1 << 4)
+#define DA9034_STATUS_VBATMON (1 << 5)
+#define DA9034_STATUS_PEN_DOWN (1 << 6)
+#define DA9034_STATUS_MCLKDET (1 << 7)
+#define DA9034_STATUS_USB_DEV (1 << 8)
+#define DA9034_STATUS_HEADSET (1 << 9)
+#define DA9034_STATUS_HOOKSWITCH (1 << 10)
+#define DA9034_STATUS_REMCON (1 << 11)
+#define DA9034_STATUS_VBUS_VALID_4P55 (1 << 12)
+#define DA9034_STATUS_VBUS_VALID_3P8 (1 << 13)
+#define DA9034_STATUS_SESS_VALID_1P8 (1 << 14)
+#define DA9034_STATUS_SRP_READY (1 << 15)
+
+extern int da903x_query_status(struct device *dev, unsigned int status);
+
+
+/* NOTE: the functions below are not intended for use outside
+ * of the DA903x sub-device drivers
+ */
+extern int da903x_write(struct device *dev, int reg, uint8_t val);
+extern int da903x_writes(struct device *dev, int reg, int len, uint8_t *val);
+extern int da903x_read(struct device *dev, int reg, uint8_t *val);
+extern int da903x_reads(struct device *dev, int reg, int len, uint8_t *val);
+extern int da903x_update(struct device *dev, int reg, uint8_t val, uint8_t mask);
+extern int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int da903x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+#endif /* __LINUX_PMIC_DA903X_H */
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
new file mode 100644
index 000000000..c18a4c19d
--- /dev/null
+++ b/include/linux/mfd/da9052/da9052.h
@@ -0,0 +1,226 @@
+/*
+ * da9052 declarations for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __MFD_DA9052_DA9052_H
+#define __MFD_DA9052_DA9052_H
+
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mfd/core.h>
+
+#include <linux/mfd/da9052/reg.h>
+
+/* Common - HWMON Channel Definitions */
+#define DA9052_ADC_VDDOUT 0
+#define DA9052_ADC_ICH 1
+#define DA9052_ADC_TBAT 2
+#define DA9052_ADC_VBAT 3
+#define DA9052_ADC_IN4 4
+#define DA9052_ADC_IN5 5
+#define DA9052_ADC_IN6 6
+#define DA9052_ADC_TSI 7
+#define DA9052_ADC_TJUNC 8
+#define DA9052_ADC_VBBAT 9
+
+#define DA9052_IRQ_DCIN 0
+#define DA9052_IRQ_VBUS 1
+#define DA9052_IRQ_DCINREM 2
+#define DA9052_IRQ_VBUSREM 3
+#define DA9052_IRQ_VDDLOW 4
+#define DA9052_IRQ_ALARM 5
+#define DA9052_IRQ_SEQRDY 6
+#define DA9052_IRQ_COMP1V2 7
+#define DA9052_IRQ_NONKEY 8
+#define DA9052_IRQ_IDFLOAT 9
+#define DA9052_IRQ_IDGND 10
+#define DA9052_IRQ_CHGEND 11
+#define DA9052_IRQ_TBAT 12
+#define DA9052_IRQ_ADC_EOM 13
+#define DA9052_IRQ_PENDOWN 14
+#define DA9052_IRQ_TSIREADY 15
+#define DA9052_IRQ_GPI0 16
+#define DA9052_IRQ_GPI1 17
+#define DA9052_IRQ_GPI2 18
+#define DA9052_IRQ_GPI3 19
+#define DA9052_IRQ_GPI4 20
+#define DA9052_IRQ_GPI5 21
+#define DA9052_IRQ_GPI6 22
+#define DA9052_IRQ_GPI7 23
+#define DA9052_IRQ_GPI8 24
+#define DA9052_IRQ_GPI9 25
+#define DA9052_IRQ_GPI10 26
+#define DA9052_IRQ_GPI11 27
+#define DA9052_IRQ_GPI12 28
+#define DA9052_IRQ_GPI13 29
+#define DA9052_IRQ_GPI14 30
+#define DA9052_IRQ_GPI15 31
+
+enum da9052_chip_id {
+ DA9052,
+ DA9053_AA,
+ DA9053_BA,
+ DA9053_BB,
+ DA9053_BC,
+};
+
+struct da9052_pdata;
+
+struct da9052 {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct mutex auxadc_lock;
+ struct completion done;
+
+ int irq_base;
+ struct regmap_irq_chip_data *irq_data;
+ u8 chip_id;
+
+ int chip_irq;
+
+ /* SOC I/O transfer related fixes for DA9052/53 */
+ int (*fix_io) (struct da9052 *da9052, unsigned char reg);
+};
+
+/* ADC API */
+int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel);
+int da9052_adc_read_temp(struct da9052 *da9052);
+
+/* Device I/O API */
+static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
+{
+ int val, ret;
+
+ ret = regmap_read(da9052->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ if (da9052->fix_io) {
+ ret = da9052->fix_io(da9052, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ return val;
+}
+
+static inline int da9052_reg_write(struct da9052 *da9052, unsigned char reg,
+ unsigned char val)
+{
+ int ret;
+
+ ret = regmap_write(da9052->regmap, reg, val);
+ if (ret < 0)
+ return ret;
+
+ if (da9052->fix_io) {
+ ret = da9052->fix_io(da9052, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline int da9052_group_read(struct da9052 *da9052, unsigned char reg,
+ unsigned reg_cnt, unsigned char *val)
+{
+ int ret;
+ unsigned int tmp;
+ int i;
+
+ for (i = 0; i < reg_cnt; i++) {
+		ret = regmap_read(da9052->regmap, reg + i, &tmp);
+		if (ret < 0)
+			return ret;
+		val[i] = (unsigned char)tmp;
+ }
+
+ if (da9052->fix_io) {
+ ret = da9052->fix_io(da9052, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline int da9052_group_write(struct da9052 *da9052, unsigned char reg,
+ unsigned reg_cnt, unsigned char *val)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < reg_cnt; i++) {
+ ret = regmap_write(da9052->regmap, reg + i, val[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (da9052->fix_io) {
+ ret = da9052->fix_io(da9052, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg,
+ unsigned char bit_mask,
+ unsigned char reg_val)
+{
+ int ret;
+
+ ret = regmap_update_bits(da9052->regmap, reg, bit_mask, reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (da9052->fix_io) {
+ ret = da9052->fix_io(da9052, reg);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+int da9052_device_init(struct da9052 *da9052, u8 chip_id);
+void da9052_device_exit(struct da9052 *da9052);
+
+extern const struct regmap_config da9052_regmap_config;
+
+int da9052_irq_init(struct da9052 *da9052);
+int da9052_irq_exit(struct da9052 *da9052);
+int da9052_request_irq(struct da9052 *da9052, int irq, char *name,
+ irq_handler_t handler, void *data);
+void da9052_free_irq(struct da9052 *da9052, int irq, void *data);
+
+int da9052_enable_irq(struct da9052 *da9052, int irq);
+int da9052_disable_irq(struct da9052 *da9052, int irq);
+int da9052_disable_irq_nosync(struct da9052 *da9052, int irq);
+
+#endif /* __MFD_DA9052_DA9052_H */
diff --git a/include/linux/mfd/da9052/pdata.h b/include/linux/mfd/da9052/pdata.h
new file mode 100644
index 000000000..62c5c3c29
--- /dev/null
+++ b/include/linux/mfd/da9052/pdata.h
@@ -0,0 +1,40 @@
+/*
+ * Platform data declarations for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __MFD_DA9052_PDATA_H__
+#define __MFD_DA9052_PDATA_H__
+
+#define DA9052_MAX_REGULATORS 14
+
+struct da9052;
+
+struct da9052_pdata {
+ struct led_platform_data *pled;
+ int (*init) (struct da9052 *da9052);
+ int irq_base;
+ int gpio_base;
+ int use_for_apm;
+ struct regulator_init_data *regulators[DA9052_MAX_REGULATORS];
+};
+
+#endif
diff --git a/include/linux/mfd/da9052/reg.h b/include/linux/mfd/da9052/reg.h
new file mode 100644
index 000000000..c4dd3a8ad
--- /dev/null
+++ b/include/linux/mfd/da9052/reg.h
@@ -0,0 +1,752 @@
+/*
+ * Register declarations for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_MFD_DA9052_REG_H
+#define __LINUX_MFD_DA9052_REG_H
+
+/* PAGE REGISTERS */
+#define DA9052_PAGE0_CON_REG 0
+#define DA9052_PAGE1_CON_REG 128
+
+/* STATUS REGISTERS */
+#define DA9052_STATUS_A_REG 1
+#define DA9052_STATUS_B_REG 2
+#define DA9052_STATUS_C_REG 3
+#define DA9052_STATUS_D_REG 4
+
+/* PARK REGISTER */
+#define DA9052_PARK_REGISTER DA9052_STATUS_D_REG
+
+/* EVENT REGISTERS */
+#define DA9052_EVENT_A_REG 5
+#define DA9052_EVENT_B_REG 6
+#define DA9052_EVENT_C_REG 7
+#define DA9052_EVENT_D_REG 8
+#define DA9052_FAULTLOG_REG 9
+
+/* IRQ REGISTERS */
+#define DA9052_IRQ_MASK_A_REG 10
+#define DA9052_IRQ_MASK_B_REG 11
+#define DA9052_IRQ_MASK_C_REG 12
+#define DA9052_IRQ_MASK_D_REG 13
+
+/* CONTROL REGISTERS */
+#define DA9052_CONTROL_A_REG 14
+#define DA9052_CONTROL_B_REG 15
+#define DA9052_CONTROL_C_REG 16
+#define DA9052_CONTROL_D_REG 17
+
+#define DA9052_PDDIS_REG 18
+#define DA9052_INTERFACE_REG 19
+#define DA9052_RESET_REG 20
+
+/* GPIO REGISTERS */
+#define DA9052_GPIO_0_1_REG 21
+#define DA9052_GPIO_2_3_REG 22
+#define DA9052_GPIO_4_5_REG 23
+#define DA9052_GPIO_6_7_REG 24
+#define DA9052_GPIO_14_15_REG 28
+
+/* POWER SEQUENCER CONTROL REGISTERS */
+#define DA9052_ID_0_1_REG 29
+#define DA9052_ID_2_3_REG 30
+#define DA9052_ID_4_5_REG 31
+#define DA9052_ID_6_7_REG 32
+#define DA9052_ID_8_9_REG 33
+#define DA9052_ID_10_11_REG 34
+#define DA9052_ID_12_13_REG 35
+#define DA9052_ID_14_15_REG 36
+#define DA9052_ID_16_17_REG 37
+#define DA9052_ID_18_19_REG 38
+#define DA9052_ID_20_21_REG 39
+#define DA9052_SEQ_STATUS_REG 40
+#define DA9052_SEQ_A_REG 41
+#define DA9052_SEQ_B_REG 42
+#define DA9052_SEQ_TIMER_REG 43
+
+/* LDO AND BUCK REGISTERS */
+#define DA9052_BUCKA_REG 44
+#define DA9052_BUCKB_REG 45
+#define DA9052_BUCKCORE_REG 46
+#define DA9052_BUCKPRO_REG 47
+#define DA9052_BUCKMEM_REG 48
+#define DA9052_BUCKPERI_REG 49
+#define DA9052_LDO1_REG 50
+#define DA9052_LDO2_REG 51
+#define DA9052_LDO3_REG 52
+#define DA9052_LDO4_REG 53
+#define DA9052_LDO5_REG 54
+#define DA9052_LDO6_REG 55
+#define DA9052_LDO7_REG 56
+#define DA9052_LDO8_REG 57
+#define DA9052_LDO9_REG 58
+#define DA9052_LDO10_REG 59
+#define DA9052_SUPPLY_REG 60
+#define DA9052_PULLDOWN_REG 61
+#define DA9052_CHGBUCK_REG 62
+#define DA9052_WAITCONT_REG 63
+#define DA9052_ISET_REG 64
+#define DA9052_BATCHG_REG 65
+
+/* BATTERY CONTROL REGISTERS */
+#define DA9052_CHG_CONT_REG 66
+#define DA9052_INPUT_CONT_REG 67
+#define DA9052_CHG_TIME_REG 68
+#define DA9052_BBAT_CONT_REG 69
+
+/* LED CONTROL REGISTERS */
+#define DA9052_BOOST_REG 70
+#define DA9052_LED_CONT_REG 71
+#define DA9052_LEDMIN123_REG 72
+#define DA9052_LED1_CONF_REG 73
+#define DA9052_LED2_CONF_REG 74
+#define DA9052_LED3_CONF_REG 75
+#define DA9052_LED1CONT_REG 76
+#define DA9052_LED2CONT_REG 77
+#define DA9052_LED3CONT_REG 78
+#define DA9052_LED_CONT_4_REG 79
+#define DA9052_LED_CONT_5_REG 80
+
+/* ADC CONTROL REGISTERS */
+#define DA9052_ADC_MAN_REG 81
+#define DA9052_ADC_CONT_REG 82
+#define DA9052_ADC_RES_L_REG 83
+#define DA9052_ADC_RES_H_REG 84
+#define DA9052_VDD_RES_REG 85
+#define DA9052_VDD_MON_REG 86
+
+#define DA9052_ICHG_AV_REG 87
+#define DA9052_ICHG_THD_REG 88
+#define DA9052_ICHG_END_REG 89
+#define DA9052_TBAT_RES_REG 90
+#define DA9052_TBAT_HIGHP_REG 91
+#define DA9052_TBAT_HIGHN_REG 92
+#define DA9052_TBAT_LOW_REG 93
+#define DA9052_T_OFFSET_REG 94
+
+#define DA9052_ADCIN4_RES_REG 95
+#define DA9052_AUTO4_HIGH_REG 96
+#define DA9052_AUTO4_LOW_REG 97
+#define DA9052_ADCIN5_RES_REG 98
+#define DA9052_AUTO5_HIGH_REG 99
+#define DA9052_AUTO5_LOW_REG 100
+#define DA9052_ADCIN6_RES_REG 101
+#define DA9052_AUTO6_HIGH_REG 102
+#define DA9052_AUTO6_LOW_REG 103
+
+#define DA9052_TJUNC_RES_REG 104
+
+/* TSI CONTROL REGISTERS */
+#define DA9052_TSI_CONT_A_REG 105
+#define DA9052_TSI_CONT_B_REG 106
+#define DA9052_TSI_X_MSB_REG 107
+#define DA9052_TSI_Y_MSB_REG 108
+#define DA9052_TSI_LSB_REG 109
+#define DA9052_TSI_Z_MSB_REG 110
+
+/* RTC COUNT REGISTERS */
+#define DA9052_COUNT_S_REG 111
+#define DA9052_COUNT_MI_REG 112
+#define DA9052_COUNT_H_REG 113
+#define DA9052_COUNT_D_REG 114
+#define DA9052_COUNT_MO_REG 115
+#define DA9052_COUNT_Y_REG 116
+
+/* RTC CONTROL REGISTERS */
+#define DA9052_ALARM_MI_REG 117
+#define DA9052_ALARM_H_REG 118
+#define DA9052_ALARM_D_REG 119
+#define DA9052_ALARM_MO_REG 120
+#define DA9052_ALARM_Y_REG 121
+#define DA9052_SECOND_A_REG 122
+#define DA9052_SECOND_B_REG 123
+#define DA9052_SECOND_C_REG 124
+#define DA9052_SECOND_D_REG 125
+
+/* PAGE CONFIGURATION BIT */
+#define DA9052_PAGE_CONF 0X80
+
+/* STATUS REGISTER A BITS */
+#define DA9052_STATUSA_VDATDET 0X80
+#define DA9052_STATUSA_VBUSSEL 0X40
+#define DA9052_STATUSA_DCINSEL 0X20
+#define DA9052_STATUSA_VBUSDET 0X10
+#define DA9052_STATUSA_DCINDET 0X08
+#define DA9052_STATUSA_IDGND 0X04
+#define DA9052_STATUSA_IDFLOAT 0X02
+#define DA9052_STATUSA_NONKEY 0X01
+
+/* STATUS REGISTER B BITS */
+#define DA9052_STATUSB_COMPDET 0X80
+#define DA9052_STATUSB_SEQUENCING 0X40
+#define DA9052_STATUSB_GPFB2 0X20
+#define DA9052_STATUSB_CHGTO 0X10
+#define DA9052_STATUSB_CHGEND 0X08
+#define DA9052_STATUSB_CHGLIM 0X04
+#define DA9052_STATUSB_CHGPRE 0X02
+#define DA9052_STATUSB_CHGATT 0X01
+
+/* STATUS REGISTER C BITS */
+#define DA9052_STATUSC_GPI7 0X80
+#define DA9052_STATUSC_GPI6 0X40
+#define DA9052_STATUSC_GPI5 0X20
+#define DA9052_STATUSC_GPI4 0X10
+#define DA9052_STATUSC_GPI3 0X08
+#define DA9052_STATUSC_GPI2 0X04
+#define DA9052_STATUSC_GPI1 0X02
+#define DA9052_STATUSC_GPI0 0X01
+
+/* STATUS REGISTER D BITS */
+#define DA9052_STATUSD_GPI15 0X80
+#define DA9052_STATUSD_GPI14 0X40
+#define DA9052_STATUSD_GPI13 0X20
+#define DA9052_STATUSD_GPI12 0X10
+#define DA9052_STATUSD_GPI11 0X08
+#define DA9052_STATUSD_GPI10 0X04
+#define DA9052_STATUSD_GPI9 0X02
+#define DA9052_STATUSD_GPI8 0X01
+
+/* EVENT REGISTER A BITS */
+#define DA9052_EVENTA_ECOMP1V2 0X80
+#define DA9052_EVENTA_ESEQRDY 0X40
+#define DA9052_EVENTA_EALRAM 0X20
+#define DA9052_EVENTA_EVDDLOW 0X10
+#define DA9052_EVENTA_EVBUSREM 0X08
+#define DA9052_EVENTA_EDCINREM 0X04
+#define DA9052_EVENTA_EVBUSDET 0X02
+#define DA9052_EVENTA_EDCINDET 0X01
+
+/* EVENT REGISTER B BITS */
+#define DA9052_EVENTB_ETSIREADY 0X80
+#define DA9052_EVENTB_EPENDOWN 0X40
+#define DA9052_EVENTB_EADCEOM 0X20
+#define DA9052_EVENTB_ETBAT 0X10
+#define DA9052_EVENTB_ECHGEND 0X08
+#define DA9052_EVENTB_EIDGND 0X04
+#define DA9052_EVENTB_EIDFLOAT 0X02
+#define DA9052_EVENTB_ENONKEY 0X01
+
+/* EVENT REGISTER C BITS */
+#define DA9052_EVENTC_EGPI7 0X80
+#define DA9052_EVENTC_EGPI6 0X40
+#define DA9052_EVENTC_EGPI5 0X20
+#define DA9052_EVENTC_EGPI4 0X10
+#define DA9052_EVENTC_EGPI3 0X08
+#define DA9052_EVENTC_EGPI2 0X04
+#define DA9052_EVENTC_EGPI1 0X02
+#define DA9052_EVENTC_EGPI0 0X01
+
+/* EVENT REGISTER D BITS */
+#define DA9052_EVENTD_EGPI15 0X80
+#define DA9052_EVENTD_EGPI14 0X40
+#define DA9052_EVENTD_EGPI13 0X20
+#define DA9052_EVENTD_EGPI12 0X10
+#define DA9052_EVENTD_EGPI11 0X08
+#define DA9052_EVENTD_EGPI10 0X04
+#define DA9052_EVENTD_EGPI9 0X02
+#define DA9052_EVENTD_EGPI8 0X01
+
+/* IRQ MASK REGISTERS BITS */
+#define DA9052_M_NONKEY 0X0100
+
+/* TSI EVENT REGISTERS BITS */
+#define DA9052_E_PEN_DOWN 0X4000
+#define DA9052_E_TSI_READY 0X8000
+
+/* FAULT LOG REGISTER BITS */
+#define DA9052_FAULTLOG_WAITSET 0X80
+#define DA9052_FAULTLOG_NSDSET 0X40
+#define DA9052_FAULTLOG_KEYSHUT 0X20
+#define DA9052_FAULTLOG_TEMPOVER 0X08
+#define DA9052_FAULTLOG_VDDSTART 0X04
+#define DA9052_FAULTLOG_VDDFAULT 0X02
+#define DA9052_FAULTLOG_TWDERROR 0X01
+
+/* CONTROL REGISTER A BITS */
+#define DA9052_CONTROLA_GPIV 0X80
+#define DA9052_CONTROLA_PMOTYPE 0X20
+#define DA9052_CONTROLA_PMOV 0X10
+#define DA9052_CONTROLA_PMIV 0X08
+#define DA9052_CONTROLA_PMIFV 0X08
+#define DA9052_CONTROLA_PWR1EN 0X04
+#define DA9052_CONTROLA_PWREN 0X02
+#define DA9052_CONTROLA_SYSEN 0X01
+
+/* CONTROL REGISTER B BITS */
+#define DA9052_CONTROLB_SHUTDOWN 0X80
+#define DA9052_CONTROLB_DEEPSLEEP 0X40
+#define DA9052_CONTROL_B_WRITEMODE 0X20
+#define DA9052_CONTROLB_BBATEN 0X10
+#define DA9052_CONTROLB_OTPREADEN 0X08
+#define DA9052_CONTROLB_AUTOBOOT 0X04
+#define DA9052_CONTROLB_ACTDIODE 0X02
+#define DA9052_CONTROLB_BUCKMERGE 0X01
+
+/* CONTROL REGISTER C BITS */
+#define DA9052_CONTROLC_BLINKDUR 0X80
+#define DA9052_CONTROLC_BLINKFRQ 0X60
+#define DA9052_CONTROLC_DEBOUNCING 0X1C
+#define DA9052_CONTROLC_PMFB2PIN 0X02
+#define DA9052_CONTROLC_PMFB1PIN 0X01
+
+/* CONTROL REGISTER D BITS */
+#define DA9052_CONTROLD_WATCHDOG 0X80
+#define DA9052_CONTROLD_ACCDETEN 0X40
+#define DA9052_CONTROLD_GPI1415SD 0X20
+#define DA9052_CONTROLD_NONKEYSD 0X10
+#define DA9052_CONTROLD_KEEPACTEN 0X08
+#define DA9052_CONTROLD_TWDSCALE 0X07
+
+/* POWER DOWN DISABLE REGISTER BITS */
+#define DA9052_PDDIS_PMCONTPD 0X80
+#define DA9052_PDDIS_OUT32KPD 0X40
+#define DA9052_PDDIS_CHGBBATPD 0X20
+#define DA9052_PDDIS_CHGPD 0X10
+#define DA9052_PDDIS_HS2WIREPD 0X08
+#define DA9052_PDDIS_PMIFPD 0X04
+#define DA9052_PDDIS_GPADCPD 0X02
+#define DA9052_PDDIS_GPIOPD 0X01
+
+/* INTERFACE REGISTER BITS */
+#define DA9052_INTERFACE_IFBASEADDR 0XE0
+#define DA9052_INTERFACE_NCSPOL 0X10
+#define DA9052_INTERFACE_RWPOL 0X08
+#define DA9052_INTERFACE_CPHA 0X04
+#define DA9052_INTERFACE_CPOL 0X02
+#define DA9052_INTERFACE_IFTYPE 0X01
+
+/* RESET REGISTER BITS */
+#define DA9052_RESET_RESETEVENT 0XC0
+#define DA9052_RESET_RESETTIMER 0X3F
+
+/* GPIO REGISTERS */
+/* GPIO CONTROL REGISTER BITS */
+#define DA9052_GPIO_EVEN_PORT_PIN 0X03
+#define DA9052_GPIO_EVEN_PORT_TYPE 0X04
+#define DA9052_GPIO_EVEN_PORT_MODE 0X08
+
+#define DA9052_GPIO_ODD_PORT_PIN 0X30
+#define DA9052_GPIO_ODD_PORT_TYPE 0X40
+#define DA9052_GPIO_ODD_PORT_MODE 0X80
+
+/* POWER SEQUENCER REGISTER BITS */
+/* SEQ CONTROL REGISTER BITS FOR ID 0 AND 1 */
+#define DA9052_ID01_LDO1STEP 0XF0
+#define DA9052_ID01_SYSPRE 0X04
+#define DA9052_ID01_DEFSUPPLY 0X02
+#define DA9052_ID01_NRESMODE 0X01
+
+/* SEQ CONTROL REGISTER BITS FOR ID 2 AND 3 */
+#define DA9052_ID23_LDO3STEP 0XF0
+#define DA9052_ID23_LDO2STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 4 AND 5 */
+#define DA9052_ID45_LDO5STEP 0XF0
+#define DA9052_ID45_LDO4STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 6 AND 7 */
+#define DA9052_ID67_LDO7STEP 0XF0
+#define DA9052_ID67_LDO6STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 8 AND 9 */
+#define DA9052_ID89_LDO9STEP 0XF0
+#define DA9052_ID89_LDO8STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 10 AND 11 */
+#define DA9052_ID1011_PDDISSTEP 0XF0
+#define DA9052_ID1011_LDO10STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 12 AND 13 */
+#define DA9052_ID1213_VMEMSWSTEP 0XF0
+#define DA9052_ID1213_VPERISWSTEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 14 AND 15 */
+#define DA9052_ID1415_BUCKPROSTEP 0XF0
+#define DA9052_ID1415_BUCKCORESTEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 16 AND 17 */
+#define DA9052_ID1617_BUCKPERISTEP 0XF0
+#define DA9052_ID1617_BUCKMEMSTEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 18 AND 19 */
+#define DA9052_ID1819_GPRISE2STEP 0XF0
+#define DA9052_ID1819_GPRISE1STEP 0X0F
+
+/* SEQ CONTROL REGISTER BITS FOR ID 20 AND 21 */
+#define DA9052_ID2021_GPFALL2STEP 0XF0
+#define DA9052_ID2021_GPFALL1STEP 0X0F
+
+/* POWER SEQ STATUS REGISTER BITS */
+#define DA9052_SEQSTATUS_SEQPOINTER 0XF0
+#define DA9052_SEQSTATUS_WAITSTEP 0X0F
+
+/* POWER SEQ A REGISTER BITS */
+#define DA9052_SEQA_POWEREND 0XF0
+#define DA9052_SEQA_SYSTEMEND 0X0F
+
+/* POWER SEQ B REGISTER BITS */
+#define DA9052_SEQB_PARTDOWN 0XF0
+#define DA9052_SEQB_MAXCOUNT 0X0F
+
+/* POWER SEQ TIMER REGISTER BITS */
+#define DA9052_SEQTIMER_SEQDUMMY 0XF0
+#define DA9052_SEQTIMER_SEQTIME 0X0F
+
+/* POWER SUPPLY CONTROL REGISTER BITS */
+/* BUCK REGISTER A BITS */
+#define DA9052_BUCKA_BPROILIM 0XC0
+#define DA9052_BUCKA_BPROMODE 0X30
+#define DA9052_BUCKA_BCOREILIM 0X0C
+#define DA9052_BUCKA_BCOREMODE 0X03
+
+/* BUCK REGISTER B BITS */
+#define DA9052_BUCKB_BERIILIM 0XC0
+#define DA9052_BUCKB_BPERIMODE 0X30
+#define DA9052_BUCKB_BMEMILIM 0X0C
+#define DA9052_BUCKB_BMEMMODE 0X03
+
+/* BUCKCORE REGISTER BITS */
+#define DA9052_BUCKCORE_BCORECONF 0X80
+#define DA9052_BUCKCORE_BCOREEN 0X40
+#define DA9052_BUCKCORE_VBCORE 0X3F
+
+/* BUCKPRO REGISTER BITS */
+#define DA9052_BUCKPRO_BPROCONF 0X80
+#define DA9052_BUCKPRO_BPROEN 0X40
+#define DA9052_BUCKPRO_VBPRO 0X3F
+
+/* BUCKMEM REGISTER BITS */
+#define DA9052_BUCKMEM_BMEMCONF 0X80
+#define DA9052_BUCKMEM_BMEMEN 0X40
+#define DA9052_BUCKMEM_VBMEM 0X3F
+
+/* BUCKPERI REGISTER BITS */
+#define DA9052_BUCKPERI_BPERICONF 0X80
+#define DA9052_BUCKPERI_BPERIEN 0X40
+#define DA9052_BUCKPERI_BPERIHS 0X20
+#define DA9052_BUCKPERI_VBPERI 0X1F
+
+/* LDO1 REGISTER BITS */
+#define DA9052_LDO1_LDO1CONF 0X80
+#define DA9052_LDO1_LDO1EN 0X40
+#define DA9052_LDO1_VLDO1 0X1F
+
+/* LDO2 REGISTER BITS */
+#define DA9052_LDO2_LDO2CONF 0X80
+#define DA9052_LDO2_LDO2EN 0X40
+#define DA9052_LDO2_VLDO2 0X3F
+
+/* LDO3 REGISTER BITS */
+#define DA9052_LDO3_LDO3CONF 0X80
+#define DA9052_LDO3_LDO3EN 0X40
+#define DA9052_LDO3_VLDO3 0X3F
+
+/* LDO4 REGISTER BITS */
+#define DA9052_LDO4_LDO4CONF 0X80
+#define DA9052_LDO4_LDO4EN 0X40
+#define DA9052_LDO4_VLDO4 0X3F
+
+/* LDO5 REGISTER BITS */
+#define DA9052_LDO5_LDO5CONF 0X80
+#define DA9052_LDO5_LDO5EN 0X40
+#define DA9052_LDO5_VLDO5 0X3F
+
+/* LDO6 REGISTER BITS */
+#define DA9052_LDO6_LDO6CONF 0X80
+#define DA9052_LDO6_LDO6EN 0X40
+#define DA9052_LDO6_VLDO6 0X3F
+
+/* LDO7 REGISTER BITS */
+#define DA9052_LDO7_LDO7CONF 0X80
+#define DA9052_LDO7_LDO7EN 0X40
+#define DA9052_LDO7_VLDO7 0X3F
+
+/* LDO8 REGISTER BITS */
+#define DA9052_LDO8_LDO8CONF 0X80
+#define DA9052_LDO8_LDO8EN 0X40
+#define DA9052_LDO8_VLDO8 0X3F
+
+/* LDO9 REGISTER BITS */
+#define DA9052_LDO9_LDO9CONF 0X80
+#define DA9052_LDO9_LDO9EN 0X40
+#define DA9052_LDO9_VLDO9 0X3F
+
+/* LDO10 REGISTER BITS */
+#define DA9052_LDO10_LDO10CONF 0X80
+#define DA9052_LDO10_LDO10EN 0X40
+#define DA9052_LDO10_VLDO10 0X3F
+
+/* SUPPLY REGISTER BITS */
+#define DA9052_SUPPLY_VLOCK 0X80
+#define DA9052_SUPPLY_VMEMSWEN 0X40
+#define DA9052_SUPPLY_VPERISWEN 0X20
+#define DA9052_SUPPLY_VLDO3GO 0X10
+#define DA9052_SUPPLY_VLDO2GO 0X08
+#define DA9052_SUPPLY_VBMEMGO 0X04
+#define DA9052_SUPPLY_VBPROGO 0X02
+#define DA9052_SUPPLY_VBCOREGO 0X01
+
+/* PULLDOWN REGISTER BITS */
+#define DA9052_PULLDOWN_LDO5PDDIS 0X20
+#define DA9052_PULLDOWN_LDO2PDDIS 0X10
+#define DA9052_PULLDOWN_LDO1PDDIS 0X08
+#define DA9052_PULLDOWN_MEMPDDIS 0X04
+#define DA9052_PULLDOWN_PROPDDIS 0X02
+#define DA9052_PULLDOWN_COREPDDIS 0X01
+
+/* BAT CHARGER REGISTER BITS */
+/* CHARGER BUCK REGISTER BITS */
+#define DA9052_CHGBUCK_CHGTEMP 0X80
+#define DA9052_CHGBUCK_CHGUSBILIM 0X40
+#define DA9052_CHGBUCK_CHGBUCKLP 0X20
+#define DA9052_CHGBUCK_CHGBUCKEN 0X10
+#define DA9052_CHGBUCK_ISETBUCK 0X0F
+
+/* WAIT COUNTER REGISTER BITS */
+#define DA9052_WAITCONT_WAITDIR 0X80
+#define DA9052_WAITCONT_RTCCLOCK 0X40
+#define DA9052_WAITCONT_WAITMODE 0X20
+#define DA9052_WAITCONT_EN32KOUT 0X10
+#define DA9052_WAITCONT_DELAYTIME 0X0F
+
+/* ISET CONTROL REGISTER BITS */
+#define DA9052_ISET_ISETDCIN 0XF0
+#define DA9052_ISET_ISETVBUS 0X0F
+
+/* BATTERY CHARGER CONTROL REGISTER BITS */
+#define DA9052_BATCHG_ICHGPRE 0XC0
+#define DA9052_BATCHG_ICHGBAT 0X3F
+
+/* CHARGER COUNTER REGISTER BITS */
+#define DA9052_CHG_CONT_VCHG_BAT 0XF8
+#define DA9052_CHG_CONT_TCTR 0X07
+
+/* INPUT CONTROL REGISTER BITS */
+#define DA9052_INPUT_CONT_TCTR_MODE 0X80
+#define DA9052_INPUT_CONT_VBUS_SUSP 0X10
+#define DA9052_INPUT_CONT_DCIN_SUSP 0X08
+
+/* CHARGING TIME REGISTER BITS */
+#define DA9052_CHGTIME_CHGTIME 0XFF
+
+/* BACKUP BATTERY CONTROL REGISTER BITS */
+#define DA9052_BBATCONT_BCHARGERISET 0XF0
+#define DA9052_BBATCONT_BCHARGERVSET 0X0F
+
+/* LED REGISTERS BITS */
+/* LED BOOST REGISTER BITS */
+#define DA9052_BOOST_EBFAULT 0X80
+#define DA9052_BOOST_MBFAULT 0X40
+#define DA9052_BOOST_BOOSTFRQ 0X20
+#define DA9052_BOOST_BOOSTILIM 0X10
+#define DA9052_BOOST_LED3INEN 0X08
+#define DA9052_BOOST_LED2INEN 0X04
+#define DA9052_BOOST_LED1INEN 0X02
+#define DA9052_BOOST_BOOSTEN 0X01
+
+/* LED CONTROL REGISTER BITS */
+#define DA9052_LEDCONT_SELLEDMODE 0X80
+#define DA9052_LEDCONT_LED3ICONT 0X40
+#define DA9052_LEDCONT_LED3RAMP 0X20
+#define DA9052_LEDCONT_LED3EN 0X10
+#define DA9052_LEDCONT_LED2RAMP 0X08
+#define DA9052_LEDCONT_LED2EN 0X04
+#define DA9052_LEDCONT_LED1RAMP 0X02
+#define DA9052_LEDCONT_LED1EN 0X01
+
+/* LEDMIN123 REGISTER BIT */
+#define DA9052_LEDMIN123_LEDMINCURRENT 0XFF
+
+/* LED1CONF REGISTER BIT */
+#define DA9052_LED1CONF_LED1CURRENT 0XFF
+
+/* LED2CONF REGISTER BIT */
+#define DA9052_LED2CONF_LED2CURRENT 0XFF
+
+/* LED3CONF REGISTER BIT */
+#define DA9052_LED3CONF_LED3CURRENT 0XFF
+
+/* LED COUNT REGISTER BIT */
+#define DA9052_LED_CONT_DIM 0X80
+
+/* ADC MAN REGISTERS BITS */
+#define DA9052_ADC_MAN_MAN_CONV 0X10
+#define DA9052_ADC_MAN_MUXSEL_VDDOUT 0X00
+#define DA9052_ADC_MAN_MUXSEL_ICH 0X01
+#define DA9052_ADC_MAN_MUXSEL_TBAT 0X02
+#define DA9052_ADC_MAN_MUXSEL_VBAT 0X03
+#define DA9052_ADC_MAN_MUXSEL_AD4 0X04
+#define DA9052_ADC_MAN_MUXSEL_AD5 0X05
+#define DA9052_ADC_MAN_MUXSEL_AD6 0X06
+#define DA9052_ADC_MAN_MUXSEL_VBBAT 0X09
+
+/* ADC CONTROL REGISTER BITS */
+#define DA9052_ADCCONT_COMP1V2EN 0X80
+#define DA9052_ADCCONT_ADCMODE 0X40
+#define DA9052_ADCCONT_TBATISRCEN 0X20
+#define DA9052_ADCCONT_AD4ISRCEN 0X10
+#define DA9052_ADCCONT_AUTOAD6EN 0X08
+#define DA9052_ADCCONT_AUTOAD5EN 0X04
+#define DA9052_ADCCONT_AUTOAD4EN 0X02
+#define DA9052_ADCCONT_AUTOVDDEN 0X01
+
+/* ADC 10 BIT MANUAL CONVERSION RESULT LOW REGISTER */
+#define DA9052_ADC_RES_LSB 0X03
+
+/* ADC 10 BIT MANUAL CONVERSION RESULT HIGH REGISTER */
+#define DA9052_ADCRESH_ADCRESMSB 0XFF
+
+/* VDD RES REGISTER BIT */
+#define DA9052_VDDRES_VDDOUTRES 0XFF
+
+/* VDD MON REGISTER BIT */
+#define DA9052_VDDMON_VDDOUTMON 0XFF
+
+/* ICHG_AV REGISTER BIT */
+#define DA9052_ICHGAV_ICHGAV 0XFF
+
+/* ICHG_THD REGISTER BIT */
+#define DA9052_ICHGTHD_ICHGTHD 0XFF
+
+/* ICHG_END REGISTER BIT */
+#define DA9052_ICHGEND_ICHGEND 0XFF
+
+/* TBAT_RES REGISTER BIT */
+#define DA9052_TBATRES_TBATRES 0XFF
+
+/* TBAT_HIGHP REGISTER BIT */
+#define DA9052_TBATHIGHP_TBATHIGHP 0XFF
+
+/* TBAT_HIGHN REGISTER BIT */
+#define DA9052_TBATHIGHN_TBATHIGHN 0XFF
+
+/* TBAT_LOW REGISTER BIT */
+#define DA9052_TBATLOW_TBATLOW 0XFF
+
+/* T_OFFSET REGISTER BIT */
+#define DA9052_TOFFSET_TOFFSET 0XFF
+
+/* ADCIN4_RES REGISTER BIT */
+#define DA9052_ADCIN4RES_ADCIN4RES 0XFF
+
+/* ADCIN4_HIGH REGISTER BIT */
+#define DA9052_AUTO4HIGH_AUTO4HIGH 0XFF
+
+/* ADCIN4_LOW REGISTER BIT */
+#define DA9052_AUTO4LOW_AUTO4LOW 0XFF
+
+/* ADCIN5_RES REGISTER BIT */
+#define DA9052_ADCIN5RES_ADCIN5RES 0XFF
+
+/* ADCIN5_HIGH REGISTER BIT */
+#define DA9052_AUTO5HIGH_AUTOHIGH 0XFF
+
+/* ADCIN5_LOW REGISTER BIT */
+#define DA9052_AUTO5LOW_AUTO5LOW 0XFF
+
+/* ADCIN6_RES REGISTER BIT */
+#define DA9052_ADCIN6RES_ADCIN6RES 0XFF
+
+/* ADCIN6_HIGH REGISTER BIT */
+#define DA9052_AUTO6HIGH_AUTO6HIGH 0XFF
+
+/* ADCIN6_LOW REGISTER BIT */
+#define DA9052_AUTO6LOW_AUTO6LOW 0XFF
+
+/* TJUNC_RES REGISTER BIT */
+#define DA9052_TJUNCRES_TJUNCRES 0XFF
+
+/* TSI REGISTER */
+/* TSI CONTROL REGISTER A BITS */
+#define DA9052_TSICONTA_TSIDELAY 0XC0
+#define DA9052_TSICONTA_TSISKIP 0X38
+#define DA9052_TSICONTA_TSIMODE 0X04
+#define DA9052_TSICONTA_PENDETEN 0X02
+#define DA9052_TSICONTA_AUTOTSIEN 0X01
+
+/* TSI CONTROL REGISTER B BITS */
+#define DA9052_TSICONTB_ADCREF 0X80
+#define DA9052_TSICONTB_TSIMAN 0X40
+#define DA9052_TSICONTB_TSIMUX 0X30
+#define DA9052_TSICONTB_TSISEL3 0X08
+#define DA9052_TSICONTB_TSISEL2 0X04
+#define DA9052_TSICONTB_TSISEL1 0X02
+#define DA9052_TSICONTB_TSISEL0 0X01
+
+/* TSI X CO-ORDINATE MSB RESULT REGISTER BITS */
+#define DA9052_TSIXMSB_TSIXM 0XFF
+
+/* TSI Y CO-ORDINATE MSB RESULT REGISTER BITS */
+#define DA9052_TSIYMSB_TSIYM 0XFF
+
+/* TSI CO-ORDINATE LSB RESULT REGISTER BITS */
+#define DA9052_TSILSB_PENDOWN 0X40
+#define DA9052_TSILSB_TSIZL 0X30
+#define DA9052_TSILSB_TSIYL 0X0C
+#define DA9052_TSILSB_TSIXL 0X03
+
+/* TSI Z MEASUREMENT MSB RESULT REGISTER BIT */
+#define DA9052_TSIZMSB_TSIZM 0XFF
+
+/* RTC REGISTER */
+/* RTC TIMER SECONDS REGISTER BITS */
+#define DA9052_COUNTS_MONITOR 0X40
+#define DA9052_RTC_SEC 0X3F
+
+/* RTC TIMER MINUTES REGISTER BIT */
+#define DA9052_RTC_MIN 0X3F
+
+/* RTC TIMER HOUR REGISTER BIT */
+#define DA9052_RTC_HOUR 0X1F
+
+/* RTC TIMER DAYS REGISTER BIT */
+#define DA9052_RTC_DAY 0X1F
+
+/* RTC TIMER MONTHS REGISTER BIT */
+#define DA9052_RTC_MONTH 0X0F
+
+/* RTC TIMER YEARS REGISTER BIT */
+#define DA9052_RTC_YEAR 0X3F
+
+/* RTC ALARM MINUTES REGISTER BITS */
+#define DA9052_ALARMM_I_TICK_TYPE 0X80
+#define DA9052_ALARMMI_ALARMTYPE 0X40
+
+/* RTC ALARM YEARS REGISTER BITS */
+#define DA9052_ALARM_Y_TICK_ON 0X80
+#define DA9052_ALARM_Y_ALARM_ON 0X40
+
+/* RTC SECONDS REGISTER A BITS */
+#define DA9052_SECONDA_SECONDSA 0XFF
+
+/* RTC SECONDS REGISTER B BITS */
+#define DA9052_SECONDB_SECONDSB 0XFF
+
+/* RTC SECONDS REGISTER C BITS */
+#define DA9052_SECONDC_SECONDSC 0XFF
+
+/* RTC SECONDS REGISTER D BITS */
+#define DA9052_SECONDD_SECONDSD 0XFF
+
+#endif
+/* __LINUX_MFD_DA9052_REG_H */
diff --git a/include/linux/mfd/da9055/core.h b/include/linux/mfd/da9055/core.h
new file mode 100644
index 000000000..956afa445
--- /dev/null
+++ b/include/linux/mfd/da9055/core.h
@@ -0,0 +1,94 @@
+/*
+ * Core declarations for DA9055 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __DA9055_CORE_H
+#define __DA9055_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+
+/*
+ * PMIC IRQ
+ */
+#define DA9055_IRQ_ALARM 0x01
+#define DA9055_IRQ_TICK 0x02
+#define DA9055_IRQ_NONKEY 0x00
+#define DA9055_IRQ_REGULATOR 0x0B
+#define DA9055_IRQ_HWMON 0x03
+
+struct da9055_pdata;
+
+struct da9055 {
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ struct device *dev;
+ struct i2c_client *i2c_client;
+
+ int irq_base;
+ int chip_irq;
+};
+
+/* Device I/O */
+static inline int da9055_reg_read(struct da9055 *da9055, unsigned char reg)
+{
+ int val, ret;
+
+ ret = regmap_read(da9055->regmap, reg, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static inline int da9055_reg_write(struct da9055 *da9055, unsigned char reg,
+ unsigned char val)
+{
+ return regmap_write(da9055->regmap, reg, val);
+}
+
+static inline int da9055_group_read(struct da9055 *da9055, unsigned char reg,
+ unsigned reg_cnt, unsigned char *val)
+{
+ return regmap_bulk_read(da9055->regmap, reg, val, reg_cnt);
+}
+
+static inline int da9055_group_write(struct da9055 *da9055, unsigned char reg,
+ unsigned reg_cnt, unsigned char *val)
+{
+ return regmap_raw_write(da9055->regmap, reg, val, reg_cnt);
+}
+
+static inline int da9055_reg_update(struct da9055 *da9055, unsigned char reg,
+ unsigned char bit_mask,
+ unsigned char reg_val)
+{
+ return regmap_update_bits(da9055->regmap, reg, bit_mask, reg_val);
+}
+
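+/*
+ * Usage sketch (editorial, not part of the original header): read a status
+ * register and conditionally set a control bit with the helpers above.  The
+ * register and bit names are real defines from <linux/mfd/da9055/reg.h>;
+ * the logic itself is only an example.
+ *
+ *	int status = da9055_reg_read(da9055, DA9055_REG_STATUS_A);
+ *	if (status < 0)
+ *		return status;
+ *
+ *	if (status & DA9055_NOKEY_STS)
+ *		return da9055_reg_update(da9055, DA9055_REG_CONTROL_B,
+ *					 DA9055_RTC_EN, DA9055_RTC_EN);
+ */
+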
+/* Generic Device API */
+int da9055_device_init(struct da9055 *da9055);
+void da9055_device_exit(struct da9055 *da9055);
+
+extern struct regmap_config da9055_regmap_config;
+
+#endif /* __DA9055_CORE_H */
diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h
new file mode 100644
index 000000000..04e092be4
--- /dev/null
+++ b/include/linux/mfd/da9055/pdata.h
@@ -0,0 +1,53 @@
+/* Copyright (C) 2012 Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __DA9055_PDATA_H
+#define __DA9055_PDATA_H
+
+#define DA9055_MAX_REGULATORS 8
+
+struct da9055;
+
+enum gpio_select {
+ NO_GPIO = 0,
+ GPIO_1,
+ GPIO_2
+};
+
+struct da9055_pdata {
+ int (*init) (struct da9055 *da9055);
+ int irq_base;
+ int gpio_base;
+
+ struct regulator_init_data *regulators[DA9055_MAX_REGULATORS];
+ /* Enable RTC in RESET Mode */
+ bool reset_enable;
+ /*
+ * GPI muxed pin to control
+ * regulator state A/B, 0 if not available.
+ */
+ int *gpio_ren;
+ /*
+ * GPI muxed pin to control
+ * regulator set, 0 if not available.
+ */
+ int *gpio_rsel;
+ /*
+ * Regulator mode control bits value (GPI offset) that
+	 * controls the regulator state, 0 if not available.
+ */
+ enum gpio_select *reg_ren;
+ /*
+ * Regulator mode control bits value (GPI offset) that
+ * controls the regulator set A/B, 0 if not available.
+ */
+ enum gpio_select *reg_rsel;
+ /* GPIOs to enable regulator, 0 if not available */
+ int *ena_gpio;
+};
+#endif /* __DA9055_PDATA_H */
diff --git a/include/linux/mfd/da9055/reg.h b/include/linux/mfd/da9055/reg.h
new file mode 100644
index 000000000..2b592e072
--- /dev/null
+++ b/include/linux/mfd/da9055/reg.h
@@ -0,0 +1,699 @@
+/*
+ * Register declarations for DA9055 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __DA9055_REG_H
+#define __DA9055_REG_H
+
+/*
+ * PMIC registers
+ */
+ /* PAGE0 */
+#define DA9055_REG_PAGE_CON 0x00
+
+/* System Control and Event Registers */
+#define DA9055_REG_STATUS_A 0x01
+#define DA9055_REG_STATUS_B 0x02
+#define DA9055_REG_FAULT_LOG 0x03
+#define DA9055_REG_EVENT_A 0x04
+#define DA9055_REG_EVENT_B 0x05
+#define DA9055_REG_EVENT_C 0x06
+#define DA9055_REG_IRQ_MASK_A 0x07
+#define DA9055_REG_IRQ_MASK_B 0x08
+#define DA9055_REG_IRQ_MASK_C 0x09
+#define DA9055_REG_CONTROL_A 0x0A
+#define DA9055_REG_CONTROL_B 0x0B
+#define DA9055_REG_CONTROL_C 0x0C
+#define DA9055_REG_CONTROL_D 0x0D
+#define DA9055_REG_CONTROL_E 0x0E
+#define DA9055_REG_PD_DIS 0x0F
+
+/* GPIO Control Registers */
+#define DA9055_REG_GPIO0_1 0x10
+#define DA9055_REG_GPIO2 0x11
+#define DA9055_REG_GPIO_MODE0_2 0x12
+
+/* Regulator Control Registers */
+#define DA9055_REG_BCORE_CONT 0x13
+#define DA9055_REG_BMEM_CONT 0x14
+#define DA9055_REG_LDO1_CONT 0x15
+#define DA9055_REG_LDO2_CONT 0x16
+#define DA9055_REG_LDO3_CONT 0x17
+#define DA9055_REG_LDO4_CONT 0x18
+#define DA9055_REG_LDO5_CONT 0x19
+#define DA9055_REG_LDO6_CONT 0x1A
+
+/* GP-ADC Control Registers */
+#define DA9055_REG_ADC_MAN 0x1B
+#define DA9055_REG_ADC_CONT 0x1C
+#define DA9055_REG_VSYS_MON 0x1D
+#define DA9055_REG_ADC_RES_L 0x1E
+#define DA9055_REG_ADC_RES_H 0x1F
+#define DA9055_REG_VSYS_RES 0x20
+#define DA9055_REG_ADCIN1_RES 0x21
+#define DA9055_REG_ADCIN2_RES 0x22
+#define DA9055_REG_ADCIN3_RES 0x23
+
+/* Sequencer Control Registers */
+#define DA9055_REG_EN_32K 0x35
+
+/* Regulator Setting Registers */
+#define DA9055_REG_BUCK_LIM 0x37
+#define DA9055_REG_BCORE_MODE 0x38
+#define DA9055_REG_VBCORE_A 0x39
+#define DA9055_REG_VBMEM_A 0x3A
+#define DA9055_REG_VLDO1_A 0x3B
+#define DA9055_REG_VLDO2_A 0x3C
+#define DA9055_REG_VLDO3_A 0x3D
+#define DA9055_REG_VLDO4_A 0x3E
+#define DA9055_REG_VLDO5_A 0x3F
+#define DA9055_REG_VLDO6_A 0x40
+#define DA9055_REG_VBCORE_B 0x41
+#define DA9055_REG_VBMEM_B 0x42
+#define DA9055_REG_VLDO1_B 0x43
+#define DA9055_REG_VLDO2_B 0x44
+#define DA9055_REG_VLDO3_B 0x45
+#define DA9055_REG_VLDO4_B 0x46
+#define DA9055_REG_VLDO5_B 0x47
+#define DA9055_REG_VLDO6_B 0x48
+
+/* GP-ADC Threshold Registers */
+#define DA9055_REG_AUTO1_HIGH 0x49
+#define DA9055_REG_AUTO1_LOW 0x4A
+#define DA9055_REG_AUTO2_HIGH 0x4B
+#define DA9055_REG_AUTO2_LOW 0x4C
+#define DA9055_REG_AUTO3_HIGH 0x4D
+#define DA9055_REG_AUTO3_LOW 0x4E
+
+/* OTP */
+#define DA9055_REG_OPT_COUNT 0x50
+#define DA9055_REG_OPT_ADDR 0x51
+#define DA9055_REG_OPT_DATA 0x52
+
+/* RTC Calendar and Alarm Registers */
+#define DA9055_REG_COUNT_S 0x53
+#define DA9055_REG_COUNT_MI 0x54
+#define DA9055_REG_COUNT_H 0x55
+#define DA9055_REG_COUNT_D 0x56
+#define DA9055_REG_COUNT_MO 0x57
+#define DA9055_REG_COUNT_Y 0x58
+#define DA9055_REG_ALARM_MI 0x59
+#define DA9055_REG_ALARM_H 0x5A
+#define DA9055_REG_ALARM_D 0x5B
+#define DA9055_REG_ALARM_MO 0x5C
+#define DA9055_REG_ALARM_Y 0x5D
+#define DA9055_REG_SECOND_A 0x5E
+#define DA9055_REG_SECOND_B 0x5F
+#define DA9055_REG_SECOND_C 0x60
+#define DA9055_REG_SECOND_D 0x61
+
+/* Customer Trim and Configuration */
+#define DA9055_REG_T_OFFSET 0x63
+#define DA9055_REG_INTERFACE 0x64
+#define DA9055_REG_CONFIG_A 0x65
+#define DA9055_REG_CONFIG_B 0x66
+#define DA9055_REG_CONFIG_C 0x67
+#define DA9055_REG_CONFIG_D 0x68
+#define DA9055_REG_CONFIG_E 0x69
+#define DA9055_REG_TRIM_CLDR 0x6F
+
+/* General Purpose Registers */
+#define DA9055_REG_GP_ID_0 0x70
+#define DA9055_REG_GP_ID_1 0x71
+#define DA9055_REG_GP_ID_2 0x72
+#define DA9055_REG_GP_ID_3 0x73
+#define DA9055_REG_GP_ID_4 0x74
+#define DA9055_REG_GP_ID_5 0x75
+#define DA9055_REG_GP_ID_6 0x76
+#define DA9055_REG_GP_ID_7 0x77
+#define DA9055_REG_GP_ID_8 0x78
+#define DA9055_REG_GP_ID_9 0x79
+#define DA9055_REG_GP_ID_10 0x7A
+#define DA9055_REG_GP_ID_11 0x7B
+#define DA9055_REG_GP_ID_12 0x7C
+#define DA9055_REG_GP_ID_13 0x7D
+#define DA9055_REG_GP_ID_14 0x7E
+#define DA9055_REG_GP_ID_15 0x7F
+#define DA9055_REG_GP_ID_16 0x80
+#define DA9055_REG_GP_ID_17 0x81
+#define DA9055_REG_GP_ID_18 0x82
+#define DA9055_REG_GP_ID_19 0x83
+
+#define DA9055_MAX_REGISTER_CNT DA9055_REG_GP_ID_19
+
+/*
+ * PMIC registers bits
+ */
+
+/* DA9055_REG_PAGE_CON (addr=0x00) */
+#define DA9055_PAGE_WRITE_MODE (0<<6)
+#define DA9055_REPEAT_WRITE_MODE (1<<6)
+
+/* DA9055_REG_STATUS_A (addr=0x01) */
+#define DA9055_NOKEY_STS 0x01
+#define DA9055_WAKE_STS 0x02
+#define DA9055_DVC_BUSY_STS 0x04
+#define DA9055_COMP1V2_STS 0x08
+#define DA9055_NJIG_STS 0x10
+#define DA9055_LDO5_LIM_STS 0x20
+#define DA9055_LDO6_LIM_STS 0x40
+
+/* DA9055_REG_STATUS_B (addr=0x02) */
+#define DA9055_GPI0_STS 0x01
+#define DA9055_GPI1_STS 0x02
+#define DA9055_GPI2_STS 0x04
+
+/* DA9055_REG_FAULT_LOG (addr=0x03) */
+#define DA9055_TWD_ERROR_FLG 0x01
+#define DA9055_POR_FLG 0x02
+#define DA9055_VDD_FAULT_FLG 0x04
+#define DA9055_VDD_START_FLG 0x08
+#define DA9055_TEMP_CRIT_FLG 0x10
+#define DA9055_KEY_RESET_FLG 0x20
+#define DA9055_WAIT_SHUT_FLG 0x80
+
+/* DA9055_REG_EVENT_A (addr=0x04) */
+#define DA9055_NOKEY_EINT 0x01
+#define DA9055_ALARM_EINT 0x02
+#define DA9055_TICK_EINT 0x04
+#define DA9055_ADC_RDY_EINT 0x08
+#define DA9055_SEQ_RDY_EINT 0x10
+#define DA9055_EVENTS_B_EINT 0x20
+#define DA9055_EVENTS_C_EINT 0x40
+
+/* DA9055_REG_EVENT_B (addr=0x05) */
+#define DA9055_E_WAKE_EINT 0x01
+#define DA9055_E_TEMP_EINT 0x02
+#define DA9055_E_COMP1V2_EINT 0x04
+#define DA9055_E_LDO_LIM_EINT 0x08
+#define DA9055_E_NJIG_EINT 0x20
+#define DA9055_E_VDD_MON_EINT 0x40
+#define DA9055_E_VDD_WARN_EINT 0x80
+
+/* DA9055_REG_EVENT_C (addr=0x06) */
+#define DA9055_E_GPI0_EINT 0x01
+#define DA9055_E_GPI1_EINT 0x02
+#define DA9055_E_GPI2_EINT 0x04
+
+/* DA9055_REG_IRQ_MASK_A (addr=0x07) */
+#define DA9055_M_NONKEY_EINT 0x01
+#define DA9055_M_ALARM_EINT 0x02
+#define DA9055_M_TICK_EINT 0x04
+#define DA9055_M_ADC_RDY_EINT 0x08
+#define DA9055_M_SEQ_RDY_EINT 0x10
+
+/* DA9055_REG_IRQ_MASK_B (addr=0x08) */
+#define DA9055_M_WAKE_EINT 0x01
+#define DA9055_M_TEMP_EINT 0x02
+#define DA9055_M_COMP_1V2_EINT 0x04
+#define DA9055_M_LDO_LIM_EINT 0x08
+#define DA9055_M_NJIG_EINT 0x20
+#define DA9055_M_VDD_MON_EINT 0x40
+#define DA9055_M_VDD_WARN_EINT 0x80
+
+/* DA9055_REG_IRQ_MASK_C (addr=0x09) */
+#define DA9055_M_GPI0_EINT 0x01
+#define DA9055_M_GPI1_EINT 0x02
+#define DA9055_M_GPI2_EINT 0x04
+
+/* DA9055_REG_CONTROL_A (addr=0xA) */
+#define DA9055_DEBOUNCING_SHIFT 0x00
+#define DA9055_DEBOUNCING_MASK 0x07
+#define DA9055_NRES_MODE_SHIFT 0x03
+#define DA9055_NRES_MODE_MASK 0x08
+#define DA9055_SLEW_RATE_SHIFT 0x04
+#define DA9055_SLEW_RATE_MASK 0x30
+#define DA9055_NOKEY_LOCK_SHIFT 0x06
+#define DA9055_NOKEY_LOCK_MASK 0x40
+
+/* DA9055_REG_CONTROL_B (addr=0xB) */
+#define DA9055_RTC_MODE_PD 0x01
+#define DA9055_RTC_MODE_SD_SHIFT 0x01
+#define DA9055_RTC_MODE_SD 0x02
+#define DA9055_RTC_EN 0x04
+#define DA9055_ECO_MODE_SHIFT 0x03
+#define DA9055_ECO_MODE_MASK 0x08
+#define DA9055_TWDSCALE_SHIFT 4
+#define DA9055_TWDSCALE_MASK 0x70
+#define DA9055_V_LOCK_SHIFT 0x07
+#define DA9055_V_LOCK_MASK 0x80
+
+/* DA9055_REG_CONTROL_C (addr=0xC) */
+#define DA9055_SYSTEM_EN_SHIFT 0x00
+#define DA9055_SYSTEM_EN_MASK 0x01
+#define DA9055_POWERN_EN_SHIFT 0x01
+#define DA9055_POWERN_EN_MASK 0x02
+#define DA9055_POWER1_EN_SHIFT 0x02
+#define DA9055_POWER1_EN_MASK 0x04
+
+/* DA9055_REG_CONTROL_D (addr=0xD) */
+#define DA9055_STANDBY_SHIFT 0x02
+#define DA9055_STANDBY_MASK 0x08
+#define DA9055_AUTO_BOOT_SHIFT 0x03
+#define DA9055_AUTO_BOOT_MASK 0x04
+
+/* DA9055_REG_CONTROL_E (addr=0xE) */
+#define DA9055_WATCHDOG_SHIFT 0x00
+#define DA9055_WATCHDOG_MASK 0x01
+#define DA9055_SHUTDOWN_SHIFT 0x01
+#define DA9055_SHUTDOWN_MASK 0x02
+#define DA9055_WAKE_UP_SHIFT 0x02
+#define DA9055_WAKE_UP_MASK 0x04
+
+/* DA9055_REG_GPIO (addr=0x10/0x11) */
+#define DA9055_GPIO0_PIN_SHIFT 0x00
+#define DA9055_GPIO0_PIN_MASK 0x03
+#define DA9055_GPIO0_TYPE_SHIFT 0x02
+#define DA9055_GPIO0_TYPE_MASK 0x04
+#define DA9055_GPIO0_WEN_SHIFT 0x03
+#define DA9055_GPIO0_WEN_MASK 0x08
+#define DA9055_GPIO1_PIN_SHIFT 0x04
+#define DA9055_GPIO1_PIN_MASK 0x30
+#define DA9055_GPIO1_TYPE_SHIFT 0x06
+#define DA9055_GPIO1_TYPE_MASK 0x40
+#define DA9055_GPIO1_WEN_SHIFT 0x07
+#define DA9055_GPIO1_WEN_MASK 0x80
+#define DA9055_GPIO2_PIN_SHIFT 0x00
+#define DA9055_GPIO2_PIN_MASK 0x30
+#define DA9055_GPIO2_TYPE_SHIFT 0x02
+#define DA9055_GPIO2_TYPE_MASK 0x04
+#define DA9055_GPIO2_WEN_SHIFT 0x03
+#define DA9055_GPIO2_WEN_MASK 0x08
+
+/* DA9055_REG_GPIO_MODE (addr=0x12) */
+#define DA9055_GPIO0_MODE_SHIFT 0x00
+#define DA9055_GPIO0_MODE_MASK 0x01
+#define DA9055_GPIO1_MODE_SHIFT 0x01
+#define DA9055_GPIO1_MODE_MASK 0x02
+#define DA9055_GPIO2_MODE_SHIFT 0x02
+#define DA9055_GPIO2_MODE_MASK 0x04
+
+/* DA9055_REG_BCORE_CONT (addr=0x13) */
+#define DA9055_BCORE_EN_SHIFT 0x00
+#define DA9055_BCORE_EN_MASK 0x01
+#define DA9055_BCORE_GPI_SHIFT 0x01
+#define DA9055_BCORE_GPI_MASK 0x02
+#define DA9055_BCORE_PD_DIS_SHIFT 0x03
+#define DA9055_BCORE_PD_DIS_MASK 0x04
+#define DA9055_VBCORE_SEL_SHIFT 0x04
+#define DA9055_SEL_REG_A 0x0
+#define DA9055_SEL_REG_B 0x10
+#define DA9055_VBCORE_SEL_MASK 0x10
+#define DA9055_V_GPI_MASK 0x60
+#define DA9055_V_GPI_SHIFT 0x05
+#define DA9055_E_GPI_MASK 0x06
+#define DA9055_E_GPI_SHIFT 0x01
+#define DA9055_VBCORE_GPI_SHIFT 0x05
+#define DA9055_VBCORE_GPI_MASK 0x60
+#define DA9055_BCORE_CONF_SHIFT 0x07
+#define DA9055_BCORE_CONF_MASK 0x80
+
+/* DA9055_REG_BMEM_CONT (addr=0x14) */
+#define DA9055_BMEM_EN_SHIFT 0x00
+#define DA9055_BMEM_EN_MASK 0x01
+#define DA9055_BMEM_GPI_SHIFT 0x01
+#define DA9055_BMEM_GPI_MASK 0x06
+#define DA9055_BMEM_PD_DIS_SHIFT 0x03
+#define DA9055_BMEM_PD_DIS_MASK 0x08
+#define DA9055_VBMEM_SEL_SHIT 0x04
+#define DA9055_VBMEM_SEL_VBMEM_A (0<<4)
+#define DA9055_VBMEM_SEL_VBMEM_B (1<<4)
+#define DA9055_VBMEM_SEL_MASK 0x10
+#define DA9055_VBMEM_GPI_SHIFT 0x05
+#define DA9055_VBMEM_GPI_MASK 0x60
+#define DA9055_BMEM_CONF_SHIFT 0x07
+#define DA9055_BMEM_CONF_MASK 0x80
+
+/* DA9055_REG_LDO_CONT (addr=0x15-0x1A) */
+#define DA9055_LDO_EN_SHIFT 0x00
+#define DA9055_LDO_EN_MASK 0x01
+#define DA9055_LDO_GPI_SHIFT 0x01
+#define DA9055_LDO_GPI_MASK 0x06
+#define DA9055_LDO_PD_DIS_SHIFT 0x03
+#define DA9055_LDO_PD_DIS_MASK 0x08
+#define DA9055_VLDO_SEL_SHIFT 0x04
+#define DA9055_VLDO_SEL_MASK 0x10
+#define DA9055_VLDO_SEL_VLDO_A 0x00
+#define DA9055_VLDO_SEL_VLDO_B 0x01
+#define DA9055_VLDO_GPI_SHIFT 0x05
+#define DA9055_VLDO_GPI_MASK 0x60
+#define DA9055_LDO_CONF_SHIFT 0x07
+#define DA9055_LDO_CONF_MASK 0x80
+#define DA9055_REGUALTOR_SET_A 0x00
+#define DA9055_REGUALTOR_SET_B 0x10
+
+/* DA9055_REG_ADC_MAN (addr=0x1B) */
+#define DA9055_ADC_MUX_SHIFT 0
+#define DA9055_ADC_MUX_MASK 0xF
+#define DA9055_ADC_MUX_VSYS 0x0
+#define DA9055_ADC_MUX_ADCIN1 0x01
+#define DA9055_ADC_MUX_ADCIN2 0x02
+#define DA9055_ADC_MUX_ADCIN3 0x03
+#define DA9055_ADC_MUX_T_SENSE 0x04
+#define DA9055_ADC_MAN_SHIFT 0x04
+#define DA9055_ADC_MAN_CONV 0x10
+#define DA9055_ADC_LSB_MASK 0X03
+#define DA9055_ADC_MODE_MASK 0x20
+#define DA9055_ADC_MODE_SHIFT 5
+#define DA9055_ADC_MODE_1MS (1<<5)
+#define DA9055_COMP1V2_EN_SHIFT 7
+
+/* DA9055_REG_ADC_CONT (addr=0x1C) */
+#define DA9055_ADC_AUTO_VSYS_EN_SHIFT 0
+#define DA9055_ADC_AUTO_AD1_EN_SHIFT 1
+#define DA9055_ADC_AUTO_AD2_EN_SHIFT 2
+#define DA9055_ADC_AUTO_AD3_EN_SHIFT 3
+#define DA9055_ADC_ISRC_EN_SHIFT 4
+#define DA9055_ADC_ADCIN1_DEB_SHIFT 5
+#define DA9055_ADC_ADCIN2_DEB_SHIFT 6
+#define DA9055_ADC_ADCIN3_DEB_SHIFT 7
+#define DA9055_AD1_ISRC_MASK 0x10
+#define DA9055_AD1_ISRC_SHIFT 4
+
+/* DA9055_REG_VSYS_MON (addr=0x1D) */
+#define DA9055_VSYS_VAL_SHIFT 0
+#define DA9055_VSYS_VAL_MASK 0xFF
+#define DA9055_VSYS_VAL_BASE 0x00
+#define DA9055_VSYS_VAL_MAX DA9055_VSYS_VAL_MASK
+#define DA9055_VSYS_VOLT_BASE 2500
+#define DA9055_VSYS_VOLT_INC 10
+#define DA9055_VSYS_STEPS 255
+#define DA9055_VSYS_VOLT_MIN 2500
+
+/* DA9055_REG_XXX_RES (addr=0x20-0x23) */
+#define DA9055_ADC_VAL_SHIFT 0
+#define DA9055_ADC_VAL_MASK 0xFF
+#define DA9055_ADC_VAL_BASE 0x00
+#define DA9055_ADC_VAL_MAX DA9055_ADC_VAL_MASK
+#define DA9055_ADC_VOLT_BASE 0
+#define DA9055_ADC_VSYS_VOLT_BASE 2500
+#define DA9055_ADC_VOLT_INC 10
+#define DA9055_ADC_VSYS_VOLT_INC 12
+#define DA9055_ADC_STEPS 255
+
+/* DA9055_REG_EN_32K (addr=0x35) */
+#define DA9055_STARTUP_TIME_MASK 0x07
+#define DA9055_STARTUP_TIME_0S 0x0
+#define DA9055_STARTUP_TIME_0_52S 0x1
+#define DA9055_STARTUP_TIME_1S 0x2
+#define DA9055_CRYSTAL_EN 0x08
+#define DA9055_DELAY_MODE_EN 0x10
+#define DA9055_OUT_CLCK_GATED 0x20
+#define DA9055_RTC_CLOCK_GATED 0x40
+#define DA9055_EN_32KOUT_BUF 0x80
+
+/* DA9055_REG_RESET (addr=0x36) */
+/* Timer up to 31.744 ms */
+#define DA9055_RESET_TIMER_VAL_SHIFT 0
+#define DA9055_RESET_LOW_VAL_MASK 0x3F
+#define DA9055_RESET_LOW_VAL_BASE 0
+#define DA9055_RESET_LOW_VAL_MAX DA9055_RESET_LOW_VAL_MASK
+#define DA9055_RESET_US_LOW_BASE 1024 /* min val in units of us */
+#define DA9055_RESET_US_LOW_INC 1024 /* inc val in units of us */
+#define DA9055_RESET_US_LOW_STEP 30
+
+/* Timer up to 1048.576ms */
+#define DA9055_RESET_HIGH_VAL_MASK 0x3F
+#define DA9055_RESET_HIGH_VAL_BASE 0
+#define DA9055_RESET_HIGH_VAL_MAX DA9055_RESET_HIGH_VAL_MASK
+#define DA9055_RESET_US_HIGH_BASE 32768 /* min val in units of us */
+#define DA9055_RESET_US_HIGH_INC	32768 /* inc val in units of us */
+#define DA9055_RESET_US_HIGH_STEP 31
+
+/* DA9055_REG_BUCK_ILIM (addr=0x37) */
+#define DA9055_BMEM_ILIM_SHIFT 0
+#define DA9055_ILIM_MASK 0x3
+#define DA9055_ILIM_500MA 0x0
+#define DA9055_ILIM_600MA 0x1
+#define DA9055_ILIM_700MA 0x2
+#define DA9055_ILIM_800MA 0x3
+#define DA9055_BCORE_ILIM_SHIFT 2
+
+/* DA9055_REG_BCORE_MODE (addr=0x38) */
+#define DA9055_BMEM_MODE_SHIFT 0
+#define DA9055_MODE_MASK 0x3
+#define DA9055_MODE_AB 0x0
+#define DA9055_MODE_SLEEP 0x1
+#define DA9055_MODE_SYNCHRO 0x2
+#define DA9055_MODE_AUTO 0x3
+#define DA9055_BCORE_MODE_SHIFT 2
+
+/* DA9055_REG_VBCORE_A/B (addr=0x39/0x41) */
+#define DA9055_VBCORE_VAL_SHIFT 0
+#define DA9055_VBCORE_VAL_MASK 0x3F
+#define DA9055_VBCORE_VAL_BASE 0x09
+#define DA9055_VBCORE_VAL_MAX DA9055_VBCORE_VAL_MASK
+#define DA9055_VBCORE_VOLT_BASE 750
+#define DA9055_VBCORE_VOLT_INC 25
+#define DA9055_VBCORE_STEPS 53
+#define DA9055_VBCORE_VOLT_MIN DA9055_VBCORE_VOLT_BASE
+#define DA9055_BCORE_SL_SYNCHRO (0<<7)
+#define DA9055_BCORE_SL_SLEEP (1<<7)
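+
+/*
+ * Editorial note: read together, the fields above suggest that the 6-bit
+ * VBCORE selector steps in DA9055_VBCORE_VOLT_INC (25 mV) increments from
+ * about DA9055_VBCORE_VOLT_BASE (750 mV), with DA9055_VBCORE_STEPS (53)
+ * usable steps, i.e. roughly V(mV) = 750 + 25 * n for n = 0..53.  The
+ * DA9055 regulator driver and datasheet remain the authoritative
+ * selector-to-voltage mapping.
+ */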
+
+/* DA9055_REG_VBMEM_A/B (addr=0x3A/0x42) */
+#define DA9055_VBMEM_VAL_SHIFT 0
+#define DA9055_VBMEM_VAL_MASK 0x3F
+#define DA9055_VBMEM_VAL_BASE 0x00
+#define DA9055_VBMEM_VAL_MAX DA9055_VBMEM_VAL_MASK
+#define DA9055_VBMEM_VOLT_BASE 925
+#define DA9055_VBMEM_VOLT_INC 25
+#define DA9055_VBMEM_STEPS 63
+#define DA9055_VBMEM_VOLT_MIN DA9055_VBMEM_VOLT_BASE
+#define DA9055_BCMEM_SL_SYNCHRO (0<<7)
+#define DA9055_BCMEM_SL_SLEEP (1<<7)
+
+
+/* DA9055_REG_VLDO (addr=0x3B-0x40/0x43-0x48) */
+#define DA9055_VLDO_VAL_SHIFT 0
+#define DA9055_VLDO_VAL_MASK 0x3F
+#define DA9055_VLDO6_VAL_MASK 0x7F
+#define DA9055_VLDO_VAL_BASE 0x02
+#define DA9055_VLDO2_VAL_BASE 0x03
+#define DA9055_VLDO6_VAL_BASE 0x00
+#define DA9055_VLDO_VAL_MAX DA9055_VLDO_VAL_MASK
+#define DA9055_VLDO6_VAL_MAX DA9055_VLDO6_VAL_MASK
+#define DA9055_VLDO_VOLT_BASE 900
+#define DA9055_VLDO_VOLT_INC 50
+#define DA9055_VLDO6_VOLT_INC 20
+#define DA9055_VLDO_STEPS 48
+#define DA9055_VLDO5_STEPS 37
+#define DA9055_VLDO6_STEPS 120
+#define DA9055_VLDO_VOLT_MIN DA9055_VLDO_VOLT_BASE
+#define DA9055_LDO_MODE_SHIFT 7
+#define DA9055_LDO_SL_NORMAL 0
+#define DA9055_LDO_SL_SLEEP 1
+
+/* DA9055_REG_OTP_CONT (addr=0x50) */
+#define DA9055_OTP_TIM_NORMAL (0<<0)
+#define DA9055_OTP_TIM_MARGINAL (1<<0)
+#define DA9055_OTP_GP_RD_SHIFT 1
+#define DA9055_OTP_APPS_RD_SHIFT 2
+#define DA9055_PC_DONE_SHIFT 3
+#define DA9055_OTP_GP_LOCK_SHIFT 4
+#define DA9055_OTP_APPS_LOCK_SHIFT 5
+#define DA9055_OTP_CONF_LOCK_SHIFT 6
+#define DA9055_OTP_WRITE_DIS_SHIFT 7
+
+/* DA9055_REG_COUNT_S (addr=0x53) */
+#define DA9055_RTC_SEC 0x3F
+#define DA9055_RTC_MONITOR_EN 0x40
+#define DA9055_RTC_READ 0x80
+
+/* DA9055_REG_COUNT_MI (addr=0x54) */
+#define DA9055_RTC_MIN 0x3F
+
+/* DA9055_REG_COUNT_H (addr=0x55) */
+#define DA9055_RTC_HOUR 0x1F
+
+/* DA9055_REG_COUNT_D (addr=0x56) */
+#define DA9055_RTC_DAY 0x1F
+
+/* DA9055_REG_COUNT_MO (addr=0x57) */
+#define DA9055_RTC_MONTH 0x0F
+
+/* DA9055_REG_COUNT_Y (addr=0x58) */
+#define DA9055_RTC_YEAR 0x3F
+#define DA9055_RTC_YEAR_BASE 2000
+
+/* DA9055_REG_ALARM_MI (addr=0x59) */
+#define DA9055_RTC_ALM_MIN 0x3F
+#define DA9055_ALARM_STATUS_SHIFT 6
+#define DA9055_ALARM_STATUS_MASK 0x3
+#define DA9055_ALARM_STATUS_NO_ALARM 0x0
+#define DA9055_ALARM_STATUS_TICK 0x1
+#define DA9055_ALARM_STATUS_TIMER_ALARM 0x2
+#define DA9055_ALARM_STATUS_BOTH 0x3
+
+/* DA9055_REG_ALARM_H (addr=0x5A) */
+#define DA9055_RTC_ALM_HOUR 0x1F
+
+/* DA9055_REG_ALARM_D (addr=0x5B) */
+#define DA9055_RTC_ALM_DAY 0x1F
+
+/* DA9055_REG_ALARM_MO (addr=0x5C) */
+#define DA9055_RTC_ALM_MONTH 0x0F
+#define DA9055_RTC_TICK_WAKE_MASK 0x20
+#define DA9055_RTC_TICK_WAKE_SHIFT 5
+#define DA9055_RTC_TICK_TYPE 0x10
+#define DA9055_RTC_TICK_TYPE_SHIFT 0x4
+#define DA9055_RTC_TICK_SEC 0x0
+#define DA9055_RTC_TICK_MIN 0x1
+#define DA9055_ALARAM_TICK_WAKE 0x20
+
+/* DA9055_REG_ALARM_Y (addr=0x5D) */
+#define DA9055_RTC_TICK_EN 0x80
+#define DA9055_RTC_ALM_EN 0x40
+#define DA9055_RTC_TICK_ALM_MASK 0xC0
+#define DA9055_RTC_ALM_YEAR 0x3F
+
+/* DA9055_REG_TRIM_CLDR (addr=0x62) */
+#define DA9055_TRIM_32K_SHIFT 0
+#define DA9055_TRIM_32K_MASK 0x7F
+#define DA9055_TRIM_DECREMENT (1<<7)
+#define DA9055_TRIM_INCREMENT (0<<7)
+#define DA9055_TRIM_VAL_BASE 0x0
+#define DA9055_TRIM_PPM_BASE 0x0 /* min val in units of 0.1PPM */
+#define DA9055_TRIM_PPM_INC 19 /* min inc in units of 0.1PPM */
+#define DA9055_TRIM_STEPS 127
+
+/* DA9055_REG_CONFIG_A (addr=0x65) */
+#define DA9055_PM_I_V_VDDCORE (0<<0)
+#define DA9055_PM_I_V_VDD_IO (1<<0)
+#define DA9055_VDD_FAULT_TYPE_ACT_LOW (0<<1)
+#define DA9055_VDD_FAULT_TYPE_ACT_HIGH (1<<1)
+#define DA9055_PM_O_TYPE_PUSH_PULL (0<<2)
+#define DA9055_PM_O_TYPE_OPEN_DRAIN (1<<2)
+#define DA9055_IRQ_TYPE_ACT_LOW (0<<3)
+#define DA9055_IRQ_TYPE_ACT_HIGH (1<<3)
+#define DA9055_NIRQ_MODE_IMM (0<<4)
+#define DA9055_NIRQ_MODE_ACTIVE (1<<4)
+#define DA9055_GPI_V_VDDCORE (0<<5)
+#define DA9055_GPI_V_VDD_IO (1<<5)
+#define DA9055_PM_IF_V_VDDCORE (0<<6)
+#define DA9055_PM_IF_V_VDD_IO (1<<6)
+
+/* DA9055_REG_CONFIG_B (addr=0x66) */
+#define DA9055_VDD_FAULT_VAL_SHIFT 0
+#define DA9055_VDD_FAULT_VAL_MASK 0xF
+#define DA9055_VDD_FAULT_VAL_BASE 0x0
+#define DA9055_VDD_FAULT_VAL_MAX DA9055_VDD_FAULT_VAL_MASK
+#define DA9055_VDD_FAULT_VOLT_BASE 2500
+#define DA9055_VDD_FAULT_VOLT_INC 50
+#define DA9055_VDD_FAULT_STEPS 15
+
+#define DA9055_VDD_HYST_VAL_SHIFT 4
+#define DA9055_VDD_HYST_VAL_MASK 0x7
+#define DA9055_VDD_HYST_VAL_BASE 0x0
+#define DA9055_VDD_HYST_VAL_MAX DA9055_VDD_HYST_VAL_MASK
+#define DA9055_VDD_HYST_VOLT_BASE 100
+#define DA9055_VDD_HYST_VOLT_INC 50
+#define DA9055_VDD_HYST_STEPS 7
+#define DA9055_VDD_HYST_VOLT_MIN DA9055_VDD_HYST_VOLT_BASE
+
+#define DA9055_VDD_FAULT_EN_SHIFT 7
+
+/* DA9055_REG_CONFIG_C (addr=0x67) */
+#define DA9055_BCORE_CLK_INV_SHIFT 0
+#define DA9055_BMEM_CLK_INV_SHIFT 1
+#define DA9055_NFAULT_CONF_SHIFT 2
+#define DA9055_LDO_SD_SHIFT 4
+#define DA9055_LDO5_BYP_SHIFT 6
+#define DA9055_LDO6_BYP_SHIFT 7
+
+/* DA9055_REG_CONFIG_D (addr=0x68) */
+#define DA9055_NONKEY_PIN_SHIFT 0
+#define DA9055_NONKEY_PIN_MASK 0x3
+#define DA9055_NONKEY_PIN_PORT_MODE 0x0
+#define DA9055_NONKEY_PIN_KEY_MODE 0x1
+#define DA9055_NONKEY_PIN_MULTI_FUNC 0x2
+#define DA9055_NONKEY_PIN_DEDICT 0x3
+#define DA9055_NONKEY_SD_SHIFT 2
+#define DA9055_KEY_DELAY_SHIFT 3
+#define DA9055_KEY_DELAY_MASK 0x3
+#define DA9055_KEY_DELAY_4S 0x0
+#define DA9055_KEY_DELAY_6S 0x1
+#define DA9055_KEY_DELAY_8S 0x2
+#define DA9055_KEY_DELAY_10S 0x3
+
+/* DA9055_REG_CONFIG_E (addr=0x69) */
+#define DA9055_GPIO_PUPD_PULL_UP 0x0
+#define DA9055_GPIO_PUPD_OPEN_DRAIN 0x1
+#define DA9055_GPIO0_PUPD_SHIFT 0
+#define DA9055_GPIO1_PUPD_SHIFT 1
+#define DA9055_GPIO2_PUPD_SHIFT 2
+#define DA9055_UVOV_DELAY_SHIFT 4
+#define DA9055_UVOV_DELAY_MASK 0x3
+#define DA9055_RESET_DURATION_SHIFT 6
+#define DA9055_RESET_DURATION_MASK 0x3
+#define DA9055_RESET_DURATION_0MS 0x0
+#define DA9055_RESET_DURATION_100MS 0x1
+#define DA9055_RESET_DURATION_500MS 0x2
+#define DA9055_RESET_DURATION_1000MS 0x3
+
+/* DA9055_REG_MON_REG_1 (addr=0x6A) */
+#define DA9055_MON_THRES_SHIFT 0
+#define DA9055_MON_THRES_MASK 0x3
+#define DA9055_MON_RES_SHIFT 2
+#define DA9055_MON_DEB_SHIFT 3
+#define DA9055_MON_MODE_SHIFT 4
+#define DA9055_MON_MODE_MASK 0x3
+#define DA9055_START_MAX_SHIFT 6
+#define DA9055_START_MAX_MASK 0x3
+
+/* DA9055_REG_MON_REG_2 (addr=0x6B) */
+#define DA9055_LDO1_MON_EN_SHIFT 0
+#define DA9055_LDO2_MON_EN_SHIFT 1
+#define DA9055_LDO3_MON_EN_SHIFT 2
+#define DA9055_LDO4_MON_EN_SHIFT 3
+#define DA9055_LDO5_MON_EN_SHIFT 4
+#define DA9055_LDO6_MON_EN_SHIFT 5
+#define DA9055_BCORE_MON_EN_SHIFT 6
+#define DA9055_BMEM_MON_EN_SHIFT 7
+
+/* DA9055_REG_CONFIG_F (addr=0x6C) */
+#define DA9055_LDO1_DEF_SHIFT 0
+#define DA9055_LDO2_DEF_SHIFT 1
+#define DA9055_LDO3_DEF_SHIFT 2
+#define DA9055_LDO4_DEF_SHIFT 3
+#define DA9055_LDO5_DEF_SHIFT 4
+#define DA9055_LDO6_DEF_SHIFT 5
+#define DA9055_BCORE_DEF_SHIFT 6
+#define DA9055_BMEM_DEF_SHIFT 7
+
+/* DA9055_REG_MON_REG_4 (addr=0x6D) */
+#define DA9055_MON_A8_IDX_SHIFT 0
+#define DA9055_MON_A89_IDX_MASK 0x3
+#define DA9055_MON_A89_IDX_NONE 0x0
+#define DA9055_MON_A89_IDX_BUCKCORE 0x1
+#define DA9055_MON_A89_IDX_LDO3 0x2
+#define DA9055_MON_A9_IDX_SHIFT 5
+
+/* DA9055_REG_MON_REG_5 (addr=0x6E) */
+#define DA9055_MON_A10_IDX_SHIFT 0
+#define DA9055_MON_A10_IDX_MASK 0x3
+#define DA9055_MON_A10_IDX_NONE 0x0
+#define DA9055_MON_A10_IDX_LDO1 0x1
+#define DA9055_MON_A10_IDX_LDO2 0x2
+#define DA9055_MON_A10_IDX_LDO5 0x3
+#define DA9055_MON_A10_IDX_LDO6 0x4
+
+#endif /* __DA9055_REG_H */
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
new file mode 100644
index 000000000..79f4d822b
--- /dev/null
+++ b/include/linux/mfd/da9063/core.h
@@ -0,0 +1,99 @@
+/*
+ * Definitions for DA9063 MFD driver
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ * Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_DA9063_CORE_H__
+#define __MFD_DA9063_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9063/registers.h>
+
+/* DA9063 modules */
+#define DA9063_DRVNAME_CORE "da9063-core"
+#define DA9063_DRVNAME_REGULATORS "da9063-regulators"
+#define DA9063_DRVNAME_LEDS "da9063-leds"
+#define DA9063_DRVNAME_WATCHDOG "da9063-watchdog"
+#define DA9063_DRVNAME_HWMON "da9063-hwmon"
+#define DA9063_DRVNAME_ONKEY "da9063-onkey"
+#define DA9063_DRVNAME_RTC "da9063-rtc"
+#define DA9063_DRVNAME_VIBRATION "da9063-vibration"
+
+enum da9063_models {
+ PMIC_DA9063 = 0x61,
+};
+
+enum da9063_variant_codes {
+ PMIC_DA9063_AD = 0x3,
+ PMIC_DA9063_BB = 0x5,
+ PMIC_DA9063_CA = 0x6,
+};
+
+/* Interrupts */
+enum da9063_irqs {
+ DA9063_IRQ_ONKEY = 0,
+ DA9063_IRQ_ALARM,
+ DA9063_IRQ_TICK,
+ DA9063_IRQ_ADC_RDY,
+ DA9063_IRQ_SEQ_RDY,
+ DA9063_IRQ_WAKE,
+ DA9063_IRQ_TEMP,
+ DA9063_IRQ_COMP_1V2,
+ DA9063_IRQ_LDO_LIM,
+ DA9063_IRQ_REG_UVOV,
+ DA9063_IRQ_VDD_MON,
+ DA9063_IRQ_WARN,
+ DA9063_IRQ_GPI0,
+ DA9063_IRQ_GPI1,
+ DA9063_IRQ_GPI2,
+ DA9063_IRQ_GPI3,
+ DA9063_IRQ_GPI4,
+ DA9063_IRQ_GPI5,
+ DA9063_IRQ_GPI6,
+ DA9063_IRQ_GPI7,
+ DA9063_IRQ_GPI8,
+ DA9063_IRQ_GPI9,
+ DA9063_IRQ_GPI10,
+ DA9063_IRQ_GPI11,
+ DA9063_IRQ_GPI12,
+ DA9063_IRQ_GPI13,
+ DA9063_IRQ_GPI14,
+ DA9063_IRQ_GPI15,
+};
+
+#define DA9063_IRQ_BASE_OFFSET 0
+#define DA9063_NUM_IRQ (DA9063_IRQ_GPI15 + 1 - DA9063_IRQ_BASE_OFFSET)
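+
+/*
+ * Editorial sketch (not part of the original header): a DA9063 sub-driver
+ * would normally translate one of the enum da9063_irqs values above into a
+ * Linux virtual IRQ before requesting it.  "alarm_handler", "priv" and
+ * "dev" are hypothetical names used only for this example.
+ *
+ *	int virq = regmap_irq_get_virq(da9063->regmap_irq, DA9063_IRQ_ALARM);
+ *	if (virq < 0)
+ *		return virq;
+ *	ret = devm_request_threaded_irq(dev, virq, NULL, alarm_handler,
+ *					IRQF_ONESHOT, "da9063-alarm", priv);
+ */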
+
+struct da9063 {
+ /* Device */
+ struct device *dev;
+ unsigned short model;
+ unsigned char variant_code;
+ unsigned int flags;
+
+ /* Control interface */
+ struct regmap *regmap;
+
+ /* Interrupts */
+ int chip_irq;
+ unsigned int irq_base;
+ struct regmap_irq_chip_data *regmap_irq;
+};
+
+int da9063_device_init(struct da9063 *da9063, unsigned int irq);
+int da9063_irq_init(struct da9063 *da9063);
+
+void da9063_device_exit(struct da9063 *da9063);
+void da9063_irq_exit(struct da9063 *da9063);
+
+#endif /* __MFD_DA9063_CORE_H__ */
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
new file mode 100644
index 000000000..95c874221
--- /dev/null
+++ b/include/linux/mfd/da9063/pdata.h
@@ -0,0 +1,111 @@
+/*
+ * Platform configuration options for DA9063
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_DA9063_PDATA_H__
+#define __MFD_DA9063_PDATA_H__
+
+#include <linux/regulator/machine.h>
+
+/*
+ * Regulator configuration
+ */
+/* DA9063 regulator IDs */
+enum {
+ /* BUCKs */
+ DA9063_ID_BCORE1,
+ DA9063_ID_BCORE2,
+ DA9063_ID_BPRO,
+ DA9063_ID_BMEM,
+ DA9063_ID_BIO,
+ DA9063_ID_BPERI,
+
+ /* BCORE1 and BCORE2 in merged mode */
+ DA9063_ID_BCORES_MERGED,
+ /* BMEM and BIO in merged mode */
+ DA9063_ID_BMEM_BIO_MERGED,
+ /* When two BUCKs are merged, they cannot be reused separately */
+
+ /* LDOs */
+ DA9063_ID_LDO1,
+ DA9063_ID_LDO2,
+ DA9063_ID_LDO3,
+ DA9063_ID_LDO4,
+ DA9063_ID_LDO5,
+ DA9063_ID_LDO6,
+ DA9063_ID_LDO7,
+ DA9063_ID_LDO8,
+ DA9063_ID_LDO9,
+ DA9063_ID_LDO10,
+ DA9063_ID_LDO11,
+};
+
+/* Regulators platform data */
+struct da9063_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+};
+
+struct da9063_regulators_pdata {
+ unsigned n_regulators;
+ struct da9063_regulator_data *regulator_data;
+};
+
+
+/*
+ * RGB LED configuration
+ */
+/* LED IDs for flags in struct led_info. */
+enum {
+ DA9063_GPIO11_LED,
+ DA9063_GPIO14_LED,
+ DA9063_GPIO15_LED,
+
+ DA9063_LED_NUM
+};
+#define DA9063_LED_ID_MASK 0x3
+
+/* LED polarity for flags in struct led_info. */
+#define DA9063_LED_HIGH_LEVEL_ACTIVE 0x0
+#define DA9063_LED_LOW_LEVEL_ACTIVE 0x4
+
+
+/*
+ * General PMIC configuration
+ */
+/* HWMON ADC channels configuration */
+#define DA9063_FLG_FORCE_IN0_MANUAL_MODE 0x0010
+#define DA9063_FLG_FORCE_IN0_AUTO_MODE 0x0020
+#define DA9063_FLG_FORCE_IN1_MANUAL_MODE 0x0040
+#define DA9063_FLG_FORCE_IN1_AUTO_MODE 0x0080
+#define DA9063_FLG_FORCE_IN2_MANUAL_MODE 0x0100
+#define DA9063_FLG_FORCE_IN2_AUTO_MODE 0x0200
+#define DA9063_FLG_FORCE_IN3_MANUAL_MODE 0x0400
+#define DA9063_FLG_FORCE_IN3_AUTO_MODE 0x0800
+
+/* Disable register caching. */
+#define DA9063_FLG_NO_CACHE 0x0008
+
+struct da9063;
+
+/* DA9063 platform data */
+struct da9063_pdata {
+ int (*init)(struct da9063 *da9063);
+ int irq_base;
+ unsigned flags;
+ struct da9063_regulators_pdata *regulators_pdata;
+ struct led_platform_data *leds_pdata;
+};
+
+#endif /* __MFD_DA9063_PDATA_H__ */
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
new file mode 100644
index 000000000..2e0ba6d5f
--- /dev/null
+++ b/include/linux/mfd/da9063/registers.h
@@ -0,0 +1,1073 @@
+/*
+ * Registers definition for DA9063 modules
+ *
+ * Copyright 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Michal Hajduk <michal.hajduk@diasemi.com>
+ * Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _DA9063_REG_H
+#define _DA9063_REG_H
+
+#define DA9063_I2C_PAGE_SEL_SHIFT 1
+#define DA9063_EVENT_REG_NUM 4
+
+/* Page selection for I2C or SPI is always at the beginning of any page. */
+/* Page 0 : I2C access 0x000 - 0x0FF SPI access 0x000 - 0x07F */
+/* Page 1 : SPI access 0x080 - 0x0FF */
+/* Page 2 : I2C access 0x100 - 0x1FF SPI access 0x100 - 0x17F */
+/* Page 3 : SPI access 0x180 - 0x1FF */
+#define DA9063_REG_PAGE_CON 0x00
+
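+/*
+ * Editorial sketch (not from this header): with regmap, the paging scheme
+ * described above is usually modelled as an indirect range whose selector
+ * register is DA9063_REG_PAGE_CON.  The range_max value and the window
+ * size below are assumptions for illustration; the DA9063 MFD driver is
+ * the authoritative source.
+ *
+ *	static const struct regmap_range_cfg da9063_range_cfg[] = {
+ *		{
+ *			.range_min      = DA9063_REG_PAGE_CON,
+ *			.range_max      = DA9063_REG_CONFIG_L,
+ *			.selector_reg   = DA9063_REG_PAGE_CON,
+ *			.selector_mask  = 1 << DA9063_I2C_PAGE_SEL_SHIFT,
+ *			.selector_shift = DA9063_I2C_PAGE_SEL_SHIFT,
+ *			.window_start   = 0,
+ *			.window_len     = 256,
+ *		},
+ *	};
+ */
+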
+/* System Control and Event Registers */
+#define DA9063_REG_STATUS_A 0x01
+#define DA9063_REG_STATUS_B 0x02
+#define DA9063_REG_STATUS_C 0x03
+#define DA9063_REG_STATUS_D 0x04
+#define DA9063_REG_FAULT_LOG 0x05
+#define DA9063_REG_EVENT_A 0x06
+#define DA9063_REG_EVENT_B 0x07
+#define DA9063_REG_EVENT_C 0x08
+#define DA9063_REG_EVENT_D 0x09
+#define DA9063_REG_IRQ_MASK_A 0x0A
+#define DA9063_REG_IRQ_MASK_B 0x0B
+#define DA9063_REG_IRQ_MASK_C 0x0C
+#define DA9063_REG_IRQ_MASK_D 0x0D
+#define DA9063_REG_CONTROL_A 0x0E
+#define DA9063_REG_CONTROL_B 0x0F
+#define DA9063_REG_CONTROL_C 0x10
+#define DA9063_REG_CONTROL_D 0x11
+#define DA9063_REG_CONTROL_E 0x12
+#define DA9063_REG_CONTROL_F 0x13
+#define DA9063_REG_PD_DIS 0x14
+
+/* GPIO Control Registers */
+#define DA9063_REG_GPIO_0_1 0x15
+#define DA9063_REG_GPIO_2_3 0x16
+#define DA9063_REG_GPIO_4_5 0x17
+#define DA9063_REG_GPIO_6_7 0x18
+#define DA9063_REG_GPIO_8_9 0x19
+#define DA9063_REG_GPIO_10_11 0x1A
+#define DA9063_REG_GPIO_12_13 0x1B
+#define DA9063_REG_GPIO_14_15 0x1C
+#define DA9063_REG_GPIO_MODE0_7 0x1D
+#define DA9063_REG_GPIO_MODE8_15 0x1E
+#define DA9063_REG_SWITCH_CONT 0x1F
+
+/* Regulator Control Registers */
+#define DA9063_REG_BCORE2_CONT 0x20
+#define DA9063_REG_BCORE1_CONT 0x21
+#define DA9063_REG_BPRO_CONT 0x22
+#define DA9063_REG_BMEM_CONT 0x23
+#define DA9063_REG_BIO_CONT 0x24
+#define DA9063_REG_BPERI_CONT 0x25
+#define DA9063_REG_LDO1_CONT 0x26
+#define DA9063_REG_LDO2_CONT 0x27
+#define DA9063_REG_LDO3_CONT 0x28
+#define DA9063_REG_LDO4_CONT 0x29
+#define DA9063_REG_LDO5_CONT 0x2A
+#define DA9063_REG_LDO6_CONT 0x2B
+#define DA9063_REG_LDO7_CONT 0x2C
+#define DA9063_REG_LDO8_CONT 0x2D
+#define DA9063_REG_LDO9_CONT 0x2E
+#define DA9063_REG_LDO10_CONT 0x2F
+#define DA9063_REG_LDO11_CONT 0x30
+#define DA9063_REG_SUPPLIES 0x31
+#define DA9063_REG_DVC_1 0x32
+#define DA9063_REG_DVC_2 0x33
+
+/* GP-ADC Control Registers */
+#define DA9063_REG_ADC_MAN 0x34
+#define DA9063_REG_ADC_CONT 0x35
+#define DA9063_REG_VSYS_MON 0x36
+#define DA9063_REG_ADC_RES_L 0x37
+#define DA9063_REG_ADC_RES_H 0x38
+#define DA9063_REG_VSYS_RES 0x39
+#define DA9063_REG_ADCIN1_RES 0x3A
+#define DA9063_REG_ADCIN2_RES 0x3B
+#define DA9063_REG_ADCIN3_RES 0x3C
+#define DA9063_REG_MON_A8_RES 0x3D
+#define DA9063_REG_MON_A9_RES 0x3E
+#define DA9063_REG_MON_A10_RES 0x3F
+
+/* RTC Calendar and Alarm Registers */
+#define DA9063_REG_COUNT_S 0x40
+#define DA9063_REG_COUNT_MI 0x41
+#define DA9063_REG_COUNT_H 0x42
+#define DA9063_REG_COUNT_D 0x43
+#define DA9063_REG_COUNT_MO 0x44
+#define DA9063_REG_COUNT_Y 0x45
+
+#define DA9063_AD_REG_ALARM_MI 0x46
+#define DA9063_AD_REG_ALARM_H 0x47
+#define DA9063_AD_REG_ALARM_D 0x48
+#define DA9063_AD_REG_ALARM_MO 0x49
+#define DA9063_AD_REG_ALARM_Y 0x4A
+#define DA9063_AD_REG_SECOND_A 0x4B
+#define DA9063_AD_REG_SECOND_B 0x4C
+#define DA9063_AD_REG_SECOND_C 0x4D
+#define DA9063_AD_REG_SECOND_D 0x4E
+
+#define DA9063_BB_REG_ALARM_S 0x46
+#define DA9063_BB_REG_ALARM_MI 0x47
+#define DA9063_BB_REG_ALARM_H 0x48
+#define DA9063_BB_REG_ALARM_D 0x49
+#define DA9063_BB_REG_ALARM_MO 0x4A
+#define DA9063_BB_REG_ALARM_Y 0x4B
+#define DA9063_BB_REG_SECOND_A 0x4C
+#define DA9063_BB_REG_SECOND_B 0x4D
+#define DA9063_BB_REG_SECOND_C 0x4E
+#define DA9063_BB_REG_SECOND_D 0x4F
+
+/* Sequencer Control Registers */
+#define DA9063_REG_SEQ 0x81
+#define DA9063_REG_SEQ_TIMER 0x82
+#define DA9063_REG_ID_2_1 0x83
+#define DA9063_REG_ID_4_3 0x84
+#define DA9063_REG_ID_6_5 0x85
+#define DA9063_REG_ID_8_7 0x86
+#define DA9063_REG_ID_10_9 0x87
+#define DA9063_REG_ID_12_11 0x88
+#define DA9063_REG_ID_14_13 0x89
+#define DA9063_REG_ID_16_15 0x8A
+#define DA9063_REG_ID_18_17 0x8B
+#define DA9063_REG_ID_20_19 0x8C
+#define DA9063_REG_ID_22_21 0x8D
+#define DA9063_REG_ID_24_23 0x8E
+#define DA9063_REG_ID_26_25 0x8F
+#define DA9063_REG_ID_28_27 0x90
+#define DA9063_REG_ID_30_29 0x91
+#define DA9063_REG_ID_32_31 0x92
+#define DA9063_REG_SEQ_A 0x95
+#define DA9063_REG_SEQ_B 0x96
+#define DA9063_REG_WAIT 0x97
+#define DA9063_REG_EN_32K 0x98
+#define DA9063_REG_RESET 0x99
+
+/* Regulator Setting Registers */
+#define DA9063_REG_BUCK_ILIM_A 0x9A
+#define DA9063_REG_BUCK_ILIM_B 0x9B
+#define DA9063_REG_BUCK_ILIM_C 0x9C
+#define DA9063_REG_BCORE2_CFG 0x9D
+#define DA9063_REG_BCORE1_CFG 0x9E
+#define DA9063_REG_BPRO_CFG 0x9F
+#define DA9063_REG_BIO_CFG 0xA0
+#define DA9063_REG_BMEM_CFG 0xA1
+#define DA9063_REG_BPERI_CFG 0xA2
+#define DA9063_REG_VBCORE2_A 0xA3
+#define DA9063_REG_VBCORE1_A 0xA4
+#define DA9063_REG_VBPRO_A 0xA5
+#define DA9063_REG_VBMEM_A 0xA6
+#define DA9063_REG_VBIO_A 0xA7
+#define DA9063_REG_VBPERI_A 0xA8
+#define DA9063_REG_VLDO1_A 0xA9
+#define DA9063_REG_VLDO2_A 0xAA
+#define DA9063_REG_VLDO3_A 0xAB
+#define DA9063_REG_VLDO4_A 0xAC
+#define DA9063_REG_VLDO5_A 0xAD
+#define DA9063_REG_VLDO6_A 0xAE
+#define DA9063_REG_VLDO7_A 0xAF
+#define DA9063_REG_VLDO8_A 0xB0
+#define DA9063_REG_VLDO9_A 0xB1
+#define DA9063_REG_VLDO10_A 0xB2
+#define DA9063_REG_VLDO11_A 0xB3
+#define DA9063_REG_VBCORE2_B 0xB4
+#define DA9063_REG_VBCORE1_B 0xB5
+#define DA9063_REG_VBPRO_B 0xB6
+#define DA9063_REG_VBMEM_B 0xB7
+#define DA9063_REG_VBIO_B 0xB8
+#define DA9063_REG_VBPERI_B 0xB9
+#define DA9063_REG_VLDO1_B 0xBA
+#define DA9063_REG_VLDO2_B 0xBB
+#define DA9063_REG_VLDO3_B 0xBC
+#define DA9063_REG_VLDO4_B 0xBD
+#define DA9063_REG_VLDO5_B 0xBE
+#define DA9063_REG_VLDO6_B 0xBF
+#define DA9063_REG_VLDO7_B 0xC0
+#define DA9063_REG_VLDO8_B 0xC1
+#define DA9063_REG_VLDO9_B 0xC2
+#define DA9063_REG_VLDO10_B 0xC3
+#define DA9063_REG_VLDO11_B 0xC4
+
+/* Backup Battery Charger Control Register */
+#define DA9063_REG_BBAT_CONT 0xC5
+
+/* GPIO PWM (LED) */
+#define DA9063_REG_GPO11_LED 0xC6
+#define DA9063_REG_GPO14_LED 0xC7
+#define DA9063_REG_GPO15_LED 0xC8
+
+/* GP-ADC Threshold Registers */
+#define DA9063_REG_ADC_CFG 0xC9
+#define DA9063_REG_AUTO1_HIGH 0xCA
+#define DA9063_REG_AUTO1_LOW 0xCB
+#define DA9063_REG_AUTO2_HIGH 0xCC
+#define DA9063_REG_AUTO2_LOW 0xCD
+#define DA9063_REG_AUTO3_HIGH 0xCE
+#define DA9063_REG_AUTO3_LOW 0xCF
+
+/* DA9063 Configuration registers */
+/* OTP */
+#define DA9063_REG_OPT_COUNT 0x101
+#define DA9063_REG_OPT_ADDR 0x102
+#define DA9063_REG_OPT_DATA 0x103
+
+/* Customer Trim and Configuration */
+#define DA9063_REG_T_OFFSET 0x104
+#define DA9063_REG_INTERFACE 0x105
+#define DA9063_REG_CONFIG_A 0x106
+#define DA9063_REG_CONFIG_B 0x107
+#define DA9063_REG_CONFIG_C 0x108
+#define DA9063_REG_CONFIG_D 0x109
+#define DA9063_REG_CONFIG_E 0x10A
+#define DA9063_REG_CONFIG_F 0x10B
+#define DA9063_REG_CONFIG_G 0x10C
+#define DA9063_REG_CONFIG_H 0x10D
+#define DA9063_REG_CONFIG_I 0x10E
+#define DA9063_REG_CONFIG_J 0x10F
+#define DA9063_REG_CONFIG_K 0x110
+#define DA9063_REG_CONFIG_L 0x111
+
+#define DA9063_AD_REG_MON_REG_1 0x112
+#define DA9063_AD_REG_MON_REG_2 0x113
+#define DA9063_AD_REG_MON_REG_3 0x114
+#define DA9063_AD_REG_MON_REG_4 0x115
+#define DA9063_AD_REG_MON_REG_5 0x116
+#define DA9063_AD_REG_MON_REG_6 0x117
+#define DA9063_AD_REG_TRIM_CLDR 0x118
+
+#define DA9063_AD_REG_GP_ID_0 0x119
+#define DA9063_AD_REG_GP_ID_1 0x11A
+#define DA9063_AD_REG_GP_ID_2 0x11B
+#define DA9063_AD_REG_GP_ID_3 0x11C
+#define DA9063_AD_REG_GP_ID_4 0x11D
+#define DA9063_AD_REG_GP_ID_5 0x11E
+#define DA9063_AD_REG_GP_ID_6 0x11F
+#define DA9063_AD_REG_GP_ID_7 0x120
+#define DA9063_AD_REG_GP_ID_8 0x121
+#define DA9063_AD_REG_GP_ID_9 0x122
+#define DA9063_AD_REG_GP_ID_10 0x123
+#define DA9063_AD_REG_GP_ID_11 0x124
+#define DA9063_AD_REG_GP_ID_12 0x125
+#define DA9063_AD_REG_GP_ID_13 0x126
+#define DA9063_AD_REG_GP_ID_14 0x127
+#define DA9063_AD_REG_GP_ID_15 0x128
+#define DA9063_AD_REG_GP_ID_16 0x129
+#define DA9063_AD_REG_GP_ID_17 0x12A
+#define DA9063_AD_REG_GP_ID_18 0x12B
+#define DA9063_AD_REG_GP_ID_19 0x12C
+
+#define DA9063_BB_REG_CONFIG_M 0x112
+#define DA9063_BB_REG_CONFIG_N 0x113
+
+#define DA9063_BB_REG_MON_REG_1 0x114
+#define DA9063_BB_REG_MON_REG_2 0x115
+#define DA9063_BB_REG_MON_REG_3 0x116
+#define DA9063_BB_REG_MON_REG_4 0x117
+#define DA9063_BB_REG_MON_REG_5 0x11E
+#define DA9063_BB_REG_MON_REG_6 0x11F
+#define DA9063_BB_REG_TRIM_CLDR 0x120
+/* General Purpose Registers */
+#define DA9063_BB_REG_GP_ID_0 0x121
+#define DA9063_BB_REG_GP_ID_1 0x122
+#define DA9063_BB_REG_GP_ID_2 0x123
+#define DA9063_BB_REG_GP_ID_3 0x124
+#define DA9063_BB_REG_GP_ID_4 0x125
+#define DA9063_BB_REG_GP_ID_5 0x126
+#define DA9063_BB_REG_GP_ID_6 0x127
+#define DA9063_BB_REG_GP_ID_7 0x128
+#define DA9063_BB_REG_GP_ID_8 0x129
+#define DA9063_BB_REG_GP_ID_9 0x12A
+#define DA9063_BB_REG_GP_ID_10 0x12B
+#define DA9063_BB_REG_GP_ID_11 0x12C
+#define DA9063_BB_REG_GP_ID_12 0x12D
+#define DA9063_BB_REG_GP_ID_13 0x12E
+#define DA9063_BB_REG_GP_ID_14 0x12F
+#define DA9063_BB_REG_GP_ID_15 0x130
+#define DA9063_BB_REG_GP_ID_16 0x131
+#define DA9063_BB_REG_GP_ID_17 0x132
+#define DA9063_BB_REG_GP_ID_18 0x133
+#define DA9063_BB_REG_GP_ID_19 0x134
+
+/* Chip ID and variant */
+#define DA9063_REG_CHIP_ID 0x181
+#define DA9063_REG_CHIP_VARIANT 0x182
+
+/*
+ * PMIC registers bits
+ */
+/* DA9063_REG_PAGE_CON (addr=0x00) */
+#define DA9063_PEG_PAGE_SHIFT 0
+#define DA9063_REG_PAGE_MASK 0x07
+#define DA9063_REG_PAGE0 0x00
+#define DA9063_REG_PAGE2 0x02
+#define DA9063_PAGE_WRITE_MODE 0x00
+#define DA9063_REPEAT_WRITE_MODE 0x40
+#define DA9063_PAGE_REVERT 0x80
+
+/* DA9063_REG_STATUS_A (addr=0x01) */
+#define DA9063_NONKEY 0x01
+#define DA9063_WAKE 0x02
+#define DA9063_DVC_BUSY 0x04
+#define DA9063_COMP_1V2 0x08
+
+/* DA9063_REG_STATUS_B (addr=0x02) */
+#define DA9063_GPI0 0x01
+#define DA9063_GPI1 0x02
+#define DA9063_GPI2 0x04
+#define DA9063_GPI3 0x08
+#define DA9063_GPI4 0x10
+#define DA9063_GPI5 0x20
+#define DA9063_GPI6 0x40
+#define DA9063_GPI7 0x80
+
+/* DA9063_REG_STATUS_C (addr=0x03) */
+#define DA9063_GPI8 0x01
+#define DA9063_GPI9 0x02
+#define DA9063_GPI10 0x04
+#define DA9063_GPI11 0x08
+#define DA9063_GPI12 0x10
+#define DA9063_GPI13 0x20
+#define DA9063_GPI14 0x40
+#define DA9063_GPI15 0x80
+
+/* DA9063_REG_STATUS_D (addr=0x04) */
+#define DA9063_LDO3_LIM 0x08
+#define DA9063_LDO4_LIM 0x10
+#define DA9063_LDO7_LIM 0x20
+#define DA9063_LDO8_LIM 0x40
+#define DA9063_LDO11_LIM 0x80
+
+/* DA9063_REG_FAULT_LOG (addr=0x05) */
+#define DA9063_TWD_ERROR 0x01
+#define DA9063_POR 0x02
+#define DA9063_VDD_FAULT 0x04
+#define DA9063_VDD_START 0x08
+#define DA9063_TEMP_CRIT 0x10
+#define DA9063_KEY_RESET 0x20
+#define DA9063_NSHUTDOWN 0x40
+#define DA9063_WAIT_SHUT 0x80
+
+/* DA9063_REG_EVENT_A (addr=0x06) */
+#define DA9063_E_NONKEY 0x01
+#define DA9063_E_ALARM 0x02
+#define DA9063_E_TICK 0x04
+#define DA9063_E_ADC_RDY 0x08
+#define DA9063_E_SEQ_RDY 0x10
+#define DA9063_EVENTS_B 0x20
+#define DA9063_EVENTS_C 0x40
+#define DA9063_EVENTS_D 0x80
+
+/* DA9063_REG_EVENT_B (addr=0x07) */
+#define DA9063_E_WAKE 0x01
+#define DA9063_E_TEMP 0x02
+#define DA9063_E_COMP_1V2 0x04
+#define DA9063_E_LDO_LIM 0x08
+#define DA9063_E_REG_UVOV 0x10
+#define DA9063_E_DVC_RDY 0x20
+#define DA9063_E_VDD_MON 0x40
+#define DA9063_E_VDD_WARN 0x80
+
+/* DA9063_REG_EVENT_C (addr=0x08) */
+#define DA9063_E_GPI0 0x01
+#define DA9063_E_GPI1 0x02
+#define DA9063_E_GPI2 0x04
+#define DA9063_E_GPI3 0x08
+#define DA9063_E_GPI4 0x10
+#define DA9063_E_GPI5 0x20
+#define DA9063_E_GPI6 0x40
+#define DA9063_E_GPI7 0x80
+
+/* DA9063_REG_EVENT_D (addr=0x09) */
+#define DA9063_E_GPI8 0x01
+#define DA9063_E_GPI9 0x02
+#define DA9063_E_GPI10 0x04
+#define DA9063_E_GPI11 0x08
+#define DA9063_E_GPI12 0x10
+#define DA9063_E_GPI13 0x20
+#define DA9063_E_GPI14 0x40
+#define DA9063_E_GPI15 0x80
+
+/* DA9063_REG_IRQ_MASK_A (addr=0x0A) */
+#define DA9063_M_ONKEY 0x01
+#define DA9063_M_ALARM 0x02
+#define DA9063_M_TICK 0x04
+#define DA9063_M_ADC_RDY 0x08
+#define DA9063_M_SEQ_RDY 0x10
+
+/* DA9063_REG_IRQ_MASK_B (addr=0x0B) */
+#define DA9063_M_WAKE 0x01
+#define DA9063_M_TEMP 0x02
+#define DA9063_M_COMP_1V2 0x04
+#define DA9063_M_LDO_LIM 0x08
+#define DA9063_M_UVOV 0x10
+#define DA9063_M_DVC_RDY 0x20
+#define DA9063_M_VDD_MON 0x40
+#define DA9063_M_VDD_WARN 0x80
+
+/* DA9063_REG_IRQ_MASK_C (addr=0x0C) */
+#define DA9063_M_GPI0 0x01
+#define DA9063_M_GPI1 0x02
+#define DA9063_M_GPI2 0x04
+#define DA9063_M_GPI3 0x08
+#define DA9063_M_GPI4 0x10
+#define DA9063_M_GPI5 0x20
+#define DA9063_M_GPI6 0x40
+#define DA9063_M_GPI7 0x80
+
+/* DA9063_REG_IRQ_MASK_D (addr=0x0D) */
+#define DA9063_M_GPI8 0x01
+#define DA9063_M_GPI9 0x02
+#define DA9063_M_GPI10 0x04
+#define DA9063_M_GPI11 0x08
+#define DA9063_M_GPI12 0x10
+#define DA9063_M_GPI13 0x20
+#define DA9063_M_GPI14 0x40
+#define DA9063_M_GPI15 0x80
+
+/* DA9063_REG_CONTROL_A (addr=0x0E) */
+#define DA9063_SYSTEM_EN 0x01
+#define DA9063_POWER_EN 0x02
+#define DA9063_POWER1_EN 0x04
+#define DA9063_STANDBY 0x08
+#define DA9063_M_SYSTEM_EN 0x10
+#define DA9063_M_POWER_EN 0x20
+#define DA9063_M_POWER1_EN 0x40
+#define DA9063_CP_EN 0x80
+
+/* DA9063_REG_CONTROL_B (addr=0x0F) */
+#define DA9063_CHG_SEL 0x01
+#define DA9063_WATCHDOG_PD 0x02
+#define DA9063_BB_RESET_BLINKING 0x04
+#define DA9063_NRES_MODE 0x08
+#define DA9063_NONKEY_LOCK 0x10
+#define DA9063_BB_BUCK_SLOWSTART 0x80
+
+/* DA9063_REG_CONTROL_C (addr=0x10) */
+#define DA9063_DEBOUNCING_MASK 0x07
+#define DA9063_DEBOUNCING_OFF 0x0
+#define DA9063_DEBOUNCING_0MS1 0x1
+#define DA9063_DEBOUNCING_1MS 0x2
+#define DA9063_DEBOUNCING_10MS24 0x3
+#define DA9063_DEBOUNCING_51MS2 0x4
+#define DA9063_DEBOUNCING_256MS 0x5
+#define DA9063_DEBOUNCING_512MS 0x6
+#define DA9063_DEBOUNCING_1024MS 0x7
+
+#define DA9063_AUTO_BOOT 0x08
+#define DA9063_OTPREAD_EN 0x10
+#define DA9063_SLEW_RATE_MASK 0x60
+#define DA9063_SLEW_RATE_4US 0x00
+#define DA9063_SLEW_RATE_3US 0x20
+#define DA9063_SLEW_RATE_1US 0x40
+#define DA9063_SLEW_RATE_0US5 0x60
+#define DA9063_DEF_SUPPLY 0x80
+
+/* DA9063_REG_CONTROL_D (addr=0x11) */
+#define DA9063_TWDSCALE_MASK 0x07
+#define DA9063_BLINK_FRQ_MASK 0x38
+#define DA9063_BLINK_FRQ_OFF 0x00
+#define DA9063_BLINK_FRQ_1S0 0x08
+#define DA9063_BLINK_FRQ_2S0 0x10
+#define DA9063_BLINK_FRQ_4S0 0x18
+#define DA9063_BLINK_FRQ_0S18 0x20
+#define DA9063_BLINK_FRQ_2S0_VDD 0x28
+#define DA9063_BLINK_FRQ_4S0_VDD 0x30
+#define DA9063_BLINK_FRQ_0S18_VDD 0x38
+
+#define DA9063_BLINK_DUR_MASK 0xC0
+#define DA9063_BLINK_DUR_10MS 0x00
+#define DA9063_BLINK_DUR_20MS 0x40
+#define DA9063_BLINK_DUR_40MS 0x80
+#define DA9063_BLINK_DUR_20MSDBL 0xC0
+
+/* DA9063_REG_CONTROL_E (addr=0x12) */
+#define DA9063_RTC_MODE_PD 0x01
+#define DA9063_RTC_MODE_SD 0x02
+#define DA9063_RTC_EN 0x04
+#define DA9063_ECO_MODE 0x08
+#define DA9063_PM_FB1_PIN 0x10
+#define DA9063_PM_FB2_PIN 0x20
+#define DA9063_PM_FB3_PIN 0x40
+#define DA9063_V_LOCK 0x80
+
+/* DA9063_REG_CONTROL_F (addr=0x13) */
+#define DA9063_WATCHDOG 0x01
+#define DA9063_SHUTDOWN 0x02
+#define DA9063_WAKE_UP 0x04
+
+/* DA9063_REG_PD_DIS (addr=0x14) */
+#define DA9063_GPI_DIS 0x01
+#define DA9063_GPADC_PAUSE 0x02
+#define DA9063_PMIF_DIS 0x04
+#define DA9063_HS2WIRE_DIS 0x08
+#define DA9063_BB_CLDR_PAUSE 0x10
+#define DA9063_BBAT_DIS 0x20
+#define DA9063_OUT_32K_PAUSE 0x40
+#define DA9063_PMCONT_DIS 0x80
+
+/* DA9063_REG_GPIO_0_1 (addr=0x15) */
+#define DA9063_GPIO0_PIN_MASK 0x03
+#define DA9063_GPIO0_PIN_ADCIN1 0x00
+#define DA9063_GPIO0_PIN_GPI 0x01
+#define DA9063_GPIO0_PIN_GPO_OD 0x02
+#define DA9063_GPIO0_PIN_GPO 0x03
+#define DA9063_GPIO0_TYPE 0x04
+#define DA9063_GPIO0_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO0_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO0_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO0_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO0_NO_WAKEUP 0x08
+#define DA9063_GPIO1_PIN_MASK 0x30
+#define DA9063_GPIO1_PIN_ADCIN2_COMP 0x00
+#define DA9063_GPIO1_PIN_GPI 0x10
+#define DA9063_GPIO1_PIN_GPO_OD 0x20
+#define DA9063_GPIO1_PIN_GPO 0x30
+#define DA9063_GPIO1_TYPE 0x40
+#define DA9063_GPIO1_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO1_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO1_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO1_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO1_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_2_3 (addr=0x16) */
+#define DA9063_GPIO2_PIN_MASK 0x03
+#define DA9063_GPIO2_PIN_ADCIN3 0x00
+#define DA9063_GPIO2_PIN_GPI 0x01
+#define DA9063_GPIO2_PIN_GPO_PSS 0x02
+#define DA9063_GPIO2_PIN_GPO 0x03
+#define DA9063_GPIO2_TYPE 0x04
+#define DA9063_GPIO2_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO2_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO2_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO2_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO2_NO_WAKEUP 0x08
+#define DA9063_GPIO3_PIN_MASK 0x30
+#define DA9063_GPIO3_PIN_CORE_SW_G 0x00
+#define DA9063_GPIO3_PIN_GPI 0x10
+#define DA9063_GPIO3_PIN_GPO_OD 0x20
+#define DA9063_GPIO3_PIN_GPO 0x30
+#define DA9063_GPIO3_TYPE 0x40
+#define DA9063_GPIO3_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO3_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO3_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO3_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO3_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_4_5 (addr=0x17) */
+#define DA9063_GPIO4_PIN_MASK 0x03
+#define DA9063_GPIO4_PIN_CORE_SW_S 0x00
+#define DA9063_GPIO4_PIN_GPI 0x01
+#define DA9063_GPIO4_PIN_GPO_OD 0x02
+#define DA9063_GPIO4_PIN_GPO 0x03
+#define DA9063_GPIO4_TYPE 0x04
+#define DA9063_GPIO4_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO4_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO4_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO4_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO4_NO_WAKEUP 0x08
+#define DA9063_GPIO5_PIN_MASK 0x30
+#define DA9063_GPIO5_PIN_PERI_SW_G 0x00
+#define DA9063_GPIO5_PIN_GPI 0x10
+#define DA9063_GPIO5_PIN_GPO_OD 0x20
+#define DA9063_GPIO5_PIN_GPO 0x30
+#define DA9063_GPIO5_TYPE 0x40
+#define DA9063_GPIO5_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO5_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO5_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO5_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO5_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_6_7 (addr=0x18) */
+#define DA9063_GPIO6_PIN_MASK 0x03
+#define DA9063_GPIO6_PIN_PERI_SW_S 0x00
+#define DA9063_GPIO6_PIN_GPI 0x01
+#define DA9063_GPIO6_PIN_GPO_OD 0x02
+#define DA9063_GPIO6_PIN_GPO 0x03
+#define DA9063_GPIO6_TYPE 0x04
+#define DA9063_GPIO6_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO6_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO6_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO6_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO6_NO_WAKEUP 0x08
+#define DA9063_GPIO7_PIN_MASK 0x30
+#define DA9063_GPIO7_PIN_GPI 0x10
+#define DA9063_GPIO7_PIN_GPO_PSS 0x20
+#define DA9063_GPIO7_PIN_GPO 0x30
+#define DA9063_GPIO7_TYPE 0x40
+#define DA9063_GPIO7_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO7_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO7_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO7_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO7_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_8_9 (addr=0x19) */
+#define DA9063_GPIO8_PIN_MASK 0x03
+#define DA9063_GPIO8_PIN_GPI_SYS_EN 0x00
+#define DA9063_GPIO8_PIN_GPI 0x01
+#define DA9063_GPIO8_PIN_GPO_PSS 0x02
+#define DA9063_GPIO8_PIN_GPO 0x03
+#define DA9063_GPIO8_TYPE 0x04
+#define DA9063_GPIO8_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO8_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO8_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO8_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO8_NO_WAKEUP 0x08
+#define DA9063_GPIO9_PIN_MASK 0x30
+#define DA9063_GPIO9_PIN_GPI_PWR_EN 0x00
+#define DA9063_GPIO9_PIN_GPI 0x10
+#define DA9063_GPIO9_PIN_GPO_PSS 0x20
+#define DA9063_GPIO9_PIN_GPO 0x30
+#define DA9063_GPIO9_TYPE 0x40
+#define DA9063_GPIO9_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO9_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO9_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO9_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO9_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_10_11 (addr=0x1A) */
+#define DA9063_GPIO10_PIN_MASK 0x03
+#define DA9063_GPIO10_PIN_GPI_PWR1_EN 0x00
+#define DA9063_GPIO10_PIN_GPI 0x01
+#define DA9063_GPIO10_PIN_GPO_OD 0x02
+#define DA9063_GPIO10_PIN_GPO 0x03
+#define DA9063_GPIO10_TYPE 0x04
+#define DA9063_GPIO10_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO10_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO10_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO10_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO10_NO_WAKEUP 0x08
+#define DA9063_GPIO11_PIN_MASK 0x30
+#define DA9063_GPIO11_PIN_GPO_OD 0x00
+#define DA9063_GPIO11_PIN_GPI 0x10
+#define DA9063_GPIO11_PIN_GPO_PSS 0x20
+#define DA9063_GPIO11_PIN_GPO 0x30
+#define DA9063_GPIO11_TYPE 0x40
+#define DA9063_GPIO11_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO11_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO11_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO11_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO11_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_12_13 (addr=0x1B) */
+#define DA9063_GPIO12_PIN_MASK 0x03
+#define DA9063_GPIO12_PIN_NVDDFLT_OUT 0x00
+#define DA9063_GPIO12_PIN_GPI 0x01
+#define DA9063_GPIO12_PIN_VSYSMON_OUT 0x02
+#define DA9063_GPIO12_PIN_GPO 0x03
+#define DA9063_GPIO12_TYPE 0x04
+#define DA9063_GPIO12_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO12_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO12_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO12_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO12_NO_WAKEUP 0x08
+#define DA9063_GPIO13_PIN_MASK 0x30
+#define DA9063_GPIO13_PIN_GPFB1_OUT 0x00
+#define DA9063_GPIO13_PIN_GPI 0x10
+#define DA9063_GPIO13_PIN_GPFB1_OUTOD 0x20
+#define DA9063_GPIO13_PIN_GPO 0x30
+#define DA9063_GPIO13_TYPE 0x40
+#define DA9063_GPIO13_TYPE_GPFB1_OUT 0x00
+#define DA9063_GPIO13_TYPE_GPI 0x00
+#define DA9063_GPIO13_TYPE_GPFB1_OUTOD 0x04
+#define DA9063_GPIO13_TYPE_GPO 0x04
+#define DA9063_GPIO13_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_14_15 (addr=0x1C) */
+#define DA9063_GPIO14_PIN_MASK 0x03
+#define DA9063_GPIO14_PIN_GPO_OD 0x00
+#define DA9063_GPIO14_PIN_GPI 0x01
+#define DA9063_GPIO14_PIN_HS2DATA 0x02
+#define DA9063_GPIO14_PIN_GPO 0x03
+#define DA9063_GPIO14_TYPE 0x04
+#define DA9063_GPIO14_TYPE_GPI_ACT_LOW 0x00
+#define DA9063_GPIO14_TYPE_GPO_VDD_IO1 0x00
+#define DA9063_GPIO14_TYPE_GPI_ACT_HIGH 0x04
+#define DA9063_GPIO14_TYPE_GPO_VDD_IO2 0x04
+#define DA9063_GPIO14_NO_WAKEUP 0x08
+#define DA9063_GPIO15_PIN_MASK 0x30
+#define DA9063_GPIO15_PIN_GPO_OD 0x00
+#define DA9063_GPIO15_PIN_GPI 0x10
+#define DA9063_GPIO15_PIN_GPO 0x30
+#define DA9063_GPIO15_TYPE 0x40
+#define DA9063_GPIO15_TYPE_GPFB1_OUT 0x00
+#define DA9063_GPIO15_TYPE_GPI 0x00
+#define DA9063_GPIO15_TYPE_GPFB1_OUTOD 0x04
+#define DA9063_GPIO15_TYPE_GPO 0x04
+#define DA9063_GPIO15_NO_WAKEUP 0x80
+
+/* DA9063_REG_GPIO_MODE0_7 (addr=0x1D) */
+#define DA9063_GPIO0_MODE 0x01
+#define DA9063_GPIO1_MODE 0x02
+#define DA9063_GPIO2_MODE 0x04
+#define DA9063_GPIO3_MODE 0x08
+#define DA9063_GPIO4_MODE 0x10
+#define DA9063_GPIO5_MODE 0x20
+#define DA9063_GPIO6_MODE 0x40
+#define DA9063_GPIO7_MODE 0x80
+
+/* DA9063_REG_GPIO_MODE8_15 (addr=0x1E) */
+#define DA9063_GPIO8_MODE 0x01
+#define DA9063_GPIO9_MODE 0x02
+#define DA9063_GPIO10_MODE 0x04
+#define DA9063_GPIO11_MODE 0x08
+#define DA9063_GPIO11_MODE_LED_ACT_HIGH 0x00
+#define DA9063_GPIO11_MODE_LED_ACT_LOW 0x08
+#define DA9063_GPIO12_MODE 0x10
+#define DA9063_GPIO13_MODE 0x20
+#define DA9063_GPIO14_MODE 0x40
+#define DA9063_GPIO14_MODE_LED_ACT_HIGH 0x00
+#define DA9063_GPIO14_MODE_LED_ACT_LOW 0x40
+#define DA9063_GPIO15_MODE 0x80
+#define DA9063_GPIO15_MODE_LED_ACT_HIGH 0x00
+#define DA9063_GPIO15_MODE_LED_ACT_LOW 0x80
+
+/* DA9063_REG_SWITCH_CONT (addr=0x1F) */
+#define DA9063_CORE_SW_GPI_MASK 0x03
+#define DA9063_CORE_SW_GPI_OFF 0x00
+#define DA9063_CORE_SW_GPI_GPIO1 0x01
+#define DA9063_CORE_SW_GPI_GPIO2 0x02
+#define DA9063_CORE_SW_GPI_GPIO13 0x03
+#define DA9063_PERI_SW_GPI_MASK 0x0C
+#define DA9063_PERI_SW_GPI_OFF 0x00
+#define DA9063_PERI_SW_GPI_GPIO1 0x04
+#define DA9063_PERI_SW_GPI_GPIO2 0x08
+#define DA9063_PERI_SW_GPI_GPIO13 0x0C
+#define DA9063_SWITCH_SR_MASK 0x30
+#define DA9063_SWITCH_SR_1MV 0x00
+#define DA9063_SWITCH_SR_5MV 0x10
+#define DA9063_SWITCH_SR_10MV 0x20
+#define DA9063_SWITCH_SR_50MV 0x30
+#define DA9063_CORE_SW_INTERNAL 0x40
+#define DA9063_CP_EN_MODE 0x80
+
+/* DA9063_REG_Bxxxx_CONT common bits (addr=0x20-0x25) */
+#define DA9063_BUCK_EN 0x01
+#define DA9063_BUCK_GPI_MASK 0x06
+#define DA9063_BUCK_GPI_OFF 0x00
+#define DA9063_BUCK_GPI_GPIO1 0x02
+#define DA9063_BUCK_GPI_GPIO2 0x04
+#define DA9063_BUCK_GPI_GPIO13 0x06
+#define DA9063_BUCK_CONF 0x08
+#define DA9063_VBUCK_GPI_MASK 0x60
+#define DA9063_VBUCK_GPI_OFF 0x00
+#define DA9063_VBUCK_GPI_GPIO1 0x20
+#define DA9063_VBUCK_GPI_GPIO2 0x40
+#define DA9063_VBUCK_GPI_GPIO13 0x60
+
+/* DA9063_REG_BCORE1_CONT specific bits (addr=0x21) */
+#define DA9063_CORE_SW_EN 0x10
+#define DA9063_CORE_SW_CONF 0x80
+
+/* DA9063_REG_BPERI_CONT specific bits (addr=0x25) */
+#define DA9063_PERI_SW_EN 0x10
+#define DA9063_PERI_SW_CONF 0x80
+
+/* DA9063_REG_LDOx_CONT common bits (addr=0x26-0x30) */
+#define DA9063_LDO_EN 0x01
+#define DA9063_LDO_GPI_MASK 0x06
+#define DA9063_LDO_GPI_OFF 0x00
+#define DA9063_LDO_GPI_GPIO1 0x02
+#define DA9063_LDO_GPI_GPIO2 0x04
+#define DA9063_LDO_GPI_GPIO13 0x06
+#define DA9063_LDO_PD_DIS 0x08
+#define DA9063_VLDO_GPI_MASK 0x60
+#define DA9063_VLDO_GPI_OFF 0x00
+#define DA9063_VLDO_GPI_GPIO1 0x20
+#define DA9063_VLDO_GPI_GPIO2 0x40
+#define DA9063_VLDO_GPI_GPIO13 0x60
+#define DA9063_LDO_CONF 0x80
+
+/* DA9063_REG_LDO5_CONT specific bits (addr=0x2A) */
+#define DA9063_VLDO5_SEL 0x10
+
+/* DA9063_REG_LDO6_CONT specific bits (addr=0x2B) */
+#define DA9063_VLDO6_SEL 0x10
+
+/* DA9063_REG_LDO7_CONT specific bits (addr=0x2C) */
+#define DA9063_VLDO7_SEL 0x10
+
+/* DA9063_REG_LDO8_CONT specific bits (addr=0x2D) */
+#define DA9063_VLDO8_SEL 0x10
+
+/* DA9063_REG_LDO9_CONT specific bits (addr=0x2E) */
+#define DA9063_VLDO9_SEL 0x10
+
+/* DA9063_REG_LDO10_CONT specific bits (addr=0x2F) */
+#define DA9063_VLDO10_SEL 0x10
+
+/* DA9063_REG_LDO11_CONT specific bits (addr=0x30) */
+#define DA9063_VLDO11_SEL 0x10
+
+/* DA9063_REG_VIB (addr=0x31) */
+#define DA9063_VIB_SET_MASK 0x3F
+#define DA9063_VIB_SET_OFF 0
+#define DA9063_VIB_SET_MAX 0x3F
+
+/* DA9063_REG_DVC_1 (addr=0x32) */
+#define DA9063_VBCORE1_SEL 0x01
+#define DA9063_VBCORE2_SEL 0x02
+#define DA9063_VBPRO_SEL 0x04
+#define DA9063_VBMEM_SEL 0x08
+#define DA9063_VBPERI_SEL 0x10
+#define DA9063_VLDO1_SEL 0x20
+#define DA9063_VLDO2_SEL 0x40
+#define DA9063_VLDO3_SEL 0x80
+
+/* DA9063_REG_DVC_2 (addr=0x33) */
+#define DA9063_VBIO_SEL 0x01
+#define DA9063_VLDO4_SEL 0x80
+
+/* DA9063_REG_ADC_MAN (addr=0x34) */
+#define DA9063_ADC_MUX_MASK 0x0F
+#define DA9063_ADC_MUX_VSYS 0x00
+#define DA9063_ADC_MUX_ADCIN1 0x01
+#define DA9063_ADC_MUX_ADCIN2 0x02
+#define DA9063_ADC_MUX_ADCIN3 0x03
+#define DA9063_ADC_MUX_T_SENSE 0x04
+#define DA9063_ADC_MUX_VBBAT 0x05
+#define DA9063_ADC_MUX_LDO_G1 0x08
+#define DA9063_ADC_MUX_LDO_G2 0x09
+#define DA9063_ADC_MUX_LDO_G3 0x0A
+#define DA9063_ADC_MAN 0x10
+#define DA9063_ADC_MODE 0x20
+
+/* DA9063_REG_ADC_CONT (addr=0x35) */
+#define DA9063_ADC_AUTO_VSYS_EN 0x01
+#define DA9063_ADC_AUTO_AD1_EN 0x02
+#define DA9063_ADC_AUTO_AD2_EN 0x04
+#define DA9063_ADC_AUTO_AD3_EN 0x08
+#define DA9063_ADC_AD1_ISRC_EN 0x10
+#define DA9063_ADC_AD2_ISRC_EN 0x20
+#define DA9063_ADC_AD3_ISRC_EN 0x40
+#define DA9063_COMP1V2_EN 0x80
+
+/* DA9063_REG_VSYS_MON (addr=0x36) */
+#define DA9063_VSYS_VAL_MASK 0xFF
+#define DA9063_VSYS_VAL_BASE 0x00
+
+/* DA9063_REG_ADC_RES_L (addr=0x37) */
+#define DA9063_ADC_RES_L_BITS 2
+#define DA9063_ADC_RES_L_MASK 0xC0
+
+/* DA9063_REG_ADC_RES_H (addr=0x38) */
+#define DA9063_ADC_RES_M_BITS 8
+#define DA9063_ADC_RES_M_MASK 0xFF
+
+/* DA9063_REG_(xxx_RES/ADC_RES_H) (addr=0x39-0x3F) */
+#define DA9063_ADC_VAL_MASK 0xFF
+
+/* DA9063_REG_COUNT_S (addr=0x40) */
+#define DA9063_RTC_READ 0x80
+#define DA9063_COUNT_SEC_MASK 0x3F
+
+/* DA9063_REG_COUNT_MI (addr=0x41) */
+#define DA9063_COUNT_MIN_MASK 0x3F
+
+/* DA9063_REG_COUNT_H (addr=0x42) */
+#define DA9063_COUNT_HOUR_MASK 0x1F
+
+/* DA9063_REG_COUNT_D (addr=0x43) */
+#define DA9063_COUNT_DAY_MASK 0x1F
+
+/* DA9063_REG_COUNT_MO (addr=0x44) */
+#define DA9063_COUNT_MONTH_MASK 0x0F
+
+/* DA9063_REG_COUNT_Y (addr=0x45) */
+#define DA9063_COUNT_YEAR_MASK 0x3F
+#define DA9063_MONITOR 0x40
+
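As a worked example of the calendar field masks above, a minimal sketch of unpacking a block read of COUNT_S..COUNT_Y into plain integers; da9063_read_block() is a hypothetical bulk-read helper, and treating the counters as plain binary (suggested by the 0x3F/0x1F masks) is an assumption of this sketch.

/* Hypothetical bulk-read helper standing in for the driver's regmap code. */
int da9063_read_block(u16 reg, u8 *buf, int len);

/* Illustrative sketch: extract the RTC counter fields with the masks above. */
static int da9063_example_read_time(int *sec, int *min, int *hour,
				    int *day, int *month, int *year)
{
	u8 buf[6];
	int ret = da9063_read_block(DA9063_REG_COUNT_S, buf, sizeof(buf));

	if (ret)
		return ret;

	*sec   = buf[0] & DA9063_COUNT_SEC_MASK;
	*min   = buf[1] & DA9063_COUNT_MIN_MASK;
	*hour  = buf[2] & DA9063_COUNT_HOUR_MASK;
	*day   = buf[3] & DA9063_COUNT_DAY_MASK;
	*month = buf[4] & DA9063_COUNT_MONTH_MASK;
	*year  = buf[5] & DA9063_COUNT_YEAR_MASK;	/* epoch offset left out here */

	return 0;
}
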
+/* DA9063_REG_ALARM_S (addr=0x46) */
+#define DA9063_BB_ALARM_S_MASK 0x3F
+#define DA9063_ALARM_STATUS_ALARM 0x80
+#define DA9063_ALARM_STATUS_TICK 0x40
+/* DA9063_REG_ALARM_MI (addr=0x47) */
+#define DA9063_ALARM_MIN_MASK 0x3F
+
+/* DA9063_REG_ALARM_H (addr=0x48) */
+#define DA9063_ALARM_HOUR_MASK 0x1F
+
+/* DA9063_REG_ALARM_D (addr=0x49) */
+#define DA9063_ALARM_DAY_MASK 0x1F
+
+/* DA9063_REG_ALARM_MO (addr=0x4A) */
+#define DA9063_TICK_WAKE 0x20
+#define DA9063_TICK_TYPE 0x10
+#define DA9063_TICK_TYPE_SEC 0x00
+#define DA9063_TICK_TYPE_MIN 0x10
+#define DA9063_ALARM_MONTH_MASK 0x0F
+
+/* DA9063_REG_ALARM_Y (addr=0x4B) */
+#define DA9063_TICK_ON 0x80
+#define DA9063_ALARM_ON 0x40
+#define DA9063_ALARM_YEAR_MASK 0x3F
+
+/* DA9063_REG_WAIT (addr=0x97) */
+#define DA9063_REG_WAIT_TIME_MASK 0xF
+#define DA9063_WAIT_TIME_0_US 0x0
+#define DA9063_WAIT_TIME_512_US 0x1
+#define DA9063_WAIT_TIME_1_MS 0x2
+#define DA9063_WAIT_TIME_2_MS 0x3
+#define DA9063_WAIT_TIME_4_1_MS 0x4
+#define DA9063_WAIT_TIME_8_2_MS 0x5
+#define DA9063_WAIT_TIME_16_4_MS 0x6
+#define DA9063_WAIT_TIME_32_8_MS 0x7
+#define DA9063_WAIT_TIME_65_5_MS 0x8
+#define DA9063_WAIT_TIME_128_MS 0x9
+#define DA9063_WAIT_TIME_256_MS 0xA
+#define DA9063_WAIT_TIME_512_MS 0xB
+#define DA9063_WAIT_TIME_1_S 0xC
+#define DA9063_WAIT_TIME_2_1_S 0xD
+
+/* DA9063_REG_EN_32K (addr=0x98) */
+#define DA9063_STABILIZ_TIME_MASK 0x7
+#define DA9063_CRYSTAL 0x08
+#define DA9063_DELAY_MODE 0x10
+#define DA9063_OUT_CLOCK 0x20
+#define DA9063_RTC_CLOCK 0x40
+#define DA9063_OUT_32K_EN 0x80
+
+/* DA9063_REG_CHIP_VARIANT */
+#define DA9063_CHIP_VARIANT_SHIFT 4
+
+/* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */
+#define DA9063_BIO_ILIM_MASK 0x0F
+#define DA9063_BMEM_ILIM_MASK 0xF0
+
+/* DA9063_REG_BUCK_ILIM_B (addr=0x9B) */
+#define DA9063_BPRO_ILIM_MASK 0x0F
+#define DA9063_BPERI_ILIM_MASK 0xF0
+
+/* DA9063_REG_BUCK_ILIM_C (addr=0x9C) */
+#define DA9063_BCORE1_ILIM_MASK 0x0F
+#define DA9063_BCORE2_ILIM_MASK 0xF0
+
+/* DA9063_REG_Bxxxx_CFG common bits (addr=0x9D-0xA2) */
+#define DA9063_BUCK_FB_MASK 0x07
+#define DA9063_BUCK_PD_DIS_MASK 0x20
+#define DA9063_BUCK_MODE_MASK 0xC0
+#define DA9063_BUCK_MODE_MANUAL 0x00
+#define DA9063_BUCK_MODE_SLEEP 0x40
+#define DA9063_BUCK_MODE_SYNC 0x80
+#define DA9063_BUCK_MODE_AUTO 0xC0
+
+/* DA9063_REG_BPRO_CFG (addr=0x9F) */
+#define DA9063_BPRO_VTTR_EN 0x08
+#define DA9063_BPRO_VTT_EN 0x10
+
+/* DA9063_REG_VBxxxx_A/B (addr=0xA3-0xA8, 0xB4-0xB9) */
+#define DA9063_VBUCK_MASK 0x7F
+#define DA9063_VBUCK_BIAS 0
+#define DA9063_BUCK_SL 0x80
+
+/* DA9063_REG_VLDOx_A/B (addr=0xA9-0xB3, 0xBA-0xC4) */
+#define DA9063_LDO_SL 0x80
+
+/* DA9063_REG_VLDO1_A/B (addr=0xA9, 0xBA) */
+#define DA9063_VLDO1_MASK 0x3F
+#define DA9063_VLDO1_BIAS 0
+
+/* DA9063_REG_VLDO2_A/B (addr=0xAA, 0xBB) */
+#define DA9063_VLDO2_MASK 0x3F
+#define DA9063_VLDO2_BIAS 0
+
+/* DA9063_REG_VLDO3_A/B (addr=0xAB, 0xBC) */
+#define DA9063_VLDO3_MASK 0x7F
+#define DA9063_VLDO3_BIAS 0
+
+/* DA9063_REG_VLDO4_A/B (addr=0xAC, 0xBD) */
+#define DA9063_VLDO4_MASK 0x7F
+#define DA9063_VLDO4_BIAS 0
+
+/* DA9063_REG_VLDO5_A/B (addr=0xAD, 0xBE) */
+#define DA9063_VLDO5_MASK 0x3F
+#define DA9063_VLDO5_BIAS 2
+
+/* DA9063_REG_VLDO6_A/B (addr=0xAE, 0xBF) */
+#define DA9063_VLDO6_MASK 0x3F
+#define DA9063_VLDO6_BIAS 2
+
+/* DA9063_REG_VLDO7_A/B (addr=0xAF, 0xC0) */
+#define DA9063_VLDO7_MASK 0x3F
+#define DA9063_VLDO7_BIAS 2
+
+/* DA9063_REG_VLDO8_A/B (addr=0xB0, 0xC1) */
+#define DA9063_VLDO8_MASK 0x3F
+#define DA9063_VLDO8_BIAS 2
+
+/* DA9063_REG_VLDO9_A/B (addr=0xB1, 0xC2) */
+#define DA9063_VLDO9_MASK 0x3F
+#define DA9063_VLDO9_BIAS 3
+
+/* DA9063_REG_VLDO10_A/B (addr=0xB2, 0xC3) */
+#define DA9063_VLDO10_MASK 0x3F
+#define DA9063_VLDO10_BIAS 2
+
+/* DA9063_REG_VLDO11_A/B (addr=0xB3, 0xC4) */
+#define DA9063_VLDO11_MASK 0x3F
+#define DA9063_VLDO11_BIAS 2
+
+/* DA9063_REG_GPO11_LED (addr=0xC6) */
+/* DA9063_REG_GPO14_LED (addr=0xC7) */
+/* DA9063_REG_GPO15_LED (addr=0xC8) */
+#define DA9063_GPIO_DIM 0x80
+#define DA9063_GPIO_PWM_MASK 0x7F
+
+/* DA9063_REG_CONFIG_H (addr=0x10D) */
+#define DA9063_PWM_CLK_MASK 0x01
+#define DA9063_PWM_CLK_PWM2MHZ 0x00
+#define DA9063_PWM_CLK_PWM1MHZ 0x01
+#define DA9063_LDO8_MODE_MASK 0x02
+#define DA9063_LDO8_MODE_LDO 0
+#define DA9063_LDO8_MODE_VIBR 0x02
+#define DA9063_MERGE_SENSE_MASK 0x04
+#define DA9063_MERGE_SENSE_GP_FB2 0x00
+#define DA9063_MERGE_SENSE_GPIO4 0x04
+#define DA9063_BCORE_MERGE 0x08
+#define DA9063_BPRO_OD 0x10
+#define DA9063_BCORE2_OD 0x20
+#define DA9063_BCORE1_OD 0x40
+#define DA9063_BUCK_MERGE 0x80
+
+/* DA9063_REG_CONFIG_I (addr=0x10E) */
+#define DA9063_NONKEY_PIN_MASK 0x03
+#define DA9063_NONKEY_PIN_PORT 0x00
+#define DA9063_NONKEY_PIN_SWDOWN 0x01
+#define DA9063_NONKEY_PIN_AUTODOWN 0x02
+#define DA9063_NONKEY_PIN_AUTOFLPRT 0x03
+
+/* DA9063_REG_MON_REG_5 (addr=0x116) */
+#define DA9063_MON_A8_IDX_MASK 0x07
+#define DA9063_MON_A8_IDX_NONE 0x00
+#define DA9063_MON_A8_IDX_BCORE1 0x01
+#define DA9063_MON_A8_IDX_BCORE2 0x02
+#define DA9063_MON_A8_IDX_BPRO 0x03
+#define DA9063_MON_A8_IDX_LDO3 0x04
+#define DA9063_MON_A8_IDX_LDO4 0x05
+#define DA9063_MON_A8_IDX_LDO11 0x06
+#define DA9063_MON_A9_IDX_MASK 0x70
+#define DA9063_MON_A9_IDX_NONE 0x00
+#define DA9063_MON_A9_IDX_BIO 0x01
+#define DA9063_MON_A9_IDX_BMEM 0x02
+#define DA9063_MON_A9_IDX_BPERI 0x03
+#define DA9063_MON_A9_IDX_LDO1 0x04
+#define DA9063_MON_A9_IDX_LDO2 0x05
+#define DA9063_MON_A9_IDX_LDO5 0x06
+
+/* DA9063_REG_MON_REG_6 (addr=0x117) */
+#define DA9063_MON_A10_IDX_MASK 0x07
+#define DA9063_MON_A10_IDX_NONE 0x00
+#define DA9063_MON_A10_IDX_LDO6 0x01
+#define DA9063_MON_A10_IDX_LDO7 0x02
+#define DA9063_MON_A10_IDX_LDO8 0x03
+#define DA9063_MON_A10_IDX_LDO9 0x04
+#define DA9063_MON_A10_IDX_LDO10 0x05
+
+#endif /* _DA9063_REG_H */
diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h
new file mode 100644
index 000000000..76e668933
--- /dev/null
+++ b/include/linux/mfd/da9150/core.h
@@ -0,0 +1,68 @@
+/*
+ * DA9150 MFD Driver - Core Data
+ *
+ * Copyright (c) 2014 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __DA9150_CORE_H
+#define __DA9150_CORE_H
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+
+/* I2C address paging */
+#define DA9150_REG_PAGE_SHIFT 8
+#define DA9150_REG_PAGE_MASK 0xFF
+
+/* IRQs */
+#define DA9150_NUM_IRQ_REGS 4
+#define DA9150_IRQ_VBUS 0
+#define DA9150_IRQ_CHG 1
+#define DA9150_IRQ_TCLASS 2
+#define DA9150_IRQ_TJUNC 3
+#define DA9150_IRQ_VFAULT 4
+#define DA9150_IRQ_CONF 5
+#define DA9150_IRQ_DAT 6
+#define DA9150_IRQ_DTYPE 7
+#define DA9150_IRQ_ID 8
+#define DA9150_IRQ_ADP 9
+#define DA9150_IRQ_SESS_END 10
+#define DA9150_IRQ_SESS_VLD 11
+#define DA9150_IRQ_FG 12
+#define DA9150_IRQ_GP 13
+#define DA9150_IRQ_TBAT 14
+#define DA9150_IRQ_GPIOA 15
+#define DA9150_IRQ_GPIOB 16
+#define DA9150_IRQ_GPIOC 17
+#define DA9150_IRQ_GPIOD 18
+#define DA9150_IRQ_GPADC 19
+#define DA9150_IRQ_WKUP 20
+
+struct da9150_pdata {
+ int irq_base;
+};
+
+struct da9150 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *regmap_irq_data;
+ int irq;
+ int irq_base;
+};
+
+/* Device I/O */
+u8 da9150_reg_read(struct da9150 *da9150, u16 reg);
+void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val);
+void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val);
+
+void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf);
+void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf);
+#endif /* __DA9150_CORE_H */
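A minimal sketch of how a DA9150 cell driver might call the accessors declared above, assuming <linux/mfd/da9150/core.h> and <linux/mfd/da9150/registers.h> are included; how the struct da9150 pointer is obtained is left out, and the particular registers and masks used are chosen purely for illustration.

/*
 * Illustrative sketch: read a status register and update a control field
 * through the core accessors. The policy shown is made up; only the
 * accessor signatures and the register/bit names come from this patch.
 */
static void da9150_example(struct da9150 *da9150)
{
	u8 status = da9150_reg_read(da9150, DA9150_STATUS_A);

	if (status & DA9150_WKUP_STAT_MASK)
		da9150_set_bits(da9150, DA9150_CONTROL_B, DA9150_LPM_MASK, 0);
}
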
diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h
new file mode 100644
index 000000000..27ca6ee4d
--- /dev/null
+++ b/include/linux/mfd/da9150/registers.h
@@ -0,0 +1,1155 @@
+/*
+ * DA9150 MFD Driver - Registers
+ *
+ * Copyright (c) 2014 Dialog Semiconductor
+ *
+ * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __DA9150_REGISTERS_H
+#define __DA9150_REGISTERS_H
+
+#include <linux/bitops.h>
+
+/* Registers */
+#define DA9150_PAGE_CON 0x000
+#define DA9150_STATUS_A 0x068
+#define DA9150_STATUS_B 0x069
+#define DA9150_STATUS_C 0x06A
+#define DA9150_STATUS_D 0x06B
+#define DA9150_STATUS_E 0x06C
+#define DA9150_STATUS_F 0x06D
+#define DA9150_STATUS_G 0x06E
+#define DA9150_STATUS_H 0x06F
+#define DA9150_STATUS_I 0x070
+#define DA9150_STATUS_J 0x071
+#define DA9150_STATUS_K 0x072
+#define DA9150_STATUS_L 0x073
+#define DA9150_STATUS_N 0x074
+#define DA9150_FAULT_LOG_A 0x076
+#define DA9150_FAULT_LOG_B 0x077
+#define DA9150_EVENT_E 0x078
+#define DA9150_EVENT_F 0x079
+#define DA9150_EVENT_G 0x07A
+#define DA9150_EVENT_H 0x07B
+#define DA9150_IRQ_MASK_E 0x07C
+#define DA9150_IRQ_MASK_F 0x07D
+#define DA9150_IRQ_MASK_G 0x07E
+#define DA9150_IRQ_MASK_H 0x07F
+#define DA9150_PAGE_CON_1 0x080
+#define DA9150_CONFIG_A 0x0E0
+#define DA9150_CONFIG_B 0x0E1
+#define DA9150_CONFIG_C 0x0E2
+#define DA9150_CONFIG_D 0x0E3
+#define DA9150_CONFIG_E 0x0E4
+#define DA9150_CONTROL_A 0x0E5
+#define DA9150_CONTROL_B 0x0E6
+#define DA9150_CONTROL_C 0x0E7
+#define DA9150_GPIO_A_B 0x0E8
+#define DA9150_GPIO_C_D 0x0E9
+#define DA9150_GPIO_MODE_CONT 0x0EA
+#define DA9150_GPIO_CTRL_B 0x0EB
+#define DA9150_GPIO_CTRL_A 0x0EC
+#define DA9150_GPIO_CTRL_C 0x0ED
+#define DA9150_GPIO_CFG_A 0x0EE
+#define DA9150_GPIO_CFG_B 0x0EF
+#define DA9150_GPIO_CFG_C 0x0F0
+#define DA9150_GPADC_MAN 0x0F2
+#define DA9150_GPADC_RES_A 0x0F4
+#define DA9150_GPADC_RES_B 0x0F5
+#define DA9150_PAGE_CON_2 0x100
+#define DA9150_OTP_CONT_SHARED 0x101
+#define DA9150_INTERFACE_SHARED 0x105
+#define DA9150_CONFIG_A_SHARED 0x106
+#define DA9150_CONFIG_D_SHARED 0x109
+#define DA9150_ADETVB_CFG_C 0x150
+#define DA9150_ADETD_STAT 0x151
+#define DA9150_ADET_CMPSTAT 0x152
+#define DA9150_ADET_CTRL_A 0x153
+#define DA9150_ADETVB_CFG_B 0x154
+#define DA9150_ADETVB_CFG_A 0x155
+#define DA9150_ADETAC_CFG_A 0x156
+#define DA9150_ADDETAC_CFG_B 0x157
+#define DA9150_ADETAC_CFG_C 0x158
+#define DA9150_ADETAC_CFG_D 0x159
+#define DA9150_ADETVB_CFG_D 0x15A
+#define DA9150_ADETID_CFG_A 0x15B
+#define DA9150_ADET_RID_PT_CHG_H 0x15C
+#define DA9150_ADET_RID_PT_CHG_L 0x15D
+#define DA9150_PPR_TCTR_B 0x160
+#define DA9150_PPR_BKCTRL_A 0x163
+#define DA9150_PPR_BKCFG_A 0x164
+#define DA9150_PPR_BKCFG_B 0x165
+#define DA9150_PPR_CHGCTRL_A 0x166
+#define DA9150_PPR_CHGCTRL_B 0x167
+#define DA9150_PPR_CHGCTRL_C 0x168
+#define DA9150_PPR_TCTR_A 0x169
+#define DA9150_PPR_CHGCTRL_D 0x16A
+#define DA9150_PPR_CHGCTRL_E 0x16B
+#define DA9150_PPR_CHGCTRL_F 0x16C
+#define DA9150_PPR_CHGCTRL_G 0x16D
+#define DA9150_PPR_CHGCTRL_H 0x16E
+#define DA9150_PPR_CHGCTRL_I 0x16F
+#define DA9150_PPR_CHGCTRL_J 0x170
+#define DA9150_PPR_CHGCTRL_K 0x171
+#define DA9150_PPR_CHGCTRL_L 0x172
+#define DA9150_PPR_CHGCTRL_M 0x173
+#define DA9150_PPR_THYST_A 0x174
+#define DA9150_PPR_THYST_B 0x175
+#define DA9150_PPR_THYST_C 0x176
+#define DA9150_PPR_THYST_D 0x177
+#define DA9150_PPR_THYST_E 0x178
+#define DA9150_PPR_THYST_F 0x179
+#define DA9150_PPR_THYST_G 0x17A
+#define DA9150_PAGE_CON_3 0x180
+#define DA9150_PAGE_CON_4 0x200
+#define DA9150_PAGE_CON_5 0x280
+#define DA9150_PAGE_CON_6 0x300
+#define DA9150_COREBTLD_STAT_A 0x302
+#define DA9150_COREBTLD_CTRL_A 0x303
+#define DA9150_CORE_CONFIG_A 0x304
+#define DA9150_CORE_CONFIG_C 0x305
+#define DA9150_CORE_CONFIG_B 0x306
+#define DA9150_CORE_CFG_DATA_A 0x307
+#define DA9150_CORE_CFG_DATA_B 0x308
+#define DA9150_CORE_CMD_A 0x309
+#define DA9150_CORE_DATA_A 0x30A
+#define DA9150_CORE_DATA_B 0x30B
+#define DA9150_CORE_DATA_C 0x30C
+#define DA9150_CORE_DATA_D 0x30D
+#define DA9150_CORE2WIRE_STAT_A 0x310
+#define DA9150_CORE2WIRE_CTRL_A 0x311
+#define DA9150_FW_CTRL_A 0x312
+#define DA9150_FW_CTRL_C 0x313
+#define DA9150_FW_CTRL_D 0x314
+#define DA9150_FG_CTRL_A 0x315
+#define DA9150_FG_CTRL_B 0x316
+#define DA9150_FW_CTRL_E 0x317
+#define DA9150_FW_CTRL_B 0x318
+#define DA9150_GPADC_CMAN 0x320
+#define DA9150_GPADC_CRES_A 0x322
+#define DA9150_GPADC_CRES_B 0x323
+#define DA9150_CC_CFG_A 0x328
+#define DA9150_CC_CFG_B 0x329
+#define DA9150_CC_ICHG_RES_A 0x32A
+#define DA9150_CC_ICHG_RES_B 0x32B
+#define DA9150_CC_IAVG_RES_A 0x32C
+#define DA9150_CC_IAVG_RES_B 0x32D
+#define DA9150_TAUX_CTRL_A 0x330
+#define DA9150_TAUX_RELOAD_H 0x332
+#define DA9150_TAUX_RELOAD_L 0x333
+#define DA9150_TAUX_VALUE_H 0x334
+#define DA9150_TAUX_VALUE_L 0x335
+#define DA9150_AUX_DATA_0 0x338
+#define DA9150_AUX_DATA_1 0x339
+#define DA9150_AUX_DATA_2 0x33A
+#define DA9150_AUX_DATA_3 0x33B
+#define DA9150_BIF_CTRL 0x340
+#define DA9150_TBAT_CTRL_A 0x342
+#define DA9150_TBAT_CTRL_B 0x343
+#define DA9150_TBAT_RES_A 0x344
+#define DA9150_TBAT_RES_B 0x345
+
+/* DA9150_PAGE_CON = 0x000 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_I2C_PAGE_SHIFT 1
+#define DA9150_I2C_PAGE_MASK (0x1f << 1)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_STATUS_A = 0x068 */
+#define DA9150_WKUP_STAT_SHIFT 2
+#define DA9150_WKUP_STAT_MASK (0x0f << 2)
+#define DA9150_SLEEP_STAT_SHIFT 6
+#define DA9150_SLEEP_STAT_MASK (0x03 << 6)
+
+/* DA9150_STATUS_B = 0x069 */
+#define DA9150_VFAULT_STAT_SHIFT 0
+#define DA9150_VFAULT_STAT_MASK BIT(0)
+#define DA9150_TFAULT_STAT_SHIFT 1
+#define DA9150_TFAULT_STAT_MASK BIT(1)
+
+/* DA9150_STATUS_C = 0x06A */
+#define DA9150_VDD33_STAT_SHIFT 0
+#define DA9150_VDD33_STAT_MASK BIT(0)
+#define DA9150_VDD33_SLEEP_SHIFT 1
+#define DA9150_VDD33_SLEEP_MASK BIT(1)
+#define DA9150_LFOSC_STAT_SHIFT 7
+#define DA9150_LFOSC_STAT_MASK BIT(7)
+
+/* DA9150_STATUS_D = 0x06B */
+#define DA9150_GPIOA_STAT_SHIFT 0
+#define DA9150_GPIOA_STAT_MASK BIT(0)
+#define DA9150_GPIOB_STAT_SHIFT 1
+#define DA9150_GPIOB_STAT_MASK BIT(1)
+#define DA9150_GPIOC_STAT_SHIFT 2
+#define DA9150_GPIOC_STAT_MASK BIT(2)
+#define DA9150_GPIOD_STAT_SHIFT 3
+#define DA9150_GPIOD_STAT_MASK BIT(3)
+
+/* DA9150_STATUS_E = 0x06C */
+#define DA9150_DTYPE_SHIFT 0
+#define DA9150_DTYPE_MASK (0x1f << 0)
+#define DA9150_DTYPE_DT_NIL (0x00 << 0)
+#define DA9150_DTYPE_DT_USB_OTG BIT(0)
+#define DA9150_DTYPE_DT_USB_STD (0x02 << 0)
+#define DA9150_DTYPE_DT_USB_CHG (0x03 << 0)
+#define DA9150_DTYPE_DT_ACA_CHG (0x04 << 0)
+#define DA9150_DTYPE_DT_ACA_OTG (0x05 << 0)
+#define DA9150_DTYPE_DT_ACA_DOC (0x06 << 0)
+#define DA9150_DTYPE_DT_DED_CHG (0x07 << 0)
+#define DA9150_DTYPE_DT_CR5_CHG (0x08 << 0)
+#define DA9150_DTYPE_DT_CR4_CHG (0x0c << 0)
+#define DA9150_DTYPE_DT_PT_CHG (0x11 << 0)
+#define DA9150_DTYPE_DT_NN_ACC (0x16 << 0)
+#define DA9150_DTYPE_DT_NN_CHG (0x17 << 0)
+
+/* DA9150_STATUS_F = 0x06D */
+#define DA9150_SESS_VLD_SHIFT 0
+#define DA9150_SESS_VLD_MASK BIT(0)
+#define DA9150_ID_ERR_SHIFT 1
+#define DA9150_ID_ERR_MASK BIT(1)
+#define DA9150_PT_CHG_SHIFT 2
+#define DA9150_PT_CHG_MASK BIT(2)
+
+/* DA9150_STATUS_G = 0x06E */
+#define DA9150_RID_SHIFT 0
+#define DA9150_RID_MASK (0xff << 0)
+
+/* DA9150_STATUS_H = 0x06F */
+#define DA9150_VBUS_STAT_SHIFT 0
+#define DA9150_VBUS_STAT_MASK (0x07 << 0)
+#define DA9150_VBUS_STAT_OFF (0x00 << 0)
+#define DA9150_VBUS_STAT_WAIT BIT(0)
+#define DA9150_VBUS_STAT_CHG (0x02 << 0)
+#define DA9150_VBUS_TRED_SHIFT 3
+#define DA9150_VBUS_TRED_MASK BIT(3)
+#define DA9150_VBUS_DROP_STAT_SHIFT 4
+#define DA9150_VBUS_DROP_STAT_MASK (0x0f << 4)
+
+/* DA9150_STATUS_I = 0x070 */
+#define DA9150_VBUS_ISET_STAT_SHIFT 0
+#define DA9150_VBUS_ISET_STAT_MASK (0x1f << 0)
+#define DA9150_VBUS_OT_SHIFT 7
+#define DA9150_VBUS_OT_MASK BIT(7)
+
+/* DA9150_STATUS_J = 0x071 */
+#define DA9150_CHG_STAT_SHIFT 0
+#define DA9150_CHG_STAT_MASK (0x0f << 0)
+#define DA9150_CHG_STAT_OFF (0x00 << 0)
+#define DA9150_CHG_STAT_SUSP BIT(0)
+#define DA9150_CHG_STAT_ACT (0x02 << 0)
+#define DA9150_CHG_STAT_PRE (0x03 << 0)
+#define DA9150_CHG_STAT_CC (0x04 << 0)
+#define DA9150_CHG_STAT_CV (0x05 << 0)
+#define DA9150_CHG_STAT_FULL (0x06 << 0)
+#define DA9150_CHG_STAT_TEMP (0x07 << 0)
+#define DA9150_CHG_STAT_TIME (0x08 << 0)
+#define DA9150_CHG_STAT_BAT (0x09 << 0)
+#define DA9150_CHG_TEMP_SHIFT 4
+#define DA9150_CHG_TEMP_MASK (0x07 << 4)
+#define DA9150_CHG_TEMP_UNDER (0x06 << 4)
+#define DA9150_CHG_TEMP_OVER (0x07 << 4)
+#define DA9150_CHG_IEND_STAT_SHIFT 7
+#define DA9150_CHG_IEND_STAT_MASK BIT(7)
+
+/* DA9150_STATUS_K = 0x072 */
+#define DA9150_CHG_IAV_H_SHIFT 0
+#define DA9150_CHG_IAV_H_MASK (0xff << 0)
+
+/* DA9150_STATUS_L = 0x073 */
+#define DA9150_CHG_IAV_L_SHIFT 5
+#define DA9150_CHG_IAV_L_MASK (0x07 << 5)
+
+/* DA9150_STATUS_N = 0x074 */
+#define DA9150_CHG_TIME_SHIFT 1
+#define DA9150_CHG_TIME_MASK BIT(1)
+#define DA9150_CHG_TRED_SHIFT 2
+#define DA9150_CHG_TRED_MASK BIT(2)
+#define DA9150_CHG_TJUNC_CLASS_SHIFT 3
+#define DA9150_CHG_TJUNC_CLASS_MASK (0x07 << 3)
+#define DA9150_CHG_TJUNC_CLASS_6 (0x06 << 3)
+#define DA9150_EBS_STAT_SHIFT 6
+#define DA9150_EBS_STAT_MASK BIT(6)
+#define DA9150_CHG_BAT_REMOVED_SHIFT 7
+#define DA9150_CHG_BAT_REMOVED_MASK BIT(7)
+
+/* DA9150_FAULT_LOG_A = 0x076 */
+#define DA9150_TEMP_FAULT_SHIFT 0
+#define DA9150_TEMP_FAULT_MASK BIT(0)
+#define DA9150_VSYS_FAULT_SHIFT 1
+#define DA9150_VSYS_FAULT_MASK BIT(1)
+#define DA9150_START_FAULT_SHIFT 2
+#define DA9150_START_FAULT_MASK BIT(2)
+#define DA9150_EXT_FAULT_SHIFT 3
+#define DA9150_EXT_FAULT_MASK BIT(3)
+#define DA9150_POR_FAULT_SHIFT 4
+#define DA9150_POR_FAULT_MASK BIT(4)
+
+/* DA9150_FAULT_LOG_B = 0x077 */
+#define DA9150_VBUS_FAULT_SHIFT 0
+#define DA9150_VBUS_FAULT_MASK BIT(0)
+#define DA9150_OTG_FAULT_SHIFT 1
+#define DA9150_OTG_FAULT_MASK BIT(1)
+
+/* DA9150_EVENT_E = 0x078 */
+#define DA9150_E_VBUS_SHIFT 0
+#define DA9150_E_VBUS_MASK BIT(0)
+#define DA9150_E_CHG_SHIFT 1
+#define DA9150_E_CHG_MASK BIT(1)
+#define DA9150_E_TCLASS_SHIFT 2
+#define DA9150_E_TCLASS_MASK BIT(2)
+#define DA9150_E_TJUNC_SHIFT 3
+#define DA9150_E_TJUNC_MASK BIT(3)
+#define DA9150_E_VFAULT_SHIFT 4
+#define DA9150_E_VFAULT_MASK BIT(4)
+#define DA9150_EVENTS_H_SHIFT 5
+#define DA9150_EVENTS_H_MASK BIT(5)
+#define DA9150_EVENTS_G_SHIFT 6
+#define DA9150_EVENTS_G_MASK BIT(6)
+#define DA9150_EVENTS_F_SHIFT 7
+#define DA9150_EVENTS_F_MASK BIT(7)
+
+/* DA9150_EVENT_F = 0x079 */
+#define DA9150_E_CONF_SHIFT 0
+#define DA9150_E_CONF_MASK BIT(0)
+#define DA9150_E_DAT_SHIFT 1
+#define DA9150_E_DAT_MASK BIT(1)
+#define DA9150_E_DTYPE_SHIFT 3
+#define DA9150_E_DTYPE_MASK BIT(3)
+#define DA9150_E_ID_SHIFT 4
+#define DA9150_E_ID_MASK BIT(4)
+#define DA9150_E_ADP_SHIFT 5
+#define DA9150_E_ADP_MASK BIT(5)
+#define DA9150_E_SESS_END_SHIFT 6
+#define DA9150_E_SESS_END_MASK BIT(6)
+#define DA9150_E_SESS_VLD_SHIFT 7
+#define DA9150_E_SESS_VLD_MASK BIT(7)
+
+/* DA9150_EVENT_G = 0x07A */
+#define DA9150_E_FG_SHIFT 0
+#define DA9150_E_FG_MASK BIT(0)
+#define DA9150_E_GP_SHIFT 1
+#define DA9150_E_GP_MASK BIT(1)
+#define DA9150_E_TBAT_SHIFT 2
+#define DA9150_E_TBAT_MASK BIT(2)
+#define DA9150_E_GPIOA_SHIFT 3
+#define DA9150_E_GPIOA_MASK BIT(3)
+#define DA9150_E_GPIOB_SHIFT 4
+#define DA9150_E_GPIOB_MASK BIT(4)
+#define DA9150_E_GPIOC_SHIFT 5
+#define DA9150_E_GPIOC_MASK BIT(5)
+#define DA9150_E_GPIOD_SHIFT 6
+#define DA9150_E_GPIOD_MASK BIT(6)
+#define DA9150_E_GPADC_SHIFT 7
+#define DA9150_E_GPADC_MASK BIT(7)
+
+/* DA9150_EVENT_H = 0x07B */
+#define DA9150_E_WKUP_SHIFT 0
+#define DA9150_E_WKUP_MASK BIT(0)
+
+/* DA9150_IRQ_MASK_E = 0x07C */
+#define DA9150_M_VBUS_SHIFT 0
+#define DA9150_M_VBUS_MASK BIT(0)
+#define DA9150_M_CHG_SHIFT 1
+#define DA9150_M_CHG_MASK BIT(1)
+#define DA9150_M_TJUNC_SHIFT 3
+#define DA9150_M_TJUNC_MASK BIT(3)
+#define DA9150_M_VFAULT_SHIFT 4
+#define DA9150_M_VFAULT_MASK BIT(4)
+
+/* DA9150_IRQ_MASK_F = 0x07D */
+#define DA9150_M_CONF_SHIFT 0
+#define DA9150_M_CONF_MASK BIT(0)
+#define DA9150_M_DAT_SHIFT 1
+#define DA9150_M_DAT_MASK BIT(1)
+#define DA9150_M_DTYPE_SHIFT 3
+#define DA9150_M_DTYPE_MASK BIT(3)
+#define DA9150_M_ID_SHIFT 4
+#define DA9150_M_ID_MASK BIT(4)
+#define DA9150_M_ADP_SHIFT 5
+#define DA9150_M_ADP_MASK BIT(5)
+#define DA9150_M_SESS_END_SHIFT 6
+#define DA9150_M_SESS_END_MASK BIT(6)
+#define DA9150_M_SESS_VLD_SHIFT 7
+#define DA9150_M_SESS_VLD_MASK BIT(7)
+
+/* DA9150_IRQ_MASK_G = 0x07E */
+#define DA9150_M_FG_SHIFT 0
+#define DA9150_M_FG_MASK BIT(0)
+#define DA9150_M_GP_SHIFT 1
+#define DA9150_M_GP_MASK BIT(1)
+#define DA9150_M_TBAT_SHIFT 2
+#define DA9150_M_TBAT_MASK BIT(2)
+#define DA9150_M_GPIOA_SHIFT 3
+#define DA9150_M_GPIOA_MASK BIT(3)
+#define DA9150_M_GPIOB_SHIFT 4
+#define DA9150_M_GPIOB_MASK BIT(4)
+#define DA9150_M_GPIOC_SHIFT 5
+#define DA9150_M_GPIOC_MASK BIT(5)
+#define DA9150_M_GPIOD_SHIFT 6
+#define DA9150_M_GPIOD_MASK BIT(6)
+#define DA9150_M_GPADC_SHIFT 7
+#define DA9150_M_GPADC_MASK BIT(7)
+
+/* DA9150_IRQ_MASK_H = 0x07F */
+#define DA9150_M_WKUP_SHIFT 0
+#define DA9150_M_WKUP_MASK BIT(0)
+
+/* DA9150_PAGE_CON_1 = 0x080 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_CONFIG_A = 0x0E0 */
+#define DA9150_RESET_DUR_SHIFT 0
+#define DA9150_RESET_DUR_MASK (0x03 << 0)
+#define DA9150_RESET_EXT_SHIFT 2
+#define DA9150_RESET_EXT_MASK (0x03 << 2)
+#define DA9150_START_MAX_SHIFT 4
+#define DA9150_START_MAX_MASK (0x03 << 4)
+#define DA9150_PS_WAIT_EN_SHIFT 6
+#define DA9150_PS_WAIT_EN_MASK BIT(6)
+#define DA9150_PS_DISABLE_DIRECT_SHIFT 7
+#define DA9150_PS_DISABLE_DIRECT_MASK BIT(7)
+
+/* DA9150_CONFIG_B = 0x0E1 */
+#define DA9150_VFAULT_ADJ_SHIFT 0
+#define DA9150_VFAULT_ADJ_MASK (0x0f << 0)
+#define DA9150_VFAULT_HYST_SHIFT 4
+#define DA9150_VFAULT_HYST_MASK (0x07 << 4)
+#define DA9150_VFAULT_EN_SHIFT 7
+#define DA9150_VFAULT_EN_MASK BIT(7)
+
+/* DA9150_CONFIG_C = 0x0E2 */
+#define DA9150_VSYS_MIN_SHIFT 3
+#define DA9150_VSYS_MIN_MASK (0x1f << 3)
+
+/* DA9150_CONFIG_D = 0x0E3 */
+#define DA9150_LFOSC_EXT_SHIFT 0
+#define DA9150_LFOSC_EXT_MASK BIT(0)
+#define DA9150_VDD33_DWN_SHIFT 1
+#define DA9150_VDD33_DWN_MASK BIT(1)
+#define DA9150_WKUP_PM_EN_SHIFT 2
+#define DA9150_WKUP_PM_EN_MASK BIT(2)
+#define DA9150_WKUP_CE_SEL_SHIFT 3
+#define DA9150_WKUP_CE_SEL_MASK (0x03 << 3)
+#define DA9150_WKUP_CLK32K_EN_SHIFT 5
+#define DA9150_WKUP_CLK32K_EN_MASK BIT(5)
+#define DA9150_DISABLE_DEL_SHIFT 7
+#define DA9150_DISABLE_DEL_MASK BIT(7)
+
+/* DA9150_CONFIG_E = 0x0E4 */
+#define DA9150_PM_SPKSUP_DIS_SHIFT 0
+#define DA9150_PM_SPKSUP_DIS_MASK BIT(0)
+#define DA9150_PM_MERGE_SHIFT 1
+#define DA9150_PM_MERGE_MASK BIT(1)
+#define DA9150_PM_SR_OFF_SHIFT 2
+#define DA9150_PM_SR_OFF_MASK BIT(2)
+#define DA9150_PM_TIMEOUT_EN_SHIFT 3
+#define DA9150_PM_TIMEOUT_EN_MASK BIT(3)
+#define DA9150_PM_DLY_SEL_SHIFT 4
+#define DA9150_PM_DLY_SEL_MASK (0x07 << 4)
+#define DA9150_PM_OUT_DLY_SEL_SHIFT 7
+#define DA9150_PM_OUT_DLY_SEL_MASK BIT(7)
+
+/* DA9150_CONTROL_A = 0x0E5 */
+#define DA9150_VDD33_SL_SHIFT 0
+#define DA9150_VDD33_SL_MASK BIT(0)
+#define DA9150_VDD33_LPM_SHIFT 1
+#define DA9150_VDD33_LPM_MASK (0x03 << 1)
+#define DA9150_VDD33_EN_SHIFT 3
+#define DA9150_VDD33_EN_MASK BIT(3)
+#define DA9150_GPI_LPM_SHIFT 6
+#define DA9150_GPI_LPM_MASK BIT(6)
+#define DA9150_PM_IF_LPM_SHIFT 7
+#define DA9150_PM_IF_LPM_MASK BIT(7)
+
+/* DA9150_CONTROL_B = 0x0E6 */
+#define DA9150_LPM_SHIFT 0
+#define DA9150_LPM_MASK BIT(0)
+#define DA9150_RESET_SHIFT 1
+#define DA9150_RESET_MASK BIT(1)
+#define DA9150_RESET_USRCONF_EN_SHIFT 2
+#define DA9150_RESET_USRCONF_EN_MASK BIT(2)
+
+/* DA9150_CONTROL_C = 0x0E7 */
+#define DA9150_DISABLE_SHIFT 0
+#define DA9150_DISABLE_MASK BIT(0)
+
+/* DA9150_GPIO_A_B = 0x0E8 */
+#define DA9150_GPIOA_PIN_SHIFT 0
+#define DA9150_GPIOA_PIN_MASK (0x07 << 0)
+#define DA9150_GPIOA_PIN_GPI (0x00 << 0)
+#define DA9150_GPIOA_PIN_GPO_OD BIT(0)
+#define DA9150_GPIOA_TYPE_SHIFT 3
+#define DA9150_GPIOA_TYPE_MASK BIT(3)
+#define DA9150_GPIOB_PIN_SHIFT 4
+#define DA9150_GPIOB_PIN_MASK (0x07 << 4)
+#define DA9150_GPIOB_PIN_GPI (0x00 << 4)
+#define DA9150_GPIOB_PIN_GPO_OD BIT(4)
+#define DA9150_GPIOB_TYPE_SHIFT 7
+#define DA9150_GPIOB_TYPE_MASK BIT(7)
+
+/* DA9150_GPIO_C_D = 0x0E9 */
+#define DA9150_GPIOC_PIN_SHIFT 0
+#define DA9150_GPIOC_PIN_MASK (0x07 << 0)
+#define DA9150_GPIOC_PIN_GPI (0x00 << 0)
+#define DA9150_GPIOC_PIN_GPO_OD BIT(0)
+#define DA9150_GPIOC_TYPE_SHIFT 3
+#define DA9150_GPIOC_TYPE_MASK BIT(3)
+#define DA9150_GPIOD_PIN_SHIFT 4
+#define DA9150_GPIOD_PIN_MASK (0x07 << 4)
+#define DA9150_GPIOD_PIN_GPI (0x00 << 4)
+#define DA9150_GPIOD_PIN_GPO_OD BIT(4)
+#define DA9150_GPIOD_TYPE_SHIFT 7
+#define DA9150_GPIOD_TYPE_MASK BIT(7)
+
+/* DA9150_GPIO_MODE_CONT = 0x0EA */
+#define DA9150_GPIOA_MODE_SHIFT 0
+#define DA9150_GPIOA_MODE_MASK BIT(0)
+#define DA9150_GPIOB_MODE_SHIFT 1
+#define DA9150_GPIOB_MODE_MASK BIT(1)
+#define DA9150_GPIOC_MODE_SHIFT 2
+#define DA9150_GPIOC_MODE_MASK BIT(2)
+#define DA9150_GPIOD_MODE_SHIFT 3
+#define DA9150_GPIOD_MODE_MASK BIT(3)
+#define DA9150_GPIOA_CONT_SHIFT 4
+#define DA9150_GPIOA_CONT_MASK BIT(4)
+#define DA9150_GPIOB_CONT_SHIFT 5
+#define DA9150_GPIOB_CONT_MASK BIT(5)
+#define DA9150_GPIOC_CONT_SHIFT 6
+#define DA9150_GPIOC_CONT_MASK BIT(6)
+#define DA9150_GPIOD_CONT_SHIFT 7
+#define DA9150_GPIOD_CONT_MASK BIT(7)
+
+/* DA9150_GPIO_CTRL_B = 0x0EB */
+#define DA9150_WAKE_PIN_SHIFT 0
+#define DA9150_WAKE_PIN_MASK (0x03 << 0)
+#define DA9150_WAKE_MODE_SHIFT 2
+#define DA9150_WAKE_MODE_MASK BIT(2)
+#define DA9150_WAKE_CONT_SHIFT 3
+#define DA9150_WAKE_CONT_MASK BIT(3)
+#define DA9150_WAKE_DLY_SHIFT 4
+#define DA9150_WAKE_DLY_MASK BIT(4)
+
+/* DA9150_GPIO_CTRL_A = 0x0EC */
+#define DA9150_GPIOA_ANAEN_SHIFT 0
+#define DA9150_GPIOA_ANAEN_MASK BIT(0)
+#define DA9150_GPIOB_ANAEN_SHIFT 1
+#define DA9150_GPIOB_ANAEN_MASK BIT(1)
+#define DA9150_GPIOC_ANAEN_SHIFT 2
+#define DA9150_GPIOC_ANAEN_MASK BIT(2)
+#define DA9150_GPIOD_ANAEN_SHIFT 3
+#define DA9150_GPIOD_ANAEN_MASK BIT(3)
+#define DA9150_GPIO_ANAEN 0x01
+#define DA9150_GPIO_ANAEN_MASK 0x0F
+#define DA9150_CHGLED_PIN_SHIFT 5
+#define DA9150_CHGLED_PIN_MASK (0x07 << 5)
+
+/* DA9150_GPIO_CTRL_C = 0x0ED */
+#define DA9150_CHGBL_DUR_SHIFT 0
+#define DA9150_CHGBL_DUR_MASK (0x03 << 0)
+#define DA9150_CHGBL_DBL_SHIFT 2
+#define DA9150_CHGBL_DBL_MASK BIT(2)
+#define DA9150_CHGBL_FRQ_SHIFT 3
+#define DA9150_CHGBL_FRQ_MASK (0x03 << 3)
+#define DA9150_CHGBL_FLKR_SHIFT 5
+#define DA9150_CHGBL_FLKR_MASK BIT(5)
+
+/* DA9150_GPIO_CFG_A = 0x0EE */
+#define DA9150_CE_LPM_DEB_SHIFT 0
+#define DA9150_CE_LPM_DEB_MASK (0x07 << 0)
+
+/* DA9150_GPIO_CFG_B = 0x0EF */
+#define DA9150_GPIOA_PUPD_SHIFT 0
+#define DA9150_GPIOA_PUPD_MASK BIT(0)
+#define DA9150_GPIOB_PUPD_SHIFT 1
+#define DA9150_GPIOB_PUPD_MASK BIT(1)
+#define DA9150_GPIOC_PUPD_SHIFT 2
+#define DA9150_GPIOC_PUPD_MASK BIT(2)
+#define DA9150_GPIOD_PUPD_SHIFT 3
+#define DA9150_GPIOD_PUPD_MASK BIT(3)
+#define DA9150_GPIO_PUPD_MASK (0xF << 0)
+#define DA9150_GPI_DEB_SHIFT 4
+#define DA9150_GPI_DEB_MASK (0x07 << 4)
+#define DA9150_LPM_EN_SHIFT 7
+#define DA9150_LPM_EN_MASK BIT(7)
+
+/* DA9150_GPIO_CFG_C = 0x0F0 */
+#define DA9150_GPI_V_SHIFT 0
+#define DA9150_GPI_V_MASK BIT(0)
+#define DA9150_VDDIO_INT_SHIFT 1
+#define DA9150_VDDIO_INT_MASK BIT(1)
+#define DA9150_FAULT_PIN_SHIFT 3
+#define DA9150_FAULT_PIN_MASK (0x07 << 3)
+#define DA9150_FAULT_TYPE_SHIFT 6
+#define DA9150_FAULT_TYPE_MASK BIT(6)
+#define DA9150_NIRQ_PUPD_SHIFT 7
+#define DA9150_NIRQ_PUPD_MASK BIT(7)
+
+/* DA9150_GPADC_MAN = 0x0F2 */
+#define DA9150_GPADC_EN_SHIFT 0
+#define DA9150_GPADC_EN_MASK BIT(0)
+#define DA9150_GPADC_MUX_SHIFT 1
+#define DA9150_GPADC_MUX_MASK (0x1f << 1)
+
+/* DA9150_GPADC_RES_A = 0x0F4 */
+#define DA9150_GPADC_RES_H_SHIFT 0
+#define DA9150_GPADC_RES_H_MASK (0xff << 0)
+
+/* DA9150_GPADC_RES_B = 0x0F5 */
+#define DA9150_GPADC_RUN_SHIFT 0
+#define DA9150_GPADC_RUN_MASK BIT(0)
+#define DA9150_GPADC_RES_L_SHIFT 6
+#define DA9150_GPADC_RES_L_MASK (0x03 << 6)
+#define DA9150_GPADC_RES_L_BITS 2
+
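The GPADC result is split between GPADC_RES_A and the top two bits of GPADC_RES_B; below is a sketch of one plausible way to assemble the 10-bit value, assuming RES_A carries the upper eight bits (that ordering is not stated in this header) and that u8/u16 come from <linux/types.h>.

/*
 * Illustrative sketch: combine the two GPADC result registers into a
 * 10-bit value. The high/low ordering is an assumption of this sketch.
 */
static u16 da9150_example_gpadc_result(u8 res_a, u8 res_b)
{
	return ((res_a & DA9150_GPADC_RES_H_MASK) << DA9150_GPADC_RES_L_BITS) |
	       ((res_b & DA9150_GPADC_RES_L_MASK) >> DA9150_GPADC_RES_L_SHIFT);
}
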
+/* DA9150_PAGE_CON_2 = 0x100 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_OTP_CONT_SHARED = 0x101 */
+#define DA9150_PC_DONE_SHIFT 3
+#define DA9150_PC_DONE_MASK BIT(3)
+
+/* DA9150_INTERFACE_SHARED = 0x105 */
+#define DA9150_IF_BASE_ADDR_SHIFT 4
+#define DA9150_IF_BASE_ADDR_MASK (0x0f << 4)
+
+/* DA9150_CONFIG_A_SHARED = 0x106 */
+#define DA9150_NIRQ_VDD_SHIFT 1
+#define DA9150_NIRQ_VDD_MASK BIT(1)
+#define DA9150_NIRQ_PIN_SHIFT 2
+#define DA9150_NIRQ_PIN_MASK BIT(2)
+#define DA9150_NIRQ_TYPE_SHIFT 3
+#define DA9150_NIRQ_TYPE_MASK BIT(3)
+#define DA9150_PM_IF_V_SHIFT 4
+#define DA9150_PM_IF_V_MASK BIT(4)
+#define DA9150_PM_IF_FMP_SHIFT 5
+#define DA9150_PM_IF_FMP_MASK BIT(5)
+#define DA9150_PM_IF_HSM_SHIFT 6
+#define DA9150_PM_IF_HSM_MASK BIT(6)
+
+/* DA9150_CONFIG_D_SHARED = 0x109 */
+#define DA9150_NIRQ_MODE_SHIFT 1
+#define DA9150_NIRQ_MODE_MASK BIT(1)
+
+/* DA9150_ADETVB_CFG_C = 0x150 */
+#define DA9150_TADP_RISE_SHIFT 0
+#define DA9150_TADP_RISE_MASK (0xff << 0)
+
+/* DA9150_ADETD_STAT = 0x151 */
+#define DA9150_DCD_STAT_SHIFT 0
+#define DA9150_DCD_STAT_MASK BIT(0)
+#define DA9150_PCD_STAT_SHIFT 1
+#define DA9150_PCD_STAT_MASK (0x03 << 1)
+#define DA9150_SCD_STAT_SHIFT 3
+#define DA9150_SCD_STAT_MASK (0x03 << 3)
+#define DA9150_DP_STAT_SHIFT 5
+#define DA9150_DP_STAT_MASK BIT(5)
+#define DA9150_DM_STAT_SHIFT 6
+#define DA9150_DM_STAT_MASK BIT(6)
+
+/* DA9150_ADET_CMPSTAT = 0x152 */
+#define DA9150_DP_COMP_SHIFT 1
+#define DA9150_DP_COMP_MASK BIT(1)
+#define DA9150_DM_COMP_SHIFT 2
+#define DA9150_DM_COMP_MASK BIT(2)
+#define DA9150_ADP_SNS_COMP_SHIFT 3
+#define DA9150_ADP_SNS_COMP_MASK BIT(3)
+#define DA9150_ADP_PRB_COMP_SHIFT 4
+#define DA9150_ADP_PRB_COMP_MASK BIT(4)
+#define DA9150_ID_COMP_SHIFT 5
+#define DA9150_ID_COMP_MASK BIT(5)
+
+/* DA9150_ADET_CTRL_A = 0x153 */
+#define DA9150_AID_DAT_SHIFT 0
+#define DA9150_AID_DAT_MASK BIT(0)
+#define DA9150_AID_ID_SHIFT 1
+#define DA9150_AID_ID_MASK BIT(1)
+#define DA9150_AID_TRIG_SHIFT 2
+#define DA9150_AID_TRIG_MASK BIT(2)
+
+/* DA9150_ADETVB_CFG_B = 0x154 */
+#define DA9150_VB_MODE_SHIFT 0
+#define DA9150_VB_MODE_MASK (0x03 << 0)
+#define DA9150_VB_MODE_VB_SESS BIT(0)
+
+#define DA9150_TADP_PRB_SHIFT 2
+#define DA9150_TADP_PRB_MASK BIT(2)
+#define DA9150_DAT_RPD_EXT_SHIFT 5
+#define DA9150_DAT_RPD_EXT_MASK BIT(5)
+#define DA9150_CONF_RPD_SHIFT 6
+#define DA9150_CONF_RPD_MASK BIT(6)
+#define DA9150_CONF_SRP_SHIFT 7
+#define DA9150_CONF_SRP_MASK BIT(7)
+
+/* DA9150_ADETVB_CFG_A = 0x155 */
+#define DA9150_AID_MODE_SHIFT 0
+#define DA9150_AID_MODE_MASK (0x03 << 0)
+#define DA9150_AID_EXT_POL_SHIFT 2
+#define DA9150_AID_EXT_POL_MASK BIT(2)
+
+/* DA9150_ADETAC_CFG_A = 0x156 */
+#define DA9150_ISET_CDP_SHIFT 0
+#define DA9150_ISET_CDP_MASK (0x1f << 0)
+#define DA9150_CONF_DBP_SHIFT 5
+#define DA9150_CONF_DBP_MASK BIT(5)
+
+/* DA9150_ADDETAC_CFG_B = 0x157 */
+#define DA9150_ISET_DCHG_SHIFT 0
+#define DA9150_ISET_DCHG_MASK (0x1f << 0)
+#define DA9150_CONF_GPIOA_SHIFT 5
+#define DA9150_CONF_GPIOA_MASK BIT(5)
+#define DA9150_CONF_GPIOB_SHIFT 6
+#define DA9150_CONF_GPIOB_MASK BIT(6)
+#define DA9150_AID_VB_SHIFT 7
+#define DA9150_AID_VB_MASK BIT(7)
+
+/* DA9150_ADETAC_CFG_C = 0x158 */
+#define DA9150_ISET_DEF_SHIFT 0
+#define DA9150_ISET_DEF_MASK (0x1f << 0)
+#define DA9150_CONF_MODE_SHIFT 5
+#define DA9150_CONF_MODE_MASK (0x03 << 5)
+#define DA9150_AID_CR_DIS_SHIFT 7
+#define DA9150_AID_CR_DIS_MASK BIT(7)
+
+/* DA9150_ADETAC_CFG_D = 0x159 */
+#define DA9150_ISET_UNIT_SHIFT 0
+#define DA9150_ISET_UNIT_MASK (0x1f << 0)
+#define DA9150_AID_UNCLAMP_SHIFT 5
+#define DA9150_AID_UNCLAMP_MASK BIT(5)
+
+/* DA9150_ADETVB_CFG_D = 0x15A */
+#define DA9150_ID_MODE_SHIFT 0
+#define DA9150_ID_MODE_MASK (0x03 << 0)
+#define DA9150_DAT_MODE_SHIFT 2
+#define DA9150_DAT_MODE_MASK (0x0f << 2)
+#define DA9150_DAT_SWP_SHIFT 6
+#define DA9150_DAT_SWP_MASK BIT(6)
+#define DA9150_DAT_CLAMP_EXT_SHIFT 7
+#define DA9150_DAT_CLAMP_EXT_MASK BIT(7)
+
+/* DA9150_ADETID_CFG_A = 0x15B */
+#define DA9150_TID_POLL_SHIFT 0
+#define DA9150_TID_POLL_MASK (0x07 << 0)
+#define DA9150_RID_CONV_SHIFT 3
+#define DA9150_RID_CONV_MASK BIT(3)
+
+/* DA9150_ADET_RID_PT_CHG_H = 0x15C */
+#define DA9150_RID_PT_CHG_H_SHIFT 0
+#define DA9150_RID_PT_CHG_H_MASK (0xff << 0)
+
+/* DA9150_ADET_RID_PT_CHG_L = 0x15D */
+#define DA9150_RID_PT_CHG_L_SHIFT 6
+#define DA9150_RID_PT_CHG_L_MASK (0x03 << 6)
+
+/* DA9150_PPR_TCTR_B = 0x160 */
+#define DA9150_CHG_TCTR_VAL_SHIFT 0
+#define DA9150_CHG_TCTR_VAL_MASK (0xff << 0)
+
+/* DA9150_PPR_BKCTRL_A = 0x163 */
+#define DA9150_VBUS_MODE_SHIFT 0
+#define DA9150_VBUS_MODE_MASK (0x03 << 0)
+#define DA9150_VBUS_MODE_CHG BIT(0)
+#define DA9150_VBUS_MODE_OTG (0x02 << 0)
+#define DA9150_VBUS_LPM_SHIFT 2
+#define DA9150_VBUS_LPM_MASK (0x03 << 2)
+#define DA9150_VBUS_SUSP_SHIFT 4
+#define DA9150_VBUS_SUSP_MASK BIT(4)
+#define DA9150_VBUS_PWM_SHIFT 5
+#define DA9150_VBUS_PWM_MASK BIT(5)
+#define DA9150_VBUS_ISO_SHIFT 6
+#define DA9150_VBUS_ISO_MASK BIT(6)
+#define DA9150_VBUS_LDO_SHIFT 7
+#define DA9150_VBUS_LDO_MASK BIT(7)
+
+/* DA9150_PPR_BKCFG_A = 0x164 */
+#define DA9150_VBUS_ISET_SHIFT 0
+#define DA9150_VBUS_ISET_MASK (0x1f << 0)
+#define DA9150_VBUS_IMAX_SHIFT 5
+#define DA9150_VBUS_IMAX_MASK BIT(5)
+#define DA9150_VBUS_IOTG_SHIFT 6
+#define DA9150_VBUS_IOTG_MASK (0x03 << 6)
+
+/* DA9150_PPR_BKCFG_B = 0x165 */
+#define DA9150_VBUS_DROP_SHIFT 0
+#define DA9150_VBUS_DROP_MASK (0x0f << 0)
+#define DA9150_VBUS_FAULT_DIS_SHIFT 6
+#define DA9150_VBUS_FAULT_DIS_MASK BIT(6)
+#define DA9150_OTG_FAULT_DIS_SHIFT 7
+#define DA9150_OTG_FAULT_DIS_MASK BIT(7)
+
+/* DA9150_PPR_CHGCTRL_A = 0x166 */
+#define DA9150_CHG_EN_SHIFT 0
+#define DA9150_CHG_EN_MASK BIT(0)
+
+/* DA9150_PPR_CHGCTRL_B = 0x167 */
+#define DA9150_CHG_VBAT_SHIFT 0
+#define DA9150_CHG_VBAT_MASK (0x1f << 0)
+#define DA9150_CHG_VDROP_SHIFT 6
+#define DA9150_CHG_VDROP_MASK (0x03 << 6)
+
+/* DA9150_PPR_CHGCTRL_C = 0x168 */
+#define DA9150_CHG_VFAULT_SHIFT 0
+#define DA9150_CHG_VFAULT_MASK (0x0f << 0)
+#define DA9150_CHG_IPRE_SHIFT 4
+#define DA9150_CHG_IPRE_MASK (0x03 << 4)
+
+/* DA9150_PPR_TCTR_A = 0x169 */
+#define DA9150_CHG_TCTR_SHIFT 0
+#define DA9150_CHG_TCTR_MASK (0x07 << 0)
+#define DA9150_CHG_TCTR_MODE_SHIFT 4
+#define DA9150_CHG_TCTR_MODE_MASK BIT(4)
+
+/* DA9150_PPR_CHGCTRL_D = 0x16A */
+#define DA9150_CHG_IBAT_SHIFT 0
+#define DA9150_CHG_IBAT_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_E = 0x16B */
+#define DA9150_CHG_IEND_SHIFT 0
+#define DA9150_CHG_IEND_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_F = 0x16C */
+#define DA9150_CHG_VCOLD_SHIFT 0
+#define DA9150_CHG_VCOLD_MASK (0x1f << 0)
+#define DA9150_TBAT_TQA_EN_SHIFT 6
+#define DA9150_TBAT_TQA_EN_MASK BIT(6)
+#define DA9150_TBAT_TDP_EN_SHIFT 7
+#define DA9150_TBAT_TDP_EN_MASK BIT(7)
+
+/* DA9150_PPR_CHGCTRL_G = 0x16D */
+#define DA9150_CHG_VWARM_SHIFT 0
+#define DA9150_CHG_VWARM_MASK (0x1f << 0)
+
+/* DA9150_PPR_CHGCTRL_H = 0x16E */
+#define DA9150_CHG_VHOT_SHIFT 0
+#define DA9150_CHG_VHOT_MASK (0x1f << 0)
+
+/* DA9150_PPR_CHGCTRL_I = 0x16F */
+#define DA9150_CHG_ICOLD_SHIFT 0
+#define DA9150_CHG_ICOLD_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_J = 0x170 */
+#define DA9150_CHG_IWARM_SHIFT 0
+#define DA9150_CHG_IWARM_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_K = 0x171 */
+#define DA9150_CHG_IHOT_SHIFT 0
+#define DA9150_CHG_IHOT_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_L = 0x172 */
+#define DA9150_CHG_IBAT_TRED_SHIFT 0
+#define DA9150_CHG_IBAT_TRED_MASK (0xff << 0)
+
+/* DA9150_PPR_CHGCTRL_M = 0x173 */
+#define DA9150_CHG_VFLOAT_SHIFT 0
+#define DA9150_CHG_VFLOAT_MASK (0x0f << 0)
+#define DA9150_CHG_LPM_SHIFT 5
+#define DA9150_CHG_LPM_MASK BIT(5)
+#define DA9150_CHG_NBLO_SHIFT 6
+#define DA9150_CHG_NBLO_MASK BIT(6)
+#define DA9150_EBS_EN_SHIFT 7
+#define DA9150_EBS_EN_MASK BIT(7)
+
+/* DA9150_PPR_THYST_A = 0x174 */
+#define DA9150_TBAT_T1_SHIFT 0
+#define DA9150_TBAT_T1_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_B = 0x175 */
+#define DA9150_TBAT_T2_SHIFT 0
+#define DA9150_TBAT_T2_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_C = 0x176 */
+#define DA9150_TBAT_T3_SHIFT 0
+#define DA9150_TBAT_T3_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_D = 0x177 */
+#define DA9150_TBAT_T4_SHIFT 0
+#define DA9150_TBAT_T4_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_E = 0x178 */
+#define DA9150_TBAT_T5_SHIFT 0
+#define DA9150_TBAT_T5_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_F = 0x179 */
+#define DA9150_TBAT_H1_SHIFT 0
+#define DA9150_TBAT_H1_MASK (0xff << 0)
+
+/* DA9150_PPR_THYST_G = 0x17A */
+#define DA9150_TBAT_H5_SHIFT 0
+#define DA9150_TBAT_H5_MASK (0xff << 0)
+
+/* DA9150_PAGE_CON_3 = 0x180 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_PAGE_CON_4 = 0x200 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_PAGE_CON_5 = 0x280 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_PAGE_CON_6 = 0x300 */
+#define DA9150_PAGE_SHIFT 0
+#define DA9150_PAGE_MASK (0x3f << 0)
+#define DA9150_WRITE_MODE_SHIFT 6
+#define DA9150_WRITE_MODE_MASK BIT(6)
+#define DA9150_REVERT_SHIFT 7
+#define DA9150_REVERT_MASK BIT(7)
+
+/* DA9150_COREBTLD_STAT_A = 0x302 */
+#define DA9150_BOOTLD_STAT_SHIFT 0
+#define DA9150_BOOTLD_STAT_MASK (0x03 << 0)
+#define DA9150_CORE_LOCKUP_SHIFT 2
+#define DA9150_CORE_LOCKUP_MASK BIT(2)
+
+/* DA9150_COREBTLD_CTRL_A = 0x303 */
+#define DA9150_CORE_RESET_SHIFT 0
+#define DA9150_CORE_RESET_MASK BIT(0)
+#define DA9150_CORE_STOP_SHIFT 1
+#define DA9150_CORE_STOP_MASK BIT(1)
+
+/* DA9150_CORE_CONFIG_A = 0x304 */
+#define DA9150_CORE_MEMMUX_SHIFT 0
+#define DA9150_CORE_MEMMUX_MASK (0x03 << 0)
+#define DA9150_WDT_AUTO_START_SHIFT 2
+#define DA9150_WDT_AUTO_START_MASK BIT(2)
+#define DA9150_WDT_AUTO_LOCK_SHIFT 3
+#define DA9150_WDT_AUTO_LOCK_MASK BIT(3)
+#define DA9150_WDT_HLT_NO_CLK_SHIFT 4
+#define DA9150_WDT_HLT_NO_CLK_MASK BIT(4)
+
+/* DA9150_CORE_CONFIG_C = 0x305 */
+#define DA9150_CORE_SW_SIZE_SHIFT 0
+#define DA9150_CORE_SW_SIZE_MASK (0xff << 0)
+
+/* DA9150_CORE_CONFIG_B = 0x306 */
+#define DA9150_BOOTLD_EN_SHIFT 0
+#define DA9150_BOOTLD_EN_MASK BIT(0)
+#define DA9150_CORE_EN_SHIFT 2
+#define DA9150_CORE_EN_MASK BIT(2)
+#define DA9150_CORE_SW_SRC_SHIFT 3
+#define DA9150_CORE_SW_SRC_MASK (0x07 << 3)
+#define DA9150_DEEP_SLEEP_EN_SHIFT 7
+#define DA9150_DEEP_SLEEP_EN_MASK BIT(7)
+
+/* DA9150_CORE_CFG_DATA_A = 0x307 */
+#define DA9150_CORE_CFG_DT_A_SHIFT 0
+#define DA9150_CORE_CFG_DT_A_MASK (0xff << 0)
+
+/* DA9150_CORE_CFG_DATA_B = 0x308 */
+#define DA9150_CORE_CFG_DT_B_SHIFT 0
+#define DA9150_CORE_CFG_DT_B_MASK (0xff << 0)
+
+/* DA9150_CORE_CMD_A = 0x309 */
+#define DA9150_CORE_CMD_SHIFT 0
+#define DA9150_CORE_CMD_MASK (0xff << 0)
+
+/* DA9150_CORE_DATA_A = 0x30A */
+#define DA9150_CORE_DATA_0_SHIFT 0
+#define DA9150_CORE_DATA_0_MASK (0xff << 0)
+
+/* DA9150_CORE_DATA_B = 0x30B */
+#define DA9150_CORE_DATA_1_SHIFT 0
+#define DA9150_CORE_DATA_1_MASK (0xff << 0)
+
+/* DA9150_CORE_DATA_C = 0x30C */
+#define DA9150_CORE_DATA_2_SHIFT 0
+#define DA9150_CORE_DATA_2_MASK (0xff << 0)
+
+/* DA9150_CORE_DATA_D = 0x30D */
+#define DA9150_CORE_DATA_3_SHIFT 0
+#define DA9150_CORE_DATA_3_MASK (0xff << 0)
+
+/* DA9150_CORE2WIRE_STAT_A = 0x310 */
+#define DA9150_FW_FWDL_ERR_SHIFT 7
+#define DA9150_FW_FWDL_ERR_MASK BIT(7)
+
+/* DA9150_CORE2WIRE_CTRL_A = 0x311 */
+#define DA9150_FW_FWDL_EN_SHIFT 0
+#define DA9150_FW_FWDL_EN_MASK BIT(0)
+#define DA9150_FG_QIF_EN_SHIFT 1
+#define DA9150_FG_QIF_EN_MASK BIT(1)
+#define DA9150_CORE_BASE_ADDR_SHIFT 4
+#define DA9150_CORE_BASE_ADDR_MASK (0x0f << 4)
+
+/* DA9150_FW_CTRL_A = 0x312 */
+#define DA9150_FW_SEAL_SHIFT 0
+#define DA9150_FW_SEAL_MASK (0xff << 0)
+
+/* DA9150_FW_CTRL_C = 0x313 */
+#define DA9150_FW_FWDL_CRC_SHIFT 0
+#define DA9150_FW_FWDL_CRC_MASK (0xff << 0)
+
+/* DA9150_FW_CTRL_D = 0x314 */
+#define DA9150_FW_FWDL_BASE_SHIFT 0
+#define DA9150_FW_FWDL_BASE_MASK (0x0f << 0)
+
+/* DA9150_FG_CTRL_A = 0x315 */
+#define DA9150_FG_QIF_CODE_SHIFT 0
+#define DA9150_FG_QIF_CODE_MASK (0xff << 0)
+
+/* DA9150_FG_CTRL_B = 0x316 */
+#define DA9150_FG_QIF_VALUE_SHIFT 0
+#define DA9150_FG_QIF_VALUE_MASK (0xff << 0)
+
+/* DA9150_FW_CTRL_E = 0x317 */
+#define DA9150_FW_FWDL_SEG_SHIFT 0
+#define DA9150_FW_FWDL_SEG_MASK (0xff << 0)
+
+/* DA9150_FW_CTRL_B = 0x318 */
+#define DA9150_FW_FWDL_VALUE_SHIFT 0
+#define DA9150_FW_FWDL_VALUE_MASK (0xff << 0)
+
+/* DA9150_GPADC_CMAN = 0x320 */
+#define DA9150_GPADC_CEN_SHIFT 0
+#define DA9150_GPADC_CEN_MASK BIT(0)
+#define DA9150_GPADC_CMUX_SHIFT 1
+#define DA9150_GPADC_CMUX_MASK (0x1f << 1)
+
+/* DA9150_GPADC_CRES_A = 0x322 */
+#define DA9150_GPADC_CRES_H_SHIFT 0
+#define DA9150_GPADC_CRES_H_MASK (0xff << 0)
+
+/* DA9150_GPADC_CRES_B = 0x323 */
+#define DA9150_GPADC_CRUN_SHIFT 0
+#define DA9150_GPADC_CRUN_MASK BIT(0)
+#define DA9150_GPADC_CRES_L_SHIFT 6
+#define DA9150_GPADC_CRES_L_MASK (0x03 << 6)
+
+/* DA9150_CC_CFG_A = 0x328 */
+#define DA9150_CC_EN_SHIFT 0
+#define DA9150_CC_EN_MASK BIT(0)
+#define DA9150_CC_TIMEBASE_SHIFT 1
+#define DA9150_CC_TIMEBASE_MASK (0x03 << 1)
+#define DA9150_CC_CFG_SHIFT 5
+#define DA9150_CC_CFG_MASK (0x03 << 5)
+#define DA9150_CC_ENDLESS_MODE_SHIFT 7
+#define DA9150_CC_ENDLESS_MODE_MASK BIT(7)
+
+/* DA9150_CC_CFG_B = 0x329 */
+#define DA9150_CC_OPT_SHIFT 0
+#define DA9150_CC_OPT_MASK (0x03 << 0)
+#define DA9150_CC_PREAMP_SHIFT 2
+#define DA9150_CC_PREAMP_MASK (0x03 << 2)
+
+/* DA9150_CC_ICHG_RES_A = 0x32A */
+#define DA9150_CC_ICHG_RES_H_SHIFT 0
+#define DA9150_CC_ICHG_RES_H_MASK (0xff << 0)
+
+/* DA9150_CC_ICHG_RES_B = 0x32B */
+#define DA9150_CC_ICHG_RES_L_SHIFT 3
+#define DA9150_CC_ICHG_RES_L_MASK (0x1f << 3)
+
+/* DA9150_CC_IAVG_RES_A = 0x32C */
+#define DA9150_CC_IAVG_RES_H_SHIFT 0
+#define DA9150_CC_IAVG_RES_H_MASK (0xff << 0)
+
+/* DA9150_CC_IAVG_RES_B = 0x32D */
+#define DA9150_CC_IAVG_RES_L_SHIFT 0
+#define DA9150_CC_IAVG_RES_L_MASK (0xff << 0)
+
+/* DA9150_TAUX_CTRL_A = 0x330 */
+#define DA9150_TAUX_EN_SHIFT 0
+#define DA9150_TAUX_EN_MASK BIT(0)
+#define DA9150_TAUX_MOD_SHIFT 1
+#define DA9150_TAUX_MOD_MASK BIT(1)
+#define DA9150_TAUX_UPDATE_SHIFT 2
+#define DA9150_TAUX_UPDATE_MASK BIT(2)
+
+/* DA9150_TAUX_RELOAD_H = 0x332 */
+#define DA9150_TAUX_RLD_H_SHIFT 0
+#define DA9150_TAUX_RLD_H_MASK (0xff << 0)
+
+/* DA9150_TAUX_RELOAD_L = 0x333 */
+#define DA9150_TAUX_RLD_L_SHIFT 3
+#define DA9150_TAUX_RLD_L_MASK (0x1f << 3)
+
+/* DA9150_TAUX_VALUE_H = 0x334 */
+#define DA9150_TAUX_VAL_H_SHIFT 0
+#define DA9150_TAUX_VAL_H_MASK (0xff << 0)
+
+/* DA9150_TAUX_VALUE_L = 0x335 */
+#define DA9150_TAUX_VAL_L_SHIFT 3
+#define DA9150_TAUX_VAL_L_MASK (0x1f << 3)
+
+/* DA9150_AUX_DATA_0 = 0x338 */
+#define DA9150_AUX_DAT_0_SHIFT 0
+#define DA9150_AUX_DAT_0_MASK (0xff << 0)
+
+/* DA9150_AUX_DATA_1 = 0x339 */
+#define DA9150_AUX_DAT_1_SHIFT 0
+#define DA9150_AUX_DAT_1_MASK (0xff << 0)
+
+/* DA9150_AUX_DATA_2 = 0x33A */
+#define DA9150_AUX_DAT_2_SHIFT 0
+#define DA9150_AUX_DAT_2_MASK (0xff << 0)
+
+/* DA9150_AUX_DATA_3 = 0x33B */
+#define DA9150_AUX_DAT_3_SHIFT 0
+#define DA9150_AUX_DAT_3_MASK (0xff << 0)
+
+/* DA9150_BIF_CTRL = 0x340 */
+#define DA9150_BIF_ISRC_EN_SHIFT 0
+#define DA9150_BIF_ISRC_EN_MASK BIT(0)
+
+/* DA9150_TBAT_CTRL_A = 0x342 */
+#define DA9150_TBAT_EN_SHIFT 0
+#define DA9150_TBAT_EN_MASK BIT(0)
+#define DA9150_TBAT_SW1_SHIFT 1
+#define DA9150_TBAT_SW1_MASK BIT(1)
+#define DA9150_TBAT_SW2_SHIFT 2
+#define DA9150_TBAT_SW2_MASK BIT(2)
+
+/* DA9150_TBAT_CTRL_B = 0x343 */
+#define DA9150_TBAT_SW_FRC_SHIFT 0
+#define DA9150_TBAT_SW_FRC_MASK BIT(0)
+#define DA9150_TBAT_STAT_SW1_SHIFT 1
+#define DA9150_TBAT_STAT_SW1_MASK BIT(1)
+#define DA9150_TBAT_STAT_SW2_SHIFT 2
+#define DA9150_TBAT_STAT_SW2_MASK BIT(2)
+#define DA9150_TBAT_HIGH_CURR_SHIFT 3
+#define DA9150_TBAT_HIGH_CURR_MASK BIT(3)
+
+/* DA9150_TBAT_RES_A = 0x344 */
+#define DA9150_TBAT_RES_H_SHIFT 0
+#define DA9150_TBAT_RES_H_MASK (0xff << 0)
+
+/* DA9150_TBAT_RES_B = 0x345 */
+#define DA9150_TBAT_RES_DIS_SHIFT 0
+#define DA9150_TBAT_RES_DIS_MASK BIT(0)
+#define DA9150_TBAT_RES_L_SHIFT 6
+#define DA9150_TBAT_RES_L_MASK (0x03 << 6)
+
+#endif /* __DA9150_REGISTERS_H */
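
The DA9150 field macros above always come in _SHIFT/_MASK pairs: mask the register value, then shift the field down (or shift a new value up before masking it in). Below is a minimal sketch of the read side, assuming a regmap handle set up by the DA9150 MFD core and the DA9150_PPR_CHGCTRL_B address macro defined earlier in this header; the helper name is invented for illustration.

#include <linux/regmap.h>

/* Illustrative only: extract the CHG_VBAT field from PPR_CHGCTRL_B */
static int da9150_example_read_chg_vbat(struct regmap *map, unsigned int *sel)
{
	unsigned int val;
	int ret;

	ret = regmap_read(map, DA9150_PPR_CHGCTRL_B, &val);
	if (ret)
		return ret;

	/* Mask out the field, then shift it down to a plain selector value */
	*sel = (val & DA9150_CHG_VBAT_MASK) >> DA9150_CHG_VBAT_SHIFT;

	return 0;
}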
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
new file mode 100644
index 000000000..8e1cdbef3
--- /dev/null
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -0,0 +1,121 @@
+/*
+ * DaVinci Voice Codec Core Interface for TI platforms
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
+#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_data/edma.h>
+
+#include <mach/hardware.h>
+
+struct regmap;
+
+/*
+ * Register values.
+ */
+#define DAVINCI_VC_PID 0x00
+#define DAVINCI_VC_CTRL 0x04
+#define DAVINCI_VC_INTEN 0x08
+#define DAVINCI_VC_INTSTATUS 0x0c
+#define DAVINCI_VC_INTCLR 0x10
+#define DAVINCI_VC_EMUL_CTRL 0x14
+#define DAVINCI_VC_RFIFO 0x20
+#define DAVINCI_VC_WFIFO 0x24
+#define DAVINCI_VC_FIFOSTAT 0x28
+#define DAVINCI_VC_TST_CTRL 0x2C
+#define DAVINCI_VC_REG05 0x94
+#define DAVINCI_VC_REG09 0xA4
+#define DAVINCI_VC_REG12 0xB0
+
+/* DAVINCI_VC_CTRL bit fields */
+#define DAVINCI_VC_CTRL_MASK 0x5500
+#define DAVINCI_VC_CTRL_RSTADC BIT(0)
+#define DAVINCI_VC_CTRL_RSTDAC BIT(1)
+#define DAVINCI_VC_CTRL_RD_BITS_8 BIT(4)
+#define DAVINCI_VC_CTRL_RD_UNSIGNED BIT(5)
+#define DAVINCI_VC_CTRL_WD_BITS_8 BIT(6)
+#define DAVINCI_VC_CTRL_WD_UNSIGNED BIT(7)
+#define DAVINCI_VC_CTRL_RFIFOEN BIT(8)
+#define DAVINCI_VC_CTRL_RFIFOCL BIT(9)
+#define DAVINCI_VC_CTRL_RFIFOMD_WORD_1 BIT(10)
+#define DAVINCI_VC_CTRL_WFIFOEN BIT(12)
+#define DAVINCI_VC_CTRL_WFIFOCL BIT(13)
+#define DAVINCI_VC_CTRL_WFIFOMD_WORD_1 BIT(14)
+
+/* DAVINCI_VC_INT bit fields */
+#define DAVINCI_VC_INT_MASK 0x3F
+#define DAVINCI_VC_INT_RDRDY_MASK BIT(0)
+#define DAVINCI_VC_INT_RERROVF_MASK BIT(1)
+#define DAVINCI_VC_INT_RERRUDR_MASK BIT(2)
+#define DAVINCI_VC_INT_WDREQ_MASK BIT(3)
+#define DAVINCI_VC_INT_WERROVF_MASK BIT(4)
+#define DAVINCI_VC_INT_WERRUDR_MASK BIT(5)
+
+/* DAVINCI_VC_REG05 bit fields */
+#define DAVINCI_VC_REG05_PGA_GAIN 0x07
+
+/* DAVINCI_VC_REG09 bit fields */
+#define DAVINCI_VC_REG09_MUTE 0x40
+#define DAVINCI_VC_REG09_DIG_ATTEN 0x3F
+
+/* DAVINCI_VC_REG12 bit fields */
+#define DAVINCI_VC_REG12_POWER_ALL_ON 0xFD
+#define DAVINCI_VC_REG12_POWER_ALL_OFF 0x00
+
+#define DAVINCI_VC_CELLS 2
+
+enum davinci_vc_cells {
+ DAVINCI_VC_VCIF_CELL,
+ DAVINCI_VC_CQ93VC_CELL,
+};
+
+struct davinci_vcif {
+ struct platform_device *pdev;
+ u32 dma_tx_channel;
+ u32 dma_rx_channel;
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_rx_addr;
+};
+
+struct davinci_vc;
+
+struct davinci_vc {
+ /* Device data */
+ struct device *dev;
+ struct platform_device *pdev;
+ struct clk *clk;
+
+ /* Memory resources */
+ void __iomem *base;
+ struct regmap *regmap;
+
+ /* MFD cells */
+ struct mfd_cell cells[DAVINCI_VC_CELLS];
+
+ /* Client devices */
+ struct davinci_vcif davinci_vcif;
+};
+
+#endif
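
As a quick illustration of how the CTRL bit definitions pair with the regmap handle carried in struct davinci_vc, here is a minimal, hypothetical helper; the function name is invented, and it assumes vc->regmap was already set up by the MFD core.

#include <linux/regmap.h>

/* Illustrative only: enable both the read and write FIFOs */
static int davinci_vc_example_enable_fifos(struct davinci_vc *vc)
{
	unsigned int bits = DAVINCI_VC_CTRL_RFIFOEN | DAVINCI_VC_CTRL_WFIFOEN;

	return regmap_update_bits(vc->regmap, DAVINCI_VC_CTRL, bits, bits);
}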
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
new file mode 100644
index 000000000..0bd69446b
--- /dev/null
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -0,0 +1,772 @@
+/*
+ * Copyright (C) STMicroelectronics 2009
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
+ *
+ * PRCMU f/w APIs
+ */
+#ifndef __MFD_DB8500_PRCMU_H
+#define __MFD_DB8500_PRCMU_H
+
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+
+/*
+ * Registers
+ */
+#define DB8500_PRCM_LINE_VALUE 0x170
+#define DB8500_PRCM_LINE_VALUE_HSI_CAWAKE0 BIT(3)
+
+#define DB8500_PRCM_DSI_SW_RESET 0x324
+#define DB8500_PRCM_DSI_SW_RESET_DSI0_SW_RESETN BIT(0)
+#define DB8500_PRCM_DSI_SW_RESET_DSI1_SW_RESETN BIT(1)
+#define DB8500_PRCM_DSI_SW_RESET_DSI2_SW_RESETN BIT(2)
+
+/* This portion previously known as <mach/prcmu-fw-defs_v1.h> */
+
+/**
+ * enum state - ON/OFF state definition
+ * @OFF: State is OFF
+ * @ON: State is ON
+ *
+ */
+enum state {
+ OFF = 0x0,
+ ON = 0x1,
+};
+
+/**
+ * enum ret_state - general purpose On/Off/Retention states
+ *
+ */
+enum ret_state {
+ OFFST = 0,
+ ONST = 1,
+ RETST = 2
+};
+
+/**
+ * enum clk_arm - ARM Cortex A9 clock schemes
+ * @A9_OFF:
+ * @A9_BOOT:
+ * @A9_OPPT1:
+ * @A9_OPPT2:
+ * @A9_EXTCLK:
+ */
+enum clk_arm {
+ A9_OFF,
+ A9_BOOT,
+ A9_OPPT1,
+ A9_OPPT2,
+ A9_EXTCLK
+};
+
+/**
+ * enum clk_gen - GEN#0/GEN#1 clock schemes
+ * @GEN_OFF:
+ * @GEN_BOOT:
+ * @GEN_OPPT1:
+ */
+enum clk_gen {
+ GEN_OFF,
+ GEN_BOOT,
+ GEN_OPPT1,
+};
+
+/* some information between arm and xp70 */
+
+/**
+ * enum romcode_write - Romcode message written by A9 AND read by XP70
+ * @RDY_2_DS: Value set when ApDeepSleep state can be executed by XP70
+ * @RDY_2_XP70_RST: Value set when 0x0F has been successfully polled by the
+ * romcode. The xp70 will go into self-reset
+ */
+enum romcode_write {
+ RDY_2_DS = 0x09,
+ RDY_2_XP70_RST = 0x10
+};
+
+/**
+ * enum romcode_read - Romcode message written by XP70 and read by A9
+ * @INIT: Init value when romcode field is not used
+ * @FS_2_DS: Value set when power state is going from ApExecute to
+ * ApDeepSleep
+ * @END_DS: Value set when ApDeepSleep power state is reached coming from
+ * ApExecute state
+ * @DS_TO_FS: Value set when power state is going from ApDeepSleep to
+ * ApExecute
+ * @END_FS: Value set when ApExecute power state is reached coming from
+ * ApDeepSleep state
+ * @SWR: Value set when power state is going to ApReset
+ * @END_SWR: Value set when the xp70 finished executing ApReset actions and
+ * waits for romcode acknowledgment to go to self-reset
+ */
+enum romcode_read {
+ INIT = 0x00,
+ FS_2_DS = 0x0A,
+ END_DS = 0x0B,
+ DS_TO_FS = 0x0C,
+ END_FS = 0x0D,
+ SWR = 0x0E,
+ END_SWR = 0x0F
+};
+
+/**
+ * enum ap_pwrst - current power states defined in PRCMU firmware
+ * @NO_PWRST: Current power state init
+ * @AP_BOOT: Current power state is apBoot
+ * @AP_EXECUTE: Current power state is apExecute
+ * @AP_DEEP_SLEEP: Current power state is apDeepSleep
+ * @AP_SLEEP: Current power state is apSleep
+ * @AP_IDLE: Current power state is apIdle
+ * @AP_RESET: Current power state is apReset
+ */
+enum ap_pwrst {
+ NO_PWRST = 0x00,
+ AP_BOOT = 0x01,
+ AP_EXECUTE = 0x02,
+ AP_DEEP_SLEEP = 0x03,
+ AP_SLEEP = 0x04,
+ AP_IDLE = 0x05,
+ AP_RESET = 0x06
+};
+
+/**
+ * enum ap_pwrst_trans - Transition states defined in PRCMU firmware
+ * @NO_TRANSITION: No power state transition
+ * @APEXECUTE_TO_APSLEEP: Power state transition from ApExecute to ApSleep
+ * @APIDLE_TO_APSLEEP: Power state transition from ApIdle to ApSleep
+ * @APBOOT_TO_APEXECUTE: Power state transition from ApBoot to ApExecute
+ * @APEXECUTE_TO_APDEEPSLEEP: Power state transition from ApExecute to
+ * ApDeepSleep
+ * @APEXECUTE_TO_APIDLE: Power state transition from ApExecute to ApIdle
+ */
+enum ap_pwrst_trans {
+ PRCMU_AP_NO_CHANGE = 0x00,
+ APEXECUTE_TO_APSLEEP = 0x01,
+ APIDLE_TO_APSLEEP = 0x02, /* To be removed */
+ PRCMU_AP_SLEEP = 0x01,
+ APBOOT_TO_APEXECUTE = 0x03,
+ APEXECUTE_TO_APDEEPSLEEP = 0x04, /* To be removed */
+ PRCMU_AP_DEEP_SLEEP = 0x04,
+ APEXECUTE_TO_APIDLE = 0x05, /* To be removed */
+ PRCMU_AP_IDLE = 0x05,
+ PRCMU_AP_DEEP_IDLE = 0x07,
+};
+
+/**
+ * enum hw_acc_state - State definition for hardware accelerator
+ * @HW_NO_CHANGE: The hardware accelerator state must remain unchanged
+ * @HW_OFF: The hardware accelerator must be switched off
+ * @HW_OFF_RAMRET: The hardware accelerator must be switched off with its
+ * internal RAM in retention
+ * @HW_ON: The hardware accelerator must be switched on
+ *
+ * NOTE! Deprecated, to be removed when all users have switched over to the
+ * regulator API.
+ */
+enum hw_acc_state {
+ HW_NO_CHANGE = 0x00,
+ HW_OFF = 0x01,
+ HW_OFF_RAMRET = 0x02,
+ HW_ON = 0x04
+};
+
+/**
+ * enum mbox_2_arm_stat - Status messages definition for mbox_arm
+ * @BOOT_TO_EXECUTEOK: The apBoot to apExecute state transition has been
+ * completed
+ * @DEEPSLEEPOK: The apExecute to apDeepSleep state transition has been
+ * completed
+ * @SLEEPOK: The apExecute to apSleep state transition has been completed
+ * @IDLEOK: The apExecute to apIdle state transition has been completed
+ * @SOFTRESETOK: The A9 watchdog/SoftReset state has been completed
+ * @SOFTRESETGO: The A9 watchdog/SoftReset state is ongoing
+ * @BOOT_TO_EXECUTE: The apBoot to apExecute state transition is ongoing
+ * @EXECUTE_TO_DEEPSLEEP: The apExecute to apDeepSleep state transition is
+ * ongoing
+ * @DEEPSLEEP_TO_EXECUTE: The apDeepSleep to apExecute state transition is
+ * ongoing
+ * @DEEPSLEEP_TO_EXECUTEOK: The apDeepSleep to apExecute state transition has
+ * been completed
+ * @EXECUTE_TO_SLEEP: The apExecute to apSleep state transition is ongoing
+ * @SLEEP_TO_EXECUTE: The apSleep to apExecute state transition is ongoing
+ * @SLEEP_TO_EXECUTEOK: The apSleep to apExecute state transition has been
+ * completed
+ * @EXECUTE_TO_IDLE: The apExecute to apIdle state transition is ongoing
+ * @IDLE_TO_EXECUTE: The apIdle to apExecute state transition is ongoing
+ * @IDLE_TO_EXECUTEOK: The apIdle to apExecute state transition has been
+ * completed
+ * @INIT_STATUS: Status init
+ */
+enum ap_pwrsttr_status {
+ BOOT_TO_EXECUTEOK = 0xFF,
+ DEEPSLEEPOK = 0xFE,
+ SLEEPOK = 0xFD,
+ IDLEOK = 0xFC,
+ SOFTRESETOK = 0xFB,
+ SOFTRESETGO = 0xFA,
+ BOOT_TO_EXECUTE = 0xF9,
+ EXECUTE_TO_DEEPSLEEP = 0xF8,
+ DEEPSLEEP_TO_EXECUTE = 0xF7,
+ DEEPSLEEP_TO_EXECUTEOK = 0xF6,
+ EXECUTE_TO_SLEEP = 0xF5,
+ SLEEP_TO_EXECUTE = 0xF4,
+ SLEEP_TO_EXECUTEOK = 0xF3,
+ EXECUTE_TO_IDLE = 0xF2,
+ IDLE_TO_EXECUTE = 0xF1,
+ IDLE_TO_EXECUTEOK = 0xF0,
+ RDYTODS_RETURNTOEXE = 0xEF,
+ NORDYTODS_RETURNTOEXE = 0xEE,
+ EXETOSLEEP_RETURNTOEXE = 0xED,
+ EXETOIDLE_RETURNTOEXE = 0xEC,
+ INIT_STATUS = 0xEB,
+
+ /* error messages */
+ INITERROR = 0x00,
+ PLLARMLOCKP_ER = 0x01,
+ PLLDDRLOCKP_ER = 0x02,
+ PLLSOCLOCKP_ER = 0x03,
+ PLLSOCK1LOCKP_ER = 0x04,
+ ARMWFI_ER = 0x05,
+ SYSCLKOK_ER = 0x06,
+ I2C_NACK_DATA_ER = 0x07,
+ BOOT_ER = 0x08,
+ I2C_STATUS_ALWAYS_1 = 0x0A,
+ I2C_NACK_REG_ADDR_ER = 0x0B,
+ I2C_NACK_DATA0123_ER = 0x1B,
+ I2C_NACK_ADDR_ER = 0x1F,
+ CURAPPWRSTISNOT_BOOT = 0x20,
+ CURAPPWRSTISNOT_EXECUTE = 0x21,
+ CURAPPWRSTISNOT_SLEEPMODE = 0x22,
+ CURAPPWRSTISNOT_CORRECTFORIT10 = 0x23,
+ FIFO4500WUISNOT_WUPEVENT = 0x24,
+ PLL32KLOCKP_ER = 0x29,
+ DDRDEEPSLEEPOK_ER = 0x2A,
+ ROMCODEREADY_ER = 0x50,
+ WUPBEFOREDS = 0x51,
+ DDRCONFIG_ER = 0x52,
+ WUPBEFORESLEEP = 0x53,
+ WUPBEFOREIDLE = 0x54
+}; /* earlier called mbox_2_arm_stat */
+
+/**
+ * enum dvfs_stat - DVFS status messages definition
+ * @DVFS_GO: A state transition DVFS is on going
+ * @DVFS_ARM100OPPOK: The state transition DVFS has been completed for 100OPP
+ * @DVFS_ARM50OPPOK: The state transition DVFS has been completed for 50OPP
+ * @DVFS_ARMEXTCLKOK: The state transition DVFS has been completed for EXTCLK
+ * @DVFS_NOCHGTCLKOK: The state transition DVFS has been completed for
+ * NOCHGCLK
+ * @DVFS_INITSTATUS: Value init
+ */
+enum dvfs_stat {
+ DVFS_GO = 0xFF,
+ DVFS_ARM100OPPOK = 0xFE,
+ DVFS_ARM50OPPOK = 0xFD,
+ DVFS_ARMEXTCLKOK = 0xFC,
+ DVFS_NOCHGTCLKOK = 0xFB,
+ DVFS_INITSTATUS = 0x00
+};
+
+/**
+ * enum sva_mmdsp_stat - SVA MMDSP status messages
+ * @SVA_MMDSP_GO: SVAMMDSP interrupt has happened
+ * @SVA_MMDSP_INIT: Status init
+ */
+enum sva_mmdsp_stat {
+ SVA_MMDSP_GO = 0xFF,
+ SVA_MMDSP_INIT = 0x00
+};
+
+/**
+ * enum sia_mmdsp_stat - SIA MMDSP status messages
+ * @SIA_MMDSP_GO: SIAMMDSP interrupt has happened
+ * @SIA_MMDSP_INIT: Status init
+ */
+enum sia_mmdsp_stat {
+ SIA_MMDSP_GO = 0xFF,
+ SIA_MMDSP_INIT = 0x00
+};
+
+/**
+ * enum mbox_to_arm_err - Error messages definition
+ * @INIT_ERR: Init value
+ * @PLLARMLOCKP_ERR: PLLARM has not been correctly locked in given time
+ * @PLLDDRLOCKP_ERR: PLLDDR has not been correctly locked in the given time
+ * @PLLSOC0LOCKP_ERR: PLLSOC0 has not been correctly locked in the given time
+ * @PLLSOC1LOCKP_ERR: PLLSOC1 has not been correctly locked in the given time
+ * @ARMWFI_ERR: The ARM WFI has not been correctly executed in the given time
+ * @SYSCLKOK_ERR: The SYSCLK is not available in the given time
+ * @BOOT_ERR: Romcode has not validated the XP70 self reset in the given time
+ * @ROMCODESAVECONTEXT: The Romcode didn't correctly save its secure context
+ * @VARMHIGHSPEEDVALTO_ERR: The ARM high speed supply value transferred
+ * through I2C has not been correctly executed in the given time
+ * @VARMHIGHSPEEDACCESS_ERR: The command value of VarmHighSpeedVal transferred
+ * through I2C has not been correctly executed in the given time
+ * @VARMLOWSPEEDVALTO_ERR: The ARM low speed supply value transferred through
+ * I2C has not been correctly executed in the given time
+ * @VARMLOWSPEEDACCESS_ERR: The command value of VarmLowSpeedVal transferred
+ * through I2C has not been correctly executed in the given time
+ * @VARMRETENTIONVALTO_ERR: The ARM retention supply value transferred through
+ * I2C has not been correctly executed in the given time
+ * @VARMRETENTIONACCESS_ERR: The command value of VarmRetentionVal transferred
+ * through I2C has not been correctly executed in the given time
+ * @VAPEHIGHSPEEDVALTO_ERR: The APE high speed supply value transferred through
+ * I2C has not been correctly executed in the given time
+ * @VSAFEHPVALTO_ERR: The SAFE high power supply value transferred through I2C
+ * has not been correctly executed in the given time
+ * @VMODSEL1VALTO_ERR: The MODEM sel1 supply value transferred through I2C has
+ * not been correctly executed in the given time
+ * @VMODSEL2VALTO_ERR: The MODEM sel2 supply value transferred through I2C has
+ * not been correctly executed in the given time
+ * @VARMOFFACCESS_ERR: The command value of Varm ON/OFF transferred through
+ * I2C has not been correctly executed in the given time
+ * @VAPEOFFACCESS_ERR: The command value of Vape ON/OFF transferred through
+ * I2C has not been correctly executed in the given time
+ * @VARMRETACCES_ERR: The command value of Varm retention ON/OFF transferred
+ * through I2C has not been correctly executed in the given time
+ * @CURAPPWRSTISNOTBOOT: Generated when the ARM wants to do a power state
+ * transition from ApBoot to ApExecute but the current power state is not
+ * ApBoot
+ * @CURAPPWRSTISNOTEXECUTE: Generated when the ARM wants to do a power state
+ * transition from ApExecute to another power state but the
+ * current power state is not ApExecute
+ * @CURAPPWRSTISNOTSLEEPMODE: Generated when wake up events are transmitted
+ * but the current power state is not ApDeepSleep/ApSleep/ApIdle
+ * @CURAPPWRSTISNOTCORRECTDBG: Generated when wake up events are transmitted
+ * but the current power state is not correct
+ * @ARMREGU1VALTO_ERR: The ArmRegu1 value transferred through I2C has not
+ * been correctly executed in the given time
+ * @ARMREGU2VALTO_ERR: The ArmRegu2 value transferred through I2C has not
+ * been correctly executed in the given time
+ * @VAPEREGUVALTO_ERR: The VApeRegu value transferred through I2C has not
+ * been correctly executed in the given time
+ * @VSMPS3REGUVALTO_ERR: The VSmps3Regu value transferred through I2C has not
+ * been correctly executed in the given time
+ * @VMODREGUVALTO_ERR: The VModemRegu value transferred through I2C has not
+ * been correctly executed in the given time
+ */
+enum mbox_to_arm_err {
+ INIT_ERR = 0x00,
+ PLLARMLOCKP_ERR = 0x01,
+ PLLDDRLOCKP_ERR = 0x02,
+ PLLSOC0LOCKP_ERR = 0x03,
+ PLLSOC1LOCKP_ERR = 0x04,
+ ARMWFI_ERR = 0x05,
+ SYSCLKOK_ERR = 0x06,
+ BOOT_ERR = 0x07,
+ ROMCODESAVECONTEXT = 0x08,
+ VARMHIGHSPEEDVALTO_ERR = 0x10,
+ VARMHIGHSPEEDACCESS_ERR = 0x11,
+ VARMLOWSPEEDVALTO_ERR = 0x12,
+ VARMLOWSPEEDACCESS_ERR = 0x13,
+ VARMRETENTIONVALTO_ERR = 0x14,
+ VARMRETENTIONACCESS_ERR = 0x15,
+ VAPEHIGHSPEEDVALTO_ERR = 0x16,
+ VSAFEHPVALTO_ERR = 0x17,
+ VMODSEL1VALTO_ERR = 0x18,
+ VMODSEL2VALTO_ERR = 0x19,
+ VARMOFFACCESS_ERR = 0x1A,
+ VAPEOFFACCESS_ERR = 0x1B,
+ VARMRETACCES_ERR = 0x1C,
+ CURAPPWRSTISNOTBOOT = 0x20,
+ CURAPPWRSTISNOTEXECUTE = 0x21,
+ CURAPPWRSTISNOTSLEEPMODE = 0x22,
+ CURAPPWRSTISNOTCORRECTDBG = 0x23,
+ ARMREGU1VALTO_ERR = 0x24,
+ ARMREGU2VALTO_ERR = 0x25,
+ VAPEREGUVALTO_ERR = 0x26,
+ VSMPS3REGUVALTO_ERR = 0x27,
+ VMODREGUVALTO_ERR = 0x28
+};
+
+enum hw_acc {
+ SVAMMDSP = 0,
+ SVAPIPE = 1,
+ SIAMMDSP = 2,
+ SIAPIPE = 3,
+ SGA = 4,
+ B2R2MCDE = 5,
+ ESRAM12 = 6,
+ ESRAM34 = 7,
+};
+
+enum cs_pwrmgt {
+ PWRDNCS0 = 0,
+ WKUPCS0 = 1,
+ PWRDNCS1 = 2,
+ WKUPCS1 = 3
+};
+
+/* Defs related to autonomous power management */
+
+/**
+ * enum sia_sva_pwr_policy - Power policy
+ * @NO_CHGT: No change
+ * @DSPOFF_HWPOFF:
+ * @DSPOFFRAMRET_HWPOFF:
+ * @DSPCLKOFF_HWPOFF:
+ * @DSPCLKOFF_HWPCLKOFF:
+ *
+ */
+enum sia_sva_pwr_policy {
+ NO_CHGT = 0x0,
+ DSPOFF_HWPOFF = 0x1,
+ DSPOFFRAMRET_HWPOFF = 0x2,
+ DSPCLKOFF_HWPOFF = 0x3,
+ DSPCLKOFF_HWPCLKOFF = 0x4,
+};
+
+/**
+ * enum auto_enable - Auto Power enable
+ * @AUTO_OFF:
+ * @AUTO_ON:
+ *
+ */
+enum auto_enable {
+ AUTO_OFF = 0x0,
+ AUTO_ON = 0x1,
+};
+
+/* End of file previously known as prcmu-fw-defs_v1.h */
+
+/**
+ * enum prcmu_power_status - results from set_power_state
+ * @PRCMU_SLEEP_OK: Sleep went ok
+ * @PRCMU_DEEP_SLEEP_OK: DeepSleep went ok
+ * @PRCMU_IDLE_OK: Idle went ok
+ * @PRCMU_DEEPIDLE_OK: DeepIdle went ok
+ * @PRCMU_PRCMU2ARMPENDINGIT_ER: Pending interrupt detected
+ * @PRCMU_ARMPENDINGIT_ER: Pending interrupt detected
+ *
+ */
+enum prcmu_power_status {
+ PRCMU_SLEEP_OK = 0xf3,
+ PRCMU_DEEP_SLEEP_OK = 0xf6,
+ PRCMU_IDLE_OK = 0xf0,
+ PRCMU_DEEPIDLE_OK = 0xe3,
+ PRCMU_PRCMU2ARMPENDINGIT_ER = 0x91,
+ PRCMU_ARMPENDINGIT_ER = 0x93,
+};
+
+/*
+ * Definitions for autonomous power management configuration.
+ */
+
+#define PRCMU_AUTO_PM_OFF 0
+#define PRCMU_AUTO_PM_ON 1
+
+#define PRCMU_AUTO_PM_POWER_ON_HSEM BIT(0)
+#define PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT BIT(1)
+
+enum prcmu_auto_pm_policy {
+ PRCMU_AUTO_PM_POLICY_NO_CHANGE,
+ PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
+ PRCMU_AUTO_PM_POLICY_DSP_OFF_RAMRET_HWP_OFF,
+ PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_OFF,
+ PRCMU_AUTO_PM_POLICY_DSP_CLK_OFF_HWP_CLK_OFF,
+};
+
+/**
+ * struct prcmu_auto_pm_config - Autonomous power management configuration.
+ * @sia_auto_pm_enable: SIA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON})
+ * @sia_power_on: SIA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask)
+ * @sia_policy: SIA power policy. (enum prcmu_auto_pm_policy)
+ * @sva_auto_pm_enable: SVA autonomous pm enable. (PRCMU_AUTO_PM_{OFF,ON})
+ * @sva_power_on: SVA power ON enable. (PRCMU_AUTO_PM_POWER_ON_* bitmask)
+ * @sva_policy: SVA power policy. (enum prcmu_auto_pm_policy)
+ */
+struct prcmu_auto_pm_config {
+ u8 sia_auto_pm_enable;
+ u8 sia_power_on;
+ u8 sia_policy;
+ u8 sva_auto_pm_enable;
+ u8 sva_power_on;
+ u8 sva_policy;
+};
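
struct prcmu_auto_pm_config is handed over in pairs (one configuration for sleep, one for idle) to prcmu_configure_auto_pm(), declared a few lines below. A hedged sketch follows; the field values are illustrative, not recommended settings.

/* Illustrative only: autonomous PM active during sleep, disabled in idle */
static void example_configure_auto_pm(void)
{
	struct prcmu_auto_pm_config sleep_cfg = {
		.sia_auto_pm_enable = PRCMU_AUTO_PM_ON,
		.sia_power_on = PRCMU_AUTO_PM_POWER_ON_HSEM,
		.sia_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
		.sva_auto_pm_enable = PRCMU_AUTO_PM_ON,
		.sva_power_on = PRCMU_AUTO_PM_POWER_ON_ABB_FIFO_IT,
		.sva_policy = PRCMU_AUTO_PM_POLICY_DSP_OFF_HWP_OFF,
	};
	struct prcmu_auto_pm_config idle_cfg = {
		.sia_auto_pm_enable = PRCMU_AUTO_PM_OFF,
		.sva_auto_pm_enable = PRCMU_AUTO_PM_OFF,
	};

	prcmu_configure_auto_pm(&sleep_cfg, &idle_cfg);
}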
+
+#ifdef CONFIG_MFD_DB8500_PRCMU
+
+void db8500_prcmu_early_init(u32 phy_base, u32 size);
+int prcmu_set_rc_a2p(enum romcode_write);
+enum romcode_read prcmu_get_rc_p2a(void);
+enum ap_pwrst prcmu_get_xp70_current_state(void);
+bool prcmu_has_arm_maxopp(void);
+struct prcmu_fw_version *prcmu_get_fw_version(void);
+int prcmu_release_usb_wakeup_state(void);
+void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
+ struct prcmu_auto_pm_config *idle);
+bool prcmu_is_auto_pm_enabled(void);
+
+int prcmu_config_clkout(u8 clkout, u8 source, u8 div);
+int prcmu_set_clock_divider(u8 clock, u8 divider);
+int db8500_prcmu_config_hotdog(u8 threshold);
+int db8500_prcmu_config_hotmon(u8 low, u8 high);
+int db8500_prcmu_start_temp_sense(u16 cycles32k);
+int db8500_prcmu_stop_temp_sense(void);
+int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
+int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
+
+int prcmu_ac_wake_req(void);
+void prcmu_ac_sleep_req(void);
+void db8500_prcmu_modem_reset(void);
+
+int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off);
+int db8500_prcmu_enable_a9wdog(u8 id);
+int db8500_prcmu_disable_a9wdog(u8 id);
+int db8500_prcmu_kick_a9wdog(u8 id);
+int db8500_prcmu_load_a9wdog(u8 id, u32 val);
+
+void db8500_prcmu_system_reset(u16 reset_code);
+int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll);
+u8 db8500_prcmu_get_power_state_result(void);
+void db8500_prcmu_enable_wakeups(u32 wakeups);
+int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state);
+int db8500_prcmu_request_clock(u8 clock, bool enable);
+int db8500_prcmu_set_display_clocks(void);
+int db8500_prcmu_disable_dsipll(void);
+int db8500_prcmu_enable_dsipll(void);
+void db8500_prcmu_config_abb_event_readout(u32 abb_events);
+void db8500_prcmu_get_abb_event_buffer(void __iomem **buf);
+int db8500_prcmu_config_esram0_deep_sleep(u8 state);
+u16 db8500_prcmu_get_reset_code(void);
+bool db8500_prcmu_is_ac_wake_requested(void);
+int db8500_prcmu_set_arm_opp(u8 opp);
+int db8500_prcmu_get_arm_opp(void);
+int db8500_prcmu_set_ape_opp(u8 opp);
+int db8500_prcmu_get_ape_opp(void);
+int db8500_prcmu_request_ape_opp_100_voltage(bool enable);
+int db8500_prcmu_set_ddr_opp(u8 opp);
+int db8500_prcmu_get_ddr_opp(void);
+
+u32 db8500_prcmu_read(unsigned int reg);
+void db8500_prcmu_write(unsigned int reg, u32 value);
+void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value);
+
+#else /* !CONFIG_MFD_DB8500_PRCMU */
+
+static inline void db8500_prcmu_early_init(u32 phy_base, u32 size) {}
+
+static inline int prcmu_set_rc_a2p(enum romcode_write code)
+{
+ return 0;
+}
+
+static inline enum romcode_read prcmu_get_rc_p2a(void)
+{
+ return INIT;
+}
+
+static inline enum ap_pwrst prcmu_get_xp70_current_state(void)
+{
+ return AP_EXECUTE;
+}
+
+static inline bool prcmu_has_arm_maxopp(void)
+{
+ return false;
+}
+
+static inline struct prcmu_fw_version *prcmu_get_fw_version(void)
+{
+ return NULL;
+}
+
+static inline int db8500_prcmu_set_ape_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_get_ape_opp(void)
+{
+ return APE_100_OPP;
+}
+
+static inline int db8500_prcmu_request_ape_opp_100_voltage(bool enable)
+{
+ return 0;
+}
+
+static inline int prcmu_release_usb_wakeup_state(void)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_set_ddr_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_get_ddr_opp(void)
+{
+ return DDR_100_OPP;
+}
+
+static inline void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
+ struct prcmu_auto_pm_config *idle)
+{
+}
+
+static inline bool prcmu_is_auto_pm_enabled(void)
+{
+ return false;
+}
+
+static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
+{
+ return 0;
+}
+
+static inline int prcmu_set_clock_divider(u8 clock, u8 divider)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_config_hotdog(u8 threshold)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_config_hotmon(u8 low, u8 high)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_start_temp_sense(u16 cycles32k)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_stop_temp_sense(void)
+{
+ return 0;
+}
+
+static inline int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
+{
+ return -ENOSYS;
+}
+
+static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
+{
+ return -ENOSYS;
+}
+
+static inline int prcmu_ac_wake_req(void)
+{
+ return 0;
+}
+
+static inline void prcmu_ac_sleep_req(void) {}
+
+static inline void db8500_prcmu_modem_reset(void) {}
+
+static inline void db8500_prcmu_system_reset(u16 reset_code) {}
+
+static inline int db8500_prcmu_set_power_state(u8 state, bool keep_ulp_clk,
+ bool keep_ap_pll)
+{
+ return 0;
+}
+
+static inline u8 db8500_prcmu_get_power_state_result(void)
+{
+ return 0;
+}
+
+static inline void db8500_prcmu_enable_wakeups(u32 wakeups) {}
+
+static inline int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_request_clock(u8 clock, bool enable)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_set_display_clocks(void)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_disable_dsipll(void)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_enable_dsipll(void)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state)
+{
+ return 0;
+}
+
+static inline void db8500_prcmu_config_abb_event_readout(u32 abb_events) {}
+
+static inline void db8500_prcmu_get_abb_event_buffer(void __iomem **buf) {}
+
+static inline u16 db8500_prcmu_get_reset_code(void)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_enable_a9wdog(u8 id)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_disable_a9wdog(u8 id)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_kick_a9wdog(u8 id)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_load_a9wdog(u8 id, u32 val)
+{
+ return 0;
+}
+
+static inline bool db8500_prcmu_is_ac_wake_requested(void)
+{
+ return false;
+}
+
+static inline int db8500_prcmu_set_arm_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int db8500_prcmu_get_arm_opp(void)
+{
+ return 0;
+}
+
+static inline u32 db8500_prcmu_read(unsigned int reg)
+{
+ return 0;
+}
+
+static inline void db8500_prcmu_write(unsigned int reg, u32 value) {}
+
+static inline void db8500_prcmu_write_masked(unsigned int reg, u32 mask,
+ u32 value) {}
+
+#endif /* !CONFIG_MFD_DB8500_PRCMU */
+
+#endif /* __MFD_DB8500_PRCMU_H */
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
new file mode 100644
index 000000000..bf5109d38
--- /dev/null
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -0,0 +1,666 @@
+/*
+ * Copyright (C) ST Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * STE Ux500 PRCMU API
+ */
+#ifndef __MACH_PRCMU_H
+#define __MACH_PRCMU_H
+
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+#include <linux/err.h>
+
+#include <dt-bindings/mfd/dbx500-prcmu.h> /* For clock identifiers */
+
+/* Offset for the firmware version within the TCPM */
+#define DB8500_PRCMU_FW_VERSION_OFFSET 0xA4
+#define DBX540_PRCMU_FW_VERSION_OFFSET 0xA8
+
+/* PRCMU Wakeup defines */
+enum prcmu_wakeup_index {
+ PRCMU_WAKEUP_INDEX_RTC,
+ PRCMU_WAKEUP_INDEX_RTT0,
+ PRCMU_WAKEUP_INDEX_RTT1,
+ PRCMU_WAKEUP_INDEX_HSI0,
+ PRCMU_WAKEUP_INDEX_HSI1,
+ PRCMU_WAKEUP_INDEX_USB,
+ PRCMU_WAKEUP_INDEX_ABB,
+ PRCMU_WAKEUP_INDEX_ABB_FIFO,
+ PRCMU_WAKEUP_INDEX_ARM,
+ PRCMU_WAKEUP_INDEX_CD_IRQ,
+ NUM_PRCMU_WAKEUP_INDICES
+};
+#define PRCMU_WAKEUP(_name) (BIT(PRCMU_WAKEUP_INDEX_##_name))
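
PRCMU_WAKEUP() only turns a wakeup index into the corresponding bit of the mask expected by prcmu_enable_wakeups(), provided further down in this header. For instance, a hypothetical driver enabling the RTC and ABB wakeup sources could do:

/* Illustrative only: enable RTC and ABB as wakeup sources */
static void example_enable_rtc_abb_wakeups(void)
{
	prcmu_enable_wakeups(PRCMU_WAKEUP(RTC) | PRCMU_WAKEUP(ABB));
}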
+
+/* EPOD (power domain) IDs */
+
+/*
+ * DB8500 EPODs
+ * - EPOD_ID_SVAMMDSP: power domain for SVA MMDSP
+ * - EPOD_ID_SVAPIPE: power domain for SVA pipe
+ * - EPOD_ID_SIAMMDSP: power domain for SIA MMDSP
+ * - EPOD_ID_SIAPIPE: power domain for SIA pipe
+ * - EPOD_ID_SGA: power domain for SGA
+ * - EPOD_ID_B2R2_MCDE: power domain for B2R2 and MCDE
+ * - EPOD_ID_ESRAM12: power domain for ESRAM 1 and 2
+ * - EPOD_ID_ESRAM34: power domain for ESRAM 3 and 4
+ * - NUM_EPOD_ID: number of power domains
+ *
+ * TODO: These should be prefixed.
+ */
+#define EPOD_ID_SVAMMDSP 0
+#define EPOD_ID_SVAPIPE 1
+#define EPOD_ID_SIAMMDSP 2
+#define EPOD_ID_SIAPIPE 3
+#define EPOD_ID_SGA 4
+#define EPOD_ID_B2R2_MCDE 5
+#define EPOD_ID_ESRAM12 6
+#define EPOD_ID_ESRAM34 7
+#define NUM_EPOD_ID 8
+
+/*
+ * state definition for EPOD (power domain)
+ * - EPOD_STATE_NO_CHANGE: The EPOD should remain unchanged
+ * - EPOD_STATE_OFF: The EPOD is switched off
+ * - EPOD_STATE_RAMRET: The EPOD is switched off with its internal RAM in
+ * retention
+ * - EPOD_STATE_ON_CLK_OFF: The EPOD is switched on, clock is still off
+ * - EPOD_STATE_ON: Same as above, but with clock enabled
+ */
+#define EPOD_STATE_NO_CHANGE 0x00
+#define EPOD_STATE_OFF 0x01
+#define EPOD_STATE_RAMRET 0x02
+#define EPOD_STATE_ON_CLK_OFF 0x03
+#define EPOD_STATE_ON 0x04
+
+/*
+ * CLKOUT sources
+ */
+#define PRCMU_CLKSRC_CLK38M 0x00
+#define PRCMU_CLKSRC_ACLK 0x01
+#define PRCMU_CLKSRC_SYSCLK 0x02
+#define PRCMU_CLKSRC_LCDCLK 0x03
+#define PRCMU_CLKSRC_SDMMCCLK 0x04
+#define PRCMU_CLKSRC_TVCLK 0x05
+#define PRCMU_CLKSRC_TIMCLK 0x06
+#define PRCMU_CLKSRC_CLK009 0x07
+/* These are only valid for CLKOUT1: */
+#define PRCMU_CLKSRC_SIAMMDSPCLK 0x40
+#define PRCMU_CLKSRC_I2CCLK 0x41
+#define PRCMU_CLKSRC_MSP02CLK 0x42
+#define PRCMU_CLKSRC_ARMPLL_OBSCLK 0x43
+#define PRCMU_CLKSRC_HSIRXCLK 0x44
+#define PRCMU_CLKSRC_HSITXCLK 0x45
+#define PRCMU_CLKSRC_ARMCLKFIX 0x46
+#define PRCMU_CLKSRC_HDMICLK 0x47
+
+/**
+ * enum prcmu_wdog_id - PRCMU watchdog IDs
+ * @PRCMU_WDOG_ALL: use all timers
+ * @PRCMU_WDOG_CPU1: use first CPU timer only
+ * @PRCMU_WDOG_CPU2: use second CPU timer only
+ */
+enum prcmu_wdog_id {
+ PRCMU_WDOG_ALL = 0x00,
+ PRCMU_WDOG_CPU1 = 0x01,
+ PRCMU_WDOG_CPU2 = 0x02,
+};
+
+/**
+ * enum ape_opp - APE OPP states definition
+ * @APE_OPP_INIT:
+ * @APE_NO_CHANGE: The APE operating point is unchanged
+ * @APE_100_OPP: The new APE operating point is ape100opp
+ * @APE_50_OPP: 50%
+ * @APE_50_PARTLY_25_OPP: 50%, except some clocks at 25%.
+ */
+enum ape_opp {
+ APE_OPP_INIT = 0x00,
+ APE_NO_CHANGE = 0x01,
+ APE_100_OPP = 0x02,
+ APE_50_OPP = 0x03,
+ APE_50_PARTLY_25_OPP = 0xFF,
+};
+
+/**
+ * enum arm_opp - ARM OPP states definition
+ * @ARM_OPP_INIT:
+ * @ARM_NO_CHANGE: The ARM operating point is unchanged
+ * @ARM_100_OPP: The new ARM operating point is arm100opp
+ * @ARM_50_OPP: The new ARM operating point is arm50opp
+ * @ARM_MAX_OPP: Operating point is "max" (more than 100)
+ * @ARM_MAX_FREQ100OPP: Set max opp if available, else 100
+ * @ARM_EXTCLK: The new ARM operating point is armExtClk
+ */
+enum arm_opp {
+ ARM_OPP_INIT = 0x00,
+ ARM_NO_CHANGE = 0x01,
+ ARM_100_OPP = 0x02,
+ ARM_50_OPP = 0x03,
+ ARM_MAX_OPP = 0x04,
+ ARM_MAX_FREQ100OPP = 0x05,
+ ARM_EXTCLK = 0x07
+};
+
+/**
+ * enum ddr_opp - DDR OPP states definition
+ * @DDR_100_OPP: The new DDR operating point is ddr100opp
+ * @DDR_50_OPP: The new DDR operating point is ddr50opp
+ * @DDR_25_OPP: The new DDR operating point is ddr25opp
+ */
+enum ddr_opp {
+ DDR_100_OPP = 0x00,
+ DDR_50_OPP = 0x01,
+ DDR_25_OPP = 0x02,
+};
+
+/*
+ * Definitions for controlling ESRAM0 in deep sleep.
+ */
+#define ESRAM0_DEEP_SLEEP_STATE_OFF 1
+#define ESRAM0_DEEP_SLEEP_STATE_RET 2
+
+/**
+ * enum ddr_pwrst - DDR power states definition
+ * @DDR_PWR_STATE_UNCHANGED: SDRAM and DDR controller state is unchanged
+ * @DDR_PWR_STATE_ON:
+ * @DDR_PWR_STATE_OFFLOWLAT:
+ * @DDR_PWR_STATE_OFFHIGHLAT:
+ */
+enum ddr_pwrst {
+ DDR_PWR_STATE_UNCHANGED = 0x00,
+ DDR_PWR_STATE_ON = 0x01,
+ DDR_PWR_STATE_OFFLOWLAT = 0x02,
+ DDR_PWR_STATE_OFFHIGHLAT = 0x03
+};
+
+#define DB8500_PRCMU_LEGACY_OFFSET 0xDD4
+
+struct prcmu_pdata {
+ bool enable_set_ddr_opp;
+ bool enable_ape_opp_100_voltage;
+ struct ab8500_platform_data *ab_platdata;
+ u32 version_offset;
+ u32 legacy_offset;
+ u32 adt_offset;
+};
+
+#define PRCMU_FW_PROJECT_U8500 2
+#define PRCMU_FW_PROJECT_U8400 3
+#define PRCMU_FW_PROJECT_U9500 4 /* Customer specific */
+#define PRCMU_FW_PROJECT_U8500_MBB 5
+#define PRCMU_FW_PROJECT_U8500_C1 6
+#define PRCMU_FW_PROJECT_U8500_C2 7
+#define PRCMU_FW_PROJECT_U8500_C3 8
+#define PRCMU_FW_PROJECT_U8500_C4 9
+#define PRCMU_FW_PROJECT_U9500_MBL 10
+#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */
+#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */
+#define PRCMU_FW_PROJECT_U8520 13
+#define PRCMU_FW_PROJECT_U8420 14
+#define PRCMU_FW_PROJECT_A9420 20
+/* [32..63] 9540 and derivatives */
+#define PRCMU_FW_PROJECT_U9540 32
+/* [64..95] 8540 and derivatives */
+#define PRCMU_FW_PROJECT_L8540 64
+/* [96..126] 8580 and derivatives */
+#define PRCMU_FW_PROJECT_L8580 96
+
+#define PRCMU_FW_PROJECT_NAME_LEN 20
+struct prcmu_fw_version {
+ u32 project; /* Note: the project number is shifted by 8 on ux540 */
+ u8 api_version;
+ u8 func_version;
+ u8 errata;
+ char project_name[PRCMU_FW_PROJECT_NAME_LEN];
+};
+
+#include <linux/mfd/db8500-prcmu.h>
+
+#if defined(CONFIG_UX500_SOC_DB8500)
+
+static inline void prcmu_early_init(u32 phy_base, u32 size)
+{
+ return db8500_prcmu_early_init(phy_base, size);
+}
+
+static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
+ bool keep_ap_pll)
+{
+ return db8500_prcmu_set_power_state(state, keep_ulp_clk,
+ keep_ap_pll);
+}
+
+static inline u8 prcmu_get_power_state_result(void)
+{
+ return db8500_prcmu_get_power_state_result();
+}
+
+static inline int prcmu_set_epod(u16 epod_id, u8 epod_state)
+{
+ return db8500_prcmu_set_epod(epod_id, epod_state);
+}
+
+static inline void prcmu_enable_wakeups(u32 wakeups)
+{
+ db8500_prcmu_enable_wakeups(wakeups);
+}
+
+static inline void prcmu_disable_wakeups(void)
+{
+ prcmu_enable_wakeups(0);
+}
+
+static inline void prcmu_config_abb_event_readout(u32 abb_events)
+{
+ db8500_prcmu_config_abb_event_readout(abb_events);
+}
+
+static inline void prcmu_get_abb_event_buffer(void __iomem **buf)
+{
+ db8500_prcmu_get_abb_event_buffer(buf);
+}
+
+int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
+int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size);
+int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask, u8 size);
+
+int prcmu_config_clkout(u8 clkout, u8 source, u8 div);
+
+static inline int prcmu_request_clock(u8 clock, bool enable)
+{
+ return db8500_prcmu_request_clock(clock, enable);
+}
+
+unsigned long prcmu_clock_rate(u8 clock);
+long prcmu_round_clock_rate(u8 clock, unsigned long rate);
+int prcmu_set_clock_rate(u8 clock, unsigned long rate);
+
+static inline int prcmu_set_ddr_opp(u8 opp)
+{
+ return db8500_prcmu_set_ddr_opp(opp);
+}
+static inline int prcmu_get_ddr_opp(void)
+{
+ return db8500_prcmu_get_ddr_opp();
+}
+
+static inline int prcmu_set_arm_opp(u8 opp)
+{
+ return db8500_prcmu_set_arm_opp(opp);
+}
+
+static inline int prcmu_get_arm_opp(void)
+{
+ return db8500_prcmu_get_arm_opp();
+}
+
+static inline int prcmu_set_ape_opp(u8 opp)
+{
+ return db8500_prcmu_set_ape_opp(opp);
+}
+
+static inline int prcmu_get_ape_opp(void)
+{
+ return db8500_prcmu_get_ape_opp();
+}
+
+static inline int prcmu_request_ape_opp_100_voltage(bool enable)
+{
+ return db8500_prcmu_request_ape_opp_100_voltage(enable);
+}
+
+static inline void prcmu_system_reset(u16 reset_code)
+{
+ return db8500_prcmu_system_reset(reset_code);
+}
+
+static inline u16 prcmu_get_reset_code(void)
+{
+ return db8500_prcmu_get_reset_code();
+}
+
+int prcmu_ac_wake_req(void);
+void prcmu_ac_sleep_req(void);
+static inline void prcmu_modem_reset(void)
+{
+ return db8500_prcmu_modem_reset();
+}
+
+static inline bool prcmu_is_ac_wake_requested(void)
+{
+ return db8500_prcmu_is_ac_wake_requested();
+}
+
+static inline int prcmu_set_display_clocks(void)
+{
+ return db8500_prcmu_set_display_clocks();
+}
+
+static inline int prcmu_disable_dsipll(void)
+{
+ return db8500_prcmu_disable_dsipll();
+}
+
+static inline int prcmu_enable_dsipll(void)
+{
+ return db8500_prcmu_enable_dsipll();
+}
+
+static inline int prcmu_config_esram0_deep_sleep(u8 state)
+{
+ return db8500_prcmu_config_esram0_deep_sleep(state);
+}
+
+static inline int prcmu_config_hotdog(u8 threshold)
+{
+ return db8500_prcmu_config_hotdog(threshold);
+}
+
+static inline int prcmu_config_hotmon(u8 low, u8 high)
+{
+ return db8500_prcmu_config_hotmon(low, high);
+}
+
+static inline int prcmu_start_temp_sense(u16 cycles32k)
+{
+ return db8500_prcmu_start_temp_sense(cycles32k);
+}
+
+static inline int prcmu_stop_temp_sense(void)
+{
+ return db8500_prcmu_stop_temp_sense();
+}
+
+static inline u32 prcmu_read(unsigned int reg)
+{
+ return db8500_prcmu_read(reg);
+}
+
+static inline void prcmu_write(unsigned int reg, u32 value)
+{
+ db8500_prcmu_write(reg, value);
+}
+
+static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value)
+{
+ db8500_prcmu_write_masked(reg, mask, value);
+}
+
+static inline int prcmu_enable_a9wdog(u8 id)
+{
+ return db8500_prcmu_enable_a9wdog(id);
+}
+
+static inline int prcmu_disable_a9wdog(u8 id)
+{
+ return db8500_prcmu_disable_a9wdog(id);
+}
+
+static inline int prcmu_kick_a9wdog(u8 id)
+{
+ return db8500_prcmu_kick_a9wdog(id);
+}
+
+static inline int prcmu_load_a9wdog(u8 id, u32 timeout)
+{
+ return db8500_prcmu_load_a9wdog(id, timeout);
+}
+
+static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
+{
+ return db8500_prcmu_config_a9wdog(num, sleep_auto_off);
+}
+#else
+
+static inline void prcmu_early_init(u32 phy_base, u32 size) {}
+
+static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
+ bool keep_ap_pll)
+{
+ return 0;
+}
+
+static inline int prcmu_set_epod(u16 epod_id, u8 epod_state)
+{
+ return 0;
+}
+
+static inline void prcmu_enable_wakeups(u32 wakeups) {}
+
+static inline void prcmu_disable_wakeups(void) {}
+
+static inline int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
+{
+ return -ENOSYS;
+}
+
+static inline int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
+{
+ return -ENOSYS;
+}
+
+static inline int prcmu_abb_write_masked(u8 slave, u8 reg, u8 *value, u8 *mask,
+ u8 size)
+{
+ return -ENOSYS;
+}
+
+static inline int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
+{
+ return 0;
+}
+
+static inline int prcmu_request_clock(u8 clock, bool enable)
+{
+ return 0;
+}
+
+static inline long prcmu_round_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int prcmu_set_clock_rate(u8 clock, unsigned long rate)
+{
+ return 0;
+}
+
+static inline unsigned long prcmu_clock_rate(u8 clock)
+{
+ return 0;
+}
+
+static inline int prcmu_set_ape_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int prcmu_get_ape_opp(void)
+{
+ return APE_100_OPP;
+}
+
+static inline int prcmu_request_ape_opp_100_voltage(bool enable)
+{
+ return 0;
+}
+
+static inline int prcmu_set_arm_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int prcmu_get_arm_opp(void)
+{
+ return ARM_100_OPP;
+}
+
+static inline int prcmu_set_ddr_opp(u8 opp)
+{
+ return 0;
+}
+
+static inline int prcmu_get_ddr_opp(void)
+{
+ return DDR_100_OPP;
+}
+
+static inline void prcmu_system_reset(u16 reset_code) {}
+
+static inline u16 prcmu_get_reset_code(void)
+{
+ return 0;
+}
+
+static inline int prcmu_ac_wake_req(void)
+{
+ return 0;
+}
+
+static inline void prcmu_ac_sleep_req(void) {}
+
+static inline void prcmu_modem_reset(void) {}
+
+static inline bool prcmu_is_ac_wake_requested(void)
+{
+ return false;
+}
+
+static inline int prcmu_set_display_clocks(void)
+{
+ return 0;
+}
+
+static inline int prcmu_disable_dsipll(void)
+{
+ return 0;
+}
+
+static inline int prcmu_enable_dsipll(void)
+{
+ return 0;
+}
+
+static inline int prcmu_config_esram0_deep_sleep(u8 state)
+{
+ return 0;
+}
+
+static inline void prcmu_config_abb_event_readout(u32 abb_events) {}
+
+static inline void prcmu_get_abb_event_buffer(void __iomem **buf)
+{
+ *buf = NULL;
+}
+
+static inline int prcmu_config_hotdog(u8 threshold)
+{
+ return 0;
+}
+
+static inline int prcmu_config_hotmon(u8 low, u8 high)
+{
+ return 0;
+}
+
+static inline int prcmu_start_temp_sense(u16 cycles32k)
+{
+ return 0;
+}
+
+static inline int prcmu_stop_temp_sense(void)
+{
+ return 0;
+}
+
+static inline u32 prcmu_read(unsigned int reg)
+{
+ return 0;
+}
+
+static inline void prcmu_write(unsigned int reg, u32 value) {}
+
+static inline void prcmu_write_masked(unsigned int reg, u32 mask, u32 value) {}
+
+#endif
+
+static inline void prcmu_set(unsigned int reg, u32 bits)
+{
+ prcmu_write_masked(reg, bits, bits);
+}
+
+static inline void prcmu_clear(unsigned int reg, u32 bits)
+{
+ prcmu_write_masked(reg, bits, 0);
+}
+
+/* PRCMU QoS APE OPP class */
+#define PRCMU_QOS_APE_OPP 1
+#define PRCMU_QOS_DDR_OPP 2
+#define PRCMU_QOS_ARM_OPP 3
+#define PRCMU_QOS_DEFAULT_VALUE -1
+
+#ifdef CONFIG_DBX500_PRCMU_QOS_POWER
+
+unsigned long prcmu_qos_get_cpufreq_opp_delay(void);
+void prcmu_qos_set_cpufreq_opp_delay(unsigned long);
+void prcmu_qos_force_opp(int, s32);
+int prcmu_qos_requirement(int pm_qos_class);
+int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value);
+int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value);
+void prcmu_qos_remove_requirement(int pm_qos_class, char *name);
+int prcmu_qos_add_notifier(int prcmu_qos_class,
+ struct notifier_block *notifier);
+int prcmu_qos_remove_notifier(int prcmu_qos_class,
+ struct notifier_block *notifier);
+
+#else
+
+static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
+{
+ return 0;
+}
+
+static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {}
+
+static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {}
+
+static inline int prcmu_qos_requirement(int prcmu_qos_class)
+{
+ return 0;
+}
+
+static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
+ char *name, s32 value)
+{
+ return 0;
+}
+
+static inline int prcmu_qos_update_requirement(int prcmu_qos_class,
+ char *name, s32 new_value)
+{
+ return 0;
+}
+
+static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
+{
+}
+
+static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
+ struct notifier_block *notifier)
+{
+ return 0;
+}
+static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
+ struct notifier_block *notifier)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* __MACH_PRCMU_H */
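
The QoS interface above lets a client register a named requirement per OPP class and release it when done. A hedged sketch of a client pinning the APE OPP around a latency-critical burst; the client name is arbitrary, and the value 100 is assumed here to request the 100% APE OPP.

/* Illustrative only: hold the APE OPP high for the duration of a burst */
static char example_qos_client[] = "example-client";

static void example_ape_burst(void)
{
	if (prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, example_qos_client, 100))
		return;

	/* ... latency-critical work runs here ... */

	prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, example_qos_client);
}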
diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h
new file mode 100644
index 000000000..004b24576
--- /dev/null
+++ b/include/linux/mfd/dln2.h
@@ -0,0 +1,103 @@
+#ifndef __LINUX_USB_DLN2_H
+#define __LINUX_USB_DLN2_H
+
+#define DLN2_CMD(cmd, id) ((cmd) | ((id) << 8))
+
+struct dln2_platform_data {
+ u16 handle; /* sub-driver handle (internally used only) */
+ u8 port; /* I2C/SPI port */
+};
+
+/**
+ * dln2_event_cb_t - event callback function signature
+ *
+ * @pdev - the sub-device that registered this callback
+ * @echo - the echo header field received in the message
+ * @data - the data payload
+ * @len - the data payload length
+ *
+ * The callback function is called in interrupt context and the data payload is
+ * only valid during the call. If the user needs to access the data later, it
+ * must copy it.
+ */
+
+typedef void (*dln2_event_cb_t)(struct platform_device *pdev, u16 echo,
+ const void *data, int len);
+
+/**
+ * dln2_register_event_cb - register a callback function for an event
+ *
+ * @pdev - the sub-device that registers the callback
+ * @event - the event for which to register a callback
+ * @event_cb - the callback function
+ *
+ * @return 0 in case of success, negative value in case of error
+ */
+int dln2_register_event_cb(struct platform_device *pdev, u16 event,
+ dln2_event_cb_t event_cb);
+
+/**
+ * dln2_unregister_event_cb - unregister the callback function for an event
+ *
+ * @pdev - the sub-device that registered the callback
+ * @event - the event for which to register a callback
+ */
+void dln2_unregister_event_cb(struct platform_device *pdev, u16 event);
+
+/**
+ * dln2_transfer - issue a DLN2 command and wait for a response and the
+ * associated data
+ *
+ * @pdev - the sub-device which is issuing this transfer
+ * @cmd - the command to be sent to the device
+ * @obuf - the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len - the size of the buffer to be sent to the device
+ * @ibuf - any data associated with the response will be copied here; it can be
+ * NULL if the user doesn't need the response data
+ * @ibuf_len - must be initialized to the input buffer size; it will be modified
+ * to indicate the actual data transferred;
+ *
+ * @return 0 for success, negative value for errors
+ */
+int dln2_transfer(struct platform_device *pdev, u16 cmd,
+ const void *obuf, unsigned obuf_len,
+ void *ibuf, unsigned *ibuf_len);
+
+/**
+ * dln2_transfer_rx - variant of @dln2_transfer() where TX buffer is not needed
+ *
+ * @pdev - the sub-device which is issuing this transfer
+ * @cmd - the command to be sent to the device
+ * @ibuf - any data associated with the response will be copied here; it can be
+ * NULL if the user doesn't need the response data
+ * @ibuf_len - must be initialized to the input buffer size; it will be modified
+ * to indicate the actual data transferred
+ *
+ * @return 0 for success, negative value for errors
+ */
+
+static inline int dln2_transfer_rx(struct platform_device *pdev, u16 cmd,
+ void *ibuf, unsigned *ibuf_len)
+{
+ return dln2_transfer(pdev, cmd, NULL, 0, ibuf, ibuf_len);
+}
+
+/**
+ * dln2_transfer_tx - variant of @dln2_transfer() where RX buffer is not needed
+ *
+ * @pdev - the sub-device which is issuing this transfer
+ * @cmd - the command to be sent to the device
+ * @obuf - the buffer to be sent to the device; it can be NULL if the
+ * user doesn't need to transmit data with this command
+ * @obuf_len - the size of the buffer to be sent to the device
+ *
+ * @return 0 for success, negative value for errors
+ */
+static inline int dln2_transfer_tx(struct platform_device *pdev, u16 cmd,
+ const void *obuf, unsigned obuf_len)
+{
+ return dln2_transfer(pdev, cmd, obuf, obuf_len, NULL, NULL);
+}
+
+#endif
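To illustrate the transfer helpers declared above, here is a hedged sketch of a hypothetical sub-driver request; the command value built with DLN2_CMD() and the response layout are made up for illustration, not part of the DLN2 protocol as documented here.

/* Hypothetical example command: the opcode 0x01 and module id 0x02 are
 * illustrative only. */
#define EXAMPLE_CMD_GET_COUNT	DLN2_CMD(0x01, 0x02)

static int example_get_count(struct platform_device *pdev, u32 *count)
{
	__le32 buf;
	unsigned len = sizeof(buf);
	int ret;

	/* RX-only transfer: no TX payload, response copied into buf */
	ret = dln2_transfer_rx(pdev, EXAMPLE_CMD_GET_COUNT, &buf, &len);
	if (ret < 0)
		return ret;
	if (len < sizeof(buf))
		return -EPROTO;

	*count = le32_to_cpu(buf);
	return 0;
}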
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
new file mode 100644
index 000000000..38a372a0e
--- /dev/null
+++ b/include/linux/mfd/ds1wm.h
@@ -0,0 +1,13 @@
+/* MFD cell driver data for the DS1WM driver */
+
+struct ds1wm_driver_data {
+ int active_high;
+ int clock_rate;
+	/*
+	 * Amount of time, in milliseconds, to sleep following a reset
+	 * pulse. Zero should work if your bus devices' recovery time
+	 * respects the 1-wire spec, since the ds1wm implements the
+	 * precise timings of a reset pulse/presence detect sequence.
+	 */
+ unsigned int reset_recover_delay;
+};
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
new file mode 100644
index 000000000..32a1b5cfe
--- /dev/null
+++ b/include/linux/mfd/ezx-pcap.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2009 Daniel Ribeiro <drwyrm@gmail.com>
+ *
+ * For further information, please see http://wiki.openezx.org/PCAP2
+ */
+
+#ifndef EZX_PCAP_H
+#define EZX_PCAP_H
+
+struct pcap_subdev {
+ int id;
+ const char *name;
+ void *platform_data;
+};
+
+struct pcap_platform_data {
+ unsigned int irq_base;
+ unsigned int config;
+ int gpio;
+ void (*init) (void *); /* board specific init */
+ int num_subdevs;
+ struct pcap_subdev *subdevs;
+};
+
+struct pcap_chip;
+
+int ezx_pcap_write(struct pcap_chip *, u8, u32);
+int ezx_pcap_read(struct pcap_chip *, u8, u32 *);
+int ezx_pcap_set_bits(struct pcap_chip *, u8, u32, u32);
+int pcap_to_irq(struct pcap_chip *, int);
+int irq_to_pcap(struct pcap_chip *, int);
+int pcap_adc_async(struct pcap_chip *, u8, u32, u8[], void *, void *);
+int pcap_adc_sync(struct pcap_chip *, u8, u32, u8[], u16[]);
+void pcap_set_ts_bits(struct pcap_chip *, u32);
+
+#define PCAP_SECOND_PORT 1
+#define PCAP_CS_AH 2
+
+#define PCAP_REGISTER_WRITE_OP_BIT 0x80000000
+#define PCAP_REGISTER_READ_OP_BIT 0x00000000
+
+#define PCAP_REGISTER_VALUE_MASK 0x01ffffff
+#define PCAP_REGISTER_ADDRESS_MASK 0x7c000000
+#define PCAP_REGISTER_ADDRESS_SHIFT 26
+#define PCAP_REGISTER_NUMBER 32
+#define PCAP_CLEAR_INTERRUPT_REGISTER 0x01ffffff
+#define PCAP_MASK_ALL_INTERRUPT 0x01ffffff
+
+/* registers accessible by both pcap ports */
+#define PCAP_REG_ISR 0x0 /* Interrupt Status */
+#define PCAP_REG_MSR 0x1 /* Interrupt Mask */
+#define PCAP_REG_PSTAT 0x2 /* Processor Status */
+#define PCAP_REG_VREG2 0x6 /* Regulator Bank 2 Control */
+#define PCAP_REG_AUXVREG 0x7 /* Auxiliary Regulator Control */
+#define PCAP_REG_BATT 0x8 /* Battery Control */
+#define PCAP_REG_ADC 0x9 /* AD Control */
+#define PCAP_REG_ADR 0xa /* AD Result */
+#define PCAP_REG_CODEC 0xb /* Audio Codec Control */
+#define PCAP_REG_RX_AMPS 0xc /* RX Audio Amplifiers Control */
+#define PCAP_REG_ST_DAC 0xd /* Stereo DAC Control */
+#define PCAP_REG_BUSCTRL 0x14 /* Connectivity Control */
+#define PCAP_REG_PERIPH 0x15 /* Peripheral Control */
+#define PCAP_REG_LOWPWR 0x18 /* Regulator Low Power Control */
+#define PCAP_REG_TX_AMPS 0x1a /* TX Audio Amplifiers Control */
+#define PCAP_REG_GP 0x1b /* General Purpose */
+#define PCAP_REG_TEST1 0x1c
+#define PCAP_REG_TEST2 0x1d
+#define PCAP_REG_VENDOR_TEST1 0x1e
+#define PCAP_REG_VENDOR_TEST2 0x1f
+
+/* registers accessible by pcap port 1 only (a1200, e2 & e6) */
+#define PCAP_REG_INT_SEL 0x3 /* Interrupt Select */
+#define PCAP_REG_SWCTRL 0x4 /* Switching Regulator Control */
+#define PCAP_REG_VREG1 0x5 /* Regulator Bank 1 Control */
+#define PCAP_REG_RTC_TOD 0xe /* RTC Time of Day */
+#define PCAP_REG_RTC_TODA 0xf /* RTC Time of Day Alarm */
+#define PCAP_REG_RTC_DAY 0x10 /* RTC Day */
+#define PCAP_REG_RTC_DAYA 0x11 /* RTC Day Alarm */
+#define PCAP_REG_MTRTMR 0x12 /* AD Monitor Timer */
+#define PCAP_REG_PWR 0x13 /* Power Control */
+#define PCAP_REG_AUXVREG_MASK 0x16 /* Auxiliary Regulator Mask */
+#define PCAP_REG_VENDOR_REV 0x17
+#define PCAP_REG_PERIPH_MASK 0x19 /* Peripheral Mask */
+
+/* PCAP2 Interrupts */
+#define PCAP_NIRQS 23
+#define PCAP_IRQ_ADCDONE 0 /* ADC done port 1 */
+#define PCAP_IRQ_TS 1 /* Touch Screen */
+#define PCAP_IRQ_1HZ 2 /* 1HZ timer */
+#define PCAP_IRQ_WH 3 /* ADC above high limit */
+#define PCAP_IRQ_WL 4 /* ADC below low limit */
+#define PCAP_IRQ_TODA 5 /* Time of day alarm */
+#define PCAP_IRQ_USB4V 6 /* USB above 4V */
+#define PCAP_IRQ_ONOFF 7 /* On/Off button */
+#define PCAP_IRQ_ONOFF2 8 /* On/Off button 2 */
+#define PCAP_IRQ_USB1V 9 /* USB above 1V */
+#define PCAP_IRQ_MOBPORT 10
+#define PCAP_IRQ_MIC 11 /* Mic attach/HS button */
+#define PCAP_IRQ_HS 12 /* Headset attach */
+#define PCAP_IRQ_ST 13
+#define PCAP_IRQ_PC 14 /* Power Cut */
+#define PCAP_IRQ_WARM 15
+#define PCAP_IRQ_EOL 16 /* Battery End Of Life */
+#define PCAP_IRQ_CLK 17
+#define PCAP_IRQ_SYSRST 18 /* System Reset */
+#define PCAP_IRQ_DUMMY 19
+#define PCAP_IRQ_ADCDONE2 20 /* ADC done port 2 */
+#define PCAP_IRQ_SOFTRESET 21
+#define PCAP_IRQ_MNEXB 22
+
+/* voltage regulators */
+#define V1 0
+#define V2 1
+#define V3 2
+#define V4 3
+#define V5 4
+#define V6 5
+#define V7 6
+#define V8 7
+#define V9 8
+#define V10 9
+#define VAUX1 10
+#define VAUX2 11
+#define VAUX3 12
+#define VAUX4 13
+#define VSIM 14
+#define VSIM2 15
+#define VVIB 16
+#define SW1 17
+#define SW2 18
+#define SW3 19
+#define SW1S 20
+#define SW2S 21
+
+#define PCAP_BATT_DAC_MASK 0x000000ff
+#define PCAP_BATT_DAC_SHIFT 0
+#define PCAP_BATT_B_FDBK (1 << 8)
+#define PCAP_BATT_EXT_ISENSE (1 << 9)
+#define PCAP_BATT_V_COIN_MASK 0x00003c00
+#define PCAP_BATT_V_COIN_SHIFT 10
+#define PCAP_BATT_I_COIN (1 << 14)
+#define PCAP_BATT_COIN_CH_EN (1 << 15)
+#define PCAP_BATT_EOL_SEL_MASK 0x000e0000
+#define PCAP_BATT_EOL_SEL_SHIFT 17
+#define PCAP_BATT_EOL_CMP_EN (1 << 20)
+#define PCAP_BATT_BATT_DET_EN (1 << 21)
+#define PCAP_BATT_THERMBIAS_CTRL (1 << 22)
+
+#define PCAP_ADC_ADEN (1 << 0)
+#define PCAP_ADC_RAND (1 << 1)
+#define PCAP_ADC_AD_SEL1 (1 << 2)
+#define PCAP_ADC_AD_SEL2 (1 << 3)
+#define PCAP_ADC_ADA1_MASK 0x00000070
+#define PCAP_ADC_ADA1_SHIFT 4
+#define PCAP_ADC_ADA2_MASK 0x00000380
+#define PCAP_ADC_ADA2_SHIFT 7
+#define PCAP_ADC_ATO_MASK 0x00003c00
+#define PCAP_ADC_ATO_SHIFT 10
+#define PCAP_ADC_ATOX (1 << 14)
+#define PCAP_ADC_MTR1 (1 << 15)
+#define PCAP_ADC_MTR2 (1 << 16)
+#define PCAP_ADC_TS_M_MASK 0x000e0000
+#define PCAP_ADC_TS_M_SHIFT 17
+#define PCAP_ADC_TS_REF_LOWPWR (1 << 20)
+#define PCAP_ADC_TS_REFENB (1 << 21)
+#define PCAP_ADC_BATT_I_POLARITY (1 << 22)
+#define PCAP_ADC_BATT_I_ADC (1 << 23)
+
+#define PCAP_ADC_BANK_0 0
+#define PCAP_ADC_BANK_1 1
+/* ADC bank 0 */
+#define PCAP_ADC_CH_COIN 0
+#define PCAP_ADC_CH_BATT 1
+#define PCAP_ADC_CH_BPLUS 2
+#define PCAP_ADC_CH_MOBPORTB 3
+#define PCAP_ADC_CH_TEMPERATURE 4
+#define PCAP_ADC_CH_CHARGER_ID 5
+#define PCAP_ADC_CH_AD6 6
+/* ADC bank 1 */
+#define PCAP_ADC_CH_AD7 0
+#define PCAP_ADC_CH_AD8 1
+#define PCAP_ADC_CH_AD9 2
+#define PCAP_ADC_CH_TS_X1 3
+#define PCAP_ADC_CH_TS_X2 4
+#define PCAP_ADC_CH_TS_Y1 5
+#define PCAP_ADC_CH_TS_Y2 6
+
+#define PCAP_ADC_T_NOW 0
+#define PCAP_ADC_T_IN_BURST 1
+#define PCAP_ADC_T_OUT_BURST 2
+
+#define PCAP_ADC_ATO_IN_BURST 6
+#define PCAP_ADC_ATO_OUT_BURST 0
+
+#define PCAP_ADC_TS_M_XY 1
+#define PCAP_ADC_TS_M_PRESSURE 2
+#define PCAP_ADC_TS_M_PLATE_X 3
+#define PCAP_ADC_TS_M_PLATE_Y 4
+#define PCAP_ADC_TS_M_STANDBY 5
+#define PCAP_ADC_TS_M_NONTS 6
+
+#define PCAP_ADR_ADD1_MASK 0x000003ff
+#define PCAP_ADR_ADD1_SHIFT 0
+#define PCAP_ADR_ADD2_MASK 0x000ffc00
+#define PCAP_ADR_ADD2_SHIFT 10
+#define PCAP_ADR_ADINC1 (1 << 20)
+#define PCAP_ADR_ADINC2 (1 << 21)
+#define PCAP_ADR_ASC (1 << 22)
+#define PCAP_ADR_ONESHOT (1 << 23)
+
+#define PCAP_BUSCTRL_FSENB (1 << 0)
+#define PCAP_BUSCTRL_USB_SUSPEND (1 << 1)
+#define PCAP_BUSCTRL_USB_PU (1 << 2)
+#define PCAP_BUSCTRL_USB_PD (1 << 3)
+#define PCAP_BUSCTRL_VUSB_EN (1 << 4)
+#define PCAP_BUSCTRL_USB_PS (1 << 5)
+#define PCAP_BUSCTRL_VUSB_MSTR_EN (1 << 6)
+#define PCAP_BUSCTRL_VBUS_PD_ENB (1 << 7)
+#define PCAP_BUSCTRL_CURRLIM (1 << 8)
+#define PCAP_BUSCTRL_RS232ENB (1 << 9)
+#define PCAP_BUSCTRL_RS232_DIR (1 << 10)
+#define PCAP_BUSCTRL_SE0_CONN (1 << 11)
+#define PCAP_BUSCTRL_USB_PDM (1 << 12)
+#define PCAP_BUSCTRL_BUS_PRI_ADJ (1 << 24)
+
+/* leds */
+#define PCAP_LED0 0
+#define PCAP_LED1 1
+#define PCAP_BL0 2
+#define PCAP_BL1 3
+#define PCAP_LED_3MA 0
+#define PCAP_LED_4MA 1
+#define PCAP_LED_5MA 2
+#define PCAP_LED_9MA 3
+#define PCAP_LED_T_MASK 0xf
+#define PCAP_LED_C_MASK 0x3
+#define PCAP_BL_MASK 0x1f
+#define PCAP_BL0_SHIFT 0
+#define PCAP_LED0_EN (1 << 5)
+#define PCAP_LED1_EN (1 << 6)
+#define PCAP_LED0_T_SHIFT 7
+#define PCAP_LED1_T_SHIFT 11
+#define PCAP_LED0_C_SHIFT 15
+#define PCAP_LED1_C_SHIFT 17
+#define PCAP_BL1_SHIFT 20
+
+/* RTC */
+#define PCAP_RTC_DAY_MASK 0x3fff
+#define PCAP_RTC_TOD_MASK 0xffff
+#define PCAP_RTC_PC_MASK 0x7
+#define SEC_PER_DAY 86400
+
+#endif
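The accessor functions declared at the top of this header are meant to be combined with the register and bit definitions that follow; a hedged sketch that enables the ADC via the ADEN bit:

/* Sketch only: set the ADEN bit in the AD Control register, then read
 * the register back. */
static int example_enable_adc(struct pcap_chip *pcap)
{
	u32 tmp;
	int ret;

	ret = ezx_pcap_set_bits(pcap, PCAP_REG_ADC, PCAP_ADC_ADEN,
				PCAP_ADC_ADEN);
	if (ret)
		return ret;

	return ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
}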
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
new file mode 100644
index 000000000..587273e35
--- /dev/null
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -0,0 +1,41 @@
+/*
+ * Header file for device driver Hi6421 PMIC
+ *
+ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
+ * http://www.hisilicon.com
+ * Copyright (c) <2013-2014> Linaro Ltd.
+ * http://www.linaro.org
+ *
+ * Author: Guodong Xu <guodong.xu@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __HI6421_PMIC_H
+#define __HI6421_PMIC_H
+
+/* Hi6421 registers are mapped to memory bus in 4 bytes stride */
+#define HI6421_REG_TO_BUS_ADDR(x) (x << 2)
+
+/* Hi6421 maximum register number */
+#define HI6421_REG_MAX 0xFF
+
+/* Hi6421 OCP (over current protection) and DEB (debounce) control register */
+#define HI6421_OCP_DEB_CTRL_REG HI6421_REG_TO_BUS_ADDR(0x51)
+#define HI6421_OCP_DEB_SEL_MASK 0x0C
+#define HI6421_OCP_DEB_SEL_8MS 0x00
+#define HI6421_OCP_DEB_SEL_16MS 0x04
+#define HI6421_OCP_DEB_SEL_32MS 0x08
+#define HI6421_OCP_DEB_SEL_64MS 0x0C
+#define HI6421_OCP_EN_DEBOUNCE_MASK 0x02
+#define HI6421_OCP_EN_DEBOUNCE_ENABLE 0x02
+#define HI6421_OCP_AUTO_STOP_MASK 0x01
+#define HI6421_OCP_AUTO_STOP_ENABLE 0x01
+
+struct hi6421_pmic {
+ struct regmap *regmap;
+};
+
+#endif /* __HI6421_PMIC_H */
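Since the Hi6421 core only exposes a regmap, client drivers go through the regmap API with the register macros above. A sketch, assuming the MFD core stores the struct hi6421_pmic as driver data of the parent device (an assumption, not stated in this header):

/* Sketch: select a 32 ms OCP debounce time through the parent's regmap.
 * dev_get_drvdata(pdev->dev.parent) returning the hi6421_pmic is an
 * assumption about the MFD core driver. */
static int example_set_ocp_debounce(struct platform_device *pdev)
{
	struct hi6421_pmic *pmic = dev_get_drvdata(pdev->dev.parent);

	return regmap_update_bits(pmic->regmap, HI6421_OCP_DEB_CTRL_REG,
				  HI6421_OCP_DEB_SEL_MASK,
				  HI6421_OCP_DEB_SEL_32MS);
}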
diff --git a/include/linux/mfd/htc-egpio.h b/include/linux/mfd/htc-egpio.h
new file mode 100644
index 000000000..b4201c971
--- /dev/null
+++ b/include/linux/mfd/htc-egpio.h
@@ -0,0 +1,57 @@
+/*
+ * HTC simple EGPIO irq and gpio extender
+ */
+
+#ifndef __HTC_EGPIO_H__
+#define __HTC_EGPIO_H__
+
+#include <linux/gpio.h>
+
+/* Descriptive values for all-in or all-out htc_egpio_chip descriptors. */
+#define HTC_EGPIO_OUTPUT (~0)
+#define HTC_EGPIO_INPUT 0
+
+/**
+ * struct htc_egpio_chip - descriptor to create gpio_chip for register range
+ * @reg_start: index of first register
+ * @gpio_base: gpio number of first pin in this register range
+ * @num_gpios: number of gpios in this register range, max BITS_PER_LONG
+ * (number of registers = DIV_ROUND_UP(num_gpios, reg_width))
+ * @direction: bitfield, '0' = input, '1' = output
+ */
+struct htc_egpio_chip {
+ int reg_start;
+ int gpio_base;
+ int num_gpios;
+ unsigned long direction;
+ unsigned long initial_values;
+};
+
+/**
+ * struct htc_egpio_platform_data - description provided by the arch
+ * @irq_base: beginning of available IRQs (eg, IRQ_BOARD_START)
+ * @num_irqs: number of irqs
+ * @reg_width: number of bits per register, either 8 or 16 bit
+ * @bus_width: alignment of the registers, either 16 or 32 bit
+ * @invert_acks: set if chip requires writing '0' to ack an irq, instead of '1'
+ * @ack_register: location of the irq/ack register
+ * @chip: pointer to array of htc_egpio_chip descriptors
+ * @num_chips: number of egpio chip descriptors
+ */
+struct htc_egpio_platform_data {
+ int bus_width;
+ int reg_width;
+
+ int irq_base;
+ int num_irqs;
+ int invert_acks;
+ int ack_register;
+
+ struct htc_egpio_chip *chip;
+ int num_chips;
+};
+
+/* Determine the wakeup irq, to be called during early resume */
+extern int htc_egpio_get_wakeup_irq(struct device *dev);
+
+#endif
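The two structures above are normally filled in by board code. A hedged example describing one 8-bit, output-only register bank; all numbers are illustrative.

/* Illustrative board-code sketch: one register of 8 output-only egpios. */
static struct htc_egpio_chip example_egpio_chips[] = {
	{
		.reg_start      = 0,
		.gpio_base      = 64,	/* arbitrary example base */
		.num_gpios      = 8,
		.direction      = HTC_EGPIO_OUTPUT,
		.initial_values = 0,
	},
};

static struct htc_egpio_platform_data example_egpio_pdata = {
	.reg_width    = 8,
	.bus_width    = 16,
	.irq_base     = 0,	/* no irqs in this example */
	.num_irqs     = 0,
	.ack_register = 0,
	.chip         = example_egpio_chips,
	.num_chips    = ARRAY_SIZE(example_egpio_chips),
};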
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
new file mode 100644
index 000000000..3d3ed67bd
--- /dev/null
+++ b/include/linux/mfd/htc-pasic3.h
@@ -0,0 +1,54 @@
+/*
+ * HTC PASIC3 driver - LEDs and DS1WM
+ *
+ * Copyright (c) 2007 Philipp Zabel <philipp.zabel@gmail.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+
+#ifndef __PASIC3_H
+#define __PASIC3_H
+
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+
+extern void pasic3_write_register(struct device *dev, u32 reg, u8 val);
+extern u8 pasic3_read_register(struct device *dev, u32 reg);
+
+/*
+ * mask for registers 0x20,0x21,0x22
+ */
+#define PASIC3_MASK_LED0 0x04
+#define PASIC3_MASK_LED1 0x08
+#define PASIC3_MASK_LED2 0x40
+
+/*
+ * bits in register 0x06
+ */
+#define PASIC3_BIT2_LED0 0x08
+#define PASIC3_BIT2_LED1 0x10
+#define PASIC3_BIT2_LED2 0x20
+
+struct pasic3_led {
+ struct led_classdev led;
+ unsigned int hw_num;
+ unsigned int bit2;
+ unsigned int mask;
+ struct pasic3_leds_machinfo *pdata;
+};
+
+struct pasic3_leds_machinfo {
+ unsigned int num_leds;
+ unsigned int power_gpio;
+ struct pasic3_led *leds;
+};
+
+struct pasic3_platform_data {
+ struct pasic3_leds_machinfo *led_pdata;
+ unsigned int clock_rate;
+};
+
+#endif
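The register helpers take the PASIC3 parent device, so sub-drivers use them together with the mask definitions above. A sketch that sets an LED mask bit with a read-modify-write; which of the 0x20-0x22 registers drives which LED is board knowledge, and 0x20 is used here purely as an example.

/* Sketch: set the LED0 mask bit in register 0x20 (register choice is
 * illustrative; see the PASIC3_MASK_* comment above). */
static void example_enable_led0(struct device *pasic3_dev)
{
	u8 val = pasic3_read_register(pasic3_dev, 0x20);

	pasic3_write_register(pasic3_dev, 0x20, val | PASIC3_MASK_LED0);
}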
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
new file mode 100644
index 000000000..439a7a617
--- /dev/null
+++ b/include/linux/mfd/intel_msic.h
@@ -0,0 +1,456 @@
+/*
+ * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC
+ *
+ * Copyright (C) 2011, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_INTEL_MSIC_H__
+#define __LINUX_MFD_INTEL_MSIC_H__
+
+/* ID */
+#define INTEL_MSIC_ID0 0x000 /* RO */
+#define INTEL_MSIC_ID1 0x001 /* RO */
+
+/* IRQ */
+#define INTEL_MSIC_IRQLVL1 0x002
+#define INTEL_MSIC_ADC1INT 0x003
+#define INTEL_MSIC_CCINT 0x004
+#define INTEL_MSIC_PWRSRCINT 0x005
+#define INTEL_MSIC_PWRSRCINT1 0x006
+#define INTEL_MSIC_CHRINT 0x007
+#define INTEL_MSIC_CHRINT1 0x008
+#define INTEL_MSIC_RTCIRQ 0x009
+#define INTEL_MSIC_GPIO0LVIRQ 0x00a
+#define INTEL_MSIC_GPIO1LVIRQ 0x00b
+#define INTEL_MSIC_GPIOHVIRQ 0x00c
+#define INTEL_MSIC_VRINT 0x00d
+#define INTEL_MSIC_OCAUDIO 0x00e
+#define INTEL_MSIC_ACCDET 0x00f
+#define INTEL_MSIC_RESETIRQ1 0x010
+#define INTEL_MSIC_RESETIRQ2 0x011
+#define INTEL_MSIC_MADC1INT 0x012
+#define INTEL_MSIC_MCCINT 0x013
+#define INTEL_MSIC_MPWRSRCINT 0x014
+#define INTEL_MSIC_MPWRSRCINT1 0x015
+#define INTEL_MSIC_MCHRINT 0x016
+#define INTEL_MSIC_MCHRINT1 0x017
+#define INTEL_MSIC_RTCIRQMASK 0x018
+#define INTEL_MSIC_GPIO0LVIRQMASK 0x019
+#define INTEL_MSIC_GPIO1LVIRQMASK 0x01a
+#define INTEL_MSIC_GPIOHVIRQMASK 0x01b
+#define INTEL_MSIC_VRINTMASK 0x01c
+#define INTEL_MSIC_OCAUDIOMASK 0x01d
+#define INTEL_MSIC_ACCDETMASK 0x01e
+#define INTEL_MSIC_RESETIRQ1MASK 0x01f
+#define INTEL_MSIC_RESETIRQ2MASK 0x020
+#define INTEL_MSIC_IRQLVL1MSK 0x021
+#define INTEL_MSIC_PBCONFIG 0x03e
+#define INTEL_MSIC_PBSTATUS 0x03f /* RO */
+
+/* GPIO */
+#define INTEL_MSIC_GPIO0LV7CTLO 0x040
+#define INTEL_MSIC_GPIO0LV6CTLO 0x041
+#define INTEL_MSIC_GPIO0LV5CTLO 0x042
+#define INTEL_MSIC_GPIO0LV4CTLO 0x043
+#define INTEL_MSIC_GPIO0LV3CTLO 0x044
+#define INTEL_MSIC_GPIO0LV2CTLO 0x045
+#define INTEL_MSIC_GPIO0LV1CTLO 0x046
+#define INTEL_MSIC_GPIO0LV0CTLO 0x047
+#define INTEL_MSIC_GPIO1LV7CTLOS 0x048
+#define INTEL_MSIC_GPIO1LV6CTLO 0x049
+#define INTEL_MSIC_GPIO1LV5CTLO 0x04a
+#define INTEL_MSIC_GPIO1LV4CTLO 0x04b
+#define INTEL_MSIC_GPIO1LV3CTLO 0x04c
+#define INTEL_MSIC_GPIO1LV2CTLO 0x04d
+#define INTEL_MSIC_GPIO1LV1CTLO 0x04e
+#define INTEL_MSIC_GPIO1LV0CTLO 0x04f
+#define INTEL_MSIC_GPIO0LV7CTLI 0x050
+#define INTEL_MSIC_GPIO0LV6CTLI 0x051
+#define INTEL_MSIC_GPIO0LV5CTLI 0x052
+#define INTEL_MSIC_GPIO0LV4CTLI 0x053
+#define INTEL_MSIC_GPIO0LV3CTLI 0x054
+#define INTEL_MSIC_GPIO0LV2CTLI 0x055
+#define INTEL_MSIC_GPIO0LV1CTLI 0x056
+#define INTEL_MSIC_GPIO0LV0CTLI 0x057
+#define INTEL_MSIC_GPIO1LV7CTLIS 0x058
+#define INTEL_MSIC_GPIO1LV6CTLI 0x059
+#define INTEL_MSIC_GPIO1LV5CTLI 0x05a
+#define INTEL_MSIC_GPIO1LV4CTLI 0x05b
+#define INTEL_MSIC_GPIO1LV3CTLI 0x05c
+#define INTEL_MSIC_GPIO1LV2CTLI 0x05d
+#define INTEL_MSIC_GPIO1LV1CTLI 0x05e
+#define INTEL_MSIC_GPIO1LV0CTLI 0x05f
+#define INTEL_MSIC_PWM0CLKDIV1 0x061
+#define INTEL_MSIC_PWM0CLKDIV0 0x062
+#define INTEL_MSIC_PWM1CLKDIV1 0x063
+#define INTEL_MSIC_PWM1CLKDIV0 0x064
+#define INTEL_MSIC_PWM2CLKDIV1 0x065
+#define INTEL_MSIC_PWM2CLKDIV0 0x066
+#define INTEL_MSIC_PWM0DUTYCYCLE 0x067
+#define INTEL_MSIC_PWM1DUTYCYCLE 0x068
+#define INTEL_MSIC_PWM2DUTYCYCLE 0x069
+#define INTEL_MSIC_GPIO0HV3CTLO 0x06d
+#define INTEL_MSIC_GPIO0HV2CTLO 0x06e
+#define INTEL_MSIC_GPIO0HV1CTLO 0x06f
+#define INTEL_MSIC_GPIO0HV0CTLO 0x070
+#define INTEL_MSIC_GPIO1HV3CTLO 0x071
+#define INTEL_MSIC_GPIO1HV2CTLO 0x072
+#define INTEL_MSIC_GPIO1HV1CTLO 0x073
+#define INTEL_MSIC_GPIO1HV0CTLO 0x074
+#define INTEL_MSIC_GPIO0HV3CTLI 0x075
+#define INTEL_MSIC_GPIO0HV2CTLI 0x076
+#define INTEL_MSIC_GPIO0HV1CTLI 0x077
+#define INTEL_MSIC_GPIO0HV0CTLI 0x078
+#define INTEL_MSIC_GPIO1HV3CTLI 0x079
+#define INTEL_MSIC_GPIO1HV2CTLI 0x07a
+#define INTEL_MSIC_GPIO1HV1CTLI 0x07b
+#define INTEL_MSIC_GPIO1HV0CTLI 0x07c
+
+/* SVID */
+#define INTEL_MSIC_SVIDCTRL0 0x080
+#define INTEL_MSIC_SVIDCTRL1 0x081
+#define INTEL_MSIC_SVIDCTRL2 0x082
+#define INTEL_MSIC_SVIDTXLASTPKT3 0x083 /* RO */
+#define INTEL_MSIC_SVIDTXLASTPKT2 0x084 /* RO */
+#define INTEL_MSIC_SVIDTXLASTPKT1 0x085 /* RO */
+#define INTEL_MSIC_SVIDTXLASTPKT0 0x086 /* RO */
+#define INTEL_MSIC_SVIDPKTOUTBYTE3 0x087
+#define INTEL_MSIC_SVIDPKTOUTBYTE2 0x088
+#define INTEL_MSIC_SVIDPKTOUTBYTE1 0x089
+#define INTEL_MSIC_SVIDPKTOUTBYTE0 0x08a
+#define INTEL_MSIC_SVIDRXVPDEBUG1 0x08b
+#define INTEL_MSIC_SVIDRXVPDEBUG0 0x08c
+#define INTEL_MSIC_SVIDRXLASTPKT3 0x08d /* RO */
+#define INTEL_MSIC_SVIDRXLASTPKT2 0x08e /* RO */
+#define INTEL_MSIC_SVIDRXLASTPKT1 0x08f /* RO */
+#define INTEL_MSIC_SVIDRXLASTPKT0 0x090 /* RO */
+#define INTEL_MSIC_SVIDRXCHKSTATUS3 0x091 /* RO */
+#define INTEL_MSIC_SVIDRXCHKSTATUS2 0x092 /* RO */
+#define INTEL_MSIC_SVIDRXCHKSTATUS1 0x093 /* RO */
+#define INTEL_MSIC_SVIDRXCHKSTATUS0 0x094 /* RO */
+
+/* VREG */
+#define INTEL_MSIC_VCCLATCH 0x0c0
+#define INTEL_MSIC_VNNLATCH 0x0c1
+#define INTEL_MSIC_VCCCNT 0x0c2
+#define INTEL_MSIC_SMPSRAMP 0x0c3
+#define INTEL_MSIC_VNNCNT 0x0c4
+#define INTEL_MSIC_VNNAONCNT 0x0c5
+#define INTEL_MSIC_VCC122AONCNT 0x0c6
+#define INTEL_MSIC_V180AONCNT 0x0c7
+#define INTEL_MSIC_V500CNT 0x0c8
+#define INTEL_MSIC_VIHFCNT 0x0c9
+#define INTEL_MSIC_LDORAMP1 0x0ca
+#define INTEL_MSIC_LDORAMP2 0x0cb
+#define INTEL_MSIC_VCC108AONCNT 0x0cc
+#define INTEL_MSIC_VCC108ASCNT 0x0cd
+#define INTEL_MSIC_VCC108CNT 0x0ce
+#define INTEL_MSIC_VCCA100ASCNT 0x0cf
+#define INTEL_MSIC_VCCA100CNT 0x0d0
+#define INTEL_MSIC_VCC180AONCNT 0x0d1
+#define INTEL_MSIC_VCC180CNT 0x0d2
+#define INTEL_MSIC_VCC330CNT 0x0d3
+#define INTEL_MSIC_VUSB330CNT 0x0d4
+#define INTEL_MSIC_VCCSDIOCNT 0x0d5
+#define INTEL_MSIC_VPROG1CNT 0x0d6
+#define INTEL_MSIC_VPROG2CNT 0x0d7
+#define INTEL_MSIC_VEMMCSCNT 0x0d8
+#define INTEL_MSIC_VEMMC1CNT 0x0d9
+#define INTEL_MSIC_VEMMC2CNT 0x0da
+#define INTEL_MSIC_VAUDACNT 0x0db
+#define INTEL_MSIC_VHSPCNT 0x0dc
+#define INTEL_MSIC_VHSNCNT 0x0dd
+#define INTEL_MSIC_VHDMICNT 0x0de
+#define INTEL_MSIC_VOTGCNT 0x0df
+#define INTEL_MSIC_V1P35CNT 0x0e0
+#define INTEL_MSIC_V330AONCNT 0x0e1
+
+/* RESET */
+#define INTEL_MSIC_CHIPCNTRL 0x100 /* WO */
+#define INTEL_MSIC_ERCONFIG 0x101
+
+/* BURST */
+#define INTEL_MSIC_BATCURRENTLIMIT12 0x102
+#define INTEL_MSIC_BATTIMELIMIT12 0x103
+#define INTEL_MSIC_BATTIMELIMIT3 0x104
+#define INTEL_MSIC_BATTIMEDB 0x105
+#define INTEL_MSIC_BRSTCONFIGOUTPUTS 0x106
+#define INTEL_MSIC_BRSTCONFIGACTIONS 0x107
+#define INTEL_MSIC_BURSTCONTROLSTATUS 0x108
+
+/* RTC */
+#define INTEL_MSIC_RTCB1 0x140 /* RO */
+#define INTEL_MSIC_RTCB2 0x141 /* RO */
+#define INTEL_MSIC_RTCB3 0x142 /* RO */
+#define INTEL_MSIC_RTCB4 0x143 /* RO */
+#define INTEL_MSIC_RTCOB1 0x144
+#define INTEL_MSIC_RTCOB2 0x145
+#define INTEL_MSIC_RTCOB3 0x146
+#define INTEL_MSIC_RTCOB4 0x147
+#define INTEL_MSIC_RTCAB1 0x148
+#define INTEL_MSIC_RTCAB2 0x149
+#define INTEL_MSIC_RTCAB3 0x14a
+#define INTEL_MSIC_RTCAB4 0x14b
+#define INTEL_MSIC_RTCWAB1 0x14c
+#define INTEL_MSIC_RTCWAB2 0x14d
+#define INTEL_MSIC_RTCWAB3 0x14e
+#define INTEL_MSIC_RTCWAB4 0x14f
+#define INTEL_MSIC_RTCSC1 0x150
+#define INTEL_MSIC_RTCSC2 0x151
+#define INTEL_MSIC_RTCSC3 0x152
+#define INTEL_MSIC_RTCSC4 0x153
+#define INTEL_MSIC_RTCSTATUS 0x154 /* RO */
+#define INTEL_MSIC_RTCCONFIG1 0x155
+#define INTEL_MSIC_RTCCONFIG2 0x156
+
+/* CHARGER */
+#define INTEL_MSIC_BDTIMER 0x180
+#define INTEL_MSIC_BATTRMV 0x181
+#define INTEL_MSIC_VBUSDET 0x182
+#define INTEL_MSIC_VBUSDET1 0x183
+#define INTEL_MSIC_ADPHVDET 0x184
+#define INTEL_MSIC_ADPLVDET 0x185
+#define INTEL_MSIC_ADPDETDBDM 0x186
+#define INTEL_MSIC_LOWBATTDET 0x187
+#define INTEL_MSIC_CHRCTRL 0x188
+#define INTEL_MSIC_CHRCVOLTAGE 0x189
+#define INTEL_MSIC_CHRCCURRENT 0x18a
+#define INTEL_MSIC_SPCHARGER 0x18b
+#define INTEL_MSIC_CHRTTIME 0x18c
+#define INTEL_MSIC_CHRCTRL1 0x18d
+#define INTEL_MSIC_PWRSRCLMT 0x18e
+#define INTEL_MSIC_CHRSTWDT 0x18f
+#define INTEL_MSIC_WDTWRITE 0x190 /* WO */
+#define INTEL_MSIC_CHRSAFELMT 0x191
+#define INTEL_MSIC_SPWRSRCINT 0x192 /* RO */
+#define INTEL_MSIC_SPWRSRCINT1 0x193 /* RO */
+#define INTEL_MSIC_CHRLEDPWM 0x194
+#define INTEL_MSIC_CHRLEDCTRL 0x195
+
+/* ADC */
+#define INTEL_MSIC_ADC1CNTL1 0x1c0
+#define INTEL_MSIC_ADC1CNTL2 0x1c1
+#define INTEL_MSIC_ADC1CNTL3 0x1c2
+#define INTEL_MSIC_ADC1OFFSETH 0x1c3 /* RO */
+#define INTEL_MSIC_ADC1OFFSETL 0x1c4 /* RO */
+#define INTEL_MSIC_ADC1ADDR0 0x1c5
+#define INTEL_MSIC_ADC1ADDR1 0x1c6
+#define INTEL_MSIC_ADC1ADDR2 0x1c7
+#define INTEL_MSIC_ADC1ADDR3 0x1c8
+#define INTEL_MSIC_ADC1ADDR4 0x1c9
+#define INTEL_MSIC_ADC1ADDR5 0x1ca
+#define INTEL_MSIC_ADC1ADDR6 0x1cb
+#define INTEL_MSIC_ADC1ADDR7 0x1cc
+#define INTEL_MSIC_ADC1ADDR8 0x1cd
+#define INTEL_MSIC_ADC1ADDR9 0x1ce
+#define INTEL_MSIC_ADC1ADDR10 0x1cf
+#define INTEL_MSIC_ADC1ADDR11 0x1d0
+#define INTEL_MSIC_ADC1ADDR12 0x1d1
+#define INTEL_MSIC_ADC1ADDR13 0x1d2
+#define INTEL_MSIC_ADC1ADDR14 0x1d3
+#define INTEL_MSIC_ADC1SNS0H 0x1d4 /* RO */
+#define INTEL_MSIC_ADC1SNS0L 0x1d5 /* RO */
+#define INTEL_MSIC_ADC1SNS1H 0x1d6 /* RO */
+#define INTEL_MSIC_ADC1SNS1L 0x1d7 /* RO */
+#define INTEL_MSIC_ADC1SNS2H 0x1d8 /* RO */
+#define INTEL_MSIC_ADC1SNS2L 0x1d9 /* RO */
+#define INTEL_MSIC_ADC1SNS3H 0x1da /* RO */
+#define INTEL_MSIC_ADC1SNS3L 0x1db /* RO */
+#define INTEL_MSIC_ADC1SNS4H 0x1dc /* RO */
+#define INTEL_MSIC_ADC1SNS4L 0x1dd /* RO */
+#define INTEL_MSIC_ADC1SNS5H 0x1de /* RO */
+#define INTEL_MSIC_ADC1SNS5L 0x1df /* RO */
+#define INTEL_MSIC_ADC1SNS6H 0x1e0 /* RO */
+#define INTEL_MSIC_ADC1SNS6L 0x1e1 /* RO */
+#define INTEL_MSIC_ADC1SNS7H 0x1e2 /* RO */
+#define INTEL_MSIC_ADC1SNS7L 0x1e3 /* RO */
+#define INTEL_MSIC_ADC1SNS8H 0x1e4 /* RO */
+#define INTEL_MSIC_ADC1SNS8L 0x1e5 /* RO */
+#define INTEL_MSIC_ADC1SNS9H 0x1e6 /* RO */
+#define INTEL_MSIC_ADC1SNS9L 0x1e7 /* RO */
+#define INTEL_MSIC_ADC1SNS10H 0x1e8 /* RO */
+#define INTEL_MSIC_ADC1SNS10L 0x1e9 /* RO */
+#define INTEL_MSIC_ADC1SNS11H 0x1ea /* RO */
+#define INTEL_MSIC_ADC1SNS11L 0x1eb /* RO */
+#define INTEL_MSIC_ADC1SNS12H 0x1ec /* RO */
+#define INTEL_MSIC_ADC1SNS12L 0x1ed /* RO */
+#define INTEL_MSIC_ADC1SNS13H 0x1ee /* RO */
+#define INTEL_MSIC_ADC1SNS13L 0x1ef /* RO */
+#define INTEL_MSIC_ADC1SNS14H 0x1f0 /* RO */
+#define INTEL_MSIC_ADC1SNS14L 0x1f1 /* RO */
+#define INTEL_MSIC_ADC1BV0H 0x1f2 /* RO */
+#define INTEL_MSIC_ADC1BV0L 0x1f3 /* RO */
+#define INTEL_MSIC_ADC1BV1H 0x1f4 /* RO */
+#define INTEL_MSIC_ADC1BV1L 0x1f5 /* RO */
+#define INTEL_MSIC_ADC1BV2H 0x1f6 /* RO */
+#define INTEL_MSIC_ADC1BV2L 0x1f7 /* RO */
+#define INTEL_MSIC_ADC1BV3H 0x1f8 /* RO */
+#define INTEL_MSIC_ADC1BV3L 0x1f9 /* RO */
+#define INTEL_MSIC_ADC1BI0H 0x1fa /* RO */
+#define INTEL_MSIC_ADC1BI0L 0x1fb /* RO */
+#define INTEL_MSIC_ADC1BI1H 0x1fc /* RO */
+#define INTEL_MSIC_ADC1BI1L 0x1fd /* RO */
+#define INTEL_MSIC_ADC1BI2H 0x1fe /* RO */
+#define INTEL_MSIC_ADC1BI2L 0x1ff /* RO */
+#define INTEL_MSIC_ADC1BI3H 0x200 /* RO */
+#define INTEL_MSIC_ADC1BI3L 0x201 /* RO */
+#define INTEL_MSIC_CCCNTL 0x202
+#define INTEL_MSIC_CCOFFSETH 0x203 /* RO */
+#define INTEL_MSIC_CCOFFSETL 0x204 /* RO */
+#define INTEL_MSIC_CCADCHA 0x205 /* RO */
+#define INTEL_MSIC_CCADCLA 0x206 /* RO */
+
+/* AUDIO */
+#define INTEL_MSIC_AUDPLLCTRL 0x240
+#define INTEL_MSIC_DMICBUF0123 0x241
+#define INTEL_MSIC_DMICBUF45 0x242
+#define INTEL_MSIC_DMICGPO 0x244
+#define INTEL_MSIC_DMICMUX 0x245
+#define INTEL_MSIC_DMICCLK 0x246
+#define INTEL_MSIC_MICBIAS 0x247
+#define INTEL_MSIC_ADCCONFIG 0x248
+#define INTEL_MSIC_MICAMP1 0x249
+#define INTEL_MSIC_MICAMP2 0x24a
+#define INTEL_MSIC_NOISEMUX 0x24b
+#define INTEL_MSIC_AUDIOMUX12 0x24c
+#define INTEL_MSIC_AUDIOMUX34 0x24d
+#define INTEL_MSIC_AUDIOSINC 0x24e
+#define INTEL_MSIC_AUDIOTXEN 0x24f
+#define INTEL_MSIC_HSEPRXCTRL 0x250
+#define INTEL_MSIC_IHFRXCTRL 0x251
+#define INTEL_MSIC_VOICETXVOL 0x252
+#define INTEL_MSIC_SIDETONEVOL 0x253
+#define INTEL_MSIC_MUSICSHARVOL 0x254
+#define INTEL_MSIC_VOICETXCTRL 0x255
+#define INTEL_MSIC_HSMIXER 0x256
+#define INTEL_MSIC_DACCONFIG 0x257
+#define INTEL_MSIC_SOFTMUTE 0x258
+#define INTEL_MSIC_HSLVOLCTRL 0x259
+#define INTEL_MSIC_HSRVOLCTRL 0x25a
+#define INTEL_MSIC_IHFLVOLCTRL 0x25b
+#define INTEL_MSIC_IHFRVOLCTRL 0x25c
+#define INTEL_MSIC_DRIVEREN 0x25d
+#define INTEL_MSIC_LINEOUTCTRL 0x25e
+#define INTEL_MSIC_VIB1CTRL1 0x25f
+#define INTEL_MSIC_VIB1CTRL2 0x260
+#define INTEL_MSIC_VIB1CTRL3 0x261
+#define INTEL_MSIC_VIB1SPIPCM_1 0x262
+#define INTEL_MSIC_VIB1SPIPCM_2 0x263
+#define INTEL_MSIC_VIB1CTRL5 0x264
+#define INTEL_MSIC_VIB2CTRL1 0x265
+#define INTEL_MSIC_VIB2CTRL2 0x266
+#define INTEL_MSIC_VIB2CTRL3 0x267
+#define INTEL_MSIC_VIB2SPIPCM_1 0x268
+#define INTEL_MSIC_VIB2SPIPCM_2 0x269
+#define INTEL_MSIC_VIB2CTRL5 0x26a
+#define INTEL_MSIC_BTNCTRL1 0x26b
+#define INTEL_MSIC_BTNCTRL2 0x26c
+#define INTEL_MSIC_PCM1TXSLOT01 0x26d
+#define INTEL_MSIC_PCM1TXSLOT23 0x26e
+#define INTEL_MSIC_PCM1TXSLOT45 0x26f
+#define INTEL_MSIC_PCM1RXSLOT0123 0x270
+#define INTEL_MSIC_PCM1RXSLOT045 0x271
+#define INTEL_MSIC_PCM2TXSLOT01 0x272
+#define INTEL_MSIC_PCM2TXSLOT23 0x273
+#define INTEL_MSIC_PCM2TXSLOT45 0x274
+#define INTEL_MSIC_PCM2RXSLOT01 0x275
+#define INTEL_MSIC_PCM2RXSLOT23 0x276
+#define INTEL_MSIC_PCM2RXSLOT45 0x277
+#define INTEL_MSIC_PCM1CTRL1 0x278
+#define INTEL_MSIC_PCM1CTRL2 0x279
+#define INTEL_MSIC_PCM1CTRL3 0x27a
+#define INTEL_MSIC_PCM2CTRL1 0x27b
+#define INTEL_MSIC_PCM2CTRL2 0x27c
+
+/* HDMI */
+#define INTEL_MSIC_HDMIPUEN 0x280
+#define INTEL_MSIC_HDMISTATUS 0x281 /* RO */
+
+/* Physical address of the start of the MSIC interrupt tree in SRAM */
+#define INTEL_MSIC_IRQ_PHYS_BASE 0xffff7fc0
+
+/**
+ * struct intel_msic_gpio_pdata - platform data for the MSIC GPIO driver
+ * @gpio_base: base number for the GPIOs
+ */
+struct intel_msic_gpio_pdata {
+ unsigned gpio_base;
+};
+
+/**
+ * struct intel_msic_ocd_pdata - platform data for the MSIC OCD driver
+ * @gpio: GPIO number used for OCD interrupts
+ *
+ * The MSIC MFD driver converts @gpio into an IRQ number and passes it to
+ * the OCD driver as %IORESOURCE_IRQ.
+ */
+struct intel_msic_ocd_pdata {
+ unsigned gpio;
+};
+
+/* MSIC embedded blocks (subdevices) */
+enum intel_msic_block {
+ INTEL_MSIC_BLOCK_TOUCH,
+ INTEL_MSIC_BLOCK_ADC,
+ INTEL_MSIC_BLOCK_BATTERY,
+ INTEL_MSIC_BLOCK_GPIO,
+ INTEL_MSIC_BLOCK_AUDIO,
+ INTEL_MSIC_BLOCK_HDMI,
+ INTEL_MSIC_BLOCK_THERMAL,
+ INTEL_MSIC_BLOCK_POWER_BTN,
+ INTEL_MSIC_BLOCK_OCD,
+
+ INTEL_MSIC_BLOCK_LAST,
+};
+
+/**
+ * struct intel_msic_platform_data - platform data for the MSIC driver
+ * @irq: array of interrupt numbers, one per device. If @irq is set to %0
+ * for a given block, the corresponding platform device is not
+ * created. For devices which don't have an interrupt, use %0xff
+ * (this is the same as in the SFI spec).
+ * @gpio: platform data for the MSIC GPIO driver
+ * @ocd: platform data for the MSIC OCD driver
+ *
+ * Once the MSIC driver is initialized, the register interface is ready to
+ * use. All the platform devices for subdevices are created after the
+ * register interface is ready so that we can guarantee its availability to
+ * the subdevice drivers.
+ *
+ * Interrupt numbers are passed to the subdevices via %IORESOURCE_IRQ
+ * resources of the created platform device.
+ */
+struct intel_msic_platform_data {
+ int irq[INTEL_MSIC_BLOCK_LAST];
+ struct intel_msic_gpio_pdata *gpio;
+ struct intel_msic_ocd_pdata *ocd;
+};
+
+struct intel_msic;
+
+extern int intel_msic_reg_read(unsigned short reg, u8 *val);
+extern int intel_msic_reg_write(unsigned short reg, u8 val);
+extern int intel_msic_reg_update(unsigned short reg, u8 val, u8 mask);
+extern int intel_msic_bulk_read(unsigned short *reg, u8 *buf, size_t count);
+extern int intel_msic_bulk_write(unsigned short *reg, u8 *buf, size_t count);
+
+/*
+ * pdev_to_intel_msic - gets an MSIC instance from the platform device
+ * @pdev: platform device pointer
+ *
+ * The client drivers need to have a pointer to the MSIC instance if they
+ * want to call intel_msic_irq_read(). This macro can be used for
+ * convenience to get the MSIC pointer from @pdev where needed. This is
+ * _only_ valid for devices which are managed by the MSIC.
+ */
+#define pdev_to_intel_msic(pdev) (dev_get_drvdata(pdev->dev.parent))
+
+extern int intel_msic_irq_read(struct intel_msic *msic, unsigned short reg,
+ u8 *val);
+
+#endif /* __LINUX_MFD_INTEL_MSIC_H__ */
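The exported accessors take plain register offsets, while interrupt status registers are read through intel_msic_irq_read() with the instance obtained via pdev_to_intel_msic(). A hedged sketch for a hypothetical subdevice driver:

/* Sketch for a hypothetical MSIC subdevice: read the chip ID register
 * directly, and an interrupt status register via the MSIC instance. */
static int example_msic_read(struct platform_device *pdev)
{
	struct intel_msic *msic = pdev_to_intel_msic(pdev);
	u8 id0, pwrsrc;
	int ret;

	ret = intel_msic_reg_read(INTEL_MSIC_ID0, &id0);
	if (ret)
		return ret;

	ret = intel_msic_irq_read(msic, INTEL_MSIC_PWRSRCINT, &pwrsrc);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "ID0 %#x, PWRSRCINT %#x\n", id0, pwrsrc);
	return 0;
}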
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
new file mode 100644
index 000000000..abcbfcf32
--- /dev/null
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -0,0 +1,30 @@
+/*
+ * intel_soc_pmic.h - Intel SoC PMIC Driver
+ *
+ * Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Yang, Bin <bin.yang@intel.com>
+ * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
+ */
+
+#ifndef __INTEL_SOC_PMIC_H__
+#define __INTEL_SOC_PMIC_H__
+
+#include <linux/regmap.h>
+
+struct intel_soc_pmic {
+ int irq;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_chip_data;
+};
+
+#endif /* __INTEL_SOC_PMIC_H__ */
diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h
new file mode 100644
index 000000000..5c4d29f66
--- /dev/null
+++ b/include/linux/mfd/ipaq-micro.h
@@ -0,0 +1,148 @@
+/*
+ * Header file for the Compaq iPAQ Micro MFD
+ */
+
+#ifndef _MFD_IPAQ_MICRO_H_
+#define _MFD_IPAQ_MICRO_H_
+
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+
+#define TX_BUF_SIZE 32
+#define RX_BUF_SIZE 16
+#define CHAR_SOF 0x02
+
+/*
+ * These are the different messages that can be sent to the microcontroller
+ * to control various aspects.
+ */
+#define MSG_VERSION 0x0
+#define MSG_KEYBOARD 0x2
+#define MSG_TOUCHSCREEN 0x3
+#define MSG_EEPROM_READ 0x4
+#define MSG_EEPROM_WRITE 0x5
+#define MSG_THERMAL_SENSOR 0x6
+#define MSG_NOTIFY_LED 0x8
+#define MSG_BATTERY 0x9
+#define MSG_SPI_READ 0xb
+#define MSG_SPI_WRITE 0xc
+#define MSG_BACKLIGHT 0xd /* H3600 only */
+#define MSG_CODEC_CTRL 0xe /* H3100 only */
+#define MSG_DISPLAY_CTRL 0xf /* H3100 only */
+
+/* state of receiver parser */
+enum rx_state {
+ STATE_SOF = 0, /* Next byte should be start of frame */
+ STATE_ID, /* Next byte is ID & message length */
+ STATE_DATA, /* Next byte is a data byte */
+ STATE_CHKSUM /* Next byte should be checksum */
+};
+
+/**
+ * struct ipaq_micro_txdev - TX state
+ * @len: length of message in TX buffer
+ * @index: current index into TX buffer
+ * @buf: TX buffer
+ */
+struct ipaq_micro_txdev {
+ u8 len;
+ u8 index;
+ u8 buf[TX_BUF_SIZE];
+};
+
+/**
+ * struct ipaq_micro_rxdev - RX state
+ * @state: context of RX state machine
+ * @chksum: calculated checksum
+ * @id: message ID from packet
+ * @len: RX buffer length
+ * @index: RX buffer index
+ * @buf: RX buffer
+ */
+struct ipaq_micro_rxdev {
+ enum rx_state state;
+ unsigned char chksum;
+ u8 id;
+ unsigned int len;
+ unsigned int index;
+ u8 buf[RX_BUF_SIZE];
+};
+
+/**
+ * struct ipaq_micro_msg - message to the iPAQ microcontroller
+ * @id: 4-bit ID of the message
+ * @tx_len: length of TX data
+ * @tx_data: TX data to send
+ * @rx_len: length of received RX data
+ * @rx_data: RX data to receive
+ * @ack: a completion that will be completed when RX is complete
+ * @node: list node if message gets queued
+ */
+struct ipaq_micro_msg {
+ u8 id;
+ u8 tx_len;
+ u8 tx_data[TX_BUF_SIZE];
+ u8 rx_len;
+ u8 rx_data[RX_BUF_SIZE];
+ struct completion ack;
+ struct list_head node;
+};
+
+/**
+ * struct ipaq_micro - iPAQ microcontroller state
+ * @dev: corresponding platform device
+ * @base: virtual memory base for underlying serial device
+ * @sdlc: virtual memory base for Synchronous Data Link Controller
+ * @version: version string
+ * @tx: TX state
+ * @rx: RX state
+ * @lock: lock for this state container
+ * @msg: current message
+ * @queue: message queue
+ * @key: callback for asynchronous key events
+ * @key_data: data to pass along with key events
+ * @ts: callback for asynchronous touchscreen events
+ * @ts_data: data to pass along with touchscreen events
+ */
+struct ipaq_micro {
+ struct device *dev;
+ void __iomem *base;
+ void __iomem *sdlc;
+ char version[5];
+ struct ipaq_micro_txdev tx; /* transmit ISR state */
+ struct ipaq_micro_rxdev rx; /* receive ISR state */
+ spinlock_t lock;
+ struct ipaq_micro_msg *msg;
+ struct list_head queue;
+ void (*key) (void *data, int len, unsigned char *rxdata);
+ void *key_data;
+ void (*ts) (void *data, int len, unsigned char *rxdata);
+ void *ts_data;
+};
+
+extern int
+ipaq_micro_tx_msg(struct ipaq_micro *micro, struct ipaq_micro_msg *msg);
+
+static inline int
+ipaq_micro_tx_msg_sync(struct ipaq_micro *micro,
+ struct ipaq_micro_msg *msg)
+{
+ int ret;
+
+ init_completion(&msg->ack);
+ ret = ipaq_micro_tx_msg(micro, msg);
+ wait_for_completion(&msg->ack);
+
+ return ret;
+}
+
+static inline int
+ipaq_micro_tx_msg_async(struct ipaq_micro *micro,
+ struct ipaq_micro_msg *msg)
+{
+ init_completion(&msg->ack);
+ return ipaq_micro_tx_msg(micro, msg);
+}
+
+#endif /* _MFD_IPAQ_MICRO_H_ */
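ipaq_micro_tx_msg_sync() blocks on the completion embedded in the message, so a caller only fills in the ID and TX payload. A sketch that queries the firmware version; how rx_data is interpreted afterwards is left open here.

/* Sketch: synchronously request the microcontroller version. */
static int example_get_version(struct ipaq_micro *micro)
{
	struct ipaq_micro_msg msg = {
		.id = MSG_VERSION,
		.tx_len = 0,
	};
	int ret;

	ret = ipaq_micro_tx_msg_sync(micro, &msg);
	if (ret)
		return ret;

	/* msg.rx_len bytes of version data are now in msg.rx_data */
	return 0;
}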
diff --git a/include/linux/mfd/janz.h b/include/linux/mfd/janz.h
new file mode 100644
index 000000000..e9994c469
--- /dev/null
+++ b/include/linux/mfd/janz.h
@@ -0,0 +1,54 @@
+/*
+ * Common Definitions for Janz MODULbus devices
+ *
+ * Copyright (c) 2010 Ira W. Snyder <iws@ovro.caltech.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef JANZ_H
+#define JANZ_H
+
+struct janz_platform_data {
+ /* MODULbus Module Number */
+ unsigned int modno;
+};
+
+/* PLX bridge chip onboard registers */
+struct janz_cmodio_onboard_regs {
+ u8 unused1;
+
+ /*
+ * Read access: interrupt status
+ * Write access: interrupt disable
+ */
+ u8 int_disable;
+ u8 unused2;
+
+ /*
+ * Read access: MODULbus number (hex switch)
+ * Write access: interrupt enable
+ */
+ u8 int_enable;
+ u8 unused3;
+
+ /* write-only */
+ u8 reset_assert;
+ u8 unused4;
+
+ /* write-only */
+ u8 reset_deassert;
+ u8 unused5;
+
+ /* read-write access to serial EEPROM */
+ u8 eep;
+ u8 unused6;
+
+ /* write-only access to EEPROM chip select */
+ u8 enid;
+};
+
+#endif /* JANZ_H */
diff --git a/include/linux/mfd/kempld.h b/include/linux/mfd/kempld.h
new file mode 100644
index 000000000..26e0b469e
--- /dev/null
+++ b/include/linux/mfd/kempld.h
@@ -0,0 +1,129 @@
+/*
+ * Kontron PLD driver definitions
+ *
+ * Copyright (c) 2010-2012 Kontron Europe GmbH
+ * Author: Michael Brunner <michael.brunner@kontron.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_KEMPLD_H_
+#define _LINUX_MFD_KEMPLD_H_
+
+/* kempld register definitions */
+#define KEMPLD_IOINDEX 0xa80
+#define KEMPLD_IODATA 0xa81
+#define KEMPLD_MUTEX_KEY 0x80
+#define KEMPLD_VERSION 0x00
+#define KEMPLD_VERSION_LSB 0x00
+#define KEMPLD_VERSION_MSB 0x01
+#define KEMPLD_VERSION_GET_MINOR(x) (x & 0x1f)
+#define KEMPLD_VERSION_GET_MAJOR(x) ((x >> 5) & 0x1f)
+#define KEMPLD_VERSION_GET_NUMBER(x) ((x >> 10) & 0xf)
+#define KEMPLD_VERSION_GET_TYPE(x) ((x >> 14) & 0x3)
+#define KEMPLD_BUILDNR 0x02
+#define KEMPLD_BUILDNR_LSB 0x02
+#define KEMPLD_BUILDNR_MSB 0x03
+#define KEMPLD_FEATURE 0x04
+#define KEMPLD_FEATURE_LSB 0x04
+#define KEMPLD_FEATURE_MSB 0x05
+#define KEMPLD_FEATURE_BIT_I2C (1 << 0)
+#define KEMPLD_FEATURE_BIT_WATCHDOG (1 << 1)
+#define KEMPLD_FEATURE_BIT_GPIO (1 << 2)
+#define KEMPLD_FEATURE_MASK_UART (7 << 3)
+#define KEMPLD_FEATURE_BIT_NMI (1 << 8)
+#define KEMPLD_FEATURE_BIT_SMI (1 << 9)
+#define KEMPLD_FEATURE_BIT_SCI (1 << 10)
+#define KEMPLD_SPEC 0x06
+#define KEMPLD_SPEC_GET_MINOR(x) (x & 0x0f)
+#define KEMPLD_SPEC_GET_MAJOR(x) ((x >> 4) & 0x0f)
+#define KEMPLD_IRQ_GPIO 0x35
+#define KEMPLD_IRQ_I2C 0x36
+#define KEMPLD_CFG 0x37
+#define KEMPLD_CFG_GPIO_I2C_MUX (1 << 0)
+#define KEMPLD_CFG_BIOS_WP (1 << 7)
+
+#define KEMPLD_CLK 33333333
+
+#define KEMPLD_TYPE_RELEASE 0x0
+#define KEMPLD_TYPE_DEBUG 0x1
+#define KEMPLD_TYPE_CUSTOM 0x2
+
+#define KEMPLD_VERSION_LEN 10
+
+/**
+ * struct kempld_info - PLD device information structure
+ * @major: PLD major revision
+ * @minor: PLD minor revision
+ * @buildnr: PLD build number
+ * @number: PLD board specific index
+ * @type: PLD type
+ * @spec_major: PLD FW specification major revision
+ * @spec_minor: PLD FW specification minor revision
+ * @version: PLD version string
+ */
+struct kempld_info {
+ unsigned int major;
+ unsigned int minor;
+ unsigned int buildnr;
+ unsigned int number;
+ unsigned int type;
+ unsigned int spec_major;
+ unsigned int spec_minor;
+ char version[KEMPLD_VERSION_LEN];
+};
+
+/**
+ * struct kempld_device_data - Internal representation of the PLD device
+ * @io_base: Pointer to the IO memory
+ * @io_index: Pointer to the IO index register
+ * @io_data: Pointer to the IO data register
+ * @pld_clock: PLD clock frequency
+ * @feature_mask: PLD feature mask
+ * @dev: Pointer to kernel device structure
+ * @info: KEMPLD info structure
+ * @lock: PLD mutex
+ */
+struct kempld_device_data {
+ void __iomem *io_base;
+ void __iomem *io_index;
+ void __iomem *io_data;
+ u32 pld_clock;
+ u32 feature_mask;
+ struct device *dev;
+ struct kempld_info info;
+ struct mutex lock;
+};
+
+/**
+ * struct kempld_platform_data - PLD hardware configuration structure
+ * @pld_clock: PLD clock frequency
+ * @gpio_base: GPIO base pin number
+ * @ioresource: IO addresses of the PLD
+ * @get_hardware_mutex: PLD specific callback to acquire the hardware mutex
+ * @release_hardware_mutex: PLD specific callback to release the hardware mutex
+ * @get_info: PLD specific get_info callback
+ * @register_cells: PLD specific register_cells callback
+ */
+struct kempld_platform_data {
+ u32 pld_clock;
+ int gpio_base;
+ struct resource *ioresource;
+ void (*get_hardware_mutex) (struct kempld_device_data *);
+ void (*release_hardware_mutex) (struct kempld_device_data *);
+ int (*get_info) (struct kempld_device_data *);
+ int (*register_cells) (struct kempld_device_data *);
+};
+
+extern void kempld_get_mutex(struct kempld_device_data *pld);
+extern void kempld_release_mutex(struct kempld_device_data *pld);
+extern u8 kempld_read8(struct kempld_device_data *pld, u8 index);
+extern void kempld_write8(struct kempld_device_data *pld, u8 index, u8 data);
+extern u16 kempld_read16(struct kempld_device_data *pld, u8 index);
+extern void kempld_write16(struct kempld_device_data *pld, u8 index, u16 data);
+extern u32 kempld_read32(struct kempld_device_data *pld, u8 index);
+extern void kempld_write32(struct kempld_device_data *pld, u8 index, u32 data);
+
+#endif /* _LINUX_MFD_KEMPLD_H_ */
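Accesses to the PLD index/data register pair are expected to be bracketed by the mutex helpers. A sketch that reads and decodes the version word using the macros defined above:

/* Sketch: read the 16-bit version register under the PLD mutex and
 * decode it with the KEMPLD_VERSION_GET_* helpers. */
static void example_read_version(struct kempld_device_data *pld)
{
	u16 version;

	kempld_get_mutex(pld);
	version = kempld_read16(pld, KEMPLD_VERSION);
	kempld_release_mutex(pld);

	dev_info(pld->dev, "PLD rev %d.%d\n",
		 KEMPLD_VERSION_GET_MAJOR(version),
		 KEMPLD_VERSION_GET_MINOR(version));
}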
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
new file mode 100644
index 000000000..594bc591f
--- /dev/null
+++ b/include/linux/mfd/lm3533.h
@@ -0,0 +1,104 @@
+/*
+ * lm3533.h -- LM3533 interface
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_LM3533_H
+#define __LINUX_MFD_LM3533_H
+
+#define LM3533_ATTR_RO(_name) \
+ DEVICE_ATTR(_name, S_IRUGO, show_##_name, NULL)
+#define LM3533_ATTR_RW(_name) \
+ DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
+
+struct device;
+struct regmap;
+
+struct lm3533 {
+ struct device *dev;
+
+ struct regmap *regmap;
+
+ int gpio_hwen;
+ int irq;
+
+ unsigned have_als:1;
+ unsigned have_backlights:1;
+ unsigned have_leds:1;
+};
+
+struct lm3533_ctrlbank {
+ struct lm3533 *lm3533;
+ struct device *dev;
+ int id;
+};
+
+struct lm3533_als_platform_data {
+ unsigned pwm_mode:1; /* PWM input mode (default analog) */
+ u8 r_select; /* 1 - 127 (ignored in PWM-mode) */
+};
+
+struct lm3533_bl_platform_data {
+ char *name;
+ u16 max_current; /* 5000 - 29800 uA (800 uA step) */
+ u8 default_brightness; /* 0 - 255 */
+ u8 pwm; /* 0 - 0x3f */
+};
+
+struct lm3533_led_platform_data {
+ char *name;
+ const char *default_trigger;
+ u16 max_current; /* 5000 - 29800 uA (800 uA step) */
+ u8 pwm; /* 0 - 0x3f */
+};
+
+enum lm3533_boost_freq {
+ LM3533_BOOST_FREQ_500KHZ,
+ LM3533_BOOST_FREQ_1000KHZ,
+};
+
+enum lm3533_boost_ovp {
+ LM3533_BOOST_OVP_16V,
+ LM3533_BOOST_OVP_24V,
+ LM3533_BOOST_OVP_32V,
+ LM3533_BOOST_OVP_40V,
+};
+
+struct lm3533_platform_data {
+ int gpio_hwen;
+
+ enum lm3533_boost_ovp boost_ovp;
+ enum lm3533_boost_freq boost_freq;
+
+ struct lm3533_als_platform_data *als;
+
+ struct lm3533_bl_platform_data *backlights;
+ int num_backlights;
+
+ struct lm3533_led_platform_data *leds;
+ int num_leds;
+};
+
+extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb);
+extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb);
+
+extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val);
+extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb,
+ u16 imax);
+extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val);
+extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val);
+
+extern int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val);
+extern int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val);
+extern int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask);
+
+#endif /* __LINUX_MFD_LM3533_H */
diff --git a/include/linux/mfd/lp3943.h b/include/linux/mfd/lp3943.h
new file mode 100644
index 000000000..3490db782
--- /dev/null
+++ b/include/linux/mfd/lp3943.h
@@ -0,0 +1,114 @@
+/*
+ * TI/National Semiconductor LP3943 Device
+ *
+ * Copyright 2013 Texas Instruments
+ *
+ * Author: Milo Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __MFD_LP3943_H__
+#define __MFD_LP3943_H__
+
+#include <linux/gpio.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+/* Registers */
+#define LP3943_REG_GPIO_A 0x00
+#define LP3943_REG_GPIO_B 0x01
+#define LP3943_REG_PRESCALE0 0x02
+#define LP3943_REG_PWM0 0x03
+#define LP3943_REG_PRESCALE1 0x04
+#define LP3943_REG_PWM1 0x05
+#define LP3943_REG_MUX0 0x06
+#define LP3943_REG_MUX1 0x07
+#define LP3943_REG_MUX2 0x08
+#define LP3943_REG_MUX3 0x09
+
+/* Bit description for LP3943_REG_MUX0 ~ 3 */
+#define LP3943_GPIO_IN 0x00
+#define LP3943_GPIO_OUT_HIGH 0x00
+#define LP3943_GPIO_OUT_LOW 0x01
+#define LP3943_DIM_PWM0 0x02
+#define LP3943_DIM_PWM1 0x03
+
+#define LP3943_NUM_PWMS 2
+
+enum lp3943_pwm_output {
+ LP3943_PWM_OUT0,
+ LP3943_PWM_OUT1,
+ LP3943_PWM_OUT2,
+ LP3943_PWM_OUT3,
+ LP3943_PWM_OUT4,
+ LP3943_PWM_OUT5,
+ LP3943_PWM_OUT6,
+ LP3943_PWM_OUT7,
+ LP3943_PWM_OUT8,
+ LP3943_PWM_OUT9,
+ LP3943_PWM_OUT10,
+ LP3943_PWM_OUT11,
+ LP3943_PWM_OUT12,
+ LP3943_PWM_OUT13,
+ LP3943_PWM_OUT14,
+ LP3943_PWM_OUT15,
+};
+
+/*
+ * struct lp3943_pwm_map
+ * @output: Output pins which are mapped to each PWM channel
+ * @num_outputs: Number of outputs
+ */
+struct lp3943_pwm_map {
+ enum lp3943_pwm_output *output;
+ int num_outputs;
+};
+
+/*
+ * struct lp3943_platform_data
+ * @pwms: Output channel definitions for PWM channel 0 and 1
+ */
+struct lp3943_platform_data {
+ struct lp3943_pwm_map *pwms[LP3943_NUM_PWMS];
+};
+
+/*
+ * struct lp3943_reg_cfg
+ * @reg: Register address
+ * @mask: Register bit mask to be updated
+ * @shift: Register bit shift
+ */
+struct lp3943_reg_cfg {
+ u8 reg;
+ u8 mask;
+ u8 shift;
+};
+
+/*
+ * struct lp3943
+ * @dev: Parent device pointer
+ * @regmap: Used for I2C communication when accessing registers
+ * @pdata: LP3943 platform specific data
+ * @mux_cfg: Register configuration for pin MUX
+ * @pin_used: Bit mask of output pins in use.
+ *            This bitmask is used for pin assignment management.
+ *            1 = pin used, 0 = available.
+ *            Only the 16 LSBs are used, but the type is unsigned long
+ *            for atomic bitwise operations.
+ */
+struct lp3943 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct lp3943_platform_data *pdata;
+ const struct lp3943_reg_cfg *mux_cfg;
+ unsigned long pin_used;
+};
+
+int lp3943_read_byte(struct lp3943 *lp3943, u8 reg, u8 *read);
+int lp3943_write_byte(struct lp3943 *lp3943, u8 reg, u8 data);
+int lp3943_update_bits(struct lp3943 *lp3943, u8 reg, u8 mask, u8 data);
+#endif
diff --git a/include/linux/mfd/lp8788-isink.h b/include/linux/mfd/lp8788-isink.h
new file mode 100644
index 000000000..f38262d21
--- /dev/null
+++ b/include/linux/mfd/lp8788-isink.h
@@ -0,0 +1,52 @@
+/*
+ * TI LP8788 MFD - common definitions for current sinks
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ISINK_LP8788_H__
+#define __ISINK_LP8788_H__
+
+/* register address */
+#define LP8788_ISINK_CTRL 0x99
+#define LP8788_ISINK12_IOUT 0x9A
+#define LP8788_ISINK3_IOUT 0x9B
+#define LP8788_ISINK1_PWM 0x9C
+#define LP8788_ISINK2_PWM 0x9D
+#define LP8788_ISINK3_PWM 0x9E
+
+/* mask bits */
+#define LP8788_ISINK1_IOUT_M 0x0F /* Addr 9Ah */
+#define LP8788_ISINK2_IOUT_M 0xF0
+#define LP8788_ISINK3_IOUT_M 0x0F /* Addr 9Bh */
+
+/* 6 bits used for PWM code : Addr 9C ~ 9Eh */
+#define LP8788_ISINK_MAX_PWM 63
+#define LP8788_ISINK_SCALE_OFFSET 3
+
+static const u8 lp8788_iout_addr[] = {
+ LP8788_ISINK12_IOUT,
+ LP8788_ISINK12_IOUT,
+ LP8788_ISINK3_IOUT,
+};
+
+static const u8 lp8788_iout_mask[] = {
+ LP8788_ISINK1_IOUT_M,
+ LP8788_ISINK2_IOUT_M,
+ LP8788_ISINK3_IOUT_M,
+};
+
+static const u8 lp8788_pwm_addr[] = {
+ LP8788_ISINK1_PWM,
+ LP8788_ISINK2_PWM,
+ LP8788_ISINK3_PWM,
+};
+
+#endif
diff --git a/include/linux/mfd/lp8788.h b/include/linux/mfd/lp8788.h
new file mode 100644
index 000000000..786bf6679
--- /dev/null
+++ b/include/linux/mfd/lp8788.h
@@ -0,0 +1,350 @@
+/*
+ * TI LP8788 MFD Device
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __MFD_LP8788_H__
+#define __MFD_LP8788_H__
+
+#include <linux/gpio.h>
+#include <linux/irqdomain.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+#define LP8788_DEV_BUCK "lp8788-buck"
+#define LP8788_DEV_DLDO "lp8788-dldo"
+#define LP8788_DEV_ALDO "lp8788-aldo"
+#define LP8788_DEV_CHARGER "lp8788-charger"
+#define LP8788_DEV_RTC "lp8788-rtc"
+#define LP8788_DEV_BACKLIGHT "lp8788-backlight"
+#define LP8788_DEV_VIBRATOR "lp8788-vibrator"
+#define LP8788_DEV_KEYLED "lp8788-keyled"
+#define LP8788_DEV_ADC "lp8788-adc"
+
+#define LP8788_NUM_BUCKS 4
+#define LP8788_NUM_DLDOS 12
+#define LP8788_NUM_ALDOS 10
+#define LP8788_NUM_BUCK2_DVS 2
+
+#define LP8788_CHG_IRQ "CHG_IRQ"
+#define LP8788_PRSW_IRQ "PRSW_IRQ"
+#define LP8788_BATT_IRQ "BATT_IRQ"
+#define LP8788_ALM_IRQ "ALARM_IRQ"
+
+enum lp8788_int_id {
+	/* interrupt register 1 : Addr 00h */
+ LP8788_INT_TSDL,
+ LP8788_INT_TSDH,
+ LP8788_INT_UVLO,
+ LP8788_INT_FLAGMON,
+ LP8788_INT_PWRON_TIME,
+ LP8788_INT_PWRON,
+ LP8788_INT_COMP1,
+ LP8788_INT_COMP2,
+
+ /* interrupt register 2 : Addr 01h */
+ LP8788_INT_CHG_INPUT_STATE,
+ LP8788_INT_CHG_STATE,
+ LP8788_INT_EOC,
+ LP8788_INT_CHG_RESTART,
+ LP8788_INT_RESTART_TIMEOUT,
+ LP8788_INT_FULLCHG_TIMEOUT,
+ LP8788_INT_PRECHG_TIMEOUT,
+
+ /* interrupt register 3 : Addr 02h */
+ LP8788_INT_RTC_ALARM1 = 17,
+ LP8788_INT_RTC_ALARM2,
+ LP8788_INT_ENTER_SYS_SUPPORT,
+ LP8788_INT_EXIT_SYS_SUPPORT,
+ LP8788_INT_BATT_LOW,
+ LP8788_INT_NO_BATT,
+
+ LP8788_INT_MAX = 24,
+};
+
+enum lp8788_dvs_sel {
+ DVS_SEL_V0,
+ DVS_SEL_V1,
+ DVS_SEL_V2,
+ DVS_SEL_V3,
+};
+
+enum lp8788_ext_ldo_en_id {
+ EN_ALDO1,
+ EN_ALDO234,
+ EN_ALDO5,
+ EN_ALDO7,
+ EN_DLDO7,
+ EN_DLDO911,
+ EN_LDOS_MAX,
+};
+
+enum lp8788_charger_event {
+ NO_CHARGER,
+ CHARGER_DETECTED,
+};
+
+enum lp8788_bl_ctrl_mode {
+ LP8788_BL_REGISTER_ONLY,
+ LP8788_BL_COMB_PWM_BASED, /* PWM + I2C, changed by PWM input */
+ LP8788_BL_COMB_REGISTER_BASED, /* PWM + I2C, changed by I2C */
+};
+
+enum lp8788_bl_dim_mode {
+ LP8788_DIM_EXPONENTIAL,
+ LP8788_DIM_LINEAR,
+};
+
+enum lp8788_bl_full_scale_current {
+ LP8788_FULLSCALE_5000uA,
+ LP8788_FULLSCALE_8500uA,
+ LP8788_FULLSCALE_1200uA,
+ LP8788_FULLSCALE_1550uA,
+ LP8788_FULLSCALE_1900uA,
+ LP8788_FULLSCALE_2250uA,
+ LP8788_FULLSCALE_2600uA,
+ LP8788_FULLSCALE_2950uA,
+};
+
+enum lp8788_bl_ramp_step {
+ LP8788_RAMP_8us,
+ LP8788_RAMP_1024us,
+ LP8788_RAMP_2048us,
+ LP8788_RAMP_4096us,
+ LP8788_RAMP_8192us,
+ LP8788_RAMP_16384us,
+ LP8788_RAMP_32768us,
+ LP8788_RAMP_65538us,
+};
+
+enum lp8788_isink_scale {
+ LP8788_ISINK_SCALE_100mA,
+ LP8788_ISINK_SCALE_120mA,
+};
+
+enum lp8788_isink_number {
+ LP8788_ISINK_1,
+ LP8788_ISINK_2,
+ LP8788_ISINK_3,
+};
+
+enum lp8788_alarm_sel {
+ LP8788_ALARM_1,
+ LP8788_ALARM_2,
+ LP8788_ALARM_MAX,
+};
+
+enum lp8788_adc_id {
+ LPADC_VBATT_5P5,
+ LPADC_VIN_CHG,
+ LPADC_IBATT,
+ LPADC_IC_TEMP,
+ LPADC_VBATT_6P0,
+ LPADC_VBATT_5P0,
+ LPADC_ADC1,
+ LPADC_ADC2,
+ LPADC_VDD,
+ LPADC_VCOIN,
+ LPADC_VDD_LDO,
+ LPADC_ADC3,
+ LPADC_ADC4,
+ LPADC_MAX,
+};
+
+struct lp8788;
+
+/*
+ * lp8788_buck1_dvs
+ * @gpio : gpio pin number for dvs control
+ * @vsel : dvs selector for buck v1 register
+ */
+struct lp8788_buck1_dvs {
+ int gpio;
+ enum lp8788_dvs_sel vsel;
+};
+
+/*
+ * lp8788_buck2_dvs
+ * @gpio : two gpio pin numbers are used for dvs
+ * @vsel : dvs selector for buck v2 register
+ */
+struct lp8788_buck2_dvs {
+ int gpio[LP8788_NUM_BUCK2_DVS];
+ enum lp8788_dvs_sel vsel;
+};
+
+/*
+ * struct lp8788_ldo_enable_pin
+ *
+ * All LDOs are normally enabled through I2C commands, but ALDO 1 ~ 5, 7
+ * and DLDO 7, 9, 11 can also be enabled by external GPIO pins.
+ *
+ * @gpio : gpio number which is used for enabling ldos
+ * @init_state : initial gpio state (ex. GPIOF_OUT_INIT_LOW)
+ */
+struct lp8788_ldo_enable_pin {
+ int gpio;
+ int init_state;
+};
+
+/*
+ * struct lp8788_chg_param
+ * @addr : charging control register address (range : 0x11 ~ 0x1C)
+ * @val : charging parameter value
+ */
+struct lp8788_chg_param {
+ u8 addr;
+ u8 val;
+};
+
+/*
+ * struct lp8788_charger_platform_data
+ * @adc_vbatt : adc channel name for battery voltage
+ * @adc_batt_temp : adc channel name for battery temperature
+ * @max_vbatt_mv : used for calculating battery capacity
+ * @chg_params : initial charging parameters
+ * @num_chg_params : number of charging parameters
+ * @charger_event : callback used to report charger events to the platform side
+ */
+struct lp8788_charger_platform_data {
+ const char *adc_vbatt;
+ const char *adc_batt_temp;
+ unsigned int max_vbatt_mv;
+ struct lp8788_chg_param *chg_params;
+ int num_chg_params;
+ void (*charger_event) (struct lp8788 *lp,
+ enum lp8788_charger_event event);
+};
+
+/*
+ * struct lp8788_backlight_platform_data
+ * @name : backlight driver name. (default: "lcd-backlight")
+ * @initial_brightness : initial value of backlight brightness
+ * @bl_mode : brightness control by pwm or lp8788 register
+ * @dim_mode : dimming mode selection
+ * @full_scale : full scale current setting
+ * @rise_time : brightness ramp up step time
+ * @fall_time : brightness ramp down step time
+ * @pwm_pol : pwm polarity setting when bl_mode is pwm based
+ * @period_ns : platform specific PWM period value, in nanoseconds.
+ *              Only valid when bl_mode is LP8788_BL_COMB_PWM_BASED
+ */
+struct lp8788_backlight_platform_data {
+ char *name;
+ int initial_brightness;
+ enum lp8788_bl_ctrl_mode bl_mode;
+ enum lp8788_bl_dim_mode dim_mode;
+ enum lp8788_bl_full_scale_current full_scale;
+ enum lp8788_bl_ramp_step rise_time;
+ enum lp8788_bl_ramp_step fall_time;
+ enum pwm_polarity pwm_pol;
+ unsigned int period_ns;
+};
+
+/*
+ * struct lp8788_led_platform_data
+ * @name : led driver name. (default: "keyboard-backlight")
+ * @scale : current scale
+ * @num : current sink number
+ * @iout_code : current output value (Addr 9Ah ~ 9Bh)
+ */
+struct lp8788_led_platform_data {
+ char *name;
+ enum lp8788_isink_scale scale;
+ enum lp8788_isink_number num;
+ int iout_code;
+};
+
+/*
+ * struct lp8788_vib_platform_data
+ * @name : vibrator driver name
+ * @scale : current scale
+ * @num : current sink number
+ * @iout_code : current output value (Addr 9Ah ~ 9Bh)
+ * @pwm_code : PWM code value (Addr 9Ch ~ 9Eh)
+ */
+struct lp8788_vib_platform_data {
+ char *name;
+ enum lp8788_isink_scale scale;
+ enum lp8788_isink_number num;
+ int iout_code;
+ int pwm_code;
+};
+
+/*
+ * struct lp8788_platform_data
+ * @init_func : used for initializing registers
+ * before mfd driver is registered
+ * @buck_data : regulator initial data for buck
+ * @dldo_data : regulator initial data for digital ldo
+ * @aldo_data : regulator initial data for analog ldo
+ * @buck1_dvs : gpio configurations for buck1 dvs
+ * @buck2_dvs : gpio configurations for buck2 dvs
+ * @ldo_pin : gpio configurations for enabling LDOs
+ * @chg_pdata : platform data for charger driver
+ * @alarm_sel : rtc alarm selection (1 or 2)
+ * @bl_pdata : configurable data for backlight driver
+ * @led_pdata : configurable data for led driver
+ * @vib_pdata : configurable data for vibrator driver
+ * @adc_pdata : iio map data for adc driver
+ */
+struct lp8788_platform_data {
+ /* general system information */
+ int (*init_func) (struct lp8788 *lp);
+
+ /* regulators */
+ struct regulator_init_data *buck_data[LP8788_NUM_BUCKS];
+ struct regulator_init_data *dldo_data[LP8788_NUM_DLDOS];
+ struct regulator_init_data *aldo_data[LP8788_NUM_ALDOS];
+ struct lp8788_buck1_dvs *buck1_dvs;
+ struct lp8788_buck2_dvs *buck2_dvs;
+ struct lp8788_ldo_enable_pin *ldo_pin[EN_LDOS_MAX];
+
+ /* charger */
+ struct lp8788_charger_platform_data *chg_pdata;
+
+ /* rtc alarm */
+ enum lp8788_alarm_sel alarm_sel;
+
+ /* backlight */
+ struct lp8788_backlight_platform_data *bl_pdata;
+
+ /* current sinks */
+ struct lp8788_led_platform_data *led_pdata;
+ struct lp8788_vib_platform_data *vib_pdata;
+
+ /* adc iio map data */
+ struct iio_map *adc_pdata;
+};
+
+/*
+ * struct lp8788
+ * @dev : parent device pointer
+ * @regmap : used for i2c communication when accessing registers
+ * @irqdm : interrupt domain for handling nested interrupt
+ * @irq : pin number of IRQ_N
+ * @pdata : lp8788 platform specific data
+ */
+struct lp8788 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct irq_domain *irqdm;
+ int irq;
+ struct lp8788_platform_data *pdata;
+};
+
+int lp8788_irq_init(struct lp8788 *lp, int chip_irq);
+void lp8788_irq_exit(struct lp8788 *lp);
+int lp8788_read_byte(struct lp8788 *lp, u8 reg, u8 *data);
+int lp8788_read_multi_bytes(struct lp8788 *lp, u8 reg, u8 *data, size_t count);
+int lp8788_write_byte(struct lp8788 *lp, u8 reg, u8 data);
+int lp8788_update_bits(struct lp8788 *lp, u8 reg, u8 mask, u8 data);
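+
+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * a read-modify-write helper composed only from the accessors declared
+ * above, showing how the byte-level API is meant to be combined.  Real code
+ * would normally just call lp8788_update_bits() directly.
+ */
+static inline int lp8788_example_set_bits(struct lp8788 *lp, u8 reg, u8 mask)
+{
+ u8 val;
+ int ret;
+
+ ret = lp8788_read_byte(lp, reg, &val);
+ if (ret)
+ return ret;
+
+ return lp8788_write_byte(lp, reg, val | mask);
+}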
+#endif
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
new file mode 100644
index 000000000..8feac782f
--- /dev/null
+++ b/include/linux/mfd/lpc_ich.h
@@ -0,0 +1,52 @@
+/*
+ * linux/drivers/mfd/lpc_ich.h
+ *
+ * Copyright (c) 2012 Extreme Engineering Solution, Inc.
+ * Author: Aaron Sierra <asierra@xes-inc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef LPC_ICH_H
+#define LPC_ICH_H
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO 0
+#define ICH_RES_IO_SMI 1
+#define ICH_RES_MEM_OFF 2
+#define ICH_RES_MEM_GCS_PMC 0
+
+/* GPIO resources */
+#define ICH_RES_GPIO 0
+#define ICH_RES_GPE0 1
+
+/* GPIO compatibility */
+enum {
+ ICH_I3100_GPIO,
+ ICH_V5_GPIO,
+ ICH_V6_GPIO,
+ ICH_V7_GPIO,
+ ICH_V9_GPIO,
+ ICH_V10CORP_GPIO,
+ ICH_V10CONS_GPIO,
+ AVOTON_GPIO,
+};
+
+struct lpc_ich_info {
+ char name[32];
+ unsigned int iTCO_version;
+ unsigned int gpio_version;
+ u8 use_gpio;
+};
+
+#endif
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
new file mode 100644
index 000000000..f01c1fae4
--- /dev/null
+++ b/include/linux/mfd/max14577-private.h
@@ -0,0 +1,485 @@
+/*
+ * max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MAX14577_PRIVATE_H__
+#define __MAX14577_PRIVATE_H__
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_PMIC (0x46 >> 1)
+#define I2C_ADDR_MUIC (0x4A >> 1)
+#define I2C_ADDR_FG (0x6C >> 1)
+
+enum maxim_device_type {
+ MAXIM_DEVICE_TYPE_UNKNOWN = 0,
+ MAXIM_DEVICE_TYPE_MAX14577,
+ MAXIM_DEVICE_TYPE_MAX77836,
+
+ MAXIM_DEVICE_TYPE_NUM,
+};
+
+/* Slave addr = 0x4A: MUIC and Charger */
+enum max14577_reg {
+ MAX14577_REG_DEVICEID = 0x00,
+ MAX14577_REG_INT1 = 0x01,
+ MAX14577_REG_INT2 = 0x02,
+ MAX14577_REG_INT3 = 0x03,
+ MAX14577_REG_STATUS1 = 0x04,
+ MAX14577_REG_STATUS2 = 0x05,
+ MAX14577_REG_STATUS3 = 0x06,
+ MAX14577_REG_INTMASK1 = 0x07,
+ MAX14577_REG_INTMASK2 = 0x08,
+ MAX14577_REG_INTMASK3 = 0x09,
+ MAX14577_REG_CDETCTRL1 = 0x0A,
+ MAX14577_REG_RFU = 0x0B,
+ MAX14577_REG_CONTROL1 = 0x0C,
+ MAX14577_REG_CONTROL2 = 0x0D,
+ MAX14577_REG_CONTROL3 = 0x0E,
+ MAX14577_REG_CHGCTRL1 = 0x0F,
+ MAX14577_REG_CHGCTRL2 = 0x10,
+ MAX14577_REG_CHGCTRL3 = 0x11,
+ MAX14577_REG_CHGCTRL4 = 0x12,
+ MAX14577_REG_CHGCTRL5 = 0x13,
+ MAX14577_REG_CHGCTRL6 = 0x14,
+ MAX14577_REG_CHGCTRL7 = 0x15,
+
+ MAX14577_REG_END,
+};
+
+/* Slave addr = 0x4A: MUIC */
+enum max14577_muic_reg {
+ MAX14577_MUIC_REG_STATUS1 = 0x04,
+ MAX14577_MUIC_REG_STATUS2 = 0x05,
+ MAX14577_MUIC_REG_CONTROL1 = 0x0C,
+ MAX14577_MUIC_REG_CONTROL3 = 0x0E,
+
+ MAX14577_MUIC_REG_END,
+};
+
+/*
+ * Combined charger types for max14577 and max77836.
+ *
+ * On max14577 the three lower bits map directly to the STATUS2/CHGTYP field.
+ * The max77836 redefines the last two values of STATUS2/CHGTYP, so the enum
+ * carries two additional entries for it; these are simply the register value
+ * bitwise ORed with 0x8 (see the illustrative helper below the enum).
+ */
+enum max14577_muic_charger_type {
+ MAX14577_CHARGER_TYPE_NONE = 0x0,
+ MAX14577_CHARGER_TYPE_USB = 0x1,
+ MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT = 0x2,
+ MAX14577_CHARGER_TYPE_DEDICATED_CHG = 0x3,
+ MAX14577_CHARGER_TYPE_SPECIAL_500MA = 0x4,
+ /* Special 1A or 2A charger */
+ MAX14577_CHARGER_TYPE_SPECIAL_1A = 0x5,
+ /* max14577: reserved, used on max77836 */
+ MAX14577_CHARGER_TYPE_RESERVED = 0x6,
+ /* max14577: dead-battery charging with maximum current 100mA */
+ MAX14577_CHARGER_TYPE_DEAD_BATTERY = 0x7,
+ /*
+ * max77836: special charger (bias on D+/D-),
+ * matches register value of 0x6
+ */
+ MAX77836_CHARGER_TYPE_SPECIAL_BIAS = 0xe,
+ /* max77836: reserved, register value 0x7 */
+ MAX77836_CHARGER_TYPE_RESERVED = 0xf,
+};
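+
+/*
+ * Illustrative sketch (editor's addition): mapping a raw max77836
+ * STATUS2/CHGTYP field value onto the combined enum above.  Only the two
+ * chip-specific register values (0x6 and 0x7) are ORed with 0x8, as
+ * described in the comment preceding the enum.
+ */
+static inline enum max14577_muic_charger_type
+max77836_example_charger_type(u8 chgtyp)
+{
+ chgtyp &= 0x7;
+ if (chgtyp >= 0x6)
+ chgtyp |= 0x8;
+ return (enum max14577_muic_charger_type)chgtyp;
+}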
+
+/* MAX14577 interrupts */
+#define MAX14577_INT1_ADC_MASK BIT(0)
+#define MAX14577_INT1_ADCLOW_MASK BIT(1)
+#define MAX14577_INT1_ADCERR_MASK BIT(2)
+#define MAX77836_INT1_ADC1K_MASK BIT(3)
+
+#define MAX14577_INT2_CHGTYP_MASK BIT(0)
+#define MAX14577_INT2_CHGDETRUN_MASK BIT(1)
+#define MAX14577_INT2_DCDTMR_MASK BIT(2)
+#define MAX14577_INT2_DBCHG_MASK BIT(3)
+#define MAX14577_INT2_VBVOLT_MASK BIT(4)
+#define MAX77836_INT2_VIDRM_MASK BIT(5)
+
+#define MAX14577_INT3_EOC_MASK BIT(0)
+#define MAX14577_INT3_CGMBC_MASK BIT(1)
+#define MAX14577_INT3_OVP_MASK BIT(2)
+#define MAX14577_INT3_MBCCHGERR_MASK BIT(3)
+
+/* MAX14577 DEVICE ID register */
+#define DEVID_VENDORID_SHIFT 0
+#define DEVID_DEVICEID_SHIFT 3
+#define DEVID_VENDORID_MASK (0x07 << DEVID_VENDORID_SHIFT)
+#define DEVID_DEVICEID_MASK (0x1f << DEVID_DEVICEID_SHIFT)
+
+/* MAX14577 STATUS1 register */
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCLOW_SHIFT 5
+#define STATUS1_ADCERR_SHIFT 6
+#define MAX77836_STATUS1_ADC1K_SHIFT 7
+#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define STATUS1_ADCLOW_MASK BIT(STATUS1_ADCLOW_SHIFT)
+#define STATUS1_ADCERR_MASK BIT(STATUS1_ADCERR_SHIFT)
+#define MAX77836_STATUS1_ADC1K_MASK BIT(MAX77836_STATUS1_ADC1K_SHIFT)
+
+/* MAX14577 STATUS2 register */
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define MAX14577_STATUS2_DBCHG_SHIFT 5
+#define MAX77836_STATUS2_DXOVP_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define MAX77836_STATUS2_VIDRM_SHIFT 7
+#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
+#define STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
+#define MAX14577_STATUS2_DBCHG_MASK BIT(MAX14577_STATUS2_DBCHG_SHIFT)
+#define MAX77836_STATUS2_DXOVP_MASK BIT(MAX77836_STATUS2_DXOVP_SHIFT)
+#define STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
+#define MAX77836_STATUS2_VIDRM_MASK BIT(MAX77836_STATUS2_VIDRM_SHIFT)
+
+/* MAX14577 CONTROL1 register */
+#define COMN1SW_SHIFT 0
+#define COMP2SW_SHIFT 3
+#define MICEN_SHIFT 6
+#define IDBEN_SHIFT 7
+#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
+#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
+#define MICEN_MASK BIT(MICEN_SHIFT)
+#define IDBEN_MASK BIT(IDBEN_SHIFT)
+#define CLEAR_IDBEN_MICEN_MASK (COMN1SW_MASK | COMP2SW_MASK)
+#define CTRL1_SW_USB ((1 << COMP2SW_SHIFT) \
+ | (1 << COMN1SW_SHIFT))
+#define CTRL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
+ | (2 << COMN1SW_SHIFT))
+#define CTRL1_SW_UART ((3 << COMP2SW_SHIFT) \
+ | (3 << COMN1SW_SHIFT))
+#define CTRL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
+ | (0 << COMN1SW_SHIFT))
+
+/* MAX14577 CONTROL2 register */
+#define CTRL2_LOWPWR_SHIFT (0)
+#define CTRL2_ADCEN_SHIFT (1)
+#define CTRL2_CPEN_SHIFT (2)
+#define CTRL2_SFOUTASRT_SHIFT (3)
+#define CTRL2_SFOUTORD_SHIFT (4)
+#define CTRL2_ACCDET_SHIFT (5)
+#define CTRL2_USBCPINT_SHIFT (6)
+#define CTRL2_RCPS_SHIFT (7)
+#define CTRL2_LOWPWR_MASK BIT(CTRL2_LOWPWR_SHIFT)
+#define CTRL2_ADCEN_MASK BIT(CTRL2_ADCEN_SHIFT)
+#define CTRL2_CPEN_MASK BIT(CTRL2_CPEN_SHIFT)
+#define CTRL2_SFOUTASRT_MASK BIT(CTRL2_SFOUTASRT_SHIFT)
+#define CTRL2_SFOUTORD_MASK BIT(CTRL2_SFOUTORD_SHIFT)
+#define CTRL2_ACCDET_MASK BIT(CTRL2_ACCDET_SHIFT)
+#define CTRL2_USBCPINT_MASK BIT(CTRL2_USBCPINT_SHIFT)
+#define CTRL2_RCPS_MASK BIT(CTRL2_RCPS_SHIFT)
+
+#define CTRL2_CPEN1_LOWPWR0 ((1 << CTRL2_CPEN_SHIFT) | \
+ (0 << CTRL2_LOWPWR_SHIFT))
+#define CTRL2_CPEN0_LOWPWR1 ((0 << CTRL2_CPEN_SHIFT) | \
+ (1 << CTRL2_LOWPWR_SHIFT))
+
+/* MAX14577 CONTROL3 register */
+#define CTRL3_JIGSET_SHIFT 0
+#define CTRL3_BOOTSET_SHIFT 2
+#define CTRL3_ADCDBSET_SHIFT 4
+#define CTRL3_WBTH_SHIFT 6
+#define CTRL3_JIGSET_MASK (0x3 << CTRL3_JIGSET_SHIFT)
+#define CTRL3_BOOTSET_MASK (0x3 << CTRL3_BOOTSET_SHIFT)
+#define CTRL3_ADCDBSET_MASK (0x3 << CTRL3_ADCDBSET_SHIFT)
+#define CTRL3_WBTH_MASK (0x3 << CTRL3_WBTH_SHIFT)
+
+/* Slave addr = 0x4A: Charger */
+enum max14577_charger_reg {
+ MAX14577_CHG_REG_STATUS3 = 0x06,
+ MAX14577_CHG_REG_CHG_CTRL1 = 0x0F,
+ MAX14577_CHG_REG_CHG_CTRL2 = 0x10,
+ MAX14577_CHG_REG_CHG_CTRL3 = 0x11,
+ MAX14577_CHG_REG_CHG_CTRL4 = 0x12,
+ MAX14577_CHG_REG_CHG_CTRL5 = 0x13,
+ MAX14577_CHG_REG_CHG_CTRL6 = 0x14,
+ MAX14577_CHG_REG_CHG_CTRL7 = 0x15,
+
+ MAX14577_CHG_REG_END,
+};
+
+/* MAX14577 STATUS3 register */
+#define STATUS3_EOC_SHIFT 0
+#define STATUS3_CGMBC_SHIFT 1
+#define STATUS3_OVP_SHIFT 2
+#define STATUS3_MBCCHGERR_SHIFT 3
+#define STATUS3_EOC_MASK (0x1 << STATUS3_EOC_SHIFT)
+#define STATUS3_CGMBC_MASK (0x1 << STATUS3_CGMBC_SHIFT)
+#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+#define STATUS3_MBCCHGERR_MASK (0x1 << STATUS3_MBCCHGERR_SHIFT)
+
+/* MAX14577 CDETCTRL1 register */
+#define CDETCTRL1_CHGDETEN_SHIFT 0
+#define CDETCTRL1_CHGTYPMAN_SHIFT 1
+#define CDETCTRL1_DCDEN_SHIFT 2
+#define CDETCTRL1_DCD2SCT_SHIFT 3
+#define MAX14577_CDETCTRL1_DCHKTM_SHIFT 4
+#define MAX77836_CDETCTRL1_CDDLY_SHIFT 4
+#define MAX14577_CDETCTRL1_DBEXIT_SHIFT 5
+#define MAX77836_CDETCTRL1_DCDCPL_SHIFT 5
+#define CDETCTRL1_DBIDLE_SHIFT 6
+#define CDETCTRL1_CDPDET_SHIFT 7
+#define CDETCTRL1_CHGDETEN_MASK BIT(CDETCTRL1_CHGDETEN_SHIFT)
+#define CDETCTRL1_CHGTYPMAN_MASK BIT(CDETCTRL1_CHGTYPMAN_SHIFT)
+#define CDETCTRL1_DCDEN_MASK BIT(CDETCTRL1_DCDEN_SHIFT)
+#define CDETCTRL1_DCD2SCT_MASK BIT(CDETCTRL1_DCD2SCT_SHIFT)
+#define MAX14577_CDETCTRL1_DCHKTM_MASK BIT(MAX14577_CDETCTRL1_DCHKTM_SHIFT)
+#define MAX77836_CDETCTRL1_CDDLY_MASK BIT(MAX77836_CDETCTRL1_CDDLY_SHIFT)
+#define MAX14577_CDETCTRL1_DBEXIT_MASK BIT(MAX14577_CDETCTRL1_DBEXIT_SHIFT)
+#define MAX77836_CDETCTRL1_DCDCPL_MASK BIT(MAX77836_CDETCTRL1_DCDCPL_SHIFT)
+#define CDETCTRL1_DBIDLE_MASK BIT(CDETCTRL1_DBIDLE_SHIFT)
+#define CDETCTRL1_CDPDET_MASK BIT(CDETCTRL1_CDPDET_SHIFT)
+
+/* MAX14577 CHGCTRL1 register */
+#define CHGCTRL1_TCHW_SHIFT 4
+#define CHGCTRL1_TCHW_MASK (0x7 << CHGCTRL1_TCHW_SHIFT)
+
+/* MAX14577 CHGCTRL2 register */
+#define CHGCTRL2_MBCHOSTEN_SHIFT 6
+#define CHGCTRL2_MBCHOSTEN_MASK BIT(CHGCTRL2_MBCHOSTEN_SHIFT)
+#define CHGCTRL2_VCHGR_RC_SHIFT 7
+#define CHGCTRL2_VCHGR_RC_MASK BIT(CHGCTRL2_VCHGR_RC_SHIFT)
+
+/* MAX14577 CHGCTRL3 register */
+#define CHGCTRL3_MBCCVWRC_SHIFT 0
+#define CHGCTRL3_MBCCVWRC_MASK (0xf << CHGCTRL3_MBCCVWRC_SHIFT)
+
+/* MAX14577 CHGCTRL4 register */
+#define CHGCTRL4_MBCICHWRCH_SHIFT 0
+#define CHGCTRL4_MBCICHWRCH_MASK (0xf << CHGCTRL4_MBCICHWRCH_SHIFT)
+#define CHGCTRL4_MBCICHWRCL_SHIFT 4
+#define CHGCTRL4_MBCICHWRCL_MASK BIT(CHGCTRL4_MBCICHWRCL_SHIFT)
+
+/* MAX14577 CHGCTRL5 register */
+#define CHGCTRL5_EOCS_SHIFT 0
+#define CHGCTRL5_EOCS_MASK (0xf << CHGCTRL5_EOCS_SHIFT)
+
+/* MAX14577 CHGCTRL6 register */
+#define CHGCTRL6_AUTOSTOP_SHIFT 5
+#define CHGCTRL6_AUTOSTOP_MASK BIT(CHGCTRL6_AUTOSTOP_SHIFT)
+
+/* MAX14577 CHGCTRL7 register */
+#define CHGCTRL7_OTPCGHCVS_SHIFT 0
+#define CHGCTRL7_OTPCGHCVS_MASK (0x3 << CHGCTRL7_OTPCGHCVS_SHIFT)
+
+/* MAX14577 charger current limits (as in CHGCTRL4 register), uA */
+#define MAX14577_CHARGER_CURRENT_LIMIT_MIN 90000U
+#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_START 200000U
+#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_STEP 50000U
+#define MAX14577_CHARGER_CURRENT_LIMIT_MAX 950000U
+
+/* MAX77836 charger current limits (as in CHGCTRL4 register), uA */
+#define MAX77836_CHARGER_CURRENT_LIMIT_MIN 45000U
+#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_START 100000U
+#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_STEP 25000U
+#define MAX77836_CHARGER_CURRENT_LIMIT_MAX 475000U
+
+/*
+ * MAX14577 charger End-Of-Charge current limits
+ * (as in CHGCTRL5 register), uA
+ */
+#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MIN 50000U
+#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_STEP 10000U
+#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MAX 200000U
+
+/*
+ * MAX14577/MAX77836 Battery Constant Voltage
+ * (as in CHGCTRL3 register), uV
+ */
+#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MIN 4000000U
+#define MAXIM_CHARGER_CONSTANT_VOLTAGE_STEP 20000U
+#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MAX 4350000U
+
+/* Default value for fast charge timer, in hours */
+#define MAXIM_CHARGER_FAST_CHARGE_TIMER_DEFAULT 5
+
+/* MAX14577 regulator SFOUT LDO voltage, fixed, uV */
+#define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000
+
+/* MAX77836 regulator LDOx voltage, uV */
+#define MAX77836_REGULATOR_LDO_VOLTAGE_MIN 800000
+#define MAX77836_REGULATOR_LDO_VOLTAGE_MAX 3950000
+#define MAX77836_REGULATOR_LDO_VOLTAGE_STEP 50000
+#define MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM 64
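+
+/*
+ * Illustrative sketch (editor's addition): the selector-to-voltage relation
+ * implied by the constants above, uV = MIN + sel * STEP for
+ * sel in [0, STEPS_NUM - 1] (800000 + 63 * 50000 = 3950000 = MAX).
+ */
+static inline int max77836_example_ldo_uv(unsigned int sel)
+{
+ if (sel >= MAX77836_REGULATOR_LDO_VOLTAGE_STEPS_NUM)
+ return -EINVAL;
+ return MAX77836_REGULATOR_LDO_VOLTAGE_MIN +
+ sel * MAX77836_REGULATOR_LDO_VOLTAGE_STEP;
+}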
+
+/* Slave addr = 0x46: PMIC */
+enum max77836_pmic_reg {
+ MAX77836_PMIC_REG_PMIC_ID = 0x20,
+ MAX77836_PMIC_REG_PMIC_REV = 0x21,
+ MAX77836_PMIC_REG_INTSRC = 0x22,
+ MAX77836_PMIC_REG_INTSRC_MASK = 0x23,
+ MAX77836_PMIC_REG_TOPSYS_INT = 0x24,
+ MAX77836_PMIC_REG_TOPSYS_INT_MASK = 0x26,
+ MAX77836_PMIC_REG_TOPSYS_STAT = 0x28,
+ MAX77836_PMIC_REG_MRSTB_CNTL = 0x2A,
+ MAX77836_PMIC_REG_LSCNFG = 0x2B,
+
+ MAX77836_LDO_REG_CNFG1_LDO1 = 0x51,
+ MAX77836_LDO_REG_CNFG2_LDO1 = 0x52,
+ MAX77836_LDO_REG_CNFG1_LDO2 = 0x53,
+ MAX77836_LDO_REG_CNFG2_LDO2 = 0x54,
+ MAX77836_LDO_REG_CNFG_LDO_BIAS = 0x55,
+
+ MAX77836_COMP_REG_COMP1 = 0x60,
+
+ MAX77836_PMIC_REG_END,
+};
+
+#define MAX77836_INTSRC_MASK_TOP_INT_SHIFT 1
+#define MAX77836_INTSRC_MASK_MUIC_CHG_INT_SHIFT 3
+#define MAX77836_INTSRC_MASK_TOP_INT_MASK BIT(MAX77836_INTSRC_MASK_TOP_INT_SHIFT)
+#define MAX77836_INTSRC_MASK_MUIC_CHG_INT_MASK BIT(MAX77836_INTSRC_MASK_MUIC_CHG_INT_SHIFT)
+
+/* MAX77836 PMIC interrupts */
+#define MAX77836_TOPSYS_INT_T120C_SHIFT 0
+#define MAX77836_TOPSYS_INT_T140C_SHIFT 1
+#define MAX77836_TOPSYS_INT_T120C_MASK BIT(MAX77836_TOPSYS_INT_T120C_SHIFT)
+#define MAX77836_TOPSYS_INT_T140C_MASK BIT(MAX77836_TOPSYS_INT_T140C_SHIFT)
+
+/* LDO1/LDO2 CONFIG1 register */
+#define MAX77836_CNFG1_LDO_PWRMD_SHIFT 6
+#define MAX77836_CNFG1_LDO_TV_SHIFT 0
+#define MAX77836_CNFG1_LDO_PWRMD_MASK (0x3 << MAX77836_CNFG1_LDO_PWRMD_SHIFT)
+#define MAX77836_CNFG1_LDO_TV_MASK (0x3f << MAX77836_CNFG1_LDO_TV_SHIFT)
+
+/* LDO1/LDO2 CONFIG2 register */
+#define MAX77836_CNFG2_LDO_OVCLMPEN_SHIFT 7
+#define MAX77836_CNFG2_LDO_ALPMEN_SHIFT 6
+#define MAX77836_CNFG2_LDO_COMP_SHIFT 4
+#define MAX77836_CNFG2_LDO_POK_SHIFT 3
+#define MAX77836_CNFG2_LDO_ADE_SHIFT 1
+#define MAX77836_CNFG2_LDO_SS_SHIFT 0
+#define MAX77836_CNFG2_LDO_OVCLMPEN_MASK BIT(MAX77836_CNFG2_LDO_OVCLMPEN_SHIFT)
+#define MAX77836_CNFG2_LDO_ALPMEN_MASK BIT(MAX77836_CNFG2_LDO_ALPMEN_SHIFT)
+#define MAX77836_CNFG2_LDO_COMP_MASK (0x3 << MAX77836_CNFG2_LDO_COMP_SHIFT)
+#define MAX77836_CNFG2_LDO_POK_MASK BIT(MAX77836_CNFG2_LDO_POK_SHIFT)
+#define MAX77836_CNFG2_LDO_ADE_MASK BIT(MAX77836_CNFG2_LDO_ADE_SHIFT)
+#define MAX77836_CNFG2_LDO_SS_MASK BIT(MAX77836_CNFG2_LDO_SS_SHIFT)
+
+/* Slave addr = 0x6C: Fuel-Gauge/Battery */
+enum max77836_fg_reg {
+ MAX77836_FG_REG_VCELL_MSB = 0x02,
+ MAX77836_FG_REG_VCELL_LSB = 0x03,
+ MAX77836_FG_REG_SOC_MSB = 0x04,
+ MAX77836_FG_REG_SOC_LSB = 0x05,
+ MAX77836_FG_REG_MODE_H = 0x06,
+ MAX77836_FG_REG_MODE_L = 0x07,
+ MAX77836_FG_REG_VERSION_MSB = 0x08,
+ MAX77836_FG_REG_VERSION_LSB = 0x09,
+ MAX77836_FG_REG_HIBRT_H = 0x0A,
+ MAX77836_FG_REG_HIBRT_L = 0x0B,
+ MAX77836_FG_REG_CONFIG_H = 0x0C,
+ MAX77836_FG_REG_CONFIG_L = 0x0D,
+ MAX77836_FG_REG_VALRT_MIN = 0x14,
+ MAX77836_FG_REG_VALRT_MAX = 0x15,
+ MAX77836_FG_REG_CRATE_MSB = 0x16,
+ MAX77836_FG_REG_CRATE_LSB = 0x17,
+ MAX77836_FG_REG_VRESET = 0x18,
+ MAX77836_FG_REG_FGID = 0x19,
+ MAX77836_FG_REG_STATUS_H = 0x1A,
+ MAX77836_FG_REG_STATUS_L = 0x1B,
+ /*
+ * TODO: TABLE registers
+ * TODO: CMD register
+ */
+
+ MAX77836_FG_REG_END,
+};
+
+enum max14577_irq {
+ /* INT1 */
+ MAX14577_IRQ_INT1_ADC,
+ MAX14577_IRQ_INT1_ADCLOW,
+ MAX14577_IRQ_INT1_ADCERR,
+ MAX77836_IRQ_INT1_ADC1K,
+
+ /* INT2 */
+ MAX14577_IRQ_INT2_CHGTYP,
+ MAX14577_IRQ_INT2_CHGDETRUN,
+ MAX14577_IRQ_INT2_DCDTMR,
+ MAX14577_IRQ_INT2_DBCHG,
+ MAX14577_IRQ_INT2_VBVOLT,
+ MAX77836_IRQ_INT2_VIDRM,
+
+ /* INT3 */
+ MAX14577_IRQ_INT3_EOC,
+ MAX14577_IRQ_INT3_CGMBC,
+ MAX14577_IRQ_INT3_OVP,
+ MAX14577_IRQ_INT3_MBCCHGERR,
+
+ /* TOPSYS_INT, only MAX77836 */
+ MAX77836_IRQ_TOPSYS_T140C,
+ MAX77836_IRQ_TOPSYS_T120C,
+
+ MAX14577_IRQ_NUM,
+};
+
+struct max14577 {
+ struct device *dev;
+ struct i2c_client *i2c; /* Slave addr = 0x4A */
+ struct i2c_client *i2c_pmic; /* Slave addr = 0x46 */
+ enum maxim_device_type dev_type;
+
+ struct regmap *regmap; /* For MUIC and Charger */
+ struct regmap *regmap_pmic;
+
+ struct regmap_irq_chip_data *irq_data; /* For MUIC and Charger */
+ struct regmap_irq_chip_data *irq_data_pmic;
+ int irq;
+};
+
+/* MAX14577 shared regmap API function */
+static inline int max14577_read_reg(struct regmap *map, u8 reg, u8 *dest)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(map, reg, &val);
+ *dest = val;
+
+ return ret;
+}
+
+static inline int max14577_bulk_read(struct regmap *map, u8 reg, u8 *buf,
+ int count)
+{
+ return regmap_bulk_read(map, reg, buf, count);
+}
+
+static inline int max14577_write_reg(struct regmap *map, u8 reg, u8 value)
+{
+ return regmap_write(map, reg, value);
+}
+
+static inline int max14577_bulk_write(struct regmap *map, u8 reg, u8 *buf,
+ int count)
+{
+ return regmap_bulk_write(map, reg, buf, count);
+}
+
+static inline int max14577_update_reg(struct regmap *map, u8 reg, u8 mask,
+ u8 val)
+{
+ return regmap_update_bits(map, reg, mask, val);
+}
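+
+/*
+ * Illustrative sketch (editor's addition, not part of the original header):
+ * how a consumer could read the DEVICEID register and extract the vendor
+ * and device fields with the DEVID_* masks defined above.
+ */
+static inline int max14577_example_read_ids(struct regmap *map,
+ u8 *vendor, u8 *device)
+{
+ u8 id;
+ int ret;
+
+ ret = max14577_read_reg(map, MAX14577_REG_DEVICEID, &id);
+ if (ret < 0)
+ return ret;
+
+ *vendor = (id & DEVID_VENDORID_MASK) >> DEVID_VENDORID_SHIFT;
+ *device = (id & DEVID_DEVICEID_MASK) >> DEVID_DEVICEID_SHIFT;
+ return 0;
+}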
+
+#endif /* __MAX14577_PRIVATE_H__ */
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
new file mode 100644
index 000000000..ccfaf952c
--- /dev/null
+++ b/include/linux/mfd/max14577.h
@@ -0,0 +1,107 @@
+/*
+ * max14577.h - Driver for the Maxim 14577/77836
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This driver is based on max8997.h
+ *
+ * MAX14577 has MUIC and Charger devices.
+ * The devices share the same I2C bus and interrupt line,
+ * and are handled by this mfd driver.
+ *
+ * MAX77836 has additional PMIC and Fuel-Gauge on different I2C slave
+ * addresses.
+ */
+
+#ifndef __MAX14577_H__
+#define __MAX14577_H__
+
+#include <linux/regulator/consumer.h>
+
+/* MAX14577 regulator IDs */
+enum max14577_regulators {
+ MAX14577_SAFEOUT = 0,
+ MAX14577_CHARGER,
+
+ MAX14577_REGULATOR_NUM,
+};
+
+/* MAX77836 regulator IDs */
+enum max77836_regulators {
+ MAX77836_SAFEOUT = 0,
+ MAX77836_CHARGER,
+ MAX77836_LDO1,
+ MAX77836_LDO2,
+
+ MAX77836_REGULATOR_NUM,
+};
+
+struct max14577_regulator_platform_data {
+ int id;
+ struct regulator_init_data *initdata;
+ struct device_node *of_node;
+};
+
+struct max14577_charger_platform_data {
+ u32 constant_uvolt;
+ u32 fast_charge_uamp;
+ u32 eoc_uamp;
+ u32 ovp_uvolt;
+};
+
+/*
+ * MAX14577 MFD platform data
+ */
+struct max14577_platform_data {
+ /* IRQ */
+ int irq_base;
+
+ /* current control GPIOs */
+ int gpio_pogo_vbatt_en;
+ int gpio_pogo_vbus_en;
+
+ /* current control GPIO control function */
+ int (*set_gpio_pogo_vbatt_en) (int gpio_val);
+ int (*set_gpio_pogo_vbus_en) (int gpio_val);
+
+ int (*set_gpio_pogo_cb) (int new_dev);
+
+ struct max14577_regulator_platform_data *regulators;
+};
+
+/*
+ * Valid limits of current for max14577 and max77836 chargers.
+ * They must correspond to MBCICHWRCL and MBCICHWRCH fields in CHGCTRL4
+ * register for given chipset.
+ */
+struct maxim_charger_current {
+ /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */
+ unsigned int min;
+ /*
+ * Minimal current when high setting is active,
+ * set in CHGCTRL4/MBCICHWRCH, uA
+ */
+ unsigned int high_start;
+ /* Value of one step in high setting, uA */
+ unsigned int high_step;
+ /* Maximum current of high setting, uA */
+ unsigned int max;
+};
+
+extern const struct maxim_charger_current maxim_charger_currents[];
+extern int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits,
+ unsigned int min_ua, unsigned int max_ua, u8 *dst);
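+
+/*
+ * Illustrative sketch (editor's addition, not the implementation of
+ * maxim_charger_calc_reg_current()): how the limits above describe the
+ * high-current range.  A request at or above 'high_start' maps to a step
+ * index of (ua - high_start) / high_step, clamped at 'max'; anything below
+ * 'high_start' can only use the minimal setting 'min'.
+ */
+static inline unsigned int
+maxim_charger_example_high_step(const struct maxim_charger_current *limits,
+ unsigned int ua)
+{
+ if (ua < limits->high_start)
+ return 0;
+ if (ua > limits->max)
+ ua = limits->max;
+ return (ua - limits->high_start) / limits->high_step;
+}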
+
+#endif /* __MAX14577_H__ */
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
new file mode 100644
index 000000000..f5043490d
--- /dev/null
+++ b/include/linux/mfd/max77686-private.h
@@ -0,0 +1,464 @@
+/*
+ * max77686-private.h - Voltage regulator driver for the Maxim 77686/802
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Chiwoong Byun <woong.byun@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_MAX77686_PRIV_H
+#define __LINUX_MFD_MAX77686_PRIV_H
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+
+#define MAX77686_REG_INVALID (0xff)
+
+/* MAX77686 PMIC registers */
+enum max77686_pmic_reg {
+ MAX77686_REG_DEVICE_ID = 0x00,
+ MAX77686_REG_INTSRC = 0x01,
+ MAX77686_REG_INT1 = 0x02,
+ MAX77686_REG_INT2 = 0x03,
+
+ MAX77686_REG_INT1MSK = 0x04,
+ MAX77686_REG_INT2MSK = 0x05,
+
+ MAX77686_REG_STATUS1 = 0x06,
+ MAX77686_REG_STATUS2 = 0x07,
+
+ MAX77686_REG_PWRON = 0x08,
+ MAX77686_REG_ONOFF_DELAY = 0x09,
+ MAX77686_REG_MRSTB = 0x0A,
+ /* Reserved: 0x0B-0x0F */
+
+ MAX77686_REG_BUCK1CTRL = 0x10,
+ MAX77686_REG_BUCK1OUT = 0x11,
+ MAX77686_REG_BUCK2CTRL1 = 0x12,
+ MAX77686_REG_BUCK234FREQ = 0x13,
+ MAX77686_REG_BUCK2DVS1 = 0x14,
+ MAX77686_REG_BUCK2DVS2 = 0x15,
+ MAX77686_REG_BUCK2DVS3 = 0x16,
+ MAX77686_REG_BUCK2DVS4 = 0x17,
+ MAX77686_REG_BUCK2DVS5 = 0x18,
+ MAX77686_REG_BUCK2DVS6 = 0x19,
+ MAX77686_REG_BUCK2DVS7 = 0x1A,
+ MAX77686_REG_BUCK2DVS8 = 0x1B,
+ MAX77686_REG_BUCK3CTRL1 = 0x1C,
+ /* Reserved: 0x1D */
+ MAX77686_REG_BUCK3DVS1 = 0x1E,
+ MAX77686_REG_BUCK3DVS2 = 0x1F,
+ MAX77686_REG_BUCK3DVS3 = 0x20,
+ MAX77686_REG_BUCK3DVS4 = 0x21,
+ MAX77686_REG_BUCK3DVS5 = 0x22,
+ MAX77686_REG_BUCK3DVS6 = 0x23,
+ MAX77686_REG_BUCK3DVS7 = 0x24,
+ MAX77686_REG_BUCK3DVS8 = 0x25,
+ MAX77686_REG_BUCK4CTRL1 = 0x26,
+ /* Reserved: 0x27 */
+ MAX77686_REG_BUCK4DVS1 = 0x28,
+ MAX77686_REG_BUCK4DVS2 = 0x29,
+ MAX77686_REG_BUCK4DVS3 = 0x2A,
+ MAX77686_REG_BUCK4DVS4 = 0x2B,
+ MAX77686_REG_BUCK4DVS5 = 0x2C,
+ MAX77686_REG_BUCK4DVS6 = 0x2D,
+ MAX77686_REG_BUCK4DVS7 = 0x2E,
+ MAX77686_REG_BUCK4DVS8 = 0x2F,
+ MAX77686_REG_BUCK5CTRL = 0x30,
+ MAX77686_REG_BUCK5OUT = 0x31,
+ MAX77686_REG_BUCK6CTRL = 0x32,
+ MAX77686_REG_BUCK6OUT = 0x33,
+ MAX77686_REG_BUCK7CTRL = 0x34,
+ MAX77686_REG_BUCK7OUT = 0x35,
+ MAX77686_REG_BUCK8CTRL = 0x36,
+ MAX77686_REG_BUCK8OUT = 0x37,
+ MAX77686_REG_BUCK9CTRL = 0x38,
+ MAX77686_REG_BUCK9OUT = 0x39,
+ /* Reserved: 0x3A-0x3F */
+
+ MAX77686_REG_LDO1CTRL1 = 0x40,
+ MAX77686_REG_LDO2CTRL1 = 0x41,
+ MAX77686_REG_LDO3CTRL1 = 0x42,
+ MAX77686_REG_LDO4CTRL1 = 0x43,
+ MAX77686_REG_LDO5CTRL1 = 0x44,
+ MAX77686_REG_LDO6CTRL1 = 0x45,
+ MAX77686_REG_LDO7CTRL1 = 0x46,
+ MAX77686_REG_LDO8CTRL1 = 0x47,
+ MAX77686_REG_LDO9CTRL1 = 0x48,
+ MAX77686_REG_LDO10CTRL1 = 0x49,
+ MAX77686_REG_LDO11CTRL1 = 0x4A,
+ MAX77686_REG_LDO12CTRL1 = 0x4B,
+ MAX77686_REG_LDO13CTRL1 = 0x4C,
+ MAX77686_REG_LDO14CTRL1 = 0x4D,
+ MAX77686_REG_LDO15CTRL1 = 0x4E,
+ MAX77686_REG_LDO16CTRL1 = 0x4F,
+ MAX77686_REG_LDO17CTRL1 = 0x50,
+ MAX77686_REG_LDO18CTRL1 = 0x51,
+ MAX77686_REG_LDO19CTRL1 = 0x52,
+ MAX77686_REG_LDO20CTRL1 = 0x53,
+ MAX77686_REG_LDO21CTRL1 = 0x54,
+ MAX77686_REG_LDO22CTRL1 = 0x55,
+ MAX77686_REG_LDO23CTRL1 = 0x56,
+ MAX77686_REG_LDO24CTRL1 = 0x57,
+ MAX77686_REG_LDO25CTRL1 = 0x58,
+ MAX77686_REG_LDO26CTRL1 = 0x59,
+ /* Reserved: 0x5A-0x5F */
+ MAX77686_REG_LDO1CTRL2 = 0x60,
+ MAX77686_REG_LDO2CTRL2 = 0x61,
+ MAX77686_REG_LDO3CTRL2 = 0x62,
+ MAX77686_REG_LDO4CTRL2 = 0x63,
+ MAX77686_REG_LDO5CTRL2 = 0x64,
+ MAX77686_REG_LDO6CTRL2 = 0x65,
+ MAX77686_REG_LDO7CTRL2 = 0x66,
+ MAX77686_REG_LDO8CTRL2 = 0x67,
+ MAX77686_REG_LDO9CTRL2 = 0x68,
+ MAX77686_REG_LDO10CTRL2 = 0x69,
+ MAX77686_REG_LDO11CTRL2 = 0x6A,
+ MAX77686_REG_LDO12CTRL2 = 0x6B,
+ MAX77686_REG_LDO13CTRL2 = 0x6C,
+ MAX77686_REG_LDO14CTRL2 = 0x6D,
+ MAX77686_REG_LDO15CTRL2 = 0x6E,
+ MAX77686_REG_LDO16CTRL2 = 0x6F,
+ MAX77686_REG_LDO17CTRL2 = 0x70,
+ MAX77686_REG_LDO18CTRL2 = 0x71,
+ MAX77686_REG_LDO19CTRL2 = 0x72,
+ MAX77686_REG_LDO20CTRL2 = 0x73,
+ MAX77686_REG_LDO21CTRL2 = 0x74,
+ MAX77686_REG_LDO22CTRL2 = 0x75,
+ MAX77686_REG_LDO23CTRL2 = 0x76,
+ MAX77686_REG_LDO24CTRL2 = 0x77,
+ MAX77686_REG_LDO25CTRL2 = 0x78,
+ MAX77686_REG_LDO26CTRL2 = 0x79,
+ /* Reserved: 0x7A-0x7D */
+
+ MAX77686_REG_BBAT_CHG = 0x7E,
+ MAX77686_REG_32KHZ = 0x7F,
+
+ MAX77686_REG_PMIC_END = 0x80,
+};
+
+enum max77686_rtc_reg {
+ MAX77686_RTC_INT = 0x00,
+ MAX77686_RTC_INTM = 0x01,
+ MAX77686_RTC_CONTROLM = 0x02,
+ MAX77686_RTC_CONTROL = 0x03,
+ MAX77686_RTC_UPDATE0 = 0x04,
+ /* Reserved: 0x5 */
+ MAX77686_WTSR_SMPL_CNTL = 0x06,
+ MAX77686_RTC_SEC = 0x07,
+ MAX77686_RTC_MIN = 0x08,
+ MAX77686_RTC_HOUR = 0x09,
+ MAX77686_RTC_WEEKDAY = 0x0A,
+ MAX77686_RTC_MONTH = 0x0B,
+ MAX77686_RTC_YEAR = 0x0C,
+ MAX77686_RTC_DATE = 0x0D,
+ MAX77686_ALARM1_SEC = 0x0E,
+ MAX77686_ALARM1_MIN = 0x0F,
+ MAX77686_ALARM1_HOUR = 0x10,
+ MAX77686_ALARM1_WEEKDAY = 0x11,
+ MAX77686_ALARM1_MONTH = 0x12,
+ MAX77686_ALARM1_YEAR = 0x13,
+ MAX77686_ALARM1_DATE = 0x14,
+ MAX77686_ALARM2_SEC = 0x15,
+ MAX77686_ALARM2_MIN = 0x16,
+ MAX77686_ALARM2_HOUR = 0x17,
+ MAX77686_ALARM2_WEEKDAY = 0x18,
+ MAX77686_ALARM2_MONTH = 0x19,
+ MAX77686_ALARM2_YEAR = 0x1A,
+ MAX77686_ALARM2_DATE = 0x1B,
+};
+
+/* MAX77802 PMIC registers */
+enum max77802_pmic_reg {
+ MAX77802_REG_DEVICE_ID = 0x00,
+ MAX77802_REG_INTSRC = 0x01,
+ MAX77802_REG_INT1 = 0x02,
+ MAX77802_REG_INT2 = 0x03,
+
+ MAX77802_REG_INT1MSK = 0x04,
+ MAX77802_REG_INT2MSK = 0x05,
+
+ MAX77802_REG_STATUS1 = 0x06,
+ MAX77802_REG_STATUS2 = 0x07,
+
+ MAX77802_REG_PWRON = 0x08,
+ /* Reserved: 0x09 */
+ MAX77802_REG_MRSTB = 0x0A,
+ MAX77802_REG_EPWRHOLD = 0x0B,
+ /* Reserved: 0x0C-0x0D */
+ MAX77802_REG_BOOSTCTRL = 0x0E,
+ MAX77802_REG_BOOSTOUT = 0x0F,
+
+ MAX77802_REG_BUCK1CTRL = 0x10,
+ MAX77802_REG_BUCK1DVS1 = 0x11,
+ MAX77802_REG_BUCK1DVS2 = 0x12,
+ MAX77802_REG_BUCK1DVS3 = 0x13,
+ MAX77802_REG_BUCK1DVS4 = 0x14,
+ MAX77802_REG_BUCK1DVS5 = 0x15,
+ MAX77802_REG_BUCK1DVS6 = 0x16,
+ MAX77802_REG_BUCK1DVS7 = 0x17,
+ MAX77802_REG_BUCK1DVS8 = 0x18,
+ /* Reserved: 0x19 */
+ MAX77802_REG_BUCK2CTRL1 = 0x1A,
+ MAX77802_REG_BUCK2CTRL2 = 0x1B,
+ MAX77802_REG_BUCK2PHTRAN = 0x1C,
+ MAX77802_REG_BUCK2DVS1 = 0x1D,
+ MAX77802_REG_BUCK2DVS2 = 0x1E,
+ MAX77802_REG_BUCK2DVS3 = 0x1F,
+ MAX77802_REG_BUCK2DVS4 = 0x20,
+ MAX77802_REG_BUCK2DVS5 = 0x21,
+ MAX77802_REG_BUCK2DVS6 = 0x22,
+ MAX77802_REG_BUCK2DVS7 = 0x23,
+ MAX77802_REG_BUCK2DVS8 = 0x24,
+ /* Reserved: 0x25-0x26 */
+ MAX77802_REG_BUCK3CTRL1 = 0x27,
+ MAX77802_REG_BUCK3DVS1 = 0x28,
+ MAX77802_REG_BUCK3DVS2 = 0x29,
+ MAX77802_REG_BUCK3DVS3 = 0x2A,
+ MAX77802_REG_BUCK3DVS4 = 0x2B,
+ MAX77802_REG_BUCK3DVS5 = 0x2C,
+ MAX77802_REG_BUCK3DVS6 = 0x2D,
+ MAX77802_REG_BUCK3DVS7 = 0x2E,
+ MAX77802_REG_BUCK3DVS8 = 0x2F,
+ /* Reserved: 0x30-0x36 */
+ MAX77802_REG_BUCK4CTRL1 = 0x37,
+ MAX77802_REG_BUCK4DVS1 = 0x38,
+ MAX77802_REG_BUCK4DVS2 = 0x39,
+ MAX77802_REG_BUCK4DVS3 = 0x3A,
+ MAX77802_REG_BUCK4DVS4 = 0x3B,
+ MAX77802_REG_BUCK4DVS5 = 0x3C,
+ MAX77802_REG_BUCK4DVS6 = 0x3D,
+ MAX77802_REG_BUCK4DVS7 = 0x3E,
+ MAX77802_REG_BUCK4DVS8 = 0x3F,
+ /* Reserved: 0x40 */
+ MAX77802_REG_BUCK5CTRL = 0x41,
+ MAX77802_REG_BUCK5OUT = 0x42,
+ /* Reserved: 0x43 */
+ MAX77802_REG_BUCK6CTRL = 0x44,
+ MAX77802_REG_BUCK6DVS1 = 0x45,
+ MAX77802_REG_BUCK6DVS2 = 0x46,
+ MAX77802_REG_BUCK6DVS3 = 0x47,
+ MAX77802_REG_BUCK6DVS4 = 0x48,
+ MAX77802_REG_BUCK6DVS5 = 0x49,
+ MAX77802_REG_BUCK6DVS6 = 0x4A,
+ MAX77802_REG_BUCK6DVS7 = 0x4B,
+ MAX77802_REG_BUCK6DVS8 = 0x4C,
+ /* Reserved: 0x4D */
+ MAX77802_REG_BUCK7CTRL = 0x4E,
+ MAX77802_REG_BUCK7OUT = 0x4F,
+ /* Reserved: 0x50 */
+ MAX77802_REG_BUCK8CTRL = 0x51,
+ MAX77802_REG_BUCK8OUT = 0x52,
+ /* Reserved: 0x53 */
+ MAX77802_REG_BUCK9CTRL = 0x54,
+ MAX77802_REG_BUCK9OUT = 0x55,
+ /* Reserved: 0x56 */
+ MAX77802_REG_BUCK10CTRL = 0x57,
+ MAX77802_REG_BUCK10OUT = 0x58,
+
+ /* Reserved: 0x59-0x5F */
+
+ MAX77802_REG_LDO1CTRL1 = 0x60,
+ MAX77802_REG_LDO2CTRL1 = 0x61,
+ MAX77802_REG_LDO3CTRL1 = 0x62,
+ MAX77802_REG_LDO4CTRL1 = 0x63,
+ MAX77802_REG_LDO5CTRL1 = 0x64,
+ MAX77802_REG_LDO6CTRL1 = 0x65,
+ MAX77802_REG_LDO7CTRL1 = 0x66,
+ MAX77802_REG_LDO8CTRL1 = 0x67,
+ MAX77802_REG_LDO9CTRL1 = 0x68,
+ MAX77802_REG_LDO10CTRL1 = 0x69,
+ MAX77802_REG_LDO11CTRL1 = 0x6A,
+ MAX77802_REG_LDO12CTRL1 = 0x6B,
+ MAX77802_REG_LDO13CTRL1 = 0x6C,
+ MAX77802_REG_LDO14CTRL1 = 0x6D,
+ MAX77802_REG_LDO15CTRL1 = 0x6E,
+ /* Reserved: 0x6F */
+ MAX77802_REG_LDO17CTRL1 = 0x70,
+ MAX77802_REG_LDO18CTRL1 = 0x71,
+ MAX77802_REG_LDO19CTRL1 = 0x72,
+ MAX77802_REG_LDO20CTRL1 = 0x73,
+ MAX77802_REG_LDO21CTRL1 = 0x74,
+ MAX77802_REG_LDO22CTRL1 = 0x75,
+ MAX77802_REG_LDO23CTRL1 = 0x76,
+ MAX77802_REG_LDO24CTRL1 = 0x77,
+ MAX77802_REG_LDO25CTRL1 = 0x78,
+ MAX77802_REG_LDO26CTRL1 = 0x79,
+ MAX77802_REG_LDO27CTRL1 = 0x7A,
+ MAX77802_REG_LDO28CTRL1 = 0x7B,
+ MAX77802_REG_LDO29CTRL1 = 0x7C,
+ MAX77802_REG_LDO30CTRL1 = 0x7D,
+ /* Reserved: 0x7E */
+ MAX77802_REG_LDO32CTRL1 = 0x7F,
+ MAX77802_REG_LDO33CTRL1 = 0x80,
+ MAX77802_REG_LDO34CTRL1 = 0x81,
+ MAX77802_REG_LDO35CTRL1 = 0x82,
+ /* Reserved: 0x83-0x8F */
+ MAX77802_REG_LDO1CTRL2 = 0x90,
+ MAX77802_REG_LDO2CTRL2 = 0x91,
+ MAX77802_REG_LDO3CTRL2 = 0x92,
+ MAX77802_REG_LDO4CTRL2 = 0x93,
+ MAX77802_REG_LDO5CTRL2 = 0x94,
+ MAX77802_REG_LDO6CTRL2 = 0x95,
+ MAX77802_REG_LDO7CTRL2 = 0x96,
+ MAX77802_REG_LDO8CTRL2 = 0x97,
+ MAX77802_REG_LDO9CTRL2 = 0x98,
+ MAX77802_REG_LDO10CTRL2 = 0x99,
+ MAX77802_REG_LDO11CTRL2 = 0x9A,
+ MAX77802_REG_LDO12CTRL2 = 0x9B,
+ MAX77802_REG_LDO13CTRL2 = 0x9C,
+ MAX77802_REG_LDO14CTRL2 = 0x9D,
+ MAX77802_REG_LDO15CTRL2 = 0x9E,
+ /* Reserved: 0x9F */
+ MAX77802_REG_LDO17CTRL2 = 0xA0,
+ MAX77802_REG_LDO18CTRL2 = 0xA1,
+ MAX77802_REG_LDO19CTRL2 = 0xA2,
+ MAX77802_REG_LDO20CTRL2 = 0xA3,
+ MAX77802_REG_LDO21CTRL2 = 0xA4,
+ MAX77802_REG_LDO22CTRL2 = 0xA5,
+ MAX77802_REG_LDO23CTRL2 = 0xA6,
+ MAX77802_REG_LDO24CTRL2 = 0xA7,
+ MAX77802_REG_LDO25CTRL2 = 0xA8,
+ MAX77802_REG_LDO26CTRL2 = 0xA9,
+ MAX77802_REG_LDO27CTRL2 = 0xAA,
+ MAX77802_REG_LDO28CTRL2 = 0xAB,
+ MAX77802_REG_LDO29CTRL2 = 0xAC,
+ MAX77802_REG_LDO30CTRL2 = 0xAD,
+ /* Reserved: 0xAE */
+ MAX77802_REG_LDO32CTRL2 = 0xAF,
+ MAX77802_REG_LDO33CTRL2 = 0xB0,
+ MAX77802_REG_LDO34CTRL2 = 0xB1,
+ MAX77802_REG_LDO35CTRL2 = 0xB2,
+ /* Reserved: 0xB3 */
+
+ MAX77802_REG_BBAT_CHG = 0xB4,
+ MAX77802_REG_32KHZ = 0xB5,
+
+ MAX77802_REG_PMIC_END = 0xB6,
+};
+
+enum max77802_rtc_reg {
+ MAX77802_RTC_INT = 0xC0,
+ MAX77802_RTC_INTM = 0xC1,
+ MAX77802_RTC_CONTROLM = 0xC2,
+ MAX77802_RTC_CONTROL = 0xC3,
+ MAX77802_RTC_UPDATE0 = 0xC4,
+ MAX77802_RTC_UPDATE1 = 0xC5,
+ MAX77802_WTSR_SMPL_CNTL = 0xC6,
+ MAX77802_RTC_SEC = 0xC7,
+ MAX77802_RTC_MIN = 0xC8,
+ MAX77802_RTC_HOUR = 0xC9,
+ MAX77802_RTC_WEEKDAY = 0xCA,
+ MAX77802_RTC_MONTH = 0xCB,
+ MAX77802_RTC_YEAR = 0xCC,
+ MAX77802_RTC_DATE = 0xCD,
+ MAX77802_RTC_AE1 = 0xCE,
+ MAX77802_ALARM1_SEC = 0xCF,
+ MAX77802_ALARM1_MIN = 0xD0,
+ MAX77802_ALARM1_HOUR = 0xD1,
+ MAX77802_ALARM1_WEEKDAY = 0xD2,
+ MAX77802_ALARM1_MONTH = 0xD3,
+ MAX77802_ALARM1_YEAR = 0xD4,
+ MAX77802_ALARM1_DATE = 0xD5,
+ MAX77802_RTC_AE2 = 0xD6,
+ MAX77802_ALARM2_SEC = 0xD7,
+ MAX77802_ALARM2_MIN = 0xD8,
+ MAX77802_ALARM2_HOUR = 0xD9,
+ MAX77802_ALARM2_WEEKDAY = 0xDA,
+ MAX77802_ALARM2_MONTH = 0xDB,
+ MAX77802_ALARM2_YEAR = 0xDC,
+ MAX77802_ALARM2_DATE = 0xDD,
+
+ MAX77802_RTC_END = 0xDF,
+};
+
+enum max77686_irq_source {
+ PMIC_INT1 = 0,
+ PMIC_INT2,
+ RTC_INT,
+
+ MAX77686_IRQ_GROUP_NR,
+};
+
+enum max77686_irq {
+ MAX77686_PMICIRQ_PWRONF,
+ MAX77686_PMICIRQ_PWRONR,
+ MAX77686_PMICIRQ_JIGONBF,
+ MAX77686_PMICIRQ_JIGONBR,
+ MAX77686_PMICIRQ_ACOKBF,
+ MAX77686_PMICIRQ_ACOKBR,
+ MAX77686_PMICIRQ_ONKEY1S,
+ MAX77686_PMICIRQ_MRSTB,
+
+ MAX77686_PMICIRQ_140C,
+ MAX77686_PMICIRQ_120C,
+
+ MAX77686_RTCIRQ_RTC60S = 0,
+ MAX77686_RTCIRQ_RTCA1,
+ MAX77686_RTCIRQ_RTCA2,
+ MAX77686_RTCIRQ_SMPL,
+ MAX77686_RTCIRQ_RTC1S,
+ MAX77686_RTCIRQ_WTSR,
+};
+
+#define MAX77686_INT1_PWRONF_MSK BIT(0)
+#define MAX77686_INT1_PWRONR_MSK BIT(1)
+#define MAX77686_INT1_JIGONBF_MSK BIT(2)
+#define MAX77686_INT1_JIGONBR_MSK BIT(3)
+#define MAX77686_INT1_ACOKBF_MSK BIT(4)
+#define MAX77686_INT1_ACOKBR_MSK BIT(5)
+#define MAX77686_INT1_ONKEY1S_MSK BIT(6)
+#define MAX77686_INT1_MRSTB_MSK BIT(7)
+
+#define MAX77686_INT2_140C_MSK BIT(0)
+#define MAX77686_INT2_120C_MSK BIT(1)
+
+#define MAX77686_RTCINT_RTC60S_MSK BIT(0)
+#define MAX77686_RTCINT_RTCA1_MSK BIT(1)
+#define MAX77686_RTCINT_RTCA2_MSK BIT(2)
+#define MAX77686_RTCINT_SMPL_MSK BIT(3)
+#define MAX77686_RTCINT_RTC1S_MSK BIT(4)
+#define MAX77686_RTCINT_WTSR_MSK BIT(5)
+
+struct max77686_dev {
+ struct device *dev;
+ struct i2c_client *i2c; /* Slave addr = 0xCC: PMIC, Battery Control, and FLASH */
+ struct i2c_client *rtc; /* Slave addr = 0x0C */
+
+ unsigned long type;
+
+ struct regmap *regmap; /* regmap for mfd */
+ struct regmap *rtc_regmap; /* regmap for rtc */
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *rtc_irq_data;
+
+ int irq;
+ struct mutex irqlock;
+ int irq_masks_cur[MAX77686_IRQ_GROUP_NR];
+ int irq_masks_cache[MAX77686_IRQ_GROUP_NR];
+};
+
+enum max77686_types {
+ TYPE_MAX77686,
+ TYPE_MAX77802,
+};
+
+extern int max77686_irq_init(struct max77686_dev *max77686);
+extern void max77686_irq_exit(struct max77686_dev *max77686);
+extern int max77686_irq_resume(struct max77686_dev *max77686);
+
+#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
new file mode 100644
index 000000000..bb995ab9a
--- /dev/null
+++ b/include/linux/mfd/max77686.h
@@ -0,0 +1,133 @@
+/*
+ * max77686.h - Driver for the Maxim 77686/802
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Chiwoong Byun <woong.byun@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.h
+ *
+ * MAX77686 has PMIC and RTC devices.
+ * The devices share the same I2C bus and are handled by
+ * this mfd driver.
+ */
+
+#ifndef __LINUX_MFD_MAX77686_H
+#define __LINUX_MFD_MAX77686_H
+
+#include <linux/regulator/consumer.h>
+
+/* MAX77686 regulator IDs */
+enum max77686_regulators {
+ MAX77686_LDO1 = 0,
+ MAX77686_LDO2,
+ MAX77686_LDO3,
+ MAX77686_LDO4,
+ MAX77686_LDO5,
+ MAX77686_LDO6,
+ MAX77686_LDO7,
+ MAX77686_LDO8,
+ MAX77686_LDO9,
+ MAX77686_LDO10,
+ MAX77686_LDO11,
+ MAX77686_LDO12,
+ MAX77686_LDO13,
+ MAX77686_LDO14,
+ MAX77686_LDO15,
+ MAX77686_LDO16,
+ MAX77686_LDO17,
+ MAX77686_LDO18,
+ MAX77686_LDO19,
+ MAX77686_LDO20,
+ MAX77686_LDO21,
+ MAX77686_LDO22,
+ MAX77686_LDO23,
+ MAX77686_LDO24,
+ MAX77686_LDO25,
+ MAX77686_LDO26,
+ MAX77686_BUCK1,
+ MAX77686_BUCK2,
+ MAX77686_BUCK3,
+ MAX77686_BUCK4,
+ MAX77686_BUCK5,
+ MAX77686_BUCK6,
+ MAX77686_BUCK7,
+ MAX77686_BUCK8,
+ MAX77686_BUCK9,
+
+ MAX77686_REG_MAX,
+};
+
+/* MAX77802 regulator IDs */
+enum max77802_regulators {
+ MAX77802_BUCK1 = 0,
+ MAX77802_BUCK2,
+ MAX77802_BUCK3,
+ MAX77802_BUCK4,
+ MAX77802_BUCK5,
+ MAX77802_BUCK6,
+ MAX77802_BUCK7,
+ MAX77802_BUCK8,
+ MAX77802_BUCK9,
+ MAX77802_BUCK10,
+ MAX77802_LDO1,
+ MAX77802_LDO2,
+ MAX77802_LDO3,
+ MAX77802_LDO4,
+ MAX77802_LDO5,
+ MAX77802_LDO6,
+ MAX77802_LDO7,
+ MAX77802_LDO8,
+ MAX77802_LDO9,
+ MAX77802_LDO10,
+ MAX77802_LDO11,
+ MAX77802_LDO12,
+ MAX77802_LDO13,
+ MAX77802_LDO14,
+ MAX77802_LDO15,
+ MAX77802_LDO17,
+ MAX77802_LDO18,
+ MAX77802_LDO19,
+ MAX77802_LDO20,
+ MAX77802_LDO21,
+ MAX77802_LDO23,
+ MAX77802_LDO24,
+ MAX77802_LDO25,
+ MAX77802_LDO26,
+ MAX77802_LDO27,
+ MAX77802_LDO28,
+ MAX77802_LDO29,
+ MAX77802_LDO30,
+ MAX77802_LDO32,
+ MAX77802_LDO33,
+ MAX77802_LDO34,
+ MAX77802_LDO35,
+
+ MAX77802_REG_MAX,
+};
+
+enum max77686_opmode {
+ MAX77686_OPMODE_NORMAL,
+ MAX77686_OPMODE_LP,
+ MAX77686_OPMODE_STANDBY,
+};
+
+struct max77686_opmode_data {
+ int id;
+ int mode;
+};
+
+#endif /* __LINUX_MFD_MAX77686_H */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
new file mode 100644
index 000000000..51633ea6f
--- /dev/null
+++ b/include/linux/mfd/max77693-private.h
@@ -0,0 +1,564 @@
+/*
+ * max77693-private.h - Voltage regulator driver for the Maxim 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_MAX77693_PRIV_H
+#define __LINUX_MFD_MAX77693_PRIV_H
+
+#include <linux/i2c.h>
+
+#define MAX77693_REG_INVALID (0xff)
+
+/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
+enum max77693_pmic_reg {
+ MAX77693_LED_REG_IFLASH1 = 0x00,
+ MAX77693_LED_REG_IFLASH2 = 0x01,
+ MAX77693_LED_REG_ITORCH = 0x02,
+ MAX77693_LED_REG_ITORCHTIMER = 0x03,
+ MAX77693_LED_REG_FLASH_TIMER = 0x04,
+ MAX77693_LED_REG_FLASH_EN = 0x05,
+ MAX77693_LED_REG_MAX_FLASH1 = 0x06,
+ MAX77693_LED_REG_MAX_FLASH2 = 0x07,
+ MAX77693_LED_REG_MAX_FLASH3 = 0x08,
+ MAX77693_LED_REG_MAX_FLASH4 = 0x09,
+ MAX77693_LED_REG_VOUT_CNTL = 0x0A,
+ MAX77693_LED_REG_VOUT_FLASH1 = 0x0B,
+ MAX77693_LED_REG_VOUT_FLASH2 = 0x0C,
+ MAX77693_LED_REG_FLASH_INT = 0x0E,
+ MAX77693_LED_REG_FLASH_INT_MASK = 0x0F,
+ MAX77693_LED_REG_FLASH_STATUS = 0x10,
+
+ MAX77693_PMIC_REG_PMIC_ID1 = 0x20,
+ MAX77693_PMIC_REG_PMIC_ID2 = 0x21,
+ MAX77693_PMIC_REG_INTSRC = 0x22,
+ MAX77693_PMIC_REG_INTSRC_MASK = 0x23,
+ MAX77693_PMIC_REG_TOPSYS_INT = 0x24,
+ MAX77693_PMIC_REG_TOPSYS_INT_MASK = 0x26,
+ MAX77693_PMIC_REG_TOPSYS_STAT = 0x28,
+ MAX77693_PMIC_REG_MAINCTRL1 = 0x2A,
+ MAX77693_PMIC_REG_LSCNFG = 0x2B,
+
+ MAX77693_CHG_REG_CHG_INT = 0xB0,
+ MAX77693_CHG_REG_CHG_INT_MASK = 0xB1,
+ MAX77693_CHG_REG_CHG_INT_OK = 0xB2,
+ MAX77693_CHG_REG_CHG_DETAILS_00 = 0xB3,
+ MAX77693_CHG_REG_CHG_DETAILS_01 = 0xB4,
+ MAX77693_CHG_REG_CHG_DETAILS_02 = 0xB5,
+ MAX77693_CHG_REG_CHG_DETAILS_03 = 0xB6,
+ MAX77693_CHG_REG_CHG_CNFG_00 = 0xB7,
+ MAX77693_CHG_REG_CHG_CNFG_01 = 0xB8,
+ MAX77693_CHG_REG_CHG_CNFG_02 = 0xB9,
+ MAX77693_CHG_REG_CHG_CNFG_03 = 0xBA,
+ MAX77693_CHG_REG_CHG_CNFG_04 = 0xBB,
+ MAX77693_CHG_REG_CHG_CNFG_05 = 0xBC,
+ MAX77693_CHG_REG_CHG_CNFG_06 = 0xBD,
+ MAX77693_CHG_REG_CHG_CNFG_07 = 0xBE,
+ MAX77693_CHG_REG_CHG_CNFG_08 = 0xBF,
+ MAX77693_CHG_REG_CHG_CNFG_09 = 0xC0,
+ MAX77693_CHG_REG_CHG_CNFG_10 = 0xC1,
+ MAX77693_CHG_REG_CHG_CNFG_11 = 0xC2,
+ MAX77693_CHG_REG_CHG_CNFG_12 = 0xC3,
+ MAX77693_CHG_REG_CHG_CNFG_13 = 0xC4,
+ MAX77693_CHG_REG_CHG_CNFG_14 = 0xC5,
+ MAX77693_CHG_REG_SAFEOUT_CTRL = 0xC6,
+
+ MAX77693_PMIC_REG_END,
+};
+
+/* MAX77693 ITORCH register */
+#define TORCH_IOUT1_SHIFT 0
+#define TORCH_IOUT2_SHIFT 4
+#define TORCH_IOUT_MASK(x) (0xf << (x))
+#define TORCH_IOUT_MIN 15625
+#define TORCH_IOUT_MAX 250000
+#define TORCH_IOUT_STEP 15625
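+
+/*
+ * Illustrative sketch (editor's addition): converting a torch current in
+ * microamps to a raw 4-bit code, assuming code 0 corresponds to
+ * TORCH_IOUT_MIN and each step adds TORCH_IOUT_STEP (16 * 15625 uA =
+ * 250000 uA = TORCH_IOUT_MAX).
+ */
+static inline unsigned int max77693_example_torch_code(unsigned int ua)
+{
+ if (ua < TORCH_IOUT_MIN)
+ ua = TORCH_IOUT_MIN;
+ if (ua > TORCH_IOUT_MAX)
+ ua = TORCH_IOUT_MAX;
+ return (ua - TORCH_IOUT_MIN) / TORCH_IOUT_STEP;
+}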
+
+/* MAX77693 IFLASH1 and IFLASH2 registers */
+#define FLASH_IOUT_MIN 15625
+#define FLASH_IOUT_MAX_1LED 1000000
+#define FLASH_IOUT_MAX_2LEDS 625000
+#define FLASH_IOUT_STEP 15625
+
+/* MAX77693 TORCH_TIMER register */
+#define TORCH_TMR_NO_TIMER 0x40
+#define TORCH_TIMEOUT_MIN 262000
+#define TORCH_TIMEOUT_MAX 15728000
+
+/* MAX77693 FLASH_TIMER register */
+#define FLASH_TMR_LEVEL 0x80
+#define FLASH_TIMEOUT_MIN 62500
+#define FLASH_TIMEOUT_MAX 1000000
+#define FLASH_TIMEOUT_STEP 62500
+
+/* MAX77693 FLASH_EN register */
+#define FLASH_EN_OFF 0x0
+#define FLASH_EN_FLASH 0x1
+#define FLASH_EN_TORCH 0x2
+#define FLASH_EN_ON 0x3
+#define FLASH_EN_SHIFT(x) (6 - (x) * 2)
+#define TORCH_EN_SHIFT(x) (2 - (x) * 2)
+
+/* MAX77693 MAX_FLASH1 register */
+#define MAX_FLASH1_MAX_FL_EN 0x80
+#define MAX_FLASH1_VSYS_MIN 2400
+#define MAX_FLASH1_VSYS_MAX 3400
+#define MAX_FLASH1_VSYS_STEP 33
+
+/* MAX77693 VOUT_CNTL register */
+#define FLASH_BOOST_FIXED 0x04
+#define FLASH_BOOST_LEDNUM_2 0x80
+
+/* MAX77693 VOUT_FLASH1 register */
+#define FLASH_VOUT_MIN 3300
+#define FLASH_VOUT_MAX 5500
+#define FLASH_VOUT_STEP 25
+#define FLASH_VOUT_RMIN 0x0c
+
+/* MAX77693 FLASH_STATUS register */
+#define FLASH_STATUS_FLASH_ON BIT(3)
+#define FLASH_STATUS_TORCH_ON BIT(2)
+
+/* MAX77693 FLASH_INT register */
+#define FLASH_INT_FLED2_OPEN BIT(0)
+#define FLASH_INT_FLED2_SHORT BIT(1)
+#define FLASH_INT_FLED1_OPEN BIT(2)
+#define FLASH_INT_FLED1_SHORT BIT(3)
+#define FLASH_INT_OVER_CURRENT BIT(4)
+
+/* Fast charge timer, in hours */
+#define DEFAULT_FAST_CHARGE_TIMER 4
+/* microamps */
+#define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000
+/* minutes */
+#define DEFAULT_TOP_OFF_TIMER 30
+/* microvolts */
+#define DEFAULT_CONSTANT_VOLT 4200000
+/* microvolts */
+#define DEFAULT_MIN_SYSTEM_VOLT 3600000
+/* celsius */
+#define DEFAULT_THERMAL_REGULATION_TEMP 100
+/* microamps */
+#define DEFAULT_BATTERY_OVERCURRENT 3500000
+/* microvolts */
+#define DEFAULT_CHARGER_INPUT_THRESHOLD_VOLT 4300000
+
+/* MAX77693_CHG_REG_CHG_INT_OK register */
+#define CHG_INT_OK_BYP_SHIFT 0
+#define CHG_INT_OK_BAT_SHIFT 3
+#define CHG_INT_OK_CHG_SHIFT 4
+#define CHG_INT_OK_CHGIN_SHIFT 6
+#define CHG_INT_OK_DETBAT_SHIFT 7
+#define CHG_INT_OK_BYP_MASK BIT(CHG_INT_OK_BYP_SHIFT)
+#define CHG_INT_OK_BAT_MASK BIT(CHG_INT_OK_BAT_SHIFT)
+#define CHG_INT_OK_CHG_MASK BIT(CHG_INT_OK_CHG_SHIFT)
+#define CHG_INT_OK_CHGIN_MASK BIT(CHG_INT_OK_CHGIN_SHIFT)
+#define CHG_INT_OK_DETBAT_MASK BIT(CHG_INT_OK_DETBAT_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_00 register */
+#define CHG_DETAILS_00_CHGIN_SHIFT 5
+#define CHG_DETAILS_00_CHGIN_MASK (0x3 << CHG_DETAILS_00_CHGIN_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01 register */
+#define CHG_DETAILS_01_CHG_SHIFT 0
+#define CHG_DETAILS_01_BAT_SHIFT 4
+#define CHG_DETAILS_01_TREG_SHIFT 7
+#define CHG_DETAILS_01_CHG_MASK (0xf << CHG_DETAILS_01_CHG_SHIFT)
+#define CHG_DETAILS_01_BAT_MASK (0x7 << CHG_DETAILS_01_BAT_SHIFT)
+#define CHG_DETAILS_01_TREG_MASK BIT(7)
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01/CHG field */
+enum max77693_charger_charging_state {
+ MAX77693_CHARGING_PREQUALIFICATION = 0x0,
+ MAX77693_CHARGING_FAST_CONST_CURRENT,
+ MAX77693_CHARGING_FAST_CONST_VOLTAGE,
+ MAX77693_CHARGING_TOP_OFF,
+ MAX77693_CHARGING_DONE,
+ MAX77693_CHARGING_HIGH_TEMP,
+ MAX77693_CHARGING_TIMER_EXPIRED,
+ MAX77693_CHARGING_THERMISTOR_SUSPEND,
+ MAX77693_CHARGING_OFF,
+ MAX77693_CHARGING_RESERVED,
+ MAX77693_CHARGING_OVER_TEMP,
+ MAX77693_CHARGING_WATCHDOG_EXPIRED,
+};
+
+/* MAX77693_CHG_REG_CHG_DETAILS_01/BAT field */
+enum max77693_charger_battery_state {
+ MAX77693_BATTERY_NOBAT = 0x0,
+ /* Dead-battery or low-battery prequalification */
+ MAX77693_BATTERY_PREQUALIFICATION,
+ MAX77693_BATTERY_TIMER_EXPIRED,
+ MAX77693_BATTERY_GOOD,
+ MAX77693_BATTERY_LOWVOLTAGE,
+ MAX77693_BATTERY_OVERVOLTAGE,
+ MAX77693_BATTERY_OVERCURRENT,
+ MAX77693_BATTERY_RESERVED,
+};
+
+/* MAX77693_CHG_REG_CHG_DETAILS_02 register */
+#define CHG_DETAILS_02_BYP_SHIFT 0
+#define CHG_DETAILS_02_BYP_MASK (0xf << CHG_DETAILS_02_BYP_SHIFT)
+
+/* MAX77693 CHG_CNFG_00 register */
+#define CHG_CNFG_00_CHG_MASK 0x1
+#define CHG_CNFG_00_BUCK_MASK 0x4
+
+/* MAX77693_CHG_REG_CHG_CNFG_01 register */
+#define CHG_CNFG_01_FCHGTIME_SHIFT 0
+#define CHG_CNFG_01_CHGRSTRT_SHIFT 4
+#define CHG_CNFG_01_PQEN_SHIFT 7
+#define CHG_CNFG_01_FCHGTIME_MASK (0x7 << CHG_CNFG_01_FCHGTIME_SHIFT)
+#define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT)
+#define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_03 register */
+#define CHG_CNFG_03_TOITH_SHIFT 0
+#define CHG_CNFG_03_TOTIME_SHIFT 3
+#define CHG_CNFG_03_TOITH_MASK (0x7 << CHG_CNFG_03_TOITH_SHIFT)
+#define CHG_CNFG_03_TOTIME_MASK (0x7 << CHG_CNFG_03_TOTIME_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_04 register */
+#define CHG_CNFG_04_CHGCVPRM_SHIFT 0
+#define CHG_CNFG_04_MINVSYS_SHIFT 5
+#define CHG_CNFG_04_CHGCVPRM_MASK (0x1f << CHG_CNFG_04_CHGCVPRM_SHIFT)
+#define CHG_CNFG_04_MINVSYS_MASK (0x7 << CHG_CNFG_04_MINVSYS_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_06 register */
+#define CHG_CNFG_06_CHGPROT_SHIFT 2
+#define CHG_CNFG_06_CHGPROT_MASK (0x3 << CHG_CNFG_06_CHGPROT_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_07 register */
+#define CHG_CNFG_07_REGTEMP_SHIFT 5
+#define CHG_CNFG_07_REGTEMP_MASK (0x3 << CHG_CNFG_07_REGTEMP_SHIFT)
+
+/* MAX77693_CHG_REG_CHG_CNFG_12 register */
+#define CHG_CNFG_12_B2SOVRC_SHIFT 0
+#define CHG_CNFG_12_VCHGINREG_SHIFT 3
+#define CHG_CNFG_12_B2SOVRC_MASK (0x7 << CHG_CNFG_12_B2SOVRC_SHIFT)
+#define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT)
+
+/* MAX77693 CHG_CNFG_09 Register */
+#define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F
+
+/* MAX77693 CHG_CTRL Register */
+#define SAFEOUT_CTRL_SAFEOUT1_MASK 0x3
+#define SAFEOUT_CTRL_SAFEOUT2_MASK 0xC
+#define SAFEOUT_CTRL_ENSAFEOUT1_MASK 0x40
+#define SAFEOUT_CTRL_ENSAFEOUT2_MASK 0x80
+
+/* Slave addr = 0x4A: MUIC */
+enum max77693_muic_reg {
+ MAX77693_MUIC_REG_ID = 0x00,
+ MAX77693_MUIC_REG_INT1 = 0x01,
+ MAX77693_MUIC_REG_INT2 = 0x02,
+ MAX77693_MUIC_REG_INT3 = 0x03,
+ MAX77693_MUIC_REG_STATUS1 = 0x04,
+ MAX77693_MUIC_REG_STATUS2 = 0x05,
+ MAX77693_MUIC_REG_STATUS3 = 0x06,
+ MAX77693_MUIC_REG_INTMASK1 = 0x07,
+ MAX77693_MUIC_REG_INTMASK2 = 0x08,
+ MAX77693_MUIC_REG_INTMASK3 = 0x09,
+ MAX77693_MUIC_REG_CDETCTRL1 = 0x0A,
+ MAX77693_MUIC_REG_CDETCTRL2 = 0x0B,
+ MAX77693_MUIC_REG_CTRL1 = 0x0C,
+ MAX77693_MUIC_REG_CTRL2 = 0x0D,
+ MAX77693_MUIC_REG_CTRL3 = 0x0E,
+
+ MAX77693_MUIC_REG_END,
+};
+
+/* MAX77693 INTMASK1~2 Register */
+#define INTMASK1_ADC1K_SHIFT 3
+#define INTMASK1_ADCERR_SHIFT 2
+#define INTMASK1_ADCLOW_SHIFT 1
+#define INTMASK1_ADC_SHIFT 0
+#define INTMASK1_ADC1K_MASK (1 << INTMASK1_ADC1K_SHIFT)
+#define INTMASK1_ADCERR_MASK (1 << INTMASK1_ADCERR_SHIFT)
+#define INTMASK1_ADCLOW_MASK (1 << INTMASK1_ADCLOW_SHIFT)
+#define INTMASK1_ADC_MASK (1 << INTMASK1_ADC_SHIFT)
+
+#define INTMASK2_VIDRM_SHIFT 5
+#define INTMASK2_VBVOLT_SHIFT 4
+#define INTMASK2_DXOVP_SHIFT 3
+#define INTMASK2_DCDTMR_SHIFT 2
+#define INTMASK2_CHGDETRUN_SHIFT 1
+#define INTMASK2_CHGTYP_SHIFT 0
+#define INTMASK2_VIDRM_MASK (1 << INTMASK2_VIDRM_SHIFT)
+#define INTMASK2_VBVOLT_MASK (1 << INTMASK2_VBVOLT_SHIFT)
+#define INTMASK2_DXOVP_MASK (1 << INTMASK2_DXOVP_SHIFT)
+#define INTMASK2_DCDTMR_MASK (1 << INTMASK2_DCDTMR_SHIFT)
+#define INTMASK2_CHGDETRUN_MASK (1 << INTMASK2_CHGDETRUN_SHIFT)
+#define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT)
+
+/* MAX77693 MUIC - STATUS1~3 Register */
+#define STATUS1_ADC_SHIFT (0)
+#define STATUS1_ADCLOW_SHIFT (5)
+#define STATUS1_ADCERR_SHIFT (6)
+#define STATUS1_ADC1K_SHIFT (7)
+#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
+#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
+#define STATUS1_ADC1K_MASK (0x1 << STATUS1_ADC1K_SHIFT)
+
+#define STATUS2_CHGTYP_SHIFT (0)
+#define STATUS2_CHGDETRUN_SHIFT (3)
+#define STATUS2_DCDTMR_SHIFT (4)
+#define STATUS2_DXOVP_SHIFT (5)
+#define STATUS2_VBVOLT_SHIFT (6)
+#define STATUS2_VIDRM_SHIFT (7)
+#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
+#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
+#define STATUS2_DXOVP_MASK (0x1 << STATUS2_DXOVP_SHIFT)
+#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
+#define STATUS2_VIDRM_MASK (0x1 << STATUS2_VIDRM_SHIFT)
+
+#define STATUS3_OVP_SHIFT (2)
+#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+
+/* MAX77693 CDETCTRL1~2 register */
+#define CDETCTRL1_CHGDETEN_SHIFT (0)
+#define CDETCTRL1_CHGTYPMAN_SHIFT (1)
+#define CDETCTRL1_DCDEN_SHIFT (2)
+#define CDETCTRL1_DCD2SCT_SHIFT (3)
+#define CDETCTRL1_CDDELAY_SHIFT (4)
+#define CDETCTRL1_DCDCPL_SHIFT (5)
+#define CDETCTRL1_CDPDET_SHIFT (7)
+#define CDETCTRL1_CHGDETEN_MASK (0x1 << CDETCTRL1_CHGDETEN_SHIFT)
+#define CDETCTRL1_CHGTYPMAN_MASK (0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
+#define CDETCTRL1_DCDEN_MASK (0x1 << CDETCTRL1_DCDEN_SHIFT)
+#define CDETCTRL1_DCD2SCT_MASK (0x1 << CDETCTRL1_DCD2SCT_SHIFT)
+#define CDETCTRL1_CDDELAY_MASK (0x1 << CDETCTRL1_CDDELAY_SHIFT)
+#define CDETCTRL1_DCDCPL_MASK (0x1 << CDETCTRL1_DCDCPL_SHIFT)
+#define CDETCTRL1_CDPDET_MASK (0x1 << CDETCTRL1_CDPDET_SHIFT)
+
+#define CDETCTRL2_VIDRMEN_SHIFT (1)
+#define CDETCTRL2_DXOVPEN_SHIFT (3)
+#define CDETCTRL2_VIDRMEN_MASK (0x1 << CDETCTRL2_VIDRMEN_SHIFT)
+#define CDETCTRL2_DXOVPEN_MASK (0x1 << CDETCTRL2_DXOVPEN_SHIFT)
+
+/* MAX77693 MUIC - CONTROL1~3 register */
+#define COMN1SW_SHIFT (0)
+#define COMP2SW_SHIFT (3)
+#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
+#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
+#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
+#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
+ | (1 << COMN1SW_SHIFT))
+#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
+ | (2 << COMN1SW_SHIFT))
+#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
+ | (3 << COMN1SW_SHIFT))
+#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
+ | (0 << COMN1SW_SHIFT))
+
+#define CONTROL2_LOWPWR_SHIFT (0)
+#define CONTROL2_ADCEN_SHIFT (1)
+#define CONTROL2_CPEN_SHIFT (2)
+#define CONTROL2_SFOUTASRT_SHIFT (3)
+#define CONTROL2_SFOUTORD_SHIFT (4)
+#define CONTROL2_ACCDET_SHIFT (5)
+#define CONTROL2_USBCPINT_SHIFT (6)
+#define CONTROL2_RCPS_SHIFT (7)
+#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
+#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
+#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
+#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
+#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
+#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
+#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
+#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
+
+#define CONTROL3_JIGSET_SHIFT (0)
+#define CONTROL3_BTLDSET_SHIFT (2)
+#define CONTROL3_ADCDBSET_SHIFT (4)
+#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
+#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
+#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
+
+/* Slave addr = 0x90: Haptic */
+enum max77693_haptic_reg {
+ MAX77693_HAPTIC_REG_STATUS = 0x00,
+ MAX77693_HAPTIC_REG_CONFIG1 = 0x01,
+ MAX77693_HAPTIC_REG_CONFIG2 = 0x02,
+ MAX77693_HAPTIC_REG_CONFIG_CHNL = 0x03,
+ MAX77693_HAPTIC_REG_CONFG_CYC1 = 0x04,
+ MAX77693_HAPTIC_REG_CONFG_CYC2 = 0x05,
+ MAX77693_HAPTIC_REG_CONFIG_PER1 = 0x06,
+ MAX77693_HAPTIC_REG_CONFIG_PER2 = 0x07,
+ MAX77693_HAPTIC_REG_CONFIG_PER3 = 0x08,
+ MAX77693_HAPTIC_REG_CONFIG_PER4 = 0x09,
+ MAX77693_HAPTIC_REG_CONFIG_DUTY1 = 0x0A,
+ MAX77693_HAPTIC_REG_CONFIG_DUTY2 = 0x0B,
+ MAX77693_HAPTIC_REG_CONFIG_PWM1 = 0x0C,
+ MAX77693_HAPTIC_REG_CONFIG_PWM2 = 0x0D,
+ MAX77693_HAPTIC_REG_CONFIG_PWM3 = 0x0E,
+ MAX77693_HAPTIC_REG_CONFIG_PWM4 = 0x0F,
+ MAX77693_HAPTIC_REG_REV = 0x10,
+
+ MAX77693_HAPTIC_REG_END,
+};
+
+/* max77693-pmic LSCNFG configuration register */
+#define MAX77693_PMIC_LOW_SYS_MASK 0x80
+#define MAX77693_PMIC_LOW_SYS_SHIFT 7
+
+/* max77693-haptic configuration register */
+#define MAX77693_CONFIG2_MODE 7
+#define MAX77693_CONFIG2_MEN 6
+#define MAX77693_CONFIG2_HTYP 5
+
+enum max77693_irq_source {
+ LED_INT = 0,
+ TOPSYS_INT,
+ CHG_INT,
+ MUIC_INT1,
+ MUIC_INT2,
+ MUIC_INT3,
+
+ MAX77693_IRQ_GROUP_NR,
+};
+
+#define SRC_IRQ_CHARGER BIT(0)
+#define SRC_IRQ_TOP BIT(1)
+#define SRC_IRQ_FLASH BIT(2)
+#define SRC_IRQ_MUIC BIT(3)
+#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
+ | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
+
+#define LED_IRQ_FLED2_OPEN BIT(0)
+#define LED_IRQ_FLED2_SHORT BIT(1)
+#define LED_IRQ_FLED1_OPEN BIT(2)
+#define LED_IRQ_FLED1_SHORT BIT(3)
+#define LED_IRQ_MAX_FLASH BIT(4)
+
+#define TOPSYS_IRQ_T120C_INT BIT(0)
+#define TOPSYS_IRQ_T140C_INT BIT(1)
+#define TOPSYS_IRQ_LOWSYS_INT BIT(3)
+
+#define CHG_IRQ_BYP_I BIT(0)
+#define CHG_IRQ_THM_I BIT(2)
+#define CHG_IRQ_BAT_I BIT(3)
+#define CHG_IRQ_CHG_I BIT(4)
+#define CHG_IRQ_CHGIN_I BIT(6)
+
+#define MUIC_IRQ_INT1_ADC BIT(0)
+#define MUIC_IRQ_INT1_ADC_LOW BIT(1)
+#define MUIC_IRQ_INT1_ADC_ERR BIT(2)
+#define MUIC_IRQ_INT1_ADC1K BIT(3)
+
+#define MUIC_IRQ_INT2_CHGTYP BIT(0)
+#define MUIC_IRQ_INT2_CHGDETREUN BIT(1)
+#define MUIC_IRQ_INT2_DCDTMR BIT(2)
+#define MUIC_IRQ_INT2_DXOVP BIT(3)
+#define MUIC_IRQ_INT2_VBVOLT BIT(4)
+#define MUIC_IRQ_INT2_VIDRM BIT(5)
+
+#define MUIC_IRQ_INT3_EOC BIT(0)
+#define MUIC_IRQ_INT3_CGMBC BIT(1)
+#define MUIC_IRQ_INT3_OVP BIT(2)
+#define MUIC_IRQ_INT3_MBCCHG_ERR BIT(3)
+#define MUIC_IRQ_INT3_CHG_ENABLED BIT(4)
+#define MUIC_IRQ_INT3_BAT_DET BIT(5)
+
+enum max77693_irq {
+ /* PMIC - FLASH */
+ MAX77693_LED_IRQ_FLED2_OPEN,
+ MAX77693_LED_IRQ_FLED2_SHORT,
+ MAX77693_LED_IRQ_FLED1_OPEN,
+ MAX77693_LED_IRQ_FLED1_SHORT,
+ MAX77693_LED_IRQ_MAX_FLASH,
+
+ /* PMIC - TOPSYS */
+ MAX77693_TOPSYS_IRQ_T120C_INT,
+ MAX77693_TOPSYS_IRQ_T140C_INT,
+ MAX77693_TOPSYS_IRQ_LOWSYS_INT,
+
+ /* PMIC - Charger */
+ MAX77693_CHG_IRQ_BYP_I,
+ MAX77693_CHG_IRQ_THM_I,
+ MAX77693_CHG_IRQ_BAT_I,
+ MAX77693_CHG_IRQ_CHG_I,
+ MAX77693_CHG_IRQ_CHGIN_I,
+
+ MAX77693_IRQ_NR,
+};
+
+enum max77693_irq_muic {
+ /* MUIC INT1 */
+ MAX77693_MUIC_IRQ_INT1_ADC,
+ MAX77693_MUIC_IRQ_INT1_ADC_LOW,
+ MAX77693_MUIC_IRQ_INT1_ADC_ERR,
+ MAX77693_MUIC_IRQ_INT1_ADC1K,
+
+ /* MUIC INT2 */
+ MAX77693_MUIC_IRQ_INT2_CHGTYP,
+ MAX77693_MUIC_IRQ_INT2_CHGDETREUN,
+ MAX77693_MUIC_IRQ_INT2_DCDTMR,
+ MAX77693_MUIC_IRQ_INT2_DXOVP,
+ MAX77693_MUIC_IRQ_INT2_VBVOLT,
+ MAX77693_MUIC_IRQ_INT2_VIDRM,
+
+ /* MUIC INT3 */
+ MAX77693_MUIC_IRQ_INT3_EOC,
+ MAX77693_MUIC_IRQ_INT3_CGMBC,
+ MAX77693_MUIC_IRQ_INT3_OVP,
+ MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,
+ MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,
+ MAX77693_MUIC_IRQ_INT3_BAT_DET,
+
+ MAX77693_MUIC_IRQ_NR,
+};
+
+struct max77693_dev {
+ struct device *dev;
+	struct i2c_client *i2c;		/* 0xCC, PMIC, Charger, Flash LED */
+	struct i2c_client *muic;	/* 0x4A, MUIC */
+	struct i2c_client *haptic;	/* 0x90, Haptic */
+
+ int type;
+
+ struct regmap *regmap;
+ struct regmap *regmap_muic;
+ struct regmap *regmap_haptic;
+
+ struct regmap_irq_chip_data *irq_data_led;
+ struct regmap_irq_chip_data *irq_data_topsys;
+ struct regmap_irq_chip_data *irq_data_charger;
+ struct regmap_irq_chip_data *irq_data_muic;
+
+ int irq;
+ int irq_gpio;
+ struct mutex irqlock;
+ int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
+ int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
+};
+
+enum max77693_types {
+ TYPE_MAX77693,
+};
+
+extern int max77693_irq_init(struct max77693_dev *max77693);
+extern void max77693_irq_exit(struct max77693_dev *max77693);
+extern int max77693_irq_resume(struct max77693_dev *max77693);
+
+#endif /* __LINUX_MFD_MAX77693_PRIV_H */
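A minimal sketch of how the COMP2/COMN1 switch macros above might be applied (this is not the in-tree extcon-max77693 code; MAX77693_MUIC_REG_CTRL1 is assumed from the MUIC register map declared earlier in this header, everything else comes from the definitions above):

#include <linux/regmap.h>
#include <linux/mfd/max77693-private.h>

/* sw is one of CONTROL1_SW_USB, CONTROL1_SW_UART, CONTROL1_SW_OPEN, ... */
static int example_muic_set_path(struct max77693_dev *max77693, unsigned int sw)
{
	/* Touch only the COMP2SW/COMN1SW fields; leave the rest of CONTROL1. */
	return regmap_update_bits(max77693->regmap_muic,
				  MAX77693_MUIC_REG_CTRL1,
				  COMP_SW_MASK, sw);
}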
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
new file mode 100644
index 000000000..d450f6873
--- /dev/null
+++ b/include/linux/mfd/max77693.h
@@ -0,0 +1,91 @@
+/*
+ * max77693.h - Driver for the Maxim 77693
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * SangYoung Son <hello.son@samsung.com>
+ *
+ * This program is not provided / owned by Maxim Integrated Products.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.h
+ *
+ * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
+ * The devices share the same I2C bus and are included in
+ * this mfd driver.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_H
+#define __LINUX_MFD_MAX77693_H
+
+/* MAX77693 regulator IDs */
+enum max77693_regulators {
+ MAX77693_ESAFEOUT1 = 0,
+ MAX77693_ESAFEOUT2,
+ MAX77693_CHARGER,
+ MAX77693_REG_MAX,
+};
+
+struct max77693_reg_data {
+ u8 addr;
+ u8 data;
+};
+
+struct max77693_muic_platform_data {
+ struct max77693_reg_data *init_data;
+ int num_init_data;
+
+ int detcable_delay_ms;
+
+ /*
+	 * Default USB/UART path: selects UART/USB or AUX_UART/AUX_USB
+	 * as the h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
+
+/* MAX77693 led flash */
+
+/* triggers */
+enum max77693_led_trigger {
+ MAX77693_LED_TRIG_OFF,
+ MAX77693_LED_TRIG_FLASH,
+ MAX77693_LED_TRIG_TORCH,
+ MAX77693_LED_TRIG_EXT,
+ MAX77693_LED_TRIG_SOFT,
+};
+
+/* trigger types */
+enum max77693_led_trigger_type {
+ MAX77693_LED_TRIG_TYPE_EDGE,
+ MAX77693_LED_TRIG_TYPE_LEVEL,
+};
+
+/* boost modes */
+enum max77693_led_boost_mode {
+ MAX77693_LED_BOOST_NONE,
+ MAX77693_LED_BOOST_ADAPTIVE,
+ MAX77693_LED_BOOST_FIXED,
+};
+
+/* MAX77693 */
+
+struct max77693_platform_data {
+ /* muic data */
+ struct max77693_muic_platform_data *muic_data;
+ struct max77693_led_platform_data *led_data;
+};
+#endif /* __LINUX_MFD_MAX77693_H */
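A hypothetical board-file sketch showing how struct max77693_muic_platform_data above might be filled in; the switch values come from max77693-private.h and the 500 ms detection delay is purely illustrative:

#include <linux/mfd/max77693.h>
#include <linux/mfd/max77693-private.h>

static struct max77693_muic_platform_data example_muic_pdata = {
	.detcable_delay_ms	= 500,			/* assumed value */
	.path_usb		= CONTROL1_SW_USB,	/* COMP2/COMN1 -> USB */
	.path_uart		= CONTROL1_SW_UART,	/* COMP2/COMN1 -> UART */
};

static struct max77693_platform_data example_max77693_pdata = {
	.muic_data	= &example_muic_pdata,
};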
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
new file mode 100644
index 000000000..7178ace83
--- /dev/null
+++ b/include/linux/mfd/max77843-private.h
@@ -0,0 +1,454 @@
+/*
+ * Common variables for the Maxim MAX77843 driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MAX77843_PRIVATE_H_
+#define __MAX77843_PRIVATE_H_
+
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#define I2C_ADDR_TOPSYS (0xCC >> 1)
+#define I2C_ADDR_CHG (0xD2 >> 1)
+#define I2C_ADDR_FG (0x6C >> 1)
+#define I2C_ADDR_MUIC (0x4A >> 1)
+
+/* Topsys, Haptic and LED registers */
+enum max77843_sys_reg {
+ MAX77843_SYS_REG_PMICID = 0x00,
+ MAX77843_SYS_REG_PMICREV = 0x01,
+ MAX77843_SYS_REG_MAINCTRL1 = 0x02,
+ MAX77843_SYS_REG_INTSRC = 0x22,
+ MAX77843_SYS_REG_INTSRCMASK = 0x23,
+ MAX77843_SYS_REG_SYSINTSRC = 0x24,
+ MAX77843_SYS_REG_SYSINTMASK = 0x26,
+ MAX77843_SYS_REG_TOPSYS_STAT = 0x28,
+ MAX77843_SYS_REG_SAFEOUTCTRL = 0xC6,
+
+ MAX77843_SYS_REG_END,
+};
+
+enum max77843_haptic_reg {
+ MAX77843_HAP_REG_MCONFIG = 0x10,
+
+ MAX77843_HAP_REG_END,
+};
+
+enum max77843_led_reg {
+ MAX77843_LED_REG_LEDEN = 0x30,
+ MAX77843_LED_REG_LED0BRT = 0x31,
+ MAX77843_LED_REG_LED1BRT = 0x32,
+ MAX77843_LED_REG_LED2BRT = 0x33,
+ MAX77843_LED_REG_LED3BRT = 0x34,
+ MAX77843_LED_REG_LEDBLNK = 0x38,
+ MAX77843_LED_REG_LEDRAMP = 0x36,
+
+ MAX77843_LED_REG_END,
+};
+
+/* Charger registers */
+enum max77843_charger_reg {
+ MAX77843_CHG_REG_CHG_INT = 0xB0,
+ MAX77843_CHG_REG_CHG_INT_MASK = 0xB1,
+ MAX77843_CHG_REG_CHG_INT_OK = 0xB2,
+ MAX77843_CHG_REG_CHG_DTLS_00 = 0xB3,
+ MAX77843_CHG_REG_CHG_DTLS_01 = 0xB4,
+ MAX77843_CHG_REG_CHG_DTLS_02 = 0xB5,
+ MAX77843_CHG_REG_CHG_CNFG_00 = 0xB7,
+ MAX77843_CHG_REG_CHG_CNFG_01 = 0xB8,
+ MAX77843_CHG_REG_CHG_CNFG_02 = 0xB9,
+ MAX77843_CHG_REG_CHG_CNFG_03 = 0xBA,
+ MAX77843_CHG_REG_CHG_CNFG_04 = 0xBB,
+ MAX77843_CHG_REG_CHG_CNFG_06 = 0xBD,
+ MAX77843_CHG_REG_CHG_CNFG_07 = 0xBE,
+ MAX77843_CHG_REG_CHG_CNFG_09 = 0xC0,
+ MAX77843_CHG_REG_CHG_CNFG_10 = 0xC1,
+ MAX77843_CHG_REG_CHG_CNFG_11 = 0xC2,
+ MAX77843_CHG_REG_CHG_CNFG_12 = 0xC3,
+
+ MAX77843_CHG_REG_END,
+};
+
+/* Fuel gauge registers */
+enum max77843_fuelgauge {
+ MAX77843_FG_REG_STATUS = 0x00,
+ MAX77843_FG_REG_VALRT_TH = 0x01,
+ MAX77843_FG_REG_TALRT_TH = 0x02,
+ MAX77843_FG_REG_SALRT_TH = 0x03,
+ MAX77843_FG_RATE_AT_RATE = 0x04,
+ MAX77843_FG_REG_REMCAP_REP = 0x05,
+ MAX77843_FG_REG_SOCREP = 0x06,
+ MAX77843_FG_REG_AGE = 0x07,
+ MAX77843_FG_REG_TEMP = 0x08,
+ MAX77843_FG_REG_VCELL = 0x09,
+ MAX77843_FG_REG_CURRENT = 0x0A,
+ MAX77843_FG_REG_AVG_CURRENT = 0x0B,
+ MAX77843_FG_REG_SOCMIX = 0x0D,
+ MAX77843_FG_REG_SOCAV = 0x0E,
+ MAX77843_FG_REG_REMCAP_MIX = 0x0F,
+ MAX77843_FG_REG_FULLCAP = 0x10,
+ MAX77843_FG_REG_AVG_TEMP = 0x16,
+ MAX77843_FG_REG_CYCLES = 0x17,
+ MAX77843_FG_REG_AVG_VCELL = 0x19,
+ MAX77843_FG_REG_CONFIG = 0x1D,
+ MAX77843_FG_REG_REMCAP_AV = 0x1F,
+ MAX77843_FG_REG_FULLCAP_NOM = 0x23,
+ MAX77843_FG_REG_MISCCFG = 0x2B,
+ MAX77843_FG_REG_RCOMP = 0x38,
+ MAX77843_FG_REG_FSTAT = 0x3D,
+ MAX77843_FG_REG_DQACC = 0x45,
+ MAX77843_FG_REG_DPACC = 0x46,
+ MAX77843_FG_REG_OCV = 0xEE,
+ MAX77843_FG_REG_VFOCV = 0xFB,
+ MAX77843_FG_SOCVF = 0xFF,
+
+ MAX77843_FG_END,
+};
+
+/* MUIC registers */
+enum max77843_muic_reg {
+ MAX77843_MUIC_REG_ID = 0x00,
+ MAX77843_MUIC_REG_INT1 = 0x01,
+ MAX77843_MUIC_REG_INT2 = 0x02,
+ MAX77843_MUIC_REG_INT3 = 0x03,
+ MAX77843_MUIC_REG_STATUS1 = 0x04,
+ MAX77843_MUIC_REG_STATUS2 = 0x05,
+ MAX77843_MUIC_REG_STATUS3 = 0x06,
+ MAX77843_MUIC_REG_INTMASK1 = 0x07,
+ MAX77843_MUIC_REG_INTMASK2 = 0x08,
+ MAX77843_MUIC_REG_INTMASK3 = 0x09,
+ MAX77843_MUIC_REG_CDETCTRL1 = 0x0A,
+ MAX77843_MUIC_REG_CDETCTRL2 = 0x0B,
+ MAX77843_MUIC_REG_CONTROL1 = 0x0C,
+ MAX77843_MUIC_REG_CONTROL2 = 0x0D,
+ MAX77843_MUIC_REG_CONTROL3 = 0x0E,
+ MAX77843_MUIC_REG_CONTROL4 = 0x16,
+ MAX77843_MUIC_REG_HVCONTROL1 = 0x17,
+ MAX77843_MUIC_REG_HVCONTROL2 = 0x18,
+
+ MAX77843_MUIC_REG_END,
+};
+
+enum max77843_irq {
+ /* Topsys: SYSTEM */
+ MAX77843_SYS_IRQ_SYSINTSRC_SYSUVLO_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_SYSOVLO_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_TSHDN_INT,
+ MAX77843_SYS_IRQ_SYSINTSRC_TM_INT,
+
+ /* Charger: CHG_INT */
+ MAX77843_CHG_IRQ_CHG_INT_BYP_I,
+ MAX77843_CHG_IRQ_CHG_INT_BATP_I,
+ MAX77843_CHG_IRQ_CHG_INT_BAT_I,
+ MAX77843_CHG_IRQ_CHG_INT_CHG_I,
+ MAX77843_CHG_IRQ_CHG_INT_WCIN_I,
+ MAX77843_CHG_IRQ_CHG_INT_CHGIN_I,
+ MAX77843_CHG_IRQ_CHG_INT_AICL_I,
+
+ MAX77843_IRQ_NUM,
+};
+
+enum max77843_irq_muic {
+ /* MUIC: INT1 */
+ MAX77843_MUIC_IRQ_INT1_ADC,
+ MAX77843_MUIC_IRQ_INT1_ADCERROR,
+ MAX77843_MUIC_IRQ_INT1_ADC1K,
+
+ /* MUIC: INT2 */
+ MAX77843_MUIC_IRQ_INT2_CHGTYP,
+ MAX77843_MUIC_IRQ_INT2_CHGDETRUN,
+ MAX77843_MUIC_IRQ_INT2_DCDTMR,
+ MAX77843_MUIC_IRQ_INT2_DXOVP,
+ MAX77843_MUIC_IRQ_INT2_VBVOLT,
+
+ /* MUIC: INT3 */
+ MAX77843_MUIC_IRQ_INT3_VBADC,
+ MAX77843_MUIC_IRQ_INT3_VDNMON,
+ MAX77843_MUIC_IRQ_INT3_DNRES,
+ MAX77843_MUIC_IRQ_INT3_MPNACK,
+ MAX77843_MUIC_IRQ_INT3_MRXBUFOW,
+ MAX77843_MUIC_IRQ_INT3_MRXTRF,
+ MAX77843_MUIC_IRQ_INT3_MRXPERR,
+ MAX77843_MUIC_IRQ_INT3_MRXRDY,
+
+ MAX77843_MUIC_IRQ_NUM,
+};
+
+/* MAX77843 interrupts */
+#define MAX77843_SYS_IRQ_SYSUVLO_INT BIT(0)
+#define MAX77843_SYS_IRQ_SYSOVLO_INT BIT(1)
+#define MAX77843_SYS_IRQ_TSHDN_INT BIT(2)
+#define MAX77843_SYS_IRQ_TM_INT BIT(3)
+
+/* MAX77843 MAINCTRL1 register */
+#define MAINCTRL1_BIASEN_SHIFT 7
+#define MAX77843_MAINCTRL1_BIASEN_MASK BIT(MAINCTRL1_BIASEN_SHIFT)
+
+/* MAX77843 MCONFIG register */
+#define MCONFIG_MODE_SHIFT 7
+#define MCONFIG_MEN_SHIFT 6
+#define MCONFIG_PDIV_SHIFT 0
+
+#define MAX77843_MCONFIG_MODE_MASK BIT(MCONFIG_MODE_SHIFT)
+#define MAX77843_MCONFIG_MEN_MASK BIT(MCONFIG_MEN_SHIFT)
+#define MAX77843_MCONFIG_PDIV_MASK (0x3 << MCONFIG_PDIV_SHIFT)
+
+/* MAX77843 charger interrupts */
+#define MAX77843_CHG_BYP_I BIT(0)
+#define MAX77843_CHG_BATP_I BIT(2)
+#define MAX77843_CHG_BAT_I BIT(3)
+#define MAX77843_CHG_CHG_I BIT(4)
+#define MAX77843_CHG_WCIN_I BIT(5)
+#define MAX77843_CHG_CHGIN_I BIT(6)
+#define MAX77843_CHG_AICL_I BIT(7)
+
+/* MAX77843 CHG_INT_OK register */
+#define MAX77843_CHG_BYP_OK BIT(0)
+#define MAX77843_CHG_BATP_OK BIT(2)
+#define MAX77843_CHG_BAT_OK BIT(3)
+#define MAX77843_CHG_CHG_OK BIT(4)
+#define MAX77843_CHG_WCIN_OK BIT(5)
+#define MAX77843_CHG_CHGIN_OK BIT(6)
+#define MAX77843_CHG_AICL_OK BIT(7)
+
+/* MAX77843 CHG_DETAILS_00 register */
+#define MAX77843_CHG_BAT_DTLS BIT(0)
+
+/* MAX77843 CHG_DETAILS_01 register */
+#define MAX77843_CHG_DTLS_MASK 0x0f
+#define MAX77843_CHG_PQ_MODE 0x00
+#define MAX77843_CHG_CC_MODE 0x01
+#define MAX77843_CHG_CV_MODE 0x02
+#define MAX77843_CHG_TO_MODE 0x03
+#define MAX77843_CHG_DO_MODE 0x04
+#define MAX77843_CHG_HT_MODE 0x05
+#define MAX77843_CHG_TF_MODE 0x06
+#define MAX77843_CHG_TS_MODE 0x07
+#define MAX77843_CHG_OFF_MODE 0x08
+
+#define MAX77843_CHG_BAT_DTLS_MASK 0xf0
+#define MAX77843_CHG_NO_BAT (0x00 << 4)
+#define MAX77843_CHG_LOW_VOLT_BAT (0x01 << 4)
+#define MAX77843_CHG_LONG_BAT_TIME (0x02 << 4)
+#define MAX77843_CHG_OK_BAT (0x03 << 4)
+#define MAX77843_CHG_OK_LOW_VOLT_BAT (0x04 << 4)
+#define MAX77843_CHG_OVER_VOLT_BAT (0x05 << 4)
+#define MAX77843_CHG_OVER_CURRENT_BAT (0x06 << 4)
+
+/* MAX77843 CHG_CNFG_00 register */
+#define MAX77843_CHG_DISABLE 0x00
+#define MAX77843_CHG_ENABLE 0x05
+#define MAX77843_CHG_MASK 0x01
+#define MAX77843_CHG_BUCK_MASK 0x04
+
+/* MAX77843 CHG_CNFG_01 register */
+#define MAX77843_CHG_RESTART_THRESHOLD_100 0x00
+#define MAX77843_CHG_RESTART_THRESHOLD_150 0x10
+#define MAX77843_CHG_RESTART_THRESHOLD_200 0x20
+#define MAX77843_CHG_RESTART_THRESHOLD_DISABLE 0x30
+
+/* MAX77843 CHG_CNFG_02 register */
+#define MAX77843_CHG_FAST_CHG_CURRENT_MIN 100000
+#define MAX77843_CHG_FAST_CHG_CURRENT_MAX 3150000
+#define MAX77843_CHG_FAST_CHG_CURRENT_STEP 50000
+#define MAX77843_CHG_FAST_CHG_CURRENT_MASK 0x3f
+#define MAX77843_CHG_OTG_ILIMIT_500 (0x00 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_900 (0x01 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_1200 (0x02 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_1500 (0x03 << 6)
+#define MAX77843_CHG_OTG_ILIMIT_MASK 0xc0
+
+/* MAX77843 CHG_CNFG_03 register */
+#define MAX77843_CHG_TOP_OFF_CURRENT_MIN 125000
+#define MAX77843_CHG_TOP_OFF_CURRENT_MAX 650000
+#define MAX77843_CHG_TOP_OFF_CURRENT_STEP 75000
+#define MAX77843_CHG_TOP_OFF_CURRENT_MASK 0x07
+
+/* MAX77843 CHG_CNFG_06 register */
+#define MAX77843_CHG_WRITE_CAP_BLOCK 0x10
+#define MAX77843_CHG_WRITE_CAP_UNBLOCK 0x0C
+
+/* MAX77843_CHG_CNFG_09_register */
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MIN 100000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_MAX 4000000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_REF 3367000
+#define MAX77843_CHG_INPUT_CURRENT_LIMIT_STEP 33000
+
+#define MAX77843_MUIC_ADC BIT(0)
+#define MAX77843_MUIC_ADCERROR BIT(2)
+#define MAX77843_MUIC_ADC1K BIT(3)
+
+#define MAX77843_MUIC_CHGTYP BIT(0)
+#define MAX77843_MUIC_CHGDETRUN BIT(1)
+#define MAX77843_MUIC_DCDTMR BIT(2)
+#define MAX77843_MUIC_DXOVP BIT(3)
+#define MAX77843_MUIC_VBVOLT BIT(4)
+
+#define MAX77843_MUIC_VBADC BIT(0)
+#define MAX77843_MUIC_VDNMON BIT(1)
+#define MAX77843_MUIC_DNRES BIT(2)
+#define MAX77843_MUIC_MPNACK BIT(3)
+#define MAX77843_MUIC_MRXBUFOW BIT(4)
+#define MAX77843_MUIC_MRXTRF BIT(5)
+#define MAX77843_MUIC_MRXPERR BIT(6)
+#define MAX77843_MUIC_MRXRDY BIT(7)
+
+/* MAX77843 INTSRCMASK register */
+#define MAX77843_INTSRCMASK_CHGR 0
+#define MAX77843_INTSRCMASK_SYS 1
+#define MAX77843_INTSRCMASK_FG 2
+#define MAX77843_INTSRCMASK_MUIC 3
+
+#define MAX77843_INTSRCMASK_CHGR_MASK BIT(MAX77843_INTSRCMASK_CHGR)
+#define MAX77843_INTSRCMASK_SYS_MASK BIT(MAX77843_INTSRCMASK_SYS)
+#define MAX77843_INTSRCMASK_FG_MASK BIT(MAX77843_INTSRCMASK_FG)
+#define MAX77843_INTSRCMASK_MUIC_MASK BIT(MAX77843_INTSRCMASK_MUIC)
+
+#define MAX77843_INTSRC_MASK_MASK \
+ (MAX77843_INTSRCMASK_MUIC_MASK | MAX77843_INTSRCMASK_FG_MASK | \
+ MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
+
+/* MAX77843 STATUS register */
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCERROR_SHIFT 6
+#define STATUS1_ADC1K_SHIFT 7
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define STATUS2_DXOVP_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define STATUS3_VBADC_SHIFT 0
+#define STATUS3_VDNMON_SHIFT 4
+#define STATUS3_DNRES_SHIFT 5
+#define STATUS3_MPNACK_SHIFT 6
+
+#define MAX77843_MUIC_STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADCERROR_MASK BIT(STATUS1_ADCERROR_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC1K_MASK BIT(STATUS1_ADC1K_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
+#define MAX77843_MUIC_STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
+#define MAX77843_MUIC_STATUS2_DXOVP_MASK BIT(STATUS2_DXOVP_SHIFT)
+#define MAX77843_MUIC_STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
+#define MAX77843_MUIC_STATUS3_VBADC_MASK (0xf << STATUS3_VBADC_SHIFT)
+#define MAX77843_MUIC_STATUS3_VDNMON_MASK BIT(STATUS3_VDNMON_SHIFT)
+#define MAX77843_MUIC_STATUS3_DNRES_MASK BIT(STATUS3_DNRES_SHIFT)
+#define MAX77843_MUIC_STATUS3_MPNACK_MASK BIT(STATUS3_MPNACK_SHIFT)
+
+/* MAX77843 CONTROL register */
+#define CONTROL1_COMP1SW_SHIFT 0
+#define CONTROL1_COMP2SW_SHIFT 3
+#define CONTROL1_IDBEN_SHIFT 7
+#define CONTROL2_LOWPWR_SHIFT 0
+#define CONTROL2_ADCEN_SHIFT 1
+#define CONTROL2_CPEN_SHIFT 2
+#define CONTROL2_ACC_DET_SHIFT 5
+#define CONTROL2_USBCPINT_SHIFT 6
+#define CONTROL2_RCPS_SHIFT 7
+#define CONTROL3_JIGSET_SHIFT 0
+#define CONTROL4_ADCDBSET_SHIFT 0
+#define CONTROL4_USBAUTO_SHIFT 4
+#define CONTROL4_FCTAUTO_SHIFT 5
+#define CONTROL4_ADCMODE_SHIFT 6
+
+#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK (0x7 << CONTROL1_COMP1SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK (0x7 << CONTROL1_COMP2SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_IDBEN_MASK BIT(CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK BIT(CONTROL2_LOWPWR_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ADCEN_MASK BIT(CONTROL2_ADCEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_CPEN_MASK BIT(CONTROL2_CPEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK BIT(CONTROL2_ACC_DET_SHIFT)
+#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK BIT(CONTROL2_USBCPINT_SHIFT)
+#define MAX77843_MUIC_CONTROL2_RCPS_MASK BIT(CONTROL2_RCPS_SHIFT)
+#define MAX77843_MUIC_CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK (0x3 << CONTROL4_ADCDBSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK BIT(CONTROL4_USBAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK BIT(CONTROL4_FCTAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK (0x3 << CONTROL4_ADCMODE_SHIFT)
+
+/* MAX77843 switch port */
+#define COM_OPEN 0
+#define COM_USB 1
+#define COM_AUDIO 2
+#define COM_UART 3
+#define COM_AUX_USB 4
+#define COM_AUX_UART 5
+
+#define CONTROL1_COM_SW \
+ ((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
+ MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
+
+#define CONTROL1_SW_OPEN \
+ ((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \
+ COM_OPEN << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_USB \
+ ((COM_USB << CONTROL1_COMP1SW_SHIFT | \
+ COM_USB << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUDIO \
+ ((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUDIO << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_UART \
+ ((COM_UART << CONTROL1_COMP1SW_SHIFT | \
+ COM_UART << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUX_USB \
+ ((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_USB << CONTROL1_COMP2SW_SHIFT))
+#define CONTROL1_SW_AUX_UART \
+ ((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \
+ COM_AUX_UART << CONTROL1_COMP2SW_SHIFT))
+
+#define MAX77843_DISABLE 0
+#define MAX77843_ENABLE 1
+
+#define CONTROL4_AUTO_DISABLE \
+ ((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT))
+#define CONTROL4_AUTO_ENABLE \
+ ((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \
+ (MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT))
+
+/* MAX77843 SAFEOUT LDO Control register */
+#define SAFEOUTCTRL_SAFEOUT1_SHIFT 0
+#define SAFEOUTCTRL_SAFEOUT2_SHIFT 2
+#define SAFEOUTCTRL_ENSAFEOUT1_SHIFT 6
+#define SAFEOUTCTRL_ENSAFEOUT2_SHIFT 7
+
+#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1 \
+ BIT(SAFEOUTCTRL_ENSAFEOUT1_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2 \
+ BIT(SAFEOUTCTRL_ENSAFEOUT2_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK \
+ (0x3 << SAFEOUTCTRL_SAFEOUT1_SHIFT)
+#define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
+ (0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
+
+struct max77843 {
+ struct device *dev;
+
+ struct i2c_client *i2c;
+ struct i2c_client *i2c_chg;
+ struct i2c_client *i2c_fuel;
+ struct i2c_client *i2c_muic;
+
+ struct regmap *regmap;
+ struct regmap *regmap_chg;
+ struct regmap *regmap_fuel;
+ struct regmap *regmap_muic;
+
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap_irq_chip_data *irq_data_chg;
+ struct regmap_irq_chip_data *irq_data_fuel;
+ struct regmap_irq_chip_data *irq_data_muic;
+
+ int irq;
+};
+#endif /* __MAX77843_PRIVATE_H_ */
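The CONTROL1_SW_* values above simply place the same COM_* port number in both the COMP1SW and COMP2SW fields. A minimal sketch (not the in-tree extcon-max77843 driver) of applying one of them through regmap, using only names declared in this header:

#include <linux/regmap.h>
#include <linux/mfd/max77843-private.h>

/* sw is one of CONTROL1_SW_USB, CONTROL1_SW_UART, CONTROL1_SW_AUX_UART, ... */
static int example_muic_set_path(struct max77843 *max77843, unsigned int sw)
{
	/* Update only the COMP1SW/COMP2SW bits of the CONTROL1 register. */
	return regmap_update_bits(max77843->regmap_muic,
				  MAX77843_MUIC_REG_CONTROL1,
				  CONTROL1_COM_SW, sw);
}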
diff --git a/include/linux/mfd/max8907.h b/include/linux/mfd/max8907.h
new file mode 100644
index 000000000..b06f7a6a1
--- /dev/null
+++ b/include/linux/mfd/max8907.h
@@ -0,0 +1,252 @@
+/*
+ * Functions to access MAX8907 power management chip.
+ *
+ * Copyright (C) 2010 Gyungoh Yoo <jack.yoo@maxim-ic.com>
+ * Copyright (C) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_MAX8907_H
+#define __LINUX_MFD_MAX8907_H
+
+#include <linux/mutex.h>
+#include <linux/pm.h>
+
+#define MAX8907_GEN_I2C_ADDR (0x78 >> 1)
+#define MAX8907_ADC_I2C_ADDR (0x8e >> 1)
+#define MAX8907_RTC_I2C_ADDR (0xd0 >> 1)
+
+/* MAX8907 register map */
+#define MAX8907_REG_SYSENSEL 0x00
+#define MAX8907_REG_ON_OFF_IRQ1 0x01
+#define MAX8907_REG_ON_OFF_IRQ1_MASK 0x02
+#define MAX8907_REG_ON_OFF_STAT 0x03
+#define MAX8907_REG_SDCTL1 0x04
+#define MAX8907_REG_SDSEQCNT1 0x05
+#define MAX8907_REG_SDV1 0x06
+#define MAX8907_REG_SDCTL2 0x07
+#define MAX8907_REG_SDSEQCNT2 0x08
+#define MAX8907_REG_SDV2 0x09
+#define MAX8907_REG_SDCTL3 0x0A
+#define MAX8907_REG_SDSEQCNT3 0x0B
+#define MAX8907_REG_SDV3 0x0C
+#define MAX8907_REG_ON_OFF_IRQ2 0x0D
+#define MAX8907_REG_ON_OFF_IRQ2_MASK 0x0E
+#define MAX8907_REG_RESET_CNFG 0x0F
+#define MAX8907_REG_LDOCTL16 0x10
+#define MAX8907_REG_LDOSEQCNT16 0x11
+#define MAX8907_REG_LDO16VOUT 0x12
+#define MAX8907_REG_SDBYSEQCNT 0x13
+#define MAX8907_REG_LDOCTL17 0x14
+#define MAX8907_REG_LDOSEQCNT17 0x15
+#define MAX8907_REG_LDO17VOUT 0x16
+#define MAX8907_REG_LDOCTL1 0x18
+#define MAX8907_REG_LDOSEQCNT1 0x19
+#define MAX8907_REG_LDO1VOUT 0x1A
+#define MAX8907_REG_LDOCTL2 0x1C
+#define MAX8907_REG_LDOSEQCNT2 0x1D
+#define MAX8907_REG_LDO2VOUT 0x1E
+#define MAX8907_REG_LDOCTL3 0x20
+#define MAX8907_REG_LDOSEQCNT3 0x21
+#define MAX8907_REG_LDO3VOUT 0x22
+#define MAX8907_REG_LDOCTL4 0x24
+#define MAX8907_REG_LDOSEQCNT4 0x25
+#define MAX8907_REG_LDO4VOUT 0x26
+#define MAX8907_REG_LDOCTL5 0x28
+#define MAX8907_REG_LDOSEQCNT5 0x29
+#define MAX8907_REG_LDO5VOUT 0x2A
+#define MAX8907_REG_LDOCTL6 0x2C
+#define MAX8907_REG_LDOSEQCNT6 0x2D
+#define MAX8907_REG_LDO6VOUT 0x2E
+#define MAX8907_REG_LDOCTL7 0x30
+#define MAX8907_REG_LDOSEQCNT7 0x31
+#define MAX8907_REG_LDO7VOUT 0x32
+#define MAX8907_REG_LDOCTL8 0x34
+#define MAX8907_REG_LDOSEQCNT8 0x35
+#define MAX8907_REG_LDO8VOUT 0x36
+#define MAX8907_REG_LDOCTL9 0x38
+#define MAX8907_REG_LDOSEQCNT9 0x39
+#define MAX8907_REG_LDO9VOUT 0x3A
+#define MAX8907_REG_LDOCTL10 0x3C
+#define MAX8907_REG_LDOSEQCNT10 0x3D
+#define MAX8907_REG_LDO10VOUT 0x3E
+#define MAX8907_REG_LDOCTL11 0x40
+#define MAX8907_REG_LDOSEQCNT11 0x41
+#define MAX8907_REG_LDO11VOUT 0x42
+#define MAX8907_REG_LDOCTL12 0x44
+#define MAX8907_REG_LDOSEQCNT12 0x45
+#define MAX8907_REG_LDO12VOUT 0x46
+#define MAX8907_REG_LDOCTL13 0x48
+#define MAX8907_REG_LDOSEQCNT13 0x49
+#define MAX8907_REG_LDO13VOUT 0x4A
+#define MAX8907_REG_LDOCTL14 0x4C
+#define MAX8907_REG_LDOSEQCNT14 0x4D
+#define MAX8907_REG_LDO14VOUT 0x4E
+#define MAX8907_REG_LDOCTL15 0x50
+#define MAX8907_REG_LDOSEQCNT15 0x51
+#define MAX8907_REG_LDO15VOUT 0x52
+#define MAX8907_REG_OUT5VEN 0x54
+#define MAX8907_REG_OUT5VSEQ 0x55
+#define MAX8907_REG_OUT33VEN 0x58
+#define MAX8907_REG_OUT33VSEQ 0x59
+#define MAX8907_REG_LDOCTL19 0x5C
+#define MAX8907_REG_LDOSEQCNT19 0x5D
+#define MAX8907_REG_LDO19VOUT 0x5E
+#define MAX8907_REG_LBCNFG 0x60
+#define MAX8907_REG_SEQ1CNFG 0x64
+#define MAX8907_REG_SEQ2CNFG 0x65
+#define MAX8907_REG_SEQ3CNFG 0x66
+#define MAX8907_REG_SEQ4CNFG 0x67
+#define MAX8907_REG_SEQ5CNFG 0x68
+#define MAX8907_REG_SEQ6CNFG 0x69
+#define MAX8907_REG_SEQ7CNFG 0x6A
+#define MAX8907_REG_LDOCTL18 0x72
+#define MAX8907_REG_LDOSEQCNT18 0x73
+#define MAX8907_REG_LDO18VOUT 0x74
+#define MAX8907_REG_BBAT_CNFG 0x78
+#define MAX8907_REG_CHG_CNTL1 0x7C
+#define MAX8907_REG_CHG_CNTL2 0x7D
+#define MAX8907_REG_CHG_IRQ1 0x7E
+#define MAX8907_REG_CHG_IRQ2 0x7F
+#define MAX8907_REG_CHG_IRQ1_MASK 0x80
+#define MAX8907_REG_CHG_IRQ2_MASK 0x81
+#define MAX8907_REG_CHG_STAT 0x82
+#define MAX8907_REG_WLED_MODE_CNTL 0x84
+#define MAX8907_REG_ILED_CNTL 0x84
+#define MAX8907_REG_II1RR 0x8E
+#define MAX8907_REG_II2RR 0x8F
+#define MAX8907_REG_LDOCTL20 0x9C
+#define MAX8907_REG_LDOSEQCNT20 0x9D
+#define MAX8907_REG_LDO20VOUT 0x9E
+
+/* RTC register map */
+#define MAX8907_REG_RTC_SEC 0x00
+#define MAX8907_REG_RTC_MIN 0x01
+#define MAX8907_REG_RTC_HOURS 0x02
+#define MAX8907_REG_RTC_WEEKDAY 0x03
+#define MAX8907_REG_RTC_DATE 0x04
+#define MAX8907_REG_RTC_MONTH 0x05
+#define MAX8907_REG_RTC_YEAR1 0x06
+#define MAX8907_REG_RTC_YEAR2 0x07
+#define MAX8907_REG_ALARM0_SEC 0x08
+#define MAX8907_REG_ALARM0_MIN 0x09
+#define MAX8907_REG_ALARM0_HOURS 0x0A
+#define MAX8907_REG_ALARM0_WEEKDAY 0x0B
+#define MAX8907_REG_ALARM0_DATE 0x0C
+#define MAX8907_REG_ALARM0_MONTH 0x0D
+#define MAX8907_REG_ALARM0_YEAR1 0x0E
+#define MAX8907_REG_ALARM0_YEAR2 0x0F
+#define MAX8907_REG_ALARM1_SEC 0x10
+#define MAX8907_REG_ALARM1_MIN 0x11
+#define MAX8907_REG_ALARM1_HOURS 0x12
+#define MAX8907_REG_ALARM1_WEEKDAY 0x13
+#define MAX8907_REG_ALARM1_DATE 0x14
+#define MAX8907_REG_ALARM1_MONTH 0x15
+#define MAX8907_REG_ALARM1_YEAR1 0x16
+#define MAX8907_REG_ALARM1_YEAR2 0x17
+#define MAX8907_REG_ALARM0_CNTL 0x18
+#define MAX8907_REG_ALARM1_CNTL 0x19
+#define MAX8907_REG_RTC_STATUS 0x1A
+#define MAX8907_REG_RTC_CNTL 0x1B
+#define MAX8907_REG_RTC_IRQ 0x1C
+#define MAX8907_REG_RTC_IRQ_MASK 0x1D
+#define MAX8907_REG_MPL_CNTL 0x1E
+
+/* ADC and Touch Screen Controller register map */
+#define MAX8907_CTL 0
+#define MAX8907_SEQCNT 1
+#define MAX8907_VOUT 2
+
+/* mask bit fields */
+#define MAX8907_MASK_LDO_SEQ 0x1C
+#define MAX8907_MASK_LDO_EN 0x01
+#define MAX8907_MASK_VBBATTCV 0x03
+#define MAX8907_MASK_OUT5V_VINEN 0x10
+#define MAX8907_MASK_OUT5V_ENSRC 0x0E
+#define MAX8907_MASK_OUT5V_EN 0x01
+#define MAX8907_MASK_POWER_OFF 0x40
+
+/* Regulator IDs */
+#define MAX8907_MBATT 0
+#define MAX8907_SD1 1
+#define MAX8907_SD2 2
+#define MAX8907_SD3 3
+#define MAX8907_LDO1 4
+#define MAX8907_LDO2 5
+#define MAX8907_LDO3 6
+#define MAX8907_LDO4 7
+#define MAX8907_LDO5 8
+#define MAX8907_LDO6 9
+#define MAX8907_LDO7 10
+#define MAX8907_LDO8 11
+#define MAX8907_LDO9 12
+#define MAX8907_LDO10 13
+#define MAX8907_LDO11 14
+#define MAX8907_LDO12 15
+#define MAX8907_LDO13 16
+#define MAX8907_LDO14 17
+#define MAX8907_LDO15 18
+#define MAX8907_LDO16 19
+#define MAX8907_LDO17 20
+#define MAX8907_LDO18 21
+#define MAX8907_LDO19 22
+#define MAX8907_LDO20 23
+#define MAX8907_OUT5V 24
+#define MAX8907_OUT33V 25
+#define MAX8907_BBAT 26
+#define MAX8907_SDBY 27
+#define MAX8907_VRTC 28
+#define MAX8907_NUM_REGULATORS (MAX8907_VRTC + 1)
+
+/* IRQ definitions */
+enum {
+ MAX8907_IRQ_VCHG_DC_OVP = 0,
+ MAX8907_IRQ_VCHG_DC_F,
+ MAX8907_IRQ_VCHG_DC_R,
+ MAX8907_IRQ_VCHG_THM_OK_R,
+ MAX8907_IRQ_VCHG_THM_OK_F,
+ MAX8907_IRQ_VCHG_MBATTLOW_F,
+ MAX8907_IRQ_VCHG_MBATTLOW_R,
+ MAX8907_IRQ_VCHG_RST,
+ MAX8907_IRQ_VCHG_DONE,
+ MAX8907_IRQ_VCHG_TOPOFF,
+ MAX8907_IRQ_VCHG_TMR_FAULT,
+
+ MAX8907_IRQ_GPM_RSTIN = 0,
+ MAX8907_IRQ_GPM_MPL,
+ MAX8907_IRQ_GPM_SW_3SEC,
+ MAX8907_IRQ_GPM_EXTON_F,
+ MAX8907_IRQ_GPM_EXTON_R,
+ MAX8907_IRQ_GPM_SW_1SEC,
+ MAX8907_IRQ_GPM_SW_F,
+ MAX8907_IRQ_GPM_SW_R,
+ MAX8907_IRQ_GPM_SYSCKEN_F,
+ MAX8907_IRQ_GPM_SYSCKEN_R,
+
+ MAX8907_IRQ_RTC_ALARM1 = 0,
+ MAX8907_IRQ_RTC_ALARM0,
+};
+
+struct max8907_platform_data {
+ struct regulator_init_data *init_data[MAX8907_NUM_REGULATORS];
+ bool pm_off;
+};
+
+struct regmap_irq_chip_data;
+
+struct max8907 {
+ struct device *dev;
+ struct mutex irq_lock;
+ struct i2c_client *i2c_gen;
+ struct i2c_client *i2c_rtc;
+ struct regmap *regmap_gen;
+ struct regmap *regmap_rtc;
+ struct regmap_irq_chip_data *irqc_chg;
+ struct regmap_irq_chip_data *irqc_on_off;
+ struct regmap_irq_chip_data *irqc_rtc;
+};
+
+#endif
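A hypothetical board-file sketch for struct max8907_platform_data above; the regulator constraints (rail name, 1.0 V) are illustrative only and do not describe any real board:

#include <linux/regulator/machine.h>
#include <linux/mfd/max8907.h>

static struct regulator_init_data example_sd1_data = {
	.constraints = {
		.name		= "vdd_example",	/* assumed rail name */
		.min_uV		= 1000000,
		.max_uV		= 1000000,
		.always_on	= true,
	},
};

static struct max8907_platform_data example_max8907_pdata = {
	/* Only SD1 is constrained here; the other slots stay NULL. */
	.init_data[MAX8907_SD1]	= &example_sd1_data,
	.pm_off			= true,	/* let the PMIC handle power-off */
};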
diff --git a/include/linux/mfd/max8925.h b/include/linux/mfd/max8925.h
new file mode 100644
index 000000000..ce8502e9e
--- /dev/null
+++ b/include/linux/mfd/max8925.h
@@ -0,0 +1,277 @@
+/*
+ * Maxim8925 Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_MAX8925_H
+#define __LINUX_MFD_MAX8925_H
+
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+
+/* Unified sub device IDs for MAX8925 */
+enum {
+ MAX8925_ID_SD1,
+ MAX8925_ID_SD2,
+ MAX8925_ID_SD3,
+ MAX8925_ID_LDO1,
+ MAX8925_ID_LDO2,
+ MAX8925_ID_LDO3,
+ MAX8925_ID_LDO4,
+ MAX8925_ID_LDO5,
+ MAX8925_ID_LDO6,
+ MAX8925_ID_LDO7,
+ MAX8925_ID_LDO8,
+ MAX8925_ID_LDO9,
+ MAX8925_ID_LDO10,
+ MAX8925_ID_LDO11,
+ MAX8925_ID_LDO12,
+ MAX8925_ID_LDO13,
+ MAX8925_ID_LDO14,
+ MAX8925_ID_LDO15,
+ MAX8925_ID_LDO16,
+ MAX8925_ID_LDO17,
+ MAX8925_ID_LDO18,
+ MAX8925_ID_LDO19,
+ MAX8925_ID_LDO20,
+ MAX8925_ID_MAX,
+};
+
+enum {
+ /*
+	 * Charging current threshold that triggers the switch from fast
+	 * charge to TOPOFF charge. From 5% to 20% of the fast charge current.
+ */
+ MAX8925_TOPOFF_THR_5PER,
+ MAX8925_TOPOFF_THR_10PER,
+ MAX8925_TOPOFF_THR_15PER,
+ MAX8925_TOPOFF_THR_20PER,
+};
+
+enum {
+ /* Fast charging current */
+ MAX8925_FCHG_85MA,
+ MAX8925_FCHG_300MA,
+ MAX8925_FCHG_460MA,
+ MAX8925_FCHG_600MA,
+ MAX8925_FCHG_700MA,
+ MAX8925_FCHG_800MA,
+ MAX8925_FCHG_900MA,
+ MAX8925_FCHG_1000MA,
+};
+
+/* Charger registers */
+#define MAX8925_CHG_IRQ1 (0x7e)
+#define MAX8925_CHG_IRQ2 (0x7f)
+#define MAX8925_CHG_IRQ1_MASK (0x80)
+#define MAX8925_CHG_IRQ2_MASK (0x81)
+#define MAX8925_CHG_STATUS (0x82)
+
+/* GPM registers */
+#define MAX8925_SYSENSEL (0x00)
+#define MAX8925_ON_OFF_IRQ1 (0x01)
+#define MAX8925_ON_OFF_IRQ1_MASK (0x02)
+#define MAX8925_ON_OFF_STATUS (0x03)
+#define MAX8925_ON_OFF_IRQ2 (0x0d)
+#define MAX8925_ON_OFF_IRQ2_MASK (0x0e)
+#define MAX8925_RESET_CNFG (0x0f)
+
+/* Touch registers */
+#define MAX8925_TSC_IRQ (0x00)
+#define MAX8925_TSC_IRQ_MASK (0x01)
+#define MAX8925_TSC_CNFG1 (0x02)
+#define MAX8925_ADC_SCHED (0x10)
+#define MAX8925_ADC_RES_END (0x6f)
+
+#define MAX8925_NREF_OK (1 << 4)
+
+/* RTC registers */
+#define MAX8925_ALARM0_CNTL (0x18)
+#define MAX8925_ALARM1_CNTL (0x19)
+#define MAX8925_RTC_IRQ (0x1c)
+#define MAX8925_RTC_IRQ_MASK (0x1d)
+#define MAX8925_MPL_CNTL (0x1e)
+
+/* WLED registers */
+#define MAX8925_WLED_MODE_CNTL (0x84)
+#define MAX8925_WLED_CNTL (0x85)
+
+/* MAX8925 Registers */
+#define MAX8925_SDCTL1 (0x04)
+#define MAX8925_SDCTL2 (0x07)
+#define MAX8925_SDCTL3 (0x0A)
+#define MAX8925_SDV1 (0x06)
+#define MAX8925_SDV2 (0x09)
+#define MAX8925_SDV3 (0x0C)
+#define MAX8925_LDOCTL1 (0x18)
+#define MAX8925_LDOCTL2 (0x1C)
+#define MAX8925_LDOCTL3 (0x20)
+#define MAX8925_LDOCTL4 (0x24)
+#define MAX8925_LDOCTL5 (0x28)
+#define MAX8925_LDOCTL6 (0x2C)
+#define MAX8925_LDOCTL7 (0x30)
+#define MAX8925_LDOCTL8 (0x34)
+#define MAX8925_LDOCTL9 (0x38)
+#define MAX8925_LDOCTL10 (0x3C)
+#define MAX8925_LDOCTL11 (0x40)
+#define MAX8925_LDOCTL12 (0x44)
+#define MAX8925_LDOCTL13 (0x48)
+#define MAX8925_LDOCTL14 (0x4C)
+#define MAX8925_LDOCTL15 (0x50)
+#define MAX8925_LDOCTL16 (0x10)
+#define MAX8925_LDOCTL17 (0x14)
+#define MAX8925_LDOCTL18 (0x72)
+#define MAX8925_LDOCTL19 (0x5C)
+#define MAX8925_LDOCTL20 (0x9C)
+#define MAX8925_LDOVOUT1 (0x1A)
+#define MAX8925_LDOVOUT2 (0x1E)
+#define MAX8925_LDOVOUT3 (0x22)
+#define MAX8925_LDOVOUT4 (0x26)
+#define MAX8925_LDOVOUT5 (0x2A)
+#define MAX8925_LDOVOUT6 (0x2E)
+#define MAX8925_LDOVOUT7 (0x32)
+#define MAX8925_LDOVOUT8 (0x36)
+#define MAX8925_LDOVOUT9 (0x3A)
+#define MAX8925_LDOVOUT10 (0x3E)
+#define MAX8925_LDOVOUT11 (0x42)
+#define MAX8925_LDOVOUT12 (0x46)
+#define MAX8925_LDOVOUT13 (0x4A)
+#define MAX8925_LDOVOUT14 (0x4E)
+#define MAX8925_LDOVOUT15 (0x52)
+#define MAX8925_LDOVOUT16 (0x12)
+#define MAX8925_LDOVOUT17 (0x16)
+#define MAX8925_LDOVOUT18 (0x74)
+#define MAX8925_LDOVOUT19 (0x5E)
+#define MAX8925_LDOVOUT20 (0x9E)
+
+/* bit definitions */
+#define CHG_IRQ1_MASK (0x07)
+#define CHG_IRQ2_MASK (0xff)
+#define ON_OFF_IRQ1_MASK (0xff)
+#define ON_OFF_IRQ2_MASK (0x03)
+#define TSC_IRQ_MASK (0x03)
+#define RTC_IRQ_MASK (0x0c)
+
+#define MAX8925_NAME_SIZE (32)
+
+/* IRQ definitions */
+enum {
+ MAX8925_IRQ_VCHG_DC_OVP,
+ MAX8925_IRQ_VCHG_DC_F,
+ MAX8925_IRQ_VCHG_DC_R,
+ MAX8925_IRQ_VCHG_THM_OK_R,
+ MAX8925_IRQ_VCHG_THM_OK_F,
+ MAX8925_IRQ_VCHG_SYSLOW_F,
+ MAX8925_IRQ_VCHG_SYSLOW_R,
+ MAX8925_IRQ_VCHG_RST,
+ MAX8925_IRQ_VCHG_DONE,
+ MAX8925_IRQ_VCHG_TOPOFF,
+ MAX8925_IRQ_VCHG_TMR_FAULT,
+ MAX8925_IRQ_GPM_RSTIN,
+ MAX8925_IRQ_GPM_MPL,
+ MAX8925_IRQ_GPM_SW_3SEC,
+ MAX8925_IRQ_GPM_EXTON_F,
+ MAX8925_IRQ_GPM_EXTON_R,
+ MAX8925_IRQ_GPM_SW_1SEC,
+ MAX8925_IRQ_GPM_SW_F,
+ MAX8925_IRQ_GPM_SW_R,
+ MAX8925_IRQ_GPM_SYSCKEN_F,
+ MAX8925_IRQ_GPM_SYSCKEN_R,
+ MAX8925_IRQ_RTC_ALARM1,
+ MAX8925_IRQ_RTC_ALARM0,
+ MAX8925_IRQ_TSC_STICK,
+ MAX8925_IRQ_TSC_NSTICK,
+ MAX8925_NR_IRQS,
+};
+
+
+
+struct max8925_chip {
+ struct device *dev;
+ struct i2c_client *i2c;
+ struct i2c_client *adc;
+ struct i2c_client *rtc;
+ struct mutex io_lock;
+ struct mutex irq_lock;
+
+ int irq_base;
+ int core_irq;
+ int tsc_irq;
+ unsigned int wakeup_flag;
+};
+
+struct max8925_backlight_pdata {
+ int lxw_scl; /* 0/1 -- 0.8Ohm/0.4Ohm */
+ int lxw_freq; /* 700KHz ~ 1400KHz */
+ int dual_string; /* 0/1 -- single/dual string */
+};
+
+struct max8925_touch_pdata {
+ unsigned int flags;
+};
+
+struct max8925_power_pdata {
+ int (*set_charger)(int);
+ unsigned batt_detect:1;
+ unsigned topoff_threshold:2;
+ unsigned fast_charge:3; /* charge current */
+ unsigned no_temp_support:1; /* set if no temperature detect */
+ unsigned no_insert_detect:1; /* set if no ac insert detect */
+ char **supplied_to;
+ int num_supplicants;
+};
+
+/*
+ * irq_base: stores IRQ base number of MAX8925 in platform
+ * tsc_irq: stores IRQ number of MAX8925 TSC
+ */
+struct max8925_platform_data {
+ struct max8925_backlight_pdata *backlight;
+ struct max8925_touch_pdata *touch;
+ struct max8925_power_pdata *power;
+ struct regulator_init_data *sd1;
+ struct regulator_init_data *sd2;
+ struct regulator_init_data *sd3;
+ struct regulator_init_data *ldo1;
+ struct regulator_init_data *ldo2;
+ struct regulator_init_data *ldo3;
+ struct regulator_init_data *ldo4;
+ struct regulator_init_data *ldo5;
+ struct regulator_init_data *ldo6;
+ struct regulator_init_data *ldo7;
+ struct regulator_init_data *ldo8;
+ struct regulator_init_data *ldo9;
+ struct regulator_init_data *ldo10;
+ struct regulator_init_data *ldo11;
+ struct regulator_init_data *ldo12;
+ struct regulator_init_data *ldo13;
+ struct regulator_init_data *ldo14;
+ struct regulator_init_data *ldo15;
+ struct regulator_init_data *ldo16;
+ struct regulator_init_data *ldo17;
+ struct regulator_init_data *ldo18;
+ struct regulator_init_data *ldo19;
+ struct regulator_init_data *ldo20;
+
+ int irq_base;
+ int tsc_irq;
+};
+
+extern int max8925_reg_read(struct i2c_client *, int);
+extern int max8925_reg_write(struct i2c_client *, int, unsigned char);
+extern int max8925_bulk_read(struct i2c_client *, int, int, unsigned char *);
+extern int max8925_bulk_write(struct i2c_client *, int, int, unsigned char *);
+extern int max8925_set_bits(struct i2c_client *, int, unsigned char,
+ unsigned char);
+
+extern int max8925_device_init(struct max8925_chip *,
+ struct max8925_platform_data *);
+extern void max8925_device_exit(struct max8925_chip *);
+#endif /* __LINUX_MFD_MAX8925_H */
+
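A hypothetical charger configuration for struct max8925_power_pdata above: the MAX8925_TOPOFF_THR_* and MAX8925_FCHG_* enum values map directly onto the 2-bit topoff_threshold and 3-bit fast_charge bit-fields. The values chosen here are illustrative only:

#include <linux/mfd/max8925.h>

static struct max8925_power_pdata example_power_pdata = {
	.batt_detect		= 1,
	.topoff_threshold	= MAX8925_TOPOFF_THR_10PER,	/* 10% of fast charge */
	.fast_charge		= MAX8925_FCHG_600MA,		/* 600 mA fast charge */
};

static struct max8925_platform_data example_max8925_pdata = {
	.power	= &example_power_pdata,
};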
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
new file mode 100644
index 000000000..78c76cd4d
--- /dev/null
+++ b/include/linux/mfd/max8997-private.h
@@ -0,0 +1,430 @@
+/*
+ * max8997-private.h - Voltage regulator driver for the Maxim 8997
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_MAX8997_PRIV_H
+#define __LINUX_MFD_MAX8997_PRIV_H
+
+#include <linux/i2c.h>
+#include <linux/export.h>
+#include <linux/irqdomain.h>
+
+#define MAX8997_REG_INVALID (0xff)
+
+enum max8997_pmic_reg {
+ MAX8997_REG_PMIC_ID0 = 0x00,
+ MAX8997_REG_PMIC_ID1 = 0x01,
+ MAX8997_REG_INTSRC = 0x02,
+ MAX8997_REG_INT1 = 0x03,
+ MAX8997_REG_INT2 = 0x04,
+ MAX8997_REG_INT3 = 0x05,
+ MAX8997_REG_INT4 = 0x06,
+
+ MAX8997_REG_INT1MSK = 0x08,
+ MAX8997_REG_INT2MSK = 0x09,
+ MAX8997_REG_INT3MSK = 0x0a,
+ MAX8997_REG_INT4MSK = 0x0b,
+
+ MAX8997_REG_STATUS1 = 0x0d,
+ MAX8997_REG_STATUS2 = 0x0e,
+ MAX8997_REG_STATUS3 = 0x0f,
+ MAX8997_REG_STATUS4 = 0x10,
+
+ MAX8997_REG_MAINCON1 = 0x13,
+ MAX8997_REG_MAINCON2 = 0x14,
+ MAX8997_REG_BUCKRAMP = 0x15,
+
+ MAX8997_REG_BUCK1CTRL = 0x18,
+ MAX8997_REG_BUCK1DVS1 = 0x19,
+ MAX8997_REG_BUCK1DVS2 = 0x1a,
+ MAX8997_REG_BUCK1DVS3 = 0x1b,
+ MAX8997_REG_BUCK1DVS4 = 0x1c,
+ MAX8997_REG_BUCK1DVS5 = 0x1d,
+ MAX8997_REG_BUCK1DVS6 = 0x1e,
+ MAX8997_REG_BUCK1DVS7 = 0x1f,
+ MAX8997_REG_BUCK1DVS8 = 0x20,
+ MAX8997_REG_BUCK2CTRL = 0x21,
+ MAX8997_REG_BUCK2DVS1 = 0x22,
+ MAX8997_REG_BUCK2DVS2 = 0x23,
+ MAX8997_REG_BUCK2DVS3 = 0x24,
+ MAX8997_REG_BUCK2DVS4 = 0x25,
+ MAX8997_REG_BUCK2DVS5 = 0x26,
+ MAX8997_REG_BUCK2DVS6 = 0x27,
+ MAX8997_REG_BUCK2DVS7 = 0x28,
+ MAX8997_REG_BUCK2DVS8 = 0x29,
+ MAX8997_REG_BUCK3CTRL = 0x2a,
+ MAX8997_REG_BUCK3DVS = 0x2b,
+ MAX8997_REG_BUCK4CTRL = 0x2c,
+ MAX8997_REG_BUCK4DVS = 0x2d,
+ MAX8997_REG_BUCK5CTRL = 0x2e,
+ MAX8997_REG_BUCK5DVS1 = 0x2f,
+ MAX8997_REG_BUCK5DVS2 = 0x30,
+ MAX8997_REG_BUCK5DVS3 = 0x31,
+ MAX8997_REG_BUCK5DVS4 = 0x32,
+ MAX8997_REG_BUCK5DVS5 = 0x33,
+ MAX8997_REG_BUCK5DVS6 = 0x34,
+ MAX8997_REG_BUCK5DVS7 = 0x35,
+ MAX8997_REG_BUCK5DVS8 = 0x36,
+ MAX8997_REG_BUCK6CTRL = 0x37,
+ MAX8997_REG_BUCK6BPSKIPCTRL = 0x38,
+ MAX8997_REG_BUCK7CTRL = 0x39,
+ MAX8997_REG_BUCK7DVS = 0x3a,
+ MAX8997_REG_LDO1CTRL = 0x3b,
+ MAX8997_REG_LDO2CTRL = 0x3c,
+ MAX8997_REG_LDO3CTRL = 0x3d,
+ MAX8997_REG_LDO4CTRL = 0x3e,
+ MAX8997_REG_LDO5CTRL = 0x3f,
+ MAX8997_REG_LDO6CTRL = 0x40,
+ MAX8997_REG_LDO7CTRL = 0x41,
+ MAX8997_REG_LDO8CTRL = 0x42,
+ MAX8997_REG_LDO9CTRL = 0x43,
+ MAX8997_REG_LDO10CTRL = 0x44,
+ MAX8997_REG_LDO11CTRL = 0x45,
+ MAX8997_REG_LDO12CTRL = 0x46,
+ MAX8997_REG_LDO13CTRL = 0x47,
+ MAX8997_REG_LDO14CTRL = 0x48,
+ MAX8997_REG_LDO15CTRL = 0x49,
+ MAX8997_REG_LDO16CTRL = 0x4a,
+ MAX8997_REG_LDO17CTRL = 0x4b,
+ MAX8997_REG_LDO18CTRL = 0x4c,
+ MAX8997_REG_LDO21CTRL = 0x4d,
+
+ MAX8997_REG_MBCCTRL1 = 0x50,
+ MAX8997_REG_MBCCTRL2 = 0x51,
+ MAX8997_REG_MBCCTRL3 = 0x52,
+ MAX8997_REG_MBCCTRL4 = 0x53,
+ MAX8997_REG_MBCCTRL5 = 0x54,
+ MAX8997_REG_MBCCTRL6 = 0x55,
+ MAX8997_REG_OTPCGHCVS = 0x56,
+
+ MAX8997_REG_SAFEOUTCTRL = 0x5a,
+
+ MAX8997_REG_LBCNFG1 = 0x5e,
+ MAX8997_REG_LBCNFG2 = 0x5f,
+ MAX8997_REG_BBCCTRL = 0x60,
+
+ MAX8997_REG_FLASH1_CUR = 0x63, /* 0x63 ~ 0x6e for FLASH */
+ MAX8997_REG_FLASH2_CUR = 0x64,
+ MAX8997_REG_MOVIE_CUR = 0x65,
+ MAX8997_REG_GSMB_CUR = 0x66,
+ MAX8997_REG_BOOST_CNTL = 0x67,
+ MAX8997_REG_LEN_CNTL = 0x68,
+ MAX8997_REG_FLASH_CNTL = 0x69,
+ MAX8997_REG_WDT_CNTL = 0x6a,
+ MAX8997_REG_MAXFLASH1 = 0x6b,
+ MAX8997_REG_MAXFLASH2 = 0x6c,
+ MAX8997_REG_FLASHSTATUS = 0x6d,
+ MAX8997_REG_FLASHSTATUSMASK = 0x6e,
+
+ MAX8997_REG_GPIOCNTL1 = 0x70,
+ MAX8997_REG_GPIOCNTL2 = 0x71,
+ MAX8997_REG_GPIOCNTL3 = 0x72,
+ MAX8997_REG_GPIOCNTL4 = 0x73,
+ MAX8997_REG_GPIOCNTL5 = 0x74,
+ MAX8997_REG_GPIOCNTL6 = 0x75,
+ MAX8997_REG_GPIOCNTL7 = 0x76,
+ MAX8997_REG_GPIOCNTL8 = 0x77,
+ MAX8997_REG_GPIOCNTL9 = 0x78,
+ MAX8997_REG_GPIOCNTL10 = 0x79,
+ MAX8997_REG_GPIOCNTL11 = 0x7a,
+ MAX8997_REG_GPIOCNTL12 = 0x7b,
+
+ MAX8997_REG_LDO1CONFIG = 0x80,
+ MAX8997_REG_LDO2CONFIG = 0x81,
+ MAX8997_REG_LDO3CONFIG = 0x82,
+ MAX8997_REG_LDO4CONFIG = 0x83,
+ MAX8997_REG_LDO5CONFIG = 0x84,
+ MAX8997_REG_LDO6CONFIG = 0x85,
+ MAX8997_REG_LDO7CONFIG = 0x86,
+ MAX8997_REG_LDO8CONFIG = 0x87,
+ MAX8997_REG_LDO9CONFIG = 0x88,
+ MAX8997_REG_LDO10CONFIG = 0x89,
+ MAX8997_REG_LDO11CONFIG = 0x8a,
+ MAX8997_REG_LDO12CONFIG = 0x8b,
+ MAX8997_REG_LDO13CONFIG = 0x8c,
+ MAX8997_REG_LDO14CONFIG = 0x8d,
+ MAX8997_REG_LDO15CONFIG = 0x8e,
+ MAX8997_REG_LDO16CONFIG = 0x8f,
+ MAX8997_REG_LDO17CONFIG = 0x90,
+ MAX8997_REG_LDO18CONFIG = 0x91,
+ MAX8997_REG_LDO21CONFIG = 0x92,
+
+ MAX8997_REG_DVSOKTIMER1 = 0x97,
+ MAX8997_REG_DVSOKTIMER2 = 0x98,
+ MAX8997_REG_DVSOKTIMER4 = 0x99,
+ MAX8997_REG_DVSOKTIMER5 = 0x9a,
+
+ MAX8997_REG_PMIC_END = 0x9b,
+};
+
+enum max8997_muic_reg {
+ MAX8997_MUIC_REG_ID = 0x0,
+ MAX8997_MUIC_REG_INT1 = 0x1,
+ MAX8997_MUIC_REG_INT2 = 0x2,
+ MAX8997_MUIC_REG_INT3 = 0x3,
+ MAX8997_MUIC_REG_STATUS1 = 0x4,
+ MAX8997_MUIC_REG_STATUS2 = 0x5,
+ MAX8997_MUIC_REG_STATUS3 = 0x6,
+ MAX8997_MUIC_REG_INTMASK1 = 0x7,
+ MAX8997_MUIC_REG_INTMASK2 = 0x8,
+ MAX8997_MUIC_REG_INTMASK3 = 0x9,
+ MAX8997_MUIC_REG_CDETCTRL = 0xa,
+
+ MAX8997_MUIC_REG_CONTROL1 = 0xc,
+ MAX8997_MUIC_REG_CONTROL2 = 0xd,
+ MAX8997_MUIC_REG_CONTROL3 = 0xe,
+
+ MAX8997_MUIC_REG_END = 0xf,
+};
+
+/* MAX8997-MUIC STATUS1 register */
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCLOW_SHIFT 5
+#define STATUS1_ADCERR_SHIFT 6
+#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
+#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
+
+/* MAX8997-MUIC STATUS2 register */
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define STATUS2_DBCHG_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
+#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
+#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
+#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
+
+/* MAX8997-MUIC STATUS3 register */
+#define STATUS3_OVP_SHIFT 2
+#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+
+/* MAX8997-MUIC CONTROL1 register */
+#define COMN1SW_SHIFT 0
+#define COMP2SW_SHIFT 3
+#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
+#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
+#define COMP_SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
+
+#define CONTROL1_SW_USB ((1 << COMP2SW_SHIFT) \
+ | (1 << COMN1SW_SHIFT))
+#define CONTROL1_SW_AUDIO ((2 << COMP2SW_SHIFT) \
+ | (2 << COMN1SW_SHIFT))
+#define CONTROL1_SW_UART ((3 << COMP2SW_SHIFT) \
+ | (3 << COMN1SW_SHIFT))
+#define CONTROL1_SW_OPEN ((0 << COMP2SW_SHIFT) \
+ | (0 << COMN1SW_SHIFT))
+
+#define CONTROL2_LOWPWR_SHIFT (0)
+#define CONTROL2_ADCEN_SHIFT (1)
+#define CONTROL2_CPEN_SHIFT (2)
+#define CONTROL2_SFOUTASRT_SHIFT (3)
+#define CONTROL2_SFOUTORD_SHIFT (4)
+#define CONTROL2_ACCDET_SHIFT (5)
+#define CONTROL2_USBCPINT_SHIFT (6)
+#define CONTROL2_RCPS_SHIFT (7)
+#define CONTROL2_LOWPWR_MASK (0x1 << CONTROL2_LOWPWR_SHIFT)
+#define CONTROL2_ADCEN_MASK (0x1 << CONTROL2_ADCEN_SHIFT)
+#define CONTROL2_CPEN_MASK (0x1 << CONTROL2_CPEN_SHIFT)
+#define CONTROL2_SFOUTASRT_MASK (0x1 << CONTROL2_SFOUTASRT_SHIFT)
+#define CONTROL2_SFOUTORD_MASK (0x1 << CONTROL2_SFOUTORD_SHIFT)
+#define CONTROL2_ACCDET_MASK (0x1 << CONTROL2_ACCDET_SHIFT)
+#define CONTROL2_USBCPINT_MASK (0x1 << CONTROL2_USBCPINT_SHIFT)
+#define CONTROL2_RCPS_MASK (0x1 << CONTROL2_RCPS_SHIFT)
+
+#define CONTROL3_JIGSET_SHIFT (0)
+#define CONTROL3_BTLDSET_SHIFT (2)
+#define CONTROL3_ADCDBSET_SHIFT (4)
+#define CONTROL3_JIGSET_MASK (0x3 << CONTROL3_JIGSET_SHIFT)
+#define CONTROL3_BTLDSET_MASK (0x3 << CONTROL3_BTLDSET_SHIFT)
+#define CONTROL3_ADCDBSET_MASK (0x3 << CONTROL3_ADCDBSET_SHIFT)
+
+enum max8997_haptic_reg {
+ MAX8997_HAPTIC_REG_GENERAL = 0x00,
+ MAX8997_HAPTIC_REG_CONF1 = 0x01,
+ MAX8997_HAPTIC_REG_CONF2 = 0x02,
+ MAX8997_HAPTIC_REG_DRVCONF = 0x03,
+ MAX8997_HAPTIC_REG_CYCLECONF1 = 0x04,
+ MAX8997_HAPTIC_REG_CYCLECONF2 = 0x05,
+ MAX8997_HAPTIC_REG_SIGCONF1 = 0x06,
+ MAX8997_HAPTIC_REG_SIGCONF2 = 0x07,
+ MAX8997_HAPTIC_REG_SIGCONF3 = 0x08,
+ MAX8997_HAPTIC_REG_SIGCONF4 = 0x09,
+ MAX8997_HAPTIC_REG_SIGDC1 = 0x0a,
+ MAX8997_HAPTIC_REG_SIGDC2 = 0x0b,
+ MAX8997_HAPTIC_REG_SIGPWMDC1 = 0x0c,
+ MAX8997_HAPTIC_REG_SIGPWMDC2 = 0x0d,
+ MAX8997_HAPTIC_REG_SIGPWMDC3 = 0x0e,
+ MAX8997_HAPTIC_REG_SIGPWMDC4 = 0x0f,
+ MAX8997_HAPTIC_REG_MTR_REV = 0x10,
+
+ MAX8997_HAPTIC_REG_END = 0x11,
+};
+
+/* slave addr = 0x0c: using "2nd part" of rev4 datasheet */
+enum max8997_rtc_reg {
+ MAX8997_RTC_CTRLMASK = 0x02,
+ MAX8997_RTC_CTRL = 0x03,
+ MAX8997_RTC_UPDATE1 = 0x04,
+ MAX8997_RTC_UPDATE2 = 0x05,
+ MAX8997_RTC_WTSR_SMPL = 0x06,
+
+ MAX8997_RTC_SEC = 0x10,
+ MAX8997_RTC_MIN = 0x11,
+ MAX8997_RTC_HOUR = 0x12,
+ MAX8997_RTC_DAY_OF_WEEK = 0x13,
+ MAX8997_RTC_MONTH = 0x14,
+ MAX8997_RTC_YEAR = 0x15,
+ MAX8997_RTC_DAY_OF_MONTH = 0x16,
+ MAX8997_RTC_ALARM1_SEC = 0x17,
+ MAX8997_RTC_ALARM1_MIN = 0x18,
+ MAX8997_RTC_ALARM1_HOUR = 0x19,
+ MAX8997_RTC_ALARM1_DAY_OF_WEEK = 0x1a,
+ MAX8997_RTC_ALARM1_MONTH = 0x1b,
+ MAX8997_RTC_ALARM1_YEAR = 0x1c,
+ MAX8997_RTC_ALARM1_DAY_OF_MONTH = 0x1d,
+ MAX8997_RTC_ALARM2_SEC = 0x1e,
+ MAX8997_RTC_ALARM2_MIN = 0x1f,
+ MAX8997_RTC_ALARM2_HOUR = 0x20,
+ MAX8997_RTC_ALARM2_DAY_OF_WEEK = 0x21,
+ MAX8997_RTC_ALARM2_MONTH = 0x22,
+ MAX8997_RTC_ALARM2_YEAR = 0x23,
+ MAX8997_RTC_ALARM2_DAY_OF_MONTH = 0x24,
+};
+
+enum max8997_irq_source {
+ PMIC_INT1 = 0,
+ PMIC_INT2,
+ PMIC_INT3,
+ PMIC_INT4,
+
+ FUEL_GAUGE, /* Ignored (MAX17042 driver handles) */
+
+ MUIC_INT1,
+ MUIC_INT2,
+ MUIC_INT3,
+
+ GPIO_LOW, /* Not implemented */
+ GPIO_HI, /* Not implemented */
+
+ FLASH_STATUS, /* Not implemented */
+
+ MAX8997_IRQ_GROUP_NR,
+};
+
+enum max8997_irq {
+ MAX8997_PMICIRQ_PWRONR,
+ MAX8997_PMICIRQ_PWRONF,
+ MAX8997_PMICIRQ_PWRON1SEC,
+ MAX8997_PMICIRQ_JIGONR,
+ MAX8997_PMICIRQ_JIGONF,
+ MAX8997_PMICIRQ_LOWBAT2,
+ MAX8997_PMICIRQ_LOWBAT1,
+
+ MAX8997_PMICIRQ_JIGR,
+ MAX8997_PMICIRQ_JIGF,
+ MAX8997_PMICIRQ_MR,
+ MAX8997_PMICIRQ_DVS1OK,
+ MAX8997_PMICIRQ_DVS2OK,
+ MAX8997_PMICIRQ_DVS3OK,
+ MAX8997_PMICIRQ_DVS4OK,
+
+ MAX8997_PMICIRQ_CHGINS,
+ MAX8997_PMICIRQ_CHGRM,
+ MAX8997_PMICIRQ_DCINOVP,
+ MAX8997_PMICIRQ_TOPOFFR,
+ MAX8997_PMICIRQ_CHGRSTF,
+ MAX8997_PMICIRQ_MBCHGTMEXPD,
+
+ MAX8997_PMICIRQ_RTC60S,
+ MAX8997_PMICIRQ_RTCA1,
+ MAX8997_PMICIRQ_RTCA2,
+ MAX8997_PMICIRQ_SMPL_INT,
+ MAX8997_PMICIRQ_RTC1S,
+ MAX8997_PMICIRQ_WTSR,
+
+ MAX8997_MUICIRQ_ADCError,
+ MAX8997_MUICIRQ_ADCLow,
+ MAX8997_MUICIRQ_ADC,
+
+ MAX8997_MUICIRQ_VBVolt,
+ MAX8997_MUICIRQ_DBChg,
+ MAX8997_MUICIRQ_DCDTmr,
+ MAX8997_MUICIRQ_ChgDetRun,
+ MAX8997_MUICIRQ_ChgTyp,
+
+ MAX8997_MUICIRQ_OVP,
+
+ MAX8997_IRQ_NR,
+};
+
+#define MAX8997_NUM_GPIO 12
+struct max8997_dev {
+ struct device *dev;
+ struct max8997_platform_data *pdata;
+ struct i2c_client *i2c; /* 0xcc / PMIC, Battery Control, and FLASH */
+ struct i2c_client *rtc; /* slave addr 0x0c */
+ struct i2c_client *haptic; /* slave addr 0x90 */
+ struct i2c_client *muic; /* slave addr 0x4a */
+ struct mutex iolock;
+
+ unsigned long type;
+ struct platform_device *battery; /* battery control (not fuel gauge) */
+
+ int irq;
+ int ono;
+ struct irq_domain *irq_domain;
+ struct mutex irqlock;
+ int irq_masks_cur[MAX8997_IRQ_GROUP_NR];
+ int irq_masks_cache[MAX8997_IRQ_GROUP_NR];
+
+ /* For hibernation */
+ u8 reg_dump[MAX8997_REG_PMIC_END + MAX8997_MUIC_REG_END +
+ MAX8997_HAPTIC_REG_END];
+
+ bool gpio_status[MAX8997_NUM_GPIO];
+};
+
+enum max8997_types {
+ TYPE_MAX8997,
+ TYPE_MAX8966,
+};
+
+extern int max8997_irq_init(struct max8997_dev *max8997);
+extern void max8997_irq_exit(struct max8997_dev *max8997);
+extern int max8997_irq_resume(struct max8997_dev *max8997);
+
+extern int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
+extern int max8997_bulk_read(struct i2c_client *i2c, u8 reg, int count,
+ u8 *buf);
+extern int max8997_write_reg(struct i2c_client *i2c, u8 reg, u8 value);
+extern int max8997_bulk_write(struct i2c_client *i2c, u8 reg, int count,
+ u8 *buf);
+extern int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask);
+
+#define MAX8997_GPIO_INT_BOTH (0x3 << 4)
+#define MAX8997_GPIO_INT_RISE (0x2 << 4)
+#define MAX8997_GPIO_INT_FALL (0x1 << 4)
+
+#define MAX8997_GPIO_INT_MASK (0x3 << 4)
+#define MAX8997_GPIO_DATA_MASK (0x1 << 2)
+#endif /* __LINUX_MFD_MAX8997_PRIV_H */
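A minimal sketch (not the in-tree extcon-max8997 code) of the I/O helpers declared above: route the MUIC COMP2/COMN1 switches by rewriting only the switch fields of CONTROL1. Note that max8997_update_reg takes the value before the mask:

#include <linux/mfd/max8997-private.h>

/* path is one of CONTROL1_SW_USB, CONTROL1_SW_UART, CONTROL1_SW_OPEN, ... */
static int example_muic_set_path(struct max8997_dev *max8997, u8 path)
{
	/* Value first, mask second, per the prototype above. */
	return max8997_update_reg(max8997->muic, MAX8997_MUIC_REG_CONTROL1,
				  path, COMP_SW_MASK);
}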
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
new file mode 100644
index 000000000..cf815577b
--- /dev/null
+++ b/include/linux/mfd/max8997.h
@@ -0,0 +1,224 @@
+/*
+ * max8997.h - Driver for the Maxim 8997/8966
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8998.h
+ *
+ * MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices.
+ * Except the Fuel Gauge, every device shares the same I2C bus and is
+ * included in this mfd driver. Although the fuel gauge is part of the chip,
+ * it is excluded from this driver because a) it has a different I2C bus from
+ * the others and b) it can be enabled simply by using the MAX17042 driver.
+ */
+
+#ifndef __LINUX_MFD_MAX8997_H
+#define __LINUX_MFD_MAX8997_H
+
+#include <linux/regulator/consumer.h>
+
+/* MAX8997/8966 regulator IDs */
+enum max8998_regulators {
+ MAX8997_LDO1 = 0,
+ MAX8997_LDO2,
+ MAX8997_LDO3,
+ MAX8997_LDO4,
+ MAX8997_LDO5,
+ MAX8997_LDO6,
+ MAX8997_LDO7,
+ MAX8997_LDO8,
+ MAX8997_LDO9,
+ MAX8997_LDO10,
+ MAX8997_LDO11,
+ MAX8997_LDO12,
+ MAX8997_LDO13,
+ MAX8997_LDO14,
+ MAX8997_LDO15,
+ MAX8997_LDO16,
+ MAX8997_LDO17,
+ MAX8997_LDO18,
+ MAX8997_LDO21,
+ MAX8997_BUCK1,
+ MAX8997_BUCK2,
+ MAX8997_BUCK3,
+ MAX8997_BUCK4,
+ MAX8997_BUCK5,
+ MAX8997_BUCK6,
+ MAX8997_BUCK7,
+ MAX8997_EN32KHZ_AP,
+ MAX8997_EN32KHZ_CP,
+ MAX8997_ENVICHG,
+ MAX8997_ESAFEOUT1,
+ MAX8997_ESAFEOUT2,
+ MAX8997_CHARGER_CV, /* control MBCCV of MBCCTRL3 */
+ MAX8997_CHARGER, /* charger current, MBCCTRL4 */
+ MAX8997_CHARGER_TOPOFF, /* MBCCTRL5 */
+
+ MAX8997_REG_MAX,
+};
+
+struct max8997_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+ struct device_node *reg_node;
+};
+
+struct max8997_muic_reg_data {
+ u8 addr;
+ u8 data;
+};
+
+/**
+ * struct max8997_muic_platform_data
+ * @init_data: array of max8997_muic_reg_data
+ * used for initializing registers of MAX8997 MUIC device
+ * @num_init_data: array size of init_data
+ */
+struct max8997_muic_platform_data {
+ struct max8997_muic_reg_data *init_data;
+ int num_init_data;
+
+ /* Check cable state after certain delay */
+ int detcable_delay_ms;
+
+ /*
+	 * Default USB/UART path: selects UART/USB or AUX_UART/AUX_USB
+	 * as the h/w path of COMP2/COMN1 on the CONTROL1 register.
+ */
+ int path_usb;
+ int path_uart;
+};
+
+enum max8997_haptic_motor_type {
+ MAX8997_HAPTIC_ERM,
+ MAX8997_HAPTIC_LRA,
+};
+
+enum max8997_haptic_pulse_mode {
+ MAX8997_EXTERNAL_MODE,
+ MAX8997_INTERNAL_MODE,
+};
+
+enum max8997_haptic_pwm_divisor {
+ MAX8997_PWM_DIVISOR_32,
+ MAX8997_PWM_DIVISOR_64,
+ MAX8997_PWM_DIVISOR_128,
+ MAX8997_PWM_DIVISOR_256,
+};
+
+/**
+ * max8997_haptic_platform_data
+ * @pwm_channel_id: channel number of PWM device
+ * valid for MAX8997_EXTERNAL_MODE
+ * @pwm_period: period in nano second for PWM device
+ * valid for MAX8997_EXTERNAL_MODE
+ * @type: motor type
+ * @mode: pulse mode
+ * MAX8997_EXTERNAL_MODE: external PWM device is used to control motor
+ * MAX8997_INTERNAL_MODE: internal pulse generator is used to control motor
+ * @pwm_divisor: divisor for external PWM device
+ * @internal_mode_pattern: internal mode pattern for internal mode
+ * [0 - 3]: valid pattern number
+ * @pattern_cycle: the number of cycles of the waveform
+ * for the internal mode pattern
+ * [0 - 15]: available cycles
+ * @pattern_signal_period: period of the waveform for the internal mode pattern
+ * [0 - 255]: available period
+ */
+struct max8997_haptic_platform_data {
+ unsigned int pwm_channel_id;
+ unsigned int pwm_period;
+
+ enum max8997_haptic_motor_type type;
+ enum max8997_haptic_pulse_mode mode;
+ enum max8997_haptic_pwm_divisor pwm_divisor;
+
+ unsigned int internal_mode_pattern;
+ unsigned int pattern_cycle;
+ unsigned int pattern_signal_period;
+};
+
+enum max8997_led_mode {
+ MAX8997_NONE,
+ MAX8997_FLASH_MODE,
+ MAX8997_MOVIE_MODE,
+ MAX8997_FLASH_PIN_CONTROL_MODE,
+ MAX8997_MOVIE_PIN_CONTROL_MODE,
+};
+
+/**
+ * struct max8997_led_platform_data
+ * The number of LED devices for MAX8997 is two
+ * @mode: LED mode for each LED device
+ * @brightness: initial brightness for each LED device
+ * range:
+ * [0 - 31]: MAX8997_FLASH_MODE and MAX8997_FLASH_PIN_CONTROL_MODE
+ * [0 - 15]: MAX8997_MOVIE_MODE and MAX8997_MOVIE_PIN_CONTROL_MODE
+ */
+struct max8997_led_platform_data {
+ enum max8997_led_mode mode[2];
+ u8 brightness[2];
+};
+
+struct max8997_platform_data {
+ /* IRQ */
+ int ono;
+ int wakeup;
+
+ /* ---- PMIC ---- */
+ struct max8997_regulator_data *regulators;
+ int num_regulators;
+
+ /*
+	 * SET1~3 DVS GPIOs control Buck1, 2, and 5 simultaneously. Therefore,
+	 * with buckx_gpiodvs enabled, buckx cannot be controlled
+	 * independently. To control buckx (of 1, 2, and 5) independently,
+	 * disable buckx_gpiodvs and control it with the BUCKxDVS1 register.
+ *
+ * When buckx_gpiodvs and bucky_gpiodvs are both enabled, set_voltage
+ * on buckx will change the voltage of bucky at the same time.
+ *
+ */
+ bool ignore_gpiodvs_side_effect;
+ int buck125_gpios[3]; /* GPIO of [0]SET1, [1]SET2, [2]SET3 */
+ int buck125_default_idx; /* Default value of SET1, 2, 3 */
+ unsigned int buck1_voltage[8]; /* buckx_voltage in uV */
+ bool buck1_gpiodvs;
+ unsigned int buck2_voltage[8];
+ bool buck2_gpiodvs;
+ unsigned int buck5_voltage[8];
+ bool buck5_gpiodvs;
+
+ /* ---- Charger control ---- */
+ /* eoc stands for 'end of charge' */
+ int eoc_mA; /* 50 ~ 200mA by 10mA step */
+ /* charge Full Timeout */
+ int timeout; /* 0 (no timeout), 5, 6, 7 hours */
+
+ /* ---- MUIC ---- */
+ struct max8997_muic_platform_data *muic_pdata;
+
+ /* ---- HAPTIC ---- */
+ struct max8997_haptic_platform_data *haptic_pdata;
+
+ /* RTC: Not implemented */
+ /* ---- LED ---- */
+ struct max8997_led_platform_data *led_pdata;
+};
+
+#endif /* __LINUX_MFD_MAX8997_H */
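A hypothetical platform-data sketch matching the max8997_haptic_platform_data field documentation above, using the external-PWM mode; the PWM channel, period and charger limits are illustrative values, not taken from any real board:

#include <linux/mfd/max8997.h>

static struct max8997_haptic_platform_data example_haptic_pdata = {
	.pwm_channel_id	= 0,				/* assumed PWM channel */
	.pwm_period	= 38022,			/* ns, illustrative */
	.type		= MAX8997_HAPTIC_LRA,
	.mode		= MAX8997_EXTERNAL_MODE,	/* driven by an external PWM */
	.pwm_divisor	= MAX8997_PWM_DIVISOR_128,
};

static struct max8997_platform_data example_max8997_pdata = {
	.haptic_pdata	= &example_haptic_pdata,
	.eoc_mA		= 100,				/* end-of-charge current, mA */
	.timeout	= 5,				/* charge-full timeout, hours */
};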
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
new file mode 100644
index 000000000..d68ada502
--- /dev/null
+++ b/include/linux/mfd/max8998-private.h
@@ -0,0 +1,182 @@
+/*
+ * max8998-private.h - Voltage regulator driver for the Maxim 8998
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_MAX8998_PRIV_H
+#define __LINUX_MFD_MAX8998_PRIV_H
+
+#define MAX8998_NUM_IRQ_REGS 4
+
+/* MAX 8998 registers */
+enum {
+ MAX8998_REG_IRQ1,
+ MAX8998_REG_IRQ2,
+ MAX8998_REG_IRQ3,
+ MAX8998_REG_IRQ4,
+ MAX8998_REG_IRQM1,
+ MAX8998_REG_IRQM2,
+ MAX8998_REG_IRQM3,
+ MAX8998_REG_IRQM4,
+ MAX8998_REG_STATUS1,
+ MAX8998_REG_STATUS2,
+ MAX8998_REG_STATUSM1,
+ MAX8998_REG_STATUSM2,
+ MAX8998_REG_CHGR1,
+ MAX8998_REG_CHGR2,
+ MAX8998_REG_LDO_ACTIVE_DISCHARGE1,
+ MAX8998_REG_LDO_ACTIVE_DISCHARGE2,
+ MAX8998_REG_BUCK_ACTIVE_DISCHARGE3,
+ MAX8998_REG_ONOFF1,
+ MAX8998_REG_ONOFF2,
+ MAX8998_REG_ONOFF3,
+ MAX8998_REG_ONOFF4,
+ MAX8998_REG_BUCK1_VOLTAGE1,
+ MAX8998_REG_BUCK1_VOLTAGE2,
+ MAX8998_REG_BUCK1_VOLTAGE3,
+ MAX8998_REG_BUCK1_VOLTAGE4,
+ MAX8998_REG_BUCK2_VOLTAGE1,
+ MAX8998_REG_BUCK2_VOLTAGE2,
+ MAX8998_REG_BUCK3,
+ MAX8998_REG_BUCK4,
+ MAX8998_REG_LDO2_LDO3,
+ MAX8998_REG_LDO4,
+ MAX8998_REG_LDO5,
+ MAX8998_REG_LDO6,
+ MAX8998_REG_LDO7,
+ MAX8998_REG_LDO8_LDO9,
+ MAX8998_REG_LDO10_LDO11,
+ MAX8998_REG_LDO12,
+ MAX8998_REG_LDO13,
+ MAX8998_REG_LDO14,
+ MAX8998_REG_LDO15,
+ MAX8998_REG_LDO16,
+ MAX8998_REG_LDO17,
+ MAX8998_REG_BKCHR,
+ MAX8998_REG_LBCNFG1,
+ MAX8998_REG_LBCNFG2,
+};
+
+/* IRQ definitions */
+enum {
+ MAX8998_IRQ_DCINF,
+ MAX8998_IRQ_DCINR,
+ MAX8998_IRQ_JIGF,
+ MAX8998_IRQ_JIGR,
+ MAX8998_IRQ_PWRONF,
+ MAX8998_IRQ_PWRONR,
+
+ MAX8998_IRQ_WTSREVNT,
+ MAX8998_IRQ_SMPLEVNT,
+ MAX8998_IRQ_ALARM1,
+ MAX8998_IRQ_ALARM0,
+
+ MAX8998_IRQ_ONKEY1S,
+ MAX8998_IRQ_TOPOFFR,
+ MAX8998_IRQ_DCINOVPR,
+ MAX8998_IRQ_CHGRSTF,
+ MAX8998_IRQ_DONER,
+ MAX8998_IRQ_CHGFAULT,
+
+ MAX8998_IRQ_LOBAT1,
+ MAX8998_IRQ_LOBAT2,
+
+ MAX8998_IRQ_NR,
+};
+
+/* MAX8998 various variants */
+enum {
+ TYPE_MAX8998 = 0, /* Default */
+ TYPE_LP3974, /* National version of MAX8998 */
+ TYPE_LP3979, /* Added AVS */
+};
+
+#define MAX8998_IRQ_DCINF_MASK (1 << 2)
+#define MAX8998_IRQ_DCINR_MASK (1 << 3)
+#define MAX8998_IRQ_JIGF_MASK (1 << 4)
+#define MAX8998_IRQ_JIGR_MASK (1 << 5)
+#define MAX8998_IRQ_PWRONF_MASK (1 << 6)
+#define MAX8998_IRQ_PWRONR_MASK (1 << 7)
+
+#define MAX8998_IRQ_WTSREVNT_MASK (1 << 0)
+#define MAX8998_IRQ_SMPLEVNT_MASK (1 << 1)
+#define MAX8998_IRQ_ALARM1_MASK (1 << 2)
+#define MAX8998_IRQ_ALARM0_MASK (1 << 3)
+
+#define MAX8998_IRQ_ONKEY1S_MASK (1 << 0)
+#define MAX8998_IRQ_TOPOFFR_MASK (1 << 2)
+#define MAX8998_IRQ_DCINOVPR_MASK (1 << 3)
+#define MAX8998_IRQ_CHGRSTF_MASK (1 << 4)
+#define MAX8998_IRQ_DONER_MASK (1 << 5)
+#define MAX8998_IRQ_CHGFAULT_MASK (1 << 7)
+
+#define MAX8998_IRQ_LOBAT1_MASK (1 << 0)
+#define MAX8998_IRQ_LOBAT2_MASK (1 << 1)
+
+#define MAX8998_ENRAMP (1 << 4)
+
+struct irq_domain;
+
+/**
+ * struct max8998_dev - max8998 master device for sub-drivers
+ * @dev: master device of the chip (can be used to access platform data)
+ * @pdata: platform data for the driver and subdrivers
+ * @i2c: i2c client private data for regulator
+ * @rtc: i2c client private data for rtc
+ * @iolock: mutex for serializing io access
+ * @irqlock: mutex for buslock
+ * @irq_base: base IRQ number for max8998, required for IRQs
+ * @irq: generic IRQ number for max8998
+ * @ono: power onoff IRQ number for max8998
+ * @irq_masks_cur: currently active value
+ * @irq_masks_cache: cached hardware value
+ * @type: indicate which max8998 "variant" is used
+ */
+struct max8998_dev {
+ struct device *dev;
+ struct max8998_platform_data *pdata;
+ struct i2c_client *i2c;
+ struct i2c_client *rtc;
+ struct mutex iolock;
+ struct mutex irqlock;
+
+ unsigned int irq_base;
+ struct irq_domain *irq_domain;
+ int irq;
+ int ono;
+ u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
+ u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
+ unsigned long type;
+ bool wakeup;
+};
+
+int max8998_irq_init(struct max8998_dev *max8998);
+void max8998_irq_exit(struct max8998_dev *max8998);
+int max8998_irq_resume(struct max8998_dev *max8998);
+
+extern int max8998_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest);
+extern int max8998_bulk_read(struct i2c_client *i2c, u8 reg, int count,
+ u8 *buf);
+extern int max8998_write_reg(struct i2c_client *i2c, u8 reg, u8 value);
+extern int max8998_bulk_write(struct i2c_client *i2c, u8 reg, int count,
+ u8 *buf);
+extern int max8998_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask);
+
+#endif /* __LINUX_MFD_MAX8998_PRIV_H */
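
The read/write helpers declared at the end of this header are the register-access primitives the MAX8998 sub-drivers build on. A minimal sketch of how a caller might use them, assuming it already holds a struct max8998_dev; the register and bit choices are illustrative only.

#include <linux/mfd/max8998-private.h>

/* Illustrative only: set one bit in ONOFF1 without disturbing the rest,
 * then read back a charger register. */
static int example_touch_max8998(struct max8998_dev *max8998)
{
	u8 chgr1;
	int ret;

	ret = max8998_update_reg(max8998->i2c, MAX8998_REG_ONOFF1,
				 1 << 7, 1 << 7);	/* value, mask */
	if (ret < 0)
		return ret;

	ret = max8998_read_reg(max8998->i2c, MAX8998_REG_CHGR1, &chgr1);
	if (ret < 0)
		return ret;

	return chgr1;
}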
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
new file mode 100644
index 000000000..e3956a654
--- /dev/null
+++ b/include/linux/mfd/max8998.h
@@ -0,0 +1,118 @@
+/*
+ * max8998.h - Voltage regulator driver for the Maxim 8998
+ *
+ * Copyright (C) 2009-2010 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_MFD_MAX8998_H
+#define __LINUX_MFD_MAX8998_H
+
+#include <linux/regulator/machine.h>
+
+/* MAX 8998 regulator ids */
+enum {
+ MAX8998_LDO2 = 2,
+ MAX8998_LDO3,
+ MAX8998_LDO4,
+ MAX8998_LDO5,
+ MAX8998_LDO6,
+ MAX8998_LDO7,
+ MAX8998_LDO8,
+ MAX8998_LDO9,
+ MAX8998_LDO10,
+ MAX8998_LDO11,
+ MAX8998_LDO12,
+ MAX8998_LDO13,
+ MAX8998_LDO14,
+ MAX8998_LDO15,
+ MAX8998_LDO16,
+ MAX8998_LDO17,
+ MAX8998_BUCK1,
+ MAX8998_BUCK2,
+ MAX8998_BUCK3,
+ MAX8998_BUCK4,
+ MAX8998_EN32KHZ_AP,
+ MAX8998_EN32KHZ_CP,
+ MAX8998_ENVICHG,
+ MAX8998_ESAFEOUT1,
+ MAX8998_ESAFEOUT2,
+};
+
+/**
+ * max8998_regulator_data - regulator data
+ * @id: regulator id
+ * @initdata: regulator init data (constraints, supplies, ...)
+ * @reg_node: DT node of regulator (unused on non-DT platforms)
+ */
+struct max8998_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+ struct device_node *reg_node;
+};
+
+/**
+ * struct max8998_platform_data - packages regulator init data
+ * @regulators: array of defined regulators
+ * @num_regulators: number of regulators used
+ * @irq_base: base IRQ number for max8998, required for IRQs
+ * @ono: power onoff IRQ number for max8998
+ * @buck_voltage_lock: Do NOT change the values of the following six
+ * registers set by buck?_voltage?. The voltage of BUCK1/2 cannot be
+ * set to anything other than the preset values.
+ * @buck1_voltage: BUCK1 DVS mode 1 voltage registers
+ * @buck2_voltage: BUCK2 DVS mode 2 voltage registers
+ * @buck1_set1: BUCK1 gpio pin 1 to set output voltage
+ * @buck1_set2: BUCK1 gpio pin 2 to set output voltage
+ * @buck1_default_idx: Default for BUCK1 gpio pin 1, 2
+ * @buck2_set3: BUCK2 gpio pin to set output voltage
+ * @buck2_default_idx: Default for BUCK2 gpio pin.
+ * @wakeup: Allow to wake up from suspend
+ * @rtc_delay: LP3974 RTC chip bug that requires a delay after a register
+ * write before reading it.
+ * @eoc: End of Charge Level in percent: 10% ~ 45% by 5% step.
+ * If it equals 0, leave it unchanged.
+ * Otherwise, any value outside this range is invalid.
+ * @restart: Restart Level in mV: 100, 150, 200, and -1 for disable.
+ * If it equals 0, leave it unchanged.
+ * Otherwise, any other value is invalid.
+ * @timeout: Full Timeout in hours: 5, 6, 7, and -1 for disable.
+ * If it equals 0, leave it unchanged.
+ * Otherwise, any other value is invalid.
+ */
+struct max8998_platform_data {
+ struct max8998_regulator_data *regulators;
+ int num_regulators;
+ unsigned int irq_base;
+ int ono;
+ bool buck_voltage_lock;
+ int buck1_voltage[4];
+ int buck2_voltage[2];
+ int buck1_set1;
+ int buck1_set2;
+ int buck1_default_idx;
+ int buck2_set3;
+ int buck2_default_idx;
+ bool wakeup;
+ bool rtc_delay;
+ int eoc;
+ int restart;
+ int timeout;
+};
+
+#endif /* __LINUX_MFD_MAX8998_H */
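
For context, this is roughly how board code fills struct max8998_platform_data; a sketch under the assumption of a hypothetical board, with the charger fields following the "0 means leave unchanged" convention documented above.

#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/max8998.h>

static struct regulator_init_data board_ldo4_data = {
	.constraints = {
		.name		= "VCC_2.8V",	/* example supply name */
		.min_uV		= 2800000,
		.max_uV		= 2800000,
		.apply_uV	= 1,
	},
};

static struct max8998_regulator_data board_regulators[] = {
	{ .id = MAX8998_LDO4, .initdata = &board_ldo4_data },
};

static struct max8998_platform_data board_max8998_pdata = {
	.regulators	= board_regulators,
	.num_regulators	= ARRAY_SIZE(board_regulators),
	.wakeup		= true,
	.eoc		= 0,	/* 0: leave the EOC level unchanged */
	.restart	= 100,	/* restart level in mV */
	.timeout	= 7,	/* full-charge timeout in hours */
};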
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
new file mode 100644
index 000000000..4ff6137d8
--- /dev/null
+++ b/include/linux/mfd/mc13783.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#ifndef __LINUX_MFD_MC13783_H
+#define __LINUX_MFD_MC13783_H
+
+#include <linux/mfd/mc13xxx.h>
+
+#define MC13783_REG_SW1A 0
+#define MC13783_REG_SW1B 1
+#define MC13783_REG_SW2A 2
+#define MC13783_REG_SW2B 3
+#define MC13783_REG_SW3 4
+#define MC13783_REG_PLL 5
+#define MC13783_REG_VAUDIO 6
+#define MC13783_REG_VIOHI 7
+#define MC13783_REG_VIOLO 8
+#define MC13783_REG_VDIG 9
+#define MC13783_REG_VGEN 10
+#define MC13783_REG_VRFDIG 11
+#define MC13783_REG_VRFREF 12
+#define MC13783_REG_VRFCP 13
+#define MC13783_REG_VSIM 14
+#define MC13783_REG_VESIM 15
+#define MC13783_REG_VCAM 16
+#define MC13783_REG_VRFBG 17
+#define MC13783_REG_VVIB 18
+#define MC13783_REG_VRF1 19
+#define MC13783_REG_VRF2 20
+#define MC13783_REG_VMMC1 21
+#define MC13783_REG_VMMC2 22
+#define MC13783_REG_GPO1 23
+#define MC13783_REG_GPO2 24
+#define MC13783_REG_GPO3 25
+#define MC13783_REG_GPO4 26
+#define MC13783_REG_V1 27
+#define MC13783_REG_V2 28
+#define MC13783_REG_V3 29
+#define MC13783_REG_V4 30
+#define MC13783_REG_PWGT1SPI 31
+#define MC13783_REG_PWGT2SPI 32
+
+#define MC13783_IRQ_ADCDONE MC13XXX_IRQ_ADCDONE
+#define MC13783_IRQ_ADCBISDONE MC13XXX_IRQ_ADCBISDONE
+#define MC13783_IRQ_TS MC13XXX_IRQ_TS
+#define MC13783_IRQ_WHIGH 3
+#define MC13783_IRQ_WLOW 4
+#define MC13783_IRQ_CHGDET MC13XXX_IRQ_CHGDET
+#define MC13783_IRQ_CHGOV 7
+#define MC13783_IRQ_CHGREV MC13XXX_IRQ_CHGREV
+#define MC13783_IRQ_CHGSHORT MC13XXX_IRQ_CHGSHORT
+#define MC13783_IRQ_CCCV MC13XXX_IRQ_CCCV
+#define MC13783_IRQ_CHGCURR MC13XXX_IRQ_CHGCURR
+#define MC13783_IRQ_BPON MC13XXX_IRQ_BPON
+#define MC13783_IRQ_LOBATL MC13XXX_IRQ_LOBATL
+#define MC13783_IRQ_LOBATH MC13XXX_IRQ_LOBATH
+#define MC13783_IRQ_UDP 15
+#define MC13783_IRQ_USB 16
+#define MC13783_IRQ_ID 19
+#define MC13783_IRQ_SE1 21
+#define MC13783_IRQ_CKDET 22
+#define MC13783_IRQ_UDM 23
+#define MC13783_IRQ_1HZ MC13XXX_IRQ_1HZ
+#define MC13783_IRQ_TODA MC13XXX_IRQ_TODA
+#define MC13783_IRQ_ONOFD1 27
+#define MC13783_IRQ_ONOFD2 28
+#define MC13783_IRQ_ONOFD3 29
+#define MC13783_IRQ_SYSRST MC13XXX_IRQ_SYSRST
+#define MC13783_IRQ_RTCRST MC13XXX_IRQ_RTCRST
+#define MC13783_IRQ_PC MC13XXX_IRQ_PC
+#define MC13783_IRQ_WARM MC13XXX_IRQ_WARM
+#define MC13783_IRQ_MEMHLD MC13XXX_IRQ_MEMHLD
+#define MC13783_IRQ_PWRRDY 35
+#define MC13783_IRQ_THWARNL MC13XXX_IRQ_THWARNL
+#define MC13783_IRQ_THWARNH MC13XXX_IRQ_THWARNH
+#define MC13783_IRQ_CLK MC13XXX_IRQ_CLK
+#define MC13783_IRQ_SEMAF 39
+#define MC13783_IRQ_MC2B 41
+#define MC13783_IRQ_HSDET 42
+#define MC13783_IRQ_HSL 43
+#define MC13783_IRQ_ALSPTH 44
+#define MC13783_IRQ_AHSSHORT 45
+
+#endif /* ifndef __LINUX_MFD_MC13783_H */
diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h
new file mode 100644
index 000000000..a00f2bec1
--- /dev/null
+++ b/include/linux/mfd/mc13892.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_MC13892_H
+#define __LINUX_MFD_MC13892_H
+
+#include <linux/mfd/mc13xxx.h>
+
+#define MC13892_SW1 0
+#define MC13892_SW2 1
+#define MC13892_SW3 2
+#define MC13892_SW4 3
+#define MC13892_SWBST 4
+#define MC13892_VIOHI 5
+#define MC13892_VPLL 6
+#define MC13892_VDIG 7
+#define MC13892_VSD 8
+#define MC13892_VUSB2 9
+#define MC13892_VVIDEO 10
+#define MC13892_VAUDIO 11
+#define MC13892_VCAM 12
+#define MC13892_VGEN1 13
+#define MC13892_VGEN2 14
+#define MC13892_VGEN3 15
+#define MC13892_VUSB 16
+#define MC13892_GPO1 17
+#define MC13892_GPO2 18
+#define MC13892_GPO3 19
+#define MC13892_GPO4 20
+#define MC13892_PWGT1SPI 21
+#define MC13892_PWGT2SPI 22
+#define MC13892_VCOINCELL 23
+
+#endif
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
new file mode 100644
index 000000000..638222e43
--- /dev/null
+++ b/include/linux/mfd/mc13xxx.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2009-2010 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#ifndef __LINUX_MFD_MC13XXX_H
+#define __LINUX_MFD_MC13XXX_H
+
+#include <linux/interrupt.h>
+
+struct mc13xxx;
+
+void mc13xxx_lock(struct mc13xxx *mc13xxx);
+void mc13xxx_unlock(struct mc13xxx *mc13xxx);
+
+int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val);
+int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val);
+int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
+ u32 mask, u32 val);
+
+int mc13xxx_irq_request(struct mc13xxx *mc13xxx, int irq,
+ irq_handler_t handler, const char *name, void *dev);
+int mc13xxx_irq_free(struct mc13xxx *mc13xxx, int irq, void *dev);
+
+int mc13xxx_irq_status(struct mc13xxx *mc13xxx, int irq,
+ int *enabled, int *pending);
+
+int mc13xxx_get_flags(struct mc13xxx *mc13xxx);
+
+int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx,
+ unsigned int mode, unsigned int channel,
+ u8 ato, bool atox, unsigned int *sample);
+
+/* Deprecated calls */
+static inline int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq)
+{
+ return 0;
+}
+
+static inline int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq,
+ irq_handler_t handler,
+ const char *name, void *dev)
+{
+ return mc13xxx_irq_request(mc13xxx, irq, handler, name, dev);
+}
+
+int mc13xxx_irq_mask(struct mc13xxx *mc13xxx, int irq);
+int mc13xxx_irq_unmask(struct mc13xxx *mc13xxx, int irq);
+
+#define MC13783_AUDIO_RX0 36
+#define MC13783_AUDIO_RX1 37
+#define MC13783_AUDIO_TX 38
+#define MC13783_SSI_NETWORK 39
+#define MC13783_AUDIO_CODEC 40
+#define MC13783_AUDIO_DAC 41
+
+#define MC13XXX_IRQ_ADCDONE 0
+#define MC13XXX_IRQ_ADCBISDONE 1
+#define MC13XXX_IRQ_TS 2
+#define MC13XXX_IRQ_CHGDET 6
+#define MC13XXX_IRQ_CHGREV 8
+#define MC13XXX_IRQ_CHGSHORT 9
+#define MC13XXX_IRQ_CCCV 10
+#define MC13XXX_IRQ_CHGCURR 11
+#define MC13XXX_IRQ_BPON 12
+#define MC13XXX_IRQ_LOBATL 13
+#define MC13XXX_IRQ_LOBATH 14
+#define MC13XXX_IRQ_1HZ 24
+#define MC13XXX_IRQ_TODA 25
+#define MC13XXX_IRQ_SYSRST 30
+#define MC13XXX_IRQ_RTCRST 31
+#define MC13XXX_IRQ_PC 32
+#define MC13XXX_IRQ_WARM 33
+#define MC13XXX_IRQ_MEMHLD 34
+#define MC13XXX_IRQ_THWARNL 36
+#define MC13XXX_IRQ_THWARNH 37
+#define MC13XXX_IRQ_CLK 38
+
+struct regulator_init_data;
+
+struct mc13xxx_regulator_init_data {
+ int id;
+ struct regulator_init_data *init_data;
+ struct device_node *node;
+};
+
+struct mc13xxx_regulator_platform_data {
+ int num_regulators;
+ struct mc13xxx_regulator_init_data *regulators;
+};
+
+enum {
+ /* MC13783 LED IDs */
+ MC13783_LED_MD,
+ MC13783_LED_AD,
+ MC13783_LED_KP,
+ MC13783_LED_R1,
+ MC13783_LED_G1,
+ MC13783_LED_B1,
+ MC13783_LED_R2,
+ MC13783_LED_G2,
+ MC13783_LED_B2,
+ MC13783_LED_R3,
+ MC13783_LED_G3,
+ MC13783_LED_B3,
+ /* MC13892 LED IDs */
+ MC13892_LED_MD,
+ MC13892_LED_AD,
+ MC13892_LED_KP,
+ MC13892_LED_R,
+ MC13892_LED_G,
+ MC13892_LED_B,
+ /* MC34708 LED IDs */
+ MC34708_LED_R,
+ MC34708_LED_G,
+};
+
+struct mc13xxx_led_platform_data {
+ int id;
+ const char *name;
+ const char *default_trigger;
+};
+
+#define MAX_LED_CONTROL_REGS 6
+
+/* MC13783 LED Control 0 */
+#define MC13783_LED_C0_ENABLE (1 << 0)
+#define MC13783_LED_C0_TRIODE_MD (1 << 7)
+#define MC13783_LED_C0_TRIODE_AD (1 << 8)
+#define MC13783_LED_C0_TRIODE_KP (1 << 9)
+#define MC13783_LED_C0_BOOST (1 << 10)
+#define MC13783_LED_C0_ABMODE(x) (((x) & 0x7) << 11)
+#define MC13783_LED_C0_ABREF(x) (((x) & 0x3) << 14)
+/* MC13783 LED Control 1 */
+#define MC13783_LED_C1_TC1HALF (1 << 18)
+#define MC13783_LED_C1_SLEWLIM (1 << 23)
+/* MC13783 LED Control 2 */
+#define MC13783_LED_C2_CURRENT_MD(x) (((x) & 0x7) << 0)
+#define MC13783_LED_C2_CURRENT_AD(x) (((x) & 0x7) << 3)
+#define MC13783_LED_C2_CURRENT_KP(x) (((x) & 0x7) << 6)
+#define MC13783_LED_C2_PERIOD(x) (((x) & 0x3) << 21)
+#define MC13783_LED_C2_SLEWLIM (1 << 23)
+/* MC13783 LED Control 3 */
+#define MC13783_LED_C3_CURRENT_R1(x) (((x) & 0x3) << 0)
+#define MC13783_LED_C3_CURRENT_G1(x) (((x) & 0x3) << 2)
+#define MC13783_LED_C3_CURRENT_B1(x) (((x) & 0x3) << 4)
+#define MC13783_LED_C3_PERIOD(x) (((x) & 0x3) << 21)
+#define MC13783_LED_C3_TRIODE_TC1 (1 << 23)
+/* MC13783 LED Control 4 */
+#define MC13783_LED_C4_CURRENT_R2(x) (((x) & 0x3) << 0)
+#define MC13783_LED_C4_CURRENT_G2(x) (((x) & 0x3) << 2)
+#define MC13783_LED_C4_CURRENT_B2(x) (((x) & 0x3) << 4)
+#define MC13783_LED_C4_PERIOD(x) (((x) & 0x3) << 21)
+#define MC13783_LED_C4_TRIODE_TC2 (1 << 23)
+/* MC13783 LED Control 5 */
+#define MC13783_LED_C5_CURRENT_R3(x) (((x) & 0x3) << 0)
+#define MC13783_LED_C5_CURRENT_G3(x) (((x) & 0x3) << 2)
+#define MC13783_LED_C5_CURRENT_B3(x) (((x) & 0x3) << 4)
+#define MC13783_LED_C5_PERIOD(x) (((x) & 0x3) << 21)
+#define MC13783_LED_C5_TRIODE_TC3 (1 << 23)
+/* MC13892 LED Control 0 */
+#define MC13892_LED_C0_CURRENT_MD(x) (((x) & 0x7) << 9)
+#define MC13892_LED_C0_CURRENT_AD(x) (((x) & 0x7) << 21)
+/* MC13892 LED Control 1 */
+#define MC13892_LED_C1_CURRENT_KP(x) (((x) & 0x7) << 9)
+/* MC13892 LED Control 2 */
+#define MC13892_LED_C2_CURRENT_R(x) (((x) & 0x7) << 9)
+#define MC13892_LED_C2_CURRENT_G(x) (((x) & 0x7) << 21)
+/* MC13892 LED Control 3 */
+#define MC13892_LED_C3_CURRENT_B(x) (((x) & 0x7) << 9)
+/* MC34708 LED Control 0 */
+#define MC34708_LED_C0_CURRENT_R(x) (((x) & 0x3) << 9)
+#define MC34708_LED_C0_CURRENT_G(x) (((x) & 0x3) << 21)
+
+struct mc13xxx_leds_platform_data {
+ struct mc13xxx_led_platform_data *led;
+ int num_leds;
+ u32 led_control[MAX_LED_CONTROL_REGS];
+};
+
+#define MC13783_BUTTON_DBNC_0MS 0
+#define MC13783_BUTTON_DBNC_30MS 1
+#define MC13783_BUTTON_DBNC_150MS 2
+#define MC13783_BUTTON_DBNC_750MS 3
+#define MC13783_BUTTON_ENABLE (1 << 2)
+#define MC13783_BUTTON_POL_INVERT (1 << 3)
+#define MC13783_BUTTON_RESET_EN (1 << 4)
+
+struct mc13xxx_buttons_platform_data {
+ int b1on_flags;
+ unsigned short b1on_key;
+ int b2on_flags;
+ unsigned short b2on_key;
+ int b3on_flags;
+ unsigned short b3on_key;
+};
+
+#define MC13783_TS_ATO_FIRST false
+#define MC13783_TS_ATO_EACH true
+
+struct mc13xxx_ts_platform_data {
+ /* Delay between touchscreen polarization and ADC conversion.
+ * Given in clock ticks of a 32 kHz clock, which gives a granularity of
+ * about 30.5 us per tick. */
+ u8 ato;
+ /* Use the ATO delay only for the first conversion or for each one */
+ bool atox;
+};
+
+enum mc13783_ssi_port {
+ MC13783_SSI1_PORT,
+ MC13783_SSI2_PORT,
+};
+
+struct mc13xxx_codec_platform_data {
+ enum mc13783_ssi_port adc_ssi_port;
+ enum mc13783_ssi_port dac_ssi_port;
+};
+
+#define MC13XXX_USE_TOUCHSCREEN (1 << 0)
+#define MC13XXX_USE_CODEC (1 << 1)
+#define MC13XXX_USE_ADC (1 << 2)
+#define MC13XXX_USE_RTC (1 << 3)
+
+struct mc13xxx_platform_data {
+ unsigned int flags;
+
+ struct mc13xxx_regulator_platform_data regulators;
+ struct mc13xxx_leds_platform_data *leds;
+ struct mc13xxx_buttons_platform_data *buttons;
+ struct mc13xxx_ts_platform_data touch;
+ struct mc13xxx_codec_platform_data *codec;
+};
+
+#define MC13XXX_ADC_MODE_TS 1
+#define MC13XXX_ADC_MODE_SINGLE_CHAN 2
+#define MC13XXX_ADC_MODE_MULT_CHAN 3
+
+#define MC13XXX_ADC0 43
+#define MC13XXX_ADC0_LICELLCON (1 << 0)
+#define MC13XXX_ADC0_CHRGICON (1 << 1)
+#define MC13XXX_ADC0_BATICON (1 << 2)
+#define MC13XXX_ADC0_ADREFEN (1 << 10)
+#define MC13XXX_ADC0_TSMOD0 (1 << 12)
+#define MC13XXX_ADC0_TSMOD1 (1 << 13)
+#define MC13XXX_ADC0_TSMOD2 (1 << 14)
+#define MC13XXX_ADC0_ADINC1 (1 << 16)
+#define MC13XXX_ADC0_ADINC2 (1 << 17)
+
+#define MC13XXX_ADC0_TSMOD_MASK (MC13XXX_ADC0_TSMOD0 | \
+ MC13XXX_ADC0_TSMOD1 | \
+ MC13XXX_ADC0_TSMOD2)
+
+#define MC13XXX_ADC0_CONFIG_MASK (MC13XXX_ADC0_TSMOD_MASK | \
+ MC13XXX_ADC0_LICELLCON | \
+ MC13XXX_ADC0_CHRGICON | \
+ MC13XXX_ADC0_BATICON)
+
+#endif /* ifndef __LINUX_MFD_MC13XXX_H */
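
The mc13xxx API above exposes a lock/unlock pair, which suggests callers serialize register access and IRQ setup around it. A minimal sketch of a hypothetical caller; the ADC0 bits and the IRQ chosen here are only to illustrate the calls.

#include <linux/interrupt.h>
#include <linux/mfd/mc13xxx.h>

static irqreturn_t example_adcdone_handler(int irq, void *data)
{
	/* consume the finished conversion here */
	return IRQ_HANDLED;
}

static int example_mc13xxx_setup(struct mc13xxx *mc13xxx, void *dev)
{
	int ret;

	mc13xxx_lock(mc13xxx);

	/* enable the Li-cell and charge-current inputs in ADC0 */
	ret = mc13xxx_reg_rmw(mc13xxx, MC13XXX_ADC0,
			      MC13XXX_ADC0_LICELLCON | MC13XXX_ADC0_CHRGICON,
			      MC13XXX_ADC0_LICELLCON | MC13XXX_ADC0_CHRGICON);
	if (!ret)
		ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
					  example_adcdone_handler,
					  "example-adc", dev);

	mc13xxx_unlock(mc13xxx);
	return ret;
}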
diff --git a/include/linux/mfd/mcp.h b/include/linux/mfd/mcp.h
new file mode 100644
index 000000000..f68295304
--- /dev/null
+++ b/include/linux/mfd/mcp.h
@@ -0,0 +1,66 @@
+/*
+ * linux/drivers/mfd/mcp.h
+ *
+ * Copyright (C) 2001 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#ifndef MCP_H
+#define MCP_H
+
+#include <linux/device.h>
+
+struct mcp_ops;
+
+struct mcp {
+ struct module *owner;
+ struct mcp_ops *ops;
+ spinlock_t lock;
+ int use_count;
+ unsigned int sclk_rate;
+ unsigned int rw_timeout;
+ struct device attached_device;
+};
+
+struct mcp_ops {
+ void (*set_telecom_divisor)(struct mcp *, unsigned int);
+ void (*set_audio_divisor)(struct mcp *, unsigned int);
+ void (*reg_write)(struct mcp *, unsigned int, unsigned int);
+ unsigned int (*reg_read)(struct mcp *, unsigned int);
+ void (*enable)(struct mcp *);
+ void (*disable)(struct mcp *);
+};
+
+void mcp_set_telecom_divisor(struct mcp *, unsigned int);
+void mcp_set_audio_divisor(struct mcp *, unsigned int);
+void mcp_reg_write(struct mcp *, unsigned int, unsigned int);
+unsigned int mcp_reg_read(struct mcp *, unsigned int);
+void mcp_enable(struct mcp *);
+void mcp_disable(struct mcp *);
+#define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate)
+
+struct mcp *mcp_host_alloc(struct device *, size_t);
+int mcp_host_add(struct mcp *, void *);
+void mcp_host_del(struct mcp *);
+void mcp_host_free(struct mcp *);
+
+struct mcp_driver {
+ struct device_driver drv;
+ int (*probe)(struct mcp *);
+ void (*remove)(struct mcp *);
+};
+
+int mcp_driver_register(struct mcp_driver *);
+void mcp_driver_unregister(struct mcp_driver *);
+
+#define mcp_get_drvdata(mcp) dev_get_drvdata(&(mcp)->attached_device)
+#define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d)
+
+static inline void *mcp_priv(struct mcp *mcp)
+{
+ return mcp + 1;
+}
+
+#endif
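
struct mcp_driver plugs into the usual Linux driver model. A minimal sketch of a client, assuming hypothetical names; register 0 and the drvdata use are purely illustrative.

#include <linux/device.h>
#include <linux/mfd/mcp.h>

static int example_mcp_probe(struct mcp *mcp)
{
	unsigned int id;

	mcp_enable(mcp);
	id = mcp_reg_read(mcp, 0);		/* register 0: example only */
	mcp_disable(mcp);

	mcp_set_drvdata(mcp, (void *)(unsigned long)id);
	return 0;
}

static void example_mcp_remove(struct mcp *mcp)
{
	mcp_set_drvdata(mcp, NULL);
}

static struct mcp_driver example_mcp_driver = {
	.drv	= { .name = "example-mcp-client" },
	.probe	= example_mcp_probe,
	.remove	= example_mcp_remove,
};
/* registered from module init with mcp_driver_register(&example_mcp_driver)
 * and removed with mcp_driver_unregister(&example_mcp_driver) */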
diff --git a/include/linux/mfd/menelaus.h b/include/linux/mfd/menelaus.h
new file mode 100644
index 000000000..9e85ac06d
--- /dev/null
+++ b/include/linux/mfd/menelaus.h
@@ -0,0 +1,40 @@
+/*
+ * Functions to access Menelaus power management chip
+ */
+
+#ifndef __ASM_ARCH_MENELAUS_H
+#define __ASM_ARCH_MENELAUS_H
+
+struct device;
+
+struct menelaus_platform_data {
+ int (*late_init)(struct device *dev);
+};
+
+extern int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask),
+ void *data);
+extern void menelaus_unregister_mmc_callback(void);
+extern int menelaus_set_mmc_opendrain(int slot, int enable);
+extern int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_on);
+
+extern int menelaus_set_vmem(unsigned int mV);
+extern int menelaus_set_vio(unsigned int mV);
+extern int menelaus_set_vmmc(unsigned int mV);
+extern int menelaus_set_vaux(unsigned int mV);
+extern int menelaus_set_vdcdc(int dcdc, unsigned int mV);
+extern int menelaus_set_slot_sel(int enable);
+extern int menelaus_get_slot_pin_states(void);
+extern int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV);
+
+#define EN_VPLL_SLEEP (1 << 7)
+#define EN_VMMC_SLEEP (1 << 6)
+#define EN_VAUX_SLEEP (1 << 5)
+#define EN_VIO_SLEEP (1 << 4)
+#define EN_VMEM_SLEEP (1 << 3)
+#define EN_DC3_SLEEP (1 << 2)
+#define EN_DC2_SLEEP (1 << 1)
+#define EN_VC_SLEEP (1 << 0)
+
+extern int menelaus_set_regulator_sleep(int enable, u32 val);
+
+#endif
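
A short sketch of the exported Menelaus calls, assuming a hypothetical caller; the 3000 mV setting and the sleep mask are illustrative.

#include <linux/mfd/menelaus.h>

/* Illustrative only: set VMMC to 3.0 V and keep VMMC/VAUX alive in sleep. */
static int example_menelaus_config(void)
{
	int ret;

	ret = menelaus_set_vmmc(3000);		/* argument is in mV */
	if (ret < 0)
		return ret;

	return menelaus_set_regulator_sleep(1, EN_VMMC_SLEEP | EN_VAUX_SLEEP);
}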
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
new file mode 100644
index 000000000..cf5265b0d
--- /dev/null
+++ b/include/linux/mfd/mt6397/core.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MT6397_CORE_H__
+#define __MFD_MT6397_CORE_H__
+
+enum mt6397_irq_numbers {
+ MT6397_IRQ_SPKL_AB = 0,
+ MT6397_IRQ_SPKR_AB,
+ MT6397_IRQ_SPKL,
+ MT6397_IRQ_SPKR,
+ MT6397_IRQ_BAT_L,
+ MT6397_IRQ_BAT_H,
+ MT6397_IRQ_FG_BAT_L,
+ MT6397_IRQ_FG_BAT_H,
+ MT6397_IRQ_WATCHDOG,
+ MT6397_IRQ_PWRKEY,
+ MT6397_IRQ_THR_L,
+ MT6397_IRQ_THR_H,
+ MT6397_IRQ_VBATON_UNDET,
+ MT6397_IRQ_BVALID_DET,
+ MT6397_IRQ_CHRDET,
+ MT6397_IRQ_OV,
+ MT6397_IRQ_LDO,
+ MT6397_IRQ_HOMEKEY,
+ MT6397_IRQ_ACCDET,
+ MT6397_IRQ_AUDIO,
+ MT6397_IRQ_RTC,
+ MT6397_IRQ_PWRKEY_RSTB,
+ MT6397_IRQ_HDMI_SIFM,
+ MT6397_IRQ_HDMI_CEC,
+ MT6397_IRQ_VCA15,
+ MT6397_IRQ_VSRMCA15,
+ MT6397_IRQ_VCORE,
+ MT6397_IRQ_VGPU,
+ MT6397_IRQ_VIO18,
+ MT6397_IRQ_VPCA7,
+ MT6397_IRQ_VSRMCA7,
+ MT6397_IRQ_VDRM,
+ MT6397_IRQ_NR,
+};
+
+struct mt6397_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct irq_domain *irq_domain;
+ struct mutex irqlock;
+ u16 irq_masks_cur[2];
+ u16 irq_masks_cache[2];
+};
+
+#endif /* __MFD_MT6397_CORE_H__ */
diff --git a/include/linux/mfd/mt6397/registers.h b/include/linux/mfd/mt6397/registers.h
new file mode 100644
index 000000000..f23a0a60a
--- /dev/null
+++ b/include/linux/mfd/mt6397/registers.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu, MediaTek
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MT6397_REGISTERS_H__
+#define __MFD_MT6397_REGISTERS_H__
+
+/* PMIC Registers */
+#define MT6397_CID 0x0100
+#define MT6397_TOP_CKPDN 0x0102
+#define MT6397_TOP_CKPDN_SET 0x0104
+#define MT6397_TOP_CKPDN_CLR 0x0106
+#define MT6397_TOP_CKPDN2 0x0108
+#define MT6397_TOP_CKPDN2_SET 0x010A
+#define MT6397_TOP_CKPDN2_CLR 0x010C
+#define MT6397_TOP_GPIO_CKPDN 0x010E
+#define MT6397_TOP_RST_CON 0x0114
+#define MT6397_WRP_CKPDN 0x011A
+#define MT6397_WRP_RST_CON 0x0120
+#define MT6397_TOP_RST_MISC 0x0126
+#define MT6397_TOP_CKCON1 0x0128
+#define MT6397_TOP_CKCON2 0x012A
+#define MT6397_TOP_CKTST1 0x012C
+#define MT6397_TOP_CKTST2 0x012E
+#define MT6397_OC_DEG_EN 0x0130
+#define MT6397_OC_CTL0 0x0132
+#define MT6397_OC_CTL1 0x0134
+#define MT6397_OC_CTL2 0x0136
+#define MT6397_INT_RSV 0x0138
+#define MT6397_TEST_CON0 0x013A
+#define MT6397_TEST_CON1 0x013C
+#define MT6397_STATUS0 0x013E
+#define MT6397_STATUS1 0x0140
+#define MT6397_PGSTATUS 0x0142
+#define MT6397_CHRSTATUS 0x0144
+#define MT6397_OCSTATUS0 0x0146
+#define MT6397_OCSTATUS1 0x0148
+#define MT6397_OCSTATUS2 0x014A
+#define MT6397_HDMI_PAD_IE 0x014C
+#define MT6397_TEST_OUT_L 0x014E
+#define MT6397_TEST_OUT_H 0x0150
+#define MT6397_TDSEL_CON 0x0152
+#define MT6397_RDSEL_CON 0x0154
+#define MT6397_GPIO_SMT_CON0 0x0156
+#define MT6397_GPIO_SMT_CON1 0x0158
+#define MT6397_GPIO_SMT_CON2 0x015A
+#define MT6397_GPIO_SMT_CON3 0x015C
+#define MT6397_DRV_CON0 0x015E
+#define MT6397_DRV_CON1 0x0160
+#define MT6397_DRV_CON2 0x0162
+#define MT6397_DRV_CON3 0x0164
+#define MT6397_DRV_CON4 0x0166
+#define MT6397_DRV_CON5 0x0168
+#define MT6397_DRV_CON6 0x016A
+#define MT6397_DRV_CON7 0x016C
+#define MT6397_DRV_CON8 0x016E
+#define MT6397_DRV_CON9 0x0170
+#define MT6397_DRV_CON10 0x0172
+#define MT6397_DRV_CON11 0x0174
+#define MT6397_DRV_CON12 0x0176
+#define MT6397_INT_CON0 0x0178
+#define MT6397_INT_CON1 0x017E
+#define MT6397_INT_STATUS0 0x0184
+#define MT6397_INT_STATUS1 0x0186
+#define MT6397_FQMTR_CON0 0x0188
+#define MT6397_FQMTR_CON1 0x018A
+#define MT6397_FQMTR_CON2 0x018C
+#define MT6397_EFUSE_DOUT_0_15 0x01C4
+#define MT6397_EFUSE_DOUT_16_31 0x01C6
+#define MT6397_EFUSE_DOUT_32_47 0x01C8
+#define MT6397_EFUSE_DOUT_48_63 0x01CA
+#define MT6397_SPI_CON 0x01CC
+#define MT6397_TOP_CKPDN3 0x01CE
+#define MT6397_TOP_CKCON3 0x01D4
+#define MT6397_EFUSE_DOUT_64_79 0x01D6
+#define MT6397_EFUSE_DOUT_80_95 0x01D8
+#define MT6397_EFUSE_DOUT_96_111 0x01DA
+#define MT6397_EFUSE_DOUT_112_127 0x01DC
+#define MT6397_EFUSE_DOUT_128_143 0x01DE
+#define MT6397_EFUSE_DOUT_144_159 0x01E0
+#define MT6397_EFUSE_DOUT_160_175 0x01E2
+#define MT6397_EFUSE_DOUT_176_191 0x01E4
+#define MT6397_EFUSE_DOUT_192_207 0x01E6
+#define MT6397_EFUSE_DOUT_208_223 0x01E8
+#define MT6397_EFUSE_DOUT_224_239 0x01EA
+#define MT6397_EFUSE_DOUT_240_255 0x01EC
+#define MT6397_EFUSE_DOUT_256_271 0x01EE
+#define MT6397_EFUSE_DOUT_272_287 0x01F0
+#define MT6397_EFUSE_DOUT_288_300 0x01F2
+#define MT6397_EFUSE_DOUT_304_319 0x01F4
+#define MT6397_BUCK_CON0 0x0200
+#define MT6397_BUCK_CON1 0x0202
+#define MT6397_BUCK_CON2 0x0204
+#define MT6397_BUCK_CON3 0x0206
+#define MT6397_BUCK_CON4 0x0208
+#define MT6397_BUCK_CON5 0x020A
+#define MT6397_BUCK_CON6 0x020C
+#define MT6397_BUCK_CON7 0x020E
+#define MT6397_BUCK_CON8 0x0210
+#define MT6397_BUCK_CON9 0x0212
+#define MT6397_VCA15_CON0 0x0214
+#define MT6397_VCA15_CON1 0x0216
+#define MT6397_VCA15_CON2 0x0218
+#define MT6397_VCA15_CON3 0x021A
+#define MT6397_VCA15_CON4 0x021C
+#define MT6397_VCA15_CON5 0x021E
+#define MT6397_VCA15_CON6 0x0220
+#define MT6397_VCA15_CON7 0x0222
+#define MT6397_VCA15_CON8 0x0224
+#define MT6397_VCA15_CON9 0x0226
+#define MT6397_VCA15_CON10 0x0228
+#define MT6397_VCA15_CON11 0x022A
+#define MT6397_VCA15_CON12 0x022C
+#define MT6397_VCA15_CON13 0x022E
+#define MT6397_VCA15_CON14 0x0230
+#define MT6397_VCA15_CON15 0x0232
+#define MT6397_VCA15_CON16 0x0234
+#define MT6397_VCA15_CON17 0x0236
+#define MT6397_VCA15_CON18 0x0238
+#define MT6397_VSRMCA15_CON0 0x023A
+#define MT6397_VSRMCA15_CON1 0x023C
+#define MT6397_VSRMCA15_CON2 0x023E
+#define MT6397_VSRMCA15_CON3 0x0240
+#define MT6397_VSRMCA15_CON4 0x0242
+#define MT6397_VSRMCA15_CON5 0x0244
+#define MT6397_VSRMCA15_CON6 0x0246
+#define MT6397_VSRMCA15_CON7 0x0248
+#define MT6397_VSRMCA15_CON8 0x024A
+#define MT6397_VSRMCA15_CON9 0x024C
+#define MT6397_VSRMCA15_CON10 0x024E
+#define MT6397_VSRMCA15_CON11 0x0250
+#define MT6397_VSRMCA15_CON12 0x0252
+#define MT6397_VSRMCA15_CON13 0x0254
+#define MT6397_VSRMCA15_CON14 0x0256
+#define MT6397_VSRMCA15_CON15 0x0258
+#define MT6397_VSRMCA15_CON16 0x025A
+#define MT6397_VSRMCA15_CON17 0x025C
+#define MT6397_VSRMCA15_CON18 0x025E
+#define MT6397_VSRMCA15_CON19 0x0260
+#define MT6397_VSRMCA15_CON20 0x0262
+#define MT6397_VSRMCA15_CON21 0x0264
+#define MT6397_VCORE_CON0 0x0266
+#define MT6397_VCORE_CON1 0x0268
+#define MT6397_VCORE_CON2 0x026A
+#define MT6397_VCORE_CON3 0x026C
+#define MT6397_VCORE_CON4 0x026E
+#define MT6397_VCORE_CON5 0x0270
+#define MT6397_VCORE_CON6 0x0272
+#define MT6397_VCORE_CON7 0x0274
+#define MT6397_VCORE_CON8 0x0276
+#define MT6397_VCORE_CON9 0x0278
+#define MT6397_VCORE_CON10 0x027A
+#define MT6397_VCORE_CON11 0x027C
+#define MT6397_VCORE_CON12 0x027E
+#define MT6397_VCORE_CON13 0x0280
+#define MT6397_VCORE_CON14 0x0282
+#define MT6397_VCORE_CON15 0x0284
+#define MT6397_VCORE_CON16 0x0286
+#define MT6397_VCORE_CON17 0x0288
+#define MT6397_VCORE_CON18 0x028A
+#define MT6397_VGPU_CON0 0x028C
+#define MT6397_VGPU_CON1 0x028E
+#define MT6397_VGPU_CON2 0x0290
+#define MT6397_VGPU_CON3 0x0292
+#define MT6397_VGPU_CON4 0x0294
+#define MT6397_VGPU_CON5 0x0296
+#define MT6397_VGPU_CON6 0x0298
+#define MT6397_VGPU_CON7 0x029A
+#define MT6397_VGPU_CON8 0x029C
+#define MT6397_VGPU_CON9 0x029E
+#define MT6397_VGPU_CON10 0x02A0
+#define MT6397_VGPU_CON11 0x02A2
+#define MT6397_VGPU_CON12 0x02A4
+#define MT6397_VGPU_CON13 0x02A6
+#define MT6397_VGPU_CON14 0x02A8
+#define MT6397_VGPU_CON15 0x02AA
+#define MT6397_VGPU_CON16 0x02AC
+#define MT6397_VGPU_CON17 0x02AE
+#define MT6397_VGPU_CON18 0x02B0
+#define MT6397_VIO18_CON0 0x0300
+#define MT6397_VIO18_CON1 0x0302
+#define MT6397_VIO18_CON2 0x0304
+#define MT6397_VIO18_CON3 0x0306
+#define MT6397_VIO18_CON4 0x0308
+#define MT6397_VIO18_CON5 0x030A
+#define MT6397_VIO18_CON6 0x030C
+#define MT6397_VIO18_CON7 0x030E
+#define MT6397_VIO18_CON8 0x0310
+#define MT6397_VIO18_CON9 0x0312
+#define MT6397_VIO18_CON10 0x0314
+#define MT6397_VIO18_CON11 0x0316
+#define MT6397_VIO18_CON12 0x0318
+#define MT6397_VIO18_CON13 0x031A
+#define MT6397_VIO18_CON14 0x031C
+#define MT6397_VIO18_CON15 0x031E
+#define MT6397_VIO18_CON16 0x0320
+#define MT6397_VIO18_CON17 0x0322
+#define MT6397_VIO18_CON18 0x0324
+#define MT6397_VPCA7_CON0 0x0326
+#define MT6397_VPCA7_CON1 0x0328
+#define MT6397_VPCA7_CON2 0x032A
+#define MT6397_VPCA7_CON3 0x032C
+#define MT6397_VPCA7_CON4 0x032E
+#define MT6397_VPCA7_CON5 0x0330
+#define MT6397_VPCA7_CON6 0x0332
+#define MT6397_VPCA7_CON7 0x0334
+#define MT6397_VPCA7_CON8 0x0336
+#define MT6397_VPCA7_CON9 0x0338
+#define MT6397_VPCA7_CON10 0x033A
+#define MT6397_VPCA7_CON11 0x033C
+#define MT6397_VPCA7_CON12 0x033E
+#define MT6397_VPCA7_CON13 0x0340
+#define MT6397_VPCA7_CON14 0x0342
+#define MT6397_VPCA7_CON15 0x0344
+#define MT6397_VPCA7_CON16 0x0346
+#define MT6397_VPCA7_CON17 0x0348
+#define MT6397_VPCA7_CON18 0x034A
+#define MT6397_VSRMCA7_CON0 0x034C
+#define MT6397_VSRMCA7_CON1 0x034E
+#define MT6397_VSRMCA7_CON2 0x0350
+#define MT6397_VSRMCA7_CON3 0x0352
+#define MT6397_VSRMCA7_CON4 0x0354
+#define MT6397_VSRMCA7_CON5 0x0356
+#define MT6397_VSRMCA7_CON6 0x0358
+#define MT6397_VSRMCA7_CON7 0x035A
+#define MT6397_VSRMCA7_CON8 0x035C
+#define MT6397_VSRMCA7_CON9 0x035E
+#define MT6397_VSRMCA7_CON10 0x0360
+#define MT6397_VSRMCA7_CON11 0x0362
+#define MT6397_VSRMCA7_CON12 0x0364
+#define MT6397_VSRMCA7_CON13 0x0366
+#define MT6397_VSRMCA7_CON14 0x0368
+#define MT6397_VSRMCA7_CON15 0x036A
+#define MT6397_VSRMCA7_CON16 0x036C
+#define MT6397_VSRMCA7_CON17 0x036E
+#define MT6397_VSRMCA7_CON18 0x0370
+#define MT6397_VSRMCA7_CON19 0x0372
+#define MT6397_VSRMCA7_CON20 0x0374
+#define MT6397_VSRMCA7_CON21 0x0376
+#define MT6397_VDRM_CON0 0x0378
+#define MT6397_VDRM_CON1 0x037A
+#define MT6397_VDRM_CON2 0x037C
+#define MT6397_VDRM_CON3 0x037E
+#define MT6397_VDRM_CON4 0x0380
+#define MT6397_VDRM_CON5 0x0382
+#define MT6397_VDRM_CON6 0x0384
+#define MT6397_VDRM_CON7 0x0386
+#define MT6397_VDRM_CON8 0x0388
+#define MT6397_VDRM_CON9 0x038A
+#define MT6397_VDRM_CON10 0x038C
+#define MT6397_VDRM_CON11 0x038E
+#define MT6397_VDRM_CON12 0x0390
+#define MT6397_VDRM_CON13 0x0392
+#define MT6397_VDRM_CON14 0x0394
+#define MT6397_VDRM_CON15 0x0396
+#define MT6397_VDRM_CON16 0x0398
+#define MT6397_VDRM_CON17 0x039A
+#define MT6397_VDRM_CON18 0x039C
+#define MT6397_BUCK_K_CON0 0x039E
+#define MT6397_BUCK_K_CON1 0x03A0
+#define MT6397_ANALDO_CON0 0x0400
+#define MT6397_ANALDO_CON1 0x0402
+#define MT6397_ANALDO_CON2 0x0404
+#define MT6397_ANALDO_CON3 0x0406
+#define MT6397_ANALDO_CON4 0x0408
+#define MT6397_ANALDO_CON5 0x040A
+#define MT6397_ANALDO_CON6 0x040C
+#define MT6397_ANALDO_CON7 0x040E
+#define MT6397_DIGLDO_CON0 0x0410
+#define MT6397_DIGLDO_CON1 0x0412
+#define MT6397_DIGLDO_CON2 0x0414
+#define MT6397_DIGLDO_CON3 0x0416
+#define MT6397_DIGLDO_CON4 0x0418
+#define MT6397_DIGLDO_CON5 0x041A
+#define MT6397_DIGLDO_CON6 0x041C
+#define MT6397_DIGLDO_CON7 0x041E
+#define MT6397_DIGLDO_CON8 0x0420
+#define MT6397_DIGLDO_CON9 0x0422
+#define MT6397_DIGLDO_CON10 0x0424
+#define MT6397_DIGLDO_CON11 0x0426
+#define MT6397_DIGLDO_CON12 0x0428
+#define MT6397_DIGLDO_CON13 0x042A
+#define MT6397_DIGLDO_CON14 0x042C
+#define MT6397_DIGLDO_CON15 0x042E
+#define MT6397_DIGLDO_CON16 0x0430
+#define MT6397_DIGLDO_CON17 0x0432
+#define MT6397_DIGLDO_CON18 0x0434
+#define MT6397_DIGLDO_CON19 0x0436
+#define MT6397_DIGLDO_CON20 0x0438
+#define MT6397_DIGLDO_CON21 0x043A
+#define MT6397_DIGLDO_CON22 0x043C
+#define MT6397_DIGLDO_CON23 0x043E
+#define MT6397_DIGLDO_CON24 0x0440
+#define MT6397_DIGLDO_CON25 0x0442
+#define MT6397_DIGLDO_CON26 0x0444
+#define MT6397_DIGLDO_CON27 0x0446
+#define MT6397_DIGLDO_CON28 0x0448
+#define MT6397_DIGLDO_CON29 0x044A
+#define MT6397_DIGLDO_CON30 0x044C
+#define MT6397_DIGLDO_CON31 0x044E
+#define MT6397_DIGLDO_CON32 0x0450
+#define MT6397_DIGLDO_CON33 0x045A
+#define MT6397_SPK_CON0 0x0600
+#define MT6397_SPK_CON1 0x0602
+#define MT6397_SPK_CON2 0x0604
+#define MT6397_SPK_CON3 0x0606
+#define MT6397_SPK_CON4 0x0608
+#define MT6397_SPK_CON5 0x060A
+#define MT6397_SPK_CON6 0x060C
+#define MT6397_SPK_CON7 0x060E
+#define MT6397_SPK_CON8 0x0610
+#define MT6397_SPK_CON9 0x0612
+#define MT6397_SPK_CON10 0x0614
+#define MT6397_SPK_CON11 0x0616
+#define MT6397_AUDDAC_CON0 0x0700
+#define MT6397_AUDBUF_CFG0 0x0702
+#define MT6397_AUDBUF_CFG1 0x0704
+#define MT6397_AUDBUF_CFG2 0x0706
+#define MT6397_AUDBUF_CFG3 0x0708
+#define MT6397_AUDBUF_CFG4 0x070A
+#define MT6397_IBIASDIST_CFG0 0x070C
+#define MT6397_AUDACCDEPOP_CFG0 0x070E
+#define MT6397_AUD_IV_CFG0 0x0710
+#define MT6397_AUDCLKGEN_CFG0 0x0712
+#define MT6397_AUDLDO_CFG0 0x0714
+#define MT6397_AUDLDO_CFG1 0x0716
+#define MT6397_AUDNVREGGLB_CFG0 0x0718
+#define MT6397_AUD_NCP0 0x071A
+#define MT6397_AUDPREAMP_CON0 0x071C
+#define MT6397_AUDADC_CON0 0x071E
+#define MT6397_AUDADC_CON1 0x0720
+#define MT6397_AUDADC_CON2 0x0722
+#define MT6397_AUDADC_CON3 0x0724
+#define MT6397_AUDADC_CON4 0x0726
+#define MT6397_AUDADC_CON5 0x0728
+#define MT6397_AUDADC_CON6 0x072A
+#define MT6397_AUDDIGMI_CON0 0x072C
+#define MT6397_AUDLSBUF_CON0 0x072E
+#define MT6397_AUDLSBUF_CON1 0x0730
+#define MT6397_AUDENCSPARE_CON0 0x0732
+#define MT6397_AUDENCCLKSQ_CON0 0x0734
+#define MT6397_AUDPREAMPGAIN_CON0 0x0736
+#define MT6397_ZCD_CON0 0x0738
+#define MT6397_ZCD_CON1 0x073A
+#define MT6397_ZCD_CON2 0x073C
+#define MT6397_ZCD_CON3 0x073E
+#define MT6397_ZCD_CON4 0x0740
+#define MT6397_ZCD_CON5 0x0742
+#define MT6397_NCP_CLKDIV_CON0 0x0744
+#define MT6397_NCP_CLKDIV_CON1 0x0746
+
+#endif /* __MFD_MT6397_REGISTERS_H__ */
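
struct mt6397_chip (in core.h above) carries a shared regmap, so sub-drivers reach these PMIC registers through the regmap API rather than raw bus access. A minimal sketch, assuming the standard regmap_read() helper; reading MT6397_CID is just an example.

#include <linux/regmap.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/registers.h>

/* Illustrative only: fetch the PMIC chip-ID register via the shared regmap. */
static int example_mt6397_read_cid(struct mt6397_chip *chip, unsigned int *cid)
{
	return regmap_read(chip->regmap, MT6397_CID, cid);
}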
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
new file mode 100644
index 000000000..bb270bd03
--- /dev/null
+++ b/include/linux/mfd/palmas.h
@@ -0,0 +1,3772 @@
+/*
+ * TI Palmas
+ *
+ * Copyright 2011-2013 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Ian Lartey <ian@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_PALMAS_H
+#define __LINUX_MFD_PALMAS_H
+
+#include <linux/usb/otg.h>
+#include <linux/leds.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/extcon.h>
+#include <linux/usb/phy_companion.h>
+
+#define PALMAS_NUM_CLIENTS 3
+
+/* The ID_REVISION NUMBERS */
+#define PALMAS_CHIP_OLD_ID 0x0000
+#define PALMAS_CHIP_ID 0xC035
+#define PALMAS_CHIP_CHARGER_ID 0xC036
+
+#define TPS65917_RESERVED -1
+
+#define is_palmas(a) (((a) == PALMAS_CHIP_OLD_ID) || \
+ ((a) == PALMAS_CHIP_ID))
+#define is_palmas_charger(a) ((a) == PALMAS_CHIP_CHARGER_ID)
+
+/**
+ * Palmas PMIC feature types
+ *
+ * PALMAS_PMIC_FEATURE_SMPS10_BOOST - set when the PMIC provides the
+ * SMPS10_BOOST regulator.
+ *
+ * PALMAS_PMIC_HAS(b, f) - macro to check whether a Palmas device is capable
+ * of a specific feature (above) or not. Returns non-zero if yes.
+ */
+#define PALMAS_PMIC_FEATURE_SMPS10_BOOST BIT(0)
+#define PALMAS_PMIC_HAS(b, f) \
+ ((b)->features & PALMAS_PMIC_FEATURE_ ## f)
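A tiny sketch of the feature test: PALMAS_PMIC_HAS() is just a bit test against the features bitmask (struct palmas, defined below, carries that field). The wrapper function is hypothetical.

#include <linux/mfd/palmas.h>

static bool example_has_smps10_boost(struct palmas *palmas)
{
	/* non-zero when the SMPS10_BOOST feature bit is set */
	return PALMAS_PMIC_HAS(palmas, SMPS10_BOOST);
}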
+
+struct palmas_pmic;
+struct palmas_gpadc;
+struct palmas_resource;
+struct palmas_usb;
+struct palmas_pmic_driver_data;
+struct palmas_pmic_platform_data;
+
+enum palmas_usb_state {
+ PALMAS_USB_STATE_DISCONNECT,
+ PALMAS_USB_STATE_VBUS,
+ PALMAS_USB_STATE_ID,
+};
+
+struct palmas {
+ struct device *dev;
+
+ struct i2c_client *i2c_clients[PALMAS_NUM_CLIENTS];
+ struct regmap *regmap[PALMAS_NUM_CLIENTS];
+
+ /* Stored chip id */
+ int id;
+
+ unsigned int features;
+ /* IRQ Data */
+ int irq;
+ u32 irq_mask;
+ struct mutex irq_lock;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct palmas_pmic_driver_data *pmic_ddata;
+
+ /* Child Devices */
+ struct palmas_pmic *pmic;
+ struct palmas_gpadc *gpadc;
+ struct palmas_resource *resource;
+ struct palmas_usb *usb;
+
+ /* GPIO MUXing */
+ u8 gpio_muxed;
+ u8 led_muxed;
+ u8 pwm_muxed;
+};
+
+#define PALMAS_EXT_REQ (PALMAS_EXT_CONTROL_ENABLE1 | \
+ PALMAS_EXT_CONTROL_ENABLE2 | \
+ PALMAS_EXT_CONTROL_NSLEEP)
+
+struct palmas_sleep_requestor_info {
+ int id;
+ int reg_offset;
+ int bit_pos;
+};
+
+struct palmas_regs_info {
+ char *name;
+ char *sname;
+ u8 vsel_addr;
+ u8 ctrl_addr;
+ u8 tstep_addr;
+ int sleep_id;
+};
+
+struct palmas_pmic_driver_data {
+ int smps_start;
+ int smps_end;
+ int ldo_begin;
+ int ldo_end;
+ int max_reg;
+ bool has_regen3;
+ struct palmas_regs_info *palmas_regs_info;
+ struct of_regulator_match *palmas_matches;
+ struct palmas_sleep_requestor_info *sleep_req_info;
+ int (*smps_register)(struct palmas_pmic *pmic,
+ struct palmas_pmic_driver_data *ddata,
+ struct palmas_pmic_platform_data *pdata,
+ const char *pdev_name,
+ struct regulator_config config);
+ int (*ldo_register)(struct palmas_pmic *pmic,
+ struct palmas_pmic_driver_data *ddata,
+ struct palmas_pmic_platform_data *pdata,
+ const char *pdev_name,
+ struct regulator_config config);
+};
+
+struct palmas_gpadc_platform_data {
+ /* Channel 3 current source is only enabled during conversion */
+ int ch3_current;
+
+ /* Channel 0 current source can be used for battery detection.
+ * If used for battery detection this will cause a permanent current
+ * consumption depending on current level set here.
+ */
+ int ch0_current;
+
+ /* default BAT_REMOVAL_DAT setting on device probe */
+ int bat_removal;
+
+ /* Sets the START_POLARITY bit in the RT_CTRL register */
+ int start_polarity;
+};
+
+struct palmas_reg_init {
+ /* warm_reset controls the voltage levels after a warm reset
+ *
+ * 0: reload default values from OTP on warm reset
+ * 1: maintain voltage from VSEL on warm reset
+ */
+ int warm_reset;
+
+ /* roof_floor controls whether the regulator uses the i2c style
+ * of DVS or uses the method where a GPIO or other control method is
+ * attached to the NSLEEP/ENABLE1/ENABLE2 pins
+ *
+ * For SMPS
+ *
+ * 0: i2c selection of voltage
+ * 1: pin selection of voltage.
+ *
+ * For LDO unused
+ */
+ int roof_floor;
+
+ /* mode_sleep is the mode loaded into the MODE_SLEEP bits as defined in
+ * the data sheet.
+ *
+ * For SMPS
+ *
+ * 0: Off
+ * 1: AUTO
+ * 2: ECO
+ * 3: Forced PWM
+ *
+ * For LDO
+ *
+ * 0: Off
+ * 1: On
+ */
+ int mode_sleep;
+
+ /* vsel is the bitfield loaded into the SMPSx_VOLTAGE
+ * register. Set this if the default voltage set in OTP needs
+ * to be overridden.
+ */
+ u8 vsel;
+
+};
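
Putting the fields of struct palmas_reg_init together, a single entry for one SMPS rail might look like the sketch below; it is hypothetical and only mirrors the value encodings documented in the comments above.

/* Illustrative only: keep the programmed voltage across warm reset, select
 * the voltage over i2c rather than a pin, and drop to ECO mode in sleep. */
static struct palmas_reg_init example_smps45_init = {
	.warm_reset	= 1,	/* maintain voltage from VSEL on warm reset */
	.roof_floor	= 0,	/* i2c selection of voltage */
	.mode_sleep	= 2,	/* ECO */
	.vsel		= 0,	/* leave the OTP default voltage in place */
};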
+
+enum palmas_regulators {
+ /* SMPS regulators */
+ PALMAS_REG_SMPS12,
+ PALMAS_REG_SMPS123,
+ PALMAS_REG_SMPS3,
+ PALMAS_REG_SMPS45,
+ PALMAS_REG_SMPS457,
+ PALMAS_REG_SMPS6,
+ PALMAS_REG_SMPS7,
+ PALMAS_REG_SMPS8,
+ PALMAS_REG_SMPS9,
+ PALMAS_REG_SMPS10_OUT2,
+ PALMAS_REG_SMPS10_OUT1,
+ /* LDO regulators */
+ PALMAS_REG_LDO1,
+ PALMAS_REG_LDO2,
+ PALMAS_REG_LDO3,
+ PALMAS_REG_LDO4,
+ PALMAS_REG_LDO5,
+ PALMAS_REG_LDO6,
+ PALMAS_REG_LDO7,
+ PALMAS_REG_LDO8,
+ PALMAS_REG_LDO9,
+ PALMAS_REG_LDOLN,
+ PALMAS_REG_LDOUSB,
+ /* External regulators */
+ PALMAS_REG_REGEN1,
+ PALMAS_REG_REGEN2,
+ PALMAS_REG_REGEN3,
+ PALMAS_REG_SYSEN1,
+ PALMAS_REG_SYSEN2,
+ /* Total number of regulators */
+ PALMAS_NUM_REGS,
+};
+
+enum tps65917_regulators {
+ /* SMPS regulators */
+ TPS65917_REG_SMPS1,
+ TPS65917_REG_SMPS2,
+ TPS65917_REG_SMPS3,
+ TPS65917_REG_SMPS4,
+ TPS65917_REG_SMPS5,
+ /* LDO regulators */
+ TPS65917_REG_LDO1,
+ TPS65917_REG_LDO2,
+ TPS65917_REG_LDO3,
+ TPS65917_REG_LDO4,
+ TPS65917_REG_LDO5,
+ TPS65917_REG_REGEN1,
+ TPS65917_REG_REGEN2,
+ TPS65917_REG_REGEN3,
+
+ /* Total number of regulators */
+ TPS65917_NUM_REGS,
+};
+
+/* External control signal names */
+enum {
+ PALMAS_EXT_CONTROL_ENABLE1 = 0x1,
+ PALMAS_EXT_CONTROL_ENABLE2 = 0x2,
+ PALMAS_EXT_CONTROL_NSLEEP = 0x4,
+};
+
+/*
+ * Palmas device resources can be controlled externally to enable or
+ * disable them, rather than through i2c register writes. These are the
+ * externally controlled requestor IDs for the different resources.
+ */
+enum palmas_external_requestor_id {
+ PALMAS_EXTERNAL_REQSTR_ID_REGEN1,
+ PALMAS_EXTERNAL_REQSTR_ID_REGEN2,
+ PALMAS_EXTERNAL_REQSTR_ID_SYSEN1,
+ PALMAS_EXTERNAL_REQSTR_ID_SYSEN2,
+ PALMAS_EXTERNAL_REQSTR_ID_CLK32KG,
+ PALMAS_EXTERNAL_REQSTR_ID_CLK32KGAUDIO,
+ PALMAS_EXTERNAL_REQSTR_ID_REGEN3,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS12,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS3,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS45,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS6,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS7,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS8,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS9,
+ PALMAS_EXTERNAL_REQSTR_ID_SMPS10,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO1,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO2,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO3,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO4,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO5,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO6,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO7,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO8,
+ PALMAS_EXTERNAL_REQSTR_ID_LDO9,
+ PALMAS_EXTERNAL_REQSTR_ID_LDOLN,
+ PALMAS_EXTERNAL_REQSTR_ID_LDOUSB,
+
+ /* Last entry */
+ PALMAS_EXTERNAL_REQSTR_ID_MAX,
+};
+
+enum tps65917_external_requestor_id {
+ TPS65917_EXTERNAL_REQSTR_ID_REGEN1,
+ TPS65917_EXTERNAL_REQSTR_ID_REGEN2,
+ TPS65917_EXTERNAL_REQSTR_ID_REGEN3,
+ TPS65917_EXTERNAL_REQSTR_ID_SMPS1,
+ TPS65917_EXTERNAL_REQSTR_ID_SMPS2,
+ TPS65917_EXTERNAL_REQSTR_ID_SMPS3,
+ TPS65917_EXTERNAL_REQSTR_ID_SMPS4,
+ TPS65917_EXTERNAL_REQSTR_ID_SMPS5,
+ TPS65917_EXTERNAL_REQSTR_ID_LDO1,
+ TPS65917_EXTERNAL_REQSTR_ID_LDO2,
+ TPS65917_EXTERNAL_REQSTR_ID_LDO3,
+ TPS65917_EXTERNAL_REQSTR_ID_LDO4,
+ TPS65917_EXTERNAL_REQSTR_ID_LDO5,
+ /* Last entry */
+ TPS65917_EXTERNAL_REQSTR_ID_MAX,
+};
+
+struct palmas_pmic_platform_data {
+ /* An array of pointers to regulator init data indexed by regulator
+ * ID
+ */
+ struct regulator_init_data *reg_data[PALMAS_NUM_REGS];
+
+ /* An array of pointers to structures containing sleep mode and DVS
+ * configuration for regulators indexed by ID
+ */
+ struct palmas_reg_init *reg_init[PALMAS_NUM_REGS];
+
+ /* use LDO6 for vibrator control */
+ int ldo6_vibrator;
+
+ /* Enable tracking mode of LDO8 */
+ bool enable_ldo8_tracking;
+};
+
+struct palmas_usb_platform_data {
+ /* Do we enable the wakeup comparator on probe */
+ int wakeup;
+};
+
+struct palmas_resource_platform_data {
+ int regen1_mode_sleep;
+ int regen2_mode_sleep;
+ int sysen1_mode_sleep;
+ int sysen2_mode_sleep;
+
+ /* bitfield to be loaded to NSLEEP_RES_ASSIGN */
+ u8 nsleep_res;
+ /* bitfield to be loaded to NSLEEP_SMPS_ASSIGN */
+ u8 nsleep_smps;
+ /* bitfield to be loaded to NSLEEP_LDO_ASSIGN1 */
+ u8 nsleep_ldo1;
+ /* bitfield to be loaded to NSLEEP_LDO_ASSIGN2 */
+ u8 nsleep_ldo2;
+
+ /* bitfield to be loaded to ENABLE1_RES_ASSIGN */
+ u8 enable1_res;
+ /* bitfield to be loaded to ENABLE1_SMPS_ASSIGN */
+ u8 enable1_smps;
+ /* bitfield to be loaded to ENABLE1_LDO_ASSIGN1 */
+ u8 enable1_ldo1;
+ /* bitfield to be loaded to ENABLE1_LDO_ASSIGN2 */
+ u8 enable1_ldo2;
+
+ /* bitfield to be loaded to ENABLE2_RES_ASSIGN */
+ u8 enable2_res;
+ /* bitfield to be loaded to ENABLE2_SMPS_ASSIGN */
+ u8 enable2_smps;
+ /* bitfield to be loaded to ENABLE2_LDO_ASSIGN1 */
+ u8 enable2_ldo1;
+ /* bitfield to be loaded to ENABLE2_LDO_ASSIGN2 */
+ u8 enable2_ldo2;
+};
+
+struct palmas_clk_platform_data {
+ int clk32kg_mode_sleep;
+ int clk32kgaudio_mode_sleep;
+};
+
+struct palmas_platform_data {
+ int irq_flags;
+ int gpio_base;
+
+ /* bit value to be loaded to the POWER_CTRL register */
+ u8 power_ctrl;
+
+ /*
+ * boolean to select if we want to configure muxing here,
+ * then the two values to load into the registers if true
+ */
+ int mux_from_pdata;
+ u8 pad1, pad2;
+ bool pm_off;
+
+ struct palmas_pmic_platform_data *pmic_pdata;
+ struct palmas_gpadc_platform_data *gpadc_pdata;
+ struct palmas_usb_platform_data *usb_pdata;
+ struct palmas_resource_platform_data *resource_pdata;
+ struct palmas_clk_platform_data *clk_pdata;
+};
+
+struct palmas_gpadc_calibration {
+ s32 gain;
+ s32 gain_error;
+ s32 offset_error;
+};
+
+struct palmas_gpadc {
+ struct device *dev;
+ struct palmas *palmas;
+
+ int ch3_current;
+ int ch0_current;
+
+ int gpadc_force;
+
+ int bat_removal;
+
+ struct mutex reading_lock;
+ struct completion irq_complete;
+
+ int eoc_sw_irq;
+
+ struct palmas_gpadc_calibration *palmas_cal_tbl;
+
+ int conv0_channel;
+ int conv1_channel;
+ int rt_channel;
+};
+
+struct palmas_gpadc_result {
+ s32 raw_code;
+ s32 corrected_code;
+ s32 result;
+};
+
+#define PALMAS_MAX_CHANNELS 16
+
+/* Define the tps65917 IRQ numbers */
+enum tps65917_irqs {
+ /* INT1 registers */
+ TPS65917_RESERVED1,
+ TPS65917_PWRON_IRQ,
+ TPS65917_LONG_PRESS_KEY_IRQ,
+ TPS65917_RESERVED2,
+ TPS65917_PWRDOWN_IRQ,
+ TPS65917_HOTDIE_IRQ,
+ TPS65917_VSYS_MON_IRQ,
+ TPS65917_RESERVED3,
+ /* INT2 registers */
+ TPS65917_RESERVED4,
+ TPS65917_OTP_ERROR_IRQ,
+ TPS65917_WDT_IRQ,
+ TPS65917_RESERVED5,
+ TPS65917_RESET_IN_IRQ,
+ TPS65917_FSD_IRQ,
+ TPS65917_SHORT_IRQ,
+ TPS65917_RESERVED6,
+ /* INT3 registers */
+ TPS65917_GPADC_AUTO_0_IRQ,
+ TPS65917_GPADC_AUTO_1_IRQ,
+ TPS65917_GPADC_EOC_SW_IRQ,
+ TPS65917_RESREVED6,
+ TPS65917_RESERVED7,
+ TPS65917_RESERVED8,
+ TPS65917_RESERVED9,
+ TPS65917_VBUS_IRQ,
+ /* INT4 registers */
+ TPS65917_GPIO_0_IRQ,
+ TPS65917_GPIO_1_IRQ,
+ TPS65917_GPIO_2_IRQ,
+ TPS65917_GPIO_3_IRQ,
+ TPS65917_GPIO_4_IRQ,
+ TPS65917_GPIO_5_IRQ,
+ TPS65917_GPIO_6_IRQ,
+ TPS65917_RESERVED10,
+ /* Total Number IRQs */
+ TPS65917_NUM_IRQ,
+};
+
+/* Define the palmas IRQ numbers */
+enum palmas_irqs {
+ /* INT1 registers */
+ PALMAS_CHARG_DET_N_VBUS_OVV_IRQ,
+ PALMAS_PWRON_IRQ,
+ PALMAS_LONG_PRESS_KEY_IRQ,
+ PALMAS_RPWRON_IRQ,
+ PALMAS_PWRDOWN_IRQ,
+ PALMAS_HOTDIE_IRQ,
+ PALMAS_VSYS_MON_IRQ,
+ PALMAS_VBAT_MON_IRQ,
+ /* INT2 registers */
+ PALMAS_RTC_ALARM_IRQ,
+ PALMAS_RTC_TIMER_IRQ,
+ PALMAS_WDT_IRQ,
+ PALMAS_BATREMOVAL_IRQ,
+ PALMAS_RESET_IN_IRQ,
+ PALMAS_FBI_BB_IRQ,
+ PALMAS_SHORT_IRQ,
+ PALMAS_VAC_ACOK_IRQ,
+ /* INT3 registers */
+ PALMAS_GPADC_AUTO_0_IRQ,
+ PALMAS_GPADC_AUTO_1_IRQ,
+ PALMAS_GPADC_EOC_SW_IRQ,
+ PALMAS_GPADC_EOC_RT_IRQ,
+ PALMAS_ID_OTG_IRQ,
+ PALMAS_ID_IRQ,
+ PALMAS_VBUS_OTG_IRQ,
+ PALMAS_VBUS_IRQ,
+ /* INT4 registers */
+ PALMAS_GPIO_0_IRQ,
+ PALMAS_GPIO_1_IRQ,
+ PALMAS_GPIO_2_IRQ,
+ PALMAS_GPIO_3_IRQ,
+ PALMAS_GPIO_4_IRQ,
+ PALMAS_GPIO_5_IRQ,
+ PALMAS_GPIO_6_IRQ,
+ PALMAS_GPIO_7_IRQ,
+ /* Total Number IRQs */
+ PALMAS_NUM_IRQ,
+};
+
+struct palmas_pmic {
+ struct palmas *palmas;
+ struct device *dev;
+ struct regulator_desc desc[PALMAS_NUM_REGS];
+ struct regulator_dev *rdev[PALMAS_NUM_REGS];
+ struct mutex mutex;
+
+ int smps123;
+ int smps457;
+ int smps12;
+
+ int range[PALMAS_REG_SMPS10_OUT1];
+ unsigned int ramp_delay[PALMAS_REG_SMPS10_OUT1];
+ unsigned int current_reg_mode[PALMAS_REG_SMPS10_OUT1];
+};
+
+struct palmas_resource {
+ struct palmas *palmas;
+ struct device *dev;
+};
+
+struct palmas_usb {
+ struct palmas *palmas;
+ struct device *dev;
+
+ struct extcon_dev *edev;
+
+ int id_otg_irq;
+ int id_irq;
+ int vbus_otg_irq;
+ int vbus_irq;
+
+ enum palmas_usb_state linkstat;
+ int wakeup;
+ bool enable_vbus_detection;
+ bool enable_id_detection;
+};
+
+#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
+
+enum usb_irq_events {
+ /* Wakeup events from INT3 */
+ PALMAS_USB_ID_WAKEPUP,
+ PALMAS_USB_VBUS_WAKEUP,
+
+ /* ID_OTG_EVENTS */
+ PALMAS_USB_ID_GND,
+ N_PALMAS_USB_ID_GND,
+ PALMAS_USB_ID_C,
+ N_PALMAS_USB_ID_C,
+ PALMAS_USB_ID_B,
+ N_PALMAS_USB_ID_B,
+ PALMAS_USB_ID_A,
+ N_PALMAS_USB_ID_A,
+ PALMAS_USB_ID_FLOAT,
+ N_PALMAS_USB_ID_FLOAT,
+
+ /* VBUS_OTG_EVENTS */
+ PALMAS_USB_VB_SESS_END,
+ N_PALMAS_USB_VB_SESS_END,
+ PALMAS_USB_VB_SESS_VLD,
+ N_PALMAS_USB_VB_SESS_VLD,
+ PALMAS_USB_VA_SESS_VLD,
+ N_PALMAS_USB_VA_SESS_VLD,
+ PALMAS_USB_VA_VBUS_VLD,
+ N_PALMAS_USB_VA_VBUS_VLD,
+ PALMAS_USB_VADP_SNS,
+ N_PALMAS_USB_VADP_SNS,
+ PALMAS_USB_VADP_PRB,
+ N_PALMAS_USB_VADP_PRB,
+ PALMAS_USB_VOTG_SESS_VLD,
+ N_PALMAS_USB_VOTG_SESS_VLD,
+};
+
+/* defines so we can store the mux settings */
+#define PALMAS_GPIO_0_MUXED (1 << 0)
+#define PALMAS_GPIO_1_MUXED (1 << 1)
+#define PALMAS_GPIO_2_MUXED (1 << 2)
+#define PALMAS_GPIO_3_MUXED (1 << 3)
+#define PALMAS_GPIO_4_MUXED (1 << 4)
+#define PALMAS_GPIO_5_MUXED (1 << 5)
+#define PALMAS_GPIO_6_MUXED (1 << 6)
+#define PALMAS_GPIO_7_MUXED (1 << 7)
+
+#define PALMAS_LED1_MUXED (1 << 0)
+#define PALMAS_LED2_MUXED (1 << 1)
+
+#define PALMAS_PWM1_MUXED (1 << 0)
+#define PALMAS_PWM2_MUXED (1 << 1)
+
+/* helper macros to get the correct slave number and register address */
+#define PALMAS_BASE_TO_SLAVE(x) ((x >> 8) - 1)
+#define PALMAS_BASE_TO_REG(x, y) ((x & 0xFF) + y)
+
+/* Base addresses of IP blocks in Palmas */
+#define PALMAS_SMPS_DVS_BASE 0x020
+#define PALMAS_RTC_BASE 0x100
+#define PALMAS_VALIDITY_BASE 0x118
+#define PALMAS_SMPS_BASE 0x120
+#define PALMAS_LDO_BASE 0x150
+#define PALMAS_DVFS_BASE 0x180
+#define PALMAS_PMU_CONTROL_BASE 0x1A0
+#define PALMAS_RESOURCE_BASE 0x1D4
+#define PALMAS_PU_PD_OD_BASE 0x1F0
+#define PALMAS_LED_BASE 0x200
+#define PALMAS_INTERRUPT_BASE 0x210
+#define PALMAS_USB_OTG_BASE 0x250
+#define PALMAS_VIBRATOR_BASE 0x270
+#define PALMAS_GPIO_BASE 0x280
+#define PALMAS_USB_BASE 0x290
+#define PALMAS_GPADC_BASE 0x2C0
+#define PALMAS_TRIM_GPADC_BASE 0x3CD
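
The PALMAS_BASE_TO_SLAVE()/PALMAS_BASE_TO_REG() helpers above split these block bases into a slave index (high byte, minus one), presumably used to index the i2c_clients[]/regmap[] arrays, and a register offset (low byte). A worked example using the RTC block; the 0x10 offset is the RTC control register defined just below.

/* Illustrative only:
 *   PALMAS_BASE_TO_SLAVE(PALMAS_RTC_BASE)      = (0x100 >> 8) - 1 = 0
 *   PALMAS_BASE_TO_REG(PALMAS_RTC_BASE, 0x10)  = (0x100 & 0xFF) + 0x10 = 0x10
 * i.e. slave/regmap index 0 and register address 0x10 within that block. */
static unsigned int example_palmas_rtc_ctrl_address(void)
{
	return PALMAS_BASE_TO_REG(PALMAS_RTC_BASE, 0x10);
}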
+
+/* Registers for function RTC */
+#define PALMAS_SECONDS_REG 0x00
+#define PALMAS_MINUTES_REG 0x01
+#define PALMAS_HOURS_REG 0x02
+#define PALMAS_DAYS_REG 0x03
+#define PALMAS_MONTHS_REG 0x04
+#define PALMAS_YEARS_REG 0x05
+#define PALMAS_WEEKS_REG 0x06
+#define PALMAS_ALARM_SECONDS_REG 0x08
+#define PALMAS_ALARM_MINUTES_REG 0x09
+#define PALMAS_ALARM_HOURS_REG 0x0A
+#define PALMAS_ALARM_DAYS_REG 0x0B
+#define PALMAS_ALARM_MONTHS_REG 0x0C
+#define PALMAS_ALARM_YEARS_REG 0x0D
+#define PALMAS_RTC_CTRL_REG 0x10
+#define PALMAS_RTC_STATUS_REG 0x11
+#define PALMAS_RTC_INTERRUPTS_REG 0x12
+#define PALMAS_RTC_COMP_LSB_REG 0x13
+#define PALMAS_RTC_COMP_MSB_REG 0x14
+#define PALMAS_RTC_RES_PROG_REG 0x15
+#define PALMAS_RTC_RESET_STATUS_REG 0x16
+
+/* Bit definitions for SECONDS_REG */
+#define PALMAS_SECONDS_REG_SEC1_MASK 0x70
+#define PALMAS_SECONDS_REG_SEC1_SHIFT 0x04
+#define PALMAS_SECONDS_REG_SEC0_MASK 0x0F
+#define PALMAS_SECONDS_REG_SEC0_SHIFT 0x00
+
+/* Bit definitions for MINUTES_REG */
+#define PALMAS_MINUTES_REG_MIN1_MASK 0x70
+#define PALMAS_MINUTES_REG_MIN1_SHIFT 0x04
+#define PALMAS_MINUTES_REG_MIN0_MASK 0x0F
+#define PALMAS_MINUTES_REG_MIN0_SHIFT 0x00
+
+/* Bit definitions for HOURS_REG */
+#define PALMAS_HOURS_REG_PM_NAM 0x80
+#define PALMAS_HOURS_REG_PM_NAM_SHIFT 0x07
+#define PALMAS_HOURS_REG_HOUR1_MASK 0x30
+#define PALMAS_HOURS_REG_HOUR1_SHIFT 0x04
+#define PALMAS_HOURS_REG_HOUR0_MASK 0x0F
+#define PALMAS_HOURS_REG_HOUR0_SHIFT 0x00
+
+/* Bit definitions for DAYS_REG */
+#define PALMAS_DAYS_REG_DAY1_MASK 0x30
+#define PALMAS_DAYS_REG_DAY1_SHIFT 0x04
+#define PALMAS_DAYS_REG_DAY0_MASK 0x0F
+#define PALMAS_DAYS_REG_DAY0_SHIFT 0x00
+
+/* Bit definitions for MONTHS_REG */
+#define PALMAS_MONTHS_REG_MONTH1 0x10
+#define PALMAS_MONTHS_REG_MONTH1_SHIFT 0x04
+#define PALMAS_MONTHS_REG_MONTH0_MASK 0x0F
+#define PALMAS_MONTHS_REG_MONTH0_SHIFT 0x00
+
+/* Bit definitions for YEARS_REG */
+#define PALMAS_YEARS_REG_YEAR1_MASK 0xf0
+#define PALMAS_YEARS_REG_YEAR1_SHIFT 0x04
+#define PALMAS_YEARS_REG_YEAR0_MASK 0x0F
+#define PALMAS_YEARS_REG_YEAR0_SHIFT 0x00
+
+/* Bit definitions for WEEKS_REG */
+#define PALMAS_WEEKS_REG_WEEK_MASK 0x07
+#define PALMAS_WEEKS_REG_WEEK_SHIFT 0x00
+
+/* Bit definitions for ALARM_SECONDS_REG */
+#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_MASK 0x70
+#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC1_SHIFT 0x04
+#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_MASK 0x0F
+#define PALMAS_ALARM_SECONDS_REG_ALARM_SEC0_SHIFT 0x00
+
+/* Bit definitions for ALARM_MINUTES_REG */
+#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_MASK 0x70
+#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN1_SHIFT 0x04
+#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_MASK 0x0F
+#define PALMAS_ALARM_MINUTES_REG_ALARM_MIN0_SHIFT 0x00
+
+/* Bit definitions for ALARM_HOURS_REG */
+#define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM 0x80
+#define PALMAS_ALARM_HOURS_REG_ALARM_PM_NAM_SHIFT 0x07
+#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_MASK 0x30
+#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR1_SHIFT 0x04
+#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_MASK 0x0F
+#define PALMAS_ALARM_HOURS_REG_ALARM_HOUR0_SHIFT 0x00
+
+/* Bit definitions for ALARM_DAYS_REG */
+#define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_MASK 0x30
+#define PALMAS_ALARM_DAYS_REG_ALARM_DAY1_SHIFT 0x04
+#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_MASK 0x0F
+#define PALMAS_ALARM_DAYS_REG_ALARM_DAY0_SHIFT 0x00
+
+/* Bit definitions for ALARM_MONTHS_REG */
+#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1 0x10
+#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH1_SHIFT 0x04
+#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_MASK 0x0F
+#define PALMAS_ALARM_MONTHS_REG_ALARM_MONTH0_SHIFT 0x00
+
+/* Bit definitions for ALARM_YEARS_REG */
+#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_MASK 0xf0
+#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR1_SHIFT 0x04
+#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_MASK 0x0F
+#define PALMAS_ALARM_YEARS_REG_ALARM_YEAR0_SHIFT 0x00
+
+/* Bit definitions for RTC_CTRL_REG */
+#define PALMAS_RTC_CTRL_REG_RTC_V_OPT 0x80
+#define PALMAS_RTC_CTRL_REG_RTC_V_OPT_SHIFT 0x07
+#define PALMAS_RTC_CTRL_REG_GET_TIME 0x40
+#define PALMAS_RTC_CTRL_REG_GET_TIME_SHIFT 0x06
+#define PALMAS_RTC_CTRL_REG_SET_32_COUNTER 0x20
+#define PALMAS_RTC_CTRL_REG_SET_32_COUNTER_SHIFT 0x05
+#define PALMAS_RTC_CTRL_REG_TEST_MODE 0x10
+#define PALMAS_RTC_CTRL_REG_TEST_MODE_SHIFT 0x04
+#define PALMAS_RTC_CTRL_REG_MODE_12_24 0x08
+#define PALMAS_RTC_CTRL_REG_MODE_12_24_SHIFT 0x03
+#define PALMAS_RTC_CTRL_REG_AUTO_COMP 0x04
+#define PALMAS_RTC_CTRL_REG_AUTO_COMP_SHIFT 0x02
+#define PALMAS_RTC_CTRL_REG_ROUND_30S 0x02
+#define PALMAS_RTC_CTRL_REG_ROUND_30S_SHIFT 0x01
+#define PALMAS_RTC_CTRL_REG_STOP_RTC 0x01
+#define PALMAS_RTC_CTRL_REG_STOP_RTC_SHIFT 0x00
+
+/* Bit definitions for RTC_STATUS_REG */
+#define PALMAS_RTC_STATUS_REG_POWER_UP 0x80
+#define PALMAS_RTC_STATUS_REG_POWER_UP_SHIFT 0x07
+#define PALMAS_RTC_STATUS_REG_ALARM 0x40
+#define PALMAS_RTC_STATUS_REG_ALARM_SHIFT 0x06
+#define PALMAS_RTC_STATUS_REG_EVENT_1D 0x20
+#define PALMAS_RTC_STATUS_REG_EVENT_1D_SHIFT 0x05
+#define PALMAS_RTC_STATUS_REG_EVENT_1H 0x10
+#define PALMAS_RTC_STATUS_REG_EVENT_1H_SHIFT 0x04
+#define PALMAS_RTC_STATUS_REG_EVENT_1M 0x08
+#define PALMAS_RTC_STATUS_REG_EVENT_1M_SHIFT 0x03
+#define PALMAS_RTC_STATUS_REG_EVENT_1S 0x04
+#define PALMAS_RTC_STATUS_REG_EVENT_1S_SHIFT 0x02
+#define PALMAS_RTC_STATUS_REG_RUN 0x02
+#define PALMAS_RTC_STATUS_REG_RUN_SHIFT 0x01
+
+/* Bit definitions for RTC_INTERRUPTS_REG */
+#define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN 0x10
+#define PALMAS_RTC_INTERRUPTS_REG_IT_SLEEP_MASK_EN_SHIFT 0x04
+#define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM 0x08
+#define PALMAS_RTC_INTERRUPTS_REG_IT_ALARM_SHIFT 0x03
+#define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER 0x04
+#define PALMAS_RTC_INTERRUPTS_REG_IT_TIMER_SHIFT 0x02
+#define PALMAS_RTC_INTERRUPTS_REG_EVERY_MASK 0x03
+#define PALMAS_RTC_INTERRUPTS_REG_EVERY_SHIFT 0x00
+
+/* Bit definitions for RTC_COMP_LSB_REG */
+#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_MASK 0xFF
+#define PALMAS_RTC_COMP_LSB_REG_RTC_COMP_LSB_SHIFT 0x00
+
+/* Bit definitions for RTC_COMP_MSB_REG */
+#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_MASK 0xFF
+#define PALMAS_RTC_COMP_MSB_REG_RTC_COMP_MSB_SHIFT 0x00
+
+/* Bit definitions for RTC_RES_PROG_REG */
+#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_MASK 0x3F
+#define PALMAS_RTC_RES_PROG_REG_SW_RES_PROG_SHIFT 0x00
+
+/* Bit definitions for RTC_RESET_STATUS_REG */
+#define PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS 0x01
+#define PALMAS_RTC_RESET_STATUS_REG_RESET_STATUS_SHIFT 0x00
+
+/* Registers for function BACKUP */
+#define PALMAS_BACKUP0 0x00
+#define PALMAS_BACKUP1 0x01
+#define PALMAS_BACKUP2 0x02
+#define PALMAS_BACKUP3 0x03
+#define PALMAS_BACKUP4 0x04
+#define PALMAS_BACKUP5 0x05
+#define PALMAS_BACKUP6 0x06
+#define PALMAS_BACKUP7 0x07
+
+/* Bit definitions for BACKUP0 */
+#define PALMAS_BACKUP0_BACKUP_MASK 0xFF
+#define PALMAS_BACKUP0_BACKUP_SHIFT 0x00
+
+/* Bit definitions for BACKUP1 */
+#define PALMAS_BACKUP1_BACKUP_MASK 0xFF
+#define PALMAS_BACKUP1_BACKUP_SHIFT 0x00
+
+/* Bit definitions for BACKUP2 */
+#define PALMAS_BACKUP2_BACKUP_MASK 0xFF
+#define PALMAS_BACKUP2_BACKUP_SHIFT 0x00
+
+/* Bit definitions for BACKUP3 */
+#define PALMAS_BACKUP3_BACKUP_MASK 0xFF
+#define PALMAS_BACKUP3_BACKUP_SHIFT 0x00
+
+/* Bit definitions for BACKUP4 */
+#define PALMAS_BACKUP4_BACKUP_MASK 0xFF
+#define PALMAS_BACKUP4_BACKUP_SHIFT 0x00
+
+/* Bit definitions for BACKUP5 */
+#define PALMAS_BACKUP5_BACKUP_MASK 0xFF
+#define PALMAS_BACKUP5_BACKUP_SHIFT 0x00
+
+/* Bit definitions for BACKUP6 */
+#define PALMAS_BACKUP6_BACKUP_MASK 0xFF
+#define PALMAS_BACKUP6_BACKUP_SHIFT 0x00
+
+/* Bit definitions for BACKUP7 */
+#define PALMAS_BACKUP7_BACKUP_MASK 0xFF
+#define PALMAS_BACKUP7_BACKUP_SHIFT 0x00
+
+/* Registers for function SMPS */
+#define PALMAS_SMPS12_CTRL 0x00
+#define PALMAS_SMPS12_TSTEP 0x01
+#define PALMAS_SMPS12_FORCE 0x02
+#define PALMAS_SMPS12_VOLTAGE 0x03
+#define PALMAS_SMPS3_CTRL 0x04
+#define PALMAS_SMPS3_VOLTAGE 0x07
+#define PALMAS_SMPS45_CTRL 0x08
+#define PALMAS_SMPS45_TSTEP 0x09
+#define PALMAS_SMPS45_FORCE 0x0A
+#define PALMAS_SMPS45_VOLTAGE 0x0B
+#define PALMAS_SMPS6_CTRL 0x0C
+#define PALMAS_SMPS6_TSTEP 0x0D
+#define PALMAS_SMPS6_FORCE 0x0E
+#define PALMAS_SMPS6_VOLTAGE 0x0F
+#define PALMAS_SMPS7_CTRL 0x10
+#define PALMAS_SMPS7_VOLTAGE 0x13
+#define PALMAS_SMPS8_CTRL 0x14
+#define PALMAS_SMPS8_TSTEP 0x15
+#define PALMAS_SMPS8_FORCE 0x16
+#define PALMAS_SMPS8_VOLTAGE 0x17
+#define PALMAS_SMPS9_CTRL 0x18
+#define PALMAS_SMPS9_VOLTAGE 0x1B
+#define PALMAS_SMPS10_CTRL 0x1C
+#define PALMAS_SMPS10_STATUS 0x1F
+#define PALMAS_SMPS_CTRL 0x24
+#define PALMAS_SMPS_PD_CTRL 0x25
+#define PALMAS_SMPS_DITHER_EN 0x26
+#define PALMAS_SMPS_THERMAL_EN 0x27
+#define PALMAS_SMPS_THERMAL_STATUS 0x28
+#define PALMAS_SMPS_SHORT_STATUS 0x29
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN 0x2A
+#define PALMAS_SMPS_POWERGOOD_MASK1 0x2B
+#define PALMAS_SMPS_POWERGOOD_MASK2 0x2C
+
+/* Bit definitions for SMPS12_CTRL */
+#define PALMAS_SMPS12_CTRL_WR_S 0x80
+#define PALMAS_SMPS12_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN 0x40
+#define PALMAS_SMPS12_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define PALMAS_SMPS12_CTRL_STATUS_MASK 0x30
+#define PALMAS_SMPS12_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SMPS12_CTRL_MODE_SLEEP_MASK 0x0c
+#define PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK 0x03
+#define PALMAS_SMPS12_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS12_TSTEP */
+#define PALMAS_SMPS12_TSTEP_TSTEP_MASK 0x03
+#define PALMAS_SMPS12_TSTEP_TSTEP_SHIFT 0x00
+
+/* Bit definitions for SMPS12_FORCE */
+#define PALMAS_SMPS12_FORCE_CMD 0x80
+#define PALMAS_SMPS12_FORCE_CMD_SHIFT 0x07
+#define PALMAS_SMPS12_FORCE_VSEL_MASK 0x7F
+#define PALMAS_SMPS12_FORCE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS12_VOLTAGE */
+#define PALMAS_SMPS12_VOLTAGE_RANGE 0x80
+#define PALMAS_SMPS12_VOLTAGE_RANGE_SHIFT 0x07
+#define PALMAS_SMPS12_VOLTAGE_VSEL_MASK 0x7F
+#define PALMAS_SMPS12_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS3_CTRL */
+#define PALMAS_SMPS3_CTRL_WR_S 0x80
+#define PALMAS_SMPS3_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_SMPS3_CTRL_STATUS_MASK 0x30
+#define PALMAS_SMPS3_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SMPS3_CTRL_MODE_SLEEP_MASK 0x0c
+#define PALMAS_SMPS3_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SMPS3_CTRL_MODE_ACTIVE_MASK 0x03
+#define PALMAS_SMPS3_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS3_VOLTAGE */
+#define PALMAS_SMPS3_VOLTAGE_RANGE 0x80
+#define PALMAS_SMPS3_VOLTAGE_RANGE_SHIFT 0x07
+#define PALMAS_SMPS3_VOLTAGE_VSEL_MASK 0x7F
+#define PALMAS_SMPS3_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS45_CTRL */
+#define PALMAS_SMPS45_CTRL_WR_S 0x80
+#define PALMAS_SMPS45_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN 0x40
+#define PALMAS_SMPS45_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define PALMAS_SMPS45_CTRL_STATUS_MASK 0x30
+#define PALMAS_SMPS45_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SMPS45_CTRL_MODE_SLEEP_MASK 0x0c
+#define PALMAS_SMPS45_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SMPS45_CTRL_MODE_ACTIVE_MASK 0x03
+#define PALMAS_SMPS45_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS45_TSTEP */
+#define PALMAS_SMPS45_TSTEP_TSTEP_MASK 0x03
+#define PALMAS_SMPS45_TSTEP_TSTEP_SHIFT 0x00
+
+/* Bit definitions for SMPS45_FORCE */
+#define PALMAS_SMPS45_FORCE_CMD 0x80
+#define PALMAS_SMPS45_FORCE_CMD_SHIFT 0x07
+#define PALMAS_SMPS45_FORCE_VSEL_MASK 0x7F
+#define PALMAS_SMPS45_FORCE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS45_VOLTAGE */
+#define PALMAS_SMPS45_VOLTAGE_RANGE 0x80
+#define PALMAS_SMPS45_VOLTAGE_RANGE_SHIFT 0x07
+#define PALMAS_SMPS45_VOLTAGE_VSEL_MASK 0x7F
+#define PALMAS_SMPS45_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS6_CTRL */
+#define PALMAS_SMPS6_CTRL_WR_S 0x80
+#define PALMAS_SMPS6_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN 0x40
+#define PALMAS_SMPS6_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define PALMAS_SMPS6_CTRL_STATUS_MASK 0x30
+#define PALMAS_SMPS6_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SMPS6_CTRL_MODE_SLEEP_MASK 0x0c
+#define PALMAS_SMPS6_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SMPS6_CTRL_MODE_ACTIVE_MASK 0x03
+#define PALMAS_SMPS6_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS6_TSTEP */
+#define PALMAS_SMPS6_TSTEP_TSTEP_MASK 0x03
+#define PALMAS_SMPS6_TSTEP_TSTEP_SHIFT 0x00
+
+/* Bit definitions for SMPS6_FORCE */
+#define PALMAS_SMPS6_FORCE_CMD 0x80
+#define PALMAS_SMPS6_FORCE_CMD_SHIFT 0x07
+#define PALMAS_SMPS6_FORCE_VSEL_MASK 0x7F
+#define PALMAS_SMPS6_FORCE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS6_VOLTAGE */
+#define PALMAS_SMPS6_VOLTAGE_RANGE 0x80
+#define PALMAS_SMPS6_VOLTAGE_RANGE_SHIFT 0x07
+#define PALMAS_SMPS6_VOLTAGE_VSEL_MASK 0x7F
+#define PALMAS_SMPS6_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS7_CTRL */
+#define PALMAS_SMPS7_CTRL_WR_S 0x80
+#define PALMAS_SMPS7_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_SMPS7_CTRL_STATUS_MASK 0x30
+#define PALMAS_SMPS7_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SMPS7_CTRL_MODE_SLEEP_MASK 0x0c
+#define PALMAS_SMPS7_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SMPS7_CTRL_MODE_ACTIVE_MASK 0x03
+#define PALMAS_SMPS7_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS7_VOLTAGE */
+#define PALMAS_SMPS7_VOLTAGE_RANGE 0x80
+#define PALMAS_SMPS7_VOLTAGE_RANGE_SHIFT 0x07
+#define PALMAS_SMPS7_VOLTAGE_VSEL_MASK 0x7F
+#define PALMAS_SMPS7_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS8_CTRL */
+#define PALMAS_SMPS8_CTRL_WR_S 0x80
+#define PALMAS_SMPS8_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN 0x40
+#define PALMAS_SMPS8_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define PALMAS_SMPS8_CTRL_STATUS_MASK 0x30
+#define PALMAS_SMPS8_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SMPS8_CTRL_MODE_SLEEP_MASK 0x0c
+#define PALMAS_SMPS8_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SMPS8_CTRL_MODE_ACTIVE_MASK 0x03
+#define PALMAS_SMPS8_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS8_TSTEP */
+#define PALMAS_SMPS8_TSTEP_TSTEP_MASK 0x03
+#define PALMAS_SMPS8_TSTEP_TSTEP_SHIFT 0x00
+
+/* Bit definitions for SMPS8_FORCE */
+#define PALMAS_SMPS8_FORCE_CMD 0x80
+#define PALMAS_SMPS8_FORCE_CMD_SHIFT 0x07
+#define PALMAS_SMPS8_FORCE_VSEL_MASK 0x7F
+#define PALMAS_SMPS8_FORCE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS8_VOLTAGE */
+#define PALMAS_SMPS8_VOLTAGE_RANGE 0x80
+#define PALMAS_SMPS8_VOLTAGE_RANGE_SHIFT 0x07
+#define PALMAS_SMPS8_VOLTAGE_VSEL_MASK 0x7F
+#define PALMAS_SMPS8_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS9_CTRL */
+#define PALMAS_SMPS9_CTRL_WR_S 0x80
+#define PALMAS_SMPS9_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_SMPS9_CTRL_STATUS_MASK 0x30
+#define PALMAS_SMPS9_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SMPS9_CTRL_MODE_SLEEP_MASK 0x0c
+#define PALMAS_SMPS9_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SMPS9_CTRL_MODE_ACTIVE_MASK 0x03
+#define PALMAS_SMPS9_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS9_VOLTAGE */
+#define PALMAS_SMPS9_VOLTAGE_RANGE 0x80
+#define PALMAS_SMPS9_VOLTAGE_RANGE_SHIFT 0x07
+#define PALMAS_SMPS9_VOLTAGE_VSEL_MASK 0x7F
+#define PALMAS_SMPS9_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS10_CTRL */
+#define PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK 0xf0
+#define PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT 0x04
+#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_MASK 0x0F
+#define PALMAS_SMPS10_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS10_STATUS */
+#define PALMAS_SMPS10_STATUS_STATUS_MASK 0x0F
+#define PALMAS_SMPS10_STATUS_STATUS_SHIFT 0x00
+
+/* Bit definitions for SMPS_CTRL */
+#define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN 0x20
+#define PALMAS_SMPS_CTRL_SMPS45_SMPS457_EN_SHIFT 0x05
+#define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN 0x10
+#define PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN_SHIFT 0x04
+#define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_MASK 0x0c
+#define PALMAS_SMPS_CTRL_SMPS45_PHASE_CTRL_SHIFT 0x02
+#define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_MASK 0x03
+#define PALMAS_SMPS_CTRL_SMPS123_PHASE_CTRL_SHIFT 0x00
+
+/* Bit definitions for SMPS_PD_CTRL */
+#define PALMAS_SMPS_PD_CTRL_SMPS9 0x40
+#define PALMAS_SMPS_PD_CTRL_SMPS9_SHIFT 0x06
+#define PALMAS_SMPS_PD_CTRL_SMPS8 0x20
+#define PALMAS_SMPS_PD_CTRL_SMPS8_SHIFT 0x05
+#define PALMAS_SMPS_PD_CTRL_SMPS7 0x10
+#define PALMAS_SMPS_PD_CTRL_SMPS7_SHIFT 0x04
+#define PALMAS_SMPS_PD_CTRL_SMPS6 0x08
+#define PALMAS_SMPS_PD_CTRL_SMPS6_SHIFT 0x03
+#define PALMAS_SMPS_PD_CTRL_SMPS45 0x04
+#define PALMAS_SMPS_PD_CTRL_SMPS45_SHIFT 0x02
+#define PALMAS_SMPS_PD_CTRL_SMPS3 0x02
+#define PALMAS_SMPS_PD_CTRL_SMPS3_SHIFT 0x01
+#define PALMAS_SMPS_PD_CTRL_SMPS12 0x01
+#define PALMAS_SMPS_PD_CTRL_SMPS12_SHIFT 0x00
+
+/* Bit definitions for SMPS_THERMAL_EN */
+#define PALMAS_SMPS_THERMAL_EN_SMPS9 0x40
+#define PALMAS_SMPS_THERMAL_EN_SMPS9_SHIFT 0x06
+#define PALMAS_SMPS_THERMAL_EN_SMPS8 0x20
+#define PALMAS_SMPS_THERMAL_EN_SMPS8_SHIFT 0x05
+#define PALMAS_SMPS_THERMAL_EN_SMPS6 0x08
+#define PALMAS_SMPS_THERMAL_EN_SMPS6_SHIFT 0x03
+#define PALMAS_SMPS_THERMAL_EN_SMPS457 0x04
+#define PALMAS_SMPS_THERMAL_EN_SMPS457_SHIFT 0x02
+#define PALMAS_SMPS_THERMAL_EN_SMPS123 0x01
+#define PALMAS_SMPS_THERMAL_EN_SMPS123_SHIFT 0x00
+
+/* Bit definitions for SMPS_THERMAL_STATUS */
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS9 0x40
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS9_SHIFT 0x06
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS8 0x20
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS8_SHIFT 0x05
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS6 0x08
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS6_SHIFT 0x03
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS457 0x04
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS457_SHIFT 0x02
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS123 0x01
+#define PALMAS_SMPS_THERMAL_STATUS_SMPS123_SHIFT 0x00
+
+/* Bit definitions for SMPS_SHORT_STATUS */
+#define PALMAS_SMPS_SHORT_STATUS_SMPS10 0x80
+#define PALMAS_SMPS_SHORT_STATUS_SMPS10_SHIFT 0x07
+#define PALMAS_SMPS_SHORT_STATUS_SMPS9 0x40
+#define PALMAS_SMPS_SHORT_STATUS_SMPS9_SHIFT 0x06
+#define PALMAS_SMPS_SHORT_STATUS_SMPS8 0x20
+#define PALMAS_SMPS_SHORT_STATUS_SMPS8_SHIFT 0x05
+#define PALMAS_SMPS_SHORT_STATUS_SMPS7 0x10
+#define PALMAS_SMPS_SHORT_STATUS_SMPS7_SHIFT 0x04
+#define PALMAS_SMPS_SHORT_STATUS_SMPS6 0x08
+#define PALMAS_SMPS_SHORT_STATUS_SMPS6_SHIFT 0x03
+#define PALMAS_SMPS_SHORT_STATUS_SMPS45 0x04
+#define PALMAS_SMPS_SHORT_STATUS_SMPS45_SHIFT 0x02
+#define PALMAS_SMPS_SHORT_STATUS_SMPS3 0x02
+#define PALMAS_SMPS_SHORT_STATUS_SMPS3_SHIFT 0x01
+#define PALMAS_SMPS_SHORT_STATUS_SMPS12 0x01
+#define PALMAS_SMPS_SHORT_STATUS_SMPS12_SHIFT 0x00
+
+/* Bit definitions for SMPS_NEGATIVE_CURRENT_LIMIT_EN */
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9 0x40
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS9_SHIFT 0x06
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8 0x20
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS8_SHIFT 0x05
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7 0x10
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS7_SHIFT 0x04
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6 0x08
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS6_SHIFT 0x03
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45 0x04
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS45_SHIFT 0x02
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3 0x02
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3_SHIFT 0x01
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12 0x01
+#define PALMAS_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS12_SHIFT 0x00
+
+/* Bit definitions for SMPS_POWERGOOD_MASK1 */
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10 0x80
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS10_SHIFT 0x07
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9 0x40
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS9_SHIFT 0x06
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8 0x20
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS8_SHIFT 0x05
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7 0x10
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS7_SHIFT 0x04
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6 0x08
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS6_SHIFT 0x03
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45 0x04
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS45_SHIFT 0x02
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3 0x02
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS3_SHIFT 0x01
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12 0x01
+#define PALMAS_SMPS_POWERGOOD_MASK1_SMPS12_SHIFT 0x00
+
+/* Bit definitions for SMPS_POWERGOOD_MASK2 */
+#define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT 0x80
+#define PALMAS_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT_SHIFT 0x07
+#define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7 0x04
+#define PALMAS_SMPS_POWERGOOD_MASK2_GPIO_7_SHIFT 0x02
+#define PALMAS_SMPS_POWERGOOD_MASK2_VBUS 0x02
+#define PALMAS_SMPS_POWERGOOD_MASK2_VBUS_SHIFT 0x01
+#define PALMAS_SMPS_POWERGOOD_MASK2_ACOK 0x01
+#define PALMAS_SMPS_POWERGOOD_MASK2_ACOK_SHIFT 0x00
+
+/* Registers for function LDO */
+#define PALMAS_LDO1_CTRL 0x00
+#define PALMAS_LDO1_VOLTAGE 0x01
+#define PALMAS_LDO2_CTRL 0x02
+#define PALMAS_LDO2_VOLTAGE 0x03
+#define PALMAS_LDO3_CTRL 0x04
+#define PALMAS_LDO3_VOLTAGE 0x05
+#define PALMAS_LDO4_CTRL 0x06
+#define PALMAS_LDO4_VOLTAGE 0x07
+#define PALMAS_LDO5_CTRL 0x08
+#define PALMAS_LDO5_VOLTAGE 0x09
+#define PALMAS_LDO6_CTRL 0x0A
+#define PALMAS_LDO6_VOLTAGE 0x0B
+#define PALMAS_LDO7_CTRL 0x0C
+#define PALMAS_LDO7_VOLTAGE 0x0D
+#define PALMAS_LDO8_CTRL 0x0E
+#define PALMAS_LDO8_VOLTAGE 0x0F
+#define PALMAS_LDO9_CTRL 0x10
+#define PALMAS_LDO9_VOLTAGE 0x11
+#define PALMAS_LDOLN_CTRL 0x12
+#define PALMAS_LDOLN_VOLTAGE 0x13
+#define PALMAS_LDOUSB_CTRL 0x14
+#define PALMAS_LDOUSB_VOLTAGE 0x15
+#define PALMAS_LDO_CTRL 0x1A
+#define PALMAS_LDO_PD_CTRL1 0x1B
+#define PALMAS_LDO_PD_CTRL2 0x1C
+#define PALMAS_LDO_SHORT_STATUS1 0x1D
+#define PALMAS_LDO_SHORT_STATUS2 0x1E
+
+/* Bit definitions for LDO1_CTRL */
+#define PALMAS_LDO1_CTRL_WR_S 0x80
+#define PALMAS_LDO1_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO1_CTRL_STATUS 0x10
+#define PALMAS_LDO1_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO1_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO1_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO1_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO1_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO1_VOLTAGE */
+#define PALMAS_LDO1_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO1_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO2_CTRL */
+#define PALMAS_LDO2_CTRL_WR_S 0x80
+#define PALMAS_LDO2_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO2_CTRL_STATUS 0x10
+#define PALMAS_LDO2_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO2_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO2_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO2_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO2_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO2_VOLTAGE */
+#define PALMAS_LDO2_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO2_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO3_CTRL */
+#define PALMAS_LDO3_CTRL_WR_S 0x80
+#define PALMAS_LDO3_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO3_CTRL_STATUS 0x10
+#define PALMAS_LDO3_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO3_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO3_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO3_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO3_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO3_VOLTAGE */
+#define PALMAS_LDO3_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO3_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO4_CTRL */
+#define PALMAS_LDO4_CTRL_WR_S 0x80
+#define PALMAS_LDO4_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO4_CTRL_STATUS 0x10
+#define PALMAS_LDO4_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO4_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO4_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO4_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO4_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO4_VOLTAGE */
+#define PALMAS_LDO4_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO4_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO5_CTRL */
+#define PALMAS_LDO5_CTRL_WR_S 0x80
+#define PALMAS_LDO5_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO5_CTRL_STATUS 0x10
+#define PALMAS_LDO5_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO5_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO5_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO5_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO5_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO5_VOLTAGE */
+#define PALMAS_LDO5_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO5_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO6_CTRL */
+#define PALMAS_LDO6_CTRL_WR_S 0x80
+#define PALMAS_LDO6_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO6_CTRL_LDO_VIB_EN 0x40
+#define PALMAS_LDO6_CTRL_LDO_VIB_EN_SHIFT 0x06
+#define PALMAS_LDO6_CTRL_STATUS 0x10
+#define PALMAS_LDO6_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO6_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO6_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO6_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO6_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO6_VOLTAGE */
+#define PALMAS_LDO6_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO6_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO7_CTRL */
+#define PALMAS_LDO7_CTRL_WR_S 0x80
+#define PALMAS_LDO7_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO7_CTRL_STATUS 0x10
+#define PALMAS_LDO7_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO7_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO7_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO7_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO7_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO7_VOLTAGE */
+#define PALMAS_LDO7_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO7_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO8_CTRL */
+#define PALMAS_LDO8_CTRL_WR_S 0x80
+#define PALMAS_LDO8_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO8_CTRL_LDO_TRACKING_EN 0x40
+#define PALMAS_LDO8_CTRL_LDO_TRACKING_EN_SHIFT 0x06
+#define PALMAS_LDO8_CTRL_STATUS 0x10
+#define PALMAS_LDO8_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO8_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO8_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO8_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO8_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO8_VOLTAGE */
+#define PALMAS_LDO8_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO8_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO9_CTRL */
+#define PALMAS_LDO9_CTRL_WR_S 0x80
+#define PALMAS_LDO9_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDO9_CTRL_LDO_BYPASS_EN 0x40
+#define PALMAS_LDO9_CTRL_LDO_BYPASS_EN_SHIFT 0x06
+#define PALMAS_LDO9_CTRL_STATUS 0x10
+#define PALMAS_LDO9_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDO9_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDO9_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDO9_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDO9_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO9_VOLTAGE */
+#define PALMAS_LDO9_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDO9_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDOLN_CTRL */
+#define PALMAS_LDOLN_CTRL_WR_S 0x80
+#define PALMAS_LDOLN_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDOLN_CTRL_STATUS 0x10
+#define PALMAS_LDOLN_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDOLN_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDOLN_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDOLN_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDOLN_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDOLN_VOLTAGE */
+#define PALMAS_LDOLN_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDOLN_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDOUSB_CTRL */
+#define PALMAS_LDOUSB_CTRL_WR_S 0x80
+#define PALMAS_LDOUSB_CTRL_WR_S_SHIFT 0x07
+#define PALMAS_LDOUSB_CTRL_STATUS 0x10
+#define PALMAS_LDOUSB_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_LDOUSB_CTRL_MODE_SLEEP 0x04
+#define PALMAS_LDOUSB_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_LDOUSB_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_LDOUSB_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDOUSB_VOLTAGE */
+#define PALMAS_LDOUSB_VOLTAGE_VSEL_MASK 0x3F
+#define PALMAS_LDOUSB_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO_CTRL */
+#define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS 0x01
+#define PALMAS_LDO_CTRL_LDOUSB_ON_VBUS_VSYS_SHIFT 0x00
+
+/* Bit definitions for LDO_PD_CTRL1 */
+#define PALMAS_LDO_PD_CTRL1_LDO8 0x80
+#define PALMAS_LDO_PD_CTRL1_LDO8_SHIFT 0x07
+#define PALMAS_LDO_PD_CTRL1_LDO7 0x40
+#define PALMAS_LDO_PD_CTRL1_LDO7_SHIFT 0x06
+#define PALMAS_LDO_PD_CTRL1_LDO6 0x20
+#define PALMAS_LDO_PD_CTRL1_LDO6_SHIFT 0x05
+#define PALMAS_LDO_PD_CTRL1_LDO5 0x10
+#define PALMAS_LDO_PD_CTRL1_LDO5_SHIFT 0x04
+#define PALMAS_LDO_PD_CTRL1_LDO4 0x08
+#define PALMAS_LDO_PD_CTRL1_LDO4_SHIFT 0x03
+#define PALMAS_LDO_PD_CTRL1_LDO3 0x04
+#define PALMAS_LDO_PD_CTRL1_LDO3_SHIFT 0x02
+#define PALMAS_LDO_PD_CTRL1_LDO2 0x02
+#define PALMAS_LDO_PD_CTRL1_LDO2_SHIFT 0x01
+#define PALMAS_LDO_PD_CTRL1_LDO1 0x01
+#define PALMAS_LDO_PD_CTRL1_LDO1_SHIFT 0x00
+
+/* Bit definitions for LDO_PD_CTRL2 */
+#define PALMAS_LDO_PD_CTRL2_LDOUSB 0x04
+#define PALMAS_LDO_PD_CTRL2_LDOUSB_SHIFT 0x02
+#define PALMAS_LDO_PD_CTRL2_LDOLN 0x02
+#define PALMAS_LDO_PD_CTRL2_LDOLN_SHIFT 0x01
+#define PALMAS_LDO_PD_CTRL2_LDO9 0x01
+#define PALMAS_LDO_PD_CTRL2_LDO9_SHIFT 0x00
+
+/* Bit definitions for LDO_SHORT_STATUS1 */
+#define PALMAS_LDO_SHORT_STATUS1_LDO8 0x80
+#define PALMAS_LDO_SHORT_STATUS1_LDO8_SHIFT 0x07
+#define PALMAS_LDO_SHORT_STATUS1_LDO7 0x40
+#define PALMAS_LDO_SHORT_STATUS1_LDO7_SHIFT 0x06
+#define PALMAS_LDO_SHORT_STATUS1_LDO6 0x20
+#define PALMAS_LDO_SHORT_STATUS1_LDO6_SHIFT 0x05
+#define PALMAS_LDO_SHORT_STATUS1_LDO5 0x10
+#define PALMAS_LDO_SHORT_STATUS1_LDO5_SHIFT 0x04
+#define PALMAS_LDO_SHORT_STATUS1_LDO4 0x08
+#define PALMAS_LDO_SHORT_STATUS1_LDO4_SHIFT 0x03
+#define PALMAS_LDO_SHORT_STATUS1_LDO3 0x04
+#define PALMAS_LDO_SHORT_STATUS1_LDO3_SHIFT 0x02
+#define PALMAS_LDO_SHORT_STATUS1_LDO2 0x02
+#define PALMAS_LDO_SHORT_STATUS1_LDO2_SHIFT 0x01
+#define PALMAS_LDO_SHORT_STATUS1_LDO1 0x01
+#define PALMAS_LDO_SHORT_STATUS1_LDO1_SHIFT 0x00
+
+/* Bit definitions for LDO_SHORT_STATUS2 */
+#define PALMAS_LDO_SHORT_STATUS2_LDOVANA 0x08
+#define PALMAS_LDO_SHORT_STATUS2_LDOVANA_SHIFT 0x03
+#define PALMAS_LDO_SHORT_STATUS2_LDOUSB 0x04
+#define PALMAS_LDO_SHORT_STATUS2_LDOUSB_SHIFT 0x02
+#define PALMAS_LDO_SHORT_STATUS2_LDOLN 0x02
+#define PALMAS_LDO_SHORT_STATUS2_LDOLN_SHIFT 0x01
+#define PALMAS_LDO_SHORT_STATUS2_LDO9 0x01
+#define PALMAS_LDO_SHORT_STATUS2_LDO9_SHIFT 0x00
+
+/* Registers for function PMU_CONTROL */
+#define PALMAS_DEV_CTRL 0x00
+#define PALMAS_POWER_CTRL 0x01
+#define PALMAS_VSYS_LO 0x02
+#define PALMAS_VSYS_MON 0x03
+#define PALMAS_VBAT_MON 0x04
+#define PALMAS_WATCHDOG 0x05
+#define PALMAS_BOOT_STATUS 0x06
+#define PALMAS_BATTERY_BOUNCE 0x07
+#define PALMAS_BACKUP_BATTERY_CTRL 0x08
+#define PALMAS_LONG_PRESS_KEY 0x09
+#define PALMAS_OSC_THERM_CTRL 0x0A
+#define PALMAS_BATDEBOUNCING 0x0B
+#define PALMAS_SWOFF_HWRST 0x0F
+#define PALMAS_SWOFF_COLDRST 0x10
+#define PALMAS_SWOFF_STATUS 0x11
+#define PALMAS_PMU_CONFIG 0x12
+#define PALMAS_SPARE 0x14
+#define PALMAS_PMU_SECONDARY_INT 0x15
+#define PALMAS_SW_REVISION 0x17
+#define PALMAS_EXT_CHRG_CTRL 0x18
+#define PALMAS_PMU_SECONDARY_INT2 0x19
+
+/* Bit definitions for DEV_CTRL */
+#define PALMAS_DEV_CTRL_DEV_STATUS_MASK 0x0c
+#define PALMAS_DEV_CTRL_DEV_STATUS_SHIFT 0x02
+#define PALMAS_DEV_CTRL_SW_RST 0x02
+#define PALMAS_DEV_CTRL_SW_RST_SHIFT 0x01
+#define PALMAS_DEV_CTRL_DEV_ON 0x01
+#define PALMAS_DEV_CTRL_DEV_ON_SHIFT 0x00
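+
+/*
+ * Sketch (illustrative only; "regmap" is assumed to be the regmap handle of
+ * the slave selected by PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE)): a
+ * software power-off clears DEV_ON in DEV_CTRL, e.g.
+ *
+ *	regmap_update_bits(regmap,
+ *		PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL),
+ *		PALMAS_DEV_CTRL_DEV_ON, 0);
+ */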
+
+/* Bit definitions for POWER_CTRL */
+#define PALMAS_POWER_CTRL_ENABLE2_MASK 0x04
+#define PALMAS_POWER_CTRL_ENABLE2_MASK_SHIFT 0x02
+#define PALMAS_POWER_CTRL_ENABLE1_MASK 0x02
+#define PALMAS_POWER_CTRL_ENABLE1_MASK_SHIFT 0x01
+#define PALMAS_POWER_CTRL_NSLEEP_MASK 0x01
+#define PALMAS_POWER_CTRL_NSLEEP_MASK_SHIFT 0x00
+
+/* Bit definitions for VSYS_LO */
+#define PALMAS_VSYS_LO_THRESHOLD_MASK 0x1F
+#define PALMAS_VSYS_LO_THRESHOLD_SHIFT 0x00
+
+/* Bit definitions for VSYS_MON */
+#define PALMAS_VSYS_MON_ENABLE 0x80
+#define PALMAS_VSYS_MON_ENABLE_SHIFT 0x07
+#define PALMAS_VSYS_MON_THRESHOLD_MASK 0x3F
+#define PALMAS_VSYS_MON_THRESHOLD_SHIFT 0x00
+
+/* Bit definitions for VBAT_MON */
+#define PALMAS_VBAT_MON_ENABLE 0x80
+#define PALMAS_VBAT_MON_ENABLE_SHIFT 0x07
+#define PALMAS_VBAT_MON_THRESHOLD_MASK 0x3F
+#define PALMAS_VBAT_MON_THRESHOLD_SHIFT 0x00
+
+/* Bit definitions for WATCHDOG */
+#define PALMAS_WATCHDOG_LOCK 0x20
+#define PALMAS_WATCHDOG_LOCK_SHIFT 0x05
+#define PALMAS_WATCHDOG_ENABLE 0x10
+#define PALMAS_WATCHDOG_ENABLE_SHIFT 0x04
+#define PALMAS_WATCHDOG_MODE 0x08
+#define PALMAS_WATCHDOG_MODE_SHIFT 0x03
+#define PALMAS_WATCHDOG_TIMER_MASK 0x07
+#define PALMAS_WATCHDOG_TIMER_SHIFT 0x00
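+
+/*
+ * Sketch (illustrative only; assumes ENABLE gates the watchdog and TIMER
+ * selects its period, as the field names suggest, and that "regmap" is the
+ * handle for the PMU_CONTROL slave):
+ *
+ *	regmap_update_bits(regmap,
+ *		PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_WATCHDOG),
+ *		PALMAS_WATCHDOG_ENABLE | PALMAS_WATCHDOG_TIMER_MASK,
+ *		PALMAS_WATCHDOG_ENABLE | timer_val);
+ */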
+
+/* Bit definitions for BOOT_STATUS */
+#define PALMAS_BOOT_STATUS_BOOT1 0x02
+#define PALMAS_BOOT_STATUS_BOOT1_SHIFT 0x01
+#define PALMAS_BOOT_STATUS_BOOT0 0x01
+#define PALMAS_BOOT_STATUS_BOOT0_SHIFT 0x00
+
+/* Bit definitions for BATTERY_BOUNCE */
+#define PALMAS_BATTERY_BOUNCE_BB_DELAY_MASK 0x3F
+#define PALMAS_BATTERY_BOUNCE_BB_DELAY_SHIFT 0x00
+
+/* Bit definitions for BACKUP_BATTERY_CTRL */
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15 0x80
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_18_15_SHIFT 0x07
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP 0x40
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_SLP_SHIFT 0x06
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF 0x20
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_EN_OFF_SHIFT 0x05
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN 0x10
+#define PALMAS_BACKUP_BATTERY_CTRL_VRTC_PWEN_SHIFT 0x04
+#define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG 0x08
+#define PALMAS_BACKUP_BATTERY_CTRL_BBS_BBC_LOW_ICHRG_SHIFT 0x03
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_MASK 0x06
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_SEL_SHIFT 0x01
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN 0x01
+#define PALMAS_BACKUP_BATTERY_CTRL_BB_CHG_EN_SHIFT 0x00
+
+/* Bit definitions for LONG_PRESS_KEY */
+#define PALMAS_LONG_PRESS_KEY_LPK_LOCK 0x80
+#define PALMAS_LONG_PRESS_KEY_LPK_LOCK_SHIFT 0x07
+#define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR 0x10
+#define PALMAS_LONG_PRESS_KEY_LPK_INT_CLR_SHIFT 0x04
+#define PALMAS_LONG_PRESS_KEY_LPK_TIME_MASK 0x0c
+#define PALMAS_LONG_PRESS_KEY_LPK_TIME_SHIFT 0x02
+#define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_MASK 0x03
+#define PALMAS_LONG_PRESS_KEY_PWRON_DEBOUNCE_SHIFT 0x00
+
+/* Bit definitions for OSC_THERM_CTRL */
+#define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP 0x80
+#define PALMAS_OSC_THERM_CTRL_VANA_ON_IN_SLEEP_SHIFT 0x07
+#define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP 0x40
+#define PALMAS_OSC_THERM_CTRL_INT_MASK_IN_SLEEP_SHIFT 0x06
+#define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP 0x20
+#define PALMAS_OSC_THERM_CTRL_RC15MHZ_ON_IN_SLEEP_SHIFT 0x05
+#define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP 0x10
+#define PALMAS_OSC_THERM_CTRL_THERM_OFF_IN_SLEEP_SHIFT 0x04
+#define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_MASK 0x0c
+#define PALMAS_OSC_THERM_CTRL_THERM_HD_SEL_SHIFT 0x02
+#define PALMAS_OSC_THERM_CTRL_OSC_BYPASS 0x02
+#define PALMAS_OSC_THERM_CTRL_OSC_BYPASS_SHIFT 0x01
+#define PALMAS_OSC_THERM_CTRL_OSC_HPMODE 0x01
+#define PALMAS_OSC_THERM_CTRL_OSC_HPMODE_SHIFT 0x00
+
+/* Bit definitions for BATDEBOUNCING */
+#define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS 0x80
+#define PALMAS_BATDEBOUNCING_BAT_DEB_BYPASS_SHIFT 0x07
+#define PALMAS_BATDEBOUNCING_BINS_DEB_MASK 0x78
+#define PALMAS_BATDEBOUNCING_BINS_DEB_SHIFT 0x03
+#define PALMAS_BATDEBOUNCING_BEXT_DEB_MASK 0x07
+#define PALMAS_BATDEBOUNCING_BEXT_DEB_SHIFT 0x00
+
+/* Bit definitions for SWOFF_HWRST */
+#define PALMAS_SWOFF_HWRST_PWRON_LPK 0x80
+#define PALMAS_SWOFF_HWRST_PWRON_LPK_SHIFT 0x07
+#define PALMAS_SWOFF_HWRST_PWRDOWN 0x40
+#define PALMAS_SWOFF_HWRST_PWRDOWN_SHIFT 0x06
+#define PALMAS_SWOFF_HWRST_WTD 0x20
+#define PALMAS_SWOFF_HWRST_WTD_SHIFT 0x05
+#define PALMAS_SWOFF_HWRST_TSHUT 0x10
+#define PALMAS_SWOFF_HWRST_TSHUT_SHIFT 0x04
+#define PALMAS_SWOFF_HWRST_RESET_IN 0x08
+#define PALMAS_SWOFF_HWRST_RESET_IN_SHIFT 0x03
+#define PALMAS_SWOFF_HWRST_SW_RST 0x04
+#define PALMAS_SWOFF_HWRST_SW_RST_SHIFT 0x02
+#define PALMAS_SWOFF_HWRST_VSYS_LO 0x02
+#define PALMAS_SWOFF_HWRST_VSYS_LO_SHIFT 0x01
+#define PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN 0x01
+#define PALMAS_SWOFF_HWRST_GPADC_SHUTDOWN_SHIFT 0x00
+
+/* Bit definitions for SWOFF_COLDRST */
+#define PALMAS_SWOFF_COLDRST_PWRON_LPK 0x80
+#define PALMAS_SWOFF_COLDRST_PWRON_LPK_SHIFT 0x07
+#define PALMAS_SWOFF_COLDRST_PWRDOWN 0x40
+#define PALMAS_SWOFF_COLDRST_PWRDOWN_SHIFT 0x06
+#define PALMAS_SWOFF_COLDRST_WTD 0x20
+#define PALMAS_SWOFF_COLDRST_WTD_SHIFT 0x05
+#define PALMAS_SWOFF_COLDRST_TSHUT 0x10
+#define PALMAS_SWOFF_COLDRST_TSHUT_SHIFT 0x04
+#define PALMAS_SWOFF_COLDRST_RESET_IN 0x08
+#define PALMAS_SWOFF_COLDRST_RESET_IN_SHIFT 0x03
+#define PALMAS_SWOFF_COLDRST_SW_RST 0x04
+#define PALMAS_SWOFF_COLDRST_SW_RST_SHIFT 0x02
+#define PALMAS_SWOFF_COLDRST_VSYS_LO 0x02
+#define PALMAS_SWOFF_COLDRST_VSYS_LO_SHIFT 0x01
+#define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN 0x01
+#define PALMAS_SWOFF_COLDRST_GPADC_SHUTDOWN_SHIFT 0x00
+
+/* Bit definitions for SWOFF_STATUS */
+#define PALMAS_SWOFF_STATUS_PWRON_LPK 0x80
+#define PALMAS_SWOFF_STATUS_PWRON_LPK_SHIFT 0x07
+#define PALMAS_SWOFF_STATUS_PWRDOWN 0x40
+#define PALMAS_SWOFF_STATUS_PWRDOWN_SHIFT 0x06
+#define PALMAS_SWOFF_STATUS_WTD 0x20
+#define PALMAS_SWOFF_STATUS_WTD_SHIFT 0x05
+#define PALMAS_SWOFF_STATUS_TSHUT 0x10
+#define PALMAS_SWOFF_STATUS_TSHUT_SHIFT 0x04
+#define PALMAS_SWOFF_STATUS_RESET_IN 0x08
+#define PALMAS_SWOFF_STATUS_RESET_IN_SHIFT 0x03
+#define PALMAS_SWOFF_STATUS_SW_RST 0x04
+#define PALMAS_SWOFF_STATUS_SW_RST_SHIFT 0x02
+#define PALMAS_SWOFF_STATUS_VSYS_LO 0x02
+#define PALMAS_SWOFF_STATUS_VSYS_LO_SHIFT 0x01
+#define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN 0x01
+#define PALMAS_SWOFF_STATUS_GPADC_SHUTDOWN_SHIFT 0x00
+
+/* Bit definitions for PMU_CONFIG */
+#define PALMAS_PMU_CONFIG_MULTI_CELL_EN 0x40
+#define PALMAS_PMU_CONFIG_MULTI_CELL_EN_SHIFT 0x06
+#define PALMAS_PMU_CONFIG_SPARE_MASK 0x30
+#define PALMAS_PMU_CONFIG_SPARE_SHIFT 0x04
+#define PALMAS_PMU_CONFIG_SWOFF_DLY_MASK 0x0c
+#define PALMAS_PMU_CONFIG_SWOFF_DLY_SHIFT 0x02
+#define PALMAS_PMU_CONFIG_GATE_RESET_OUT 0x02
+#define PALMAS_PMU_CONFIG_GATE_RESET_OUT_SHIFT 0x01
+#define PALMAS_PMU_CONFIG_AUTODEVON 0x01
+#define PALMAS_PMU_CONFIG_AUTODEVON_SHIFT 0x00
+
+/* Bit definitions for SPARE */
+#define PALMAS_SPARE_SPARE_MASK 0xf8
+#define PALMAS_SPARE_SPARE_SHIFT 0x03
+#define PALMAS_SPARE_REGEN3_OD 0x04
+#define PALMAS_SPARE_REGEN3_OD_SHIFT 0x02
+#define PALMAS_SPARE_REGEN2_OD 0x02
+#define PALMAS_SPARE_REGEN2_OD_SHIFT 0x01
+#define PALMAS_SPARE_REGEN1_OD 0x01
+#define PALMAS_SPARE_REGEN1_OD_SHIFT 0x00
+
+/* Bit definitions for PMU_SECONDARY_INT */
+#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC 0x80
+#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_INT_SRC_SHIFT 0x07
+#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC 0x40
+#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_INT_SRC_SHIFT 0x06
+#define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC 0x20
+#define PALMAS_PMU_SECONDARY_INT_BB_INT_SRC_SHIFT 0x05
+#define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC 0x10
+#define PALMAS_PMU_SECONDARY_INT_FBI_INT_SRC_SHIFT 0x04
+#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK 0x08
+#define PALMAS_PMU_SECONDARY_INT_VBUS_OVV_MASK_SHIFT 0x03
+#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK 0x04
+#define PALMAS_PMU_SECONDARY_INT_CHARG_DET_N_MASK_SHIFT 0x02
+#define PALMAS_PMU_SECONDARY_INT_BB_MASK 0x02
+#define PALMAS_PMU_SECONDARY_INT_BB_MASK_SHIFT 0x01
+#define PALMAS_PMU_SECONDARY_INT_FBI_MASK 0x01
+#define PALMAS_PMU_SECONDARY_INT_FBI_MASK_SHIFT 0x00
+
+/* Bit definitions for SW_REVISION */
+#define PALMAS_SW_REVISION_SW_REVISION_MASK 0xFF
+#define PALMAS_SW_REVISION_SW_REVISION_SHIFT 0x00
+
+/* Bit definitions for EXT_CHRG_CTRL */
+#define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS 0x80
+#define PALMAS_EXT_CHRG_CTRL_VBUS_OVV_STATUS_SHIFT 0x07
+#define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS 0x40
+#define PALMAS_EXT_CHRG_CTRL_CHARG_DET_N_STATUS_SHIFT 0x06
+#define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY 0x08
+#define PALMAS_EXT_CHRG_CTRL_VSYS_DEBOUNCE_DELAY_SHIFT 0x03
+#define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N 0x04
+#define PALMAS_EXT_CHRG_CTRL_CHRG_DET_N_SHIFT 0x02
+#define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN 0x02
+#define PALMAS_EXT_CHRG_CTRL_AUTO_ACA_EN_SHIFT 0x01
+#define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN 0x01
+#define PALMAS_EXT_CHRG_CTRL_AUTO_LDOUSB_EN_SHIFT 0x00
+
+/* Bit definitions for PMU_SECONDARY_INT2 */
+#define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC 0x20
+#define PALMAS_PMU_SECONDARY_INT2_DVFS2_INT_SRC_SHIFT 0x05
+#define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC 0x10
+#define PALMAS_PMU_SECONDARY_INT2_DVFS1_INT_SRC_SHIFT 0x04
+#define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK 0x02
+#define PALMAS_PMU_SECONDARY_INT2_DVFS2_MASK_SHIFT 0x01
+#define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK 0x01
+#define PALMAS_PMU_SECONDARY_INT2_DVFS1_MASK_SHIFT 0x00
+
+/* Registers for function RESOURCE */
+#define PALMAS_CLK32KG_CTRL 0x00
+#define PALMAS_CLK32KGAUDIO_CTRL 0x01
+#define PALMAS_REGEN1_CTRL 0x02
+#define PALMAS_REGEN2_CTRL 0x03
+#define PALMAS_SYSEN1_CTRL 0x04
+#define PALMAS_SYSEN2_CTRL 0x05
+#define PALMAS_NSLEEP_RES_ASSIGN 0x06
+#define PALMAS_NSLEEP_SMPS_ASSIGN 0x07
+#define PALMAS_NSLEEP_LDO_ASSIGN1 0x08
+#define PALMAS_NSLEEP_LDO_ASSIGN2 0x09
+#define PALMAS_ENABLE1_RES_ASSIGN 0x0A
+#define PALMAS_ENABLE1_SMPS_ASSIGN 0x0B
+#define PALMAS_ENABLE1_LDO_ASSIGN1 0x0C
+#define PALMAS_ENABLE1_LDO_ASSIGN2 0x0D
+#define PALMAS_ENABLE2_RES_ASSIGN 0x0E
+#define PALMAS_ENABLE2_SMPS_ASSIGN 0x0F
+#define PALMAS_ENABLE2_LDO_ASSIGN1 0x10
+#define PALMAS_ENABLE2_LDO_ASSIGN2 0x11
+#define PALMAS_REGEN3_CTRL 0x12
+
+/* Bit definitions for CLK32KG_CTRL */
+#define PALMAS_CLK32KG_CTRL_STATUS 0x10
+#define PALMAS_CLK32KG_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_CLK32KG_CTRL_MODE_SLEEP 0x04
+#define PALMAS_CLK32KG_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_CLK32KG_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_CLK32KG_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for CLK32KGAUDIO_CTRL */
+#define PALMAS_CLK32KGAUDIO_CTRL_STATUS 0x10
+#define PALMAS_CLK32KGAUDIO_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3 0x08
+#define PALMAS_CLK32KGAUDIO_CTRL_RESERVED3_SHIFT 0x03
+#define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP 0x04
+#define PALMAS_CLK32KGAUDIO_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_CLK32KGAUDIO_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for REGEN1_CTRL */
+#define PALMAS_REGEN1_CTRL_STATUS 0x10
+#define PALMAS_REGEN1_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_REGEN1_CTRL_MODE_SLEEP 0x04
+#define PALMAS_REGEN1_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_REGEN1_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_REGEN1_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for REGEN2_CTRL */
+#define PALMAS_REGEN2_CTRL_STATUS 0x10
+#define PALMAS_REGEN2_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_REGEN2_CTRL_MODE_SLEEP 0x04
+#define PALMAS_REGEN2_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_REGEN2_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_REGEN2_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SYSEN1_CTRL */
+#define PALMAS_SYSEN1_CTRL_STATUS 0x10
+#define PALMAS_SYSEN1_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SYSEN1_CTRL_MODE_SLEEP 0x04
+#define PALMAS_SYSEN1_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SYSEN1_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_SYSEN1_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SYSEN2_CTRL */
+#define PALMAS_SYSEN2_CTRL_STATUS 0x10
+#define PALMAS_SYSEN2_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_SYSEN2_CTRL_MODE_SLEEP 0x04
+#define PALMAS_SYSEN2_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_SYSEN2_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_SYSEN2_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for NSLEEP_RES_ASSIGN */
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN3 0x40
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN3_SHIFT 0x06
+#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO 0x20
+#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05
+#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG 0x10
+#define PALMAS_NSLEEP_RES_ASSIGN_CLK32KG_SHIFT 0x04
+#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2 0x08
+#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN2_SHIFT 0x03
+#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1 0x04
+#define PALMAS_NSLEEP_RES_ASSIGN_SYSEN1_SHIFT 0x02
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN2 0x02
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN2_SHIFT 0x01
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN1 0x01
+#define PALMAS_NSLEEP_RES_ASSIGN_REGEN1_SHIFT 0x00
+
+/* Bit definitions for NSLEEP_SMPS_ASSIGN */
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10 0x80
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS10_SHIFT 0x07
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9 0x40
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS9_SHIFT 0x06
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8 0x20
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS8_SHIFT 0x05
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7 0x10
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS7_SHIFT 0x04
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6 0x08
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS6_SHIFT 0x03
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45 0x04
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS45_SHIFT 0x02
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3 0x02
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS3_SHIFT 0x01
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12 0x01
+#define PALMAS_NSLEEP_SMPS_ASSIGN_SMPS12_SHIFT 0x00
+
+/* Bit definitions for NSLEEP_LDO_ASSIGN1 */
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8 0x80
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO8_SHIFT 0x07
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7 0x40
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO7_SHIFT 0x06
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6 0x20
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO6_SHIFT 0x05
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5 0x10
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO5_SHIFT 0x04
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4 0x08
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO4_SHIFT 0x03
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3 0x04
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO3_SHIFT 0x02
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2 0x02
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO2_SHIFT 0x01
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1 0x01
+#define PALMAS_NSLEEP_LDO_ASSIGN1_LDO1_SHIFT 0x00
+
+/* Bit definitions for NSLEEP_LDO_ASSIGN2 */
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB 0x04
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOUSB_SHIFT 0x02
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN 0x02
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDOLN_SHIFT 0x01
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9 0x01
+#define PALMAS_NSLEEP_LDO_ASSIGN2_LDO9_SHIFT 0x00
+
+/* Bit definitions for ENABLE1_RES_ASSIGN */
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN3 0x40
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN3_SHIFT 0x06
+#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO 0x20
+#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05
+#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG 0x10
+#define PALMAS_ENABLE1_RES_ASSIGN_CLK32KG_SHIFT 0x04
+#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2 0x08
+#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN2_SHIFT 0x03
+#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1 0x04
+#define PALMAS_ENABLE1_RES_ASSIGN_SYSEN1_SHIFT 0x02
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN2 0x02
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN2_SHIFT 0x01
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN1 0x01
+#define PALMAS_ENABLE1_RES_ASSIGN_REGEN1_SHIFT 0x00
+
+/* Bit definitions for ENABLE1_SMPS_ASSIGN */
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10 0x80
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS10_SHIFT 0x07
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9 0x40
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS9_SHIFT 0x06
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8 0x20
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS8_SHIFT 0x05
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7 0x10
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS7_SHIFT 0x04
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6 0x08
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS6_SHIFT 0x03
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45 0x04
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS45_SHIFT 0x02
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3 0x02
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS3_SHIFT 0x01
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12 0x01
+#define PALMAS_ENABLE1_SMPS_ASSIGN_SMPS12_SHIFT 0x00
+
+/* Bit definitions for ENABLE1_LDO_ASSIGN1 */
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8 0x80
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO8_SHIFT 0x07
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7 0x40
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO7_SHIFT 0x06
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6 0x20
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO6_SHIFT 0x05
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5 0x10
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO5_SHIFT 0x04
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4 0x08
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO4_SHIFT 0x03
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3 0x04
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO3_SHIFT 0x02
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2 0x02
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO2_SHIFT 0x01
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1 0x01
+#define PALMAS_ENABLE1_LDO_ASSIGN1_LDO1_SHIFT 0x00
+
+/* Bit definitions for ENABLE1_LDO_ASSIGN2 */
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB 0x04
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOUSB_SHIFT 0x02
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN 0x02
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDOLN_SHIFT 0x01
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9 0x01
+#define PALMAS_ENABLE1_LDO_ASSIGN2_LDO9_SHIFT 0x00
+
+/* Bit definitions for ENABLE2_RES_ASSIGN */
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN3 0x40
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN3_SHIFT 0x06
+#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO 0x20
+#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KGAUDIO_SHIFT 0x05
+#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG 0x10
+#define PALMAS_ENABLE2_RES_ASSIGN_CLK32KG_SHIFT 0x04
+#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2 0x08
+#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN2_SHIFT 0x03
+#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1 0x04
+#define PALMAS_ENABLE2_RES_ASSIGN_SYSEN1_SHIFT 0x02
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN2 0x02
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN2_SHIFT 0x01
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN1 0x01
+#define PALMAS_ENABLE2_RES_ASSIGN_REGEN1_SHIFT 0x00
+
+/* Bit definitions for ENABLE2_SMPS_ASSIGN */
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10 0x80
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS10_SHIFT 0x07
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9 0x40
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS9_SHIFT 0x06
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8 0x20
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS8_SHIFT 0x05
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7 0x10
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS7_SHIFT 0x04
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6 0x08
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS6_SHIFT 0x03
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45 0x04
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS45_SHIFT 0x02
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3 0x02
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS3_SHIFT 0x01
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12 0x01
+#define PALMAS_ENABLE2_SMPS_ASSIGN_SMPS12_SHIFT 0x00
+
+/* Bit definitions for ENABLE2_LDO_ASSIGN1 */
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8 0x80
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO8_SHIFT 0x07
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7 0x40
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO7_SHIFT 0x06
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6 0x20
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO6_SHIFT 0x05
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5 0x10
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO5_SHIFT 0x04
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4 0x08
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO4_SHIFT 0x03
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3 0x04
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO3_SHIFT 0x02
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2 0x02
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO2_SHIFT 0x01
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1 0x01
+#define PALMAS_ENABLE2_LDO_ASSIGN1_LDO1_SHIFT 0x00
+
+/* Bit definitions for ENABLE2_LDO_ASSIGN2 */
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB 0x04
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOUSB_SHIFT 0x02
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN 0x02
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDOLN_SHIFT 0x01
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9 0x01
+#define PALMAS_ENABLE2_LDO_ASSIGN2_LDO9_SHIFT 0x00
+
+/* Bit definitions for REGEN3_CTRL */
+#define PALMAS_REGEN3_CTRL_STATUS 0x10
+#define PALMAS_REGEN3_CTRL_STATUS_SHIFT 0x04
+#define PALMAS_REGEN3_CTRL_MODE_SLEEP 0x04
+#define PALMAS_REGEN3_CTRL_MODE_SLEEP_SHIFT 0x02
+#define PALMAS_REGEN3_CTRL_MODE_ACTIVE 0x01
+#define PALMAS_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Registers for function PAD_CONTROL */
+#define PALMAS_OD_OUTPUT_CTRL2 0x02
+#define PALMAS_POLARITY_CTRL2 0x03
+#define PALMAS_PU_PD_INPUT_CTRL1 0x04
+#define PALMAS_PU_PD_INPUT_CTRL2 0x05
+#define PALMAS_PU_PD_INPUT_CTRL3 0x06
+#define PALMAS_PU_PD_INPUT_CTRL5 0x07
+#define PALMAS_OD_OUTPUT_CTRL 0x08
+#define PALMAS_POLARITY_CTRL 0x09
+#define PALMAS_PRIMARY_SECONDARY_PAD1 0x0A
+#define PALMAS_PRIMARY_SECONDARY_PAD2 0x0B
+#define PALMAS_I2C_SPI 0x0C
+#define PALMAS_PU_PD_INPUT_CTRL4 0x0D
+#define PALMAS_PRIMARY_SECONDARY_PAD3 0x0E
+#define PALMAS_PRIMARY_SECONDARY_PAD4 0x0F
+
+/* Bit definitions for PU_PD_INPUT_CTRL1 */
+#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD 0x40
+#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD_SHIFT 0x06
+#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU 0x20
+#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PU_SHIFT 0x05
+#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD 0x10
+#define PALMAS_PU_PD_INPUT_CTRL1_GPADC_START_PD_SHIFT 0x04
+#define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD 0x04
+#define PALMAS_PU_PD_INPUT_CTRL1_PWRDOWN_PD_SHIFT 0x02
+#define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU 0x02
+#define PALMAS_PU_PD_INPUT_CTRL1_NRESWARM_PU_SHIFT 0x01
+
+/* Bit definitions for PU_PD_INPUT_CTRL2 */
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU 0x20
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PU_SHIFT 0x05
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD 0x10
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE2_PD_SHIFT 0x04
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU 0x08
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PU_SHIFT 0x03
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD 0x04
+#define PALMAS_PU_PD_INPUT_CTRL2_ENABLE1_PD_SHIFT 0x02
+#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU 0x02
+#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PU_SHIFT 0x01
+#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD 0x01
+#define PALMAS_PU_PD_INPUT_CTRL2_NSLEEP_PD_SHIFT 0x00
+
+/* Bit definitions for PU_PD_INPUT_CTRL3 */
+#define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD 0x40
+#define PALMAS_PU_PD_INPUT_CTRL3_ACOK_PD_SHIFT 0x06
+#define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD 0x10
+#define PALMAS_PU_PD_INPUT_CTRL3_CHRG_DET_N_PD_SHIFT 0x04
+#define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD 0x04
+#define PALMAS_PU_PD_INPUT_CTRL3_POWERHOLD_PD_SHIFT 0x02
+#define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD 0x01
+#define PALMAS_PU_PD_INPUT_CTRL3_MSECURE_PD_SHIFT 0x00
+
+/* Bit definitions for OD_OUTPUT_CTRL */
+#define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD 0x80
+#define PALMAS_OD_OUTPUT_CTRL_PWM_2_OD_SHIFT 0x07
+#define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD 0x40
+#define PALMAS_OD_OUTPUT_CTRL_VBUSDET_OD_SHIFT 0x06
+#define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD 0x20
+#define PALMAS_OD_OUTPUT_CTRL_PWM_1_OD_SHIFT 0x05
+#define PALMAS_OD_OUTPUT_CTRL_INT_OD 0x08
+#define PALMAS_OD_OUTPUT_CTRL_INT_OD_SHIFT 0x03
+
+/* Bit definitions for POLARITY_CTRL */
+#define PALMAS_POLARITY_CTRL_INT_POLARITY 0x80
+#define PALMAS_POLARITY_CTRL_INT_POLARITY_SHIFT 0x07
+#define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY 0x40
+#define PALMAS_POLARITY_CTRL_ENABLE2_POLARITY_SHIFT 0x06
+#define PALMAS_POLARITY_CTRL_ENABLE1_POLARITY 0x20
+#define PALMAS_POLARITY_CTRL_ENABLE1_POLARITY_SHIFT 0x05
+#define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY 0x10
+#define PALMAS_POLARITY_CTRL_NSLEEP_POLARITY_SHIFT 0x04
+#define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY 0x08
+#define PALMAS_POLARITY_CTRL_RESET_IN_POLARITY_SHIFT 0x03
+#define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY 0x04
+#define PALMAS_POLARITY_CTRL_GPIO_3_CHRG_DET_N_POLARITY_SHIFT 0x02
+#define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY 0x02
+#define PALMAS_POLARITY_CTRL_POWERGOOD_USB_PSEL_POLARITY_SHIFT 0x01
+#define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY 0x01
+#define PALMAS_POLARITY_CTRL_PWRDOWN_POLARITY_SHIFT 0x00
+
+/* Bit definitions for PRIMARY_SECONDARY_PAD1 */
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3 0x80
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3_SHIFT 0x07
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK 0x60
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT 0x05
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK 0x18
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT 0x03
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0 0x04
+#define PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0_SHIFT 0x02
+#define PALMAS_PRIMARY_SECONDARY_PAD1_VAC 0x02
+#define PALMAS_PRIMARY_SECONDARY_PAD1_VAC_SHIFT 0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD 0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD1_POWERGOOD_SHIFT 0x00
+
+/* Bit definitions for PRIMARY_SECONDARY_PAD2 */
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK 0x30
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_SHIFT 0x04
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6 0x08
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6_SHIFT 0x03
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0x06
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_SHIFT 0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4 0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4_SHIFT 0x00
+
+/* Bit definitions for I2C_SPI */
+#define PALMAS_I2C_SPI_I2C2OTP_EN 0x80
+#define PALMAS_I2C_SPI_I2C2OTP_EN_SHIFT 0x07
+#define PALMAS_I2C_SPI_I2C2OTP_PAGESEL 0x40
+#define PALMAS_I2C_SPI_I2C2OTP_PAGESEL_SHIFT 0x06
+#define PALMAS_I2C_SPI_ID_I2C2 0x20
+#define PALMAS_I2C_SPI_ID_I2C2_SHIFT 0x05
+#define PALMAS_I2C_SPI_I2C_SPI 0x10
+#define PALMAS_I2C_SPI_I2C_SPI_SHIFT 0x04
+#define PALMAS_I2C_SPI_ID_I2C1_MASK 0x0F
+#define PALMAS_I2C_SPI_ID_I2C1_SHIFT 0x00
+
+/* Bit definitions for PU_PD_INPUT_CTRL4 */
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD 0x40
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_DAT_PD_SHIFT 0x06
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD 0x10
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS2_CLK_PD_SHIFT 0x04
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD 0x04
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_DAT_PD_SHIFT 0x02
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD 0x01
+#define PALMAS_PU_PD_INPUT_CTRL4_DVFS1_CLK_PD_SHIFT 0x00
+
+/* Bit definitions for PRIMARY_SECONDARY_PAD3 */
+#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2 0x02
+#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2_SHIFT 0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1 0x01
+#define PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1_SHIFT 0x00
+
+/* Registers for function LED_PWM */
+#define PALMAS_LED_PERIOD_CTRL 0x00
+#define PALMAS_LED_CTRL 0x01
+#define PALMAS_PWM_CTRL1 0x02
+#define PALMAS_PWM_CTRL2 0x03
+
+/* Bit definitions for LED_PERIOD_CTRL */
+#define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_MASK 0x38
+#define PALMAS_LED_PERIOD_CTRL_LED_2_PERIOD_SHIFT 0x03
+#define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_MASK 0x07
+#define PALMAS_LED_PERIOD_CTRL_LED_1_PERIOD_SHIFT 0x00
+
+/* Bit definitions for LED_CTRL */
+#define PALMAS_LED_CTRL_LED_2_SEQ 0x20
+#define PALMAS_LED_CTRL_LED_2_SEQ_SHIFT 0x05
+#define PALMAS_LED_CTRL_LED_1_SEQ 0x10
+#define PALMAS_LED_CTRL_LED_1_SEQ_SHIFT 0x04
+#define PALMAS_LED_CTRL_LED_2_ON_TIME_MASK 0x0c
+#define PALMAS_LED_CTRL_LED_2_ON_TIME_SHIFT 0x02
+#define PALMAS_LED_CTRL_LED_1_ON_TIME_MASK 0x03
+#define PALMAS_LED_CTRL_LED_1_ON_TIME_SHIFT 0x00
+
+/* Bit definitions for PWM_CTRL1 */
+#define PALMAS_PWM_CTRL1_PWM_FREQ_EN 0x02
+#define PALMAS_PWM_CTRL1_PWM_FREQ_EN_SHIFT 0x01
+#define PALMAS_PWM_CTRL1_PWM_FREQ_SEL 0x01
+#define PALMAS_PWM_CTRL1_PWM_FREQ_SEL_SHIFT 0x00
+
+/* Bit definitions for PWM_CTRL2 */
+#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_MASK 0xFF
+#define PALMAS_PWM_CTRL2_PWM_DUTY_SEL_SHIFT 0x00
+
+/* Registers for function INTERRUPT */
+#define PALMAS_INT1_STATUS 0x00
+#define PALMAS_INT1_MASK 0x01
+#define PALMAS_INT1_LINE_STATE 0x02
+#define PALMAS_INT1_EDGE_DETECT1_RESERVED 0x03
+#define PALMAS_INT1_EDGE_DETECT2_RESERVED 0x04
+#define PALMAS_INT2_STATUS 0x05
+#define PALMAS_INT2_MASK 0x06
+#define PALMAS_INT2_LINE_STATE 0x07
+#define PALMAS_INT2_EDGE_DETECT1_RESERVED 0x08
+#define PALMAS_INT2_EDGE_DETECT2_RESERVED 0x09
+#define PALMAS_INT3_STATUS 0x0A
+#define PALMAS_INT3_MASK 0x0B
+#define PALMAS_INT3_LINE_STATE 0x0C
+#define PALMAS_INT3_EDGE_DETECT1_RESERVED 0x0D
+#define PALMAS_INT3_EDGE_DETECT2_RESERVED 0x0E
+#define PALMAS_INT4_STATUS 0x0F
+#define PALMAS_INT4_MASK 0x10
+#define PALMAS_INT4_LINE_STATE 0x11
+#define PALMAS_INT4_EDGE_DETECT1 0x12
+#define PALMAS_INT4_EDGE_DETECT2 0x13
+#define PALMAS_INT_CTRL 0x14
+
+/* Bit definitions for INT1_STATUS */
+#define PALMAS_INT1_STATUS_VBAT_MON 0x80
+#define PALMAS_INT1_STATUS_VBAT_MON_SHIFT 0x07
+#define PALMAS_INT1_STATUS_VSYS_MON 0x40
+#define PALMAS_INT1_STATUS_VSYS_MON_SHIFT 0x06
+#define PALMAS_INT1_STATUS_HOTDIE 0x20
+#define PALMAS_INT1_STATUS_HOTDIE_SHIFT 0x05
+#define PALMAS_INT1_STATUS_PWRDOWN 0x10
+#define PALMAS_INT1_STATUS_PWRDOWN_SHIFT 0x04
+#define PALMAS_INT1_STATUS_RPWRON 0x08
+#define PALMAS_INT1_STATUS_RPWRON_SHIFT 0x03
+#define PALMAS_INT1_STATUS_LONG_PRESS_KEY 0x04
+#define PALMAS_INT1_STATUS_LONG_PRESS_KEY_SHIFT 0x02
+#define PALMAS_INT1_STATUS_PWRON 0x02
+#define PALMAS_INT1_STATUS_PWRON_SHIFT 0x01
+#define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV 0x01
+#define PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV_SHIFT 0x00
+
+/* Bit definitions for INT1_MASK */
+#define PALMAS_INT1_MASK_VBAT_MON 0x80
+#define PALMAS_INT1_MASK_VBAT_MON_SHIFT 0x07
+#define PALMAS_INT1_MASK_VSYS_MON 0x40
+#define PALMAS_INT1_MASK_VSYS_MON_SHIFT 0x06
+#define PALMAS_INT1_MASK_HOTDIE 0x20
+#define PALMAS_INT1_MASK_HOTDIE_SHIFT 0x05
+#define PALMAS_INT1_MASK_PWRDOWN 0x10
+#define PALMAS_INT1_MASK_PWRDOWN_SHIFT 0x04
+#define PALMAS_INT1_MASK_RPWRON 0x08
+#define PALMAS_INT1_MASK_RPWRON_SHIFT 0x03
+#define PALMAS_INT1_MASK_LONG_PRESS_KEY 0x04
+#define PALMAS_INT1_MASK_LONG_PRESS_KEY_SHIFT 0x02
+#define PALMAS_INT1_MASK_PWRON 0x02
+#define PALMAS_INT1_MASK_PWRON_SHIFT 0x01
+#define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV 0x01
+#define PALMAS_INT1_MASK_CHARG_DET_N_VBUS_OVV_SHIFT 0x00
+
+/* Bit definitions for INT1_LINE_STATE */
+#define PALMAS_INT1_LINE_STATE_VBAT_MON 0x80
+#define PALMAS_INT1_LINE_STATE_VBAT_MON_SHIFT 0x07
+#define PALMAS_INT1_LINE_STATE_VSYS_MON 0x40
+#define PALMAS_INT1_LINE_STATE_VSYS_MON_SHIFT 0x06
+#define PALMAS_INT1_LINE_STATE_HOTDIE 0x20
+#define PALMAS_INT1_LINE_STATE_HOTDIE_SHIFT 0x05
+#define PALMAS_INT1_LINE_STATE_PWRDOWN 0x10
+#define PALMAS_INT1_LINE_STATE_PWRDOWN_SHIFT 0x04
+#define PALMAS_INT1_LINE_STATE_RPWRON 0x08
+#define PALMAS_INT1_LINE_STATE_RPWRON_SHIFT 0x03
+#define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY 0x04
+#define PALMAS_INT1_LINE_STATE_LONG_PRESS_KEY_SHIFT 0x02
+#define PALMAS_INT1_LINE_STATE_PWRON 0x02
+#define PALMAS_INT1_LINE_STATE_PWRON_SHIFT 0x01
+#define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV 0x01
+#define PALMAS_INT1_LINE_STATE_CHARG_DET_N_VBUS_OVV_SHIFT 0x00
+
+/* Bit definitions for INT2_STATUS */
+#define PALMAS_INT2_STATUS_VAC_ACOK 0x80
+#define PALMAS_INT2_STATUS_VAC_ACOK_SHIFT 0x07
+#define PALMAS_INT2_STATUS_SHORT 0x40
+#define PALMAS_INT2_STATUS_SHORT_SHIFT 0x06
+#define PALMAS_INT2_STATUS_FBI_BB 0x20
+#define PALMAS_INT2_STATUS_FBI_BB_SHIFT 0x05
+#define PALMAS_INT2_STATUS_RESET_IN 0x10
+#define PALMAS_INT2_STATUS_RESET_IN_SHIFT 0x04
+#define PALMAS_INT2_STATUS_BATREMOVAL 0x08
+#define PALMAS_INT2_STATUS_BATREMOVAL_SHIFT 0x03
+#define PALMAS_INT2_STATUS_WDT 0x04
+#define PALMAS_INT2_STATUS_WDT_SHIFT 0x02
+#define PALMAS_INT2_STATUS_RTC_TIMER 0x02
+#define PALMAS_INT2_STATUS_RTC_TIMER_SHIFT 0x01
+#define PALMAS_INT2_STATUS_RTC_ALARM 0x01
+#define PALMAS_INT2_STATUS_RTC_ALARM_SHIFT 0x00
+
+/* Bit definitions for INT2_MASK */
+#define PALMAS_INT2_MASK_VAC_ACOK 0x80
+#define PALMAS_INT2_MASK_VAC_ACOK_SHIFT 0x07
+#define PALMAS_INT2_MASK_SHORT 0x40
+#define PALMAS_INT2_MASK_SHORT_SHIFT 0x06
+#define PALMAS_INT2_MASK_FBI_BB 0x20
+#define PALMAS_INT2_MASK_FBI_BB_SHIFT 0x05
+#define PALMAS_INT2_MASK_RESET_IN 0x10
+#define PALMAS_INT2_MASK_RESET_IN_SHIFT 0x04
+#define PALMAS_INT2_MASK_BATREMOVAL 0x08
+#define PALMAS_INT2_MASK_BATREMOVAL_SHIFT 0x03
+#define PALMAS_INT2_MASK_WDT 0x04
+#define PALMAS_INT2_MASK_WDT_SHIFT 0x02
+#define PALMAS_INT2_MASK_RTC_TIMER 0x02
+#define PALMAS_INT2_MASK_RTC_TIMER_SHIFT 0x01
+#define PALMAS_INT2_MASK_RTC_ALARM 0x01
+#define PALMAS_INT2_MASK_RTC_ALARM_SHIFT 0x00
+
+/* Bit definitions for INT2_LINE_STATE */
+#define PALMAS_INT2_LINE_STATE_VAC_ACOK 0x80
+#define PALMAS_INT2_LINE_STATE_VAC_ACOK_SHIFT 0x07
+#define PALMAS_INT2_LINE_STATE_SHORT 0x40
+#define PALMAS_INT2_LINE_STATE_SHORT_SHIFT 0x06
+#define PALMAS_INT2_LINE_STATE_FBI_BB 0x20
+#define PALMAS_INT2_LINE_STATE_FBI_BB_SHIFT 0x05
+#define PALMAS_INT2_LINE_STATE_RESET_IN 0x10
+#define PALMAS_INT2_LINE_STATE_RESET_IN_SHIFT 0x04
+#define PALMAS_INT2_LINE_STATE_BATREMOVAL 0x08
+#define PALMAS_INT2_LINE_STATE_BATREMOVAL_SHIFT 0x03
+#define PALMAS_INT2_LINE_STATE_WDT 0x04
+#define PALMAS_INT2_LINE_STATE_WDT_SHIFT 0x02
+#define PALMAS_INT2_LINE_STATE_RTC_TIMER 0x02
+#define PALMAS_INT2_LINE_STATE_RTC_TIMER_SHIFT 0x01
+#define PALMAS_INT2_LINE_STATE_RTC_ALARM 0x01
+#define PALMAS_INT2_LINE_STATE_RTC_ALARM_SHIFT 0x00
+
+/* Bit definitions for INT3_STATUS */
+#define PALMAS_INT3_STATUS_VBUS 0x80
+#define PALMAS_INT3_STATUS_VBUS_SHIFT 0x07
+#define PALMAS_INT3_STATUS_VBUS_OTG 0x40
+#define PALMAS_INT3_STATUS_VBUS_OTG_SHIFT 0x06
+#define PALMAS_INT3_STATUS_ID 0x20
+#define PALMAS_INT3_STATUS_ID_SHIFT 0x05
+#define PALMAS_INT3_STATUS_ID_OTG 0x10
+#define PALMAS_INT3_STATUS_ID_OTG_SHIFT 0x04
+#define PALMAS_INT3_STATUS_GPADC_EOC_RT 0x08
+#define PALMAS_INT3_STATUS_GPADC_EOC_RT_SHIFT 0x03
+#define PALMAS_INT3_STATUS_GPADC_EOC_SW 0x04
+#define PALMAS_INT3_STATUS_GPADC_EOC_SW_SHIFT 0x02
+#define PALMAS_INT3_STATUS_GPADC_AUTO_1 0x02
+#define PALMAS_INT3_STATUS_GPADC_AUTO_1_SHIFT 0x01
+#define PALMAS_INT3_STATUS_GPADC_AUTO_0 0x01
+#define PALMAS_INT3_STATUS_GPADC_AUTO_0_SHIFT 0x00
+
+/* Bit definitions for INT3_MASK */
+#define PALMAS_INT3_MASK_VBUS 0x80
+#define PALMAS_INT3_MASK_VBUS_SHIFT 0x07
+#define PALMAS_INT3_MASK_VBUS_OTG 0x40
+#define PALMAS_INT3_MASK_VBUS_OTG_SHIFT 0x06
+#define PALMAS_INT3_MASK_ID 0x20
+#define PALMAS_INT3_MASK_ID_SHIFT 0x05
+#define PALMAS_INT3_MASK_ID_OTG 0x10
+#define PALMAS_INT3_MASK_ID_OTG_SHIFT 0x04
+#define PALMAS_INT3_MASK_GPADC_EOC_RT 0x08
+#define PALMAS_INT3_MASK_GPADC_EOC_RT_SHIFT 0x03
+#define PALMAS_INT3_MASK_GPADC_EOC_SW 0x04
+#define PALMAS_INT3_MASK_GPADC_EOC_SW_SHIFT 0x02
+#define PALMAS_INT3_MASK_GPADC_AUTO_1 0x02
+#define PALMAS_INT3_MASK_GPADC_AUTO_1_SHIFT 0x01
+#define PALMAS_INT3_MASK_GPADC_AUTO_0 0x01
+#define PALMAS_INT3_MASK_GPADC_AUTO_0_SHIFT 0x00
+
+/* Bit definitions for INT3_LINE_STATE */
+#define PALMAS_INT3_LINE_STATE_VBUS 0x80
+#define PALMAS_INT3_LINE_STATE_VBUS_SHIFT 0x07
+#define PALMAS_INT3_LINE_STATE_VBUS_OTG 0x40
+#define PALMAS_INT3_LINE_STATE_VBUS_OTG_SHIFT 0x06
+#define PALMAS_INT3_LINE_STATE_ID 0x20
+#define PALMAS_INT3_LINE_STATE_ID_SHIFT 0x05
+#define PALMAS_INT3_LINE_STATE_ID_OTG 0x10
+#define PALMAS_INT3_LINE_STATE_ID_OTG_SHIFT 0x04
+#define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT 0x08
+#define PALMAS_INT3_LINE_STATE_GPADC_EOC_RT_SHIFT 0x03
+#define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW 0x04
+#define PALMAS_INT3_LINE_STATE_GPADC_EOC_SW_SHIFT 0x02
+#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_1 0x02
+#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_1_SHIFT 0x01
+#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0 0x01
+#define PALMAS_INT3_LINE_STATE_GPADC_AUTO_0_SHIFT 0x00
+
+/* Bit definitions for INT4_STATUS */
+#define PALMAS_INT4_STATUS_GPIO_7 0x80
+#define PALMAS_INT4_STATUS_GPIO_7_SHIFT 0x07
+#define PALMAS_INT4_STATUS_GPIO_6 0x40
+#define PALMAS_INT4_STATUS_GPIO_6_SHIFT 0x06
+#define PALMAS_INT4_STATUS_GPIO_5 0x20
+#define PALMAS_INT4_STATUS_GPIO_5_SHIFT 0x05
+#define PALMAS_INT4_STATUS_GPIO_4 0x10
+#define PALMAS_INT4_STATUS_GPIO_4_SHIFT 0x04
+#define PALMAS_INT4_STATUS_GPIO_3 0x08
+#define PALMAS_INT4_STATUS_GPIO_3_SHIFT 0x03
+#define PALMAS_INT4_STATUS_GPIO_2 0x04
+#define PALMAS_INT4_STATUS_GPIO_2_SHIFT 0x02
+#define PALMAS_INT4_STATUS_GPIO_1 0x02
+#define PALMAS_INT4_STATUS_GPIO_1_SHIFT 0x01
+#define PALMAS_INT4_STATUS_GPIO_0 0x01
+#define PALMAS_INT4_STATUS_GPIO_0_SHIFT 0x00
+
+/* Bit definitions for INT4_MASK */
+#define PALMAS_INT4_MASK_GPIO_7 0x80
+#define PALMAS_INT4_MASK_GPIO_7_SHIFT 0x07
+#define PALMAS_INT4_MASK_GPIO_6 0x40
+#define PALMAS_INT4_MASK_GPIO_6_SHIFT 0x06
+#define PALMAS_INT4_MASK_GPIO_5 0x20
+#define PALMAS_INT4_MASK_GPIO_5_SHIFT 0x05
+#define PALMAS_INT4_MASK_GPIO_4 0x10
+#define PALMAS_INT4_MASK_GPIO_4_SHIFT 0x04
+#define PALMAS_INT4_MASK_GPIO_3 0x08
+#define PALMAS_INT4_MASK_GPIO_3_SHIFT 0x03
+#define PALMAS_INT4_MASK_GPIO_2 0x04
+#define PALMAS_INT4_MASK_GPIO_2_SHIFT 0x02
+#define PALMAS_INT4_MASK_GPIO_1 0x02
+#define PALMAS_INT4_MASK_GPIO_1_SHIFT 0x01
+#define PALMAS_INT4_MASK_GPIO_0 0x01
+#define PALMAS_INT4_MASK_GPIO_0_SHIFT 0x00
+
+/* Bit definitions for INT4_LINE_STATE */
+#define PALMAS_INT4_LINE_STATE_GPIO_7 0x80
+#define PALMAS_INT4_LINE_STATE_GPIO_7_SHIFT 0x07
+#define PALMAS_INT4_LINE_STATE_GPIO_6 0x40
+#define PALMAS_INT4_LINE_STATE_GPIO_6_SHIFT 0x06
+#define PALMAS_INT4_LINE_STATE_GPIO_5 0x20
+#define PALMAS_INT4_LINE_STATE_GPIO_5_SHIFT 0x05
+#define PALMAS_INT4_LINE_STATE_GPIO_4 0x10
+#define PALMAS_INT4_LINE_STATE_GPIO_4_SHIFT 0x04
+#define PALMAS_INT4_LINE_STATE_GPIO_3 0x08
+#define PALMAS_INT4_LINE_STATE_GPIO_3_SHIFT 0x03
+#define PALMAS_INT4_LINE_STATE_GPIO_2 0x04
+#define PALMAS_INT4_LINE_STATE_GPIO_2_SHIFT 0x02
+#define PALMAS_INT4_LINE_STATE_GPIO_1 0x02
+#define PALMAS_INT4_LINE_STATE_GPIO_1_SHIFT 0x01
+#define PALMAS_INT4_LINE_STATE_GPIO_0 0x01
+#define PALMAS_INT4_LINE_STATE_GPIO_0_SHIFT 0x00
+
+/* Bit definitions for INT4_EDGE_DETECT1 */
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING 0x80
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_RISING_SHIFT 0x07
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING 0x40
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_3_FALLING_SHIFT 0x06
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING 0x20
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_RISING_SHIFT 0x05
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING 0x10
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_2_FALLING_SHIFT 0x04
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING 0x08
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_RISING_SHIFT 0x03
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING 0x04
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_1_FALLING_SHIFT 0x02
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING 0x02
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_RISING_SHIFT 0x01
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING 0x01
+#define PALMAS_INT4_EDGE_DETECT1_GPIO_0_FALLING_SHIFT 0x00
+
+/* Bit definitions for INT4_EDGE_DETECT2 */
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING 0x80
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_RISING_SHIFT 0x07
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING 0x40
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_7_FALLING_SHIFT 0x06
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING 0x20
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_RISING_SHIFT 0x05
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING 0x10
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_6_FALLING_SHIFT 0x04
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING 0x08
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_RISING_SHIFT 0x03
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING 0x04
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_5_FALLING_SHIFT 0x02
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING 0x02
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_RISING_SHIFT 0x01
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING 0x01
+#define PALMAS_INT4_EDGE_DETECT2_GPIO_4_FALLING_SHIFT 0x00
+
+/* Bit definitions for INT_CTRL */
+#define PALMAS_INT_CTRL_INT_PENDING 0x04
+#define PALMAS_INT_CTRL_INT_PENDING_SHIFT 0x02
+#define PALMAS_INT_CTRL_INT_CLEAR 0x01
+#define PALMAS_INT_CTRL_INT_CLEAR_SHIFT 0x00
+
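+/*
+ * Illustrative sketch only, not part of the original register map: the
+ * MASK/SHIFT pairs above are meant to be used with the generic accessors
+ * declared earlier in this header (palmas_read(), palmas_write() and
+ * PALMAS_INTERRUPT_BASE are assumed from there).  A driver could, for
+ * instance, poll INT_CTRL for a pending interrupt and acknowledge it:
+ */
+static inline int palmas_irq_pending(struct palmas *palmas)
+{
+	unsigned int val;
+	int ret;
+
+	ret = palmas_read(palmas, PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL, &val);
+	if (ret < 0)
+		return ret;
+
+	if (!(val & PALMAS_INT_CTRL_INT_PENDING))
+		return 0;
+
+	/* Write INT_CLEAR to acknowledge, as the bit definition above suggests */
+	ret = palmas_write(palmas, PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL,
+			   PALMAS_INT_CTRL_INT_CLEAR);
+	return ret < 0 ? ret : 1;
+}
+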
+/* Registers for function USB_OTG */
+#define PALMAS_USB_WAKEUP 0x03
+#define PALMAS_USB_VBUS_CTRL_SET 0x04
+#define PALMAS_USB_VBUS_CTRL_CLR 0x05
+#define PALMAS_USB_ID_CTRL_SET 0x06
+#define PALMAS_USB_ID_CTRL_CLEAR 0x07
+#define PALMAS_USB_VBUS_INT_SRC 0x08
+#define PALMAS_USB_VBUS_INT_LATCH_SET 0x09
+#define PALMAS_USB_VBUS_INT_LATCH_CLR 0x0A
+#define PALMAS_USB_VBUS_INT_EN_LO_SET 0x0B
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR 0x0C
+#define PALMAS_USB_VBUS_INT_EN_HI_SET 0x0D
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR 0x0E
+#define PALMAS_USB_ID_INT_SRC 0x0F
+#define PALMAS_USB_ID_INT_LATCH_SET 0x10
+#define PALMAS_USB_ID_INT_LATCH_CLR 0x11
+#define PALMAS_USB_ID_INT_EN_LO_SET 0x12
+#define PALMAS_USB_ID_INT_EN_LO_CLR 0x13
+#define PALMAS_USB_ID_INT_EN_HI_SET 0x14
+#define PALMAS_USB_ID_INT_EN_HI_CLR 0x15
+#define PALMAS_USB_OTG_ADP_CTRL 0x16
+#define PALMAS_USB_OTG_ADP_HIGH 0x17
+#define PALMAS_USB_OTG_ADP_LOW 0x18
+#define PALMAS_USB_OTG_ADP_RISE 0x19
+#define PALMAS_USB_OTG_REVISION 0x1A
+
+/* Bit definitions for USB_WAKEUP */
+#define PALMAS_USB_WAKEUP_ID_WK_UP_COMP 0x01
+#define PALMAS_USB_WAKEUP_ID_WK_UP_COMP_SHIFT 0x00
+
+/* Bit definitions for USB_VBUS_CTRL_SET */
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS 0x80
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_CHRG_VSYS_SHIFT 0x07
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG 0x20
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_DISCHRG_SHIFT 0x05
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC 0x10
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SRC_SHIFT 0x04
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK 0x08
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_IADP_SINK_SHIFT 0x03
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP 0x04
+#define PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP_SHIFT 0x02
+
+/* Bit definitions for USB_VBUS_CTRL_CLR */
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS 0x80
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_CHRG_VSYS_SHIFT 0x07
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG 0x20
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_DISCHRG_SHIFT 0x05
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC 0x10
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SRC_SHIFT 0x04
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK 0x08
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_IADP_SINK_SHIFT 0x03
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP 0x04
+#define PALMAS_USB_VBUS_CTRL_CLR_VBUS_ACT_COMP_SHIFT 0x02
+
+/* Bit definitions for USB_ID_CTRL_SET */
+#define PALMAS_USB_ID_CTRL_SET_ID_PU_220K 0x80
+#define PALMAS_USB_ID_CTRL_SET_ID_PU_220K_SHIFT 0x07
+#define PALMAS_USB_ID_CTRL_SET_ID_PU_100K 0x40
+#define PALMAS_USB_ID_CTRL_SET_ID_PU_100K_SHIFT 0x06
+#define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV 0x20
+#define PALMAS_USB_ID_CTRL_SET_ID_GND_DRV_SHIFT 0x05
+#define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U 0x10
+#define PALMAS_USB_ID_CTRL_SET_ID_SRC_16U_SHIFT 0x04
+#define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U 0x08
+#define PALMAS_USB_ID_CTRL_SET_ID_SRC_5U_SHIFT 0x03
+#define PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP 0x04
+#define PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP_SHIFT 0x02
+
+/* Bit definitions for USB_ID_CTRL_CLEAR */
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K 0x80
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_220K_SHIFT 0x07
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K 0x40
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_PU_100K_SHIFT 0x06
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV 0x20
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_GND_DRV_SHIFT 0x05
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U 0x10
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_16U_SHIFT 0x04
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U 0x08
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_SRC_5U_SHIFT 0x03
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP 0x04
+#define PALMAS_USB_ID_CTRL_CLEAR_ID_ACT_COMP_SHIFT 0x02
+
+/* Bit definitions for USB_VBUS_INT_SRC */
+#define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD 0x80
+#define PALMAS_USB_VBUS_INT_SRC_VOTG_SESS_VLD_SHIFT 0x07
+#define PALMAS_USB_VBUS_INT_SRC_VADP_PRB 0x40
+#define PALMAS_USB_VBUS_INT_SRC_VADP_PRB_SHIFT 0x06
+#define PALMAS_USB_VBUS_INT_SRC_VADP_SNS 0x20
+#define PALMAS_USB_VBUS_INT_SRC_VADP_SNS_SHIFT 0x05
+#define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD 0x08
+#define PALMAS_USB_VBUS_INT_SRC_VA_VBUS_VLD_SHIFT 0x03
+#define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD 0x04
+#define PALMAS_USB_VBUS_INT_SRC_VA_SESS_VLD_SHIFT 0x02
+#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD 0x02
+#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_VLD_SHIFT 0x01
+#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END 0x01
+#define PALMAS_USB_VBUS_INT_SRC_VB_SESS_END_SHIFT 0x00
+
+/* Bit definitions for USB_VBUS_INT_LATCH_SET */
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD 0x80
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VOTG_SESS_VLD_SHIFT 0x07
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB 0x40
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_PRB_SHIFT 0x06
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS 0x20
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VADP_SNS_SHIFT 0x05
+#define PALMAS_USB_VBUS_INT_LATCH_SET_ADP 0x10
+#define PALMAS_USB_VBUS_INT_LATCH_SET_ADP_SHIFT 0x04
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD 0x08
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_VBUS_VLD_SHIFT 0x03
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD 0x04
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VA_SESS_VLD_SHIFT 0x02
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD 0x02
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_VLD_SHIFT 0x01
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END 0x01
+#define PALMAS_USB_VBUS_INT_LATCH_SET_VB_SESS_END_SHIFT 0x00
+
+/* Bit definitions for USB_VBUS_INT_LATCH_CLR */
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD 0x80
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VOTG_SESS_VLD_SHIFT 0x07
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB 0x40
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_PRB_SHIFT 0x06
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS 0x20
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VADP_SNS_SHIFT 0x05
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP 0x10
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_ADP_SHIFT 0x04
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD 0x08
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_VBUS_VLD_SHIFT 0x03
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD 0x04
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VA_SESS_VLD_SHIFT 0x02
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD 0x02
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_VLD_SHIFT 0x01
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END 0x01
+#define PALMAS_USB_VBUS_INT_LATCH_CLR_VB_SESS_END_SHIFT 0x00
+
+/* Bit definitions for USB_VBUS_INT_EN_LO_SET */
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD 0x80
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VOTG_SESS_VLD_SHIFT 0x07
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB 0x40
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_PRB_SHIFT 0x06
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS 0x20
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VADP_SNS_SHIFT 0x05
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD 0x08
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_VBUS_VLD_SHIFT 0x03
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD 0x04
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VA_SESS_VLD_SHIFT 0x02
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD 0x02
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_VLD_SHIFT 0x01
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END 0x01
+#define PALMAS_USB_VBUS_INT_EN_LO_SET_VB_SESS_END_SHIFT 0x00
+
+/* Bit definitions for USB_VBUS_INT_EN_LO_CLR */
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD 0x80
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VOTG_SESS_VLD_SHIFT 0x07
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB 0x40
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_PRB_SHIFT 0x06
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS 0x20
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VADP_SNS_SHIFT 0x05
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD 0x08
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_VBUS_VLD_SHIFT 0x03
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD 0x04
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VA_SESS_VLD_SHIFT 0x02
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD 0x02
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_VLD_SHIFT 0x01
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END 0x01
+#define PALMAS_USB_VBUS_INT_EN_LO_CLR_VB_SESS_END_SHIFT 0x00
+
+/* Bit definitions for USB_VBUS_INT_EN_HI_SET */
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD 0x80
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VOTG_SESS_VLD_SHIFT 0x07
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB 0x40
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_PRB_SHIFT 0x06
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS 0x20
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VADP_SNS_SHIFT 0x05
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP 0x10
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_ADP_SHIFT 0x04
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD 0x08
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_VBUS_VLD_SHIFT 0x03
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD 0x04
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VA_SESS_VLD_SHIFT 0x02
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD 0x02
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_VLD_SHIFT 0x01
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END 0x01
+#define PALMAS_USB_VBUS_INT_EN_HI_SET_VB_SESS_END_SHIFT 0x00
+
+/* Bit definitions for USB_VBUS_INT_EN_HI_CLR */
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD 0x80
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VOTG_SESS_VLD_SHIFT 0x07
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB 0x40
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_PRB_SHIFT 0x06
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS 0x20
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VADP_SNS_SHIFT 0x05
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP 0x10
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_ADP_SHIFT 0x04
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD 0x08
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_VBUS_VLD_SHIFT 0x03
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD 0x04
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VA_SESS_VLD_SHIFT 0x02
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD 0x02
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_VLD_SHIFT 0x01
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END 0x01
+#define PALMAS_USB_VBUS_INT_EN_HI_CLR_VB_SESS_END_SHIFT 0x00
+
+/* Bit definitions for USB_ID_INT_SRC */
+#define PALMAS_USB_ID_INT_SRC_ID_FLOAT 0x10
+#define PALMAS_USB_ID_INT_SRC_ID_FLOAT_SHIFT 0x04
+#define PALMAS_USB_ID_INT_SRC_ID_A 0x08
+#define PALMAS_USB_ID_INT_SRC_ID_A_SHIFT 0x03
+#define PALMAS_USB_ID_INT_SRC_ID_B 0x04
+#define PALMAS_USB_ID_INT_SRC_ID_B_SHIFT 0x02
+#define PALMAS_USB_ID_INT_SRC_ID_C 0x02
+#define PALMAS_USB_ID_INT_SRC_ID_C_SHIFT 0x01
+#define PALMAS_USB_ID_INT_SRC_ID_GND 0x01
+#define PALMAS_USB_ID_INT_SRC_ID_GND_SHIFT 0x00
+
+/* Bit definitions for USB_ID_INT_LATCH_SET */
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT 0x10
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_FLOAT_SHIFT 0x04
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_A 0x08
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_A_SHIFT 0x03
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_B 0x04
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_B_SHIFT 0x02
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_C 0x02
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_C_SHIFT 0x01
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_GND 0x01
+#define PALMAS_USB_ID_INT_LATCH_SET_ID_GND_SHIFT 0x00
+
+/* Bit definitions for USB_ID_INT_LATCH_CLR */
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT 0x10
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_FLOAT_SHIFT 0x04
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_A 0x08
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_A_SHIFT 0x03
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_B 0x04
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_B_SHIFT 0x02
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_C 0x02
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_C_SHIFT 0x01
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND 0x01
+#define PALMAS_USB_ID_INT_LATCH_CLR_ID_GND_SHIFT 0x00
+
+/* Bit definitions for USB_ID_INT_EN_LO_SET */
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT 0x10
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_FLOAT_SHIFT 0x04
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_A 0x08
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_A_SHIFT 0x03
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_B 0x04
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_B_SHIFT 0x02
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_C 0x02
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_C_SHIFT 0x01
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND 0x01
+#define PALMAS_USB_ID_INT_EN_LO_SET_ID_GND_SHIFT 0x00
+
+/* Bit definitions for USB_ID_INT_EN_LO_CLR */
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT 0x10
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_FLOAT_SHIFT 0x04
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A 0x08
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_A_SHIFT 0x03
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B 0x04
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_B_SHIFT 0x02
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C 0x02
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_C_SHIFT 0x01
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND 0x01
+#define PALMAS_USB_ID_INT_EN_LO_CLR_ID_GND_SHIFT 0x00
+
+/* Bit definitions for USB_ID_INT_EN_HI_SET */
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT 0x10
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT_SHIFT 0x04
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_A 0x08
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_A_SHIFT 0x03
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_B 0x04
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_B_SHIFT 0x02
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_C 0x02
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_C_SHIFT 0x01
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND 0x01
+#define PALMAS_USB_ID_INT_EN_HI_SET_ID_GND_SHIFT 0x00
+
+/* Bit definitions for USB_ID_INT_EN_HI_CLR */
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT 0x10
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT_SHIFT 0x04
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A 0x08
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_A_SHIFT 0x03
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B 0x04
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_B_SHIFT 0x02
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C 0x02
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_C_SHIFT 0x01
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND 0x01
+#define PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND_SHIFT 0x00
+
+/* Bit definitions for USB_OTG_ADP_CTRL */
+#define PALMAS_USB_OTG_ADP_CTRL_ADP_EN 0x04
+#define PALMAS_USB_OTG_ADP_CTRL_ADP_EN_SHIFT 0x02
+#define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_MASK 0x03
+#define PALMAS_USB_OTG_ADP_CTRL_ADP_MODE_SHIFT 0x00
+
+/* Bit definitions for USB_OTG_ADP_HIGH */
+#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_MASK 0xFF
+#define PALMAS_USB_OTG_ADP_HIGH_T_ADP_HIGH_SHIFT 0x00
+
+/* Bit definitions for USB_OTG_ADP_LOW */
+#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_MASK 0xFF
+#define PALMAS_USB_OTG_ADP_LOW_T_ADP_LOW_SHIFT 0x00
+
+/* Bit definitions for USB_OTG_ADP_RISE */
+#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_MASK 0xFF
+#define PALMAS_USB_OTG_ADP_RISE_T_ADP_RISE_SHIFT 0x00
+
+/* Bit definitions for USB_OTG_REVISION */
+#define PALMAS_USB_OTG_REVISION_OTG_REV 0x01
+#define PALMAS_USB_OTG_REVISION_OTG_REV_SHIFT 0x00
+
+/* Registers for function VIBRATOR */
+#define PALMAS_VIBRA_CTRL 0x00
+
+/* Bit definitions for VIBRA_CTRL */
+#define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_MASK 0x06
+#define PALMAS_VIBRA_CTRL_PWM_DUTY_SEL_SHIFT 0x01
+#define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL 0x01
+#define PALMAS_VIBRA_CTRL_PWM_FREQ_SEL_SHIFT 0x00
+
+/* Registers for function GPIO */
+#define PALMAS_GPIO_DATA_IN 0x00
+#define PALMAS_GPIO_DATA_DIR 0x01
+#define PALMAS_GPIO_DATA_OUT 0x02
+#define PALMAS_GPIO_DEBOUNCE_EN 0x03
+#define PALMAS_GPIO_CLEAR_DATA_OUT 0x04
+#define PALMAS_GPIO_SET_DATA_OUT 0x05
+#define PALMAS_PU_PD_GPIO_CTRL1 0x06
+#define PALMAS_PU_PD_GPIO_CTRL2 0x07
+#define PALMAS_OD_OUTPUT_GPIO_CTRL 0x08
+#define PALMAS_GPIO_DATA_IN2 0x09
+#define PALMAS_GPIO_DATA_DIR2 0x0A
+#define PALMAS_GPIO_DATA_OUT2 0x0B
+#define PALMAS_GPIO_DEBOUNCE_EN2 0x0C
+#define PALMAS_GPIO_CLEAR_DATA_OUT2 0x0D
+#define PALMAS_GPIO_SET_DATA_OUT2 0x0E
+#define PALMAS_PU_PD_GPIO_CTRL3 0x0F
+#define PALMAS_PU_PD_GPIO_CTRL4 0x10
+#define PALMAS_OD_OUTPUT_GPIO_CTRL2 0x11
+
+/* Bit definitions for GPIO_DATA_IN */
+#define PALMAS_GPIO_DATA_IN_GPIO_7_IN 0x80
+#define PALMAS_GPIO_DATA_IN_GPIO_7_IN_SHIFT 0x07
+#define PALMAS_GPIO_DATA_IN_GPIO_6_IN 0x40
+#define PALMAS_GPIO_DATA_IN_GPIO_6_IN_SHIFT 0x06
+#define PALMAS_GPIO_DATA_IN_GPIO_5_IN 0x20
+#define PALMAS_GPIO_DATA_IN_GPIO_5_IN_SHIFT 0x05
+#define PALMAS_GPIO_DATA_IN_GPIO_4_IN 0x10
+#define PALMAS_GPIO_DATA_IN_GPIO_4_IN_SHIFT 0x04
+#define PALMAS_GPIO_DATA_IN_GPIO_3_IN 0x08
+#define PALMAS_GPIO_DATA_IN_GPIO_3_IN_SHIFT 0x03
+#define PALMAS_GPIO_DATA_IN_GPIO_2_IN 0x04
+#define PALMAS_GPIO_DATA_IN_GPIO_2_IN_SHIFT 0x02
+#define PALMAS_GPIO_DATA_IN_GPIO_1_IN 0x02
+#define PALMAS_GPIO_DATA_IN_GPIO_1_IN_SHIFT 0x01
+#define PALMAS_GPIO_DATA_IN_GPIO_0_IN 0x01
+#define PALMAS_GPIO_DATA_IN_GPIO_0_IN_SHIFT 0x00
+
+/* Bit definitions for GPIO_DATA_DIR */
+#define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR 0x80
+#define PALMAS_GPIO_DATA_DIR_GPIO_7_DIR_SHIFT 0x07
+#define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR 0x40
+#define PALMAS_GPIO_DATA_DIR_GPIO_6_DIR_SHIFT 0x06
+#define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR 0x20
+#define PALMAS_GPIO_DATA_DIR_GPIO_5_DIR_SHIFT 0x05
+#define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR 0x10
+#define PALMAS_GPIO_DATA_DIR_GPIO_4_DIR_SHIFT 0x04
+#define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR 0x08
+#define PALMAS_GPIO_DATA_DIR_GPIO_3_DIR_SHIFT 0x03
+#define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR 0x04
+#define PALMAS_GPIO_DATA_DIR_GPIO_2_DIR_SHIFT 0x02
+#define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR 0x02
+#define PALMAS_GPIO_DATA_DIR_GPIO_1_DIR_SHIFT 0x01
+#define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR 0x01
+#define PALMAS_GPIO_DATA_DIR_GPIO_0_DIR_SHIFT 0x00
+
+/* Bit definitions for GPIO_DATA_OUT */
+#define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT 0x80
+#define PALMAS_GPIO_DATA_OUT_GPIO_7_OUT_SHIFT 0x07
+#define PALMAS_GPIO_DATA_OUT_GPIO_6_OUT 0x40
+#define PALMAS_GPIO_DATA_OUT_GPIO_6_OUT_SHIFT 0x06
+#define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT 0x20
+#define PALMAS_GPIO_DATA_OUT_GPIO_5_OUT_SHIFT 0x05
+#define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT 0x10
+#define PALMAS_GPIO_DATA_OUT_GPIO_4_OUT_SHIFT 0x04
+#define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT 0x08
+#define PALMAS_GPIO_DATA_OUT_GPIO_3_OUT_SHIFT 0x03
+#define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT 0x04
+#define PALMAS_GPIO_DATA_OUT_GPIO_2_OUT_SHIFT 0x02
+#define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT 0x02
+#define PALMAS_GPIO_DATA_OUT_GPIO_1_OUT_SHIFT 0x01
+#define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT 0x01
+#define PALMAS_GPIO_DATA_OUT_GPIO_0_OUT_SHIFT 0x00
+
+/* Bit definitions for GPIO_DEBOUNCE_EN */
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN 0x80
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_7_DEBOUNCE_EN_SHIFT 0x07
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN 0x40
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_6_DEBOUNCE_EN_SHIFT 0x06
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN 0x20
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_5_DEBOUNCE_EN_SHIFT 0x05
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN 0x10
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_4_DEBOUNCE_EN_SHIFT 0x04
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN 0x08
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_3_DEBOUNCE_EN_SHIFT 0x03
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN 0x04
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_2_DEBOUNCE_EN_SHIFT 0x02
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN 0x02
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_1_DEBOUNCE_EN_SHIFT 0x01
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN 0x01
+#define PALMAS_GPIO_DEBOUNCE_EN_GPIO_0_DEBOUNCE_EN_SHIFT 0x00
+
+/* Bit definitions for GPIO_CLEAR_DATA_OUT */
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT 0x80
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_7_CLEAR_DATA_OUT_SHIFT 0x07
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT 0x40
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_6_CLEAR_DATA_OUT_SHIFT 0x06
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT 0x20
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_5_CLEAR_DATA_OUT_SHIFT 0x05
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT 0x10
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_4_CLEAR_DATA_OUT_SHIFT 0x04
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT 0x08
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_3_CLEAR_DATA_OUT_SHIFT 0x03
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT 0x04
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_2_CLEAR_DATA_OUT_SHIFT 0x02
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT 0x02
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_1_CLEAR_DATA_OUT_SHIFT 0x01
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT 0x01
+#define PALMAS_GPIO_CLEAR_DATA_OUT_GPIO_0_CLEAR_DATA_OUT_SHIFT 0x00
+
+/* Bit definitions for GPIO_SET_DATA_OUT */
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT 0x80
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_7_SET_DATA_OUT_SHIFT 0x07
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT 0x40
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_6_SET_DATA_OUT_SHIFT 0x06
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT 0x20
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_5_SET_DATA_OUT_SHIFT 0x05
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT 0x10
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_4_SET_DATA_OUT_SHIFT 0x04
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT 0x08
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_3_SET_DATA_OUT_SHIFT 0x03
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT 0x04
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_2_SET_DATA_OUT_SHIFT 0x02
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT 0x02
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_1_SET_DATA_OUT_SHIFT 0x01
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT 0x01
+#define PALMAS_GPIO_SET_DATA_OUT_GPIO_0_SET_DATA_OUT_SHIFT 0x00
+
+/* Bit definitions for PU_PD_GPIO_CTRL1 */
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD 0x40
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_3_PD_SHIFT 0x06
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU 0x20
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PU_SHIFT 0x05
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD 0x10
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_2_PD_SHIFT 0x04
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU 0x08
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PU_SHIFT 0x03
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD 0x04
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_1_PD_SHIFT 0x02
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD 0x01
+#define PALMAS_PU_PD_GPIO_CTRL1_GPIO_0_PD_SHIFT 0x00
+
+/* Bit definitions for PU_PD_GPIO_CTRL2 */
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD 0x40
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_7_PD_SHIFT 0x06
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU 0x20
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PU_SHIFT 0x05
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD 0x10
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_6_PD_SHIFT 0x04
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU 0x08
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PU_SHIFT 0x03
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD 0x04
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_5_PD_SHIFT 0x02
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU 0x02
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PU_SHIFT 0x01
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD 0x01
+#define PALMAS_PU_PD_GPIO_CTRL2_GPIO_4_PD_SHIFT 0x00
+
+/* Bit definitions for OD_OUTPUT_GPIO_CTRL */
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD 0x20
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_5_OD_SHIFT 0x05
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD 0x04
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_2_OD_SHIFT 0x02
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD 0x02
+#define PALMAS_OD_OUTPUT_GPIO_CTRL_GPIO_1_OD_SHIFT 0x01
+
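+/*
+ * Illustrative sketch only: the SET/CLEAR register pair above allows a GPIO
+ * output to be driven without a read-modify-write cycle.  palmas_write() and
+ * PALMAS_GPIO_BASE are assumed to be declared earlier in this header.
+ */
+static inline int palmas_gpio_drive(struct palmas *palmas,
+				    unsigned int offset, int value)
+{
+	unsigned int reg = value ? PALMAS_GPIO_SET_DATA_OUT :
+				   PALMAS_GPIO_CLEAR_DATA_OUT;
+
+	/* One bit per line; offsets 0-7 sit in the first GPIO bank */
+	return palmas_write(palmas, PALMAS_GPIO_BASE, reg, 1 << offset);
+}
+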
+/* Registers for function GPADC */
+#define PALMAS_GPADC_CTRL1 0x00
+#define PALMAS_GPADC_CTRL2 0x01
+#define PALMAS_GPADC_RT_CTRL 0x02
+#define PALMAS_GPADC_AUTO_CTRL 0x03
+#define PALMAS_GPADC_STATUS 0x04
+#define PALMAS_GPADC_RT_SELECT 0x05
+#define PALMAS_GPADC_RT_CONV0_LSB 0x06
+#define PALMAS_GPADC_RT_CONV0_MSB 0x07
+#define PALMAS_GPADC_AUTO_SELECT 0x08
+#define PALMAS_GPADC_AUTO_CONV0_LSB 0x09
+#define PALMAS_GPADC_AUTO_CONV0_MSB 0x0A
+#define PALMAS_GPADC_AUTO_CONV1_LSB 0x0B
+#define PALMAS_GPADC_AUTO_CONV1_MSB 0x0C
+#define PALMAS_GPADC_SW_SELECT 0x0D
+#define PALMAS_GPADC_SW_CONV0_LSB 0x0E
+#define PALMAS_GPADC_SW_CONV0_MSB 0x0F
+#define PALMAS_GPADC_THRES_CONV0_LSB 0x10
+#define PALMAS_GPADC_THRES_CONV0_MSB 0x11
+#define PALMAS_GPADC_THRES_CONV1_LSB 0x12
+#define PALMAS_GPADC_THRES_CONV1_MSB 0x13
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN 0x14
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING 0x15
+
+/* Bit definitions for GPADC_CTRL1 */
+#define PALMAS_GPADC_CTRL1_RESERVED_MASK 0xC0
+#define PALMAS_GPADC_CTRL1_RESERVED_SHIFT 0x06
+#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_MASK 0x30
+#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH3_SHIFT 0x04
+#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_MASK 0x0C
+#define PALMAS_GPADC_CTRL1_CURRENT_SRC_CH0_SHIFT 0x02
+#define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET 0x02
+#define PALMAS_GPADC_CTRL1_BAT_REMOVAL_DET_SHIFT 0x01
+#define PALMAS_GPADC_CTRL1_GPADC_FORCE 0x01
+#define PALMAS_GPADC_CTRL1_GPADC_FORCE_SHIFT 0x00
+
+/* Bit definitions for GPADC_CTRL2 */
+#define PALMAS_GPADC_CTRL2_RESERVED_MASK 0x06
+#define PALMAS_GPADC_CTRL2_RESERVED_SHIFT 0x01
+
+/* Bit definitions for GPADC_RT_CTRL */
+#define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY 0x02
+#define PALMAS_GPADC_RT_CTRL_EXTEND_DELAY_SHIFT 0x01
+#define PALMAS_GPADC_RT_CTRL_START_POLARITY 0x01
+#define PALMAS_GPADC_RT_CTRL_START_POLARITY_SHIFT 0x00
+
+/* Bit definitions for GPADC_AUTO_CTRL */
+#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1 0x80
+#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV1_SHIFT 0x07
+#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0 0x40
+#define PALMAS_GPADC_AUTO_CTRL_SHUTDOWN_CONV0_SHIFT 0x06
+#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN 0x20
+#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV1_EN_SHIFT 0x05
+#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN 0x10
+#define PALMAS_GPADC_AUTO_CTRL_AUTO_CONV0_EN_SHIFT 0x04
+#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_MASK 0x0F
+#define PALMAS_GPADC_AUTO_CTRL_COUNTER_CONV_SHIFT 0x00
+
+/* Bit definitions for GPADC_STATUS */
+#define PALMAS_GPADC_STATUS_GPADC_AVAILABLE 0x10
+#define PALMAS_GPADC_STATUS_GPADC_AVAILABLE_SHIFT 0x04
+
+/* Bit definitions for GPADC_RT_SELECT */
+#define PALMAS_GPADC_RT_SELECT_RT_CONV_EN 0x80
+#define PALMAS_GPADC_RT_SELECT_RT_CONV_EN_SHIFT 0x07
+#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_MASK 0x0F
+#define PALMAS_GPADC_RT_SELECT_RT_CONV0_SEL_SHIFT 0x00
+
+/* Bit definitions for GPADC_RT_CONV0_LSB */
+#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_MASK 0xFF
+#define PALMAS_GPADC_RT_CONV0_LSB_RT_CONV0_LSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_RT_CONV0_MSB */
+#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_MASK 0x0F
+#define PALMAS_GPADC_RT_CONV0_MSB_RT_CONV0_MSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_AUTO_SELECT */
+#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_MASK 0xF0
+#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV1_SEL_SHIFT 0x04
+#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_MASK 0x0F
+#define PALMAS_GPADC_AUTO_SELECT_AUTO_CONV0_SEL_SHIFT 0x00
+
+/* Bit definitions for GPADC_AUTO_CONV0_LSB */
+#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_MASK 0xFF
+#define PALMAS_GPADC_AUTO_CONV0_LSB_AUTO_CONV0_LSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_AUTO_CONV0_MSB */
+#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_MASK 0x0F
+#define PALMAS_GPADC_AUTO_CONV0_MSB_AUTO_CONV0_MSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_AUTO_CONV1_LSB */
+#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_MASK 0xFF
+#define PALMAS_GPADC_AUTO_CONV1_LSB_AUTO_CONV1_LSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_AUTO_CONV1_MSB */
+#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_MASK 0x0F
+#define PALMAS_GPADC_AUTO_CONV1_MSB_AUTO_CONV1_MSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_SW_SELECT */
+#define PALMAS_GPADC_SW_SELECT_SW_CONV_EN 0x80
+#define PALMAS_GPADC_SW_SELECT_SW_CONV_EN_SHIFT 0x07
+#define PALMAS_GPADC_SW_SELECT_SW_START_CONV0 0x10
+#define PALMAS_GPADC_SW_SELECT_SW_START_CONV0_SHIFT 0x04
+#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_MASK 0x0F
+#define PALMAS_GPADC_SW_SELECT_SW_CONV0_SEL_SHIFT 0x00
+
+/* Bit definitions for GPADC_SW_CONV0_LSB */
+#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_MASK 0xFF
+#define PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_SW_CONV0_MSB */
+#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_MASK 0x0F
+#define PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_THRES_CONV0_LSB */
+#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_MASK 0xFF
+#define PALMAS_GPADC_THRES_CONV0_LSB_THRES_CONV0_LSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_THRES_CONV0_MSB */
+#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL 0x80
+#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_POL_SHIFT 0x07
+#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_MASK 0x0F
+#define PALMAS_GPADC_THRES_CONV0_MSB_THRES_CONV0_MSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_THRES_CONV1_LSB */
+#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_MASK 0xFF
+#define PALMAS_GPADC_THRES_CONV1_LSB_THRES_CONV1_LSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_THRES_CONV1_MSB */
+#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL 0x80
+#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_POL_SHIFT 0x07
+#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_MASK 0x0F
+#define PALMAS_GPADC_THRES_CONV1_MSB_THRES_CONV1_MSB_SHIFT 0x00
+
+/* Bit definitions for GPADC_SMPS_ILMONITOR_EN */
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN 0x20
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_EN_SHIFT 0x05
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT 0x10
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_REXT_SHIFT 0x04
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_MASK 0x0F
+#define PALMAS_GPADC_SMPS_ILMONITOR_EN_SMPS_ILMON_SEL_SHIFT 0x00
+
+/* Bit definitions for GPADC_SMPS_VSEL_MONITORING */
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE 0x80
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING_ACTIVE_PHASE_SHIFT 0x07
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_MASK 0x7F
+#define PALMAS_GPADC_SMPS_VSEL_MONITORING_SMPS_VSEL_MONITORING_SHIFT 0x00
+
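+/*
+ * Illustrative sketch only: a GPADC result is split across an 8-bit LSB and
+ * a 4-bit MSB register, so a 12-bit software conversion is reassembled as
+ * below.  palmas_read() and PALMAS_GPADC_BASE are assumed from earlier in
+ * this header.
+ */
+static inline int palmas_gpadc_read_sw_conv0(struct palmas *palmas)
+{
+	unsigned int lsb, msb;
+	int ret;
+
+	ret = palmas_read(palmas, PALMAS_GPADC_BASE,
+			  PALMAS_GPADC_SW_CONV0_LSB, &lsb);
+	if (ret < 0)
+		return ret;
+
+	ret = palmas_read(palmas, PALMAS_GPADC_BASE,
+			  PALMAS_GPADC_SW_CONV0_MSB, &msb);
+	if (ret < 0)
+		return ret;
+
+	return ((msb & PALMAS_GPADC_SW_CONV0_MSB_SW_CONV0_MSB_MASK) << 8) |
+		(lsb & PALMAS_GPADC_SW_CONV0_LSB_SW_CONV0_LSB_MASK);
+}
+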
+/* Registers for function GPADC (trim registers) */
+#define PALMAS_GPADC_TRIM1 0x00
+#define PALMAS_GPADC_TRIM2 0x01
+#define PALMAS_GPADC_TRIM3 0x02
+#define PALMAS_GPADC_TRIM4 0x03
+#define PALMAS_GPADC_TRIM5 0x04
+#define PALMAS_GPADC_TRIM6 0x05
+#define PALMAS_GPADC_TRIM7 0x06
+#define PALMAS_GPADC_TRIM8 0x07
+#define PALMAS_GPADC_TRIM9 0x08
+#define PALMAS_GPADC_TRIM10 0x09
+#define PALMAS_GPADC_TRIM11 0x0A
+#define PALMAS_GPADC_TRIM12 0x0B
+#define PALMAS_GPADC_TRIM13 0x0C
+#define PALMAS_GPADC_TRIM14 0x0D
+#define PALMAS_GPADC_TRIM15 0x0E
+#define PALMAS_GPADC_TRIM16 0x0F
+
+/* TPS659038 REGEN2_CTRL offset is different from Palmas */
+#define TPS659038_REGEN2_CTRL 0x12
+
+/* TPS65917 Interrupt registers */
+
+/* Registers for function INTERRUPT */
+#define TPS65917_INT1_STATUS 0x00
+#define TPS65917_INT1_MASK 0x01
+#define TPS65917_INT1_LINE_STATE 0x02
+#define TPS65917_INT2_STATUS 0x05
+#define TPS65917_INT2_MASK 0x06
+#define TPS65917_INT2_LINE_STATE 0x07
+#define TPS65917_INT3_STATUS 0x0A
+#define TPS65917_INT3_MASK 0x0B
+#define TPS65917_INT3_LINE_STATE 0x0C
+#define TPS65917_INT4_STATUS 0x0F
+#define TPS65917_INT4_MASK 0x10
+#define TPS65917_INT4_LINE_STATE 0x11
+#define TPS65917_INT4_EDGE_DETECT1 0x12
+#define TPS65917_INT4_EDGE_DETECT2 0x13
+#define TPS65917_INT_CTRL 0x14
+
+/* Bit definitions for INT1_STATUS */
+#define TPS65917_INT1_STATUS_VSYS_MON 0x40
+#define TPS65917_INT1_STATUS_VSYS_MON_SHIFT 0x06
+#define TPS65917_INT1_STATUS_HOTDIE 0x20
+#define TPS65917_INT1_STATUS_HOTDIE_SHIFT 0x05
+#define TPS65917_INT1_STATUS_PWRDOWN 0x10
+#define TPS65917_INT1_STATUS_PWRDOWN_SHIFT 0x04
+#define TPS65917_INT1_STATUS_LONG_PRESS_KEY 0x04
+#define TPS65917_INT1_STATUS_LONG_PRESS_KEY_SHIFT 0x02
+#define TPS65917_INT1_STATUS_PWRON 0x02
+#define TPS65917_INT1_STATUS_PWRON_SHIFT 0x01
+
+/* Bit definitions for INT1_MASK */
+#define TPS65917_INT1_MASK_VSYS_MON 0x40
+#define TPS65917_INT1_MASK_VSYS_MON_SHIFT 0x06
+#define TPS65917_INT1_MASK_HOTDIE 0x20
+#define TPS65917_INT1_MASK_HOTDIE_SHIFT 0x05
+#define TPS65917_INT1_MASK_PWRDOWN 0x10
+#define TPS65917_INT1_MASK_PWRDOWN_SHIFT 0x04
+#define TPS65917_INT1_MASK_LONG_PRESS_KEY 0x04
+#define TPS65917_INT1_MASK_LONG_PRESS_KEY_SHIFT 0x02
+#define TPS65917_INT1_MASK_PWRON 0x02
+#define TPS65917_INT1_MASK_PWRON_SHIFT 0x01
+
+/* Bit definitions for INT1_LINE_STATE */
+#define TPS65917_INT1_LINE_STATE_VSYS_MON 0x40
+#define TPS65917_INT1_LINE_STATE_VSYS_MON_SHIFT 0x06
+#define TPS65917_INT1_LINE_STATE_HOTDIE 0x20
+#define TPS65917_INT1_LINE_STATE_HOTDIE_SHIFT 0x05
+#define TPS65917_INT1_LINE_STATE_PWRDOWN 0x10
+#define TPS65917_INT1_LINE_STATE_PWRDOWN_SHIFT 0x04
+#define TPS65917_INT1_LINE_STATE_LONG_PRESS_KEY 0x04
+#define TPS65917_INT1_LINE_STATE_LONG_PRESS_KEY_SHIFT 0x02
+#define TPS65917_INT1_LINE_STATE_PWRON 0x02
+#define TPS65917_INT1_LINE_STATE_PWRON_SHIFT 0x01
+
+/* Bit definitions for INT2_STATUS */
+#define TPS65917_INT2_STATUS_SHORT 0x40
+#define TPS65917_INT2_STATUS_SHORT_SHIFT 0x06
+#define TPS65917_INT2_STATUS_FSD 0x20
+#define TPS65917_INT2_STATUS_FSD_SHIFT 0x05
+#define TPS65917_INT2_STATUS_RESET_IN 0x10
+#define TPS65917_INT2_STATUS_RESET_IN_SHIFT 0x04
+#define TPS65917_INT2_STATUS_WDT 0x04
+#define TPS65917_INT2_STATUS_WDT_SHIFT 0x02
+#define TPS65917_INT2_STATUS_OTP_ERROR 0x02
+#define TPS65917_INT2_STATUS_OTP_ERROR_SHIFT 0x01
+
+/* Bit definitions for INT2_MASK */
+#define TPS65917_INT2_MASK_SHORT 0x40
+#define TPS65917_INT2_MASK_SHORT_SHIFT 0x06
+#define TPS65917_INT2_MASK_FSD 0x20
+#define TPS65917_INT2_MASK_FSD_SHIFT 0x05
+#define TPS65917_INT2_MASK_RESET_IN 0x10
+#define TPS65917_INT2_MASK_RESET_IN_SHIFT 0x04
+#define TPS65917_INT2_MASK_WDT 0x04
+#define TPS65917_INT2_MASK_WDT_SHIFT 0x02
+#define TPS65917_INT2_MASK_OTP_ERROR 0x02
+#define TPS65917_INT2_MASK_OTP_ERROR_SHIFT 0x01
+
+/* Bit definitions for INT2_LINE_STATE */
+#define TPS65917_INT2_LINE_STATE_SHORT 0x40
+#define TPS65917_INT2_LINE_STATE_SHORT_SHIFT 0x06
+#define TPS65917_INT2_LINE_STATE_FSD 0x20
+#define TPS65917_INT2_LINE_STATE_FSD_SHIFT 0x05
+#define TPS65917_INT2_LINE_STATE_RESET_IN 0x10
+#define TPS65917_INT2_LINE_STATE_RESET_IN_SHIFT 0x04
+#define TPS65917_INT2_LINE_STATE_WDT 0x04
+#define TPS65917_INT2_LINE_STATE_WDT_SHIFT 0x02
+#define TPS65917_INT2_LINE_STATE_OTP_ERROR 0x02
+#define TPS65917_INT2_LINE_STATE_OTP_ERROR_SHIFT 0x01
+
+/* Bit definitions for INT3_STATUS */
+#define TPS65917_INT3_STATUS_VBUS 0x80
+#define TPS65917_INT3_STATUS_VBUS_SHIFT 0x07
+#define TPS65917_INT3_STATUS_GPADC_EOC_SW 0x04
+#define TPS65917_INT3_STATUS_GPADC_EOC_SW_SHIFT 0x02
+#define TPS65917_INT3_STATUS_GPADC_AUTO_1 0x02
+#define TPS65917_INT3_STATUS_GPADC_AUTO_1_SHIFT 0x01
+#define TPS65917_INT3_STATUS_GPADC_AUTO_0 0x01
+#define TPS65917_INT3_STATUS_GPADC_AUTO_0_SHIFT 0x00
+
+/* Bit definitions for INT3_MASK */
+#define TPS65917_INT3_MASK_VBUS 0x80
+#define TPS65917_INT3_MASK_VBUS_SHIFT 0x07
+#define TPS65917_INT3_MASK_GPADC_EOC_SW 0x04
+#define TPS65917_INT3_MASK_GPADC_EOC_SW_SHIFT 0x02
+#define TPS65917_INT3_MASK_GPADC_AUTO_1 0x02
+#define TPS65917_INT3_MASK_GPADC_AUTO_1_SHIFT 0x01
+#define TPS65917_INT3_MASK_GPADC_AUTO_0 0x01
+#define TPS65917_INT3_MASK_GPADC_AUTO_0_SHIFT 0x00
+
+/* Bit definitions for INT3_LINE_STATE */
+#define TPS65917_INT3_LINE_STATE_VBUS 0x80
+#define TPS65917_INT3_LINE_STATE_VBUS_SHIFT 0x07
+#define TPS65917_INT3_LINE_STATE_GPADC_EOC_SW 0x04
+#define TPS65917_INT3_LINE_STATE_GPADC_EOC_SW_SHIFT 0x02
+#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_1 0x02
+#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_1_SHIFT 0x01
+#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_0 0x01
+#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_0_SHIFT 0x00
+
+/* Bit definitions for INT4_STATUS */
+#define TPS65917_INT4_STATUS_GPIO_6 0x40
+#define TPS65917_INT4_STATUS_GPIO_6_SHIFT 0x06
+#define TPS65917_INT4_STATUS_GPIO_5 0x20
+#define TPS65917_INT4_STATUS_GPIO_5_SHIFT 0x05
+#define TPS65917_INT4_STATUS_GPIO_4 0x10
+#define TPS65917_INT4_STATUS_GPIO_4_SHIFT 0x04
+#define TPS65917_INT4_STATUS_GPIO_3 0x08
+#define TPS65917_INT4_STATUS_GPIO_3_SHIFT 0x03
+#define TPS65917_INT4_STATUS_GPIO_2 0x04
+#define TPS65917_INT4_STATUS_GPIO_2_SHIFT 0x02
+#define TPS65917_INT4_STATUS_GPIO_1 0x02
+#define TPS65917_INT4_STATUS_GPIO_1_SHIFT 0x01
+#define TPS65917_INT4_STATUS_GPIO_0 0x01
+#define TPS65917_INT4_STATUS_GPIO_0_SHIFT 0x00
+
+/* Bit definitions for INT4_MASK */
+#define TPS65917_INT4_MASK_GPIO_6 0x40
+#define TPS65917_INT4_MASK_GPIO_6_SHIFT 0x06
+#define TPS65917_INT4_MASK_GPIO_5 0x20
+#define TPS65917_INT4_MASK_GPIO_5_SHIFT 0x05
+#define TPS65917_INT4_MASK_GPIO_4 0x10
+#define TPS65917_INT4_MASK_GPIO_4_SHIFT 0x04
+#define TPS65917_INT4_MASK_GPIO_3 0x08
+#define TPS65917_INT4_MASK_GPIO_3_SHIFT 0x03
+#define TPS65917_INT4_MASK_GPIO_2 0x04
+#define TPS65917_INT4_MASK_GPIO_2_SHIFT 0x02
+#define TPS65917_INT4_MASK_GPIO_1 0x02
+#define TPS65917_INT4_MASK_GPIO_1_SHIFT 0x01
+#define TPS65917_INT4_MASK_GPIO_0 0x01
+#define TPS65917_INT4_MASK_GPIO_0_SHIFT 0x00
+
+/* Bit definitions for INT4_LINE_STATE */
+#define TPS65917_INT4_LINE_STATE_GPIO_6 0x40
+#define TPS65917_INT4_LINE_STATE_GPIO_6_SHIFT 0x06
+#define TPS65917_INT4_LINE_STATE_GPIO_5 0x20
+#define TPS65917_INT4_LINE_STATE_GPIO_5_SHIFT 0x05
+#define TPS65917_INT4_LINE_STATE_GPIO_4 0x10
+#define TPS65917_INT4_LINE_STATE_GPIO_4_SHIFT 0x04
+#define TPS65917_INT4_LINE_STATE_GPIO_3 0x08
+#define TPS65917_INT4_LINE_STATE_GPIO_3_SHIFT 0x03
+#define TPS65917_INT4_LINE_STATE_GPIO_2 0x04
+#define TPS65917_INT4_LINE_STATE_GPIO_2_SHIFT 0x02
+#define TPS65917_INT4_LINE_STATE_GPIO_1 0x02
+#define TPS65917_INT4_LINE_STATE_GPIO_1_SHIFT 0x01
+#define TPS65917_INT4_LINE_STATE_GPIO_0 0x01
+#define TPS65917_INT4_LINE_STATE_GPIO_0_SHIFT 0x00
+
+/* Bit definitions for INT4_EDGE_DETECT1 */
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_RISING 0x80
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_RISING_SHIFT 0x07
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_FALLING 0x40
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_FALLING_SHIFT 0x06
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_RISING 0x20
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_RISING_SHIFT 0x05
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_FALLING 0x10
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_FALLING_SHIFT 0x04
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_RISING 0x08
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_RISING_SHIFT 0x03
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_FALLING 0x04
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_FALLING_SHIFT 0x02
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_RISING 0x02
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_RISING_SHIFT 0x01
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_FALLING 0x01
+#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_FALLING_SHIFT 0x00
+
+/* Bit definitions for INT4_EDGE_DETECT2 */
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_RISING 0x20
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_RISING_SHIFT 0x05
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_FALLING 0x10
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_FALLING_SHIFT 0x04
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_RISING 0x08
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_RISING_SHIFT 0x03
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_FALLING 0x04
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_FALLING_SHIFT 0x02
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_RISING 0x02
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_RISING_SHIFT 0x01
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_FALLING 0x01
+#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_FALLING_SHIFT 0x00
+
+/* Bit definitions for INT_CTRL */
+#define TPS65917_INT_CTRL_INT_PENDING 0x04
+#define TPS65917_INT_CTRL_INT_PENDING_SHIFT 0x02
+#define TPS65917_INT_CTRL_INT_CLEAR 0x01
+#define TPS65917_INT_CTRL_INT_CLEAR_SHIFT 0x00
+
+/* TPS65917 SMPS Registers */
+
+/* Registers for function SMPS */
+#define TPS65917_SMPS1_CTRL 0x00
+#define TPS65917_SMPS1_FORCE 0x02
+#define TPS65917_SMPS1_VOLTAGE 0x03
+#define TPS65917_SMPS2_CTRL 0x04
+#define TPS65917_SMPS2_FORCE 0x06
+#define TPS65917_SMPS2_VOLTAGE 0x07
+#define TPS65917_SMPS3_CTRL 0x0C
+#define TPS65917_SMPS3_FORCE 0x0E
+#define TPS65917_SMPS3_VOLTAGE 0x0F
+#define TPS65917_SMPS4_CTRL 0x10
+#define TPS65917_SMPS4_VOLTAGE 0x13
+#define TPS65917_SMPS5_CTRL 0x18
+#define TPS65917_SMPS5_VOLTAGE 0x1B
+#define TPS65917_SMPS_CTRL 0x24
+#define TPS65917_SMPS_PD_CTRL 0x25
+#define TPS65917_SMPS_THERMAL_EN 0x27
+#define TPS65917_SMPS_THERMAL_STATUS 0x28
+#define TPS65917_SMPS_SHORT_STATUS 0x29
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN 0x2A
+#define TPS65917_SMPS_POWERGOOD_MASK1 0x2B
+#define TPS65917_SMPS_POWERGOOD_MASK2 0x2C
+
+/* Bit definitions for SMPS1_CTRL */
+#define TPS65917_SMPS1_CTRL_WR_S 0x80
+#define TPS65917_SMPS1_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_SMPS1_CTRL_ROOF_FLOOR_EN 0x40
+#define TPS65917_SMPS1_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define TPS65917_SMPS1_CTRL_STATUS_MASK 0x30
+#define TPS65917_SMPS1_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_SMPS1_CTRL_MODE_SLEEP_MASK 0x0C
+#define TPS65917_SMPS1_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_SMPS1_CTRL_MODE_ACTIVE_MASK 0x03
+#define TPS65917_SMPS1_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS1_FORCE */
+#define TPS65917_SMPS1_FORCE_CMD 0x80
+#define TPS65917_SMPS1_FORCE_CMD_SHIFT 0x07
+#define TPS65917_SMPS1_FORCE_VSEL_MASK 0x7F
+#define TPS65917_SMPS1_FORCE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS1_VOLTAGE */
+#define TPS65917_SMPS1_VOLTAGE_RANGE 0x80
+#define TPS65917_SMPS1_VOLTAGE_RANGE_SHIFT 0x07
+#define TPS65917_SMPS1_VOLTAGE_VSEL_MASK 0x7F
+#define TPS65917_SMPS1_VOLTAGE_VSEL_SHIFT 0x00
+
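+/*
+ * Illustrative sketch only: the VOLTAGE registers above carry a 7-bit VSEL
+ * field next to a RANGE bit, so a new selector is written through the
+ * MASK/SHIFT pair.  palmas_update_bits() is assumed from earlier in this
+ * header, and reusing PALMAS_SMPS_BASE for TPS65917 is an assumption here.
+ */
+static inline int tps65917_smps1_set_vsel(struct palmas *palmas,
+					  unsigned int vsel)
+{
+	return palmas_update_bits(palmas, PALMAS_SMPS_BASE,
+				  TPS65917_SMPS1_VOLTAGE,
+				  TPS65917_SMPS1_VOLTAGE_VSEL_MASK,
+				  vsel << TPS65917_SMPS1_VOLTAGE_VSEL_SHIFT);
+}
+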
+/* Bit definitions for SMPS2_CTRL */
+#define TPS65917_SMPS2_CTRL_WR_S 0x80
+#define TPS65917_SMPS2_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_SMPS2_CTRL_ROOF_FLOOR_EN 0x40
+#define TPS65917_SMPS2_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define TPS65917_SMPS2_CTRL_STATUS_MASK 0x30
+#define TPS65917_SMPS2_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_SMPS2_CTRL_MODE_SLEEP_MASK 0x0C
+#define TPS65917_SMPS2_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_SMPS2_CTRL_MODE_ACTIVE_MASK 0x03
+#define TPS65917_SMPS2_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS2_FORCE */
+#define TPS65917_SMPS2_FORCE_CMD 0x80
+#define TPS65917_SMPS2_FORCE_CMD_SHIFT 0x07
+#define TPS65917_SMPS2_FORCE_VSEL_MASK 0x7F
+#define TPS65917_SMPS2_FORCE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS2_VOLTAGE */
+#define TPS65917_SMPS2_VOLTAGE_RANGE 0x80
+#define TPS65917_SMPS2_VOLTAGE_RANGE_SHIFT 0x07
+#define TPS65917_SMPS2_VOLTAGE_VSEL_MASK 0x7F
+#define TPS65917_SMPS2_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS3_CTRL */
+#define TPS65917_SMPS3_CTRL_WR_S 0x80
+#define TPS65917_SMPS3_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_SMPS3_CTRL_ROOF_FLOOR_EN 0x40
+#define TPS65917_SMPS3_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define TPS65917_SMPS3_CTRL_STATUS_MASK 0x30
+#define TPS65917_SMPS3_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_SMPS3_CTRL_MODE_SLEEP_MASK 0x0C
+#define TPS65917_SMPS3_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_SMPS3_CTRL_MODE_ACTIVE_MASK 0x03
+#define TPS65917_SMPS3_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS3_FORCE */
+#define TPS65917_SMPS3_FORCE_CMD 0x80
+#define TPS65917_SMPS3_FORCE_CMD_SHIFT 0x07
+#define TPS65917_SMPS3_FORCE_VSEL_MASK 0x7F
+#define TPS65917_SMPS3_FORCE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS3_VOLTAGE */
+#define TPS65917_SMPS3_VOLTAGE_RANGE 0x80
+#define TPS65917_SMPS3_VOLTAGE_RANGE_SHIFT 0x07
+#define TPS65917_SMPS3_VOLTAGE_VSEL_MASK 0x7F
+#define TPS65917_SMPS3_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS4_CTRL */
+#define TPS65917_SMPS4_CTRL_WR_S 0x80
+#define TPS65917_SMPS4_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_SMPS4_CTRL_ROOF_FLOOR_EN 0x40
+#define TPS65917_SMPS4_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define TPS65917_SMPS4_CTRL_STATUS_MASK 0x30
+#define TPS65917_SMPS4_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_SMPS4_CTRL_MODE_SLEEP_MASK 0x0C
+#define TPS65917_SMPS4_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_SMPS4_CTRL_MODE_ACTIVE_MASK 0x03
+#define TPS65917_SMPS4_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS4_VOLTAGE */
+#define TPS65917_SMPS4_VOLTAGE_RANGE 0x80
+#define TPS65917_SMPS4_VOLTAGE_RANGE_SHIFT 0x07
+#define TPS65917_SMPS4_VOLTAGE_VSEL_MASK 0x7F
+#define TPS65917_SMPS4_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS5_CTRL */
+#define TPS65917_SMPS5_CTRL_WR_S 0x80
+#define TPS65917_SMPS5_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_SMPS5_CTRL_ROOF_FLOOR_EN 0x40
+#define TPS65917_SMPS5_CTRL_ROOF_FLOOR_EN_SHIFT 0x06
+#define TPS65917_SMPS5_CTRL_STATUS_MASK 0x30
+#define TPS65917_SMPS5_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_SMPS5_CTRL_MODE_SLEEP_MASK 0x0C
+#define TPS65917_SMPS5_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_SMPS5_CTRL_MODE_ACTIVE_MASK 0x03
+#define TPS65917_SMPS5_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for SMPS5_VOLTAGE */
+#define TPS65917_SMPS5_VOLTAGE_RANGE 0x80
+#define TPS65917_SMPS5_VOLTAGE_RANGE_SHIFT 0x07
+#define TPS65917_SMPS5_VOLTAGE_VSEL_MASK 0x7F
+#define TPS65917_SMPS5_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for SMPS_CTRL */
+#define TPS65917_SMPS_CTRL_SMPS1_SMPS12_EN 0x10
+#define TPS65917_SMPS_CTRL_SMPS1_SMPS12_EN_SHIFT 0x04
+#define TPS65917_SMPS_CTRL_SMPS12_PHASE_CTRL 0x03
+#define TPS65917_SMPS_CTRL_SMPS12_PHASE_CTRL_SHIFT 0x00
+
+/* Bit definitions for SMPS_PD_CTRL */
+#define TPS65917_SMPS_PD_CTRL_SMPS5 0x40
+#define TPS65917_SMPS_PD_CTRL_SMPS5_SHIFT 0x06
+#define TPS65917_SMPS_PD_CTRL_SMPS4 0x10
+#define TPS65917_SMPS_PD_CTRL_SMPS4_SHIFT 0x04
+#define TPS65917_SMPS_PD_CTRL_SMPS3 0x08
+#define TPS65917_SMPS_PD_CTRL_SMPS3_SHIFT 0x03
+#define TPS65917_SMPS_PD_CTRL_SMPS2 0x02
+#define TPS65917_SMPS_PD_CTRL_SMPS2_SHIFT 0x01
+#define TPS65917_SMPS_PD_CTRL_SMPS1 0x01
+#define TPS65917_SMPS_PD_CTRL_SMPS1_SHIFT 0x00
+
+/* Bit definitions for SMPS_THERMAL_EN */
+#define TPS65917_SMPS_THERMAL_EN_SMPS5 0x40
+#define TPS65917_SMPS_THERMAL_EN_SMPS5_SHIFT 0x06
+#define TPS65917_SMPS_THERMAL_EN_SMPS3 0x08
+#define TPS65917_SMPS_THERMAL_EN_SMPS3_SHIFT 0x03
+#define TPS65917_SMPS_THERMAL_EN_SMPS12 0x01
+#define TPS65917_SMPS_THERMAL_EN_SMPS12_SHIFT 0x00
+
+/* Bit definitions for SMPS_THERMAL_STATUS */
+#define TPS65917_SMPS_THERMAL_STATUS_SMPS5 0x40
+#define TPS65917_SMPS_THERMAL_STATUS_SMPS5_SHIFT 0x06
+#define TPS65917_SMPS_THERMAL_STATUS_SMPS3 0x08
+#define TPS65917_SMPS_THERMAL_STATUS_SMPS3_SHIFT 0x03
+#define TPS65917_SMPS_THERMAL_STATUS_SMPS12 0x01
+#define TPS65917_SMPS_THERMAL_STATUS_SMPS12_SHIFT 0x00
+
+/* Bit definitions for SMPS_SHORT_STATUS */
+#define TPS65917_SMPS_SHORT_STATUS_SMPS5 0x40
+#define TPS65917_SMPS_SHORT_STATUS_SMPS5_SHIFT 0x06
+#define TPS65917_SMPS_SHORT_STATUS_SMPS4 0x10
+#define TPS65917_SMPS_SHORT_STATUS_SMPS4_SHIFT 0x04
+#define TPS65917_SMPS_SHORT_STATUS_SMPS3 0x08
+#define TPS65917_SMPS_SHORT_STATUS_SMPS3_SHIFT 0x03
+#define TPS65917_SMPS_SHORT_STATUS_SMPS2 0x02
+#define TPS65917_SMPS_SHORT_STATUS_SMPS2_SHIFT 0x01
+#define TPS65917_SMPS_SHORT_STATUS_SMPS1 0x01
+#define TPS65917_SMPS_SHORT_STATUS_SMPS1_SHIFT 0x00
+
+/* Bit definitions for SMPS_NEGATIVE_CURRENT_LIMIT_EN */
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS5 0x40
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS5_SHIFT 0x06
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS4 0x10
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS4_SHIFT 0x04
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3 0x08
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3_SHIFT 0x03
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS2 0x02
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS2_SHIFT 0x01
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS1 0x01
+#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS1_SHIFT 0x00
+
+/* Bit definitions for SMPS_POWERGOOD_MASK1 */
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS5 0x40
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS5_SHIFT 0x06
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS4 0x10
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS4_SHIFT 0x04
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS3 0x08
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS3_SHIFT 0x03
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS2 0x02
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS2_SHIFT 0x01
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS1 0x01
+#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS1_SHIFT 0x00
+
+/* Bit definitions for SMPS_POWERGOOD_MASK2 */
+#define TPS65917_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT 0x80
+#define TPS65917_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT_SHIFT 0x07
+#define TPS65917_SMPS_POWERGOOD_MASK2_OVC_ALARM 0x04
+#define TPS65917_SMPS_POWERGOOD_MASK2_OVC_ALARM_SHIFT 0x10
+
+/* Bit definitions for SMPS_PLL_CTRL */
+#define TPS65917_SMPS_PLL_CTRL_PLL_EN_PLL_BYPASS_SHIFT 0x08
+#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_EN_BYPASS 0x03
+#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_BYPASS_CLK_SHIFT 0x04
+#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_BYPASS_CLK 0x02
+
+/* Registers for function LDO */
+#define TPS65917_LDO1_CTRL 0x00
+#define TPS65917_LDO1_VOLTAGE 0x01
+#define TPS65917_LDO2_CTRL 0x02
+#define TPS65917_LDO2_VOLTAGE 0x03
+#define TPS65917_LDO3_CTRL 0x04
+#define TPS65917_LDO3_VOLTAGE 0x05
+#define TPS65917_LDO4_CTRL 0x0E
+#define TPS65917_LDO4_VOLTAGE 0x0F
+#define TPS65917_LDO5_CTRL 0x12
+#define TPS65917_LDO5_VOLTAGE 0x13
+#define TPS65917_LDO_PD_CTRL1 0x1B
+#define TPS65917_LDO_PD_CTRL2 0x1C
+#define TPS65917_LDO_SHORT_STATUS1 0x1D
+#define TPS65917_LDO_SHORT_STATUS2 0x1E
+#define TPS65917_LDO_PD_CTRL3 0x2D
+#define TPS65917_LDO_SHORT_STATUS3 0x2E
+
+/* Bit definitions for LDO1_CTRL */
+#define TPS65917_LDO1_CTRL_WR_S 0x80
+#define TPS65917_LDO1_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_LDO1_CTRL_BYPASS_EN 0x40
+#define TPS65917_LDO1_CTRL_BYPASS_EN_SHIFT 0x06
+#define TPS65917_LDO1_CTRL_STATUS 0x10
+#define TPS65917_LDO1_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_LDO1_CTRL_MODE_SLEEP 0x04
+#define TPS65917_LDO1_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_LDO1_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_LDO1_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO1_VOLTAGE */
+#define TPS65917_LDO1_VOLTAGE_VSEL_MASK 0x2F
+#define TPS65917_LDO1_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO2_CTRL */
+#define TPS65917_LDO2_CTRL_WR_S 0x80
+#define TPS65917_LDO2_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_LDO2_CTRL_BYPASS_EN 0x40
+#define TPS65917_LDO2_CTRL_BYPASS_EN_SHIFT 0x06
+#define TPS65917_LDO2_CTRL_STATUS 0x10
+#define TPS65917_LDO2_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_LDO2_CTRL_MODE_SLEEP 0x04
+#define TPS65917_LDO2_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_LDO2_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_LDO2_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO2_VOLTAGE */
+#define TPS65917_LDO2_VOLTAGE_VSEL_MASK 0x2F
+#define TPS65917_LDO2_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO3_CTRL */
+#define TPS65917_LDO3_CTRL_WR_S 0x80
+#define TPS65917_LDO3_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_LDO3_CTRL_STATUS 0x10
+#define TPS65917_LDO3_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_LDO3_CTRL_MODE_SLEEP 0x04
+#define TPS65917_LDO3_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_LDO3_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_LDO3_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO3_VOLTAGE */
+#define TPS65917_LDO3_VOLTAGE_VSEL_MASK 0x2F
+#define TPS65917_LDO3_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO4_CTRL */
+#define TPS65917_LDO4_CTRL_WR_S 0x80
+#define TPS65917_LDO4_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_LDO4_CTRL_STATUS 0x10
+#define TPS65917_LDO4_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_LDO4_CTRL_MODE_SLEEP 0x04
+#define TPS65917_LDO4_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_LDO4_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_LDO4_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO4_VOLTAGE */
+#define TPS65917_LDO4_VOLTAGE_VSEL_MASK 0x2F
+#define TPS65917_LDO4_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO5_CTRL */
+#define TPS65917_LDO5_CTRL_WR_S 0x80
+#define TPS65917_LDO5_CTRL_WR_S_SHIFT 0x07
+#define TPS65917_LDO5_CTRL_STATUS 0x10
+#define TPS65917_LDO5_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_LDO5_CTRL_MODE_SLEEP 0x04
+#define TPS65917_LDO5_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_LDO5_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_LDO5_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for LDO5_VOLTAGE */
+#define TPS65917_LDO5_VOLTAGE_VSEL_MASK 0x2F
+#define TPS65917_LDO5_VOLTAGE_VSEL_SHIFT 0x00
+
+/* Bit definitions for LDO_PD_CTRL1 */
+#define TPS65917_LDO_PD_CTRL1_LDO4 0x80
+#define TPS65917_LDO_PD_CTRL1_LDO4_SHIFT 0x07
+#define TPS65917_LDO_PD_CTRL1_LDO2 0x02
+#define TPS65917_LDO_PD_CTRL1_LDO2_SHIFT 0x01
+#define TPS65917_LDO_PD_CTRL1_LDO1 0x01
+#define TPS65917_LDO_PD_CTRL1_LDO1_SHIFT 0x00
+
+/* Bit definitions for LDO_PD_CTRL2 */
+#define TPS65917_LDO_PD_CTRL2_LDO3 0x04
+#define TPS65917_LDO_PD_CTRL2_LDO3_SHIFT 0x02
+#define TPS65917_LDO_PD_CTRL2_LDO5 0x02
+#define TPS65917_LDO_PD_CTRL2_LDO5_SHIFT 0x01
+
+/* Bit definitions for LDO_PD_CTRL3 */
+#define TPS65917_LDO_PD_CTRL2_LDOVANA 0x80
+#define TPS65917_LDO_PD_CTRL2_LDOVANA_SHIFT 0x07
+
+/* Bit definitions for LDO_SHORT_STATUS1 */
+#define TPS65917_LDO_SHORT_STATUS1_LDO4 0x80
+#define TPS65917_LDO_SHORT_STATUS1_LDO4_SHIFT 0x07
+#define TPS65917_LDO_SHORT_STATUS1_LDO2 0x02
+#define TPS65917_LDO_SHORT_STATUS1_LDO2_SHIFT 0x01
+#define TPS65917_LDO_SHORT_STATUS1_LDO1 0x01
+#define TPS65917_LDO_SHORT_STATUS1_LDO1_SHIFT 0x00
+
+/* Bit definitions for LDO_SHORT_STATUS2 */
+#define TPS65917_LDO_SHORT_STATUS2_LDO3 0x04
+#define TPS65917_LDO_SHORT_STATUS2_LDO3_SHIFT 0x02
+#define TPS65917_LDO_SHORT_STATUS2_LDO5 0x02
+#define TPS65917_LDO_SHORT_STATUS2_LDO5_SHIFT 0x01
+
+/* Bit definitions for LDO_SHORT_STATUS3 */
+#define TPS65917_LDO_SHORT_STATUS2_LDOVANA 0x80
+#define TPS65917_LDO_SHORT_STATUS2_LDOVANA_SHIFT 0x07
+
+/* Bit definitions for REGEN1_CTRL */
+#define TPS65917_REGEN1_CTRL_STATUS 0x10
+#define TPS65917_REGEN1_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_REGEN1_CTRL_MODE_SLEEP 0x04
+#define TPS65917_REGEN1_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_REGEN1_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_REGEN1_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for PLLEN_CTRL */
+#define TPS65917_PLLEN_CTRL_STATUS 0x10
+#define TPS65917_PLLEN_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_PLLEN_CTRL_MODE_SLEEP 0x04
+#define TPS65917_PLLEN_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_PLLEN_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_PLLEN_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for REGEN2_CTRL */
+#define TPS65917_REGEN2_CTRL_STATUS 0x10
+#define TPS65917_REGEN2_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_REGEN2_CTRL_MODE_SLEEP 0x04
+#define TPS65917_REGEN2_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_REGEN2_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_REGEN2_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Bit definitions for NSLEEP_RES_ASSIGN */
+#define TPS65917_NSLEEP_RES_ASSIGN_PLL_EN 0x08
+#define TPS65917_NSLEEP_RES_ASSIGN_PLL_EN_SHIFT 0x03
+#define TPS65917_NSLEEP_RES_ASSIGN_REGEN3 0x04
+#define TPS65917_NSLEEP_RES_ASSIGN_REGEN3_SHIFT 0x02
+#define TPS65917_NSLEEP_RES_ASSIGN_REGEN2 0x02
+#define TPS65917_NSLEEP_RES_ASSIGN_REGEN2_SHIFT 0x01
+#define TPS65917_NSLEEP_RES_ASSIGN_REGEN1 0x01
+#define TPS65917_NSLEEP_RES_ASSIGN_REGEN1_SHIFT 0x00
+
+/* Bit definitions for NSLEEP_SMPS_ASSIGN */
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS5 0x40
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS5_SHIFT 0x06
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS4 0x10
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS4_SHIFT 0x04
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS3 0x08
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS3_SHIFT 0x03
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS2 0x02
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS2_SHIFT 0x01
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS1 0x01
+#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS1_SHIFT 0x00
+
+/* Bit definitions for NSLEEP_LDO_ASSIGN1 */
+#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO4 0x80
+#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO4_SHIFT 0x07
+#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO2 0x02
+#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO2_SHIFT 0x01
+#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO1 0x01
+#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO1_SHIFT 0x00
+
+/* Bit definitions for NSLEEP_LDO_ASSIGN2 */
+#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO3 0x04
+#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO3_SHIFT 0x02
+#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO5 0x02
+#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO5_SHIFT 0x01
+
+/* Bit definitions for ENABLE1_RES_ASSIGN */
+#define TPS65917_ENABLE1_RES_ASSIGN_PLLEN 0x08
+#define TPS65917_ENABLE1_RES_ASSIGN_PLLEN_SHIFT 0x03
+#define TPS65917_ENABLE1_RES_ASSIGN_REGEN3 0x04
+#define TPS65917_ENABLE1_RES_ASSIGN_REGEN3_SHIFT 0x02
+#define TPS65917_ENABLE1_RES_ASSIGN_REGEN2 0x02
+#define TPS65917_ENABLE1_RES_ASSIGN_REGEN2_SHIFT 0x01
+#define TPS65917_ENABLE1_RES_ASSIGN_REGEN1 0x01
+#define TPS65917_ENABLE1_RES_ASSIGN_REGEN1_SHIFT 0x00
+
+/* Bit definitions for ENABLE1_SMPS_ASSIGN */
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS5 0x40
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS5_SHIFT 0x06
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS4 0x10
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS4_SHIFT 0x04
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS3 0x08
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS3_SHIFT 0x03
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS2 0x02
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS2_SHIFT 0x01
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS1 0x01
+#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS1_SHIFT 0x00
+
+/* Bit definitions for ENABLE1_LDO_ASSIGN1 */
+#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO4 0x80
+#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO4_SHIFT 0x07
+#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO2 0x02
+#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO2_SHIFT 0x01
+#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO1 0x01
+#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO1_SHIFT 0x00
+
+/* Bit definitions for ENABLE1_LDO_ASSIGN2 */
+#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO3 0x04
+#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO3_SHIFT 0x02
+#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO5 0x02
+#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO5_SHIFT 0x01
+
+/* Bit definitions for ENABLE2_RES_ASSIGN */
+#define TPS65917_ENABLE2_RES_ASSIGN_PLLEN 0x08
+#define TPS65917_ENABLE2_RES_ASSIGN_PLLEN_SHIFT 0x03
+#define TPS65917_ENABLE2_RES_ASSIGN_REGEN3 0x04
+#define TPS65917_ENABLE2_RES_ASSIGN_REGEN3_SHIFT 0x02
+#define TPS65917_ENABLE2_RES_ASSIGN_REGEN2 0x02
+#define TPS65917_ENABLE2_RES_ASSIGN_REGEN2_SHIFT 0x01
+#define TPS65917_ENABLE2_RES_ASSIGN_REGEN1 0x01
+#define TPS65917_ENABLE2_RES_ASSIGN_REGEN1_SHIFT 0x00
+
+/* Bit definitions for ENABLE2_SMPS_ASSIGN */
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS5 0x40
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS5_SHIFT 0x06
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS4 0x10
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS4_SHIFT 0x04
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS3 0x08
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS3_SHIFT 0x03
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS2 0x02
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS2_SHIFT 0x01
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS1 0x01
+#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS1_SHIFT 0x00
+
+/* Bit definitions for ENABLE2_LDO_ASSIGN1 */
+#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO4 0x80
+#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO4_SHIFT 0x07
+#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO2 0x02
+#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO2_SHIFT 0x01
+#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO1 0x01
+#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO1_SHIFT 0x00
+
+/* Bit definitions for ENABLE2_LDO_ASSIGN2 */
+#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO3 0x04
+#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO3_SHIFT 0x02
+#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO5 0x02
+#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO5_SHIFT 0x01
+
+/* Bit definitions for REGEN3_CTRL */
+#define TPS65917_REGEN3_CTRL_STATUS 0x10
+#define TPS65917_REGEN3_CTRL_STATUS_SHIFT 0x04
+#define TPS65917_REGEN3_CTRL_MODE_SLEEP 0x04
+#define TPS65917_REGEN3_CTRL_MODE_SLEEP_SHIFT 0x02
+#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01
+#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00
+
+/* Registers for function RESOURCE */
+#define TPS65917_REGEN1_CTRL 0x2
+#define TPS65917_PLLEN_CTRL 0x3
+#define TPS65917_NSLEEP_RES_ASSIGN 0x6
+#define TPS65917_NSLEEP_SMPS_ASSIGN 0x7
+#define TPS65917_NSLEEP_LDO_ASSIGN1 0x8
+#define TPS65917_NSLEEP_LDO_ASSIGN2 0x9
+#define TPS65917_ENABLE1_RES_ASSIGN 0xA
+#define TPS65917_ENABLE1_SMPS_ASSIGN 0xB
+#define TPS65917_ENABLE1_LDO_ASSIGN1 0xC
+#define TPS65917_ENABLE1_LDO_ASSIGN2 0xD
+#define TPS65917_ENABLE2_RES_ASSIGN 0xE
+#define TPS65917_ENABLE2_SMPS_ASSIGN 0xF
+#define TPS65917_ENABLE2_LDO_ASSIGN1 0x10
+#define TPS65917_ENABLE2_LDO_ASSIGN2 0x11
+#define TPS65917_REGEN2_CTRL 0x12
+#define TPS65917_REGEN3_CTRL 0x13
+
+static inline int palmas_read(struct palmas *palmas, unsigned int base,
+ unsigned int reg, unsigned int *val)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_read(palmas->regmap[slave_id], addr, val);
+}
+
+static inline int palmas_write(struct palmas *palmas, unsigned int base,
+ unsigned int reg, unsigned int value)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_write(palmas->regmap[slave_id], addr, value);
+}
+
+static inline int palmas_bulk_write(struct palmas *palmas, unsigned int base,
+ unsigned int reg, const void *val, size_t val_count)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_bulk_write(palmas->regmap[slave_id], addr,
+ val, val_count);
+}
+
+static inline int palmas_bulk_read(struct palmas *palmas, unsigned int base,
+ unsigned int reg, void *val, size_t val_count)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_bulk_read(palmas->regmap[slave_id], addr,
+ val, val_count);
+}
+
+static inline int palmas_update_bits(struct palmas *palmas, unsigned int base,
+ unsigned int reg, unsigned int mask, unsigned int val)
+{
+ unsigned int addr = PALMAS_BASE_TO_REG(base, reg);
+ int slave_id = PALMAS_BASE_TO_SLAVE(base);
+
+ return regmap_update_bits(palmas->regmap[slave_id], addr, mask, val);
+}
+
+static inline int palmas_irq_get_virq(struct palmas *palmas, int irq)
+{
+ return regmap_irq_get_virq(palmas->irq_data, irq);
+}
+
+int palmas_ext_control_req_config(struct palmas *palmas,
+ enum palmas_external_requestor_id ext_control_req_id,
+ int ext_ctrl, bool enable);
+
+#endif /* __LINUX_MFD_PALMAS_H */
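
The inline helpers above route register accesses through the per-slave regmap. As a minimal usage sketch (not part of the patch, and assuming the TPS65917 SMPS register offsets such as TPS65917_SMPS1_CTRL and the PALMAS_SMPS_BASE constant defined earlier in this header), a client driver could update only the ACTIVE-mode field of SMPS1 like this:

static int tps65917_smps1_set_active_mode(struct palmas *palmas,
					   unsigned int mode)
{
	/* Modify only the MODE_ACTIVE field, leaving the other bits intact. */
	return palmas_update_bits(palmas, PALMAS_SMPS_BASE, TPS65917_SMPS1_CTRL,
				  TPS65917_SMPS1_CTRL_MODE_ACTIVE_MASK,
				  mode << TPS65917_SMPS1_CTRL_MODE_ACTIVE_SHIFT);
}
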
diff --git a/include/linux/mfd/pcf50633/adc.h b/include/linux/mfd/pcf50633/adc.h
new file mode 100644
index 000000000..b35e62801
--- /dev/null
+++ b/include/linux/mfd/pcf50633/adc.h
@@ -0,0 +1,73 @@
+/*
+ * adc.h -- Driver for NXP PCF50633 ADC
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_PCF50633_ADC_H
+#define __LINUX_MFD_PCF50633_ADC_H
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/platform_device.h>
+
+/* ADC Registers */
+#define PCF50633_REG_ADCC3 0x52
+#define PCF50633_REG_ADCC2 0x53
+#define PCF50633_REG_ADCC1 0x54
+#define PCF50633_REG_ADCS1 0x55
+#define PCF50633_REG_ADCS2 0x56
+#define PCF50633_REG_ADCS3 0x57
+
+#define PCF50633_ADCC1_ADCSTART 0x01
+#define PCF50633_ADCC1_RES_8BIT 0x02
+#define PCF50633_ADCC1_RES_10BIT 0x00
+#define PCF50633_ADCC1_AVERAGE_NO 0x00
+#define PCF50633_ADCC1_AVERAGE_4 0x04
+#define PCF50633_ADCC1_AVERAGE_8 0x08
+#define PCF50633_ADCC1_AVERAGE_16 0x0c
+#define PCF50633_ADCC1_MUX_BATSNS_RES 0x00
+#define PCF50633_ADCC1_MUX_BATSNS_SUBTR 0x10
+#define PCF50633_ADCC1_MUX_ADCIN2_RES 0x20
+#define PCF50633_ADCC1_MUX_ADCIN2_SUBTR 0x30
+#define PCF50633_ADCC1_MUX_BATTEMP 0x60
+#define PCF50633_ADCC1_MUX_ADCIN1 0x70
+#define PCF50633_ADCC1_AVERAGE_MASK 0x0c
+#define PCF50633_ADCC1_ADCMUX_MASK 0xf0
+
+#define PCF50633_ADCC2_RATIO_NONE 0x00
+#define PCF50633_ADCC2_RATIO_BATTEMP 0x01
+#define PCF50633_ADCC2_RATIO_ADCIN1 0x02
+#define PCF50633_ADCC2_RATIO_BOTH 0x03
+#define PCF50633_ADCC2_RATIOSETTL_100US 0x04
+
+#define PCF50633_ADCC3_ACCSW_EN 0x01
+#define PCF50633_ADCC3_NTCSW_EN 0x04
+#define PCF50633_ADCC3_RES_DIV_TWO 0x10
+#define PCF50633_ADCC3_RES_DIV_THREE 0x00
+
+#define PCF50633_ADCS3_REF_NTCSW 0x00
+#define PCF50633_ADCS3_REF_ACCSW 0x10
+#define PCF50633_ADCS3_REF_2V0 0x20
+#define PCF50633_ADCS3_REF_VISA 0x30
+#define PCF50633_ADCS3_REF_2V0_2 0x70
+#define PCF50633_ADCS3_ADCRDY 0x80
+
+#define PCF50633_ADCS3_ADCDAT1L_MASK 0x03
+#define PCF50633_ADCS3_ADCDAT2L_MASK 0x0c
+#define PCF50633_ADCS3_ADCDAT2L_SHIFT 2
+#define PCF50633_ASCS3_REF_MASK 0x70
+
+extern int
+pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
+ void (*callback)(struct pcf50633 *, void *, int),
+ void *callback_param);
+extern int
+pcf50633_adc_sync_read(struct pcf50633 *pcf, int mux, int avg);
+
+#endif /* __LINUX_PCF50633_ADC_H */
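
A brief usage sketch (not part of the patch): reading the battery-sense channel synchronously with 16-sample averaging through the API declared above. Converting the raw result to millivolts is board specific and is left out here.

static int example_read_battery_raw(struct pcf50633 *pcf)
{
	/* Returns the raw ADC result on success or a negative errno. */
	return pcf50633_adc_sync_read(pcf, PCF50633_ADCC1_MUX_BATSNS_RES,
				      PCF50633_ADCC1_AVERAGE_16);
}
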
diff --git a/include/linux/mfd/pcf50633/backlight.h b/include/linux/mfd/pcf50633/backlight.h
new file mode 100644
index 000000000..83747e217
--- /dev/null
+++ b/include/linux/mfd/pcf50633/backlight.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * PCF50633 backlight device driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_MFD_PCF50633_BACKLIGHT
+#define __LINUX_MFD_PCF50633_BACKLIGHT
+
+/*
+ * @default_brightness: Backlight brightness is initialized to this value.
+ *
+ * Brightness to be used after the driver has been probed.
+ * Valid range: 0-63.
+ *
+ * @default_brightness_limit: The actual brightness is limited by this value.
+ *
+ * Brightness limit to be used after the driver has been probed. This is
+ * useful when it is not known how much power is available for the backlight
+ * during probe.
+ * Valid range: 0-63. Can be changed later with pcf50633_bl_set_brightness_limit.
+ *
+ * @ramp_time: Display ramp time when changing brightness
+ *
+ * When changing the backlight's brightness the change is not instant;
+ * instead it fades smoothly from one state to another. This value specifies
+ * how long the fade should take. The lower the value, the longer the fade.
+ * Valid range: 0-255.
+ */
+struct pcf50633_bl_platform_data {
+ unsigned int default_brightness;
+ unsigned int default_brightness_limit;
+ uint8_t ramp_time;
+};
+
+
+struct pcf50633;
+
+int pcf50633_bl_set_brightness_limit(struct pcf50633 *pcf, unsigned int limit);
+
+#endif
+
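
A hypothetical board-code sketch (not part of the patch) showing how the platform data documented above might be filled in; the values are arbitrary examples within the documented ranges.

static struct pcf50633_bl_platform_data example_bl_pdata = {
	.default_brightness		= 0x3f,	/* full brightness, range 0-63 */
	.default_brightness_limit	= 0x3f,	/* no extra limit at probe time */
	.ramp_time			= 5,	/* fade setting, range 0-255 */
};
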
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
new file mode 100644
index 000000000..a80840752
--- /dev/null
+++ b/include/linux/mfd/pcf50633/core.h
@@ -0,0 +1,238 @@
+/*
+ * core.h -- Core driver for NXP PCF50633
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_PCF50633_CORE_H
+#define __LINUX_MFD_PCF50633_CORE_H
+
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/pcf50633/backlight.h>
+
+struct pcf50633;
+struct regmap;
+
+#define PCF50633_NUM_REGULATORS 11
+
+struct pcf50633_platform_data {
+ struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS];
+
+ char **batteries;
+ int num_batteries;
+
+ /*
+ * Should be set according to the reference resistor used; see
+ * I_{ch(ref)} charger reference current in the pcf50633 User
+ * Manual.
+ */
+ int charger_reference_current_ma;
+
+ /* Callbacks */
+ void (*probe_done)(struct pcf50633 *);
+ void (*mbc_event_callback)(struct pcf50633 *, int);
+ void (*regulator_registered)(struct pcf50633 *, int);
+ void (*force_shutdown)(struct pcf50633 *);
+
+ u8 resumers[5];
+
+ struct pcf50633_bl_platform_data *backlight_data;
+};
+
+struct pcf50633_irq {
+ void (*handler) (int, void *);
+ void *data;
+};
+
+int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
+ void (*handler) (int, void *), void *data);
+int pcf50633_free_irq(struct pcf50633 *pcf, int irq);
+
+int pcf50633_irq_mask(struct pcf50633 *pcf, int irq);
+int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq);
+int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq);
+
+int pcf50633_read_block(struct pcf50633 *, u8 reg,
+ int nr_regs, u8 *data);
+int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
+ int nr_regs, u8 *data);
+u8 pcf50633_reg_read(struct pcf50633 *, u8 reg);
+int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val);
+
+int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val);
+int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits);
+
+/* Interrupt registers */
+
+#define PCF50633_REG_INT1 0x02
+#define PCF50633_REG_INT2 0x03
+#define PCF50633_REG_INT3 0x04
+#define PCF50633_REG_INT4 0x05
+#define PCF50633_REG_INT5 0x06
+
+#define PCF50633_REG_INT1M 0x07
+#define PCF50633_REG_INT2M 0x08
+#define PCF50633_REG_INT3M 0x09
+#define PCF50633_REG_INT4M 0x0a
+#define PCF50633_REG_INT5M 0x0b
+
+enum {
+ /* Chip IRQs */
+ PCF50633_IRQ_ADPINS,
+ PCF50633_IRQ_ADPREM,
+ PCF50633_IRQ_USBINS,
+ PCF50633_IRQ_USBREM,
+ PCF50633_IRQ_RESERVED1,
+ PCF50633_IRQ_RESERVED2,
+ PCF50633_IRQ_ALARM,
+ PCF50633_IRQ_SECOND,
+ PCF50633_IRQ_ONKEYR,
+ PCF50633_IRQ_ONKEYF,
+ PCF50633_IRQ_EXTON1R,
+ PCF50633_IRQ_EXTON1F,
+ PCF50633_IRQ_EXTON2R,
+ PCF50633_IRQ_EXTON2F,
+ PCF50633_IRQ_EXTON3R,
+ PCF50633_IRQ_EXTON3F,
+ PCF50633_IRQ_BATFULL,
+ PCF50633_IRQ_CHGHALT,
+ PCF50633_IRQ_THLIMON,
+ PCF50633_IRQ_THLIMOFF,
+ PCF50633_IRQ_USBLIMON,
+ PCF50633_IRQ_USBLIMOFF,
+ PCF50633_IRQ_ADCRDY,
+ PCF50633_IRQ_ONKEY1S,
+ PCF50633_IRQ_LOWSYS,
+ PCF50633_IRQ_LOWBAT,
+ PCF50633_IRQ_HIGHTMP,
+ PCF50633_IRQ_AUTOPWRFAIL,
+ PCF50633_IRQ_DWN1PWRFAIL,
+ PCF50633_IRQ_DWN2PWRFAIL,
+ PCF50633_IRQ_LEDPWRFAIL,
+ PCF50633_IRQ_LEDOVP,
+ PCF50633_IRQ_LDO1PWRFAIL,
+ PCF50633_IRQ_LDO2PWRFAIL,
+ PCF50633_IRQ_LDO3PWRFAIL,
+ PCF50633_IRQ_LDO4PWRFAIL,
+ PCF50633_IRQ_LDO5PWRFAIL,
+ PCF50633_IRQ_LDO6PWRFAIL,
+ PCF50633_IRQ_HCLDOPWRFAIL,
+ PCF50633_IRQ_HCLDOOVL,
+
+ /* Always last */
+ PCF50633_NUM_IRQ,
+};
+
+struct pcf50633 {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct pcf50633_platform_data *pdata;
+ int irq;
+ struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ];
+ struct work_struct irq_work;
+ struct workqueue_struct *work_queue;
+ struct mutex lock;
+
+ u8 mask_regs[5];
+
+ u8 suspend_irq_masks[5];
+ u8 resume_reason[5];
+ int is_suspended;
+
+ int onkey1s_held;
+
+ struct platform_device *rtc_pdev;
+ struct platform_device *mbc_pdev;
+ struct platform_device *adc_pdev;
+ struct platform_device *input_pdev;
+ struct platform_device *bl_pdev;
+ struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS];
+};
+
+enum pcf50633_reg_int1 {
+ PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */
+ PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */
+ PCF50633_INT1_USBINS = 0x04, /* USB inserted */
+ PCF50633_INT1_USBREM = 0x08, /* USB removed */
+ /* reserved */
+ PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */
+ PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */
+};
+
+enum pcf50633_reg_int2 {
+ PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */
+ PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */
+ PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */
+ PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */
+ PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */
+ PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */
+ PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */
+ PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */
+};
+
+enum pcf50633_reg_int3 {
+ PCF50633_INT3_BATFULL = 0x01, /* Battery full */
+ PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */
+ PCF50633_INT3_THLIMON = 0x04,
+ PCF50633_INT3_THLIMOFF = 0x08,
+ PCF50633_INT3_USBLIMON = 0x10,
+ PCF50633_INT3_USBLIMOFF = 0x20,
+ PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */
+ PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */
+};
+
+enum pcf50633_reg_int4 {
+ PCF50633_INT4_LOWSYS = 0x01,
+ PCF50633_INT4_LOWBAT = 0x02,
+ PCF50633_INT4_HIGHTMP = 0x04,
+ PCF50633_INT4_AUTOPWRFAIL = 0x08,
+ PCF50633_INT4_DWN1PWRFAIL = 0x10,
+ PCF50633_INT4_DWN2PWRFAIL = 0x20,
+ PCF50633_INT4_LEDPWRFAIL = 0x40,
+ PCF50633_INT4_LEDOVP = 0x80,
+};
+
+enum pcf50633_reg_int5 {
+ PCF50633_INT5_LDO1PWRFAIL = 0x01,
+ PCF50633_INT5_LDO2PWRFAIL = 0x02,
+ PCF50633_INT5_LDO3PWRFAIL = 0x04,
+ PCF50633_INT5_LDO4PWRFAIL = 0x08,
+ PCF50633_INT5_LDO5PWRFAIL = 0x10,
+ PCF50633_INT5_LDO6PWRFAIL = 0x20,
+ PCF50633_INT5_HCLDOPWRFAIL = 0x40,
+ PCF50633_INT5_HCLDOOVL = 0x80,
+};
+
+/* misc. registers */
+#define PCF50633_REG_OOCSHDWN 0x0c
+
+/* LED registers */
+#define PCF50633_REG_LEDOUT 0x28
+#define PCF50633_REG_LEDENA 0x29
+#define PCF50633_REG_LEDCTL 0x2a
+#define PCF50633_REG_LEDDIM 0x2b
+
+static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
+void pcf50633_irq_free(struct pcf50633 *pcf);
+#ifdef CONFIG_PM
+int pcf50633_irq_suspend(struct pcf50633 *pcf);
+int pcf50633_irq_resume(struct pcf50633 *pcf);
+#endif
+
+#endif
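
An illustrative sketch (not part of the patch) of the IRQ helper API declared above: a sub-driver registers a handler for the USB-insertion interrupt, passing the pcf50633 instance back as the callback data.

static void example_usbins_handler(int irq, void *data)
{
	struct pcf50633 *pcf = data;

	dev_info(pcf->dev, "USB cable inserted\n");
}

static int example_register_usbins(struct pcf50633 *pcf)
{
	return pcf50633_register_irq(pcf, PCF50633_IRQ_USBINS,
				     example_usbins_handler, pcf);
}
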
diff --git a/include/linux/mfd/pcf50633/gpio.h b/include/linux/mfd/pcf50633/gpio.h
new file mode 100644
index 000000000..a42b845ef
--- /dev/null
+++ b/include/linux/mfd/pcf50633/gpio.h
@@ -0,0 +1,52 @@
+/*
+ * gpio.h -- GPIO driver for NXP PCF50633
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_PCF50633_GPIO_H
+#define __LINUX_MFD_PCF50633_GPIO_H
+
+#include <linux/mfd/pcf50633/core.h>
+
+#define PCF50633_GPIO1 1
+#define PCF50633_GPIO2 2
+#define PCF50633_GPIO3 3
+#define PCF50633_GPO 4
+
+#define PCF50633_REG_GPIO1CFG 0x14
+#define PCF50633_REG_GPIO2CFG 0x15
+#define PCF50633_REG_GPIO3CFG 0x16
+#define PCF50633_REG_GPOCFG 0x17
+
+#define PCF50633_GPOCFG_GPOSEL_MASK 0x07
+
+enum pcf50633_reg_gpocfg {
+ PCF50633_GPOCFG_GPOSEL_0 = 0x00,
+ PCF50633_GPOCFG_GPOSEL_LED_NFET = 0x01,
+ PCF50633_GPOCFG_GPOSEL_SYSxOK = 0x02,
+ PCF50633_GPOCFG_GPOSEL_CLK32K = 0x03,
+ PCF50633_GPOCFG_GPOSEL_ADAPUSB = 0x04,
+ PCF50633_GPOCFG_GPOSEL_USBxOK = 0x05,
+ PCF50633_GPOCFG_GPOSEL_ACTPH4 = 0x06,
+ PCF50633_GPOCFG_GPOSEL_1 = 0x07,
+ PCF50633_GPOCFG_GPOSEL_INVERSE = 0x08,
+};
+
+int pcf50633_gpio_set(struct pcf50633 *pcf, int gpio, u8 val);
+u8 pcf50633_gpio_get(struct pcf50633 *pcf, int gpio);
+
+int pcf50633_gpio_invert_set(struct pcf50633 *, int gpio, int invert);
+int pcf50633_gpio_invert_get(struct pcf50633 *pcf, int gpio);
+
+int pcf50633_gpio_power_supply_set(struct pcf50633 *,
+ int gpio, int regulator, int on);
+#endif /* __LINUX_MFD_PCF50633_GPIO_H */
+
+
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
new file mode 100644
index 000000000..df4f5fa88
--- /dev/null
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -0,0 +1,134 @@
+/*
+ * mbc.h -- Driver for NXP PCF50633 Main Battery Charger
+ *
+ * (C) 2006-2008 by Openmoko, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_PCF50633_MBC_H
+#define __LINUX_MFD_PCF50633_MBC_H
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/platform_device.h>
+
+#define PCF50633_REG_MBCC1 0x43
+#define PCF50633_REG_MBCC2 0x44
+#define PCF50633_REG_MBCC3 0x45
+#define PCF50633_REG_MBCC4 0x46
+#define PCF50633_REG_MBCC5 0x47
+#define PCF50633_REG_MBCC6 0x48
+#define PCF50633_REG_MBCC7 0x49
+#define PCF50633_REG_MBCC8 0x4a
+#define PCF50633_REG_MBCS1 0x4b
+#define PCF50633_REG_MBCS2 0x4c
+#define PCF50633_REG_MBCS3 0x4d
+
+enum pcf50633_reg_mbcc1 {
+ PCF50633_MBCC1_CHGENA = 0x01, /* Charger enable */
+ PCF50633_MBCC1_AUTOSTOP = 0x02,
+ PCF50633_MBCC1_AUTORES = 0x04, /* automatic resume */
+ PCF50633_MBCC1_RESUME = 0x08, /* explicit resume cmd */
+ PCF50633_MBCC1_RESTART = 0x10, /* restart charging */
+ PCF50633_MBCC1_PREWDTIME_60M = 0x20, /* max. precharging time */
+ PCF50633_MBCC1_WDTIME_1H = 0x00,
+ PCF50633_MBCC1_WDTIME_2H = 0x40,
+ PCF50633_MBCC1_WDTIME_4H = 0x80,
+ PCF50633_MBCC1_WDTIME_6H = 0xc0,
+};
+#define PCF50633_MBCC1_WDTIME_MASK 0xc0
+
+enum pcf50633_reg_mbcc2 {
+ PCF50633_MBCC2_VBATCOND_2V7 = 0x00,
+ PCF50633_MBCC2_VBATCOND_2V85 = 0x01,
+ PCF50633_MBCC2_VBATCOND_3V0 = 0x02,
+ PCF50633_MBCC2_VBATCOND_3V15 = 0x03,
+ PCF50633_MBCC2_VMAX_4V = 0x00,
+ PCF50633_MBCC2_VMAX_4V20 = 0x28,
+ PCF50633_MBCC2_VRESDEBTIME_64S = 0x80, /* debounce time (32/64sec) */
+};
+
+enum pcf50633_reg_mbcc7 {
+ PCF50633_MBCC7_USB_100mA = 0x00,
+ PCF50633_MBCC7_USB_500mA = 0x01,
+ PCF50633_MBCC7_USB_1000mA = 0x02,
+ PCF50633_MBCC7_USB_SUSPEND = 0x03,
+ PCF50633_MBCC7_BATTEMP_EN = 0x04,
+ PCF50633_MBCC7_BATSYSIMAX_1A6 = 0x00,
+ PCF50633_MBCC7_BATSYSIMAX_1A8 = 0x40,
+ PCF50633_MBCC7_BATSYSIMAX_2A0 = 0x80,
+ PCF50633_MBCC7_BATSYSIMAX_2A2 = 0xc0,
+};
+#define PCF50633_MBCC7_USB_MASK 0x03
+
+enum pcf50633_reg_mbcc8 {
+ PCF50633_MBCC8_USBENASUS = 0x10,
+};
+
+enum pcf50633_reg_mbcs1 {
+ PCF50633_MBCS1_USBPRES = 0x01,
+ PCF50633_MBCS1_USBOK = 0x02,
+ PCF50633_MBCS1_ADAPTPRES = 0x04,
+ PCF50633_MBCS1_ADAPTOK = 0x08,
+ PCF50633_MBCS1_TBAT_OK = 0x00,
+ PCF50633_MBCS1_TBAT_ABOVE = 0x10,
+ PCF50633_MBCS1_TBAT_BELOW = 0x20,
+ PCF50633_MBCS1_TBAT_UNDEF = 0x30,
+ PCF50633_MBCS1_PREWDTEXP = 0x40,
+ PCF50633_MBCS1_WDTEXP = 0x80,
+};
+
+enum pcf50633_reg_mbcs2_mbcmod {
+ PCF50633_MBCS2_MBC_PLAY = 0x00,
+ PCF50633_MBCS2_MBC_USB_PRE = 0x01,
+ PCF50633_MBCS2_MBC_USB_PRE_WAIT = 0x02,
+ PCF50633_MBCS2_MBC_USB_FAST = 0x03,
+ PCF50633_MBCS2_MBC_USB_FAST_WAIT = 0x04,
+ PCF50633_MBCS2_MBC_USB_SUSPEND = 0x05,
+ PCF50633_MBCS2_MBC_ADP_PRE = 0x06,
+ PCF50633_MBCS2_MBC_ADP_PRE_WAIT = 0x07,
+ PCF50633_MBCS2_MBC_ADP_FAST = 0x08,
+ PCF50633_MBCS2_MBC_ADP_FAST_WAIT = 0x09,
+ PCF50633_MBCS2_MBC_BAT_FULL = 0x0a,
+ PCF50633_MBCS2_MBC_HALT = 0x0b,
+};
+#define PCF50633_MBCS2_MBC_MASK 0x0f
+enum pcf50633_reg_mbcs2_chgstat {
+ PCF50633_MBCS2_CHGS_NONE = 0x00,
+ PCF50633_MBCS2_CHGS_ADAPTER = 0x10,
+ PCF50633_MBCS2_CHGS_USB = 0x20,
+ PCF50633_MBCS2_CHGS_BOTH = 0x30,
+};
+#define PCF50633_MBCS2_RESSTAT_AUTO 0x40
+
+enum pcf50633_reg_mbcs3 {
+ PCF50633_MBCS3_USBLIM_PLAY = 0x01,
+ PCF50633_MBCS3_USBLIM_CGH = 0x02,
+ PCF50633_MBCS3_TLIM_PLAY = 0x04,
+ PCF50633_MBCS3_TLIM_CHG = 0x08,
+ PCF50633_MBCS3_ILIM = 0x10, /* 1: Ibat > Icutoff */
+ PCF50633_MBCS3_VLIM = 0x20, /* 1: Vbat == Vmax */
+ PCF50633_MBCS3_VBATSTAT = 0x40, /* 1: Vbat > Vbatcond */
+ PCF50633_MBCS3_VRES = 0x80, /* 1: Vbat > Vth(RES) */
+};
+
+#define PCF50633_MBCC2_VBATCOND_MASK 0x03
+#define PCF50633_MBCC2_VMAX_MASK 0x3c
+
+/* Charger status */
+#define PCF50633_MBC_USB_ONLINE 0x01
+#define PCF50633_MBC_USB_ACTIVE 0x02
+#define PCF50633_MBC_ADAPTER_ONLINE 0x04
+#define PCF50633_MBC_ADAPTER_ACTIVE 0x08
+
+int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
+
+int pcf50633_mbc_get_status(struct pcf50633 *);
+int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
+
+#endif
+
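
A usage sketch (not part of the patch), assuming pcf50633_mbc_get_status() returns a mask of the PCF50633_MBC_* charger-status bits defined above: raise the USB current limit to 500 mA and check whether any charger supply is online.

static bool example_charger_online(struct pcf50633 *pcf)
{
	int status = pcf50633_mbc_get_status(pcf);

	return status & (PCF50633_MBC_USB_ONLINE | PCF50633_MBC_ADAPTER_ONLINE);
}

static int example_set_usb_500ma(struct pcf50633 *pcf)
{
	return pcf50633_mbc_usb_curlim_set(pcf, 500);
}
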
diff --git a/include/linux/mfd/pcf50633/pmic.h b/include/linux/mfd/pcf50633/pmic.h
new file mode 100644
index 000000000..2d3dbe53b
--- /dev/null
+++ b/include/linux/mfd/pcf50633/pmic.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_MFD_PCF50633_PMIC_H
+#define __LINUX_MFD_PCF50633_PMIC_H
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/platform_device.h>
+
+#define PCF50633_REG_AUTOOUT 0x1a
+#define PCF50633_REG_AUTOENA 0x1b
+#define PCF50633_REG_AUTOCTL 0x1c
+#define PCF50633_REG_AUTOMXC 0x1d
+#define PCF50633_REG_DOWN1OUT 0x1e
+#define PCF50633_REG_DOWN1ENA 0x1f
+#define PCF50633_REG_DOWN1CTL 0x20
+#define PCF50633_REG_DOWN1MXC 0x21
+#define PCF50633_REG_DOWN2OUT 0x22
+#define PCF50633_REG_DOWN2ENA 0x23
+#define PCF50633_REG_DOWN2CTL 0x24
+#define PCF50633_REG_DOWN2MXC 0x25
+#define PCF50633_REG_MEMLDOOUT 0x26
+#define PCF50633_REG_MEMLDOENA 0x27
+#define PCF50633_REG_LDO1OUT 0x2d
+#define PCF50633_REG_LDO1ENA 0x2e
+#define PCF50633_REG_LDO2OUT 0x2f
+#define PCF50633_REG_LDO2ENA 0x30
+#define PCF50633_REG_LDO3OUT 0x31
+#define PCF50633_REG_LDO3ENA 0x32
+#define PCF50633_REG_LDO4OUT 0x33
+#define PCF50633_REG_LDO4ENA 0x34
+#define PCF50633_REG_LDO5OUT 0x35
+#define PCF50633_REG_LDO5ENA 0x36
+#define PCF50633_REG_LDO6OUT 0x37
+#define PCF50633_REG_LDO6ENA 0x38
+#define PCF50633_REG_HCLDOOUT 0x39
+#define PCF50633_REG_HCLDOENA 0x3a
+#define PCF50633_REG_HCLDOOVL 0x40
+
+enum pcf50633_regulator_enable {
+ PCF50633_REGULATOR_ON = 0x01,
+ PCF50633_REGULATOR_ON_GPIO1 = 0x02,
+ PCF50633_REGULATOR_ON_GPIO2 = 0x04,
+ PCF50633_REGULATOR_ON_GPIO3 = 0x08,
+};
+#define PCF50633_REGULATOR_ON_MASK 0x0f
+
+enum pcf50633_regulator_phase {
+ PCF50633_REGULATOR_ACTPH1 = 0x00,
+ PCF50633_REGULATOR_ACTPH2 = 0x10,
+ PCF50633_REGULATOR_ACTPH3 = 0x20,
+ PCF50633_REGULATOR_ACTPH4 = 0x30,
+};
+#define PCF50633_REGULATOR_ACTPH_MASK 0x30
+
+enum pcf50633_regulator_id {
+ PCF50633_REGULATOR_AUTO,
+ PCF50633_REGULATOR_DOWN1,
+ PCF50633_REGULATOR_DOWN2,
+ PCF50633_REGULATOR_LDO1,
+ PCF50633_REGULATOR_LDO2,
+ PCF50633_REGULATOR_LDO3,
+ PCF50633_REGULATOR_LDO4,
+ PCF50633_REGULATOR_LDO5,
+ PCF50633_REGULATOR_LDO6,
+ PCF50633_REGULATOR_HCLDO,
+ PCF50633_REGULATOR_MEMLDO,
+};
+#endif
+
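
A sketch (not part of the patch), assuming the *ENA registers above take the pcf50633_regulator_enable bits: enable LDO1 unconditionally and additionally gate it on GPIO1, using the register accessor declared in core.h.

static int example_enable_ldo1(struct pcf50633 *pcf)
{
	return pcf50633_reg_write(pcf, PCF50633_REG_LDO1ENA,
				  PCF50633_REGULATOR_ON |
				  PCF50633_REGULATOR_ON_GPIO1);
}
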
diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h
new file mode 100644
index 000000000..742ebf1b7
--- /dev/null
+++ b/include/linux/mfd/qcom_rpm.h
@@ -0,0 +1,13 @@
+#ifndef __QCOM_RPM_H__
+#define __QCOM_RPM_H__
+
+#include <linux/types.h>
+
+struct qcom_rpm;
+
+#define QCOM_RPM_ACTIVE_STATE 0
+#define QCOM_RPM_SLEEP_STATE 1
+
+int qcom_rpm_write(struct qcom_rpm *rpm, int state, int resource, u32 *buf, size_t count);
+
+#endif
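
A minimal sketch (not part of the patch) of the single entry point above; the resource index is deliberately a parameter here, since real values come from the platform's RPM resource numbering, which is outside this header.

static int example_rpm_vote(struct qcom_rpm *rpm, int resource, u32 value)
{
	/* Write one 32-bit word to the resource for the active state. */
	return qcom_rpm_write(rpm, QCOM_RPM_ACTIVE_STATE, resource, &value, 1);
}
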
diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h
new file mode 100644
index 000000000..fd413ccab
--- /dev/null
+++ b/include/linux/mfd/rc5t583.h
@@ -0,0 +1,380 @@
+/*
+ * Core driver interface to access RICOH_RC5T583 power management chip.
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION. All rights reserved.
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * Based on code
+ * Copyright (C) 2011 RICOH COMPANY,LTD
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __LINUX_MFD_RC5T583_H
+#define __LINUX_MFD_RC5T583_H
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/regmap.h>
+
+#define RC5T583_MAX_REGS 0xF8
+
+/* Maximum number of main interrupts */
+#define MAX_MAIN_INTERRUPT 5
+#define RC5T583_MAX_GPEDGE_REG 2
+#define RC5T583_MAX_INTERRUPT_EN_REGS 8
+#define RC5T583_MAX_INTERRUPT_MASK_REGS 9
+
+/* Interrupt enable register */
+#define RC5T583_INT_EN_SYS1 0x19
+#define RC5T583_INT_EN_SYS2 0x1D
+#define RC5T583_INT_EN_DCDC 0x41
+#define RC5T583_INT_EN_RTC 0xED
+#define RC5T583_INT_EN_ADC1 0x90
+#define RC5T583_INT_EN_ADC2 0x91
+#define RC5T583_INT_EN_ADC3 0x92
+
+/* Interrupt status registers (monitor regs in Ricoh) */
+#define RC5T583_INTC_INTPOL 0xAD
+#define RC5T583_INTC_INTEN 0xAE
+#define RC5T583_INTC_INTMON 0xAF
+
+#define RC5T583_INT_MON_GRP 0xAF
+#define RC5T583_INT_MON_SYS1 0x1B
+#define RC5T583_INT_MON_SYS2 0x1F
+#define RC5T583_INT_MON_DCDC 0x43
+#define RC5T583_INT_MON_RTC 0xEE
+
+/* Interrupt clearing registers */
+#define RC5T583_INT_IR_SYS1 0x1A
+#define RC5T583_INT_IR_SYS2 0x1E
+#define RC5T583_INT_IR_DCDC 0x42
+#define RC5T583_INT_IR_RTC 0xEE
+#define RC5T583_INT_IR_ADCL 0x94
+#define RC5T583_INT_IR_ADCH 0x95
+#define RC5T583_INT_IR_ADCEND 0x96
+#define RC5T583_INT_IR_GPIOR 0xA9
+#define RC5T583_INT_IR_GPIOF 0xAA
+
+/* Sleep sequence registers */
+#define RC5T583_SLPSEQ1 0x21
+#define RC5T583_SLPSEQ2 0x22
+#define RC5T583_SLPSEQ3 0x23
+#define RC5T583_SLPSEQ4 0x24
+#define RC5T583_SLPSEQ5 0x25
+#define RC5T583_SLPSEQ6 0x26
+#define RC5T583_SLPSEQ7 0x27
+#define RC5T583_SLPSEQ8 0x28
+#define RC5T583_SLPSEQ9 0x29
+#define RC5T583_SLPSEQ10 0x2A
+#define RC5T583_SLPSEQ11 0x2B
+
+/* Regulator registers */
+#define RC5T583_REG_DC0CTL 0x30
+#define RC5T583_REG_DC0DAC 0x31
+#define RC5T583_REG_DC0LATCTL 0x32
+#define RC5T583_REG_SR0CTL 0x33
+
+#define RC5T583_REG_DC1CTL 0x34
+#define RC5T583_REG_DC1DAC 0x35
+#define RC5T583_REG_DC1LATCTL 0x36
+#define RC5T583_REG_SR1CTL 0x37
+
+#define RC5T583_REG_DC2CTL 0x38
+#define RC5T583_REG_DC2DAC 0x39
+#define RC5T583_REG_DC2LATCTL 0x3A
+#define RC5T583_REG_SR2CTL 0x3B
+
+#define RC5T583_REG_DC3CTL 0x3C
+#define RC5T583_REG_DC3DAC 0x3D
+#define RC5T583_REG_DC3LATCTL 0x3E
+#define RC5T583_REG_SR3CTL 0x3F
+
+
+#define RC5T583_REG_LDOEN1 0x50
+#define RC5T583_REG_LDOEN2 0x51
+#define RC5T583_REG_LDODIS1 0x52
+#define RC5T583_REG_LDODIS2 0x53
+
+#define RC5T583_REG_LDO0DAC 0x54
+#define RC5T583_REG_LDO1DAC 0x55
+#define RC5T583_REG_LDO2DAC 0x56
+#define RC5T583_REG_LDO3DAC 0x57
+#define RC5T583_REG_LDO4DAC 0x58
+#define RC5T583_REG_LDO5DAC 0x59
+#define RC5T583_REG_LDO6DAC 0x5A
+#define RC5T583_REG_LDO7DAC 0x5B
+#define RC5T583_REG_LDO8DAC 0x5C
+#define RC5T583_REG_LDO9DAC 0x5D
+
+#define RC5T583_REG_DC0DAC_DS 0x60
+#define RC5T583_REG_DC1DAC_DS 0x61
+#define RC5T583_REG_DC2DAC_DS 0x62
+#define RC5T583_REG_DC3DAC_DS 0x63
+
+#define RC5T583_REG_LDO0DAC_DS 0x64
+#define RC5T583_REG_LDO1DAC_DS 0x65
+#define RC5T583_REG_LDO2DAC_DS 0x66
+#define RC5T583_REG_LDO3DAC_DS 0x67
+#define RC5T583_REG_LDO4DAC_DS 0x68
+#define RC5T583_REG_LDO5DAC_DS 0x69
+#define RC5T583_REG_LDO6DAC_DS 0x6A
+#define RC5T583_REG_LDO7DAC_DS 0x6B
+#define RC5T583_REG_LDO8DAC_DS 0x6C
+#define RC5T583_REG_LDO9DAC_DS 0x6D
+
+/* GPIO register base address */
+#define RC5T583_GPIO_IOSEL 0xA0
+#define RC5T583_GPIO_PDEN 0xA1
+#define RC5T583_GPIO_IOOUT 0xA2
+#define RC5T583_GPIO_PGSEL 0xA3
+#define RC5T583_GPIO_GPINV 0xA4
+#define RC5T583_GPIO_GPDEB 0xA5
+#define RC5T583_GPIO_GPEDGE1 0xA6
+#define RC5T583_GPIO_GPEDGE2 0xA7
+#define RC5T583_GPIO_EN_INT 0xA8
+#define RC5T583_GPIO_MON_IOIN 0xAB
+#define RC5T583_GPIO_GPOFUNC 0xAC
+
+/* RTC registers */
+#define RC5T583_RTC_SEC 0xE0
+#define RC5T583_RTC_MIN 0xE1
+#define RC5T583_RTC_HOUR 0xE2
+#define RC5T583_RTC_WDAY 0xE3
+#define RC5T583_RTC_DAY 0xE4
+#define RC5T583_RTC_MONTH 0xE5
+#define RC5T583_RTC_YEAR 0xE6
+#define RC5T583_RTC_ADJ 0xE7
+#define RC5T583_RTC_AW_MIN 0xE8
+#define RC5T583_RTC_AW_HOUR 0xE9
+#define RC5T583_RTC_AW_WEEK 0xEA
+#define RC5T583_RTC_AD_MIN 0xEB
+#define RC5T583_RTC_AD_HOUR 0xEC
+#define RC5T583_RTC_CTL1 0xED
+#define RC5T583_RTC_CTL2 0xEE
+#define RC5T583_RTC_AY_MIN 0xF0
+#define RC5T583_RTC_AY_HOUR 0xF1
+#define RC5T583_RTC_AY_DAY 0xF2
+#define RC5T583_RTC_AY_MONTH 0xF3
+#define RC5T583_RTC_AY_YEAR 0xF4
+
+/* RICOH_RC5T583 IRQ definitions */
+enum {
+ RC5T583_IRQ_ONKEY,
+ RC5T583_IRQ_ACOK,
+ RC5T583_IRQ_LIDOPEN,
+ RC5T583_IRQ_PREOT,
+ RC5T583_IRQ_CLKSTP,
+ RC5T583_IRQ_ONKEY_OFF,
+ RC5T583_IRQ_WD,
+ RC5T583_IRQ_EN_PWRREQ1,
+ RC5T583_IRQ_EN_PWRREQ2,
+ RC5T583_IRQ_PRE_VINDET,
+
+ RC5T583_IRQ_DC0LIM,
+ RC5T583_IRQ_DC1LIM,
+ RC5T583_IRQ_DC2LIM,
+ RC5T583_IRQ_DC3LIM,
+
+ RC5T583_IRQ_CTC,
+ RC5T583_IRQ_YALE,
+ RC5T583_IRQ_DALE,
+ RC5T583_IRQ_WALE,
+
+ RC5T583_IRQ_AIN1L,
+ RC5T583_IRQ_AIN2L,
+ RC5T583_IRQ_AIN3L,
+ RC5T583_IRQ_VBATL,
+ RC5T583_IRQ_VIN3L,
+ RC5T583_IRQ_VIN8L,
+ RC5T583_IRQ_AIN1H,
+ RC5T583_IRQ_AIN2H,
+ RC5T583_IRQ_AIN3H,
+ RC5T583_IRQ_VBATH,
+ RC5T583_IRQ_VIN3H,
+ RC5T583_IRQ_VIN8H,
+ RC5T583_IRQ_ADCEND,
+
+ RC5T583_IRQ_GPIO0,
+ RC5T583_IRQ_GPIO1,
+ RC5T583_IRQ_GPIO2,
+ RC5T583_IRQ_GPIO3,
+ RC5T583_IRQ_GPIO4,
+ RC5T583_IRQ_GPIO5,
+ RC5T583_IRQ_GPIO6,
+ RC5T583_IRQ_GPIO7,
+
+ /* Should be last entry */
+ RC5T583_MAX_IRQS,
+};
+
+/* Ricoh583 gpio definitions */
+enum {
+ RC5T583_GPIO0,
+ RC5T583_GPIO1,
+ RC5T583_GPIO2,
+ RC5T583_GPIO3,
+ RC5T583_GPIO4,
+ RC5T583_GPIO5,
+ RC5T583_GPIO6,
+ RC5T583_GPIO7,
+
+ /* Should be last entry */
+ RC5T583_MAX_GPIO,
+};
+
+enum {
+ RC5T583_DS_NONE,
+ RC5T583_DS_DC0,
+ RC5T583_DS_DC1,
+ RC5T583_DS_DC2,
+ RC5T583_DS_DC3,
+ RC5T583_DS_LDO0,
+ RC5T583_DS_LDO1,
+ RC5T583_DS_LDO2,
+ RC5T583_DS_LDO3,
+ RC5T583_DS_LDO4,
+ RC5T583_DS_LDO5,
+ RC5T583_DS_LDO6,
+ RC5T583_DS_LDO7,
+ RC5T583_DS_LDO8,
+ RC5T583_DS_LDO9,
+ RC5T583_DS_PSO0,
+ RC5T583_DS_PSO1,
+ RC5T583_DS_PSO2,
+ RC5T583_DS_PSO3,
+ RC5T583_DS_PSO4,
+ RC5T583_DS_PSO5,
+ RC5T583_DS_PSO6,
+ RC5T583_DS_PSO7,
+
+ /* Should be last entry */
+ RC5T583_DS_MAX,
+};
+
+/*
+ * The Ricoh PMIC RC5T583 supports sleep control through two external
+ * signals. The outputs of the GPIOs and regulators can be enabled or
+ * disabled through these external signals.
+ */
+enum {
+ RC5T583_EXT_PWRREQ1_CONTROL = 0x1,
+ RC5T583_EXT_PWRREQ2_CONTROL = 0x2,
+};
+
+enum {
+ RC5T583_REGULATOR_DC0,
+ RC5T583_REGULATOR_DC1,
+ RC5T583_REGULATOR_DC2,
+ RC5T583_REGULATOR_DC3,
+ RC5T583_REGULATOR_LDO0,
+ RC5T583_REGULATOR_LDO1,
+ RC5T583_REGULATOR_LDO2,
+ RC5T583_REGULATOR_LDO3,
+ RC5T583_REGULATOR_LDO4,
+ RC5T583_REGULATOR_LDO5,
+ RC5T583_REGULATOR_LDO6,
+ RC5T583_REGULATOR_LDO7,
+ RC5T583_REGULATOR_LDO8,
+ RC5T583_REGULATOR_LDO9,
+
+ /* Should be last entry */
+ RC5T583_REGULATOR_MAX,
+};
+
+struct rc5t583 {
+ struct device *dev;
+ struct regmap *regmap;
+ int chip_irq;
+ int irq_base;
+ struct mutex irq_lock;
+ unsigned long group_irq_en[MAX_MAIN_INTERRUPT];
+
+ /* For main interrupt bits in INTC */
+ uint8_t intc_inten_reg;
+
+ /* For group interrupt bits and address */
+ uint8_t irq_en_reg[RC5T583_MAX_INTERRUPT_EN_REGS];
+
+ /* For gpio edge */
+ uint8_t gpedge_reg[RC5T583_MAX_GPEDGE_REG];
+};
+
+/*
+ * rc5t583_platform_data: Platform data for the Ricoh RC5T583 PMU.
+ * Board-specific data is provided through this structure.
+ * @irq_base: IRQ base number at which this device registers its interrupts.
+ * @gpio_base: GPIO number at which the GPIOs of this device start.
+ * @enable_shutdown: Enable shutdown through the input pin "shutdown".
+ * @regulator_deepsleep_slot: The slot number at which each regulator goes to
+ *		sleep in device sleep mode.
+ * @regulator_ext_pwr_control: External power request regulator control. The
+ * regulator output enable/disable is controlled by the external
+ * power request input state.
+ * @reg_init_data: Regulator init data.
+ */
+
+struct rc5t583_platform_data {
+ int irq_base;
+ int gpio_base;
+ bool enable_shutdown;
+ int regulator_deepsleep_slot[RC5T583_REGULATOR_MAX];
+ unsigned long regulator_ext_pwr_control[RC5T583_REGULATOR_MAX];
+ struct regulator_init_data *reg_init_data[RC5T583_REGULATOR_MAX];
+};
+
+static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val)
+{
+ struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+ return regmap_write(rc5t583->regmap, reg, val);
+}
+
+static inline int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val)
+{
+ struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+ unsigned int ival;
+ int ret;
+ ret = regmap_read(rc5t583->regmap, reg, &ival);
+ if (!ret)
+ *val = (uint8_t)ival;
+ return ret;
+}
+
+static inline int rc5t583_set_bits(struct device *dev, unsigned int reg,
+ unsigned int bit_mask)
+{
+ struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+ return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask);
+}
+
+static inline int rc5t583_clear_bits(struct device *dev, unsigned int reg,
+ unsigned int bit_mask)
+{
+ struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+ return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0);
+}
+
+static inline int rc5t583_update(struct device *dev, unsigned int reg,
+ unsigned int val, unsigned int mask)
+{
+ struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
+ return regmap_update_bits(rc5t583->regmap, reg, mask, val);
+}
+
+int rc5t583_ext_power_req_config(struct device *dev, int deepsleep_id,
+ int ext_pwr_req, int deepsleep_slot_nr);
+int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base);
+int rc5t583_irq_exit(struct rc5t583 *rc5t583);
+
+#endif
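
A usage sketch (not part of the patch) of the inline wrappers above. Note that rc5t583_update() takes (val, mask), not the (mask, val) order of regmap_update_bits(). The GPIO0 bit position used below is an assumption for illustration only.

static int example_mask_gpio0_irq(struct device *parent)
{
	/* BIT(0) standing for GPIO0 in GPIO_EN_INT is assumed, not verified. */
	return rc5t583_clear_bits(parent, RC5T583_GPIO_EN_INT, BIT(0));
}

static int example_read_int_monitor(struct device *parent, uint8_t *val)
{
	return rc5t583_read(parent, RC5T583_INTC_INTMON, val);
}
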
diff --git a/include/linux/mfd/rdc321x.h b/include/linux/mfd/rdc321x.h
new file mode 100644
index 000000000..442743a8f
--- /dev/null
+++ b/include/linux/mfd/rdc321x.h
@@ -0,0 +1,26 @@
+#ifndef __RDC321X_MFD_H
+#define __RDC321X_MFD_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+/* Offsets to be accessed in the southbridge PCI
+ * device configuration register */
+#define RDC321X_WDT_CTRL 0x44
+#define RDC321X_GPIO_CTRL_REG1 0x48
+#define RDC321X_GPIO_DATA_REG1 0x4c
+#define RDC321X_GPIO_CTRL_REG2 0x84
+#define RDC321X_GPIO_DATA_REG2 0x88
+
+#define RDC321X_NUM_GPIO 59
+
+struct rdc321x_gpio_pdata {
+ struct pci_dev *sb_pdev;
+ unsigned max_gpios;
+};
+
+struct rdc321x_wdt_pdata {
+ struct pci_dev *sb_pdev;
+};
+
+#endif /* __RDC321X_MFD_H */
diff --git a/include/linux/mfd/retu.h b/include/linux/mfd/retu.h
new file mode 100644
index 000000000..65471c4a3
--- /dev/null
+++ b/include/linux/mfd/retu.h
@@ -0,0 +1,28 @@
+/*
+ * Retu/Tahvo MFD driver interface
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+
+#ifndef __LINUX_MFD_RETU_H
+#define __LINUX_MFD_RETU_H
+
+struct retu_dev;
+
+int retu_read(struct retu_dev *, u8);
+int retu_write(struct retu_dev *, u8, u16);
+
+/* Registers */
+#define RETU_REG_WATCHDOG 0x17 /* Watchdog */
+#define RETU_REG_CC1 0x0d /* Common control register 1 */
+#define RETU_REG_STATUS 0x16 /* Status register */
+
+/* Interrupt sources */
+#define TAHVO_INT_VBUS 0 /* VBUS state */
+
+/* Interrupt status */
+#define TAHVO_STAT_VBUS (1 << TAHVO_INT_VBUS)
+
+#endif /* __LINUX_MFD_RETU_H */
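
A short sketch (not part of the patch): refreshing the Retu watchdog by writing a timeout value to RETU_REG_WATCHDOG through retu_write(); the exact unit and range of the value are device specific and not defined in this header.

static int example_retu_kick_watchdog(struct retu_dev *rdev, u16 timeout)
{
	return retu_write(rdev, RETU_REG_WATCHDOG, timeout);
}
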
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
new file mode 100644
index 000000000..441b6ee72
--- /dev/null
+++ b/include/linux/mfd/rk808.h
@@ -0,0 +1,199 @@
+/*
+ * rk808.h for Rockchip RK808
+ *
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ * Author: Zhang Qing <zhangqing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_REGULATOR_rk808_H
+#define __LINUX_REGULATOR_rk808_H
+
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+/*
+ * rk808 Global Register Map.
+ */
+
+#define RK808_DCDC1 0 /* (0+RK808_START) */
+#define RK808_LDO1 4 /* (4+RK808_START) */
+#define RK808_NUM_REGULATORS 14
+
+enum rk808_reg {
+ RK808_ID_DCDC1,
+ RK808_ID_DCDC2,
+ RK808_ID_DCDC3,
+ RK808_ID_DCDC4,
+ RK808_ID_LDO1,
+ RK808_ID_LDO2,
+ RK808_ID_LDO3,
+ RK808_ID_LDO4,
+ RK808_ID_LDO5,
+ RK808_ID_LDO6,
+ RK808_ID_LDO7,
+ RK808_ID_LDO8,
+ RK808_ID_SWITCH1,
+ RK808_ID_SWITCH2,
+};
+
+#define RK808_SECONDS_REG 0x00
+#define RK808_MINUTES_REG 0x01
+#define RK808_HOURS_REG 0x02
+#define RK808_DAYS_REG 0x03
+#define RK808_MONTHS_REG 0x04
+#define RK808_YEARS_REG 0x05
+#define RK808_WEEKS_REG 0x06
+#define RK808_ALARM_SECONDS_REG 0x08
+#define RK808_ALARM_MINUTES_REG 0x09
+#define RK808_ALARM_HOURS_REG 0x0a
+#define RK808_ALARM_DAYS_REG 0x0b
+#define RK808_ALARM_MONTHS_REG 0x0c
+#define RK808_ALARM_YEARS_REG 0x0d
+#define RK808_RTC_CTRL_REG 0x10
+#define RK808_RTC_STATUS_REG 0x11
+#define RK808_RTC_INT_REG 0x12
+#define RK808_RTC_COMP_LSB_REG 0x13
+#define RK808_RTC_COMP_MSB_REG 0x14
+#define RK808_CLK32OUT_REG 0x20
+#define RK808_VB_MON_REG 0x21
+#define RK808_THERMAL_REG 0x22
+#define RK808_DCDC_EN_REG 0x23
+#define RK808_LDO_EN_REG 0x24
+#define RK808_SLEEP_SET_OFF_REG1 0x25
+#define RK808_SLEEP_SET_OFF_REG2 0x26
+#define RK808_DCDC_UV_STS_REG 0x27
+#define RK808_DCDC_UV_ACT_REG 0x28
+#define RK808_LDO_UV_STS_REG 0x29
+#define RK808_LDO_UV_ACT_REG 0x2a
+#define RK808_DCDC_PG_REG 0x2b
+#define RK808_LDO_PG_REG 0x2c
+#define RK808_VOUT_MON_TDB_REG 0x2d
+#define RK808_BUCK1_CONFIG_REG 0x2e
+#define RK808_BUCK1_ON_VSEL_REG 0x2f
+#define RK808_BUCK1_SLP_VSEL_REG 0x30
+#define RK808_BUCK1_DVS_VSEL_REG 0x31
+#define RK808_BUCK2_CONFIG_REG 0x32
+#define RK808_BUCK2_ON_VSEL_REG 0x33
+#define RK808_BUCK2_SLP_VSEL_REG 0x34
+#define RK808_BUCK2_DVS_VSEL_REG 0x35
+#define RK808_BUCK3_CONFIG_REG 0x36
+#define RK808_BUCK4_CONFIG_REG 0x37
+#define RK808_BUCK4_ON_VSEL_REG 0x38
+#define RK808_BUCK4_SLP_VSEL_REG 0x39
+#define RK808_BOOST_CONFIG_REG 0x3a
+#define RK808_LDO1_ON_VSEL_REG 0x3b
+#define RK808_LDO1_SLP_VSEL_REG 0x3c
+#define RK808_LDO2_ON_VSEL_REG 0x3d
+#define RK808_LDO2_SLP_VSEL_REG 0x3e
+#define RK808_LDO3_ON_VSEL_REG 0x3f
+#define RK808_LDO3_SLP_VSEL_REG 0x40
+#define RK808_LDO4_ON_VSEL_REG 0x41
+#define RK808_LDO4_SLP_VSEL_REG 0x42
+#define RK808_LDO5_ON_VSEL_REG 0x43
+#define RK808_LDO5_SLP_VSEL_REG 0x44
+#define RK808_LDO6_ON_VSEL_REG 0x45
+#define RK808_LDO6_SLP_VSEL_REG 0x46
+#define RK808_LDO7_ON_VSEL_REG 0x47
+#define RK808_LDO7_SLP_VSEL_REG 0x48
+#define RK808_LDO8_ON_VSEL_REG 0x49
+#define RK808_LDO8_SLP_VSEL_REG 0x4a
+#define RK808_DEVCTRL_REG 0x4b
+#define RK808_INT_STS_REG1 0x4c
+#define RK808_INT_STS_MSK_REG1 0x4d
+#define RK808_INT_STS_REG2 0x4e
+#define RK808_INT_STS_MSK_REG2 0x4f
+#define RK808_IO_POL_REG 0x50
+
+/* IRQ Definitions */
+#define RK808_IRQ_VOUT_LO 0
+#define RK808_IRQ_VB_LO 1
+#define RK808_IRQ_PWRON 2
+#define RK808_IRQ_PWRON_LP 3
+#define RK808_IRQ_HOTDIE 4
+#define RK808_IRQ_RTC_ALARM 5
+#define RK808_IRQ_RTC_PERIOD 6
+#define RK808_IRQ_PLUG_IN_INT 7
+#define RK808_IRQ_PLUG_OUT_INT 8
+#define RK808_NUM_IRQ 9
+
+#define RK808_IRQ_VOUT_LO_MSK BIT(0)
+#define RK808_IRQ_VB_LO_MSK BIT(1)
+#define RK808_IRQ_PWRON_MSK BIT(2)
+#define RK808_IRQ_PWRON_LP_MSK BIT(3)
+#define RK808_IRQ_HOTDIE_MSK BIT(4)
+#define RK808_IRQ_RTC_ALARM_MSK BIT(5)
+#define RK808_IRQ_RTC_PERIOD_MSK BIT(6)
+#define RK808_IRQ_PLUG_IN_INT_MSK BIT(0)
+#define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1)
+
+#define RK808_VBAT_LOW_2V8 0x00
+#define RK808_VBAT_LOW_2V9 0x01
+#define RK808_VBAT_LOW_3V0 0x02
+#define RK808_VBAT_LOW_3V1 0x03
+#define RK808_VBAT_LOW_3V2 0x04
+#define RK808_VBAT_LOW_3V3 0x05
+#define RK808_VBAT_LOW_3V4 0x06
+#define RK808_VBAT_LOW_3V5 0x07
+#define VBAT_LOW_VOL_MASK (0x07 << 0)
+#define EN_VABT_LOW_SHUT_DOWN (0x00 << 4)
+#define EN_VBAT_LOW_IRQ (0x1 << 4)
+#define VBAT_LOW_ACT_MASK (0x1 << 4)
+
+#define BUCK_ILMIN_MASK (7 << 0)
+#define BOOST_ILMIN_MASK (7 << 0)
+#define BUCK1_RATE_MASK (3 << 3)
+#define BUCK2_RATE_MASK (3 << 3)
+#define MASK_ALL 0xff
+
+#define BUCK_UV_ACT_MASK 0x0f
+#define BUCK_UV_ACT_DISABLE 0
+
+#define SWITCH2_EN BIT(6)
+#define SWITCH1_EN BIT(5)
+#define DEV_OFF_RST BIT(3)
+
+#define VB_LO_ACT BIT(4)
+#define VB_LO_SEL_3500MV (7 << 0)
+
+#define VOUT_LO_INT BIT(0)
+#define CLK32KOUT2_EN BIT(0)
+
+enum {
+ BUCK_ILMIN_50MA,
+ BUCK_ILMIN_100MA,
+ BUCK_ILMIN_150MA,
+ BUCK_ILMIN_200MA,
+ BUCK_ILMIN_250MA,
+ BUCK_ILMIN_300MA,
+ BUCK_ILMIN_350MA,
+ BUCK_ILMIN_400MA,
+};
+
+enum {
+ BOOST_ILMIN_75MA,
+ BOOST_ILMIN_100MA,
+ BOOST_ILMIN_125MA,
+ BOOST_ILMIN_150MA,
+ BOOST_ILMIN_175MA,
+ BOOST_ILMIN_200MA,
+ BOOST_ILMIN_225MA,
+ BOOST_ILMIN_250MA,
+};
+
+struct rk808 {
+ struct i2c_client *i2c;
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+};
+#endif /* __LINUX_REGULATOR_rk808_H */
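
An illustrative sketch (not part of the patch): cell drivers receive the struct rk808 above from their parent via dev_get_drvdata() and go through its shared regmap, for example to read the RTC seconds register.

static int example_rk808_read_seconds(struct device *dev, unsigned int *secs)
{
	struct rk808 *rk808 = dev_get_drvdata(dev->parent);

	return regmap_read(rk808->regmap, RK808_SECONDS_REG, secs);
}
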
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
new file mode 100644
index 000000000..c72d5344f
--- /dev/null
+++ b/include/linux/mfd/rn5t618.h
@@ -0,0 +1,228 @@
+/*
+ * MFD core driver for Ricoh RN5T618 PMIC
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LINUX_MFD_RN5T618_H
+#define __LINUX_MFD_RN5T618_H
+
+#include <linux/regmap.h>
+
+#define RN5T618_LSIVER 0x00
+#define RN5T618_OTPVER 0x01
+#define RN5T618_IODAC 0x02
+#define RN5T618_VINDAC 0x03
+#define RN5T618_CPUCNT 0x06
+#define RN5T618_PSWR 0x07
+#define RN5T618_PONHIS 0x09
+#define RN5T618_POFFHIS 0x0a
+#define RN5T618_WATCHDOG 0x0b
+#define RN5T618_WATCHDOGCNT 0x0c
+#define RN5T618_PWRFUNC 0x0d
+#define RN5T618_SLPCNT 0x0e
+#define RN5T618_REPCNT 0x0f
+#define RN5T618_PWRONTIMSET 0x10
+#define RN5T618_NOETIMSETCNT 0x11
+#define RN5T618_PWRIREN 0x12
+#define RN5T618_PWRIRQ 0x13
+#define RN5T618_PWRMON 0x14
+#define RN5T618_PWRIRSEL 0x15
+#define RN5T618_DC1_SLOT 0x16
+#define RN5T618_DC2_SLOT 0x17
+#define RN5T618_DC3_SLOT 0x18
+#define RN5T618_LDO1_SLOT 0x1b
+#define RN5T618_LDO2_SLOT 0x1c
+#define RN5T618_LDO3_SLOT 0x1d
+#define RN5T618_LDO4_SLOT 0x1e
+#define RN5T618_LDO5_SLOT 0x1f
+#define RN5T618_PSO0_SLOT 0x25
+#define RN5T618_PSO1_SLOT 0x26
+#define RN5T618_PSO2_SLOT 0x27
+#define RN5T618_PSO3_SLOT 0x28
+#define RN5T618_LDORTC1_SLOT 0x2a
+#define RN5T618_DC1CTL 0x2c
+#define RN5T618_DC1CTL2 0x2d
+#define RN5T618_DC2CTL 0x2e
+#define RN5T618_DC2CTL2 0x2f
+#define RN5T618_DC3CTL 0x30
+#define RN5T618_DC3CTL2 0x31
+#define RN5T618_DC1DAC 0x36
+#define RN5T618_DC2DAC 0x37
+#define RN5T618_DC3DAC 0x38
+#define RN5T618_DC1DAC_SLP 0x3b
+#define RN5T618_DC2DAC_SLP 0x3c
+#define RN5T618_DC3DAC_SLP 0x3d
+#define RN5T618_DCIREN 0x40
+#define RN5T618_DCIRQ 0x41
+#define RN5T618_DCIRMON 0x42
+#define RN5T618_LDOEN1 0x44
+#define RN5T618_LDOEN2 0x45
+#define RN5T618_LDODIS 0x46
+#define RN5T618_LDO1DAC 0x4c
+#define RN5T618_LDO2DAC 0x4d
+#define RN5T618_LDO3DAC 0x4e
+#define RN5T618_LDO4DAC 0x4f
+#define RN5T618_LDO5DAC 0x50
+#define RN5T618_LDORTCDAC 0x56
+#define RN5T618_LDORTC2DAC 0x57
+#define RN5T618_LDO1DAC_SLP 0x58
+#define RN5T618_LDO2DAC_SLP 0x59
+#define RN5T618_LDO3DAC_SLP 0x5a
+#define RN5T618_LDO4DAC_SLP 0x5b
+#define RN5T618_LDO5DAC_SLP 0x5c
+#define RN5T618_ADCCNT1 0x64
+#define RN5T618_ADCCNT2 0x65
+#define RN5T618_ADCCNT3 0x66
+#define RN5T618_ILIMDATAH 0x68
+#define RN5T618_ILIMDATAL 0x69
+#define RN5T618_VBATDATAH 0x6a
+#define RN5T618_VBATDATAL 0x6b
+#define RN5T618_VADPDATAH 0x6c
+#define RN5T618_VADPDATAL 0x6d
+#define RN5T618_VUSBDATAH 0x6e
+#define RN5T618_VUSBDATAL 0x6f
+#define RN5T618_VSYSDATAH 0x70
+#define RN5T618_VSYSDATAL 0x71
+#define RN5T618_VTHMDATAH 0x72
+#define RN5T618_VTHMDATAL 0x73
+#define RN5T618_AIN1DATAH 0x74
+#define RN5T618_AIN1DATAL 0x75
+#define RN5T618_AIN0DATAH 0x76
+#define RN5T618_AIN0DATAL 0x77
+#define RN5T618_ILIMTHL 0x78
+#define RN5T618_ILIMTHH 0x79
+#define RN5T618_VBATTHL 0x7a
+#define RN5T618_VBATTHH 0x7b
+#define RN5T618_VADPTHL 0x7c
+#define RN5T618_VADPTHH 0x7d
+#define RN5T618_VUSBTHL 0x7e
+#define RN5T618_VUSBTHH 0x7f
+#define RN5T618_VSYSTHL 0x80
+#define RN5T618_VSYSTHH 0x81
+#define RN5T618_VTHMTHL 0x82
+#define RN5T618_VTHMTHH 0x83
+#define RN5T618_AIN1THL 0x84
+#define RN5T618_AIN1THH 0x85
+#define RN5T618_AIN0THL 0x86
+#define RN5T618_AIN0THH 0x87
+#define RN5T618_EN_ADCIR1 0x88
+#define RN5T618_EN_ADCIR2 0x89
+#define RN5T618_EN_ADCIR3 0x8a
+#define RN5T618_IR_ADC1 0x8c
+#define RN5T618_IR_ADC2 0x8d
+#define RN5T618_IR_ADC3 0x8e
+#define RN5T618_IOSEL 0x90
+#define RN5T618_IOOUT 0x91
+#define RN5T618_GPEDGE1 0x92
+#define RN5T618_GPEDGE2 0x93
+#define RN5T618_EN_GPIR 0x94
+#define RN5T618_IR_GPR 0x95
+#define RN5T618_IR_GPF 0x96
+#define RN5T618_MON_IOIN 0x97
+#define RN5T618_GPLED_FUNC 0x98
+#define RN5T618_INTPOL 0x9c
+#define RN5T618_INTEN 0x9d
+#define RN5T618_INTMON 0x9e
+#define RN5T618_PREVINDAC 0xb0
+#define RN5T618_BATDAC 0xb1
+#define RN5T618_CHGCTL1 0xb3
+#define RN5T618_CHGCTL2 0xb4
+#define RN5T618_VSYSSET 0xb5
+#define RN5T618_REGISET1 0xb6
+#define RN5T618_REGISET2 0xb7
+#define RN5T618_CHGISET 0xb8
+#define RN5T618_TIMSET 0xb9
+#define RN5T618_BATSET1 0xba
+#define RN5T618_BATSET2 0xbb
+#define RN5T618_DIESET 0xbc
+#define RN5T618_CHGSTATE 0xbd
+#define RN5T618_CHGCTRL_IRFMASK 0xbe
+#define RN5T618_CHGSTAT_IRFMASK1 0xbf
+#define RN5T618_CHGSTAT_IRFMASK2 0xc0
+#define RN5T618_CHGERR_IRFMASK 0xc1
+#define RN5T618_CHGCTRL_IRR 0xc2
+#define RN5T618_CHGSTAT_IRR1 0xc3
+#define RN5T618_CHGSTAT_IRR2 0xc4
+#define RN5T618_CHGERR_IRR 0xc5
+#define RN5T618_CHGCTRL_MONI 0xc6
+#define RN5T618_CHGSTAT_MONI1 0xc7
+#define RN5T618_CHGSTAT_MONI2 0xc8
+#define RN5T618_CHGERR_MONI 0xc9
+#define RN5T618_CHGCTRL_DETMOD1 0xca
+#define RN5T618_CHGCTRL_DETMOD2 0xcb
+#define RN5T618_CHGSTAT_DETMOD1 0xcc
+#define RN5T618_CHGSTAT_DETMOD2 0xcd
+#define RN5T618_CHGSTAT_DETMOD3 0xce
+#define RN5T618_CHGERR_DETMOD1 0xcf
+#define RN5T618_CHGERR_DETMOD2 0xd0
+#define RN5T618_CHGOSCCTL 0xd4
+#define RN5T618_CHGOSCSCORESET1 0xd5
+#define RN5T618_CHGOSCSCORESET2 0xd6
+#define RN5T618_CHGOSCSCORESET3 0xd7
+#define RN5T618_CHGOSCFREQSET1 0xd8
+#define RN5T618_CHGOSCFREQSET2 0xd9
+#define RN5T618_CONTROL 0xe0
+#define RN5T618_SOC 0xe1
+#define RN5T618_RE_CAP_H 0xe2
+#define RN5T618_RE_CAP_L 0xe3
+#define RN5T618_FA_CAP_H 0xe4
+#define RN5T618_FA_CAP_L 0xe5
+#define RN5T618_AGE 0xe6
+#define RN5T618_TT_EMPTY_H 0xe7
+#define RN5T618_TT_EMPTY_L 0xe8
+#define RN5T618_TT_FULL_H 0xe9
+#define RN5T618_TT_FULL_L 0xea
+#define RN5T618_VOLTAGE_1 0xeb
+#define RN5T618_VOLTAGE_0 0xec
+#define RN5T618_TEMP_1 0xed
+#define RN5T618_TEMP_0 0xee
+#define RN5T618_CC_CTRL 0xef
+#define RN5T618_CC_COUNT2 0xf0
+#define RN5T618_CC_COUNT1 0xf1
+#define RN5T618_CC_COUNT0 0xf2
+#define RN5T618_CC_SUMREG3 0xf3
+#define RN5T618_CC_SUMREG2 0xf4
+#define RN5T618_CC_SUMREG1 0xf5
+#define RN5T618_CC_SUMREG0 0xf6
+#define RN5T618_CC_OFFREG1 0xf7
+#define RN5T618_CC_OFFREG0 0xf8
+#define RN5T618_CC_GAINREG1 0xf9
+#define RN5T618_CC_GAINREG0 0xfa
+#define RN5T618_CC_AVEREG1 0xfb
+#define RN5T618_CC_AVEREG0 0xfc
+#define RN5T618_MAX_REG 0xfc
+
+#define RN5T618_REPCNT_REPWRON BIT(0)
+#define RN5T618_SLPCNT_SWPWROFF BIT(0)
+#define RN5T618_WATCHDOG_WDOGEN BIT(2)
+#define RN5T618_WATCHDOG_WDOGTIM_M (BIT(0) | BIT(1))
+#define RN5T618_WATCHDOG_WDOGTIM_S 0
+#define RN5T618_PWRIRQ_IR_WDOG BIT(6)
+
+enum {
+ RN5T618_DCDC1,
+ RN5T618_DCDC2,
+ RN5T618_DCDC3,
+ RN5T618_LDO1,
+ RN5T618_LDO2,
+ RN5T618_LDO3,
+ RN5T618_LDO4,
+ RN5T618_LDO5,
+ RN5T618_LDORTC1,
+ RN5T618_LDORTC2,
+ RN5T618_REG_NUM,
+};
+
+struct rn5t618 {
+ struct regmap *regmap;
+};
+
+#endif /* __LINUX_MFD_RN5T618_H */
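The RN5T618 ADC results are split across *DATAH/*DATAL register pairs. A hedged sketch of reading the battery-voltage pair through the regmap handle in struct rn5t618; how the two bytes combine into the final sample (bit widths, alignment, scaling) comes from the datasheet and is only assumed here:

#include <linux/mfd/rn5t618.h>
#include <linux/regmap.h>

/* Read the raw VBAT ADC sample. The 8 + 4 bit split used below is an
 * assumption about the register layout, not stated in this header. */
static int example_read_vbat_raw(struct rn5t618 *priv, unsigned int *raw)
{
	unsigned int hi, lo;
	int ret;

	ret = regmap_read(priv->regmap, RN5T618_VBATDATAH, &hi);
	if (ret)
		return ret;
	ret = regmap_read(priv->regmap, RN5T618_VBATDATAL, &lo);
	if (ret)
		return ret;

	*raw = (hi << 4) | (lo & 0x0f);
	return 0;
}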
diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h
new file mode 100644
index 000000000..1b63fc2f4
--- /dev/null
+++ b/include/linux/mfd/rt5033-private.h
@@ -0,0 +1,260 @@
+/*
+ * MFD core driver for Richtek RT5033
+ *
+ * Copyright (C) 2014 Samsung Electronics, Co., Ltd.
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5033_PRIVATE_H__
+#define __RT5033_PRIVATE_H__
+
+enum rt5033_reg {
+ RT5033_REG_CHG_STAT = 0x00,
+ RT5033_REG_CHG_CTRL1 = 0x01,
+ RT5033_REG_CHG_CTRL2 = 0x02,
+ RT5033_REG_DEVICE_ID = 0x03,
+ RT5033_REG_CHG_CTRL3 = 0x04,
+ RT5033_REG_CHG_CTRL4 = 0x05,
+ RT5033_REG_CHG_CTRL5 = 0x06,
+ RT5033_REG_RT_CTRL0 = 0x07,
+ RT5033_REG_CHG_RESET = 0x08,
+ /* Reserved 0x09~0x18 */
+ RT5033_REG_RT_CTRL1 = 0x19,
+ /* Reserved 0x1A~0x20 */
+ RT5033_REG_FLED_FUNCTION1 = 0x21,
+ RT5033_REG_FLED_FUNCTION2 = 0x22,
+ RT5033_REG_FLED_STROBE_CTRL1 = 0x23,
+ RT5033_REG_FLED_STROBE_CTRL2 = 0x24,
+ RT5033_REG_FLED_CTRL1 = 0x25,
+ RT5033_REG_FLED_CTRL2 = 0x26,
+ RT5033_REG_FLED_CTRL3 = 0x27,
+ RT5033_REG_FLED_CTRL4 = 0x28,
+ RT5033_REG_FLED_CTRL5 = 0x29,
+ /* Reserved 0x2A~0x40 */
+ RT5033_REG_CTRL = 0x41,
+ RT5033_REG_BUCK_CTRL = 0x42,
+ RT5033_REG_LDO_CTRL = 0x43,
+ /* Reserved 0x44~0x46 */
+ RT5033_REG_MANUAL_RESET_CTRL = 0x47,
+ /* Reserved 0x48~0x5F */
+ RT5033_REG_CHG_IRQ1 = 0x60,
+ RT5033_REG_CHG_IRQ2 = 0x61,
+ RT5033_REG_CHG_IRQ3 = 0x62,
+ RT5033_REG_CHG_IRQ1_CTRL = 0x63,
+ RT5033_REG_CHG_IRQ2_CTRL = 0x64,
+ RT5033_REG_CHG_IRQ3_CTRL = 0x65,
+ RT5033_REG_LED_IRQ_STAT = 0x66,
+ RT5033_REG_LED_IRQ_CTRL = 0x67,
+ RT5033_REG_PMIC_IRQ_STAT = 0x68,
+ RT5033_REG_PMIC_IRQ_CTRL = 0x69,
+ RT5033_REG_SHDN_CTRL = 0x6A,
+ RT5033_REG_OFF_EVENT = 0x6B,
+
+ RT5033_REG_END,
+};
+
+/* RT5033 Charger state register */
+#define RT5033_CHG_STAT_MASK 0x20
+#define RT5033_CHG_STAT_DISCHARGING 0x00
+#define RT5033_CHG_STAT_FULL 0x10
+#define RT5033_CHG_STAT_CHARGING 0x20
+#define RT5033_CHG_STAT_NOT_CHARGING 0x30
+#define RT5033_CHG_STAT_TYPE_MASK 0x60
+#define RT5033_CHG_STAT_TYPE_PRE 0x20
+#define RT5033_CHG_STAT_TYPE_FAST 0x60
+
+/* RT5033 CHGCTRL1 register */
+#define RT5033_CHGCTRL1_IAICR_MASK 0xe0
+#define RT5033_CHGCTRL1_MODE_MASK 0x01
+
+/* RT5033 CHGCTRL2 register */
+#define RT5033_CHGCTRL2_CV_MASK 0xfc
+
+/* RT5033 CHGCTRL3 register */
+#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40
+#define RT5033_CHGCTRL3_TIMER_MASK 0x38
+#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01
+
+/* RT5033 CHGCTRL4 register */
+#define RT5033_CHGCTRL4_EOC_MASK 0x07
+#define RT5033_CHGCTRL4_IPREC_MASK 0x18
+
+/* RT5033 CHGCTRL5 register */
+#define RT5033_CHGCTRL5_VPREC_MASK 0x0f
+#define RT5033_CHGCTRL5_ICHG_MASK 0xf0
+#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04
+#define RT5033_CHG_MAX_CURRENT 0x0d
+
+/* RT5033 RT CTRL1 register */
+#define RT5033_RT_CTRL1_UUG_MASK 0x02
+#define RT5033_RT_HZ_MASK 0x01
+
+/* RT5033 control register */
+#define RT5033_CTRL_FCCM_BUCK_MASK 0x00
+#define RT5033_CTRL_BUCKOMS_MASK 0x01
+#define RT5033_CTRL_LDOOMS_MASK 0x02
+#define RT5033_CTRL_SLDOOMS_MASK 0x03
+#define RT5033_CTRL_EN_BUCK_MASK 0x04
+#define RT5033_CTRL_EN_LDO_MASK 0x05
+#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06
+#define RT5033_CTRL_LDO_SLEEP_MASK 0x07
+
+/* RT5033 BUCK control register */
+#define RT5033_BUCK_CTRL_MASK 0x1f
+
+/* RT5033 LDO control register */
+#define RT5033_LDO_CTRL_MASK 0x1f
+
+/* RT5033 charger property - model, manufacturer */
+
+#define RT5033_CHARGER_MODEL "RT5033WSC Charger"
+#define RT5033_MANUFACTURER "Richtek Technology Corporation"
+
+/*
+ * RT5033 charger fast-charge current limits (as in CHGCTRL1 register).
+ * The AICR mode limits the input current; for example,
+ * the AICR 100 mode limits the input current to 100 mA.
+ */
+#define RT5033_AICR_100_MODE 0x20
+#define RT5033_AICR_500_MODE 0x40
+#define RT5033_AICR_700_MODE 0x60
+#define RT5033_AICR_900_MODE 0x80
+#define RT5033_AICR_1500_MODE 0xc0
+#define RT5033_AICR_2000_MODE 0xe0
+#define RT5033_AICR_MODE_MASK 0xe0
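
Since the AICR selection occupies the top bits of CHGCTRL1, a charger driver would typically program it with a masked regmap update. A minimal sketch, assuming the regmap handle comes from the parent rt5033_dev declared in rt5033.h (the function name is hypothetical):

#include <linux/mfd/rt5033-private.h>
#include <linux/regmap.h>

/* Limit the charger input current to 500 mA by selecting AICR 500 mode. */
static int example_set_aicr_500mA(struct regmap *regmap)
{
	return regmap_update_bits(regmap, RT5033_REG_CHG_CTRL1,
				  RT5033_AICR_MODE_MASK,
				  RT5033_AICR_500_MODE);
}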
+
+/* RT5033 uses an internal timer; the timer period must be set */
+#define RT5033_FAST_CHARGE_TIMER4 0x00
+#define RT5033_FAST_CHARGE_TIMER6 0x01
+#define RT5033_FAST_CHARGE_TIMER8 0x02
+#define RT5033_FAST_CHARGE_TIMER9 0x03
+#define RT5033_FAST_CHARGE_TIMER12 0x04
+#define RT5033_FAST_CHARGE_TIMER14 0x05
+#define RT5033_FAST_CHARGE_TIMER16 0x06
+
+#define RT5033_INT_TIMER_ENABLE 0x01
+
+/* RT5033 charger termination enable mask */
+#define RT5033_TE_ENABLE_MASK 0x08
+
+/*
+ * RT5033 charger OPA mode. The RT5033 has two OPA modes:
+ * charger mode and boost mode (for OTG).
+ */
+
+#define RT5033_CHARGER_MODE 0x00
+#define RT5033_BOOST_MODE 0x01
+
+/* RT5033 charger termination enable */
+#define RT5033_TE_ENABLE 0x08
+
+/* RT5033 charger CFO enable */
+#define RT5033_CFO_ENABLE 0x40
+
+/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */
+#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U
+#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U
+#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U
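
Despite the _STEP_NUM suffix, the middle constant appears to hold the step size in uV, so a requested constant-charge voltage maps to a register selector roughly as sketched below; the linear mapping from the minimum is an assumption, and the function is purely illustrative:

/* Convert a requested constant-charge voltage (uV) to a selector index,
 * assuming selector 0 equals the minimum and one step per code. */
static unsigned int example_cv_to_sel(unsigned int uV)
{
	if (uV < RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN)
		uV = RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN;
	if (uV > RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX)
		uV = RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX;

	return (uV - RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN) /
		RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM;
}

Under that assumption a 4.2 V target, for instance, yields selector (4200000 - 3650000) / 25000 = 22.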
+
+/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */
+#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U
+#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U
+#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U
+
+/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */
+#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U
+#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U
+#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U
+
+/*
+ * RT5033 charger constant-charge end-of-charge current
+ * (as in CHGCTRL4 register), uA
+ */
+#define RT5033_CHARGER_EOC_MIN 150000U
+#define RT5033_CHARGER_EOC_REF 300000U
+#define RT5033_CHARGER_EOC_STEP_NUM1 50000U
+#define RT5033_CHARGER_EOC_STEP_NUM2 100000U
+#define RT5033_CHARGER_EOC_MAX 600000U
+
+/*
+ * RT5033 charger pre-charge threshold voltage limits
+ * (as in CHGCTRL5 register), uV
+ */
+
+#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U
+#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U
+#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U
+
+/*
+ * RT5033 charger UUG enable. If UUG is enabled, the MOSFET is controlled
+ * automatically by the H/W charger circuit.
+ */
+#define RT5033_CHARGER_UUG_ENABLE 0x02
+
+/* RT5033 charger High impedance mode */
+#define RT5033_CHARGER_HZ_DISABLE 0x00
+#define RT5033_CHARGER_HZ_ENABLE 0x01
+
+/* RT5033 regulator BUCK output voltage uV */
+#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U
+#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32
+
+/* RT5033 regulator LDO output voltage uV */
+#define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U
+#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32
+
+/* RT5033 regulator SAFE LDO output voltage uV */
+#define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U
+
+enum rt5033_fuel_reg {
+ RT5033_FUEL_REG_OCV_H = 0x00,
+ RT5033_FUEL_REG_OCV_L = 0x01,
+ RT5033_FUEL_REG_VBAT_H = 0x02,
+ RT5033_FUEL_REG_VBAT_L = 0x03,
+ RT5033_FUEL_REG_SOC_H = 0x04,
+ RT5033_FUEL_REG_SOC_L = 0x05,
+ RT5033_FUEL_REG_CTRL_H = 0x06,
+ RT5033_FUEL_REG_CTRL_L = 0x07,
+ RT5033_FUEL_REG_CRATE = 0x08,
+ RT5033_FUEL_REG_DEVICE_ID = 0x09,
+ RT5033_FUEL_REG_AVG_VOLT_H = 0x0A,
+ RT5033_FUEL_REG_AVG_VOLT_L = 0x0B,
+ RT5033_FUEL_REG_CONFIG_H = 0x0C,
+ RT5033_FUEL_REG_CONFIG_L = 0x0D,
+ /* Reserved 0x0E~0x0F */
+ RT5033_FUEL_REG_IRQ_CTRL = 0x10,
+ RT5033_FUEL_REG_IRQ_FLAG = 0x11,
+ RT5033_FUEL_VMIN = 0x12,
+ RT5033_FUEL_SMIN = 0x13,
+ /* Reserved 0x14~0x1F */
+ RT5033_FUEL_VGCOMP1 = 0x20,
+ RT5033_FUEL_VGCOMP2 = 0x21,
+ RT5033_FUEL_VGCOMP3 = 0x22,
+ RT5033_FUEL_VGCOMP4 = 0x23,
+ /* Reserved 0x24~0xFD */
+ RT5033_FUEL_MFA_H = 0xFE,
+ RT5033_FUEL_MFA_L = 0xFF,
+
+ RT5033_FUEL_REG_END,
+};
+
+/* RT5033 fuel gauge battery present property */
+#define RT5033_FUEL_BAT_PRESENT 0x02
+
+/* RT5033 PMIC interrupts */
+#define RT5033_PMIC_IRQ_BUCKOCP 2
+#define RT5033_PMIC_IRQ_BUCKLV 3
+#define RT5033_PMIC_IRQ_SAFELDOLV 4
+#define RT5033_PMIC_IRQ_LDOLV 5
+#define RT5033_PMIC_IRQ_OT 6
+#define RT5033_PMIC_IRQ_VDDA_UV 7
+
+#endif /* __RT5033_PRIVATE_H__ */
diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h
new file mode 100644
index 000000000..6cff5cf45
--- /dev/null
+++ b/include/linux/mfd/rt5033.h
@@ -0,0 +1,62 @@
+/*
+ * MFD core driver for the RT5033
+ *
+ * Copyright (C) 2014 Samsung Electronics
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5033_H__
+#define __RT5033_H__
+
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+
+/* RT5033 regulator IDs */
+enum rt5033_regulators {
+ RT5033_BUCK = 0,
+ RT5033_LDO,
+ RT5033_SAFE_LDO,
+
+ RT5033_REGULATOR_NUM,
+};
+
+struct rt5033_dev {
+ struct device *dev;
+
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ int irq;
+ bool wakeup;
+};
+
+struct rt5033_battery {
+ struct i2c_client *client;
+ struct rt5033_dev *rt5033;
+ struct regmap *regmap;
+ struct power_supply *psy;
+};
+
+/* RT5033 charger platform data */
+struct rt5033_charger_data {
+ unsigned int pre_uamp;
+ unsigned int pre_uvolt;
+ unsigned int const_uvolt;
+ unsigned int eoc_uamp;
+ unsigned int fast_uamp;
+};
+
+struct rt5033_charger {
+ struct device *dev;
+ struct rt5033_dev *rt5033;
+ struct power_supply psy;
+
+ struct rt5033_charger_data *chg;
+};
+
+#endif /* __RT5033_H__ */
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/mfd/rtsx_common.h
new file mode 100644
index 000000000..443176ee1
--- /dev/null
+++ b/include/linux/mfd/rtsx_common.h
@@ -0,0 +1,50 @@
+/* Driver for Realtek driver-based card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#ifndef __RTSX_COMMON_H
+#define __RTSX_COMMON_H
+
+#define DRV_NAME_RTSX_PCI "rtsx_pci"
+#define DRV_NAME_RTSX_PCI_SDMMC "rtsx_pci_sdmmc"
+#define DRV_NAME_RTSX_PCI_MS "rtsx_pci_ms"
+
+#define RTSX_REG_PAIR(addr, val) (((u32)(addr) << 16) | (u8)(val))
+
+#define RTSX_SSC_DEPTH_4M 0x01
+#define RTSX_SSC_DEPTH_2M 0x02
+#define RTSX_SSC_DEPTH_1M 0x03
+#define RTSX_SSC_DEPTH_500K 0x04
+#define RTSX_SSC_DEPTH_250K 0x05
+
+#define RTSX_SD_CARD 0
+#define RTSX_MS_CARD 1
+
+#define CLK_TO_DIV_N 0
+#define DIV_N_TO_CLK 1
+
+struct platform_device;
+
+struct rtsx_slot {
+ struct platform_device *p_dev;
+ void (*card_event)(struct platform_device *p_dev);
+};
+
+#endif
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
new file mode 100644
index 000000000..ff843e7ca
--- /dev/null
+++ b/include/linux/mfd/rtsx_pci.h
@@ -0,0 +1,1045 @@
+/* Driver for Realtek PCI-Express card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Wei WANG <wei_wang@realsil.com.cn>
+ */
+
+#ifndef __RTSX_PCI_H
+#define __RTSX_PCI_H
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/mfd/rtsx_common.h>
+
+#define MAX_RW_REG_CNT 1024
+
+#define RTSX_HCBAR 0x00
+#define RTSX_HCBCTLR 0x04
+#define STOP_CMD (0x01 << 28)
+#define READ_REG_CMD 0
+#define WRITE_REG_CMD 1
+#define CHECK_REG_CMD 2
+
+#define RTSX_HDBAR 0x08
+#define SG_INT 0x04
+#define SG_END 0x02
+#define SG_VALID 0x01
+#define SG_NO_OP 0x00
+#define SG_TRANS_DATA (0x02 << 4)
+#define SG_LINK_DESC (0x03 << 4)
+#define RTSX_HDBCTLR 0x0C
+#define SDMA_MODE 0x00
+#define ADMA_MODE (0x02 << 26)
+#define STOP_DMA (0x01 << 28)
+#define TRIG_DMA (0x01 << 31)
+
+#define RTSX_HAIMR 0x10
+#define HAIMR_TRANS_START (0x01 << 31)
+#define HAIMR_READ 0x00
+#define HAIMR_WRITE (0x01 << 30)
+#define HAIMR_READ_START (HAIMR_TRANS_START | HAIMR_READ)
+#define HAIMR_WRITE_START (HAIMR_TRANS_START | HAIMR_WRITE)
+#define HAIMR_TRANS_END (HAIMR_TRANS_START)
+
+#define RTSX_BIPR 0x14
+#define CMD_DONE_INT (1 << 31)
+#define DATA_DONE_INT (1 << 30)
+#define TRANS_OK_INT (1 << 29)
+#define TRANS_FAIL_INT (1 << 28)
+#define XD_INT (1 << 27)
+#define MS_INT (1 << 26)
+#define SD_INT (1 << 25)
+#define GPIO0_INT (1 << 24)
+#define OC_INT (1 << 23)
+#define SD_WRITE_PROTECT (1 << 19)
+#define XD_EXIST (1 << 18)
+#define MS_EXIST (1 << 17)
+#define SD_EXIST (1 << 16)
+#define DELINK_INT GPIO0_INT
+#define MS_OC_INT (1 << 23)
+#define SD_OC_INT (1 << 22)
+
+#define CARD_INT (XD_INT | MS_INT | SD_INT)
+#define NEED_COMPLETE_INT (DATA_DONE_INT | TRANS_OK_INT | TRANS_FAIL_INT)
+#define RTSX_INT (CMD_DONE_INT | NEED_COMPLETE_INT | \
+ CARD_INT | GPIO0_INT | OC_INT)
+#define CARD_EXIST (XD_EXIST | MS_EXIST | SD_EXIST)
+
+#define RTSX_BIER 0x18
+#define CMD_DONE_INT_EN (1 << 31)
+#define DATA_DONE_INT_EN (1 << 30)
+#define TRANS_OK_INT_EN (1 << 29)
+#define TRANS_FAIL_INT_EN (1 << 28)
+#define XD_INT_EN (1 << 27)
+#define MS_INT_EN (1 << 26)
+#define SD_INT_EN (1 << 25)
+#define GPIO0_INT_EN (1 << 24)
+#define OC_INT_EN (1 << 23)
+#define DELINK_INT_EN GPIO0_INT_EN
+#define MS_OC_INT_EN (1 << 23)
+#define SD_OC_INT_EN (1 << 22)
+
+
+/*
+ * macros for easy use
+ */
+#define rtsx_pci_writel(pcr, reg, value) \
+ iowrite32(value, (pcr)->remap_addr + reg)
+#define rtsx_pci_readl(pcr, reg) \
+ ioread32((pcr)->remap_addr + reg)
+#define rtsx_pci_writew(pcr, reg, value) \
+ iowrite16(value, (pcr)->remap_addr + reg)
+#define rtsx_pci_readw(pcr, reg) \
+ ioread16((pcr)->remap_addr + reg)
+#define rtsx_pci_writeb(pcr, reg, value) \
+ iowrite8(value, (pcr)->remap_addr + reg)
+#define rtsx_pci_readb(pcr, reg) \
+ ioread8((pcr)->remap_addr + reg)
+
+#define rtsx_pci_read_config_byte(pcr, where, val) \
+ pci_read_config_byte((pcr)->pci, where, val)
+
+#define rtsx_pci_write_config_byte(pcr, where, val) \
+ pci_write_config_byte((pcr)->pci, where, val)
+
+#define rtsx_pci_read_config_dword(pcr, where, val) \
+ pci_read_config_dword((pcr)->pci, where, val)
+
+#define rtsx_pci_write_config_dword(pcr, where, val) \
+ pci_write_config_dword((pcr)->pci, where, val)
+
+#define STATE_TRANS_NONE 0
+#define STATE_TRANS_CMD 1
+#define STATE_TRANS_BUF 2
+#define STATE_TRANS_SG 3
+
+#define TRANS_NOT_READY 0
+#define TRANS_RESULT_OK 1
+#define TRANS_RESULT_FAIL 2
+#define TRANS_NO_DEVICE 3
+
+#define RTSX_RESV_BUF_LEN 4096
+#define HOST_CMDS_BUF_LEN 1024
+#define HOST_SG_TBL_BUF_LEN (RTSX_RESV_BUF_LEN - HOST_CMDS_BUF_LEN)
+#define HOST_SG_TBL_ITEMS (HOST_SG_TBL_BUF_LEN / 8)
+#define MAX_SG_ITEM_LEN 0x80000
+#define HOST_TO_DEVICE 0
+#define DEVICE_TO_HOST 1
+
+#define OUTPUT_3V3 0
+#define OUTPUT_1V8 1
+
+#define RTSX_PHASE_MAX 32
+#define RX_TUNING_CNT 3
+
+#define MS_CFG 0xFD40
+#define SAMPLE_TIME_RISING 0x00
+#define SAMPLE_TIME_FALLING 0x80
+#define PUSH_TIME_DEFAULT 0x00
+#define PUSH_TIME_ODD 0x40
+#define NO_EXTEND_TOGGLE 0x00
+#define EXTEND_TOGGLE_CHK 0x20
+#define MS_BUS_WIDTH_1 0x00
+#define MS_BUS_WIDTH_4 0x10
+#define MS_BUS_WIDTH_8 0x18
+#define MS_2K_SECTOR_MODE 0x04
+#define MS_512_SECTOR_MODE 0x00
+#define MS_TOGGLE_TIMEOUT_EN 0x00
+#define MS_TOGGLE_TIMEOUT_DISEN 0x01
+#define MS_NO_CHECK_INT 0x02
+#define MS_TPC 0xFD41
+#define MS_TRANS_CFG 0xFD42
+#define WAIT_INT 0x80
+#define NO_WAIT_INT 0x00
+#define NO_AUTO_READ_INT_REG 0x00
+#define AUTO_READ_INT_REG 0x40
+#define MS_CRC16_ERR 0x20
+#define MS_RDY_TIMEOUT 0x10
+#define MS_INT_CMDNK 0x08
+#define MS_INT_BREQ 0x04
+#define MS_INT_ERR 0x02
+#define MS_INT_CED 0x01
+#define MS_TRANSFER 0xFD43
+#define MS_TRANSFER_START 0x80
+#define MS_TRANSFER_END 0x40
+#define MS_TRANSFER_ERR 0x20
+#define MS_BS_STATE 0x10
+#define MS_TM_READ_BYTES 0x00
+#define MS_TM_NORMAL_READ 0x01
+#define MS_TM_WRITE_BYTES 0x04
+#define MS_TM_NORMAL_WRITE 0x05
+#define MS_TM_AUTO_READ 0x08
+#define MS_TM_AUTO_WRITE 0x0C
+#define MS_INT_REG 0xFD44
+#define MS_BYTE_CNT 0xFD45
+#define MS_SECTOR_CNT_L 0xFD46
+#define MS_SECTOR_CNT_H 0xFD47
+#define MS_DBUS_H 0xFD48
+
+#define SD_CFG1 0xFDA0
+#define SD_CLK_DIVIDE_0 0x00
+#define SD_CLK_DIVIDE_256 0xC0
+#define SD_CLK_DIVIDE_128 0x80
+#define SD_BUS_WIDTH_1BIT 0x00
+#define SD_BUS_WIDTH_4BIT 0x01
+#define SD_BUS_WIDTH_8BIT 0x02
+#define SD_ASYNC_FIFO_NOT_RST 0x10
+#define SD_20_MODE 0x00
+#define SD_DDR_MODE 0x04
+#define SD_30_MODE 0x08
+#define SD_CLK_DIVIDE_MASK 0xC0
+#define SD_CFG2 0xFDA1
+#define SD_CALCULATE_CRC7 0x00
+#define SD_NO_CALCULATE_CRC7 0x80
+#define SD_CHECK_CRC16 0x00
+#define SD_NO_CHECK_CRC16 0x40
+#define SD_NO_CHECK_WAIT_CRC_TO 0x20
+#define SD_WAIT_BUSY_END 0x08
+#define SD_NO_WAIT_BUSY_END 0x00
+#define SD_CHECK_CRC7 0x00
+#define SD_NO_CHECK_CRC7 0x04
+#define SD_RSP_LEN_0 0x00
+#define SD_RSP_LEN_6 0x01
+#define SD_RSP_LEN_17 0x02
+#define SD_RSP_TYPE_R0 0x04
+#define SD_RSP_TYPE_R1 0x01
+#define SD_RSP_TYPE_R1b 0x09
+#define SD_RSP_TYPE_R2 0x02
+#define SD_RSP_TYPE_R3 0x05
+#define SD_RSP_TYPE_R4 0x05
+#define SD_RSP_TYPE_R5 0x01
+#define SD_RSP_TYPE_R6 0x01
+#define SD_RSP_TYPE_R7 0x01
+#define SD_CFG3 0xFDA2
+#define SD_RSP_80CLK_TIMEOUT_EN 0x01
+
+#define SD_STAT1 0xFDA3
+#define SD_CRC7_ERR 0x80
+#define SD_CRC16_ERR 0x40
+#define SD_CRC_WRITE_ERR 0x20
+#define SD_CRC_WRITE_ERR_MASK 0x1C
+#define GET_CRC_TIME_OUT 0x02
+#define SD_TUNING_COMPARE_ERR 0x01
+#define SD_STAT2 0xFDA4
+#define SD_RSP_80CLK_TIMEOUT 0x01
+
+#define SD_BUS_STAT 0xFDA5
+#define SD_CLK_TOGGLE_EN 0x80
+#define SD_CLK_FORCE_STOP 0x40
+#define SD_DAT3_STATUS 0x10
+#define SD_DAT2_STATUS 0x08
+#define SD_DAT1_STATUS 0x04
+#define SD_DAT0_STATUS 0x02
+#define SD_CMD_STATUS 0x01
+#define SD_PAD_CTL 0xFDA6
+#define SD_IO_USING_1V8 0x80
+#define SD_IO_USING_3V3 0x7F
+#define TYPE_A_DRIVING 0x00
+#define TYPE_B_DRIVING 0x01
+#define TYPE_C_DRIVING 0x02
+#define TYPE_D_DRIVING 0x03
+#define SD_SAMPLE_POINT_CTL 0xFDA7
+#define DDR_FIX_RX_DAT 0x00
+#define DDR_VAR_RX_DAT 0x80
+#define DDR_FIX_RX_DAT_EDGE 0x00
+#define DDR_FIX_RX_DAT_14_DELAY 0x40
+#define DDR_FIX_RX_CMD 0x00
+#define DDR_VAR_RX_CMD 0x20
+#define DDR_FIX_RX_CMD_POS_EDGE 0x00
+#define DDR_FIX_RX_CMD_14_DELAY 0x10
+#define SD20_RX_POS_EDGE 0x00
+#define SD20_RX_14_DELAY 0x08
+#define SD20_RX_SEL_MASK 0x08
+#define SD_PUSH_POINT_CTL 0xFDA8
+#define DDR_FIX_TX_CMD_DAT 0x00
+#define DDR_VAR_TX_CMD_DAT 0x80
+#define DDR_FIX_TX_DAT_14_TSU 0x00
+#define DDR_FIX_TX_DAT_12_TSU 0x40
+#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
+#define DDR_FIX_TX_CMD_14_AHEAD 0x20
+#define SD20_TX_NEG_EDGE 0x00
+#define SD20_TX_14_AHEAD 0x10
+#define SD20_TX_SEL_MASK 0x10
+#define DDR_VAR_SDCLK_POL_SWAP 0x01
+#define SD_CMD0 0xFDA9
+#define SD_CMD_START 0x40
+#define SD_CMD1 0xFDAA
+#define SD_CMD2 0xFDAB
+#define SD_CMD3 0xFDAC
+#define SD_CMD4 0xFDAD
+#define SD_CMD5 0xFDAE
+#define SD_BYTE_CNT_L 0xFDAF
+#define SD_BYTE_CNT_H 0xFDB0
+#define SD_BLOCK_CNT_L 0xFDB1
+#define SD_BLOCK_CNT_H 0xFDB2
+#define SD_TRANSFER 0xFDB3
+#define SD_TRANSFER_START 0x80
+#define SD_TRANSFER_END 0x40
+#define SD_STAT_IDLE 0x20
+#define SD_TRANSFER_ERR 0x10
+#define SD_TM_NORMAL_WRITE 0x00
+#define SD_TM_AUTO_WRITE_3 0x01
+#define SD_TM_AUTO_WRITE_4 0x02
+#define SD_TM_AUTO_READ_3 0x05
+#define SD_TM_AUTO_READ_4 0x06
+#define SD_TM_CMD_RSP 0x08
+#define SD_TM_AUTO_WRITE_1 0x09
+#define SD_TM_AUTO_WRITE_2 0x0A
+#define SD_TM_NORMAL_READ 0x0C
+#define SD_TM_AUTO_READ_1 0x0D
+#define SD_TM_AUTO_READ_2 0x0E
+#define SD_TM_AUTO_TUNING 0x0F
+#define SD_CMD_STATE 0xFDB5
+#define SD_CMD_IDLE 0x80
+
+#define SD_DATA_STATE 0xFDB6
+#define SD_DATA_IDLE 0x80
+
+#define SRCTL 0xFC13
+
+#define DCM_DRP_CTL 0xFC23
+#define DCM_RESET 0x08
+#define DCM_LOCKED 0x04
+#define DCM_208M 0x00
+#define DCM_TX 0x01
+#define DCM_RX 0x02
+#define DCM_DRP_TRIG 0xFC24
+#define DRP_START 0x80
+#define DRP_DONE 0x40
+#define DCM_DRP_CFG 0xFC25
+#define DRP_WRITE 0x80
+#define DRP_READ 0x00
+#define DCM_WRITE_ADDRESS_50 0x50
+#define DCM_WRITE_ADDRESS_51 0x51
+#define DCM_READ_ADDRESS_00 0x00
+#define DCM_READ_ADDRESS_51 0x51
+#define DCM_DRP_WR_DATA_L 0xFC26
+#define DCM_DRP_WR_DATA_H 0xFC27
+#define DCM_DRP_RD_DATA_L 0xFC28
+#define DCM_DRP_RD_DATA_H 0xFC29
+#define SD_VPCLK0_CTL 0xFC2A
+#define SD_VPCLK1_CTL 0xFC2B
+#define SD_DCMPS0_CTL 0xFC2C
+#define SD_DCMPS1_CTL 0xFC2D
+#define SD_VPTX_CTL SD_VPCLK0_CTL
+#define SD_VPRX_CTL SD_VPCLK1_CTL
+#define PHASE_CHANGE 0x80
+#define PHASE_NOT_RESET 0x40
+#define SD_DCMPS_TX_CTL SD_DCMPS0_CTL
+#define SD_DCMPS_RX_CTL SD_DCMPS1_CTL
+#define DCMPS_CHANGE 0x80
+#define DCMPS_CHANGE_DONE 0x40
+#define DCMPS_ERROR 0x20
+#define DCMPS_CURRENT_PHASE 0x1F
+#define CARD_CLK_SOURCE 0xFC2E
+#define CRC_FIX_CLK (0x00 << 0)
+#define CRC_VAR_CLK0 (0x01 << 0)
+#define CRC_VAR_CLK1 (0x02 << 0)
+#define SD30_FIX_CLK (0x00 << 2)
+#define SD30_VAR_CLK0 (0x01 << 2)
+#define SD30_VAR_CLK1 (0x02 << 2)
+#define SAMPLE_FIX_CLK (0x00 << 4)
+#define SAMPLE_VAR_CLK0 (0x01 << 4)
+#define SAMPLE_VAR_CLK1 (0x02 << 4)
+#define CARD_PWR_CTL 0xFD50
+#define PMOS_STRG_MASK 0x10
+#define PMOS_STRG_800mA 0x10
+#define PMOS_STRG_400mA 0x00
+#define SD_POWER_OFF 0x03
+#define SD_PARTIAL_POWER_ON 0x01
+#define SD_POWER_ON 0x00
+#define SD_POWER_MASK 0x03
+#define MS_POWER_OFF 0x0C
+#define MS_PARTIAL_POWER_ON 0x04
+#define MS_POWER_ON 0x00
+#define MS_POWER_MASK 0x0C
+#define BPP_POWER_OFF 0x0F
+#define BPP_POWER_5_PERCENT_ON 0x0E
+#define BPP_POWER_10_PERCENT_ON 0x0C
+#define BPP_POWER_15_PERCENT_ON 0x08
+#define BPP_POWER_ON 0x00
+#define BPP_POWER_MASK 0x0F
+#define SD_VCC_PARTIAL_POWER_ON 0x02
+#define SD_VCC_POWER_ON 0x00
+#define CARD_CLK_SWITCH 0xFD51
+#define RTL8411B_PACKAGE_MODE 0xFD51
+#define CARD_SHARE_MODE 0xFD52
+#define CARD_SHARE_MASK 0x0F
+#define CARD_SHARE_MULTI_LUN 0x00
+#define CARD_SHARE_NORMAL 0x00
+#define CARD_SHARE_48_SD 0x04
+#define CARD_SHARE_48_MS 0x08
+#define CARD_SHARE_BAROSSA_SD 0x01
+#define CARD_SHARE_BAROSSA_MS 0x02
+#define CARD_DRIVE_SEL 0xFD53
+#define MS_DRIVE_8mA (0x01 << 6)
+#define MMC_DRIVE_8mA (0x01 << 4)
+#define XD_DRIVE_8mA (0x01 << 2)
+#define GPIO_DRIVE_8mA 0x01
+#define RTS5209_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA | GPIO_DRIVE_8mA)
+#define RTL8411_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | MMC_DRIVE_8mA |\
+ XD_DRIVE_8mA)
+#define RTSX_CARD_DRIVE_DEFAULT (MS_DRIVE_8mA | GPIO_DRIVE_8mA)
+
+#define CARD_STOP 0xFD54
+#define SPI_STOP 0x01
+#define XD_STOP 0x02
+#define SD_STOP 0x04
+#define MS_STOP 0x08
+#define SPI_CLR_ERR 0x10
+#define XD_CLR_ERR 0x20
+#define SD_CLR_ERR 0x40
+#define MS_CLR_ERR 0x80
+#define CARD_OE 0xFD55
+#define SD_OUTPUT_EN 0x04
+#define MS_OUTPUT_EN 0x08
+#define CARD_AUTO_BLINK 0xFD56
+#define CARD_GPIO_DIR 0xFD57
+#define CARD_GPIO 0xFD58
+#define CARD_DATA_SOURCE 0xFD5B
+#define PINGPONG_BUFFER 0x01
+#define RING_BUFFER 0x00
+#define SD30_CLK_DRIVE_SEL 0xFD5A
+#define DRIVER_TYPE_A 0x05
+#define DRIVER_TYPE_B 0x03
+#define DRIVER_TYPE_C 0x02
+#define DRIVER_TYPE_D 0x01
+#define CARD_SELECT 0xFD5C
+#define SD_MOD_SEL 2
+#define MS_MOD_SEL 3
+#define SD30_DRIVE_SEL 0xFD5E
+#define CFG_DRIVER_TYPE_A 0x02
+#define CFG_DRIVER_TYPE_B 0x03
+#define CFG_DRIVER_TYPE_C 0x01
+#define CFG_DRIVER_TYPE_D 0x00
+#define SD30_CMD_DRIVE_SEL 0xFD5E
+#define SD30_DAT_DRIVE_SEL 0xFD5F
+#define CARD_CLK_EN 0xFD69
+#define SD_CLK_EN 0x04
+#define MS_CLK_EN 0x08
+#define SDIO_CTRL 0xFD6B
+#define CD_PAD_CTL 0xFD73
+#define CD_DISABLE_MASK 0x07
+#define MS_CD_DISABLE 0x04
+#define SD_CD_DISABLE 0x02
+#define XD_CD_DISABLE 0x01
+#define CD_DISABLE 0x07
+#define CD_ENABLE 0x00
+#define MS_CD_EN_ONLY 0x03
+#define SD_CD_EN_ONLY 0x05
+#define XD_CD_EN_ONLY 0x06
+#define FORCE_CD_LOW_MASK 0x38
+#define FORCE_CD_XD_LOW 0x08
+#define FORCE_CD_SD_LOW 0x10
+#define FORCE_CD_MS_LOW 0x20
+#define CD_AUTO_DISABLE 0x40
+#define FPDCTL 0xFC00
+#define SSC_POWER_DOWN 0x01
+#define SD_OC_POWER_DOWN 0x02
+#define ALL_POWER_DOWN 0x07
+#define OC_POWER_DOWN 0x06
+#define PDINFO 0xFC01
+
+#define CLK_CTL 0xFC02
+#define CHANGE_CLK 0x01
+#define CLK_LOW_FREQ 0x01
+
+#define CLK_DIV 0xFC03
+#define CLK_DIV_1 0x01
+#define CLK_DIV_2 0x02
+#define CLK_DIV_4 0x03
+#define CLK_DIV_8 0x04
+#define CLK_SEL 0xFC04
+
+#define SSC_DIV_N_0 0xFC0F
+#define SSC_DIV_N_1 0xFC10
+#define SSC_CTL1 0xFC11
+#define SSC_RSTB 0x80
+#define SSC_8X_EN 0x40
+#define SSC_FIX_FRAC 0x20
+#define SSC_SEL_1M 0x00
+#define SSC_SEL_2M 0x08
+#define SSC_SEL_4M 0x10
+#define SSC_SEL_8M 0x18
+#define SSC_CTL2 0xFC12
+#define SSC_DEPTH_MASK 0x07
+#define SSC_DEPTH_DISALBE 0x00
+#define SSC_DEPTH_4M 0x01
+#define SSC_DEPTH_2M 0x02
+#define SSC_DEPTH_1M 0x03
+#define SSC_DEPTH_500K 0x04
+#define SSC_DEPTH_250K 0x05
+#define RCCTL 0xFC14
+
+#define FPGA_PULL_CTL 0xFC1D
+#define OLT_LED_CTL 0xFC1E
+#define GPIO_CTL 0xFC1F
+
+#define LDO_CTL 0xFC1E
+#define BPP_ASIC_1V7 0x00
+#define BPP_ASIC_1V8 0x01
+#define BPP_ASIC_1V9 0x02
+#define BPP_ASIC_2V0 0x03
+#define BPP_ASIC_2V7 0x04
+#define BPP_ASIC_2V8 0x05
+#define BPP_ASIC_3V2 0x06
+#define BPP_ASIC_3V3 0x07
+#define BPP_REG_TUNED18 0x07
+#define BPP_TUNED18_SHIFT_8402 5
+#define BPP_TUNED18_SHIFT_8411 4
+#define BPP_PAD_MASK 0x04
+#define BPP_PAD_3V3 0x04
+#define BPP_PAD_1V8 0x00
+#define BPP_LDO_POWB 0x03
+#define BPP_LDO_ON 0x00
+#define BPP_LDO_SUSPEND 0x02
+#define BPP_LDO_OFF 0x03
+#define SYS_VER 0xFC32
+
+#define CARD_PULL_CTL1 0xFD60
+#define CARD_PULL_CTL2 0xFD61
+#define CARD_PULL_CTL3 0xFD62
+#define CARD_PULL_CTL4 0xFD63
+#define CARD_PULL_CTL5 0xFD64
+#define CARD_PULL_CTL6 0xFD65
+
+/* PCI Express Related Registers */
+#define IRQEN0 0xFE20
+#define IRQSTAT0 0xFE21
+#define DMA_DONE_INT 0x80
+#define SUSPEND_INT 0x40
+#define LINK_RDY_INT 0x20
+#define LINK_DOWN_INT 0x10
+#define IRQEN1 0xFE22
+#define IRQSTAT1 0xFE23
+#define TLPRIEN 0xFE24
+#define TLPRISTAT 0xFE25
+#define TLPTIEN 0xFE26
+#define TLPTISTAT 0xFE27
+#define DMATC0 0xFE28
+#define DMATC1 0xFE29
+#define DMATC2 0xFE2A
+#define DMATC3 0xFE2B
+#define DMACTL 0xFE2C
+#define DMA_RST 0x80
+#define DMA_BUSY 0x04
+#define DMA_DIR_TO_CARD 0x00
+#define DMA_DIR_FROM_CARD 0x02
+#define DMA_EN 0x01
+#define DMA_128 (0 << 4)
+#define DMA_256 (1 << 4)
+#define DMA_512 (2 << 4)
+#define DMA_1024 (3 << 4)
+#define DMA_PACK_SIZE_MASK 0x30
+#define BCTL 0xFE2D
+#define RBBC0 0xFE2E
+#define RBBC1 0xFE2F
+#define RBDAT 0xFE30
+#define RBCTL 0xFE34
+#define CFGADDR0 0xFE35
+#define CFGADDR1 0xFE36
+#define CFGDATA0 0xFE37
+#define CFGDATA1 0xFE38
+#define CFGDATA2 0xFE39
+#define CFGDATA3 0xFE3A
+#define CFGRWCTL 0xFE3B
+#define PHYRWCTL 0xFE3C
+#define PHYDATA0 0xFE3D
+#define PHYDATA1 0xFE3E
+#define PHYADDR 0xFE3F
+#define MSGRXDATA0 0xFE40
+#define MSGRXDATA1 0xFE41
+#define MSGRXDATA2 0xFE42
+#define MSGRXDATA3 0xFE43
+#define MSGTXDATA0 0xFE44
+#define MSGTXDATA1 0xFE45
+#define MSGTXDATA2 0xFE46
+#define MSGTXDATA3 0xFE47
+#define MSGTXCTL 0xFE48
+#define LTR_CTL 0xFE4A
+#define OBFF_CFG 0xFE4C
+
+#define CDRESUMECTL 0xFE52
+#define WAKE_SEL_CTL 0xFE54
+#define PCLK_CTL 0xFE55
+#define PCLK_MODE_SEL 0x20
+#define PME_FORCE_CTL 0xFE56
+
+#define ASPM_FORCE_CTL 0xFE57
+#define FORCE_ASPM_CTL0 0x10
+#define FORCE_ASPM_VAL_MASK 0x03
+#define FORCE_ASPM_L1_EN 0x02
+#define FORCE_ASPM_L0_EN 0x01
+#define FORCE_ASPM_NO_ASPM 0x00
+#define PM_CLK_FORCE_CTL 0xFE58
+#define FUNC_FORCE_CTL 0xFE59
+#define PERST_GLITCH_WIDTH 0xFE5C
+#define CHANGE_LINK_STATE 0xFE5B
+#define RESET_LOAD_REG 0xFE5E
+#define EFUSE_CONTENT 0xFE5F
+#define HOST_SLEEP_STATE 0xFE60
+#define HOST_ENTER_S1 1
+#define HOST_ENTER_S3 2
+
+#define SDIO_CFG 0xFE70
+#define PM_EVENT_DEBUG 0xFE71
+#define PME_DEBUG_0 0x08
+#define NFTS_TX_CTRL 0xFE72
+
+#define PWR_GATE_CTRL 0xFE75
+#define PWR_GATE_EN 0x01
+#define LDO3318_PWR_MASK 0x06
+#define LDO_ON 0x00
+#define LDO_SUSPEND 0x04
+#define LDO_OFF 0x06
+#define PWD_SUSPEND_EN 0xFE76
+#define LDO_PWR_SEL 0xFE78
+
+#define L1SUB_CONFIG1 0xFE8D
+#define L1SUB_CONFIG2 0xFE8E
+#define L1SUB_AUTO_CFG 0x02
+#define L1SUB_CONFIG3 0xFE8F
+
+#define DUMMY_REG_RESET_0 0xFE90
+
+#define AUTOLOAD_CFG_BASE 0xFF00
+#define PETXCFG 0xFF03
+
+#define PM_CTRL1 0xFF44
+#define CD_RESUME_EN_MASK 0xF0
+
+#define PM_CTRL2 0xFF45
+#define PM_CTRL3 0xFF46
+#define SDIO_SEND_PME_EN 0x80
+#define FORCE_RC_MODE_ON 0x40
+#define FORCE_RX50_LINK_ON 0x20
+#define D3_DELINK_MODE_EN 0x10
+#define USE_PESRTB_CTL_DELINK 0x08
+#define DELAY_PIN_WAKE 0x04
+#define RESET_PIN_WAKE 0x02
+#define PM_WAKE_EN 0x01
+#define PM_CTRL4 0xFF47
+
+/* Memory mapping */
+#define SRAM_BASE 0xE600
+#define RBUF_BASE 0xF400
+#define PPBUF_BASE1 0xF800
+#define PPBUF_BASE2 0xFA00
+#define IMAGE_FLAG_ADDR0 0xCE80
+#define IMAGE_FLAG_ADDR1 0xCE81
+
+#define RREF_CFG 0xFF6C
+#define RREF_VBGSEL_MASK 0x38
+#define RREF_VBGSEL_1V25 0x28
+
+#define OOBS_CONFIG 0xFF6E
+#define OOBS_AUTOK_DIS 0x80
+#define OOBS_VAL_MASK 0x1F
+
+#define LDO_DV18_CFG 0xFF70
+#define LDO_DV18_SR_MASK 0xC0
+#define LDO_DV18_SR_DF 0x40
+
+#define LDO_CONFIG2 0xFF71
+#define LDO_D3318_MASK 0x07
+#define LDO_D3318_33V 0x07
+#define LDO_D3318_18V 0x02
+
+#define LDO_VCC_CFG0 0xFF72
+#define LDO_VCC_LMTVTH_MASK 0x30
+#define LDO_VCC_LMTVTH_2A 0x10
+
+#define LDO_VCC_CFG1 0xFF73
+#define LDO_VCC_REF_TUNE_MASK 0x30
+#define LDO_VCC_REF_1V2 0x20
+#define LDO_VCC_TUNE_MASK 0x07
+#define LDO_VCC_1V8 0x04
+#define LDO_VCC_3V3 0x07
+#define LDO_VCC_LMT_EN 0x08
+
+#define LDO_VIO_CFG 0xFF75
+#define LDO_VIO_SR_MASK 0xC0
+#define LDO_VIO_SR_DF 0x40
+#define LDO_VIO_REF_TUNE_MASK 0x30
+#define LDO_VIO_REF_1V2 0x20
+#define LDO_VIO_TUNE_MASK 0x07
+#define LDO_VIO_1V7 0x03
+#define LDO_VIO_1V8 0x04
+#define LDO_VIO_3V3 0x07
+
+#define LDO_DV12S_CFG 0xFF76
+#define LDO_REF12_TUNE_MASK 0x18
+#define LDO_REF12_TUNE_DF 0x10
+#define LDO_D12_TUNE_MASK 0x07
+#define LDO_D12_TUNE_DF 0x04
+
+#define LDO_AV12S_CFG 0xFF77
+#define LDO_AV12S_TUNE_MASK 0x07
+#define LDO_AV12S_TUNE_DF 0x04
+
+#define SD40_LDO_CTL1 0xFE7D
+#define SD40_VIO_TUNE_MASK 0x70
+#define SD40_VIO_TUNE_1V7 0x30
+#define SD_VIO_LDO_1V8 0x40
+#define SD_VIO_LDO_3V3 0x70
+
+/* Phy register */
+#define PHY_PCR 0x00
+#define PHY_PCR_FORCE_CODE 0xB000
+#define PHY_PCR_OOBS_CALI_50 0x0800
+#define PHY_PCR_OOBS_VCM_08 0x0200
+#define PHY_PCR_OOBS_SEN_90 0x0040
+#define PHY_PCR_RSSI_EN 0x0002
+#define PHY_PCR_RX10K 0x0001
+
+#define PHY_RCR0 0x01
+#define PHY_RCR1 0x02
+#define PHY_RCR1_ADP_TIME_4 0x0400
+#define PHY_RCR1_VCO_COARSE 0x001F
+#define PHY_SSCCR2 0x02
+#define PHY_SSCCR2_PLL_NCODE 0x0A00
+#define PHY_SSCCR2_TIME0 0x001C
+#define PHY_SSCCR2_TIME2_WIDTH 0x0003
+
+#define PHY_RCR2 0x03
+#define PHY_RCR2_EMPHASE_EN 0x8000
+#define PHY_RCR2_NADJR 0x4000
+#define PHY_RCR2_CDR_SR_2 0x0100
+#define PHY_RCR2_FREQSEL_12 0x0040
+#define PHY_RCR2_CDR_SC_12P 0x0010
+#define PHY_RCR2_CALIB_LATE 0x0002
+#define PHY_SSCCR3 0x03
+#define PHY_SSCCR3_STEP_IN 0x2740
+#define PHY_SSCCR3_CHECK_DELAY 0x0008
+#define _PHY_ANA03 0x03
+#define _PHY_ANA03_TIMER_MAX 0x2700
+#define _PHY_ANA03_OOBS_DEB_EN 0x0040
+#define _PHY_CMU_DEBUG_EN 0x0008
+
+#define PHY_RTCR 0x04
+#define PHY_RDR 0x05
+#define PHY_RDR_RXDSEL_1_9 0x4000
+#define PHY_SSC_AUTO_PWD 0x0600
+#define PHY_TCR0 0x06
+#define PHY_TCR1 0x07
+#define PHY_TUNE 0x08
+#define PHY_TUNE_TUNEREF_1_0 0x4000
+#define PHY_TUNE_VBGSEL_1252 0x0C00
+#define PHY_TUNE_SDBUS_33 0x0200
+#define PHY_TUNE_TUNED18 0x01C0
+#define PHY_TUNE_TUNED12	0x0020
+#define PHY_TUNE_TUNEA12 0x0004
+#define PHY_TUNE_VOLTAGE_MASK 0xFC3F
+#define PHY_TUNE_VOLTAGE_3V3 0x03C0
+#define PHY_TUNE_D18_1V8 0x0100
+#define PHY_TUNE_D18_1V7 0x0080
+#define PHY_ANA08 0x08
+#define PHY_ANA08_RX_EQ_DCGAIN 0x5000
+#define PHY_ANA08_SEL_RX_EN 0x0400
+#define PHY_ANA08_RX_EQ_VAL 0x03C0
+#define PHY_ANA08_SCP 0x0020
+#define PHY_ANA08_SEL_IPI 0x0004
+
+#define PHY_IMR 0x09
+#define PHY_BPCR 0x0A
+#define PHY_BPCR_IBRXSEL 0x0400
+#define PHY_BPCR_IBTXSEL 0x0100
+#define PHY_BPCR_IB_FILTER 0x0080
+#define PHY_BPCR_CMIRROR_EN 0x0040
+
+#define PHY_BIST 0x0B
+#define PHY_RAW_L 0x0C
+#define PHY_RAW_H 0x0D
+#define PHY_RAW_DATA 0x0E
+#define PHY_HOST_CLK_CTRL 0x0F
+#define PHY_DMR 0x10
+#define PHY_BACR 0x11
+#define PHY_BACR_BASIC_MASK 0xFFF3
+#define PHY_IER 0x12
+#define PHY_BCSR 0x13
+#define PHY_BPR 0x14
+#define PHY_BPNR2 0x15
+#define PHY_BPNR 0x16
+#define PHY_BRNR2 0x17
+#define PHY_BENR 0x18
+#define PHY_REV 0x19
+#define PHY_REV_RESV 0xE000
+#define PHY_REV_RXIDLE_LATCHED 0x1000
+#define PHY_REV_P1_EN 0x0800
+#define PHY_REV_RXIDLE_EN 0x0400
+#define PHY_REV_CLKREQ_TX_EN 0x0200
+#define PHY_REV_CLKREQ_RX_EN 0x0100
+#define PHY_REV_CLKREQ_DT_1_0 0x0040
+#define PHY_REV_STOP_CLKRD 0x0020
+#define PHY_REV_RX_PWST 0x0008
+#define PHY_REV_STOP_CLKWR 0x0004
+#define _PHY_REV0 0x19
+#define _PHY_REV0_FILTER_OUT 0x3800
+#define _PHY_REV0_CDR_BYPASS_PFD 0x0100
+#define _PHY_REV0_CDR_RX_IDLE_BYPASS 0x0002
+
+#define PHY_FLD0 0x1A
+#define PHY_ANA1A 0x1A
+#define PHY_ANA1A_TXR_LOOPBACK 0x2000
+#define PHY_ANA1A_RXT_BIST 0x0500
+#define PHY_ANA1A_TXR_BIST 0x0040
+#define PHY_ANA1A_REV 0x0006
+#define PHY_FLD1 0x1B
+#define PHY_FLD2 0x1C
+#define PHY_FLD3 0x1D
+#define PHY_FLD3_TIMER_4 0x0800
+#define PHY_FLD3_TIMER_6 0x0020
+#define PHY_FLD3_RXDELINK 0x0004
+#define PHY_ANA1D 0x1D
+#define PHY_ANA1D_DEBUG_ADDR 0x0004
+#define _PHY_FLD0 0x1D
+#define _PHY_FLD0_CLK_REQ_20C 0x8000
+#define _PHY_FLD0_RX_IDLE_EN 0x1000
+#define _PHY_FLD0_BIT_ERR_RSTN 0x0800
+#define _PHY_FLD0_BER_COUNT 0x01E0
+#define _PHY_FLD0_BER_TIMER 0x001E
+#define _PHY_FLD0_CHECK_EN 0x0001
+
+#define PHY_FLD4 0x1E
+#define PHY_FLD4_FLDEN_SEL 0x4000
+#define PHY_FLD4_REQ_REF 0x2000
+#define PHY_FLD4_RXAMP_OFF 0x1000
+#define PHY_FLD4_REQ_ADDA 0x0800
+#define PHY_FLD4_BER_COUNT 0x00E0
+#define PHY_FLD4_BER_TIMER 0x000A
+#define PHY_FLD4_BER_CHK_EN 0x0001
+#define PHY_DIG1E 0x1E
+#define PHY_DIG1E_REV 0x4000
+#define PHY_DIG1E_D0_X_D1 0x1000
+#define PHY_DIG1E_RX_ON_HOST 0x0800
+#define PHY_DIG1E_RCLK_REF_HOST 0x0400
+#define PHY_DIG1E_RCLK_TX_EN_KEEP 0x0040
+#define PHY_DIG1E_RCLK_TX_TERM_KEEP 0x0020
+#define PHY_DIG1E_RCLK_RX_EIDLE_ON 0x0010
+#define PHY_DIG1E_TX_TERM_KEEP 0x0008
+#define PHY_DIG1E_RX_TERM_KEEP 0x0004
+#define PHY_DIG1E_TX_EN_KEEP 0x0002
+#define PHY_DIG1E_RX_EN_KEEP 0x0001
+#define PHY_DUM_REG 0x1F
+
+#define PCR_SETTING_REG1 0x724
+#define PCR_SETTING_REG2 0x814
+#define PCR_SETTING_REG3 0x747
+
+#define rtsx_pci_init_cmd(pcr) ((pcr)->ci = 0)
+
+struct rtsx_pcr;
+
+struct pcr_handle {
+ struct rtsx_pcr *pcr;
+};
+
+struct pcr_ops {
+ int (*write_phy)(struct rtsx_pcr *pcr, u8 addr, u16 val);
+ int (*read_phy)(struct rtsx_pcr *pcr, u8 addr, u16 *val);
+ int (*extra_init_hw)(struct rtsx_pcr *pcr);
+ int (*optimize_phy)(struct rtsx_pcr *pcr);
+ int (*turn_on_led)(struct rtsx_pcr *pcr);
+ int (*turn_off_led)(struct rtsx_pcr *pcr);
+ int (*enable_auto_blink)(struct rtsx_pcr *pcr);
+ int (*disable_auto_blink)(struct rtsx_pcr *pcr);
+ int (*card_power_on)(struct rtsx_pcr *pcr, int card);
+ int (*card_power_off)(struct rtsx_pcr *pcr, int card);
+ int (*switch_output_voltage)(struct rtsx_pcr *pcr,
+ u8 voltage);
+ unsigned int (*cd_deglitch)(struct rtsx_pcr *pcr);
+ int (*conv_clk_and_div_n)(int clk, int dir);
+ void (*fetch_vendor_settings)(struct rtsx_pcr *pcr);
+ void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state);
+};
+
+enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN};
+
+struct rtsx_pcr {
+ struct pci_dev *pci;
+ unsigned int id;
+ int pcie_cap;
+
+ /* pci resources */
+ unsigned long addr;
+ void __iomem *remap_addr;
+ int irq;
+
+ /* host reserved buffer */
+ void *rtsx_resv_buf;
+ dma_addr_t rtsx_resv_buf_addr;
+
+ void *host_cmds_ptr;
+ dma_addr_t host_cmds_addr;
+ int ci;
+
+ void *host_sg_tbl_ptr;
+ dma_addr_t host_sg_tbl_addr;
+ int sgi;
+
+ u32 bier;
+ char trans_result;
+
+ unsigned int card_inserted;
+ unsigned int card_removed;
+ unsigned int card_exist;
+
+ struct delayed_work carddet_work;
+ struct delayed_work idle_work;
+
+ spinlock_t lock;
+ struct mutex pcr_mutex;
+ struct completion *done;
+ struct completion *finish_me;
+
+ unsigned int cur_clock;
+ bool remove_pci;
+ bool msi_en;
+
+#define EXTRA_CAPS_SD_SDR50 (1 << 0)
+#define EXTRA_CAPS_SD_SDR104 (1 << 1)
+#define EXTRA_CAPS_SD_DDR50 (1 << 2)
+#define EXTRA_CAPS_MMC_HSDDR (1 << 3)
+#define EXTRA_CAPS_MMC_HS200 (1 << 4)
+#define EXTRA_CAPS_MMC_8BIT (1 << 5)
+ u32 extra_caps;
+
+#define IC_VER_A 0
+#define IC_VER_B 1
+#define IC_VER_C 2
+#define IC_VER_D 3
+ u8 ic_version;
+
+ u8 sd30_drive_sel_1v8;
+ u8 sd30_drive_sel_3v3;
+ u8 card_drive_sel;
+#define ASPM_L1_EN 0x02
+ u8 aspm_en;
+
+#define PCR_MS_PMOS (1 << 0)
+#define PCR_REVERSE_SOCKET (1 << 1)
+ u32 flags;
+
+ u32 tx_initial_phase;
+ u32 rx_initial_phase;
+
+ const u32 *sd_pull_ctl_enable_tbl;
+ const u32 *sd_pull_ctl_disable_tbl;
+ const u32 *ms_pull_ctl_enable_tbl;
+ const u32 *ms_pull_ctl_disable_tbl;
+
+ const struct pcr_ops *ops;
+ enum PDEV_STAT state;
+
+ u16 reg_pm_ctrl3;
+
+ int num_slots;
+ struct rtsx_slot *slots;
+};
+
+#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
+#define PCI_VID(pcr) ((pcr)->pci->vendor)
+#define PCI_PID(pcr) ((pcr)->pci->device)
+#define is_version(pcr, pid, ver) \
+ (CHK_PCI_PID(pcr, pid) && (pcr)->ic_version == (ver))
+#define pcr_dbg(pcr, fmt, arg...) \
+ dev_dbg(&(pcr)->pci->dev, fmt, ##arg)
+
+#define SDR104_PHASE(val) ((val) & 0xFF)
+#define SDR50_PHASE(val) (((val) >> 8) & 0xFF)
+#define DDR50_PHASE(val) (((val) >> 16) & 0xFF)
+#define SDR104_TX_PHASE(pcr) SDR104_PHASE((pcr)->tx_initial_phase)
+#define SDR50_TX_PHASE(pcr) SDR50_PHASE((pcr)->tx_initial_phase)
+#define DDR50_TX_PHASE(pcr) DDR50_PHASE((pcr)->tx_initial_phase)
+#define SDR104_RX_PHASE(pcr) SDR104_PHASE((pcr)->rx_initial_phase)
+#define SDR50_RX_PHASE(pcr) SDR50_PHASE((pcr)->rx_initial_phase)
+#define DDR50_RX_PHASE(pcr) DDR50_PHASE((pcr)->rx_initial_phase)
+#define SET_CLOCK_PHASE(sdr104, sdr50, ddr50) \
+ (((ddr50) << 16) | ((sdr50) << 8) | (sdr104))
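
These helpers pack and unpack three per-mode 8-bit phase values in a single u32; a chip-specific driver would typically seed pcr->tx_initial_phase / rx_initial_phase with SET_CLOCK_PHASE and read them back through the *_PHASE accessors. A small sketch with purely illustrative phase numbers:

#include <linux/mfd/rtsx_pci.h>

/* Illustrative only: real phase values differ per chip and come from tuning. */
static void example_seed_phases(struct rtsx_pcr *pcr)
{
	pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 16);
	pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
	/* SDR104_TX_PHASE(pcr) now evaluates to 27. */
}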
+
+void rtsx_pci_start_run(struct rtsx_pcr *pcr);
+int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data);
+int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data);
+int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val);
+int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val);
+void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr);
+void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
+ u8 cmd_type, u16 reg_addr, u8 mask, u8 data);
+void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr);
+int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout);
+int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int num_sg, bool read, int timeout);
+int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int num_sg, bool read);
+void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int num_sg, bool read);
+int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int count, bool read, int timeout);
+int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
+int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len);
+int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card);
+int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage);
+unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr);
+void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr);
+
+static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
+{
+ return (u8 *)(pcr->host_cmds_ptr);
+}
+
+static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
+ u8 mask, u8 append)
+{
+ int err;
+ u8 val;
+
+ err = pci_read_config_byte(pcr->pci, addr, &val);
+ if (err < 0)
+ return err;
+ return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
+}
+
+static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
+{
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
+}
+
+static inline int rtsx_pci_update_phy(struct rtsx_pcr *pcr, u8 addr,
+ u16 mask, u16 append)
+{
+ int err;
+ u16 val;
+
+ err = rtsx_pci_read_phy_register(pcr, addr, &val);
+ if (err < 0)
+ return err;
+
+ return rtsx_pci_write_phy_register(pcr, addr, (val & mask) | append);
+}
+
+#endif
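Internal register access on these readers is batched: rtsx_pci_init_cmd() resets the command index, rtsx_pci_add_cmd() queues READ_REG_CMD/WRITE_REG_CMD/CHECK_REG_CMD entries into the host command buffer, and rtsx_pci_send_cmd() kicks the whole batch and waits for completion. A minimal sketch of that pattern; the two writes chosen here (enabling the SD clock and output) are illustrative, not taken from a real chip driver:

#include <linux/mfd/rtsx_pci.h>

/* Batch two register writes and send them in one go (100 ms timeout). */
static int example_batched_writes(struct rtsx_pcr *pcr)
{
	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN,
			 SD_CLK_EN, SD_CLK_EN);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
			 SD_OUTPUT_EN, SD_OUTPUT_EN);
	return rtsx_pci_send_cmd(pcr, 100);
}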
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/mfd/rtsx_usb.h
new file mode 100644
index 000000000..c446e4fd6
--- /dev/null
+++ b/include/linux/mfd/rtsx_usb.h
@@ -0,0 +1,628 @@
+/* Driver for Realtek RTS5139 USB card reader
+ *
+ * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author:
+ * Roger Tseng <rogerable@realtek.com>
+ */
+
+#ifndef __RTSX_USB_H
+#define __RTSX_USB_H
+
+#include <linux/usb.h>
+
+/* related module names */
+#define RTSX_USB_SD_CARD 0
+#define RTSX_USB_MS_CARD 1
+
+/* endpoint numbers */
+#define EP_BULK_OUT 1
+#define EP_BULK_IN 2
+#define EP_INTR_IN 3
+
+/* USB vendor requests */
+#define RTSX_USB_REQ_REG_OP 0x00
+#define RTSX_USB_REQ_POLL 0x02
+
+/* miscellaneous parameters */
+#define MIN_DIV_N 60
+#define MAX_DIV_N 120
+
+#define MAX_PHASE 15
+#define RX_TUNING_CNT 3
+
+#define QFN24 0
+#define LQFP48 1
+#define CHECK_PKG(ucr, pkg) ((ucr)->package == (pkg))
+
+/* data structures */
+struct rtsx_ucr {
+ u16 vendor_id;
+ u16 product_id;
+
+ int package;
+ u8 ic_version;
+ bool is_rts5179;
+
+ unsigned int cur_clk;
+
+ u8 *cmd_buf;
+ unsigned int cmd_idx;
+ u8 *rsp_buf;
+
+ struct usb_device *pusb_dev;
+ struct usb_interface *pusb_intf;
+ struct usb_sg_request current_sg;
+ unsigned char *iobuf;
+ dma_addr_t iobuf_dma;
+
+ struct timer_list sg_timer;
+ struct mutex dev_mutex;
+};
+
+/* buffer size */
+#define IOBUF_SIZE 1024
+
+/* prototypes of exported functions */
+extern int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status);
+
+extern int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data);
+extern int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask,
+ u8 data);
+
+extern int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask,
+ u8 data);
+extern int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr,
+ u8 *data);
+
+extern void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type,
+ u16 reg_addr, u8 mask, u8 data);
+extern int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout);
+extern int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout);
+extern int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
+ void *buf, unsigned int len, int use_sg,
+ unsigned int *act_len, int timeout);
+
+extern int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len);
+extern int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len);
+extern int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock,
+ u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk);
+extern int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card);
+
+/* card status */
+#define SD_CD 0x01
+#define MS_CD 0x02
+#define XD_CD 0x04
+#define CD_MASK (SD_CD | MS_CD | XD_CD)
+#define SD_WP 0x08
+
+/* reader command field offset & parameters */
+#define READ_REG_CMD 0
+#define WRITE_REG_CMD 1
+#define CHECK_REG_CMD 2
+
+#define PACKET_TYPE 4
+#define CNT_H 5
+#define CNT_L 6
+#define STAGE_FLAG 7
+#define CMD_OFFSET 8
+#define SEQ_WRITE_DATA_OFFSET 12
+
+#define BATCH_CMD 0
+#define SEQ_READ 1
+#define SEQ_WRITE 2
+
+#define STAGE_R 0x01
+#define STAGE_DI 0x02
+#define STAGE_DO 0x04
+#define STAGE_MS_STATUS 0x08
+#define STAGE_XD_STATUS 0x10
+#define MODE_C 0x00
+#define MODE_CR (STAGE_R)
+#define MODE_CDIR (STAGE_R | STAGE_DI)
+#define MODE_CDOR (STAGE_R | STAGE_DO)
+
+#define EP0_OP_SHIFT 14
+#define EP0_READ_REG_CMD 2
+#define EP0_WRITE_REG_CMD 3
+
+#define rtsx_usb_cmd_hdr_tag(ucr) \
+ do { \
+ ucr->cmd_buf[0] = 'R'; \
+ ucr->cmd_buf[1] = 'T'; \
+ ucr->cmd_buf[2] = 'C'; \
+ ucr->cmd_buf[3] = 'R'; \
+ } while (0)
+
+static inline void rtsx_usb_init_cmd(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_cmd_hdr_tag(ucr);
+ ucr->cmd_idx = 0;
+ ucr->cmd_buf[PACKET_TYPE] = BATCH_CMD;
+}
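
The USB variant follows the same batching idea over a vendor bulk transfer: rtsx_usb_init_cmd() writes the "RTCR" header tag and resets the command index, rtsx_usb_add_cmd() appends register operations, and rtsx_usb_send_cmd()/rtsx_usb_get_rsp() perform the transfer and collect the response. A hedged sketch of a batched read; the chosen register (CARD_EXIST, defined further down in this header), the MODE_CR flag and the timeouts are illustrative:

#include <linux/mfd/rtsx_usb.h>

/* Queue one register read, send the batch, then fetch the 1-byte response. */
static int example_usb_batched_read(struct rtsx_ucr *ucr, u8 *card_exist)
{
	int ret;

	rtsx_usb_init_cmd(ucr);
	rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0x00, 0x00);

	ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
	if (ret)
		return ret;

	ret = rtsx_usb_get_rsp(ucr, 1, 100);
	if (ret)
		return ret;

	*card_exist = ucr->rsp_buf[0];
	return 0;
}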
+
+/* internal register address */
+#define FPDCTL 0xFC00
+#define SSC_DIV_N_0 0xFC07
+#define SSC_CTL1 0xFC09
+#define SSC_CTL2 0xFC0A
+#define CFG_MODE 0xFC0E
+#define CFG_MODE_1 0xFC0F
+#define RCCTL 0xFC14
+#define SOF_WDOG 0xFC28
+#define SYS_DUMMY0 0xFC30
+
+#define MS_BLKEND 0xFD30
+#define MS_READ_START 0xFD31
+#define MS_READ_COUNT 0xFD32
+#define MS_WRITE_START 0xFD33
+#define MS_WRITE_COUNT 0xFD34
+#define MS_COMMAND 0xFD35
+#define MS_OLD_BLOCK_0 0xFD36
+#define MS_OLD_BLOCK_1 0xFD37
+#define MS_NEW_BLOCK_0 0xFD38
+#define MS_NEW_BLOCK_1 0xFD39
+#define MS_LOG_BLOCK_0 0xFD3A
+#define MS_LOG_BLOCK_1 0xFD3B
+#define MS_BUS_WIDTH 0xFD3C
+#define MS_PAGE_START 0xFD3D
+#define MS_PAGE_LENGTH 0xFD3E
+#define MS_CFG 0xFD40
+#define MS_TPC 0xFD41
+#define MS_TRANS_CFG 0xFD42
+#define MS_TRANSFER 0xFD43
+#define MS_INT_REG 0xFD44
+#define MS_BYTE_CNT 0xFD45
+#define MS_SECTOR_CNT_L 0xFD46
+#define MS_SECTOR_CNT_H 0xFD47
+#define MS_DBUS_H 0xFD48
+
+#define CARD_DMA1_CTL 0xFD5C
+#define CARD_PULL_CTL1 0xFD60
+#define CARD_PULL_CTL2 0xFD61
+#define CARD_PULL_CTL3 0xFD62
+#define CARD_PULL_CTL4 0xFD63
+#define CARD_PULL_CTL5 0xFD64
+#define CARD_PULL_CTL6 0xFD65
+#define CARD_EXIST 0xFD6F
+#define CARD_INT_PEND 0xFD71
+
+#define LDO_POWER_CFG 0xFD7B
+
+#define SD_CFG1 0xFDA0
+#define SD_CFG2 0xFDA1
+#define SD_CFG3 0xFDA2
+#define SD_STAT1 0xFDA3
+#define SD_STAT2 0xFDA4
+#define SD_BUS_STAT 0xFDA5
+#define SD_PAD_CTL 0xFDA6
+#define SD_SAMPLE_POINT_CTL 0xFDA7
+#define SD_PUSH_POINT_CTL 0xFDA8
+#define SD_CMD0 0xFDA9
+#define SD_CMD1 0xFDAA
+#define SD_CMD2 0xFDAB
+#define SD_CMD3 0xFDAC
+#define SD_CMD4 0xFDAD
+#define SD_CMD5 0xFDAE
+#define SD_BYTE_CNT_L 0xFDAF
+#define SD_BYTE_CNT_H 0xFDB0
+#define SD_BLOCK_CNT_L 0xFDB1
+#define SD_BLOCK_CNT_H 0xFDB2
+#define SD_TRANSFER 0xFDB3
+#define SD_CMD_STATE 0xFDB5
+#define SD_DATA_STATE 0xFDB6
+#define SD_VPCLK0_CTL 0xFC2A
+#define SD_VPCLK1_CTL 0xFC2B
+#define SD_DCMPS0_CTL 0xFC2C
+#define SD_DCMPS1_CTL 0xFC2D
+
+#define CARD_DMA1_CTL 0xFD5C
+
+#define HW_VERSION 0xFC01
+
+#define SSC_CLK_FPGA_SEL 0xFC02
+#define CLK_DIV 0xFC03
+#define SFSM_ED 0xFC04
+
+#define CD_DEGLITCH_WIDTH 0xFC20
+#define CD_DEGLITCH_EN 0xFC21
+#define AUTO_DELINK_EN 0xFC23
+
+#define FPGA_PULL_CTL 0xFC1D
+#define CARD_CLK_SOURCE 0xFC2E
+
+#define CARD_SHARE_MODE 0xFD51
+#define CARD_DRIVE_SEL 0xFD52
+#define CARD_STOP 0xFD53
+#define CARD_OE 0xFD54
+#define CARD_AUTO_BLINK 0xFD55
+#define CARD_GPIO 0xFD56
+#define SD30_DRIVE_SEL 0xFD57
+
+#define CARD_DATA_SOURCE 0xFD5D
+#define CARD_SELECT 0xFD5E
+
+#define CARD_CLK_EN 0xFD79
+#define CARD_PWR_CTL 0xFD7A
+
+#define OCPCTL 0xFD80
+#define OCPPARA1 0xFD81
+#define OCPPARA2 0xFD82
+#define OCPSTAT 0xFD83
+
+#define HS_USB_STAT 0xFE01
+#define HS_VCONTROL 0xFE26
+#define HS_VSTAIN 0xFE27
+#define HS_VLOADM 0xFE28
+#define HS_VSTAOUT 0xFE29
+
+#define MC_IRQ 0xFF00
+#define MC_IRQEN 0xFF01
+#define MC_FIFO_CTL 0xFF02
+#define MC_FIFO_BC0 0xFF03
+#define MC_FIFO_BC1 0xFF04
+#define MC_FIFO_STAT 0xFF05
+#define MC_FIFO_MODE 0xFF06
+#define MC_FIFO_RD_PTR0 0xFF07
+#define MC_FIFO_RD_PTR1 0xFF08
+#define MC_DMA_CTL 0xFF10
+#define MC_DMA_TC0 0xFF11
+#define MC_DMA_TC1 0xFF12
+#define MC_DMA_TC2 0xFF13
+#define MC_DMA_TC3 0xFF14
+#define MC_DMA_RST 0xFF15
+
+#define RBUF_SIZE_MASK 0xFBFF
+#define RBUF_BASE 0xF000
+#define PPBUF_BASE1 0xF800
+#define PPBUF_BASE2 0xFA00
+
+/* internal register value macros */
+#define POWER_OFF 0x03
+#define PARTIAL_POWER_ON 0x02
+#define POWER_ON 0x00
+#define POWER_MASK 0x03
+#define LDO3318_PWR_MASK 0x0C
+#define LDO_ON 0x00
+#define LDO_SUSPEND 0x08
+#define LDO_OFF 0x0C
+#define DV3318_AUTO_PWR_OFF 0x10
+#define FORCE_LDO_POWERB 0x60
+
+/* LDO_POWER_CFG */
+#define TUNE_SD18_MASK 0x1C
+#define TUNE_SD18_1V7 0x00
+#define TUNE_SD18_1V8 (0x01 << 2)
+#define TUNE_SD18_1V9 (0x02 << 2)
+#define TUNE_SD18_2V0 (0x03 << 2)
+#define TUNE_SD18_2V7 (0x04 << 2)
+#define TUNE_SD18_2V8 (0x05 << 2)
+#define TUNE_SD18_2V9 (0x06 << 2)
+#define TUNE_SD18_3V3 (0x07 << 2)
+
+/* CLK_DIV */
+#define CLK_CHANGE 0x80
+#define CLK_DIV_1 0x00
+#define CLK_DIV_2 0x01
+#define CLK_DIV_4 0x02
+#define CLK_DIV_8 0x03
+
+#define SSC_POWER_MASK 0x01
+#define SSC_POWER_DOWN 0x01
+#define SSC_POWER_ON 0x00
+
+#define FPGA_VER 0x80
+#define HW_VER_MASK 0x0F
+
+#define EXTEND_DMA1_ASYNC_SIGNAL 0x02
+
+/* CFG_MODE */
+#define XTAL_FREE 0x80
+#define CLK_MODE_MASK 0x03
+#define CLK_MODE_12M_XTAL 0x00
+#define CLK_MODE_NON_XTAL 0x01
+#define CLK_MODE_24M_OSC 0x02
+#define CLK_MODE_48M_OSC 0x03
+
+/* CFG_MODE_1 */
+#define RTS5179 0x02
+
+#define NYET_EN 0x01
+#define NYET_MSAK 0x01
+
+#define SD30_DRIVE_MASK 0x07
+#define SD20_DRIVE_MASK 0x03
+
+#define DISABLE_SD_CD 0x08
+#define DISABLE_MS_CD 0x10
+#define DISABLE_XD_CD 0x20
+#define SD_CD_DEGLITCH_EN 0x01
+#define MS_CD_DEGLITCH_EN 0x02
+#define XD_CD_DEGLITCH_EN 0x04
+
+/* SD30_DRIVE_SEL */
+#define DRIVER_TYPE_A 0x05
+#define DRIVER_TYPE_B 0x03
+#define DRIVER_TYPE_C 0x02
+#define DRIVER_TYPE_D 0x01
+
+/* SD_BUS_STAT */
+#define SD_CLK_TOGGLE_EN 0x80
+#define SD_CLK_FORCE_STOP 0x40
+#define SD_DAT3_STATUS 0x10
+#define SD_DAT2_STATUS 0x08
+#define SD_DAT1_STATUS 0x04
+#define SD_DAT0_STATUS 0x02
+#define SD_CMD_STATUS 0x01
+
+/* SD_PAD_CTL */
+#define SD_IO_USING_1V8 0x80
+#define SD_IO_USING_3V3 0x7F
+#define TYPE_A_DRIVING 0x00
+#define TYPE_B_DRIVING 0x01
+#define TYPE_C_DRIVING 0x02
+#define TYPE_D_DRIVING 0x03
+
+/* CARD_CLK_EN */
+#define SD_CLK_EN 0x04
+#define MS_CLK_EN 0x08
+
+/* CARD_SELECT */
+#define SD_MOD_SEL 2
+#define MS_MOD_SEL 3
+
+/* CARD_SHARE_MODE */
+#define CARD_SHARE_LQFP48 0x04
+#define CARD_SHARE_QFN24 0x00
+#define CARD_SHARE_LQFP_SEL 0x04
+#define CARD_SHARE_XD 0x00
+#define CARD_SHARE_SD 0x01
+#define CARD_SHARE_MS 0x02
+#define CARD_SHARE_MASK 0x03
+
+/* SSC_CTL1 */
+#define SSC_RSTB 0x80
+#define SSC_8X_EN 0x40
+#define SSC_FIX_FRAC 0x20
+#define SSC_SEL_1M 0x00
+#define SSC_SEL_2M 0x08
+#define SSC_SEL_4M 0x10
+#define SSC_SEL_8M 0x18
+
+/* SSC_CTL2 */
+#define SSC_DEPTH_MASK 0x03
+#define SSC_DEPTH_DISALBE 0x00
+#define SSC_DEPTH_2M 0x01
+#define SSC_DEPTH_1M 0x02
+#define SSC_DEPTH_512K 0x03
+
+/* SD_VPCLK0_CTL */
+#define PHASE_CHANGE 0x80
+#define PHASE_NOT_RESET 0x40
+
+/* SD_TRANSFER */
+#define SD_TRANSFER_START 0x80
+#define SD_TRANSFER_END 0x40
+#define SD_STAT_IDLE 0x20
+#define SD_TRANSFER_ERR 0x10
+#define SD_TM_NORMAL_WRITE 0x00
+#define SD_TM_AUTO_WRITE_3 0x01
+#define SD_TM_AUTO_WRITE_4 0x02
+#define SD_TM_AUTO_READ_3 0x05
+#define SD_TM_AUTO_READ_4 0x06
+#define SD_TM_CMD_RSP 0x08
+#define SD_TM_AUTO_WRITE_1 0x09
+#define SD_TM_AUTO_WRITE_2 0x0A
+#define SD_TM_NORMAL_READ 0x0C
+#define SD_TM_AUTO_READ_1 0x0D
+#define SD_TM_AUTO_READ_2 0x0E
+#define SD_TM_AUTO_TUNING 0x0F
+
+/* SD_CFG1 */
+#define SD_CLK_DIVIDE_0 0x00
+#define SD_CLK_DIVIDE_256 0xC0
+#define SD_CLK_DIVIDE_128 0x80
+#define SD_CLK_DIVIDE_MASK 0xC0
+#define SD_BUS_WIDTH_1BIT 0x00
+#define SD_BUS_WIDTH_4BIT 0x01
+#define SD_BUS_WIDTH_8BIT 0x02
+#define SD_ASYNC_FIFO_RST 0x10
+#define SD_20_MODE 0x00
+#define SD_DDR_MODE 0x04
+#define SD_30_MODE 0x08
+
+/* SD_CFG2 */
+#define SD_CALCULATE_CRC7 0x00
+#define SD_NO_CALCULATE_CRC7 0x80
+#define SD_CHECK_CRC16 0x00
+#define SD_NO_CHECK_CRC16 0x40
+#define SD_WAIT_CRC_TO_EN 0x20
+#define SD_WAIT_BUSY_END 0x08
+#define SD_NO_WAIT_BUSY_END 0x00
+#define SD_CHECK_CRC7 0x00
+#define SD_NO_CHECK_CRC7 0x04
+#define SD_RSP_LEN_0 0x00
+#define SD_RSP_LEN_6 0x01
+#define SD_RSP_LEN_17 0x02
+#define SD_RSP_TYPE_R0 0x04
+#define SD_RSP_TYPE_R1 0x01
+#define SD_RSP_TYPE_R1b 0x09
+#define SD_RSP_TYPE_R2 0x02
+#define SD_RSP_TYPE_R3 0x05
+#define SD_RSP_TYPE_R4 0x05
+#define SD_RSP_TYPE_R5 0x01
+#define SD_RSP_TYPE_R6 0x01
+#define SD_RSP_TYPE_R7 0x01
+
+/* SD_STAT1 */
+#define SD_CRC7_ERR 0x80
+#define SD_CRC16_ERR 0x40
+#define SD_CRC_WRITE_ERR 0x20
+#define SD_CRC_WRITE_ERR_MASK 0x1C
+#define GET_CRC_TIME_OUT 0x02
+#define SD_TUNING_COMPARE_ERR 0x01
+
+/* SD_DATA_STATE */
+#define SD_DATA_IDLE 0x80
+
+/* CARD_DATA_SOURCE */
+#define PINGPONG_BUFFER 0x01
+#define RING_BUFFER 0x00
+
+/* CARD_OE */
+#define SD_OUTPUT_EN 0x04
+#define MS_OUTPUT_EN 0x08
+
+/* CARD_STOP */
+#define SD_STOP 0x04
+#define MS_STOP 0x08
+#define SD_CLR_ERR 0x40
+#define MS_CLR_ERR 0x80
+
+/* CARD_CLK_SOURCE */
+#define CRC_FIX_CLK (0x00 << 0)
+#define CRC_VAR_CLK0 (0x01 << 0)
+#define CRC_VAR_CLK1 (0x02 << 0)
+#define SD30_FIX_CLK (0x00 << 2)
+#define SD30_VAR_CLK0 (0x01 << 2)
+#define SD30_VAR_CLK1 (0x02 << 2)
+#define SAMPLE_FIX_CLK (0x00 << 4)
+#define SAMPLE_VAR_CLK0 (0x01 << 4)
+#define SAMPLE_VAR_CLK1 (0x02 << 4)
+
+/* SD_SAMPLE_POINT_CTL */
+#define DDR_FIX_RX_DAT 0x00
+#define DDR_VAR_RX_DAT 0x80
+#define DDR_FIX_RX_DAT_EDGE 0x00
+#define DDR_FIX_RX_DAT_14_DELAY 0x40
+#define DDR_FIX_RX_CMD 0x00
+#define DDR_VAR_RX_CMD 0x20
+#define DDR_FIX_RX_CMD_POS_EDGE 0x00
+#define DDR_FIX_RX_CMD_14_DELAY 0x10
+#define SD20_RX_POS_EDGE 0x00
+#define SD20_RX_14_DELAY 0x08
+#define SD20_RX_SEL_MASK 0x08
+
+/* SD_PUSH_POINT_CTL */
+#define DDR_FIX_TX_CMD_DAT 0x00
+#define DDR_VAR_TX_CMD_DAT 0x80
+#define DDR_FIX_TX_DAT_14_TSU 0x00
+#define DDR_FIX_TX_DAT_12_TSU 0x40
+#define DDR_FIX_TX_CMD_NEG_EDGE 0x00
+#define DDR_FIX_TX_CMD_14_AHEAD 0x20
+#define SD20_TX_NEG_EDGE 0x00
+#define SD20_TX_14_AHEAD 0x10
+#define SD20_TX_SEL_MASK 0x10
+#define DDR_VAR_SDCLK_POL_SWAP 0x01
+
+/* MS_CFG */
+#define SAMPLE_TIME_RISING 0x00
+#define SAMPLE_TIME_FALLING 0x80
+#define PUSH_TIME_DEFAULT 0x00
+#define PUSH_TIME_ODD 0x40
+#define NO_EXTEND_TOGGLE 0x00
+#define EXTEND_TOGGLE_CHK 0x20
+#define MS_BUS_WIDTH_1 0x00
+#define MS_BUS_WIDTH_4 0x10
+#define MS_BUS_WIDTH_8 0x18
+#define MS_2K_SECTOR_MODE 0x04
+#define MS_512_SECTOR_MODE 0x00
+#define MS_TOGGLE_TIMEOUT_EN 0x00
+#define MS_TOGGLE_TIMEOUT_DISEN 0x01
+#define MS_NO_CHECK_INT 0x02
+
+/* MS_TRANS_CFG */
+#define WAIT_INT 0x80
+#define NO_WAIT_INT 0x00
+#define NO_AUTO_READ_INT_REG 0x00
+#define AUTO_READ_INT_REG 0x40
+#define MS_CRC16_ERR 0x20
+#define MS_RDY_TIMEOUT 0x10
+#define MS_INT_CMDNK 0x08
+#define MS_INT_BREQ 0x04
+#define MS_INT_ERR 0x02
+#define MS_INT_CED 0x01
+
+/* MS_TRANSFER */
+#define MS_TRANSFER_START 0x80
+#define MS_TRANSFER_END 0x40
+#define MS_TRANSFER_ERR 0x20
+#define MS_BS_STATE 0x10
+#define MS_TM_READ_BYTES 0x00
+#define MS_TM_NORMAL_READ 0x01
+#define MS_TM_WRITE_BYTES 0x04
+#define MS_TM_NORMAL_WRITE 0x05
+#define MS_TM_AUTO_READ 0x08
+#define MS_TM_AUTO_WRITE 0x0C
+#define MS_TM_SET_CMD 0x06
+#define MS_TM_COPY_PAGE 0x07
+#define MS_TM_MULTI_READ 0x02
+#define MS_TM_MULTI_WRITE 0x03
+
+/* MC_FIFO_CTL */
+#define FIFO_FLUSH 0x01
+
+/* MC_DMA_RST */
+#define DMA_RESET 0x01
+
+/* MC_DMA_CTL */
+#define DMA_TC_EQ_0 0x80
+#define DMA_DIR_TO_CARD 0x00
+#define DMA_DIR_FROM_CARD 0x02
+#define DMA_EN 0x01
+#define DMA_128 (0 << 2)
+#define DMA_256 (1 << 2)
+#define DMA_512 (2 << 2)
+#define DMA_1024 (3 << 2)
+#define DMA_PACK_SIZE_MASK 0x0C
+
+/* CARD_INT_PEND */
+#define XD_INT 0x10
+#define MS_INT 0x08
+#define SD_INT 0x04
+
+/* LED operations */
+static inline int rtsx_usb_turn_on_led(struct rtsx_ucr *ucr)
+{
+ return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x02);
+}
+
+static inline int rtsx_usb_turn_off_led(struct rtsx_ucr *ucr)
+{
+ return rtsx_usb_ep0_write_register(ucr, CARD_GPIO, 0x03, 0x03);
+}
+
+/* HW error clearing */
+static inline void rtsx_usb_clear_fsm_err(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_ep0_write_register(ucr, SFSM_ED, 0xf8, 0xf8);
+}
+
+static inline void rtsx_usb_clear_dma_err(struct rtsx_ucr *ucr)
+{
+ rtsx_usb_ep0_write_register(ucr, MC_FIFO_CTL,
+ FIFO_FLUSH, FIFO_FLUSH);
+ rtsx_usb_ep0_write_register(ucr, MC_DMA_RST, DMA_RESET, DMA_RESET);
+}
+#endif /* __RTS51139_H */
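For orientation, a minimal usage sketch of the two error-recovery helpers defined above; the wrapper function and its name are illustrative and assume that struct rtsx_ucr and the helpers are visible from this header.

/* Illustrative only: flush the MC FIFO, reset the DMA engine and clear
 * the card state machine after a failed transfer, then pass the original
 * error code back to the caller. */
static int example_recover_after_transfer(struct rtsx_ucr *ucr, int err)
{
        if (err) {
                rtsx_usb_clear_dma_err(ucr);    /* FIFO_FLUSH + DMA_RESET writes */
                rtsx_usb_clear_fsm_err(ucr);    /* SFSM_ED recovery bits */
        }
        return err;
}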
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
new file mode 100644
index 000000000..75115384f
--- /dev/null
+++ b/include/linux/mfd/samsung/core.h
@@ -0,0 +1,176 @@
+/*
+ * core.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_SEC_CORE_H
+#define __LINUX_MFD_SEC_CORE_H
+
+/* Macros to represent minimum voltages for LDO/BUCK */
+#define MIN_3000_MV 3000000
+#define MIN_2500_MV 2500000
+#define MIN_2000_MV 2000000
+#define MIN_1800_MV 1800000
+#define MIN_1500_MV 1500000
+#define MIN_1400_MV 1400000
+#define MIN_1000_MV 1000000
+
+#define MIN_900_MV 900000
+#define MIN_850_MV 850000
+#define MIN_800_MV 800000
+#define MIN_750_MV 750000
+#define MIN_600_MV 600000
+#define MIN_500_MV 500000
+
+/* Macros to represent steps for LDO/BUCK */
+#define STEP_50_MV 50000
+#define STEP_25_MV 25000
+#define STEP_12_5_MV 12500
+#define STEP_6_25_MV 6250
+
+enum sec_device_type {
+ S5M8751X,
+ S5M8763X,
+ S5M8767X,
+ S2MPA01,
+ S2MPS11X,
+ S2MPS13X,
+ S2MPS14X,
+ S2MPU02,
+};
+
+/**
+ * struct sec_pmic_dev - s2m/s5m master device for sub-drivers
+ * @dev: Master device of the chip
+ * @pdata: Platform data populated with data from DTS
+ * or board files
+ * @regmap_pmic: Regmap associated with PMIC's I2C address
+ * @i2c: I2C client of the main driver
+ * @device_type: Type of device, matches enum sec_device_type
+ * @irq_base: Base IRQ number for device, required for IRQs
+ * @irq: Generic IRQ number for device
+ * @irq_data: Runtime data structure for IRQ controller
+ * @wakeup: Whether or not this is a wakeup device
+ */
+struct sec_pmic_dev {
+ struct device *dev;
+ struct sec_platform_data *pdata;
+ struct regmap *regmap_pmic;
+ struct i2c_client *i2c;
+
+ unsigned long device_type;
+ int irq_base;
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+
+ bool wakeup;
+};
+
+int sec_irq_init(struct sec_pmic_dev *sec_pmic);
+void sec_irq_exit(struct sec_pmic_dev *sec_pmic);
+int sec_irq_resume(struct sec_pmic_dev *sec_pmic);
+
+struct sec_platform_data {
+ struct sec_regulator_data *regulators;
+ struct sec_opmode_data *opmode;
+ int device_type;
+ int num_regulators;
+
+ int irq_base;
+ int (*cfg_pmic_irq)(void);
+
+ bool wakeup;
+ bool buck_voltage_lock;
+
+ int buck_gpios[3];
+ int buck_ds[3];
+ unsigned int buck2_voltage[8];
+ bool buck2_gpiodvs;
+ unsigned int buck3_voltage[8];
+ bool buck3_gpiodvs;
+ unsigned int buck4_voltage[8];
+ bool buck4_gpiodvs;
+
+ int buck_set1;
+ int buck_set2;
+ int buck_set3;
+ int buck2_enable;
+ int buck3_enable;
+ int buck4_enable;
+ int buck_default_idx;
+ int buck2_default_idx;
+ int buck3_default_idx;
+ int buck4_default_idx;
+
+ int buck_ramp_delay;
+
+ int buck2_ramp_delay;
+ int buck34_ramp_delay;
+ int buck5_ramp_delay;
+ int buck16_ramp_delay;
+ int buck7810_ramp_delay;
+ int buck9_ramp_delay;
+ int buck24_ramp_delay;
+ int buck3_ramp_delay;
+ int buck7_ramp_delay;
+ int buck8910_ramp_delay;
+
+ bool buck1_ramp_enable;
+ bool buck2_ramp_enable;
+ bool buck3_ramp_enable;
+ bool buck4_ramp_enable;
+ bool buck6_ramp_enable;
+
+ int buck2_init;
+ int buck3_init;
+ int buck4_init;
+};
+
+/**
+ * sec_regulator_data - regulator data
+ * @id: regulator id
+ * @initdata: regulator init data (constraints, supplies, ...)
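+ * @reg_node: device tree node of the regulator (when probed from DT)
+ * @ext_control_gpio: GPIO used for external on/off control of the regulator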
+ */
+struct sec_regulator_data {
+ int id;
+ struct regulator_init_data *initdata;
+ struct device_node *reg_node;
+ int ext_control_gpio;
+};
+
+/*
+ * sec_opmode_data - regulator operation mode data
+ * @id: regulator id
+ * @mode: regulator operation mode
+ */
+struct sec_opmode_data {
+ int id;
+ unsigned int mode;
+};
+
+/*
+ * samsung regulator operation mode
+ * SEC_OPMODE_OFF Regulator always OFF
+ * SEC_OPMODE_ON Regulator always ON
+ * SEC_OPMODE_LOWPOWER Regulator is on in low-power mode
+ * SEC_OPMODE_SUSPEND Regulator is changed by PWREN pin
+ * If PWREN is high, regulator is on
+ * If PWREN is low, regulator is off
+ */
+
+enum sec_opmode {
+ SEC_OPMODE_OFF,
+ SEC_OPMODE_ON,
+ SEC_OPMODE_LOWPOWER,
+ SEC_OPMODE_SUSPEND,
+};
+
+#endif /* __LINUX_MFD_SEC_CORE_H */
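A rough orientation for how the MFD cells reach the structures above: the usual pattern is that the core driver stores the struct sec_pmic_dev as driver data of its I2C device, so child platform drivers can fetch it through their parent. A minimal sketch under that assumption; the probe function, the message and the direct read of register 0x00 (the chip ID register in the register maps that follow) are illustrative only.

#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/samsung/core.h>

/* Hypothetical child-cell probe: reach the shared regmap and platform
 * data through the parent device's driver data. */
static int example_cell_probe(struct platform_device *pdev)
{
        struct sec_pmic_dev *sec = dev_get_drvdata(pdev->dev.parent);
        unsigned int id;
        int ret;

        ret = regmap_read(sec->regmap_pmic, 0x00, &id);
        if (ret)
                return ret;

        dev_info(&pdev->dev, "PMIC device type %lu, chip id %#x\n",
                 sec->device_type, id);
        return 0;
}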
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
new file mode 100644
index 000000000..667aa4048
--- /dev/null
+++ b/include/linux/mfd/samsung/irq.h
@@ -0,0 +1,253 @@
+/* irq.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_SEC_IRQ_H
+#define __LINUX_MFD_SEC_IRQ_H
+
+enum s2mpa01_irq {
+ S2MPA01_IRQ_PWRONF,
+ S2MPA01_IRQ_PWRONR,
+ S2MPA01_IRQ_JIGONBF,
+ S2MPA01_IRQ_JIGONBR,
+ S2MPA01_IRQ_ACOKBF,
+ S2MPA01_IRQ_ACOKBR,
+ S2MPA01_IRQ_PWRON1S,
+ S2MPA01_IRQ_MRB,
+
+ S2MPA01_IRQ_RTC60S,
+ S2MPA01_IRQ_RTCA1,
+ S2MPA01_IRQ_RTCA0,
+ S2MPA01_IRQ_SMPL,
+ S2MPA01_IRQ_RTC1S,
+ S2MPA01_IRQ_WTSR,
+
+ S2MPA01_IRQ_INT120C,
+ S2MPA01_IRQ_INT140C,
+ S2MPA01_IRQ_LDO3_TSD,
+ S2MPA01_IRQ_B16_TSD,
+ S2MPA01_IRQ_B24_TSD,
+ S2MPA01_IRQ_B35_TSD,
+
+ S2MPA01_IRQ_NR,
+};
+
+#define S2MPA01_IRQ_PWRONF_MASK (1 << 0)
+#define S2MPA01_IRQ_PWRONR_MASK (1 << 1)
+#define S2MPA01_IRQ_JIGONBF_MASK (1 << 2)
+#define S2MPA01_IRQ_JIGONBR_MASK (1 << 3)
+#define S2MPA01_IRQ_ACOKBF_MASK (1 << 4)
+#define S2MPA01_IRQ_ACOKBR_MASK (1 << 5)
+#define S2MPA01_IRQ_PWRON1S_MASK (1 << 6)
+#define S2MPA01_IRQ_MRB_MASK (1 << 7)
+
+#define S2MPA01_IRQ_RTC60S_MASK (1 << 0)
+#define S2MPA01_IRQ_RTCA1_MASK (1 << 1)
+#define S2MPA01_IRQ_RTCA0_MASK (1 << 2)
+#define S2MPA01_IRQ_SMPL_MASK (1 << 3)
+#define S2MPA01_IRQ_RTC1S_MASK (1 << 4)
+#define S2MPA01_IRQ_WTSR_MASK (1 << 5)
+
+#define S2MPA01_IRQ_INT120C_MASK (1 << 0)
+#define S2MPA01_IRQ_INT140C_MASK (1 << 1)
+#define S2MPA01_IRQ_LDO3_TSD_MASK (1 << 2)
+#define S2MPA01_IRQ_B16_TSD_MASK (1 << 3)
+#define S2MPA01_IRQ_B24_TSD_MASK (1 << 4)
+#define S2MPA01_IRQ_B35_TSD_MASK (1 << 5)
+
+enum s2mps11_irq {
+ S2MPS11_IRQ_PWRONF,
+ S2MPS11_IRQ_PWRONR,
+ S2MPS11_IRQ_JIGONBF,
+ S2MPS11_IRQ_JIGONBR,
+ S2MPS11_IRQ_ACOKBF,
+ S2MPS11_IRQ_ACOKBR,
+ S2MPS11_IRQ_PWRON1S,
+ S2MPS11_IRQ_MRB,
+
+ S2MPS11_IRQ_RTC60S,
+ S2MPS11_IRQ_RTCA1,
+ S2MPS11_IRQ_RTCA0,
+ S2MPS11_IRQ_SMPL,
+ S2MPS11_IRQ_RTC1S,
+ S2MPS11_IRQ_WTSR,
+
+ S2MPS11_IRQ_INT120C,
+ S2MPS11_IRQ_INT140C,
+
+ S2MPS11_IRQ_NR,
+};
+
+#define S2MPS11_IRQ_PWRONF_MASK (1 << 0)
+#define S2MPS11_IRQ_PWRONR_MASK (1 << 1)
+#define S2MPS11_IRQ_JIGONBF_MASK (1 << 2)
+#define S2MPS11_IRQ_JIGONBR_MASK (1 << 3)
+#define S2MPS11_IRQ_ACOKBF_MASK (1 << 4)
+#define S2MPS11_IRQ_ACOKBR_MASK (1 << 5)
+#define S2MPS11_IRQ_PWRON1S_MASK (1 << 6)
+#define S2MPS11_IRQ_MRB_MASK (1 << 7)
+
+#define S2MPS11_IRQ_RTC60S_MASK (1 << 0)
+#define S2MPS11_IRQ_RTCA1_MASK (1 << 1)
+#define S2MPS11_IRQ_RTCA0_MASK (1 << 2)
+#define S2MPS11_IRQ_SMPL_MASK (1 << 3)
+#define S2MPS11_IRQ_RTC1S_MASK (1 << 4)
+#define S2MPS11_IRQ_WTSR_MASK (1 << 5)
+
+#define S2MPS11_IRQ_INT120C_MASK (1 << 0)
+#define S2MPS11_IRQ_INT140C_MASK (1 << 1)
+
+enum s2mps14_irq {
+ S2MPS14_IRQ_PWRONF,
+ S2MPS14_IRQ_PWRONR,
+ S2MPS14_IRQ_JIGONBF,
+ S2MPS14_IRQ_JIGONBR,
+ S2MPS14_IRQ_ACOKBF,
+ S2MPS14_IRQ_ACOKBR,
+ S2MPS14_IRQ_PWRON1S,
+ S2MPS14_IRQ_MRB,
+
+ S2MPS14_IRQ_RTC60S,
+ S2MPS14_IRQ_RTCA1,
+ S2MPS14_IRQ_RTCA0,
+ S2MPS14_IRQ_SMPL,
+ S2MPS14_IRQ_RTC1S,
+ S2MPS14_IRQ_WTSR,
+
+ S2MPS14_IRQ_INT120C,
+ S2MPS14_IRQ_INT140C,
+ S2MPS14_IRQ_TSD,
+
+ S2MPS14_IRQ_NR,
+};
+
+enum s2mpu02_irq {
+ S2MPU02_IRQ_PWRONF,
+ S2MPU02_IRQ_PWRONR,
+ S2MPU02_IRQ_JIGONBF,
+ S2MPU02_IRQ_JIGONBR,
+ S2MPU02_IRQ_ACOKBF,
+ S2MPU02_IRQ_ACOKBR,
+ S2MPU02_IRQ_PWRON1S,
+ S2MPU02_IRQ_MRB,
+
+ S2MPU02_IRQ_RTC60S,
+ S2MPU02_IRQ_RTCA1,
+ S2MPU02_IRQ_RTCA0,
+ S2MPU02_IRQ_SMPL,
+ S2MPU02_IRQ_RTC1S,
+ S2MPU02_IRQ_WTSR,
+
+ S2MPU02_IRQ_INT120C,
+ S2MPU02_IRQ_INT140C,
+ S2MPU02_IRQ_TSD,
+
+ S2MPU02_IRQ_NR,
+};
+
+/* Masks for interrupts are the same as in s2mps11 */
+#define S2MPS14_IRQ_TSD_MASK (1 << 2)
+
+enum s5m8767_irq {
+ S5M8767_IRQ_PWRR,
+ S5M8767_IRQ_PWRF,
+ S5M8767_IRQ_PWR1S,
+ S5M8767_IRQ_JIGR,
+ S5M8767_IRQ_JIGF,
+ S5M8767_IRQ_LOWBAT2,
+ S5M8767_IRQ_LOWBAT1,
+
+ S5M8767_IRQ_MRB,
+ S5M8767_IRQ_DVSOK2,
+ S5M8767_IRQ_DVSOK3,
+ S5M8767_IRQ_DVSOK4,
+
+ S5M8767_IRQ_RTC60S,
+ S5M8767_IRQ_RTCA1,
+ S5M8767_IRQ_RTCA2,
+ S5M8767_IRQ_SMPL,
+ S5M8767_IRQ_RTC1S,
+ S5M8767_IRQ_WTSR,
+
+ S5M8767_IRQ_NR,
+};
+
+#define S5M8767_IRQ_PWRR_MASK (1 << 0)
+#define S5M8767_IRQ_PWRF_MASK (1 << 1)
+#define S5M8767_IRQ_PWR1S_MASK (1 << 3)
+#define S5M8767_IRQ_JIGR_MASK (1 << 4)
+#define S5M8767_IRQ_JIGF_MASK (1 << 5)
+#define S5M8767_IRQ_LOWBAT2_MASK (1 << 6)
+#define S5M8767_IRQ_LOWBAT1_MASK (1 << 7)
+
+#define S5M8767_IRQ_MRB_MASK (1 << 2)
+#define S5M8767_IRQ_DVSOK2_MASK (1 << 3)
+#define S5M8767_IRQ_DVSOK3_MASK (1 << 4)
+#define S5M8767_IRQ_DVSOK4_MASK (1 << 5)
+
+#define S5M8767_IRQ_RTC60S_MASK (1 << 0)
+#define S5M8767_IRQ_RTCA1_MASK (1 << 1)
+#define S5M8767_IRQ_RTCA2_MASK (1 << 2)
+#define S5M8767_IRQ_SMPL_MASK (1 << 3)
+#define S5M8767_IRQ_RTC1S_MASK (1 << 4)
+#define S5M8767_IRQ_WTSR_MASK (1 << 5)
+
+enum s5m8763_irq {
+ S5M8763_IRQ_DCINF,
+ S5M8763_IRQ_DCINR,
+ S5M8763_IRQ_JIGF,
+ S5M8763_IRQ_JIGR,
+ S5M8763_IRQ_PWRONF,
+ S5M8763_IRQ_PWRONR,
+
+ S5M8763_IRQ_WTSREVNT,
+ S5M8763_IRQ_SMPLEVNT,
+ S5M8763_IRQ_ALARM1,
+ S5M8763_IRQ_ALARM0,
+
+ S5M8763_IRQ_ONKEY1S,
+ S5M8763_IRQ_TOPOFFR,
+ S5M8763_IRQ_DCINOVPR,
+ S5M8763_IRQ_CHGRSTF,
+ S5M8763_IRQ_DONER,
+ S5M8763_IRQ_CHGFAULT,
+
+ S5M8763_IRQ_LOBAT1,
+ S5M8763_IRQ_LOBAT2,
+
+ S5M8763_IRQ_NR,
+};
+
+#define S5M8763_IRQ_DCINF_MASK (1 << 2)
+#define S5M8763_IRQ_DCINR_MASK (1 << 3)
+#define S5M8763_IRQ_JIGF_MASK (1 << 4)
+#define S5M8763_IRQ_JIGR_MASK (1 << 5)
+#define S5M8763_IRQ_PWRONF_MASK (1 << 6)
+#define S5M8763_IRQ_PWRONR_MASK (1 << 7)
+
+#define S5M8763_IRQ_WTSREVNT_MASK (1 << 0)
+#define S5M8763_IRQ_SMPLEVNT_MASK (1 << 1)
+#define S5M8763_IRQ_ALARM1_MASK (1 << 2)
+#define S5M8763_IRQ_ALARM0_MASK (1 << 3)
+
+#define S5M8763_IRQ_ONKEY1S_MASK (1 << 0)
+#define S5M8763_IRQ_TOPOFFR_MASK (1 << 2)
+#define S5M8763_IRQ_DCINOVPR_MASK (1 << 3)
+#define S5M8763_IRQ_CHGRSTF_MASK (1 << 4)
+#define S5M8763_IRQ_DONER_MASK (1 << 5)
+#define S5M8763_IRQ_CHGFAULT_MASK (1 << 7)
+
+#define S5M8763_IRQ_LOBAT1_MASK (1 << 0)
+#define S5M8763_IRQ_LOBAT2_MASK (1 << 1)
+
+#define S5M8763_ENRAMP (1 << 4)
+
+#endif /* __LINUX_MFD_SEC_IRQ_H */
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
new file mode 100644
index 000000000..29c30ac36
--- /dev/null
+++ b/include/linux/mfd/samsung/rtc.h
@@ -0,0 +1,151 @@
+/* rtc.h
+ *
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_MFD_SEC_RTC_H
+#define __LINUX_MFD_SEC_RTC_H
+
+enum s5m_rtc_reg {
+ S5M_RTC_SEC,
+ S5M_RTC_MIN,
+ S5M_RTC_HOUR,
+ S5M_RTC_WEEKDAY,
+ S5M_RTC_DATE,
+ S5M_RTC_MONTH,
+ S5M_RTC_YEAR1,
+ S5M_RTC_YEAR2,
+ S5M_ALARM0_SEC,
+ S5M_ALARM0_MIN,
+ S5M_ALARM0_HOUR,
+ S5M_ALARM0_WEEKDAY,
+ S5M_ALARM0_DATE,
+ S5M_ALARM0_MONTH,
+ S5M_ALARM0_YEAR1,
+ S5M_ALARM0_YEAR2,
+ S5M_ALARM1_SEC,
+ S5M_ALARM1_MIN,
+ S5M_ALARM1_HOUR,
+ S5M_ALARM1_WEEKDAY,
+ S5M_ALARM1_DATE,
+ S5M_ALARM1_MONTH,
+ S5M_ALARM1_YEAR1,
+ S5M_ALARM1_YEAR2,
+ S5M_ALARM0_CONF,
+ S5M_ALARM1_CONF,
+ S5M_RTC_STATUS,
+ S5M_WTSR_SMPL_CNTL,
+ S5M_RTC_UDR_CON,
+
+ S5M_RTC_REG_MAX,
+};
+
+enum s2mps_rtc_reg {
+ S2MPS_RTC_CTRL,
+ S2MPS_WTSR_SMPL_CNTL,
+ S2MPS_RTC_UDR_CON,
+ S2MPS_RSVD,
+ S2MPS_RTC_SEC,
+ S2MPS_RTC_MIN,
+ S2MPS_RTC_HOUR,
+ S2MPS_RTC_WEEKDAY,
+ S2MPS_RTC_DATE,
+ S2MPS_RTC_MONTH,
+ S2MPS_RTC_YEAR,
+ S2MPS_ALARM0_SEC,
+ S2MPS_ALARM0_MIN,
+ S2MPS_ALARM0_HOUR,
+ S2MPS_ALARM0_WEEKDAY,
+ S2MPS_ALARM0_DATE,
+ S2MPS_ALARM0_MONTH,
+ S2MPS_ALARM0_YEAR,
+ S2MPS_ALARM1_SEC,
+ S2MPS_ALARM1_MIN,
+ S2MPS_ALARM1_HOUR,
+ S2MPS_ALARM1_WEEKDAY,
+ S2MPS_ALARM1_DATE,
+ S2MPS_ALARM1_MONTH,
+ S2MPS_ALARM1_YEAR,
+ S2MPS_OFFSRC,
+
+ S2MPS_RTC_REG_MAX,
+};
+
+#define RTC_I2C_ADDR (0x0C >> 1)
+
+#define HOUR_12 (1 << 7)
+#define HOUR_AMPM (1 << 6)
+#define HOUR_PM (1 << 5)
+#define S5M_ALARM0_STATUS (1 << 1)
+#define S5M_ALARM1_STATUS (1 << 2)
+#define S5M_UPDATE_AD (1 << 0)
+
+#define S2MPS_ALARM0_STATUS (1 << 2)
+#define S2MPS_ALARM1_STATUS (1 << 1)
+
+/* RTC Control Register */
+#define BCD_EN_SHIFT 0
+#define BCD_EN_MASK (1 << BCD_EN_SHIFT)
+#define MODEL24_SHIFT 1
+#define MODEL24_MASK (1 << MODEL24_SHIFT)
+/* RTC Update Register1 */
+#define S5M_RTC_UDR_SHIFT 0
+#define S5M_RTC_UDR_MASK (1 << S5M_RTC_UDR_SHIFT)
+#define S2MPS_RTC_WUDR_SHIFT 4
+#define S2MPS_RTC_WUDR_MASK (1 << S2MPS_RTC_WUDR_SHIFT)
+#define S2MPS13_RTC_AUDR_SHIFT 1
+#define S2MPS13_RTC_AUDR_MASK (1 << S2MPS13_RTC_AUDR_SHIFT)
+#define S2MPS_RTC_RUDR_SHIFT 0
+#define S2MPS_RTC_RUDR_MASK (1 << S2MPS_RTC_RUDR_SHIFT)
+#define RTC_TCON_SHIFT 1
+#define RTC_TCON_MASK (1 << RTC_TCON_SHIFT)
+#define S5M_RTC_TIME_EN_SHIFT 3
+#define S5M_RTC_TIME_EN_MASK (1 << S5M_RTC_TIME_EN_SHIFT)
+/*
+ * UDR_T field in S5M_RTC_UDR_CON register determines the time needed
+ * for updating alarm and time registers. Default is 7.32 ms.
+ */
+#define S5M_RTC_UDR_T_SHIFT 6
+#define S5M_RTC_UDR_T_MASK (0x3 << S5M_RTC_UDR_T_SHIFT)
+#define S5M_RTC_UDR_T_7320_US (0x0 << S5M_RTC_UDR_T_SHIFT)
+#define S5M_RTC_UDR_T_1830_US (0x1 << S5M_RTC_UDR_T_SHIFT)
+#define S5M_RTC_UDR_T_3660_US (0x2 << S5M_RTC_UDR_T_SHIFT)
+#define S5M_RTC_UDR_T_450_US (0x3 << S5M_RTC_UDR_T_SHIFT)
+
+/* RTC Hour register */
+#define HOUR_PM_SHIFT 6
+#define HOUR_PM_MASK (1 << HOUR_PM_SHIFT)
+/* RTC Alarm Enable */
+#define ALARM_ENABLE_SHIFT 7
+#define ALARM_ENABLE_MASK (1 << ALARM_ENABLE_SHIFT)
+
+#define SMPL_ENABLE_SHIFT 7
+#define SMPL_ENABLE_MASK (1 << SMPL_ENABLE_SHIFT)
+
+#define WTSR_ENABLE_SHIFT 6
+#define WTSR_ENABLE_MASK (1 << WTSR_ENABLE_SHIFT)
+
+enum {
+ RTC_SEC = 0,
+ RTC_MIN,
+ RTC_HOUR,
+ RTC_WEEKDAY,
+ RTC_DATE,
+ RTC_MONTH,
+ RTC_YEAR1,
+ RTC_YEAR2,
+};
+
+#endif /* __LINUX_MFD_SEC_RTC_H */
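The UDR_T comment above explains that the field selects how long alarm/time register updates take (7.32 ms by default). A hedged sketch of choosing the shortest window with the masks from this header, assuming the RTC registers are accessed through a regmap indexed by the enum offsets above; the regmap handle itself is an assumption of the sketch.

#include <linux/regmap.h>
#include <linux/mfd/samsung/rtc.h>

/* Illustrative only: shorten the update window from the default
 * 7.32 ms to 0.45 ms by rewriting the UDR_T field. */
static int example_set_fast_udr(struct regmap *rtc_regmap)
{
        return regmap_update_bits(rtc_regmap, S5M_RTC_UDR_CON,
                                  S5M_RTC_UDR_T_MASK, S5M_RTC_UDR_T_450_US);
}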
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
new file mode 100644
index 000000000..2766108bc
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPA01_H
+#define __LINUX_MFD_S2MPA01_H
+
+/* S2MPA01 registers */
+enum s2mpa01_reg {
+ S2MPA01_REG_ID,
+ S2MPA01_REG_INT1,
+ S2MPA01_REG_INT2,
+ S2MPA01_REG_INT3,
+ S2MPA01_REG_INT1M,
+ S2MPA01_REG_INT2M,
+ S2MPA01_REG_INT3M,
+ S2MPA01_REG_ST1,
+ S2MPA01_REG_ST2,
+ S2MPA01_REG_PWRONSRC,
+ S2MPA01_REG_OFFSRC,
+ S2MPA01_REG_RTC_BUF,
+ S2MPA01_REG_CTRL1,
+ S2MPA01_REG_ETC_TEST,
+ S2MPA01_REG_RSVD1,
+ S2MPA01_REG_BU_CHG,
+ S2MPA01_REG_RAMP1,
+ S2MPA01_REG_RAMP2,
+ S2MPA01_REG_LDO_DSCH1,
+ S2MPA01_REG_LDO_DSCH2,
+ S2MPA01_REG_LDO_DSCH3,
+ S2MPA01_REG_LDO_DSCH4,
+ S2MPA01_REG_OTP_ADRL,
+ S2MPA01_REG_OTP_ADRH,
+ S2MPA01_REG_OTP_DATA,
+ S2MPA01_REG_MON1SEL,
+ S2MPA01_REG_MON2SEL,
+ S2MPA01_REG_LEE,
+ S2MPA01_REG_RSVD2,
+ S2MPA01_REG_RSVD3,
+ S2MPA01_REG_RSVD4,
+ S2MPA01_REG_RSVD5,
+ S2MPA01_REG_RSVD6,
+ S2MPA01_REG_TOP_RSVD,
+ S2MPA01_REG_DVS_SEL,
+ S2MPA01_REG_DVS_PTR,
+ S2MPA01_REG_DVS_DATA,
+ S2MPA01_REG_RSVD_NO,
+ S2MPA01_REG_UVLO,
+ S2MPA01_REG_LEE_NO,
+ S2MPA01_REG_B1CTRL1,
+ S2MPA01_REG_B1CTRL2,
+ S2MPA01_REG_B2CTRL1,
+ S2MPA01_REG_B2CTRL2,
+ S2MPA01_REG_B3CTRL1,
+ S2MPA01_REG_B3CTRL2,
+ S2MPA01_REG_B4CTRL1,
+ S2MPA01_REG_B4CTRL2,
+ S2MPA01_REG_B5CTRL1,
+ S2MPA01_REG_B5CTRL2,
+ S2MPA01_REG_B5CTRL3,
+ S2MPA01_REG_B5CTRL4,
+ S2MPA01_REG_B5CTRL5,
+ S2MPA01_REG_B5CTRL6,
+ S2MPA01_REG_B6CTRL1,
+ S2MPA01_REG_B6CTRL2,
+ S2MPA01_REG_B7CTRL1,
+ S2MPA01_REG_B7CTRL2,
+ S2MPA01_REG_B8CTRL1,
+ S2MPA01_REG_B8CTRL2,
+ S2MPA01_REG_B9CTRL1,
+ S2MPA01_REG_B9CTRL2,
+ S2MPA01_REG_B10CTRL1,
+ S2MPA01_REG_B10CTRL2,
+ S2MPA01_REG_L1CTRL,
+ S2MPA01_REG_L2CTRL,
+ S2MPA01_REG_L3CTRL,
+ S2MPA01_REG_L4CTRL,
+ S2MPA01_REG_L5CTRL,
+ S2MPA01_REG_L6CTRL,
+ S2MPA01_REG_L7CTRL,
+ S2MPA01_REG_L8CTRL,
+ S2MPA01_REG_L9CTRL,
+ S2MPA01_REG_L10CTRL,
+ S2MPA01_REG_L11CTRL,
+ S2MPA01_REG_L12CTRL,
+ S2MPA01_REG_L13CTRL,
+ S2MPA01_REG_L14CTRL,
+ S2MPA01_REG_L15CTRL,
+ S2MPA01_REG_L16CTRL,
+ S2MPA01_REG_L17CTRL,
+ S2MPA01_REG_L18CTRL,
+ S2MPA01_REG_L19CTRL,
+ S2MPA01_REG_L20CTRL,
+ S2MPA01_REG_L21CTRL,
+ S2MPA01_REG_L22CTRL,
+ S2MPA01_REG_L23CTRL,
+ S2MPA01_REG_L24CTRL,
+ S2MPA01_REG_L25CTRL,
+ S2MPA01_REG_L26CTRL,
+
+ S2MPA01_REG_LDO_OVCB1,
+ S2MPA01_REG_LDO_OVCB2,
+ S2MPA01_REG_LDO_OVCB3,
+ S2MPA01_REG_LDO_OVCB4,
+
+};
+
+/* S2MPA01 regulator ids */
+enum s2mpa01_regulators {
+ S2MPA01_LDO1,
+ S2MPA01_LDO2,
+ S2MPA01_LDO3,
+ S2MPA01_LDO4,
+ S2MPA01_LDO5,
+ S2MPA01_LDO6,
+ S2MPA01_LDO7,
+ S2MPA01_LDO8,
+ S2MPA01_LDO9,
+ S2MPA01_LDO10,
+ S2MPA01_LDO11,
+ S2MPA01_LDO12,
+ S2MPA01_LDO13,
+ S2MPA01_LDO14,
+ S2MPA01_LDO15,
+ S2MPA01_LDO16,
+ S2MPA01_LDO17,
+ S2MPA01_LDO18,
+ S2MPA01_LDO19,
+ S2MPA01_LDO20,
+ S2MPA01_LDO21,
+ S2MPA01_LDO22,
+ S2MPA01_LDO23,
+ S2MPA01_LDO24,
+ S2MPA01_LDO25,
+ S2MPA01_LDO26,
+
+ S2MPA01_BUCK1,
+ S2MPA01_BUCK2,
+ S2MPA01_BUCK3,
+ S2MPA01_BUCK4,
+ S2MPA01_BUCK5,
+ S2MPA01_BUCK6,
+ S2MPA01_BUCK7,
+ S2MPA01_BUCK8,
+ S2MPA01_BUCK9,
+ S2MPA01_BUCK10,
+
+ S2MPA01_REGULATOR_MAX,
+};
+
+#define S2MPA01_LDO_VSEL_MASK 0x3F
+#define S2MPA01_BUCK_VSEL_MASK 0xFF
+#define S2MPA01_ENABLE_MASK (0x03 << S2MPA01_ENABLE_SHIFT)
+#define S2MPA01_ENABLE_SHIFT 0x06
+#define S2MPA01_LDO_N_VOLTAGES (S2MPA01_LDO_VSEL_MASK + 1)
+#define S2MPA01_BUCK_N_VOLTAGES (S2MPA01_BUCK_VSEL_MASK + 1)
+
+#define S2MPA01_RAMP_DELAY 12500 /* uV/us */
+
+#define S2MPA01_BUCK16_RAMP_SHIFT 4
+#define S2MPA01_BUCK24_RAMP_SHIFT 6
+#define S2MPA01_BUCK3_RAMP_SHIFT 4
+#define S2MPA01_BUCK5_RAMP_SHIFT 6
+#define S2MPA01_BUCK7_RAMP_SHIFT 2
+#define S2MPA01_BUCK8910_RAMP_SHIFT 0
+
+#define S2MPA01_BUCK1_RAMP_EN_SHIFT 3
+#define S2MPA01_BUCK2_RAMP_EN_SHIFT 2
+#define S2MPA01_BUCK3_RAMP_EN_SHIFT 1
+#define S2MPA01_BUCK4_RAMP_EN_SHIFT 0
+#define S2MPA01_PMIC_EN_SHIFT 6
+
+#endif /* __LINUX_MFD_S2MPA01_H */
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
new file mode 100644
index 000000000..7981a9d77
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -0,0 +1,195 @@
+/*
+ * s2mps11.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPS11_H
+#define __LINUX_MFD_S2MPS11_H
+
+/* S2MPS11 registers */
+enum s2mps11_reg {
+ S2MPS11_REG_ID,
+ S2MPS11_REG_INT1,
+ S2MPS11_REG_INT2,
+ S2MPS11_REG_INT3,
+ S2MPS11_REG_INT1M,
+ S2MPS11_REG_INT2M,
+ S2MPS11_REG_INT3M,
+ S2MPS11_REG_ST1,
+ S2MPS11_REG_ST2,
+ S2MPS11_REG_OFFSRC,
+ S2MPS11_REG_PWRONSRC,
+ S2MPS11_REG_RTC_CTRL,
+ S2MPS11_REG_CTRL1,
+ S2MPS11_REG_ETC_TEST,
+ S2MPS11_REG_RSVD3,
+ S2MPS11_REG_BU_CHG,
+ S2MPS11_REG_RAMP,
+ S2MPS11_REG_RAMP_BUCK,
+ S2MPS11_REG_LDO1_8,
+ S2MPS11_REG_LDO9_16,
+ S2MPS11_REG_LDO17_24,
+ S2MPS11_REG_LDO25_32,
+ S2MPS11_REG_LDO33_38,
+ S2MPS11_REG_LDO1_8_1,
+ S2MPS11_REG_LDO9_16_1,
+ S2MPS11_REG_LDO17_24_1,
+ S2MPS11_REG_LDO25_32_1,
+ S2MPS11_REG_LDO33_38_1,
+ S2MPS11_REG_OTP_ADRL,
+ S2MPS11_REG_OTP_ADRH,
+ S2MPS11_REG_OTP_DATA,
+ S2MPS11_REG_MON1SEL,
+ S2MPS11_REG_MON2SEL,
+ S2MPS11_REG_LEE,
+ S2MPS11_REG_RSVD_NO,
+ S2MPS11_REG_UVLO,
+ S2MPS11_REG_LEE_NO,
+ S2MPS11_REG_B1CTRL1,
+ S2MPS11_REG_B1CTRL2,
+ S2MPS11_REG_B2CTRL1,
+ S2MPS11_REG_B2CTRL2,
+ S2MPS11_REG_B3CTRL1,
+ S2MPS11_REG_B3CTRL2,
+ S2MPS11_REG_B4CTRL1,
+ S2MPS11_REG_B4CTRL2,
+ S2MPS11_REG_B5CTRL1,
+ S2MPS11_REG_BUCK5_SW,
+ S2MPS11_REG_B5CTRL2,
+ S2MPS11_REG_B5CTRL3,
+ S2MPS11_REG_B5CTRL4,
+ S2MPS11_REG_B5CTRL5,
+ S2MPS11_REG_B6CTRL1,
+ S2MPS11_REG_B6CTRL2,
+ S2MPS11_REG_B7CTRL1,
+ S2MPS11_REG_B7CTRL2,
+ S2MPS11_REG_B8CTRL1,
+ S2MPS11_REG_B8CTRL2,
+ S2MPS11_REG_B9CTRL1,
+ S2MPS11_REG_B9CTRL2,
+ S2MPS11_REG_B10CTRL1,
+ S2MPS11_REG_B10CTRL2,
+ S2MPS11_REG_L1CTRL,
+ S2MPS11_REG_L2CTRL,
+ S2MPS11_REG_L3CTRL,
+ S2MPS11_REG_L4CTRL,
+ S2MPS11_REG_L5CTRL,
+ S2MPS11_REG_L6CTRL,
+ S2MPS11_REG_L7CTRL,
+ S2MPS11_REG_L8CTRL,
+ S2MPS11_REG_L9CTRL,
+ S2MPS11_REG_L10CTRL,
+ S2MPS11_REG_L11CTRL,
+ S2MPS11_REG_L12CTRL,
+ S2MPS11_REG_L13CTRL,
+ S2MPS11_REG_L14CTRL,
+ S2MPS11_REG_L15CTRL,
+ S2MPS11_REG_L16CTRL,
+ S2MPS11_REG_L17CTRL,
+ S2MPS11_REG_L18CTRL,
+ S2MPS11_REG_L19CTRL,
+ S2MPS11_REG_L20CTRL,
+ S2MPS11_REG_L21CTRL,
+ S2MPS11_REG_L22CTRL,
+ S2MPS11_REG_L23CTRL,
+ S2MPS11_REG_L24CTRL,
+ S2MPS11_REG_L25CTRL,
+ S2MPS11_REG_L26CTRL,
+ S2MPS11_REG_L27CTRL,
+ S2MPS11_REG_L28CTRL,
+ S2MPS11_REG_L29CTRL,
+ S2MPS11_REG_L30CTRL,
+ S2MPS11_REG_L31CTRL,
+ S2MPS11_REG_L32CTRL,
+ S2MPS11_REG_L33CTRL,
+ S2MPS11_REG_L34CTRL,
+ S2MPS11_REG_L35CTRL,
+ S2MPS11_REG_L36CTRL,
+ S2MPS11_REG_L37CTRL,
+ S2MPS11_REG_L38CTRL,
+};
+
+/* S2MPS11 regulator ids */
+enum s2mps11_regulators {
+ S2MPS11_LDO1,
+ S2MPS11_LDO2,
+ S2MPS11_LDO3,
+ S2MPS11_LDO4,
+ S2MPS11_LDO5,
+ S2MPS11_LDO6,
+ S2MPS11_LDO7,
+ S2MPS11_LDO8,
+ S2MPS11_LDO9,
+ S2MPS11_LDO10,
+ S2MPS11_LDO11,
+ S2MPS11_LDO12,
+ S2MPS11_LDO13,
+ S2MPS11_LDO14,
+ S2MPS11_LDO15,
+ S2MPS11_LDO16,
+ S2MPS11_LDO17,
+ S2MPS11_LDO18,
+ S2MPS11_LDO19,
+ S2MPS11_LDO20,
+ S2MPS11_LDO21,
+ S2MPS11_LDO22,
+ S2MPS11_LDO23,
+ S2MPS11_LDO24,
+ S2MPS11_LDO25,
+ S2MPS11_LDO26,
+ S2MPS11_LDO27,
+ S2MPS11_LDO28,
+ S2MPS11_LDO29,
+ S2MPS11_LDO30,
+ S2MPS11_LDO31,
+ S2MPS11_LDO32,
+ S2MPS11_LDO33,
+ S2MPS11_LDO34,
+ S2MPS11_LDO35,
+ S2MPS11_LDO36,
+ S2MPS11_LDO37,
+ S2MPS11_LDO38,
+ S2MPS11_BUCK1,
+ S2MPS11_BUCK2,
+ S2MPS11_BUCK3,
+ S2MPS11_BUCK4,
+ S2MPS11_BUCK5,
+ S2MPS11_BUCK6,
+ S2MPS11_BUCK7,
+ S2MPS11_BUCK8,
+ S2MPS11_BUCK9,
+ S2MPS11_BUCK10,
+
+ S2MPS11_REGULATOR_MAX,
+};
+
+#define S2MPS11_LDO_VSEL_MASK 0x3F
+#define S2MPS11_BUCK_VSEL_MASK 0xFF
+#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
+#define S2MPS11_ENABLE_SHIFT 0x06
+#define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
+#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
+
+
+#define S2MPS11_BUCK2_RAMP_SHIFT 6
+#define S2MPS11_BUCK34_RAMP_SHIFT 4
+#define S2MPS11_BUCK5_RAMP_SHIFT 6
+#define S2MPS11_BUCK16_RAMP_SHIFT 4
+#define S2MPS11_BUCK7810_RAMP_SHIFT 2
+#define S2MPS11_BUCK9_RAMP_SHIFT 0
+#define S2MPS11_BUCK2_RAMP_EN_SHIFT 3
+#define S2MPS11_BUCK3_RAMP_EN_SHIFT 2
+#define S2MPS11_BUCK4_RAMP_EN_SHIFT 1
+#define S2MPS11_BUCK6_RAMP_EN_SHIFT 0
+#define S2MPS11_PMIC_EN_SHIFT 6
+
+#endif /* __LINUX_MFD_S2MPS11_H */
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
new file mode 100644
index 000000000..b1fd675fa
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -0,0 +1,188 @@
+/*
+ * s2mps13.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPS13_H
+#define __LINUX_MFD_S2MPS13_H
+
+/* S2MPS13 registers */
+enum s2mps13_reg {
+ S2MPS13_REG_ID,
+ S2MPS13_REG_INT1,
+ S2MPS13_REG_INT2,
+ S2MPS13_REG_INT3,
+ S2MPS13_REG_INT1M,
+ S2MPS13_REG_INT2M,
+ S2MPS13_REG_INT3M,
+ S2MPS13_REG_ST1,
+ S2MPS13_REG_ST2,
+ S2MPS13_REG_PWRONSRC,
+ S2MPS13_REG_OFFSRC,
+ S2MPS13_REG_BU_CHG,
+ S2MPS13_REG_RTCCTRL,
+ S2MPS13_REG_CTRL1,
+ S2MPS13_REG_CTRL2,
+ S2MPS13_REG_RSVD1,
+ S2MPS13_REG_RSVD2,
+ S2MPS13_REG_RSVD3,
+ S2MPS13_REG_RSVD4,
+ S2MPS13_REG_RSVD5,
+ S2MPS13_REG_RSVD6,
+ S2MPS13_REG_CTRL3,
+ S2MPS13_REG_RSVD7,
+ S2MPS13_REG_RSVD8,
+ S2MPS13_REG_WRSTBI,
+ S2MPS13_REG_B1CTRL,
+ S2MPS13_REG_B1OUT,
+ S2MPS13_REG_B2CTRL,
+ S2MPS13_REG_B2OUT,
+ S2MPS13_REG_B3CTRL,
+ S2MPS13_REG_B3OUT,
+ S2MPS13_REG_B4CTRL,
+ S2MPS13_REG_B4OUT,
+ S2MPS13_REG_B5CTRL,
+ S2MPS13_REG_B5OUT,
+ S2MPS13_REG_B6CTRL,
+ S2MPS13_REG_B6OUT,
+ S2MPS13_REG_B7CTRL,
+ S2MPS13_REG_B7SW,
+ S2MPS13_REG_B7OUT,
+ S2MPS13_REG_B8CTRL,
+ S2MPS13_REG_B8OUT,
+ S2MPS13_REG_B9CTRL,
+ S2MPS13_REG_B9OUT,
+ S2MPS13_REG_B10CTRL,
+ S2MPS13_REG_B10OUT,
+ S2MPS13_REG_BB1CTRL,
+ S2MPS13_REG_BB1OUT,
+ S2MPS13_REG_BUCK_RAMP1,
+ S2MPS13_REG_BUCK_RAMP2,
+ S2MPS13_REG_LDO_DVS1,
+ S2MPS13_REG_LDO_DVS2,
+ S2MPS13_REG_LDO_DVS3,
+ S2MPS13_REG_B6OUT2,
+ S2MPS13_REG_L1CTRL,
+ S2MPS13_REG_L2CTRL,
+ S2MPS13_REG_L3CTRL,
+ S2MPS13_REG_L4CTRL,
+ S2MPS13_REG_L5CTRL,
+ S2MPS13_REG_L6CTRL,
+ S2MPS13_REG_L7CTRL,
+ S2MPS13_REG_L8CTRL,
+ S2MPS13_REG_L9CTRL,
+ S2MPS13_REG_L10CTRL,
+ S2MPS13_REG_L11CTRL,
+ S2MPS13_REG_L12CTRL,
+ S2MPS13_REG_L13CTRL,
+ S2MPS13_REG_L14CTRL,
+ S2MPS13_REG_L15CTRL,
+ S2MPS13_REG_L16CTRL,
+ S2MPS13_REG_L17CTRL,
+ S2MPS13_REG_L18CTRL,
+ S2MPS13_REG_L19CTRL,
+ S2MPS13_REG_L20CTRL,
+ S2MPS13_REG_L21CTRL,
+ S2MPS13_REG_L22CTRL,
+ S2MPS13_REG_L23CTRL,
+ S2MPS13_REG_L24CTRL,
+ S2MPS13_REG_L25CTRL,
+ S2MPS13_REG_L26CTRL,
+ S2MPS13_REG_L27CTRL,
+ S2MPS13_REG_L28CTRL,
+ S2MPS13_REG_L29CTRL,
+ S2MPS13_REG_L30CTRL,
+ S2MPS13_REG_L31CTRL,
+ S2MPS13_REG_L32CTRL,
+ S2MPS13_REG_L33CTRL,
+ S2MPS13_REG_L34CTRL,
+ S2MPS13_REG_L35CTRL,
+ S2MPS13_REG_L36CTRL,
+ S2MPS13_REG_L37CTRL,
+ S2MPS13_REG_L38CTRL,
+ S2MPS13_REG_L39CTRL,
+ S2MPS13_REG_L40CTRL,
+ S2MPS13_REG_LDODSCH1,
+ S2MPS13_REG_LDODSCH2,
+ S2MPS13_REG_LDODSCH3,
+ S2MPS13_REG_LDODSCH4,
+ S2MPS13_REG_LDODSCH5,
+};
+
+/* regulator ids */
+enum s2mps13_regulators {
+ S2MPS13_LDO1,
+ S2MPS13_LDO2,
+ S2MPS13_LDO3,
+ S2MPS13_LDO4,
+ S2MPS13_LDO5,
+ S2MPS13_LDO6,
+ S2MPS13_LDO7,
+ S2MPS13_LDO8,
+ S2MPS13_LDO9,
+ S2MPS13_LDO10,
+ S2MPS13_LDO11,
+ S2MPS13_LDO12,
+ S2MPS13_LDO13,
+ S2MPS13_LDO14,
+ S2MPS13_LDO15,
+ S2MPS13_LDO16,
+ S2MPS13_LDO17,
+ S2MPS13_LDO18,
+ S2MPS13_LDO19,
+ S2MPS13_LDO20,
+ S2MPS13_LDO21,
+ S2MPS13_LDO22,
+ S2MPS13_LDO23,
+ S2MPS13_LDO24,
+ S2MPS13_LDO25,
+ S2MPS13_LDO26,
+ S2MPS13_LDO27,
+ S2MPS13_LDO28,
+ S2MPS13_LDO29,
+ S2MPS13_LDO30,
+ S2MPS13_LDO31,
+ S2MPS13_LDO32,
+ S2MPS13_LDO33,
+ S2MPS13_LDO34,
+ S2MPS13_LDO35,
+ S2MPS13_LDO36,
+ S2MPS13_LDO37,
+ S2MPS13_LDO38,
+ S2MPS13_LDO39,
+ S2MPS13_LDO40,
+ S2MPS13_BUCK1,
+ S2MPS13_BUCK2,
+ S2MPS13_BUCK3,
+ S2MPS13_BUCK4,
+ S2MPS13_BUCK5,
+ S2MPS13_BUCK6,
+ S2MPS13_BUCK7,
+ S2MPS13_BUCK8,
+ S2MPS13_BUCK9,
+ S2MPS13_BUCK10,
+
+ S2MPS13_REGULATOR_MAX,
+};
+
+/*
+ * Default ramp delay in uV/us. The datasheet says that the ramp delay can be
+ * controlled, but it does not specify which register is used for that.
+ * Assume the default value is used.
+ */
+#define S2MPS13_BUCK_RAMP_DELAY 12500
+
+#endif /* __LINUX_MFD_S2MPS13_H */
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
new file mode 100644
index 000000000..c92f4782a
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -0,0 +1,146 @@
+/*
+ * s2mps14.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPS14_H
+#define __LINUX_MFD_S2MPS14_H
+
+/* S2MPS14 registers */
+enum s2mps14_reg {
+ S2MPS14_REG_ID,
+ S2MPS14_REG_INT1,
+ S2MPS14_REG_INT2,
+ S2MPS14_REG_INT3,
+ S2MPS14_REG_INT1M,
+ S2MPS14_REG_INT2M,
+ S2MPS14_REG_INT3M,
+ S2MPS14_REG_ST1,
+ S2MPS14_REG_ST2,
+ S2MPS14_REG_PWRONSRC,
+ S2MPS14_REG_OFFSRC,
+ S2MPS14_REG_BU_CHG,
+ S2MPS14_REG_RTCCTRL,
+ S2MPS14_REG_CTRL1,
+ S2MPS14_REG_CTRL2,
+ S2MPS14_REG_RSVD1,
+ S2MPS14_REG_RSVD2,
+ S2MPS14_REG_RSVD3,
+ S2MPS14_REG_RSVD4,
+ S2MPS14_REG_RSVD5,
+ S2MPS14_REG_RSVD6,
+ S2MPS14_REG_CTRL3,
+ S2MPS14_REG_RSVD7,
+ S2MPS14_REG_RSVD8,
+ S2MPS14_REG_WRSTBI,
+ S2MPS14_REG_B1CTRL1,
+ S2MPS14_REG_B1CTRL2,
+ S2MPS14_REG_B2CTRL1,
+ S2MPS14_REG_B2CTRL2,
+ S2MPS14_REG_B3CTRL1,
+ S2MPS14_REG_B3CTRL2,
+ S2MPS14_REG_B4CTRL1,
+ S2MPS14_REG_B4CTRL2,
+ S2MPS14_REG_B5CTRL1,
+ S2MPS14_REG_B5CTRL2,
+ S2MPS14_REG_L1CTRL,
+ S2MPS14_REG_L2CTRL,
+ S2MPS14_REG_L3CTRL,
+ S2MPS14_REG_L4CTRL,
+ S2MPS14_REG_L5CTRL,
+ S2MPS14_REG_L6CTRL,
+ S2MPS14_REG_L7CTRL,
+ S2MPS14_REG_L8CTRL,
+ S2MPS14_REG_L9CTRL,
+ S2MPS14_REG_L10CTRL,
+ S2MPS14_REG_L11CTRL,
+ S2MPS14_REG_L12CTRL,
+ S2MPS14_REG_L13CTRL,
+ S2MPS14_REG_L14CTRL,
+ S2MPS14_REG_L15CTRL,
+ S2MPS14_REG_L16CTRL,
+ S2MPS14_REG_L17CTRL,
+ S2MPS14_REG_L18CTRL,
+ S2MPS14_REG_L19CTRL,
+ S2MPS14_REG_L20CTRL,
+ S2MPS14_REG_L21CTRL,
+ S2MPS14_REG_L22CTRL,
+ S2MPS14_REG_L23CTRL,
+ S2MPS14_REG_L24CTRL,
+ S2MPS14_REG_L25CTRL,
+ S2MPS14_REG_LDODSCH1,
+ S2MPS14_REG_LDODSCH2,
+ S2MPS14_REG_LDODSCH3,
+};
+
+/* S2MPS14 regulator ids */
+enum s2mps14_regulators {
+ S2MPS14_LDO1,
+ S2MPS14_LDO2,
+ S2MPS14_LDO3,
+ S2MPS14_LDO4,
+ S2MPS14_LDO5,
+ S2MPS14_LDO6,
+ S2MPS14_LDO7,
+ S2MPS14_LDO8,
+ S2MPS14_LDO9,
+ S2MPS14_LDO10,
+ S2MPS14_LDO11,
+ S2MPS14_LDO12,
+ S2MPS14_LDO13,
+ S2MPS14_LDO14,
+ S2MPS14_LDO15,
+ S2MPS14_LDO16,
+ S2MPS14_LDO17,
+ S2MPS14_LDO18,
+ S2MPS14_LDO19,
+ S2MPS14_LDO20,
+ S2MPS14_LDO21,
+ S2MPS14_LDO22,
+ S2MPS14_LDO23,
+ S2MPS14_LDO24,
+ S2MPS14_LDO25,
+ S2MPS14_BUCK1,
+ S2MPS14_BUCK2,
+ S2MPS14_BUCK3,
+ S2MPS14_BUCK4,
+ S2MPS14_BUCK5,
+
+ S2MPS14_REGULATOR_MAX,
+};
+
+/* Regulator constraints for BUCKx */
+#define S2MPS14_BUCK1235_START_SEL 0x20
+#define S2MPS14_BUCK4_START_SEL 0x40
+/*
+ * Default ramp delay in uV/us. The datasheet says that the ramp delay can be
+ * controlled, but it does not specify which register is used for that.
+ * Assume the default value is used.
+ */
+#define S2MPS14_BUCK_RAMP_DELAY 12500
+
+#define S2MPS14_LDO_VSEL_MASK 0x3F
+#define S2MPS14_BUCK_VSEL_MASK 0xFF
+#define S2MPS14_ENABLE_MASK (0x03 << S2MPS14_ENABLE_SHIFT)
+#define S2MPS14_ENABLE_SHIFT 6
+/* On/Off controlled by PWREN */
+#define S2MPS14_ENABLE_SUSPEND (0x01 << S2MPS14_ENABLE_SHIFT)
+/* On/Off controlled by LDO10EN or EMMCEN */
+#define S2MPS14_ENABLE_EXT_CONTROL (0x00 << S2MPS14_ENABLE_SHIFT)
+#define S2MPS14_LDO_N_VOLTAGES (S2MPS14_LDO_VSEL_MASK + 1)
+#define S2MPS14_BUCK_N_VOLTAGES (S2MPS14_BUCK_VSEL_MASK + 1)
+
+#endif /* __LINUX_MFD_S2MPS14_H */
diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h
new file mode 100644
index 000000000..47ae9bc58
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mpu02.h
@@ -0,0 +1,201 @@
+/*
+ * s2mpu02.h
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_MFD_S2MPU02_H
+#define __LINUX_MFD_S2MPU02_H
+
+/* S2MPU02 registers */
+enum S2MPU02_reg {
+ S2MPU02_REG_ID,
+ S2MPU02_REG_INT1,
+ S2MPU02_REG_INT2,
+ S2MPU02_REG_INT3,
+ S2MPU02_REG_INT1M,
+ S2MPU02_REG_INT2M,
+ S2MPU02_REG_INT3M,
+ S2MPU02_REG_ST1,
+ S2MPU02_REG_ST2,
+ S2MPU02_REG_PWRONSRC,
+ S2MPU02_REG_OFFSRC,
+ S2MPU02_REG_BU_CHG,
+ S2MPU02_REG_RTCCTRL,
+ S2MPU02_REG_PMCTRL1,
+ S2MPU02_REG_RSVD1,
+ S2MPU02_REG_RSVD2,
+ S2MPU02_REG_RSVD3,
+ S2MPU02_REG_RSVD4,
+ S2MPU02_REG_RSVD5,
+ S2MPU02_REG_RSVD6,
+ S2MPU02_REG_RSVD7,
+ S2MPU02_REG_WRSTEN,
+ S2MPU02_REG_RSVD8,
+ S2MPU02_REG_RSVD9,
+ S2MPU02_REG_RSVD10,
+ S2MPU02_REG_B1CTRL1,
+ S2MPU02_REG_B1CTRL2,
+ S2MPU02_REG_B2CTRL1,
+ S2MPU02_REG_B2CTRL2,
+ S2MPU02_REG_B3CTRL1,
+ S2MPU02_REG_B3CTRL2,
+ S2MPU02_REG_B4CTRL1,
+ S2MPU02_REG_B4CTRL2,
+ S2MPU02_REG_B5CTRL1,
+ S2MPU02_REG_B5CTRL2,
+ S2MPU02_REG_B5CTRL3,
+ S2MPU02_REG_B5CTRL4,
+ S2MPU02_REG_B5CTRL5,
+ S2MPU02_REG_B6CTRL1,
+ S2MPU02_REG_B6CTRL2,
+ S2MPU02_REG_B7CTRL1,
+ S2MPU02_REG_B7CTRL2,
+ S2MPU02_REG_RAMP1,
+ S2MPU02_REG_RAMP2,
+ S2MPU02_REG_L1CTRL,
+ S2MPU02_REG_L2CTRL1,
+ S2MPU02_REG_L2CTRL2,
+ S2MPU02_REG_L2CTRL3,
+ S2MPU02_REG_L2CTRL4,
+ S2MPU02_REG_L3CTRL,
+ S2MPU02_REG_L4CTRL,
+ S2MPU02_REG_L5CTRL,
+ S2MPU02_REG_L6CTRL,
+ S2MPU02_REG_L7CTRL,
+ S2MPU02_REG_L8CTRL,
+ S2MPU02_REG_L9CTRL,
+ S2MPU02_REG_L10CTRL,
+ S2MPU02_REG_L11CTRL,
+ S2MPU02_REG_L12CTRL,
+ S2MPU02_REG_L13CTRL,
+ S2MPU02_REG_L14CTRL,
+ S2MPU02_REG_L15CTRL,
+ S2MPU02_REG_L16CTRL,
+ S2MPU02_REG_L17CTRL,
+ S2MPU02_REG_L18CTRL,
+ S2MPU02_REG_L19CTRL,
+ S2MPU02_REG_L20CTRL,
+ S2MPU02_REG_L21CTRL,
+ S2MPU02_REG_L22CTRL,
+ S2MPU02_REG_L23CTRL,
+ S2MPU02_REG_L24CTRL,
+ S2MPU02_REG_L25CTRL,
+ S2MPU02_REG_L26CTRL,
+ S2MPU02_REG_L27CTRL,
+ S2MPU02_REG_L28CTRL,
+ S2MPU02_REG_LDODSCH1,
+ S2MPU02_REG_LDODSCH2,
+ S2MPU02_REG_LDODSCH3,
+ S2MPU02_REG_LDODSCH4,
+ S2MPU02_REG_SELMIF,
+ S2MPU02_REG_RSVD11,
+ S2MPU02_REG_RSVD12,
+ S2MPU02_REG_RSVD13,
+ S2MPU02_REG_DVSSEL,
+ S2MPU02_REG_DVSPTR,
+ S2MPU02_REG_DVSDATA,
+};
+
+/* S2MPU02 regulator ids */
+enum S2MPU02_regulators {
+ S2MPU02_LDO1,
+ S2MPU02_LDO2,
+ S2MPU02_LDO3,
+ S2MPU02_LDO4,
+ S2MPU02_LDO5,
+ S2MPU02_LDO6,
+ S2MPU02_LDO7,
+ S2MPU02_LDO8,
+ S2MPU02_LDO9,
+ S2MPU02_LDO10,
+ S2MPU02_LDO11,
+ S2MPU02_LDO12,
+ S2MPU02_LDO13,
+ S2MPU02_LDO14,
+ S2MPU02_LDO15,
+ S2MPU02_LDO16,
+ S2MPU02_LDO17,
+ S2MPU02_LDO18,
+ S2MPU02_LDO19,
+ S2MPU02_LDO20,
+ S2MPU02_LDO21,
+ S2MPU02_LDO22,
+ S2MPU02_LDO23,
+ S2MPU02_LDO24,
+ S2MPU02_LDO25,
+ S2MPU02_LDO26,
+ S2MPU02_LDO27,
+ S2MPU02_LDO28,
+ S2MPU02_BUCK1,
+ S2MPU02_BUCK2,
+ S2MPU02_BUCK3,
+ S2MPU02_BUCK4,
+ S2MPU02_BUCK5,
+ S2MPU02_BUCK6,
+ S2MPU02_BUCK7,
+
+ S2MPU02_REGULATOR_MAX,
+};
+
+/* Regulator constraints for BUCKx */
+#define S2MPU02_BUCK1234_MIN_600MV 600000
+#define S2MPU02_BUCK5_MIN_1081_25MV 1081250
+#define S2MPU02_BUCK6_MIN_1700MV 1700000
+#define S2MPU02_BUCK7_MIN_900MV 900000
+
+#define S2MPU02_BUCK1234_STEP_6_25MV 6250
+#define S2MPU02_BUCK5_STEP_6_25MV 6250
+#define S2MPU02_BUCK6_STEP_2_50MV 2500
+#define S2MPU02_BUCK7_STEP_6_25MV 6250
+
+#define S2MPU02_BUCK1234_START_SEL 0x00
+#define S2MPU02_BUCK5_START_SEL 0x4D
+#define S2MPU02_BUCK6_START_SEL 0x28
+#define S2MPU02_BUCK7_START_SEL 0x30
+
+#define S2MPU02_BUCK_RAMP_DELAY 12500
+
+/* Regulator constraints for different types of LDOx */
+#define S2MPU02_LDO_MIN_900MV 900000
+#define S2MPU02_LDO_MIN_1050MV 1050000
+#define S2MPU02_LDO_MIN_1600MV 1600000
+#define S2MPU02_LDO_STEP_12_5MV 12500
+#define S2MPU02_LDO_STEP_25MV 25000
+#define S2MPU02_LDO_STEP_50MV 50000
+
+#define S2MPU02_LDO_GROUP1_START_SEL 0x8
+#define S2MPU02_LDO_GROUP2_START_SEL 0xA
+#define S2MPU02_LDO_GROUP3_START_SEL 0x10
+
+#define S2MPU02_LDO_VSEL_MASK 0x3F
+#define S2MPU02_BUCK_VSEL_MASK 0xFF
+#define S2MPU02_ENABLE_MASK (0x03 << S2MPU02_ENABLE_SHIFT)
+#define S2MPU02_ENABLE_SHIFT 6
+
+/* On/Off controlled by PWREN */
+#define S2MPU02_ENABLE_SUSPEND (0x01 << S2MPU02_ENABLE_SHIFT)
+#define S2MPU02_DISABLE_SUSPEND (0x11 << S2MPU02_ENABLE_SHIFT)
+#define S2MPU02_LDO_N_VOLTAGES (S2MPU02_LDO_VSEL_MASK + 1)
+#define S2MPU02_BUCK_N_VOLTAGES (S2MPU02_BUCK_VSEL_MASK + 1)
+
+/* RAMP delay for BUCK1234 */
+#define S2MPU02_BUCK1_RAMP_SHIFT 6
+#define S2MPU02_BUCK2_RAMP_SHIFT 4
+#define S2MPU02_BUCK3_RAMP_SHIFT 2
+#define S2MPU02_BUCK4_RAMP_SHIFT 0
+#define S2MPU02_BUCK1234_RAMP_MASK 0x3
+
+#endif /* __LINUX_MFD_S2MPU02_H */
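The MIN/STEP/START_SEL constants above describe a linear voltage map that begins at the given selector offset. Purely as an illustration of how they combine; the helper name and the assumption that selectors below START_SEL are invalid are the sketch's own.

#include <linux/errno.h>
#include <linux/mfd/samsung/s2mpu02.h>

/* Illustrative only: translate a raw BUCK5 voltage selector into
 * microvolts using the linear min/step/start description above. */
static inline int example_s2mpu02_buck5_sel_to_uV(unsigned int sel)
{
        sel &= S2MPU02_BUCK_VSEL_MASK;
        if (sel < S2MPU02_BUCK5_START_SEL)
                return -EINVAL;
        return S2MPU02_BUCK5_MIN_1081_25MV +
               (sel - S2MPU02_BUCK5_START_SEL) * S2MPU02_BUCK5_STEP_6_25MV;
}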
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
new file mode 100644
index 000000000..e025418e5
--- /dev/null
+++ b/include/linux/mfd/samsung/s5m8763.h
@@ -0,0 +1,96 @@
+/* s5m8763.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_S5M8763_H
+#define __LINUX_MFD_S5M8763_H
+
+/* S5M8763 registers */
+enum s5m8763_reg {
+ S5M8763_REG_IRQ1,
+ S5M8763_REG_IRQ2,
+ S5M8763_REG_IRQ3,
+ S5M8763_REG_IRQ4,
+ S5M8763_REG_IRQM1,
+ S5M8763_REG_IRQM2,
+ S5M8763_REG_IRQM3,
+ S5M8763_REG_IRQM4,
+ S5M8763_REG_STATUS1,
+ S5M8763_REG_STATUS2,
+ S5M8763_REG_STATUSM1,
+ S5M8763_REG_STATUSM2,
+ S5M8763_REG_CHGR1,
+ S5M8763_REG_CHGR2,
+ S5M8763_REG_LDO_ACTIVE_DISCHARGE1,
+ S5M8763_REG_LDO_ACTIVE_DISCHARGE2,
+ S5M8763_REG_BUCK_ACTIVE_DISCHARGE3,
+ S5M8763_REG_ONOFF1,
+ S5M8763_REG_ONOFF2,
+ S5M8763_REG_ONOFF3,
+ S5M8763_REG_ONOFF4,
+ S5M8763_REG_BUCK1_VOLTAGE1,
+ S5M8763_REG_BUCK1_VOLTAGE2,
+ S5M8763_REG_BUCK1_VOLTAGE3,
+ S5M8763_REG_BUCK1_VOLTAGE4,
+ S5M8763_REG_BUCK2_VOLTAGE1,
+ S5M8763_REG_BUCK2_VOLTAGE2,
+ S5M8763_REG_BUCK3,
+ S5M8763_REG_BUCK4,
+ S5M8763_REG_LDO1_LDO2,
+ S5M8763_REG_LDO3,
+ S5M8763_REG_LDO4,
+ S5M8763_REG_LDO5,
+ S5M8763_REG_LDO6,
+ S5M8763_REG_LDO7,
+ S5M8763_REG_LDO7_LDO8,
+ S5M8763_REG_LDO9_LDO10,
+ S5M8763_REG_LDO11,
+ S5M8763_REG_LDO12,
+ S5M8763_REG_LDO13,
+ S5M8763_REG_LDO14,
+ S5M8763_REG_LDO15,
+ S5M8763_REG_LDO16,
+ S5M8763_REG_BKCHR,
+ S5M8763_REG_LBCNFG1,
+ S5M8763_REG_LBCNFG2,
+};
+
+/* S5M8763 regulator ids */
+enum s5m8763_regulators {
+ S5M8763_LDO1,
+ S5M8763_LDO2,
+ S5M8763_LDO3,
+ S5M8763_LDO4,
+ S5M8763_LDO5,
+ S5M8763_LDO6,
+ S5M8763_LDO7,
+ S5M8763_LDO8,
+ S5M8763_LDO9,
+ S5M8763_LDO10,
+ S5M8763_LDO11,
+ S5M8763_LDO12,
+ S5M8763_LDO13,
+ S5M8763_LDO14,
+ S5M8763_LDO15,
+ S5M8763_LDO16,
+ S5M8763_BUCK1,
+ S5M8763_BUCK2,
+ S5M8763_BUCK3,
+ S5M8763_BUCK4,
+ S5M8763_AP_EN32KHZ,
+ S5M8763_CP_EN32KHZ,
+ S5M8763_ENCHGVI,
+ S5M8763_ESAFEUSB1,
+ S5M8763_ESAFEUSB2,
+};
+
+#define S5M8763_ENRAMP (1 << 4)
+#endif /* __LINUX_MFD_S5M8763_H */
diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h
new file mode 100644
index 000000000..243b58fec
--- /dev/null
+++ b/include/linux/mfd/samsung/s5m8767.h
@@ -0,0 +1,211 @@
+/* s5m8767.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_S5M8767_H
+#define __LINUX_MFD_S5M8767_H
+
+/* S5M8767 registers */
+enum s5m8767_reg {
+ S5M8767_REG_ID,
+ S5M8767_REG_INT1,
+ S5M8767_REG_INT2,
+ S5M8767_REG_INT3,
+ S5M8767_REG_INT1M,
+ S5M8767_REG_INT2M,
+ S5M8767_REG_INT3M,
+ S5M8767_REG_STATUS1,
+ S5M8767_REG_STATUS2,
+ S5M8767_REG_STATUS3,
+ S5M8767_REG_CTRL1,
+ S5M8767_REG_CTRL2,
+ S5M8767_REG_LOWBAT1,
+ S5M8767_REG_LOWBAT2,
+ S5M8767_REG_BUCHG,
+ S5M8767_REG_DVSRAMP,
+ S5M8767_REG_DVSTIMER2 = 0x10,
+ S5M8767_REG_DVSTIMER3,
+ S5M8767_REG_DVSTIMER4,
+ S5M8767_REG_LDO1,
+ S5M8767_REG_LDO2,
+ S5M8767_REG_LDO3,
+ S5M8767_REG_LDO4,
+ S5M8767_REG_LDO5,
+ S5M8767_REG_LDO6,
+ S5M8767_REG_LDO7,
+ S5M8767_REG_LDO8,
+ S5M8767_REG_LDO9,
+ S5M8767_REG_LDO10,
+ S5M8767_REG_LDO11,
+ S5M8767_REG_LDO12,
+ S5M8767_REG_LDO13,
+ S5M8767_REG_LDO14 = 0x20,
+ S5M8767_REG_LDO15,
+ S5M8767_REG_LDO16,
+ S5M8767_REG_LDO17,
+ S5M8767_REG_LDO18,
+ S5M8767_REG_LDO19,
+ S5M8767_REG_LDO20,
+ S5M8767_REG_LDO21,
+ S5M8767_REG_LDO22,
+ S5M8767_REG_LDO23,
+ S5M8767_REG_LDO24,
+ S5M8767_REG_LDO25,
+ S5M8767_REG_LDO26,
+ S5M8767_REG_LDO27,
+ S5M8767_REG_LDO28,
+ S5M8767_REG_UVLO = 0x31,
+ S5M8767_REG_BUCK1CTRL1,
+ S5M8767_REG_BUCK1CTRL2,
+ S5M8767_REG_BUCK2CTRL,
+ S5M8767_REG_BUCK2DVS1,
+ S5M8767_REG_BUCK2DVS2,
+ S5M8767_REG_BUCK2DVS3,
+ S5M8767_REG_BUCK2DVS4,
+ S5M8767_REG_BUCK2DVS5,
+ S5M8767_REG_BUCK2DVS6,
+ S5M8767_REG_BUCK2DVS7,
+ S5M8767_REG_BUCK2DVS8,
+ S5M8767_REG_BUCK3CTRL,
+ S5M8767_REG_BUCK3DVS1,
+ S5M8767_REG_BUCK3DVS2,
+ S5M8767_REG_BUCK3DVS3,
+ S5M8767_REG_BUCK3DVS4,
+ S5M8767_REG_BUCK3DVS5,
+ S5M8767_REG_BUCK3DVS6,
+ S5M8767_REG_BUCK3DVS7,
+ S5M8767_REG_BUCK3DVS8,
+ S5M8767_REG_BUCK4CTRL,
+ S5M8767_REG_BUCK4DVS1,
+ S5M8767_REG_BUCK4DVS2,
+ S5M8767_REG_BUCK4DVS3,
+ S5M8767_REG_BUCK4DVS4,
+ S5M8767_REG_BUCK4DVS5,
+ S5M8767_REG_BUCK4DVS6,
+ S5M8767_REG_BUCK4DVS7,
+ S5M8767_REG_BUCK4DVS8,
+ S5M8767_REG_BUCK5CTRL1,
+ S5M8767_REG_BUCK5CTRL2,
+ S5M8767_REG_BUCK5CTRL3,
+ S5M8767_REG_BUCK5CTRL4,
+ S5M8767_REG_BUCK5CTRL5,
+ S5M8767_REG_BUCK6CTRL1,
+ S5M8767_REG_BUCK6CTRL2,
+ S5M8767_REG_BUCK7CTRL1,
+ S5M8767_REG_BUCK7CTRL2,
+ S5M8767_REG_BUCK8CTRL1,
+ S5M8767_REG_BUCK8CTRL2,
+ S5M8767_REG_BUCK9CTRL1,
+ S5M8767_REG_BUCK9CTRL2,
+ S5M8767_REG_LDO1CTRL,
+ S5M8767_REG_LDO2_1CTRL,
+ S5M8767_REG_LDO2_2CTRL,
+ S5M8767_REG_LDO2_3CTRL,
+ S5M8767_REG_LDO2_4CTRL,
+ S5M8767_REG_LDO3CTRL,
+ S5M8767_REG_LDO4CTRL,
+ S5M8767_REG_LDO5CTRL,
+ S5M8767_REG_LDO6CTRL,
+ S5M8767_REG_LDO7CTRL,
+ S5M8767_REG_LDO8CTRL,
+ S5M8767_REG_LDO9CTRL,
+ S5M8767_REG_LDO10CTRL,
+ S5M8767_REG_LDO11CTRL,
+ S5M8767_REG_LDO12CTRL,
+ S5M8767_REG_LDO13CTRL,
+ S5M8767_REG_LDO14CTRL,
+ S5M8767_REG_LDO15CTRL,
+ S5M8767_REG_LDO16CTRL,
+ S5M8767_REG_LDO17CTRL,
+ S5M8767_REG_LDO18CTRL,
+ S5M8767_REG_LDO19CTRL,
+ S5M8767_REG_LDO20CTRL,
+ S5M8767_REG_LDO21CTRL,
+ S5M8767_REG_LDO22CTRL,
+ S5M8767_REG_LDO23CTRL,
+ S5M8767_REG_LDO24CTRL,
+ S5M8767_REG_LDO25CTRL,
+ S5M8767_REG_LDO26CTRL,
+ S5M8767_REG_LDO27CTRL,
+ S5M8767_REG_LDO28CTRL,
+};
+
+/* S5M8767 regulator ids */
+enum s5m8767_regulators {
+ S5M8767_LDO1,
+ S5M8767_LDO2,
+ S5M8767_LDO3,
+ S5M8767_LDO4,
+ S5M8767_LDO5,
+ S5M8767_LDO6,
+ S5M8767_LDO7,
+ S5M8767_LDO8,
+ S5M8767_LDO9,
+ S5M8767_LDO10,
+ S5M8767_LDO11,
+ S5M8767_LDO12,
+ S5M8767_LDO13,
+ S5M8767_LDO14,
+ S5M8767_LDO15,
+ S5M8767_LDO16,
+ S5M8767_LDO17,
+ S5M8767_LDO18,
+ S5M8767_LDO19,
+ S5M8767_LDO20,
+ S5M8767_LDO21,
+ S5M8767_LDO22,
+ S5M8767_LDO23,
+ S5M8767_LDO24,
+ S5M8767_LDO25,
+ S5M8767_LDO26,
+ S5M8767_LDO27,
+ S5M8767_LDO28,
+ S5M8767_BUCK1,
+ S5M8767_BUCK2,
+ S5M8767_BUCK3,
+ S5M8767_BUCK4,
+ S5M8767_BUCK5,
+ S5M8767_BUCK6,
+ S5M8767_BUCK7,
+ S5M8767_BUCK8,
+ S5M8767_BUCK9,
+ S5M8767_AP_EN32KHZ,
+ S5M8767_CP_EN32KHZ,
+
+ S5M8767_REG_MAX,
+};
+
+/* LDO_EN/BUCK_EN field in registers */
+#define S5M8767_ENCTRL_SHIFT 6
+#define S5M8767_ENCTRL_MASK (0x3 << S5M8767_ENCTRL_SHIFT)
+
+/*
+ * LDO_EN/BUCK_EN register value for controlling this Buck or LDO
+ * by GPIO (PWREN, BUCKEN).
+ */
+#define S5M8767_ENCTRL_USE_GPIO 0x1
+
+/*
+ * Values for BUCK_RAMP field in DVS_RAMP register, matching raw values
+ * in mV/us.
+ */
+enum s5m8767_dvs_buck_ramp_values {
+ S5M8767_DVS_BUCK_RAMP_5 = 0x4,
+ S5M8767_DVS_BUCK_RAMP_10 = 0x9,
+ S5M8767_DVS_BUCK_RAMP_12_5 = 0xb,
+ S5M8767_DVS_BUCK_RAMP_25 = 0xd,
+ S5M8767_DVS_BUCK_RAMP_50 = 0xe,
+ S5M8767_DVS_BUCK_RAMP_100 = 0xf,
+};
+#define S5M8767_DVS_BUCK_RAMP_SHIFT 4
+#define S5M8767_DVS_BUCK_RAMP_MASK (0xf << S5M8767_DVS_BUCK_RAMP_SHIFT)
+
+#endif /* __LINUX_MFD_S5M8767_H */
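As the comment above notes, writing S5M8767_ENCTRL_USE_GPIO into the LDO_EN/BUCK_EN field hands on/off control to the PWREN/BUCKEN pins. A hedged sketch using the masks from this header; the regmap handle and the choice of BUCK2 are assumptions of the example.

#include <linux/regmap.h>
#include <linux/mfd/samsung/s5m8767.h>

/* Illustrative only: let the external BUCKEN/PWREN pin gate BUCK2
 * instead of keeping it permanently enabled in software. */
static int example_buck2_gpio_control(struct regmap *pmic_regmap)
{
        return regmap_update_bits(pmic_regmap, S5M8767_REG_BUCK2CTRL,
                                  S5M8767_ENCTRL_MASK,
                                  S5M8767_ENCTRL_USE_GPIO << S5M8767_ENCTRL_SHIFT);
}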
diff --git a/include/linux/mfd/si476x-core.h b/include/linux/mfd/si476x-core.h
new file mode 100644
index 000000000..674b45d5a
--- /dev/null
+++ b/include/linux/mfd/si476x-core.h
@@ -0,0 +1,533 @@
+/*
+ * include/media/si476x-core.h -- Common definitions for si476x core
+ * device
+ *
+ * Copyright (C) 2012 Innovative Converged Devices(ICD)
+ * Copyright (C) 2013 Andrey Smirnov
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef SI476X_CORE_H
+#define SI476X_CORE_H
+
+#include <linux/kfifo.h>
+#include <linux/atomic.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/videodev2.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/mfd/si476x-platform.h>
+#include <linux/mfd/si476x-reports.h>
+
+/* Command Timeouts */
+#define SI476X_DEFAULT_TIMEOUT 100000
+#define SI476X_TIMEOUT_TUNE 700000
+#define SI476X_TIMEOUT_POWER_UP 330000
+#define SI476X_STATUS_POLL_US 0
+
+/* -------------------- si476x-i2c.c ----------------------- */
+
+enum si476x_freq_supported_chips {
+ SI476X_CHIP_SI4761 = 1,
+ SI476X_CHIP_SI4764,
+ SI476X_CHIP_SI4768,
+};
+
+enum si476x_part_revisions {
+ SI476X_REVISION_A10 = 0,
+ SI476X_REVISION_A20 = 1,
+ SI476X_REVISION_A30 = 2,
+};
+
+enum si476x_mfd_cells {
+ SI476X_RADIO_CELL = 0,
+ SI476X_CODEC_CELL,
+ SI476X_MFD_CELLS,
+};
+
+/**
+ * enum si476x_power_state - possible power state of the si476x
+ * device.
+ *
+ * @SI476X_POWER_DOWN: In this state all regulators are turned off
+ * and the reset line is pulled low. The device is completely
+ * inactive.
+ * @SI476X_POWER_UP_FULL: In this state all the power regulators are
+ * turned on, the reset line is pulled high, the IRQ line is enabled
+ * (polling is active in the polling use scenario) and the device is
+ * turned on with the POWER_UP command. The device is ready to be used.
+ * @SI476X_POWER_INCONSISTENT: This state indicates that the previous
+ * power down was inconsistent, meaning some of the regulators were
+ * not turned off and thus the device cannot be used without
+ * power-cycling.
+ */
+enum si476x_power_state {
+ SI476X_POWER_DOWN = 0,
+ SI476X_POWER_UP_FULL = 1,
+ SI476X_POWER_INCONSISTENT = 2,
+};
+
+/**
+ * struct si476x_core - internal data structure representing the
+ * underlying "core" device which all the MFD cell-devices use.
+ *
+ * @client: Actual I2C client used to transfer commands to the chip.
+ * @chip_id: Last digit of the chip model (e.g. "1" for SI4761)
+ * @cells: MFD cell devices created by this driver.
+ * @cmd_lock: Mutex used to serialize all the requests to the core
+ * device. This field should not be used directly. Instead
+ * si476x_core_lock()/si476x_core_unlock() should be used to get
+ * exclusive access to the "core" device.
+ * @users: Active users counter (used by the radio cell)
+ * @rds_read_queue: Wait queue used to wait for RDS data.
+ * @rds_fifo: FIFO in which all the RDS data received from the chip is
+ * placed.
+ * @rds_fifo_drainer: Worker that drains on-chip RDS FIFO.
+ * @rds_drainer_is_working: Flag used for launching only one instance
+ * of the @rds_fifo_drainer.
+ * @rds_drainer_status_lock: Lock used to guard access to the
+ * @rds_drainer_is_working variable.
+ * @command: Wait queue for waiting on command completion.
+ * @cts: Clear To Send flag set upon receiving first status with CTS
+ * set.
+ * @tuning: Wait queue used for waiting for tune/seek command
+ * completion.
+ * @stc: Similar to @cts, but for the STC bit of the status value.
+ * @power_up_parameters: Parameters used as argument for POWER_UP
+ * command when the device is started.
+ * @state: Current power state of the device.
+ * @supplies: Structure containing handles to all power supplies used
+ * by the device (NULL ones are ignored).
+ * @gpio_reset: GPIO pin connected to the RSTB pin of the chip.
+ * @pinmux: Chip's configurable pins configuration.
+ * @diversity_mode: Chip's role when functioning in diversity mode.
+ * @status_monitor: Polling worker used in the polling use case scenario
+ * (when the IRQ is not available).
+ * @revision: Chip's running firmware revision number (used for correct
+ * command set support).
+ */
+
+struct si476x_core {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ int chip_id;
+ struct mfd_cell cells[SI476X_MFD_CELLS];
+
+ struct mutex cmd_lock; /* for serializing fm radio operations */
+ atomic_t users;
+
+ wait_queue_head_t rds_read_queue;
+ struct kfifo rds_fifo;
+ struct work_struct rds_fifo_drainer;
+ bool rds_drainer_is_working;
+ struct mutex rds_drainer_status_lock;
+
+ wait_queue_head_t command;
+ atomic_t cts;
+
+ wait_queue_head_t tuning;
+ atomic_t stc;
+
+ struct si476x_power_up_args power_up_parameters;
+
+ enum si476x_power_state power_state;
+
+ struct regulator_bulk_data supplies[4];
+
+ int gpio_reset;
+
+ struct si476x_pinmux pinmux;
+ enum si476x_phase_diversity_mode diversity_mode;
+
+ atomic_t is_alive;
+
+ struct delayed_work status_monitor;
+#define SI476X_WORK_TO_CORE(w) container_of(to_delayed_work(w), \
+ struct si476x_core, \
+ status_monitor)
+
+ int revision;
+
+ int rds_fifo_depth;
+};
+
+static inline struct si476x_core *i2c_mfd_cell_to_core(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev->parent);
+ return i2c_get_clientdata(client);
+}
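+
+/*
+ * Illustrative usage sketch (not part of the original header): an MFD
+ * cell driver can recover the shared core object from its platform
+ * device, since the cell's parent device is the core I2C client.
+ * "pdev" below stands for a hypothetical platform_device from the cell
+ * driver's probe():
+ *
+ *	struct si476x_core *core = i2c_mfd_cell_to_core(&pdev->dev);
+ */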
+
+
+/**
+ * si476x_core_lock() - lock the core device to get an exclusive access
+ * to it.
+ */
+static inline void si476x_core_lock(struct si476x_core *core)
+{
+ mutex_lock(&core->cmd_lock);
+}
+
+/**
+ * si476x_core_unlock() - unlock the core device to relinquish an
+ * exclusive access to it.
+ */
+static inline void si476x_core_unlock(struct si476x_core *core)
+{
+ mutex_unlock(&core->cmd_lock);
+}
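+
+/*
+ * Illustrative usage sketch (not part of the original header): any
+ * sequence of commands sent to the core is expected to be wrapped in
+ * the helpers above, e.g. ("args" is a hypothetical, pre-filled
+ * struct si476x_tune_freq_args; error handling omitted):
+ *
+ *	si476x_core_lock(core);
+ *	err = si476x_core_cmd_fm_tune_freq(core, &args);
+ *	si476x_core_unlock(core);
+ */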
+
+/* The *_TUNE_FREQ family of commands accepts the frequency in
+ * multiples of 10 kHz for FM and of 1 kHz for AM */
+static inline u16 hz_to_si476x(struct si476x_core *core, int freq)
+{
+ u16 result;
+
+ switch (core->power_up_parameters.func) {
+ default:
+ case SI476X_FUNC_FM_RECEIVER:
+ result = freq / 10000;
+ break;
+ case SI476X_FUNC_AM_RECEIVER:
+ result = freq / 1000;
+ break;
+ }
+
+ return result;
+}
+
+static inline int si476x_to_hz(struct si476x_core *core, u16 freq)
+{
+ int result;
+
+ switch (core->power_up_parameters.func) {
+ default:
+ case SI476X_FUNC_FM_RECEIVER:
+ result = freq * 10000;
+ break;
+ case SI476X_FUNC_AM_RECEIVER:
+ result = freq * 1000;
+ break;
+ }
+
+ return result;
+}
+
+/* Since the V4L2_TUNER_CAP_LOW flag is supplied, the V4L2 subsystem
+ * measures frequency in 62.5 Hz units */
+
+static inline int hz_to_v4l2(int freq)
+{
+ return (freq * 10) / 625;
+}
+
+static inline int v4l2_to_hz(int freq)
+{
+ return (freq * 625) / 10;
+}
+
+static inline u16 v4l2_to_si476x(struct si476x_core *core, int freq)
+{
+ return hz_to_si476x(core, v4l2_to_hz(freq));
+}
+
+static inline int si476x_to_v4l2(struct si476x_core *core, u16 freq)
+{
+ return hz_to_v4l2(si476x_to_hz(core, freq));
+}
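+
+/*
+ * Illustrative example (not part of the original header): for an FM
+ * station at 100 MHz the helpers above yield
+ *
+ *	hz_to_si476x(core, 100000000) == 10000     (10 kHz units)
+ *	hz_to_v4l2(100000000)         == 1600000   (62.5 Hz units)
+ *	v4l2_to_si476x(core, 1600000) == 10000
+ *
+ * assuming core->power_up_parameters.func == SI476X_FUNC_FM_RECEIVER.
+ */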
+
+
+
+/**
+ * struct si476x_func_info - structure containing result of the
+ * FUNC_INFO command.
+ *
+ * @firmware.major: Firmware major number.
+ * @firmware.minor: Firmware minor numbers.
+ * @patch_id: Firmware patch ID.
+ * @func: Mode tuner is working in.
+ */
+struct si476x_func_info {
+ struct {
+ u8 major, minor[2];
+ } firmware;
+ u16 patch_id;
+ enum si476x_func func;
+};
+
+/**
+ * struct si476x_power_down_args - structure used to pass parameters
+ * to POWER_DOWN command
+ *
+ * @xosc: true - Power down, but leave the oscillator running.
+ * false - Full power down.
+ */
+struct si476x_power_down_args {
+ bool xosc;
+};
+
+/**
+ * enum si476x_tunemode - enum representing possible tune modes for
+ * the chip.
+ * @SI476X_TM_VALIDATED_NORMAL_TUNE: Unconditionally stay on the new
+ * channel after tune, tune status is valid.
+ * @SI476X_TM_INVALIDATED_FAST_TUNE: Unconditionally stay in the new
+ * channel after tune, tune status invalid.
+ * @SI476X_TM_VALIDATED_AF_TUNE: Jump back to previous channel if
+ * metric thresholds are not met.
+ * @SI476X_TM_VALIDATED_AF_CHECK: Unconditionally jump back to the
+ * previous channel.
+ */
+enum si476x_tunemode {
+ SI476X_TM_VALIDATED_NORMAL_TUNE = 0,
+ SI476X_TM_INVALIDATED_FAST_TUNE = 1,
+ SI476X_TM_VALIDATED_AF_TUNE = 2,
+ SI476X_TM_VALIDATED_AF_CHECK = 3,
+};
+
+/**
+ * enum si476x_smoothmetrics - enum containing the possible settings
+ * for audio transitioning of the chip
+ * @SI476X_SM_INITIALIZE_AUDIO: Initialize audio state to match this
+ * new channel
+ * @SI476X_SM_TRANSITION_AUDIO: Transition audio state from previous
+ * channel values to the new values
+ */
+enum si476x_smoothmetrics {
+ SI476X_SM_INITIALIZE_AUDIO = 0,
+ SI476X_SM_TRANSITION_AUDIO = 1,
+};
+
+/**
+ * struct si476x_rds_status_report - the structure representing the
+ * response to 'FM_RD_STATUS' command
+ * @rdstpptyint: Traffic program flag (TP) and/or program type (PTY)
+ * code has changed.
+ * @rdspiint: Program identification (PI) code has changed.
+ * @rdssyncint: RDS synchronization has changed.
+ * @rdsfifoint: RDS was received and the RDS FIFO has at least
+ * 'FM_RDS_INTERRUPT_FIFO_COUNT' elements in it.
+ * @tpptyvalid: Flag indicating that the TP flag and PTY code are valid.
+ * @pivalid: Flag indicating that the PI code is valid.
+ * @rdssync: RDS is currently synchronized.
+ * @rdsfifolost: Flag indicating that one or more RDS groups have been
+ * lost/discarded.
+ * @tp: Current channel's TP flag.
+ * @pty: Current channel's PTY code.
+ * @pi: Current channel's PI code.
+ * @rdsfifoused: Number of blocks remaining in the RDS FIFO (0 if
+ * empty).
+ */
+struct si476x_rds_status_report {
+ bool rdstpptyint, rdspiint, rdssyncint, rdsfifoint;
+ bool tpptyvalid, pivalid, rdssync, rdsfifolost;
+ bool tp;
+
+ u8 pty;
+ u16 pi;
+
+ u8 rdsfifoused;
+ u8 ble[4];
+
+ struct v4l2_rds_data rds[4];
+};
+
+struct si476x_rsq_status_args {
+ bool primary;
+ bool rsqack;
+ bool attune;
+ bool cancel;
+ bool stcack;
+};
+
+enum si476x_injside {
+ SI476X_INJSIDE_AUTO = 0,
+ SI476X_INJSIDE_LOW = 1,
+ SI476X_INJSIDE_HIGH = 2,
+};
+
+struct si476x_tune_freq_args {
+ bool zifsr;
+ bool hd;
+ enum si476x_injside injside;
+ int freq;
+ enum si476x_tunemode tunemode;
+ enum si476x_smoothmetrics smoothmetrics;
+ int antcap;
+};
+
+int si476x_core_stop(struct si476x_core *, bool);
+int si476x_core_start(struct si476x_core *, bool);
+int si476x_core_set_power_state(struct si476x_core *, enum si476x_power_state);
+bool si476x_core_has_am(struct si476x_core *);
+bool si476x_core_has_diversity(struct si476x_core *);
+bool si476x_core_is_a_secondary_tuner(struct si476x_core *);
+bool si476x_core_is_a_primary_tuner(struct si476x_core *);
+bool si476x_core_is_in_am_receiver_mode(struct si476x_core *core);
+bool si476x_core_is_powered_up(struct si476x_core *core);
+
+enum si476x_i2c_type {
+ SI476X_I2C_SEND,
+ SI476X_I2C_RECV
+};
+
+int si476x_core_i2c_xfer(struct si476x_core *,
+ enum si476x_i2c_type,
+ char *, int);
+
+
+/* -------------------- si476x-cmd.c ----------------------- */
+
+int si476x_core_cmd_func_info(struct si476x_core *, struct si476x_func_info *);
+int si476x_core_cmd_set_property(struct si476x_core *, u16, u16);
+int si476x_core_cmd_get_property(struct si476x_core *, u16);
+int si476x_core_cmd_dig_audio_pin_cfg(struct si476x_core *,
+ enum si476x_dclk_config,
+ enum si476x_dfs_config,
+ enum si476x_dout_config,
+ enum si476x_xout_config);
+int si476x_core_cmd_zif_pin_cfg(struct si476x_core *,
+ enum si476x_iqclk_config,
+ enum si476x_iqfs_config,
+ enum si476x_iout_config,
+ enum si476x_qout_config);
+int si476x_core_cmd_ic_link_gpo_ctl_pin_cfg(struct si476x_core *,
+ enum si476x_icin_config,
+ enum si476x_icip_config,
+ enum si476x_icon_config,
+ enum si476x_icop_config);
+int si476x_core_cmd_ana_audio_pin_cfg(struct si476x_core *,
+ enum si476x_lrout_config);
+int si476x_core_cmd_intb_pin_cfg(struct si476x_core *, enum si476x_intb_config,
+ enum si476x_a1_config);
+int si476x_core_cmd_fm_seek_start(struct si476x_core *, bool, bool);
+int si476x_core_cmd_am_seek_start(struct si476x_core *, bool, bool);
+int si476x_core_cmd_fm_rds_status(struct si476x_core *, bool, bool, bool,
+ struct si476x_rds_status_report *);
+int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *, bool,
+ struct si476x_rds_blockcount_report *);
+int si476x_core_cmd_fm_tune_freq(struct si476x_core *,
+ struct si476x_tune_freq_args *);
+int si476x_core_cmd_am_tune_freq(struct si476x_core *,
+ struct si476x_tune_freq_args *);
+int si476x_core_cmd_am_rsq_status(struct si476x_core *,
+ struct si476x_rsq_status_args *,
+ struct si476x_rsq_status_report *);
+int si476x_core_cmd_fm_rsq_status(struct si476x_core *,
+ struct si476x_rsq_status_args *,
+ struct si476x_rsq_status_report *);
+int si476x_core_cmd_power_up(struct si476x_core *,
+ struct si476x_power_up_args *);
+int si476x_core_cmd_power_down(struct si476x_core *,
+ struct si476x_power_down_args *);
+int si476x_core_cmd_fm_phase_div_status(struct si476x_core *);
+int si476x_core_cmd_fm_phase_diversity(struct si476x_core *,
+ enum si476x_phase_diversity_mode);
+
+int si476x_core_cmd_fm_acf_status(struct si476x_core *,
+ struct si476x_acf_status_report *);
+int si476x_core_cmd_am_acf_status(struct si476x_core *,
+ struct si476x_acf_status_report *);
+int si476x_core_cmd_agc_status(struct si476x_core *,
+ struct si476x_agc_status_report *);
+
+enum si476x_power_grid_type {
+ SI476X_POWER_GRID_50HZ = 0,
+ SI476X_POWER_GRID_60HZ,
+};
+
+/* Properties */
+
+enum si476x_interrupt_flags {
+ SI476X_STCIEN = (1 << 0),
+ SI476X_ACFIEN = (1 << 1),
+ SI476X_RDSIEN = (1 << 2),
+ SI476X_RSQIEN = (1 << 3),
+
+ SI476X_ERRIEN = (1 << 6),
+ SI476X_CTSIEN = (1 << 7),
+
+ SI476X_STCREP = (1 << 8),
+ SI476X_ACFREP = (1 << 9),
+ SI476X_RDSREP = (1 << 10),
+ SI476X_RSQREP = (1 << 11),
+};
+
+enum si476x_rdsint_sources {
+ SI476X_RDSTPPTY = (1 << 4),
+ SI476X_RDSPI = (1 << 3),
+ SI476X_RDSSYNC = (1 << 1),
+ SI476X_RDSRECV = (1 << 0),
+};
+
+enum si476x_status_response_bits {
+ SI476X_CTS = (1 << 7),
+ SI476X_ERR = (1 << 6),
+ /* Status response for WB receiver */
+ SI476X_WB_ASQ_INT = (1 << 4),
+ SI476X_RSQ_INT = (1 << 3),
+ /* Status response for FM receiver */
+ SI476X_FM_RDS_INT = (1 << 2),
+ SI476X_ACF_INT = (1 << 1),
+ SI476X_STC_INT = (1 << 0),
+};
+
+/* -------------------- si476x-prop.c ----------------------- */
+
+enum si476x_common_receiver_properties {
+ SI476X_PROP_INT_CTL_ENABLE = 0x0000,
+ SI476X_PROP_DIGITAL_IO_INPUT_SAMPLE_RATE = 0x0200,
+ SI476X_PROP_DIGITAL_IO_INPUT_FORMAT = 0x0201,
+ SI476X_PROP_DIGITAL_IO_OUTPUT_SAMPLE_RATE = 0x0202,
+ SI476X_PROP_DIGITAL_IO_OUTPUT_FORMAT = 0x0203,
+
+ SI476X_PROP_SEEK_BAND_BOTTOM = 0x1100,
+ SI476X_PROP_SEEK_BAND_TOP = 0x1101,
+ SI476X_PROP_SEEK_FREQUENCY_SPACING = 0x1102,
+
+ SI476X_PROP_VALID_MAX_TUNE_ERROR = 0x2000,
+ SI476X_PROP_VALID_SNR_THRESHOLD = 0x2003,
+ SI476X_PROP_VALID_RSSI_THRESHOLD = 0x2004,
+};
+
+enum si476x_am_receiver_properties {
+ SI476X_PROP_AUDIO_PWR_LINE_FILTER = 0x0303,
+};
+
+enum si476x_fm_receiver_properties {
+ SI476X_PROP_AUDIO_DEEMPHASIS = 0x0302,
+
+ SI476X_PROP_FM_RDS_INTERRUPT_SOURCE = 0x4000,
+ SI476X_PROP_FM_RDS_INTERRUPT_FIFO_COUNT = 0x4001,
+ SI476X_PROP_FM_RDS_CONFIG = 0x4002,
+};
+
+enum si476x_prop_audio_pwr_line_filter_bits {
+ SI476X_PROP_PWR_HARMONICS_MASK = 0x001f,
+ SI476X_PROP_PWR_GRID_MASK = 0x0100,
+ SI476X_PROP_PWR_ENABLE_MASK = 0x0200,
+ SI476X_PROP_PWR_GRID_50HZ = 0x0000,
+ SI476X_PROP_PWR_GRID_60HZ = 0x0100,
+};
+
+enum si476x_prop_fm_rds_config_bits {
+ SI476X_PROP_RDSEN_MASK = 0x1,
+ SI476X_PROP_RDSEN = 0x1,
+};
+
+
+struct regmap *devm_regmap_init_si476x(struct si476x_core *);
+
+#endif /* SI476X_CORE_H */
diff --git a/include/linux/mfd/si476x-platform.h b/include/linux/mfd/si476x-platform.h
new file mode 100644
index 000000000..88bb93b7a
--- /dev/null
+++ b/include/linux/mfd/si476x-platform.h
@@ -0,0 +1,267 @@
+/*
+ * include/media/si476x-platform.h -- Platform data specific definitions
+ *
+ * Copyright (C) 2013 Andrey Smirnov
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef __SI476X_PLATFORM_H__
+#define __SI476X_PLATFORM_H__
+
+/* It is possible to select one of the four addresses using pins A0
+ * and A1 on SI476x */
+#define SI476X_I2C_ADDR_1 0x60
+#define SI476X_I2C_ADDR_2 0x61
+#define SI476X_I2C_ADDR_3 0x62
+#define SI476X_I2C_ADDR_4 0x63
+
+enum si476x_iqclk_config {
+ SI476X_IQCLK_NOOP = 0,
+ SI476X_IQCLK_TRISTATE = 1,
+ SI476X_IQCLK_IQ = 21,
+};
+enum si476x_iqfs_config {
+ SI476X_IQFS_NOOP = 0,
+ SI476X_IQFS_TRISTATE = 1,
+ SI476X_IQFS_IQ = 21,
+};
+enum si476x_iout_config {
+ SI476X_IOUT_NOOP = 0,
+ SI476X_IOUT_TRISTATE = 1,
+ SI476X_IOUT_OUTPUT = 22,
+};
+enum si476x_qout_config {
+ SI476X_QOUT_NOOP = 0,
+ SI476X_QOUT_TRISTATE = 1,
+ SI476X_QOUT_OUTPUT = 22,
+};
+
+enum si476x_dclk_config {
+ SI476X_DCLK_NOOP = 0,
+ SI476X_DCLK_TRISTATE = 1,
+ SI476X_DCLK_DAUDIO = 10,
+};
+
+enum si476x_dfs_config {
+ SI476X_DFS_NOOP = 0,
+ SI476X_DFS_TRISTATE = 1,
+ SI476X_DFS_DAUDIO = 10,
+};
+
+enum si476x_dout_config {
+ SI476X_DOUT_NOOP = 0,
+ SI476X_DOUT_TRISTATE = 1,
+ SI476X_DOUT_I2S_OUTPUT = 12,
+ SI476X_DOUT_I2S_INPUT = 13,
+};
+
+enum si476x_xout_config {
+ SI476X_XOUT_NOOP = 0,
+ SI476X_XOUT_TRISTATE = 1,
+ SI476X_XOUT_I2S_INPUT = 13,
+ SI476X_XOUT_MODE_SELECT = 23,
+};
+
+enum si476x_icin_config {
+ SI476X_ICIN_NOOP = 0,
+ SI476X_ICIN_TRISTATE = 1,
+ SI476X_ICIN_GPO1_HIGH = 2,
+ SI476X_ICIN_GPO1_LOW = 3,
+ SI476X_ICIN_IC_LINK = 30,
+};
+
+enum si476x_icip_config {
+ SI476X_ICIP_NOOP = 0,
+ SI476X_ICIP_TRISTATE = 1,
+ SI476X_ICIP_GPO2_HIGH = 2,
+ SI476X_ICIP_GPO2_LOW = 3,
+ SI476X_ICIP_IC_LINK = 30,
+};
+
+enum si476x_icon_config {
+ SI476X_ICON_NOOP = 0,
+ SI476X_ICON_TRISTATE = 1,
+ SI476X_ICON_I2S = 10,
+ SI476X_ICON_IC_LINK = 30,
+};
+
+enum si476x_icop_config {
+ SI476X_ICOP_NOOP = 0,
+ SI476X_ICOP_TRISTATE = 1,
+ SI476X_ICOP_I2S = 10,
+ SI476X_ICOP_IC_LINK = 30,
+};
+
+
+enum si476x_lrout_config {
+ SI476X_LROUT_NOOP = 0,
+ SI476X_LROUT_TRISTATE = 1,
+ SI476X_LROUT_AUDIO = 2,
+ SI476X_LROUT_MPX = 3,
+};
+
+
+enum si476x_intb_config {
+ SI476X_INTB_NOOP = 0,
+ SI476X_INTB_TRISTATE = 1,
+ SI476X_INTB_DAUDIO = 10,
+ SI476X_INTB_IRQ = 40,
+};
+
+enum si476x_a1_config {
+ SI476X_A1_NOOP = 0,
+ SI476X_A1_TRISTATE = 1,
+ SI476X_A1_IRQ = 40,
+};
+
+
+struct si476x_pinmux {
+ enum si476x_dclk_config dclk;
+ enum si476x_dfs_config dfs;
+ enum si476x_dout_config dout;
+ enum si476x_xout_config xout;
+
+ enum si476x_iqclk_config iqclk;
+ enum si476x_iqfs_config iqfs;
+ enum si476x_iout_config iout;
+ enum si476x_qout_config qout;
+
+ enum si476x_icin_config icin;
+ enum si476x_icip_config icip;
+ enum si476x_icon_config icon;
+ enum si476x_icop_config icop;
+
+ enum si476x_lrout_config lrout;
+
+ enum si476x_intb_config intb;
+ enum si476x_a1_config a1;
+};
+
+enum si476x_ibias6x {
+ SI476X_IBIAS6X_OTHER = 0,
+ SI476X_IBIAS6X_RCVR1_NON_4MHZ_CLK = 1,
+};
+
+enum si476x_xstart {
+ SI476X_XSTART_MULTIPLE_TUNER = 0x11,
+ SI476X_XSTART_NORMAL = 0x77,
+};
+
+enum si476x_freq {
+ SI476X_FREQ_4_MHZ = 0,
+ SI476X_FREQ_37P209375_MHZ = 1,
+ SI476X_FREQ_36P4_MHZ = 2,
+ SI476X_FREQ_37P8_MHZ = 3,
+};
+
+enum si476x_xmode {
+ SI476X_XMODE_CRYSTAL_RCVR1 = 1,
+ SI476X_XMODE_EXT_CLOCK = 2,
+ SI476X_XMODE_CRYSTAL_RCVR2_3 = 3,
+};
+
+enum si476x_xbiashc {
+ SI476X_XBIASHC_SINGLE_RECEIVER = 0,
+ SI476X_XBIASHC_MULTIPLE_RECEIVER = 1,
+};
+
+enum si476x_xbias {
+ SI476X_XBIAS_RCVR2_3 = 0,
+ SI476X_XBIAS_4MHZ_RCVR1 = 3,
+ SI476X_XBIAS_RCVR1 = 7,
+};
+
+enum si476x_func {
+ SI476X_FUNC_BOOTLOADER = 0,
+ SI476X_FUNC_FM_RECEIVER = 1,
+ SI476X_FUNC_AM_RECEIVER = 2,
+ SI476X_FUNC_WB_RECEIVER = 3,
+};
+
+
+/**
+ * struct si476x_power_up_args - arguments used with the POWER_UP command
+ * @xcload: Selects the amount of additional on-chip capacitance to
+ * be connected between XTAL1 and gnd and between XTAL2 and
+ * GND. One half of the capacitance value shown here is the
+ * additional load capacitance presented to the xtal. The
+ * minimum step size is 0.277 pF. Recommended value is 0x28
+ * but it will be layout dependent. Range is 0–0x3F i.e.
+ * (0–16.33 pF)
+ * @ctsien: enable CTSINT (interrupt request when the CTS condition
+ * arises) when set
+ * @intsel: when set, the A1 pin becomes the interrupt pin; otherwise,
+ * INTB is the interrupt pin
+ * @func: selects the boot function of the device, i.e.
+ * SI476X_FUNC_BOOTLOADER - Boot loader
+ * SI476X_FUNC_FM_RECEIVER - FM receiver
+ * SI476X_FUNC_AM_RECEIVER - AM receiver
+ * SI476X_FUNC_WB_RECEIVER - Weatherband receiver
+ * @freq: oscillator's crystal frequency:
+ * SI476X_FREQ_4_MHZ - 4 MHz
+ * SI476X_FREQ_37P209375_MHZ - 37.209375 MHz
+ * SI476X_FREQ_36P4_MHZ - 36.4 MHz
+ * SI476X_FREQ_37P8_MHZ - 37.8 MHz
+ */
+struct si476x_power_up_args {
+ enum si476x_ibias6x ibias6x;
+ enum si476x_xstart xstart;
+ u8 xcload;
+ bool fastboot;
+ enum si476x_xbiashc xbiashc;
+ enum si476x_xbias xbias;
+ enum si476x_func func;
+ enum si476x_freq freq;
+ enum si476x_xmode xmode;
+};
+
+
+/**
+ * enum si476x_phase_diversity_mode - possible phase diversity modes
+ * for SI4764/5/6/7 chips.
+ *
+ * @SI476X_PHDIV_DISABLED: Phase diversity feature is
+ * disabled.
+ * @SI476X_PHDIV_PRIMARY_COMBINING: Tuner works as a primary tuner
+ * in combination with a
+ * secondary one.
+ * @SI476X_PHDIV_PRIMARY_ANTENNA: Tuner works as a primary tuner
+ * using only its own antenna.
+ * @SI476X_PHDIV_SECONDARY_ANTENNA: Tuner works as a primary tuner
+ * using the secondary tuner's antenna.
+ * @SI476X_PHDIV_SECONDARY_COMBINING: Tuner works as a secondary
+ * tuner in combination with the
+ * primary one.
+ */
+enum si476x_phase_diversity_mode {
+ SI476X_PHDIV_DISABLED = 0,
+ SI476X_PHDIV_PRIMARY_COMBINING = 1,
+ SI476X_PHDIV_PRIMARY_ANTENNA = 2,
+ SI476X_PHDIV_SECONDARY_ANTENNA = 3,
+ SI476X_PHDIV_SECONDARY_COMBINING = 5,
+};
+
+
+/*
+ * Platform dependent definition
+ */
+struct si476x_platform_data {
+ int gpio_reset; /* < 0 if not used */
+
+ struct si476x_power_up_args power_up_parameters;
+ enum si476x_phase_diversity_mode diversity_mode;
+
+ struct si476x_pinmux pinmux;
+};
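+
+/*
+ * Illustrative sketch (not part of the original header): a board file
+ * could describe a single FM receiver with a 37.209375 MHz crystal
+ * roughly as below; every value is a hypothetical example, not a
+ * recommendation.
+ *
+ *	static struct si476x_platform_data si476x_pdata = {
+ *		.gpio_reset = -1,		(reset line not wired)
+ *		.power_up_parameters = {
+ *			.func  = SI476X_FUNC_FM_RECEIVER,
+ *			.freq  = SI476X_FREQ_37P209375_MHZ,
+ *			.xmode = SI476X_XMODE_CRYSTAL_RCVR1,
+ *		},
+ *		.diversity_mode = SI476X_PHDIV_DISABLED,
+ *	};
+ */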
+
+
+#endif /* __SI476X_PLATFORM_H__ */
diff --git a/include/linux/mfd/si476x-reports.h b/include/linux/mfd/si476x-reports.h
new file mode 100644
index 000000000..e0b9455a7
--- /dev/null
+++ b/include/linux/mfd/si476x-reports.h
@@ -0,0 +1,163 @@
+/*
+ * include/linux/mfd/si476x-reports.h -- Definitions of the data formats
+ * returned by debugfs hooks
+ *
+ * Copyright (C) 2013 Andrey Smirnov
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef __SI476X_REPORTS_H__
+#define __SI476X_REPORTS_H__
+
+/**
+ * struct si476x_rsq_status - structure containing received signal
+ * quality
+ * @multhint: Multipath Detect High.
+ * true - Indicates that the value is below
+ * FM_RSQ_MULTIPATH_HIGH_THRESHOLD
+ * false - Indicates that the value is above
+ * FM_RSQ_MULTIPATH_HIGH_THRESHOLD
+ * @multlint: Multipath Detect Low.
+ * true - Indicates that the value is below
+ * FM_RSQ_MULTIPATH_LOW_THRESHOLD
+ * false - Indicates that the value is above
+ * FM_RSQ_MULTIPATH_LOW_THRESHOLD
+ * @snrhint: SNR Detect High.
+ * true - Indicates that the value is below
+ * FM_RSQ_SNR_HIGH_THRESHOLD
+ * false - Indicates that the value is above
+ * FM_RSQ_SNR_HIGH_THRESHOLD
+ * @snrlint: SNR Detect Low.
+ * true - Indicates that the value is below
+ * FM_RSQ_SNR_LOW_THRESHOLD
+ * false - Indicates that the value is above
+ * FM_RSQ_SNR_LOW_THRESHOLD
+ * @rssihint: RSSI Detect High.
+ * true - Indicates that the value is below
+ * FM_RSQ_RSSI_HIGH_THRESHOLD
+ * false - Indicates that the value is above
+ * FM_RSQ_RSSI_HIGH_THRESHOLD
+ * @rssilint: RSSI Detect Low.
+ * true - Indicates that the value is below
+ * FM_RSQ_RSSI_LOW_THRESHOLD
+ * false - Indicates that the value is above
+ * FM_RSQ_RSSI_LOW_THRESHOLD
+ * @bltf: Band Limit.
+ * Set if seek command hits the band limit or wrapped to
+ * the original frequency.
+ * @snr_ready: SNR measurement in progress.
+ * @rssiready: RSSI measurement in progress.
+ * @afcrl: Set if FREQOFF >= MAX_TUNE_ERROR
+ * @valid: Set if the channel is valid
+ * rssi < FM_VALID_RSSI_THRESHOLD
+ * snr < FM_VALID_SNR_THRESHOLD
+ * tune_error < FM_VALID_MAX_TUNE_ERROR
+ * @readfreq: Current tuned frequency.
+ * @freqoff: Signed frequency offset.
+ * @rssi: Received Signal Strength Indicator (dBuV).
+ * @snr: RF SNR Indicator (dB).
+ * @lassi: Low side Adjacent (100 kHz) Channel Strength Indicator
+ * @hassi: High side Adjacent (100 kHz) Channel Strength Indicator
+ * @mult: Multipath indicator
+ * @dev: Who knows? But values may vary.
+ * @readantcap: Antenna tuning capacity value.
+ * @assi: Adjacent Channel (+/- 200 kHz) Strength Indicator
+ * @usn: Ultrasonic Noise Indicator in -dBFS
+ */
+struct si476x_rsq_status_report {
+ __u8 multhint, multlint;
+ __u8 snrhint, snrlint;
+ __u8 rssihint, rssilint;
+ __u8 bltf;
+ __u8 snr_ready;
+ __u8 rssiready;
+ __u8 injside;
+ __u8 afcrl;
+ __u8 valid;
+
+ __u16 readfreq;
+ __s8 freqoff;
+ __s8 rssi;
+ __s8 snr;
+ __s8 issi;
+ __s8 lassi, hassi;
+ __s8 mult;
+ __u8 dev;
+ __u16 readantcap;
+ __s8 assi;
+ __s8 usn;
+
+ __u8 pilotdev;
+ __u8 rdsdev;
+ __u8 assidev;
+ __u8 strongdev;
+ __u16 rdspi;
+} __packed;
+
+/**
+ * struct si476x_acf_status_report - ACF report results
+ *
+ * @blend_int: If set, indicates that stereo separation has crossed
+ * below the blend threshold as set by FM_ACF_BLEND_THRESHOLD
+ * @hblend_int: If set, indicates that HiBlend cutoff frequency is
+ * lower than threshold as set by FM_ACF_HBLEND_THRESHOLD
+ * @hicut_int: If set, indicates that HiCut cutoff frequency is lower
+ * than the threshold set by ACF_
+
+ */
+struct si476x_acf_status_report {
+ __u8 blend_int;
+ __u8 hblend_int;
+ __u8 hicut_int;
+ __u8 chbw_int;
+ __u8 softmute_int;
+ __u8 smute;
+ __u8 smattn;
+ __u8 chbw;
+ __u8 hicut;
+ __u8 hiblend;
+ __u8 pilot;
+ __u8 stblend;
+} __packed;
+
+enum si476x_fmagc {
+ SI476X_FMAGC_10K_OHM = 0,
+ SI476X_FMAGC_800_OHM = 1,
+ SI476X_FMAGC_400_OHM = 2,
+ SI476X_FMAGC_200_OHM = 4,
+ SI476X_FMAGC_100_OHM = 8,
+ SI476X_FMAGC_50_OHM = 16,
+ SI476X_FMAGC_25_OHM = 32,
+ SI476X_FMAGC_12P5_OHM = 64,
+ SI476X_FMAGC_6P25_OHM = 128,
+};
+
+struct si476x_agc_status_report {
+ __u8 mxhi;
+ __u8 mxlo;
+ __u8 lnahi;
+ __u8 lnalo;
+ __u8 fmagc1;
+ __u8 fmagc2;
+ __u8 pgagain;
+ __u8 fmwblang;
+} __packed;
+
+struct si476x_rds_blockcount_report {
+ __u16 expected;
+ __u16 received;
+ __u16 uncorrectable;
+} __packed;
+
+#endif /* __SI476X_REPORTS_H__ */
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
new file mode 100644
index 000000000..b0925fa3e
--- /dev/null
+++ b/include/linux/mfd/sky81452.h
@@ -0,0 +1,31 @@
+/*
+ * sky81452.h SKY81452 MFD driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SKY81452_H
+#define _SKY81452_H
+
+#include <linux/platform_data/sky81452-backlight.h>
+#include <linux/regulator/machine.h>
+
+struct sky81452_platform_data {
+ struct sky81452_bl_platform_data *bl_pdata;
+ struct regulator_init_data *regulator_init_data;
+};
+
+#endif
diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h
new file mode 100644
index 000000000..9747b29f3
--- /dev/null
+++ b/include/linux/mfd/smsc.h
@@ -0,0 +1,109 @@
+/*
+ * SMSC ECE1099
+ *
+ * Copyright 2012 Texas Instruments Inc.
+ *
+ * Author: Sourav Poddar <sourav.poddar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_SMSC_H
+#define __LINUX_MFD_SMSC_H
+
+#include <linux/regmap.h>
+
+#define SMSC_ID_ECE1099 1
+#define SMSC_NUM_CLIENTS 2
+
+#define SMSC_BASE_ADDR 0x38
+#define OMAP_GPIO_SMSC_IRQ 151
+
+#define SMSC_MAXGPIO 32
+#define SMSC_BANK(offs) ((offs) >> 3)
+#define SMSC_BIT(offs) (1u << ((offs) & 0x7))
+
+struct smsc {
+ struct device *dev;
+ struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS];
+ struct regmap *regmap;
+ int clk;
+ /* Stored chip id */
+ int id;
+};
+
+struct smsc_gpio;
+struct smsc_keypad;
+
+static inline int smsc_read(struct device *child, unsigned int reg,
+ unsigned int *dest)
+{
+ struct smsc *smsc = dev_get_drvdata(child->parent);
+
+ return regmap_read(smsc->regmap, reg, dest);
+}
+
+static inline int smsc_write(struct device *child, unsigned int reg,
+ unsigned int value)
+{
+ struct smsc *smsc = dev_get_drvdata(child->parent);
+
+ return regmap_write(smsc->regmap, reg, value);
+}
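+
+/*
+ * Illustrative usage sketch (not part of the original header): a child
+ * device of the MFD (keypad or GPIO cell) can read the device ID
+ * register through the helper above; "child" is the cell's
+ * struct device pointer.
+ *
+ *	unsigned int id;
+ *	int ret = smsc_read(child, SMSC_DEV_ID, &id);
+ */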
+
+/* Registers for SMSC */
+#define SMSC_RESET 0xF5
+#define SMSC_GRP_INT 0xF9
+#define SMSC_CLK_CTRL 0xFA
+#define SMSC_WKUP_CTRL 0xFB
+#define SMSC_DEV_ID 0xFC
+#define SMSC_DEV_REV 0xFD
+#define SMSC_VEN_ID_L 0xFE
+#define SMSC_VEN_ID_H 0xFF
+
+/* CLK VALUE */
+#define SMSC_CLK_VALUE 0x13
+
+/* Registers for function GPIO INPUT */
+#define SMSC_GPIO_DATA_IN_START 0x00
+
+/* Registers for function GPIO OUTPUT */
+#define SMSC_GPIO_DATA_OUT_START 0x05
+
+/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/
+#define SMSC_GPIO_INPUT_LOW 0x01
+#define SMSC_GPIO_INPUT_RISING 0x09
+#define SMSC_GPIO_INPUT_FALLING 0x11
+#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19
+#define SMSC_GPIO_OUTPUT_PP 0x21
+#define SMSC_GPIO_OUTPUT_OP 0x31
+
+#define GRP_INT_STAT 0xf9
+#define SMSC_GPI_INT 0x0f
+#define SMSC_CFG_START 0x0A
+
+/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/
+#define SMSC_GPIO_INT_STAT_START 0x32
+
+/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/
+#define SMSC_GPIO_INT_MASK_START 0x37
+
+/* Registers for SMSC function KEYPAD*/
+#define SMSC_KP_OUT 0x40
+#define SMSC_KP_IN 0x41
+#define SMSC_KP_INT_STAT 0x42
+#define SMSC_KP_INT_MASK 0x43
+
+/* Definitions for keypad */
+#define SMSC_KP_KSO 0x70
+#define SMSC_KP_KSI 0x51
+#define SMSC_KSO_ALL_LOW 0x20
+#define SMSC_KP_SET_LOW_PWR 0x0B
+#define SMSC_KP_SET_HIGH 0xFF
+#define SMSC_KSO_EVAL 0x00
+
+#endif /* __LINUX_MFD_SMSC_H */
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
new file mode 100644
index 000000000..9a855ac11
--- /dev/null
+++ b/include/linux/mfd/sta2x11-mfd.h
@@ -0,0 +1,518 @@
+/*
+ * Copyright (c) 2009-2011 Wind River Systems, Inc.
+ * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
+ * functions in one PCI endpoint function. This driver simply
+ * registers the platform devices in this I/O memory region and exports
+ * a few functions to access common registers.
+ */
+
+#ifndef __STA2X11_MFD_H
+#define __STA2X11_MFD_H
+#include <linux/types.h>
+#include <linux/pci.h>
+
+enum sta2x11_mfd_plat_dev {
+ sta2x11_sctl = 0,
+ sta2x11_gpio,
+ sta2x11_scr,
+ sta2x11_time,
+ sta2x11_apbreg,
+ sta2x11_apb_soc_regs,
+ sta2x11_vic,
+ sta2x11_n_mfd_plat_devs,
+};
+
+#define STA2X11_MFD_SCTL_NAME "sta2x11-sctl"
+#define STA2X11_MFD_GPIO_NAME "sta2x11-gpio"
+#define STA2X11_MFD_SCR_NAME "sta2x11-scr"
+#define STA2X11_MFD_TIME_NAME "sta2x11-time"
+#define STA2X11_MFD_APBREG_NAME "sta2x11-apbreg"
+#define STA2X11_MFD_APB_SOC_REGS_NAME "sta2x11-apb-soc-regs"
+#define STA2X11_MFD_VIC_NAME "sta2x11-vic"
+
+extern u32
+__sta2x11_mfd_mask(struct pci_dev *, u32, u32, u32, enum sta2x11_mfd_plat_dev);
+
+/*
+ * The MFD PCI block includes the GPIO peripherals and other register blocks.
+ * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
+ */
+#define GSTA_GPIO_PER_BLOCK 32
+#define GSTA_NR_BLOCKS 4
+#define GSTA_NR_GPIO (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
+
+/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
+struct sta2x11_gpio_pdata {
+ unsigned pinconfig[GSTA_NR_GPIO];
+};
+
+/* Macros below lifted from sh_pfc.h, with minor differences */
+#define PINMUX_TYPE_NONE 0
+#define PINMUX_TYPE_FUNCTION 1
+#define PINMUX_TYPE_OUTPUT_LOW 2
+#define PINMUX_TYPE_OUTPUT_HIGH 3
+#define PINMUX_TYPE_INPUT 4
+#define PINMUX_TYPE_INPUT_PULLUP 5
+#define PINMUX_TYPE_INPUT_PULLDOWN 6
+
+/* Give names to GPIO pins, like PXA does, taken from the manual */
+#define STA2X11_GPIO0 0
+#define STA2X11_GPIO1 1
+#define STA2X11_GPIO2 2
+#define STA2X11_GPIO3 3
+#define STA2X11_GPIO4 4
+#define STA2X11_GPIO5 5
+#define STA2X11_GPIO6 6
+#define STA2X11_GPIO7 7
+#define STA2X11_GPIO8_RGBOUT_RED7 8
+#define STA2X11_GPIO9_RGBOUT_RED6 9
+#define STA2X11_GPIO10_RGBOUT_RED5 10
+#define STA2X11_GPIO11_RGBOUT_RED4 11
+#define STA2X11_GPIO12_RGBOUT_RED3 12
+#define STA2X11_GPIO13_RGBOUT_RED2 13
+#define STA2X11_GPIO14_RGBOUT_RED1 14
+#define STA2X11_GPIO15_RGBOUT_RED0 15
+#define STA2X11_GPIO16_RGBOUT_GREEN7 16
+#define STA2X11_GPIO17_RGBOUT_GREEN6 17
+#define STA2X11_GPIO18_RGBOUT_GREEN5 18
+#define STA2X11_GPIO19_RGBOUT_GREEN4 19
+#define STA2X11_GPIO20_RGBOUT_GREEN3 20
+#define STA2X11_GPIO21_RGBOUT_GREEN2 21
+#define STA2X11_GPIO22_RGBOUT_GREEN1 22
+#define STA2X11_GPIO23_RGBOUT_GREEN0 23
+#define STA2X11_GPIO24_RGBOUT_BLUE7 24
+#define STA2X11_GPIO25_RGBOUT_BLUE6 25
+#define STA2X11_GPIO26_RGBOUT_BLUE5 26
+#define STA2X11_GPIO27_RGBOUT_BLUE4 27
+#define STA2X11_GPIO28_RGBOUT_BLUE3 28
+#define STA2X11_GPIO29_RGBOUT_BLUE2 29
+#define STA2X11_GPIO30_RGBOUT_BLUE1 30
+#define STA2X11_GPIO31_RGBOUT_BLUE0 31
+#define STA2X11_GPIO32_RGBOUT_VSYNCH 32
+#define STA2X11_GPIO33_RGBOUT_HSYNCH 33
+#define STA2X11_GPIO34_RGBOUT_DEN 34
+#define STA2X11_GPIO35_ETH_CRS_DV 35
+#define STA2X11_GPIO36_ETH_TXD1 36
+#define STA2X11_GPIO37_ETH_TXD0 37
+#define STA2X11_GPIO38_ETH_TX_EN 38
+#define STA2X11_GPIO39_MDIO 39
+#define STA2X11_GPIO40_ETH_REF_CLK 40
+#define STA2X11_GPIO41_ETH_RXD1 41
+#define STA2X11_GPIO42_ETH_RXD0 42
+#define STA2X11_GPIO43_MDC 43
+#define STA2X11_GPIO44_CAN_TX 44
+#define STA2X11_GPIO45_CAN_RX 45
+#define STA2X11_GPIO46_MLB_DAT 46
+#define STA2X11_GPIO47_MLB_SIG 47
+#define STA2X11_GPIO48_SPI0_CLK 48
+#define STA2X11_GPIO49_SPI0_TXD 49
+#define STA2X11_GPIO50_SPI0_RXD 50
+#define STA2X11_GPIO51_SPI0_FRM 51
+#define STA2X11_GPIO52_SPI1_CLK 52
+#define STA2X11_GPIO53_SPI1_TXD 53
+#define STA2X11_GPIO54_SPI1_RXD 54
+#define STA2X11_GPIO55_SPI1_FRM 55
+#define STA2X11_GPIO56_SPI2_CLK 56
+#define STA2X11_GPIO57_SPI2_TXD 57
+#define STA2X11_GPIO58_SPI2_RXD 58
+#define STA2X11_GPIO59_SPI2_FRM 59
+#define STA2X11_GPIO60_I2C0_SCL 60
+#define STA2X11_GPIO61_I2C0_SDA 61
+#define STA2X11_GPIO62_I2C1_SCL 62
+#define STA2X11_GPIO63_I2C1_SDA 63
+#define STA2X11_GPIO64_I2C2_SCL 64
+#define STA2X11_GPIO65_I2C2_SDA 65
+#define STA2X11_GPIO66_I2C3_SCL 66
+#define STA2X11_GPIO67_I2C3_SDA 67
+#define STA2X11_GPIO68_MSP0_RCK 68
+#define STA2X11_GPIO69_MSP0_RXD 69
+#define STA2X11_GPIO70_MSP0_RFS 70
+#define STA2X11_GPIO71_MSP0_TCK 71
+#define STA2X11_GPIO72_MSP0_TXD 72
+#define STA2X11_GPIO73_MSP0_TFS 73
+#define STA2X11_GPIO74_MSP0_SCK 74
+#define STA2X11_GPIO75_MSP1_CK 75
+#define STA2X11_GPIO76_MSP1_RXD 76
+#define STA2X11_GPIO77_MSP1_FS 77
+#define STA2X11_GPIO78_MSP1_TXD 78
+#define STA2X11_GPIO79_MSP2_CK 79
+#define STA2X11_GPIO80_MSP2_RXD 80
+#define STA2X11_GPIO81_MSP2_FS 81
+#define STA2X11_GPIO82_MSP2_TXD 82
+#define STA2X11_GPIO83_MSP3_CK 83
+#define STA2X11_GPIO84_MSP3_RXD 84
+#define STA2X11_GPIO85_MSP3_FS 85
+#define STA2X11_GPIO86_MSP3_TXD 86
+#define STA2X11_GPIO87_MSP4_CK 87
+#define STA2X11_GPIO88_MSP4_RXD 88
+#define STA2X11_GPIO89_MSP4_FS 89
+#define STA2X11_GPIO90_MSP4_TXD 90
+#define STA2X11_GPIO91_MSP5_CK 91
+#define STA2X11_GPIO92_MSP5_RXD 92
+#define STA2X11_GPIO93_MSP5_FS 93
+#define STA2X11_GPIO94_MSP5_TXD 94
+#define STA2X11_GPIO95_SDIO3_DAT3 95
+#define STA2X11_GPIO96_SDIO3_DAT2 96
+#define STA2X11_GPIO97_SDIO3_DAT1 97
+#define STA2X11_GPIO98_SDIO3_DAT0 98
+#define STA2X11_GPIO99_SDIO3_CLK 99
+#define STA2X11_GPIO100_SDIO3_CMD 100
+#define STA2X11_GPIO101 101
+#define STA2X11_GPIO102 102
+#define STA2X11_GPIO103 103
+#define STA2X11_GPIO104 104
+#define STA2X11_GPIO105_SDIO2_DAT3 105
+#define STA2X11_GPIO106_SDIO2_DAT2 106
+#define STA2X11_GPIO107_SDIO2_DAT1 107
+#define STA2X11_GPIO108_SDIO2_DAT0 108
+#define STA2X11_GPIO109_SDIO2_CLK 109
+#define STA2X11_GPIO110_SDIO2_CMD 110
+#define STA2X11_GPIO111 111
+#define STA2X11_GPIO112 112
+#define STA2X11_GPIO113 113
+#define STA2X11_GPIO114 114
+#define STA2X11_GPIO115_SDIO1_DAT3 115
+#define STA2X11_GPIO116_SDIO1_DAT2 116
+#define STA2X11_GPIO117_SDIO1_DAT1 117
+#define STA2X11_GPIO118_SDIO1_DAT0 118
+#define STA2X11_GPIO119_SDIO1_CLK 119
+#define STA2X11_GPIO120_SDIO1_CMD 120
+#define STA2X11_GPIO121 121
+#define STA2X11_GPIO122 122
+#define STA2X11_GPIO123 123
+#define STA2X11_GPIO124 124
+#define STA2X11_GPIO125_UART2_TXD 125
+#define STA2X11_GPIO126_UART2_RXD 126
+#define STA2X11_GPIO127_UART3_TXD 127
+
+/*
+ * The APB bridge has its own registers, needed by our users as well.
+ * They are accessed with the following read/mask/write function.
+ */
+static inline u32
+sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apbreg);
+}
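+
+/*
+ * Illustrative usage sketch (not part of the original header): gating
+ * the CAN peripheral clock through the bridge could, for instance, be
+ * written as ("pdev" being the STA2X11 MFD PCI device):
+ *
+ *	sta2x11_apbreg_mask(pdev, APBREG_PCG, APBREG_CAN, APBREG_CAN);
+ */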
+
+/* CAN and MLB */
+#define APBREG_BSR 0x00 /* Bridge Status Reg */
+#define APBREG_PAER 0x08 /* Peripherals Address Error Reg */
+#define APBREG_PWAC 0x20 /* Peripheral Write Access Control reg */
+#define APBREG_PRAC 0x40 /* Peripheral Read Access Control reg */
+#define APBREG_PCG 0x60 /* Peripheral Clock Gating Reg */
+#define APBREG_PUR 0x80 /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG 0xA0 /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_CAN (1 << 1)
+#define APBREG_MLB (1 << 3)
+
+/* SARAC */
+#define APBREG_BSR_SARAC 0x100 /* Bridge Status Reg */
+#define APBREG_PAER_SARAC 0x108 /* Peripherals Address Error Reg */
+#define APBREG_PWAC_SARAC 0x120 /* Peripheral Write Access Control reg */
+#define APBREG_PRAC_SARAC 0x140 /* Peripheral Read Access Control reg */
+#define APBREG_PCG_SARAC 0x160 /* Peripheral Clock Gating Reg */
+#define APBREG_PUR_SARAC 0x180 /* Peripheral Under Reset Reg */
+#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
+
+#define APBREG_SARAC (1 << 2)
+
+/*
+ * The system controller has its own registers. Some of these are accessed
+ * by our users as well, using the following read/mask/write function.
+ */
+static inline
+u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_sctl);
+}
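+
+/*
+ * Illustrative usage sketch (not part of the original header): enabling
+ * the UART2 peripheral clock through the system controller might be
+ * expressed as:
+ *
+ *	sta2x11_sctl_mask(pdev, SCTL_SCPEREN0, SCTL_SCPEREN0_UART2,
+ *			  SCTL_SCPEREN0_UART2);
+ */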
+
+#define SCTL_SCCTL 0x00 /* System controller control register */
+#define SCTL_ARMCFG 0x04 /* ARM configuration register */
+#define SCTL_SCPLLCTL 0x08 /* PLL control status register */
+
+#define SCTL_SCPLLCTL_AUDIO_PLL_PD BIT(1)
+#define SCTL_SCPLLCTL_FRAC_CONTROL BIT(3)
+#define SCTL_SCPLLCTL_STRB_BYPASS BIT(6)
+#define SCTL_SCPLLCTL_STRB_INPUT BIT(8)
+
+#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */
+
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_MASK 0xff
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_NDIV_SHIFT 10
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_MASK 7
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_IDF_SHIFT 21
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_MASK 7
+#define SCTL_SCPLLFCTRL_AUDIO_PLL_ODF_SHIFT 18
+#define SCTL_SCPLLFCTRL_DITHER_DISABLE_MASK 0x03
+#define SCTL_SCPLLFCTRL_DITHER_DISABLE_SHIFT 4
+
+
+#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */
+
+#define SCTL_SCRESFRACT_MASK 0x0000ffff
+
+
+#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */
+#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */
+#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */
+#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */
+#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */
+#define SCTL_SCGRST 0x28 /* Peripheral global reset */
+#define SCTL_SCPCIECSBRST 0x2c /* PCIe PAB CSB reset status register */
+#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */
+#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */
+#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */
+#define SCTL_SCPCIPMSR2 0x3c /* PCI power management status 2 */
+#define SCTL_SCPCIPMSR3 0x40 /* PCI power management status 3 */
+#define SCTL_SCINTREN 0x44 /* Interrupt enable */
+#define SCTL_SCRISR 0x48 /* RAW interrupt status */
+#define SCTL_SCCLKSTAT0 0x4c /* Peripheral clocks status 0 */
+#define SCTL_SCCLKSTAT1 0x50 /* Peripheral clocks status 1 */
+#define SCTL_SCCLKSTAT2 0x54 /* Peripheral clocks status 2 */
+#define SCTL_SCRSTSTA 0x58 /* Reset status register */
+
+#define SCTL_SCRESCTRL1_USB_PHY_POR (1 << 0)
+#define SCTL_SCRESCTRL1_USB_OTG (1 << 1)
+#define SCTL_SCRESCTRL1_USB_HRST (1 << 2)
+#define SCTL_SCRESCTRL1_USB_PHY_HOST (1 << 3)
+#define SCTL_SCRESCTRL1_SATAII (1 << 4)
+#define SCTL_SCRESCTRL1_VIP (1 << 5)
+#define SCTL_SCRESCTRL1_PER_MMC0 (1 << 6)
+#define SCTL_SCRESCTRL1_PER_MMC1 (1 << 7)
+#define SCTL_SCRESCTRL1_PER_GPIO0 (1 << 8)
+#define SCTL_SCRESCTRL1_PER_GPIO1 (1 << 9)
+#define SCTL_SCRESCTRL1_PER_GPIO2 (1 << 10)
+#define SCTL_SCRESCTRL1_PER_GPIO3 (1 << 11)
+#define SCTL_SCRESCTRL1_PER_MTU0 (1 << 12)
+#define SCTL_SCRESCTRL1_KER_SPI0 (1 << 13)
+#define SCTL_SCRESCTRL1_KER_SPI1 (1 << 14)
+#define SCTL_SCRESCTRL1_KER_SPI2 (1 << 15)
+#define SCTL_SCRESCTRL1_KER_MCI0 (1 << 16)
+#define SCTL_SCRESCTRL1_KER_MCI1 (1 << 17)
+#define SCTL_SCRESCTRL1_PRE_HSI2C0 (1 << 18)
+#define SCTL_SCRESCTRL1_PER_HSI2C1 (1 << 19)
+#define SCTL_SCRESCTRL1_PER_HSI2C2 (1 << 20)
+#define SCTL_SCRESCTRL1_PER_HSI2C3 (1 << 21)
+#define SCTL_SCRESCTRL1_PER_MSP0 (1 << 22)
+#define SCTL_SCRESCTRL1_PER_MSP1 (1 << 23)
+#define SCTL_SCRESCTRL1_PER_MSP2 (1 << 24)
+#define SCTL_SCRESCTRL1_PER_MSP3 (1 << 25)
+#define SCTL_SCRESCTRL1_PER_MSP4 (1 << 26)
+#define SCTL_SCRESCTRL1_PER_MSP5 (1 << 27)
+#define SCTL_SCRESCTRL1_PER_MMC (1 << 28)
+#define SCTL_SCRESCTRL1_KER_MSP0 (1 << 29)
+#define SCTL_SCRESCTRL1_KER_MSP1 (1 << 30)
+#define SCTL_SCRESCTRL1_KER_MSP2 (1 << 31)
+
+#define SCTL_SCPEREN0_UART0 (1 << 0)
+#define SCTL_SCPEREN0_UART1 (1 << 1)
+#define SCTL_SCPEREN0_UART2 (1 << 2)
+#define SCTL_SCPEREN0_UART3 (1 << 3)
+#define SCTL_SCPEREN0_MSP0 (1 << 4)
+#define SCTL_SCPEREN0_MSP1 (1 << 5)
+#define SCTL_SCPEREN0_MSP2 (1 << 6)
+#define SCTL_SCPEREN0_MSP3 (1 << 7)
+#define SCTL_SCPEREN0_MSP4 (1 << 8)
+#define SCTL_SCPEREN0_MSP5 (1 << 9)
+#define SCTL_SCPEREN0_SPI0 (1 << 10)
+#define SCTL_SCPEREN0_SPI1 (1 << 11)
+#define SCTL_SCPEREN0_SPI2 (1 << 12)
+#define SCTL_SCPEREN0_I2C0 (1 << 13)
+#define SCTL_SCPEREN0_I2C1 (1 << 14)
+#define SCTL_SCPEREN0_I2C2 (1 << 15)
+#define SCTL_SCPEREN0_I2C3 (1 << 16)
+#define SCTL_SCPEREN0_SVDO_LVDS (1 << 17)
+#define SCTL_SCPEREN0_USB_HOST (1 << 18)
+#define SCTL_SCPEREN0_USB_OTG (1 << 19)
+#define SCTL_SCPEREN0_MCI0 (1 << 20)
+#define SCTL_SCPEREN0_MCI1 (1 << 21)
+#define SCTL_SCPEREN0_MCI2 (1 << 22)
+#define SCTL_SCPEREN0_MCI3 (1 << 23)
+#define SCTL_SCPEREN0_SATA (1 << 24)
+#define SCTL_SCPEREN0_ETHERNET (1 << 25)
+#define SCTL_SCPEREN0_VIC (1 << 26)
+#define SCTL_SCPEREN0_DMA_AUDIO (1 << 27)
+#define SCTL_SCPEREN0_DMA_SOC (1 << 28)
+#define SCTL_SCPEREN0_RAM (1 << 29)
+#define SCTL_SCPEREN0_VIP (1 << 30)
+#define SCTL_SCPEREN0_ARM (1 << 31)
+
+#define SCTL_SCPEREN1_UART0 (1 << 0)
+#define SCTL_SCPEREN1_UART1 (1 << 1)
+#define SCTL_SCPEREN1_UART2 (1 << 2)
+#define SCTL_SCPEREN1_UART3 (1 << 3)
+#define SCTL_SCPEREN1_MSP0 (1 << 4)
+#define SCTL_SCPEREN1_MSP1 (1 << 5)
+#define SCTL_SCPEREN1_MSP2 (1 << 6)
+#define SCTL_SCPEREN1_MSP3 (1 << 7)
+#define SCTL_SCPEREN1_MSP4 (1 << 8)
+#define SCTL_SCPEREN1_MSP5 (1 << 9)
+#define SCTL_SCPEREN1_SPI0 (1 << 10)
+#define SCTL_SCPEREN1_SPI1 (1 << 11)
+#define SCTL_SCPEREN1_SPI2 (1 << 12)
+#define SCTL_SCPEREN1_I2C0 (1 << 13)
+#define SCTL_SCPEREN1_I2C1 (1 << 14)
+#define SCTL_SCPEREN1_I2C2 (1 << 15)
+#define SCTL_SCPEREN1_I2C3 (1 << 16)
+#define SCTL_SCPEREN1_USB_PHY (1 << 17)
+
+/*
+ * APB-SOC registers
+ */
+static inline
+u32 sta2x11_apb_soc_regs_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
+{
+ return __sta2x11_mfd_mask(pdev, reg, mask, val, sta2x11_apb_soc_regs);
+}
+
+#define PCIE_EP1_FUNC3_0_INTR_REG 0x000
+#define PCIE_EP1_FUNC7_4_INTR_REG 0x004
+#define PCIE_EP2_FUNC3_0_INTR_REG 0x008
+#define PCIE_EP2_FUNC7_4_INTR_REG 0x00c
+#define PCIE_EP3_FUNC3_0_INTR_REG 0x010
+#define PCIE_EP3_FUNC7_4_INTR_REG 0x014
+#define PCIE_EP4_FUNC3_0_INTR_REG 0x018
+#define PCIE_EP4_FUNC7_4_INTR_REG 0x01c
+#define PCIE_INTR_ENABLE0_REG 0x020
+#define PCIE_INTR_ENABLE1_REG 0x024
+#define PCIE_EP1_FUNC_TC_REG 0x028
+#define PCIE_EP2_FUNC_TC_REG 0x02c
+#define PCIE_EP3_FUNC_TC_REG 0x030
+#define PCIE_EP4_FUNC_TC_REG 0x034
+#define PCIE_EP1_FUNC_F_REG 0x038
+#define PCIE_EP2_FUNC_F_REG 0x03c
+#define PCIE_EP3_FUNC_F_REG 0x040
+#define PCIE_EP4_FUNC_F_REG 0x044
+#define PCIE_PAB_AMBA_SW_RST_REG 0x048
+#define PCIE_PM_STATUS_0_PORT_0_4 0x04c
+#define PCIE_PM_STATUS_7_0_EP1 0x050
+#define PCIE_PM_STATUS_7_0_EP2 0x054
+#define PCIE_PM_STATUS_7_0_EP3 0x058
+#define PCIE_PM_STATUS_7_0_EP4 0x05c
+#define PCIE_DEV_ID_0_EP1_REG 0x060
+#define PCIE_CC_REV_ID_0_EP1_REG 0x064
+#define PCIE_DEV_ID_1_EP1_REG 0x068
+#define PCIE_CC_REV_ID_1_EP1_REG 0x06c
+#define PCIE_DEV_ID_2_EP1_REG 0x070
+#define PCIE_CC_REV_ID_2_EP1_REG 0x074
+#define PCIE_DEV_ID_3_EP1_REG 0x078
+#define PCIE_CC_REV_ID_3_EP1_REG 0x07c
+#define PCIE_DEV_ID_4_EP1_REG 0x080
+#define PCIE_CC_REV_ID_4_EP1_REG 0x084
+#define PCIE_DEV_ID_5_EP1_REG 0x088
+#define PCIE_CC_REV_ID_5_EP1_REG 0x08c
+#define PCIE_DEV_ID_6_EP1_REG 0x090
+#define PCIE_CC_REV_ID_6_EP1_REG 0x094
+#define PCIE_DEV_ID_7_EP1_REG 0x098
+#define PCIE_CC_REV_ID_7_EP1_REG 0x09c
+#define PCIE_DEV_ID_0_EP2_REG 0x0a0
+#define PCIE_CC_REV_ID_0_EP2_REG 0x0a4
+#define PCIE_DEV_ID_1_EP2_REG 0x0a8
+#define PCIE_CC_REV_ID_1_EP2_REG 0x0ac
+#define PCIE_DEV_ID_2_EP2_REG 0x0b0
+#define PCIE_CC_REV_ID_2_EP2_REG 0x0b4
+#define PCIE_DEV_ID_3_EP2_REG 0x0b8
+#define PCIE_CC_REV_ID_3_EP2_REG 0x0bc
+#define PCIE_DEV_ID_4_EP2_REG 0x0c0
+#define PCIE_CC_REV_ID_4_EP2_REG 0x0c4
+#define PCIE_DEV_ID_5_EP2_REG 0x0c8
+#define PCIE_CC_REV_ID_5_EP2_REG 0x0cc
+#define PCIE_DEV_ID_6_EP2_REG 0x0d0
+#define PCIE_CC_REV_ID_6_EP2_REG 0x0d4
+#define PCIE_DEV_ID_7_EP2_REG 0x0d8
+#define PCIE_CC_REV_ID_7_EP2_REG 0x0dC
+#define PCIE_DEV_ID_0_EP3_REG 0x0e0
+#define PCIE_CC_REV_ID_0_EP3_REG 0x0e4
+#define PCIE_DEV_ID_1_EP3_REG 0x0e8
+#define PCIE_CC_REV_ID_1_EP3_REG 0x0ec
+#define PCIE_DEV_ID_2_EP3_REG 0x0f0
+#define PCIE_CC_REV_ID_2_EP3_REG 0x0f4
+#define PCIE_DEV_ID_3_EP3_REG 0x0f8
+#define PCIE_CC_REV_ID_3_EP3_REG 0x0fc
+#define PCIE_DEV_ID_4_EP3_REG 0x100
+#define PCIE_CC_REV_ID_4_EP3_REG 0x104
+#define PCIE_DEV_ID_5_EP3_REG 0x108
+#define PCIE_CC_REV_ID_5_EP3_REG 0x10c
+#define PCIE_DEV_ID_6_EP3_REG 0x110
+#define PCIE_CC_REV_ID_6_EP3_REG 0x114
+#define PCIE_DEV_ID_7_EP3_REG 0x118
+#define PCIE_CC_REV_ID_7_EP3_REG 0x11c
+#define PCIE_DEV_ID_0_EP4_REG 0x120
+#define PCIE_CC_REV_ID_0_EP4_REG 0x124
+#define PCIE_DEV_ID_1_EP4_REG 0x128
+#define PCIE_CC_REV_ID_1_EP4_REG 0x12c
+#define PCIE_DEV_ID_2_EP4_REG 0x130
+#define PCIE_CC_REV_ID_2_EP4_REG 0x134
+#define PCIE_DEV_ID_3_EP4_REG 0x138
+#define PCIE_CC_REV_ID_3_EP4_REG 0x13c
+#define PCIE_DEV_ID_4_EP4_REG 0x140
+#define PCIE_CC_REV_ID_4_EP4_REG 0x144
+#define PCIE_DEV_ID_5_EP4_REG 0x148
+#define PCIE_CC_REV_ID_5_EP4_REG 0x14c
+#define PCIE_DEV_ID_6_EP4_REG 0x150
+#define PCIE_CC_REV_ID_6_EP4_REG 0x154
+#define PCIE_DEV_ID_7_EP4_REG 0x158
+#define PCIE_CC_REV_ID_7_EP4_REG 0x15c
+#define PCIE_SUBSYS_VEN_ID_REG 0x160
+#define PCIE_COMMON_CLOCK_CONFIG_0_4_0 0x164
+#define PCIE_MIPHYP_SSC_EN_REG 0x168
+#define PCIE_MIPHYP_ADDR_REG 0x16c
+#define PCIE_L1_ASPM_READY_REG 0x170
+#define PCIE_EXT_CFG_RDY_REG 0x174
+#define PCIE_SoC_INT_ROUTER_STATUS0_REG 0x178
+#define PCIE_SoC_INT_ROUTER_STATUS1_REG 0x17c
+#define PCIE_SoC_INT_ROUTER_STATUS2_REG 0x180
+#define PCIE_SoC_INT_ROUTER_STATUS3_REG 0x184
+#define DMA_IP_CTRL_REG 0x324
+#define DISP_BRIDGE_PU_PD_CTRL_REG 0x328
+#define VIP_PU_PD_CTRL_REG 0x32c
+#define USB_MLB_PU_PD_CTRL_REG 0x330
+#define SDIO_PU_PD_MISCFUNC_CTRL_REG1 0x334
+#define SDIO_PU_PD_MISCFUNC_CTRL_REG2 0x338
+#define UART_PU_PD_CTRL_REG 0x33c
+#define ARM_Lock 0x340
+#define SYS_IO_CHAR_REG1 0x344
+#define SYS_IO_CHAR_REG2 0x348
+#define SATA_CORE_ID_REG 0x34c
+#define SATA_CTRL_REG 0x350
+#define I2C_HSFIX_MISC_REG 0x354
+#define SPARE2_RESERVED 0x358
+#define SPARE3_RESERVED 0x35c
+#define MASTER_LOCK_REG 0x368
+#define SYSTEM_CONFIG_STATUS_REG 0x36c
+#define MSP_CLK_CTRL_REG 0x39c
+#define COMPENSATION_REG1 0x3c4
+#define COMPENSATION_REG2 0x3c8
+#define COMPENSATION_REG3 0x3cc
+#define TEST_CTL_REG 0x3d0
+
+/*
+ * SECR (OTP) registers
+ */
+#define STA2X11_SECR_CR 0x00
+#define STA2X11_SECR_FVR0 0x10
+#define STA2X11_SECR_FVR1 0x14
+
+extern int sta2x11_mfd_get_regs_data(struct platform_device *pdev,
+ enum sta2x11_mfd_plat_dev index,
+ void __iomem **regs,
+ spinlock_t **lock);
+
+#endif /* __STA2X11_MFD_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
new file mode 100644
index 000000000..c9d869027
--- /dev/null
+++ b/include/linux/mfd/stmpe.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ */
+
+#ifndef __LINUX_MFD_STMPE_H
+#define __LINUX_MFD_STMPE_H
+
+#include <linux/mutex.h>
+
+struct device;
+struct regulator;
+
+enum stmpe_block {
+ STMPE_BLOCK_GPIO = 1 << 0,
+ STMPE_BLOCK_KEYPAD = 1 << 1,
+ STMPE_BLOCK_TOUCHSCREEN = 1 << 2,
+ STMPE_BLOCK_ADC = 1 << 3,
+ STMPE_BLOCK_PWM = 1 << 4,
+ STMPE_BLOCK_ROTATOR = 1 << 5,
+};
+
+enum stmpe_partnum {
+ STMPE610,
+ STMPE801,
+ STMPE811,
+ STMPE1601,
+ STMPE1801,
+ STMPE2401,
+ STMPE2403,
+ STMPE_NBR_PARTS
+};
+
+/*
+ * For registers whose locations differ on variants, the correct address is
+ * obtained by indexing stmpe->regs with one of the following.
+ */
+enum {
+ STMPE_IDX_CHIP_ID,
+ STMPE_IDX_ICR_LSB,
+ STMPE_IDX_IER_LSB,
+ STMPE_IDX_ISR_LSB,
+ STMPE_IDX_ISR_MSB,
+ STMPE_IDX_GPMR_LSB,
+ STMPE_IDX_GPSR_LSB,
+ STMPE_IDX_GPCR_LSB,
+ STMPE_IDX_GPDR_LSB,
+ STMPE_IDX_GPEDR_MSB,
+ STMPE_IDX_GPRER_LSB,
+ STMPE_IDX_GPFER_LSB,
+ STMPE_IDX_GPPUR_LSB,
+ STMPE_IDX_GPPDR_LSB,
+ STMPE_IDX_GPAFR_U_MSB,
+ STMPE_IDX_IEGPIOR_LSB,
+ STMPE_IDX_ISGPIOR_LSB,
+ STMPE_IDX_ISGPIOR_MSB,
+ STMPE_IDX_MAX,
+};
+
+
+struct stmpe_variant_info;
+struct stmpe_client_info;
+
+/**
+ * struct stmpe - STMPE MFD structure
+ * @vcc: optional VCC regulator
+ * @vio: optional VIO regulator
+ * @lock: lock protecting I/O operations
+ * @irq_lock: IRQ bus lock
+ * @dev: device, mostly for dev_dbg()
+ * @domain: IRQ domain
+ * @client: client - i2c or spi
+ * @ci: client specific information
+ * @partnum: part number
+ * @variant: the detected STMPE model number
+ * @regs: list of addresses of registers which are at different addresses on
+ * different variants. Indexed by one of STMPE_IDX_*.
+ * @irq: irq number for stmpe
+ * @num_gpios: number of gpios, differs for variants
+ * @ier: cache of IER registers for bus_lock
+ * @oldier: cache of IER registers for bus_lock
+ * @pdata: platform data
+ */
+struct stmpe {
+ struct regulator *vcc;
+ struct regulator *vio;
+ struct mutex lock;
+ struct mutex irq_lock;
+ struct device *dev;
+ struct irq_domain *domain;
+ void *client;
+ struct stmpe_client_info *ci;
+ enum stmpe_partnum partnum;
+ struct stmpe_variant_info *variant;
+ const u8 *regs;
+
+ int irq;
+ int num_gpios;
+ u8 ier[2];
+ u8 oldier[2];
+ struct stmpe_platform_data *pdata;
+};
+
+extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data);
+extern int stmpe_reg_read(struct stmpe *stmpe, u8 reg);
+extern int stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length,
+ u8 *values);
+extern int stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
+ const u8 *values);
+extern int stmpe_set_bits(struct stmpe *stmpe, u8 reg, u8 mask, u8 val);
+extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins,
+ enum stmpe_block block);
+extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
+extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
+
+#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
+
+/**
+ * struct stmpe_ts_platform_data - stmpe811 touch screen controller platform
+ * data
+ * @sample_time: ADC conversion time in number of clocks.
+ * (0 -> 36 clocks, 1 -> 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks,
+ * 4 -> 80 clocks, 5 -> 96 clocks, 6 -> 144 clocks),
+ * recommended is 4.
+ * @mod_12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC)
+ * @ref_sel: ADC reference source
+ * (0 -> internal reference, 1 -> external reference)
+ * @adc_freq: ADC Clock speed
+ * (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 or 3 -> 6.5 MHz)
+ * @ave_ctrl: Sample average control
+ * (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 samples, 3 -> 8 samples)
+ * @touch_det_delay: Touch detect interrupt delay
+ * (0 -> 10 us, 1 -> 50 us, 2 -> 100 us, 3 -> 500 us,
+ * 4 -> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms)
+ * recommended is 3
+ * @settling: Panel driver settling time
+ * (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 -> 1 ms,
+ * 4 -> 5 ms, 5 -> 10 ms, 6 -> 50 ms, 7 -> 100 ms)
+ * recommended is 2
+ * @fraction_z: Length of the fractional part in z
+ * (fraction_z ([0..7]) = Count of the fractional part)
+ * recommended is 7
+ * @i_drive: current limit value of the touchscreen drivers
+ * (0 -> 20 mA typical 35 mA max, 1 -> 50 mA typical 80 mA max)
+ *
+ */
+struct stmpe_ts_platform_data {
+ u8 sample_time;
+ u8 mod_12b;
+ u8 ref_sel;
+ u8 adc_freq;
+ u8 ave_ctrl;
+ u8 touch_det_delay;
+ u8 settling;
+ u8 fraction_z;
+ u8 i_drive;
+};
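+
+/*
+ * Illustrative sketch (not part of the original header): platform data
+ * filled with the recommended values from the description above; the
+ * 12-bit ADC, internal reference, 3.25 MHz ADC clock and 4-sample
+ * averaging are arbitrary example choices.
+ *
+ *	static struct stmpe_ts_platform_data stmpe_ts_pdata = {
+ *		.sample_time	 = 4,	(80 clocks)
+ *		.mod_12b	 = 1,	(12-bit ADC)
+ *		.ref_sel	 = 0,	(internal reference)
+ *		.adc_freq	 = 1,	(3.25 MHz)
+ *		.ave_ctrl	 = 2,	(4 samples)
+ *		.touch_det_delay = 3,	(500 us)
+ *		.settling	 = 2,	(500 us)
+ *		.fraction_z	 = 7,
+ *		.i_drive	 = 0,	(20 mA typical)
+ *	};
+ */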
+
+/**
+ * struct stmpe_platform_data - STMPE platform data
+ * @id: device id to distinguish between multiple STMPEs on the same board
+ * @blocks: bitmask of blocks to enable (use STMPE_BLOCK_*)
+ * @irq_trigger: IRQ trigger to use for the interrupt to the host
+ * @autosleep: bool to enable/disable stmpe autosleep
+ * @autosleep_timeout: inactivity timeout in milliseconds for autosleep
+ * @irq_over_gpio: true if gpio is used to get irq
+ * @irq_gpio: gpio number over which irq will be requested (significant only if
+ * irq_over_gpio is true)
+ * @ts: touchscreen-specific platform data
+ */
+struct stmpe_platform_data {
+ int id;
+ unsigned int blocks;
+ unsigned int irq_trigger;
+ bool autosleep;
+ bool irq_over_gpio;
+ int irq_gpio;
+ int autosleep_timeout;
+
+ struct stmpe_ts_platform_data *ts;
+};
+
+#endif
diff --git a/include/linux/mfd/stw481x.h b/include/linux/mfd/stw481x.h
new file mode 100644
index 000000000..833074b76
--- /dev/null
+++ b/include/linux/mfd/stw481x.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef MFD_STW481X_H
+#define MFD_STW481X_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+
+/* These registers are accessed from more than one driver */
+#define STW_CONF1 0x11U
+#define STW_CONF1_PDN_VMMC 0x01U
+#define STW_CONF1_VMMC_MASK 0x0eU
+#define STW_CONF1_VMMC_1_8V 0x02U
+#define STW_CONF1_VMMC_2_85V 0x04U
+#define STW_CONF1_VMMC_3V 0x06U
+#define STW_CONF1_VMMC_1_85V 0x08U
+#define STW_CONF1_VMMC_2_6V 0x0aU
+#define STW_CONF1_VMMC_2_7V 0x0cU
+#define STW_CONF1_VMMC_3_3V 0x0eU
+#define STW_CONF1_MMC_LS_STATUS 0x10U
+#define STW_PCTL_REG_LO 0x1eU
+#define STW_PCTL_REG_HI 0x1fU
+#define STW_CONF1_V_MONITORING 0x20U
+#define STW_CONF1_IT_WARN 0x40U
+#define STW_CONF1_PDN_VAUX 0x80U
+#define STW_CONF2 0x20U
+#define STW_CONF2_MASK_TWARN 0x01U
+#define STW_CONF2_VMMC_EXT 0x02U
+#define STW_CONF2_MASK_IT_WAKE_UP 0x04U
+#define STW_CONF2_GPO1 0x08U
+#define STW_CONF2_GPO2 0x10U
+#define STW_VCORE_SLEEP 0x21U
+
+/**
+ * struct stw481x - state holder for the Stw481x drivers
+ * @client: corresponding I2C client
+ * @map: regmap handle to access device registers
+ */
+struct stw481x {
+ struct i2c_client *client;
+ struct regmap *map;
+};
+
+#endif
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
new file mode 100644
index 000000000..75e543b78
--- /dev/null
+++ b/include/linux/mfd/syscon.h
@@ -0,0 +1,53 @@
+/*
+ * System Control Driver
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Dong Aisheng <dong.aisheng@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MFD_SYSCON_H__
+#define __LINUX_MFD_SYSCON_H__
+
+#include <linux/err.h>
+
+struct device_node;
+
+#ifdef CONFIG_MFD_SYSCON
+extern struct regmap *syscon_node_to_regmap(struct device_node *np);
+extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
+extern struct regmap *syscon_regmap_lookup_by_pdevname(const char *s);
+extern struct regmap *syscon_regmap_lookup_by_phandle(
+ struct device_node *np,
+ const char *property);
+#else
+static inline struct regmap *syscon_node_to_regmap(struct device_node *np)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_compatible(const char *s)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_pdevname(const char *s)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle(
+ struct device_node *np,
+ const char *property)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif
+
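+/*
+ * Illustrative usage sketch (not part of the original header): a driver
+ * typically looks the regmap up once at probe time and then uses the
+ * regular regmap API; "np", the "foo,syscon" phandle property and the
+ * register offset/bit below are hypothetical placeholders.
+ *
+ *	struct regmap *map;
+ *
+ *	map = syscon_regmap_lookup_by_phandle(np, "foo,syscon");
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ *	regmap_update_bits(map, 0x04, BIT(0), BIT(0));
+ */
+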
+#endif /* __LINUX_MFD_SYSCON_H__ */
diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h
new file mode 100644
index 000000000..8293c3e2a
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-matrix.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2014 Atmel Corporation.
+ *
+ * Memory Controllers (MATRIX, EBI) - System peripherals registers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
+#define _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
+
+#define AT91SAM9260_MATRIX_MCFG 0x00
+#define AT91SAM9260_MATRIX_SCFG 0x40
+#define AT91SAM9260_MATRIX_PRS 0x80
+#define AT91SAM9260_MATRIX_MRCR 0x100
+#define AT91SAM9260_MATRIX_EBICSA 0x11c
+
+#define AT91SAM9261_MATRIX_MRCR 0x0
+#define AT91SAM9261_MATRIX_SCFG 0x4
+#define AT91SAM9261_MATRIX_TCR 0x24
+#define AT91SAM9261_MATRIX_EBICSA 0x30
+#define AT91SAM9261_MATRIX_USBPUCR 0x34
+
+#define AT91SAM9263_MATRIX_MCFG 0x00
+#define AT91SAM9263_MATRIX_SCFG 0x40
+#define AT91SAM9263_MATRIX_PRS 0x80
+#define AT91SAM9263_MATRIX_MRCR 0x100
+#define AT91SAM9263_MATRIX_TCR 0x114
+#define AT91SAM9263_MATRIX_EBI0CSA 0x120
+#define AT91SAM9263_MATRIX_EBI1CSA 0x124
+
+#define AT91SAM9RL_MATRIX_MCFG 0x00
+#define AT91SAM9RL_MATRIX_SCFG 0x40
+#define AT91SAM9RL_MATRIX_PRS 0x80
+#define AT91SAM9RL_MATRIX_MRCR 0x100
+#define AT91SAM9RL_MATRIX_TCR 0x114
+#define AT91SAM9RL_MATRIX_EBICSA 0x120
+
+#define AT91SAM9G45_MATRIX_MCFG 0x00
+#define AT91SAM9G45_MATRIX_SCFG 0x40
+#define AT91SAM9G45_MATRIX_PRS 0x80
+#define AT91SAM9G45_MATRIX_MRCR 0x100
+#define AT91SAM9G45_MATRIX_TCR 0x110
+#define AT91SAM9G45_MATRIX_DDRMPR 0x118
+#define AT91SAM9G45_MATRIX_EBICSA 0x128
+
+#define AT91SAM9N12_MATRIX_MCFG 0x00
+#define AT91SAM9N12_MATRIX_SCFG 0x40
+#define AT91SAM9N12_MATRIX_PRS 0x80
+#define AT91SAM9N12_MATRIX_MRCR 0x100
+#define AT91SAM9N12_MATRIX_EBICSA 0x118
+
+#define AT91SAM9X5_MATRIX_MCFG 0x00
+#define AT91SAM9X5_MATRIX_SCFG 0x40
+#define AT91SAM9X5_MATRIX_PRS 0x80
+#define AT91SAM9X5_MATRIX_MRCR 0x100
+#define AT91SAM9X5_MATRIX_EBICSA 0x120
+
+#define SAMA5D3_MATRIX_MCFG 0x00
+#define SAMA5D3_MATRIX_SCFG 0x40
+#define SAMA5D3_MATRIX_PRS 0x80
+#define SAMA5D3_MATRIX_MRCR 0x100
+
+#define AT91_MATRIX_MCFG(o, x) ((o) + ((x) * 0x4))
+#define AT91_MATRIX_ULBT GENMASK(2, 0)
+#define AT91_MATRIX_ULBT_INFINITE (0 << 0)
+#define AT91_MATRIX_ULBT_SINGLE (1 << 0)
+#define AT91_MATRIX_ULBT_FOUR (2 << 0)
+#define AT91_MATRIX_ULBT_EIGHT (3 << 0)
+#define AT91_MATRIX_ULBT_SIXTEEN (4 << 0)
+
+#define AT91_MATRIX_SCFG(o, x) ((o) + ((x) * 0x4))
+#define AT91_MATRIX_SLOT_CYCLE GENMASK(7, 0)
+#define AT91_MATRIX_DEFMSTR_TYPE GENMASK(17, 16)
+#define AT91_MATRIX_DEFMSTR_TYPE_NONE (0 << 16)
+#define AT91_MATRIX_DEFMSTR_TYPE_LAST (1 << 16)
+#define AT91_MATRIX_DEFMSTR_TYPE_FIXED (2 << 16)
+#define AT91_MATRIX_FIXED_DEFMSTR GENMASK(20, 18)
+#define AT91_MATRIX_ARBT GENMASK(25, 24)
+#define AT91_MATRIX_ARBT_ROUND_ROBIN (0 << 24)
+#define AT91_MATRIX_ARBT_FIXED_PRIORITY (1 << 24)
+
+#define AT91_MATRIX_ITCM_SIZE GENMASK(3, 0)
+#define AT91_MATRIX_ITCM_0 (0 << 0)
+#define AT91_MATRIX_ITCM_16 (5 << 0)
+#define AT91_MATRIX_ITCM_32 (6 << 0)
+#define AT91_MATRIX_ITCM_64 (7 << 0)
+#define AT91_MATRIX_DTCM_SIZE GENMASK(7, 4)
+#define AT91_MATRIX_DTCM_0 (0 << 4)
+#define AT91_MATRIX_DTCM_16 (5 << 4)
+#define AT91_MATRIX_DTCM_32 (6 << 4)
+#define AT91_MATRIX_DTCM_64 (7 << 4)
+
+#define AT91_MATRIX_PRAS(o, x) ((o) + ((x) * 0x8))
+#define AT91_MATRIX_PRBS(o, x) ((o) + ((x) * 0x8) + 0x4)
+#define AT91_MATRIX_MPR(x) GENMASK(((x) * 0x4) + 1, ((x) * 0x4))
+
+#define AT91_MATRIX_RCB(x) BIT(x)
+
+#define AT91_MATRIX_CSA(cs, val) (val << (cs))
+#define AT91_MATRIX_DBPUC BIT(8)
+#define AT91_MATRIX_DBPDC BIT(9)
+#define AT91_MATRIX_VDDIOMSEL BIT(16)
+#define AT91_MATRIX_VDDIOMSEL_1_8V (0 << 16)
+#define AT91_MATRIX_VDDIOMSEL_3_3V (1 << 16)
+#define AT91_MATRIX_EBI_IOSR BIT(17)
+#define AT91_MATRIX_DDR_IOSR BIT(18)
+#define AT91_MATRIX_NFD0_SELECT BIT(24)
+#define AT91_MATRIX_DDR_MP_EN BIT(25)
+#define AT91_MATRIX_EBI_NUM_CS 8
+
+#define AT91_MATRIX_USBPUCR_PUON BIT(30)
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_MATRIX_H */
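The per-SoC *_MATRIX_* offsets above are meant to be combined with the parameterized AT91_MATRIX_*(o, x) helpers to address an individual slave or master register. A sketch of that use, assuming the MATRIX block is exposed through a syscon regmap; the SoC and slave index are illustrative:

#include <linux/bitops.h>
#include <linux/mfd/syscon/atmel-matrix.h>
#include <linux/regmap.h>

/* Set slave 2's default master type to "last accessed master" on an
 * AT91SAM9263. The regmap handle is assumed to come from syscon. */
static int example_matrix_set_defmstr(struct regmap *matrix)
{
        unsigned int reg = AT91_MATRIX_SCFG(AT91SAM9263_MATRIX_SCFG, 2);

        return regmap_update_bits(matrix, reg,
                                  AT91_MATRIX_DEFMSTR_TYPE,
                                  AT91_MATRIX_DEFMSTR_TYPE_LAST);
}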
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
new file mode 100644
index 000000000..be6ebe64e
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -0,0 +1,173 @@
+/*
+ * Atmel SMC (Static Memory Controller) register offsets and bit definitions.
+ *
+ * Copyright (C) 2014 Atmel
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
+#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+
+#define AT91SAM9_SMC_GENERIC 0x00
+#define AT91SAM9_SMC_GENERIC_BLK_SZ 0x10
+
+#define SAMA5_SMC_GENERIC 0x600
+#define SAMA5_SMC_GENERIC_BLK_SZ 0x14
+
+#define AT91SAM9_SMC_SETUP(o) ((o) + 0x00)
+#define AT91SAM9_SMC_NWESETUP(x) (x)
+#define AT91SAM9_SMC_NCS_WRSETUP(x) ((x) << 8)
+#define AT91SAM9_SMC_NRDSETUP(x) ((x) << 16)
+#define AT91SAM9_SMC_NCS_NRDSETUP(x) ((x) << 24)
+
+#define AT91SAM9_SMC_PULSE(o) ((o) + 0x04)
+#define AT91SAM9_SMC_NWEPULSE(x) (x)
+#define AT91SAM9_SMC_NCS_WRPULSE(x) ((x) << 8)
+#define AT91SAM9_SMC_NRDPULSE(x) ((x) << 16)
+#define AT91SAM9_SMC_NCS_NRDPULSE(x) ((x) << 24)
+
+#define AT91SAM9_SMC_CYCLE(o) ((o) + 0x08)
+#define AT91SAM9_SMC_NWECYCLE(x) (x)
+#define AT91SAM9_SMC_NRDCYCLE(x) ((x) << 16)
+
+#define AT91SAM9_SMC_MODE(o) ((o) + 0x0c)
+#define SAMA5_SMC_MODE(o) ((o) + 0x10)
+#define AT91_SMC_READMODE BIT(0)
+#define AT91_SMC_READMODE_NCS (0 << 0)
+#define AT91_SMC_READMODE_NRD (1 << 0)
+#define AT91_SMC_WRITEMODE BIT(1)
+#define AT91_SMC_WRITEMODE_NCS (0 << 1)
+#define AT91_SMC_WRITEMODE_NWE (1 << 1)
+#define AT91_SMC_EXNWMODE GENMASK(5, 4)
+#define AT91_SMC_EXNWMODE_DISABLE (0 << 4)
+#define AT91_SMC_EXNWMODE_FROZEN (2 << 4)
+#define AT91_SMC_EXNWMODE_READY (3 << 4)
+#define AT91_SMC_BAT BIT(8)
+#define AT91_SMC_BAT_SELECT (0 << 8)
+#define AT91_SMC_BAT_WRITE (1 << 8)
+#define AT91_SMC_DBW GENMASK(13, 12)
+#define AT91_SMC_DBW_8 (0 << 12)
+#define AT91_SMC_DBW_16 (1 << 12)
+#define AT91_SMC_DBW_32 (2 << 12)
+#define AT91_SMC_TDF GENMASK(19, 16)
+#define AT91_SMC_TDF_(x) ((((x) - 1) << 16) & AT91_SMC_TDF)
+#define AT91_SMC_TDF_MAX 16
+#define AT91_SMC_TDFMODE_OPTIMIZED BIT(20)
+#define AT91_SMC_PMEN BIT(24)
+#define AT91_SMC_PS GENMASK(29, 28)
+#define AT91_SMC_PS_4 (0 << 28)
+#define AT91_SMC_PS_8 (1 << 28)
+#define AT91_SMC_PS_16 (2 << 28)
+#define AT91_SMC_PS_32 (3 << 28)
+
+
+/*
+ * This function converts a setup timing expressed in nanoseconds into an
+ * encoded value that can be written in the SMC_SETUP register.
+ *
+ * The following formula is described in the Atmel datasheets (section
+ * "SMC Setup Register"):
+ *
+ * setup length = (128 * SETUP[5] + SETUP[4:0])
+ *
+ * where setup length is the timing expressed in cycles.
+ */
+static inline u32 at91sam9_smc_setup_ns_to_cycles(unsigned int clk_rate,
+ u32 timing_ns)
+{
+ u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
+ u32 coded_cycles = 0;
+ u32 cycles;
+
+ cycles = DIV_ROUND_UP(timing_ns, clk_period);
+ if (cycles / 32) {
+ coded_cycles |= 1 << 5;
+ if (cycles < 128)
+ cycles = 0;
+ }
+
+ coded_cycles |= cycles % 32;
+
+ return coded_cycles;
+}
+
+/*
+ * This function converts a pulse timing expressed in nanoseconds into an
+ * encoded value that can be written in the SMC_PULSE register.
+ *
+ * The following formula is described in the Atmel datasheets (section
+ * "SMC Pulse Register"):
+ *
+ * pulse length = (256 * PULSE[6] + PULSE[5:0])
+ *
+ * where pulse length is the timing expressed in cycles.
+ */
+static inline u32 at91sam9_smc_pulse_ns_to_cycles(unsigned int clk_rate,
+ u32 timing_ns)
+{
+ u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
+ u32 coded_cycles = 0;
+ u32 cycles;
+
+ cycles = DIV_ROUND_UP(timing_ns, clk_period);
+ if (cycles / 64) {
+ coded_cycles |= 1 << 6;
+ if (cycles < 256)
+ cycles = 0;
+ }
+
+ coded_cycles |= cycles % 64;
+
+ return coded_cycles;
+}
+
+/*
+ * This function converts a cycle timing expressed in nanoseconds into an
+ * encoded value that can be written in the SMC_CYCLE register.
+ *
+ * The following formula is described in the Atmel datasheets (section
+ * "SMC Cycle Register"):
+ *
+ * cycle length = (256 * CYCLE[8:7] + CYCLE[6:0])
+ *
+ * where cycle length is the timing expressed in cycles.
+ */
+static inline u32 at91sam9_smc_cycle_ns_to_cycles(unsigned int clk_rate,
+ u32 timing_ns)
+{
+ u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
+ u32 coded_cycles = 0;
+ u32 cycles;
+
+ cycles = DIV_ROUND_UP(timing_ns, clk_period);
+ if (cycles / 128) {
+ coded_cycles = cycles / 256;
+ cycles %= 256;
+ if (cycles >= 128) {
+ coded_cycles++;
+ cycles = 0;
+ }
+
+ if (coded_cycles > 0x3) {
+ coded_cycles = 0x3;
+ cycles = 0x7f;
+ }
+
+ coded_cycles <<= 7;
+ }
+
+ coded_cycles |= cycles % 128;
+
+ return coded_cycles;
+}
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */
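The three helpers above only encode a timing; the caller still shifts the result into the right field and writes the per-chip-select register. A sketch under those assumptions, using an illustrative 10 ns setup time and chip select 3, with the SMC reached through a regmap:

#include <linux/mfd/syscon/atmel-smc.h>

/* Encode 10 ns NWE/NRD setup timings for chip select 3 of an AT91SAM9
 * SMC and write them to the corresponding SMC_SETUP register. */
static int example_smc_setup_cs3(struct regmap *smc, unsigned int mck_rate)
{
        u32 off = AT91SAM9_SMC_GENERIC + 3 * AT91SAM9_SMC_GENERIC_BLK_SZ;
        u32 cycles = at91sam9_smc_setup_ns_to_cycles(mck_rate, 10);
        u32 setup = AT91SAM9_SMC_NWESETUP(cycles) |
                    AT91SAM9_SMC_NRDSETUP(cycles);

        return regmap_write(smc, AT91SAM9_SMC_SETUP(off), setup);
}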
diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h
new file mode 100644
index 000000000..8acf1ec1f
--- /dev/null
+++ b/include/linux/mfd/syscon/atmel-st.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * System Timer (ST) - System peripherals registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H
+#define _LINUX_MFD_SYSCON_ATMEL_ST_H
+
+#include <linux/bitops.h>
+
+#define AT91_ST_CR 0x00 /* Control Register */
+#define AT91_ST_WDRST BIT(0) /* Watchdog Timer Restart */
+
+#define AT91_ST_PIMR 0x04 /* Period Interval Mode Register */
+#define AT91_ST_PIV 0xffff /* Period Interval Value */
+
+#define AT91_ST_WDMR 0x08 /* Watchdog Mode Register */
+#define AT91_ST_WDV 0xffff /* Watchdog Counter Value */
+#define AT91_ST_RSTEN BIT(16) /* Reset Enable */
+#define AT91_ST_EXTEN BIT(17) /* External Signal Assertion Enable */
+
+#define AT91_ST_RTMR 0x0c /* Real-time Mode Register */
+#define AT91_ST_RTPRES 0xffff /* Real-time Prescaler Value */
+
+#define AT91_ST_SR 0x10 /* Status Register */
+#define AT91_ST_PITS BIT(0) /* Period Interval Timer Status */
+#define AT91_ST_WDOVF BIT(1) /* Watchdog Overflow */
+#define AT91_ST_RTTINC BIT(2) /* Real-time Timer Increment */
+#define AT91_ST_ALMS BIT(3) /* Alarm Status */
+
+#define AT91_ST_IER 0x14 /* Interrupt Enable Register */
+#define AT91_ST_IDR 0x18 /* Interrupt Disable Register */
+#define AT91_ST_IMR 0x1c /* Interrupt Mask Register */
+
+#define AT91_ST_RTAR 0x20 /* Real-time Alarm Register */
+#define AT91_ST_ALMV 0xfffff /* Alarm Value */
+
+#define AT91_ST_CRTR 0x24 /* Current Real-time Register */
+#define AT91_ST_CRTV 0xfffff /* Current Real-Time Value */
+
+#endif /* _LINUX_MFD_SYSCON_ATMEL_ST_H */
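With these definitions, pinging the watchdog contained in the System Timer block reduces to a single write of AT91_ST_WDRST to AT91_ST_CR. A sketch, assuming the ST block is exposed via a regmap handle (for example obtained through syscon):

#include <linux/mfd/syscon/atmel-st.h>
#include <linux/regmap.h>

/* Restart the watchdog counter; the regmap is assumed to map the ST block. */
static int example_st_ping_watchdog(struct regmap *st)
{
        return regmap_write(st, AT91_ST_CR, AT91_ST_WDRST);
}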
diff --git a/include/linux/mfd/syscon/clps711x.h b/include/linux/mfd/syscon/clps711x.h
new file mode 100644
index 000000000..26355abae
--- /dev/null
+++ b/include/linux/mfd/syscon/clps711x.h
@@ -0,0 +1,94 @@
+/*
+ * CLPS711X system register bits definitions
+ *
+ * Copyright (C) 2013 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_CLPS711X_H_
+#define _LINUX_MFD_SYSCON_CLPS711X_H_
+
+#define SYSCON_OFFSET (0x00)
+#define SYSFLG_OFFSET (0x40)
+
+#define SYSCON1_KBDSCAN(x) ((x) & 15)
+#define SYSCON1_KBDSCAN_MASK (15)
+#define SYSCON1_TC1M (1 << 4)
+#define SYSCON1_TC1S (1 << 5)
+#define SYSCON1_TC2M (1 << 6)
+#define SYSCON1_TC2S (1 << 7)
+#define SYSCON1_BZTOG (1 << 9)
+#define SYSCON1_BZMOD (1 << 10)
+#define SYSCON1_DBGEN (1 << 11)
+#define SYSCON1_LCDEN (1 << 12)
+#define SYSCON1_CDENTX (1 << 13)
+#define SYSCON1_CDENRX (1 << 14)
+#define SYSCON1_SIREN (1 << 15)
+#define SYSCON1_ADCKSEL(x) (((x) & 3) << 16)
+#define SYSCON1_ADCKSEL_MASK (3 << 16)
+#define SYSCON1_EXCKEN (1 << 18)
+#define SYSCON1_WAKEDIS (1 << 19)
+#define SYSCON1_IRTXM (1 << 20)
+
+#define SYSCON2_SERSEL (1 << 0)
+#define SYSCON2_KBD6 (1 << 1)
+#define SYSCON2_DRAMZ (1 << 2)
+#define SYSCON2_KBWEN (1 << 3)
+#define SYSCON2_SS2TXEN (1 << 4)
+#define SYSCON2_PCCARD1 (1 << 5)
+#define SYSCON2_PCCARD2 (1 << 6)
+#define SYSCON2_SS2RXEN (1 << 7)
+#define SYSCON2_SS2MAEN (1 << 9)
+#define SYSCON2_OSTB (1 << 12)
+#define SYSCON2_CLKENSL (1 << 13)
+#define SYSCON2_BUZFREQ (1 << 14)
+
+#define SYSCON3_ADCCON (1 << 0)
+#define SYSCON3_CLKCTL0 (1 << 1)
+#define SYSCON3_CLKCTL1 (1 << 2)
+#define SYSCON3_DAISEL (1 << 3)
+#define SYSCON3_ADCCKNSEN (1 << 4)
+#define SYSCON3_VERSN(x) (((x) >> 5) & 7)
+#define SYSCON3_VERSN_MASK (7 << 5)
+#define SYSCON3_FASTWAKE (1 << 8)
+#define SYSCON3_DAIEN (1 << 9)
+#define SYSCON3_128FS SYSCON3_DAIEN
+#define SYSCON3_ENPD67 (1 << 10)
+
+#define SYSCON_UARTEN (1 << 8)
+
+#define SYSFLG1_MCDR (1 << 0)
+#define SYSFLG1_DCDET (1 << 1)
+#define SYSFLG1_WUDR (1 << 2)
+#define SYSFLG1_WUON (1 << 3)
+#define SYSFLG1_CTS (1 << 8)
+#define SYSFLG1_DSR (1 << 9)
+#define SYSFLG1_DCD (1 << 10)
+#define SYSFLG1_NBFLG (1 << 12)
+#define SYSFLG1_RSTFLG (1 << 13)
+#define SYSFLG1_PFFLG (1 << 14)
+#define SYSFLG1_CLDFLG (1 << 15)
+#define SYSFLG1_CRXFE (1 << 24)
+#define SYSFLG1_CTXFF (1 << 25)
+#define SYSFLG1_SSIBUSY (1 << 26)
+#define SYSFLG1_ID (1 << 29)
+#define SYSFLG1_VERID(x) (((x) >> 30) & 3)
+#define SYSFLG1_VERID_MASK (3 << 30)
+
+#define SYSFLG2_SSRXOF (1 << 0)
+#define SYSFLG2_RESVAL (1 << 1)
+#define SYSFLG2_RESFRM (1 << 2)
+#define SYSFLG2_SS2RXFE (1 << 3)
+#define SYSFLG2_SS2TXFF (1 << 4)
+#define SYSFLG2_SS2TXUF (1 << 5)
+#define SYSFLG2_CKMODE (1 << 6)
+
+#define SYSFLG_UBUSY (1 << 11)
+#define SYSFLG_URXFE (1 << 22)
+#define SYSFLG_UTXFF (1 << 23)
+
+#endif
diff --git a/include/linux/mfd/syscon/exynos4-pmu.h b/include/linux/mfd/syscon/exynos4-pmu.h
new file mode 100644
index 000000000..278b1b154
--- /dev/null
+++ b/include/linux/mfd/syscon/exynos4-pmu.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
+#define _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_
+
+/* Exynos4 PMU register definitions */
+
+/* MIPI_PHYn_CONTROL register offset: n = 0..1 */
+#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x710 + (n) * 4)
+#define EXYNOS4_MIPI_PHY_ENABLE (1 << 0)
+#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
+#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
+#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
+
+#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS4_H_ */
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
new file mode 100644
index 000000000..9352adc95
--- /dev/null
+++ b/include/linux/mfd/syscon/exynos5-pmu.h
@@ -0,0 +1,47 @@
+/*
+ * Exynos5 SoC series Power Management Unit (PMU) register offsets
+ * and bit definitions.
+ *
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
+#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
+
+/* Exynos5 PMU register definitions */
+#define EXYNOS5_HDMI_PHY_CONTROL (0x700)
+#define EXYNOS5_USBDRD_PHY_CONTROL (0x704)
+
+/* Exynos5250 specific register definitions */
+#define EXYNOS5_USBHOST_PHY_CONTROL (0x708)
+#define EXYNOS5_EFNAND_PHY_CONTROL (0x70c)
+#define EXYNOS5_MIPI_PHY0_CONTROL (0x710)
+#define EXYNOS5_MIPI_PHY1_CONTROL (0x714)
+#define EXYNOS5_ADC_PHY_CONTROL (0x718)
+#define EXYNOS5_MTCADC_PHY_CONTROL (0x71c)
+#define EXYNOS5_DPTX_PHY_CONTROL (0x720)
+#define EXYNOS5_SATA_PHY_CONTROL (0x724)
+
+/* Exynos5420 specific register definitions */
+#define EXYNOS5420_USBDRD1_PHY_CONTROL (0x708)
+#define EXYNOS5420_USBHOST_PHY_CONTROL (0x70c)
+#define EXYNOS5420_MIPI_PHY0_CONTROL (0x714)
+#define EXYNOS5420_MIPI_PHY1_CONTROL (0x718)
+#define EXYNOS5420_MIPI_PHY2_CONTROL (0x71c)
+#define EXYNOS5420_ADC_PHY_CONTROL (0x720)
+#define EXYNOS5420_MTCADC_PHY_CONTROL (0x724)
+#define EXYNOS5420_DPTX_PHY_CONTROL (0x728)
+
+/* Exynos5433 specific register definitions */
+#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728)
+
+#define EXYNOS5_PHY_ENABLE BIT(0)
+
+#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
+#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
+
+#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */
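These offsets are intended to be used against the PMU system controller's regmap, e.g. by a PHY driver toggling its power-control bit. A minimal sketch; the regmap handle would typically come from a syscon lookup, and which *_PHY_CONTROL register applies depends on the SoC and PHY:

#include <linux/bitops.h>
#include <linux/mfd/syscon/exynos5-pmu.h>
#include <linux/regmap.h>

/* Power up the USB DRD PHY by setting its enable bit in the PMU. */
static int example_enable_usbdrd_phy(struct regmap *pmu)
{
        return regmap_update_bits(pmu, EXYNOS5_USBDRD_PHY_CONTROL,
                                  EXYNOS5_PHY_ENABLE, EXYNOS5_PHY_ENABLE);
}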
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
new file mode 100644
index 000000000..d16f4c82c
--- /dev/null
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -0,0 +1,438 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_IMX6Q_IOMUXC_GPR_H
+#define __LINUX_IMX6Q_IOMUXC_GPR_H
+
+#include <linux/bitops.h>
+
+#define IOMUXC_GPR0 0x00
+#define IOMUXC_GPR1 0x04
+#define IOMUXC_GPR2 0x08
+#define IOMUXC_GPR3 0x0c
+#define IOMUXC_GPR4 0x10
+#define IOMUXC_GPR5 0x14
+#define IOMUXC_GPR6 0x18
+#define IOMUXC_GPR7 0x1c
+#define IOMUXC_GPR8 0x20
+#define IOMUXC_GPR9 0x24
+#define IOMUXC_GPR10 0x28
+#define IOMUXC_GPR11 0x2c
+#define IOMUXC_GPR12 0x30
+#define IOMUXC_GPR13 0x34
+
+#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_MASK (0x3 << 30)
+#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_AUDMUX_RXCLK_P7_MUXED (0x0 << 30)
+#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_AUDMUX_RXCLK_P7 (0x1 << 30)
+#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_SSI3_SSI_SRCK (0x2 << 30)
+#define IMX6Q_GPR0_CLOCK_8_MUX_SEL_SSI3_RX_BIT_CLK (0x3 << 30)
+#define IMX6Q_GPR0_CLOCK_0_MUX_SEL_MASK (0x3 << 28)
+#define IMX6Q_GPR0_CLOCK_0_MUX_SEL_ESAI1_IPP_IND_SCKR_MUXED (0x0 << 28)
+#define IMX6Q_GPR0_CLOCK_0_MUX_SEL_ESAI1_IPP_IND_SCKR (0x1 << 28)
+#define IMX6Q_GPR0_CLOCK_0_MUX_SEL_ESAI1_IPP_DO_SCKR (0x2 << 28)
+#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_MASK (0x3 << 26)
+#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_AUDMUX_TXCLK_P7_MUXED (0x0 << 26)
+#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_AUDMUX_TXCLK_P7 (0x1 << 26)
+#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_SSI3_SSI_STCK (0x2 << 26)
+#define IMX6Q_GPR0_CLOCK_B_MUX_SEL_SSI3_TX_BIT_CLK (0x3 << 26)
+#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_MASK (0x3 << 24)
+#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_AUDMUX_RXCLK_P7_MUXED (0x3 << 24)
+#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_AUDMUX_RXCLK_P7 (0x3 << 24)
+#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_SSI3_SSI_SRCK (0x3 << 24)
+#define IMX6Q_GPR0_CLOCK_3_MUX_SEL_SSI3_RX_BIT_CLK (0x3 << 24)
+#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_MASK (0x3 << 22)
+#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_AUDMUX_TXCLK_P2_MUXED (0x0 << 22)
+#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_AUDMUX_TXCLK_P2 (0x1 << 22)
+#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_SSI2_SSI_STCK (0x2 << 22)
+#define IMX6Q_GPR0_CLOCK_A_MUX_SEL_SSI2_TX_BIT_CLK (0x3 << 22)
+#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_MASK (0x3 << 20)
+#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_AUDMUX_RXCLK_P2_MUXED (0x0 << 20)
+#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_AUDMUX_RXCLK_P2 (0x1 << 20)
+#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_SSI2_SSI_SRCK (0x2 << 20)
+#define IMX6Q_GPR0_CLOCK_2_MUX_SEL_SSI2_RX_BIT_CLK (0x3 << 20)
+#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_MASK (0x3 << 18)
+#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_AUDMUX_TXCLK_P1_MUXED (0x0 << 18)
+#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_AUDMUX_TXCLK_P1 (0x1 << 18)
+#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_SSI1_SSI_STCK (0x2 << 18)
+#define IMX6Q_GPR0_CLOCK_9_MUX_SEL_SSI1_SSI_TX_BIT_CLK (0x3 << 18)
+#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_MASK (0x3 << 16)
+#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_AUDMUX_RXCLK_P1_MUXED (0x0 << 16)
+#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_AUDMUX_RXCLK_P1 (0x1 << 16)
+#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_SSI1_SSI_SRCK (0x2 << 16)
+#define IMX6Q_GPR0_CLOCK_1_MUX_SEL_SSI1_SSI_RX_BIT_CLK (0x3 << 16)
+#define IMX6Q_GPR0_TX_CLK2_MUX_SEL_MASK (0x3 << 14)
+#define IMX6Q_GPR0_TX_CLK2_MUX_SEL_ASRCK_CLK1 (0x0 << 14)
+#define IMX6Q_GPR0_TX_CLK2_MUX_SEL_ASRCK_CLK2 (0x1 << 14)
+#define IMX6Q_GPR0_TX_CLK2_MUX_SEL_ASRCK_CLK3 (0x2 << 14)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL7_MASK BIT(7)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL7_SPDIF 0x0
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL7_IOMUX BIT(7)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL6_MASK BIT(6)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL6_ESAI 0x0
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL6_I2C3 BIT(6)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL5_MASK BIT(5)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL5_ECSPI4 0x0
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL5_EPIT2 BIT(5)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL4_MASK BIT(4)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL4_ECSPI4 0x0
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL4_I2C1 BIT(4)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL3_MASK BIT(3)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL3_ECSPI2 0x0
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL3_I2C1 BIT(3)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL2_MASK BIT(2)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL2_ECSPI1 0x0
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL2_I2C2 BIT(2)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL1_MASK BIT(1)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL1_ECSPI1 0x0
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL1_I2C3 BIT(1)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_MASK BIT(0)
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IPU1 0x0
+#define IMX6Q_GPR0_DMAREQ_MUX_SEL0_IOMUX BIT(0)
+
+#define IMX6Q_GPR1_PCIE_REQ_MASK (0x3 << 30)
+#define IMX6Q_GPR1_PCIE_EXIT_L1 BIT(28)
+#define IMX6Q_GPR1_PCIE_RDY_L23 BIT(27)
+#define IMX6Q_GPR1_PCIE_ENTER_L1 BIT(26)
+#define IMX6Q_GPR1_MIPI_COLOR_SW BIT(25)
+#define IMX6Q_GPR1_DPI_OFF BIT(24)
+#define IMX6Q_GPR1_EXC_MON_MASK BIT(22)
+#define IMX6Q_GPR1_EXC_MON_OKAY 0x0
+#define IMX6Q_GPR1_EXC_MON_SLVE BIT(22)
+#define IMX6Q_GPR1_ENET_CLK_SEL_MASK BIT(21)
+#define IMX6Q_GPR1_ENET_CLK_SEL_PAD 0
+#define IMX6Q_GPR1_ENET_CLK_SEL_ANATOP BIT(21)
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(20)
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET 0x0
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(20)
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(19)
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(19)
+#define IMX6Q_GPR1_PCIE_TEST_PD BIT(18)
+#define IMX6Q_GPR1_IPU_VPU_MUX_MASK BIT(17)
+#define IMX6Q_GPR1_IPU_VPU_MUX_IPU1 0x0
+#define IMX6Q_GPR1_IPU_VPU_MUX_IPU2 BIT(17)
+#define IMX6Q_GPR1_PCIE_REF_CLK_EN BIT(16)
+#define IMX6Q_GPR1_USB_EXP_MODE BIT(15)
+#define IMX6Q_GPR1_PCIE_INT BIT(14)
+#define IMX6Q_GPR1_USB_OTG_ID_SEL_MASK BIT(13)
+#define IMX6Q_GPR1_USB_OTG_ID_SEL_ENET_RX_ER 0x0
+#define IMX6Q_GPR1_USB_OTG_ID_SEL_GPIO_1 BIT(13)
+#define IMX6Q_GPR1_GINT BIT(12)
+#define IMX6Q_GPR1_ADDRS3_MASK (0x3 << 10)
+#define IMX6Q_GPR1_ADDRS3_32MB (0x0 << 10)
+#define IMX6Q_GPR1_ADDRS3_64MB (0x1 << 10)
+#define IMX6Q_GPR1_ADDRS3_128MB (0x2 << 10)
+#define IMX6Q_GPR1_ACT_CS3 BIT(9)
+#define IMX6Q_GPR1_ADDRS2_MASK (0x3 << 7)
+#define IMX6Q_GPR1_ACT_CS2 BIT(6)
+#define IMX6Q_GPR1_ADDRS1_MASK (0x3 << 4)
+#define IMX6Q_GPR1_ACT_CS1 BIT(3)
+#define IMX6Q_GPR1_ADDRS0_MASK (0x3 << 1)
+#define IMX6Q_GPR1_ACT_CS0 BIT(0)
+
+#define IMX6Q_GPR2_COUNTER_RESET_VAL_MASK (0x3 << 20)
+#define IMX6Q_GPR2_COUNTER_RESET_VAL_5 (0x0 << 20)
+#define IMX6Q_GPR2_COUNTER_RESET_VAL_3 (0x1 << 20)
+#define IMX6Q_GPR2_COUNTER_RESET_VAL_4 (0x2 << 20)
+#define IMX6Q_GPR2_COUNTER_RESET_VAL_6 (0x3 << 20)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_MASK (0x7 << 16)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_0 (0x0 << 16)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_1 (0x1 << 16)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_2 (0x2 << 16)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_3 (0x3 << 16)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_4 (0x4 << 16)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_5 (0x5 << 16)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_6 (0x6 << 16)
+#define IMX6Q_GPR2_LVDS_CLK_SHIFT_7 (0x7 << 16)
+#define IMX6Q_GPR2_BGREF_RRMODE_MASK BIT(15)
+#define IMX6Q_GPR2_BGREF_RRMODE_EXT_RESISTOR 0x0
+#define IMX6Q_GPR2_BGREF_RRMODE_INT_RESISTOR BIT(15)
+#define IMX6Q_GPR2_DI1_VS_POLARITY_MASK BIT(10)
+#define IMX6Q_GPR2_DI1_VS_POLARITY_ACTIVE_H 0x0
+#define IMX6Q_GPR2_DI1_VS_POLARITY_ACTIVE_L BIT(10)
+#define IMX6Q_GPR2_DI0_VS_POLARITY_MASK BIT(9)
+#define IMX6Q_GPR2_DI0_VS_POLARITY_ACTIVE_H 0x0
+#define IMX6Q_GPR2_DI0_VS_POLARITY_ACTIVE_L BIT(9)
+#define IMX6Q_GPR2_BIT_MAPPING_CH1_MASK BIT(8)
+#define IMX6Q_GPR2_BIT_MAPPING_CH1_SPWG 0x0
+#define IMX6Q_GPR2_BIT_MAPPING_CH1_JEIDA BIT(8)
+#define IMX6Q_GPR2_DATA_WIDTH_CH1_MASK BIT(7)
+#define IMX6Q_GPR2_DATA_WIDTH_CH1_18BIT 0x0
+#define IMX6Q_GPR2_DATA_WIDTH_CH1_24BIT BIT(7)
+#define IMX6Q_GPR2_BIT_MAPPING_CH0_MASK BIT(6)
+#define IMX6Q_GPR2_BIT_MAPPING_CH0_SPWG 0x0
+#define IMX6Q_GPR2_BIT_MAPPING_CH0_JEIDA BIT(6)
+#define IMX6Q_GPR2_DATA_WIDTH_CH0_MASK BIT(5)
+#define IMX6Q_GPR2_DATA_WIDTH_CH0_18BIT 0x0
+#define IMX6Q_GPR2_DATA_WIDTH_CH0_24BIT BIT(5)
+#define IMX6Q_GPR2_SPLIT_MODE_EN BIT(4)
+#define IMX6Q_GPR2_CH1_MODE_MASK (0x3 << 2)
+#define IMX6Q_GPR2_CH1_MODE_DISABLE (0x0 << 2)
+#define IMX6Q_GPR2_CH1_MODE_EN_ROUTE_DI0 (0x1 << 2)
+#define IMX6Q_GPR2_CH1_MODE_EN_ROUTE_DI1 (0x3 << 2)
+#define IMX6Q_GPR2_CH0_MODE_MASK (0x3 << 0)
+#define IMX6Q_GPR2_CH0_MODE_DISABLE (0x0 << 0)
+#define IMX6Q_GPR2_CH0_MODE_EN_ROUTE_DI0 (0x1 << 0)
+#define IMX6Q_GPR2_CH0_MODE_EN_ROUTE_DI1 (0x3 << 0)
+
+#define IMX6Q_GPR3_GPU_DBG_MASK (0x3 << 29)
+#define IMX6Q_GPR3_GPU_DBG_GPU3D (0x0 << 29)
+#define IMX6Q_GPR3_GPU_DBG_GPU2D (0x1 << 29)
+#define IMX6Q_GPR3_GPU_DBG_OPENVG (0x2 << 29)
+#define IMX6Q_GPR3_BCH_WR_CACHE_CTL BIT(28)
+#define IMX6Q_GPR3_BCH_RD_CACHE_CTL BIT(27)
+#define IMX6Q_GPR3_USDHCX_WR_CACHE_CTL BIT(26)
+#define IMX6Q_GPR3_USDHCX_RD_CACHE_CTL BIT(25)
+#define IMX6Q_GPR3_OCRAM_CTL_MASK (0xf << 21)
+#define IMX6Q_GPR3_OCRAM_STATUS_MASK (0xf << 17)
+#define IMX6Q_GPR3_CORE3_DBG_ACK_EN BIT(16)
+#define IMX6Q_GPR3_CORE2_DBG_ACK_EN BIT(15)
+#define IMX6Q_GPR3_CORE1_DBG_ACK_EN BIT(14)
+#define IMX6Q_GPR3_CORE0_DBG_ACK_EN BIT(13)
+#define IMX6Q_GPR3_TZASC2_BOOT_LOCK BIT(12)
+#define IMX6Q_GPR3_TZASC1_BOOT_LOCK BIT(11)
+#define IMX6Q_GPR3_IPU_DIAG_MASK BIT(10)
+#define IMX6Q_GPR3_LVDS1_MUX_CTL_MASK (0x3 << 8)
+#define IMX6Q_GPR3_LVDS1_MUX_CTL_IPU1_DI0 (0x0 << 8)
+#define IMX6Q_GPR3_LVDS1_MUX_CTL_IPU1_DI1 (0x1 << 8)
+#define IMX6Q_GPR3_LVDS1_MUX_CTL_IPU2_DI0 (0x2 << 8)
+#define IMX6Q_GPR3_LVDS1_MUX_CTL_IPU2_DI1 (0x3 << 8)
+#define IMX6Q_GPR3_LVDS0_MUX_CTL_MASK (0x3 << 6)
+#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI0 (0x0 << 6)
+#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU1_DI1 (0x1 << 6)
+#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI0 (0x2 << 6)
+#define IMX6Q_GPR3_LVDS0_MUX_CTL_IPU2_DI1 (0x3 << 6)
+#define IMX6Q_GPR3_MIPI_MUX_CTL_SHIFT 4
+#define IMX6Q_GPR3_MIPI_MUX_CTL_MASK (0x3 << 4)
+#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI0 (0x0 << 4)
+#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU1_DI1 (0x1 << 4)
+#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU2_DI0 (0x2 << 4)
+#define IMX6Q_GPR3_MIPI_MUX_CTL_IPU2_DI1 (0x3 << 4)
+#define IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT 2
+#define IMX6Q_GPR3_HDMI_MUX_CTL_MASK (0x3 << 2)
+#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU1_DI0 (0x0 << 2)
+#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU1_DI1 (0x1 << 2)
+#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU2_DI0 (0x2 << 2)
+#define IMX6Q_GPR3_HDMI_MUX_CTL_IPU2_DI1 (0x3 << 2)
+
+#define IMX6Q_GPR4_VDOA_WR_CACHE_SEL BIT(31)
+#define IMX6Q_GPR4_VDOA_RD_CACHE_SEL BIT(30)
+#define IMX6Q_GPR4_VDOA_WR_CACHE_VAL BIT(29)
+#define IMX6Q_GPR4_VDOA_RD_CACHE_VAL BIT(28)
+#define IMX6Q_GPR4_PCIE_WR_CACHE_SEL BIT(27)
+#define IMX6Q_GPR4_PCIE_RD_CACHE_SEL BIT(26)
+#define IMX6Q_GPR4_PCIE_WR_CACHE_VAL BIT(25)
+#define IMX6Q_GPR4_PCIE_RD_CACHE_VAL BIT(24)
+#define IMX6Q_GPR4_SDMA_STOP_ACK BIT(19)
+#define IMX6Q_GPR4_CAN2_STOP_ACK BIT(18)
+#define IMX6Q_GPR4_CAN1_STOP_ACK BIT(17)
+#define IMX6Q_GPR4_ENET_STOP_ACK BIT(16)
+#define IMX6Q_GPR4_SOC_VERSION_MASK (0xff << 8)
+#define IMX6Q_GPR4_SOC_VERSION_OFF 0x8
+#define IMX6Q_GPR4_VPU_WR_CACHE_SEL BIT(7)
+#define IMX6Q_GPR4_VPU_RD_CACHE_SEL BIT(6)
+#define IMX6Q_GPR4_VPU_P_WR_CACHE_VAL BIT(3)
+#define IMX6Q_GPR4_VPU_P_RD_CACHE_VAL_MASK BIT(2)
+#define IMX6Q_GPR4_IPU_WR_CACHE_CTL BIT(1)
+#define IMX6Q_GPR4_IPU_RD_CACHE_CTL BIT(0)
+
+#define IMX6Q_GPR5_L2_CLK_STOP BIT(8)
+
+#define IMX6Q_GPR6_IPU1_ID00_WR_QOS_MASK (0xf << 0)
+#define IMX6Q_GPR6_IPU1_ID01_WR_QOS_MASK (0xf << 4)
+#define IMX6Q_GPR6_IPU1_ID10_WR_QOS_MASK (0xf << 8)
+#define IMX6Q_GPR6_IPU1_ID11_WR_QOS_MASK (0xf << 12)
+#define IMX6Q_GPR6_IPU1_ID00_RD_QOS_MASK (0xf << 16)
+#define IMX6Q_GPR6_IPU1_ID01_RD_QOS_MASK (0xf << 20)
+#define IMX6Q_GPR6_IPU1_ID10_RD_QOS_MASK (0xf << 24)
+#define IMX6Q_GPR6_IPU1_ID11_RD_QOS_MASK (0xf << 28)
+
+#define IMX6Q_GPR7_IPU2_ID00_WR_QOS_MASK (0xf << 0)
+#define IMX6Q_GPR7_IPU2_ID01_WR_QOS_MASK (0xf << 4)
+#define IMX6Q_GPR7_IPU2_ID10_WR_QOS_MASK (0xf << 8)
+#define IMX6Q_GPR7_IPU2_ID11_WR_QOS_MASK (0xf << 12)
+#define IMX6Q_GPR7_IPU2_ID00_RD_QOS_MASK (0xf << 16)
+#define IMX6Q_GPR7_IPU2_ID01_RD_QOS_MASK (0xf << 20)
+#define IMX6Q_GPR7_IPU2_ID10_RD_QOS_MASK (0xf << 24)
+#define IMX6Q_GPR7_IPU2_ID11_RD_QOS_MASK (0xf << 28)
+
+#define IMX6Q_GPR8_TX_SWING_LOW (0x7f << 25)
+#define IMX6Q_GPR8_TX_SWING_FULL (0x7f << 18)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB (0x3f << 12)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB (0x3f << 6)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN1 (0x3f << 0)
+
+#define IMX6Q_GPR9_TZASC2_BYP BIT(1)
+#define IMX6Q_GPR9_TZASC1_BYP BIT(0)
+
+#define IMX6Q_GPR10_LOCK_DBG_EN BIT(29)
+#define IMX6Q_GPR10_LOCK_DBG_CLK_EN BIT(28)
+#define IMX6Q_GPR10_LOCK_SEC_ERR_RESP BIT(27)
+#define IMX6Q_GPR10_LOCK_OCRAM_TZ_ADDR (0x3f << 21)
+#define IMX6Q_GPR10_LOCK_OCRAM_TZ_EN BIT(20)
+#define IMX6Q_GPR10_LOCK_DCIC2_MUX_MASK (0x3 << 18)
+#define IMX6Q_GPR10_LOCK_DCIC1_MUX_MASK (0x3 << 16)
+#define IMX6Q_GPR10_DBG_EN BIT(13)
+#define IMX6Q_GPR10_DBG_CLK_EN BIT(12)
+#define IMX6Q_GPR10_SEC_ERR_RESP_MASK BIT(11)
+#define IMX6Q_GPR10_SEC_ERR_RESP_OKEY 0x0
+#define IMX6Q_GPR10_SEC_ERR_RESP_SLVE BIT(11)
+#define IMX6Q_GPR10_OCRAM_TZ_ADDR_MASK (0x3f << 5)
+#define IMX6Q_GPR10_OCRAM_TZ_EN_MASK BIT(4)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_MASK (0x3 << 2)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI0 (0x0 << 2)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI1 (0x1 << 2)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU2_DI0 (0x2 << 2)
+#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU2_DI1 (0x3 << 2)
+#define IMX6Q_GPR10_DCIC1_MUX_CTL_MASK (0x3 << 0)
+#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI0 (0x0 << 0)
+#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI1 (0x1 << 0)
+#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU2_DI0 (0x2 << 0)
+#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU2_DI1 (0x3 << 0)
+
+#define IMX6Q_GPR12_ARMP_IPG_CLK_EN BIT(27)
+#define IMX6Q_GPR12_ARMP_AHB_CLK_EN BIT(26)
+#define IMX6Q_GPR12_ARMP_ATB_CLK_EN BIT(25)
+#define IMX6Q_GPR12_ARMP_APB_CLK_EN BIT(24)
+#define IMX6Q_GPR12_DEVICE_TYPE (0xf << 12)
+#define IMX6Q_GPR12_PCIE_CTL_2 BIT(10)
+#define IMX6Q_GPR12_LOS_LEVEL (0x1f << 4)
+
+#define IMX6Q_GPR13_SDMA_STOP_REQ BIT(30)
+#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29)
+#define IMX6Q_GPR13_CAN1_STOP_REQ BIT(28)
+#define IMX6Q_GPR13_ENET_STOP_REQ BIT(27)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK (0x7 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB (0x0 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB (0x1 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB (0x2 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB (0x3 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB (0x4 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB (0x5 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB (0x6 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB (0x7 << 24)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK (0x1f << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1I (0x10 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1M (0x10 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1X (0x1a << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2I (0x12 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M (0x12 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2X (0x1a << 19)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK (0x7 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_1F (0x0 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_2F (0x1 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_4F (0x2 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F (0x3 << 16)
+#define IMX6Q_GPR13_SATA_SPD_MODE_MASK BIT(15)
+#define IMX6Q_GPR13_SATA_SPD_MODE_1P5G 0x0
+#define IMX6Q_GPR13_SATA_SPD_MODE_3P0G BIT(15)
+#define IMX6Q_GPR13_SATA_MPLL_SS_EN BIT(14)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_MASK (0x7 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_16_16 (0x0 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_14_16 (0x1 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_12_16 (0x2 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_10_16 (0x3 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_9_16 (0x4 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_8_16 (0x5 << 11)
+#define IMX6Q_GPR13_SATA_TX_BOOST_MASK (0xf << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB (0x0 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB (0x1 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB (0x2 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB (0x3 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB (0x4 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB (0x5 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB (0x6 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB (0x7 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB (0x8 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB (0x9 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB (0xa << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB (0xb << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB (0xc << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB (0xd << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB (0xe << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB (0xf << 7)
+#define IMX6Q_GPR13_SATA_TX_LVL_MASK (0x1f << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_937_V (0x00 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_947_V (0x01 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_957_V (0x02 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_966_V (0x03 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_976_V (0x04 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_986_V (0x05 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_996_V (0x06 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_005_V (0x07 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_015_V (0x08 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_025_V (0x09 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_035_V (0x0a << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_045_V (0x0b << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_054_V (0x0c << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_064_V (0x0d << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_074_V (0x0e << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_084_V (0x0f << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_094_V (0x10 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_104_V (0x11 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_113_V (0x12 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_123_V (0x13 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_133_V (0x14 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_143_V (0x15 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_152_V (0x16 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_162_V (0x17 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_172_V (0x18 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_182_V (0x19 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_191_V (0x1a << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_201_V (0x1b << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_211_V (0x1c << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_221_V (0x1d << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_230_V (0x1e << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2)
+#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1)
+#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0)
+
+/* imx6sl iomux GPR register field definitions */
+#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
+#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
+
+/* imx6sx iomux GPR register field definitions */
+#define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20)
+#define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20)
+#define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20)
+#define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19)
+#define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19)
+#define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19)
+#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13)
+#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
+#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
+
+#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
+#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
+
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3)
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3)
+#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3)
+
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27)
+#define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26)
+#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4)
+#define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4)
+
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
+#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
+
+#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
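These GPR field definitions are typically paired with the syscon API from <linux/mfd/syscon.h>. A sketch that routes the ENET reference clock from ANATOP instead of the pad; the compatible string is the one commonly used for this block and may differ on other boards:

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>

/* Select the ANATOP-provided ENET clock via GPR1. */
static int example_select_enet_clk(void)
{
        struct regmap *gpr;

        gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
        if (IS_ERR(gpr))
                return PTR_ERR(gpr);

        return regmap_update_bits(gpr, IOMUXC_GPR1,
                                  IMX6Q_GPR1_ENET_CLK_SEL_MASK,
                                  IMX6Q_GPR1_ENET_CLK_SEL_ANATOP);
}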
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
new file mode 100644
index 000000000..b4629818a
--- /dev/null
+++ b/include/linux/mfd/t7l66xb.h
@@ -0,0 +1,34 @@
+/*
+ * This file contains the definitions for the T7L66XB
+ *
+ * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef MFD_T7L66XB_H
+#define MFD_T7L66XB_H
+
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+
+struct t7l66xb_platform_data {
+ int (*enable)(struct platform_device *dev);
+ int (*disable)(struct platform_device *dev);
+ int (*suspend)(struct platform_device *dev);
+ int (*resume)(struct platform_device *dev);
+
+ int irq_base; /* The base for subdevice irqs */
+
+ struct tmio_nand_data *nand_data;
+};
+
+
+#define IRQ_T7L66XB_MMC (1)
+#define IRQ_T7L66XB_NAND (3)
+
+#define T7L66XB_NR_IRQS 8
+
+#endif
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
new file mode 100644
index 000000000..468c31a27
--- /dev/null
+++ b/include/linux/mfd/tc3589x.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License, version 2
+ */
+
+#ifndef __LINUX_MFD_TC3589x_H
+#define __LINUX_MFD_TC3589x_H
+
+struct device;
+
+enum tx3589x_block {
+ TC3589x_BLOCK_GPIO = 1 << 0,
+ TC3589x_BLOCK_KEYPAD = 1 << 1,
+};
+
+#define TC3589x_RSTCTRL_IRQRST (1 << 4)
+#define TC3589x_RSTCTRL_TIMRST (1 << 3)
+#define TC3589x_RSTCTRL_ROTRST (1 << 2)
+#define TC3589x_RSTCTRL_KBDRST (1 << 1)
+#define TC3589x_RSTCTRL_GPIRST (1 << 0)
+
+/* Keyboard Configuration Registers */
+#define TC3589x_KBDSETTLE_REG 0x01
+#define TC3589x_KBDBOUNCE 0x02
+#define TC3589x_KBDSIZE 0x03
+#define TC3589x_KBCFG_LSB 0x04
+#define TC3589x_KBCFG_MSB 0x05
+#define TC3589x_KBDIC 0x08
+#define TC3589x_KBDMSK 0x09
+#define TC3589x_EVTCODE_FIFO 0x10
+#define TC3589x_KBDMFS 0x8F
+
+#define TC3589x_IRQST 0x91
+
+#define TC3589x_MANFCODE_MAGIC 0x03
+#define TC3589x_MANFCODE 0x80
+#define TC3589x_VERSION 0x81
+#define TC3589x_IOCFG 0xA7
+
+#define TC3589x_CLKMODE 0x88
+#define TC3589x_CLKCFG 0x89
+#define TC3589x_CLKEN 0x8A
+
+#define TC3589x_RSTCTRL 0x82
+#define TC3589x_EXTRSTN 0x83
+#define TC3589x_RSTINTCLR 0x84
+
+/* Pull up/down configuration registers */
+#define TC3589x_IOCFG 0xA7
+#define TC3589x_IOPULLCFG0_LSB 0xAA
+#define TC3589x_IOPULLCFG0_MSB 0xAB
+#define TC3589x_IOPULLCFG1_LSB 0xAC
+#define TC3589x_IOPULLCFG1_MSB 0xAD
+#define TC3589x_IOPULLCFG2_LSB 0xAE
+
+#define TC3589x_GPIOIS0 0xC9
+#define TC3589x_GPIOIS1 0xCA
+#define TC3589x_GPIOIS2 0xCB
+#define TC3589x_GPIOIBE0 0xCC
+#define TC3589x_GPIOIBE1 0xCD
+#define TC3589x_GPIOIBE2 0xCE
+#define TC3589x_GPIOIEV0 0xCF
+#define TC3589x_GPIOIEV1 0xD0
+#define TC3589x_GPIOIEV2 0xD1
+#define TC3589x_GPIOIE0 0xD2
+#define TC3589x_GPIOIE1 0xD3
+#define TC3589x_GPIOIE2 0xD4
+#define TC3589x_GPIORIS0 0xD6
+#define TC3589x_GPIORIS1 0xD7
+#define TC3589x_GPIORIS2 0xD8
+#define TC3589x_GPIOMIS0 0xD9
+#define TC3589x_GPIOMIS1 0xDA
+#define TC3589x_GPIOMIS2 0xDB
+#define TC3589x_GPIOIC0 0xDC
+#define TC3589x_GPIOIC1 0xDD
+#define TC3589x_GPIOIC2 0xDE
+
+#define TC3589x_GPIODATA0 0xC0
+#define TC3589x_GPIOMASK0 0xc1
+#define TC3589x_GPIODATA1 0xC2
+#define TC3589x_GPIOMASK1 0xc3
+#define TC3589x_GPIODATA2 0xC4
+#define TC3589x_GPIOMASK2 0xC5
+
+#define TC3589x_GPIODIR0 0xC6
+#define TC3589x_GPIODIR1 0xC7
+#define TC3589x_GPIODIR2 0xC8
+
+#define TC3589x_GPIOSYNC0 0xE6
+#define TC3589x_GPIOSYNC1 0xE7
+#define TC3589x_GPIOSYNC2 0xE8
+
+#define TC3589x_GPIOWAKE0 0xE9
+#define TC3589x_GPIOWAKE1 0xEA
+#define TC3589x_GPIOWAKE2 0xEB
+
+#define TC3589x_GPIOODM0 0xE0
+#define TC3589x_GPIOODE0 0xE1
+#define TC3589x_GPIOODM1 0xE2
+#define TC3589x_GPIOODE1 0xE3
+#define TC3589x_GPIOODM2 0xE4
+#define TC3589x_GPIOODE2 0xE5
+
+#define TC3589x_INT_GPIIRQ 0
+#define TC3589x_INT_TI0IRQ 1
+#define TC3589x_INT_TI1IRQ 2
+#define TC3589x_INT_TI2IRQ 3
+#define TC3589x_INT_ROTIRQ 5
+#define TC3589x_INT_KBDIRQ 6
+#define TC3589x_INT_PORIRQ 7
+
+#define TC3589x_NR_INTERNAL_IRQS 8
+
+struct tc3589x {
+ struct mutex lock;
+ struct device *dev;
+ struct i2c_client *i2c;
+ struct irq_domain *domain;
+
+ int irq_base;
+ int num_gpio;
+ struct tc3589x_platform_data *pdata;
+};
+
+extern int tc3589x_reg_write(struct tc3589x *tc3589x, u8 reg, u8 data);
+extern int tc3589x_reg_read(struct tc3589x *tc3589x, u8 reg);
+extern int tc3589x_block_read(struct tc3589x *tc3589x, u8 reg, u8 length,
+ u8 *values);
+extern int tc3589x_block_write(struct tc3589x *tc3589x, u8 reg, u8 length,
+ const u8 *values);
+extern int tc3589x_set_bits(struct tc3589x *tc3589x, u8 reg, u8 mask, u8 val);
+
+/*
+ * Keypad related platform specific constants
+ * These values may be modified for fine tuning
+ */
+#define TC_KPD_ROWS 0x8
+#define TC_KPD_COLUMNS 0x8
+#define TC_KPD_DEBOUNCE_PERIOD 0xA3
+#define TC_KPD_SETTLE_TIME 0xA3
+
+
+/**
+ * struct tc3589x_platform_data - TC3589x platform data
+ * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
+ */
+struct tc3589x_platform_data {
+ unsigned int block;
+};
+
+#endif
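The tc3589x_reg_*()/tc3589x_set_bits() declarations above are the whole API offered to the GPIO and keypad subdrivers. A sketch of their use; the choice of register and bit is illustrative only, since the core driver normally handles reset control itself:

#include <linux/mfd/tc3589x.h>

/* Take the GPIO block out of reset and read back the chip version. */
static int example_tc3589x_init(struct tc3589x *tc3589x)
{
        int ret;

        ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL,
                               TC3589x_RSTCTRL_GPIRST, 0);
        if (ret < 0)
                return ret;

        return tc3589x_reg_read(tc3589x, TC3589x_VERSION);
}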
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
new file mode 100644
index 000000000..b48882094
--- /dev/null
+++ b/include/linux/mfd/tc6387xb.h
@@ -0,0 +1,20 @@
+/*
+ * This file contains the definitions for the TC6387XB
+ *
+ * (C) Copyright 2005 Ian Molton <spyro@f2s.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ */
+#ifndef MFD_TC6387XB_H
+#define MFD_TC6387XB_H
+
+struct tc6387xb_platform_data {
+ int (*enable)(struct platform_device *dev);
+ int (*disable)(struct platform_device *dev);
+ int (*suspend)(struct platform_device *dev);
+ int (*resume)(struct platform_device *dev);
+};
+
+#endif
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
new file mode 100644
index 000000000..626e44820
--- /dev/null
+++ b/include/linux/mfd/tc6393xb.h
@@ -0,0 +1,59 @@
+/*
+ * Toshiba TC6393XB SoC support
+ *
+ * Copyright(c) 2005-2006 Chris Humbert
+ * Copyright(c) 2005 Dirk Opfer
+ * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
+ * Copyright(c) 2007 Dmitry Baryshkov
+ *
+ * Based on code written by Sharp/Lineo for 2.4 kernels
+ * Based on locomo.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MFD_TC6393XB_H
+#define MFD_TC6393XB_H
+
+#include <linux/fb.h>
+
+/* Also one should provide the CK3P6MI clock */
+struct tc6393xb_platform_data {
+ u16 scr_pll2cr; /* PLL2 Control */
+ u16 scr_gper; /* GP Enable */
+
+ int (*enable)(struct platform_device *dev);
+ int (*disable)(struct platform_device *dev);
+ int (*suspend)(struct platform_device *dev);
+ int (*resume)(struct platform_device *dev);
+
+ int irq_base; /* base for subdevice irqs */
+ int gpio_base;
+ int (*setup)(struct platform_device *dev);
+ void (*teardown)(struct platform_device *dev);
+
+ struct tmio_nand_data *nand_data;
+ struct tmio_fb_data *fb_data;
+
+ unsigned resume_restore : 1; /* take special actions
+ to preserve the state
+ on suspend/resume */
+};
+
+extern int tc6393xb_lcd_mode(struct platform_device *fb,
+ const struct fb_videomode *mode);
+extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on);
+
+/*
+ * Relative to irq_base
+ */
+#define IRQ_TC6393_NAND 0
+#define IRQ_TC6393_MMC 1
+#define IRQ_TC6393_OHCI 2
+#define IRQ_TC6393_FB 4
+
+#define TC6393XB_NR_IRQS 8
+
+#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
new file mode 100644
index 000000000..1fd50dcfe
--- /dev/null
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -0,0 +1,190 @@
+#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H
+#define __LINUX_TI_AM335X_TSCADC_MFD_H
+
+/*
+ * TI Touch Screen / ADC MFD driver
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mfd/core.h>
+
+#define REG_RAWIRQSTATUS 0x024
+#define REG_IRQSTATUS 0x028
+#define REG_IRQENABLE 0x02C
+#define REG_IRQCLR 0x030
+#define REG_IRQWAKEUP 0x034
+#define REG_CTRL 0x040
+#define REG_ADCFSM 0x044
+#define REG_CLKDIV 0x04C
+#define REG_SE 0x054
+#define REG_IDLECONFIG 0x058
+#define REG_CHARGECONFIG 0x05C
+#define REG_CHARGEDELAY 0x060
+#define REG_STEPCONFIG(n) (0x64 + ((n) * 8))
+#define REG_STEPDELAY(n) (0x68 + ((n) * 8))
+#define REG_FIFO0CNT 0xE4
+#define REG_FIFO0THR 0xE8
+#define REG_FIFO1CNT 0xF0
+#define REG_FIFO1THR 0xF4
+#define REG_FIFO0 0x100
+#define REG_FIFO1 0x200
+
+/* Register Bitfields */
+/* IRQ wakeup enable */
+#define IRQWKUP_ENB BIT(0)
+
+/* Step Enable */
+#define STEPENB_MASK (0x1FFFF << 0)
+#define STEPENB(val) ((val) << 0)
+#define ENB(val) (1 << (val))
+#define STPENB_STEPENB STEPENB(0x1FFFF)
+#define STPENB_STEPENB_TC STEPENB(0x1FFF)
+
+/* IRQ enable */
+#define IRQENB_HW_PEN BIT(0)
+#define IRQENB_EOS BIT(1)
+#define IRQENB_FIFO0THRES BIT(2)
+#define IRQENB_FIFO0OVRRUN BIT(3)
+#define IRQENB_FIFO0UNDRFLW BIT(4)
+#define IRQENB_FIFO1THRES BIT(5)
+#define IRQENB_FIFO1OVRRUN BIT(6)
+#define IRQENB_FIFO1UNDRFLW BIT(7)
+#define IRQENB_PENUP BIT(9)
+
+/* Step Configuration */
+#define STEPCONFIG_MODE_MASK (3 << 0)
+#define STEPCONFIG_MODE(val) ((val) << 0)
+#define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1)
+#define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2)
+#define STEPCONFIG_AVG_MASK (7 << 2)
+#define STEPCONFIG_AVG(val) ((val) << 2)
+#define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4)
+#define STEPCONFIG_XPP BIT(5)
+#define STEPCONFIG_XNN BIT(6)
+#define STEPCONFIG_YPP BIT(7)
+#define STEPCONFIG_YNN BIT(8)
+#define STEPCONFIG_XNP BIT(9)
+#define STEPCONFIG_YPN BIT(10)
+#define STEPCONFIG_INM_MASK (0xF << 15)
+#define STEPCONFIG_INM(val) ((val) << 15)
+#define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8)
+#define STEPCONFIG_INP_MASK (0xF << 19)
+#define STEPCONFIG_INP(val) ((val) << 19)
+#define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4)
+#define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8)
+#define STEPCONFIG_FIFO1 BIT(26)
+
+/* Delay register */
+#define STEPDELAY_OPEN_MASK (0x3FFFF << 0)
+#define STEPDELAY_OPEN(val) ((val) << 0)
+#define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098)
+#define STEPDELAY_SAMPLE_MASK (0xFF << 24)
+#define STEPDELAY_SAMPLE(val) ((val) << 24)
+#define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0)
+
+/* Charge Config */
+#define STEPCHARGE_RFP_MASK (7 << 12)
+#define STEPCHARGE_RFP(val) ((val) << 12)
+#define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1)
+#define STEPCHARGE_INM_MASK (0xF << 15)
+#define STEPCHARGE_INM(val) ((val) << 15)
+#define STEPCHARGE_INM_AN1 STEPCHARGE_INM(1)
+#define STEPCHARGE_INP_MASK (0xF << 19)
+#define STEPCHARGE_INP(val) ((val) << 19)
+#define STEPCHARGE_RFM_MASK (3 << 23)
+#define STEPCHARGE_RFM(val) ((val) << 23)
+#define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1)
+
+/* Charge delay */
+#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0)
+#define CHARGEDLY_OPEN(val) ((val) << 0)
+#define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400)
+
+/* Control register */
+#define CNTRLREG_TSCSSENB BIT(0)
+#define CNTRLREG_STEPID BIT(1)
+#define CNTRLREG_STEPCONFIGWRT BIT(2)
+#define CNTRLREG_POWERDOWN BIT(4)
+#define CNTRLREG_AFE_CTRL_MASK (3 << 5)
+#define CNTRLREG_AFE_CTRL(val) ((val) << 5)
+#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1)
+#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2)
+#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
+#define CNTRLREG_TSCENB BIT(7)
+
+/* FIFO READ Register */
+#define FIFOREAD_DATA_MASK (0xfff << 0)
+#define FIFOREAD_CHNLID_MASK (0xf << 16)
+
+/* Sequencer Status */
+#define SEQ_STATUS BIT(5)
+#define CHARGE_STEP 0x11
+
+#define ADC_CLK 3000000
+#define TOTAL_STEPS 16
+#define TOTAL_CHANNELS 8
+#define FIFO1_THRESHOLD 19
+
+/*
+ * time in us for processing a single channel, calculated as follows:
+ *
+ * num cycles = open delay + (sample delay + conv time) * averaging
+ *
+ * num cycles: 152 + (1 + 13) * 16 = 376
+ *
+ * clock frequency: 26MHz / 8 = 3.25MHz
+ * clock period: 1 / 3.25MHz = 308ns
+ *
+ * processing time: 376 * 308ns = 116us
+ */
+#define IDLE_TIMEOUT 116 /* microsec */
+
+#define TSCADC_CELLS 2
+
+struct ti_tscadc_dev {
+ struct device *dev;
+ struct regmap *regmap_tscadc;
+ void __iomem *tscadc_base;
+ int irq;
+ int used_cells; /* 1-2 */
+ int tsc_wires;
+ int tsc_cell; /* -1 if not used */
+ int adc_cell; /* -1 if not used */
+ struct mfd_cell cells[TSCADC_CELLS];
+ u32 reg_se_cache;
+ bool adc_waiting;
+ bool adc_in_use;
+ wait_queue_head_t reg_se_wait;
+ spinlock_t reg_lock;
+ unsigned int clk_div;
+
+ /* tsc device */
+ struct titsc *tsc;
+
+ /* adc device */
+ struct adc_device *adc;
+};
+
+static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p)
+{
+ struct ti_tscadc_dev **tscadc_dev = p->dev.platform_data;
+
+ return *tscadc_dev;
+}
+
+void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val);
+void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val);
+void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val);
+void am335x_tsc_se_adc_done(struct ti_tscadc_dev *tsadc);
+
+#endif
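The parent MFD driver stores a pointer to its ti_tscadc_dev in each cell's platform data, which is why ti_tscadc_dev_get() dereferences platform_data once. A sketch of how a child (TSC or ADC) driver might retrieve it in probe; the function name and the step mask are illustrative:

#include <linux/errno.h>
#include <linux/mfd/ti_am335x_tscadc.h>
#include <linux/platform_device.h>

/* Fetch the shared MFD state from a child cell's platform device. */
static int example_child_probe(struct platform_device *pdev)
{
        struct ti_tscadc_dev *tscadc;

        if (!pdev->dev.platform_data)
                return -EINVAL;

        tscadc = ti_tscadc_dev_get(pdev);

        /* e.g. reserve steps 0 and 1 by updating the cached REG_SE value */
        am335x_tsc_se_set_cache(tscadc, STEPENB(0x3));

        return 0;
}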
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
new file mode 100644
index 000000000..24b86d538
--- /dev/null
+++ b/include/linux/mfd/tmio.h
@@ -0,0 +1,154 @@
+#ifndef MFD_TMIO_H
+#define MFD_TMIO_H
+
+#include <linux/device.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/mmc/card.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define tmio_ioread8(addr) readb(addr)
+#define tmio_ioread16(addr) readw(addr)
+#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
+#define tmio_ioread32(addr) \
+ (((u32) readw((addr))) | (((u32) readw((addr) + 2)) << 16))
+
+#define tmio_iowrite8(val, addr) writeb((val), (addr))
+#define tmio_iowrite16(val, addr) writew((val), (addr))
+#define tmio_iowrite16_rep(r, b, l) writesw(r, b, l)
+#define tmio_iowrite32(val, addr) \
+ do { \
+ writew((val), (addr)); \
+ writew((val) >> 16, (addr) + 2); \
+ } while (0)
+
+#define CNF_CMD 0x04
+#define CNF_CTL_BASE 0x10
+#define CNF_INT_PIN 0x3d
+#define CNF_STOP_CLK_CTL 0x40
+#define CNF_GCLK_CTL 0x41
+#define CNF_SD_CLK_MODE 0x42
+#define CNF_PIN_STATUS 0x44
+#define CNF_PWR_CTL_1 0x48
+#define CNF_PWR_CTL_2 0x49
+#define CNF_PWR_CTL_3 0x4a
+#define CNF_CARD_DETECT_MODE 0x4c
+#define CNF_SD_SLOT 0x50
+#define CNF_EXT_GCLK_CTL_1 0xf0
+#define CNF_EXT_GCLK_CTL_2 0xf1
+#define CNF_EXT_GCLK_CTL_3 0xf9
+#define CNF_SD_LED_EN_1 0xfa
+#define CNF_SD_LED_EN_2 0xfe
+
+#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG) */
+
+#define sd_config_write8(base, shift, reg, val) \
+ tmio_iowrite8((val), (base) + ((reg) << (shift)))
+#define sd_config_write16(base, shift, reg, val) \
+ tmio_iowrite16((val), (base) + ((reg) << (shift)))
+#define sd_config_write32(base, shift, reg, val) \
+ do { \
+ tmio_iowrite16((val), (base) + ((reg) << (shift))); \
+ tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
+ } while (0)
+
+/* tmio MMC platform flags */
+#define TMIO_MMC_WRPROTECT_DISABLE (1 << 0)
+/*
+ * Some controllers can support a 2-byte block size when the bus width
+ * is configured in 4-bit mode.
+ */
+#define TMIO_MMC_BLKSZ_2BYTES (1 << 1)
+/*
+ * Some controllers can support SDIO IRQ signalling.
+ */
+#define TMIO_MMC_SDIO_IRQ (1 << 2)
+/*
+ * Some controllers require waiting for the SD bus to become
+ * idle before writing to some registers.
+ */
+#define TMIO_MMC_HAS_IDLE_WAIT (1 << 4)
+/*
+ * A GPIO is used for card hotplug detection. We need an extra flag for this,
+ * because 0 is a valid GPIO number too, and requiring users to specify
+ * cd_gpio < 0 to disable GPIO hotplug would break backwards compatibility.
+ */
+#define TMIO_MMC_USE_GPIO_CD (1 << 5)
+
+/*
+ * Some controllers do not have registers above offset 0x100. This flag
+ * indicates whether CTL_SD_CARD_CLK_CTL / CTL_CLK_AND_WAIT_CTL are
+ * accessible.
+ */
+#define TMIO_MMC_HAVE_HIGH_REG (1 << 6)
+
+/*
+ * Some controllers have CMD12 automatically
+ * issue/non-issue register
+ */
+#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
+
+/*
+ * Some controllers need the reserved bits of the SDIO status register set to 1
+ */
+#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
+
+/*
+ * Some controllers allow setting the SDx actual clock
+ */
+#define TMIO_MMC_CLK_ACTUAL (1 << 10)
+
+int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
+int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
+void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
+void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
+
+struct dma_chan;
+
+/*
+ * data for the MMC controller
+ */
+struct tmio_mmc_data {
+ void *chan_priv_tx;
+ void *chan_priv_rx;
+ unsigned int hclk;
+ unsigned long capabilities;
+ unsigned long capabilities2;
+ unsigned long flags;
+ u32 ocr_mask; /* available voltages */
+ unsigned int cd_gpio;
+ int alignment_shift;
+ dma_addr_t dma_rx_offset;
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
+};
+
+/*
+ * data for the NAND controller
+ */
+struct tmio_nand_data {
+ struct nand_bbt_descr *badblock_pattern;
+ struct mtd_partition *partition;
+ unsigned int num_partitions;
+};
+
+#define FBIO_TMIO_ACC_WRITE 0x7C639300
+#define FBIO_TMIO_ACC_SYNC 0x7C639301
+
+struct tmio_fb_data {
+ int (*lcd_set_power)(struct platform_device *fb_dev,
+ bool on);
+ int (*lcd_mode)(struct platform_device *fb_dev,
+ const struct fb_videomode *mode);
+ int num_modes;
+ struct fb_videomode *modes;
+
+ /* in mm: size of screen */
+ int height;
+ int width;
+};
+
+
+#endif
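Board code passes a tmio_mmc_data instance to the MMC cell. A minimal sketch; the clock rate, OCR mask and flag selection are illustrative, but every field shown exists in the structure above:

#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>

/* Example platform data: 24 MHz HCLK, 3.2-3.4 V, SDIO IRQs enabled,
 * and waiting for an idle SD bus before certain register writes. */
static struct tmio_mmc_data example_tmio_mmc_data = {
        .hclk           = 24000000,
        .ocr_mask       = MMC_VDD_32_33 | MMC_VDD_33_34,
        .flags          = TMIO_MMC_SDIO_IRQ | TMIO_MMC_HAS_IDLE_WAIT,
};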
diff --git a/include/linux/mfd/tps6105x.h b/include/linux/mfd/tps6105x.h
new file mode 100644
index 000000000..386743dd9
--- /dev/null
+++ b/include/linux/mfd/tps6105x.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef MFD_TPS6105X_H
+#define MFD_TPS6105X_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/machine.h>
+
+/*
+ * Register definitions to all subdrivers
+ */
+#define TPS6105X_REG_0 0x00
+#define TPS6105X_REG0_MODE_SHIFT 6
+#define TPS6105X_REG0_MODE_MASK (0x03<<6)
+/* These defines for both reg0 and reg1 */
+#define TPS6105X_REG0_MODE_SHUTDOWN 0x00
+#define TPS6105X_REG0_MODE_TORCH 0x01
+#define TPS6105X_REG0_MODE_TORCH_FLASH 0x02
+#define TPS6105X_REG0_MODE_VOLTAGE 0x03
+#define TPS6105X_REG0_VOLTAGE_SHIFT 4
+#define TPS6105X_REG0_VOLTAGE_MASK (3<<4)
+#define TPS6105X_REG0_VOLTAGE_450 0
+#define TPS6105X_REG0_VOLTAGE_500 1
+#define TPS6105X_REG0_VOLTAGE_525 2
+#define TPS6105X_REG0_VOLTAGE_500_2 3
+#define TPS6105X_REG0_DIMMING_SHIFT 3
+#define TPS6105X_REG0_TORCHC_SHIFT 0
+#define TPS6105X_REG0_TORCHC_MASK (7<<0)
+#define TPS6105X_REG0_TORCHC_0 0x00
+#define TPS6105X_REG0_TORCHC_50 0x01
+#define TPS6105X_REG0_TORCHC_75 0x02
+#define TPS6105X_REG0_TORCHC_100 0x03
+#define TPS6105X_REG0_TORCHC_150 0x04
+#define TPS6105X_REG0_TORCHC_200 0x05
+#define TPS6105X_REG0_TORCHC_250_400 0x06
+#define TPS6105X_REG0_TORCHC_250_500 0x07
+#define TPS6105X_REG_1 0x01
+#define TPS6105X_REG1_MODE_SHIFT 6
+#define TPS6105X_REG1_MODE_MASK (0x03<<6)
+#define TPS6105X_REG1_MODE_SHUTDOWN 0x00
+#define TPS6105X_REG1_MODE_TORCH 0x01
+#define TPS6105X_REG1_MODE_TORCH_FLASH 0x02
+#define TPS6105X_REG1_MODE_VOLTAGE 0x03
+#define TPS6105X_REG_2 0x02
+#define TPS6105X_REG_3 0x03
+
+/**
+ * enum tps6105x_mode - desired mode for the TPS6105x
+ * @TPS6105X_MODE_SHUTDOWN: this instance is inactive, not used for anything
+ * @TPS6105X_MODE_TORCH: this instance is used as an LED, usually a white
+ * LED, for example as a backlight or flashlight. If this is set, the
+ * TPS6105X will register with the LED framework
+ * @TPS6105X_MODE_TORCH_FLASH: this instance is used as a flashgun, usually
+ * in a camera
+ * @TPS6105X_MODE_VOLTAGE: this instance is used as a voltage regulator and
+ * will register with the regulator framework
+ */
+enum tps6105x_mode {
+ TPS6105X_MODE_SHUTDOWN,
+ TPS6105X_MODE_TORCH,
+ TPS6105X_MODE_TORCH_FLASH,
+ TPS6105X_MODE_VOLTAGE,
+};
+
+/**
+ * struct tps6105x_platform_data - TPS6105x platform data
+ * @mode: what mode this instance shall be operated in,
+ * this is not selectable at runtime
+ * @regulator_data: initialization data for the voltage
+ * regulator if used as a voltage source
+ */
+struct tps6105x_platform_data {
+ enum tps6105x_mode mode;
+ struct regulator_init_data *regulator_data;
+};
+
+/**
+ * struct tps6105x - state holder for the TPS6105x drivers
+ * @pdata: platform data for this instance
+ * @lock: mutex to serialize I2C accesses
+ * @client: corresponding I2C client
+ * @regulator: regulator device if used in voltage mode
+ */
+struct tps6105x {
+ struct tps6105x_platform_data *pdata;
+ struct mutex lock;
+ struct i2c_client *client;
+ struct regulator_dev *regulator;
+};
+
+extern int tps6105x_set(struct tps6105x *tps6105x, u8 reg, u8 value);
+extern int tps6105x_get(struct tps6105x *tps6105x, u8 reg, u8 *buf);
+extern int tps6105x_mask_and_set(struct tps6105x *tps6105x, u8 reg,
+ u8 bitmask, u8 bitvalues);
+
+#endif
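Illustrative sketch only: a sub-driver can combine the accessors declared above with the register mask/shift definitions to change the operating mode without disturbing the other bits of register 0. The helper name is hypothetical.

#include <linux/mfd/tps6105x.h>

/* Hypothetical helper: select torch mode in register 0, leaving the
 * remaining bits of the register untouched. */
static int example_tps6105x_enable_torch(struct tps6105x *tps6105x)
{
	return tps6105x_mask_and_set(tps6105x, TPS6105X_REG_0,
				     TPS6105X_REG0_MODE_MASK,
				     TPS6105X_REG0_MODE_TORCH << TPS6105X_REG0_MODE_SHIFT);
}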
diff --git a/include/linux/mfd/tps6507x.h b/include/linux/mfd/tps6507x.h
new file mode 100644
index 000000000..c2ae56933
--- /dev/null
+++ b/include/linux/mfd/tps6507x.h
@@ -0,0 +1,168 @@
+/* linux/mfd/tps6507x.h
+ *
+ * Functions to access TPS65070 power management chip.
+ *
+ * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
+ *
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+
+#ifndef __LINUX_MFD_TPS6507X_H
+#define __LINUX_MFD_TPS6507X_H
+
+/*
+ * ----------------------------------------------------------------------------
+ * Registers, all 8 bits
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* Register definitions */
+#define TPS6507X_REG_PPATH1 0X01
+#define TPS6507X_CHG_USB BIT(7)
+#define TPS6507X_CHG_AC BIT(6)
+#define TPS6507X_CHG_USB_PW_ENABLE BIT(5)
+#define TPS6507X_CHG_AC_PW_ENABLE BIT(4)
+#define TPS6507X_CHG_AC_CURRENT BIT(2)
+#define TPS6507X_CHG_USB_CURRENT BIT(0)
+
+#define TPS6507X_REG_INT 0X02
+#define TPS6507X_REG_MASK_AC_USB BIT(7)
+#define TPS6507X_REG_MASK_TSC BIT(6)
+#define TPS6507X_REG_MASK_PB_IN BIT(5)
+#define TPS6507X_REG_TSC_INT BIT(3)
+#define TPS6507X_REG_PB_IN_INT BIT(2)
+#define TPS6507X_REG_AC_USB_APPLIED BIT(1)
+#define TPS6507X_REG_AC_USB_REMOVED BIT(0)
+
+#define TPS6507X_REG_CHGCONFIG0 0X03
+
+#define TPS6507X_REG_CHGCONFIG1 0X04
+#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
+#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
+#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
+#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
+#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
+
+#define TPS6507X_REG_CHGCONFIG2 0X05
+
+#define TPS6507X_REG_CHGCONFIG3 0X06
+
+#define TPS6507X_REG_ADCONFIG 0X07
+#define TPS6507X_ADCONFIG_AD_ENABLE BIT(7)
+#define TPS6507X_ADCONFIG_START_CONVERSION BIT(6)
+#define TPS6507X_ADCONFIG_CONVERSION_DONE BIT(5)
+#define TPS6507X_ADCONFIG_VREF_ENABLE BIT(4)
+#define TPS6507X_ADCONFIG_INPUT_AD_IN1 0
+#define TPS6507X_ADCONFIG_INPUT_AD_IN2 1
+#define TPS6507X_ADCONFIG_INPUT_AD_IN3 2
+#define TPS6507X_ADCONFIG_INPUT_AD_IN4 3
+#define TPS6507X_ADCONFIG_INPUT_TS_PIN 4
+#define TPS6507X_ADCONFIG_INPUT_BAT_CURRENT 5
+#define TPS6507X_ADCONFIG_INPUT_AC_VOLTAGE 6
+#define TPS6507X_ADCONFIG_INPUT_SYS_VOLTAGE 7
+#define TPS6507X_ADCONFIG_INPUT_CHARGER_VOLTAGE 8
+#define TPS6507X_ADCONFIG_INPUT_BAT_VOLTAGE 9
+#define TPS6507X_ADCONFIG_INPUT_THRESHOLD_VOLTAGE 10
+#define TPS6507X_ADCONFIG_INPUT_ISET1_VOLTAGE 11
+#define TPS6507X_ADCONFIG_INPUT_ISET2_VOLTAGE 12
+#define TPS6507X_ADCONFIG_INPUT_REAL_TSC 14
+#define TPS6507X_ADCONFIG_INPUT_TSC 15
+
+#define TPS6507X_REG_TSCMODE 0X08
+#define TPS6507X_TSCMODE_X_POSITION 0
+#define TPS6507X_TSCMODE_Y_POSITION 1
+#define TPS6507X_TSCMODE_PRESSURE 2
+#define TPS6507X_TSCMODE_X_PLATE 3
+#define TPS6507X_TSCMODE_Y_PLATE 4
+#define TPS6507X_TSCMODE_STANDBY 5
+#define TPS6507X_TSCMODE_ADC_INPUT 6
+#define TPS6507X_TSCMODE_DISABLE 7
+
+#define TPS6507X_REG_ADRESULT_1 0X09
+
+#define TPS6507X_REG_ADRESULT_2 0X0A
+#define TPS6507X_REG_ADRESULT_2_MASK (BIT(1) | BIT(0))
+
+#define TPS6507X_REG_PGOOD 0X0B
+
+#define TPS6507X_REG_PGOODMASK 0X0C
+
+#define TPS6507X_REG_CON_CTRL1 0X0D
+#define TPS6507X_CON_CTRL1_DCDC1_ENABLE BIT(4)
+#define TPS6507X_CON_CTRL1_DCDC2_ENABLE BIT(3)
+#define TPS6507X_CON_CTRL1_DCDC3_ENABLE BIT(2)
+#define TPS6507X_CON_CTRL1_LDO1_ENABLE BIT(1)
+#define TPS6507X_CON_CTRL1_LDO2_ENABLE BIT(0)
+
+#define TPS6507X_REG_CON_CTRL2 0X0E
+
+#define TPS6507X_REG_CON_CTRL3 0X0F
+
+#define TPS6507X_REG_DEFDCDC1 0X10
+#define TPS6507X_DEFDCDC1_DCDC1_EXT_ADJ_EN BIT(7)
+#define TPS6507X_DEFDCDC1_DCDC1_MASK 0X3F
+
+#define TPS6507X_REG_DEFDCDC2_LOW 0X11
+#define TPS6507X_DEFDCDC2_LOW_DCDC2_MASK 0X3F
+
+#define TPS6507X_REG_DEFDCDC2_HIGH 0X12
+#define TPS6507X_DEFDCDC2_HIGH_DCDC2_MASK 0X3F
+
+#define TPS6507X_REG_DEFDCDC3_LOW 0X13
+#define TPS6507X_DEFDCDC3_LOW_DCDC3_MASK 0X3F
+
+#define TPS6507X_REG_DEFDCDC3_HIGH 0X14
+#define TPS6507X_DEFDCDC3_HIGH_DCDC3_MASK 0X3F
+
+#define TPS6507X_REG_DEFSLEW 0X15
+
+#define TPS6507X_REG_LDO_CTRL1 0X16
+#define TPS6507X_REG_LDO_CTRL1_LDO1_MASK 0X0F
+
+#define TPS6507X_REG_DEFLDO2 0X17
+#define TPS6507X_REG_DEFLDO2_LDO2_MASK 0X3F
+
+#define TPS6507X_REG_WLED_CTRL1 0X18
+
+#define TPS6507X_REG_WLED_CTRL2 0X19
+
+/* VDCDC MASK */
+#define TPS6507X_DEFDCDCX_DCDC_MASK 0X3F
+
+#define TPS6507X_MAX_REGISTER 0X19
+
+/**
+ * struct tps6507x_board - packages regulator and touchscreen init data
+ * @tps6507x_pmic_init_data: regulator initialization values
+ * @tps6507x_ts_init_data: touchscreen initialization values
+ *
+ * Board data may be used to initialize the regulator and touchscreen.
+ */
+
+struct tps6507x_board {
+ struct regulator_init_data *tps6507x_pmic_init_data;
+ struct touchscreen_init_data *tps6507x_ts_init_data;
+};
+
+/**
+ * struct tps6507x_dev - tps6507x sub-driver chip access routines
+ * @read_dev: I2C register read function
+ * @write_dev: I2C register write function
+ *
+ * Device data may be used to access the TPS6507x chip
+ */
+
+struct tps6507x_dev {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ int (*read_dev)(struct tps6507x_dev *tps6507x, char reg, int size,
+ void *dest);
+ int (*write_dev)(struct tps6507x_dev *tps6507x, char reg, int size,
+ void *src);
+
+ /* Client devices */
+ struct tps6507x_pmic *pmic;
+};
+
+#endif /* __LINUX_MFD_TPS6507X_H */
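Illustrative sketch only: sub-drivers reach the chip through the read_dev()/write_dev() callbacks that the core driver fills in. Assuming those callbacks are populated, a charger sub-driver might poll the AC bit of PPATH1 as below; the helper name is hypothetical.

#include <linux/bitops.h>
#include <linux/mfd/tps6507x.h>
#include <linux/types.h>

/* Hypothetical helper: returns 1 if an AC charger is present, 0 if not,
 * or a negative error code from the underlying I2C transfer. */
static int example_tps6507x_ac_present(struct tps6507x_dev *tps6507x)
{
	u8 val;
	int ret;

	ret = tps6507x->read_dev(tps6507x, TPS6507X_REG_PPATH1, 1, &val);
	if (ret)
		return ret;

	return !!(val & TPS6507X_CHG_AC);
}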
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
new file mode 100644
index 000000000..0bf2708df
--- /dev/null
+++ b/include/linux/mfd/tps65090.h
@@ -0,0 +1,156 @@
+/*
+ * Core driver interface for TI TPS65090 PMIC family
+ *
+ * Copyright (C) 2012 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_MFD_TPS65090_H
+#define __LINUX_MFD_TPS65090_H
+
+#include <linux/irq.h>
+#include <linux/regmap.h>
+
+/* TPS65090 IRQs */
+enum {
+ TPS65090_IRQ_INTERRUPT,
+ TPS65090_IRQ_VAC_STATUS_CHANGE,
+ TPS65090_IRQ_VSYS_STATUS_CHANGE,
+ TPS65090_IRQ_BAT_STATUS_CHANGE,
+ TPS65090_IRQ_CHARGING_STATUS_CHANGE,
+ TPS65090_IRQ_CHARGING_COMPLETE,
+ TPS65090_IRQ_OVERLOAD_DCDC1,
+ TPS65090_IRQ_OVERLOAD_DCDC2,
+ TPS65090_IRQ_OVERLOAD_DCDC3,
+ TPS65090_IRQ_OVERLOAD_FET1,
+ TPS65090_IRQ_OVERLOAD_FET2,
+ TPS65090_IRQ_OVERLOAD_FET3,
+ TPS65090_IRQ_OVERLOAD_FET4,
+ TPS65090_IRQ_OVERLOAD_FET5,
+ TPS65090_IRQ_OVERLOAD_FET6,
+ TPS65090_IRQ_OVERLOAD_FET7,
+};
+
+/* TPS65090 Regulator ID */
+enum {
+ TPS65090_REGULATOR_DCDC1,
+ TPS65090_REGULATOR_DCDC2,
+ TPS65090_REGULATOR_DCDC3,
+ TPS65090_REGULATOR_FET1,
+ TPS65090_REGULATOR_FET2,
+ TPS65090_REGULATOR_FET3,
+ TPS65090_REGULATOR_FET4,
+ TPS65090_REGULATOR_FET5,
+ TPS65090_REGULATOR_FET6,
+ TPS65090_REGULATOR_FET7,
+ TPS65090_REGULATOR_LDO1,
+ TPS65090_REGULATOR_LDO2,
+
+ /* Last entry for maximum ID */
+ TPS65090_REGULATOR_MAX,
+};
+
+/* Register addresses */
+#define TPS65090_REG_INTR_STS 0x00
+#define TPS65090_REG_INTR_STS2 0x01
+#define TPS65090_REG_INTR_MASK 0x02
+#define TPS65090_REG_INTR_MASK2 0x03
+#define TPS65090_REG_CG_CTRL0 0x04
+#define TPS65090_REG_CG_CTRL1 0x05
+#define TPS65090_REG_CG_CTRL2 0x06
+#define TPS65090_REG_CG_CTRL3 0x07
+#define TPS65090_REG_CG_CTRL4 0x08
+#define TPS65090_REG_CG_CTRL5 0x09
+#define TPS65090_REG_CG_STATUS1 0x0a
+#define TPS65090_REG_CG_STATUS2 0x0b
+
+struct tps65090 {
+ struct device *dev;
+ struct regmap *rmap;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+/*
+ * struct tps65090_regulator_plat_data
+ *
+ * @reg_init_data: The regulator init data.
+ * @enable_ext_control: Enable external control or not. Only available for
+ *	DCDC1, DCDC2 and DCDC3.
+ * @gpio: GPIO number if external control is enabled and controlled through
+ *	a GPIO.
+ * @overcurrent_wait_valid: True if the overcurrent_wait value should be applied.
+ * @overcurrent_wait: Value to set as the overcurrent wait time. This is the
+ *	actual bitfield value, not a time in ms (valid values are 0 - 3).
+ */
+struct tps65090_regulator_plat_data {
+ struct regulator_init_data *reg_init_data;
+ bool enable_ext_control;
+ int gpio;
+ bool overcurrent_wait_valid;
+ int overcurrent_wait;
+};
+
+struct tps65090_platform_data {
+ int irq_base;
+
+ char **supplied_to;
+ size_t num_supplicants;
+ int enable_low_current_chrg;
+
+ struct tps65090_regulator_plat_data *reg_pdata[TPS65090_REGULATOR_MAX];
+};
+
+/*
+ * NOTE: the functions below are not intended for use outside
+ * of the TPS65090 sub-device drivers
+ */
+static inline int tps65090_write(struct device *dev, int reg, uint8_t val)
+{
+ struct tps65090 *tps = dev_get_drvdata(dev);
+
+ return regmap_write(tps->rmap, reg, val);
+}
+
+static inline int tps65090_read(struct device *dev, int reg, uint8_t *val)
+{
+ struct tps65090 *tps = dev_get_drvdata(dev);
+ unsigned int temp_val;
+ int ret;
+
+ ret = regmap_read(tps->rmap, reg, &temp_val);
+ if (!ret)
+ *val = temp_val;
+ return ret;
+}
+
+static inline int tps65090_set_bits(struct device *dev, int reg,
+ uint8_t bit_num)
+{
+ struct tps65090 *tps = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps->rmap, reg, BIT(bit_num), ~0u);
+}
+
+static inline int tps65090_clr_bits(struct device *dev, int reg,
+ uint8_t bit_num)
+{
+ struct tps65090 *tps = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps->rmap, reg, BIT(bit_num), 0u);
+}
+
+#endif /*__LINUX_MFD_TPS65090_H */
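Illustrative sketch only: the inline helpers above take the core tps65090 device, so a sub-device driver would normally pass its parent device. The helper below unmasks one interrupt source; the choice of bit number is left to the caller and is purely illustrative.

#include <linux/device.h>
#include <linux/mfd/tps65090.h>

/* Hypothetical helper: clear bit 'bit_num' in INTR_MASK so that the
 * corresponding interrupt source is no longer masked. 'parent' is the
 * tps65090 core device (typically pdev->dev.parent in a cell driver). */
static int example_tps65090_unmask_irq_bit(struct device *parent, int bit_num)
{
	return tps65090_clr_bits(parent, TPS65090_REG_INTR_MASK, bit_num);
}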
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
new file mode 100644
index 000000000..ac7fba44d
--- /dev/null
+++ b/include/linux/mfd/tps65217.h
@@ -0,0 +1,281 @@
+/*
+ * linux/mfd/tps65217.h
+ *
+ * Functions to access TPS65217 power management chip.
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MFD_TPS65217_H
+#define __LINUX_MFD_TPS65217_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+/* TPS chip id list */
+#define TPS65217 0xF0
+
+/* I2C ID for TPS65217 part */
+#define TPS65217_I2C_ID 0x24
+
+/* All register addresses */
+#define TPS65217_REG_CHIPID 0X00
+#define TPS65217_REG_PPATH 0X01
+#define TPS65217_REG_INT 0X02
+#define TPS65217_REG_CHGCONFIG0 0X03
+#define TPS65217_REG_CHGCONFIG1 0X04
+#define TPS65217_REG_CHGCONFIG2 0X05
+#define TPS65217_REG_CHGCONFIG3 0X06
+#define TPS65217_REG_WLEDCTRL1 0X07
+#define TPS65217_REG_WLEDCTRL2 0X08
+#define TPS65217_REG_MUXCTRL 0X09
+#define TPS65217_REG_STATUS 0X0A
+#define TPS65217_REG_PASSWORD 0X0B
+#define TPS65217_REG_PGOOD 0X0C
+#define TPS65217_REG_DEFPG 0X0D
+#define TPS65217_REG_DEFDCDC1 0X0E
+#define TPS65217_REG_DEFDCDC2 0X0F
+#define TPS65217_REG_DEFDCDC3 0X10
+#define TPS65217_REG_DEFSLEW 0X11
+#define TPS65217_REG_DEFLDO1 0X12
+#define TPS65217_REG_DEFLDO2 0X13
+#define TPS65217_REG_DEFLS1 0X14
+#define TPS65217_REG_DEFLS2 0X15
+#define TPS65217_REG_ENABLE 0X16
+#define TPS65217_REG_DEFUVLO 0X18
+#define TPS65217_REG_SEQ1 0X19
+#define TPS65217_REG_SEQ2 0X1A
+#define TPS65217_REG_SEQ3 0X1B
+#define TPS65217_REG_SEQ4 0X1C
+#define TPS65217_REG_SEQ5 0X1D
+#define TPS65217_REG_SEQ6 0X1E
+
+#define TPS65217_REG_MAX TPS65217_REG_SEQ6
+
+/* Register field definitions */
+#define TPS65217_CHIPID_CHIP_MASK 0xF0
+#define TPS65217_CHIPID_REV_MASK 0x0F
+
+#define TPS65217_PPATH_ACSINK_ENABLE BIT(7)
+#define TPS65217_PPATH_USBSINK_ENABLE BIT(6)
+#define TPS65217_PPATH_AC_PW_ENABLE BIT(5)
+#define TPS65217_PPATH_USB_PW_ENABLE BIT(4)
+#define TPS65217_PPATH_AC_CURRENT_MASK 0x0C
+#define TPS65217_PPATH_USB_CURRENT_MASK 0x03
+
+#define TPS65217_INT_PBM BIT(6)
+#define TPS65217_INT_ACM BIT(5)
+#define TPS65217_INT_USBM BIT(4)
+#define TPS65217_INT_PBI BIT(2)
+#define TPS65217_INT_ACI BIT(1)
+#define TPS65217_INT_USBI BIT(0)
+
+#define TPS65217_CHGCONFIG0_TREG BIT(7)
+#define TPS65217_CHGCONFIG0_DPPM BIT(6)
+#define TPS65217_CHGCONFIG0_TSUSP BIT(5)
+#define TPS65217_CHGCONFIG0_TERMI BIT(4)
+#define TPS65217_CHGCONFIG0_ACTIVE BIT(3)
+#define TPS65217_CHGCONFIG0_CHGTOUT BIT(2)
+#define TPS65217_CHGCONFIG0_PCHGTOUT BIT(1)
+#define TPS65217_CHGCONFIG0_BATTEMP BIT(0)
+
+#define TPS65217_CHGCONFIG1_TMR_MASK 0xC0
+#define TPS65217_CHGCONFIG1_TMR_ENABLE BIT(5)
+#define TPS65217_CHGCONFIG1_NTC_TYPE BIT(4)
+#define TPS65217_CHGCONFIG1_RESET BIT(3)
+#define TPS65217_CHGCONFIG1_TERM BIT(2)
+#define TPS65217_CHGCONFIG1_SUSP BIT(1)
+#define TPS65217_CHGCONFIG1_CHG_EN BIT(0)
+
+#define TPS65217_CHGCONFIG2_DYNTMR BIT(7)
+#define TPS65217_CHGCONFIG2_VPREGHG BIT(6)
+#define TPS65217_CHGCONFIG2_VOREG_MASK 0x30
+
+#define TPS65217_CHGCONFIG3_ICHRG_MASK 0xC0
+#define TPS65217_CHGCONFIG3_DPPMTH_MASK 0x30
+#define TPS65217_CHGCONFIG2_PCHRGT BIT(3)
+#define TPS65217_CHGCONFIG2_TERMIF 0x06
+#define TPS65217_CHGCONFIG2_TRANGE BIT(0)
+
+#define TPS65217_WLEDCTRL1_ISINK_ENABLE BIT(3)
+#define TPS65217_WLEDCTRL1_ISEL BIT(2)
+#define TPS65217_WLEDCTRL1_FDIM_MASK 0x03
+
+#define TPS65217_WLEDCTRL2_DUTY_MASK 0x7F
+
+#define TPS65217_MUXCTRL_MUX_MASK 0x07
+
+#define TPS65217_STATUS_OFF BIT(7)
+#define TPS65217_STATUS_ACPWR BIT(3)
+#define TPS65217_STATUS_USBPWR BIT(2)
+#define TPS65217_STATUS_PB BIT(0)
+
+#define TPS65217_PASSWORD_REGS_UNLOCK 0x7D
+
+#define TPS65217_PGOOD_LDO3_PG BIT(6)
+#define TPS65217_PGOOD_LDO4_PG BIT(5)
+#define TPS65217_PGOOD_DC1_PG BIT(4)
+#define TPS65217_PGOOD_DC2_PG BIT(3)
+#define TPS65217_PGOOD_DC3_PG BIT(2)
+#define TPS65217_PGOOD_LDO1_PG BIT(1)
+#define TPS65217_PGOOD_LDO2_PG BIT(0)
+
+#define TPS65217_DEFPG_LDO1PGM BIT(3)
+#define TPS65217_DEFPG_LDO2PGM BIT(2)
+#define TPS65217_DEFPG_PGDLY_MASK 0x03
+
+#define TPS65217_DEFDCDCX_XADJX BIT(7)
+#define TPS65217_DEFDCDCX_DCDC_MASK 0x3F
+
+#define TPS65217_DEFSLEW_GO BIT(7)
+#define TPS65217_DEFSLEW_GODSBL BIT(6)
+#define TPS65217_DEFSLEW_PFM_EN1 BIT(5)
+#define TPS65217_DEFSLEW_PFM_EN2 BIT(4)
+#define TPS65217_DEFSLEW_PFM_EN3 BIT(3)
+#define TPS65217_DEFSLEW_SLEW_MASK 0x07
+
+#define TPS65217_DEFLDO1_LDO1_MASK 0x0F
+
+#define TPS65217_DEFLDO2_TRACK BIT(6)
+#define TPS65217_DEFLDO2_LDO2_MASK 0x3F
+
+#define TPS65217_DEFLDO3_LDO3_EN BIT(5)
+#define TPS65217_DEFLDO3_LDO3_MASK 0x1F
+
+#define TPS65217_DEFLDO4_LDO4_EN BIT(5)
+#define TPS65217_DEFLDO4_LDO4_MASK 0x1F
+
+#define TPS65217_ENABLE_LS1_EN BIT(6)
+#define TPS65217_ENABLE_LS2_EN BIT(5)
+#define TPS65217_ENABLE_DC1_EN BIT(4)
+#define TPS65217_ENABLE_DC2_EN BIT(3)
+#define TPS65217_ENABLE_DC3_EN BIT(2)
+#define TPS65217_ENABLE_LDO1_EN BIT(1)
+#define TPS65217_ENABLE_LDO2_EN BIT(0)
+
+#define TPS65217_DEFUVLO_UVLOHYS BIT(2)
+#define TPS65217_DEFUVLO_UVLO_MASK 0x03
+
+#define TPS65217_SEQ1_DC1_SEQ_MASK 0xF0
+#define TPS65217_SEQ1_DC2_SEQ_MASK 0x0F
+
+#define TPS65217_SEQ2_DC3_SEQ_MASK 0xF0
+#define TPS65217_SEQ2_LDO1_SEQ_MASK 0x0F
+
+#define TPS65217_SEQ3_LDO2_SEQ_MASK 0xF0
+#define TPS65217_SEQ3_LDO3_SEQ_MASK 0x0F
+
+#define TPS65217_SEQ4_LDO4_SEQ_MASK 0xF0
+
+#define TPS65217_SEQ5_DLY1_MASK 0xC0
+#define TPS65217_SEQ5_DLY2_MASK 0x30
+#define TPS65217_SEQ5_DLY3_MASK 0x0C
+#define TPS65217_SEQ5_DLY4_MASK 0x03
+
+#define TPS65217_SEQ6_DLY5_MASK 0xC0
+#define TPS65217_SEQ6_DLY6_MASK 0x30
+#define TPS65217_SEQ6_SEQUP BIT(2)
+#define TPS65217_SEQ6_SEQDWN BIT(1)
+#define TPS65217_SEQ6_INSTDWN BIT(0)
+
+#define TPS65217_MAX_REGISTER 0x1E
+#define TPS65217_PROTECT_NONE 0
+#define TPS65217_PROTECT_L1 1
+#define TPS65217_PROTECT_L2 2
+
+
+enum tps65217_regulator_id {
+ /* DCDC's */
+ TPS65217_DCDC_1,
+ TPS65217_DCDC_2,
+ TPS65217_DCDC_3,
+ /* LDOs */
+ TPS65217_LDO_1,
+ TPS65217_LDO_2,
+ TPS65217_LDO_3,
+ TPS65217_LDO_4,
+};
+
+#define TPS65217_MAX_REG_ID TPS65217_LDO_4
+
+/* Number of step-down converters available */
+#define TPS65217_NUM_DCDC 3
+/* Number of LDO voltage regulators available */
+#define TPS65217_NUM_LDO 4
+/* Number of total regulators available */
+#define TPS65217_NUM_REGULATOR (TPS65217_NUM_DCDC + TPS65217_NUM_LDO)
+
+enum tps65217_bl_isel {
+ TPS65217_BL_ISET1 = 1,
+ TPS65217_BL_ISET2,
+};
+
+enum tps65217_bl_fdim {
+ TPS65217_BL_FDIM_100HZ,
+ TPS65217_BL_FDIM_200HZ,
+ TPS65217_BL_FDIM_500HZ,
+ TPS65217_BL_FDIM_1000HZ,
+};
+
+struct tps65217_bl_pdata {
+ enum tps65217_bl_isel isel;
+ enum tps65217_bl_fdim fdim;
+ int dft_brightness;
+};
+
+/**
+ * struct tps65217_board - packages regulator init data
+ * @tps65217_init_data: regulator initialization values
+ * @of_node: regulator device-tree nodes, if any
+ * @bl_pdata: backlight platform data, if any
+ *
+ * Board data may be used to initialize the regulators.
+ */
+struct tps65217_board {
+ struct regulator_init_data *tps65217_init_data[TPS65217_NUM_REGULATOR];
+ struct device_node *of_node[TPS65217_NUM_REGULATOR];
+ struct tps65217_bl_pdata *bl_pdata;
+};
+
+/**
+ * struct tps65217 - tps65217 sub-driver chip access routines
+ *
+ * Device data may be used to access the TPS65217 chip
+ */
+
+struct tps65217 {
+ struct device *dev;
+ struct tps65217_board *pdata;
+ unsigned long id;
+ struct regulator_desc desc[TPS65217_NUM_REGULATOR];
+ struct regmap *regmap;
+};
+
+static inline struct tps65217 *dev_to_tps65217(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+
+static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
+{
+ return tps65217->id;
+}
+
+int tps65217_reg_read(struct tps65217 *tps, unsigned int reg,
+ unsigned int *val);
+int tps65217_reg_write(struct tps65217 *tps, unsigned int reg,
+ unsigned int val, unsigned int level);
+int tps65217_set_bits(struct tps65217 *tps, unsigned int reg,
+ unsigned int mask, unsigned int val, unsigned int level);
+int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
+ unsigned int mask, unsigned int level);
+
+#endif /* __LINUX_MFD_TPS65217_H */
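Illustrative sketch only: writes to the TPS65217 go through tps65217_reg_write() and friends, which take a protection level (TPS65217_PROTECT_NONE/L1/L2) controlling the password sequence. The helper below switches on DC1; the protection level used here is an assumption for illustration, not something this header specifies.

#include <linux/mfd/tps65217.h>

/* Hypothetical helper: set the DC1 enable bit in the ENABLE register.
 * The protection level (PROTECT_NONE) is an assumption for illustration. */
static int example_tps65217_enable_dc1(struct tps65217 *tps)
{
	return tps65217_set_bits(tps, TPS65217_REG_ENABLE,
				 TPS65217_ENABLE_DC1_EN, TPS65217_ENABLE_DC1_EN,
				 TPS65217_PROTECT_NONE);
}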
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h
new file mode 100644
index 000000000..2f9b59324
--- /dev/null
+++ b/include/linux/mfd/tps65218.h
@@ -0,0 +1,283 @@
+/*
+ * linux/mfd/tps65218.h
+ *
+ * Functions to access the TPS65218 power management chip.
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#ifndef __LINUX_MFD_TPS65218_H
+#define __LINUX_MFD_TPS65218_H
+
+#include <linux/i2c.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/bitops.h>
+
+/* TPS chip id list */
+#define TPS65218 0xF0
+
+/* I2C ID for TPS65218 part */
+#define TPS65218_I2C_ID 0x24
+
+/* All register addresses */
+#define TPS65218_REG_CHIPID 0x00
+#define TPS65218_REG_INT1 0x01
+#define TPS65218_REG_INT2 0x02
+#define TPS65218_REG_INT_MASK1 0x03
+#define TPS65218_REG_INT_MASK2 0x04
+#define TPS65218_REG_STATUS 0x05
+#define TPS65218_REG_CONTROL 0x06
+#define TPS65218_REG_FLAG 0x07
+
+#define TPS65218_REG_PASSWORD 0x10
+#define TPS65218_REG_ENABLE1 0x11
+#define TPS65218_REG_ENABLE2 0x12
+#define TPS65218_REG_CONFIG1 0x13
+#define TPS65218_REG_CONFIG2 0x14
+#define TPS65218_REG_CONFIG3 0x15
+#define TPS65218_REG_CONTROL_DCDC1 0x16
+#define TPS65218_REG_CONTROL_DCDC2 0x17
+#define TPS65218_REG_CONTROL_DCDC3 0x18
+#define TPS65218_REG_CONTROL_DCDC4 0x19
+#define TPS65218_REG_CONTRL_SLEW_RATE 0x1A
+#define TPS65218_REG_CONTROL_LDO1 0x1B
+#define TPS65218_REG_SEQ1 0x20
+#define TPS65218_REG_SEQ2 0x21
+#define TPS65218_REG_SEQ3 0x22
+#define TPS65218_REG_SEQ4 0x23
+#define TPS65218_REG_SEQ5 0x24
+#define TPS65218_REG_SEQ6 0x25
+#define TPS65218_REG_SEQ7 0x26
+
+/* Register field definitions */
+#define TPS65218_CHIPID_CHIP_MASK 0xF8
+#define TPS65218_CHIPID_REV_MASK 0x07
+
+#define TPS65218_INT1_VPRG BIT(5)
+#define TPS65218_INT1_AC BIT(4)
+#define TPS65218_INT1_PB BIT(3)
+#define TPS65218_INT1_HOT BIT(2)
+#define TPS65218_INT1_CC_AQC BIT(1)
+#define TPS65218_INT1_PRGC BIT(0)
+
+#define TPS65218_INT2_LS3_F BIT(5)
+#define TPS65218_INT2_LS2_F BIT(4)
+#define TPS65218_INT2_LS1_F BIT(3)
+#define TPS65218_INT2_LS3_I BIT(2)
+#define TPS65218_INT2_LS2_I BIT(1)
+#define TPS65218_INT2_LS1_I BIT(0)
+
+#define TPS65218_INT_MASK1_VPRG BIT(5)
+#define TPS65218_INT_MASK1_AC BIT(4)
+#define TPS65218_INT_MASK1_PB BIT(3)
+#define TPS65218_INT_MASK1_HOT BIT(2)
+#define TPS65218_INT_MASK1_CC_AQC BIT(1)
+#define TPS65218_INT_MASK1_PRGC BIT(0)
+
+#define TPS65218_INT_MASK2_LS3_F BIT(5)
+#define TPS65218_INT_MASK2_LS2_F BIT(4)
+#define TPS65218_INT_MASK2_LS1_F BIT(3)
+#define TPS65218_INT_MASK2_LS3_I BIT(2)
+#define TPS65218_INT_MASK2_LS2_I BIT(1)
+#define TPS65218_INT_MASK2_LS1_I BIT(0)
+
+#define TPS65218_STATUS_FSEAL BIT(7)
+#define TPS65218_STATUS_EE BIT(6)
+#define TPS65218_STATUS_AC_STATE BIT(5)
+#define TPS65218_STATUS_PB_STATE BIT(4)
+#define TPS65218_STATUS_STATE_MASK 0xC
+#define TPS65218_STATUS_CC_STAT 0x3
+
+#define TPS65218_CONTROL_OFFNPFO BIT(1)
+#define TPS65218_CONTROL_CC_AQ BIT(0)
+
+#define TPS65218_FLAG_GPO3_FLG BIT(7)
+#define TPS65218_FLAG_GPO2_FLG BIT(6)
+#define TPS65218_FLAG_GPO1_FLG BIT(5)
+#define TPS65218_FLAG_LDO1_FLG BIT(4)
+#define TPS65218_FLAG_DC4_FLG BIT(3)
+#define TPS65218_FLAG_DC3_FLG BIT(2)
+#define TPS65218_FLAG_DC2_FLG BIT(1)
+#define TPS65218_FLAG_DC1_FLG BIT(0)
+
+#define TPS65218_ENABLE1_DC6_EN BIT(5)
+#define TPS65218_ENABLE1_DC5_EN BIT(4)
+#define TPS65218_ENABLE1_DC4_EN BIT(3)
+#define TPS65218_ENABLE1_DC3_EN BIT(2)
+#define TPS65218_ENABLE1_DC2_EN BIT(1)
+#define TPS65218_ENABLE1_DC1_EN BIT(0)
+
+#define TPS65218_ENABLE2_GPIO3 BIT(6)
+#define TPS65218_ENABLE2_GPIO2 BIT(5)
+#define TPS65218_ENABLE2_GPIO1 BIT(4)
+#define TPS65218_ENABLE2_LS3_EN BIT(3)
+#define TPS65218_ENABLE2_LS2_EN BIT(2)
+#define TPS65218_ENABLE2_LS1_EN BIT(1)
+#define TPS65218_ENABLE2_LDO1_EN BIT(0)
+
+
+#define TPS65218_CONFIG1_TRST BIT(7)
+#define TPS65218_CONFIG1_GPO2_BUF BIT(6)
+#define TPS65218_CONFIG1_IO1_SEL BIT(5)
+#define TPS65218_CONFIG1_PGDLY_MASK 0x18
+#define TPS65218_CONFIG1_STRICT BIT(2)
+#define TPS65218_CONFIG1_UVLO_MASK 0x3
+
+#define TPS65218_CONFIG2_DC12_RST BIT(7)
+#define TPS65218_CONFIG2_UVLOHYS BIT(6)
+#define TPS65218_CONFIG2_LS3ILIM_MASK 0xC
+#define TPS65218_CONFIG2_LS2ILIM_MASK 0x3
+
+#define TPS65218_CONFIG3_LS3NPFO BIT(5)
+#define TPS65218_CONFIG3_LS2NPFO BIT(4)
+#define TPS65218_CONFIG3_LS1NPFO BIT(3)
+#define TPS65218_CONFIG3_LS3DCHRG BIT(2)
+#define TPS65218_CONFIG3_LS2DCHRG BIT(1)
+#define TPS65218_CONFIG3_LS1DCHRG BIT(0)
+
+#define TPS65218_CONTROL_DCDC1_PFM BIT(7)
+#define TPS65218_CONTROL_DCDC1_MASK 0x7F
+
+#define TPS65218_CONTROL_DCDC2_PFM BIT(7)
+#define TPS65218_CONTROL_DCDC2_MASK 0x3F
+
+#define TPS65218_CONTROL_DCDC3_PFM BIT(7)
+#define TPS65218_CONTROL_DCDC3_MASK 0x3F
+
+#define TPS65218_CONTROL_DCDC4_PFM BIT(7)
+#define TPS65218_CONTROL_DCDC4_MASK 0x3F
+
+#define TPS65218_SLEW_RATE_GO BIT(7)
+#define TPS65218_SLEW_RATE_GODSBL BIT(6)
+#define TPS65218_SLEW_RATE_SLEW_MASK 0x7
+
+#define TPS65218_CONTROL_LDO1_MASK 0x3F
+
+#define TPS65218_SEQ1_DLY8 BIT(7)
+#define TPS65218_SEQ1_DLY7 BIT(6)
+#define TPS65218_SEQ1_DLY6 BIT(5)
+#define TPS65218_SEQ1_DLY5 BIT(4)
+#define TPS65218_SEQ1_DLY4 BIT(3)
+#define TPS65218_SEQ1_DLY3 BIT(2)
+#define TPS65218_SEQ1_DLY2 BIT(1)
+#define TPS65218_SEQ1_DLY1 BIT(0)
+
+#define TPS65218_SEQ2_DLYFCTR BIT(7)
+#define TPS65218_SEQ2_DLY9 BIT(0)
+
+#define TPS65218_SEQ3_DC2_SEQ_MASK 0xF0
+#define TPS65218_SEQ3_DC1_SEQ_MASK 0xF
+
+#define TPS65218_SEQ4_DC4_SEQ_MASK 0xF0
+#define TPS65218_SEQ4_DC3_SEQ_MASK 0xF
+
+#define TPS65218_SEQ5_DC6_SEQ_MASK 0xF0
+#define TPS65218_SEQ5_DC5_SEQ_MASK 0xF
+
+#define TPS65218_SEQ6_LS1_SEQ_MASK 0xF0
+#define TPS65218_SEQ6_LDO1_SEQ_MASK 0xF
+
+#define TPS65218_SEQ7_GPO3_SEQ_MASK 0xF0
+#define TPS65218_SEQ7_GPO1_SEQ_MASK 0xF
+#define TPS65218_PROTECT_NONE 0
+#define TPS65218_PROTECT_L1 1
+
+enum tps65218_regulator_id {
+ /* DCDC's */
+ TPS65218_DCDC_1,
+ TPS65218_DCDC_2,
+ TPS65218_DCDC_3,
+ TPS65218_DCDC_4,
+ TPS65218_DCDC_5,
+ TPS65218_DCDC_6,
+ /* LDOs */
+ TPS65218_LDO_1,
+};
+
+#define TPS65218_MAX_REG_ID TPS65218_LDO_1
+
+/* Number of step-down converters available */
+#define TPS65218_NUM_DCDC 6
+/* Number of LDO voltage regulators available */
+#define TPS65218_NUM_LDO 1
+/* Number of total regulators available */
+#define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO)
+
+/* Define the TPS65218 IRQ numbers */
+enum tps65218_irqs {
+ /* INT1 registers */
+ TPS65218_PRGC_IRQ,
+ TPS65218_CC_AQC_IRQ,
+ TPS65218_HOT_IRQ,
+ TPS65218_PB_IRQ,
+ TPS65218_AC_IRQ,
+ TPS65218_VPRG_IRQ,
+ TPS65218_INVALID1_IRQ,
+ TPS65218_INVALID2_IRQ,
+ /* INT2 registers */
+ TPS65218_LS1_I_IRQ,
+ TPS65218_LS2_I_IRQ,
+ TPS65218_LS3_I_IRQ,
+ TPS65218_LS1_F_IRQ,
+ TPS65218_LS2_F_IRQ,
+ TPS65218_LS3_F_IRQ,
+ TPS65218_INVALID3_IRQ,
+ TPS65218_INVALID4_IRQ,
+};
+
+/**
+ * struct tps_info - packages regulator constraints
+ * @id: Id of the regulator
+ * @name: Voltage regulator name
+ * @min_uV: minimum microvolts
+ * @max_uV: maximum microvolts
+ *
+ * This data is used to check the regulator voltage limits while setting.
+ */
+struct tps_info {
+ int id;
+ const char *name;
+ int min_uV;
+ int max_uV;
+};
+
+/**
+ * struct tps65218 - tps65218 sub-driver chip access routines
+ *
+ * Device data may be used to access the TPS65218 chip
+ */
+
+struct tps65218 {
+ struct device *dev;
+ unsigned int id;
+
+ struct mutex tps_lock; /* lock guarding the data structure */
+ /* IRQ Data */
+ int irq;
+ u32 irq_mask;
+ struct regmap_irq_chip_data *irq_data;
+ struct regulator_desc desc[TPS65218_NUM_REGULATOR];
+ struct tps_info *info[TPS65218_NUM_REGULATOR];
+ struct regmap *regmap;
+};
+
+int tps65218_reg_read(struct tps65218 *tps, unsigned int reg,
+ unsigned int *val);
+int tps65218_reg_write(struct tps65218 *tps, unsigned int reg,
+ unsigned int val, unsigned int level);
+int tps65218_set_bits(struct tps65218 *tps, unsigned int reg,
+ unsigned int mask, unsigned int val, unsigned int level);
+int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg,
+ unsigned int mask, unsigned int level);
+
+#endif /* __LINUX_MFD_TPS65218_H */
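Illustrative sketch only: the TPS65218 accessors mirror the TPS65217 ones, including the protection-level argument. A regulator sub-driver might enable LDO1 as below; the chosen protection level is an assumption for illustration.

#include <linux/mfd/tps65218.h>

/* Hypothetical helper: set the LDO1 enable bit in ENABLE2. The level-1
 * protection passed here is an assumption for illustration. */
static int example_tps65218_enable_ldo1(struct tps65218 *tps)
{
	return tps65218_set_bits(tps, TPS65218_REG_ENABLE2,
				 TPS65218_ENABLE2_LDO1_EN, TPS65218_ENABLE2_LDO1_EN,
				 TPS65218_PROTECT_L1);
}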
diff --git a/include/linux/mfd/tps6586x.h b/include/linux/mfd/tps6586x.h
new file mode 100644
index 000000000..96187ed9f
--- /dev/null
+++ b/include/linux/mfd/tps6586x.h
@@ -0,0 +1,110 @@
+#ifndef __LINUX_MFD_TPS6586X_H
+#define __LINUX_MFD_TPS6586X_H
+
+#define TPS6586X_SLEW_RATE_INSTANTLY 0x00
+#define TPS6586X_SLEW_RATE_110UV 0x01
+#define TPS6586X_SLEW_RATE_220UV 0x02
+#define TPS6586X_SLEW_RATE_440UV 0x03
+#define TPS6586X_SLEW_RATE_880UV 0x04
+#define TPS6586X_SLEW_RATE_1760UV 0x05
+#define TPS6586X_SLEW_RATE_3520UV 0x06
+#define TPS6586X_SLEW_RATE_7040UV 0x07
+
+#define TPS6586X_SLEW_RATE_SET 0x08
+#define TPS6586X_SLEW_RATE_MASK 0x07
+
+/* VERSION CRC */
+#define TPS658621A 0x15
+#define TPS658621CD 0x2c
+#define TPS658623 0x1b
+#define TPS658640 0x01
+#define TPS658640v2 0x02
+#define TPS658643 0x03
+
+enum {
+ TPS6586X_ID_SYS,
+ TPS6586X_ID_SM_0,
+ TPS6586X_ID_SM_1,
+ TPS6586X_ID_SM_2,
+ TPS6586X_ID_LDO_0,
+ TPS6586X_ID_LDO_1,
+ TPS6586X_ID_LDO_2,
+ TPS6586X_ID_LDO_3,
+ TPS6586X_ID_LDO_4,
+ TPS6586X_ID_LDO_5,
+ TPS6586X_ID_LDO_6,
+ TPS6586X_ID_LDO_7,
+ TPS6586X_ID_LDO_8,
+ TPS6586X_ID_LDO_9,
+ TPS6586X_ID_LDO_RTC,
+ TPS6586X_ID_MAX_REGULATOR,
+};
+
+enum {
+ TPS6586X_INT_PLDO_0,
+ TPS6586X_INT_PLDO_1,
+ TPS6586X_INT_PLDO_2,
+ TPS6586X_INT_PLDO_3,
+ TPS6586X_INT_PLDO_4,
+ TPS6586X_INT_PLDO_5,
+ TPS6586X_INT_PLDO_6,
+ TPS6586X_INT_PLDO_7,
+ TPS6586X_INT_COMP_DET,
+ TPS6586X_INT_ADC,
+ TPS6586X_INT_PLDO_8,
+ TPS6586X_INT_PLDO_9,
+ TPS6586X_INT_PSM_0,
+ TPS6586X_INT_PSM_1,
+ TPS6586X_INT_PSM_2,
+ TPS6586X_INT_PSM_3,
+ TPS6586X_INT_RTC_ALM1,
+ TPS6586X_INT_ACUSB_OVP,
+ TPS6586X_INT_USB_DET,
+ TPS6586X_INT_AC_DET,
+ TPS6586X_INT_BAT_DET,
+ TPS6586X_INT_CHG_STAT,
+ TPS6586X_INT_CHG_TEMP,
+ TPS6586X_INT_PP,
+ TPS6586X_INT_RESUME,
+ TPS6586X_INT_LOW_SYS,
+ TPS6586X_INT_RTC_ALM2,
+};
+
+struct tps6586x_settings {
+ int slew_rate;
+};
+
+struct tps6586x_subdev_info {
+ int id;
+ const char *name;
+ void *platform_data;
+ struct device_node *of_node;
+};
+
+struct tps6586x_platform_data {
+ int num_subdevs;
+ struct tps6586x_subdev_info *subdevs;
+
+ int gpio_base;
+ int irq_base;
+ bool pm_off;
+
+ struct regulator_init_data *reg_init_data[TPS6586X_ID_MAX_REGULATOR];
+};
+
+/*
+ * NOTE: the functions below are not intended for use outside
+ * of the TPS6586X sub-device drivers
+ */
+extern int tps6586x_write(struct device *dev, int reg, uint8_t val);
+extern int tps6586x_writes(struct device *dev, int reg, int len, uint8_t *val);
+extern int tps6586x_read(struct device *dev, int reg, uint8_t *val);
+extern int tps6586x_reads(struct device *dev, int reg, int len, uint8_t *val);
+extern int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int tps6586x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
+extern int tps6586x_update(struct device *dev, int reg, uint8_t val,
+ uint8_t mask);
+extern int tps6586x_irq_get_virq(struct device *dev, int irq);
+extern int tps6586x_get_version(struct device *dev);
+
+#endif /*__LINUX_MFD_TPS6586X_H */
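Illustrative sketch only: boards describe their TPS6586x sub-devices through struct tps6586x_platform_data. A hypothetical board file could wire up a single sub-device as below; the cell name, IDs and flags are illustrative, not taken from this header.

#include <linux/kernel.h>
#include <linux/mfd/tps6586x.h>

/* Hypothetical board wiring: one sub-device entry plus the platform data
 * consumed by the core driver. All values are illustrative. */
static struct tps6586x_subdev_info example_tps6586x_subdevs[] = {
	{
		.id	= 0,
		.name	= "tps6586x-rtc",	/* hypothetical cell name */
	},
};

static struct tps6586x_platform_data example_tps6586x_pdata = {
	.num_subdevs	= ARRAY_SIZE(example_tps6586x_subdevs),
	.subdevs	= example_tps6586x_subdevs,
	.gpio_base	= -1,			/* let gpiolib choose a base */
	.irq_base	= -1,
	.pm_off		= true,
};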
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
new file mode 100644
index 000000000..6483a6fdc
--- /dev/null
+++ b/include/linux/mfd/tps65910.h
@@ -0,0 +1,955 @@
+/*
+ * tps65910.h -- TI TPS6591x
+ *
+ * Copyright 2010-2011 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ * Author: Arnaud Deconinck <a-deconinck@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_TPS65910_H
+#define __LINUX_MFD_TPS65910_H
+
+#include <linux/gpio.h>
+#include <linux/regmap.h>
+
+/* TPS chip id list */
+#define TPS65910 0
+#define TPS65911 1
+
+/* TPS regulator type list */
+#define REGULATOR_LDO 0
+#define REGULATOR_DCDC 1
+
+/*
+ * List of registers for component TPS65910
+ *
+ */
+
+#define TPS65910_SECONDS 0x0
+#define TPS65910_MINUTES 0x1
+#define TPS65910_HOURS 0x2
+#define TPS65910_DAYS 0x3
+#define TPS65910_MONTHS 0x4
+#define TPS65910_YEARS 0x5
+#define TPS65910_WEEKS 0x6
+#define TPS65910_ALARM_SECONDS 0x8
+#define TPS65910_ALARM_MINUTES 0x9
+#define TPS65910_ALARM_HOURS 0xA
+#define TPS65910_ALARM_DAYS 0xB
+#define TPS65910_ALARM_MONTHS 0xC
+#define TPS65910_ALARM_YEARS 0xD
+#define TPS65910_RTC_CTRL 0x10
+#define TPS65910_RTC_STATUS 0x11
+#define TPS65910_RTC_INTERRUPTS 0x12
+#define TPS65910_RTC_COMP_LSB 0x13
+#define TPS65910_RTC_COMP_MSB 0x14
+#define TPS65910_RTC_RES_PROG 0x15
+#define TPS65910_RTC_RESET_STATUS 0x16
+#define TPS65910_BCK1 0x17
+#define TPS65910_BCK2 0x18
+#define TPS65910_BCK3 0x19
+#define TPS65910_BCK4 0x1A
+#define TPS65910_BCK5 0x1B
+#define TPS65910_PUADEN 0x1C
+#define TPS65910_REF 0x1D
+#define TPS65910_VRTC 0x1E
+#define TPS65910_VIO 0x20
+#define TPS65910_VDD1 0x21
+#define TPS65910_VDD1_OP 0x22
+#define TPS65910_VDD1_SR 0x23
+#define TPS65910_VDD2 0x24
+#define TPS65910_VDD2_OP 0x25
+#define TPS65910_VDD2_SR 0x26
+#define TPS65910_VDD3 0x27
+#define TPS65910_VDIG1 0x30
+#define TPS65910_VDIG2 0x31
+#define TPS65910_VAUX1 0x32
+#define TPS65910_VAUX2 0x33
+#define TPS65910_VAUX33 0x34
+#define TPS65910_VMMC 0x35
+#define TPS65910_VPLL 0x36
+#define TPS65910_VDAC 0x37
+#define TPS65910_THERM 0x38
+#define TPS65910_BBCH 0x39
+#define TPS65910_DCDCCTRL 0x3E
+#define TPS65910_DEVCTRL 0x3F
+#define TPS65910_DEVCTRL2 0x40
+#define TPS65910_SLEEP_KEEP_LDO_ON 0x41
+#define TPS65910_SLEEP_KEEP_RES_ON 0x42
+#define TPS65910_SLEEP_SET_LDO_OFF 0x43
+#define TPS65910_SLEEP_SET_RES_OFF 0x44
+#define TPS65910_EN1_LDO_ASS 0x45
+#define TPS65910_EN1_SMPS_ASS 0x46
+#define TPS65910_EN2_LDO_ASS 0x47
+#define TPS65910_EN2_SMPS_ASS 0x48
+#define TPS65910_EN3_LDO_ASS 0x49
+#define TPS65910_SPARE 0x4A
+#define TPS65910_INT_STS 0x50
+#define TPS65910_INT_MSK 0x51
+#define TPS65910_INT_STS2 0x52
+#define TPS65910_INT_MSK2 0x53
+#define TPS65910_INT_STS3 0x54
+#define TPS65910_INT_MSK3 0x55
+#define TPS65910_GPIO0 0x60
+#define TPS65910_GPIO1 0x61
+#define TPS65910_GPIO2 0x62
+#define TPS65910_GPIO3 0x63
+#define TPS65910_GPIO4 0x64
+#define TPS65910_GPIO5 0x65
+#define TPS65910_GPIO6 0x66
+#define TPS65910_GPIO7 0x67
+#define TPS65910_GPIO8 0x68
+#define TPS65910_JTAGVERNUM 0x80
+#define TPS65910_MAX_REGISTER 0x80
+
+/*
+ * List of registers specific to TPS65911
+ */
+#define TPS65911_VDDCTRL 0x27
+#define TPS65911_VDDCTRL_OP 0x28
+#define TPS65911_VDDCTRL_SR 0x29
+#define TPS65911_LDO1 0x30
+#define TPS65911_LDO2 0x31
+#define TPS65911_LDO5 0x32
+#define TPS65911_LDO8 0x33
+#define TPS65911_LDO7 0x34
+#define TPS65911_LDO6 0x35
+#define TPS65911_LDO4 0x36
+#define TPS65911_LDO3 0x37
+#define TPS65911_VMBCH 0x6A
+#define TPS65911_VMBCH2 0x6B
+
+/*
+ * List of register bitfields for component TPS65910
+ *
+ */
+
+/* RTC_CTRL_REG bitfields */
+#define TPS65910_RTC_CTRL_STOP_RTC 0x01 /*0=stop, 1=run */
+#define TPS65910_RTC_CTRL_GET_TIME 0x40
+
+/* RTC_STATUS_REG bitfields */
+#define TPS65910_RTC_STATUS_ALARM 0x40
+
+/* RTC_INTERRUPTS_REG bitfields */
+#define TPS65910_RTC_INTERRUPTS_EVERY 0x03
+#define TPS65910_RTC_INTERRUPTS_IT_ALARM 0x08
+
+/*Register BCK1 (0x80) register.RegisterDescription */
+#define BCK1_BCKUP_MASK 0xFF
+#define BCK1_BCKUP_SHIFT 0
+
+
+/*Register BCK2 (0x80) register.RegisterDescription */
+#define BCK2_BCKUP_MASK 0xFF
+#define BCK2_BCKUP_SHIFT 0
+
+
+/*Register BCK3 (0x80) register.RegisterDescription */
+#define BCK3_BCKUP_MASK 0xFF
+#define BCK3_BCKUP_SHIFT 0
+
+
+/*Register BCK4 (0x80) register.RegisterDescription */
+#define BCK4_BCKUP_MASK 0xFF
+#define BCK4_BCKUP_SHIFT 0
+
+
+/*Register BCK5 (0x80) register.RegisterDescription */
+#define BCK5_BCKUP_MASK 0xFF
+#define BCK5_BCKUP_SHIFT 0
+
+
+/*Register PUADEN (0x80) register.RegisterDescription */
+#define PUADEN_EN3P_MASK 0x80
+#define PUADEN_EN3P_SHIFT 7
+#define PUADEN_I2CCTLP_MASK 0x40
+#define PUADEN_I2CCTLP_SHIFT 6
+#define PUADEN_I2CSRP_MASK 0x20
+#define PUADEN_I2CSRP_SHIFT 5
+#define PUADEN_PWRONP_MASK 0x10
+#define PUADEN_PWRONP_SHIFT 4
+#define PUADEN_SLEEPP_MASK 0x08
+#define PUADEN_SLEEPP_SHIFT 3
+#define PUADEN_PWRHOLDP_MASK 0x04
+#define PUADEN_PWRHOLDP_SHIFT 2
+#define PUADEN_BOOT1P_MASK 0x02
+#define PUADEN_BOOT1P_SHIFT 1
+#define PUADEN_BOOT0P_MASK 0x01
+#define PUADEN_BOOT0P_SHIFT 0
+
+
+/*Register REF (0x80) register.RegisterDescription */
+#define REF_VMBCH_SEL_MASK 0x0C
+#define REF_VMBCH_SEL_SHIFT 2
+#define REF_ST_MASK 0x03
+#define REF_ST_SHIFT 0
+
+
+/*Register VRTC (0x80) register.RegisterDescription */
+#define VRTC_VRTC_OFFMASK_MASK 0x08
+#define VRTC_VRTC_OFFMASK_SHIFT 3
+#define VRTC_ST_MASK 0x03
+#define VRTC_ST_SHIFT 0
+
+
+/*Register VIO (0x80) register.RegisterDescription */
+#define VIO_ILMAX_MASK 0xC0
+#define VIO_ILMAX_SHIFT 6
+#define VIO_SEL_MASK 0x0C
+#define VIO_SEL_SHIFT 2
+#define VIO_ST_MASK 0x03
+#define VIO_ST_SHIFT 0
+
+
+/*Register VDD1 (0x80) register.RegisterDescription */
+#define VDD1_VGAIN_SEL_MASK 0xC0
+#define VDD1_VGAIN_SEL_SHIFT 6
+#define VDD1_ILMAX_MASK 0x20
+#define VDD1_ILMAX_SHIFT 5
+#define VDD1_TSTEP_MASK 0x1C
+#define VDD1_TSTEP_SHIFT 2
+#define VDD1_ST_MASK 0x03
+#define VDD1_ST_SHIFT 0
+
+
+/*Register VDD1_OP (0x80) register.RegisterDescription */
+#define VDD1_OP_CMD_MASK 0x80
+#define VDD1_OP_CMD_SHIFT 7
+#define VDD1_OP_SEL_MASK 0x7F
+#define VDD1_OP_SEL_SHIFT 0
+
+
+/*Register VDD1_SR (0x80) register.RegisterDescription */
+#define VDD1_SR_SEL_MASK 0x7F
+#define VDD1_SR_SEL_SHIFT 0
+
+
+/*Register VDD2 (0x80) register.RegisterDescription */
+#define VDD2_VGAIN_SEL_MASK 0xC0
+#define VDD2_VGAIN_SEL_SHIFT 6
+#define VDD2_ILMAX_MASK 0x20
+#define VDD2_ILMAX_SHIFT 5
+#define VDD2_TSTEP_MASK 0x1C
+#define VDD2_TSTEP_SHIFT 2
+#define VDD2_ST_MASK 0x03
+#define VDD2_ST_SHIFT 0
+
+
+/*Register VDD2_OP (0x80) register.RegisterDescription */
+#define VDD2_OP_CMD_MASK 0x80
+#define VDD2_OP_CMD_SHIFT 7
+#define VDD2_OP_SEL_MASK 0x7F
+#define VDD2_OP_SEL_SHIFT 0
+
+/*Register VDD2_SR (0x80) register.RegisterDescription */
+#define VDD2_SR_SEL_MASK 0x7F
+#define VDD2_SR_SEL_SHIFT 0
+
+
+/*Registers VDD1, VDD2 voltage values definitions */
+#define VDD1_2_NUM_VOLT_FINE 73
+#define VDD1_2_NUM_VOLT_COARSE 3
+#define VDD1_2_MIN_VOLT 6000
+#define VDD1_2_OFFSET 125
+
+
+/*Register VDD3 (0x80) register.RegisterDescription */
+#define VDD3_CKINEN_MASK 0x04
+#define VDD3_CKINEN_SHIFT 2
+#define VDD3_ST_MASK 0x03
+#define VDD3_ST_SHIFT 0
+#define VDDCTRL_MIN_VOLT 6000
+#define VDDCTRL_OFFSET 125
+
+/*Registers VDIG (0x80) to VDAC register.RegisterDescription */
+#define LDO_SEL_MASK 0x0C
+#define LDO_SEL_SHIFT 2
+#define LDO_ST_MASK 0x03
+#define LDO_ST_SHIFT 0
+#define LDO_ST_ON_BIT 0x01
+#define LDO_ST_MODE_BIT 0x02
+
+
+/* Registers LDO1 to LDO8 in tps65910 */
+#define LDO1_SEL_MASK 0xFC
+#define LDO3_SEL_MASK 0x7C
+#define LDO_MIN_VOLT 1000
+#define LDO_MAX_VOLT 3300
+
+
+/*Register VDIG1 (0x80) register.RegisterDescription */
+#define VDIG1_SEL_MASK 0x0C
+#define VDIG1_SEL_SHIFT 2
+#define VDIG1_ST_MASK 0x03
+#define VDIG1_ST_SHIFT 0
+
+
+/*Register VDIG2 (0x80) register.RegisterDescription */
+#define VDIG2_SEL_MASK 0x0C
+#define VDIG2_SEL_SHIFT 2
+#define VDIG2_ST_MASK 0x03
+#define VDIG2_ST_SHIFT 0
+
+
+/*Register VAUX1 (0x80) register.RegisterDescription */
+#define VAUX1_SEL_MASK 0x0C
+#define VAUX1_SEL_SHIFT 2
+#define VAUX1_ST_MASK 0x03
+#define VAUX1_ST_SHIFT 0
+
+
+/*Register VAUX2 (0x80) register.RegisterDescription */
+#define VAUX2_SEL_MASK 0x0C
+#define VAUX2_SEL_SHIFT 2
+#define VAUX2_ST_MASK 0x03
+#define VAUX2_ST_SHIFT 0
+
+
+/*Register VAUX33 (0x80) register.RegisterDescription */
+#define VAUX33_SEL_MASK 0x0C
+#define VAUX33_SEL_SHIFT 2
+#define VAUX33_ST_MASK 0x03
+#define VAUX33_ST_SHIFT 0
+
+
+/*Register VMMC (0x80) register.RegisterDescription */
+#define VMMC_SEL_MASK 0x0C
+#define VMMC_SEL_SHIFT 2
+#define VMMC_ST_MASK 0x03
+#define VMMC_ST_SHIFT 0
+
+
+/*Register VPLL (0x80) register.RegisterDescription */
+#define VPLL_SEL_MASK 0x0C
+#define VPLL_SEL_SHIFT 2
+#define VPLL_ST_MASK 0x03
+#define VPLL_ST_SHIFT 0
+
+
+/*Register VDAC (0x80) register.RegisterDescription */
+#define VDAC_SEL_MASK 0x0C
+#define VDAC_SEL_SHIFT 2
+#define VDAC_ST_MASK 0x03
+#define VDAC_ST_SHIFT 0
+
+
+/*Register THERM (0x80) register.RegisterDescription */
+#define THERM_THERM_HD_MASK 0x20
+#define THERM_THERM_HD_SHIFT 5
+#define THERM_THERM_TS_MASK 0x10
+#define THERM_THERM_TS_SHIFT 4
+#define THERM_THERM_HDSEL_MASK 0x0C
+#define THERM_THERM_HDSEL_SHIFT 2
+#define THERM_RSVD1_MASK 0x02
+#define THERM_RSVD1_SHIFT 1
+#define THERM_THERM_STATE_MASK 0x01
+#define THERM_THERM_STATE_SHIFT 0
+
+
+/*Register BBCH (0x80) register.RegisterDescription */
+#define BBCH_BBSEL_MASK 0x06
+#define BBCH_BBSEL_SHIFT 1
+
+
+/*Register DCDCCTRL (0x80) register.RegisterDescription */
+#define DCDCCTRL_VDD2_PSKIP_MASK 0x20
+#define DCDCCTRL_VDD2_PSKIP_SHIFT 5
+#define DCDCCTRL_VDD1_PSKIP_MASK 0x10
+#define DCDCCTRL_VDD1_PSKIP_SHIFT 4
+#define DCDCCTRL_VIO_PSKIP_MASK 0x08
+#define DCDCCTRL_VIO_PSKIP_SHIFT 3
+#define DCDCCTRL_DCDCCKEXT_MASK 0x04
+#define DCDCCTRL_DCDCCKEXT_SHIFT 2
+#define DCDCCTRL_DCDCCKSYNC_MASK 0x03
+#define DCDCCTRL_DCDCCKSYNC_SHIFT 0
+
+
+/*Register DEVCTRL (0x80) register.RegisterDescription */
+#define DEVCTRL_PWR_OFF_MASK 0x80
+#define DEVCTRL_PWR_OFF_SHIFT 7
+#define DEVCTRL_RTC_PWDN_MASK 0x40
+#define DEVCTRL_RTC_PWDN_SHIFT 6
+#define DEVCTRL_CK32K_CTRL_MASK 0x20
+#define DEVCTRL_CK32K_CTRL_SHIFT 5
+#define DEVCTRL_SR_CTL_I2C_SEL_MASK 0x10
+#define DEVCTRL_SR_CTL_I2C_SEL_SHIFT 4
+#define DEVCTRL_DEV_OFF_RST_MASK 0x08
+#define DEVCTRL_DEV_OFF_RST_SHIFT 3
+#define DEVCTRL_DEV_ON_MASK 0x04
+#define DEVCTRL_DEV_ON_SHIFT 2
+#define DEVCTRL_DEV_SLP_MASK 0x02
+#define DEVCTRL_DEV_SLP_SHIFT 1
+#define DEVCTRL_DEV_OFF_MASK 0x01
+#define DEVCTRL_DEV_OFF_SHIFT 0
+
+
+/*Register DEVCTRL2 (0x80) register.RegisterDescription */
+#define DEVCTRL2_TSLOT_LENGTH_MASK 0x30
+#define DEVCTRL2_TSLOT_LENGTH_SHIFT 4
+#define DEVCTRL2_SLEEPSIG_POL_MASK 0x08
+#define DEVCTRL2_SLEEPSIG_POL_SHIFT 3
+#define DEVCTRL2_PWON_LP_OFF_MASK 0x04
+#define DEVCTRL2_PWON_LP_OFF_SHIFT 2
+#define DEVCTRL2_PWON_LP_RST_MASK 0x02
+#define DEVCTRL2_PWON_LP_RST_SHIFT 1
+#define DEVCTRL2_IT_POL_MASK 0x01
+#define DEVCTRL2_IT_POL_SHIFT 0
+
+
+/*Register SLEEP_KEEP_LDO_ON (0x80) register.RegisterDescription */
+#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_MASK 0x80
+#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_SHIFT 7
+#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_MASK 0x40
+#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_SHIFT 6
+#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_MASK 0x20
+#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_SHIFT 5
+#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_MASK 0x10
+#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_SHIFT 4
+#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_MASK 0x08
+#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_SHIFT 3
+#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_MASK 0x04
+#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_SHIFT 2
+#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_MASK 0x02
+#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_SHIFT 1
+#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_MASK 0x01
+#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_SHIFT 0
+
+
+/*Register SLEEP_KEEP_RES_ON (0x80) register.RegisterDescription */
+#define SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK 0x80
+#define SLEEP_KEEP_RES_ON_THERM_KEEPON_SHIFT 7
+#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK 0x40
+#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_SHIFT 6
+#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_MASK 0x20
+#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_SHIFT 5
+#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK 0x10
+#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_SHIFT 4
+#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_MASK 0x08
+#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_SHIFT 3
+#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_MASK 0x04
+#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_SHIFT 2
+#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_MASK 0x02
+#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_SHIFT 1
+#define SLEEP_KEEP_RES_ON_VIO_KEEPON_MASK 0x01
+#define SLEEP_KEEP_RES_ON_VIO_KEEPON_SHIFT 0
+
+
+/*Register SLEEP_SET_LDO_OFF (0x80) register.RegisterDescription */
+#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_MASK 0x80
+#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_SHIFT 7
+#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_MASK 0x40
+#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_SHIFT 6
+#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_MASK 0x20
+#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_SHIFT 5
+#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_MASK 0x10
+#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_SHIFT 4
+#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_MASK 0x08
+#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_SHIFT 3
+#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_MASK 0x04
+#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_SHIFT 2
+#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_MASK 0x02
+#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_SHIFT 1
+#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_MASK 0x01
+#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_SHIFT 0
+
+
+/*Register SLEEP_SET_RES_OFF (0x80) register.RegisterDescription */
+#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_MASK 0x80
+#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_SHIFT 7
+#define SLEEP_SET_RES_OFF_RSVD_MASK 0x60
+#define SLEEP_SET_RES_OFF_RSVD_SHIFT 5
+#define SLEEP_SET_RES_OFF_SPARE_SETOFF_MASK 0x10
+#define SLEEP_SET_RES_OFF_SPARE_SETOFF_SHIFT 4
+#define SLEEP_SET_RES_OFF_VDD3_SETOFF_MASK 0x08
+#define SLEEP_SET_RES_OFF_VDD3_SETOFF_SHIFT 3
+#define SLEEP_SET_RES_OFF_VDD2_SETOFF_MASK 0x04
+#define SLEEP_SET_RES_OFF_VDD2_SETOFF_SHIFT 2
+#define SLEEP_SET_RES_OFF_VDD1_SETOFF_MASK 0x02
+#define SLEEP_SET_RES_OFF_VDD1_SETOFF_SHIFT 1
+#define SLEEP_SET_RES_OFF_VIO_SETOFF_MASK 0x01
+#define SLEEP_SET_RES_OFF_VIO_SETOFF_SHIFT 0
+
+
+/*Register EN1_LDO_ASS (0x80) register.RegisterDescription */
+#define EN1_LDO_ASS_VDAC_EN1_MASK 0x80
+#define EN1_LDO_ASS_VDAC_EN1_SHIFT 7
+#define EN1_LDO_ASS_VPLL_EN1_MASK 0x40
+#define EN1_LDO_ASS_VPLL_EN1_SHIFT 6
+#define EN1_LDO_ASS_VAUX33_EN1_MASK 0x20
+#define EN1_LDO_ASS_VAUX33_EN1_SHIFT 5
+#define EN1_LDO_ASS_VAUX2_EN1_MASK 0x10
+#define EN1_LDO_ASS_VAUX2_EN1_SHIFT 4
+#define EN1_LDO_ASS_VAUX1_EN1_MASK 0x08
+#define EN1_LDO_ASS_VAUX1_EN1_SHIFT 3
+#define EN1_LDO_ASS_VDIG2_EN1_MASK 0x04
+#define EN1_LDO_ASS_VDIG2_EN1_SHIFT 2
+#define EN1_LDO_ASS_VDIG1_EN1_MASK 0x02
+#define EN1_LDO_ASS_VDIG1_EN1_SHIFT 1
+#define EN1_LDO_ASS_VMMC_EN1_MASK 0x01
+#define EN1_LDO_ASS_VMMC_EN1_SHIFT 0
+
+
+/*Register EN1_SMPS_ASS (0x80) register.RegisterDescription */
+#define EN1_SMPS_ASS_RSVD_MASK 0xE0
+#define EN1_SMPS_ASS_RSVD_SHIFT 5
+#define EN1_SMPS_ASS_SPARE_EN1_MASK 0x10
+#define EN1_SMPS_ASS_SPARE_EN1_SHIFT 4
+#define EN1_SMPS_ASS_VDD3_EN1_MASK 0x08
+#define EN1_SMPS_ASS_VDD3_EN1_SHIFT 3
+#define EN1_SMPS_ASS_VDD2_EN1_MASK 0x04
+#define EN1_SMPS_ASS_VDD2_EN1_SHIFT 2
+#define EN1_SMPS_ASS_VDD1_EN1_MASK 0x02
+#define EN1_SMPS_ASS_VDD1_EN1_SHIFT 1
+#define EN1_SMPS_ASS_VIO_EN1_MASK 0x01
+#define EN1_SMPS_ASS_VIO_EN1_SHIFT 0
+
+
+/*Register EN2_LDO_ASS (0x80) register.RegisterDescription */
+#define EN2_LDO_ASS_VDAC_EN2_MASK 0x80
+#define EN2_LDO_ASS_VDAC_EN2_SHIFT 7
+#define EN2_LDO_ASS_VPLL_EN2_MASK 0x40
+#define EN2_LDO_ASS_VPLL_EN2_SHIFT 6
+#define EN2_LDO_ASS_VAUX33_EN2_MASK 0x20
+#define EN2_LDO_ASS_VAUX33_EN2_SHIFT 5
+#define EN2_LDO_ASS_VAUX2_EN2_MASK 0x10
+#define EN2_LDO_ASS_VAUX2_EN2_SHIFT 4
+#define EN2_LDO_ASS_VAUX1_EN2_MASK 0x08
+#define EN2_LDO_ASS_VAUX1_EN2_SHIFT 3
+#define EN2_LDO_ASS_VDIG2_EN2_MASK 0x04
+#define EN2_LDO_ASS_VDIG2_EN2_SHIFT 2
+#define EN2_LDO_ASS_VDIG1_EN2_MASK 0x02
+#define EN2_LDO_ASS_VDIG1_EN2_SHIFT 1
+#define EN2_LDO_ASS_VMMC_EN2_MASK 0x01
+#define EN2_LDO_ASS_VMMC_EN2_SHIFT 0
+
+
+/*Register EN2_SMPS_ASS (0x80) register.RegisterDescription */
+#define EN2_SMPS_ASS_RSVD_MASK 0xE0
+#define EN2_SMPS_ASS_RSVD_SHIFT 5
+#define EN2_SMPS_ASS_SPARE_EN2_MASK 0x10
+#define EN2_SMPS_ASS_SPARE_EN2_SHIFT 4
+#define EN2_SMPS_ASS_VDD3_EN2_MASK 0x08
+#define EN2_SMPS_ASS_VDD3_EN2_SHIFT 3
+#define EN2_SMPS_ASS_VDD2_EN2_MASK 0x04
+#define EN2_SMPS_ASS_VDD2_EN2_SHIFT 2
+#define EN2_SMPS_ASS_VDD1_EN2_MASK 0x02
+#define EN2_SMPS_ASS_VDD1_EN2_SHIFT 1
+#define EN2_SMPS_ASS_VIO_EN2_MASK 0x01
+#define EN2_SMPS_ASS_VIO_EN2_SHIFT 0
+
+
+/*Register EN3_LDO_ASS (0x80) register.RegisterDescription */
+#define EN3_LDO_ASS_VDAC_EN3_MASK 0x80
+#define EN3_LDO_ASS_VDAC_EN3_SHIFT 7
+#define EN3_LDO_ASS_VPLL_EN3_MASK 0x40
+#define EN3_LDO_ASS_VPLL_EN3_SHIFT 6
+#define EN3_LDO_ASS_VAUX33_EN3_MASK 0x20
+#define EN3_LDO_ASS_VAUX33_EN3_SHIFT 5
+#define EN3_LDO_ASS_VAUX2_EN3_MASK 0x10
+#define EN3_LDO_ASS_VAUX2_EN3_SHIFT 4
+#define EN3_LDO_ASS_VAUX1_EN3_MASK 0x08
+#define EN3_LDO_ASS_VAUX1_EN3_SHIFT 3
+#define EN3_LDO_ASS_VDIG2_EN3_MASK 0x04
+#define EN3_LDO_ASS_VDIG2_EN3_SHIFT 2
+#define EN3_LDO_ASS_VDIG1_EN3_MASK 0x02
+#define EN3_LDO_ASS_VDIG1_EN3_SHIFT 1
+#define EN3_LDO_ASS_VMMC_EN3_MASK 0x01
+#define EN3_LDO_ASS_VMMC_EN3_SHIFT 0
+
+
+/*Register SPARE (0x80) register.RegisterDescription */
+#define SPARE_SPARE_MASK 0xFF
+#define SPARE_SPARE_SHIFT 0
+
+#define TPS65910_INT_STS_RTC_PERIOD_IT_MASK 0x80
+#define TPS65910_INT_STS_RTC_PERIOD_IT_SHIFT 7
+#define TPS65910_INT_STS_RTC_ALARM_IT_MASK 0x40
+#define TPS65910_INT_STS_RTC_ALARM_IT_SHIFT 6
+#define TPS65910_INT_STS_HOTDIE_IT_MASK 0x20
+#define TPS65910_INT_STS_HOTDIE_IT_SHIFT 5
+#define TPS65910_INT_STS_PWRHOLD_F_IT_MASK 0x10
+#define TPS65910_INT_STS_PWRHOLD_F_IT_SHIFT 4
+#define TPS65910_INT_STS_PWRON_LP_IT_MASK 0x08
+#define TPS65910_INT_STS_PWRON_LP_IT_SHIFT 3
+#define TPS65910_INT_STS_PWRON_IT_MASK 0x04
+#define TPS65910_INT_STS_PWRON_IT_SHIFT 2
+#define TPS65910_INT_STS_VMBHI_IT_MASK 0x02
+#define TPS65910_INT_STS_VMBHI_IT_SHIFT 1
+#define TPS65910_INT_STS_VMBDCH_IT_MASK 0x01
+#define TPS65910_INT_STS_VMBDCH_IT_SHIFT 0
+
+#define TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80
+#define TPS65910_INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7
+#define TPS65910_INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40
+#define TPS65910_INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6
+#define TPS65910_INT_MSK_HOTDIE_IT_MSK_MASK 0x20
+#define TPS65910_INT_MSK_HOTDIE_IT_MSK_SHIFT 5
+#define TPS65910_INT_MSK_PWRHOLD_IT_MSK_MASK 0x10
+#define TPS65910_INT_MSK_PWRHOLD_IT_MSK_SHIFT 4
+#define TPS65910_INT_MSK_PWRON_LP_IT_MSK_MASK 0x08
+#define TPS65910_INT_MSK_PWRON_LP_IT_MSK_SHIFT 3
+#define TPS65910_INT_MSK_PWRON_IT_MSK_MASK 0x04
+#define TPS65910_INT_MSK_PWRON_IT_MSK_SHIFT 2
+#define TPS65910_INT_MSK_VMBHI_IT_MSK_MASK 0x02
+#define TPS65910_INT_MSK_VMBHI_IT_MSK_SHIFT 1
+#define TPS65910_INT_MSK_VMBDCH_IT_MSK_MASK 0x01
+#define TPS65910_INT_MSK_VMBDCH_IT_MSK_SHIFT 0
+
+#define TPS65910_INT_STS2_GPIO0_F_IT_SHIFT 2
+#define TPS65910_INT_STS2_GPIO0_F_IT_MASK 0x02
+#define TPS65910_INT_STS2_GPIO0_R_IT_SHIFT 1
+#define TPS65910_INT_STS2_GPIO0_R_IT_MASK 0x01
+
+#define TPS65910_INT_MSK2_GPIO0_F_IT_MSK_SHIFT 2
+#define TPS65910_INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02
+#define TPS65910_INT_MSK2_GPIO0_R_IT_MSK_SHIFT 1
+#define TPS65910_INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01
+
+/*Register INT_STS (0x80) register.RegisterDescription */
+#define INT_STS_RTC_PERIOD_IT_MASK 0x80
+#define INT_STS_RTC_PERIOD_IT_SHIFT 7
+#define INT_STS_RTC_ALARM_IT_MASK 0x40
+#define INT_STS_RTC_ALARM_IT_SHIFT 6
+#define INT_STS_HOTDIE_IT_MASK 0x20
+#define INT_STS_HOTDIE_IT_SHIFT 5
+#define INT_STS_PWRHOLD_R_IT_MASK 0x10
+#define INT_STS_PWRHOLD_R_IT_SHIFT 4
+#define INT_STS_PWRON_LP_IT_MASK 0x08
+#define INT_STS_PWRON_LP_IT_SHIFT 3
+#define INT_STS_PWRON_IT_MASK 0x04
+#define INT_STS_PWRON_IT_SHIFT 2
+#define INT_STS_VMBHI_IT_MASK 0x02
+#define INT_STS_VMBHI_IT_SHIFT 1
+#define INT_STS_PWRHOLD_F_IT_MASK 0x01
+#define INT_STS_PWRHOLD_F_IT_SHIFT 0
+
+
+/*Register INT_MSK (0x80) register.RegisterDescription */
+#define INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80
+#define INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7
+#define INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40
+#define INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6
+#define INT_MSK_HOTDIE_IT_MSK_MASK 0x20
+#define INT_MSK_HOTDIE_IT_MSK_SHIFT 5
+#define INT_MSK_PWRHOLD_R_IT_MSK_MASK 0x10
+#define INT_MSK_PWRHOLD_R_IT_MSK_SHIFT 4
+#define INT_MSK_PWRON_LP_IT_MSK_MASK 0x08
+#define INT_MSK_PWRON_LP_IT_MSK_SHIFT 3
+#define INT_MSK_PWRON_IT_MSK_MASK 0x04
+#define INT_MSK_PWRON_IT_MSK_SHIFT 2
+#define INT_MSK_VMBHI_IT_MSK_MASK 0x02
+#define INT_MSK_VMBHI_IT_MSK_SHIFT 1
+#define INT_MSK_PWRHOLD_F_IT_MSK_MASK 0x01
+#define INT_MSK_PWRHOLD_F_IT_MSK_SHIFT 0
+
+
+/*Register INT_STS2 (0x80) register.RegisterDescription */
+#define INT_STS2_GPIO3_F_IT_MASK 0x80
+#define INT_STS2_GPIO3_F_IT_SHIFT 7
+#define INT_STS2_GPIO3_R_IT_MASK 0x40
+#define INT_STS2_GPIO3_R_IT_SHIFT 6
+#define INT_STS2_GPIO2_F_IT_MASK 0x20
+#define INT_STS2_GPIO2_F_IT_SHIFT 5
+#define INT_STS2_GPIO2_R_IT_MASK 0x10
+#define INT_STS2_GPIO2_R_IT_SHIFT 4
+#define INT_STS2_GPIO1_F_IT_MASK 0x08
+#define INT_STS2_GPIO1_F_IT_SHIFT 3
+#define INT_STS2_GPIO1_R_IT_MASK 0x04
+#define INT_STS2_GPIO1_R_IT_SHIFT 2
+#define INT_STS2_GPIO0_F_IT_MASK 0x02
+#define INT_STS2_GPIO0_F_IT_SHIFT 1
+#define INT_STS2_GPIO0_R_IT_MASK 0x01
+#define INT_STS2_GPIO0_R_IT_SHIFT 0
+
+
+/*Register INT_MSK2 (0x80) register.RegisterDescription */
+#define INT_MSK2_GPIO3_F_IT_MSK_MASK 0x80
+#define INT_MSK2_GPIO3_F_IT_MSK_SHIFT 7
+#define INT_MSK2_GPIO3_R_IT_MSK_MASK 0x40
+#define INT_MSK2_GPIO3_R_IT_MSK_SHIFT 6
+#define INT_MSK2_GPIO2_F_IT_MSK_MASK 0x20
+#define INT_MSK2_GPIO2_F_IT_MSK_SHIFT 5
+#define INT_MSK2_GPIO2_R_IT_MSK_MASK 0x10
+#define INT_MSK2_GPIO2_R_IT_MSK_SHIFT 4
+#define INT_MSK2_GPIO1_F_IT_MSK_MASK 0x08
+#define INT_MSK2_GPIO1_F_IT_MSK_SHIFT 3
+#define INT_MSK2_GPIO1_R_IT_MSK_MASK 0x04
+#define INT_MSK2_GPIO1_R_IT_MSK_SHIFT 2
+#define INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02
+#define INT_MSK2_GPIO0_F_IT_MSK_SHIFT 1
+#define INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01
+#define INT_MSK2_GPIO0_R_IT_MSK_SHIFT 0
+
+
+/*Register INT_STS3 (0x80) register.RegisterDescription */
+#define INT_STS3_PWRDN_IT_MASK 0x80
+#define INT_STS3_PWRDN_IT_SHIFT 7
+#define INT_STS3_VMBCH2_L_IT_MASK 0x40
+#define INT_STS3_VMBCH2_L_IT_SHIFT 6
+#define INT_STS3_VMBCH2_H_IT_MASK 0x20
+#define INT_STS3_VMBCH2_H_IT_SHIFT 5
+#define INT_STS3_WTCHDG_IT_MASK 0x10
+#define INT_STS3_WTCHDG_IT_SHIFT 4
+#define INT_STS3_GPIO5_F_IT_MASK 0x08
+#define INT_STS3_GPIO5_F_IT_SHIFT 3
+#define INT_STS3_GPIO5_R_IT_MASK 0x04
+#define INT_STS3_GPIO5_R_IT_SHIFT 2
+#define INT_STS3_GPIO4_F_IT_MASK 0x02
+#define INT_STS3_GPIO4_F_IT_SHIFT 1
+#define INT_STS3_GPIO4_R_IT_MASK 0x01
+#define INT_STS3_GPIO4_R_IT_SHIFT 0
+
+
+/*Register INT_MSK3 (0x80) register.RegisterDescription */
+#define INT_MSK3_PWRDN_IT_MSK_MASK 0x80
+#define INT_MSK3_PWRDN_IT_MSK_SHIFT 7
+#define INT_MSK3_VMBCH2_L_IT_MSK_MASK 0x40
+#define INT_MSK3_VMBCH2_L_IT_MSK_SHIFT 6
+#define INT_MSK3_VMBCH2_H_IT_MSK_MASK 0x20
+#define INT_MSK3_VMBCH2_H_IT_MSK_SHIFT 5
+#define INT_MSK3_WTCHDG_IT_MSK_MASK 0x10
+#define INT_MSK3_WTCHDG_IT_MSK_SHIFT 4
+#define INT_MSK3_GPIO5_F_IT_MSK_MASK 0x08
+#define INT_MSK3_GPIO5_F_IT_MSK_SHIFT 3
+#define INT_MSK3_GPIO5_R_IT_MSK_MASK 0x04
+#define INT_MSK3_GPIO5_R_IT_MSK_SHIFT 2
+#define INT_MSK3_GPIO4_F_IT_MSK_MASK 0x02
+#define INT_MSK3_GPIO4_F_IT_MSK_SHIFT 1
+#define INT_MSK3_GPIO4_R_IT_MSK_MASK 0x01
+#define INT_MSK3_GPIO4_R_IT_MSK_SHIFT 0
+
+
+/*Register GPIO (0x80) register.RegisterDescription */
+#define GPIO_SLEEP_MASK 0x80
+#define GPIO_SLEEP_SHIFT 7
+#define GPIO_DEB_MASK 0x10
+#define GPIO_DEB_SHIFT 4
+#define GPIO_PUEN_MASK 0x08
+#define GPIO_PUEN_SHIFT 3
+#define GPIO_CFG_MASK 0x04
+#define GPIO_CFG_SHIFT 2
+#define GPIO_STS_MASK 0x02
+#define GPIO_STS_SHIFT 1
+#define GPIO_SET_MASK 0x01
+#define GPIO_SET_SHIFT 0
+
+
+/*Register JTAGVERNUM (0x80) register.RegisterDescription */
+#define JTAGVERNUM_VERNUM_MASK 0x0F
+#define JTAGVERNUM_VERNUM_SHIFT 0
+
+
+/* Register VDDCTRL (0x27) bit definitions */
+#define VDDCTRL_ST_MASK 0x03
+#define VDDCTRL_ST_SHIFT 0
+
+
+/*Register VDDCTRL_OP (0x28) bit definitions */
+#define VDDCTRL_OP_CMD_MASK 0x80
+#define VDDCTRL_OP_CMD_SHIFT 7
+#define VDDCTRL_OP_SEL_MASK 0x7F
+#define VDDCTRL_OP_SEL_SHIFT 0
+
+
+/*Register VDDCTRL_SR (0x29) bit definitions */
+#define VDDCTRL_SR_SEL_MASK 0x7F
+#define VDDCTRL_SR_SEL_SHIFT 0
+
+
+/* IRQ Definitions */
+#define TPS65910_IRQ_VBAT_VMBDCH 0
+#define TPS65910_IRQ_VBAT_VMHI 1
+#define TPS65910_IRQ_PWRON 2
+#define TPS65910_IRQ_PWRON_LP 3
+#define TPS65910_IRQ_PWRHOLD 4
+#define TPS65910_IRQ_HOTDIE 5
+#define TPS65910_IRQ_RTC_ALARM 6
+#define TPS65910_IRQ_RTC_PERIOD 7
+#define TPS65910_IRQ_GPIO_R 8
+#define TPS65910_IRQ_GPIO_F 9
+#define TPS65910_NUM_IRQ 10
+
+#define TPS65911_IRQ_PWRHOLD_F 0
+#define TPS65911_IRQ_VBAT_VMHI 1
+#define TPS65911_IRQ_PWRON 2
+#define TPS65911_IRQ_PWRON_LP 3
+#define TPS65911_IRQ_PWRHOLD_R 4
+#define TPS65911_IRQ_HOTDIE 5
+#define TPS65911_IRQ_RTC_ALARM 6
+#define TPS65911_IRQ_RTC_PERIOD 7
+#define TPS65911_IRQ_GPIO0_R 8
+#define TPS65911_IRQ_GPIO0_F 9
+#define TPS65911_IRQ_GPIO1_R 10
+#define TPS65911_IRQ_GPIO1_F 11
+#define TPS65911_IRQ_GPIO2_R 12
+#define TPS65911_IRQ_GPIO2_F 13
+#define TPS65911_IRQ_GPIO3_R 14
+#define TPS65911_IRQ_GPIO3_F 15
+#define TPS65911_IRQ_GPIO4_R 16
+#define TPS65911_IRQ_GPIO4_F 17
+#define TPS65911_IRQ_GPIO5_R 18
+#define TPS65911_IRQ_GPIO5_F 19
+#define TPS65911_IRQ_WTCHDG 20
+#define TPS65911_IRQ_VMBCH2_H 21
+#define TPS65911_IRQ_VMBCH2_L 22
+#define TPS65911_IRQ_PWRDN 23
+
+#define TPS65911_NUM_IRQ 24
+
+/* GPIO Register Definitions */
+#define TPS65910_GPIO_DEB BIT(2)
+#define TPS65910_GPIO_PUEN BIT(3)
+#define TPS65910_GPIO_CFG BIT(2)
+#define TPS65910_GPIO_STS BIT(1)
+#define TPS65910_GPIO_SET BIT(0)
+
+/* Max number of TPS65910/11 GPIOs */
+#define TPS65910_NUM_GPIO 6
+#define TPS65911_NUM_GPIO 9
+#define TPS6591X_MAX_NUM_GPIO 9
+
+/* Regulator Index Definitions */
+#define TPS65910_REG_VRTC 0
+#define TPS65910_REG_VIO 1
+#define TPS65910_REG_VDD1 2
+#define TPS65910_REG_VDD2 3
+#define TPS65910_REG_VDD3 4
+#define TPS65910_REG_VDIG1 5
+#define TPS65910_REG_VDIG2 6
+#define TPS65910_REG_VPLL 7
+#define TPS65910_REG_VDAC 8
+#define TPS65910_REG_VAUX1 9
+#define TPS65910_REG_VAUX2 10
+#define TPS65910_REG_VAUX33 11
+#define TPS65910_REG_VMMC 12
+#define TPS65910_REG_VBB 13
+
+#define TPS65911_REG_VDDCTRL 4
+#define TPS65911_REG_LDO1 5
+#define TPS65911_REG_LDO2 6
+#define TPS65911_REG_LDO3 7
+#define TPS65911_REG_LDO4 8
+#define TPS65911_REG_LDO5 9
+#define TPS65911_REG_LDO6 10
+#define TPS65911_REG_LDO7 11
+#define TPS65911_REG_LDO8 12
+
+/* Max number of TPS65910/11 regulators */
+#define TPS65910_NUM_REGS 14
+
+/* External sleep controls through EN1/EN2/EN3/SLEEP inputs */
+#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1 0x1
+#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2 0x2
+#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 0x4
+#define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP 0x8
+
+/*
+ * Sleep keepon data: maintains the state of selected resources in sleep mode
+ * @therm_keepon: keep thermal monitoring enabled in sleep state.
+ * @clkout32k_keepon: keep the 32 kHz clock output enabled in sleep state.
+ * @i2chs_keepon: keep the high-speed internal clock enabled in sleep state.
+ */
+struct tps65910_sleep_keepon_data {
+ unsigned therm_keepon:1;
+ unsigned clkout32k_keepon:1;
+ unsigned i2chs_keepon:1;
+};
+
+/**
+ * struct tps65910_board
+ * Board platform data may be used to initialize regulators.
+ */
+
+struct tps65910_board {
+ int gpio_base;
+ int irq;
+ int irq_base;
+ int vmbch_threshold;
+ int vmbch2_threshold;
+ bool en_ck32k_xtal;
+ bool en_dev_slp;
+ bool pm_off;
+ struct tps65910_sleep_keepon_data *slp_keepon;
+ bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO];
+ unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS];
+ struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
+};
+
+/**
+ * struct tps65910 - tps65910 sub-driver chip access routines
+ */
+
+struct tps65910 {
+ struct device *dev;
+ struct i2c_client *i2c_client;
+ struct regmap *regmap;
+ unsigned long id;
+
+ /* Client devices */
+ struct tps65910_pmic *pmic;
+ struct tps65910_rtc *rtc;
+ struct tps65910_power *power;
+
+ /* Device node parsed board data */
+ struct tps65910_board *of_plat_data;
+
+ /* IRQ Handling */
+ int chip_irq;
+ struct regmap_irq_chip_data *irq_data;
+};
+
+struct tps65910_platform_data {
+ int irq;
+ int irq_base;
+};
+
+static inline int tps65910_chip_id(struct tps65910 *tps65910)
+{
+ return tps65910->id;
+}
+
+static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
+ unsigned int *val)
+{
+ return regmap_read(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
+ unsigned int val)
+{
+ return regmap_write(tps65910->regmap, reg, val);
+}
+
+static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
+ u8 mask)
+{
+ return regmap_update_bits(tps65910->regmap, reg, mask, mask);
+}
+
+static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
+ u8 mask)
+{
+ return regmap_update_bits(tps65910->regmap, reg, mask, 0);
+}
+
+static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
+ u8 mask, u8 val)
+{
+ return regmap_update_bits(tps65910->regmap, reg, mask, val);
+}
+
+static inline int tps65910_irq_get_virq(struct tps65910 *tps65910, int irq)
+{
+ return regmap_irq_get_virq(tps65910->irq_data, irq);
+}
+
+#endif /* __LINUX_MFD_TPS65910_H */
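[Editor's note: the inline helpers above are thin regmap wrappers, so sub-drivers never touch the I2C client directly. Below is a minimal sketch of how a cell driver might combine them with the GPIO bit-field macros defined earlier; the parent-device lookup and the register argument are illustrative assumptions, not something this header mandates.]

#include <linux/device.h>
#include <linux/mfd/tps65910.h>

/*
 * Illustrative sketch: drive one of the PMIC GPIO outputs high and report
 * its input status.  @dev is assumed to be a child (cell) device of the
 * tps65910 MFD, and @reg the address of one of the GPIO registers.
 */
static int example_gpio_set_and_check(struct device *dev, u8 reg)
{
        struct tps65910 *tps65910 = dev_get_drvdata(dev->parent);
        unsigned int val;
        int ret;

        ret = tps65910_reg_set_bits(tps65910, reg, GPIO_SET_MASK);
        if (ret < 0)
                return ret;

        ret = tps65910_reg_read(tps65910, reg, &val);
        if (ret < 0)
                return ret;

        return (val & GPIO_STS_MASK) >> GPIO_STS_SHIFT;
}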
diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h
new file mode 100644
index 000000000..6d309032d
--- /dev/null
+++ b/include/linux/mfd/tps65912.h
@@ -0,0 +1,328 @@
+/*
+ * tps65912.h -- TI TPS6591x
+ *
+ * Copyright 2011 Texas Instruments Inc.
+ *
+ * Author: Margarita Olaya <magi@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_TPS65912_H
+#define __LINUX_MFD_TPS65912_H
+
+/* TPS regulator type list */
+#define REGULATOR_LDO 0
+#define REGULATOR_DCDC 1
+
+/*
+ * List of registers for TPS65912
+ */
+
+#define TPS65912_DCDC1_CTRL 0x00
+#define TPS65912_DCDC2_CTRL 0x01
+#define TPS65912_DCDC3_CTRL 0x02
+#define TPS65912_DCDC4_CTRL 0x03
+#define TPS65912_DCDC1_OP 0x04
+#define TPS65912_DCDC1_AVS 0x05
+#define TPS65912_DCDC1_LIMIT 0x06
+#define TPS65912_DCDC2_OP 0x07
+#define TPS65912_DCDC2_AVS 0x08
+#define TPS65912_DCDC2_LIMIT 0x09
+#define TPS65912_DCDC3_OP 0x0A
+#define TPS65912_DCDC3_AVS 0x0B
+#define TPS65912_DCDC3_LIMIT 0x0C
+#define TPS65912_DCDC4_OP 0x0D
+#define TPS65912_DCDC4_AVS 0x0E
+#define TPS65912_DCDC4_LIMIT 0x0F
+#define TPS65912_LDO1_OP 0x10
+#define TPS65912_LDO1_AVS 0x11
+#define TPS65912_LDO1_LIMIT 0x12
+#define TPS65912_LDO2_OP 0x13
+#define TPS65912_LDO2_AVS 0x14
+#define TPS65912_LDO2_LIMIT 0x15
+#define TPS65912_LDO3_OP 0x16
+#define TPS65912_LDO3_AVS 0x17
+#define TPS65912_LDO3_LIMIT 0x18
+#define TPS65912_LDO4_OP 0x19
+#define TPS65912_LDO4_AVS 0x1A
+#define TPS65912_LDO4_LIMIT 0x1B
+#define TPS65912_LDO5 0x1C
+#define TPS65912_LDO6 0x1D
+#define TPS65912_LDO7 0x1E
+#define TPS65912_LDO8 0x1F
+#define TPS65912_LDO9 0x20
+#define TPS65912_LDO10 0x21
+#define TPS65912_THRM 0x22
+#define TPS65912_CLK32OUT 0x23
+#define TPS65912_DEVCTRL 0x24
+#define TPS65912_DEVCTRL2 0x25
+#define TPS65912_I2C_SPI_CFG 0x26
+#define TPS65912_KEEP_ON 0x27
+#define TPS65912_KEEP_ON2 0x28
+#define TPS65912_SET_OFF1 0x29
+#define TPS65912_SET_OFF2 0x2A
+#define TPS65912_DEF_VOLT 0x2B
+#define TPS65912_DEF_VOLT_MAPPING 0x2C
+#define TPS65912_DISCHARGE 0x2D
+#define TPS65912_DISCHARGE2 0x2E
+#define TPS65912_EN1_SET1 0x2F
+#define TPS65912_EN1_SET2 0x30
+#define TPS65912_EN2_SET1 0x31
+#define TPS65912_EN2_SET2 0x32
+#define TPS65912_EN3_SET1 0x33
+#define TPS65912_EN3_SET2 0x34
+#define TPS65912_EN4_SET1 0x35
+#define TPS65912_EN4_SET2 0x36
+#define TPS65912_PGOOD 0x37
+#define TPS65912_PGOOD2 0x38
+#define TPS65912_INT_STS 0x39
+#define TPS65912_INT_MSK 0x3A
+#define TPS65912_INT_STS2 0x3B
+#define TPS65912_INT_MSK2 0x3C
+#define TPS65912_INT_STS3 0x3D
+#define TPS65912_INT_MSK3 0x3E
+#define TPS65912_INT_STS4 0x3F
+#define TPS65912_INT_MSK4 0x40
+#define TPS65912_GPIO1 0x41
+#define TPS65912_GPIO2 0x42
+#define TPS65912_GPIO3 0x43
+#define TPS65912_GPIO4 0x44
+#define TPS65912_GPIO5 0x45
+#define TPS65912_VMON 0x46
+#define TPS65912_LEDA_CTRL1 0x47
+#define TPS65912_LEDA_CTRL2 0x48
+#define TPS65912_LEDA_CTRL3 0x49
+#define TPS65912_LEDA_CTRL4 0x4A
+#define TPS65912_LEDA_CTRL5 0x4B
+#define TPS65912_LEDA_CTRL6 0x4C
+#define TPS65912_LEDA_CTRL7 0x4D
+#define TPS65912_LEDA_CTRL8 0x4E
+#define TPS65912_LEDB_CTRL1 0x4F
+#define TPS65912_LEDB_CTRL2 0x50
+#define TPS65912_LEDB_CTRL3 0x51
+#define TPS65912_LEDB_CTRL4 0x52
+#define TPS65912_LEDB_CTRL5 0x53
+#define TPS65912_LEDB_CTRL6 0x54
+#define TPS65912_LEDB_CTRL7 0x55
+#define TPS65912_LEDB_CTRL8 0x56
+#define TPS65912_LEDC_CTRL1 0x57
+#define TPS65912_LEDC_CTRL2 0x58
+#define TPS65912_LEDC_CTRL3 0x59
+#define TPS65912_LEDC_CTRL4 0x5A
+#define TPS65912_LEDC_CTRL5 0x5B
+#define TPS65912_LEDC_CTRL6 0x5C
+#define TPS65912_LEDC_CTRL7 0x5D
+#define TPS65912_LEDC_CTRL8 0x5E
+#define TPS65912_LED_RAMP_UP_TIME 0x5F
+#define TPS65912_LED_RAMP_DOWN_TIME 0x60
+#define TPS65912_LED_SEQ_EN 0x61
+#define TPS65912_LOADSWITCH 0x62
+#define TPS65912_SPARE 0x63
+#define TPS65912_VERNUM 0x64
+#define TPS6591X_MAX_REGISTER 0x64
+
+/* IRQ Definitions */
+#define TPS65912_IRQ_PWRHOLD_F 0
+#define TPS65912_IRQ_VMON 1
+#define TPS65912_IRQ_PWRON 2
+#define TPS65912_IRQ_PWRON_LP 3
+#define TPS65912_IRQ_PWRHOLD_R 4
+#define TPS65912_IRQ_HOTDIE 5
+#define TPS65912_IRQ_GPIO1_R 6
+#define TPS65912_IRQ_GPIO1_F 7
+#define TPS65912_IRQ_GPIO2_R 8
+#define TPS65912_IRQ_GPIO2_F 9
+#define TPS65912_IRQ_GPIO3_R 10
+#define TPS65912_IRQ_GPIO3_F 11
+#define TPS65912_IRQ_GPIO4_R 12
+#define TPS65912_IRQ_GPIO4_F 13
+#define TPS65912_IRQ_GPIO5_R 14
+#define TPS65912_IRQ_GPIO5_F 15
+#define TPS65912_IRQ_PGOOD_DCDC1 16
+#define TPS65912_IRQ_PGOOD_DCDC2 17
+#define TPS65912_IRQ_PGOOD_DCDC3 18
+#define TPS65912_IRQ_PGOOD_DCDC4 19
+#define TPS65912_IRQ_PGOOD_LDO1 20
+#define TPS65912_IRQ_PGOOD_LDO2 21
+#define TPS65912_IRQ_PGOOD_LDO3 22
+#define TPS65912_IRQ_PGOOD_LDO4 23
+#define TPS65912_IRQ_PGOOD_LDO5 24
+#define TPS65912_IRQ_PGOOD_LDO6 25
+#define TPS65912_IRQ_PGOOD_LDO7 26
+#define TPS65912_IRQ_PGOOD_LD08 27
+#define TPS65912_IRQ_PGOOD_LDO9 28
+#define TPS65912_IRQ_PGOOD_LDO10 29
+
+#define TPS65912_NUM_IRQ 30
+
+/* GPIO 1 and 2 Register Definitions */
+#define GPIO_SLEEP_MASK 0x80
+#define GPIO_SLEEP_SHIFT 7
+#define GPIO_DEB_MASK 0x10
+#define GPIO_DEB_SHIFT 4
+#define GPIO_CFG_MASK 0x04
+#define GPIO_CFG_SHIFT 2
+#define GPIO_STS_MASK 0x02
+#define GPIO_STS_SHIFT 1
+#define GPIO_SET_MASK 0x01
+#define GPIO_SET_SHIFT 0
+
+/* GPIO 3 Register Definitions */
+#define GPIO3_SLEEP_MASK 0x80
+#define GPIO3_SLEEP_SHIFT 7
+#define GPIO3_SEL_MASK 0x40
+#define GPIO3_SEL_SHIFT 6
+#define GPIO3_ODEN_MASK 0x20
+#define GPIO3_ODEN_SHIFT 5
+#define GPIO3_DEB_MASK 0x10
+#define GPIO3_DEB_SHIFT 4
+#define GPIO3_PDEN_MASK 0x08
+#define GPIO3_PDEN_SHIFT 3
+#define GPIO3_CFG_MASK 0x04
+#define GPIO3_CFG_SHIFT 2
+#define GPIO3_STS_MASK 0x02
+#define GPIO3_STS_SHIFT 1
+#define GPIO3_SET_MASK 0x01
+#define GPIO3_SET_SHIFT 0
+
+/* GPIO 4 Register Definitions */
+#define GPIO4_SLEEP_MASK 0x80
+#define GPIO4_SLEEP_SHIFT 7
+#define GPIO4_SEL_MASK 0x40
+#define GPIO4_SEL_SHIFT 6
+#define GPIO4_ODEN_MASK 0x20
+#define GPIO4_ODEN_SHIFT 5
+#define GPIO4_DEB_MASK 0x10
+#define GPIO4_DEB_SHIFT 4
+#define GPIO4_PDEN_MASK 0x08
+#define GPIO4_PDEN_SHIFT 3
+#define GPIO4_CFG_MASK 0x04
+#define GPIO4_CFG_SHIFT 2
+#define GPIO4_STS_MASK 0x02
+#define GPIO4_STS_SHIFT 1
+#define GPIO4_SET_MASK 0x01
+#define GPIO4_SET_SHIFT 0
+
+/* Register THERM (0x80) bit definitions */
+#define THERM_THERM_HD_MASK 0x20
+#define THERM_THERM_HD_SHIFT 5
+#define THERM_THERM_TS_MASK 0x10
+#define THERM_THERM_TS_SHIFT 4
+#define THERM_THERM_HDSEL_MASK 0x0C
+#define THERM_THERM_HDSEL_SHIFT 2
+#define THERM_RSVD1_MASK 0x02
+#define THERM_RSVD1_SHIFT 1
+#define THERM_THERM_STATE_MASK 0x01
+#define THERM_THERM_STATE_SHIFT 0
+
+/* Register DCDCCTRL1 bit definitions */
+#define DCDCCTRL_VCON_ENABLE_MASK 0x80
+#define DCDCCTRL_VCON_ENABLE_SHIFT 7
+#define DCDCCTRL_VCON_RANGE1_MASK 0x40
+#define DCDCCTRL_VCON_RANGE1_SHIFT 6
+#define DCDCCTRL_VCON_RANGE0_MASK 0x20
+#define DCDCCTRL_VCON_RANGE0_SHIFT 5
+#define DCDCCTRL_TSTEP2_MASK 0x10
+#define DCDCCTRL_TSTEP2_SHIFT 4
+#define DCDCCTRL_TSTEP1_MASK 0x08
+#define DCDCCTRL_TSTEP1_SHIFT 3
+#define DCDCCTRL_TSTEP0_MASK 0x04
+#define DCDCCTRL_TSTEP0_SHIFT 2
+#define DCDCCTRL_DCDC1_MODE_MASK 0x02
+#define DCDCCTRL_DCDC1_MODE_SHIFT 1
+
+/* Register DCDCCTRL2 and DCDCCTRL3 bit definitions */
+#define DCDCCTRL_TSTEP2_MASK 0x10
+#define DCDCCTRL_TSTEP2_SHIFT 4
+#define DCDCCTRL_TSTEP1_MASK 0x08
+#define DCDCCTRL_TSTEP1_SHIFT 3
+#define DCDCCTRL_TSTEP0_MASK 0x04
+#define DCDCCTRL_TSTEP0_SHIFT 2
+#define DCDCCTRL_DCDC_MODE_MASK 0x02
+#define DCDCCTRL_DCDC_MODE_SHIFT 1
+#define DCDCCTRL_RSVD0_MASK 0x01
+#define DCDCCTRL_RSVD0_SHIFT 0
+
+/* Register DCDCCTRL4 bit definitions */
+#define DCDCCTRL_RAMP_TIME_MASK 0x01
+#define DCDCCTRL_RAMP_TIME_SHIFT 0
+
+/* Register DCDCx_AVS */
+#define DCDC_AVS_ENABLE_MASK 0x80
+#define DCDC_AVS_ENABLE_SHIFT 7
+#define DCDC_AVS_ECO_MASK 0x40
+#define DCDC_AVS_ECO_SHIFT 6
+
+/* Register DCDCx_LIMIT */
+#define DCDC_LIMIT_RANGE_MASK 0xC0
+#define DCDC_LIMIT_RANGE_SHIFT 6
+#define DCDC_LIMIT_MAX_SEL_MASK 0x3F
+#define DCDC_LIMIT_MAX_SEL_SHIFT 0
+
+/**
+ * struct tps65912_board
+ * Board platform data may be used to initialize regulators.
+ */
+struct tps65912_board {
+ int is_dcdc1_avs;
+ int is_dcdc2_avs;
+ int is_dcdc3_avs;
+ int is_dcdc4_avs;
+ int irq;
+ int irq_base;
+ int gpio_base;
+ struct regulator_init_data *tps65912_pmic_init_data;
+};
+
+/**
+ * struct tps65912 - tps65912 sub-driver chip access routines
+ */
+
+struct tps65912 {
+ struct device *dev;
+ /* For read/write access */
+ struct mutex io_mutex;
+
+ /* For device IO interfaces: I2C or SPI */
+ void *control_data;
+
+ int (*read)(struct tps65912 *tps65912, u8 reg, int size, void *dest);
+ int (*write)(struct tps65912 *tps65912, u8 reg, int size, void *src);
+
+ /* Client devices */
+ struct tps65912_pmic *pmic;
+
+ /* GPIO Handling */
+ struct gpio_chip gpio;
+
+ /* IRQ Handling */
+ struct mutex irq_lock;
+ int chip_irq;
+ int irq_base;
+ int irq_num;
+ u32 irq_mask;
+};
+
+struct tps65912_platform_data {
+ int irq;
+ int irq_base;
+};
+
+unsigned int tps_chip(void);
+
+int tps65912_set_bits(struct tps65912 *tps65912, u8 reg, u8 mask);
+int tps65912_clear_bits(struct tps65912 *tps65912, u8 reg, u8 mask);
+int tps65912_reg_read(struct tps65912 *tps65912, u8 reg);
+int tps65912_reg_write(struct tps65912 *tps65912, u8 reg, u8 val);
+int tps65912_device_init(struct tps65912 *tps65912);
+void tps65912_device_exit(struct tps65912 *tps65912);
+int tps65912_irq_init(struct tps65912 *tps65912, int irq,
+ struct tps65912_platform_data *pdata);
+int tps65912_irq_exit(struct tps65912 *tps65912);
+
+#endif /* __LINUX_MFD_TPS65912_H */
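[Editor's note: unlike the regmap-based tps65910 header, this interface exposes plain prototypes backed by the read/write hooks in struct tps65912. A short, hedged sketch of a status query follows; it assumes tps65912_reg_read() returns the register value on success and a negative error code on failure, which the prototype alone does not state.]

#include <linux/mfd/tps65912.h>

/*
 * Illustrative only: returns 1 if the die has crossed the hot-die
 * threshold, 0 if not, or a negative error code from the bus access.
 */
static int example_tps65912_hot_die(struct tps65912 *tps65912)
{
        int val = tps65912_reg_read(tps65912, TPS65912_THRM);

        if (val < 0)
                return val;

        return !!(val & THERM_THERM_HD_MASK);
}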
diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h
new file mode 100644
index 000000000..2c75c9c93
--- /dev/null
+++ b/include/linux/mfd/tps80031.h
@@ -0,0 +1,637 @@
+/*
+ * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver.
+ *
+ * Copyright (c) 2012, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#ifndef __LINUX_MFD_TPS80031_H
+#define __LINUX_MFD_TPS80031_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* Pull-ups/Pull-downs */
+#define TPS80031_CFG_INPUT_PUPD1 0xF0
+#define TPS80031_CFG_INPUT_PUPD2 0xF1
+#define TPS80031_CFG_INPUT_PUPD3 0xF2
+#define TPS80031_CFG_INPUT_PUPD4 0xF3
+#define TPS80031_CFG_LDO_PD1 0xF4
+#define TPS80031_CFG_LDO_PD2 0xF5
+#define TPS80031_CFG_SMPS_PD 0xF6
+
+/* Real Time Clock */
+#define TPS80031_SECONDS_REG 0x00
+#define TPS80031_MINUTES_REG 0x01
+#define TPS80031_HOURS_REG 0x02
+#define TPS80031_DAYS_REG 0x03
+#define TPS80031_MONTHS_REG 0x04
+#define TPS80031_YEARS_REG 0x05
+#define TPS80031_WEEKS_REG 0x06
+#define TPS80031_ALARM_SECONDS_REG 0x08
+#define TPS80031_ALARM_MINUTES_REG 0x09
+#define TPS80031_ALARM_HOURS_REG 0x0A
+#define TPS80031_ALARM_DAYS_REG 0x0B
+#define TPS80031_ALARM_MONTHS_REG 0x0C
+#define TPS80031_ALARM_YEARS_REG 0x0D
+#define TPS80031_RTC_CTRL_REG 0x10
+#define TPS80031_RTC_STATUS_REG 0x11
+#define TPS80031_RTC_INTERRUPTS_REG 0x12
+#define TPS80031_RTC_COMP_LSB_REG 0x13
+#define TPS80031_RTC_COMP_MSB_REG 0x14
+#define TPS80031_RTC_RESET_STATUS_REG 0x16
+
+/* PMC Master Module */
+#define TPS80031_PHOENIX_START_CONDITION 0x1F
+#define TPS80031_PHOENIX_MSK_TRANSITION 0x20
+#define TPS80031_STS_HW_CONDITIONS 0x21
+#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22
+#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23
+#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24
+#define TPS80031_PHOENIX_DEV_ON 0x25
+#define TPS80031_STS_PWR_GRP_STATE 0x27
+#define TPS80031_PH_CFG_VSYSLOW 0x28
+#define TPS80031_PH_STS_BOOT 0x29
+#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A
+#define TPS80031_PHOENIX_SEQ_CFG 0x2B
+#define TPS80031_PRIMARY_WATCHDOG_CFG 0x2C
+#define TPS80031_KEY_PRESS_DUR_CFG 0x2D
+#define TPS80031_SMPS_LDO_SHORT_STS 0x2E
+
+/* PMC Slave Module - Broadcast */
+#define TPS80031_BROADCAST_ADDR_ALL 0x31
+#define TPS80031_BROADCAST_ADDR_REF 0x32
+#define TPS80031_BROADCAST_ADDR_PROV 0x33
+#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34
+
+/* PMC Slave Module SMPS Regulators */
+#define TPS80031_SMPS4_CFG_TRANS 0x41
+#define TPS80031_SMPS4_CFG_STATE 0x42
+#define TPS80031_SMPS4_CFG_VOLTAGE 0x44
+#define TPS80031_VIO_CFG_TRANS 0x47
+#define TPS80031_VIO_CFG_STATE 0x48
+#define TPS80031_VIO_CFG_FORCE 0x49
+#define TPS80031_VIO_CFG_VOLTAGE 0x4A
+#define TPS80031_VIO_CFG_STEP 0x48
+#define TPS80031_SMPS1_CFG_TRANS 0x53
+#define TPS80031_SMPS1_CFG_STATE 0x54
+#define TPS80031_SMPS1_CFG_FORCE 0x55
+#define TPS80031_SMPS1_CFG_VOLTAGE 0x56
+#define TPS80031_SMPS1_CFG_STEP 0x57
+#define TPS80031_SMPS2_CFG_TRANS 0x59
+#define TPS80031_SMPS2_CFG_STATE 0x5A
+#define TPS80031_SMPS2_CFG_FORCE 0x5B
+#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C
+#define TPS80031_SMPS2_CFG_STEP 0x5D
+#define TPS80031_SMPS3_CFG_TRANS 0x65
+#define TPS80031_SMPS3_CFG_STATE 0x66
+#define TPS80031_SMPS3_CFG_VOLTAGE 0x68
+
+/* PMC Slave Module LDO Regulators */
+#define TPS80031_VANA_CFG_TRANS 0x81
+#define TPS80031_VANA_CFG_STATE 0x82
+#define TPS80031_VANA_CFG_VOLTAGE 0x83
+#define TPS80031_LDO2_CFG_TRANS 0x85
+#define TPS80031_LDO2_CFG_STATE 0x86
+#define TPS80031_LDO2_CFG_VOLTAGE 0x87
+#define TPS80031_LDO4_CFG_TRANS 0x89
+#define TPS80031_LDO4_CFG_STATE 0x8A
+#define TPS80031_LDO4_CFG_VOLTAGE 0x8B
+#define TPS80031_LDO3_CFG_TRANS 0x8D
+#define TPS80031_LDO3_CFG_STATE 0x8E
+#define TPS80031_LDO3_CFG_VOLTAGE 0x8F
+#define TPS80031_LDO6_CFG_TRANS 0x91
+#define TPS80031_LDO6_CFG_STATE 0x92
+#define TPS80031_LDO6_CFG_VOLTAGE 0x93
+#define TPS80031_LDOLN_CFG_TRANS 0x95
+#define TPS80031_LDOLN_CFG_STATE 0x96
+#define TPS80031_LDOLN_CFG_VOLTAGE 0x97
+#define TPS80031_LDO5_CFG_TRANS 0x99
+#define TPS80031_LDO5_CFG_STATE 0x9A
+#define TPS80031_LDO5_CFG_VOLTAGE 0x9B
+#define TPS80031_LDO1_CFG_TRANS 0x9D
+#define TPS80031_LDO1_CFG_STATE 0x9E
+#define TPS80031_LDO1_CFG_VOLTAGE 0x9F
+#define TPS80031_LDOUSB_CFG_TRANS 0xA1
+#define TPS80031_LDOUSB_CFG_STATE 0xA2
+#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3
+#define TPS80031_LDO7_CFG_TRANS 0xA5
+#define TPS80031_LDO7_CFG_STATE 0xA6
+#define TPS80031_LDO7_CFG_VOLTAGE 0xA7
+
+/* PMC Slave Module External Control */
+#define TPS80031_REGEN1_CFG_TRANS 0xAE
+#define TPS80031_REGEN1_CFG_STATE 0xAF
+#define TPS80031_REGEN2_CFG_TRANS 0xB1
+#define TPS80031_REGEN2_CFG_STATE 0xB2
+#define TPS80031_SYSEN_CFG_TRANS 0xB4
+#define TPS80031_SYSEN_CFG_STATE 0xB5
+
+/* PMC Slave Module Internal Control */
+#define TPS80031_NRESPWRON_CFG_TRANS 0xB7
+#define TPS80031_NRESPWRON_CFG_STATE 0xB8
+#define TPS80031_CLK32KAO_CFG_TRANS 0xBA
+#define TPS80031_CLK32KAO_CFG_STATE 0xBB
+#define TPS80031_CLK32KG_CFG_TRANS 0xBD
+#define TPS80031_CLK32KG_CFG_STATE 0xBE
+#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0
+#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1
+#define TPS80031_VRTC_CFG_TRANS 0xC3
+#define TPS80031_VRTC_CFG_STATE 0xC4
+#define TPS80031_BIAS_CFG_TRANS 0xC6
+#define TPS80031_BIAS_CFG_STATE 0xC7
+#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9
+#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA
+#define TPS80031_RC6MHZ_CFG_TRANS 0xCC
+#define TPS80031_RC6MHZ_CFG_STATE 0xCD
+#define TPS80031_TMP_CFG_TRANS 0xCF
+#define TPS80031_TMP_CFG_STATE 0xD0
+
+/* PMC Slave Module resources assignment */
+#define TPS80031_PREQ1_RES_ASS_A 0xD7
+#define TPS80031_PREQ1_RES_ASS_B 0xD8
+#define TPS80031_PREQ1_RES_ASS_C 0xD9
+#define TPS80031_PREQ2_RES_ASS_A 0xDA
+#define TPS80031_PREQ2_RES_ASS_B 0xDB
+#define TPS80031_PREQ2_RES_ASS_C 0xDC
+#define TPS80031_PREQ3_RES_ASS_A 0xDD
+#define TPS80031_PREQ3_RES_ASS_B 0xDE
+#define TPS80031_PREQ3_RES_ASS_C 0xDF
+
+/* PMC Slave Module Miscellaneous */
+#define TPS80031_SMPS_OFFSET 0xE0
+#define TPS80031_SMPS_MULT 0xE3
+#define TPS80031_MISC1 0xE4
+#define TPS80031_MISC2 0xE5
+#define TPS80031_BBSPOR_CFG 0xE6
+#define TPS80031_TMP_CFG 0xE7
+
+/* Battery Charging Controller and Indicator LED */
+#define TPS80031_CONTROLLER_CTRL2 0xDA
+#define TPS80031_CONTROLLER_VSEL_COMP 0xDB
+#define TPS80031_CHARGERUSB_VSYSREG 0xDC
+#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD
+#define TPS80031_LINEAR_CHRG_STS 0xDE
+#define TPS80031_CONTROLLER_INT_MASK 0xE0
+#define TPS80031_CONTROLLER_CTRL1 0xE1
+#define TPS80031_CONTROLLER_WDG 0xE2
+#define TPS80031_CONTROLLER_STAT1 0xE3
+#define TPS80031_CHARGERUSB_INT_STATUS 0xE4
+#define TPS80031_CHARGERUSB_INT_MASK 0xE5
+#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6
+#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7
+#define TPS80031_CHARGERUSB_CTRL1 0xE8
+#define TPS80031_CHARGERUSB_CTRL2 0xE9
+#define TPS80031_CHARGERUSB_CTRL3 0xEA
+#define TPS80031_CHARGERUSB_STAT1 0xEB
+#define TPS80031_CHARGERUSB_VOREG 0xEC
+#define TPS80031_CHARGERUSB_VICHRG 0xED
+#define TPS80031_CHARGERUSB_CINLIMIT 0xEE
+#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF
+#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0
+#define TPS80031_LED_PWM_CTRL1 0xF4
+#define TPS80031_LED_PWM_CTRL2 0xF5
+
+/* USB On-The-Go */
+#define TPS80031_BACKUP_REG 0xFA
+#define TPS80031_USB_VENDOR_ID_LSB 0x00
+#define TPS80031_USB_VENDOR_ID_MSB 0x01
+#define TPS80031_USB_PRODUCT_ID_LSB 0x02
+#define TPS80031_USB_PRODUCT_ID_MSB 0x03
+#define TPS80031_USB_VBUS_CTRL_SET 0x04
+#define TPS80031_USB_VBUS_CTRL_CLR 0x05
+#define TPS80031_USB_ID_CTRL_SET 0x06
+#define TPS80031_USB_ID_CTRL_CLR 0x07
+#define TPS80031_USB_VBUS_INT_SRC 0x08
+#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09
+#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A
+#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B
+#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C
+#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D
+#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E
+#define TPS80031_USB_ID_INT_SRC 0x0F
+#define TPS80031_USB_ID_INT_LATCH_SET 0x10
+#define TPS80031_USB_ID_INT_LATCH_CLR 0x11
+#define TPS80031_USB_ID_INT_EN_LO_SET 0x12
+#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13
+#define TPS80031_USB_ID_INT_EN_HI_SET 0x14
+#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15
+#define TPS80031_USB_OTG_ADP_CTRL 0x16
+#define TPS80031_USB_OTG_ADP_HIGH 0x17
+#define TPS80031_USB_OTG_ADP_LOW 0x18
+#define TPS80031_USB_OTG_ADP_RISE 0x19
+#define TPS80031_USB_OTG_REVISION 0x1A
+
+/* Gas Gauge */
+#define TPS80031_FG_REG_00 0xC0
+#define TPS80031_FG_REG_01 0xC1
+#define TPS80031_FG_REG_02 0xC2
+#define TPS80031_FG_REG_03 0xC3
+#define TPS80031_FG_REG_04 0xC4
+#define TPS80031_FG_REG_05 0xC5
+#define TPS80031_FG_REG_06 0xC6
+#define TPS80031_FG_REG_07 0xC7
+#define TPS80031_FG_REG_08 0xC8
+#define TPS80031_FG_REG_09 0xC9
+#define TPS80031_FG_REG_10 0xCA
+#define TPS80031_FG_REG_11 0xCB
+
+/* General Purpose ADC */
+#define TPS80031_GPADC_CTRL 0x2E
+#define TPS80031_GPADC_CTRL2 0x2F
+#define TPS80031_RTSELECT_LSB 0x32
+#define TPS80031_RTSELECT_ISB 0x33
+#define TPS80031_RTSELECT_MSB 0x34
+#define TPS80031_GPSELECT_ISB 0x35
+#define TPS80031_CTRL_P1 0x36
+#define TPS80031_RTCH0_LSB 0x37
+#define TPS80031_RTCH0_MSB 0x38
+#define TPS80031_RTCH1_LSB 0x39
+#define TPS80031_RTCH1_MSB 0x3A
+#define TPS80031_GPCH0_LSB 0x3B
+#define TPS80031_GPCH0_MSB 0x3C
+
+/* SIM, MMC and Battery Detection */
+#define TPS80031_SIMDEBOUNCING 0xEB
+#define TPS80031_SIMCTRL 0xEC
+#define TPS80031_MMCDEBOUNCING 0xED
+#define TPS80031_MMCCTRL 0xEE
+#define TPS80031_BATDEBOUNCING 0xEF
+
+/* Vibrator Driver and PWMs */
+#define TPS80031_VIBCTRL 0x9B
+#define TPS80031_VIBMODE 0x9C
+#define TPS80031_PWM1ON 0xBA
+#define TPS80031_PWM1OFF 0xBB
+#define TPS80031_PWM2ON 0xBD
+#define TPS80031_PWM2OFF 0xBE
+
+/* Control Interface */
+#define TPS80031_INT_STS_A 0xD0
+#define TPS80031_INT_STS_B 0xD1
+#define TPS80031_INT_STS_C 0xD2
+#define TPS80031_INT_MSK_LINE_A 0xD3
+#define TPS80031_INT_MSK_LINE_B 0xD4
+#define TPS80031_INT_MSK_LINE_C 0xD5
+#define TPS80031_INT_MSK_STS_A 0xD6
+#define TPS80031_INT_MSK_STS_B 0xD7
+#define TPS80031_INT_MSK_STS_C 0xD8
+#define TPS80031_TOGGLE1 0x90
+#define TPS80031_TOGGLE2 0x91
+#define TPS80031_TOGGLE3 0x92
+#define TPS80031_PWDNSTATUS1 0x93
+#define TPS80031_PWDNSTATUS2 0x94
+#define TPS80031_VALIDITY0 0x17
+#define TPS80031_VALIDITY1 0x18
+#define TPS80031_VALIDITY2 0x19
+#define TPS80031_VALIDITY3 0x1A
+#define TPS80031_VALIDITY4 0x1B
+#define TPS80031_VALIDITY5 0x1C
+#define TPS80031_VALIDITY6 0x1D
+#define TPS80031_VALIDITY7 0x1E
+
+/* Version number related register */
+#define TPS80031_JTAGVERNUM 0x87
+#define TPS80031_EPROM_REV 0xDF
+
+/* GPADC Trimming Bits. */
+#define TPS80031_GPADC_TRIM0 0xCC
+#define TPS80031_GPADC_TRIM1 0xCD
+#define TPS80031_GPADC_TRIM2 0xCE
+#define TPS80031_GPADC_TRIM3 0xCF
+#define TPS80031_GPADC_TRIM4 0xD0
+#define TPS80031_GPADC_TRIM5 0xD1
+#define TPS80031_GPADC_TRIM6 0xD2
+#define TPS80031_GPADC_TRIM7 0xD3
+#define TPS80031_GPADC_TRIM8 0xD4
+#define TPS80031_GPADC_TRIM9 0xD5
+#define TPS80031_GPADC_TRIM10 0xD6
+#define TPS80031_GPADC_TRIM11 0xD7
+#define TPS80031_GPADC_TRIM12 0xD8
+#define TPS80031_GPADC_TRIM13 0xD9
+#define TPS80031_GPADC_TRIM14 0xDA
+#define TPS80031_GPADC_TRIM15 0xDB
+#define TPS80031_GPADC_TRIM16 0xDC
+#define TPS80031_GPADC_TRIM17 0xDD
+#define TPS80031_GPADC_TRIM18 0xDE
+
+/* TPS80031_CONTROLLER_STAT1 bit fields */
+#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0
+#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1
+#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2
+#define TPS80031_CONTROLLER_STAT1_VAC_DET 3
+#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4
+#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6
+/* TPS80031_CONTROLLER_INT_MASK bit field */
+#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0
+#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1
+#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2
+#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3
+#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4
+#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5
+
+#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F
+
+/* TPS80031_PHOENIX_DEV_ON bit field */
+#define TPS80031_DEVOFF 0x1
+
+#define TPS80031_EXT_CONTROL_CFG_TRANS 0
+#define TPS80031_EXT_CONTROL_CFG_STATE 1
+
+/* State register field */
+#define TPS80031_STATE_OFF 0x00
+#define TPS80031_STATE_ON 0x01
+#define TPS80031_STATE_MASK 0x03
+
+/* Trans register field */
+#define TPS80031_TRANS_ACTIVE_OFF 0x00
+#define TPS80031_TRANS_ACTIVE_ON 0x01
+#define TPS80031_TRANS_ACTIVE_MASK 0x03
+#define TPS80031_TRANS_SLEEP_OFF 0x00
+#define TPS80031_TRANS_SLEEP_ON 0x04
+#define TPS80031_TRANS_SLEEP_MASK 0x0C
+#define TPS80031_TRANS_OFF_OFF 0x00
+#define TPS80031_TRANS_OFF_ACTIVE 0x10
+#define TPS80031_TRANS_OFF_MASK 0x30
+
+#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \
+ TPS80031_PWR_REQ_INPUT_PREQ2 | \
+ TPS80031_PWR_REQ_INPUT_PREQ3)
+
+/* TPS80031_BBSPOR_CFG bit field */
+#define TPS80031_BBSPOR_CHG_EN 0x8
+#define TPS80031_MAX_REGISTER 0xFF
+
+struct i2c_client;
+
+/* Supported chips */
+enum chips {
+ TPS80031 = 0x00000001,
+ TPS80032 = 0x00000002,
+};
+
+enum {
+ TPS80031_INT_PWRON,
+ TPS80031_INT_RPWRON,
+ TPS80031_INT_SYS_VLOW,
+ TPS80031_INT_RTC_ALARM,
+ TPS80031_INT_RTC_PERIOD,
+ TPS80031_INT_HOT_DIE,
+ TPS80031_INT_VXX_SHORT,
+ TPS80031_INT_SPDURATION,
+ TPS80031_INT_WATCHDOG,
+ TPS80031_INT_BAT,
+ TPS80031_INT_SIM,
+ TPS80031_INT_MMC,
+ TPS80031_INT_RES,
+ TPS80031_INT_GPADC_RT,
+ TPS80031_INT_GPADC_SW2_EOC,
+ TPS80031_INT_CC_AUTOCAL,
+ TPS80031_INT_ID_WKUP,
+ TPS80031_INT_VBUSS_WKUP,
+ TPS80031_INT_ID,
+ TPS80031_INT_VBUS,
+ TPS80031_INT_CHRG_CTRL,
+ TPS80031_INT_EXT_CHRG,
+ TPS80031_INT_INT_CHRG,
+ TPS80031_INT_RES2,
+ TPS80031_INT_BAT_TEMP_OVRANGE,
+ TPS80031_INT_BAT_REMOVED,
+ TPS80031_INT_VBUS_DET,
+ TPS80031_INT_VAC_DET,
+ TPS80031_INT_FAULT_WDG,
+ TPS80031_INT_LINCH_GATED,
+
+ /* Last interrupt id to get the end number */
+ TPS80031_INT_NR,
+};
+
+/* TPS80031 Slave IDs */
+#define TPS80031_NUM_SLAVES 4
+#define TPS80031_SLAVE_ID0 0
+#define TPS80031_SLAVE_ID1 1
+#define TPS80031_SLAVE_ID2 2
+#define TPS80031_SLAVE_ID3 3
+
+/* TPS80031 I2C addresses */
+#define TPS80031_I2C_ID0_ADDR 0x12
+#define TPS80031_I2C_ID1_ADDR 0x48
+#define TPS80031_I2C_ID2_ADDR 0x49
+#define TPS80031_I2C_ID3_ADDR 0x4A
+
+enum {
+ TPS80031_REGULATOR_VIO,
+ TPS80031_REGULATOR_SMPS1,
+ TPS80031_REGULATOR_SMPS2,
+ TPS80031_REGULATOR_SMPS3,
+ TPS80031_REGULATOR_SMPS4,
+ TPS80031_REGULATOR_VANA,
+ TPS80031_REGULATOR_LDO1,
+ TPS80031_REGULATOR_LDO2,
+ TPS80031_REGULATOR_LDO3,
+ TPS80031_REGULATOR_LDO4,
+ TPS80031_REGULATOR_LDO5,
+ TPS80031_REGULATOR_LDO6,
+ TPS80031_REGULATOR_LDO7,
+ TPS80031_REGULATOR_LDOLN,
+ TPS80031_REGULATOR_LDOUSB,
+ TPS80031_REGULATOR_VBUS,
+ TPS80031_REGULATOR_REGEN1,
+ TPS80031_REGULATOR_REGEN2,
+ TPS80031_REGULATOR_SYSEN,
+ TPS80031_REGULATOR_MAX,
+};
+
+/* Different configurations for the rails */
+enum {
+ /* USBLDO input selection */
+ TPS80031_USBLDO_INPUT_VSYS = 0x00000001,
+ TPS80031_USBLDO_INPUT_PMID = 0x00000002,
+
+ /* LDO3 output mode */
+ TPS80031_LDO3_OUTPUT_VIB = 0x00000004,
+
+ /* VBUS configuration */
+ TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004,
+ TPS80031_VBUS_SW_ONLY = 0x00000008,
+ TPS80031_VBUS_SW_N_ID = 0x00000010,
+};
+
+/* External controls requests */
+enum tps80031_ext_control {
+ TPS80031_PWR_REQ_INPUT_NONE = 0x00000000,
+ TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001,
+ TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002,
+ TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004,
+ TPS80031_PWR_OFF_ON_SLEEP = 0x00000008,
+ TPS80031_PWR_ON_ON_SLEEP = 0x00000010,
+};
+
+enum tps80031_pupd_pins {
+ TPS80031_PREQ1 = 0,
+ TPS80031_PREQ2A,
+ TPS80031_PREQ2B,
+ TPS80031_PREQ2C,
+ TPS80031_PREQ3,
+ TPS80031_NRES_WARM,
+ TPS80031_PWM_FORCE,
+ TPS80031_CHRG_EXT_CHRG_STATZ,
+ TPS80031_SIM,
+ TPS80031_MMC,
+ TPS80031_GPADC_START,
+ TPS80031_DVSI2C_SCL,
+ TPS80031_DVSI2C_SDA,
+ TPS80031_CTLI2C_SCL,
+ TPS80031_CTLI2C_SDA,
+};
+
+enum tps80031_pupd_settings {
+ TPS80031_PUPD_NORMAL,
+ TPS80031_PUPD_PULLDOWN,
+ TPS80031_PUPD_PULLUP,
+};
+
+struct tps80031 {
+ struct device *dev;
+ unsigned long chip_info;
+ int es_version;
+ struct i2c_client *clients[TPS80031_NUM_SLAVES];
+ struct regmap *regmap[TPS80031_NUM_SLAVES];
+ struct regmap_irq_chip_data *irq_data;
+};
+
+struct tps80031_pupd_init_data {
+ int input_pin;
+ int setting;
+};
+
+/*
+ * struct tps80031_regulator_platform_data - tps80031 regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @ext_ctrl_flag: External control flag for sleep/power request control.
+ * @config_flags: Configuration flags for the rails, built as a bitwise OR
+ * of the configuration enums above.
+ */
+
+struct tps80031_regulator_platform_data {
+ struct regulator_init_data *reg_init_data;
+ unsigned int ext_ctrl_flag;
+ unsigned int config_flags;
+};
+
+struct tps80031_platform_data {
+ int irq_base;
+ bool use_power_off;
+ struct tps80031_pupd_init_data *pupd_init_data;
+ int pupd_init_data_size;
+ struct tps80031_regulator_platform_data
+ *regulator_pdata[TPS80031_REGULATOR_MAX];
+};
+
+static inline int tps80031_write(struct device *dev, int sid,
+ int reg, uint8_t val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_write(tps80031->regmap[sid], reg, val);
+}
+
+static inline int tps80031_writes(struct device *dev, int sid, int reg,
+ int len, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_bulk_write(tps80031->regmap[sid], reg, val, len);
+}
+
+static inline int tps80031_read(struct device *dev, int sid,
+ int reg, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+ unsigned int ival;
+ int ret;
+
+ ret = regmap_read(tps80031->regmap[sid], reg, &ival);
+ if (ret < 0) {
+ dev_err(dev, "failed reading from reg 0x%02x\n", reg);
+ return ret;
+ }
+
+ *val = ival;
+ return ret;
+}
+
+static inline int tps80031_reads(struct device *dev, int sid,
+ int reg, int len, uint8_t *val)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_bulk_read(tps80031->regmap[sid], reg, val, len);
+}
+
+static inline int tps80031_set_bits(struct device *dev, int sid,
+ int reg, uint8_t bit_mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps80031->regmap[sid], reg,
+ bit_mask, bit_mask);
+}
+
+static inline int tps80031_clr_bits(struct device *dev, int sid,
+ int reg, uint8_t bit_mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0);
+}
+
+static inline int tps80031_update(struct device *dev, int sid,
+ int reg, uint8_t val, uint8_t mask)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_update_bits(tps80031->regmap[sid], reg, mask, val);
+}
+
+static inline unsigned long tps80031_get_chip_info(struct device *dev)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return tps80031->chip_info;
+}
+
+static inline int tps80031_get_pmu_version(struct device *dev)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return tps80031->es_version;
+}
+
+static inline int tps80031_irq_get_virq(struct device *dev, int irq)
+{
+ struct tps80031 *tps80031 = dev_get_drvdata(dev);
+
+ return regmap_irq_get_virq(tps80031->irq_data, irq);
+}
+
+extern int tps80031_ext_power_req_config(struct device *dev,
+ unsigned long ext_ctrl_flag, int preq_bit,
+ int state_reg_add, int trans_reg_add);
+#endif /* __LINUX_MFD_TPS80031_H */
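[Editor's note: two small sketches of how the definitions above are meant to be combined: a board file populating struct tps80031_regulator_platform_data with ORed external-control and configuration flags, and a helper reading a rail's state register through the per-slave accessors. The device passed to the accessor must be the one holding struct tps80031 as drvdata; the slave ID and register are caller-supplied assumptions here.]

#include <linux/mfd/tps80031.h>
#include <linux/regulator/machine.h>

/* Hypothetical regulator constraints, normally filled in by the board file. */
static struct regulator_init_data example_ldousb_init_data;

/* Example: LDOUSB fed from VSYS and gated by the PREQ1 power-request input. */
static struct tps80031_regulator_platform_data example_ldousb_pdata = {
        .reg_init_data  = &example_ldousb_init_data,
        .ext_ctrl_flag  = TPS80031_PWR_REQ_INPUT_PREQ1,
        .config_flags   = TPS80031_USBLDO_INPUT_VSYS,
};

/* Return 1 if the rail's CFG_STATE register reports it as on, 0 if off. */
static int example_rail_is_on(struct device *dev, int sid, int state_reg)
{
        uint8_t val;
        int ret;

        ret = tps80031_read(dev, sid, state_reg, &val);
        if (ret < 0)
                return ret;

        return (val & TPS80031_STATE_MASK) == TPS80031_STATE_ON;
}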
diff --git a/include/linux/mfd/twl4030-audio.h b/include/linux/mfd/twl4030-audio.h
new file mode 100644
index 000000000..3d22b72df
--- /dev/null
+++ b/include/linux/mfd/twl4030-audio.h
@@ -0,0 +1,272 @@
+/*
+ * MFD driver for twl4030 audio submodule
+ *
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * Copyright: (C) 2009 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TWL4030_CODEC_H__
+#define __TWL4030_CODEC_H__
+
+/* Codec registers */
+#define TWL4030_REG_CODEC_MODE 0x01
+#define TWL4030_REG_OPTION 0x02
+#define TWL4030_REG_UNKNOWN 0x03
+#define TWL4030_REG_MICBIAS_CTL 0x04
+#define TWL4030_REG_ANAMICL 0x05
+#define TWL4030_REG_ANAMICR 0x06
+#define TWL4030_REG_AVADC_CTL 0x07
+#define TWL4030_REG_ADCMICSEL 0x08
+#define TWL4030_REG_DIGMIXING 0x09
+#define TWL4030_REG_ATXL1PGA 0x0A
+#define TWL4030_REG_ATXR1PGA 0x0B
+#define TWL4030_REG_AVTXL2PGA 0x0C
+#define TWL4030_REG_AVTXR2PGA 0x0D
+#define TWL4030_REG_AUDIO_IF 0x0E
+#define TWL4030_REG_VOICE_IF 0x0F
+#define TWL4030_REG_ARXR1PGA 0x10
+#define TWL4030_REG_ARXL1PGA 0x11
+#define TWL4030_REG_ARXR2PGA 0x12
+#define TWL4030_REG_ARXL2PGA 0x13
+#define TWL4030_REG_VRXPGA 0x14
+#define TWL4030_REG_VSTPGA 0x15
+#define TWL4030_REG_VRX2ARXPGA 0x16
+#define TWL4030_REG_AVDAC_CTL 0x17
+#define TWL4030_REG_ARX2VTXPGA 0x18
+#define TWL4030_REG_ARXL1_APGA_CTL 0x19
+#define TWL4030_REG_ARXR1_APGA_CTL 0x1A
+#define TWL4030_REG_ARXL2_APGA_CTL 0x1B
+#define TWL4030_REG_ARXR2_APGA_CTL 0x1C
+#define TWL4030_REG_ATX2ARXPGA 0x1D
+#define TWL4030_REG_BT_IF 0x1E
+#define TWL4030_REG_BTPGA 0x1F
+#define TWL4030_REG_BTSTPGA 0x20
+#define TWL4030_REG_EAR_CTL 0x21
+#define TWL4030_REG_HS_SEL 0x22
+#define TWL4030_REG_HS_GAIN_SET 0x23
+#define TWL4030_REG_HS_POPN_SET 0x24
+#define TWL4030_REG_PREDL_CTL 0x25
+#define TWL4030_REG_PREDR_CTL 0x26
+#define TWL4030_REG_PRECKL_CTL 0x27
+#define TWL4030_REG_PRECKR_CTL 0x28
+#define TWL4030_REG_HFL_CTL 0x29
+#define TWL4030_REG_HFR_CTL 0x2A
+#define TWL4030_REG_ALC_CTL 0x2B
+#define TWL4030_REG_ALC_SET1 0x2C
+#define TWL4030_REG_ALC_SET2 0x2D
+#define TWL4030_REG_BOOST_CTL 0x2E
+#define TWL4030_REG_SOFTVOL_CTL 0x2F
+#define TWL4030_REG_DTMF_FREQSEL 0x30
+#define TWL4030_REG_DTMF_TONEXT1H 0x31
+#define TWL4030_REG_DTMF_TONEXT1L 0x32
+#define TWL4030_REG_DTMF_TONEXT2H 0x33
+#define TWL4030_REG_DTMF_TONEXT2L 0x34
+#define TWL4030_REG_DTMF_TONOFF 0x35
+#define TWL4030_REG_DTMF_WANONOFF 0x36
+#define TWL4030_REG_I2S_RX_SCRAMBLE_H 0x37
+#define TWL4030_REG_I2S_RX_SCRAMBLE_M 0x38
+#define TWL4030_REG_I2S_RX_SCRAMBLE_L 0x39
+#define TWL4030_REG_APLL_CTL 0x3A
+#define TWL4030_REG_DTMF_CTL 0x3B
+#define TWL4030_REG_DTMF_PGA_CTL2 0x3C
+#define TWL4030_REG_DTMF_PGA_CTL1 0x3D
+#define TWL4030_REG_MISC_SET_1 0x3E
+#define TWL4030_REG_PCMBTMUX 0x3F
+#define TWL4030_REG_RX_PATH_SEL 0x43
+#define TWL4030_REG_VDL_APGA_CTL 0x44
+#define TWL4030_REG_VIBRA_CTL 0x45
+#define TWL4030_REG_VIBRA_SET 0x46
+#define TWL4030_REG_VIBRA_PWM_SET 0x47
+#define TWL4030_REG_ANAMIC_GAIN 0x48
+#define TWL4030_REG_MISC_SET_2 0x49
+
+/* Bitfield Definitions */
+
+/* TWL4030_CODEC_MODE (0x01) Fields */
+#define TWL4030_APLL_RATE 0xF0
+#define TWL4030_APLL_RATE_8000 0x00
+#define TWL4030_APLL_RATE_11025 0x10
+#define TWL4030_APLL_RATE_12000 0x20
+#define TWL4030_APLL_RATE_16000 0x40
+#define TWL4030_APLL_RATE_22050 0x50
+#define TWL4030_APLL_RATE_24000 0x60
+#define TWL4030_APLL_RATE_32000 0x80
+#define TWL4030_APLL_RATE_44100 0x90
+#define TWL4030_APLL_RATE_48000 0xA0
+#define TWL4030_APLL_RATE_96000 0xE0
+#define TWL4030_SEL_16K 0x08
+#define TWL4030_CODECPDZ 0x02
+#define TWL4030_OPT_MODE 0x01
+#define TWL4030_OPTION_1 (1 << 0)
+#define TWL4030_OPTION_2 (0 << 0)
+
+/* TWL4030_OPTION (0x02) Fields */
+#define TWL4030_ATXL1_EN (1 << 0)
+#define TWL4030_ATXR1_EN (1 << 1)
+#define TWL4030_ATXL2_VTXL_EN (1 << 2)
+#define TWL4030_ATXR2_VTXR_EN (1 << 3)
+#define TWL4030_ARXL1_VRX_EN (1 << 4)
+#define TWL4030_ARXR1_EN (1 << 5)
+#define TWL4030_ARXL2_EN (1 << 6)
+#define TWL4030_ARXR2_EN (1 << 7)
+
+/* TWL4030_REG_MICBIAS_CTL (0x04) Fields */
+#define TWL4030_MICBIAS2_CTL 0x40
+#define TWL4030_MICBIAS1_CTL 0x20
+#define TWL4030_HSMICBIAS_EN 0x04
+#define TWL4030_MICBIAS2_EN 0x02
+#define TWL4030_MICBIAS1_EN 0x01
+
+/* ANAMICL (0x05) Fields */
+#define TWL4030_CNCL_OFFSET_START 0x80
+#define TWL4030_OFFSET_CNCL_SEL 0x60
+#define TWL4030_OFFSET_CNCL_SEL_ARX1 0x00
+#define TWL4030_OFFSET_CNCL_SEL_ARX2 0x20
+#define TWL4030_OFFSET_CNCL_SEL_VRX 0x40
+#define TWL4030_OFFSET_CNCL_SEL_ALL 0x60
+#define TWL4030_MICAMPL_EN 0x10
+#define TWL4030_CKMIC_EN 0x08
+#define TWL4030_AUXL_EN 0x04
+#define TWL4030_HSMIC_EN 0x02
+#define TWL4030_MAINMIC_EN 0x01
+
+/* ANAMICR (0x06) Fields */
+#define TWL4030_MICAMPR_EN 0x10
+#define TWL4030_AUXR_EN 0x04
+#define TWL4030_SUBMIC_EN 0x01
+
+/* AVADC_CTL (0x07) Fields */
+#define TWL4030_ADCL_EN 0x08
+#define TWL4030_AVADC_CLK_PRIORITY 0x04
+#define TWL4030_ADCR_EN 0x02
+
+/* TWL4030_REG_ADCMICSEL (0x08) Fields */
+#define TWL4030_DIGMIC1_EN 0x08
+#define TWL4030_TX2IN_SEL 0x04
+#define TWL4030_DIGMIC0_EN 0x02
+#define TWL4030_TX1IN_SEL 0x01
+
+/* AUDIO_IF (0x0E) Fields */
+#define TWL4030_AIF_SLAVE_EN 0x80
+#define TWL4030_DATA_WIDTH 0x60
+#define TWL4030_DATA_WIDTH_16S_16W 0x00
+#define TWL4030_DATA_WIDTH_32S_16W 0x40
+#define TWL4030_DATA_WIDTH_32S_24W 0x60
+#define TWL4030_AIF_FORMAT 0x18
+#define TWL4030_AIF_FORMAT_CODEC 0x00
+#define TWL4030_AIF_FORMAT_LEFT 0x08
+#define TWL4030_AIF_FORMAT_RIGHT 0x10
+#define TWL4030_AIF_FORMAT_TDM 0x18
+#define TWL4030_AIF_TRI_EN 0x04
+#define TWL4030_CLK256FS_EN 0x02
+#define TWL4030_AIF_EN 0x01
+
+/* VOICE_IF (0x0F) Fields */
+#define TWL4030_VIF_SLAVE_EN 0x80
+#define TWL4030_VIF_DIN_EN 0x40
+#define TWL4030_VIF_DOUT_EN 0x20
+#define TWL4030_VIF_SWAP 0x10
+#define TWL4030_VIF_FORMAT 0x08
+#define TWL4030_VIF_TRI_EN 0x04
+#define TWL4030_VIF_SUB_EN 0x02
+#define TWL4030_VIF_EN 0x01
+
+/* EAR_CTL (0x21) */
+#define TWL4030_EAR_GAIN 0x30
+
+/* HS_GAIN_SET (0x23) Fields */
+#define TWL4030_HSR_GAIN 0x0C
+#define TWL4030_HSR_GAIN_PWR_DOWN 0x00
+#define TWL4030_HSR_GAIN_PLUS_6DB 0x04
+#define TWL4030_HSR_GAIN_0DB 0x08
+#define TWL4030_HSR_GAIN_MINUS_6DB 0x0C
+#define TWL4030_HSL_GAIN 0x03
+#define TWL4030_HSL_GAIN_PWR_DOWN 0x00
+#define TWL4030_HSL_GAIN_PLUS_6DB 0x01
+#define TWL4030_HSL_GAIN_0DB 0x02
+#define TWL4030_HSL_GAIN_MINUS_6DB 0x03
+
+/* HS_POPN_SET (0x24) Fields */
+#define TWL4030_VMID_EN 0x40
+#define TWL4030_EXTMUTE 0x20
+#define TWL4030_RAMP_DELAY 0x1C
+#define TWL4030_RAMP_DELAY_20MS 0x00
+#define TWL4030_RAMP_DELAY_40MS 0x04
+#define TWL4030_RAMP_DELAY_81MS 0x08
+#define TWL4030_RAMP_DELAY_161MS 0x0C
+#define TWL4030_RAMP_DELAY_323MS 0x10
+#define TWL4030_RAMP_DELAY_645MS 0x14
+#define TWL4030_RAMP_DELAY_1291MS 0x18
+#define TWL4030_RAMP_DELAY_2581MS 0x1C
+#define TWL4030_RAMP_EN 0x02
+
+/* PREDL_CTL (0x25) */
+#define TWL4030_PREDL_GAIN 0x30
+
+/* PREDR_CTL (0x26) */
+#define TWL4030_PREDR_GAIN 0x30
+
+/* PRECKL_CTL (0x27) */
+#define TWL4030_PRECKL_GAIN 0x30
+
+/* PRECKR_CTL (0x28) */
+#define TWL4030_PRECKR_GAIN 0x30
+
+/* HFL_CTL (0x29, 0x2A) Fields */
+#define TWL4030_HF_CTL_HB_EN 0x04
+#define TWL4030_HF_CTL_LOOP_EN 0x08
+#define TWL4030_HF_CTL_RAMP_EN 0x10
+#define TWL4030_HF_CTL_REF_EN 0x20
+
+/* APLL_CTL (0x3A) Fields */
+#define TWL4030_APLL_EN 0x10
+#define TWL4030_APLL_INFREQ 0x0F
+#define TWL4030_APLL_INFREQ_19200KHZ 0x05
+#define TWL4030_APLL_INFREQ_26000KHZ 0x06
+#define TWL4030_APLL_INFREQ_38400KHZ 0x0F
+
+/* REG_MISC_SET_1 (0x3E) Fields */
+#define TWL4030_CLK64_EN 0x80
+#define TWL4030_SCRAMBLE_EN 0x40
+#define TWL4030_FMLOOP_EN 0x20
+#define TWL4030_SMOOTH_ANAVOL_EN 0x02
+#define TWL4030_DIGMIC_LR_SWAP_EN 0x01
+
+/* VIBRA_CTL (0x45) */
+#define TWL4030_VIBRA_EN 0x01
+#define TWL4030_VIBRA_DIR 0x02
+#define TWL4030_VIBRA_AUDIO_SEL_L1 (0x00 << 2)
+#define TWL4030_VIBRA_AUDIO_SEL_R1 (0x01 << 2)
+#define TWL4030_VIBRA_AUDIO_SEL_L2 (0x02 << 2)
+#define TWL4030_VIBRA_AUDIO_SEL_R2 (0x03 << 2)
+#define TWL4030_VIBRA_SEL 0x10
+#define TWL4030_VIBRA_DIR_SEL 0x20
+
+/* TWL4030 codec resource IDs */
+enum twl4030_audio_res {
+ TWL4030_AUDIO_RES_POWER = 0,
+ TWL4030_AUDIO_RES_APLL,
+ TWL4030_AUDIO_RES_MAX,
+};
+
+int twl4030_audio_disable_resource(enum twl4030_audio_res id);
+int twl4030_audio_enable_resource(enum twl4030_audio_res id);
+unsigned int twl4030_audio_get_mclk(void);
+
+#endif /* End of __TWL4030_CODEC_H__ */
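[Editor's note: a hedged example of how a codec or vibra child driver might consume this API: pick the APLL input-frequency field from the MCLK rate reported by the MFD core, then enable the shared resources. The supported MCLK rates follow directly from the TWL4030_APLL_INFREQ_* values; everything else (error handling, where the APLL_CTL write happens) is an assumption.]

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mfd/twl4030-audio.h>

/*
 * Illustrative sketch: map the MCLK rate to an APLL_CTL input-frequency
 * setting and power up the shared audio resources.  Returns the
 * APLL_INFREQ field value to be programmed by the caller, or -EINVAL.
 */
static int example_twl4030_audio_power_up(void)
{
        unsigned int mclk = twl4030_audio_get_mclk();
        u8 infreq;

        switch (mclk) {
        case 19200000:
                infreq = TWL4030_APLL_INFREQ_19200KHZ;
                break;
        case 26000000:
                infreq = TWL4030_APLL_INFREQ_26000KHZ;
                break;
        case 38400000:
                infreq = TWL4030_APLL_INFREQ_38400KHZ;
                break;
        default:
                return -EINVAL;
        }

        twl4030_audio_enable_resource(TWL4030_AUDIO_RES_POWER);
        twl4030_audio_enable_resource(TWL4030_AUDIO_RES_APLL);

        return infreq;
}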
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
new file mode 100644
index 000000000..8f9fc3d26
--- /dev/null
+++ b/include/linux/mfd/twl6040.h
@@ -0,0 +1,269 @@
+/*
+ * MFD driver for twl6040
+ *
+ * Authors: Jorge Eduardo Candelaria <jorge.candelaria@ti.com>
+ * Misael Lopez Cruz <misael.lopez@ti.com>
+ *
+ * Copyright: (C) 2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __TWL6040_CODEC_H__
+#define __TWL6040_CODEC_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+
+#define TWL6040_REG_ASICID 0x01
+#define TWL6040_REG_ASICREV 0x02
+#define TWL6040_REG_INTID 0x03
+#define TWL6040_REG_INTMR 0x04
+#define TWL6040_REG_NCPCTL 0x05
+#define TWL6040_REG_LDOCTL 0x06
+#define TWL6040_REG_HPPLLCTL 0x07
+#define TWL6040_REG_LPPLLCTL 0x08
+#define TWL6040_REG_LPPLLDIV 0x09
+#define TWL6040_REG_AMICBCTL 0x0A
+#define TWL6040_REG_DMICBCTL 0x0B
+#define TWL6040_REG_MICLCTL 0x0C
+#define TWL6040_REG_MICRCTL 0x0D
+#define TWL6040_REG_MICGAIN 0x0E
+#define TWL6040_REG_LINEGAIN 0x0F
+#define TWL6040_REG_HSLCTL 0x10
+#define TWL6040_REG_HSRCTL 0x11
+#define TWL6040_REG_HSGAIN 0x12
+#define TWL6040_REG_EARCTL 0x13
+#define TWL6040_REG_HFLCTL 0x14
+#define TWL6040_REG_HFLGAIN 0x15
+#define TWL6040_REG_HFRCTL 0x16
+#define TWL6040_REG_HFRGAIN 0x17
+#define TWL6040_REG_VIBCTLL 0x18
+#define TWL6040_REG_VIBDATL 0x19
+#define TWL6040_REG_VIBCTLR 0x1A
+#define TWL6040_REG_VIBDATR 0x1B
+#define TWL6040_REG_HKCTL1 0x1C
+#define TWL6040_REG_HKCTL2 0x1D
+#define TWL6040_REG_GPOCTL 0x1E
+#define TWL6040_REG_ALB 0x1F
+#define TWL6040_REG_DLB 0x20
+#define TWL6040_REG_TRIM1 0x28
+#define TWL6040_REG_TRIM2 0x29
+#define TWL6040_REG_TRIM3 0x2A
+#define TWL6040_REG_HSOTRIM 0x2B
+#define TWL6040_REG_HFOTRIM 0x2C
+#define TWL6040_REG_ACCCTL 0x2D
+#define TWL6040_REG_STATUS 0x2E
+
+/* INTID (0x03) fields */
+
+#define TWL6040_THINT 0x01
+#define TWL6040_PLUGINT 0x02
+#define TWL6040_UNPLUGINT 0x04
+#define TWL6040_HOOKINT 0x08
+#define TWL6040_HFINT 0x10
+#define TWL6040_VIBINT 0x20
+#define TWL6040_READYINT 0x40
+
+/* INTMR (0x04) fields */
+
+#define TWL6040_THMSK 0x01
+#define TWL6040_PLUGMSK 0x02
+#define TWL6040_HOOKMSK 0x08
+#define TWL6040_HFMSK 0x10
+#define TWL6040_VIBMSK 0x20
+#define TWL6040_READYMSK 0x40
+#define TWL6040_ALLINT_MSK 0x7B
+
+/* NCPCTL (0x05) fields */
+
+#define TWL6040_NCPENA 0x01
+#define TWL6040_NCPOPEN 0x40
+
+/* LDOCTL (0x06) fields */
+
+#define TWL6040_LSLDOENA 0x01
+#define TWL6040_HSLDOENA 0x04
+#define TWL6040_REFENA 0x40
+#define TWL6040_OSCENA 0x80
+
+/* HPPLLCTL (0x07) fields */
+
+#define TWL6040_HPLLENA 0x01
+#define TWL6040_HPLLRST 0x02
+#define TWL6040_HPLLBP 0x04
+#define TWL6040_HPLLSQRENA 0x08
+#define TWL6040_MCLK_12000KHZ (0 << 5)
+#define TWL6040_MCLK_19200KHZ (1 << 5)
+#define TWL6040_MCLK_26000KHZ (2 << 5)
+#define TWL6040_MCLK_38400KHZ (3 << 5)
+#define TWL6040_MCLK_MSK 0x60
+
+/* LPPLLCTL (0x08) fields */
+
+#define TWL6040_LPLLENA 0x01
+#define TWL6040_LPLLRST 0x02
+#define TWL6040_LPLLSEL 0x04
+#define TWL6040_LPLLFIN 0x08
+#define TWL6040_HPLLSEL 0x10
+
+/* HSLCTL/R (0x10/0x11) fields */
+
+#define TWL6040_HSDACENA (1 << 0)
+#define TWL6040_HSDACMODE (1 << 1)
+#define TWL6040_HSDRVENA (1 << 2)
+#define TWL6040_HSDRVMODE (1 << 3)
+
+/* HFLCTL/R (0x14/0x16) fields */
+
+#define TWL6040_HFDACENA (1 << 0)
+#define TWL6040_HFPGAENA (1 << 1)
+#define TWL6040_HFDRVENA (1 << 4)
+
+/* VIBCTLL/R (0x18/0x1A) fields */
+
+#define TWL6040_VIBENA (1 << 0)
+#define TWL6040_VIBSEL (1 << 1)
+#define TWL6040_VIBCTRL (1 << 2)
+#define TWL6040_VIBCTRL_P (1 << 3)
+#define TWL6040_VIBCTRL_N (1 << 4)
+
+/* VIBDATL/R (0x19/0x1B) fields */
+
+#define TWL6040_VIBDAT_MAX 0x64
+
+/* GPOCTL (0x1E) fields */
+
+#define TWL6040_GPO1 0x01
+#define TWL6040_GPO2 0x02
+#define TWL6040_GPO3 0x04
+
+/* ACCCTL (0x2D) fields */
+
+#define TWL6040_I2CSEL 0x01
+#define TWL6040_RESETSPLIT 0x04
+#define TWL6040_INTCLRMODE 0x08
+#define TWL6040_I2CMODE(x) (((x) & 0x3) << 4)
+
+/* STATUS (0x2E) fields */
+
+#define TWL6040_PLUGCOMP 0x02
+#define TWL6040_VIBLOCDET 0x10
+#define TWL6040_VIBROCDET 0x20
+#define TWL6040_TSHUTDET 0x40
+
+#define TWL6040_CELLS 3
+
+#define TWL6040_REV_ES1_0 0x00
+#define TWL6040_REV_ES1_1 0x01 /* Rev ES1.1 and ES1.2 */
+#define TWL6040_REV_ES1_3 0x02
+#define TWL6041_REV_ES2_0 0x10
+
+#define TWL6040_IRQ_TH 0
+#define TWL6040_IRQ_PLUG 1
+#define TWL6040_IRQ_HOOK 2
+#define TWL6040_IRQ_HF 3
+#define TWL6040_IRQ_VIB 4
+#define TWL6040_IRQ_READY 5
+
+/* PLL selection */
+#define TWL6040_SYSCLK_SEL_LPPLL 0
+#define TWL6040_SYSCLK_SEL_HPPLL 1
+
+#define TWL6040_GPO_MAX 3
+
+/* TODO: All of these platform data structs can be removed */
+struct twl6040_codec_data {
+ u16 hs_left_step;
+ u16 hs_right_step;
+ u16 hf_left_step;
+ u16 hf_right_step;
+};
+
+struct twl6040_vibra_data {
+ unsigned int vibldrv_res; /* left driver resistance */
+ unsigned int vibrdrv_res; /* right driver resistance */
+ unsigned int viblmotor_res; /* left motor resistance */
+ unsigned int vibrmotor_res; /* right motor resistance */
+ int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */
+ int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */
+};
+
+struct twl6040_gpo_data {
+ int gpio_base;
+};
+
+struct twl6040_platform_data {
+ int audpwron_gpio; /* audio power-on gpio */
+
+ struct twl6040_codec_data *codec;
+ struct twl6040_vibra_data *vibra;
+ struct twl6040_gpo_data *gpo;
+};
+
+struct regmap;
+struct regmap_irq_chip_data;
+
+struct twl6040 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
+ struct clk *clk32k;
+ struct mutex mutex;
+ struct mutex irq_mutex;
+ struct mfd_cell cells[TWL6040_CELLS];
+ struct completion ready;
+
+ int audpwron;
+ int power_count;
+ int rev;
+
+ /* PLL configuration */
+ int pll;
+ unsigned int sysclk;
+ unsigned int mclk;
+
+ unsigned int irq;
+ unsigned int irq_ready;
+ unsigned int irq_th;
+};
+
+int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg);
+int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg,
+ u8 val);
+int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg,
+ u8 mask);
+int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg,
+ u8 mask);
+int twl6040_power(struct twl6040 *twl6040, int on);
+int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
+ unsigned int freq_in, unsigned int freq_out);
+int twl6040_get_pll(struct twl6040 *twl6040);
+unsigned int twl6040_get_sysclk(struct twl6040 *twl6040);
+
+/* Get the combined status of the vibra control register */
+int twl6040_get_vibralr_status(struct twl6040 *twl6040);
+
+static inline int twl6040_get_revid(struct twl6040 *twl6040)
+{
+ return twl6040->rev;
+}
+
+
+#endif /* End of __TWL6040_CODEC_H__ */
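[Editor's note: the power and PLL helpers declared above are how child drivers (ASoC codec, vibra) are expected to bring the chip up. The sketch below assumes the low-power PLL is fed by the 32.768 kHz clock and that a 19.2 MHz system clock is an acceptable target; the exact rates and error semantics are illustrative, not taken from this header.]

#include <linux/device.h>
#include <linux/mfd/twl6040.h>

/*
 * Illustrative sketch: power the codec up and switch the system clock to
 * the low-power PLL.  Clock rates are assumptions, not header constants.
 */
static int example_twl6040_enable(struct twl6040 *twl6040)
{
        int ret;

        ret = twl6040_power(twl6040, 1);
        if (ret)
                return ret;

        ret = twl6040_set_pll(twl6040, TWL6040_SYSCLK_SEL_LPPLL,
                              32768, 19200000);
        if (ret) {
                twl6040_power(twl6040, 0);
                return ret;
        }

        dev_dbg(twl6040->dev, "sysclk is now %u Hz\n",
                twl6040_get_sysclk(twl6040));
        return 0;
}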
diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h
new file mode 100644
index 000000000..88f90cbf8
--- /dev/null
+++ b/include/linux/mfd/ucb1x00.h
@@ -0,0 +1,260 @@
+/*
+ * linux/include/mfd/ucb1x00.h
+ *
+ * Copyright (C) 2001 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#ifndef UCB1200_H
+#define UCB1200_H
+
+#include <linux/device.h>
+#include <linux/mfd/mcp.h>
+#include <linux/gpio.h>
+#include <linux/mutex.h>
+
+#define UCB_IO_DATA 0x00
+#define UCB_IO_DIR 0x01
+
+#define UCB_IO_0 (1 << 0)
+#define UCB_IO_1 (1 << 1)
+#define UCB_IO_2 (1 << 2)
+#define UCB_IO_3 (1 << 3)
+#define UCB_IO_4 (1 << 4)
+#define UCB_IO_5 (1 << 5)
+#define UCB_IO_6 (1 << 6)
+#define UCB_IO_7 (1 << 7)
+#define UCB_IO_8 (1 << 8)
+#define UCB_IO_9 (1 << 9)
+
+#define UCB_IE_RIS 0x02
+#define UCB_IE_FAL 0x03
+#define UCB_IE_STATUS 0x04
+#define UCB_IE_CLEAR 0x04
+#define UCB_IE_ADC (1 << 11)
+#define UCB_IE_TSPX (1 << 12)
+#define UCB_IE_TSMX (1 << 13)
+#define UCB_IE_TCLIP (1 << 14)
+#define UCB_IE_ACLIP (1 << 15)
+
+#define UCB_IRQ_TSPX 12
+
+#define UCB_TC_A 0x05
+#define UCB_TC_A_LOOP (1 << 7) /* UCB1200 */
+#define UCB_TC_A_AMPL (1 << 7) /* UCB1300 */
+
+#define UCB_TC_B 0x06
+#define UCB_TC_B_VOICE_ENA (1 << 3)
+#define UCB_TC_B_CLIP (1 << 4)
+#define UCB_TC_B_ATT (1 << 6)
+#define UCB_TC_B_SIDE_ENA (1 << 11)
+#define UCB_TC_B_MUTE (1 << 13)
+#define UCB_TC_B_IN_ENA (1 << 14)
+#define UCB_TC_B_OUT_ENA (1 << 15)
+
+#define UCB_AC_A 0x07
+#define UCB_AC_B 0x08
+#define UCB_AC_B_LOOP (1 << 8)
+#define UCB_AC_B_MUTE (1 << 13)
+#define UCB_AC_B_IN_ENA (1 << 14)
+#define UCB_AC_B_OUT_ENA (1 << 15)
+
+#define UCB_TS_CR 0x09
+#define UCB_TS_CR_TSMX_POW (1 << 0)
+#define UCB_TS_CR_TSPX_POW (1 << 1)
+#define UCB_TS_CR_TSMY_POW (1 << 2)
+#define UCB_TS_CR_TSPY_POW (1 << 3)
+#define UCB_TS_CR_TSMX_GND (1 << 4)
+#define UCB_TS_CR_TSPX_GND (1 << 5)
+#define UCB_TS_CR_TSMY_GND (1 << 6)
+#define UCB_TS_CR_TSPY_GND (1 << 7)
+#define UCB_TS_CR_MODE_INT (0 << 8)
+#define UCB_TS_CR_MODE_PRES (1 << 8)
+#define UCB_TS_CR_MODE_POS (2 << 8)
+#define UCB_TS_CR_BIAS_ENA (1 << 11)
+#define UCB_TS_CR_TSPX_LOW (1 << 12)
+#define UCB_TS_CR_TSMX_LOW (1 << 13)
+
+#define UCB_ADC_CR 0x0a
+#define UCB_ADC_SYNC_ENA (1 << 0)
+#define UCB_ADC_VREFBYP_CON (1 << 1)
+#define UCB_ADC_INP_TSPX (0 << 2)
+#define UCB_ADC_INP_TSMX (1 << 2)
+#define UCB_ADC_INP_TSPY (2 << 2)
+#define UCB_ADC_INP_TSMY (3 << 2)
+#define UCB_ADC_INP_AD0 (4 << 2)
+#define UCB_ADC_INP_AD1 (5 << 2)
+#define UCB_ADC_INP_AD2 (6 << 2)
+#define UCB_ADC_INP_AD3 (7 << 2)
+#define UCB_ADC_EXT_REF (1 << 5)
+#define UCB_ADC_START (1 << 7)
+#define UCB_ADC_ENA (1 << 15)
+
+#define UCB_ADC_DATA 0x0b
+#define UCB_ADC_DAT_VAL (1 << 15)
+#define UCB_ADC_DAT(x) (((x) & 0x7fe0) >> 5)
+
+#define UCB_ID 0x0c
+#define UCB_ID_1200 0x1004
+#define UCB_ID_1300 0x1005
+#define UCB_ID_TC35143 0x9712
+
+#define UCB_MODE 0x0d
+#define UCB_MODE_DYN_VFLAG_ENA (1 << 12)
+#define UCB_MODE_AUD_OFF_CAN (1 << 13)
+
+enum ucb1x00_reset {
+ UCB_RST_PROBE,
+ UCB_RST_RESUME,
+ UCB_RST_SUSPEND,
+ UCB_RST_REMOVE,
+ UCB_RST_PROBE_FAIL,
+};
+
+struct ucb1x00_plat_data {
+ void (*reset)(enum ucb1x00_reset);
+ unsigned irq_base;
+ int gpio_base;
+ unsigned can_wakeup;
+};
+
+struct ucb1x00 {
+ raw_spinlock_t irq_lock;
+ struct mcp *mcp;
+ unsigned int irq;
+ int irq_base;
+ struct mutex adc_mutex;
+ spinlock_t io_lock;
+ u16 id;
+ u16 io_dir;
+ u16 io_out;
+ u16 adc_cr;
+ u16 irq_fal_enbl;
+ u16 irq_ris_enbl;
+ u16 irq_mask;
+ u16 irq_wake;
+ struct device dev;
+ struct list_head node;
+ struct list_head devs;
+ struct gpio_chip gpio;
+};
+
+struct ucb1x00_driver;
+
+struct ucb1x00_dev {
+ struct list_head dev_node;
+ struct list_head drv_node;
+ struct ucb1x00 *ucb;
+ struct ucb1x00_driver *drv;
+ void *priv;
+};
+
+struct ucb1x00_driver {
+ struct list_head node;
+ struct list_head devs;
+ int (*add)(struct ucb1x00_dev *dev);
+ void (*remove)(struct ucb1x00_dev *dev);
+ int (*suspend)(struct ucb1x00_dev *dev);
+ int (*resume)(struct ucb1x00_dev *dev);
+};
+
+#define classdev_to_ucb1x00(cd) container_of(cd, struct ucb1x00, dev)
+
+int ucb1x00_register_driver(struct ucb1x00_driver *);
+void ucb1x00_unregister_driver(struct ucb1x00_driver *);
+
+/**
+ * ucb1x00_clkrate - return the UCB1x00 SIB clock rate
+ * @ucb: UCB1x00 structure describing chip
+ *
+ * Return the SIB clock rate in Hz.
+ */
+static inline unsigned int ucb1x00_clkrate(struct ucb1x00 *ucb)
+{
+ return mcp_get_sclk_rate(ucb->mcp);
+}
+
+/**
+ * ucb1x00_enable - enable the UCB1x00 SIB clock
+ * @ucb: UCB1x00 structure describing chip
+ *
+ * Enable the SIB clock. This can be called multiple times.
+ */
+static inline void ucb1x00_enable(struct ucb1x00 *ucb)
+{
+ mcp_enable(ucb->mcp);
+}
+
+/**
+ * ucb1x00_disable - disable the UCB1x00 SIB clock
+ * @ucb: UCB1x00 structure describing chip
+ *
+ * Disable the SIB clock. The SIB clock will only be disabled
+ * when the number of ucb1x00_enable calls matches the number of
+ * ucb1x00_disable calls.
+ */
+static inline void ucb1x00_disable(struct ucb1x00 *ucb)
+{
+ mcp_disable(ucb->mcp);
+}
+
+/**
+ * ucb1x00_reg_write - write a UCB1x00 register
+ * @ucb: UCB1x00 structure describing chip
+ * @reg: UCB1x00 4-bit register index to write
+ * @val: UCB1x00 16-bit value to write
+ *
+ * Write the UCB1x00 register @reg with value @val. The SIB
+ * clock must be running for this function to return.
+ */
+static inline void ucb1x00_reg_write(struct ucb1x00 *ucb, unsigned int reg, unsigned int val)
+{
+ mcp_reg_write(ucb->mcp, reg, val);
+}
+
+/**
+ * ucb1x00_reg_read - read a UCB1x00 register
+ * @ucb: UCB1x00 structure describing chip
+ * @reg: UCB1x00 4-bit register index to read
+ *
+ * Read the UCB1x00 register @reg and return its value. The SIB
+ * clock must be running for this function to return.
+ */
+static inline unsigned int ucb1x00_reg_read(struct ucb1x00 *ucb, unsigned int reg)
+{
+ return mcp_reg_read(ucb->mcp, reg);
+}
+/**
+ * ucb1x00_set_audio_divisor - set the SIB audio divisor
+ * @ucb: UCB1x00 structure describing chip
+ * @div: SIB clock divisor
+ */
+static inline void ucb1x00_set_audio_divisor(struct ucb1x00 *ucb, unsigned int div)
+{
+ mcp_set_audio_divisor(ucb->mcp, div);
+}
+
+/**
+ * ucb1x00_set_telecom_divisor - set the SIB telecom divisor
+ * @ucb: UCB1x00 structure describing chip
+ * @div: SIB clock divisor
+ */
+static inline void ucb1x00_set_telecom_divisor(struct ucb1x00 *ucb, unsigned int div)
+{
+ mcp_set_telecom_divisor(ucb->mcp, div);
+}
+
+void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int, unsigned int);
+void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int, unsigned int);
+unsigned int ucb1x00_io_read(struct ucb1x00 *ucb);
+
+#define UCB_NOSYNC (0)
+#define UCB_SYNC (1)
+
+unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync);
+void ucb1x00_adc_enable(struct ucb1x00 *ucb);
+void ucb1x00_adc_disable(struct ucb1x00 *ucb);
+
+#endif
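A minimal usage sketch of the accessor API declared above, assuming a valid struct ucb1x00 * handed to a registered ucb1x00_driver's add() callback; the choice of ADC input AD0 and the unsynchronised read are purely illustrative.

#include <linux/mfd/ucb1x00.h>

/* Illustrative only: take one unsynchronised sample from ADC input AD0. */
static unsigned int example_read_ad0(struct ucb1x00 *ucb)
{
	unsigned int val;

	ucb1x00_enable(ucb);		/* start the SIB clock (enable/disable is refcounted) */
	ucb1x00_adc_enable(ucb);
	val = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD0, UCB_NOSYNC);
	ucb1x00_adc_disable(ucb);
	ucb1x00_disable(ucb);		/* clock stops once disable calls balance enable calls */

	return val;
}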
diff --git a/include/linux/mfd/viperboard.h b/include/linux/mfd/viperboard.h
new file mode 100644
index 000000000..193452848
--- /dev/null
+++ b/include/linux/mfd/viperboard.h
@@ -0,0 +1,110 @@
+/*
+ * include/linux/mfd/viperboard.h
+ *
+ * Nano River Technologies viperboard definitions
+ *
+ * (C) 2012 by Lemonage GmbH
+ * Author: Lars Poeschel <poeschel@lemonage.de>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_VIPERBOARD_H__
+#define __MFD_VIPERBOARD_H__
+
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#define VPRBRD_EP_OUT 0x02
+#define VPRBRD_EP_IN 0x86
+
+#define VPRBRD_I2C_MSG_LEN 512 /* max length of a msg at the USB level */
+
+#define VPRBRD_I2C_FREQ_6MHZ 1 /* 6 Mbit/s */
+#define VPRBRD_I2C_FREQ_3MHZ 2 /* 3 Mbit/s */
+#define VPRBRD_I2C_FREQ_1MHZ 3 /* 1 Mbit/s */
+#define VPRBRD_I2C_FREQ_FAST 4 /* 400 kbit/s */
+#define VPRBRD_I2C_FREQ_400KHZ VPRBRD_I2C_FREQ_FAST
+#define VPRBRD_I2C_FREQ_200KHZ 5 /* 200 kbit/s */
+#define VPRBRD_I2C_FREQ_STD 6 /* 100 kbit/s */
+#define VPRBRD_I2C_FREQ_100KHZ VPRBRD_I2C_FREQ_STD
+#define VPRBRD_I2C_FREQ_10KHZ 7 /* 10 kbit/s */
+
+#define VPRBRD_I2C_CMD_WRITE 0x00
+#define VPRBRD_I2C_CMD_READ 0x01
+#define VPRBRD_I2C_CMD_ADDR 0x02
+
+#define VPRBRD_USB_TYPE_OUT 0x40
+#define VPRBRD_USB_TYPE_IN 0xc0
+#define VPRBRD_USB_TIMEOUT_MS 100
+#define VPRBRD_USB_REQUEST_I2C_FREQ 0xe6
+#define VPRBRD_USB_REQUEST_I2C 0xe9
+#define VPRBRD_USB_REQUEST_MAJOR 0xea
+#define VPRBRD_USB_REQUEST_MINOR 0xeb
+#define VPRBRD_USB_REQUEST_ADC 0xec
+#define VPRBRD_USB_REQUEST_GPIOA 0xed
+#define VPRBRD_USB_REQUEST_GPIOB 0xdd
+
+struct vprbrd_i2c_write_hdr {
+ u8 cmd;
+ u16 addr;
+ u8 len1;
+ u8 len2;
+ u8 last;
+ u8 chan;
+ u16 spi;
+} __packed;
+
+struct vprbrd_i2c_read_hdr {
+ u8 cmd;
+ u16 addr;
+ u8 len0;
+ u8 len1;
+ u8 len2;
+ u8 len3;
+ u8 len4;
+ u8 len5;
+ u16 tf1; /* transfer 1 length */
+ u16 tf2; /* transfer 2 length */
+} __packed;
+
+struct vprbrd_i2c_status {
+ u8 unknown[11];
+ u8 status;
+} __packed;
+
+struct vprbrd_i2c_write_msg {
+ struct vprbrd_i2c_write_hdr header;
+ u8 data[VPRBRD_I2C_MSG_LEN
+ - sizeof(struct vprbrd_i2c_write_hdr)];
+} __packed;
+
+struct vprbrd_i2c_read_msg {
+ struct vprbrd_i2c_read_hdr header;
+ u8 data[VPRBRD_I2C_MSG_LEN
+ - sizeof(struct vprbrd_i2c_read_hdr)];
+} __packed;
+
+struct vprbrd_i2c_addr_msg {
+ u8 cmd;
+ u8 addr;
+ u8 unknown1;
+ u16 len;
+ u8 unknown2;
+ u8 unknown3;
+} __packed;
+
+/* Structure to hold all device-specific data */
+struct vprbrd {
+ struct usb_device *usb_dev; /* the usb device for this device */
+ struct mutex lock;
+ u8 buf[sizeof(struct vprbrd_i2c_write_msg)];
+ struct platform_device pdev;
+};
+
+#endif /* __MFD_VIPERBOARD_H__ */
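The constants and message structures above define the USB framing the viperboard firmware expects. Below is a hedged sketch of issuing one of the vendor control requests with the standard usb_control_msg() call; the zero wValue/wIndex and the one-byte reply for VPRBRD_USB_REQUEST_MAJOR are assumptions, not something this header guarantees.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/mfd/viperboard.h>

/* Illustrative only: query the firmware's major version over the control pipe. */
static int example_read_major(struct vprbrd *vb, u8 *major)
{
	int ret;

	mutex_lock(&vb->lock);
	ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0),
			      VPRBRD_USB_REQUEST_MAJOR, VPRBRD_USB_TYPE_IN,
			      0x0000, 0x0000, vb->buf, 1,
			      VPRBRD_USB_TIMEOUT_MS);
	if (ret == 1)
		*major = vb->buf[0];
	mutex_unlock(&vb->lock);

	return ret == 1 ? 0 : (ret < 0 ? ret : -EIO);
}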
diff --git a/include/linux/mfd/wl1273-core.h b/include/linux/mfd/wl1273-core.h
new file mode 100644
index 000000000..db2f3f454
--- /dev/null
+++ b/include/linux/mfd/wl1273-core.h
@@ -0,0 +1,290 @@
+/*
+ * include/linux/mfd/wl1273-core.h
+ *
+ * Some definitions for the wl1273 radio receiver/transmitter chip.
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ * Author: Matti J. Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef WL1273_CORE_H
+#define WL1273_CORE_H
+
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+
+#define WL1273_FM_DRIVER_NAME "wl1273-fm"
+#define RX71_FM_I2C_ADDR 0x22
+
+#define WL1273_STEREO_GET 0
+#define WL1273_RSSI_LVL_GET 1
+#define WL1273_IF_COUNT_GET 2
+#define WL1273_FLAG_GET 3
+#define WL1273_RDS_SYNC_GET 4
+#define WL1273_RDS_DATA_GET 5
+#define WL1273_FREQ_SET 10
+#define WL1273_AF_FREQ_SET 11
+#define WL1273_MOST_MODE_SET 12
+#define WL1273_MOST_BLEND_SET 13
+#define WL1273_DEMPH_MODE_SET 14
+#define WL1273_SEARCH_LVL_SET 15
+#define WL1273_BAND_SET 16
+#define WL1273_MUTE_STATUS_SET 17
+#define WL1273_RDS_PAUSE_LVL_SET 18
+#define WL1273_RDS_PAUSE_DUR_SET 19
+#define WL1273_RDS_MEM_SET 20
+#define WL1273_RDS_BLK_B_SET 21
+#define WL1273_RDS_MSK_B_SET 22
+#define WL1273_RDS_PI_MASK_SET 23
+#define WL1273_RDS_PI_SET 24
+#define WL1273_RDS_SYSTEM_SET 25
+#define WL1273_INT_MASK_SET 26
+#define WL1273_SEARCH_DIR_SET 27
+#define WL1273_VOLUME_SET 28
+#define WL1273_AUDIO_ENABLE 29
+#define WL1273_PCM_MODE_SET 30
+#define WL1273_I2S_MODE_CONFIG_SET 31
+#define WL1273_POWER_SET 32
+#define WL1273_INTX_CONFIG_SET 33
+#define WL1273_PULL_EN_SET 34
+#define WL1273_HILO_SET 35
+#define WL1273_SWITCH2FREF 36
+#define WL1273_FREQ_DRIFT_REPORT 37
+
+#define WL1273_PCE_GET 40
+#define WL1273_FIRM_VER_GET 41
+#define WL1273_ASIC_VER_GET 42
+#define WL1273_ASIC_ID_GET 43
+#define WL1273_MAN_ID_GET 44
+#define WL1273_TUNER_MODE_SET 45
+#define WL1273_STOP_SEARCH 46
+#define WL1273_RDS_CNTRL_SET 47
+
+#define WL1273_WRITE_HARDWARE_REG 100
+#define WL1273_CODE_DOWNLOAD 101
+#define WL1273_RESET 102
+
+#define WL1273_FM_POWER_MODE 254
+#define WL1273_FM_INTERRUPT 255
+
+/* Transmitter API */
+
+#define WL1273_CHANL_SET 55
+#define WL1273_SCAN_SPACING_SET 56
+#define WL1273_REF_SET 57
+#define WL1273_POWER_ENB_SET 90
+#define WL1273_POWER_ATT_SET 58
+#define WL1273_POWER_LEV_SET 59
+#define WL1273_AUDIO_DEV_SET 60
+#define WL1273_PILOT_DEV_SET 61
+#define WL1273_RDS_DEV_SET 62
+#define WL1273_PUPD_SET 91
+#define WL1273_AUDIO_IO_SET 63
+#define WL1273_PREMPH_SET 64
+#define WL1273_MONO_SET 66
+#define WL1273_MUTE 92
+#define WL1273_MPX_LMT_ENABLE 67
+#define WL1273_PI_SET 93
+#define WL1273_ECC_SET 69
+#define WL1273_PTY 70
+#define WL1273_AF 71
+#define WL1273_DISPLAY_MODE 74
+#define WL1273_RDS_REP_SET 77
+#define WL1273_RDS_CONFIG_DATA_SET 98
+#define WL1273_RDS_DATA_SET 99
+#define WL1273_RDS_DATA_ENB 94
+#define WL1273_TA_SET 78
+#define WL1273_TP_SET 79
+#define WL1273_DI_SET 80
+#define WL1273_MS_SET 81
+#define WL1273_PS_SCROLL_SPEED 82
+#define WL1273_TX_AUDIO_LEVEL_TEST 96
+#define WL1273_TX_AUDIO_LEVEL_TEST_THRESHOLD 73
+#define WL1273_TX_AUDIO_INPUT_LEVEL_RANGE_SET 54
+#define WL1273_RX_ANTENNA_SELECT 87
+#define WL1273_I2C_DEV_ADDR_SET 86
+#define WL1273_REF_ERR_CALIB_PARAM_SET 88
+#define WL1273_REF_ERR_CALIB_PERIODICITY_SET 89
+#define WL1273_SOC_INT_TRIGGER 52
+#define WL1273_SOC_AUDIO_PATH_SET 83
+#define WL1273_SOC_PCMI_OVERRIDE 84
+#define WL1273_SOC_I2S_OVERRIDE 85
+#define WL1273_RSSI_BLOCK_SCAN_FREQ_SET 95
+#define WL1273_RSSI_BLOCK_SCAN_START 97
+#define WL1273_RSSI_BLOCK_SCAN_DATA_GET 5
+#define WL1273_READ_FMANT_TUNE_VALUE 104
+
+#define WL1273_RDS_OFF 0
+#define WL1273_RDS_ON 1
+#define WL1273_RDS_RESET 2
+
+#define WL1273_AUDIO_DIGITAL 0
+#define WL1273_AUDIO_ANALOG 1
+
+#define WL1273_MODE_RX BIT(0)
+#define WL1273_MODE_TX BIT(1)
+#define WL1273_MODE_OFF BIT(2)
+#define WL1273_MODE_SUSPENDED BIT(3)
+
+#define WL1273_RADIO_CHILD BIT(0)
+#define WL1273_CODEC_CHILD BIT(1)
+
+#define WL1273_RX_MONO 1
+#define WL1273_RX_STEREO 0
+#define WL1273_TX_MONO 0
+#define WL1273_TX_STEREO 1
+
+#define WL1273_MAX_VOLUME 0xffff
+#define WL1273_DEFAULT_VOLUME 0x78b8
+
+/* I2S protocol, left channel first, data width 16 bits */
+#define WL1273_PCM_DEF_MODE 0x00
+
+/* Rx */
+#define WL1273_AUDIO_ENABLE_I2S BIT(0)
+#define WL1273_AUDIO_ENABLE_ANALOG BIT(1)
+
+/* Tx */
+#define WL1273_AUDIO_IO_SET_ANALOG 0
+#define WL1273_AUDIO_IO_SET_I2S 1
+
+#define WL1273_PUPD_SET_OFF 0x00
+#define WL1273_PUPD_SET_ON 0x01
+#define WL1273_PUPD_SET_RETENTION 0x10
+
+/* I2S mode */
+#define WL1273_IS2_WIDTH_32 0x0
+#define WL1273_IS2_WIDTH_40 0x1
+#define WL1273_IS2_WIDTH_22_23 0x2
+#define WL1273_IS2_WIDTH_23_22 0x3
+#define WL1273_IS2_WIDTH_48 0x4
+#define WL1273_IS2_WIDTH_50 0x5
+#define WL1273_IS2_WIDTH_60 0x6
+#define WL1273_IS2_WIDTH_64 0x7
+#define WL1273_IS2_WIDTH_80 0x8
+#define WL1273_IS2_WIDTH_96 0x9
+#define WL1273_IS2_WIDTH_128 0xa
+#define WL1273_IS2_WIDTH 0xf
+
+#define WL1273_IS2_FORMAT_STD (0x0 << 4)
+#define WL1273_IS2_FORMAT_LEFT (0x1 << 4)
+#define WL1273_IS2_FORMAT_RIGHT (0x2 << 4)
+#define WL1273_IS2_FORMAT_USER (0x3 << 4)
+
+#define WL1273_IS2_MASTER (0x0 << 6)
+#define WL1273_IS2_SLAVEW (0x1 << 6)
+
+#define WL1273_IS2_TRI_AFTER_SENDING (0x0 << 7)
+#define WL1273_IS2_TRI_ALWAYS_ACTIVE (0x1 << 7)
+
+#define WL1273_IS2_SDOWS_RR (0x0 << 8)
+#define WL1273_IS2_SDOWS_RF (0x1 << 8)
+#define WL1273_IS2_SDOWS_FR (0x2 << 8)
+#define WL1273_IS2_SDOWS_FF (0x3 << 8)
+
+#define WL1273_IS2_TRI_OPT (0x0 << 10)
+#define WL1273_IS2_TRI_ALWAYS (0x1 << 10)
+
+#define WL1273_IS2_RATE_48K (0x0 << 12)
+#define WL1273_IS2_RATE_44_1K (0x1 << 12)
+#define WL1273_IS2_RATE_32K (0x2 << 12)
+#define WL1273_IS2_RATE_22_05K (0x4 << 12)
+#define WL1273_IS2_RATE_16K (0x5 << 12)
+#define WL1273_IS2_RATE_12K (0x8 << 12)
+#define WL1273_IS2_RATE_11_025 (0x9 << 12)
+#define WL1273_IS2_RATE_8K (0xa << 12)
+#define WL1273_IS2_RATE (0xf << 12)
+
+#define WL1273_I2S_DEF_MODE (WL1273_IS2_WIDTH_32 | \
+ WL1273_IS2_FORMAT_STD | \
+ WL1273_IS2_MASTER | \
+ WL1273_IS2_TRI_AFTER_SENDING | \
+ WL1273_IS2_SDOWS_RR | \
+ WL1273_IS2_TRI_OPT | \
+ WL1273_IS2_RATE_48K)
+
+#define SCHAR_MIN (-128)
+#define SCHAR_MAX 127
+
+#define WL1273_FR_EVENT BIT(0)
+#define WL1273_BL_EVENT BIT(1)
+#define WL1273_RDS_EVENT BIT(2)
+#define WL1273_BBLK_EVENT BIT(3)
+#define WL1273_LSYNC_EVENT BIT(4)
+#define WL1273_LEV_EVENT BIT(5)
+#define WL1273_IFFR_EVENT BIT(6)
+#define WL1273_PI_EVENT BIT(7)
+#define WL1273_PD_EVENT BIT(8)
+#define WL1273_STIC_EVENT BIT(9)
+#define WL1273_MAL_EVENT BIT(10)
+#define WL1273_POW_ENB_EVENT BIT(11)
+#define WL1273_SCAN_OVER_EVENT BIT(12)
+#define WL1273_ERROR_EVENT BIT(13)
+
+#define TUNER_MODE_STOP_SEARCH 0
+#define TUNER_MODE_PRESET 1
+#define TUNER_MODE_AUTO_SEEK 2
+#define TUNER_MODE_AF 3
+#define TUNER_MODE_AUTO_SEEK_PI 4
+#define TUNER_MODE_AUTO_SEEK_BULK 5
+
+#define RDS_BLOCK_SIZE 3
+
+struct wl1273_fm_platform_data {
+ int (*request_resources) (struct i2c_client *client);
+ void (*free_resources) (void);
+ void (*enable) (void);
+ void (*disable) (void);
+
+ u8 forbidden_modes;
+ unsigned int children;
+};
+
+#define WL1273_FM_CORE_CELLS 2
+
+#define WL1273_BAND_OTHER 0
+#define WL1273_BAND_JAPAN 1
+
+#define WL1273_BAND_JAPAN_LOW 76000
+#define WL1273_BAND_JAPAN_HIGH 90000
+#define WL1273_BAND_OTHER_LOW 87500
+#define WL1273_BAND_OTHER_HIGH 108000
+
+#define WL1273_BAND_TX_LOW 76000
+#define WL1273_BAND_TX_HIGH 108000
+
+struct wl1273_core {
+ struct mfd_cell cells[WL1273_FM_CORE_CELLS];
+ struct wl1273_fm_platform_data *pdata;
+
+ unsigned int mode;
+ unsigned int i2s_mode;
+ unsigned int volume;
+ unsigned int audio_mode;
+ unsigned int channel_number;
+ struct mutex lock; /* for serializing fm radio operations */
+
+ struct i2c_client *client;
+
+ int (*read)(struct wl1273_core *core, u8, u16 *);
+ int (*write)(struct wl1273_core *core, u8, u16);
+ int (*write_data)(struct wl1273_core *core, u8 *, u16);
+ int (*set_audio)(struct wl1273_core *core, unsigned int);
+ int (*set_volume)(struct wl1273_core *core, unsigned int);
+};
+
+#endif /* ifndef WL1273_CORE_H */
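The WL1273_IS2_* bitfields combine into the single 16-bit value written with the WL1273_I2S_MODE_CONFIG_SET command, exactly as the WL1273_I2S_DEF_MODE macro above demonstrates. A minimal sketch of selecting a non-default rate and frame width through the core's write hook; the particular field choices are illustrative only.

#include <linux/mfd/wl1273-core.h>

/* Illustrative only: reconfigure the digital audio interface for 44.1 kHz, 64-bit frames. */
static int example_set_i2s_mode(struct wl1273_core *core)
{
	u16 mode = WL1273_IS2_WIDTH_64 |
		   WL1273_IS2_FORMAT_STD |
		   WL1273_IS2_MASTER |
		   WL1273_IS2_TRI_AFTER_SENDING |
		   WL1273_IS2_SDOWS_RR |
		   WL1273_IS2_TRI_OPT |
		   WL1273_IS2_RATE_44_1K;

	return core->write(core, WL1273_I2S_MODE_CONFIG_SET, mode);
}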
diff --git a/include/linux/mfd/wm831x/auxadc.h b/include/linux/mfd/wm831x/auxadc.h
new file mode 100644
index 000000000..867aa23f9
--- /dev/null
+++ b/include/linux/mfd/wm831x/auxadc.h
@@ -0,0 +1,218 @@
+/*
+ * include/linux/mfd/wm831x/auxadc.h -- Auxiliary ADC interface for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_AUXADC_H__
+#define __MFD_WM831X_AUXADC_H__
+
+struct wm831x;
+
+/*
+ * R16429 (0x402D) - AuxADC Data
+ */
+#define WM831X_AUX_DATA_SRC_MASK 0xF000 /* AUX_DATA_SRC - [15:12] */
+#define WM831X_AUX_DATA_SRC_SHIFT 12 /* AUX_DATA_SRC - [15:12] */
+#define WM831X_AUX_DATA_SRC_WIDTH 4 /* AUX_DATA_SRC - [15:12] */
+#define WM831X_AUX_DATA_MASK 0x0FFF /* AUX_DATA - [11:0] */
+#define WM831X_AUX_DATA_SHIFT 0 /* AUX_DATA - [11:0] */
+#define WM831X_AUX_DATA_WIDTH 12 /* AUX_DATA - [11:0] */
+
+/*
+ * R16430 (0x402E) - AuxADC Control
+ */
+#define WM831X_AUX_ENA 0x8000 /* AUX_ENA */
+#define WM831X_AUX_ENA_MASK 0x8000 /* AUX_ENA */
+#define WM831X_AUX_ENA_SHIFT 15 /* AUX_ENA */
+#define WM831X_AUX_ENA_WIDTH 1 /* AUX_ENA */
+#define WM831X_AUX_CVT_ENA 0x4000 /* AUX_CVT_ENA */
+#define WM831X_AUX_CVT_ENA_MASK 0x4000 /* AUX_CVT_ENA */
+#define WM831X_AUX_CVT_ENA_SHIFT 14 /* AUX_CVT_ENA */
+#define WM831X_AUX_CVT_ENA_WIDTH 1 /* AUX_CVT_ENA */
+#define WM831X_AUX_SLPENA 0x1000 /* AUX_SLPENA */
+#define WM831X_AUX_SLPENA_MASK 0x1000 /* AUX_SLPENA */
+#define WM831X_AUX_SLPENA_SHIFT 12 /* AUX_SLPENA */
+#define WM831X_AUX_SLPENA_WIDTH 1 /* AUX_SLPENA */
+#define WM831X_AUX_FRC_ENA 0x0800 /* AUX_FRC_ENA */
+#define WM831X_AUX_FRC_ENA_MASK 0x0800 /* AUX_FRC_ENA */
+#define WM831X_AUX_FRC_ENA_SHIFT 11 /* AUX_FRC_ENA */
+#define WM831X_AUX_FRC_ENA_WIDTH 1 /* AUX_FRC_ENA */
+#define WM831X_AUX_RATE_MASK 0x003F /* AUX_RATE - [5:0] */
+#define WM831X_AUX_RATE_SHIFT 0 /* AUX_RATE - [5:0] */
+#define WM831X_AUX_RATE_WIDTH 6 /* AUX_RATE - [5:0] */
+
+/*
+ * R16431 (0x402F) - AuxADC Source
+ */
+#define WM831X_AUX_CAL_SEL 0x8000 /* AUX_CAL_SEL */
+#define WM831X_AUX_CAL_SEL_MASK 0x8000 /* AUX_CAL_SEL */
+#define WM831X_AUX_CAL_SEL_SHIFT 15 /* AUX_CAL_SEL */
+#define WM831X_AUX_CAL_SEL_WIDTH 1 /* AUX_CAL_SEL */
+#define WM831X_AUX_BKUP_BATT_SEL 0x0400 /* AUX_BKUP_BATT_SEL */
+#define WM831X_AUX_BKUP_BATT_SEL_MASK 0x0400 /* AUX_BKUP_BATT_SEL */
+#define WM831X_AUX_BKUP_BATT_SEL_SHIFT 10 /* AUX_BKUP_BATT_SEL */
+#define WM831X_AUX_BKUP_BATT_SEL_WIDTH 1 /* AUX_BKUP_BATT_SEL */
+#define WM831X_AUX_WALL_SEL 0x0200 /* AUX_WALL_SEL */
+#define WM831X_AUX_WALL_SEL_MASK 0x0200 /* AUX_WALL_SEL */
+#define WM831X_AUX_WALL_SEL_SHIFT 9 /* AUX_WALL_SEL */
+#define WM831X_AUX_WALL_SEL_WIDTH 1 /* AUX_WALL_SEL */
+#define WM831X_AUX_BATT_SEL 0x0100 /* AUX_BATT_SEL */
+#define WM831X_AUX_BATT_SEL_MASK 0x0100 /* AUX_BATT_SEL */
+#define WM831X_AUX_BATT_SEL_SHIFT 8 /* AUX_BATT_SEL */
+#define WM831X_AUX_BATT_SEL_WIDTH 1 /* AUX_BATT_SEL */
+#define WM831X_AUX_USB_SEL 0x0080 /* AUX_USB_SEL */
+#define WM831X_AUX_USB_SEL_MASK 0x0080 /* AUX_USB_SEL */
+#define WM831X_AUX_USB_SEL_SHIFT 7 /* AUX_USB_SEL */
+#define WM831X_AUX_USB_SEL_WIDTH 1 /* AUX_USB_SEL */
+#define WM831X_AUX_SYSVDD_SEL 0x0040 /* AUX_SYSVDD_SEL */
+#define WM831X_AUX_SYSVDD_SEL_MASK 0x0040 /* AUX_SYSVDD_SEL */
+#define WM831X_AUX_SYSVDD_SEL_SHIFT 6 /* AUX_SYSVDD_SEL */
+#define WM831X_AUX_SYSVDD_SEL_WIDTH 1 /* AUX_SYSVDD_SEL */
+#define WM831X_AUX_BATT_TEMP_SEL 0x0020 /* AUX_BATT_TEMP_SEL */
+#define WM831X_AUX_BATT_TEMP_SEL_MASK 0x0020 /* AUX_BATT_TEMP_SEL */
+#define WM831X_AUX_BATT_TEMP_SEL_SHIFT 5 /* AUX_BATT_TEMP_SEL */
+#define WM831X_AUX_BATT_TEMP_SEL_WIDTH 1 /* AUX_BATT_TEMP_SEL */
+#define WM831X_AUX_CHIP_TEMP_SEL 0x0010 /* AUX_CHIP_TEMP_SEL */
+#define WM831X_AUX_CHIP_TEMP_SEL_MASK 0x0010 /* AUX_CHIP_TEMP_SEL */
+#define WM831X_AUX_CHIP_TEMP_SEL_SHIFT 4 /* AUX_CHIP_TEMP_SEL */
+#define WM831X_AUX_CHIP_TEMP_SEL_WIDTH 1 /* AUX_CHIP_TEMP_SEL */
+#define WM831X_AUX_AUX4_SEL 0x0008 /* AUX_AUX4_SEL */
+#define WM831X_AUX_AUX4_SEL_MASK 0x0008 /* AUX_AUX4_SEL */
+#define WM831X_AUX_AUX4_SEL_SHIFT 3 /* AUX_AUX4_SEL */
+#define WM831X_AUX_AUX4_SEL_WIDTH 1 /* AUX_AUX4_SEL */
+#define WM831X_AUX_AUX3_SEL 0x0004 /* AUX_AUX3_SEL */
+#define WM831X_AUX_AUX3_SEL_MASK 0x0004 /* AUX_AUX3_SEL */
+#define WM831X_AUX_AUX3_SEL_SHIFT 2 /* AUX_AUX3_SEL */
+#define WM831X_AUX_AUX3_SEL_WIDTH 1 /* AUX_AUX3_SEL */
+#define WM831X_AUX_AUX2_SEL 0x0002 /* AUX_AUX2_SEL */
+#define WM831X_AUX_AUX2_SEL_MASK 0x0002 /* AUX_AUX2_SEL */
+#define WM831X_AUX_AUX2_SEL_SHIFT 1 /* AUX_AUX2_SEL */
+#define WM831X_AUX_AUX2_SEL_WIDTH 1 /* AUX_AUX2_SEL */
+#define WM831X_AUX_AUX1_SEL 0x0001 /* AUX_AUX1_SEL */
+#define WM831X_AUX_AUX1_SEL_MASK 0x0001 /* AUX_AUX1_SEL */
+#define WM831X_AUX_AUX1_SEL_SHIFT 0 /* AUX_AUX1_SEL */
+#define WM831X_AUX_AUX1_SEL_WIDTH 1 /* AUX_AUX1_SEL */
+
+/*
+ * R16432 (0x4030) - Comparator Control
+ */
+#define WM831X_DCOMP4_STS 0x0800 /* DCOMP4_STS */
+#define WM831X_DCOMP4_STS_MASK 0x0800 /* DCOMP4_STS */
+#define WM831X_DCOMP4_STS_SHIFT 11 /* DCOMP4_STS */
+#define WM831X_DCOMP4_STS_WIDTH 1 /* DCOMP4_STS */
+#define WM831X_DCOMP3_STS 0x0400 /* DCOMP3_STS */
+#define WM831X_DCOMP3_STS_MASK 0x0400 /* DCOMP3_STS */
+#define WM831X_DCOMP3_STS_SHIFT 10 /* DCOMP3_STS */
+#define WM831X_DCOMP3_STS_WIDTH 1 /* DCOMP3_STS */
+#define WM831X_DCOMP2_STS 0x0200 /* DCOMP2_STS */
+#define WM831X_DCOMP2_STS_MASK 0x0200 /* DCOMP2_STS */
+#define WM831X_DCOMP2_STS_SHIFT 9 /* DCOMP2_STS */
+#define WM831X_DCOMP2_STS_WIDTH 1 /* DCOMP2_STS */
+#define WM831X_DCOMP1_STS 0x0100 /* DCOMP1_STS */
+#define WM831X_DCOMP1_STS_MASK 0x0100 /* DCOMP1_STS */
+#define WM831X_DCOMP1_STS_SHIFT 8 /* DCOMP1_STS */
+#define WM831X_DCOMP1_STS_WIDTH 1 /* DCOMP1_STS */
+#define WM831X_DCMP4_ENA 0x0008 /* DCMP4_ENA */
+#define WM831X_DCMP4_ENA_MASK 0x0008 /* DCMP4_ENA */
+#define WM831X_DCMP4_ENA_SHIFT 3 /* DCMP4_ENA */
+#define WM831X_DCMP4_ENA_WIDTH 1 /* DCMP4_ENA */
+#define WM831X_DCMP3_ENA 0x0004 /* DCMP3_ENA */
+#define WM831X_DCMP3_ENA_MASK 0x0004 /* DCMP3_ENA */
+#define WM831X_DCMP3_ENA_SHIFT 2 /* DCMP3_ENA */
+#define WM831X_DCMP3_ENA_WIDTH 1 /* DCMP3_ENA */
+#define WM831X_DCMP2_ENA 0x0002 /* DCMP2_ENA */
+#define WM831X_DCMP2_ENA_MASK 0x0002 /* DCMP2_ENA */
+#define WM831X_DCMP2_ENA_SHIFT 1 /* DCMP2_ENA */
+#define WM831X_DCMP2_ENA_WIDTH 1 /* DCMP2_ENA */
+#define WM831X_DCMP1_ENA 0x0001 /* DCMP1_ENA */
+#define WM831X_DCMP1_ENA_MASK 0x0001 /* DCMP1_ENA */
+#define WM831X_DCMP1_ENA_SHIFT 0 /* DCMP1_ENA */
+#define WM831X_DCMP1_ENA_WIDTH 1 /* DCMP1_ENA */
+
+/*
+ * R16433 (0x4031) - Comparator 1
+ */
+#define WM831X_DCMP1_SRC_MASK 0xE000 /* DCMP1_SRC - [15:13] */
+#define WM831X_DCMP1_SRC_SHIFT 13 /* DCMP1_SRC - [15:13] */
+#define WM831X_DCMP1_SRC_WIDTH 3 /* DCMP1_SRC - [15:13] */
+#define WM831X_DCMP1_GT 0x1000 /* DCMP1_GT */
+#define WM831X_DCMP1_GT_MASK 0x1000 /* DCMP1_GT */
+#define WM831X_DCMP1_GT_SHIFT 12 /* DCMP1_GT */
+#define WM831X_DCMP1_GT_WIDTH 1 /* DCMP1_GT */
+#define WM831X_DCMP1_THR_MASK 0x0FFF /* DCMP1_THR - [11:0] */
+#define WM831X_DCMP1_THR_SHIFT 0 /* DCMP1_THR - [11:0] */
+#define WM831X_DCMP1_THR_WIDTH 12 /* DCMP1_THR - [11:0] */
+
+/*
+ * R16434 (0x4032) - Comparator 2
+ */
+#define WM831X_DCMP2_SRC_MASK 0xE000 /* DCMP2_SRC - [15:13] */
+#define WM831X_DCMP2_SRC_SHIFT 13 /* DCMP2_SRC - [15:13] */
+#define WM831X_DCMP2_SRC_WIDTH 3 /* DCMP2_SRC - [15:13] */
+#define WM831X_DCMP2_GT 0x1000 /* DCMP2_GT */
+#define WM831X_DCMP2_GT_MASK 0x1000 /* DCMP2_GT */
+#define WM831X_DCMP2_GT_SHIFT 12 /* DCMP2_GT */
+#define WM831X_DCMP2_GT_WIDTH 1 /* DCMP2_GT */
+#define WM831X_DCMP2_THR_MASK 0x0FFF /* DCMP2_THR - [11:0] */
+#define WM831X_DCMP2_THR_SHIFT 0 /* DCMP2_THR - [11:0] */
+#define WM831X_DCMP2_THR_WIDTH 12 /* DCMP2_THR - [11:0] */
+
+/*
+ * R16435 (0x4033) - Comparator 3
+ */
+#define WM831X_DCMP3_SRC_MASK 0xE000 /* DCMP3_SRC - [15:13] */
+#define WM831X_DCMP3_SRC_SHIFT 13 /* DCMP3_SRC - [15:13] */
+#define WM831X_DCMP3_SRC_WIDTH 3 /* DCMP3_SRC - [15:13] */
+#define WM831X_DCMP3_GT 0x1000 /* DCMP3_GT */
+#define WM831X_DCMP3_GT_MASK 0x1000 /* DCMP3_GT */
+#define WM831X_DCMP3_GT_SHIFT 12 /* DCMP3_GT */
+#define WM831X_DCMP3_GT_WIDTH 1 /* DCMP3_GT */
+#define WM831X_DCMP3_THR_MASK 0x0FFF /* DCMP3_THR - [11:0] */
+#define WM831X_DCMP3_THR_SHIFT 0 /* DCMP3_THR - [11:0] */
+#define WM831X_DCMP3_THR_WIDTH 12 /* DCMP3_THR - [11:0] */
+
+/*
+ * R16436 (0x4034) - Comparator 4
+ */
+#define WM831X_DCMP4_SRC_MASK 0xE000 /* DCMP4_SRC - [15:13] */
+#define WM831X_DCMP4_SRC_SHIFT 13 /* DCMP4_SRC - [15:13] */
+#define WM831X_DCMP4_SRC_WIDTH 3 /* DCMP4_SRC - [15:13] */
+#define WM831X_DCMP4_GT 0x1000 /* DCMP4_GT */
+#define WM831X_DCMP4_GT_MASK 0x1000 /* DCMP4_GT */
+#define WM831X_DCMP4_GT_SHIFT 12 /* DCMP4_GT */
+#define WM831X_DCMP4_GT_WIDTH 1 /* DCMP4_GT */
+#define WM831X_DCMP4_THR_MASK 0x0FFF /* DCMP4_THR - [11:0] */
+#define WM831X_DCMP4_THR_SHIFT 0 /* DCMP4_THR - [11:0] */
+#define WM831X_DCMP4_THR_WIDTH 12 /* DCMP4_THR - [11:0] */
+
+#define WM831X_AUX_CAL_FACTOR 0xfff
+#define WM831X_AUX_CAL_NOMINAL 0x222
+
+enum wm831x_auxadc {
+ WM831X_AUX_CAL = 15,
+ WM831X_AUX_BKUP_BATT = 10,
+ WM831X_AUX_WALL = 9,
+ WM831X_AUX_BATT = 8,
+ WM831X_AUX_USB = 7,
+ WM831X_AUX_SYSVDD = 6,
+ WM831X_AUX_BATT_TEMP = 5,
+ WM831X_AUX_CHIP_TEMP = 4,
+ WM831X_AUX_AUX4 = 3,
+ WM831X_AUX_AUX3 = 2,
+ WM831X_AUX_AUX2 = 1,
+ WM831X_AUX_AUX1 = 0,
+};
+
+int wm831x_auxadc_read(struct wm831x *wm831x, enum wm831x_auxadc input);
+int wm831x_auxadc_read_uv(struct wm831x *wm831x, enum wm831x_auxadc input);
+
+#endif
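A minimal sketch of the AUXADC helpers declared above, assuming a probed struct wm831x * (see core.h) and that, as with most such helpers, a negative return value carries an errno; WM831X_AUX_BATT is only an example input.

#include <linux/device.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/auxadc.h>

/* Illustrative only: sample the battery input and log the converted voltage. */
static int example_report_battery(struct wm831x *wm831x)
{
	int uv = wm831x_auxadc_read_uv(wm831x, WM831X_AUX_BATT);

	if (uv < 0)
		return uv;	/* treat a negative value as an errno */

	dev_info(wm831x->dev, "battery input: %d uV\n", uv);
	return 0;
}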
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
new file mode 100644
index 000000000..76c226484
--- /dev/null
+++ b/include/linux/mfd/wm831x/core.h
@@ -0,0 +1,430 @@
+/*
+ * include/linux/mfd/wm831x/core.h -- Core interface for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_CORE_H__
+#define __MFD_WM831X_CORE_H__
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+#include <linux/mfd/wm831x/auxadc.h>
+
+/*
+ * Register values.
+ */
+#define WM831X_RESET_ID 0x00
+#define WM831X_REVISION 0x01
+#define WM831X_PARENT_ID 0x4000
+#define WM831X_SYSVDD_CONTROL 0x4001
+#define WM831X_THERMAL_MONITORING 0x4002
+#define WM831X_POWER_STATE 0x4003
+#define WM831X_WATCHDOG 0x4004
+#define WM831X_ON_PIN_CONTROL 0x4005
+#define WM831X_RESET_CONTROL 0x4006
+#define WM831X_CONTROL_INTERFACE 0x4007
+#define WM831X_SECURITY_KEY 0x4008
+#define WM831X_SOFTWARE_SCRATCH 0x4009
+#define WM831X_OTP_CONTROL 0x400A
+#define WM831X_GPIO_LEVEL 0x400C
+#define WM831X_SYSTEM_STATUS 0x400D
+#define WM831X_ON_SOURCE 0x400E
+#define WM831X_OFF_SOURCE 0x400F
+#define WM831X_SYSTEM_INTERRUPTS 0x4010
+#define WM831X_INTERRUPT_STATUS_1 0x4011
+#define WM831X_INTERRUPT_STATUS_2 0x4012
+#define WM831X_INTERRUPT_STATUS_3 0x4013
+#define WM831X_INTERRUPT_STATUS_4 0x4014
+#define WM831X_INTERRUPT_STATUS_5 0x4015
+#define WM831X_IRQ_CONFIG 0x4017
+#define WM831X_SYSTEM_INTERRUPTS_MASK 0x4018
+#define WM831X_INTERRUPT_STATUS_1_MASK 0x4019
+#define WM831X_INTERRUPT_STATUS_2_MASK 0x401A
+#define WM831X_INTERRUPT_STATUS_3_MASK 0x401B
+#define WM831X_INTERRUPT_STATUS_4_MASK 0x401C
+#define WM831X_INTERRUPT_STATUS_5_MASK 0x401D
+#define WM831X_RTC_WRITE_COUNTER 0x4020
+#define WM831X_RTC_TIME_1 0x4021
+#define WM831X_RTC_TIME_2 0x4022
+#define WM831X_RTC_ALARM_1 0x4023
+#define WM831X_RTC_ALARM_2 0x4024
+#define WM831X_RTC_CONTROL 0x4025
+#define WM831X_RTC_TRIM 0x4026
+#define WM831X_TOUCH_CONTROL_1 0x4028
+#define WM831X_TOUCH_CONTROL_2 0x4029
+#define WM831X_TOUCH_DATA_X 0x402A
+#define WM831X_TOUCH_DATA_Y 0x402B
+#define WM831X_TOUCH_DATA_Z 0x402C
+#define WM831X_AUXADC_DATA 0x402D
+#define WM831X_AUXADC_CONTROL 0x402E
+#define WM831X_AUXADC_SOURCE 0x402F
+#define WM831X_COMPARATOR_CONTROL 0x4030
+#define WM831X_COMPARATOR_1 0x4031
+#define WM831X_COMPARATOR_2 0x4032
+#define WM831X_COMPARATOR_3 0x4033
+#define WM831X_COMPARATOR_4 0x4034
+#define WM831X_GPIO1_CONTROL 0x4038
+#define WM831X_GPIO2_CONTROL 0x4039
+#define WM831X_GPIO3_CONTROL 0x403A
+#define WM831X_GPIO4_CONTROL 0x403B
+#define WM831X_GPIO5_CONTROL 0x403C
+#define WM831X_GPIO6_CONTROL 0x403D
+#define WM831X_GPIO7_CONTROL 0x403E
+#define WM831X_GPIO8_CONTROL 0x403F
+#define WM831X_GPIO9_CONTROL 0x4040
+#define WM831X_GPIO10_CONTROL 0x4041
+#define WM831X_GPIO11_CONTROL 0x4042
+#define WM831X_GPIO12_CONTROL 0x4043
+#define WM831X_GPIO13_CONTROL 0x4044
+#define WM831X_GPIO14_CONTROL 0x4045
+#define WM831X_GPIO15_CONTROL 0x4046
+#define WM831X_GPIO16_CONTROL 0x4047
+#define WM831X_CHARGER_CONTROL_1 0x4048
+#define WM831X_CHARGER_CONTROL_2 0x4049
+#define WM831X_CHARGER_STATUS 0x404A
+#define WM831X_BACKUP_CHARGER_CONTROL 0x404B
+#define WM831X_STATUS_LED_1 0x404C
+#define WM831X_STATUS_LED_2 0x404D
+#define WM831X_CURRENT_SINK_1 0x404E
+#define WM831X_CURRENT_SINK_2 0x404F
+#define WM831X_DCDC_ENABLE 0x4050
+#define WM831X_LDO_ENABLE 0x4051
+#define WM831X_DCDC_STATUS 0x4052
+#define WM831X_LDO_STATUS 0x4053
+#define WM831X_DCDC_UV_STATUS 0x4054
+#define WM831X_LDO_UV_STATUS 0x4055
+#define WM831X_DC1_CONTROL_1 0x4056
+#define WM831X_DC1_CONTROL_2 0x4057
+#define WM831X_DC1_ON_CONFIG 0x4058
+#define WM831X_DC1_SLEEP_CONTROL 0x4059
+#define WM831X_DC1_DVS_CONTROL 0x405A
+#define WM831X_DC2_CONTROL_1 0x405B
+#define WM831X_DC2_CONTROL_2 0x405C
+#define WM831X_DC2_ON_CONFIG 0x405D
+#define WM831X_DC2_SLEEP_CONTROL 0x405E
+#define WM831X_DC2_DVS_CONTROL 0x405F
+#define WM831X_DC3_CONTROL_1 0x4060
+#define WM831X_DC3_CONTROL_2 0x4061
+#define WM831X_DC3_ON_CONFIG 0x4062
+#define WM831X_DC3_SLEEP_CONTROL 0x4063
+#define WM831X_DC4_CONTROL 0x4064
+#define WM831X_DC4_SLEEP_CONTROL 0x4065
+#define WM832X_DC4_SLEEP_CONTROL 0x4067
+#define WM831X_EPE1_CONTROL 0x4066
+#define WM831X_EPE2_CONTROL 0x4067
+#define WM831X_LDO1_CONTROL 0x4068
+#define WM831X_LDO1_ON_CONTROL 0x4069
+#define WM831X_LDO1_SLEEP_CONTROL 0x406A
+#define WM831X_LDO2_CONTROL 0x406B
+#define WM831X_LDO2_ON_CONTROL 0x406C
+#define WM831X_LDO2_SLEEP_CONTROL 0x406D
+#define WM831X_LDO3_CONTROL 0x406E
+#define WM831X_LDO3_ON_CONTROL 0x406F
+#define WM831X_LDO3_SLEEP_CONTROL 0x4070
+#define WM831X_LDO4_CONTROL 0x4071
+#define WM831X_LDO4_ON_CONTROL 0x4072
+#define WM831X_LDO4_SLEEP_CONTROL 0x4073
+#define WM831X_LDO5_CONTROL 0x4074
+#define WM831X_LDO5_ON_CONTROL 0x4075
+#define WM831X_LDO5_SLEEP_CONTROL 0x4076
+#define WM831X_LDO6_CONTROL 0x4077
+#define WM831X_LDO6_ON_CONTROL 0x4078
+#define WM831X_LDO6_SLEEP_CONTROL 0x4079
+#define WM831X_LDO7_CONTROL 0x407A
+#define WM831X_LDO7_ON_CONTROL 0x407B
+#define WM831X_LDO7_SLEEP_CONTROL 0x407C
+#define WM831X_LDO8_CONTROL 0x407D
+#define WM831X_LDO8_ON_CONTROL 0x407E
+#define WM831X_LDO8_SLEEP_CONTROL 0x407F
+#define WM831X_LDO9_CONTROL 0x4080
+#define WM831X_LDO9_ON_CONTROL 0x4081
+#define WM831X_LDO9_SLEEP_CONTROL 0x4082
+#define WM831X_LDO10_CONTROL 0x4083
+#define WM831X_LDO10_ON_CONTROL 0x4084
+#define WM831X_LDO10_SLEEP_CONTROL 0x4085
+#define WM831X_LDO11_ON_CONTROL 0x4087
+#define WM831X_LDO11_SLEEP_CONTROL 0x4088
+#define WM831X_POWER_GOOD_SOURCE_1 0x408E
+#define WM831X_POWER_GOOD_SOURCE_2 0x408F
+#define WM831X_CLOCK_CONTROL_1 0x4090
+#define WM831X_CLOCK_CONTROL_2 0x4091
+#define WM831X_FLL_CONTROL_1 0x4092
+#define WM831X_FLL_CONTROL_2 0x4093
+#define WM831X_FLL_CONTROL_3 0x4094
+#define WM831X_FLL_CONTROL_4 0x4095
+#define WM831X_FLL_CONTROL_5 0x4096
+#define WM831X_UNIQUE_ID_1 0x7800
+#define WM831X_UNIQUE_ID_2 0x7801
+#define WM831X_UNIQUE_ID_3 0x7802
+#define WM831X_UNIQUE_ID_4 0x7803
+#define WM831X_UNIQUE_ID_5 0x7804
+#define WM831X_UNIQUE_ID_6 0x7805
+#define WM831X_UNIQUE_ID_7 0x7806
+#define WM831X_UNIQUE_ID_8 0x7807
+#define WM831X_FACTORY_OTP_ID 0x7808
+#define WM831X_FACTORY_OTP_1 0x7809
+#define WM831X_FACTORY_OTP_2 0x780A
+#define WM831X_FACTORY_OTP_3 0x780B
+#define WM831X_FACTORY_OTP_4 0x780C
+#define WM831X_FACTORY_OTP_5 0x780D
+#define WM831X_CUSTOMER_OTP_ID 0x7810
+#define WM831X_DC1_OTP_CONTROL 0x7811
+#define WM831X_DC2_OTP_CONTROL 0x7812
+#define WM831X_DC3_OTP_CONTROL 0x7813
+#define WM831X_LDO1_2_OTP_CONTROL 0x7814
+#define WM831X_LDO3_4_OTP_CONTROL 0x7815
+#define WM831X_LDO5_6_OTP_CONTROL 0x7816
+#define WM831X_LDO7_8_OTP_CONTROL 0x7817
+#define WM831X_LDO9_10_OTP_CONTROL 0x7818
+#define WM831X_LDO11_EPE_CONTROL 0x7819
+#define WM831X_GPIO1_OTP_CONTROL 0x781A
+#define WM831X_GPIO2_OTP_CONTROL 0x781B
+#define WM831X_GPIO3_OTP_CONTROL 0x781C
+#define WM831X_GPIO4_OTP_CONTROL 0x781D
+#define WM831X_GPIO5_OTP_CONTROL 0x781E
+#define WM831X_GPIO6_OTP_CONTROL 0x781F
+#define WM831X_DBE_CHECK_DATA 0x7827
+
+/*
+ * R0 (0x00) - Reset ID
+ */
+#define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */
+#define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */
+#define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */
+
+/*
+ * R1 (0x01) - Revision
+ */
+#define WM831X_PARENT_REV_MASK 0xFF00 /* PARENT_REV - [15:8] */
+#define WM831X_PARENT_REV_SHIFT 8 /* PARENT_REV - [15:8] */
+#define WM831X_PARENT_REV_WIDTH 8 /* PARENT_REV - [15:8] */
+#define WM831X_CHILD_REV_MASK 0x00FF /* CHILD_REV - [7:0] */
+#define WM831X_CHILD_REV_SHIFT 0 /* CHILD_REV - [7:0] */
+#define WM831X_CHILD_REV_WIDTH 8 /* CHILD_REV - [7:0] */
+
+/*
+ * R16384 (0x4000) - Parent ID
+ */
+#define WM831X_PARENT_ID_MASK 0xFFFF /* PARENT_ID - [15:0] */
+#define WM831X_PARENT_ID_SHIFT 0 /* PARENT_ID - [15:0] */
+#define WM831X_PARENT_ID_WIDTH 16 /* PARENT_ID - [15:0] */
+
+/*
+ * R16389 (0x4005) - ON Pin Control
+ */
+#define WM831X_ON_PIN_SECACT_MASK 0x0300 /* ON_PIN_SECACT - [9:8] */
+#define WM831X_ON_PIN_SECACT_SHIFT 8 /* ON_PIN_SECACT - [9:8] */
+#define WM831X_ON_PIN_SECACT_WIDTH 2 /* ON_PIN_SECACT - [9:8] */
+#define WM831X_ON_PIN_PRIMACT_MASK 0x0030 /* ON_PIN_PRIMACT - [5:4] */
+#define WM831X_ON_PIN_PRIMACT_SHIFT 4 /* ON_PIN_PRIMACT - [5:4] */
+#define WM831X_ON_PIN_PRIMACT_WIDTH 2 /* ON_PIN_PRIMACT - [5:4] */
+#define WM831X_ON_PIN_STS 0x0008 /* ON_PIN_STS */
+#define WM831X_ON_PIN_STS_MASK 0x0008 /* ON_PIN_STS */
+#define WM831X_ON_PIN_STS_SHIFT 3 /* ON_PIN_STS */
+#define WM831X_ON_PIN_STS_WIDTH 1 /* ON_PIN_STS */
+#define WM831X_ON_PIN_TO_MASK 0x0003 /* ON_PIN_TO - [1:0] */
+#define WM831X_ON_PIN_TO_SHIFT 0 /* ON_PIN_TO - [1:0] */
+#define WM831X_ON_PIN_TO_WIDTH 2 /* ON_PIN_TO - [1:0] */
+
+/*
+ * R16528 (0x4090) - Clock Control 1
+ */
+#define WM831X_CLKOUT_ENA 0x8000 /* CLKOUT_ENA */
+#define WM831X_CLKOUT_ENA_MASK 0x8000 /* CLKOUT_ENA */
+#define WM831X_CLKOUT_ENA_SHIFT 15 /* CLKOUT_ENA */
+#define WM831X_CLKOUT_ENA_WIDTH 1 /* CLKOUT_ENA */
+#define WM831X_CLKOUT_OD 0x2000 /* CLKOUT_OD */
+#define WM831X_CLKOUT_OD_MASK 0x2000 /* CLKOUT_OD */
+#define WM831X_CLKOUT_OD_SHIFT 13 /* CLKOUT_OD */
+#define WM831X_CLKOUT_OD_WIDTH 1 /* CLKOUT_OD */
+#define WM831X_CLKOUT_SLOT_MASK 0x0700 /* CLKOUT_SLOT - [10:8] */
+#define WM831X_CLKOUT_SLOT_SHIFT 8 /* CLKOUT_SLOT - [10:8] */
+#define WM831X_CLKOUT_SLOT_WIDTH 3 /* CLKOUT_SLOT - [10:8] */
+#define WM831X_CLKOUT_SLPSLOT_MASK 0x0070 /* CLKOUT_SLPSLOT - [6:4] */
+#define WM831X_CLKOUT_SLPSLOT_SHIFT 4 /* CLKOUT_SLPSLOT - [6:4] */
+#define WM831X_CLKOUT_SLPSLOT_WIDTH 3 /* CLKOUT_SLPSLOT - [6:4] */
+#define WM831X_CLKOUT_SRC 0x0001 /* CLKOUT_SRC */
+#define WM831X_CLKOUT_SRC_MASK 0x0001 /* CLKOUT_SRC */
+#define WM831X_CLKOUT_SRC_SHIFT 0 /* CLKOUT_SRC */
+#define WM831X_CLKOUT_SRC_WIDTH 1 /* CLKOUT_SRC */
+
+/*
+ * R16529 (0x4091) - Clock Control 2
+ */
+#define WM831X_XTAL_INH 0x8000 /* XTAL_INH */
+#define WM831X_XTAL_INH_MASK 0x8000 /* XTAL_INH */
+#define WM831X_XTAL_INH_SHIFT 15 /* XTAL_INH */
+#define WM831X_XTAL_INH_WIDTH 1 /* XTAL_INH */
+#define WM831X_XTAL_ENA 0x2000 /* XTAL_ENA */
+#define WM831X_XTAL_ENA_MASK 0x2000 /* XTAL_ENA */
+#define WM831X_XTAL_ENA_SHIFT 13 /* XTAL_ENA */
+#define WM831X_XTAL_ENA_WIDTH 1 /* XTAL_ENA */
+#define WM831X_XTAL_BKUPENA 0x1000 /* XTAL_BKUPENA */
+#define WM831X_XTAL_BKUPENA_MASK 0x1000 /* XTAL_BKUPENA */
+#define WM831X_XTAL_BKUPENA_SHIFT 12 /* XTAL_BKUPENA */
+#define WM831X_XTAL_BKUPENA_WIDTH 1 /* XTAL_BKUPENA */
+#define WM831X_FLL_AUTO 0x0080 /* FLL_AUTO */
+#define WM831X_FLL_AUTO_MASK 0x0080 /* FLL_AUTO */
+#define WM831X_FLL_AUTO_SHIFT 7 /* FLL_AUTO */
+#define WM831X_FLL_AUTO_WIDTH 1 /* FLL_AUTO */
+#define WM831X_FLL_AUTO_FREQ_MASK 0x0007 /* FLL_AUTO_FREQ - [2:0] */
+#define WM831X_FLL_AUTO_FREQ_SHIFT 0 /* FLL_AUTO_FREQ - [2:0] */
+#define WM831X_FLL_AUTO_FREQ_WIDTH 3 /* FLL_AUTO_FREQ - [2:0] */
+
+/*
+ * R16530 (0x4092) - FLL Control 1
+ */
+#define WM831X_FLL_FRAC 0x0004 /* FLL_FRAC */
+#define WM831X_FLL_FRAC_MASK 0x0004 /* FLL_FRAC */
+#define WM831X_FLL_FRAC_SHIFT 2 /* FLL_FRAC */
+#define WM831X_FLL_FRAC_WIDTH 1 /* FLL_FRAC */
+#define WM831X_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */
+#define WM831X_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */
+#define WM831X_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */
+#define WM831X_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */
+#define WM831X_FLL_ENA 0x0001 /* FLL_ENA */
+#define WM831X_FLL_ENA_MASK 0x0001 /* FLL_ENA */
+#define WM831X_FLL_ENA_SHIFT 0 /* FLL_ENA */
+#define WM831X_FLL_ENA_WIDTH 1 /* FLL_ENA */
+
+/*
+ * R16531 (0x4093) - FLL Control 2
+ */
+#define WM831X_FLL_OUTDIV_MASK 0x3F00 /* FLL_OUTDIV - [13:8] */
+#define WM831X_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [13:8] */
+#define WM831X_FLL_OUTDIV_WIDTH 6 /* FLL_OUTDIV - [13:8] */
+#define WM831X_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */
+#define WM831X_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */
+#define WM831X_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */
+#define WM831X_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */
+#define WM831X_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */
+#define WM831X_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */
+
+/*
+ * R16532 (0x4094) - FLL Control 3
+ */
+#define WM831X_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */
+#define WM831X_FLL_K_SHIFT 0 /* FLL_K - [15:0] */
+#define WM831X_FLL_K_WIDTH 16 /* FLL_K - [15:0] */
+
+/*
+ * R16533 (0x4095) - FLL Control 4
+ */
+#define WM831X_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */
+#define WM831X_FLL_N_SHIFT 5 /* FLL_N - [14:5] */
+#define WM831X_FLL_N_WIDTH 10 /* FLL_N - [14:5] */
+#define WM831X_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */
+#define WM831X_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */
+#define WM831X_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */
+
+/*
+ * R16534 (0x4096) - FLL Control 5
+ */
+#define WM831X_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM831X_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM831X_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM831X_FLL_CLK_SRC_MASK 0x0003 /* FLL_CLK_SRC - [1:0] */
+#define WM831X_FLL_CLK_SRC_SHIFT 0 /* FLL_CLK_SRC - [1:0] */
+#define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */
+
+struct regulator_dev;
+struct irq_domain;
+
+#define WM831X_NUM_IRQ_REGS 5
+#define WM831X_NUM_GPIO_REGS 16
+
+enum wm831x_parent {
+ WM8310 = 0x8310,
+ WM8311 = 0x8311,
+ WM8312 = 0x8312,
+ WM8320 = 0x8320,
+ WM8321 = 0x8321,
+ WM8325 = 0x8325,
+ WM8326 = 0x8326,
+};
+
+struct wm831x;
+
+typedef int (*wm831x_auxadc_read_fn)(struct wm831x *wm831x,
+ enum wm831x_auxadc input);
+
+struct wm831x {
+ struct mutex io_lock;
+
+ struct device *dev;
+
+ struct regmap *regmap;
+
+ int irq; /* Our chip IRQ */
+ struct mutex irq_lock;
+ struct irq_domain *irq_domain;
+ int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
+ int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
+
+ bool soft_shutdown;
+
+ /* Chip revision based flags */
+ unsigned has_gpio_ena:1; /* Has GPIO enable bit */
+ unsigned has_cs_sts:1; /* Has current sink status bit */
+ unsigned charger_irq_wake:1; /* Are charger IRQs a wake source? */
+
+ int num_gpio;
+
+ /* Used by the interrupt controller code to post writes */
+ int gpio_update[WM831X_NUM_GPIO_REGS];
+ bool gpio_level_high[WM831X_NUM_GPIO_REGS];
+ bool gpio_level_low[WM831X_NUM_GPIO_REGS];
+
+ struct mutex auxadc_lock;
+ struct list_head auxadc_pending;
+ u16 auxadc_active;
+ wm831x_auxadc_read_fn auxadc_read;
+
+ /* The WM831x has a security key blocking access to certain
+ * registers. The mutex is taken by the accessors when locking
+ * and unlocking the security key; "locked" is used to fail
+ * writes while the key is locked.
+ */
+ struct mutex key_lock;
+ unsigned int locked:1;
+};
+
+/* Device I/O API */
+int wm831x_reg_read(struct wm831x *wm831x, unsigned short reg);
+int wm831x_reg_write(struct wm831x *wm831x, unsigned short reg,
+ unsigned short val);
+void wm831x_reg_lock(struct wm831x *wm831x);
+int wm831x_reg_unlock(struct wm831x *wm831x);
+int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
+ unsigned short mask, unsigned short val);
+int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
+ int count, u16 *buf);
+
+int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq);
+void wm831x_device_exit(struct wm831x *wm831x);
+int wm831x_device_suspend(struct wm831x *wm831x);
+void wm831x_device_shutdown(struct wm831x *wm831x);
+int wm831x_irq_init(struct wm831x *wm831x, int irq);
+void wm831x_irq_exit(struct wm831x *wm831x);
+void wm831x_auxadc_init(struct wm831x *wm831x);
+
+static inline int wm831x_irq(struct wm831x *wm831x, int irq)
+{
+ return irq_create_mapping(wm831x->irq_domain, irq);
+}
+
+extern struct regmap_config wm831x_regmap_config;
+
+#endif
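Every register field above follows the same _MASK/_SHIFT/_WIDTH naming, so a driver reads the register with wm831x_reg_read() and recovers a field by masking and shifting. A minimal sketch, assuming a probed struct wm831x * and that a negative return from wm831x_reg_read() is an errno:

#include <linux/device.h>
#include <linux/mfd/wm831x/core.h>

/* Illustrative only: decode the parent and child revision fields of R1. */
static int example_report_revision(struct wm831x *wm831x)
{
	int ret = wm831x_reg_read(wm831x, WM831X_REVISION);

	if (ret < 0)
		return ret;	/* treat a negative value as an errno */

	dev_info(wm831x->dev, "parent rev %d, child rev %d\n",
		 (ret & WM831X_PARENT_REV_MASK) >> WM831X_PARENT_REV_SHIFT,
		 (ret & WM831X_CHILD_REV_MASK) >> WM831X_CHILD_REV_SHIFT);
	return 0;
}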
diff --git a/include/linux/mfd/wm831x/gpio.h b/include/linux/mfd/wm831x/gpio.h
new file mode 100644
index 000000000..9b163c588
--- /dev/null
+++ b/include/linux/mfd/wm831x/gpio.h
@@ -0,0 +1,59 @@
+/*
+ * include/linux/mfd/wm831x/gpio.h -- GPIO for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_GPIO_H__
+#define __MFD_WM831X_GPIO_H__
+
+/*
+ * R16440-16455 (0x4038-0x4047) - GPIOx Control
+ */
+#define WM831X_GPN_DIR 0x8000 /* GPN_DIR */
+#define WM831X_GPN_DIR_MASK 0x8000 /* GPN_DIR */
+#define WM831X_GPN_DIR_SHIFT 15 /* GPN_DIR */
+#define WM831X_GPN_DIR_WIDTH 1 /* GPN_DIR */
+#define WM831X_GPN_PULL_MASK 0x6000 /* GPN_PULL - [14:13] */
+#define WM831X_GPN_PULL_SHIFT 13 /* GPN_PULL - [14:13] */
+#define WM831X_GPN_PULL_WIDTH 2 /* GPN_PULL - [14:13] */
+#define WM831X_GPN_INT_MODE 0x1000 /* GPN_INT_MODE */
+#define WM831X_GPN_INT_MODE_MASK 0x1000 /* GPN_INT_MODE */
+#define WM831X_GPN_INT_MODE_SHIFT 12 /* GPN_INT_MODE */
+#define WM831X_GPN_INT_MODE_WIDTH 1 /* GPN_INT_MODE */
+#define WM831X_GPN_PWR_DOM 0x0800 /* GPN_PWR_DOM */
+#define WM831X_GPN_PWR_DOM_MASK 0x0800 /* GPN_PWR_DOM */
+#define WM831X_GPN_PWR_DOM_SHIFT 11 /* GPN_PWR_DOM */
+#define WM831X_GPN_PWR_DOM_WIDTH 1 /* GPN_PWR_DOM */
+#define WM831X_GPN_POL 0x0400 /* GPN_POL */
+#define WM831X_GPN_POL_MASK 0x0400 /* GPN_POL */
+#define WM831X_GPN_POL_SHIFT 10 /* GPN_POL */
+#define WM831X_GPN_POL_WIDTH 1 /* GPN_POL */
+#define WM831X_GPN_OD 0x0200 /* GPN_OD */
+#define WM831X_GPN_OD_MASK 0x0200 /* GPN_OD */
+#define WM831X_GPN_OD_SHIFT 9 /* GPN_OD */
+#define WM831X_GPN_OD_WIDTH 1 /* GPN_OD */
+#define WM831X_GPN_ENA 0x0080 /* GPN_ENA */
+#define WM831X_GPN_ENA_MASK 0x0080 /* GPN_ENA */
+#define WM831X_GPN_ENA_SHIFT 7 /* GPN_ENA */
+#define WM831X_GPN_ENA_WIDTH 1 /* GPN_ENA */
+#define WM831X_GPN_TRI 0x0080 /* GPN_TRI */
+#define WM831X_GPN_TRI_MASK 0x0080 /* GPN_TRI */
+#define WM831X_GPN_TRI_SHIFT 7 /* GPN_TRI */
+#define WM831X_GPN_TRI_WIDTH 1 /* GPN_TRI */
+#define WM831X_GPN_FN_MASK 0x000F /* GPN_FN - [3:0] */
+#define WM831X_GPN_FN_SHIFT 0 /* GPN_FN - [3:0] */
+#define WM831X_GPN_FN_WIDTH 4 /* GPN_FN - [3:0] */
+
+#define WM831X_GPIO_PULL_NONE (0 << WM831X_GPN_PULL_SHIFT)
+#define WM831X_GPIO_PULL_DOWN (1 << WM831X_GPN_PULL_SHIFT)
+#define WM831X_GPIO_PULL_UP (2 << WM831X_GPN_PULL_SHIFT)
+#endif
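The GPN_* fields apply to each of the GPIOx Control registers (WM831X_GPIO1_CONTROL through WM831X_GPIO16_CONTROL in core.h), and wm831x_set_bits() is the natural way to update individual fields. A hedged sketch follows; the pin, the pull selection and the assumption that a set GPN_DIR bit selects input are illustrative only.

#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/gpio.h>

/* Illustrative only: configure GPIO1 as an input with the internal pull-up. */
static int example_config_gpio1(struct wm831x *wm831x)
{
	return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL,
			       WM831X_GPN_DIR_MASK | WM831X_GPN_PULL_MASK,
			       WM831X_GPN_DIR | WM831X_GPIO_PULL_UP);
}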
diff --git a/include/linux/mfd/wm831x/irq.h b/include/linux/mfd/wm831x/irq.h
new file mode 100644
index 000000000..3a8c97656
--- /dev/null
+++ b/include/linux/mfd/wm831x/irq.h
@@ -0,0 +1,764 @@
+/*
+ * include/linux/mfd/wm831x/irq.h -- Interrupt controller for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_IRQ_H__
+#define __MFD_WM831X_IRQ_H__
+
+/* Interrupt number assignments within Linux */
+#define WM831X_IRQ_TEMP_THW 0
+#define WM831X_IRQ_GPIO_1 1
+#define WM831X_IRQ_GPIO_2 2
+#define WM831X_IRQ_GPIO_3 3
+#define WM831X_IRQ_GPIO_4 4
+#define WM831X_IRQ_GPIO_5 5
+#define WM831X_IRQ_GPIO_6 6
+#define WM831X_IRQ_GPIO_7 7
+#define WM831X_IRQ_GPIO_8 8
+#define WM831X_IRQ_GPIO_9 9
+#define WM831X_IRQ_GPIO_10 10
+#define WM831X_IRQ_GPIO_11 11
+#define WM831X_IRQ_GPIO_12 12
+#define WM831X_IRQ_GPIO_13 13
+#define WM831X_IRQ_GPIO_14 14
+#define WM831X_IRQ_GPIO_15 15
+#define WM831X_IRQ_GPIO_16 16
+#define WM831X_IRQ_ON 17
+#define WM831X_IRQ_PPM_SYSLO 18
+#define WM831X_IRQ_PPM_PWR_SRC 19
+#define WM831X_IRQ_PPM_USB_CURR 20
+#define WM831X_IRQ_WDOG_TO 21
+#define WM831X_IRQ_RTC_PER 22
+#define WM831X_IRQ_RTC_ALM 23
+#define WM831X_IRQ_CHG_BATT_HOT 24
+#define WM831X_IRQ_CHG_BATT_COLD 25
+#define WM831X_IRQ_CHG_BATT_FAIL 26
+#define WM831X_IRQ_CHG_OV 27
+#define WM831X_IRQ_CHG_END 29
+#define WM831X_IRQ_CHG_TO 30
+#define WM831X_IRQ_CHG_MODE 31
+#define WM831X_IRQ_CHG_START 32
+#define WM831X_IRQ_TCHDATA 33
+#define WM831X_IRQ_TCHPD 34
+#define WM831X_IRQ_AUXADC_DATA 35
+#define WM831X_IRQ_AUXADC_DCOMP1 36
+#define WM831X_IRQ_AUXADC_DCOMP2 37
+#define WM831X_IRQ_AUXADC_DCOMP3 38
+#define WM831X_IRQ_AUXADC_DCOMP4 39
+#define WM831X_IRQ_CS1 40
+#define WM831X_IRQ_CS2 41
+#define WM831X_IRQ_HC_DC1 42
+#define WM831X_IRQ_HC_DC2 43
+#define WM831X_IRQ_UV_LDO1 44
+#define WM831X_IRQ_UV_LDO2 45
+#define WM831X_IRQ_UV_LDO3 46
+#define WM831X_IRQ_UV_LDO4 47
+#define WM831X_IRQ_UV_LDO5 48
+#define WM831X_IRQ_UV_LDO6 49
+#define WM831X_IRQ_UV_LDO7 50
+#define WM831X_IRQ_UV_LDO8 51
+#define WM831X_IRQ_UV_LDO9 52
+#define WM831X_IRQ_UV_LDO10 53
+#define WM831X_IRQ_UV_DC1 54
+#define WM831X_IRQ_UV_DC2 55
+#define WM831X_IRQ_UV_DC3 56
+#define WM831X_IRQ_UV_DC4 57
+
+#define WM831X_NUM_IRQS 58
+
+/*
+ * R16400 (0x4010) - System Interrupts
+ */
+#define WM831X_PS_INT 0x8000 /* PS_INT */
+#define WM831X_PS_INT_MASK 0x8000 /* PS_INT */
+#define WM831X_PS_INT_SHIFT 15 /* PS_INT */
+#define WM831X_PS_INT_WIDTH 1 /* PS_INT */
+#define WM831X_TEMP_INT 0x4000 /* TEMP_INT */
+#define WM831X_TEMP_INT_MASK 0x4000 /* TEMP_INT */
+#define WM831X_TEMP_INT_SHIFT 14 /* TEMP_INT */
+#define WM831X_TEMP_INT_WIDTH 1 /* TEMP_INT */
+#define WM831X_GP_INT 0x2000 /* GP_INT */
+#define WM831X_GP_INT_MASK 0x2000 /* GP_INT */
+#define WM831X_GP_INT_SHIFT 13 /* GP_INT */
+#define WM831X_GP_INT_WIDTH 1 /* GP_INT */
+#define WM831X_ON_PIN_INT 0x1000 /* ON_PIN_INT */
+#define WM831X_ON_PIN_INT_MASK 0x1000 /* ON_PIN_INT */
+#define WM831X_ON_PIN_INT_SHIFT 12 /* ON_PIN_INT */
+#define WM831X_ON_PIN_INT_WIDTH 1 /* ON_PIN_INT */
+#define WM831X_WDOG_INT 0x0800 /* WDOG_INT */
+#define WM831X_WDOG_INT_MASK 0x0800 /* WDOG_INT */
+#define WM831X_WDOG_INT_SHIFT 11 /* WDOG_INT */
+#define WM831X_WDOG_INT_WIDTH 1 /* WDOG_INT */
+#define WM831X_TCHDATA_INT 0x0400 /* TCHDATA_INT */
+#define WM831X_TCHDATA_INT_MASK 0x0400 /* TCHDATA_INT */
+#define WM831X_TCHDATA_INT_SHIFT 10 /* TCHDATA_INT */
+#define WM831X_TCHDATA_INT_WIDTH 1 /* TCHDATA_INT */
+#define WM831X_TCHPD_INT 0x0200 /* TCHPD_INT */
+#define WM831X_TCHPD_INT_MASK 0x0200 /* TCHPD_INT */
+#define WM831X_TCHPD_INT_SHIFT 9 /* TCHPD_INT */
+#define WM831X_TCHPD_INT_WIDTH 1 /* TCHPD_INT */
+#define WM831X_AUXADC_INT 0x0100 /* AUXADC_INT */
+#define WM831X_AUXADC_INT_MASK 0x0100 /* AUXADC_INT */
+#define WM831X_AUXADC_INT_SHIFT 8 /* AUXADC_INT */
+#define WM831X_AUXADC_INT_WIDTH 1 /* AUXADC_INT */
+#define WM831X_PPM_INT 0x0080 /* PPM_INT */
+#define WM831X_PPM_INT_MASK 0x0080 /* PPM_INT */
+#define WM831X_PPM_INT_SHIFT 7 /* PPM_INT */
+#define WM831X_PPM_INT_WIDTH 1 /* PPM_INT */
+#define WM831X_CS_INT 0x0040 /* CS_INT */
+#define WM831X_CS_INT_MASK 0x0040 /* CS_INT */
+#define WM831X_CS_INT_SHIFT 6 /* CS_INT */
+#define WM831X_CS_INT_WIDTH 1 /* CS_INT */
+#define WM831X_RTC_INT 0x0020 /* RTC_INT */
+#define WM831X_RTC_INT_MASK 0x0020 /* RTC_INT */
+#define WM831X_RTC_INT_SHIFT 5 /* RTC_INT */
+#define WM831X_RTC_INT_WIDTH 1 /* RTC_INT */
+#define WM831X_OTP_INT 0x0010 /* OTP_INT */
+#define WM831X_OTP_INT_MASK 0x0010 /* OTP_INT */
+#define WM831X_OTP_INT_SHIFT 4 /* OTP_INT */
+#define WM831X_OTP_INT_WIDTH 1 /* OTP_INT */
+#define WM831X_CHILD_INT 0x0008 /* CHILD_INT */
+#define WM831X_CHILD_INT_MASK 0x0008 /* CHILD_INT */
+#define WM831X_CHILD_INT_SHIFT 3 /* CHILD_INT */
+#define WM831X_CHILD_INT_WIDTH 1 /* CHILD_INT */
+#define WM831X_CHG_INT 0x0004 /* CHG_INT */
+#define WM831X_CHG_INT_MASK 0x0004 /* CHG_INT */
+#define WM831X_CHG_INT_SHIFT 2 /* CHG_INT */
+#define WM831X_CHG_INT_WIDTH 1 /* CHG_INT */
+#define WM831X_HC_INT 0x0002 /* HC_INT */
+#define WM831X_HC_INT_MASK 0x0002 /* HC_INT */
+#define WM831X_HC_INT_SHIFT 1 /* HC_INT */
+#define WM831X_HC_INT_WIDTH 1 /* HC_INT */
+#define WM831X_UV_INT 0x0001 /* UV_INT */
+#define WM831X_UV_INT_MASK 0x0001 /* UV_INT */
+#define WM831X_UV_INT_SHIFT 0 /* UV_INT */
+#define WM831X_UV_INT_WIDTH 1 /* UV_INT */
+
+/*
+ * R16401 (0x4011) - Interrupt Status 1
+ */
+#define WM831X_PPM_SYSLO_EINT 0x8000 /* PPM_SYSLO_EINT */
+#define WM831X_PPM_SYSLO_EINT_MASK 0x8000 /* PPM_SYSLO_EINT */
+#define WM831X_PPM_SYSLO_EINT_SHIFT 15 /* PPM_SYSLO_EINT */
+#define WM831X_PPM_SYSLO_EINT_WIDTH 1 /* PPM_SYSLO_EINT */
+#define WM831X_PPM_PWR_SRC_EINT 0x4000 /* PPM_PWR_SRC_EINT */
+#define WM831X_PPM_PWR_SRC_EINT_MASK 0x4000 /* PPM_PWR_SRC_EINT */
+#define WM831X_PPM_PWR_SRC_EINT_SHIFT 14 /* PPM_PWR_SRC_EINT */
+#define WM831X_PPM_PWR_SRC_EINT_WIDTH 1 /* PPM_PWR_SRC_EINT */
+#define WM831X_PPM_USB_CURR_EINT 0x2000 /* PPM_USB_CURR_EINT */
+#define WM831X_PPM_USB_CURR_EINT_MASK 0x2000 /* PPM_USB_CURR_EINT */
+#define WM831X_PPM_USB_CURR_EINT_SHIFT 13 /* PPM_USB_CURR_EINT */
+#define WM831X_PPM_USB_CURR_EINT_WIDTH 1 /* PPM_USB_CURR_EINT */
+#define WM831X_ON_PIN_EINT 0x1000 /* ON_PIN_EINT */
+#define WM831X_ON_PIN_EINT_MASK 0x1000 /* ON_PIN_EINT */
+#define WM831X_ON_PIN_EINT_SHIFT 12 /* ON_PIN_EINT */
+#define WM831X_ON_PIN_EINT_WIDTH 1 /* ON_PIN_EINT */
+#define WM831X_WDOG_TO_EINT 0x0800 /* WDOG_TO_EINT */
+#define WM831X_WDOG_TO_EINT_MASK 0x0800 /* WDOG_TO_EINT */
+#define WM831X_WDOG_TO_EINT_SHIFT 11 /* WDOG_TO_EINT */
+#define WM831X_WDOG_TO_EINT_WIDTH 1 /* WDOG_TO_EINT */
+#define WM831X_TCHDATA_EINT 0x0400 /* TCHDATA_EINT */
+#define WM831X_TCHDATA_EINT_MASK 0x0400 /* TCHDATA_EINT */
+#define WM831X_TCHDATA_EINT_SHIFT 10 /* TCHDATA_EINT */
+#define WM831X_TCHDATA_EINT_WIDTH 1 /* TCHDATA_EINT */
+#define WM831X_TCHPD_EINT 0x0200 /* TCHPD_EINT */
+#define WM831X_TCHPD_EINT_MASK 0x0200 /* TCHPD_EINT */
+#define WM831X_TCHPD_EINT_SHIFT 9 /* TCHPD_EINT */
+#define WM831X_TCHPD_EINT_WIDTH 1 /* TCHPD_EINT */
+#define WM831X_AUXADC_DATA_EINT 0x0100 /* AUXADC_DATA_EINT */
+#define WM831X_AUXADC_DATA_EINT_MASK 0x0100 /* AUXADC_DATA_EINT */
+#define WM831X_AUXADC_DATA_EINT_SHIFT 8 /* AUXADC_DATA_EINT */
+#define WM831X_AUXADC_DATA_EINT_WIDTH 1 /* AUXADC_DATA_EINT */
+#define WM831X_AUXADC_DCOMP4_EINT 0x0080 /* AUXADC_DCOMP4_EINT */
+#define WM831X_AUXADC_DCOMP4_EINT_MASK 0x0080 /* AUXADC_DCOMP4_EINT */
+#define WM831X_AUXADC_DCOMP4_EINT_SHIFT 7 /* AUXADC_DCOMP4_EINT */
+#define WM831X_AUXADC_DCOMP4_EINT_WIDTH 1 /* AUXADC_DCOMP4_EINT */
+#define WM831X_AUXADC_DCOMP3_EINT 0x0040 /* AUXADC_DCOMP3_EINT */
+#define WM831X_AUXADC_DCOMP3_EINT_MASK 0x0040 /* AUXADC_DCOMP3_EINT */
+#define WM831X_AUXADC_DCOMP3_EINT_SHIFT 6 /* AUXADC_DCOMP3_EINT */
+#define WM831X_AUXADC_DCOMP3_EINT_WIDTH 1 /* AUXADC_DCOMP3_EINT */
+#define WM831X_AUXADC_DCOMP2_EINT 0x0020 /* AUXADC_DCOMP2_EINT */
+#define WM831X_AUXADC_DCOMP2_EINT_MASK 0x0020 /* AUXADC_DCOMP2_EINT */
+#define WM831X_AUXADC_DCOMP2_EINT_SHIFT 5 /* AUXADC_DCOMP2_EINT */
+#define WM831X_AUXADC_DCOMP2_EINT_WIDTH 1 /* AUXADC_DCOMP2_EINT */
+#define WM831X_AUXADC_DCOMP1_EINT 0x0010 /* AUXADC_DCOMP1_EINT */
+#define WM831X_AUXADC_DCOMP1_EINT_MASK 0x0010 /* AUXADC_DCOMP1_EINT */
+#define WM831X_AUXADC_DCOMP1_EINT_SHIFT 4 /* AUXADC_DCOMP1_EINT */
+#define WM831X_AUXADC_DCOMP1_EINT_WIDTH 1 /* AUXADC_DCOMP1_EINT */
+#define WM831X_RTC_PER_EINT 0x0008 /* RTC_PER_EINT */
+#define WM831X_RTC_PER_EINT_MASK 0x0008 /* RTC_PER_EINT */
+#define WM831X_RTC_PER_EINT_SHIFT 3 /* RTC_PER_EINT */
+#define WM831X_RTC_PER_EINT_WIDTH 1 /* RTC_PER_EINT */
+#define WM831X_RTC_ALM_EINT 0x0004 /* RTC_ALM_EINT */
+#define WM831X_RTC_ALM_EINT_MASK 0x0004 /* RTC_ALM_EINT */
+#define WM831X_RTC_ALM_EINT_SHIFT 2 /* RTC_ALM_EINT */
+#define WM831X_RTC_ALM_EINT_WIDTH 1 /* RTC_ALM_EINT */
+#define WM831X_TEMP_THW_EINT 0x0002 /* TEMP_THW_EINT */
+#define WM831X_TEMP_THW_EINT_MASK 0x0002 /* TEMP_THW_EINT */
+#define WM831X_TEMP_THW_EINT_SHIFT 1 /* TEMP_THW_EINT */
+#define WM831X_TEMP_THW_EINT_WIDTH 1 /* TEMP_THW_EINT */
+
+/*
+ * R16402 (0x4012) - Interrupt Status 2
+ */
+#define WM831X_CHG_BATT_HOT_EINT 0x8000 /* CHG_BATT_HOT_EINT */
+#define WM831X_CHG_BATT_HOT_EINT_MASK 0x8000 /* CHG_BATT_HOT_EINT */
+#define WM831X_CHG_BATT_HOT_EINT_SHIFT 15 /* CHG_BATT_HOT_EINT */
+#define WM831X_CHG_BATT_HOT_EINT_WIDTH 1 /* CHG_BATT_HOT_EINT */
+#define WM831X_CHG_BATT_COLD_EINT 0x4000 /* CHG_BATT_COLD_EINT */
+#define WM831X_CHG_BATT_COLD_EINT_MASK 0x4000 /* CHG_BATT_COLD_EINT */
+#define WM831X_CHG_BATT_COLD_EINT_SHIFT 14 /* CHG_BATT_COLD_EINT */
+#define WM831X_CHG_BATT_COLD_EINT_WIDTH 1 /* CHG_BATT_COLD_EINT */
+#define WM831X_CHG_BATT_FAIL_EINT 0x2000 /* CHG_BATT_FAIL_EINT */
+#define WM831X_CHG_BATT_FAIL_EINT_MASK 0x2000 /* CHG_BATT_FAIL_EINT */
+#define WM831X_CHG_BATT_FAIL_EINT_SHIFT 13 /* CHG_BATT_FAIL_EINT */
+#define WM831X_CHG_BATT_FAIL_EINT_WIDTH 1 /* CHG_BATT_FAIL_EINT */
+#define WM831X_CHG_OV_EINT 0x1000 /* CHG_OV_EINT */
+#define WM831X_CHG_OV_EINT_MASK 0x1000 /* CHG_OV_EINT */
+#define WM831X_CHG_OV_EINT_SHIFT 12 /* CHG_OV_EINT */
+#define WM831X_CHG_OV_EINT_WIDTH 1 /* CHG_OV_EINT */
+#define WM831X_CHG_END_EINT 0x0800 /* CHG_END_EINT */
+#define WM831X_CHG_END_EINT_MASK 0x0800 /* CHG_END_EINT */
+#define WM831X_CHG_END_EINT_SHIFT 11 /* CHG_END_EINT */
+#define WM831X_CHG_END_EINT_WIDTH 1 /* CHG_END_EINT */
+#define WM831X_CHG_TO_EINT 0x0400 /* CHG_TO_EINT */
+#define WM831X_CHG_TO_EINT_MASK 0x0400 /* CHG_TO_EINT */
+#define WM831X_CHG_TO_EINT_SHIFT 10 /* CHG_TO_EINT */
+#define WM831X_CHG_TO_EINT_WIDTH 1 /* CHG_TO_EINT */
+#define WM831X_CHG_MODE_EINT 0x0200 /* CHG_MODE_EINT */
+#define WM831X_CHG_MODE_EINT_MASK 0x0200 /* CHG_MODE_EINT */
+#define WM831X_CHG_MODE_EINT_SHIFT 9 /* CHG_MODE_EINT */
+#define WM831X_CHG_MODE_EINT_WIDTH 1 /* CHG_MODE_EINT */
+#define WM831X_CHG_START_EINT 0x0100 /* CHG_START_EINT */
+#define WM831X_CHG_START_EINT_MASK 0x0100 /* CHG_START_EINT */
+#define WM831X_CHG_START_EINT_SHIFT 8 /* CHG_START_EINT */
+#define WM831X_CHG_START_EINT_WIDTH 1 /* CHG_START_EINT */
+#define WM831X_CS2_EINT 0x0080 /* CS2_EINT */
+#define WM831X_CS2_EINT_MASK 0x0080 /* CS2_EINT */
+#define WM831X_CS2_EINT_SHIFT 7 /* CS2_EINT */
+#define WM831X_CS2_EINT_WIDTH 1 /* CS2_EINT */
+#define WM831X_CS1_EINT 0x0040 /* CS1_EINT */
+#define WM831X_CS1_EINT_MASK 0x0040 /* CS1_EINT */
+#define WM831X_CS1_EINT_SHIFT 6 /* CS1_EINT */
+#define WM831X_CS1_EINT_WIDTH 1 /* CS1_EINT */
+#define WM831X_OTP_CMD_END_EINT 0x0020 /* OTP_CMD_END_EINT */
+#define WM831X_OTP_CMD_END_EINT_MASK 0x0020 /* OTP_CMD_END_EINT */
+#define WM831X_OTP_CMD_END_EINT_SHIFT 5 /* OTP_CMD_END_EINT */
+#define WM831X_OTP_CMD_END_EINT_WIDTH 1 /* OTP_CMD_END_EINT */
+#define WM831X_OTP_ERR_EINT 0x0010 /* OTP_ERR_EINT */
+#define WM831X_OTP_ERR_EINT_MASK 0x0010 /* OTP_ERR_EINT */
+#define WM831X_OTP_ERR_EINT_SHIFT 4 /* OTP_ERR_EINT */
+#define WM831X_OTP_ERR_EINT_WIDTH 1 /* OTP_ERR_EINT */
+#define WM831X_PS_POR_EINT 0x0004 /* PS_POR_EINT */
+#define WM831X_PS_POR_EINT_MASK 0x0004 /* PS_POR_EINT */
+#define WM831X_PS_POR_EINT_SHIFT 2 /* PS_POR_EINT */
+#define WM831X_PS_POR_EINT_WIDTH 1 /* PS_POR_EINT */
+#define WM831X_PS_SLEEP_OFF_EINT 0x0002 /* PS_SLEEP_OFF_EINT */
+#define WM831X_PS_SLEEP_OFF_EINT_MASK 0x0002 /* PS_SLEEP_OFF_EINT */
+#define WM831X_PS_SLEEP_OFF_EINT_SHIFT 1 /* PS_SLEEP_OFF_EINT */
+#define WM831X_PS_SLEEP_OFF_EINT_WIDTH 1 /* PS_SLEEP_OFF_EINT */
+#define WM831X_PS_ON_WAKE_EINT 0x0001 /* PS_ON_WAKE_EINT */
+#define WM831X_PS_ON_WAKE_EINT_MASK 0x0001 /* PS_ON_WAKE_EINT */
+#define WM831X_PS_ON_WAKE_EINT_SHIFT 0 /* PS_ON_WAKE_EINT */
+#define WM831X_PS_ON_WAKE_EINT_WIDTH 1 /* PS_ON_WAKE_EINT */
+
+/*
+ * R16403 (0x4013) - Interrupt Status 3
+ */
+#define WM831X_UV_LDO10_EINT 0x0200 /* UV_LDO10_EINT */
+#define WM831X_UV_LDO10_EINT_MASK 0x0200 /* UV_LDO10_EINT */
+#define WM831X_UV_LDO10_EINT_SHIFT 9 /* UV_LDO10_EINT */
+#define WM831X_UV_LDO10_EINT_WIDTH 1 /* UV_LDO10_EINT */
+#define WM831X_UV_LDO9_EINT 0x0100 /* UV_LDO9_EINT */
+#define WM831X_UV_LDO9_EINT_MASK 0x0100 /* UV_LDO9_EINT */
+#define WM831X_UV_LDO9_EINT_SHIFT 8 /* UV_LDO9_EINT */
+#define WM831X_UV_LDO9_EINT_WIDTH 1 /* UV_LDO9_EINT */
+#define WM831X_UV_LDO8_EINT 0x0080 /* UV_LDO8_EINT */
+#define WM831X_UV_LDO8_EINT_MASK 0x0080 /* UV_LDO8_EINT */
+#define WM831X_UV_LDO8_EINT_SHIFT 7 /* UV_LDO8_EINT */
+#define WM831X_UV_LDO8_EINT_WIDTH 1 /* UV_LDO8_EINT */
+#define WM831X_UV_LDO7_EINT 0x0040 /* UV_LDO7_EINT */
+#define WM831X_UV_LDO7_EINT_MASK 0x0040 /* UV_LDO7_EINT */
+#define WM831X_UV_LDO7_EINT_SHIFT 6 /* UV_LDO7_EINT */
+#define WM831X_UV_LDO7_EINT_WIDTH 1 /* UV_LDO7_EINT */
+#define WM831X_UV_LDO6_EINT 0x0020 /* UV_LDO6_EINT */
+#define WM831X_UV_LDO6_EINT_MASK 0x0020 /* UV_LDO6_EINT */
+#define WM831X_UV_LDO6_EINT_SHIFT 5 /* UV_LDO6_EINT */
+#define WM831X_UV_LDO6_EINT_WIDTH 1 /* UV_LDO6_EINT */
+#define WM831X_UV_LDO5_EINT 0x0010 /* UV_LDO5_EINT */
+#define WM831X_UV_LDO5_EINT_MASK 0x0010 /* UV_LDO5_EINT */
+#define WM831X_UV_LDO5_EINT_SHIFT 4 /* UV_LDO5_EINT */
+#define WM831X_UV_LDO5_EINT_WIDTH 1 /* UV_LDO5_EINT */
+#define WM831X_UV_LDO4_EINT 0x0008 /* UV_LDO4_EINT */
+#define WM831X_UV_LDO4_EINT_MASK 0x0008 /* UV_LDO4_EINT */
+#define WM831X_UV_LDO4_EINT_SHIFT 3 /* UV_LDO4_EINT */
+#define WM831X_UV_LDO4_EINT_WIDTH 1 /* UV_LDO4_EINT */
+#define WM831X_UV_LDO3_EINT 0x0004 /* UV_LDO3_EINT */
+#define WM831X_UV_LDO3_EINT_MASK 0x0004 /* UV_LDO3_EINT */
+#define WM831X_UV_LDO3_EINT_SHIFT 2 /* UV_LDO3_EINT */
+#define WM831X_UV_LDO3_EINT_WIDTH 1 /* UV_LDO3_EINT */
+#define WM831X_UV_LDO2_EINT 0x0002 /* UV_LDO2_EINT */
+#define WM831X_UV_LDO2_EINT_MASK 0x0002 /* UV_LDO2_EINT */
+#define WM831X_UV_LDO2_EINT_SHIFT 1 /* UV_LDO2_EINT */
+#define WM831X_UV_LDO2_EINT_WIDTH 1 /* UV_LDO2_EINT */
+#define WM831X_UV_LDO1_EINT 0x0001 /* UV_LDO1_EINT */
+#define WM831X_UV_LDO1_EINT_MASK 0x0001 /* UV_LDO1_EINT */
+#define WM831X_UV_LDO1_EINT_SHIFT 0 /* UV_LDO1_EINT */
+#define WM831X_UV_LDO1_EINT_WIDTH 1 /* UV_LDO1_EINT */
+
+/*
+ * R16404 (0x4014) - Interrupt Status 4
+ */
+#define WM831X_HC_DC2_EINT 0x0200 /* HC_DC2_EINT */
+#define WM831X_HC_DC2_EINT_MASK 0x0200 /* HC_DC2_EINT */
+#define WM831X_HC_DC2_EINT_SHIFT 9 /* HC_DC2_EINT */
+#define WM831X_HC_DC2_EINT_WIDTH 1 /* HC_DC2_EINT */
+#define WM831X_HC_DC1_EINT 0x0100 /* HC_DC1_EINT */
+#define WM831X_HC_DC1_EINT_MASK 0x0100 /* HC_DC1_EINT */
+#define WM831X_HC_DC1_EINT_SHIFT 8 /* HC_DC1_EINT */
+#define WM831X_HC_DC1_EINT_WIDTH 1 /* HC_DC1_EINT */
+#define WM831X_UV_DC4_EINT 0x0008 /* UV_DC4_EINT */
+#define WM831X_UV_DC4_EINT_MASK 0x0008 /* UV_DC4_EINT */
+#define WM831X_UV_DC4_EINT_SHIFT 3 /* UV_DC4_EINT */
+#define WM831X_UV_DC4_EINT_WIDTH 1 /* UV_DC4_EINT */
+#define WM831X_UV_DC3_EINT 0x0004 /* UV_DC3_EINT */
+#define WM831X_UV_DC3_EINT_MASK 0x0004 /* UV_DC3_EINT */
+#define WM831X_UV_DC3_EINT_SHIFT 2 /* UV_DC3_EINT */
+#define WM831X_UV_DC3_EINT_WIDTH 1 /* UV_DC3_EINT */
+#define WM831X_UV_DC2_EINT 0x0002 /* UV_DC2_EINT */
+#define WM831X_UV_DC2_EINT_MASK 0x0002 /* UV_DC2_EINT */
+#define WM831X_UV_DC2_EINT_SHIFT 1 /* UV_DC2_EINT */
+#define WM831X_UV_DC2_EINT_WIDTH 1 /* UV_DC2_EINT */
+#define WM831X_UV_DC1_EINT 0x0001 /* UV_DC1_EINT */
+#define WM831X_UV_DC1_EINT_MASK 0x0001 /* UV_DC1_EINT */
+#define WM831X_UV_DC1_EINT_SHIFT 0 /* UV_DC1_EINT */
+#define WM831X_UV_DC1_EINT_WIDTH 1 /* UV_DC1_EINT */
+
+/*
+ * R16405 (0x4015) - Interrupt Status 5
+ */
+#define WM831X_GP16_EINT 0x8000 /* GP16_EINT */
+#define WM831X_GP16_EINT_MASK 0x8000 /* GP16_EINT */
+#define WM831X_GP16_EINT_SHIFT 15 /* GP16_EINT */
+#define WM831X_GP16_EINT_WIDTH 1 /* GP16_EINT */
+#define WM831X_GP15_EINT 0x4000 /* GP15_EINT */
+#define WM831X_GP15_EINT_MASK 0x4000 /* GP15_EINT */
+#define WM831X_GP15_EINT_SHIFT 14 /* GP15_EINT */
+#define WM831X_GP15_EINT_WIDTH 1 /* GP15_EINT */
+#define WM831X_GP14_EINT 0x2000 /* GP14_EINT */
+#define WM831X_GP14_EINT_MASK 0x2000 /* GP14_EINT */
+#define WM831X_GP14_EINT_SHIFT 13 /* GP14_EINT */
+#define WM831X_GP14_EINT_WIDTH 1 /* GP14_EINT */
+#define WM831X_GP13_EINT 0x1000 /* GP13_EINT */
+#define WM831X_GP13_EINT_MASK 0x1000 /* GP13_EINT */
+#define WM831X_GP13_EINT_SHIFT 12 /* GP13_EINT */
+#define WM831X_GP13_EINT_WIDTH 1 /* GP13_EINT */
+#define WM831X_GP12_EINT 0x0800 /* GP12_EINT */
+#define WM831X_GP12_EINT_MASK 0x0800 /* GP12_EINT */
+#define WM831X_GP12_EINT_SHIFT 11 /* GP12_EINT */
+#define WM831X_GP12_EINT_WIDTH 1 /* GP12_EINT */
+#define WM831X_GP11_EINT 0x0400 /* GP11_EINT */
+#define WM831X_GP11_EINT_MASK 0x0400 /* GP11_EINT */
+#define WM831X_GP11_EINT_SHIFT 10 /* GP11_EINT */
+#define WM831X_GP11_EINT_WIDTH 1 /* GP11_EINT */
+#define WM831X_GP10_EINT 0x0200 /* GP10_EINT */
+#define WM831X_GP10_EINT_MASK 0x0200 /* GP10_EINT */
+#define WM831X_GP10_EINT_SHIFT 9 /* GP10_EINT */
+#define WM831X_GP10_EINT_WIDTH 1 /* GP10_EINT */
+#define WM831X_GP9_EINT 0x0100 /* GP9_EINT */
+#define WM831X_GP9_EINT_MASK 0x0100 /* GP9_EINT */
+#define WM831X_GP9_EINT_SHIFT 8 /* GP9_EINT */
+#define WM831X_GP9_EINT_WIDTH 1 /* GP9_EINT */
+#define WM831X_GP8_EINT 0x0080 /* GP8_EINT */
+#define WM831X_GP8_EINT_MASK 0x0080 /* GP8_EINT */
+#define WM831X_GP8_EINT_SHIFT 7 /* GP8_EINT */
+#define WM831X_GP8_EINT_WIDTH 1 /* GP8_EINT */
+#define WM831X_GP7_EINT 0x0040 /* GP7_EINT */
+#define WM831X_GP7_EINT_MASK 0x0040 /* GP7_EINT */
+#define WM831X_GP7_EINT_SHIFT 6 /* GP7_EINT */
+#define WM831X_GP7_EINT_WIDTH 1 /* GP7_EINT */
+#define WM831X_GP6_EINT 0x0020 /* GP6_EINT */
+#define WM831X_GP6_EINT_MASK 0x0020 /* GP6_EINT */
+#define WM831X_GP6_EINT_SHIFT 5 /* GP6_EINT */
+#define WM831X_GP6_EINT_WIDTH 1 /* GP6_EINT */
+#define WM831X_GP5_EINT 0x0010 /* GP5_EINT */
+#define WM831X_GP5_EINT_MASK 0x0010 /* GP5_EINT */
+#define WM831X_GP5_EINT_SHIFT 4 /* GP5_EINT */
+#define WM831X_GP5_EINT_WIDTH 1 /* GP5_EINT */
+#define WM831X_GP4_EINT 0x0008 /* GP4_EINT */
+#define WM831X_GP4_EINT_MASK 0x0008 /* GP4_EINT */
+#define WM831X_GP4_EINT_SHIFT 3 /* GP4_EINT */
+#define WM831X_GP4_EINT_WIDTH 1 /* GP4_EINT */
+#define WM831X_GP3_EINT 0x0004 /* GP3_EINT */
+#define WM831X_GP3_EINT_MASK 0x0004 /* GP3_EINT */
+#define WM831X_GP3_EINT_SHIFT 2 /* GP3_EINT */
+#define WM831X_GP3_EINT_WIDTH 1 /* GP3_EINT */
+#define WM831X_GP2_EINT 0x0002 /* GP2_EINT */
+#define WM831X_GP2_EINT_MASK 0x0002 /* GP2_EINT */
+#define WM831X_GP2_EINT_SHIFT 1 /* GP2_EINT */
+#define WM831X_GP2_EINT_WIDTH 1 /* GP2_EINT */
+#define WM831X_GP1_EINT 0x0001 /* GP1_EINT */
+#define WM831X_GP1_EINT_MASK 0x0001 /* GP1_EINT */
+#define WM831X_GP1_EINT_SHIFT 0 /* GP1_EINT */
+#define WM831X_GP1_EINT_WIDTH 1 /* GP1_EINT */
+
+/*
+ * R16407 (0x4017) - IRQ Config
+ */
+#define WM831X_IRQ_OD 0x0002 /* IRQ_OD */
+#define WM831X_IRQ_OD_MASK 0x0002 /* IRQ_OD */
+#define WM831X_IRQ_OD_SHIFT 1 /* IRQ_OD */
+#define WM831X_IRQ_OD_WIDTH 1 /* IRQ_OD */
+#define WM831X_IM_IRQ 0x0001 /* IM_IRQ */
+#define WM831X_IM_IRQ_MASK 0x0001 /* IM_IRQ */
+#define WM831X_IM_IRQ_SHIFT 0 /* IM_IRQ */
+#define WM831X_IM_IRQ_WIDTH 1 /* IM_IRQ */
+
+/*
+ * R16408 (0x4018) - System Interrupts Mask
+ */
+#define WM831X_IM_PS_INT 0x8000 /* IM_PS_INT */
+#define WM831X_IM_PS_INT_MASK 0x8000 /* IM_PS_INT */
+#define WM831X_IM_PS_INT_SHIFT 15 /* IM_PS_INT */
+#define WM831X_IM_PS_INT_WIDTH 1 /* IM_PS_INT */
+#define WM831X_IM_TEMP_INT 0x4000 /* IM_TEMP_INT */
+#define WM831X_IM_TEMP_INT_MASK 0x4000 /* IM_TEMP_INT */
+#define WM831X_IM_TEMP_INT_SHIFT 14 /* IM_TEMP_INT */
+#define WM831X_IM_TEMP_INT_WIDTH 1 /* IM_TEMP_INT */
+#define WM831X_IM_GP_INT 0x2000 /* IM_GP_INT */
+#define WM831X_IM_GP_INT_MASK 0x2000 /* IM_GP_INT */
+#define WM831X_IM_GP_INT_SHIFT 13 /* IM_GP_INT */
+#define WM831X_IM_GP_INT_WIDTH 1 /* IM_GP_INT */
+#define WM831X_IM_ON_PIN_INT 0x1000 /* IM_ON_PIN_INT */
+#define WM831X_IM_ON_PIN_INT_MASK 0x1000 /* IM_ON_PIN_INT */
+#define WM831X_IM_ON_PIN_INT_SHIFT 12 /* IM_ON_PIN_INT */
+#define WM831X_IM_ON_PIN_INT_WIDTH 1 /* IM_ON_PIN_INT */
+#define WM831X_IM_WDOG_INT 0x0800 /* IM_WDOG_INT */
+#define WM831X_IM_WDOG_INT_MASK 0x0800 /* IM_WDOG_INT */
+#define WM831X_IM_WDOG_INT_SHIFT 11 /* IM_WDOG_INT */
+#define WM831X_IM_WDOG_INT_WIDTH 1 /* IM_WDOG_INT */
+#define WM831X_IM_TCHDATA_INT 0x0400 /* IM_TCHDATA_INT */
+#define WM831X_IM_TCHDATA_INT_MASK 0x0400 /* IM_TCHDATA_INT */
+#define WM831X_IM_TCHDATA_INT_SHIFT 10 /* IM_TCHDATA_INT */
+#define WM831X_IM_TCHDATA_INT_WIDTH 1 /* IM_TCHDATA_INT */
+#define WM831X_IM_TCHPD_INT 0x0200 /* IM_TCHPD_INT */
+#define WM831X_IM_TCHPD_INT_MASK 0x0200 /* IM_TCHPD_INT */
+#define WM831X_IM_TCHPD_INT_SHIFT 9 /* IM_TCHPD_INT */
+#define WM831X_IM_TCHPD_INT_WIDTH 1 /* IM_TCHPD_INT */
+#define WM831X_IM_AUXADC_INT 0x0100 /* IM_AUXADC_INT */
+#define WM831X_IM_AUXADC_INT_MASK 0x0100 /* IM_AUXADC_INT */
+#define WM831X_IM_AUXADC_INT_SHIFT 8 /* IM_AUXADC_INT */
+#define WM831X_IM_AUXADC_INT_WIDTH 1 /* IM_AUXADC_INT */
+#define WM831X_IM_PPM_INT 0x0080 /* IM_PPM_INT */
+#define WM831X_IM_PPM_INT_MASK 0x0080 /* IM_PPM_INT */
+#define WM831X_IM_PPM_INT_SHIFT 7 /* IM_PPM_INT */
+#define WM831X_IM_PPM_INT_WIDTH 1 /* IM_PPM_INT */
+#define WM831X_IM_CS_INT 0x0040 /* IM_CS_INT */
+#define WM831X_IM_CS_INT_MASK 0x0040 /* IM_CS_INT */
+#define WM831X_IM_CS_INT_SHIFT 6 /* IM_CS_INT */
+#define WM831X_IM_CS_INT_WIDTH 1 /* IM_CS_INT */
+#define WM831X_IM_RTC_INT 0x0020 /* IM_RTC_INT */
+#define WM831X_IM_RTC_INT_MASK 0x0020 /* IM_RTC_INT */
+#define WM831X_IM_RTC_INT_SHIFT 5 /* IM_RTC_INT */
+#define WM831X_IM_RTC_INT_WIDTH 1 /* IM_RTC_INT */
+#define WM831X_IM_OTP_INT 0x0010 /* IM_OTP_INT */
+#define WM831X_IM_OTP_INT_MASK 0x0010 /* IM_OTP_INT */
+#define WM831X_IM_OTP_INT_SHIFT 4 /* IM_OTP_INT */
+#define WM831X_IM_OTP_INT_WIDTH 1 /* IM_OTP_INT */
+#define WM831X_IM_CHILD_INT 0x0008 /* IM_CHILD_INT */
+#define WM831X_IM_CHILD_INT_MASK 0x0008 /* IM_CHILD_INT */
+#define WM831X_IM_CHILD_INT_SHIFT 3 /* IM_CHILD_INT */
+#define WM831X_IM_CHILD_INT_WIDTH 1 /* IM_CHILD_INT */
+#define WM831X_IM_CHG_INT 0x0004 /* IM_CHG_INT */
+#define WM831X_IM_CHG_INT_MASK 0x0004 /* IM_CHG_INT */
+#define WM831X_IM_CHG_INT_SHIFT 2 /* IM_CHG_INT */
+#define WM831X_IM_CHG_INT_WIDTH 1 /* IM_CHG_INT */
+#define WM831X_IM_HC_INT 0x0002 /* IM_HC_INT */
+#define WM831X_IM_HC_INT_MASK 0x0002 /* IM_HC_INT */
+#define WM831X_IM_HC_INT_SHIFT 1 /* IM_HC_INT */
+#define WM831X_IM_HC_INT_WIDTH 1 /* IM_HC_INT */
+#define WM831X_IM_UV_INT 0x0001 /* IM_UV_INT */
+#define WM831X_IM_UV_INT_MASK 0x0001 /* IM_UV_INT */
+#define WM831X_IM_UV_INT_SHIFT 0 /* IM_UV_INT */
+#define WM831X_IM_UV_INT_WIDTH 1 /* IM_UV_INT */
+
+/*
+ * R16409 (0x4019) - Interrupt Status 1 Mask
+ */
+#define WM831X_IM_PPM_SYSLO_EINT 0x8000 /* IM_PPM_SYSLO_EINT */
+#define WM831X_IM_PPM_SYSLO_EINT_MASK 0x8000 /* IM_PPM_SYSLO_EINT */
+#define WM831X_IM_PPM_SYSLO_EINT_SHIFT 15 /* IM_PPM_SYSLO_EINT */
+#define WM831X_IM_PPM_SYSLO_EINT_WIDTH 1 /* IM_PPM_SYSLO_EINT */
+#define WM831X_IM_PPM_PWR_SRC_EINT 0x4000 /* IM_PPM_PWR_SRC_EINT */
+#define WM831X_IM_PPM_PWR_SRC_EINT_MASK 0x4000 /* IM_PPM_PWR_SRC_EINT */
+#define WM831X_IM_PPM_PWR_SRC_EINT_SHIFT 14 /* IM_PPM_PWR_SRC_EINT */
+#define WM831X_IM_PPM_PWR_SRC_EINT_WIDTH 1 /* IM_PPM_PWR_SRC_EINT */
+#define WM831X_IM_PPM_USB_CURR_EINT 0x2000 /* IM_PPM_USB_CURR_EINT */
+#define WM831X_IM_PPM_USB_CURR_EINT_MASK 0x2000 /* IM_PPM_USB_CURR_EINT */
+#define WM831X_IM_PPM_USB_CURR_EINT_SHIFT 13 /* IM_PPM_USB_CURR_EINT */
+#define WM831X_IM_PPM_USB_CURR_EINT_WIDTH 1 /* IM_PPM_USB_CURR_EINT */
+#define WM831X_IM_ON_PIN_EINT 0x1000 /* IM_ON_PIN_EINT */
+#define WM831X_IM_ON_PIN_EINT_MASK 0x1000 /* IM_ON_PIN_EINT */
+#define WM831X_IM_ON_PIN_EINT_SHIFT 12 /* IM_ON_PIN_EINT */
+#define WM831X_IM_ON_PIN_EINT_WIDTH 1 /* IM_ON_PIN_EINT */
+#define WM831X_IM_WDOG_TO_EINT 0x0800 /* IM_WDOG_TO_EINT */
+#define WM831X_IM_WDOG_TO_EINT_MASK 0x0800 /* IM_WDOG_TO_EINT */
+#define WM831X_IM_WDOG_TO_EINT_SHIFT 11 /* IM_WDOG_TO_EINT */
+#define WM831X_IM_WDOG_TO_EINT_WIDTH 1 /* IM_WDOG_TO_EINT */
+#define WM831X_IM_TCHDATA_EINT 0x0400 /* IM_TCHDATA_EINT */
+#define WM831X_IM_TCHDATA_EINT_MASK 0x0400 /* IM_TCHDATA_EINT */
+#define WM831X_IM_TCHDATA_EINT_SHIFT 10 /* IM_TCHDATA_EINT */
+#define WM831X_IM_TCHDATA_EINT_WIDTH 1 /* IM_TCHDATA_EINT */
+#define WM831X_IM_TCHPD_EINT 0x0200 /* IM_TCHPD_EINT */
+#define WM831X_IM_TCHPD_EINT_MASK 0x0200 /* IM_TCHPD_EINT */
+#define WM831X_IM_TCHPD_EINT_SHIFT 9 /* IM_TCHPD_EINT */
+#define WM831X_IM_TCHPD_EINT_WIDTH 1 /* IM_TCHPD_EINT */
+#define WM831X_IM_AUXADC_DATA_EINT 0x0100 /* IM_AUXADC_DATA_EINT */
+#define WM831X_IM_AUXADC_DATA_EINT_MASK 0x0100 /* IM_AUXADC_DATA_EINT */
+#define WM831X_IM_AUXADC_DATA_EINT_SHIFT 8 /* IM_AUXADC_DATA_EINT */
+#define WM831X_IM_AUXADC_DATA_EINT_WIDTH 1 /* IM_AUXADC_DATA_EINT */
+#define WM831X_IM_AUXADC_DCOMP4_EINT 0x0080 /* IM_AUXADC_DCOMP4_EINT */
+#define WM831X_IM_AUXADC_DCOMP4_EINT_MASK 0x0080 /* IM_AUXADC_DCOMP4_EINT */
+#define WM831X_IM_AUXADC_DCOMP4_EINT_SHIFT 7 /* IM_AUXADC_DCOMP4_EINT */
+#define WM831X_IM_AUXADC_DCOMP4_EINT_WIDTH 1 /* IM_AUXADC_DCOMP4_EINT */
+#define WM831X_IM_AUXADC_DCOMP3_EINT 0x0040 /* IM_AUXADC_DCOMP3_EINT */
+#define WM831X_IM_AUXADC_DCOMP3_EINT_MASK 0x0040 /* IM_AUXADC_DCOMP3_EINT */
+#define WM831X_IM_AUXADC_DCOMP3_EINT_SHIFT 6 /* IM_AUXADC_DCOMP3_EINT */
+#define WM831X_IM_AUXADC_DCOMP3_EINT_WIDTH 1 /* IM_AUXADC_DCOMP3_EINT */
+#define WM831X_IM_AUXADC_DCOMP2_EINT 0x0020 /* IM_AUXADC_DCOMP2_EINT */
+#define WM831X_IM_AUXADC_DCOMP2_EINT_MASK 0x0020 /* IM_AUXADC_DCOMP2_EINT */
+#define WM831X_IM_AUXADC_DCOMP2_EINT_SHIFT 5 /* IM_AUXADC_DCOMP2_EINT */
+#define WM831X_IM_AUXADC_DCOMP2_EINT_WIDTH 1 /* IM_AUXADC_DCOMP2_EINT */
+#define WM831X_IM_AUXADC_DCOMP1_EINT 0x0010 /* IM_AUXADC_DCOMP1_EINT */
+#define WM831X_IM_AUXADC_DCOMP1_EINT_MASK 0x0010 /* IM_AUXADC_DCOMP1_EINT */
+#define WM831X_IM_AUXADC_DCOMP1_EINT_SHIFT 4 /* IM_AUXADC_DCOMP1_EINT */
+#define WM831X_IM_AUXADC_DCOMP1_EINT_WIDTH 1 /* IM_AUXADC_DCOMP1_EINT */
+#define WM831X_IM_RTC_PER_EINT 0x0008 /* IM_RTC_PER_EINT */
+#define WM831X_IM_RTC_PER_EINT_MASK 0x0008 /* IM_RTC_PER_EINT */
+#define WM831X_IM_RTC_PER_EINT_SHIFT 3 /* IM_RTC_PER_EINT */
+#define WM831X_IM_RTC_PER_EINT_WIDTH 1 /* IM_RTC_PER_EINT */
+#define WM831X_IM_RTC_ALM_EINT 0x0004 /* IM_RTC_ALM_EINT */
+#define WM831X_IM_RTC_ALM_EINT_MASK 0x0004 /* IM_RTC_ALM_EINT */
+#define WM831X_IM_RTC_ALM_EINT_SHIFT 2 /* IM_RTC_ALM_EINT */
+#define WM831X_IM_RTC_ALM_EINT_WIDTH 1 /* IM_RTC_ALM_EINT */
+#define WM831X_IM_TEMP_THW_EINT 0x0002 /* IM_TEMP_THW_EINT */
+#define WM831X_IM_TEMP_THW_EINT_MASK 0x0002 /* IM_TEMP_THW_EINT */
+#define WM831X_IM_TEMP_THW_EINT_SHIFT 1 /* IM_TEMP_THW_EINT */
+#define WM831X_IM_TEMP_THW_EINT_WIDTH 1 /* IM_TEMP_THW_EINT */
+
+/*
+ * R16410 (0x401A) - Interrupt Status 2 Mask
+ */
+#define WM831X_IM_CHG_BATT_HOT_EINT 0x8000 /* IM_CHG_BATT_HOT_EINT */
+#define WM831X_IM_CHG_BATT_HOT_EINT_MASK 0x8000 /* IM_CHG_BATT_HOT_EINT */
+#define WM831X_IM_CHG_BATT_HOT_EINT_SHIFT 15 /* IM_CHG_BATT_HOT_EINT */
+#define WM831X_IM_CHG_BATT_HOT_EINT_WIDTH 1 /* IM_CHG_BATT_HOT_EINT */
+#define WM831X_IM_CHG_BATT_COLD_EINT 0x4000 /* IM_CHG_BATT_COLD_EINT */
+#define WM831X_IM_CHG_BATT_COLD_EINT_MASK 0x4000 /* IM_CHG_BATT_COLD_EINT */
+#define WM831X_IM_CHG_BATT_COLD_EINT_SHIFT 14 /* IM_CHG_BATT_COLD_EINT */
+#define WM831X_IM_CHG_BATT_COLD_EINT_WIDTH 1 /* IM_CHG_BATT_COLD_EINT */
+#define WM831X_IM_CHG_BATT_FAIL_EINT 0x2000 /* IM_CHG_BATT_FAIL_EINT */
+#define WM831X_IM_CHG_BATT_FAIL_EINT_MASK 0x2000 /* IM_CHG_BATT_FAIL_EINT */
+#define WM831X_IM_CHG_BATT_FAIL_EINT_SHIFT 13 /* IM_CHG_BATT_FAIL_EINT */
+#define WM831X_IM_CHG_BATT_FAIL_EINT_WIDTH 1 /* IM_CHG_BATT_FAIL_EINT */
+#define WM831X_IM_CHG_OV_EINT 0x1000 /* IM_CHG_OV_EINT */
+#define WM831X_IM_CHG_OV_EINT_MASK 0x1000 /* IM_CHG_OV_EINT */
+#define WM831X_IM_CHG_OV_EINT_SHIFT 12 /* IM_CHG_OV_EINT */
+#define WM831X_IM_CHG_OV_EINT_WIDTH 1 /* IM_CHG_OV_EINT */
+#define WM831X_IM_CHG_END_EINT 0x0800 /* IM_CHG_END_EINT */
+#define WM831X_IM_CHG_END_EINT_MASK 0x0800 /* IM_CHG_END_EINT */
+#define WM831X_IM_CHG_END_EINT_SHIFT 11 /* IM_CHG_END_EINT */
+#define WM831X_IM_CHG_END_EINT_WIDTH 1 /* IM_CHG_END_EINT */
+#define WM831X_IM_CHG_TO_EINT 0x0400 /* IM_CHG_TO_EINT */
+#define WM831X_IM_CHG_TO_EINT_MASK 0x0400 /* IM_CHG_TO_EINT */
+#define WM831X_IM_CHG_TO_EINT_SHIFT 10 /* IM_CHG_TO_EINT */
+#define WM831X_IM_CHG_TO_EINT_WIDTH 1 /* IM_CHG_TO_EINT */
+#define WM831X_IM_CHG_MODE_EINT 0x0200 /* IM_CHG_MODE_EINT */
+#define WM831X_IM_CHG_MODE_EINT_MASK 0x0200 /* IM_CHG_MODE_EINT */
+#define WM831X_IM_CHG_MODE_EINT_SHIFT 9 /* IM_CHG_MODE_EINT */
+#define WM831X_IM_CHG_MODE_EINT_WIDTH 1 /* IM_CHG_MODE_EINT */
+#define WM831X_IM_CHG_START_EINT 0x0100 /* IM_CHG_START_EINT */
+#define WM831X_IM_CHG_START_EINT_MASK 0x0100 /* IM_CHG_START_EINT */
+#define WM831X_IM_CHG_START_EINT_SHIFT 8 /* IM_CHG_START_EINT */
+#define WM831X_IM_CHG_START_EINT_WIDTH 1 /* IM_CHG_START_EINT */
+#define WM831X_IM_CS2_EINT 0x0080 /* IM_CS2_EINT */
+#define WM831X_IM_CS2_EINT_MASK 0x0080 /* IM_CS2_EINT */
+#define WM831X_IM_CS2_EINT_SHIFT 7 /* IM_CS2_EINT */
+#define WM831X_IM_CS2_EINT_WIDTH 1 /* IM_CS2_EINT */
+#define WM831X_IM_CS1_EINT 0x0040 /* IM_CS1_EINT */
+#define WM831X_IM_CS1_EINT_MASK 0x0040 /* IM_CS1_EINT */
+#define WM831X_IM_CS1_EINT_SHIFT 6 /* IM_CS1_EINT */
+#define WM831X_IM_CS1_EINT_WIDTH 1 /* IM_CS1_EINT */
+#define WM831X_IM_OTP_CMD_END_EINT 0x0020 /* IM_OTP_CMD_END_EINT */
+#define WM831X_IM_OTP_CMD_END_EINT_MASK 0x0020 /* IM_OTP_CMD_END_EINT */
+#define WM831X_IM_OTP_CMD_END_EINT_SHIFT 5 /* IM_OTP_CMD_END_EINT */
+#define WM831X_IM_OTP_CMD_END_EINT_WIDTH 1 /* IM_OTP_CMD_END_EINT */
+#define WM831X_IM_OTP_ERR_EINT 0x0010 /* IM_OTP_ERR_EINT */
+#define WM831X_IM_OTP_ERR_EINT_MASK 0x0010 /* IM_OTP_ERR_EINT */
+#define WM831X_IM_OTP_ERR_EINT_SHIFT 4 /* IM_OTP_ERR_EINT */
+#define WM831X_IM_OTP_ERR_EINT_WIDTH 1 /* IM_OTP_ERR_EINT */
+#define WM831X_IM_PS_POR_EINT 0x0004 /* IM_PS_POR_EINT */
+#define WM831X_IM_PS_POR_EINT_MASK 0x0004 /* IM_PS_POR_EINT */
+#define WM831X_IM_PS_POR_EINT_SHIFT 2 /* IM_PS_POR_EINT */
+#define WM831X_IM_PS_POR_EINT_WIDTH 1 /* IM_PS_POR_EINT */
+#define WM831X_IM_PS_SLEEP_OFF_EINT 0x0002 /* IM_PS_SLEEP_OFF_EINT */
+#define WM831X_IM_PS_SLEEP_OFF_EINT_MASK 0x0002 /* IM_PS_SLEEP_OFF_EINT */
+#define WM831X_IM_PS_SLEEP_OFF_EINT_SHIFT 1 /* IM_PS_SLEEP_OFF_EINT */
+#define WM831X_IM_PS_SLEEP_OFF_EINT_WIDTH 1 /* IM_PS_SLEEP_OFF_EINT */
+#define WM831X_IM_PS_ON_WAKE_EINT 0x0001 /* IM_PS_ON_WAKE_EINT */
+#define WM831X_IM_PS_ON_WAKE_EINT_MASK 0x0001 /* IM_PS_ON_WAKE_EINT */
+#define WM831X_IM_PS_ON_WAKE_EINT_SHIFT 0 /* IM_PS_ON_WAKE_EINT */
+#define WM831X_IM_PS_ON_WAKE_EINT_WIDTH 1 /* IM_PS_ON_WAKE_EINT */
+
+/*
+ * R16411 (0x401B) - Interrupt Status 3 Mask
+ */
+#define WM831X_IM_UV_LDO10_EINT 0x0200 /* IM_UV_LDO10_EINT */
+#define WM831X_IM_UV_LDO10_EINT_MASK 0x0200 /* IM_UV_LDO10_EINT */
+#define WM831X_IM_UV_LDO10_EINT_SHIFT 9 /* IM_UV_LDO10_EINT */
+#define WM831X_IM_UV_LDO10_EINT_WIDTH 1 /* IM_UV_LDO10_EINT */
+#define WM831X_IM_UV_LDO9_EINT 0x0100 /* IM_UV_LDO9_EINT */
+#define WM831X_IM_UV_LDO9_EINT_MASK 0x0100 /* IM_UV_LDO9_EINT */
+#define WM831X_IM_UV_LDO9_EINT_SHIFT 8 /* IM_UV_LDO9_EINT */
+#define WM831X_IM_UV_LDO9_EINT_WIDTH 1 /* IM_UV_LDO9_EINT */
+#define WM831X_IM_UV_LDO8_EINT 0x0080 /* IM_UV_LDO8_EINT */
+#define WM831X_IM_UV_LDO8_EINT_MASK 0x0080 /* IM_UV_LDO8_EINT */
+#define WM831X_IM_UV_LDO8_EINT_SHIFT 7 /* IM_UV_LDO8_EINT */
+#define WM831X_IM_UV_LDO8_EINT_WIDTH 1 /* IM_UV_LDO8_EINT */
+#define WM831X_IM_UV_LDO7_EINT 0x0040 /* IM_UV_LDO7_EINT */
+#define WM831X_IM_UV_LDO7_EINT_MASK 0x0040 /* IM_UV_LDO7_EINT */
+#define WM831X_IM_UV_LDO7_EINT_SHIFT 6 /* IM_UV_LDO7_EINT */
+#define WM831X_IM_UV_LDO7_EINT_WIDTH 1 /* IM_UV_LDO7_EINT */
+#define WM831X_IM_UV_LDO6_EINT 0x0020 /* IM_UV_LDO6_EINT */
+#define WM831X_IM_UV_LDO6_EINT_MASK 0x0020 /* IM_UV_LDO6_EINT */
+#define WM831X_IM_UV_LDO6_EINT_SHIFT 5 /* IM_UV_LDO6_EINT */
+#define WM831X_IM_UV_LDO6_EINT_WIDTH 1 /* IM_UV_LDO6_EINT */
+#define WM831X_IM_UV_LDO5_EINT 0x0010 /* IM_UV_LDO5_EINT */
+#define WM831X_IM_UV_LDO5_EINT_MASK 0x0010 /* IM_UV_LDO5_EINT */
+#define WM831X_IM_UV_LDO5_EINT_SHIFT 4 /* IM_UV_LDO5_EINT */
+#define WM831X_IM_UV_LDO5_EINT_WIDTH 1 /* IM_UV_LDO5_EINT */
+#define WM831X_IM_UV_LDO4_EINT 0x0008 /* IM_UV_LDO4_EINT */
+#define WM831X_IM_UV_LDO4_EINT_MASK 0x0008 /* IM_UV_LDO4_EINT */
+#define WM831X_IM_UV_LDO4_EINT_SHIFT 3 /* IM_UV_LDO4_EINT */
+#define WM831X_IM_UV_LDO4_EINT_WIDTH 1 /* IM_UV_LDO4_EINT */
+#define WM831X_IM_UV_LDO3_EINT 0x0004 /* IM_UV_LDO3_EINT */
+#define WM831X_IM_UV_LDO3_EINT_MASK 0x0004 /* IM_UV_LDO3_EINT */
+#define WM831X_IM_UV_LDO3_EINT_SHIFT 2 /* IM_UV_LDO3_EINT */
+#define WM831X_IM_UV_LDO3_EINT_WIDTH 1 /* IM_UV_LDO3_EINT */
+#define WM831X_IM_UV_LDO2_EINT 0x0002 /* IM_UV_LDO2_EINT */
+#define WM831X_IM_UV_LDO2_EINT_MASK 0x0002 /* IM_UV_LDO2_EINT */
+#define WM831X_IM_UV_LDO2_EINT_SHIFT 1 /* IM_UV_LDO2_EINT */
+#define WM831X_IM_UV_LDO2_EINT_WIDTH 1 /* IM_UV_LDO2_EINT */
+#define WM831X_IM_UV_LDO1_EINT 0x0001 /* IM_UV_LDO1_EINT */
+#define WM831X_IM_UV_LDO1_EINT_MASK 0x0001 /* IM_UV_LDO1_EINT */
+#define WM831X_IM_UV_LDO1_EINT_SHIFT 0 /* IM_UV_LDO1_EINT */
+#define WM831X_IM_UV_LDO1_EINT_WIDTH 1 /* IM_UV_LDO1_EINT */
+
+/*
+ * R16412 (0x401C) - Interrupt Status 4 Mask
+ */
+#define WM831X_IM_HC_DC2_EINT 0x0200 /* IM_HC_DC2_EINT */
+#define WM831X_IM_HC_DC2_EINT_MASK 0x0200 /* IM_HC_DC2_EINT */
+#define WM831X_IM_HC_DC2_EINT_SHIFT 9 /* IM_HC_DC2_EINT */
+#define WM831X_IM_HC_DC2_EINT_WIDTH 1 /* IM_HC_DC2_EINT */
+#define WM831X_IM_HC_DC1_EINT 0x0100 /* IM_HC_DC1_EINT */
+#define WM831X_IM_HC_DC1_EINT_MASK 0x0100 /* IM_HC_DC1_EINT */
+#define WM831X_IM_HC_DC1_EINT_SHIFT 8 /* IM_HC_DC1_EINT */
+#define WM831X_IM_HC_DC1_EINT_WIDTH 1 /* IM_HC_DC1_EINT */
+#define WM831X_IM_UV_DC4_EINT 0x0008 /* IM_UV_DC4_EINT */
+#define WM831X_IM_UV_DC4_EINT_MASK 0x0008 /* IM_UV_DC4_EINT */
+#define WM831X_IM_UV_DC4_EINT_SHIFT 3 /* IM_UV_DC4_EINT */
+#define WM831X_IM_UV_DC4_EINT_WIDTH 1 /* IM_UV_DC4_EINT */
+#define WM831X_IM_UV_DC3_EINT 0x0004 /* IM_UV_DC3_EINT */
+#define WM831X_IM_UV_DC3_EINT_MASK 0x0004 /* IM_UV_DC3_EINT */
+#define WM831X_IM_UV_DC3_EINT_SHIFT 2 /* IM_UV_DC3_EINT */
+#define WM831X_IM_UV_DC3_EINT_WIDTH 1 /* IM_UV_DC3_EINT */
+#define WM831X_IM_UV_DC2_EINT 0x0002 /* IM_UV_DC2_EINT */
+#define WM831X_IM_UV_DC2_EINT_MASK 0x0002 /* IM_UV_DC2_EINT */
+#define WM831X_IM_UV_DC2_EINT_SHIFT 1 /* IM_UV_DC2_EINT */
+#define WM831X_IM_UV_DC2_EINT_WIDTH 1 /* IM_UV_DC2_EINT */
+#define WM831X_IM_UV_DC1_EINT 0x0001 /* IM_UV_DC1_EINT */
+#define WM831X_IM_UV_DC1_EINT_MASK 0x0001 /* IM_UV_DC1_EINT */
+#define WM831X_IM_UV_DC1_EINT_SHIFT 0 /* IM_UV_DC1_EINT */
+#define WM831X_IM_UV_DC1_EINT_WIDTH 1 /* IM_UV_DC1_EINT */
+
+/*
+ * R16413 (0x401D) - Interrupt Status 5 Mask
+ */
+#define WM831X_IM_GP16_EINT 0x8000 /* IM_GP16_EINT */
+#define WM831X_IM_GP16_EINT_MASK 0x8000 /* IM_GP16_EINT */
+#define WM831X_IM_GP16_EINT_SHIFT 15 /* IM_GP16_EINT */
+#define WM831X_IM_GP16_EINT_WIDTH 1 /* IM_GP16_EINT */
+#define WM831X_IM_GP15_EINT 0x4000 /* IM_GP15_EINT */
+#define WM831X_IM_GP15_EINT_MASK 0x4000 /* IM_GP15_EINT */
+#define WM831X_IM_GP15_EINT_SHIFT 14 /* IM_GP15_EINT */
+#define WM831X_IM_GP15_EINT_WIDTH 1 /* IM_GP15_EINT */
+#define WM831X_IM_GP14_EINT 0x2000 /* IM_GP14_EINT */
+#define WM831X_IM_GP14_EINT_MASK 0x2000 /* IM_GP14_EINT */
+#define WM831X_IM_GP14_EINT_SHIFT 13 /* IM_GP14_EINT */
+#define WM831X_IM_GP14_EINT_WIDTH 1 /* IM_GP14_EINT */
+#define WM831X_IM_GP13_EINT 0x1000 /* IM_GP13_EINT */
+#define WM831X_IM_GP13_EINT_MASK 0x1000 /* IM_GP13_EINT */
+#define WM831X_IM_GP13_EINT_SHIFT 12 /* IM_GP13_EINT */
+#define WM831X_IM_GP13_EINT_WIDTH 1 /* IM_GP13_EINT */
+#define WM831X_IM_GP12_EINT 0x0800 /* IM_GP12_EINT */
+#define WM831X_IM_GP12_EINT_MASK 0x0800 /* IM_GP12_EINT */
+#define WM831X_IM_GP12_EINT_SHIFT 11 /* IM_GP12_EINT */
+#define WM831X_IM_GP12_EINT_WIDTH 1 /* IM_GP12_EINT */
+#define WM831X_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */
+#define WM831X_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */
+#define WM831X_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */
+#define WM831X_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */
+#define WM831X_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */
+#define WM831X_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */
+#define WM831X_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */
+#define WM831X_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */
+#define WM831X_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */
+#define WM831X_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */
+#define WM831X_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */
+#define WM831X_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */
+#define WM831X_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */
+#define WM831X_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */
+#define WM831X_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */
+#define WM831X_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */
+#define WM831X_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */
+#define WM831X_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */
+#define WM831X_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */
+#define WM831X_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */
+#define WM831X_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */
+#define WM831X_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */
+#define WM831X_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */
+#define WM831X_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */
+#define WM831X_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */
+#define WM831X_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */
+#define WM831X_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */
+#define WM831X_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */
+#define WM831X_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */
+#define WM831X_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */
+#define WM831X_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */
+#define WM831X_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */
+#define WM831X_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */
+#define WM831X_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */
+#define WM831X_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */
+#define WM831X_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */
+#define WM831X_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */
+#define WM831X_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */
+#define WM831X_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */
+#define WM831X_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */
+#define WM831X_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */
+#define WM831X_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */
+#define WM831X_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */
+#define WM831X_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */
+
+
+#endif
diff --git a/include/linux/mfd/wm831x/otp.h b/include/linux/mfd/wm831x/otp.h
new file mode 100644
index 000000000..ce1f81a39
--- /dev/null
+++ b/include/linux/mfd/wm831x/otp.h
@@ -0,0 +1,162 @@
+/*
+ * include/linux/mfd/wm831x/otp.h -- OTP interface for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_OTP_H__
+#define __MFD_WM831X_OTP_H__
+
+int wm831x_otp_init(struct wm831x *wm831x);
+void wm831x_otp_exit(struct wm831x *wm831x);
+
+/*
+ * R30720 (0x7800) - Unique ID 1
+ */
+#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */
+
+/*
+ * R30721 (0x7801) - Unique ID 2
+ */
+#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */
+
+/*
+ * R30722 (0x7802) - Unique ID 3
+ */
+#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */
+
+/*
+ * R30723 (0x7803) - Unique ID 4
+ */
+#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */
+
+/*
+ * R30724 (0x7804) - Unique ID 5
+ */
+#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */
+
+/*
+ * R30725 (0x7805) - Unique ID 6
+ */
+#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */
+
+/*
+ * R30726 (0x7806) - Unique ID 7
+ */
+#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */
+
+/*
+ * R30727 (0x7807) - Unique ID 8
+ */
+#define WM831X_UNIQUE_ID_MASK 0xFFFF /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_SHIFT 0 /* UNIQUE_ID - [15:0] */
+#define WM831X_UNIQUE_ID_WIDTH 16 /* UNIQUE_ID - [15:0] */
+
+/*
+ * R30728 (0x7808) - Factory OTP ID
+ */
+#define WM831X_OTP_FACT_ID_MASK 0xFFFE /* OTP_FACT_ID - [15:1] */
+#define WM831X_OTP_FACT_ID_SHIFT 1 /* OTP_FACT_ID - [15:1] */
+#define WM831X_OTP_FACT_ID_WIDTH 15 /* OTP_FACT_ID - [15:1] */
+#define WM831X_OTP_FACT_FINAL 0x0001 /* OTP_FACT_FINAL */
+#define WM831X_OTP_FACT_FINAL_MASK 0x0001 /* OTP_FACT_FINAL */
+#define WM831X_OTP_FACT_FINAL_SHIFT 0 /* OTP_FACT_FINAL */
+#define WM831X_OTP_FACT_FINAL_WIDTH 1 /* OTP_FACT_FINAL */
+
+/*
+ * R30729 (0x7809) - Factory OTP 1
+ */
+#define WM831X_DC3_TRIM_MASK 0xF000 /* DC3_TRIM - [15:12] */
+#define WM831X_DC3_TRIM_SHIFT 12 /* DC3_TRIM - [15:12] */
+#define WM831X_DC3_TRIM_WIDTH 4 /* DC3_TRIM - [15:12] */
+#define WM831X_DC2_TRIM_MASK 0x0FC0 /* DC2_TRIM - [11:6] */
+#define WM831X_DC2_TRIM_SHIFT 6 /* DC2_TRIM - [11:6] */
+#define WM831X_DC2_TRIM_WIDTH 6 /* DC2_TRIM - [11:6] */
+#define WM831X_DC1_TRIM_MASK 0x003F /* DC1_TRIM - [5:0] */
+#define WM831X_DC1_TRIM_SHIFT 0 /* DC1_TRIM - [5:0] */
+#define WM831X_DC1_TRIM_WIDTH 6 /* DC1_TRIM - [5:0] */
+
+/*
+ * R30730 (0x780A) - Factory OTP 2
+ */
+#define WM831X_CHIP_ID_MASK 0xFFFF /* CHIP_ID - [15:0] */
+#define WM831X_CHIP_ID_SHIFT 0 /* CHIP_ID - [15:0] */
+#define WM831X_CHIP_ID_WIDTH 16 /* CHIP_ID - [15:0] */
+
+/*
+ * R30731 (0x780B) - Factory OTP 3
+ */
+#define WM831X_OSC_TRIM_MASK 0x0780 /* OSC_TRIM - [10:7] */
+#define WM831X_OSC_TRIM_SHIFT 7 /* OSC_TRIM - [10:7] */
+#define WM831X_OSC_TRIM_WIDTH 4 /* OSC_TRIM - [10:7] */
+#define WM831X_BG_TRIM_MASK 0x0078 /* BG_TRIM - [6:3] */
+#define WM831X_BG_TRIM_SHIFT 3 /* BG_TRIM - [6:3] */
+#define WM831X_BG_TRIM_WIDTH 4 /* BG_TRIM - [6:3] */
+#define WM831X_LPBG_TRIM_MASK 0x0007 /* LPBG_TRIM - [2:0] */
+#define WM831X_LPBG_TRIM_SHIFT 0 /* LPBG_TRIM - [2:0] */
+#define WM831X_LPBG_TRIM_WIDTH 3 /* LPBG_TRIM - [2:0] */
+
+/*
+ * R30732 (0x780C) - Factory OTP 4
+ */
+#define WM831X_CHILD_I2C_ADDR_MASK 0x00FE /* CHILD_I2C_ADDR - [7:1] */
+#define WM831X_CHILD_I2C_ADDR_SHIFT 1 /* CHILD_I2C_ADDR - [7:1] */
+#define WM831X_CHILD_I2C_ADDR_WIDTH 7 /* CHILD_I2C_ADDR - [7:1] */
+#define WM831X_CH_AW 0x0001 /* CH_AW */
+#define WM831X_CH_AW_MASK 0x0001 /* CH_AW */
+#define WM831X_CH_AW_SHIFT 0 /* CH_AW */
+#define WM831X_CH_AW_WIDTH 1 /* CH_AW */
+
+/*
+ * R30733 (0x780D) - Factory OTP 5
+ */
+#define WM831X_CHARGE_TRIM_MASK 0x003F /* CHARGE_TRIM - [5:0] */
+#define WM831X_CHARGE_TRIM_SHIFT 0 /* CHARGE_TRIM - [5:0] */
+#define WM831X_CHARGE_TRIM_WIDTH 6 /* CHARGE_TRIM - [5:0] */
+
+/*
+ * R30736 (0x7810) - Customer OTP ID
+ */
+#define WM831X_OTP_AUTO_PROG 0x8000 /* OTP_AUTO_PROG */
+#define WM831X_OTP_AUTO_PROG_MASK 0x8000 /* OTP_AUTO_PROG */
+#define WM831X_OTP_AUTO_PROG_SHIFT 15 /* OTP_AUTO_PROG */
+#define WM831X_OTP_AUTO_PROG_WIDTH 1 /* OTP_AUTO_PROG */
+#define WM831X_OTP_CUST_ID_MASK 0x7FFE /* OTP_CUST_ID - [14:1] */
+#define WM831X_OTP_CUST_ID_SHIFT 1 /* OTP_CUST_ID - [14:1] */
+#define WM831X_OTP_CUST_ID_WIDTH 14 /* OTP_CUST_ID - [14:1] */
+#define WM831X_OTP_CUST_FINAL 0x0001 /* OTP_CUST_FINAL */
+#define WM831X_OTP_CUST_FINAL_MASK 0x0001 /* OTP_CUST_FINAL */
+#define WM831X_OTP_CUST_FINAL_SHIFT 0 /* OTP_CUST_FINAL */
+#define WM831X_OTP_CUST_FINAL_WIDTH 1 /* OTP_CUST_FINAL */
+
+/*
+ * R30759 (0x7827) - DBE CHECK DATA
+ */
+#define WM831X_DBE_VALID_DATA_MASK 0xFFFF /* DBE_VALID_DATA - [15:0] */
+#define WM831X_DBE_VALID_DATA_SHIFT 0 /* DBE_VALID_DATA - [15:0] */
+#define WM831X_DBE_VALID_DATA_WIDTH 16 /* DBE_VALID_DATA - [15:0] */
+
+
+#endif
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
new file mode 100644
index 000000000..dcc9631b3
--- /dev/null
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -0,0 +1,150 @@
+/*
+ * include/linux/mfd/wm831x/pdata.h -- Platform data for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_PDATA_H__
+#define __MFD_WM831X_PDATA_H__
+
+struct wm831x;
+struct regulator_init_data;
+
+struct wm831x_backlight_pdata {
+ int isink; /** ISINK to use, 1 or 2 */
+ int max_uA; /** Maximum current to allow */
+};
+
+struct wm831x_backup_pdata {
+ int charger_enable;
+ int no_constant_voltage; /** Disable constant voltage charging */
+ int vlim; /** Voltage limit in millivolts */
+ int ilim; /** Current limit in microamps */
+};
+
+struct wm831x_battery_pdata {
+ int enable; /** Enable charging */
+ int fast_enable; /** Enable fast charging */
+ int off_mask; /** Mask OFF while charging */
+ int trickle_ilim; /** Trickle charge current limit, in mA */
+ int vsel; /** Target voltage, in mV */
+ int eoc_iterm; /** End of trickle charge current, in mA */
+ int fast_ilim; /** Fast charge current limit, in mA */
+ int timeout; /** Charge cycle timeout, in minutes */
+};
+
+/**
+ * Configuration for the WM831x DC-DC BuckWise converters. This
+ * should be passed as driver_data in the regulator_init_data.
+ *
+ * Currently all the configuration is for the fast DVS switching
+ * support of the devices. This allows an MFP on the device to be
+ * configured as an input which switches between two output voltages,
+ * allowing voltage transitions without the expense of an access over
+ * the I2C or SPI bus. (An illustrative example follows the structure
+ * definition below.)
+ */
+struct wm831x_buckv_pdata {
+ int dvs_gpio; /** CPU GPIO to use for DVS switching */
+ int dvs_control_src; /** Hardware DVS source to use (1 or 2) */
+ int dvs_init_state; /** DVS state to expect on startup */
+ int dvs_state_gpio; /** CPU GPIO to use for monitoring status */
+};
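
An illustrative board-file sketch of how this might be wired up (the GPIO
numbers, voltage limits and variable names below are assumptions, not taken
from the header): the BuckWise data is attached through the driver_data
pointer of the regulator_init_data for the corresponding DC-DC.

#include <linux/regulator/machine.h>
#include <linux/mfd/wm831x/pdata.h>

/* Hypothetical DVS configuration for DC1; GPIO numbers are examples only. */
static struct wm831x_buckv_pdata board_dc1_dvs = {
	.dvs_gpio	 = 42,	/* CPU GPIO driving the DVS input */
	.dvs_control_src = 1,	/* use hardware DVS source 1 */
	.dvs_init_state	 = 0,	/* DVS line expected low at startup */
	.dvs_state_gpio	 = 43,	/* optional GPIO monitoring the DVS state */
};

static struct regulator_init_data board_dc1_data = {
	.constraints = {
		.min_uV		= 850000,	/* example limits only */
		.max_uV		= 1200000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
	.driver_data = &board_dc1_dvs,	/* picked up by the buck driver */
};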
+
+/* Sources for status LED configuration. Values are the register values
+ * plus 1, so that zero can be used as a default meaning "preserve the
+ * current hardware setting".
+ */
+enum wm831x_status_src {
+ WM831X_STATUS_PRESERVE = 0, /* Keep the current hardware setting */
+ WM831X_STATUS_OTP = 1,
+ WM831X_STATUS_POWER = 2,
+ WM831X_STATUS_CHARGER = 3,
+ WM831X_STATUS_MANUAL = 4,
+};
+
+struct wm831x_status_pdata {
+ enum wm831x_status_src default_src;
+ const char *name;
+ const char *default_trigger;
+};
+
+struct wm831x_touch_pdata {
+ int fivewire; /** 1 for five-wire mode, 0 for four-wire */
+ int isel; /** Current for pen down (uA) */
+ int rpu; /** Pen down sensitivity resistor divider */
+ int pressure; /** Report pressure (boolean) */
+ unsigned int data_irq; /** Touch data ready IRQ */
+ int data_irqf; /** IRQ flags for data ready IRQ */
+ unsigned int pd_irq; /** Touch pendown detect IRQ */
+ int pd_irqf; /** IRQ flags for pen down IRQ */
+};
+
+enum wm831x_watchdog_action {
+ WM831X_WDOG_NONE = 0,
+ WM831X_WDOG_INTERRUPT = 1,
+ WM831X_WDOG_RESET = 2,
+ WM831X_WDOG_WAKE = 3,
+};
+
+struct wm831x_watchdog_pdata {
+ enum wm831x_watchdog_action primary, secondary;
+ int update_gpio;
+ unsigned int software:1;
+};
+
+#define WM831X_MAX_STATUS 2
+#define WM831X_MAX_DCDC 4
+#define WM831X_MAX_EPE 2
+#define WM831X_MAX_LDO 11
+#define WM831X_MAX_ISINK 2
+
+#define WM831X_GPIO_CONFIGURE 0x10000
+#define WM831X_GPIO_NUM 16
+
+struct wm831x_pdata {
+ /** Used to distinguish multiple WM831x chips */
+ int wm831x_num;
+
+ /** Called before subdevices are set up */
+ int (*pre_init)(struct wm831x *wm831x);
+ /** Called after subdevices are set up */
+ int (*post_init)(struct wm831x *wm831x);
+
+ /** Put the /IRQ line into CMOS mode */
+ bool irq_cmos;
+
+ /** Disable the touchscreen */
+ bool disable_touch;
+
+ /** The driver should initiate a power off sequence during shutdown */
+ bool soft_shutdown;
+
+ int irq_base;
+ int gpio_base;
+ int gpio_defaults[WM831X_GPIO_NUM];
+ struct wm831x_backlight_pdata *backlight;
+ struct wm831x_backup_pdata *backup;
+ struct wm831x_battery_pdata *battery;
+ struct wm831x_touch_pdata *touch;
+ struct wm831x_watchdog_pdata *watchdog;
+
+ /** LED1 = 0 and so on */
+ struct wm831x_status_pdata *status[WM831X_MAX_STATUS];
+ /** DCDC1 = 0 and so on */
+ struct regulator_init_data *dcdc[WM831X_MAX_DCDC];
+ /** EPE1 = 0 and so on */
+ struct regulator_init_data *epe[WM831X_MAX_EPE];
+ /** LDO1 = 0 and so on */
+ struct regulator_init_data *ldo[WM831X_MAX_LDO];
+ /** ISINK1 = 0 and so on */
+ struct regulator_init_data *isink[WM831X_MAX_ISINK];
+};
+
+#endif
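
As a sketch of how the platform data above might be assembled in a board
file (all names and numbers here are illustrative assumptions; board_dc1_data
is the regulator_init_data from the BuckWise DVS sketch earlier):

#include <linux/mfd/wm831x/pdata.h>

/* Hypothetical status LED configuration; the trigger name is an assumption. */
static struct wm831x_status_pdata board_led1 = {
	.default_src	 = WM831X_STATUS_CHARGER,
	.name		 = "board:charge",
	.default_trigger = "battery-charging",
};

static struct wm831x_pdata board_wm831x_pdata = {
	.wm831x_num = 0,		/* first (or only) WM831x on the board */
	.irq_base   = 200,		/* platform-chosen virtual IRQ base */
	.status[0]  = &board_led1,	/* LED1 */
	.dcdc[0]    = &board_dc1_data,	/* DC1 regulator init data */
};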
diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h
new file mode 100644
index 000000000..b18cbb027
--- /dev/null
+++ b/include/linux/mfd/wm831x/pmu.h
@@ -0,0 +1,189 @@
+/*
+ * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_PMU_H__
+#define __MFD_WM831X_PMU_H__
+
+/*
+ * R16387 (0x4003) - Power State
+ */
+#define WM831X_CHIP_ON 0x8000 /* CHIP_ON */
+#define WM831X_CHIP_ON_MASK 0x8000 /* CHIP_ON */
+#define WM831X_CHIP_ON_SHIFT 15 /* CHIP_ON */
+#define WM831X_CHIP_ON_WIDTH 1 /* CHIP_ON */
+#define WM831X_CHIP_SLP 0x4000 /* CHIP_SLP */
+#define WM831X_CHIP_SLP_MASK 0x4000 /* CHIP_SLP */
+#define WM831X_CHIP_SLP_SHIFT 14 /* CHIP_SLP */
+#define WM831X_CHIP_SLP_WIDTH 1 /* CHIP_SLP */
+#define WM831X_REF_LP 0x1000 /* REF_LP */
+#define WM831X_REF_LP_MASK 0x1000 /* REF_LP */
+#define WM831X_REF_LP_SHIFT 12 /* REF_LP */
+#define WM831X_REF_LP_WIDTH 1 /* REF_LP */
+#define WM831X_PWRSTATE_DLY_MASK 0x0C00 /* PWRSTATE_DLY - [11:10] */
+#define WM831X_PWRSTATE_DLY_SHIFT 10 /* PWRSTATE_DLY - [11:10] */
+#define WM831X_PWRSTATE_DLY_WIDTH 2 /* PWRSTATE_DLY - [11:10] */
+#define WM831X_SWRST_DLY 0x0200 /* SWRST_DLY */
+#define WM831X_SWRST_DLY_MASK 0x0200 /* SWRST_DLY */
+#define WM831X_SWRST_DLY_SHIFT 9 /* SWRST_DLY */
+#define WM831X_SWRST_DLY_WIDTH 1 /* SWRST_DLY */
+#define WM831X_USB100MA_STARTUP_MASK 0x0030 /* USB100MA_STARTUP - [5:4] */
+#define WM831X_USB100MA_STARTUP_SHIFT 4 /* USB100MA_STARTUP - [5:4] */
+#define WM831X_USB100MA_STARTUP_WIDTH 2 /* USB100MA_STARTUP - [5:4] */
+#define WM831X_USB_CURR_STS 0x0008 /* USB_CURR_STS */
+#define WM831X_USB_CURR_STS_MASK 0x0008 /* USB_CURR_STS */
+#define WM831X_USB_CURR_STS_SHIFT 3 /* USB_CURR_STS */
+#define WM831X_USB_CURR_STS_WIDTH 1 /* USB_CURR_STS */
+#define WM831X_USB_ILIM_MASK 0x0007 /* USB_ILIM - [2:0] */
+#define WM831X_USB_ILIM_SHIFT 0 /* USB_ILIM - [2:0] */
+#define WM831X_USB_ILIM_WIDTH 3 /* USB_ILIM - [2:0] */
+
+/*
+ * R16397 (0x400D) - System Status
+ */
+#define WM831X_THW_STS 0x8000 /* THW_STS */
+#define WM831X_THW_STS_MASK 0x8000 /* THW_STS */
+#define WM831X_THW_STS_SHIFT 15 /* THW_STS */
+#define WM831X_THW_STS_WIDTH 1 /* THW_STS */
+#define WM831X_PWR_SRC_BATT 0x0400 /* PWR_SRC_BATT */
+#define WM831X_PWR_SRC_BATT_MASK 0x0400 /* PWR_SRC_BATT */
+#define WM831X_PWR_SRC_BATT_SHIFT 10 /* PWR_SRC_BATT */
+#define WM831X_PWR_SRC_BATT_WIDTH 1 /* PWR_SRC_BATT */
+#define WM831X_PWR_WALL 0x0200 /* PWR_WALL */
+#define WM831X_PWR_WALL_MASK 0x0200 /* PWR_WALL */
+#define WM831X_PWR_WALL_SHIFT 9 /* PWR_WALL */
+#define WM831X_PWR_WALL_WIDTH 1 /* PWR_WALL */
+#define WM831X_PWR_USB 0x0100 /* PWR_USB */
+#define WM831X_PWR_USB_MASK 0x0100 /* PWR_USB */
+#define WM831X_PWR_USB_SHIFT 8 /* PWR_USB */
+#define WM831X_PWR_USB_WIDTH 1 /* PWR_USB */
+#define WM831X_MAIN_STATE_MASK 0x001F /* MAIN_STATE - [4:0] */
+#define WM831X_MAIN_STATE_SHIFT 0 /* MAIN_STATE - [4:0] */
+#define WM831X_MAIN_STATE_WIDTH 5 /* MAIN_STATE - [4:0] */
+
+/*
+ * R16456 (0x4048) - Charger Control 1
+ */
+#define WM831X_CHG_ENA 0x8000 /* CHG_ENA */
+#define WM831X_CHG_ENA_MASK 0x8000 /* CHG_ENA */
+#define WM831X_CHG_ENA_SHIFT 15 /* CHG_ENA */
+#define WM831X_CHG_ENA_WIDTH 1 /* CHG_ENA */
+#define WM831X_CHG_FRC 0x4000 /* CHG_FRC */
+#define WM831X_CHG_FRC_MASK 0x4000 /* CHG_FRC */
+#define WM831X_CHG_FRC_SHIFT 14 /* CHG_FRC */
+#define WM831X_CHG_FRC_WIDTH 1 /* CHG_FRC */
+#define WM831X_CHG_ITERM_MASK 0x1C00 /* CHG_ITERM - [12:10] */
+#define WM831X_CHG_ITERM_SHIFT 10 /* CHG_ITERM - [12:10] */
+#define WM831X_CHG_ITERM_WIDTH 3 /* CHG_ITERM - [12:10] */
+#define WM831X_CHG_FAST 0x0020 /* CHG_FAST */
+#define WM831X_CHG_FAST_MASK 0x0020 /* CHG_FAST */
+#define WM831X_CHG_FAST_SHIFT 5 /* CHG_FAST */
+#define WM831X_CHG_FAST_WIDTH 1 /* CHG_FAST */
+#define WM831X_CHG_IMON_ENA 0x0002 /* CHG_IMON_ENA */
+#define WM831X_CHG_IMON_ENA_MASK 0x0002 /* CHG_IMON_ENA */
+#define WM831X_CHG_IMON_ENA_SHIFT 1 /* CHG_IMON_ENA */
+#define WM831X_CHG_IMON_ENA_WIDTH 1 /* CHG_IMON_ENA */
+#define WM831X_CHG_CHIP_TEMP_MON 0x0001 /* CHG_CHIP_TEMP_MON */
+#define WM831X_CHG_CHIP_TEMP_MON_MASK 0x0001 /* CHG_CHIP_TEMP_MON */
+#define WM831X_CHG_CHIP_TEMP_MON_SHIFT 0 /* CHG_CHIP_TEMP_MON */
+#define WM831X_CHG_CHIP_TEMP_MON_WIDTH 1 /* CHG_CHIP_TEMP_MON */
+
+/*
+ * R16457 (0x4049) - Charger Control 2
+ */
+#define WM831X_CHG_OFF_MSK 0x4000 /* CHG_OFF_MSK */
+#define WM831X_CHG_OFF_MSK_MASK 0x4000 /* CHG_OFF_MSK */
+#define WM831X_CHG_OFF_MSK_SHIFT 14 /* CHG_OFF_MSK */
+#define WM831X_CHG_OFF_MSK_WIDTH 1 /* CHG_OFF_MSK */
+#define WM831X_CHG_TIME_MASK 0x0F00 /* CHG_TIME - [11:8] */
+#define WM831X_CHG_TIME_SHIFT 8 /* CHG_TIME - [11:8] */
+#define WM831X_CHG_TIME_WIDTH 4 /* CHG_TIME - [11:8] */
+#define WM831X_CHG_TRKL_ILIM_MASK 0x00C0 /* CHG_TRKL_ILIM - [7:6] */
+#define WM831X_CHG_TRKL_ILIM_SHIFT 6 /* CHG_TRKL_ILIM - [7:6] */
+#define WM831X_CHG_TRKL_ILIM_WIDTH 2 /* CHG_TRKL_ILIM - [7:6] */
+#define WM831X_CHG_VSEL_MASK 0x0030 /* CHG_VSEL - [5:4] */
+#define WM831X_CHG_VSEL_SHIFT 4 /* CHG_VSEL - [5:4] */
+#define WM831X_CHG_VSEL_WIDTH 2 /* CHG_VSEL - [5:4] */
+#define WM831X_CHG_FAST_ILIM_MASK 0x000F /* CHG_FAST_ILIM - [3:0] */
+#define WM831X_CHG_FAST_ILIM_SHIFT 0 /* CHG_FAST_ILIM - [3:0] */
+#define WM831X_CHG_FAST_ILIM_WIDTH 4 /* CHG_FAST_ILIM - [3:0] */
+
+/*
+ * R16458 (0x404A) - Charger Status
+ */
+#define WM831X_BATT_OV_STS 0x8000 /* BATT_OV_STS */
+#define WM831X_BATT_OV_STS_MASK 0x8000 /* BATT_OV_STS */
+#define WM831X_BATT_OV_STS_SHIFT 15 /* BATT_OV_STS */
+#define WM831X_BATT_OV_STS_WIDTH 1 /* BATT_OV_STS */
+#define WM831X_CHG_STATE_MASK 0x7000 /* CHG_STATE - [14:12] */
+#define WM831X_CHG_STATE_SHIFT 12 /* CHG_STATE - [14:12] */
+#define WM831X_CHG_STATE_WIDTH 3 /* CHG_STATE - [14:12] */
+#define WM831X_BATT_HOT_STS 0x0800 /* BATT_HOT_STS */
+#define WM831X_BATT_HOT_STS_MASK 0x0800 /* BATT_HOT_STS */
+#define WM831X_BATT_HOT_STS_SHIFT 11 /* BATT_HOT_STS */
+#define WM831X_BATT_HOT_STS_WIDTH 1 /* BATT_HOT_STS */
+#define WM831X_BATT_COLD_STS 0x0400 /* BATT_COLD_STS */
+#define WM831X_BATT_COLD_STS_MASK 0x0400 /* BATT_COLD_STS */
+#define WM831X_BATT_COLD_STS_SHIFT 10 /* BATT_COLD_STS */
+#define WM831X_BATT_COLD_STS_WIDTH 1 /* BATT_COLD_STS */
+#define WM831X_CHG_TOPOFF 0x0200 /* CHG_TOPOFF */
+#define WM831X_CHG_TOPOFF_MASK 0x0200 /* CHG_TOPOFF */
+#define WM831X_CHG_TOPOFF_SHIFT 9 /* CHG_TOPOFF */
+#define WM831X_CHG_TOPOFF_WIDTH 1 /* CHG_TOPOFF */
+#define WM831X_CHG_ACTIVE 0x0100 /* CHG_ACTIVE */
+#define WM831X_CHG_ACTIVE_MASK 0x0100 /* CHG_ACTIVE */
+#define WM831X_CHG_ACTIVE_SHIFT 8 /* CHG_ACTIVE */
+#define WM831X_CHG_ACTIVE_WIDTH 1 /* CHG_ACTIVE */
+#define WM831X_CHG_TIME_ELAPSED_MASK 0x00FF /* CHG_TIME_ELAPSED - [7:0] */
+#define WM831X_CHG_TIME_ELAPSED_SHIFT 0 /* CHG_TIME_ELAPSED - [7:0] */
+#define WM831X_CHG_TIME_ELAPSED_WIDTH 8 /* CHG_TIME_ELAPSED - [7:0] */
+
+#define WM831X_CHG_STATE_OFF (0 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_TRICKLE (1 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_FAST (2 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_TRICKLE_OT (3 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_FAST_OT (4 << WM831X_CHG_STATE_SHIFT)
+#define WM831X_CHG_STATE_DEFECTIVE (5 << WM831X_CHG_STATE_SHIFT)
+
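A hedged sketch of how the pre-shifted CHG_STATE values are used together
with the _MASK macro: wm831x_reg_read() and struct wm831x come from
<linux/mfd/wm831x/core.h>, and the raw register address 0x404A is the
Charger Status register (R16458) documented above. The function name is
illustrative.

#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pmu.h>

/* Returns 1 if the charger reports a fast-charge state, 0 otherwise,
 * or a negative errno if the register read fails. */
static int board_charger_in_fast_charge(struct wm831x *wm831x)
{
	int status = wm831x_reg_read(wm831x, 0x404A);

	if (status < 0)
		return status;

	switch (status & WM831X_CHG_STATE_MASK) {
	case WM831X_CHG_STATE_FAST:
	case WM831X_CHG_STATE_FAST_OT:
		return 1;
	default:
		return 0;
	}
}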
+/*
+ * R16459 (0x404B) - Backup Charger Control
+ */
+#define WM831X_BKUP_CHG_ENA 0x8000 /* BKUP_CHG_ENA */
+#define WM831X_BKUP_CHG_ENA_MASK 0x8000 /* BKUP_CHG_ENA */
+#define WM831X_BKUP_CHG_ENA_SHIFT 15 /* BKUP_CHG_ENA */
+#define WM831X_BKUP_CHG_ENA_WIDTH 1 /* BKUP_CHG_ENA */
+#define WM831X_BKUP_CHG_STS 0x4000 /* BKUP_CHG_STS */
+#define WM831X_BKUP_CHG_STS_MASK 0x4000 /* BKUP_CHG_STS */
+#define WM831X_BKUP_CHG_STS_SHIFT 14 /* BKUP_CHG_STS */
+#define WM831X_BKUP_CHG_STS_WIDTH 1 /* BKUP_CHG_STS */
+#define WM831X_BKUP_CHG_MODE 0x1000 /* BKUP_CHG_MODE */
+#define WM831X_BKUP_CHG_MODE_MASK 0x1000 /* BKUP_CHG_MODE */
+#define WM831X_BKUP_CHG_MODE_SHIFT 12 /* BKUP_CHG_MODE */
+#define WM831X_BKUP_CHG_MODE_WIDTH 1 /* BKUP_CHG_MODE */
+#define WM831X_BKUP_BATT_DET_ENA 0x0800 /* BKUP_BATT_DET_ENA */
+#define WM831X_BKUP_BATT_DET_ENA_MASK 0x0800 /* BKUP_BATT_DET_ENA */
+#define WM831X_BKUP_BATT_DET_ENA_SHIFT 11 /* BKUP_BATT_DET_ENA */
+#define WM831X_BKUP_BATT_DET_ENA_WIDTH 1 /* BKUP_BATT_DET_ENA */
+#define WM831X_BKUP_BATT_STS 0x0400 /* BKUP_BATT_STS */
+#define WM831X_BKUP_BATT_STS_MASK 0x0400 /* BKUP_BATT_STS */
+#define WM831X_BKUP_BATT_STS_SHIFT 10 /* BKUP_BATT_STS */
+#define WM831X_BKUP_BATT_STS_WIDTH 1 /* BKUP_BATT_STS */
+#define WM831X_BKUP_CHG_VLIM 0x0010 /* BKUP_CHG_VLIM */
+#define WM831X_BKUP_CHG_VLIM_MASK 0x0010 /* BKUP_CHG_VLIM */
+#define WM831X_BKUP_CHG_VLIM_SHIFT 4 /* BKUP_CHG_VLIM */
+#define WM831X_BKUP_CHG_VLIM_WIDTH 1 /* BKUP_CHG_VLIM */
+#define WM831X_BKUP_CHG_ILIM_MASK 0x0003 /* BKUP_CHG_ILIM - [1:0] */
+#define WM831X_BKUP_CHG_ILIM_SHIFT 0 /* BKUP_CHG_ILIM - [1:0] */
+#define WM831X_BKUP_CHG_ILIM_WIDTH 2 /* BKUP_CHG_ILIM - [1:0] */
+
+#endif
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h
new file mode 100644
index 000000000..955d30fc6
--- /dev/null
+++ b/include/linux/mfd/wm831x/regulator.h
@@ -0,0 +1,1218 @@
+/*
+ * linux/mfd/wm831x/regulator.h -- Regulator definitions for wm831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_REGULATOR_H__
+#define __MFD_WM831X_REGULATOR_H__
+
+/*
+ * R16462 (0x404E) - Current Sink 1
+ */
+#define WM831X_CS1_ENA 0x8000 /* CS1_ENA */
+#define WM831X_CS1_ENA_MASK 0x8000 /* CS1_ENA */
+#define WM831X_CS1_ENA_SHIFT 15 /* CS1_ENA */
+#define WM831X_CS1_ENA_WIDTH 1 /* CS1_ENA */
+#define WM831X_CS1_DRIVE 0x4000 /* CS1_DRIVE */
+#define WM831X_CS1_DRIVE_MASK 0x4000 /* CS1_DRIVE */
+#define WM831X_CS1_DRIVE_SHIFT 14 /* CS1_DRIVE */
+#define WM831X_CS1_DRIVE_WIDTH 1 /* CS1_DRIVE */
+#define WM831X_CS1_SLPENA 0x1000 /* CS1_SLPENA */
+#define WM831X_CS1_SLPENA_MASK 0x1000 /* CS1_SLPENA */
+#define WM831X_CS1_SLPENA_SHIFT 12 /* CS1_SLPENA */
+#define WM831X_CS1_SLPENA_WIDTH 1 /* CS1_SLPENA */
+#define WM831X_CS1_OFF_RAMP_MASK 0x0C00 /* CS1_OFF_RAMP - [11:10] */
+#define WM831X_CS1_OFF_RAMP_SHIFT 10 /* CS1_OFF_RAMP - [11:10] */
+#define WM831X_CS1_OFF_RAMP_WIDTH 2 /* CS1_OFF_RAMP - [11:10] */
+#define WM831X_CS1_ON_RAMP_MASK 0x0300 /* CS1_ON_RAMP - [9:8] */
+#define WM831X_CS1_ON_RAMP_SHIFT 8 /* CS1_ON_RAMP - [9:8] */
+#define WM831X_CS1_ON_RAMP_WIDTH 2 /* CS1_ON_RAMP - [9:8] */
+#define WM831X_CS1_ISEL_MASK 0x003F /* CS1_ISEL - [5:0] */
+#define WM831X_CS1_ISEL_SHIFT 0 /* CS1_ISEL - [5:0] */
+#define WM831X_CS1_ISEL_WIDTH 6 /* CS1_ISEL - [5:0] */
+
+/*
+ * R16463 (0x404F) - Current Sink 2
+ */
+#define WM831X_CS2_ENA 0x8000 /* CS2_ENA */
+#define WM831X_CS2_ENA_MASK 0x8000 /* CS2_ENA */
+#define WM831X_CS2_ENA_SHIFT 15 /* CS2_ENA */
+#define WM831X_CS2_ENA_WIDTH 1 /* CS2_ENA */
+#define WM831X_CS2_DRIVE 0x4000 /* CS2_DRIVE */
+#define WM831X_CS2_DRIVE_MASK 0x4000 /* CS2_DRIVE */
+#define WM831X_CS2_DRIVE_SHIFT 14 /* CS2_DRIVE */
+#define WM831X_CS2_DRIVE_WIDTH 1 /* CS2_DRIVE */
+#define WM831X_CS2_SLPENA 0x1000 /* CS2_SLPENA */
+#define WM831X_CS2_SLPENA_MASK 0x1000 /* CS2_SLPENA */
+#define WM831X_CS2_SLPENA_SHIFT 12 /* CS2_SLPENA */
+#define WM831X_CS2_SLPENA_WIDTH 1 /* CS2_SLPENA */
+#define WM831X_CS2_OFF_RAMP_MASK 0x0C00 /* CS2_OFF_RAMP - [11:10] */
+#define WM831X_CS2_OFF_RAMP_SHIFT 10 /* CS2_OFF_RAMP - [11:10] */
+#define WM831X_CS2_OFF_RAMP_WIDTH 2 /* CS2_OFF_RAMP - [11:10] */
+#define WM831X_CS2_ON_RAMP_MASK 0x0300 /* CS2_ON_RAMP - [9:8] */
+#define WM831X_CS2_ON_RAMP_SHIFT 8 /* CS2_ON_RAMP - [9:8] */
+#define WM831X_CS2_ON_RAMP_WIDTH 2 /* CS2_ON_RAMP - [9:8] */
+#define WM831X_CS2_ISEL_MASK 0x003F /* CS2_ISEL - [5:0] */
+#define WM831X_CS2_ISEL_SHIFT 0 /* CS2_ISEL - [5:0] */
+#define WM831X_CS2_ISEL_WIDTH 6 /* CS2_ISEL - [5:0] */
+
+/*
+ * R16464 (0x4050) - DCDC Enable
+ */
+#define WM831X_EPE2_ENA 0x0080 /* EPE2_ENA */
+#define WM831X_EPE2_ENA_MASK 0x0080 /* EPE2_ENA */
+#define WM831X_EPE2_ENA_SHIFT 7 /* EPE2_ENA */
+#define WM831X_EPE2_ENA_WIDTH 1 /* EPE2_ENA */
+#define WM831X_EPE1_ENA 0x0040 /* EPE1_ENA */
+#define WM831X_EPE1_ENA_MASK 0x0040 /* EPE1_ENA */
+#define WM831X_EPE1_ENA_SHIFT 6 /* EPE1_ENA */
+#define WM831X_EPE1_ENA_WIDTH 1 /* EPE1_ENA */
+#define WM831X_DC4_ENA 0x0008 /* DC4_ENA */
+#define WM831X_DC4_ENA_MASK 0x0008 /* DC4_ENA */
+#define WM831X_DC4_ENA_SHIFT 3 /* DC4_ENA */
+#define WM831X_DC4_ENA_WIDTH 1 /* DC4_ENA */
+#define WM831X_DC3_ENA 0x0004 /* DC3_ENA */
+#define WM831X_DC3_ENA_MASK 0x0004 /* DC3_ENA */
+#define WM831X_DC3_ENA_SHIFT 2 /* DC3_ENA */
+#define WM831X_DC3_ENA_WIDTH 1 /* DC3_ENA */
+#define WM831X_DC2_ENA 0x0002 /* DC2_ENA */
+#define WM831X_DC2_ENA_MASK 0x0002 /* DC2_ENA */
+#define WM831X_DC2_ENA_SHIFT 1 /* DC2_ENA */
+#define WM831X_DC2_ENA_WIDTH 1 /* DC2_ENA */
+#define WM831X_DC1_ENA 0x0001 /* DC1_ENA */
+#define WM831X_DC1_ENA_MASK 0x0001 /* DC1_ENA */
+#define WM831X_DC1_ENA_SHIFT 0 /* DC1_ENA */
+#define WM831X_DC1_ENA_WIDTH 1 /* DC1_ENA */
+
+/*
+ * R16465 (0x4051) - LDO Enable
+ */
+#define WM831X_LDO11_ENA 0x0400 /* LDO11_ENA */
+#define WM831X_LDO11_ENA_MASK 0x0400 /* LDO11_ENA */
+#define WM831X_LDO11_ENA_SHIFT 10 /* LDO11_ENA */
+#define WM831X_LDO11_ENA_WIDTH 1 /* LDO11_ENA */
+#define WM831X_LDO10_ENA 0x0200 /* LDO10_ENA */
+#define WM831X_LDO10_ENA_MASK 0x0200 /* LDO10_ENA */
+#define WM831X_LDO10_ENA_SHIFT 9 /* LDO10_ENA */
+#define WM831X_LDO10_ENA_WIDTH 1 /* LDO10_ENA */
+#define WM831X_LDO9_ENA 0x0100 /* LDO9_ENA */
+#define WM831X_LDO9_ENA_MASK 0x0100 /* LDO9_ENA */
+#define WM831X_LDO9_ENA_SHIFT 8 /* LDO9_ENA */
+#define WM831X_LDO9_ENA_WIDTH 1 /* LDO9_ENA */
+#define WM831X_LDO8_ENA 0x0080 /* LDO8_ENA */
+#define WM831X_LDO8_ENA_MASK 0x0080 /* LDO8_ENA */
+#define WM831X_LDO8_ENA_SHIFT 7 /* LDO8_ENA */
+#define WM831X_LDO8_ENA_WIDTH 1 /* LDO8_ENA */
+#define WM831X_LDO7_ENA 0x0040 /* LDO7_ENA */
+#define WM831X_LDO7_ENA_MASK 0x0040 /* LDO7_ENA */
+#define WM831X_LDO7_ENA_SHIFT 6 /* LDO7_ENA */
+#define WM831X_LDO7_ENA_WIDTH 1 /* LDO7_ENA */
+#define WM831X_LDO6_ENA 0x0020 /* LDO6_ENA */
+#define WM831X_LDO6_ENA_MASK 0x0020 /* LDO6_ENA */
+#define WM831X_LDO6_ENA_SHIFT 5 /* LDO6_ENA */
+#define WM831X_LDO6_ENA_WIDTH 1 /* LDO6_ENA */
+#define WM831X_LDO5_ENA 0x0010 /* LDO5_ENA */
+#define WM831X_LDO5_ENA_MASK 0x0010 /* LDO5_ENA */
+#define WM831X_LDO5_ENA_SHIFT 4 /* LDO5_ENA */
+#define WM831X_LDO5_ENA_WIDTH 1 /* LDO5_ENA */
+#define WM831X_LDO4_ENA 0x0008 /* LDO4_ENA */
+#define WM831X_LDO4_ENA_MASK 0x0008 /* LDO4_ENA */
+#define WM831X_LDO4_ENA_SHIFT 3 /* LDO4_ENA */
+#define WM831X_LDO4_ENA_WIDTH 1 /* LDO4_ENA */
+#define WM831X_LDO3_ENA 0x0004 /* LDO3_ENA */
+#define WM831X_LDO3_ENA_MASK 0x0004 /* LDO3_ENA */
+#define WM831X_LDO3_ENA_SHIFT 2 /* LDO3_ENA */
+#define WM831X_LDO3_ENA_WIDTH 1 /* LDO3_ENA */
+#define WM831X_LDO2_ENA 0x0002 /* LDO2_ENA */
+#define WM831X_LDO2_ENA_MASK 0x0002 /* LDO2_ENA */
+#define WM831X_LDO2_ENA_SHIFT 1 /* LDO2_ENA */
+#define WM831X_LDO2_ENA_WIDTH 1 /* LDO2_ENA */
+#define WM831X_LDO1_ENA 0x0001 /* LDO1_ENA */
+#define WM831X_LDO1_ENA_MASK 0x0001 /* LDO1_ENA */
+#define WM831X_LDO1_ENA_SHIFT 0 /* LDO1_ENA */
+#define WM831X_LDO1_ENA_WIDTH 1 /* LDO1_ENA */
+
+/*
+ * R16466 (0x4052) - DCDC Status
+ */
+#define WM831X_EPE2_STS 0x0080 /* EPE2_STS */
+#define WM831X_EPE2_STS_MASK 0x0080 /* EPE2_STS */
+#define WM831X_EPE2_STS_SHIFT 7 /* EPE2_STS */
+#define WM831X_EPE2_STS_WIDTH 1 /* EPE2_STS */
+#define WM831X_EPE1_STS 0x0040 /* EPE1_STS */
+#define WM831X_EPE1_STS_MASK 0x0040 /* EPE1_STS */
+#define WM831X_EPE1_STS_SHIFT 6 /* EPE1_STS */
+#define WM831X_EPE1_STS_WIDTH 1 /* EPE1_STS */
+#define WM831X_DC4_STS 0x0008 /* DC4_STS */
+#define WM831X_DC4_STS_MASK 0x0008 /* DC4_STS */
+#define WM831X_DC4_STS_SHIFT 3 /* DC4_STS */
+#define WM831X_DC4_STS_WIDTH 1 /* DC4_STS */
+#define WM831X_DC3_STS 0x0004 /* DC3_STS */
+#define WM831X_DC3_STS_MASK 0x0004 /* DC3_STS */
+#define WM831X_DC3_STS_SHIFT 2 /* DC3_STS */
+#define WM831X_DC3_STS_WIDTH 1 /* DC3_STS */
+#define WM831X_DC2_STS 0x0002 /* DC2_STS */
+#define WM831X_DC2_STS_MASK 0x0002 /* DC2_STS */
+#define WM831X_DC2_STS_SHIFT 1 /* DC2_STS */
+#define WM831X_DC2_STS_WIDTH 1 /* DC2_STS */
+#define WM831X_DC1_STS 0x0001 /* DC1_STS */
+#define WM831X_DC1_STS_MASK 0x0001 /* DC1_STS */
+#define WM831X_DC1_STS_SHIFT 0 /* DC1_STS */
+#define WM831X_DC1_STS_WIDTH 1 /* DC1_STS */
+
+/*
+ * R16467 (0x4053) - LDO Status
+ */
+#define WM831X_LDO11_STS 0x0400 /* LDO11_STS */
+#define WM831X_LDO11_STS_MASK 0x0400 /* LDO11_STS */
+#define WM831X_LDO11_STS_SHIFT 10 /* LDO11_STS */
+#define WM831X_LDO11_STS_WIDTH 1 /* LDO11_STS */
+#define WM831X_LDO10_STS 0x0200 /* LDO10_STS */
+#define WM831X_LDO10_STS_MASK 0x0200 /* LDO10_STS */
+#define WM831X_LDO10_STS_SHIFT 9 /* LDO10_STS */
+#define WM831X_LDO10_STS_WIDTH 1 /* LDO10_STS */
+#define WM831X_LDO9_STS 0x0100 /* LDO9_STS */
+#define WM831X_LDO9_STS_MASK 0x0100 /* LDO9_STS */
+#define WM831X_LDO9_STS_SHIFT 8 /* LDO9_STS */
+#define WM831X_LDO9_STS_WIDTH 1 /* LDO9_STS */
+#define WM831X_LDO8_STS 0x0080 /* LDO8_STS */
+#define WM831X_LDO8_STS_MASK 0x0080 /* LDO8_STS */
+#define WM831X_LDO8_STS_SHIFT 7 /* LDO8_STS */
+#define WM831X_LDO8_STS_WIDTH 1 /* LDO8_STS */
+#define WM831X_LDO7_STS 0x0040 /* LDO7_STS */
+#define WM831X_LDO7_STS_MASK 0x0040 /* LDO7_STS */
+#define WM831X_LDO7_STS_SHIFT 6 /* LDO7_STS */
+#define WM831X_LDO7_STS_WIDTH 1 /* LDO7_STS */
+#define WM831X_LDO6_STS 0x0020 /* LDO6_STS */
+#define WM831X_LDO6_STS_MASK 0x0020 /* LDO6_STS */
+#define WM831X_LDO6_STS_SHIFT 5 /* LDO6_STS */
+#define WM831X_LDO6_STS_WIDTH 1 /* LDO6_STS */
+#define WM831X_LDO5_STS 0x0010 /* LDO5_STS */
+#define WM831X_LDO5_STS_MASK 0x0010 /* LDO5_STS */
+#define WM831X_LDO5_STS_SHIFT 4 /* LDO5_STS */
+#define WM831X_LDO5_STS_WIDTH 1 /* LDO5_STS */
+#define WM831X_LDO4_STS 0x0008 /* LDO4_STS */
+#define WM831X_LDO4_STS_MASK 0x0008 /* LDO4_STS */
+#define WM831X_LDO4_STS_SHIFT 3 /* LDO4_STS */
+#define WM831X_LDO4_STS_WIDTH 1 /* LDO4_STS */
+#define WM831X_LDO3_STS 0x0004 /* LDO3_STS */
+#define WM831X_LDO3_STS_MASK 0x0004 /* LDO3_STS */
+#define WM831X_LDO3_STS_SHIFT 2 /* LDO3_STS */
+#define WM831X_LDO3_STS_WIDTH 1 /* LDO3_STS */
+#define WM831X_LDO2_STS 0x0002 /* LDO2_STS */
+#define WM831X_LDO2_STS_MASK 0x0002 /* LDO2_STS */
+#define WM831X_LDO2_STS_SHIFT 1 /* LDO2_STS */
+#define WM831X_LDO2_STS_WIDTH 1 /* LDO2_STS */
+#define WM831X_LDO1_STS 0x0001 /* LDO1_STS */
+#define WM831X_LDO1_STS_MASK 0x0001 /* LDO1_STS */
+#define WM831X_LDO1_STS_SHIFT 0 /* LDO1_STS */
+#define WM831X_LDO1_STS_WIDTH 1 /* LDO1_STS */
+
+/*
+ * R16468 (0x4054) - DCDC UV Status
+ */
+#define WM831X_DC2_OV_STS 0x2000 /* DC2_OV_STS */
+#define WM831X_DC2_OV_STS_MASK 0x2000 /* DC2_OV_STS */
+#define WM831X_DC2_OV_STS_SHIFT 13 /* DC2_OV_STS */
+#define WM831X_DC2_OV_STS_WIDTH 1 /* DC2_OV_STS */
+#define WM831X_DC1_OV_STS 0x1000 /* DC1_OV_STS */
+#define WM831X_DC1_OV_STS_MASK 0x1000 /* DC1_OV_STS */
+#define WM831X_DC1_OV_STS_SHIFT 12 /* DC1_OV_STS */
+#define WM831X_DC1_OV_STS_WIDTH 1 /* DC1_OV_STS */
+#define WM831X_DC2_HC_STS 0x0200 /* DC2_HC_STS */
+#define WM831X_DC2_HC_STS_MASK 0x0200 /* DC2_HC_STS */
+#define WM831X_DC2_HC_STS_SHIFT 9 /* DC2_HC_STS */
+#define WM831X_DC2_HC_STS_WIDTH 1 /* DC2_HC_STS */
+#define WM831X_DC1_HC_STS 0x0100 /* DC1_HC_STS */
+#define WM831X_DC1_HC_STS_MASK 0x0100 /* DC1_HC_STS */
+#define WM831X_DC1_HC_STS_SHIFT 8 /* DC1_HC_STS */
+#define WM831X_DC1_HC_STS_WIDTH 1 /* DC1_HC_STS */
+#define WM831X_DC4_UV_STS 0x0008 /* DC4_UV_STS */
+#define WM831X_DC4_UV_STS_MASK 0x0008 /* DC4_UV_STS */
+#define WM831X_DC4_UV_STS_SHIFT 3 /* DC4_UV_STS */
+#define WM831X_DC4_UV_STS_WIDTH 1 /* DC4_UV_STS */
+#define WM831X_DC3_UV_STS 0x0004 /* DC3_UV_STS */
+#define WM831X_DC3_UV_STS_MASK 0x0004 /* DC3_UV_STS */
+#define WM831X_DC3_UV_STS_SHIFT 2 /* DC3_UV_STS */
+#define WM831X_DC3_UV_STS_WIDTH 1 /* DC3_UV_STS */
+#define WM831X_DC2_UV_STS 0x0002 /* DC2_UV_STS */
+#define WM831X_DC2_UV_STS_MASK 0x0002 /* DC2_UV_STS */
+#define WM831X_DC2_UV_STS_SHIFT 1 /* DC2_UV_STS */
+#define WM831X_DC2_UV_STS_WIDTH 1 /* DC2_UV_STS */
+#define WM831X_DC1_UV_STS 0x0001 /* DC1_UV_STS */
+#define WM831X_DC1_UV_STS_MASK 0x0001 /* DC1_UV_STS */
+#define WM831X_DC1_UV_STS_SHIFT 0 /* DC1_UV_STS */
+#define WM831X_DC1_UV_STS_WIDTH 1 /* DC1_UV_STS */
+
+/*
+ * R16469 (0x4055) - LDO UV Status
+ */
+#define WM831X_INTLDO_UV_STS 0x8000 /* INTLDO_UV_STS */
+#define WM831X_INTLDO_UV_STS_MASK 0x8000 /* INTLDO_UV_STS */
+#define WM831X_INTLDO_UV_STS_SHIFT 15 /* INTLDO_UV_STS */
+#define WM831X_INTLDO_UV_STS_WIDTH 1 /* INTLDO_UV_STS */
+#define WM831X_LDO10_UV_STS 0x0200 /* LDO10_UV_STS */
+#define WM831X_LDO10_UV_STS_MASK 0x0200 /* LDO10_UV_STS */
+#define WM831X_LDO10_UV_STS_SHIFT 9 /* LDO10_UV_STS */
+#define WM831X_LDO10_UV_STS_WIDTH 1 /* LDO10_UV_STS */
+#define WM831X_LDO9_UV_STS 0x0100 /* LDO9_UV_STS */
+#define WM831X_LDO9_UV_STS_MASK 0x0100 /* LDO9_UV_STS */
+#define WM831X_LDO9_UV_STS_SHIFT 8 /* LDO9_UV_STS */
+#define WM831X_LDO9_UV_STS_WIDTH 1 /* LDO9_UV_STS */
+#define WM831X_LDO8_UV_STS 0x0080 /* LDO8_UV_STS */
+#define WM831X_LDO8_UV_STS_MASK 0x0080 /* LDO8_UV_STS */
+#define WM831X_LDO8_UV_STS_SHIFT 7 /* LDO8_UV_STS */
+#define WM831X_LDO8_UV_STS_WIDTH 1 /* LDO8_UV_STS */
+#define WM831X_LDO7_UV_STS 0x0040 /* LDO7_UV_STS */
+#define WM831X_LDO7_UV_STS_MASK 0x0040 /* LDO7_UV_STS */
+#define WM831X_LDO7_UV_STS_SHIFT 6 /* LDO7_UV_STS */
+#define WM831X_LDO7_UV_STS_WIDTH 1 /* LDO7_UV_STS */
+#define WM831X_LDO6_UV_STS 0x0020 /* LDO6_UV_STS */
+#define WM831X_LDO6_UV_STS_MASK 0x0020 /* LDO6_UV_STS */
+#define WM831X_LDO6_UV_STS_SHIFT 5 /* LDO6_UV_STS */
+#define WM831X_LDO6_UV_STS_WIDTH 1 /* LDO6_UV_STS */
+#define WM831X_LDO5_UV_STS 0x0010 /* LDO5_UV_STS */
+#define WM831X_LDO5_UV_STS_MASK 0x0010 /* LDO5_UV_STS */
+#define WM831X_LDO5_UV_STS_SHIFT 4 /* LDO5_UV_STS */
+#define WM831X_LDO5_UV_STS_WIDTH 1 /* LDO5_UV_STS */
+#define WM831X_LDO4_UV_STS 0x0008 /* LDO4_UV_STS */
+#define WM831X_LDO4_UV_STS_MASK 0x0008 /* LDO4_UV_STS */
+#define WM831X_LDO4_UV_STS_SHIFT 3 /* LDO4_UV_STS */
+#define WM831X_LDO4_UV_STS_WIDTH 1 /* LDO4_UV_STS */
+#define WM831X_LDO3_UV_STS 0x0004 /* LDO3_UV_STS */
+#define WM831X_LDO3_UV_STS_MASK 0x0004 /* LDO3_UV_STS */
+#define WM831X_LDO3_UV_STS_SHIFT 2 /* LDO3_UV_STS */
+#define WM831X_LDO3_UV_STS_WIDTH 1 /* LDO3_UV_STS */
+#define WM831X_LDO2_UV_STS 0x0002 /* LDO2_UV_STS */
+#define WM831X_LDO2_UV_STS_MASK 0x0002 /* LDO2_UV_STS */
+#define WM831X_LDO2_UV_STS_SHIFT 1 /* LDO2_UV_STS */
+#define WM831X_LDO2_UV_STS_WIDTH 1 /* LDO2_UV_STS */
+#define WM831X_LDO1_UV_STS 0x0001 /* LDO1_UV_STS */
+#define WM831X_LDO1_UV_STS_MASK 0x0001 /* LDO1_UV_STS */
+#define WM831X_LDO1_UV_STS_SHIFT 0 /* LDO1_UV_STS */
+#define WM831X_LDO1_UV_STS_WIDTH 1 /* LDO1_UV_STS */
+
+/*
+ * R16470 (0x4056) - DC1 Control 1
+ */
+#define WM831X_DC1_RATE_MASK 0xC000 /* DC1_RATE - [15:14] */
+#define WM831X_DC1_RATE_SHIFT 14 /* DC1_RATE - [15:14] */
+#define WM831X_DC1_RATE_WIDTH 2 /* DC1_RATE - [15:14] */
+#define WM831X_DC1_PHASE 0x1000 /* DC1_PHASE */
+#define WM831X_DC1_PHASE_MASK 0x1000 /* DC1_PHASE */
+#define WM831X_DC1_PHASE_SHIFT 12 /* DC1_PHASE */
+#define WM831X_DC1_PHASE_WIDTH 1 /* DC1_PHASE */
+#define WM831X_DC1_FREQ_MASK 0x0300 /* DC1_FREQ - [9:8] */
+#define WM831X_DC1_FREQ_SHIFT 8 /* DC1_FREQ - [9:8] */
+#define WM831X_DC1_FREQ_WIDTH 2 /* DC1_FREQ - [9:8] */
+#define WM831X_DC1_FLT 0x0080 /* DC1_FLT */
+#define WM831X_DC1_FLT_MASK 0x0080 /* DC1_FLT */
+#define WM831X_DC1_FLT_SHIFT 7 /* DC1_FLT */
+#define WM831X_DC1_FLT_WIDTH 1 /* DC1_FLT */
+#define WM831X_DC1_SOFT_START_MASK 0x0030 /* DC1_SOFT_START - [5:4] */
+#define WM831X_DC1_SOFT_START_SHIFT 4 /* DC1_SOFT_START - [5:4] */
+#define WM831X_DC1_SOFT_START_WIDTH 2 /* DC1_SOFT_START - [5:4] */
+#define WM831X_DC1_CAP_MASK 0x0003 /* DC1_CAP - [1:0] */
+#define WM831X_DC1_CAP_SHIFT 0 /* DC1_CAP - [1:0] */
+#define WM831X_DC1_CAP_WIDTH 2 /* DC1_CAP - [1:0] */
+
+/*
+ * R16471 (0x4057) - DC1 Control 2
+ */
+#define WM831X_DC1_ERR_ACT_MASK 0xC000 /* DC1_ERR_ACT - [15:14] */
+#define WM831X_DC1_ERR_ACT_SHIFT 14 /* DC1_ERR_ACT - [15:14] */
+#define WM831X_DC1_ERR_ACT_WIDTH 2 /* DC1_ERR_ACT - [15:14] */
+#define WM831X_DC1_HWC_SRC_MASK 0x1800 /* DC1_HWC_SRC - [12:11] */
+#define WM831X_DC1_HWC_SRC_SHIFT 11 /* DC1_HWC_SRC - [12:11] */
+#define WM831X_DC1_HWC_SRC_WIDTH 2 /* DC1_HWC_SRC - [12:11] */
+#define WM831X_DC1_HWC_VSEL 0x0400 /* DC1_HWC_VSEL */
+#define WM831X_DC1_HWC_VSEL_MASK 0x0400 /* DC1_HWC_VSEL */
+#define WM831X_DC1_HWC_VSEL_SHIFT 10 /* DC1_HWC_VSEL */
+#define WM831X_DC1_HWC_VSEL_WIDTH 1 /* DC1_HWC_VSEL */
+#define WM831X_DC1_HWC_MODE_MASK 0x0300 /* DC1_HWC_MODE - [9:8] */
+#define WM831X_DC1_HWC_MODE_SHIFT 8 /* DC1_HWC_MODE - [9:8] */
+#define WM831X_DC1_HWC_MODE_WIDTH 2 /* DC1_HWC_MODE - [9:8] */
+#define WM831X_DC1_HC_THR_MASK 0x0070 /* DC1_HC_THR - [6:4] */
+#define WM831X_DC1_HC_THR_SHIFT 4 /* DC1_HC_THR - [6:4] */
+#define WM831X_DC1_HC_THR_WIDTH 3 /* DC1_HC_THR - [6:4] */
+#define WM831X_DC1_HC_IND_ENA 0x0001 /* DC1_HC_IND_ENA */
+#define WM831X_DC1_HC_IND_ENA_MASK 0x0001 /* DC1_HC_IND_ENA */
+#define WM831X_DC1_HC_IND_ENA_SHIFT 0 /* DC1_HC_IND_ENA */
+#define WM831X_DC1_HC_IND_ENA_WIDTH 1 /* DC1_HC_IND_ENA */
+
+/*
+ * R16472 (0x4058) - DC1 ON Config
+ */
+#define WM831X_DC1_ON_SLOT_MASK 0xE000 /* DC1_ON_SLOT - [15:13] */
+#define WM831X_DC1_ON_SLOT_SHIFT 13 /* DC1_ON_SLOT - [15:13] */
+#define WM831X_DC1_ON_SLOT_WIDTH 3 /* DC1_ON_SLOT - [15:13] */
+#define WM831X_DC1_ON_MODE_MASK 0x0300 /* DC1_ON_MODE - [9:8] */
+#define WM831X_DC1_ON_MODE_SHIFT 8 /* DC1_ON_MODE - [9:8] */
+#define WM831X_DC1_ON_MODE_WIDTH 2 /* DC1_ON_MODE - [9:8] */
+#define WM831X_DC1_ON_VSEL_MASK 0x007F /* DC1_ON_VSEL - [6:0] */
+#define WM831X_DC1_ON_VSEL_SHIFT 0 /* DC1_ON_VSEL - [6:0] */
+#define WM831X_DC1_ON_VSEL_WIDTH 7 /* DC1_ON_VSEL - [6:0] */
+
+/*
+ * R16473 (0x4059) - DC1 SLEEP Control
+ */
+#define WM831X_DC1_SLP_SLOT_MASK 0xE000 /* DC1_SLP_SLOT - [15:13] */
+#define WM831X_DC1_SLP_SLOT_SHIFT 13 /* DC1_SLP_SLOT - [15:13] */
+#define WM831X_DC1_SLP_SLOT_WIDTH 3 /* DC1_SLP_SLOT - [15:13] */
+#define WM831X_DC1_SLP_MODE_MASK 0x0300 /* DC1_SLP_MODE - [9:8] */
+#define WM831X_DC1_SLP_MODE_SHIFT 8 /* DC1_SLP_MODE - [9:8] */
+#define WM831X_DC1_SLP_MODE_WIDTH 2 /* DC1_SLP_MODE - [9:8] */
+#define WM831X_DC1_SLP_VSEL_MASK 0x007F /* DC1_SLP_VSEL - [6:0] */
+#define WM831X_DC1_SLP_VSEL_SHIFT 0 /* DC1_SLP_VSEL - [6:0] */
+#define WM831X_DC1_SLP_VSEL_WIDTH 7 /* DC1_SLP_VSEL - [6:0] */
+
+/*
+ * R16474 (0x405A) - DC1 DVS Control
+ */
+#define WM831X_DC1_DVS_SRC_MASK 0x1800 /* DC1_DVS_SRC - [12:11] */
+#define WM831X_DC1_DVS_SRC_SHIFT 11 /* DC1_DVS_SRC - [12:11] */
+#define WM831X_DC1_DVS_SRC_WIDTH 2 /* DC1_DVS_SRC - [12:11] */
+#define WM831X_DC1_DVS_VSEL_MASK 0x007F /* DC1_DVS_VSEL - [6:0] */
+#define WM831X_DC1_DVS_VSEL_SHIFT 0 /* DC1_DVS_VSEL - [6:0] */
+#define WM831X_DC1_DVS_VSEL_WIDTH 7 /* DC1_DVS_VSEL - [6:0] */
+
+/*
+ * R16475 (0x405B) - DC2 Control 1
+ */
+#define WM831X_DC2_RATE_MASK 0xC000 /* DC2_RATE - [15:14] */
+#define WM831X_DC2_RATE_SHIFT 14 /* DC2_RATE - [15:14] */
+#define WM831X_DC2_RATE_WIDTH 2 /* DC2_RATE - [15:14] */
+#define WM831X_DC2_PHASE 0x1000 /* DC2_PHASE */
+#define WM831X_DC2_PHASE_MASK 0x1000 /* DC2_PHASE */
+#define WM831X_DC2_PHASE_SHIFT 12 /* DC2_PHASE */
+#define WM831X_DC2_PHASE_WIDTH 1 /* DC2_PHASE */
+#define WM831X_DC2_FREQ_MASK 0x0300 /* DC2_FREQ - [9:8] */
+#define WM831X_DC2_FREQ_SHIFT 8 /* DC2_FREQ - [9:8] */
+#define WM831X_DC2_FREQ_WIDTH 2 /* DC2_FREQ - [9:8] */
+#define WM831X_DC2_FLT 0x0080 /* DC2_FLT */
+#define WM831X_DC2_FLT_MASK 0x0080 /* DC2_FLT */
+#define WM831X_DC2_FLT_SHIFT 7 /* DC2_FLT */
+#define WM831X_DC2_FLT_WIDTH 1 /* DC2_FLT */
+#define WM831X_DC2_SOFT_START_MASK 0x0030 /* DC2_SOFT_START - [5:4] */
+#define WM831X_DC2_SOFT_START_SHIFT 4 /* DC2_SOFT_START - [5:4] */
+#define WM831X_DC2_SOFT_START_WIDTH 2 /* DC2_SOFT_START - [5:4] */
+#define WM831X_DC2_CAP_MASK 0x0003 /* DC2_CAP - [1:0] */
+#define WM831X_DC2_CAP_SHIFT 0 /* DC2_CAP - [1:0] */
+#define WM831X_DC2_CAP_WIDTH 2 /* DC2_CAP - [1:0] */
+
+/*
+ * R16476 (0x405C) - DC2 Control 2
+ */
+#define WM831X_DC2_ERR_ACT_MASK 0xC000 /* DC2_ERR_ACT - [15:14] */
+#define WM831X_DC2_ERR_ACT_SHIFT 14 /* DC2_ERR_ACT - [15:14] */
+#define WM831X_DC2_ERR_ACT_WIDTH 2 /* DC2_ERR_ACT - [15:14] */
+#define WM831X_DC2_HWC_SRC_MASK 0x1800 /* DC2_HWC_SRC - [12:11] */
+#define WM831X_DC2_HWC_SRC_SHIFT 11 /* DC2_HWC_SRC - [12:11] */
+#define WM831X_DC2_HWC_SRC_WIDTH 2 /* DC2_HWC_SRC - [12:11] */
+#define WM831X_DC2_HWC_VSEL 0x0400 /* DC2_HWC_VSEL */
+#define WM831X_DC2_HWC_VSEL_MASK 0x0400 /* DC2_HWC_VSEL */
+#define WM831X_DC2_HWC_VSEL_SHIFT 10 /* DC2_HWC_VSEL */
+#define WM831X_DC2_HWC_VSEL_WIDTH 1 /* DC2_HWC_VSEL */
+#define WM831X_DC2_HWC_MODE_MASK 0x0300 /* DC2_HWC_MODE - [9:8] */
+#define WM831X_DC2_HWC_MODE_SHIFT 8 /* DC2_HWC_MODE - [9:8] */
+#define WM831X_DC2_HWC_MODE_WIDTH 2 /* DC2_HWC_MODE - [9:8] */
+#define WM831X_DC2_HC_THR_MASK 0x0070 /* DC2_HC_THR - [6:4] */
+#define WM831X_DC2_HC_THR_SHIFT 4 /* DC2_HC_THR - [6:4] */
+#define WM831X_DC2_HC_THR_WIDTH 3 /* DC2_HC_THR - [6:4] */
+#define WM831X_DC2_HC_IND_ENA 0x0001 /* DC2_HC_IND_ENA */
+#define WM831X_DC2_HC_IND_ENA_MASK 0x0001 /* DC2_HC_IND_ENA */
+#define WM831X_DC2_HC_IND_ENA_SHIFT 0 /* DC2_HC_IND_ENA */
+#define WM831X_DC2_HC_IND_ENA_WIDTH 1 /* DC2_HC_IND_ENA */
+
+/*
+ * R16477 (0x405D) - DC2 ON Config
+ */
+#define WM831X_DC2_ON_SLOT_MASK 0xE000 /* DC2_ON_SLOT - [15:13] */
+#define WM831X_DC2_ON_SLOT_SHIFT 13 /* DC2_ON_SLOT - [15:13] */
+#define WM831X_DC2_ON_SLOT_WIDTH 3 /* DC2_ON_SLOT - [15:13] */
+#define WM831X_DC2_ON_MODE_MASK 0x0300 /* DC2_ON_MODE - [9:8] */
+#define WM831X_DC2_ON_MODE_SHIFT 8 /* DC2_ON_MODE - [9:8] */
+#define WM831X_DC2_ON_MODE_WIDTH 2 /* DC2_ON_MODE - [9:8] */
+#define WM831X_DC2_ON_VSEL_MASK 0x007F /* DC2_ON_VSEL - [6:0] */
+#define WM831X_DC2_ON_VSEL_SHIFT 0 /* DC2_ON_VSEL - [6:0] */
+#define WM831X_DC2_ON_VSEL_WIDTH 7 /* DC2_ON_VSEL - [6:0] */
+
+/*
+ * R16478 (0x405E) - DC2 SLEEP Control
+ */
+#define WM831X_DC2_SLP_SLOT_MASK 0xE000 /* DC2_SLP_SLOT - [15:13] */
+#define WM831X_DC2_SLP_SLOT_SHIFT 13 /* DC2_SLP_SLOT - [15:13] */
+#define WM831X_DC2_SLP_SLOT_WIDTH 3 /* DC2_SLP_SLOT - [15:13] */
+#define WM831X_DC2_SLP_MODE_MASK 0x0300 /* DC2_SLP_MODE - [9:8] */
+#define WM831X_DC2_SLP_MODE_SHIFT 8 /* DC2_SLP_MODE - [9:8] */
+#define WM831X_DC2_SLP_MODE_WIDTH 2 /* DC2_SLP_MODE - [9:8] */
+#define WM831X_DC2_SLP_VSEL_MASK 0x007F /* DC2_SLP_VSEL - [6:0] */
+#define WM831X_DC2_SLP_VSEL_SHIFT 0 /* DC2_SLP_VSEL - [6:0] */
+#define WM831X_DC2_SLP_VSEL_WIDTH 7 /* DC2_SLP_VSEL - [6:0] */
+
+/*
+ * R16479 (0x405F) - DC2 DVS Control
+ */
+#define WM831X_DC2_DVS_SRC_MASK 0x1800 /* DC2_DVS_SRC - [12:11] */
+#define WM831X_DC2_DVS_SRC_SHIFT 11 /* DC2_DVS_SRC - [12:11] */
+#define WM831X_DC2_DVS_SRC_WIDTH 2 /* DC2_DVS_SRC - [12:11] */
+#define WM831X_DC2_DVS_VSEL_MASK 0x007F /* DC2_DVS_VSEL - [6:0] */
+#define WM831X_DC2_DVS_VSEL_SHIFT 0 /* DC2_DVS_VSEL - [6:0] */
+#define WM831X_DC2_DVS_VSEL_WIDTH 7 /* DC2_DVS_VSEL - [6:0] */
+
+/*
+ * R16480 (0x4060) - DC3 Control 1
+ */
+#define WM831X_DC3_PHASE 0x1000 /* DC3_PHASE */
+#define WM831X_DC3_PHASE_MASK 0x1000 /* DC3_PHASE */
+#define WM831X_DC3_PHASE_SHIFT 12 /* DC3_PHASE */
+#define WM831X_DC3_PHASE_WIDTH 1 /* DC3_PHASE */
+#define WM831X_DC3_FLT 0x0080 /* DC3_FLT */
+#define WM831X_DC3_FLT_MASK 0x0080 /* DC3_FLT */
+#define WM831X_DC3_FLT_SHIFT 7 /* DC3_FLT */
+#define WM831X_DC3_FLT_WIDTH 1 /* DC3_FLT */
+#define WM831X_DC3_SOFT_START_MASK 0x0030 /* DC3_SOFT_START - [5:4] */
+#define WM831X_DC3_SOFT_START_SHIFT 4 /* DC3_SOFT_START - [5:4] */
+#define WM831X_DC3_SOFT_START_WIDTH 2 /* DC3_SOFT_START - [5:4] */
+#define WM831X_DC3_STNBY_LIM_MASK 0x000C /* DC3_STNBY_LIM - [3:2] */
+#define WM831X_DC3_STNBY_LIM_SHIFT 2 /* DC3_STNBY_LIM - [3:2] */
+#define WM831X_DC3_STNBY_LIM_WIDTH 2 /* DC3_STNBY_LIM - [3:2] */
+#define WM831X_DC3_CAP_MASK 0x0003 /* DC3_CAP - [1:0] */
+#define WM831X_DC3_CAP_SHIFT 0 /* DC3_CAP - [1:0] */
+#define WM831X_DC3_CAP_WIDTH 2 /* DC3_CAP - [1:0] */
+
+/*
+ * R16481 (0x4061) - DC3 Control 2
+ */
+#define WM831X_DC3_ERR_ACT_MASK 0xC000 /* DC3_ERR_ACT - [15:14] */
+#define WM831X_DC3_ERR_ACT_SHIFT 14 /* DC3_ERR_ACT - [15:14] */
+#define WM831X_DC3_ERR_ACT_WIDTH 2 /* DC3_ERR_ACT - [15:14] */
+#define WM831X_DC3_HWC_SRC_MASK 0x1800 /* DC3_HWC_SRC - [12:11] */
+#define WM831X_DC3_HWC_SRC_SHIFT 11 /* DC3_HWC_SRC - [12:11] */
+#define WM831X_DC3_HWC_SRC_WIDTH 2 /* DC3_HWC_SRC - [12:11] */
+#define WM831X_DC3_HWC_VSEL 0x0400 /* DC3_HWC_VSEL */
+#define WM831X_DC3_HWC_VSEL_MASK 0x0400 /* DC3_HWC_VSEL */
+#define WM831X_DC3_HWC_VSEL_SHIFT 10 /* DC3_HWC_VSEL */
+#define WM831X_DC3_HWC_VSEL_WIDTH 1 /* DC3_HWC_VSEL */
+#define WM831X_DC3_HWC_MODE_MASK 0x0300 /* DC3_HWC_MODE - [9:8] */
+#define WM831X_DC3_HWC_MODE_SHIFT 8 /* DC3_HWC_MODE - [9:8] */
+#define WM831X_DC3_HWC_MODE_WIDTH 2 /* DC3_HWC_MODE - [9:8] */
+#define WM831X_DC3_OVP 0x0080 /* DC3_OVP */
+#define WM831X_DC3_OVP_MASK 0x0080 /* DC3_OVP */
+#define WM831X_DC3_OVP_SHIFT 7 /* DC3_OVP */
+#define WM831X_DC3_OVP_WIDTH 1 /* DC3_OVP */
+
+/*
+ * R16482 (0x4062) - DC3 ON Config
+ */
+#define WM831X_DC3_ON_SLOT_MASK 0xE000 /* DC3_ON_SLOT - [15:13] */
+#define WM831X_DC3_ON_SLOT_SHIFT 13 /* DC3_ON_SLOT - [15:13] */
+#define WM831X_DC3_ON_SLOT_WIDTH 3 /* DC3_ON_SLOT - [15:13] */
+#define WM831X_DC3_ON_MODE_MASK 0x0300 /* DC3_ON_MODE - [9:8] */
+#define WM831X_DC3_ON_MODE_SHIFT 8 /* DC3_ON_MODE - [9:8] */
+#define WM831X_DC3_ON_MODE_WIDTH 2 /* DC3_ON_MODE - [9:8] */
+#define WM831X_DC3_ON_VSEL_MASK 0x007F /* DC3_ON_VSEL - [6:0] */
+#define WM831X_DC3_ON_VSEL_SHIFT 0 /* DC3_ON_VSEL - [6:0] */
+#define WM831X_DC3_ON_VSEL_WIDTH 7 /* DC3_ON_VSEL - [6:0] */
+
+/*
+ * R16483 (0x4063) - DC3 SLEEP Control
+ */
+#define WM831X_DC3_SLP_SLOT_MASK 0xE000 /* DC3_SLP_SLOT - [15:13] */
+#define WM831X_DC3_SLP_SLOT_SHIFT 13 /* DC3_SLP_SLOT - [15:13] */
+#define WM831X_DC3_SLP_SLOT_WIDTH 3 /* DC3_SLP_SLOT - [15:13] */
+#define WM831X_DC3_SLP_MODE_MASK 0x0300 /* DC3_SLP_MODE - [9:8] */
+#define WM831X_DC3_SLP_MODE_SHIFT 8 /* DC3_SLP_MODE - [9:8] */
+#define WM831X_DC3_SLP_MODE_WIDTH 2 /* DC3_SLP_MODE - [9:8] */
+#define WM831X_DC3_SLP_VSEL_MASK 0x007F /* DC3_SLP_VSEL - [6:0] */
+#define WM831X_DC3_SLP_VSEL_SHIFT 0 /* DC3_SLP_VSEL - [6:0] */
+#define WM831X_DC3_SLP_VSEL_WIDTH 7 /* DC3_SLP_VSEL - [6:0] */
+
+/*
+ * R16484 (0x4064) - DC4 Control
+ */
+#define WM831X_DC4_ERR_ACT_MASK 0xC000 /* DC4_ERR_ACT - [15:14] */
+#define WM831X_DC4_ERR_ACT_SHIFT 14 /* DC4_ERR_ACT - [15:14] */
+#define WM831X_DC4_ERR_ACT_WIDTH 2 /* DC4_ERR_ACT - [15:14] */
+#define WM831X_DC4_HWC_SRC_MASK 0x1800 /* DC4_HWC_SRC - [12:11] */
+#define WM831X_DC4_HWC_SRC_SHIFT 11 /* DC4_HWC_SRC - [12:11] */
+#define WM831X_DC4_HWC_SRC_WIDTH 2 /* DC4_HWC_SRC - [12:11] */
+#define WM831X_DC4_HWC_MODE 0x0100 /* DC4_HWC_MODE */
+#define WM831X_DC4_HWC_MODE_MASK 0x0100 /* DC4_HWC_MODE */
+#define WM831X_DC4_HWC_MODE_SHIFT 8 /* DC4_HWC_MODE */
+#define WM831X_DC4_HWC_MODE_WIDTH 1 /* DC4_HWC_MODE */
+#define WM831X_DC4_RANGE_MASK 0x000C /* DC4_RANGE - [3:2] */
+#define WM831X_DC4_RANGE_SHIFT 2 /* DC4_RANGE - [3:2] */
+#define WM831X_DC4_RANGE_WIDTH 2 /* DC4_RANGE - [3:2] */
+#define WM831X_DC4_FBSRC 0x0001 /* DC4_FBSRC */
+#define WM831X_DC4_FBSRC_MASK 0x0001 /* DC4_FBSRC */
+#define WM831X_DC4_FBSRC_SHIFT 0 /* DC4_FBSRC */
+#define WM831X_DC4_FBSRC_WIDTH 1 /* DC4_FBSRC */
+
+/*
+ * R16485 (0x4065) - DC4 SLEEP Control
+ */
+#define WM831X_DC4_SLPENA 0x0100 /* DC4_SLPENA */
+#define WM831X_DC4_SLPENA_MASK 0x0100 /* DC4_SLPENA */
+#define WM831X_DC4_SLPENA_SHIFT 8 /* DC4_SLPENA */
+#define WM831X_DC4_SLPENA_WIDTH 1 /* DC4_SLPENA */
+
+/*
+ * R16488 (0x4068) - LDO1 Control
+ */
+#define WM831X_LDO1_ERR_ACT_MASK 0xC000 /* LDO1_ERR_ACT - [15:14] */
+#define WM831X_LDO1_ERR_ACT_SHIFT 14 /* LDO1_ERR_ACT - [15:14] */
+#define WM831X_LDO1_ERR_ACT_WIDTH 2 /* LDO1_ERR_ACT - [15:14] */
+#define WM831X_LDO1_HWC_SRC_MASK 0x1800 /* LDO1_HWC_SRC - [12:11] */
+#define WM831X_LDO1_HWC_SRC_SHIFT 11 /* LDO1_HWC_SRC - [12:11] */
+#define WM831X_LDO1_HWC_SRC_WIDTH 2 /* LDO1_HWC_SRC - [12:11] */
+#define WM831X_LDO1_HWC_VSEL 0x0400 /* LDO1_HWC_VSEL */
+#define WM831X_LDO1_HWC_VSEL_MASK 0x0400 /* LDO1_HWC_VSEL */
+#define WM831X_LDO1_HWC_VSEL_SHIFT 10 /* LDO1_HWC_VSEL */
+#define WM831X_LDO1_HWC_VSEL_WIDTH 1 /* LDO1_HWC_VSEL */
+#define WM831X_LDO1_HWC_MODE_MASK 0x0300 /* LDO1_HWC_MODE - [9:8] */
+#define WM831X_LDO1_HWC_MODE_SHIFT 8 /* LDO1_HWC_MODE - [9:8] */
+#define WM831X_LDO1_HWC_MODE_WIDTH 2 /* LDO1_HWC_MODE - [9:8] */
+#define WM831X_LDO1_FLT 0x0080 /* LDO1_FLT */
+#define WM831X_LDO1_FLT_MASK 0x0080 /* LDO1_FLT */
+#define WM831X_LDO1_FLT_SHIFT 7 /* LDO1_FLT */
+#define WM831X_LDO1_FLT_WIDTH 1 /* LDO1_FLT */
+#define WM831X_LDO1_SWI 0x0040 /* LDO1_SWI */
+#define WM831X_LDO1_SWI_MASK 0x0040 /* LDO1_SWI */
+#define WM831X_LDO1_SWI_SHIFT 6 /* LDO1_SWI */
+#define WM831X_LDO1_SWI_WIDTH 1 /* LDO1_SWI */
+#define WM831X_LDO1_LP_MODE 0x0001 /* LDO1_LP_MODE */
+#define WM831X_LDO1_LP_MODE_MASK 0x0001 /* LDO1_LP_MODE */
+#define WM831X_LDO1_LP_MODE_SHIFT 0 /* LDO1_LP_MODE */
+#define WM831X_LDO1_LP_MODE_WIDTH 1 /* LDO1_LP_MODE */
+
+/*
+ * R16489 (0x4069) - LDO1 ON Control
+ */
+#define WM831X_LDO1_ON_SLOT_MASK 0xE000 /* LDO1_ON_SLOT - [15:13] */
+#define WM831X_LDO1_ON_SLOT_SHIFT 13 /* LDO1_ON_SLOT - [15:13] */
+#define WM831X_LDO1_ON_SLOT_WIDTH 3 /* LDO1_ON_SLOT - [15:13] */
+#define WM831X_LDO1_ON_MODE 0x0100 /* LDO1_ON_MODE */
+#define WM831X_LDO1_ON_MODE_MASK 0x0100 /* LDO1_ON_MODE */
+#define WM831X_LDO1_ON_MODE_SHIFT 8 /* LDO1_ON_MODE */
+#define WM831X_LDO1_ON_MODE_WIDTH 1 /* LDO1_ON_MODE */
+#define WM831X_LDO1_ON_VSEL_MASK 0x001F /* LDO1_ON_VSEL - [4:0] */
+#define WM831X_LDO1_ON_VSEL_SHIFT 0 /* LDO1_ON_VSEL - [4:0] */
+#define WM831X_LDO1_ON_VSEL_WIDTH 5 /* LDO1_ON_VSEL - [4:0] */
+
+/*
+ * R16490 (0x406A) - LDO1 SLEEP Control
+ */
+#define WM831X_LDO1_SLP_SLOT_MASK 0xE000 /* LDO1_SLP_SLOT - [15:13] */
+#define WM831X_LDO1_SLP_SLOT_SHIFT 13 /* LDO1_SLP_SLOT - [15:13] */
+#define WM831X_LDO1_SLP_SLOT_WIDTH 3 /* LDO1_SLP_SLOT - [15:13] */
+#define WM831X_LDO1_SLP_MODE 0x0100 /* LDO1_SLP_MODE */
+#define WM831X_LDO1_SLP_MODE_MASK 0x0100 /* LDO1_SLP_MODE */
+#define WM831X_LDO1_SLP_MODE_SHIFT 8 /* LDO1_SLP_MODE */
+#define WM831X_LDO1_SLP_MODE_WIDTH 1 /* LDO1_SLP_MODE */
+#define WM831X_LDO1_SLP_VSEL_MASK 0x001F /* LDO1_SLP_VSEL - [4:0] */
+#define WM831X_LDO1_SLP_VSEL_SHIFT 0 /* LDO1_SLP_VSEL - [4:0] */
+#define WM831X_LDO1_SLP_VSEL_WIDTH 5 /* LDO1_SLP_VSEL - [4:0] */
+
+/*
+ * R16491 (0x406B) - LDO2 Control
+ */
+#define WM831X_LDO2_ERR_ACT_MASK 0xC000 /* LDO2_ERR_ACT - [15:14] */
+#define WM831X_LDO2_ERR_ACT_SHIFT 14 /* LDO2_ERR_ACT - [15:14] */
+#define WM831X_LDO2_ERR_ACT_WIDTH 2 /* LDO2_ERR_ACT - [15:14] */
+#define WM831X_LDO2_HWC_SRC_MASK 0x1800 /* LDO2_HWC_SRC - [12:11] */
+#define WM831X_LDO2_HWC_SRC_SHIFT 11 /* LDO2_HWC_SRC - [12:11] */
+#define WM831X_LDO2_HWC_SRC_WIDTH 2 /* LDO2_HWC_SRC - [12:11] */
+#define WM831X_LDO2_HWC_VSEL 0x0400 /* LDO2_HWC_VSEL */
+#define WM831X_LDO2_HWC_VSEL_MASK 0x0400 /* LDO2_HWC_VSEL */
+#define WM831X_LDO2_HWC_VSEL_SHIFT 10 /* LDO2_HWC_VSEL */
+#define WM831X_LDO2_HWC_VSEL_WIDTH 1 /* LDO2_HWC_VSEL */
+#define WM831X_LDO2_HWC_MODE_MASK 0x0300 /* LDO2_HWC_MODE - [9:8] */
+#define WM831X_LDO2_HWC_MODE_SHIFT 8 /* LDO2_HWC_MODE - [9:8] */
+#define WM831X_LDO2_HWC_MODE_WIDTH 2 /* LDO2_HWC_MODE - [9:8] */
+#define WM831X_LDO2_FLT 0x0080 /* LDO2_FLT */
+#define WM831X_LDO2_FLT_MASK 0x0080 /* LDO2_FLT */
+#define WM831X_LDO2_FLT_SHIFT 7 /* LDO2_FLT */
+#define WM831X_LDO2_FLT_WIDTH 1 /* LDO2_FLT */
+#define WM831X_LDO2_SWI 0x0040 /* LDO2_SWI */
+#define WM831X_LDO2_SWI_MASK 0x0040 /* LDO2_SWI */
+#define WM831X_LDO2_SWI_SHIFT 6 /* LDO2_SWI */
+#define WM831X_LDO2_SWI_WIDTH 1 /* LDO2_SWI */
+#define WM831X_LDO2_LP_MODE 0x0001 /* LDO2_LP_MODE */
+#define WM831X_LDO2_LP_MODE_MASK 0x0001 /* LDO2_LP_MODE */
+#define WM831X_LDO2_LP_MODE_SHIFT 0 /* LDO2_LP_MODE */
+#define WM831X_LDO2_LP_MODE_WIDTH 1 /* LDO2_LP_MODE */
+
+/*
+ * R16492 (0x406C) - LDO2 ON Control
+ */
+#define WM831X_LDO2_ON_SLOT_MASK 0xE000 /* LDO2_ON_SLOT - [15:13] */
+#define WM831X_LDO2_ON_SLOT_SHIFT 13 /* LDO2_ON_SLOT - [15:13] */
+#define WM831X_LDO2_ON_SLOT_WIDTH 3 /* LDO2_ON_SLOT - [15:13] */
+#define WM831X_LDO2_ON_MODE 0x0100 /* LDO2_ON_MODE */
+#define WM831X_LDO2_ON_MODE_MASK 0x0100 /* LDO2_ON_MODE */
+#define WM831X_LDO2_ON_MODE_SHIFT 8 /* LDO2_ON_MODE */
+#define WM831X_LDO2_ON_MODE_WIDTH 1 /* LDO2_ON_MODE */
+#define WM831X_LDO2_ON_VSEL_MASK 0x001F /* LDO2_ON_VSEL - [4:0] */
+#define WM831X_LDO2_ON_VSEL_SHIFT 0 /* LDO2_ON_VSEL - [4:0] */
+#define WM831X_LDO2_ON_VSEL_WIDTH 5 /* LDO2_ON_VSEL - [4:0] */
+
+/*
+ * R16493 (0x406D) - LDO2 SLEEP Control
+ */
+#define WM831X_LDO2_SLP_SLOT_MASK 0xE000 /* LDO2_SLP_SLOT - [15:13] */
+#define WM831X_LDO2_SLP_SLOT_SHIFT 13 /* LDO2_SLP_SLOT - [15:13] */
+#define WM831X_LDO2_SLP_SLOT_WIDTH 3 /* LDO2_SLP_SLOT - [15:13] */
+#define WM831X_LDO2_SLP_MODE 0x0100 /* LDO2_SLP_MODE */
+#define WM831X_LDO2_SLP_MODE_MASK 0x0100 /* LDO2_SLP_MODE */
+#define WM831X_LDO2_SLP_MODE_SHIFT 8 /* LDO2_SLP_MODE */
+#define WM831X_LDO2_SLP_MODE_WIDTH 1 /* LDO2_SLP_MODE */
+#define WM831X_LDO2_SLP_VSEL_MASK 0x001F /* LDO2_SLP_VSEL - [4:0] */
+#define WM831X_LDO2_SLP_VSEL_SHIFT 0 /* LDO2_SLP_VSEL - [4:0] */
+#define WM831X_LDO2_SLP_VSEL_WIDTH 5 /* LDO2_SLP_VSEL - [4:0] */
+
+/*
+ * R16494 (0x406E) - LDO3 Control
+ */
+#define WM831X_LDO3_ERR_ACT_MASK 0xC000 /* LDO3_ERR_ACT - [15:14] */
+#define WM831X_LDO3_ERR_ACT_SHIFT 14 /* LDO3_ERR_ACT - [15:14] */
+#define WM831X_LDO3_ERR_ACT_WIDTH 2 /* LDO3_ERR_ACT - [15:14] */
+#define WM831X_LDO3_HWC_SRC_MASK 0x1800 /* LDO3_HWC_SRC - [12:11] */
+#define WM831X_LDO3_HWC_SRC_SHIFT 11 /* LDO3_HWC_SRC - [12:11] */
+#define WM831X_LDO3_HWC_SRC_WIDTH 2 /* LDO3_HWC_SRC - [12:11] */
+#define WM831X_LDO3_HWC_VSEL 0x0400 /* LDO3_HWC_VSEL */
+#define WM831X_LDO3_HWC_VSEL_MASK 0x0400 /* LDO3_HWC_VSEL */
+#define WM831X_LDO3_HWC_VSEL_SHIFT 10 /* LDO3_HWC_VSEL */
+#define WM831X_LDO3_HWC_VSEL_WIDTH 1 /* LDO3_HWC_VSEL */
+#define WM831X_LDO3_HWC_MODE_MASK 0x0300 /* LDO3_HWC_MODE - [9:8] */
+#define WM831X_LDO3_HWC_MODE_SHIFT 8 /* LDO3_HWC_MODE - [9:8] */
+#define WM831X_LDO3_HWC_MODE_WIDTH 2 /* LDO3_HWC_MODE - [9:8] */
+#define WM831X_LDO3_FLT 0x0080 /* LDO3_FLT */
+#define WM831X_LDO3_FLT_MASK 0x0080 /* LDO3_FLT */
+#define WM831X_LDO3_FLT_SHIFT 7 /* LDO3_FLT */
+#define WM831X_LDO3_FLT_WIDTH 1 /* LDO3_FLT */
+#define WM831X_LDO3_SWI 0x0040 /* LDO3_SWI */
+#define WM831X_LDO3_SWI_MASK 0x0040 /* LDO3_SWI */
+#define WM831X_LDO3_SWI_SHIFT 6 /* LDO3_SWI */
+#define WM831X_LDO3_SWI_WIDTH 1 /* LDO3_SWI */
+#define WM831X_LDO3_LP_MODE 0x0001 /* LDO3_LP_MODE */
+#define WM831X_LDO3_LP_MODE_MASK 0x0001 /* LDO3_LP_MODE */
+#define WM831X_LDO3_LP_MODE_SHIFT 0 /* LDO3_LP_MODE */
+#define WM831X_LDO3_LP_MODE_WIDTH 1 /* LDO3_LP_MODE */
+
+/*
+ * R16495 (0x406F) - LDO3 ON Control
+ */
+#define WM831X_LDO3_ON_SLOT_MASK 0xE000 /* LDO3_ON_SLOT - [15:13] */
+#define WM831X_LDO3_ON_SLOT_SHIFT 13 /* LDO3_ON_SLOT - [15:13] */
+#define WM831X_LDO3_ON_SLOT_WIDTH 3 /* LDO3_ON_SLOT - [15:13] */
+#define WM831X_LDO3_ON_MODE 0x0100 /* LDO3_ON_MODE */
+#define WM831X_LDO3_ON_MODE_MASK 0x0100 /* LDO3_ON_MODE */
+#define WM831X_LDO3_ON_MODE_SHIFT 8 /* LDO3_ON_MODE */
+#define WM831X_LDO3_ON_MODE_WIDTH 1 /* LDO3_ON_MODE */
+#define WM831X_LDO3_ON_VSEL_MASK 0x001F /* LDO3_ON_VSEL - [4:0] */
+#define WM831X_LDO3_ON_VSEL_SHIFT 0 /* LDO3_ON_VSEL - [4:0] */
+#define WM831X_LDO3_ON_VSEL_WIDTH 5 /* LDO3_ON_VSEL - [4:0] */
+
+/*
+ * R16496 (0x4070) - LDO3 SLEEP Control
+ */
+#define WM831X_LDO3_SLP_SLOT_MASK 0xE000 /* LDO3_SLP_SLOT - [15:13] */
+#define WM831X_LDO3_SLP_SLOT_SHIFT 13 /* LDO3_SLP_SLOT - [15:13] */
+#define WM831X_LDO3_SLP_SLOT_WIDTH 3 /* LDO3_SLP_SLOT - [15:13] */
+#define WM831X_LDO3_SLP_MODE 0x0100 /* LDO3_SLP_MODE */
+#define WM831X_LDO3_SLP_MODE_MASK 0x0100 /* LDO3_SLP_MODE */
+#define WM831X_LDO3_SLP_MODE_SHIFT 8 /* LDO3_SLP_MODE */
+#define WM831X_LDO3_SLP_MODE_WIDTH 1 /* LDO3_SLP_MODE */
+#define WM831X_LDO3_SLP_VSEL_MASK 0x001F /* LDO3_SLP_VSEL - [4:0] */
+#define WM831X_LDO3_SLP_VSEL_SHIFT 0 /* LDO3_SLP_VSEL - [4:0] */
+#define WM831X_LDO3_SLP_VSEL_WIDTH 5 /* LDO3_SLP_VSEL - [4:0] */
+
+/*
+ * R16497 (0x4071) - LDO4 Control
+ */
+#define WM831X_LDO4_ERR_ACT_MASK 0xC000 /* LDO4_ERR_ACT - [15:14] */
+#define WM831X_LDO4_ERR_ACT_SHIFT 14 /* LDO4_ERR_ACT - [15:14] */
+#define WM831X_LDO4_ERR_ACT_WIDTH 2 /* LDO4_ERR_ACT - [15:14] */
+#define WM831X_LDO4_HWC_SRC_MASK 0x1800 /* LDO4_HWC_SRC - [12:11] */
+#define WM831X_LDO4_HWC_SRC_SHIFT 11 /* LDO4_HWC_SRC - [12:11] */
+#define WM831X_LDO4_HWC_SRC_WIDTH 2 /* LDO4_HWC_SRC - [12:11] */
+#define WM831X_LDO4_HWC_VSEL 0x0400 /* LDO4_HWC_VSEL */
+#define WM831X_LDO4_HWC_VSEL_MASK 0x0400 /* LDO4_HWC_VSEL */
+#define WM831X_LDO4_HWC_VSEL_SHIFT 10 /* LDO4_HWC_VSEL */
+#define WM831X_LDO4_HWC_VSEL_WIDTH 1 /* LDO4_HWC_VSEL */
+#define WM831X_LDO4_HWC_MODE_MASK 0x0300 /* LDO4_HWC_MODE - [9:8] */
+#define WM831X_LDO4_HWC_MODE_SHIFT 8 /* LDO4_HWC_MODE - [9:8] */
+#define WM831X_LDO4_HWC_MODE_WIDTH 2 /* LDO4_HWC_MODE - [9:8] */
+#define WM831X_LDO4_FLT 0x0080 /* LDO4_FLT */
+#define WM831X_LDO4_FLT_MASK 0x0080 /* LDO4_FLT */
+#define WM831X_LDO4_FLT_SHIFT 7 /* LDO4_FLT */
+#define WM831X_LDO4_FLT_WIDTH 1 /* LDO4_FLT */
+#define WM831X_LDO4_SWI 0x0040 /* LDO4_SWI */
+#define WM831X_LDO4_SWI_MASK 0x0040 /* LDO4_SWI */
+#define WM831X_LDO4_SWI_SHIFT 6 /* LDO4_SWI */
+#define WM831X_LDO4_SWI_WIDTH 1 /* LDO4_SWI */
+#define WM831X_LDO4_LP_MODE 0x0001 /* LDO4_LP_MODE */
+#define WM831X_LDO4_LP_MODE_MASK 0x0001 /* LDO4_LP_MODE */
+#define WM831X_LDO4_LP_MODE_SHIFT 0 /* LDO4_LP_MODE */
+#define WM831X_LDO4_LP_MODE_WIDTH 1 /* LDO4_LP_MODE */
+
+/*
+ * R16498 (0x4072) - LDO4 ON Control
+ */
+#define WM831X_LDO4_ON_SLOT_MASK 0xE000 /* LDO4_ON_SLOT - [15:13] */
+#define WM831X_LDO4_ON_SLOT_SHIFT 13 /* LDO4_ON_SLOT - [15:13] */
+#define WM831X_LDO4_ON_SLOT_WIDTH 3 /* LDO4_ON_SLOT - [15:13] */
+#define WM831X_LDO4_ON_MODE 0x0100 /* LDO4_ON_MODE */
+#define WM831X_LDO4_ON_MODE_MASK 0x0100 /* LDO4_ON_MODE */
+#define WM831X_LDO4_ON_MODE_SHIFT 8 /* LDO4_ON_MODE */
+#define WM831X_LDO4_ON_MODE_WIDTH 1 /* LDO4_ON_MODE */
+#define WM831X_LDO4_ON_VSEL_MASK 0x001F /* LDO4_ON_VSEL - [4:0] */
+#define WM831X_LDO4_ON_VSEL_SHIFT 0 /* LDO4_ON_VSEL - [4:0] */
+#define WM831X_LDO4_ON_VSEL_WIDTH 5 /* LDO4_ON_VSEL - [4:0] */
+
+/*
+ * R16499 (0x4073) - LDO4 SLEEP Control
+ */
+#define WM831X_LDO4_SLP_SLOT_MASK 0xE000 /* LDO4_SLP_SLOT - [15:13] */
+#define WM831X_LDO4_SLP_SLOT_SHIFT 13 /* LDO4_SLP_SLOT - [15:13] */
+#define WM831X_LDO4_SLP_SLOT_WIDTH 3 /* LDO4_SLP_SLOT - [15:13] */
+#define WM831X_LDO4_SLP_MODE 0x0100 /* LDO4_SLP_MODE */
+#define WM831X_LDO4_SLP_MODE_MASK 0x0100 /* LDO4_SLP_MODE */
+#define WM831X_LDO4_SLP_MODE_SHIFT 8 /* LDO4_SLP_MODE */
+#define WM831X_LDO4_SLP_MODE_WIDTH 1 /* LDO4_SLP_MODE */
+#define WM831X_LDO4_SLP_VSEL_MASK 0x001F /* LDO4_SLP_VSEL - [4:0] */
+#define WM831X_LDO4_SLP_VSEL_SHIFT 0 /* LDO4_SLP_VSEL - [4:0] */
+#define WM831X_LDO4_SLP_VSEL_WIDTH 5 /* LDO4_SLP_VSEL - [4:0] */
+
+/*
+ * R16500 (0x4074) - LDO5 Control
+ */
+#define WM831X_LDO5_ERR_ACT_MASK 0xC000 /* LDO5_ERR_ACT - [15:14] */
+#define WM831X_LDO5_ERR_ACT_SHIFT 14 /* LDO5_ERR_ACT - [15:14] */
+#define WM831X_LDO5_ERR_ACT_WIDTH 2 /* LDO5_ERR_ACT - [15:14] */
+#define WM831X_LDO5_HWC_SRC_MASK 0x1800 /* LDO5_HWC_SRC - [12:11] */
+#define WM831X_LDO5_HWC_SRC_SHIFT 11 /* LDO5_HWC_SRC - [12:11] */
+#define WM831X_LDO5_HWC_SRC_WIDTH 2 /* LDO5_HWC_SRC - [12:11] */
+#define WM831X_LDO5_HWC_VSEL 0x0400 /* LDO5_HWC_VSEL */
+#define WM831X_LDO5_HWC_VSEL_MASK 0x0400 /* LDO5_HWC_VSEL */
+#define WM831X_LDO5_HWC_VSEL_SHIFT 10 /* LDO5_HWC_VSEL */
+#define WM831X_LDO5_HWC_VSEL_WIDTH 1 /* LDO5_HWC_VSEL */
+#define WM831X_LDO5_HWC_MODE_MASK 0x0300 /* LDO5_HWC_MODE - [9:8] */
+#define WM831X_LDO5_HWC_MODE_SHIFT 8 /* LDO5_HWC_MODE - [9:8] */
+#define WM831X_LDO5_HWC_MODE_WIDTH 2 /* LDO5_HWC_MODE - [9:8] */
+#define WM831X_LDO5_FLT 0x0080 /* LDO5_FLT */
+#define WM831X_LDO5_FLT_MASK 0x0080 /* LDO5_FLT */
+#define WM831X_LDO5_FLT_SHIFT 7 /* LDO5_FLT */
+#define WM831X_LDO5_FLT_WIDTH 1 /* LDO5_FLT */
+#define WM831X_LDO5_SWI 0x0040 /* LDO5_SWI */
+#define WM831X_LDO5_SWI_MASK 0x0040 /* LDO5_SWI */
+#define WM831X_LDO5_SWI_SHIFT 6 /* LDO5_SWI */
+#define WM831X_LDO5_SWI_WIDTH 1 /* LDO5_SWI */
+#define WM831X_LDO5_LP_MODE 0x0001 /* LDO5_LP_MODE */
+#define WM831X_LDO5_LP_MODE_MASK 0x0001 /* LDO5_LP_MODE */
+#define WM831X_LDO5_LP_MODE_SHIFT 0 /* LDO5_LP_MODE */
+#define WM831X_LDO5_LP_MODE_WIDTH 1 /* LDO5_LP_MODE */
+
+/*
+ * R16501 (0x4075) - LDO5 ON Control
+ */
+#define WM831X_LDO5_ON_SLOT_MASK 0xE000 /* LDO5_ON_SLOT - [15:13] */
+#define WM831X_LDO5_ON_SLOT_SHIFT 13 /* LDO5_ON_SLOT - [15:13] */
+#define WM831X_LDO5_ON_SLOT_WIDTH 3 /* LDO5_ON_SLOT - [15:13] */
+#define WM831X_LDO5_ON_MODE 0x0100 /* LDO5_ON_MODE */
+#define WM831X_LDO5_ON_MODE_MASK 0x0100 /* LDO5_ON_MODE */
+#define WM831X_LDO5_ON_MODE_SHIFT 8 /* LDO5_ON_MODE */
+#define WM831X_LDO5_ON_MODE_WIDTH 1 /* LDO5_ON_MODE */
+#define WM831X_LDO5_ON_VSEL_MASK 0x001F /* LDO5_ON_VSEL - [4:0] */
+#define WM831X_LDO5_ON_VSEL_SHIFT 0 /* LDO5_ON_VSEL - [4:0] */
+#define WM831X_LDO5_ON_VSEL_WIDTH 5 /* LDO5_ON_VSEL - [4:0] */
+
+/*
+ * R16502 (0x4076) - LDO5 SLEEP Control
+ */
+#define WM831X_LDO5_SLP_SLOT_MASK 0xE000 /* LDO5_SLP_SLOT - [15:13] */
+#define WM831X_LDO5_SLP_SLOT_SHIFT 13 /* LDO5_SLP_SLOT - [15:13] */
+#define WM831X_LDO5_SLP_SLOT_WIDTH 3 /* LDO5_SLP_SLOT - [15:13] */
+#define WM831X_LDO5_SLP_MODE 0x0100 /* LDO5_SLP_MODE */
+#define WM831X_LDO5_SLP_MODE_MASK 0x0100 /* LDO5_SLP_MODE */
+#define WM831X_LDO5_SLP_MODE_SHIFT 8 /* LDO5_SLP_MODE */
+#define WM831X_LDO5_SLP_MODE_WIDTH 1 /* LDO5_SLP_MODE */
+#define WM831X_LDO5_SLP_VSEL_MASK 0x001F /* LDO5_SLP_VSEL - [4:0] */
+#define WM831X_LDO5_SLP_VSEL_SHIFT 0 /* LDO5_SLP_VSEL - [4:0] */
+#define WM831X_LDO5_SLP_VSEL_WIDTH 5 /* LDO5_SLP_VSEL - [4:0] */
+
+/*
+ * R16503 (0x4077) - LDO6 Control
+ */
+#define WM831X_LDO6_ERR_ACT_MASK 0xC000 /* LDO6_ERR_ACT - [15:14] */
+#define WM831X_LDO6_ERR_ACT_SHIFT 14 /* LDO6_ERR_ACT - [15:14] */
+#define WM831X_LDO6_ERR_ACT_WIDTH 2 /* LDO6_ERR_ACT - [15:14] */
+#define WM831X_LDO6_HWC_SRC_MASK 0x1800 /* LDO6_HWC_SRC - [12:11] */
+#define WM831X_LDO6_HWC_SRC_SHIFT 11 /* LDO6_HWC_SRC - [12:11] */
+#define WM831X_LDO6_HWC_SRC_WIDTH 2 /* LDO6_HWC_SRC - [12:11] */
+#define WM831X_LDO6_HWC_VSEL 0x0400 /* LDO6_HWC_VSEL */
+#define WM831X_LDO6_HWC_VSEL_MASK 0x0400 /* LDO6_HWC_VSEL */
+#define WM831X_LDO6_HWC_VSEL_SHIFT 10 /* LDO6_HWC_VSEL */
+#define WM831X_LDO6_HWC_VSEL_WIDTH 1 /* LDO6_HWC_VSEL */
+#define WM831X_LDO6_HWC_MODE_MASK 0x0300 /* LDO6_HWC_MODE - [9:8] */
+#define WM831X_LDO6_HWC_MODE_SHIFT 8 /* LDO6_HWC_MODE - [9:8] */
+#define WM831X_LDO6_HWC_MODE_WIDTH 2 /* LDO6_HWC_MODE - [9:8] */
+#define WM831X_LDO6_FLT 0x0080 /* LDO6_FLT */
+#define WM831X_LDO6_FLT_MASK 0x0080 /* LDO6_FLT */
+#define WM831X_LDO6_FLT_SHIFT 7 /* LDO6_FLT */
+#define WM831X_LDO6_FLT_WIDTH 1 /* LDO6_FLT */
+#define WM831X_LDO6_SWI 0x0040 /* LDO6_SWI */
+#define WM831X_LDO6_SWI_MASK 0x0040 /* LDO6_SWI */
+#define WM831X_LDO6_SWI_SHIFT 6 /* LDO6_SWI */
+#define WM831X_LDO6_SWI_WIDTH 1 /* LDO6_SWI */
+#define WM831X_LDO6_LP_MODE 0x0001 /* LDO6_LP_MODE */
+#define WM831X_LDO6_LP_MODE_MASK 0x0001 /* LDO6_LP_MODE */
+#define WM831X_LDO6_LP_MODE_SHIFT 0 /* LDO6_LP_MODE */
+#define WM831X_LDO6_LP_MODE_WIDTH 1 /* LDO6_LP_MODE */
+
+/*
+ * R16504 (0x4078) - LDO6 ON Control
+ */
+#define WM831X_LDO6_ON_SLOT_MASK 0xE000 /* LDO6_ON_SLOT - [15:13] */
+#define WM831X_LDO6_ON_SLOT_SHIFT 13 /* LDO6_ON_SLOT - [15:13] */
+#define WM831X_LDO6_ON_SLOT_WIDTH 3 /* LDO6_ON_SLOT - [15:13] */
+#define WM831X_LDO6_ON_MODE 0x0100 /* LDO6_ON_MODE */
+#define WM831X_LDO6_ON_MODE_MASK 0x0100 /* LDO6_ON_MODE */
+#define WM831X_LDO6_ON_MODE_SHIFT 8 /* LDO6_ON_MODE */
+#define WM831X_LDO6_ON_MODE_WIDTH 1 /* LDO6_ON_MODE */
+#define WM831X_LDO6_ON_VSEL_MASK 0x001F /* LDO6_ON_VSEL - [4:0] */
+#define WM831X_LDO6_ON_VSEL_SHIFT 0 /* LDO6_ON_VSEL - [4:0] */
+#define WM831X_LDO6_ON_VSEL_WIDTH 5 /* LDO6_ON_VSEL - [4:0] */
+
+/*
+ * R16505 (0x4079) - LDO6 SLEEP Control
+ */
+#define WM831X_LDO6_SLP_SLOT_MASK 0xE000 /* LDO6_SLP_SLOT - [15:13] */
+#define WM831X_LDO6_SLP_SLOT_SHIFT 13 /* LDO6_SLP_SLOT - [15:13] */
+#define WM831X_LDO6_SLP_SLOT_WIDTH 3 /* LDO6_SLP_SLOT - [15:13] */
+#define WM831X_LDO6_SLP_MODE 0x0100 /* LDO6_SLP_MODE */
+#define WM831X_LDO6_SLP_MODE_MASK 0x0100 /* LDO6_SLP_MODE */
+#define WM831X_LDO6_SLP_MODE_SHIFT 8 /* LDO6_SLP_MODE */
+#define WM831X_LDO6_SLP_MODE_WIDTH 1 /* LDO6_SLP_MODE */
+#define WM831X_LDO6_SLP_VSEL_MASK 0x001F /* LDO6_SLP_VSEL - [4:0] */
+#define WM831X_LDO6_SLP_VSEL_SHIFT 0 /* LDO6_SLP_VSEL - [4:0] */
+#define WM831X_LDO6_SLP_VSEL_WIDTH 5 /* LDO6_SLP_VSEL - [4:0] */
+
+/*
+ * R16506 (0x407A) - LDO7 Control
+ */
+#define WM831X_LDO7_ERR_ACT_MASK 0xC000 /* LDO7_ERR_ACT - [15:14] */
+#define WM831X_LDO7_ERR_ACT_SHIFT 14 /* LDO7_ERR_ACT - [15:14] */
+#define WM831X_LDO7_ERR_ACT_WIDTH 2 /* LDO7_ERR_ACT - [15:14] */
+#define WM831X_LDO7_HWC_SRC_MASK 0x1800 /* LDO7_HWC_SRC - [12:11] */
+#define WM831X_LDO7_HWC_SRC_SHIFT 11 /* LDO7_HWC_SRC - [12:11] */
+#define WM831X_LDO7_HWC_SRC_WIDTH 2 /* LDO7_HWC_SRC - [12:11] */
+#define WM831X_LDO7_HWC_VSEL 0x0400 /* LDO7_HWC_VSEL */
+#define WM831X_LDO7_HWC_VSEL_MASK 0x0400 /* LDO7_HWC_VSEL */
+#define WM831X_LDO7_HWC_VSEL_SHIFT 10 /* LDO7_HWC_VSEL */
+#define WM831X_LDO7_HWC_VSEL_WIDTH 1 /* LDO7_HWC_VSEL */
+#define WM831X_LDO7_HWC_MODE_MASK 0x0300 /* LDO7_HWC_MODE - [9:8] */
+#define WM831X_LDO7_HWC_MODE_SHIFT 8 /* LDO7_HWC_MODE - [9:8] */
+#define WM831X_LDO7_HWC_MODE_WIDTH 2 /* LDO7_HWC_MODE - [9:8] */
+#define WM831X_LDO7_FLT 0x0080 /* LDO7_FLT */
+#define WM831X_LDO7_FLT_MASK 0x0080 /* LDO7_FLT */
+#define WM831X_LDO7_FLT_SHIFT 7 /* LDO7_FLT */
+#define WM831X_LDO7_FLT_WIDTH 1 /* LDO7_FLT */
+#define WM831X_LDO7_SWI 0x0040 /* LDO7_SWI */
+#define WM831X_LDO7_SWI_MASK 0x0040 /* LDO7_SWI */
+#define WM831X_LDO7_SWI_SHIFT 6 /* LDO7_SWI */
+#define WM831X_LDO7_SWI_WIDTH 1 /* LDO7_SWI */
+
+/*
+ * R16507 (0x407B) - LDO7 ON Control
+ */
+#define WM831X_LDO7_ON_SLOT_MASK 0xE000 /* LDO7_ON_SLOT - [15:13] */
+#define WM831X_LDO7_ON_SLOT_SHIFT 13 /* LDO7_ON_SLOT - [15:13] */
+#define WM831X_LDO7_ON_SLOT_WIDTH 3 /* LDO7_ON_SLOT - [15:13] */
+#define WM831X_LDO7_ON_MODE 0x0100 /* LDO7_ON_MODE */
+#define WM831X_LDO7_ON_MODE_MASK 0x0100 /* LDO7_ON_MODE */
+#define WM831X_LDO7_ON_MODE_SHIFT 8 /* LDO7_ON_MODE */
+#define WM831X_LDO7_ON_MODE_WIDTH 1 /* LDO7_ON_MODE */
+#define WM831X_LDO7_ON_VSEL_MASK 0x001F /* LDO7_ON_VSEL - [4:0] */
+#define WM831X_LDO7_ON_VSEL_SHIFT 0 /* LDO7_ON_VSEL - [4:0] */
+#define WM831X_LDO7_ON_VSEL_WIDTH 5 /* LDO7_ON_VSEL - [4:0] */
+
+/*
+ * R16508 (0x407C) - LDO7 SLEEP Control
+ */
+#define WM831X_LDO7_SLP_SLOT_MASK 0xE000 /* LDO7_SLP_SLOT - [15:13] */
+#define WM831X_LDO7_SLP_SLOT_SHIFT 13 /* LDO7_SLP_SLOT - [15:13] */
+#define WM831X_LDO7_SLP_SLOT_WIDTH 3 /* LDO7_SLP_SLOT - [15:13] */
+#define WM831X_LDO7_SLP_MODE 0x0100 /* LDO7_SLP_MODE */
+#define WM831X_LDO7_SLP_MODE_MASK 0x0100 /* LDO7_SLP_MODE */
+#define WM831X_LDO7_SLP_MODE_SHIFT 8 /* LDO7_SLP_MODE */
+#define WM831X_LDO7_SLP_MODE_WIDTH 1 /* LDO7_SLP_MODE */
+#define WM831X_LDO7_SLP_VSEL_MASK 0x001F /* LDO7_SLP_VSEL - [4:0] */
+#define WM831X_LDO7_SLP_VSEL_SHIFT 0 /* LDO7_SLP_VSEL - [4:0] */
+#define WM831X_LDO7_SLP_VSEL_WIDTH 5 /* LDO7_SLP_VSEL - [4:0] */
+
+/*
+ * R16509 (0x407D) - LDO8 Control
+ */
+#define WM831X_LDO8_ERR_ACT_MASK 0xC000 /* LDO8_ERR_ACT - [15:14] */
+#define WM831X_LDO8_ERR_ACT_SHIFT 14 /* LDO8_ERR_ACT - [15:14] */
+#define WM831X_LDO8_ERR_ACT_WIDTH 2 /* LDO8_ERR_ACT - [15:14] */
+#define WM831X_LDO8_HWC_SRC_MASK 0x1800 /* LDO8_HWC_SRC - [12:11] */
+#define WM831X_LDO8_HWC_SRC_SHIFT 11 /* LDO8_HWC_SRC - [12:11] */
+#define WM831X_LDO8_HWC_SRC_WIDTH 2 /* LDO8_HWC_SRC - [12:11] */
+#define WM831X_LDO8_HWC_VSEL 0x0400 /* LDO8_HWC_VSEL */
+#define WM831X_LDO8_HWC_VSEL_MASK 0x0400 /* LDO8_HWC_VSEL */
+#define WM831X_LDO8_HWC_VSEL_SHIFT 10 /* LDO8_HWC_VSEL */
+#define WM831X_LDO8_HWC_VSEL_WIDTH 1 /* LDO8_HWC_VSEL */
+#define WM831X_LDO8_HWC_MODE_MASK 0x0300 /* LDO8_HWC_MODE - [9:8] */
+#define WM831X_LDO8_HWC_MODE_SHIFT 8 /* LDO8_HWC_MODE - [9:8] */
+#define WM831X_LDO8_HWC_MODE_WIDTH 2 /* LDO8_HWC_MODE - [9:8] */
+#define WM831X_LDO8_FLT 0x0080 /* LDO8_FLT */
+#define WM831X_LDO8_FLT_MASK 0x0080 /* LDO8_FLT */
+#define WM831X_LDO8_FLT_SHIFT 7 /* LDO8_FLT */
+#define WM831X_LDO8_FLT_WIDTH 1 /* LDO8_FLT */
+#define WM831X_LDO8_SWI 0x0040 /* LDO8_SWI */
+#define WM831X_LDO8_SWI_MASK 0x0040 /* LDO8_SWI */
+#define WM831X_LDO8_SWI_SHIFT 6 /* LDO8_SWI */
+#define WM831X_LDO8_SWI_WIDTH 1 /* LDO8_SWI */
+
+/*
+ * R16510 (0x407E) - LDO8 ON Control
+ */
+#define WM831X_LDO8_ON_SLOT_MASK 0xE000 /* LDO8_ON_SLOT - [15:13] */
+#define WM831X_LDO8_ON_SLOT_SHIFT 13 /* LDO8_ON_SLOT - [15:13] */
+#define WM831X_LDO8_ON_SLOT_WIDTH 3 /* LDO8_ON_SLOT - [15:13] */
+#define WM831X_LDO8_ON_MODE 0x0100 /* LDO8_ON_MODE */
+#define WM831X_LDO8_ON_MODE_MASK 0x0100 /* LDO8_ON_MODE */
+#define WM831X_LDO8_ON_MODE_SHIFT 8 /* LDO8_ON_MODE */
+#define WM831X_LDO8_ON_MODE_WIDTH 1 /* LDO8_ON_MODE */
+#define WM831X_LDO8_ON_VSEL_MASK 0x001F /* LDO8_ON_VSEL - [4:0] */
+#define WM831X_LDO8_ON_VSEL_SHIFT 0 /* LDO8_ON_VSEL - [4:0] */
+#define WM831X_LDO8_ON_VSEL_WIDTH 5 /* LDO8_ON_VSEL - [4:0] */
+
+/*
+ * R16511 (0x407F) - LDO8 SLEEP Control
+ */
+#define WM831X_LDO8_SLP_SLOT_MASK 0xE000 /* LDO8_SLP_SLOT - [15:13] */
+#define WM831X_LDO8_SLP_SLOT_SHIFT 13 /* LDO8_SLP_SLOT - [15:13] */
+#define WM831X_LDO8_SLP_SLOT_WIDTH 3 /* LDO8_SLP_SLOT - [15:13] */
+#define WM831X_LDO8_SLP_MODE 0x0100 /* LDO8_SLP_MODE */
+#define WM831X_LDO8_SLP_MODE_MASK 0x0100 /* LDO8_SLP_MODE */
+#define WM831X_LDO8_SLP_MODE_SHIFT 8 /* LDO8_SLP_MODE */
+#define WM831X_LDO8_SLP_MODE_WIDTH 1 /* LDO8_SLP_MODE */
+#define WM831X_LDO8_SLP_VSEL_MASK 0x001F /* LDO8_SLP_VSEL - [4:0] */
+#define WM831X_LDO8_SLP_VSEL_SHIFT 0 /* LDO8_SLP_VSEL - [4:0] */
+#define WM831X_LDO8_SLP_VSEL_WIDTH 5 /* LDO8_SLP_VSEL - [4:0] */
+
+/*
+ * R16512 (0x4080) - LDO9 Control
+ */
+#define WM831X_LDO9_ERR_ACT_MASK 0xC000 /* LDO9_ERR_ACT - [15:14] */
+#define WM831X_LDO9_ERR_ACT_SHIFT 14 /* LDO9_ERR_ACT - [15:14] */
+#define WM831X_LDO9_ERR_ACT_WIDTH 2 /* LDO9_ERR_ACT - [15:14] */
+#define WM831X_LDO9_HWC_SRC_MASK 0x1800 /* LDO9_HWC_SRC - [12:11] */
+#define WM831X_LDO9_HWC_SRC_SHIFT 11 /* LDO9_HWC_SRC - [12:11] */
+#define WM831X_LDO9_HWC_SRC_WIDTH 2 /* LDO9_HWC_SRC - [12:11] */
+#define WM831X_LDO9_HWC_VSEL 0x0400 /* LDO9_HWC_VSEL */
+#define WM831X_LDO9_HWC_VSEL_MASK 0x0400 /* LDO9_HWC_VSEL */
+#define WM831X_LDO9_HWC_VSEL_SHIFT 10 /* LDO9_HWC_VSEL */
+#define WM831X_LDO9_HWC_VSEL_WIDTH 1 /* LDO9_HWC_VSEL */
+#define WM831X_LDO9_HWC_MODE_MASK 0x0300 /* LDO9_HWC_MODE - [9:8] */
+#define WM831X_LDO9_HWC_MODE_SHIFT 8 /* LDO9_HWC_MODE - [9:8] */
+#define WM831X_LDO9_HWC_MODE_WIDTH 2 /* LDO9_HWC_MODE - [9:8] */
+#define WM831X_LDO9_FLT 0x0080 /* LDO9_FLT */
+#define WM831X_LDO9_FLT_MASK 0x0080 /* LDO9_FLT */
+#define WM831X_LDO9_FLT_SHIFT 7 /* LDO9_FLT */
+#define WM831X_LDO9_FLT_WIDTH 1 /* LDO9_FLT */
+#define WM831X_LDO9_SWI 0x0040 /* LDO9_SWI */
+#define WM831X_LDO9_SWI_MASK 0x0040 /* LDO9_SWI */
+#define WM831X_LDO9_SWI_SHIFT 6 /* LDO9_SWI */
+#define WM831X_LDO9_SWI_WIDTH 1 /* LDO9_SWI */
+
+/*
+ * R16513 (0x4081) - LDO9 ON Control
+ */
+#define WM831X_LDO9_ON_SLOT_MASK 0xE000 /* LDO9_ON_SLOT - [15:13] */
+#define WM831X_LDO9_ON_SLOT_SHIFT 13 /* LDO9_ON_SLOT - [15:13] */
+#define WM831X_LDO9_ON_SLOT_WIDTH 3 /* LDO9_ON_SLOT - [15:13] */
+#define WM831X_LDO9_ON_MODE 0x0100 /* LDO9_ON_MODE */
+#define WM831X_LDO9_ON_MODE_MASK 0x0100 /* LDO9_ON_MODE */
+#define WM831X_LDO9_ON_MODE_SHIFT 8 /* LDO9_ON_MODE */
+#define WM831X_LDO9_ON_MODE_WIDTH 1 /* LDO9_ON_MODE */
+#define WM831X_LDO9_ON_VSEL_MASK 0x001F /* LDO9_ON_VSEL - [4:0] */
+#define WM831X_LDO9_ON_VSEL_SHIFT 0 /* LDO9_ON_VSEL - [4:0] */
+#define WM831X_LDO9_ON_VSEL_WIDTH 5 /* LDO9_ON_VSEL - [4:0] */
+
+/*
+ * R16514 (0x4082) - LDO9 SLEEP Control
+ */
+#define WM831X_LDO9_SLP_SLOT_MASK 0xE000 /* LDO9_SLP_SLOT - [15:13] */
+#define WM831X_LDO9_SLP_SLOT_SHIFT 13 /* LDO9_SLP_SLOT - [15:13] */
+#define WM831X_LDO9_SLP_SLOT_WIDTH 3 /* LDO9_SLP_SLOT - [15:13] */
+#define WM831X_LDO9_SLP_MODE 0x0100 /* LDO9_SLP_MODE */
+#define WM831X_LDO9_SLP_MODE_MASK 0x0100 /* LDO9_SLP_MODE */
+#define WM831X_LDO9_SLP_MODE_SHIFT 8 /* LDO9_SLP_MODE */
+#define WM831X_LDO9_SLP_MODE_WIDTH 1 /* LDO9_SLP_MODE */
+#define WM831X_LDO9_SLP_VSEL_MASK 0x001F /* LDO9_SLP_VSEL - [4:0] */
+#define WM831X_LDO9_SLP_VSEL_SHIFT 0 /* LDO9_SLP_VSEL - [4:0] */
+#define WM831X_LDO9_SLP_VSEL_WIDTH 5 /* LDO9_SLP_VSEL - [4:0] */
+
+/*
+ * R16515 (0x4083) - LDO10 Control
+ */
+#define WM831X_LDO10_ERR_ACT_MASK 0xC000 /* LDO10_ERR_ACT - [15:14] */
+#define WM831X_LDO10_ERR_ACT_SHIFT 14 /* LDO10_ERR_ACT - [15:14] */
+#define WM831X_LDO10_ERR_ACT_WIDTH 2 /* LDO10_ERR_ACT - [15:14] */
+#define WM831X_LDO10_HWC_SRC_MASK 0x1800 /* LDO10_HWC_SRC - [12:11] */
+#define WM831X_LDO10_HWC_SRC_SHIFT 11 /* LDO10_HWC_SRC - [12:11] */
+#define WM831X_LDO10_HWC_SRC_WIDTH 2 /* LDO10_HWC_SRC - [12:11] */
+#define WM831X_LDO10_HWC_VSEL 0x0400 /* LDO10_HWC_VSEL */
+#define WM831X_LDO10_HWC_VSEL_MASK 0x0400 /* LDO10_HWC_VSEL */
+#define WM831X_LDO10_HWC_VSEL_SHIFT 10 /* LDO10_HWC_VSEL */
+#define WM831X_LDO10_HWC_VSEL_WIDTH 1 /* LDO10_HWC_VSEL */
+#define WM831X_LDO10_HWC_MODE_MASK 0x0300 /* LDO10_HWC_MODE - [9:8] */
+#define WM831X_LDO10_HWC_MODE_SHIFT 8 /* LDO10_HWC_MODE - [9:8] */
+#define WM831X_LDO10_HWC_MODE_WIDTH 2 /* LDO10_HWC_MODE - [9:8] */
+#define WM831X_LDO10_FLT 0x0080 /* LDO10_FLT */
+#define WM831X_LDO10_FLT_MASK 0x0080 /* LDO10_FLT */
+#define WM831X_LDO10_FLT_SHIFT 7 /* LDO10_FLT */
+#define WM831X_LDO10_FLT_WIDTH 1 /* LDO10_FLT */
+#define WM831X_LDO10_SWI 0x0040 /* LDO10_SWI */
+#define WM831X_LDO10_SWI_MASK 0x0040 /* LDO10_SWI */
+#define WM831X_LDO10_SWI_SHIFT 6 /* LDO10_SWI */
+#define WM831X_LDO10_SWI_WIDTH 1 /* LDO10_SWI */
+
+/*
+ * R16516 (0x4084) - LDO10 ON Control
+ */
+#define WM831X_LDO10_ON_SLOT_MASK 0xE000 /* LDO10_ON_SLOT - [15:13] */
+#define WM831X_LDO10_ON_SLOT_SHIFT 13 /* LDO10_ON_SLOT - [15:13] */
+#define WM831X_LDO10_ON_SLOT_WIDTH 3 /* LDO10_ON_SLOT - [15:13] */
+#define WM831X_LDO10_ON_MODE 0x0100 /* LDO10_ON_MODE */
+#define WM831X_LDO10_ON_MODE_MASK 0x0100 /* LDO10_ON_MODE */
+#define WM831X_LDO10_ON_MODE_SHIFT 8 /* LDO10_ON_MODE */
+#define WM831X_LDO10_ON_MODE_WIDTH 1 /* LDO10_ON_MODE */
+#define WM831X_LDO10_ON_VSEL_MASK 0x001F /* LDO10_ON_VSEL - [4:0] */
+#define WM831X_LDO10_ON_VSEL_SHIFT 0 /* LDO10_ON_VSEL - [4:0] */
+#define WM831X_LDO10_ON_VSEL_WIDTH 5 /* LDO10_ON_VSEL - [4:0] */
+
+/*
+ * R16517 (0x4085) - LDO10 SLEEP Control
+ */
+#define WM831X_LDO10_SLP_SLOT_MASK 0xE000 /* LDO10_SLP_SLOT - [15:13] */
+#define WM831X_LDO10_SLP_SLOT_SHIFT 13 /* LDO10_SLP_SLOT - [15:13] */
+#define WM831X_LDO10_SLP_SLOT_WIDTH 3 /* LDO10_SLP_SLOT - [15:13] */
+#define WM831X_LDO10_SLP_MODE 0x0100 /* LDO10_SLP_MODE */
+#define WM831X_LDO10_SLP_MODE_MASK 0x0100 /* LDO10_SLP_MODE */
+#define WM831X_LDO10_SLP_MODE_SHIFT 8 /* LDO10_SLP_MODE */
+#define WM831X_LDO10_SLP_MODE_WIDTH 1 /* LDO10_SLP_MODE */
+#define WM831X_LDO10_SLP_VSEL_MASK 0x001F /* LDO10_SLP_VSEL - [4:0] */
+#define WM831X_LDO10_SLP_VSEL_SHIFT 0 /* LDO10_SLP_VSEL - [4:0] */
+#define WM831X_LDO10_SLP_VSEL_WIDTH 5 /* LDO10_SLP_VSEL - [4:0] */
+
+/*
+ * R16519 (0x4087) - LDO11 ON Control
+ */
+#define WM831X_LDO11_ON_SLOT_MASK 0xE000 /* LDO11_ON_SLOT - [15:13] */
+#define WM831X_LDO11_ON_SLOT_SHIFT 13 /* LDO11_ON_SLOT - [15:13] */
+#define WM831X_LDO11_ON_SLOT_WIDTH 3 /* LDO11_ON_SLOT - [15:13] */
+#define WM831X_LDO11_OFFENA 0x1000 /* LDO11_OFFENA */
+#define WM831X_LDO11_OFFENA_MASK 0x1000 /* LDO11_OFFENA */
+#define WM831X_LDO11_OFFENA_SHIFT 12 /* LDO11_OFFENA */
+#define WM831X_LDO11_OFFENA_WIDTH 1 /* LDO11_OFFENA */
+#define WM831X_LDO11_VSEL_SRC 0x0080 /* LDO11_VSEL_SRC */
+#define WM831X_LDO11_VSEL_SRC_MASK 0x0080 /* LDO11_VSEL_SRC */
+#define WM831X_LDO11_VSEL_SRC_SHIFT 7 /* LDO11_VSEL_SRC */
+#define WM831X_LDO11_VSEL_SRC_WIDTH 1 /* LDO11_VSEL_SRC */
+#define WM831X_LDO11_ON_VSEL_MASK 0x000F /* LDO11_ON_VSEL - [3:0] */
+#define WM831X_LDO11_ON_VSEL_SHIFT 0 /* LDO11_ON_VSEL - [3:0] */
+#define WM831X_LDO11_ON_VSEL_WIDTH 4 /* LDO11_ON_VSEL - [3:0] */
+
+/*
+ * R16520 (0x4088) - LDO11 SLEEP Control
+ */
+#define WM831X_LDO11_SLP_SLOT_MASK 0xE000 /* LDO11_SLP_SLOT - [15:13] */
+#define WM831X_LDO11_SLP_SLOT_SHIFT 13 /* LDO11_SLP_SLOT - [15:13] */
+#define WM831X_LDO11_SLP_SLOT_WIDTH 3 /* LDO11_SLP_SLOT - [15:13] */
+#define WM831X_LDO11_SLP_VSEL_MASK 0x000F /* LDO11_SLP_VSEL - [3:0] */
+#define WM831X_LDO11_SLP_VSEL_SHIFT 0 /* LDO11_SLP_VSEL - [3:0] */
+#define WM831X_LDO11_SLP_VSEL_WIDTH 4 /* LDO11_SLP_VSEL - [3:0] */
+
+/*
+ * R16526 (0x408E) - Power Good Source 1
+ */
+#define WM831X_DC4_OK 0x0008 /* DC4_OK */
+#define WM831X_DC4_OK_MASK 0x0008 /* DC4_OK */
+#define WM831X_DC4_OK_SHIFT 3 /* DC4_OK */
+#define WM831X_DC4_OK_WIDTH 1 /* DC4_OK */
+#define WM831X_DC3_OK 0x0004 /* DC3_OK */
+#define WM831X_DC3_OK_MASK 0x0004 /* DC3_OK */
+#define WM831X_DC3_OK_SHIFT 2 /* DC3_OK */
+#define WM831X_DC3_OK_WIDTH 1 /* DC3_OK */
+#define WM831X_DC2_OK 0x0002 /* DC2_OK */
+#define WM831X_DC2_OK_MASK 0x0002 /* DC2_OK */
+#define WM831X_DC2_OK_SHIFT 1 /* DC2_OK */
+#define WM831X_DC2_OK_WIDTH 1 /* DC2_OK */
+#define WM831X_DC1_OK 0x0001 /* DC1_OK */
+#define WM831X_DC1_OK_MASK 0x0001 /* DC1_OK */
+#define WM831X_DC1_OK_SHIFT 0 /* DC1_OK */
+#define WM831X_DC1_OK_WIDTH 1 /* DC1_OK */
+
+/*
+ * R16527 (0x408F) - Power Good Source 2
+ */
+#define WM831X_LDO10_OK 0x0200 /* LDO10_OK */
+#define WM831X_LDO10_OK_MASK 0x0200 /* LDO10_OK */
+#define WM831X_LDO10_OK_SHIFT 9 /* LDO10_OK */
+#define WM831X_LDO10_OK_WIDTH 1 /* LDO10_OK */
+#define WM831X_LDO9_OK 0x0100 /* LDO9_OK */
+#define WM831X_LDO9_OK_MASK 0x0100 /* LDO9_OK */
+#define WM831X_LDO9_OK_SHIFT 8 /* LDO9_OK */
+#define WM831X_LDO9_OK_WIDTH 1 /* LDO9_OK */
+#define WM831X_LDO8_OK 0x0080 /* LDO8_OK */
+#define WM831X_LDO8_OK_MASK 0x0080 /* LDO8_OK */
+#define WM831X_LDO8_OK_SHIFT 7 /* LDO8_OK */
+#define WM831X_LDO8_OK_WIDTH 1 /* LDO8_OK */
+#define WM831X_LDO7_OK 0x0040 /* LDO7_OK */
+#define WM831X_LDO7_OK_MASK 0x0040 /* LDO7_OK */
+#define WM831X_LDO7_OK_SHIFT 6 /* LDO7_OK */
+#define WM831X_LDO7_OK_WIDTH 1 /* LDO7_OK */
+#define WM831X_LDO6_OK 0x0020 /* LDO6_OK */
+#define WM831X_LDO6_OK_MASK 0x0020 /* LDO6_OK */
+#define WM831X_LDO6_OK_SHIFT 5 /* LDO6_OK */
+#define WM831X_LDO6_OK_WIDTH 1 /* LDO6_OK */
+#define WM831X_LDO5_OK 0x0010 /* LDO5_OK */
+#define WM831X_LDO5_OK_MASK 0x0010 /* LDO5_OK */
+#define WM831X_LDO5_OK_SHIFT 4 /* LDO5_OK */
+#define WM831X_LDO5_OK_WIDTH 1 /* LDO5_OK */
+#define WM831X_LDO4_OK 0x0008 /* LDO4_OK */
+#define WM831X_LDO4_OK_MASK 0x0008 /* LDO4_OK */
+#define WM831X_LDO4_OK_SHIFT 3 /* LDO4_OK */
+#define WM831X_LDO4_OK_WIDTH 1 /* LDO4_OK */
+#define WM831X_LDO3_OK 0x0004 /* LDO3_OK */
+#define WM831X_LDO3_OK_MASK 0x0004 /* LDO3_OK */
+#define WM831X_LDO3_OK_SHIFT 2 /* LDO3_OK */
+#define WM831X_LDO3_OK_WIDTH 1 /* LDO3_OK */
+#define WM831X_LDO2_OK 0x0002 /* LDO2_OK */
+#define WM831X_LDO2_OK_MASK 0x0002 /* LDO2_OK */
+#define WM831X_LDO2_OK_SHIFT 1 /* LDO2_OK */
+#define WM831X_LDO2_OK_WIDTH 1 /* LDO2_OK */
+#define WM831X_LDO1_OK 0x0001 /* LDO1_OK */
+#define WM831X_LDO1_OK_MASK 0x0001 /* LDO1_OK */
+#define WM831X_LDO1_OK_SHIFT 0 /* LDO1_OK */
+#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
+
+#define WM831X_ISINK_MAX_ISEL 55
+extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
+
+#endif
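
The _MASK/_SHIFT/_WIDTH triplets added above follow the usual convention that MASK == ((1 << WIDTH) - 1) << SHIFT, so a field is read by masking and shifting, and written by a read-modify-write of the containing register. A minimal sketch of that pattern for the DC1_ON_VSEL field of R16472 (0x4058), assuming the wm831x register accessors (wm831x_reg_read()/wm831x_set_bits()) declared in the wm831x core header and the field macros from the header added above; the helper names are purely illustrative:

    #include <linux/mfd/wm831x/core.h>  /* struct wm831x + accessors (assumed) */

    /* Illustrative helper: extract DC1_ON_VSEL from DC1 ON Config (0x4058). */
    static int dc1_on_vsel_get(struct wm831x *wm831x)
    {
            int val = wm831x_reg_read(wm831x, 0x4058);

            if (val < 0)
                    return val;
            return (val & WM831X_DC1_ON_VSEL_MASK) >> WM831X_DC1_ON_VSEL_SHIFT;
    }

    /* Illustrative helper: update only DC1_ON_VSEL, leaving the other bits alone. */
    static int dc1_on_vsel_set(struct wm831x *wm831x, unsigned int sel)
    {
            return wm831x_set_bits(wm831x, 0x4058, WM831X_DC1_ON_VSEL_MASK,
                                   sel << WM831X_DC1_ON_VSEL_SHIFT);
    }
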
diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h
new file mode 100644
index 000000000..6bc090d0e
--- /dev/null
+++ b/include/linux/mfd/wm831x/status.h
@@ -0,0 +1,34 @@
+/*
+ * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_STATUS_H__
+#define __MFD_WM831X_STATUS_H__
+
+#define WM831X_LED_SRC_MASK 0xC000 /* LED_SRC - [15:14] */
+#define WM831X_LED_SRC_SHIFT 14 /* LED_SRC - [15:14] */
+#define WM831X_LED_SRC_WIDTH 2 /* LED_SRC - [15:14] */
+#define WM831X_LED_MODE_MASK 0x0300 /* LED_MODE - [9:8] */
+#define WM831X_LED_MODE_SHIFT 8 /* LED_MODE - [9:8] */
+#define WM831X_LED_MODE_WIDTH 2 /* LED_MODE - [9:8] */
+#define WM831X_LED_SEQ_LEN_MASK 0x0030 /* LED_SEQ_LEN - [5:4] */
+#define WM831X_LED_SEQ_LEN_SHIFT 4 /* LED_SEQ_LEN - [5:4] */
+#define WM831X_LED_SEQ_LEN_WIDTH 2 /* LED_SEQ_LEN - [5:4] */
+#define WM831X_LED_DUR_MASK 0x000C /* LED_DUR - [3:2] */
+#define WM831X_LED_DUR_SHIFT 2 /* LED_DUR - [3:2] */
+#define WM831X_LED_DUR_WIDTH 2 /* LED_DUR - [3:2] */
+#define WM831X_LED_DUTY_CYC_MASK 0x0003 /* LED_DUTY_CYC - [1:0] */
+#define WM831X_LED_DUTY_CYC_SHIFT 0 /* LED_DUTY_CYC - [1:0] */
+#define WM831X_LED_DUTY_CYC_WIDTH 2 /* LED_DUTY_CYC - [1:0] */
+
+#endif
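
The status-LED fields above can be combined into a single 16-bit control value with the _SHIFT/_MASK macros; a short sketch follows. The numeric field encodings (which source, mode and duty cycle each selector value means) come from the WM831x datasheet and are not documented in this header, so the arguments here are raw placeholder selectors and the helper name is hypothetical:

    #include <linux/mfd/wm831x/status.h>

    /* Illustrative only: build a status-LED control register value from its fields. */
    static unsigned int wm831x_led_ctrl_value(unsigned int src, unsigned int mode,
                                              unsigned int duty)
    {
            return ((src  << WM831X_LED_SRC_SHIFT)      & WM831X_LED_SRC_MASK) |
                   ((mode << WM831X_LED_MODE_SHIFT)     & WM831X_LED_MODE_MASK) |
                   ((duty << WM831X_LED_DUTY_CYC_SHIFT) & WM831X_LED_DUTY_CYC_MASK);
    }
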
diff --git a/include/linux/mfd/wm831x/watchdog.h b/include/linux/mfd/wm831x/watchdog.h
new file mode 100644
index 000000000..97a99b529
--- /dev/null
+++ b/include/linux/mfd/wm831x/watchdog.h
@@ -0,0 +1,52 @@
+/*
+ * include/linux/mfd/wm831x/watchdog.h -- Watchdog for WM831x
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM831X_WATCHDOG_H__
+#define __MFD_WM831X_WATCHDOG_H__
+
+
+/*
+ * R16388 (0x4004) - Watchdog
+ */
+#define WM831X_WDOG_ENA 0x8000 /* WDOG_ENA */
+#define WM831X_WDOG_ENA_MASK 0x8000 /* WDOG_ENA */
+#define WM831X_WDOG_ENA_SHIFT 15 /* WDOG_ENA */
+#define WM831X_WDOG_ENA_WIDTH 1 /* WDOG_ENA */
+#define WM831X_WDOG_DEBUG 0x4000 /* WDOG_DEBUG */
+#define WM831X_WDOG_DEBUG_MASK 0x4000 /* WDOG_DEBUG */
+#define WM831X_WDOG_DEBUG_SHIFT 14 /* WDOG_DEBUG */
+#define WM831X_WDOG_DEBUG_WIDTH 1 /* WDOG_DEBUG */
+#define WM831X_WDOG_RST_SRC 0x2000 /* WDOG_RST_SRC */
+#define WM831X_WDOG_RST_SRC_MASK 0x2000 /* WDOG_RST_SRC */
+#define WM831X_WDOG_RST_SRC_SHIFT 13 /* WDOG_RST_SRC */
+#define WM831X_WDOG_RST_SRC_WIDTH 1 /* WDOG_RST_SRC */
+#define WM831X_WDOG_SLPENA 0x1000 /* WDOG_SLPENA */
+#define WM831X_WDOG_SLPENA_MASK 0x1000 /* WDOG_SLPENA */
+#define WM831X_WDOG_SLPENA_SHIFT 12 /* WDOG_SLPENA */
+#define WM831X_WDOG_SLPENA_WIDTH 1 /* WDOG_SLPENA */
+#define WM831X_WDOG_RESET 0x0800 /* WDOG_RESET */
+#define WM831X_WDOG_RESET_MASK 0x0800 /* WDOG_RESET */
+#define WM831X_WDOG_RESET_SHIFT 11 /* WDOG_RESET */
+#define WM831X_WDOG_RESET_WIDTH 1 /* WDOG_RESET */
+#define WM831X_WDOG_SECACT_MASK 0x0300 /* WDOG_SECACT - [9:8] */
+#define WM831X_WDOG_SECACT_SHIFT 8 /* WDOG_SECACT - [9:8] */
+#define WM831X_WDOG_SECACT_WIDTH 2 /* WDOG_SECACT - [9:8] */
+#define WM831X_WDOG_PRIMACT_MASK 0x0030 /* WDOG_PRIMACT - [5:4] */
+#define WM831X_WDOG_PRIMACT_SHIFT 4 /* WDOG_PRIMACT - [5:4] */
+#define WM831X_WDOG_PRIMACT_WIDTH 2 /* WDOG_PRIMACT - [5:4] */
+#define WM831X_WDOG_TO_MASK 0x0007 /* WDOG_TO - [2:0] */
+#define WM831X_WDOG_TO_SHIFT 0 /* WDOG_TO - [2:0] */
+#define WM831X_WDOG_TO_WIDTH 3 /* WDOG_TO - [2:0] */
+
+#endif
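
A sketch of programming the watchdog timeout (WDOG_TO, bits [2:0] of R16388/0x4004) and enabling it using the fields above, again assuming the wm831x_set_bits() accessor from the core header. On real parts this register may be write-protected behind the device security key, which the in-tree watchdog driver handles and which is omitted here:

    #include <linux/mfd/wm831x/core.h>      /* wm831x_set_bits() (assumed) */
    #include <linux/mfd/wm831x/watchdog.h>

    /* Illustrative only: select a timeout code, then turn the watchdog on. */
    static int wm831x_wdog_start_sketch(struct wm831x *wm831x, unsigned int to_sel)
    {
            int ret;

            /* Read-modify-write just the 3-bit WDOG_TO field. */
            ret = wm831x_set_bits(wm831x, 0x4004, WM831X_WDOG_TO_MASK,
                                  to_sel << WM831X_WDOG_TO_SHIFT);
            if (ret < 0)
                    return ret;

            /* Set WDOG_ENA without disturbing the other control bits. */
            return wm831x_set_bits(wm831x, 0x4004, WM831X_WDOG_ENA,
                                   WM831X_WDOG_ENA);
    }
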
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
new file mode 100644
index 000000000..bd581c6fa
--- /dev/null
+++ b/include/linux/mfd/wm8350/audio.h
@@ -0,0 +1,628 @@
+/*
+ * audio.h -- Audio Driver for Wolfson WM8350 PMIC
+ *
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_WM8350_AUDIO_H_
+#define __LINUX_MFD_WM8350_AUDIO_H_
+
+#include <linux/platform_device.h>
+
+#define WM8350_CLOCK_CONTROL_1 0x28
+#define WM8350_CLOCK_CONTROL_2 0x29
+#define WM8350_FLL_CONTROL_1 0x2A
+#define WM8350_FLL_CONTROL_2 0x2B
+#define WM8350_FLL_CONTROL_3 0x2C
+#define WM8350_FLL_CONTROL_4 0x2D
+#define WM8350_DAC_CONTROL 0x30
+#define WM8350_DAC_DIGITAL_VOLUME_L 0x32
+#define WM8350_DAC_DIGITAL_VOLUME_R 0x33
+#define WM8350_DAC_LR_RATE 0x35
+#define WM8350_DAC_CLOCK_CONTROL 0x36
+#define WM8350_DAC_MUTE 0x3A
+#define WM8350_DAC_MUTE_VOLUME 0x3B
+#define WM8350_DAC_SIDE 0x3C
+#define WM8350_ADC_CONTROL 0x40
+#define WM8350_ADC_DIGITAL_VOLUME_L 0x42
+#define WM8350_ADC_DIGITAL_VOLUME_R 0x43
+#define WM8350_ADC_DIVIDER 0x44
+#define WM8350_ADC_LR_RATE 0x46
+#define WM8350_INPUT_CONTROL 0x48
+#define WM8350_IN3_INPUT_CONTROL 0x49
+#define WM8350_MIC_BIAS_CONTROL 0x4A
+#define WM8350_OUTPUT_CONTROL 0x4C
+#define WM8350_JACK_DETECT 0x4D
+#define WM8350_ANTI_POP_CONTROL 0x4E
+#define WM8350_LEFT_INPUT_VOLUME 0x50
+#define WM8350_RIGHT_INPUT_VOLUME 0x51
+#define WM8350_LEFT_MIXER_CONTROL 0x58
+#define WM8350_RIGHT_MIXER_CONTROL 0x59
+#define WM8350_OUT3_MIXER_CONTROL 0x5C
+#define WM8350_OUT4_MIXER_CONTROL 0x5D
+#define WM8350_OUTPUT_LEFT_MIXER_VOLUME 0x60
+#define WM8350_OUTPUT_RIGHT_MIXER_VOLUME 0x61
+#define WM8350_INPUT_MIXER_VOLUME_L 0x62
+#define WM8350_INPUT_MIXER_VOLUME_R 0x63
+#define WM8350_INPUT_MIXER_VOLUME 0x64
+#define WM8350_LOUT1_VOLUME 0x68
+#define WM8350_ROUT1_VOLUME 0x69
+#define WM8350_LOUT2_VOLUME 0x6A
+#define WM8350_ROUT2_VOLUME 0x6B
+#define WM8350_BEEP_VOLUME 0x6F
+#define WM8350_AI_FORMATING 0x70
+#define WM8350_ADC_DAC_COMP 0x71
+#define WM8350_AI_ADC_CONTROL 0x72
+#define WM8350_AI_DAC_CONTROL 0x73
+#define WM8350_AIF_TEST 0x74
+#define WM8350_JACK_PIN_STATUS 0xE7
+
+/* Bit values for R08 (0x08) */
+#define WM8350_CODEC_ISEL_1_5 0 /* x1.5 */
+#define WM8350_CODEC_ISEL_1_0 1 /* x1.0 */
+#define WM8350_CODEC_ISEL_0_75 2 /* x0.75 */
+#define WM8350_CODEC_ISEL_0_5 3 /* x0.5 */
+
+#define WM8350_VMID_OFF 0
+#define WM8350_VMID_300K 1
+#define WM8350_VMID_50K 2
+#define WM8350_VMID_5K 3
+
+/*
+ * R40 (0x28) - Clock Control 1
+ */
+#define WM8350_TOCLK_RATE 0x4000
+#define WM8350_MCLK_SEL 0x0800
+#define WM8350_MCLK_DIV_MASK 0x0100
+#define WM8350_BCLK_DIV_MASK 0x00F0
+#define WM8350_OPCLK_DIV_MASK 0x0007
+
+/*
+ * R41 (0x29) - Clock Control 2
+ */
+#define WM8350_LRC_ADC_SEL 0x8000
+#define WM8350_MCLK_DIR 0x0001
+
+/*
+ * R42 (0x2A) - FLL Control 1
+ */
+#define WM8350_FLL_DITHER_WIDTH_MASK 0x3000
+#define WM8350_FLL_DITHER_HP 0x0800
+#define WM8350_FLL_OUTDIV_MASK 0x0700
+#define WM8350_FLL_RSP_RATE_MASK 0x00F0
+#define WM8350_FLL_RATE_MASK 0x0007
+
+/*
+ * R43 (0x2B) - FLL Control 2
+ */
+#define WM8350_FLL_RATIO_MASK 0xF800
+#define WM8350_FLL_N_MASK 0x03FF
+
+/*
+ * R44 (0x2C) - FLL Control 3
+ */
+#define WM8350_FLL_K_MASK 0xFFFF
+
+/*
+ * R45 (0x2D) - FLL Control 4
+ */
+#define WM8350_FLL_FRAC 0x0020
+#define WM8350_FLL_SLOW_LOCK_REF 0x0010
+#define WM8350_FLL_CLK_SRC_MASK 0x0003
+
+/*
+ * R48 (0x30) - DAC Control
+ */
+#define WM8350_DAC_MONO 0x2000
+#define WM8350_AIF_LRCLKRATE 0x1000
+#define WM8350_DEEMP_MASK 0x0030
+#define WM8350_DACL_DATINV 0x0002
+#define WM8350_DACR_DATINV 0x0001
+
+/*
+ * R50 (0x32) - DAC Digital Volume L
+ */
+#define WM8350_DAC_VU 0x0100
+#define WM8350_DACL_VOL_MASK 0x00FF
+
+/*
+ * R51 (0x33) - DAC Digital Volume R
+ */
+#define WM8350_DAC_VU 0x0100
+#define WM8350_DACR_VOL_MASK 0x00FF
+
+/*
+ * R53 (0x35) - DAC LR Rate
+ */
+#define WM8350_DACLRC_ENA 0x0800
+#define WM8350_DACLRC_RATE_MASK 0x07FF
+
+/*
+ * R54 (0x36) - DAC Clock Control
+ */
+#define WM8350_DACCLK_POL 0x0010
+#define WM8350_DAC_CLKDIV_MASK 0x0007
+
+/*
+ * R58 (0x3A) - DAC Mute
+ */
+#define WM8350_DAC_MUTE_ENA 0x4000
+
+/*
+ * R59 (0x3B) - DAC Mute Volume
+ */
+#define WM8350_DAC_MUTEMODE 0x4000
+#define WM8350_DAC_MUTERATE 0x2000
+#define WM8350_DAC_SB_FILT 0x1000
+
+/*
+ * R60 (0x3C) - DAC Side
+ */
+#define WM8350_ADC_TO_DACL_MASK 0x3000
+#define WM8350_ADC_TO_DACR_MASK 0x0C00
+
+/*
+ * R64 (0x40) - ADC Control
+ */
+#define WM8350_ADC_HPF_CUT_MASK 0x0300
+#define WM8350_ADCL_DATINV 0x0002
+#define WM8350_ADCR_DATINV 0x0001
+
+/*
+ * R66 (0x42) - ADC Digital Volume L
+ */
+#define WM8350_ADC_VU 0x0100
+#define WM8350_ADCL_VOL_MASK 0x00FF
+
+/*
+ * R67 (0x43) - ADC Digital Volume R
+ */
+#define WM8350_ADC_VU 0x0100
+#define WM8350_ADCR_VOL_MASK 0x00FF
+
+/*
+ * R68 (0x44) - ADC Divider
+ */
+#define WM8350_ADCL_DAC_SVOL_MASK 0x0F00
+#define WM8350_ADCR_DAC_SVOL_MASK 0x00F0
+#define WM8350_ADCCLK_POL 0x0008
+#define WM8350_ADC_CLKDIV_MASK 0x0007
+
+/*
+ * R70 (0x46) - ADC LR Rate
+ */
+#define WM8350_ADCLRC_ENA 0x0800
+#define WM8350_ADCLRC_RATE_MASK 0x07FF
+
+/*
+ * R72 (0x48) - Input Control
+ */
+#define WM8350_IN2R_ENA 0x0400
+#define WM8350_IN1RN_ENA 0x0200
+#define WM8350_IN1RP_ENA 0x0100
+#define WM8350_IN2L_ENA 0x0004
+#define WM8350_IN1LN_ENA 0x0002
+#define WM8350_IN1LP_ENA 0x0001
+
+/*
+ * R73 (0x49) - IN3 Input Control
+ */
+#define WM8350_IN3R_SHORT 0x4000
+#define WM8350_IN3L_SHORT 0x0040
+
+/*
+ * R74 (0x4A) - Mic Bias Control
+ */
+#define WM8350_MICBSEL 0x4000
+#define WM8350_MCDTHR_MASK 0x001C
+#define WM8350_MCDSCTHR_MASK 0x0003
+
+/*
+ * R76 (0x4C) - Output Control
+ */
+#define WM8350_OUT4_VROI 0x0800
+#define WM8350_OUT3_VROI 0x0400
+#define WM8350_OUT2_VROI 0x0200
+#define WM8350_OUT1_VROI 0x0100
+#define WM8350_OUT2_FB 0x0004
+#define WM8350_OUT1_FB 0x0001
+
+/*
+ * R77 (0x4D) - Jack Detect
+ */
+#define WM8350_JDL_ENA 0x8000
+#define WM8350_JDR_ENA 0x4000
+
+/*
+ * R78 (0x4E) - Anti Pop Control
+ */
+#define WM8350_ANTI_POP_MASK 0x0300
+#define WM8350_DIS_OP_LN4_MASK 0x00C0
+#define WM8350_DIS_OP_LN3_MASK 0x0030
+#define WM8350_DIS_OP_OUT2_MASK 0x000C
+#define WM8350_DIS_OP_OUT1_MASK 0x0003
+
+/*
+ * R80 (0x50) - Left Input Volume
+ */
+#define WM8350_INL_MUTE 0x4000
+#define WM8350_INL_ZC 0x2000
+#define WM8350_IN_VU 0x0100
+#define WM8350_INL_VOL_MASK 0x00FC
+
+/*
+ * R81 (0x51) - Right Input Volume
+ */
+#define WM8350_INR_MUTE 0x4000
+#define WM8350_INR_ZC 0x2000
+#define WM8350_IN_VU 0x0100
+#define WM8350_INR_VOL_MASK 0x00FC
+
+/*
+ * R88 (0x58) - Left Mixer Control
+ */
+#define WM8350_DACR_TO_MIXOUTL 0x1000
+#define WM8350_DACL_TO_MIXOUTL 0x0800
+#define WM8350_IN3L_TO_MIXOUTL 0x0004
+#define WM8350_INR_TO_MIXOUTL 0x0002
+#define WM8350_INL_TO_MIXOUTL 0x0001
+
+/*
+ * R89 (0x59) - Right Mixer Control
+ */
+#define WM8350_DACR_TO_MIXOUTR 0x1000
+#define WM8350_DACL_TO_MIXOUTR 0x0800
+#define WM8350_IN3R_TO_MIXOUTR 0x0008
+#define WM8350_INR_TO_MIXOUTR 0x0002
+#define WM8350_INL_TO_MIXOUTR 0x0001
+
+/*
+ * R92 (0x5C) - OUT3 Mixer Control
+ */
+#define WM8350_DACL_TO_OUT3 0x0800
+#define WM8350_MIXINL_TO_OUT3 0x0100
+#define WM8350_OUT4_TO_OUT3 0x0008
+#define WM8350_MIXOUTL_TO_OUT3 0x0001
+
+/*
+ * R93 (0x5D) - OUT4 Mixer Control
+ */
+#define WM8350_DACR_TO_OUT4 0x1000
+#define WM8350_DACL_TO_OUT4 0x0800
+#define WM8350_OUT4_ATTN 0x0400
+#define WM8350_MIXINR_TO_OUT4 0x0200
+#define WM8350_OUT3_TO_OUT4 0x0004
+#define WM8350_MIXOUTR_TO_OUT4 0x0002
+#define WM8350_MIXOUTL_TO_OUT4 0x0001
+
+/*
+ * R96 (0x60) - Output Left Mixer Volume
+ */
+#define WM8350_IN3L_MIXOUTL_VOL_MASK 0x0E00
+#define WM8350_IN3L_MIXOUTL_VOL_SHIFT 9
+#define WM8350_INR_MIXOUTL_VOL_MASK 0x00E0
+#define WM8350_INR_MIXOUTL_VOL_SHIFT 5
+#define WM8350_INL_MIXOUTL_VOL_MASK 0x000E
+#define WM8350_INL_MIXOUTL_VOL_SHIFT 1
+
+/* Bit values for R96 (0x60) */
+#define WM8350_IN3L_MIXOUTL_VOL_OFF 0
+#define WM8350_IN3L_MIXOUTL_VOL_M12DB 1
+#define WM8350_IN3L_MIXOUTL_VOL_M9DB 2
+#define WM8350_IN3L_MIXOUTL_VOL_M6DB 3
+#define WM8350_IN3L_MIXOUTL_VOL_M3DB 4
+#define WM8350_IN3L_MIXOUTL_VOL_0DB 5
+#define WM8350_IN3L_MIXOUTL_VOL_3DB 6
+#define WM8350_IN3L_MIXOUTL_VOL_6DB 7
+
+#define WM8350_INR_MIXOUTL_VOL_OFF 0
+#define WM8350_INR_MIXOUTL_VOL_M12DB 1
+#define WM8350_INR_MIXOUTL_VOL_M9DB 2
+#define WM8350_INR_MIXOUTL_VOL_M6DB 3
+#define WM8350_INR_MIXOUTL_VOL_M3DB 4
+#define WM8350_INR_MIXOUTL_VOL_0DB 5
+#define WM8350_INR_MIXOUTL_VOL_3DB 6
+#define WM8350_INR_MIXOUTL_VOL_6DB 7
+
+#define WM8350_INL_MIXOUTL_VOL_OFF 0
+#define WM8350_INL_MIXOUTL_VOL_M12DB 1
+#define WM8350_INL_MIXOUTL_VOL_M9DB 2
+#define WM8350_INL_MIXOUTL_VOL_M6DB 3
+#define WM8350_INL_MIXOUTL_VOL_M3DB 4
+#define WM8350_INL_MIXOUTL_VOL_0DB 5
+#define WM8350_INL_MIXOUTL_VOL_3DB 6
+#define WM8350_INL_MIXOUTL_VOL_6DB 7
+
+/*
+ * R97 (0x61) - Output Right Mixer Volume
+ */
+#define WM8350_IN3R_MIXOUTR_VOL_MASK 0xE000
+#define WM8350_IN3R_MIXOUTR_VOL_SHIFT 13
+#define WM8350_INR_MIXOUTR_VOL_MASK 0x00E0
+#define WM8350_INR_MIXOUTR_VOL_SHIFT 5
+#define WM8350_INL_MIXOUTR_VOL_MASK 0x000E
+#define WM8350_INL_MIXOUTR_VOL_SHIFT 1
+
+/* Bit values for R97 (0x61) */
+#define WM8350_IN3R_MIXOUTR_VOL_OFF 0
+#define WM8350_IN3R_MIXOUTR_VOL_M12DB 1
+#define WM8350_IN3R_MIXOUTR_VOL_M9DB 2
+#define WM8350_IN3R_MIXOUTR_VOL_M6DB 3
+#define WM8350_IN3R_MIXOUTR_VOL_M3DB 4
+#define WM8350_IN3R_MIXOUTR_VOL_0DB 5
+#define WM8350_IN3R_MIXOUTR_VOL_3DB 6
+#define WM8350_IN3R_MIXOUTR_VOL_6DB 7
+
+#define WM8350_INR_MIXOUTR_VOL_OFF 0
+#define WM8350_INR_MIXOUTR_VOL_M12DB 1
+#define WM8350_INR_MIXOUTR_VOL_M9DB 2
+#define WM8350_INR_MIXOUTR_VOL_M6DB 3
+#define WM8350_INR_MIXOUTR_VOL_M3DB 4
+#define WM8350_INR_MIXOUTR_VOL_0DB 5
+#define WM8350_INR_MIXOUTR_VOL_3DB 6
+#define WM8350_INR_MIXOUTR_VOL_6DB 7
+
+#define WM8350_INL_MIXOUTR_VOL_OFF 0
+#define WM8350_INL_MIXOUTR_VOL_M12DB 1
+#define WM8350_INL_MIXOUTR_VOL_M9DB 2
+#define WM8350_INL_MIXOUTR_VOL_M6DB 3
+#define WM8350_INL_MIXOUTR_VOL_M3DB 4
+#define WM8350_INL_MIXOUTR_VOL_0DB 5
+#define WM8350_INL_MIXOUTR_VOL_3DB 6
+#define WM8350_INL_MIXOUTR_VOL_6DB 7
+
+/*
+ * R98 (0x62) - Input Mixer Volume L
+ */
+#define WM8350_IN3L_MIXINL_VOL_MASK 0x0E00
+#define WM8350_IN2L_MIXINL_VOL_MASK 0x000E
+#define WM8350_INL_MIXINL_VOL 0x0001
+
+/*
+ * R99 (0x63) - Input Mixer Volume R
+ */
+#define WM8350_IN3R_MIXINR_VOL_MASK 0xE000
+#define WM8350_IN2R_MIXINR_VOL_MASK 0x00E0
+#define WM8350_INR_MIXINR_VOL 0x0001
+
+/*
+ * R100 (0x64) - Input Mixer Volume
+ */
+#define WM8350_OUT4_MIXIN_DST 0x8000
+#define WM8350_OUT4_MIXIN_VOL_MASK 0x000E
+
+/*
+ * R104 (0x68) - LOUT1 Volume
+ */
+#define WM8350_OUT1L_MUTE 0x4000
+#define WM8350_OUT1L_ZC 0x2000
+#define WM8350_OUT1_VU 0x0100
+#define WM8350_OUT1L_VOL_MASK 0x00FC
+#define WM8350_OUT1L_VOL_SHIFT 2
+
+/*
+ * R105 (0x69) - ROUT1 Volume
+ */
+#define WM8350_OUT1R_MUTE 0x4000
+#define WM8350_OUT1R_ZC 0x2000
+#define WM8350_OUT1_VU 0x0100
+#define WM8350_OUT1R_VOL_MASK 0x00FC
+#define WM8350_OUT1R_VOL_SHIFT 2
+
+/*
+ * R106 (0x6A) - LOUT2 Volume
+ */
+#define WM8350_OUT2L_MUTE 0x4000
+#define WM8350_OUT2L_ZC 0x2000
+#define WM8350_OUT2_VU 0x0100
+#define WM8350_OUT2L_VOL_MASK 0x00FC
+
+/*
+ * R107 (0x6B) - ROUT2 Volume
+ */
+#define WM8350_OUT2R_MUTE 0x4000
+#define WM8350_OUT2R_ZC 0x2000
+#define WM8350_OUT2R_INV 0x0400
+#define WM8350_OUT2R_INV_MUTE 0x0200
+#define WM8350_OUT2_VU 0x0100
+#define WM8350_OUT2R_VOL_MASK 0x00FC
+
+/*
+ * R111 (0x6F) - BEEP Volume
+ */
+#define WM8350_IN3R_OUT2R_VOL_MASK 0x00E0
+
+/*
+ * R112 (0x70) - AI Formating
+ */
+#define WM8350_AIF_BCLK_INV 0x8000
+#define WM8350_AIF_TRI 0x2000
+#define WM8350_AIF_LRCLK_INV 0x1000
+#define WM8350_AIF_WL_MASK 0x0C00
+#define WM8350_AIF_FMT_MASK 0x0300
+
+/*
+ * R113 (0x71) - ADC DAC COMP
+ */
+#define WM8350_DAC_COMP 0x0080
+#define WM8350_DAC_COMPMODE 0x0040
+#define WM8350_ADC_COMP 0x0020
+#define WM8350_ADC_COMPMODE 0x0010
+#define WM8350_LOOPBACK 0x0001
+
+/*
+ * R114 (0x72) - AI ADC Control
+ */
+#define WM8350_AIFADC_PD 0x0080
+#define WM8350_AIFADCL_SRC 0x0040
+#define WM8350_AIFADCR_SRC 0x0020
+#define WM8350_AIFADC_TDM_CHAN 0x0010
+#define WM8350_AIFADC_TDM 0x0008
+
+/*
+ * R115 (0x73) - AI DAC Control
+ */
+#define WM8350_BCLK_MSTR 0x4000
+#define WM8350_AIFDAC_PD 0x0080
+#define WM8350_DACL_SRC 0x0040
+#define WM8350_DACR_SRC 0x0020
+#define WM8350_AIFDAC_TDM_CHAN 0x0010
+#define WM8350_AIFDAC_TDM 0x0008
+#define WM8350_DAC_BOOST_MASK 0x0003
+
+/*
+ * R116 (0x74) - AIF Test
+ */
+#define WM8350_CODEC_BYP 0x4000
+#define WM8350_AIFADC_WR_TST 0x2000
+#define WM8350_AIFADC_RD_TST 0x1000
+#define WM8350_AIFDAC_WR_TST 0x0800
+#define WM8350_AIFDAC_RD_TST 0x0400
+#define WM8350_AIFADC_ASYN 0x0020
+#define WM8350_AIFDAC_ASYN 0x0010
+
+/*
+ * R231 (0xE7) - Jack Status
+ */
+#define WM8350_JACK_L_LVL 0x0800
+#define WM8350_JACK_R_LVL 0x0400
+#define WM8350_JACK_MICSCD_LVL 0x0200
+#define WM8350_JACK_MICSD_LVL 0x0100
+
+/*
+ * WM8350 Platform setup
+ */
+#define WM8350_S_CURVE_NONE 0x0
+#define WM8350_S_CURVE_FAST 0x1
+#define WM8350_S_CURVE_MEDIUM 0x2
+#define WM8350_S_CURVE_SLOW 0x3
+
+#define WM8350_DISCHARGE_OFF 0x0
+#define WM8350_DISCHARGE_FAST 0x1
+#define WM8350_DISCHARGE_MEDIUM 0x2
+#define WM8350_DISCHARGE_SLOW 0x3
+
+#define WM8350_TIE_OFF_500R 0x0
+#define WM8350_TIE_OFF_30K 0x1
+
+/*
+ * Clock sources & directions
+ */
+#define WM8350_SYSCLK 0
+
+#define WM8350_MCLK_SEL_PLL_MCLK 0
+#define WM8350_MCLK_SEL_PLL_DAC 1
+#define WM8350_MCLK_SEL_PLL_ADC 2
+#define WM8350_MCLK_SEL_PLL_32K 3
+#define WM8350_MCLK_SEL_MCLK 5
+
+/* clock divider id's */
+#define WM8350_ADC_CLKDIV 0
+#define WM8350_DAC_CLKDIV 1
+#define WM8350_BCLK_CLKDIV 2
+#define WM8350_OPCLK_CLKDIV 3
+#define WM8350_TO_CLKDIV 4
+#define WM8350_SYS_CLKDIV 5
+#define WM8350_DACLR_CLKDIV 6
+#define WM8350_ADCLR_CLKDIV 7
+
+/* ADC clock dividers */
+#define WM8350_ADCDIV_1 0x0
+#define WM8350_ADCDIV_1_5 0x1
+#define WM8350_ADCDIV_2 0x2
+#define WM8350_ADCDIV_3 0x3
+#define WM8350_ADCDIV_4 0x4
+#define WM8350_ADCDIV_5_5 0x5
+#define WM8350_ADCDIV_6 0x6
+
+/* DAC clock dividers */
+#define WM8350_DACDIV_1 0x0
+#define WM8350_DACDIV_1_5 0x1
+#define WM8350_DACDIV_2 0x2
+#define WM8350_DACDIV_3 0x3
+#define WM8350_DACDIV_4 0x4
+#define WM8350_DACDIV_5_5 0x5
+#define WM8350_DACDIV_6 0x6
+
+/* BCLK clock dividers */
+#define WM8350_BCLK_DIV_1 (0x0 << 4)
+#define WM8350_BCLK_DIV_1_5 (0x1 << 4)
+#define WM8350_BCLK_DIV_2 (0x2 << 4)
+#define WM8350_BCLK_DIV_3 (0x3 << 4)
+#define WM8350_BCLK_DIV_4 (0x4 << 4)
+#define WM8350_BCLK_DIV_5_5 (0x5 << 4)
+#define WM8350_BCLK_DIV_6 (0x6 << 4)
+#define WM8350_BCLK_DIV_8 (0x7 << 4)
+#define WM8350_BCLK_DIV_11 (0x8 << 4)
+#define WM8350_BCLK_DIV_12 (0x9 << 4)
+#define WM8350_BCLK_DIV_16 (0xa << 4)
+#define WM8350_BCLK_DIV_22 (0xb << 4)
+#define WM8350_BCLK_DIV_24 (0xc << 4)
+#define WM8350_BCLK_DIV_32 (0xd << 4)
+#define WM8350_BCLK_DIV_44 (0xe << 4)
+#define WM8350_BCLK_DIV_48 (0xf << 4)
+
+/* Sys (MCLK) clock dividers */
+#define WM8350_MCLK_DIV_1 (0x0 << 8)
+#define WM8350_MCLK_DIV_2 (0x1 << 8)
+
+/* OP clock dividers */
+#define WM8350_OPCLK_DIV_1 0x0
+#define WM8350_OPCLK_DIV_2 0x1
+#define WM8350_OPCLK_DIV_3 0x2
+#define WM8350_OPCLK_DIV_4 0x3
+#define WM8350_OPCLK_DIV_5_5 0x4
+#define WM8350_OPCLK_DIV_6 0x5
+
+/* DAI ID */
+#define WM8350_HIFI_DAI 0
+
+/*
+ * Audio interrupts.
+ */
+#define WM8350_IRQ_CODEC_JCK_DET_L 39
+#define WM8350_IRQ_CODEC_JCK_DET_R 40
+#define WM8350_IRQ_CODEC_MICSCD 41
+#define WM8350_IRQ_CODEC_MICD 42
+
+/*
+ * WM8350 Platform data.
+ *
+ * This must be initialised per platform for best audio performance.
+ * Please see the WM8350 datasheet for details.
+ */
+struct wm8350_audio_platform_data {
+ int vmid_discharge_msecs; /* VMID --> OFF discharge time */
+ int drain_msecs; /* OFF drain time */
+ int cap_discharge_msecs; /* Cap ON (from OFF) discharge time */
+ int vmid_charge_msecs; /* vmid power up time */
+ u32 vmid_s_curve:2; /* vmid enable s curve speed */
+ u32 dis_out4:2; /* out4 discharge speed */
+ u32 dis_out3:2; /* out3 discharge speed */
+ u32 dis_out2:2; /* out2 discharge speed */
+ u32 dis_out1:2; /* out1 discharge speed */
+ u32 vroi_out4:1; /* out4 tie off */
+ u32 vroi_out3:1; /* out3 tie off */
+ u32 vroi_out2:1; /* out2 tie off */
+ u32 vroi_out1:1; /* out1 tie off */
+ u32 vroi_enable:1; /* enable tie off */
+ u32 codec_current_on:2; /* current level ON */
+ u32 codec_current_standby:2; /* current level STANDBY */
+ u32 codec_current_charge:2; /* codec current @ vmid charge */
+};
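As a hedged illustration of how a board support file might fill this structure in, the sketch below reuses only the constants defined earlier in this header; every numeric timing and the my_board_wm8350_audio name are placeholders rather than recommended settings for any real platform.

/* Illustrative platform data only: timings and levels are placeholders. */
#include <linux/mfd/wm8350/audio.h>

static struct wm8350_audio_platform_data my_board_wm8350_audio = {
	.vmid_discharge_msecs	= 1000,
	.drain_msecs		= 30,
	.cap_discharge_msecs	= 700,
	.vmid_charge_msecs	= 700,
	.vmid_s_curve		= WM8350_S_CURVE_SLOW,
	.dis_out1		= WM8350_DISCHARGE_SLOW,
	.dis_out2		= WM8350_DISCHARGE_SLOW,
	.dis_out3		= WM8350_DISCHARGE_OFF,
	.dis_out4		= WM8350_DISCHARGE_OFF,
	.vroi_out1		= WM8350_TIE_OFF_500R,
	.vroi_out2		= WM8350_TIE_OFF_500R,
	.vroi_out3		= WM8350_TIE_OFF_30K,
	.vroi_out4		= WM8350_TIE_OFF_30K,
	.vroi_enable		= 0,
	.codec_current_on	= WM8350_CODEC_ISEL_1_0,
	.codec_current_standby	= WM8350_CODEC_ISEL_0_5,
	.codec_current_charge	= WM8350_CODEC_ISEL_1_5,
};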
+
+struct snd_soc_codec;
+
+struct wm8350_codec {
+ struct platform_device *pdev;
+ struct snd_soc_codec *codec;
+ struct wm8350_audio_platform_data *platform_data;
+};
+
+#endif
diff --git a/include/linux/mfd/wm8350/comparator.h b/include/linux/mfd/wm8350/comparator.h
new file mode 100644
index 000000000..54bc5d0fd
--- /dev/null
+++ b/include/linux/mfd/wm8350/comparator.h
@@ -0,0 +1,175 @@
+/*
+ * comparator.h -- Comparator Aux ADC for Wolfson WM8350 PMIC
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_WM8350_COMPARATOR_H_
+#define __LINUX_MFD_WM8350_COMPARATOR_H_
+
+/*
+ * Registers
+ */
+
+#define WM8350_DIGITISER_CONTROL_1 0x90
+#define WM8350_DIGITISER_CONTROL_2 0x91
+#define WM8350_AUX1_READBACK 0x98
+#define WM8350_AUX2_READBACK 0x99
+#define WM8350_AUX3_READBACK 0x9A
+#define WM8350_AUX4_READBACK 0x9B
+#define WM8350_CHIP_TEMP_READBACK 0x9F
+#define WM8350_GENERIC_COMPARATOR_CONTROL 0xA3
+#define WM8350_GENERIC_COMPARATOR_1 0xA4
+#define WM8350_GENERIC_COMPARATOR_2 0xA5
+#define WM8350_GENERIC_COMPARATOR_3 0xA6
+#define WM8350_GENERIC_COMPARATOR_4 0xA7
+
+/*
+ * R144 (0x90) - Digitiser Control (1)
+ */
+#define WM8350_AUXADC_CTC 0x4000
+#define WM8350_AUXADC_POLL 0x2000
+#define WM8350_AUXADC_HIB_MODE 0x1000
+#define WM8350_AUXADC_SEL8 0x0080
+#define WM8350_AUXADC_SEL7 0x0040
+#define WM8350_AUXADC_SEL6 0x0020
+#define WM8350_AUXADC_SEL5 0x0010
+#define WM8350_AUXADC_SEL4 0x0008
+#define WM8350_AUXADC_SEL3 0x0004
+#define WM8350_AUXADC_SEL2 0x0002
+#define WM8350_AUXADC_SEL1 0x0001
+
+/*
+ * R145 (0x91) - Digitiser Control (2)
+ */
+#define WM8350_AUXADC_MASKMODE_MASK 0x3000
+#define WM8350_AUXADC_CRATE_MASK 0x0700
+#define WM8350_AUXADC_CAL 0x0004
+#define WM8350_AUX_RBMODE 0x0002
+#define WM8350_AUXADC_WAIT 0x0001
+
+/*
+ * R152 (0x98) - AUX1 Readback
+ */
+#define WM8350_AUXADC_SCALE1_MASK 0x6000
+#define WM8350_AUXADC_REF1 0x1000
+#define WM8350_AUXADC_DATA1_MASK 0x0FFF
+
+/*
+ * R153 (0x99) - AUX2 Readback
+ */
+#define WM8350_AUXADC_SCALE2_MASK 0x6000
+#define WM8350_AUXADC_REF2 0x1000
+#define WM8350_AUXADC_DATA2_MASK 0x0FFF
+
+/*
+ * R154 (0x9A) - AUX3 Readback
+ */
+#define WM8350_AUXADC_SCALE3_MASK 0x6000
+#define WM8350_AUXADC_REF3 0x1000
+#define WM8350_AUXADC_DATA3_MASK 0x0FFF
+
+/*
+ * R155 (0x9B) - AUX4 Readback
+ */
+#define WM8350_AUXADC_SCALE4_MASK 0x6000
+#define WM8350_AUXADC_REF4 0x1000
+#define WM8350_AUXADC_DATA4_MASK 0x0FFF
+
+/*
+ * R156 (0x9C) - USB Voltage Readback
+ */
+#define WM8350_AUXADC_DATA_USB_MASK 0x0FFF
+
+/*
+ * R157 (0x9D) - LINE Voltage Readback
+ */
+#define WM8350_AUXADC_DATA_LINE_MASK 0x0FFF
+
+/*
+ * R158 (0x9E) - BATT Voltage Readback
+ */
+#define WM8350_AUXADC_DATA_BATT_MASK 0x0FFF
+
+/*
+ * R159 (0x9F) - Chip Temp Readback
+ */
+#define WM8350_AUXADC_DATA_CHIPTEMP_MASK 0x0FFF
+
+/*
+ * R163 (0xA3) - Generic Comparator Control
+ */
+#define WM8350_DCMP4_ENA 0x0008
+#define WM8350_DCMP3_ENA 0x0004
+#define WM8350_DCMP2_ENA 0x0002
+#define WM8350_DCMP1_ENA 0x0001
+
+/*
+ * R164 (0xA4) - Generic comparator 1
+ */
+#define WM8350_DCMP1_SRCSEL_MASK 0xE000
+#define WM8350_DCMP1_GT 0x1000
+#define WM8350_DCMP1_THR_MASK 0x0FFF
+
+/*
+ * R165 (0xA5) - Generic comparator 2
+ */
+#define WM8350_DCMP2_SRCSEL_MASK 0xE000
+#define WM8350_DCMP2_GT 0x1000
+#define WM8350_DCMP2_THR_MASK 0x0FFF
+
+/*
+ * R166 (0xA6) - Generic comparator 3
+ */
+#define WM8350_DCMP3_SRCSEL_MASK 0xE000
+#define WM8350_DCMP3_GT 0x1000
+#define WM8350_DCMP3_THR_MASK 0x0FFF
+
+/*
+ * R167 (0xA7) - Generic comparator 4
+ */
+#define WM8350_DCMP4_SRCSEL_MASK 0xE000
+#define WM8350_DCMP4_GT 0x1000
+#define WM8350_DCMP4_THR_MASK 0x0FFF
+
+/*
+ * Interrupts.
+ */
+#define WM8350_IRQ_AUXADC_DATARDY 16
+#define WM8350_IRQ_AUXADC_DCOMP4 17
+#define WM8350_IRQ_AUXADC_DCOMP3 18
+#define WM8350_IRQ_AUXADC_DCOMP2 19
+#define WM8350_IRQ_AUXADC_DCOMP1 20
+#define WM8350_IRQ_SYS_HYST_COMP_FAIL 21
+#define WM8350_IRQ_SYS_CHIP_GT115 22
+#define WM8350_IRQ_SYS_CHIP_GT140 23
+
+/*
+ * USB/2, LINE & BATT scaling: 1 LSB = ((VRTC * 2) / 4095) * 10^6 uV,
+ * where VRTC = 2.7 V.
+ */
+#define WM8350_AUX_COEFF 1319
+
+#define WM8350_AUXADC_AUX1 0
+#define WM8350_AUXADC_AUX2 1
+#define WM8350_AUXADC_AUX3 2
+#define WM8350_AUXADC_AUX4 3
+#define WM8350_AUXADC_USB 4
+#define WM8350_AUXADC_LINE 5
+#define WM8350_AUXADC_BATT 6
+#define WM8350_AUXADC_TEMP 7
+
+struct wm8350;
+
+/*
+ * AUX ADC Readback
+ */
+int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale,
+ int vref);
+
+#endif
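The coefficient above works out to (2 * 2.7 V / 4095) * 10^6 ≈ 1318.7 µV per LSB, which rounds to 1319. The sketch below shows one plausible way to turn a battery readback into microvolts; the assumption that wm8350_read_auxadc() returns the raw 12-bit conversion (negative on error) and that scale/vref may be left at 0 for the default reference is mine, not something this header guarantees.

/* Standalone sketch, not part of the header. Assumes wm8350_read_auxadc()
 * returns the raw 12-bit AUXADC result (0..4095), negative on error, and
 * that scale = 0 / vref = 0 select the default internal reference. */
#include <linux/mfd/wm8350/core.h>
#include <linux/mfd/wm8350/comparator.h>

static int example_battery_microvolts(struct wm8350 *wm8350)
{
	int raw = wm8350_read_auxadc(wm8350, WM8350_AUXADC_BATT, 0, 0);

	if (raw < 0)
		return raw;		/* propagate the error */

	/* 1 LSB is roughly 1319 uV on the BATT, LINE and USB/2 channels. */
	return raw * WM8350_AUX_COEFF;
}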
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
new file mode 100644
index 000000000..509481d9c
--- /dev/null
+++ b/include/linux/mfd/wm8350/core.h
@@ -0,0 +1,694 @@
+/*
+ * core.h -- Core Driver for Wolfson WM8350 PMIC
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_WM8350_CORE_H_
+#define __LINUX_MFD_WM8350_CORE_H_
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/wm8350/audio.h>
+#include <linux/mfd/wm8350/gpio.h>
+#include <linux/mfd/wm8350/pmic.h>
+#include <linux/mfd/wm8350/rtc.h>
+#include <linux/mfd/wm8350/supply.h>
+#include <linux/mfd/wm8350/wdt.h>
+
+/*
+ * Register values.
+ */
+#define WM8350_RESET_ID 0x00
+#define WM8350_ID 0x01
+#define WM8350_REVISION 0x02
+#define WM8350_SYSTEM_CONTROL_1 0x03
+#define WM8350_SYSTEM_CONTROL_2 0x04
+#define WM8350_SYSTEM_HIBERNATE 0x05
+#define WM8350_INTERFACE_CONTROL 0x06
+#define WM8350_POWER_MGMT_1 0x08
+#define WM8350_POWER_MGMT_2 0x09
+#define WM8350_POWER_MGMT_3 0x0A
+#define WM8350_POWER_MGMT_4 0x0B
+#define WM8350_POWER_MGMT_5 0x0C
+#define WM8350_POWER_MGMT_6 0x0D
+#define WM8350_POWER_MGMT_7 0x0E
+
+#define WM8350_SYSTEM_INTERRUPTS 0x18
+#define WM8350_INT_STATUS_1 0x19
+#define WM8350_INT_STATUS_2 0x1A
+#define WM8350_POWER_UP_INT_STATUS 0x1B
+#define WM8350_UNDER_VOLTAGE_INT_STATUS 0x1C
+#define WM8350_OVER_CURRENT_INT_STATUS 0x1D
+#define WM8350_GPIO_INT_STATUS 0x1E
+#define WM8350_COMPARATOR_INT_STATUS 0x1F
+#define WM8350_SYSTEM_INTERRUPTS_MASK 0x20
+#define WM8350_INT_STATUS_1_MASK 0x21
+#define WM8350_INT_STATUS_2_MASK 0x22
+#define WM8350_POWER_UP_INT_STATUS_MASK 0x23
+#define WM8350_UNDER_VOLTAGE_INT_STATUS_MASK 0x24
+#define WM8350_OVER_CURRENT_INT_STATUS_MASK 0x25
+#define WM8350_GPIO_INT_STATUS_MASK 0x26
+#define WM8350_COMPARATOR_INT_STATUS_MASK 0x27
+#define WM8350_CHARGER_OVERRIDES 0xE2
+#define WM8350_MISC_OVERRIDES 0xE3
+#define WM8350_COMPARATOR_OVERRIDES 0xE7
+#define WM8350_STATE_MACHINE_STATUS 0xE9
+
+#define WM8350_MAX_REGISTER 0xFF
+
+#define WM8350_UNLOCK_KEY 0x0013
+#define WM8350_LOCK_KEY 0x0000
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Reset/ID
+ */
+#define WM8350_SW_RESET_CHIP_ID_MASK 0xFFFF
+
+/*
+ * R1 (0x01) - ID
+ */
+#define WM8350_CHIP_REV_MASK 0x7000
+#define WM8350_CONF_STS_MASK 0x0C00
+#define WM8350_CUST_ID_MASK 0x00FF
+
+/*
+ * R2 (0x02) - Revision
+ */
+#define WM8350_MASK_REV_MASK 0x00FF
+
+/*
+ * R3 (0x03) - System Control 1
+ */
+#define WM8350_CHIP_ON 0x8000
+#define WM8350_POWERCYCLE 0x2000
+#define WM8350_VCC_FAULT_OV 0x1000
+#define WM8350_REG_RSTB_TIME_MASK 0x0C00
+#define WM8350_BG_SLEEP 0x0200
+#define WM8350_MEM_VALID 0x0020
+#define WM8350_CHIP_SET_UP 0x0010
+#define WM8350_ON_DEB_T 0x0008
+#define WM8350_ON_POL 0x0002
+#define WM8350_IRQ_POL 0x0001
+
+/*
+ * R4 (0x04) - System Control 2
+ */
+#define WM8350_USB_SUSPEND_8MA 0x8000
+#define WM8350_USB_SUSPEND 0x4000
+#define WM8350_USB_MSTR 0x2000
+#define WM8350_USB_MSTR_SRC 0x1000
+#define WM8350_USB_500MA 0x0800
+#define WM8350_USB_NOLIM 0x0400
+
+/*
+ * R5 (0x05) - System Hibernate
+ */
+#define WM8350_HIBERNATE 0x8000
+#define WM8350_WDOG_HIB_MODE 0x0080
+#define WM8350_REG_HIB_STARTUP_SEQ 0x0040
+#define WM8350_REG_RESET_HIB_MODE 0x0020
+#define WM8350_RST_HIB_MODE 0x0010
+#define WM8350_IRQ_HIB_MODE 0x0008
+#define WM8350_MEMRST_HIB_MODE 0x0004
+#define WM8350_PCCOMP_HIB_MODE 0x0002
+#define WM8350_TEMPMON_HIB_MODE 0x0001
+
+/*
+ * R6 (0x06) - Interface Control
+ */
+#define WM8350_USE_DEV_PINS 0x8000
+#define WM8350_USE_DEV_PINS_MASK 0x8000
+#define WM8350_USE_DEV_PINS_SHIFT 15
+#define WM8350_DEV_ADDR_MASK 0x6000
+#define WM8350_DEV_ADDR_SHIFT 13
+#define WM8350_CONFIG_DONE 0x1000
+#define WM8350_CONFIG_DONE_MASK 0x1000
+#define WM8350_CONFIG_DONE_SHIFT 12
+#define WM8350_RECONFIG_AT_ON 0x0800
+#define WM8350_RECONFIG_AT_ON_MASK 0x0800
+#define WM8350_RECONFIG_AT_ON_SHIFT 11
+#define WM8350_AUTOINC 0x0200
+#define WM8350_AUTOINC_MASK 0x0200
+#define WM8350_AUTOINC_SHIFT 9
+#define WM8350_ARA 0x0100
+#define WM8350_ARA_MASK 0x0100
+#define WM8350_ARA_SHIFT 8
+#define WM8350_SPI_CFG 0x0008
+#define WM8350_SPI_CFG_MASK 0x0008
+#define WM8350_SPI_CFG_SHIFT 3
+#define WM8350_SPI_4WIRE 0x0004
+#define WM8350_SPI_4WIRE_MASK 0x0004
+#define WM8350_SPI_4WIRE_SHIFT 2
+#define WM8350_SPI_3WIRE 0x0002
+#define WM8350_SPI_3WIRE_MASK 0x0002
+#define WM8350_SPI_3WIRE_SHIFT 1
+
+/* Bit values for R06 (0x06) */
+#define WM8350_USE_DEV_PINS_PRIMARY 0
+#define WM8350_USE_DEV_PINS_DEV 1
+
+#define WM8350_DEV_ADDR_34 0
+#define WM8350_DEV_ADDR_36 1
+#define WM8350_DEV_ADDR_3C 2
+#define WM8350_DEV_ADDR_3E 3
+
+#define WM8350_CONFIG_DONE_OFF 0
+#define WM8350_CONFIG_DONE_DONE 1
+
+#define WM8350_RECONFIG_AT_ON_OFF 0
+#define WM8350_RECONFIG_AT_ON_ON 1
+
+#define WM8350_AUTOINC_OFF 0
+#define WM8350_AUTOINC_ON 1
+
+#define WM8350_ARA_OFF 0
+#define WM8350_ARA_ON 1
+
+#define WM8350_SPI_CFG_CMOS 0
+#define WM8350_SPI_CFG_OD 1
+
+#define WM8350_SPI_4WIRE_3WIRE 0
+#define WM8350_SPI_4WIRE_4WIRE 1
+
+#define WM8350_SPI_3WIRE_I2C 0
+#define WM8350_SPI_3WIRE_SPI 1
+
+/*
+ * R8 (0x08) - Power mgmt (1)
+ */
+#define WM8350_CODEC_ISEL_MASK 0xC000
+#define WM8350_VBUFEN 0x2000
+#define WM8350_OUTPUT_DRAIN_EN 0x0400
+#define WM8350_MIC_DET_ENA 0x0100
+#define WM8350_BIASEN 0x0020
+#define WM8350_MICBEN 0x0010
+#define WM8350_VMIDEN 0x0004
+#define WM8350_VMID_MASK 0x0003
+#define WM8350_VMID_SHIFT 0
+
+/*
+ * R9 (0x09) - Power mgmt (2)
+ */
+#define WM8350_IN3R_ENA 0x0800
+#define WM8350_IN3L_ENA 0x0400
+#define WM8350_INR_ENA 0x0200
+#define WM8350_INL_ENA 0x0100
+#define WM8350_MIXINR_ENA 0x0080
+#define WM8350_MIXINL_ENA 0x0040
+#define WM8350_OUT4_ENA 0x0020
+#define WM8350_OUT3_ENA 0x0010
+#define WM8350_MIXOUTR_ENA 0x0002
+#define WM8350_MIXOUTL_ENA 0x0001
+
+/*
+ * R10 (0x0A) - Power mgmt (3)
+ */
+#define WM8350_IN3R_TO_OUT2R 0x0080
+#define WM8350_OUT2R_ENA 0x0008
+#define WM8350_OUT2L_ENA 0x0004
+#define WM8350_OUT1R_ENA 0x0002
+#define WM8350_OUT1L_ENA 0x0001
+
+/*
+ * R11 (0x0B) - Power mgmt (4)
+ */
+#define WM8350_SYSCLK_ENA 0x4000
+#define WM8350_ADC_HPF_ENA 0x2000
+#define WM8350_FLL_ENA 0x0800
+#define WM8350_FLL_OSC_ENA 0x0400
+#define WM8350_TOCLK_ENA 0x0100
+#define WM8350_DACR_ENA 0x0020
+#define WM8350_DACL_ENA 0x0010
+#define WM8350_ADCR_ENA 0x0008
+#define WM8350_ADCL_ENA 0x0004
+
+/*
+ * R12 (0x0C) - Power mgmt (5)
+ */
+#define WM8350_CODEC_ENA 0x1000
+#define WM8350_RTC_TICK_ENA 0x0800
+#define WM8350_OSC32K_ENA 0x0400
+#define WM8350_CHG_ENA 0x0200
+#define WM8350_ACC_DET_ENA 0x0100
+#define WM8350_AUXADC_ENA 0x0080
+#define WM8350_DCMP4_ENA 0x0008
+#define WM8350_DCMP3_ENA 0x0004
+#define WM8350_DCMP2_ENA 0x0002
+#define WM8350_DCMP1_ENA 0x0001
+
+/*
+ * R13 (0x0D) - Power mgmt (6)
+ */
+#define WM8350_LS_ENA 0x8000
+#define WM8350_LDO4_ENA 0x0800
+#define WM8350_LDO3_ENA 0x0400
+#define WM8350_LDO2_ENA 0x0200
+#define WM8350_LDO1_ENA 0x0100
+#define WM8350_DC6_ENA 0x0020
+#define WM8350_DC5_ENA 0x0010
+#define WM8350_DC4_ENA 0x0008
+#define WM8350_DC3_ENA 0x0004
+#define WM8350_DC2_ENA 0x0002
+#define WM8350_DC1_ENA 0x0001
+
+/*
+ * R14 (0x0E) - Power mgmt (7)
+ */
+#define WM8350_CS2_ENA 0x0002
+#define WM8350_CS1_ENA 0x0001
+
+/*
+ * R24 (0x18) - System Interrupts
+ */
+#define WM8350_OC_INT 0x2000
+#define WM8350_UV_INT 0x1000
+#define WM8350_PUTO_INT 0x0800
+#define WM8350_CS_INT 0x0200
+#define WM8350_EXT_INT 0x0100
+#define WM8350_CODEC_INT 0x0080
+#define WM8350_GP_INT 0x0040
+#define WM8350_AUXADC_INT 0x0020
+#define WM8350_RTC_INT 0x0010
+#define WM8350_SYS_INT 0x0008
+#define WM8350_CHG_INT 0x0004
+#define WM8350_USB_INT 0x0002
+#define WM8350_WKUP_INT 0x0001
+
+/*
+ * R25 (0x19) - Interrupt Status 1
+ */
+#define WM8350_CHG_BAT_HOT_EINT 0x8000
+#define WM8350_CHG_BAT_COLD_EINT 0x4000
+#define WM8350_CHG_BAT_FAIL_EINT 0x2000
+#define WM8350_CHG_TO_EINT 0x1000
+#define WM8350_CHG_END_EINT 0x0800
+#define WM8350_CHG_START_EINT 0x0400
+#define WM8350_CHG_FAST_RDY_EINT 0x0200
+#define WM8350_RTC_PER_EINT 0x0080
+#define WM8350_RTC_SEC_EINT 0x0040
+#define WM8350_RTC_ALM_EINT 0x0020
+#define WM8350_CHG_VBATT_LT_3P9_EINT 0x0004
+#define WM8350_CHG_VBATT_LT_3P1_EINT 0x0002
+#define WM8350_CHG_VBATT_LT_2P85_EINT 0x0001
+
+/*
+ * R26 (0x1A) - Interrupt Status 2
+ */
+#define WM8350_CS1_EINT 0x2000
+#define WM8350_CS2_EINT 0x1000
+#define WM8350_USB_LIMIT_EINT 0x0400
+#define WM8350_AUXADC_DATARDY_EINT 0x0100
+#define WM8350_AUXADC_DCOMP4_EINT 0x0080
+#define WM8350_AUXADC_DCOMP3_EINT 0x0040
+#define WM8350_AUXADC_DCOMP2_EINT 0x0020
+#define WM8350_AUXADC_DCOMP1_EINT 0x0010
+#define WM8350_SYS_HYST_COMP_FAIL_EINT 0x0008
+#define WM8350_SYS_CHIP_GT115_EINT 0x0004
+#define WM8350_SYS_CHIP_GT140_EINT 0x0002
+#define WM8350_SYS_WDOG_TO_EINT 0x0001
+
+/*
+ * R27 (0x1B) - Power Up Interrupt Status
+ */
+#define WM8350_PUTO_LDO4_EINT 0x0800
+#define WM8350_PUTO_LDO3_EINT 0x0400
+#define WM8350_PUTO_LDO2_EINT 0x0200
+#define WM8350_PUTO_LDO1_EINT 0x0100
+#define WM8350_PUTO_DC6_EINT 0x0020
+#define WM8350_PUTO_DC5_EINT 0x0010
+#define WM8350_PUTO_DC4_EINT 0x0008
+#define WM8350_PUTO_DC3_EINT 0x0004
+#define WM8350_PUTO_DC2_EINT 0x0002
+#define WM8350_PUTO_DC1_EINT 0x0001
+
+/*
+ * R28 (0x1C) - Under Voltage Interrupt status
+ */
+#define WM8350_UV_LDO4_EINT 0x0800
+#define WM8350_UV_LDO3_EINT 0x0400
+#define WM8350_UV_LDO2_EINT 0x0200
+#define WM8350_UV_LDO1_EINT 0x0100
+#define WM8350_UV_DC6_EINT 0x0020
+#define WM8350_UV_DC5_EINT 0x0010
+#define WM8350_UV_DC4_EINT 0x0008
+#define WM8350_UV_DC3_EINT 0x0004
+#define WM8350_UV_DC2_EINT 0x0002
+#define WM8350_UV_DC1_EINT 0x0001
+
+/*
+ * R29 (0x1D) - Over Current Interrupt status
+ */
+#define WM8350_OC_LS_EINT 0x8000
+
+/*
+ * R30 (0x1E) - GPIO Interrupt Status
+ */
+#define WM8350_GP12_EINT 0x1000
+#define WM8350_GP11_EINT 0x0800
+#define WM8350_GP10_EINT 0x0400
+#define WM8350_GP9_EINT 0x0200
+#define WM8350_GP8_EINT 0x0100
+#define WM8350_GP7_EINT 0x0080
+#define WM8350_GP6_EINT 0x0040
+#define WM8350_GP5_EINT 0x0020
+#define WM8350_GP4_EINT 0x0010
+#define WM8350_GP3_EINT 0x0008
+#define WM8350_GP2_EINT 0x0004
+#define WM8350_GP1_EINT 0x0002
+#define WM8350_GP0_EINT 0x0001
+
+/*
+ * R31 (0x1F) - Comparator Interrupt Status
+ */
+#define WM8350_EXT_USB_FB_EINT 0x8000
+#define WM8350_EXT_WALL_FB_EINT 0x4000
+#define WM8350_EXT_BAT_FB_EINT 0x2000
+#define WM8350_CODEC_JCK_DET_L_EINT 0x0800
+#define WM8350_CODEC_JCK_DET_R_EINT 0x0400
+#define WM8350_CODEC_MICSCD_EINT 0x0200
+#define WM8350_CODEC_MICD_EINT 0x0100
+#define WM8350_WKUP_OFF_STATE_EINT 0x0040
+#define WM8350_WKUP_HIB_STATE_EINT 0x0020
+#define WM8350_WKUP_CONV_FAULT_EINT 0x0010
+#define WM8350_WKUP_WDOG_RST_EINT 0x0008
+#define WM8350_WKUP_GP_PWR_ON_EINT 0x0004
+#define WM8350_WKUP_ONKEY_EINT 0x0002
+#define WM8350_WKUP_GP_WAKEUP_EINT 0x0001
+
+/*
+ * R32 (0x20) - System Interrupts Mask
+ */
+#define WM8350_IM_OC_INT 0x2000
+#define WM8350_IM_UV_INT 0x1000
+#define WM8350_IM_PUTO_INT 0x0800
+#define WM8350_IM_SPARE_INT 0x0400
+#define WM8350_IM_CS_INT 0x0200
+#define WM8350_IM_EXT_INT 0x0100
+#define WM8350_IM_CODEC_INT 0x0080
+#define WM8350_IM_GP_INT 0x0040
+#define WM8350_IM_AUXADC_INT 0x0020
+#define WM8350_IM_RTC_INT 0x0010
+#define WM8350_IM_SYS_INT 0x0008
+#define WM8350_IM_CHG_INT 0x0004
+#define WM8350_IM_USB_INT 0x0002
+#define WM8350_IM_WKUP_INT 0x0001
+
+/*
+ * R33 (0x21) - Interrupt Status 1 Mask
+ */
+#define WM8350_IM_CHG_BAT_HOT_EINT 0x8000
+#define WM8350_IM_CHG_BAT_COLD_EINT 0x4000
+#define WM8350_IM_CHG_BAT_FAIL_EINT 0x2000
+#define WM8350_IM_CHG_TO_EINT 0x1000
+#define WM8350_IM_CHG_END_EINT 0x0800
+#define WM8350_IM_CHG_START_EINT 0x0400
+#define WM8350_IM_CHG_FAST_RDY_EINT 0x0200
+#define WM8350_IM_RTC_PER_EINT 0x0080
+#define WM8350_IM_RTC_SEC_EINT 0x0040
+#define WM8350_IM_RTC_ALM_EINT 0x0020
+#define WM8350_IM_CHG_VBATT_LT_3P9_EINT 0x0004
+#define WM8350_IM_CHG_VBATT_LT_3P1_EINT 0x0002
+#define WM8350_IM_CHG_VBATT_LT_2P85_EINT 0x0001
+
+/*
+ * R34 (0x22) - Interrupt Status 2 Mask
+ */
+#define WM8350_IM_SPARE2_EINT 0x8000
+#define WM8350_IM_SPARE1_EINT 0x4000
+#define WM8350_IM_CS1_EINT 0x2000
+#define WM8350_IM_CS2_EINT 0x1000
+#define WM8350_IM_USB_LIMIT_EINT 0x0400
+#define WM8350_IM_AUXADC_DATARDY_EINT 0x0100
+#define WM8350_IM_AUXADC_DCOMP4_EINT 0x0080
+#define WM8350_IM_AUXADC_DCOMP3_EINT 0x0040
+#define WM8350_IM_AUXADC_DCOMP2_EINT 0x0020
+#define WM8350_IM_AUXADC_DCOMP1_EINT 0x0010
+#define WM8350_IM_SYS_HYST_COMP_FAIL_EINT 0x0008
+#define WM8350_IM_SYS_CHIP_GT115_EINT 0x0004
+#define WM8350_IM_SYS_CHIP_GT140_EINT 0x0002
+#define WM8350_IM_SYS_WDOG_TO_EINT 0x0001
+
+/*
+ * R35 (0x23) - Power Up Interrupt Status Mask
+ */
+#define WM8350_IM_PUTO_LDO4_EINT 0x0800
+#define WM8350_IM_PUTO_LDO3_EINT 0x0400
+#define WM8350_IM_PUTO_LDO2_EINT 0x0200
+#define WM8350_IM_PUTO_LDO1_EINT 0x0100
+#define WM8350_IM_PUTO_DC6_EINT 0x0020
+#define WM8350_IM_PUTO_DC5_EINT 0x0010
+#define WM8350_IM_PUTO_DC4_EINT 0x0008
+#define WM8350_IM_PUTO_DC3_EINT 0x0004
+#define WM8350_IM_PUTO_DC2_EINT 0x0002
+#define WM8350_IM_PUTO_DC1_EINT 0x0001
+
+/*
+ * R36 (0x24) - Under Voltage Interrupt status Mask
+ */
+#define WM8350_IM_UV_LDO4_EINT 0x0800
+#define WM8350_IM_UV_LDO3_EINT 0x0400
+#define WM8350_IM_UV_LDO2_EINT 0x0200
+#define WM8350_IM_UV_LDO1_EINT 0x0100
+#define WM8350_IM_UV_DC6_EINT 0x0020
+#define WM8350_IM_UV_DC5_EINT 0x0010
+#define WM8350_IM_UV_DC4_EINT 0x0008
+#define WM8350_IM_UV_DC3_EINT 0x0004
+#define WM8350_IM_UV_DC2_EINT 0x0002
+#define WM8350_IM_UV_DC1_EINT 0x0001
+
+/*
+ * R37 (0x25) - Over Current Interrupt status Mask
+ */
+#define WM8350_IM_OC_LS_EINT 0x8000
+
+/*
+ * R38 (0x26) - GPIO Interrupt Status Mask
+ */
+#define WM8350_IM_GP12_EINT 0x1000
+#define WM8350_IM_GP11_EINT 0x0800
+#define WM8350_IM_GP10_EINT 0x0400
+#define WM8350_IM_GP9_EINT 0x0200
+#define WM8350_IM_GP8_EINT 0x0100
+#define WM8350_IM_GP7_EINT 0x0080
+#define WM8350_IM_GP6_EINT 0x0040
+#define WM8350_IM_GP5_EINT 0x0020
+#define WM8350_IM_GP4_EINT 0x0010
+#define WM8350_IM_GP3_EINT 0x0008
+#define WM8350_IM_GP2_EINT 0x0004
+#define WM8350_IM_GP1_EINT 0x0002
+#define WM8350_IM_GP0_EINT 0x0001
+
+/*
+ * R39 (0x27) - Comparator Interrupt Status Mask
+ */
+#define WM8350_IM_EXT_USB_FB_EINT 0x8000
+#define WM8350_IM_EXT_WALL_FB_EINT 0x4000
+#define WM8350_IM_EXT_BAT_FB_EINT 0x2000
+#define WM8350_IM_CODEC_JCK_DET_L_EINT 0x0800
+#define WM8350_IM_CODEC_JCK_DET_R_EINT 0x0400
+#define WM8350_IM_CODEC_MICSCD_EINT 0x0200
+#define WM8350_IM_CODEC_MICD_EINT 0x0100
+#define WM8350_IM_WKUP_OFF_STATE_EINT 0x0040
+#define WM8350_IM_WKUP_HIB_STATE_EINT 0x0020
+#define WM8350_IM_WKUP_CONV_FAULT_EINT 0x0010
+#define WM8350_IM_WKUP_WDOG_RST_EINT 0x0008
+#define WM8350_IM_WKUP_GP_PWR_ON_EINT 0x0004
+#define WM8350_IM_WKUP_ONKEY_EINT 0x0002
+#define WM8350_IM_WKUP_GP_WAKEUP_EINT 0x0001
+
+/*
+ * R220 (0xDC) - RAM BIST 1
+ */
+#define WM8350_READ_STATUS 0x0800
+#define WM8350_TSTRAM_CLK 0x0100
+#define WM8350_TSTRAM_CLK_ENA 0x0080
+#define WM8350_STARTSEQ 0x0040
+#define WM8350_READ_SRC 0x0020
+#define WM8350_COUNT_DIR 0x0010
+#define WM8350_TSTRAM_MODE_MASK 0x000E
+#define WM8350_TSTRAM_ENA 0x0001
+
+/*
+ * R225 (0xE1) - DCDC/LDO status
+ */
+#define WM8350_LS_STS 0x8000
+#define WM8350_LDO4_STS 0x0800
+#define WM8350_LDO3_STS 0x0400
+#define WM8350_LDO2_STS 0x0200
+#define WM8350_LDO1_STS 0x0100
+#define WM8350_DC6_STS 0x0020
+#define WM8350_DC5_STS 0x0010
+#define WM8350_DC4_STS 0x0008
+#define WM8350_DC3_STS 0x0004
+#define WM8350_DC2_STS 0x0002
+#define WM8350_DC1_STS 0x0001
+
+/*
+ * R226 (0xE2) - Charger Overrides
+ */
+#define WM8350_CHG_BATT_HOT_OVRDE 0x8000
+#define WM8350_CHG_BATT_COLD_OVRDE 0x4000
+
+/*
+ * R227 (0xE3) - Misc Overrides
+ */
+#define WM8350_USB_LIMIT_OVRDE 0x0400
+
+/*
+ * R231 (0xE7) - Comparator Overrides
+ */
+#define WM8350_USB_FB_OVRDE 0x8000
+#define WM8350_WALL_FB_OVRDE 0x4000
+#define WM8350_BATT_FB_OVRDE 0x2000
+
+
+/*
+ * R233 (0xE9) - State Machine Status
+ */
+#define WM8350_USB_SM_MASK 0x0700
+#define WM8350_USB_SM_SHIFT 8
+
+#define WM8350_USB_SM_100_SLV 1
+#define WM8350_USB_SM_500_SLV 5
+#define WM8350_USB_SM_STDBY_SLV 7
+
+/* WM8350 wake up conditions */
+#define WM8350_IRQ_WKUP_OFF_STATE 43
+#define WM8350_IRQ_WKUP_HIB_STATE 44
+#define WM8350_IRQ_WKUP_CONV_FAULT 45
+#define WM8350_IRQ_WKUP_WDOG_RST 46
+#define WM8350_IRQ_WKUP_GP_PWR_ON 47
+#define WM8350_IRQ_WKUP_ONKEY 48
+#define WM8350_IRQ_WKUP_GP_WAKEUP 49
+
+/* wm8350 chip revisions */
+#define WM8350_REV_E 0x4
+#define WM8350_REV_F 0x5
+#define WM8350_REV_G 0x6
+#define WM8350_REV_H 0x7
+
+#define WM8350_NUM_IRQ 63
+
+#define WM8350_NUM_IRQ_REGS 7
+
+extern const struct regmap_config wm8350_regmap;
+
+struct wm8350;
+
+struct wm8350_hwmon {
+ struct platform_device *pdev;
+ struct device *classdev;
+};
+
+struct wm8350 {
+ struct device *dev;
+
+ /* device IO */
+ struct regmap *regmap;
+ bool unlocked;
+
+ struct mutex auxadc_mutex;
+ struct completion auxadc_done;
+
+ /* Interrupt handling */
+ struct mutex irq_lock;
+ int chip_irq;
+ int irq_base;
+ u16 irq_masks[WM8350_NUM_IRQ_REGS];
+
+ /* Client devices */
+ struct wm8350_codec codec;
+ struct wm8350_gpio gpio;
+ struct wm8350_hwmon hwmon;
+ struct wm8350_pmic pmic;
+ struct wm8350_power power;
+ struct wm8350_rtc rtc;
+ struct wm8350_wdt wdt;
+};
+
+/**
+ * Data to be supplied by the platform to initialise the WM8350.
+ *
+ * @init: Function called during driver initialisation. Should be
+ * used by the platform to configure GPIO functions and similar.
+ * @irq_high: Set if WM8350 IRQ is active high.
+ * @irq_base: Base IRQ for genirq (not currently used).
+ * @gpio_base: Base for gpiolib.
+ */
+struct wm8350_platform_data {
+ int (*init)(struct wm8350 *wm8350);
+ int irq_high;
+ int irq_base;
+ int gpio_base;
+};
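A minimal sketch of the platform data a board might register, assuming a hypothetical my_board_wm8350_init() callback; none of these values come from a real machine.

/* Hypothetical board support sketch; names and values are illustrative. */
#include <linux/mfd/wm8350/core.h>

static int my_board_wm8350_init(struct wm8350 *wm8350)
{
	/* Board-specific setup, e.g. GPIO function selection, goes here. */
	return 0;
}

static struct wm8350_platform_data my_board_wm8350_pdata = {
	.init		= my_board_wm8350_init,
	.irq_high	= 1,	/* IRQ line wired active high on this board */
	.irq_base	= 0,	/* marked "not currently used" above */
	.gpio_base	= -1,	/* let gpiolib allocate a base dynamically */
};

This structure then reaches the core through wm8350_device_init(), declared further down in this header.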
+
+
+/*
+ * WM8350 device initialisation and exit.
+ */
+int wm8350_device_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata);
+void wm8350_device_exit(struct wm8350 *wm8350);
+
+/*
+ * WM8350 device IO
+ */
+int wm8350_clear_bits(struct wm8350 *wm8350, u16 reg, u16 mask);
+int wm8350_set_bits(struct wm8350 *wm8350, u16 reg, u16 mask);
+u16 wm8350_reg_read(struct wm8350 *wm8350, int reg);
+int wm8350_reg_write(struct wm8350 *wm8350, int reg, u16 val);
+int wm8350_reg_lock(struct wm8350 *wm8350);
+int wm8350_reg_unlock(struct wm8350 *wm8350);
+int wm8350_block_read(struct wm8350 *wm8350, int reg, int size, u16 *dest);
+int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src);
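A hedged read-modify-write sketch using the IO helpers above; picking the codec enable bit in Power mgmt (5) is purely illustrative, and whether that particular register needs the security unlock first is not something this header states.

/* Standalone sketch: not part of the header. */
#include <linux/mfd/wm8350/core.h>

static void example_codec_power(struct wm8350 *wm8350, bool enable)
{
	u16 pm5 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5);

	pr_debug("POWER_MGMT_5 was 0x%04x\n", pm5);

	if (enable)
		wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
	else
		wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5,
				  WM8350_CODEC_ENA);
}

Registers guarded by the security key (see WM8350_UNLOCK_KEY above and WM8350_SECURITY in pmic.h) would presumably need a wm8350_reg_unlock()/wm8350_reg_lock() pair around the write; treat that as an assumption, since this header only declares the calls.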
+
+/*
+ * WM8350 internal interrupts
+ */
+static inline int wm8350_register_irq(struct wm8350 *wm8350, int irq,
+ irq_handler_t handler,
+ unsigned long flags,
+ const char *name, void *data)
+{
+ if (!wm8350->irq_base)
+ return -ENODEV;
+
+ return request_threaded_irq(irq + wm8350->irq_base, NULL,
+ handler, flags, name, data);
+}
+
+static inline void wm8350_free_irq(struct wm8350 *wm8350, int irq, void *data)
+{
+ free_irq(irq + wm8350->irq_base, data);
+}
+
+static inline void wm8350_mask_irq(struct wm8350 *wm8350, int irq)
+{
+ disable_irq(irq + wm8350->irq_base);
+}
+
+static inline void wm8350_unmask_irq(struct wm8350 *wm8350, int irq)
+{
+ enable_irq(irq + wm8350->irq_base);
+}
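A usage sketch for the IRQ helpers above; my_jack_handler, my_priv, the flags value and the request name are all hypothetical, and the IRQ number comes from audio.h.

/* Usage sketch only: assumes <linux/mfd/wm8350/core.h> is included, which
 * pulls in audio.h for WM8350_IRQ_CODEC_JCK_DET_L. */
static irqreturn_t my_jack_handler(int irq, void *data)
{
	/* Handle the left jack detect event here. */
	return IRQ_HANDLED;
}

static int example_hook_jack_irq(struct wm8350 *wm8350, void *my_priv)
{
	/* Fails with -ENODEV if the platform did not provide an irq_base. */
	return wm8350_register_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L,
				   my_jack_handler, 0, "wm8350-jack-l",
				   my_priv);
}

The matching teardown would be wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, my_priv).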
+
+int wm8350_irq_init(struct wm8350 *wm8350, int irq,
+ struct wm8350_platform_data *pdata);
+int wm8350_irq_exit(struct wm8350 *wm8350);
+
+#endif
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
new file mode 100644
index 000000000..d657bcd6d
--- /dev/null
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -0,0 +1,361 @@
+/*
+ * gpio.h -- GPIO Driver for Wolfson WM8350 PMIC
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_WM8350_GPIO_H_
+#define __LINUX_MFD_WM8350_GPIO_H_
+
+#include <linux/platform_device.h>
+
+/*
+ * GPIO Registers.
+ */
+#define WM8350_GPIO_DEBOUNCE 0x80
+#define WM8350_GPIO_PIN_PULL_UP_CONTROL 0x81
+#define WM8350_GPIO_PULL_DOWN_CONTROL 0x82
+#define WM8350_GPIO_INT_MODE 0x83
+#define WM8350_GPIO_CONTROL 0x85
+#define WM8350_GPIO_CONFIGURATION_I_O 0x86
+#define WM8350_GPIO_PIN_POLARITY_TYPE 0x87
+#define WM8350_GPIO_FUNCTION_SELECT_1 0x8C
+#define WM8350_GPIO_FUNCTION_SELECT_2 0x8D
+#define WM8350_GPIO_FUNCTION_SELECT_3 0x8E
+#define WM8350_GPIO_FUNCTION_SELECT_4 0x8F
+#define WM8350_GPIO_LEVEL 0xE6
+
+/*
+ * GPIO Functions
+ */
+#define WM8350_GPIO0_GPIO_IN 0x0
+#define WM8350_GPIO0_GPIO_OUT 0x0
+#define WM8350_GPIO0_PWR_ON_IN 0x1
+#define WM8350_GPIO0_PWR_ON_OUT 0x1
+#define WM8350_GPIO0_LDO_EN_IN 0x2
+#define WM8350_GPIO0_VRTC_OUT 0x2
+#define WM8350_GPIO0_LPWR1_IN 0x3
+#define WM8350_GPIO0_POR_B_OUT 0x3
+
+#define WM8350_GPIO1_GPIO_IN 0x0
+#define WM8350_GPIO1_GPIO_OUT 0x0
+#define WM8350_GPIO1_PWR_ON_IN 0x1
+#define WM8350_GPIO1_DO_CONF_OUT 0x1
+#define WM8350_GPIO1_LDO_EN_IN 0x2
+#define WM8350_GPIO1_RESET_OUT 0x2
+#define WM8350_GPIO1_LPWR2_IN 0x3
+#define WM8350_GPIO1_MEMRST_OUT 0x3
+
+#define WM8350_GPIO2_GPIO_IN 0x0
+#define WM8350_GPIO2_GPIO_OUT 0x0
+#define WM8350_GPIO2_PWR_ON_IN 0x1
+#define WM8350_GPIO2_PWR_ON_OUT 0x1
+#define WM8350_GPIO2_WAKE_UP_IN 0x2
+#define WM8350_GPIO2_VRTC_OUT 0x2
+#define WM8350_GPIO2_32KHZ_IN 0x3
+#define WM8350_GPIO2_32KHZ_OUT 0x3
+
+#define WM8350_GPIO3_GPIO_IN 0x0
+#define WM8350_GPIO3_GPIO_OUT 0x0
+#define WM8350_GPIO3_PWR_ON_IN 0x1
+#define WM8350_GPIO3_P_CLK_OUT 0x1
+#define WM8350_GPIO3_LDO_EN_IN 0x2
+#define WM8350_GPIO3_VRTC_OUT 0x2
+#define WM8350_GPIO3_PWR_OFF_IN 0x3
+#define WM8350_GPIO3_32KHZ_OUT 0x3
+
+#define WM8350_GPIO4_GPIO_IN 0x0
+#define WM8350_GPIO4_GPIO_OUT 0x0
+#define WM8350_GPIO4_MR_IN 0x1
+#define WM8350_GPIO4_MEM_RST_OUT 0x1
+#define WM8350_GPIO4_FLASH_IN 0x2
+#define WM8350_GPIO4_ADA_OUT 0x2
+#define WM8350_GPIO4_HIBERNATE_IN 0x3
+#define WM8350_GPIO4_FLASH_OUT 0x3
+#define WM8350_GPIO4_MICDET_OUT 0x4
+#define WM8350_GPIO4_MICSHT_OUT 0x5
+
+#define WM8350_GPIO5_GPIO_IN 0x0
+#define WM8350_GPIO5_GPIO_OUT 0x0
+#define WM8350_GPIO5_LPWR1_IN 0x1
+#define WM8350_GPIO5_P_CLK_OUT 0x1
+#define WM8350_GPIO5_ADCLRCLK_IN 0x2
+#define WM8350_GPIO5_ADCLRCLK_OUT 0x2
+#define WM8350_GPIO5_HIBERNATE_IN 0x3
+#define WM8350_GPIO5_32KHZ_OUT 0x3
+#define WM8350_GPIO5_MICDET_OUT 0x4
+#define WM8350_GPIO5_MICSHT_OUT 0x5
+#define WM8350_GPIO5_ADA_OUT 0x6
+#define WM8350_GPIO5_OPCLK_OUT 0x7
+
+#define WM8350_GPIO6_GPIO_IN 0x0
+#define WM8350_GPIO6_GPIO_OUT 0x0
+#define WM8350_GPIO6_LPWR2_IN 0x1
+#define WM8350_GPIO6_MEMRST_OUT 0x1
+#define WM8350_GPIO6_FLASH_IN 0x2
+#define WM8350_GPIO6_ADA_OUT 0x2
+#define WM8350_GPIO6_HIBERNATE_IN 0x3
+#define WM8350_GPIO6_RTC_OUT 0x3
+#define WM8350_GPIO6_MICDET_OUT 0x4
+#define WM8350_GPIO6_MICSHT_OUT 0x5
+#define WM8350_GPIO6_ADCLRCLKB_OUT 0x6
+#define WM8350_GPIO6_SDOUT_OUT 0x7
+
+#define WM8350_GPIO7_GPIO_IN 0x0
+#define WM8350_GPIO7_GPIO_OUT 0x0
+#define WM8350_GPIO7_LPWR3_IN 0x1
+#define WM8350_GPIO7_P_CLK_OUT 0x1
+#define WM8350_GPIO7_MASK_IN 0x2
+#define WM8350_GPIO7_VCC_FAULT_OUT 0x2
+#define WM8350_GPIO7_HIBERNATE_IN 0x3
+#define WM8350_GPIO7_BATT_FAULT_OUT 0x3
+#define WM8350_GPIO7_MICDET_OUT 0x4
+#define WM8350_GPIO7_MICSHT_OUT 0x5
+#define WM8350_GPIO7_ADA_OUT 0x6
+#define WM8350_GPIO7_CSB_IN 0x7
+
+#define WM8350_GPIO8_GPIO_IN 0x0
+#define WM8350_GPIO8_GPIO_OUT 0x0
+#define WM8350_GPIO8_MR_IN 0x1
+#define WM8350_GPIO8_VCC_FAULT_OUT 0x1
+#define WM8350_GPIO8_ADCBCLK_IN 0x2
+#define WM8350_GPIO8_ADCBCLK_OUT 0x2
+#define WM8350_GPIO8_PWR_OFF_IN 0x3
+#define WM8350_GPIO8_BATT_FAULT_OUT 0x3
+#define WM8350_GPIO8_ALTSCL_IN 0xf
+
+#define WM8350_GPIO9_GPIO_IN 0x0
+#define WM8350_GPIO9_GPIO_OUT 0x0
+#define WM8350_GPIO9_HEARTBEAT_IN 0x1
+#define WM8350_GPIO9_VCC_FAULT_OUT 0x1
+#define WM8350_GPIO9_MASK_IN 0x2
+#define WM8350_GPIO9_LINE_GT_BATT_OUT 0x2
+#define WM8350_GPIO9_PWR_OFF_IN 0x3
+#define WM8350_GPIO9_BATT_FAULT_OUT 0x3
+#define WM8350_GPIO9_ALTSDA_OUT 0xf
+
+#define WM8350_GPIO10_GPIO_IN 0x0
+#define WM8350_GPIO10_GPIO_OUT 0x0
+#define WM8350_GPIO10_ISINKC_OUT 0x1
+#define WM8350_GPIO10_PWR_OFF_IN 0x2
+#define WM8350_GPIO10_LINE_GT_BATT_OUT 0x2
+#define WM8350_GPIO10_CHD_IND_IN 0x3
+
+#define WM8350_GPIO11_GPIO_IN 0x0
+#define WM8350_GPIO11_GPIO_OUT 0x0
+#define WM8350_GPIO11_ISINKD_OUT 0x1
+#define WM8350_GPIO11_WAKEUP_IN 0x2
+#define WM8350_GPIO11_LINE_GT_BATT_OUT 0x2
+#define WM8350_GPIO11_CHD_IND_IN 0x3
+
+#define WM8350_GPIO12_GPIO_IN 0x0
+#define WM8350_GPIO12_GPIO_OUT 0x0
+#define WM8350_GPIO12_ISINKE_OUT 0x1
+#define WM8350_GPIO12_LINE_GT_BATT_OUT 0x2
+#define WM8350_GPIO12_LINE_EN_OUT 0x3
+#define WM8350_GPIO12_32KHZ_OUT 0x4
+
+#define WM8350_GPIO_DIR_IN 0
+#define WM8350_GPIO_DIR_OUT 1
+#define WM8350_GPIO_ACTIVE_LOW 0
+#define WM8350_GPIO_ACTIVE_HIGH 1
+#define WM8350_GPIO_PULL_NONE 0
+#define WM8350_GPIO_PULL_UP 1
+#define WM8350_GPIO_PULL_DOWN 2
+#define WM8350_GPIO_INVERT_OFF 0
+#define WM8350_GPIO_INVERT_ON 1
+#define WM8350_GPIO_DEBOUNCE_OFF 0
+#define WM8350_GPIO_DEBOUNCE_ON 1
+
+/*
+ * R30 (0x1E) - GPIO Interrupt Status
+ */
+#define WM8350_GP12_EINT 0x1000
+#define WM8350_GP11_EINT 0x0800
+#define WM8350_GP10_EINT 0x0400
+#define WM8350_GP9_EINT 0x0200
+#define WM8350_GP8_EINT 0x0100
+#define WM8350_GP7_EINT 0x0080
+#define WM8350_GP6_EINT 0x0040
+#define WM8350_GP5_EINT 0x0020
+#define WM8350_GP4_EINT 0x0010
+#define WM8350_GP3_EINT 0x0008
+#define WM8350_GP2_EINT 0x0004
+#define WM8350_GP1_EINT 0x0002
+#define WM8350_GP0_EINT 0x0001
+
+
+/*
+ * R128 (0x80) - GPIO Debounce
+ */
+#define WM8350_GP12_DB 0x1000
+#define WM8350_GP11_DB 0x0800
+#define WM8350_GP10_DB 0x0400
+#define WM8350_GP9_DB 0x0200
+#define WM8350_GP8_DB 0x0100
+#define WM8350_GP7_DB 0x0080
+#define WM8350_GP6_DB 0x0040
+#define WM8350_GP5_DB 0x0020
+#define WM8350_GP4_DB 0x0010
+#define WM8350_GP3_DB 0x0008
+#define WM8350_GP2_DB 0x0004
+#define WM8350_GP1_DB 0x0002
+#define WM8350_GP0_DB 0x0001
+
+/*
+ * R129 (0x81) - GPIO Pin pull up Control
+ */
+#define WM8350_GP12_PU 0x1000
+#define WM8350_GP11_PU 0x0800
+#define WM8350_GP10_PU 0x0400
+#define WM8350_GP9_PU 0x0200
+#define WM8350_GP8_PU 0x0100
+#define WM8350_GP7_PU 0x0080
+#define WM8350_GP6_PU 0x0040
+#define WM8350_GP5_PU 0x0020
+#define WM8350_GP4_PU 0x0010
+#define WM8350_GP3_PU 0x0008
+#define WM8350_GP2_PU 0x0004
+#define WM8350_GP1_PU 0x0002
+#define WM8350_GP0_PU 0x0001
+
+/*
+ * R130 (0x82) - GPIO Pull down Control
+ */
+#define WM8350_GP12_PD 0x1000
+#define WM8350_GP11_PD 0x0800
+#define WM8350_GP10_PD 0x0400
+#define WM8350_GP9_PD 0x0200
+#define WM8350_GP8_PD 0x0100
+#define WM8350_GP7_PD 0x0080
+#define WM8350_GP6_PD 0x0040
+#define WM8350_GP5_PD 0x0020
+#define WM8350_GP4_PD 0x0010
+#define WM8350_GP3_PD 0x0008
+#define WM8350_GP2_PD 0x0004
+#define WM8350_GP1_PD 0x0002
+#define WM8350_GP0_PD 0x0001
+
+/*
+ * R131 (0x83) - GPIO Interrupt Mode
+ */
+#define WM8350_GP12_INTMODE 0x1000
+#define WM8350_GP11_INTMODE 0x0800
+#define WM8350_GP10_INTMODE 0x0400
+#define WM8350_GP9_INTMODE 0x0200
+#define WM8350_GP8_INTMODE 0x0100
+#define WM8350_GP7_INTMODE 0x0080
+#define WM8350_GP6_INTMODE 0x0040
+#define WM8350_GP5_INTMODE 0x0020
+#define WM8350_GP4_INTMODE 0x0010
+#define WM8350_GP3_INTMODE 0x0008
+#define WM8350_GP2_INTMODE 0x0004
+#define WM8350_GP1_INTMODE 0x0002
+#define WM8350_GP0_INTMODE 0x0001
+
+/*
+ * R133 (0x85) - GPIO Control
+ */
+#define WM8350_GP_DBTIME_MASK 0x00C0
+
+/*
+ * R134 (0x86) - GPIO Configuration (i/o)
+ */
+#define WM8350_GP12_DIR 0x1000
+#define WM8350_GP11_DIR 0x0800
+#define WM8350_GP10_DIR 0x0400
+#define WM8350_GP9_DIR 0x0200
+#define WM8350_GP8_DIR 0x0100
+#define WM8350_GP7_DIR 0x0080
+#define WM8350_GP6_DIR 0x0040
+#define WM8350_GP5_DIR 0x0020
+#define WM8350_GP4_DIR 0x0010
+#define WM8350_GP3_DIR 0x0008
+#define WM8350_GP2_DIR 0x0004
+#define WM8350_GP1_DIR 0x0002
+#define WM8350_GP0_DIR 0x0001
+
+/*
+ * R135 (0x87) - GPIO Pin Polarity / Type
+ */
+#define WM8350_GP12_CFG 0x1000
+#define WM8350_GP11_CFG 0x0800
+#define WM8350_GP10_CFG 0x0400
+#define WM8350_GP9_CFG 0x0200
+#define WM8350_GP8_CFG 0x0100
+#define WM8350_GP7_CFG 0x0080
+#define WM8350_GP6_CFG 0x0040
+#define WM8350_GP5_CFG 0x0020
+#define WM8350_GP4_CFG 0x0010
+#define WM8350_GP3_CFG 0x0008
+#define WM8350_GP2_CFG 0x0004
+#define WM8350_GP1_CFG 0x0002
+#define WM8350_GP0_CFG 0x0001
+
+/*
+ * R140 (0x8C) - GPIO Function Select 1
+ */
+#define WM8350_GP3_FN_MASK 0xF000
+#define WM8350_GP2_FN_MASK 0x0F00
+#define WM8350_GP1_FN_MASK 0x00F0
+#define WM8350_GP0_FN_MASK 0x000F
+
+/*
+ * R141 (0x8D) - GPIO Function Select 2
+ */
+#define WM8350_GP7_FN_MASK 0xF000
+#define WM8350_GP6_FN_MASK 0x0F00
+#define WM8350_GP5_FN_MASK 0x00F0
+#define WM8350_GP4_FN_MASK 0x000F
+
+/*
+ * R142 (0x8E) - GPIO Function Select 3
+ */
+#define WM8350_GP11_FN_MASK 0xF000
+#define WM8350_GP10_FN_MASK 0x0F00
+#define WM8350_GP9_FN_MASK 0x00F0
+#define WM8350_GP8_FN_MASK 0x000F
+
+/*
+ * R143 (0x8F) - GPIO Function Select 4
+ */
+#define WM8350_GP12_FN_MASK 0x000F
+
+/*
+ * R230 (0xE6) - GPIO Pin Status
+ */
+#define WM8350_GP12_LVL 0x1000
+#define WM8350_GP11_LVL 0x0800
+#define WM8350_GP10_LVL 0x0400
+#define WM8350_GP9_LVL 0x0200
+#define WM8350_GP8_LVL 0x0100
+#define WM8350_GP7_LVL 0x0080
+#define WM8350_GP6_LVL 0x0040
+#define WM8350_GP5_LVL 0x0020
+#define WM8350_GP4_LVL 0x0010
+#define WM8350_GP3_LVL 0x0008
+#define WM8350_GP2_LVL 0x0004
+#define WM8350_GP1_LVL 0x0002
+#define WM8350_GP0_LVL 0x0001
+
+struct wm8350;
+
+int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func,
+ int pol, int pull, int invert, int debounce);
+
+struct wm8350_gpio {
+ struct platform_device *pdev;
+};
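A hedged call sketch for wm8350_gpio_config(); routing GPIO12 out as a 32kHz clock is an arbitrary choice, and the argument values simply reuse the constants defined earlier in this header.

/* Illustrative only: assumes <linux/mfd/wm8350/gpio.h> and core.h are
 * included; the argument order follows the prototype above. */
static int example_config_gpio12(struct wm8350 *wm8350)
{
	return wm8350_gpio_config(wm8350, 12, WM8350_GPIO_DIR_OUT,
				  WM8350_GPIO12_32KHZ_OUT,
				  WM8350_GPIO_ACTIVE_HIGH,
				  WM8350_GPIO_PULL_DOWN,
				  WM8350_GPIO_INVERT_OFF,
				  WM8350_GPIO_DEBOUNCE_OFF);
}

If the pin is instead used as an interrupt source, its line number for wm8350_register_irq() comes from the WM8350_IRQ_GPIO(x) macro defined just below.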
+
+/*
+ * GPIO Interrupts
+ */
+#define WM8350_IRQ_GPIO(x) (50 + x)
+
+#endif
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
new file mode 100644
index 000000000..579b50ca2
--- /dev/null
+++ b/include/linux/mfd/wm8350/pmic.h
@@ -0,0 +1,781 @@
+/*
+ * pmic.h -- Power Management Driver for Wolfson WM8350 PMIC
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_WM8350_PMIC_H
+#define __LINUX_MFD_WM8350_PMIC_H
+
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/regulator/machine.h>
+
+/*
+ * Register values.
+ */
+
+#define WM8350_CURRENT_SINK_DRIVER_A 0xAC
+#define WM8350_CSA_FLASH_CONTROL 0xAD
+#define WM8350_CURRENT_SINK_DRIVER_B 0xAE
+#define WM8350_CSB_FLASH_CONTROL 0xAF
+#define WM8350_DCDC_LDO_REQUESTED 0xB0
+#define WM8350_DCDC_ACTIVE_OPTIONS 0xB1
+#define WM8350_DCDC_SLEEP_OPTIONS 0xB2
+#define WM8350_POWER_CHECK_COMPARATOR 0xB3
+#define WM8350_DCDC1_CONTROL 0xB4
+#define WM8350_DCDC1_TIMEOUTS 0xB5
+#define WM8350_DCDC1_LOW_POWER 0xB6
+#define WM8350_DCDC2_CONTROL 0xB7
+#define WM8350_DCDC2_TIMEOUTS 0xB8
+#define WM8350_DCDC3_CONTROL 0xBA
+#define WM8350_DCDC3_TIMEOUTS 0xBB
+#define WM8350_DCDC3_LOW_POWER 0xBC
+#define WM8350_DCDC4_CONTROL 0xBD
+#define WM8350_DCDC4_TIMEOUTS 0xBE
+#define WM8350_DCDC4_LOW_POWER 0xBF
+#define WM8350_DCDC5_CONTROL 0xC0
+#define WM8350_DCDC5_TIMEOUTS 0xC1
+#define WM8350_DCDC6_CONTROL 0xC3
+#define WM8350_DCDC6_TIMEOUTS 0xC4
+#define WM8350_DCDC6_LOW_POWER 0xC5
+#define WM8350_LIMIT_SWITCH_CONTROL 0xC7
+#define WM8350_LDO1_CONTROL 0xC8
+#define WM8350_LDO1_TIMEOUTS 0xC9
+#define WM8350_LDO1_LOW_POWER 0xCA
+#define WM8350_LDO2_CONTROL 0xCB
+#define WM8350_LDO2_TIMEOUTS 0xCC
+#define WM8350_LDO2_LOW_POWER 0xCD
+#define WM8350_LDO3_CONTROL 0xCE
+#define WM8350_LDO3_TIMEOUTS 0xCF
+#define WM8350_LDO3_LOW_POWER 0xD0
+#define WM8350_LDO4_CONTROL 0xD1
+#define WM8350_LDO4_TIMEOUTS 0xD2
+#define WM8350_LDO4_LOW_POWER 0xD3
+#define WM8350_VCC_FAULT_MASKS 0xD7
+#define WM8350_MAIN_BANDGAP_CONTROL 0xD8
+#define WM8350_OSC_CONTROL 0xD9
+#define WM8350_RTC_TICK_CONTROL 0xDA
+#define WM8350_SECURITY 0xDB
+#define WM8350_RAM_BIST_1 0xDC
+#define WM8350_DCDC_LDO_STATUS 0xE1
+#define WM8350_GPIO_PIN_STATUS 0xE6
+
+#define WM8350_DCDC1_FORCE_PWM 0xF8
+#define WM8350_DCDC3_FORCE_PWM 0xFA
+#define WM8350_DCDC4_FORCE_PWM 0xFB
+#define WM8350_DCDC6_FORCE_PWM 0xFD
+
+/*
+ * R172 (0xAC) - Current Sink Driver A
+ */
+#define WM8350_CS1_HIB_MODE 0x1000
+#define WM8350_CS1_HIB_MODE_MASK 0x1000
+#define WM8350_CS1_HIB_MODE_SHIFT 12
+#define WM8350_CS1_ISEL_MASK 0x003F
+#define WM8350_CS1_ISEL_SHIFT 0
+
+/* Bit values for R172 (0xAC) */
+#define WM8350_CS1_HIB_MODE_DISABLE 0
+#define WM8350_CS1_HIB_MODE_LEAVE 1
+
+#define WM8350_CS1_ISEL_220M 0x3F
+
+/*
+ * R173 (0xAD) - CSA Flash control
+ */
+#define WM8350_CS1_FLASH_MODE 0x8000
+#define WM8350_CS1_TRIGSRC 0x4000
+#define WM8350_CS1_DRIVE 0x2000
+#define WM8350_CS1_FLASH_DUR_MASK 0x0300
+#define WM8350_CS1_OFF_RAMP_MASK 0x0030
+#define WM8350_CS1_ON_RAMP_MASK 0x0003
+
+/*
+ * R174 (0xAE) - Current Sink Driver B
+ */
+#define WM8350_CS2_HIB_MODE 0x1000
+#define WM8350_CS2_ISEL_MASK 0x003F
+
+/*
+ * R175 (0xAF) - CSB Flash control
+ */
+#define WM8350_CS2_FLASH_MODE 0x8000
+#define WM8350_CS2_TRIGSRC 0x4000
+#define WM8350_CS2_DRIVE 0x2000
+#define WM8350_CS2_FLASH_DUR_MASK 0x0300
+#define WM8350_CS2_OFF_RAMP_MASK 0x0030
+#define WM8350_CS2_ON_RAMP_MASK 0x0003
+
+/*
+ * R176 (0xB0) - DCDC/LDO requested
+ */
+#define WM8350_LS_ENA 0x8000
+#define WM8350_LDO4_ENA 0x0800
+#define WM8350_LDO3_ENA 0x0400
+#define WM8350_LDO2_ENA 0x0200
+#define WM8350_LDO1_ENA 0x0100
+#define WM8350_DC6_ENA 0x0020
+#define WM8350_DC5_ENA 0x0010
+#define WM8350_DC4_ENA 0x0008
+#define WM8350_DC3_ENA 0x0004
+#define WM8350_DC2_ENA 0x0002
+#define WM8350_DC1_ENA 0x0001
+
+/*
+ * R177 (0xB1) - DCDC Active options
+ */
+#define WM8350_PUTO_MASK 0x3000
+#define WM8350_PWRUP_DELAY_MASK 0x0300
+#define WM8350_DC6_ACTIVE 0x0020
+#define WM8350_DC4_ACTIVE 0x0008
+#define WM8350_DC3_ACTIVE 0x0004
+#define WM8350_DC1_ACTIVE 0x0001
+
+/*
+ * R178 (0xB2) - DCDC Sleep options
+ */
+#define WM8350_DC6_SLEEP 0x0020
+#define WM8350_DC4_SLEEP 0x0008
+#define WM8350_DC3_SLEEP 0x0004
+#define WM8350_DC1_SLEEP 0x0001
+
+/*
+ * R179 (0xB3) - Power-check comparator
+ */
+#define WM8350_PCCMP_ERRACT 0x4000
+#define WM8350_PCCMP_RAIL 0x0100
+#define WM8350_PCCMP_OFF_THR_MASK 0x0070
+#define WM8350_PCCMP_ON_THR_MASK 0x0007
+
+/*
+ * R180 (0xB4) - DCDC1 Control
+ */
+#define WM8350_DC1_OPFLT 0x0400
+#define WM8350_DC1_VSEL_MASK 0x007F
+#define WM8350_DC1_VSEL_SHIFT 0
+
+/*
+ * R181 (0xB5) - DCDC1 Timeouts
+ */
+#define WM8350_DC1_ERRACT_MASK 0xC000
+#define WM8350_DC1_ERRACT_SHIFT 14
+#define WM8350_DC1_ENSLOT_MASK 0x3C00
+#define WM8350_DC1_ENSLOT_SHIFT 10
+#define WM8350_DC1_SDSLOT_MASK 0x03C0
+#define WM8350_DC1_UVTO_MASK 0x0030
+#define WM8350_DC1_SDSLOT_SHIFT 6
+
+/* Bit values for R181 (0xB5) */
+#define WM8350_DC1_ERRACT_NONE 0
+#define WM8350_DC1_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_DC1_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R182 (0xB6) - DCDC1 Low Power
+ */
+#define WM8350_DC1_HIB_MODE_MASK 0x7000
+#define WM8350_DC1_HIB_TRIG_MASK 0x0300
+#define WM8350_DC1_VIMG_MASK 0x007F
+
+/*
+ * R183 (0xB7) - DCDC2 Control
+ */
+#define WM8350_DC2_MODE 0x4000
+#define WM8350_DC2_MODE_MASK 0x4000
+#define WM8350_DC2_MODE_SHIFT 14
+#define WM8350_DC2_HIB_MODE 0x1000
+#define WM8350_DC2_HIB_MODE_MASK 0x1000
+#define WM8350_DC2_HIB_MODE_SHIFT 12
+#define WM8350_DC2_HIB_TRIG_MASK 0x0300
+#define WM8350_DC2_HIB_TRIG_SHIFT 8
+#define WM8350_DC2_ILIM 0x0040
+#define WM8350_DC2_ILIM_MASK 0x0040
+#define WM8350_DC2_ILIM_SHIFT 6
+#define WM8350_DC2_RMP_MASK 0x0018
+#define WM8350_DC2_RMP_SHIFT 3
+#define WM8350_DC2_FBSRC_MASK 0x0003
+#define WM8350_DC2_FBSRC_SHIFT 0
+
+/* Bit values for R183 (0xB7) */
+#define WM8350_DC2_MODE_BOOST 0
+#define WM8350_DC2_MODE_SWITCH 1
+
+#define WM8350_DC2_HIB_MODE_ACTIVE 1
+#define WM8350_DC2_HIB_MODE_DISABLE 0
+
+#define WM8350_DC2_HIB_TRIG_NONE 0
+#define WM8350_DC2_HIB_TRIG_LPWR1 1
+#define WM8350_DC2_HIB_TRIG_LPWR2 2
+#define WM8350_DC2_HIB_TRIG_LPWR3 3
+
+#define WM8350_DC2_ILIM_HIGH 0
+#define WM8350_DC2_ILIM_LOW 1
+
+#define WM8350_DC2_RMP_30V 0
+#define WM8350_DC2_RMP_20V 1
+#define WM8350_DC2_RMP_10V 2
+#define WM8350_DC2_RMP_5V 3
+
+#define WM8350_DC2_FBSRC_FB2 0
+#define WM8350_DC2_FBSRC_ISINKA 1
+#define WM8350_DC2_FBSRC_ISINKB 2
+#define WM8350_DC2_FBSRC_USB 3
+
+/*
+ * R184 (0xB8) - DCDC2 Timeouts
+ */
+#define WM8350_DC2_ERRACT_MASK 0xC000
+#define WM8350_DC2_ERRACT_SHIFT 14
+#define WM8350_DC2_ENSLOT_MASK 0x3C00
+#define WM8350_DC2_ENSLOT_SHIFT 10
+#define WM8350_DC2_SDSLOT_MASK 0x03C0
+#define WM8350_DC2_UVTO_MASK 0x0030
+
+/* Bit values for R184 (0xB8) */
+#define WM8350_DC2_ERRACT_NONE 0
+#define WM8350_DC2_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_DC2_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R186 (0xBA) - DCDC3 Control
+ */
+#define WM8350_DC3_OPFLT 0x0400
+#define WM8350_DC3_VSEL_MASK 0x007F
+#define WM8350_DC3_VSEL_SHIFT 0
+
+/*
+ * R187 (0xBB) - DCDC3 Timeouts
+ */
+#define WM8350_DC3_ERRACT_MASK 0xC000
+#define WM8350_DC3_ERRACT_SHIFT 14
+#define WM8350_DC3_ENSLOT_MASK 0x3C00
+#define WM8350_DC3_ENSLOT_SHIFT 10
+#define WM8350_DC3_SDSLOT_MASK 0x03C0
+#define WM8350_DC3_UVTO_MASK 0x0030
+#define WM8350_DC3_SDSLOT_SHIFT 6
+
+/* Bit values for R187 (0xBB) */
+#define WM8350_DC3_ERRACT_NONE 0
+#define WM8350_DC3_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_DC3_ERRACT_SHUTDOWN_SYS 2
+/*
+ * R188 (0xBC) - DCDC3 Low Power
+ */
+#define WM8350_DC3_HIB_MODE_MASK 0x7000
+#define WM8350_DC3_HIB_TRIG_MASK 0x0300
+#define WM8350_DC3_VIMG_MASK 0x007F
+
+/*
+ * R189 (0xBD) - DCDC4 Control
+ */
+#define WM8350_DC4_OPFLT 0x0400
+#define WM8350_DC4_VSEL_MASK 0x007F
+#define WM8350_DC4_VSEL_SHIFT 0
+
+/*
+ * R190 (0xBE) - DCDC4 Timeouts
+ */
+#define WM8350_DC4_ERRACT_MASK 0xC000
+#define WM8350_DC4_ERRACT_SHIFT 14
+#define WM8350_DC4_ENSLOT_MASK 0x3C00
+#define WM8350_DC4_ENSLOT_SHIFT 10
+#define WM8350_DC4_SDSLOT_MASK 0x03C0
+#define WM8350_DC4_UVTO_MASK 0x0030
+#define WM8350_DC4_SDSLOT_SHIFT 6
+
+/* Bit values for R190 (0xBE) */
+#define WM8350_DC4_ERRACT_NONE 0
+#define WM8350_DC4_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_DC4_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R191 (0xBF) - DCDC4 Low Power
+ */
+#define WM8350_DC4_HIB_MODE_MASK 0x7000
+#define WM8350_DC4_HIB_TRIG_MASK 0x0300
+#define WM8350_DC4_VIMG_MASK 0x007F
+
+/*
+ * R192 (0xC0) - DCDC5 Control
+ */
+#define WM8350_DC5_MODE 0x4000
+#define WM8350_DC5_MODE_MASK 0x4000
+#define WM8350_DC5_MODE_SHIFT 14
+#define WM8350_DC5_HIB_MODE 0x1000
+#define WM8350_DC5_HIB_MODE_MASK 0x1000
+#define WM8350_DC5_HIB_MODE_SHIFT 12
+#define WM8350_DC5_HIB_TRIG_MASK 0x0300
+#define WM8350_DC5_HIB_TRIG_SHIFT 8
+#define WM8350_DC5_ILIM 0x0040
+#define WM8350_DC5_ILIM_MASK 0x0040
+#define WM8350_DC5_ILIM_SHIFT 6
+#define WM8350_DC5_RMP_MASK 0x0018
+#define WM8350_DC5_RMP_SHIFT 3
+#define WM8350_DC5_FBSRC_MASK 0x0003
+#define WM8350_DC5_FBSRC_SHIFT 0
+
+/* Bit values for R192 (0xC0) */
+#define WM8350_DC5_MODE_BOOST 0
+#define WM8350_DC5_MODE_SWITCH 1
+
+#define WM8350_DC5_HIB_MODE_ACTIVE 1
+#define WM8350_DC5_HIB_MODE_DISABLE 0
+
+#define WM8350_DC5_HIB_TRIG_NONE 0
+#define WM8350_DC5_HIB_TRIG_LPWR1 1
+#define WM8350_DC5_HIB_TRIG_LPWR2 2
+#define WM8350_DC5_HIB_TRIG_LPWR3 3
+
+#define WM8350_DC5_ILIM_HIGH 0
+#define WM8350_DC5_ILIM_LOW 1
+
+#define WM8350_DC5_RMP_30V 0
+#define WM8350_DC5_RMP_20V 1
+#define WM8350_DC5_RMP_10V 2
+#define WM8350_DC5_RMP_5V 3
+
+#define WM8350_DC5_FBSRC_FB2 0
+#define WM8350_DC5_FBSRC_ISINKA 1
+#define WM8350_DC5_FBSRC_ISINKB 2
+#define WM8350_DC5_FBSRC_USB 3
+
+/*
+ * R193 (0xC1) - DCDC5 Timeouts
+ */
+#define WM8350_DC5_ERRACT_MASK 0xC000
+#define WM8350_DC5_ERRACT_SHIFT 14
+#define WM8350_DC5_ENSLOT_MASK 0x3C00
+#define WM8350_DC5_ENSLOT_SHIFT 10
+#define WM8350_DC5_SDSLOT_MASK 0x03C0
+#define WM8350_DC5_UVTO_MASK 0x0030
+#define WM8350_DC5_SDSLOT_SHIFT 6
+
+/* Bit values for R193 (0xC1) */
+#define WM8350_DC5_ERRACT_NONE 0
+#define WM8350_DC5_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_DC5_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R195 (0xC3) - DCDC6 Control
+ */
+#define WM8350_DC6_OPFLT 0x0400
+#define WM8350_DC6_VSEL_MASK 0x007F
+#define WM8350_DC6_VSEL_SHIFT 0
+
+/*
+ * R196 (0xC4) - DCDC6 Timeouts
+ */
+#define WM8350_DC6_ERRACT_MASK 0xC000
+#define WM8350_DC6_ERRACT_SHIFT 14
+#define WM8350_DC6_ENSLOT_MASK 0x3C00
+#define WM8350_DC6_ENSLOT_SHIFT 10
+#define WM8350_DC6_SDSLOT_MASK 0x03C0
+#define WM8350_DC6_UVTO_MASK 0x0030
+#define WM8350_DC6_SDSLOT_SHIFT 6
+
+/* Bit values for R196 (0xC4) */
+#define WM8350_DC6_ERRACT_NONE 0
+#define WM8350_DC6_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_DC6_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R197 (0xC5) - DCDC6 Low Power
+ */
+#define WM8350_DC6_HIB_MODE_MASK 0x7000
+#define WM8350_DC6_HIB_TRIG_MASK 0x0300
+#define WM8350_DC6_VIMG_MASK 0x007F
+
+/*
+ * R199 (0xC7) - Limit Switch Control
+ */
+#define WM8350_LS_ERRACT_MASK 0xC000
+#define WM8350_LS_ERRACT_SHIFT 14
+#define WM8350_LS_ENSLOT_MASK 0x3C00
+#define WM8350_LS_ENSLOT_SHIFT 10
+#define WM8350_LS_SDSLOT_MASK 0x03C0
+#define WM8350_LS_SDSLOT_SHIFT 6
+#define WM8350_LS_HIB_MODE 0x0010
+#define WM8350_LS_HIB_MODE_MASK 0x0010
+#define WM8350_LS_HIB_MODE_SHIFT 4
+#define WM8350_LS_HIB_PROT 0x0002
+#define WM8350_LS_HIB_PROT_MASK 0x0002
+#define WM8350_LS_HIB_PROT_SHIFT 1
+#define WM8350_LS_PROT 0x0001
+#define WM8350_LS_PROT_MASK 0x0001
+#define WM8350_LS_PROT_SHIFT 0
+
+/* Bit values for R199 (0xC7) */
+#define WM8350_LS_ERRACT_NONE 0
+#define WM8350_LS_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_LS_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R200 (0xC8) - LDO1 Control
+ */
+#define WM8350_LDO1_SWI 0x4000
+#define WM8350_LDO1_OPFLT 0x0400
+#define WM8350_LDO1_VSEL_MASK 0x001F
+#define WM8350_LDO1_VSEL_SHIFT 0
+
+/*
+ * R201 (0xC9) - LDO1 Timeouts
+ */
+#define WM8350_LDO1_ERRACT_MASK 0xC000
+#define WM8350_LDO1_ERRACT_SHIFT 14
+#define WM8350_LDO1_ENSLOT_MASK 0x3C00
+#define WM8350_LDO1_ENSLOT_SHIFT 10
+#define WM8350_LDO1_SDSLOT_MASK 0x03C0
+#define WM8350_LDO1_UVTO_MASK 0x0030
+#define WM8350_LDO1_SDSLOT_SHIFT 6
+
+/* Bit values for R201 (0xC9) */
+#define WM8350_LDO1_ERRACT_NONE 0
+#define WM8350_LDO1_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_LDO1_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R202 (0xCA) - LDO1 Low Power
+ */
+#define WM8350_LDO1_HIB_MODE_MASK 0x3000
+#define WM8350_LDO1_HIB_TRIG_MASK 0x0300
+#define WM8350_LDO1_VIMG_MASK 0x001F
+#define WM8350_LDO1_HIB_MODE_DIS (0x1 << 12)
+
+
+/*
+ * R203 (0xCB) - LDO2 Control
+ */
+#define WM8350_LDO2_SWI 0x4000
+#define WM8350_LDO2_OPFLT 0x0400
+#define WM8350_LDO2_VSEL_MASK 0x001F
+#define WM8350_LDO2_VSEL_SHIFT 0
+
+/*
+ * R204 (0xCC) - LDO2 Timeouts
+ */
+#define WM8350_LDO2_ERRACT_MASK 0xC000
+#define WM8350_LDO2_ERRACT_SHIFT 14
+#define WM8350_LDO2_ENSLOT_MASK 0x3C00
+#define WM8350_LDO2_ENSLOT_SHIFT 10
+#define WM8350_LDO2_SDSLOT_MASK 0x03C0
+#define WM8350_LDO2_SDSLOT_SHIFT 6
+
+/* Bit values for R204 (0xCC) */
+#define WM8350_LDO2_ERRACT_NONE 0
+#define WM8350_LDO2_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_LDO2_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R205 (0xCD) - LDO2 Low Power
+ */
+#define WM8350_LDO2_HIB_MODE_MASK 0x3000
+#define WM8350_LDO2_HIB_TRIG_MASK 0x0300
+#define WM8350_LDO2_VIMG_MASK 0x001F
+
+/*
+ * R206 (0xCE) - LDO3 Control
+ */
+#define WM8350_LDO3_SWI 0x4000
+#define WM8350_LDO3_OPFLT 0x0400
+#define WM8350_LDO3_VSEL_MASK 0x001F
+#define WM8350_LDO3_VSEL_SHIFT 0
+
+/*
+ * R207 (0xCF) - LDO3 Timeouts
+ */
+#define WM8350_LDO3_ERRACT_MASK 0xC000
+#define WM8350_LDO3_ERRACT_SHIFT 14
+#define WM8350_LDO3_ENSLOT_MASK 0x3C00
+#define WM8350_LDO3_ENSLOT_SHIFT 10
+#define WM8350_LDO3_SDSLOT_MASK 0x03C0
+#define WM8350_LDO3_UVTO_MASK 0x0030
+#define WM8350_LDO3_SDSLOT_SHIFT 6
+
+/* Bit values for R207 (0xCF) */
+#define WM8350_LDO3_ERRACT_NONE 0
+#define WM8350_LDO3_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_LDO3_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R208 (0xD0) - LDO3 Low Power
+ */
+#define WM8350_LDO3_HIB_MODE_MASK 0x3000
+#define WM8350_LDO3_HIB_TRIG_MASK 0x0300
+#define WM8350_LDO3_VIMG_MASK 0x001F
+
+/*
+ * R209 (0xD1) - LDO4 Control
+ */
+#define WM8350_LDO4_SWI 0x4000
+#define WM8350_LDO4_OPFLT 0x0400
+#define WM8350_LDO4_VSEL_MASK 0x001F
+#define WM8350_LDO4_VSEL_SHIFT 0
+
+/*
+ * R210 (0xD2) - LDO4 Timeouts
+ */
+#define WM8350_LDO4_ERRACT_MASK 0xC000
+#define WM8350_LDO4_ERRACT_SHIFT 14
+#define WM8350_LDO4_ENSLOT_MASK 0x3C00
+#define WM8350_LDO4_ENSLOT_SHIFT 10
+#define WM8350_LDO4_SDSLOT_MASK 0x03C0
+#define WM8350_LDO4_UVTO_MASK 0x0030
+#define WM8350_LDO4_SDSLOT_SHIFT 6
+
+/* Bit values for R210 (0xD2) */
+#define WM8350_LDO4_ERRACT_NONE 0
+#define WM8350_LDO4_ERRACT_SHUTDOWN_CONV 1
+#define WM8350_LDO4_ERRACT_SHUTDOWN_SYS 2
+
+/*
+ * R211 (0xD3) - LDO4 Low Power
+ */
+#define WM8350_LDO4_HIB_MODE_MASK 0x3000
+#define WM8350_LDO4_HIB_TRIG_MASK 0x0300
+#define WM8350_LDO4_VIMG_MASK 0x001F
+
+/*
+ * R215 (0xD7) - VCC_FAULT Masks
+ */
+#define WM8350_LS_FAULT 0x8000
+#define WM8350_LDO4_FAULT 0x0800
+#define WM8350_LDO3_FAULT 0x0400
+#define WM8350_LDO2_FAULT 0x0200
+#define WM8350_LDO1_FAULT 0x0100
+#define WM8350_DC6_FAULT 0x0020
+#define WM8350_DC5_FAULT 0x0010
+#define WM8350_DC4_FAULT 0x0008
+#define WM8350_DC3_FAULT 0x0004
+#define WM8350_DC2_FAULT 0x0002
+#define WM8350_DC1_FAULT 0x0001
+
+/*
+ * R216 (0xD8) - Main Bandgap Control
+ */
+#define WM8350_MBG_LOAD_FUSES 0x8000
+#define WM8350_MBG_FUSE_WPREP 0x4000
+#define WM8350_MBG_FUSE_WRITE 0x2000
+#define WM8350_MBG_FUSE_TRIM_MASK 0x1F00
+#define WM8350_MBG_TRIM_SRC 0x0020
+#define WM8350_MBG_USER_TRIM_MASK 0x001F
+
+/*
+ * R217 (0xD9) - OSC Control
+ */
+#define WM8350_OSC_LOAD_FUSES 0x8000
+#define WM8350_OSC_FUSE_WPREP 0x4000
+#define WM8350_OSC_FUSE_WRITE 0x2000
+#define WM8350_OSC_FUSE_TRIM_MASK 0x0F00
+#define WM8350_OSC_TRIM_SRC 0x0020
+#define WM8350_OSC_USER_TRIM_MASK 0x000F
+
+/*
+ * R248 (0xF8) - DCDC1 Force PWM
+ */
+#define WM8350_DCDC1_FORCE_PWM_ENA 0x0010
+
+/*
+ * R250 (0xFA) - DCDC3 Force PWM
+ */
+#define WM8350_DCDC3_FORCE_PWM_ENA 0x0010
+
+/*
+ * R251 (0xFB) - DCDC4 Force PWM
+ */
+#define WM8350_DCDC4_FORCE_PWM_ENA 0x0010
+
+/*
+ * R253 (0xFD) - DCDC6 Force PWM
+ */
+#define WM8350_DCDC6_FORCE_PWM_ENA 0x0010
+
+/*
+ * DCDCs
+ */
+#define WM8350_DCDC_1 0
+#define WM8350_DCDC_2 1
+#define WM8350_DCDC_3 2
+#define WM8350_DCDC_4 3
+#define WM8350_DCDC_5 4
+#define WM8350_DCDC_6 5
+
+/* DCDC modes */
+#define WM8350_DCDC_ACTIVE_STANDBY 0
+#define WM8350_DCDC_ACTIVE_PULSE 1
+#define WM8350_DCDC_SLEEP_NORMAL 0
+#define WM8350_DCDC_SLEEP_LOW 1
+
+/* DCDC Low power (Hibernate) mode */
+#define WM8350_DCDC_HIB_MODE_CUR (0 << 12)
+#define WM8350_DCDC_HIB_MODE_IMAGE (1 << 12)
+#define WM8350_DCDC_HIB_MODE_STANDBY (2 << 12)
+#define WM8350_DCDC_HIB_MODE_LDO (4 << 12)
+#define WM8350_DCDC_HIB_MODE_LDO_IM (5 << 12)
+#define WM8350_DCDC_HIB_MODE_DIS (7 << 12)
+#define WM8350_DCDC_HIB_MODE_MASK (7 << 12)
+
+/* DCDC Low Power (Hibernate) signal */
+#define WM8350_DCDC_HIB_SIG_REG (0 << 8)
+#define WM8350_DCDC_HIB_SIG_LPWR1 (1 << 8)
+#define WM8350_DCDC_HIB_SIG_LPWR2 (2 << 8)
+#define WM8350_DCDC_HIB_SIG_LPWR3 (3 << 8)
+
+/* LDO Low power (Hibernate) mode */
+#define WM8350_LDO_HIB_MODE_IMAGE (0 << 0)
+#define WM8350_LDO_HIB_MODE_DIS (1 << 0)
+
+/* LDO Low Power (Hibernate) signal */
+#define WM8350_LDO_HIB_SIG_REG (0 << 8)
+#define WM8350_LDO_HIB_SIG_LPWR1 (1 << 8)
+#define WM8350_LDO_HIB_SIG_LPWR2 (2 << 8)
+#define WM8350_LDO_HIB_SIG_LPWR3 (3 << 8)
+
+/*
+ * LDOs
+ */
+#define WM8350_LDO_1 6
+#define WM8350_LDO_2 7
+#define WM8350_LDO_3 8
+#define WM8350_LDO_4 9
+
+/*
+ * ISINKs
+ */
+#define WM8350_ISINK_A 10
+#define WM8350_ISINK_B 11
+
+#define WM8350_ISINK_MODE_BOOST 0
+#define WM8350_ISINK_MODE_SWITCH 1
+#define WM8350_ISINK_ILIM_NORMAL 0
+#define WM8350_ISINK_ILIM_LOW 1
+
+#define WM8350_ISINK_FLASH_DISABLE 0
+#define WM8350_ISINK_FLASH_ENABLE 1
+#define WM8350_ISINK_FLASH_TRIG_BIT 0
+#define WM8350_ISINK_FLASH_TRIG_GPIO 1
+#define WM8350_ISINK_FLASH_MODE_EN (1 << 13)
+#define WM8350_ISINK_FLASH_MODE_DIS (0 << 13)
+#define WM8350_ISINK_FLASH_DUR_32MS (0 << 8)
+#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8)
+#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8)
+#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8)
+#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0)
+#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0)
+#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0)
+#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0)
+#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0)
+#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0)
+#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0)
+#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4)
+#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4)
+#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4)
+#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4)
+#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4)
+#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4)
+#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4)
+
+/*
+ * Regulator Interrupts.
+ */
+#define WM8350_IRQ_CS1 13
+#define WM8350_IRQ_CS2 14
+#define WM8350_IRQ_UV_LDO4 25
+#define WM8350_IRQ_UV_LDO3 26
+#define WM8350_IRQ_UV_LDO2 27
+#define WM8350_IRQ_UV_LDO1 28
+#define WM8350_IRQ_UV_DC6 29
+#define WM8350_IRQ_UV_DC5 30
+#define WM8350_IRQ_UV_DC4 31
+#define WM8350_IRQ_UV_DC3 32
+#define WM8350_IRQ_UV_DC2 33
+#define WM8350_IRQ_UV_DC1 34
+#define WM8350_IRQ_OC_LS 35
+
+#define NUM_WM8350_REGULATORS 12
+
+struct wm8350;
+struct platform_device;
+struct regulator_init_data;
+
+/*
+ * WM8350 LED platform data
+ */
+struct wm8350_led_platform_data {
+ const char *name;
+ const char *default_trigger;
+ int max_uA;
+};
+
+struct wm8350_led {
+ struct platform_device *pdev;
+ struct mutex mutex;
+ struct work_struct work;
+ spinlock_t value_lock;
+ enum led_brightness value;
+ struct led_classdev cdev;
+ int max_uA_index;
+ int enabled;
+
+ struct regulator *isink;
+ struct regulator_consumer_supply isink_consumer;
+ struct regulator_init_data isink_init;
+ struct regulator *dcdc;
+ struct regulator_consumer_supply dcdc_consumer;
+ struct regulator_init_data dcdc_init;
+};
+
+struct wm8350_pmic {
+ /* Number of regulators of each type on this device */
+ int max_dcdc;
+ int max_isink;
+
+ /* ISINK to DCDC mapping */
+ int isink_A_dcdc;
+ int isink_B_dcdc;
+
+ /* hibernate configs */
+ u16 dcdc1_hib_mode;
+ u16 dcdc3_hib_mode;
+ u16 dcdc4_hib_mode;
+ u16 dcdc6_hib_mode;
+
+ /* regulator devices */
+ struct platform_device *pdev[NUM_WM8350_REGULATORS];
+
+ /* LED devices */
+ struct wm8350_led led[2];
+};
+
+int wm8350_register_regulator(struct wm8350 *wm8350, int reg,
+ struct regulator_init_data *initdata);
+int wm8350_register_led(struct wm8350 *wm8350, int lednum, int dcdc, int isink,
+ struct wm8350_led_platform_data *pdata);
+
+/*
+ * Additional DCDC control not supported via regulator API
+ */
+int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start,
+ u16 stop, u16 fault);
+int wm8350_dcdc25_set_mode(struct wm8350 *wm8350, int dcdc, u16 mode,
+ u16 ilim, u16 ramp, u16 feedback);
+
+/*
+ * Additional LDO control not supported via regulator API
+ */
+int wm8350_ldo_set_slot(struct wm8350 *wm8350, int ldo, u16 start, u16 stop);
+
+/*
+ * Additional ISINK control not supported via regulator API
+ */
+int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
+ u16 trigger, u16 duration, u16 on_ramp,
+ u16 off_ramp, u16 drive);
+
+#endif
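
The pmic.h declarations above are consumed by board-support code rather than by the regulator framework directly. Below is a minimal sketch of how a hypothetical board-init function might use them, assuming the caller already holds a probed struct wm8350 and supplies its own regulator_init_data and LED platform data; none of these names come from this patch.

/* Hypothetical board-init sketch; my_wm8350, my_dcdc1_init and my_led_pdata
 * are assumed to be provided by board code, not by this patch. */
static int example_board_setup_wm8350_pmic(struct wm8350 *my_wm8350,
					   struct regulator_init_data *my_dcdc1_init,
					   struct wm8350_led_platform_data *my_led_pdata)
{
	int ret;

	/* Hand DCDC1 to the regulator framework. */
	ret = wm8350_register_regulator(my_wm8350, WM8350_DCDC_1, my_dcdc1_init);
	if (ret)
		return ret;

	/* Expose an LED driven from ISINK A, supplied by DCDC5. */
	return wm8350_register_led(my_wm8350, 0, WM8350_DCDC_5,
				   WM8350_ISINK_A, my_led_pdata);
}

The LED helper takes regulator indices rather than regulator handles, which matches the wm8350_led structure above carrying its own consumer and init data.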
diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h
new file mode 100644
index 000000000..ebd72ffc6
--- /dev/null
+++ b/include/linux/mfd/wm8350/rtc.h
@@ -0,0 +1,269 @@
+/*
+ * rtc.h -- RTC driver for Wolfson WM8350 PMIC
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_WM8350_RTC_H
+#define __LINUX_MFD_WM8350_RTC_H
+
+#include <linux/platform_device.h>
+
+/*
+ * Register values.
+ */
+#define WM8350_RTC_SECONDS_MINUTES 0x10
+#define WM8350_RTC_HOURS_DAY 0x11
+#define WM8350_RTC_DATE_MONTH 0x12
+#define WM8350_RTC_YEAR 0x13
+#define WM8350_ALARM_SECONDS_MINUTES 0x14
+#define WM8350_ALARM_HOURS_DAY 0x15
+#define WM8350_ALARM_DATE_MONTH 0x16
+#define WM8350_RTC_TIME_CONTROL 0x17
+
+/*
+ * R16 (0x10) - RTC Seconds/Minutes
+ */
+#define WM8350_RTC_MINS_MASK 0x7F00
+#define WM8350_RTC_MINS_SHIFT 8
+#define WM8350_RTC_SECS_MASK 0x007F
+#define WM8350_RTC_SECS_SHIFT 0
+
+/*
+ * R17 (0x11) - RTC Hours/Day
+ */
+#define WM8350_RTC_DAY_MASK 0x0700
+#define WM8350_RTC_DAY_SHIFT 8
+#define WM8350_RTC_HPM_MASK 0x0020
+#define WM8350_RTC_HPM_SHIFT 5
+#define WM8350_RTC_HRS_MASK 0x001F
+#define WM8350_RTC_HRS_SHIFT 0
+
+/* Bit values for R17 (0x11) */
+#define WM8350_RTC_DAY_SUN 1
+#define WM8350_RTC_DAY_MON 2
+#define WM8350_RTC_DAY_TUE 3
+#define WM8350_RTC_DAY_WED 4
+#define WM8350_RTC_DAY_THU 5
+#define WM8350_RTC_DAY_FRI 6
+#define WM8350_RTC_DAY_SAT 7
+
+#define WM8350_RTC_HPM_AM 0
+#define WM8350_RTC_HPM_PM 1
+
+/*
+ * R18 (0x12) - RTC Date/Month
+ */
+#define WM8350_RTC_MTH_MASK 0x1F00
+#define WM8350_RTC_MTH_SHIFT 8
+#define WM8350_RTC_DATE_MASK 0x003F
+#define WM8350_RTC_DATE_SHIFT 0
+
+/* Bit values for R18 (0x12) */
+#define WM8350_RTC_MTH_JAN 1
+#define WM8350_RTC_MTH_FEB 2
+#define WM8350_RTC_MTH_MAR 3
+#define WM8350_RTC_MTH_APR 4
+#define WM8350_RTC_MTH_MAY 5
+#define WM8350_RTC_MTH_JUN 6
+#define WM8350_RTC_MTH_JUL 7
+#define WM8350_RTC_MTH_AUG 8
+#define WM8350_RTC_MTH_SEP 9
+#define WM8350_RTC_MTH_OCT 10
+#define WM8350_RTC_MTH_NOV 11
+#define WM8350_RTC_MTH_DEC 12
+#define WM8350_RTC_MTH_JAN_BCD 0x01
+#define WM8350_RTC_MTH_FEB_BCD 0x02
+#define WM8350_RTC_MTH_MAR_BCD 0x03
+#define WM8350_RTC_MTH_APR_BCD 0x04
+#define WM8350_RTC_MTH_MAY_BCD 0x05
+#define WM8350_RTC_MTH_JUN_BCD 0x06
+#define WM8350_RTC_MTH_JUL_BCD 0x07
+#define WM8350_RTC_MTH_AUG_BCD 0x08
+#define WM8350_RTC_MTH_SEP_BCD 0x09
+#define WM8350_RTC_MTH_OCT_BCD 0x10
+#define WM8350_RTC_MTH_NOV_BCD 0x11
+#define WM8350_RTC_MTH_DEC_BCD 0x12
+
+/*
+ * R19 (0x13) - RTC Year
+ */
+#define WM8350_RTC_YHUNDREDS_MASK 0x3F00
+#define WM8350_RTC_YHUNDREDS_SHIFT 8
+#define WM8350_RTC_YUNITS_MASK 0x00FF
+#define WM8350_RTC_YUNITS_SHIFT 0
+
+/*
+ * R20 (0x14) - Alarm Seconds/Minutes
+ */
+#define WM8350_RTC_ALMMINS_MASK 0x7F00
+#define WM8350_RTC_ALMMINS_SHIFT 8
+#define WM8350_RTC_ALMSECS_MASK 0x007F
+#define WM8350_RTC_ALMSECS_SHIFT 0
+
+/* Bit values for R20 (0x14) */
+#define WM8350_RTC_ALMMINS_DONT_CARE -1
+#define WM8350_RTC_ALMSECS_DONT_CARE -1
+
+/*
+ * R21 (0x15) - Alarm Hours/Day
+ */
+#define WM8350_RTC_ALMDAY_MASK 0x0F00
+#define WM8350_RTC_ALMDAY_SHIFT 8
+#define WM8350_RTC_ALMHPM_MASK 0x0020
+#define WM8350_RTC_ALMHPM_SHIFT 5
+#define WM8350_RTC_ALMHRS_MASK 0x001F
+#define WM8350_RTC_ALMHRS_SHIFT 0
+
+/* Bit values for R21 (0x15) */
+#define WM8350_RTC_ALMDAY_DONT_CARE -1
+#define WM8350_RTC_ALMDAY_SUN 1
+#define WM8350_RTC_ALMDAY_MON 2
+#define WM8350_RTC_ALMDAY_TUE 3
+#define WM8350_RTC_ALMDAY_WED 4
+#define WM8350_RTC_ALMDAY_THU 5
+#define WM8350_RTC_ALMDAY_FRI 6
+#define WM8350_RTC_ALMDAY_SAT 7
+
+#define WM8350_RTC_ALMHPM_AM 0
+#define WM8350_RTC_ALMHPM_PM 1
+
+#define WM8350_RTC_ALMHRS_DONT_CARE -1
+
+/*
+ * R22 (0x16) - Alarm Date/Month
+ */
+#define WM8350_RTC_ALMMTH_MASK 0x1F00
+#define WM8350_RTC_ALMMTH_SHIFT 8
+#define WM8350_RTC_ALMDATE_MASK 0x003F
+#define WM8350_RTC_ALMDATE_SHIFT 0
+
+/* Bit values for R22 (0x16) */
+#define WM8350_RTC_ALMDATE_DONT_CARE -1
+
+#define WM8350_RTC_ALMMTH_DONT_CARE -1
+#define WM8350_RTC_ALMMTH_JAN 1
+#define WM8350_RTC_ALMMTH_FEB 2
+#define WM8350_RTC_ALMMTH_MAR 3
+#define WM8350_RTC_ALMMTH_APR 4
+#define WM8350_RTC_ALMMTH_MAY 5
+#define WM8350_RTC_ALMMTH_JUN 6
+#define WM8350_RTC_ALMMTH_JUL 7
+#define WM8350_RTC_ALMMTH_AUG 8
+#define WM8350_RTC_ALMMTH_SEP 9
+#define WM8350_RTC_ALMMTH_OCT 10
+#define WM8350_RTC_ALMMTH_NOV 11
+#define WM8350_RTC_ALMMTH_DEC 12
+#define WM8350_RTC_ALMMTH_JAN_BCD 0x01
+#define WM8350_RTC_ALMMTH_FEB_BCD 0x02
+#define WM8350_RTC_ALMMTH_MAR_BCD 0x03
+#define WM8350_RTC_ALMMTH_APR_BCD 0x04
+#define WM8350_RTC_ALMMTH_MAY_BCD 0x05
+#define WM8350_RTC_ALMMTH_JUN_BCD 0x06
+#define WM8350_RTC_ALMMTH_JUL_BCD 0x07
+#define WM8350_RTC_ALMMTH_AUG_BCD 0x08
+#define WM8350_RTC_ALMMTH_SEP_BCD 0x09
+#define WM8350_RTC_ALMMTH_OCT_BCD 0x10
+#define WM8350_RTC_ALMMTH_NOV_BCD 0x11
+#define WM8350_RTC_ALMMTH_DEC_BCD 0x12
+
+/*
+ * R23 (0x17) - RTC Time Control
+ */
+#define WM8350_RTC_BCD 0x8000
+#define WM8350_RTC_BCD_MASK 0x8000
+#define WM8350_RTC_BCD_SHIFT 15
+#define WM8350_RTC_12HR 0x4000
+#define WM8350_RTC_12HR_MASK 0x4000
+#define WM8350_RTC_12HR_SHIFT 14
+#define WM8350_RTC_DST 0x2000
+#define WM8350_RTC_DST_MASK 0x2000
+#define WM8350_RTC_DST_SHIFT 13
+#define WM8350_RTC_SET 0x0800
+#define WM8350_RTC_SET_MASK 0x0800
+#define WM8350_RTC_SET_SHIFT 11
+#define WM8350_RTC_STS 0x0400
+#define WM8350_RTC_STS_MASK 0x0400
+#define WM8350_RTC_STS_SHIFT 10
+#define WM8350_RTC_ALMSET 0x0200
+#define WM8350_RTC_ALMSET_MASK 0x0200
+#define WM8350_RTC_ALMSET_SHIFT 9
+#define WM8350_RTC_ALMSTS 0x0100
+#define WM8350_RTC_ALMSTS_MASK 0x0100
+#define WM8350_RTC_ALMSTS_SHIFT 8
+#define WM8350_RTC_PINT 0x0070
+#define WM8350_RTC_PINT_MASK 0x0070
+#define WM8350_RTC_PINT_SHIFT 4
+#define WM8350_RTC_DSW 0x000F
+#define WM8350_RTC_DSW_MASK 0x000F
+#define WM8350_RTC_DSW_SHIFT 0
+
+/* Bit values for R23 (0x17) */
+#define WM8350_RTC_BCD_BINARY 0
+#define WM8350_RTC_BCD_BCD 1
+
+#define WM8350_RTC_12HR_24HR 0
+#define WM8350_RTC_12HR_12HR 1
+
+#define WM8350_RTC_DST_DISABLED 0
+#define WM8350_RTC_DST_ENABLED 1
+
+#define WM8350_RTC_SET_RUN 0
+#define WM8350_RTC_SET_SET 1
+
+#define WM8350_RTC_STS_RUNNING 0
+#define WM8350_RTC_STS_STOPPED 1
+
+#define WM8350_RTC_ALMSET_RUN 0
+#define WM8350_RTC_ALMSET_SET 1
+
+#define WM8350_RTC_ALMSTS_RUNNING 0
+#define WM8350_RTC_ALMSTS_STOPPED 1
+
+#define WM8350_RTC_PINT_DISABLED 0
+#define WM8350_RTC_PINT_SECS 1
+#define WM8350_RTC_PINT_MINS 2
+#define WM8350_RTC_PINT_HRS 3
+#define WM8350_RTC_PINT_DAYS 4
+#define WM8350_RTC_PINT_MTHS 5
+
+#define WM8350_RTC_DSW_DISABLED 0
+#define WM8350_RTC_DSW_1HZ 1
+#define WM8350_RTC_DSW_2HZ 2
+#define WM8350_RTC_DSW_4HZ 3
+#define WM8350_RTC_DSW_8HZ 4
+#define WM8350_RTC_DSW_16HZ 5
+#define WM8350_RTC_DSW_32HZ 6
+#define WM8350_RTC_DSW_64HZ 7
+#define WM8350_RTC_DSW_128HZ 8
+#define WM8350_RTC_DSW_256HZ 9
+#define WM8350_RTC_DSW_512HZ 10
+#define WM8350_RTC_DSW_1024HZ 11
+
+/*
+ * R218 (0xDA) - RTC Tick Control
+ */
+#define WM8350_RTC_TICKSTS 0x4000
+#define WM8350_RTC_CLKSRC 0x2000
+#define WM8350_RTC_TRIM_MASK 0x03FF
+
+/*
+ * RTC Interrupts.
+ */
+#define WM8350_IRQ_RTC_PER 7
+#define WM8350_IRQ_RTC_SEC 8
+#define WM8350_IRQ_RTC_ALM 9
+
+struct wm8350_rtc {
+ struct platform_device *pdev;
+ struct rtc_device *rtc;
+ int alarm_enabled; /* used over suspend/resume */
+ int update_enabled;
+};
+
+#endif
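
As an illustration of the mask/shift pairs defined above, the following sketch (not part of this patch) splits a raw R16 value into minutes and seconds. regval is assumed to have been read from WM8350_RTC_SECONDS_MINUTES by the caller, and the counters are assumed to be in binary rather than BCD mode (WM8350_RTC_BCD clear); in BCD mode each field would additionally need a BCD-to-binary conversion.

/* Illustrative only: unpack R16 (RTC Seconds/Minutes). Assumes the register
 * has already been read and that WM8350_RTC_BCD is clear (binary mode). */
static void example_rtc_unpack_secs_mins(u16 regval, int *mins, int *secs)
{
	*mins = (regval & WM8350_RTC_MINS_MASK) >> WM8350_RTC_MINS_SHIFT;
	*secs = (regval & WM8350_RTC_SECS_MASK) >> WM8350_RTC_SECS_SHIFT;
}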
diff --git a/include/linux/mfd/wm8350/supply.h b/include/linux/mfd/wm8350/supply.h
new file mode 100644
index 000000000..8dc93673e
--- /dev/null
+++ b/include/linux/mfd/wm8350/supply.h
@@ -0,0 +1,134 @@
+/*
+ * supply.h -- Power Supply Driver for Wolfson WM8350 PMIC
+ *
+ * Copyright 2007 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_WM8350_SUPPLY_H_
+#define __LINUX_MFD_WM8350_SUPPLY_H_
+
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+
+/*
+ * Charger registers
+ */
+#define WM8350_BATTERY_CHARGER_CONTROL_1 0xA8
+#define WM8350_BATTERY_CHARGER_CONTROL_2 0xA9
+#define WM8350_BATTERY_CHARGER_CONTROL_3 0xAA
+
+/*
+ * R168 (0xA8) - Battery Charger Control 1
+ */
+#define WM8350_CHG_ENA_R168 0x8000
+#define WM8350_CHG_THR 0x2000
+#define WM8350_CHG_EOC_SEL_MASK 0x1C00
+#define WM8350_CHG_TRICKLE_TEMP_CHOKE 0x0200
+#define WM8350_CHG_TRICKLE_USB_CHOKE 0x0100
+#define WM8350_CHG_RECOVER_T 0x0080
+#define WM8350_CHG_END_ACT 0x0040
+#define WM8350_CHG_FAST 0x0020
+#define WM8350_CHG_FAST_USB_THROTTLE 0x0010
+#define WM8350_CHG_NTC_MON 0x0008
+#define WM8350_CHG_BATT_HOT_MON 0x0004
+#define WM8350_CHG_BATT_COLD_MON 0x0002
+#define WM8350_CHG_CHIP_TEMP_MON 0x0001
+
+/*
+ * R169 (0xA9) - Battery Charger Control 2
+ */
+#define WM8350_CHG_ACTIVE 0x8000
+#define WM8350_CHG_PAUSE 0x4000
+#define WM8350_CHG_STS_MASK 0x3000
+#define WM8350_CHG_TIME_MASK 0x0F00
+#define WM8350_CHG_MASK_WALL_FB 0x0080
+#define WM8350_CHG_TRICKLE_SEL 0x0040
+#define WM8350_CHG_VSEL_MASK 0x0030
+#define WM8350_CHG_ISEL_MASK 0x000F
+#define WM8350_CHG_STS_OFF 0x0000
+#define WM8350_CHG_STS_TRICKLE 0x1000
+#define WM8350_CHG_STS_FAST 0x2000
+
+/*
+ * R170 (0xAA) - Battery Charger Control 3
+ */
+#define WM8350_CHG_THROTTLE_T_MASK 0x0060
+#define WM8350_CHG_SMART 0x0010
+#define WM8350_CHG_TIMER_ADJT_MASK 0x000F
+
+/*
+ * Charger Interrupts
+ */
+#define WM8350_IRQ_CHG_BAT_HOT 0
+#define WM8350_IRQ_CHG_BAT_COLD 1
+#define WM8350_IRQ_CHG_BAT_FAIL 2
+#define WM8350_IRQ_CHG_TO 3
+#define WM8350_IRQ_CHG_END 4
+#define WM8350_IRQ_CHG_START 5
+#define WM8350_IRQ_CHG_FAST_RDY 6
+#define WM8350_IRQ_CHG_VBATT_LT_3P9 10
+#define WM8350_IRQ_CHG_VBATT_LT_3P1 11
+#define WM8350_IRQ_CHG_VBATT_LT_2P85 12
+
+/*
+ * Charger Policy
+ */
+#define WM8350_CHG_TRICKLE_50mA (0 << 6)
+#define WM8350_CHG_TRICKLE_100mA (1 << 6)
+#define WM8350_CHG_4_05V (0 << 4)
+#define WM8350_CHG_4_10V (1 << 4)
+#define WM8350_CHG_4_15V (2 << 4)
+#define WM8350_CHG_4_20V (3 << 4)
+#define WM8350_CHG_FAST_LIMIT_mA(x) ((x / 50) & 0xf)
+#define WM8350_CHG_EOC_mA(x) (((x - 10) & 0x7) << 10)
+#define WM8350_CHG_TRICKLE_3_1V (0 << 13)
+#define WM8350_CHG_TRICKLE_3_9V (1 << 13)
+
+/*
+ * Supply Registers.
+ */
+#define WM8350_USB_VOLTAGE_READBACK 0x9C
+#define WM8350_LINE_VOLTAGE_READBACK 0x9D
+#define WM8350_BATT_VOLTAGE_READBACK 0x9E
+
+/*
+ * Supply Interrupts.
+ */
+#define WM8350_IRQ_USB_LIMIT 15
+#define WM8350_IRQ_EXT_USB_FB 36
+#define WM8350_IRQ_EXT_WALL_FB 37
+#define WM8350_IRQ_EXT_BAT_FB 38
+
+/*
+ * Policy to control charger state machine.
+ */
+struct wm8350_charger_policy {
+
+ /* charger state machine policy - set in machine driver */
+ int eoc_mA; /* end of charge current (mA) */
+ int charge_mV; /* charge voltage */
+ int fast_limit_mA; /* fast charge current limit */
+ int fast_limit_USB_mA; /* USB fast charge current limit */
+ int charge_timeout; /* charge timeout (mins) */
+ int trickle_start_mV; /* trickle charge starts at mV */
+ int trickle_charge_mA; /* trickle charge current */
+ int trickle_charge_USB_mA; /* USB trickle charge current */
+};
+
+struct wm8350_power {
+ struct platform_device *pdev;
+ struct power_supply *battery;
+ struct power_supply *usb;
+ struct power_supply *ac;
+ struct wm8350_charger_policy *policy;
+
+ int rev_g_coeff;
+};
+
+#endif
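
The parameterised charger macros above fold plain milliamp arguments into register field codes: WM8350_CHG_FAST_LIMIT_mA(750) evaluates to (750 / 50) & 0xf = 15, and WM8350_CHG_EOC_mA(20) to ((20 - 10) & 0x7) << 10 = 0x0800. A worked sketch with assumed example currents (the figures are illustrative, not taken from this patch):

/* Worked example of the charger macros; 750 mA and 20 mA are assumed
 * illustration values, not figures from this patch. */
static void example_charger_macro_values(void)
{
	u16 fast_limit = WM8350_CHG_FAST_LIMIT_mA(750);	/* (750 / 50) & 0xf == 15          */
	u16 eoc_code   = WM8350_CHG_EOC_mA(20);		/* ((20 - 10) & 7) << 10 == 0x0800 */

	(void)fast_limit;
	(void)eoc_code;
}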
diff --git a/include/linux/mfd/wm8350/wdt.h b/include/linux/mfd/wm8350/wdt.h
new file mode 100644
index 000000000..f6135b5e5
--- /dev/null
+++ b/include/linux/mfd/wm8350/wdt.h
@@ -0,0 +1,28 @@
+/*
+ * wdt.h -- Watchdog Driver for Wolfson WM8350 PMIC
+ *
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_MFD_WM8350_WDT_H_
+#define __LINUX_MFD_WM8350_WDT_H_
+
+#include <linux/platform_device.h>
+
+#define WM8350_WDOG_HIB_MODE 0x0080
+#define WM8350_WDOG_DEBUG 0x0040
+#define WM8350_WDOG_MODE_MASK 0x0030
+#define WM8350_WDOG_TO_MASK 0x0007
+
+#define WM8350_IRQ_SYS_WDOG_TO 24
+
+struct wm8350_wdt {
+ struct platform_device *pdev;
+};
+
+#endif
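
A short sketch of the usual read-modify-write pattern for the watchdog fields above; ctrl is assumed to hold the current watchdog control register contents and timeout_code an already-valid timeout encoding, neither of which is defined by this patch.

/* Illustrative only: replace the timeout field in a cached watchdog
 * control value; writing it back to the chip is left to the caller. */
static u16 example_wdog_update_timeout(u16 ctrl, u16 timeout_code)
{
	ctrl &= ~WM8350_WDOG_TO_MASK;
	ctrl |= timeout_code & WM8350_WDOG_TO_MASK;
	return ctrl;
}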
diff --git a/include/linux/mfd/wm8400-audio.h b/include/linux/mfd/wm8400-audio.h
new file mode 100644
index 000000000..e06ed3eb1
--- /dev/null
+++ b/include/linux/mfd/wm8400-audio.h
@@ -0,0 +1,1187 @@
+/*
+ * wm8400 private definitions for audio
+ *
+ * Copyright 2008 Wolfson Microelectronics plc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_MFD_WM8400_AUDIO_H
+#define __LINUX_MFD_WM8400_AUDIO_H
+
+#include <linux/mfd/wm8400-private.h>
+
+/*
+ * R2 (0x02) - Power Management (1)
+ */
+#define WM8400_CODEC_ENA 0x8000 /* CODEC_ENA */
+#define WM8400_CODEC_ENA_MASK 0x8000 /* CODEC_ENA */
+#define WM8400_CODEC_ENA_SHIFT 15 /* CODEC_ENA */
+#define WM8400_CODEC_ENA_WIDTH 1 /* CODEC_ENA */
+#define WM8400_SYSCLK_ENA 0x4000 /* SYSCLK_ENA */
+#define WM8400_SYSCLK_ENA_MASK 0x4000 /* SYSCLK_ENA */
+#define WM8400_SYSCLK_ENA_SHIFT 14 /* SYSCLK_ENA */
+#define WM8400_SYSCLK_ENA_WIDTH 1 /* SYSCLK_ENA */
+#define WM8400_SPK_MIX_ENA 0x2000 /* SPK_MIX_ENA */
+#define WM8400_SPK_MIX_ENA_MASK 0x2000 /* SPK_MIX_ENA */
+#define WM8400_SPK_MIX_ENA_SHIFT 13 /* SPK_MIX_ENA */
+#define WM8400_SPK_MIX_ENA_WIDTH 1 /* SPK_MIX_ENA */
+#define WM8400_SPK_ENA 0x1000 /* SPK_ENA */
+#define WM8400_SPK_ENA_MASK 0x1000 /* SPK_ENA */
+#define WM8400_SPK_ENA_SHIFT 12 /* SPK_ENA */
+#define WM8400_SPK_ENA_WIDTH 1 /* SPK_ENA */
+#define WM8400_OUT3_ENA 0x0800 /* OUT3_ENA */
+#define WM8400_OUT3_ENA_MASK 0x0800 /* OUT3_ENA */
+#define WM8400_OUT3_ENA_SHIFT 11 /* OUT3_ENA */
+#define WM8400_OUT3_ENA_WIDTH 1 /* OUT3_ENA */
+#define WM8400_OUT4_ENA 0x0400 /* OUT4_ENA */
+#define WM8400_OUT4_ENA_MASK 0x0400 /* OUT4_ENA */
+#define WM8400_OUT4_ENA_SHIFT 10 /* OUT4_ENA */
+#define WM8400_OUT4_ENA_WIDTH 1 /* OUT4_ENA */
+#define WM8400_LOUT_ENA 0x0200 /* LOUT_ENA */
+#define WM8400_LOUT_ENA_MASK 0x0200 /* LOUT_ENA */
+#define WM8400_LOUT_ENA_SHIFT 9 /* LOUT_ENA */
+#define WM8400_LOUT_ENA_WIDTH 1 /* LOUT_ENA */
+#define WM8400_ROUT_ENA 0x0100 /* ROUT_ENA */
+#define WM8400_ROUT_ENA_MASK 0x0100 /* ROUT_ENA */
+#define WM8400_ROUT_ENA_SHIFT 8 /* ROUT_ENA */
+#define WM8400_ROUT_ENA_WIDTH 1 /* ROUT_ENA */
+#define WM8400_MIC1BIAS_ENA 0x0010 /* MIC1BIAS_ENA */
+#define WM8400_MIC1BIAS_ENA_MASK 0x0010 /* MIC1BIAS_ENA */
+#define WM8400_MIC1BIAS_ENA_SHIFT 4 /* MIC1BIAS_ENA */
+#define WM8400_MIC1BIAS_ENA_WIDTH 1 /* MIC1BIAS_ENA */
+#define WM8400_VMID_MODE_MASK 0x0006 /* VMID_MODE - [2:1] */
+#define WM8400_VMID_MODE_SHIFT 1 /* VMID_MODE - [2:1] */
+#define WM8400_VMID_MODE_WIDTH 2 /* VMID_MODE - [2:1] */
+#define WM8400_VREF_ENA 0x0001 /* VREF_ENA */
+#define WM8400_VREF_ENA_MASK 0x0001 /* VREF_ENA */
+#define WM8400_VREF_ENA_SHIFT 0 /* VREF_ENA */
+#define WM8400_VREF_ENA_WIDTH 1 /* VREF_ENA */
+
+/*
+ * R3 (0x03) - Power Management (2)
+ */
+#define WM8400_FLL_ENA 0x8000 /* FLL_ENA */
+#define WM8400_FLL_ENA_MASK 0x8000 /* FLL_ENA */
+#define WM8400_FLL_ENA_SHIFT 15 /* FLL_ENA */
+#define WM8400_FLL_ENA_WIDTH 1 /* FLL_ENA */
+#define WM8400_TSHUT_ENA 0x4000 /* TSHUT_ENA */
+#define WM8400_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */
+#define WM8400_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */
+#define WM8400_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */
+#define WM8400_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */
+#define WM8400_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */
+#define WM8400_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */
+#define WM8400_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */
+#define WM8400_OPCLK_ENA 0x0800 /* OPCLK_ENA */
+#define WM8400_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */
+#define WM8400_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */
+#define WM8400_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */
+#define WM8400_AINL_ENA 0x0200 /* AINL_ENA */
+#define WM8400_AINL_ENA_MASK 0x0200 /* AINL_ENA */
+#define WM8400_AINL_ENA_SHIFT 9 /* AINL_ENA */
+#define WM8400_AINL_ENA_WIDTH 1 /* AINL_ENA */
+#define WM8400_AINR_ENA 0x0100 /* AINR_ENA */
+#define WM8400_AINR_ENA_MASK 0x0100 /* AINR_ENA */
+#define WM8400_AINR_ENA_SHIFT 8 /* AINR_ENA */
+#define WM8400_AINR_ENA_WIDTH 1 /* AINR_ENA */
+#define WM8400_LIN34_ENA 0x0080 /* LIN34_ENA */
+#define WM8400_LIN34_ENA_MASK 0x0080 /* LIN34_ENA */
+#define WM8400_LIN34_ENA_SHIFT 7 /* LIN34_ENA */
+#define WM8400_LIN34_ENA_WIDTH 1 /* LIN34_ENA */
+#define WM8400_LIN12_ENA 0x0040 /* LIN12_ENA */
+#define WM8400_LIN12_ENA_MASK 0x0040 /* LIN12_ENA */
+#define WM8400_LIN12_ENA_SHIFT 6 /* LIN12_ENA */
+#define WM8400_LIN12_ENA_WIDTH 1 /* LIN12_ENA */
+#define WM8400_RIN34_ENA 0x0020 /* RIN34_ENA */
+#define WM8400_RIN34_ENA_MASK 0x0020 /* RIN34_ENA */
+#define WM8400_RIN34_ENA_SHIFT 5 /* RIN34_ENA */
+#define WM8400_RIN34_ENA_WIDTH 1 /* RIN34_ENA */
+#define WM8400_RIN12_ENA 0x0010 /* RIN12_ENA */
+#define WM8400_RIN12_ENA_MASK 0x0010 /* RIN12_ENA */
+#define WM8400_RIN12_ENA_SHIFT 4 /* RIN12_ENA */
+#define WM8400_RIN12_ENA_WIDTH 1 /* RIN12_ENA */
+#define WM8400_ADCL_ENA 0x0002 /* ADCL_ENA */
+#define WM8400_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */
+#define WM8400_ADCL_ENA_SHIFT 1 /* ADCL_ENA */
+#define WM8400_ADCL_ENA_WIDTH 1 /* ADCL_ENA */
+#define WM8400_ADCR_ENA 0x0001 /* ADCR_ENA */
+#define WM8400_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */
+#define WM8400_ADCR_ENA_SHIFT 0 /* ADCR_ENA */
+#define WM8400_ADCR_ENA_WIDTH 1 /* ADCR_ENA */
+
+/*
+ * R4 (0x04) - Power Management (3)
+ */
+#define WM8400_LON_ENA 0x2000 /* LON_ENA */
+#define WM8400_LON_ENA_MASK 0x2000 /* LON_ENA */
+#define WM8400_LON_ENA_SHIFT 13 /* LON_ENA */
+#define WM8400_LON_ENA_WIDTH 1 /* LON_ENA */
+#define WM8400_LOP_ENA 0x1000 /* LOP_ENA */
+#define WM8400_LOP_ENA_MASK 0x1000 /* LOP_ENA */
+#define WM8400_LOP_ENA_SHIFT 12 /* LOP_ENA */
+#define WM8400_LOP_ENA_WIDTH 1 /* LOP_ENA */
+#define WM8400_RON_ENA 0x0800 /* RON_ENA */
+#define WM8400_RON_ENA_MASK 0x0800 /* RON_ENA */
+#define WM8400_RON_ENA_SHIFT 11 /* RON_ENA */
+#define WM8400_RON_ENA_WIDTH 1 /* RON_ENA */
+#define WM8400_ROP_ENA 0x0400 /* ROP_ENA */
+#define WM8400_ROP_ENA_MASK 0x0400 /* ROP_ENA */
+#define WM8400_ROP_ENA_SHIFT 10 /* ROP_ENA */
+#define WM8400_ROP_ENA_WIDTH 1 /* ROP_ENA */
+#define WM8400_LOPGA_ENA 0x0080 /* LOPGA_ENA */
+#define WM8400_LOPGA_ENA_MASK 0x0080 /* LOPGA_ENA */
+#define WM8400_LOPGA_ENA_SHIFT 7 /* LOPGA_ENA */
+#define WM8400_LOPGA_ENA_WIDTH 1 /* LOPGA_ENA */
+#define WM8400_ROPGA_ENA 0x0040 /* ROPGA_ENA */
+#define WM8400_ROPGA_ENA_MASK 0x0040 /* ROPGA_ENA */
+#define WM8400_ROPGA_ENA_SHIFT 6 /* ROPGA_ENA */
+#define WM8400_ROPGA_ENA_WIDTH 1 /* ROPGA_ENA */
+#define WM8400_LOMIX_ENA 0x0020 /* LOMIX_ENA */
+#define WM8400_LOMIX_ENA_MASK 0x0020 /* LOMIX_ENA */
+#define WM8400_LOMIX_ENA_SHIFT 5 /* LOMIX_ENA */
+#define WM8400_LOMIX_ENA_WIDTH 1 /* LOMIX_ENA */
+#define WM8400_ROMIX_ENA 0x0010 /* ROMIX_ENA */
+#define WM8400_ROMIX_ENA_MASK 0x0010 /* ROMIX_ENA */
+#define WM8400_ROMIX_ENA_SHIFT 4 /* ROMIX_ENA */
+#define WM8400_ROMIX_ENA_WIDTH 1 /* ROMIX_ENA */
+#define WM8400_DACL_ENA 0x0002 /* DACL_ENA */
+#define WM8400_DACL_ENA_MASK 0x0002 /* DACL_ENA */
+#define WM8400_DACL_ENA_SHIFT 1 /* DACL_ENA */
+#define WM8400_DACL_ENA_WIDTH 1 /* DACL_ENA */
+#define WM8400_DACR_ENA 0x0001 /* DACR_ENA */
+#define WM8400_DACR_ENA_MASK 0x0001 /* DACR_ENA */
+#define WM8400_DACR_ENA_SHIFT 0 /* DACR_ENA */
+#define WM8400_DACR_ENA_WIDTH 1 /* DACR_ENA */
+
+/*
+ * R5 (0x05) - Audio Interface (1)
+ */
+#define WM8400_AIFADCL_SRC 0x8000 /* AIFADCL_SRC */
+#define WM8400_AIFADCL_SRC_MASK 0x8000 /* AIFADCL_SRC */
+#define WM8400_AIFADCL_SRC_SHIFT 15 /* AIFADCL_SRC */
+#define WM8400_AIFADCL_SRC_WIDTH 1 /* AIFADCL_SRC */
+#define WM8400_AIFADCR_SRC 0x4000 /* AIFADCR_SRC */
+#define WM8400_AIFADCR_SRC_MASK 0x4000 /* AIFADCR_SRC */
+#define WM8400_AIFADCR_SRC_SHIFT 14 /* AIFADCR_SRC */
+#define WM8400_AIFADCR_SRC_WIDTH 1 /* AIFADCR_SRC */
+#define WM8400_AIFADC_TDM 0x2000 /* AIFADC_TDM */
+#define WM8400_AIFADC_TDM_MASK 0x2000 /* AIFADC_TDM */
+#define WM8400_AIFADC_TDM_SHIFT 13 /* AIFADC_TDM */
+#define WM8400_AIFADC_TDM_WIDTH 1 /* AIFADC_TDM */
+#define WM8400_AIFADC_TDM_CHAN 0x1000 /* AIFADC_TDM_CHAN */
+#define WM8400_AIFADC_TDM_CHAN_MASK 0x1000 /* AIFADC_TDM_CHAN */
+#define WM8400_AIFADC_TDM_CHAN_SHIFT 12 /* AIFADC_TDM_CHAN */
+#define WM8400_AIFADC_TDM_CHAN_WIDTH 1 /* AIFADC_TDM_CHAN */
+#define WM8400_AIF_BCLK_INV 0x0100 /* AIF_BCLK_INV */
+#define WM8400_AIF_BCLK_INV_MASK 0x0100 /* AIF_BCLK_INV */
+#define WM8400_AIF_BCLK_INV_SHIFT 8 /* AIF_BCLK_INV */
+#define WM8400_AIF_BCLK_INV_WIDTH 1 /* AIF_BCLK_INV */
+#define WM8400_AIF_LRCLK_INV 0x0080 /* AIF_LRCLK_INV */
+#define WM8400_AIF_LRCLK_INV_MASK 0x0080 /* AIF_LRCLK_INV */
+#define WM8400_AIF_LRCLK_INV_SHIFT 7 /* AIF_LRCLK_INV */
+#define WM8400_AIF_LRCLK_INV_WIDTH 1 /* AIF_LRCLK_INV */
+#define WM8400_AIF_WL_MASK 0x0060 /* AIF_WL - [6:5] */
+#define WM8400_AIF_WL_SHIFT 5 /* AIF_WL - [6:5] */
+#define WM8400_AIF_WL_WIDTH 2 /* AIF_WL - [6:5] */
+#define WM8400_AIF_WL_16BITS (0 << 5)
+#define WM8400_AIF_WL_20BITS (1 << 5)
+#define WM8400_AIF_WL_24BITS (2 << 5)
+#define WM8400_AIF_WL_32BITS (3 << 5)
+#define WM8400_AIF_FMT_MASK 0x0018 /* AIF_FMT - [4:3] */
+#define WM8400_AIF_FMT_SHIFT 3 /* AIF_FMT - [4:3] */
+#define WM8400_AIF_FMT_WIDTH 2 /* AIF_FMT - [4:3] */
+#define WM8400_AIF_FMT_RIGHTJ (0 << 3)
+#define WM8400_AIF_FMT_LEFTJ (1 << 3)
+#define WM8400_AIF_FMT_I2S (2 << 3)
+#define WM8400_AIF_FMT_DSP (3 << 3)
+
+/*
+ * R6 (0x06) - Audio Interface (2)
+ */
+#define WM8400_DACL_SRC 0x8000 /* DACL_SRC */
+#define WM8400_DACL_SRC_MASK 0x8000 /* DACL_SRC */
+#define WM8400_DACL_SRC_SHIFT 15 /* DACL_SRC */
+#define WM8400_DACL_SRC_WIDTH 1 /* DACL_SRC */
+#define WM8400_DACR_SRC 0x4000 /* DACR_SRC */
+#define WM8400_DACR_SRC_MASK 0x4000 /* DACR_SRC */
+#define WM8400_DACR_SRC_SHIFT 14 /* DACR_SRC */
+#define WM8400_DACR_SRC_WIDTH 1 /* DACR_SRC */
+#define WM8400_AIFDAC_TDM 0x2000 /* AIFDAC_TDM */
+#define WM8400_AIFDAC_TDM_MASK 0x2000 /* AIFDAC_TDM */
+#define WM8400_AIFDAC_TDM_SHIFT 13 /* AIFDAC_TDM */
+#define WM8400_AIFDAC_TDM_WIDTH 1 /* AIFDAC_TDM */
+#define WM8400_AIFDAC_TDM_CHAN 0x1000 /* AIFDAC_TDM_CHAN */
+#define WM8400_AIFDAC_TDM_CHAN_MASK 0x1000 /* AIFDAC_TDM_CHAN */
+#define WM8400_AIFDAC_TDM_CHAN_SHIFT 12 /* AIFDAC_TDM_CHAN */
+#define WM8400_AIFDAC_TDM_CHAN_WIDTH 1 /* AIFDAC_TDM_CHAN */
+#define WM8400_DAC_BOOST_MASK 0x0C00 /* DAC_BOOST - [11:10] */
+#define WM8400_DAC_BOOST_SHIFT 10 /* DAC_BOOST - [11:10] */
+#define WM8400_DAC_BOOST_WIDTH 2 /* DAC_BOOST - [11:10] */
+#define WM8400_DAC_COMP 0x0010 /* DAC_COMP */
+#define WM8400_DAC_COMP_MASK 0x0010 /* DAC_COMP */
+#define WM8400_DAC_COMP_SHIFT 4 /* DAC_COMP */
+#define WM8400_DAC_COMP_WIDTH 1 /* DAC_COMP */
+#define WM8400_DAC_COMPMODE 0x0008 /* DAC_COMPMODE */
+#define WM8400_DAC_COMPMODE_MASK 0x0008 /* DAC_COMPMODE */
+#define WM8400_DAC_COMPMODE_SHIFT 3 /* DAC_COMPMODE */
+#define WM8400_DAC_COMPMODE_WIDTH 1 /* DAC_COMPMODE */
+#define WM8400_ADC_COMP 0x0004 /* ADC_COMP */
+#define WM8400_ADC_COMP_MASK 0x0004 /* ADC_COMP */
+#define WM8400_ADC_COMP_SHIFT 2 /* ADC_COMP */
+#define WM8400_ADC_COMP_WIDTH 1 /* ADC_COMP */
+#define WM8400_ADC_COMPMODE 0x0002 /* ADC_COMPMODE */
+#define WM8400_ADC_COMPMODE_MASK 0x0002 /* ADC_COMPMODE */
+#define WM8400_ADC_COMPMODE_SHIFT 1 /* ADC_COMPMODE */
+#define WM8400_ADC_COMPMODE_WIDTH 1 /* ADC_COMPMODE */
+#define WM8400_LOOPBACK 0x0001 /* LOOPBACK */
+#define WM8400_LOOPBACK_MASK 0x0001 /* LOOPBACK */
+#define WM8400_LOOPBACK_SHIFT 0 /* LOOPBACK */
+#define WM8400_LOOPBACK_WIDTH 1 /* LOOPBACK */
+
+/*
+ * R7 (0x07) - Clocking (1)
+ */
+#define WM8400_TOCLK_RATE 0x8000 /* TOCLK_RATE */
+#define WM8400_TOCLK_RATE_MASK 0x8000 /* TOCLK_RATE */
+#define WM8400_TOCLK_RATE_SHIFT 15 /* TOCLK_RATE */
+#define WM8400_TOCLK_RATE_WIDTH 1 /* TOCLK_RATE */
+#define WM8400_TOCLK_ENA 0x4000 /* TOCLK_ENA */
+#define WM8400_TOCLK_ENA_MASK 0x4000 /* TOCLK_ENA */
+#define WM8400_TOCLK_ENA_SHIFT 14 /* TOCLK_ENA */
+#define WM8400_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */
+#define WM8400_OPCLKDIV_MASK 0x1E00 /* OPCLKDIV - [12:9] */
+#define WM8400_OPCLKDIV_SHIFT 9 /* OPCLKDIV - [12:9] */
+#define WM8400_OPCLKDIV_WIDTH 4 /* OPCLKDIV - [12:9] */
+#define WM8400_DCLKDIV_MASK 0x01C0 /* DCLKDIV - [8:6] */
+#define WM8400_DCLKDIV_SHIFT 6 /* DCLKDIV - [8:6] */
+#define WM8400_DCLKDIV_WIDTH 3 /* DCLKDIV - [8:6] */
+#define WM8400_BCLK_DIV_MASK 0x001E /* BCLK_DIV - [4:1] */
+#define WM8400_BCLK_DIV_SHIFT 1 /* BCLK_DIV - [4:1] */
+#define WM8400_BCLK_DIV_WIDTH 4 /* BCLK_DIV - [4:1] */
+
+/*
+ * R8 (0x08) - Clocking (2)
+ */
+#define WM8400_MCLK_SRC 0x8000 /* MCLK_SRC */
+#define WM8400_MCLK_SRC_MASK 0x8000 /* MCLK_SRC */
+#define WM8400_MCLK_SRC_SHIFT 15 /* MCLK_SRC */
+#define WM8400_MCLK_SRC_WIDTH 1 /* MCLK_SRC */
+#define WM8400_SYSCLK_SRC 0x4000 /* SYSCLK_SRC */
+#define WM8400_SYSCLK_SRC_MASK 0x4000 /* SYSCLK_SRC */
+#define WM8400_SYSCLK_SRC_SHIFT 14 /* SYSCLK_SRC */
+#define WM8400_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */
+#define WM8400_CLK_FORCE 0x2000 /* CLK_FORCE */
+#define WM8400_CLK_FORCE_MASK 0x2000 /* CLK_FORCE */
+#define WM8400_CLK_FORCE_SHIFT 13 /* CLK_FORCE */
+#define WM8400_CLK_FORCE_WIDTH 1 /* CLK_FORCE */
+#define WM8400_MCLK_DIV_MASK 0x1800 /* MCLK_DIV - [12:11] */
+#define WM8400_MCLK_DIV_SHIFT 11 /* MCLK_DIV - [12:11] */
+#define WM8400_MCLK_DIV_WIDTH 2 /* MCLK_DIV - [12:11] */
+#define WM8400_MCLK_INV 0x0400 /* MCLK_INV */
+#define WM8400_MCLK_INV_MASK 0x0400 /* MCLK_INV */
+#define WM8400_MCLK_INV_SHIFT 10 /* MCLK_INV */
+#define WM8400_MCLK_INV_WIDTH 1 /* MCLK_INV */
+#define WM8400_ADC_CLKDIV_MASK 0x00E0 /* ADC_CLKDIV - [7:5] */
+#define WM8400_ADC_CLKDIV_SHIFT 5 /* ADC_CLKDIV - [7:5] */
+#define WM8400_ADC_CLKDIV_WIDTH 3 /* ADC_CLKDIV - [7:5] */
+#define WM8400_DAC_CLKDIV_MASK 0x001C /* DAC_CLKDIV - [4:2] */
+#define WM8400_DAC_CLKDIV_SHIFT 2 /* DAC_CLKDIV - [4:2] */
+#define WM8400_DAC_CLKDIV_WIDTH 3 /* DAC_CLKDIV - [4:2] */
+
+/*
+ * R9 (0x09) - Audio Interface (3)
+ */
+#define WM8400_AIF_MSTR1 0x8000 /* AIF_MSTR1 */
+#define WM8400_AIF_MSTR1_MASK 0x8000 /* AIF_MSTR1 */
+#define WM8400_AIF_MSTR1_SHIFT 15 /* AIF_MSTR1 */
+#define WM8400_AIF_MSTR1_WIDTH 1 /* AIF_MSTR1 */
+#define WM8400_AIF_MSTR2 0x4000 /* AIF_MSTR2 */
+#define WM8400_AIF_MSTR2_MASK 0x4000 /* AIF_MSTR2 */
+#define WM8400_AIF_MSTR2_SHIFT 14 /* AIF_MSTR2 */
+#define WM8400_AIF_MSTR2_WIDTH 1 /* AIF_MSTR2 */
+#define WM8400_AIF_SEL 0x2000 /* AIF_SEL */
+#define WM8400_AIF_SEL_MASK 0x2000 /* AIF_SEL */
+#define WM8400_AIF_SEL_SHIFT 13 /* AIF_SEL */
+#define WM8400_AIF_SEL_WIDTH 1 /* AIF_SEL */
+#define WM8400_ADCLRC_DIR 0x0800 /* ADCLRC_DIR */
+#define WM8400_ADCLRC_DIR_MASK 0x0800 /* ADCLRC_DIR */
+#define WM8400_ADCLRC_DIR_SHIFT 11 /* ADCLRC_DIR */
+#define WM8400_ADCLRC_DIR_WIDTH 1 /* ADCLRC_DIR */
+#define WM8400_ADCLRC_RATE_MASK 0x07FF /* ADCLRC_RATE - [10:0] */
+#define WM8400_ADCLRC_RATE_SHIFT 0 /* ADCLRC_RATE - [10:0] */
+#define WM8400_ADCLRC_RATE_WIDTH 11 /* ADCLRC_RATE - [10:0] */
+
+/*
+ * R10 (0x0A) - Audio Interface (4)
+ */
+#define WM8400_ALRCGPIO1 0x8000 /* ALRCGPIO1 */
+#define WM8400_ALRCGPIO1_MASK 0x8000 /* ALRCGPIO1 */
+#define WM8400_ALRCGPIO1_SHIFT 15 /* ALRCGPIO1 */
+#define WM8400_ALRCGPIO1_WIDTH 1 /* ALRCGPIO1 */
+#define WM8400_ALRCBGPIO6 0x4000 /* ALRCBGPIO6 */
+#define WM8400_ALRCBGPIO6_MASK 0x4000 /* ALRCBGPIO6 */
+#define WM8400_ALRCBGPIO6_SHIFT 14 /* ALRCBGPIO6 */
+#define WM8400_ALRCBGPIO6_WIDTH 1 /* ALRCBGPIO6 */
+#define WM8400_AIF_TRIS 0x2000 /* AIF_TRIS */
+#define WM8400_AIF_TRIS_MASK 0x2000 /* AIF_TRIS */
+#define WM8400_AIF_TRIS_SHIFT 13 /* AIF_TRIS */
+#define WM8400_AIF_TRIS_WIDTH 1 /* AIF_TRIS */
+#define WM8400_DACLRC_DIR 0x0800 /* DACLRC_DIR */
+#define WM8400_DACLRC_DIR_MASK 0x0800 /* DACLRC_DIR */
+#define WM8400_DACLRC_DIR_SHIFT 11 /* DACLRC_DIR */
+#define WM8400_DACLRC_DIR_WIDTH 1 /* DACLRC_DIR */
+#define WM8400_DACLRC_RATE_MASK 0x07FF /* DACLRC_RATE - [10:0] */
+#define WM8400_DACLRC_RATE_SHIFT 0 /* DACLRC_RATE - [10:0] */
+#define WM8400_DACLRC_RATE_WIDTH 11 /* DACLRC_RATE - [10:0] */
+
+/*
+ * R11 (0x0B) - DAC CTRL
+ */
+#define WM8400_DAC_SDMCLK_RATE 0x2000 /* DAC_SDMCLK_RATE */
+#define WM8400_DAC_SDMCLK_RATE_MASK 0x2000 /* DAC_SDMCLK_RATE */
+#define WM8400_DAC_SDMCLK_RATE_SHIFT 13 /* DAC_SDMCLK_RATE */
+#define WM8400_DAC_SDMCLK_RATE_WIDTH 1 /* DAC_SDMCLK_RATE */
+#define WM8400_AIF_LRCLKRATE 0x0400 /* AIF_LRCLKRATE */
+#define WM8400_AIF_LRCLKRATE_MASK 0x0400 /* AIF_LRCLKRATE */
+#define WM8400_AIF_LRCLKRATE_SHIFT 10 /* AIF_LRCLKRATE */
+#define WM8400_AIF_LRCLKRATE_WIDTH 1 /* AIF_LRCLKRATE */
+#define WM8400_DAC_MONO 0x0200 /* DAC_MONO */
+#define WM8400_DAC_MONO_MASK 0x0200 /* DAC_MONO */
+#define WM8400_DAC_MONO_SHIFT 9 /* DAC_MONO */
+#define WM8400_DAC_MONO_WIDTH 1 /* DAC_MONO */
+#define WM8400_DAC_SB_FILT 0x0100 /* DAC_SB_FILT */
+#define WM8400_DAC_SB_FILT_MASK 0x0100 /* DAC_SB_FILT */
+#define WM8400_DAC_SB_FILT_SHIFT 8 /* DAC_SB_FILT */
+#define WM8400_DAC_SB_FILT_WIDTH 1 /* DAC_SB_FILT */
+#define WM8400_DAC_MUTERATE 0x0080 /* DAC_MUTERATE */
+#define WM8400_DAC_MUTERATE_MASK 0x0080 /* DAC_MUTERATE */
+#define WM8400_DAC_MUTERATE_SHIFT 7 /* DAC_MUTERATE */
+#define WM8400_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */
+#define WM8400_DAC_MUTEMODE 0x0040 /* DAC_MUTEMODE */
+#define WM8400_DAC_MUTEMODE_MASK 0x0040 /* DAC_MUTEMODE */
+#define WM8400_DAC_MUTEMODE_SHIFT 6 /* DAC_MUTEMODE */
+#define WM8400_DAC_MUTEMODE_WIDTH 1 /* DAC_MUTEMODE */
+#define WM8400_DEEMP_MASK 0x0030 /* DEEMP - [5:4] */
+#define WM8400_DEEMP_SHIFT 4 /* DEEMP - [5:4] */
+#define WM8400_DEEMP_WIDTH 2 /* DEEMP - [5:4] */
+#define WM8400_DAC_MUTE 0x0004 /* DAC_MUTE */
+#define WM8400_DAC_MUTE_MASK 0x0004 /* DAC_MUTE */
+#define WM8400_DAC_MUTE_SHIFT 2 /* DAC_MUTE */
+#define WM8400_DAC_MUTE_WIDTH 1 /* DAC_MUTE */
+#define WM8400_DACL_DATINV 0x0002 /* DACL_DATINV */
+#define WM8400_DACL_DATINV_MASK 0x0002 /* DACL_DATINV */
+#define WM8400_DACL_DATINV_SHIFT 1 /* DACL_DATINV */
+#define WM8400_DACL_DATINV_WIDTH 1 /* DACL_DATINV */
+#define WM8400_DACR_DATINV 0x0001 /* DACR_DATINV */
+#define WM8400_DACR_DATINV_MASK 0x0001 /* DACR_DATINV */
+#define WM8400_DACR_DATINV_SHIFT 0 /* DACR_DATINV */
+#define WM8400_DACR_DATINV_WIDTH 1 /* DACR_DATINV */
+
+/*
+ * R12 (0x0C) - Left DAC Digital Volume
+ */
+#define WM8400_DAC_VU 0x0100 /* DAC_VU */
+#define WM8400_DAC_VU_MASK 0x0100 /* DAC_VU */
+#define WM8400_DAC_VU_SHIFT 8 /* DAC_VU */
+#define WM8400_DAC_VU_WIDTH 1 /* DAC_VU */
+#define WM8400_DACL_VOL_MASK 0x00FF /* DACL_VOL - [7:0] */
+#define WM8400_DACL_VOL_SHIFT 0 /* DACL_VOL - [7:0] */
+#define WM8400_DACL_VOL_WIDTH 8 /* DACL_VOL - [7:0] */
+
+/*
+ * R13 (0x0D) - Right DAC Digital Volume
+ */
+#define WM8400_DAC_VU 0x0100 /* DAC_VU */
+#define WM8400_DAC_VU_MASK 0x0100 /* DAC_VU */
+#define WM8400_DAC_VU_SHIFT 8 /* DAC_VU */
+#define WM8400_DAC_VU_WIDTH 1 /* DAC_VU */
+#define WM8400_DACR_VOL_MASK 0x00FF /* DACR_VOL - [7:0] */
+#define WM8400_DACR_VOL_SHIFT 0 /* DACR_VOL - [7:0] */
+#define WM8400_DACR_VOL_WIDTH 8 /* DACR_VOL - [7:0] */
+
+/*
+ * R14 (0x0E) - Digital Side Tone
+ */
+#define WM8400_ADCL_DAC_SVOL_MASK 0x1E00 /* ADCL_DAC_SVOL - [12:9] */
+#define WM8400_ADCL_DAC_SVOL_SHIFT 9 /* ADCL_DAC_SVOL - [12:9] */
+#define WM8400_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [12:9] */
+#define WM8400_ADCR_DAC_SVOL_MASK 0x01E0 /* ADCR_DAC_SVOL - [8:5] */
+#define WM8400_ADCR_DAC_SVOL_SHIFT 5 /* ADCR_DAC_SVOL - [8:5] */
+#define WM8400_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [8:5] */
+#define WM8400_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */
+#define WM8400_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */
+#define WM8400_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */
+#define WM8400_ADC_TO_DACR_MASK 0x0003 /* ADC_TO_DACR - [1:0] */
+#define WM8400_ADC_TO_DACR_SHIFT 0 /* ADC_TO_DACR - [1:0] */
+#define WM8400_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [1:0] */
+
+/*
+ * R15 (0x0F) - ADC CTRL
+ */
+#define WM8400_ADC_HPF_ENA 0x0100 /* ADC_HPF_ENA */
+#define WM8400_ADC_HPF_ENA_MASK 0x0100 /* ADC_HPF_ENA */
+#define WM8400_ADC_HPF_ENA_SHIFT 8 /* ADC_HPF_ENA */
+#define WM8400_ADC_HPF_ENA_WIDTH 1 /* ADC_HPF_ENA */
+#define WM8400_ADC_HPF_CUT_MASK 0x0060 /* ADC_HPF_CUT - [6:5] */
+#define WM8400_ADC_HPF_CUT_SHIFT 5 /* ADC_HPF_CUT - [6:5] */
+#define WM8400_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [6:5] */
+#define WM8400_ADCL_DATINV 0x0002 /* ADCL_DATINV */
+#define WM8400_ADCL_DATINV_MASK 0x0002 /* ADCL_DATINV */
+#define WM8400_ADCL_DATINV_SHIFT 1 /* ADCL_DATINV */
+#define WM8400_ADCL_DATINV_WIDTH 1 /* ADCL_DATINV */
+#define WM8400_ADCR_DATINV 0x0001 /* ADCR_DATINV */
+#define WM8400_ADCR_DATINV_MASK 0x0001 /* ADCR_DATINV */
+#define WM8400_ADCR_DATINV_SHIFT 0 /* ADCR_DATINV */
+#define WM8400_ADCR_DATINV_WIDTH 1 /* ADCR_DATINV */
+
+/*
+ * R16 (0x10) - Left ADC Digital Volume
+ */
+#define WM8400_ADC_VU 0x0100 /* ADC_VU */
+#define WM8400_ADC_VU_MASK 0x0100 /* ADC_VU */
+#define WM8400_ADC_VU_SHIFT 8 /* ADC_VU */
+#define WM8400_ADC_VU_WIDTH 1 /* ADC_VU */
+#define WM8400_ADCL_VOL_MASK 0x00FF /* ADCL_VOL - [7:0] */
+#define WM8400_ADCL_VOL_SHIFT 0 /* ADCL_VOL - [7:0] */
+#define WM8400_ADCL_VOL_WIDTH 8 /* ADCL_VOL - [7:0] */
+
+/*
+ * R17 (0x11) - Right ADC Digital Volume
+ */
+#define WM8400_ADC_VU 0x0100 /* ADC_VU */
+#define WM8400_ADC_VU_MASK 0x0100 /* ADC_VU */
+#define WM8400_ADC_VU_SHIFT 8 /* ADC_VU */
+#define WM8400_ADC_VU_WIDTH 1 /* ADC_VU */
+#define WM8400_ADCR_VOL_MASK 0x00FF /* ADCR_VOL - [7:0] */
+#define WM8400_ADCR_VOL_SHIFT 0 /* ADCR_VOL - [7:0] */
+#define WM8400_ADCR_VOL_WIDTH 8 /* ADCR_VOL - [7:0] */
+
+/*
+ * R24 (0x18) - Left Line Input 1&2 Volume
+ */
+#define WM8400_IPVU 0x0100 /* IPVU */
+#define WM8400_IPVU_MASK 0x0100 /* IPVU */
+#define WM8400_IPVU_SHIFT 8 /* IPVU */
+#define WM8400_IPVU_WIDTH 1 /* IPVU */
+#define WM8400_LI12MUTE 0x0080 /* LI12MUTE */
+#define WM8400_LI12MUTE_MASK 0x0080 /* LI12MUTE */
+#define WM8400_LI12MUTE_SHIFT 7 /* LI12MUTE */
+#define WM8400_LI12MUTE_WIDTH 1 /* LI12MUTE */
+#define WM8400_LI12ZC 0x0040 /* LI12ZC */
+#define WM8400_LI12ZC_MASK 0x0040 /* LI12ZC */
+#define WM8400_LI12ZC_SHIFT 6 /* LI12ZC */
+#define WM8400_LI12ZC_WIDTH 1 /* LI12ZC */
+#define WM8400_LIN12VOL_MASK 0x001F /* LIN12VOL - [4:0] */
+#define WM8400_LIN12VOL_SHIFT 0 /* LIN12VOL - [4:0] */
+#define WM8400_LIN12VOL_WIDTH 5 /* LIN12VOL - [4:0] */
+
+/*
+ * R25 (0x19) - Left Line Input 3&4 Volume
+ */
+#define WM8400_IPVU 0x0100 /* IPVU */
+#define WM8400_IPVU_MASK 0x0100 /* IPVU */
+#define WM8400_IPVU_SHIFT 8 /* IPVU */
+#define WM8400_IPVU_WIDTH 1 /* IPVU */
+#define WM8400_LI34MUTE 0x0080 /* LI34MUTE */
+#define WM8400_LI34MUTE_MASK 0x0080 /* LI34MUTE */
+#define WM8400_LI34MUTE_SHIFT 7 /* LI34MUTE */
+#define WM8400_LI34MUTE_WIDTH 1 /* LI34MUTE */
+#define WM8400_LI34ZC 0x0040 /* LI34ZC */
+#define WM8400_LI34ZC_MASK 0x0040 /* LI34ZC */
+#define WM8400_LI34ZC_SHIFT 6 /* LI34ZC */
+#define WM8400_LI34ZC_WIDTH 1 /* LI34ZC */
+#define WM8400_LIN34VOL_MASK 0x001F /* LIN34VOL - [4:0] */
+#define WM8400_LIN34VOL_SHIFT 0 /* LIN34VOL - [4:0] */
+#define WM8400_LIN34VOL_WIDTH 5 /* LIN34VOL - [4:0] */
+
+/*
+ * R26 (0x1A) - Right Line Input 1&2 Volume
+ */
+#define WM8400_IPVU 0x0100 /* IPVU */
+#define WM8400_IPVU_MASK 0x0100 /* IPVU */
+#define WM8400_IPVU_SHIFT 8 /* IPVU */
+#define WM8400_IPVU_WIDTH 1 /* IPVU */
+#define WM8400_RI12MUTE 0x0080 /* RI12MUTE */
+#define WM8400_RI12MUTE_MASK 0x0080 /* RI12MUTE */
+#define WM8400_RI12MUTE_SHIFT 7 /* RI12MUTE */
+#define WM8400_RI12MUTE_WIDTH 1 /* RI12MUTE */
+#define WM8400_RI12ZC 0x0040 /* RI12ZC */
+#define WM8400_RI12ZC_MASK 0x0040 /* RI12ZC */
+#define WM8400_RI12ZC_SHIFT 6 /* RI12ZC */
+#define WM8400_RI12ZC_WIDTH 1 /* RI12ZC */
+#define WM8400_RIN12VOL_MASK 0x001F /* RIN12VOL - [4:0] */
+#define WM8400_RIN12VOL_SHIFT 0 /* RIN12VOL - [4:0] */
+#define WM8400_RIN12VOL_WIDTH 5 /* RIN12VOL - [4:0] */
+
+/*
+ * R27 (0x1B) - Right Line Input 3&4 Volume
+ */
+#define WM8400_IPVU 0x0100 /* IPVU */
+#define WM8400_IPVU_MASK 0x0100 /* IPVU */
+#define WM8400_IPVU_SHIFT 8 /* IPVU */
+#define WM8400_IPVU_WIDTH 1 /* IPVU */
+#define WM8400_RI34MUTE 0x0080 /* RI34MUTE */
+#define WM8400_RI34MUTE_MASK 0x0080 /* RI34MUTE */
+#define WM8400_RI34MUTE_SHIFT 7 /* RI34MUTE */
+#define WM8400_RI34MUTE_WIDTH 1 /* RI34MUTE */
+#define WM8400_RI34ZC 0x0040 /* RI34ZC */
+#define WM8400_RI34ZC_MASK 0x0040 /* RI34ZC */
+#define WM8400_RI34ZC_SHIFT 6 /* RI34ZC */
+#define WM8400_RI34ZC_WIDTH 1 /* RI34ZC */
+#define WM8400_RIN34VOL_MASK 0x001F /* RIN34VOL - [4:0] */
+#define WM8400_RIN34VOL_SHIFT 0 /* RIN34VOL - [4:0] */
+#define WM8400_RIN34VOL_WIDTH 5 /* RIN34VOL - [4:0] */
+
+/*
+ * R28 (0x1C) - Left Output Volume
+ */
+#define WM8400_OPVU 0x0100 /* OPVU */
+#define WM8400_OPVU_MASK 0x0100 /* OPVU */
+#define WM8400_OPVU_SHIFT 8 /* OPVU */
+#define WM8400_OPVU_WIDTH 1 /* OPVU */
+#define WM8400_LOZC 0x0080 /* LOZC */
+#define WM8400_LOZC_MASK 0x0080 /* LOZC */
+#define WM8400_LOZC_SHIFT 7 /* LOZC */
+#define WM8400_LOZC_WIDTH 1 /* LOZC */
+#define WM8400_LOUTVOL_MASK 0x007F /* LOUTVOL - [6:0] */
+#define WM8400_LOUTVOL_SHIFT 0 /* LOUTVOL - [6:0] */
+#define WM8400_LOUTVOL_WIDTH 7 /* LOUTVOL - [6:0] */
+
+/*
+ * R29 (0x1D) - Right Output Volume
+ */
+#define WM8400_OPVU 0x0100 /* OPVU */
+#define WM8400_OPVU_MASK 0x0100 /* OPVU */
+#define WM8400_OPVU_SHIFT 8 /* OPVU */
+#define WM8400_OPVU_WIDTH 1 /* OPVU */
+#define WM8400_ROZC 0x0080 /* ROZC */
+#define WM8400_ROZC_MASK 0x0080 /* ROZC */
+#define WM8400_ROZC_SHIFT 7 /* ROZC */
+#define WM8400_ROZC_WIDTH 1 /* ROZC */
+#define WM8400_ROUTVOL_MASK 0x007F /* ROUTVOL - [6:0] */
+#define WM8400_ROUTVOL_SHIFT 0 /* ROUTVOL - [6:0] */
+#define WM8400_ROUTVOL_WIDTH 7 /* ROUTVOL - [6:0] */
+
+/*
+ * R30 (0x1E) - Line Outputs Volume
+ */
+#define WM8400_LONMUTE 0x0040 /* LONMUTE */
+#define WM8400_LONMUTE_MASK 0x0040 /* LONMUTE */
+#define WM8400_LONMUTE_SHIFT 6 /* LONMUTE */
+#define WM8400_LONMUTE_WIDTH 1 /* LONMUTE */
+#define WM8400_LOPMUTE 0x0020 /* LOPMUTE */
+#define WM8400_LOPMUTE_MASK 0x0020 /* LOPMUTE */
+#define WM8400_LOPMUTE_SHIFT 5 /* LOPMUTE */
+#define WM8400_LOPMUTE_WIDTH 1 /* LOPMUTE */
+#define WM8400_LOATTN 0x0010 /* LOATTN */
+#define WM8400_LOATTN_MASK 0x0010 /* LOATTN */
+#define WM8400_LOATTN_SHIFT 4 /* LOATTN */
+#define WM8400_LOATTN_WIDTH 1 /* LOATTN */
+#define WM8400_RONMUTE 0x0004 /* RONMUTE */
+#define WM8400_RONMUTE_MASK 0x0004 /* RONMUTE */
+#define WM8400_RONMUTE_SHIFT 2 /* RONMUTE */
+#define WM8400_RONMUTE_WIDTH 1 /* RONMUTE */
+#define WM8400_ROPMUTE 0x0002 /* ROPMUTE */
+#define WM8400_ROPMUTE_MASK 0x0002 /* ROPMUTE */
+#define WM8400_ROPMUTE_SHIFT 1 /* ROPMUTE */
+#define WM8400_ROPMUTE_WIDTH 1 /* ROPMUTE */
+#define WM8400_ROATTN 0x0001 /* ROATTN */
+#define WM8400_ROATTN_MASK 0x0001 /* ROATTN */
+#define WM8400_ROATTN_SHIFT 0 /* ROATTN */
+#define WM8400_ROATTN_WIDTH 1 /* ROATTN */
+
+/*
+ * R31 (0x1F) - Out3/4 Volume
+ */
+#define WM8400_OUT3MUTE 0x0020 /* OUT3MUTE */
+#define WM8400_OUT3MUTE_MASK 0x0020 /* OUT3MUTE */
+#define WM8400_OUT3MUTE_SHIFT 5 /* OUT3MUTE */
+#define WM8400_OUT3MUTE_WIDTH 1 /* OUT3MUTE */
+#define WM8400_OUT3ATTN 0x0010 /* OUT3ATTN */
+#define WM8400_OUT3ATTN_MASK 0x0010 /* OUT3ATTN */
+#define WM8400_OUT3ATTN_SHIFT 4 /* OUT3ATTN */
+#define WM8400_OUT3ATTN_WIDTH 1 /* OUT3ATTN */
+#define WM8400_OUT4MUTE 0x0002 /* OUT4MUTE */
+#define WM8400_OUT4MUTE_MASK 0x0002 /* OUT4MUTE */
+#define WM8400_OUT4MUTE_SHIFT 1 /* OUT4MUTE */
+#define WM8400_OUT4MUTE_WIDTH 1 /* OUT4MUTE */
+#define WM8400_OUT4ATTN 0x0001 /* OUT4ATTN */
+#define WM8400_OUT4ATTN_MASK 0x0001 /* OUT4ATTN */
+#define WM8400_OUT4ATTN_SHIFT 0 /* OUT4ATTN */
+#define WM8400_OUT4ATTN_WIDTH 1 /* OUT4ATTN */
+
+/*
+ * R32 (0x20) - Left OPGA Volume
+ */
+#define WM8400_OPVU 0x0100 /* OPVU */
+#define WM8400_OPVU_MASK 0x0100 /* OPVU */
+#define WM8400_OPVU_SHIFT 8 /* OPVU */
+#define WM8400_OPVU_WIDTH 1 /* OPVU */
+#define WM8400_LOPGAZC 0x0080 /* LOPGAZC */
+#define WM8400_LOPGAZC_MASK 0x0080 /* LOPGAZC */
+#define WM8400_LOPGAZC_SHIFT 7 /* LOPGAZC */
+#define WM8400_LOPGAZC_WIDTH 1 /* LOPGAZC */
+#define WM8400_LOPGAVOL_MASK 0x007F /* LOPGAVOL - [6:0] */
+#define WM8400_LOPGAVOL_SHIFT 0 /* LOPGAVOL - [6:0] */
+#define WM8400_LOPGAVOL_WIDTH 7 /* LOPGAVOL - [6:0] */
+
+/*
+ * R33 (0x21) - Right OPGA Volume
+ */
+#define WM8400_OPVU 0x0100 /* OPVU */
+#define WM8400_OPVU_MASK 0x0100 /* OPVU */
+#define WM8400_OPVU_SHIFT 8 /* OPVU */
+#define WM8400_OPVU_WIDTH 1 /* OPVU */
+#define WM8400_ROPGAZC 0x0080 /* ROPGAZC */
+#define WM8400_ROPGAZC_MASK 0x0080 /* ROPGAZC */
+#define WM8400_ROPGAZC_SHIFT 7 /* ROPGAZC */
+#define WM8400_ROPGAZC_WIDTH 1 /* ROPGAZC */
+#define WM8400_ROPGAVOL_MASK 0x007F /* ROPGAVOL - [6:0] */
+#define WM8400_ROPGAVOL_SHIFT 0 /* ROPGAVOL - [6:0] */
+#define WM8400_ROPGAVOL_WIDTH 7 /* ROPGAVOL - [6:0] */
+
+/*
+ * R34 (0x22) - Speaker Volume
+ */
+#define WM8400_SPKATTN_MASK 0x0003 /* SPKATTN - [1:0] */
+#define WM8400_SPKATTN_SHIFT 0 /* SPKATTN - [1:0] */
+#define WM8400_SPKATTN_WIDTH 2 /* SPKATTN - [1:0] */
+
+/*
+ * R35 (0x23) - ClassD1
+ */
+#define WM8400_CDMODE 0x0100 /* CDMODE */
+#define WM8400_CDMODE_MASK 0x0100 /* CDMODE */
+#define WM8400_CDMODE_SHIFT 8 /* CDMODE */
+#define WM8400_CDMODE_WIDTH 1 /* CDMODE */
+#define WM8400_CLASSD_CLK_SEL 0x0080 /* CLASSD_CLK_SEL */
+#define WM8400_CLASSD_CLK_SEL_MASK 0x0080 /* CLASSD_CLK_SEL */
+#define WM8400_CLASSD_CLK_SEL_SHIFT 7 /* CLASSD_CLK_SEL */
+#define WM8400_CLASSD_CLK_SEL_WIDTH 1 /* CLASSD_CLK_SEL */
+#define WM8400_CD_SRCTRL 0x0040 /* CD_SRCTRL */
+#define WM8400_CD_SRCTRL_MASK 0x0040 /* CD_SRCTRL */
+#define WM8400_CD_SRCTRL_SHIFT 6 /* CD_SRCTRL */
+#define WM8400_CD_SRCTRL_WIDTH 1 /* CD_SRCTRL */
+#define WM8400_SPKNOPOP 0x0020 /* SPKNOPOP */
+#define WM8400_SPKNOPOP_MASK 0x0020 /* SPKNOPOP */
+#define WM8400_SPKNOPOP_SHIFT 5 /* SPKNOPOP */
+#define WM8400_SPKNOPOP_WIDTH 1 /* SPKNOPOP */
+#define WM8400_DBLERATE 0x0010 /* DBLERATE */
+#define WM8400_DBLERATE_MASK 0x0010 /* DBLERATE */
+#define WM8400_DBLERATE_SHIFT 4 /* DBLERATE */
+#define WM8400_DBLERATE_WIDTH 1 /* DBLERATE */
+#define WM8400_LOOPTEST 0x0008 /* LOOPTEST */
+#define WM8400_LOOPTEST_MASK 0x0008 /* LOOPTEST */
+#define WM8400_LOOPTEST_SHIFT 3 /* LOOPTEST */
+#define WM8400_LOOPTEST_WIDTH 1 /* LOOPTEST */
+#define WM8400_HALFABBIAS 0x0004 /* HALFABBIAS */
+#define WM8400_HALFABBIAS_MASK 0x0004 /* HALFABBIAS */
+#define WM8400_HALFABBIAS_SHIFT 2 /* HALFABBIAS */
+#define WM8400_HALFABBIAS_WIDTH 1 /* HALFABBIAS */
+#define WM8400_TRIDEL_MASK 0x0003 /* TRIDEL - [1:0] */
+#define WM8400_TRIDEL_SHIFT 0 /* TRIDEL - [1:0] */
+#define WM8400_TRIDEL_WIDTH 2 /* TRIDEL - [1:0] */
+
+/*
+ * R37 (0x25) - ClassD3
+ */
+#define WM8400_DCGAIN_MASK 0x0038 /* DCGAIN - [5:3] */
+#define WM8400_DCGAIN_SHIFT 3 /* DCGAIN - [5:3] */
+#define WM8400_DCGAIN_WIDTH 3 /* DCGAIN - [5:3] */
+#define WM8400_ACGAIN_MASK 0x0007 /* ACGAIN - [2:0] */
+#define WM8400_ACGAIN_SHIFT 0 /* ACGAIN - [2:0] */
+#define WM8400_ACGAIN_WIDTH 3 /* ACGAIN - [2:0] */
+
+/*
+ * R39 (0x27) - Input Mixer1
+ */
+#define WM8400_AINLMODE_MASK 0x000C /* AINLMODE - [3:2] */
+#define WM8400_AINLMODE_SHIFT 2 /* AINLMODE - [3:2] */
+#define WM8400_AINLMODE_WIDTH 2 /* AINLMODE - [3:2] */
+#define WM8400_AINRMODE_MASK 0x0003 /* AINRMODE - [1:0] */
+#define WM8400_AINRMODE_SHIFT 0 /* AINRMODE - [1:0] */
+#define WM8400_AINRMODE_WIDTH 2 /* AINRMODE - [1:0] */
+
+/*
+ * R40 (0x28) - Input Mixer2
+ */
+#define WM8400_LMP4 0x0080 /* LMP4 */
+#define WM8400_LMP4_MASK 0x0080 /* LMP4 */
+#define WM8400_LMP4_SHIFT 7 /* LMP4 */
+#define WM8400_LMP4_WIDTH 1 /* LMP4 */
+#define WM8400_LMN3 0x0040 /* LMN3 */
+#define WM8400_LMN3_MASK 0x0040 /* LMN3 */
+#define WM8400_LMN3_SHIFT 6 /* LMN3 */
+#define WM8400_LMN3_WIDTH 1 /* LMN3 */
+#define WM8400_LMP2 0x0020 /* LMP2 */
+#define WM8400_LMP2_MASK 0x0020 /* LMP2 */
+#define WM8400_LMP2_SHIFT 5 /* LMP2 */
+#define WM8400_LMP2_WIDTH 1 /* LMP2 */
+#define WM8400_LMN1 0x0010 /* LMN1 */
+#define WM8400_LMN1_MASK 0x0010 /* LMN1 */
+#define WM8400_LMN1_SHIFT 4 /* LMN1 */
+#define WM8400_LMN1_WIDTH 1 /* LMN1 */
+#define WM8400_RMP4 0x0008 /* RMP4 */
+#define WM8400_RMP4_MASK 0x0008 /* RMP4 */
+#define WM8400_RMP4_SHIFT 3 /* RMP4 */
+#define WM8400_RMP4_WIDTH 1 /* RMP4 */
+#define WM8400_RMN3 0x0004 /* RMN3 */
+#define WM8400_RMN3_MASK 0x0004 /* RMN3 */
+#define WM8400_RMN3_SHIFT 2 /* RMN3 */
+#define WM8400_RMN3_WIDTH 1 /* RMN3 */
+#define WM8400_RMP2 0x0002 /* RMP2 */
+#define WM8400_RMP2_MASK 0x0002 /* RMP2 */
+#define WM8400_RMP2_SHIFT 1 /* RMP2 */
+#define WM8400_RMP2_WIDTH 1 /* RMP2 */
+#define WM8400_RMN1 0x0001 /* RMN1 */
+#define WM8400_RMN1_MASK 0x0001 /* RMN1 */
+#define WM8400_RMN1_SHIFT 0 /* RMN1 */
+#define WM8400_RMN1_WIDTH 1 /* RMN1 */
+
+/*
+ * R41 (0x29) - Input Mixer3
+ */
+#define WM8400_L34MNB 0x0100 /* L34MNB */
+#define WM8400_L34MNB_MASK 0x0100 /* L34MNB */
+#define WM8400_L34MNB_SHIFT 8 /* L34MNB */
+#define WM8400_L34MNB_WIDTH 1 /* L34MNB */
+#define WM8400_L34MNBST 0x0080 /* L34MNBST */
+#define WM8400_L34MNBST_MASK 0x0080 /* L34MNBST */
+#define WM8400_L34MNBST_SHIFT 7 /* L34MNBST */
+#define WM8400_L34MNBST_WIDTH 1 /* L34MNBST */
+#define WM8400_L12MNB 0x0020 /* L12MNB */
+#define WM8400_L12MNB_MASK 0x0020 /* L12MNB */
+#define WM8400_L12MNB_SHIFT 5 /* L12MNB */
+#define WM8400_L12MNB_WIDTH 1 /* L12MNB */
+#define WM8400_L12MNBST 0x0010 /* L12MNBST */
+#define WM8400_L12MNBST_MASK 0x0010 /* L12MNBST */
+#define WM8400_L12MNBST_SHIFT 4 /* L12MNBST */
+#define WM8400_L12MNBST_WIDTH 1 /* L12MNBST */
+#define WM8400_LDBVOL_MASK 0x0007 /* LDBVOL - [2:0] */
+#define WM8400_LDBVOL_SHIFT 0 /* LDBVOL - [2:0] */
+#define WM8400_LDBVOL_WIDTH 3 /* LDBVOL - [2:0] */
+
+/*
+ * R42 (0x2A) - Input Mixer4
+ */
+#define WM8400_R34MNB 0x0100 /* R34MNB */
+#define WM8400_R34MNB_MASK 0x0100 /* R34MNB */
+#define WM8400_R34MNB_SHIFT 8 /* R34MNB */
+#define WM8400_R34MNB_WIDTH 1 /* R34MNB */
+#define WM8400_R34MNBST 0x0080 /* R34MNBST */
+#define WM8400_R34MNBST_MASK 0x0080 /* R34MNBST */
+#define WM8400_R34MNBST_SHIFT 7 /* R34MNBST */
+#define WM8400_R34MNBST_WIDTH 1 /* R34MNBST */
+#define WM8400_R12MNB 0x0020 /* R12MNB */
+#define WM8400_R12MNB_MASK 0x0020 /* R12MNB */
+#define WM8400_R12MNB_SHIFT 5 /* R12MNB */
+#define WM8400_R12MNB_WIDTH 1 /* R12MNB */
+#define WM8400_R12MNBST 0x0010 /* R12MNBST */
+#define WM8400_R12MNBST_MASK 0x0010 /* R12MNBST */
+#define WM8400_R12MNBST_SHIFT 4 /* R12MNBST */
+#define WM8400_R12MNBST_WIDTH 1 /* R12MNBST */
+#define WM8400_RDBVOL_MASK 0x0007 /* RDBVOL - [2:0] */
+#define WM8400_RDBVOL_SHIFT 0 /* RDBVOL - [2:0] */
+#define WM8400_RDBVOL_WIDTH 3 /* RDBVOL - [2:0] */
+
+/*
+ * R43 (0x2B) - Input Mixer5
+ */
+#define WM8400_LI2BVOL_MASK 0x01C0 /* LI2BVOL - [8:6] */
+#define WM8400_LI2BVOL_SHIFT 6 /* LI2BVOL - [8:6] */
+#define WM8400_LI2BVOL_WIDTH 3 /* LI2BVOL - [8:6] */
+#define WM8400_LR4BVOL_MASK 0x0038 /* LR4BVOL - [5:3] */
+#define WM8400_LR4BVOL_SHIFT 3 /* LR4BVOL - [5:3] */
+#define WM8400_LR4BVOL_WIDTH 3 /* LR4BVOL - [5:3] */
+#define WM8400_LL4BVOL_MASK 0x0007 /* LL4BVOL - [2:0] */
+#define WM8400_LL4BVOL_SHIFT 0 /* LL4BVOL - [2:0] */
+#define WM8400_LL4BVOL_WIDTH 3 /* LL4BVOL - [2:0] */
+
+/*
+ * R44 (0x2C) - Input Mixer6
+ */
+#define WM8400_RI2BVOL_MASK 0x01C0 /* RI2BVOL - [8:6] */
+#define WM8400_RI2BVOL_SHIFT 6 /* RI2BVOL - [8:6] */
+#define WM8400_RI2BVOL_WIDTH 3 /* RI2BVOL - [8:6] */
+#define WM8400_RL4BVOL_MASK 0x0038 /* RL4BVOL - [5:3] */
+#define WM8400_RL4BVOL_SHIFT 3 /* RL4BVOL - [5:3] */
+#define WM8400_RL4BVOL_WIDTH 3 /* RL4BVOL - [5:3] */
+#define WM8400_RR4BVOL_MASK 0x0007 /* RR4BVOL - [2:0] */
+#define WM8400_RR4BVOL_SHIFT 0 /* RR4BVOL - [2:0] */
+#define WM8400_RR4BVOL_WIDTH 3 /* RR4BVOL - [2:0] */
+
+/*
+ * R45 (0x2D) - Output Mixer1
+ */
+#define WM8400_LRBLO 0x0080 /* LRBLO */
+#define WM8400_LRBLO_MASK 0x0080 /* LRBLO */
+#define WM8400_LRBLO_SHIFT 7 /* LRBLO */
+#define WM8400_LRBLO_WIDTH 1 /* LRBLO */
+#define WM8400_LLBLO 0x0040 /* LLBLO */
+#define WM8400_LLBLO_MASK 0x0040 /* LLBLO */
+#define WM8400_LLBLO_SHIFT 6 /* LLBLO */
+#define WM8400_LLBLO_WIDTH 1 /* LLBLO */
+#define WM8400_LRI3LO 0x0020 /* LRI3LO */
+#define WM8400_LRI3LO_MASK 0x0020 /* LRI3LO */
+#define WM8400_LRI3LO_SHIFT 5 /* LRI3LO */
+#define WM8400_LRI3LO_WIDTH 1 /* LRI3LO */
+#define WM8400_LLI3LO 0x0010 /* LLI3LO */
+#define WM8400_LLI3LO_MASK 0x0010 /* LLI3LO */
+#define WM8400_LLI3LO_SHIFT 4 /* LLI3LO */
+#define WM8400_LLI3LO_WIDTH 1 /* LLI3LO */
+#define WM8400_LR12LO 0x0008 /* LR12LO */
+#define WM8400_LR12LO_MASK 0x0008 /* LR12LO */
+#define WM8400_LR12LO_SHIFT 3 /* LR12LO */
+#define WM8400_LR12LO_WIDTH 1 /* LR12LO */
+#define WM8400_LL12LO 0x0004 /* LL12LO */
+#define WM8400_LL12LO_MASK 0x0004 /* LL12LO */
+#define WM8400_LL12LO_SHIFT 2 /* LL12LO */
+#define WM8400_LL12LO_WIDTH 1 /* LL12LO */
+#define WM8400_LDLO 0x0001 /* LDLO */
+#define WM8400_LDLO_MASK 0x0001 /* LDLO */
+#define WM8400_LDLO_SHIFT 0 /* LDLO */
+#define WM8400_LDLO_WIDTH 1 /* LDLO */
+
+/*
+ * R46 (0x2E) - Output Mixer2
+ */
+#define WM8400_RLBRO 0x0080 /* RLBRO */
+#define WM8400_RLBRO_MASK 0x0080 /* RLBRO */
+#define WM8400_RLBRO_SHIFT 7 /* RLBRO */
+#define WM8400_RLBRO_WIDTH 1 /* RLBRO */
+#define WM8400_RRBRO 0x0040 /* RRBRO */
+#define WM8400_RRBRO_MASK 0x0040 /* RRBRO */
+#define WM8400_RRBRO_SHIFT 6 /* RRBRO */
+#define WM8400_RRBRO_WIDTH 1 /* RRBRO */
+#define WM8400_RLI3RO 0x0020 /* RLI3RO */
+#define WM8400_RLI3RO_MASK 0x0020 /* RLI3RO */
+#define WM8400_RLI3RO_SHIFT 5 /* RLI3RO */
+#define WM8400_RLI3RO_WIDTH 1 /* RLI3RO */
+#define WM8400_RRI3RO 0x0010 /* RRI3RO */
+#define WM8400_RRI3RO_MASK 0x0010 /* RRI3RO */
+#define WM8400_RRI3RO_SHIFT 4 /* RRI3RO */
+#define WM8400_RRI3RO_WIDTH 1 /* RRI3RO */
+#define WM8400_RL12RO 0x0008 /* RL12RO */
+#define WM8400_RL12RO_MASK 0x0008 /* RL12RO */
+#define WM8400_RL12RO_SHIFT 3 /* RL12RO */
+#define WM8400_RL12RO_WIDTH 1 /* RL12RO */
+#define WM8400_RR12RO 0x0004 /* RR12RO */
+#define WM8400_RR12RO_MASK 0x0004 /* RR12RO */
+#define WM8400_RR12RO_SHIFT 2 /* RR12RO */
+#define WM8400_RR12RO_WIDTH 1 /* RR12RO */
+#define WM8400_RDRO 0x0001 /* RDRO */
+#define WM8400_RDRO_MASK 0x0001 /* RDRO */
+#define WM8400_RDRO_SHIFT 0 /* RDRO */
+#define WM8400_RDRO_WIDTH 1 /* RDRO */
+
+/*
+ * R47 (0x2F) - Output Mixer3
+ */
+#define WM8400_LLI3LOVOL_MASK 0x01C0 /* LLI3LOVOL - [8:6] */
+#define WM8400_LLI3LOVOL_SHIFT 6 /* LLI3LOVOL - [8:6] */
+#define WM8400_LLI3LOVOL_WIDTH 3 /* LLI3LOVOL - [8:6] */
+#define WM8400_LR12LOVOL_MASK 0x0038 /* LR12LOVOL - [5:3] */
+#define WM8400_LR12LOVOL_SHIFT 3 /* LR12LOVOL - [5:3] */
+#define WM8400_LR12LOVOL_WIDTH 3 /* LR12LOVOL - [5:3] */
+#define WM8400_LL12LOVOL_MASK 0x0007 /* LL12LOVOL - [2:0] */
+#define WM8400_LL12LOVOL_SHIFT 0 /* LL12LOVOL - [2:0] */
+#define WM8400_LL12LOVOL_WIDTH 3 /* LL12LOVOL - [2:0] */
+
+/*
+ * R48 (0x30) - Output Mixer4
+ */
+#define WM8400_RRI3ROVOL_MASK 0x01C0 /* RRI3ROVOL - [8:6] */
+#define WM8400_RRI3ROVOL_SHIFT 6 /* RRI3ROVOL - [8:6] */
+#define WM8400_RRI3ROVOL_WIDTH 3 /* RRI3ROVOL - [8:6] */
+#define WM8400_RL12ROVOL_MASK 0x0038 /* RL12ROVOL - [5:3] */
+#define WM8400_RL12ROVOL_SHIFT 3 /* RL12ROVOL - [5:3] */
+#define WM8400_RL12ROVOL_WIDTH 3 /* RL12ROVOL - [5:3] */
+#define WM8400_RR12ROVOL_MASK 0x0007 /* RR12ROVOL - [2:0] */
+#define WM8400_RR12ROVOL_SHIFT 0 /* RR12ROVOL - [2:0] */
+#define WM8400_RR12ROVOL_WIDTH 3 /* RR12ROVOL - [2:0] */
+
+/*
+ * R49 (0x31) - Output Mixer5
+ */
+#define WM8400_LRI3LOVOL_MASK 0x01C0 /* LRI3LOVOL - [8:6] */
+#define WM8400_LRI3LOVOL_SHIFT 6 /* LRI3LOVOL - [8:6] */
+#define WM8400_LRI3LOVOL_WIDTH 3 /* LRI3LOVOL - [8:6] */
+#define WM8400_LRBLOVOL_MASK 0x0038 /* LRBLOVOL - [5:3] */
+#define WM8400_LRBLOVOL_SHIFT 3 /* LRBLOVOL - [5:3] */
+#define WM8400_LRBLOVOL_WIDTH 3 /* LRBLOVOL - [5:3] */
+#define WM8400_LLBLOVOL_MASK 0x0007 /* LLBLOVOL - [2:0] */
+#define WM8400_LLBLOVOL_SHIFT 0 /* LLBLOVOL - [2:0] */
+#define WM8400_LLBLOVOL_WIDTH 3 /* LLBLOVOL - [2:0] */
+
+/*
+ * R50 (0x32) - Output Mixer6
+ */
+#define WM8400_RLI3ROVOL_MASK 0x01C0 /* RLI3ROVOL - [8:6] */
+#define WM8400_RLI3ROVOL_SHIFT 6 /* RLI3ROVOL - [8:6] */
+#define WM8400_RLI3ROVOL_WIDTH 3 /* RLI3ROVOL - [8:6] */
+#define WM8400_RLBROVOL_MASK 0x0038 /* RLBROVOL - [5:3] */
+#define WM8400_RLBROVOL_SHIFT 3 /* RLBROVOL - [5:3] */
+#define WM8400_RLBROVOL_WIDTH 3 /* RLBROVOL - [5:3] */
+#define WM8400_RRBROVOL_MASK 0x0007 /* RRBROVOL - [2:0] */
+#define WM8400_RRBROVOL_SHIFT 0 /* RRBROVOL - [2:0] */
+#define WM8400_RRBROVOL_WIDTH 3 /* RRBROVOL - [2:0] */
+
+/*
+ * R51 (0x33) - Out3/4 Mixer
+ */
+#define WM8400_VSEL_MASK 0x0180 /* VSEL - [8:7] */
+#define WM8400_VSEL_SHIFT 7 /* VSEL - [8:7] */
+#define WM8400_VSEL_WIDTH 2 /* VSEL - [8:7] */
+#define WM8400_LI4O3 0x0020 /* LI4O3 */
+#define WM8400_LI4O3_MASK 0x0020 /* LI4O3 */
+#define WM8400_LI4O3_SHIFT 5 /* LI4O3 */
+#define WM8400_LI4O3_WIDTH 1 /* LI4O3 */
+#define WM8400_LPGAO3 0x0010 /* LPGAO3 */
+#define WM8400_LPGAO3_MASK 0x0010 /* LPGAO3 */
+#define WM8400_LPGAO3_SHIFT 4 /* LPGAO3 */
+#define WM8400_LPGAO3_WIDTH 1 /* LPGAO3 */
+#define WM8400_RI4O4 0x0002 /* RI4O4 */
+#define WM8400_RI4O4_MASK 0x0002 /* RI4O4 */
+#define WM8400_RI4O4_SHIFT 1 /* RI4O4 */
+#define WM8400_RI4O4_WIDTH 1 /* RI4O4 */
+#define WM8400_RPGAO4 0x0001 /* RPGAO4 */
+#define WM8400_RPGAO4_MASK 0x0001 /* RPGAO4 */
+#define WM8400_RPGAO4_SHIFT 0 /* RPGAO4 */
+#define WM8400_RPGAO4_WIDTH 1 /* RPGAO4 */
+
+/*
+ * R52 (0x34) - Line Mixer1
+ */
+#define WM8400_LLOPGALON 0x0040 /* LLOPGALON */
+#define WM8400_LLOPGALON_MASK 0x0040 /* LLOPGALON */
+#define WM8400_LLOPGALON_SHIFT 6 /* LLOPGALON */
+#define WM8400_LLOPGALON_WIDTH 1 /* LLOPGALON */
+#define WM8400_LROPGALON 0x0020 /* LROPGALON */
+#define WM8400_LROPGALON_MASK 0x0020 /* LROPGALON */
+#define WM8400_LROPGALON_SHIFT 5 /* LROPGALON */
+#define WM8400_LROPGALON_WIDTH 1 /* LROPGALON */
+#define WM8400_LOPLON 0x0010 /* LOPLON */
+#define WM8400_LOPLON_MASK 0x0010 /* LOPLON */
+#define WM8400_LOPLON_SHIFT 4 /* LOPLON */
+#define WM8400_LOPLON_WIDTH 1 /* LOPLON */
+#define WM8400_LR12LOP 0x0004 /* LR12LOP */
+#define WM8400_LR12LOP_MASK 0x0004 /* LR12LOP */
+#define WM8400_LR12LOP_SHIFT 2 /* LR12LOP */
+#define WM8400_LR12LOP_WIDTH 1 /* LR12LOP */
+#define WM8400_LL12LOP 0x0002 /* LL12LOP */
+#define WM8400_LL12LOP_MASK 0x0002 /* LL12LOP */
+#define WM8400_LL12LOP_SHIFT 1 /* LL12LOP */
+#define WM8400_LL12LOP_WIDTH 1 /* LL12LOP */
+#define WM8400_LLOPGALOP 0x0001 /* LLOPGALOP */
+#define WM8400_LLOPGALOP_MASK 0x0001 /* LLOPGALOP */
+#define WM8400_LLOPGALOP_SHIFT 0 /* LLOPGALOP */
+#define WM8400_LLOPGALOP_WIDTH 1 /* LLOPGALOP */
+
+/*
+ * R53 (0x35) - Line Mixer2
+ */
+#define WM8400_RROPGARON 0x0040 /* RROPGARON */
+#define WM8400_RROPGARON_MASK 0x0040 /* RROPGARON */
+#define WM8400_RROPGARON_SHIFT 6 /* RROPGARON */
+#define WM8400_RROPGARON_WIDTH 1 /* RROPGARON */
+#define WM8400_RLOPGARON 0x0020 /* RLOPGARON */
+#define WM8400_RLOPGARON_MASK 0x0020 /* RLOPGARON */
+#define WM8400_RLOPGARON_SHIFT 5 /* RLOPGARON */
+#define WM8400_RLOPGARON_WIDTH 1 /* RLOPGARON */
+#define WM8400_ROPRON 0x0010 /* ROPRON */
+#define WM8400_ROPRON_MASK 0x0010 /* ROPRON */
+#define WM8400_ROPRON_SHIFT 4 /* ROPRON */
+#define WM8400_ROPRON_WIDTH 1 /* ROPRON */
+#define WM8400_RL12ROP 0x0004 /* RL12ROP */
+#define WM8400_RL12ROP_MASK 0x0004 /* RL12ROP */
+#define WM8400_RL12ROP_SHIFT 2 /* RL12ROP */
+#define WM8400_RL12ROP_WIDTH 1 /* RL12ROP */
+#define WM8400_RR12ROP 0x0002 /* RR12ROP */
+#define WM8400_RR12ROP_MASK 0x0002 /* RR12ROP */
+#define WM8400_RR12ROP_SHIFT 1 /* RR12ROP */
+#define WM8400_RR12ROP_WIDTH 1 /* RR12ROP */
+#define WM8400_RROPGAROP 0x0001 /* RROPGAROP */
+#define WM8400_RROPGAROP_MASK 0x0001 /* RROPGAROP */
+#define WM8400_RROPGAROP_SHIFT 0 /* RROPGAROP */
+#define WM8400_RROPGAROP_WIDTH 1 /* RROPGAROP */
+
+/*
+ * R54 (0x36) - Speaker Mixer
+ */
+#define WM8400_LB2SPK 0x0080 /* LB2SPK */
+#define WM8400_LB2SPK_MASK 0x0080 /* LB2SPK */
+#define WM8400_LB2SPK_SHIFT 7 /* LB2SPK */
+#define WM8400_LB2SPK_WIDTH 1 /* LB2SPK */
+#define WM8400_RB2SPK 0x0040 /* RB2SPK */
+#define WM8400_RB2SPK_MASK 0x0040 /* RB2SPK */
+#define WM8400_RB2SPK_SHIFT 6 /* RB2SPK */
+#define WM8400_RB2SPK_WIDTH 1 /* RB2SPK */
+#define WM8400_LI2SPK 0x0020 /* LI2SPK */
+#define WM8400_LI2SPK_MASK 0x0020 /* LI2SPK */
+#define WM8400_LI2SPK_SHIFT 5 /* LI2SPK */
+#define WM8400_LI2SPK_WIDTH 1 /* LI2SPK */
+#define WM8400_RI2SPK 0x0010 /* RI2SPK */
+#define WM8400_RI2SPK_MASK 0x0010 /* RI2SPK */
+#define WM8400_RI2SPK_SHIFT 4 /* RI2SPK */
+#define WM8400_RI2SPK_WIDTH 1 /* RI2SPK */
+#define WM8400_LOPGASPK 0x0008 /* LOPGASPK */
+#define WM8400_LOPGASPK_MASK 0x0008 /* LOPGASPK */
+#define WM8400_LOPGASPK_SHIFT 3 /* LOPGASPK */
+#define WM8400_LOPGASPK_WIDTH 1 /* LOPGASPK */
+#define WM8400_ROPGASPK 0x0004 /* ROPGASPK */
+#define WM8400_ROPGASPK_MASK 0x0004 /* ROPGASPK */
+#define WM8400_ROPGASPK_SHIFT 2 /* ROPGASPK */
+#define WM8400_ROPGASPK_WIDTH 1 /* ROPGASPK */
+#define WM8400_LDSPK 0x0002 /* LDSPK */
+#define WM8400_LDSPK_MASK 0x0002 /* LDSPK */
+#define WM8400_LDSPK_SHIFT 1 /* LDSPK */
+#define WM8400_LDSPK_WIDTH 1 /* LDSPK */
+#define WM8400_RDSPK 0x0001 /* RDSPK */
+#define WM8400_RDSPK_MASK 0x0001 /* RDSPK */
+#define WM8400_RDSPK_SHIFT 0 /* RDSPK */
+#define WM8400_RDSPK_WIDTH 1 /* RDSPK */
+
+/*
+ * R55 (0x37) - Additional Control
+ */
+#define WM8400_VROI 0x0001 /* VROI */
+#define WM8400_VROI_MASK 0x0001 /* VROI */
+#define WM8400_VROI_SHIFT 0 /* VROI */
+#define WM8400_VROI_WIDTH 1 /* VROI */
+
+/*
+ * R56 (0x38) - AntiPOP1
+ */
+#define WM8400_DIS_LLINE 0x0020 /* DIS_LLINE */
+#define WM8400_DIS_LLINE_MASK 0x0020 /* DIS_LLINE */
+#define WM8400_DIS_LLINE_SHIFT 5 /* DIS_LLINE */
+#define WM8400_DIS_LLINE_WIDTH 1 /* DIS_LLINE */
+#define WM8400_DIS_RLINE 0x0010 /* DIS_RLINE */
+#define WM8400_DIS_RLINE_MASK 0x0010 /* DIS_RLINE */
+#define WM8400_DIS_RLINE_SHIFT 4 /* DIS_RLINE */
+#define WM8400_DIS_RLINE_WIDTH 1 /* DIS_RLINE */
+#define WM8400_DIS_OUT3 0x0008 /* DIS_OUT3 */
+#define WM8400_DIS_OUT3_MASK 0x0008 /* DIS_OUT3 */
+#define WM8400_DIS_OUT3_SHIFT 3 /* DIS_OUT3 */
+#define WM8400_DIS_OUT3_WIDTH 1 /* DIS_OUT3 */
+#define WM8400_DIS_OUT4 0x0004 /* DIS_OUT4 */
+#define WM8400_DIS_OUT4_MASK 0x0004 /* DIS_OUT4 */
+#define WM8400_DIS_OUT4_SHIFT 2 /* DIS_OUT4 */
+#define WM8400_DIS_OUT4_WIDTH 1 /* DIS_OUT4 */
+#define WM8400_DIS_LOUT 0x0002 /* DIS_LOUT */
+#define WM8400_DIS_LOUT_MASK 0x0002 /* DIS_LOUT */
+#define WM8400_DIS_LOUT_SHIFT 1 /* DIS_LOUT */
+#define WM8400_DIS_LOUT_WIDTH 1 /* DIS_LOUT */
+#define WM8400_DIS_ROUT 0x0001 /* DIS_ROUT */
+#define WM8400_DIS_ROUT_MASK 0x0001 /* DIS_ROUT */
+#define WM8400_DIS_ROUT_SHIFT 0 /* DIS_ROUT */
+#define WM8400_DIS_ROUT_WIDTH 1 /* DIS_ROUT */
+
+/*
+ * R57 (0x39) - AntiPOP2
+ */
+#define WM8400_SOFTST 0x0040 /* SOFTST */
+#define WM8400_SOFTST_MASK 0x0040 /* SOFTST */
+#define WM8400_SOFTST_SHIFT 6 /* SOFTST */
+#define WM8400_SOFTST_WIDTH 1 /* SOFTST */
+#define WM8400_BUFIOEN 0x0008 /* BUFIOEN */
+#define WM8400_BUFIOEN_MASK 0x0008 /* BUFIOEN */
+#define WM8400_BUFIOEN_SHIFT 3 /* BUFIOEN */
+#define WM8400_BUFIOEN_WIDTH 1 /* BUFIOEN */
+#define WM8400_BUFDCOPEN 0x0004 /* BUFDCOPEN */
+#define WM8400_BUFDCOPEN_MASK 0x0004 /* BUFDCOPEN */
+#define WM8400_BUFDCOPEN_SHIFT 2 /* BUFDCOPEN */
+#define WM8400_BUFDCOPEN_WIDTH 1 /* BUFDCOPEN */
+#define WM8400_POBCTRL 0x0002 /* POBCTRL */
+#define WM8400_POBCTRL_MASK 0x0002 /* POBCTRL */
+#define WM8400_POBCTRL_SHIFT 1 /* POBCTRL */
+#define WM8400_POBCTRL_WIDTH 1 /* POBCTRL */
+#define WM8400_VMIDTOG 0x0001 /* VMIDTOG */
+#define WM8400_VMIDTOG_MASK 0x0001 /* VMIDTOG */
+#define WM8400_VMIDTOG_SHIFT 0 /* VMIDTOG */
+#define WM8400_VMIDTOG_WIDTH 1 /* VMIDTOG */
+
+/*
+ * R58 (0x3A) - MICBIAS
+ */
+#define WM8400_MCDSCTH_MASK 0x00C0 /* MCDSCTH - [7:6] */
+#define WM8400_MCDSCTH_SHIFT 6 /* MCDSCTH - [7:6] */
+#define WM8400_MCDSCTH_WIDTH 2 /* MCDSCTH - [7:6] */
+#define WM8400_MCDTHR_MASK 0x0038 /* MCDTHR - [5:3] */
+#define WM8400_MCDTHR_SHIFT 3 /* MCDTHR - [5:3] */
+#define WM8400_MCDTHR_WIDTH 3 /* MCDTHR - [5:3] */
+#define WM8400_MCD 0x0004 /* MCD */
+#define WM8400_MCD_MASK 0x0004 /* MCD */
+#define WM8400_MCD_SHIFT 2 /* MCD */
+#define WM8400_MCD_WIDTH 1 /* MCD */
+#define WM8400_MBSEL 0x0001 /* MBSEL */
+#define WM8400_MBSEL_MASK 0x0001 /* MBSEL */
+#define WM8400_MBSEL_SHIFT 0 /* MBSEL */
+#define WM8400_MBSEL_WIDTH 1 /* MBSEL */
+
+/*
+ * R60 (0x3C) - FLL Control 1
+ */
+#define WM8400_FLL_REF_FREQ 0x1000 /* FLL_REF_FREQ */
+#define WM8400_FLL_REF_FREQ_MASK 0x1000 /* FLL_REF_FREQ */
+#define WM8400_FLL_REF_FREQ_SHIFT 12 /* FLL_REF_FREQ */
+#define WM8400_FLL_REF_FREQ_WIDTH 1 /* FLL_REF_FREQ */
+#define WM8400_FLL_CLK_SRC_MASK 0x0C00 /* FLL_CLK_SRC - [11:10] */
+#define WM8400_FLL_CLK_SRC_SHIFT 10 /* FLL_CLK_SRC - [11:10] */
+#define WM8400_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [11:10] */
+#define WM8400_FLL_FRAC 0x0200 /* FLL_FRAC */
+#define WM8400_FLL_FRAC_MASK 0x0200 /* FLL_FRAC */
+#define WM8400_FLL_FRAC_SHIFT 9 /* FLL_FRAC */
+#define WM8400_FLL_FRAC_WIDTH 1 /* FLL_FRAC */
+#define WM8400_FLL_OSC_ENA 0x0100 /* FLL_OSC_ENA */
+#define WM8400_FLL_OSC_ENA_MASK 0x0100 /* FLL_OSC_ENA */
+#define WM8400_FLL_OSC_ENA_SHIFT 8 /* FLL_OSC_ENA */
+#define WM8400_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */
+#define WM8400_FLL_CTRL_RATE_MASK 0x00E0 /* FLL_CTRL_RATE - [7:5] */
+#define WM8400_FLL_CTRL_RATE_SHIFT 5 /* FLL_CTRL_RATE - [7:5] */
+#define WM8400_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [7:5] */
+#define WM8400_FLL_FRATIO_MASK 0x001F /* FLL_FRATIO - [4:0] */
+#define WM8400_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [4:0] */
+#define WM8400_FLL_FRATIO_WIDTH 5 /* FLL_FRATIO - [4:0] */
+
+/*
+ * R61 (0x3D) - FLL Control 2
+ */
+#define WM8400_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */
+#define WM8400_FLL_K_SHIFT 0 /* FLL_K - [15:0] */
+#define WM8400_FLL_K_WIDTH 16 /* FLL_K - [15:0] */
+
+/*
+ * R62 (0x3E) - FLL Control 3
+ */
+#define WM8400_FLL_N_MASK 0x03FF /* FLL_N - [9:0] */
+#define WM8400_FLL_N_SHIFT 0 /* FLL_N - [9:0] */
+#define WM8400_FLL_N_WIDTH 10 /* FLL_N - [9:0] */
+
+/*
+ * R63 (0x3F) - FLL Control 4
+ */
+#define WM8400_FLL_TRK_GAIN_MASK 0x0078 /* FLL_TRK_GAIN - [6:3] */
+#define WM8400_FLL_TRK_GAIN_SHIFT 3 /* FLL_TRK_GAIN - [6:3] */
+#define WM8400_FLL_TRK_GAIN_WIDTH 4 /* FLL_TRK_GAIN - [6:3] */
+#define WM8400_FLL_OUTDIV_MASK 0x0007 /* FLL_OUTDIV - [2:0] */
+#define WM8400_FLL_OUTDIV_SHIFT 0 /* FLL_OUTDIV - [2:0] */
+#define WM8400_FLL_OUTDIV_WIDTH 3 /* FLL_OUTDIV - [2:0] */
+
+struct wm8400;
+void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400);
+
+#endif
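[Editorial illustration, not part of the patch.] Every field in this header follows the same _MASK/_SHIFT/_WIDTH naming convention, so register values can be packed and unpacked with two generic helpers. A minimal sketch in plain C; the helper and function names are hypothetical:

#include <stdint.h>

/* Extract a field from a 16-bit register value using its _MASK/_SHIFT pair. */
static inline uint16_t wm8400_field_get(uint16_t reg, uint16_t mask,
                                        unsigned int shift)
{
	return (uint16_t)((reg & mask) >> shift);
}

/* Write an unshifted field value into a 16-bit register value. */
static inline uint16_t wm8400_field_put(uint16_t reg, uint16_t mask,
                                        unsigned int shift, uint16_t val)
{
	return (uint16_t)((reg & ~mask) | ((val << shift) & mask));
}

/* Example: program FLL_CTRL_RATE and FLL_FRATIO in FLL Control 1 (R60). */
static uint16_t pack_fll_control_1(uint16_t reg, uint16_t ctrl_rate,
                                   uint16_t fratio)
{
	reg = wm8400_field_put(reg, WM8400_FLL_CTRL_RATE_MASK,
			       WM8400_FLL_CTRL_RATE_SHIFT, ctrl_rate);
	reg = wm8400_field_put(reg, WM8400_FLL_FRATIO_MASK,
			       WM8400_FLL_FRATIO_SHIFT, fratio);
	return reg;
}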
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
new file mode 100644
index 000000000..2de565b94
--- /dev/null
+++ b/include/linux/mfd/wm8400-private.h
@@ -0,0 +1,935 @@
+/*
+ * wm8400 private definitions.
+ *
+ * Copyright 2008 Wolfson Microelectronics plc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_MFD_WM8400_PRIV_H
+#define __LINUX_MFD_WM8400_PRIV_H
+
+#include <linux/mfd/wm8400.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define WM8400_REGISTER_COUNT 0x55
+
+struct wm8400 {
+ struct device *dev;
+ struct regmap *regmap;
+
+ struct platform_device regulators[6];
+};
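[Editorial illustration, not part of the patch.] The regmap held in struct wm8400 is the shared register I/O path for the codec and regulator children. A hypothetical regmap_config sketch, assuming 8-bit register addresses and 16-bit register values (the actual core driver's configuration may differ):

#include <linux/regmap.h>

static const struct regmap_config wm8400_example_regmap_config = {
	.reg_bits     = 8,                         /* register index width (assumed) */
	.val_bits     = 16,                        /* register value width (assumed) */
	.max_register = WM8400_REGISTER_COUNT - 1, /* highest register, 0x54 */
};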
+
+/*
+ * Register values.
+ */
+#define WM8400_RESET_ID 0x00
+#define WM8400_ID 0x01
+#define WM8400_POWER_MANAGEMENT_1 0x02
+#define WM8400_POWER_MANAGEMENT_2 0x03
+#define WM8400_POWER_MANAGEMENT_3 0x04
+#define WM8400_AUDIO_INTERFACE_1 0x05
+#define WM8400_AUDIO_INTERFACE_2 0x06
+#define WM8400_CLOCKING_1 0x07
+#define WM8400_CLOCKING_2 0x08
+#define WM8400_AUDIO_INTERFACE_3 0x09
+#define WM8400_AUDIO_INTERFACE_4 0x0A
+#define WM8400_DAC_CTRL 0x0B
+#define WM8400_LEFT_DAC_DIGITAL_VOLUME 0x0C
+#define WM8400_RIGHT_DAC_DIGITAL_VOLUME 0x0D
+#define WM8400_DIGITAL_SIDE_TONE 0x0E
+#define WM8400_ADC_CTRL 0x0F
+#define WM8400_LEFT_ADC_DIGITAL_VOLUME 0x10
+#define WM8400_RIGHT_ADC_DIGITAL_VOLUME 0x11
+#define WM8400_GPIO_CTRL_1 0x12
+#define WM8400_GPIO1_GPIO2 0x13
+#define WM8400_GPIO3_GPIO4 0x14
+#define WM8400_GPIO5_GPIO6 0x15
+#define WM8400_GPIOCTRL_2 0x16
+#define WM8400_GPIO_POL 0x17
+#define WM8400_LEFT_LINE_INPUT_1_2_VOLUME 0x18
+#define WM8400_LEFT_LINE_INPUT_3_4_VOLUME 0x19
+#define WM8400_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A
+#define WM8400_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B
+#define WM8400_LEFT_OUTPUT_VOLUME 0x1C
+#define WM8400_RIGHT_OUTPUT_VOLUME 0x1D
+#define WM8400_LINE_OUTPUTS_VOLUME 0x1E
+#define WM8400_OUT3_4_VOLUME 0x1F
+#define WM8400_LEFT_OPGA_VOLUME 0x20
+#define WM8400_RIGHT_OPGA_VOLUME 0x21
+#define WM8400_SPEAKER_VOLUME 0x22
+#define WM8400_CLASSD1 0x23
+#define WM8400_CLASSD3 0x25
+#define WM8400_INPUT_MIXER1 0x27
+#define WM8400_INPUT_MIXER2 0x28
+#define WM8400_INPUT_MIXER3 0x29
+#define WM8400_INPUT_MIXER4 0x2A
+#define WM8400_INPUT_MIXER5 0x2B
+#define WM8400_INPUT_MIXER6 0x2C
+#define WM8400_OUTPUT_MIXER1 0x2D
+#define WM8400_OUTPUT_MIXER2 0x2E
+#define WM8400_OUTPUT_MIXER3 0x2F
+#define WM8400_OUTPUT_MIXER4 0x30
+#define WM8400_OUTPUT_MIXER5 0x31
+#define WM8400_OUTPUT_MIXER6 0x32
+#define WM8400_OUT3_4_MIXER 0x33
+#define WM8400_LINE_MIXER1 0x34
+#define WM8400_LINE_MIXER2 0x35
+#define WM8400_SPEAKER_MIXER 0x36
+#define WM8400_ADDITIONAL_CONTROL 0x37
+#define WM8400_ANTIPOP1 0x38
+#define WM8400_ANTIPOP2 0x39
+#define WM8400_MICBIAS 0x3A
+#define WM8400_FLL_CONTROL_1 0x3C
+#define WM8400_FLL_CONTROL_2 0x3D
+#define WM8400_FLL_CONTROL_3 0x3E
+#define WM8400_FLL_CONTROL_4 0x3F
+#define WM8400_LDO1_CONTROL 0x41
+#define WM8400_LDO2_CONTROL 0x42
+#define WM8400_LDO3_CONTROL 0x43
+#define WM8400_LDO4_CONTROL 0x44
+#define WM8400_DCDC1_CONTROL_1 0x46
+#define WM8400_DCDC1_CONTROL_2 0x47
+#define WM8400_DCDC2_CONTROL_1 0x48
+#define WM8400_DCDC2_CONTROL_2 0x49
+#define WM8400_INTERFACE 0x4B
+#define WM8400_PM_GENERAL 0x4C
+#define WM8400_PM_SHUTDOWN_CONTROL 0x4E
+#define WM8400_INTERRUPT_STATUS_1 0x4F
+#define WM8400_INTERRUPT_STATUS_1_MASK 0x50
+#define WM8400_INTERRUPT_LEVELS 0x51
+#define WM8400_SHUTDOWN_REASON 0x52
+#define WM8400_LINE_CIRCUITS 0x54
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Reset/ID
+ */
+#define WM8400_SW_RESET_CHIP_ID_MASK 0xFFFF /* SW_RESET/CHIP_ID - [15:0] */
+#define WM8400_SW_RESET_CHIP_ID_SHIFT 0 /* SW_RESET/CHIP_ID - [15:0] */
+#define WM8400_SW_RESET_CHIP_ID_WIDTH 16 /* SW_RESET/CHIP_ID - [15:0] */
+
+/*
+ * R1 (0x01) - ID
+ */
+#define WM8400_CHIP_REV_MASK 0x7000 /* CHIP_REV - [14:12] */
+#define WM8400_CHIP_REV_SHIFT 12 /* CHIP_REV - [14:12] */
+#define WM8400_CHIP_REV_WIDTH 3 /* CHIP_REV - [14:12] */
+
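[Editorial illustration, not part of the patch.] A minimal sketch of reading R1 through the shared regmap and extracting the revision with the macros above; the function name is hypothetical:

#include <linux/regmap.h>

/* Return the CHIP_REV field of R1 (ID), or a negative errno on I/O failure. */
static int wm8400_example_read_chip_rev(struct wm8400 *wm8400)
{
	unsigned int val;
	int ret;

	ret = regmap_read(wm8400->regmap, WM8400_ID, &val);
	if (ret < 0)
		return ret;

	return (val & WM8400_CHIP_REV_MASK) >> WM8400_CHIP_REV_SHIFT;
}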
+/*
+ * R18 (0x12) - GPIO CTRL 1
+ */
+#define WM8400_IRQ 0x1000 /* IRQ */
+#define WM8400_IRQ_MASK 0x1000 /* IRQ */
+#define WM8400_IRQ_SHIFT 12 /* IRQ */
+#define WM8400_IRQ_WIDTH 1 /* IRQ */
+#define WM8400_TEMPOK 0x0800 /* TEMPOK */
+#define WM8400_TEMPOK_MASK 0x0800 /* TEMPOK */
+#define WM8400_TEMPOK_SHIFT 11 /* TEMPOK */
+#define WM8400_TEMPOK_WIDTH 1 /* TEMPOK */
+#define WM8400_MIC1SHRT 0x0400 /* MIC1SHRT */
+#define WM8400_MIC1SHRT_MASK 0x0400 /* MIC1SHRT */
+#define WM8400_MIC1SHRT_SHIFT 10 /* MIC1SHRT */
+#define WM8400_MIC1SHRT_WIDTH 1 /* MIC1SHRT */
+#define WM8400_MIC1DET 0x0200 /* MIC1DET */
+#define WM8400_MIC1DET_MASK 0x0200 /* MIC1DET */
+#define WM8400_MIC1DET_SHIFT 9 /* MIC1DET */
+#define WM8400_MIC1DET_WIDTH 1 /* MIC1DET */
+#define WM8400_FLL_LCK 0x0100 /* FLL_LCK */
+#define WM8400_FLL_LCK_MASK 0x0100 /* FLL_LCK */
+#define WM8400_FLL_LCK_SHIFT 8 /* FLL_LCK */
+#define WM8400_FLL_LCK_WIDTH 1 /* FLL_LCK */
+#define WM8400_GPIO_STATUS_MASK 0x00FF /* GPIO_STATUS - [7:0] */
+#define WM8400_GPIO_STATUS_SHIFT 0 /* GPIO_STATUS - [7:0] */
+#define WM8400_GPIO_STATUS_WIDTH 8 /* GPIO_STATUS - [7:0] */
+
+/*
+ * R19 (0x13) - GPIO1 & GPIO2
+ */
+#define WM8400_GPIO2_DEB_ENA 0x8000 /* GPIO2_DEB_ENA */
+#define WM8400_GPIO2_DEB_ENA_MASK 0x8000 /* GPIO2_DEB_ENA */
+#define WM8400_GPIO2_DEB_ENA_SHIFT 15 /* GPIO2_DEB_ENA */
+#define WM8400_GPIO2_DEB_ENA_WIDTH 1 /* GPIO2_DEB_ENA */
+#define WM8400_GPIO2_IRQ_ENA 0x4000 /* GPIO2_IRQ_ENA */
+#define WM8400_GPIO2_IRQ_ENA_MASK 0x4000 /* GPIO2_IRQ_ENA */
+#define WM8400_GPIO2_IRQ_ENA_SHIFT 14 /* GPIO2_IRQ_ENA */
+#define WM8400_GPIO2_IRQ_ENA_WIDTH 1 /* GPIO2_IRQ_ENA */
+#define WM8400_GPIO2_PU 0x2000 /* GPIO2_PU */
+#define WM8400_GPIO2_PU_MASK 0x2000 /* GPIO2_PU */
+#define WM8400_GPIO2_PU_SHIFT 13 /* GPIO2_PU */
+#define WM8400_GPIO2_PU_WIDTH 1 /* GPIO2_PU */
+#define WM8400_GPIO2_PD 0x1000 /* GPIO2_PD */
+#define WM8400_GPIO2_PD_MASK 0x1000 /* GPIO2_PD */
+#define WM8400_GPIO2_PD_SHIFT 12 /* GPIO2_PD */
+#define WM8400_GPIO2_PD_WIDTH 1 /* GPIO2_PD */
+#define WM8400_GPIO2_SEL_MASK 0x0F00 /* GPIO2_SEL - [11:8] */
+#define WM8400_GPIO2_SEL_SHIFT 8 /* GPIO2_SEL - [11:8] */
+#define WM8400_GPIO2_SEL_WIDTH 4 /* GPIO2_SEL - [11:8] */
+#define WM8400_GPIO1_DEB_ENA 0x0080 /* GPIO1_DEB_ENA */
+#define WM8400_GPIO1_DEB_ENA_MASK 0x0080 /* GPIO1_DEB_ENA */
+#define WM8400_GPIO1_DEB_ENA_SHIFT 7 /* GPIO1_DEB_ENA */
+#define WM8400_GPIO1_DEB_ENA_WIDTH 1 /* GPIO1_DEB_ENA */
+#define WM8400_GPIO1_IRQ_ENA 0x0040 /* GPIO1_IRQ_ENA */
+#define WM8400_GPIO1_IRQ_ENA_MASK 0x0040 /* GPIO1_IRQ_ENA */
+#define WM8400_GPIO1_IRQ_ENA_SHIFT 6 /* GPIO1_IRQ_ENA */
+#define WM8400_GPIO1_IRQ_ENA_WIDTH 1 /* GPIO1_IRQ_ENA */
+#define WM8400_GPIO1_PU 0x0020 /* GPIO1_PU */
+#define WM8400_GPIO1_PU_MASK 0x0020 /* GPIO1_PU */
+#define WM8400_GPIO1_PU_SHIFT 5 /* GPIO1_PU */
+#define WM8400_GPIO1_PU_WIDTH 1 /* GPIO1_PU */
+#define WM8400_GPIO1_PD 0x0010 /* GPIO1_PD */
+#define WM8400_GPIO1_PD_MASK 0x0010 /* GPIO1_PD */
+#define WM8400_GPIO1_PD_SHIFT 4 /* GPIO1_PD */
+#define WM8400_GPIO1_PD_WIDTH 1 /* GPIO1_PD */
+#define WM8400_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */
+#define WM8400_GPIO1_SEL_SHIFT 0 /* GPIO1_SEL - [3:0] */
+#define WM8400_GPIO1_SEL_WIDTH 4 /* GPIO1_SEL - [3:0] */
+
+/*
+ * R20 (0x14) - GPIO3 & GPIO4
+ */
+#define WM8400_GPIO4_DEB_ENA 0x8000 /* GPIO4_DEB_ENA */
+#define WM8400_GPIO4_DEB_ENA_MASK 0x8000 /* GPIO4_DEB_ENA */
+#define WM8400_GPIO4_DEB_ENA_SHIFT 15 /* GPIO4_DEB_ENA */
+#define WM8400_GPIO4_DEB_ENA_WIDTH 1 /* GPIO4_DEB_ENA */
+#define WM8400_GPIO4_IRQ_ENA 0x4000 /* GPIO4_IRQ_ENA */
+#define WM8400_GPIO4_IRQ_ENA_MASK 0x4000 /* GPIO4_IRQ_ENA */
+#define WM8400_GPIO4_IRQ_ENA_SHIFT 14 /* GPIO4_IRQ_ENA */
+#define WM8400_GPIO4_IRQ_ENA_WIDTH 1 /* GPIO4_IRQ_ENA */
+#define WM8400_GPIO4_PU 0x2000 /* GPIO4_PU */
+#define WM8400_GPIO4_PU_MASK 0x2000 /* GPIO4_PU */
+#define WM8400_GPIO4_PU_SHIFT 13 /* GPIO4_PU */
+#define WM8400_GPIO4_PU_WIDTH 1 /* GPIO4_PU */
+#define WM8400_GPIO4_PD 0x1000 /* GPIO4_PD */
+#define WM8400_GPIO4_PD_MASK 0x1000 /* GPIO4_PD */
+#define WM8400_GPIO4_PD_SHIFT 12 /* GPIO4_PD */
+#define WM8400_GPIO4_PD_WIDTH 1 /* GPIO4_PD */
+#define WM8400_GPIO4_SEL_MASK 0x0F00 /* GPIO4_SEL - [11:8] */
+#define WM8400_GPIO4_SEL_SHIFT 8 /* GPIO4_SEL - [11:8] */
+#define WM8400_GPIO4_SEL_WIDTH 4 /* GPIO4_SEL - [11:8] */
+#define WM8400_GPIO3_DEB_ENA 0x0080 /* GPIO3_DEB_ENA */
+#define WM8400_GPIO3_DEB_ENA_MASK 0x0080 /* GPIO3_DEB_ENA */
+#define WM8400_GPIO3_DEB_ENA_SHIFT 7 /* GPIO3_DEB_ENA */
+#define WM8400_GPIO3_DEB_ENA_WIDTH 1 /* GPIO3_DEB_ENA */
+#define WM8400_GPIO3_IRQ_ENA 0x0040 /* GPIO3_IRQ_ENA */
+#define WM8400_GPIO3_IRQ_ENA_MASK 0x0040 /* GPIO3_IRQ_ENA */
+#define WM8400_GPIO3_IRQ_ENA_SHIFT 6 /* GPIO3_IRQ_ENA */
+#define WM8400_GPIO3_IRQ_ENA_WIDTH 1 /* GPIO3_IRQ_ENA */
+#define WM8400_GPIO3_PU 0x0020 /* GPIO3_PU */
+#define WM8400_GPIO3_PU_MASK 0x0020 /* GPIO3_PU */
+#define WM8400_GPIO3_PU_SHIFT 5 /* GPIO3_PU */
+#define WM8400_GPIO3_PU_WIDTH 1 /* GPIO3_PU */
+#define WM8400_GPIO3_PD 0x0010 /* GPIO3_PD */
+#define WM8400_GPIO3_PD_MASK 0x0010 /* GPIO3_PD */
+#define WM8400_GPIO3_PD_SHIFT 4 /* GPIO3_PD */
+#define WM8400_GPIO3_PD_WIDTH 1 /* GPIO3_PD */
+#define WM8400_GPIO3_SEL_MASK 0x000F /* GPIO3_SEL - [3:0] */
+#define WM8400_GPIO3_SEL_SHIFT 0 /* GPIO3_SEL - [3:0] */
+#define WM8400_GPIO3_SEL_WIDTH 4 /* GPIO3_SEL - [3:0] */
+
+/*
+ * R21 (0x15) - GPIO5 & GPIO6
+ */
+#define WM8400_GPIO6_DEB_ENA 0x8000 /* GPIO6_DEB_ENA */
+#define WM8400_GPIO6_DEB_ENA_MASK 0x8000 /* GPIO6_DEB_ENA */
+#define WM8400_GPIO6_DEB_ENA_SHIFT 15 /* GPIO6_DEB_ENA */
+#define WM8400_GPIO6_DEB_ENA_WIDTH 1 /* GPIO6_DEB_ENA */
+#define WM8400_GPIO6_IRQ_ENA 0x4000 /* GPIO6_IRQ_ENA */
+#define WM8400_GPIO6_IRQ_ENA_MASK 0x4000 /* GPIO6_IRQ_ENA */
+#define WM8400_GPIO6_IRQ_ENA_SHIFT 14 /* GPIO6_IRQ_ENA */
+#define WM8400_GPIO6_IRQ_ENA_WIDTH 1 /* GPIO6_IRQ_ENA */
+#define WM8400_GPIO6_PU 0x2000 /* GPIO6_PU */
+#define WM8400_GPIO6_PU_MASK 0x2000 /* GPIO6_PU */
+#define WM8400_GPIO6_PU_SHIFT 13 /* GPIO6_PU */
+#define WM8400_GPIO6_PU_WIDTH 1 /* GPIO6_PU */
+#define WM8400_GPIO6_PD 0x1000 /* GPIO6_PD */
+#define WM8400_GPIO6_PD_MASK 0x1000 /* GPIO6_PD */
+#define WM8400_GPIO6_PD_SHIFT 12 /* GPIO6_PD */
+#define WM8400_GPIO6_PD_WIDTH 1 /* GPIO6_PD */
+#define WM8400_GPIO6_SEL_MASK 0x0F00 /* GPIO6_SEL - [11:8] */
+#define WM8400_GPIO6_SEL_SHIFT 8 /* GPIO6_SEL - [11:8] */
+#define WM8400_GPIO6_SEL_WIDTH 4 /* GPIO6_SEL - [11:8] */
+#define WM8400_GPIO5_DEB_ENA 0x0080 /* GPIO5_DEB_ENA */
+#define WM8400_GPIO5_DEB_ENA_MASK 0x0080 /* GPIO5_DEB_ENA */
+#define WM8400_GPIO5_DEB_ENA_SHIFT 7 /* GPIO5_DEB_ENA */
+#define WM8400_GPIO5_DEB_ENA_WIDTH 1 /* GPIO5_DEB_ENA */
+#define WM8400_GPIO5_IRQ_ENA 0x0040 /* GPIO5_IRQ_ENA */
+#define WM8400_GPIO5_IRQ_ENA_MASK 0x0040 /* GPIO5_IRQ_ENA */
+#define WM8400_GPIO5_IRQ_ENA_SHIFT 6 /* GPIO5_IRQ_ENA */
+#define WM8400_GPIO5_IRQ_ENA_WIDTH 1 /* GPIO5_IRQ_ENA */
+#define WM8400_GPIO5_PU 0x0020 /* GPIO5_PU */
+#define WM8400_GPIO5_PU_MASK 0x0020 /* GPIO5_PU */
+#define WM8400_GPIO5_PU_SHIFT 5 /* GPIO5_PU */
+#define WM8400_GPIO5_PU_WIDTH 1 /* GPIO5_PU */
+#define WM8400_GPIO5_PD 0x0010 /* GPIO5_PD */
+#define WM8400_GPIO5_PD_MASK 0x0010 /* GPIO5_PD */
+#define WM8400_GPIO5_PD_SHIFT 4 /* GPIO5_PD */
+#define WM8400_GPIO5_PD_WIDTH 1 /* GPIO5_PD */
+#define WM8400_GPIO5_SEL_MASK 0x000F /* GPIO5_SEL - [3:0] */
+#define WM8400_GPIO5_SEL_SHIFT 0 /* GPIO5_SEL - [3:0] */
+#define WM8400_GPIO5_SEL_WIDTH 4 /* GPIO5_SEL - [3:0] */
+
+/*
+ * R22 (0x16) - GPIOCTRL 2
+ */
+#define WM8400_TEMPOK_IRQ_ENA 0x0800 /* TEMPOK_IRQ_ENA */
+#define WM8400_TEMPOK_IRQ_ENA_MASK 0x0800 /* TEMPOK_IRQ_ENA */
+#define WM8400_TEMPOK_IRQ_ENA_SHIFT 11 /* TEMPOK_IRQ_ENA */
+#define WM8400_TEMPOK_IRQ_ENA_WIDTH 1 /* TEMPOK_IRQ_ENA */
+#define WM8400_MIC1SHRT_IRQ_ENA 0x0400 /* MIC1SHRT_IRQ_ENA */
+#define WM8400_MIC1SHRT_IRQ_ENA_MASK 0x0400 /* MIC1SHRT_IRQ_ENA */
+#define WM8400_MIC1SHRT_IRQ_ENA_SHIFT 10 /* MIC1SHRT_IRQ_ENA */
+#define WM8400_MIC1SHRT_IRQ_ENA_WIDTH 1 /* MIC1SHRT_IRQ_ENA */
+#define WM8400_MIC1DET_IRQ_ENA 0x0200 /* MIC1DET_IRQ_ENA */
+#define WM8400_MIC1DET_IRQ_ENA_MASK 0x0200 /* MIC1DET_IRQ_ENA */
+#define WM8400_MIC1DET_IRQ_ENA_SHIFT 9 /* MIC1DET_IRQ_ENA */
+#define WM8400_MIC1DET_IRQ_ENA_WIDTH 1 /* MIC1DET_IRQ_ENA */
+#define WM8400_FLL_LCK_IRQ_ENA 0x0100 /* FLL_LCK_IRQ_ENA */
+#define WM8400_FLL_LCK_IRQ_ENA_MASK 0x0100 /* FLL_LCK_IRQ_ENA */
+#define WM8400_FLL_LCK_IRQ_ENA_SHIFT 8 /* FLL_LCK_IRQ_ENA */
+#define WM8400_FLL_LCK_IRQ_ENA_WIDTH 1 /* FLL_LCK_IRQ_ENA */
+#define WM8400_GPI8_DEB_ENA 0x0080 /* GPI8_DEB_ENA */
+#define WM8400_GPI8_DEB_ENA_MASK 0x0080 /* GPI8_DEB_ENA */
+#define WM8400_GPI8_DEB_ENA_SHIFT 7 /* GPI8_DEB_ENA */
+#define WM8400_GPI8_DEB_ENA_WIDTH 1 /* GPI8_DEB_ENA */
+#define WM8400_GPI8_IRQ_ENA 0x0040 /* GPI8_IRQ_ENA */
+#define WM8400_GPI8_IRQ_ENA_MASK 0x0040 /* GPI8_IRQ_ENA */
+#define WM8400_GPI8_IRQ_ENA_SHIFT 6 /* GPI8_IRQ_ENA */
+#define WM8400_GPI8_IRQ_ENA_WIDTH 1 /* GPI8_IRQ_ENA */
+#define WM8400_GPI8_ENA 0x0010 /* GPI8_ENA */
+#define WM8400_GPI8_ENA_MASK 0x0010 /* GPI8_ENA */
+#define WM8400_GPI8_ENA_SHIFT 4 /* GPI8_ENA */
+#define WM8400_GPI8_ENA_WIDTH 1 /* GPI8_ENA */
+#define WM8400_GPI7_DEB_ENA 0x0008 /* GPI7_DEB_ENA */
+#define WM8400_GPI7_DEB_ENA_MASK 0x0008 /* GPI7_DEB_ENA */
+#define WM8400_GPI7_DEB_ENA_SHIFT 3 /* GPI7_DEB_ENA */
+#define WM8400_GPI7_DEB_ENA_WIDTH 1 /* GPI7_DEB_ENA */
+#define WM8400_GPI7_IRQ_ENA 0x0004 /* GPI7_IRQ_ENA */
+#define WM8400_GPI7_IRQ_ENA_MASK 0x0004 /* GPI7_IRQ_ENA */
+#define WM8400_GPI7_IRQ_ENA_SHIFT 2 /* GPI7_IRQ_ENA */
+#define WM8400_GPI7_IRQ_ENA_WIDTH 1 /* GPI7_IRQ_ENA */
+#define WM8400_GPI7_ENA 0x0001 /* GPI7_ENA */
+#define WM8400_GPI7_ENA_MASK 0x0001 /* GPI7_ENA */
+#define WM8400_GPI7_ENA_SHIFT 0 /* GPI7_ENA */
+#define WM8400_GPI7_ENA_WIDTH 1 /* GPI7_ENA */
+
+/*
+ * R23 (0x17) - GPIO_POL
+ */
+#define WM8400_IRQ_INV 0x1000 /* IRQ_INV */
+#define WM8400_IRQ_INV_MASK 0x1000 /* IRQ_INV */
+#define WM8400_IRQ_INV_SHIFT 12 /* IRQ_INV */
+#define WM8400_IRQ_INV_WIDTH 1 /* IRQ_INV */
+#define WM8400_TEMPOK_POL 0x0800 /* TEMPOK_POL */
+#define WM8400_TEMPOK_POL_MASK 0x0800 /* TEMPOK_POL */
+#define WM8400_TEMPOK_POL_SHIFT 11 /* TEMPOK_POL */
+#define WM8400_TEMPOK_POL_WIDTH 1 /* TEMPOK_POL */
+#define WM8400_MIC1SHRT_POL 0x0400 /* MIC1SHRT_POL */
+#define WM8400_MIC1SHRT_POL_MASK 0x0400 /* MIC1SHRT_POL */
+#define WM8400_MIC1SHRT_POL_SHIFT 10 /* MIC1SHRT_POL */
+#define WM8400_MIC1SHRT_POL_WIDTH 1 /* MIC1SHRT_POL */
+#define WM8400_MIC1DET_POL 0x0200 /* MIC1DET_POL */
+#define WM8400_MIC1DET_POL_MASK 0x0200 /* MIC1DET_POL */
+#define WM8400_MIC1DET_POL_SHIFT 9 /* MIC1DET_POL */
+#define WM8400_MIC1DET_POL_WIDTH 1 /* MIC1DET_POL */
+#define WM8400_FLL_LCK_POL 0x0100 /* FLL_LCK_POL */
+#define WM8400_FLL_LCK_POL_MASK 0x0100 /* FLL_LCK_POL */
+#define WM8400_FLL_LCK_POL_SHIFT 8 /* FLL_LCK_POL */
+#define WM8400_FLL_LCK_POL_WIDTH 1 /* FLL_LCK_POL */
+#define WM8400_GPIO_POL_MASK 0x00FF /* GPIO_POL - [7:0] */
+#define WM8400_GPIO_POL_SHIFT 0 /* GPIO_POL - [7:0] */
+#define WM8400_GPIO_POL_WIDTH 8 /* GPIO_POL - [7:0] */
+
+/*
+ * R65 (0x41) - LDO 1 Control
+ */
+#define WM8400_LDO1_ENA 0x8000 /* LDO1_ENA */
+#define WM8400_LDO1_ENA_MASK 0x8000 /* LDO1_ENA */
+#define WM8400_LDO1_ENA_SHIFT 15 /* LDO1_ENA */
+#define WM8400_LDO1_ENA_WIDTH 1 /* LDO1_ENA */
+#define WM8400_LDO1_SWI 0x4000 /* LDO1_SWI */
+#define WM8400_LDO1_SWI_MASK 0x4000 /* LDO1_SWI */
+#define WM8400_LDO1_SWI_SHIFT 14 /* LDO1_SWI */
+#define WM8400_LDO1_SWI_WIDTH 1 /* LDO1_SWI */
+#define WM8400_LDO1_OPFLT 0x1000 /* LDO1_OPFLT */
+#define WM8400_LDO1_OPFLT_MASK 0x1000 /* LDO1_OPFLT */
+#define WM8400_LDO1_OPFLT_SHIFT 12 /* LDO1_OPFLT */
+#define WM8400_LDO1_OPFLT_WIDTH 1 /* LDO1_OPFLT */
+#define WM8400_LDO1_ERRACT 0x0800 /* LDO1_ERRACT */
+#define WM8400_LDO1_ERRACT_MASK 0x0800 /* LDO1_ERRACT */
+#define WM8400_LDO1_ERRACT_SHIFT 11 /* LDO1_ERRACT */
+#define WM8400_LDO1_ERRACT_WIDTH 1 /* LDO1_ERRACT */
+#define WM8400_LDO1_HIB_MODE 0x0400 /* LDO1_HIB_MODE */
+#define WM8400_LDO1_HIB_MODE_MASK 0x0400 /* LDO1_HIB_MODE */
+#define WM8400_LDO1_HIB_MODE_SHIFT 10 /* LDO1_HIB_MODE */
+#define WM8400_LDO1_HIB_MODE_WIDTH 1 /* LDO1_HIB_MODE */
+#define WM8400_LDO1_VIMG_MASK 0x03E0 /* LDO1_VIMG - [9:5] */
+#define WM8400_LDO1_VIMG_SHIFT 5 /* LDO1_VIMG - [9:5] */
+#define WM8400_LDO1_VIMG_WIDTH 5 /* LDO1_VIMG - [9:5] */
+#define WM8400_LDO1_VSEL_MASK 0x001F /* LDO1_VSEL - [4:0] */
+#define WM8400_LDO1_VSEL_SHIFT 0 /* LDO1_VSEL - [4:0] */
+#define WM8400_LDO1_VSEL_WIDTH 5 /* LDO1_VSEL - [4:0] */
+
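[Editorial illustration, not part of the patch.] A minimal sketch of programming LDO1 through the shared regmap: write the raw VSEL selector, then set the enable bit. The selector-to-voltage mapping is chip specific and not shown; the function name is hypothetical:

#include <linux/regmap.h>

static int wm8400_example_enable_ldo1(struct wm8400 *wm8400, unsigned int vsel)
{
	int ret;

	/* Program the voltage selector field (caller supplies a valid value). */
	ret = regmap_update_bits(wm8400->regmap, WM8400_LDO1_CONTROL,
				 WM8400_LDO1_VSEL_MASK,
				 vsel << WM8400_LDO1_VSEL_SHIFT);
	if (ret < 0)
		return ret;

	/* Then enable the regulator. */
	return regmap_update_bits(wm8400->regmap, WM8400_LDO1_CONTROL,
				  WM8400_LDO1_ENA, WM8400_LDO1_ENA);
}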
+/*
+ * R66 (0x42) - LDO 2 Control
+ */
+#define WM8400_LDO2_ENA 0x8000 /* LDO2_ENA */
+#define WM8400_LDO2_ENA_MASK 0x8000 /* LDO2_ENA */
+#define WM8400_LDO2_ENA_SHIFT 15 /* LDO2_ENA */
+#define WM8400_LDO2_ENA_WIDTH 1 /* LDO2_ENA */
+#define WM8400_LDO2_SWI 0x4000 /* LDO2_SWI */
+#define WM8400_LDO2_SWI_MASK 0x4000 /* LDO2_SWI */
+#define WM8400_LDO2_SWI_SHIFT 14 /* LDO2_SWI */
+#define WM8400_LDO2_SWI_WIDTH 1 /* LDO2_SWI */
+#define WM8400_LDO2_OPFLT 0x1000 /* LDO2_OPFLT */
+#define WM8400_LDO2_OPFLT_MASK 0x1000 /* LDO2_OPFLT */
+#define WM8400_LDO2_OPFLT_SHIFT 12 /* LDO2_OPFLT */
+#define WM8400_LDO2_OPFLT_WIDTH 1 /* LDO2_OPFLT */
+#define WM8400_LDO2_ERRACT 0x0800 /* LDO2_ERRACT */
+#define WM8400_LDO2_ERRACT_MASK 0x0800 /* LDO2_ERRACT */
+#define WM8400_LDO2_ERRACT_SHIFT 11 /* LDO2_ERRACT */
+#define WM8400_LDO2_ERRACT_WIDTH 1 /* LDO2_ERRACT */
+#define WM8400_LDO2_HIB_MODE 0x0400 /* LDO2_HIB_MODE */
+#define WM8400_LDO2_HIB_MODE_MASK 0x0400 /* LDO2_HIB_MODE */
+#define WM8400_LDO2_HIB_MODE_SHIFT 10 /* LDO2_HIB_MODE */
+#define WM8400_LDO2_HIB_MODE_WIDTH 1 /* LDO2_HIB_MODE */
+#define WM8400_LDO2_VIMG_MASK 0x03E0 /* LDO2_VIMG - [9:5] */
+#define WM8400_LDO2_VIMG_SHIFT 5 /* LDO2_VIMG - [9:5] */
+#define WM8400_LDO2_VIMG_WIDTH 5 /* LDO2_VIMG - [9:5] */
+#define WM8400_LDO2_VSEL_MASK 0x001F /* LDO2_VSEL - [4:0] */
+#define WM8400_LDO2_VSEL_SHIFT 0 /* LDO2_VSEL - [4:0] */
+#define WM8400_LDO2_VSEL_WIDTH 5 /* LDO2_VSEL - [4:0] */
+
+/*
+ * R67 (0x43) - LDO 3 Control
+ */
+#define WM8400_LDO3_ENA 0x8000 /* LDO3_ENA */
+#define WM8400_LDO3_ENA_MASK 0x8000 /* LDO3_ENA */
+#define WM8400_LDO3_ENA_SHIFT 15 /* LDO3_ENA */
+#define WM8400_LDO3_ENA_WIDTH 1 /* LDO3_ENA */
+#define WM8400_LDO3_SWI 0x4000 /* LDO3_SWI */
+#define WM8400_LDO3_SWI_MASK 0x4000 /* LDO3_SWI */
+#define WM8400_LDO3_SWI_SHIFT 14 /* LDO3_SWI */
+#define WM8400_LDO3_SWI_WIDTH 1 /* LDO3_SWI */
+#define WM8400_LDO3_OPFLT 0x1000 /* LDO3_OPFLT */
+#define WM8400_LDO3_OPFLT_MASK 0x1000 /* LDO3_OPFLT */
+#define WM8400_LDO3_OPFLT_SHIFT 12 /* LDO3_OPFLT */
+#define WM8400_LDO3_OPFLT_WIDTH 1 /* LDO3_OPFLT */
+#define WM8400_LDO3_ERRACT 0x0800 /* LDO3_ERRACT */
+#define WM8400_LDO3_ERRACT_MASK 0x0800 /* LDO3_ERRACT */
+#define WM8400_LDO3_ERRACT_SHIFT 11 /* LDO3_ERRACT */
+#define WM8400_LDO3_ERRACT_WIDTH 1 /* LDO3_ERRACT */
+#define WM8400_LDO3_HIB_MODE 0x0400 /* LDO3_HIB_MODE */
+#define WM8400_LDO3_HIB_MODE_MASK 0x0400 /* LDO3_HIB_MODE */
+#define WM8400_LDO3_HIB_MODE_SHIFT 10 /* LDO3_HIB_MODE */
+#define WM8400_LDO3_HIB_MODE_WIDTH 1 /* LDO3_HIB_MODE */
+#define WM8400_LDO3_VIMG_MASK 0x03E0 /* LDO3_VIMG - [9:5] */
+#define WM8400_LDO3_VIMG_SHIFT 5 /* LDO3_VIMG - [9:5] */
+#define WM8400_LDO3_VIMG_WIDTH 5 /* LDO3_VIMG - [9:5] */
+#define WM8400_LDO3_VSEL_MASK 0x001F /* LDO3_VSEL - [4:0] */
+#define WM8400_LDO3_VSEL_SHIFT 0 /* LDO3_VSEL - [4:0] */
+#define WM8400_LDO3_VSEL_WIDTH 5 /* LDO3_VSEL - [4:0] */
+
+/*
+ * R68 (0x44) - LDO 4 Control
+ */
+#define WM8400_LDO4_ENA 0x8000 /* LDO4_ENA */
+#define WM8400_LDO4_ENA_MASK 0x8000 /* LDO4_ENA */
+#define WM8400_LDO4_ENA_SHIFT 15 /* LDO4_ENA */
+#define WM8400_LDO4_ENA_WIDTH 1 /* LDO4_ENA */
+#define WM8400_LDO4_SWI 0x4000 /* LDO4_SWI */
+#define WM8400_LDO4_SWI_MASK 0x4000 /* LDO4_SWI */
+#define WM8400_LDO4_SWI_SHIFT 14 /* LDO4_SWI */
+#define WM8400_LDO4_SWI_WIDTH 1 /* LDO4_SWI */
+#define WM8400_LDO4_OPFLT 0x1000 /* LDO4_OPFLT */
+#define WM8400_LDO4_OPFLT_MASK 0x1000 /* LDO4_OPFLT */
+#define WM8400_LDO4_OPFLT_SHIFT 12 /* LDO4_OPFLT */
+#define WM8400_LDO4_OPFLT_WIDTH 1 /* LDO4_OPFLT */
+#define WM8400_LDO4_ERRACT 0x0800 /* LDO4_ERRACT */
+#define WM8400_LDO4_ERRACT_MASK 0x0800 /* LDO4_ERRACT */
+#define WM8400_LDO4_ERRACT_SHIFT 11 /* LDO4_ERRACT */
+#define WM8400_LDO4_ERRACT_WIDTH 1 /* LDO4_ERRACT */
+#define WM8400_LDO4_HIB_MODE 0x0400 /* LDO4_HIB_MODE */
+#define WM8400_LDO4_HIB_MODE_MASK 0x0400 /* LDO4_HIB_MODE */
+#define WM8400_LDO4_HIB_MODE_SHIFT 10 /* LDO4_HIB_MODE */
+#define WM8400_LDO4_HIB_MODE_WIDTH 1 /* LDO4_HIB_MODE */
+#define WM8400_LDO4_VIMG_MASK 0x03E0 /* LDO4_VIMG - [9:5] */
+#define WM8400_LDO4_VIMG_SHIFT 5 /* LDO4_VIMG - [9:5] */
+#define WM8400_LDO4_VIMG_WIDTH 5 /* LDO4_VIMG - [9:5] */
+#define WM8400_LDO4_VSEL_MASK 0x001F /* LDO4_VSEL - [4:0] */
+#define WM8400_LDO4_VSEL_SHIFT 0 /* LDO4_VSEL - [4:0] */
+#define WM8400_LDO4_VSEL_WIDTH 5 /* LDO4_VSEL - [4:0] */
+
+/*
+ * R70 (0x46) - DCDC1 Control 1
+ */
+#define WM8400_DC1_ENA 0x8000 /* DC1_ENA */
+#define WM8400_DC1_ENA_MASK 0x8000 /* DC1_ENA */
+#define WM8400_DC1_ENA_SHIFT 15 /* DC1_ENA */
+#define WM8400_DC1_ENA_WIDTH 1 /* DC1_ENA */
+#define WM8400_DC1_ACTIVE 0x4000 /* DC1_ACTIVE */
+#define WM8400_DC1_ACTIVE_MASK 0x4000 /* DC1_ACTIVE */
+#define WM8400_DC1_ACTIVE_SHIFT 14 /* DC1_ACTIVE */
+#define WM8400_DC1_ACTIVE_WIDTH 1 /* DC1_ACTIVE */
+#define WM8400_DC1_SLEEP 0x2000 /* DC1_SLEEP */
+#define WM8400_DC1_SLEEP_MASK 0x2000 /* DC1_SLEEP */
+#define WM8400_DC1_SLEEP_SHIFT 13 /* DC1_SLEEP */
+#define WM8400_DC1_SLEEP_WIDTH 1 /* DC1_SLEEP */
+#define WM8400_DC1_OPFLT 0x1000 /* DC1_OPFLT */
+#define WM8400_DC1_OPFLT_MASK 0x1000 /* DC1_OPFLT */
+#define WM8400_DC1_OPFLT_SHIFT 12 /* DC1_OPFLT */
+#define WM8400_DC1_OPFLT_WIDTH 1 /* DC1_OPFLT */
+#define WM8400_DC1_ERRACT 0x0800 /* DC1_ERRACT */
+#define WM8400_DC1_ERRACT_MASK 0x0800 /* DC1_ERRACT */
+#define WM8400_DC1_ERRACT_SHIFT 11 /* DC1_ERRACT */
+#define WM8400_DC1_ERRACT_WIDTH 1 /* DC1_ERRACT */
+#define WM8400_DC1_HIB_MODE 0x0400 /* DC1_HIB_MODE */
+#define WM8400_DC1_HIB_MODE_MASK 0x0400 /* DC1_HIB_MODE */
+#define WM8400_DC1_HIB_MODE_SHIFT 10 /* DC1_HIB_MODE */
+#define WM8400_DC1_HIB_MODE_WIDTH 1 /* DC1_HIB_MODE */
+#define WM8400_DC1_SOFTST_MASK 0x0300 /* DC1_SOFTST - [9:8] */
+#define WM8400_DC1_SOFTST_SHIFT 8 /* DC1_SOFTST - [9:8] */
+#define WM8400_DC1_SOFTST_WIDTH 2 /* DC1_SOFTST - [9:8] */
+#define WM8400_DC1_OV_PROT 0x0080 /* DC1_OV_PROT */
+#define WM8400_DC1_OV_PROT_MASK 0x0080 /* DC1_OV_PROT */
+#define WM8400_DC1_OV_PROT_SHIFT 7 /* DC1_OV_PROT */
+#define WM8400_DC1_OV_PROT_WIDTH 1 /* DC1_OV_PROT */
+#define WM8400_DC1_VSEL_MASK 0x007F /* DC1_VSEL - [6:0] */
+#define WM8400_DC1_VSEL_SHIFT 0 /* DC1_VSEL - [6:0] */
+#define WM8400_DC1_VSEL_WIDTH 7 /* DC1_VSEL - [6:0] */
+
+/*
+ * R71 (0x47) - DCDC1 Control 2
+ */
+#define WM8400_DC1_FRC_PWM 0x2000 /* DC1_FRC_PWM */
+#define WM8400_DC1_FRC_PWM_MASK 0x2000 /* DC1_FRC_PWM */
+#define WM8400_DC1_FRC_PWM_SHIFT 13 /* DC1_FRC_PWM */
+#define WM8400_DC1_FRC_PWM_WIDTH 1 /* DC1_FRC_PWM */
+#define WM8400_DC1_STBY_LIM_MASK 0x0300 /* DC1_STBY_LIM - [9:8] */
+#define WM8400_DC1_STBY_LIM_SHIFT 8 /* DC1_STBY_LIM - [9:8] */
+#define WM8400_DC1_STBY_LIM_WIDTH 2 /* DC1_STBY_LIM - [9:8] */
+#define WM8400_DC1_ACT_LIM 0x0080 /* DC1_ACT_LIM */
+#define WM8400_DC1_ACT_LIM_MASK 0x0080 /* DC1_ACT_LIM */
+#define WM8400_DC1_ACT_LIM_SHIFT 7 /* DC1_ACT_LIM */
+#define WM8400_DC1_ACT_LIM_WIDTH 1 /* DC1_ACT_LIM */
+#define WM8400_DC1_VIMG_MASK 0x007F /* DC1_VIMG - [6:0] */
+#define WM8400_DC1_VIMG_SHIFT 0 /* DC1_VIMG - [6:0] */
+#define WM8400_DC1_VIMG_WIDTH 7 /* DC1_VIMG - [6:0] */
+
+/*
+ * R72 (0x48) - DCDC2 Control 1
+ */
+#define WM8400_DC2_ENA 0x8000 /* DC2_ENA */
+#define WM8400_DC2_ENA_MASK 0x8000 /* DC2_ENA */
+#define WM8400_DC2_ENA_SHIFT 15 /* DC2_ENA */
+#define WM8400_DC2_ENA_WIDTH 1 /* DC2_ENA */
+#define WM8400_DC2_ACTIVE 0x4000 /* DC2_ACTIVE */
+#define WM8400_DC2_ACTIVE_MASK 0x4000 /* DC2_ACTIVE */
+#define WM8400_DC2_ACTIVE_SHIFT 14 /* DC2_ACTIVE */
+#define WM8400_DC2_ACTIVE_WIDTH 1 /* DC2_ACTIVE */
+#define WM8400_DC2_SLEEP 0x2000 /* DC2_SLEEP */
+#define WM8400_DC2_SLEEP_MASK 0x2000 /* DC2_SLEEP */
+#define WM8400_DC2_SLEEP_SHIFT 13 /* DC2_SLEEP */
+#define WM8400_DC2_SLEEP_WIDTH 1 /* DC2_SLEEP */
+#define WM8400_DC2_OPFLT 0x1000 /* DC2_OPFLT */
+#define WM8400_DC2_OPFLT_MASK 0x1000 /* DC2_OPFLT */
+#define WM8400_DC2_OPFLT_SHIFT 12 /* DC2_OPFLT */
+#define WM8400_DC2_OPFLT_WIDTH 1 /* DC2_OPFLT */
+#define WM8400_DC2_ERRACT 0x0800 /* DC2_ERRACT */
+#define WM8400_DC2_ERRACT_MASK 0x0800 /* DC2_ERRACT */
+#define WM8400_DC2_ERRACT_SHIFT 11 /* DC2_ERRACT */
+#define WM8400_DC2_ERRACT_WIDTH 1 /* DC2_ERRACT */
+#define WM8400_DC2_HIB_MODE 0x0400 /* DC2_HIB_MODE */
+#define WM8400_DC2_HIB_MODE_MASK 0x0400 /* DC2_HIB_MODE */
+#define WM8400_DC2_HIB_MODE_SHIFT 10 /* DC2_HIB_MODE */
+#define WM8400_DC2_HIB_MODE_WIDTH 1 /* DC2_HIB_MODE */
+#define WM8400_DC2_SOFTST_MASK 0x0300 /* DC2_SOFTST - [9:8] */
+#define WM8400_DC2_SOFTST_SHIFT 8 /* DC2_SOFTST - [9:8] */
+#define WM8400_DC2_SOFTST_WIDTH 2 /* DC2_SOFTST - [9:8] */
+#define WM8400_DC2_OV_PROT 0x0080 /* DC2_OV_PROT */
+#define WM8400_DC2_OV_PROT_MASK 0x0080 /* DC2_OV_PROT */
+#define WM8400_DC2_OV_PROT_SHIFT 7 /* DC2_OV_PROT */
+#define WM8400_DC2_OV_PROT_WIDTH 1 /* DC2_OV_PROT */
+#define WM8400_DC2_VSEL_MASK 0x007F /* DC2_VSEL - [6:0] */
+#define WM8400_DC2_VSEL_SHIFT 0 /* DC2_VSEL - [6:0] */
+#define WM8400_DC2_VSEL_WIDTH 7 /* DC2_VSEL - [6:0] */
+
+/*
+ * R73 (0x49) - DCDC2 Control 2
+ */
+#define WM8400_DC2_FRC_PWM 0x2000 /* DC2_FRC_PWM */
+#define WM8400_DC2_FRC_PWM_MASK 0x2000 /* DC2_FRC_PWM */
+#define WM8400_DC2_FRC_PWM_SHIFT 13 /* DC2_FRC_PWM */
+#define WM8400_DC2_FRC_PWM_WIDTH 1 /* DC2_FRC_PWM */
+#define WM8400_DC2_STBY_LIM_MASK 0x0300 /* DC2_STBY_LIM - [9:8] */
+#define WM8400_DC2_STBY_LIM_SHIFT 8 /* DC2_STBY_LIM - [9:8] */
+#define WM8400_DC2_STBY_LIM_WIDTH 2 /* DC2_STBY_LIM - [9:8] */
+#define WM8400_DC2_ACT_LIM 0x0080 /* DC2_ACT_LIM */
+#define WM8400_DC2_ACT_LIM_MASK 0x0080 /* DC2_ACT_LIM */
+#define WM8400_DC2_ACT_LIM_SHIFT 7 /* DC2_ACT_LIM */
+#define WM8400_DC2_ACT_LIM_WIDTH 1 /* DC2_ACT_LIM */
+#define WM8400_DC2_VIMG_MASK 0x007F /* DC2_VIMG - [6:0] */
+#define WM8400_DC2_VIMG_SHIFT 0 /* DC2_VIMG - [6:0] */
+#define WM8400_DC2_VIMG_WIDTH 7 /* DC2_VIMG - [6:0] */
+
+/*
+ * R75 (0x4B) - Interface
+ */
+#define WM8400_AUTOINC 0x0008 /* AUTOINC */
+#define WM8400_AUTOINC_MASK 0x0008 /* AUTOINC */
+#define WM8400_AUTOINC_SHIFT 3 /* AUTOINC */
+#define WM8400_AUTOINC_WIDTH 1 /* AUTOINC */
+#define WM8400_ARA_ENA 0x0004 /* ARA_ENA */
+#define WM8400_ARA_ENA_MASK 0x0004 /* ARA_ENA */
+#define WM8400_ARA_ENA_SHIFT 2 /* ARA_ENA */
+#define WM8400_ARA_ENA_WIDTH 1 /* ARA_ENA */
+#define WM8400_SPI_CFG 0x0002 /* SPI_CFG */
+#define WM8400_SPI_CFG_MASK 0x0002 /* SPI_CFG */
+#define WM8400_SPI_CFG_SHIFT 1 /* SPI_CFG */
+#define WM8400_SPI_CFG_WIDTH 1 /* SPI_CFG */
+
+/*
+ * R76 (0x4C) - PM GENERAL
+ */
+#define WM8400_CODEC_SOFTST 0x8000 /* CODEC_SOFTST */
+#define WM8400_CODEC_SOFTST_MASK 0x8000 /* CODEC_SOFTST */
+#define WM8400_CODEC_SOFTST_SHIFT 15 /* CODEC_SOFTST */
+#define WM8400_CODEC_SOFTST_WIDTH 1 /* CODEC_SOFTST */
+#define WM8400_CODEC_SOFTSD 0x4000 /* CODEC_SOFTSD */
+#define WM8400_CODEC_SOFTSD_MASK 0x4000 /* CODEC_SOFTSD */
+#define WM8400_CODEC_SOFTSD_SHIFT 14 /* CODEC_SOFTSD */
+#define WM8400_CODEC_SOFTSD_WIDTH 1 /* CODEC_SOFTSD */
+#define WM8400_CHIP_SOFTSD 0x2000 /* CHIP_SOFTSD */
+#define WM8400_CHIP_SOFTSD_MASK 0x2000 /* CHIP_SOFTSD */
+#define WM8400_CHIP_SOFTSD_SHIFT 13 /* CHIP_SOFTSD */
+#define WM8400_CHIP_SOFTSD_WIDTH 1 /* CHIP_SOFTSD */
+#define WM8400_DSLEEP1_POL 0x0008 /* DSLEEP1_POL */
+#define WM8400_DSLEEP1_POL_MASK 0x0008 /* DSLEEP1_POL */
+#define WM8400_DSLEEP1_POL_SHIFT 3 /* DSLEEP1_POL */
+#define WM8400_DSLEEP1_POL_WIDTH 1 /* DSLEEP1_POL */
+#define WM8400_DSLEEP2_POL 0x0004 /* DSLEEP2_POL */
+#define WM8400_DSLEEP2_POL_MASK 0x0004 /* DSLEEP2_POL */
+#define WM8400_DSLEEP2_POL_SHIFT 2 /* DSLEEP2_POL */
+#define WM8400_DSLEEP2_POL_WIDTH 1 /* DSLEEP2_POL */
+#define WM8400_PWR_STATE_MASK 0x0003 /* PWR_STATE - [1:0] */
+#define WM8400_PWR_STATE_SHIFT 0 /* PWR_STATE - [1:0] */
+#define WM8400_PWR_STATE_WIDTH 2 /* PWR_STATE - [1:0] */
+
+/*
+ * R78 (0x4E) - PM Shutdown Control
+ */
+#define WM8400_CHIP_GT150_ERRACT 0x0200 /* CHIP_GT150_ERRACT */
+#define WM8400_CHIP_GT150_ERRACT_MASK 0x0200 /* CHIP_GT150_ERRACT */
+#define WM8400_CHIP_GT150_ERRACT_SHIFT 9 /* CHIP_GT150_ERRACT */
+#define WM8400_CHIP_GT150_ERRACT_WIDTH 1 /* CHIP_GT150_ERRACT */
+#define WM8400_CHIP_GT115_ERRACT 0x0100 /* CHIP_GT115_ERRACT */
+#define WM8400_CHIP_GT115_ERRACT_MASK 0x0100 /* CHIP_GT115_ERRACT */
+#define WM8400_CHIP_GT115_ERRACT_SHIFT 8 /* CHIP_GT115_ERRACT */
+#define WM8400_CHIP_GT115_ERRACT_WIDTH 1 /* CHIP_GT115_ERRACT */
+#define WM8400_LINE_CMP_ERRACT 0x0080 /* LINE_CMP_ERRACT */
+#define WM8400_LINE_CMP_ERRACT_MASK 0x0080 /* LINE_CMP_ERRACT */
+#define WM8400_LINE_CMP_ERRACT_SHIFT 7 /* LINE_CMP_ERRACT */
+#define WM8400_LINE_CMP_ERRACT_WIDTH 1 /* LINE_CMP_ERRACT */
+#define WM8400_UVLO_ERRACT 0x0040 /* UVLO_ERRACT */
+#define WM8400_UVLO_ERRACT_MASK 0x0040 /* UVLO_ERRACT */
+#define WM8400_UVLO_ERRACT_SHIFT 6 /* UVLO_ERRACT */
+#define WM8400_UVLO_ERRACT_WIDTH 1 /* UVLO_ERRACT */
+
+/*
+ * R79 (0x4F) - Interrupt Status 1
+ */
+#define WM8400_MICD_CINT 0x8000 /* MICD_CINT */
+#define WM8400_MICD_CINT_MASK 0x8000 /* MICD_CINT */
+#define WM8400_MICD_CINT_SHIFT 15 /* MICD_CINT */
+#define WM8400_MICD_CINT_WIDTH 1 /* MICD_CINT */
+#define WM8400_MICSCD_CINT 0x4000 /* MICSCD_CINT */
+#define WM8400_MICSCD_CINT_MASK 0x4000 /* MICSCD_CINT */
+#define WM8400_MICSCD_CINT_SHIFT 14 /* MICSCD_CINT */
+#define WM8400_MICSCD_CINT_WIDTH 1 /* MICSCD_CINT */
+#define WM8400_JDL_CINT 0x2000 /* JDL_CINT */
+#define WM8400_JDL_CINT_MASK 0x2000 /* JDL_CINT */
+#define WM8400_JDL_CINT_SHIFT 13 /* JDL_CINT */
+#define WM8400_JDL_CINT_WIDTH 1 /* JDL_CINT */
+#define WM8400_JDR_CINT 0x1000 /* JDR_CINT */
+#define WM8400_JDR_CINT_MASK 0x1000 /* JDR_CINT */
+#define WM8400_JDR_CINT_SHIFT 12 /* JDR_CINT */
+#define WM8400_JDR_CINT_WIDTH 1 /* JDR_CINT */
+#define WM8400_CODEC_SEQ_END_EINT 0x0800 /* CODEC_SEQ_END_EINT */
+#define WM8400_CODEC_SEQ_END_EINT_MASK 0x0800 /* CODEC_SEQ_END_EINT */
+#define WM8400_CODEC_SEQ_END_EINT_SHIFT 11 /* CODEC_SEQ_END_EINT */
+#define WM8400_CODEC_SEQ_END_EINT_WIDTH 1 /* CODEC_SEQ_END_EINT */
+#define WM8400_CDEL_TO_EINT 0x0400 /* CDEL_TO_EINT */
+#define WM8400_CDEL_TO_EINT_MASK 0x0400 /* CDEL_TO_EINT */
+#define WM8400_CDEL_TO_EINT_SHIFT 10 /* CDEL_TO_EINT */
+#define WM8400_CDEL_TO_EINT_WIDTH 1 /* CDEL_TO_EINT */
+#define WM8400_CHIP_GT150_EINT 0x0200 /* CHIP_GT150_EINT */
+#define WM8400_CHIP_GT150_EINT_MASK 0x0200 /* CHIP_GT150_EINT */
+#define WM8400_CHIP_GT150_EINT_SHIFT 9 /* CHIP_GT150_EINT */
+#define WM8400_CHIP_GT150_EINT_WIDTH 1 /* CHIP_GT150_EINT */
+#define WM8400_CHIP_GT115_EINT 0x0100 /* CHIP_GT115_EINT */
+#define WM8400_CHIP_GT115_EINT_MASK 0x0100 /* CHIP_GT115_EINT */
+#define WM8400_CHIP_GT115_EINT_SHIFT 8 /* CHIP_GT115_EINT */
+#define WM8400_CHIP_GT115_EINT_WIDTH 1 /* CHIP_GT115_EINT */
+#define WM8400_LINE_CMP_EINT 0x0080 /* LINE_CMP_EINT */
+#define WM8400_LINE_CMP_EINT_MASK 0x0080 /* LINE_CMP_EINT */
+#define WM8400_LINE_CMP_EINT_SHIFT 7 /* LINE_CMP_EINT */
+#define WM8400_LINE_CMP_EINT_WIDTH 1 /* LINE_CMP_EINT */
+#define WM8400_UVLO_EINT 0x0040 /* UVLO_EINT */
+#define WM8400_UVLO_EINT_MASK 0x0040 /* UVLO_EINT */
+#define WM8400_UVLO_EINT_SHIFT 6 /* UVLO_EINT */
+#define WM8400_UVLO_EINT_WIDTH 1 /* UVLO_EINT */
+#define WM8400_DC2_UV_EINT 0x0020 /* DC2_UV_EINT */
+#define WM8400_DC2_UV_EINT_MASK 0x0020 /* DC2_UV_EINT */
+#define WM8400_DC2_UV_EINT_SHIFT 5 /* DC2_UV_EINT */
+#define WM8400_DC2_UV_EINT_WIDTH 1 /* DC2_UV_EINT */
+#define WM8400_DC1_UV_EINT 0x0010 /* DC1_UV_EINT */
+#define WM8400_DC1_UV_EINT_MASK 0x0010 /* DC1_UV_EINT */
+#define WM8400_DC1_UV_EINT_SHIFT 4 /* DC1_UV_EINT */
+#define WM8400_DC1_UV_EINT_WIDTH 1 /* DC1_UV_EINT */
+#define WM8400_LDO4_UV_EINT 0x0008 /* LDO4_UV_EINT */
+#define WM8400_LDO4_UV_EINT_MASK 0x0008 /* LDO4_UV_EINT */
+#define WM8400_LDO4_UV_EINT_SHIFT 3 /* LDO4_UV_EINT */
+#define WM8400_LDO4_UV_EINT_WIDTH 1 /* LDO4_UV_EINT */
+#define WM8400_LDO3_UV_EINT 0x0004 /* LDO3_UV_EINT */
+#define WM8400_LDO3_UV_EINT_MASK 0x0004 /* LDO3_UV_EINT */
+#define WM8400_LDO3_UV_EINT_SHIFT 2 /* LDO3_UV_EINT */
+#define WM8400_LDO3_UV_EINT_WIDTH 1 /* LDO3_UV_EINT */
+#define WM8400_LDO2_UV_EINT 0x0002 /* LDO2_UV_EINT */
+#define WM8400_LDO2_UV_EINT_MASK 0x0002 /* LDO2_UV_EINT */
+#define WM8400_LDO2_UV_EINT_SHIFT 1 /* LDO2_UV_EINT */
+#define WM8400_LDO2_UV_EINT_WIDTH 1 /* LDO2_UV_EINT */
+#define WM8400_LDO1_UV_EINT 0x0001 /* LDO1_UV_EINT */
+#define WM8400_LDO1_UV_EINT_MASK 0x0001 /* LDO1_UV_EINT */
+#define WM8400_LDO1_UV_EINT_SHIFT 0 /* LDO1_UV_EINT */
+#define WM8400_LDO1_UV_EINT_WIDTH 1 /* LDO1_UV_EINT */
+
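[Editorial illustration, not part of the patch.] A minimal sketch of decoding Interrupt Status 1 against its mask register, assuming a set IM_ bit in R80 masks the corresponding source; the function name is hypothetical:

#include <linux/regmap.h>

/* Report which unmasked Interrupt Status 1 sources are currently asserted. */
static int wm8400_example_pending_irqs(struct wm8400 *wm8400,
				       unsigned int *pending)
{
	unsigned int status, mask;
	int ret;

	ret = regmap_read(wm8400->regmap, WM8400_INTERRUPT_STATUS_1, &status);
	if (ret < 0)
		return ret;

	ret = regmap_read(wm8400->regmap, WM8400_INTERRUPT_STATUS_1_MASK, &mask);
	if (ret < 0)
		return ret;

	*pending = status & ~mask;	/* e.g. test with WM8400_MICD_CINT */

	return 0;
}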
+/*
+ * R80 (0x50) - Interrupt Status 1 Mask
+ */
+#define WM8400_IM_MICD_CINT 0x8000 /* IM_MICD_CINT */
+#define WM8400_IM_MICD_CINT_MASK 0x8000 /* IM_MICD_CINT */
+#define WM8400_IM_MICD_CINT_SHIFT 15 /* IM_MICD_CINT */
+#define WM8400_IM_MICD_CINT_WIDTH 1 /* IM_MICD_CINT */
+#define WM8400_IM_MICSCD_CINT 0x4000 /* IM_MICSCD_CINT */
+#define WM8400_IM_MICSCD_CINT_MASK 0x4000 /* IM_MICSCD_CINT */
+#define WM8400_IM_MICSCD_CINT_SHIFT 14 /* IM_MICSCD_CINT */
+#define WM8400_IM_MICSCD_CINT_WIDTH 1 /* IM_MICSCD_CINT */
+#define WM8400_IM_JDL_CINT 0x2000 /* IM_JDL_CINT */
+#define WM8400_IM_JDL_CINT_MASK 0x2000 /* IM_JDL_CINT */
+#define WM8400_IM_JDL_CINT_SHIFT 13 /* IM_JDL_CINT */
+#define WM8400_IM_JDL_CINT_WIDTH 1 /* IM_JDL_CINT */
+#define WM8400_IM_JDR_CINT 0x1000 /* IM_JDR_CINT */
+#define WM8400_IM_JDR_CINT_MASK 0x1000 /* IM_JDR_CINT */
+#define WM8400_IM_JDR_CINT_SHIFT 12 /* IM_JDR_CINT */
+#define WM8400_IM_JDR_CINT_WIDTH 1 /* IM_JDR_CINT */
+#define WM8400_IM_CODEC_SEQ_END_EINT 0x0800 /* IM_CODEC_SEQ_END_EINT */
+#define WM8400_IM_CODEC_SEQ_END_EINT_MASK 0x0800 /* IM_CODEC_SEQ_END_EINT */
+#define WM8400_IM_CODEC_SEQ_END_EINT_SHIFT 11 /* IM_CODEC_SEQ_END_EINT */
+#define WM8400_IM_CODEC_SEQ_END_EINT_WIDTH 1 /* IM_CODEC_SEQ_END_EINT */
+#define WM8400_IM_CDEL_TO_EINT 0x0400 /* IM_CDEL_TO_EINT */
+#define WM8400_IM_CDEL_TO_EINT_MASK 0x0400 /* IM_CDEL_TO_EINT */
+#define WM8400_IM_CDEL_TO_EINT_SHIFT 10 /* IM_CDEL_TO_EINT */
+#define WM8400_IM_CDEL_TO_EINT_WIDTH 1 /* IM_CDEL_TO_EINT */
+#define WM8400_IM_CHIP_GT150_EINT 0x0200 /* IM_CHIP_GT150_EINT */
+#define WM8400_IM_CHIP_GT150_EINT_MASK 0x0200 /* IM_CHIP_GT150_EINT */
+#define WM8400_IM_CHIP_GT150_EINT_SHIFT 9 /* IM_CHIP_GT150_EINT */
+#define WM8400_IM_CHIP_GT150_EINT_WIDTH 1 /* IM_CHIP_GT150_EINT */
+#define WM8400_IM_CHIP_GT115_EINT 0x0100 /* IM_CHIP_GT115_EINT */
+#define WM8400_IM_CHIP_GT115_EINT_MASK 0x0100 /* IM_CHIP_GT115_EINT */
+#define WM8400_IM_CHIP_GT115_EINT_SHIFT 8 /* IM_CHIP_GT115_EINT */
+#define WM8400_IM_CHIP_GT115_EINT_WIDTH 1 /* IM_CHIP_GT115_EINT */
+#define WM8400_IM_LINE_CMP_EINT 0x0080 /* IM_LINE_CMP_EINT */
+#define WM8400_IM_LINE_CMP_EINT_MASK 0x0080 /* IM_LINE_CMP_EINT */
+#define WM8400_IM_LINE_CMP_EINT_SHIFT 7 /* IM_LINE_CMP_EINT */
+#define WM8400_IM_LINE_CMP_EINT_WIDTH 1 /* IM_LINE_CMP_EINT */
+#define WM8400_IM_UVLO_EINT 0x0040 /* IM_UVLO_EINT */
+#define WM8400_IM_UVLO_EINT_MASK 0x0040 /* IM_UVLO_EINT */
+#define WM8400_IM_UVLO_EINT_SHIFT 6 /* IM_UVLO_EINT */
+#define WM8400_IM_UVLO_EINT_WIDTH 1 /* IM_UVLO_EINT */
+#define WM8400_IM_DC2_UV_EINT 0x0020 /* IM_DC2_UV_EINT */
+#define WM8400_IM_DC2_UV_EINT_MASK 0x0020 /* IM_DC2_UV_EINT */
+#define WM8400_IM_DC2_UV_EINT_SHIFT 5 /* IM_DC2_UV_EINT */
+#define WM8400_IM_DC2_UV_EINT_WIDTH 1 /* IM_DC2_UV_EINT */
+#define WM8400_IM_DC1_UV_EINT 0x0010 /* IM_DC1_UV_EINT */
+#define WM8400_IM_DC1_UV_EINT_MASK 0x0010 /* IM_DC1_UV_EINT */
+#define WM8400_IM_DC1_UV_EINT_SHIFT 4 /* IM_DC1_UV_EINT */
+#define WM8400_IM_DC1_UV_EINT_WIDTH 1 /* IM_DC1_UV_EINT */
+#define WM8400_IM_LDO4_UV_EINT 0x0008 /* IM_LDO4_UV_EINT */
+#define WM8400_IM_LDO4_UV_EINT_MASK 0x0008 /* IM_LDO4_UV_EINT */
+#define WM8400_IM_LDO4_UV_EINT_SHIFT 3 /* IM_LDO4_UV_EINT */
+#define WM8400_IM_LDO4_UV_EINT_WIDTH 1 /* IM_LDO4_UV_EINT */
+#define WM8400_IM_LDO3_UV_EINT 0x0004 /* IM_LDO3_UV_EINT */
+#define WM8400_IM_LDO3_UV_EINT_MASK 0x0004 /* IM_LDO3_UV_EINT */
+#define WM8400_IM_LDO3_UV_EINT_SHIFT 2 /* IM_LDO3_UV_EINT */
+#define WM8400_IM_LDO3_UV_EINT_WIDTH 1 /* IM_LDO3_UV_EINT */
+#define WM8400_IM_LDO2_UV_EINT 0x0002 /* IM_LDO2_UV_EINT */
+#define WM8400_IM_LDO2_UV_EINT_MASK 0x0002 /* IM_LDO2_UV_EINT */
+#define WM8400_IM_LDO2_UV_EINT_SHIFT 1 /* IM_LDO2_UV_EINT */
+#define WM8400_IM_LDO2_UV_EINT_WIDTH 1 /* IM_LDO2_UV_EINT */
+#define WM8400_IM_LDO1_UV_EINT 0x0001 /* IM_LDO1_UV_EINT */
+#define WM8400_IM_LDO1_UV_EINT_MASK 0x0001 /* IM_LDO1_UV_EINT */
+#define WM8400_IM_LDO1_UV_EINT_SHIFT 0 /* IM_LDO1_UV_EINT */
+#define WM8400_IM_LDO1_UV_EINT_WIDTH 1 /* IM_LDO1_UV_EINT */
+
+/*
+ * R81 (0x51) - Interrupt Levels
+ */
+#define WM8400_MICD_LVL 0x8000 /* MICD_LVL */
+#define WM8400_MICD_LVL_MASK 0x8000 /* MICD_LVL */
+#define WM8400_MICD_LVL_SHIFT 15 /* MICD_LVL */
+#define WM8400_MICD_LVL_WIDTH 1 /* MICD_LVL */
+#define WM8400_MICSCD_LVL 0x4000 /* MICSCD_LVL */
+#define WM8400_MICSCD_LVL_MASK 0x4000 /* MICSCD_LVL */
+#define WM8400_MICSCD_LVL_SHIFT 14 /* MICSCD_LVL */
+#define WM8400_MICSCD_LVL_WIDTH 1 /* MICSCD_LVL */
+#define WM8400_JDL_LVL 0x2000 /* JDL_LVL */
+#define WM8400_JDL_LVL_MASK 0x2000 /* JDL_LVL */
+#define WM8400_JDL_LVL_SHIFT 13 /* JDL_LVL */
+#define WM8400_JDL_LVL_WIDTH 1 /* JDL_LVL */
+#define WM8400_JDR_LVL 0x1000 /* JDR_LVL */
+#define WM8400_JDR_LVL_MASK 0x1000 /* JDR_LVL */
+#define WM8400_JDR_LVL_SHIFT 12 /* JDR_LVL */
+#define WM8400_JDR_LVL_WIDTH 1 /* JDR_LVL */
+#define WM8400_CODEC_SEQ_END_LVL 0x0800 /* CODEC_SEQ_END_LVL */
+#define WM8400_CODEC_SEQ_END_LVL_MASK 0x0800 /* CODEC_SEQ_END_LVL */
+#define WM8400_CODEC_SEQ_END_LVL_SHIFT 11 /* CODEC_SEQ_END_LVL */
+#define WM8400_CODEC_SEQ_END_LVL_WIDTH 1 /* CODEC_SEQ_END_LVL */
+#define WM8400_CDEL_TO_LVL 0x0400 /* CDEL_TO_LVL */
+#define WM8400_CDEL_TO_LVL_MASK 0x0400 /* CDEL_TO_LVL */
+#define WM8400_CDEL_TO_LVL_SHIFT 10 /* CDEL_TO_LVL */
+#define WM8400_CDEL_TO_LVL_WIDTH 1 /* CDEL_TO_LVL */
+#define WM8400_CHIP_GT150_LVL 0x0200 /* CHIP_GT150_LVL */
+#define WM8400_CHIP_GT150_LVL_MASK 0x0200 /* CHIP_GT150_LVL */
+#define WM8400_CHIP_GT150_LVL_SHIFT 9 /* CHIP_GT150_LVL */
+#define WM8400_CHIP_GT150_LVL_WIDTH 1 /* CHIP_GT150_LVL */
+#define WM8400_CHIP_GT115_LVL 0x0100 /* CHIP_GT115_LVL */
+#define WM8400_CHIP_GT115_LVL_MASK 0x0100 /* CHIP_GT115_LVL */
+#define WM8400_CHIP_GT115_LVL_SHIFT 8 /* CHIP_GT115_LVL */
+#define WM8400_CHIP_GT115_LVL_WIDTH 1 /* CHIP_GT115_LVL */
+#define WM8400_LINE_CMP_LVL 0x0080 /* LINE_CMP_LVL */
+#define WM8400_LINE_CMP_LVL_MASK 0x0080 /* LINE_CMP_LVL */
+#define WM8400_LINE_CMP_LVL_SHIFT 7 /* LINE_CMP_LVL */
+#define WM8400_LINE_CMP_LVL_WIDTH 1 /* LINE_CMP_LVL */
+#define WM8400_UVLO_LVL 0x0040 /* UVLO_LVL */
+#define WM8400_UVLO_LVL_MASK 0x0040 /* UVLO_LVL */
+#define WM8400_UVLO_LVL_SHIFT 6 /* UVLO_LVL */
+#define WM8400_UVLO_LVL_WIDTH 1 /* UVLO_LVL */
+#define WM8400_DC2_UV_LVL 0x0020 /* DC2_UV_LVL */
+#define WM8400_DC2_UV_LVL_MASK 0x0020 /* DC2_UV_LVL */
+#define WM8400_DC2_UV_LVL_SHIFT 5 /* DC2_UV_LVL */
+#define WM8400_DC2_UV_LVL_WIDTH 1 /* DC2_UV_LVL */
+#define WM8400_DC1_UV_LVL 0x0010 /* DC1_UV_LVL */
+#define WM8400_DC1_UV_LVL_MASK 0x0010 /* DC1_UV_LVL */
+#define WM8400_DC1_UV_LVL_SHIFT 4 /* DC1_UV_LVL */
+#define WM8400_DC1_UV_LVL_WIDTH 1 /* DC1_UV_LVL */
+#define WM8400_LDO4_UV_LVL 0x0008 /* LDO4_UV_LVL */
+#define WM8400_LDO4_UV_LVL_MASK 0x0008 /* LDO4_UV_LVL */
+#define WM8400_LDO4_UV_LVL_SHIFT 3 /* LDO4_UV_LVL */
+#define WM8400_LDO4_UV_LVL_WIDTH 1 /* LDO4_UV_LVL */
+#define WM8400_LDO3_UV_LVL 0x0004 /* LDO3_UV_LVL */
+#define WM8400_LDO3_UV_LVL_MASK 0x0004 /* LDO3_UV_LVL */
+#define WM8400_LDO3_UV_LVL_SHIFT 2 /* LDO3_UV_LVL */
+#define WM8400_LDO3_UV_LVL_WIDTH 1 /* LDO3_UV_LVL */
+#define WM8400_LDO2_UV_LVL 0x0002 /* LDO2_UV_LVL */
+#define WM8400_LDO2_UV_LVL_MASK 0x0002 /* LDO2_UV_LVL */
+#define WM8400_LDO2_UV_LVL_SHIFT 1 /* LDO2_UV_LVL */
+#define WM8400_LDO2_UV_LVL_WIDTH 1 /* LDO2_UV_LVL */
+#define WM8400_LDO1_UV_LVL 0x0001 /* LDO1_UV_LVL */
+#define WM8400_LDO1_UV_LVL_MASK 0x0001 /* LDO1_UV_LVL */
+#define WM8400_LDO1_UV_LVL_SHIFT 0 /* LDO1_UV_LVL */
+#define WM8400_LDO1_UV_LVL_WIDTH 1 /* LDO1_UV_LVL */
+
+/*
+ * R82 (0x52) - Shutdown Reason
+ */
+#define WM8400_SDR_CHIP_SOFTSD 0x2000 /* SDR_CHIP_SOFTSD */
+#define WM8400_SDR_CHIP_SOFTSD_MASK 0x2000 /* SDR_CHIP_SOFTSD */
+#define WM8400_SDR_CHIP_SOFTSD_SHIFT 13 /* SDR_CHIP_SOFTSD */
+#define WM8400_SDR_CHIP_SOFTSD_WIDTH 1 /* SDR_CHIP_SOFTSD */
+#define WM8400_SDR_NPDN 0x0800 /* SDR_NPDN */
+#define WM8400_SDR_NPDN_MASK 0x0800 /* SDR_NPDN */
+#define WM8400_SDR_NPDN_SHIFT 11 /* SDR_NPDN */
+#define WM8400_SDR_NPDN_WIDTH 1 /* SDR_NPDN */
+#define WM8400_SDR_CHIP_GT150 0x0200 /* SDR_CHIP_GT150 */
+#define WM8400_SDR_CHIP_GT150_MASK 0x0200 /* SDR_CHIP_GT150 */
+#define WM8400_SDR_CHIP_GT150_SHIFT 9 /* SDR_CHIP_GT150 */
+#define WM8400_SDR_CHIP_GT150_WIDTH 1 /* SDR_CHIP_GT150 */
+#define WM8400_SDR_CHIP_GT115 0x0100 /* SDR_CHIP_GT115 */
+#define WM8400_SDR_CHIP_GT115_MASK 0x0100 /* SDR_CHIP_GT115 */
+#define WM8400_SDR_CHIP_GT115_SHIFT 8 /* SDR_CHIP_GT115 */
+#define WM8400_SDR_CHIP_GT115_WIDTH 1 /* SDR_CHIP_GT115 */
+#define WM8400_SDR_LINE_CMP 0x0080 /* SDR_LINE_CMP */
+#define WM8400_SDR_LINE_CMP_MASK 0x0080 /* SDR_LINE_CMP */
+#define WM8400_SDR_LINE_CMP_SHIFT 7 /* SDR_LINE_CMP */
+#define WM8400_SDR_LINE_CMP_WIDTH 1 /* SDR_LINE_CMP */
+#define WM8400_SDR_UVLO 0x0040 /* SDR_UVLO */
+#define WM8400_SDR_UVLO_MASK 0x0040 /* SDR_UVLO */
+#define WM8400_SDR_UVLO_SHIFT 6 /* SDR_UVLO */
+#define WM8400_SDR_UVLO_WIDTH 1 /* SDR_UVLO */
+#define WM8400_SDR_DC2_UV 0x0020 /* SDR_DC2_UV */
+#define WM8400_SDR_DC2_UV_MASK 0x0020 /* SDR_DC2_UV */
+#define WM8400_SDR_DC2_UV_SHIFT 5 /* SDR_DC2_UV */
+#define WM8400_SDR_DC2_UV_WIDTH 1 /* SDR_DC2_UV */
+#define WM8400_SDR_DC1_UV 0x0010 /* SDR_DC1_UV */
+#define WM8400_SDR_DC1_UV_MASK 0x0010 /* SDR_DC1_UV */
+#define WM8400_SDR_DC1_UV_SHIFT 4 /* SDR_DC1_UV */
+#define WM8400_SDR_DC1_UV_WIDTH 1 /* SDR_DC1_UV */
+#define WM8400_SDR_LDO4_UV 0x0008 /* SDR_LDO4_UV */
+#define WM8400_SDR_LDO4_UV_MASK 0x0008 /* SDR_LDO4_UV */
+#define WM8400_SDR_LDO4_UV_SHIFT 3 /* SDR_LDO4_UV */
+#define WM8400_SDR_LDO4_UV_WIDTH 1 /* SDR_LDO4_UV */
+#define WM8400_SDR_LDO3_UV 0x0004 /* SDR_LDO3_UV */
+#define WM8400_SDR_LDO3_UV_MASK 0x0004 /* SDR_LDO3_UV */
+#define WM8400_SDR_LDO3_UV_SHIFT 2 /* SDR_LDO3_UV */
+#define WM8400_SDR_LDO3_UV_WIDTH 1 /* SDR_LDO3_UV */
+#define WM8400_SDR_LDO2_UV 0x0002 /* SDR_LDO2_UV */
+#define WM8400_SDR_LDO2_UV_MASK 0x0002 /* SDR_LDO2_UV */
+#define WM8400_SDR_LDO2_UV_SHIFT 1 /* SDR_LDO2_UV */
+#define WM8400_SDR_LDO2_UV_WIDTH 1 /* SDR_LDO2_UV */
+#define WM8400_SDR_LDO1_UV 0x0001 /* SDR_LDO1_UV */
+#define WM8400_SDR_LDO1_UV_MASK 0x0001 /* SDR_LDO1_UV */
+#define WM8400_SDR_LDO1_UV_SHIFT 0 /* SDR_LDO1_UV */
+#define WM8400_SDR_LDO1_UV_WIDTH 1 /* SDR_LDO1_UV */
+
+/*
+ * R84 (0x54) - Line Circuits
+ */
+#define WM8400_BG_LINE_COMP 0x8000 /* BG_LINE_COMP */
+#define WM8400_BG_LINE_COMP_MASK 0x8000 /* BG_LINE_COMP */
+#define WM8400_BG_LINE_COMP_SHIFT 15 /* BG_LINE_COMP */
+#define WM8400_BG_LINE_COMP_WIDTH 1 /* BG_LINE_COMP */
+#define WM8400_LINE_CMP_VTHI_MASK 0x00F0 /* LINE_CMP_VTHI - [7:4] */
+#define WM8400_LINE_CMP_VTHI_SHIFT 4 /* LINE_CMP_VTHI - [7:4] */
+#define WM8400_LINE_CMP_VTHI_WIDTH 4 /* LINE_CMP_VTHI - [7:4] */
+#define WM8400_LINE_CMP_VTHD_MASK 0x000F /* LINE_CMP_VTHD - [3:0] */
+#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */
+#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */
+
+u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
+int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
+
+static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
+ u16 mask, u16 val)
+{
+ return regmap_update_bits(wm8400->regmap, reg, mask, val);
+}
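+
+/*
+ * Usage sketch (illustrative only, not part of the original header): the
+ * *_MASK/*_SHIFT pairs above are meant to be used together when reading
+ * or updating a field, for example the line comparator threshold field
+ * (LINE_CMP_VTHI) in R84 (0x54).  "new_vthi" is a hypothetical value.
+ *
+ *	u16 reg = wm8400_reg_read(wm8400, 0x54);
+ *	u16 vthi = (reg & WM8400_LINE_CMP_VTHI_MASK) >>
+ *		   WM8400_LINE_CMP_VTHI_SHIFT;
+ *
+ *	wm8400_set_bits(wm8400, 0x54, WM8400_LINE_CMP_VTHI_MASK,
+ *			new_vthi << WM8400_LINE_CMP_VTHI_SHIFT);
+ */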
+
+#endif
diff --git a/include/linux/mfd/wm8400.h b/include/linux/mfd/wm8400.h
new file mode 100644
index 000000000..b46b566ac
--- /dev/null
+++ b/include/linux/mfd/wm8400.h
@@ -0,0 +1,40 @@
+/*
+ * wm8400 client interface
+ *
+ * Copyright 2008 Wolfson Microelectronics plc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_MFD_WM8400_H
+#define __LINUX_MFD_WM8400_H
+
+#include <linux/regulator/machine.h>
+
+#define WM8400_LDO1 0
+#define WM8400_LDO2 1
+#define WM8400_LDO3 2
+#define WM8400_LDO4 3
+#define WM8400_DCDC1 4
+#define WM8400_DCDC2 5
+
+struct wm8400_platform_data {
+ int (*platform_init)(struct device *dev);
+};
+
+int wm8400_register_regulator(struct device *dev, int reg,
+ struct regulator_init_data *initdata);
+
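+/*
+ * Usage sketch (illustrative only, not part of the original header): the
+ * regulators are registered from the board's platform_init() callback
+ * using the identifiers above.  "my_ldo1_data" is a hypothetical
+ * struct regulator_init_data describing the board's constraints.
+ *
+ *	static int my_board_wm8400_init(struct device *dev)
+ *	{
+ *		return wm8400_register_regulator(dev, WM8400_LDO1,
+ *						 &my_ldo1_data);
+ *	}
+ *
+ *	static struct wm8400_platform_data my_wm8400_pdata = {
+ *		.platform_init = my_board_wm8400_init,
+ *	};
+ */
+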
+#endif
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
new file mode 100644
index 000000000..eefafa62d
--- /dev/null
+++ b/include/linux/mfd/wm8994/core.h
@@ -0,0 +1,145 @@
+/*
+ * include/linux/mfd/wm8994/core.h -- Core interface for WM8994
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM8994_CORE_H__
+#define __MFD_WM8994_CORE_H__
+
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/wm8994/pdata.h>
+
+enum wm8994_type {
+ WM8994 = 0,
+ WM8958 = 1,
+ WM1811 = 2,
+};
+
+struct regulator_dev;
+struct regulator_bulk_data;
+struct irq_domain;
+
+#define WM8994_NUM_GPIO_REGS 11
+#define WM8994_NUM_LDO_REGS 2
+#define WM8994_NUM_IRQ_REGS 2
+
+#define WM8994_IRQ_TEMP_SHUT 0
+#define WM8994_IRQ_MIC1_DET 1
+#define WM8994_IRQ_MIC1_SHRT 2
+#define WM8994_IRQ_MIC2_DET 3
+#define WM8994_IRQ_MIC2_SHRT 4
+#define WM8994_IRQ_FLL1_LOCK 5
+#define WM8994_IRQ_FLL2_LOCK 6
+#define WM8994_IRQ_SRC1_LOCK 7
+#define WM8994_IRQ_SRC2_LOCK 8
+#define WM8994_IRQ_AIF1DRC1_SIG_DET 9
+#define WM8994_IRQ_AIF1DRC2_SIG_DET 10
+#define WM8994_IRQ_AIF2DRC_SIG_DET 11
+#define WM8994_IRQ_FIFOS_ERR 12
+#define WM8994_IRQ_WSEQ_DONE 13
+#define WM8994_IRQ_DCS_DONE 14
+#define WM8994_IRQ_TEMP_WARN 15
+
+/* GPIOs in the chip are numbered from 1-11 */
+#define WM8994_IRQ_GPIO(x) (x + WM8994_IRQ_TEMP_WARN)
+
+struct wm8994 {
+ struct wm8994_pdata pdata;
+
+ enum wm8994_type type;
+ int revision;
+ int cust_id;
+
+ struct device *dev;
+ struct regmap *regmap;
+
+ bool ldo_ena_always_driven;
+
+ int gpio_base;
+ int irq_base;
+
+ int irq;
+ struct regmap_irq_chip_data *irq_data;
+ struct irq_domain *edge_irq;
+
+ /* Used over suspend/resume */
+ bool suspended;
+
+ struct regulator_dev *dbvdd;
+ int num_supplies;
+ struct regulator_bulk_data *supplies;
+};
+
+/* Device I/O API */
+
+static inline int wm8994_reg_read(struct wm8994 *wm8994, unsigned short reg)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(wm8994->regmap, reg, &val);
+
+ if (ret < 0)
+ return ret;
+ else
+ return val;
+}
+
+static inline int wm8994_reg_write(struct wm8994 *wm8994, unsigned short reg,
+ unsigned short val)
+{
+ return regmap_write(wm8994->regmap, reg, val);
+}
+
+static inline int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
+ int count, u16 *buf)
+{
+ return regmap_bulk_read(wm8994->regmap, reg, buf, count);
+}
+
+static inline int wm8994_bulk_write(struct wm8994 *wm8994, unsigned short reg,
+ int count, const u16 *buf)
+{
+ return regmap_raw_write(wm8994->regmap, reg, buf, count * sizeof(u16));
+}
+
+static inline int wm8994_set_bits(struct wm8994 *wm8994, unsigned short reg,
+ unsigned short mask, unsigned short val)
+{
+ return regmap_update_bits(wm8994->regmap, reg, mask, val);
+}
+
+/* Helper to save on boilerplate */
+static inline int wm8994_request_irq(struct wm8994 *wm8994, int irq,
+ irq_handler_t handler, const char *name,
+ void *data)
+{
+ if (!wm8994->irq_data)
+ return -EINVAL;
+ return request_threaded_irq(regmap_irq_get_virq(wm8994->irq_data, irq),
+ NULL, handler, IRQF_TRIGGER_RISING, name,
+ data);
+}
+
+static inline void wm8994_free_irq(struct wm8994 *wm8994, int irq, void *data)
+{
+ if (!wm8994->irq_data)
+ return;
+ free_irq(regmap_irq_get_virq(wm8994->irq_data, irq), data);
+}
+
+int wm8994_irq_init(struct wm8994 *wm8994);
+void wm8994_irq_exit(struct wm8994 *wm8994);
+
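+/*
+ * Usage sketch (illustrative only, not part of the original header): a
+ * child driver can bind a threaded handler to one of the WM8994_IRQ_*
+ * sources through the helpers above.  "my_fll_handler" and "priv" are
+ * hypothetical.
+ *
+ *	static irqreturn_t my_fll_handler(int irq, void *data)
+ *	{
+ *		struct my_priv *priv = data;
+ *		...
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	ret = wm8994_request_irq(wm8994, WM8994_IRQ_FLL1_LOCK,
+ *				 my_fll_handler, "FLL1 lock", priv);
+ *	...
+ *	wm8994_free_irq(wm8994, WM8994_IRQ_FLL1_LOCK, priv);
+ */
+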
+#endif
diff --git a/include/linux/mfd/wm8994/gpio.h b/include/linux/mfd/wm8994/gpio.h
new file mode 100644
index 000000000..0c79b5ff4
--- /dev/null
+++ b/include/linux/mfd/wm8994/gpio.h
@@ -0,0 +1,76 @@
+/*
+ * include/linux/mfd/wm8994/gpio.h - GPIO configuration for WM8994
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM8994_GPIO_H__
+#define __MFD_WM8994_GPIO_H__
+
+#define WM8994_GPIO_MAX 11
+
+#define WM8994_GP_FN_PIN_SPECIFIC 0
+#define WM8994_GP_FN_GPIO 1
+#define WM8994_GP_FN_SDOUT 2
+#define WM8994_GP_FN_IRQ 3
+#define WM8994_GP_FN_TEMPERATURE 4
+#define WM8994_GP_FN_MICBIAS1_DET 5
+#define WM8994_GP_FN_MICBIAS1_SHORT 6
+#define WM8994_GP_FN_MICBIAS2_DET 7
+#define WM8994_GP_FN_MICBIAS2_SHORT 8
+#define WM8994_GP_FN_FLL1_LOCK 9
+#define WM8994_GP_FN_FLL2_LOCK 10
+#define WM8994_GP_FN_SRC1_LOCK 11
+#define WM8994_GP_FN_SRC2_LOCK 12
+#define WM8994_GP_FN_DRC1_ACT 13
+#define WM8994_GP_FN_DRC2_ACT 14
+#define WM8994_GP_FN_DRC3_ACT 15
+#define WM8994_GP_FN_WSEQ_STATUS 16
+#define WM8994_GP_FN_FIFO_ERROR 17
+#define WM8994_GP_FN_OPCLK 18
+#define WM8994_GP_FN_THW 19
+#define WM8994_GP_FN_DCS_DONE 20
+#define WM8994_GP_FN_FLL1_OUT 21
+#define WM8994_GP_FN_FLL2_OUT 22
+
+#define WM8994_GPN_DIR 0x8000 /* GPN_DIR */
+#define WM8994_GPN_DIR_MASK 0x8000 /* GPN_DIR */
+#define WM8994_GPN_DIR_SHIFT 15 /* GPN_DIR */
+#define WM8994_GPN_DIR_WIDTH 1 /* GPN_DIR */
+#define WM8994_GPN_PU 0x4000 /* GPN_PU */
+#define WM8994_GPN_PU_MASK 0x4000 /* GPN_PU */
+#define WM8994_GPN_PU_SHIFT 14 /* GPN_PU */
+#define WM8994_GPN_PU_WIDTH 1 /* GPN_PU */
+#define WM8994_GPN_PD 0x2000 /* GPN_PD */
+#define WM8994_GPN_PD_MASK 0x2000 /* GPN_PD */
+#define WM8994_GPN_PD_SHIFT 13 /* GPN_PD */
+#define WM8994_GPN_PD_WIDTH 1 /* GPN_PD */
+#define WM8994_GPN_POL 0x0400 /* GPN_POL */
+#define WM8994_GPN_POL_MASK 0x0400 /* GPN_POL */
+#define WM8994_GPN_POL_SHIFT 10 /* GPN_POL */
+#define WM8994_GPN_POL_WIDTH 1 /* GPN_POL */
+#define WM8994_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
+#define WM8994_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
+#define WM8994_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
+#define WM8994_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
+#define WM8994_GPN_DB 0x0100 /* GPN_DB */
+#define WM8994_GPN_DB_MASK 0x0100 /* GPN_DB */
+#define WM8994_GPN_DB_SHIFT 8 /* GPN_DB */
+#define WM8994_GPN_DB_WIDTH 1 /* GPN_DB */
+#define WM8994_GPN_LVL 0x0040 /* GPN_LVL */
+#define WM8994_GPN_LVL_MASK 0x0040 /* GPN_LVL */
+#define WM8994_GPN_LVL_SHIFT 6 /* GPN_LVL */
+#define WM8994_GPN_LVL_WIDTH 1 /* GPN_LVL */
+#define WM8994_GPN_FN_MASK 0x001F /* GPN_FN - [4:0] */
+#define WM8994_GPN_FN_SHIFT 0 /* GPN_FN - [4:0] */
+#define WM8994_GPN_FN_WIDTH 5 /* GPN_FN - [4:0] */
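+
+/*
+ * Usage sketch (illustrative only, not part of the original header): a
+ * GPIO register value is built by ORing the field defines above, for
+ * example setting the direction and pull-up bits and routing the pin to
+ * the IRQ function:
+ *
+ *	u16 gpio_cfg = WM8994_GPN_DIR | WM8994_GPN_PU |
+ *		       (WM8994_GP_FN_IRQ << WM8994_GPN_FN_SHIFT);
+ *
+ * Such values are typically supplied through gpio_defaults[] in
+ * struct wm8994_pdata.
+ */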
+
+#endif
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
new file mode 100644
index 000000000..90c60524a
--- /dev/null
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -0,0 +1,238 @@
+/*
+ * include/linux/mfd/wm8994/pdata.h -- Platform data for WM8994
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM8994_PDATA_H__
+#define __MFD_WM8994_PDATA_H__
+
+#define WM8994_NUM_LDO 2
+#define WM8994_NUM_GPIO 11
+#define WM8994_NUM_AIF 3
+
+struct wm8994_ldo_pdata {
+	/** GPIO to enable the regulator, 0 or less if not available */
+ int enable;
+
+ const struct regulator_init_data *init_data;
+};
+
+#define WM8994_CONFIGURE_GPIO 0x10000
+
+#define WM8994_DRC_REGS 5
+#define WM8994_EQ_REGS 20
+#define WM8958_MBC_CUTOFF_REGS 20
+#define WM8958_MBC_COEFF_REGS 48
+#define WM8958_MBC_COMBINED_REGS 56
+#define WM8958_VSS_HPF_REGS 2
+#define WM8958_VSS_REGS 148
+#define WM8958_ENH_EQ_REGS 32
+
+/**
+ * DRC configurations are specified with a label and a set of register
+ * values to write (the enable bits will be ignored). At runtime an
+ * enumerated control will be presented for each DRC block allowing
+ * the user to choose the configuration to use.
+ *
+ * Configurations may be generated by hand or by using the DRC control
+ * panel provided by WISCE - see http://www.wolfsonmicro.com/wisce/
+ * for details.
+ */
+struct wm8994_drc_cfg {
+ const char *name;
+ u16 regs[WM8994_DRC_REGS];
+};
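+
+/*
+ * Usage sketch (illustrative only, the register values below are
+ * placeholders): configurations are passed as an array through the
+ * num_drc_cfgs/drc_cfgs fields of struct wm8994_pdata.
+ *
+ *	static struct wm8994_drc_cfg my_drc_cfgs[] = {
+ *		{ .name = "Default", .regs = { 0x0098, 0x0845, 0x0, 0x0, 0x0 } },
+ *	};
+ *
+ *	pdata.num_drc_cfgs = ARRAY_SIZE(my_drc_cfgs);
+ *	pdata.drc_cfgs = my_drc_cfgs;
+ */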
+
+/**
+ * ReTune Mobile configurations are specified with a label, sample
+ * rate and set of values to write (the enable bits will be ignored).
+ *
+ * Configurations are expected to be generated using the ReTune Mobile
+ * control panel in WISCE - see http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8994_retune_mobile_cfg {
+ const char *name;
+ unsigned int rate;
+ u16 regs[WM8994_EQ_REGS];
+};
+
+/**
+ * Multiband compressor configurations are specified with a label and
+ * two sets of values to write. Configurations are expected to be
+ * generated using the multiband compressor configuration panel in
+ * WISCE - see http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8958_mbc_cfg {
+ const char *name;
+ u16 cutoff_regs[WM8958_MBC_CUTOFF_REGS];
+ u16 coeff_regs[WM8958_MBC_COEFF_REGS];
+
+ /* Coefficient layout when using MBC+VSS firmware */
+ u16 combined_regs[WM8958_MBC_COMBINED_REGS];
+};
+
+/**
+ * VSS HPF configurations are specified with a label and two values to
+ * write. Configurations are expected to be generated using the
+ * multiband compressor configuration panel in WISCE - see
+ * http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8958_vss_hpf_cfg {
+ const char *name;
+ u16 regs[WM8958_VSS_HPF_REGS];
+};
+
+/**
+ * VSS configurations are specified with a label and array of values
+ * to write. Configurations are expected to be generated using the
+ * multiband compressor configuration panel in WISCE - see
+ * http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8958_vss_cfg {
+ const char *name;
+ u16 regs[WM8958_VSS_REGS];
+};
+
+/**
+ * Enhanced EQ configurations are specified with a label and array of
+ * values to write. Configurations are expected to be generated using
+ * the multiband compressor configuration panel in WISCE - see
+ * http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8958_enh_eq_cfg {
+ const char *name;
+ u16 regs[WM8958_ENH_EQ_REGS];
+};
+
+/**
+ * Microphone detection rates, used to tune response rates and power
+ * consumption for WM8958/WM1811 microphone detection.
+ *
+ * @sysclk: System clock rate to use this configuration for.
+ * @idle: True if this configuration should be used when no accessory is
+ *        detected, false otherwise.
+ * @start: Value for MICD_BIAS_START_TIME register field (not shifted).
+ * @rate: Value for MICD_RATE register field (not shifted).
+ */
+struct wm8958_micd_rate {
+ int sysclk;
+ bool idle;
+ int start;
+ int rate;
+};
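+
+/*
+ * Usage sketch (illustrative only, values are hypothetical): an entry is
+ * typically provided per system clock rate for both the idle and
+ * accessory-present cases, passed via num_micd_rates/micd_rates in
+ * struct wm8994_pdata.
+ *
+ *	static struct wm8958_micd_rate my_micd_rates[] = {
+ *		{ .sysclk = 32768,    .idle = true,  .start = 10, .rate = 10 },
+ *		{ .sysclk = 24576000, .idle = false, .start = 7,  .rate = 7 },
+ *	};
+ */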
+
+struct wm8994_pdata {
+ int gpio_base;
+
+ /**
+	 * Default values for GPIOs if non-zero; OR in WM8994_CONFIGURE_GPIO
+	 * when an all-zero value should still be written.
+ */
+ int gpio_defaults[WM8994_NUM_GPIO];
+
+ struct wm8994_ldo_pdata ldo[WM8994_NUM_LDO];
+
+ int irq_base; /** Base IRQ number for WM8994, required for IRQs */
+ unsigned long irq_flags; /** user irq flags */
+
+ int num_drc_cfgs;
+ struct wm8994_drc_cfg *drc_cfgs;
+
+ int num_retune_mobile_cfgs;
+ struct wm8994_retune_mobile_cfg *retune_mobile_cfgs;
+
+ int num_mbc_cfgs;
+ struct wm8958_mbc_cfg *mbc_cfgs;
+
+ int num_vss_cfgs;
+ struct wm8958_vss_cfg *vss_cfgs;
+
+ int num_vss_hpf_cfgs;
+ struct wm8958_vss_hpf_cfg *vss_hpf_cfgs;
+
+ int num_enh_eq_cfgs;
+ struct wm8958_enh_eq_cfg *enh_eq_cfgs;
+
+ int num_micd_rates;
+ struct wm8958_micd_rate *micd_rates;
+
+ /* Power up delays to add after microphone bias power up (ms) */
+ int micb1_delay;
+ int micb2_delay;
+
+ /* LINEOUT can be differential or single ended */
+ unsigned int lineout1_diff:1;
+ unsigned int lineout2_diff:1;
+
+ /* Common mode feedback */
+ unsigned int lineout1fb:1;
+ unsigned int lineout2fb:1;
+
+ /* Delay between detecting a jack and starting microphone
+ * detect (specified in ms)
+ */
+ int micdet_delay;
+
+ /* Delay between microphone detect completing and reporting on
+ * insert (specified in ms)
+ */
+ int mic_id_delay;
+
+ /* IRQ for microphone detection if brought out directly as a
+ * signal.
+ */
+ int micdet_irq;
+
+	/* WM8994 microphone biases: 0=0.9*AVDD1 1=0.65*AVDD1 */
+ unsigned int micbias1_lvl:1;
+ unsigned int micbias2_lvl:1;
+
+	/* WM8994 jack detect threshold levels, see datasheet for values */
+ unsigned int jd_scthr:2;
+ unsigned int jd_thr:2;
+
+ /* Configure WM1811 jack detection for use with external capacitor */
+ unsigned int jd_ext_cap:1;
+
+ /* WM8958 microphone bias configuration */
+ int micbias[2];
+
+ /* WM8958 microphone detection ranges */
+ u16 micd_lvl_sel;
+
+	/* Disable the internal pull downs on the LDOs if they are
+	 * always driven (e.g. connected to an always-on supply or a
+	 * GPIO that always drives an output). If they float, power
+	 * consumption will rise.
+	 */
+ bool ldo_ena_always_driven;
+
+ /*
+ * SPKMODE must be pulled internally by the device on this
+ * system.
+ */
+ bool spkmode_pu;
+
+ /**
+ * Maximum number of channels clocks will be generated for,
+	 * useful for systems where an I2S bus with multiple data
+ * lines is mastered.
+ */
+ int max_channels_clocked[WM8994_NUM_AIF];
+
+ /**
+ * GPIO for the IRQ pin if host only supports edge triggering
+ */
+ int irq_gpio;
+};
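+
+/*
+ * Usage sketch (illustrative only): a board fills in only the fields it
+ * needs and leaves the rest zeroed.  "my_ldo1_init" is a hypothetical
+ * struct regulator_init_data and the GPIO/IRQ numbers are placeholders.
+ *
+ *	static struct wm8994_pdata my_wm8994_pdata = {
+ *		.gpio_base	= 200,
+ *		.irq_base	= 300,
+ *		.ldo = {
+ *			{ .enable = 37, .init_data = &my_ldo1_init },
+ *		},
+ *		.lineout1_diff	= 1,
+ *	};
+ */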
+
+#endif
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
new file mode 100644
index 000000000..db8cef3d5
--- /dev/null
+++ b/include/linux/mfd/wm8994/registers.h
@@ -0,0 +1,4822 @@
+/*
+ * include/linux/mfd/wm8994/registers.h -- Register definitions for WM8994
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM8994_REGISTERS_H__
+#define __MFD_WM8994_REGISTERS_H__
+
+/*
+ * Register values.
+ */
+#define WM8994_SOFTWARE_RESET 0x00
+#define WM8994_POWER_MANAGEMENT_1 0x01
+#define WM8994_POWER_MANAGEMENT_2 0x02
+#define WM8994_POWER_MANAGEMENT_3 0x03
+#define WM8994_POWER_MANAGEMENT_4 0x04
+#define WM8994_POWER_MANAGEMENT_5 0x05
+#define WM8994_POWER_MANAGEMENT_6 0x06
+#define WM8994_INPUT_MIXER_1 0x15
+#define WM8994_LEFT_LINE_INPUT_1_2_VOLUME 0x18
+#define WM8994_LEFT_LINE_INPUT_3_4_VOLUME 0x19
+#define WM8994_RIGHT_LINE_INPUT_1_2_VOLUME 0x1A
+#define WM8994_RIGHT_LINE_INPUT_3_4_VOLUME 0x1B
+#define WM8994_LEFT_OUTPUT_VOLUME 0x1C
+#define WM8994_RIGHT_OUTPUT_VOLUME 0x1D
+#define WM8994_LINE_OUTPUTS_VOLUME 0x1E
+#define WM8994_HPOUT2_VOLUME 0x1F
+#define WM8994_LEFT_OPGA_VOLUME 0x20
+#define WM8994_RIGHT_OPGA_VOLUME 0x21
+#define WM8994_SPKMIXL_ATTENUATION 0x22
+#define WM8994_SPKMIXR_ATTENUATION 0x23
+#define WM8994_SPKOUT_MIXERS 0x24
+#define WM8994_CLASSD 0x25
+#define WM8994_SPEAKER_VOLUME_LEFT 0x26
+#define WM8994_SPEAKER_VOLUME_RIGHT 0x27
+#define WM8994_INPUT_MIXER_2 0x28
+#define WM8994_INPUT_MIXER_3 0x29
+#define WM8994_INPUT_MIXER_4 0x2A
+#define WM8994_INPUT_MIXER_5 0x2B
+#define WM8994_INPUT_MIXER_6 0x2C
+#define WM8994_OUTPUT_MIXER_1 0x2D
+#define WM8994_OUTPUT_MIXER_2 0x2E
+#define WM8994_OUTPUT_MIXER_3 0x2F
+#define WM8994_OUTPUT_MIXER_4 0x30
+#define WM8994_OUTPUT_MIXER_5 0x31
+#define WM8994_OUTPUT_MIXER_6 0x32
+#define WM8994_HPOUT2_MIXER 0x33
+#define WM8994_LINE_MIXER_1 0x34
+#define WM8994_LINE_MIXER_2 0x35
+#define WM8994_SPEAKER_MIXER 0x36
+#define WM8994_ADDITIONAL_CONTROL 0x37
+#define WM8994_ANTIPOP_1 0x38
+#define WM8994_ANTIPOP_2 0x39
+#define WM8994_MICBIAS 0x3A
+#define WM8994_LDO_1 0x3B
+#define WM8994_LDO_2 0x3C
+#define WM8958_MICBIAS1 0x3D
+#define WM8958_MICBIAS2 0x3E
+#define WM8994_CHARGE_PUMP_1 0x4C
+#define WM8958_CHARGE_PUMP_2 0x4D
+#define WM8994_CLASS_W_1 0x51
+#define WM8994_DC_SERVO_1 0x54
+#define WM8994_DC_SERVO_2 0x55
+#define WM8994_DC_SERVO_4 0x57
+#define WM8994_DC_SERVO_READBACK 0x58
+#define WM8994_DC_SERVO_4E 0x59
+#define WM8994_ANALOGUE_HP_1 0x60
+#define WM8958_MIC_DETECT_1 0xD0
+#define WM8958_MIC_DETECT_2 0xD1
+#define WM8958_MIC_DETECT_3 0xD2
+#define WM8994_CHIP_REVISION 0x100
+#define WM8994_CONTROL_INTERFACE 0x101
+#define WM8994_WRITE_SEQUENCER_CTRL_1 0x110
+#define WM8994_WRITE_SEQUENCER_CTRL_2 0x111
+#define WM8994_AIF1_CLOCKING_1 0x200
+#define WM8994_AIF1_CLOCKING_2 0x201
+#define WM8994_AIF2_CLOCKING_1 0x204
+#define WM8994_AIF2_CLOCKING_2 0x205
+#define WM8994_CLOCKING_1 0x208
+#define WM8994_CLOCKING_2 0x209
+#define WM8994_AIF1_RATE 0x210
+#define WM8994_AIF2_RATE 0x211
+#define WM8994_RATE_STATUS 0x212
+#define WM8994_FLL1_CONTROL_1 0x220
+#define WM8994_FLL1_CONTROL_2 0x221
+#define WM8994_FLL1_CONTROL_3 0x222
+#define WM8994_FLL1_CONTROL_4 0x223
+#define WM8994_FLL1_CONTROL_5 0x224
+#define WM8958_FLL1_EFS_1 0x226
+#define WM8958_FLL1_EFS_2 0x227
+#define WM8994_FLL2_CONTROL_1 0x240
+#define WM8994_FLL2_CONTROL_2 0x241
+#define WM8994_FLL2_CONTROL_3 0x242
+#define WM8994_FLL2_CONTROL_4 0x243
+#define WM8994_FLL2_CONTROL_5 0x244
+#define WM8958_FLL2_EFS_1 0x246
+#define WM8958_FLL2_EFS_2 0x247
+#define WM8994_AIF1_CONTROL_1 0x300
+#define WM8994_AIF1_CONTROL_2 0x301
+#define WM8994_AIF1_MASTER_SLAVE 0x302
+#define WM8994_AIF1_BCLK 0x303
+#define WM8994_AIF1ADC_LRCLK 0x304
+#define WM8994_AIF1DAC_LRCLK 0x305
+#define WM8994_AIF1DAC_DATA 0x306
+#define WM8994_AIF1ADC_DATA 0x307
+#define WM8994_AIF2_CONTROL_1 0x310
+#define WM8994_AIF2_CONTROL_2 0x311
+#define WM8994_AIF2_MASTER_SLAVE 0x312
+#define WM8994_AIF2_BCLK 0x313
+#define WM8994_AIF2ADC_LRCLK 0x314
+#define WM8994_AIF2DAC_LRCLK 0x315
+#define WM8994_AIF2DAC_DATA 0x316
+#define WM8994_AIF2ADC_DATA 0x317
+#define WM1811_AIF2TX_CONTROL 0x318
+#define WM8958_AIF3_CONTROL_1 0x320
+#define WM8958_AIF3_CONTROL_2 0x321
+#define WM8958_AIF3DAC_DATA 0x322
+#define WM8958_AIF3ADC_DATA 0x323
+#define WM8994_AIF1_ADC1_LEFT_VOLUME 0x400
+#define WM8994_AIF1_ADC1_RIGHT_VOLUME 0x401
+#define WM8994_AIF1_DAC1_LEFT_VOLUME 0x402
+#define WM8994_AIF1_DAC1_RIGHT_VOLUME 0x403
+#define WM8994_AIF1_ADC2_LEFT_VOLUME 0x404
+#define WM8994_AIF1_ADC2_RIGHT_VOLUME 0x405
+#define WM8994_AIF1_DAC2_LEFT_VOLUME 0x406
+#define WM8994_AIF1_DAC2_RIGHT_VOLUME 0x407
+#define WM8994_AIF1_ADC1_FILTERS 0x410
+#define WM8994_AIF1_ADC2_FILTERS 0x411
+#define WM8994_AIF1_DAC1_FILTERS_1 0x420
+#define WM8994_AIF1_DAC1_FILTERS_2 0x421
+#define WM8994_AIF1_DAC2_FILTERS_1 0x422
+#define WM8994_AIF1_DAC2_FILTERS_2 0x423
+#define WM8958_AIF1_DAC1_NOISE_GATE 0x430
+#define WM8958_AIF1_DAC2_NOISE_GATE 0x431
+#define WM8994_AIF1_DRC1_1 0x440
+#define WM8994_AIF1_DRC1_2 0x441
+#define WM8994_AIF1_DRC1_3 0x442
+#define WM8994_AIF1_DRC1_4 0x443
+#define WM8994_AIF1_DRC1_5 0x444
+#define WM8994_AIF1_DRC2_1 0x450
+#define WM8994_AIF1_DRC2_2 0x451
+#define WM8994_AIF1_DRC2_3 0x452
+#define WM8994_AIF1_DRC2_4 0x453
+#define WM8994_AIF1_DRC2_5 0x454
+#define WM8994_AIF1_DAC1_EQ_GAINS_1 0x480
+#define WM8994_AIF1_DAC1_EQ_GAINS_2 0x481
+#define WM8994_AIF1_DAC1_EQ_BAND_1_A 0x482
+#define WM8994_AIF1_DAC1_EQ_BAND_1_B 0x483
+#define WM8994_AIF1_DAC1_EQ_BAND_1_PG 0x484
+#define WM8994_AIF1_DAC1_EQ_BAND_2_A 0x485
+#define WM8994_AIF1_DAC1_EQ_BAND_2_B 0x486
+#define WM8994_AIF1_DAC1_EQ_BAND_2_C 0x487
+#define WM8994_AIF1_DAC1_EQ_BAND_2_PG 0x488
+#define WM8994_AIF1_DAC1_EQ_BAND_3_A 0x489
+#define WM8994_AIF1_DAC1_EQ_BAND_3_B 0x48A
+#define WM8994_AIF1_DAC1_EQ_BAND_3_C 0x48B
+#define WM8994_AIF1_DAC1_EQ_BAND_3_PG 0x48C
+#define WM8994_AIF1_DAC1_EQ_BAND_4_A 0x48D
+#define WM8994_AIF1_DAC1_EQ_BAND_4_B 0x48E
+#define WM8994_AIF1_DAC1_EQ_BAND_4_C 0x48F
+#define WM8994_AIF1_DAC1_EQ_BAND_4_PG 0x490
+#define WM8994_AIF1_DAC1_EQ_BAND_5_A 0x491
+#define WM8994_AIF1_DAC1_EQ_BAND_5_B 0x492
+#define WM8994_AIF1_DAC1_EQ_BAND_5_PG 0x493
+#define WM8994_AIF1_DAC1_EQ_BAND_1_C 0x494
+#define WM8994_AIF1_DAC2_EQ_GAINS_1 0x4A0
+#define WM8994_AIF1_DAC2_EQ_GAINS_2 0x4A1
+#define WM8994_AIF1_DAC2_EQ_BAND_1_A 0x4A2
+#define WM8994_AIF1_DAC2_EQ_BAND_1_B 0x4A3
+#define WM8994_AIF1_DAC2_EQ_BAND_1_PG 0x4A4
+#define WM8994_AIF1_DAC2_EQ_BAND_2_A 0x4A5
+#define WM8994_AIF1_DAC2_EQ_BAND_2_B 0x4A6
+#define WM8994_AIF1_DAC2_EQ_BAND_2_C 0x4A7
+#define WM8994_AIF1_DAC2_EQ_BAND_2_PG 0x4A8
+#define WM8994_AIF1_DAC2_EQ_BAND_3_A 0x4A9
+#define WM8994_AIF1_DAC2_EQ_BAND_3_B 0x4AA
+#define WM8994_AIF1_DAC2_EQ_BAND_3_C 0x4AB
+#define WM8994_AIF1_DAC2_EQ_BAND_3_PG 0x4AC
+#define WM8994_AIF1_DAC2_EQ_BAND_4_A 0x4AD
+#define WM8994_AIF1_DAC2_EQ_BAND_4_B 0x4AE
+#define WM8994_AIF1_DAC2_EQ_BAND_4_C 0x4AF
+#define WM8994_AIF1_DAC2_EQ_BAND_4_PG 0x4B0
+#define WM8994_AIF1_DAC2_EQ_BAND_5_A 0x4B1
+#define WM8994_AIF1_DAC2_EQ_BAND_5_B 0x4B2
+#define WM8994_AIF1_DAC2_EQ_BAND_5_PG 0x4B3
+#define WM8994_AIF1_DAC2_EQ_BAND_1_C 0x4B4
+#define WM8994_AIF2_ADC_LEFT_VOLUME 0x500
+#define WM8994_AIF2_ADC_RIGHT_VOLUME 0x501
+#define WM8994_AIF2_DAC_LEFT_VOLUME 0x502
+#define WM8994_AIF2_DAC_RIGHT_VOLUME 0x503
+#define WM8994_AIF2_ADC_FILTERS 0x510
+#define WM8994_AIF2_DAC_FILTERS_1 0x520
+#define WM8994_AIF2_DAC_FILTERS_2 0x521
+#define WM8958_AIF2_DAC_NOISE_GATE 0x530
+#define WM8994_AIF2_DRC_1 0x540
+#define WM8994_AIF2_DRC_2 0x541
+#define WM8994_AIF2_DRC_3 0x542
+#define WM8994_AIF2_DRC_4 0x543
+#define WM8994_AIF2_DRC_5 0x544
+#define WM8994_AIF2_EQ_GAINS_1 0x580
+#define WM8994_AIF2_EQ_GAINS_2 0x581
+#define WM8994_AIF2_EQ_BAND_1_A 0x582
+#define WM8994_AIF2_EQ_BAND_1_B 0x583
+#define WM8994_AIF2_EQ_BAND_1_PG 0x584
+#define WM8994_AIF2_EQ_BAND_2_A 0x585
+#define WM8994_AIF2_EQ_BAND_2_B 0x586
+#define WM8994_AIF2_EQ_BAND_2_C 0x587
+#define WM8994_AIF2_EQ_BAND_2_PG 0x588
+#define WM8994_AIF2_EQ_BAND_3_A 0x589
+#define WM8994_AIF2_EQ_BAND_3_B 0x58A
+#define WM8994_AIF2_EQ_BAND_3_C 0x58B
+#define WM8994_AIF2_EQ_BAND_3_PG 0x58C
+#define WM8994_AIF2_EQ_BAND_4_A 0x58D
+#define WM8994_AIF2_EQ_BAND_4_B 0x58E
+#define WM8994_AIF2_EQ_BAND_4_C 0x58F
+#define WM8994_AIF2_EQ_BAND_4_PG 0x590
+#define WM8994_AIF2_EQ_BAND_5_A 0x591
+#define WM8994_AIF2_EQ_BAND_5_B 0x592
+#define WM8994_AIF2_EQ_BAND_5_PG 0x593
+#define WM8994_AIF2_EQ_BAND_1_C 0x594
+#define WM8994_DAC1_MIXER_VOLUMES 0x600
+#define WM8994_DAC1_LEFT_MIXER_ROUTING 0x601
+#define WM8994_DAC1_RIGHT_MIXER_ROUTING 0x602
+#define WM8994_DAC2_MIXER_VOLUMES 0x603
+#define WM8994_DAC2_LEFT_MIXER_ROUTING 0x604
+#define WM8994_DAC2_RIGHT_MIXER_ROUTING 0x605
+#define WM8994_AIF1_ADC1_LEFT_MIXER_ROUTING 0x606
+#define WM8994_AIF1_ADC1_RIGHT_MIXER_ROUTING 0x607
+#define WM8994_AIF1_ADC2_LEFT_MIXER_ROUTING 0x608
+#define WM8994_AIF1_ADC2_RIGHT_MIXER_ROUTING 0x609
+#define WM8994_DAC1_LEFT_VOLUME 0x610
+#define WM8994_DAC1_RIGHT_VOLUME 0x611
+#define WM8994_DAC2_LEFT_VOLUME 0x612
+#define WM8994_DAC2_RIGHT_VOLUME 0x613
+#define WM8994_DAC_SOFTMUTE 0x614
+#define WM8994_OVERSAMPLING 0x620
+#define WM8994_SIDETONE 0x621
+#define WM8994_GPIO_1 0x700
+#define WM8994_GPIO_2 0x701
+#define WM8994_GPIO_3 0x702
+#define WM8994_GPIO_4 0x703
+#define WM8994_GPIO_5 0x704
+#define WM8994_GPIO_6 0x705
+#define WM1811_JACKDET_CTRL 0x705
+#define WM8994_GPIO_7 0x706
+#define WM8994_GPIO_8 0x707
+#define WM8994_GPIO_9 0x708
+#define WM8994_GPIO_10 0x709
+#define WM8994_GPIO_11 0x70A
+#define WM8994_PULL_CONTROL_1 0x720
+#define WM8994_PULL_CONTROL_2 0x721
+#define WM8994_INTERRUPT_STATUS_1 0x730
+#define WM8994_INTERRUPT_STATUS_2 0x731
+#define WM8994_INTERRUPT_RAW_STATUS_2 0x732
+#define WM8994_INTERRUPT_STATUS_1_MASK 0x738
+#define WM8994_INTERRUPT_STATUS_2_MASK 0x739
+#define WM8994_INTERRUPT_CONTROL 0x740
+#define WM8994_IRQ_DEBOUNCE 0x748
+#define WM8958_DSP2_PROGRAM 0x900
+#define WM8958_DSP2_CONFIG 0x901
+#define WM8958_DSP2_MAGICNUM 0xA00
+#define WM8958_DSP2_RELEASEYEAR 0xA01
+#define WM8958_DSP2_RELEASEMONTHDAY 0xA02
+#define WM8958_DSP2_RELEASETIME 0xA03
+#define WM8958_DSP2_VERMAJMIN 0xA04
+#define WM8958_DSP2_VERBUILD 0xA05
+#define WM8958_DSP2_TESTREG 0xA06
+#define WM8958_DSP2_XORREG 0xA07
+#define WM8958_DSP2_SHIFTMAXX 0xA08
+#define WM8958_DSP2_SHIFTMAXY 0xA09
+#define WM8958_DSP2_SHIFTMAXZ 0xA0A
+#define WM8958_DSP2_SHIFTMAXEXTLO 0xA0B
+#define WM8958_DSP2_AESSELECT 0xA0C
+#define WM8958_DSP2_EXECCONTROL 0xA0D
+#define WM8958_DSP2_SAMPLEBREAK 0xA0E
+#define WM8958_DSP2_COUNTBREAK 0xA0F
+#define WM8958_DSP2_INTSTATUS 0xA10
+#define WM8958_DSP2_EVENTSTATUS 0xA11
+#define WM8958_DSP2_INTMASK 0xA12
+#define WM8958_DSP2_CONFIGDWIDTH 0xA13
+#define WM8958_DSP2_CONFIGINSTR 0xA14
+#define WM8958_DSP2_CONFIGDMEM 0xA15
+#define WM8958_DSP2_CONFIGDELAYS 0xA16
+#define WM8958_DSP2_CONFIGNUMIO 0xA17
+#define WM8958_DSP2_CONFIGEXTDEPTH 0xA18
+#define WM8958_DSP2_CONFIGMULTIPLIER 0xA19
+#define WM8958_DSP2_CONFIGCTRLDWIDTH 0xA1A
+#define WM8958_DSP2_CONFIGPIPELINE 0xA1B
+#define WM8958_DSP2_SHIFTMAXEXTHI 0xA1C
+#define WM8958_DSP2_SWVERSIONREG 0xA1D
+#define WM8958_DSP2_CONFIGXMEM 0xA1E
+#define WM8958_DSP2_CONFIGYMEM 0xA1F
+#define WM8958_DSP2_CONFIGZMEM 0xA20
+#define WM8958_FW_BUILD_1 0x2000
+#define WM8958_FW_BUILD_0 0x2001
+#define WM8958_FW_ID_1 0x2002
+#define WM8958_FW_ID_0 0x2003
+#define WM8958_FW_MAJOR_1 0x2004
+#define WM8958_FW_MAJOR_0 0x2005
+#define WM8958_FW_MINOR_1 0x2006
+#define WM8958_FW_MINOR_0 0x2007
+#define WM8958_FW_PATCH_1 0x2008
+#define WM8958_FW_PATCH_0 0x2009
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1 0x2200
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_2 0x2201
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_1 0x2202
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_2 0x2203
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_1 0x2204
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_2 0x2205
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_1 0x2206
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_2 0x2207
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_1 0x2208
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_2 0x2209
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_1 0x220A
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_2 0x220B
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_1 0x220C
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_2 0x220D
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_1 0x220E
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_2 0x220F
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_1 0x2210
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_2 0x2211
+#define WM8958_MBC_BAND_1_LOWER_CUTOFF_1 0x2212
+#define WM8958_MBC_BAND_1_LOWER_CUTOFF_2 0x2213
+#define WM8958_MBC_BAND_1_K_1 0x2400
+#define WM8958_MBC_BAND_1_K_2 0x2401
+#define WM8958_MBC_BAND_1_N1_1 0x2402
+#define WM8958_MBC_BAND_1_N1_2 0x2403
+#define WM8958_MBC_BAND_1_N2_1 0x2404
+#define WM8958_MBC_BAND_1_N2_2 0x2405
+#define WM8958_MBC_BAND_1_N3_1 0x2406
+#define WM8958_MBC_BAND_1_N3_2 0x2407
+#define WM8958_MBC_BAND_1_N4_1 0x2408
+#define WM8958_MBC_BAND_1_N4_2 0x2409
+#define WM8958_MBC_BAND_1_N5_1 0x240A
+#define WM8958_MBC_BAND_1_N5_2 0x240B
+#define WM8958_MBC_BAND_1_X1_1 0x240C
+#define WM8958_MBC_BAND_1_X1_2 0x240D
+#define WM8958_MBC_BAND_1_X2_1 0x240E
+#define WM8958_MBC_BAND_1_X2_2 0x240F
+#define WM8958_MBC_BAND_1_X3_1 0x2410
+#define WM8958_MBC_BAND_1_X3_2 0x2411
+#define WM8958_MBC_BAND_1_ATTACK_1 0x2412
+#define WM8958_MBC_BAND_1_ATTACK_2 0x2413
+#define WM8958_MBC_BAND_1_DECAY_1 0x2414
+#define WM8958_MBC_BAND_1_DECAY_2 0x2415
+#define WM8958_MBC_BAND_2_K_1 0x2416
+#define WM8958_MBC_BAND_2_K_2 0x2417
+#define WM8958_MBC_BAND_2_N1_1 0x2418
+#define WM8958_MBC_BAND_2_N1_2 0x2419
+#define WM8958_MBC_BAND_2_N2_1 0x241A
+#define WM8958_MBC_BAND_2_N2_2 0x241B
+#define WM8958_MBC_BAND_2_N3_1 0x241C
+#define WM8958_MBC_BAND_2_N3_2 0x241D
+#define WM8958_MBC_BAND_2_N4_1 0x241E
+#define WM8958_MBC_BAND_2_N4_2 0x241F
+#define WM8958_MBC_BAND_2_N5_1 0x2420
+#define WM8958_MBC_BAND_2_N5_2 0x2421
+#define WM8958_MBC_BAND_2_X1_1 0x2422
+#define WM8958_MBC_BAND_2_X1_2 0x2423
+#define WM8958_MBC_BAND_2_X2_1 0x2424
+#define WM8958_MBC_BAND_2_X2_2 0x2425
+#define WM8958_MBC_BAND_2_X3_1 0x2426
+#define WM8958_MBC_BAND_2_X3_2 0x2427
+#define WM8958_MBC_BAND_2_ATTACK_1 0x2428
+#define WM8958_MBC_BAND_2_ATTACK_2 0x2429
+#define WM8958_MBC_BAND_2_DECAY_1 0x242A
+#define WM8958_MBC_BAND_2_DECAY_2 0x242B
+#define WM8958_MBC_B2_PG2_1 0x242C
+#define WM8958_MBC_B2_PG2_2 0x242D
+#define WM8958_MBC_B1_PG2_1 0x242E
+#define WM8958_MBC_B1_PG2_2 0x242F
+#define WM8958_MBC_CROSSOVER_1 0x2600
+#define WM8958_MBC_CROSSOVER_2 0x2601
+#define WM8958_MBC_HPF_1 0x2602
+#define WM8958_MBC_HPF_2 0x2603
+#define WM8958_MBC_LPF_1 0x2606
+#define WM8958_MBC_LPF_2 0x2607
+#define WM8958_MBC_RMS_LIMIT_1 0x260A
+#define WM8958_MBC_RMS_LIMIT_2 0x260B
+#define WM8994_WRITE_SEQUENCER_0 0x3000
+#define WM8994_WRITE_SEQUENCER_1 0x3001
+#define WM8994_WRITE_SEQUENCER_2 0x3002
+#define WM8994_WRITE_SEQUENCER_3 0x3003
+#define WM8994_WRITE_SEQUENCER_4 0x3004
+#define WM8994_WRITE_SEQUENCER_5 0x3005
+#define WM8994_WRITE_SEQUENCER_6 0x3006
+#define WM8994_WRITE_SEQUENCER_7 0x3007
+#define WM8994_WRITE_SEQUENCER_8 0x3008
+#define WM8994_WRITE_SEQUENCER_9 0x3009
+#define WM8994_WRITE_SEQUENCER_10 0x300A
+#define WM8994_WRITE_SEQUENCER_11 0x300B
+#define WM8994_WRITE_SEQUENCER_12 0x300C
+#define WM8994_WRITE_SEQUENCER_13 0x300D
+#define WM8994_WRITE_SEQUENCER_14 0x300E
+#define WM8994_WRITE_SEQUENCER_15 0x300F
+#define WM8994_WRITE_SEQUENCER_16 0x3010
+#define WM8994_WRITE_SEQUENCER_17 0x3011
+#define WM8994_WRITE_SEQUENCER_18 0x3012
+#define WM8994_WRITE_SEQUENCER_19 0x3013
+#define WM8994_WRITE_SEQUENCER_20 0x3014
+#define WM8994_WRITE_SEQUENCER_21 0x3015
+#define WM8994_WRITE_SEQUENCER_22 0x3016
+#define WM8994_WRITE_SEQUENCER_23 0x3017
+#define WM8994_WRITE_SEQUENCER_24 0x3018
+#define WM8994_WRITE_SEQUENCER_25 0x3019
+#define WM8994_WRITE_SEQUENCER_26 0x301A
+#define WM8994_WRITE_SEQUENCER_27 0x301B
+#define WM8994_WRITE_SEQUENCER_28 0x301C
+#define WM8994_WRITE_SEQUENCER_29 0x301D
+#define WM8994_WRITE_SEQUENCER_30 0x301E
+#define WM8994_WRITE_SEQUENCER_31 0x301F
+#define WM8994_WRITE_SEQUENCER_32 0x3020
+#define WM8994_WRITE_SEQUENCER_33 0x3021
+#define WM8994_WRITE_SEQUENCER_34 0x3022
+#define WM8994_WRITE_SEQUENCER_35 0x3023
+#define WM8994_WRITE_SEQUENCER_36 0x3024
+#define WM8994_WRITE_SEQUENCER_37 0x3025
+#define WM8994_WRITE_SEQUENCER_38 0x3026
+#define WM8994_WRITE_SEQUENCER_39 0x3027
+#define WM8994_WRITE_SEQUENCER_40 0x3028
+#define WM8994_WRITE_SEQUENCER_41 0x3029
+#define WM8994_WRITE_SEQUENCER_42 0x302A
+#define WM8994_WRITE_SEQUENCER_43 0x302B
+#define WM8994_WRITE_SEQUENCER_44 0x302C
+#define WM8994_WRITE_SEQUENCER_45 0x302D
+#define WM8994_WRITE_SEQUENCER_46 0x302E
+#define WM8994_WRITE_SEQUENCER_47 0x302F
+#define WM8994_WRITE_SEQUENCER_48 0x3030
+#define WM8994_WRITE_SEQUENCER_49 0x3031
+#define WM8994_WRITE_SEQUENCER_50 0x3032
+#define WM8994_WRITE_SEQUENCER_51 0x3033
+#define WM8994_WRITE_SEQUENCER_52 0x3034
+#define WM8994_WRITE_SEQUENCER_53 0x3035
+#define WM8994_WRITE_SEQUENCER_54 0x3036
+#define WM8994_WRITE_SEQUENCER_55 0x3037
+#define WM8994_WRITE_SEQUENCER_56 0x3038
+#define WM8994_WRITE_SEQUENCER_57 0x3039
+#define WM8994_WRITE_SEQUENCER_58 0x303A
+#define WM8994_WRITE_SEQUENCER_59 0x303B
+#define WM8994_WRITE_SEQUENCER_60 0x303C
+#define WM8994_WRITE_SEQUENCER_61 0x303D
+#define WM8994_WRITE_SEQUENCER_62 0x303E
+#define WM8994_WRITE_SEQUENCER_63 0x303F
+#define WM8994_WRITE_SEQUENCER_64 0x3040
+#define WM8994_WRITE_SEQUENCER_65 0x3041
+#define WM8994_WRITE_SEQUENCER_66 0x3042
+#define WM8994_WRITE_SEQUENCER_67 0x3043
+#define WM8994_WRITE_SEQUENCER_68 0x3044
+#define WM8994_WRITE_SEQUENCER_69 0x3045
+#define WM8994_WRITE_SEQUENCER_70 0x3046
+#define WM8994_WRITE_SEQUENCER_71 0x3047
+#define WM8994_WRITE_SEQUENCER_72 0x3048
+#define WM8994_WRITE_SEQUENCER_73 0x3049
+#define WM8994_WRITE_SEQUENCER_74 0x304A
+#define WM8994_WRITE_SEQUENCER_75 0x304B
+#define WM8994_WRITE_SEQUENCER_76 0x304C
+#define WM8994_WRITE_SEQUENCER_77 0x304D
+#define WM8994_WRITE_SEQUENCER_78 0x304E
+#define WM8994_WRITE_SEQUENCER_79 0x304F
+#define WM8994_WRITE_SEQUENCER_80 0x3050
+#define WM8994_WRITE_SEQUENCER_81 0x3051
+#define WM8994_WRITE_SEQUENCER_82 0x3052
+#define WM8994_WRITE_SEQUENCER_83 0x3053
+#define WM8994_WRITE_SEQUENCER_84 0x3054
+#define WM8994_WRITE_SEQUENCER_85 0x3055
+#define WM8994_WRITE_SEQUENCER_86 0x3056
+#define WM8994_WRITE_SEQUENCER_87 0x3057
+#define WM8994_WRITE_SEQUENCER_88 0x3058
+#define WM8994_WRITE_SEQUENCER_89 0x3059
+#define WM8994_WRITE_SEQUENCER_90 0x305A
+#define WM8994_WRITE_SEQUENCER_91 0x305B
+#define WM8994_WRITE_SEQUENCER_92 0x305C
+#define WM8994_WRITE_SEQUENCER_93 0x305D
+#define WM8994_WRITE_SEQUENCER_94 0x305E
+#define WM8994_WRITE_SEQUENCER_95 0x305F
+#define WM8994_WRITE_SEQUENCER_96 0x3060
+#define WM8994_WRITE_SEQUENCER_97 0x3061
+#define WM8994_WRITE_SEQUENCER_98 0x3062
+#define WM8994_WRITE_SEQUENCER_99 0x3063
+#define WM8994_WRITE_SEQUENCER_100 0x3064
+#define WM8994_WRITE_SEQUENCER_101 0x3065
+#define WM8994_WRITE_SEQUENCER_102 0x3066
+#define WM8994_WRITE_SEQUENCER_103 0x3067
+#define WM8994_WRITE_SEQUENCER_104 0x3068
+#define WM8994_WRITE_SEQUENCER_105 0x3069
+#define WM8994_WRITE_SEQUENCER_106 0x306A
+#define WM8994_WRITE_SEQUENCER_107 0x306B
+#define WM8994_WRITE_SEQUENCER_108 0x306C
+#define WM8994_WRITE_SEQUENCER_109 0x306D
+#define WM8994_WRITE_SEQUENCER_110 0x306E
+#define WM8994_WRITE_SEQUENCER_111 0x306F
+#define WM8994_WRITE_SEQUENCER_112 0x3070
+#define WM8994_WRITE_SEQUENCER_113 0x3071
+#define WM8994_WRITE_SEQUENCER_114 0x3072
+#define WM8994_WRITE_SEQUENCER_115 0x3073
+#define WM8994_WRITE_SEQUENCER_116 0x3074
+#define WM8994_WRITE_SEQUENCER_117 0x3075
+#define WM8994_WRITE_SEQUENCER_118 0x3076
+#define WM8994_WRITE_SEQUENCER_119 0x3077
+#define WM8994_WRITE_SEQUENCER_120 0x3078
+#define WM8994_WRITE_SEQUENCER_121 0x3079
+#define WM8994_WRITE_SEQUENCER_122 0x307A
+#define WM8994_WRITE_SEQUENCER_123 0x307B
+#define WM8994_WRITE_SEQUENCER_124 0x307C
+#define WM8994_WRITE_SEQUENCER_125 0x307D
+#define WM8994_WRITE_SEQUENCER_126 0x307E
+#define WM8994_WRITE_SEQUENCER_127 0x307F
+#define WM8994_WRITE_SEQUENCER_128 0x3080
+#define WM8994_WRITE_SEQUENCER_129 0x3081
+#define WM8994_WRITE_SEQUENCER_130 0x3082
+#define WM8994_WRITE_SEQUENCER_131 0x3083
+#define WM8994_WRITE_SEQUENCER_132 0x3084
+#define WM8994_WRITE_SEQUENCER_133 0x3085
+#define WM8994_WRITE_SEQUENCER_134 0x3086
+#define WM8994_WRITE_SEQUENCER_135 0x3087
+#define WM8994_WRITE_SEQUENCER_136 0x3088
+#define WM8994_WRITE_SEQUENCER_137 0x3089
+#define WM8994_WRITE_SEQUENCER_138 0x308A
+#define WM8994_WRITE_SEQUENCER_139 0x308B
+#define WM8994_WRITE_SEQUENCER_140 0x308C
+#define WM8994_WRITE_SEQUENCER_141 0x308D
+#define WM8994_WRITE_SEQUENCER_142 0x308E
+#define WM8994_WRITE_SEQUENCER_143 0x308F
+#define WM8994_WRITE_SEQUENCER_144 0x3090
+#define WM8994_WRITE_SEQUENCER_145 0x3091
+#define WM8994_WRITE_SEQUENCER_146 0x3092
+#define WM8994_WRITE_SEQUENCER_147 0x3093
+#define WM8994_WRITE_SEQUENCER_148 0x3094
+#define WM8994_WRITE_SEQUENCER_149 0x3095
+#define WM8994_WRITE_SEQUENCER_150 0x3096
+#define WM8994_WRITE_SEQUENCER_151 0x3097
+#define WM8994_WRITE_SEQUENCER_152 0x3098
+#define WM8994_WRITE_SEQUENCER_153 0x3099
+#define WM8994_WRITE_SEQUENCER_154 0x309A
+#define WM8994_WRITE_SEQUENCER_155 0x309B
+#define WM8994_WRITE_SEQUENCER_156 0x309C
+#define WM8994_WRITE_SEQUENCER_157 0x309D
+#define WM8994_WRITE_SEQUENCER_158 0x309E
+#define WM8994_WRITE_SEQUENCER_159 0x309F
+#define WM8994_WRITE_SEQUENCER_160 0x30A0
+#define WM8994_WRITE_SEQUENCER_161 0x30A1
+#define WM8994_WRITE_SEQUENCER_162 0x30A2
+#define WM8994_WRITE_SEQUENCER_163 0x30A3
+#define WM8994_WRITE_SEQUENCER_164 0x30A4
+#define WM8994_WRITE_SEQUENCER_165 0x30A5
+#define WM8994_WRITE_SEQUENCER_166 0x30A6
+#define WM8994_WRITE_SEQUENCER_167 0x30A7
+#define WM8994_WRITE_SEQUENCER_168 0x30A8
+#define WM8994_WRITE_SEQUENCER_169 0x30A9
+#define WM8994_WRITE_SEQUENCER_170 0x30AA
+#define WM8994_WRITE_SEQUENCER_171 0x30AB
+#define WM8994_WRITE_SEQUENCER_172 0x30AC
+#define WM8994_WRITE_SEQUENCER_173 0x30AD
+#define WM8994_WRITE_SEQUENCER_174 0x30AE
+#define WM8994_WRITE_SEQUENCER_175 0x30AF
+#define WM8994_WRITE_SEQUENCER_176 0x30B0
+#define WM8994_WRITE_SEQUENCER_177 0x30B1
+#define WM8994_WRITE_SEQUENCER_178 0x30B2
+#define WM8994_WRITE_SEQUENCER_179 0x30B3
+#define WM8994_WRITE_SEQUENCER_180 0x30B4
+#define WM8994_WRITE_SEQUENCER_181 0x30B5
+#define WM8994_WRITE_SEQUENCER_182 0x30B6
+#define WM8994_WRITE_SEQUENCER_183 0x30B7
+#define WM8994_WRITE_SEQUENCER_184 0x30B8
+#define WM8994_WRITE_SEQUENCER_185 0x30B9
+#define WM8994_WRITE_SEQUENCER_186 0x30BA
+#define WM8994_WRITE_SEQUENCER_187 0x30BB
+#define WM8994_WRITE_SEQUENCER_188 0x30BC
+#define WM8994_WRITE_SEQUENCER_189 0x30BD
+#define WM8994_WRITE_SEQUENCER_190 0x30BE
+#define WM8994_WRITE_SEQUENCER_191 0x30BF
+#define WM8994_WRITE_SEQUENCER_192 0x30C0
+#define WM8994_WRITE_SEQUENCER_193 0x30C1
+#define WM8994_WRITE_SEQUENCER_194 0x30C2
+#define WM8994_WRITE_SEQUENCER_195 0x30C3
+#define WM8994_WRITE_SEQUENCER_196 0x30C4
+#define WM8994_WRITE_SEQUENCER_197 0x30C5
+#define WM8994_WRITE_SEQUENCER_198 0x30C6
+#define WM8994_WRITE_SEQUENCER_199 0x30C7
+#define WM8994_WRITE_SEQUENCER_200 0x30C8
+#define WM8994_WRITE_SEQUENCER_201 0x30C9
+#define WM8994_WRITE_SEQUENCER_202 0x30CA
+#define WM8994_WRITE_SEQUENCER_203 0x30CB
+#define WM8994_WRITE_SEQUENCER_204 0x30CC
+#define WM8994_WRITE_SEQUENCER_205 0x30CD
+#define WM8994_WRITE_SEQUENCER_206 0x30CE
+#define WM8994_WRITE_SEQUENCER_207 0x30CF
+#define WM8994_WRITE_SEQUENCER_208 0x30D0
+#define WM8994_WRITE_SEQUENCER_209 0x30D1
+#define WM8994_WRITE_SEQUENCER_210 0x30D2
+#define WM8994_WRITE_SEQUENCER_211 0x30D3
+#define WM8994_WRITE_SEQUENCER_212 0x30D4
+#define WM8994_WRITE_SEQUENCER_213 0x30D5
+#define WM8994_WRITE_SEQUENCER_214 0x30D6
+#define WM8994_WRITE_SEQUENCER_215 0x30D7
+#define WM8994_WRITE_SEQUENCER_216 0x30D8
+#define WM8994_WRITE_SEQUENCER_217 0x30D9
+#define WM8994_WRITE_SEQUENCER_218 0x30DA
+#define WM8994_WRITE_SEQUENCER_219 0x30DB
+#define WM8994_WRITE_SEQUENCER_220 0x30DC
+#define WM8994_WRITE_SEQUENCER_221 0x30DD
+#define WM8994_WRITE_SEQUENCER_222 0x30DE
+#define WM8994_WRITE_SEQUENCER_223 0x30DF
+#define WM8994_WRITE_SEQUENCER_224 0x30E0
+#define WM8994_WRITE_SEQUENCER_225 0x30E1
+#define WM8994_WRITE_SEQUENCER_226 0x30E2
+#define WM8994_WRITE_SEQUENCER_227 0x30E3
+#define WM8994_WRITE_SEQUENCER_228 0x30E4
+#define WM8994_WRITE_SEQUENCER_229 0x30E5
+#define WM8994_WRITE_SEQUENCER_230 0x30E6
+#define WM8994_WRITE_SEQUENCER_231 0x30E7
+#define WM8994_WRITE_SEQUENCER_232 0x30E8
+#define WM8994_WRITE_SEQUENCER_233 0x30E9
+#define WM8994_WRITE_SEQUENCER_234 0x30EA
+#define WM8994_WRITE_SEQUENCER_235 0x30EB
+#define WM8994_WRITE_SEQUENCER_236 0x30EC
+#define WM8994_WRITE_SEQUENCER_237 0x30ED
+#define WM8994_WRITE_SEQUENCER_238 0x30EE
+#define WM8994_WRITE_SEQUENCER_239 0x30EF
+#define WM8994_WRITE_SEQUENCER_240 0x30F0
+#define WM8994_WRITE_SEQUENCER_241 0x30F1
+#define WM8994_WRITE_SEQUENCER_242 0x30F2
+#define WM8994_WRITE_SEQUENCER_243 0x30F3
+#define WM8994_WRITE_SEQUENCER_244 0x30F4
+#define WM8994_WRITE_SEQUENCER_245 0x30F5
+#define WM8994_WRITE_SEQUENCER_246 0x30F6
+#define WM8994_WRITE_SEQUENCER_247 0x30F7
+#define WM8994_WRITE_SEQUENCER_248 0x30F8
+#define WM8994_WRITE_SEQUENCER_249 0x30F9
+#define WM8994_WRITE_SEQUENCER_250 0x30FA
+#define WM8994_WRITE_SEQUENCER_251 0x30FB
+#define WM8994_WRITE_SEQUENCER_252 0x30FC
+#define WM8994_WRITE_SEQUENCER_253 0x30FD
+#define WM8994_WRITE_SEQUENCER_254 0x30FE
+#define WM8994_WRITE_SEQUENCER_255 0x30FF
+#define WM8994_WRITE_SEQUENCER_256 0x3100
+#define WM8994_WRITE_SEQUENCER_257 0x3101
+#define WM8994_WRITE_SEQUENCER_258 0x3102
+#define WM8994_WRITE_SEQUENCER_259 0x3103
+#define WM8994_WRITE_SEQUENCER_260 0x3104
+#define WM8994_WRITE_SEQUENCER_261 0x3105
+#define WM8994_WRITE_SEQUENCER_262 0x3106
+#define WM8994_WRITE_SEQUENCER_263 0x3107
+#define WM8994_WRITE_SEQUENCER_264 0x3108
+#define WM8994_WRITE_SEQUENCER_265 0x3109
+#define WM8994_WRITE_SEQUENCER_266 0x310A
+#define WM8994_WRITE_SEQUENCER_267 0x310B
+#define WM8994_WRITE_SEQUENCER_268 0x310C
+#define WM8994_WRITE_SEQUENCER_269 0x310D
+#define WM8994_WRITE_SEQUENCER_270 0x310E
+#define WM8994_WRITE_SEQUENCER_271 0x310F
+#define WM8994_WRITE_SEQUENCER_272 0x3110
+#define WM8994_WRITE_SEQUENCER_273 0x3111
+#define WM8994_WRITE_SEQUENCER_274 0x3112
+#define WM8994_WRITE_SEQUENCER_275 0x3113
+#define WM8994_WRITE_SEQUENCER_276 0x3114
+#define WM8994_WRITE_SEQUENCER_277 0x3115
+#define WM8994_WRITE_SEQUENCER_278 0x3116
+#define WM8994_WRITE_SEQUENCER_279 0x3117
+#define WM8994_WRITE_SEQUENCER_280 0x3118
+#define WM8994_WRITE_SEQUENCER_281 0x3119
+#define WM8994_WRITE_SEQUENCER_282 0x311A
+#define WM8994_WRITE_SEQUENCER_283 0x311B
+#define WM8994_WRITE_SEQUENCER_284 0x311C
+#define WM8994_WRITE_SEQUENCER_285 0x311D
+#define WM8994_WRITE_SEQUENCER_286 0x311E
+#define WM8994_WRITE_SEQUENCER_287 0x311F
+#define WM8994_WRITE_SEQUENCER_288 0x3120
+#define WM8994_WRITE_SEQUENCER_289 0x3121
+#define WM8994_WRITE_SEQUENCER_290 0x3122
+#define WM8994_WRITE_SEQUENCER_291 0x3123
+#define WM8994_WRITE_SEQUENCER_292 0x3124
+#define WM8994_WRITE_SEQUENCER_293 0x3125
+#define WM8994_WRITE_SEQUENCER_294 0x3126
+#define WM8994_WRITE_SEQUENCER_295 0x3127
+#define WM8994_WRITE_SEQUENCER_296 0x3128
+#define WM8994_WRITE_SEQUENCER_297 0x3129
+#define WM8994_WRITE_SEQUENCER_298 0x312A
+#define WM8994_WRITE_SEQUENCER_299 0x312B
+#define WM8994_WRITE_SEQUENCER_300 0x312C
+#define WM8994_WRITE_SEQUENCER_301 0x312D
+#define WM8994_WRITE_SEQUENCER_302 0x312E
+#define WM8994_WRITE_SEQUENCER_303 0x312F
+#define WM8994_WRITE_SEQUENCER_304 0x3130
+#define WM8994_WRITE_SEQUENCER_305 0x3131
+#define WM8994_WRITE_SEQUENCER_306 0x3132
+#define WM8994_WRITE_SEQUENCER_307 0x3133
+#define WM8994_WRITE_SEQUENCER_308 0x3134
+#define WM8994_WRITE_SEQUENCER_309 0x3135
+#define WM8994_WRITE_SEQUENCER_310 0x3136
+#define WM8994_WRITE_SEQUENCER_311 0x3137
+#define WM8994_WRITE_SEQUENCER_312 0x3138
+#define WM8994_WRITE_SEQUENCER_313 0x3139
+#define WM8994_WRITE_SEQUENCER_314 0x313A
+#define WM8994_WRITE_SEQUENCER_315 0x313B
+#define WM8994_WRITE_SEQUENCER_316 0x313C
+#define WM8994_WRITE_SEQUENCER_317 0x313D
+#define WM8994_WRITE_SEQUENCER_318 0x313E
+#define WM8994_WRITE_SEQUENCER_319 0x313F
+#define WM8994_WRITE_SEQUENCER_320 0x3140
+#define WM8994_WRITE_SEQUENCER_321 0x3141
+#define WM8994_WRITE_SEQUENCER_322 0x3142
+#define WM8994_WRITE_SEQUENCER_323 0x3143
+#define WM8994_WRITE_SEQUENCER_324 0x3144
+#define WM8994_WRITE_SEQUENCER_325 0x3145
+#define WM8994_WRITE_SEQUENCER_326 0x3146
+#define WM8994_WRITE_SEQUENCER_327 0x3147
+#define WM8994_WRITE_SEQUENCER_328 0x3148
+#define WM8994_WRITE_SEQUENCER_329 0x3149
+#define WM8994_WRITE_SEQUENCER_330 0x314A
+#define WM8994_WRITE_SEQUENCER_331 0x314B
+#define WM8994_WRITE_SEQUENCER_332 0x314C
+#define WM8994_WRITE_SEQUENCER_333 0x314D
+#define WM8994_WRITE_SEQUENCER_334 0x314E
+#define WM8994_WRITE_SEQUENCER_335 0x314F
+#define WM8994_WRITE_SEQUENCER_336 0x3150
+#define WM8994_WRITE_SEQUENCER_337 0x3151
+#define WM8994_WRITE_SEQUENCER_338 0x3152
+#define WM8994_WRITE_SEQUENCER_339 0x3153
+#define WM8994_WRITE_SEQUENCER_340 0x3154
+#define WM8994_WRITE_SEQUENCER_341 0x3155
+#define WM8994_WRITE_SEQUENCER_342 0x3156
+#define WM8994_WRITE_SEQUENCER_343 0x3157
+#define WM8994_WRITE_SEQUENCER_344 0x3158
+#define WM8994_WRITE_SEQUENCER_345 0x3159
+#define WM8994_WRITE_SEQUENCER_346 0x315A
+#define WM8994_WRITE_SEQUENCER_347 0x315B
+#define WM8994_WRITE_SEQUENCER_348 0x315C
+#define WM8994_WRITE_SEQUENCER_349 0x315D
+#define WM8994_WRITE_SEQUENCER_350 0x315E
+#define WM8994_WRITE_SEQUENCER_351 0x315F
+#define WM8994_WRITE_SEQUENCER_352 0x3160
+#define WM8994_WRITE_SEQUENCER_353 0x3161
+#define WM8994_WRITE_SEQUENCER_354 0x3162
+#define WM8994_WRITE_SEQUENCER_355 0x3163
+#define WM8994_WRITE_SEQUENCER_356 0x3164
+#define WM8994_WRITE_SEQUENCER_357 0x3165
+#define WM8994_WRITE_SEQUENCER_358 0x3166
+#define WM8994_WRITE_SEQUENCER_359 0x3167
+#define WM8994_WRITE_SEQUENCER_360 0x3168
+#define WM8994_WRITE_SEQUENCER_361 0x3169
+#define WM8994_WRITE_SEQUENCER_362 0x316A
+#define WM8994_WRITE_SEQUENCER_363 0x316B
+#define WM8994_WRITE_SEQUENCER_364 0x316C
+#define WM8994_WRITE_SEQUENCER_365 0x316D
+#define WM8994_WRITE_SEQUENCER_366 0x316E
+#define WM8994_WRITE_SEQUENCER_367 0x316F
+#define WM8994_WRITE_SEQUENCER_368 0x3170
+#define WM8994_WRITE_SEQUENCER_369 0x3171
+#define WM8994_WRITE_SEQUENCER_370 0x3172
+#define WM8994_WRITE_SEQUENCER_371 0x3173
+#define WM8994_WRITE_SEQUENCER_372 0x3174
+#define WM8994_WRITE_SEQUENCER_373 0x3175
+#define WM8994_WRITE_SEQUENCER_374 0x3176
+#define WM8994_WRITE_SEQUENCER_375 0x3177
+#define WM8994_WRITE_SEQUENCER_376 0x3178
+#define WM8994_WRITE_SEQUENCER_377 0x3179
+#define WM8994_WRITE_SEQUENCER_378 0x317A
+#define WM8994_WRITE_SEQUENCER_379 0x317B
+#define WM8994_WRITE_SEQUENCER_380 0x317C
+#define WM8994_WRITE_SEQUENCER_381 0x317D
+#define WM8994_WRITE_SEQUENCER_382 0x317E
+#define WM8994_WRITE_SEQUENCER_383 0x317F
+#define WM8994_WRITE_SEQUENCER_384 0x3180
+#define WM8994_WRITE_SEQUENCER_385 0x3181
+#define WM8994_WRITE_SEQUENCER_386 0x3182
+#define WM8994_WRITE_SEQUENCER_387 0x3183
+#define WM8994_WRITE_SEQUENCER_388 0x3184
+#define WM8994_WRITE_SEQUENCER_389 0x3185
+#define WM8994_WRITE_SEQUENCER_390 0x3186
+#define WM8994_WRITE_SEQUENCER_391 0x3187
+#define WM8994_WRITE_SEQUENCER_392 0x3188
+#define WM8994_WRITE_SEQUENCER_393 0x3189
+#define WM8994_WRITE_SEQUENCER_394 0x318A
+#define WM8994_WRITE_SEQUENCER_395 0x318B
+#define WM8994_WRITE_SEQUENCER_396 0x318C
+#define WM8994_WRITE_SEQUENCER_397 0x318D
+#define WM8994_WRITE_SEQUENCER_398 0x318E
+#define WM8994_WRITE_SEQUENCER_399 0x318F
+#define WM8994_WRITE_SEQUENCER_400 0x3190
+#define WM8994_WRITE_SEQUENCER_401 0x3191
+#define WM8994_WRITE_SEQUENCER_402 0x3192
+#define WM8994_WRITE_SEQUENCER_403 0x3193
+#define WM8994_WRITE_SEQUENCER_404 0x3194
+#define WM8994_WRITE_SEQUENCER_405 0x3195
+#define WM8994_WRITE_SEQUENCER_406 0x3196
+#define WM8994_WRITE_SEQUENCER_407 0x3197
+#define WM8994_WRITE_SEQUENCER_408 0x3198
+#define WM8994_WRITE_SEQUENCER_409 0x3199
+#define WM8994_WRITE_SEQUENCER_410 0x319A
+#define WM8994_WRITE_SEQUENCER_411 0x319B
+#define WM8994_WRITE_SEQUENCER_412 0x319C
+#define WM8994_WRITE_SEQUENCER_413 0x319D
+#define WM8994_WRITE_SEQUENCER_414 0x319E
+#define WM8994_WRITE_SEQUENCER_415 0x319F
+#define WM8994_WRITE_SEQUENCER_416 0x31A0
+#define WM8994_WRITE_SEQUENCER_417 0x31A1
+#define WM8994_WRITE_SEQUENCER_418 0x31A2
+#define WM8994_WRITE_SEQUENCER_419 0x31A3
+#define WM8994_WRITE_SEQUENCER_420 0x31A4
+#define WM8994_WRITE_SEQUENCER_421 0x31A5
+#define WM8994_WRITE_SEQUENCER_422 0x31A6
+#define WM8994_WRITE_SEQUENCER_423 0x31A7
+#define WM8994_WRITE_SEQUENCER_424 0x31A8
+#define WM8994_WRITE_SEQUENCER_425 0x31A9
+#define WM8994_WRITE_SEQUENCER_426 0x31AA
+#define WM8994_WRITE_SEQUENCER_427 0x31AB
+#define WM8994_WRITE_SEQUENCER_428 0x31AC
+#define WM8994_WRITE_SEQUENCER_429 0x31AD
+#define WM8994_WRITE_SEQUENCER_430 0x31AE
+#define WM8994_WRITE_SEQUENCER_431 0x31AF
+#define WM8994_WRITE_SEQUENCER_432 0x31B0
+#define WM8994_WRITE_SEQUENCER_433 0x31B1
+#define WM8994_WRITE_SEQUENCER_434 0x31B2
+#define WM8994_WRITE_SEQUENCER_435 0x31B3
+#define WM8994_WRITE_SEQUENCER_436 0x31B4
+#define WM8994_WRITE_SEQUENCER_437 0x31B5
+#define WM8994_WRITE_SEQUENCER_438 0x31B6
+#define WM8994_WRITE_SEQUENCER_439 0x31B7
+#define WM8994_WRITE_SEQUENCER_440 0x31B8
+#define WM8994_WRITE_SEQUENCER_441 0x31B9
+#define WM8994_WRITE_SEQUENCER_442 0x31BA
+#define WM8994_WRITE_SEQUENCER_443 0x31BB
+#define WM8994_WRITE_SEQUENCER_444 0x31BC
+#define WM8994_WRITE_SEQUENCER_445 0x31BD
+#define WM8994_WRITE_SEQUENCER_446 0x31BE
+#define WM8994_WRITE_SEQUENCER_447 0x31BF
+#define WM8994_WRITE_SEQUENCER_448 0x31C0
+#define WM8994_WRITE_SEQUENCER_449 0x31C1
+#define WM8994_WRITE_SEQUENCER_450 0x31C2
+#define WM8994_WRITE_SEQUENCER_451 0x31C3
+#define WM8994_WRITE_SEQUENCER_452 0x31C4
+#define WM8994_WRITE_SEQUENCER_453 0x31C5
+#define WM8994_WRITE_SEQUENCER_454 0x31C6
+#define WM8994_WRITE_SEQUENCER_455 0x31C7
+#define WM8994_WRITE_SEQUENCER_456 0x31C8
+#define WM8994_WRITE_SEQUENCER_457 0x31C9
+#define WM8994_WRITE_SEQUENCER_458 0x31CA
+#define WM8994_WRITE_SEQUENCER_459 0x31CB
+#define WM8994_WRITE_SEQUENCER_460 0x31CC
+#define WM8994_WRITE_SEQUENCER_461 0x31CD
+#define WM8994_WRITE_SEQUENCER_462 0x31CE
+#define WM8994_WRITE_SEQUENCER_463 0x31CF
+#define WM8994_WRITE_SEQUENCER_464 0x31D0
+#define WM8994_WRITE_SEQUENCER_465 0x31D1
+#define WM8994_WRITE_SEQUENCER_466 0x31D2
+#define WM8994_WRITE_SEQUENCER_467 0x31D3
+#define WM8994_WRITE_SEQUENCER_468 0x31D4
+#define WM8994_WRITE_SEQUENCER_469 0x31D5
+#define WM8994_WRITE_SEQUENCER_470 0x31D6
+#define WM8994_WRITE_SEQUENCER_471 0x31D7
+#define WM8994_WRITE_SEQUENCER_472 0x31D8
+#define WM8994_WRITE_SEQUENCER_473 0x31D9
+#define WM8994_WRITE_SEQUENCER_474 0x31DA
+#define WM8994_WRITE_SEQUENCER_475 0x31DB
+#define WM8994_WRITE_SEQUENCER_476 0x31DC
+#define WM8994_WRITE_SEQUENCER_477 0x31DD
+#define WM8994_WRITE_SEQUENCER_478 0x31DE
+#define WM8994_WRITE_SEQUENCER_479 0x31DF
+#define WM8994_WRITE_SEQUENCER_480 0x31E0
+#define WM8994_WRITE_SEQUENCER_481 0x31E1
+#define WM8994_WRITE_SEQUENCER_482 0x31E2
+#define WM8994_WRITE_SEQUENCER_483 0x31E3
+#define WM8994_WRITE_SEQUENCER_484 0x31E4
+#define WM8994_WRITE_SEQUENCER_485 0x31E5
+#define WM8994_WRITE_SEQUENCER_486 0x31E6
+#define WM8994_WRITE_SEQUENCER_487 0x31E7
+#define WM8994_WRITE_SEQUENCER_488 0x31E8
+#define WM8994_WRITE_SEQUENCER_489 0x31E9
+#define WM8994_WRITE_SEQUENCER_490 0x31EA
+#define WM8994_WRITE_SEQUENCER_491 0x31EB
+#define WM8994_WRITE_SEQUENCER_492 0x31EC
+#define WM8994_WRITE_SEQUENCER_493 0x31ED
+#define WM8994_WRITE_SEQUENCER_494 0x31EE
+#define WM8994_WRITE_SEQUENCER_495 0x31EF
+#define WM8994_WRITE_SEQUENCER_496 0x31F0
+#define WM8994_WRITE_SEQUENCER_497 0x31F1
+#define WM8994_WRITE_SEQUENCER_498 0x31F2
+#define WM8994_WRITE_SEQUENCER_499 0x31F3
+#define WM8994_WRITE_SEQUENCER_500 0x31F4
+#define WM8994_WRITE_SEQUENCER_501 0x31F5
+#define WM8994_WRITE_SEQUENCER_502 0x31F6
+#define WM8994_WRITE_SEQUENCER_503 0x31F7
+#define WM8994_WRITE_SEQUENCER_504 0x31F8
+#define WM8994_WRITE_SEQUENCER_505 0x31F9
+#define WM8994_WRITE_SEQUENCER_506 0x31FA
+#define WM8994_WRITE_SEQUENCER_507 0x31FB
+#define WM8994_WRITE_SEQUENCER_508 0x31FC
+#define WM8994_WRITE_SEQUENCER_509 0x31FD
+#define WM8994_WRITE_SEQUENCER_510 0x31FE
+#define WM8994_WRITE_SEQUENCER_511 0x31FF
+
+#define WM8994_REGISTER_COUNT 736
+#define WM8994_MAX_REGISTER 0x31FF
+#define WM8994_MAX_CACHED_REGISTER 0x749
+
+/*
+ * Field Definitions.
+ */
+
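/*
 * Editor's sketch (not part of the original wm8994 registers.h): the
 * *_MASK, *_SHIFT and *_WIDTH macros that follow share one convention -
 * MASK selects the field's bits within the 16-bit register value and
 * SHIFT is the index of the field's least significant bit.  Assuming
 * only that convention and standard C, a field can be read and updated
 * with helpers like these (the helper names are illustrative, not part
 * of the header):
 */
static inline unsigned int wm8994_field_get(unsigned int reg_val,
					    unsigned int mask,
					    unsigned int shift)
{
	/* Isolate the field's bits, then move them down to bit 0. */
	return (reg_val & mask) >> shift;
}

static inline unsigned int wm8994_field_set(unsigned int reg_val,
					    unsigned int mask,
					    unsigned int shift,
					    unsigned int field_val)
{
	/* Clear the old field bits, then insert the new value. */
	return (reg_val & ~mask) | ((field_val << shift) & mask);
}

/*
 * Example use against a cached copy of R1 (Power Management (1)),
 * using the VMID_SEL and BIAS_ENA definitions below:
 *
 *   r1 = wm8994_field_set(r1, WM8994_VMID_SEL_MASK,
 *			   WM8994_VMID_SEL_SHIFT, 1);
 *   r1 |= WM8994_BIAS_ENA;
 */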
+/*
+ * R0 (0x00) - Software Reset
+ */
+#define WM8994_SW_RESET_MASK 0xFFFF /* SW_RESET - [15:0] */
+#define WM8994_SW_RESET_SHIFT 0 /* SW_RESET - [15:0] */
+#define WM8994_SW_RESET_WIDTH 16 /* SW_RESET - [15:0] */
+
+/*
+ * R1 (0x01) - Power Management (1)
+ */
+#define WM8994_SPKOUTR_ENA 0x2000 /* SPKOUTR_ENA */
+#define WM8994_SPKOUTR_ENA_MASK 0x2000 /* SPKOUTR_ENA */
+#define WM8994_SPKOUTR_ENA_SHIFT 13 /* SPKOUTR_ENA */
+#define WM8994_SPKOUTR_ENA_WIDTH 1 /* SPKOUTR_ENA */
+#define WM8994_SPKOUTL_ENA 0x1000 /* SPKOUTL_ENA */
+#define WM8994_SPKOUTL_ENA_MASK 0x1000 /* SPKOUTL_ENA */
+#define WM8994_SPKOUTL_ENA_SHIFT 12 /* SPKOUTL_ENA */
+#define WM8994_SPKOUTL_ENA_WIDTH 1 /* SPKOUTL_ENA */
+#define WM8994_HPOUT2_ENA 0x0800 /* HPOUT2_ENA */
+#define WM8994_HPOUT2_ENA_MASK 0x0800 /* HPOUT2_ENA */
+#define WM8994_HPOUT2_ENA_SHIFT 11 /* HPOUT2_ENA */
+#define WM8994_HPOUT2_ENA_WIDTH 1 /* HPOUT2_ENA */
+#define WM8994_HPOUT1L_ENA 0x0200 /* HPOUT1L_ENA */
+#define WM8994_HPOUT1L_ENA_MASK 0x0200 /* HPOUT1L_ENA */
+#define WM8994_HPOUT1L_ENA_SHIFT 9 /* HPOUT1L_ENA */
+#define WM8994_HPOUT1L_ENA_WIDTH 1 /* HPOUT1L_ENA */
+#define WM8994_HPOUT1R_ENA 0x0100 /* HPOUT1R_ENA */
+#define WM8994_HPOUT1R_ENA_MASK 0x0100 /* HPOUT1R_ENA */
+#define WM8994_HPOUT1R_ENA_SHIFT 8 /* HPOUT1R_ENA */
+#define WM8994_HPOUT1R_ENA_WIDTH 1 /* HPOUT1R_ENA */
+#define WM8994_MICB2_ENA 0x0020 /* MICB2_ENA */
+#define WM8994_MICB2_ENA_MASK 0x0020 /* MICB2_ENA */
+#define WM8994_MICB2_ENA_SHIFT 5 /* MICB2_ENA */
+#define WM8994_MICB2_ENA_WIDTH 1 /* MICB2_ENA */
+#define WM8994_MICB1_ENA 0x0010 /* MICB1_ENA */
+#define WM8994_MICB1_ENA_MASK 0x0010 /* MICB1_ENA */
+#define WM8994_MICB1_ENA_SHIFT 4 /* MICB1_ENA */
+#define WM8994_MICB1_ENA_WIDTH 1 /* MICB1_ENA */
+#define WM8994_VMID_SEL_MASK 0x0006 /* VMID_SEL - [2:1] */
+#define WM8994_VMID_SEL_SHIFT 1 /* VMID_SEL - [2:1] */
+#define WM8994_VMID_SEL_WIDTH 2 /* VMID_SEL - [2:1] */
+#define WM8994_BIAS_ENA 0x0001 /* BIAS_ENA */
+#define WM8994_BIAS_ENA_MASK 0x0001 /* BIAS_ENA */
+#define WM8994_BIAS_ENA_SHIFT 0 /* BIAS_ENA */
+#define WM8994_BIAS_ENA_WIDTH 1 /* BIAS_ENA */
+
+/*
+ * R2 (0x02) - Power Management (2)
+ */
+#define WM8994_TSHUT_ENA 0x4000 /* TSHUT_ENA */
+#define WM8994_TSHUT_ENA_MASK 0x4000 /* TSHUT_ENA */
+#define WM8994_TSHUT_ENA_SHIFT 14 /* TSHUT_ENA */
+#define WM8994_TSHUT_ENA_WIDTH 1 /* TSHUT_ENA */
+#define WM8994_TSHUT_OPDIS 0x2000 /* TSHUT_OPDIS */
+#define WM8994_TSHUT_OPDIS_MASK 0x2000 /* TSHUT_OPDIS */
+#define WM8994_TSHUT_OPDIS_SHIFT 13 /* TSHUT_OPDIS */
+#define WM8994_TSHUT_OPDIS_WIDTH 1 /* TSHUT_OPDIS */
+#define WM8994_OPCLK_ENA 0x0800 /* OPCLK_ENA */
+#define WM8994_OPCLK_ENA_MASK 0x0800 /* OPCLK_ENA */
+#define WM8994_OPCLK_ENA_SHIFT 11 /* OPCLK_ENA */
+#define WM8994_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */
+#define WM8994_MIXINL_ENA 0x0200 /* MIXINL_ENA */
+#define WM8994_MIXINL_ENA_MASK 0x0200 /* MIXINL_ENA */
+#define WM8994_MIXINL_ENA_SHIFT 9 /* MIXINL_ENA */
+#define WM8994_MIXINL_ENA_WIDTH 1 /* MIXINL_ENA */
+#define WM8994_MIXINR_ENA 0x0100 /* MIXINR_ENA */
+#define WM8994_MIXINR_ENA_MASK 0x0100 /* MIXINR_ENA */
+#define WM8994_MIXINR_ENA_SHIFT 8 /* MIXINR_ENA */
+#define WM8994_MIXINR_ENA_WIDTH 1 /* MIXINR_ENA */
+#define WM8994_IN2L_ENA 0x0080 /* IN2L_ENA */
+#define WM8994_IN2L_ENA_MASK 0x0080 /* IN2L_ENA */
+#define WM8994_IN2L_ENA_SHIFT 7 /* IN2L_ENA */
+#define WM8994_IN2L_ENA_WIDTH 1 /* IN2L_ENA */
+#define WM8994_IN1L_ENA 0x0040 /* IN1L_ENA */
+#define WM8994_IN1L_ENA_MASK 0x0040 /* IN1L_ENA */
+#define WM8994_IN1L_ENA_SHIFT 6 /* IN1L_ENA */
+#define WM8994_IN1L_ENA_WIDTH 1 /* IN1L_ENA */
+#define WM8994_IN2R_ENA 0x0020 /* IN2R_ENA */
+#define WM8994_IN2R_ENA_MASK 0x0020 /* IN2R_ENA */
+#define WM8994_IN2R_ENA_SHIFT 5 /* IN2R_ENA */
+#define WM8994_IN2R_ENA_WIDTH 1 /* IN2R_ENA */
+#define WM8994_IN1R_ENA 0x0010 /* IN1R_ENA */
+#define WM8994_IN1R_ENA_MASK 0x0010 /* IN1R_ENA */
+#define WM8994_IN1R_ENA_SHIFT 4 /* IN1R_ENA */
+#define WM8994_IN1R_ENA_WIDTH 1 /* IN1R_ENA */
+
+/*
+ * R3 (0x03) - Power Management (3)
+ */
+#define WM8994_LINEOUT1N_ENA 0x2000 /* LINEOUT1N_ENA */
+#define WM8994_LINEOUT1N_ENA_MASK 0x2000 /* LINEOUT1N_ENA */
+#define WM8994_LINEOUT1N_ENA_SHIFT 13 /* LINEOUT1N_ENA */
+#define WM8994_LINEOUT1N_ENA_WIDTH 1 /* LINEOUT1N_ENA */
+#define WM8994_LINEOUT1P_ENA 0x1000 /* LINEOUT1P_ENA */
+#define WM8994_LINEOUT1P_ENA_MASK 0x1000 /* LINEOUT1P_ENA */
+#define WM8994_LINEOUT1P_ENA_SHIFT 12 /* LINEOUT1P_ENA */
+#define WM8994_LINEOUT1P_ENA_WIDTH 1 /* LINEOUT1P_ENA */
+#define WM8994_LINEOUT2N_ENA 0x0800 /* LINEOUT2N_ENA */
+#define WM8994_LINEOUT2N_ENA_MASK 0x0800 /* LINEOUT2N_ENA */
+#define WM8994_LINEOUT2N_ENA_SHIFT 11 /* LINEOUT2N_ENA */
+#define WM8994_LINEOUT2N_ENA_WIDTH 1 /* LINEOUT2N_ENA */
+#define WM8994_LINEOUT2P_ENA 0x0400 /* LINEOUT2P_ENA */
+#define WM8994_LINEOUT2P_ENA_MASK 0x0400 /* LINEOUT2P_ENA */
+#define WM8994_LINEOUT2P_ENA_SHIFT 10 /* LINEOUT2P_ENA */
+#define WM8994_LINEOUT2P_ENA_WIDTH 1 /* LINEOUT2P_ENA */
+#define WM8994_SPKRVOL_ENA 0x0200 /* SPKRVOL_ENA */
+#define WM8994_SPKRVOL_ENA_MASK 0x0200 /* SPKRVOL_ENA */
+#define WM8994_SPKRVOL_ENA_SHIFT 9 /* SPKRVOL_ENA */
+#define WM8994_SPKRVOL_ENA_WIDTH 1 /* SPKRVOL_ENA */
+#define WM8994_SPKLVOL_ENA 0x0100 /* SPKLVOL_ENA */
+#define WM8994_SPKLVOL_ENA_MASK 0x0100 /* SPKLVOL_ENA */
+#define WM8994_SPKLVOL_ENA_SHIFT 8 /* SPKLVOL_ENA */
+#define WM8994_SPKLVOL_ENA_WIDTH 1 /* SPKLVOL_ENA */
+#define WM8994_MIXOUTLVOL_ENA 0x0080 /* MIXOUTLVOL_ENA */
+#define WM8994_MIXOUTLVOL_ENA_MASK 0x0080 /* MIXOUTLVOL_ENA */
+#define WM8994_MIXOUTLVOL_ENA_SHIFT 7 /* MIXOUTLVOL_ENA */
+#define WM8994_MIXOUTLVOL_ENA_WIDTH 1 /* MIXOUTLVOL_ENA */
+#define WM8994_MIXOUTRVOL_ENA 0x0040 /* MIXOUTRVOL_ENA */
+#define WM8994_MIXOUTRVOL_ENA_MASK 0x0040 /* MIXOUTRVOL_ENA */
+#define WM8994_MIXOUTRVOL_ENA_SHIFT 6 /* MIXOUTRVOL_ENA */
+#define WM8994_MIXOUTRVOL_ENA_WIDTH 1 /* MIXOUTRVOL_ENA */
+#define WM8994_MIXOUTL_ENA 0x0020 /* MIXOUTL_ENA */
+#define WM8994_MIXOUTL_ENA_MASK 0x0020 /* MIXOUTL_ENA */
+#define WM8994_MIXOUTL_ENA_SHIFT 5 /* MIXOUTL_ENA */
+#define WM8994_MIXOUTL_ENA_WIDTH 1 /* MIXOUTL_ENA */
+#define WM8994_MIXOUTR_ENA 0x0010 /* MIXOUTR_ENA */
+#define WM8994_MIXOUTR_ENA_MASK 0x0010 /* MIXOUTR_ENA */
+#define WM8994_MIXOUTR_ENA_SHIFT 4 /* MIXOUTR_ENA */
+#define WM8994_MIXOUTR_ENA_WIDTH 1 /* MIXOUTR_ENA */
+
+/*
+ * R4 (0x04) - Power Management (4)
+ */
+#define WM8994_AIF2ADCL_ENA 0x2000 /* AIF2ADCL_ENA */
+#define WM8994_AIF2ADCL_ENA_MASK 0x2000 /* AIF2ADCL_ENA */
+#define WM8994_AIF2ADCL_ENA_SHIFT 13 /* AIF2ADCL_ENA */
+#define WM8994_AIF2ADCL_ENA_WIDTH 1 /* AIF2ADCL_ENA */
+#define WM8994_AIF2ADCR_ENA 0x1000 /* AIF2ADCR_ENA */
+#define WM8994_AIF2ADCR_ENA_MASK 0x1000 /* AIF2ADCR_ENA */
+#define WM8994_AIF2ADCR_ENA_SHIFT 12 /* AIF2ADCR_ENA */
+#define WM8994_AIF2ADCR_ENA_WIDTH 1 /* AIF2ADCR_ENA */
+#define WM8994_AIF1ADC2L_ENA 0x0800 /* AIF1ADC2L_ENA */
+#define WM8994_AIF1ADC2L_ENA_MASK 0x0800 /* AIF1ADC2L_ENA */
+#define WM8994_AIF1ADC2L_ENA_SHIFT 11 /* AIF1ADC2L_ENA */
+#define WM8994_AIF1ADC2L_ENA_WIDTH 1 /* AIF1ADC2L_ENA */
+#define WM8994_AIF1ADC2R_ENA 0x0400 /* AIF1ADC2R_ENA */
+#define WM8994_AIF1ADC2R_ENA_MASK 0x0400 /* AIF1ADC2R_ENA */
+#define WM8994_AIF1ADC2R_ENA_SHIFT 10 /* AIF1ADC2R_ENA */
+#define WM8994_AIF1ADC2R_ENA_WIDTH 1 /* AIF1ADC2R_ENA */
+#define WM8994_AIF1ADC1L_ENA 0x0200 /* AIF1ADC1L_ENA */
+#define WM8994_AIF1ADC1L_ENA_MASK 0x0200 /* AIF1ADC1L_ENA */
+#define WM8994_AIF1ADC1L_ENA_SHIFT 9 /* AIF1ADC1L_ENA */
+#define WM8994_AIF1ADC1L_ENA_WIDTH 1 /* AIF1ADC1L_ENA */
+#define WM8994_AIF1ADC1R_ENA 0x0100 /* AIF1ADC1R_ENA */
+#define WM8994_AIF1ADC1R_ENA_MASK 0x0100 /* AIF1ADC1R_ENA */
+#define WM8994_AIF1ADC1R_ENA_SHIFT 8 /* AIF1ADC1R_ENA */
+#define WM8994_AIF1ADC1R_ENA_WIDTH 1 /* AIF1ADC1R_ENA */
+#define WM8994_DMIC2L_ENA 0x0020 /* DMIC2L_ENA */
+#define WM8994_DMIC2L_ENA_MASK 0x0020 /* DMIC2L_ENA */
+#define WM8994_DMIC2L_ENA_SHIFT 5 /* DMIC2L_ENA */
+#define WM8994_DMIC2L_ENA_WIDTH 1 /* DMIC2L_ENA */
+#define WM8994_DMIC2R_ENA 0x0010 /* DMIC2R_ENA */
+#define WM8994_DMIC2R_ENA_MASK 0x0010 /* DMIC2R_ENA */
+#define WM8994_DMIC2R_ENA_SHIFT 4 /* DMIC2R_ENA */
+#define WM8994_DMIC2R_ENA_WIDTH 1 /* DMIC2R_ENA */
+#define WM8994_DMIC1L_ENA 0x0008 /* DMIC1L_ENA */
+#define WM8994_DMIC1L_ENA_MASK 0x0008 /* DMIC1L_ENA */
+#define WM8994_DMIC1L_ENA_SHIFT 3 /* DMIC1L_ENA */
+#define WM8994_DMIC1L_ENA_WIDTH 1 /* DMIC1L_ENA */
+#define WM8994_DMIC1R_ENA 0x0004 /* DMIC1R_ENA */
+#define WM8994_DMIC1R_ENA_MASK 0x0004 /* DMIC1R_ENA */
+#define WM8994_DMIC1R_ENA_SHIFT 2 /* DMIC1R_ENA */
+#define WM8994_DMIC1R_ENA_WIDTH 1 /* DMIC1R_ENA */
+#define WM8994_ADCL_ENA 0x0002 /* ADCL_ENA */
+#define WM8994_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */
+#define WM8994_ADCL_ENA_SHIFT 1 /* ADCL_ENA */
+#define WM8994_ADCL_ENA_WIDTH 1 /* ADCL_ENA */
+#define WM8994_ADCR_ENA 0x0001 /* ADCR_ENA */
+#define WM8994_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */
+#define WM8994_ADCR_ENA_SHIFT 0 /* ADCR_ENA */
+#define WM8994_ADCR_ENA_WIDTH 1 /* ADCR_ENA */
+
+/*
+ * R5 (0x05) - Power Management (5)
+ */
+#define WM8994_AIF2DACL_ENA 0x2000 /* AIF2DACL_ENA */
+#define WM8994_AIF2DACL_ENA_MASK 0x2000 /* AIF2DACL_ENA */
+#define WM8994_AIF2DACL_ENA_SHIFT 13 /* AIF2DACL_ENA */
+#define WM8994_AIF2DACL_ENA_WIDTH 1 /* AIF2DACL_ENA */
+#define WM8994_AIF2DACR_ENA 0x1000 /* AIF2DACR_ENA */
+#define WM8994_AIF2DACR_ENA_MASK 0x1000 /* AIF2DACR_ENA */
+#define WM8994_AIF2DACR_ENA_SHIFT 12 /* AIF2DACR_ENA */
+#define WM8994_AIF2DACR_ENA_WIDTH 1 /* AIF2DACR_ENA */
+#define WM8994_AIF1DAC2L_ENA 0x0800 /* AIF1DAC2L_ENA */
+#define WM8994_AIF1DAC2L_ENA_MASK 0x0800 /* AIF1DAC2L_ENA */
+#define WM8994_AIF1DAC2L_ENA_SHIFT 11 /* AIF1DAC2L_ENA */
+#define WM8994_AIF1DAC2L_ENA_WIDTH 1 /* AIF1DAC2L_ENA */
+#define WM8994_AIF1DAC2R_ENA 0x0400 /* AIF1DAC2R_ENA */
+#define WM8994_AIF1DAC2R_ENA_MASK 0x0400 /* AIF1DAC2R_ENA */
+#define WM8994_AIF1DAC2R_ENA_SHIFT 10 /* AIF1DAC2R_ENA */
+#define WM8994_AIF1DAC2R_ENA_WIDTH 1 /* AIF1DAC2R_ENA */
+#define WM8994_AIF1DAC1L_ENA 0x0200 /* AIF1DAC1L_ENA */
+#define WM8994_AIF1DAC1L_ENA_MASK 0x0200 /* AIF1DAC1L_ENA */
+#define WM8994_AIF1DAC1L_ENA_SHIFT 9 /* AIF1DAC1L_ENA */
+#define WM8994_AIF1DAC1L_ENA_WIDTH 1 /* AIF1DAC1L_ENA */
+#define WM8994_AIF1DAC1R_ENA 0x0100 /* AIF1DAC1R_ENA */
+#define WM8994_AIF1DAC1R_ENA_MASK 0x0100 /* AIF1DAC1R_ENA */
+#define WM8994_AIF1DAC1R_ENA_SHIFT 8 /* AIF1DAC1R_ENA */
+#define WM8994_AIF1DAC1R_ENA_WIDTH 1 /* AIF1DAC1R_ENA */
+#define WM8994_DAC2L_ENA 0x0008 /* DAC2L_ENA */
+#define WM8994_DAC2L_ENA_MASK 0x0008 /* DAC2L_ENA */
+#define WM8994_DAC2L_ENA_SHIFT 3 /* DAC2L_ENA */
+#define WM8994_DAC2L_ENA_WIDTH 1 /* DAC2L_ENA */
+#define WM8994_DAC2R_ENA 0x0004 /* DAC2R_ENA */
+#define WM8994_DAC2R_ENA_MASK 0x0004 /* DAC2R_ENA */
+#define WM8994_DAC2R_ENA_SHIFT 2 /* DAC2R_ENA */
+#define WM8994_DAC2R_ENA_WIDTH 1 /* DAC2R_ENA */
+#define WM8994_DAC1L_ENA 0x0002 /* DAC1L_ENA */
+#define WM8994_DAC1L_ENA_MASK 0x0002 /* DAC1L_ENA */
+#define WM8994_DAC1L_ENA_SHIFT 1 /* DAC1L_ENA */
+#define WM8994_DAC1L_ENA_WIDTH 1 /* DAC1L_ENA */
+#define WM8994_DAC1R_ENA 0x0001 /* DAC1R_ENA */
+#define WM8994_DAC1R_ENA_MASK 0x0001 /* DAC1R_ENA */
+#define WM8994_DAC1R_ENA_SHIFT 0 /* DAC1R_ENA */
+#define WM8994_DAC1R_ENA_WIDTH 1 /* DAC1R_ENA */
+
+/*
+ * R6 (0x06) - Power Management (6)
+ */
+#define WM8958_AIF3ADC_SRC_MASK 0x0600 /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF3ADC_SRC_SHIFT 9 /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF3ADC_SRC_WIDTH 2 /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF2DAC_SRC_MASK 0x0180 /* AIF2DAC_SRC - [8:7] */
+#define WM8958_AIF2DAC_SRC_SHIFT 7 /* AIF2DAC_SRC - [8:7] */
+#define WM8958_AIF2DAC_SRC_WIDTH 2 /* AIF2DAC_SRC - [8:7] */
+#define WM8994_AIF3_TRI 0x0020 /* AIF3_TRI */
+#define WM8994_AIF3_TRI_MASK 0x0020 /* AIF3_TRI */
+#define WM8994_AIF3_TRI_SHIFT 5 /* AIF3_TRI */
+#define WM8994_AIF3_TRI_WIDTH 1 /* AIF3_TRI */
+#define WM8994_AIF3_ADCDAT_SRC_MASK 0x0018 /* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8994_AIF3_ADCDAT_SRC_SHIFT 3 /* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8994_AIF3_ADCDAT_SRC_WIDTH 2 /* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8994_AIF2_ADCDAT_SRC 0x0004 /* AIF2_ADCDAT_SRC */
+#define WM8994_AIF2_ADCDAT_SRC_MASK 0x0004 /* AIF2_ADCDAT_SRC */
+#define WM8994_AIF2_ADCDAT_SRC_SHIFT 2 /* AIF2_ADCDAT_SRC */
+#define WM8994_AIF2_ADCDAT_SRC_WIDTH 1 /* AIF2_ADCDAT_SRC */
+#define WM8994_AIF2_DACDAT_SRC 0x0002 /* AIF2_DACDAT_SRC */
+#define WM8994_AIF2_DACDAT_SRC_MASK 0x0002 /* AIF2_DACDAT_SRC */
+#define WM8994_AIF2_DACDAT_SRC_SHIFT 1 /* AIF2_DACDAT_SRC */
+#define WM8994_AIF2_DACDAT_SRC_WIDTH 1 /* AIF2_DACDAT_SRC */
+#define WM8994_AIF1_DACDAT_SRC 0x0001 /* AIF1_DACDAT_SRC */
+#define WM8994_AIF1_DACDAT_SRC_MASK 0x0001 /* AIF1_DACDAT_SRC */
+#define WM8994_AIF1_DACDAT_SRC_SHIFT 0 /* AIF1_DACDAT_SRC */
+#define WM8994_AIF1_DACDAT_SRC_WIDTH 1 /* AIF1_DACDAT_SRC */
+
+/*
+ * R21 (0x15) - Input Mixer (1)
+ */
+#define WM8994_IN1RP_MIXINR_BOOST 0x0100 /* IN1RP_MIXINR_BOOST */
+#define WM8994_IN1RP_MIXINR_BOOST_MASK 0x0100 /* IN1RP_MIXINR_BOOST */
+#define WM8994_IN1RP_MIXINR_BOOST_SHIFT 8 /* IN1RP_MIXINR_BOOST */
+#define WM8994_IN1RP_MIXINR_BOOST_WIDTH 1 /* IN1RP_MIXINR_BOOST */
+#define WM8994_IN1LP_MIXINL_BOOST 0x0080 /* IN1LP_MIXINL_BOOST */
+#define WM8994_IN1LP_MIXINL_BOOST_MASK 0x0080 /* IN1LP_MIXINL_BOOST */
+#define WM8994_IN1LP_MIXINL_BOOST_SHIFT 7 /* IN1LP_MIXINL_BOOST */
+#define WM8994_IN1LP_MIXINL_BOOST_WIDTH 1 /* IN1LP_MIXINL_BOOST */
+#define WM8994_INPUTS_CLAMP 0x0040 /* INPUTS_CLAMP */
+#define WM8994_INPUTS_CLAMP_MASK 0x0040 /* INPUTS_CLAMP */
+#define WM8994_INPUTS_CLAMP_SHIFT 6 /* INPUTS_CLAMP */
+#define WM8994_INPUTS_CLAMP_WIDTH 1 /* INPUTS_CLAMP */
+
+/*
+ * R24 (0x18) - Left Line Input 1&2 Volume
+ */
+#define WM8994_IN1_VU 0x0100 /* IN1_VU */
+#define WM8994_IN1_VU_MASK 0x0100 /* IN1_VU */
+#define WM8994_IN1_VU_SHIFT 8 /* IN1_VU */
+#define WM8994_IN1_VU_WIDTH 1 /* IN1_VU */
+#define WM8994_IN1L_MUTE 0x0080 /* IN1L_MUTE */
+#define WM8994_IN1L_MUTE_MASK 0x0080 /* IN1L_MUTE */
+#define WM8994_IN1L_MUTE_SHIFT 7 /* IN1L_MUTE */
+#define WM8994_IN1L_MUTE_WIDTH 1 /* IN1L_MUTE */
+#define WM8994_IN1L_ZC 0x0040 /* IN1L_ZC */
+#define WM8994_IN1L_ZC_MASK 0x0040 /* IN1L_ZC */
+#define WM8994_IN1L_ZC_SHIFT 6 /* IN1L_ZC */
+#define WM8994_IN1L_ZC_WIDTH 1 /* IN1L_ZC */
+#define WM8994_IN1L_VOL_MASK 0x001F /* IN1L_VOL - [4:0] */
+#define WM8994_IN1L_VOL_SHIFT 0 /* IN1L_VOL - [4:0] */
+#define WM8994_IN1L_VOL_WIDTH 5 /* IN1L_VOL - [4:0] */
+
+/*
+ * R25 (0x19) - Left Line Input 3&4 Volume
+ */
+#define WM8994_IN2_VU 0x0100 /* IN2_VU */
+#define WM8994_IN2_VU_MASK 0x0100 /* IN2_VU */
+#define WM8994_IN2_VU_SHIFT 8 /* IN2_VU */
+#define WM8994_IN2_VU_WIDTH 1 /* IN2_VU */
+#define WM8994_IN2L_MUTE 0x0080 /* IN2L_MUTE */
+#define WM8994_IN2L_MUTE_MASK 0x0080 /* IN2L_MUTE */
+#define WM8994_IN2L_MUTE_SHIFT 7 /* IN2L_MUTE */
+#define WM8994_IN2L_MUTE_WIDTH 1 /* IN2L_MUTE */
+#define WM8994_IN2L_ZC 0x0040 /* IN2L_ZC */
+#define WM8994_IN2L_ZC_MASK 0x0040 /* IN2L_ZC */
+#define WM8994_IN2L_ZC_SHIFT 6 /* IN2L_ZC */
+#define WM8994_IN2L_ZC_WIDTH 1 /* IN2L_ZC */
+#define WM8994_IN2L_VOL_MASK 0x001F /* IN2L_VOL - [4:0] */
+#define WM8994_IN2L_VOL_SHIFT 0 /* IN2L_VOL - [4:0] */
+#define WM8994_IN2L_VOL_WIDTH 5 /* IN2L_VOL - [4:0] */
+
+/*
+ * R26 (0x1A) - Right Line Input 1&2 Volume
+ */
+#define WM8994_IN1_VU 0x0100 /* IN1_VU */
+#define WM8994_IN1_VU_MASK 0x0100 /* IN1_VU */
+#define WM8994_IN1_VU_SHIFT 8 /* IN1_VU */
+#define WM8994_IN1_VU_WIDTH 1 /* IN1_VU */
+#define WM8994_IN1R_MUTE 0x0080 /* IN1R_MUTE */
+#define WM8994_IN1R_MUTE_MASK 0x0080 /* IN1R_MUTE */
+#define WM8994_IN1R_MUTE_SHIFT 7 /* IN1R_MUTE */
+#define WM8994_IN1R_MUTE_WIDTH 1 /* IN1R_MUTE */
+#define WM8994_IN1R_ZC 0x0040 /* IN1R_ZC */
+#define WM8994_IN1R_ZC_MASK 0x0040 /* IN1R_ZC */
+#define WM8994_IN1R_ZC_SHIFT 6 /* IN1R_ZC */
+#define WM8994_IN1R_ZC_WIDTH 1 /* IN1R_ZC */
+#define WM8994_IN1R_VOL_MASK 0x001F /* IN1R_VOL - [4:0] */
+#define WM8994_IN1R_VOL_SHIFT 0 /* IN1R_VOL - [4:0] */
+#define WM8994_IN1R_VOL_WIDTH 5 /* IN1R_VOL - [4:0] */
+
+/*
+ * R27 (0x1B) - Right Line Input 3&4 Volume
+ */
+#define WM8994_IN2_VU 0x0100 /* IN2_VU */
+#define WM8994_IN2_VU_MASK 0x0100 /* IN2_VU */
+#define WM8994_IN2_VU_SHIFT 8 /* IN2_VU */
+#define WM8994_IN2_VU_WIDTH 1 /* IN2_VU */
+#define WM8994_IN2R_MUTE 0x0080 /* IN2R_MUTE */
+#define WM8994_IN2R_MUTE_MASK 0x0080 /* IN2R_MUTE */
+#define WM8994_IN2R_MUTE_SHIFT 7 /* IN2R_MUTE */
+#define WM8994_IN2R_MUTE_WIDTH 1 /* IN2R_MUTE */
+#define WM8994_IN2R_ZC 0x0040 /* IN2R_ZC */
+#define WM8994_IN2R_ZC_MASK 0x0040 /* IN2R_ZC */
+#define WM8994_IN2R_ZC_SHIFT 6 /* IN2R_ZC */
+#define WM8994_IN2R_ZC_WIDTH 1 /* IN2R_ZC */
+#define WM8994_IN2R_VOL_MASK 0x001F /* IN2R_VOL - [4:0] */
+#define WM8994_IN2R_VOL_SHIFT 0 /* IN2R_VOL - [4:0] */
+#define WM8994_IN2R_VOL_WIDTH 5 /* IN2R_VOL - [4:0] */
+
+/*
+ * R28 (0x1C) - Left Output Volume
+ */
+#define WM8994_HPOUT1_VU 0x0100 /* HPOUT1_VU */
+#define WM8994_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */
+#define WM8994_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */
+#define WM8994_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */
+#define WM8994_HPOUT1L_ZC 0x0080 /* HPOUT1L_ZC */
+#define WM8994_HPOUT1L_ZC_MASK 0x0080 /* HPOUT1L_ZC */
+#define WM8994_HPOUT1L_ZC_SHIFT 7 /* HPOUT1L_ZC */
+#define WM8994_HPOUT1L_ZC_WIDTH 1 /* HPOUT1L_ZC */
+#define WM8994_HPOUT1L_MUTE_N 0x0040 /* HPOUT1L_MUTE_N */
+#define WM8994_HPOUT1L_MUTE_N_MASK 0x0040 /* HPOUT1L_MUTE_N */
+#define WM8994_HPOUT1L_MUTE_N_SHIFT 6 /* HPOUT1L_MUTE_N */
+#define WM8994_HPOUT1L_MUTE_N_WIDTH 1 /* HPOUT1L_MUTE_N */
+#define WM8994_HPOUT1L_VOL_MASK 0x003F /* HPOUT1L_VOL - [5:0] */
+#define WM8994_HPOUT1L_VOL_SHIFT 0 /* HPOUT1L_VOL - [5:0] */
+#define WM8994_HPOUT1L_VOL_WIDTH 6 /* HPOUT1L_VOL - [5:0] */
+
+/*
+ * R29 (0x1D) - Right Output Volume
+ */
+#define WM8994_HPOUT1_VU 0x0100 /* HPOUT1_VU */
+#define WM8994_HPOUT1_VU_MASK 0x0100 /* HPOUT1_VU */
+#define WM8994_HPOUT1_VU_SHIFT 8 /* HPOUT1_VU */
+#define WM8994_HPOUT1_VU_WIDTH 1 /* HPOUT1_VU */
+#define WM8994_HPOUT1R_ZC 0x0080 /* HPOUT1R_ZC */
+#define WM8994_HPOUT1R_ZC_MASK 0x0080 /* HPOUT1R_ZC */
+#define WM8994_HPOUT1R_ZC_SHIFT 7 /* HPOUT1R_ZC */
+#define WM8994_HPOUT1R_ZC_WIDTH 1 /* HPOUT1R_ZC */
+#define WM8994_HPOUT1R_MUTE_N 0x0040 /* HPOUT1R_MUTE_N */
+#define WM8994_HPOUT1R_MUTE_N_MASK 0x0040 /* HPOUT1R_MUTE_N */
+#define WM8994_HPOUT1R_MUTE_N_SHIFT 6 /* HPOUT1R_MUTE_N */
+#define WM8994_HPOUT1R_MUTE_N_WIDTH 1 /* HPOUT1R_MUTE_N */
+#define WM8994_HPOUT1R_VOL_MASK 0x003F /* HPOUT1R_VOL - [5:0] */
+#define WM8994_HPOUT1R_VOL_SHIFT 0 /* HPOUT1R_VOL - [5:0] */
+#define WM8994_HPOUT1R_VOL_WIDTH 6 /* HPOUT1R_VOL - [5:0] */
+
+/*
+ * R30 (0x1E) - Line Outputs Volume
+ */
+#define WM8994_LINEOUT1N_MUTE 0x0040 /* LINEOUT1N_MUTE */
+#define WM8994_LINEOUT1N_MUTE_MASK 0x0040 /* LINEOUT1N_MUTE */
+#define WM8994_LINEOUT1N_MUTE_SHIFT 6 /* LINEOUT1N_MUTE */
+#define WM8994_LINEOUT1N_MUTE_WIDTH 1 /* LINEOUT1N_MUTE */
+#define WM8994_LINEOUT1P_MUTE 0x0020 /* LINEOUT1P_MUTE */
+#define WM8994_LINEOUT1P_MUTE_MASK 0x0020 /* LINEOUT1P_MUTE */
+#define WM8994_LINEOUT1P_MUTE_SHIFT 5 /* LINEOUT1P_MUTE */
+#define WM8994_LINEOUT1P_MUTE_WIDTH 1 /* LINEOUT1P_MUTE */
+#define WM8994_LINEOUT1_VOL 0x0010 /* LINEOUT1_VOL */
+#define WM8994_LINEOUT1_VOL_MASK 0x0010 /* LINEOUT1_VOL */
+#define WM8994_LINEOUT1_VOL_SHIFT 4 /* LINEOUT1_VOL */
+#define WM8994_LINEOUT1_VOL_WIDTH 1 /* LINEOUT1_VOL */
+#define WM8994_LINEOUT2N_MUTE 0x0004 /* LINEOUT2N_MUTE */
+#define WM8994_LINEOUT2N_MUTE_MASK 0x0004 /* LINEOUT2N_MUTE */
+#define WM8994_LINEOUT2N_MUTE_SHIFT 2 /* LINEOUT2N_MUTE */
+#define WM8994_LINEOUT2N_MUTE_WIDTH 1 /* LINEOUT2N_MUTE */
+#define WM8994_LINEOUT2P_MUTE 0x0002 /* LINEOUT2P_MUTE */
+#define WM8994_LINEOUT2P_MUTE_MASK 0x0002 /* LINEOUT2P_MUTE */
+#define WM8994_LINEOUT2P_MUTE_SHIFT 1 /* LINEOUT2P_MUTE */
+#define WM8994_LINEOUT2P_MUTE_WIDTH 1 /* LINEOUT2P_MUTE */
+#define WM8994_LINEOUT2_VOL 0x0001 /* LINEOUT2_VOL */
+#define WM8994_LINEOUT2_VOL_MASK 0x0001 /* LINEOUT2_VOL */
+#define WM8994_LINEOUT2_VOL_SHIFT 0 /* LINEOUT2_VOL */
+#define WM8994_LINEOUT2_VOL_WIDTH 1 /* LINEOUT2_VOL */
+
+/*
+ * R31 (0x1F) - HPOUT2 Volume
+ */
+#define WM8994_HPOUT2_MUTE 0x0020 /* HPOUT2_MUTE */
+#define WM8994_HPOUT2_MUTE_MASK 0x0020 /* HPOUT2_MUTE */
+#define WM8994_HPOUT2_MUTE_SHIFT 5 /* HPOUT2_MUTE */
+#define WM8994_HPOUT2_MUTE_WIDTH 1 /* HPOUT2_MUTE */
+#define WM8994_HPOUT2_VOL 0x0010 /* HPOUT2_VOL */
+#define WM8994_HPOUT2_VOL_MASK 0x0010 /* HPOUT2_VOL */
+#define WM8994_HPOUT2_VOL_SHIFT 4 /* HPOUT2_VOL */
+#define WM8994_HPOUT2_VOL_WIDTH 1 /* HPOUT2_VOL */
+
+/*
+ * R32 (0x20) - Left OPGA Volume
+ */
+#define WM8994_MIXOUT_VU 0x0100 /* MIXOUT_VU */
+#define WM8994_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */
+#define WM8994_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */
+#define WM8994_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */
+#define WM8994_MIXOUTL_ZC 0x0080 /* MIXOUTL_ZC */
+#define WM8994_MIXOUTL_ZC_MASK 0x0080 /* MIXOUTL_ZC */
+#define WM8994_MIXOUTL_ZC_SHIFT 7 /* MIXOUTL_ZC */
+#define WM8994_MIXOUTL_ZC_WIDTH 1 /* MIXOUTL_ZC */
+#define WM8994_MIXOUTL_MUTE_N 0x0040 /* MIXOUTL_MUTE_N */
+#define WM8994_MIXOUTL_MUTE_N_MASK 0x0040 /* MIXOUTL_MUTE_N */
+#define WM8994_MIXOUTL_MUTE_N_SHIFT 6 /* MIXOUTL_MUTE_N */
+#define WM8994_MIXOUTL_MUTE_N_WIDTH 1 /* MIXOUTL_MUTE_N */
+#define WM8994_MIXOUTL_VOL_MASK 0x003F /* MIXOUTL_VOL - [5:0] */
+#define WM8994_MIXOUTL_VOL_SHIFT 0 /* MIXOUTL_VOL - [5:0] */
+#define WM8994_MIXOUTL_VOL_WIDTH 6 /* MIXOUTL_VOL - [5:0] */
+
+/*
+ * R33 (0x21) - Right OPGA Volume
+ */
+#define WM8994_MIXOUT_VU 0x0100 /* MIXOUT_VU */
+#define WM8994_MIXOUT_VU_MASK 0x0100 /* MIXOUT_VU */
+#define WM8994_MIXOUT_VU_SHIFT 8 /* MIXOUT_VU */
+#define WM8994_MIXOUT_VU_WIDTH 1 /* MIXOUT_VU */
+#define WM8994_MIXOUTR_ZC 0x0080 /* MIXOUTR_ZC */
+#define WM8994_MIXOUTR_ZC_MASK 0x0080 /* MIXOUTR_ZC */
+#define WM8994_MIXOUTR_ZC_SHIFT 7 /* MIXOUTR_ZC */
+#define WM8994_MIXOUTR_ZC_WIDTH 1 /* MIXOUTR_ZC */
+#define WM8994_MIXOUTR_MUTE_N 0x0040 /* MIXOUTR_MUTE_N */
+#define WM8994_MIXOUTR_MUTE_N_MASK 0x0040 /* MIXOUTR_MUTE_N */
+#define WM8994_MIXOUTR_MUTE_N_SHIFT 6 /* MIXOUTR_MUTE_N */
+#define WM8994_MIXOUTR_MUTE_N_WIDTH 1 /* MIXOUTR_MUTE_N */
+#define WM8994_MIXOUTR_VOL_MASK 0x003F /* MIXOUTR_VOL - [5:0] */
+#define WM8994_MIXOUTR_VOL_SHIFT 0 /* MIXOUTR_VOL - [5:0] */
+#define WM8994_MIXOUTR_VOL_WIDTH 6 /* MIXOUTR_VOL - [5:0] */
+
+/*
+ * R34 (0x22) - SPKMIXL Attenuation
+ */
+#define WM8994_DAC2L_SPKMIXL_VOL 0x0040 /* DAC2L_SPKMIXL_VOL */
+#define WM8994_DAC2L_SPKMIXL_VOL_MASK 0x0040 /* DAC2L_SPKMIXL_VOL */
+#define WM8994_DAC2L_SPKMIXL_VOL_SHIFT 6 /* DAC2L_SPKMIXL_VOL */
+#define WM8994_DAC2L_SPKMIXL_VOL_WIDTH 1 /* DAC2L_SPKMIXL_VOL */
+#define WM8994_MIXINL_SPKMIXL_VOL 0x0020 /* MIXINL_SPKMIXL_VOL */
+#define WM8994_MIXINL_SPKMIXL_VOL_MASK 0x0020 /* MIXINL_SPKMIXL_VOL */
+#define WM8994_MIXINL_SPKMIXL_VOL_SHIFT 5 /* MIXINL_SPKMIXL_VOL */
+#define WM8994_MIXINL_SPKMIXL_VOL_WIDTH 1 /* MIXINL_SPKMIXL_VOL */
+#define WM8994_IN1LP_SPKMIXL_VOL 0x0010 /* IN1LP_SPKMIXL_VOL */
+#define WM8994_IN1LP_SPKMIXL_VOL_MASK 0x0010 /* IN1LP_SPKMIXL_VOL */
+#define WM8994_IN1LP_SPKMIXL_VOL_SHIFT 4 /* IN1LP_SPKMIXL_VOL */
+#define WM8994_IN1LP_SPKMIXL_VOL_WIDTH 1 /* IN1LP_SPKMIXL_VOL */
+#define WM8994_MIXOUTL_SPKMIXL_VOL 0x0008 /* MIXOUTL_SPKMIXL_VOL */
+#define WM8994_MIXOUTL_SPKMIXL_VOL_MASK 0x0008 /* MIXOUTL_SPKMIXL_VOL */
+#define WM8994_MIXOUTL_SPKMIXL_VOL_SHIFT 3 /* MIXOUTL_SPKMIXL_VOL */
+#define WM8994_MIXOUTL_SPKMIXL_VOL_WIDTH 1 /* MIXOUTL_SPKMIXL_VOL */
+#define WM8994_DAC1L_SPKMIXL_VOL 0x0004 /* DAC1L_SPKMIXL_VOL */
+#define WM8994_DAC1L_SPKMIXL_VOL_MASK 0x0004 /* DAC1L_SPKMIXL_VOL */
+#define WM8994_DAC1L_SPKMIXL_VOL_SHIFT 2 /* DAC1L_SPKMIXL_VOL */
+#define WM8994_DAC1L_SPKMIXL_VOL_WIDTH 1 /* DAC1L_SPKMIXL_VOL */
+#define WM8994_SPKMIXL_VOL_MASK 0x0003 /* SPKMIXL_VOL - [1:0] */
+#define WM8994_SPKMIXL_VOL_SHIFT 0 /* SPKMIXL_VOL - [1:0] */
+#define WM8994_SPKMIXL_VOL_WIDTH 2 /* SPKMIXL_VOL - [1:0] */
+
+/*
+ * R35 (0x23) - SPKMIXR Attenuation
+ */
+#define WM8994_SPKOUT_CLASSAB 0x0100 /* SPKOUT_CLASSAB */
+#define WM8994_SPKOUT_CLASSAB_MASK 0x0100 /* SPKOUT_CLASSAB */
+#define WM8994_SPKOUT_CLASSAB_SHIFT 8 /* SPKOUT_CLASSAB */
+#define WM8994_SPKOUT_CLASSAB_WIDTH 1 /* SPKOUT_CLASSAB */
+#define WM8994_DAC2R_SPKMIXR_VOL 0x0040 /* DAC2R_SPKMIXR_VOL */
+#define WM8994_DAC2R_SPKMIXR_VOL_MASK 0x0040 /* DAC2R_SPKMIXR_VOL */
+#define WM8994_DAC2R_SPKMIXR_VOL_SHIFT 6 /* DAC2R_SPKMIXR_VOL */
+#define WM8994_DAC2R_SPKMIXR_VOL_WIDTH 1 /* DAC2R_SPKMIXR_VOL */
+#define WM8994_MIXINR_SPKMIXR_VOL 0x0020 /* MIXINR_SPKMIXR_VOL */
+#define WM8994_MIXINR_SPKMIXR_VOL_MASK 0x0020 /* MIXINR_SPKMIXR_VOL */
+#define WM8994_MIXINR_SPKMIXR_VOL_SHIFT 5 /* MIXINR_SPKMIXR_VOL */
+#define WM8994_MIXINR_SPKMIXR_VOL_WIDTH 1 /* MIXINR_SPKMIXR_VOL */
+#define WM8994_IN1RP_SPKMIXR_VOL 0x0010 /* IN1RP_SPKMIXR_VOL */
+#define WM8994_IN1RP_SPKMIXR_VOL_MASK 0x0010 /* IN1RP_SPKMIXR_VOL */
+#define WM8994_IN1RP_SPKMIXR_VOL_SHIFT 4 /* IN1RP_SPKMIXR_VOL */
+#define WM8994_IN1RP_SPKMIXR_VOL_WIDTH 1 /* IN1RP_SPKMIXR_VOL */
+#define WM8994_MIXOUTR_SPKMIXR_VOL 0x0008 /* MIXOUTR_SPKMIXR_VOL */
+#define WM8994_MIXOUTR_SPKMIXR_VOL_MASK 0x0008 /* MIXOUTR_SPKMIXR_VOL */
+#define WM8994_MIXOUTR_SPKMIXR_VOL_SHIFT 3 /* MIXOUTR_SPKMIXR_VOL */
+#define WM8994_MIXOUTR_SPKMIXR_VOL_WIDTH 1 /* MIXOUTR_SPKMIXR_VOL */
+#define WM8994_DAC1R_SPKMIXR_VOL 0x0004 /* DAC1R_SPKMIXR_VOL */
+#define WM8994_DAC1R_SPKMIXR_VOL_MASK 0x0004 /* DAC1R_SPKMIXR_VOL */
+#define WM8994_DAC1R_SPKMIXR_VOL_SHIFT 2 /* DAC1R_SPKMIXR_VOL */
+#define WM8994_DAC1R_SPKMIXR_VOL_WIDTH 1 /* DAC1R_SPKMIXR_VOL */
+#define WM8994_SPKMIXR_VOL_MASK 0x0003 /* SPKMIXR_VOL - [1:0] */
+#define WM8994_SPKMIXR_VOL_SHIFT 0 /* SPKMIXR_VOL - [1:0] */
+#define WM8994_SPKMIXR_VOL_WIDTH 2 /* SPKMIXR_VOL - [1:0] */
+
+/*
+ * R36 (0x24) - SPKOUT Mixers
+ */
+#define WM8994_IN2LRP_TO_SPKOUTL 0x0020 /* IN2LRP_TO_SPKOUTL */
+#define WM8994_IN2LRP_TO_SPKOUTL_MASK 0x0020 /* IN2LRP_TO_SPKOUTL */
+#define WM8994_IN2LRP_TO_SPKOUTL_SHIFT 5 /* IN2LRP_TO_SPKOUTL */
+#define WM8994_IN2LRP_TO_SPKOUTL_WIDTH 1 /* IN2LRP_TO_SPKOUTL */
+#define WM8994_SPKMIXL_TO_SPKOUTL 0x0010 /* SPKMIXL_TO_SPKOUTL */
+#define WM8994_SPKMIXL_TO_SPKOUTL_MASK 0x0010 /* SPKMIXL_TO_SPKOUTL */
+#define WM8994_SPKMIXL_TO_SPKOUTL_SHIFT 4 /* SPKMIXL_TO_SPKOUTL */
+#define WM8994_SPKMIXL_TO_SPKOUTL_WIDTH 1 /* SPKMIXL_TO_SPKOUTL */
+#define WM8994_SPKMIXR_TO_SPKOUTL 0x0008 /* SPKMIXR_TO_SPKOUTL */
+#define WM8994_SPKMIXR_TO_SPKOUTL_MASK 0x0008 /* SPKMIXR_TO_SPKOUTL */
+#define WM8994_SPKMIXR_TO_SPKOUTL_SHIFT 3 /* SPKMIXR_TO_SPKOUTL */
+#define WM8994_SPKMIXR_TO_SPKOUTL_WIDTH 1 /* SPKMIXR_TO_SPKOUTL */
+#define WM8994_IN2LRP_TO_SPKOUTR 0x0004 /* IN2LRP_TO_SPKOUTR */
+#define WM8994_IN2LRP_TO_SPKOUTR_MASK 0x0004 /* IN2LRP_TO_SPKOUTR */
+#define WM8994_IN2LRP_TO_SPKOUTR_SHIFT 2 /* IN2LRP_TO_SPKOUTR */
+#define WM8994_IN2LRP_TO_SPKOUTR_WIDTH 1 /* IN2LRP_TO_SPKOUTR */
+#define WM8994_SPKMIXL_TO_SPKOUTR 0x0002 /* SPKMIXL_TO_SPKOUTR */
+#define WM8994_SPKMIXL_TO_SPKOUTR_MASK 0x0002 /* SPKMIXL_TO_SPKOUTR */
+#define WM8994_SPKMIXL_TO_SPKOUTR_SHIFT 1 /* SPKMIXL_TO_SPKOUTR */
+#define WM8994_SPKMIXL_TO_SPKOUTR_WIDTH 1 /* SPKMIXL_TO_SPKOUTR */
+#define WM8994_SPKMIXR_TO_SPKOUTR 0x0001 /* SPKMIXR_TO_SPKOUTR */
+#define WM8994_SPKMIXR_TO_SPKOUTR_MASK 0x0001 /* SPKMIXR_TO_SPKOUTR */
+#define WM8994_SPKMIXR_TO_SPKOUTR_SHIFT 0 /* SPKMIXR_TO_SPKOUTR */
+#define WM8994_SPKMIXR_TO_SPKOUTR_WIDTH 1 /* SPKMIXR_TO_SPKOUTR */
+
+/*
+ * R37 (0x25) - ClassD
+ */
+#define WM8994_SPKOUTL_BOOST_MASK 0x0038 /* SPKOUTL_BOOST - [5:3] */
+#define WM8994_SPKOUTL_BOOST_SHIFT 3 /* SPKOUTL_BOOST - [5:3] */
+#define WM8994_SPKOUTL_BOOST_WIDTH 3 /* SPKOUTL_BOOST - [5:3] */
+#define WM8994_SPKOUTR_BOOST_MASK 0x0007 /* SPKOUTR_BOOST - [2:0] */
+#define WM8994_SPKOUTR_BOOST_SHIFT 0 /* SPKOUTR_BOOST - [2:0] */
+#define WM8994_SPKOUTR_BOOST_WIDTH 3 /* SPKOUTR_BOOST - [2:0] */
+
+/*
+ * R38 (0x26) - Speaker Volume Left
+ */
+#define WM8994_SPKOUT_VU 0x0100 /* SPKOUT_VU */
+#define WM8994_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */
+#define WM8994_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */
+#define WM8994_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */
+#define WM8994_SPKOUTL_ZC 0x0080 /* SPKOUTL_ZC */
+#define WM8994_SPKOUTL_ZC_MASK 0x0080 /* SPKOUTL_ZC */
+#define WM8994_SPKOUTL_ZC_SHIFT 7 /* SPKOUTL_ZC */
+#define WM8994_SPKOUTL_ZC_WIDTH 1 /* SPKOUTL_ZC */
+#define WM8994_SPKOUTL_MUTE_N 0x0040 /* SPKOUTL_MUTE_N */
+#define WM8994_SPKOUTL_MUTE_N_MASK 0x0040 /* SPKOUTL_MUTE_N */
+#define WM8994_SPKOUTL_MUTE_N_SHIFT 6 /* SPKOUTL_MUTE_N */
+#define WM8994_SPKOUTL_MUTE_N_WIDTH 1 /* SPKOUTL_MUTE_N */
+#define WM8994_SPKOUTL_VOL_MASK 0x003F /* SPKOUTL_VOL - [5:0] */
+#define WM8994_SPKOUTL_VOL_SHIFT 0 /* SPKOUTL_VOL - [5:0] */
+#define WM8994_SPKOUTL_VOL_WIDTH 6 /* SPKOUTL_VOL - [5:0] */
+
+/*
+ * R39 (0x27) - Speaker Volume Right
+ */
+#define WM8994_SPKOUT_VU 0x0100 /* SPKOUT_VU */
+#define WM8994_SPKOUT_VU_MASK 0x0100 /* SPKOUT_VU */
+#define WM8994_SPKOUT_VU_SHIFT 8 /* SPKOUT_VU */
+#define WM8994_SPKOUT_VU_WIDTH 1 /* SPKOUT_VU */
+#define WM8994_SPKOUTR_ZC 0x0080 /* SPKOUTR_ZC */
+#define WM8994_SPKOUTR_ZC_MASK 0x0080 /* SPKOUTR_ZC */
+#define WM8994_SPKOUTR_ZC_SHIFT 7 /* SPKOUTR_ZC */
+#define WM8994_SPKOUTR_ZC_WIDTH 1 /* SPKOUTR_ZC */
+#define WM8994_SPKOUTR_MUTE_N 0x0040 /* SPKOUTR_MUTE_N */
+#define WM8994_SPKOUTR_MUTE_N_MASK 0x0040 /* SPKOUTR_MUTE_N */
+#define WM8994_SPKOUTR_MUTE_N_SHIFT 6 /* SPKOUTR_MUTE_N */
+#define WM8994_SPKOUTR_MUTE_N_WIDTH 1 /* SPKOUTR_MUTE_N */
+#define WM8994_SPKOUTR_VOL_MASK 0x003F /* SPKOUTR_VOL - [5:0] */
+#define WM8994_SPKOUTR_VOL_SHIFT 0 /* SPKOUTR_VOL - [5:0] */
+#define WM8994_SPKOUTR_VOL_WIDTH 6 /* SPKOUTR_VOL - [5:0] */
+
+/*
+ * R40 (0x28) - Input Mixer (2)
+ */
+#define WM8994_IN2LP_TO_IN2L 0x0080 /* IN2LP_TO_IN2L */
+#define WM8994_IN2LP_TO_IN2L_MASK 0x0080 /* IN2LP_TO_IN2L */
+#define WM8994_IN2LP_TO_IN2L_SHIFT 7 /* IN2LP_TO_IN2L */
+#define WM8994_IN2LP_TO_IN2L_WIDTH 1 /* IN2LP_TO_IN2L */
+#define WM8994_IN2LN_TO_IN2L 0x0040 /* IN2LN_TO_IN2L */
+#define WM8994_IN2LN_TO_IN2L_MASK 0x0040 /* IN2LN_TO_IN2L */
+#define WM8994_IN2LN_TO_IN2L_SHIFT 6 /* IN2LN_TO_IN2L */
+#define WM8994_IN2LN_TO_IN2L_WIDTH 1 /* IN2LN_TO_IN2L */
+#define WM8994_IN1LP_TO_IN1L 0x0020 /* IN1LP_TO_IN1L */
+#define WM8994_IN1LP_TO_IN1L_MASK 0x0020 /* IN1LP_TO_IN1L */
+#define WM8994_IN1LP_TO_IN1L_SHIFT 5 /* IN1LP_TO_IN1L */
+#define WM8994_IN1LP_TO_IN1L_WIDTH 1 /* IN1LP_TO_IN1L */
+#define WM8994_IN1LN_TO_IN1L 0x0010 /* IN1LN_TO_IN1L */
+#define WM8994_IN1LN_TO_IN1L_MASK 0x0010 /* IN1LN_TO_IN1L */
+#define WM8994_IN1LN_TO_IN1L_SHIFT 4 /* IN1LN_TO_IN1L */
+#define WM8994_IN1LN_TO_IN1L_WIDTH 1 /* IN1LN_TO_IN1L */
+#define WM8994_IN2RP_TO_IN2R 0x0008 /* IN2RP_TO_IN2R */
+#define WM8994_IN2RP_TO_IN2R_MASK 0x0008 /* IN2RP_TO_IN2R */
+#define WM8994_IN2RP_TO_IN2R_SHIFT 3 /* IN2RP_TO_IN2R */
+#define WM8994_IN2RP_TO_IN2R_WIDTH 1 /* IN2RP_TO_IN2R */
+#define WM8994_IN2RN_TO_IN2R 0x0004 /* IN2RN_TO_IN2R */
+#define WM8994_IN2RN_TO_IN2R_MASK 0x0004 /* IN2RN_TO_IN2R */
+#define WM8994_IN2RN_TO_IN2R_SHIFT 2 /* IN2RN_TO_IN2R */
+#define WM8994_IN2RN_TO_IN2R_WIDTH 1 /* IN2RN_TO_IN2R */
+#define WM8994_IN1RP_TO_IN1R 0x0002 /* IN1RP_TO_IN1R */
+#define WM8994_IN1RP_TO_IN1R_MASK 0x0002 /* IN1RP_TO_IN1R */
+#define WM8994_IN1RP_TO_IN1R_SHIFT 1 /* IN1RP_TO_IN1R */
+#define WM8994_IN1RP_TO_IN1R_WIDTH 1 /* IN1RP_TO_IN1R */
+#define WM8994_IN1RN_TO_IN1R 0x0001 /* IN1RN_TO_IN1R */
+#define WM8994_IN1RN_TO_IN1R_MASK 0x0001 /* IN1RN_TO_IN1R */
+#define WM8994_IN1RN_TO_IN1R_SHIFT 0 /* IN1RN_TO_IN1R */
+#define WM8994_IN1RN_TO_IN1R_WIDTH 1 /* IN1RN_TO_IN1R */
+
+/*
+ * R41 (0x29) - Input Mixer (3)
+ */
+#define WM8994_IN2L_TO_MIXINL 0x0100 /* IN2L_TO_MIXINL */
+#define WM8994_IN2L_TO_MIXINL_MASK 0x0100 /* IN2L_TO_MIXINL */
+#define WM8994_IN2L_TO_MIXINL_SHIFT 8 /* IN2L_TO_MIXINL */
+#define WM8994_IN2L_TO_MIXINL_WIDTH 1 /* IN2L_TO_MIXINL */
+#define WM8994_IN2L_MIXINL_VOL 0x0080 /* IN2L_MIXINL_VOL */
+#define WM8994_IN2L_MIXINL_VOL_MASK 0x0080 /* IN2L_MIXINL_VOL */
+#define WM8994_IN2L_MIXINL_VOL_SHIFT 7 /* IN2L_MIXINL_VOL */
+#define WM8994_IN2L_MIXINL_VOL_WIDTH 1 /* IN2L_MIXINL_VOL */
+#define WM8994_IN1L_TO_MIXINL 0x0020 /* IN1L_TO_MIXINL */
+#define WM8994_IN1L_TO_MIXINL_MASK 0x0020 /* IN1L_TO_MIXINL */
+#define WM8994_IN1L_TO_MIXINL_SHIFT 5 /* IN1L_TO_MIXINL */
+#define WM8994_IN1L_TO_MIXINL_WIDTH 1 /* IN1L_TO_MIXINL */
+#define WM8994_IN1L_MIXINL_VOL 0x0010 /* IN1L_MIXINL_VOL */
+#define WM8994_IN1L_MIXINL_VOL_MASK 0x0010 /* IN1L_MIXINL_VOL */
+#define WM8994_IN1L_MIXINL_VOL_SHIFT 4 /* IN1L_MIXINL_VOL */
+#define WM8994_IN1L_MIXINL_VOL_WIDTH 1 /* IN1L_MIXINL_VOL */
+#define WM8994_MIXOUTL_MIXINL_VOL_MASK 0x0007 /* MIXOUTL_MIXINL_VOL - [2:0] */
+#define WM8994_MIXOUTL_MIXINL_VOL_SHIFT 0 /* MIXOUTL_MIXINL_VOL - [2:0] */
+#define WM8994_MIXOUTL_MIXINL_VOL_WIDTH 3 /* MIXOUTL_MIXINL_VOL - [2:0] */
+
+/*
+ * R42 (0x2A) - Input Mixer (4)
+ */
+#define WM8994_IN2R_TO_MIXINR 0x0100 /* IN2R_TO_MIXINR */
+#define WM8994_IN2R_TO_MIXINR_MASK 0x0100 /* IN2R_TO_MIXINR */
+#define WM8994_IN2R_TO_MIXINR_SHIFT 8 /* IN2R_TO_MIXINR */
+#define WM8994_IN2R_TO_MIXINR_WIDTH 1 /* IN2R_TO_MIXINR */
+#define WM8994_IN2R_MIXINR_VOL 0x0080 /* IN2R_MIXINR_VOL */
+#define WM8994_IN2R_MIXINR_VOL_MASK 0x0080 /* IN2R_MIXINR_VOL */
+#define WM8994_IN2R_MIXINR_VOL_SHIFT 7 /* IN2R_MIXINR_VOL */
+#define WM8994_IN2R_MIXINR_VOL_WIDTH 1 /* IN2R_MIXINR_VOL */
+#define WM8994_IN1R_TO_MIXINR 0x0020 /* IN1R_TO_MIXINR */
+#define WM8994_IN1R_TO_MIXINR_MASK 0x0020 /* IN1R_TO_MIXINR */
+#define WM8994_IN1R_TO_MIXINR_SHIFT 5 /* IN1R_TO_MIXINR */
+#define WM8994_IN1R_TO_MIXINR_WIDTH 1 /* IN1R_TO_MIXINR */
+#define WM8994_IN1R_MIXINR_VOL 0x0010 /* IN1R_MIXINR_VOL */
+#define WM8994_IN1R_MIXINR_VOL_MASK 0x0010 /* IN1R_MIXINR_VOL */
+#define WM8994_IN1R_MIXINR_VOL_SHIFT 4 /* IN1R_MIXINR_VOL */
+#define WM8994_IN1R_MIXINR_VOL_WIDTH 1 /* IN1R_MIXINR_VOL */
+#define WM8994_MIXOUTR_MIXINR_VOL_MASK 0x0007 /* MIXOUTR_MIXINR_VOL - [2:0] */
+#define WM8994_MIXOUTR_MIXINR_VOL_SHIFT 0 /* MIXOUTR_MIXINR_VOL - [2:0] */
+#define WM8994_MIXOUTR_MIXINR_VOL_WIDTH 3 /* MIXOUTR_MIXINR_VOL - [2:0] */
+
+/*
+ * R43 (0x2B) - Input Mixer (5)
+ */
+#define WM8994_IN1LP_MIXINL_VOL_MASK 0x01C0 /* IN1LP_MIXINL_VOL - [8:6] */
+#define WM8994_IN1LP_MIXINL_VOL_SHIFT 6 /* IN1LP_MIXINL_VOL - [8:6] */
+#define WM8994_IN1LP_MIXINL_VOL_WIDTH 3 /* IN1LP_MIXINL_VOL - [8:6] */
+#define WM8994_IN2LRP_MIXINL_VOL_MASK 0x0007 /* IN2LRP_MIXINL_VOL - [2:0] */
+#define WM8994_IN2LRP_MIXINL_VOL_SHIFT 0 /* IN2LRP_MIXINL_VOL - [2:0] */
+#define WM8994_IN2LRP_MIXINL_VOL_WIDTH 3 /* IN2LRP_MIXINL_VOL - [2:0] */
+
+/*
+ * R44 (0x2C) - Input Mixer (6)
+ */
+#define WM8994_IN1RP_MIXINR_VOL_MASK 0x01C0 /* IN1RP_MIXINR_VOL - [8:6] */
+#define WM8994_IN1RP_MIXINR_VOL_SHIFT 6 /* IN1RP_MIXINR_VOL - [8:6] */
+#define WM8994_IN1RP_MIXINR_VOL_WIDTH 3 /* IN1RP_MIXINR_VOL - [8:6] */
+#define WM8994_IN2LRP_MIXINR_VOL_MASK 0x0007 /* IN2LRP_MIXINR_VOL - [2:0] */
+#define WM8994_IN2LRP_MIXINR_VOL_SHIFT 0 /* IN2LRP_MIXINR_VOL - [2:0] */
+#define WM8994_IN2LRP_MIXINR_VOL_WIDTH 3 /* IN2LRP_MIXINR_VOL - [2:0] */
+
+/*
+ * R45 (0x2D) - Output Mixer (1)
+ */
+#define WM8994_DAC1L_TO_HPOUT1L 0x0100 /* DAC1L_TO_HPOUT1L */
+#define WM8994_DAC1L_TO_HPOUT1L_MASK 0x0100 /* DAC1L_TO_HPOUT1L */
+#define WM8994_DAC1L_TO_HPOUT1L_SHIFT 8 /* DAC1L_TO_HPOUT1L */
+#define WM8994_DAC1L_TO_HPOUT1L_WIDTH 1 /* DAC1L_TO_HPOUT1L */
+#define WM8994_MIXINR_TO_MIXOUTL 0x0080 /* MIXINR_TO_MIXOUTL */
+#define WM8994_MIXINR_TO_MIXOUTL_MASK 0x0080 /* MIXINR_TO_MIXOUTL */
+#define WM8994_MIXINR_TO_MIXOUTL_SHIFT 7 /* MIXINR_TO_MIXOUTL */
+#define WM8994_MIXINR_TO_MIXOUTL_WIDTH 1 /* MIXINR_TO_MIXOUTL */
+#define WM8994_MIXINL_TO_MIXOUTL 0x0040 /* MIXINL_TO_MIXOUTL */
+#define WM8994_MIXINL_TO_MIXOUTL_MASK 0x0040 /* MIXINL_TO_MIXOUTL */
+#define WM8994_MIXINL_TO_MIXOUTL_SHIFT 6 /* MIXINL_TO_MIXOUTL */
+#define WM8994_MIXINL_TO_MIXOUTL_WIDTH 1 /* MIXINL_TO_MIXOUTL */
+#define WM8994_IN2RN_TO_MIXOUTL 0x0020 /* IN2RN_TO_MIXOUTL */
+#define WM8994_IN2RN_TO_MIXOUTL_MASK 0x0020 /* IN2RN_TO_MIXOUTL */
+#define WM8994_IN2RN_TO_MIXOUTL_SHIFT 5 /* IN2RN_TO_MIXOUTL */
+#define WM8994_IN2RN_TO_MIXOUTL_WIDTH 1 /* IN2RN_TO_MIXOUTL */
+#define WM8994_IN2LN_TO_MIXOUTL 0x0010 /* IN2LN_TO_MIXOUTL */
+#define WM8994_IN2LN_TO_MIXOUTL_MASK 0x0010 /* IN2LN_TO_MIXOUTL */
+#define WM8994_IN2LN_TO_MIXOUTL_SHIFT 4 /* IN2LN_TO_MIXOUTL */
+#define WM8994_IN2LN_TO_MIXOUTL_WIDTH 1 /* IN2LN_TO_MIXOUTL */
+#define WM8994_IN1R_TO_MIXOUTL 0x0008 /* IN1R_TO_MIXOUTL */
+#define WM8994_IN1R_TO_MIXOUTL_MASK 0x0008 /* IN1R_TO_MIXOUTL */
+#define WM8994_IN1R_TO_MIXOUTL_SHIFT 3 /* IN1R_TO_MIXOUTL */
+#define WM8994_IN1R_TO_MIXOUTL_WIDTH 1 /* IN1R_TO_MIXOUTL */
+#define WM8994_IN1L_TO_MIXOUTL 0x0004 /* IN1L_TO_MIXOUTL */
+#define WM8994_IN1L_TO_MIXOUTL_MASK 0x0004 /* IN1L_TO_MIXOUTL */
+#define WM8994_IN1L_TO_MIXOUTL_SHIFT 2 /* IN1L_TO_MIXOUTL */
+#define WM8994_IN1L_TO_MIXOUTL_WIDTH 1 /* IN1L_TO_MIXOUTL */
+#define WM8994_IN2LP_TO_MIXOUTL 0x0002 /* IN2LP_TO_MIXOUTL */
+#define WM8994_IN2LP_TO_MIXOUTL_MASK 0x0002 /* IN2LP_TO_MIXOUTL */
+#define WM8994_IN2LP_TO_MIXOUTL_SHIFT 1 /* IN2LP_TO_MIXOUTL */
+#define WM8994_IN2LP_TO_MIXOUTL_WIDTH 1 /* IN2LP_TO_MIXOUTL */
+#define WM8994_DAC1L_TO_MIXOUTL 0x0001 /* DAC1L_TO_MIXOUTL */
+#define WM8994_DAC1L_TO_MIXOUTL_MASK 0x0001 /* DAC1L_TO_MIXOUTL */
+#define WM8994_DAC1L_TO_MIXOUTL_SHIFT 0 /* DAC1L_TO_MIXOUTL */
+#define WM8994_DAC1L_TO_MIXOUTL_WIDTH 1 /* DAC1L_TO_MIXOUTL */
+
+/*
+ * R46 (0x2E) - Output Mixer (2)
+ */
+#define WM8994_DAC1R_TO_HPOUT1R 0x0100 /* DAC1R_TO_HPOUT1R */
+#define WM8994_DAC1R_TO_HPOUT1R_MASK 0x0100 /* DAC1R_TO_HPOUT1R */
+#define WM8994_DAC1R_TO_HPOUT1R_SHIFT 8 /* DAC1R_TO_HPOUT1R */
+#define WM8994_DAC1R_TO_HPOUT1R_WIDTH 1 /* DAC1R_TO_HPOUT1R */
+#define WM8994_MIXINL_TO_MIXOUTR 0x0080 /* MIXINL_TO_MIXOUTR */
+#define WM8994_MIXINL_TO_MIXOUTR_MASK 0x0080 /* MIXINL_TO_MIXOUTR */
+#define WM8994_MIXINL_TO_MIXOUTR_SHIFT 7 /* MIXINL_TO_MIXOUTR */
+#define WM8994_MIXINL_TO_MIXOUTR_WIDTH 1 /* MIXINL_TO_MIXOUTR */
+#define WM8994_MIXINR_TO_MIXOUTR 0x0040 /* MIXINR_TO_MIXOUTR */
+#define WM8994_MIXINR_TO_MIXOUTR_MASK 0x0040 /* MIXINR_TO_MIXOUTR */
+#define WM8994_MIXINR_TO_MIXOUTR_SHIFT 6 /* MIXINR_TO_MIXOUTR */
+#define WM8994_MIXINR_TO_MIXOUTR_WIDTH 1 /* MIXINR_TO_MIXOUTR */
+#define WM8994_IN2LN_TO_MIXOUTR 0x0020 /* IN2LN_TO_MIXOUTR */
+#define WM8994_IN2LN_TO_MIXOUTR_MASK 0x0020 /* IN2LN_TO_MIXOUTR */
+#define WM8994_IN2LN_TO_MIXOUTR_SHIFT 5 /* IN2LN_TO_MIXOUTR */
+#define WM8994_IN2LN_TO_MIXOUTR_WIDTH 1 /* IN2LN_TO_MIXOUTR */
+#define WM8994_IN2RN_TO_MIXOUTR 0x0010 /* IN2RN_TO_MIXOUTR */
+#define WM8994_IN2RN_TO_MIXOUTR_MASK 0x0010 /* IN2RN_TO_MIXOUTR */
+#define WM8994_IN2RN_TO_MIXOUTR_SHIFT 4 /* IN2RN_TO_MIXOUTR */
+#define WM8994_IN2RN_TO_MIXOUTR_WIDTH 1 /* IN2RN_TO_MIXOUTR */
+#define WM8994_IN1L_TO_MIXOUTR 0x0008 /* IN1L_TO_MIXOUTR */
+#define WM8994_IN1L_TO_MIXOUTR_MASK 0x0008 /* IN1L_TO_MIXOUTR */
+#define WM8994_IN1L_TO_MIXOUTR_SHIFT 3 /* IN1L_TO_MIXOUTR */
+#define WM8994_IN1L_TO_MIXOUTR_WIDTH 1 /* IN1L_TO_MIXOUTR */
+#define WM8994_IN1R_TO_MIXOUTR 0x0004 /* IN1R_TO_MIXOUTR */
+#define WM8994_IN1R_TO_MIXOUTR_MASK 0x0004 /* IN1R_TO_MIXOUTR */
+#define WM8994_IN1R_TO_MIXOUTR_SHIFT 2 /* IN1R_TO_MIXOUTR */
+#define WM8994_IN1R_TO_MIXOUTR_WIDTH 1 /* IN1R_TO_MIXOUTR */
+#define WM8994_IN2RP_TO_MIXOUTR 0x0002 /* IN2RP_TO_MIXOUTR */
+#define WM8994_IN2RP_TO_MIXOUTR_MASK 0x0002 /* IN2RP_TO_MIXOUTR */
+#define WM8994_IN2RP_TO_MIXOUTR_SHIFT 1 /* IN2RP_TO_MIXOUTR */
+#define WM8994_IN2RP_TO_MIXOUTR_WIDTH 1 /* IN2RP_TO_MIXOUTR */
+#define WM8994_DAC1R_TO_MIXOUTR 0x0001 /* DAC1R_TO_MIXOUTR */
+#define WM8994_DAC1R_TO_MIXOUTR_MASK 0x0001 /* DAC1R_TO_MIXOUTR */
+#define WM8994_DAC1R_TO_MIXOUTR_SHIFT 0 /* DAC1R_TO_MIXOUTR */
+#define WM8994_DAC1R_TO_MIXOUTR_WIDTH 1 /* DAC1R_TO_MIXOUTR */
+
+/*
+ * R47 (0x2F) - Output Mixer (3)
+ */
+#define WM8994_IN2LP_MIXOUTL_VOL_MASK 0x0E00 /* IN2LP_MIXOUTL_VOL - [11:9] */
+#define WM8994_IN2LP_MIXOUTL_VOL_SHIFT 9 /* IN2LP_MIXOUTL_VOL - [11:9] */
+#define WM8994_IN2LP_MIXOUTL_VOL_WIDTH 3 /* IN2LP_MIXOUTL_VOL - [11:9] */
+#define WM8994_IN2LN_MIXOUTL_VOL_MASK 0x01C0 /* IN2LN_MIXOUTL_VOL - [8:6] */
+#define WM8994_IN2LN_MIXOUTL_VOL_SHIFT 6 /* IN2LN_MIXOUTL_VOL - [8:6] */
+#define WM8994_IN2LN_MIXOUTL_VOL_WIDTH 3 /* IN2LN_MIXOUTL_VOL - [8:6] */
+#define WM8994_IN1R_MIXOUTL_VOL_MASK 0x0038 /* IN1R_MIXOUTL_VOL - [5:3] */
+#define WM8994_IN1R_MIXOUTL_VOL_SHIFT 3 /* IN1R_MIXOUTL_VOL - [5:3] */
+#define WM8994_IN1R_MIXOUTL_VOL_WIDTH 3 /* IN1R_MIXOUTL_VOL - [5:3] */
+#define WM8994_IN1L_MIXOUTL_VOL_MASK 0x0007 /* IN1L_MIXOUTL_VOL - [2:0] */
+#define WM8994_IN1L_MIXOUTL_VOL_SHIFT 0 /* IN1L_MIXOUTL_VOL - [2:0] */
+#define WM8994_IN1L_MIXOUTL_VOL_WIDTH 3 /* IN1L_MIXOUTL_VOL - [2:0] */
+
+/*
+ * R48 (0x30) - Output Mixer (4)
+ */
+#define WM8994_IN2RP_MIXOUTR_VOL_MASK 0x0E00 /* IN2RP_MIXOUTR_VOL - [11:9] */
+#define WM8994_IN2RP_MIXOUTR_VOL_SHIFT 9 /* IN2RP_MIXOUTR_VOL - [11:9] */
+#define WM8994_IN2RP_MIXOUTR_VOL_WIDTH 3 /* IN2RP_MIXOUTR_VOL - [11:9] */
+#define WM8994_IN2RN_MIXOUTR_VOL_MASK 0x01C0 /* IN2RN_MIXOUTR_VOL - [8:6] */
+#define WM8994_IN2RN_MIXOUTR_VOL_SHIFT 6 /* IN2RN_MIXOUTR_VOL - [8:6] */
+#define WM8994_IN2RN_MIXOUTR_VOL_WIDTH 3 /* IN2RN_MIXOUTR_VOL - [8:6] */
+#define WM8994_IN1L_MIXOUTR_VOL_MASK 0x0038 /* IN1L_MIXOUTR_VOL - [5:3] */
+#define WM8994_IN1L_MIXOUTR_VOL_SHIFT 3 /* IN1L_MIXOUTR_VOL - [5:3] */
+#define WM8994_IN1L_MIXOUTR_VOL_WIDTH 3 /* IN1L_MIXOUTR_VOL - [5:3] */
+#define WM8994_IN1R_MIXOUTR_VOL_MASK 0x0007 /* IN1R_MIXOUTR_VOL - [2:0] */
+#define WM8994_IN1R_MIXOUTR_VOL_SHIFT 0 /* IN1R_MIXOUTR_VOL - [2:0] */
+#define WM8994_IN1R_MIXOUTR_VOL_WIDTH 3 /* IN1R_MIXOUTR_VOL - [2:0] */
+
+/*
+ * R49 (0x31) - Output Mixer (5)
+ */
+#define WM8994_DAC1L_MIXOUTL_VOL_MASK 0x0E00 /* DAC1L_MIXOUTL_VOL - [11:9] */
+#define WM8994_DAC1L_MIXOUTL_VOL_SHIFT 9 /* DAC1L_MIXOUTL_VOL - [11:9] */
+#define WM8994_DAC1L_MIXOUTL_VOL_WIDTH 3 /* DAC1L_MIXOUTL_VOL - [11:9] */
+#define WM8994_IN2RN_MIXOUTL_VOL_MASK 0x01C0 /* IN2RN_MIXOUTL_VOL - [8:6] */
+#define WM8994_IN2RN_MIXOUTL_VOL_SHIFT 6 /* IN2RN_MIXOUTL_VOL - [8:6] */
+#define WM8994_IN2RN_MIXOUTL_VOL_WIDTH 3 /* IN2RN_MIXOUTL_VOL - [8:6] */
+#define WM8994_MIXINR_MIXOUTL_VOL_MASK 0x0038 /* MIXINR_MIXOUTL_VOL - [5:3] */
+#define WM8994_MIXINR_MIXOUTL_VOL_SHIFT 3 /* MIXINR_MIXOUTL_VOL - [5:3] */
+#define WM8994_MIXINR_MIXOUTL_VOL_WIDTH 3 /* MIXINR_MIXOUTL_VOL - [5:3] */
+#define WM8994_MIXINL_MIXOUTL_VOL_MASK 0x0007 /* MIXINL_MIXOUTL_VOL - [2:0] */
+#define WM8994_MIXINL_MIXOUTL_VOL_SHIFT 0 /* MIXINL_MIXOUTL_VOL - [2:0] */
+#define WM8994_MIXINL_MIXOUTL_VOL_WIDTH 3 /* MIXINL_MIXOUTL_VOL - [2:0] */
+
+/*
+ * R50 (0x32) - Output Mixer (6)
+ */
+#define WM8994_DAC1R_MIXOUTR_VOL_MASK 0x0E00 /* DAC1R_MIXOUTR_VOL - [11:9] */
+#define WM8994_DAC1R_MIXOUTR_VOL_SHIFT 9 /* DAC1R_MIXOUTR_VOL - [11:9] */
+#define WM8994_DAC1R_MIXOUTR_VOL_WIDTH 3 /* DAC1R_MIXOUTR_VOL - [11:9] */
+#define WM8994_IN2LN_MIXOUTR_VOL_MASK 0x01C0 /* IN2LN_MIXOUTR_VOL - [8:6] */
+#define WM8994_IN2LN_MIXOUTR_VOL_SHIFT 6 /* IN2LN_MIXOUTR_VOL - [8:6] */
+#define WM8994_IN2LN_MIXOUTR_VOL_WIDTH 3 /* IN2LN_MIXOUTR_VOL - [8:6] */
+#define WM8994_MIXINL_MIXOUTR_VOL_MASK 0x0038 /* MIXINL_MIXOUTR_VOL - [5:3] */
+#define WM8994_MIXINL_MIXOUTR_VOL_SHIFT 3 /* MIXINL_MIXOUTR_VOL - [5:3] */
+#define WM8994_MIXINL_MIXOUTR_VOL_WIDTH 3 /* MIXINL_MIXOUTR_VOL - [5:3] */
+#define WM8994_MIXINR_MIXOUTR_VOL_MASK 0x0007 /* MIXINR_MIXOUTR_VOL - [2:0] */
+#define WM8994_MIXINR_MIXOUTR_VOL_SHIFT 0 /* MIXINR_MIXOUTR_VOL - [2:0] */
+#define WM8994_MIXINR_MIXOUTR_VOL_WIDTH 3 /* MIXINR_MIXOUTR_VOL - [2:0] */
+
+/*
+ * R51 (0x33) - HPOUT2 Mixer
+ */
+#define WM8994_IN2LRP_TO_HPOUT2 0x0020 /* IN2LRP_TO_HPOUT2 */
+#define WM8994_IN2LRP_TO_HPOUT2_MASK 0x0020 /* IN2LRP_TO_HPOUT2 */
+#define WM8994_IN2LRP_TO_HPOUT2_SHIFT 5 /* IN2LRP_TO_HPOUT2 */
+#define WM8994_IN2LRP_TO_HPOUT2_WIDTH 1 /* IN2LRP_TO_HPOUT2 */
+#define WM8994_MIXOUTLVOL_TO_HPOUT2 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */
+#define WM8994_MIXOUTLVOL_TO_HPOUT2_MASK 0x0010 /* MIXOUTLVOL_TO_HPOUT2 */
+#define WM8994_MIXOUTLVOL_TO_HPOUT2_SHIFT 4 /* MIXOUTLVOL_TO_HPOUT2 */
+#define WM8994_MIXOUTLVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTLVOL_TO_HPOUT2 */
+#define WM8994_MIXOUTRVOL_TO_HPOUT2 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */
+#define WM8994_MIXOUTRVOL_TO_HPOUT2_MASK 0x0008 /* MIXOUTRVOL_TO_HPOUT2 */
+#define WM8994_MIXOUTRVOL_TO_HPOUT2_SHIFT 3 /* MIXOUTRVOL_TO_HPOUT2 */
+#define WM8994_MIXOUTRVOL_TO_HPOUT2_WIDTH 1 /* MIXOUTRVOL_TO_HPOUT2 */
+
+/*
+ * R52 (0x34) - Line Mixer (1)
+ */
+#define WM8994_MIXOUTL_TO_LINEOUT1N 0x0040 /* MIXOUTL_TO_LINEOUT1N */
+#define WM8994_MIXOUTL_TO_LINEOUT1N_MASK 0x0040 /* MIXOUTL_TO_LINEOUT1N */
+#define WM8994_MIXOUTL_TO_LINEOUT1N_SHIFT 6 /* MIXOUTL_TO_LINEOUT1N */
+#define WM8994_MIXOUTL_TO_LINEOUT1N_WIDTH 1 /* MIXOUTL_TO_LINEOUT1N */
+#define WM8994_MIXOUTR_TO_LINEOUT1N 0x0020 /* MIXOUTR_TO_LINEOUT1N */
+#define WM8994_MIXOUTR_TO_LINEOUT1N_MASK 0x0020 /* MIXOUTR_TO_LINEOUT1N */
+#define WM8994_MIXOUTR_TO_LINEOUT1N_SHIFT 5 /* MIXOUTR_TO_LINEOUT1N */
+#define WM8994_MIXOUTR_TO_LINEOUT1N_WIDTH 1 /* MIXOUTR_TO_LINEOUT1N */
+#define WM8994_LINEOUT1_MODE 0x0010 /* LINEOUT1_MODE */
+#define WM8994_LINEOUT1_MODE_MASK 0x0010 /* LINEOUT1_MODE */
+#define WM8994_LINEOUT1_MODE_SHIFT 4 /* LINEOUT1_MODE */
+#define WM8994_LINEOUT1_MODE_WIDTH 1 /* LINEOUT1_MODE */
+#define WM8994_IN1R_TO_LINEOUT1P 0x0004 /* IN1R_TO_LINEOUT1P */
+#define WM8994_IN1R_TO_LINEOUT1P_MASK 0x0004 /* IN1R_TO_LINEOUT1P */
+#define WM8994_IN1R_TO_LINEOUT1P_SHIFT 2 /* IN1R_TO_LINEOUT1P */
+#define WM8994_IN1R_TO_LINEOUT1P_WIDTH 1 /* IN1R_TO_LINEOUT1P */
+#define WM8994_IN1L_TO_LINEOUT1P 0x0002 /* IN1L_TO_LINEOUT1P */
+#define WM8994_IN1L_TO_LINEOUT1P_MASK 0x0002 /* IN1L_TO_LINEOUT1P */
+#define WM8994_IN1L_TO_LINEOUT1P_SHIFT 1 /* IN1L_TO_LINEOUT1P */
+#define WM8994_IN1L_TO_LINEOUT1P_WIDTH 1 /* IN1L_TO_LINEOUT1P */
+#define WM8994_MIXOUTL_TO_LINEOUT1P 0x0001 /* MIXOUTL_TO_LINEOUT1P */
+#define WM8994_MIXOUTL_TO_LINEOUT1P_MASK 0x0001 /* MIXOUTL_TO_LINEOUT1P */
+#define WM8994_MIXOUTL_TO_LINEOUT1P_SHIFT 0 /* MIXOUTL_TO_LINEOUT1P */
+#define WM8994_MIXOUTL_TO_LINEOUT1P_WIDTH 1 /* MIXOUTL_TO_LINEOUT1P */
+
+/*
+ * R53 (0x35) - Line Mixer (2)
+ */
+#define WM8994_MIXOUTR_TO_LINEOUT2N 0x0040 /* MIXOUTR_TO_LINEOUT2N */
+#define WM8994_MIXOUTR_TO_LINEOUT2N_MASK 0x0040 /* MIXOUTR_TO_LINEOUT2N */
+#define WM8994_MIXOUTR_TO_LINEOUT2N_SHIFT 6 /* MIXOUTR_TO_LINEOUT2N */
+#define WM8994_MIXOUTR_TO_LINEOUT2N_WIDTH 1 /* MIXOUTR_TO_LINEOUT2N */
+#define WM8994_MIXOUTL_TO_LINEOUT2N 0x0020 /* MIXOUTL_TO_LINEOUT2N */
+#define WM8994_MIXOUTL_TO_LINEOUT2N_MASK 0x0020 /* MIXOUTL_TO_LINEOUT2N */
+#define WM8994_MIXOUTL_TO_LINEOUT2N_SHIFT 5 /* MIXOUTL_TO_LINEOUT2N */
+#define WM8994_MIXOUTL_TO_LINEOUT2N_WIDTH 1 /* MIXOUTL_TO_LINEOUT2N */
+#define WM8994_LINEOUT2_MODE 0x0010 /* LINEOUT2_MODE */
+#define WM8994_LINEOUT2_MODE_MASK 0x0010 /* LINEOUT2_MODE */
+#define WM8994_LINEOUT2_MODE_SHIFT 4 /* LINEOUT2_MODE */
+#define WM8994_LINEOUT2_MODE_WIDTH 1 /* LINEOUT2_MODE */
+#define WM8994_IN1L_TO_LINEOUT2P 0x0004 /* IN1L_TO_LINEOUT2P */
+#define WM8994_IN1L_TO_LINEOUT2P_MASK 0x0004 /* IN1L_TO_LINEOUT2P */
+#define WM8994_IN1L_TO_LINEOUT2P_SHIFT 2 /* IN1L_TO_LINEOUT2P */
+#define WM8994_IN1L_TO_LINEOUT2P_WIDTH 1 /* IN1L_TO_LINEOUT2P */
+#define WM8994_IN1R_TO_LINEOUT2P 0x0002 /* IN1R_TO_LINEOUT2P */
+#define WM8994_IN1R_TO_LINEOUT2P_MASK 0x0002 /* IN1R_TO_LINEOUT2P */
+#define WM8994_IN1R_TO_LINEOUT2P_SHIFT 1 /* IN1R_TO_LINEOUT2P */
+#define WM8994_IN1R_TO_LINEOUT2P_WIDTH 1 /* IN1R_TO_LINEOUT2P */
+#define WM8994_MIXOUTR_TO_LINEOUT2P 0x0001 /* MIXOUTR_TO_LINEOUT2P */
+#define WM8994_MIXOUTR_TO_LINEOUT2P_MASK 0x0001 /* MIXOUTR_TO_LINEOUT2P */
+#define WM8994_MIXOUTR_TO_LINEOUT2P_SHIFT 0 /* MIXOUTR_TO_LINEOUT2P */
+#define WM8994_MIXOUTR_TO_LINEOUT2P_WIDTH 1 /* MIXOUTR_TO_LINEOUT2P */
+
+/*
+ * R54 (0x36) - Speaker Mixer
+ */
+#define WM8994_DAC2L_TO_SPKMIXL 0x0200 /* DAC2L_TO_SPKMIXL */
+#define WM8994_DAC2L_TO_SPKMIXL_MASK 0x0200 /* DAC2L_TO_SPKMIXL */
+#define WM8994_DAC2L_TO_SPKMIXL_SHIFT 9 /* DAC2L_TO_SPKMIXL */
+#define WM8994_DAC2L_TO_SPKMIXL_WIDTH 1 /* DAC2L_TO_SPKMIXL */
+#define WM8994_DAC2R_TO_SPKMIXR 0x0100 /* DAC2R_TO_SPKMIXR */
+#define WM8994_DAC2R_TO_SPKMIXR_MASK 0x0100 /* DAC2R_TO_SPKMIXR */
+#define WM8994_DAC2R_TO_SPKMIXR_SHIFT 8 /* DAC2R_TO_SPKMIXR */
+#define WM8994_DAC2R_TO_SPKMIXR_WIDTH 1 /* DAC2R_TO_SPKMIXR */
+#define WM8994_MIXINL_TO_SPKMIXL 0x0080 /* MIXINL_TO_SPKMIXL */
+#define WM8994_MIXINL_TO_SPKMIXL_MASK 0x0080 /* MIXINL_TO_SPKMIXL */
+#define WM8994_MIXINL_TO_SPKMIXL_SHIFT 7 /* MIXINL_TO_SPKMIXL */
+#define WM8994_MIXINL_TO_SPKMIXL_WIDTH 1 /* MIXINL_TO_SPKMIXL */
+#define WM8994_MIXINR_TO_SPKMIXR 0x0040 /* MIXINR_TO_SPKMIXR */
+#define WM8994_MIXINR_TO_SPKMIXR_MASK 0x0040 /* MIXINR_TO_SPKMIXR */
+#define WM8994_MIXINR_TO_SPKMIXR_SHIFT 6 /* MIXINR_TO_SPKMIXR */
+#define WM8994_MIXINR_TO_SPKMIXR_WIDTH 1 /* MIXINR_TO_SPKMIXR */
+#define WM8994_IN1LP_TO_SPKMIXL 0x0020 /* IN1LP_TO_SPKMIXL */
+#define WM8994_IN1LP_TO_SPKMIXL_MASK 0x0020 /* IN1LP_TO_SPKMIXL */
+#define WM8994_IN1LP_TO_SPKMIXL_SHIFT 5 /* IN1LP_TO_SPKMIXL */
+#define WM8994_IN1LP_TO_SPKMIXL_WIDTH 1 /* IN1LP_TO_SPKMIXL */
+#define WM8994_IN1RP_TO_SPKMIXR 0x0010 /* IN1RP_TO_SPKMIXR */
+#define WM8994_IN1RP_TO_SPKMIXR_MASK 0x0010 /* IN1RP_TO_SPKMIXR */
+#define WM8994_IN1RP_TO_SPKMIXR_SHIFT 4 /* IN1RP_TO_SPKMIXR */
+#define WM8994_IN1RP_TO_SPKMIXR_WIDTH 1 /* IN1RP_TO_SPKMIXR */
+#define WM8994_MIXOUTL_TO_SPKMIXL 0x0008 /* MIXOUTL_TO_SPKMIXL */
+#define WM8994_MIXOUTL_TO_SPKMIXL_MASK 0x0008 /* MIXOUTL_TO_SPKMIXL */
+#define WM8994_MIXOUTL_TO_SPKMIXL_SHIFT 3 /* MIXOUTL_TO_SPKMIXL */
+#define WM8994_MIXOUTL_TO_SPKMIXL_WIDTH 1 /* MIXOUTL_TO_SPKMIXL */
+#define WM8994_MIXOUTR_TO_SPKMIXR 0x0004 /* MIXOUTR_TO_SPKMIXR */
+#define WM8994_MIXOUTR_TO_SPKMIXR_MASK 0x0004 /* MIXOUTR_TO_SPKMIXR */
+#define WM8994_MIXOUTR_TO_SPKMIXR_SHIFT 2 /* MIXOUTR_TO_SPKMIXR */
+#define WM8994_MIXOUTR_TO_SPKMIXR_WIDTH 1 /* MIXOUTR_TO_SPKMIXR */
+#define WM8994_DAC1L_TO_SPKMIXL 0x0002 /* DAC1L_TO_SPKMIXL */
+#define WM8994_DAC1L_TO_SPKMIXL_MASK 0x0002 /* DAC1L_TO_SPKMIXL */
+#define WM8994_DAC1L_TO_SPKMIXL_SHIFT 1 /* DAC1L_TO_SPKMIXL */
+#define WM8994_DAC1L_TO_SPKMIXL_WIDTH 1 /* DAC1L_TO_SPKMIXL */
+#define WM8994_DAC1R_TO_SPKMIXR 0x0001 /* DAC1R_TO_SPKMIXR */
+#define WM8994_DAC1R_TO_SPKMIXR_MASK 0x0001 /* DAC1R_TO_SPKMIXR */
+#define WM8994_DAC1R_TO_SPKMIXR_SHIFT 0 /* DAC1R_TO_SPKMIXR */
+#define WM8994_DAC1R_TO_SPKMIXR_WIDTH 1 /* DAC1R_TO_SPKMIXR */
+
+/*
+ * R55 (0x37) - Additional Control
+ */
+#define WM8994_LINEOUT1_FB 0x0080 /* LINEOUT1_FB */
+#define WM8994_LINEOUT1_FB_MASK 0x0080 /* LINEOUT1_FB */
+#define WM8994_LINEOUT1_FB_SHIFT 7 /* LINEOUT1_FB */
+#define WM8994_LINEOUT1_FB_WIDTH 1 /* LINEOUT1_FB */
+#define WM8994_LINEOUT2_FB 0x0040 /* LINEOUT2_FB */
+#define WM8994_LINEOUT2_FB_MASK 0x0040 /* LINEOUT2_FB */
+#define WM8994_LINEOUT2_FB_SHIFT 6 /* LINEOUT2_FB */
+#define WM8994_LINEOUT2_FB_WIDTH 1 /* LINEOUT2_FB */
+#define WM8994_VROI 0x0001 /* VROI */
+#define WM8994_VROI_MASK 0x0001 /* VROI */
+#define WM8994_VROI_SHIFT 0 /* VROI */
+#define WM8994_VROI_WIDTH 1 /* VROI */
+
+/*
+ * R56 (0x38) - AntiPOP (1)
+ */
+#define WM8994_LINEOUT_VMID_BUF_ENA 0x0080 /* LINEOUT_VMID_BUF_ENA */
+#define WM8994_LINEOUT_VMID_BUF_ENA_MASK 0x0080 /* LINEOUT_VMID_BUF_ENA */
+#define WM8994_LINEOUT_VMID_BUF_ENA_SHIFT 7 /* LINEOUT_VMID_BUF_ENA */
+#define WM8994_LINEOUT_VMID_BUF_ENA_WIDTH 1 /* LINEOUT_VMID_BUF_ENA */
+#define WM8994_HPOUT2_IN_ENA 0x0040 /* HPOUT2_IN_ENA */
+#define WM8994_HPOUT2_IN_ENA_MASK 0x0040 /* HPOUT2_IN_ENA */
+#define WM8994_HPOUT2_IN_ENA_SHIFT 6 /* HPOUT2_IN_ENA */
+#define WM8994_HPOUT2_IN_ENA_WIDTH 1 /* HPOUT2_IN_ENA */
+#define WM8994_LINEOUT1_DISCH 0x0020 /* LINEOUT1_DISCH */
+#define WM8994_LINEOUT1_DISCH_MASK 0x0020 /* LINEOUT1_DISCH */
+#define WM8994_LINEOUT1_DISCH_SHIFT 5 /* LINEOUT1_DISCH */
+#define WM8994_LINEOUT1_DISCH_WIDTH 1 /* LINEOUT1_DISCH */
+#define WM8994_LINEOUT2_DISCH 0x0010 /* LINEOUT2_DISCH */
+#define WM8994_LINEOUT2_DISCH_MASK 0x0010 /* LINEOUT2_DISCH */
+#define WM8994_LINEOUT2_DISCH_SHIFT 4 /* LINEOUT2_DISCH */
+#define WM8994_LINEOUT2_DISCH_WIDTH 1 /* LINEOUT2_DISCH */
+
+/*
+ * R57 (0x39) - AntiPOP (2)
+ */
+#define WM1811_JACKDET_MODE_MASK 0x0180 /* JACKDET_MODE - [8:7] */
+#define WM1811_JACKDET_MODE_SHIFT 7 /* JACKDET_MODE - [8:7] */
+#define WM1811_JACKDET_MODE_WIDTH 2 /* JACKDET_MODE - [8:7] */
+#define WM8994_MICB2_DISCH 0x0100 /* MICB2_DISCH */
+#define WM8994_MICB2_DISCH_MASK 0x0100 /* MICB2_DISCH */
+#define WM8994_MICB2_DISCH_SHIFT 8 /* MICB2_DISCH */
+#define WM8994_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
+#define WM8994_MICB1_DISCH 0x0080 /* MICB1_DISCH */
+#define WM8994_MICB1_DISCH_MASK 0x0080 /* MICB1_DISCH */
+#define WM8994_MICB1_DISCH_SHIFT 7 /* MICB1_DISCH */
+#define WM8994_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */
+#define WM8994_VMID_RAMP_MASK 0x0060 /* VMID_RAMP - [6:5] */
+#define WM8994_VMID_RAMP_SHIFT 5 /* VMID_RAMP - [6:5] */
+#define WM8994_VMID_RAMP_WIDTH 2 /* VMID_RAMP - [6:5] */
+#define WM8994_VMID_BUF_ENA 0x0008 /* VMID_BUF_ENA */
+#define WM8994_VMID_BUF_ENA_MASK 0x0008 /* VMID_BUF_ENA */
+#define WM8994_VMID_BUF_ENA_SHIFT 3 /* VMID_BUF_ENA */
+#define WM8994_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */
+#define WM8994_STARTUP_BIAS_ENA 0x0004 /* STARTUP_BIAS_ENA */
+#define WM8994_STARTUP_BIAS_ENA_MASK 0x0004 /* STARTUP_BIAS_ENA */
+#define WM8994_STARTUP_BIAS_ENA_SHIFT 2 /* STARTUP_BIAS_ENA */
+#define WM8994_STARTUP_BIAS_ENA_WIDTH 1 /* STARTUP_BIAS_ENA */
+#define WM8994_BIAS_SRC 0x0002 /* BIAS_SRC */
+#define WM8994_BIAS_SRC_MASK 0x0002 /* BIAS_SRC */
+#define WM8994_BIAS_SRC_SHIFT 1 /* BIAS_SRC */
+#define WM8994_BIAS_SRC_WIDTH 1 /* BIAS_SRC */
+#define WM8994_VMID_DISCH 0x0001 /* VMID_DISCH */
+#define WM8994_VMID_DISCH_MASK 0x0001 /* VMID_DISCH */
+#define WM8994_VMID_DISCH_SHIFT 0 /* VMID_DISCH */
+#define WM8994_VMID_DISCH_WIDTH 1 /* VMID_DISCH */
+
+/*
+ * R58 (0x3A) - MICBIAS
+ */
+#define WM8994_MICD_SCTHR_MASK 0x00C0 /* MICD_SCTHR - [7:6] */
+#define WM8994_MICD_SCTHR_SHIFT 6 /* MICD_SCTHR - [7:6] */
+#define WM8994_MICD_SCTHR_WIDTH 2 /* MICD_SCTHR - [7:6] */
+#define WM8994_MICD_THR_MASK 0x0038 /* MICD_THR - [5:3] */
+#define WM8994_MICD_THR_SHIFT 3 /* MICD_THR - [5:3] */
+#define WM8994_MICD_THR_WIDTH 3 /* MICD_THR - [5:3] */
+#define WM8994_MICD_ENA 0x0004 /* MICD_ENA */
+#define WM8994_MICD_ENA_MASK 0x0004 /* MICD_ENA */
+#define WM8994_MICD_ENA_SHIFT 2 /* MICD_ENA */
+#define WM8994_MICD_ENA_WIDTH 1 /* MICD_ENA */
+#define WM8994_MICB2_LVL 0x0002 /* MICB2_LVL */
+#define WM8994_MICB2_LVL_MASK 0x0002 /* MICB2_LVL */
+#define WM8994_MICB2_LVL_SHIFT 1 /* MICB2_LVL */
+#define WM8994_MICB2_LVL_WIDTH 1 /* MICB2_LVL */
+#define WM8994_MICB1_LVL 0x0001 /* MICB1_LVL */
+#define WM8994_MICB1_LVL_MASK 0x0001 /* MICB1_LVL */
+#define WM8994_MICB1_LVL_SHIFT 0 /* MICB1_LVL */
+#define WM8994_MICB1_LVL_WIDTH 1 /* MICB1_LVL */
+
+/*
+ * R59 (0x3B) - LDO 1
+ */
+#define WM8994_LDO1_VSEL_MASK 0x000E /* LDO1_VSEL - [3:1] */
+#define WM8994_LDO1_VSEL_SHIFT 1 /* LDO1_VSEL - [3:1] */
+#define WM8994_LDO1_VSEL_WIDTH 3 /* LDO1_VSEL - [3:1] */
+#define WM8994_LDO1_DISCH 0x0001 /* LDO1_DISCH */
+#define WM8994_LDO1_DISCH_MASK 0x0001 /* LDO1_DISCH */
+#define WM8994_LDO1_DISCH_SHIFT 0 /* LDO1_DISCH */
+#define WM8994_LDO1_DISCH_WIDTH 1 /* LDO1_DISCH */
+
+/*
+ * R60 (0x3C) - LDO 2
+ */
+#define WM8994_LDO2_VSEL_MASK 0x0006 /* LDO2_VSEL - [2:1] */
+#define WM8994_LDO2_VSEL_SHIFT 1 /* LDO2_VSEL - [2:1] */
+#define WM8994_LDO2_VSEL_WIDTH 2 /* LDO2_VSEL - [2:1] */
+#define WM8994_LDO2_DISCH 0x0001 /* LDO2_DISCH */
+#define WM8994_LDO2_DISCH_MASK 0x0001 /* LDO2_DISCH */
+#define WM8994_LDO2_DISCH_SHIFT 0 /* LDO2_DISCH */
+#define WM8994_LDO2_DISCH_WIDTH 1 /* LDO2_DISCH */
+
+/*
+ * R61 (0x3D) - MICBIAS1
+ */
+#define WM8958_MICB1_RATE 0x0020 /* MICB1_RATE */
+#define WM8958_MICB1_RATE_MASK 0x0020 /* MICB1_RATE */
+#define WM8958_MICB1_RATE_SHIFT 5 /* MICB1_RATE */
+#define WM8958_MICB1_RATE_WIDTH 1 /* MICB1_RATE */
+#define WM8958_MICB1_MODE 0x0010 /* MICB1_MODE */
+#define WM8958_MICB1_MODE_MASK 0x0010 /* MICB1_MODE */
+#define WM8958_MICB1_MODE_SHIFT 4 /* MICB1_MODE */
+#define WM8958_MICB1_MODE_WIDTH 1 /* MICB1_MODE */
+#define WM8958_MICB1_LVL_MASK 0x000E /* MICB1_LVL - [3:1] */
+#define WM8958_MICB1_LVL_SHIFT 1 /* MICB1_LVL - [3:1] */
+#define WM8958_MICB1_LVL_WIDTH 3 /* MICB1_LVL - [3:1] */
+#define WM8958_MICB1_DISCH 0x0001 /* MICB1_DISCH */
+#define WM8958_MICB1_DISCH_MASK 0x0001 /* MICB1_DISCH */
+#define WM8958_MICB1_DISCH_SHIFT 0 /* MICB1_DISCH */
+#define WM8958_MICB1_DISCH_WIDTH 1 /* MICB1_DISCH */
+
+/*
+ * R62 (0x3E) - MICBIAS2
+ */
+#define WM8958_MICB2_RATE 0x0020 /* MICB2_RATE */
+#define WM8958_MICB2_RATE_MASK 0x0020 /* MICB2_RATE */
+#define WM8958_MICB2_RATE_SHIFT 5 /* MICB2_RATE */
+#define WM8958_MICB2_RATE_WIDTH 1 /* MICB2_RATE */
+#define WM8958_MICB2_MODE 0x0010 /* MICB2_MODE */
+#define WM8958_MICB2_MODE_MASK 0x0010 /* MICB2_MODE */
+#define WM8958_MICB2_MODE_SHIFT 4 /* MICB2_MODE */
+#define WM8958_MICB2_MODE_WIDTH 1 /* MICB2_MODE */
+#define WM8958_MICB2_LVL_MASK 0x000E /* MICB2_LVL - [3:1] */
+#define WM8958_MICB2_LVL_SHIFT 1 /* MICB2_LVL - [3:1] */
+#define WM8958_MICB2_LVL_WIDTH 3 /* MICB2_LVL - [3:1] */
+#define WM8958_MICB2_DISCH 0x0001 /* MICB2_DISCH */
+#define WM8958_MICB2_DISCH_MASK 0x0001 /* MICB2_DISCH */
+#define WM8958_MICB2_DISCH_SHIFT 0 /* MICB2_DISCH */
+#define WM8958_MICB2_DISCH_WIDTH 1 /* MICB2_DISCH */
+
+/*
+ * R76 (0x4C) - Charge Pump (1)
+ */
+#define WM8994_CP_ENA 0x8000 /* CP_ENA */
+#define WM8994_CP_ENA_MASK 0x8000 /* CP_ENA */
+#define WM8994_CP_ENA_SHIFT 15 /* CP_ENA */
+#define WM8994_CP_ENA_WIDTH 1 /* CP_ENA */
+
+/*
+ * R77 (0x4D) - Charge Pump (2)
+ */
+#define WM8958_CP_DISCH 0x8000 /* CP_DISCH */
+#define WM8958_CP_DISCH_MASK 0x8000 /* CP_DISCH */
+#define WM8958_CP_DISCH_SHIFT 15 /* CP_DISCH */
+#define WM8958_CP_DISCH_WIDTH 1 /* CP_DISCH */
+
+/*
+ * R81 (0x51) - Class W (1)
+ */
+#define WM8994_CP_DYN_SRC_SEL_MASK 0x0300 /* CP_DYN_SRC_SEL - [9:8] */
+#define WM8994_CP_DYN_SRC_SEL_SHIFT 8 /* CP_DYN_SRC_SEL - [9:8] */
+#define WM8994_CP_DYN_SRC_SEL_WIDTH 2 /* CP_DYN_SRC_SEL - [9:8] */
+#define WM8994_CP_DYN_PWR 0x0001 /* CP_DYN_PWR */
+#define WM8994_CP_DYN_PWR_MASK 0x0001 /* CP_DYN_PWR */
+#define WM8994_CP_DYN_PWR_SHIFT 0 /* CP_DYN_PWR */
+#define WM8994_CP_DYN_PWR_WIDTH 1 /* CP_DYN_PWR */
+
+/*
+ * R84 (0x54) - DC Servo (1)
+ */
+#define WM8994_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM8994_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM8994_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */
+#define WM8994_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */
+#define WM8994_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM8994_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM8994_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */
+#define WM8994_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */
+#define WM8994_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM8994_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM8994_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */
+#define WM8994_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */
+#define WM8994_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM8994_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM8994_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */
+#define WM8994_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */
+#define WM8994_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM8994_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM8994_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */
+#define WM8994_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */
+#define WM8994_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM8994_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM8994_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */
+#define WM8994_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */
+#define WM8994_DCS_TRIG_DAC_WR_1 0x0008 /* DCS_TRIG_DAC_WR_1 */
+#define WM8994_DCS_TRIG_DAC_WR_1_MASK 0x0008 /* DCS_TRIG_DAC_WR_1 */
+#define WM8994_DCS_TRIG_DAC_WR_1_SHIFT 3 /* DCS_TRIG_DAC_WR_1 */
+#define WM8994_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */
+#define WM8994_DCS_TRIG_DAC_WR_0 0x0004 /* DCS_TRIG_DAC_WR_0 */
+#define WM8994_DCS_TRIG_DAC_WR_0_MASK 0x0004 /* DCS_TRIG_DAC_WR_0 */
+#define WM8994_DCS_TRIG_DAC_WR_0_SHIFT 2 /* DCS_TRIG_DAC_WR_0 */
+#define WM8994_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */
+#define WM8994_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM8994_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM8994_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */
+#define WM8994_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */
+#define WM8994_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM8994_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM8994_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */
+#define WM8994_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */
+
+/*
+ * R85 (0x55) - DC Servo (2)
+ */
+#define WM8994_DCS_SERIES_NO_01_MASK 0x0FE0 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM8994_DCS_SERIES_NO_01_SHIFT 5 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM8994_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [11:5] */
+#define WM8994_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8994_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8994_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */
+
+/*
+ * R87 (0x57) - DC Servo (4)
+ */
+#define WM8994_DCS_DAC_WR_VAL_1_MASK 0xFF00 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8994_DCS_DAC_WR_VAL_1_SHIFT 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8994_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8994_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8994_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8994_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */
+
+/*
+ * R88 (0x58) - DC Servo Readback
+ */
+#define WM8994_DCS_CAL_COMPLETE_MASK 0x0300 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM8994_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM8994_DCS_CAL_COMPLETE_WIDTH 2 /* DCS_CAL_COMPLETE - [9:8] */
+#define WM8994_DCS_DAC_WR_COMPLETE_MASK 0x0030 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM8994_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM8994_DCS_DAC_WR_COMPLETE_WIDTH 2 /* DCS_DAC_WR_COMPLETE - [5:4] */
+#define WM8994_DCS_STARTUP_COMPLETE_MASK 0x0003 /* DCS_STARTUP_COMPLETE - [1:0] */
+#define WM8994_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [1:0] */
+#define WM8994_DCS_STARTUP_COMPLETE_WIDTH 2 /* DCS_STARTUP_COMPLETE - [1:0] */
+
+/*
+ * R96 (0x60) - Analogue HP (1)
+ */
+#define WM1811_HPOUT1_ATTN 0x0100 /* HPOUT1_ATTN */
+#define WM1811_HPOUT1_ATTN_MASK 0x0100 /* HPOUT1_ATTN */
+#define WM1811_HPOUT1_ATTN_SHIFT 8 /* HPOUT1_ATTN */
+#define WM1811_HPOUT1_ATTN_WIDTH 1 /* HPOUT1_ATTN */
+#define WM8994_HPOUT1L_RMV_SHORT 0x0080 /* HPOUT1L_RMV_SHORT */
+#define WM8994_HPOUT1L_RMV_SHORT_MASK 0x0080 /* HPOUT1L_RMV_SHORT */
+#define WM8994_HPOUT1L_RMV_SHORT_SHIFT 7 /* HPOUT1L_RMV_SHORT */
+#define WM8994_HPOUT1L_RMV_SHORT_WIDTH 1 /* HPOUT1L_RMV_SHORT */
+#define WM8994_HPOUT1L_OUTP 0x0040 /* HPOUT1L_OUTP */
+#define WM8994_HPOUT1L_OUTP_MASK 0x0040 /* HPOUT1L_OUTP */
+#define WM8994_HPOUT1L_OUTP_SHIFT 6 /* HPOUT1L_OUTP */
+#define WM8994_HPOUT1L_OUTP_WIDTH 1 /* HPOUT1L_OUTP */
+#define WM8994_HPOUT1L_DLY 0x0020 /* HPOUT1L_DLY */
+#define WM8994_HPOUT1L_DLY_MASK 0x0020 /* HPOUT1L_DLY */
+#define WM8994_HPOUT1L_DLY_SHIFT 5 /* HPOUT1L_DLY */
+#define WM8994_HPOUT1L_DLY_WIDTH 1 /* HPOUT1L_DLY */
+#define WM8994_HPOUT1R_RMV_SHORT 0x0008 /* HPOUT1R_RMV_SHORT */
+#define WM8994_HPOUT1R_RMV_SHORT_MASK 0x0008 /* HPOUT1R_RMV_SHORT */
+#define WM8994_HPOUT1R_RMV_SHORT_SHIFT 3 /* HPOUT1R_RMV_SHORT */
+#define WM8994_HPOUT1R_RMV_SHORT_WIDTH 1 /* HPOUT1R_RMV_SHORT */
+#define WM8994_HPOUT1R_OUTP 0x0004 /* HPOUT1R_OUTP */
+#define WM8994_HPOUT1R_OUTP_MASK 0x0004 /* HPOUT1R_OUTP */
+#define WM8994_HPOUT1R_OUTP_SHIFT 2 /* HPOUT1R_OUTP */
+#define WM8994_HPOUT1R_OUTP_WIDTH 1 /* HPOUT1R_OUTP */
+#define WM8994_HPOUT1R_DLY 0x0002 /* HPOUT1R_DLY */
+#define WM8994_HPOUT1R_DLY_MASK 0x0002 /* HPOUT1R_DLY */
+#define WM8994_HPOUT1R_DLY_SHIFT 1 /* HPOUT1R_DLY */
+#define WM8994_HPOUT1R_DLY_WIDTH 1 /* HPOUT1R_DLY */
+
+/*
+ * R208 (0xD0) - Mic Detect 1
+ */
+#define WM8958_MICD_BIAS_STARTTIME_MASK 0xF000 /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_BIAS_STARTTIME_SHIFT 12 /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_BIAS_STARTTIME_WIDTH 4 /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_RATE_MASK 0x0F00 /* MICD_RATE - [11:8] */
+#define WM8958_MICD_RATE_SHIFT 8 /* MICD_RATE - [11:8] */
+#define WM8958_MICD_RATE_WIDTH 4 /* MICD_RATE - [11:8] */
+#define WM8958_MICD_DBTIME 0x0002 /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_MASK 0x0002 /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_SHIFT 1 /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_WIDTH 1 /* MICD_DBTIME */
+#define WM8958_MICD_ENA 0x0001 /* MICD_ENA */
+#define WM8958_MICD_ENA_MASK 0x0001 /* MICD_ENA */
+#define WM8958_MICD_ENA_SHIFT 0 /* MICD_ENA */
+#define WM8958_MICD_ENA_WIDTH 1 /* MICD_ENA */
+
+/*
+ * R209 (0xD1) - Mic Detect 2
+ */
+#define WM8958_MICD_LVL_SEL_MASK 0x00FF /* MICD_LVL_SEL - [7:0] */
+#define WM8958_MICD_LVL_SEL_SHIFT 0 /* MICD_LVL_SEL - [7:0] */
+#define WM8958_MICD_LVL_SEL_WIDTH 8 /* MICD_LVL_SEL - [7:0] */
+
+/*
+ * R210 (0xD2) - Mic Detect 3
+ */
+#define WM8958_MICD_LVL_MASK 0x07FC /* MICD_LVL - [10:2] */
+#define WM8958_MICD_LVL_SHIFT 2 /* MICD_LVL - [10:2] */
+#define WM8958_MICD_LVL_WIDTH 9 /* MICD_LVL - [10:2] */
+#define WM8958_MICD_VALID 0x0002 /* MICD_VALID */
+#define WM8958_MICD_VALID_MASK 0x0002 /* MICD_VALID */
+#define WM8958_MICD_VALID_SHIFT 1 /* MICD_VALID */
+#define WM8958_MICD_VALID_WIDTH 1 /* MICD_VALID */
+#define WM8958_MICD_STS 0x0001 /* MICD_STS */
+#define WM8958_MICD_STS_MASK 0x0001 /* MICD_STS */
+#define WM8958_MICD_STS_SHIFT 0 /* MICD_STS */
+#define WM8958_MICD_STS_WIDTH 1 /* MICD_STS */
+
+/*
+ * R256 (0x100) - Chip Revision
+ */
+#define WM8994_CUST_ID_MASK 0xFF00 /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_SHIFT 8 /* CUST_ID - [15:8] */
+#define WM8994_CUST_ID_WIDTH 8 /* CUST_ID - [15:8] */
+#define WM8994_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */
+#define WM8994_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */
+#define WM8994_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */
+
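The *_MASK/*_SHIFT/*_WIDTH triplets in this header all follow the same convention: a field is read by masking the raw register value and shifting it down to bit 0. A minimal sketch of that convention for the Chip Revision register follows; the helper names are made up for illustration and are not part of the header, and fetching the raw register value (e.g. over I2C or SPI) is assumed to happen elsewhere.

static inline unsigned int wm8994_decode_chip_rev(unsigned int reg_val)
{
	/* Isolate CHIP_REV ([3:0]) and right-align it, giving 0..15. */
	return (reg_val & WM8994_CHIP_REV_MASK) >> WM8994_CHIP_REV_SHIFT;
}

static inline unsigned int wm8994_decode_cust_id(unsigned int reg_val)
{
	/* CUST_ID occupies [15:8]; same mask-then-shift pattern. */
	return (reg_val & WM8994_CUST_ID_MASK) >> WM8994_CUST_ID_SHIFT;
}
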
+/*
+ * R257 (0x101) - Control Interface
+ */
+#define WM8994_SPI_CONTRD 0x0040 /* SPI_CONTRD */
+#define WM8994_SPI_CONTRD_MASK 0x0040 /* SPI_CONTRD */
+#define WM8994_SPI_CONTRD_SHIFT 6 /* SPI_CONTRD */
+#define WM8994_SPI_CONTRD_WIDTH 1 /* SPI_CONTRD */
+#define WM8994_SPI_4WIRE 0x0020 /* SPI_4WIRE */
+#define WM8994_SPI_4WIRE_MASK 0x0020 /* SPI_4WIRE */
+#define WM8994_SPI_4WIRE_SHIFT 5 /* SPI_4WIRE */
+#define WM8994_SPI_4WIRE_WIDTH 1 /* SPI_4WIRE */
+#define WM8994_SPI_CFG 0x0010 /* SPI_CFG */
+#define WM8994_SPI_CFG_MASK 0x0010 /* SPI_CFG */
+#define WM8994_SPI_CFG_SHIFT 4 /* SPI_CFG */
+#define WM8994_SPI_CFG_WIDTH 1 /* SPI_CFG */
+#define WM8994_AUTO_INC 0x0004 /* AUTO_INC */
+#define WM8994_AUTO_INC_MASK 0x0004 /* AUTO_INC */
+#define WM8994_AUTO_INC_SHIFT 2 /* AUTO_INC */
+#define WM8994_AUTO_INC_WIDTH 1 /* AUTO_INC */
+
+/*
+ * R272 (0x110) - Write Sequencer Ctrl (1)
+ */
+#define WM8994_WSEQ_ENA 0x8000 /* WSEQ_ENA */
+#define WM8994_WSEQ_ENA_MASK 0x8000 /* WSEQ_ENA */
+#define WM8994_WSEQ_ENA_SHIFT 15 /* WSEQ_ENA */
+#define WM8994_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define WM8994_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */
+#define WM8994_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */
+#define WM8994_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */
+#define WM8994_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define WM8994_WSEQ_START 0x0100 /* WSEQ_START */
+#define WM8994_WSEQ_START_MASK 0x0100 /* WSEQ_START */
+#define WM8994_WSEQ_START_SHIFT 8 /* WSEQ_START */
+#define WM8994_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define WM8994_WSEQ_START_INDEX_MASK 0x007F /* WSEQ_START_INDEX - [6:0] */
+#define WM8994_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [6:0] */
+#define WM8994_WSEQ_START_INDEX_WIDTH 7 /* WSEQ_START_INDEX - [6:0] */
+
+/*
+ * R273 (0x111) - Write Sequencer Ctrl (2)
+ */
+#define WM8994_WSEQ_BUSY 0x0100 /* WSEQ_BUSY */
+#define WM8994_WSEQ_BUSY_MASK 0x0100 /* WSEQ_BUSY */
+#define WM8994_WSEQ_BUSY_SHIFT 8 /* WSEQ_BUSY */
+#define WM8994_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+#define WM8994_WSEQ_CURRENT_INDEX_MASK 0x007F /* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8994_WSEQ_CURRENT_INDEX_SHIFT 0 /* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8994_WSEQ_CURRENT_INDEX_WIDTH 7 /* WSEQ_CURRENT_INDEX - [6:0] */
+
+/*
+ * R512 (0x200) - AIF1 Clocking (1)
+ */
+#define WM8994_AIF1CLK_SRC_MASK 0x0018 /* AIF1CLK_SRC - [4:3] */
+#define WM8994_AIF1CLK_SRC_SHIFT 3 /* AIF1CLK_SRC - [4:3] */
+#define WM8994_AIF1CLK_SRC_WIDTH 2 /* AIF1CLK_SRC - [4:3] */
+#define WM8994_AIF1CLK_INV 0x0004 /* AIF1CLK_INV */
+#define WM8994_AIF1CLK_INV_MASK 0x0004 /* AIF1CLK_INV */
+#define WM8994_AIF1CLK_INV_SHIFT 2 /* AIF1CLK_INV */
+#define WM8994_AIF1CLK_INV_WIDTH 1 /* AIF1CLK_INV */
+#define WM8994_AIF1CLK_DIV 0x0002 /* AIF1CLK_DIV */
+#define WM8994_AIF1CLK_DIV_MASK 0x0002 /* AIF1CLK_DIV */
+#define WM8994_AIF1CLK_DIV_SHIFT 1 /* AIF1CLK_DIV */
+#define WM8994_AIF1CLK_DIV_WIDTH 1 /* AIF1CLK_DIV */
+#define WM8994_AIF1CLK_ENA 0x0001 /* AIF1CLK_ENA */
+#define WM8994_AIF1CLK_ENA_MASK 0x0001 /* AIF1CLK_ENA */
+#define WM8994_AIF1CLK_ENA_SHIFT 0 /* AIF1CLK_ENA */
+#define WM8994_AIF1CLK_ENA_WIDTH 1 /* AIF1CLK_ENA */
+
+/*
+ * R513 (0x201) - AIF1 Clocking (2)
+ */
+#define WM8994_AIF1DAC_DIV_MASK 0x0038 /* AIF1DAC_DIV - [5:3] */
+#define WM8994_AIF1DAC_DIV_SHIFT 3 /* AIF1DAC_DIV - [5:3] */
+#define WM8994_AIF1DAC_DIV_WIDTH 3 /* AIF1DAC_DIV - [5:3] */
+#define WM8994_AIF1ADC_DIV_MASK 0x0007 /* AIF1ADC_DIV - [2:0] */
+#define WM8994_AIF1ADC_DIV_SHIFT 0 /* AIF1ADC_DIV - [2:0] */
+#define WM8994_AIF1ADC_DIV_WIDTH 3 /* AIF1ADC_DIV - [2:0] */
+
+/*
+ * R516 (0x204) - AIF2 Clocking (1)
+ */
+#define WM8994_AIF2CLK_SRC_MASK 0x0018 /* AIF2CLK_SRC - [4:3] */
+#define WM8994_AIF2CLK_SRC_SHIFT 3 /* AIF2CLK_SRC - [4:3] */
+#define WM8994_AIF2CLK_SRC_WIDTH 2 /* AIF2CLK_SRC - [4:3] */
+#define WM8994_AIF2CLK_INV 0x0004 /* AIF2CLK_INV */
+#define WM8994_AIF2CLK_INV_MASK 0x0004 /* AIF2CLK_INV */
+#define WM8994_AIF2CLK_INV_SHIFT 2 /* AIF2CLK_INV */
+#define WM8994_AIF2CLK_INV_WIDTH 1 /* AIF2CLK_INV */
+#define WM8994_AIF2CLK_DIV 0x0002 /* AIF2CLK_DIV */
+#define WM8994_AIF2CLK_DIV_MASK 0x0002 /* AIF2CLK_DIV */
+#define WM8994_AIF2CLK_DIV_SHIFT 1 /* AIF2CLK_DIV */
+#define WM8994_AIF2CLK_DIV_WIDTH 1 /* AIF2CLK_DIV */
+#define WM8994_AIF2CLK_ENA 0x0001 /* AIF2CLK_ENA */
+#define WM8994_AIF2CLK_ENA_MASK 0x0001 /* AIF2CLK_ENA */
+#define WM8994_AIF2CLK_ENA_SHIFT 0 /* AIF2CLK_ENA */
+#define WM8994_AIF2CLK_ENA_WIDTH 1 /* AIF2CLK_ENA */
+
+/*
+ * R517 (0x205) - AIF2 Clocking (2)
+ */
+#define WM8994_AIF2DAC_DIV_MASK 0x0038 /* AIF2DAC_DIV - [5:3] */
+#define WM8994_AIF2DAC_DIV_SHIFT 3 /* AIF2DAC_DIV - [5:3] */
+#define WM8994_AIF2DAC_DIV_WIDTH 3 /* AIF2DAC_DIV - [5:3] */
+#define WM8994_AIF2ADC_DIV_MASK 0x0007 /* AIF2ADC_DIV - [2:0] */
+#define WM8994_AIF2ADC_DIV_SHIFT 0 /* AIF2ADC_DIV - [2:0] */
+#define WM8994_AIF2ADC_DIV_WIDTH 3 /* AIF2ADC_DIV - [2:0] */
+
+/*
+ * R520 (0x208) - Clocking (1)
+ */
+#define WM8958_DSP2CLK_ENA 0x4000 /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_MASK 0x4000 /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_SHIFT 14 /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_WIDTH 1 /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_SRC 0x1000 /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_MASK 0x1000 /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_SHIFT 12 /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_WIDTH 1 /* DSP2CLK_SRC */
+#define WM8994_TOCLK_ENA 0x0010 /* TOCLK_ENA */
+#define WM8994_TOCLK_ENA_MASK 0x0010 /* TOCLK_ENA */
+#define WM8994_TOCLK_ENA_SHIFT 4 /* TOCLK_ENA */
+#define WM8994_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */
+#define WM8994_AIF1DSPCLK_ENA 0x0008 /* AIF1DSPCLK_ENA */
+#define WM8994_AIF1DSPCLK_ENA_MASK 0x0008 /* AIF1DSPCLK_ENA */
+#define WM8994_AIF1DSPCLK_ENA_SHIFT 3 /* AIF1DSPCLK_ENA */
+#define WM8994_AIF1DSPCLK_ENA_WIDTH 1 /* AIF1DSPCLK_ENA */
+#define WM8994_AIF2DSPCLK_ENA 0x0004 /* AIF2DSPCLK_ENA */
+#define WM8994_AIF2DSPCLK_ENA_MASK 0x0004 /* AIF2DSPCLK_ENA */
+#define WM8994_AIF2DSPCLK_ENA_SHIFT 2 /* AIF2DSPCLK_ENA */
+#define WM8994_AIF2DSPCLK_ENA_WIDTH 1 /* AIF2DSPCLK_ENA */
+#define WM8994_SYSDSPCLK_ENA 0x0002 /* SYSDSPCLK_ENA */
+#define WM8994_SYSDSPCLK_ENA_MASK 0x0002 /* SYSDSPCLK_ENA */
+#define WM8994_SYSDSPCLK_ENA_SHIFT 1 /* SYSDSPCLK_ENA */
+#define WM8994_SYSDSPCLK_ENA_WIDTH 1 /* SYSDSPCLK_ENA */
+#define WM8994_SYSCLK_SRC 0x0001 /* SYSCLK_SRC */
+#define WM8994_SYSCLK_SRC_MASK 0x0001 /* SYSCLK_SRC */
+#define WM8994_SYSCLK_SRC_SHIFT 0 /* SYSCLK_SRC */
+#define WM8994_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */
+
+/*
+ * R521 (0x209) - Clocking (2)
+ */
+#define WM8994_TOCLK_DIV_MASK 0x0700 /* TOCLK_DIV - [10:8] */
+#define WM8994_TOCLK_DIV_SHIFT 8 /* TOCLK_DIV - [10:8] */
+#define WM8994_TOCLK_DIV_WIDTH 3 /* TOCLK_DIV - [10:8] */
+#define WM8994_DBCLK_DIV_MASK 0x0070 /* DBCLK_DIV - [6:4] */
+#define WM8994_DBCLK_DIV_SHIFT 4 /* DBCLK_DIV - [6:4] */
+#define WM8994_DBCLK_DIV_WIDTH 3 /* DBCLK_DIV - [6:4] */
+#define WM8994_OPCLK_DIV_MASK 0x0007 /* OPCLK_DIV - [2:0] */
+#define WM8994_OPCLK_DIV_SHIFT 0 /* OPCLK_DIV - [2:0] */
+#define WM8994_OPCLK_DIV_WIDTH 3 /* OPCLK_DIV - [2:0] */
+
+/*
+ * R528 (0x210) - AIF1 Rate
+ */
+#define WM8994_AIF1_SR_MASK 0x00F0 /* AIF1_SR - [7:4] */
+#define WM8994_AIF1_SR_SHIFT 4 /* AIF1_SR - [7:4] */
+#define WM8994_AIF1_SR_WIDTH 4 /* AIF1_SR - [7:4] */
+#define WM8994_AIF1CLK_RATE_MASK 0x000F /* AIF1CLK_RATE - [3:0] */
+#define WM8994_AIF1CLK_RATE_SHIFT 0 /* AIF1CLK_RATE - [3:0] */
+#define WM8994_AIF1CLK_RATE_WIDTH 4 /* AIF1CLK_RATE - [3:0] */
+
+/*
+ * R529 (0x211) - AIF2 Rate
+ */
+#define WM8994_AIF2_SR_MASK 0x00F0 /* AIF2_SR - [7:4] */
+#define WM8994_AIF2_SR_SHIFT 4 /* AIF2_SR - [7:4] */
+#define WM8994_AIF2_SR_WIDTH 4 /* AIF2_SR - [7:4] */
+#define WM8994_AIF2CLK_RATE_MASK 0x000F /* AIF2CLK_RATE - [3:0] */
+#define WM8994_AIF2CLK_RATE_SHIFT 0 /* AIF2CLK_RATE - [3:0] */
+#define WM8994_AIF2CLK_RATE_WIDTH 4 /* AIF2CLK_RATE - [3:0] */
+
+/*
+ * R530 (0x212) - Rate Status
+ */
+#define WM8994_SR_ERROR_MASK 0x000F /* SR_ERROR - [3:0] */
+#define WM8994_SR_ERROR_SHIFT 0 /* SR_ERROR - [3:0] */
+#define WM8994_SR_ERROR_WIDTH 4 /* SR_ERROR - [3:0] */
+
+/*
+ * R544 (0x220) - FLL1 Control (1)
+ */
+#define WM8994_FLL1_FRAC 0x0004 /* FLL1_FRAC */
+#define WM8994_FLL1_FRAC_MASK 0x0004 /* FLL1_FRAC */
+#define WM8994_FLL1_FRAC_SHIFT 2 /* FLL1_FRAC */
+#define WM8994_FLL1_FRAC_WIDTH 1 /* FLL1_FRAC */
+#define WM8994_FLL1_OSC_ENA 0x0002 /* FLL1_OSC_ENA */
+#define WM8994_FLL1_OSC_ENA_MASK 0x0002 /* FLL1_OSC_ENA */
+#define WM8994_FLL1_OSC_ENA_SHIFT 1 /* FLL1_OSC_ENA */
+#define WM8994_FLL1_OSC_ENA_WIDTH 1 /* FLL1_OSC_ENA */
+#define WM8994_FLL1_ENA 0x0001 /* FLL1_ENA */
+#define WM8994_FLL1_ENA_MASK 0x0001 /* FLL1_ENA */
+#define WM8994_FLL1_ENA_SHIFT 0 /* FLL1_ENA */
+#define WM8994_FLL1_ENA_WIDTH 1 /* FLL1_ENA */
+
+/*
+ * R545 (0x221) - FLL1 Control (2)
+ */
+#define WM8994_FLL1_OUTDIV_MASK 0x3F00 /* FLL1_OUTDIV - [13:8] */
+#define WM8994_FLL1_OUTDIV_SHIFT 8 /* FLL1_OUTDIV - [13:8] */
+#define WM8994_FLL1_OUTDIV_WIDTH 6 /* FLL1_OUTDIV - [13:8] */
+#define WM8994_FLL1_CTRL_RATE_MASK 0x0070 /* FLL1_CTRL_RATE - [6:4] */
+#define WM8994_FLL1_CTRL_RATE_SHIFT 4 /* FLL1_CTRL_RATE - [6:4] */
+#define WM8994_FLL1_CTRL_RATE_WIDTH 3 /* FLL1_CTRL_RATE - [6:4] */
+#define WM8994_FLL1_FRATIO_MASK 0x0007 /* FLL1_FRATIO - [2:0] */
+#define WM8994_FLL1_FRATIO_SHIFT 0 /* FLL1_FRATIO - [2:0] */
+#define WM8994_FLL1_FRATIO_WIDTH 3 /* FLL1_FRATIO - [2:0] */
+
+/*
+ * R546 (0x222) - FLL1 Control (3)
+ */
+#define WM8994_FLL1_K_MASK 0xFFFF /* FLL1_K - [15:0] */
+#define WM8994_FLL1_K_SHIFT 0 /* FLL1_K - [15:0] */
+#define WM8994_FLL1_K_WIDTH 16 /* FLL1_K - [15:0] */
+
+/*
+ * R547 (0x223) - FLL1 Control (4)
+ */
+#define WM8994_FLL1_N_MASK 0x7FE0 /* FLL1_N - [14:5] */
+#define WM8994_FLL1_N_SHIFT 5 /* FLL1_N - [14:5] */
+#define WM8994_FLL1_N_WIDTH 10 /* FLL1_N - [14:5] */
+#define WM8994_FLL1_LOOP_GAIN_MASK 0x000F /* FLL1_LOOP_GAIN - [3:0] */
+#define WM8994_FLL1_LOOP_GAIN_SHIFT 0 /* FLL1_LOOP_GAIN - [3:0] */
+#define WM8994_FLL1_LOOP_GAIN_WIDTH 4 /* FLL1_LOOP_GAIN - [3:0] */
+
+/*
+ * R548 (0x224) - FLL1 Control (5)
+ */
+#define WM8958_FLL1_BYP 0x8000 /* FLL1_BYP */
+#define WM8958_FLL1_BYP_MASK 0x8000 /* FLL1_BYP */
+#define WM8958_FLL1_BYP_SHIFT 15 /* FLL1_BYP */
+#define WM8958_FLL1_BYP_WIDTH 1 /* FLL1_BYP */
+#define WM8994_FLL1_FRC_NCO_VAL_MASK 0x1F80 /* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8994_FLL1_FRC_NCO_VAL_SHIFT 7 /* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8994_FLL1_FRC_NCO_VAL_WIDTH 6 /* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8994_FLL1_FRC_NCO 0x0040 /* FLL1_FRC_NCO */
+#define WM8994_FLL1_FRC_NCO_MASK 0x0040 /* FLL1_FRC_NCO */
+#define WM8994_FLL1_FRC_NCO_SHIFT 6 /* FLL1_FRC_NCO */
+#define WM8994_FLL1_FRC_NCO_WIDTH 1 /* FLL1_FRC_NCO */
+#define WM8994_FLL1_REFCLK_DIV_MASK 0x0018 /* FLL1_REFCLK_DIV - [4:3] */
+#define WM8994_FLL1_REFCLK_DIV_SHIFT 3 /* FLL1_REFCLK_DIV - [4:3] */
+#define WM8994_FLL1_REFCLK_DIV_WIDTH 2 /* FLL1_REFCLK_DIV - [4:3] */
+#define WM8994_FLL1_REFCLK_SRC_MASK 0x0003 /* FLL1_REFCLK_SRC - [1:0] */
+#define WM8994_FLL1_REFCLK_SRC_SHIFT 0 /* FLL1_REFCLK_SRC - [1:0] */
+#define WM8994_FLL1_REFCLK_SRC_WIDTH 2 /* FLL1_REFCLK_SRC - [1:0] */
+
+/*
+ * R550 (0x226) - FLL1 EFS 1
+ */
+#define WM8958_FLL1_LAMBDA_MASK 0xFFFF /* FLL1_LAMBDA - [15:0] */
+#define WM8958_FLL1_LAMBDA_SHIFT 0 /* FLL1_LAMBDA - [15:0] */
+#define WM8958_FLL1_LAMBDA_WIDTH 16 /* FLL1_LAMBDA - [15:0] */
+
+/*
+ * R551 (0x227) - FLL1 EFS 2
+ */
+#define WM8958_FLL1_LFSR_SEL_MASK 0x0006 /* FLL1_LFSR_SEL - [2:1] */
+#define WM8958_FLL1_LFSR_SEL_SHIFT 1 /* FLL1_LFSR_SEL - [2:1] */
+#define WM8958_FLL1_LFSR_SEL_WIDTH 2 /* FLL1_LFSR_SEL - [2:1] */
+#define WM8958_FLL1_EFS_ENA 0x0001 /* FLL1_EFS_ENA */
+#define WM8958_FLL1_EFS_ENA_MASK 0x0001 /* FLL1_EFS_ENA */
+#define WM8958_FLL1_EFS_ENA_SHIFT 0 /* FLL1_EFS_ENA */
+#define WM8958_FLL1_EFS_ENA_WIDTH 1 /* FLL1_EFS_ENA */
+
+/*
+ * R576 (0x240) - FLL2 Control (1)
+ */
+#define WM8994_FLL2_FRAC 0x0004 /* FLL2_FRAC */
+#define WM8994_FLL2_FRAC_MASK 0x0004 /* FLL2_FRAC */
+#define WM8994_FLL2_FRAC_SHIFT 2 /* FLL2_FRAC */
+#define WM8994_FLL2_FRAC_WIDTH 1 /* FLL2_FRAC */
+#define WM8994_FLL2_OSC_ENA 0x0002 /* FLL2_OSC_ENA */
+#define WM8994_FLL2_OSC_ENA_MASK 0x0002 /* FLL2_OSC_ENA */
+#define WM8994_FLL2_OSC_ENA_SHIFT 1 /* FLL2_OSC_ENA */
+#define WM8994_FLL2_OSC_ENA_WIDTH 1 /* FLL2_OSC_ENA */
+#define WM8994_FLL2_ENA 0x0001 /* FLL2_ENA */
+#define WM8994_FLL2_ENA_MASK 0x0001 /* FLL2_ENA */
+#define WM8994_FLL2_ENA_SHIFT 0 /* FLL2_ENA */
+#define WM8994_FLL2_ENA_WIDTH 1 /* FLL2_ENA */
+
+/*
+ * R577 (0x241) - FLL2 Control (2)
+ */
+#define WM8994_FLL2_OUTDIV_MASK 0x3F00 /* FLL2_OUTDIV - [13:8] */
+#define WM8994_FLL2_OUTDIV_SHIFT 8 /* FLL2_OUTDIV - [13:8] */
+#define WM8994_FLL2_OUTDIV_WIDTH 6 /* FLL2_OUTDIV - [13:8] */
+#define WM8994_FLL2_CTRL_RATE_MASK 0x0070 /* FLL2_CTRL_RATE - [6:4] */
+#define WM8994_FLL2_CTRL_RATE_SHIFT 4 /* FLL2_CTRL_RATE - [6:4] */
+#define WM8994_FLL2_CTRL_RATE_WIDTH 3 /* FLL2_CTRL_RATE - [6:4] */
+#define WM8994_FLL2_FRATIO_MASK 0x0007 /* FLL2_FRATIO - [2:0] */
+#define WM8994_FLL2_FRATIO_SHIFT 0 /* FLL2_FRATIO - [2:0] */
+#define WM8994_FLL2_FRATIO_WIDTH 3 /* FLL2_FRATIO - [2:0] */
+
+/*
+ * R578 (0x242) - FLL2 Control (3)
+ */
+#define WM8994_FLL2_K_MASK 0xFFFF /* FLL2_K - [15:0] */
+#define WM8994_FLL2_K_SHIFT 0 /* FLL2_K - [15:0] */
+#define WM8994_FLL2_K_WIDTH 16 /* FLL2_K - [15:0] */
+
+/*
+ * R579 (0x243) - FLL2 Control (4)
+ */
+#define WM8994_FLL2_N_MASK 0x7FE0 /* FLL2_N - [14:5] */
+#define WM8994_FLL2_N_SHIFT 5 /* FLL2_N - [14:5] */
+#define WM8994_FLL2_N_WIDTH 10 /* FLL2_N - [14:5] */
+#define WM8994_FLL2_LOOP_GAIN_MASK 0x000F /* FLL2_LOOP_GAIN - [3:0] */
+#define WM8994_FLL2_LOOP_GAIN_SHIFT 0 /* FLL2_LOOP_GAIN - [3:0] */
+#define WM8994_FLL2_LOOP_GAIN_WIDTH 4 /* FLL2_LOOP_GAIN - [3:0] */
+
+/*
+ * R580 (0x244) - FLL2 Control (5)
+ */
+#define WM8958_FLL2_BYP 0x8000 /* FLL2_BYP */
+#define WM8958_FLL2_BYP_MASK 0x8000 /* FLL2_BYP */
+#define WM8958_FLL2_BYP_SHIFT 15 /* FLL2_BYP */
+#define WM8958_FLL2_BYP_WIDTH 1 /* FLL2_BYP */
+#define WM8994_FLL2_FRC_NCO_VAL_MASK 0x1F80 /* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8994_FLL2_FRC_NCO_VAL_SHIFT 7 /* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8994_FLL2_FRC_NCO_VAL_WIDTH 6 /* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8994_FLL2_FRC_NCO 0x0040 /* FLL2_FRC_NCO */
+#define WM8994_FLL2_FRC_NCO_MASK 0x0040 /* FLL2_FRC_NCO */
+#define WM8994_FLL2_FRC_NCO_SHIFT 6 /* FLL2_FRC_NCO */
+#define WM8994_FLL2_FRC_NCO_WIDTH 1 /* FLL2_FRC_NCO */
+#define WM8994_FLL2_REFCLK_DIV_MASK 0x0018 /* FLL2_REFCLK_DIV - [4:3] */
+#define WM8994_FLL2_REFCLK_DIV_SHIFT 3 /* FLL2_REFCLK_DIV - [4:3] */
+#define WM8994_FLL2_REFCLK_DIV_WIDTH 2 /* FLL2_REFCLK_DIV - [4:3] */
+#define WM8994_FLL2_REFCLK_SRC_MASK 0x0003 /* FLL2_REFCLK_SRC - [1:0] */
+#define WM8994_FLL2_REFCLK_SRC_SHIFT 0 /* FLL2_REFCLK_SRC - [1:0] */
+#define WM8994_FLL2_REFCLK_SRC_WIDTH 2 /* FLL2_REFCLK_SRC - [1:0] */
+
+/*
+ * R582 (0x246) - FLL2 EFS 1
+ */
+#define WM8958_FLL2_LAMBDA_MASK 0xFFFF /* FLL2_LAMBDA - [15:0] */
+#define WM8958_FLL2_LAMBDA_SHIFT 0 /* FLL2_LAMBDA - [15:0] */
+#define WM8958_FLL2_LAMBDA_WIDTH 16 /* FLL2_LAMBDA - [15:0] */
+
+/*
+ * R583 (0x247) - FLL2 EFS 2
+ */
+#define WM8958_FLL2_LFSR_SEL_MASK 0x0006 /* FLL2_LFSR_SEL - [2:1] */
+#define WM8958_FLL2_LFSR_SEL_SHIFT 1 /* FLL2_LFSR_SEL - [2:1] */
+#define WM8958_FLL2_LFSR_SEL_WIDTH 2 /* FLL2_LFSR_SEL - [2:1] */
+#define WM8958_FLL2_EFS_ENA 0x0001 /* FLL2_EFS_ENA */
+#define WM8958_FLL2_EFS_ENA_MASK 0x0001 /* FLL2_EFS_ENA */
+#define WM8958_FLL2_EFS_ENA_SHIFT 0 /* FLL2_EFS_ENA */
+#define WM8958_FLL2_EFS_ENA_WIDTH 1 /* FLL2_EFS_ENA */
+
+/*
+ * R768 (0x300) - AIF1 Control (1)
+ */
+#define WM8994_AIF1ADCL_SRC 0x8000 /* AIF1ADCL_SRC */
+#define WM8994_AIF1ADCL_SRC_MASK 0x8000 /* AIF1ADCL_SRC */
+#define WM8994_AIF1ADCL_SRC_SHIFT 15 /* AIF1ADCL_SRC */
+#define WM8994_AIF1ADCL_SRC_WIDTH 1 /* AIF1ADCL_SRC */
+#define WM8994_AIF1ADCR_SRC 0x4000 /* AIF1ADCR_SRC */
+#define WM8994_AIF1ADCR_SRC_MASK 0x4000 /* AIF1ADCR_SRC */
+#define WM8994_AIF1ADCR_SRC_SHIFT 14 /* AIF1ADCR_SRC */
+#define WM8994_AIF1ADCR_SRC_WIDTH 1 /* AIF1ADCR_SRC */
+#define WM8994_AIF1ADC_TDM 0x2000 /* AIF1ADC_TDM */
+#define WM8994_AIF1ADC_TDM_MASK 0x2000 /* AIF1ADC_TDM */
+#define WM8994_AIF1ADC_TDM_SHIFT 13 /* AIF1ADC_TDM */
+#define WM8994_AIF1ADC_TDM_WIDTH 1 /* AIF1ADC_TDM */
+#define WM8994_AIF1_BCLK_INV 0x0100 /* AIF1_BCLK_INV */
+#define WM8994_AIF1_BCLK_INV_MASK 0x0100 /* AIF1_BCLK_INV */
+#define WM8994_AIF1_BCLK_INV_SHIFT 8 /* AIF1_BCLK_INV */
+#define WM8994_AIF1_BCLK_INV_WIDTH 1 /* AIF1_BCLK_INV */
+#define WM8994_AIF1_LRCLK_INV 0x0080 /* AIF1_LRCLK_INV */
+#define WM8994_AIF1_LRCLK_INV_MASK 0x0080 /* AIF1_LRCLK_INV */
+#define WM8994_AIF1_LRCLK_INV_SHIFT 7 /* AIF1_LRCLK_INV */
+#define WM8994_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */
+#define WM8994_AIF1_WL_MASK 0x0060 /* AIF1_WL - [6:5] */
+#define WM8994_AIF1_WL_SHIFT 5 /* AIF1_WL - [6:5] */
+#define WM8994_AIF1_WL_WIDTH 2 /* AIF1_WL - [6:5] */
+#define WM8994_AIF1_FMT_MASK 0x0018 /* AIF1_FMT - [4:3] */
+#define WM8994_AIF1_FMT_SHIFT 3 /* AIF1_FMT - [4:3] */
+#define WM8994_AIF1_FMT_WIDTH 2 /* AIF1_FMT - [4:3] */
+
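The same convention works in the write direction: the new field value is shifted up by *_SHIFT and merged under *_MASK, leaving the other bits of the register untouched. A hedged sketch for updating the AIF1_WL word-length field of R768 (0x300) is shown below; the helper is illustrative only (not a kernel API), no meaning is implied for individual AIF1_WL codes, and writing the result back to the device is left to whatever register access layer is in use.

static inline unsigned int wm8994_set_aif1_wl(unsigned int reg_val,
					      unsigned int wl_code)
{
	/* Clear the current AIF1_WL bits ([6:5]), then insert the new code. */
	reg_val &= ~WM8994_AIF1_WL_MASK;
	reg_val |= (wl_code << WM8994_AIF1_WL_SHIFT) & WM8994_AIF1_WL_MASK;
	return reg_val;
}
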
+/*
+ * R769 (0x301) - AIF1 Control (2)
+ */
+#define WM8994_AIF1DACL_SRC 0x8000 /* AIF1DACL_SRC */
+#define WM8994_AIF1DACL_SRC_MASK 0x8000 /* AIF1DACL_SRC */
+#define WM8994_AIF1DACL_SRC_SHIFT 15 /* AIF1DACL_SRC */
+#define WM8994_AIF1DACL_SRC_WIDTH 1 /* AIF1DACL_SRC */
+#define WM8994_AIF1DACR_SRC 0x4000 /* AIF1DACR_SRC */
+#define WM8994_AIF1DACR_SRC_MASK 0x4000 /* AIF1DACR_SRC */
+#define WM8994_AIF1DACR_SRC_SHIFT 14 /* AIF1DACR_SRC */
+#define WM8994_AIF1DACR_SRC_WIDTH 1 /* AIF1DACR_SRC */
+#define WM8994_AIF1DAC_BOOST_MASK 0x0C00 /* AIF1DAC_BOOST - [11:10] */
+#define WM8994_AIF1DAC_BOOST_SHIFT 10 /* AIF1DAC_BOOST - [11:10] */
+#define WM8994_AIF1DAC_BOOST_WIDTH 2 /* AIF1DAC_BOOST - [11:10] */
+#define WM8994_AIF1_MONO 0x0100 /* AIF1_MONO */
+#define WM8994_AIF1_MONO_MASK 0x0100 /* AIF1_MONO */
+#define WM8994_AIF1_MONO_SHIFT 8 /* AIF1_MONO */
+#define WM8994_AIF1_MONO_WIDTH 1 /* AIF1_MONO */
+#define WM8994_AIF1DAC_COMP 0x0010 /* AIF1DAC_COMP */
+#define WM8994_AIF1DAC_COMP_MASK 0x0010 /* AIF1DAC_COMP */
+#define WM8994_AIF1DAC_COMP_SHIFT 4 /* AIF1DAC_COMP */
+#define WM8994_AIF1DAC_COMP_WIDTH 1 /* AIF1DAC_COMP */
+#define WM8994_AIF1DAC_COMPMODE 0x0008 /* AIF1DAC_COMPMODE */
+#define WM8994_AIF1DAC_COMPMODE_MASK 0x0008 /* AIF1DAC_COMPMODE */
+#define WM8994_AIF1DAC_COMPMODE_SHIFT 3 /* AIF1DAC_COMPMODE */
+#define WM8994_AIF1DAC_COMPMODE_WIDTH 1 /* AIF1DAC_COMPMODE */
+#define WM8994_AIF1ADC_COMP 0x0004 /* AIF1ADC_COMP */
+#define WM8994_AIF1ADC_COMP_MASK 0x0004 /* AIF1ADC_COMP */
+#define WM8994_AIF1ADC_COMP_SHIFT 2 /* AIF1ADC_COMP */
+#define WM8994_AIF1ADC_COMP_WIDTH 1 /* AIF1ADC_COMP */
+#define WM8994_AIF1ADC_COMPMODE 0x0002 /* AIF1ADC_COMPMODE */
+#define WM8994_AIF1ADC_COMPMODE_MASK 0x0002 /* AIF1ADC_COMPMODE */
+#define WM8994_AIF1ADC_COMPMODE_SHIFT 1 /* AIF1ADC_COMPMODE */
+#define WM8994_AIF1ADC_COMPMODE_WIDTH 1 /* AIF1ADC_COMPMODE */
+#define WM8994_AIF1_LOOPBACK 0x0001 /* AIF1_LOOPBACK */
+#define WM8994_AIF1_LOOPBACK_MASK 0x0001 /* AIF1_LOOPBACK */
+#define WM8994_AIF1_LOOPBACK_SHIFT 0 /* AIF1_LOOPBACK */
+#define WM8994_AIF1_LOOPBACK_WIDTH 1 /* AIF1_LOOPBACK */
+
+/*
+ * R770 (0x302) - AIF1 Master/Slave
+ */
+#define WM8994_AIF1_TRI 0x8000 /* AIF1_TRI */
+#define WM8994_AIF1_TRI_MASK 0x8000 /* AIF1_TRI */
+#define WM8994_AIF1_TRI_SHIFT 15 /* AIF1_TRI */
+#define WM8994_AIF1_TRI_WIDTH 1 /* AIF1_TRI */
+#define WM8994_AIF1_MSTR 0x4000 /* AIF1_MSTR */
+#define WM8994_AIF1_MSTR_MASK 0x4000 /* AIF1_MSTR */
+#define WM8994_AIF1_MSTR_SHIFT 14 /* AIF1_MSTR */
+#define WM8994_AIF1_MSTR_WIDTH 1 /* AIF1_MSTR */
+#define WM8994_AIF1_CLK_FRC 0x2000 /* AIF1_CLK_FRC */
+#define WM8994_AIF1_CLK_FRC_MASK 0x2000 /* AIF1_CLK_FRC */
+#define WM8994_AIF1_CLK_FRC_SHIFT 13 /* AIF1_CLK_FRC */
+#define WM8994_AIF1_CLK_FRC_WIDTH 1 /* AIF1_CLK_FRC */
+#define WM8994_AIF1_LRCLK_FRC 0x1000 /* AIF1_LRCLK_FRC */
+#define WM8994_AIF1_LRCLK_FRC_MASK 0x1000 /* AIF1_LRCLK_FRC */
+#define WM8994_AIF1_LRCLK_FRC_SHIFT 12 /* AIF1_LRCLK_FRC */
+#define WM8994_AIF1_LRCLK_FRC_WIDTH 1 /* AIF1_LRCLK_FRC */
+
+/*
+ * R771 (0x303) - AIF1 BCLK
+ */
+#define WM8994_AIF1_BCLK_DIV_MASK 0x01F0 /* AIF1_BCLK_DIV - [8:4] */
+#define WM8994_AIF1_BCLK_DIV_SHIFT 4 /* AIF1_BCLK_DIV - [8:4] */
+#define WM8994_AIF1_BCLK_DIV_WIDTH 5 /* AIF1_BCLK_DIV - [8:4] */
+
+/*
+ * R772 (0x304) - AIF1ADC LRCLK
+ */
+#define WM8958_AIF1_LRCLK_INV 0x1000 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_MASK 0x1000 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_SHIFT 12 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */
+#define WM8994_AIF1ADC_LRCLK_DIR 0x0800 /* AIF1ADC_LRCLK_DIR */
+#define WM8994_AIF1ADC_LRCLK_DIR_MASK 0x0800 /* AIF1ADC_LRCLK_DIR */
+#define WM8994_AIF1ADC_LRCLK_DIR_SHIFT 11 /* AIF1ADC_LRCLK_DIR */
+#define WM8994_AIF1ADC_LRCLK_DIR_WIDTH 1 /* AIF1ADC_LRCLK_DIR */
+#define WM8994_AIF1ADC_RATE_MASK 0x07FF /* AIF1ADC_RATE - [10:0] */
+#define WM8994_AIF1ADC_RATE_SHIFT 0 /* AIF1ADC_RATE - [10:0] */
+#define WM8994_AIF1ADC_RATE_WIDTH 11 /* AIF1ADC_RATE - [10:0] */
+
+/*
+ * R773 (0x305) - AIF1DAC LRCLK
+ */
+#define WM8958_AIF1_LRCLK_INV 0x1000 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_MASK 0x1000 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_SHIFT 12 /* AIF1_LRCLK_INV */
+#define WM8958_AIF1_LRCLK_INV_WIDTH 1 /* AIF1_LRCLK_INV */
+#define WM8994_AIF1DAC_LRCLK_DIR 0x0800 /* AIF1DAC_LRCLK_DIR */
+#define WM8994_AIF1DAC_LRCLK_DIR_MASK 0x0800 /* AIF1DAC_LRCLK_DIR */
+#define WM8994_AIF1DAC_LRCLK_DIR_SHIFT 11 /* AIF1DAC_LRCLK_DIR */
+#define WM8994_AIF1DAC_LRCLK_DIR_WIDTH 1 /* AIF1DAC_LRCLK_DIR */
+#define WM8994_AIF1DAC_RATE_MASK 0x07FF /* AIF1DAC_RATE - [10:0] */
+#define WM8994_AIF1DAC_RATE_SHIFT 0 /* AIF1DAC_RATE - [10:0] */
+#define WM8994_AIF1DAC_RATE_WIDTH 11 /* AIF1DAC_RATE - [10:0] */
+
+/*
+ * R774 (0x306) - AIF1DAC Data
+ */
+#define WM8994_AIF1DACL_DAT_INV 0x0002 /* AIF1DACL_DAT_INV */
+#define WM8994_AIF1DACL_DAT_INV_MASK 0x0002 /* AIF1DACL_DAT_INV */
+#define WM8994_AIF1DACL_DAT_INV_SHIFT 1 /* AIF1DACL_DAT_INV */
+#define WM8994_AIF1DACL_DAT_INV_WIDTH 1 /* AIF1DACL_DAT_INV */
+#define WM8994_AIF1DACR_DAT_INV 0x0001 /* AIF1DACR_DAT_INV */
+#define WM8994_AIF1DACR_DAT_INV_MASK 0x0001 /* AIF1DACR_DAT_INV */
+#define WM8994_AIF1DACR_DAT_INV_SHIFT 0 /* AIF1DACR_DAT_INV */
+#define WM8994_AIF1DACR_DAT_INV_WIDTH 1 /* AIF1DACR_DAT_INV */
+
+/*
+ * R775 (0x307) - AIF1ADC Data
+ */
+#define WM8994_AIF1ADCL_DAT_INV 0x0002 /* AIF1ADCL_DAT_INV */
+#define WM8994_AIF1ADCL_DAT_INV_MASK 0x0002 /* AIF1ADCL_DAT_INV */
+#define WM8994_AIF1ADCL_DAT_INV_SHIFT 1 /* AIF1ADCL_DAT_INV */
+#define WM8994_AIF1ADCL_DAT_INV_WIDTH 1 /* AIF1ADCL_DAT_INV */
+#define WM8994_AIF1ADCR_DAT_INV 0x0001 /* AIF1ADCR_DAT_INV */
+#define WM8994_AIF1ADCR_DAT_INV_MASK 0x0001 /* AIF1ADCR_DAT_INV */
+#define WM8994_AIF1ADCR_DAT_INV_SHIFT 0 /* AIF1ADCR_DAT_INV */
+#define WM8994_AIF1ADCR_DAT_INV_WIDTH 1 /* AIF1ADCR_DAT_INV */
+
+/*
+ * R784 (0x310) - AIF2 Control (1)
+ */
+#define WM8994_AIF2ADCL_SRC 0x8000 /* AIF2ADCL_SRC */
+#define WM8994_AIF2ADCL_SRC_MASK 0x8000 /* AIF2ADCL_SRC */
+#define WM8994_AIF2ADCL_SRC_SHIFT 15 /* AIF2ADCL_SRC */
+#define WM8994_AIF2ADCL_SRC_WIDTH 1 /* AIF2ADCL_SRC */
+#define WM8994_AIF2ADCR_SRC 0x4000 /* AIF2ADCR_SRC */
+#define WM8994_AIF2ADCR_SRC_MASK 0x4000 /* AIF2ADCR_SRC */
+#define WM8994_AIF2ADCR_SRC_SHIFT 14 /* AIF2ADCR_SRC */
+#define WM8994_AIF2ADCR_SRC_WIDTH 1 /* AIF2ADCR_SRC */
+#define WM8994_AIF2ADC_TDM 0x2000 /* AIF2ADC_TDM */
+#define WM8994_AIF2ADC_TDM_MASK 0x2000 /* AIF2ADC_TDM */
+#define WM8994_AIF2ADC_TDM_SHIFT 13 /* AIF2ADC_TDM */
+#define WM8994_AIF2ADC_TDM_WIDTH 1 /* AIF2ADC_TDM */
+#define WM8994_AIF2ADC_TDM_CHAN 0x1000 /* AIF2ADC_TDM_CHAN */
+#define WM8994_AIF2ADC_TDM_CHAN_MASK 0x1000 /* AIF2ADC_TDM_CHAN */
+#define WM8994_AIF2ADC_TDM_CHAN_SHIFT 12 /* AIF2ADC_TDM_CHAN */
+#define WM8994_AIF2ADC_TDM_CHAN_WIDTH 1 /* AIF2ADC_TDM_CHAN */
+#define WM8994_AIF2_BCLK_INV 0x0100 /* AIF2_BCLK_INV */
+#define WM8994_AIF2_BCLK_INV_MASK 0x0100 /* AIF2_BCLK_INV */
+#define WM8994_AIF2_BCLK_INV_SHIFT 8 /* AIF2_BCLK_INV */
+#define WM8994_AIF2_BCLK_INV_WIDTH 1 /* AIF2_BCLK_INV */
+#define WM8994_AIF2_LRCLK_INV 0x0080 /* AIF2_LRCLK_INV */
+#define WM8994_AIF2_LRCLK_INV_MASK 0x0080 /* AIF2_LRCLK_INV */
+#define WM8994_AIF2_LRCLK_INV_SHIFT 7 /* AIF2_LRCLK_INV */
+#define WM8994_AIF2_LRCLK_INV_WIDTH 1 /* AIF2_LRCLK_INV */
+#define WM8994_AIF2_WL_MASK 0x0060 /* AIF2_WL - [6:5] */
+#define WM8994_AIF2_WL_SHIFT 5 /* AIF2_WL - [6:5] */
+#define WM8994_AIF2_WL_WIDTH 2 /* AIF2_WL - [6:5] */
+#define WM8994_AIF2_FMT_MASK 0x0018 /* AIF2_FMT - [4:3] */
+#define WM8994_AIF2_FMT_SHIFT 3 /* AIF2_FMT - [4:3] */
+#define WM8994_AIF2_FMT_WIDTH 2 /* AIF2_FMT - [4:3] */
+
+/*
+ * R785 (0x311) - AIF2 Control (2)
+ */
+#define WM8994_AIF2DACL_SRC 0x8000 /* AIF2DACL_SRC */
+#define WM8994_AIF2DACL_SRC_MASK 0x8000 /* AIF2DACL_SRC */
+#define WM8994_AIF2DACL_SRC_SHIFT 15 /* AIF2DACL_SRC */
+#define WM8994_AIF2DACL_SRC_WIDTH 1 /* AIF2DACL_SRC */
+#define WM8994_AIF2DACR_SRC 0x4000 /* AIF2DACR_SRC */
+#define WM8994_AIF2DACR_SRC_MASK 0x4000 /* AIF2DACR_SRC */
+#define WM8994_AIF2DACR_SRC_SHIFT 14 /* AIF2DACR_SRC */
+#define WM8994_AIF2DACR_SRC_WIDTH 1 /* AIF2DACR_SRC */
+#define WM8994_AIF2DAC_TDM 0x2000 /* AIF2DAC_TDM */
+#define WM8994_AIF2DAC_TDM_MASK 0x2000 /* AIF2DAC_TDM */
+#define WM8994_AIF2DAC_TDM_SHIFT 13 /* AIF2DAC_TDM */
+#define WM8994_AIF2DAC_TDM_WIDTH 1 /* AIF2DAC_TDM */
+#define WM8994_AIF2DAC_TDM_CHAN 0x1000 /* AIF2DAC_TDM_CHAN */
+#define WM8994_AIF2DAC_TDM_CHAN_MASK 0x1000 /* AIF2DAC_TDM_CHAN */
+#define WM8994_AIF2DAC_TDM_CHAN_SHIFT 12 /* AIF2DAC_TDM_CHAN */
+#define WM8994_AIF2DAC_TDM_CHAN_WIDTH 1 /* AIF2DAC_TDM_CHAN */
+#define WM8994_AIF2DAC_BOOST_MASK 0x0C00 /* AIF2DAC_BOOST - [11:10] */
+#define WM8994_AIF2DAC_BOOST_SHIFT 10 /* AIF2DAC_BOOST - [11:10] */
+#define WM8994_AIF2DAC_BOOST_WIDTH 2 /* AIF2DAC_BOOST - [11:10] */
+#define WM8994_AIF2_MONO 0x0100 /* AIF2_MONO */
+#define WM8994_AIF2_MONO_MASK 0x0100 /* AIF2_MONO */
+#define WM8994_AIF2_MONO_SHIFT 8 /* AIF2_MONO */
+#define WM8994_AIF2_MONO_WIDTH 1 /* AIF2_MONO */
+#define WM8994_AIF2DAC_COMP 0x0010 /* AIF2DAC_COMP */
+#define WM8994_AIF2DAC_COMP_MASK 0x0010 /* AIF2DAC_COMP */
+#define WM8994_AIF2DAC_COMP_SHIFT 4 /* AIF2DAC_COMP */
+#define WM8994_AIF2DAC_COMP_WIDTH 1 /* AIF2DAC_COMP */
+#define WM8994_AIF2DAC_COMPMODE 0x0008 /* AIF2DAC_COMPMODE */
+#define WM8994_AIF2DAC_COMPMODE_MASK 0x0008 /* AIF2DAC_COMPMODE */
+#define WM8994_AIF2DAC_COMPMODE_SHIFT 3 /* AIF2DAC_COMPMODE */
+#define WM8994_AIF2DAC_COMPMODE_WIDTH 1 /* AIF2DAC_COMPMODE */
+#define WM8994_AIF2ADC_COMP 0x0004 /* AIF2ADC_COMP */
+#define WM8994_AIF2ADC_COMP_MASK 0x0004 /* AIF2ADC_COMP */
+#define WM8994_AIF2ADC_COMP_SHIFT 2 /* AIF2ADC_COMP */
+#define WM8994_AIF2ADC_COMP_WIDTH 1 /* AIF2ADC_COMP */
+#define WM8994_AIF2ADC_COMPMODE 0x0002 /* AIF2ADC_COMPMODE */
+#define WM8994_AIF2ADC_COMPMODE_MASK 0x0002 /* AIF2ADC_COMPMODE */
+#define WM8994_AIF2ADC_COMPMODE_SHIFT 1 /* AIF2ADC_COMPMODE */
+#define WM8994_AIF2ADC_COMPMODE_WIDTH 1 /* AIF2ADC_COMPMODE */
+#define WM8994_AIF2_LOOPBACK 0x0001 /* AIF2_LOOPBACK */
+#define WM8994_AIF2_LOOPBACK_MASK 0x0001 /* AIF2_LOOPBACK */
+#define WM8994_AIF2_LOOPBACK_SHIFT 0 /* AIF2_LOOPBACK */
+#define WM8994_AIF2_LOOPBACK_WIDTH 1 /* AIF2_LOOPBACK */
+
+/*
+ * R786 (0x312) - AIF2 Master/Slave
+ */
+#define WM8994_AIF2_TRI 0x8000 /* AIF2_TRI */
+#define WM8994_AIF2_TRI_MASK 0x8000 /* AIF2_TRI */
+#define WM8994_AIF2_TRI_SHIFT 15 /* AIF2_TRI */
+#define WM8994_AIF2_TRI_WIDTH 1 /* AIF2_TRI */
+#define WM8994_AIF2_MSTR 0x4000 /* AIF2_MSTR */
+#define WM8994_AIF2_MSTR_MASK 0x4000 /* AIF2_MSTR */
+#define WM8994_AIF2_MSTR_SHIFT 14 /* AIF2_MSTR */
+#define WM8994_AIF2_MSTR_WIDTH 1 /* AIF2_MSTR */
+#define WM8994_AIF2_CLK_FRC 0x2000 /* AIF2_CLK_FRC */
+#define WM8994_AIF2_CLK_FRC_MASK 0x2000 /* AIF2_CLK_FRC */
+#define WM8994_AIF2_CLK_FRC_SHIFT 13 /* AIF2_CLK_FRC */
+#define WM8994_AIF2_CLK_FRC_WIDTH 1 /* AIF2_CLK_FRC */
+#define WM8994_AIF2_LRCLK_FRC 0x1000 /* AIF2_LRCLK_FRC */
+#define WM8994_AIF2_LRCLK_FRC_MASK 0x1000 /* AIF2_LRCLK_FRC */
+#define WM8994_AIF2_LRCLK_FRC_SHIFT 12 /* AIF2_LRCLK_FRC */
+#define WM8994_AIF2_LRCLK_FRC_WIDTH 1 /* AIF2_LRCLK_FRC */
+
+/*
+ * R787 (0x313) - AIF2 BCLK
+ */
+#define WM8994_AIF2_BCLK_DIV_MASK 0x01F0 /* AIF2_BCLK_DIV - [8:4] */
+#define WM8994_AIF2_BCLK_DIV_SHIFT 4 /* AIF2_BCLK_DIV - [8:4] */
+#define WM8994_AIF2_BCLK_DIV_WIDTH 5 /* AIF2_BCLK_DIV - [8:4] */
+
+/*
+ * R788 (0x314) - AIF2ADC LRCLK
+ */
+#define WM8994_AIF2ADC_LRCLK_DIR 0x0800 /* AIF2ADC_LRCLK_DIR */
+#define WM8994_AIF2ADC_LRCLK_DIR_MASK 0x0800 /* AIF2ADC_LRCLK_DIR */
+#define WM8994_AIF2ADC_LRCLK_DIR_SHIFT 11 /* AIF2ADC_LRCLK_DIR */
+#define WM8994_AIF2ADC_LRCLK_DIR_WIDTH 1 /* AIF2ADC_LRCLK_DIR */
+#define WM8994_AIF2ADC_RATE_MASK 0x07FF /* AIF2ADC_RATE - [10:0] */
+#define WM8994_AIF2ADC_RATE_SHIFT 0 /* AIF2ADC_RATE - [10:0] */
+#define WM8994_AIF2ADC_RATE_WIDTH 11 /* AIF2ADC_RATE - [10:0] */
+
+/*
+ * R789 (0x315) - AIF2DAC LRCLK
+ */
+#define WM8994_AIF2DAC_LRCLK_DIR 0x0800 /* AIF2DAC_LRCLK_DIR */
+#define WM8994_AIF2DAC_LRCLK_DIR_MASK 0x0800 /* AIF2DAC_LRCLK_DIR */
+#define WM8994_AIF2DAC_LRCLK_DIR_SHIFT 11 /* AIF2DAC_LRCLK_DIR */
+#define WM8994_AIF2DAC_LRCLK_DIR_WIDTH 1 /* AIF2DAC_LRCLK_DIR */
+#define WM8994_AIF2DAC_RATE_MASK 0x07FF /* AIF2DAC_RATE - [10:0] */
+#define WM8994_AIF2DAC_RATE_SHIFT 0 /* AIF2DAC_RATE - [10:0] */
+#define WM8994_AIF2DAC_RATE_WIDTH 11 /* AIF2DAC_RATE - [10:0] */
+
+/*
+ * R790 (0x316) - AIF2DAC Data
+ */
+#define WM8994_AIF2DACL_DAT_INV 0x0002 /* AIF2DACL_DAT_INV */
+#define WM8994_AIF2DACL_DAT_INV_MASK 0x0002 /* AIF2DACL_DAT_INV */
+#define WM8994_AIF2DACL_DAT_INV_SHIFT 1 /* AIF2DACL_DAT_INV */
+#define WM8994_AIF2DACL_DAT_INV_WIDTH 1 /* AIF2DACL_DAT_INV */
+#define WM8994_AIF2DACR_DAT_INV 0x0001 /* AIF2DACR_DAT_INV */
+#define WM8994_AIF2DACR_DAT_INV_MASK 0x0001 /* AIF2DACR_DAT_INV */
+#define WM8994_AIF2DACR_DAT_INV_SHIFT 0 /* AIF2DACR_DAT_INV */
+#define WM8994_AIF2DACR_DAT_INV_WIDTH 1 /* AIF2DACR_DAT_INV */
+
+/*
+ * R791 (0x317) - AIF2ADC Data
+ */
+#define WM8994_AIF2ADCL_DAT_INV 0x0002 /* AIF2ADCL_DAT_INV */
+#define WM8994_AIF2ADCL_DAT_INV_MASK 0x0002 /* AIF2ADCL_DAT_INV */
+#define WM8994_AIF2ADCL_DAT_INV_SHIFT 1 /* AIF2ADCL_DAT_INV */
+#define WM8994_AIF2ADCL_DAT_INV_WIDTH 1 /* AIF2ADCL_DAT_INV */
+#define WM8994_AIF2ADCR_DAT_INV 0x0001 /* AIF2ADCR_DAT_INV */
+#define WM8994_AIF2ADCR_DAT_INV_MASK 0x0001 /* AIF2ADCR_DAT_INV */
+#define WM8994_AIF2ADCR_DAT_INV_SHIFT 0 /* AIF2ADCR_DAT_INV */
+#define WM8994_AIF2ADCR_DAT_INV_WIDTH 1 /* AIF2ADCR_DAT_INV */
+
+/*
+ * R800 (0x320) - AIF3 Control (1)
+ */
+#define WM8958_AIF3_LRCLK_INV 0x0080 /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_MASK 0x0080 /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_SHIFT 7 /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_WIDTH 1 /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_WL_MASK 0x0060 /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_WL_SHIFT 5 /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_WL_WIDTH 2 /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_FMT_MASK 0x0018 /* AIF3_FMT - [4:3] */
+#define WM8958_AIF3_FMT_SHIFT 3 /* AIF3_FMT - [4:3] */
+#define WM8958_AIF3_FMT_WIDTH 2 /* AIF3_FMT - [4:3] */
+
+/*
+ * R801 (0x321) - AIF3 Control (2)
+ */
+#define WM8958_AIF3DAC_BOOST_MASK 0x0C00 /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_BOOST_SHIFT 10 /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_BOOST_WIDTH 2 /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_COMP 0x0010 /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_MASK 0x0010 /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_SHIFT 4 /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_WIDTH 1 /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMPMODE 0x0008 /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_MASK 0x0008 /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_SHIFT 3 /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_WIDTH 1 /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3ADC_COMP 0x0004 /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_MASK 0x0004 /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_SHIFT 2 /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_WIDTH 1 /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMPMODE 0x0002 /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_MASK 0x0002 /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_SHIFT 1 /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_WIDTH 1 /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3_LOOPBACK 0x0001 /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_MASK 0x0001 /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_SHIFT 0 /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_WIDTH 1 /* AIF3_LOOPBACK */
+
+/*
+ * R802 (0x322) - AIF3DAC Data
+ */
+#define WM8958_AIF3DAC_DAT_INV 0x0001 /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_MASK 0x0001 /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_SHIFT 0 /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_WIDTH 1 /* AIF3DAC_DAT_INV */
+
+/*
+ * R803 (0x323) - AIF3ADC Data
+ */
+#define WM8958_AIF3ADC_DAT_INV 0x0001 /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_MASK 0x0001 /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_SHIFT 0 /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_WIDTH 1 /* AIF3ADC_DAT_INV */
+
+/*
+ * R1024 (0x400) - AIF1 ADC1 Left Volume
+ */
+#define WM8994_AIF1ADC1_VU 0x0100 /* AIF1ADC1_VU */
+#define WM8994_AIF1ADC1_VU_MASK 0x0100 /* AIF1ADC1_VU */
+#define WM8994_AIF1ADC1_VU_SHIFT 8 /* AIF1ADC1_VU */
+#define WM8994_AIF1ADC1_VU_WIDTH 1 /* AIF1ADC1_VU */
+#define WM8994_AIF1ADC1L_VOL_MASK 0x00FF /* AIF1ADC1L_VOL - [7:0] */
+#define WM8994_AIF1ADC1L_VOL_SHIFT 0 /* AIF1ADC1L_VOL - [7:0] */
+#define WM8994_AIF1ADC1L_VOL_WIDTH 8 /* AIF1ADC1L_VOL - [7:0] */
+
+/*
+ * R1025 (0x401) - AIF1 ADC1 Right Volume
+ */
+#define WM8994_AIF1ADC1_VU 0x0100 /* AIF1ADC1_VU */
+#define WM8994_AIF1ADC1_VU_MASK 0x0100 /* AIF1ADC1_VU */
+#define WM8994_AIF1ADC1_VU_SHIFT 8 /* AIF1ADC1_VU */
+#define WM8994_AIF1ADC1_VU_WIDTH 1 /* AIF1ADC1_VU */
+#define WM8994_AIF1ADC1R_VOL_MASK 0x00FF /* AIF1ADC1R_VOL - [7:0] */
+#define WM8994_AIF1ADC1R_VOL_SHIFT 0 /* AIF1ADC1R_VOL - [7:0] */
+#define WM8994_AIF1ADC1R_VOL_WIDTH 8 /* AIF1ADC1R_VOL - [7:0] */
+
+/*
+ * R1026 (0x402) - AIF1 DAC1 Left Volume
+ */
+#define WM8994_AIF1DAC1_VU 0x0100 /* AIF1DAC1_VU */
+#define WM8994_AIF1DAC1_VU_MASK 0x0100 /* AIF1DAC1_VU */
+#define WM8994_AIF1DAC1_VU_SHIFT 8 /* AIF1DAC1_VU */
+#define WM8994_AIF1DAC1_VU_WIDTH 1 /* AIF1DAC1_VU */
+#define WM8994_AIF1DAC1L_VOL_MASK 0x00FF /* AIF1DAC1L_VOL - [7:0] */
+#define WM8994_AIF1DAC1L_VOL_SHIFT 0 /* AIF1DAC1L_VOL - [7:0] */
+#define WM8994_AIF1DAC1L_VOL_WIDTH 8 /* AIF1DAC1L_VOL - [7:0] */
+
+/*
+ * R1027 (0x403) - AIF1 DAC1 Right Volume
+ */
+#define WM8994_AIF1DAC1_VU 0x0100 /* AIF1DAC1_VU */
+#define WM8994_AIF1DAC1_VU_MASK 0x0100 /* AIF1DAC1_VU */
+#define WM8994_AIF1DAC1_VU_SHIFT 8 /* AIF1DAC1_VU */
+#define WM8994_AIF1DAC1_VU_WIDTH 1 /* AIF1DAC1_VU */
+#define WM8994_AIF1DAC1R_VOL_MASK 0x00FF /* AIF1DAC1R_VOL - [7:0] */
+#define WM8994_AIF1DAC1R_VOL_SHIFT 0 /* AIF1DAC1R_VOL - [7:0] */
+#define WM8994_AIF1DAC1R_VOL_WIDTH 8 /* AIF1DAC1R_VOL - [7:0] */
+
+/*
+ * R1028 (0x404) - AIF1 ADC2 Left Volume
+ */
+#define WM8994_AIF1ADC2_VU 0x0100 /* AIF1ADC2_VU */
+#define WM8994_AIF1ADC2_VU_MASK 0x0100 /* AIF1ADC2_VU */
+#define WM8994_AIF1ADC2_VU_SHIFT 8 /* AIF1ADC2_VU */
+#define WM8994_AIF1ADC2_VU_WIDTH 1 /* AIF1ADC2_VU */
+#define WM8994_AIF1ADC2L_VOL_MASK 0x00FF /* AIF1ADC2L_VOL - [7:0] */
+#define WM8994_AIF1ADC2L_VOL_SHIFT 0 /* AIF1ADC2L_VOL - [7:0] */
+#define WM8994_AIF1ADC2L_VOL_WIDTH 8 /* AIF1ADC2L_VOL - [7:0] */
+
+/*
+ * R1029 (0x405) - AIF1 ADC2 Right Volume
+ */
+#define WM8994_AIF1ADC2_VU 0x0100 /* AIF1ADC2_VU */
+#define WM8994_AIF1ADC2_VU_MASK 0x0100 /* AIF1ADC2_VU */
+#define WM8994_AIF1ADC2_VU_SHIFT 8 /* AIF1ADC2_VU */
+#define WM8994_AIF1ADC2_VU_WIDTH 1 /* AIF1ADC2_VU */
+#define WM8994_AIF1ADC2R_VOL_MASK 0x00FF /* AIF1ADC2R_VOL - [7:0] */
+#define WM8994_AIF1ADC2R_VOL_SHIFT 0 /* AIF1ADC2R_VOL - [7:0] */
+#define WM8994_AIF1ADC2R_VOL_WIDTH 8 /* AIF1ADC2R_VOL - [7:0] */
+
+/*
+ * R1030 (0x406) - AIF1 DAC2 Left Volume
+ */
+#define WM8994_AIF1DAC2_VU 0x0100 /* AIF1DAC2_VU */
+#define WM8994_AIF1DAC2_VU_MASK 0x0100 /* AIF1DAC2_VU */
+#define WM8994_AIF1DAC2_VU_SHIFT 8 /* AIF1DAC2_VU */
+#define WM8994_AIF1DAC2_VU_WIDTH 1 /* AIF1DAC2_VU */
+#define WM8994_AIF1DAC2L_VOL_MASK 0x00FF /* AIF1DAC2L_VOL - [7:0] */
+#define WM8994_AIF1DAC2L_VOL_SHIFT 0 /* AIF1DAC2L_VOL - [7:0] */
+#define WM8994_AIF1DAC2L_VOL_WIDTH 8 /* AIF1DAC2L_VOL - [7:0] */
+
+/*
+ * R1031 (0x407) - AIF1 DAC2 Right Volume
+ */
+#define WM8994_AIF1DAC2_VU 0x0100 /* AIF1DAC2_VU */
+#define WM8994_AIF1DAC2_VU_MASK 0x0100 /* AIF1DAC2_VU */
+#define WM8994_AIF1DAC2_VU_SHIFT 8 /* AIF1DAC2_VU */
+#define WM8994_AIF1DAC2_VU_WIDTH 1 /* AIF1DAC2_VU */
+#define WM8994_AIF1DAC2R_VOL_MASK 0x00FF /* AIF1DAC2R_VOL - [7:0] */
+#define WM8994_AIF1DAC2R_VOL_SHIFT 0 /* AIF1DAC2R_VOL - [7:0] */
+#define WM8994_AIF1DAC2R_VOL_WIDTH 8 /* AIF1DAC2R_VOL - [7:0] */
+
+/*
+ * R1040 (0x410) - AIF1 ADC1 Filters
+ */
+#define WM8994_AIF1ADC_4FS 0x8000 /* AIF1ADC_4FS */
+#define WM8994_AIF1ADC_4FS_MASK 0x8000 /* AIF1ADC_4FS */
+#define WM8994_AIF1ADC_4FS_SHIFT 15 /* AIF1ADC_4FS */
+#define WM8994_AIF1ADC_4FS_WIDTH 1 /* AIF1ADC_4FS */
+#define WM8994_AIF1ADC1_HPF_CUT_MASK 0x6000 /* AIF1ADC1_HPF_CUT - [14:13] */
+#define WM8994_AIF1ADC1_HPF_CUT_SHIFT 13 /* AIF1ADC1_HPF_CUT - [14:13] */
+#define WM8994_AIF1ADC1_HPF_CUT_WIDTH 2 /* AIF1ADC1_HPF_CUT - [14:13] */
+#define WM8994_AIF1ADC1L_HPF 0x1000 /* AIF1ADC1L_HPF */
+#define WM8994_AIF1ADC1L_HPF_MASK 0x1000 /* AIF1ADC1L_HPF */
+#define WM8994_AIF1ADC1L_HPF_SHIFT 12 /* AIF1ADC1L_HPF */
+#define WM8994_AIF1ADC1L_HPF_WIDTH 1 /* AIF1ADC1L_HPF */
+#define WM8994_AIF1ADC1R_HPF 0x0800 /* AIF1ADC1R_HPF */
+#define WM8994_AIF1ADC1R_HPF_MASK 0x0800 /* AIF1ADC1R_HPF */
+#define WM8994_AIF1ADC1R_HPF_SHIFT 11 /* AIF1ADC1R_HPF */
+#define WM8994_AIF1ADC1R_HPF_WIDTH 1 /* AIF1ADC1R_HPF */
+
+/*
+ * R1041 (0x411) - AIF1 ADC2 Filters
+ */
+#define WM8994_AIF1ADC2_HPF_CUT_MASK 0x6000 /* AIF1ADC2_HPF_CUT - [14:13] */
+#define WM8994_AIF1ADC2_HPF_CUT_SHIFT 13 /* AIF1ADC2_HPF_CUT - [14:13] */
+#define WM8994_AIF1ADC2_HPF_CUT_WIDTH 2 /* AIF1ADC2_HPF_CUT - [14:13] */
+#define WM8994_AIF1ADC2L_HPF 0x1000 /* AIF1ADC2L_HPF */
+#define WM8994_AIF1ADC2L_HPF_MASK 0x1000 /* AIF1ADC2L_HPF */
+#define WM8994_AIF1ADC2L_HPF_SHIFT 12 /* AIF1ADC2L_HPF */
+#define WM8994_AIF1ADC2L_HPF_WIDTH 1 /* AIF1ADC2L_HPF */
+#define WM8994_AIF1ADC2R_HPF 0x0800 /* AIF1ADC2R_HPF */
+#define WM8994_AIF1ADC2R_HPF_MASK 0x0800 /* AIF1ADC2R_HPF */
+#define WM8994_AIF1ADC2R_HPF_SHIFT 11 /* AIF1ADC2R_HPF */
+#define WM8994_AIF1ADC2R_HPF_WIDTH 1 /* AIF1ADC2R_HPF */
+
+/*
+ * R1056 (0x420) - AIF1 DAC1 Filters (1)
+ */
+#define WM8994_AIF1DAC1_MUTE 0x0200 /* AIF1DAC1_MUTE */
+#define WM8994_AIF1DAC1_MUTE_MASK 0x0200 /* AIF1DAC1_MUTE */
+#define WM8994_AIF1DAC1_MUTE_SHIFT 9 /* AIF1DAC1_MUTE */
+#define WM8994_AIF1DAC1_MUTE_WIDTH 1 /* AIF1DAC1_MUTE */
+#define WM8994_AIF1DAC1_MONO 0x0080 /* AIF1DAC1_MONO */
+#define WM8994_AIF1DAC1_MONO_MASK 0x0080 /* AIF1DAC1_MONO */
+#define WM8994_AIF1DAC1_MONO_SHIFT 7 /* AIF1DAC1_MONO */
+#define WM8994_AIF1DAC1_MONO_WIDTH 1 /* AIF1DAC1_MONO */
+#define WM8994_AIF1DAC1_MUTERATE 0x0020 /* AIF1DAC1_MUTERATE */
+#define WM8994_AIF1DAC1_MUTERATE_MASK 0x0020 /* AIF1DAC1_MUTERATE */
+#define WM8994_AIF1DAC1_MUTERATE_SHIFT 5 /* AIF1DAC1_MUTERATE */
+#define WM8994_AIF1DAC1_MUTERATE_WIDTH 1 /* AIF1DAC1_MUTERATE */
+#define WM8994_AIF1DAC1_UNMUTE_RAMP 0x0010 /* AIF1DAC1_UNMUTE_RAMP */
+#define WM8994_AIF1DAC1_UNMUTE_RAMP_MASK 0x0010 /* AIF1DAC1_UNMUTE_RAMP */
+#define WM8994_AIF1DAC1_UNMUTE_RAMP_SHIFT 4 /* AIF1DAC1_UNMUTE_RAMP */
+#define WM8994_AIF1DAC1_UNMUTE_RAMP_WIDTH 1 /* AIF1DAC1_UNMUTE_RAMP */
+#define WM8994_AIF1DAC1_DEEMP_MASK 0x0006 /* AIF1DAC1_DEEMP - [2:1] */
+#define WM8994_AIF1DAC1_DEEMP_SHIFT 1 /* AIF1DAC1_DEEMP - [2:1] */
+#define WM8994_AIF1DAC1_DEEMP_WIDTH 2 /* AIF1DAC1_DEEMP - [2:1] */
+
+/*
+ * R1057 (0x421) - AIF1 DAC1 Filters (2)
+ */
+#define WM8994_AIF1DAC1_3D_GAIN_MASK 0x3E00 /* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8994_AIF1DAC1_3D_GAIN_SHIFT 9 /* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8994_AIF1DAC1_3D_GAIN_WIDTH 5 /* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8994_AIF1DAC1_3D_ENA 0x0100 /* AIF1DAC1_3D_ENA */
+#define WM8994_AIF1DAC1_3D_ENA_MASK 0x0100 /* AIF1DAC1_3D_ENA */
+#define WM8994_AIF1DAC1_3D_ENA_SHIFT 8 /* AIF1DAC1_3D_ENA */
+#define WM8994_AIF1DAC1_3D_ENA_WIDTH 1 /* AIF1DAC1_3D_ENA */
+
+/*
+ * R1058 (0x422) - AIF1 DAC2 Filters (1)
+ */
+#define WM8994_AIF1DAC2_MUTE 0x0200 /* AIF1DAC2_MUTE */
+#define WM8994_AIF1DAC2_MUTE_MASK 0x0200 /* AIF1DAC2_MUTE */
+#define WM8994_AIF1DAC2_MUTE_SHIFT 9 /* AIF1DAC2_MUTE */
+#define WM8994_AIF1DAC2_MUTE_WIDTH 1 /* AIF1DAC2_MUTE */
+#define WM8994_AIF1DAC2_MONO 0x0080 /* AIF1DAC2_MONO */
+#define WM8994_AIF1DAC2_MONO_MASK 0x0080 /* AIF1DAC2_MONO */
+#define WM8994_AIF1DAC2_MONO_SHIFT 7 /* AIF1DAC2_MONO */
+#define WM8994_AIF1DAC2_MONO_WIDTH 1 /* AIF1DAC2_MONO */
+#define WM8994_AIF1DAC2_MUTERATE 0x0020 /* AIF1DAC2_MUTERATE */
+#define WM8994_AIF1DAC2_MUTERATE_MASK 0x0020 /* AIF1DAC2_MUTERATE */
+#define WM8994_AIF1DAC2_MUTERATE_SHIFT 5 /* AIF1DAC2_MUTERATE */
+#define WM8994_AIF1DAC2_MUTERATE_WIDTH 1 /* AIF1DAC2_MUTERATE */
+#define WM8994_AIF1DAC2_UNMUTE_RAMP 0x0010 /* AIF1DAC2_UNMUTE_RAMP */
+#define WM8994_AIF1DAC2_UNMUTE_RAMP_MASK 0x0010 /* AIF1DAC2_UNMUTE_RAMP */
+#define WM8994_AIF1DAC2_UNMUTE_RAMP_SHIFT 4 /* AIF1DAC2_UNMUTE_RAMP */
+#define WM8994_AIF1DAC2_UNMUTE_RAMP_WIDTH 1 /* AIF1DAC2_UNMUTE_RAMP */
+#define WM8994_AIF1DAC2_DEEMP_MASK 0x0006 /* AIF1DAC2_DEEMP - [2:1] */
+#define WM8994_AIF1DAC2_DEEMP_SHIFT 1 /* AIF1DAC2_DEEMP - [2:1] */
+#define WM8994_AIF1DAC2_DEEMP_WIDTH 2 /* AIF1DAC2_DEEMP - [2:1] */
+
+/*
+ * R1059 (0x423) - AIF1 DAC2 Filters (2)
+ */
+#define WM8994_AIF1DAC2_3D_GAIN_MASK 0x3E00 /* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8994_AIF1DAC2_3D_GAIN_SHIFT 9 /* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8994_AIF1DAC2_3D_GAIN_WIDTH 5 /* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8994_AIF1DAC2_3D_ENA 0x0100 /* AIF1DAC2_3D_ENA */
+#define WM8994_AIF1DAC2_3D_ENA_MASK 0x0100 /* AIF1DAC2_3D_ENA */
+#define WM8994_AIF1DAC2_3D_ENA_SHIFT 8 /* AIF1DAC2_3D_ENA */
+#define WM8994_AIF1DAC2_3D_ENA_WIDTH 1 /* AIF1DAC2_3D_ENA */
+
+/*
+ * R1072 (0x430) - AIF1 DAC1 Noise Gate
+ */
+#define WM8958_AIF1DAC1_NG_HLD_MASK 0x0060 /* AIF1DAC1_NG_HLD - [6:5] */
+#define WM8958_AIF1DAC1_NG_HLD_SHIFT 5 /* AIF1DAC1_NG_HLD - [6:5] */
+#define WM8958_AIF1DAC1_NG_HLD_WIDTH 2 /* AIF1DAC1_NG_HLD - [6:5] */
+#define WM8958_AIF1DAC1_NG_THR_MASK 0x000E /* AIF1DAC1_NG_THR - [3:1] */
+#define WM8958_AIF1DAC1_NG_THR_SHIFT 1 /* AIF1DAC1_NG_THR - [3:1] */
+#define WM8958_AIF1DAC1_NG_THR_WIDTH 3 /* AIF1DAC1_NG_THR - [3:1] */
+#define WM8958_AIF1DAC1_NG_ENA 0x0001 /* AIF1DAC1_NG_ENA */
+#define WM8958_AIF1DAC1_NG_ENA_MASK 0x0001 /* AIF1DAC1_NG_ENA */
+#define WM8958_AIF1DAC1_NG_ENA_SHIFT 0 /* AIF1DAC1_NG_ENA */
+#define WM8958_AIF1DAC1_NG_ENA_WIDTH 1 /* AIF1DAC1_NG_ENA */
+
+/*
+ * R1073 (0x431) - AIF1 DAC2 Noise Gate
+ */
+#define WM8958_AIF1DAC2_NG_HLD_MASK 0x0060 /* AIF1DAC2_NG_HLD - [6:5] */
+#define WM8958_AIF1DAC2_NG_HLD_SHIFT 5 /* AIF1DAC2_NG_HLD - [6:5] */
+#define WM8958_AIF1DAC2_NG_HLD_WIDTH 2 /* AIF1DAC2_NG_HLD - [6:5] */
+#define WM8958_AIF1DAC2_NG_THR_MASK 0x000E /* AIF1DAC2_NG_THR - [3:1] */
+#define WM8958_AIF1DAC2_NG_THR_SHIFT 1 /* AIF1DAC2_NG_THR - [3:1] */
+#define WM8958_AIF1DAC2_NG_THR_WIDTH 3 /* AIF1DAC2_NG_THR - [3:1] */
+#define WM8958_AIF1DAC2_NG_ENA 0x0001 /* AIF1DAC2_NG_ENA */
+#define WM8958_AIF1DAC2_NG_ENA_MASK 0x0001 /* AIF1DAC2_NG_ENA */
+#define WM8958_AIF1DAC2_NG_ENA_SHIFT 0 /* AIF1DAC2_NG_ENA */
+#define WM8958_AIF1DAC2_NG_ENA_WIDTH 1 /* AIF1DAC2_NG_ENA */
+
+/*
+ * R1088 (0x440) - AIF1 DRC1 (1)
+ */
+#define WM8994_AIF1DRC1_SIG_DET_RMS_MASK 0xF800 /* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC1_SIG_DET_RMS_SHIFT 11 /* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC1_SIG_DET_RMS_WIDTH 5 /* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC1_SIG_DET_PK_MASK 0x0600 /* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC1_SIG_DET_PK_SHIFT 9 /* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC1_SIG_DET_PK_WIDTH 2 /* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC1_NG_ENA 0x0100 /* AIF1DRC1_NG_ENA */
+#define WM8994_AIF1DRC1_NG_ENA_MASK 0x0100 /* AIF1DRC1_NG_ENA */
+#define WM8994_AIF1DRC1_NG_ENA_SHIFT 8 /* AIF1DRC1_NG_ENA */
+#define WM8994_AIF1DRC1_NG_ENA_WIDTH 1 /* AIF1DRC1_NG_ENA */
+#define WM8994_AIF1DRC1_SIG_DET_MODE 0x0080 /* AIF1DRC1_SIG_DET_MODE */
+#define WM8994_AIF1DRC1_SIG_DET_MODE_MASK 0x0080 /* AIF1DRC1_SIG_DET_MODE */
+#define WM8994_AIF1DRC1_SIG_DET_MODE_SHIFT 7 /* AIF1DRC1_SIG_DET_MODE */
+#define WM8994_AIF1DRC1_SIG_DET_MODE_WIDTH 1 /* AIF1DRC1_SIG_DET_MODE */
+#define WM8994_AIF1DRC1_SIG_DET 0x0040 /* AIF1DRC1_SIG_DET */
+#define WM8994_AIF1DRC1_SIG_DET_MASK 0x0040 /* AIF1DRC1_SIG_DET */
+#define WM8994_AIF1DRC1_SIG_DET_SHIFT 6 /* AIF1DRC1_SIG_DET */
+#define WM8994_AIF1DRC1_SIG_DET_WIDTH 1 /* AIF1DRC1_SIG_DET */
+#define WM8994_AIF1DRC1_KNEE2_OP_ENA 0x0020 /* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC1_KNEE2_OP_ENA_MASK 0x0020 /* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC1_KNEE2_OP_ENA_SHIFT 5 /* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC1_KNEE2_OP_ENA_WIDTH 1 /* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC1_QR 0x0010 /* AIF1DRC1_QR */
+#define WM8994_AIF1DRC1_QR_MASK 0x0010 /* AIF1DRC1_QR */
+#define WM8994_AIF1DRC1_QR_SHIFT 4 /* AIF1DRC1_QR */
+#define WM8994_AIF1DRC1_QR_WIDTH 1 /* AIF1DRC1_QR */
+#define WM8994_AIF1DRC1_ANTICLIP 0x0008 /* AIF1DRC1_ANTICLIP */
+#define WM8994_AIF1DRC1_ANTICLIP_MASK 0x0008 /* AIF1DRC1_ANTICLIP */
+#define WM8994_AIF1DRC1_ANTICLIP_SHIFT 3 /* AIF1DRC1_ANTICLIP */
+#define WM8994_AIF1DRC1_ANTICLIP_WIDTH 1 /* AIF1DRC1_ANTICLIP */
+#define WM8994_AIF1DAC1_DRC_ENA 0x0004 /* AIF1DAC1_DRC_ENA */
+#define WM8994_AIF1DAC1_DRC_ENA_MASK 0x0004 /* AIF1DAC1_DRC_ENA */
+#define WM8994_AIF1DAC1_DRC_ENA_SHIFT 2 /* AIF1DAC1_DRC_ENA */
+#define WM8994_AIF1DAC1_DRC_ENA_WIDTH 1 /* AIF1DAC1_DRC_ENA */
+#define WM8994_AIF1ADC1L_DRC_ENA 0x0002 /* AIF1ADC1L_DRC_ENA */
+#define WM8994_AIF1ADC1L_DRC_ENA_MASK 0x0002 /* AIF1ADC1L_DRC_ENA */
+#define WM8994_AIF1ADC1L_DRC_ENA_SHIFT 1 /* AIF1ADC1L_DRC_ENA */
+#define WM8994_AIF1ADC1L_DRC_ENA_WIDTH 1 /* AIF1ADC1L_DRC_ENA */
+#define WM8994_AIF1ADC1R_DRC_ENA 0x0001 /* AIF1ADC1R_DRC_ENA */
+#define WM8994_AIF1ADC1R_DRC_ENA_MASK 0x0001 /* AIF1ADC1R_DRC_ENA */
+#define WM8994_AIF1ADC1R_DRC_ENA_SHIFT 0 /* AIF1ADC1R_DRC_ENA */
+#define WM8994_AIF1ADC1R_DRC_ENA_WIDTH 1 /* AIF1ADC1R_DRC_ENA */
+
+/*
+ * R1089 (0x441) - AIF1 DRC1 (2)
+ */
+#define WM8994_AIF1DRC1_ATK_MASK 0x1E00 /* AIF1DRC1_ATK - [12:9] */
+#define WM8994_AIF1DRC1_ATK_SHIFT 9 /* AIF1DRC1_ATK - [12:9] */
+#define WM8994_AIF1DRC1_ATK_WIDTH 4 /* AIF1DRC1_ATK - [12:9] */
+#define WM8994_AIF1DRC1_DCY_MASK 0x01E0 /* AIF1DRC1_DCY - [8:5] */
+#define WM8994_AIF1DRC1_DCY_SHIFT 5 /* AIF1DRC1_DCY - [8:5] */
+#define WM8994_AIF1DRC1_DCY_WIDTH 4 /* AIF1DRC1_DCY - [8:5] */
+#define WM8994_AIF1DRC1_MINGAIN_MASK 0x001C /* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8994_AIF1DRC1_MINGAIN_SHIFT 2 /* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8994_AIF1DRC1_MINGAIN_WIDTH 3 /* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8994_AIF1DRC1_MAXGAIN_MASK 0x0003 /* AIF1DRC1_MAXGAIN - [1:0] */
+#define WM8994_AIF1DRC1_MAXGAIN_SHIFT 0 /* AIF1DRC1_MAXGAIN - [1:0] */
+#define WM8994_AIF1DRC1_MAXGAIN_WIDTH 2 /* AIF1DRC1_MAXGAIN - [1:0] */
+
+/*
+ * R1090 (0x442) - AIF1 DRC1 (3)
+ */
+#define WM8994_AIF1DRC1_NG_MINGAIN_MASK 0xF000 /* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8994_AIF1DRC1_NG_MINGAIN_SHIFT 12 /* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8994_AIF1DRC1_NG_MINGAIN_WIDTH 4 /* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8994_AIF1DRC1_NG_EXP_MASK 0x0C00 /* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8994_AIF1DRC1_NG_EXP_SHIFT 10 /* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8994_AIF1DRC1_NG_EXP_WIDTH 2 /* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8994_AIF1DRC1_QR_THR_MASK 0x0300 /* AIF1DRC1_QR_THR - [9:8] */
+#define WM8994_AIF1DRC1_QR_THR_SHIFT 8 /* AIF1DRC1_QR_THR - [9:8] */
+#define WM8994_AIF1DRC1_QR_THR_WIDTH 2 /* AIF1DRC1_QR_THR - [9:8] */
+#define WM8994_AIF1DRC1_QR_DCY_MASK 0x00C0 /* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8994_AIF1DRC1_QR_DCY_SHIFT 6 /* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8994_AIF1DRC1_QR_DCY_WIDTH 2 /* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8994_AIF1DRC1_HI_COMP_MASK 0x0038 /* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8994_AIF1DRC1_HI_COMP_SHIFT 3 /* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8994_AIF1DRC1_HI_COMP_WIDTH 3 /* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8994_AIF1DRC1_LO_COMP_MASK 0x0007 /* AIF1DRC1_LO_COMP - [2:0] */
+#define WM8994_AIF1DRC1_LO_COMP_SHIFT 0 /* AIF1DRC1_LO_COMP - [2:0] */
+#define WM8994_AIF1DRC1_LO_COMP_WIDTH 3 /* AIF1DRC1_LO_COMP - [2:0] */
+
+/*
+ * R1091 (0x443) - AIF1 DRC1 (4)
+ */
+#define WM8994_AIF1DRC1_KNEE_IP_MASK 0x07E0 /* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8994_AIF1DRC1_KNEE_IP_SHIFT 5 /* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8994_AIF1DRC1_KNEE_IP_WIDTH 6 /* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8994_AIF1DRC1_KNEE_OP_MASK 0x001F /* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8994_AIF1DRC1_KNEE_OP_SHIFT 0 /* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8994_AIF1DRC1_KNEE_OP_WIDTH 5 /* AIF1DRC1_KNEE_OP - [4:0] */
+
+/*
+ * R1092 (0x444) - AIF1 DRC1 (5)
+ */
+#define WM8994_AIF1DRC1_KNEE2_IP_MASK 0x03E0 /* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC1_KNEE2_IP_SHIFT 5 /* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC1_KNEE2_IP_WIDTH 5 /* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC1_KNEE2_OP_MASK 0x001F /* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8994_AIF1DRC1_KNEE2_OP_SHIFT 0 /* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8994_AIF1DRC1_KNEE2_OP_WIDTH 5 /* AIF1DRC1_KNEE2_OP - [4:0] */
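
As an illustrative aside (editorial, not part of this patch): each bit field above is described by a _MASK/_SHIFT/_WIDTH triple, and a field is normally changed with a read-modify-write so neighbouring fields in the same register are preserved. A minimal sketch, using only the macros defined above and a hypothetical helper name:

/* Editorial sketch: update the DRC attack-rate field inside a cached value
 * of R1089 (0x441).  The masked result would then be written back through
 * whatever register I/O the driver uses (e.g. regmap_update_bits() performs
 * the same masking in a single call). */
static inline unsigned int wm8994_drc1_set_attack(unsigned int reg_val,
						  unsigned int atk)
{
	reg_val &= ~WM8994_AIF1DRC1_ATK_MASK;		/* clear bits [12:9] */
	reg_val |= (atk << WM8994_AIF1DRC1_ATK_SHIFT) &
		   WM8994_AIF1DRC1_ATK_MASK;		/* insert new rate code */
	return reg_val;
}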
+
+/*
+ * R1104 (0x450) - AIF1 DRC2 (1)
+ */
+#define WM8994_AIF1DRC2_SIG_DET_RMS_MASK 0xF800 /* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC2_SIG_DET_RMS_SHIFT 11 /* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC2_SIG_DET_RMS_WIDTH 5 /* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF1DRC2_SIG_DET_PK_MASK 0x0600 /* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC2_SIG_DET_PK_SHIFT 9 /* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC2_SIG_DET_PK_WIDTH 2 /* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8994_AIF1DRC2_NG_ENA 0x0100 /* AIF1DRC2_NG_ENA */
+#define WM8994_AIF1DRC2_NG_ENA_MASK 0x0100 /* AIF1DRC2_NG_ENA */
+#define WM8994_AIF1DRC2_NG_ENA_SHIFT 8 /* AIF1DRC2_NG_ENA */
+#define WM8994_AIF1DRC2_NG_ENA_WIDTH 1 /* AIF1DRC2_NG_ENA */
+#define WM8994_AIF1DRC2_SIG_DET_MODE 0x0080 /* AIF1DRC2_SIG_DET_MODE */
+#define WM8994_AIF1DRC2_SIG_DET_MODE_MASK 0x0080 /* AIF1DRC2_SIG_DET_MODE */
+#define WM8994_AIF1DRC2_SIG_DET_MODE_SHIFT 7 /* AIF1DRC2_SIG_DET_MODE */
+#define WM8994_AIF1DRC2_SIG_DET_MODE_WIDTH 1 /* AIF1DRC2_SIG_DET_MODE */
+#define WM8994_AIF1DRC2_SIG_DET 0x0040 /* AIF1DRC2_SIG_DET */
+#define WM8994_AIF1DRC2_SIG_DET_MASK 0x0040 /* AIF1DRC2_SIG_DET */
+#define WM8994_AIF1DRC2_SIG_DET_SHIFT 6 /* AIF1DRC2_SIG_DET */
+#define WM8994_AIF1DRC2_SIG_DET_WIDTH 1 /* AIF1DRC2_SIG_DET */
+#define WM8994_AIF1DRC2_KNEE2_OP_ENA 0x0020 /* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC2_KNEE2_OP_ENA_MASK 0x0020 /* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC2_KNEE2_OP_ENA_SHIFT 5 /* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC2_KNEE2_OP_ENA_WIDTH 1 /* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8994_AIF1DRC2_QR 0x0010 /* AIF1DRC2_QR */
+#define WM8994_AIF1DRC2_QR_MASK 0x0010 /* AIF1DRC2_QR */
+#define WM8994_AIF1DRC2_QR_SHIFT 4 /* AIF1DRC2_QR */
+#define WM8994_AIF1DRC2_QR_WIDTH 1 /* AIF1DRC2_QR */
+#define WM8994_AIF1DRC2_ANTICLIP 0x0008 /* AIF1DRC2_ANTICLIP */
+#define WM8994_AIF1DRC2_ANTICLIP_MASK 0x0008 /* AIF1DRC2_ANTICLIP */
+#define WM8994_AIF1DRC2_ANTICLIP_SHIFT 3 /* AIF1DRC2_ANTICLIP */
+#define WM8994_AIF1DRC2_ANTICLIP_WIDTH 1 /* AIF1DRC2_ANTICLIP */
+#define WM8994_AIF1DAC2_DRC_ENA 0x0004 /* AIF1DAC2_DRC_ENA */
+#define WM8994_AIF1DAC2_DRC_ENA_MASK 0x0004 /* AIF1DAC2_DRC_ENA */
+#define WM8994_AIF1DAC2_DRC_ENA_SHIFT 2 /* AIF1DAC2_DRC_ENA */
+#define WM8994_AIF1DAC2_DRC_ENA_WIDTH 1 /* AIF1DAC2_DRC_ENA */
+#define WM8994_AIF1ADC2L_DRC_ENA 0x0002 /* AIF1ADC2L_DRC_ENA */
+#define WM8994_AIF1ADC2L_DRC_ENA_MASK 0x0002 /* AIF1ADC2L_DRC_ENA */
+#define WM8994_AIF1ADC2L_DRC_ENA_SHIFT 1 /* AIF1ADC2L_DRC_ENA */
+#define WM8994_AIF1ADC2L_DRC_ENA_WIDTH 1 /* AIF1ADC2L_DRC_ENA */
+#define WM8994_AIF1ADC2R_DRC_ENA 0x0001 /* AIF1ADC2R_DRC_ENA */
+#define WM8994_AIF1ADC2R_DRC_ENA_MASK 0x0001 /* AIF1ADC2R_DRC_ENA */
+#define WM8994_AIF1ADC2R_DRC_ENA_SHIFT 0 /* AIF1ADC2R_DRC_ENA */
+#define WM8994_AIF1ADC2R_DRC_ENA_WIDTH 1 /* AIF1ADC2R_DRC_ENA */
+
+/*
+ * R1105 (0x451) - AIF1 DRC2 (2)
+ */
+#define WM8994_AIF1DRC2_ATK_MASK 0x1E00 /* AIF1DRC2_ATK - [12:9] */
+#define WM8994_AIF1DRC2_ATK_SHIFT 9 /* AIF1DRC2_ATK - [12:9] */
+#define WM8994_AIF1DRC2_ATK_WIDTH 4 /* AIF1DRC2_ATK - [12:9] */
+#define WM8994_AIF1DRC2_DCY_MASK 0x01E0 /* AIF1DRC2_DCY - [8:5] */
+#define WM8994_AIF1DRC2_DCY_SHIFT 5 /* AIF1DRC2_DCY - [8:5] */
+#define WM8994_AIF1DRC2_DCY_WIDTH 4 /* AIF1DRC2_DCY - [8:5] */
+#define WM8994_AIF1DRC2_MINGAIN_MASK 0x001C /* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8994_AIF1DRC2_MINGAIN_SHIFT 2 /* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8994_AIF1DRC2_MINGAIN_WIDTH 3 /* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8994_AIF1DRC2_MAXGAIN_MASK 0x0003 /* AIF1DRC2_MAXGAIN - [1:0] */
+#define WM8994_AIF1DRC2_MAXGAIN_SHIFT 0 /* AIF1DRC2_MAXGAIN - [1:0] */
+#define WM8994_AIF1DRC2_MAXGAIN_WIDTH 2 /* AIF1DRC2_MAXGAIN - [1:0] */
+
+/*
+ * R1106 (0x452) - AIF1 DRC2 (3)
+ */
+#define WM8994_AIF1DRC2_NG_MINGAIN_MASK 0xF000 /* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8994_AIF1DRC2_NG_MINGAIN_SHIFT 12 /* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8994_AIF1DRC2_NG_MINGAIN_WIDTH 4 /* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8994_AIF1DRC2_NG_EXP_MASK 0x0C00 /* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8994_AIF1DRC2_NG_EXP_SHIFT 10 /* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8994_AIF1DRC2_NG_EXP_WIDTH 2 /* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8994_AIF1DRC2_QR_THR_MASK 0x0300 /* AIF1DRC2_QR_THR - [9:8] */
+#define WM8994_AIF1DRC2_QR_THR_SHIFT 8 /* AIF1DRC2_QR_THR - [9:8] */
+#define WM8994_AIF1DRC2_QR_THR_WIDTH 2 /* AIF1DRC2_QR_THR - [9:8] */
+#define WM8994_AIF1DRC2_QR_DCY_MASK 0x00C0 /* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8994_AIF1DRC2_QR_DCY_SHIFT 6 /* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8994_AIF1DRC2_QR_DCY_WIDTH 2 /* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8994_AIF1DRC2_HI_COMP_MASK 0x0038 /* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8994_AIF1DRC2_HI_COMP_SHIFT 3 /* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8994_AIF1DRC2_HI_COMP_WIDTH 3 /* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8994_AIF1DRC2_LO_COMP_MASK 0x0007 /* AIF1DRC2_LO_COMP - [2:0] */
+#define WM8994_AIF1DRC2_LO_COMP_SHIFT 0 /* AIF1DRC2_LO_COMP - [2:0] */
+#define WM8994_AIF1DRC2_LO_COMP_WIDTH 3 /* AIF1DRC2_LO_COMP - [2:0] */
+
+/*
+ * R1107 (0x453) - AIF1 DRC2 (4)
+ */
+#define WM8994_AIF1DRC2_KNEE_IP_MASK 0x07E0 /* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8994_AIF1DRC2_KNEE_IP_SHIFT 5 /* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8994_AIF1DRC2_KNEE_IP_WIDTH 6 /* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8994_AIF1DRC2_KNEE_OP_MASK 0x001F /* AIF1DRC2_KNEE_OP - [4:0] */
+#define WM8994_AIF1DRC2_KNEE_OP_SHIFT 0 /* AIF1DRC2_KNEE_OP - [4:0] */
+#define WM8994_AIF1DRC2_KNEE_OP_WIDTH 5 /* AIF1DRC2_KNEE_OP - [4:0] */
+
+/*
+ * R1108 (0x454) - AIF1 DRC2 (5)
+ */
+#define WM8994_AIF1DRC2_KNEE2_IP_MASK 0x03E0 /* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC2_KNEE2_IP_SHIFT 5 /* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC2_KNEE2_IP_WIDTH 5 /* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8994_AIF1DRC2_KNEE2_OP_MASK 0x001F /* AIF1DRC2_KNEE2_OP - [4:0] */
+#define WM8994_AIF1DRC2_KNEE2_OP_SHIFT 0 /* AIF1DRC2_KNEE2_OP - [4:0] */
+#define WM8994_AIF1DRC2_KNEE2_OP_WIDTH 5 /* AIF1DRC2_KNEE2_OP - [4:0] */
+
+/*
+ * R1152 (0x480) - AIF1 DAC1 EQ Gains (1)
+ */
+#define WM8994_AIF1DAC1_EQ_B1_GAIN_MASK 0xF800 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF1DAC1_EQ_B1_GAIN_SHIFT 11 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF1DAC1_EQ_B1_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF1DAC1_EQ_B2_GAIN_MASK 0x07C0 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF1DAC1_EQ_B2_GAIN_SHIFT 6 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF1DAC1_EQ_B2_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF1DAC1_EQ_B3_GAIN_MASK 0x003E /* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF1DAC1_EQ_B3_GAIN_SHIFT 1 /* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF1DAC1_EQ_B3_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF1DAC1_EQ_ENA 0x0001 /* AIF1DAC1_EQ_ENA */
+#define WM8994_AIF1DAC1_EQ_ENA_MASK 0x0001 /* AIF1DAC1_EQ_ENA */
+#define WM8994_AIF1DAC1_EQ_ENA_SHIFT 0 /* AIF1DAC1_EQ_ENA */
+#define WM8994_AIF1DAC1_EQ_ENA_WIDTH 1 /* AIF1DAC1_EQ_ENA */
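
As a brief, hedged illustration (editorial, not from the upstream header): R1152 packs three 5-bit band gains together with the EQ enable bit, so a complete register value can be assembled from the shifts and masks above. The helper name below is hypothetical:

/* Editorial sketch: build an AIF1 DAC1 EQ Gains (1) value with the EQ
 * enabled and bands 1-3 set to the given gain codes. */
static inline unsigned int wm8994_aif1dac1_eq_gains1(unsigned int b1,
						     unsigned int b2,
						     unsigned int b3)
{
	return ((b1 << WM8994_AIF1DAC1_EQ_B1_GAIN_SHIFT) &
		WM8994_AIF1DAC1_EQ_B1_GAIN_MASK) |
	       ((b2 << WM8994_AIF1DAC1_EQ_B2_GAIN_SHIFT) &
		WM8994_AIF1DAC1_EQ_B2_GAIN_MASK) |
	       ((b3 << WM8994_AIF1DAC1_EQ_B3_GAIN_SHIFT) &
		WM8994_AIF1DAC1_EQ_B3_GAIN_MASK) |
	       WM8994_AIF1DAC1_EQ_ENA;
}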
+
+/*
+ * R1153 (0x481) - AIF1 DAC1 EQ Gains (2)
+ */
+#define WM8994_AIF1DAC1_EQ_B4_GAIN_MASK 0xF800 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF1DAC1_EQ_B4_GAIN_SHIFT 11 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF1DAC1_EQ_B4_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF1DAC1_EQ_B5_GAIN_MASK 0x07C0 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+#define WM8994_AIF1DAC1_EQ_B5_GAIN_SHIFT 6 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+#define WM8994_AIF1DAC1_EQ_B5_GAIN_WIDTH 5 /* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1154 (0x482) - AIF1 DAC1 EQ Band 1 A
+ */
+#define WM8994_AIF1DAC1_EQ_B1_A_MASK 0xFFFF /* AIF1DAC1_EQ_B1_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B1_A_SHIFT 0 /* AIF1DAC1_EQ_B1_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B1_A_WIDTH 16 /* AIF1DAC1_EQ_B1_A - [15:0] */
+
+/*
+ * R1155 (0x483) - AIF1 DAC1 EQ Band 1 B
+ */
+#define WM8994_AIF1DAC1_EQ_B1_B_MASK 0xFFFF /* AIF1DAC1_EQ_B1_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B1_B_SHIFT 0 /* AIF1DAC1_EQ_B1_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B1_B_WIDTH 16 /* AIF1DAC1_EQ_B1_B - [15:0] */
+
+/*
+ * R1156 (0x484) - AIF1 DAC1 EQ Band 1 PG
+ */
+#define WM8994_AIF1DAC1_EQ_B1_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B1_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B1_PG_SHIFT 0 /* AIF1DAC1_EQ_B1_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B1_PG_WIDTH 16 /* AIF1DAC1_EQ_B1_PG - [15:0] */
+
+/*
+ * R1157 (0x485) - AIF1 DAC1 EQ Band 2 A
+ */
+#define WM8994_AIF1DAC1_EQ_B2_A_MASK 0xFFFF /* AIF1DAC1_EQ_B2_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B2_A_SHIFT 0 /* AIF1DAC1_EQ_B2_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B2_A_WIDTH 16 /* AIF1DAC1_EQ_B2_A - [15:0] */
+
+/*
+ * R1158 (0x486) - AIF1 DAC1 EQ Band 2 B
+ */
+#define WM8994_AIF1DAC1_EQ_B2_B_MASK 0xFFFF /* AIF1DAC1_EQ_B2_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B2_B_SHIFT 0 /* AIF1DAC1_EQ_B2_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B2_B_WIDTH 16 /* AIF1DAC1_EQ_B2_B - [15:0] */
+
+/*
+ * R1159 (0x487) - AIF1 DAC1 EQ Band 2 C
+ */
+#define WM8994_AIF1DAC1_EQ_B2_C_MASK 0xFFFF /* AIF1DAC1_EQ_B2_C - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B2_C_SHIFT 0 /* AIF1DAC1_EQ_B2_C - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B2_C_WIDTH 16 /* AIF1DAC1_EQ_B2_C - [15:0] */
+
+/*
+ * R1160 (0x488) - AIF1 DAC1 EQ Band 2 PG
+ */
+#define WM8994_AIF1DAC1_EQ_B2_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B2_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B2_PG_SHIFT 0 /* AIF1DAC1_EQ_B2_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B2_PG_WIDTH 16 /* AIF1DAC1_EQ_B2_PG - [15:0] */
+
+/*
+ * R1161 (0x489) - AIF1 DAC1 EQ Band 3 A
+ */
+#define WM8994_AIF1DAC1_EQ_B3_A_MASK 0xFFFF /* AIF1DAC1_EQ_B3_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B3_A_SHIFT 0 /* AIF1DAC1_EQ_B3_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B3_A_WIDTH 16 /* AIF1DAC1_EQ_B3_A - [15:0] */
+
+/*
+ * R1162 (0x48A) - AIF1 DAC1 EQ Band 3 B
+ */
+#define WM8994_AIF1DAC1_EQ_B3_B_MASK 0xFFFF /* AIF1DAC1_EQ_B3_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B3_B_SHIFT 0 /* AIF1DAC1_EQ_B3_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B3_B_WIDTH 16 /* AIF1DAC1_EQ_B3_B - [15:0] */
+
+/*
+ * R1163 (0x48B) - AIF1 DAC1 EQ Band 3 C
+ */
+#define WM8994_AIF1DAC1_EQ_B3_C_MASK 0xFFFF /* AIF1DAC1_EQ_B3_C - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B3_C_SHIFT 0 /* AIF1DAC1_EQ_B3_C - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B3_C_WIDTH 16 /* AIF1DAC1_EQ_B3_C - [15:0] */
+
+/*
+ * R1164 (0x48C) - AIF1 DAC1 EQ Band 3 PG
+ */
+#define WM8994_AIF1DAC1_EQ_B3_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B3_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B3_PG_SHIFT 0 /* AIF1DAC1_EQ_B3_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B3_PG_WIDTH 16 /* AIF1DAC1_EQ_B3_PG - [15:0] */
+
+/*
+ * R1165 (0x48D) - AIF1 DAC1 EQ Band 4 A
+ */
+#define WM8994_AIF1DAC1_EQ_B4_A_MASK 0xFFFF /* AIF1DAC1_EQ_B4_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B4_A_SHIFT 0 /* AIF1DAC1_EQ_B4_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B4_A_WIDTH 16 /* AIF1DAC1_EQ_B4_A - [15:0] */
+
+/*
+ * R1166 (0x48E) - AIF1 DAC1 EQ Band 4 B
+ */
+#define WM8994_AIF1DAC1_EQ_B4_B_MASK 0xFFFF /* AIF1DAC1_EQ_B4_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B4_B_SHIFT 0 /* AIF1DAC1_EQ_B4_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B4_B_WIDTH 16 /* AIF1DAC1_EQ_B4_B - [15:0] */
+
+/*
+ * R1167 (0x48F) - AIF1 DAC1 EQ Band 4 C
+ */
+#define WM8994_AIF1DAC1_EQ_B4_C_MASK 0xFFFF /* AIF1DAC1_EQ_B4_C - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B4_C_SHIFT 0 /* AIF1DAC1_EQ_B4_C - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B4_C_WIDTH 16 /* AIF1DAC1_EQ_B4_C - [15:0] */
+
+/*
+ * R1168 (0x490) - AIF1 DAC1 EQ Band 4 PG
+ */
+#define WM8994_AIF1DAC1_EQ_B4_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B4_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B4_PG_SHIFT 0 /* AIF1DAC1_EQ_B4_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B4_PG_WIDTH 16 /* AIF1DAC1_EQ_B4_PG - [15:0] */
+
+/*
+ * R1169 (0x491) - AIF1 DAC1 EQ Band 5 A
+ */
+#define WM8994_AIF1DAC1_EQ_B5_A_MASK 0xFFFF /* AIF1DAC1_EQ_B5_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B5_A_SHIFT 0 /* AIF1DAC1_EQ_B5_A - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B5_A_WIDTH 16 /* AIF1DAC1_EQ_B5_A - [15:0] */
+
+/*
+ * R1170 (0x492) - AIF1 DAC1 EQ Band 5 B
+ */
+#define WM8994_AIF1DAC1_EQ_B5_B_MASK 0xFFFF /* AIF1DAC1_EQ_B5_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B5_B_SHIFT 0 /* AIF1DAC1_EQ_B5_B - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B5_B_WIDTH 16 /* AIF1DAC1_EQ_B5_B - [15:0] */
+
+/*
+ * R1171 (0x493) - AIF1 DAC1 EQ Band 5 PG
+ */
+#define WM8994_AIF1DAC1_EQ_B5_PG_MASK 0xFFFF /* AIF1DAC1_EQ_B5_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B5_PG_SHIFT 0 /* AIF1DAC1_EQ_B5_PG - [15:0] */
+#define WM8994_AIF1DAC1_EQ_B5_PG_WIDTH 16 /* AIF1DAC1_EQ_B5_PG - [15:0] */
+
+/*
+ * R1184 (0x4A0) - AIF1 DAC2 EQ Gains (1)
+ */
+#define WM8994_AIF1DAC2_EQ_B1_GAIN_MASK 0xF800 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF1DAC2_EQ_B1_GAIN_SHIFT 11 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF1DAC2_EQ_B1_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF1DAC2_EQ_B2_GAIN_MASK 0x07C0 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF1DAC2_EQ_B2_GAIN_SHIFT 6 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF1DAC2_EQ_B2_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF1DAC2_EQ_B3_GAIN_MASK 0x003E /* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF1DAC2_EQ_B3_GAIN_SHIFT 1 /* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF1DAC2_EQ_B3_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF1DAC2_EQ_ENA 0x0001 /* AIF1DAC2_EQ_ENA */
+#define WM8994_AIF1DAC2_EQ_ENA_MASK 0x0001 /* AIF1DAC2_EQ_ENA */
+#define WM8994_AIF1DAC2_EQ_ENA_SHIFT 0 /* AIF1DAC2_EQ_ENA */
+#define WM8994_AIF1DAC2_EQ_ENA_WIDTH 1 /* AIF1DAC2_EQ_ENA */
+
+/*
+ * R1185 (0x4A1) - AIF1 DAC2 EQ Gains (2)
+ */
+#define WM8994_AIF1DAC2_EQ_B4_GAIN_MASK 0xF800 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF1DAC2_EQ_B4_GAIN_SHIFT 11 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF1DAC2_EQ_B4_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF1DAC2_EQ_B5_GAIN_MASK 0x07C0 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+#define WM8994_AIF1DAC2_EQ_B5_GAIN_SHIFT 6 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+#define WM8994_AIF1DAC2_EQ_B5_GAIN_WIDTH 5 /* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1186 (0x4A2) - AIF1 DAC2 EQ Band 1 A
+ */
+#define WM8994_AIF1DAC2_EQ_B1_A_MASK 0xFFFF /* AIF1DAC2_EQ_B1_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B1_A_SHIFT 0 /* AIF1DAC2_EQ_B1_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B1_A_WIDTH 16 /* AIF1DAC2_EQ_B1_A - [15:0] */
+
+/*
+ * R1187 (0x4A3) - AIF1 DAC2 EQ Band 1 B
+ */
+#define WM8994_AIF1DAC2_EQ_B1_B_MASK 0xFFFF /* AIF1DAC2_EQ_B1_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B1_B_SHIFT 0 /* AIF1DAC2_EQ_B1_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B1_B_WIDTH 16 /* AIF1DAC2_EQ_B1_B - [15:0] */
+
+/*
+ * R1188 (0x4A4) - AIF1 DAC2 EQ Band 1 PG
+ */
+#define WM8994_AIF1DAC2_EQ_B1_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B1_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B1_PG_SHIFT 0 /* AIF1DAC2_EQ_B1_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B1_PG_WIDTH 16 /* AIF1DAC2_EQ_B1_PG - [15:0] */
+
+/*
+ * R1189 (0x4A5) - AIF1 DAC2 EQ Band 2 A
+ */
+#define WM8994_AIF1DAC2_EQ_B2_A_MASK 0xFFFF /* AIF1DAC2_EQ_B2_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B2_A_SHIFT 0 /* AIF1DAC2_EQ_B2_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B2_A_WIDTH 16 /* AIF1DAC2_EQ_B2_A - [15:0] */
+
+/*
+ * R1190 (0x4A6) - AIF1 DAC2 EQ Band 2 B
+ */
+#define WM8994_AIF1DAC2_EQ_B2_B_MASK 0xFFFF /* AIF1DAC2_EQ_B2_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B2_B_SHIFT 0 /* AIF1DAC2_EQ_B2_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B2_B_WIDTH 16 /* AIF1DAC2_EQ_B2_B - [15:0] */
+
+/*
+ * R1191 (0x4A7) - AIF1 DAC2 EQ Band 2 C
+ */
+#define WM8994_AIF1DAC2_EQ_B2_C_MASK 0xFFFF /* AIF1DAC2_EQ_B2_C - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B2_C_SHIFT 0 /* AIF1DAC2_EQ_B2_C - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B2_C_WIDTH 16 /* AIF1DAC2_EQ_B2_C - [15:0] */
+
+/*
+ * R1192 (0x4A8) - AIF1 DAC2 EQ Band 2 PG
+ */
+#define WM8994_AIF1DAC2_EQ_B2_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B2_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B2_PG_SHIFT 0 /* AIF1DAC2_EQ_B2_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B2_PG_WIDTH 16 /* AIF1DAC2_EQ_B2_PG - [15:0] */
+
+/*
+ * R1193 (0x4A9) - AIF1 DAC2 EQ Band 3 A
+ */
+#define WM8994_AIF1DAC2_EQ_B3_A_MASK 0xFFFF /* AIF1DAC2_EQ_B3_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B3_A_SHIFT 0 /* AIF1DAC2_EQ_B3_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B3_A_WIDTH 16 /* AIF1DAC2_EQ_B3_A - [15:0] */
+
+/*
+ * R1194 (0x4AA) - AIF1 DAC2 EQ Band 3 B
+ */
+#define WM8994_AIF1DAC2_EQ_B3_B_MASK 0xFFFF /* AIF1DAC2_EQ_B3_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B3_B_SHIFT 0 /* AIF1DAC2_EQ_B3_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B3_B_WIDTH 16 /* AIF1DAC2_EQ_B3_B - [15:0] */
+
+/*
+ * R1195 (0x4AB) - AIF1 DAC2 EQ Band 3 C
+ */
+#define WM8994_AIF1DAC2_EQ_B3_C_MASK 0xFFFF /* AIF1DAC2_EQ_B3_C - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B3_C_SHIFT 0 /* AIF1DAC2_EQ_B3_C - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B3_C_WIDTH 16 /* AIF1DAC2_EQ_B3_C - [15:0] */
+
+/*
+ * R1196 (0x4AC) - AIF1 DAC2 EQ Band 3 PG
+ */
+#define WM8994_AIF1DAC2_EQ_B3_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B3_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B3_PG_SHIFT 0 /* AIF1DAC2_EQ_B3_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B3_PG_WIDTH 16 /* AIF1DAC2_EQ_B3_PG - [15:0] */
+
+/*
+ * R1197 (0x4AD) - AIF1 DAC2 EQ Band 4 A
+ */
+#define WM8994_AIF1DAC2_EQ_B4_A_MASK 0xFFFF /* AIF1DAC2_EQ_B4_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B4_A_SHIFT 0 /* AIF1DAC2_EQ_B4_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B4_A_WIDTH 16 /* AIF1DAC2_EQ_B4_A - [15:0] */
+
+/*
+ * R1198 (0x4AE) - AIF1 DAC2 EQ Band 4 B
+ */
+#define WM8994_AIF1DAC2_EQ_B4_B_MASK 0xFFFF /* AIF1DAC2_EQ_B4_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B4_B_SHIFT 0 /* AIF1DAC2_EQ_B4_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B4_B_WIDTH 16 /* AIF1DAC2_EQ_B4_B - [15:0] */
+
+/*
+ * R1199 (0x4AF) - AIF1 DAC2 EQ Band 4 C
+ */
+#define WM8994_AIF1DAC2_EQ_B4_C_MASK 0xFFFF /* AIF1DAC2_EQ_B4_C - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B4_C_SHIFT 0 /* AIF1DAC2_EQ_B4_C - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B4_C_WIDTH 16 /* AIF1DAC2_EQ_B4_C - [15:0] */
+
+/*
+ * R1200 (0x4B0) - AIF1 DAC2 EQ Band 4 PG
+ */
+#define WM8994_AIF1DAC2_EQ_B4_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B4_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B4_PG_SHIFT 0 /* AIF1DAC2_EQ_B4_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B4_PG_WIDTH 16 /* AIF1DAC2_EQ_B4_PG - [15:0] */
+
+/*
+ * R1201 (0x4B1) - AIF1 DAC2 EQ Band 5 A
+ */
+#define WM8994_AIF1DAC2_EQ_B5_A_MASK 0xFFFF /* AIF1DAC2_EQ_B5_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B5_A_SHIFT 0 /* AIF1DAC2_EQ_B5_A - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B5_A_WIDTH 16 /* AIF1DAC2_EQ_B5_A - [15:0] */
+
+/*
+ * R1202 (0x4B2) - AIF1 DAC2 EQ Band 5 B
+ */
+#define WM8994_AIF1DAC2_EQ_B5_B_MASK 0xFFFF /* AIF1DAC2_EQ_B5_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B5_B_SHIFT 0 /* AIF1DAC2_EQ_B5_B - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B5_B_WIDTH 16 /* AIF1DAC2_EQ_B5_B - [15:0] */
+
+/*
+ * R1203 (0x4B3) - AIF1 DAC2 EQ Band 5 PG
+ */
+#define WM8994_AIF1DAC2_EQ_B5_PG_MASK 0xFFFF /* AIF1DAC2_EQ_B5_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B5_PG_SHIFT 0 /* AIF1DAC2_EQ_B5_PG - [15:0] */
+#define WM8994_AIF1DAC2_EQ_B5_PG_WIDTH 16 /* AIF1DAC2_EQ_B5_PG - [15:0] */
+
+/*
+ * R1280 (0x500) - AIF2 ADC Left Volume
+ */
+#define WM8994_AIF2ADC_VU 0x0100 /* AIF2ADC_VU */
+#define WM8994_AIF2ADC_VU_MASK 0x0100 /* AIF2ADC_VU */
+#define WM8994_AIF2ADC_VU_SHIFT 8 /* AIF2ADC_VU */
+#define WM8994_AIF2ADC_VU_WIDTH 1 /* AIF2ADC_VU */
+#define WM8994_AIF2ADCL_VOL_MASK 0x00FF /* AIF2ADCL_VOL - [7:0] */
+#define WM8994_AIF2ADCL_VOL_SHIFT 0 /* AIF2ADCL_VOL - [7:0] */
+#define WM8994_AIF2ADCL_VOL_WIDTH 8 /* AIF2ADCL_VOL - [7:0] */
+
+/*
+ * R1281 (0x501) - AIF2 ADC Right Volume
+ */
+#define WM8994_AIF2ADC_VU 0x0100 /* AIF2ADC_VU */
+#define WM8994_AIF2ADC_VU_MASK 0x0100 /* AIF2ADC_VU */
+#define WM8994_AIF2ADC_VU_SHIFT 8 /* AIF2ADC_VU */
+#define WM8994_AIF2ADC_VU_WIDTH 1 /* AIF2ADC_VU */
+#define WM8994_AIF2ADCR_VOL_MASK 0x00FF /* AIF2ADCR_VOL - [7:0] */
+#define WM8994_AIF2ADCR_VOL_SHIFT 0 /* AIF2ADCR_VOL - [7:0] */
+#define WM8994_AIF2ADCR_VOL_WIDTH 8 /* AIF2ADCR_VOL - [7:0] */
+
+/*
+ * R1282 (0x502) - AIF2 DAC Left Volume
+ */
+#define WM8994_AIF2DAC_VU 0x0100 /* AIF2DAC_VU */
+#define WM8994_AIF2DAC_VU_MASK 0x0100 /* AIF2DAC_VU */
+#define WM8994_AIF2DAC_VU_SHIFT 8 /* AIF2DAC_VU */
+#define WM8994_AIF2DAC_VU_WIDTH 1 /* AIF2DAC_VU */
+#define WM8994_AIF2DACL_VOL_MASK 0x00FF /* AIF2DACL_VOL - [7:0] */
+#define WM8994_AIF2DACL_VOL_SHIFT 0 /* AIF2DACL_VOL - [7:0] */
+#define WM8994_AIF2DACL_VOL_WIDTH 8 /* AIF2DACL_VOL - [7:0] */
+
+/*
+ * R1283 (0x503) - AIF2 DAC Right Volume
+ */
+#define WM8994_AIF2DAC_VU 0x0100 /* AIF2DAC_VU */
+#define WM8994_AIF2DAC_VU_MASK 0x0100 /* AIF2DAC_VU */
+#define WM8994_AIF2DAC_VU_SHIFT 8 /* AIF2DAC_VU */
+#define WM8994_AIF2DAC_VU_WIDTH 1 /* AIF2DAC_VU */
+#define WM8994_AIF2DACR_VOL_MASK 0x00FF /* AIF2DACR_VOL - [7:0] */
+#define WM8994_AIF2DACR_VOL_SHIFT 0 /* AIF2DACR_VOL - [7:0] */
+#define WM8994_AIF2DACR_VOL_WIDTH 8 /* AIF2DACR_VOL - [7:0] */
+
+/*
+ * R1296 (0x510) - AIF2 ADC Filters
+ */
+#define WM8994_AIF2ADC_4FS 0x8000 /* AIF2ADC_4FS */
+#define WM8994_AIF2ADC_4FS_MASK 0x8000 /* AIF2ADC_4FS */
+#define WM8994_AIF2ADC_4FS_SHIFT 15 /* AIF2ADC_4FS */
+#define WM8994_AIF2ADC_4FS_WIDTH 1 /* AIF2ADC_4FS */
+#define WM8994_AIF2ADC_HPF_CUT_MASK 0x6000 /* AIF2ADC_HPF_CUT - [14:13] */
+#define WM8994_AIF2ADC_HPF_CUT_SHIFT 13 /* AIF2ADC_HPF_CUT - [14:13] */
+#define WM8994_AIF2ADC_HPF_CUT_WIDTH 2 /* AIF2ADC_HPF_CUT - [14:13] */
+#define WM8994_AIF2ADCL_HPF 0x1000 /* AIF2ADCL_HPF */
+#define WM8994_AIF2ADCL_HPF_MASK 0x1000 /* AIF2ADCL_HPF */
+#define WM8994_AIF2ADCL_HPF_SHIFT 12 /* AIF2ADCL_HPF */
+#define WM8994_AIF2ADCL_HPF_WIDTH 1 /* AIF2ADCL_HPF */
+#define WM8994_AIF2ADCR_HPF 0x0800 /* AIF2ADCR_HPF */
+#define WM8994_AIF2ADCR_HPF_MASK 0x0800 /* AIF2ADCR_HPF */
+#define WM8994_AIF2ADCR_HPF_SHIFT 11 /* AIF2ADCR_HPF */
+#define WM8994_AIF2ADCR_HPF_WIDTH 1 /* AIF2ADCR_HPF */
+
+/*
+ * R1312 (0x520) - AIF2 DAC Filters (1)
+ */
+#define WM8994_AIF2DAC_MUTE 0x0200 /* AIF2DAC_MUTE */
+#define WM8994_AIF2DAC_MUTE_MASK 0x0200 /* AIF2DAC_MUTE */
+#define WM8994_AIF2DAC_MUTE_SHIFT 9 /* AIF2DAC_MUTE */
+#define WM8994_AIF2DAC_MUTE_WIDTH 1 /* AIF2DAC_MUTE */
+#define WM8994_AIF2DAC_MONO 0x0080 /* AIF2DAC_MONO */
+#define WM8994_AIF2DAC_MONO_MASK 0x0080 /* AIF2DAC_MONO */
+#define WM8994_AIF2DAC_MONO_SHIFT 7 /* AIF2DAC_MONO */
+#define WM8994_AIF2DAC_MONO_WIDTH 1 /* AIF2DAC_MONO */
+#define WM8994_AIF2DAC_MUTERATE 0x0020 /* AIF2DAC_MUTERATE */
+#define WM8994_AIF2DAC_MUTERATE_MASK 0x0020 /* AIF2DAC_MUTERATE */
+#define WM8994_AIF2DAC_MUTERATE_SHIFT 5 /* AIF2DAC_MUTERATE */
+#define WM8994_AIF2DAC_MUTERATE_WIDTH 1 /* AIF2DAC_MUTERATE */
+#define WM8994_AIF2DAC_UNMUTE_RAMP 0x0010 /* AIF2DAC_UNMUTE_RAMP */
+#define WM8994_AIF2DAC_UNMUTE_RAMP_MASK 0x0010 /* AIF2DAC_UNMUTE_RAMP */
+#define WM8994_AIF2DAC_UNMUTE_RAMP_SHIFT 4 /* AIF2DAC_UNMUTE_RAMP */
+#define WM8994_AIF2DAC_UNMUTE_RAMP_WIDTH 1 /* AIF2DAC_UNMUTE_RAMP */
+#define WM8994_AIF2DAC_DEEMP_MASK 0x0006 /* AIF2DAC_DEEMP - [2:1] */
+#define WM8994_AIF2DAC_DEEMP_SHIFT 1 /* AIF2DAC_DEEMP - [2:1] */
+#define WM8994_AIF2DAC_DEEMP_WIDTH 2 /* AIF2DAC_DEEMP - [2:1] */
+
+/*
+ * R1313 (0x521) - AIF2 DAC Filters (2)
+ */
+#define WM8994_AIF2DAC_3D_GAIN_MASK 0x3E00 /* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8994_AIF2DAC_3D_GAIN_SHIFT 9 /* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8994_AIF2DAC_3D_GAIN_WIDTH 5 /* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8994_AIF2DAC_3D_ENA 0x0100 /* AIF2DAC_3D_ENA */
+#define WM8994_AIF2DAC_3D_ENA_MASK 0x0100 /* AIF2DAC_3D_ENA */
+#define WM8994_AIF2DAC_3D_ENA_SHIFT 8 /* AIF2DAC_3D_ENA */
+#define WM8994_AIF2DAC_3D_ENA_WIDTH 1 /* AIF2DAC_3D_ENA */
+
+/*
+ * R1328 (0x530) - AIF2 DAC Noise Gate
+ */
+#define WM8958_AIF2DAC_NG_HLD_MASK 0x0060 /* AIF2DAC_NG_HLD - [6:5] */
+#define WM8958_AIF2DAC_NG_HLD_SHIFT 5 /* AIF2DAC_NG_HLD - [6:5] */
+#define WM8958_AIF2DAC_NG_HLD_WIDTH 2 /* AIF2DAC_NG_HLD - [6:5] */
+#define WM8958_AIF2DAC_NG_THR_MASK 0x000E /* AIF2DAC_NG_THR - [3:1] */
+#define WM8958_AIF2DAC_NG_THR_SHIFT 1 /* AIF2DAC_NG_THR - [3:1] */
+#define WM8958_AIF2DAC_NG_THR_WIDTH 3 /* AIF2DAC_NG_THR - [3:1] */
+#define WM8958_AIF2DAC_NG_ENA 0x0001 /* AIF2DAC_NG_ENA */
+#define WM8958_AIF2DAC_NG_ENA_MASK 0x0001 /* AIF2DAC_NG_ENA */
+#define WM8958_AIF2DAC_NG_ENA_SHIFT 0 /* AIF2DAC_NG_ENA */
+#define WM8958_AIF2DAC_NG_ENA_WIDTH 1 /* AIF2DAC_NG_ENA */
+
+/*
+ * R1344 (0x540) - AIF2 DRC (1)
+ */
+#define WM8994_AIF2DRC_SIG_DET_RMS_MASK 0xF800 /* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF2DRC_SIG_DET_RMS_SHIFT 11 /* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF2DRC_SIG_DET_RMS_WIDTH 5 /* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8994_AIF2DRC_SIG_DET_PK_MASK 0x0600 /* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8994_AIF2DRC_SIG_DET_PK_SHIFT 9 /* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8994_AIF2DRC_SIG_DET_PK_WIDTH 2 /* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8994_AIF2DRC_NG_ENA 0x0100 /* AIF2DRC_NG_ENA */
+#define WM8994_AIF2DRC_NG_ENA_MASK 0x0100 /* AIF2DRC_NG_ENA */
+#define WM8994_AIF2DRC_NG_ENA_SHIFT 8 /* AIF2DRC_NG_ENA */
+#define WM8994_AIF2DRC_NG_ENA_WIDTH 1 /* AIF2DRC_NG_ENA */
+#define WM8994_AIF2DRC_SIG_DET_MODE 0x0080 /* AIF2DRC_SIG_DET_MODE */
+#define WM8994_AIF2DRC_SIG_DET_MODE_MASK 0x0080 /* AIF2DRC_SIG_DET_MODE */
+#define WM8994_AIF2DRC_SIG_DET_MODE_SHIFT 7 /* AIF2DRC_SIG_DET_MODE */
+#define WM8994_AIF2DRC_SIG_DET_MODE_WIDTH 1 /* AIF2DRC_SIG_DET_MODE */
+#define WM8994_AIF2DRC_SIG_DET 0x0040 /* AIF2DRC_SIG_DET */
+#define WM8994_AIF2DRC_SIG_DET_MASK 0x0040 /* AIF2DRC_SIG_DET */
+#define WM8994_AIF2DRC_SIG_DET_SHIFT 6 /* AIF2DRC_SIG_DET */
+#define WM8994_AIF2DRC_SIG_DET_WIDTH 1 /* AIF2DRC_SIG_DET */
+#define WM8994_AIF2DRC_KNEE2_OP_ENA 0x0020 /* AIF2DRC_KNEE2_OP_ENA */
+#define WM8994_AIF2DRC_KNEE2_OP_ENA_MASK 0x0020 /* AIF2DRC_KNEE2_OP_ENA */
+#define WM8994_AIF2DRC_KNEE2_OP_ENA_SHIFT 5 /* AIF2DRC_KNEE2_OP_ENA */
+#define WM8994_AIF2DRC_KNEE2_OP_ENA_WIDTH 1 /* AIF2DRC_KNEE2_OP_ENA */
+#define WM8994_AIF2DRC_QR 0x0010 /* AIF2DRC_QR */
+#define WM8994_AIF2DRC_QR_MASK 0x0010 /* AIF2DRC_QR */
+#define WM8994_AIF2DRC_QR_SHIFT 4 /* AIF2DRC_QR */
+#define WM8994_AIF2DRC_QR_WIDTH 1 /* AIF2DRC_QR */
+#define WM8994_AIF2DRC_ANTICLIP 0x0008 /* AIF2DRC_ANTICLIP */
+#define WM8994_AIF2DRC_ANTICLIP_MASK 0x0008 /* AIF2DRC_ANTICLIP */
+#define WM8994_AIF2DRC_ANTICLIP_SHIFT 3 /* AIF2DRC_ANTICLIP */
+#define WM8994_AIF2DRC_ANTICLIP_WIDTH 1 /* AIF2DRC_ANTICLIP */
+#define WM8994_AIF2DAC_DRC_ENA 0x0004 /* AIF2DAC_DRC_ENA */
+#define WM8994_AIF2DAC_DRC_ENA_MASK 0x0004 /* AIF2DAC_DRC_ENA */
+#define WM8994_AIF2DAC_DRC_ENA_SHIFT 2 /* AIF2DAC_DRC_ENA */
+#define WM8994_AIF2DAC_DRC_ENA_WIDTH 1 /* AIF2DAC_DRC_ENA */
+#define WM8994_AIF2ADCL_DRC_ENA 0x0002 /* AIF2ADCL_DRC_ENA */
+#define WM8994_AIF2ADCL_DRC_ENA_MASK 0x0002 /* AIF2ADCL_DRC_ENA */
+#define WM8994_AIF2ADCL_DRC_ENA_SHIFT 1 /* AIF2ADCL_DRC_ENA */
+#define WM8994_AIF2ADCL_DRC_ENA_WIDTH 1 /* AIF2ADCL_DRC_ENA */
+#define WM8994_AIF2ADCR_DRC_ENA 0x0001 /* AIF2ADCR_DRC_ENA */
+#define WM8994_AIF2ADCR_DRC_ENA_MASK 0x0001 /* AIF2ADCR_DRC_ENA */
+#define WM8994_AIF2ADCR_DRC_ENA_SHIFT 0 /* AIF2ADCR_DRC_ENA */
+#define WM8994_AIF2ADCR_DRC_ENA_WIDTH 1 /* AIF2ADCR_DRC_ENA */
+
+/*
+ * R1345 (0x541) - AIF2 DRC (2)
+ */
+#define WM8994_AIF2DRC_ATK_MASK 0x1E00 /* AIF2DRC_ATK - [12:9] */
+#define WM8994_AIF2DRC_ATK_SHIFT 9 /* AIF2DRC_ATK - [12:9] */
+#define WM8994_AIF2DRC_ATK_WIDTH 4 /* AIF2DRC_ATK - [12:9] */
+#define WM8994_AIF2DRC_DCY_MASK 0x01E0 /* AIF2DRC_DCY - [8:5] */
+#define WM8994_AIF2DRC_DCY_SHIFT 5 /* AIF2DRC_DCY - [8:5] */
+#define WM8994_AIF2DRC_DCY_WIDTH 4 /* AIF2DRC_DCY - [8:5] */
+#define WM8994_AIF2DRC_MINGAIN_MASK 0x001C /* AIF2DRC_MINGAIN - [4:2] */
+#define WM8994_AIF2DRC_MINGAIN_SHIFT 2 /* AIF2DRC_MINGAIN - [4:2] */
+#define WM8994_AIF2DRC_MINGAIN_WIDTH 3 /* AIF2DRC_MINGAIN - [4:2] */
+#define WM8994_AIF2DRC_MAXGAIN_MASK 0x0003 /* AIF2DRC_MAXGAIN - [1:0] */
+#define WM8994_AIF2DRC_MAXGAIN_SHIFT 0 /* AIF2DRC_MAXGAIN - [1:0] */
+#define WM8994_AIF2DRC_MAXGAIN_WIDTH 2 /* AIF2DRC_MAXGAIN - [1:0] */
+
+/*
+ * R1346 (0x542) - AIF2 DRC (3)
+ */
+#define WM8994_AIF2DRC_NG_MINGAIN_MASK 0xF000 /* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8994_AIF2DRC_NG_MINGAIN_SHIFT 12 /* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8994_AIF2DRC_NG_MINGAIN_WIDTH 4 /* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8994_AIF2DRC_NG_EXP_MASK 0x0C00 /* AIF2DRC_NG_EXP - [11:10] */
+#define WM8994_AIF2DRC_NG_EXP_SHIFT 10 /* AIF2DRC_NG_EXP - [11:10] */
+#define WM8994_AIF2DRC_NG_EXP_WIDTH 2 /* AIF2DRC_NG_EXP - [11:10] */
+#define WM8994_AIF2DRC_QR_THR_MASK 0x0300 /* AIF2DRC_QR_THR - [9:8] */
+#define WM8994_AIF2DRC_QR_THR_SHIFT 8 /* AIF2DRC_QR_THR - [9:8] */
+#define WM8994_AIF2DRC_QR_THR_WIDTH 2 /* AIF2DRC_QR_THR - [9:8] */
+#define WM8994_AIF2DRC_QR_DCY_MASK 0x00C0 /* AIF2DRC_QR_DCY - [7:6] */
+#define WM8994_AIF2DRC_QR_DCY_SHIFT 6 /* AIF2DRC_QR_DCY - [7:6] */
+#define WM8994_AIF2DRC_QR_DCY_WIDTH 2 /* AIF2DRC_QR_DCY - [7:6] */
+#define WM8994_AIF2DRC_HI_COMP_MASK 0x0038 /* AIF2DRC_HI_COMP - [5:3] */
+#define WM8994_AIF2DRC_HI_COMP_SHIFT 3 /* AIF2DRC_HI_COMP - [5:3] */
+#define WM8994_AIF2DRC_HI_COMP_WIDTH 3 /* AIF2DRC_HI_COMP - [5:3] */
+#define WM8994_AIF2DRC_LO_COMP_MASK 0x0007 /* AIF2DRC_LO_COMP - [2:0] */
+#define WM8994_AIF2DRC_LO_COMP_SHIFT 0 /* AIF2DRC_LO_COMP - [2:0] */
+#define WM8994_AIF2DRC_LO_COMP_WIDTH 3 /* AIF2DRC_LO_COMP - [2:0] */
+
+/*
+ * R1347 (0x543) - AIF2 DRC (4)
+ */
+#define WM8994_AIF2DRC_KNEE_IP_MASK 0x07E0 /* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8994_AIF2DRC_KNEE_IP_SHIFT 5 /* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8994_AIF2DRC_KNEE_IP_WIDTH 6 /* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8994_AIF2DRC_KNEE_OP_MASK 0x001F /* AIF2DRC_KNEE_OP - [4:0] */
+#define WM8994_AIF2DRC_KNEE_OP_SHIFT 0 /* AIF2DRC_KNEE_OP - [4:0] */
+#define WM8994_AIF2DRC_KNEE_OP_WIDTH 5 /* AIF2DRC_KNEE_OP - [4:0] */
+
+/*
+ * R1348 (0x544) - AIF2 DRC (5)
+ */
+#define WM8994_AIF2DRC_KNEE2_IP_MASK 0x03E0 /* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8994_AIF2DRC_KNEE2_IP_SHIFT 5 /* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8994_AIF2DRC_KNEE2_IP_WIDTH 5 /* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8994_AIF2DRC_KNEE2_OP_MASK 0x001F /* AIF2DRC_KNEE2_OP - [4:0] */
+#define WM8994_AIF2DRC_KNEE2_OP_SHIFT 0 /* AIF2DRC_KNEE2_OP - [4:0] */
+#define WM8994_AIF2DRC_KNEE2_OP_WIDTH 5 /* AIF2DRC_KNEE2_OP - [4:0] */
+
+/*
+ * R1408 (0x580) - AIF2 EQ Gains (1)
+ */
+#define WM8994_AIF2DAC_EQ_B1_GAIN_MASK 0xF800 /* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF2DAC_EQ_B1_GAIN_SHIFT 11 /* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF2DAC_EQ_B1_GAIN_WIDTH 5 /* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8994_AIF2DAC_EQ_B2_GAIN_MASK 0x07C0 /* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF2DAC_EQ_B2_GAIN_SHIFT 6 /* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF2DAC_EQ_B2_GAIN_WIDTH 5 /* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8994_AIF2DAC_EQ_B3_GAIN_MASK 0x003E /* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF2DAC_EQ_B3_GAIN_SHIFT 1 /* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF2DAC_EQ_B3_GAIN_WIDTH 5 /* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8994_AIF2DAC_EQ_ENA 0x0001 /* AIF2DAC_EQ_ENA */
+#define WM8994_AIF2DAC_EQ_ENA_MASK 0x0001 /* AIF2DAC_EQ_ENA */
+#define WM8994_AIF2DAC_EQ_ENA_SHIFT 0 /* AIF2DAC_EQ_ENA */
+#define WM8994_AIF2DAC_EQ_ENA_WIDTH 1 /* AIF2DAC_EQ_ENA */
+
+/*
+ * R1409 (0x581) - AIF2 EQ Gains (2)
+ */
+#define WM8994_AIF2DAC_EQ_B4_GAIN_MASK 0xF800 /* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF2DAC_EQ_B4_GAIN_SHIFT 11 /* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF2DAC_EQ_B4_GAIN_WIDTH 5 /* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8994_AIF2DAC_EQ_B5_GAIN_MASK 0x07C0 /* AIF2DAC_EQ_B5_GAIN - [10:6] */
+#define WM8994_AIF2DAC_EQ_B5_GAIN_SHIFT 6 /* AIF2DAC_EQ_B5_GAIN - [10:6] */
+#define WM8994_AIF2DAC_EQ_B5_GAIN_WIDTH 5 /* AIF2DAC_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1410 (0x582) - AIF2 EQ Band 1 A
+ */
+#define WM8994_AIF2DAC_EQ_B1_A_MASK 0xFFFF /* AIF2DAC_EQ_B1_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B1_A_SHIFT 0 /* AIF2DAC_EQ_B1_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B1_A_WIDTH 16 /* AIF2DAC_EQ_B1_A - [15:0] */
+
+/*
+ * R1411 (0x583) - AIF2 EQ Band 1 B
+ */
+#define WM8994_AIF2DAC_EQ_B1_B_MASK 0xFFFF /* AIF2DAC_EQ_B1_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B1_B_SHIFT 0 /* AIF2DAC_EQ_B1_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B1_B_WIDTH 16 /* AIF2DAC_EQ_B1_B - [15:0] */
+
+/*
+ * R1412 (0x584) - AIF2 EQ Band 1 PG
+ */
+#define WM8994_AIF2DAC_EQ_B1_PG_MASK 0xFFFF /* AIF2DAC_EQ_B1_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B1_PG_SHIFT 0 /* AIF2DAC_EQ_B1_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B1_PG_WIDTH 16 /* AIF2DAC_EQ_B1_PG - [15:0] */
+
+/*
+ * R1413 (0x585) - AIF2 EQ Band 2 A
+ */
+#define WM8994_AIF2DAC_EQ_B2_A_MASK 0xFFFF /* AIF2DAC_EQ_B2_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B2_A_SHIFT 0 /* AIF2DAC_EQ_B2_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B2_A_WIDTH 16 /* AIF2DAC_EQ_B2_A - [15:0] */
+
+/*
+ * R1414 (0x586) - AIF2 EQ Band 2 B
+ */
+#define WM8994_AIF2DAC_EQ_B2_B_MASK 0xFFFF /* AIF2DAC_EQ_B2_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B2_B_SHIFT 0 /* AIF2DAC_EQ_B2_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B2_B_WIDTH 16 /* AIF2DAC_EQ_B2_B - [15:0] */
+
+/*
+ * R1415 (0x587) - AIF2 EQ Band 2 C
+ */
+#define WM8994_AIF2DAC_EQ_B2_C_MASK 0xFFFF /* AIF2DAC_EQ_B2_C - [15:0] */
+#define WM8994_AIF2DAC_EQ_B2_C_SHIFT 0 /* AIF2DAC_EQ_B2_C - [15:0] */
+#define WM8994_AIF2DAC_EQ_B2_C_WIDTH 16 /* AIF2DAC_EQ_B2_C - [15:0] */
+
+/*
+ * R1416 (0x588) - AIF2 EQ Band 2 PG
+ */
+#define WM8994_AIF2DAC_EQ_B2_PG_MASK 0xFFFF /* AIF2DAC_EQ_B2_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B2_PG_SHIFT 0 /* AIF2DAC_EQ_B2_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B2_PG_WIDTH 16 /* AIF2DAC_EQ_B2_PG - [15:0] */
+
+/*
+ * R1417 (0x589) - AIF2 EQ Band 3 A
+ */
+#define WM8994_AIF2DAC_EQ_B3_A_MASK 0xFFFF /* AIF2DAC_EQ_B3_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B3_A_SHIFT 0 /* AIF2DAC_EQ_B3_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B3_A_WIDTH 16 /* AIF2DAC_EQ_B3_A - [15:0] */
+
+/*
+ * R1418 (0x58A) - AIF2 EQ Band 3 B
+ */
+#define WM8994_AIF2DAC_EQ_B3_B_MASK 0xFFFF /* AIF2DAC_EQ_B3_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B3_B_SHIFT 0 /* AIF2DAC_EQ_B3_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B3_B_WIDTH 16 /* AIF2DAC_EQ_B3_B - [15:0] */
+
+/*
+ * R1419 (0x58B) - AIF2 EQ Band 3 C
+ */
+#define WM8994_AIF2DAC_EQ_B3_C_MASK 0xFFFF /* AIF2DAC_EQ_B3_C - [15:0] */
+#define WM8994_AIF2DAC_EQ_B3_C_SHIFT 0 /* AIF2DAC_EQ_B3_C - [15:0] */
+#define WM8994_AIF2DAC_EQ_B3_C_WIDTH 16 /* AIF2DAC_EQ_B3_C - [15:0] */
+
+/*
+ * R1420 (0x58C) - AIF2 EQ Band 3 PG
+ */
+#define WM8994_AIF2DAC_EQ_B3_PG_MASK 0xFFFF /* AIF2DAC_EQ_B3_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B3_PG_SHIFT 0 /* AIF2DAC_EQ_B3_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B3_PG_WIDTH 16 /* AIF2DAC_EQ_B3_PG - [15:0] */
+
+/*
+ * R1421 (0x58D) - AIF2 EQ Band 4 A
+ */
+#define WM8994_AIF2DAC_EQ_B4_A_MASK 0xFFFF /* AIF2DAC_EQ_B4_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B4_A_SHIFT 0 /* AIF2DAC_EQ_B4_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B4_A_WIDTH 16 /* AIF2DAC_EQ_B4_A - [15:0] */
+
+/*
+ * R1422 (0x58E) - AIF2 EQ Band 4 B
+ */
+#define WM8994_AIF2DAC_EQ_B4_B_MASK 0xFFFF /* AIF2DAC_EQ_B4_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B4_B_SHIFT 0 /* AIF2DAC_EQ_B4_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B4_B_WIDTH 16 /* AIF2DAC_EQ_B4_B - [15:0] */
+
+/*
+ * R1423 (0x58F) - AIF2 EQ Band 4 C
+ */
+#define WM8994_AIF2DAC_EQ_B4_C_MASK 0xFFFF /* AIF2DAC_EQ_B4_C - [15:0] */
+#define WM8994_AIF2DAC_EQ_B4_C_SHIFT 0 /* AIF2DAC_EQ_B4_C - [15:0] */
+#define WM8994_AIF2DAC_EQ_B4_C_WIDTH 16 /* AIF2DAC_EQ_B4_C - [15:0] */
+
+/*
+ * R1424 (0x590) - AIF2 EQ Band 4 PG
+ */
+#define WM8994_AIF2DAC_EQ_B4_PG_MASK 0xFFFF /* AIF2DAC_EQ_B4_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B4_PG_SHIFT 0 /* AIF2DAC_EQ_B4_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B4_PG_WIDTH 16 /* AIF2DAC_EQ_B4_PG - [15:0] */
+
+/*
+ * R1425 (0x591) - AIF2 EQ Band 5 A
+ */
+#define WM8994_AIF2DAC_EQ_B5_A_MASK 0xFFFF /* AIF2DAC_EQ_B5_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B5_A_SHIFT 0 /* AIF2DAC_EQ_B5_A - [15:0] */
+#define WM8994_AIF2DAC_EQ_B5_A_WIDTH 16 /* AIF2DAC_EQ_B5_A - [15:0] */
+
+/*
+ * R1426 (0x592) - AIF2 EQ Band 5 B
+ */
+#define WM8994_AIF2DAC_EQ_B5_B_MASK 0xFFFF /* AIF2DAC_EQ_B5_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B5_B_SHIFT 0 /* AIF2DAC_EQ_B5_B - [15:0] */
+#define WM8994_AIF2DAC_EQ_B5_B_WIDTH 16 /* AIF2DAC_EQ_B5_B - [15:0] */
+
+/*
+ * R1427 (0x593) - AIF2 EQ Band 5 PG
+ */
+#define WM8994_AIF2DAC_EQ_B5_PG_MASK 0xFFFF /* AIF2DAC_EQ_B5_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B5_PG_SHIFT 0 /* AIF2DAC_EQ_B5_PG - [15:0] */
+#define WM8994_AIF2DAC_EQ_B5_PG_WIDTH 16 /* AIF2DAC_EQ_B5_PG - [15:0] */
+
+/*
+ * R1536 (0x600) - DAC1 Mixer Volumes
+ */
+#define WM8994_ADCR_DAC1_VOL_MASK 0x01E0 /* ADCR_DAC1_VOL - [8:5] */
+#define WM8994_ADCR_DAC1_VOL_SHIFT 5 /* ADCR_DAC1_VOL - [8:5] */
+#define WM8994_ADCR_DAC1_VOL_WIDTH 4 /* ADCR_DAC1_VOL - [8:5] */
+#define WM8994_ADCL_DAC1_VOL_MASK 0x000F /* ADCL_DAC1_VOL - [3:0] */
+#define WM8994_ADCL_DAC1_VOL_SHIFT 0 /* ADCL_DAC1_VOL - [3:0] */
+#define WM8994_ADCL_DAC1_VOL_WIDTH 4 /* ADCL_DAC1_VOL - [3:0] */
+
+/*
+ * R1537 (0x601) - DAC1 Left Mixer Routing
+ */
+#define WM8994_ADCR_TO_DAC1L 0x0020 /* ADCR_TO_DAC1L */
+#define WM8994_ADCR_TO_DAC1L_MASK 0x0020 /* ADCR_TO_DAC1L */
+#define WM8994_ADCR_TO_DAC1L_SHIFT 5 /* ADCR_TO_DAC1L */
+#define WM8994_ADCR_TO_DAC1L_WIDTH 1 /* ADCR_TO_DAC1L */
+#define WM8994_ADCL_TO_DAC1L 0x0010 /* ADCL_TO_DAC1L */
+#define WM8994_ADCL_TO_DAC1L_MASK 0x0010 /* ADCL_TO_DAC1L */
+#define WM8994_ADCL_TO_DAC1L_SHIFT 4 /* ADCL_TO_DAC1L */
+#define WM8994_ADCL_TO_DAC1L_WIDTH 1 /* ADCL_TO_DAC1L */
+#define WM8994_AIF2DACL_TO_DAC1L 0x0004 /* AIF2DACL_TO_DAC1L */
+#define WM8994_AIF2DACL_TO_DAC1L_MASK 0x0004 /* AIF2DACL_TO_DAC1L */
+#define WM8994_AIF2DACL_TO_DAC1L_SHIFT 2 /* AIF2DACL_TO_DAC1L */
+#define WM8994_AIF2DACL_TO_DAC1L_WIDTH 1 /* AIF2DACL_TO_DAC1L */
+#define WM8994_AIF1DAC2L_TO_DAC1L 0x0002 /* AIF1DAC2L_TO_DAC1L */
+#define WM8994_AIF1DAC2L_TO_DAC1L_MASK 0x0002 /* AIF1DAC2L_TO_DAC1L */
+#define WM8994_AIF1DAC2L_TO_DAC1L_SHIFT 1 /* AIF1DAC2L_TO_DAC1L */
+#define WM8994_AIF1DAC2L_TO_DAC1L_WIDTH 1 /* AIF1DAC2L_TO_DAC1L */
+#define WM8994_AIF1DAC1L_TO_DAC1L 0x0001 /* AIF1DAC1L_TO_DAC1L */
+#define WM8994_AIF1DAC1L_TO_DAC1L_MASK 0x0001 /* AIF1DAC1L_TO_DAC1L */
+#define WM8994_AIF1DAC1L_TO_DAC1L_SHIFT 0 /* AIF1DAC1L_TO_DAC1L */
+#define WM8994_AIF1DAC1L_TO_DAC1L_WIDTH 1 /* AIF1DAC1L_TO_DAC1L */
+
+/*
+ * R1538 (0x602) - DAC1 Right Mixer Routing
+ */
+#define WM8994_ADCR_TO_DAC1R 0x0020 /* ADCR_TO_DAC1R */
+#define WM8994_ADCR_TO_DAC1R_MASK 0x0020 /* ADCR_TO_DAC1R */
+#define WM8994_ADCR_TO_DAC1R_SHIFT 5 /* ADCR_TO_DAC1R */
+#define WM8994_ADCR_TO_DAC1R_WIDTH 1 /* ADCR_TO_DAC1R */
+#define WM8994_ADCL_TO_DAC1R 0x0010 /* ADCL_TO_DAC1R */
+#define WM8994_ADCL_TO_DAC1R_MASK 0x0010 /* ADCL_TO_DAC1R */
+#define WM8994_ADCL_TO_DAC1R_SHIFT 4 /* ADCL_TO_DAC1R */
+#define WM8994_ADCL_TO_DAC1R_WIDTH 1 /* ADCL_TO_DAC1R */
+#define WM8994_AIF2DACR_TO_DAC1R 0x0004 /* AIF2DACR_TO_DAC1R */
+#define WM8994_AIF2DACR_TO_DAC1R_MASK 0x0004 /* AIF2DACR_TO_DAC1R */
+#define WM8994_AIF2DACR_TO_DAC1R_SHIFT 2 /* AIF2DACR_TO_DAC1R */
+#define WM8994_AIF2DACR_TO_DAC1R_WIDTH 1 /* AIF2DACR_TO_DAC1R */
+#define WM8994_AIF1DAC2R_TO_DAC1R 0x0002 /* AIF1DAC2R_TO_DAC1R */
+#define WM8994_AIF1DAC2R_TO_DAC1R_MASK 0x0002 /* AIF1DAC2R_TO_DAC1R */
+#define WM8994_AIF1DAC2R_TO_DAC1R_SHIFT 1 /* AIF1DAC2R_TO_DAC1R */
+#define WM8994_AIF1DAC2R_TO_DAC1R_WIDTH 1 /* AIF1DAC2R_TO_DAC1R */
+#define WM8994_AIF1DAC1R_TO_DAC1R 0x0001 /* AIF1DAC1R_TO_DAC1R */
+#define WM8994_AIF1DAC1R_TO_DAC1R_MASK 0x0001 /* AIF1DAC1R_TO_DAC1R */
+#define WM8994_AIF1DAC1R_TO_DAC1R_SHIFT 0 /* AIF1DAC1R_TO_DAC1R */
+#define WM8994_AIF1DAC1R_TO_DAC1R_WIDTH 1 /* AIF1DAC1R_TO_DAC1R */
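
As a hedged aside (editorial, not part of the patch): each bit in the two DAC1 mixer routing registers simply enables one source into the mixer, so a playback path is expressed by OR-ing route bits together. A sketch for the left channel, with a hypothetical helper name and non-zero arguments meaning "enable this source":

/* Editorial sketch: compose a DAC1 Left Mixer Routing (R1537, 0x601) value. */
static inline unsigned int wm8994_dac1l_routing(int aif1dac1l, int aif2dacl,
						int adcl_sidetone,
						int adcr_sidetone)
{
	unsigned int val = 0;

	if (aif1dac1l)
		val |= WM8994_AIF1DAC1L_TO_DAC1L;	/* AIF1 Timeslot 0 */
	if (aif2dacl)
		val |= WM8994_AIF2DACL_TO_DAC1L;	/* AIF2 playback */
	if (adcl_sidetone)
		val |= WM8994_ADCL_TO_DAC1L;		/* left sidetone */
	if (adcr_sidetone)
		val |= WM8994_ADCR_TO_DAC1L;		/* right sidetone */

	return val;
}

The right-channel register R1538 (0x602) is composed the same way from the corresponding _TO_DAC1R bits.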
+
+/*
+ * R1539 (0x603) - DAC2 Mixer Volumes
+ */
+#define WM8994_ADCR_DAC2_VOL_MASK 0x01E0 /* ADCR_DAC2_VOL - [8:5] */
+#define WM8994_ADCR_DAC2_VOL_SHIFT 5 /* ADCR_DAC2_VOL - [8:5] */
+#define WM8994_ADCR_DAC2_VOL_WIDTH 4 /* ADCR_DAC2_VOL - [8:5] */
+#define WM8994_ADCL_DAC2_VOL_MASK 0x000F /* ADCL_DAC2_VOL - [3:0] */
+#define WM8994_ADCL_DAC2_VOL_SHIFT 0 /* ADCL_DAC2_VOL - [3:0] */
+#define WM8994_ADCL_DAC2_VOL_WIDTH 4 /* ADCL_DAC2_VOL - [3:0] */
+
+/*
+ * R1540 (0x604) - DAC2 Left Mixer Routing
+ */
+#define WM8994_ADCR_TO_DAC2L 0x0020 /* ADCR_TO_DAC2L */
+#define WM8994_ADCR_TO_DAC2L_MASK 0x0020 /* ADCR_TO_DAC2L */
+#define WM8994_ADCR_TO_DAC2L_SHIFT 5 /* ADCR_TO_DAC2L */
+#define WM8994_ADCR_TO_DAC2L_WIDTH 1 /* ADCR_TO_DAC2L */
+#define WM8994_ADCL_TO_DAC2L 0x0010 /* ADCL_TO_DAC2L */
+#define WM8994_ADCL_TO_DAC2L_MASK 0x0010 /* ADCL_TO_DAC2L */
+#define WM8994_ADCL_TO_DAC2L_SHIFT 4 /* ADCL_TO_DAC2L */
+#define WM8994_ADCL_TO_DAC2L_WIDTH 1 /* ADCL_TO_DAC2L */
+#define WM8994_AIF2DACL_TO_DAC2L 0x0004 /* AIF2DACL_TO_DAC2L */
+#define WM8994_AIF2DACL_TO_DAC2L_MASK 0x0004 /* AIF2DACL_TO_DAC2L */
+#define WM8994_AIF2DACL_TO_DAC2L_SHIFT 2 /* AIF2DACL_TO_DAC2L */
+#define WM8994_AIF2DACL_TO_DAC2L_WIDTH 1 /* AIF2DACL_TO_DAC2L */
+#define WM8994_AIF1DAC2L_TO_DAC2L 0x0002 /* AIF1DAC2L_TO_DAC2L */
+#define WM8994_AIF1DAC2L_TO_DAC2L_MASK 0x0002 /* AIF1DAC2L_TO_DAC2L */
+#define WM8994_AIF1DAC2L_TO_DAC2L_SHIFT 1 /* AIF1DAC2L_TO_DAC2L */
+#define WM8994_AIF1DAC2L_TO_DAC2L_WIDTH 1 /* AIF1DAC2L_TO_DAC2L */
+#define WM8994_AIF1DAC1L_TO_DAC2L 0x0001 /* AIF1DAC1L_TO_DAC2L */
+#define WM8994_AIF1DAC1L_TO_DAC2L_MASK 0x0001 /* AIF1DAC1L_TO_DAC2L */
+#define WM8994_AIF1DAC1L_TO_DAC2L_SHIFT 0 /* AIF1DAC1L_TO_DAC2L */
+#define WM8994_AIF1DAC1L_TO_DAC2L_WIDTH 1 /* AIF1DAC1L_TO_DAC2L */
+
+/*
+ * R1541 (0x605) - DAC2 Right Mixer Routing
+ */
+#define WM8994_ADCR_TO_DAC2R 0x0020 /* ADCR_TO_DAC2R */
+#define WM8994_ADCR_TO_DAC2R_MASK 0x0020 /* ADCR_TO_DAC2R */
+#define WM8994_ADCR_TO_DAC2R_SHIFT 5 /* ADCR_TO_DAC2R */
+#define WM8994_ADCR_TO_DAC2R_WIDTH 1 /* ADCR_TO_DAC2R */
+#define WM8994_ADCL_TO_DAC2R 0x0010 /* ADCL_TO_DAC2R */
+#define WM8994_ADCL_TO_DAC2R_MASK 0x0010 /* ADCL_TO_DAC2R */
+#define WM8994_ADCL_TO_DAC2R_SHIFT 4 /* ADCL_TO_DAC2R */
+#define WM8994_ADCL_TO_DAC2R_WIDTH 1 /* ADCL_TO_DAC2R */
+#define WM8994_AIF2DACR_TO_DAC2R 0x0004 /* AIF2DACR_TO_DAC2R */
+#define WM8994_AIF2DACR_TO_DAC2R_MASK 0x0004 /* AIF2DACR_TO_DAC2R */
+#define WM8994_AIF2DACR_TO_DAC2R_SHIFT 2 /* AIF2DACR_TO_DAC2R */
+#define WM8994_AIF2DACR_TO_DAC2R_WIDTH 1 /* AIF2DACR_TO_DAC2R */
+#define WM8994_AIF1DAC2R_TO_DAC2R 0x0002 /* AIF1DAC2R_TO_DAC2R */
+#define WM8994_AIF1DAC2R_TO_DAC2R_MASK 0x0002 /* AIF1DAC2R_TO_DAC2R */
+#define WM8994_AIF1DAC2R_TO_DAC2R_SHIFT 1 /* AIF1DAC2R_TO_DAC2R */
+#define WM8994_AIF1DAC2R_TO_DAC2R_WIDTH 1 /* AIF1DAC2R_TO_DAC2R */
+#define WM8994_AIF1DAC1R_TO_DAC2R 0x0001 /* AIF1DAC1R_TO_DAC2R */
+#define WM8994_AIF1DAC1R_TO_DAC2R_MASK 0x0001 /* AIF1DAC1R_TO_DAC2R */
+#define WM8994_AIF1DAC1R_TO_DAC2R_SHIFT 0 /* AIF1DAC1R_TO_DAC2R */
+#define WM8994_AIF1DAC1R_TO_DAC2R_WIDTH 1 /* AIF1DAC1R_TO_DAC2R */
+
+/*
+ * R1542 (0x606) - AIF1 ADC1 Left Mixer Routing
+ */
+#define WM8994_ADC1L_TO_AIF1ADC1L 0x0002 /* ADC1L_TO_AIF1ADC1L */
+#define WM8994_ADC1L_TO_AIF1ADC1L_MASK 0x0002 /* ADC1L_TO_AIF1ADC1L */
+#define WM8994_ADC1L_TO_AIF1ADC1L_SHIFT 1 /* ADC1L_TO_AIF1ADC1L */
+#define WM8994_ADC1L_TO_AIF1ADC1L_WIDTH 1 /* ADC1L_TO_AIF1ADC1L */
+#define WM8994_AIF2DACL_TO_AIF1ADC1L 0x0001 /* AIF2DACL_TO_AIF1ADC1L */
+#define WM8994_AIF2DACL_TO_AIF1ADC1L_MASK 0x0001 /* AIF2DACL_TO_AIF1ADC1L */
+#define WM8994_AIF2DACL_TO_AIF1ADC1L_SHIFT 0 /* AIF2DACL_TO_AIF1ADC1L */
+#define WM8994_AIF2DACL_TO_AIF1ADC1L_WIDTH 1 /* AIF2DACL_TO_AIF1ADC1L */
+
+/*
+ * R1543 (0x607) - AIF1 ADC1 Right Mixer Routing
+ */
+#define WM8994_ADC1R_TO_AIF1ADC1R 0x0002 /* ADC1R_TO_AIF1ADC1R */
+#define WM8994_ADC1R_TO_AIF1ADC1R_MASK 0x0002 /* ADC1R_TO_AIF1ADC1R */
+#define WM8994_ADC1R_TO_AIF1ADC1R_SHIFT 1 /* ADC1R_TO_AIF1ADC1R */
+#define WM8994_ADC1R_TO_AIF1ADC1R_WIDTH 1 /* ADC1R_TO_AIF1ADC1R */
+#define WM8994_AIF2DACR_TO_AIF1ADC1R 0x0001 /* AIF2DACR_TO_AIF1ADC1R */
+#define WM8994_AIF2DACR_TO_AIF1ADC1R_MASK 0x0001 /* AIF2DACR_TO_AIF1ADC1R */
+#define WM8994_AIF2DACR_TO_AIF1ADC1R_SHIFT 0 /* AIF2DACR_TO_AIF1ADC1R */
+#define WM8994_AIF2DACR_TO_AIF1ADC1R_WIDTH 1 /* AIF2DACR_TO_AIF1ADC1R */
+
+/*
+ * R1544 (0x608) - AIF1 ADC2 Left Mixer Routing
+ */
+#define WM8994_ADC2L_TO_AIF1ADC2L 0x0002 /* ADC2L_TO_AIF1ADC2L */
+#define WM8994_ADC2L_TO_AIF1ADC2L_MASK 0x0002 /* ADC2L_TO_AIF1ADC2L */
+#define WM8994_ADC2L_TO_AIF1ADC2L_SHIFT 1 /* ADC2L_TO_AIF1ADC2L */
+#define WM8994_ADC2L_TO_AIF1ADC2L_WIDTH 1 /* ADC2L_TO_AIF1ADC2L */
+#define WM8994_AIF2DACL_TO_AIF1ADC2L 0x0001 /* AIF2DACL_TO_AIF1ADC2L */
+#define WM8994_AIF2DACL_TO_AIF1ADC2L_MASK 0x0001 /* AIF2DACL_TO_AIF1ADC2L */
+#define WM8994_AIF2DACL_TO_AIF1ADC2L_SHIFT 0 /* AIF2DACL_TO_AIF1ADC2L */
+#define WM8994_AIF2DACL_TO_AIF1ADC2L_WIDTH 1 /* AIF2DACL_TO_AIF1ADC2L */
+
+/*
+ * R1545 (0x609) - AIF1 ADC2 Right Mixer Routing
+ */
+#define WM8994_ADC2R_TO_AIF1ADC2R 0x0002 /* ADC2R_TO_AIF1ADC2R */
+#define WM8994_ADC2R_TO_AIF1ADC2R_MASK 0x0002 /* ADC2R_TO_AIF1ADC2R */
+#define WM8994_ADC2R_TO_AIF1ADC2R_SHIFT 1 /* ADC2R_TO_AIF1ADC2R */
+#define WM8994_ADC2R_TO_AIF1ADC2R_WIDTH 1 /* ADC2R_TO_AIF1ADC2R */
+#define WM8994_AIF2DACR_TO_AIF1ADC2R 0x0001 /* AIF2DACR_TO_AIF1ADC2R */
+#define WM8994_AIF2DACR_TO_AIF1ADC2R_MASK 0x0001 /* AIF2DACR_TO_AIF1ADC2R */
+#define WM8994_AIF2DACR_TO_AIF1ADC2R_SHIFT 0 /* AIF2DACR_TO_AIF1ADC2R */
+#define WM8994_AIF2DACR_TO_AIF1ADC2R_WIDTH 1 /* AIF2DACR_TO_AIF1ADC2R */
+
+/*
+ * R1552 (0x610) - DAC1 Left Volume
+ */
+#define WM8994_DAC1L_MUTE 0x0200 /* DAC1L_MUTE */
+#define WM8994_DAC1L_MUTE_MASK 0x0200 /* DAC1L_MUTE */
+#define WM8994_DAC1L_MUTE_SHIFT 9 /* DAC1L_MUTE */
+#define WM8994_DAC1L_MUTE_WIDTH 1 /* DAC1L_MUTE */
+#define WM8994_DAC1_VU 0x0100 /* DAC1_VU */
+#define WM8994_DAC1_VU_MASK 0x0100 /* DAC1_VU */
+#define WM8994_DAC1_VU_SHIFT 8 /* DAC1_VU */
+#define WM8994_DAC1_VU_WIDTH 1 /* DAC1_VU */
+#define WM8994_DAC1L_VOL_MASK 0x00FF /* DAC1L_VOL - [7:0] */
+#define WM8994_DAC1L_VOL_SHIFT 0 /* DAC1L_VOL - [7:0] */
+#define WM8994_DAC1L_VOL_WIDTH 8 /* DAC1L_VOL - [7:0] */
+
+/*
+ * R1553 (0x611) - DAC1 Right Volume
+ */
+#define WM8994_DAC1R_MUTE 0x0200 /* DAC1R_MUTE */
+#define WM8994_DAC1R_MUTE_MASK 0x0200 /* DAC1R_MUTE */
+#define WM8994_DAC1R_MUTE_SHIFT 9 /* DAC1R_MUTE */
+#define WM8994_DAC1R_MUTE_WIDTH 1 /* DAC1R_MUTE */
+#define WM8994_DAC1_VU 0x0100 /* DAC1_VU */
+#define WM8994_DAC1_VU_MASK 0x0100 /* DAC1_VU */
+#define WM8994_DAC1_VU_SHIFT 8 /* DAC1_VU */
+#define WM8994_DAC1_VU_WIDTH 1 /* DAC1_VU */
+#define WM8994_DAC1R_VOL_MASK 0x00FF /* DAC1R_VOL - [7:0] */
+#define WM8994_DAC1R_VOL_SHIFT 0 /* DAC1R_VOL - [7:0] */
+#define WM8994_DAC1R_VOL_WIDTH 8 /* DAC1R_VOL - [7:0] */
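
One point worth spelling out (editorial note, hedged): DAC1_VU acts as a volume-update latch, so writes to the left/right volume fields are typically held until a write with the VU bit set arrives, letting both channels change together. A minimal sketch of composing the two register values, with a hypothetical helper name and the actual write left to the driver's I/O layer:

/* Editorial sketch: prepare DAC1 left/right volume values, latching the
 * update on the right-channel write via DAC1_VU. */
static inline void wm8994_dac1_volumes(unsigned int left, unsigned int right,
				       unsigned int *r1552, unsigned int *r1553)
{
	*r1552 = left & WM8994_DAC1L_VOL_MASK;			/* R1552 (0x610) */
	*r1553 = (right & WM8994_DAC1R_VOL_MASK) |
		 WM8994_DAC1_VU;				/* R1553 (0x611) */
}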
+
+/*
+ * R1554 (0x612) - DAC2 Left Volume
+ */
+#define WM8994_DAC2L_MUTE 0x0200 /* DAC2L_MUTE */
+#define WM8994_DAC2L_MUTE_MASK 0x0200 /* DAC2L_MUTE */
+#define WM8994_DAC2L_MUTE_SHIFT 9 /* DAC2L_MUTE */
+#define WM8994_DAC2L_MUTE_WIDTH 1 /* DAC2L_MUTE */
+#define WM8994_DAC2_VU 0x0100 /* DAC2_VU */
+#define WM8994_DAC2_VU_MASK 0x0100 /* DAC2_VU */
+#define WM8994_DAC2_VU_SHIFT 8 /* DAC2_VU */
+#define WM8994_DAC2_VU_WIDTH 1 /* DAC2_VU */
+#define WM8994_DAC2L_VOL_MASK 0x00FF /* DAC2L_VOL - [7:0] */
+#define WM8994_DAC2L_VOL_SHIFT 0 /* DAC2L_VOL - [7:0] */
+#define WM8994_DAC2L_VOL_WIDTH 8 /* DAC2L_VOL - [7:0] */
+
+/*
+ * R1555 (0x613) - DAC2 Right Volume
+ */
+#define WM8994_DAC2R_MUTE 0x0200 /* DAC2R_MUTE */
+#define WM8994_DAC2R_MUTE_MASK 0x0200 /* DAC2R_MUTE */
+#define WM8994_DAC2R_MUTE_SHIFT 9 /* DAC2R_MUTE */
+#define WM8994_DAC2R_MUTE_WIDTH 1 /* DAC2R_MUTE */
+#define WM8994_DAC2_VU 0x0100 /* DAC2_VU */
+#define WM8994_DAC2_VU_MASK 0x0100 /* DAC2_VU */
+#define WM8994_DAC2_VU_SHIFT 8 /* DAC2_VU */
+#define WM8994_DAC2_VU_WIDTH 1 /* DAC2_VU */
+#define WM8994_DAC2R_VOL_MASK 0x00FF /* DAC2R_VOL - [7:0] */
+#define WM8994_DAC2R_VOL_SHIFT 0 /* DAC2R_VOL - [7:0] */
+#define WM8994_DAC2R_VOL_WIDTH 8 /* DAC2R_VOL - [7:0] */
+
+/*
+ * R1556 (0x614) - DAC Softmute
+ */
+#define WM8994_DAC_SOFTMUTEMODE 0x0002 /* DAC_SOFTMUTEMODE */
+#define WM8994_DAC_SOFTMUTEMODE_MASK 0x0002 /* DAC_SOFTMUTEMODE */
+#define WM8994_DAC_SOFTMUTEMODE_SHIFT 1 /* DAC_SOFTMUTEMODE */
+#define WM8994_DAC_SOFTMUTEMODE_WIDTH 1 /* DAC_SOFTMUTEMODE */
+#define WM8994_DAC_MUTERATE 0x0001 /* DAC_MUTERATE */
+#define WM8994_DAC_MUTERATE_MASK 0x0001 /* DAC_MUTERATE */
+#define WM8994_DAC_MUTERATE_SHIFT 0 /* DAC_MUTERATE */
+#define WM8994_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */
+
+/*
+ * R1568 (0x620) - Oversampling
+ */
+#define WM8994_ADC_OSR128 0x0002 /* ADC_OSR128 */
+#define WM8994_ADC_OSR128_MASK 0x0002 /* ADC_OSR128 */
+#define WM8994_ADC_OSR128_SHIFT 1 /* ADC_OSR128 */
+#define WM8994_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */
+#define WM8994_DAC_OSR128 0x0001 /* DAC_OSR128 */
+#define WM8994_DAC_OSR128_MASK 0x0001 /* DAC_OSR128 */
+#define WM8994_DAC_OSR128_SHIFT 0 /* DAC_OSR128 */
+#define WM8994_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */
+
+/*
+ * R1569 (0x621) - Sidetone
+ */
+#define WM8994_ST_HPF_CUT_MASK 0x0380 /* ST_HPF_CUT - [9:7] */
+#define WM8994_ST_HPF_CUT_SHIFT 7 /* ST_HPF_CUT - [9:7] */
+#define WM8994_ST_HPF_CUT_WIDTH 3 /* ST_HPF_CUT - [9:7] */
+#define WM8994_ST_HPF 0x0040 /* ST_HPF */
+#define WM8994_ST_HPF_MASK 0x0040 /* ST_HPF */
+#define WM8994_ST_HPF_SHIFT 6 /* ST_HPF */
+#define WM8994_ST_HPF_WIDTH 1 /* ST_HPF */
+#define WM8994_STR_SEL 0x0002 /* STR_SEL */
+#define WM8994_STR_SEL_MASK 0x0002 /* STR_SEL */
+#define WM8994_STR_SEL_SHIFT 1 /* STR_SEL */
+#define WM8994_STR_SEL_WIDTH 1 /* STR_SEL */
+#define WM8994_STL_SEL 0x0001 /* STL_SEL */
+#define WM8994_STL_SEL_MASK 0x0001 /* STL_SEL */
+#define WM8994_STL_SEL_SHIFT 0 /* STL_SEL */
+#define WM8994_STL_SEL_WIDTH 1 /* STL_SEL */
+
+/*
+ * R1797 (0x705) - JACKDET Ctrl
+ */
+#define WM1811_JACKDET_DB 0x0100 /* JACKDET_DB */
+#define WM1811_JACKDET_DB_MASK 0x0100 /* JACKDET_DB */
+#define WM1811_JACKDET_DB_SHIFT 8 /* JACKDET_DB */
+#define WM1811_JACKDET_DB_WIDTH 1 /* JACKDET_DB */
+#define WM1811_JACKDET_LVL 0x0040 /* JACKDET_LVL */
+#define WM1811_JACKDET_LVL_MASK 0x0040 /* JACKDET_LVL */
+#define WM1811_JACKDET_LVL_SHIFT 6 /* JACKDET_LVL */
+#define WM1811_JACKDET_LVL_WIDTH 1 /* JACKDET_LVL */
+
+/*
+ * R1824 (0x720) - Pull Control (1)
+ */
+#define WM8994_DMICDAT2_PU 0x0800 /* DMICDAT2_PU */
+#define WM8994_DMICDAT2_PU_MASK 0x0800 /* DMICDAT2_PU */
+#define WM8994_DMICDAT2_PU_SHIFT 11 /* DMICDAT2_PU */
+#define WM8994_DMICDAT2_PU_WIDTH 1 /* DMICDAT2_PU */
+#define WM8994_DMICDAT2_PD 0x0400 /* DMICDAT2_PD */
+#define WM8994_DMICDAT2_PD_MASK 0x0400 /* DMICDAT2_PD */
+#define WM8994_DMICDAT2_PD_SHIFT 10 /* DMICDAT2_PD */
+#define WM8994_DMICDAT2_PD_WIDTH 1 /* DMICDAT2_PD */
+#define WM8994_DMICDAT1_PU 0x0200 /* DMICDAT1_PU */
+#define WM8994_DMICDAT1_PU_MASK 0x0200 /* DMICDAT1_PU */
+#define WM8994_DMICDAT1_PU_SHIFT 9 /* DMICDAT1_PU */
+#define WM8994_DMICDAT1_PU_WIDTH 1 /* DMICDAT1_PU */
+#define WM8994_DMICDAT1_PD 0x0100 /* DMICDAT1_PD */
+#define WM8994_DMICDAT1_PD_MASK 0x0100 /* DMICDAT1_PD */
+#define WM8994_DMICDAT1_PD_SHIFT 8 /* DMICDAT1_PD */
+#define WM8994_DMICDAT1_PD_WIDTH 1 /* DMICDAT1_PD */
+#define WM8994_MCLK1_PU 0x0080 /* MCLK1_PU */
+#define WM8994_MCLK1_PU_MASK 0x0080 /* MCLK1_PU */
+#define WM8994_MCLK1_PU_SHIFT 7 /* MCLK1_PU */
+#define WM8994_MCLK1_PU_WIDTH 1 /* MCLK1_PU */
+#define WM8994_MCLK1_PD 0x0040 /* MCLK1_PD */
+#define WM8994_MCLK1_PD_MASK 0x0040 /* MCLK1_PD */
+#define WM8994_MCLK1_PD_SHIFT 6 /* MCLK1_PD */
+#define WM8994_MCLK1_PD_WIDTH 1 /* MCLK1_PD */
+#define WM8994_DACDAT1_PU 0x0020 /* DACDAT1_PU */
+#define WM8994_DACDAT1_PU_MASK 0x0020 /* DACDAT1_PU */
+#define WM8994_DACDAT1_PU_SHIFT 5 /* DACDAT1_PU */
+#define WM8994_DACDAT1_PU_WIDTH 1 /* DACDAT1_PU */
+#define WM8994_DACDAT1_PD 0x0010 /* DACDAT1_PD */
+#define WM8994_DACDAT1_PD_MASK 0x0010 /* DACDAT1_PD */
+#define WM8994_DACDAT1_PD_SHIFT 4 /* DACDAT1_PD */
+#define WM8994_DACDAT1_PD_WIDTH 1 /* DACDAT1_PD */
+#define WM8994_DACLRCLK1_PU 0x0008 /* DACLRCLK1_PU */
+#define WM8994_DACLRCLK1_PU_MASK 0x0008 /* DACLRCLK1_PU */
+#define WM8994_DACLRCLK1_PU_SHIFT 3 /* DACLRCLK1_PU */
+#define WM8994_DACLRCLK1_PU_WIDTH 1 /* DACLRCLK1_PU */
+#define WM8994_DACLRCLK1_PD 0x0004 /* DACLRCLK1_PD */
+#define WM8994_DACLRCLK1_PD_MASK 0x0004 /* DACLRCLK1_PD */
+#define WM8994_DACLRCLK1_PD_SHIFT 2 /* DACLRCLK1_PD */
+#define WM8994_DACLRCLK1_PD_WIDTH 1 /* DACLRCLK1_PD */
+#define WM8994_BCLK1_PU 0x0002 /* BCLK1_PU */
+#define WM8994_BCLK1_PU_MASK 0x0002 /* BCLK1_PU */
+#define WM8994_BCLK1_PU_SHIFT 1 /* BCLK1_PU */
+#define WM8994_BCLK1_PU_WIDTH 1 /* BCLK1_PU */
+#define WM8994_BCLK1_PD 0x0001 /* BCLK1_PD */
+#define WM8994_BCLK1_PD_MASK 0x0001 /* BCLK1_PD */
+#define WM8994_BCLK1_PD_SHIFT 0 /* BCLK1_PD */
+#define WM8994_BCLK1_PD_WIDTH 1 /* BCLK1_PD */
+
+/*
+ * R1825 (0x721) - Pull Control (2)
+ */
+#define WM8994_CSNADDR_PD 0x0100 /* CSNADDR_PD */
+#define WM8994_CSNADDR_PD_MASK 0x0100 /* CSNADDR_PD */
+#define WM8994_CSNADDR_PD_SHIFT 8 /* CSNADDR_PD */
+#define WM8994_CSNADDR_PD_WIDTH 1 /* CSNADDR_PD */
+#define WM8994_LDO2ENA_PD 0x0040 /* LDO2ENA_PD */
+#define WM8994_LDO2ENA_PD_MASK 0x0040 /* LDO2ENA_PD */
+#define WM8994_LDO2ENA_PD_SHIFT 6 /* LDO2ENA_PD */
+#define WM8994_LDO2ENA_PD_WIDTH 1 /* LDO2ENA_PD */
+#define WM8994_LDO1ENA_PD 0x0010 /* LDO1ENA_PD */
+#define WM8994_LDO1ENA_PD_MASK 0x0010 /* LDO1ENA_PD */
+#define WM8994_LDO1ENA_PD_SHIFT 4 /* LDO1ENA_PD */
+#define WM8994_LDO1ENA_PD_WIDTH 1 /* LDO1ENA_PD */
+#define WM8994_CIFMODE_PD 0x0004 /* CIFMODE_PD */
+#define WM8994_CIFMODE_PD_MASK 0x0004 /* CIFMODE_PD */
+#define WM8994_CIFMODE_PD_SHIFT 2 /* CIFMODE_PD */
+#define WM8994_CIFMODE_PD_WIDTH 1 /* CIFMODE_PD */
+#define WM8994_SPKMODE_PU 0x0002 /* SPKMODE_PU */
+#define WM8994_SPKMODE_PU_MASK 0x0002 /* SPKMODE_PU */
+#define WM8994_SPKMODE_PU_SHIFT 1 /* SPKMODE_PU */
+#define WM8994_SPKMODE_PU_WIDTH 1 /* SPKMODE_PU */
+
+/*
+ * R1840 (0x730) - Interrupt Status 1
+ */
+#define WM8994_GP11_EINT 0x0400 /* GP11_EINT */
+#define WM8994_GP11_EINT_MASK 0x0400 /* GP11_EINT */
+#define WM8994_GP11_EINT_SHIFT 10 /* GP11_EINT */
+#define WM8994_GP11_EINT_WIDTH 1 /* GP11_EINT */
+#define WM8994_GP10_EINT 0x0200 /* GP10_EINT */
+#define WM8994_GP10_EINT_MASK 0x0200 /* GP10_EINT */
+#define WM8994_GP10_EINT_SHIFT 9 /* GP10_EINT */
+#define WM8994_GP10_EINT_WIDTH 1 /* GP10_EINT */
+#define WM8994_GP9_EINT 0x0100 /* GP9_EINT */
+#define WM8994_GP9_EINT_MASK 0x0100 /* GP9_EINT */
+#define WM8994_GP9_EINT_SHIFT 8 /* GP9_EINT */
+#define WM8994_GP9_EINT_WIDTH 1 /* GP9_EINT */
+#define WM8994_GP8_EINT 0x0080 /* GP8_EINT */
+#define WM8994_GP8_EINT_MASK 0x0080 /* GP8_EINT */
+#define WM8994_GP8_EINT_SHIFT 7 /* GP8_EINT */
+#define WM8994_GP8_EINT_WIDTH 1 /* GP8_EINT */
+#define WM8994_GP7_EINT 0x0040 /* GP7_EINT */
+#define WM8994_GP7_EINT_MASK 0x0040 /* GP7_EINT */
+#define WM8994_GP7_EINT_SHIFT 6 /* GP7_EINT */
+#define WM8994_GP7_EINT_WIDTH 1 /* GP7_EINT */
+#define WM8994_GP6_EINT 0x0020 /* GP6_EINT */
+#define WM8994_GP6_EINT_MASK 0x0020 /* GP6_EINT */
+#define WM8994_GP6_EINT_SHIFT 5 /* GP6_EINT */
+#define WM8994_GP6_EINT_WIDTH 1 /* GP6_EINT */
+#define WM8994_GP5_EINT 0x0010 /* GP5_EINT */
+#define WM8994_GP5_EINT_MASK 0x0010 /* GP5_EINT */
+#define WM8994_GP5_EINT_SHIFT 4 /* GP5_EINT */
+#define WM8994_GP5_EINT_WIDTH 1 /* GP5_EINT */
+#define WM8994_GP4_EINT 0x0008 /* GP4_EINT */
+#define WM8994_GP4_EINT_MASK 0x0008 /* GP4_EINT */
+#define WM8994_GP4_EINT_SHIFT 3 /* GP4_EINT */
+#define WM8994_GP4_EINT_WIDTH 1 /* GP4_EINT */
+#define WM8994_GP3_EINT 0x0004 /* GP3_EINT */
+#define WM8994_GP3_EINT_MASK 0x0004 /* GP3_EINT */
+#define WM8994_GP3_EINT_SHIFT 2 /* GP3_EINT */
+#define WM8994_GP3_EINT_WIDTH 1 /* GP3_EINT */
+#define WM8994_GP2_EINT 0x0002 /* GP2_EINT */
+#define WM8994_GP2_EINT_MASK 0x0002 /* GP2_EINT */
+#define WM8994_GP2_EINT_SHIFT 1 /* GP2_EINT */
+#define WM8994_GP2_EINT_WIDTH 1 /* GP2_EINT */
+#define WM8994_GP1_EINT 0x0001 /* GP1_EINT */
+#define WM8994_GP1_EINT_MASK 0x0001 /* GP1_EINT */
+#define WM8994_GP1_EINT_SHIFT 0 /* GP1_EINT */
+#define WM8994_GP1_EINT_WIDTH 1 /* GP1_EINT */
+
+/*
+ * R1841 (0x731) - Interrupt Status 2
+ */
+#define WM8994_TEMP_WARN_EINT 0x8000 /* TEMP_WARN_EINT */
+#define WM8994_TEMP_WARN_EINT_MASK 0x8000 /* TEMP_WARN_EINT */
+#define WM8994_TEMP_WARN_EINT_SHIFT 15 /* TEMP_WARN_EINT */
+#define WM8994_TEMP_WARN_EINT_WIDTH 1 /* TEMP_WARN_EINT */
+#define WM8994_DCS_DONE_EINT 0x4000 /* DCS_DONE_EINT */
+#define WM8994_DCS_DONE_EINT_MASK 0x4000 /* DCS_DONE_EINT */
+#define WM8994_DCS_DONE_EINT_SHIFT 14 /* DCS_DONE_EINT */
+#define WM8994_DCS_DONE_EINT_WIDTH 1 /* DCS_DONE_EINT */
+#define WM8994_WSEQ_DONE_EINT 0x2000 /* WSEQ_DONE_EINT */
+#define WM8994_WSEQ_DONE_EINT_MASK 0x2000 /* WSEQ_DONE_EINT */
+#define WM8994_WSEQ_DONE_EINT_SHIFT 13 /* WSEQ_DONE_EINT */
+#define WM8994_WSEQ_DONE_EINT_WIDTH 1 /* WSEQ_DONE_EINT */
+#define WM8994_FIFOS_ERR_EINT 0x1000 /* FIFOS_ERR_EINT */
+#define WM8994_FIFOS_ERR_EINT_MASK 0x1000 /* FIFOS_ERR_EINT */
+#define WM8994_FIFOS_ERR_EINT_SHIFT 12 /* FIFOS_ERR_EINT */
+#define WM8994_FIFOS_ERR_EINT_WIDTH 1 /* FIFOS_ERR_EINT */
+#define WM8994_AIF2DRC_SIG_DET_EINT 0x0800 /* AIF2DRC_SIG_DET_EINT */
+#define WM8994_AIF2DRC_SIG_DET_EINT_MASK 0x0800 /* AIF2DRC_SIG_DET_EINT */
+#define WM8994_AIF2DRC_SIG_DET_EINT_SHIFT 11 /* AIF2DRC_SIG_DET_EINT */
+#define WM8994_AIF2DRC_SIG_DET_EINT_WIDTH 1 /* AIF2DRC_SIG_DET_EINT */
+#define WM8994_AIF1DRC2_SIG_DET_EINT 0x0400 /* AIF1DRC2_SIG_DET_EINT */
+#define WM8994_AIF1DRC2_SIG_DET_EINT_MASK 0x0400 /* AIF1DRC2_SIG_DET_EINT */
+#define WM8994_AIF1DRC2_SIG_DET_EINT_SHIFT 10 /* AIF1DRC2_SIG_DET_EINT */
+#define WM8994_AIF1DRC2_SIG_DET_EINT_WIDTH 1 /* AIF1DRC2_SIG_DET_EINT */
+#define WM8994_AIF1DRC1_SIG_DET_EINT 0x0200 /* AIF1DRC1_SIG_DET_EINT */
+#define WM8994_AIF1DRC1_SIG_DET_EINT_MASK 0x0200 /* AIF1DRC1_SIG_DET_EINT */
+#define WM8994_AIF1DRC1_SIG_DET_EINT_SHIFT 9 /* AIF1DRC1_SIG_DET_EINT */
+#define WM8994_AIF1DRC1_SIG_DET_EINT_WIDTH 1 /* AIF1DRC1_SIG_DET_EINT */
+#define WM8994_SRC2_LOCK_EINT 0x0100 /* SRC2_LOCK_EINT */
+#define WM8994_SRC2_LOCK_EINT_MASK 0x0100 /* SRC2_LOCK_EINT */
+#define WM8994_SRC2_LOCK_EINT_SHIFT 8 /* SRC2_LOCK_EINT */
+#define WM8994_SRC2_LOCK_EINT_WIDTH 1 /* SRC2_LOCK_EINT */
+#define WM8994_SRC1_LOCK_EINT 0x0080 /* SRC1_LOCK_EINT */
+#define WM8994_SRC1_LOCK_EINT_MASK 0x0080 /* SRC1_LOCK_EINT */
+#define WM8994_SRC1_LOCK_EINT_SHIFT 7 /* SRC1_LOCK_EINT */
+#define WM8994_SRC1_LOCK_EINT_WIDTH 1 /* SRC1_LOCK_EINT */
+#define WM8994_FLL2_LOCK_EINT 0x0040 /* FLL2_LOCK_EINT */
+#define WM8994_FLL2_LOCK_EINT_MASK 0x0040 /* FLL2_LOCK_EINT */
+#define WM8994_FLL2_LOCK_EINT_SHIFT 6 /* FLL2_LOCK_EINT */
+#define WM8994_FLL2_LOCK_EINT_WIDTH 1 /* FLL2_LOCK_EINT */
+#define WM8994_FLL1_LOCK_EINT 0x0020 /* FLL1_LOCK_EINT */
+#define WM8994_FLL1_LOCK_EINT_MASK 0x0020 /* FLL1_LOCK_EINT */
+#define WM8994_FLL1_LOCK_EINT_SHIFT 5 /* FLL1_LOCK_EINT */
+#define WM8994_FLL1_LOCK_EINT_WIDTH 1 /* FLL1_LOCK_EINT */
+#define WM8994_MIC2_SHRT_EINT 0x0010 /* MIC2_SHRT_EINT */
+#define WM8994_MIC2_SHRT_EINT_MASK 0x0010 /* MIC2_SHRT_EINT */
+#define WM8994_MIC2_SHRT_EINT_SHIFT 4 /* MIC2_SHRT_EINT */
+#define WM8994_MIC2_SHRT_EINT_WIDTH 1 /* MIC2_SHRT_EINT */
+#define WM8994_MIC2_DET_EINT 0x0008 /* MIC2_DET_EINT */
+#define WM8994_MIC2_DET_EINT_MASK 0x0008 /* MIC2_DET_EINT */
+#define WM8994_MIC2_DET_EINT_SHIFT 3 /* MIC2_DET_EINT */
+#define WM8994_MIC2_DET_EINT_WIDTH 1 /* MIC2_DET_EINT */
+#define WM8994_MIC1_SHRT_EINT 0x0004 /* MIC1_SHRT_EINT */
+#define WM8994_MIC1_SHRT_EINT_MASK 0x0004 /* MIC1_SHRT_EINT */
+#define WM8994_MIC1_SHRT_EINT_SHIFT 2 /* MIC1_SHRT_EINT */
+#define WM8994_MIC1_SHRT_EINT_WIDTH 1 /* MIC1_SHRT_EINT */
+#define WM8994_MIC1_DET_EINT 0x0002 /* MIC1_DET_EINT */
+#define WM8994_MIC1_DET_EINT_MASK 0x0002 /* MIC1_DET_EINT */
+#define WM8994_MIC1_DET_EINT_SHIFT 1 /* MIC1_DET_EINT */
+#define WM8994_MIC1_DET_EINT_WIDTH 1 /* MIC1_DET_EINT */
+#define WM8994_TEMP_SHUT_EINT 0x0001 /* TEMP_SHUT_EINT */
+#define WM8994_TEMP_SHUT_EINT_MASK 0x0001 /* TEMP_SHUT_EINT */
+#define WM8994_TEMP_SHUT_EINT_SHIFT 0 /* TEMP_SHUT_EINT */
+#define WM8994_TEMP_SHUT_EINT_WIDTH 1 /* TEMP_SHUT_EINT */
+
+/*
+ * R1842 (0x732) - Interrupt Raw Status 2
+ */
+#define WM8994_TEMP_WARN_STS 0x8000 /* TEMP_WARN_STS */
+#define WM8994_TEMP_WARN_STS_MASK 0x8000 /* TEMP_WARN_STS */
+#define WM8994_TEMP_WARN_STS_SHIFT 15 /* TEMP_WARN_STS */
+#define WM8994_TEMP_WARN_STS_WIDTH 1 /* TEMP_WARN_STS */
+#define WM8994_DCS_DONE_STS 0x4000 /* DCS_DONE_STS */
+#define WM8994_DCS_DONE_STS_MASK 0x4000 /* DCS_DONE_STS */
+#define WM8994_DCS_DONE_STS_SHIFT 14 /* DCS_DONE_STS */
+#define WM8994_DCS_DONE_STS_WIDTH 1 /* DCS_DONE_STS */
+#define WM8994_WSEQ_DONE_STS 0x2000 /* WSEQ_DONE_STS */
+#define WM8994_WSEQ_DONE_STS_MASK 0x2000 /* WSEQ_DONE_STS */
+#define WM8994_WSEQ_DONE_STS_SHIFT 13 /* WSEQ_DONE_STS */
+#define WM8994_WSEQ_DONE_STS_WIDTH 1 /* WSEQ_DONE_STS */
+#define WM8994_FIFOS_ERR_STS 0x1000 /* FIFOS_ERR_STS */
+#define WM8994_FIFOS_ERR_STS_MASK 0x1000 /* FIFOS_ERR_STS */
+#define WM8994_FIFOS_ERR_STS_SHIFT 12 /* FIFOS_ERR_STS */
+#define WM8994_FIFOS_ERR_STS_WIDTH 1 /* FIFOS_ERR_STS */
+#define WM8994_AIF2DRC_SIG_DET_STS 0x0800 /* AIF2DRC_SIG_DET_STS */
+#define WM8994_AIF2DRC_SIG_DET_STS_MASK 0x0800 /* AIF2DRC_SIG_DET_STS */
+#define WM8994_AIF2DRC_SIG_DET_STS_SHIFT 11 /* AIF2DRC_SIG_DET_STS */
+#define WM8994_AIF2DRC_SIG_DET_STS_WIDTH 1 /* AIF2DRC_SIG_DET_STS */
+#define WM8994_AIF1DRC2_SIG_DET_STS 0x0400 /* AIF1DRC2_SIG_DET_STS */
+#define WM8994_AIF1DRC2_SIG_DET_STS_MASK 0x0400 /* AIF1DRC2_SIG_DET_STS */
+#define WM8994_AIF1DRC2_SIG_DET_STS_SHIFT 10 /* AIF1DRC2_SIG_DET_STS */
+#define WM8994_AIF1DRC2_SIG_DET_STS_WIDTH 1 /* AIF1DRC2_SIG_DET_STS */
+#define WM8994_AIF1DRC1_SIG_DET_STS 0x0200 /* AIF1DRC1_SIG_DET_STS */
+#define WM8994_AIF1DRC1_SIG_DET_STS_MASK 0x0200 /* AIF1DRC1_SIG_DET_STS */
+#define WM8994_AIF1DRC1_SIG_DET_STS_SHIFT 9 /* AIF1DRC1_SIG_DET_STS */
+#define WM8994_AIF1DRC1_SIG_DET_STS_WIDTH 1 /* AIF1DRC1_SIG_DET_STS */
+#define WM8994_SRC2_LOCK_STS 0x0100 /* SRC2_LOCK_STS */
+#define WM8994_SRC2_LOCK_STS_MASK 0x0100 /* SRC2_LOCK_STS */
+#define WM8994_SRC2_LOCK_STS_SHIFT 8 /* SRC2_LOCK_STS */
+#define WM8994_SRC2_LOCK_STS_WIDTH 1 /* SRC2_LOCK_STS */
+#define WM8994_SRC1_LOCK_STS 0x0080 /* SRC1_LOCK_STS */
+#define WM8994_SRC1_LOCK_STS_MASK 0x0080 /* SRC1_LOCK_STS */
+#define WM8994_SRC1_LOCK_STS_SHIFT 7 /* SRC1_LOCK_STS */
+#define WM8994_SRC1_LOCK_STS_WIDTH 1 /* SRC1_LOCK_STS */
+#define WM8994_FLL2_LOCK_STS 0x0040 /* FLL2_LOCK_STS */
+#define WM8994_FLL2_LOCK_STS_MASK 0x0040 /* FLL2_LOCK_STS */
+#define WM8994_FLL2_LOCK_STS_SHIFT 6 /* FLL2_LOCK_STS */
+#define WM8994_FLL2_LOCK_STS_WIDTH 1 /* FLL2_LOCK_STS */
+#define WM8994_FLL1_LOCK_STS 0x0020 /* FLL1_LOCK_STS */
+#define WM8994_FLL1_LOCK_STS_MASK 0x0020 /* FLL1_LOCK_STS */
+#define WM8994_FLL1_LOCK_STS_SHIFT 5 /* FLL1_LOCK_STS */
+#define WM8994_FLL1_LOCK_STS_WIDTH 1 /* FLL1_LOCK_STS */
+#define WM8994_MIC2_SHRT_STS 0x0010 /* MIC2_SHRT_STS */
+#define WM8994_MIC2_SHRT_STS_MASK 0x0010 /* MIC2_SHRT_STS */
+#define WM8994_MIC2_SHRT_STS_SHIFT 4 /* MIC2_SHRT_STS */
+#define WM8994_MIC2_SHRT_STS_WIDTH 1 /* MIC2_SHRT_STS */
+#define WM8994_MIC2_DET_STS 0x0008 /* MIC2_DET_STS */
+#define WM8994_MIC2_DET_STS_MASK 0x0008 /* MIC2_DET_STS */
+#define WM8994_MIC2_DET_STS_SHIFT 3 /* MIC2_DET_STS */
+#define WM8994_MIC2_DET_STS_WIDTH 1 /* MIC2_DET_STS */
+#define WM8994_MIC1_SHRT_STS 0x0004 /* MIC1_SHRT_STS */
+#define WM8994_MIC1_SHRT_STS_MASK 0x0004 /* MIC1_SHRT_STS */
+#define WM8994_MIC1_SHRT_STS_SHIFT 2 /* MIC1_SHRT_STS */
+#define WM8994_MIC1_SHRT_STS_WIDTH 1 /* MIC1_SHRT_STS */
+#define WM8994_MIC1_DET_STS 0x0002 /* MIC1_DET_STS */
+#define WM8994_MIC1_DET_STS_MASK 0x0002 /* MIC1_DET_STS */
+#define WM8994_MIC1_DET_STS_SHIFT 1 /* MIC1_DET_STS */
+#define WM8994_MIC1_DET_STS_WIDTH 1 /* MIC1_DET_STS */
+#define WM8994_TEMP_SHUT_STS 0x0001 /* TEMP_SHUT_STS */
+#define WM8994_TEMP_SHUT_STS_MASK 0x0001 /* TEMP_SHUT_STS */
+#define WM8994_TEMP_SHUT_STS_SHIFT 0 /* TEMP_SHUT_STS */
+#define WM8994_TEMP_SHUT_STS_WIDTH 1 /* TEMP_SHUT_STS */
+
+/*
+ * R1848 (0x738) - Interrupt Status 1 Mask
+ */
+#define WM8994_IM_GP11_EINT 0x0400 /* IM_GP11_EINT */
+#define WM8994_IM_GP11_EINT_MASK 0x0400 /* IM_GP11_EINT */
+#define WM8994_IM_GP11_EINT_SHIFT 10 /* IM_GP11_EINT */
+#define WM8994_IM_GP11_EINT_WIDTH 1 /* IM_GP11_EINT */
+#define WM8994_IM_GP10_EINT 0x0200 /* IM_GP10_EINT */
+#define WM8994_IM_GP10_EINT_MASK 0x0200 /* IM_GP10_EINT */
+#define WM8994_IM_GP10_EINT_SHIFT 9 /* IM_GP10_EINT */
+#define WM8994_IM_GP10_EINT_WIDTH 1 /* IM_GP10_EINT */
+#define WM8994_IM_GP9_EINT 0x0100 /* IM_GP9_EINT */
+#define WM8994_IM_GP9_EINT_MASK 0x0100 /* IM_GP9_EINT */
+#define WM8994_IM_GP9_EINT_SHIFT 8 /* IM_GP9_EINT */
+#define WM8994_IM_GP9_EINT_WIDTH 1 /* IM_GP9_EINT */
+#define WM8994_IM_GP8_EINT 0x0080 /* IM_GP8_EINT */
+#define WM8994_IM_GP8_EINT_MASK 0x0080 /* IM_GP8_EINT */
+#define WM8994_IM_GP8_EINT_SHIFT 7 /* IM_GP8_EINT */
+#define WM8994_IM_GP8_EINT_WIDTH 1 /* IM_GP8_EINT */
+#define WM8994_IM_GP7_EINT 0x0040 /* IM_GP7_EINT */
+#define WM8994_IM_GP7_EINT_MASK 0x0040 /* IM_GP7_EINT */
+#define WM8994_IM_GP7_EINT_SHIFT 6 /* IM_GP7_EINT */
+#define WM8994_IM_GP7_EINT_WIDTH 1 /* IM_GP7_EINT */
+#define WM8994_IM_GP6_EINT 0x0020 /* IM_GP6_EINT */
+#define WM8994_IM_GP6_EINT_MASK 0x0020 /* IM_GP6_EINT */
+#define WM8994_IM_GP6_EINT_SHIFT 5 /* IM_GP6_EINT */
+#define WM8994_IM_GP6_EINT_WIDTH 1 /* IM_GP6_EINT */
+#define WM8994_IM_GP5_EINT 0x0010 /* IM_GP5_EINT */
+#define WM8994_IM_GP5_EINT_MASK 0x0010 /* IM_GP5_EINT */
+#define WM8994_IM_GP5_EINT_SHIFT 4 /* IM_GP5_EINT */
+#define WM8994_IM_GP5_EINT_WIDTH 1 /* IM_GP5_EINT */
+#define WM8994_IM_GP4_EINT 0x0008 /* IM_GP4_EINT */
+#define WM8994_IM_GP4_EINT_MASK 0x0008 /* IM_GP4_EINT */
+#define WM8994_IM_GP4_EINT_SHIFT 3 /* IM_GP4_EINT */
+#define WM8994_IM_GP4_EINT_WIDTH 1 /* IM_GP4_EINT */
+#define WM8994_IM_GP3_EINT 0x0004 /* IM_GP3_EINT */
+#define WM8994_IM_GP3_EINT_MASK 0x0004 /* IM_GP3_EINT */
+#define WM8994_IM_GP3_EINT_SHIFT 2 /* IM_GP3_EINT */
+#define WM8994_IM_GP3_EINT_WIDTH 1 /* IM_GP3_EINT */
+#define WM8994_IM_GP2_EINT 0x0002 /* IM_GP2_EINT */
+#define WM8994_IM_GP2_EINT_MASK 0x0002 /* IM_GP2_EINT */
+#define WM8994_IM_GP2_EINT_SHIFT 1 /* IM_GP2_EINT */
+#define WM8994_IM_GP2_EINT_WIDTH 1 /* IM_GP2_EINT */
+#define WM8994_IM_GP1_EINT 0x0001 /* IM_GP1_EINT */
+#define WM8994_IM_GP1_EINT_MASK 0x0001 /* IM_GP1_EINT */
+#define WM8994_IM_GP1_EINT_SHIFT 0 /* IM_GP1_EINT */
+#define WM8994_IM_GP1_EINT_WIDTH 1 /* IM_GP1_EINT */
+
+/*
+ * R1849 (0x739) - Interrupt Status 2 Mask
+ */
+#define WM8994_IM_TEMP_WARN_EINT 0x8000 /* IM_TEMP_WARN_EINT */
+#define WM8994_IM_TEMP_WARN_EINT_MASK 0x8000 /* IM_TEMP_WARN_EINT */
+#define WM8994_IM_TEMP_WARN_EINT_SHIFT 15 /* IM_TEMP_WARN_EINT */
+#define WM8994_IM_TEMP_WARN_EINT_WIDTH 1 /* IM_TEMP_WARN_EINT */
+#define WM8994_IM_DCS_DONE_EINT 0x4000 /* IM_DCS_DONE_EINT */
+#define WM8994_IM_DCS_DONE_EINT_MASK 0x4000 /* IM_DCS_DONE_EINT */
+#define WM8994_IM_DCS_DONE_EINT_SHIFT 14 /* IM_DCS_DONE_EINT */
+#define WM8994_IM_DCS_DONE_EINT_WIDTH 1 /* IM_DCS_DONE_EINT */
+#define WM8994_IM_WSEQ_DONE_EINT 0x2000 /* IM_WSEQ_DONE_EINT */
+#define WM8994_IM_WSEQ_DONE_EINT_MASK 0x2000 /* IM_WSEQ_DONE_EINT */
+#define WM8994_IM_WSEQ_DONE_EINT_SHIFT 13 /* IM_WSEQ_DONE_EINT */
+#define WM8994_IM_WSEQ_DONE_EINT_WIDTH 1 /* IM_WSEQ_DONE_EINT */
+#define WM8994_IM_FIFOS_ERR_EINT 0x1000 /* IM_FIFOS_ERR_EINT */
+#define WM8994_IM_FIFOS_ERR_EINT_MASK 0x1000 /* IM_FIFOS_ERR_EINT */
+#define WM8994_IM_FIFOS_ERR_EINT_SHIFT 12 /* IM_FIFOS_ERR_EINT */
+#define WM8994_IM_FIFOS_ERR_EINT_WIDTH 1 /* IM_FIFOS_ERR_EINT */
+#define WM8994_IM_AIF2DRC_SIG_DET_EINT 0x0800 /* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8994_IM_AIF2DRC_SIG_DET_EINT_MASK 0x0800 /* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8994_IM_AIF2DRC_SIG_DET_EINT_SHIFT 11 /* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8994_IM_AIF2DRC_SIG_DET_EINT_WIDTH 1 /* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8994_IM_AIF1DRC2_SIG_DET_EINT 0x0400 /* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_MASK 0x0400 /* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_SHIFT 10 /* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8994_IM_AIF1DRC2_SIG_DET_EINT_WIDTH 1 /* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8994_IM_AIF1DRC1_SIG_DET_EINT 0x0200 /* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_MASK 0x0200 /* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_SHIFT 9 /* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8994_IM_AIF1DRC1_SIG_DET_EINT_WIDTH 1 /* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8994_IM_SRC2_LOCK_EINT 0x0100 /* IM_SRC2_LOCK_EINT */
+#define WM8994_IM_SRC2_LOCK_EINT_MASK 0x0100 /* IM_SRC2_LOCK_EINT */
+#define WM8994_IM_SRC2_LOCK_EINT_SHIFT 8 /* IM_SRC2_LOCK_EINT */
+#define WM8994_IM_SRC2_LOCK_EINT_WIDTH 1 /* IM_SRC2_LOCK_EINT */
+#define WM8994_IM_SRC1_LOCK_EINT 0x0080 /* IM_SRC1_LOCK_EINT */
+#define WM8994_IM_SRC1_LOCK_EINT_MASK 0x0080 /* IM_SRC1_LOCK_EINT */
+#define WM8994_IM_SRC1_LOCK_EINT_SHIFT 7 /* IM_SRC1_LOCK_EINT */
+#define WM8994_IM_SRC1_LOCK_EINT_WIDTH 1 /* IM_SRC1_LOCK_EINT */
+#define WM8994_IM_FLL2_LOCK_EINT 0x0040 /* IM_FLL2_LOCK_EINT */
+#define WM8994_IM_FLL2_LOCK_EINT_MASK 0x0040 /* IM_FLL2_LOCK_EINT */
+#define WM8994_IM_FLL2_LOCK_EINT_SHIFT 6 /* IM_FLL2_LOCK_EINT */
+#define WM8994_IM_FLL2_LOCK_EINT_WIDTH 1 /* IM_FLL2_LOCK_EINT */
+#define WM8994_IM_FLL1_LOCK_EINT 0x0020 /* IM_FLL1_LOCK_EINT */
+#define WM8994_IM_FLL1_LOCK_EINT_MASK 0x0020 /* IM_FLL1_LOCK_EINT */
+#define WM8994_IM_FLL1_LOCK_EINT_SHIFT 5 /* IM_FLL1_LOCK_EINT */
+#define WM8994_IM_FLL1_LOCK_EINT_WIDTH 1 /* IM_FLL1_LOCK_EINT */
+#define WM8994_IM_MIC2_SHRT_EINT 0x0010 /* IM_MIC2_SHRT_EINT */
+#define WM8994_IM_MIC2_SHRT_EINT_MASK 0x0010 /* IM_MIC2_SHRT_EINT */
+#define WM8994_IM_MIC2_SHRT_EINT_SHIFT 4 /* IM_MIC2_SHRT_EINT */
+#define WM8994_IM_MIC2_SHRT_EINT_WIDTH 1 /* IM_MIC2_SHRT_EINT */
+#define WM8994_IM_MIC2_DET_EINT 0x0008 /* IM_MIC2_DET_EINT */
+#define WM8994_IM_MIC2_DET_EINT_MASK 0x0008 /* IM_MIC2_DET_EINT */
+#define WM8994_IM_MIC2_DET_EINT_SHIFT 3 /* IM_MIC2_DET_EINT */
+#define WM8994_IM_MIC2_DET_EINT_WIDTH 1 /* IM_MIC2_DET_EINT */
+#define WM8994_IM_MIC1_SHRT_EINT 0x0004 /* IM_MIC1_SHRT_EINT */
+#define WM8994_IM_MIC1_SHRT_EINT_MASK 0x0004 /* IM_MIC1_SHRT_EINT */
+#define WM8994_IM_MIC1_SHRT_EINT_SHIFT 2 /* IM_MIC1_SHRT_EINT */
+#define WM8994_IM_MIC1_SHRT_EINT_WIDTH 1 /* IM_MIC1_SHRT_EINT */
+#define WM8994_IM_MIC1_DET_EINT 0x0002 /* IM_MIC1_DET_EINT */
+#define WM8994_IM_MIC1_DET_EINT_MASK 0x0002 /* IM_MIC1_DET_EINT */
+#define WM8994_IM_MIC1_DET_EINT_SHIFT 1 /* IM_MIC1_DET_EINT */
+#define WM8994_IM_MIC1_DET_EINT_WIDTH 1 /* IM_MIC1_DET_EINT */
+#define WM8994_IM_TEMP_SHUT_EINT 0x0001 /* IM_TEMP_SHUT_EINT */
+#define WM8994_IM_TEMP_SHUT_EINT_MASK 0x0001 /* IM_TEMP_SHUT_EINT */
+#define WM8994_IM_TEMP_SHUT_EINT_SHIFT 0 /* IM_TEMP_SHUT_EINT */
+#define WM8994_IM_TEMP_SHUT_EINT_WIDTH 1 /* IM_TEMP_SHUT_EINT */
+
+/*
+ * R1856 (0x740) - Interrupt Control
+ */
+#define WM8994_IM_IRQ 0x0001 /* IM_IRQ */
+#define WM8994_IM_IRQ_MASK 0x0001 /* IM_IRQ */
+#define WM8994_IM_IRQ_SHIFT 0 /* IM_IRQ */
+#define WM8994_IM_IRQ_WIDTH 1 /* IM_IRQ */
+
+/*
+ * R1864 (0x748) - IRQ Debounce
+ */
+#define WM8994_TEMP_WARN_DB 0x0020 /* TEMP_WARN_DB */
+#define WM8994_TEMP_WARN_DB_MASK 0x0020 /* TEMP_WARN_DB */
+#define WM8994_TEMP_WARN_DB_SHIFT 5 /* TEMP_WARN_DB */
+#define WM8994_TEMP_WARN_DB_WIDTH 1 /* TEMP_WARN_DB */
+#define WM8994_MIC2_SHRT_DB 0x0010 /* MIC2_SHRT_DB */
+#define WM8994_MIC2_SHRT_DB_MASK 0x0010 /* MIC2_SHRT_DB */
+#define WM8994_MIC2_SHRT_DB_SHIFT 4 /* MIC2_SHRT_DB */
+#define WM8994_MIC2_SHRT_DB_WIDTH 1 /* MIC2_SHRT_DB */
+#define WM8994_MIC2_DET_DB 0x0008 /* MIC2_DET_DB */
+#define WM8994_MIC2_DET_DB_MASK 0x0008 /* MIC2_DET_DB */
+#define WM8994_MIC2_DET_DB_SHIFT 3 /* MIC2_DET_DB */
+#define WM8994_MIC2_DET_DB_WIDTH 1 /* MIC2_DET_DB */
+#define WM8994_MIC1_SHRT_DB 0x0004 /* MIC1_SHRT_DB */
+#define WM8994_MIC1_SHRT_DB_MASK 0x0004 /* MIC1_SHRT_DB */
+#define WM8994_MIC1_SHRT_DB_SHIFT 2 /* MIC1_SHRT_DB */
+#define WM8994_MIC1_SHRT_DB_WIDTH 1 /* MIC1_SHRT_DB */
+#define WM8994_MIC1_DET_DB 0x0002 /* MIC1_DET_DB */
+#define WM8994_MIC1_DET_DB_MASK 0x0002 /* MIC1_DET_DB */
+#define WM8994_MIC1_DET_DB_SHIFT 1 /* MIC1_DET_DB */
+#define WM8994_MIC1_DET_DB_WIDTH 1 /* MIC1_DET_DB */
+#define WM8994_TEMP_SHUT_DB 0x0001 /* TEMP_SHUT_DB */
+#define WM8994_TEMP_SHUT_DB_MASK 0x0001 /* TEMP_SHUT_DB */
+#define WM8994_TEMP_SHUT_DB_SHIFT 0 /* TEMP_SHUT_DB */
+#define WM8994_TEMP_SHUT_DB_WIDTH 1 /* TEMP_SHUT_DB */
+
+/*
+ * R2304 (0x900) - DSP2_Program
+ */
+#define WM8958_DSP2_ENA 0x0001 /* DSP2_ENA */
+#define WM8958_DSP2_ENA_MASK 0x0001 /* DSP2_ENA */
+#define WM8958_DSP2_ENA_SHIFT 0 /* DSP2_ENA */
+#define WM8958_DSP2_ENA_WIDTH 1 /* DSP2_ENA */
+
+/*
+ * R2305 (0x901) - DSP2_Config
+ */
+#define WM8958_MBC_SEL_MASK 0x0030 /* MBC_SEL - [5:4] */
+#define WM8958_MBC_SEL_SHIFT 4 /* MBC_SEL - [5:4] */
+#define WM8958_MBC_SEL_WIDTH 2 /* MBC_SEL - [5:4] */
+#define WM8958_MBC_ENA 0x0001 /* MBC_ENA */
+#define WM8958_MBC_ENA_MASK 0x0001 /* MBC_ENA */
+#define WM8958_MBC_ENA_SHIFT 0 /* MBC_ENA */
+#define WM8958_MBC_ENA_WIDTH 1 /* MBC_ENA */
+
+/*
+ * R2560 (0xA00) - DSP2_MagicNum
+ */
+#define WM8958_DSP2_MAGIC_NUM_MASK 0xFFFF /* DSP2_MAGIC_NUM - [15:0] */
+#define WM8958_DSP2_MAGIC_NUM_SHIFT 0 /* DSP2_MAGIC_NUM - [15:0] */
+#define WM8958_DSP2_MAGIC_NUM_WIDTH 16 /* DSP2_MAGIC_NUM - [15:0] */
+
+/*
+ * R2561 (0xA01) - DSP2_ReleaseYear
+ */
+#define WM8958_DSP2_RELEASE_YEAR_MASK 0xFFFF /* DSP2_RELEASE_YEAR - [15:0] */
+#define WM8958_DSP2_RELEASE_YEAR_SHIFT 0 /* DSP2_RELEASE_YEAR - [15:0] */
+#define WM8958_DSP2_RELEASE_YEAR_WIDTH 16 /* DSP2_RELEASE_YEAR - [15:0] */
+
+/*
+ * R2562 (0xA02) - DSP2_ReleaseMonthDay
+ */
+#define WM8958_DSP2_RELEASE_MONTH_MASK 0xFF00 /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_MONTH_SHIFT 8 /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_MONTH_WIDTH 8 /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_DAY_MASK 0x00FF /* DSP2_RELEASE_DAY - [7:0] */
+#define WM8958_DSP2_RELEASE_DAY_SHIFT 0 /* DSP2_RELEASE_DAY - [7:0] */
+#define WM8958_DSP2_RELEASE_DAY_WIDTH 8 /* DSP2_RELEASE_DAY - [7:0] */
+
+/*
+ * R2563 (0xA03) - DSP2_ReleaseTime
+ */
+#define WM8958_DSP2_RELEASE_HOURS_MASK 0xFF00 /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_HOURS_SHIFT 8 /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_HOURS_WIDTH 8 /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_MINS_MASK 0x00FF /* DSP2_RELEASE_MINS - [7:0] */
+#define WM8958_DSP2_RELEASE_MINS_SHIFT 0 /* DSP2_RELEASE_MINS - [7:0] */
+#define WM8958_DSP2_RELEASE_MINS_WIDTH 8 /* DSP2_RELEASE_MINS - [7:0] */
+
+/*
+ * R2564 (0xA04) - DSP2_VerMajMin
+ */
+#define WM8958_DSP2_MAJOR_VER_MASK 0xFF00 /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MAJOR_VER_SHIFT 8 /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MAJOR_VER_WIDTH 8 /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MINOR_VER_MASK 0x00FF /* DSP2_MINOR_VER - [7:0] */
+#define WM8958_DSP2_MINOR_VER_SHIFT 0 /* DSP2_MINOR_VER - [7:0] */
+#define WM8958_DSP2_MINOR_VER_WIDTH 8 /* DSP2_MINOR_VER - [7:0] */
+
+/*
+ * R2565 (0xA05) - DSP2_VerBuild
+ */
+#define WM8958_DSP2_BUILD_VER_MASK 0xFFFF /* DSP2_BUILD_VER - [15:0] */
+#define WM8958_DSP2_BUILD_VER_SHIFT 0 /* DSP2_BUILD_VER - [15:0] */
+#define WM8958_DSP2_BUILD_VER_WIDTH 16 /* DSP2_BUILD_VER - [15:0] */
+
+/*
+ * R2573 (0xA0D) - DSP2_ExecControl
+ */
+#define WM8958_DSP2_STOPC 0x0020 /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_MASK 0x0020 /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_SHIFT 5 /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_WIDTH 1 /* DSP2_STOPC */
+#define WM8958_DSP2_STOPS 0x0010 /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_MASK 0x0010 /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_SHIFT 4 /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_WIDTH 1 /* DSP2_STOPS */
+#define WM8958_DSP2_STOPI 0x0008 /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_MASK 0x0008 /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_SHIFT 3 /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_WIDTH 1 /* DSP2_STOPI */
+#define WM8958_DSP2_STOP 0x0004 /* DSP2_STOP */
+#define WM8958_DSP2_STOP_MASK 0x0004 /* DSP2_STOP */
+#define WM8958_DSP2_STOP_SHIFT 2 /* DSP2_STOP */
+#define WM8958_DSP2_STOP_WIDTH 1 /* DSP2_STOP */
+#define WM8958_DSP2_RUNR 0x0002 /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_MASK 0x0002 /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_SHIFT 1 /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_WIDTH 1 /* DSP2_RUNR */
+#define WM8958_DSP2_RUN 0x0001 /* DSP2_RUN */
+#define WM8958_DSP2_RUN_MASK 0x0001 /* DSP2_RUN */
+#define WM8958_DSP2_RUN_SHIFT 0 /* DSP2_RUN */
+#define WM8958_DSP2_RUN_WIDTH 1 /* DSP2_RUN */
+
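+/*
+ * A minimal sketch of how the *_MASK, *_SHIFT and *_WIDTH values above are
+ * meant to be combined when reading a field out of a 16-bit register value;
+ * the helper name below is hypothetical and not part of this header.
+ */
+#if 0	/* example only */
+static inline unsigned int wm8994_field_get(unsigned int regval,
+					    unsigned int mask,
+					    unsigned int shift)
+{
+	/* e.g. the 3-bit sidetone HPF cut-off from register 0x621:
+	 *	wm8994_field_get(val, WM8994_ST_HPF_CUT_MASK,
+	 *			 WM8994_ST_HPF_CUT_SHIFT)
+	 */
+	return (regval & mask) >> shift;
+}
+#endif
+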
+#endif
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
new file mode 100644
index 000000000..e11f4d9f1
--- /dev/null
+++ b/include/linux/mg_disk.h
@@ -0,0 +1,45 @@
+/*
+ * include/linux/mg_disk.h
+ *
+ * Private data for mflash platform driver
+ *
+ * (c) 2008 mGine Co.,LTD
+ * (c) 2008 unsik Kim <donari75@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MG_DISK_H__
+#define __MG_DISK_H__
+
+/* name for platform device */
+#define MG_DEV_NAME "mg_disk"
+
+/* names of GPIO resource */
+#define MG_RST_PIN "mg_rst"
+/* unless the device is MG_BOOT_DEV, the reset-out pin should be assigned */
+#define MG_RSTOUT_PIN "mg_rstout"
+
+/* device attribution */
+/* use mflash as boot device */
+#define MG_BOOT_DEV (1 << 0)
+/* use mflash as storage device */
+#define MG_STORAGE_DEV (1 << 1)
+/* same as MG_STORAGE_DEV, but the bootloader has already done the reset sequence */
+#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
+
+/* private driver data */
+struct mg_drv_data {
+ /* disk resource */
+ u32 use_polling;
+
+ /* device attribution */
+ u32 dev_attr;
+
+ /* internally used */
+ void *host;
+};
+
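+/*
+ * A minimal sketch of how a board file might describe an mflash device;
+ * the values below are hypothetical and only illustrate the fields above.
+ */
+#if 0	/* example only */
+static struct mg_drv_data example_mg_data = {
+	.use_polling	= 1,			/* no interrupt line wired up */
+	.dev_attr	= MG_STORAGE_DEV,	/* plain storage device */
+	/* .host is filled in by the driver itself */
+};
+#endif
+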
+#endif
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
new file mode 100644
index 000000000..d5b5f76d5
--- /dev/null
+++ b/include/linux/mic_bus.h
@@ -0,0 +1,110 @@
+/*
+ * Intel MIC Platform Software Stack (MPSS)
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Intel MIC Bus driver.
+ *
+ * This implementation is very similar to the virtio bus driver
+ * implementation @ include/linux/virtio.h.
+ */
+#ifndef _MIC_BUS_H_
+#define _MIC_BUS_H_
+/*
+ * Everything an mbus driver needs to work with any particular mbus
+ * implementation.
+ */
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+struct mbus_device_id {
+ __u32 device;
+ __u32 vendor;
+};
+
+#define MBUS_DEV_DMA_HOST 2
+#define MBUS_DEV_DMA_MIC 3
+#define MBUS_DEV_ANY_ID 0xffffffff
+
+/**
+ * mbus_device - representation of a device using mbus
+ * @mmio_va: virtual address of mmio space
+ * @hw_ops: the hardware ops supported by this device.
+ * @id: the device type identification (used to match it with a driver).
+ * @dev: underlying device used to communicate with the hardware.
+ * @index: unique position on the mbus bus
+ */
+struct mbus_device {
+ void __iomem *mmio_va;
+ struct mbus_hw_ops *hw_ops;
+ struct mbus_device_id id;
+ struct device dev;
+ int index;
+};
+
+/**
+ * mbus_driver - operations for an mbus I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @scan: the optional function to call, after a successful probe, to scan the device.
+ * @remove: the function to call when a device is removed.
+ */
+struct mbus_driver {
+ struct device_driver driver;
+ const struct mbus_device_id *id_table;
+ int (*probe)(struct mbus_device *dev);
+ void (*scan)(struct mbus_device *dev);
+ void (*remove)(struct mbus_device *dev);
+};
+
+/**
+ * struct mic_irq - opaque pointer used as cookie
+ */
+struct mic_irq;
+
+/**
+ * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus.
+ */
+struct mbus_hw_ops {
+ struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev,
+ irq_handler_t handler,
+ irq_handler_t thread_fn,
+ const char *name, void *data,
+ int intr_src);
+ void (*free_irq)(struct mbus_device *mbdev,
+ struct mic_irq *cookie, void *data);
+ void (*ack_interrupt)(struct mbus_device *mbdev, int num);
+};
+
+struct mbus_device *
+mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+ struct mbus_hw_ops *hw_ops, void __iomem *mmio_va);
+void mbus_unregister_device(struct mbus_device *mbdev);
+
+int mbus_register_driver(struct mbus_driver *drv);
+void mbus_unregister_driver(struct mbus_driver *drv);
+
+static inline struct mbus_device *dev_to_mbus(struct device *_dev)
+{
+ return container_of(_dev, struct mbus_device, dev);
+}
+
+static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv)
+{
+ return container_of(drv, struct mbus_driver, driver);
+}
+
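+/*
+ * A minimal sketch of an mbus client driver, assuming the bus matches on
+ * the MBUS_DEV_DMA_* ids declared above; all names here are hypothetical.
+ */
+#if 0	/* example only */
+static int example_mbus_probe(struct mbus_device *mbdev)
+{
+	/* mbdev->mmio_va and mbdev->hw_ops are ready for use here */
+	return 0;
+}
+
+static void example_mbus_remove(struct mbus_device *mbdev)
+{
+}
+
+static const struct mbus_device_id example_id_table[] = {
+	{ MBUS_DEV_DMA_MIC, MBUS_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct mbus_driver example_mbus_driver = {
+	.driver.name	= "example_mbus",
+	.id_table	= example_id_table,
+	.probe		= example_mbus_probe,
+	.remove		= example_mbus_remove,
+};
+
+/* registered from module init with mbus_register_driver(&example_mbus_driver) */
+#endif
+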
+#endif /* _MIC_BUS_H_ */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
new file mode 100644
index 000000000..2e5b194b9
--- /dev/null
+++ b/include/linux/micrel_phy.h
@@ -0,0 +1,46 @@
+/*
+ * include/linux/micrel_phy.h
+ *
+ * Micrel PHY IDs
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _MICREL_PHY_H
+#define _MICREL_PHY_H
+
+#define MICREL_PHY_ID_MASK 0x00fffff0
+
+#define PHY_ID_KSZ8873MLL 0x000e7237
+#define PHY_ID_KSZ9021 0x00221610
+#define PHY_ID_KSZ9021RLRN 0x00221611
+#define PHY_ID_KS8737 0x00221720
+#define PHY_ID_KSZ8021 0x00221555
+#define PHY_ID_KSZ8031 0x00221556
+#define PHY_ID_KSZ8041 0x00221510
+/* undocumented */
+#define PHY_ID_KSZ8041RNLI 0x00221537
+#define PHY_ID_KSZ8051 0x00221550
+/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */
+#define PHY_ID_KSZ8001 0x0022161A
+/* same id: KS8081, KS8091 */
+#define PHY_ID_KSZ8081 0x00221560
+#define PHY_ID_KSZ8061 0x00221570
+#define PHY_ID_KSZ9031 0x00221620
+
+#define PHY_ID_KSZ886X 0x00221430
+#define PHY_ID_KSZ8863 0x00221435
+
+/* struct phy_device dev_flags definitions */
+#define MICREL_PHY_50MHZ_CLK 0x00000001
+
+#define MICREL_KSZ9021_EXTREG_CTRL 0xB
+#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
+#define MICREL_KSZ9021_RGMII_CLK_CTRL_PAD_SCEW 0x104
+#define MICREL_KSZ9021_RGMII_RX_DATA_PAD_SCEW 0x105
+
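+/*
+ * A minimal sketch of how the IDs above are usually compared, assuming a
+ * raw 32-bit PHY identifier has already been read; the helper name is
+ * hypothetical.
+ */
+#if 0	/* example only */
+static inline int phy_id_is_ksz9021(u32 phy_id)
+{
+	return (phy_id & MICREL_PHY_ID_MASK) ==
+	       (PHY_ID_KSZ9021 & MICREL_PHY_ID_MASK);
+}
+#endif
+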
+#endif /* _MICREL_PHY_H */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
new file mode 100644
index 000000000..cac1c0904
--- /dev/null
+++ b/include/linux/migrate.h
@@ -0,0 +1,101 @@
+#ifndef _LINUX_MIGRATE_H
+#define _LINUX_MIGRATE_H
+
+#include <linux/mm.h>
+#include <linux/mempolicy.h>
+#include <linux/migrate_mode.h>
+
+typedef struct page *new_page_t(struct page *page, unsigned long private,
+ int **reason);
+typedef void free_page_t(struct page *page, unsigned long private);
+
+/*
+ * Return values from address_space_operations.migratepage():
+ * - negative errno on page migration failure;
+ * - zero on page migration success;
+ */
+#define MIGRATEPAGE_SUCCESS 0
+
+enum migrate_reason {
+ MR_COMPACTION,
+ MR_MEMORY_FAILURE,
+ MR_MEMORY_HOTPLUG,
+ MR_SYSCALL, /* also applies to cpusets */
+ MR_MEMPOLICY_MBIND,
+ MR_NUMA_MISPLACED,
+ MR_CMA
+};
+
+#ifdef CONFIG_MIGRATION
+
+extern void putback_movable_pages(struct list_head *l);
+extern int migrate_page(struct address_space *,
+ struct page *, struct page *, enum migrate_mode);
+extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+ unsigned long private, enum migrate_mode mode, int reason);
+
+extern int migrate_prep(void);
+extern int migrate_prep_local(void);
+extern void migrate_page_copy(struct page *newpage, struct page *page);
+extern int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page);
+extern int migrate_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ struct buffer_head *head, enum migrate_mode mode,
+ int extra_count);
+#else
+
+static inline void putback_movable_pages(struct list_head *l) {}
+static inline int migrate_pages(struct list_head *l, new_page_t new,
+ free_page_t free, unsigned long private, enum migrate_mode mode,
+ int reason)
+ { return -ENOSYS; }
+
+static inline int migrate_prep(void) { return -ENOSYS; }
+static inline int migrate_prep_local(void) { return -ENOSYS; }
+
+static inline void migrate_page_copy(struct page *newpage,
+ struct page *page) {}
+
+static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
+ struct page *newpage, struct page *page)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern int migrate_misplaced_page(struct page *page,
+ struct vm_area_struct *vma, int node);
+#else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+ return false;
+}
+static inline int migrate_misplaced_page(struct page *page,
+ struct vm_area_struct *vma, int node)
+{
+ return -EAGAIN; /* can't migrate now */
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ pmd_t *pmd, pmd_t entry,
+ unsigned long address,
+ struct page *page, int node);
+#else
+static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ pmd_t *pmd, pmd_t entry,
+ unsigned long address,
+ struct page *page, int node)
+{
+ return -EAGAIN;
+}
+#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
+
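+/*
+ * A minimal sketch of the migrate_pages() calling convention, assuming the
+ * caller has already isolated the pages onto a private list; the callback
+ * and list names are hypothetical.
+ */
+#if 0	/* example only */
+static struct page *example_new_page(struct page *page, unsigned long private,
+				     int **reason)
+{
+	/* hand back a freshly allocated destination page */
+	return alloc_page(GFP_HIGHUSER_MOVABLE);
+}
+
+/*
+ *	err = migrate_pages(&pagelist, example_new_page, NULL, 0,
+ *			    MIGRATE_SYNC, MR_SYSCALL);
+ *	if (err)
+ *		putback_movable_pages(&pagelist);
+ */
+#endif
+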
+#endif /* _LINUX_MIGRATE_H */
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
new file mode 100644
index 000000000..ebf3d89a3
--- /dev/null
+++ b/include/linux/migrate_mode.h
@@ -0,0 +1,16 @@
+#ifndef MIGRATE_MODE_H_INCLUDED
+#define MIGRATE_MODE_H_INCLUDED
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ * on most operations but not ->writepage as the potential stall time
+ * is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+ MIGRATE_ASYNC,
+ MIGRATE_SYNC_LIGHT,
+ MIGRATE_SYNC,
+};
+
+#endif /* MIGRATE_MODE_H_INCLUDED */
diff --git a/include/linux/mii.h b/include/linux/mii.h
new file mode 100644
index 000000000..47492c963
--- /dev/null
+++ b/include/linux/mii.h
@@ -0,0 +1,339 @@
+/*
+ * linux/mii.h: definitions for MII-compatible transceivers
+ * Originally drivers/net/sunhme.h.
+ *
+ * Copyright (C) 1996, 1999, 2001 David S. Miller (davem@redhat.com)
+ */
+#ifndef __LINUX_MII_H__
+#define __LINUX_MII_H__
+
+
+#include <linux/if.h>
+#include <uapi/linux/mii.h>
+
+struct ethtool_cmd;
+
+struct mii_if_info {
+ int phy_id;
+ int advertising;
+ int phy_id_mask;
+ int reg_num_mask;
+
+ unsigned int full_duplex : 1; /* is full duplex? */
+ unsigned int force_media : 1; /* is autoneg. disabled? */
+ unsigned int supports_gmii : 1; /* are GMII registers supported? */
+
+ struct net_device *dev;
+ int (*mdio_read) (struct net_device *dev, int phy_id, int location);
+ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val);
+};
+
+extern int mii_link_ok (struct mii_if_info *mii);
+extern int mii_nway_restart (struct mii_if_info *mii);
+extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd);
+extern int mii_check_gmii_support(struct mii_if_info *mii);
+extern void mii_check_link (struct mii_if_info *mii);
+extern unsigned int mii_check_media (struct mii_if_info *mii,
+ unsigned int ok_to_print,
+ unsigned int init_media);
+extern int generic_mii_ioctl(struct mii_if_info *mii_if,
+ struct mii_ioctl_data *mii_data, int cmd,
+ unsigned int *duplex_changed);
+
+
+static inline struct mii_ioctl_data *if_mii(struct ifreq *rq)
+{
+ return (struct mii_ioctl_data *) &rq->ifr_ifru;
+}
+
+/**
+ * mii_nway_result
+ * @negotiated: value of MII ANAR and'd with ANLPAR
+ *
+ * Given a set of MII abilities, checks each bit and returns the
+ * currently supported media, in the priority order defined by
+ * IEEE 802.3u. We use LPA_xxx constants but note this is not the
+ * value of LPA solely, as described above.
+ *
+ * The one exception to IEEE 802.3u is that 100baseT4 is placed
+ * between 100T-full and 100T-half. If your phy does not support
+ * 100T4 this is fine. If your phy places 100T4 elsewhere in the
+ * priority order, you will need to roll your own function.
+ */
+static inline unsigned int mii_nway_result (unsigned int negotiated)
+{
+ unsigned int ret;
+
+ if (negotiated & LPA_100FULL)
+ ret = LPA_100FULL;
+ else if (negotiated & LPA_100BASE4)
+ ret = LPA_100BASE4;
+ else if (negotiated & LPA_100HALF)
+ ret = LPA_100HALF;
+ else if (negotiated & LPA_10FULL)
+ ret = LPA_10FULL;
+ else
+ ret = LPA_10HALF;
+
+ return ret;
+}
+
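+/*
+ * A minimal usage sketch, assuming the local and link-partner abilities are
+ * read through the mii_if_info mdio_read hook; the function name is
+ * hypothetical.
+ */
+#if 0	/* example only */
+static unsigned int example_resolve_media(struct mii_if_info *mii)
+{
+	unsigned int advertise, lpa;
+
+	advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+	lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+
+	/* and-ing ANAR with ANLPAR yields the negotiated abilities */
+	return mii_nway_result(advertise & lpa);
+}
+#endif
+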
+/**
+ * mii_duplex
+ * @duplex_lock: Non-zero if duplex is locked at full
+ * @negotiated: value of MII ANAR and'd with ANLPAR
+ *
+ * A small helper function for a common case. Returns one
+ * if the media is operating or locked at full duplex, and
+ * returns zero otherwise.
+ */
+static inline unsigned int mii_duplex (unsigned int duplex_lock,
+ unsigned int negotiated)
+{
+ if (duplex_lock)
+ return 1;
+ if (mii_nway_result(negotiated) & LPA_DUPLEX)
+ return 1;
+ return 0;
+}
+
+/**
+ * ethtool_adv_to_mii_adv_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADVERTISE register.
+ */
+static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv)
+{
+ u32 result = 0;
+
+ if (ethadv & ADVERTISED_10baseT_Half)
+ result |= ADVERTISE_10HALF;
+ if (ethadv & ADVERTISED_10baseT_Full)
+ result |= ADVERTISE_10FULL;
+ if (ethadv & ADVERTISED_100baseT_Half)
+ result |= ADVERTISE_100HALF;
+ if (ethadv & ADVERTISED_100baseT_Full)
+ result |= ADVERTISE_100FULL;
+ if (ethadv & ADVERTISED_Pause)
+ result |= ADVERTISE_PAUSE_CAP;
+ if (ethadv & ADVERTISED_Asym_Pause)
+ result |= ADVERTISE_PAUSE_ASYM;
+
+ return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_t
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits
+ * to ethtool advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_t(u32 adv)
+{
+ u32 result = 0;
+
+ if (adv & ADVERTISE_10HALF)
+ result |= ADVERTISED_10baseT_Half;
+ if (adv & ADVERTISE_10FULL)
+ result |= ADVERTISED_10baseT_Full;
+ if (adv & ADVERTISE_100HALF)
+ result |= ADVERTISED_100baseT_Half;
+ if (adv & ADVERTISE_100FULL)
+ result |= ADVERTISED_100baseT_Full;
+ if (adv & ADVERTISE_PAUSE_CAP)
+ result |= ADVERTISED_Pause;
+ if (adv & ADVERTISE_PAUSE_ASYM)
+ result |= ADVERTISED_Asym_Pause;
+
+ return result;
+}
+
+/**
+ * ethtool_adv_to_mii_ctrl1000_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000T mode.
+ */
+static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
+{
+ u32 result = 0;
+
+ if (ethadv & ADVERTISED_1000baseT_Half)
+ result |= ADVERTISE_1000HALF;
+ if (ethadv & ADVERTISED_1000baseT_Full)
+ result |= ADVERTISE_1000FULL;
+
+ return result;
+}
+
+/**
+ * mii_ctrl1000_to_ethtool_adv_t
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_ctrl1000_to_ethtool_adv_t(u32 adv)
+{
+ u32 result = 0;
+
+ if (adv & ADVERTISE_1000HALF)
+ result |= ADVERTISED_1000baseT_Half;
+ if (adv & ADVERTISE_1000FULL)
+ result |= ADVERTISED_1000baseT_Full;
+
+ return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_t
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-T mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa)
+{
+ u32 result = 0;
+
+ if (lpa & LPA_LPACK)
+ result |= ADVERTISED_Autoneg;
+
+ return result | mii_adv_to_ethtool_adv_t(lpa);
+}
+
+/**
+ * mii_stat1000_to_ethtool_lpa_t
+ * @lpa: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000
+ * bits, when in 1000Base-T mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
+{
+ u32 result = 0;
+
+ if (lpa & LPA_1000HALF)
+ result |= ADVERTISED_1000baseT_Half;
+ if (lpa & LPA_1000FULL)
+ result |= ADVERTISED_1000baseT_Full;
+
+ return result;
+}
+
+/**
+ * ethtool_adv_to_mii_adv_x
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_CTRL1000 register when in 1000Base-X mode.
+ */
+static inline u32 ethtool_adv_to_mii_adv_x(u32 ethadv)
+{
+ u32 result = 0;
+
+ if (ethadv & ADVERTISED_1000baseT_Half)
+ result |= ADVERTISE_1000XHALF;
+ if (ethadv & ADVERTISED_1000baseT_Full)
+ result |= ADVERTISE_1000XFULL;
+ if (ethadv & ADVERTISED_Pause)
+ result |= ADVERTISE_1000XPAUSE;
+ if (ethadv & ADVERTISED_Asym_Pause)
+ result |= ADVERTISE_1000XPSE_ASYM;
+
+ return result;
+}
+
+/**
+ * mii_adv_to_ethtool_adv_x
+ * @adv: value of the MII_CTRL1000 register
+ *
+ * A small helper function that translates MII_CTRL1000
+ * bits, when in 1000Base-X mode, to ethtool
+ * advertisement settings.
+ */
+static inline u32 mii_adv_to_ethtool_adv_x(u32 adv)
+{
+ u32 result = 0;
+
+ if (adv & ADVERTISE_1000XHALF)
+ result |= ADVERTISED_1000baseT_Half;
+ if (adv & ADVERTISE_1000XFULL)
+ result |= ADVERTISED_1000baseT_Full;
+ if (adv & ADVERTISE_1000XPAUSE)
+ result |= ADVERTISED_Pause;
+ if (adv & ADVERTISE_1000XPSE_ASYM)
+ result |= ADVERTISED_Asym_Pause;
+
+ return result;
+}
+
+/**
+ * mii_lpa_to_ethtool_lpa_x
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA
+ * bits, when in 1000Base-X mode, to ethtool
+ * LP advertisement settings.
+ */
+static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+{
+ u32 result = 0;
+
+ if (lpa & LPA_LPACK)
+ result |= ADVERTISED_Autoneg;
+
+ return result | mii_adv_to_ethtool_adv_x(lpa);
+}
+
+/**
+ * mii_advertise_flowctrl - get flow control advertisement flags
+ * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
+ */
+static inline u16 mii_advertise_flowctrl(int cap)
+{
+ u16 adv = 0;
+
+ if (cap & FLOW_CTRL_RX)
+ adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ if (cap & FLOW_CTRL_TX)
+ adv ^= ADVERTISE_PAUSE_ASYM;
+
+ return adv;
+}
+
+/**
+ * mii_resolve_flowctrl_fdx
+ * @lcladv: value of MII ADVERTISE register
+ * @rmtadv: value of MII LPA register
+ *
+ * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3
+ */
+static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
+{
+ u8 cap = 0;
+
+ if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ } else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
+ if (lcladv & ADVERTISE_PAUSE_CAP)
+ cap = FLOW_CTRL_RX;
+ else if (rmtadv & ADVERTISE_PAUSE_CAP)
+ cap = FLOW_CTRL_TX;
+ }
+
+ return cap;
+}
+
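+/*
+ * A minimal sketch tying the two flow-control helpers together; rmtadv is
+ * assumed to hold the link partner's MII_LPA value and the function name is
+ * hypothetical.
+ */
+#if 0	/* example only */
+static u8 example_resolve_pause(u16 rmtadv)
+{
+	u16 lcladv = mii_advertise_flowctrl(FLOW_CTRL_RX | FLOW_CTRL_TX);
+
+	/* returns FLOW_CTRL_RX and/or FLOW_CTRL_TX per table 28B-3 */
+	return mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+}
+#endif
+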
+#endif /* __LINUX_MII_H__ */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
new file mode 100644
index 000000000..819077c32
--- /dev/null
+++ b/include/linux/miscdevice.h
@@ -0,0 +1,75 @@
+#ifndef _LINUX_MISCDEVICE_H
+#define _LINUX_MISCDEVICE_H
+#include <linux/major.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+/*
+ * These allocations are managed by device@lanana.org. If you use an
+ * entry that is not assigned here, your entry may well be moved and
+ * reassigned, or set dynamic if a fixed value is not justified.
+ */
+
+#define PSMOUSE_MINOR 1
+#define MS_BUSMOUSE_MINOR 2 /* unused */
+#define ATIXL_BUSMOUSE_MINOR 3 /* unused */
+/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */
+#define ATARIMOUSE_MINOR 5 /* unused */
+#define SUN_MOUSE_MINOR 6 /* unused */
+#define APOLLO_MOUSE_MINOR 7 /* unused */
+#define PC110PAD_MINOR 9 /* unused */
+/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */
+#define WATCHDOG_MINOR 130 /* Watchdog timer */
+#define TEMP_MINOR 131 /* Temperature Sensor */
+#define RTC_MINOR 135
+#define EFI_RTC_MINOR 136 /* EFI Time services */
+#define VHCI_MINOR 137
+#define SUN_OPENPROM_MINOR 139
+#define DMAPI_MINOR 140 /* unused */
+#define NVRAM_MINOR 144
+#define SGI_MMTIMER 153
+#define STORE_QUEUE_MINOR 155 /* unused */
+#define I2O_MINOR 166
+#define MICROCODE_MINOR 184
+#define VFIO_MINOR 196
+#define TUN_MINOR 200
+#define CUSE_MINOR 203
+#define MWAVE_MINOR 219 /* ACP/Mwave Modem */
+#define MPT_MINOR 220
+#define MPT2SAS_MINOR 221
+#define MPT3SAS_MINOR 222
+#define UINPUT_MINOR 223
+#define MISC_MCELOG_MINOR 227
+#define HPET_MINOR 228
+#define FUSE_MINOR 229
+#define KVM_MINOR 232
+#define BTRFS_MINOR 234
+#define AUTOFS_MINOR 235
+#define MAPPER_CTRL_MINOR 236
+#define LOOP_CTRL_MINOR 237
+#define VHOST_NET_MINOR 238
+#define UHID_MINOR 239
+#define MISC_DYNAMIC_MINOR 255
+
+struct device;
+struct attribute_group;
+
+struct miscdevice {
+ int minor;
+ const char *name;
+ const struct file_operations *fops;
+ struct list_head list;
+ struct device *parent;
+ struct device *this_device;
+ const struct attribute_group **groups;
+ const char *nodename;
+ umode_t mode;
+};
+
+extern int misc_register(struct miscdevice *misc);
+extern int misc_deregister(struct miscdevice *misc);
+
+#define MODULE_ALIAS_MISCDEV(minor) \
+ MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
+ "-" __stringify(minor))
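+
+/*
+ * A minimal sketch of registering a misc character device on a dynamic
+ * minor; the device name and file_operations are hypothetical.
+ */
+#if 0	/* example only */
+static struct miscdevice example_miscdev = {
+	.minor	= MISC_DYNAMIC_MINOR,
+	.name	= "example",
+	.fops	= &example_fops,	/* some hypothetical file_operations */
+};
+
+/* module init:  err = misc_register(&example_miscdev);	*/
+/* module exit:  misc_deregister(&example_miscdev);		*/
+#endif
+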
+#endif
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
new file mode 100644
index 000000000..f62e7cf22
--- /dev/null
+++ b/include/linux/mlx4/cmd.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_CMD_H
+#define MLX4_CMD_H
+
+#include <linux/dma-mapping.h>
+#include <linux/if_link.h>
+
+enum {
+ /* initialization and general commands */
+ MLX4_CMD_SYS_EN = 0x1,
+ MLX4_CMD_SYS_DIS = 0x2,
+ MLX4_CMD_MAP_FA = 0xfff,
+ MLX4_CMD_UNMAP_FA = 0xffe,
+ MLX4_CMD_RUN_FW = 0xff6,
+ MLX4_CMD_MOD_STAT_CFG = 0x34,
+ MLX4_CMD_QUERY_DEV_CAP = 0x3,
+ MLX4_CMD_QUERY_FW = 0x4,
+ MLX4_CMD_ENABLE_LAM = 0xff8,
+ MLX4_CMD_DISABLE_LAM = 0xff7,
+ MLX4_CMD_QUERY_DDR = 0x5,
+ MLX4_CMD_QUERY_ADAPTER = 0x6,
+ MLX4_CMD_INIT_HCA = 0x7,
+ MLX4_CMD_CLOSE_HCA = 0x8,
+ MLX4_CMD_INIT_PORT = 0x9,
+ MLX4_CMD_CLOSE_PORT = 0xa,
+ MLX4_CMD_QUERY_HCA = 0xb,
+ MLX4_CMD_QUERY_PORT = 0x43,
+ MLX4_CMD_SENSE_PORT = 0x4d,
+ MLX4_CMD_HW_HEALTH_CHECK = 0x50,
+ MLX4_CMD_SET_PORT = 0xc,
+ MLX4_CMD_SET_NODE = 0x5a,
+ MLX4_CMD_QUERY_FUNC = 0x56,
+ MLX4_CMD_ACCESS_DDR = 0x2e,
+ MLX4_CMD_MAP_ICM = 0xffa,
+ MLX4_CMD_UNMAP_ICM = 0xff9,
+ MLX4_CMD_MAP_ICM_AUX = 0xffc,
+ MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
+ MLX4_CMD_SET_ICM_SIZE = 0xffd,
+ MLX4_CMD_ACCESS_REG = 0x3b,
+ MLX4_CMD_ALLOCATE_VPP = 0x80,
+ MLX4_CMD_SET_VPORT_QOS = 0x81,
+
+	/* master notifies FW when a slave's FLR has finished */
+ MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+ MLX4_CMD_VIRT_PORT_MAP = 0x5c,
+ MLX4_CMD_GET_OP_REQ = 0x59,
+
+ /* TPT commands */
+ MLX4_CMD_SW2HW_MPT = 0xd,
+ MLX4_CMD_QUERY_MPT = 0xe,
+ MLX4_CMD_HW2SW_MPT = 0xf,
+ MLX4_CMD_READ_MTT = 0x10,
+ MLX4_CMD_WRITE_MTT = 0x11,
+ MLX4_CMD_SYNC_TPT = 0x2f,
+
+ /* EQ commands */
+ MLX4_CMD_MAP_EQ = 0x12,
+ MLX4_CMD_SW2HW_EQ = 0x13,
+ MLX4_CMD_HW2SW_EQ = 0x14,
+ MLX4_CMD_QUERY_EQ = 0x15,
+
+ /* CQ commands */
+ MLX4_CMD_SW2HW_CQ = 0x16,
+ MLX4_CMD_HW2SW_CQ = 0x17,
+ MLX4_CMD_QUERY_CQ = 0x18,
+ MLX4_CMD_MODIFY_CQ = 0x2c,
+
+ /* SRQ commands */
+ MLX4_CMD_SW2HW_SRQ = 0x35,
+ MLX4_CMD_HW2SW_SRQ = 0x36,
+ MLX4_CMD_QUERY_SRQ = 0x37,
+ MLX4_CMD_ARM_SRQ = 0x40,
+
+ /* QP/EE commands */
+ MLX4_CMD_RST2INIT_QP = 0x19,
+ MLX4_CMD_INIT2RTR_QP = 0x1a,
+ MLX4_CMD_RTR2RTS_QP = 0x1b,
+ MLX4_CMD_RTS2RTS_QP = 0x1c,
+ MLX4_CMD_SQERR2RTS_QP = 0x1d,
+ MLX4_CMD_2ERR_QP = 0x1e,
+ MLX4_CMD_RTS2SQD_QP = 0x1f,
+ MLX4_CMD_SQD2SQD_QP = 0x38,
+ MLX4_CMD_SQD2RTS_QP = 0x20,
+ MLX4_CMD_2RST_QP = 0x21,
+ MLX4_CMD_QUERY_QP = 0x22,
+ MLX4_CMD_INIT2INIT_QP = 0x2d,
+ MLX4_CMD_SUSPEND_QP = 0x32,
+ MLX4_CMD_UNSUSPEND_QP = 0x33,
+ MLX4_CMD_UPDATE_QP = 0x61,
+ /* special QP and management commands */
+ MLX4_CMD_CONF_SPECIAL_QP = 0x23,
+ MLX4_CMD_MAD_IFC = 0x24,
+ MLX4_CMD_MAD_DEMUX = 0x203,
+
+ /* multicast commands */
+ MLX4_CMD_READ_MCG = 0x25,
+ MLX4_CMD_WRITE_MCG = 0x26,
+ MLX4_CMD_MGID_HASH = 0x27,
+
+ /* miscellaneous commands */
+ MLX4_CMD_DIAG_RPRT = 0x30,
+ MLX4_CMD_NOP = 0x31,
+ MLX4_CMD_CONFIG_DEV = 0x3a,
+ MLX4_CMD_ACCESS_MEM = 0x2e,
+ MLX4_CMD_SET_VEP = 0x52,
+
+ /* Ethernet specific commands */
+ MLX4_CMD_SET_VLAN_FLTR = 0x47,
+ MLX4_CMD_SET_MCAST_FLTR = 0x48,
+ MLX4_CMD_DUMP_ETH_STATS = 0x49,
+
+ /* Communication channel commands */
+ MLX4_CMD_ARM_COMM_CHANNEL = 0x57,
+ MLX4_CMD_GEN_EQE = 0x58,
+
+ /* virtual commands */
+ MLX4_CMD_ALLOC_RES = 0xf00,
+ MLX4_CMD_FREE_RES = 0xf01,
+ MLX4_CMD_MCAST_ATTACH = 0xf05,
+ MLX4_CMD_UCAST_ATTACH = 0xf06,
+ MLX4_CMD_PROMISC = 0xf08,
+ MLX4_CMD_QUERY_FUNC_CAP = 0xf0a,
+ MLX4_CMD_QP_ATTACH = 0xf0b,
+
+ /* debug commands */
+ MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
+ MLX4_CMD_SET_DEBUG_MSG = 0x2b,
+
+ /* statistics commands */
+	MLX4_CMD_QUERY_IF_STAT	 = 0x54,
+	MLX4_CMD_SET_IF_STAT	 = 0x55,
+
+ /* register/delete flow steering network rules */
+ MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
+ MLX4_QP_FLOW_STEERING_DETACH = 0x66,
+ MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
+
+ /* Update and read QCN parameters */
+ MLX4_CMD_CONGESTION_CTRL_OPCODE = 0x68,
+};
+
+enum {
+ MLX4_CMD_TIME_CLASS_A = 60000,
+ MLX4_CMD_TIME_CLASS_B = 60000,
+ MLX4_CMD_TIME_CLASS_C = 60000,
+};
+
+enum {
+ /* virtual to physical port mapping opcode modifiers */
+ MLX4_GET_PORT_VIRT2PHY = 0x0,
+ MLX4_SET_PORT_VIRT2PHY = 0x1,
+};
+
+enum {
+ MLX4_MAILBOX_SIZE = 4096,
+ MLX4_ACCESS_MEM_ALIGN = 256,
+};
+
+enum {
+ /* Set port opcode modifiers */
+ MLX4_SET_PORT_IB_OPCODE = 0x0,
+ MLX4_SET_PORT_ETH_OPCODE = 0x1,
+ MLX4_SET_PORT_BEACON_OPCODE = 0x4,
+};
+
+enum {
+ /* Set port Ethernet input modifiers */
+ MLX4_SET_PORT_GENERAL = 0x0,
+ MLX4_SET_PORT_RQP_CALC = 0x1,
+ MLX4_SET_PORT_MAC_TABLE = 0x2,
+ MLX4_SET_PORT_VLAN_TABLE = 0x3,
+ MLX4_SET_PORT_PRIO_MAP = 0x4,
+ MLX4_SET_PORT_GID_TABLE = 0x5,
+ MLX4_SET_PORT_PRIO2TC = 0x8,
+ MLX4_SET_PORT_SCHEDULER = 0x9,
+ MLX4_SET_PORT_VXLAN = 0xB
+};
+
+enum {
+ MLX4_CMD_MAD_DEMUX_CONFIG = 0,
+ MLX4_CMD_MAD_DEMUX_QUERY_STATE = 1,
+ MLX4_CMD_MAD_DEMUX_QUERY_RESTR = 2, /* Query mad demux restrictions */
+};
+
+enum {
+ MLX4_CMD_WRAPPED,
+ MLX4_CMD_NATIVE
+};
+
+/*
+ * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP -
+ * Receive checksum value is reported in CQE also for non TCP/UDP packets.
+ *
+ * MLX4_RX_CSUM_MODE_L4 -
+ * L4_CSUM bit in CQE, which indicates whether or not L4 checksum
+ * was validated correctly, is supported.
+ *
+ * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP -
+ * IP_OK CQE's field is supported also for non TCP/UDP IP packets.
+ *
+ * MLX4_RX_CSUM_MODE_MULTI_VLAN -
+ * Receive Checksum offload is supported for packets with more than 2 vlan headers.
+ */
+enum mlx4_rx_csum_mode {
+ MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0,
+ MLX4_RX_CSUM_MODE_L4 = 1UL << 1,
+ MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2,
+ MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3
+};
+
+struct mlx4_config_dev_params {
+ u16 vxlan_udp_dport;
+ u8 rx_csum_flags_port_1;
+ u8 rx_csum_flags_port_2;
+};
+
+enum mlx4_en_congestion_control_algorithm {
+ MLX4_CTRL_ALGO_802_1_QAU_REACTION_POINT = 0,
+};
+
+enum mlx4_en_congestion_control_opmod {
+ MLX4_CONGESTION_CONTROL_GET_PARAMS,
+ MLX4_CONGESTION_CONTROL_GET_STATISTICS,
+ MLX4_CONGESTION_CONTROL_SET_PARAMS = 4,
+};
+
+struct mlx4_dev;
+
+struct mlx4_cmd_mailbox {
+ void *buf;
+ dma_addr_t dma;
+};
+
+int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout, int native);
+
+/* Invoke a command with no output parameter */
+static inline int mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u32 in_modifier,
+ u8 op_modifier, u16 op, unsigned long timeout,
+ int native)
+{
+ return __mlx4_cmd(dev, in_param, NULL, 0, in_modifier,
+ op_modifier, op, timeout, native);
+}
+
+/* Invoke a command with an output mailbox */
+static inline int mlx4_cmd_box(struct mlx4_dev *dev, u64 in_param, u64 out_param,
+ u32 in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout, int native)
+{
+ return __mlx4_cmd(dev, in_param, &out_param, 0, in_modifier,
+ op_modifier, op, timeout, native);
+}
+
+/*
+ * Invoke a command with an immediate output parameter (and copy the
+ * output into the caller's out_param pointer after the command
+ * executes).
+ */
+static inline int mlx4_cmd_imm(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ u32 in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout, int native)
+{
+ return __mlx4_cmd(dev, in_param, out_param, 1, in_modifier,
+ op_modifier, op, timeout, native);
+}
+
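+/*
+ * A minimal usage sketch, assuming a bound mlx4_dev; the modifiers below are
+ * placeholders and MLX4_CMD_NOP merely exercises the command interface.
+ *
+ *	err = mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_NOP,
+ *		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ */
+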
+struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev);
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox);
+
+u32 mlx4_comm_get_version(void);
+int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
+ int max_tx_rate);
+int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
+int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
+int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
+int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
+ struct mlx4_config_dev_params *params);
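+
+/*
+ * Usage sketch (illustrative only, not part of the original header): the
+ * per-port RX checksum flags returned by mlx4_config_dev_retrieval() are
+ * tested against the mlx4_rx_csum_mode bits defined above.
+ */
+#if 0	/* example, not part of the API */
+static bool example_port1_csums_non_tcp_udp(struct mlx4_dev *dev)
+{
+	struct mlx4_config_dev_params params;
+
+	if (mlx4_config_dev_retrieval(dev, &params))
+		return false;
+
+	return params.rx_csum_flags_port_1 &
+	       MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP;
+}
+#endif
+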
+void mlx4_cmd_wake_completions(struct mlx4_dev *dev);
+void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev);
+/*
+ * mlx4_get_slave_default_vlan -
+ * returns true if the slave is in VST (default VLAN) mode;
+ * in that case the VLAN and QoS values are returned through the vlan and
+ * qos pointers (when they are not NULL).
+ */
+bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
+ u16 *vlan, u8 *qos);
+
+#define MLX4_COMM_GET_IF_REV(cmd_chan_ver) (u8)((cmd_chan_ver) >> 8)
+#define COMM_CHAN_EVENT_INTERNAL_ERR (1 << 17)
+
+#endif /* MLX4_CMD_H */
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
new file mode 100644
index 000000000..e7ecc12a1
--- /dev/null
+++ b/include/linux/mlx4/cq.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_CQ_H
+#define MLX4_CQ_H
+
+#include <linux/types.h>
+#include <uapi/linux/if_ether.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/doorbell.h>
+
+struct mlx4_cqe {
+ __be32 vlan_my_qpn;
+ __be32 immed_rss_invalid;
+ __be32 g_mlpath_rqpn;
+ __be16 sl_vid;
+ union {
+ struct {
+ __be16 rlid;
+ __be16 status;
+ u8 ipv6_ext_mask;
+ u8 badfcs_enc;
+ };
+ u8 smac[ETH_ALEN];
+ };
+ __be32 byte_cnt;
+ __be16 wqe_index;
+ __be16 checksum;
+ u8 reserved[3];
+ u8 owner_sr_opcode;
+};
+
+struct mlx4_err_cqe {
+ __be32 my_qpn;
+ u32 reserved1[5];
+ __be16 wqe_index;
+ u8 vendor_err_syndrome;
+ u8 syndrome;
+ u8 reserved2[3];
+ u8 owner_sr_opcode;
+};
+
+struct mlx4_ts_cqe {
+ __be32 vlan_my_qpn;
+ __be32 immed_rss_invalid;
+ __be32 g_mlpath_rqpn;
+ __be32 timestamp_hi;
+ __be16 status;
+ u8 ipv6_ext_mask;
+ u8 badfcs_enc;
+ __be32 byte_cnt;
+ __be16 wqe_index;
+ __be16 checksum;
+ u8 reserved;
+ __be16 timestamp_lo;
+ u8 owner_sr_opcode;
+} __packed;
+
+enum {
+ MLX4_CQE_L2_TUNNEL_IPOK = 1 << 31,
+ MLX4_CQE_VLAN_PRESENT_MASK = 1 << 29,
+ MLX4_CQE_L2_TUNNEL = 1 << 27,
+ MLX4_CQE_L2_TUNNEL_CSUM = 1 << 26,
+ MLX4_CQE_L2_TUNNEL_IPV4 = 1 << 25,
+
+ MLX4_CQE_QPN_MASK = 0xffffff,
+ MLX4_CQE_VID_MASK = 0xfff,
+};
+
+enum {
+ MLX4_CQE_OWNER_MASK = 0x80,
+ MLX4_CQE_IS_SEND_MASK = 0x40,
+ MLX4_CQE_OPCODE_MASK = 0x1f
+};
+
+enum {
+ MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
+ MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
+ MLX4_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
+ MLX4_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
+ MLX4_CQE_SYNDROME_MW_BIND_ERR = 0x06,
+ MLX4_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
+ MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
+ MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
+ MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
+ MLX4_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
+ MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
+ MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
+ MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
+};
+
+enum {
+ MLX4_CQE_STATUS_IPV4 = 1 << 6,
+ MLX4_CQE_STATUS_IPV4F = 1 << 7,
+ MLX4_CQE_STATUS_IPV6 = 1 << 8,
+ MLX4_CQE_STATUS_IPV4OPT = 1 << 9,
+ MLX4_CQE_STATUS_TCP = 1 << 10,
+ MLX4_CQE_STATUS_UDP = 1 << 11,
+ MLX4_CQE_STATUS_IPOK = 1 << 12,
+};
+
+enum {
+ MLX4_CQE_LLC = 1,
+ MLX4_CQE_SNAP = 1 << 1,
+ MLX4_CQE_BAD_FCS = 1 << 4,
+};
+
+static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
+ void __iomem *uar_page,
+ spinlock_t *doorbell_lock)
+{
+ __be32 doorbell[2];
+ u32 sn;
+ u32 ci;
+
+ sn = cq->arm_sn & 3;
+ ci = cq->cons_index & 0xffffff;
+
+ *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
+
+ /*
+ * Make sure that the doorbell record in host memory is
+ * written before ringing the doorbell via PCI MMIO.
+ */
+ wmb();
+
+ doorbell[0] = cpu_to_be32(sn << 28 | cmd | cq->cqn);
+ doorbell[1] = cpu_to_be32(ci);
+
+ mlx4_write64(doorbell, uar_page + MLX4_CQ_DOORBELL, doorbell_lock);
+}
+
+static inline void mlx4_cq_set_ci(struct mlx4_cq *cq)
+{
+ *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
+}
+
+enum {
+ MLX4_CQ_DB_REQ_NOT_SOL = 1 << 24,
+ MLX4_CQ_DB_REQ_NOT = 2 << 24
+};
+
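+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * re-arming a CQ for the next completion event uses MLX4_CQ_DB_REQ_NOT
+ * (or MLX4_CQ_DB_REQ_NOT_SOL for solicited-only events); the doorbell
+ * lock argument only matters on 32-bit builds.
+ */
+#if 0	/* example, not part of the API */
+static inline void example_rearm_cq(struct mlx4_cq *cq,
+				    void __iomem *uar_page,
+				    spinlock_t *doorbell_lock)
+{
+	mlx4_cq_arm(cq, MLX4_CQ_DB_REQ_NOT, uar_page, doorbell_lock);
+}
+#endif
+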
+int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
+ u16 count, u16 period);
+int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
+ int entries, struct mlx4_mtt *mtt);
+
+#endif /* MLX4_CQ_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
new file mode 100644
index 000000000..83e80ab94
--- /dev/null
+++ b/include/linux/mlx4/device.h
@@ -0,0 +1,1488 @@
+/*
+ * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_DEVICE_H
+#define MLX4_DEVICE_H
+
+#include <linux/if_ether.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/radix-tree.h>
+#include <linux/cpu_rmap.h>
+#include <linux/crash_dump.h>
+
+#include <linux/atomic.h>
+
+#include <linux/timecounter.h>
+
+#define MAX_MSIX_P_PORT 17
+#define MAX_MSIX 64
+#define MSIX_LEGACY_SZ 4
+#define MIN_MSIX_P_PORT 5
+
+#define MLX4_MAX_100M_UNITS_VAL 255 /*
+						 * workaround: can't set values
+						 * greater than this value when
+						 * using 100 Mbps units.
+ */
+#define MLX4_RATELIMIT_100M_UNITS 3 /* 100 Mbps */
+#define MLX4_RATELIMIT_1G_UNITS 4 /* 1 Gbps */
+#define MLX4_RATELIMIT_DEFAULT 0x00ff
+
+#define MLX4_ROCE_MAX_GIDS 128
+#define MLX4_ROCE_PF_GIDS 16
+
+enum {
+ MLX4_FLAG_MSI_X = 1 << 0,
+ MLX4_FLAG_OLD_PORT_CMDS = 1 << 1,
+ MLX4_FLAG_MASTER = 1 << 2,
+ MLX4_FLAG_SLAVE = 1 << 3,
+ MLX4_FLAG_SRIOV = 1 << 4,
+ MLX4_FLAG_OLD_REG_MAC = 1 << 6,
+ MLX4_FLAG_BONDED = 1 << 7
+};
+
+enum {
+ MLX4_PORT_CAP_IS_SM = 1 << 1,
+ MLX4_PORT_CAP_DEV_MGMT_SUP = 1 << 19,
+};
+
+enum {
+ MLX4_MAX_PORTS = 2,
+ MLX4_MAX_PORT_PKEYS = 128
+};
+
+/* Base qkey for use in SRIOV tunnel-qp/proxy-qp communication.
+ * These qkeys must not be allowed for general use. This is a 64k range,
+ * and to test for violation, we use the mask (to protect against future
+ * changes).
+ */
+#define MLX4_RESERVED_QKEY_BASE (0xFFFF0000)
+#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000)
+
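+/*
+ * Illustrative check (not part of the original header): a qkey lies in the
+ * reserved tunnel/proxy range iff masking it with MLX4_RESERVED_QKEY_MASK
+ * yields MLX4_RESERVED_QKEY_BASE.
+ */
+#if 0	/* example, not part of the API */
+static inline bool example_qkey_is_reserved(u32 qkey)
+{
+	return (qkey & MLX4_RESERVED_QKEY_MASK) == MLX4_RESERVED_QKEY_BASE;
+}
+#endif
+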
+enum {
+ MLX4_BOARD_ID_LEN = 64
+};
+
+enum {
+ MLX4_MAX_NUM_PF = 16,
+ MLX4_MAX_NUM_VF = 126,
+ MLX4_MAX_NUM_VF_P_PORT = 64,
+ MLX4_MFUNC_MAX = 128,
+ MLX4_MAX_EQ_NUM = 1024,
+ MLX4_MFUNC_EQ_NUM = 4,
+ MLX4_MFUNC_MAX_EQES = 8,
+ MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
+};
+
+/* The driver supports 3 different methods to manage traffic steering:
+ * - Device managed	- High level API for IB and Eth flow steering. The
+ *			  FW manages the flow steering tables.
+ * - B0 steering mode	- Common low level API for IB and (if supported) Eth.
+ * - A0 steering mode	- Limited low level API for Eth. In case of IB,
+ *			  B0 mode is in use.
+ */
+enum {
+ MLX4_STEERING_MODE_A0,
+ MLX4_STEERING_MODE_B0,
+ MLX4_STEERING_MODE_DEVICE_MANAGED
+};
+
+enum {
+ MLX4_STEERING_DMFS_A0_DEFAULT,
+ MLX4_STEERING_DMFS_A0_DYNAMIC,
+ MLX4_STEERING_DMFS_A0_STATIC,
+ MLX4_STEERING_DMFS_A0_DISABLE,
+ MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
+};
+
+static inline const char *mlx4_steering_mode_str(int steering_mode)
+{
+ switch (steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ return "A0 steering";
+
+ case MLX4_STEERING_MODE_B0:
+ return "B0 steering";
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return "Device managed flow steering";
+
+ default:
+		return "Unrecognized steering mode";
+ }
+}
+
+enum {
+ MLX4_TUNNEL_OFFLOAD_MODE_NONE,
+ MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
+};
+
+enum {
+ MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
+ MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
+ MLX4_DEV_CAP_FLAG_UD = 1LL << 2,
+ MLX4_DEV_CAP_FLAG_XRC = 1LL << 3,
+ MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6,
+ MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7,
+ MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
+ MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
+ MLX4_DEV_CAP_FLAG_DPDP = 1LL << 12,
+ MLX4_DEV_CAP_FLAG_BLH = 1LL << 15,
+ MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1LL << 16,
+ MLX4_DEV_CAP_FLAG_APM = 1LL << 17,
+ MLX4_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
+ MLX4_DEV_CAP_FLAG_RAW_MCAST = 1LL << 19,
+ MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1LL << 20,
+ MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21,
+ MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30,
+ MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32,
+ MLX4_DEV_CAP_FLAG_FCS_KEEP = 1LL << 34,
+ MLX4_DEV_CAP_FLAG_WOL_PORT1 = 1LL << 37,
+ MLX4_DEV_CAP_FLAG_WOL_PORT2 = 1LL << 38,
+ MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40,
+ MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
+ MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
+ MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
+ MLX4_DEV_CAP_FLAG_RSS_IP_FRAG = 1LL << 52,
+ MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
+ MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
+ MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
+ MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61,
+ MLX4_DEV_CAP_FLAG_64B_CQE = 1LL << 62
+};
+
+enum {
+ MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0,
+ MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1,
+ MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
+ MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3,
+ MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN = 1LL << 4,
+ MLX4_DEV_CAP_FLAG2_TS = 1LL << 5,
+ MLX4_DEV_CAP_FLAG2_VLAN_CONTROL = 1LL << 6,
+ MLX4_DEV_CAP_FLAG2_FSM = 1LL << 7,
+ MLX4_DEV_CAP_FLAG2_UPDATE_QP = 1LL << 8,
+ MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9,
+ MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
+ MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
+ MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
+ MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13,
+ MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14,
+ MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15,
+ MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
+ MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
+ MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
+ MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19,
+ MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
+ MLX4_DEV_CAP_FLAG2_PORT_REMAP = 1LL << 21,
+ MLX4_DEV_CAP_FLAG2_QCN = 1LL << 22,
+ MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT = 1LL << 23,
+ MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN = 1LL << 24,
+ MLX4_DEV_CAP_FLAG2_QOS_VPP = 1LL << 25,
+ MLX4_DEV_CAP_FLAG2_ETS_CFG = 1LL << 26,
+ MLX4_DEV_CAP_FLAG2_PORT_BEACON = 1LL << 27,
+ MLX4_DEV_CAP_FLAG2_IGNORE_FCS = 1LL << 28,
+};
+
+enum {
+ MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0,
+ MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
+};
+
+enum {
+ MLX4_VF_CAP_FLAG_RESET = 1 << 0
+};
+
+/* bit enums for an 8-bit flags field indicating special use
+ * QPs which require special handling in qp_reserve_range.
+ * Currently, this only includes QPs used by the ETH interface,
+ * where we expect to use blueflame. These QPs must not have
+ * bits 6 and 7 set in their qp number.
+ *
+ * This enum may use only bits 0..7.
+ */
+enum {
+ MLX4_RESERVE_A0_QP = 1 << 6,
+ MLX4_RESERVE_ETH_BF_QP = 1 << 7,
+};
+
+enum {
+ MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0,
+ MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1,
+ MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2,
+ MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3
+};
+
+enum {
+ MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
+};
+
+enum {
+ MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
+ MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
+ MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
+};
+
+
+#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
+
+enum {
+ MLX4_BMME_FLAG_WIN_TYPE_2B = 1 << 1,
+ MLX4_BMME_FLAG_LOCAL_INV = 1 << 6,
+ MLX4_BMME_FLAG_REMOTE_INV = 1 << 7,
+ MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
+ MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
+ MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
+ MLX4_BMME_FLAG_PORT_REMAP = 1 << 24,
+ MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
+};
+
+enum {
+ MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP
+};
+
+enum mlx4_event {
+ MLX4_EVENT_TYPE_COMP = 0x00,
+ MLX4_EVENT_TYPE_PATH_MIG = 0x01,
+ MLX4_EVENT_TYPE_COMM_EST = 0x02,
+ MLX4_EVENT_TYPE_SQ_DRAINED = 0x03,
+ MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13,
+ MLX4_EVENT_TYPE_SRQ_LIMIT = 0x14,
+ MLX4_EVENT_TYPE_CQ_ERROR = 0x04,
+ MLX4_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
+ MLX4_EVENT_TYPE_EEC_CATAS_ERROR = 0x06,
+ MLX4_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
+ MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
+ MLX4_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+ MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08,
+ MLX4_EVENT_TYPE_PORT_CHANGE = 0x09,
+ MLX4_EVENT_TYPE_EQ_OVERFLOW = 0x0f,
+ MLX4_EVENT_TYPE_ECC_DETECT = 0x0e,
+ MLX4_EVENT_TYPE_CMD = 0x0a,
+ MLX4_EVENT_TYPE_VEP_UPDATE = 0x19,
+ MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
+ MLX4_EVENT_TYPE_OP_REQUIRED = 0x1a,
+ MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
+ MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
+ MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
+ MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e,
+ MLX4_EVENT_TYPE_NONE = 0xff,
+};
+
+enum {
+ MLX4_PORT_CHANGE_SUBTYPE_DOWN = 1,
+ MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4
+};
+
+enum {
+ MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1,
+ MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2,
+};
+
+enum {
+ MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
+};
+
+enum slave_port_state {
+ SLAVE_PORT_DOWN = 0,
+ SLAVE_PENDING_UP,
+ SLAVE_PORT_UP,
+};
+
+enum slave_port_gen_event {
+ SLAVE_PORT_GEN_EVENT_DOWN = 0,
+ SLAVE_PORT_GEN_EVENT_UP,
+ SLAVE_PORT_GEN_EVENT_NONE,
+};
+
+enum slave_port_state_event {
+ MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
+ MLX4_PORT_STATE_DEV_EVENT_PORT_UP,
+ MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID,
+ MLX4_PORT_STATE_IB_EVENT_GID_INVALID,
+};
+
+enum {
+ MLX4_PERM_LOCAL_READ = 1 << 10,
+ MLX4_PERM_LOCAL_WRITE = 1 << 11,
+ MLX4_PERM_REMOTE_READ = 1 << 12,
+ MLX4_PERM_REMOTE_WRITE = 1 << 13,
+ MLX4_PERM_ATOMIC = 1 << 14,
+ MLX4_PERM_BIND_MW = 1 << 15,
+ MLX4_PERM_MASK = 0xFC00
+};
+
+enum {
+ MLX4_OPCODE_NOP = 0x00,
+ MLX4_OPCODE_SEND_INVAL = 0x01,
+ MLX4_OPCODE_RDMA_WRITE = 0x08,
+ MLX4_OPCODE_RDMA_WRITE_IMM = 0x09,
+ MLX4_OPCODE_SEND = 0x0a,
+ MLX4_OPCODE_SEND_IMM = 0x0b,
+ MLX4_OPCODE_LSO = 0x0e,
+ MLX4_OPCODE_RDMA_READ = 0x10,
+ MLX4_OPCODE_ATOMIC_CS = 0x11,
+ MLX4_OPCODE_ATOMIC_FA = 0x12,
+ MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14,
+ MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15,
+ MLX4_OPCODE_BIND_MW = 0x18,
+ MLX4_OPCODE_FMR = 0x19,
+ MLX4_OPCODE_LOCAL_INVAL = 0x1b,
+ MLX4_OPCODE_CONFIG_CMD = 0x1f,
+
+ MLX4_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
+ MLX4_RECV_OPCODE_SEND = 0x01,
+ MLX4_RECV_OPCODE_SEND_IMM = 0x02,
+ MLX4_RECV_OPCODE_SEND_INVAL = 0x03,
+
+ MLX4_CQE_OPCODE_ERROR = 0x1e,
+ MLX4_CQE_OPCODE_RESIZE = 0x16,
+};
+
+enum {
+ MLX4_STAT_RATE_OFFSET = 5
+};
+
+enum mlx4_protocol {
+ MLX4_PROT_IB_IPV6 = 0,
+ MLX4_PROT_ETH,
+ MLX4_PROT_IB_IPV4,
+ MLX4_PROT_FCOE
+};
+
+enum {
+ MLX4_MTT_FLAG_PRESENT = 1
+};
+
+enum mlx4_qp_region {
+ MLX4_QP_REGION_FW = 0,
+ MLX4_QP_REGION_RSS_RAW_ETH,
+ MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH,
+ MLX4_QP_REGION_ETH_ADDR,
+ MLX4_QP_REGION_FC_ADDR,
+ MLX4_QP_REGION_FC_EXCH,
+ MLX4_NUM_QP_REGION
+};
+
+enum mlx4_port_type {
+ MLX4_PORT_TYPE_NONE = 0,
+ MLX4_PORT_TYPE_IB = 1,
+ MLX4_PORT_TYPE_ETH = 2,
+ MLX4_PORT_TYPE_AUTO = 3
+};
+
+enum mlx4_special_vlan_idx {
+ MLX4_NO_VLAN_IDX = 0,
+ MLX4_VLAN_MISS_IDX,
+ MLX4_VLAN_REGULAR
+};
+
+enum mlx4_steer_type {
+ MLX4_MC_STEER = 0,
+ MLX4_UC_STEER,
+ MLX4_NUM_STEERS
+};
+
+enum {
+ MLX4_NUM_FEXCH = 64 * 1024,
+};
+
+enum {
+ MLX4_MAX_FAST_REG_PAGES = 511,
+};
+
+enum {
+ MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
+ MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
+ MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
+};
+
+/* Port mgmt change event handling */
+enum {
+ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0,
+ MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1,
+ MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2,
+ MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3,
+ MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
+};
+
+enum {
+ MLX4_DEVICE_STATE_UP = 1 << 0,
+ MLX4_DEVICE_STATE_INTERNAL_ERROR = 1 << 1,
+};
+
+enum {
+ MLX4_INTERFACE_STATE_UP = 1 << 0,
+ MLX4_INTERFACE_STATE_DELETION = 1 << 1,
+};
+
+#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
+ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
+
+enum mlx4_module_id {
+ MLX4_MODULE_ID_SFP = 0x3,
+ MLX4_MODULE_ID_QSFP = 0xC,
+ MLX4_MODULE_ID_QSFP_PLUS = 0xD,
+ MLX4_MODULE_ID_QSFP28 = 0x11,
+};
+
+enum {			/* rate limit units */
+ MLX4_QP_RATE_LIMIT_NONE = 0,
+ MLX4_QP_RATE_LIMIT_KBS = 1,
+ MLX4_QP_RATE_LIMIT_MBS = 2,
+ MLX4_QP_RATE_LIMIT_GBS = 3
+};
+
+struct mlx4_rate_limit_caps {
+ u16 num_rates; /* Number of different rates */
+ u8 min_unit;
+ u16 min_val;
+ u8 max_unit;
+ u16 max_val;
+};
+
+static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
+{
+ return (major << 32) | (minor << 16) | subminor;
+}
+
+struct mlx4_phys_caps {
+ u32 gid_phys_table_len[MLX4_MAX_PORTS + 1];
+ u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1];
+ u32 num_phys_eqs;
+ u32 base_sqpn;
+ u32 base_proxy_sqpn;
+ u32 base_tunnel_sqpn;
+};
+
+struct mlx4_caps {
+ u64 fw_ver;
+ u32 function;
+ int num_ports;
+ int vl_cap[MLX4_MAX_PORTS + 1];
+ int ib_mtu_cap[MLX4_MAX_PORTS + 1];
+ __be32 ib_port_def_cap[MLX4_MAX_PORTS + 1];
+ u64 def_mac[MLX4_MAX_PORTS + 1];
+ int eth_mtu_cap[MLX4_MAX_PORTS + 1];
+ int gid_table_len[MLX4_MAX_PORTS + 1];
+ int pkey_table_len[MLX4_MAX_PORTS + 1];
+ int trans_type[MLX4_MAX_PORTS + 1];
+ int vendor_oui[MLX4_MAX_PORTS + 1];
+ int wavelength[MLX4_MAX_PORTS + 1];
+ u64 trans_code[MLX4_MAX_PORTS + 1];
+ int local_ca_ack_delay;
+ int num_uars;
+ u32 uar_page_size;
+ int bf_reg_size;
+ int bf_regs_per_page;
+ int max_sq_sg;
+ int max_rq_sg;
+ int num_qps;
+ int max_wqes;
+ int max_sq_desc_sz;
+ int max_rq_desc_sz;
+ int max_qp_init_rdma;
+ int max_qp_dest_rdma;
+ u32 *qp0_qkey;
+ u32 *qp0_proxy;
+ u32 *qp1_proxy;
+ u32 *qp0_tunnel;
+ u32 *qp1_tunnel;
+ int num_srqs;
+ int max_srq_wqes;
+ int max_srq_sge;
+ int reserved_srqs;
+ int num_cqs;
+ int max_cqes;
+ int reserved_cqs;
+ int num_sys_eqs;
+ int num_eqs;
+ int reserved_eqs;
+ int num_comp_vectors;
+ int comp_pool;
+ int num_mpts;
+ int max_fmr_maps;
+ int num_mtts;
+ int fmr_reserved_mtts;
+ int reserved_mtts;
+ int reserved_mrws;
+ int reserved_uars;
+ int num_mgms;
+ int num_amgms;
+ int reserved_mcgs;
+ int num_qp_per_mgm;
+ int steering_mode;
+ int dmfs_high_steer_mode;
+ int fs_log_max_ucast_qp_range_size;
+ int num_pds;
+ int reserved_pds;
+ int max_xrcds;
+ int reserved_xrcds;
+ int mtt_entry_sz;
+ u32 max_msg_sz;
+ u32 page_size_cap;
+ u64 flags;
+ u64 flags2;
+ u32 bmme_flags;
+ u32 reserved_lkey;
+ u16 stat_rate_support;
+ u8 port_width_cap[MLX4_MAX_PORTS + 1];
+ int max_gso_sz;
+ int max_rss_tbl_sz;
+ int reserved_qps_cnt[MLX4_NUM_QP_REGION];
+ int reserved_qps;
+ int reserved_qps_base[MLX4_NUM_QP_REGION];
+ int log_num_macs;
+ int log_num_vlans;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+ u8 supported_type[MLX4_MAX_PORTS + 1];
+ u8 suggested_type[MLX4_MAX_PORTS + 1];
+ u8 default_sense[MLX4_MAX_PORTS + 1];
+ u32 port_mask[MLX4_MAX_PORTS + 1];
+ enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
+ u32 max_counters;
+ u8 port_ib_mtu[MLX4_MAX_PORTS + 1];
+ u16 sqp_demux;
+ u32 eqe_size;
+ u32 cqe_size;
+ u8 eqe_factor;
+ u32 userspace_caps; /* userspace must be aware of these */
+ u32 function_caps; /* VFs must be aware of these */
+ u16 hca_core_clock;
+ u64 phys_port_id[MLX4_MAX_PORTS + 1];
+ int tunnel_offload_mode;
+ u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+ u8 alloc_res_qp_mask;
+ u32 dmfs_high_rate_qpn_base;
+ u32 dmfs_high_rate_qpn_range;
+ u32 vf_caps;
+ struct mlx4_rate_limit_caps rl_caps;
+};
+
+struct mlx4_buf_list {
+ void *buf;
+ dma_addr_t map;
+};
+
+struct mlx4_buf {
+ struct mlx4_buf_list direct;
+ struct mlx4_buf_list *page_list;
+ int nbufs;
+ int npages;
+ int page_shift;
+};
+
+struct mlx4_mtt {
+ u32 offset;
+ int order;
+ int page_shift;
+};
+
+enum {
+ MLX4_DB_PER_PAGE = PAGE_SIZE / 4
+};
+
+struct mlx4_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
+ DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
+ unsigned long *bits[2];
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
+struct mlx4_ib_user_db_page;
+
+struct mlx4_db {
+ __be32 *db;
+ union {
+ struct mlx4_db_pgdir *pgdir;
+ struct mlx4_ib_user_db_page *user_page;
+ } u;
+ dma_addr_t dma;
+ int index;
+ int order;
+};
+
+struct mlx4_hwq_resources {
+ struct mlx4_db db;
+ struct mlx4_mtt mtt;
+ struct mlx4_buf buf;
+};
+
+struct mlx4_mr {
+ struct mlx4_mtt mtt;
+ u64 iova;
+ u64 size;
+ u32 key;
+ u32 pd;
+ u32 access;
+ int enabled;
+};
+
+enum mlx4_mw_type {
+ MLX4_MW_TYPE_1 = 1,
+ MLX4_MW_TYPE_2 = 2,
+};
+
+struct mlx4_mw {
+ u32 key;
+ u32 pd;
+ enum mlx4_mw_type type;
+ int enabled;
+};
+
+struct mlx4_fmr {
+ struct mlx4_mr mr;
+ struct mlx4_mpt_entry *mpt;
+ __be64 *mtts;
+ dma_addr_t dma_handle;
+ int max_pages;
+ int max_maps;
+ int maps;
+ u8 page_shift;
+};
+
+struct mlx4_uar {
+ unsigned long pfn;
+ int index;
+ struct list_head bf_list;
+ unsigned free_bf_bmap;
+ void __iomem *map;
+ void __iomem *bf_map;
+};
+
+struct mlx4_bf {
+ unsigned int offset;
+ int buf_size;
+ struct mlx4_uar *uar;
+ void __iomem *reg;
+};
+
+struct mlx4_cq {
+ void (*comp) (struct mlx4_cq *);
+ void (*event) (struct mlx4_cq *, enum mlx4_event);
+
+ struct mlx4_uar *uar;
+
+ u32 cons_index;
+
+ u16 irq;
+ __be32 *set_ci_db;
+ __be32 *arm_db;
+ int arm_sn;
+
+ int cqn;
+ unsigned vector;
+
+ atomic_t refcount;
+ struct completion free;
+ struct {
+ struct list_head list;
+ void (*comp)(struct mlx4_cq *);
+ void *priv;
+ } tasklet_ctx;
+ int reset_notify_added;
+ struct list_head reset_notify;
+};
+
+struct mlx4_qp {
+ void (*event) (struct mlx4_qp *, enum mlx4_event);
+
+ int qpn;
+
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct mlx4_srq {
+ void (*event) (struct mlx4_srq *, enum mlx4_event);
+
+ int srqn;
+ int max;
+ int max_gs;
+ int wqe_shift;
+
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct mlx4_av {
+ __be32 port_pd;
+ u8 reserved1;
+ u8 g_slid;
+ __be16 dlid;
+ u8 reserved2;
+ u8 gid_index;
+ u8 stat_rate;
+ u8 hop_limit;
+ __be32 sl_tclass_flowlabel;
+ u8 dgid[16];
+};
+
+struct mlx4_eth_av {
+ __be32 port_pd;
+ u8 reserved1;
+ u8 smac_idx;
+ u16 reserved2;
+ u8 reserved3;
+ u8 gid_index;
+ u8 stat_rate;
+ u8 hop_limit;
+ __be32 sl_tclass_flowlabel;
+ u8 dgid[16];
+ u8 s_mac[6];
+ u8 reserved4[2];
+ __be16 vlan;
+ u8 mac[ETH_ALEN];
+};
+
+union mlx4_ext_av {
+ struct mlx4_av ib;
+ struct mlx4_eth_av eth;
+};
+
+struct mlx4_counter {
+ u8 reserved1[3];
+ u8 counter_mode;
+ __be32 num_ifc;
+ u32 reserved2[2];
+ __be64 rx_frames;
+ __be64 rx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bytes;
+};
+
+struct mlx4_quotas {
+ int qp;
+ int cq;
+ int srq;
+ int mpt;
+ int mtt;
+ int counter;
+ int xrcd;
+};
+
+struct mlx4_vf_dev {
+ u8 min_port;
+ u8 n_ports;
+};
+
+struct mlx4_dev_persistent {
+ struct pci_dev *pdev;
+ struct mlx4_dev *dev;
+ int nvfs[MLX4_MAX_PORTS + 1];
+ int num_vfs;
+ enum mlx4_port_type curr_port_type[MLX4_MAX_PORTS + 1];
+ enum mlx4_port_type curr_port_poss_type[MLX4_MAX_PORTS + 1];
+ struct work_struct catas_work;
+ struct workqueue_struct *catas_wq;
+ struct mutex device_state_mutex; /* protect HW state */
+ u8 state;
+ struct mutex interface_state_mutex; /* protect SW state */
+ u8 interface_state;
+};
+
+struct mlx4_dev {
+ struct mlx4_dev_persistent *persist;
+ unsigned long flags;
+ unsigned long num_slaves;
+ struct mlx4_caps caps;
+ struct mlx4_phys_caps phys_caps;
+ struct mlx4_quotas quotas;
+ struct radix_tree_root qp_table_tree;
+ u8 rev_id;
+ char board_id[MLX4_BOARD_ID_LEN];
+ int numa_node;
+ int oper_log_mgm_entry_size;
+ u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
+ u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
+ struct mlx4_vf_dev *dev_vfs;
+};
+
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __packed comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __packed cmd;
+ struct {
+ __be32 qpn;
+ } __packed qp;
+ struct {
+ __be32 srqn;
+ } __packed srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __packed cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __packed port_change;
+ struct {
+ #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
+ u32 reserved;
+ u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ } __packed comm_channel_arm;
+ struct {
+ u8 port;
+ u8 reserved[3];
+ __be64 mac;
+ } __packed mac_update;
+ struct {
+ __be32 slave_id;
+ } __packed flr_event;
+ struct {
+ __be16 current_temperature;
+ __be16 warning_threshold;
+ } __packed warming;
+ struct {
+ u8 reserved[3];
+ u8 port;
+ union {
+ struct {
+ __be16 mstr_sm_lid;
+ __be16 port_lid;
+ __be32 changed_attr;
+ u8 reserved[3];
+ u8 mstr_sm_sl;
+ __be64 gid_prefix;
+ } __packed port_info;
+ struct {
+ __be32 block_ptr;
+ __be32 tbl_entries_mask;
+ } __packed tbl_change_info;
+ } params;
+ } __packed port_mgmt_change;
+ struct {
+ u8 reserved[3];
+ u8 port;
+ u32 reserved1[5];
+ } __packed bad_cable;
+ } event;
+ u8 slave_id;
+ u8 reserved3[2];
+ u8 owner;
+} __packed;
+
+struct mlx4_init_port_param {
+ int set_guid0;
+ int set_node_guid;
+ int set_si_guid;
+ u16 mtu;
+ int port_width_cap;
+ u16 vl_cap;
+ u16 max_gid;
+ u16 max_pkey;
+ u64 guid0;
+ u64 node_guid;
+ u64 si_guid;
+};
+
+#define MAD_IFC_DATA_SZ 192
+/* MAD IFC Mailbox */
+struct mlx4_mad_ifc {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ __be16 class_specific;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 mkey;
+ __be16 dr_slid;
+ __be16 dr_dlid;
+ u8 reserved[28];
+ u8 data[MAD_IFC_DATA_SZ];
+} __packed;
+
+#define mlx4_foreach_port(port, dev, type) \
+ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
+ if ((type) == (dev)->caps.port_mask[(port)])
+
+#define mlx4_foreach_non_ib_transport_port(port, dev) \
+ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
+ if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
+
+#define mlx4_foreach_ib_transport_port(port, dev) \
+ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
+ if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
+ ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+
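+/*
+ * Usage sketch (illustrative only, not part of the original header): the
+ * iterators above expand to a for/if pair, so they are used like a plain
+ * loop over ports 1..num_ports.
+ */
+#if 0	/* example, not part of the API */
+static inline int example_count_eth_ports(struct mlx4_dev *dev)
+{
+	int port, n = 0;
+
+	mlx4_foreach_port(port, dev, MLX4_PORT_TYPE_ETH)
+		n++;
+
+	return n;
+}
+#endif
+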
+#define MLX4_INVALID_SLAVE_ID 0xFF
+
+void handle_port_mgmt_change_event(struct work_struct *work);
+
+static inline int mlx4_master_func_num(struct mlx4_dev *dev)
+{
+ return dev->caps.function;
+}
+
+static inline int mlx4_is_master(struct mlx4_dev *dev)
+{
+ return dev->flags & MLX4_FLAG_MASTER;
+}
+
+static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
+{
+ return dev->phys_caps.base_sqpn + 8 +
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev);
+}
+
+static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
+{
+ return (qpn < dev->phys_caps.base_sqpn + 8 +
+ 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) &&
+ qpn >= dev->phys_caps.base_sqpn) ||
+ (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]);
+}
+
+static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
+{
+ int guest_proxy_base = dev->phys_caps.base_proxy_sqpn + slave * 8;
+
+ if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8)
+ return 1;
+
+ return 0;
+}
+
+static inline int mlx4_is_mfunc(struct mlx4_dev *dev)
+{
+ return dev->flags & (MLX4_FLAG_SLAVE | MLX4_FLAG_MASTER);
+}
+
+static inline int mlx4_is_slave(struct mlx4_dev *dev)
+{
+ return dev->flags & MLX4_FLAG_SLAVE;
+}
+
+static inline int mlx4_is_eth(struct mlx4_dev *dev, int port)
+{
+ return dev->caps.port_type[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
+}
+
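+/*
+ * Illustrative helper (not part of the original header): capability and
+ * firmware checks typically combine the flag bits and the mlx4_fw_ver()
+ * encoding defined above.  The minimum version used here is a placeholder.
+ */
+#if 0	/* example, not part of the API */
+static inline bool example_supports_timestamping(struct mlx4_dev *dev)
+{
+	return (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) &&
+	       dev->caps.fw_ver >= mlx4_fw_ver(2, 11, 0);
+}
+#endif
+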
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+ struct mlx4_buf *buf, gfp_t gfp);
+void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
+static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
+{
+ if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ return buf->direct.buf + offset;
+ else
+ return buf->page_list[offset >> PAGE_SHIFT].buf +
+ (offset & (PAGE_SIZE - 1));
+}
+
+int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
+void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
+int mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
+void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);
+
+int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar);
+void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar);
+int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node);
+void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf);
+
+int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
+ struct mlx4_mtt *mtt);
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
+u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ int npages, int page_shift, struct mlx4_mr *mr);
+int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr);
+int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr);
+int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
+ struct mlx4_mw *mw);
+void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw);
+int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw);
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list);
+int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_buf *buf, gfp_t gfp);
+
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
+ gfp_t gfp);
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+ int size, int max_direct);
+void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
+ int size);
+
+int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
+ struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
+ unsigned vector, int collapsed, int timestamp_en);
+void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base, u8 flags);
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+
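+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * Ethernet TX rings that intend to use blueflame reserve their QP range
+ * with the MLX4_RESERVE_ETH_BF_QP flag defined earlier in this file.
+ */
+#if 0	/* example, not part of the API */
+static int example_reserve_bf_qp(struct mlx4_dev *dev, int *qpn)
+{
+	return mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_ETH_BF_QP);
+}
+#endif
+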
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
+ gfp_t gfp);
+void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);
+void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq);
+int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq, int limit_watermark);
+int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq, int *limit_watermark);
+
+int mlx4_INIT_PORT(struct mlx4_dev *dev, int port);
+int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port);
+
+int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot);
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol prot);
+int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ u8 port, int block_mcast_loopback,
+ enum mlx4_protocol protocol, u64 *reg_id);
+int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
+ enum mlx4_protocol protocol, u64 reg_id);
+
+enum {
+ MLX4_DOMAIN_UVERBS = 0x1000,
+ MLX4_DOMAIN_ETHTOOL = 0x2000,
+ MLX4_DOMAIN_RFS = 0x3000,
+ MLX4_DOMAIN_NIC = 0x5000,
+};
+
+enum mlx4_net_trans_rule_id {
+ MLX4_NET_TRANS_RULE_ID_ETH = 0,
+ MLX4_NET_TRANS_RULE_ID_IB,
+ MLX4_NET_TRANS_RULE_ID_IPV6,
+ MLX4_NET_TRANS_RULE_ID_IPV4,
+ MLX4_NET_TRANS_RULE_ID_TCP,
+ MLX4_NET_TRANS_RULE_ID_UDP,
+ MLX4_NET_TRANS_RULE_ID_VXLAN,
+ MLX4_NET_TRANS_RULE_NUM, /* should be last */
+};
+
+extern const u16 __sw_id_hw[];
+
+static inline int map_hw_to_sw_id(u16 header_id)
+{
+	int i;
+
+	for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) {
+ if (header_id == __sw_id_hw[i])
+ return i;
+ }
+ return -EINVAL;
+}
+
+enum mlx4_net_trans_promisc_mode {
+ MLX4_FS_REGULAR = 1,
+ MLX4_FS_ALL_DEFAULT,
+ MLX4_FS_MC_DEFAULT,
+ MLX4_FS_UC_SNIFFER,
+ MLX4_FS_MC_SNIFFER,
+ MLX4_FS_MODE_NUM, /* should be last */
+};
+
+struct mlx4_spec_eth {
+ u8 dst_mac[ETH_ALEN];
+ u8 dst_mac_msk[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u8 src_mac_msk[ETH_ALEN];
+ u8 ether_type_enable;
+ __be16 ether_type;
+ __be16 vlan_id_msk;
+ __be16 vlan_id;
+};
+
+struct mlx4_spec_tcp_udp {
+ __be16 dst_port;
+ __be16 dst_port_msk;
+ __be16 src_port;
+ __be16 src_port_msk;
+};
+
+struct mlx4_spec_ipv4 {
+ __be32 dst_ip;
+ __be32 dst_ip_msk;
+ __be32 src_ip;
+ __be32 src_ip_msk;
+};
+
+struct mlx4_spec_ib {
+ __be32 l3_qpn;
+ __be32 qpn_msk;
+ u8 dst_gid[16];
+ u8 dst_gid_msk[16];
+};
+
+struct mlx4_spec_vxlan {
+ __be32 vni;
+ __be32 vni_mask;
+};
+
+struct mlx4_spec_list {
+ struct list_head list;
+ enum mlx4_net_trans_rule_id id;
+ union {
+ struct mlx4_spec_eth eth;
+ struct mlx4_spec_ib ib;
+ struct mlx4_spec_ipv4 ipv4;
+ struct mlx4_spec_tcp_udp tcp_udp;
+ struct mlx4_spec_vxlan vxlan;
+ };
+};
+
+enum mlx4_net_trans_hw_rule_queue {
+ MLX4_NET_TRANS_Q_FIFO,
+ MLX4_NET_TRANS_Q_LIFO,
+};
+
+struct mlx4_net_trans_rule {
+ struct list_head list;
+ enum mlx4_net_trans_hw_rule_queue queue_mode;
+ bool exclusive;
+ bool allow_loopback;
+ enum mlx4_net_trans_promisc_mode promisc_mode;
+ u8 port;
+ u16 priority;
+ u32 qpn;
+};
+
+struct mlx4_net_trans_rule_hw_ctrl {
+ __be16 prio;
+ u8 type;
+ u8 flags;
+ u8 rsvd1;
+ u8 funcid;
+ u8 vep;
+ u8 port;
+ __be32 qpn;
+ __be32 rsvd2;
+};
+
+struct mlx4_net_trans_rule_hw_ib {
+ u8 size;
+ u8 rsvd1;
+ __be16 id;
+ u32 rsvd2;
+ __be32 l3_qpn;
+ __be32 qpn_mask;
+ u8 dst_gid[16];
+ u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ u8 rsvd1[6];
+ u8 dst_mac[6];
+ u16 rsvd2;
+ u8 dst_mac_msk[6];
+ u16 rsvd3;
+ u8 src_mac[6];
+ u16 rsvd4;
+ u8 src_mac_msk[6];
+ u8 rsvd5;
+ u8 ether_type_enable;
+ __be16 ether_type;
+ __be16 vlan_tag_msk;
+ __be16 vlan_tag;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be16 rsvd1[3];
+ __be16 dst_port;
+ __be16 rsvd2;
+ __be16 dst_port_msk;
+ __be16 rsvd3;
+ __be16 src_port;
+ __be16 rsvd4;
+ __be16 src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be32 rsvd1;
+ __be32 dst_ip;
+ __be32 dst_ip_msk;
+ __be32 src_ip;
+ __be32 src_ip_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_vxlan {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be32 rsvd1;
+ __be32 vni;
+ __be32 vni_mask;
+} __packed;
+
+struct _rule_hw {
+ union {
+ struct {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ };
+ struct mlx4_net_trans_rule_hw_eth eth;
+ struct mlx4_net_trans_rule_hw_ib ib;
+ struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+ struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+ struct mlx4_net_trans_rule_hw_vxlan vxlan;
+ };
+};
+
+enum {
+ VXLAN_STEER_BY_OUTER_MAC = 1 << 0,
+ VXLAN_STEER_BY_OUTER_VLAN = 1 << 1,
+ VXLAN_STEER_BY_VSID_VNI = 1 << 2,
+ VXLAN_STEER_BY_INNER_MAC = 1 << 3,
+ VXLAN_STEER_BY_INNER_VLAN = 1 << 4,
+};
+
+int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
+ enum mlx4_net_trans_promisc_mode mode);
+int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
+ enum mlx4_net_trans_promisc_mode mode);
+int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
+int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
+int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
+int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc);
+int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
+int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
+ u8 ignore_fcs_value);
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
+
+int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
+ int npages, u64 iova, u32 *lkey, u32 *rkey);
+int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
+ int max_maps, u8 page_shift, struct mlx4_fmr *fmr);
+int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
+void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
+ u32 *lkey, u32 *rkey);
+int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
+int mlx4_SYNC_TPT(struct mlx4_dev *dev);
+int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
+ int *vector);
+void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
+int mlx4_get_phys_port_id(struct mlx4_dev *dev);
+int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
+int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
+
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+
+void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry,
+ int port);
+__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port);
+void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port);
+int mlx4_flow_attach(struct mlx4_dev *dev,
+ struct mlx4_net_trans_rule *rule, u64 *reg_id);
+int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
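+
+/*
+ * Usage sketch (illustrative only, not part of the original header): a
+ * flow steering rule is built from an mlx4_net_trans_rule plus a list of
+ * mlx4_spec_list entries and then attached to a QP; the priority below is
+ * one of the MLX4_DOMAIN_* values defined earlier in this file.
+ */
+#if 0	/* example, not part of the API */
+static int example_steer_dmac_to_qp(struct mlx4_dev *dev, u8 port, u32 qpn,
+				    const u8 *dmac, u64 *reg_id)
+{
+	struct mlx4_spec_list spec = { .id = MLX4_NET_TRANS_RULE_ID_ETH };
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
+		.exclusive	= 0,
+		.allow_loopback	= 1,
+		.promisc_mode	= MLX4_FS_REGULAR,
+		.port		= port,
+		.priority	= MLX4_DOMAIN_NIC,
+		.qpn		= qpn,
+	};
+
+	INIT_LIST_HEAD(&rule.list);
+	memcpy(spec.eth.dst_mac, dmac, ETH_ALEN);
+	memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);
+	list_add_tail(&spec.list, &rule.list);
+
+	return mlx4_flow_attach(dev, &rule, reg_id);
+}
+#endif
+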
+int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+ enum mlx4_net_trans_promisc_mode flow_type);
+int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+ enum mlx4_net_trans_rule_id id);
+int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
+
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+ int port, int qpn, u16 prio, u64 *reg_id);
+
+void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
+ int i, int val);
+
+int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
+
+int mlx4_is_slave_active(struct mlx4_dev *dev, int slave);
+int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port);
+int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port);
+int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr);
+int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, u8 port_subtype_change);
+enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port);
+int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int event, enum slave_port_gen_event *gen_event);
+
+void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
+__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
+
+int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
+ int *slave_id);
+int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
+ u8 *gid);
+
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
+ u32 max_range_qpn);
+
+cycle_t mlx4_read_clock(struct mlx4_dev *dev);
+
+struct mlx4_active_ports {
+ DECLARE_BITMAP(ports, MLX4_MAX_PORTS);
+};
+/* Returns a bitmap of the physical ports which are assigned to slave */
+struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave);
+
+/* Returns the physical port that represents the virtual port of the slave, */
+/* or a value < 0 in case of an error. If a slave has 2 ports, the identity */
+/* mapping is returned. */
+int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port);
+
+struct mlx4_slaves_pport {
+ DECLARE_BITMAP(slaves, MLX4_MFUNC_MAX);
+};
+/* Returns a bitmap of all slaves that are assigned to port. */
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
+ int port);
+
+/* Returns a bitmap of all slaves that are assigned exactly to all the */
+/* ports that are set in crit_ports. */
+struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
+ struct mlx4_dev *dev,
+ const struct mlx4_active_ports *crit_ports);
+
+/* Returns the slave's virtual port that represents the physical port. */
+int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
+
+int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
+
+int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
+int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
+int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
+int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
+ int enable);
+int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
+ struct mlx4_mpt_entry ***mpt_entry);
+int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
+ struct mlx4_mpt_entry **mpt_entry);
+int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
+ u32 pdn);
+int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
+ struct mlx4_mpt_entry *mpt_entry,
+ u32 access);
+void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
+ struct mlx4_mpt_entry **mpt_entry);
+void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr);
+int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
+ u64 iova, u64 size, int npages,
+ int page_shift, struct mlx4_mpt_entry *mpt_entry);
+
+int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
+ u16 offset, u16 size, u8 *data);
+
+/* Returns true if running in low memory profile (kdump kernel) */
+static inline bool mlx4_low_memory_profile(void)
+{
+ return is_kdump_kernel();
+}
+
+/* ACCESS REG commands */
+enum mlx4_access_reg_method {
+ MLX4_ACCESS_REG_QUERY = 0x1,
+ MLX4_ACCESS_REG_WRITE = 0x2,
+};
+
+/* ACCESS PTYS Reg command */
+enum mlx4_ptys_proto {
+ MLX4_PTYS_IB = 1<<0,
+ MLX4_PTYS_EN = 1<<2,
+};
+
+struct mlx4_ptys_reg {
+ u8 resrvd1;
+ u8 local_port;
+ u8 resrvd2;
+ u8 proto_mask;
+ __be32 resrvd3[2];
+ __be32 eth_proto_cap;
+ __be16 ib_width_cap;
+ __be16 ib_speed_cap;
+ __be32 resrvd4;
+ __be32 eth_proto_admin;
+ __be16 ib_width_admin;
+ __be16 ib_speed_admin;
+ __be32 resrvd5;
+ __be32 eth_proto_oper;
+ __be16 ib_width_oper;
+ __be16 ib_speed_oper;
+ __be32 resrvd6;
+ __be32 eth_proto_lp_adv;
+} __packed;
+
+int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
+ enum mlx4_access_reg_method method,
+ struct mlx4_ptys_reg *ptys_reg);
+
+#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx4/doorbell.h b/include/linux/mlx4/doorbell.h
new file mode 100644
index 000000000..f31bba270
--- /dev/null
+++ b/include/linux/mlx4/doorbell.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_DOORBELL_H
+#define MLX4_DOORBELL_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+#define MLX4_SEND_DOORBELL 0x14
+#define MLX4_CQ_DOORBELL 0x20
+
+#if BITS_PER_LONG == 64
+/*
+ * Assume that we can just write a 64-bit doorbell atomically. s390
+ * actually doesn't have writeq() but S/390 systems don't even have
+ * PCI so we won't worry about it.
+ */
+
+#define MLX4_DECLARE_DOORBELL_LOCK(name)
+#define MLX4_INIT_DOORBELL_LOCK(ptr) do { } while (0)
+#define MLX4_GET_DOORBELL_LOCK(ptr) (NULL)
+
+static inline void mlx4_write64(__be32 val[2], void __iomem *dest,
+ spinlock_t *doorbell_lock)
+{
+ __raw_writeq(*(u64 *) val, dest);
+}
+
+#else
+
+/*
+ * Just fall back to a spinlock to protect the doorbell if
+ * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
+ * MMIO writes.
+ */
+
+#define MLX4_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
+#define MLX4_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
+#define MLX4_GET_DOORBELL_LOCK(ptr) (ptr)
+
+static inline void mlx4_write64(__be32 val[2], void __iomem *dest,
+ spinlock_t *doorbell_lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(doorbell_lock, flags);
+ __raw_writel((__force u32) val[0], dest);
+ __raw_writel((__force u32) val[1], dest + 4);
+ spin_unlock_irqrestore(doorbell_lock, flags);
+}
+
+#endif
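+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * callers declare and initialize a doorbell lock with the macros above so
+ * that 32-bit kernels get a real spinlock while 64-bit kernels get
+ * nothing; the struct and field names here are hypothetical.
+ */
+#if 0	/* example, not part of the API */
+struct example_uar_ctx {
+	void __iomem *uar_page;
+	MLX4_DECLARE_DOORBELL_LOCK(uar_lock)
+};
+
+static inline void example_ring_send_db(struct example_uar_ctx *ctx,
+					__be32 db[2])
+{
+	/* the lock must have been set up with MLX4_INIT_DOORBELL_LOCK */
+	mlx4_write64(db, ctx->uar_page + MLX4_SEND_DOORBELL,
+		     MLX4_GET_DOORBELL_LOCK(&ctx->uar_lock));
+}
+#endif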
+
+#endif /* MLX4_DOORBELL_H */
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
new file mode 100644
index 000000000..9553a73d2
--- /dev/null
+++ b/include/linux/mlx4/driver.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_DRIVER_H
+#define MLX4_DRIVER_H
+
+#include <linux/mlx4/device.h>
+
+struct mlx4_dev;
+
+#define MLX4_MAC_MASK 0xffffffffffffULL
+
+enum mlx4_dev_event {
+ MLX4_DEV_EVENT_CATASTROPHIC_ERROR,
+ MLX4_DEV_EVENT_PORT_UP,
+ MLX4_DEV_EVENT_PORT_DOWN,
+ MLX4_DEV_EVENT_PORT_REINIT,
+ MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
+ MLX4_DEV_EVENT_SLAVE_INIT,
+ MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
+};
+
+enum {
+ MLX4_INTFF_BONDING = 1 << 0
+};
+
+struct mlx4_interface {
+ void * (*add) (struct mlx4_dev *dev);
+ void (*remove)(struct mlx4_dev *dev, void *context);
+ void (*event) (struct mlx4_dev *dev, void *context,
+ enum mlx4_dev_event event, unsigned long param);
+ void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
+ struct list_head list;
+ enum mlx4_protocol protocol;
+ int flags;
+};
+
+int mlx4_register_interface(struct mlx4_interface *intf);
+void mlx4_unregister_interface(struct mlx4_interface *intf);
+
+int mlx4_bond(struct mlx4_dev *dev);
+int mlx4_unbond(struct mlx4_dev *dev);
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+ return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+struct mlx4_port_map {
+ u8 port1;
+ u8 port2;
+};
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+
+static inline u64 mlx4_mac_to_u64(u8 *addr)
+{
+ u64 mac = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac <<= 8;
+ mac |= addr[i];
+ }
+ return mac;
+}
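+
+/*
+ * Usage sketch (illustrative only, not part of the original header):
+ * mlx4_mac_to_u64() packs a 6-byte Ethernet address into the low 48 bits
+ * of a u64, e.g. 00:11:22:33:44:55 becomes 0x001122334455ULL.
+ */
+#if 0	/* example, not part of the API */
+static inline u64 example_mac_as_u64(void)
+{
+	u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
+
+	return mlx4_mac_to_u64(addr);	/* 0x001122334455ULL */
+}
+#endif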
+
+#endif /* MLX4_DRIVER_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
new file mode 100644
index 000000000..6fed539e5
--- /dev/null
+++ b/include/linux/mlx4/qp.h
@@ -0,0 +1,475 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_QP_H
+#define MLX4_QP_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/device.h>
+
+#define MLX4_INVALID_LKEY 0x100
+
+enum mlx4_qp_optpar {
+ MLX4_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
+ MLX4_QP_OPTPAR_RRE = 1 << 1,
+ MLX4_QP_OPTPAR_RAE = 1 << 2,
+ MLX4_QP_OPTPAR_RWE = 1 << 3,
+ MLX4_QP_OPTPAR_PKEY_INDEX = 1 << 4,
+ MLX4_QP_OPTPAR_Q_KEY = 1 << 5,
+ MLX4_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
+ MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
+ MLX4_QP_OPTPAR_SRA_MAX = 1 << 8,
+ MLX4_QP_OPTPAR_RRA_MAX = 1 << 9,
+ MLX4_QP_OPTPAR_PM_STATE = 1 << 10,
+ MLX4_QP_OPTPAR_RETRY_COUNT = 1 << 12,
+ MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13,
+ MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
+ MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16,
+ MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20,
+ MLX4_QP_OPTPAR_VLAN_STRIPPING = 1 << 21,
+};
+
+enum mlx4_qp_state {
+ MLX4_QP_STATE_RST = 0,
+ MLX4_QP_STATE_INIT = 1,
+ MLX4_QP_STATE_RTR = 2,
+ MLX4_QP_STATE_RTS = 3,
+ MLX4_QP_STATE_SQER = 4,
+ MLX4_QP_STATE_SQD = 5,
+ MLX4_QP_STATE_ERR = 6,
+ MLX4_QP_STATE_SQ_DRAINING = 7,
+ MLX4_QP_NUM_STATE
+};
+
+enum {
+ MLX4_QP_ST_RC = 0x0,
+ MLX4_QP_ST_UC = 0x1,
+ MLX4_QP_ST_RD = 0x2,
+ MLX4_QP_ST_UD = 0x3,
+ MLX4_QP_ST_XRC = 0x6,
+ MLX4_QP_ST_MLX = 0x7
+};
+
+enum {
+ MLX4_QP_PM_MIGRATED = 0x3,
+ MLX4_QP_PM_ARMED = 0x0,
+ MLX4_QP_PM_REARM = 0x1
+};
+
+enum {
+ /* params1 */
+ MLX4_QP_BIT_SRE = 1 << 15,
+ MLX4_QP_BIT_SWE = 1 << 14,
+ MLX4_QP_BIT_SAE = 1 << 13,
+ /* params2 */
+ MLX4_QP_BIT_RRE = 1 << 15,
+ MLX4_QP_BIT_RWE = 1 << 14,
+ MLX4_QP_BIT_RAE = 1 << 13,
+ MLX4_QP_BIT_FPP = 1 << 3,
+ MLX4_QP_BIT_RIC = 1 << 4,
+};
+
+enum {
+ MLX4_RSS_HASH_XOR = 0,
+ MLX4_RSS_HASH_TOP = 1,
+
+ MLX4_RSS_UDP_IPV6 = 1 << 0,
+ MLX4_RSS_UDP_IPV4 = 1 << 1,
+ MLX4_RSS_TCP_IPV6 = 1 << 2,
+ MLX4_RSS_IPV6 = 1 << 3,
+ MLX4_RSS_TCP_IPV4 = 1 << 4,
+ MLX4_RSS_IPV4 = 1 << 5,
+
+ MLX4_RSS_BY_OUTER_HEADERS = 0 << 6,
+ MLX4_RSS_BY_INNER_HEADERS = 2 << 6,
+ MLX4_RSS_BY_INNER_HEADERS_IPONLY = 3 << 6,
+
+ /* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH = 0x24,
+ /* offset of being RSS indirection QP within mlx4_qp_context.flags */
+ MLX4_RSS_QPC_FLAG_OFFSET = 13,
+};
+
+#define MLX4_EN_RSS_KEY_SIZE 40
+
+struct mlx4_rss_context {
+ __be32 base_qpn;
+ __be32 default_qpn;
+ u16 reserved;
+ u8 hash_fn;
+ u8 flags;
+ __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)];
+ __be32 base_qpn_udp;
+};
+
+struct mlx4_qp_path {
+ u8 fl;
+ u8 vlan_control;
+ u8 disable_pkey_check;
+ u8 pkey_index;
+ u8 counter_index;
+ u8 grh_mylmc;
+ __be16 rlid;
+ u8 ackto;
+ u8 mgid_index;
+ u8 static_rate;
+ u8 hop_limit;
+ __be32 tclass_flowlabel;
+ u8 rgid[16];
+ u8 sched_queue;
+ u8 vlan_index;
+ u8 feup;
+ u8 fvl_rx;
+ u8 reserved4[2];
+ u8 dmac[ETH_ALEN];
+};
+
+enum { /* fl */
+ MLX4_FL_CV = 1 << 6,
+ MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2
+};
+enum { /* vlan_control */
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED = 1 << 6,
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED = 1 << 5, /* 802.1p priority tag */
+ MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED = 1 << 4,
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED = 1 << 2,
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED = 1 << 1, /* 802.1p priority tag */
+ MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED = 1 << 0
+};
+
+enum { /* feup */
+ MLX4_FEUP_FORCE_ETH_UP = 1 << 6, /* force Eth UP */
+ MLX4_FSM_FORCE_ETH_SRC_MAC = 1 << 5, /* force Source MAC */
+ MLX4_FVL_FORCE_ETH_VLAN = 1 << 3 /* force Eth vlan */
+};
+
+enum { /* fvl_rx */
+ MLX4_FVL_RX_FORCE_ETH_VLAN = 1 << 0 /* enforce Eth rx vlan */
+};
+
+struct mlx4_qp_context {
+ __be32 flags;
+ __be32 pd;
+ u8 mtu_msgmax;
+ u8 rq_size_stride;
+ u8 sq_size_stride;
+ u8 rlkey;
+ __be32 usr_page;
+ __be32 local_qpn;
+ __be32 remote_qpn;
+ struct mlx4_qp_path pri_path;
+ struct mlx4_qp_path alt_path;
+ __be32 params1;
+ u32 reserved1;
+ __be32 next_send_psn;
+ __be32 cqn_send;
+ u32 reserved2[2];
+ __be32 last_acked_psn;
+ __be32 ssn;
+ __be32 params2;
+ __be32 rnr_nextrecvpsn;
+ __be32 xrcd;
+ __be32 cqn_recv;
+ __be64 db_rec_addr;
+ __be32 qkey;
+ __be32 srqn;
+ __be32 msn;
+ __be16 rq_wqe_counter;
+ __be16 sq_wqe_counter;
+ u32 reserved3;
+ __be16 rate_limit_params;
+ u8 reserved4;
+ u8 qos_vport;
+ __be32 param3;
+ __be32 nummmcpeers_basemkey;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ u32 reserved6[10];
+};
+
+struct mlx4_update_qp_context {
+ __be64 qp_mask;
+ __be64 primary_addr_path_mask;
+ __be64 secondary_addr_path_mask;
+ u64 reserved1;
+ struct mlx4_qp_context qp_context;
+ u64 reserved2[58];
+};
+
+enum {
+ MLX4_UPD_QP_MASK_PM_STATE = 32,
+ MLX4_UPD_QP_MASK_VSD = 33,
+ MLX4_UPD_QP_MASK_QOS_VPP = 34,
+ MLX4_UPD_QP_MASK_RATE_LIMIT = 35,
+};
+
+enum {
+ MLX4_UPD_QP_PATH_MASK_PKEY_INDEX = 0 + 32,
+ MLX4_UPD_QP_PATH_MASK_FSM = 1 + 32,
+ MLX4_UPD_QP_PATH_MASK_MAC_INDEX = 2 + 32,
+ MLX4_UPD_QP_PATH_MASK_FVL = 3 + 32,
+ MLX4_UPD_QP_PATH_MASK_CV = 4 + 32,
+ MLX4_UPD_QP_PATH_MASK_VLAN_INDEX = 5 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN = 6 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED = 7 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P = 8 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED = 9 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED = 10 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P = 11 + 32,
+ MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED = 12 + 32,
+ MLX4_UPD_QP_PATH_MASK_FEUP = 13 + 32,
+ MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE = 14 + 32,
+ MLX4_UPD_QP_PATH_MASK_IF_COUNTER_INDEX = 15 + 32,
+ MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
+};
+
+enum { /* param3 */
+ MLX4_STRIP_VLAN = 1 << 30
+};
+
+/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
+#define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)
+
+enum {
+ MLX4_WQE_CTRL_NEC = 1 << 29,
+ MLX4_WQE_CTRL_IIP = 1 << 28,
+ MLX4_WQE_CTRL_ILP = 1 << 27,
+ MLX4_WQE_CTRL_FENCE = 1 << 6,
+ MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
+ MLX4_WQE_CTRL_SOLICITED = 1 << 1,
+ MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
+ MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
+ MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+ MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
+ MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0,
+};
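
A hypothetical helper sketching the intended use of the firmware-version gate defined above; struct mlx4_dev and its caps.fw_ver field are assumed to carry the mlx4_fw_ver()-encoded running firmware version.

	/* hypothetical: true if the firmware understands MLX4_WQE_CTRL_NEC */
	static inline bool mlx4_nec_supported(struct mlx4_dev *dev)
	{
		return dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC;
	}
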
+
+struct mlx4_wqe_ctrl_seg {
+ __be32 owner_opcode;
+ union {
+ struct {
+ __be16 vlan_tag;
+ u8 ins_vlan;
+ u8 fence_size;
+ };
+ __be32 bf_qpn;
+ };
+ /*
+ * High 24 bits are SRC remote buffer; low 8 bits are flags:
+ * [7] SO (strong ordering)
+ * [5] TCP/UDP checksum
+ * [4] IP checksum
+ * [3:2] C (generate completion queue entry)
+ * [1] SE (solicited event)
+ * [0] FL (force loopback)
+ */
+ union {
+ __be32 srcrb_flags;
+ __be16 srcrb_flags16[2];
+ };
+ /*
+ * imm is immediate data for send/RDMA write w/ immediate;
+ * also invalidation key for send with invalidate; input
+ * modifier for WQEs on CCQs.
+ */
+ __be32 imm;
+};
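
The bit layout documented inside mlx4_wqe_ctrl_seg maps directly onto the MLX4_WQE_CTRL_* values above; a typical send path might request a completion entry plus checksum offloads roughly as follows (illustrative sketch, ctrl assumed to point at the WQE's control segment):

	ctrl->srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
					MLX4_WQE_CTRL_IP_CSUM |
					MLX4_WQE_CTRL_TCP_UDP_CSUM);
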
+
+enum {
+ MLX4_WQE_MLX_VL15 = 1 << 17,
+ MLX4_WQE_MLX_SLR = 1 << 16
+};
+
+struct mlx4_wqe_mlx_seg {
+ u8 owner;
+ u8 reserved1[2];
+ u8 opcode;
+ __be16 sched_prio;
+ u8 reserved2;
+ u8 size;
+ /*
+ * [17] VL15
+ * [16] SLR
+ * [15:12] static rate
+ * [11:8] SL
+ * [4] ICRC
+ * [3:2] C
+ * [0] FL (force loopback)
+ */
+ __be32 flags;
+ __be16 rlid;
+ u16 reserved3;
+};
+
+struct mlx4_wqe_datagram_seg {
+ __be32 av[8];
+ __be32 dqpn;
+ __be32 qkey;
+ __be16 vlan;
+ u8 mac[ETH_ALEN];
+};
+
+struct mlx4_wqe_lso_seg {
+ __be32 mss_hdr_size;
+ __be32 header[0];
+};
+
+enum mlx4_wqe_bind_seg_flags2 {
+ MLX4_WQE_BIND_ZERO_BASED = (1 << 30),
+ MLX4_WQE_BIND_TYPE_2 = (1 << 31),
+};
+
+struct mlx4_wqe_bind_seg {
+ __be32 flags1;
+ __be32 flags2;
+ __be32 new_rkey;
+ __be32 lkey;
+ __be64 addr;
+ __be64 length;
+};
+
+enum {
+ MLX4_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
+ MLX4_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
+ MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ = 1 << 29,
+ MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE = 1 << 30,
+ MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC = 1 << 31
+};
+
+struct mlx4_wqe_fmr_seg {
+ __be32 flags;
+ __be32 mem_key;
+ __be64 buf_list;
+ __be64 start_addr;
+ __be64 reg_len;
+ __be32 offset;
+ __be32 page_size;
+ u32 reserved[2];
+};
+
+struct mlx4_wqe_fmr_ext_seg {
+ u8 flags;
+ u8 reserved;
+ __be16 app_mask;
+ __be16 wire_app_tag;
+ __be16 mem_app_tag;
+ __be32 wire_ref_tag_base;
+ __be32 mem_ref_tag_base;
+};
+
+struct mlx4_wqe_local_inval_seg {
+ u64 reserved1;
+ __be32 mem_key;
+ u32 reserved2;
+ u64 reserved3[2];
+};
+
+struct mlx4_wqe_raddr_seg {
+ __be64 raddr;
+ __be32 rkey;
+ u32 reserved;
+};
+
+struct mlx4_wqe_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+};
+
+struct mlx4_wqe_masked_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+ __be64 swap_add_mask;
+ __be64 compare_mask;
+};
+
+struct mlx4_wqe_data_seg {
+ __be32 byte_count;
+ __be32 lkey;
+ __be64 addr;
+};
+
+enum {
+ MLX4_INLINE_ALIGN = 64,
+ MLX4_INLINE_SEG = 1 << 31,
+};
+
+struct mlx4_wqe_inline_seg {
+ __be32 byte_count;
+};
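
As MLX4_INLINE_SEG suggests, inline segments carry the payload in the WQE itself, with the high bit of byte_count marking the segment as inline. A hedged sketch (wqe, data and data_len are assumed locals from the posting path):

	struct mlx4_wqe_inline_seg *inl = wqe;

	inl->byte_count = cpu_to_be32(MLX4_INLINE_SEG | data_len);
	memcpy(inl + 1, data, data_len);	/* payload follows the header */
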
+
+enum mlx4_update_qp_attr {
+ MLX4_UPDATE_QP_SMAC = 1 << 0,
+ MLX4_UPDATE_QP_VSD = 1 << 1,
+ MLX4_UPDATE_QP_RATE_LIMIT = 1 << 2,
+ MLX4_UPDATE_QP_QOS_VPORT = 1 << 3,
+ MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 4) - 1
+};
+
+enum mlx4_update_qp_params_flags {
+ MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE = 1 << 0,
+};
+
+struct mlx4_update_qp_params {
+ u8 smac_index;
+ u8 qos_vport;
+ u32 flags;
+ u16 rate_unit;
+ u16 rate_val;
+};
+
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
+ enum mlx4_update_qp_attr attr,
+ struct mlx4_update_qp_params *params);
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp);
+
+int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ struct mlx4_qp_context *context);
+
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ struct mlx4_qp_context *context,
+ struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
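
A hedged sketch of bringing a QP up with the helper declared above; dev, mtt, context and qp are assumed to have been allocated and initialized by the caller.

	enum mlx4_qp_state qp_state = MLX4_QP_STATE_RST;
	int err;

	/* walk the QP through RST -> INIT -> RTR -> RTS */
	err = mlx4_qp_to_ready(dev, &mtt, &context, &qp, &qp_state);
	if (err)
		return err;	/* qp_state reports how far the transition got */
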
+
+static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+ return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
+}
+
+void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
+
+#endif /* MLX4_QP_H */
diff --git a/include/linux/mlx4/srq.h b/include/linux/mlx4/srq.h
new file mode 100644
index 000000000..192e0f778
--- /dev/null
+++ b/include/linux/mlx4/srq.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX4_SRQ_H
+#define MLX4_SRQ_H
+
+struct mlx4_wqe_srq_next_seg {
+ u16 reserved1;
+ __be16 next_wqe_index;
+ u32 reserved2[3];
+};
+
+struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn);
+
+#endif /* MLX4_SRQ_H */
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
new file mode 100644
index 000000000..68cd08f02
--- /dev/null
+++ b/include/linux/mlx5/cmd.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_CMD_H
+#define MLX5_CMD_H
+
+#include <linux/types.h>
+
+struct manage_pages_layout {
+ u64 ptr;
+ u32 reserved;
+ u16 num_entries;
+ u16 func_id;
+};
+
+
+struct mlx5_cmd_alloc_uar_imm_out {
+ u32 rsvd[3];
+ u32 uarn;
+};
+
+#endif /* MLX5_CMD_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
new file mode 100644
index 000000000..2695ced22
--- /dev/null
+++ b/include/linux/mlx5/cq.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_CORE_CQ_H
+#define MLX5_CORE_CQ_H
+
+#include <rdma/ib_verbs.h>
+#include <linux/mlx5/driver.h>
+
+
+struct mlx5_core_cq {
+ u32 cqn;
+ int cqe_sz;
+ __be32 *set_ci_db;
+ __be32 *arm_db;
+ atomic_t refcount;
+ struct completion free;
+ unsigned vector;
+ int irqn;
+ void (*comp) (struct mlx5_core_cq *);
+ void (*event) (struct mlx5_core_cq *, enum mlx5_event);
+ struct mlx5_uar *uar;
+ u32 cons_index;
+ unsigned arm_sn;
+ struct mlx5_rsc_debug *dbg;
+ int pid;
+};
+
+
+enum {
+ MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
+ MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
+ MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
+ MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
+ MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06,
+ MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
+ MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
+ MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
+ MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
+ MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
+ MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
+ MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
+ MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
+};
+
+enum {
+ MLX5_CQE_OWNER_MASK = 1,
+ MLX5_CQE_REQ = 0,
+ MLX5_CQE_RESP_WR_IMM = 1,
+ MLX5_CQE_RESP_SEND = 2,
+ MLX5_CQE_RESP_SEND_IMM = 3,
+ MLX5_CQE_RESP_SEND_INV = 4,
+ MLX5_CQE_RESIZE_CQ = 5,
+ MLX5_CQE_SIG_ERR = 12,
+ MLX5_CQE_REQ_ERR = 13,
+ MLX5_CQE_RESP_ERR = 14,
+ MLX5_CQE_INVALID = 15,
+};
+
+enum {
+ MLX5_CQ_MODIFY_PERIOD = 1 << 0,
+ MLX5_CQ_MODIFY_COUNT = 1 << 1,
+ MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
+};
+
+enum {
+ MLX5_CQ_OPMOD_RESIZE = 1,
+ MLX5_MODIFY_CQ_MASK_LOG_SIZE = 1 << 0,
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET = 1 << 1,
+ MLX5_MODIFY_CQ_MASK_PG_SIZE = 1 << 2,
+};
+
+struct mlx5_cq_modify_params {
+ int type;
+ union {
+ struct {
+ u32 page_offset;
+ u8 log_cq_size;
+ } resize;
+
+ struct {
+ } moder;
+
+ struct {
+ } mapping;
+ } params;
+};
+
+enum {
+ CQE_SIZE_64 = 0,
+ CQE_SIZE_128 = 1,
+};
+
+static inline int cqe_sz_to_mlx_sz(u8 size)
+{
+ return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+}
+
+static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
+{
+ *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
+}
+
+enum {
+ MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24,
+ MLX5_CQ_DB_REQ_NOT = 0 << 24
+};
+
+static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
+ void __iomem *uar_page,
+ spinlock_t *doorbell_lock,
+ u32 cons_index)
+{
+ __be32 doorbell[2];
+ u32 sn;
+ u32 ci;
+
+ sn = cq->arm_sn & 3;
+ ci = cons_index & 0xffffff;
+
+ *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
+
+ /* Make sure that the doorbell record in host memory is
+ * written before ringing the doorbell via PCI MMIO.
+ */
+ wmb();
+
+ doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
+ doorbell[1] = cpu_to_be32(cq->cqn);
+
+ mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
+}
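
Typical (hedged) usage from a completion handler: publish the new consumer index, then re-arm the CQ for the next completion. uar_page and doorbell_lock are assumed to come from the driver's UAR setup; on 64-bit builds the lock pointer may simply be NULL.

	mlx5_cq_set_ci(cq);		/* write the doorbell record first */
	mlx5_cq_arm(cq, MLX5_CQ_DB_REQ_NOT, uar_page, doorbell_lock,
		    cq->cons_index);
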
+
+int mlx5_init_cq_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_create_cq_mbox_in *in, int inlen);
+int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_query_cq_mbox_out *out);
+int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+
+#endif /* MLX5_CORE_CQ_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
new file mode 100644
index 000000000..abf65c790
--- /dev/null
+++ b/include/linux/mlx5/device.h
@@ -0,0 +1,999 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_DEVICE_H
+#define MLX5_DEVICE_H
+
+#include <linux/types.h>
+#include <rdma/ib_verbs.h>
+
+#if defined(__LITTLE_ENDIAN)
+#define MLX5_SET_HOST_ENDIANNESS 0
+#elif defined(__BIG_ENDIAN)
+#define MLX5_SET_HOST_ENDIANNESS 0x80
+#else
+#error Host endianness not defined
+#endif
+
+/* helper macros */
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
+
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* insert a value to a struct */
+#define MLX5_SET(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
+#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
+__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
+__mlx5_mask(typ, fld))
+
+#define MLX5_GET_PR(typ, p, fld) ({ \
+ u32 ___t = MLX5_GET(typ, p, fld); \
+ pr_debug(#fld " = 0x%x\n", ___t); \
+ ___t; \
+})
+
+#define MLX5_SET64(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+ *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
+} while (0)
+
+#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
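
The accessors above rely on the mlx5_ifc convention (see mlx5_ifc.h later in this patch) that every field of a *_bits struct is declared as a u8 array whose length is the field width in bits, so sizeof() and offsetof() yield bit counts. The layout below, mlx5_ifc_example_reg_bits, is invented purely to illustrate the macros:

	struct mlx5_ifc_example_reg_bits {	/* 64 bits total */
		u8 field_a[0x10];
		u8 field_b[0x10];
		u8 field_c[0x20];
	};

	u32 buf[MLX5_ST_SZ_DW(example_reg)] = {0};

	MLX5_SET(example_reg, buf, field_b, 0x1234);	/* endian-safe store */
	WARN_ON(MLX5_GET(example_reg, buf, field_b) != 0x1234);
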
+
+enum {
+ MLX5_MAX_COMMANDS = 32,
+ MLX5_CMD_DATA_BLOCK_SIZE = 512,
+ MLX5_PCI_CMD_XPORT = 7,
+ MLX5_MKEY_BSF_OCTO_SIZE = 4,
+ MLX5_MAX_PSVS = 4,
+};
+
+enum {
+ MLX5_EXTENDED_UD_AV = 0x80000000,
+};
+
+enum {
+ MLX5_CQ_STATE_ARMED = 9,
+ MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
+ MLX5_CQ_STATE_FIRED = 0xa,
+};
+
+enum {
+ MLX5_STAT_RATE_OFFSET = 5,
+};
+
+enum {
+ MLX5_INLINE_SEG = 0x80000000,
+};
+
+enum {
+ MLX5_MIN_PKEY_TABLE_SIZE = 128,
+ MLX5_MAX_LOG_PKEY_TABLE = 5,
+};
+
+enum {
+ MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
+ MLX5_PFAULT_SUBTYPE_WQE = 0,
+ MLX5_PFAULT_SUBTYPE_RDMA = 1,
+};
+
+enum {
+ MLX5_PERM_LOCAL_READ = 1 << 2,
+ MLX5_PERM_LOCAL_WRITE = 1 << 3,
+ MLX5_PERM_REMOTE_READ = 1 << 4,
+ MLX5_PERM_REMOTE_WRITE = 1 << 5,
+ MLX5_PERM_ATOMIC = 1 << 6,
+ MLX5_PERM_UMR_EN = 1 << 7,
+};
+
+enum {
+ MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
+ MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
+ MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
+ MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
+ MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
+};
+
+enum {
+ MLX5_ACCESS_MODE_PA = 0,
+ MLX5_ACCESS_MODE_MTT = 1,
+ MLX5_ACCESS_MODE_KLM = 2
+};
+
+enum {
+ MLX5_MKEY_REMOTE_INVAL = 1 << 24,
+ MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
+ MLX5_MKEY_BSF_EN = 1 << 30,
+ MLX5_MKEY_LEN64 = 1 << 31,
+};
+
+enum {
+ MLX5_EN_RD = (u64)1,
+ MLX5_EN_WR = (u64)2
+};
+
+enum {
+ MLX5_BF_REGS_PER_PAGE = 4,
+ MLX5_MAX_UAR_PAGES = 1 << 8,
+ MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
+ MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
+};
+
+enum {
+ MLX5_MKEY_MASK_LEN = 1ull << 0,
+ MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
+ MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
+ MLX5_MKEY_MASK_PD = 1ull << 7,
+ MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
+ MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
+ MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
+ MLX5_MKEY_MASK_KEY = 1ull << 13,
+ MLX5_MKEY_MASK_QPN = 1ull << 14,
+ MLX5_MKEY_MASK_LR = 1ull << 17,
+ MLX5_MKEY_MASK_LW = 1ull << 18,
+ MLX5_MKEY_MASK_RR = 1ull << 19,
+ MLX5_MKEY_MASK_RW = 1ull << 20,
+ MLX5_MKEY_MASK_A = 1ull << 21,
+ MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
+ MLX5_MKEY_MASK_FREE = 1ull << 29,
+};
+
+enum {
+ MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
+
+ MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
+ MLX5_UMR_CHECK_FREE = (2 << 5),
+
+ MLX5_UMR_INLINE = (1 << 7),
+};
+
+#define MLX5_UMR_MTT_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
+#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+
+enum mlx5_event {
+ MLX5_EVENT_TYPE_COMP = 0x0,
+
+ MLX5_EVENT_TYPE_PATH_MIG = 0x01,
+ MLX5_EVENT_TYPE_COMM_EST = 0x02,
+ MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
+ MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
+ MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
+
+ MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
+ MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
+ MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
+ MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
+ MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
+ MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+
+ MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
+ MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
+ MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+
+ MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
+ MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
+
+ MLX5_EVENT_TYPE_CMD = 0x0a,
+ MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
+
+ MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
+};
+
+enum {
+ MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
+ MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
+ MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
+ MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
+ MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
+ MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
+ MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
+};
+
+enum {
+ MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
+ MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
+ MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
+ MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
+ MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
+ MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
+ MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
+ MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
+ MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
+ MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
+ MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
+ MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
+};
+
+enum {
+ MLX5_OPCODE_NOP = 0x00,
+ MLX5_OPCODE_SEND_INVAL = 0x01,
+ MLX5_OPCODE_RDMA_WRITE = 0x08,
+ MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
+ MLX5_OPCODE_SEND = 0x0a,
+ MLX5_OPCODE_SEND_IMM = 0x0b,
+ MLX5_OPCODE_RDMA_READ = 0x10,
+ MLX5_OPCODE_ATOMIC_CS = 0x11,
+ MLX5_OPCODE_ATOMIC_FA = 0x12,
+ MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
+ MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
+ MLX5_OPCODE_BIND_MW = 0x18,
+ MLX5_OPCODE_CONFIG_CMD = 0x1f,
+
+ MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
+ MLX5_RECV_OPCODE_SEND = 0x01,
+ MLX5_RECV_OPCODE_SEND_IMM = 0x02,
+ MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
+
+ MLX5_CQE_OPCODE_ERROR = 0x1e,
+ MLX5_CQE_OPCODE_RESIZE = 0x16,
+
+ MLX5_OPCODE_SET_PSV = 0x20,
+ MLX5_OPCODE_GET_PSV = 0x21,
+ MLX5_OPCODE_CHECK_PSV = 0x22,
+ MLX5_OPCODE_RGET_PSV = 0x26,
+ MLX5_OPCODE_RCHECK_PSV = 0x27,
+
+ MLX5_OPCODE_UMR = 0x25,
+
+};
+
+enum {
+ MLX5_SET_PORT_RESET_QKEY = 0,
+ MLX5_SET_PORT_GUID0 = 16,
+ MLX5_SET_PORT_NODE_GUID = 17,
+ MLX5_SET_PORT_SYS_GUID = 18,
+ MLX5_SET_PORT_GID_TABLE = 19,
+ MLX5_SET_PORT_PKEY_TABLE = 20,
+};
+
+enum {
+ MLX5_MAX_PAGE_SHIFT = 31
+};
+
+enum {
+ MLX5_ADAPTER_PAGE_SHIFT = 12,
+ MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
+};
+
+enum {
+ MLX5_CAP_OFF_CMDIF_CSUM = 46,
+};
+
+enum {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+ HCA_CAP_OPMOD_GET_ODP_MAX = 4,
+ HCA_CAP_OPMOD_GET_ODP_CUR = 5
+};
+
+struct mlx5_inbox_hdr {
+ __be16 opcode;
+ u8 rsvd[4];
+ __be16 opmod;
+};
+
+struct mlx5_outbox_hdr {
+ u8 status;
+ u8 rsvd[3];
+ __be32 syndrome;
+};
+
+struct mlx5_cmd_query_adapter_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_cmd_query_adapter_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[24];
+ u8 intapin;
+ u8 rsvd1[13];
+ __be16 vsd_vendor_id;
+ u8 vsd[208];
+ u8 vsd_psid[16];
+};
+
+enum mlx5_odp_transport_cap_bits {
+ MLX5_ODP_SUPPORT_SEND = 1 << 31,
+ MLX5_ODP_SUPPORT_RECV = 1 << 30,
+ MLX5_ODP_SUPPORT_WRITE = 1 << 29,
+ MLX5_ODP_SUPPORT_READ = 1 << 28,
+};
+
+struct mlx5_odp_caps {
+ char reserved[0x10];
+ struct {
+ __be32 rc_odp_caps;
+ __be32 uc_odp_caps;
+ __be32 ud_odp_caps;
+ } per_transport_caps;
+ char reserved2[0xe4];
+};
+
+struct mlx5_cmd_init_hca_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[2];
+ __be16 profile;
+ u8 rsvd1[4];
+};
+
+struct mlx5_cmd_init_hca_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_cmd_teardown_hca_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[2];
+ __be16 profile;
+ u8 rsvd1[4];
+};
+
+struct mlx5_cmd_teardown_hca_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_cmd_layout {
+ u8 type;
+ u8 rsvd0[3];
+ __be32 inlen;
+ __be64 in_ptr;
+ __be32 in[4];
+ __be32 out[4];
+ __be64 out_ptr;
+ __be32 outlen;
+ u8 token;
+ u8 sig;
+ u8 rsvd1;
+ u8 status_own;
+};
+
+
+struct health_buffer {
+ __be32 assert_var[5];
+ __be32 rsvd0[3];
+ __be32 assert_exit_ptr;
+ __be32 assert_callra;
+ __be32 rsvd1[2];
+ __be32 fw_ver;
+ __be32 hw_id;
+ __be32 rsvd2;
+ u8 irisc_index;
+ u8 synd;
+ __be16 ext_sync;
+};
+
+struct mlx5_init_seg {
+ __be32 fw_rev;
+ __be32 cmdif_rev_fw_sub;
+ __be32 rsvd0[2];
+ __be32 cmdq_addr_h;
+ __be32 cmdq_addr_l_sz;
+ __be32 cmd_dbell;
+ __be32 rsvd1[121];
+ struct health_buffer health;
+ __be32 rsvd2[884];
+ __be32 health_counter;
+ __be32 rsvd3[1019];
+ __be64 ieee1588_clk;
+ __be32 ieee1588_clk_type;
+ __be32 clr_intx;
+};
+
+struct mlx5_eqe_comp {
+ __be32 reserved[6];
+ __be32 cqn;
+};
+
+struct mlx5_eqe_qp_srq {
+ __be32 reserved[6];
+ __be32 qp_srq_n;
+};
+
+struct mlx5_eqe_cq_err {
+ __be32 cqn;
+ u8 reserved1[7];
+ u8 syndrome;
+};
+
+struct mlx5_eqe_port_state {
+ u8 reserved0[8];
+ u8 port;
+};
+
+struct mlx5_eqe_gpio {
+ __be32 reserved0[2];
+ __be64 gpio_event;
+};
+
+struct mlx5_eqe_congestion {
+ u8 type;
+ u8 rsvd0;
+ u8 congestion_level;
+};
+
+struct mlx5_eqe_stall_vl {
+ u8 rsvd0[3];
+ u8 port_vl;
+};
+
+struct mlx5_eqe_cmd {
+ __be32 vector;
+ __be32 rsvd[6];
+};
+
+struct mlx5_eqe_page_req {
+ u8 rsvd0[2];
+ __be16 func_id;
+ __be32 num_pages;
+ __be32 rsvd1[5];
+};
+
+struct mlx5_eqe_page_fault {
+ __be32 bytes_committed;
+ union {
+ struct {
+ u16 reserved1;
+ __be16 wqe_index;
+ u16 reserved2;
+ __be16 packet_length;
+ u8 reserved3[12];
+ } __packed wqe;
+ struct {
+ __be32 r_key;
+ u16 reserved1;
+ __be16 packet_length;
+ __be32 rdma_op_len;
+ __be64 rdma_va;
+ } __packed rdma;
+ } __packed;
+ __be32 flags_qpn;
+} __packed;
+
+union ev_data {
+ __be32 raw[7];
+ struct mlx5_eqe_cmd cmd;
+ struct mlx5_eqe_comp comp;
+ struct mlx5_eqe_qp_srq qp_srq;
+ struct mlx5_eqe_cq_err cq_err;
+ struct mlx5_eqe_port_state port;
+ struct mlx5_eqe_gpio gpio;
+ struct mlx5_eqe_congestion cong;
+ struct mlx5_eqe_stall_vl stall_vl;
+ struct mlx5_eqe_page_req req_pages;
+ struct mlx5_eqe_page_fault page_fault;
+} __packed;
+
+struct mlx5_eqe {
+ u8 rsvd0;
+ u8 type;
+ u8 rsvd1;
+ u8 sub_type;
+ __be32 rsvd2[7];
+ union ev_data data;
+ __be16 rsvd3;
+ u8 signature;
+ u8 owner;
+} __packed;
+
+struct mlx5_cmd_prot_block {
+ u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
+ u8 rsvd0[48];
+ __be64 next;
+ __be32 block_num;
+ u8 rsvd1;
+ u8 token;
+ u8 ctrl_sig;
+ u8 sig;
+};
+
+struct mlx5_err_cqe {
+ u8 rsvd0[32];
+ __be32 srqn;
+ u8 rsvd1[18];
+ u8 vendor_err_synd;
+ u8 syndrome;
+ __be32 s_wqe_opcode_qpn;
+ __be16 wqe_counter;
+ u8 signature;
+ u8 op_own;
+};
+
+struct mlx5_cqe64 {
+ u8 rsvd0[17];
+ u8 ml_path;
+ u8 rsvd20[4];
+ __be16 slid;
+ __be32 flags_rqpn;
+ u8 rsvd28[4];
+ __be32 srqn;
+ __be32 imm_inval_pkey;
+ u8 rsvd40[4];
+ __be32 byte_cnt;
+ __be64 timestamp;
+ __be32 sop_drop_qpn;
+ __be16 wqe_counter;
+ u8 signature;
+ u8 op_own;
+};
+
+struct mlx5_sig_err_cqe {
+ u8 rsvd0[16];
+ __be32 expected_trans_sig;
+ __be32 actual_trans_sig;
+ __be32 expected_reftag;
+ __be32 actual_reftag;
+ __be16 syndrome;
+ u8 rsvd22[2];
+ __be32 mkey;
+ __be64 err_offset;
+ u8 rsvd30[8];
+ __be32 qpn;
+ u8 rsvd38[2];
+ u8 signature;
+ u8 op_own;
+};
+
+struct mlx5_wqe_srq_next_seg {
+ u8 rsvd0[2];
+ __be16 next_wqe_index;
+ u8 signature;
+ u8 rsvd1[11];
+};
+
+union mlx5_ext_cqe {
+ struct ib_grh grh;
+ u8 inl[64];
+};
+
+struct mlx5_cqe128 {
+ union mlx5_ext_cqe inl_grh;
+ struct mlx5_cqe64 cqe64;
+};
+
+struct mlx5_srq_ctx {
+ u8 state_log_sz;
+ u8 rsvd0[3];
+ __be32 flags_xrcd;
+ __be32 pgoff_cqn;
+ u8 rsvd1[4];
+ u8 log_pg_sz;
+ u8 rsvd2[7];
+ __be32 pd;
+ __be16 lwm;
+ __be16 wqe_cnt;
+ u8 rsvd3[8];
+ __be64 db_record;
+};
+
+struct mlx5_create_srq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 input_srqn;
+ u8 rsvd0[4];
+ struct mlx5_srq_ctx ctx;
+ u8 rsvd1[208];
+ __be64 pas[0];
+};
+
+struct mlx5_create_srq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 srqn;
+ u8 rsvd[4];
+};
+
+struct mlx5_destroy_srq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 srqn;
+ u8 rsvd[4];
+};
+
+struct mlx5_destroy_srq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_query_srq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 srqn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_query_srq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+ struct mlx5_srq_ctx ctx;
+ u8 rsvd1[32];
+ __be64 pas[0];
+};
+
+struct mlx5_arm_srq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 srqn;
+ __be16 rsvd;
+ __be16 lwm;
+};
+
+struct mlx5_arm_srq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_cq_context {
+ u8 status;
+ u8 cqe_sz_flags;
+ u8 st;
+ u8 rsvd3;
+ u8 rsvd4[6];
+ __be16 page_offset;
+ __be32 log_sz_usr_page;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ __be16 rsvd20;
+ __be16 c_eqn;
+ u8 log_pg_sz;
+ u8 rsvd25[7];
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_counter;
+ __be32 producer_counter;
+ u8 rsvd48[8];
+ __be64 db_record_addr;
+};
+
+struct mlx5_create_cq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 input_cqn;
+ u8 rsvdx[4];
+ struct mlx5_cq_context ctx;
+ u8 rsvd6[192];
+ __be64 pas[0];
+};
+
+struct mlx5_create_cq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 cqn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_cq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 cqn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_cq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_query_cq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 cqn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_query_cq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+ struct mlx5_cq_context ctx;
+ u8 rsvd6[16];
+ __be64 pas[0];
+};
+
+struct mlx5_modify_cq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 cqn;
+ __be32 field_select;
+ struct mlx5_cq_context ctx;
+ u8 rsvd[192];
+ __be64 pas[0];
+};
+
+struct mlx5_modify_cq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_enable_hca_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_enable_hca_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_eq_context {
+ u8 status;
+ u8 ec_oi;
+ u8 st;
+ u8 rsvd2[7];
+ __be16 page_pffset;
+ __be32 log_sz_usr_page;
+ u8 rsvd3[7];
+ u8 intr;
+ u8 log_page_size;
+ u8 rsvd4[15];
+ __be32 consumer_counter;
+ __be32 produser_counter;
+ u8 rsvd5[16];
+};
+
+struct mlx5_create_eq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 input_eqn;
+ u8 rsvd1[4];
+ struct mlx5_eq_context ctx;
+ u8 rsvd2[8];
+ __be64 events_mask;
+ u8 rsvd3[176];
+ __be64 pas[0];
+};
+
+struct mlx5_create_eq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 eq_number;
+ u8 rsvd1[4];
+};
+
+struct mlx5_destroy_eq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 eqn;
+ u8 rsvd1[4];
+};
+
+struct mlx5_destroy_eq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_map_eq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be64 mask;
+ u8 mu;
+ u8 rsvd0[2];
+ u8 eqn;
+ u8 rsvd1[24];
+};
+
+struct mlx5_map_eq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_query_eq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 eqn;
+ u8 rsvd1[4];
+};
+
+struct mlx5_query_eq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+ struct mlx5_eq_context ctx;
+};
+
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
+};
+
+struct mlx5_mkey_seg {
+ /* This is a two-bit field occupying bits 31-30.
+ * Bit 31 is always 0.
+ * Bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do not have translation.
+ */
+ u8 status;
+ u8 pcie_control;
+ u8 flags;
+ u8 version;
+ __be32 qpn_mkey7_0;
+ u8 rsvd1[4];
+ __be32 flags_pd;
+ __be64 start_addr;
+ __be64 len;
+ __be32 bsfs_octo_size;
+ u8 rsvd2[16];
+ __be32 xlt_oct_size;
+ u8 rsvd3[3];
+ u8 log2_page_size;
+ u8 rsvd4[4];
+};
+
+struct mlx5_query_special_ctxs_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_query_special_ctxs_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 dump_fill_mkey;
+ __be32 reserved_lkey;
+};
+
+struct mlx5_create_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 input_mkey_index;
+ __be32 flags;
+ struct mlx5_mkey_seg seg;
+ u8 rsvd1[16];
+ __be32 xlat_oct_act_size;
+ __be32 rsvd2;
+ u8 rsvd3[168];
+ __be64 pas[0];
+};
+
+struct mlx5_create_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 mkey;
+ u8 rsvd[4];
+};
+
+struct mlx5_destroy_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 mkey;
+ u8 rsvd[4];
+};
+
+struct mlx5_destroy_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_query_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 mkey;
+};
+
+struct mlx5_query_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be64 pas[0];
+};
+
+struct mlx5_modify_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 mkey;
+ __be64 pas[0];
+};
+
+struct mlx5_modify_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_dump_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+};
+
+struct mlx5_dump_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 mkey;
+};
+
+struct mlx5_mad_ifc_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be16 remote_lid;
+ u8 rsvd0;
+ u8 port;
+ u8 rsvd1[4];
+ u8 data[256];
+};
+
+struct mlx5_mad_ifc_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+ u8 data[256];
+};
+
+struct mlx5_access_reg_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[2];
+ __be16 register_id;
+ __be32 arg;
+ __be32 data[0];
+};
+
+struct mlx5_access_reg_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+ __be32 data[0];
+};
+
+#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
+
+enum {
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
+};
+
+struct mlx5_allocate_psv_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 npsv_pd;
+ __be32 rsvd_psv0;
+};
+
+struct mlx5_allocate_psv_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+ __be32 psv_idx[4];
+};
+
+struct mlx5_destroy_psv_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 psv_number;
+ u8 rsvd[4];
+};
+
+struct mlx5_destroy_psv_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+#endif /* MLX5_DEVICE_H */
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
new file mode 100644
index 000000000..afc78a3f4
--- /dev/null
+++ b/include/linux/mlx5/doorbell.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_DOORBELL_H
+#define MLX5_DOORBELL_H
+
+#define MLX5_BF_OFFSET 0x800
+#define MLX5_CQ_DOORBELL 0x20
+
+#if BITS_PER_LONG == 64
+/* Assume that we can just write a 64-bit doorbell atomically. s390
+ * actually doesn't have writeq() but S/390 systems don't even have
+ * PCI so we won't worry about it.
+ */
+
+#define MLX5_DECLARE_DOORBELL_LOCK(name)
+#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
+#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
+
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
+ spinlock_t *doorbell_lock)
+{
+ __raw_writeq(*(u64 *)val, dest);
+}
+
+#else
+
+/* Just fall back to a spinlock to protect the doorbell if
+ * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
+ * MMIO writes.
+ */
+
+#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
+#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
+#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
+
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
+ spinlock_t *doorbell_lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(doorbell_lock, flags);
+ __raw_writel((__force u32) val[0], dest);
+ __raw_writel((__force u32) val[1], dest + 4);
+ spin_unlock_irqrestore(doorbell_lock, flags);
+}
+
+#endif
+
+#endif /* MLX5_DOORBELL_H */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
new file mode 100644
index 000000000..9a90e7523
--- /dev/null
+++ b/include/linux/mlx5/driver.h
@@ -0,0 +1,812 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_DRIVER_H
+#define MLX5_DRIVER_H
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/pci.h>
+#include <linux/spinlock_types.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/radix-tree.h>
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/doorbell.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+enum {
+ MLX5_BOARD_ID_LEN = 64,
+ MLX5_MAX_NAME_LEN = 16,
+};
+
+enum {
+ /* two hours for the sake of bringup. Generally, commands must always
+ * complete, and this timeout value may need to be adjusted later.
+ */
+ MLX5_CMD_TIMEOUT_MSEC = 7200 * 1000,
+ MLX5_CMD_WQ_MAX_NAME = 32,
+};
+
+enum {
+ CMD_OWNER_SW = 0x0,
+ CMD_OWNER_HW = 0x1,
+ CMD_STATUS_SUCCESS = 0,
+};
+
+enum mlx5_sqp_t {
+ MLX5_SQP_SMI = 0,
+ MLX5_SQP_GSI = 1,
+ MLX5_SQP_IEEE_1588 = 2,
+ MLX5_SQP_SNIFFER = 3,
+ MLX5_SQP_SYNC_UMR = 4,
+};
+
+enum {
+ MLX5_MAX_PORTS = 2,
+};
+
+enum {
+ MLX5_EQ_VEC_PAGES = 0,
+ MLX5_EQ_VEC_CMD = 1,
+ MLX5_EQ_VEC_ASYNC = 2,
+ MLX5_EQ_VEC_COMP_BASE,
+};
+
+enum {
+ MLX5_MAX_EQ_NAME = 32
+};
+
+enum {
+ MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
+ MLX5_ATOMIC_MODE_CX = 2 << 16,
+ MLX5_ATOMIC_MODE_8B = 3 << 16,
+ MLX5_ATOMIC_MODE_16B = 4 << 16,
+ MLX5_ATOMIC_MODE_32B = 5 << 16,
+ MLX5_ATOMIC_MODE_64B = 6 << 16,
+ MLX5_ATOMIC_MODE_128B = 7 << 16,
+ MLX5_ATOMIC_MODE_256B = 8 << 16,
+};
+
+enum {
+ MLX5_REG_PCAP = 0x5001,
+ MLX5_REG_PMTU = 0x5003,
+ MLX5_REG_PTYS = 0x5004,
+ MLX5_REG_PAOS = 0x5006,
+ MLX5_REG_PMAOS = 0x5012,
+ MLX5_REG_PUDE = 0x5009,
+ MLX5_REG_PMPE = 0x5010,
+ MLX5_REG_PELC = 0x500e,
+ MLX5_REG_PMLP = 0, /* TBD */
+ MLX5_REG_NODE_DESC = 0x6001,
+ MLX5_REG_HOST_ENDIANNESS = 0x7004,
+};
+
+enum mlx5_page_fault_resume_flags {
+ MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
+ MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
+ MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
+ MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
+};
+
+enum dbg_rsc_type {
+ MLX5_DBG_RSC_QP,
+ MLX5_DBG_RSC_EQ,
+ MLX5_DBG_RSC_CQ,
+};
+
+struct mlx5_field_desc {
+ struct dentry *dent;
+ int i;
+};
+
+struct mlx5_rsc_debug {
+ struct mlx5_core_dev *dev;
+ void *object;
+ enum dbg_rsc_type type;
+ struct dentry *root;
+ struct mlx5_field_desc fields[0];
+};
+
+enum mlx5_dev_event {
+ MLX5_DEV_EVENT_SYS_ERROR,
+ MLX5_DEV_EVENT_PORT_UP,
+ MLX5_DEV_EVENT_PORT_DOWN,
+ MLX5_DEV_EVENT_PORT_INITIALIZED,
+ MLX5_DEV_EVENT_LID_CHANGE,
+ MLX5_DEV_EVENT_PKEY_CHANGE,
+ MLX5_DEV_EVENT_GUID_CHANGE,
+ MLX5_DEV_EVENT_CLIENT_REREG,
+};
+
+struct mlx5_uuar_info {
+ struct mlx5_uar *uars;
+ int num_uars;
+ int num_low_latency_uuars;
+ unsigned long *bitmap;
+ unsigned int *count;
+ struct mlx5_bf *bfs;
+
+ /*
+ * protect uuar allocation data structs
+ */
+ struct mutex lock;
+ u32 ver;
+};
+
+struct mlx5_bf {
+ void __iomem *reg;
+ void __iomem *regreg;
+ int buf_size;
+ struct mlx5_uar *uar;
+ unsigned long offset;
+ int need_lock;
+ /* protect blue flame buffer selection when needed
+ */
+ spinlock_t lock;
+
+ /* serialize 64 bit writes when done as two 32 bit accesses
+ */
+ spinlock_t lock32;
+ int uuarn;
+};
+
+struct mlx5_cmd_first {
+ __be32 data[4];
+};
+
+struct mlx5_cmd_msg {
+ struct list_head list;
+ struct cache_ent *cache;
+ u32 len;
+ struct mlx5_cmd_first first;
+ struct mlx5_cmd_mailbox *next;
+};
+
+struct mlx5_cmd_debug {
+ struct dentry *dbg_root;
+ struct dentry *dbg_in;
+ struct dentry *dbg_out;
+ struct dentry *dbg_outlen;
+ struct dentry *dbg_status;
+ struct dentry *dbg_run;
+ void *in_msg;
+ void *out_msg;
+ u8 status;
+ u16 inlen;
+ u16 outlen;
+};
+
+struct cache_ent {
+ /* protect block chain allocations
+ */
+ spinlock_t lock;
+ struct list_head head;
+};
+
+struct cmd_msg_cache {
+ struct cache_ent large;
+ struct cache_ent med;
+
+};
+
+struct mlx5_cmd_stats {
+ u64 sum;
+ u64 n;
+ struct dentry *root;
+ struct dentry *avg;
+ struct dentry *count;
+ /* protect command average calculations */
+ spinlock_t lock;
+};
+
+struct mlx5_cmd {
+ void *cmd_alloc_buf;
+ dma_addr_t alloc_dma;
+ int alloc_size;
+ void *cmd_buf;
+ dma_addr_t dma;
+ u16 cmdif_rev;
+ u8 log_sz;
+ u8 log_stride;
+ int max_reg_cmds;
+ int events;
+ u32 __iomem *vector;
+
+ /* protect command queue allocations
+ */
+ spinlock_t alloc_lock;
+
+ /* protect token allocations
+ */
+ spinlock_t token_lock;
+ u8 token;
+ unsigned long bitmask;
+ char wq_name[MLX5_CMD_WQ_MAX_NAME];
+ struct workqueue_struct *wq;
+ struct semaphore sem;
+ struct semaphore pages_sem;
+ int mode;
+ struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
+ struct pci_pool *pool;
+ struct mlx5_cmd_debug dbg;
+ struct cmd_msg_cache cache;
+ int checksum_disabled;
+ struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
+};
+
+struct mlx5_port_caps {
+ int gid_table_len;
+ int pkey_table_len;
+};
+
+struct mlx5_general_caps {
+ u8 log_max_eq;
+ u8 log_max_cq;
+ u8 log_max_qp;
+ u8 log_max_mkey;
+ u8 log_max_pd;
+ u8 log_max_srq;
+ u8 log_max_strq;
+ u8 log_max_mrw_sz;
+ u8 log_max_bsf_list_size;
+ u8 log_max_klm_list_size;
+ u32 max_cqes;
+ int max_wqes;
+ u32 max_eqes;
+ u32 max_indirection;
+ int max_sq_desc_sz;
+ int max_rq_desc_sz;
+ int max_dc_sq_desc_sz;
+ u64 flags;
+ u16 stat_rate_support;
+ int log_max_msg;
+ int num_ports;
+ u8 log_max_ra_res_qp;
+ u8 log_max_ra_req_qp;
+ int max_srq_wqes;
+ int bf_reg_size;
+ int bf_regs_per_page;
+ struct mlx5_port_caps port[MLX5_MAX_PORTS];
+ u8 ext_port_cap[MLX5_MAX_PORTS];
+ int max_vf;
+ u32 reserved_lkey;
+ u8 local_ca_ack_delay;
+ u8 log_max_mcg;
+ u32 max_qp_mcg;
+ int min_page_sz;
+ int pd_cap;
+ u32 max_qp_counters;
+ u32 pkey_table_size;
+ u8 log_max_ra_req_dc;
+ u8 log_max_ra_res_dc;
+ u32 uar_sz;
+ u8 min_log_pg_sz;
+ u8 log_max_xrcd;
+ u16 log_uar_page_sz;
+};
+
+struct mlx5_caps {
+ struct mlx5_general_caps gen;
+};
+
+struct mlx5_cmd_mailbox {
+ void *buf;
+ dma_addr_t dma;
+ struct mlx5_cmd_mailbox *next;
+};
+
+struct mlx5_buf_list {
+ void *buf;
+ dma_addr_t map;
+};
+
+struct mlx5_buf {
+ struct mlx5_buf_list direct;
+ struct mlx5_buf_list *page_list;
+ int nbufs;
+ int npages;
+ int size;
+ u8 page_shift;
+};
+
+struct mlx5_eq {
+ struct mlx5_core_dev *dev;
+ __be32 __iomem *doorbell;
+ u32 cons_index;
+ struct mlx5_buf buf;
+ int size;
+ u8 irqn;
+ u8 eqn;
+ int nent;
+ u64 mask;
+ char name[MLX5_MAX_EQ_NAME];
+ struct list_head list;
+ int index;
+ struct mlx5_rsc_debug *dbg;
+};
+
+struct mlx5_core_psv {
+ u32 psv_idx;
+ struct psv_layout {
+ u32 pd;
+ u16 syndrome;
+ u16 reserved;
+ u16 bg;
+ u16 app_tag;
+ u32 ref_tag;
+ } psv;
+};
+
+struct mlx5_core_sig_ctx {
+ struct mlx5_core_psv psv_memory;
+ struct mlx5_core_psv psv_wire;
+ struct ib_sig_err err_item;
+ bool sig_status_checked;
+ bool sig_err_exists;
+ u32 sigerr_count;
+};
+
+struct mlx5_core_mr {
+ u64 iova;
+ u64 size;
+ u32 key;
+ u32 pd;
+};
+
+enum mlx5_res_type {
+ MLX5_RES_QP,
+};
+
+struct mlx5_core_rsc_common {
+ enum mlx5_res_type res;
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct mlx5_core_srq {
+ u32 srqn;
+ int max;
+ int max_gs;
+ int max_avail_gather;
+ int wqe_shift;
+ void (*event) (struct mlx5_core_srq *, enum mlx5_event);
+
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct mlx5_eq_table {
+ void __iomem *update_ci;
+ void __iomem *update_arm_ci;
+ struct list_head comp_eqs_list;
+ struct mlx5_eq pages_eq;
+ struct mlx5_eq async_eq;
+ struct mlx5_eq cmd_eq;
+ struct msix_entry *msix_arr;
+ int num_comp_vectors;
+ /* protect EQs list
+ */
+ spinlock_t lock;
+};
+
+struct mlx5_uar {
+ u32 index;
+ struct list_head bf_list;
+ unsigned free_bf_bmap;
+ void __iomem *wc_map;
+ void __iomem *map;
+};
+
+
+struct mlx5_core_health {
+ struct health_buffer __iomem *health;
+ __be32 __iomem *health_counter;
+ struct timer_list timer;
+ struct list_head list;
+ u32 prev;
+ int miss_counter;
+};
+
+struct mlx5_cq_table {
+ /* protect radix tree
+ */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
+struct mlx5_qp_table {
+ /* protect radix tree
+ */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
+struct mlx5_srq_table {
+ /* protect radix tree
+ */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
+struct mlx5_mr_table {
+ /* protect radix tree
+ */
+ rwlock_t lock;
+ struct radix_tree_root tree;
+};
+
+struct mlx5_priv {
+ char name[MLX5_MAX_NAME_LEN];
+ struct mlx5_eq_table eq_table;
+ struct mlx5_uuar_info uuari;
+ MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
+
+ /* pages stuff */
+ struct workqueue_struct *pg_wq;
+ struct rb_root page_root;
+ int fw_pages;
+ atomic_t reg_pages;
+ struct list_head free_list;
+
+ struct mlx5_core_health health;
+
+ struct mlx5_srq_table srq_table;
+
+ /* start: qp stuff */
+ struct mlx5_qp_table qp_table;
+ struct dentry *qp_debugfs;
+ struct dentry *eq_debugfs;
+ struct dentry *cq_debugfs;
+ struct dentry *cmdif_debugfs;
+ /* end: qp stuff */
+
+ /* start: cq stuff */
+ struct mlx5_cq_table cq_table;
+ /* end: cq stuff */
+
+ /* start: mr stuff */
+ struct mlx5_mr_table mr_table;
+ /* end: mr stuff */
+
+ /* start: alloc stuff */
+ struct mutex pgdir_mutex;
+ struct list_head pgdir_list;
+ /* end: alloc stuff */
+ struct dentry *dbg_root;
+
+ /* protect mkey key part */
+ spinlock_t mkey_lock;
+ u8 mkey_key;
+
+ struct list_head dev_list;
+ struct list_head ctx_list;
+ spinlock_t ctx_lock;
+};
+
+struct mlx5_core_dev {
+ struct pci_dev *pdev;
+ u8 rev_id;
+ char board_id[MLX5_BOARD_ID_LEN];
+ struct mlx5_cmd cmd;
+ struct mlx5_caps caps;
+ phys_addr_t iseg_base;
+ struct mlx5_init_seg __iomem *iseg;
+ void (*event) (struct mlx5_core_dev *dev,
+ enum mlx5_dev_event event,
+ unsigned long param);
+ struct mlx5_priv priv;
+ struct mlx5_profile *profile;
+ atomic_t num_qps;
+};
+
+struct mlx5_db {
+ __be32 *db;
+ union {
+ struct mlx5_db_pgdir *pgdir;
+ struct mlx5_ib_user_db_page *user_page;
+ } u;
+ dma_addr_t dma;
+ int index;
+};
+
+enum {
+ MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
+};
+
+enum {
+ MLX5_COMP_EQ_SIZE = 1024,
+};
+
+struct mlx5_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
+typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
+
+struct mlx5_cmd_work_ent {
+ struct mlx5_cmd_msg *in;
+ struct mlx5_cmd_msg *out;
+ void *uout;
+ int uout_size;
+ mlx5_cmd_cbk_t callback;
+ void *context;
+ int idx;
+ struct completion done;
+ struct mlx5_cmd *cmd;
+ struct work_struct work;
+ struct mlx5_cmd_layout *lay;
+ int ret;
+ int page_queue;
+ u8 status;
+ u8 token;
+ u64 ts1;
+ u64 ts2;
+ u16 op;
+};
+
+struct mlx5_pas {
+ u64 pa;
+ u8 log_sz;
+};
+
+static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
+{
+ if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1))
+ return buf->direct.buf + offset;
+ else
+ return buf->page_list[offset >> PAGE_SHIFT].buf +
+ (offset & (PAGE_SIZE - 1));
+}
+
+extern struct workqueue_struct *mlx5_core_wq;
+
+#define STRUCT_FIELD(header, field) \
+ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
+ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
+
+struct ib_field {
+ size_t struct_offset_bytes;
+ size_t struct_size_bytes;
+ int offset_bits;
+ int size_bits;
+};
+
+static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+extern struct dentry *mlx5_debugfs_root;
+
+static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->fw_rev) & 0xffff;
+}
+
+static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->fw_rev) >> 16;
+}
+
+static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
+}
+
+static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+}
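
A hedged example of reporting the values decoded by the four helpers above during probe:

	dev_info(&dev->pdev->dev,
		 "firmware %d.%d.%d, command interface rev %d\n",
		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev),
		 cmdif_rev(dev));
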
+
+static inline void *mlx5_vzalloc(unsigned long size)
+{
+ void *rtn;
+
+ rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!rtn)
+ rtn = vzalloc(size);
+ return rtn;
+}
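
mlx5_vzalloc() tries a physically contiguous allocation first and quietly falls back to vmalloc for large command buffers, so callers free with kvfree(). A minimal usage sketch (inlen assumed computed by the caller):

	void *in = mlx5_vzalloc(inlen);

	if (!in)
		return -ENOMEM;
	/* ... fill the inbox and execute the command ... */
	kvfree(in);
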
+
+static inline u32 mlx5_base_mkey(const u32 key)
+{
+ return key & 0xffffff00u;
+}
+
+int mlx5_cmd_init(struct mlx5_core_dev *dev);
+void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
+int mlx5_cmd_status_to_err_v2(void *ptr);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
+ u16 opmod);
+int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int out_size);
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size, mlx5_cmd_cbk_t callback,
+ void *context);
+int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
+int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
+int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+void mlx5_health_cleanup(void);
+void __init mlx5_health_init(void);
+void mlx5_start_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
+ struct mlx5_buf *buf);
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
+ gfp_t flags, int npages);
+void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_mailbox *head);
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen);
+int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
+int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out);
+int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq);
+void mlx5_init_mr_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ struct mlx5_create_mkey_mbox_in *in, int inlen,
+ mlx5_cmd_cbk_t callback, void *context,
+ struct mlx5_create_mkey_mbox_out *out);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ struct mlx5_query_mkey_mbox_out *out, int outlen);
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ u32 *mkey);
+int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
+int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+ u16 opmod, u8 port);
+void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
+ s32 npages);
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
+void mlx5_register_debugfs(void);
+void mlx5_unregister_debugfs(void);
+int mlx5_eq_init(struct mlx5_core_dev *dev);
+void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
+void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+#endif
+void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
+struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
+void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
+int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
+ int nent, u64 mask, const char *name, struct mlx5_uar *uar);
+int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_start_eqs(struct mlx5_core_dev *dev);
+int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
+int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+
+int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+ int size_in, void *data_out, int size_out,
+ u16 reg_num, int arg, int write);
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct mlx5_query_eq_mbox_out *out, int outlen);
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
+
+const char *mlx5_command_str(int command);
+int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
+ int npsvs, u32 *sig_index);
+int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
+ struct mlx5_odp_caps *odp_caps);
+
+static inline u32 mlx5_mkey_to_idx(u32 mkey)
+{
+ return mkey >> 8;
+}
+
+static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
+{
+ return mkey_idx << 8;
+}
+
+static inline u8 mlx5_mkey_variant(u32 mkey)
+{
+ return mkey & 0xff;
+}
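+
+/*
+ * Illustrative sketch (not part of the mlx5 API): an mkey packs the
+ * 24-bit key-table index above an 8-bit variant byte, so the helpers
+ * above split and recombine a key losslessly. mlx5_mkey_recombine() is
+ * a hypothetical name used only for this example.
+ */
+static inline u32 mlx5_mkey_recombine(u32 mkey)
+{
+ u32 idx = mlx5_mkey_to_idx(mkey); /* mkey >> 8 */
+ u8 variant = mlx5_mkey_variant(mkey); /* mkey & 0xff */
+
+ return mlx5_idx_to_mkey(idx) | variant; /* equals mkey */
+}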
+
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MAX_MR_CACHE_ENTRIES = 16,
+};
+
+enum {
+ MLX5_INTERFACE_PROTOCOL_IB = 0,
+ MLX5_INTERFACE_PROTOCOL_ETH = 1,
+};
+
+struct mlx5_interface {
+ void * (*add)(struct mlx5_core_dev *dev);
+ void (*remove)(struct mlx5_core_dev *dev, void *context);
+ void (*event)(struct mlx5_core_dev *dev, void *context,
+ enum mlx5_dev_event event, unsigned long param);
+ void * (*get_dev)(void *context);
+ int protocol;
+ struct list_head list;
+};
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
+int mlx5_register_interface(struct mlx5_interface *intf);
+void mlx5_unregister_interface(struct mlx5_interface *intf);
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MR_CACHE_ENTRIES];
+};
+
+#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
new file mode 100644
index 000000000..cb3ad17ed
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_IFC_H
+#define MLX5_IFC_H
+
+enum {
+ MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
+ MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
+ MLX5_CMD_OP_INIT_HCA = 0x102,
+ MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
+ MLX5_CMD_OP_ENABLE_HCA = 0x104,
+ MLX5_CMD_OP_DISABLE_HCA = 0x105,
+ MLX5_CMD_OP_QUERY_PAGES = 0x107,
+ MLX5_CMD_OP_MANAGE_PAGES = 0x108,
+ MLX5_CMD_OP_SET_HCA_CAP = 0x109,
+ MLX5_CMD_OP_CREATE_MKEY = 0x200,
+ MLX5_CMD_OP_QUERY_MKEY = 0x201,
+ MLX5_CMD_OP_DESTROY_MKEY = 0x202,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
+ MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_CREATE_EQ = 0x301,
+ MLX5_CMD_OP_DESTROY_EQ = 0x302,
+ MLX5_CMD_OP_QUERY_EQ = 0x303,
+ MLX5_CMD_OP_GEN_EQE = 0x304,
+ MLX5_CMD_OP_CREATE_CQ = 0x400,
+ MLX5_CMD_OP_DESTROY_CQ = 0x401,
+ MLX5_CMD_OP_QUERY_CQ = 0x402,
+ MLX5_CMD_OP_MODIFY_CQ = 0x403,
+ MLX5_CMD_OP_CREATE_QP = 0x500,
+ MLX5_CMD_OP_DESTROY_QP = 0x501,
+ MLX5_CMD_OP_RST2INIT_QP = 0x502,
+ MLX5_CMD_OP_INIT2RTR_QP = 0x503,
+ MLX5_CMD_OP_RTR2RTS_QP = 0x504,
+ MLX5_CMD_OP_RTS2RTS_QP = 0x505,
+ MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
+ MLX5_CMD_OP_2ERR_QP = 0x507,
+ MLX5_CMD_OP_2RST_QP = 0x50a,
+ MLX5_CMD_OP_QUERY_QP = 0x50b,
+ MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
+ MLX5_CMD_OP_CREATE_PSV = 0x600,
+ MLX5_CMD_OP_DESTROY_PSV = 0x601,
+ MLX5_CMD_OP_CREATE_SRQ = 0x700,
+ MLX5_CMD_OP_DESTROY_SRQ = 0x701,
+ MLX5_CMD_OP_QUERY_SRQ = 0x702,
+ MLX5_CMD_OP_ARM_RQ = 0x703,
+ MLX5_CMD_OP_RESIZE_SRQ = 0x704,
+ MLX5_CMD_OP_CREATE_DCT = 0x710,
+ MLX5_CMD_OP_DESTROY_DCT = 0x711,
+ MLX5_CMD_OP_DRAIN_DCT = 0x712,
+ MLX5_CMD_OP_QUERY_DCT = 0x713,
+ MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
+ MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
+ MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
+ MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760,
+ MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
+ MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_ALLOC_PD = 0x800,
+ MLX5_CMD_OP_DEALLOC_PD = 0x801,
+ MLX5_CMD_OP_ALLOC_UAR = 0x802,
+ MLX5_CMD_OP_DEALLOC_UAR = 0x803,
+ MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
+ MLX5_CMD_OP_ACCESS_REG = 0x805,
+ MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
+ MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
+ MLX5_CMD_OP_MAD_IFC = 0x50d,
+ MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
+ MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c,
+ MLX5_CMD_OP_NOP = 0x80d,
+ MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
+ MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
+ MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
+ MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813,
+ MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
+ MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
+ MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
+ MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
+ MLX5_CMD_OP_CREATE_TIR = 0x900,
+ MLX5_CMD_OP_MODIFY_TIR = 0x901,
+ MLX5_CMD_OP_DESTROY_TIR = 0x902,
+ MLX5_CMD_OP_QUERY_TIR = 0x903,
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_MODIFY_TIS = 0x913,
+ MLX5_CMD_OP_DESTROY_TIS = 0x914,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_SQ = 0x904,
+ MLX5_CMD_OP_MODIFY_SQ = 0x905,
+ MLX5_CMD_OP_DESTROY_SQ = 0x906,
+ MLX5_CMD_OP_QUERY_SQ = 0x907,
+ MLX5_CMD_OP_CREATE_RQ = 0x908,
+ MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_DESTROY_RQ = 0x90a,
+ MLX5_CMD_OP_QUERY_RQ = 0x90b,
+ MLX5_CMD_OP_CREATE_RMP = 0x90c,
+ MLX5_CMD_OP_MODIFY_RMP = 0x90d,
+ MLX5_CMD_OP_DESTROY_RMP = 0x90e,
+ MLX5_CMD_OP_QUERY_RMP = 0x90f,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
+ MLX5_CMD_OP_MAX = 0x911
+};
+
+struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 reserved_0[0x80];
+
+ u8 log_max_srq_sz[0x8];
+ u8 log_max_qp_sz[0x8];
+ u8 reserved_1[0xb];
+ u8 log_max_qp[0x5];
+
+ u8 log_max_strq_sz[0x8];
+ u8 reserved_2[0x3];
+ u8 log_max_srqs[0x5];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0x8];
+ u8 log_max_cq_sz[0x8];
+ u8 reserved_5[0xb];
+ u8 log_max_cq[0x5];
+
+ u8 log_max_eq_sz[0x8];
+ u8 reserved_6[0x2];
+ u8 log_max_mkey[0x6];
+ u8 reserved_7[0xc];
+ u8 log_max_eq[0x4];
+
+ u8 max_indirection[0x8];
+ u8 reserved_8[0x1];
+ u8 log_max_mrw_sz[0x7];
+ u8 reserved_9[0x2];
+ u8 log_max_bsf_list_size[0x6];
+ u8 reserved_10[0x2];
+ u8 log_max_klm_list_size[0x6];
+
+ u8 reserved_11[0xa];
+ u8 log_max_ra_req_dc[0x6];
+ u8 reserved_12[0xa];
+ u8 log_max_ra_res_dc[0x6];
+
+ u8 reserved_13[0xa];
+ u8 log_max_ra_req_qp[0x6];
+ u8 reserved_14[0xa];
+ u8 log_max_ra_res_qp[0x6];
+
+ u8 pad_cap[0x1];
+ u8 cc_query_allowed[0x1];
+ u8 cc_modify_allowed[0x1];
+ u8 reserved_15[0x1d];
+
+ u8 reserved_16[0x6];
+ u8 max_qp_cnt[0xa];
+ u8 pkey_table_size[0x10];
+
+ u8 eswitch_owner[0x1];
+ u8 reserved_17[0xa];
+ u8 local_ca_ack_delay[0x5];
+ u8 reserved_18[0x8];
+ u8 num_ports[0x8];
+
+ u8 reserved_19[0x3];
+ u8 log_max_msg[0x5];
+ u8 reserved_20[0x18];
+
+ u8 stat_rate_support[0x10];
+ u8 reserved_21[0x10];
+
+ u8 reserved_22[0x10];
+ u8 cmdif_checksum[0x2];
+ u8 sigerr_cqe[0x1];
+ u8 reserved_23[0x1];
+ u8 wq_signature[0x1];
+ u8 sctr_data_cqe[0x1];
+ u8 reserved_24[0x1];
+ u8 sho[0x1];
+ u8 tph[0x1];
+ u8 rf[0x1];
+ u8 dc[0x1];
+ u8 reserved_25[0x2];
+ u8 roce[0x1];
+ u8 atomic[0x1];
+ u8 rsz_srq[0x1];
+
+ u8 cq_oi[0x1];
+ u8 cq_resize[0x1];
+ u8 cq_moderation[0x1];
+ u8 sniffer_rule_flow[0x1];
+ u8 sniffer_rule_vport[0x1];
+ u8 sniffer_rule_phy[0x1];
+ u8 reserved_26[0x1];
+ u8 pg[0x1];
+ u8 block_lb_mc[0x1];
+ u8 reserved_27[0x3];
+ u8 cd[0x1];
+ u8 reserved_28[0x1];
+ u8 apm[0x1];
+ u8 reserved_29[0x7];
+ u8 qkv[0x1];
+ u8 pkv[0x1];
+ u8 reserved_30[0x4];
+ u8 xrc[0x1];
+ u8 ud[0x1];
+ u8 uc[0x1];
+ u8 rc[0x1];
+
+ u8 reserved_31[0xa];
+ u8 uar_sz[0x6];
+ u8 reserved_32[0x8];
+ u8 log_pg_sz[0x8];
+
+ u8 bf[0x1];
+ u8 reserved_33[0xa];
+ u8 log_bf_reg_size[0x5];
+ u8 reserved_34[0x10];
+
+ u8 reserved_35[0x10];
+ u8 max_wqe_sz_sq[0x10];
+
+ u8 reserved_36[0x10];
+ u8 max_wqe_sz_rq[0x10];
+
+ u8 reserved_37[0x10];
+ u8 max_wqe_sz_sq_dc[0x10];
+
+ u8 reserved_38[0x7];
+ u8 max_qp_mcg[0x19];
+
+ u8 reserved_39[0x18];
+ u8 log_max_mcg[0x8];
+
+ u8 reserved_40[0xb];
+ u8 log_max_pd[0x5];
+ u8 reserved_41[0xb];
+ u8 log_max_xrcd[0x5];
+
+ u8 reserved_42[0x20];
+
+ u8 reserved_43[0x3];
+ u8 log_max_rq[0x5];
+ u8 reserved_44[0x3];
+ u8 log_max_sq[0x5];
+ u8 reserved_45[0x3];
+ u8 log_max_tir[0x5];
+ u8 reserved_46[0x3];
+ u8 log_max_tis[0x5];
+
+ u8 reserved_47[0x13];
+ u8 log_max_rq_per_tir[0x5];
+ u8 reserved_48[0x3];
+ u8 log_max_tis_per_sq[0x5];
+
+ u8 reserved_49[0xe0];
+
+ u8 reserved_50[0x10];
+ u8 log_uar_page_sz[0x10];
+
+ u8 reserved_51[0x100];
+
+ u8 reserved_52[0x1f];
+ u8 cqe_zip[0x1];
+
+ u8 cqe_zip_timeout[0x10];
+ u8 cqe_zip_max_num[0x10];
+
+ u8 reserved_53[0x220];
+};
+
+struct mlx5_ifc_set_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 capability_struct[256][0x8];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
new file mode 100644
index 000000000..310b5f7fd
--- /dev/null
+++ b/include/linux/mlx5/qp.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_QP_H
+#define MLX5_QP_H
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/driver.h>
+
+#define MLX5_INVALID_LKEY 0x100
+#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
+#define MLX5_DIF_SIZE 8
+#define MLX5_STRIDE_BLOCK_OP 0x400
+#define MLX5_CPY_GRD_MASK 0xc0
+#define MLX5_CPY_APP_MASK 0x30
+#define MLX5_CPY_REF_MASK 0x0f
+#define MLX5_BSF_INC_REFTAG (1 << 6)
+#define MLX5_BSF_INL_VALID (1 << 15)
+#define MLX5_BSF_REFRESH_DIF (1 << 14)
+#define MLX5_BSF_REPEAT_BLOCK (1 << 7)
+#define MLX5_BSF_APPTAG_ESCAPE 0x1
+#define MLX5_BSF_APPREF_ESCAPE 0x2
+
+#define MLX5_QPN_BITS 24
+#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
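+
+/*
+ * Illustrative sketch: MLX5_QPN_MASK selects the low MLX5_QPN_BITS bits
+ * of a word that packs a QP number with flag bits above it, as the
+ * flags_qpn mailbox fields later in this header suggest. The helper name
+ * is hypothetical and used only for this example.
+ */
+static inline u32 example_qpn_from_flags_qpn(__be32 flags_qpn)
+{
+ return be32_to_cpu(flags_qpn) & MLX5_QPN_MASK; /* low 24 bits */
+}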
+
+enum mlx5_qp_optpar {
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
+ MLX5_QP_OPTPAR_RRE = 1 << 1,
+ MLX5_QP_OPTPAR_RAE = 1 << 2,
+ MLX5_QP_OPTPAR_RWE = 1 << 3,
+ MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
+ MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
+ MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
+ MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
+ MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
+ MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
+ MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
+ MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
+ MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
+ MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
+ MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
+ MLX5_QP_OPTPAR_SRQN = 1 << 18,
+ MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
+ MLX5_QP_OPTPAR_DC_HS = 1 << 20,
+ MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
+};
+
+enum mlx5_qp_state {
+ MLX5_QP_STATE_RST = 0,
+ MLX5_QP_STATE_INIT = 1,
+ MLX5_QP_STATE_RTR = 2,
+ MLX5_QP_STATE_RTS = 3,
+ MLX5_QP_STATE_SQER = 4,
+ MLX5_QP_STATE_SQD = 5,
+ MLX5_QP_STATE_ERR = 6,
+ MLX5_QP_STATE_SQ_DRAINING = 7,
+ MLX5_QP_STATE_SUSPENDED = 9,
+ MLX5_QP_NUM_STATE
+};
+
+enum {
+ MLX5_QP_ST_RC = 0x0,
+ MLX5_QP_ST_UC = 0x1,
+ MLX5_QP_ST_UD = 0x2,
+ MLX5_QP_ST_XRC = 0x3,
+ MLX5_QP_ST_MLX = 0x4,
+ MLX5_QP_ST_DCI = 0x5,
+ MLX5_QP_ST_DCT = 0x6,
+ MLX5_QP_ST_QP0 = 0x7,
+ MLX5_QP_ST_QP1 = 0x8,
+ MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
+ MLX5_QP_ST_RAW_IPV6 = 0xa,
+ MLX5_QP_ST_SNIFFER = 0xb,
+ MLX5_QP_ST_SYNC_UMR = 0xe,
+ MLX5_QP_ST_PTP_1588 = 0xd,
+ MLX5_QP_ST_REG_UMR = 0xc,
+ MLX5_QP_ST_MAX
+};
+
+enum {
+ MLX5_QP_PM_MIGRATED = 0x3,
+ MLX5_QP_PM_ARMED = 0x0,
+ MLX5_QP_PM_REARM = 0x1
+};
+
+enum {
+ MLX5_NON_ZERO_RQ = 0 << 24,
+ MLX5_SRQ_RQ = 1 << 24,
+ MLX5_CRQ_RQ = 2 << 24,
+ MLX5_ZERO_LEN_RQ = 3 << 24
+};
+
+enum {
+ /* params1 */
+ MLX5_QP_BIT_SRE = 1 << 15,
+ MLX5_QP_BIT_SWE = 1 << 14,
+ MLX5_QP_BIT_SAE = 1 << 13,
+ /* params2 */
+ MLX5_QP_BIT_RRE = 1 << 15,
+ MLX5_QP_BIT_RWE = 1 << 14,
+ MLX5_QP_BIT_RAE = 1 << 13,
+ MLX5_QP_BIT_RIC = 1 << 4,
+};
+
+enum {
+ MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
+ MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+};
+
+enum {
+ MLX5_SEND_WQE_BB = 64,
+};
+
+enum {
+ MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
+ MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
+ MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
+ MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
+ MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
+};
+
+enum {
+ MLX5_FENCE_MODE_NONE = 0 << 5,
+ MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
+ MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
+ MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
+};
+
+enum {
+ MLX5_QP_LAT_SENSITIVE = 1 << 28,
+ MLX5_QP_BLOCK_MCAST = 1 << 30,
+ MLX5_QP_ENABLE_SIG = 1 << 31,
+};
+
+enum {
+ MLX5_RCV_DBR = 0,
+ MLX5_SND_DBR = 1,
+};
+
+enum {
+ MLX5_FLAGS_INLINE = 1<<7,
+ MLX5_FLAGS_CHECK_FREE = 1<<5,
+};
+
+struct mlx5_wqe_fmr_seg {
+ __be32 flags;
+ __be32 mem_key;
+ __be64 buf_list;
+ __be64 start_addr;
+ __be64 reg_len;
+ __be32 offset;
+ __be32 page_size;
+ u32 reserved[2];
+};
+
+struct mlx5_wqe_ctrl_seg {
+ __be32 opmod_idx_opcode;
+ __be32 qpn_ds;
+ u8 signature;
+ u8 rsvd[2];
+ u8 fm_ce_se;
+ __be32 imm;
+};
+
+#define MLX5_WQE_CTRL_DS_MASK 0x3f
+#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
+#define MLX5_WQE_CTRL_QPN_SHIFT 8
+#define MLX5_WQE_DS_UNITS 16
+#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
+#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
+#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
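+
+/*
+ * Illustrative sketch of how the two packed words of the ctrl segment
+ * relate to the masks and shifts above (drivers may also fold an opmod
+ * byte into the top bits of opmod_idx_opcode). mlx5_pack_wqe_ctrl() is a
+ * hypothetical helper used only for this example; 'ds' is the descriptor
+ * count in MLX5_WQE_DS_UNITS-byte units.
+ */
+static inline void mlx5_pack_wqe_ctrl(struct mlx5_wqe_ctrl_seg *ctrl,
+ u16 wqe_index, u8 opcode, u32 qpn, u8 ds)
+{
+ ctrl->opmod_idx_opcode = cpu_to_be32(((u32)wqe_index << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | opcode);
+ ctrl->qpn_ds = cpu_to_be32((qpn << MLX5_WQE_CTRL_QPN_SHIFT) | (ds & MLX5_WQE_CTRL_DS_MASK));
+}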
+
+struct mlx5_wqe_xrc_seg {
+ __be32 xrc_srqn;
+ u8 rsvd[12];
+};
+
+struct mlx5_wqe_masked_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+ __be64 swap_add_mask;
+ __be64 compare_mask;
+};
+
+struct mlx5_av {
+ union {
+ struct {
+ __be32 qkey;
+ __be32 reserved;
+ } qkey;
+ __be64 dc_key;
+ } key;
+ __be32 dqp_dct;
+ u8 stat_rate_sl;
+ u8 fl_mlid;
+ __be16 rlid;
+ u8 reserved0[10];
+ u8 tclass;
+ u8 hop_limit;
+ __be32 grh_gid_fl;
+ u8 rgid[16];
+};
+
+struct mlx5_wqe_datagram_seg {
+ struct mlx5_av av;
+};
+
+struct mlx5_wqe_raddr_seg {
+ __be64 raddr;
+ __be32 rkey;
+ u32 reserved;
+};
+
+struct mlx5_wqe_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+};
+
+struct mlx5_wqe_data_seg {
+ __be32 byte_count;
+ __be32 lkey;
+ __be64 addr;
+};
+
+struct mlx5_wqe_umr_ctrl_seg {
+ u8 flags;
+ u8 rsvd0[3];
+ __be16 klm_octowords;
+ __be16 bsf_octowords;
+ __be64 mkey_mask;
+ u8 rsvd1[32];
+};
+
+struct mlx5_seg_set_psv {
+ __be32 psv_num;
+ __be16 syndrome;
+ __be16 status;
+ __be32 transient_sig;
+ __be32 ref_tag;
+};
+
+struct mlx5_seg_get_psv {
+ u8 rsvd[19];
+ u8 num_psv;
+ __be32 l_key;
+ __be64 va;
+ __be32 psv_index[4];
+};
+
+struct mlx5_seg_check_psv {
+ u8 rsvd0[2];
+ __be16 err_coalescing_op;
+ u8 rsvd1[2];
+ __be16 xport_err_op;
+ u8 rsvd2[2];
+ __be16 xport_err_mask;
+ u8 rsvd3[7];
+ u8 num_psv;
+ __be32 l_key;
+ __be64 va;
+ __be32 psv_index[4];
+};
+
+struct mlx5_rwqe_sig {
+ u8 rsvd0[4];
+ u8 signature;
+ u8 rsvd1[11];
+};
+
+struct mlx5_wqe_signature_seg {
+ u8 rsvd0[4];
+ u8 signature;
+ u8 rsvd1[11];
+};
+
+#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
+
+struct mlx5_wqe_inline_seg {
+ __be32 byte_count;
+};
+
+enum mlx5_sig_type {
+ MLX5_DIF_CRC = 0x1,
+ MLX5_DIF_IPCS = 0x2,
+};
+
+struct mlx5_bsf_inl {
+ __be16 vld_refresh;
+ __be16 dif_apptag;
+ __be32 dif_reftag;
+ u8 sig_type;
+ u8 rp_inv_seed;
+ u8 rsvd[3];
+ u8 dif_inc_ref_guard_check;
+ __be16 dif_app_bitmask_check;
+};
+
+struct mlx5_bsf {
+ struct mlx5_bsf_basic {
+ u8 bsf_size_sbs;
+ u8 check_byte_mask;
+ union {
+ u8 copy_byte_mask;
+ u8 bs_selector;
+ u8 rsvd_wflags;
+ } wire;
+ union {
+ u8 bs_selector;
+ u8 rsvd_mflags;
+ } mem;
+ __be32 raw_data_size;
+ __be32 w_bfs_psv;
+ __be32 m_bfs_psv;
+ } basic;
+ struct mlx5_bsf_ext {
+ __be32 t_init_gen_pro_size;
+ __be32 rsvd_epi_size;
+ __be32 w_tfs_psv;
+ __be32 m_tfs_psv;
+ } ext;
+ struct mlx5_bsf_inl w_inl;
+ struct mlx5_bsf_inl m_inl;
+};
+
+struct mlx5_klm {
+ __be32 bcount;
+ __be32 key;
+ __be64 va;
+};
+
+struct mlx5_stride_block_entry {
+ __be16 stride;
+ __be16 bcount;
+ __be32 key;
+ __be64 va;
+};
+
+struct mlx5_stride_block_ctrl_seg {
+ __be32 bcount_per_cycle;
+ __be32 op;
+ __be32 repeat_count;
+ u16 rsvd;
+ __be16 num_entries;
+};
+
+enum mlx5_pagefault_flags {
+ MLX5_PFAULT_REQUESTOR = 1 << 0,
+ MLX5_PFAULT_WRITE = 1 << 1,
+ MLX5_PFAULT_RDMA = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u8 event_subtype;
+ enum mlx5_pagefault_flags flags;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size; resolving at least this
+ * size is required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+};
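+
+/*
+ * Hypothetical illustration (not part of the mlx5 API) of how a consumer
+ * of struct mlx5_pagefault might pick the relevant union member: RDMA
+ * responder faults carry their size in 'rdma', other subtypes in 'wqe'.
+ * example_pfault_size() exists only for this sketch.
+ */
+static inline u32 example_pfault_size(const struct mlx5_pagefault *pfault)
+{
+ if (pfault->flags & MLX5_PFAULT_RDMA)
+ return pfault->rdma.packet_size;
+ return pfault->wqe.packet_size;
+}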
+
+struct mlx5_core_qp {
+ struct mlx5_core_rsc_common common; /* must be first */
+ void (*event) (struct mlx5_core_qp *, int);
+ void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
+ int qpn;
+ struct mlx5_rsc_debug *dbg;
+ int pid;
+};
+
+struct mlx5_qp_path {
+ u8 fl;
+ u8 rsvd3;
+ u8 free_ar;
+ u8 pkey_index;
+ u8 rsvd0;
+ u8 grh_mlid;
+ __be16 rlid;
+ u8 ackto_lt;
+ u8 mgid_index;
+ u8 static_rate;
+ u8 hop_limit;
+ __be32 tclass_flowlabel;
+ u8 rgid[16];
+ u8 rsvd1[4];
+ u8 sl;
+ u8 port;
+ u8 rsvd2[6];
+};
+
+struct mlx5_qp_context {
+ __be32 flags;
+ __be32 flags_pd;
+ u8 mtu_msgmax;
+ u8 rq_size_stride;
+ __be16 sq_crq_size;
+ __be32 qp_counter_set_usr_page;
+ __be32 wire_qpn;
+ __be32 log_pg_sz_remote_qpn;
+ struct mlx5_qp_path pri_path;
+ struct mlx5_qp_path alt_path;
+ __be32 params1;
+ u8 reserved2[4];
+ __be32 next_send_psn;
+ __be32 cqn_send;
+ u8 reserved3[8];
+ __be32 last_acked_psn;
+ __be32 ssn;
+ __be32 params2;
+ __be32 rnr_nextrecvpsn;
+ __be32 xrcd;
+ __be32 cqn_recv;
+ __be64 db_rec_addr;
+ __be32 qkey;
+ __be32 rq_type_srqn;
+ __be32 rmsn;
+ __be16 hw_sq_wqe_counter;
+ __be16 sw_sq_wqe_counter;
+ __be16 hw_rcyclic_byte_counter;
+ __be16 hw_rq_counter;
+ __be16 sw_rcyclic_byte_counter;
+ __be16 sw_rq_counter;
+ u8 rsvd0[5];
+ u8 cgs;
+ u8 cs_req;
+ u8 cs_res;
+ __be64 dc_access_key;
+ u8 rsvd1[24];
+};
+
+struct mlx5_create_qp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 input_qpn;
+ u8 rsvd0[4];
+ __be32 opt_param_mask;
+ u8 rsvd1[4];
+ struct mlx5_qp_context ctx;
+ u8 rsvd3[16];
+ __be64 pas[0];
+};
+
+struct mlx5_create_qp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_qp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_qp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_modify_qp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd1[4];
+ __be32 optparam;
+ u8 rsvd0[4];
+ struct mlx5_qp_context ctx;
+};
+
+struct mlx5_modify_qp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_query_qp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd[4];
+};
+
+struct mlx5_query_qp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd1[8];
+ __be32 optparam;
+ u8 rsvd0[4];
+ struct mlx5_qp_context ctx;
+ u8 rsvd2[16];
+ __be64 pas[0];
+};
+
+struct mlx5_conf_sqp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd[3];
+ u8 type;
+};
+
+struct mlx5_conf_sqp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_alloc_xrcd_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_alloc_xrcd_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 xrcdn;
+ u8 rsvd[4];
+};
+
+struct mlx5_dealloc_xrcd_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 xrcdn;
+ u8 rsvd[4];
+};
+
+struct mlx5_dealloc_xrcd_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
+{
+ return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
+}
+
+static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
+{
+ return radix_tree_lookup(&dev->priv.mr_table.tree, key);
+}
+
+struct mlx5_page_fault_resume_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 flags_qpn;
+ u8 reserved[4];
+};
+
+struct mlx5_page_fault_resume_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+int mlx5_core_create_qp(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp,
+ struct mlx5_create_qp_mbox_in *in,
+ int inlen);
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
+ enum mlx5_qp_state new_state,
+ struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+ struct mlx5_core_qp *qp);
+int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp);
+int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+ struct mlx5_query_qp_mbox_out *out, int outlen);
+
+int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
+int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
+void mlx5_init_qp_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
+int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
+ u8 context, int error);
+#endif
+
+static inline const char *mlx5_qp_type_str(int type)
+{
+ switch (type) {
+ case MLX5_QP_ST_RC: return "RC";
+ case MLX5_QP_ST_UC: return "UC";
+ case MLX5_QP_ST_UD: return "UD";
+ case MLX5_QP_ST_XRC: return "XRC";
+ case MLX5_QP_ST_MLX: return "MLX";
+ case MLX5_QP_ST_QP0: return "QP0";
+ case MLX5_QP_ST_QP1: return "QP1";
+ case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
+ case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
+ case MLX5_QP_ST_SNIFFER: return "SNIFFER";
+ case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
+ case MLX5_QP_ST_PTP_1588: return "PTP_1588";
+ case MLX5_QP_ST_REG_UMR: return "REG_UMR";
+ default: return "Invalid transport type";
+ }
+}
+
+static inline const char *mlx5_qp_state_str(int state)
+{
+ switch (state) {
+ case MLX5_QP_STATE_RST:
+ return "RST";
+ case MLX5_QP_STATE_INIT:
+ return "INIT";
+ case MLX5_QP_STATE_RTR:
+ return "RTR";
+ case MLX5_QP_STATE_RTS:
+ return "RTS";
+ case MLX5_QP_STATE_SQER:
+ return "SQER";
+ case MLX5_QP_STATE_SQD:
+ return "SQD";
+ case MLX5_QP_STATE_ERR:
+ return "ERR";
+ case MLX5_QP_STATE_SQ_DRAINING:
+ return "SQ_DRAINING";
+ case MLX5_QP_STATE_SUSPENDED:
+ return "SUSPENDED";
+ default: return "Invalid QP state";
+ }
+}
+
+#endif /* MLX5_QP_H */
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
new file mode 100644
index 000000000..f43ed054a
--- /dev/null
+++ b/include/linux/mlx5/srq.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_SRQ_H
+#define MLX5_SRQ_H
+
+#include <linux/mlx5/driver.h>
+
+void mlx5_init_srq_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
+
+#endif /* MLX5_SRQ_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
new file mode 100644
index 000000000..ffe4ecf89
--- /dev/null
+++ b/include/linux/mm.h
@@ -0,0 +1,2227 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/errno.h>
+
+#ifdef __KERNEL__
+
+#include <linux/mmdebug.h>
+#include <linux/gfp.h>
+#include <linux/bug.h>
+#include <linux/list.h>
+#include <linux/mmzone.h>
+#include <linux/rbtree.h>
+#include <linux/atomic.h>
+#include <linux/debug_locks.h>
+#include <linux/mm_types.h>
+#include <linux/range.h>
+#include <linux/pfn.h>
+#include <linux/bit_spinlock.h>
+#include <linux/shrinker.h>
+#include <linux/resource.h>
+#include <linux/page_ext.h>
+
+struct mempolicy;
+struct anon_vma;
+struct anon_vma_chain;
+struct file_ra_state;
+struct user_struct;
+struct writeback_control;
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
+extern unsigned long max_mapnr;
+
+static inline void set_max_mapnr(unsigned long limit)
+{
+ max_mapnr = limit;
+}
+#else
+static inline void set_max_mapnr(unsigned long limit) { }
+#endif
+
+extern unsigned long totalram_pages;
+extern void * high_memory;
+extern int page_cluster;
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_legacy_va_layout;
+#else
+#define sysctl_legacy_va_layout 0
+#endif
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#ifndef __pa_symbol
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#endif
+
+/*
+ * To prevent common memory management code establishing
+ * a zero page mapping on a read fault.
+ * This macro should be defined within <asm/pgtable.h>.
+ * s390 does this to prevent multiplexing of hardware bits
+ * related to the physical page in case of virtualization.
+ */
+#ifndef mm_forbids_zeropage
+#define mm_forbids_zeropage(X) (0)
+#endif
+
+extern unsigned long sysctl_user_reserve_kbytes;
+extern unsigned long sysctl_admin_reserve_kbytes;
+
+extern int sysctl_overcommit_memory;
+extern int sysctl_overcommit_ratio;
+extern unsigned long sysctl_overcommit_kbytes;
+
+extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
+ size_t *, loff_t *);
+extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
+ size_t *, loff_t *);
+
+#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
+#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+extern struct kmem_cache *vm_area_cachep;
+
+#ifndef CONFIG_MMU
+extern struct rb_root nommu_region_tree;
+extern struct rw_semaphore nommu_region_sem;
+
+extern unsigned int kobjsize(const void *objp);
+#endif
+
+/*
+ * vm_flags in vm_area_struct, see mm_types.h.
+ */
+#define VM_NONE 0x00000000
+
+#define VM_READ 0x00000001 /* currently active flags */
+#define VM_WRITE 0x00000002
+#define VM_EXEC 0x00000004
+#define VM_SHARED 0x00000008
+
+/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
+#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x00000020
+#define VM_MAYEXEC 0x00000040
+#define VM_MAYSHARE 0x00000080
+
+#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
+#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+
+#define VM_LOCKED 0x00002000
+#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
+
+ /* Used by sys_madvise() */
+#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
+#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
+
+#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
+#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
+#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
+#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
+#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
+#define VM_ARCH_2 0x02000000
+#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
+#else
+# define VM_SOFTDIRTY 0
+#endif
+
+#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
+#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
+#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
+#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+
+#if defined(CONFIG_X86)
+# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
+#elif defined(CONFIG_PPC)
+# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
+#elif defined(CONFIG_PARISC)
+# define VM_GROWSUP VM_ARCH_1
+#elif defined(CONFIG_METAG)
+# define VM_GROWSUP VM_ARCH_1
+#elif defined(CONFIG_IA64)
+# define VM_GROWSUP VM_ARCH_1
+#elif !defined(CONFIG_MMU)
+# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
+#endif
+
+#if defined(CONFIG_X86)
+/* MPX specific bounds table or bounds directory */
+# define VM_MPX VM_ARCH_2
+#endif
+
+#ifndef VM_GROWSUP
+# define VM_GROWSUP VM_NONE
+#endif
+
+/* Bits set in the VMA until the stack is in its final location */
+#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
+
+#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+#endif
+
+#ifdef CONFIG_STACK_GROWSUP
+#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#else
+#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#endif
+
+/*
+ * Special vmas that are non-mergable, non-mlock()able.
+ * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
+ */
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
+
+/* This mask defines which mm->def_flags a process can inherit its parent */
+#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
+#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
+#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
+#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
+#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
+#define FAULT_FLAG_TRIED 0x20 /* Second try */
+#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
+
+/*
+ * vm_fault is filled by the pagefault handler and passed to the vma's
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
+ *
+ * pgoff should be used in favour of virtual_address, if possible.
+ */
+struct vm_fault {
+ unsigned int flags; /* FAULT_FLAG_xxx flags */
+ pgoff_t pgoff; /* Logical page offset based on vma */
+ void __user *virtual_address; /* Faulting virtual address */
+
+ struct page *cow_page; /* Handler may choose to COW */
+ struct page *page; /* ->fault handlers should return a
+ * page here, unless VM_FAULT_NOPAGE
+ * is set (which is also implied by
+ * VM_FAULT_ERROR).
+ */
+ /* for ->map_pages() only */
+ pgoff_t max_pgoff; /* map pages for offset from pgoff till
+ * max_pgoff inclusive */
+ pte_t *pte; /* pte entry associated with ->pgoff */
+};
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+ /* notification that a previously read-only page is about to become
+ * writable; if an error is returned it will cause a SIGBUS */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+ /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
+ int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+ /* called by access_process_vm when get_user_pages() fails, typically
+ * for use by special VMAs that can switch between memory and hardware
+ */
+ int (*access)(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+
+ /* Called by the /proc/PID/maps code to ask the vma whether it
+ * has a special name. Returning non-NULL will also cause this
+ * vma to be dumped unconditionally. */
+ const char *(*name)(struct vm_area_struct *vma);
+
+#ifdef CONFIG_NUMA
+ /*
+ * set_policy() op must add a reference to any non-NULL @new mempolicy
+ * to hold the policy upon return. Caller should pass NULL @new to
+ * remove a policy and fall back to surrounding context--i.e. do not
+ * install a MPOL_DEFAULT policy, nor the task or system default
+ * mempolicy.
+ */
+ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+ /*
+ * get_policy() op must add reference [mpol_get()] to any policy at
+ * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
+ * in mm/mempolicy.c will do this automatically.
+ * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+ * must return NULL--i.e., do not "fallback" to task or system default
+ * policy.
+ */
+ struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+ unsigned long addr);
+#endif
+ /*
+ * Called by vm_normal_page() for special PTEs to find the
+ * page for @addr. This is useful if the default behavior
+ * (using pte_page()) would not find the correct page.
+ */
+ struct page *(*find_special_page)(struct vm_area_struct *vma,
+ unsigned long addr);
+};
+
+struct mmu_gather;
+struct inode;
+
+#define page_private(page) ((page)->private)
+#define set_page_private(page, v) ((page)->private = (v))
+
+/* It's valid only while the page is on the free path or on a free_list */
+static inline void set_freepage_migratetype(struct page *page, int migratetype)
+{
+ page->index = migratetype;
+}
+
+/* It's valid only while the page is on the free path or on a free_list */
+static inline int get_freepage_migratetype(struct page *page)
+{
+ return page->index;
+}
+
+/*
+ * FIXME: take this include out, include page-flags.h in
+ * files which need it (119 of them)
+ */
+#include <linux/page-flags.h>
+#include <linux/huge_mm.h>
+
+/*
+ * Methods to modify the page usage count.
+ *
+ * What counts for a page usage:
+ * - cache mapping (page->mapping)
+ * - private data (page->private)
+ * - page mapped in a task's page tables, each mapping
+ * is counted separately
+ *
+ * Also, many kernel routines increase the page count before a critical
+ * routine so they can be sure the page doesn't go away from under them.
+ */
+
+/*
+ * Drop a ref, return true if the refcount fell to zero (the page has no users)
+ */
+static inline int put_page_testzero(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
+ return atomic_dec_and_test(&page->_count);
+}
+
+/*
+ * Try to grab a ref unless the page has a refcount of zero; return false if
+ * that is the case.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
+ */
+static inline int get_page_unless_zero(struct page *page)
+{
+ return atomic_inc_not_zero(&page->_count);
+}
+
+/*
+ * Try to drop a ref unless the page has a refcount of one; return false if
+ * that is the case.
+ * This is to make sure that the refcount won't become zero after this drop.
+ * This can be called when MMU is off so it must not access
+ * any of the virtual mappings.
+ */
+static inline int put_page_unless_one(struct page *page)
+{
+ return atomic_add_unless(&page->_count, -1, 1);
+}
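+
+/*
+ * A minimal, hypothetical illustration of the speculative-reference
+ * pattern these helpers support: only take a reference if the page still
+ * has users, since a page whose refcount already reached zero may be on
+ * its way back to the allocator and must not be revived. The reference
+ * taken here is later dropped with put_page().
+ */
+static inline struct page *example_try_get_page(struct page *page)
+{
+ if (page && get_page_unless_zero(page))
+ return page;
+ return NULL;
+}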
+
+extern int page_is_ram(unsigned long pfn);
+extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
+
+/* Support for virtually mapped pages */
+struct page *vmalloc_to_page(const void *addr);
+unsigned long vmalloc_to_pfn(const void *addr);
+
+/*
+ * Determine if an address is within the vmalloc range
+ *
+ * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
+ * is no special casing required.
+ */
+static inline int is_vmalloc_addr(const void *x)
+{
+#ifdef CONFIG_MMU
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= VMALLOC_START && addr < VMALLOC_END;
+#else
+ return 0;
+#endif
+}
+#ifdef CONFIG_MMU
+extern int is_vmalloc_or_module_addr(const void *x);
+#else
+static inline int is_vmalloc_or_module_addr(const void *x)
+{
+ return 0;
+}
+#endif
+
+extern void kvfree(const void *addr);
+
+static inline void compound_lock(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ bit_spin_lock(PG_compound_lock, &page->flags);
+#endif
+}
+
+static inline void compound_unlock(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ bit_spin_unlock(PG_compound_lock, &page->flags);
+#endif
+}
+
+static inline unsigned long compound_lock_irqsave(struct page *page)
+{
+ unsigned long uninitialized_var(flags);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ local_irq_save(flags);
+ compound_lock(page);
+#endif
+ return flags;
+}
+
+static inline void compound_unlock_irqrestore(struct page *page,
+ unsigned long flags)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ compound_unlock(page);
+ local_irq_restore(flags);
+#endif
+}
+
+static inline struct page *compound_head_by_tail(struct page *tail)
+{
+ struct page *head = tail->first_page;
+
+ /*
+ * page->first_page may be a dangling pointer to an old
+ * compound page, so recheck that it is still a tail
+ * page before returning.
+ */
+ smp_rmb();
+ if (likely(PageTail(tail)))
+ return head;
+ return tail;
+}
+
+/*
+ * Since a compound page could be dismantled asynchronously by THP, or we
+ * may be accessing an arbitrarily positioned struct page asynchronously,
+ * there is a potential race on the tail flag. To handle this race, call
+ * smp_rmb() before checking the tail flag; compound_head_by_tail() does so.
+ */
+static inline struct page *compound_head(struct page *page)
+{
+ if (unlikely(PageTail(page)))
+ return compound_head_by_tail(page);
+ return page;
+}
+
+/*
+ * If we access a compound page synchronously, such as through a page we
+ * have already allocated, there is no tail flag race to handle, so we can
+ * check the tail flag directly without any synchronization primitive.
+ */
+static inline struct page *compound_head_fast(struct page *page)
+{
+ if (unlikely(PageTail(page)))
+ return page->first_page;
+ return page;
+}
+
+/*
+ * The atomic page->_mapcount, starts from -1: so that transitions
+ * both from it and to it can be tracked, using atomic_inc_and_test
+ * and atomic_add_negative(-1).
+ */
+static inline void page_mapcount_reset(struct page *page)
+{
+ atomic_set(&(page)->_mapcount, -1);
+}
+
+static inline int page_mapcount(struct page *page)
+{
+ VM_BUG_ON_PAGE(PageSlab(page), page);
+ return atomic_read(&page->_mapcount) + 1;
+}
+
+static inline int page_count(struct page *page)
+{
+ return atomic_read(&compound_head(page)->_count);
+}
+
+static inline bool __compound_tail_refcounted(struct page *page)
+{
+ return !PageSlab(page) && !PageHeadHuge(page);
+}
+
+/*
+ * This takes a head page as a parameter and tells whether
+ * tail page reference counting can be skipped for it.
+ *
+ * For this to be safe, PageSlab and PageHeadHuge must remain true on
+ * any given page where they return true here, until all tail pins
+ * have been released.
+ */
+static inline bool compound_tail_refcounted(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageHead(page), page);
+ return __compound_tail_refcounted(page);
+}
+
+static inline void get_huge_page_tail(struct page *page)
+{
+ /*
+ * __split_huge_page_refcount() cannot run from under us.
+ */
+ VM_BUG_ON_PAGE(!PageTail(page), page);
+ VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+ VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
+ if (compound_tail_refcounted(page->first_page))
+ atomic_inc(&page->_mapcount);
+}
+
+extern bool __get_page_tail(struct page *page);
+
+static inline void get_page(struct page *page)
+{
+ if (unlikely(PageTail(page)))
+ if (likely(__get_page_tail(page)))
+ return;
+ /*
+ * Getting a normal page or the head of a compound page
+ * requires to already have an elevated page->_count.
+ */
+ VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
+ atomic_inc(&page->_count);
+}
+
+static inline struct page *virt_to_head_page(const void *x)
+{
+ struct page *page = virt_to_page(x);
+
+ /*
+ * We don't need to worry about synchronization of tail flag
+ * when we call virt_to_head_page() since it is only called for
+ * already allocated page and this page won't be freed until
+ * this virt_to_head_page() is finished. So use _fast variant.
+ */
+ return compound_head_fast(page);
+}
+
+/*
+ * Set up the page count before the page is freed into the page allocator
+ * for the first time (boot or memory hotplug).
+ */
+static inline void init_page_count(struct page *page)
+{
+ atomic_set(&page->_count, 1);
+}
+
+void put_page(struct page *page);
+void put_pages_list(struct list_head *pages);
+
+void split_page(struct page *page, unsigned int order);
+int split_free_page(struct page *page);
+
+/*
+ * Compound pages have a destructor function. Provide a
+ * prototype for that function and accessor functions.
+ * These are _only_ valid on the head of a PG_compound page.
+ */
+
+static inline void set_compound_page_dtor(struct page *page,
+ compound_page_dtor *dtor)
+{
+ page[1].compound_dtor = dtor;
+}
+
+static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+{
+ return page[1].compound_dtor;
+}
+
+static inline int compound_order(struct page *page)
+{
+ if (!PageHead(page))
+ return 0;
+ return page[1].compound_order;
+}
+
+static inline void set_compound_order(struct page *page, unsigned long order)
+{
+ page[1].compound_order = order;
+}
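+
+/*
+ * Hypothetical illustration: the destructor and order of a compound page
+ * are parked in the first tail page (page[1]), so the accessors above are
+ * meaningful only when applied to the head page. This helper exists only
+ * for this sketch.
+ */
+static inline bool example_is_compound_of_order(struct page *head,
+ unsigned int order)
+{
+ return PageHead(head) && compound_order(head) == (int)order;
+}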
+
+#ifdef CONFIG_MMU
+/*
+ * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
+ * servicing faults for write access. In the normal case, we always want
+ * pte_mkwrite. But get_user_pages can cause write faults for mappings
+ * that do not have writing enabled, when used by access_process_vm.
+ */
+static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_flags & VM_WRITE))
+ pte = pte_mkwrite(pte);
+ return pte;
+}
+
+void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+ struct page *page, pte_t *pte, bool write, bool anon);
+#endif
+
+/*
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page_count(page) denotes a reference count.
+ * page_count() == 0 means the page is free. page->lru is then used for
+ * freelist management in the buddy allocator.
+ * page_count() > 0 means the page has been allocated.
+ *
+ * Pages are allocated by the slab allocator in order to provide memory
+ * to kmalloc and kmem_cache_alloc. In this case, the management of the
+ * page, and the fields in 'struct page' are the responsibility of mm/slab.c
+ * unless a particular usage is carefully commented. (the responsibility of
+ * freeing the kmalloc memory is the caller's, of course).
+ *
+ * A page may be used by anyone else who does a __get_free_page().
+ * In this case, page_count still tracks the references, and should only
+ * be used through the normal accessor functions. The top bits of page->flags
+ * and page->virtual store page management information, but all other fields
+ * are unused and could be used privately, carefully. The management of this
+ * page is the responsibility of the one who allocated it, and those who have
+ * subsequently been given references to it.
+ *
+ * The other pages (we may call them "pagecache pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A pagecache page contains an opaque `private' member, which belongs to the
+ * page's address_space. Usually, this is the address of a circular list of
+ * the page's disk buffers. PG_private must be set to tell the VM to call
+ * into the filesystem to release these pages.
+ *
+ * A page may belong to an inode's memory mapping. In this case, page->mapping
+ * is the pointer to the inode, and page->index is the file offset of the page,
+ * in units of PAGE_CACHE_SIZE.
+ *
+ * If pagecache pages are not associated with an inode, they are said to be
+ * anonymous pages. These may become associated with the swapcache, and in that
+ * case PG_swapcache is set, and page->private is an offset into the swapcache.
+ *
+ * In either case (swapcache or inode backed), the pagecache itself holds one
+ * reference to the page. Setting PG_private should also increment the
+ * refcount. Each user mapping also has a reference to the page.
+ *
+ * The pagecache pages are stored in a per-mapping radix tree, which is
+ * rooted at mapping->page_tree, and indexed by offset.
+ * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
+ * lists, we instead now tag pages as dirty/writeback in the radix tree.
+ *
+ * All pagecache pages may be subject to I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written back to the inode on disk,
+ * - anonymous pages (including MAP_PRIVATE file mappings) which have been
+ * modified may need to be swapped out to swap space and (later) to be read
+ * back into memory.
+ */
+
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
+#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
+#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
+
+/*
+ * Define the bit shifts to access each section. For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away reference to them.
+ */
+#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
+ SECTIONS_PGOFF : ZONES_PGOFF)
+#else
+#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
+ NODES_PGOFF : ZONES_PGOFF)
+#endif
+
+#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#endif
+
+#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
+#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
+#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+
+static inline enum zone_type page_zonenum(const struct page *page)
+{
+ return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#define SECTION_IN_PAGE_FLAGS
+#endif
+
+/*
+ * The identification function is mainly used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really identifying
+ * the zone since we could be using the section number id if we do not have
+ * node id available in page flags.
+ * We only guarantee that it will return the same value for two combinable
+ * pages in a zone.
+ */
+static inline int page_zone_id(struct page *page)
+{
+ return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
+}
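+
+/*
+ * Hypothetical illustration of the guarantee described above: the buddy
+ * allocator may treat two pages as candidates for merging only if their
+ * zone ids match. This helper exists only for this sketch.
+ */
+static inline bool example_pages_combinable(struct page *a, struct page *b)
+{
+ return page_zone_id(a) == page_zone_id(b);
+}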
+
+static inline int zone_to_nid(struct zone *zone)
+{
+#ifdef CONFIG_NUMA
+ return zone->node;
+#else
+ return 0;
+#endif
+}
+
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+extern int page_to_nid(const struct page *page);
+#else
+static inline int page_to_nid(const struct page *page)
+{
+ return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+}
+#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline int cpu_pid_to_cpupid(int cpu, int pid)
+{
+ return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
+}
+
+static inline int cpupid_to_pid(int cpupid)
+{
+ return cpupid & LAST__PID_MASK;
+}
+
+static inline int cpupid_to_cpu(int cpupid)
+{
+ return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
+}
+
+static inline int cpupid_to_nid(int cpupid)
+{
+ return cpu_to_node(cpupid_to_cpu(cpupid));
+}
+
+static inline bool cpupid_pid_unset(int cpupid)
+{
+ return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
+}
+
+static inline bool cpupid_cpu_unset(int cpupid)
+{
+ return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
+}
+
+static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
+{
+ return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
+}
+
+#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+{
+ return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
+}
+
+static inline int page_cpupid_last(struct page *page)
+{
+ return page->_last_cpupid;
+}
+static inline void page_cpupid_reset_last(struct page *page)
+{
+ page->_last_cpupid = -1 & LAST_CPUPID_MASK;
+}
+#else
+static inline int page_cpupid_last(struct page *page)
+{
+ return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+}
+
+extern int page_cpupid_xchg_last(struct page *page, int cpupid);
+
+static inline void page_cpupid_reset_last(struct page *page)
+{
+ int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
+
+ page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+ page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
+}
+#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+#else /* !CONFIG_NUMA_BALANCING */
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+{
+ return page_to_nid(page); /* XXX */
+}
+
+static inline int page_cpupid_last(struct page *page)
+{
+ return page_to_nid(page); /* XXX */
+}
+
+static inline int cpupid_to_nid(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpupid_to_pid(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpupid_to_cpu(int cpupid)
+{
+ return -1;
+}
+
+static inline int cpu_pid_to_cpupid(int nid, int pid)
+{
+ return -1;
+}
+
+static inline bool cpupid_pid_unset(int cpupid)
+{
+ return 1;
+}
+
+static inline void page_cpupid_reset_last(struct page *page)
+{
+}
+
+static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
+{
+ return false;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline struct zone *page_zone(const struct page *page)
+{
+ return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
+}
+
+#ifdef SECTION_IN_PAGE_FLAGS
+static inline void set_page_section(struct page *page, unsigned long section)
+{
+ page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+ page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+}
+
+static inline unsigned long page_to_section(const struct page *page)
+{
+ return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+}
+#endif
+
+static inline void set_page_zone(struct page *page, enum zone_type zone)
+{
+ page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
+ page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+}
+
+static inline void set_page_node(struct page *page, unsigned long node)
+{
+ page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
+ page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+}
+
+static inline void set_page_links(struct page *page, enum zone_type zone,
+ unsigned long node, unsigned long pfn)
+{
+ set_page_zone(page, zone);
+ set_page_node(page, node);
+#ifdef SECTION_IN_PAGE_FLAGS
+ set_page_section(page, pfn_to_section_nr(pfn));
+#endif
+}
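+
+/*
+ * Illustrative sketch: memmap initialisation records, for each struct page,
+ * which zone, node and (optionally) section it belongs to; pfn, nid and the
+ * zone are hypothetical caller-supplied values:
+ *
+ *     struct page *page = pfn_to_page(pfn);
+ *
+ *     set_page_links(page, ZONE_NORMAL, nid, pfn);
+ *
+ * page_zone(page) and page_to_nid(page) then decode those bits again.
+ */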
+
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
+static __always_inline void *lowmem_page_address(const struct page *page)
+{
+ return __va(PFN_PHYS(page_to_pfn(page)));
+}
+
+#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
+#define HASHED_PAGE_VIRTUAL
+#endif
+
+#if defined(WANT_PAGE_VIRTUAL)
+static inline void *page_address(const struct page *page)
+{
+ return page->virtual;
+}
+static inline void set_page_address(struct page *page, void *address)
+{
+ page->virtual = address;
+}
+#define page_address_init() do { } while(0)
+#endif
+
+#if defined(HASHED_PAGE_VIRTUAL)
+void *page_address(const struct page *page);
+void set_page_address(struct page *page, void *virtual);
+void page_address_init(void);
+#endif
+
+#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) lowmem_page_address(page)
+#define set_page_address(page, address) do { } while(0)
+#define page_address_init() do { } while(0)
+#endif
+
+extern void *page_rmapping(struct page *page);
+extern struct anon_vma *page_anon_vma(struct page *page);
+extern struct address_space *page_mapping(struct page *page);
+
+extern struct address_space *__page_file_mapping(struct page *);
+
+static inline
+struct address_space *page_file_mapping(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return __page_file_mapping(page);
+
+ return page->mapping;
+}
+
+/*
+ * Return the pagecache index of the passed page. Regular pagecache pages
+ * use ->index whereas swapcache pages use ->private
+ */
+static inline pgoff_t page_index(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return page_private(page);
+ return page->index;
+}
+
+extern pgoff_t __page_file_index(struct page *page);
+
+/*
+ * Return the file index of the page. Regular pagecache pages use ->index
+ * whereas swapcache pages use swp_offset(->private)
+ */
+static inline pgoff_t page_file_index(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return __page_file_index(page);
+
+ return page->index;
+}
+
+/*
+ * Return true if this page is mapped into pagetables.
+ */
+static inline int page_mapped(struct page *page)
+{
+ return atomic_read(&(page)->_mapcount) >= 0;
+}
+
+/*
+ * Different kinds of faults, as returned by handle_mm_fault().
+ * Used to decide whether a process gets delivered SIGBUS or
+ * just gets major/minor fault counters bumped up.
+ */
+
+#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */
+
+#define VM_FAULT_OOM 0x0001
+#define VM_FAULT_SIGBUS 0x0002
+#define VM_FAULT_MAJOR 0x0004
+#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
+#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
+#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
+
+#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, did not return a page */
+#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
+#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
+#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
+
+#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
+
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+ VM_FAULT_FALLBACK)
+
+/* Encode hstate index for a hwpoisoned large page */
+#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
+#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
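+
+/*
+ * Illustrative sketch: fault handlers return a mask of the codes above and
+ * callers test the bits they care about; mm, vma, address and flags are
+ * assumed to describe the faulting access:
+ *
+ *     int ret = handle_mm_fault(mm, vma, address, flags);
+ *
+ *     if (ret & VM_FAULT_ERROR)
+ *             ...handle OOM/SIGBUS/SIGSEGV/hwpoison...
+ *     else if (ret & VM_FAULT_MAJOR)
+ *             ...account a major fault...
+ */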
+
+/*
+ * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
+ */
+extern void pagefault_out_of_memory(void);
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+/*
+ * Flags passed to show_mem() and show_free_areas() to suppress output in
+ * various contexts.
+ */
+#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
+
+extern void show_free_areas(unsigned int flags);
+extern bool skip_free_areas_node(unsigned int flags, int nid);
+
+int shmem_zero_setup(struct vm_area_struct *);
+#ifdef CONFIG_SHMEM
+bool shmem_mapping(struct address_space *mapping);
+#else
+static inline bool shmem_mapping(struct address_space *mapping)
+{
+ return false;
+}
+#endif
+
+extern int can_do_mlock(void);
+extern int user_shm_lock(size_t, struct user_struct *);
+extern void user_shm_unlock(size_t, struct user_struct *);
+
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+ struct address_space *check_mapping; /* Check page->mapping if set */
+ pgoff_t first_index; /* Lowest page->index to unmap */
+ pgoff_t last_index; /* Highest page->index to unmap */
+};
+
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
+void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *);
+void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+ unsigned long start, unsigned long end);
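+
+/*
+ * Illustrative sketch: tearing down every user mapping in one VMA over
+ * [start, start + size) with no special filtering ('start' and 'size' are
+ * hypothetical caller-supplied values):
+ *
+ *     zap_page_range(vma, start, size, NULL);
+ */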
+
+/**
+ * mm_walk - callbacks for walk_page_range
+ * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry;
+ * this handler must be able to handle pmd_trans_huge() pmds,
+ * or it may simply call split_huge_page() instead of handling
+ * them explicitly.
+ * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
+ * @pte_hole: if set, called for each hole at all levels
+ * @hugetlb_entry: if set, called for each hugetlb entry
+ * @test_walk: caller-specific callback used to decide whether to walk
+ * over the current vma. A positive return value means "do the
+ * page table walk over the current vma," a negative one means
+ * "abort the current page table walk right now," and 0 means
+ * "skip the current vma."
+ * @mm: mm_struct representing the target process of page table walk
+ * @vma: vma currently walked (NULL if walking outside vmas)
+ * @private: private data for callbacks' usage
+ *
+ * (see the comment on walk_page_range() for more details)
+ */
+struct mm_walk {
+ int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pte_entry)(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*pte_hole)(unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+ int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+ int (*test_walk)(unsigned long addr, unsigned long next,
+ struct mm_walk *walk);
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ void *private;
+};
+
+int walk_page_range(unsigned long addr, unsigned long end,
+ struct mm_walk *walk);
+int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
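+
+/*
+ * Illustrative sketch: a walker fills in only the callbacks it needs and
+ * leaves the rest NULL; my_pte_entry() is a hypothetical callback name:
+ *
+ *     static int my_pte_entry(pte_t *pte, unsigned long addr,
+ *                             unsigned long next, struct mm_walk *walk)
+ *     {
+ *             ...inspect *pte for [addr, next)...
+ *             return 0;
+ *     }
+ *
+ *     struct mm_walk walk = {
+ *             .pte_entry = my_pte_entry,
+ *             .mm        = mm,
+ *     };
+ *
+ *     walk_page_range(start, end, &walk);
+ */
+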
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
+int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma);
+void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+ unsigned long *pfn);
+int follow_phys(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, unsigned long *prot, resource_size_t *phys);
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen)
+{
+ unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern void truncate_pagecache(struct inode *inode, loff_t new);
+extern void truncate_setsize(struct inode *inode, loff_t newsize);
+void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
+void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
+int truncate_inode_page(struct address_space *mapping, struct page *page);
+int generic_error_remove_page(struct address_space *mapping, struct page *page);
+int invalidate_inode_page(struct page *page);
+
+#ifdef CONFIG_MMU
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags);
+extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long address, unsigned int fault_flags);
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags)
+{
+ /* should never happen if there's no MMU */
+ BUG();
+ return VM_FAULT_SIGBUS;
+}
+static inline int fixup_user_fault(struct task_struct *tsk,
+ struct mm_struct *mm, unsigned long address,
+ unsigned int fault_flags)
+{
+ /* should never happen if there's no MMU */
+ BUG();
+ return -EFAULT;
+}
+#endif
+
+extern void vma_do_file_update_time(struct vm_area_struct *, const char[], int);
+extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[],
+ int);
+extern void vma_do_get_file(struct vm_area_struct *, const char[], int);
+extern void vma_do_fput(struct vm_area_struct *, const char[], int);
+
+#define vma_file_update_time(vma) vma_do_file_update_time(vma, __func__, \
+ __LINE__)
+#define vma_pr_or_file(vma) vma_do_pr_or_file(vma, __func__, \
+ __LINE__)
+#define vma_get_file(vma) vma_do_get_file(vma, __func__, __LINE__)
+#define vma_fput(vma) vma_do_fput(vma, __func__, __LINE__)
+
+#ifndef CONFIG_MMU
+extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int);
+extern void vmr_do_fput(struct vm_region *, const char[], int);
+
+#define vmr_pr_or_file(region) vmr_do_pr_or_file(region, __func__, \
+ __LINE__)
+#define vmr_fput(region) vmr_do_fput(region, __func__, __LINE__)
+#endif /* !CONFIG_MMU */
+
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+ void *buf, int len, int write);
+
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int foll_flags, struct page **pages,
+ struct vm_area_struct **vmas, int *nonblocking);
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ struct vm_area_struct **vmas);
+long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ int *locked);
+long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages,
+ unsigned int gup_flags);
+long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ int write, int force, struct page **pages);
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
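+
+/*
+ * Illustrative sketch: pinning a single user page for a short-lived kernel
+ * access ('uaddr' is a hypothetical user address):
+ *
+ *     struct page *page;
+ *
+ *     if (get_user_pages_fast(uaddr, 1, 1, &page) == 1) {
+ *             ...access the page, e.g. via kmap()/kunmap()...
+ *             put_page(page);
+ *     }
+ */
+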
+struct kvec;
+int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
+ struct page **pages);
+int get_kernel_page(unsigned long start, int write, struct page **pages);
+struct page *get_dump_page(unsigned long addr);
+
+extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
+extern void do_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length);
+
+int __set_page_dirty_nobuffers(struct page *page);
+int __set_page_dirty_no_writeback(struct page *page);
+int redirty_page_for_writepage(struct writeback_control *wbc,
+ struct page *page);
+void account_page_dirtied(struct page *page, struct address_space *mapping);
+void account_page_cleaned(struct page *page, struct address_space *mapping);
+int set_page_dirty(struct page *page);
+int set_page_dirty_lock(struct page *page);
+int clear_page_dirty_for_io(struct page *page);
+
+int get_cmdline(struct task_struct *task, char *buffer, int buflen);
+
+/* Is the vma a continuation of the stack vma above it? */
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+}
+
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSDOWN) &&
+ (vma->vm_start == addr) &&
+ !vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+ return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ return (vma->vm_flags & VM_GROWSUP) &&
+ (vma->vm_end == addr) &&
+ !vma_growsup(vma->vm_next, addr);
+}
+
+extern struct task_struct *task_of_stack(struct task_struct *task,
+ struct vm_area_struct *vma, bool in_group);
+
+extern unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+ unsigned long new_addr, unsigned long len,
+ bool need_rmap_locks);
+extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgprot_t newprot,
+ int dirty_accountable, int prot_numa);
+extern int mprotect_fixup(struct vm_area_struct *vma,
+ struct vm_area_struct **pprev, unsigned long start,
+ unsigned long end, unsigned long newflags);
+
+/*
+ * Like get_user_pages_fast() but does not attempt to fault pages in
+ * and may return a short count.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
+/*
+ * per-process(per-mm_struct) statistics.
+ */
+static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
+{
+ long val = atomic_long_read(&mm->rss_stat.count[member]);
+
+#ifdef SPLIT_RSS_COUNTING
+ /*
+ * The counter is updated asynchronously and may temporarily go
+ * negative, but a negative value is never what callers expect,
+ * so clamp it to zero.
+ */
+ if (val < 0)
+ val = 0;
+#endif
+ return (unsigned long)val;
+}
+
+static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
+{
+ atomic_long_add(value, &mm->rss_stat.count[member]);
+}
+
+static inline void inc_mm_counter(struct mm_struct *mm, int member)
+{
+ atomic_long_inc(&mm->rss_stat.count[member]);
+}
+
+static inline void dec_mm_counter(struct mm_struct *mm, int member)
+{
+ atomic_long_dec(&mm->rss_stat.count[member]);
+}
+
+static inline unsigned long get_mm_rss(struct mm_struct *mm)
+{
+ return get_mm_counter(mm, MM_FILEPAGES) +
+ get_mm_counter(mm, MM_ANONPAGES);
+}
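+
+/*
+ * Illustrative sketch: fault and unmap paths keep these counters up to date,
+ * e.g. for an anonymous page:
+ *
+ *     inc_mm_counter(mm, MM_ANONPAGES);   (when the page is mapped)
+ *     dec_mm_counter(mm, MM_ANONPAGES);   (when it is unmapped again)
+ */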
+
+static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
+{
+ return max(mm->hiwater_rss, get_mm_rss(mm));
+}
+
+static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
+{
+ return max(mm->hiwater_vm, mm->total_vm);
+}
+
+static inline void update_hiwater_rss(struct mm_struct *mm)
+{
+ unsigned long _rss = get_mm_rss(mm);
+
+ if ((mm)->hiwater_rss < _rss)
+ (mm)->hiwater_rss = _rss;
+}
+
+static inline void update_hiwater_vm(struct mm_struct *mm)
+{
+ if (mm->hiwater_vm < mm->total_vm)
+ mm->hiwater_vm = mm->total_vm;
+}
+
+static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
+{
+ mm->hiwater_rss = get_mm_rss(mm);
+}
+
+static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
+ struct mm_struct *mm)
+{
+ unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
+
+ if (*maxrss < hiwater_rss)
+ *maxrss = hiwater_rss;
+}
+
+#if defined(SPLIT_RSS_COUNTING)
+void sync_mm_rss(struct mm_struct *mm);
+#else
+static inline void sync_mm_rss(struct mm_struct *mm)
+{
+}
+#endif
+
+int vma_wants_writenotify(struct vm_area_struct *vma);
+
+extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl);
+static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl)
+{
+ pte_t *ptep;
+ __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
+ return ptep;
+}
+
+#ifdef __PAGETABLE_PUD_FOLDED
+static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long address)
+{
+ return 0;
+}
+#else
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+#endif
+
+#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
+static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+ unsigned long address)
+{
+ return 0;
+}
+
+static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
+
+static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
+static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
+
+#else
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+
+static inline void mm_nr_pmds_init(struct mm_struct *mm)
+{
+ atomic_long_set(&mm->nr_pmds, 0);
+}
+
+static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->nr_pmds);
+}
+
+static inline void mm_inc_nr_pmds(struct mm_struct *mm)
+{
+ atomic_long_inc(&mm->nr_pmds);
+}
+
+static inline void mm_dec_nr_pmds(struct mm_struct *mm)
+{
+ atomic_long_dec(&mm->nr_pmds);
+}
+#endif
+
+int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ pmd_t *pmd, unsigned long address);
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+
+/*
+ * The following ifdef is needed to get the 4level-fixup.h header to work.
+ * Remove it when 4level-fixup.h has been removed.
+ */
+#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
+static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
+ NULL: pud_offset(pgd, address);
+}
+
+static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+ return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+ NULL: pmd_offset(pud, address);
+}
+#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+
+#if USE_SPLIT_PTE_PTLOCKS
+#if ALLOC_SPLIT_PTLOCKS
+void __init ptlock_cache_init(void);
+extern bool ptlock_alloc(struct page *page);
+extern void ptlock_free(struct page *page);
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+ return page->ptl;
+}
+#else /* ALLOC_SPLIT_PTLOCKS */
+static inline void ptlock_cache_init(void)
+{
+}
+
+static inline bool ptlock_alloc(struct page *page)
+{
+ return true;
+}
+
+static inline void ptlock_free(struct page *page)
+{
+}
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+ return &page->ptl;
+}
+#endif /* ALLOC_SPLIT_PTLOCKS */
+
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return ptlock_ptr(pmd_page(*pmd));
+}
+
+static inline bool ptlock_init(struct page *page)
+{
+ /*
+ * prep_new_page() initializes page->private (and therefore page->ptl)
+ * to 0. Make sure nobody has taken it into use in between.
+ *
+ * That can happen if an architecture tries to use slab for page table
+ * allocation: slab code uses page->slab_cache and page->first_page
+ * (for tail pages), which share storage with page->ptl.
+ */
+ VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
+ if (!ptlock_alloc(page))
+ return false;
+ spin_lock_init(ptlock_ptr(page));
+ return true;
+}
+
+/* Reset page->mapping so free_pages_check won't complain. */
+static inline void pte_lock_deinit(struct page *page)
+{
+ page->mapping = NULL;
+ ptlock_free(page);
+}
+
+#else /* !USE_SPLIT_PTE_PTLOCKS */
+/*
+ * We use mm->page_table_lock to guard all pagetable pages of the mm.
+ */
+static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return &mm->page_table_lock;
+}
+static inline void ptlock_cache_init(void) {}
+static inline bool ptlock_init(struct page *page) { return true; }
+static inline void pte_lock_deinit(struct page *page) {}
+#endif /* USE_SPLIT_PTE_PTLOCKS */
+
+static inline void pgtable_init(void)
+{
+ ptlock_cache_init();
+ pgtable_cache_init();
+}
+
+static inline bool pgtable_page_ctor(struct page *page)
+{
+ inc_zone_page_state(page, NR_PAGETABLE);
+ return ptlock_init(page);
+}
+
+static inline void pgtable_page_dtor(struct page *page)
+{
+ pte_lock_deinit(page);
+ dec_zone_page_state(page, NR_PAGETABLE);
+}
+
+#define pte_offset_map_lock(mm, pmd, address, ptlp) \
+({ \
+ spinlock_t *__ptl = pte_lockptr(mm, pmd); \
+ pte_t *__pte = pte_offset_map(pmd, address); \
+ *(ptlp) = __ptl; \
+ spin_lock(__ptl); \
+ __pte; \
+})
+
+#define pte_unmap_unlock(pte, ptl) do { \
+ spin_unlock(ptl); \
+ pte_unmap(pte); \
+} while (0)
+
+#define pte_alloc_map(mm, vma, pmd, address) \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma, \
+ pmd, address))? \
+ NULL: pte_offset_map(pmd, address))
+
+#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL, \
+ pmd, address))? \
+ NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+
+#define pte_alloc_kernel(pmd, address) \
+ ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+ NULL: pte_offset_kernel(pmd, address))
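+
+/*
+ * Illustrative sketch: the usual pattern for touching one PTE under its
+ * page table lock ('addr' is a hypothetical address covered by pmd):
+ *
+ *     spinlock_t *ptl;
+ *     pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ *
+ *     ...examine or update *pte...
+ *     pte_unmap_unlock(pte, ptl);
+ */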
+
+#if USE_SPLIT_PMD_PTLOCKS
+
+static struct page *pmd_to_page(pmd_t *pmd)
+{
+ unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+ return virt_to_page((void *)((unsigned long) pmd & mask));
+}
+
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return ptlock_ptr(pmd_to_page(pmd));
+}
+
+static inline bool pgtable_pmd_page_ctor(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ page->pmd_huge_pte = NULL;
+#endif
+ return ptlock_init(page);
+}
+
+static inline void pgtable_pmd_page_dtor(struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
+#endif
+ ptlock_free(page);
+}
+
+#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
+
+#else
+
+static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
+{
+ return &mm->page_table_lock;
+}
+
+static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
+static inline void pgtable_pmd_page_dtor(struct page *page) {}
+
+#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
+
+#endif
+
+static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
+{
+ spinlock_t *ptl = pmd_lockptr(mm, pmd);
+ spin_lock(ptl);
+ return ptl;
+}
+
+extern void free_area_init(unsigned long * zones_size);
+extern void free_area_init_node(int nid, unsigned long * zones_size,
+ unsigned long zone_start_pfn, unsigned long *zholes_size);
+extern void free_initmem(void);
+
+/*
+ * Free reserved pages within the range [PAGE_ALIGN(start), end & PAGE_MASK)
+ * into the buddy system. The freed pages are poisoned with the pattern
+ * "poison" if it lies within the range [0, UCHAR_MAX].
+ * Returns the number of pages freed into the buddy system.
+ */
+extern unsigned long free_reserved_area(void *start, void *end,
+ int poison, char *s);
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * Free a highmem page into the buddy system, adjusting totalhigh_pages
+ * and totalram_pages.
+ */
+extern void free_highmem_page(struct page *page);
+#endif
+
+extern void adjust_managed_page_count(struct page *page, long count);
+extern void mem_init_print_info(const char *str);
+
+/* Free the reserved page into the buddy system, so it gets managed. */
+static inline void __free_reserved_page(struct page *page)
+{
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+}
+
+static inline void free_reserved_page(struct page *page)
+{
+ __free_reserved_page(page);
+ adjust_managed_page_count(page, 1);
+}
+
+static inline void mark_page_reserved(struct page *page)
+{
+ SetPageReserved(page);
+ adjust_managed_page_count(page, -1);
+}
+
+/*
+ * Default method to free all the __init memory into the buddy system.
+ * The freed pages are poisoned with the pattern "poison" if it lies
+ * within the range [0, UCHAR_MAX].
+ * Returns the number of pages freed into the buddy system.
+ */
+static inline unsigned long free_initmem_default(int poison)
+{
+ extern char __init_begin[], __init_end[];
+
+ return free_reserved_area(&__init_begin, &__init_end,
+ poison, "unused kernel");
+}
+
+static inline unsigned long get_num_physpages(void)
+{
+ int nid;
+ unsigned long phys_pages = 0;
+
+ for_each_online_node(nid)
+ phys_pages += node_present_pages(nid);
+
+ return phys_pages;
+}
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+/*
+ * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
+ * zones, allocate the backing mem_map and account for memory holes in a more
+ * architecture independent manner. This is a substitute for creating the
+ * zone_sizes[] and zholes_size[] arrays and passing them to
+ * free_area_init_node()
+ *
+ * An architecture is expected to register the ranges of page frames backed
+ * by physical memory with memblock_add[_node]() before calling
+ * free_area_init_nodes(), passing in the PFN each zone ends at. In basic
+ * usage, an architecture is expected to do something like
+ *
+ * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
+ * max_highmem_pfn};
+ * for_each_valid_physical_page_range()
+ * memblock_add_node(base, size, nid)
+ * free_area_init_nodes(max_zone_pfns);
+ *
+ * free_bootmem_with_active_regions() calls free_bootmem_node() for each
+ * registered physical page range. Similarly
+ * sparse_memory_present_with_active_regions() calls memory_present() for
+ * each range when SPARSEMEM is enabled.
+ *
+ * See mm/page_alloc.c for more information on each function exposed by
+ * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
+ */
+extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+unsigned long node_map_pfn_alignment(void);
+unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
+extern unsigned long absent_pages_in_range(unsigned long start_pfn,
+ unsigned long end_pfn);
+extern void get_pfn_range_for_nid(unsigned int nid,
+ unsigned long *start_pfn, unsigned long *end_pfn);
+extern unsigned long find_min_pfn_with_active_regions(void);
+extern void free_bootmem_with_active_regions(int nid,
+ unsigned long max_low_pfn);
+extern void sparse_memory_present_with_active_regions(int nid);
+
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
+ !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
+static inline int __early_pfn_to_nid(unsigned long pfn)
+{
+ return 0;
+}
+#else
+/* please see mm/page_alloc.c */
+extern int __meminit early_pfn_to_nid(unsigned long pfn);
+/* there is a per-arch backend function. */
+extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+#endif
+
+extern void set_dma_reserve(unsigned long new_dma_reserve);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+ unsigned long, enum memmap_context);
+extern void setup_per_zone_wmarks(void);
+extern int __meminit init_per_zone_wmark_min(void);
+extern void mem_init(void);
+extern void __init mmap_init(void);
+extern void show_mem(unsigned int flags);
+extern void si_meminfo(struct sysinfo * val);
+extern void si_meminfo_node(struct sysinfo *val, int nid);
+
+extern __printf(3, 4)
+void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+
+extern void setup_per_cpu_pageset(void);
+
+extern void zone_pcp_update(struct zone *zone);
+extern void zone_pcp_reset(struct zone *zone);
+
+/* page_alloc.c */
+extern int min_free_kbytes;
+
+/* nommu.c */
+extern atomic_long_t mmap_pages_allocated;
+extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
+
+/* interval_tree.c */
+void vma_interval_tree_insert(struct vm_area_struct *node,
+ struct rb_root *root);
+void vma_interval_tree_insert_after(struct vm_area_struct *node,
+ struct vm_area_struct *prev,
+ struct rb_root *root);
+void vma_interval_tree_remove(struct vm_area_struct *node,
+ struct rb_root *root);
+struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
+ unsigned long start, unsigned long last);
+struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
+ unsigned long start, unsigned long last);
+
+#define vma_interval_tree_foreach(vma, root, start, last) \
+ for (vma = vma_interval_tree_iter_first(root, start, last); \
+ vma; vma = vma_interval_tree_iter_next(vma, start, last))
+
+void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+ struct rb_root *root);
+void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+ struct rb_root *root);
+struct anon_vma_chain *anon_vma_interval_tree_iter_first(
+ struct rb_root *root, unsigned long start, unsigned long last);
+struct anon_vma_chain *anon_vma_interval_tree_iter_next(
+ struct anon_vma_chain *node, unsigned long start, unsigned long last);
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
+#endif
+
+#define anon_vma_interval_tree_foreach(avc, root, start, last) \
+ for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
+ avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
+
+/* mmap.c */
+extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
+extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
+extern struct vm_area_struct *vma_merge(struct mm_struct *,
+ struct vm_area_struct *prev, unsigned long addr, unsigned long end,
+ unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
+ struct mempolicy *);
+extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
+extern int split_vma(struct mm_struct *,
+ struct vm_area_struct *, unsigned long addr, int new_below);
+extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
+ struct rb_node **, struct rb_node *);
+extern void unlink_file_vma(struct vm_area_struct *);
+extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
+ unsigned long addr, unsigned long len, pgoff_t pgoff,
+ bool *need_rmap_locks);
+extern void exit_mmap(struct mm_struct *);
+
+static inline int check_data_rlimit(unsigned long rlim,
+ unsigned long new,
+ unsigned long start,
+ unsigned long end_data,
+ unsigned long start_data)
+{
+ if (rlim < RLIM_INFINITY) {
+ if (((new - start) + (end_data - start_data)) > rlim)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
+extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern struct file *get_mm_exe_file(struct mm_struct *mm);
+
+extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
+extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long flags,
+ const struct vm_special_mapping *spec);
+/* This is an obsolete alternative to _install_special_mapping. */
+extern int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long flags, struct page **pages);
+
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+extern unsigned long mmap_region(struct file *file, unsigned long addr,
+ unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot, unsigned long flags,
+ unsigned long pgoff, unsigned long *populate);
+extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+
+#ifdef CONFIG_MMU
+extern int __mm_populate(unsigned long addr, unsigned long len,
+ int ignore_errors);
+static inline void mm_populate(unsigned long addr, unsigned long len)
+{
+ /* Ignore errors */
+ (void) __mm_populate(addr, len, 1);
+}
+#else
+static inline void mm_populate(unsigned long addr, unsigned long len) {}
+#endif
+
+/* These take the mm semaphore themselves */
+extern unsigned long vm_brk(unsigned long, unsigned long);
+extern int vm_munmap(unsigned long, size_t);
+extern unsigned long vm_mmap(struct file *, unsigned long,
+ unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
+struct vm_unmapped_area_info {
+#define VM_UNMAPPED_AREA_TOPDOWN 1
+ unsigned long flags;
+ unsigned long length;
+ unsigned long low_limit;
+ unsigned long high_limit;
+ unsigned long align_mask;
+ unsigned long align_offset;
+};
+
+extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
+extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
+
+/*
+ * Search for an unmapped address range.
+ *
+ * We are looking for a range that:
+ * - does not intersect with any VMA;
+ * - is contained within the [low_limit, high_limit) interval;
+ * - is at least the desired size;
+ * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
+ */
+static inline unsigned long
+vm_unmapped_area(struct vm_unmapped_area_info *info)
+{
+ if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
+ return unmapped_area_topdown(info);
+ else
+ return unmapped_area(info);
+}
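+
+/*
+ * Illustrative sketch: an arch get_unmapped_area() style helper fills in the
+ * request and delegates to vm_unmapped_area(); the limits shown (mm->mmap_base
+ * up to TASK_SIZE, no alignment constraint) are hypothetical:
+ *
+ *     struct vm_unmapped_area_info info = {
+ *             .flags      = 0,
+ *             .length     = len,
+ *             .low_limit  = mm->mmap_base,
+ *             .high_limit = TASK_SIZE,
+ *     };
+ *
+ *     return vm_unmapped_area(&info);
+ */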
+
+/* truncate.c */
+extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+ loff_t lstart, loff_t lend);
+extern void truncate_inode_pages_final(struct address_space *);
+
+/* generic vm_area_ops exported for stackable file systems */
+extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
+extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* mm/page-writeback.c */
+int write_one_page(struct page *page, int wait);
+void task_dirty_inc(struct task_struct *tsk);
+
+/* readahead.c */
+#define VM_MAX_READAHEAD 128 /* kbytes */
+#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
+
+int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
+ pgoff_t offset, unsigned long nr_to_read);
+
+void page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file *filp,
+ pgoff_t offset,
+ unsigned long size);
+
+void page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file *filp,
+ struct page *pg,
+ pgoff_t offset,
+ unsigned long size);
+
+unsigned long max_sane_readahead(unsigned long nr);
+
+/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
+extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+
+/* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
+extern int expand_downwards(struct vm_area_struct *vma,
+ unsigned long address);
+#if VM_GROWSUP
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#else
+ #define expand_upwards(vma, address) (0)
+#endif
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma = find_vma(mm,start_addr);
+
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
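+
+/*
+ * Illustrative sketch: find_vma() returns the first VMA that ends above addr,
+ * which may lie entirely above it, so callers still check vm_start:
+ *
+ *     struct vm_area_struct *vma = find_vma(mm, addr);
+ *
+ *     if (vma && vma->vm_start <= addr)
+ *             ...addr really falls inside vma...
+ */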
+
+static inline unsigned long vma_pages(struct vm_area_struct *vma)
+{
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+}
+
+/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
+static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
+ unsigned long vm_start, unsigned long vm_end)
+{
+ struct vm_area_struct *vma = find_vma(mm, vm_start);
+
+ if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
+ vma = NULL;
+
+ return vma;
+}
+
+#ifdef CONFIG_MMU
+pgprot_t vm_get_page_prot(unsigned long vm_flags);
+void vma_set_page_prot(struct vm_area_struct *vma);
+#else
+static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ return __pgprot(0);
+}
+static inline void vma_set_page_prot(struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+}
+#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+unsigned long change_prot_numa(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+#endif
+
+struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
+
+
+struct page *follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int foll_flags,
+ unsigned int *page_mask);
+
+static inline struct page *follow_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned int foll_flags)
+{
+ unsigned int unused_page_mask;
+ return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+}
+
+#define FOLL_WRITE 0x01 /* check pte is writable */
+#define FOLL_TOUCH 0x02 /* mark page accessed */
+#define FOLL_GET 0x04 /* do get_page on page */
+#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
+#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
+#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
+ * and return without waiting upon it */
+#define FOLL_POPULATE 0x40 /* fault in page */
+#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
+#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
+#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
+#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
+#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
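+
+/*
+ * Illustrative sketch: looking up the page backing a user address without
+ * faulting it in, taking a reference while it is inspected ('address' is a
+ * hypothetical address inside vma):
+ *
+ *     struct page *page = follow_page(vma, address, FOLL_GET);
+ *
+ *     if (page && !IS_ERR(page)) {
+ *             ...inspect the page...
+ *             put_page(page);
+ *     }
+ */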
+
+typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data);
+extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
+ unsigned long size, pte_fn_t fn, void *data);
+
+#ifdef CONFIG_PROC_FS
+void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+#else
+static inline void vm_stat_account(struct mm_struct *mm,
+ unsigned long flags, struct file *file, long pages)
+{
+ mm->total_vm += pages;
+}
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern bool _debug_pagealloc_enabled;
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline bool debug_pagealloc_enabled(void)
+{
+ return _debug_pagealloc_enabled;
+}
+
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (!debug_pagealloc_enabled())
+ return;
+
+ __kernel_map_pages(page, numpages, enable);
+}
+#ifdef CONFIG_HIBERNATION
+extern bool kernel_page_present(struct page *page);
+#endif /* CONFIG_HIBERNATION */
+#else
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable) {}
+#ifdef CONFIG_HIBERNATION
+static inline bool kernel_page_present(struct page *page) { return true; }
+#endif /* CONFIG_HIBERNATION */
+#endif
+
+#ifdef __HAVE_ARCH_GATE_AREA
+extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
+extern int in_gate_area_no_mm(unsigned long addr);
+extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
+#else
+static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+ return NULL;
+}
+static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
+static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+ return 0;
+}
+#endif /* __HAVE_ARCH_GATE_AREA */
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_drop_caches;
+int drop_caches_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+#endif
+
+void drop_pagecache(void);
+void drop_slab(void);
+void drop_slab_node(int nid);
+
+#ifndef CONFIG_MMU
+#define randomize_va_space 0
+#else
+extern int randomize_va_space;
+#endif
+
+const char * arch_vma_name(struct vm_area_struct *vma);
+void print_vma_addr(char *prefix, unsigned long rip);
+
+void sparse_mem_maps_populate_node(struct page **map_map,
+ unsigned long pnum_begin,
+ unsigned long pnum_end,
+ unsigned long map_count,
+ int nodeid);
+
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+void *vmemmap_alloc_block(unsigned long size, int node);
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
+void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+int vmemmap_populate_basepages(unsigned long start, unsigned long end,
+ int node);
+int vmemmap_populate(unsigned long start, unsigned long end, int node);
+void vmemmap_populate_print_last(void);
+#ifdef CONFIG_MEMORY_HOTPLUG
+void vmemmap_free(unsigned long start, unsigned long end);
+#endif
+void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
+ unsigned long size);
+
+enum mf_flags {
+ MF_COUNT_INCREASED = 1 << 0,
+ MF_ACTION_REQUIRED = 1 << 1,
+ MF_MUST_KILL = 1 << 2,
+ MF_SOFT_OFFLINE = 1 << 3,
+};
+extern int memory_failure(unsigned long pfn, int trapno, int flags);
+extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
+extern int unpoison_memory(unsigned long pfn);
+extern int sysctl_memory_failure_early_kill;
+extern int sysctl_memory_failure_recovery;
+extern void shake_page(struct page *p, int access);
+extern atomic_long_t num_poisoned_pages;
+extern int soft_offline_page(struct page *page, int flags);
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+extern void clear_huge_page(struct page *page,
+ unsigned long addr,
+ unsigned int pages_per_huge_page);
+extern void copy_user_huge_page(struct page *dst, struct page *src,
+ unsigned long addr, struct vm_area_struct *vma,
+ unsigned int pages_per_huge_page);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
+extern struct page_ext_operations debug_guardpage_ops;
+extern struct page_ext_operations page_poisoning_ops;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern unsigned int _debug_guardpage_minorder;
+extern bool _debug_guardpage_enabled;
+
+static inline unsigned int debug_guardpage_minorder(void)
+{
+ return _debug_guardpage_minorder;
+}
+
+static inline bool debug_guardpage_enabled(void)
+{
+ return _debug_guardpage_enabled;
+}
+
+static inline bool page_is_guard(struct page *page)
+{
+ struct page_ext *page_ext;
+
+ if (!debug_guardpage_enabled())
+ return false;
+
+ page_ext = lookup_page_ext(page);
+ return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
+}
+#else
+static inline unsigned int debug_guardpage_minorder(void) { return 0; }
+static inline bool debug_guardpage_enabled(void) { return false; }
+static inline bool page_is_guard(struct page *page) { return false; }
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
+#if MAX_NUMNODES > 1
+void __init setup_nr_node_ids(void);
+#else
+static inline void setup_nr_node_ids(void) {}
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
new file mode 100644
index 000000000..cf55945c8
--- /dev/null
+++ b/include/linux/mm_inline.h
@@ -0,0 +1,103 @@
+#ifndef LINUX_MM_INLINE_H
+#define LINUX_MM_INLINE_H
+
+#include <linux/huge_mm.h>
+#include <linux/swap.h>
+
+/**
+ * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * @page: the page to test
+ *
+ * Returns 1 if @page is a page cache page backed by a regular filesystem,
+ * or 0 if @page is anonymous, tmpfs or otherwise RAM or swap backed.
+ * Used by functions that manipulate the LRU lists, to sort a page
+ * onto the right LRU list.
+ *
+ * We would like to get this info without a page flag, but the state
+ * needs to survive until the page is last deleted from the LRU, which
+ * could be as far down as __page_cache_release.
+ */
+static inline int page_is_file_cache(struct page *page)
+{
+ return !PageSwapBacked(page);
+}
+
+static __always_inline void add_page_to_lru_list(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
+{
+ int nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+ list_add(&page->lru, &lruvec->lists[lru]);
+ __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
+}
+
+static __always_inline void del_page_from_lru_list(struct page *page,
+ struct lruvec *lruvec, enum lru_list lru)
+{
+ int nr_pages = hpage_nr_pages(page);
+ mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
+ list_del(&page->lru);
+ __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
+}
+
+/**
+ * page_lru_base_type - which LRU list type should a page be on?
+ * @page: the page to test
+ *
+ * Used for LRU list index arithmetic.
+ *
+ * Returns the base LRU type - file or anon - @page should be on.
+ */
+static inline enum lru_list page_lru_base_type(struct page *page)
+{
+ if (page_is_file_cache(page))
+ return LRU_INACTIVE_FILE;
+ return LRU_INACTIVE_ANON;
+}
+
+/**
+ * page_off_lru - which LRU list was the page on? Clears its LRU flags.
+ * @page: the page to test
+ *
+ * Returns the LRU list a page was on, as an index into the array of LRU
+ * lists; and clears its Unevictable or Active flags, ready for freeing.
+ */
+static __always_inline enum lru_list page_off_lru(struct page *page)
+{
+ enum lru_list lru;
+
+ if (PageUnevictable(page)) {
+ __ClearPageUnevictable(page);
+ lru = LRU_UNEVICTABLE;
+ } else {
+ lru = page_lru_base_type(page);
+ if (PageActive(page)) {
+ __ClearPageActive(page);
+ lru += LRU_ACTIVE;
+ }
+ }
+ return lru;
+}
+
+/**
+ * page_lru - which LRU list should a page be on?
+ * @page: the page to test
+ *
+ * Returns the LRU list a page should be on, as an index
+ * into the array of LRU lists.
+ */
+static __always_inline enum lru_list page_lru(struct page *page)
+{
+ enum lru_list lru;
+
+ if (PageUnevictable(page))
+ lru = LRU_UNEVICTABLE;
+ else {
+ lru = page_lru_base_type(page);
+ if (PageActive(page))
+ lru += LRU_ACTIVE;
+ }
+ return lru;
+}
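+
+/*
+ * Illustrative sketch: activating a page mirrors the helpers above; the
+ * caller is assumed to hold zone->lru_lock and to have looked up lruvec:
+ *
+ *     enum lru_list lru = page_lru_base_type(page);
+ *
+ *     del_page_from_lru_list(page, lruvec, lru);
+ *     SetPageActive(page);
+ *     add_page_to_lru_list(page, lruvec, lru + LRU_ACTIVE);
+ */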
+
+#endif
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
new file mode 100644
index 000000000..c54e67ec6
--- /dev/null
+++ b/include/linux/mm_types.h
@@ -0,0 +1,553 @@
+#ifndef _LINUX_MM_TYPES_H
+#define _LINUX_MM_TYPES_H
+
+#include <linux/auxvec.h>
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/completion.h>
+#include <linux/cpumask.h>
+#include <linux/uprobes.h>
+#include <linux/page-flags-layout.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+
+#ifndef AT_VECTOR_SIZE_ARCH
+#define AT_VECTOR_SIZE_ARCH 0
+#endif
+#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
+
+struct address_space;
+struct mem_cgroup;
+
+#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
+#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
+ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
+
+typedef void compound_page_dtor(struct page *);
+
+/*
+ * Each physical page in the system has a struct page associated with
+ * it to keep track of whatever it is we are using the page for at the
+ * moment. Note that we have no way to track which tasks are using
+ * a page, though if it is a pagecache page, rmap structures can tell us
+ * who is mapping it.
+ *
+ * The objects in struct page are organized in double word blocks in
+ * order to allow us to use atomic double word operations on portions
+ * of struct page. That is currently only used by slub but the arrangement
+ * allows the use of atomic double word operations on the flags/mapping
+ * and lru list pointers also.
+ */
+struct page {
+ /* First double word block */
+ unsigned long flags; /* Atomic flags, some possibly
+ * updated asynchronously */
+ union {
+ struct address_space *mapping; /* If low bit clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, low bit is set, and
+ * it points to anon_vma object:
+ * see PAGE_MAPPING_ANON below.
+ */
+ void *s_mem; /* slab first object */
+ };
+
+ /* Second double word */
+ struct {
+ union {
+ pgoff_t index; /* Our offset within mapping. */
+ void *freelist; /* sl[aou]b first free object */
+ bool pfmemalloc; /* If set by the page allocator,
+ * ALLOC_NO_WATERMARKS was set
+ * and the low watermark was not
+ * met implying that the system
+ * is under some pressure. The
+ * caller should try to ensure
+ * this page is only used to
+ * free other pages.
+ */
+ };
+
+ union {
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+ /* Used for cmpxchg_double in slub */
+ unsigned long counters;
+#else
+ /*
+ * Keep _count separate from slub cmpxchg_double data,
+ * as the rest of the double word is protected by
+ * slab_lock but _count is not.
+ */
+ unsigned counters;
+#endif
+
+ struct {
+
+ union {
+ /*
+ * Count of ptes mapped in
+ * mms, to show when page is
+ * mapped & limit reverse map
+ * searches.
+ *
+ * Used also for tail pages
+ * refcounting instead of
+ * _count. Tail pages cannot
+ * be mapped and keeping the
+ * tail page _count zero at
+ * all times guarantees
+ * get_page_unless_zero() will
+ * never succeed on tail
+ * pages.
+ */
+ atomic_t _mapcount;
+
+ struct { /* SLUB */
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ int units; /* SLOB */
+ };
+ atomic_t _count; /* Usage count, see below. */
+ };
+ unsigned int active; /* SLAB */
+ };
+ };
+
+ /* Third double word block */
+ union {
+ struct list_head lru; /* Pageout list, eg. active_list
+ * protected by zone->lru_lock !
+ * Can be used as a generic list
+ * by the page owner.
+ */
+ struct { /* slub per cpu partial pages */
+ struct page *next; /* Next partial slab */
+#ifdef CONFIG_64BIT
+ int pages; /* Nr of partial slabs left */
+ int pobjects; /* Approximate # of objects */
+#else
+ short int pages;
+ short int pobjects;
+#endif
+ };
+
+ struct slab *slab_page; /* slab fields */
+ struct rcu_head rcu_head; /* Used by SLAB
+ * when destroying via RCU
+ */
+ /* First tail page of compound page */
+ struct {
+ compound_page_dtor *compound_dtor;
+ unsigned long compound_order;
+ };
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+#endif
+ };
+
+ /* Remainder is not double word aligned */
+ union {
+ unsigned long private; /* Mapping-private opaque data:
+ * usually used for buffer_heads
+ * if PagePrivate set; used for
+ * swp_entry_t if PageSwapCache;
+ * indicates order in the buddy
+ * system if PG_buddy is set.
+ */
+#if USE_SPLIT_PTE_PTLOCKS
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+#else
+ spinlock_t ptl;
+#endif
+#endif
+ struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
+ struct page *first_page; /* Compound tail pages */
+ };
+
+#ifdef CONFIG_MEMCG
+ struct mem_cgroup *mem_cgroup;
+#endif
+
+ /*
+ * On machines where all RAM is mapped into kernel address space,
+ * we can simply calculate the virtual address. On machines with
+ * highmem some memory is mapped into kernel virtual memory
+ * dynamically, so we need a place to store that address.
+ * Note that this field could be 16 bits on x86 ... ;)
+ *
+ * Architectures with slow multiplication can define
+ * WANT_PAGE_VIRTUAL in asm/page.h
+ */
+#if defined(WANT_PAGE_VIRTUAL)
+ void *virtual; /* Kernel virtual address (NULL if
+ not kmapped, ie. highmem) */
+#endif /* WANT_PAGE_VIRTUAL */
+
+#ifdef CONFIG_KMEMCHECK
+ /*
+ * kmemcheck wants to track the status of each byte in a page; this
+ * is a pointer to such a status block. NULL if not tracked.
+ */
+ void *shadow;
+#endif
+
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
+#endif
+}
+/*
+ * The struct page can be forced to be double word aligned so that atomic ops
+ * on double words work. The SLUB allocator can make use of such a feature.
+ */
+#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
+ __aligned(2 * sizeof(unsigned long))
+#endif
+;
+
+struct page_frag {
+ struct page *page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 offset;
+ __u32 size;
+#else
+ __u16 offset;
+ __u16 size;
+#endif
+};
+
+typedef unsigned long __nocast vm_flags_t;
+
+/*
+ * A region containing a mapping of a non-memory backed file under NOMMU
+ * conditions. These are held in a global tree and are pinned by the VMAs that
+ * map parts of them.
+ */
+struct vm_region {
+ struct rb_node vm_rb; /* link in global region tree */
+ vm_flags_t vm_flags; /* VMA vm_flags */
+ unsigned long vm_start; /* start address of region */
+ unsigned long vm_end; /* region initialised to here */
+ unsigned long vm_top; /* region allocated to here */
+ unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
+ struct file *vm_file; /* the backing file or NULL */
+ struct file *vm_prfile; /* the virtual backing file or NULL */
+
+ int vm_usage; /* region usage count (access under nommu_region_sem) */
+ bool vm_icache_flushed : 1; /* true if the icache has been flushed for
+ * this region */
+};
+
+/*
+ * This struct defines a virtual memory area (VMA). There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (i.e. a shared
+ * library, the executable area, etc).
+ */
+struct vm_area_struct {
+ /* The first cache line has the info for VMA tree walking. */
+
+ unsigned long vm_start; /* Our start address within vm_mm. */
+ unsigned long vm_end; /* The first byte after our end address
+ within vm_mm. */
+
+ /* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct *vm_next, *vm_prev;
+
+ struct rb_node vm_rb;
+
+ /*
+ * Largest free memory gap in bytes to the left of this VMA.
+ * Either between this VMA and vma->vm_prev, or between one of the
+ * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
+ * get_unmapped_area find a free area of the right size.
+ */
+ unsigned long rb_subtree_gap;
+
+ /* Second cache line starts here. */
+
+ struct mm_struct *vm_mm; /* The address space we belong to. */
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+ unsigned long vm_flags; /* Flags, see mm.h. */
+
+ /*
+ * For areas with an address space and backing store,
+ * linkage into the address_space->i_mmap interval tree.
+ */
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+
+ /*
+ * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
+ * list, after a COW of one of the file pages. A MAP_SHARED vma
+ * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
+ * or brk vma (with NULL file) can only be in an anon_vma list.
+ */
+ struct list_head anon_vma_chain; /* Serialized by mmap_sem &
+ * page_table_lock */
+ struct anon_vma *anon_vma; /* Serialized by page_table_lock */
+
+ /* Function pointers to deal with this struct. */
+ const struct vm_operations_struct *vm_ops;
+
+ /* Information about our backing store: */
+ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
+ units, *not* PAGE_CACHE_SIZE */
+ struct file * vm_file; /* File we map to (can be NULL). */
+ struct file *vm_prfile; /* shadow of vm_file */
+ void * vm_private_data; /* was vm_pte (shared mem) */
+
+#ifndef CONFIG_MMU
+ struct vm_region *vm_region; /* NOMMU mapping region */
+#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+#endif
+#ifdef CONFIG_UKSM
+ struct vma_slot *uksm_vma_slot;
+#endif
+};
+
+struct core_thread {
+ struct task_struct *task;
+ struct core_thread *next;
+};
+
+struct core_state {
+ atomic_t nr_threads;
+ struct core_thread dumper;
+ struct completion startup;
+};
+
+enum {
+ MM_FILEPAGES,
+ MM_ANONPAGES,
+ MM_SWAPENTS,
+ NR_MM_COUNTERS
+};
+
+#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
+#define SPLIT_RSS_COUNTING
+/* per-thread cached information */
+struct task_rss_stat {
+ int events; /* for synchronization threshold */
+ int count[NR_MM_COUNTERS];
+};
+#endif /* USE_SPLIT_PTE_PTLOCKS */
+
+struct mm_rss_stat {
+ atomic_long_t count[NR_MM_COUNTERS];
+};
+
+struct kioctx_table;
+struct mm_struct {
+ struct vm_area_struct *mmap; /* list of VMAs */
+ struct rb_root mm_rb;
+ u32 vmacache_seqnum; /* per-thread vmacache */
+#ifdef CONFIG_MMU
+ unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags);
+#endif
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+ unsigned long task_size; /* size of task vm space */
+ unsigned long highest_vm_end; /* highest vma end address */
+ pgd_t * pgd;
+ atomic_t mm_users; /* How many users with user space? */
+ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */
+ atomic_long_t nr_ptes; /* PTE page table pages */
+#if CONFIG_PGTABLE_LEVELS > 2
+ atomic_long_t nr_pmds; /* PMD page table pages */
+#endif
+ int map_count; /* number of VMAs */
+
+ spinlock_t page_table_lock; /* Protects page tables and some counters */
+ struct rw_semaphore mmap_sem;
+
+ struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
+ * together off init_mm.mmlist, and are protected
+ * by mmlist_lock
+ */
+
+
+ unsigned long hiwater_rss; /* High-watermark of RSS usage */
+ unsigned long hiwater_vm; /* High-water virtual memory usage */
+
+ unsigned long total_vm; /* Total pages mapped */
+ unsigned long locked_vm; /* Pages that have PG_mlocked set */
+ unsigned long pinned_vm; /* Refcount permanently increased */
+ unsigned long shared_vm; /* Shared pages (files) */
+ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
+ unsigned long stack_vm; /* VM_GROWSUP/DOWN */
+ unsigned long def_flags;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
+
+ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+
+ /*
+ * Special counters, in some configurations protected by the
+ * page_table_lock, in other configurations by being atomic.
+ */
+ struct mm_rss_stat rss_stat;
+
+ struct linux_binfmt *binfmt;
+
+ cpumask_var_t cpu_vm_mask_var;
+
+ /* Architecture-specific MM context */
+ mm_context_t context;
+
+ unsigned long flags; /* Must use atomic bitops to access the bits */
+
+ struct core_state *core_state; /* coredumping support */
+#ifdef CONFIG_AIO
+ spinlock_t ioctx_lock;
+ struct kioctx_table __rcu *ioctx_table;
+#endif
+#ifdef CONFIG_MEMCG
+ /*
+ * "owner" points to a task that is regarded as the canonical
+ * user/owner of this mm. All of the following must be true in
+ * order for it to be changed:
+ *
+ * current == mm->owner
+ * current->mm != mm
+ * new_owner->mm == mm
+ * new_owner->alloc_lock is held
+ */
+ struct task_struct __rcu *owner;
+#endif
+
+ /* store ref to file /proc/<pid>/exe symlink points to */
+ struct file __rcu *exe_file;
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_notifier_mm *mmu_notifier_mm;
+#endif
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+ pgtable_t pmd_huge_pte; /* protected by page_table_lock */
+#endif
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ struct cpumask cpumask_allocation;
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+ /*
+ * numa_next_scan is the next time that the PTEs will be marked
+ * pte_numa. NUMA hinting faults will gather statistics and migrate
+ * pages to new nodes if necessary.
+ */
+ unsigned long numa_next_scan;
+
+ /* Restart point for scanning and setting pte_numa */
+ unsigned long numa_scan_offset;
+
+ /* numa_scan_seq prevents two threads setting pte_numa */
+ int numa_scan_seq;
+#endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+ /*
+ * An operation with batched TLB flushing is going on. Anything that
+ * can move process memory needs to flush the TLB when moving a
+ * PROT_NONE or PROT_NUMA mapped page.
+ */
+ bool tlb_flush_pending;
+#endif
+ struct uprobes_state uprobes_state;
+#ifdef CONFIG_X86_INTEL_MPX
+ /* address of the bounds directory */
+ void __user *bd_addr;
+#endif
+};
+
+static inline void mm_init_cpumask(struct mm_struct *mm)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ mm->cpu_vm_mask_var = &mm->cpumask_allocation;
+#endif
+ cpumask_clear(mm->cpu_vm_mask_var);
+}
+
+/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
+static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
+{
+ return mm->cpu_vm_mask_var;
+}
+
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ barrier();
+ return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+ mm->tlb_flush_pending = true;
+
+ /*
+ * Guarantee that the tlb_flush_pending store does not leak into the
+ * critical section updating the page tables
+ */
+ smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+ barrier();
+ mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
+struct vm_special_mapping
+{
+ const char *name;
+ struct page **pages;
+};
+
+enum tlb_flush_reason {
+ TLB_FLUSH_ON_TASK_SWITCH,
+ TLB_REMOTE_SHOOTDOWN,
+ TLB_LOCAL_SHOOTDOWN,
+ TLB_LOCAL_MM_SHOOTDOWN,
+ NR_TLB_FLUSH_REASONS,
+};
+
+/*
+ * A swap entry has to fit into an "unsigned long", as the entry is hidden
+ * in the "index" field of the swapper address space.
+ */
+typedef struct {
+ unsigned long val;
+} swp_entry_t;
+
+#endif /* _LINUX_MM_TYPES_H */
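
The tlb_flush_pending helpers above are easiest to see in context. The sketch below is editorial and not part of this patch: example_mprotect_range() is a hypothetical caller (the real users live in the mm/ core), and it assumes the usual kernel context (<linux/mm.h>, <asm/tlbflush.h>) for flush_tlb_range().

static void example_mprotect_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Announce the upcoming flush before touching the page tables. */
	set_tlb_flush_pending(mm);

	/* ... modify PTEs for [start, end) under the page table lock ... */

	/* Flush first, then clear; the flush itself acts as the barrier. */
	flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);
}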
diff --git a/include/linux/mman.h b/include/linux/mman.h
new file mode 100644
index 000000000..16373c8f5
--- /dev/null
+++ b/include/linux/mman.h
@@ -0,0 +1,93 @@
+#ifndef _LINUX_MMAN_H
+#define _LINUX_MMAN_H
+
+#include <linux/mm.h>
+#include <linux/percpu_counter.h>
+
+#include <linux/atomic.h>
+#include <uapi/linux/mman.h>
+
+extern int sysctl_overcommit_memory;
+extern int sysctl_overcommit_ratio;
+extern unsigned long sysctl_overcommit_kbytes;
+extern struct percpu_counter vm_committed_as;
+
+#ifdef CONFIG_SMP
+extern s32 vm_committed_as_batch;
+#else
+#define vm_committed_as_batch 0
+#endif
+
+unsigned long vm_memory_committed(void);
+
+static inline void vm_acct_memory(long pages)
+{
+ __percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch);
+}
+
+static inline void vm_unacct_memory(long pages)
+{
+ vm_acct_memory(-pages);
+}
+
+/*
+ * Allow architectures to handle additional protection bits
+ */
+
+#ifndef arch_calc_vm_prot_bits
+#define arch_calc_vm_prot_bits(prot) 0
+#endif
+
+#ifndef arch_vm_get_page_prot
+#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
+#endif
+
+#ifndef arch_validate_prot
+/*
+ * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
+ * already been masked out.
+ *
+ * Returns true if the prot flags are valid
+ */
+static inline int arch_validate_prot(unsigned long prot)
+{
+ return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
+}
+#define arch_validate_prot arch_validate_prot
+#endif
+
+/*
+ * Optimisation macro. It is equivalent to:
+ * (x & bit1) ? bit2 : 0
+ * but this version is faster.
+ * ("bit1" and "bit2" must be single bits)
+ */
+#define _calc_vm_trans(x, bit1, bit2) \
+ ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
+ : ((x) & (bit1)) / ((bit1) / (bit2)))
+
+/*
+ * Combine the mmap "prot" argument into "vm_flags" used internally.
+ */
+static inline unsigned long
+calc_vm_prot_bits(unsigned long prot)
+{
+ return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
+ _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
+ _calc_vm_trans(prot, PROT_EXEC, VM_EXEC) |
+ arch_calc_vm_prot_bits(prot);
+}
+
+/*
+ * Combine the mmap "flags" argument into "vm_flags" used internally.
+ */
+static inline unsigned long
+calc_vm_flag_bits(unsigned long flags)
+{
+ return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
+ _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
+ _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
+}
+
+unsigned long vm_commit_limit(void);
+#endif /* _LINUX_MMAN_H */
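
As a worked illustration of the helpers above (editorial, not part of the patch): _calc_vm_trans(prot, PROT_READ, VM_READ) scales the PROT_READ bit into the VM_READ position without a branch, and an mmap() implementation combines the results roughly as below. example_vm_flags() is a hypothetical wrapper; the VM_MAY* additions mirror what mm/mmap.c does, though the exact set varies by kernel version.

static unsigned long example_vm_flags(struct mm_struct *mm,
				      unsigned long prot, unsigned long flags)
{
	/* PROT_* bits become VM_* bits; MAP_* bits contribute VM_LOCKED etc. */
	return calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
	       mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
}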
diff --git a/include/linux/mmc/boot.h b/include/linux/mmc/boot.h
new file mode 100644
index 000000000..23acc3baa
--- /dev/null
+++ b/include/linux/mmc/boot.h
@@ -0,0 +1,7 @@
+#ifndef LINUX_MMC_BOOT_H
+#define LINUX_MMC_BOOT_H
+
+enum { MMC_PROGRESS_ENTER, MMC_PROGRESS_INIT,
+ MMC_PROGRESS_LOAD, MMC_PROGRESS_DONE };
+
+#endif /* LINUX_MMC_BOOT_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
new file mode 100644
index 000000000..19f0175c0
--- /dev/null
+++ b/include/linux/mmc/card.h
@@ -0,0 +1,531 @@
+/*
+ * linux/include/linux/mmc/card.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Card driver specific definitions.
+ */
+#ifndef LINUX_MMC_CARD_H
+#define LINUX_MMC_CARD_H
+
+#include <linux/device.h>
+#include <linux/mmc/core.h>
+#include <linux/mod_devicetable.h>
+
+struct mmc_cid {
+ unsigned int manfid;
+ char prod_name[8];
+ unsigned char prv;
+ unsigned int serial;
+ unsigned short oemid;
+ unsigned short year;
+ unsigned char hwrev;
+ unsigned char fwrev;
+ unsigned char month;
+};
+
+struct mmc_csd {
+ unsigned char structure;
+ unsigned char mmca_vsn;
+ unsigned short cmdclass;
+ unsigned short tacc_clks;
+ unsigned int tacc_ns;
+ unsigned int c_size;
+ unsigned int r2w_factor;
+ unsigned int max_dtr;
+ unsigned int erase_size; /* In sectors */
+ unsigned int read_blkbits;
+ unsigned int write_blkbits;
+ unsigned int capacity;
+ unsigned int read_partial:1,
+ read_misalign:1,
+ write_partial:1,
+ write_misalign:1,
+ dsr_imp:1;
+};
+
+struct mmc_ext_csd {
+ u8 rev;
+ u8 erase_group_def;
+ u8 sec_feature_support;
+ u8 rel_sectors;
+ u8 rel_param;
+ u8 part_config;
+ u8 cache_ctrl;
+ u8 rst_n_function;
+ u8 max_packed_writes;
+ u8 max_packed_reads;
+ u8 packed_event_en;
+ unsigned int part_time; /* Units: ms */
+ unsigned int sa_timeout; /* Units: 100ns */
+ unsigned int generic_cmd6_time; /* Units: 10ms */
+ unsigned int power_off_longtime; /* Units: ms */
+ u8 power_off_notification; /* state */
+ unsigned int hs_max_dtr;
+ unsigned int hs200_max_dtr;
+#define MMC_HIGH_26_MAX_DTR 26000000
+#define MMC_HIGH_52_MAX_DTR 52000000
+#define MMC_HIGH_DDR_MAX_DTR 52000000
+#define MMC_HS200_MAX_DTR 200000000
+ unsigned int sectors;
+ unsigned int hc_erase_size; /* In sectors */
+ unsigned int hc_erase_timeout; /* In milliseconds */
+ unsigned int sec_trim_mult; /* Secure trim multiplier */
+ unsigned int sec_erase_mult; /* Secure erase multiplier */
+ unsigned int trim_timeout; /* In milliseconds */
+ bool partition_setting_completed; /* enable bit */
+ unsigned long long enhanced_area_offset; /* Units: Byte */
+ unsigned int enhanced_area_size; /* Units: KB */
+ unsigned int cache_size; /* Units: KB */
+ bool hpi_en; /* HPI enable bit */
+ bool hpi; /* HPI support bit */
+ unsigned int hpi_cmd; /* cmd used as HPI */
+ bool bkops; /* background support bit */
+ bool man_bkops_en; /* manual bkops enable bit */
+ unsigned int data_sector_size; /* 512 bytes or 4KB */
+ unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
+ unsigned int boot_ro_lock; /* ro lock support */
+ bool boot_ro_lockable;
+ bool ffu_capable; /* Firmware upgrade support */
+#define MMC_FIRMWARE_LEN 8
+ u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */
+ u8 raw_exception_status; /* 54 */
+ u8 raw_partition_support; /* 160 */
+ u8 raw_rpmb_size_mult; /* 168 */
+ u8 raw_erased_mem_count; /* 181 */
+ u8 raw_ext_csd_structure; /* 194 */
+ u8 raw_card_type; /* 196 */
+ u8 out_of_int_time; /* 198 */
+ u8 raw_pwr_cl_52_195; /* 200 */
+ u8 raw_pwr_cl_26_195; /* 201 */
+ u8 raw_pwr_cl_52_360; /* 202 */
+ u8 raw_pwr_cl_26_360; /* 203 */
+ u8 raw_s_a_timeout; /* 217 */
+ u8 raw_hc_erase_gap_size; /* 221 */
+ u8 raw_erase_timeout_mult; /* 223 */
+ u8 raw_hc_erase_grp_size; /* 224 */
+ u8 raw_sec_trim_mult; /* 229 */
+ u8 raw_sec_erase_mult; /* 230 */
+ u8 raw_sec_feature_support;/* 231 */
+ u8 raw_trim_mult; /* 232 */
+ u8 raw_pwr_cl_200_195; /* 236 */
+ u8 raw_pwr_cl_200_360; /* 237 */
+ u8 raw_pwr_cl_ddr_52_195; /* 238 */
+ u8 raw_pwr_cl_ddr_52_360; /* 239 */
+ u8 raw_pwr_cl_ddr_200_360; /* 253 */
+ u8 raw_bkops_status; /* 246 */
+ u8 raw_sectors[4]; /* 212 - 4 bytes */
+
+ unsigned int feature_support;
+#define MMC_DISCARD_FEATURE BIT(0) /* CMD38 feature */
+};
+
+struct sd_scr {
+ unsigned char sda_vsn;
+ unsigned char sda_spec3;
+ unsigned char bus_widths;
+#define SD_SCR_BUS_WIDTH_1 (1<<0)
+#define SD_SCR_BUS_WIDTH_4 (1<<2)
+ unsigned char cmds;
+#define SD_SCR_CMD20_SUPPORT (1<<0)
+#define SD_SCR_CMD23_SUPPORT (1<<1)
+};
+
+struct sd_ssr {
+ unsigned int au; /* In sectors */
+ unsigned int erase_timeout; /* In milliseconds */
+ unsigned int erase_offset; /* In milliseconds */
+};
+
+struct sd_switch_caps {
+ unsigned int hs_max_dtr;
+ unsigned int uhs_max_dtr;
+#define HIGH_SPEED_MAX_DTR 50000000
+#define UHS_SDR104_MAX_DTR 208000000
+#define UHS_SDR50_MAX_DTR 100000000
+#define UHS_DDR50_MAX_DTR 50000000
+#define UHS_SDR25_MAX_DTR UHS_DDR50_MAX_DTR
+#define UHS_SDR12_MAX_DTR 25000000
+ unsigned int sd3_bus_mode;
+#define UHS_SDR12_BUS_SPEED 0
+#define HIGH_SPEED_BUS_SPEED 1
+#define UHS_SDR25_BUS_SPEED 1
+#define UHS_SDR50_BUS_SPEED 2
+#define UHS_SDR104_BUS_SPEED 3
+#define UHS_DDR50_BUS_SPEED 4
+
+#define SD_MODE_HIGH_SPEED (1 << HIGH_SPEED_BUS_SPEED)
+#define SD_MODE_UHS_SDR12 (1 << UHS_SDR12_BUS_SPEED)
+#define SD_MODE_UHS_SDR25 (1 << UHS_SDR25_BUS_SPEED)
+#define SD_MODE_UHS_SDR50 (1 << UHS_SDR50_BUS_SPEED)
+#define SD_MODE_UHS_SDR104 (1 << UHS_SDR104_BUS_SPEED)
+#define SD_MODE_UHS_DDR50 (1 << UHS_DDR50_BUS_SPEED)
+ unsigned int sd3_drv_type;
+#define SD_DRIVER_TYPE_B 0x01
+#define SD_DRIVER_TYPE_A 0x02
+#define SD_DRIVER_TYPE_C 0x04
+#define SD_DRIVER_TYPE_D 0x08
+ unsigned int sd3_curr_limit;
+#define SD_SET_CURRENT_LIMIT_200 0
+#define SD_SET_CURRENT_LIMIT_400 1
+#define SD_SET_CURRENT_LIMIT_600 2
+#define SD_SET_CURRENT_LIMIT_800 3
+#define SD_SET_CURRENT_NO_CHANGE (-1)
+
+#define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200)
+#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400)
+#define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600)
+#define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800)
+};
+
+struct sdio_cccr {
+ unsigned int sdio_vsn;
+ unsigned int sd_vsn;
+ unsigned int multi_block:1,
+ low_speed:1,
+ wide_bus:1,
+ high_power:1,
+ high_speed:1,
+ disable_cd:1;
+};
+
+struct sdio_cis {
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short blksize;
+ unsigned int max_dtr;
+};
+
+struct mmc_host;
+struct mmc_ios;
+struct sdio_func;
+struct sdio_func_tuple;
+
+#define SDIO_MAX_FUNCS 7
+
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
+ MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
+ MMC_BLK_NEW_REQUEST,
+};
+
+/* The number of MMC physical partitions. These consist of:
+ * boot partitions (2), general purpose partitions (4) and
+ * RPMB partition (1) in MMC v4.4.
+ */
+#define MMC_NUM_BOOT_PARTITION 2
+#define MMC_NUM_GP_PARTITION 4
+#define MMC_NUM_PHY_PARTITION 7
+#define MAX_MMC_PART_NAME_LEN 20
+
+/*
+ * MMC Physical partitions
+ */
+struct mmc_part {
+ unsigned int size; /* partition size (in bytes) */
+ unsigned int part_cfg; /* partition type */
+ char name[MAX_MMC_PART_NAME_LEN];
+ bool force_ro; /* to make boot parts RO by default */
+ unsigned int area_type;
+#define MMC_BLK_DATA_AREA_MAIN (1<<0)
+#define MMC_BLK_DATA_AREA_BOOT (1<<1)
+#define MMC_BLK_DATA_AREA_GP (1<<2)
+#define MMC_BLK_DATA_AREA_RPMB (1<<3)
+};
+
+/*
+ * MMC device
+ */
+struct mmc_card {
+ struct mmc_host *host; /* the host this device belongs to */
+ struct device dev; /* the device */
+ u32 ocr; /* the current OCR setting */
+ unsigned int rca; /* relative card address of device */
+ unsigned int type; /* card type */
+#define MMC_TYPE_MMC 0 /* MMC card */
+#define MMC_TYPE_SD 1 /* SD card */
+#define MMC_TYPE_SDIO 2 /* SDIO card */
+#define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */
+ unsigned int state; /* (our) card state */
+#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */
+#define MMC_STATE_READONLY (1<<1) /* card is read-only */
+#define MMC_STATE_BLOCKADDR (1<<2) /* card uses block-addressing */
+#define MMC_CARD_SDXC (1<<3) /* card is SDXC */
+#define MMC_CARD_REMOVED (1<<4) /* card has been removed */
+#define MMC_STATE_DOING_BKOPS (1<<5) /* card is doing BKOPS */
+#define MMC_STATE_SUSPENDED (1<<6) /* card is suspended */
+ unsigned int quirks; /* card quirks */
+#define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */
+#define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */
+ /* for byte mode */
+#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */
+ /* (missing CIA registers) */
+#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */
+#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */
+#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */
+#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
+#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
+#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
+ /* byte mode */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
+#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
+#define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
+
+ unsigned int erase_size; /* erase size in sectors */
+ unsigned int erase_shift; /* if erase unit is power 2 */
+ unsigned int pref_erase; /* in sectors */
+ u8 erased_byte; /* value of erased bytes */
+
+ u32 raw_cid[4]; /* raw card CID */
+ u32 raw_csd[4]; /* raw card CSD */
+ u32 raw_scr[2]; /* raw card SCR */
+ struct mmc_cid cid; /* card identification */
+ struct mmc_csd csd; /* card specific */
+ struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */
+ struct sd_scr scr; /* extra SD information */
+ struct sd_ssr ssr; /* yet more SD information */
+ struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
+
+ unsigned int sdio_funcs; /* number of SDIO functions */
+ struct sdio_cccr cccr; /* common card info */
+ struct sdio_cis cis; /* common tuple info */
+ struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
+ struct sdio_func *sdio_single_irq; /* SDIO function when only one IRQ active */
+ unsigned num_info; /* number of info strings */
+ const char **info; /* info strings */
+ struct sdio_func_tuple *tuples; /* unknown common tuples */
+
+ unsigned int sd_bus_speed; /* Bus Speed Mode set for the card */
+ unsigned int mmc_avail_type; /* supported device type by both host and card */
+
+ struct dentry *debugfs_root;
+ struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
+ unsigned int nr_parts;
+};
+
+/*
+ * This function fills in the contents of an mmc_part.
+ */
+static inline void mmc_part_add(struct mmc_card *card, unsigned int size,
+ unsigned int part_cfg, char *name, int idx, bool ro,
+ int area_type)
+{
+ card->part[card->nr_parts].size = size;
+ card->part[card->nr_parts].part_cfg = part_cfg;
+ sprintf(card->part[card->nr_parts].name, name, idx);
+ card->part[card->nr_parts].force_ro = ro;
+ card->part[card->nr_parts].area_type = area_type;
+ card->nr_parts++;
+}
+
+static inline bool mmc_large_sector(struct mmc_card *card)
+{
+ return card->ext_csd.data_sector_size == 4096;
+}
+
+/*
+ * The world is not perfect and supplies us with broken mmc/sdio devices.
+ * For at least some of these bugs we need a work-around.
+ */
+
+struct mmc_fixup {
+ /* CID-specific fields. */
+ const char *name;
+
+ /* Valid revision range */
+ u64 rev_start, rev_end;
+
+ unsigned int manfid;
+ unsigned short oemid;
+
+ /* SDIO-specific fields. You can use SDIO_ANY_ID here of course */
+ u16 cis_vendor, cis_device;
+
+ void (*vendor_fixup)(struct mmc_card *card, int data);
+ int data;
+};
+
+#define CID_MANFID_ANY (-1u)
+#define CID_OEMID_ANY ((unsigned short) -1)
+#define CID_NAME_ANY (NULL)
+
+#define END_FIXUP { NULL }
+
+#define _FIXUP_EXT(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _cis_vendor, _cis_device, \
+ _fixup, _data) \
+ { \
+ .name = (_name), \
+ .manfid = (_manfid), \
+ .oemid = (_oemid), \
+ .rev_start = (_rev_start), \
+ .rev_end = (_rev_end), \
+ .cis_vendor = (_cis_vendor), \
+ .cis_device = (_cis_device), \
+ .vendor_fixup = (_fixup), \
+ .data = (_data), \
+ }
+
+#define MMC_FIXUP_REV(_name, _manfid, _oemid, _rev_start, _rev_end, \
+ _fixup, _data) \
+ _FIXUP_EXT(_name, _manfid, \
+ _oemid, _rev_start, _rev_end, \
+ SDIO_ANY_ID, SDIO_ANY_ID, \
+ _fixup, _data) \
+
+#define MMC_FIXUP(_name, _manfid, _oemid, _fixup, _data) \
+ MMC_FIXUP_REV(_name, _manfid, _oemid, 0, -1ull, _fixup, _data)
+
+#define SDIO_FIXUP(_vendor, _device, _fixup, _data) \
+ _FIXUP_EXT(CID_NAME_ANY, CID_MANFID_ANY, \
+ CID_OEMID_ANY, 0, -1ull, \
+ _vendor, _device, \
+ _fixup, _data) \
+
+#define cid_rev(hwrev, fwrev, year, month) \
+ (((u64) hwrev) << 40 | \
+ ((u64) fwrev) << 32 | \
+ ((u64) year) << 16 | \
+ ((u64) month))
+
+#define cid_rev_card(card) \
+ cid_rev(card->cid.hwrev, \
+ card->cid.fwrev, \
+ card->cid.year, \
+ card->cid.month)
+
+/*
+ * Unconditionally add/remove a quirk.
+ */
+
+static inline void __maybe_unused add_quirk(struct mmc_card *card, int data)
+{
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk(struct mmc_card *card, int data)
+{
+ card->quirks &= ~data;
+}
+
+#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
+#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
+#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
+
+#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
+#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
+#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
+#define mmc_card_ext_capacity(c) ((c)->state & MMC_CARD_SDXC)
+#define mmc_card_removed(c) ((c) && ((c)->state & MMC_CARD_REMOVED))
+#define mmc_card_doing_bkops(c) ((c)->state & MMC_STATE_DOING_BKOPS)
+#define mmc_card_suspended(c) ((c)->state & MMC_STATE_SUSPENDED)
+
+#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
+#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
+#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
+#define mmc_card_set_ext_capacity(c) ((c)->state |= MMC_CARD_SDXC)
+#define mmc_card_set_removed(c) ((c)->state |= MMC_CARD_REMOVED)
+#define mmc_card_set_doing_bkops(c) ((c)->state |= MMC_STATE_DOING_BKOPS)
+#define mmc_card_clr_doing_bkops(c) ((c)->state &= ~MMC_STATE_DOING_BKOPS)
+#define mmc_card_set_suspended(c) ((c)->state |= MMC_STATE_SUSPENDED)
+#define mmc_card_clr_suspended(c) ((c)->state &= ~MMC_STATE_SUSPENDED)
+
+/*
+ * Quirk add/remove for MMC products.
+ */
+
+static inline void __maybe_unused add_quirk_mmc(struct mmc_card *card, int data)
+{
+ if (mmc_card_mmc(card))
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_mmc(struct mmc_card *card,
+ int data)
+{
+ if (mmc_card_mmc(card))
+ card->quirks &= ~data;
+}
+
+/*
+ * Quirk add/remove for SD products.
+ */
+
+static inline void __maybe_unused add_quirk_sd(struct mmc_card *card, int data)
+{
+ if (mmc_card_sd(card))
+ card->quirks |= data;
+}
+
+static inline void __maybe_unused remove_quirk_sd(struct mmc_card *card,
+ int data)
+{
+ if (mmc_card_sd(card))
+ card->quirks &= ~data;
+}
+
+static inline int mmc_card_lenient_fn0(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LENIENT_FN0;
+}
+
+static inline int mmc_blksz_for_byte_mode(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
+}
+
+static inline int mmc_card_disable_cd(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_DISABLE_CD;
+}
+
+static inline int mmc_card_nonstd_func_interface(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_NONSTD_FUNC_IF;
+}
+
+static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
+}
+
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
+static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_BROKEN_IRQ_POLLING;
+}
+
+#define mmc_card_name(c) ((c)->cid.prod_name)
+#define mmc_card_id(c) (dev_name(&(c)->dev))
+
+#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
+
+/*
+ * MMC device driver (e.g., Flash card, I/O card...)
+ */
+struct mmc_driver {
+ struct device_driver drv;
+ int (*probe)(struct mmc_card *);
+ void (*remove)(struct mmc_card *);
+ void (*shutdown)(struct mmc_card *);
+};
+
+extern int mmc_register_driver(struct mmc_driver *);
+extern void mmc_unregister_driver(struct mmc_driver *);
+
+extern void mmc_fixup_device(struct mmc_card *card,
+ const struct mmc_fixup *table);
+
+#endif /* LINUX_MMC_CARD_H */
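
An editorial illustration of the quirk machinery above (not part of the patch): a driver-side table built from MMC_FIXUP()/SDIO_FIXUP() entries, terminated by END_FIXUP and applied with mmc_fixup_device(). The product names and the SDIO vendor/device IDs are placeholders, not real hardware.

static const struct mmc_fixup example_fixups[] = {
	/* Use a longer read timeout on a hypothetical eMMC part. */
	MMC_FIXUP("EXMMC1", CID_MANFID_ANY, CID_OEMID_ANY,
		  add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME),

	/* Avoid CMD23 on a hypothetical SD card. */
	MMC_FIXUP("EXSD01", CID_MANFID_ANY, CID_OEMID_ANY,
		  add_quirk_sd, MMC_QUIRK_BLK_NO_CMD23),

	/* Tolerate out-of-range FN0 writes on a hypothetical SDIO card. */
	SDIO_FIXUP(0x1234, 0x5678, add_quirk, MMC_QUIRK_LENIENT_FN0),

	END_FIXUP
};

/* Applied once the CID/CIS is known: mmc_fixup_device(card, example_fixups); */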
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
new file mode 100644
index 000000000..de722d4e9
--- /dev/null
+++ b/include/linux/mmc/core.h
@@ -0,0 +1,215 @@
+/*
+ * linux/include/linux/mmc/core.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_MMC_CORE_H
+#define LINUX_MMC_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+
+struct request;
+struct mmc_data;
+struct mmc_request;
+
+struct mmc_command {
+ u32 opcode;
+ u32 arg;
+#define MMC_CMD23_ARG_REL_WR (1 << 31)
+#define MMC_CMD23_ARG_PACKED ((0 << 31) | (1 << 30))
+#define MMC_CMD23_ARG_TAG_REQ (1 << 29)
+ u32 resp[4];
+ unsigned int flags; /* expected response type */
+#define MMC_RSP_PRESENT (1 << 0)
+#define MMC_RSP_136 (1 << 1) /* 136 bit response */
+#define MMC_RSP_CRC (1 << 2) /* expect valid crc */
+#define MMC_RSP_BUSY (1 << 3) /* card may send busy */
+#define MMC_RSP_OPCODE (1 << 4) /* response contains opcode */
+
+#define MMC_CMD_MASK (3 << 5) /* non-SPI command type */
+#define MMC_CMD_AC (0 << 5)
+#define MMC_CMD_ADTC (1 << 5)
+#define MMC_CMD_BC (2 << 5)
+#define MMC_CMD_BCR (3 << 5)
+
+#define MMC_RSP_SPI_S1 (1 << 7) /* one status byte */
+#define MMC_RSP_SPI_S2 (1 << 8) /* second byte */
+#define MMC_RSP_SPI_B4 (1 << 9) /* four data bytes */
+#define MMC_RSP_SPI_BUSY (1 << 10) /* card may send busy */
+
+/*
+ * These are the native response types, and correspond to valid bit
+ * patterns of the above flags. One additional valid pattern
+ * is all zeros, which means we don't expect a response.
+ */
+#define MMC_RSP_NONE (0)
+#define MMC_RSP_R1 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R1B (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE|MMC_RSP_BUSY)
+#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
+#define MMC_RSP_R3 (MMC_RSP_PRESENT)
+#define MMC_RSP_R4 (MMC_RSP_PRESENT)
+#define MMC_RSP_R5 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
+
+#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
+
+/*
+ * These are the SPI response types for MMC, SD, and SDIO cards.
+ * Commands return R1, with maybe more info. Zero is an error type;
+ * callers must always provide the appropriate MMC_RSP_SPI_Rx flags.
+ */
+#define MMC_RSP_SPI_R1 (MMC_RSP_SPI_S1)
+#define MMC_RSP_SPI_R1B (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY)
+#define MMC_RSP_SPI_R2 (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2)
+#define MMC_RSP_SPI_R3 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)
+#define MMC_RSP_SPI_R4 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)
+#define MMC_RSP_SPI_R5 (MMC_RSP_SPI_S1|MMC_RSP_SPI_S2)
+#define MMC_RSP_SPI_R7 (MMC_RSP_SPI_S1|MMC_RSP_SPI_B4)
+
+#define mmc_spi_resp_type(cmd) ((cmd)->flags & \
+ (MMC_RSP_SPI_S1|MMC_RSP_SPI_BUSY|MMC_RSP_SPI_S2|MMC_RSP_SPI_B4))
+
+/*
+ * These are the command types.
+ */
+#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
+
+ unsigned int retries; /* max number of retries */
+ int error; /* command error */
+
+/*
+ * Standard errno values are used for errors, but some have specific
+ * meaning in the MMC layer:
+ *
+ * ETIMEDOUT Card took too long to respond
+ * EILSEQ Basic format problem with the received or sent data
+ * (e.g. CRC check failed, incorrect opcode in response
+ * or bad end bit)
+ * EINVAL Request cannot be performed because of restrictions
+ * in hardware and/or the driver
+ * ENOMEDIUM Host can determine that the slot is empty and is
+ * actively failing requests
+ */
+
+ unsigned int busy_timeout; /* busy detect timeout in ms */
+ /* Set this flag only for blocking sanitize request */
+ bool sanitize_busy;
+
+ struct mmc_data *data; /* data segment associated with cmd */
+ struct mmc_request *mrq; /* associated request */
+};
+
+struct mmc_data {
+ unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */
+ unsigned int timeout_clks; /* data timeout (in clocks) */
+ unsigned int blksz; /* data block size */
+ unsigned int blocks; /* number of blocks */
+ int error; /* data error */
+ unsigned int flags;
+
+#define MMC_DATA_WRITE (1 << 8)
+#define MMC_DATA_READ (1 << 9)
+#define MMC_DATA_STREAM (1 << 10)
+
+ unsigned int bytes_xfered;
+
+ struct mmc_command *stop; /* stop command */
+ struct mmc_request *mrq; /* associated request */
+
+ unsigned int sg_len; /* size of scatter list */
+ struct scatterlist *sg; /* I/O scatter list */
+ s32 host_cookie; /* host private data */
+};
+
+struct mmc_host;
+struct mmc_request {
+ struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_command *stop;
+
+ struct completion completion;
+ void (*done)(struct mmc_request *);/* completion function */
+ struct mmc_host *host;
+};
+
+struct mmc_card;
+struct mmc_async_req;
+
+extern int mmc_stop_bkops(struct mmc_card *);
+extern int mmc_read_bkops_status(struct mmc_card *);
+extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
+ struct mmc_async_req *, int *);
+extern int mmc_interrupt_hpi(struct mmc_card *);
+extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
+extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
+extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
+extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
+ struct mmc_command *, int);
+extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
+extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
+ bool, bool);
+extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
+extern int mmc_send_tuning(struct mmc_host *host);
+extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
+
+#define MMC_ERASE_ARG 0x00000000
+#define MMC_SECURE_ERASE_ARG 0x80000000
+#define MMC_TRIM_ARG 0x00000001
+#define MMC_DISCARD_ARG 0x00000003
+#define MMC_SECURE_TRIM1_ARG 0x80000001
+#define MMC_SECURE_TRIM2_ARG 0x80008000
+
+#define MMC_SECURE_ARGS 0x80000000
+#define MMC_TRIM_ARGS 0x00008001
+
+extern int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg);
+extern int mmc_can_erase(struct mmc_card *card);
+extern int mmc_can_trim(struct mmc_card *card);
+extern int mmc_can_discard(struct mmc_card *card);
+extern int mmc_can_sanitize(struct mmc_card *card);
+extern int mmc_can_secure_erase_trim(struct mmc_card *card);
+extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
+ unsigned int nr);
+extern unsigned int mmc_calc_max_discard(struct mmc_card *card);
+
+extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
+extern int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
+ bool is_rel_write);
+extern int mmc_hw_reset(struct mmc_host *host);
+extern int mmc_can_reset(struct mmc_card *card);
+
+extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *);
+extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int);
+
+extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort);
+extern void mmc_release_host(struct mmc_host *host);
+
+extern void mmc_get_card(struct mmc_card *card);
+extern void mmc_put_card(struct mmc_card *card);
+
+extern int mmc_flush_cache(struct mmc_card *);
+
+extern int mmc_detect_card_removed(struct mmc_host *host);
+
+/**
+ * mmc_claim_host - exclusively claim a host
+ * @host: mmc host to claim
+ *
+ * Claim a host for a set of operations.
+ */
+static inline void mmc_claim_host(struct mmc_host *host)
+{
+ __mmc_claim_host(host, NULL);
+}
+
+struct device_node;
+extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
+extern int mmc_of_parse_voltage(struct device_node *np, u32 *mask);
+
+#endif /* LINUX_MMC_CORE_H */
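
To show the command API above in use, here is an editorial sketch (not part of the patch) that sends SEND_STATUS (CMD13) to a card. The literal opcode 13, the RCA shift and the response flags follow the MMC/SD specifications; real code would use the MMC_SEND_STATUS constant from <linux/mmc/mmc.h> and richer error handling.

static int example_send_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = 13;				/* MMC_SEND_STATUS */
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	mmc_claim_host(card->host);
	err = mmc_wait_for_cmd(card->host, &cmd, 3);	/* up to 3 retries */
	mmc_release_host(card->host);

	if (!err && status)
		*status = cmd.resp[0];
	return err;
}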
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
new file mode 100644
index 000000000..12111993a
--- /dev/null
+++ b/include/linux/mmc/dw_mmc.h
@@ -0,0 +1,271 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ * (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef LINUX_MMC_DW_MMC_H
+#define LINUX_MMC_DW_MMC_H
+
+#include <linux/scatterlist.h>
+#include <linux/mmc/core.h>
+
+#define MAX_MCI_SLOTS 2
+
+enum dw_mci_state {
+ STATE_IDLE = 0,
+ STATE_SENDING_CMD,
+ STATE_SENDING_DATA,
+ STATE_DATA_BUSY,
+ STATE_SENDING_STOP,
+ STATE_DATA_ERROR,
+ STATE_SENDING_CMD11,
+ STATE_WAITING_CMD11_DONE,
+};
+
+enum {
+ EVENT_CMD_COMPLETE = 0,
+ EVENT_XFER_COMPLETE,
+ EVENT_DATA_COMPLETE,
+ EVENT_DATA_ERROR,
+ EVENT_XFER_ERROR
+};
+
+struct mmc_data;
+
+/**
+ * struct dw_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @regs: Pointer to MMIO registers.
+ * @fifo_reg: Pointer to MMIO registers for data FIFO
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @sg_miter: PIO mapping scatterlist iterator.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ * or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ * transfer is in progress.
+ * @use_dma: Whether DMA channel is initialized or not.
+ * @using_dma: Whether DMA is in use for the current transfer.
+ * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ * command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @data_status: Snapshot of SR taken upon completion of the current
+ * data transfer. Only valid when EVENT_DATA_COMPLETE or
+ * EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ * to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @card_tasklet: Tasklet handling card detect.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ * to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ * processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ * rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @num_slots: Number of slots available.
+ * @verid: Version ID of the controller.
+ * @dev: Device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @drv_data: Driver specific data for identified variant of the controller
+ * @priv: Implementation defined private data.
+ * @biu_clk: Pointer to bus interface unit clock instance.
+ * @ciu_clk: Pointer to card interface unit clock instance.
+ * @slot: Slots sharing this MMC controller.
+ * @fifo_depth: depth of FIFO.
+ * @data_shift: log2 of FIFO item size.
+ * @part_buf_start: Start index in part_buf.
+ * @part_buf_count: Bytes of partial data in part_buf.
+ * @part_buf: Simple buffer for partial fifo reads/writes.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @quirks: Set of quirks that apply to specific versions of the IP.
+ * @irq_flags: The flags to be passed to request_irq.
+ * @irq: The irq value to be passed to request_irq.
+ * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * @irq_lock is an irq-safe spinlock protecting the INTMASK register
+ * to allow the interrupt handler to modify it directly. It is held only
+ * long enough to read-modify-write INTMASK; no other locks are taken
+ * while holding it.
+ *
+ * The @mrq field of struct dw_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct dw_mci {
+ spinlock_t lock;
+ spinlock_t irq_lock;
+ void __iomem *regs;
+ void __iomem *fifo_reg;
+
+ struct scatterlist *sg;
+ struct sg_mapping_iter sg_miter;
+
+ struct dw_mci_slot *cur_slot;
+ struct mmc_request *mrq;
+ struct mmc_command *cmd;
+ struct mmc_data *data;
+ struct mmc_command stop_abort;
+ unsigned int prev_blksz;
+ unsigned char timing;
+
+ /* DMA interface members */
+ int use_dma;
+ int using_dma;
+ int dma_64bit_address;
+
+ dma_addr_t sg_dma;
+ void *sg_cpu;
+ const struct dw_mci_dma_ops *dma_ops;
+#ifdef CONFIG_MMC_DW_IDMAC
+ unsigned int ring_size;
+#else
+ struct dw_mci_dma_data *dma_data;
+#endif
+ u32 cmd_status;
+ u32 data_status;
+ u32 stop_cmdr;
+ u32 dir_status;
+ struct tasklet_struct tasklet;
+ unsigned long pending_events;
+ unsigned long completed_events;
+ enum dw_mci_state state;
+ struct list_head queue;
+
+ u32 bus_hz;
+ u32 current_speed;
+ u32 num_slots;
+ u32 fifoth_val;
+ u16 verid;
+ struct device *dev;
+ struct dw_mci_board *pdata;
+ const struct dw_mci_drv_data *drv_data;
+ void *priv;
+ struct clk *biu_clk;
+ struct clk *ciu_clk;
+ struct dw_mci_slot *slot[MAX_MCI_SLOTS];
+
+ /* FIFO push and pull */
+ int fifo_depth;
+ int data_shift;
+ u8 part_buf_start;
+ u8 part_buf_count;
+ union {
+ u16 part_buf16;
+ u32 part_buf32;
+ u64 part_buf;
+ };
+ void (*push_data)(struct dw_mci *host, void *buf, int cnt);
+ void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+
+ /* Workaround flags */
+ u32 quirks;
+
+ bool vqmmc_enabled;
+ unsigned long irq_flags; /* IRQ flags */
+ int irq;
+
+ int sdio_id0;
+
+ struct timer_list cmd11_timer;
+};
+
+/* DMA ops for Internal/External DMAC interface */
+struct dw_mci_dma_ops {
+ /* DMA Ops */
+ int (*init)(struct dw_mci *host);
+ void (*start)(struct dw_mci *host, unsigned int sg_len);
+ void (*complete)(struct dw_mci *host);
+ void (*stop)(struct dw_mci *host);
+ void (*cleanup)(struct dw_mci *host);
+ void (*exit)(struct dw_mci *host);
+};
+
+/* IP Quirks/flags. */
+/* DTO fix for command transmission with IDMAC configured */
+#define DW_MCI_QUIRK_IDMAC_DTO BIT(0)
+/* delay needed between retries on some 2.11a implementations */
+#define DW_MCI_QUIRK_RETRY_DELAY BIT(1)
+/* High Speed Capable - Supports HS cards (up to 50MHz) */
+#define DW_MCI_QUIRK_HIGHSPEED BIT(2)
+/* Unreliable card detection */
+#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION BIT(3)
+/* No write protect */
+#define DW_MCI_QUIRK_NO_WRITE_PROTECT BIT(4)
+
+/* Slot level quirks */
+/* This slot has no write protect */
+#define DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT BIT(0)
+
+struct dma_pdata;
+
+struct block_settings {
+ unsigned short max_segs; /* see blk_queue_max_segments */
+ unsigned int max_blk_size; /* maximum size of one mmc block */
+ unsigned int max_blk_count; /* maximum number of blocks in one req */
+ unsigned int max_req_size; /* maximum number of bytes in one req */
+ unsigned int max_seg_size; /* see blk_queue_max_segment_size */
+};
+
+/* Board platform data */
+struct dw_mci_board {
+ u32 num_slots;
+
+ u32 quirks; /* Workaround / Quirk flags */
+ unsigned int bus_hz; /* Clock speed at the cclk_in pad */
+
+ u32 caps; /* Capabilities */
+ u32 caps2; /* More capabilities */
+ u32 pm_caps; /* PM capabilities */
+ /*
+ * Override fifo depth. If 0, autodetect it from the FIFOTH register,
+ * but note that this may not be reliable after a bootloader has used
+ * it.
+ */
+ unsigned int fifo_depth;
+
+ /* delay in ms before detecting cards after interrupt */
+ u32 detect_delay_ms;
+
+ struct dw_mci_dma_ops *dma_ops;
+ struct dma_pdata *data;
+ struct block_settings *blk_settings;
+};
+
+#endif /* LINUX_MMC_DW_MMC_H */
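
For orientation, an editorial example (not part of the patch) of the platform data a board file might hand to this driver; every value below is a placeholder rather than a recommendation.

static struct dw_mci_board example_dw_mci_pdata = {
	.num_slots	 = 1,
	.bus_hz		 = 100 * 1000 * 1000,	/* cclk_in runs at 100 MHz */
	.quirks		 = DW_MCI_QUIRK_HIGHSPEED |
			   DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	.detect_delay_ms = 200,
	.fifo_depth	 = 0,			/* autodetect from FIFOTH */
};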
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
new file mode 100644
index 000000000..b5bedaec6
--- /dev/null
+++ b/include/linux/mmc/host.h
@@ -0,0 +1,516 @@
+/*
+ * linux/include/linux/mmc/host.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Host driver specific definitions.
+ */
+#ifndef LINUX_MMC_HOST_H
+#define LINUX_MMC_HOST_H
+
+#include <linux/leds.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/fault-inject.h>
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/pm.h>
+
+struct mmc_ios {
+ unsigned int clock; /* clock rate */
+ unsigned short vdd;
+
+/* vdd stores the bit number of the selected voltage range from below. */
+
+ unsigned char bus_mode; /* command output mode */
+
+#define MMC_BUSMODE_OPENDRAIN 1
+#define MMC_BUSMODE_PUSHPULL 2
+
+ unsigned char chip_select; /* SPI chip select */
+
+#define MMC_CS_DONTCARE 0
+#define MMC_CS_HIGH 1
+#define MMC_CS_LOW 2
+
+ unsigned char power_mode; /* power supply mode */
+
+#define MMC_POWER_OFF 0
+#define MMC_POWER_UP 1
+#define MMC_POWER_ON 2
+#define MMC_POWER_UNDEFINED 3
+
+ unsigned char bus_width; /* data bus width */
+
+#define MMC_BUS_WIDTH_1 0
+#define MMC_BUS_WIDTH_4 2
+#define MMC_BUS_WIDTH_8 3
+
+ unsigned char timing; /* timing specification used */
+
+#define MMC_TIMING_LEGACY 0
+#define MMC_TIMING_MMC_HS 1
+#define MMC_TIMING_SD_HS 2
+#define MMC_TIMING_UHS_SDR12 3
+#define MMC_TIMING_UHS_SDR25 4
+#define MMC_TIMING_UHS_SDR50 5
+#define MMC_TIMING_UHS_SDR104 6
+#define MMC_TIMING_UHS_DDR50 7
+#define MMC_TIMING_MMC_DDR52 8
+#define MMC_TIMING_MMC_HS200 9
+#define MMC_TIMING_MMC_HS400 10
+
+ unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */
+
+#define MMC_SIGNAL_VOLTAGE_330 0
+#define MMC_SIGNAL_VOLTAGE_180 1
+#define MMC_SIGNAL_VOLTAGE_120 2
+
+ unsigned char drv_type; /* driver type (A, B, C, D) */
+
+#define MMC_SET_DRIVER_TYPE_B 0
+#define MMC_SET_DRIVER_TYPE_A 1
+#define MMC_SET_DRIVER_TYPE_C 2
+#define MMC_SET_DRIVER_TYPE_D 3
+};
+
+struct mmc_host_ops {
+ /*
+ * It is optional for the host to implement pre_req and post_req in
+ * order to support double buffering of requests (prepare one
+ * request while another request is active).
+ * pre_req() must always be followed by a post_req().
+ * To undo a call made to pre_req(), call post_req() with
+ * a nonzero err condition.
+ */
+ void (*post_req)(struct mmc_host *host, struct mmc_request *req,
+ int err);
+ void (*pre_req)(struct mmc_host *host, struct mmc_request *req,
+ bool is_first_req);
+ void (*request)(struct mmc_host *host, struct mmc_request *req);
+ /*
+ * Avoid calling these three functions too often or in a "fast path",
+ * since the underlying controller might implement them in an expensive
+ * and/or slow way.
+ *
+ * Also note that these functions might sleep, so don't call them
+ * in atomic contexts!
+ *
+ * Return values for the get_ro callback should be:
+ * 0 for a read/write card
+ * 1 for a read-only card
+ * -ENOSYS when not supported (equal to NULL callback)
+ * or a negative errno value when something bad happened
+ *
+ * Return values for the get_cd callback should be:
+ * 0 for an absent card
+ * 1 for a present card
+ * -ENOSYS when not supported (equal to NULL callback)
+ * or a negative errno value when something bad happened
+ */
+ void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
+ int (*get_ro)(struct mmc_host *host);
+ int (*get_cd)(struct mmc_host *host);
+
+ void (*enable_sdio_irq)(struct mmc_host *host, int enable);
+
+ /* optional callback for HC quirks */
+ void (*init_card)(struct mmc_host *host, struct mmc_card *card);
+
+ int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
+
+ /* Check if the card is pulling dat[0:3] low */
+ int (*card_busy)(struct mmc_host *host);
+
+ /* The tuning command opcode value is different for SD and eMMC cards */
+ int (*execute_tuning)(struct mmc_host *host, u32 opcode);
+
+ /* Prepare HS400 target operating frequency depending on the host driver */
+ int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+ int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
+ void (*hw_reset)(struct mmc_host *host);
+ void (*card_event)(struct mmc_host *host);
+
+ /*
+ * Optional callback to support controllers with HW issues for multiple
+ * I/O. Returns the number of supported blocks for the request.
+ */
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
+};
+
+struct mmc_card;
+struct device;
+
+struct mmc_async_req {
+ /* active mmc request */
+ struct mmc_request *mrq;
+ /*
+ * Check error status of completed mmc request.
+ * Returns 0 on success, otherwise non-zero.
+ */
+ int (*err_check) (struct mmc_card *, struct mmc_async_req *);
+};
+
+/**
+ * struct mmc_slot - MMC slot functions
+ *
+ * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL
+ * @handler_priv: MMC/SD-card slot context
+ *
+ * Some MMC/SD host controllers implement slot-functions like card and
+ * write-protect detection natively. However, a large number of controllers
+ * leave these functions to the CPU. This struct provides a hook to attach
+ * such slot-function drivers.
+ */
+struct mmc_slot {
+ int cd_irq;
+ void *handler_priv;
+};
+
+/**
+ * mmc_context_info - synchronization details for mmc context
+ * @is_done_rcv: wake-up reason was a completed request
+ * @is_new_req: wake-up reason was a new request
+ * @is_waiting_last_req: mmc context waiting for a single running request
+ * @wait: wait queue
+ * @lock: lock to protect data fields
+ */
+struct mmc_context_info {
+ bool is_done_rcv;
+ bool is_new_req;
+ bool is_waiting_last_req;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
+
+struct regulator;
+struct mmc_pwrseq;
+
+struct mmc_supply {
+ struct regulator *vmmc; /* Card power supply */
+ struct regulator *vqmmc; /* Optional Vccq supply */
+};
+
+struct mmc_host {
+ struct device *parent;
+ struct device class_dev;
+ int index;
+ const struct mmc_host_ops *ops;
+ struct mmc_pwrseq *pwrseq;
+ unsigned int f_min;
+ unsigned int f_max;
+ unsigned int f_init;
+ u32 ocr_avail;
+ u32 ocr_avail_sdio; /* SDIO-specific OCR */
+ u32 ocr_avail_sd; /* SD-specific OCR */
+ u32 ocr_avail_mmc; /* MMC-specific OCR */
+ struct notifier_block pm_notify;
+ u32 max_current_330;
+ u32 max_current_300;
+ u32 max_current_180;
+
+#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */
+#define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */
+#define MMC_VDD_21_22 0x00000200 /* VDD voltage 2.1 ~ 2.2 */
+#define MMC_VDD_22_23 0x00000400 /* VDD voltage 2.2 ~ 2.3 */
+#define MMC_VDD_23_24 0x00000800 /* VDD voltage 2.3 ~ 2.4 */
+#define MMC_VDD_24_25 0x00001000 /* VDD voltage 2.4 ~ 2.5 */
+#define MMC_VDD_25_26 0x00002000 /* VDD voltage 2.5 ~ 2.6 */
+#define MMC_VDD_26_27 0x00004000 /* VDD voltage 2.6 ~ 2.7 */
+#define MMC_VDD_27_28 0x00008000 /* VDD voltage 2.7 ~ 2.8 */
+#define MMC_VDD_28_29 0x00010000 /* VDD voltage 2.8 ~ 2.9 */
+#define MMC_VDD_29_30 0x00020000 /* VDD voltage 2.9 ~ 3.0 */
+#define MMC_VDD_30_31 0x00040000 /* VDD voltage 3.0 ~ 3.1 */
+#define MMC_VDD_31_32 0x00080000 /* VDD voltage 3.1 ~ 3.2 */
+#define MMC_VDD_32_33 0x00100000 /* VDD voltage 3.2 ~ 3.3 */
+#define MMC_VDD_33_34 0x00200000 /* VDD voltage 3.3 ~ 3.4 */
+#define MMC_VDD_34_35 0x00400000 /* VDD voltage 3.4 ~ 3.5 */
+#define MMC_VDD_35_36 0x00800000 /* VDD voltage 3.5 ~ 3.6 */
+
+ u32 caps; /* Host capabilities */
+
+#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */
+#define MMC_CAP_MMC_HIGHSPEED (1 << 1) /* Can do MMC high-speed timing */
+#define MMC_CAP_SD_HIGHSPEED (1 << 2) /* Can do SD high-speed timing */
+#define MMC_CAP_SDIO_IRQ (1 << 3) /* Can signal pending SDIO IRQs */
+#define MMC_CAP_SPI (1 << 4) /* Talks only SPI protocols */
+#define MMC_CAP_NEEDS_POLL (1 << 5) /* Needs polling for card-detection */
+#define MMC_CAP_8_BIT_DATA (1 << 6) /* Can the host do 8 bit transfers */
+#define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */
+#define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */
+#define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */
+#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */
+#define MMC_CAP_1_8V_DDR (1 << 11) /* can support */
+ /* DDR mode at 1.8V */
+#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */
+ /* DDR mode at 1.2V */
+#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
+#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */
+#define MMC_CAP_UHS_SDR12 (1 << 15) /* Host supports UHS SDR12 mode */
+#define MMC_CAP_UHS_SDR25 (1 << 16) /* Host supports UHS SDR25 mode */
+#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
+#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
+#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
+#define MMC_CAP_RUNTIME_RESUME (1 << 20) /* Resume at runtime_resume. */
+#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
+#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
+#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
+#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
+#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
+
+ u32 caps2; /* More host capabilities */
+
+#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
+#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
+#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
+#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
+#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
+ MMC_CAP2_HS200_1_2V_SDR)
+#define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */
+#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
+#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
+#define MMC_CAP2_PACKED_RD (1 << 12) /* Allow packed read */
+#define MMC_CAP2_PACKED_WR (1 << 13) /* Allow packed write */
+#define MMC_CAP2_PACKED_CMD (MMC_CAP2_PACKED_RD | \
+ MMC_CAP2_PACKED_WR)
+#define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */
+#define MMC_CAP2_HS400_1_8V (1 << 15) /* Can support HS400 1.8V */
+#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */
+#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \
+ MMC_CAP2_HS400_1_2V)
+#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
+#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
+
+ mmc_pm_flag_t pm_caps; /* supported pm features */
+
+#ifdef CONFIG_MMC_CLKGATE
+ int clk_requests; /* internal reference counter */
+ unsigned int clk_delay; /* number of MCI clk hold cycles */
+ bool clk_gated; /* clock gated */
+ struct delayed_work clk_gate_work; /* delayed clock gate */
+ unsigned int clk_old; /* old clock value cache */
+ spinlock_t clk_lock; /* lock for clk fields */
+ struct mutex clk_gate_mutex; /* mutex for clock gating */
+ struct device_attribute clkgate_delay_attr;
+ unsigned long clkgate_delay;
+#endif
+
+ /* host specific block data */
+ unsigned int max_seg_size; /* see blk_queue_max_segment_size */
+ unsigned short max_segs; /* see blk_queue_max_segments */
+ unsigned short unused;
+ unsigned int max_req_size; /* maximum number of bytes in one req */
+ unsigned int max_blk_size; /* maximum size of one mmc block */
+ unsigned int max_blk_count; /* maximum number of blocks in one req */
+ unsigned int max_busy_timeout; /* max busy timeout in ms */
+
+ /* private data */
+ spinlock_t lock; /* lock for claim and bus ops */
+
+ struct mmc_ios ios; /* current io bus settings */
+
+ /* group bitfields together to minimize padding */
+ unsigned int use_spi_crc:1;
+ unsigned int claimed:1; /* host exclusively claimed */
+ unsigned int bus_dead:1; /* bus has been released */
+#ifdef CONFIG_MMC_DEBUG
+ unsigned int removed:1; /* host is being removed */
+#endif
+
+ int rescan_disable; /* disable card detection */
+ int rescan_entered; /* used with nonremovable devices */
+
+ bool trigger_card_event; /* card_event necessary */
+
+ struct mmc_card *card; /* device attached to this host */
+
+ wait_queue_head_t wq;
+ struct task_struct *claimer; /* task that has host claimed */
+ int claim_cnt; /* "claim" nesting count */
+
+ struct delayed_work detect;
+ int detect_change; /* card detect flag */
+ struct mmc_slot slot;
+
+ const struct mmc_bus_ops *bus_ops; /* current bus driver */
+ unsigned int bus_refs; /* reference counter */
+
+ unsigned int sdio_irqs;
+ struct task_struct *sdio_irq_thread;
+ bool sdio_irq_pending;
+ atomic_t sdio_irq_thread_abort;
+
+ mmc_pm_flag_t pm_flags; /* requested pm features */
+
+ struct led_trigger *led; /* activity led */
+
+#ifdef CONFIG_REGULATOR
+ bool regulator_enabled; /* regulator state */
+#endif
+ struct mmc_supply supply;
+
+ struct dentry *debugfs_root;
+
+ struct mmc_async_req *areq; /* active async req */
+ struct mmc_context_info context_info; /* async synchronization info */
+
+#ifdef CONFIG_FAIL_MMC_REQUEST
+ struct fault_attr fail_mmc_request;
+#endif
+
+ unsigned int actual_clock; /* Actual HC clock rate */
+
+ unsigned int slotno; /* used for sdio acpi binding */
+
+ int dsr_req; /* DSR value is valid */
+ u32 dsr; /* optional driver stage (DSR) value */
+
+ unsigned long private[0] ____cacheline_aligned;
+};
+
+struct mmc_host *mmc_alloc_host(int extra, struct device *);
+int mmc_add_host(struct mmc_host *);
+void mmc_remove_host(struct mmc_host *);
+void mmc_free_host(struct mmc_host *);
+int mmc_of_parse(struct mmc_host *host);
+
+static inline void *mmc_priv(struct mmc_host *host)
+{
+ return (void *)host->private;
+}
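/*
 * Illustrative sketch (not part of the imported header): the usual host
 * driver pattern around mmc_alloc_host()/mmc_priv().  "struct my_host",
 * "my_probe" and "my_ops" are hypothetical names used only for the example.
 */
struct my_host {
	void __iomem *base;			/* controller registers */
	int irq;
};

static int my_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct my_host *priv;
	int ret;

	/* reserve sizeof(struct my_host) bytes after struct mmc_host */
	mmc = mmc_alloc_host(sizeof(struct my_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	priv = mmc_priv(mmc);			/* points into host->private[] */

	mmc->ops = &my_ops;			/* hypothetical struct mmc_host_ops */
	mmc->f_min = 400000;
	mmc->f_max = 52000000;

	ret = mmc_of_parse(mmc);		/* pick up DT properties, if any */
	if (ret)
		goto err_free;

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_free;
	return 0;

err_free:
	mmc_free_host(mmc);
	return ret;
}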
+
+#define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI)
+
+#define mmc_dev(x) ((x)->parent)
+#define mmc_classdev(x) (&(x)->class_dev)
+#define mmc_hostname(x) (dev_name(&(x)->class_dev))
+
+int mmc_power_save_host(struct mmc_host *host);
+int mmc_power_restore_host(struct mmc_host *host);
+
+void mmc_detect_change(struct mmc_host *, unsigned long delay);
+void mmc_request_done(struct mmc_host *, struct mmc_request *);
+
+static inline void mmc_signal_sdio_irq(struct mmc_host *host)
+{
+ host->ops->enable_sdio_irq(host, 0);
+ host->sdio_irq_pending = true;
+ wake_up_process(host->sdio_irq_thread);
+}
+
+void sdio_run_irqs(struct mmc_host *host);
+
+#ifdef CONFIG_REGULATOR
+int mmc_regulator_get_ocrmask(struct regulator *supply);
+int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit);
+#else
+static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
+{
+ return 0;
+}
+
+static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
+ struct regulator *supply,
+ unsigned short vdd_bit)
+{
+ return 0;
+}
+#endif
+
+int mmc_regulator_get_supply(struct mmc_host *mmc);
+
+int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *);
+
+static inline int mmc_card_is_removable(struct mmc_host *host)
+{
+ return !(host->caps & MMC_CAP_NONREMOVABLE);
+}
+
+static inline int mmc_card_keep_power(struct mmc_host *host)
+{
+ return host->pm_flags & MMC_PM_KEEP_POWER;
+}
+
+static inline int mmc_card_wake_sdio_irq(struct mmc_host *host)
+{
+ return host->pm_flags & MMC_PM_WAKE_SDIO_IRQ;
+}
+
+static inline int mmc_host_cmd23(struct mmc_host *host)
+{
+ return host->caps & MMC_CAP_CMD23;
+}
+
+static inline int mmc_boot_partition_access(struct mmc_host *host)
+{
+ return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
+}
+
+static inline int mmc_host_uhs(struct mmc_host *host)
+{
+ return host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50);
+}
+
+static inline int mmc_host_packed_wr(struct mmc_host *host)
+{
+ return host->caps2 & MMC_CAP2_PACKED_WR;
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_hold(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_release(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ return host->ios.clock;
+}
+#endif
+
+static inline int mmc_card_hs(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_SD_HS ||
+ card->host->ios.timing == MMC_TIMING_MMC_HS;
+}
+
+static inline int mmc_card_uhs(struct mmc_card *card)
+{
+ return card->host->ios.timing >= MMC_TIMING_UHS_SDR12 &&
+ card->host->ios.timing <= MMC_TIMING_UHS_DDR50;
+}
+
+static inline bool mmc_card_hs200(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_HS200;
+}
+
+static inline bool mmc_card_ddr52(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_DDR52;
+}
+
+static inline bool mmc_card_hs400(struct mmc_card *card)
+{
+ return card->host->ios.timing == MMC_TIMING_MMC_HS400;
+}
+
+#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
new file mode 100644
index 000000000..124f56211
--- /dev/null
+++ b/include/linux/mmc/mmc.h
@@ -0,0 +1,444 @@
+/*
+ * Header for MultiMediaCard (MMC)
+ *
+ * Copyright 2002 Hewlett-Packard Company
+ *
+ * Use consistent with the GNU GPL is permitted,
+ * provided that this copyright notice is
+ * preserved in its entirety in all copies and derived works.
+ *
+ * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
+ * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
+ * FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ * Many thanks to Alessandro Rubini and Jonathan Corbet!
+ *
+ * Based strongly on code by:
+ *
+ * Author: Yong-iL Joh <tolkien@mizi.com>
+ *
+ * Author: Andrew Christian
+ * 15 May 2002
+ */
+
+#ifndef LINUX_MMC_MMC_H
+#define LINUX_MMC_MMC_H
+
+/* Standard MMC commands (4.1) type argument response */
+ /* class 1 */
+#define MMC_GO_IDLE_STATE 0 /* bc */
+#define MMC_SEND_OP_COND 1 /* bcr [31:0] OCR R3 */
+#define MMC_ALL_SEND_CID 2 /* bcr R2 */
+#define MMC_SET_RELATIVE_ADDR 3 /* ac [31:16] RCA R1 */
+#define MMC_SET_DSR 4 /* bc [31:16] RCA */
+#define MMC_SLEEP_AWAKE 5 /* ac [31:16] RCA 15:flg R1b */
+#define MMC_SWITCH 6 /* ac [31:0] See below R1b */
+#define MMC_SELECT_CARD 7 /* ac [31:16] RCA R1 */
+#define MMC_SEND_EXT_CSD 8 /* adtc R1 */
+#define MMC_SEND_CSD 9 /* ac [31:16] RCA R2 */
+#define MMC_SEND_CID 10 /* ac [31:16] RCA R2 */
+#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */
+#define MMC_STOP_TRANSMISSION 12 /* ac R1b */
+#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */
+#define MMC_BUS_TEST_R 14 /* adtc R1 */
+#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */
+#define MMC_BUS_TEST_W 19 /* adtc R1 */
+#define MMC_SPI_READ_OCR 58 /* spi spi_R3 */
+#define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */
+
+ /* class 2 */
+#define MMC_SET_BLOCKLEN 16 /* ac [31:0] block len R1 */
+#define MMC_READ_SINGLE_BLOCK 17 /* adtc [31:0] data addr R1 */
+#define MMC_READ_MULTIPLE_BLOCK 18 /* adtc [31:0] data addr R1 */
+#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
+#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
+
+ /* class 3 */
+#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
+
+ /* class 4 */
+#define MMC_SET_BLOCK_COUNT 23 /* adtc [31:0] data addr R1 */
+#define MMC_WRITE_BLOCK 24 /* adtc [31:0] data addr R1 */
+#define MMC_WRITE_MULTIPLE_BLOCK 25 /* adtc R1 */
+#define MMC_PROGRAM_CID 26 /* adtc R1 */
+#define MMC_PROGRAM_CSD 27 /* adtc R1 */
+
+ /* class 6 */
+#define MMC_SET_WRITE_PROT 28 /* ac [31:0] data addr R1b */
+#define MMC_CLR_WRITE_PROT 29 /* ac [31:0] data addr R1b */
+#define MMC_SEND_WRITE_PROT 30 /* adtc [31:0] wpdata addr R1 */
+
+ /* class 5 */
+#define MMC_ERASE_GROUP_START 35 /* ac [31:0] data addr R1 */
+#define MMC_ERASE_GROUP_END 36 /* ac [31:0] data addr R1 */
+#define MMC_ERASE 38 /* ac R1b */
+
+ /* class 9 */
+#define MMC_FAST_IO 39 /* ac <Complex> R4 */
+#define MMC_GO_IRQ_STATE 40 /* bcr R5 */
+
+ /* class 7 */
+#define MMC_LOCK_UNLOCK 42 /* adtc R1b */
+
+ /* class 8 */
+#define MMC_APP_CMD 55 /* ac [31:16] RCA R1 */
+#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
+
+static inline bool mmc_op_multi(u32 opcode)
+{
+ return opcode == MMC_WRITE_MULTIPLE_BLOCK ||
+ opcode == MMC_READ_MULTIPLE_BLOCK;
+}
+
+/*
+ * MMC_SWITCH argument format:
+ *
+ * [31:26] Always 0
+ * [25:24] Access Mode
+ * [23:16] Location of target Byte in EXT_CSD
+ * [15:08] Value Byte
+ * [07:03] Always 0
+ * [02:00] Command Set
+ */
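/*
 * Illustrative sketch (not part of the imported header): packing the
 * MMC_SWITCH (CMD6) argument from the fields described above.  The access
 * mode and command-set constants are the MMC_SWITCH_MODE_* and
 * EXT_CSD_CMD_SET_* values defined later in this file; the helper name is
 * hypothetical.
 */
static inline u32 mmc_switch_arg(u8 access, u8 index, u8 value, u8 cmd_set)
{
	return ((u32)(access & 0x3) << 24) |	/* [25:24] access mode        */
	       ((u32)index << 16) |		/* [23:16] EXT_CSD byte index */
	       ((u32)value << 8) |		/* [15:8]  value byte         */
	       (cmd_set & 0x7);			/* [2:0]   command set        */
}

/* e.g. switching to high-speed timing:
 *	mmc_switch_arg(MMC_SWITCH_MODE_WRITE_BYTE, EXT_CSD_HS_TIMING,
 *		       EXT_CSD_TIMING_HS, EXT_CSD_CMD_SET_NORMAL)
 */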
+
+/*
+ MMC status in R1, for native mode (SPI bits are different)
+ Type
+ e : error bit
+ s : status bit
+ r : detected and set for the actual command response
+ x : detected and set during command execution. the host must poll
+ the card by sending status command in order to read these bits.
+ Clear condition
+ a : according to the card state
+ b : always related to the previous command. Reception of
+ a valid command will clear it (with a delay of one command)
+ c : clear by read
+ */
+
+#define R1_OUT_OF_RANGE (1 << 31) /* er, c */
+#define R1_ADDRESS_ERROR (1 << 30) /* erx, c */
+#define R1_BLOCK_LEN_ERROR (1 << 29) /* er, c */
+#define R1_ERASE_SEQ_ERROR (1 << 28) /* er, c */
+#define R1_ERASE_PARAM (1 << 27) /* ex, c */
+#define R1_WP_VIOLATION (1 << 26) /* erx, c */
+#define R1_CARD_IS_LOCKED (1 << 25) /* sx, a */
+#define R1_LOCK_UNLOCK_FAILED (1 << 24) /* erx, c */
+#define R1_COM_CRC_ERROR (1 << 23) /* er, b */
+#define R1_ILLEGAL_COMMAND (1 << 22) /* er, b */
+#define R1_CARD_ECC_FAILED (1 << 21) /* ex, c */
+#define R1_CC_ERROR (1 << 20) /* erx, c */
+#define R1_ERROR (1 << 19) /* erx, c */
+#define R1_UNDERRUN (1 << 18) /* ex, c */
+#define R1_OVERRUN (1 << 17) /* ex, c */
+#define R1_CID_CSD_OVERWRITE (1 << 16) /* erx, c, CID/CSD overwrite */
+#define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */
+#define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */
+#define R1_ERASE_RESET (1 << 13) /* sr, c */
+#define R1_STATUS(x) (x & 0xFFFFE000)
+#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */
+#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
+#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
+#define R1_EXCEPTION_EVENT (1 << 6) /* sr, a */
+#define R1_APP_CMD (1 << 5) /* sr, c */
+
+#define R1_STATE_IDLE 0
+#define R1_STATE_READY 1
+#define R1_STATE_IDENT 2
+#define R1_STATE_STBY 3
+#define R1_STATE_TRAN 4
+#define R1_STATE_DATA 5
+#define R1_STATE_RCV 6
+#define R1_STATE_PRG 7
+#define R1_STATE_DIS 8
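/*
 * Illustrative sketch (not part of the imported header): a typical check on
 * the R1 status word returned by MMC_SEND_STATUS before issuing the next
 * data command.  The helper name is hypothetical.
 */
static inline bool card_ready_for_data(u32 status)
{
	/* card must be back in TRAN state and signalling READY_FOR_DATA */
	return R1_CURRENT_STATE(status) == R1_STATE_TRAN &&
	       (status & R1_READY_FOR_DATA);
}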
+
+/*
+ * MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS
+ * R1 is the low order byte; R2 is the next highest byte, when present.
+ */
+#define R1_SPI_IDLE (1 << 0)
+#define R1_SPI_ERASE_RESET (1 << 1)
+#define R1_SPI_ILLEGAL_COMMAND (1 << 2)
+#define R1_SPI_COM_CRC (1 << 3)
+#define R1_SPI_ERASE_SEQ (1 << 4)
+#define R1_SPI_ADDRESS (1 << 5)
+#define R1_SPI_PARAMETER (1 << 6)
+/* R1 bit 7 is always zero */
+#define R2_SPI_CARD_LOCKED (1 << 8)
+#define R2_SPI_WP_ERASE_SKIP (1 << 9) /* or lock/unlock fail */
+#define R2_SPI_LOCK_UNLOCK_FAIL R2_SPI_WP_ERASE_SKIP
+#define R2_SPI_ERROR (1 << 10)
+#define R2_SPI_CC_ERROR (1 << 11)
+#define R2_SPI_CARD_ECC_ERROR (1 << 12)
+#define R2_SPI_WP_VIOLATION (1 << 13)
+#define R2_SPI_ERASE_PARAM (1 << 14)
+#define R2_SPI_OUT_OF_RANGE (1 << 15) /* or CSD overwrite */
+#define R2_SPI_CSD_OVERWRITE R2_SPI_OUT_OF_RANGE
+
+/* These are unpacked versions of the actual responses */
+
+struct _mmc_csd {
+ u8 csd_structure;
+ u8 spec_vers;
+ u8 taac;
+ u8 nsac;
+ u8 tran_speed;
+ u16 ccc;
+ u8 read_bl_len;
+ u8 read_bl_partial;
+ u8 write_blk_misalign;
+ u8 read_blk_misalign;
+ u8 dsr_imp;
+ u16 c_size;
+ u8 vdd_r_curr_min;
+ u8 vdd_r_curr_max;
+ u8 vdd_w_curr_min;
+ u8 vdd_w_curr_max;
+ u8 c_size_mult;
+ union {
+ struct { /* MMC system specification version 3.1 */
+ u8 erase_grp_size;
+ u8 erase_grp_mult;
+ } v31;
+ struct { /* MMC system specification version 2.2 */
+ u8 sector_size;
+ u8 erase_grp_size;
+ } v22;
+ } erase;
+ u8 wp_grp_size;
+ u8 wp_grp_enable;
+ u8 default_ecc;
+ u8 r2w_factor;
+ u8 write_bl_len;
+ u8 write_bl_partial;
+ u8 file_format_grp;
+ u8 copy;
+ u8 perm_write_protect;
+ u8 tmp_write_protect;
+ u8 file_format;
+ u8 ecc;
+};
+
+/*
+ * OCR bits are mostly in host.h
+ */
+#define MMC_CARD_BUSY 0x80000000 /* Card Power up status bit */
+
+/*
+ * Card Command Classes (CCC)
+ */
+#define CCC_BASIC (1<<0) /* (0) Basic protocol functions */
+ /* (CMD0,1,2,3,4,7,9,10,12,13,15) */
+ /* (and for SPI, CMD58,59) */
+#define CCC_STREAM_READ (1<<1) /* (1) Stream read commands */
+ /* (CMD11) */
+#define CCC_BLOCK_READ (1<<2) /* (2) Block read commands */
+ /* (CMD16,17,18) */
+#define CCC_STREAM_WRITE (1<<3) /* (3) Stream write commands */
+ /* (CMD20) */
+#define CCC_BLOCK_WRITE (1<<4) /* (4) Block write commands */
+ /* (CMD16,24,25,26,27) */
+#define CCC_ERASE (1<<5) /* (5) Ability to erase blocks */
+ /* (CMD32,33,34,35,36,37,38,39) */
+#define CCC_WRITE_PROT (1<<6) /* (6) Able to write protect blocks */
+ /* (CMD28,29,30) */
+#define CCC_LOCK_CARD (1<<7) /* (7) Able to lock down card */
+ /* (CMD16,CMD42) */
+#define CCC_APP_SPEC (1<<8) /* (8) Application specific */
+ /* (CMD55,56,57,ACMD*) */
+#define CCC_IO_MODE (1<<9) /* (9) I/O mode */
+ /* (CMD5,39,40,52,53) */
+#define CCC_SWITCH (1<<10) /* (10) High speed switch */
+ /* (CMD6,34,35,36,37,50) */
+ /* (11) Reserved */
+ /* (CMD?) */
+
+/*
+ * CSD field definitions
+ */
+
+#define CSD_STRUCT_VER_1_0 0 /* Valid for system specification 1.0 - 1.2 */
+#define CSD_STRUCT_VER_1_1 1 /* Valid for system specification 1.4 - 2.2 */
+#define CSD_STRUCT_VER_1_2 2 /* Valid for system specification 3.1 - 3.2 - 3.31 - 4.0 - 4.1 */
+#define CSD_STRUCT_EXT_CSD 3 /* Version is coded in CSD_STRUCTURE in EXT_CSD */
+
+#define CSD_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.2 */
+#define CSD_SPEC_VER_1 1 /* Implements system specification 1.4 */
+#define CSD_SPEC_VER_2 2 /* Implements system specification 2.0 - 2.2 */
+#define CSD_SPEC_VER_3 3 /* Implements system specification 3.1 - 3.2 - 3.31 */
+#define CSD_SPEC_VER_4 4 /* Implements system specification 4.0 - 4.1 */
+
+/*
+ * EXT_CSD fields
+ */
+
+#define EXT_CSD_FLUSH_CACHE 32 /* W */
+#define EXT_CSD_CACHE_CTRL 33 /* R/W */
+#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
+#define EXT_CSD_PACKED_FAILURE_INDEX 35 /* RO */
+#define EXT_CSD_PACKED_CMD_STATUS 36 /* RO */
+#define EXT_CSD_EXP_EVENTS_STATUS 54 /* RO, 2 bytes */
+#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
+#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
+#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
+#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */
+#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
+#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
+#define EXT_CSD_HPI_MGMT 161 /* R/W */
+#define EXT_CSD_RST_N_FUNCTION 162 /* R/W */
+#define EXT_CSD_BKOPS_EN 163 /* R/W */
+#define EXT_CSD_BKOPS_START 164 /* W */
+#define EXT_CSD_SANITIZE_START 165 /* W */
+#define EXT_CSD_WR_REL_PARAM 166 /* RO */
+#define EXT_CSD_RPMB_MULT 168 /* RO */
+#define EXT_CSD_FW_CONFIG 169 /* R/W */
+#define EXT_CSD_BOOT_WP 173 /* R/W */
+#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
+#define EXT_CSD_PART_CONFIG 179 /* R/W */
+#define EXT_CSD_ERASED_MEM_CONT 181 /* RO */
+#define EXT_CSD_BUS_WIDTH 183 /* R/W */
+#define EXT_CSD_HS_TIMING 185 /* R/W */
+#define EXT_CSD_POWER_CLASS 187 /* R/W */
+#define EXT_CSD_REV 192 /* RO */
+#define EXT_CSD_STRUCTURE 194 /* RO */
+#define EXT_CSD_CARD_TYPE 196 /* RO */
+#define EXT_CSD_OUT_OF_INTERRUPT_TIME 198 /* RO */
+#define EXT_CSD_PART_SWITCH_TIME 199 /* RO */
+#define EXT_CSD_PWR_CL_52_195 200 /* RO */
+#define EXT_CSD_PWR_CL_26_195 201 /* RO */
+#define EXT_CSD_PWR_CL_52_360 202 /* RO */
+#define EXT_CSD_PWR_CL_26_360 203 /* RO */
+#define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */
+#define EXT_CSD_S_A_TIMEOUT 217 /* RO */
+#define EXT_CSD_REL_WR_SEC_C 222 /* RO */
+#define EXT_CSD_HC_WP_GRP_SIZE 221 /* RO */
+#define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */
+#define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */
+#define EXT_CSD_BOOT_MULT 226 /* RO */
+#define EXT_CSD_SEC_TRIM_MULT 229 /* RO */
+#define EXT_CSD_SEC_ERASE_MULT 230 /* RO */
+#define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */
+#define EXT_CSD_TRIM_MULT 232 /* RO */
+#define EXT_CSD_PWR_CL_200_195 236 /* RO */
+#define EXT_CSD_PWR_CL_200_360 237 /* RO */
+#define EXT_CSD_PWR_CL_DDR_52_195 238 /* RO */
+#define EXT_CSD_PWR_CL_DDR_52_360 239 /* RO */
+#define EXT_CSD_BKOPS_STATUS 246 /* RO */
+#define EXT_CSD_POWER_OFF_LONG_TIME 247 /* RO */
+#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
+#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
+#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
+#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
+#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
+#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
+#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
+#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
+#define EXT_CSD_MAX_PACKED_READS 501 /* RO */
+#define EXT_CSD_BKOPS_SUPPORT 502 /* RO */
+#define EXT_CSD_HPI_FEATURES 503 /* RO */
+
+/*
+ * EXT_CSD field definitions
+ */
+
+#define EXT_CSD_WR_REL_PARAM_EN (1<<2)
+
+#define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40)
+#define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10)
+#define EXT_CSD_BOOT_WP_B_PERM_WP_EN (0x04)
+#define EXT_CSD_BOOT_WP_B_PWR_WP_EN (0x01)
+
+#define EXT_CSD_PART_CONFIG_ACC_MASK (0x7)
+#define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1)
+#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3)
+#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
+
+#define EXT_CSD_PART_SETTING_COMPLETED (0x1)
+#define EXT_CSD_PART_SUPPORT_PART_EN (0x1)
+
+#define EXT_CSD_CMD_SET_NORMAL (1<<0)
+#define EXT_CSD_CMD_SET_SECURE (1<<1)
+#define EXT_CSD_CMD_SET_CPSECURE (1<<2)
+
+#define EXT_CSD_CARD_TYPE_HS_26 (1<<0) /* Card can run at 26MHz */
+#define EXT_CSD_CARD_TYPE_HS_52 (1<<1) /* Card can run at 52MHz */
+#define EXT_CSD_CARD_TYPE_HS (EXT_CSD_CARD_TYPE_HS_26 | \
+ EXT_CSD_CARD_TYPE_HS_52)
+#define EXT_CSD_CARD_TYPE_DDR_1_8V (1<<2) /* Card can run at 52MHz */
+ /* DDR mode @1.8V or 3V I/O */
+#define EXT_CSD_CARD_TYPE_DDR_1_2V (1<<3) /* Card can run at 52MHz */
+ /* DDR mode @1.2V I/O */
+#define EXT_CSD_CARD_TYPE_DDR_52 (EXT_CSD_CARD_TYPE_DDR_1_8V \
+ | EXT_CSD_CARD_TYPE_DDR_1_2V)
+#define EXT_CSD_CARD_TYPE_HS200_1_8V (1<<4) /* Card can run at 200MHz */
+#define EXT_CSD_CARD_TYPE_HS200_1_2V (1<<5) /* Card can run at 200MHz */
+ /* SDR mode @1.2V I/O */
+#define EXT_CSD_CARD_TYPE_HS200 (EXT_CSD_CARD_TYPE_HS200_1_8V | \
+ EXT_CSD_CARD_TYPE_HS200_1_2V)
+#define EXT_CSD_CARD_TYPE_HS400_1_8V (1<<6) /* Card can run at 200MHz DDR, 1.8V */
+#define EXT_CSD_CARD_TYPE_HS400_1_2V (1<<7) /* Card can run at 200MHz DDR, 1.2V */
+#define EXT_CSD_CARD_TYPE_HS400 (EXT_CSD_CARD_TYPE_HS400_1_8V | \
+ EXT_CSD_CARD_TYPE_HS400_1_2V)
+
+#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
+#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
+#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */
+#define EXT_CSD_DDR_BUS_WIDTH_4 5 /* Card is in 4 bit DDR mode */
+#define EXT_CSD_DDR_BUS_WIDTH_8 6 /* Card is in 8 bit DDR mode */
+
+#define EXT_CSD_TIMING_BC 0 /* Backwards compatibility */
+#define EXT_CSD_TIMING_HS 1 /* High speed */
+#define EXT_CSD_TIMING_HS200 2 /* HS200 */
+#define EXT_CSD_TIMING_HS400 3 /* HS400 */
+
+#define EXT_CSD_SEC_ER_EN BIT(0)
+#define EXT_CSD_SEC_BD_BLK_EN BIT(2)
+#define EXT_CSD_SEC_GB_CL_EN BIT(4)
+#define EXT_CSD_SEC_SANITIZE BIT(6) /* v4.5 only */
+
+#define EXT_CSD_RST_N_EN_MASK 0x3
+#define EXT_CSD_RST_N_ENABLED 1 /* RST_n is enabled on card */
+
+#define EXT_CSD_NO_POWER_NOTIFICATION 0
+#define EXT_CSD_POWER_ON 1
+#define EXT_CSD_POWER_OFF_SHORT 2
+#define EXT_CSD_POWER_OFF_LONG 3
+
+#define EXT_CSD_PWR_CL_8BIT_MASK 0xF0 /* 8 bit PWR CLS */
+#define EXT_CSD_PWR_CL_4BIT_MASK 0x0F /* 4 bit PWR CLS */
+#define EXT_CSD_PWR_CL_8BIT_SHIFT 4
+#define EXT_CSD_PWR_CL_4BIT_SHIFT 0
+
+#define EXT_CSD_PACKED_EVENT_EN BIT(3)
+
+/*
+ * EXCEPTION_EVENT_STATUS field
+ */
+#define EXT_CSD_URGENT_BKOPS BIT(0)
+#define EXT_CSD_DYNCAP_NEEDED BIT(1)
+#define EXT_CSD_SYSPOOL_EXHAUSTED BIT(2)
+#define EXT_CSD_PACKED_FAILURE BIT(3)
+
+#define EXT_CSD_PACKED_GENERIC_ERROR BIT(0)
+#define EXT_CSD_PACKED_INDEXED_ERROR BIT(1)
+
+/*
+ * BKOPS status level
+ */
+#define EXT_CSD_BKOPS_LEVEL_2 0x2
+
+/*
+ * BKOPS modes
+ */
+#define EXT_CSD_MANUAL_BKOPS_MASK 0x01
+
+/*
+ * MMC_SWITCH access modes
+ */
+
+#define MMC_SWITCH_MODE_CMD_SET 0x00 /* Change the command set */
+#define MMC_SWITCH_MODE_SET_BITS 0x01 /* Set bits which are 1 in value */
+#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
+#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
+
+#endif /* LINUX_MMC_MMC_H */
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
new file mode 100644
index 000000000..4a139204c
--- /dev/null
+++ b/include/linux/mmc/pm.h
@@ -0,0 +1,30 @@
+/*
+ * linux/include/linux/mmc/pm.h
+ *
+ * Author: Nicolas Pitre
+ * Copyright: (C) 2009 Marvell Technology Group Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef LINUX_MMC_PM_H
+#define LINUX_MMC_PM_H
+
+/*
+ * These flags are used to describe power management features that
+ * some cards (typically SDIO cards) might wish to benefit from when
+ * the host system is being suspended. There are several layers of
+ * abstractions involved, from the host controller driver, to the MMC core
+ * code, to the SDIO core code, to finally get to the actual SDIO function
+ * driver. This file is therefore used for common definitions shared across
+ * all those layers.
+ */
+
+typedef unsigned int mmc_pm_flag_t;
+
+#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */
+#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */
+
+#endif /* LINUX_MMC_PM_H */
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
new file mode 100644
index 000000000..1ebcf9ba1
--- /dev/null
+++ b/include/linux/mmc/sd.h
@@ -0,0 +1,94 @@
+/*
+ * include/linux/mmc/sd.h
+ *
+ * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef LINUX_MMC_SD_H
+#define LINUX_MMC_SD_H
+
+/* SD commands type argument response */
+ /* class 0 */
+/* This is basically the same command as for MMC with some quirks. */
+#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */
+#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
+#define SD_SWITCH_VOLTAGE 11 /* ac R1 */
+
+ /* class 10 */
+#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
+
+ /* class 5 */
+#define SD_ERASE_WR_BLK_START 32 /* ac [31:0] data addr R1 */
+#define SD_ERASE_WR_BLK_END 33 /* ac [31:0] data addr R1 */
+
+ /* Application commands */
+#define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */
+#define SD_APP_SD_STATUS 13 /* adtc R1 */
+#define SD_APP_SEND_NUM_WR_BLKS 22 /* adtc R1 */
+#define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */
+#define SD_APP_SEND_SCR 51 /* adtc R1 */
+
+/* OCR bit definitions */
+#define SD_OCR_S18R (1 << 24) /* 1.8V switching request */
+#define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */
+#define SD_OCR_XPC (1 << 28) /* SDXC power control */
+#define SD_OCR_CCS (1 << 30) /* Card Capacity Status */
+
+/*
+ * SD_SWITCH argument format:
+ *
+ * [31] Check (0) or switch (1)
+ * [30:24] Reserved (0)
+ * [23:20] Function group 6
+ * [19:16] Function group 5
+ * [15:12] Function group 4
+ * [11:8] Function group 3
+ * [7:4] Function group 2
+ * [3:0] Function group 1
+ */
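/*
 * Illustrative sketch (not part of the imported header): building the
 * SD_SWITCH (CMD6) argument from the fields above.  All function groups
 * default to 0xF ("no influence"); only the selected group nibble is
 * overwritten.  The helper name is hypothetical.
 */
static inline u32 sd_switch_arg(int mode, int group, u8 value)
{
	u32 arg = (mode ? 1U << 31 : 0) | 0x00FFFFFF;

	arg &= ~(0xFU << (group * 4));			/* clear the group nibble  */
	arg |= (u32)(value & 0xF) << (group * 4);	/* select the new function */
	return arg;
}

/* e.g. sd_switch_arg(SD_SWITCH_SET, SD_SWITCH_GRP_ACCESS, SD_SWITCH_ACCESS_HS) */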
+
+/*
+ * SD_SEND_IF_COND argument format:
+ *
+ * [31:12] Reserved (0)
+ * [11:8] Host Voltage Supply Flags
+ * [7:0] Check Pattern (0xAA)
+ */
+
+/*
+ * SCR field definitions
+ */
+
+#define SCR_SPEC_VER_0 0 /* Implements system specification 1.0 - 1.01 */
+#define SCR_SPEC_VER_1 1 /* Implements system specification 1.10 */
+#define SCR_SPEC_VER_2 2 /* Implements system specification 2.00-3.0X */
+
+/*
+ * SD bus widths
+ */
+#define SD_BUS_WIDTH_1 0
+#define SD_BUS_WIDTH_4 2
+
+/*
+ * SD_SWITCH mode
+ */
+#define SD_SWITCH_CHECK 0
+#define SD_SWITCH_SET 1
+
+/*
+ * SD_SWITCH function groups
+ */
+#define SD_SWITCH_GRP_ACCESS 0
+
+/*
+ * SD_SWITCH access modes
+ */
+#define SD_SWITCH_ACCESS_DEF 0
+#define SD_SWITCH_ACCESS_HS 1
+
+#endif /* LINUX_MMC_SD_H */
diff --git a/include/linux/mmc/sdhci-pci-data.h b/include/linux/mmc/sdhci-pci-data.h
new file mode 100644
index 000000000..8959604a1
--- /dev/null
+++ b/include/linux/mmc/sdhci-pci-data.h
@@ -0,0 +1,18 @@
+#ifndef LINUX_MMC_SDHCI_PCI_DATA_H
+#define LINUX_MMC_SDHCI_PCI_DATA_H
+
+struct pci_dev;
+
+struct sdhci_pci_data {
+ struct pci_dev *pdev;
+ int slotno;
+ int rst_n_gpio; /* Set to -EINVAL if unused */
+ int cd_gpio; /* Set to -EINVAL if unused */
+ int (*setup)(struct sdhci_pci_data *data);
+ void (*cleanup)(struct sdhci_pci_data *data);
+};
+
+extern struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev,
+ int slotno);
+
+#endif
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
new file mode 100644
index 000000000..17446d3c3
--- /dev/null
+++ b/include/linux/mmc/sdio.h
@@ -0,0 +1,193 @@
+/*
+ * include/linux/mmc/sdio.h
+ *
+ * Copyright 2006-2007 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef LINUX_MMC_SDIO_H
+#define LINUX_MMC_SDIO_H
+
+/* SDIO commands type argument response */
+#define SD_IO_SEND_OP_COND 5 /* bcr [23:0] OCR R4 */
+#define SD_IO_RW_DIRECT 52 /* ac [31:0] See below R5 */
+#define SD_IO_RW_EXTENDED 53 /* adtc [31:0] See below R5 */
+
+/*
+ * SD_IO_RW_DIRECT argument format:
+ *
+ * [31] R/W flag
+ * [30:28] Function number
+ * [27] RAW flag
+ * [25:9] Register address
+ * [7:0] Data
+ */
+
+/*
+ * SD_IO_RW_EXTENDED argument format:
+ *
+ * [31] R/W flag
+ * [30:28] Function number
+ * [27] Block mode
+ * [26] Increment address
+ * [25:9] Register address
+ * [8:0] Byte/block count
+ */
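/*
 * Illustrative sketch (not part of the imported header): packing the CMD52
 * (SD_IO_RW_DIRECT) argument from the fields described above, roughly as the
 * SDIO core does for single-register accesses.  The helper name is
 * hypothetical.
 */
static inline u32 sdio_rw_direct_arg(int write, unsigned int fn,
				     unsigned int addr, u8 in, int raw)
{
	u32 arg = write ? 0x80000000 : 0;	/* [31] R/W flag */

	arg |= fn << 28;			/* [30:28] function number */
	if (write && raw)
		arg |= 0x08000000;		/* [27] RAW (read after write) */
	arg |= addr << 9;			/* [25:9] register address */
	arg |= in;				/* [7:0] data byte (writes only) */
	return arg;
}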
+
+#define R4_18V_PRESENT (1<<24)
+#define R4_MEMORY_PRESENT (1 << 27)
+
+/*
+ SDIO status in R5
+ Type
+ e : error bit
+ s : status bit
+ r : detected and set for the actual command response
+ x : detected and set during command execution. the host must poll
+ the card by sending status command in order to read these bits.
+ Clear condition
+ a : according to the card state
+ b : always related to the previous command. Reception of
+ a valid command will clear it (with a delay of one command)
+ c : clear by read
+ */
+
+#define R5_COM_CRC_ERROR (1 << 15) /* er, b */
+#define R5_ILLEGAL_COMMAND (1 << 14) /* er, b */
+#define R5_ERROR (1 << 11) /* erx, c */
+#define R5_FUNCTION_NUMBER (1 << 9) /* er, c */
+#define R5_OUT_OF_RANGE (1 << 8) /* er, c */
+#define R5_STATUS(x) (x & 0xCB00)
+#define R5_IO_CURRENT_STATE(x) ((x & 0x3000) >> 12) /* s, b */
+
+/*
+ * Card Common Control Registers (CCCR)
+ */
+
+#define SDIO_CCCR_CCCR 0x00
+
+#define SDIO_CCCR_REV_1_00 0 /* CCCR/FBR Version 1.00 */
+#define SDIO_CCCR_REV_1_10 1 /* CCCR/FBR Version 1.10 */
+#define SDIO_CCCR_REV_1_20 2 /* CCCR/FBR Version 1.20 */
+#define SDIO_CCCR_REV_3_00 3 /* CCCR/FBR Version 3.00 */
+
+#define SDIO_SDIO_REV_1_00 0 /* SDIO Spec Version 1.00 */
+#define SDIO_SDIO_REV_1_10 1 /* SDIO Spec Version 1.10 */
+#define SDIO_SDIO_REV_1_20 2 /* SDIO Spec Version 1.20 */
+#define SDIO_SDIO_REV_2_00 3 /* SDIO Spec Version 2.00 */
+#define SDIO_SDIO_REV_3_00 4 /* SDIO Spec Version 3.00 */
+
+#define SDIO_CCCR_SD 0x01
+
+#define SDIO_SD_REV_1_01 0 /* SD Physical Spec Version 1.01 */
+#define SDIO_SD_REV_1_10 1 /* SD Physical Spec Version 1.10 */
+#define SDIO_SD_REV_2_00 2 /* SD Physical Spec Version 2.00 */
+#define SDIO_SD_REV_3_00 3 /* SD Physical Spec Version 3.00 */
+
+#define SDIO_CCCR_IOEx 0x02
+#define SDIO_CCCR_IORx 0x03
+
+#define SDIO_CCCR_IENx 0x04 /* Function/Master Interrupt Enable */
+#define SDIO_CCCR_INTx 0x05 /* Function Interrupt Pending */
+
+#define SDIO_CCCR_ABORT 0x06 /* function abort/card reset */
+
+#define SDIO_CCCR_IF 0x07 /* bus interface controls */
+
+#define SDIO_BUS_WIDTH_MASK 0x03 /* data bus width setting */
+#define SDIO_BUS_WIDTH_1BIT 0x00
+#define SDIO_BUS_WIDTH_RESERVED 0x01
+#define SDIO_BUS_WIDTH_4BIT 0x02
+#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */
+#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */
+
+#define SDIO_BUS_ASYNC_INT 0x20
+
+#define SDIO_BUS_CD_DISABLE 0x80 /* disable pull-up on DAT3 (pin 1) */
+
+#define SDIO_CCCR_CAPS 0x08
+
+#define SDIO_CCCR_CAP_SDC 0x01 /* can do CMD52 while data transfer */
+#define SDIO_CCCR_CAP_SMB 0x02 /* can do multi-block xfers (CMD53) */
+#define SDIO_CCCR_CAP_SRW 0x04 /* supports read-wait protocol */
+#define SDIO_CCCR_CAP_SBS 0x08 /* supports suspend/resume */
+#define SDIO_CCCR_CAP_S4MI 0x10 /* interrupt during 4-bit CMD53 */
+#define SDIO_CCCR_CAP_E4MI 0x20 /* enable ints during 4-bit CMD53 */
+#define SDIO_CCCR_CAP_LSC 0x40 /* low speed card */
+#define SDIO_CCCR_CAP_4BLS 0x80 /* 4 bit low speed card */
+
+#define SDIO_CCCR_CIS 0x09 /* common CIS pointer (3 bytes) */
+
+/* Following 4 regs are valid only if SBS is set */
+#define SDIO_CCCR_SUSPEND 0x0c
+#define SDIO_CCCR_SELx 0x0d
+#define SDIO_CCCR_EXECx 0x0e
+#define SDIO_CCCR_READYx 0x0f
+
+#define SDIO_CCCR_BLKSIZE 0x10
+
+#define SDIO_CCCR_POWER 0x12
+
+#define SDIO_POWER_SMPC 0x01 /* Supports Master Power Control */
+#define SDIO_POWER_EMPC 0x02 /* Enable Master Power Control */
+
+#define SDIO_CCCR_SPEED 0x13
+
+#define SDIO_SPEED_SHS 0x01 /* Supports High-Speed mode */
+#define SDIO_SPEED_BSS_SHIFT 1
+#define SDIO_SPEED_BSS_MASK (7<<SDIO_SPEED_BSS_SHIFT)
+#define SDIO_SPEED_SDR12 (0<<SDIO_SPEED_BSS_SHIFT)
+#define SDIO_SPEED_SDR25 (1<<SDIO_SPEED_BSS_SHIFT)
+#define SDIO_SPEED_SDR50 (2<<SDIO_SPEED_BSS_SHIFT)
+#define SDIO_SPEED_SDR104 (3<<SDIO_SPEED_BSS_SHIFT)
+#define SDIO_SPEED_DDR50 (4<<SDIO_SPEED_BSS_SHIFT)
+#define SDIO_SPEED_EHS SDIO_SPEED_SDR25 /* Enable High-Speed */
+
+#define SDIO_CCCR_UHS 0x14
+#define SDIO_UHS_SDR50 0x01
+#define SDIO_UHS_SDR104 0x02
+#define SDIO_UHS_DDR50 0x04
+
+#define SDIO_CCCR_DRIVE_STRENGTH 0x15
+#define SDIO_SDTx_MASK 0x07
+#define SDIO_DRIVE_SDTA (1<<0)
+#define SDIO_DRIVE_SDTC (1<<1)
+#define SDIO_DRIVE_SDTD (1<<2)
+#define SDIO_DRIVE_DTSx_MASK 0x03
+#define SDIO_DRIVE_DTSx_SHIFT 4
+#define SDIO_DTSx_SET_TYPE_B (0 << SDIO_DRIVE_DTSx_SHIFT)
+#define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT)
+#define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT)
+#define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT)
+/*
+ * Function Basic Registers (FBR)
+ */
+
+#define SDIO_FBR_BASE(f) ((f) * 0x100) /* base of function f's FBRs */
+
+#define SDIO_FBR_STD_IF 0x00
+
+#define SDIO_FBR_SUPPORTS_CSA 0x40 /* supports Code Storage Area */
+#define SDIO_FBR_ENABLE_CSA 0x80 /* enable Code Storage Area */
+
+#define SDIO_FBR_STD_IF_EXT 0x01
+
+#define SDIO_FBR_POWER 0x02
+
+#define SDIO_FBR_POWER_SPS 0x01 /* Supports Power Selection */
+#define SDIO_FBR_POWER_EPS 0x02 /* Enable (low) Power Selection */
+
+#define SDIO_FBR_CIS 0x09 /* CIS pointer (3 bytes) */
+
+
+#define SDIO_FBR_CSA 0x0C /* CSA pointer (3 bytes) */
+
+#define SDIO_FBR_CSA_DATA 0x0F
+
+#define SDIO_FBR_BLKSIZE 0x10 /* block size (2 bytes) */
+
+#endif /* LINUX_MMC_SDIO_H */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
new file mode 100644
index 000000000..aab032a6a
--- /dev/null
+++ b/include/linux/mmc/sdio_func.h
@@ -0,0 +1,162 @@
+/*
+ * include/linux/mmc/sdio_func.h
+ *
+ * Copyright 2007-2008 Pierre Ossman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#ifndef LINUX_MMC_SDIO_FUNC_H
+#define LINUX_MMC_SDIO_FUNC_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+#include <linux/mmc/pm.h>
+
+struct mmc_card;
+struct sdio_func;
+
+typedef void (sdio_irq_handler_t)(struct sdio_func *);
+
+/*
+ * SDIO function CIS tuple (unknown to the core)
+ */
+struct sdio_func_tuple {
+ struct sdio_func_tuple *next;
+ unsigned char code;
+ unsigned char size;
+ unsigned char data[0];
+};
+
+/*
+ * SDIO function devices
+ */
+struct sdio_func {
+ struct mmc_card *card; /* the card this device belongs to */
+ struct device dev; /* the device */
+ sdio_irq_handler_t *irq_handler; /* IRQ callback */
+ unsigned int num; /* function number */
+
+ unsigned char class; /* standard interface class */
+ unsigned short vendor; /* vendor id */
+ unsigned short device; /* device id */
+
+ unsigned max_blksize; /* maximum block size */
+ unsigned cur_blksize; /* current block size */
+
+ unsigned enable_timeout; /* max enable timeout in msec */
+
+ unsigned int state; /* function state */
+#define SDIO_STATE_PRESENT (1<<0) /* present in sysfs */
+
+ u8 tmpbuf[4]; /* DMA-able scratch buffer */
+
+ unsigned num_info; /* number of info strings */
+ const char **info; /* info strings */
+
+ struct sdio_func_tuple *tuples;
+};
+
+#define sdio_func_present(f) ((f)->state & SDIO_STATE_PRESENT)
+
+#define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT)
+
+#define sdio_func_id(f) (dev_name(&(f)->dev))
+
+#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev)
+#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d)
+#define dev_to_sdio_func(d) container_of(d, struct sdio_func, dev)
+
+/*
+ * SDIO function device driver
+ */
+struct sdio_driver {
+ char *name;
+ const struct sdio_device_id *id_table;
+
+ int (*probe)(struct sdio_func *, const struct sdio_device_id *);
+ void (*remove)(struct sdio_func *);
+
+ struct device_driver drv;
+};
+
+/**
+ * SDIO_DEVICE - macro used to describe a specific SDIO device
+ * @vend: the 16 bit manufacturer code
+ * @dev: the 16 bit function id
+ *
+ * This macro is used to create a struct sdio_device_id that matches a
+ * specific device. The class field will be set to SDIO_ANY_ID.
+ */
+#define SDIO_DEVICE(vend,dev) \
+ .class = SDIO_ANY_ID, \
+ .vendor = (vend), .device = (dev)
+
+/**
+ * SDIO_DEVICE_CLASS - macro used to describe a specific SDIO device class
+ * @dev_class: the 8 bit standard interface code
+ *
+ * This macro is used to create a struct sdio_device_id that matches a
+ * specific standard SDIO function type. The vendor and device fields will
+ * be set to SDIO_ANY_ID.
+ */
+#define SDIO_DEVICE_CLASS(dev_class) \
+ .class = (dev_class), \
+ .vendor = SDIO_ANY_ID, .device = SDIO_ANY_ID
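/*
 * Illustrative sketch (not part of the imported header): a match table and
 * driver definition using the two macros above.  "my_sdio_*" names, the
 * 0x1234/0x5678 IDs and the probe/remove callbacks are placeholders; the
 * driver would be registered with sdio_register_driver() (declared below).
 */
static const struct sdio_device_id my_sdio_ids[] = {
	{ SDIO_DEVICE(0x1234, 0x5678) },	/* one specific function */
	{ SDIO_DEVICE_CLASS(SDIO_CLASS_WLAN) },	/* or any WLAN-class function */
	{ /* end of list */ },
};

static struct sdio_driver my_sdio_driver = {
	.name		= "my_sdio",
	.id_table	= my_sdio_ids,
	.probe		= my_sdio_probe,	/* hypothetical callbacks */
	.remove		= my_sdio_remove,
};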
+
+extern int sdio_register_driver(struct sdio_driver *);
+extern void sdio_unregister_driver(struct sdio_driver *);
+
+/*
+ * SDIO I/O operations
+ */
+extern void sdio_claim_host(struct sdio_func *func);
+extern void sdio_release_host(struct sdio_func *func);
+
+extern int sdio_enable_func(struct sdio_func *func);
+extern int sdio_disable_func(struct sdio_func *func);
+
+extern int sdio_set_block_size(struct sdio_func *func, unsigned blksz);
+
+extern int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler);
+extern int sdio_release_irq(struct sdio_func *func);
+
+extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
+
+extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
+
+extern int sdio_memcpy_fromio(struct sdio_func *func, void *dst,
+ unsigned int addr, int count);
+extern int sdio_readsb(struct sdio_func *func, void *dst,
+ unsigned int addr, int count);
+
+extern void sdio_writeb(struct sdio_func *func, u8 b,
+ unsigned int addr, int *err_ret);
+extern void sdio_writew(struct sdio_func *func, u16 b,
+ unsigned int addr, int *err_ret);
+extern void sdio_writel(struct sdio_func *func, u32 b,
+ unsigned int addr, int *err_ret);
+
+extern u8 sdio_writeb_readb(struct sdio_func *func, u8 write_byte,
+ unsigned int addr, int *err_ret);
+
+extern int sdio_memcpy_toio(struct sdio_func *func, unsigned int addr,
+ void *src, int count);
+extern int sdio_writesb(struct sdio_func *func, unsigned int addr,
+ void *src, int count);
+
+extern unsigned char sdio_f0_readb(struct sdio_func *func,
+ unsigned int addr, int *err_ret);
+extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b,
+ unsigned int addr, int *err_ret);
+
+extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func);
+extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags);
+
+#endif /* LINUX_MMC_SDIO_FUNC_H */
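/*
 * Illustrative sketch (not part of the imported header): the usual
 * claim/access/release pattern for SDIO register I/O from a function
 * driver.  MY_CHIP_ID_REG and the helper name are hypothetical.
 */
static int my_read_chip_id(struct sdio_func *func, u16 *id)
{
	int err;

	sdio_claim_host(func);			/* serialize against the core */
	*id = sdio_readw(func, MY_CHIP_ID_REG, &err);
	sdio_release_host(func);

	return err;				/* 0 on success */
}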
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
new file mode 100644
index 000000000..83430f2ea
--- /dev/null
+++ b/include/linux/mmc/sdio_ids.h
@@ -0,0 +1,61 @@
+/*
+ * SDIO Classes, Interface Types, Manufacturer IDs, etc.
+ */
+
+#ifndef LINUX_MMC_SDIO_IDS_H
+#define LINUX_MMC_SDIO_IDS_H
+
+/*
+ * Standard SDIO Function Interfaces
+ */
+
+#define SDIO_CLASS_NONE 0x00 /* Not an SDIO standard interface */
+#define SDIO_CLASS_UART 0x01 /* standard UART interface */
+#define SDIO_CLASS_BT_A 0x02 /* Type-A Bluetooth std interface */
+#define SDIO_CLASS_BT_B 0x03 /* Type-B Bluetooth std interface */
+#define SDIO_CLASS_GPS 0x04 /* GPS standard interface */
+#define SDIO_CLASS_CAMERA 0x05 /* Camera standard interface */
+#define SDIO_CLASS_PHS 0x06 /* PHS standard interface */
+#define SDIO_CLASS_WLAN 0x07 /* WLAN interface */
+#define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */
+#define SDIO_CLASS_BT_AMP 0x09 /* Type-A Bluetooth AMP interface */
+
+/*
+ * Vendors and devices. Sort key: vendor first, device next.
+ */
+#define SDIO_VENDOR_ID_BROADCOM 0x02d0
+#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887
+#define SDIO_DEVICE_ID_BROADCOM_43241 0x4324
+#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
+#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c
+#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
+#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
+#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
+#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
+#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
+
+#define SDIO_VENDOR_ID_INTEL 0x0089
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403
+#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404
+#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405
+#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406
+#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407
+
+#define SDIO_VENDOR_ID_MARVELL 0x02df
+#define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103
+#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104
+#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105
+
+#define SDIO_VENDOR_ID_SIANO 0x039a
+#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
+#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
+#define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300
+#define SDIO_DEVICE_ID_SIANO_VENICE 0x0301
+#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100
+#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347
+
+#endif /* LINUX_MMC_SDIO_IDS_H */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
new file mode 100644
index 000000000..ccd8fb2ca
--- /dev/null
+++ b/include/linux/mmc/sh_mmcif.h
@@ -0,0 +1,218 @@
+/*
+ * include/linux/mmc/sh_mmcif.h
+ *
+ * platform data for eMMC driver
+ *
+ * Copyright (C) 2010 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ */
+
+#ifndef LINUX_MMC_SH_MMCIF_H
+#define LINUX_MMC_SH_MMCIF_H
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+/*
+ * MMCIF : CE_CLK_CTRL [19:16]
+ * 1000 : Peripheral clock / 512
+ * 0111 : Peripheral clock / 256
+ * 0110 : Peripheral clock / 128
+ * 0101 : Peripheral clock / 64
+ * 0100 : Peripheral clock / 32
+ * 0011 : Peripheral clock / 16
+ * 0010 : Peripheral clock / 8
+ * 0001 : Peripheral clock / 4
+ * 0000 : Peripheral clock / 2
+ * 1111 : Peripheral clock (sup_pclk set '1')
+ */
+
+struct sh_mmcif_plat_data {
+ int (*get_cd)(struct platform_device *pdef);
+ unsigned int slave_id_tx; /* embedded slave_id_[tr]x */
+ unsigned int slave_id_rx;
+ bool use_cd_gpio : 1;
+ bool ccs_unsupported : 1;
+ bool clk_ctrl2_present : 1;
+ unsigned int cd_gpio;
+ u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */
+ unsigned long caps;
+ u32 ocr;
+};
+
+#define MMCIF_CE_CMD_SET 0x00000000
+#define MMCIF_CE_ARG 0x00000008
+#define MMCIF_CE_ARG_CMD12 0x0000000C
+#define MMCIF_CE_CMD_CTRL 0x00000010
+#define MMCIF_CE_BLOCK_SET 0x00000014
+#define MMCIF_CE_CLK_CTRL 0x00000018
+#define MMCIF_CE_BUF_ACC 0x0000001C
+#define MMCIF_CE_RESP3 0x00000020
+#define MMCIF_CE_RESP2 0x00000024
+#define MMCIF_CE_RESP1 0x00000028
+#define MMCIF_CE_RESP0 0x0000002C
+#define MMCIF_CE_RESP_CMD12 0x00000030
+#define MMCIF_CE_DATA 0x00000034
+#define MMCIF_CE_INT 0x00000040
+#define MMCIF_CE_INT_MASK 0x00000044
+#define MMCIF_CE_HOST_STS1 0x00000048
+#define MMCIF_CE_HOST_STS2 0x0000004C
+#define MMCIF_CE_CLK_CTRL2 0x00000070
+#define MMCIF_CE_VERSION 0x0000007C
+
+/* CE_BUF_ACC */
+#define BUF_ACC_DMAWEN (1 << 25)
+#define BUF_ACC_DMAREN (1 << 24)
+#define BUF_ACC_BUSW_32 (0 << 17)
+#define BUF_ACC_BUSW_16 (1 << 17)
+#define BUF_ACC_ATYP (1 << 16)
+
+/* CE_CLK_CTRL */
+#define CLK_ENABLE (1 << 24) /* 1: output mmc clock */
+#define CLK_CLEAR (0xf << 16)
+#define CLK_SUP_PCLK (0xf << 16)
+#define CLKDIV_4 (1 << 16) /* mmc clock frequency.
+ * n: bus clock/(2^(n+1)) */
+#define CLKDIV_256 (7 << 16) /* mmc clock frequency. (see above) */
+#define SRSPTO_256 (2 << 12) /* resp timeout */
+#define SRBSYTO_29 (0xf << 8) /* resp busy timeout */
+#define SRWDTO_29 (0xf << 4) /* read/write timeout */
+#define SCCSTO_29 (0xf << 0) /* ccs timeout */
+
+/* CE_VERSION */
+#define SOFT_RST_ON (1 << 31)
+#define SOFT_RST_OFF 0
+
+static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
+{
+ return __raw_readl(addr + reg);
+}
+
+static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
+{
+ __raw_writel(val, addr + reg);
+}
+
+#define SH_MMCIF_BBS 512 /* boot block size */
+
+static inline void sh_mmcif_boot_cmd_send(void __iomem *base,
+ unsigned long cmd, unsigned long arg)
+{
+ sh_mmcif_writel(base, MMCIF_CE_INT, 0);
+ sh_mmcif_writel(base, MMCIF_CE_ARG, arg);
+ sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd);
+}
+
+static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
+{
+ unsigned long tmp;
+ int cnt;
+
+ for (cnt = 0; cnt < 1000000; cnt++) {
+ tmp = sh_mmcif_readl(base, MMCIF_CE_INT);
+ if (tmp & mask) {
+ sh_mmcif_writel(base, MMCIF_CE_INT, tmp & ~mask);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static inline int sh_mmcif_boot_cmd(void __iomem *base,
+ unsigned long cmd, unsigned long arg)
+{
+ sh_mmcif_boot_cmd_send(base, cmd, arg);
+ return sh_mmcif_boot_cmd_poll(base, 0x00010000);
+}
+
+static inline int sh_mmcif_boot_do_read_single(void __iomem *base,
+ unsigned int block_nr,
+ unsigned long *buf)
+{
+ int k;
+
+ /* CMD13 - Status */
+ sh_mmcif_boot_cmd(base, 0x0d400000, 0x00010000);
+
+ if (sh_mmcif_readl(base, MMCIF_CE_RESP0) != 0x0900)
+ return -1;
+
+ /* CMD17 - Read */
+ sh_mmcif_boot_cmd(base, 0x11480000, block_nr * SH_MMCIF_BBS);
+ if (sh_mmcif_boot_cmd_poll(base, 0x00100000) < 0)
+ return -1;
+
+ for (k = 0; k < (SH_MMCIF_BBS / 4); k++)
+ buf[k] = sh_mmcif_readl(base, MMCIF_CE_DATA);
+
+ return 0;
+}
+
+static inline int sh_mmcif_boot_do_read(void __iomem *base,
+ unsigned long first_block,
+ unsigned long nr_blocks,
+ void *buf)
+{
+ unsigned long k;
+ int ret = 0;
+
+ /* In data transfer mode: Set clock to Bus clock/4 (about 20 MHz) */
+ sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL,
+ CLK_ENABLE | CLKDIV_4 | SRSPTO_256 |
+ SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+
+ /* CMD9 - Get CSD */
+ sh_mmcif_boot_cmd(base, 0x09806000, 0x00010000);
+
+ /* CMD7 - Select the card */
+ sh_mmcif_boot_cmd(base, 0x07400000, 0x00010000);
+
+ /* CMD16 - Set the block size */
+ sh_mmcif_boot_cmd(base, 0x10400000, SH_MMCIF_BBS);
+
+ for (k = 0; !ret && k < nr_blocks; k++)
+ ret = sh_mmcif_boot_do_read_single(base, first_block + k,
+ buf + (k * SH_MMCIF_BBS));
+
+ return ret;
+}
+
+static inline void sh_mmcif_boot_init(void __iomem *base)
+{
+ /* reset */
+ sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_ON);
+ sh_mmcif_writel(base, MMCIF_CE_VERSION, SOFT_RST_OFF);
+
+ /* byte swap */
+ sh_mmcif_writel(base, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
+
+ /* Set block size in MMCIF hardware */
+ sh_mmcif_writel(base, MMCIF_CE_BLOCK_SET, SH_MMCIF_BBS);
+
+ /* Enable the clock, set it to Bus clock/256 (about 325 kHz). */
+ sh_mmcif_writel(base, MMCIF_CE_CLK_CTRL,
+ CLK_ENABLE | CLKDIV_256 | SRSPTO_256 |
+ SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
+
+ /* CMD0 */
+ sh_mmcif_boot_cmd(base, 0x00000040, 0);
+
+ /* CMD1 - Get OCR */
+ do {
+ sh_mmcif_boot_cmd(base, 0x01405040, 0x40300000); /* CMD1 */
+ } while ((sh_mmcif_readl(base, MMCIF_CE_RESP0) & 0x80000000)
+ != 0x80000000);
+
+ /* CMD2 - Get CID */
+ sh_mmcif_boot_cmd(base, 0x02806040, 0);
+
+ /* CMD3 - Set card relative address */
+ sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
+}
+
+#endif /* LINUX_MMC_SH_MMCIF_H */
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
new file mode 100644
index 000000000..95d6f0314
--- /dev/null
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -0,0 +1,10 @@
+#ifndef LINUX_MMC_SH_MOBILE_SDHI_H
+#define LINUX_MMC_SH_MOBILE_SDHI_H
+
+#include <linux/types.h>
+
+#define SH_MOBILE_SDHI_IRQ_CARD_DETECT "card_detect"
+#define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard"
+#define SH_MOBILE_SDHI_IRQ_SDIO "sdio"
+
+#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
new file mode 100644
index 000000000..3945a8c9d
--- /dev/null
+++ b/include/linux/mmc/slot-gpio.h
@@ -0,0 +1,33 @@
+/*
+ * Generic GPIO card-detect helper header
+ *
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MMC_SLOT_GPIO_H
+#define MMC_SLOT_GPIO_H
+
+struct mmc_host;
+
+int mmc_gpio_get_ro(struct mmc_host *host);
+int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
+
+int mmc_gpio_get_cd(struct mmc_host *host);
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
+ unsigned int debounce);
+
+int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce, bool *gpio_invert);
+int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce, bool *gpio_invert);
+void mmc_gpio_set_cd_isr(struct mmc_host *host,
+ irqreturn_t (*isr)(int irq, void *dev_id));
+void mmc_gpiod_request_cd_irq(struct mmc_host *host);
+
+#endif
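/*
 * Illustrative sketch (not part of the imported header): a host driver
 * wiring up a card-detect GPIO with debouncing.  "cd_gpio" would normally
 * come from platform data or the device tree; the helper name is
 * hypothetical.
 */
static int my_setup_cd(struct mmc_host *mmc, unsigned int cd_gpio)
{
	/* debounce is given in microseconds (0 to disable) */
	return mmc_gpio_request_cd(mmc, cd_gpio, 200);
}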
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h
new file mode 100644
index 000000000..84d9053b5
--- /dev/null
+++ b/include/linux/mmc/tmio.h
@@ -0,0 +1,66 @@
+/*
+ * include/linux/mmc/tmio.h
+ *
+ * Copyright (C) 2007 Ian Molton
+ * Copyright (C) 2004 Ian Molton
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for the MMC / SD / SDIO cell found in:
+ *
+ * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
+ */
+#ifndef LINUX_MMC_TMIO_H
+#define LINUX_MMC_TMIO_H
+
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+#define CTL_STATUS 0x1c
+#define CTL_STATUS2 0x1e
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_DMA_ENABLE 0xd8
+#define CTL_RESET_SD 0xe0
+#define CTL_VERSION 0xe2
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND 0x00000001
+#define TMIO_STAT_DATAEND 0x00000004
+#define TMIO_STAT_CARD_REMOVE 0x00000008
+#define TMIO_STAT_CARD_INSERT 0x00000010
+#define TMIO_STAT_SIGSTATE 0x00000020
+#define TMIO_STAT_WRPROTECT 0x00000080
+#define TMIO_STAT_CARD_REMOVE_A 0x00000100
+#define TMIO_STAT_CARD_INSERT_A 0x00000200
+#define TMIO_STAT_SIGSTATE_A 0x00000400
+#define TMIO_STAT_CMD_IDX_ERR 0x00010000
+#define TMIO_STAT_CRCFAIL 0x00020000
+#define TMIO_STAT_STOPBIT_ERR 0x00040000
+#define TMIO_STAT_DATATIMEOUT 0x00080000
+#define TMIO_STAT_RXOVERFLOW 0x00100000
+#define TMIO_STAT_TXUNDERRUN 0x00200000
+#define TMIO_STAT_CMDTIMEOUT 0x00400000
+#define TMIO_STAT_RXRDY 0x01000000
+#define TMIO_STAT_TXRQ 0x02000000
+#define TMIO_STAT_ILL_FUNC 0x20000000
+#define TMIO_STAT_CMD_BUSY 0x40000000
+#define TMIO_STAT_ILL_ACCESS 0x80000000
+
+#define TMIO_BBS 512 /* Boot block size */
+
+#endif /* LINUX_MMC_TMIO_H */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
new file mode 100644
index 000000000..877ef226f
--- /dev/null
+++ b/include/linux/mmdebug.h
@@ -0,0 +1,58 @@
+#ifndef LINUX_MM_DEBUG_H
+#define LINUX_MM_DEBUG_H 1
+
+#include <linux/stringify.h>
+
+struct page;
+struct vm_area_struct;
+struct mm_struct;
+
+extern void dump_page(struct page *page, const char *reason);
+extern void dump_page_badflags(struct page *page, const char *reason,
+ unsigned long badflags);
+void dump_vma(const struct vm_area_struct *vma);
+void dump_mm(const struct mm_struct *mm);
+
+#ifdef CONFIG_DEBUG_VM
+#define VM_BUG_ON(cond) BUG_ON(cond)
+#define VM_BUG_ON_PAGE(cond, page) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\
+ BUG(); \
+ } \
+ } while (0)
+#define VM_BUG_ON_VMA(cond, vma) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_vma(vma); \
+ BUG(); \
+ } \
+ } while (0)
+#define VM_BUG_ON_MM(cond, mm) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_mm(mm); \
+ BUG(); \
+ } \
+ } while (0)
+#define VM_WARN_ON(cond) WARN_ON(cond)
+#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
+#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
+#else
+#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
+#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
+#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
+#endif
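/*
 * Illustrative sketch (not part of the imported header): VM_BUG_ON_PAGE()
 * used as an assertion that dumps the page before BUG()ing under
 * CONFIG_DEBUG_VM and compiles away otherwise.  The wrapper is hypothetical.
 */
static inline void my_unlock_page(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);	/* must hold the page lock */
	unlock_page(page);
}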
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+#define VIRTUAL_BUG_ON(cond) BUG_ON(cond)
+#else
+#define VIRTUAL_BUG_ON(cond) do { } while (0)
+#endif
+
+#endif
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
new file mode 100644
index 000000000..c5d52780d
--- /dev/null
+++ b/include/linux/mmiotrace.h
@@ -0,0 +1,111 @@
+#ifndef _LINUX_MMIOTRACE_H
+#define _LINUX_MMIOTRACE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+struct kmmio_probe;
+struct pt_regs;
+
+typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *,
+ struct pt_regs *, unsigned long addr);
+typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
+ unsigned long condition, struct pt_regs *);
+
+struct kmmio_probe {
+ /* kmmio internal list: */
+ struct list_head list;
+ /* start location of the probe point: */
+ unsigned long addr;
+ /* length of the probe region: */
+ unsigned long len;
+ /* Called before addr is executed: */
+ kmmio_pre_handler_t pre_handler;
+ /* Called after addr is executed: */
+ kmmio_post_handler_t post_handler;
+ void *private;
+};
+
+extern unsigned int kmmio_count;
+
+extern int register_kmmio_probe(struct kmmio_probe *p);
+extern void unregister_kmmio_probe(struct kmmio_probe *p);
+extern int kmmio_init(void);
+extern void kmmio_cleanup(void);
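/*
 * Illustrative sketch (not part of the imported header): registering a
 * kmmio probe over an ioremap()ed MMIO window so that accesses to it trap
 * into the pre/post handlers.  "my_pre", "my_post" and "my_probe" are
 * hypothetical names.
 */
static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
		   unsigned long addr)
{
	/* called before the faulting MMIO access is executed */
}

static void my_post(struct kmmio_probe *p, unsigned long condition,
		    struct pt_regs *regs)
{
	/* called after single-stepping over the access */
}

static struct kmmio_probe my_probe = {
	.pre_handler	= my_pre,
	.post_handler	= my_post,
};

/* somewhere after ioremap():
 *	my_probe.addr = (unsigned long)ioaddr;
 *	my_probe.len  = PAGE_SIZE;
 *	err = register_kmmio_probe(&my_probe);
 */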
+
+#ifdef CONFIG_MMIOTRACE
+/* Is kmmio active, i.e. are any kmmio_probes currently registered? */
+static inline int is_kmmio_active(void)
+{
+ return kmmio_count;
+}
+
+/* Called from page fault handler. */
+extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
+
+/* Called from ioremap.c */
+extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+ void __iomem *addr);
+extern void mmiotrace_iounmap(volatile void __iomem *addr);
+
+/* For anyone to insert markers. Remember trailing newline. */
+extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
+#else /* !CONFIG_MMIOTRACE: */
+static inline int is_kmmio_active(void)
+{
+ return 0;
+}
+
+static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+{
+ return 0;
+}
+
+static inline void mmiotrace_ioremap(resource_size_t offset,
+ unsigned long size, void __iomem *addr)
+{
+}
+
+static inline void mmiotrace_iounmap(volatile void __iomem *addr)
+{
+}
+
+static inline __printf(1, 2) int mmiotrace_printk(const char *fmt, ...)
+{
+ return 0;
+}
+#endif /* CONFIG_MMIOTRACE */
+
+enum mm_io_opcode {
+ MMIO_READ = 0x1, /* struct mmiotrace_rw */
+ MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
+ MMIO_PROBE = 0x3, /* struct mmiotrace_map */
+ MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
+ MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
+};
+
+struct mmiotrace_rw {
+ resource_size_t phys; /* PCI address of register */
+ unsigned long value;
+ unsigned long pc; /* optional program counter */
+ int map_id;
+ unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
+ unsigned char width; /* size of register access in bytes */
+};
+
+struct mmiotrace_map {
+ resource_size_t phys; /* base address in PCI space */
+ unsigned long virt; /* base virtual address */
+ unsigned long len; /* mapping size */
+ int map_id;
+ unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */
+};
+
+/* in kernel/trace/trace_mmiotrace.c */
+extern void enable_mmiotrace(void);
+extern void disable_mmiotrace(void);
+extern void mmio_trace_rw(struct mmiotrace_rw *rw);
+extern void mmio_trace_mapping(struct mmiotrace_map *map);
+extern int mmio_trace_printk(const char *fmt, va_list args);
+
+#endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
new file mode 100644
index 000000000..70fffeba7
--- /dev/null
+++ b/include/linux/mmu_context.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_MMU_CONTEXT_H
+#define _LINUX_MMU_CONTEXT_H
+
+struct mm_struct;
+
+void use_mm(struct mm_struct *mm);
+void unuse_mm(struct mm_struct *mm);
+
+#endif
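/*
 * Illustrative sketch (not part of the imported header): a kernel thread
 * temporarily adopting a user address space so copy_{to,from}_user() can
 * operate on it (the pattern used by e.g. aio and vhost workers).  The
 * function name is hypothetical.
 */
static void my_worker(struct mm_struct *mm, void __user *ubuf, u32 val)
{
	use_mm(mm);				/* switch this kthread to mm */
	if (copy_to_user(ubuf, &val, sizeof(val)))
		;				/* handle -EFAULT as appropriate */
	unuse_mm(mm);				/* drop back to the kernel mm */
}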
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
new file mode 100644
index 000000000..95243d28a
--- /dev/null
+++ b/include/linux/mmu_notifier.h
@@ -0,0 +1,437 @@
+#ifndef _LINUX_MMU_NOTIFIER_H
+#define _LINUX_MMU_NOTIFIER_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mm_types.h>
+#include <linux/srcu.h>
+
+struct mmu_notifier;
+struct mmu_notifier_ops;
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+/*
+ * The mmu notifier_mm structure is allocated and installed in
+ * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
+ * critical section and it's released only when mm_count reaches zero
+ * in mmdrop().
+ */
+struct mmu_notifier_mm {
+ /* all mmu notifiers registered in this mm are queued in this list */
+ struct hlist_head list;
+ /* to serialize the list modifications and hlist_unhashed */
+ spinlock_t lock;
+};
+
+struct mmu_notifier_ops {
+ /*
+ * Called either by mmu_notifier_unregister or when the mm is
+ * being destroyed by exit_mmap, always before all pages are
+ * freed. This can run concurrently with other mmu notifier
+ * methods (the ones invoked outside the mm context) and it
+ * should tear down all secondary mmu mappings and freeze the
+ * secondary mmu. If this method isn't implemented you have to
+ * be sure that nothing could possibly write to the pages
+ * through the secondary mmu by the time the last thread with
+ * tsk->mm == mm exits.
+ *
+ * As side note: the pages freed after ->release returns could
+ * be immediately reallocated by the gart at an alias physical
+ * address with a different cache model, so if ->release isn't
+ * implemented because all _software_ driven memory accesses
+ * through the secondary mmu are terminated by the time the
+ * last thread of this mm quits, you also have to be sure that
+ * speculative _hardware_ operations can't allocate dirty
+ * cachelines in the cpu that could not be snooped and made
+ * coherent with the other read and write operations happening
+ * through the gart alias address, so leading to memory
+ * corruption.
+ */
+ void (*release)(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+
+ /*
+ * clear_flush_young is called after the VM is
+ * test-and-clearing the young/accessed bitflag in the
+ * pte. This way the VM will provide proper aging to the
+ * accesses to the page through the secondary MMUs and not
+ * only to the ones through the Linux pte.
+ * Start-end is necessary in case the secondary MMU is mapping the page
+ * at a smaller granularity than the primary MMU.
+ */
+ int (*clear_flush_young)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+
+ /*
+ * test_young is called to check the young/accessed bitflag in
+ * the secondary pte. This is used to know if the page is
+ * frequently used without actually clearing the flag or tearing
+ * down the secondary mapping on the page.
+ */
+ int (*test_young)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address);
+
+ /*
+	 * change_pte is called when a pte that maps a page is changed:
+	 * for example, when ksm remaps the pte to point to a new shared page.
+ */
+ void (*change_pte)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address,
+ pte_t pte);
+
+ /*
+ * Before this is invoked any secondary MMU is still ok to
+ * read/write to the page previously pointed to by the Linux
+ * pte because the page hasn't been freed yet and it won't be
+	 * freed until this returns. If required, set_page_dirty() has to
+	 * be called from within this method.
+ */
+ void (*invalidate_page)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address);
+
+ /*
+ * invalidate_range_start() and invalidate_range_end() must be
+ * paired and are called only when the mmap_sem and/or the
+ * locks protecting the reverse maps are held. If the subsystem
+ * can't guarantee that no additional references are taken to
+ * the pages in the range, it has to implement the
+ * invalidate_range() notifier to remove any references taken
+ * after invalidate_range_start().
+ *
+ * Invalidation of multiple concurrent ranges may be
+ * optionally permitted by the driver. Either way the
+	 * establishment of sptes is forbidden in the range passed to
+	 * invalidate_range_start/end for the whole duration of the
+	 * invalidate_range_start/end critical section.
+ *
+ * invalidate_range_start() is called when all pages in the
+ * range are still mapped and have at least a refcount of one.
+ *
+ * invalidate_range_end() is called when all pages in the
+ * range have been unmapped and the pages have been freed by
+ * the VM.
+ *
+ * The VM will remove the page table entries and potentially
+ * the page between invalidate_range_start() and
+ * invalidate_range_end(). If the page must not be freed
+ * because of pending I/O or other circumstances then the
+ * invalidate_range_start() callback (or the initial mapping
+ * by the driver) must make sure that the refcount is kept
+ * elevated.
+ *
+ * If the driver increases the refcount when the pages are
+ * initially mapped into an address space then either
+ * invalidate_range_start() or invalidate_range_end() may
+ * decrease the refcount. If the refcount is decreased on
+ * invalidate_range_start() then the VM can free pages as page
+ * table entries are removed. If the refcount is only
+	 * dropped on invalidate_range_end() then the driver itself
+ * will drop the last refcount but it must take care to flush
+ * any secondary tlb before doing the final free on the
+ * page. Pages will no longer be referenced by the linux
+ * address space but may still be referenced by sptes until
+ * the last refcount is dropped.
+ */
+ void (*invalidate_range_start)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+ void (*invalidate_range_end)(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+
+ /*
+ * invalidate_range() is either called between
+ * invalidate_range_start() and invalidate_range_end() when the
+	 * VM has to free pages that were unmapped, but before the
+ * pages are actually freed, or outside of _start()/_end() when
+ * a (remote) TLB is necessary.
+ *
+ * If invalidate_range() is used to manage a non-CPU TLB with
+	 * shared page-tables, it is not necessary to implement the
+	 * invalidate_range_start()/end() notifiers, as
+	 * invalidate_range() already catches the points in time when an
+ * external TLB range needs to be flushed.
+ *
+ * The invalidate_range() function is called under the ptl
+ * spin-lock and not allowed to sleep.
+ *
+ * Note that this function might be called with just a sub-range
+ * of what was passed to invalidate_range_start()/end(), if
+ * called between those functions.
+ */
+ void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+};
+
+/*
+ * The notifier chains are protected by mmap_sem and/or the reverse map
+ * semaphores. Notifier chains are only changed when all reverse maps and
+ * the mmap_sem locks are taken.
+ *
+ * Therefore notifier chains can only be traversed when either
+ *
+ * 1. mmap_sem is held.
+ * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
+ * 3. No other concurrent thread can access the list (release)
+ */
+struct mmu_notifier {
+ struct hlist_node hlist;
+ const struct mmu_notifier_ops *ops;
+};
+
+static inline int mm_has_notifiers(struct mm_struct *mm)
+{
+ return unlikely(mm->mmu_notifier_mm);
+}
+
+extern int mmu_notifier_register(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+extern int __mmu_notifier_register(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
+ struct mm_struct *mm);
+extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+extern void __mmu_notifier_release(struct mm_struct *mm);
+extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+extern int __mmu_notifier_test_young(struct mm_struct *mm,
+ unsigned long address);
+extern void __mmu_notifier_change_pte(struct mm_struct *mm,
+ unsigned long address, pte_t pte);
+extern void __mmu_notifier_invalidate_page(struct mm_struct *mm,
+ unsigned long address);
+extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
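+
+/*
+ * Illustrative sketch only (not part of this API): under the assumption of a
+ * fictional driver running in a kernel-module context, the registration
+ * entry points declared above are typically used by embedding a struct
+ * mmu_notifier in a driver-private context, pointing it at a const ops
+ * table and registering it against current->mm.  All my_* names below are
+ * hypothetical.
+ *
+ *	struct my_ctx {
+ *		struct mmu_notifier mn;
+ *	};
+ *
+ *	static void my_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+ *	{
+ *		struct my_ctx *ctx = container_of(mn, struct my_ctx, mn);
+ *
+ *		my_ctx_teardown_secondary_mappings(ctx);
+ *	}
+ *
+ *	static const struct mmu_notifier_ops my_mn_ops = {
+ *		.release	= my_mn_release,
+ *	};
+ *
+ *	static int my_ctx_attach(struct my_ctx *ctx)
+ *	{
+ *		ctx->mn.ops = &my_mn_ops;
+ *		return mmu_notifier_register(&ctx->mn, current->mm);
+ *	}
+ */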
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_release(mm);
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_clear_flush_young(mm, start, end);
+ return 0;
+}
+
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+ unsigned long address)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_test_young(mm, address);
+ return 0;
+}
+
+static inline void mmu_notifier_change_pte(struct mm_struct *mm,
+ unsigned long address, pte_t pte)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_change_pte(mm, address, pte);
+}
+
+static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
+ unsigned long address)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_page(mm, address);
+}
+
+static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range_start(mm, start, end);
+}
+
+static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range_end(mm, start, end);
+}
+
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_invalidate_range(mm, start, end);
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+ mm->mmu_notifier_mm = NULL;
+}
+
+static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+ if (mm_has_notifiers(mm))
+ __mmu_notifier_mm_destroy(mm);
+}
+
+#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
+ __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
+ ___address, \
+ ___address + \
+ PAGE_SIZE); \
+ __young; \
+})
+
+#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp) \
+({ \
+ int __young; \
+ struct vm_area_struct *___vma = __vma; \
+ unsigned long ___address = __address; \
+ __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
+ __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
+ ___address, \
+ ___address + \
+ PMD_SIZE); \
+ __young; \
+})
+
+#define ptep_clear_flush_notify(__vma, __address, __ptep) \
+({ \
+ unsigned long ___addr = __address & PAGE_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pte_t ___pte; \
+ \
+ ___pte = ptep_clear_flush(__vma, __address, __ptep); \
+ mmu_notifier_invalidate_range(___mm, ___addr, \
+ ___addr + PAGE_SIZE); \
+ \
+ ___pte; \
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
+ struct mm_struct *___mm = (__vma)->vm_mm; \
+ pmd_t ___pmd; \
+ \
+ ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
+ mmu_notifier_invalidate_range(___mm, ___haddr, \
+ ___haddr + HPAGE_PMD_SIZE); \
+ \
+ ___pmd; \
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
+({ \
+ unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
+ pmd_t ___pmd; \
+ \
+ ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
+ mmu_notifier_invalidate_range(__mm, ___haddr, \
+ ___haddr + HPAGE_PMD_SIZE); \
+ \
+ ___pmd; \
+})
+
+/*
+ * set_pte_at_notify() sets the pte _after_ running the notifier.
+ * It is safe to start by updating the secondary MMUs because the primary MMU
+ * pte invalidate must already have happened with a ptep_clear_flush() before
+ * set_pte_at_notify() is invoked.  Updating the secondary MMUs first is
+ * required when we change both the protection of the mapping from read-only to
+ * read-write and the pfn (as during copy-on-write page faults).  Otherwise the
+ * old page would remain mapped read-only in the secondary MMUs after the new
+ * page is already writable by some CPU through the primary MMU.
+ */
+#define set_pte_at_notify(__mm, __address, __ptep, __pte) \
+({ \
+ struct mm_struct *___mm = __mm; \
+ unsigned long ___address = __address; \
+ pte_t ___pte = __pte; \
+ \
+ mmu_notifier_change_pte(___mm, ___address, ___pte); \
+ set_pte_at(___mm, ___address, __ptep, ___pte); \
+})
+
+extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
+ void (*func)(struct rcu_head *rcu));
+extern void mmu_notifier_synchronize(void);
+
+#else /* CONFIG_MMU_NOTIFIER */
+
+static inline void mmu_notifier_release(struct mm_struct *mm)
+{
+}
+
+static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ return 0;
+}
+
+static inline int mmu_notifier_test_young(struct mm_struct *mm,
+ unsigned long address)
+{
+ return 0;
+}
+
+static inline void mmu_notifier_change_pte(struct mm_struct *mm,
+ unsigned long address, pte_t pte)
+{
+}
+
+static inline void mmu_notifier_invalidate_page(struct mm_struct *mm,
+ unsigned long address)
+{
+}
+
+static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+{
+}
+
+static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+{
+}
+
+#define ptep_clear_flush_young_notify ptep_clear_flush_young
+#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
+#define set_pte_at_notify set_pte_at
+
+#endif /* CONFIG_MMU_NOTIFIER */
+
+#endif /* _LINUX_MMU_NOTIFIER_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
new file mode 100644
index 000000000..ba3cada47
--- /dev/null
+++ b/include/linux/mmzone.h
@@ -0,0 +1,1275 @@
+#ifndef _LINUX_MMZONE_H
+#define _LINUX_MMZONE_H
+
+#ifndef __ASSEMBLY__
+#ifndef __GENERATING_BOUNDS_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/numa.h>
+#include <linux/init.h>
+#include <linux/seqlock.h>
+#include <linux/nodemask.h>
+#include <linux/pageblock-flags.h>
+#include <linux/page-flags-layout.h>
+#include <linux/atomic.h>
+#include <asm/page.h>
+
+/* Free memory management - zoned buddy allocator. */
+#ifndef CONFIG_FORCE_MAX_ZONEORDER
+#define MAX_ORDER 11
+#else
+#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
+#endif
+#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
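+
+/*
+ * Worked example (illustrative, with assumed values): with the default
+ * MAX_ORDER of 11 and a 4 KiB PAGE_SIZE, MAX_ORDER_NR_PAGES is
+ * 1 << (11 - 1) = 1024 pages, i.e. the largest block the buddy allocator
+ * hands out is 4 MiB of physically contiguous memory.
+ */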
+
+/*
+ * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
+ * costly to service. That is between allocation orders which should
+ * coalesce naturally under reasonable reclaim pressure and those which
+ * will not.
+ */
+#define PAGE_ALLOC_COSTLY_ORDER 3
+
+enum {
+ MIGRATE_UNMOVABLE,
+ MIGRATE_RECLAIMABLE,
+ MIGRATE_MOVABLE,
+ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+ MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+ /*
+	 * MIGRATE_CMA migration type is designed to mimic the way
+	 * ZONE_MOVABLE works.  Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and the page allocator never
+	 * implicitly changes the migration type of a MIGRATE_CMA
+	 * pageblock.
+	 *
+	 * The way to use it is to change the migratetype of a range of
+	 * pageblocks to MIGRATE_CMA, which can be done by the
+	 * __free_pageblock_cma() function.  What is important though
+	 * is that a range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
+	 * a single pageblock.
+ */
+ MIGRATE_CMA,
+#endif
+#ifdef CONFIG_MEMORY_ISOLATION
+ MIGRATE_ISOLATE, /* can't allocate from here */
+#endif
+ MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#else
+# define is_migrate_cma(migratetype) false
+#endif
+
+#define for_each_migratetype_order(order, type) \
+ for (order = 0; order < MAX_ORDER; order++) \
+ for (type = 0; type < MIGRATE_TYPES; type++)
+
+extern int page_group_by_mobility_disabled;
+
+#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
+#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+
+#define get_pageblock_migratetype(page) \
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
+ PB_migrate_end, MIGRATETYPE_MASK)
+
+static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
+{
+ BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
+ return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
+ MIGRATETYPE_MASK);
+}
+
+struct free_area {
+ struct list_head free_list[MIGRATE_TYPES];
+ unsigned long nr_free;
+};
+
+struct pglist_data;
+
+/*
+ * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
+ * So add a wild amount of padding here to ensure that they fall into separate
+ * cachelines. There are very few zone structures in the machine, so space
+ * consumption is not a concern here.
+ */
+#if defined(CONFIG_SMP)
+struct zone_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define ZONE_PADDING(name) struct zone_padding name;
+#else
+#define ZONE_PADDING(name)
+#endif
+
+enum zone_stat_item {
+ /* First 128 byte cacheline (assuming 64 bit words) */
+ NR_FREE_PAGES,
+ NR_ALLOC_BATCH,
+ NR_LRU_BASE,
+ NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
+ NR_ACTIVE_ANON, /* " " " " " */
+ NR_INACTIVE_FILE, /* " " " " " */
+ NR_ACTIVE_FILE, /* " " " " " */
+ NR_UNEVICTABLE, /* " " " " " */
+ NR_MLOCK, /* mlock()ed pages found and moved off LRU */
+ NR_ANON_PAGES, /* Mapped anonymous pages */
+ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
+ only modified from process context */
+ NR_FILE_PAGES,
+ NR_FILE_DIRTY,
+ NR_WRITEBACK,
+ NR_SLAB_RECLAIMABLE,
+ NR_SLAB_UNRECLAIMABLE,
+ NR_PAGETABLE, /* used for pagetables */
+ NR_KERNEL_STACK,
+ /* Second 128 byte cacheline */
+ NR_UNSTABLE_NFS, /* NFS unstable pages */
+ NR_BOUNCE,
+ NR_VMSCAN_WRITE,
+ NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */
+ NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
+ NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
+ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
+	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
+ NR_DIRTIED, /* page dirtyings since bootup */
+ NR_WRITTEN, /* page writings since bootup */
+ NR_PAGES_SCANNED, /* pages scanned since last reclaim */
+#ifdef CONFIG_NUMA
+ NUMA_HIT, /* allocated in intended node */
+ NUMA_MISS, /* allocated in non intended node */
+ NUMA_FOREIGN, /* was intended here, hit elsewhere */
+ NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
+ NUMA_LOCAL, /* allocation from local node */
+ NUMA_OTHER, /* allocation from other node */
+#endif
+ WORKINGSET_REFAULT,
+ WORKINGSET_ACTIVATE,
+ WORKINGSET_NODERECLAIM,
+ NR_ANON_TRANSPARENT_HUGEPAGES,
+ NR_FREE_CMA_PAGES,
+#ifdef CONFIG_UKSM
+ NR_UKSM_ZERO_PAGES,
+#endif
+ NR_VM_ZONE_STAT_ITEMS };
+
+/*
+ * We do arithmetic on the LRU lists in various places in the code,
+ * so it is important to keep the active lists LRU_ACTIVE higher in
+ * the array than the corresponding inactive lists, and to keep
+ * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
+ *
+ * This has to be kept in sync with the statistics in zone_stat_item
+ * above and the descriptions in vmstat_text in mm/vmstat.c
+ */
+#define LRU_BASE 0
+#define LRU_ACTIVE 1
+#define LRU_FILE 2
+
+enum lru_list {
+ LRU_INACTIVE_ANON = LRU_BASE,
+ LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
+ LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
+ LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
+ LRU_UNEVICTABLE,
+ NR_LRU_LISTS
+};
+
+#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)
+
+#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
+
+static inline int is_file_lru(enum lru_list lru)
+{
+ return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
+}
+
+static inline int is_active_lru(enum lru_list lru)
+{
+ return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
+}
+
+static inline int is_unevictable_lru(enum lru_list lru)
+{
+ return (lru == LRU_UNEVICTABLE);
+}
+
+struct zone_reclaim_stat {
+ /*
+ * The pageout code in vmscan.c keeps track of how many of the
+ * mem/swap backed and file backed pages are referenced.
+ * The higher the rotated/scanned ratio, the more valuable
+ * that cache is.
+ *
+ * The anon LRU stats live in [0], file LRU stats in [1]
+ */
+ unsigned long recent_rotated[2];
+ unsigned long recent_scanned[2];
+};
+
+struct lruvec {
+ struct list_head lists[NR_LRU_LISTS];
+ struct zone_reclaim_stat reclaim_stat;
+#ifdef CONFIG_MEMCG
+ struct zone *zone;
+#endif
+};
+
+/* Mask used at gathering information at once (see memcontrol.c) */
+#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
+#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
+#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
+
+/* Isolate clean file */
+#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
+/* Isolate unmapped file */
+#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
+/* Isolate for asynchronous migration */
+#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
+/* Isolate unevictable pages */
+#define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8)
+
+/* LRU Isolation modes. */
+typedef unsigned __bitwise__ isolate_mode_t;
+
+enum zone_watermarks {
+ WMARK_MIN,
+ WMARK_LOW,
+ WMARK_HIGH,
+ NR_WMARK
+};
+
+#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
+#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
+#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+
+struct per_cpu_pages {
+ int count; /* number of pages in the list */
+ int high; /* high watermark, emptying needed */
+ int batch; /* chunk size for buddy add/remove */
+
+ /* Lists of pages, one per migrate type stored on the pcp-lists */
+ struct list_head lists[MIGRATE_PCPTYPES];
+};
+
+struct per_cpu_pageset {
+ struct per_cpu_pages pcp;
+#ifdef CONFIG_NUMA
+ s8 expire;
+#endif
+#ifdef CONFIG_SMP
+ s8 stat_threshold;
+ s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
+#endif
+};
+
+#endif /* !__GENERATING_BOUNDS_H */
+
+enum zone_type {
+#ifdef CONFIG_ZONE_DMA
+ /*
+ * ZONE_DMA is used when there are devices that are not able
+ * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+ * carve out the portion of memory that is needed for these devices.
+ * The range is arch specific.
+ *
+ * Some examples
+ *
+ * Architecture Limit
+ * ---------------------------
+ * parisc, ia64, sparc <4G
+ * s390 <2G
+ * arm Various
+ * alpha Unlimited or 0-16MB.
+ *
+ * i386, x86_64 and multiple other arches
+ * <16M.
+ */
+ ZONE_DMA,
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ /*
+ * x86_64 needs two ZONE_DMAs because it supports devices that are
+ * only able to do DMA to the lower 16M but also 32 bit devices that
+ * can only do DMA areas below 4G.
+ */
+ ZONE_DMA32,
+#endif
+ /*
+ * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+ * performed on pages in ZONE_NORMAL if the DMA devices support
+ * transfers to all addressable memory.
+ */
+ ZONE_NORMAL,
+#ifdef CONFIG_HIGHMEM
+ /*
+ * A memory area that is only addressable by the kernel through
+ * mapping portions into its own address space. This is for example
+ * used by i386 to allow the kernel to address the memory beyond
+ * 900MB. The kernel will set up special mappings (page
+ * table entries on i386) for each page that the kernel needs to
+ * access.
+ */
+ ZONE_HIGHMEM,
+#endif
+ ZONE_MOVABLE,
+ __MAX_NR_ZONES
+};
+
+#ifndef __GENERATING_BOUNDS_H
+
+struct zone {
+ /* Read-mostly fields */
+
+ /* zone watermarks, access with *_wmark_pages(zone) macros */
+ unsigned long watermark[NR_WMARK];
+
+ /*
+	 * We don't know whether the memory that we're going to allocate will be
+	 * freeable and/or whether it will be released eventually, so to avoid
+	 * totally wasting several GB of RAM we must reserve some of the lower
+	 * zone memory (otherwise we risk running OOM on the lower zones even
+	 * though there is plenty of freeable RAM on the higher zones).  This
+	 * array is recalculated at runtime if the sysctl_lowmem_reserve_ratio
+	 * sysctl changes.
+ */
+ long lowmem_reserve[MAX_NR_ZONES];
+
+#ifdef CONFIG_NUMA
+ int node;
+#endif
+
+ /*
+ * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
+ * this zone's LRU. Maintained by the pageout code.
+ */
+ unsigned int inactive_ratio;
+
+ struct pglist_data *zone_pgdat;
+ struct per_cpu_pageset __percpu *pageset;
+
+ /*
+ * This is a per-zone reserve of pages that should not be
+ * considered dirtyable memory.
+ */
+ unsigned long dirty_balance_reserve;
+
+#ifndef CONFIG_SPARSEMEM
+ /*
+ * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
+ * In SPARSEMEM, this map is stored in struct mem_section
+ */
+ unsigned long *pageblock_flags;
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_NUMA
+ /*
+ * zone reclaim becomes active if more unmapped pages exist.
+ */
+ unsigned long min_unmapped_pages;
+ unsigned long min_slab_pages;
+#endif /* CONFIG_NUMA */
+
+ /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
+ unsigned long zone_start_pfn;
+
+ /*
+ * spanned_pages is the total pages spanned by the zone, including
+ * holes, which is calculated as:
+ * spanned_pages = zone_end_pfn - zone_start_pfn;
+ *
+ * present_pages is physical pages existing within the zone, which
+ * is calculated as:
+ * present_pages = spanned_pages - absent_pages(pages in holes);
+ *
+ * managed_pages is present pages managed by the buddy system, which
+ * is calculated as (reserved_pages includes pages allocated by the
+ * bootmem allocator):
+ * managed_pages = present_pages - reserved_pages;
+ *
+ * So present_pages may be used by memory hotplug or memory power
+ * management logic to figure out unmanaged pages by checking
+ * (present_pages - managed_pages). And managed_pages should be used
+ * by page allocator and vm scanner to calculate all kinds of watermarks
+ * and thresholds.
+ *
+ * Locking rules:
+ *
+ * zone_start_pfn and spanned_pages are protected by span_seqlock.
+ * It is a seqlock because it has to be read outside of zone->lock,
+ * and it is done in the main allocator path. But, it is written
+ * quite infrequently.
+ *
+ * The span_seq lock is declared along with zone->lock because it is
+ * frequently read in proximity to zone->lock. It's good to
+ * give them a chance of being in the same cacheline.
+ *
+ * Write access to present_pages at runtime should be protected by
+	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
+	 * present_pages should use get_online_mems() to get a stable value.
+	 *
+	 * Read access to managed_pages should be safe because it's unsigned
+	 * long. Write access to zone->managed_pages and totalram_pages is
+	 * protected by managed_page_count_lock at runtime. Ideally only
+	 * adjust_managed_page_count() should be used instead of directly
+	 * touching zone->managed_pages and totalram_pages.
+ */
+ unsigned long managed_pages;
+ unsigned long spanned_pages;
+ unsigned long present_pages;
+
+ const char *name;
+
+ /*
+	 * Number of MIGRATE_RESERVE pageblocks.  Maintained purely as an
+	 * optimization.  Protected by zone->lock.
+ */
+ int nr_migrate_reserve_block;
+
+#ifdef CONFIG_MEMORY_ISOLATION
+ /*
+	 * Number of isolated pageblocks.  It is used to solve the incorrect
+	 * freepage counting problem caused by racily retrieving the
+	 * migratetype of a pageblock.  Protected by zone->lock.
+ */
+ unsigned long nr_isolate_pageblock;
+#endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* see spanned/present_pages for more description */
+ seqlock_t span_seqlock;
+#endif
+
+ /*
+ * wait_table -- the array holding the hash table
+ * wait_table_hash_nr_entries -- the size of the hash table array
+ * wait_table_bits -- wait_table_size == (1 << wait_table_bits)
+ *
+ * The purpose of all these is to keep track of the people
+ * waiting for a page to become available and make them
+ * runnable again when possible. The trouble is that this
+ * consumes a lot of space, especially when so few things
+ * wait on pages at a given time. So instead of using
+ * per-page waitqueues, we use a waitqueue hash table.
+ *
+ * The bucket discipline is to sleep on the same queue when
+ * colliding and wake all in that wait queue when removing.
+ * When something wakes, it must check to be sure its page is
+ * truly available, a la thundering herd. The cost of a
+ * collision is great, but given the expected load of the
+ * table, they should be so rare as to be outweighed by the
+ * benefits from the saved space.
+ *
+	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
+	 * primary users of these fields, and free_area_init_core() in
+	 * mm/page_alloc.c performs their initialization.
+ */
+ wait_queue_head_t *wait_table;
+ unsigned long wait_table_hash_nr_entries;
+ unsigned long wait_table_bits;
+
+ ZONE_PADDING(_pad1_)
+ /* free areas of different sizes */
+ struct free_area free_area[MAX_ORDER];
+
+ /* zone flags, see below */
+ unsigned long flags;
+
+ /* Write-intensive fields used from the page allocator */
+ spinlock_t lock;
+
+ ZONE_PADDING(_pad2_)
+
+ /* Write-intensive fields used by page reclaim */
+
+ /* Fields commonly accessed by the page reclaim scanner */
+ spinlock_t lru_lock;
+ struct lruvec lruvec;
+
+ /* Evictions & activations on the inactive file list */
+ atomic_long_t inactive_age;
+
+ /*
+ * When free pages are below this point, additional steps are taken
+ * when reading the number of free pages to avoid per-cpu counter
+ * drift allowing watermarks to be breached
+ */
+ unsigned long percpu_drift_mark;
+
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+ /* pfn where compaction free scanner should start */
+ unsigned long compact_cached_free_pfn;
+ /* pfn where async and sync compaction migration scanner should start */
+ unsigned long compact_cached_migrate_pfn[2];
+#endif
+
+#ifdef CONFIG_COMPACTION
+ /*
+ * On compaction failure, 1<<compact_defer_shift compactions
+ * are skipped before trying again. The number attempted since
+ * last failure is tracked with compact_considered.
+ */
+ unsigned int compact_considered;
+ unsigned int compact_defer_shift;
+ int compact_order_failed;
+#endif
+
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+ /* Set to true when the PG_migrate_skip bits should be cleared */
+ bool compact_blockskip_flush;
+#endif
+
+ ZONE_PADDING(_pad3_)
+ /* Zone statistics */
+ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+} ____cacheline_internodealigned_in_smp;
+
+enum zone_flags {
+ ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
+ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
+ ZONE_CONGESTED, /* zone has many dirty pages backed by
+ * a congested BDI
+ */
+ ZONE_DIRTY, /* reclaim scanning has recently found
+ * many dirty file pages at the tail
+ * of the LRU.
+ */
+ ZONE_WRITEBACK, /* reclaim scanning has recently found
+ * many pages under writeback
+ */
+ ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
+};
+
+static inline unsigned long zone_end_pfn(const struct zone *zone)
+{
+ return zone->zone_start_pfn + zone->spanned_pages;
+}
+
+static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
+{
+ return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
+}
+
+static inline bool zone_is_initialized(struct zone *zone)
+{
+ return !!zone->wait_table;
+}
+
+static inline bool zone_is_empty(struct zone *zone)
+{
+ return zone->spanned_pages == 0;
+}
+
+/*
+ * The "priority" of VM scanning is how much of the queues we will scan in one
+ * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
+ * queues ("queue_length >> 12") during an aging round.
+ */
+#define DEF_PRIORITY 12
+
+/* Maximum number of zones on a zonelist */
+#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)
+
+#ifdef CONFIG_NUMA
+
+/*
+ * The NUMA zonelists are doubled because we need zonelists that restrict the
+ * allocations to a single node for __GFP_THISNODE.
+ *
+ * [0] : Zonelist with fallback
+ * [1] : No fallback (__GFP_THISNODE)
+ */
+#define MAX_ZONELISTS 2
+
+
+/*
+ * We cache key information from each zonelist for smaller cache
+ * footprint when scanning for free pages in get_page_from_freelist().
+ *
+ * 1) The BITMAP fullzones tracks which zones in a zonelist have come
+ * up short of free memory since the last time (last_fullzone_zap)
+ * we zero'd fullzones.
+ * 2) The array z_to_n[] maps each zone in the zonelist to its node
+ * id, so that we can efficiently evaluate whether that node is
+ * set in the current task's mems_allowed.
+ *
+ * Both fullzones and z_to_n[] are one-to-one with the zonelist,
+ * indexed by a zones offset in the zonelist zones[] array.
+ *
+ * The get_page_from_freelist() routine does two scans. During the
+ * first scan, we skip zones whose corresponding bit in 'fullzones'
+ * is set or whose corresponding node in current->mems_allowed (which
+ * comes from cpusets) is not set. During the second scan, we bypass
+ * this zonelist_cache, to ensure we look methodically at each zone.
+ *
+ * Once per second, we zero out (zap) fullzones, forcing us to
+ * reconsider nodes that might have regained more free memory.
+ * The field last_full_zap is the time we last zapped fullzones.
+ *
+ * This mechanism reduces the amount of time we waste repeatedly
+ * re-examining zones for free memory when they came up low on
+ * memory only moments ago.
+ *
+ * The zonelist_cache struct members logically belong in struct
+ * zonelist. However, the mempolicy zonelists constructed for
+ * MPOL_BIND are intentionally variable length (and usually much
+ * shorter). A general purpose mechanism for handling structs with
+ * multiple variable length members is more mechanism than we want
+ * here. We resort to some special case hackery instead.
+ *
+ * The MPOL_BIND zonelists don't need this zonelist_cache (in good
+ * part because they are shorter), so we put the fixed length stuff
+ * at the front of the zonelist struct, ending in a variable length
+ * zones[], as is needed by MPOL_BIND.
+ *
+ * Then we put the optional zonelist cache on the end of the zonelist
+ * struct. This optional stuff is found by a 'zlcache_ptr' pointer in
+ * the fixed length portion at the front of the struct. This pointer
+ * both enables us to find the zonelist cache, and in the case of
+ * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
+ * to know that the zonelist cache is not there.
+ *
+ * The end result is that struct zonelists come in two flavors:
+ * 1) The full, fixed length version, shown below, and
+ * 2) The custom zonelists for MPOL_BIND.
+ * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
+ *
+ * Even though there may be multiple CPU cores on a node modifying
+ * fullzones or last_full_zap in the same zonelist_cache at the same
+ * time, we don't lock it. This is just hint data - if it is wrong now
+ * and then, the allocator will still function, perhaps a bit slower.
+ */
+
+
+struct zonelist_cache {
+ unsigned short z_to_n[MAX_ZONES_PER_ZONELIST]; /* zone->nid */
+ DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST); /* zone full? */
+ unsigned long last_full_zap; /* when last zap'd (jiffies) */
+};
+#else
+#define MAX_ZONELISTS 1
+struct zonelist_cache;
+#endif
+
+/*
+ * This struct contains information about a zone in a zonelist. It is stored
+ * here to avoid dereferences into large structures and lookups of tables
+ */
+struct zoneref {
+ struct zone *zone; /* Pointer to actual zone */
+ int zone_idx; /* zone_idx(zoneref->zone) */
+};
+
+/*
+ * One allocation request operates on a zonelist. A zonelist
+ * is a list of zones, the first one is the 'goal' of the
+ * allocation, the other zones are fallback zones, in decreasing
+ * priority.
+ *
+ * If zlcache_ptr is not NULL, then it is just the address of zlcache,
+ * as explained above. If zlcache_ptr is NULL, there is no zlcache.
+ *
+ * To speed the reading of the zonelist, the zonerefs contain the zone index
+ * of the entry being read. Helper functions to access information given
+ * a struct zoneref are
+ *
+ * zonelist_zone() - Return the struct zone * for an entry in _zonerefs
+ * zonelist_zone_idx() - Return the index of the zone for an entry
+ * zonelist_node_idx() - Return the index of the node for an entry
+ */
+struct zonelist {
+ struct zonelist_cache *zlcache_ptr; // NULL or &zlcache
+ struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
+#ifdef CONFIG_NUMA
+ struct zonelist_cache zlcache; // optional ...
+#endif
+};
+
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+struct node_active_region {
+ unsigned long start_pfn;
+ unsigned long end_pfn;
+ int nid;
+};
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#ifndef CONFIG_DISCONTIGMEM
+/* The array of struct pages - for discontigmem use pgdat->lmem_map */
+extern struct page *mem_map;
+#endif
+
+/*
+ * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
+ * (mostly NUMA machines?) to denote a higher-level memory zone than the
+ * zone denotes.
+ *
+ * On NUMA machines, each NUMA node has a pg_data_t to describe
+ * its memory layout.
+ *
+ * Memory statistics and page replacement data structures are maintained on a
+ * per-zone basis.
+ */
+struct bootmem_data;
+typedef struct pglist_data {
+ struct zone node_zones[MAX_NR_ZONES];
+ struct zonelist node_zonelists[MAX_ZONELISTS];
+ int nr_zones;
+#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
+ struct page *node_mem_map;
+#ifdef CONFIG_PAGE_EXTENSION
+ struct page_ext *node_page_ext;
+#endif
+#endif
+#ifndef CONFIG_NO_BOOTMEM
+ struct bootmem_data *bdata;
+#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /*
+	 * Must be held any time you expect node_start_pfn, node_present_pages
+	 * or node_spanned_pages to stay constant.  Holding this will also
+ * guarantee that any pfn_valid() stays that way.
+ *
+ * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
+ * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
+ *
+ * Nests above zone->lock and zone->span_seqlock
+ */
+ spinlock_t node_size_lock;
+#endif
+ unsigned long node_start_pfn;
+ unsigned long node_present_pages; /* total number of physical pages */
+ unsigned long node_spanned_pages; /* total size of physical page
+ range, including holes */
+ int node_id;
+ wait_queue_head_t kswapd_wait;
+ wait_queue_head_t pfmemalloc_wait;
+ struct task_struct *kswapd; /* Protected by
+ mem_hotplug_begin/end() */
+ int kswapd_max_order;
+ enum zone_type classzone_idx;
+#ifdef CONFIG_NUMA_BALANCING
+ /* Lock serializing the migrate rate limiting window */
+ spinlock_t numabalancing_migrate_lock;
+
+ /* Rate limiting time interval */
+ unsigned long numabalancing_migrate_next_window;
+
+ /* Number of pages migrated during the rate limiting time interval */
+ unsigned long numabalancing_migrate_nr_pages;
+#endif
+} pg_data_t;
+
+#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
+#define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages)
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr))
+#else
+#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr))
+#endif
+#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr))
+
+#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
+#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
+
+static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
+{
+ return pgdat->node_start_pfn + pgdat->node_spanned_pages;
+}
+
+static inline bool pgdat_is_empty(pg_data_t *pgdat)
+{
+ return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
+}
+
+#include <linux/memory_hotplug.h>
+
+extern struct mutex zonelists_mutex;
+void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
+void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+bool zone_watermark_ok(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx, int alloc_flags);
+bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
+ unsigned long mark, int classzone_idx, int alloc_flags);
+enum memmap_context {
+ MEMMAP_EARLY,
+ MEMMAP_HOTPLUG,
+};
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long size,
+ enum memmap_context context);
+
+extern void lruvec_init(struct lruvec *lruvec);
+
+static inline struct zone *lruvec_zone(struct lruvec *lruvec)
+{
+#ifdef CONFIG_MEMCG
+ return lruvec->zone;
+#else
+ return container_of(lruvec, struct zone, lruvec);
+#endif
+}
+
+#ifdef CONFIG_HAVE_MEMORY_PRESENT
+void memory_present(int nid, unsigned long start, unsigned long end);
+#else
+static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
+#endif
+
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+int local_memory_node(int node_id);
+#else
+static inline int local_memory_node(int node_id) { return node_id; };
+#endif
+
+#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
+unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
+#endif
+
+/*
+ * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
+ */
+#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
+
+static inline int populated_zone(struct zone *zone)
+{
+ return (!!zone->present_pages);
+}
+
+extern int movable_zone;
+
+#ifdef CONFIG_HIGHMEM
+static inline int zone_movable_is_highmem(void)
+{
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+ return movable_zone == ZONE_HIGHMEM;
+#else
+ return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
+#endif
+}
+#endif
+
+static inline int is_highmem_idx(enum zone_type idx)
+{
+#ifdef CONFIG_HIGHMEM
+ return (idx == ZONE_HIGHMEM ||
+ (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
+#else
+ return 0;
+#endif
+}
+
+/**
+ * is_highmem - helper function to quickly check if a struct zone is a
+ * highmem zone or not. This is an attempt to keep references
+ * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
+ * @zone - pointer to struct zone variable
+ */
+static inline int is_highmem(struct zone *zone)
+{
+#ifdef CONFIG_HIGHMEM
+ int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
+ return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
+ (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
+ zone_movable_is_highmem());
+#else
+ return 0;
+#endif
+}
+
+/* These two functions are used to setup the per zone pages min values */
+struct ctl_table;
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+
+extern int numa_zonelist_order_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern char numa_zonelist_order[];
+#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+
+extern struct pglist_data contig_page_data;
+#define NODE_DATA(nid) (&contig_page_data)
+#define NODE_MEM_MAP(nid) mem_map
+
+#else /* CONFIG_NEED_MULTIPLE_NODES */
+
+#include <asm/mmzone.h>
+
+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+
+extern struct pglist_data *first_online_pgdat(void);
+extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
+extern struct zone *next_zone(struct zone *zone);
+
+/**
+ * for_each_online_pgdat - helper macro to iterate over all online nodes
+ * @pgdat - pointer to a pg_data_t variable
+ */
+#define for_each_online_pgdat(pgdat) \
+ for (pgdat = first_online_pgdat(); \
+ pgdat; \
+ pgdat = next_online_pgdat(pgdat))
+/**
+ * for_each_zone - helper macro to iterate over all memory zones
+ * @zone - pointer to struct zone variable
+ *
+ * The user only needs to declare the zone variable, for_each_zone
+ * fills it in.
+ */
+#define for_each_zone(zone) \
+ for (zone = (first_online_pgdat())->node_zones; \
+ zone; \
+ zone = next_zone(zone))
+
+#define for_each_populated_zone(zone) \
+ for (zone = (first_online_pgdat())->node_zones; \
+ zone; \
+ zone = next_zone(zone)) \
+ if (!populated_zone(zone)) \
+ ; /* do nothing */ \
+ else
+
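+/*
+ * Illustrative sketch only (not part of this header): a typical use of the
+ * iterator above is to walk every populated zone in the system, e.g. to
+ * total the pages managed by the buddy allocator.  The function name is
+ * hypothetical.
+ *
+ *	static unsigned long total_managed_pages(void)
+ *	{
+ *		struct zone *zone;
+ *		unsigned long pages = 0;
+ *
+ *		for_each_populated_zone(zone)
+ *			pages += zone->managed_pages;
+ *		return pages;
+ *	}
+ */
+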
+static inline struct zone *zonelist_zone(struct zoneref *zoneref)
+{
+ return zoneref->zone;
+}
+
+static inline int zonelist_zone_idx(struct zoneref *zoneref)
+{
+ return zoneref->zone_idx;
+}
+
+static inline int zonelist_node_idx(struct zoneref *zoneref)
+{
+#ifdef CONFIG_NUMA
+ /* zone_to_nid not available in this context */
+ return zoneref->zone->node;
+#else
+ return 0;
+#endif /* CONFIG_NUMA */
+}
+
+/**
+ * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
+ * @z - The cursor used as a starting point for the search
+ * @highest_zoneidx - The zone index of the highest zone to return
+ * @nodes - An optional nodemask to filter the zonelist with
+ *
+ * This function returns the next zone at or below a given zone index that is
+ * within the allowed nodemask using a cursor as the starting point for the
+ * search. The zoneref returned is a cursor that represents the current zone
+ * being examined. It should be advanced by one before calling
+ * next_zones_zonelist again.
+ */
+struct zoneref *next_zones_zonelist(struct zoneref *z,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes);
+
+/**
+ * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
+ * @zonelist - The zonelist to search for a suitable zone
+ * @highest_zoneidx - The zone index of the highest zone to return
+ * @nodes - An optional nodemask to filter the zonelist with
+ * @zone - The first suitable zone found is returned via this parameter
+ *
+ * This function returns the first zone at or below a given zone index that is
+ * within the allowed nodemask. The zoneref returned is a cursor that can be
+ * used to iterate the zonelist with next_zones_zonelist by advancing it by
+ * one before calling.
+ */
+static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
+ enum zone_type highest_zoneidx,
+ nodemask_t *nodes,
+ struct zone **zone)
+{
+ struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
+ highest_zoneidx, nodes);
+ *zone = zonelist_zone(z);
+ return z;
+}
+
+/**
+ * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
+ * @zone - The current zone in the iterator
+ * @z - The current pointer within zonelist->zones being iterated
+ * @zlist - The zonelist being iterated
+ * @highidx - The zone index of the highest zone to return
+ * @nodemask - Nodemask allowed by the allocator
+ *
+ * This iterator iterates though all zones at or below a given zone index and
+ * within a given nodemask
+ */
+#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+ for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
+ zone; \
+ z = next_zones_zonelist(++z, highidx, nodemask), \
+ zone = zonelist_zone(z)) \
+
+/**
+ * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
+ * @zone - The current zone in the iterator
+ * @z - The current pointer within zonelist->zones being iterated
+ * @zlist - The zonelist being iterated
+ * @highidx - The zone index of the highest zone to return
+ *
+ * This iterator iterates though all zones at or below a given zone index.
+ */
+#define for_each_zone_zonelist(zone, z, zlist, highidx) \
+ for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
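+
+/*
+ * Illustrative sketch only (not part of this header): the iterator above is
+ * typically used to walk a zonelist no higher than a given zone index, e.g.
+ * to total the free pages usable for an allocation.  zone_page_state() comes
+ * from linux/vmstat.h; the function name is hypothetical.
+ *
+ *	static unsigned long free_pages_in_zonelist(struct zonelist *zlist,
+ *						    enum zone_type highidx)
+ *	{
+ *		struct zoneref *z;
+ *		struct zone *zone;
+ *		unsigned long free = 0;
+ *
+ *		for_each_zone_zonelist(zone, z, zlist, highidx)
+ *			free += zone_page_state(zone, NR_FREE_PAGES);
+ *		return free;
+ *	}
+ */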
+
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
+#endif
+
+#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
+ !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
+static inline unsigned long early_pfn_to_nid(unsigned long pfn)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_FLATMEM
+#define pfn_to_nid(pfn) (0)
+#endif
+
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTION_SHIFT #bits space required to store a section #
+ *
+ * PA_SECTION_SHIFT physical address to/from section number
+ * PFN_SECTION_SHIFT pfn to/from section number
+ */
+#define PA_SECTION_SHIFT (SECTION_SIZE_BITS)
+#define PFN_SECTION_SHIFT (SECTION_SIZE_BITS - PAGE_SHIFT)
+
+#define NR_MEM_SECTIONS (1UL << SECTIONS_SHIFT)
+
+#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)
+#define PAGE_SECTION_MASK (~(PAGES_PER_SECTION-1))
+
+#define SECTION_BLOCKFLAGS_BITS \
+ ((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)
+
+#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
+#error Allocator MAX_ORDER exceeds SECTION_SIZE
+#endif
+
+#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
+#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)
+
+#define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
+#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
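+
+/*
+ * Worked example (illustrative, with assumed values): with
+ * SECTION_SIZE_BITS == 27 (as on some 64-bit configurations) and
+ * PAGE_SHIFT == 12, PFN_SECTION_SHIFT is 27 - 12 = 15, so PAGES_PER_SECTION
+ * is 1 << 15 = 32768 pages and each mem_section covers 128 MiB.
+ * pfn_to_section_nr(pfn) is then simply pfn >> 15.
+ */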
+
+struct page;
+struct page_ext;
+struct mem_section {
+ /*
+ * This is, logically, a pointer to an array of struct
+ * pages. However, it is stored with some other magic.
+ * (see sparse.c::sparse_init_one_section())
+ *
+ * Additionally during early boot we encode node id of
+ * the location of the section here to guide allocation.
+ * (see sparse.c::memory_present())
+ *
+ * Making it a UL at least makes someone do a cast
+ * before using it wrong.
+ */
+ unsigned long section_mem_map;
+
+ /* See declaration of similar field in struct zone */
+ unsigned long *pageblock_flags;
+#ifdef CONFIG_PAGE_EXTENSION
+ /*
+	 * If SPARSEMEM, pgdat doesn't have a page_ext pointer, so we use
+	 * the section's instead. (see page_ext.h about this.)
+ */
+ struct page_ext *page_ext;
+ unsigned long pad;
+#endif
+ /*
+ * WARNING: mem_section must be a power-of-2 in size for the
+ * calculation and use of SECTION_ROOT_MASK to make sense.
+ */
+};
+
+#ifdef CONFIG_SPARSEMEM_EXTREME
+#define SECTIONS_PER_ROOT (PAGE_SIZE / sizeof (struct mem_section))
+#else
+#define SECTIONS_PER_ROOT 1
+#endif
+
+#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
+#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
+
+#ifdef CONFIG_SPARSEMEM_EXTREME
+extern struct mem_section *mem_section[NR_SECTION_ROOTS];
+#else
+extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
+#endif
+
+static inline struct mem_section *__nr_to_section(unsigned long nr)
+{
+ if (!mem_section[SECTION_NR_TO_ROOT(nr)])
+ return NULL;
+ return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+}
+extern int __section_nr(struct mem_section* ms);
+extern unsigned long usemap_size(void);
+
+/*
+ * We use the lower bits of the mem_map pointer to store
+ * a little bit of information. There should be at least
+ * 3 bits here due to 32-bit alignment.
+ */
+#define SECTION_MARKED_PRESENT (1UL<<0)
+#define SECTION_HAS_MEM_MAP (1UL<<1)
+#define SECTION_MAP_LAST_BIT (1UL<<2)
+#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT 2
+
+static inline struct page *__section_mem_map_addr(struct mem_section *section)
+{
+ unsigned long map = section->section_mem_map;
+ map &= SECTION_MAP_MASK;
+ return (struct page *)map;
+}
+
+static inline int present_section(struct mem_section *section)
+{
+ return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
+}
+
+static inline int present_section_nr(unsigned long nr)
+{
+ return present_section(__nr_to_section(nr));
+}
+
+static inline int valid_section(struct mem_section *section)
+{
+ return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
+}
+
+static inline int valid_section_nr(unsigned long nr)
+{
+ return valid_section(__nr_to_section(nr));
+}
+
+static inline struct mem_section *__pfn_to_section(unsigned long pfn)
+{
+ return __nr_to_section(pfn_to_section_nr(pfn));
+}
+
+#ifndef CONFIG_HAVE_ARCH_PFN_VALID
+static inline int pfn_valid(unsigned long pfn)
+{
+ if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ return 0;
+ return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
+}
+#endif
+
+static inline int pfn_present(unsigned long pfn)
+{
+ if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ return 0;
+ return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
+}
+
+/*
+ * These are _only_ used during initialisation, therefore they
+ * can use __initdata ... They could have names to indicate
+ * this restriction.
+ */
+#ifdef CONFIG_NUMA
+#define pfn_to_nid(pfn) \
+({ \
+ unsigned long __pfn_to_nid_pfn = (pfn); \
+ page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
+})
+#else
+#define pfn_to_nid(pfn) (0)
+#endif
+
+#define early_pfn_valid(pfn) pfn_valid(pfn)
+void sparse_init(void);
+#else
+#define sparse_init() do {} while (0)
+#define sparse_index_init(_sec, _nid) do {} while (0)
+#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool early_pfn_in_nid(unsigned long pfn, int nid);
+#else
+#define early_pfn_in_nid(pfn, nid) (1)
+#endif
+
+#ifndef early_pfn_valid
+#define early_pfn_valid(pfn) (1)
+#endif
+
+void memory_present(int nid, unsigned long start, unsigned long end);
+unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
+
+/*
+ * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
+ * need to check pfn validity within that block.
+ * pfn_valid_within() should be used in this case; we optimise this away
+ * when we have no holes within a MAX_ORDER_NR_PAGES block.
+ */
+#ifdef CONFIG_HOLES_IN_ZONE
+#define pfn_valid_within(pfn) pfn_valid(pfn)
+#else
+#define pfn_valid_within(pfn) (1)
+#endif
+
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+/*
+ * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
+ * associated with it or not. In FLATMEM, it is expected that holes always
+ * have valid memmap as long as there is valid PFNs either side of the hole.
+ * In SPARSEMEM, it is assumed that a valid section has a memmap for the
+ * entire section.
+ *
+ * However, ARM (and maybe other embedded architectures in the future) frees
+ * the memmap backing holes to save memory, on the assumption that the memmap
+ * is never used. The page_zone linkages are then broken even though pfn_valid()
+ * returns true. A walker of the full memmap must then do this additional
+ * check to ensure the memmap they are looking at is sane by making sure
+ * the zone and PFN linkages are still valid. This is expensive, but walkers
+ * of the full memmap are extremely rare.
+ */
+int memmap_valid_within(unsigned long pfn,
+ struct page *page, struct zone *zone);
+#else
+static inline int memmap_valid_within(unsigned long pfn,
+ struct page *page, struct zone *zone)
+{
+ return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
+#endif /* !__GENERATING_BOUNDS_H */
+#endif /* !__ASSEMBLY__ */
+#endif /* _LINUX_MMZONE_H */
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
new file mode 100644
index 000000000..12b2ab510
--- /dev/null
+++ b/include/linux/mnt_namespace.h
@@ -0,0 +1,18 @@
+#ifndef _NAMESPACE_H_
+#define _NAMESPACE_H_
+#ifdef __KERNEL__
+
+struct mnt_namespace;
+struct fs_struct;
+struct user_namespace;
+
+extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
+ struct user_namespace *, struct fs_struct *);
+extern void put_mnt_ns(struct mnt_namespace *ns);
+
+extern const struct file_operations proc_mounts_operations;
+extern const struct file_operations proc_mountinfo_operations;
+extern const struct file_operations proc_mountstats_operations;
+
+#endif
+#endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
new file mode 100644
index 000000000..3bfd56778
--- /dev/null
+++ b/include/linux/mod_devicetable.h
@@ -0,0 +1,632 @@
+/*
+ * Device tables which are exported to userspace via
+ * scripts/mod/file2alias.c. You must keep that file in sync with this
+ * header.
+ */
+
+#ifndef LINUX_MOD_DEVICETABLE_H
+#define LINUX_MOD_DEVICETABLE_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/uuid.h>
+typedef unsigned long kernel_ulong_t;
+#endif
+
+#define PCI_ANY_ID (~0)
+
+struct pci_device_id {
+	__u32 vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
+	__u32 subvendor, subdevice;	/* Subsystem IDs or PCI_ANY_ID */
+ __u32 class, class_mask; /* (class,subclass,prog-if) triplet */
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
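+
+/*
+ * Illustrative sketch only (not part of this header): a driver typically
+ * declares a table of these entries, terminated by an all-zeroes entry, and
+ * exports it with MODULE_DEVICE_TABLE().  PCI_DEVICE() is provided by
+ * linux/pci.h; the vendor/device numbers and the my_pci_ids name below are
+ * hypothetical.
+ *
+ *	static const struct pci_device_id my_pci_ids[] = {
+ *		{ PCI_DEVICE(0x1234, 0x5678) },
+ *		{ }
+ *	};
+ *	MODULE_DEVICE_TABLE(pci, my_pci_ids);
+ */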
+
+
+#define IEEE1394_MATCH_VENDOR_ID 0x0001
+#define IEEE1394_MATCH_MODEL_ID 0x0002
+#define IEEE1394_MATCH_SPECIFIER_ID 0x0004
+#define IEEE1394_MATCH_VERSION 0x0008
+
+struct ieee1394_device_id {
+ __u32 match_flags;
+ __u32 vendor_id;
+ __u32 model_id;
+ __u32 specifier_id;
+ __u32 version;
+ kernel_ulong_t driver_data;
+};
+
+
+/*
+ * Device table entry for "new style" table-driven USB drivers.
+ * User mode code can read these tables to choose which modules to load.
+ * Declare the table as a MODULE_DEVICE_TABLE.
+ *
+ * A probe() parameter will point to a matching entry from this table.
+ * Use the driver_info field for each match to hold information tied
+ * to that match: device quirks, etc.
+ *
+ * Terminate the driver's table with an all-zeroes entry.
+ * Use the flag values to control which fields are compared.
+ */
+
+/**
+ * struct usb_device_id - identifies USB devices for probing and hotplugging
+ * @match_flags: Bit mask controlling which of the other fields are used to
+ * match against new devices. Any field except for driver_info may be
+ * used, although some only make sense in conjunction with other fields.
+ * This is usually set by a USB_DEVICE_*() macro, which sets all
+ * other fields in this structure except for driver_info.
+ * @idVendor: USB vendor ID for a device; numbers are assigned
+ * by the USB forum to its members.
+ * @idProduct: Vendor-assigned product ID.
+ * @bcdDevice_lo: Low end of range of vendor-assigned product version numbers.
+ * This is also used to identify individual product versions, for
+ * a range consisting of a single device.
+ * @bcdDevice_hi: High end of version number range. The range of product
+ * versions is inclusive.
+ * @bDeviceClass: Class of device; numbers are assigned
+ * by the USB forum. Products may choose to implement classes,
+ * or be vendor-specific. Device classes specify behavior of all
+ * the interfaces on a device.
+ * @bDeviceSubClass: Subclass of device; associated with bDeviceClass.
+ * @bDeviceProtocol: Protocol of device; associated with bDeviceClass.
+ * @bInterfaceClass: Class of interface; numbers are assigned
+ * by the USB forum. Products may choose to implement classes,
+ * or be vendor-specific. Interface classes specify behavior only
+ * of a given interface; other interfaces may support other classes.
+ * @bInterfaceSubClass: Subclass of interface; associated with bInterfaceClass.
+ * @bInterfaceProtocol: Protocol of interface; associated with bInterfaceClass.
+ * @bInterfaceNumber: Number of interface; composite devices may use
+ * fixed interface numbers to differentiate between vendor-specific
+ * interfaces.
+ * @driver_info: Holds information used by the driver. Usually it holds
+ * a pointer to a descriptor understood by the driver, or perhaps
+ * device flags.
+ *
+ * In most cases, drivers will create a table of device IDs by using
+ * USB_DEVICE(), or similar macros designed for that purpose.
+ * They will then export it to userspace using MODULE_DEVICE_TABLE(),
+ * and provide it to the USB core through their usb_driver structure.
+ *
+ * See the usb_match_id() function for information about how matches are
+ * performed. Briefly, you will normally use one of several macros to help
+ * construct these entries. Each entry you provide will either identify
+ * one or more specific products, or will identify a class of products
+ * which have agreed to behave the same. You should put the more specific
+ * matches towards the beginning of your table, so that driver_info can
+ * record quirks of specific products.
+ */
+struct usb_device_id {
+ /* which fields to match against? */
+ __u16 match_flags;
+
+ /* Used for product specific matches; range is inclusive */
+ __u16 idVendor;
+ __u16 idProduct;
+ __u16 bcdDevice_lo;
+ __u16 bcdDevice_hi;
+
+ /* Used for device class matches */
+ __u8 bDeviceClass;
+ __u8 bDeviceSubClass;
+ __u8 bDeviceProtocol;
+
+ /* Used for interface class matches */
+ __u8 bInterfaceClass;
+ __u8 bInterfaceSubClass;
+ __u8 bInterfaceProtocol;
+
+ /* Used for vendor-specific interface matches */
+ __u8 bInterfaceNumber;
+
+ /* not matched against */
+ kernel_ulong_t driver_info
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
+};
+
+/* Some useful macros to use to create struct usb_device_id */
+#define USB_DEVICE_ID_MATCH_VENDOR 0x0001
+#define USB_DEVICE_ID_MATCH_PRODUCT 0x0002
+#define USB_DEVICE_ID_MATCH_DEV_LO 0x0004
+#define USB_DEVICE_ID_MATCH_DEV_HI 0x0008
+#define USB_DEVICE_ID_MATCH_DEV_CLASS 0x0010
+#define USB_DEVICE_ID_MATCH_DEV_SUBCLASS 0x0020
+#define USB_DEVICE_ID_MATCH_DEV_PROTOCOL 0x0040
+#define USB_DEVICE_ID_MATCH_INT_CLASS 0x0080
+#define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
+#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
+#define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
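+
+/*
+ * Illustrative sketch (not part of the original header): as described
+ * above, a driver usually fills the table with USB_DEVICE() entries
+ * (that macro lives in <linux/usb.h>) and exports it. The vendor and
+ * product IDs below are hypothetical; the empty entry terminates the
+ * table.
+ *
+ *   static const struct usb_device_id foo_usb_ids[] = {
+ *           { USB_DEVICE(0x1234, 0x5678) },
+ *           { }
+ *   };
+ *   MODULE_DEVICE_TABLE(usb, foo_usb_ids);
+ */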
+
+#define HID_ANY_ID (~0)
+#define HID_BUS_ANY 0xffff
+#define HID_GROUP_ANY 0x0000
+
+struct hid_device_id {
+ __u16 bus;
+ __u16 group;
+ __u32 vendor;
+ __u32 product;
+ kernel_ulong_t driver_data;
+};
+
+/* s390 CCW devices */
+struct ccw_device_id {
+ __u16 match_flags; /* which fields to match against */
+
+ __u16 cu_type; /* control unit type */
+ __u16 dev_type; /* device type */
+ __u8 cu_model; /* control unit model */
+ __u8 dev_model; /* device model */
+
+ kernel_ulong_t driver_info;
+};
+
+#define CCW_DEVICE_ID_MATCH_CU_TYPE 0x01
+#define CCW_DEVICE_ID_MATCH_CU_MODEL 0x02
+#define CCW_DEVICE_ID_MATCH_DEVICE_TYPE 0x04
+#define CCW_DEVICE_ID_MATCH_DEVICE_MODEL 0x08
+
+/* s390 AP bus devices */
+struct ap_device_id {
+ __u16 match_flags; /* which fields to match against */
+ __u8 dev_type; /* device type */
+ kernel_ulong_t driver_info;
+};
+
+#define AP_DEVICE_ID_MATCH_DEVICE_TYPE 0x01
+
+/* s390 css bus devices (subchannels) */
+struct css_device_id {
+ __u8 match_flags;
+ __u8 type; /* subchannel type */
+ kernel_ulong_t driver_data;
+};
+
+#define ACPI_ID_LEN 9
+
+struct acpi_device_id {
+ __u8 id[ACPI_ID_LEN];
+ kernel_ulong_t driver_data;
+};
+
+#define PNP_ID_LEN 8
+#define PNP_MAX_DEVICES 8
+
+struct pnp_device_id {
+ __u8 id[PNP_ID_LEN];
+ kernel_ulong_t driver_data;
+};
+
+struct pnp_card_device_id {
+ __u8 id[PNP_ID_LEN];
+ kernel_ulong_t driver_data;
+ struct {
+ __u8 id[PNP_ID_LEN];
+ } devs[PNP_MAX_DEVICES];
+};
+
+
+#define SERIO_ANY 0xff
+
+struct serio_device_id {
+ __u8 type;
+ __u8 extra;
+ __u8 id;
+ __u8 proto;
+};
+
+/*
+ * Struct used for matching a device
+ */
+struct of_device_id {
+ char name[32];
+ char type[32];
+ char compatible[128];
+ const void *data;
+};
+
+/* VIO */
+struct vio_device_id {
+ char type[32];
+ char compat[32];
+};
+
+/* PCMCIA */
+
+struct pcmcia_device_id {
+ __u16 match_flags;
+
+ __u16 manf_id;
+ __u16 card_id;
+
+ __u8 func_id;
+
+ /* for real multi-function devices */
+ __u8 function;
+
+ /* for pseudo multi-function devices */
+ __u8 device_no;
+
+ __u32 prod_id_hash[4];
+
+ /* not matched against in kernelspace */
+ const char * prod_id[4];
+
+ /* not matched against */
+ kernel_ulong_t driver_info;
+ char * cisfile;
+};
+
+#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001
+#define PCMCIA_DEV_ID_MATCH_CARD_ID 0x0002
+#define PCMCIA_DEV_ID_MATCH_FUNC_ID 0x0004
+#define PCMCIA_DEV_ID_MATCH_FUNCTION 0x0008
+#define PCMCIA_DEV_ID_MATCH_PROD_ID1 0x0010
+#define PCMCIA_DEV_ID_MATCH_PROD_ID2 0x0020
+#define PCMCIA_DEV_ID_MATCH_PROD_ID3 0x0040
+#define PCMCIA_DEV_ID_MATCH_PROD_ID4 0x0080
+#define PCMCIA_DEV_ID_MATCH_DEVICE_NO 0x0100
+#define PCMCIA_DEV_ID_MATCH_FAKE_CIS 0x0200
+#define PCMCIA_DEV_ID_MATCH_ANONYMOUS 0x0400
+
+/* Input */
+#define INPUT_DEVICE_ID_EV_MAX 0x1f
+#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71
+#define INPUT_DEVICE_ID_KEY_MAX 0x2ff
+#define INPUT_DEVICE_ID_REL_MAX 0x0f
+#define INPUT_DEVICE_ID_ABS_MAX 0x3f
+#define INPUT_DEVICE_ID_MSC_MAX 0x07
+#define INPUT_DEVICE_ID_LED_MAX 0x0f
+#define INPUT_DEVICE_ID_SND_MAX 0x07
+#define INPUT_DEVICE_ID_FF_MAX 0x7f
+#define INPUT_DEVICE_ID_SW_MAX 0x0f
+
+#define INPUT_DEVICE_ID_MATCH_BUS 1
+#define INPUT_DEVICE_ID_MATCH_VENDOR 2
+#define INPUT_DEVICE_ID_MATCH_PRODUCT 4
+#define INPUT_DEVICE_ID_MATCH_VERSION 8
+
+#define INPUT_DEVICE_ID_MATCH_EVBIT 0x0010
+#define INPUT_DEVICE_ID_MATCH_KEYBIT 0x0020
+#define INPUT_DEVICE_ID_MATCH_RELBIT 0x0040
+#define INPUT_DEVICE_ID_MATCH_ABSBIT 0x0080
+#define INPUT_DEVICE_ID_MATCH_MSCIT 0x0100
+#define INPUT_DEVICE_ID_MATCH_LEDBIT 0x0200
+#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400
+#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800
+#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
+
+struct input_device_id {
+
+ kernel_ulong_t flags;
+
+ __u16 bustype;
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+
+ kernel_ulong_t evbit[INPUT_DEVICE_ID_EV_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t keybit[INPUT_DEVICE_ID_KEY_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t relbit[INPUT_DEVICE_ID_REL_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t absbit[INPUT_DEVICE_ID_ABS_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t mscbit[INPUT_DEVICE_ID_MSC_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t ledbit[INPUT_DEVICE_ID_LED_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
+
+ kernel_ulong_t driver_info;
+};
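+
+/*
+ * Illustrative sketch (not part of the original header): an input
+ * handler might match every device from one hypothetical USB vendor
+ * like this (BUS_USB comes from <linux/input.h>); only the fields
+ * selected by .flags are compared.
+ *
+ *   static const struct input_device_id foo_input_ids[] = {
+ *           { .flags = INPUT_DEVICE_ID_MATCH_BUS |
+ *                      INPUT_DEVICE_ID_MATCH_VENDOR,
+ *             .bustype = BUS_USB,
+ *             .vendor = 0x1234 },
+ *           { }
+ *   };
+ *   MODULE_DEVICE_TABLE(input, foo_input_ids);
+ */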
+
+/* EISA */
+
+#define EISA_SIG_LEN 8
+
+/* The EISA signature, in ASCII form, null terminated */
+struct eisa_device_id {
+ char sig[EISA_SIG_LEN];
+ kernel_ulong_t driver_data;
+};
+
+#define EISA_DEVICE_MODALIAS_FMT "eisa:s%s"
+
+struct parisc_device_id {
+ __u8 hw_type; /* 5 bits used */
+ __u8 hversion_rev; /* 4 bits */
+ __u16 hversion; /* 12 bits */
+ __u32 sversion; /* 20 bits */
+};
+
+#define PA_HWTYPE_ANY_ID 0xff
+#define PA_HVERSION_REV_ANY_ID 0xff
+#define PA_HVERSION_ANY_ID 0xffff
+#define PA_SVERSION_ANY_ID 0xffffffff
+
+/* SDIO */
+
+#define SDIO_ANY_ID (~0)
+
+struct sdio_device_id {
+ __u8 class; /* Standard interface or SDIO_ANY_ID */
+ __u16 vendor; /* Vendor or SDIO_ANY_ID */
+ __u16 device; /* Device ID or SDIO_ANY_ID */
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
+/* SSB core, see drivers/ssb/ */
+struct ssb_device_id {
+ __u16 vendor;
+ __u16 coreid;
+ __u8 revision;
+ __u8 __pad;
+} __attribute__((packed, aligned(2)));
+#define SSB_DEVICE(_vendor, _coreid, _revision) \
+ { .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
+
+#define SSB_ANY_VENDOR 0xFFFF
+#define SSB_ANY_ID 0xFFFF
+#define SSB_ANY_REV 0xFF
+
+/* Broadcom's specific AMBA core, see drivers/bcma/ */
+struct bcma_device_id {
+ __u16 manuf;
+ __u16 id;
+ __u8 rev;
+ __u8 class;
+} __attribute__((packed,aligned(2)));
+#define BCMA_CORE(_manuf, _id, _rev, _class) \
+ { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
+
+#define BCMA_ANY_MANUF 0xFFFF
+#define BCMA_ANY_ID 0xFFFF
+#define BCMA_ANY_REV 0xFF
+#define BCMA_ANY_CLASS 0xFF
+
+struct virtio_device_id {
+ __u32 device;
+ __u32 vendor;
+};
+#define VIRTIO_DEV_ANY_ID 0xffffffff
+
+/*
+ * For Hyper-V devices we use the device guid as the id.
+ */
+struct hv_vmbus_device_id {
+ __u8 guid[16];
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
+/* rpmsg */
+
+#define RPMSG_NAME_SIZE 32
+#define RPMSG_DEVICE_MODALIAS_FMT "rpmsg:%s"
+
+struct rpmsg_device_id {
+ char name[RPMSG_NAME_SIZE];
+};
+
+/* i2c */
+
+#define I2C_NAME_SIZE 20
+#define I2C_MODULE_PREFIX "i2c:"
+
+struct i2c_device_id {
+ char name[I2C_NAME_SIZE];
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
+/* spi */
+
+#define SPI_NAME_SIZE 32
+#define SPI_MODULE_PREFIX "spi:"
+
+struct spi_device_id {
+ char name[SPI_NAME_SIZE];
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
+#define SPMI_NAME_SIZE 32
+#define SPMI_MODULE_PREFIX "spmi:"
+
+struct spmi_device_id {
+ char name[SPMI_NAME_SIZE];
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
+/* dmi */
+enum dmi_field {
+ DMI_NONE,
+ DMI_BIOS_VENDOR,
+ DMI_BIOS_VERSION,
+ DMI_BIOS_DATE,
+ DMI_SYS_VENDOR,
+ DMI_PRODUCT_NAME,
+ DMI_PRODUCT_VERSION,
+ DMI_PRODUCT_SERIAL,
+ DMI_PRODUCT_UUID,
+ DMI_BOARD_VENDOR,
+ DMI_BOARD_NAME,
+ DMI_BOARD_VERSION,
+ DMI_BOARD_SERIAL,
+ DMI_BOARD_ASSET_TAG,
+ DMI_CHASSIS_VENDOR,
+ DMI_CHASSIS_TYPE,
+ DMI_CHASSIS_VERSION,
+ DMI_CHASSIS_SERIAL,
+ DMI_CHASSIS_ASSET_TAG,
+ DMI_STRING_MAX,
+};
+
+struct dmi_strmatch {
+ unsigned char slot:7;
+ unsigned char exact_match:1;
+ char substr[79];
+};
+
+struct dmi_system_id {
+ int (*callback)(const struct dmi_system_id *);
+ const char *ident;
+ struct dmi_strmatch matches[4];
+ void *driver_data;
+};
+/*
+ * struct dmi_device_id appears during expansion of
+ * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
+ * but this is enough for gcc 3.4.6 to error out:
+ * error: storage size of '__mod_dmi_device_table' isn't known
+ */
+#define dmi_device_id dmi_system_id
+
+#define DMI_MATCH(a, b) { .slot = a, .substr = b }
+#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 }
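+
+/*
+ * Illustrative sketch (not part of the original header): a driver can
+ * describe a board-specific quirk with DMI_MATCH() entries and then,
+ * typically, pass the table to dmi_check_system() from <linux/dmi.h>.
+ * All strings below are hypothetical; the empty entry terminates the
+ * table.
+ *
+ *   static const struct dmi_system_id foo_dmi_table[] = {
+ *           {
+ *                   .ident = "Hypothetical Example Laptop",
+ *                   .matches = {
+ *                           DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
+ *                           DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
+ *                   },
+ *           },
+ *           { }
+ *   };
+ */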
+
+#define PLATFORM_NAME_SIZE 20
+#define PLATFORM_MODULE_PREFIX "platform:"
+
+struct platform_device_id {
+ char name[PLATFORM_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
+#define MDIO_MODULE_PREFIX "mdio:"
+
+#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
+#define MDIO_ID_ARGS(_id) \
+ (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
+ ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
+ ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
+ ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
+ ((_id)>>15) & 1, ((_id)>>14) & 1, ((_id)>>13) & 1, ((_id)>>12) & 1, \
+ ((_id)>>11) & 1, ((_id)>>10) & 1, ((_id)>>9) & 1, ((_id)>>8) & 1, \
+ ((_id)>>7) & 1, ((_id)>>6) & 1, ((_id)>>5) & 1, ((_id)>>4) & 1, \
+ ((_id)>>3) & 1, ((_id)>>2) & 1, ((_id)>>1) & 1, (_id) & 1
+
+/**
+ * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
+ * @phy_id: The result of
+ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
+ * for this PHY type
+ * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
+ * is used to terminate an array of struct mdio_device_id.
+ */
+struct mdio_device_id {
+ __u32 phy_id;
+ __u32 phy_id_mask;
+};
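+
+/*
+ * Illustrative sketch (not part of the original header): a PHY driver
+ * lists the IDs it handles and exports them; MDIO_ID_FMT/MDIO_ID_ARGS
+ * above render the 32 bits of @phy_id individually when the modalias
+ * string is generated. The ID below is hypothetical, with the low four
+ * revision bits masked out; the zero-mask entry terminates the array.
+ *
+ *   static struct mdio_device_id foo_phy_ids[] = {
+ *           { 0x00112230, 0xfffffff0 },
+ *           { }
+ *   };
+ *   MODULE_DEVICE_TABLE(mdio, foo_phy_ids);
+ */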
+
+struct zorro_device_id {
+ __u32 id; /* Device ID or ZORRO_WILDCARD */
+ kernel_ulong_t driver_data; /* Data private to the driver */
+};
+
+#define ZORRO_WILDCARD (0xffffffff) /* not official */
+
+#define ZORRO_DEVICE_MODALIAS_FMT "zorro:i%08X"
+
+#define ISAPNP_ANY_ID 0xffff
+struct isapnp_device_id {
+ unsigned short card_vendor, card_device;
+ unsigned short vendor, function;
+ kernel_ulong_t driver_data; /* data private to the driver */
+};
+
+/**
+ * struct amba_id - identifies a device on an AMBA bus
+ * @id: The significant bits of the hardware device ID
+ * @mask: Bitmask specifying which bits of the id field are significant when
+ * matching. A driver binds to a device when ((hardware device ID) & mask)
+ * == id.
+ * @data: Private data used by the driver.
+ */
+struct amba_id {
+ unsigned int id;
+ unsigned int mask;
+ void *data;
+};
+
+/**
+ * struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus
+ * @type: Device type identifier.
+ */
+struct mips_cdmm_device_id {
+ __u8 type;
+};
+
+/*
+ * Match x86 CPUs for CPU specific drivers.
+ * See documentation of "x86_match_cpu" for details.
+ */
+
+/*
+ * MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
+ * Although gcc seems to ignore this error, clang fails without this define.
+ */
+#define x86cpu_device_id x86_cpu_id
+struct x86_cpu_id {
+ __u16 vendor;
+ __u16 family;
+ __u16 model;
+ __u16 feature; /* bit index */
+ kernel_ulong_t driver_data;
+};
+
+#define X86_FEATURE_MATCH(x) \
+ { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x }
+
+#define X86_VENDOR_ANY 0xffff
+#define X86_FAMILY_ANY 0
+#define X86_MODEL_ANY 0
+#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
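+
+/*
+ * Illustrative sketch (not part of the original header): a CPU-feature
+ * driver usually declares a table like the one below (assuming the
+ * X86_FEATURE_AES bit from <asm/cpufeature.h>), exports it, and probes
+ * with x86_match_cpu().
+ *
+ *   static const struct x86_cpu_id foo_cpu_ids[] = {
+ *           X86_FEATURE_MATCH(X86_FEATURE_AES),
+ *           {}
+ *   };
+ *   MODULE_DEVICE_TABLE(x86cpu, foo_cpu_ids);
+ */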
+
+/*
+ * Generic table type for matching CPU features.
+ * @feature: the bit number of the feature (0 - 65535)
+ */
+
+struct cpu_feature {
+ __u16 feature;
+};
+
+#define IPACK_ANY_FORMAT 0xff
+#define IPACK_ANY_ID (~0)
+struct ipack_device_id {
+ __u8 format; /* Format version or IPACK_ANY_ID */
+ __u32 vendor; /* Vendor ID or IPACK_ANY_ID */
+ __u32 device; /* Device ID or IPACK_ANY_ID */
+};
+
+#define MEI_CL_MODULE_PREFIX "mei:"
+#define MEI_CL_NAME_SIZE 32
+
+struct mei_cl_device_id {
+ char name[MEI_CL_NAME_SIZE];
+ kernel_ulong_t driver_info;
+};
+
+/* RapidIO */
+
+#define RIO_ANY_ID 0xffff
+
+/**
+ * struct rio_device_id - RIO device identifier
+ * @did: RapidIO device ID
+ * @vid: RapidIO vendor ID
+ * @asm_did: RapidIO assembly device ID
+ * @asm_vid: RapidIO assembly vendor ID
+ *
+ * Identifies a RapidIO device based on both the device/vendor IDs and
+ * the assembly device/vendor IDs.
+ */
+struct rio_device_id {
+ __u16 did, vid;
+ __u16 asm_did, asm_vid;
+};
+
+struct mcb_device_id {
+ __u16 device;
+ kernel_ulong_t driver_data;
+};
+
+#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
new file mode 100644
index 000000000..c883b86ea
--- /dev/null
+++ b/include/linux/module.h
@@ -0,0 +1,658 @@
+#ifndef _LINUX_MODULE_H
+#define _LINUX_MODULE_H
+/*
+ * Dynamic loading of modules into the kernel.
+ *
+ * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
+ * Rewritten again by Rusty Russell, 2002
+ */
+#include <linux/list.h>
+#include <linux/stat.h>
+#include <linux/compiler.h>
+#include <linux/cache.h>
+#include <linux/kmod.h>
+#include <linux/elf.h>
+#include <linux/stringify.h>
+#include <linux/kobject.h>
+#include <linux/moduleparam.h>
+#include <linux/jump_label.h>
+#include <linux/export.h>
+
+#include <linux/percpu.h>
+#include <asm/module.h>
+
+/* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
+#define MODULE_SIG_STRING "~Module signature appended~\n"
+
+/* Not Yet Implemented */
+#define MODULE_SUPPORTED_DEVICE(name)
+
+#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
+
+struct modversion_info {
+ unsigned long crc;
+ char name[MODULE_NAME_LEN];
+};
+
+struct module;
+
+struct module_kobject {
+ struct kobject kobj;
+ struct module *mod;
+ struct kobject *drivers_dir;
+ struct module_param_attrs *mp;
+ struct completion *kobj_completion;
+};
+
+struct module_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct module_attribute *, struct module_kobject *,
+ char *);
+ ssize_t (*store)(struct module_attribute *, struct module_kobject *,
+ const char *, size_t count);
+ void (*setup)(struct module *, const char *);
+ int (*test)(struct module *);
+ void (*free)(struct module *);
+};
+
+struct module_version_attribute {
+ struct module_attribute mattr;
+ const char *module_name;
+ const char *version;
+} __attribute__ ((__aligned__(sizeof(void *))));
+
+extern ssize_t __modver_version_show(struct module_attribute *,
+ struct module_kobject *, char *);
+
+extern struct module_attribute module_uevent;
+
+/* These are either module local, or the kernel's dummy ones. */
+extern int init_module(void);
+extern void cleanup_module(void);
+
+/* Archs provide a method of finding the correct exception table. */
+struct exception_table_entry;
+
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value);
+void sort_extable(struct exception_table_entry *start,
+ struct exception_table_entry *finish);
+void sort_main_extable(void);
+void trim_init_extable(struct module *m);
+
+/* Generic info of form tag = "info" */
+#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info)
+
+/* For userspace: you can also call me... */
+#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
+
+/* Soft module dependencies. See man modprobe.d for details.
+ * Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz")
+ */
+#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
+
+/*
+ * The following license idents are currently accepted as indicating free
+ * software modules
+ *
+ * "GPL" [GNU Public License v2 or later]
+ * "GPL v2" [GNU Public License v2]
+ * "GPL and additional rights" [GNU Public License v2 rights and more]
+ * "Dual BSD/GPL" [GNU Public License v2
+ * or BSD license choice]
+ * "Dual MIT/GPL" [GNU Public License v2
+ * or MIT license choice]
+ * "Dual MPL/GPL" [GNU Public License v2
+ * or Mozilla license choice]
+ *
+ * The following other idents are available
+ *
+ * "Proprietary" [Non free products]
+ *
+ * There are dual licensed components, but when running with Linux it is the
+ * GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL
+ * is a GPL combined work.
+ *
+ * This exists for several reasons
+ * 1. So modinfo can show license info for users wanting to vet their setup
+ * is free
+ * 2. So the community can ignore bug reports including proprietary modules
+ * 3. So vendors can do likewise based on their own policies
+ */
+#define MODULE_LICENSE(_license) MODULE_INFO(license, _license)
+
+/*
+ * Author(s), use "Name <email>" or just "Name", for multiple
+ * authors use multiple MODULE_AUTHOR() statements/lines.
+ */
+#define MODULE_AUTHOR(_author) MODULE_INFO(author, _author)
+
+/* What your module does. */
+#define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description)
+
+#ifdef MODULE
+/* Creates an alias so file2alias.c can find device table. */
+#define MODULE_DEVICE_TABLE(type, name) \
+extern const typeof(name) __mod_##type##__##name##_device_table \
+ __attribute__ ((unused, alias(__stringify(name))))
+#else /* !MODULE */
+#define MODULE_DEVICE_TABLE(type, name)
+#endif
+
+/* Version of form [<epoch>:]<version>[-<extra-version>].
+ * Or for CVS/RCS ID version, everything but the number is stripped.
+ * <epoch>: A (small) unsigned integer which allows you to start versions
+ * anew. If not mentioned, it's zero. eg. "2:1.0" is after
+ * "1:2.0".
+
+ * <version>: The <version> may contain only alphanumerics and the
+ * character `.'. Ordered by numeric sort for numeric parts,
+ * ascii sort for ascii parts (as per RPM or DEB algorithm).
+
+ * <extraversion>: Like <version>, but inserted for local
+ * customizations, eg "rh3" or "rusty1".
+
+ * Using this automatically adds a checksum of the .c files and the
+ * local headers in "srcversion".
+ */
+
+#if defined(MODULE) || !defined(CONFIG_SYSFS)
+#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#else
+#define MODULE_VERSION(_version) \
+ static struct module_version_attribute ___modver_attr = { \
+ .mattr = { \
+ .attr = { \
+ .name = "version", \
+ .mode = S_IRUGO, \
+ }, \
+ .show = __modver_version_show, \
+ }, \
+ .module_name = KBUILD_MODNAME, \
+ .version = _version, \
+ }; \
+ static const struct module_version_attribute \
+ __used __attribute__ ((__section__ ("__modver"))) \
+ * __moduleparam_const __modver_attr = &___modver_attr
+#endif
+
+/* Optional firmware file (or files) needed by the module
+ * format is simply firmware file name. Multiple firmware
+ * files require multiple MODULE_FIRMWARE() specifiers */
+#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
+
+/* Given an address, look for it in the exception tables */
+const struct exception_table_entry *search_exception_tables(unsigned long add);
+
+struct notifier_block;
+
+#ifdef CONFIG_MODULES
+
+extern int modules_disabled; /* for sysctl */
+/* Get/put a kernel symbol (calls must be symmetric) */
+void *__symbol_get(const char *symbol);
+void *__symbol_get_gpl(const char *symbol);
+#define symbol_get(x) ((typeof(&x))(__symbol_get(VMLINUX_SYMBOL_STR(x))))
+
+/* modules using other modules: kdb wants to see this. */
+struct module_use {
+ struct list_head source_list;
+ struct list_head target_list;
+ struct module *source, *target;
+};
+
+enum module_state {
+ MODULE_STATE_LIVE, /* Normal state. */
+ MODULE_STATE_COMING, /* Fully formed, running module_init. */
+ MODULE_STATE_GOING, /* Going away. */
+ MODULE_STATE_UNFORMED, /* Still setting it up. */
+};
+
+struct module {
+ enum module_state state;
+
+ /* Member of list of modules */
+ struct list_head list;
+
+ /* Unique handle for this module */
+ char name[MODULE_NAME_LEN];
+
+ /* Sysfs stuff. */
+ struct module_kobject mkobj;
+ struct module_attribute *modinfo_attrs;
+ const char *version;
+ const char *srcversion;
+ struct kobject *holders_dir;
+
+ /* Exported symbols */
+ const struct kernel_symbol *syms;
+ const unsigned long *crcs;
+ unsigned int num_syms;
+
+ /* Kernel parameters. */
+ struct kernel_param *kp;
+ unsigned int num_kp;
+
+ /* GPL-only exported symbols. */
+ unsigned int num_gpl_syms;
+ const struct kernel_symbol *gpl_syms;
+ const unsigned long *gpl_crcs;
+
+#ifdef CONFIG_UNUSED_SYMBOLS
+ /* unused exported symbols. */
+ const struct kernel_symbol *unused_syms;
+ const unsigned long *unused_crcs;
+ unsigned int num_unused_syms;
+
+ /* GPL-only, unused exported symbols. */
+ unsigned int num_unused_gpl_syms;
+ const struct kernel_symbol *unused_gpl_syms;
+ const unsigned long *unused_gpl_crcs;
+#endif
+
+#ifdef CONFIG_MODULE_SIG
+ /* Signature was verified. */
+ bool sig_ok;
+#endif
+
+ /* symbols that will be GPL-only in the near future. */
+ const struct kernel_symbol *gpl_future_syms;
+ const unsigned long *gpl_future_crcs;
+ unsigned int num_gpl_future_syms;
+
+ /* Exception table */
+ unsigned int num_exentries;
+ struct exception_table_entry *extable;
+
+ /* Startup function. */
+ int (*init)(void);
+
+ /* If this is non-NULL, vfree after init() returns */
+ void *module_init;
+
+ /* Here is the actual code + data, vfree'd on unload. */
+ void *module_core;
+
+ /* Here are the sizes of the init and core sections */
+ unsigned int init_size, core_size;
+
+ /* The size of the executable code in each section. */
+ unsigned int init_text_size, core_text_size;
+
+ /* Size of RO sections of the module (text+rodata) */
+ unsigned int init_ro_size, core_ro_size;
+
+ /* Arch-specific module values */
+ struct mod_arch_specific arch;
+
+ unsigned int taints; /* same bits as kernel:tainted */
+
+#ifdef CONFIG_GENERIC_BUG
+ /* Support for BUG */
+ unsigned num_bugs;
+ struct list_head bug_list;
+ struct bug_entry *bug_table;
+#endif
+
+#ifdef CONFIG_KALLSYMS
+ /*
+ * We keep the symbol and string tables for kallsyms.
+ * The core_* fields below are temporary, loader-only (they
+ * could really be discarded after module init).
+ */
+ Elf_Sym *symtab, *core_symtab;
+ unsigned int num_symtab, core_num_syms;
+ char *strtab, *core_strtab;
+
+ /* Section attributes */
+ struct module_sect_attrs *sect_attrs;
+
+ /* Notes attributes */
+ struct module_notes_attrs *notes_attrs;
+#endif
+
+ /* The command line arguments (may be mangled). People like
+ keeping pointers to this stuff */
+ char *args;
+
+#ifdef CONFIG_SMP
+ /* Per-cpu data. */
+ void __percpu *percpu;
+ unsigned int percpu_size;
+#endif
+
+#ifdef CONFIG_TRACEPOINTS
+ unsigned int num_tracepoints;
+ struct tracepoint * const *tracepoints_ptrs;
+#endif
+#ifdef HAVE_JUMP_LABEL
+ struct jump_entry *jump_entries;
+ unsigned int num_jump_entries;
+#endif
+#ifdef CONFIG_TRACING
+ unsigned int num_trace_bprintk_fmt;
+ const char **trace_bprintk_fmt_start;
+#endif
+#ifdef CONFIG_EVENT_TRACING
+ struct ftrace_event_call **trace_events;
+ unsigned int num_trace_events;
+ struct trace_enum_map **trace_enums;
+ unsigned int num_trace_enums;
+#endif
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ unsigned int num_ftrace_callsites;
+ unsigned long *ftrace_callsites;
+#endif
+
+#ifdef CONFIG_LIVEPATCH
+ bool klp_alive;
+#endif
+
+#ifdef CONFIG_MODULE_UNLOAD
+ /* What modules depend on me? */
+ struct list_head source_list;
+ /* What modules do I depend on? */
+ struct list_head target_list;
+
+ /* Destruction function. */
+ void (*exit)(void);
+
+ atomic_t refcnt;
+#endif
+
+#ifdef CONFIG_CONSTRUCTORS
+ /* Constructor functions. */
+ ctor_fn_t *ctors;
+ unsigned int num_ctors;
+#endif
+};
+#ifndef MODULE_ARCH_INIT
+#define MODULE_ARCH_INIT {}
+#endif
+
+extern struct mutex module_mutex;
+
+/* FIXME: It'd be nice to isolate modules during init, too, so they
+ aren't used before they (may) fail. But presently too much code
+ (IDE & SCSI) requires entry into the module during init. */
+static inline int module_is_live(struct module *mod)
+{
+ return mod->state != MODULE_STATE_GOING;
+}
+
+struct module *__module_text_address(unsigned long addr);
+struct module *__module_address(unsigned long addr);
+bool is_module_address(unsigned long addr);
+bool is_module_percpu_address(unsigned long addr);
+bool is_module_text_address(unsigned long addr);
+
+static inline bool within_module_core(unsigned long addr,
+ const struct module *mod)
+{
+ return (unsigned long)mod->module_core <= addr &&
+ addr < (unsigned long)mod->module_core + mod->core_size;
+}
+
+static inline bool within_module_init(unsigned long addr,
+ const struct module *mod)
+{
+ return (unsigned long)mod->module_init <= addr &&
+ addr < (unsigned long)mod->module_init + mod->init_size;
+}
+
+static inline bool within_module(unsigned long addr, const struct module *mod)
+{
+ return within_module_init(addr, mod) || within_module_core(addr, mod);
+}
+
+/* Search for module by name: must hold module_mutex. */
+struct module *find_module(const char *name);
+
+struct symsearch {
+ const struct kernel_symbol *start, *stop;
+ const unsigned long *crcs;
+ enum {
+ NOT_GPL_ONLY,
+ GPL_ONLY,
+ WILL_BE_GPL_ONLY,
+ } licence;
+ bool unused;
+};
+
+/* Search for an exported symbol by name. */
+const struct kernel_symbol *find_symbol(const char *name,
+ struct module **owner,
+ const unsigned long **crc,
+ bool gplok,
+ bool warn);
+
+/* Walk the exported symbol table */
+bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
+ struct module *owner,
+ void *data), void *data);
+
+/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
+ symnum out of range. */
+int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *name, char *module_name, int *exported);
+
+/* Look for this name: can be of form module:name. */
+unsigned long module_kallsyms_lookup_name(const char *name);
+
+int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *, unsigned long),
+ void *data);
+
+extern void __module_put_and_exit(struct module *mod, long code)
+ __attribute__((noreturn));
+#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
+
+#ifdef CONFIG_MODULE_UNLOAD
+int module_refcount(struct module *mod);
+void __symbol_put(const char *symbol);
+#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
+void symbol_put_addr(void *addr);
+
+/* Sometimes we know we already have a refcount, and it's easier not
+ to handle the error case (which only happens with rmmod --wait). */
+extern void __module_get(struct module *module);
+
+/* This is the Right Way to get a module: if it fails, it's being removed,
+ * so pretend it's not there. */
+extern bool try_module_get(struct module *module);
+
+extern void module_put(struct module *module);
+
+#else /*!CONFIG_MODULE_UNLOAD*/
+static inline int try_module_get(struct module *module)
+{
+ return !module || module_is_live(module);
+}
+static inline void module_put(struct module *module)
+{
+}
+static inline void __module_get(struct module *module)
+{
+}
+#define symbol_put(x) do { } while (0)
+#define symbol_put_addr(p) do { } while (0)
+
+#endif /* CONFIG_MODULE_UNLOAD */
+int ref_module(struct module *a, struct module *b);
+
+/* This is a #define so the string doesn't get put in every .o file */
+#define module_name(mod) \
+({ \
+ struct module *__mod = (mod); \
+ __mod ? __mod->name : "kernel"; \
+})
+
+/* For kallsyms to ask for address resolution. namebuf should be at
+ * least KSYM_NAME_LEN long: a pointer to namebuf is returned if
+ * found, otherwise NULL. */
+const char *module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname,
+ char *namebuf);
+int lookup_module_symbol_name(unsigned long addr, char *symname);
+int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
+
+/* For extable.c to search modules' exception tables. */
+const struct exception_table_entry *search_module_extables(unsigned long addr);
+
+int register_module_notifier(struct notifier_block *nb);
+int unregister_module_notifier(struct notifier_block *nb);
+
+extern void print_modules(void);
+
+#else /* !CONFIG_MODULES... */
+
+/* Given an address, look for it in the exception tables. */
+static inline const struct exception_table_entry *
+search_module_extables(unsigned long addr)
+{
+ return NULL;
+}
+
+static inline struct module *__module_address(unsigned long addr)
+{
+ return NULL;
+}
+
+static inline struct module *__module_text_address(unsigned long addr)
+{
+ return NULL;
+}
+
+static inline bool is_module_address(unsigned long addr)
+{
+ return false;
+}
+
+static inline bool is_module_percpu_address(unsigned long addr)
+{
+ return false;
+}
+
+static inline bool is_module_text_address(unsigned long addr)
+{
+ return false;
+}
+
+/* Get/put a kernel symbol (calls should be symmetric) */
+#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
+#define symbol_put(x) do { } while (0)
+#define symbol_put_addr(x) do { } while (0)
+
+static inline void __module_get(struct module *module)
+{
+}
+
+static inline int try_module_get(struct module *module)
+{
+ return 1;
+}
+
+static inline void module_put(struct module *module)
+{
+}
+
+#define module_name(mod) "kernel"
+
+/* For kallsyms to ask for address resolution. NULL means not found. */
+static inline const char *module_address_lookup(unsigned long addr,
+ unsigned long *symbolsize,
+ unsigned long *offset,
+ char **modname,
+ char *namebuf)
+{
+ return NULL;
+}
+
+static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
+{
+ return -ERANGE;
+}
+
+static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
+{
+ return -ERANGE;
+}
+
+static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
+ char *type, char *name,
+ char *module_name, int *exported)
+{
+ return -ERANGE;
+}
+
+static inline unsigned long module_kallsyms_lookup_name(const char *name)
+{
+ return 0;
+}
+
+static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ struct module *,
+ unsigned long),
+ void *data)
+{
+ return 0;
+}
+
+static inline int register_module_notifier(struct notifier_block *nb)
+{
+ /* no events will happen anyway, so this can always succeed */
+ return 0;
+}
+
+static inline int unregister_module_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+#define module_put_and_exit(code) do_exit(code)
+
+static inline void print_modules(void)
+{
+}
+#endif /* CONFIG_MODULES */
+
+#ifdef CONFIG_SYSFS
+extern struct kset *module_kset;
+extern struct kobj_type module_ktype;
+extern int module_sysfs_initialized;
+#endif /* CONFIG_SYSFS */
+
+#define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x)
+
+/* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */
+
+#define __MODULE_STRING(x) __stringify(x)
+
+#ifdef CONFIG_DEBUG_SET_MODULE_RONX
+extern void set_all_modules_text_rw(void);
+extern void set_all_modules_text_ro(void);
+#else
+static inline void set_all_modules_text_rw(void) { }
+static inline void set_all_modules_text_ro(void) { }
+#endif
+
+#ifdef CONFIG_GENERIC_BUG
+void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+ struct module *);
+void module_bug_cleanup(struct module *);
+
+#else /* !CONFIG_GENERIC_BUG */
+
+static inline void module_bug_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+}
+static inline void module_bug_cleanup(struct module *mod) {}
+#endif /* CONFIG_GENERIC_BUG */
+
+#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
new file mode 100644
index 000000000..4d0cb9bba
--- /dev/null
+++ b/include/linux/moduleloader.h
@@ -0,0 +1,95 @@
+#ifndef _LINUX_MODULELOADER_H
+#define _LINUX_MODULELOADER_H
+/* The stuff needed for archs to support modules. */
+
+#include <linux/module.h>
+#include <linux/elf.h>
+
+/* These may be implemented by architectures that need to hook into the
+ * module loader code. Architectures that don't need to do anything special
+ * can just rely on the 'weak' default hooks defined in kernel/module.c.
+ * Note, however, that at least one of apply_relocate or apply_relocate_add
+ * must be implemented by each architecture.
+ */
+
+/* Adjust arch-specific sections. Return 0 on success. */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod);
+
+/* Additional bytes needed by arch in front of individual sections */
+unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
+
+/* Allocator used for allocating struct module, core sections and init
+ sections. Returns NULL on failure. */
+void *module_alloc(unsigned long size);
+
+/* Free memory returned from module_alloc. */
+void module_memfree(void *module_region);
+
+/*
+ * Apply the given relocation to the (simplified) ELF. Return -error
+ * or 0.
+ */
+#ifdef CONFIG_MODULES_USE_ELF_REL
+int apply_relocate(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *mod);
+#else
+static inline int apply_relocate(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ printk(KERN_ERR "module %s: REL relocation unsupported\n",
+ module_name(me));
+ return -ENOEXEC;
+}
+#endif
+
+/*
+ * Apply the given add relocation to the (simplified) ELF. Return
+ * -error or 0
+ */
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+int apply_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *mod);
+#else
+static inline int apply_relocate_add(Elf_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ printk(KERN_ERR "module %s: REL relocation unsupported\n",
+ module_name(me));
+ return -ENOEXEC;
+}
+#endif
+
+/* Any final processing of module before access. Return -error or 0. */
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *mod);
+
+/* Any cleanup needed when module leaves. */
+void module_arch_cleanup(struct module *mod);
+
+/* Any cleanup before freeing mod->module_init */
+void module_arch_freeing_init(struct module *mod);
+
+#ifdef CONFIG_KASAN
+#include <linux/kasan.h>
+#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
+#define MODULE_ALIGN PAGE_SIZE
+#endif
+
+#endif
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
new file mode 100644
index 000000000..1c9effa25
--- /dev/null
+++ b/include/linux/moduleparam.h
@@ -0,0 +1,508 @@
+#ifndef _LINUX_MODULE_PARAMS_H
+#define _LINUX_MODULE_PARAMS_H
+/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
+#include <linux/init.h>
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+
+/* You can override this manually, but generally this should match the
+ module name. */
+#ifdef MODULE
+#define MODULE_PARAM_PREFIX /* empty */
+#else
+#define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
+#endif
+
+/* Chosen so that structs with an unsigned long line up. */
+#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
+
+#ifdef MODULE
+#define __MODULE_INFO(tag, name, info) \
+static const char __UNIQUE_ID(name)[] \
+ __used __attribute__((section(".modinfo"), unused, aligned(1))) \
+ = __stringify(tag) "=" info
+#else /* !MODULE */
+/* This struct is here for syntactic coherency, it is not used */
+#define __MODULE_INFO(tag, name, info) \
+ struct __UNIQUE_ID(name) {}
+#endif
+#define __MODULE_PARM_TYPE(name, _type) \
+ __MODULE_INFO(parmtype, name##type, #name ":" _type)
+
+/* One for each parameter, describing how to use it. Some files do
+ multiple of these per line, so can't just use MODULE_INFO. */
+#define MODULE_PARM_DESC(_parm, desc) \
+ __MODULE_INFO(parm, _parm, #_parm ":" desc)
+
+struct kernel_param;
+
+/*
+ * Flags available for kernel_param_ops
+ *
+ * NOARG - the parameter allows for no argument (foo instead of foo=1)
+ */
+enum {
+ KERNEL_PARAM_OPS_FL_NOARG = (1 << 0)
+};
+
+struct kernel_param_ops {
+ /* How the ops should behave */
+ unsigned int flags;
+ /* Returns 0, or -errno. arg is in kp->arg. */
+ int (*set)(const char *val, const struct kernel_param *kp);
+ /* Returns length written or -errno. Buffer is 4k (ie. be short!) */
+ int (*get)(char *buffer, const struct kernel_param *kp);
+ /* Optional function to free kp->arg when module unloaded. */
+ void (*free)(void *arg);
+};
+
+/*
+ * Flags available for kernel_param
+ *
+ * UNSAFE - the parameter is dangerous and setting it will taint the kernel
+ */
+enum {
+ KERNEL_PARAM_FL_UNSAFE = (1 << 0)
+};
+
+struct kernel_param {
+ const char *name;
+ const struct kernel_param_ops *ops;
+ u16 perm;
+ s8 level;
+ u8 flags;
+ union {
+ void *arg;
+ const struct kparam_string *str;
+ const struct kparam_array *arr;
+ };
+};
+
+extern const struct kernel_param __start___param[], __stop___param[];
+
+/* Special one for strings we want to copy into */
+struct kparam_string {
+ unsigned int maxlen;
+ char *string;
+};
+
+/* Special one for arrays */
+struct kparam_array
+{
+ unsigned int max;
+ unsigned int elemsize;
+ unsigned int *num;
+ const struct kernel_param_ops *ops;
+ void *elem;
+};
+
+/**
+ * module_param - typesafe helper for a module/cmdline parameter
+ * @value: the variable to alter, and exposed parameter name.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
+ *
+ * @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
+ * ".") the kernel commandline parameter. Note that - is changed to _, so
+ * the user can use "foo-bar=1" even for variable "foo_bar".
+ *
+ * @perm is 0 if the variable is not to appear in sysfs, or 0444
+ * for world-readable, 0644 for root-writable, etc. Note that if it
+ * is writable, you may need to use kparam_block_sysfs_write() around
+ * accesses (esp. charp, which can be kfreed when it changes).
+ *
+ * The @type is simply pasted to refer to a param_ops_##type and a
+ * param_check_##type: for convenience many standard types are provided but
+ * you can create your own by defining those variables.
+ *
+ * Standard types are:
+ * byte, short, ushort, int, uint, long, ulong
+ * charp: a character pointer
+ * bool: a bool, values 0/1, y/n, Y/N.
+ * invbool: the above, only sense-reversed (N = true).
+ */
+#define module_param(name, type, perm) \
+ module_param_named(name, name, type, perm)
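+
+/*
+ * Illustrative sketch (not part of the original header): a module named
+ * "foo" could expose a world-readable, root-writable integer parameter
+ * like this; it would then be settable as "foo_debug=1" at load time or
+ * as "foo.foo_debug=1" on the kernel command line when built in.
+ *
+ *   static int foo_debug;
+ *   module_param(foo_debug, int, 0644);
+ *   MODULE_PARM_DESC(foo_debug, "Enable verbose debug output");
+ */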
+
+/**
+ * module_param_unsafe - same as module_param but taints kernel
+ */
+#define module_param_unsafe(name, type, perm) \
+ module_param_named_unsafe(name, name, type, perm)
+
+/**
+ * module_param_named - typesafe helper for a renamed module/cmdline parameter
+ * @name: a valid C identifier which is the parameter name.
+ * @value: the actual lvalue to alter.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
+ *
+ * Usually it's a good idea to have variable names and user-exposed names the
+ * same, but that's harder if the variable must be non-static or is inside a
+ * structure. This allows exposure under a different name.
+ */
+#define module_param_named(name, value, type, perm) \
+ param_check_##type(name, &(value)); \
+ module_param_cb(name, &param_ops_##type, &value, perm); \
+ __MODULE_PARM_TYPE(name, #type)
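+
+/*
+ * Illustrative sketch (not part of the original header): the variable
+ * keeps its descriptive internal name while userspace sees the shorter
+ * parameter name "feature".
+ *
+ *   static bool foo_feature_enabled = true;
+ *   module_param_named(feature, foo_feature_enabled, bool, 0444);
+ */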
+
+/**
+ * module_param_named_unsafe - same as module_param_named but taints kernel
+ */
+#define module_param_named_unsafe(name, value, type, perm) \
+ param_check_##type(name, &(value)); \
+ module_param_cb_unsafe(name, &param_ops_##type, &value, perm); \
+ __MODULE_PARM_TYPE(name, #type)
+
+/**
+ * module_param_cb - general callback for a module/cmdline parameter
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
+#define module_param_cb(name, ops, arg, perm) \
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, 0)
+
+#define module_param_cb_unsafe(name, ops, arg, perm) \
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \
+ KERNEL_PARAM_FL_UNSAFE)
+
+/**
+ * <level>_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before certain initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
+#define __level_param_cb(name, ops, arg, perm, level) \
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
+
+#define core_param_cb(name, ops, arg, perm) \
+ __level_param_cb(name, ops, arg, perm, 1)
+
+#define postcore_param_cb(name, ops, arg, perm) \
+ __level_param_cb(name, ops, arg, perm, 2)
+
+#define arch_param_cb(name, ops, arg, perm) \
+ __level_param_cb(name, ops, arg, perm, 3)
+
+#define subsys_param_cb(name, ops, arg, perm) \
+ __level_param_cb(name, ops, arg, perm, 4)
+
+#define fs_param_cb(name, ops, arg, perm) \
+ __level_param_cb(name, ops, arg, perm, 5)
+
+#define device_param_cb(name, ops, arg, perm) \
+ __level_param_cb(name, ops, arg, perm, 6)
+
+#define late_param_cb(name, ops, arg, perm) \
+ __level_param_cb(name, ops, arg, perm, 7)
+
+/* On alpha, ia64 and ppc64 relocations to global data cannot go into
+ read-only sections (which is part of the respective UNIX ABI on these
+ platforms). So 'const' makes no sense and even causes compile failures
+ with some compilers. */
+#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+#define __moduleparam_const
+#else
+#define __moduleparam_const const
+#endif
+
+/* This is the fundamental function for registering boot/module
+ parameters. */
+#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
+ /* Default value instead of permissions? */ \
+ static const char __param_str_##name[] = prefix #name; \
+ static struct kernel_param __moduleparam_const __param_##name \
+ __used \
+ __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
+ = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \
+ level, flags, { arg } }
+
+/* Obsolete - use module_param_cb() */
+#define module_param_call(name, set, get, arg, perm) \
+ static struct kernel_param_ops __param_ops_##name = \
+ { .flags = 0, (void *)set, (void *)get }; \
+ __module_param_call(MODULE_PARAM_PREFIX, \
+ name, &__param_ops_##name, arg, \
+ (perm) + sizeof(__check_old_set_param(set))*0, -1, 0)
+
+/* We don't get oldget: it's often a new-style param_get_uint, etc. */
+static inline int
+__check_old_set_param(int (*oldset)(const char *, struct kernel_param *))
+{
+ return 0;
+}
+
+/**
+ * kparam_block_sysfs_write - make sure a parameter isn't written via sysfs.
+ * @name: the name of the parameter
+ *
+ * There's no point blocking write on a parameter that isn't writable via sysfs!
+ */
+#define kparam_block_sysfs_write(name) \
+ do { \
+ BUG_ON(!(__param_##name.perm & 0222)); \
+ __kernel_param_lock(); \
+ } while (0)
+
+/**
+ * kparam_unblock_sysfs_write - allows sysfs to write to a parameter again.
+ * @name: the name of the parameter
+ */
+#define kparam_unblock_sysfs_write(name) \
+ do { \
+ BUG_ON(!(__param_##name.perm & 0222)); \
+ __kernel_param_unlock(); \
+ } while (0)
+
+/**
+ * kparam_block_sysfs_read - make sure a parameter isn't read via sysfs.
+ * @name: the name of the parameter
+ *
+ * This also blocks sysfs writes.
+ */
+#define kparam_block_sysfs_read(name) \
+ do { \
+ BUG_ON(!(__param_##name.perm & 0444)); \
+ __kernel_param_lock(); \
+ } while (0)
+
+/**
+ * kparam_unblock_sysfs_read - allows sysfs to read a parameter again.
+ * @name: the name of the parameter
+ */
+#define kparam_unblock_sysfs_read(name) \
+ do { \
+ BUG_ON(!(__param_##name.perm & 0444)); \
+ __kernel_param_unlock(); \
+ } while (0)
+
+#ifdef CONFIG_SYSFS
+extern void __kernel_param_lock(void);
+extern void __kernel_param_unlock(void);
+#else
+static inline void __kernel_param_lock(void)
+{
+}
+static inline void __kernel_param_unlock(void)
+{
+}
+#endif
+
+#ifndef MODULE
+/**
+ * core_param - define a historical core kernel parameter.
+ * @name: the name of the cmdline and sysfs parameter (often the same as var)
+ * @var: the variable
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs
+ *
+ * core_param is just like module_param(), but cannot be modular and
+ * doesn't add a prefix (such as "printk."). This is for compatibility
+ * with __setup(), and it makes sense as truly core parameters aren't
+ * tied to the particular file they're in.
+ */
+#define core_param(name, var, type, perm) \
+ param_check_##type(name, &(var)); \
+ __module_param_call("", name, &param_ops_##type, &var, perm, -1, 0)
+#endif /* !MODULE */
+
+/**
+ * module_param_string - a char array parameter
+ * @name: the name of the parameter
+ * @string: the string variable
+ * @len: the maximum length of the string, incl. terminator
+ * @perm: visibility in sysfs.
+ *
+ * This actually copies the string when it's set (unlike type charp).
+ * @len is usually just sizeof(string).
+ */
+#define module_param_string(name, string, len, perm) \
+ static const struct kparam_string __param_string_##name \
+ = { len, string }; \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ &param_ops_string, \
+ .str = &__param_string_##name, perm, -1, 0);\
+ __MODULE_PARM_TYPE(name, "string")
+
+/**
+ * parameq - checks if two parameter names match
+ * @name1: parameter name 1
+ * @name2: parameter name 2
+ *
+ * Returns true if the two parameter names are equal.
+ * Dashes (-) are considered equal to underscores (_).
+ */
+extern bool parameq(const char *name1, const char *name2);
+
+/**
+ * parameqn - checks if two parameter names match
+ * @name1: parameter name 1
+ * @name2: parameter name 2
+ * @n: the length to compare
+ *
+ * Similar to parameq(), except it compares @n characters.
+ */
+extern bool parameqn(const char *name1, const char *name2, size_t n);
+
+/* Called on module insert or kernel boot */
+extern char *parse_args(const char *name,
+ char *args,
+ const struct kernel_param *params,
+ unsigned num,
+ s16 level_min,
+ s16 level_max,
+ int (*unknown)(char *param, char *val,
+ const char *doing));
+
+/* Called by module remove. */
+#ifdef CONFIG_SYSFS
+extern void destroy_params(const struct kernel_param *params, unsigned num);
+#else
+static inline void destroy_params(const struct kernel_param *params,
+ unsigned num)
+{
+}
+#endif /* !CONFIG_SYSFS */
+
+/* All the helper functions */
+/* The macros to do compile-time type checking stolen from Jakub
+ Jelinek, who IIRC came up with this idea for the 2.4 module init code. */
+#define __param_check(name, p, type) \
+ static inline type __always_unused *__check_##name(void) { return(p); }
+
+extern struct kernel_param_ops param_ops_byte;
+extern int param_set_byte(const char *val, const struct kernel_param *kp);
+extern int param_get_byte(char *buffer, const struct kernel_param *kp);
+#define param_check_byte(name, p) __param_check(name, p, unsigned char)
+
+extern struct kernel_param_ops param_ops_short;
+extern int param_set_short(const char *val, const struct kernel_param *kp);
+extern int param_get_short(char *buffer, const struct kernel_param *kp);
+#define param_check_short(name, p) __param_check(name, p, short)
+
+extern struct kernel_param_ops param_ops_ushort;
+extern int param_set_ushort(const char *val, const struct kernel_param *kp);
+extern int param_get_ushort(char *buffer, const struct kernel_param *kp);
+#define param_check_ushort(name, p) __param_check(name, p, unsigned short)
+
+extern struct kernel_param_ops param_ops_int;
+extern int param_set_int(const char *val, const struct kernel_param *kp);
+extern int param_get_int(char *buffer, const struct kernel_param *kp);
+#define param_check_int(name, p) __param_check(name, p, int)
+
+extern struct kernel_param_ops param_ops_uint;
+extern int param_set_uint(const char *val, const struct kernel_param *kp);
+extern int param_get_uint(char *buffer, const struct kernel_param *kp);
+#define param_check_uint(name, p) __param_check(name, p, unsigned int)
+
+extern struct kernel_param_ops param_ops_long;
+extern int param_set_long(const char *val, const struct kernel_param *kp);
+extern int param_get_long(char *buffer, const struct kernel_param *kp);
+#define param_check_long(name, p) __param_check(name, p, long)
+
+extern struct kernel_param_ops param_ops_ulong;
+extern int param_set_ulong(const char *val, const struct kernel_param *kp);
+extern int param_get_ulong(char *buffer, const struct kernel_param *kp);
+#define param_check_ulong(name, p) __param_check(name, p, unsigned long)
+
+extern struct kernel_param_ops param_ops_ullong;
+extern int param_set_ullong(const char *val, const struct kernel_param *kp);
+extern int param_get_ullong(char *buffer, const struct kernel_param *kp);
+#define param_check_ullong(name, p) __param_check(name, p, unsigned long long)
+
+extern struct kernel_param_ops param_ops_charp;
+extern int param_set_charp(const char *val, const struct kernel_param *kp);
+extern int param_get_charp(char *buffer, const struct kernel_param *kp);
+#define param_check_charp(name, p) __param_check(name, p, char *)
+
+/* We used to allow int as well as bool. We're taking that away! */
+extern struct kernel_param_ops param_ops_bool;
+extern int param_set_bool(const char *val, const struct kernel_param *kp);
+extern int param_get_bool(char *buffer, const struct kernel_param *kp);
+#define param_check_bool(name, p) __param_check(name, p, bool)
+
+extern struct kernel_param_ops param_ops_invbool;
+extern int param_set_invbool(const char *val, const struct kernel_param *kp);
+extern int param_get_invbool(char *buffer, const struct kernel_param *kp);
+#define param_check_invbool(name, p) __param_check(name, p, bool)
+
+/* An int, which can only be set like a bool (though it shows as an int). */
+extern struct kernel_param_ops param_ops_bint;
+extern int param_set_bint(const char *val, const struct kernel_param *kp);
+#define param_get_bint param_get_int
+#define param_check_bint param_check_int
+
+/**
+ * module_param_array - a parameter which is an array of some type
+ * @name: the name of the array variable
+ * @type: the type, as per module_param()
+ * @nump: optional pointer filled in with the number written
+ * @perm: visibility in sysfs
+ *
+ * Input and output are as comma-separated values. Commas inside values
+ * don't work properly (eg. an array of charp).
+ *
+ * ARRAY_SIZE(@name) is used to determine the number of elements in the
+ * array, so the definition must be visible.
+ */
+#define module_param_array(name, type, nump, perm) \
+ module_param_array_named(name, name, type, nump, perm)
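+
+/*
+ * Illustrative sketch (not part of the original header): up to
+ * ARRAY_SIZE(foo_irqs) comma-separated values, e.g. "foo_irqs=5,9,11",
+ * are parsed into the array and the count written to foo_nr_irqs.
+ *
+ *   static int foo_irqs[4];
+ *   static unsigned int foo_nr_irqs;
+ *   module_param_array(foo_irqs, int, &foo_nr_irqs, 0444);
+ */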
+
+/**
+ * module_param_array_named - renamed parameter which is an array of some type
+ * @name: a valid C identifier which is the parameter name
+ * @array: the name of the array variable
+ * @type: the type, as per module_param()
+ * @nump: optional pointer filled in with the number written
+ * @perm: visibility in sysfs
+ *
+ * This exposes a different name than the actual variable name. See
+ * module_param_named() for why this might be necessary.
+ */
+#define module_param_array_named(name, array, type, nump, perm) \
+ param_check_##type(name, &(array)[0]); \
+ static const struct kparam_array __param_arr_##name \
+ = { .max = ARRAY_SIZE(array), .num = nump, \
+ .ops = &param_ops_##type, \
+ .elemsize = sizeof(array[0]), .elem = array }; \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ &param_array_ops, \
+ .arr = &__param_arr_##name, \
+ perm, -1, 0); \
+ __MODULE_PARM_TYPE(name, "array of " #type)
+
+extern struct kernel_param_ops param_array_ops;
+
+extern struct kernel_param_ops param_ops_string;
+extern int param_set_copystring(const char *val, const struct kernel_param *);
+extern int param_get_string(char *buffer, const struct kernel_param *kp);
+
+/* for exporting parameters in /sys/module/.../parameters */
+
+struct module;
+
+#if defined(CONFIG_SYSFS) && defined(CONFIG_MODULES)
+extern int module_param_sysfs_setup(struct module *mod,
+ const struct kernel_param *kparam,
+ unsigned int num_params);
+
+extern void module_param_sysfs_remove(struct module *mod);
+#else
+static inline int module_param_sysfs_setup(struct module *mod,
+ const struct kernel_param *kparam,
+ unsigned int num_params)
+{
+ return 0;
+}
+
+static inline void module_param_sysfs_remove(struct module *mod)
+{ }
+#endif
+
+#endif /* _LINUX_MODULE_PARAMS_H */
diff --git a/include/linux/mount.h b/include/linux/mount.h
new file mode 100644
index 000000000..f822c3c11
--- /dev/null
+++ b/include/linux/mount.h
@@ -0,0 +1,98 @@
+/*
+ *
+ * Definitions for the mount interface. This describes the in-kernel
+ * linked list of mounted filesystems.
+ *
+ * Author: Marco van Wieringen <mvw@planets.elm.net>
+ *
+ */
+#ifndef _LINUX_MOUNT_H
+#define _LINUX_MOUNT_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/nodemask.h>
+#include <linux/spinlock.h>
+#include <linux/seqlock.h>
+#include <linux/atomic.h>
+
+struct super_block;
+struct vfsmount;
+struct dentry;
+struct mnt_namespace;
+
+#define MNT_NOSUID 0x01
+#define MNT_NODEV 0x02
+#define MNT_NOEXEC 0x04
+#define MNT_NOATIME 0x08
+#define MNT_NODIRATIME 0x10
+#define MNT_RELATIME 0x20
+#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
+
+#define MNT_SHRINKABLE 0x100
+#define MNT_WRITE_HOLD 0x200
+
+#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
+#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
+/*
+ * MNT_SHARED_MASK is the set of flags that should be cleared when a
+ * mount becomes shared. Currently, this is only the flag that says a
+ * mount cannot be bind mounted, since this is how we create a mount
+ * that shares events with another mount. If you add a new MNT_*
+ * flag, consider how it interacts with shared mounts.
+ */
+#define MNT_SHARED_MASK (MNT_UNBINDABLE)
+#define MNT_USER_SETTABLE_MASK (MNT_NOSUID | MNT_NODEV | MNT_NOEXEC \
+ | MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME \
+ | MNT_READONLY)
+#define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME )
+
+#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
+ MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
+
+#define MNT_INTERNAL 0x4000
+
+#define MNT_LOCK_ATIME 0x040000
+#define MNT_LOCK_NOEXEC 0x080000
+#define MNT_LOCK_NOSUID 0x100000
+#define MNT_LOCK_NODEV 0x200000
+#define MNT_LOCK_READONLY 0x400000
+#define MNT_LOCKED 0x800000
+#define MNT_DOOMED 0x1000000
+#define MNT_SYNC_UMOUNT 0x2000000
+#define MNT_MARKED 0x4000000
+#define MNT_UMOUNT 0x8000000
+
+struct vfsmount {
+ struct dentry *mnt_root; /* root of the mounted tree */
+ struct super_block *mnt_sb; /* pointer to superblock */
+ int mnt_flags;
+};
+
+struct file; /* forward declaration */
+struct path;
+
+extern int mnt_want_write(struct vfsmount *mnt);
+extern int mnt_want_write_file(struct file *file);
+extern int mnt_clone_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
+extern void mnt_drop_write_file(struct file *file);
+extern void mntput(struct vfsmount *mnt);
+extern struct vfsmount *mntget(struct vfsmount *mnt);
+extern struct vfsmount *mnt_clone_internal(struct path *path);
+extern int __mnt_is_readonly(struct vfsmount *mnt);
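+
+/*
+ * Illustrative pairing of the write-access helpers above (a sketch of a
+ * hypothetical caller, error paths shortened):
+ *
+ *	err = mnt_want_write(path->mnt);
+ *	if (err)
+ *		return err;
+ *	... create or modify objects on the mount ...
+ *	mnt_drop_write(path->mnt);
+ */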
+
+struct path;
+extern struct vfsmount *clone_private_mount(struct path *path);
+
+struct file_system_type;
+extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
+ int flags, const char *name,
+ void *data);
+
+extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
+extern void mark_mounts_for_expiry(struct list_head *mounts);
+
+extern dev_t name_to_dev_t(const char *name);
+
+#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
new file mode 100644
index 000000000..068a0c994
--- /dev/null
+++ b/include/linux/mpage.h
@@ -0,0 +1,24 @@
+/*
+ * include/linux/mpage.h
+ *
+ * Contains declarations related to preparing and submitting BIOs which contain
+ * multiple pagecache pages.
+ */
+
+/*
+ * (And no, it doesn't do the #ifdef __MPAGE_H thing, and it doesn't do
+ * nested includes. Get it right in the .c file).
+ */
+#ifdef CONFIG_BLOCK
+
+struct writeback_control;
+
+int mpage_readpages(struct address_space *mapping, struct list_head *pages,
+ unsigned nr_pages, get_block_t get_block);
+int mpage_readpage(struct page *page, get_block_t get_block);
+int mpage_writepages(struct address_space *mapping,
+ struct writeback_control *wbc, get_block_t get_block);
+int mpage_writepage(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc);
+
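+/*
+ * These are normally called from a filesystem's address_space operations;
+ * a sketch, where "foo_get_block" is a hypothetical get_block_t callback:
+ *
+ *	static int foo_readpages(struct file *file, struct address_space *mapping,
+ *				 struct list_head *pages, unsigned nr_pages)
+ *	{
+ *		return mpage_readpages(mapping, pages, nr_pages, foo_get_block);
+ *	}
+ */
+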
+#endif
diff --git a/include/linux/mpi.h b/include/linux/mpi.h
new file mode 100644
index 000000000..5af1b81de
--- /dev/null
+++ b/include/linux/mpi.h
@@ -0,0 +1,145 @@
+/* mpi.h - Multi Precision Integers
+ * Copyright (C) 1994, 1996, 1998, 1999,
+ * 2000, 2001 Free Software Foundation, Inc.
+ *
+ * This file is part of GNUPG.
+ *
+ * GNUPG is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * GNUPG is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note: This code is heavily based on the GNU MP Library.
+ * Actually it's the same code with only minor changes in the
+ * way the data is stored; this is to support the abstraction
+ * of an optional secure memory allocation which may be used
+ * to avoid revealing sensitive data due to paging etc.
+ * The GNU MP Library itself is published under the LGPL;
+ * however I decided to publish this code under the plain GPL.
+ */
+
+#ifndef G10_MPI_H
+#define G10_MPI_H
+
+#include <linux/types.h>
+
+/* DSI defines */
+
+#define SHA1_DIGEST_LENGTH 20
+
+/*end of DSI defines */
+
+#define BYTES_PER_MPI_LIMB (BITS_PER_LONG / 8)
+#define BITS_PER_MPI_LIMB BITS_PER_LONG
+
+typedef unsigned long int mpi_limb_t;
+typedef signed long int mpi_limb_signed_t;
+
+struct gcry_mpi {
+ int alloced; /* array size (# of allocated limbs) */
+ int nlimbs; /* number of valid limbs */
+ int nbits; /* the real number of valid bits (info only) */
+ int sign; /* indicates a negative number */
+ unsigned flags; /* bit 0: array must be allocated in secure memory space */
+ /* bit 1: not used */
+ /* bit 2: the limb is a pointer to some m_alloced data */
+ mpi_limb_t *d; /* array with the limbs */
+};
+
+typedef struct gcry_mpi *MPI;
+
+#define mpi_get_nlimbs(a) ((a)->nlimbs)
+#define mpi_is_neg(a) ((a)->sign)
+
+/*-- mpiutil.c --*/
+MPI mpi_alloc(unsigned nlimbs);
+MPI mpi_alloc_secure(unsigned nlimbs);
+MPI mpi_alloc_like(MPI a);
+void mpi_free(MPI a);
+int mpi_resize(MPI a, unsigned nlimbs);
+int mpi_copy(MPI *copy, const MPI a);
+void mpi_clear(MPI a);
+int mpi_set(MPI w, MPI u);
+int mpi_set_ui(MPI w, ulong u);
+MPI mpi_alloc_set_ui(unsigned long u);
+void mpi_m_check(MPI a);
+void mpi_swap(MPI a, MPI b);
+
+/*-- mpicoder.c --*/
+MPI do_encode_md(const void *sha_buffer, unsigned nbits);
+MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes);
+MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread);
+int mpi_fromstr(MPI val, const char *str);
+u32 mpi_get_keyid(MPI a, u32 *keyid);
+void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign);
+void *mpi_get_secure_buffer(MPI a, unsigned *nbytes, int *sign);
+int mpi_set_buffer(MPI a, const void *buffer, unsigned nbytes, int sign);
+
+#define log_mpidump g10_log_mpidump
+
+/*-- mpi-add.c --*/
+int mpi_add_ui(MPI w, MPI u, ulong v);
+int mpi_add(MPI w, MPI u, MPI v);
+int mpi_addm(MPI w, MPI u, MPI v, MPI m);
+int mpi_sub_ui(MPI w, MPI u, ulong v);
+int mpi_sub(MPI w, MPI u, MPI v);
+int mpi_subm(MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-mul.c --*/
+int mpi_mul_ui(MPI w, MPI u, ulong v);
+int mpi_mul_2exp(MPI w, MPI u, ulong cnt);
+int mpi_mul(MPI w, MPI u, MPI v);
+int mpi_mulm(MPI w, MPI u, MPI v, MPI m);
+
+/*-- mpi-div.c --*/
+ulong mpi_fdiv_r_ui(MPI rem, MPI dividend, ulong divisor);
+int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor);
+int mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor);
+int mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor);
+int mpi_tdiv_r(MPI rem, MPI num, MPI den);
+int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den);
+int mpi_tdiv_q_2exp(MPI w, MPI u, unsigned count);
+int mpi_divisible_ui(const MPI dividend, ulong divisor);
+
+/*-- mpi-gcd.c --*/
+int mpi_gcd(MPI g, const MPI a, const MPI b);
+
+/*-- mpi-pow.c --*/
+int mpi_pow(MPI w, MPI u, MPI v);
+int mpi_powm(MPI res, MPI base, MPI exp, MPI mod);
+
+/*-- mpi-mpow.c --*/
+int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI mod);
+
+/*-- mpi-cmp.c --*/
+int mpi_cmp_ui(MPI u, ulong v);
+int mpi_cmp(MPI u, MPI v);
+
+/*-- mpi-scan.c --*/
+int mpi_getbyte(MPI a, unsigned idx);
+void mpi_putbyte(MPI a, unsigned idx, int value);
+unsigned mpi_trailing_zeros(MPI a);
+
+/*-- mpi-bit.c --*/
+void mpi_normalize(MPI a);
+unsigned mpi_get_nbits(MPI a);
+int mpi_test_bit(MPI a, unsigned n);
+int mpi_set_bit(MPI a, unsigned n);
+int mpi_set_highbit(MPI a, unsigned n);
+void mpi_clear_highbit(MPI a, unsigned n);
+void mpi_clear_bit(MPI a, unsigned n);
+int mpi_rshift(MPI x, MPI a, unsigned n);
+
+/*-- mpi-inv.c --*/
+int mpi_invm(MPI x, MPI u, MPI v);
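+
+/*
+ * Illustrative call pattern (a sketch with hypothetical variables and no
+ * real error reporting): compute res = base^exp mod m and fetch the bytes.
+ *
+ *	unsigned nbytes;
+ *	void *p = NULL;
+ *	MPI base = mpi_read_raw_data(buf, len);
+ *	MPI res = mpi_alloc(0);
+ *
+ *	if (base && res && !mpi_powm(res, base, exp, mod))
+ *		p = mpi_get_buffer(res, &nbytes, NULL);
+ *	mpi_free(base);
+ *	mpi_free(res);
+ */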
+
+#endif /*G10_MPI_H */
diff --git a/include/linux/mpls.h b/include/linux/mpls.h
new file mode 100644
index 000000000..9999145bc
--- /dev/null
+++ b/include/linux/mpls.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_H
+#define _LINUX_MPLS_H
+
+#include <uapi/linux/mpls.h>
+
+#endif /* _LINUX_MPLS_H */
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
new file mode 100644
index 000000000..79aaa9fc1
--- /dev/null
+++ b/include/linux/mroute.h
@@ -0,0 +1,107 @@
+#ifndef __LINUX_MROUTE_H
+#define __LINUX_MROUTE_H
+
+#include <linux/in.h>
+#include <linux/pim.h>
+#include <net/sock.h>
+#include <uapi/linux/mroute.h>
+
+#ifdef CONFIG_IP_MROUTE
+static inline int ip_mroute_opt(int opt)
+{
+ return (opt >= MRT_BASE) && (opt <= MRT_MAX);
+}
+#else
+static inline int ip_mroute_opt(int opt)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_IP_MROUTE
+extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
+extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+extern int ip_mr_init(void);
+#else
+static inline
+int ip_mroute_setsockopt(struct sock *sock,
+ int optname, char __user *optval, unsigned int optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline
+int ip_mroute_getsockopt(struct sock *sock,
+ int optname, char __user *optval, int __user *optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline
+int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static inline int ip_mr_init(void)
+{
+ return 0;
+}
+#endif
+
+struct vif_device {
+ struct net_device *dev; /* Device we are using */
+ unsigned long bytes_in,bytes_out;
+ unsigned long pkt_in,pkt_out; /* Statistics */
+ unsigned long rate_limit; /* Traffic shaping (NI) */
+ unsigned char threshold; /* TTL threshold */
+ unsigned short flags; /* Control flags */
+ __be32 local,remote; /* Addresses(remote for tunnels)*/
+ int link; /* Physical interface index */
+};
+
+#define VIFF_STATIC 0x8000
+
+struct mfc_cache {
+ struct list_head list;
+ __be32 mfc_mcastgrp; /* Group the entry belongs to */
+ __be32 mfc_origin; /* Source of packet */
+ vifi_t mfc_parent; /* Source interface */
+ int mfc_flags; /* Flags on line */
+
+ union {
+ struct {
+ unsigned long expires;
+ struct sk_buff_head unresolved; /* Unresolved buffers */
+ } unres;
+ struct {
+ unsigned long last_assert;
+ int minvif;
+ int maxvif;
+ unsigned long bytes;
+ unsigned long pkt;
+ unsigned long wrong_if;
+ unsigned char ttls[MAXVIFS]; /* TTL thresholds */
+ } res;
+ } mfc_un;
+ struct rcu_head rcu;
+};
+
+#define MFC_STATIC 1
+#define MFC_NOTIFY 2
+
+#define MFC_LINES 64
+
+#ifdef __BIG_ENDIAN
+#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
+#else
+#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
+#endif
+
+struct rtmsg;
+extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
+ __be32 saddr, __be32 daddr,
+ struct rtmsg *rtm, int nowait);
+#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
new file mode 100644
index 000000000..66982e764
--- /dev/null
+++ b/include/linux/mroute6.h
@@ -0,0 +1,133 @@
+#ifndef __LINUX_MROUTE6_H
+#define __LINUX_MROUTE6_H
+
+
+#include <linux/pim.h>
+#include <linux/skbuff.h> /* for struct sk_buff_head */
+#include <net/net_namespace.h>
+#include <uapi/linux/mroute6.h>
+
+#ifdef CONFIG_IPV6_MROUTE
+static inline int ip6_mroute_opt(int opt)
+{
+ return (opt >= MRT6_BASE) && (opt <= MRT6_MAX);
+}
+#else
+static inline int ip6_mroute_opt(int opt)
+{
+ return 0;
+}
+#endif
+
+struct sock;
+
+#ifdef CONFIG_IPV6_MROUTE
+extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
+extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
+extern int ip6_mr_input(struct sk_buff *skb);
+extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
+extern int ip6_mr_init(void);
+extern void ip6_mr_cleanup(void);
+#else
+static inline
+int ip6_mroute_setsockopt(struct sock *sock,
+ int optname, char __user *optval, unsigned int optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline
+int ip6_mroute_getsockopt(struct sock *sock,
+ int optname, char __user *optval, int __user *optlen)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline
+int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
+{
+ return -ENOIOCTLCMD;
+}
+
+static inline int ip6_mr_init(void)
+{
+ return 0;
+}
+
+static inline void ip6_mr_cleanup(void)
+{
+ return;
+}
+#endif
+
+struct mif_device {
+ struct net_device *dev; /* Device we are using */
+ unsigned long bytes_in,bytes_out;
+ unsigned long pkt_in,pkt_out; /* Statistics */
+ unsigned long rate_limit; /* Traffic shaping (NI) */
+ unsigned char threshold; /* TTL threshold */
+ unsigned short flags; /* Control flags */
+ int link; /* Physical interface index */
+};
+
+#define VIFF_STATIC 0x8000
+
+struct mfc6_cache {
+ struct list_head list;
+ struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
+ struct in6_addr mf6c_origin; /* Source of packet */
+ mifi_t mf6c_parent; /* Source interface */
+ int mfc_flags; /* Flags on line */
+
+ union {
+ struct {
+ unsigned long expires;
+ struct sk_buff_head unresolved; /* Unresolved buffers */
+ } unres;
+ struct {
+ unsigned long last_assert;
+ int minvif;
+ int maxvif;
+ unsigned long bytes;
+ unsigned long pkt;
+ unsigned long wrong_if;
+ unsigned char ttls[MAXMIFS]; /* TTL thresholds */
+ } res;
+ } mfc_un;
+};
+
+#define MFC_STATIC 1
+#define MFC_NOTIFY 2
+
+#define MFC6_LINES 64
+
+#define MFC6_HASH(a, g) (((__force u32)(a)->s6_addr32[0] ^ \
+ (__force u32)(a)->s6_addr32[1] ^ \
+ (__force u32)(a)->s6_addr32[2] ^ \
+ (__force u32)(a)->s6_addr32[3] ^ \
+ (__force u32)(g)->s6_addr32[0] ^ \
+ (__force u32)(g)->s6_addr32[1] ^ \
+ (__force u32)(g)->s6_addr32[2] ^ \
+ (__force u32)(g)->s6_addr32[3]) % MFC6_LINES)
+
+#define MFC_ASSERT_THRESH (3*HZ) /* Maximal freq. of asserts */
+
+struct rtmsg;
+extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
+ struct rtmsg *rtm, int nowait);
+
+#ifdef CONFIG_IPV6_MROUTE
+extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
+extern int ip6mr_sk_done(struct sock *sk);
+#else
+static inline struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
+{
+ return NULL;
+}
+static inline int ip6mr_sk_done(struct sock *sk)
+{
+ return 0;
+}
+#endif
+#endif
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
new file mode 100644
index 000000000..e1b163f91
--- /dev/null
+++ b/include/linux/msdos_fs.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_MSDOS_FS_H
+#define _LINUX_MSDOS_FS_H
+
+#include <uapi/linux/msdos_fs.h>
+
+/* media of boot sector */
+static inline int fat_valid_media(u8 media)
+{
+ return 0xf8 <= media || media == 0xf0;
+}
+#endif /* !_LINUX_MSDOS_FS_H */
diff --git a/include/linux/msg.h b/include/linux/msg.h
new file mode 100644
index 000000000..f3f302f9c
--- /dev/null
+++ b/include/linux/msg.h
@@ -0,0 +1,42 @@
+#ifndef _LINUX_MSG_H
+#define _LINUX_MSG_H
+
+#include <linux/list.h>
+#include <uapi/linux/msg.h>
+
+/* one msg_msg structure for each message */
+struct msg_msg {
+ struct list_head m_list;
+ long m_type;
+ size_t m_ts; /* message text size */
+ struct msg_msgseg *next;
+ void *security;
+ /* the actual message follows immediately */
+};
+
+/* one msq_queue structure for each present queue on the system */
+struct msg_queue {
+ struct kern_ipc_perm q_perm;
+ time_t q_stime; /* last msgsnd time */
+ time_t q_rtime; /* last msgrcv time */
+ time_t q_ctime; /* last change time */
+ unsigned long q_cbytes; /* current number of bytes on queue */
+ unsigned long q_qnum; /* number of messages in queue */
+ unsigned long q_qbytes; /* max number of bytes on queue */
+ pid_t q_lspid; /* pid of last msgsnd */
+ pid_t q_lrpid; /* last receive pid */
+
+ struct list_head q_messages;
+ struct list_head q_receivers;
+ struct list_head q_senders;
+};
+
+/* Helper routines for sys_msgsnd and sys_msgrcv */
+extern long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ size_t msgsz, int msgflg);
+extern long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
+ int msgflg,
+ long (*msg_fill)(void __user *, struct msg_msg *,
+ size_t));
+
+#endif /* _LINUX_MSG_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
new file mode 100644
index 000000000..8ac4a68ff
--- /dev/null
+++ b/include/linux/msi.h
@@ -0,0 +1,243 @@
+#ifndef LINUX_MSI_H
+#define LINUX_MSI_H
+
+#include <linux/kobject.h>
+#include <linux/list.h>
+
+struct msi_msg {
+ u32 address_lo; /* low 32 bits of msi message address */
+ u32 address_hi; /* high 32 bits of msi message address */
+ u32 data; /* 16 bits of msi message data */
+};
+
+extern int pci_msi_ignore_mask;
+/* Helper functions */
+struct irq_data;
+struct msi_desc;
+void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
+
+struct msi_desc {
+ struct {
+ __u8 is_msix : 1;
+ __u8 multiple: 3; /* log2 num of messages allocated */
+ __u8 multi_cap : 3; /* log2 num of messages supported */
+ __u8 maskbit : 1; /* mask-pending bit supported ? */
+ __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
+ __u16 entry_nr; /* specific enabled entry */
+ unsigned default_irq; /* default pre-assigned irq */
+ } msi_attrib;
+
+ u32 masked; /* mask bits */
+ unsigned int irq;
+ unsigned int nvec_used; /* number of messages */
+ struct list_head list;
+
+ union {
+ void __iomem *mask_base;
+ u8 mask_pos;
+ };
+ struct pci_dev *dev;
+
+ /* Last set MSI message */
+ struct msi_msg msg;
+};
+
+/* Helpers to hide struct msi_desc implementation details */
+#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
+#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
+#define first_msi_entry(dev) \
+ list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
+#define for_each_msi_entry(desc, dev) \
+ list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+
+#ifdef CONFIG_PCI_MSI
+#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
+#define for_each_pci_msi_entry(desc, pdev) \
+ for_each_msi_entry((desc), &(pdev)->dev)
+
+static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+ return desc->dev;
+}
+#endif /* CONFIG_PCI_MSI */
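+
+/*
+ * Iteration sketch (hypothetical caller, CONFIG_PCI_MSI assumed): walking
+ * the MSI descriptors of a PCI device with the helpers above.
+ *
+ *	struct msi_desc *desc;
+ *
+ *	for_each_pci_msi_entry(desc, pdev)
+ *		pr_debug("irq %u uses %u vectors\n", desc->irq, desc->nvec_used);
+ */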
+
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
+
+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
+void pci_msi_mask_irq(struct irq_data *data);
+void pci_msi_unmask_irq(struct irq_data *data);
+
+/* Conversion helpers. Should be removed after merging */
+static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+{
+ __pci_write_msi_msg(entry, msg);
+}
+static inline void write_msi_msg(int irq, struct msi_msg *msg)
+{
+ pci_write_msi_msg(irq, msg);
+}
+static inline void mask_msi_irq(struct irq_data *data)
+{
+ pci_msi_mask_irq(data);
+}
+static inline void unmask_msi_irq(struct irq_data *data)
+{
+ pci_msi_unmask_irq(data);
+}
+
+/*
+ * The arch hooks to set up msi irqs. Those functions are
+ * implemented as weak symbols so that they /can/ be overridden by
+ * architecture specific code if needed.
+ */
+int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
+void arch_teardown_msi_irq(unsigned int irq);
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void arch_teardown_msi_irqs(struct pci_dev *dev);
+void arch_restore_msi_irqs(struct pci_dev *dev);
+
+void default_teardown_msi_irqs(struct pci_dev *dev);
+void default_restore_msi_irqs(struct pci_dev *dev);
+
+struct msi_controller {
+ struct module *owner;
+ struct device *dev;
+ struct device_node *of_node;
+ struct list_head list;
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *domain;
+#endif
+
+ int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
+ struct msi_desc *desc);
+ void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
+};
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+
+#include <linux/irqhandler.h>
+#include <asm/msi.h>
+
+struct irq_domain;
+struct irq_chip;
+struct device_node;
+struct msi_domain_info;
+
+/**
+ * struct msi_domain_ops - MSI interrupt domain callbacks
+ * @get_hwirq: Retrieve the resulting hw irq number
+ * @msi_init: Domain specific init function for MSI interrupts
+ * @msi_free:		Domain specific function to free MSI interrupts
+ * @msi_check: Callback for verification of the domain/info/dev data
+ * @msi_prepare: Prepare the allocation of the interrupts in the domain
+ * @msi_finish:		Optional callback to finalize the allocation
+ * @set_desc: Set the msi descriptor for an interrupt
+ * @handle_error: Optional error handler if the allocation fails
+ *
+ * @get_hwirq, @msi_init and @msi_free are callbacks used by
+ * msi_create_irq_domain() and related interfaces
+ *
+ * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
+ * are callbacks used by msi_irq_domain_alloc_irqs() and related
+ * interfaces which are based on msi_desc.
+ */
+struct msi_domain_ops {
+ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
+ msi_alloc_info_t *arg);
+ int (*msi_init)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq, irq_hw_number_t hwirq,
+ msi_alloc_info_t *arg);
+ void (*msi_free)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int virq);
+ int (*msi_check)(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ struct device *dev);
+ int (*msi_prepare)(struct irq_domain *domain,
+ struct device *dev, int nvec,
+ msi_alloc_info_t *arg);
+ void (*msi_finish)(msi_alloc_info_t *arg, int retval);
+ void (*set_desc)(msi_alloc_info_t *arg,
+ struct msi_desc *desc);
+ int (*handle_error)(struct irq_domain *domain,
+ struct msi_desc *desc, int error);
+};
+
+/**
+ * struct msi_domain_info - MSI interrupt domain data
+ * @flags:		Flags to describe features and capabilities
+ * @ops: The callback data structure
+ * @chip: Optional: associated interrupt chip
+ * @chip_data: Optional: associated interrupt chip data
+ * @handler: Optional: associated interrupt flow handler
+ * @handler_data: Optional: associated interrupt flow handler data
+ * @handler_name: Optional: associated interrupt flow handler name
+ * @data: Optional: domain specific data
+ */
+struct msi_domain_info {
+ u32 flags;
+ struct msi_domain_ops *ops;
+ struct irq_chip *chip;
+ void *chip_data;
+ irq_flow_handler_t handler;
+ void *handler_data;
+ const char *handler_name;
+ void *data;
+};
+
+/* Flags for msi_domain_info */
+enum {
+ /*
+	 * Init non-implemented ops callbacks with default MSI domain
+ * callbacks.
+ */
+ MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0),
+ /*
+	 * Init non-implemented chip callbacks with default MSI chip
+ * callbacks.
+ */
+ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
+ /* Build identity map between hwirq and irq */
+ MSI_FLAG_IDENTITY_MAP = (1 << 2),
+ /* Support multiple PCI MSI interrupts */
+ MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
+ /* Support PCI MSIX interrupts */
+ MSI_FLAG_PCI_MSIX = (1 << 4),
+};
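+
+/*
+ * Sketch of a minimal user of these flags (hypothetical identifiers; the
+ * default domain and chip callbacks are filled in by the core):
+ *
+ *	static struct msi_domain_info my_msi_info = {
+ *		.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ *			 MSI_FLAG_PCI_MSIX,
+ *		.chip  = &my_msi_irq_chip,
+ *	};
+ *
+ *	domain = msi_create_irq_domain(node, &my_msi_info, parent);
+ */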
+
+int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force);
+
+struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
+ int nvec);
+void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
+
+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
+
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+ struct msi_domain_info *info,
+ struct irq_domain *parent);
+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
+ int nvec, int type);
+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+ struct msi_domain_info *info, struct irq_domain *parent);
+
+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
+ struct msi_desc *desc);
+int pci_msi_domain_check_cap(struct irq_domain *domain,
+ struct msi_domain_info *info, struct device *dev);
+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
+
+#endif /* LINUX_MSI_H */
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
new file mode 100644
index 000000000..fe722c1fb
--- /dev/null
+++ b/include/linux/msm_mdp.h
@@ -0,0 +1,79 @@
+/* include/linux/msm_mdp.h
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_MDP_H_
+#define _MSM_MDP_H_
+
+#include <linux/types.h>
+
+#define MSMFB_IOCTL_MAGIC 'm'
+#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
+#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
+
+enum {
+ MDP_RGB_565, /* RGB 565 planar */
+ MDP_XRGB_8888, /* RGB 888 padded */
+ MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */
+ MDP_ARGB_8888, /* ARGB 888 */
+ MDP_RGB_888, /* RGB 888 planar */
+ MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */
+ MDP_YCRYCB_H2V1, /* YCrYCb interleave */
+	MDP_Y_CRCB_H2V1,	/* Y and CrCb, pseudo planar w/ Cr is in MSB */
+	MDP_Y_CBCR_H2V1,	/* Y and CbCr, pseudo planar w/ Cb is in MSB */
+ MDP_RGBA_8888, /* ARGB 888 */
+ MDP_BGRA_8888, /* ABGR 888 */
+ MDP_RGBX_8888, /* RGBX 888 */
+ MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */
+};
+
+enum {
+ PMEM_IMG,
+ FB_IMG,
+};
+
+/* flag values */
+#define MDP_ROT_NOP 0
+#define MDP_FLIP_LR 0x1
+#define MDP_FLIP_UD 0x2
+#define MDP_ROT_90 0x4
+#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
+#define MDP_DITHER 0x8
+#define MDP_BLUR 0x10
+
+#define MDP_TRANSP_NOP 0xffffffff
+#define MDP_ALPHA_NOP 0xff
+
+struct mdp_rect {
+ u32 x, y, w, h;
+};
+
+struct mdp_img {
+ u32 width, height, format, offset;
+ int memory_id; /* the file descriptor */
+};
+
+struct mdp_blit_req {
+ struct mdp_img src;
+ struct mdp_img dst;
+ struct mdp_rect src_rect;
+ struct mdp_rect dst_rect;
+ u32 alpha, transp_mask, flags;
+};
+
+struct mdp_blit_req_list {
+ u32 count;
+ struct mdp_blit_req req[];
+};
+
+#endif /* _MSM_MDP_H_ */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
new file mode 100644
index 000000000..36bb6a503
--- /dev/null
+++ b/include/linux/mtd/bbm.h
@@ -0,0 +1,172 @@
+/*
+ * linux/include/linux/mtd/bbm.h
+ *
+ * NAND family Bad Block Management (BBM) header file
+ * - Bad Block Table (BBT) implementation
+ *
+ * Copyright © 2005 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Copyright © 2000-2005
+ * Thomas Gleixner <tglx@linuxtronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#ifndef __LINUX_MTD_BBM_H
+#define __LINUX_MTD_BBM_H
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS 8
+
+/**
+ * struct nand_bbt_descr - bad block table descriptor
+ * @options: options for this descriptor
+ * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
+ *		when the bbt is searched, the pages of the found bbts are
+ *		stored here. It is an array and currently supports up to 8 chips.
+ * @offs: offset of the pattern in the oob area of the page
+ * @veroffs:	offset of the bbt version counter in the oob area of the page
+ * @version: version read from the bbt page during scan
+ * @len: length of the pattern, if 0 no pattern check is performed
+ * @maxblocks: maximum number of blocks to search for a bbt. This number of
+ * blocks is reserved at the end of the device where the tables are
+ * written.
+ * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
+ * bad) block in the stored bbt
+ * @pattern: pattern to identify bad block table or factory marked good /
+ * bad blocks, can be NULL, if len = 0
+ *
+ * Descriptor for the bad block table marker and the descriptor for the
+ * pattern which identifies good and bad blocks. The assumption is made
+ * that the pattern and the version count are always located in the oob area
+ * of the first block.
+ */
+struct nand_bbt_descr {
+ int options;
+ int pages[NAND_MAX_CHIPS];
+ int offs;
+ int veroffs;
+ uint8_t version[NAND_MAX_CHIPS];
+ int len;
+ int maxblocks;
+ int reserved_block_code;
+ uint8_t *pattern;
+};
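+
+/*
+ * Example descriptor (a sketch modelled on the flash-based table layout
+ * used by the generic BBT code; pattern bytes and offsets are illustrative,
+ * the NAND_BBT_* option flags are defined below):
+ *
+ *	static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+ *	static struct nand_bbt_descr bbt_main_descr = {
+ *		.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE |
+ *			   NAND_BBT_WRITE | NAND_BBT_2BIT |
+ *			   NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ *		.offs = 8,
+ *		.len = 4,
+ *		.veroffs = 12,
+ *		.maxblocks = 4,
+ *		.pattern = bbt_pattern,
+ *	};
+ */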
+
+/* Options for the bad block table descriptors */
+
+/* The number of bits used per block in the bbt on the device */
+#define NAND_BBT_NRBITS_MSK 0x0000000F
+#define NAND_BBT_1BIT 0x00000001
+#define NAND_BBT_2BIT 0x00000002
+#define NAND_BBT_4BIT 0x00000004
+#define NAND_BBT_8BIT 0x00000008
+/* The bad block table is in the last good block of the device */
+#define NAND_BBT_LASTBLOCK 0x00000010
+/* The bbt is at the given page, else we must scan for the bbt */
+#define NAND_BBT_ABSPAGE 0x00000020
+/* bbt is stored per chip on multichip devices */
+#define NAND_BBT_PERCHIP 0x00000080
+/* bbt has a version counter at offset veroffs */
+#define NAND_BBT_VERSION 0x00000100
+/* Create a bbt if none exists */
+#define NAND_BBT_CREATE 0x00000200
+/*
+ * Create an empty BBT with no vendor information. Vendor's information may be
+ * unavailable, for example, if the NAND controller has a different data and OOB
+ * layout or if this information is already purged. Must be used in conjunction
+ * with NAND_BBT_CREATE.
+ */
+#define NAND_BBT_CREATE_EMPTY 0x00000400
+/* Write bbt if necessary */
+#define NAND_BBT_WRITE 0x00002000
+/* Read and write back block contents when writing bbt */
+#define NAND_BBT_SAVECONTENT 0x00004000
+/* Search good / bad pattern on the first and the second page */
+#define NAND_BBT_SCAN2NDPAGE 0x00008000
+/* Search good / bad pattern on the last page of the eraseblock */
+#define NAND_BBT_SCANLASTPAGE 0x00010000
+/*
+ * Use a flash based bad block table. By default, OOB identifier is saved in
+ * OOB area. This option is passed to the default bad block table function.
+ */
+#define NAND_BBT_USE_FLASH 0x00020000
+/*
+ * Do not store flash based bad block table marker in the OOB area; store it
+ * in-band.
+ */
+#define NAND_BBT_NO_OOB 0x00040000
+/*
+ * Do not write new bad block markers to OOB; useful, e.g., when ECC covers
+ * entire spare area. Must be used with NAND_BBT_USE_FLASH.
+ */
+#define NAND_BBT_NO_OOB_BBM 0x00080000
+
+/*
+ * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
+ * was allocated dynamically and must be freed in nand_release(). Has no meaning
+ * in nand_chip.bbt_options.
+ */
+#define NAND_BBT_DYNAMICSTRUCT 0x80000000
+
+/* The maximum number of blocks to scan for a bbt */
+#define NAND_BBT_SCAN_MAXBLOCKS 4
+
+/*
+ * Constants for oob configuration
+ */
+#define NAND_SMALL_BADBLOCK_POS 5
+#define NAND_LARGE_BADBLOCK_POS 0
+#define ONENAND_BADBLOCK_POS 0
+
+/*
+ * Bad block scanning errors
+ */
+#define ONENAND_BBT_READ_ERROR 1
+#define ONENAND_BBT_READ_ECC_ERROR 2
+#define ONENAND_BBT_READ_FATAL_ERROR 4
+
+/**
+ * struct bbm_info - [GENERIC] Bad Block Table data structure
+ * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
+ * @badblockpos: [INTERN] position of the bad block marker in the oob area
+ * @options: options for this descriptor
+ * @bbt: [INTERN] bad block table pointer
+ * @isbad_bbt: function to determine if a block is bad
+ * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for
+ * initial bad block scan
+ * @priv:		[OPTIONAL] pointer to private bbm data
+ */
+struct bbm_info {
+ int bbt_erase_shift;
+ int badblockpos;
+ int options;
+
+ uint8_t *bbt;
+
+ int (*isbad_bbt)(struct mtd_info *mtd, loff_t ofs, int allowbbt);
+
+	/* TODO Add more NAND specific fields */
+ struct nand_bbt_descr *badblock_pattern;
+
+ void *priv;
+};
+
+/* OneNAND BBT interface */
+extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
+extern int onenand_default_bbt(struct mtd_info *mtd);
+
+#endif /* __LINUX_MTD_BBM_H */
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
new file mode 100644
index 000000000..e93837f64
--- /dev/null
+++ b/include/linux/mtd/blktrans.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __MTD_TRANS_H__
+#define __MTD_TRANS_H__
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+struct hd_geometry;
+struct mtd_info;
+struct mtd_blktrans_ops;
+struct file;
+struct inode;
+
+struct mtd_blktrans_dev {
+ struct mtd_blktrans_ops *tr;
+ struct list_head list;
+ struct mtd_info *mtd;
+ struct mutex lock;
+ int devnum;
+ bool bg_stop;
+ unsigned long size;
+ int readonly;
+ int open;
+ struct kref ref;
+ struct gendisk *disk;
+ struct attribute_group *disk_attributes;
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ struct request_queue *rq;
+ spinlock_t queue_lock;
+ void *priv;
+ fmode_t file_mode;
+};
+
+struct mtd_blktrans_ops {
+ char *name;
+ int major;
+ int part_bits;
+ int blksize;
+ int blkshift;
+
+ /* Access functions */
+ int (*readsect)(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buffer);
+ int (*writesect)(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buffer);
+ int (*discard)(struct mtd_blktrans_dev *dev,
+ unsigned long block, unsigned nr_blocks);
+ void (*background)(struct mtd_blktrans_dev *dev);
+
+ /* Block layer ioctls */
+ int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo);
+ int (*flush)(struct mtd_blktrans_dev *dev);
+
+ /* Called with mtd_table_mutex held; no race with add/remove */
+ int (*open)(struct mtd_blktrans_dev *dev);
+ void (*release)(struct mtd_blktrans_dev *dev);
+
+ /* Called on {de,}registration and on subsequent addition/removal
+ of devices, with mtd_table_mutex held. */
+ void (*add_mtd)(struct mtd_blktrans_ops *tr, struct mtd_info *mtd);
+ void (*remove_dev)(struct mtd_blktrans_dev *dev);
+
+ struct list_head devs;
+ struct list_head list;
+ struct module *owner;
+};
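+
+/*
+ * Registration sketch (hypothetical "foo" translation layer; a dynamic
+ * major is requested by passing 0):
+ *
+ *	static struct mtd_blktrans_ops foo_tr = {
+ *		.name		= "foo",
+ *		.major		= 0,
+ *		.part_bits	= 0,
+ *		.blksize	= 512,
+ *		.blkshift	= 9,
+ *		.readsect	= foo_readsect,
+ *		.writesect	= foo_writesect,
+ *		.add_mtd	= foo_add_mtd,
+ *		.remove_dev	= foo_remove_dev,
+ *		.owner		= THIS_MODULE,
+ *	};
+ *
+ *	register_mtd_blktrans(&foo_tr);
+ */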
+
+extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr);
+extern int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr);
+extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
+extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
+extern int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev);
+
+
+#endif /* __MTD_TRANS_H__ */
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
new file mode 100644
index 000000000..299d7d31f
--- /dev/null
+++ b/include/linux/mtd/cfi.h
@@ -0,0 +1,564 @@
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __MTD_CFI_H__
+#define __MTD_CFI_H__
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi_endian.h>
+#include <linux/mtd/xip.h>
+
+#ifdef CONFIG_MTD_CFI_I1
+#define cfi_interleave(cfi) 1
+#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
+#else
+#define cfi_interleave_is_1(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I2
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+# define cfi_interleave(cfi) 2
+# endif
+#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
+#else
+#define cfi_interleave_is_2(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I4
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+# define cfi_interleave(cfi) 4
+# endif
+#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
+#else
+#define cfi_interleave_is_4(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I8
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+# define cfi_interleave(cfi) 8
+# endif
+#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
+#else
+#define cfi_interleave_is_8(cfi) (0)
+#endif
+
+#ifndef cfi_interleave
+#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work.
+static inline int cfi_interleave(void *cfi)
+{
+ BUG();
+ return 0;
+}
+#endif
+
+static inline int cfi_interleave_supported(int i)
+{
+ switch (i) {
+#ifdef CONFIG_MTD_CFI_I1
+ case 1:
+#endif
+#ifdef CONFIG_MTD_CFI_I2
+ case 2:
+#endif
+#ifdef CONFIG_MTD_CFI_I4
+ case 4:
+#endif
+#ifdef CONFIG_MTD_CFI_I8
+ case 8:
+#endif
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+
+/* NB: these values must represent the number of bytes needed to meet the
+ * device type (x8, x16, x32). E.g. a 32-bit device is 4 bytes (4 x 8 bits).
+ * These numbers are used in calculations.
+ */
+#define CFI_DEVICETYPE_X8 (8 / 8)
+#define CFI_DEVICETYPE_X16 (16 / 8)
+#define CFI_DEVICETYPE_X32 (32 / 8)
+#define CFI_DEVICETYPE_X64 (64 / 8)
+
+
+/* Device Interface Code Assignments from the "Common Flash Memory Interface
+ * Publication 100" dated December 1, 2001.
+ */
+#define CFI_INTERFACE_X8_ASYNC 0x0000
+#define CFI_INTERFACE_X16_ASYNC 0x0001
+#define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002
+#define CFI_INTERFACE_X32_ASYNC 0x0003
+#define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005
+#define CFI_INTERFACE_NOT_ALLOWED 0xffff
+
+
+/* NB: We keep these structures in memory in HOST byteorder, except
+ * where individually noted.
+ */
+
+/* Basic Query Structure */
+struct cfi_ident {
+ uint8_t qry[3];
+ uint16_t P_ID;
+ uint16_t P_ADR;
+ uint16_t A_ID;
+ uint16_t A_ADR;
+ uint8_t VccMin;
+ uint8_t VccMax;
+ uint8_t VppMin;
+ uint8_t VppMax;
+ uint8_t WordWriteTimeoutTyp;
+ uint8_t BufWriteTimeoutTyp;
+ uint8_t BlockEraseTimeoutTyp;
+ uint8_t ChipEraseTimeoutTyp;
+ uint8_t WordWriteTimeoutMax;
+ uint8_t BufWriteTimeoutMax;
+ uint8_t BlockEraseTimeoutMax;
+ uint8_t ChipEraseTimeoutMax;
+ uint8_t DevSize;
+ uint16_t InterfaceDesc;
+ uint16_t MaxBufWriteSize;
+ uint8_t NumEraseRegions;
+ uint32_t EraseRegionInfo[0]; /* Not host ordered */
+} __packed;
+
+/* Extended Query Structure for both PRI and ALT */
+
+struct cfi_extquery {
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
+} __packed;
+
+/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
+
+struct cfi_pri_intelext {
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
+ uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
+ block follows - FIXME - not currently supported */
+ uint8_t SuspendCmdSupport;
+ uint16_t BlkStatusRegMask;
+ uint8_t VccOptimal;
+ uint8_t VppOptimal;
+ uint8_t NumProtectionFields;
+ uint16_t ProtRegAddr;
+ uint8_t FactProtRegSize;
+ uint8_t UserProtRegSize;
+ uint8_t extra[0];
+} __packed;
+
+struct cfi_intelext_otpinfo {
+ uint32_t ProtRegAddr;
+ uint16_t FactGroups;
+ uint8_t FactProtRegSize;
+ uint16_t UserGroups;
+ uint8_t UserProtRegSize;
+} __packed;
+
+struct cfi_intelext_blockinfo {
+ uint16_t NumIdentBlocks;
+ uint16_t BlockSize;
+ uint16_t MinBlockEraseCycles;
+ uint8_t BitsPerCell;
+ uint8_t BlockCap;
+} __packed;
+
+struct cfi_intelext_regioninfo {
+ uint16_t NumIdentPartitions;
+ uint8_t NumOpAllowed;
+ uint8_t NumOpAllowedSimProgMode;
+ uint8_t NumOpAllowedSimEraMode;
+ uint8_t NumBlockTypes;
+ struct cfi_intelext_blockinfo BlockTypes[1];
+} __packed;
+
+struct cfi_intelext_programming_regioninfo {
+ uint8_t ProgRegShift;
+ uint8_t Reserved1;
+ uint8_t ControlValid;
+ uint8_t Reserved2;
+ uint8_t ControlInvalid;
+ uint8_t Reserved3;
+} __packed;
+
+/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
+
+struct cfi_pri_amdstd {
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
+ uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
+ uint8_t EraseSuspend;
+ uint8_t BlkProt;
+ uint8_t TmpBlkUnprotect;
+ uint8_t BlkProtUnprot;
+ uint8_t SimultaneousOps;
+ uint8_t BurstMode;
+ uint8_t PageMode;
+ uint8_t VppMin;
+ uint8_t VppMax;
+ uint8_t TopBottom;
+} __packed;
+
+/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
+
+struct cfi_pri_atmel {
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
+ uint8_t Features;
+ uint8_t BottomBoot;
+ uint8_t BurstMode;
+ uint8_t PageMode;
+} __packed;
+
+struct cfi_pri_query {
+ uint8_t NumFields;
+ uint32_t ProtField[1]; /* Not host ordered */
+} __packed;
+
+struct cfi_bri_query {
+ uint8_t PageModeReadCap;
+ uint8_t NumFields;
+ uint32_t ConfField[1]; /* Not host ordered */
+} __packed;
+
+#define P_ID_NONE 0x0000
+#define P_ID_INTEL_EXT 0x0001
+#define P_ID_AMD_STD 0x0002
+#define P_ID_INTEL_STD 0x0003
+#define P_ID_AMD_EXT 0x0004
+#define P_ID_WINBOND 0x0006
+#define P_ID_ST_ADV 0x0020
+#define P_ID_MITSUBISHI_STD 0x0100
+#define P_ID_MITSUBISHI_EXT 0x0101
+#define P_ID_SST_PAGE 0x0102
+#define P_ID_SST_OLD 0x0701
+#define P_ID_INTEL_PERFORMANCE 0x0200
+#define P_ID_INTEL_DATA 0x0210
+#define P_ID_RESERVED 0xffff
+
+
+#define CFI_MODE_CFI 1
+#define CFI_MODE_JEDEC 0
+
+struct cfi_private {
+ uint16_t cmdset;
+ void *cmdset_priv;
+ int interleave;
+ int device_type;
+ int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */
+ int addr_unlock1;
+ int addr_unlock2;
+ struct mtd_info *(*cmdset_setup)(struct map_info *);
+ struct cfi_ident *cfiq; /* For now only one. We insist that all devs
+ must be of the same type. */
+ int mfr, id;
+ int numchips;
+ map_word sector_erase_cmd;
+ unsigned long chipshift; /* Because they're of the same type */
+ const char *im_name; /* inter_module name for cmdset_setup */
+ struct flchip chips[0]; /* per-chip data structure for each chip */
+};
+
+/*
+ * Returns the command address according to the given geometry.
+ */
+static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+ struct map_info *map, struct cfi_private *cfi)
+{
+ unsigned bankwidth = map_bankwidth(map);
+ unsigned interleave = cfi_interleave(cfi);
+ unsigned type = cfi->device_type;
+ uint32_t addr;
+
+ addr = (cmd_ofs * type) * interleave;
+
+ /* Modify the unlock address if we are in compatibility mode.
+ * For 16bit devices on 8 bit busses
+ * and 32bit devices on 16 bit busses
+ * set the low bit of the alternating bit sequence of the address.
+ */
+ if (((type * interleave) > bankwidth) && ((cmd_ofs & 0xff) == 0xaa))
+ addr |= (type >> 1)*interleave;
+
+ return addr;
+}
+
+/*
+ * Transforms the CFI command for the given geometry (bus width & interleave).
+ * It looks too long to be inline, but in the common case it should almost all
+ * get optimised away.
+ */
+static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
+{
+ map_word val = { {0} };
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onecmd;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
+ } else {
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
+ }
+
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+
+ /* First, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ default: BUG();
+ case 1:
+ onecmd = cmd;
+ break;
+ case 2:
+ onecmd = cpu_to_cfi16(map, cmd);
+ break;
+ case 4:
+ onecmd = cpu_to_cfi32(map, cmd);
+ break;
+ }
+
+ /* Now replicate it across the size of an unsigned long, or
+ just to the bus width as appropriate */
+ switch (chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ onecmd |= (onecmd << (chip_mode * 32));
+#endif
+ case 4:
+ onecmd |= (onecmd << (chip_mode * 16));
+ case 2:
+ onecmd |= (onecmd << (chip_mode * 8));
+ case 1:
+ ;
+ }
+
+ /* And finally, for the multi-word case, replicate it
+ in all words in the structure */
+ for (i=0; i < words_per_bus; i++) {
+ val.x[i] = onecmd;
+ }
+
+ return val;
+}
+#define CMD(x) cfi_build_cmd((x), map, cfi)
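+
+/*
+ * A driver can then build and issue a command in one step; for instance
+ * (sketch) resetting an AMD-style chip at its base address:
+ *
+ *	map_write(map, CMD(0xF0), chip->start);
+ */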
+
+
+static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi)
+{
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onestat, res = 0;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
+ } else {
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
+ }
+
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+
+ onestat = val.x[0];
+ /* Or all status words together */
+ for (i=1; i < words_per_bus; i++) {
+ onestat |= val.x[i];
+ }
+
+ res = onestat;
+ switch(chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ res |= (onestat >> (chip_mode * 32));
+#endif
+ case 4:
+ res |= (onestat >> (chip_mode * 16));
+ case 2:
+ res |= (onestat >> (chip_mode * 8));
+ case 1:
+ ;
+ }
+
+ /* Last, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ case 1:
+ break;
+ case 2:
+ res = cfi16_to_cpu(map, res);
+ break;
+ case 4:
+ res = cfi32_to_cpu(map, res);
+ break;
+ default: BUG();
+ }
+ return res;
+}
+
+#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
+
+
+/*
+ * Sends a CFI command to a bank of flash for the given geometry.
+ *
+ * Returns the offset in flash where the command was written.
+ * If prev_val is non-null, it will be set to the value at the command address,
+ * before the command was written.
+ */
+static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+ struct map_info *map, struct cfi_private *cfi,
+ int type, map_word *prev_val)
+{
+ map_word val;
+ uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
+ val = cfi_build_cmd(cmd, map, cfi);
+
+ if (prev_val)
+ *prev_val = map_read(map, addr);
+
+ map_write(map, val, addr);
+
+ return addr - base;
+}
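+
+/*
+ * For example, the JEDEC/AMD-style unlock cycles can be expressed with this
+ * helper (a sketch of how the command-set drivers typically use it):
+ *
+ *	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ *	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ *	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ */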
+
+static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
+{
+ map_word val = map_read(map, addr);
+
+ if (map_bankwidth_is_1(map)) {
+ return val.x[0];
+ } else if (map_bankwidth_is_2(map)) {
+ return cfi16_to_cpu(map, val.x[0]);
+ } else {
+ /* No point in a 64-bit byteswap since that would just be
+ swapping the responses from different chips, and we are
+ only interested in one chip (a representative sample) */
+ return cfi32_to_cpu(map, val.x[0]);
+ }
+}
+
+static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
+{
+ map_word val = map_read(map, addr);
+
+ if (map_bankwidth_is_1(map)) {
+ return val.x[0] & 0xff;
+ } else if (map_bankwidth_is_2(map)) {
+ return cfi16_to_cpu(map, val.x[0]);
+ } else {
+ /* No point in a 64-bit byteswap since that would just be
+ swapping the responses from different chips, and we are
+ only interested in one chip (a representative sample) */
+ return cfi32_to_cpu(map, val.x[0]);
+ }
+}
+
+static inline void cfi_udelay(int us)
+{
+ if (us >= 1000) {
+ msleep((us+999)/1000);
+ } else {
+ udelay(us);
+ cond_resched();
+ }
+}
+
+int __xipram cfi_qry_present(struct map_info *map, __u32 base,
+ struct cfi_private *cfi);
+int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
+ struct cfi_private *cfi);
+void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
+ struct cfi_private *cfi);
+
+struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
+ const char* name);
+struct cfi_fixup {
+ uint16_t mfr;
+ uint16_t id;
+ void (*fixup)(struct mtd_info *mtd);
+};
+
+#define CFI_MFR_ANY 0xFFFF
+#define CFI_ID_ANY 0xFFFF
+#define CFI_MFR_CONTINUATION 0x007F
+
+#define CFI_MFR_AMD 0x0001
+#define CFI_MFR_AMIC 0x0037
+#define CFI_MFR_ATMEL 0x001F
+#define CFI_MFR_EON 0x001C
+#define CFI_MFR_FUJITSU 0x0004
+#define CFI_MFR_HYUNDAI 0x00AD
+#define CFI_MFR_INTEL 0x0089
+#define CFI_MFR_MACRONIX 0x00C2
+#define CFI_MFR_NEC 0x0010
+#define CFI_MFR_PMC 0x009D
+#define CFI_MFR_SAMSUNG 0x00EC
+#define CFI_MFR_SHARP 0x00B0
+#define CFI_MFR_SST 0x00BF
+#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_TOSHIBA 0x0098
+#define CFI_MFR_WINBOND 0x00DA
+
+void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
+
+typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk);
+
+int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
+ loff_t ofs, size_t len, void *thunk);
+
+
+#endif /* __MTD_CFI_H__ */
diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h
new file mode 100644
index 000000000..b97a62507
--- /dev/null
+++ b/include/linux/mtd/cfi_endian.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include <asm/byteorder.h>
+
+#define CFI_HOST_ENDIAN 1
+#define CFI_LITTLE_ENDIAN 2
+#define CFI_BIG_ENDIAN 3
+
+#if !defined(CONFIG_MTD_CFI_ADV_OPTIONS) || defined(CONFIG_MTD_CFI_NOSWAP)
+#define CFI_DEFAULT_ENDIAN CFI_HOST_ENDIAN
+#elif defined(CONFIG_MTD_CFI_LE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_LITTLE_ENDIAN
+#elif defined(CONFIG_MTD_CFI_BE_BYTE_SWAP)
+#define CFI_DEFAULT_ENDIAN CFI_BIG_ENDIAN
+#else
+#error No CFI endianness defined
+#endif
+
+#define cfi_default(s) ((s)?:CFI_DEFAULT_ENDIAN)
+#define cfi_be(s) (cfi_default(s) == CFI_BIG_ENDIAN)
+#define cfi_le(s) (cfi_default(s) == CFI_LITTLE_ENDIAN)
+#define cfi_host(s) (cfi_default(s) == CFI_HOST_ENDIAN)
+
+#define cpu_to_cfi8(map, x) (x)
+#define cfi8_to_cpu(map, x) (x)
+#define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x))
+#define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x))
+#define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x))
+#define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x))
+#define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x))
+#define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x))
+
+#define _cpu_to_cfi(w, s, x) (cfi_host(s)?(x):_swap_to_cfi(w, s, x))
+#define _cfi_to_cpu(w, s, x) (cfi_host(s)?(x):_swap_to_cpu(w, s, x))
+#define _swap_to_cfi(w, s, x) (cfi_be(s)?cpu_to_be##w(x):cpu_to_le##w(x))
+#define _swap_to_cpu(w, s, x) (cfi_be(s)?be##w##_to_cpu(x):le##w##_to_cpu(x))
diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h
new file mode 100644
index 000000000..ccdbe93a9
--- /dev/null
+++ b/include/linux/mtd/concat.h
@@ -0,0 +1,34 @@
+/*
+ * MTD device concatenation layer definitions
+ *
+ * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef MTD_CONCAT_H
+#define MTD_CONCAT_H
+
+
+struct mtd_info *mtd_concat_create(
+ struct mtd_info *subdev[], /* subdevices to concatenate */
+ int num_devs, /* number of subdevices */
+ const char *name); /* name for the new device */
+
+void mtd_concat_destroy(struct mtd_info *mtd);
+
+#endif
+
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h
new file mode 100644
index 000000000..407d1e556
--- /dev/null
+++ b/include/linux/mtd/doc2000.h
@@ -0,0 +1,220 @@
+/*
+ * Linux driver for Disk-On-Chip devices
+ *
+ * Copyright © 1999 Machine Vision Holdings, Inc.
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ * Copyright © 2002-2003 Greg Ungerer <gerg@snapgear.com>
+ * Copyright © 2002-2003 SnapGear Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __MTD_DOC2000_H__
+#define __MTD_DOC2000_H__
+
+#include <linux/mtd/mtd.h>
+#include <linux/mutex.h>
+
+#define DoC_Sig1 0
+#define DoC_Sig2 1
+
+#define DoC_ChipID 0x1000
+#define DoC_DOCStatus 0x1001
+#define DoC_DOCControl 0x1002
+#define DoC_FloorSelect 0x1003
+#define DoC_CDSNControl 0x1004
+#define DoC_CDSNDeviceSelect 0x1005
+#define DoC_ECCConf 0x1006
+#define DoC_2k_ECCStatus 0x1007
+
+#define DoC_CDSNSlowIO 0x100d
+#define DoC_ECCSyndrome0 0x1010
+#define DoC_ECCSyndrome1 0x1011
+#define DoC_ECCSyndrome2 0x1012
+#define DoC_ECCSyndrome3 0x1013
+#define DoC_ECCSyndrome4 0x1014
+#define DoC_ECCSyndrome5 0x1015
+#define DoC_AliasResolution 0x101b
+#define DoC_ConfigInput 0x101c
+#define DoC_ReadPipeInit 0x101d
+#define DoC_WritePipeTerm 0x101e
+#define DoC_LastDataRead 0x101f
+#define DoC_NOP 0x1020
+
+#define DoC_Mil_CDSN_IO 0x0800
+#define DoC_2k_CDSN_IO 0x1800
+
+#define DoC_Mplus_NOP 0x1002
+#define DoC_Mplus_AliasResolution 0x1004
+#define DoC_Mplus_DOCControl 0x1006
+#define DoC_Mplus_AccessStatus 0x1008
+#define DoC_Mplus_DeviceSelect 0x1008
+#define DoC_Mplus_Configuration 0x100a
+#define DoC_Mplus_OutputControl 0x100c
+#define DoC_Mplus_FlashControl 0x1020
+#define DoC_Mplus_FlashSelect 0x1022
+#define DoC_Mplus_FlashCmd 0x1024
+#define DoC_Mplus_FlashAddress 0x1026
+#define DoC_Mplus_FlashData0 0x1028
+#define DoC_Mplus_FlashData1 0x1029
+#define DoC_Mplus_ReadPipeInit 0x102a
+#define DoC_Mplus_LastDataRead 0x102c
+#define DoC_Mplus_LastDataRead1 0x102d
+#define DoC_Mplus_WritePipeTerm 0x102e
+#define DoC_Mplus_ECCSyndrome0 0x1040
+#define DoC_Mplus_ECCSyndrome1 0x1041
+#define DoC_Mplus_ECCSyndrome2 0x1042
+#define DoC_Mplus_ECCSyndrome3 0x1043
+#define DoC_Mplus_ECCSyndrome4 0x1044
+#define DoC_Mplus_ECCSyndrome5 0x1045
+#define DoC_Mplus_ECCConf 0x1046
+#define DoC_Mplus_Toggle 0x1046
+#define DoC_Mplus_DownloadStatus 0x1074
+#define DoC_Mplus_CtrlConfirm 0x1076
+#define DoC_Mplus_Power 0x1fff
+
+/* How to access the device?
+ * On ARM, it'll be mmap'd directly with 32-bit wide accesses.
+ * On PPC, it's mmap'd and 16-bit wide.
+ * Others use readb/writeb
+ */
+#if defined(__arm__)
+static inline u8 ReadDOC_(u32 __iomem *addr, unsigned long reg)
+{
+ return __raw_readl(addr + reg);
+}
+static inline void WriteDOC_(u8 data, u32 __iomem *addr, unsigned long reg)
+{
+ __raw_writel(data, addr + reg);
+ wmb();
+}
+#define DOC_IOREMAP_LEN 0x8000
+#elif defined(__ppc__)
+static inline u8 ReadDOC_(u16 __iomem *addr, unsigned long reg)
+{
+ return __raw_readw(addr + reg);
+}
+static inline void WriteDOC_(u8 data, u16 __iomem *addr, unsigned long reg)
+{
+ __raw_writew(data, addr + reg);
+ wmb();
+}
+#define DOC_IOREMAP_LEN 0x4000
+#else
+#define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg))
+#define WriteDOC_(d, adr, reg) writeb(d, (void __iomem *)(adr) + (reg))
+#define DOC_IOREMAP_LEN 0x2000
+
+#endif
+
+#if defined(__i386__) || defined(__x86_64__)
+#define USE_MEMCPY
+#endif
+
+/* These are provided to directly use the DoC_xxx defines */
+#define ReadDOC(adr, reg) ReadDOC_(adr,DoC_##reg)
+#define WriteDOC(d, adr, reg) WriteDOC_(d,adr,DoC_##reg)
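+
+/*
+ * Usage sketch: "reg" is token-pasted onto DoC_, so reading the chip ID
+ * register is simply ReadDOC(docptr, ChipID). The example pointer is
+ * whatever the driver obtained by ioremap()ing the window (the virtadr
+ * field of struct DiskOnChip below).
+ */
+static inline u8 example_doc_read_chipid(void __iomem *docptr)
+{
+	return ReadDOC(docptr, ChipID);
+}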
+
+#define DOC_MODE_RESET 0
+#define DOC_MODE_NORMAL 1
+#define DOC_MODE_RESERVED1 2
+#define DOC_MODE_RESERVED2 3
+
+#define DOC_MODE_CLR_ERR 0x80
+#define DOC_MODE_RST_LAT 0x10
+#define DOC_MODE_BDECT 0x08
+#define DOC_MODE_MDWREN 0x04
+
+#define DOC_ChipID_Doc2k 0x20
+#define DOC_ChipID_Doc2kTSOP 0x21 /* internal number for MTD */
+#define DOC_ChipID_DocMil 0x30
+#define DOC_ChipID_DocMilPlus32 0x40
+#define DOC_ChipID_DocMilPlus16 0x41
+
+#define CDSN_CTRL_FR_B 0x80
+#define CDSN_CTRL_FR_B0 0x40
+#define CDSN_CTRL_FR_B1 0x80
+
+#define CDSN_CTRL_ECC_IO 0x20
+#define CDSN_CTRL_FLASH_IO 0x10
+#define CDSN_CTRL_WP 0x08
+#define CDSN_CTRL_ALE 0x04
+#define CDSN_CTRL_CLE 0x02
+#define CDSN_CTRL_CE 0x01
+
+#define DOC_ECC_RESET 0
+#define DOC_ECC_ERROR 0x80
+#define DOC_ECC_RW 0x20
+#define DOC_ECC__EN 0x08
+#define DOC_TOGGLE_BIT 0x04
+#define DOC_ECC_RESV 0x02
+#define DOC_ECC_IGNORE 0x01
+
+#define DOC_FLASH_CE 0x80
+#define DOC_FLASH_WP 0x40
+#define DOC_FLASH_BANK 0x02
+
+/* We have to also set the reserved bit 1 for enable */
+#define DOC_ECC_EN (DOC_ECC__EN | DOC_ECC_RESV)
+#define DOC_ECC_DIS (DOC_ECC_RESV)
+
+struct Nand {
+ char floor, chip;
+ unsigned long curadr;
+ unsigned char curmode;
+ /* Also some erase/write/pipeline info when we get that far */
+};
+
+#define MAX_FLOORS 4
+#define MAX_CHIPS 4
+
+#define MAX_FLOORS_MIL 1
+#define MAX_CHIPS_MIL 1
+
+#define MAX_FLOORS_MPLUS 2
+#define MAX_CHIPS_MPLUS 1
+
+#define ADDR_COLUMN 1
+#define ADDR_PAGE 2
+#define ADDR_COLUMN_PAGE 3
+
+struct DiskOnChip {
+ unsigned long physadr;
+ void __iomem *virtadr;
+ unsigned long totlen;
+ unsigned char ChipID; /* Type of DiskOnChip */
+ int ioreg;
+
+ unsigned long mfr; /* Flash IDs - only one type of flash per device */
+ unsigned long id;
+ int chipshift;
+ char page256;
+ char pageadrlen;
+ char interleave; /* Internal interleaving - Millennium Plus style */
+ unsigned long erasesize;
+
+ int curfloor;
+ int curchip;
+
+ int numchips;
+ struct Nand *chips;
+ struct mtd_info *nextdoc;
+ struct mutex lock;
+};
+
+int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]);
+
+#endif /* __MTD_DOC2000_H__ */
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
new file mode 100644
index 000000000..b63fa457f
--- /dev/null
+++ b/include/linux/mtd/flashchip.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright © 2000 Red Hat UK Limited
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __MTD_FLASHCHIP_H__
+#define __MTD_FLASHCHIP_H__
+
+/* For spinlocks. sched.h includes spinlock.h from whichever directory it
+ * happens to be in - so we don't have to care whether we're on 2.2, which
+ * has asm/spinlock.h, or 2.4, which has linux/spinlock.h
+ */
+#include <linux/sched.h>
+#include <linux/mutex.h>
+
+typedef enum {
+ FL_READY,
+ FL_STATUS,
+ FL_CFI_QUERY,
+ FL_JEDEC_QUERY,
+ FL_ERASING,
+ FL_ERASE_SUSPENDING,
+ FL_ERASE_SUSPENDED,
+ FL_WRITING,
+ FL_WRITING_TO_BUFFER,
+ FL_OTP_WRITE,
+ FL_WRITE_SUSPENDING,
+ FL_WRITE_SUSPENDED,
+ FL_PM_SUSPENDED,
+ FL_SYNCING,
+ FL_UNLOADING,
+ FL_LOCKING,
+ FL_UNLOCKING,
+ FL_POINT,
+ FL_XIP_WHILE_ERASING,
+ FL_XIP_WHILE_WRITING,
+ FL_SHUTDOWN,
+ /* These 2 come from nand_state_t, which has been unified here */
+ FL_READING,
+ FL_CACHEDPRG,
+ /* These 4 come from onenand_state_t, which has been unified here */
+ FL_RESETING,
+ FL_OTPING,
+ FL_PREPARING_ERASE,
+ FL_VERIFYING_ERASE,
+
+ FL_UNKNOWN
+} flstate_t;
+
+
+
+/* NOTE: confusingly, this can be used to refer to more than one chip at a time,
+ if they're interleaved. This can even refer to individual partitions on
+ the same physical chip when present. */
+
+struct flchip {
+ unsigned long start; /* Offset within the map */
+ // unsigned long len;
+ /* We omit len for now, because when we group them together
+ we insist that they're all of the same size, and the chip size
+ is held in the next level up. If we get more versatile later,
+ it'll make it a damn sight harder to find which chip we want from
+ a given offset, and we'll want to add the per-chip length field
+ back in.
+ */
+ int ref_point_counter;
+ flstate_t state;
+ flstate_t oldstate;
+
+ unsigned int write_suspended:1;
+ unsigned int erase_suspended:1;
+ unsigned long in_progress_block_addr;
+
+ struct mutex mutex;
+ wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
+ to be ready */
+ int word_write_time;
+ int buffer_write_time;
+ int erase_time;
+
+ int word_write_time_max;
+ int buffer_write_time_max;
+ int erase_time_max;
+
+ void *priv;
+};
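+
+/*
+ * Typical wait pattern (a simplified sketch; real chip drivers such as the
+ * CFI command-set code also handle suspend, resume and error states): take
+ * chip->mutex, sleep on chip->wq until the chip returns to FL_READY, then
+ * claim it by moving it to the new state.
+ */
+static inline void example_claim_chip(struct flchip *chip, flstate_t new_state)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	mutex_lock(&chip->mutex);
+	while (chip->state != FL_READY) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		add_wait_queue(&chip->wq, &wait);
+		mutex_unlock(&chip->mutex);
+		schedule();
+		remove_wait_queue(&chip->wq, &wait);
+		mutex_lock(&chip->mutex);
+	}
+	chip->state = new_state;
+	mutex_unlock(&chip->mutex);
+}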
+
+/* This is used to handle contention on write/erase operations
+ between partitions of the same physical chip. */
+struct flchip_shared {
+ struct mutex lock;
+ struct flchip *writing;
+ struct flchip *erasing;
+};
+
+
+#endif /* __MTD_FLASHCHIP_H__ */
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h
new file mode 100644
index 000000000..c8be32e9f
--- /dev/null
+++ b/include/linux/mtd/fsmc.h
@@ -0,0 +1,174 @@
+/*
+ * include/linux/mtd/fsmc.h
+ *
+ * ST Microelectronics
+ * Flexible Static Memory Controller (FSMC)
+ * platform data interface and header file
+ *
+ * Copyright © 2010 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MTD_FSMC_H
+#define __MTD_FSMC_H
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/types.h>
+#include <linux/mtd/partitions.h>
+#include <asm/param.h>
+
+#define FSMC_NAND_BW8 1
+#define FSMC_NAND_BW16 2
+
+#define FSMC_MAX_NOR_BANKS 4
+#define FSMC_MAX_NAND_BANKS 4
+
+#define FSMC_FLASH_WIDTH8 1
+#define FSMC_FLASH_WIDTH16 2
+
+/* fsmc controller registers for NOR flash */
+#define CTRL 0x0
+ /* ctrl register definitions */
+ #define BANK_ENABLE (1 << 0)
+ #define MUXED (1 << 1)
+ #define NOR_DEV (2 << 2)
+ #define WIDTH_8 (0 << 4)
+ #define WIDTH_16 (1 << 4)
+ #define RSTPWRDWN (1 << 6)
+ #define WPROT (1 << 7)
+ #define WRT_ENABLE (1 << 12)
+ #define WAIT_ENB (1 << 13)
+
+#define CTRL_TIM 0x4
+ /* ctrl_tim register definitions */
+
+#define FSMC_NOR_BANK_SZ 0x8
+#define FSMC_NOR_REG_SIZE 0x40
+
+#define FSMC_NOR_REG(base, bank, reg) (base + \
+ FSMC_NOR_BANK_SZ * (bank) + \
+ reg)
+
+/* fsmc controller registers for NAND flash */
+#define PC 0x00
+ /* pc register definitions */
+ #define FSMC_RESET (1 << 0)
+ #define FSMC_WAITON (1 << 1)
+ #define FSMC_ENABLE (1 << 2)
+ #define FSMC_DEVTYPE_NAND (1 << 3)
+ #define FSMC_DEVWID_8 (0 << 4)
+ #define FSMC_DEVWID_16 (1 << 4)
+ #define FSMC_ECCEN (1 << 6)
+ #define FSMC_ECCPLEN_512 (0 << 7)
+ #define FSMC_ECCPLEN_256 (1 << 7)
+ #define FSMC_TCLR_1 (1)
+ #define FSMC_TCLR_SHIFT (9)
+ #define FSMC_TCLR_MASK (0xF)
+ #define FSMC_TAR_1 (1)
+ #define FSMC_TAR_SHIFT (13)
+ #define FSMC_TAR_MASK (0xF)
+#define STS 0x04
+ /* sts register definitions */
+ #define FSMC_CODE_RDY (1 << 15)
+#define COMM 0x08
+ /* comm register definitions */
+ #define FSMC_TSET_0 0
+ #define FSMC_TSET_SHIFT 0
+ #define FSMC_TSET_MASK 0xFF
+ #define FSMC_TWAIT_6 6
+ #define FSMC_TWAIT_SHIFT 8
+ #define FSMC_TWAIT_MASK 0xFF
+ #define FSMC_THOLD_4 4
+ #define FSMC_THOLD_SHIFT 16
+ #define FSMC_THOLD_MASK 0xFF
+ #define FSMC_THIZ_1 1
+ #define FSMC_THIZ_SHIFT 24
+ #define FSMC_THIZ_MASK 0xFF
+#define ATTRIB 0x0C
+#define IOATA 0x10
+#define ECC1 0x14
+#define ECC2 0x18
+#define ECC3 0x1C
+#define FSMC_NAND_BANK_SZ 0x20
+
+#define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \
+ (FSMC_NAND_BANK_SZ * (bank)) + \
+ reg)
+
+#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
+
+/*
+ * In FSMC version 8 there are 13 bytes of ECC for every 512-byte block,
+ * and they have to be read consecutively, immediately after the 512-byte
+ * data block, for the hardware to generate the error bit offsets.
+ * Managing the ECC bytes in the following way is easier; it is similar
+ * to the oobfree structure already maintained in the U-Boot NAND driver.
+ */
+#define MAX_ECCPLACE_ENTRIES 32
+
+struct fsmc_nand_eccplace {
+ uint8_t offset;
+ uint8_t length;
+};
+
+struct fsmc_eccplace {
+ struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES];
+};
+
+struct fsmc_nand_timings {
+ uint8_t tclr;
+ uint8_t tar;
+ uint8_t thiz;
+ uint8_t thold;
+ uint8_t twait;
+ uint8_t tset;
+};
+
+enum access_mode {
+ USE_DMA_ACCESS = 1,
+ USE_WORD_ACCESS,
+};
+
+/**
+ * fsmc_nand_platform_data - platform specific NAND controller config
+ * @nand_timings: timing setup for the physical NAND interface
+ * @partitions: partition table for the platform, use a default fallback
+ * if this is NULL
+ * @nr_partitions: the number of partitions in the previous entry
+ * @options: different options for the driver
+ * @width: bus width
+ * @bank: default bank
+ * @select_bank: callback to select a certain bank, this is
+ * platform-specific. If the controller only supports one bank
+ * this may be set to NULL
+ */
+struct fsmc_nand_platform_data {
+ struct fsmc_nand_timings *nand_timings;
+ struct mtd_partition *partitions;
+ unsigned int nr_partitions;
+ unsigned int options;
+ unsigned int width;
+ unsigned int bank;
+
+ enum access_mode mode;
+
+ void (*select_bank)(uint32_t bank, uint32_t busw);
+
+ /* priv structures for dma accesses */
+ void *read_dma_priv;
+ void *write_dma_priv;
+};
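+
+/*
+ * Example platform data (the values are illustrative, not taken from any
+ * real board): an 8-bit NAND on bank 0 using word access, with the driver's
+ * default timings and partition parsing.
+ */
+static struct fsmc_nand_platform_data example_fsmc_nand_pdata __maybe_unused = {
+	.nand_timings	= NULL,	/* let the driver use its defaults */
+	.partitions	= NULL,	/* fall back to parser/default partitions */
+	.nr_partitions	= 0,
+	.options	= 0,
+	.width		= FSMC_NAND_BW8,
+	.bank		= 0,
+	.mode		= USE_WORD_ACCESS,
+	.select_bank	= NULL,	/* single-bank controller */
+};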
+
+extern int __init fsmc_nor_init(struct platform_device *pdev,
+ unsigned long base, uint32_t bank, uint32_t width);
+extern void __init fsmc_init_board_info(struct platform_device *pdev,
+ struct mtd_partition *partitions, unsigned int nr_partitions,
+ unsigned int width);
+
+#endif /* __MTD_FSMC_H */
diff --git a/include/linux/mtd/ftl.h b/include/linux/mtd/ftl.h
new file mode 100644
index 000000000..0555f7a0b
--- /dev/null
+++ b/include/linux/mtd/ftl.h
@@ -0,0 +1,74 @@
+/*
+ * Derived from (and probably identical to):
+ * ftl.h 1.7 1999/10/25 20:23:17
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License
+ * at http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS"
+ * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+ * the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * The initial developer of the original code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
+ *
+ * Alternatively, the contents of this file may be used under the
+ * terms of the GNU General Public License version 2 (the "GPL"), in
+ * which case the provisions of the GPL are applicable instead of the
+ * above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use
+ * your version of this file under the MPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file
+ * under either the MPL or the GPL.
+ */
+
+#ifndef _LINUX_FTL_H
+#define _LINUX_FTL_H
+
+typedef struct erase_unit_header_t {
+ uint8_t LinkTargetTuple[5];
+ uint8_t DataOrgTuple[10];
+ uint8_t NumTransferUnits;
+ uint32_t EraseCount;
+ uint16_t LogicalEUN;
+ uint8_t BlockSize;
+ uint8_t EraseUnitSize;
+ uint16_t FirstPhysicalEUN;
+ uint16_t NumEraseUnits;
+ uint32_t FormattedSize;
+ uint32_t FirstVMAddress;
+ uint16_t NumVMPages;
+ uint8_t Flags;
+ uint8_t Code;
+ uint32_t SerialNumber;
+ uint32_t AltEUHOffset;
+ uint32_t BAMOffset;
+ uint8_t Reserved[12];
+ uint8_t EndTuple[2];
+} erase_unit_header_t;
+
+/* Flags in erase_unit_header_t */
+#define HIDDEN_AREA 0x01
+#define REVERSE_POLARITY 0x02
+#define DOUBLE_BAI 0x04
+
+/* Definitions for block allocation information */
+
+#define BLOCK_FREE(b) ((b) == 0xffffffff)
+#define BLOCK_DELETED(b) (((b) == 0) || ((b) == 0xfffffffe))
+
+#define BLOCK_TYPE(b) ((b) & 0x7f)
+#define BLOCK_ADDRESS(b) ((b) & ~0x7f)
+#define BLOCK_NUMBER(b) ((b) >> 9)
+#define BLOCK_CONTROL 0x30
+#define BLOCK_DATA 0x40
+#define BLOCK_REPLACEMENT 0x60
+#define BLOCK_BAD 0x70
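+
+/*
+ * Sketch of classifying a Block Allocation Map entry with the macros above
+ * (the entry value itself is whatever was read from the BAM).
+ */
+static inline int example_bam_is_data_block(uint32_t bam_entry)
+{
+	if (BLOCK_FREE(bam_entry) || BLOCK_DELETED(bam_entry))
+		return 0;
+	return BLOCK_TYPE(bam_entry) == BLOCK_DATA;
+}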
+
+#endif /* _LINUX_FTL_H */
diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h
new file mode 100644
index 000000000..2c456054f
--- /dev/null
+++ b/include/linux/mtd/gen_probe.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2001 Red Hat UK Limited
+ * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __LINUX_MTD_GEN_PROBE_H__
+#define __LINUX_MTD_GEN_PROBE_H__
+
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/bitops.h>
+
+struct chip_probe {
+ char *name;
+ int (*probe_chip)(struct map_info *map, __u32 base,
+ unsigned long *chip_map, struct cfi_private *cfi);
+};
+
+struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp);
+
+#endif /* __LINUX_MTD_GEN_PROBE_H__ */
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
new file mode 100644
index 000000000..02cd5f9b7
--- /dev/null
+++ b/include/linux/mtd/inftl.h
@@ -0,0 +1,63 @@
+/*
+ * inftl.h -- defines to support the Inverse NAND Flash Translation Layer
+ *
+ * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ */
+
+#ifndef __MTD_INFTL_H__
+#define __MTD_INFTL_H__
+
+#ifndef __KERNEL__
+#error This is a kernel header. Perhaps include nftl-user.h instead?
+#endif
+
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+
+#include <mtd/inftl-user.h>
+
+#ifndef INFTL_MAJOR
+#define INFTL_MAJOR 96
+#endif
+#define INFTL_PARTN_BITS 4
+
+#ifdef __KERNEL__
+
+struct INFTLrecord {
+ struct mtd_blktrans_dev mbd;
+ __u16 MediaUnit;
+ __u32 EraseSize;
+ struct INFTLMediaHeader MediaHdr;
+ int usecount;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ __u16 numvunits;
+ __u16 firstEUN;
+ __u16 lastEUN;
+ __u16 numfreeEUNs;
+ __u16 LastFreeEUN; /* To speed up finding a free EUN */
+ int head,sect,cyl;
+ __u16 *PUtable; /* Physical Unit Table */
+ __u16 *VUtable; /* Virtual Unit Table */
+ unsigned int nb_blocks; /* number of physical blocks */
+ unsigned int nb_boot_blocks; /* number of blocks used by the bios */
+ struct erase_info instr;
+ struct nand_ecclayout oobinfo;
+};
+
+int INFTL_mount(struct INFTLrecord *s);
+int INFTL_formatblock(struct INFTLrecord *s, int block);
+
+void INFTL_dumptables(struct INFTLrecord *s);
+void INFTL_dumpVUchains(struct INFTLrecord *s);
+
+int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf);
+int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf);
+
+#endif /* __KERNEL__ */
+
+#endif /* __MTD_INFTL_H__ */
diff --git a/include/linux/mtd/latch-addr-flash.h b/include/linux/mtd/latch-addr-flash.h
new file mode 100644
index 000000000..e94b8e128
--- /dev/null
+++ b/include/linux/mtd/latch-addr-flash.h
@@ -0,0 +1,29 @@
+/*
+ * Interface for NOR flash driver whose high address lines are latched
+ *
+ * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef __LATCH_ADDR_FLASH__
+#define __LATCH_ADDR_FLASH__
+
+struct map_info;
+struct mtd_partition;
+
+struct latch_addr_flash_data {
+ unsigned int width;
+ unsigned int size;
+
+ int (*init)(void *data, int cs);
+ void (*done)(void *data);
+ void (*set_window)(unsigned long offset, void *data);
+ void *data;
+
+ unsigned int nr_parts;
+ struct mtd_partition *parts;
+};
+
+#endif
diff --git a/include/linux/mtd/lpc32xx_mlc.h b/include/linux/mtd/lpc32xx_mlc.h
new file mode 100644
index 000000000..d91b1e356
--- /dev/null
+++ b/include/linux/mtd/lpc32xx_mlc.h
@@ -0,0 +1,20 @@
+/*
+ * Platform data for LPC32xx SoC MLC NAND controller
+ *
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_LPC32XX_MLC_H
+#define __LINUX_MTD_LPC32XX_MLC_H
+
+#include <linux/dmaengine.h>
+
+struct lpc32xx_mlc_platform_data {
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+#endif /* __LINUX_MTD_LPC32XX_MLC_H */
diff --git a/include/linux/mtd/lpc32xx_slc.h b/include/linux/mtd/lpc32xx_slc.h
new file mode 100644
index 000000000..1169548a1
--- /dev/null
+++ b/include/linux/mtd/lpc32xx_slc.h
@@ -0,0 +1,20 @@
+/*
+ * Platform data for LPC32xx SoC SLC NAND controller
+ *
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_LPC32XX_SLC_H
+#define __LINUX_MTD_LPC32XX_SLC_H
+
+#include <linux/dmaengine.h>
+
+struct lpc32xx_slc_platform_data {
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+#endif /* __LINUX_MTD_LPC32XX_SLC_H */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
new file mode 100644
index 000000000..29975c73a
--- /dev/null
+++ b/include/linux/mtd/map.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+/* Overhauled routines for dealing with different mmap regions of flash */
+
+#ifndef __LINUX_MTD_MAP_H__
+#define __LINUX_MTD_MAP_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <asm/unaligned.h>
+#include <asm/io.h>
+#include <asm/barrier.h>
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
+#define map_bankwidth(map) 1
+#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
+#define map_bankwidth_is_large(map) (0)
+#define map_words(map) (1)
+#define MAX_MAP_BANKWIDTH 1
+#else
+#define map_bankwidth_is_1(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# else
+# define map_bankwidth(map) 2
+# define map_bankwidth_is_large(map) (0)
+# define map_words(map) (1)
+# endif
+#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 2
+#else
+#define map_bankwidth_is_2(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# else
+# define map_bankwidth(map) 4
+# define map_bankwidth_is_large(map) (0)
+# define map_words(map) (1)
+# endif
+#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 4
+#else
+#define map_bankwidth_is_4(map) (0)
+#endif
+
+/* ensure we never evaluate anything shorter than an unsigned long
+ * to zero, and ensure we'll never miss the end of a comparison (bjd) */
+
+#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# if BITS_PER_LONG < 64
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
+# endif
+# else
+# define map_bankwidth(map) 8
+# define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
+# define map_words(map) map_calc_words(map)
+# endif
+#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 8
+#else
+#define map_bankwidth_is_8(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
+# else
+# define map_bankwidth(map) 16
+# define map_bankwidth_is_large(map) (1)
+# define map_words(map) map_calc_words(map)
+# endif
+#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 16
+#else
+#define map_bankwidth_is_16(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
+# else
+# define map_bankwidth(map) 32
+# define map_bankwidth_is_large(map) (1)
+# define map_words(map) map_calc_words(map)
+# endif
+#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 32
+#else
+#define map_bankwidth_is_32(map) (0)
+#endif
+
+#ifndef map_bankwidth
+#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
+static inline int map_bankwidth(void *map)
+{
+ BUG();
+ return 0;
+}
+#define map_bankwidth_is_large(map) (0)
+#define map_words(map) (0)
+#define MAX_MAP_BANKWIDTH 1
+#endif
+
+static inline int map_bankwidth_supported(int w)
+{
+ switch (w) {
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
+ case 1:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
+ case 2:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
+ case 4:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
+ case 8:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
+ case 16:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
+ case 32:
+#endif
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+typedef union {
+ unsigned long x[MAX_MAP_LONGS];
+} map_word;
+
+/* The map stuff is very simple. You fill in your struct map_info with
+ a handful of routines for accessing the device, making sure they handle
+ paging etc. correctly if your device needs it. Then you pass it off
+ to a chip probe routine -- either JEDEC or CFI probe or both -- via
+ do_map_probe(). If a chip is recognised, the probe code will invoke the
+ appropriate chip driver (if present) and return a struct mtd_info.
+ At which point, you fill in mtd->owner with your own module
+ and register it with the MTD core code. Or you could partition
+ it and register the partitions instead, or keep it for your own private
+ use; whatever.
+
+ The mtd->priv field will point to the struct map_info, and any further
+ private data required by the chip driver is linked from the
+ mtd->priv->fldrv_priv field. This allows the map driver to get at
+ the destructor function map->fldrv->destroy() when it's tired
+ of living.
+*/
+
+struct map_info {
+ const char *name;
+ unsigned long size;
+ resource_size_t phys;
+#define NO_XIP (-1UL)
+
+ void __iomem *virt;
+ void *cached;
+
+ int swap; /* this mapping's byte-swapping requirement */
+ int bankwidth; /* in octets. This isn't necessarily the width
+ of actual bus cycles -- it's the repeat interval
+ in bytes, before you are talking to the first chip again.
+ */
+
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+ map_word (*read)(struct map_info *, unsigned long);
+ void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);
+
+ void (*write)(struct map_info *, const map_word, unsigned long);
+ void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);
+
+ /* We can perhaps put in 'point' and 'unpoint' methods, if we really
+ want to enable XIP for non-linear mappings. Not yet though. */
+#endif
+ /* It's possible for the map driver to use cached memory in its
+ copy_from implementation (and _only_ with copy_from). However,
+ when the chip driver knows some flash area has changed contents,
+ it will signal it to the map driver through this routine to let
+ the map driver invalidate the corresponding cache as needed.
+ If there is no cache to care about this can be set to NULL. */
+ void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
+
+ /* set_vpp() must handle being reentered -- enable, enable, disable
+ must leave it enabled. */
+ void (*set_vpp)(struct map_info *, int);
+
+ unsigned long pfow_base;
+ unsigned long map_priv_1;
+ unsigned long map_priv_2;
+ struct device_node *device_node;
+ void *fldrv_priv;
+ struct mtd_chip_driver *fldrv;
+};
+
+struct mtd_chip_driver {
+ struct mtd_info *(*probe)(struct map_info *map);
+ void (*destroy)(struct mtd_info *);
+ struct module *module;
+ char *name;
+ struct list_head list;
+};
+
+void register_mtd_chip_driver(struct mtd_chip_driver *);
+void unregister_mtd_chip_driver(struct mtd_chip_driver *);
+
+struct mtd_info *do_map_probe(const char *name, struct map_info *map);
+void map_destroy(struct mtd_info *mtd);
+
+#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
+#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)
+
+#define INVALIDATE_CACHED_RANGE(map, from, size) \
+ do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
+
+
+static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
+{
+ int i;
+
+ for (i = 0; i < map_words(map); i++) {
+ if (val1.x[i] != val2.x[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
+{
+ map_word r;
+ int i;
+
+ for (i = 0; i < map_words(map); i++)
+ r.x[i] = val1.x[i] & val2.x[i];
+
+ return r;
+}
+
+static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
+{
+ map_word r;
+ int i;
+
+ for (i = 0; i < map_words(map); i++)
+ r.x[i] = val1.x[i] & ~val2.x[i];
+
+ return r;
+}
+
+static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
+{
+ map_word r;
+ int i;
+
+ for (i = 0; i < map_words(map); i++)
+ r.x[i] = val1.x[i] | val2.x[i];
+
+ return r;
+}
+
+static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
+{
+ int i;
+
+ for (i = 0; i < map_words(map); i++) {
+ if ((val1.x[i] & val2.x[i]) != val3.x[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
+{
+ int i;
+
+ for (i = 0; i < map_words(map); i++) {
+ if (val1.x[i] & val2.x[i])
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline map_word map_word_load(struct map_info *map, const void *ptr)
+{
+ map_word r;
+
+ if (map_bankwidth_is_1(map))
+ r.x[0] = *(unsigned char *)ptr;
+ else if (map_bankwidth_is_2(map))
+ r.x[0] = get_unaligned((uint16_t *)ptr);
+ else if (map_bankwidth_is_4(map))
+ r.x[0] = get_unaligned((uint32_t *)ptr);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ r.x[0] = get_unaligned((uint64_t *)ptr);
+#endif
+ else if (map_bankwidth_is_large(map))
+ memcpy(r.x, ptr, map->bankwidth);
+ else
+ BUG();
+
+ return r;
+}
+
+static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
+{
+ int i;
+
+ if (map_bankwidth_is_large(map)) {
+ char *dest = (char *)&orig;
+
+ memcpy(dest+start, buf, len);
+ } else {
+ for (i = start; i < start+len; i++) {
+ int bitpos;
+
+#ifdef __LITTLE_ENDIAN
+ bitpos = i * 8;
+#else /* __BIG_ENDIAN */
+ bitpos = (map_bankwidth(map) - 1 - i) * 8;
+#endif
+ orig.x[0] &= ~(0xff << bitpos);
+ orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
+ }
+ }
+ return orig;
+}
+
+#if BITS_PER_LONG < 64
+#define MAP_FF_LIMIT 4
+#else
+#define MAP_FF_LIMIT 8
+#endif
+
+static inline map_word map_word_ff(struct map_info *map)
+{
+ map_word r;
+ int i;
+
+ if (map_bankwidth(map) < MAP_FF_LIMIT) {
+ int bw = 8 * map_bankwidth(map);
+
+ r.x[0] = (1UL << bw) - 1;
+ } else {
+ for (i = 0; i < map_words(map); i++)
+ r.x[i] = ~0UL;
+ }
+ return r;
+}
+
+static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
+{
+ map_word r;
+
+ if (map_bankwidth_is_1(map))
+ r.x[0] = __raw_readb(map->virt + ofs);
+ else if (map_bankwidth_is_2(map))
+ r.x[0] = __raw_readw(map->virt + ofs);
+ else if (map_bankwidth_is_4(map))
+ r.x[0] = __raw_readl(map->virt + ofs);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ r.x[0] = __raw_readq(map->virt + ofs);
+#endif
+ else if (map_bankwidth_is_large(map))
+ memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
+ else
+ BUG();
+
+ return r;
+}
+
+static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
+{
+ if (map_bankwidth_is_1(map))
+ __raw_writeb(datum.x[0], map->virt + ofs);
+ else if (map_bankwidth_is_2(map))
+ __raw_writew(datum.x[0], map->virt + ofs);
+ else if (map_bankwidth_is_4(map))
+ __raw_writel(datum.x[0], map->virt + ofs);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ __raw_writeq(datum.x[0], map->virt + ofs);
+#endif
+ else if (map_bankwidth_is_large(map))
+ memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
+ else
+ BUG();
+ mb();
+}
+
+static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ if (map->cached)
+ memcpy(to, (char *)map->cached + from, len);
+ else
+ memcpy_fromio(to, map->virt + from, len);
+}
+
+static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy_toio(map->virt + to, from, len);
+}
+
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+#define map_read(map, ofs) (map)->read(map, ofs)
+#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
+#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
+#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
+
+extern void simple_map_init(struct map_info *);
+#define map_is_linear(map) (map->phys != NO_XIP)
+
+#else
+#define map_read(map, ofs) inline_map_read(map, ofs)
+#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
+#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
+#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)
+
+
+#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
+#define map_is_linear(map) ({ (void)(map); 1; })
+
+#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
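+
+/*
+ * Minimal probing sketch (the window size, bank width and probe order are
+ * illustrative): describe a directly mapped NOR window, install the default
+ * accessors with simple_map_init(), then let the CFI and JEDEC probes try to
+ * identify the chip. The returned mtd_info is what gets registered with the
+ * MTD core (see linux/mtd/mtd.h).
+ */
+static inline struct mtd_info *example_map_probe(struct map_info *map,
+						 void __iomem *window)
+{
+	struct mtd_info *mtd;
+
+	map->name = "example-nor";
+	map->size = 0x01000000;		/* assumed 16MiB window */
+	map->bankwidth = 2;		/* assumed 16-bit bus */
+	map->phys = NO_XIP;
+	map->virt = window;
+
+	simple_map_init(map);
+
+	mtd = do_map_probe("cfi_probe", map);
+	if (!mtd)
+		mtd = do_map_probe("jedec_probe", map);
+	return mtd;
+}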
+
+#endif /* __LINUX_MTD_MAP_H__ */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
new file mode 100644
index 000000000..f17fa7580
--- /dev/null
+++ b/include/linux/mtd/mtd.h
@@ -0,0 +1,414 @@
+/*
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __MTD_MTD_H__
+#define __MTD_MTD_H__
+
+#include <linux/types.h>
+#include <linux/uio.h>
+#include <linux/notifier.h>
+#include <linux/device.h>
+
+#include <mtd/mtd-abi.h>
+
+#include <asm/div64.h>
+
+#define MTD_ERASE_PENDING 0x01
+#define MTD_ERASING 0x02
+#define MTD_ERASE_SUSPEND 0x04
+#define MTD_ERASE_DONE 0x08
+#define MTD_ERASE_FAILED 0x10
+
+#define MTD_FAIL_ADDR_UNKNOWN -1LL
+
+/*
+ * If the erase fails, fail_addr might indicate exactly which block failed. If
+ * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
+ * or was not specific to any particular block.
+ */
+struct erase_info {
+ struct mtd_info *mtd;
+ uint64_t addr;
+ uint64_t len;
+ uint64_t fail_addr;
+ u_long time;
+ u_long retries;
+ unsigned dev;
+ unsigned cell;
+ void (*callback) (struct erase_info *self);
+ u_long priv;
+ u_char state;
+ struct erase_info *next;
+};
+
+struct mtd_erase_region_info {
+ uint64_t offset; /* At which this region starts, from the beginning of the MTD */
+ uint32_t erasesize; /* For this region */
+ uint32_t numblocks; /* Number of blocks of erasesize in this region */
+ unsigned long *lockmap; /* If keeping bitmap of locks */
+};
+
+/**
+ * struct mtd_oob_ops - oob operation operands
+ * @mode: operation mode
+ *
+ * @len: number of data bytes to write/read
+ *
+ * @retlen: number of data bytes written/read
+ *
+ * @ooblen: number of oob bytes to write/read
+ * @oobretlen: number of oob bytes written/read
+ * @ooboffs: offset of oob data in the oob area (only relevant when
+ * mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
+ * @datbuf: data buffer - if NULL only oob data are read/written
+ * @oobbuf: oob data buffer
+ *
+ * Note, it is allowed to read more than one OOB area at one go, but not write.
+ * The interface assumes that the OOB write requests program only one page's
+ * OOB area.
+ */
+struct mtd_oob_ops {
+ unsigned int mode;
+ size_t len;
+ size_t retlen;
+ size_t ooblen;
+ size_t oobretlen;
+ uint32_t ooboffs;
+ uint8_t *datbuf;
+ uint8_t *oobbuf;
+};
+
+#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
+#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
+/*
+ * Internal ECC layout control structure. For historical reasons, there is a
+ * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained
+ * for export to user-space via the ECCGETLAYOUT ioctl.
+ * nand_ecclayout should be expandable in the future simply by increasing the above macros.
+ */
+struct nand_ecclayout {
+ __u32 eccbytes;
+ __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
+ __u32 oobavail;
+ struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
+};
+
+struct module; /* only needed for owner field in mtd_info */
+
+struct mtd_info {
+ u_char type;
+ uint32_t flags;
+ uint64_t size; // Total size of the MTD
+
+ /* "Major" erase size for the device. Naïve users may take this
+ * to be the only erase size available, or may use the more detailed
+ * information below if they desire
+ */
+ uint32_t erasesize;
+ /* Minimal writable flash unit size. In case of NOR flash it is 1 (even
+ * though individual bits can be cleared), in case of NAND flash it is
+ * one NAND page (or a half or a quarter of it), in case of ECC-ed NOR
+ * it is the ECC block size, etc. It is illegal to have writesize = 0.
+ * Any driver registering a struct mtd_info must ensure a writesize of
+ * 1 or larger.
+ */
+ uint32_t writesize;
+
+ /*
+ * Size of the write buffer used by the MTD. MTD devices having a write
+ * buffer can write multiple writesize chunks at a time. E.g. while
+ * writing 4 * writesize bytes to a device with 2 * writesize bytes
+ * buffer the MTD driver can (but doesn't have to) do 2 writesize
+ * operations, but not 4. Currently, all NANDs have writebufsize
+ * equivalent to writesize (NAND page size). Some NOR flashes do have
+ * writebufsize greater than writesize.
+ */
+ uint32_t writebufsize;
+
+ uint32_t oobsize; // Amount of OOB data per block (e.g. 16)
+ uint32_t oobavail; // Available OOB bytes per block
+
+ /*
+ * If erasesize is a power of 2 then the shift is stored in
+ * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
+ */
+ unsigned int erasesize_shift;
+ unsigned int writesize_shift;
+ /* Masks based on erasesize_shift and writesize_shift */
+ unsigned int erasesize_mask;
+ unsigned int writesize_mask;
+
+ /*
+ * read ops return -EUCLEAN if max number of bitflips corrected on any
+ * one region comprising an ecc step equals or exceeds this value.
+ * Settable by driver, else defaults to ecc_strength. User can override
+ * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed;
+ * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
+ */
+ unsigned int bitflip_threshold;
+
+ // Kernel-only stuff starts here.
+ const char *name;
+ int index;
+
+ /* ECC layout structure pointer - read only! */
+ struct nand_ecclayout *ecclayout;
+
+ /* the ecc step size. */
+ unsigned int ecc_step_size;
+
+ /* max number of correctible bit errors per ecc step */
+ unsigned int ecc_strength;
+
+ /* Data for variable erase regions. If numeraseregions is zero,
+ * it means that the whole device has erasesize as given above.
+ */
+ int numeraseregions;
+ struct mtd_erase_region_info *eraseregions;
+
+ /*
+ * Do not call via these pointers, use corresponding mtd_*()
+ * wrappers instead.
+ */
+ int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
+ int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void **virt, resource_size_t *phys);
+ int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
+ unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
+ unsigned long len,
+ unsigned long offset,
+ unsigned long flags);
+ int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+ int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+ int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+ int (*_read_oob) (struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops);
+ int (*_write_oob) (struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
+ int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf);
+ int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf);
+ int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, u_char *buf);
+ int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
+ size_t len);
+ int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+ void (*_sync) (struct mtd_info *mtd);
+ int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+ int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+ int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
+ int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
+ int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*_suspend) (struct mtd_info *mtd);
+ void (*_resume) (struct mtd_info *mtd);
+ void (*_reboot) (struct mtd_info *mtd);
+ /*
+ * If the driver is something smart, like UBI, it may need to maintain
+ * its own reference counting. The functions below are only for the driver.
+ */
+ int (*_get_device) (struct mtd_info *mtd);
+ void (*_put_device) (struct mtd_info *mtd);
+
+ /* Backing device capabilities for this device
+ * - provides mmap capabilities
+ */
+ struct backing_dev_info *backing_dev_info;
+
+ struct notifier_block reboot_notifier; /* default mode before reboot */
+
+ /* ECC status information */
+ struct mtd_ecc_stats ecc_stats;
+ /* Subpage shift (NAND) */
+ int subpage_sft;
+
+ void *priv;
+
+ struct module *owner;
+ struct device dev;
+ int usecount;
+};
+
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ void **virt, resource_size_t *phys);
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+ unsigned long offset, unsigned long flags);
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+ u_char *buf);
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf);
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+ const u_char *buf);
+
+int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
+
+static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->_write_oob)
+ return -EOPNOTSUPP;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ return mtd->_write_oob(mtd, to, ops);
+}
+
+int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf);
+int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
+ struct otp_info *buf);
+int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, u_char *buf);
+int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);
+
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+
+static inline void mtd_sync(struct mtd_info *mtd)
+{
+ if (mtd->_sync)
+ mtd->_sync(mtd);
+}
+
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);
+
+static inline int mtd_suspend(struct mtd_info *mtd)
+{
+ return mtd->_suspend ? mtd->_suspend(mtd) : 0;
+}
+
+static inline void mtd_resume(struct mtd_info *mtd)
+{
+ if (mtd->_resume)
+ mtd->_resume(mtd);
+}
+
+static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd->erasesize_shift)
+ return sz >> mtd->erasesize_shift;
+ do_div(sz, mtd->erasesize);
+ return sz;
+}
+
+static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd->erasesize_shift)
+ return sz & mtd->erasesize_mask;
+ return do_div(sz, mtd->erasesize);
+}
+
+static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd->writesize_shift)
+ return sz >> mtd->writesize_shift;
+ do_div(sz, mtd->writesize);
+ return sz;
+}
+
+static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
+{
+ if (mtd->writesize_shift)
+ return sz & mtd->writesize_mask;
+ return do_div(sz, mtd->writesize);
+}
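+
+/*
+ * Alignment sketch using the helpers above: an erase request must start and
+ * end on erase-block boundaries, whichever of the shift or division paths
+ * the device takes.
+ */
+static inline int example_erase_args_aligned(struct mtd_info *mtd,
+					     loff_t ofs, uint64_t len)
+{
+	return mtd_mod_by_eb(ofs, mtd) == 0 && mtd_mod_by_eb(len, mtd) == 0;
+}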
+
+static inline int mtd_has_oob(const struct mtd_info *mtd)
+{
+ return mtd->_read_oob && mtd->_write_oob;
+}
+
+static inline int mtd_type_is_nand(const struct mtd_info *mtd)
+{
+ return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
+static inline int mtd_can_have_bb(const struct mtd_info *mtd)
+{
+ return !!mtd->_block_isbad;
+}
+
+ /* Kernel-side ioctl definitions */
+
+struct mtd_partition;
+struct mtd_part_parser_data;
+
+extern int mtd_device_parse_register(struct mtd_info *mtd,
+ const char * const *part_probe_types,
+ struct mtd_part_parser_data *parser_data,
+ const struct mtd_partition *defparts,
+ int defnr_parts);
+#define mtd_device_register(master, parts, nr_parts) \
+ mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
+extern int mtd_device_unregister(struct mtd_info *master);
+extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
+extern int __get_mtd_device(struct mtd_info *mtd);
+extern void __put_mtd_device(struct mtd_info *mtd);
+extern struct mtd_info *get_mtd_device_nm(const char *name);
+extern void put_mtd_device(struct mtd_info *mtd);
+
+
+struct mtd_notifier {
+ void (*add)(struct mtd_info *mtd);
+ void (*remove)(struct mtd_info *mtd);
+ struct list_head list;
+};
+
+
+extern void register_mtd_user (struct mtd_notifier *new);
+extern int unregister_mtd_user (struct mtd_notifier *old);
+void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
+
+void mtd_erase_callback(struct erase_info *instr);
+
+static inline int mtd_is_bitflip(int err) {
+ return err == -EUCLEAN;
+}
+
+static inline int mtd_is_eccerr(int err) {
+ return err == -EBADMSG;
+}
+
+static inline int mtd_is_bitflip_or_eccerr(int err) {
+ return mtd_is_bitflip(err) || mtd_is_eccerr(err);
+}
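+
+/*
+ * Read sketch (no particular device assumed): issue a read through the
+ * mtd_read() wrapper, treat corrected bitflips (-EUCLEAN) as usable data,
+ * and report uncorrectable ECC errors and short reads.
+ */
+static inline int example_mtd_read_checked(struct mtd_info *mtd, loff_t from,
+					   size_t len, u_char *buf)
+{
+	size_t retlen = 0;
+	int ret = mtd_read(mtd, from, len, &retlen, buf);
+
+	if (mtd_is_bitflip(ret))
+		ret = 0;	/* data was corrected and is usable */
+	if (ret)
+		return ret;
+	return retlen == len ? 0 : -EIO;
+}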
+
+unsigned mtd_mmap_capabilities(struct mtd_info *mtd);
+
+#endif /* __MTD_MTD_H__ */
diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h
new file mode 100644
index 000000000..628a6a21d
--- /dev/null
+++ b/include/linux/mtd/mtdram.h
@@ -0,0 +1,8 @@
+#ifndef __MTD_MTDRAM_H__
+#define __MTD_MTDRAM_H__
+
+#include <linux/mtd/mtd.h>
+int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
+ unsigned long size, const char *name);
+
+#endif /* __MTD_MTDRAM_H__ */
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
new file mode 100644
index 000000000..51534e50f
--- /dev/null
+++ b/include/linux/mtd/nand-gpio.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_MTD_NAND_GPIO_H
+#define __LINUX_MTD_NAND_GPIO_H
+
+#include <linux/mtd/nand.h>
+
+struct gpio_nand_platdata {
+ int gpio_nce;
+ int gpio_nwp;
+ int gpio_cle;
+ int gpio_ale;
+ int gpio_rdy;
+ void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
+ struct mtd_partition *parts;
+ unsigned int num_parts;
+ unsigned int options;
+ int chip_delay;
+};
+
+#endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
new file mode 100644
index 000000000..3d4ea7eb2
--- /dev/null
+++ b/include/linux/mtd/nand.h
@@ -0,0 +1,1029 @@
+/*
+ * linux/include/linux/mtd/nand.h
+ *
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Info:
+ * Contains standard defines and IDs for NAND flash devices
+ *
+ * Changelog:
+ * See git changelog.
+ */
+#ifndef __LINUX_MTD_NAND_H
+#define __LINUX_MTD_NAND_H
+
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/bbm.h>
+
+struct mtd_info;
+struct nand_flash_dev;
+/* Scan and identify a NAND device */
+extern int nand_scan(struct mtd_info *mtd, int max_chips);
+/*
+ * Separate phases of nand_scan(), allowing board driver to intervene
+ * and override command or ECC setup according to flash type.
+ */
+extern int nand_scan_ident(struct mtd_info *mtd, int max_chips,
+ struct nand_flash_dev *table);
+extern int nand_scan_tail(struct mtd_info *mtd);
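+
+/*
+ * Two-phase scan sketch (board-specific ECC/command overrides are elided and
+ * a single chip is assumed): identify the device first so the board driver
+ * can adjust its setup, then finish the scan.
+ */
+static inline int example_nand_scan_two_phase(struct mtd_info *mtd)
+{
+	int ret = nand_scan_ident(mtd, 1, NULL);
+
+	if (ret)
+		return ret;
+	/* a board driver would tweak its ECC/command setup here */
+	return nand_scan_tail(mtd);
+}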
+
+/* Free resources held by the NAND device */
+extern void nand_release(struct mtd_info *mtd);
+
+/* Internal helper for board drivers which need to override command function */
+extern void nand_wait_ready(struct mtd_info *mtd);
+
+/* locks all blocks present in the device */
+extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
+/* unlocks specified locked blocks */
+extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS 8
+
+/*
+ * Constants for hardware specific CLE/ALE/NCE function
+ *
+ * These are bits which can be or'ed to set/clear multiple
+ * bits in one go.
+ */
+/* Select the chip by setting nCE to low */
+#define NAND_NCE 0x01
+/* Select the command latch by setting CLE to high */
+#define NAND_CLE 0x02
+/* Select the address latch by setting ALE to high */
+#define NAND_ALE 0x04
+
+#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
+#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
+#define NAND_CTRL_CHANGE 0x80
+
+/*
+ * Standard NAND flash commands
+ */
+#define NAND_CMD_READ0 0
+#define NAND_CMD_READ1 1
+#define NAND_CMD_RNDOUT 5
+#define NAND_CMD_PAGEPROG 0x10
+#define NAND_CMD_READOOB 0x50
+#define NAND_CMD_ERASE1 0x60
+#define NAND_CMD_STATUS 0x70
+#define NAND_CMD_SEQIN 0x80
+#define NAND_CMD_RNDIN 0x85
+#define NAND_CMD_READID 0x90
+#define NAND_CMD_ERASE2 0xd0
+#define NAND_CMD_PARAM 0xec
+#define NAND_CMD_GET_FEATURES 0xee
+#define NAND_CMD_SET_FEATURES 0xef
+#define NAND_CMD_RESET 0xff
+
+#define NAND_CMD_LOCK 0x2a
+#define NAND_CMD_UNLOCK1 0x23
+#define NAND_CMD_UNLOCK2 0x24
+
+/* Extended commands for large page devices */
+#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_RNDOUTSTART 0xE0
+#define NAND_CMD_CACHEDPROG 0x15
+
+#define NAND_CMD_NONE -1
+
+/* Status bits */
+#define NAND_STATUS_FAIL 0x01
+#define NAND_STATUS_FAIL_N1 0x02
+#define NAND_STATUS_TRUE_READY 0x20
+#define NAND_STATUS_READY 0x40
+#define NAND_STATUS_WP 0x80
+
+/*
+ * Constants for ECC_MODES
+ */
+typedef enum {
+ NAND_ECC_NONE,
+ NAND_ECC_SOFT,
+ NAND_ECC_HW,
+ NAND_ECC_HW_SYNDROME,
+ NAND_ECC_HW_OOB_FIRST,
+ NAND_ECC_SOFT_BCH,
+} nand_ecc_modes_t;
+
+/*
+ * Constants for Hardware ECC
+ */
+/* Reset Hardware ECC for read */
+#define NAND_ECC_READ 0
+/* Reset Hardware ECC for write */
+#define NAND_ECC_WRITE 1
+/* Enable Hardware ECC before syndrome is read back from flash */
+#define NAND_ECC_READSYN 2
+
+/* Bit mask for flags passed to do_nand_read_ecc */
+#define NAND_GET_DEVICE 0x80
+
+
+/*
+ * Option constants for bizarre disfunctionality and real
+ * features.
+ */
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 0x00000002
+/* Chip has cache program function */
+#define NAND_CACHEPRG 0x00000008
+/*
+ * Chip requires ready check on read (for auto-incremented sequential read).
+ * True only for small page devices; large page devices do not support
+ * autoincrement.
+ */
+#define NAND_NEED_READRDY 0x00000100
+
+/* Chip does not allow subpage writes */
+#define NAND_NO_SUBPAGE_WRITE 0x00000200
+
+/* Device is one of the 'new' xD cards that expose a fake NAND command set */
+#define NAND_BROKEN_XD 0x00000400
+
+/* Device behaves just like nand, but is readonly */
+#define NAND_ROM 0x00000800
+
+/* Device supports subpage reads */
+#define NAND_SUBPAGE_READ 0x00001000
+
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
+/* Macros to identify the above */
+#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+
+/* Non chip related options */
+/* This option skips the bbt scan during initialization. */
+#define NAND_SKIP_BBTSCAN 0x00010000
+/*
+ * This option is defined if the board driver allocates its own buffers
+ * (e.g. because it needs them DMA-coherent).
+ */
+#define NAND_OWN_BUFFERS 0x00020000
+/* Chip may not exist, so silence any errors in scan */
+#define NAND_SCAN_SILENT_NODEV 0x00040000
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USE_BOUNCE_BUFFER 0x00100000
+/*
+ * Autodetect the NAND bus width with READID/ONFI.
+ * This assumes the driver configures the hardware in 8-bit mode
+ * when calling nand_scan_ident, and updates its configuration
+ * before calling nand_scan_tail.
+ */
+#define NAND_BUSWIDTH_AUTO 0x00080000
+
+/* Options set by nand scan */
+/* Nand scan has allocated controller struct */
+#define NAND_CONTROLLER_ALLOC 0x80000000
+
+/* Cell info constants */
+#define NAND_CI_CHIPNR_MSK 0x03
+#define NAND_CI_CELLTYPE_MSK 0x0C
+#define NAND_CI_CELLTYPE_SHIFT 2
+
+/* Keep gcc happy */
+struct nand_chip;
+
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
+#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+
+/* ONFI timing mode, used in both asynchronous and synchronous mode */
+#define ONFI_TIMING_MODE_0 (1 << 0)
+#define ONFI_TIMING_MODE_1 (1 << 1)
+#define ONFI_TIMING_MODE_2 (1 << 2)
+#define ONFI_TIMING_MODE_3 (1 << 3)
+#define ONFI_TIMING_MODE_4 (1 << 4)
+#define ONFI_TIMING_MODE_5 (1 << 5)
+#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
+
+/* ONFI feature address */
+#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
+
+/* Vendor-specific feature address (Micron) */
+#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
+
+/* ONFI subfeature parameters length */
+#define ONFI_SUBFEATURE_PARAM_LEN 4
+
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+
+struct nand_onfi_params {
+ /* rev info and features block */
+ /* 'O' 'N' 'F' 'I' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ __le16 opt_cmd;
+ u8 reserved0[2];
+ __le16 ext_param_page_length; /* since ONFI 2.1 */
+ u8 num_of_param_pages; /* since ONFI 2.1 */
+ u8 reserved1[17];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id;
+ __le16 date_code;
+ u8 reserved2[13];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ __le32 data_bytes_per_ppage;
+ __le16 spare_bytes_per_ppage;
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ u8 programs_per_page;
+ u8 ppage_attr;
+ u8 ecc_bits;
+ u8 interleaved_bits;
+ u8 interleaved_ops;
+ u8 reserved3[13];
+
+ /* electrical parameter block */
+ u8 io_pin_capacitance_max;
+ __le16 async_timing_mode;
+ __le16 program_cache_timing_mode;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_ccs;
+ __le16 src_sync_timing_mode;
+ __le16 src_ssync_features;
+ __le16 clk_pin_capacitance_typ;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ u8 input_pin_capacitance_max;
+ u8 driver_strength_support;
+ __le16 t_int_r;
+ __le16 t_ald;
+ u8 reserved4[7];
+
+ /* vendor */
+ __le16 vendor_revision;
+ u8 vendor[88];
+
+ __le16 crc;
+} __packed;
+
+#define ONFI_CRC_BASE 0x4F4E
+
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
+#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
+#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
+struct onfi_ext_section {
+ u8 type;
+ u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+ __le16 crc;
+ u8 sig[4]; /* 'E' 'P' 'P' 'S' */
+ u8 reserved0[10];
+ struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+ /*
+ * The actual size of the Extended Parameter Page is in
+ * @ext_param_page_length of nand_onfi_params{}.
+ * The following are the variable length sections.
+ * So we do not add any fields below. Please see the ONFI spec.
+ */
+} __packed;
+
+struct nand_onfi_vendor_micron {
+ u8 two_plane_read;
+ u8 read_cache;
+ u8 read_unique_id;
+ u8 dq_imped;
+ u8 dq_imped_num_settings;
+ u8 dq_imped_feat_addr;
+ u8 rb_pulldown_strength;
+ u8 rb_pulldown_strength_feat_addr;
+ u8 rb_pulldown_strength_num_settings;
+ u8 otp_mode;
+ u8 otp_page_start;
+ u8 otp_data_prot_addr;
+ u8 otp_num_pages;
+ u8 otp_feat_addr;
+ u8 read_retry_options;
+ u8 reserved[72];
+ u8 param_revision;
+} __packed;
+
+struct jedec_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+/* JEDEC features */
+#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+
+struct nand_jedec_params {
+ /* rev info and features block */
+ /* 'J' 'E' 'S' 'D' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ u8 opt_cmd[3];
+ __le16 sec_cmd;
+ u8 num_of_param_pages;
+ u8 reserved0[18];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id[6];
+ u8 reserved1[10];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ u8 reserved2[6];
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ u8 programs_per_page;
+ u8 multi_plane_addr;
+ u8 multi_plane_op_attr;
+ u8 reserved3[38];
+
+ /* electrical parameter block */
+ __le16 async_sdr_speed_grade;
+ __le16 toggle_ddr_speed_grade;
+ __le16 sync_ddr_speed_grade;
+ u8 async_sdr_features;
+ u8 toggle_ddr_features;
+ u8 sync_ddr_features;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_r_multi_plane;
+ __le16 t_ccs;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ __le16 clk_pin_capacitance_typ;
+ u8 driver_strength_support;
+ __le16 t_ald;
+ u8 reserved4[36];
+
+ /* ECC and endurance block */
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ struct jedec_ecc_info ecc_info[4];
+ u8 reserved5[29];
+
+ /* reserved */
+ u8 reserved6[148];
+
+ /* vendor */
+ __le16 vendor_rev_num;
+ u8 reserved7[88];
+
+ /* CRC for Parameter Page */
+ __le16 crc;
+} __packed;
+
+/**
+ * struct nand_hw_control - Control structure for hardware controller (e.g. ECC generator) shared among independent devices
+ * @lock: protection lock
+ * @active: the mtd device which holds the controller currently
+ * @wq: wait queue to sleep on if a NAND operation is in
+ * progress used instead of the per chip wait queue
+ * when a hw controller is available.
+ */
+struct nand_hw_control {
+ spinlock_t lock;
+ struct nand_chip *active;
+ wait_queue_head_t wq;
+};
+
+/**
+ * struct nand_ecc_ctrl - Control structure for ECC
+ * @mode: ECC mode
+ * @steps: number of ECC steps per page
+ * @size: data bytes per ECC step
+ * @bytes: ECC bytes per step
+ * @strength: max number of correctable bits per ECC step
+ * @total: total number of ECC bytes per page
+ * @prepad: padding information for syndrome based ECC generators
+ * @postpad: padding information for syndrome based ECC generators
+ * @layout: ECC layout control struct pointer
+ * @priv: pointer to private ECC control data
+ * @hwctl: function to control hardware ECC generator. Must only
+ * be provided if a hardware ECC is available
+ * @calculate: function for ECC calculation or readback from ECC hardware
+ * @correct: function for ECC correction, matching to ECC generator (sw/hw)
+ * @read_page_raw: function to read a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and always return contiguous in-band and
+ * out-of-band data even if they're not stored
+ * contiguously on the NAND chip (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
+ * @write_page_raw: function to write a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and consider the passed data as contiguous
+ * in-band and out-of-band data. ECC controller is
+ * responsible for doing the appropriate transformations
+ * to adapt to its specific layout (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
+ * @read_page: function to read a page according to the ECC generator
+ * requirements; returns maximum number of bitflips corrected in
+ * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
+ * @read_subpage: function to read parts of the page covered by ECC;
+ * returns same as read_page()
+ * @write_subpage: function to write parts of the page covered by ECC.
+ * @write_page: function to write a page according to the ECC generator
+ * requirements.
+ * @write_oob_raw: function to write chip OOB data without ECC
+ * @read_oob_raw: function to read chip OOB data without ECC
+ * @read_oob: function to read chip OOB data
+ * @write_oob: function to write chip OOB data
+ */
+struct nand_ecc_ctrl {
+ nand_ecc_modes_t mode;
+ int steps;
+ int size;
+ int bytes;
+ int total;
+ int strength;
+ int prepad;
+ int postpad;
+ struct nand_ecclayout *layout;
+ void *priv;
+ void (*hwctl)(struct mtd_info *mtd, int mode);
+ int (*calculate)(struct mtd_info *mtd, const uint8_t *dat,
+ uint8_t *ecc_code);
+ int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
+ uint8_t *calc_ecc);
+ int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page);
+ int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required);
+ int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page);
+ int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offs, uint32_t len, uint8_t *buf, int page);
+ int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, uint32_t data_len,
+ const uint8_t *data_buf, int oob_required);
+ int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required);
+ int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+ int page);
+ int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+ int page);
+ int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
+ int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
+ int page);
+};
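+
+/*
+ * Illustrative sketch (not part of the upstream header): the minimum a
+ * hardware-ECC driver typically fills in before nand_scan_tail().  The
+ * geometry (512-byte steps, 7 ECC bytes, 4-bit strength) and the helper name
+ * are examples only; real values are controller specific.
+ */
+static inline void example_setup_hw_ecc(struct nand_ecc_ctrl *ecc,
+					void (*hwctl)(struct mtd_info *, int),
+					int (*calculate)(struct mtd_info *,
+							 const uint8_t *,
+							 uint8_t *),
+					int (*correct)(struct mtd_info *,
+						       uint8_t *, uint8_t *,
+						       uint8_t *))
+{
+	ecc->mode	= NAND_ECC_HW;
+	ecc->size	= 512;		/* data bytes per ECC step */
+	ecc->bytes	= 7;		/* ECC bytes generated per step */
+	ecc->strength	= 4;		/* correctable bits per step */
+	ecc->hwctl	= hwctl;
+	ecc->calculate	= calculate;
+	ecc->correct	= correct;
+	/* steps and total are derived by nand_scan_tail() */
+}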
+
+/**
+ * struct nand_buffers - buffer structure for read/write
+ * @ecccalc: buffer pointer for calculated ECC, size is oobsize.
+ * @ecccode: buffer pointer for ECC read from flash, size is oobsize.
+ * @databuf: buffer pointer for data, size is (page size + oobsize).
+ *
+ * Do not change the order of buffers. databuf and oobrbuf must be in
+ * consecutive order.
+ */
+struct nand_buffers {
+ uint8_t *ecccalc;
+ uint8_t *ecccode;
+ uint8_t *databuf;
+};
+
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the
+ * flash device
+ * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the
+ * flash device.
+ * @read_byte: [REPLACEABLE] read one byte from the chip
+ * @read_word: [REPLACEABLE] read one word from the chip
+ * @write_byte: [REPLACEABLE] write a single byte to the chip on the
+ * low 8 I/O lines
+ * @write_buf: [REPLACEABLE] write data from the buffer to the chip
+ * @read_buf: [REPLACEABLE] read data from the chip into the buffer
+ * @select_chip: [REPLACEABLE] select chip nr
+ * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers
+ * @block_markbad: [REPLACEABLE] mark a block bad
+ * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling
+ * ALE/CLE/nCE. Also used to write command and address
+ * @init_size: [BOARDSPECIFIC] hardwarespecific function for setting
+ * mtd->oobsize, mtd->writesize and so on.
+ * @id_data contains the 8 ID bytes returned by NAND_CMD_READID.
+ * Returns the bus width.
+ * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing
+ * device ready/busy line. If set to NULL no access to
+ * ready/busy is available and the ready/busy information
+ * is read from the chip status register.
+ * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing
+ * commands to the chip.
+ * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on
+ * ready.
+ * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
+ * setting the read-retry mode. Mostly needed for MLC NAND.
+ * @ecc: [BOARDSPECIFIC] ECC control structure
+ * @buffers: buffer structure for read/write
+ * @hwcontrol: platform-specific hardware control structure
+ * @erase: [REPLACEABLE] erase function
+ * @scan_bbt: [REPLACEABLE] function to scan bad block table
+ * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
+ * data from array to read regs (tR).
+ * @state: [INTERN] the current state of the NAND device
+ * @oob_poi: "poison value buffer," used for laying out OOB data
+ * before writing
+ * @page_shift: [INTERN] number of address bits in a page (column
+ * address bits).
+ * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
+ * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
+ * @chip_shift: [INTERN] number of address bits in one chip
+ * @options: [BOARDSPECIFIC] various chip options. They can partly
+ * be set to inform nand_scan about special functionality.
+ * See the defines for further explanation.
+ * @bbt_options: [INTERN] bad block specific options. All options used
+ * here must come from bbm.h. By default, these options
+ * will be copied to the appropriate nand_bbt_descr's.
+ * @badblockpos: [INTERN] position of the bad block marker in the oob
+ * area.
+ * @badblockbits: [INTERN] minimum number of set bits in a good block's
+ * bad block marker position; i.e., BBM == 11110111b is
+ * not bad when badblockbits == 7
+ * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC.
+ * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet.
+ * Minimum amount of bit errors per @ecc_step_ds guaranteed
+ * to be correctable. If unknown, set to zero.
+ * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds,
+ * also from the datasheet. It is the recommended ECC step
+ * size, if known; if unknown, set to zero.
+ * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
+ * either deduced from the datasheet if the NAND
+ * chip is not ONFI compliant or set to 0 if it is
+ * (an ONFI chip is always configured in mode 0
+ * after a NAND reset)
+ * @numchips: [INTERN] number of physical chips
+ * @chipsize: [INTERN] the size of one chip for multichip arrays
+ * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
+ * @pagebuf: [INTERN] holds the pagenumber which is currently in
+ * data_buf.
+ * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
+ * currently in data_buf.
+ * @subpagesize: [INTERN] holds the subpagesize
+ * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded),
+ * non 0 if ONFI supported.
+ * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded),
+ * non 0 if JEDEC supported.
+ * @onfi_params: [INTERN] holds the ONFI parameter page when ONFI is
+ * supported, 0 otherwise.
+ * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is
+ * supported, 0 otherwise.
+ * @read_retries: [INTERN] the number of read retry modes supported
+ * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
+ * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
+ * @bbt: [INTERN] bad block table pointer
+ * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
+ * lookup.
+ * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
+ * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial
+ * bad block scan.
+ * @controller: [REPLACEABLE] a pointer to a hardware controller
+ * structure which is shared among multiple independent
+ * devices.
+ * @priv: [OPTIONAL] pointer to private chip data
+ * @errstat: [OPTIONAL] hardware specific function to perform
+ * additional error status checks (determine if errors are
+ * correctable).
+ * @write_page: [REPLACEABLE] High-level page write function
+ */
+
+struct nand_chip {
+ void __iomem *IO_ADDR_R;
+ void __iomem *IO_ADDR_W;
+
+ uint8_t (*read_byte)(struct mtd_info *mtd);
+ u16 (*read_word)(struct mtd_info *mtd);
+ void (*write_byte)(struct mtd_info *mtd, uint8_t byte);
+ void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
+ void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
+ void (*select_chip)(struct mtd_info *mtd, int chip);
+ int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
+ int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
+ void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
+ int (*init_size)(struct mtd_info *mtd, struct nand_chip *this,
+ u8 *id_data);
+ int (*dev_ready)(struct mtd_info *mtd);
+ void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column,
+ int page_addr);
+ int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
+ int (*erase)(struct mtd_info *mtd, int page);
+ int (*scan_bbt)(struct mtd_info *mtd);
+ int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
+ int status, int page);
+ int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
+ uint32_t offset, int data_len, const uint8_t *buf,
+ int oob_required, int page, int cached, int raw);
+ int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
+ int feature_addr, uint8_t *subfeature_para);
+ int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
+ int feature_addr, uint8_t *subfeature_para);
+ int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode);
+
+ int chip_delay;
+ unsigned int options;
+ unsigned int bbt_options;
+
+ int page_shift;
+ int phys_erase_shift;
+ int bbt_erase_shift;
+ int chip_shift;
+ int numchips;
+ uint64_t chipsize;
+ int pagemask;
+ int pagebuf;
+ unsigned int pagebuf_bitflips;
+ int subpagesize;
+ uint8_t bits_per_cell;
+ uint16_t ecc_strength_ds;
+ uint16_t ecc_step_ds;
+ int onfi_timing_mode_default;
+ int badblockpos;
+ int badblockbits;
+
+ int onfi_version;
+ int jedec_version;
+ union {
+ struct nand_onfi_params onfi_params;
+ struct nand_jedec_params jedec_params;
+ };
+
+ int read_retries;
+
+ flstate_t state;
+
+ uint8_t *oob_poi;
+ struct nand_hw_control *controller;
+
+ struct nand_ecc_ctrl ecc;
+ struct nand_buffers *buffers;
+ struct nand_hw_control hwcontrol;
+
+ uint8_t *bbt;
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+
+ struct nand_bbt_descr *badblock_pattern;
+
+ void *priv;
+};
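+
+/*
+ * Illustrative sketch (not part of the upstream header): how a controller
+ * driver might use NAND_BUSWIDTH_AUTO.  The hardware starts in 8-bit mode,
+ * the detected width is checked after nand_scan_ident(), and the controller
+ * is reconfigured before nand_scan_tail().  set_bus_width() is a hypothetical
+ * controller callback supplied by the caller.
+ */
+static inline int example_scan_buswidth_auto(struct mtd_info *mtd,
+					     struct nand_chip *chip,
+					     void (*set_bus_width)(int bits))
+{
+	int ret;
+
+	chip->options |= NAND_BUSWIDTH_AUTO;
+	set_bus_width(8);			/* probe in 8-bit mode */
+
+	ret = nand_scan_ident(mtd, 1, NULL);	/* buswidth detected via READID/ONFI */
+	if (ret)
+		return ret;
+
+	if (chip->options & NAND_BUSWIDTH_16)
+		set_bus_width(16);		/* widen before nand_scan_tail() */
+
+	return nand_scan_tail(mtd);
+}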
+
+/*
+ * NAND Flash Manufacturer ID Codes
+ */
+#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_STMICRO 0x20
+#define NAND_MFR_HYNIX 0xad
+#define NAND_MFR_MICRON 0x2c
+#define NAND_MFR_AMD 0x01
+#define NAND_MFR_MACRONIX 0xc2
+#define NAND_MFR_EON 0x92
+#define NAND_MFR_SANDISK 0x45
+#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_ATO 0x9b
+
+/* The maximum expected count of bytes in the NAND ID sequence */
+#define NAND_MAX_ID_LEN 8
+
+/*
+ * A helper for defining older NAND chips where the second ID byte fully
+ * defined the chip, including the geometry (chip size, eraseblock size, page
+ * size). All these chips have 512 bytes NAND page size.
+ */
+#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
+ .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+
+/*
+ * A helper for defining newer chips which report their page size and
+ * eraseblock size via the extended ID bytes.
+ *
+ * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
+ * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
+ * device ID now only represented a particular total chip size (and voltage,
+ * buswidth), and the page size, eraseblock size, and OOB size could vary while
+ * using the same device ID.
+ */
+#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
+ .options = (opts) }
+
+#define NAND_ECC_INFO(_strength, _step) \
+ { .strength_ds = (_strength), .step_ds = (_step) }
+#define NAND_ECC_STRENGTH(type) ((type)->ecc.strength_ds)
+#define NAND_ECC_STEP(type) ((type)->ecc.step_ds)
+
+/**
+ * struct nand_flash_dev - NAND Flash Device ID Structure
+ * @name: a human-readable name of the NAND chip
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the same
+ * memory address as @id[0])
+ * @dev_id: device ID part of the full chip ID array (refers to the same memory
+ * address as @id[1])
+ * @id: full device ID array
+ * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
+ * well as the eraseblock size) is determined from the extended NAND
+ * chip ID array
+ * @chipsize: total chip size in MiB
+ * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
+ * @options: stores various chip bit options
+ * @id_len: The valid length of the @id.
+ * @oobsize: OOB size
+ * @ecc: ECC correctability and step information from the datasheet.
+ * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
+ * @ecc_strength_ds in nand_chip{}.
+ * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
+ * @ecc_step_ds in nand_chip{}, also from the datasheet.
+ * For example, the "4bit ECC for each 512Byte" can be set with
+ * NAND_ECC_INFO(4, 512).
+ * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND
+ * reset. Should be deduced from timings described
+ * in the datasheet.
+ *
+ */
+struct nand_flash_dev {
+ char *name;
+ union {
+ struct {
+ uint8_t mfr_id;
+ uint8_t dev_id;
+ };
+ uint8_t id[NAND_MAX_ID_LEN];
+ };
+ unsigned int pagesize;
+ unsigned int chipsize;
+ unsigned int erasesize;
+ unsigned int options;
+ uint16_t id_len;
+ uint16_t oobsize;
+ struct {
+ uint16_t strength_ds;
+ uint16_t step_ds;
+ } ecc;
+ int onfi_timing_mode_default;
+};
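+
+/*
+ * Illustrative sketch (not part of the upstream header): hypothetical table
+ * entries built with the LEGACY_ID_NAND/EXTENDED_ID_NAND helpers above.  The
+ * names, device IDs and geometry are made up; the real table lives in
+ * drivers/mtd/nand/nand_ids.c.
+ */
+static const struct nand_flash_dev example_nand_ids[] = {
+	/* fully described by the device ID: 64 MiB, 16 KiB eraseblocks */
+	LEGACY_ID_NAND("Example NAND 64MiB 8-bit", 0x76, 64, 0x4000, 0),
+	/* page and eraseblock size come from the extended ID bytes */
+	EXTENDED_ID_NAND("Example NAND 256MiB 8-bit", 0xda, 256, 0),
+	{NULL}
+};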
+
+/**
+ * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
+ * @name: Manufacturer name
+ * @id: manufacturer ID code of device.
+*/
+struct nand_manufacturers {
+ int id;
+ char *name;
+};
+
+extern struct nand_flash_dev nand_flash_ids[];
+extern struct nand_manufacturers nand_manuf_ids[];
+
+extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
+extern int nand_default_bbt(struct mtd_info *mtd);
+extern int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
+extern int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
+extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
+extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+ int allowbbt);
+extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, uint8_t *buf);
+
+/**
+ * struct platform_nand_chip - chip level device structure
+ * @nr_chips: max. number of chips to scan for
+ * @chip_offset: chip number offset
+ * @nr_partitions: number of partitions pointed to by partitions (or zero)
+ * @partitions: mtd partition list
+ * @chip_delay: R/B delay value in us
+ * @options: Option flags, e.g. 16bit buswidth
+ * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
+ * @ecclayout: ECC layout info structure
+ * @part_probe_types: NULL-terminated array of probe types
+ */
+struct platform_nand_chip {
+ int nr_chips;
+ int chip_offset;
+ int nr_partitions;
+ struct mtd_partition *partitions;
+ struct nand_ecclayout *ecclayout;
+ int chip_delay;
+ unsigned int options;
+ unsigned int bbt_options;
+ const char **part_probe_types;
+};
+
+/* Keep gcc happy */
+struct platform_device;
+
+/**
+ * struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
+ * @hwcontrol: platform specific hardware control structure
+ * @dev_ready: platform specific function to read ready/busy pin
+ * @select_chip: platform specific chip select function
+ * @cmd_ctrl: platform specific function for controlling
+ * ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
+ * @read_byte: platform specific function to read one byte from chip
+ * @priv: private data to transport driver specific settings
+ *
+ * All fields are optional and depend on the hardware driver requirements
+ */
+struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
+ void (*hwcontrol)(struct mtd_info *mtd, int cmd);
+ int (*dev_ready)(struct mtd_info *mtd);
+ void (*select_chip)(struct mtd_info *mtd, int chip);
+ void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
+ void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
+ void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
+ unsigned char (*read_byte)(struct mtd_info *mtd);
+ void *priv;
+};
+
+/**
+ * struct platform_nand_data - container structure for platform-specific data
+ * @chip: chip level chip structure
+ * @ctrl: controller level device structure
+ */
+struct platform_nand_data {
+ struct platform_nand_chip chip;
+ struct platform_nand_ctrl ctrl;
+};
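+
+/*
+ * Illustrative sketch (not part of the upstream header): hypothetical
+ * platform data for a simple memory-mapped controller.  The values are made
+ * up, and a real board would also fill in ctrl.cmd_ctrl, ctrl.dev_ready and
+ * friends (NAND_BBT_USE_FLASH comes from linux/mtd/bbm.h).
+ */
+static struct platform_nand_data example_nand_pdata = {
+	.chip = {
+		.nr_chips	= 1,
+		.chip_delay	= 30,			/* R/B delay (tR) in us */
+		.options	= NAND_BUSWIDTH_16,	/* 16-bit bus */
+		.bbt_options	= NAND_BBT_USE_FLASH,	/* keep BBT in flash */
+	},
+	.ctrl = {
+		/* .probe, .cmd_ctrl, .dev_ready, ... supplied by the board */
+	},
+};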
+
+/* Some helpers to access the data structures */
+static inline
+struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ return chip->priv;
+}
+
+/* return the supported features. */
+static inline int onfi_feature(struct nand_chip *chip)
+{
+ return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0;
+}
+
+/* return the supported asynchronous timing mode. */
+static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
+{
+ if (!chip->onfi_version)
+ return ONFI_TIMING_MODE_UNKNOWN;
+ return le16_to_cpu(chip->onfi_params.async_timing_mode);
+}
+
+/* return the supported synchronous timing mode. */
+static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
+{
+ if (!chip->onfi_version)
+ return ONFI_TIMING_MODE_UNKNOWN;
+ return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
+}
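+
+/*
+ * Illustrative sketch (not part of the upstream header): picking the fastest
+ * asynchronous timing mode advertised by an ONFI chip.  The modes above form
+ * a bitmask, so the highest set bit is the fastest supported mode.  Returns
+ * -1 for non-ONFI chips.  The helper name is made up.
+ */
+static inline int example_best_onfi_async_mode(struct nand_chip *chip)
+{
+	int modes = onfi_get_async_timing_mode(chip);
+	int mode;
+
+	if (modes == ONFI_TIMING_MODE_UNKNOWN)
+		return -1;
+
+	for (mode = 5; mode >= 0; mode--)
+		if (modes & (1 << mode))
+			return mode;
+
+	return 0;
+}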
+
+/*
+ * Check if it is an SLC nand.
+ * !nand_is_slc() can be used to check for MLC/TLC nand chips.
+ * We do not distinguish between MLC and TLC for now.
+ */
+static inline bool nand_is_slc(struct nand_chip *chip)
+{
+ return chip->bits_per_cell == 1;
+}
+
+/**
+ * Check if the opcode's address should be sent only on the lower 8 bits
+ * @command: opcode to check
+ */
+static inline int nand_opcode_8bits(unsigned int command)
+{
+ switch (command) {
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ case NAND_CMD_GET_FEATURES:
+ case NAND_CMD_SET_FEATURES:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* return the supported JEDEC features. */
+static inline int jedec_feature(struct nand_chip *chip)
+{
+ return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features)
+ : 0;
+}
+
+/*
+ * struct nand_sdr_timings - SDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an SDR NAND chip.
+ * This information can be found in every NAND datasheet, and the meaning of
+ * the timings is described in the ONFI specification:
+ * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
+ * Parameters)
+ *
+ * All these timings are expressed in picoseconds.
+ */
+
+struct nand_sdr_timings {
+ u32 tALH_min;
+ u32 tADL_min;
+ u32 tALS_min;
+ u32 tAR_min;
+ u32 tCEA_max;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCHZ_max;
+ u32 tCLH_min;
+ u32 tCLR_min;
+ u32 tCLS_min;
+ u32 tCOH_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDS_min;
+ u32 tFEAT_max;
+ u32 tIR_min;
+ u32 tITC_max;
+ u32 tRC_min;
+ u32 tREA_max;
+ u32 tREH_min;
+ u32 tRHOH_min;
+ u32 tRHW_min;
+ u32 tRHZ_max;
+ u32 tRLOH_min;
+ u32 tRP_min;
+ u32 tRR_min;
+ u64 tRST_max;
+ u32 tWB_max;
+ u32 tWC_min;
+ u32 tWH_min;
+ u32 tWHR_min;
+ u32 tWP_min;
+ u32 tWW_min;
+};
+
+/* get timing characteristics from ONFI timing mode. */
+const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
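+
+/*
+ * Illustrative sketch (not part of the upstream header): timing mode 0 is
+ * supported by every ONFI chip after reset, so the lookup below is always
+ * valid; invalid modes make the function return an error pointer instead.
+ * The helper name is made up.
+ */
+static inline u32 example_onfi_mode0_twb_ps(void)
+{
+	const struct nand_sdr_timings *t =
+		onfi_async_timing_mode_to_sdr_timings(0);
+
+	return t->tWB_max;	/* expressed in picoseconds */
+}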
+#endif /* __LINUX_MTD_NAND_H */
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
new file mode 100644
index 000000000..74acf5367
--- /dev/null
+++ b/include/linux/mtd/nand_bch.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file is the header for the NAND BCH ECC implementation.
+ */
+
+#ifndef __MTD_NAND_BCH_H__
+#define __MTD_NAND_BCH_H__
+
+struct mtd_info;
+struct nand_bch_control;
+
+#if defined(CONFIG_MTD_NAND_ECC_BCH)
+
+static inline int mtd_nand_has_bch(void) { return 1; }
+
+/*
+ * Calculate BCH ecc code
+ */
+int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code);
+
+/*
+ * Detect and correct bit errors
+ */
+int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc,
+ u_char *calc_ecc);
+/*
+ * Initialize BCH encoder/decoder
+ */
+struct nand_bch_control *
+nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
+ unsigned int eccbytes, struct nand_ecclayout **ecclayout);
+/*
+ * Release BCH encoder/decoder resources
+ */
+void nand_bch_free(struct nand_bch_control *nbc);
+
+#else /* !CONFIG_MTD_NAND_ECC_BCH */
+
+static inline int mtd_nand_has_bch(void) { return 0; }
+
+static inline int
+nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ return -1;
+}
+
+static inline int
+nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ return -1;
+}
+
+static inline struct nand_bch_control *
+nand_bch_init(struct mtd_info *mtd, unsigned int eccsize,
+ unsigned int eccbytes, struct nand_ecclayout **ecclayout)
+{
+ return NULL;
+}
+
+static inline void nand_bch_free(struct nand_bch_control *nbc) {}
+
+#endif /* CONFIG_MTD_NAND_ECC_BCH */
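+
+/*
+ * Illustrative sketch (not part of the upstream header): a typical BCH setup
+ * for 512-byte ECC steps.  Seven ECC bytes per 512-byte step gives 4-bit
+ * correction with the 13-bit Galois field used for this step size.  The
+ * helper name is made up.
+ */
+static inline struct nand_bch_control *
+example_init_bch_4bit(struct mtd_info *mtd, struct nand_ecclayout **layout)
+{
+	return nand_bch_init(mtd, 512, 7, layout);
+}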
+
+#endif /* __MTD_NAND_BCH_H__ */
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
new file mode 100644
index 000000000..4d8406c81
--- /dev/null
+++ b/include/linux/mtd/nand_ecc.h
@@ -0,0 +1,42 @@
+/*
+ * drivers/mtd/nand_ecc.h
+ *
+ * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com>
+ * David Woodhouse <dwmw2@infradead.org>
+ * Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file is the header for the ECC algorithm.
+ */
+
+#ifndef __MTD_NAND_ECC_H__
+#define __MTD_NAND_ECC_H__
+
+struct mtd_info;
+
+/*
+ * Calculate 3 byte ECC code for eccsize byte block
+ */
+void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
+ u_char *ecc_code);
+
+/*
+ * Calculate 3 byte ECC code for 256/512 byte block
+ */
+int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
+
+/*
+ * Detect and correct a 1 bit error for eccsize byte block
+ */
+int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
+ unsigned int eccsize);
+
+/*
+ * Detect and correct a 1 bit error for 256/512 byte block
+ */
+int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
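+
+/*
+ * Illustrative sketch (not part of the upstream header): software ECC check
+ * of one block read from flash, using the helpers above.  nand_correct_data()
+ * returns 0 or the number of corrected bitflips on success, and a negative
+ * value if the data is uncorrectable.  The helper name is made up.
+ */
+static inline int example_check_block(struct mtd_info *mtd, u_char *dat,
+				      u_char *read_ecc)
+{
+	u_char calc_ecc[3];	/* 3 ECC bytes per 256/512 byte block */
+
+	nand_calculate_ecc(mtd, dat, calc_ecc);
+	return nand_correct_data(mtd, dat, read_ecc, calc_ecc);
+}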
+
+#endif /* __MTD_NAND_ECC_H__ */
diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h
new file mode 100644
index 000000000..d0558a982
--- /dev/null
+++ b/include/linux/mtd/ndfc.h
@@ -0,0 +1,67 @@
+/*
+ * linux/include/linux/mtd/ndfc.h
+ *
+ * Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Info:
+ * Contains defines and data structures for the NDFC NAND controller
+ *
+ */
+#ifndef __LINUX_MTD_NDFC_H
+#define __LINUX_MTD_NDFC_H
+
+/* NDFC Register definitions */
+#define NDFC_CMD 0x00
+#define NDFC_ALE 0x04
+#define NDFC_DATA 0x08
+#define NDFC_ECC 0x10
+#define NDFC_BCFG0 0x30
+#define NDFC_BCFG1 0x34
+#define NDFC_BCFG2 0x38
+#define NDFC_BCFG3 0x3c
+#define NDFC_CCR 0x40
+#define NDFC_STAT 0x44
+#define NDFC_HWCTL 0x48
+#define NDFC_REVID 0x50
+
+#define NDFC_STAT_IS_READY 0x01000000
+
+#define NDFC_CCR_RESET_CE 0x80000000 /* CE Reset */
+#define NDFC_CCR_RESET_ECC 0x40000000 /* ECC Reset */
+#define NDFC_CCR_RIE 0x20000000 /* Interrupt Enable on Device Rdy */
+#define NDFC_CCR_REN 0x10000000 /* Enable wait for Rdy in LinearR */
+#define NDFC_CCR_ROMEN 0x08000000 /* Enable ROM In LinearR */
+#define NDFC_CCR_ARE 0x04000000 /* Auto-Read Enable */
+#define NDFC_CCR_BS(x) (((x) & 0x3) << 24) /* Select Bank on CE[x] */
+#define NDFC_CCR_BS_MASK 0x03000000 /* Select Bank */
+#define NDFC_CCR_ARAC0 0x00000000 /* 3 Addr, 1 Col 2 Row 512b page */
+#define NDFC_CCR_ARAC1 0x00001000 /* 4 Addr, 1 Col 3 Row 512b page */
+#define NDFC_CCR_ARAC2 0x00002000 /* 4 Addr, 2 Col 2 Row 2K page */
+#define NDFC_CCR_ARAC3 0x00003000 /* 5 Addr, 2 Col 3 Row 2K page */
+#define NDFC_CCR_ARAC_MASK 0x00003000 /* Auto-Read mode Addr Cycles */
+#define NDFC_CCR_RPG 0x0000C000 /* Auto-Read Page */
+#define NDFC_CCR_EBCC 0x00000004 /* EBC Configuration Completed */
+#define NDFC_CCR_DHC 0x00000002 /* Direct Hardware Control Enable */
+
+#define NDFC_BxCFG_EN 0x80000000 /* Bank Enable */
+#define NDFC_BxCFG_CED 0x40000000 /* nCE Style */
+#define NDFC_BxCFG_SZ_MASK 0x08000000 /* Bank Size */
+#define NDFC_BxCFG_SZ_8BIT 0x00000000 /* 8bit */
+#define NDFC_BxCFG_SZ_16BIT 0x08000000 /* 16bit */
+
+#define NDFC_MAX_BANKS 4
+
+struct ndfc_controller_settings {
+ uint32_t ccr_settings;
+ uint64_t ndfc_erpn;
+};
+
+struct ndfc_chip_settings {
+ uint32_t bank_settings;
+};
+
+#endif
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
new file mode 100644
index 000000000..b059629e2
--- /dev/null
+++ b/include/linux/mtd/nftl.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __MTD_NFTL_H__
+#define __MTD_NFTL_H__
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+
+#include <mtd/nftl-user.h>
+
+/* these values are used in the ReplUnitTable */
+#define BLOCK_NIL 0xffff /* last block of a chain */
+#define BLOCK_FREE 0xfffe /* free block */
+#define BLOCK_NOTEXPLORED 0xfffd /* unexplored block, only used during mounting */
+#define BLOCK_RESERVED 0xfffc /* bios block or bad block */
+
+struct NFTLrecord {
+ struct mtd_blktrans_dev mbd;
+ __u16 MediaUnit, SpareMediaUnit;
+ __u32 EraseSize;
+ struct NFTLMediaHeader MediaHdr;
+ int usecount;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ __u16 numvunits;
+ __u16 lastEUN; /* should be suppressed */
+ __u16 numfreeEUNs;
+ __u16 LastFreeEUN; /* To speed up finding a free EUN */
+ int head,sect,cyl;
+ __u16 *EUNtable; /* [numvunits]: First EUN for each virtual unit */
+ __u16 *ReplUnitTable; /* [numEUNs]: ReplUnitNumber for each */
+ unsigned int nb_blocks; /* number of physical blocks */
+ unsigned int nb_boot_blocks; /* number of blocks used by the bios */
+ struct erase_info instr;
+ struct nand_ecclayout oobinfo;
+};
+
+int NFTL_mount(struct NFTLrecord *s);
+int NFTL_formatblock(struct NFTLrecord *s, int block);
+
+int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf);
+int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
+ size_t *retlen, uint8_t *buf);
+
+#ifndef NFTL_MAJOR
+#define NFTL_MAJOR 93
+#endif
+
+#define MAX_NFTLS 16
+#define MAX_SECTORS_PER_UNIT 64
+#define NFTL_PARTN_BITS 4
+
+#endif /* __MTD_NFTL_H__ */
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
new file mode 100644
index 000000000..4596503c9
--- /dev/null
+++ b/include/linux/mtd/onenand.h
@@ -0,0 +1,242 @@
+/*
+ * linux/include/linux/mtd/onenand.h
+ *
+ * Copyright © 2005-2009 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_ONENAND_H
+#define __LINUX_MTD_ONENAND_H
+
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/onenand_regs.h>
+#include <linux/mtd/bbm.h>
+
+#define MAX_DIES 2
+#define MAX_BUFFERRAM 2
+
+/* Scan and identify a OneNAND device */
+extern int onenand_scan(struct mtd_info *mtd, int max_chips);
+/* Free resources held by the OneNAND device */
+extern void onenand_release(struct mtd_info *mtd);
+
+/**
+ * struct onenand_bufferram - OneNAND BufferRAM Data
+ * @blockpage: block & page address in BufferRAM
+ */
+struct onenand_bufferram {
+ int blockpage;
+};
+
+/**
+ * struct onenand_chip - OneNAND Private Flash Chip Data
+ * @base: [BOARDSPECIFIC] address to access OneNAND
+ * @dies: [INTERN][FLEX-ONENAND] number of dies on chip
+ * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies
+ * @diesize: [INTERN][FLEX-ONENAND] Size of the dies
+ * @chipsize: [INTERN] the size of one chip for multichip arrays
+ * FIXME For Flex-OneNAND, chipsize holds maximum possible
+ * device size, i.e. when all blocks are considered MLC
+ * @device_id: [INTERN] device ID
+ * @density_mask: chip density, used for DDP devices
+ * @version_id: [INTERN] version ID
+ * @options: [BOARDSPECIFIC] various chip options. They can
+ * partly be set to inform onenand_scan about special functionality
+ * @erase_shift: [INTERN] number of address bits in a block
+ * @page_shift: [INTERN] number of address bits in a page
+ * @page_mask: [INTERN] a page per block mask
+ * @writesize: [INTERN] a real page size
+ * @bufferram_index: [INTERN] BufferRAM index
+ * @bufferram: [INTERN] BufferRAM info
+ * @readw: [REPLACEABLE] hardware specific function for read short
+ * @writew: [REPLACEABLE] hardware specific function for write short
+ * @command: [REPLACEABLE] hardware specific function for writing
+ * commands to the chip
+ * @wait: [REPLACEABLE] hardware specific function for wait on ready
+ * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready
+ * @unlock_all: [REPLACEABLE] hardware specific function for unlock all
+ * @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
+ * @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
+ * @read_word: [REPLACEABLE] hardware specific function for read
+ * register of OneNAND
+ * @write_word: [REPLACEABLE] hardware specific function for write
+ * register of OneNAND
+ * @mmcontrol: sync burst read function
+ * @chip_probe: [REPLACEABLE] hardware specific function for chip probe
+ * @block_markbad: function to mark a block as bad
+ * @scan_bbt: [REPLACEABLE] hardware specific function for scanning
+ * Bad block Table
+ * @chip_lock: [INTERN] spinlock used to protect access to this
+ * structure and the chip
+ * @wq: [INTERN] wait queue to sleep on if a OneNAND
+ * operation is in progress
+ * @state: [INTERN] the current state of the OneNAND device
+ * @page_buf: [INTERN] page main data buffer
+ * @oob_buf: [INTERN] page oob data buffer
+ * @subpagesize: [INTERN] holds the subpagesize
+ * @ecclayout: [REPLACEABLE] the default ecc placement scheme
+ * @bbm: [REPLACEABLE] pointer to Bad Block Management
+ * @priv: [OPTIONAL] pointer to private chip data
+ */
+struct onenand_chip {
+ void __iomem *base;
+ unsigned dies;
+ unsigned boundary[MAX_DIES];
+ loff_t diesize[MAX_DIES];
+ unsigned int chipsize;
+ unsigned int device_id;
+ unsigned int version_id;
+ unsigned int technology;
+ unsigned int density_mask;
+ unsigned int options;
+
+ unsigned int erase_shift;
+ unsigned int page_shift;
+ unsigned int page_mask;
+ unsigned int writesize;
+
+ unsigned int bufferram_index;
+ struct onenand_bufferram bufferram[MAX_BUFFERRAM];
+
+ int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len);
+ int (*wait)(struct mtd_info *mtd, int state);
+ int (*bbt_wait)(struct mtd_info *mtd, int state);
+ void (*unlock_all)(struct mtd_info *mtd);
+ int (*read_bufferram)(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count);
+ int (*write_bufferram)(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset, size_t count);
+ unsigned short (*read_word)(void __iomem *addr);
+ void (*write_word)(unsigned short value, void __iomem *addr);
+ void (*mmcontrol)(struct mtd_info *mtd, int sync_read);
+ int (*chip_probe)(struct mtd_info *mtd);
+ int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
+ int (*scan_bbt)(struct mtd_info *mtd);
+ int (*enable)(struct mtd_info *mtd);
+ int (*disable)(struct mtd_info *mtd);
+
+ struct completion complete;
+ int irq;
+
+ spinlock_t chip_lock;
+ wait_queue_head_t wq;
+ flstate_t state;
+ unsigned char *page_buf;
+ unsigned char *oob_buf;
+#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
+ unsigned char *verify_buf;
+#endif
+
+ int subpagesize;
+ struct nand_ecclayout *ecclayout;
+
+ void *bbm;
+
+ void *priv;
+
+ /*
+ * Set when the current operation is composed of a
+ * sequence of commands, for example cache program.
+ * For such commands the status OnGo bit is checked at
+ * the end of the sequence.
+ */
+ unsigned int ongoing;
+};
+
+/*
+ * Helper macros
+ */
+#define ONENAND_PAGES_PER_BLOCK (1<<6)
+
+#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
+#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
+#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)
+#define ONENAND_SET_PREV_BUFFERRAM(this) (this->bufferram_index ^= 1)
+#define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0)
+#define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1)
+
+#define FLEXONENAND(this) \
+ (this->device_id & DEVICE_IS_FLEXONENAND)
+#define ONENAND_GET_SYS_CFG1(this) \
+ (this->read_word(this->base + ONENAND_REG_SYS_CFG1))
+#define ONENAND_SET_SYS_CFG1(v, this) \
+ (this->write_word(v, this->base + ONENAND_REG_SYS_CFG1))
+
+#define ONENAND_IS_DDP(this) \
+ (this->device_id & ONENAND_DEVICE_IS_DDP)
+
+#define ONENAND_IS_MLC(this) \
+ (this->technology & ONENAND_TECHNOLOGY_IS_MLC)
+
+#ifdef CONFIG_MTD_ONENAND_2X_PROGRAM
+#define ONENAND_IS_2PLANE(this) \
+ (this->options & ONENAND_HAS_2PLANE)
+#else
+#define ONENAND_IS_2PLANE(this) (0)
+#endif
+
+#define ONENAND_IS_CACHE_PROGRAM(this) \
+ (this->options & ONENAND_HAS_CACHE_PROGRAM)
+
+#define ONENAND_IS_NOP_1(this) \
+ (this->options & ONENAND_HAS_NOP_1)
+
+/* Check byte access in OneNAND */
+#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1)
+
+/*
+ * Options bits
+ */
+#define ONENAND_HAS_CONT_LOCK (0x0001)
+#define ONENAND_HAS_UNLOCK_ALL (0x0002)
+#define ONENAND_HAS_2PLANE (0x0004)
+#define ONENAND_HAS_4KB_PAGE (0x0008)
+#define ONENAND_HAS_CACHE_PROGRAM (0x0010)
+#define ONENAND_HAS_NOP_1 (0x0020)
+#define ONENAND_SKIP_UNLOCK_CHECK (0x0100)
+#define ONENAND_PAGEBUF_ALLOC (0x1000)
+#define ONENAND_OOBBUF_ALLOC (0x2000)
+#define ONENAND_SKIP_INITIAL_UNLOCKING (0x4000)
+
+#define ONENAND_IS_4KB_PAGE(this) \
+ (this->options & ONENAND_HAS_4KB_PAGE)
+
+/*
+ * OneNAND Flash Manufacturer ID Codes
+ */
+#define ONENAND_MFR_SAMSUNG 0xec
+#define ONENAND_MFR_NUMONYX 0x20
+
+/**
+ * struct onenand_manufacturers - OneNAND Flash Manufacturer ID Structure
+ * @name: Manufacturer name
+ * @id: manufacturer ID code of device.
+*/
+struct onenand_manufacturers {
+ int id;
+ char *name;
+};
+
+int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops);
+unsigned onenand_block(struct onenand_chip *this, loff_t addr);
+loff_t onenand_addr(struct onenand_chip *this, int block);
+int flexonenand_region(struct mtd_info *mtd, loff_t addr);
+
+struct mtd_partition;
+
+struct onenand_platform_data {
+ void (*mmcontrol)(struct mtd_info *mtd, int sync_read);
+ int (*read_bufferram)(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count);
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+};
+
+#endif /* __LINUX_MTD_ONENAND_H */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
new file mode 100644
index 000000000..d60130f88
--- /dev/null
+++ b/include/linux/mtd/onenand_regs.h
@@ -0,0 +1,223 @@
+/*
+ * linux/include/linux/mtd/onenand_regs.h
+ *
+ * OneNAND Register header file
+ *
+ * Copyright (C) 2005-2007 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ONENAND_REG_H
+#define __ONENAND_REG_H
+
+/* Memory Address Map Translation (Word order) */
+#define ONENAND_MEMORY_MAP(x) ((x) << 1)
+
+/*
+ * External BufferRAM area
+ */
+#define ONENAND_BOOTRAM ONENAND_MEMORY_MAP(0x0000)
+#define ONENAND_DATARAM ONENAND_MEMORY_MAP(0x0200)
+#define ONENAND_SPARERAM ONENAND_MEMORY_MAP(0x8010)
+
+/*
+ * OneNAND Registers
+ */
+#define ONENAND_REG_MANUFACTURER_ID ONENAND_MEMORY_MAP(0xF000)
+#define ONENAND_REG_DEVICE_ID ONENAND_MEMORY_MAP(0xF001)
+#define ONENAND_REG_VERSION_ID ONENAND_MEMORY_MAP(0xF002)
+#define ONENAND_REG_DATA_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF003)
+#define ONENAND_REG_BOOT_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF004)
+#define ONENAND_REG_NUM_BUFFERS ONENAND_MEMORY_MAP(0xF005)
+#define ONENAND_REG_TECHNOLOGY ONENAND_MEMORY_MAP(0xF006)
+
+#define ONENAND_REG_START_ADDRESS1 ONENAND_MEMORY_MAP(0xF100)
+#define ONENAND_REG_START_ADDRESS2 ONENAND_MEMORY_MAP(0xF101)
+#define ONENAND_REG_START_ADDRESS3 ONENAND_MEMORY_MAP(0xF102)
+#define ONENAND_REG_START_ADDRESS4 ONENAND_MEMORY_MAP(0xF103)
+#define ONENAND_REG_START_ADDRESS5 ONENAND_MEMORY_MAP(0xF104)
+#define ONENAND_REG_START_ADDRESS6 ONENAND_MEMORY_MAP(0xF105)
+#define ONENAND_REG_START_ADDRESS7 ONENAND_MEMORY_MAP(0xF106)
+#define ONENAND_REG_START_ADDRESS8 ONENAND_MEMORY_MAP(0xF107)
+
+#define ONENAND_REG_START_BUFFER ONENAND_MEMORY_MAP(0xF200)
+#define ONENAND_REG_COMMAND ONENAND_MEMORY_MAP(0xF220)
+#define ONENAND_REG_SYS_CFG1 ONENAND_MEMORY_MAP(0xF221)
+#define ONENAND_REG_SYS_CFG2 ONENAND_MEMORY_MAP(0xF222)
+#define ONENAND_REG_CTRL_STATUS ONENAND_MEMORY_MAP(0xF240)
+#define ONENAND_REG_INTERRUPT ONENAND_MEMORY_MAP(0xF241)
+#define ONENAND_REG_START_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24C)
+#define ONENAND_REG_END_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24D)
+#define ONENAND_REG_WP_STATUS ONENAND_MEMORY_MAP(0xF24E)
+
+#define ONENAND_REG_ECC_STATUS ONENAND_MEMORY_MAP(0xFF00)
+#define ONENAND_REG_ECC_M0 ONENAND_MEMORY_MAP(0xFF01)
+#define ONENAND_REG_ECC_S0 ONENAND_MEMORY_MAP(0xFF02)
+#define ONENAND_REG_ECC_M1 ONENAND_MEMORY_MAP(0xFF03)
+#define ONENAND_REG_ECC_S1 ONENAND_MEMORY_MAP(0xFF04)
+#define ONENAND_REG_ECC_M2 ONENAND_MEMORY_MAP(0xFF05)
+#define ONENAND_REG_ECC_S2 ONENAND_MEMORY_MAP(0xFF06)
+#define ONENAND_REG_ECC_M3 ONENAND_MEMORY_MAP(0xFF07)
+#define ONENAND_REG_ECC_S3 ONENAND_MEMORY_MAP(0xFF08)
+
+/*
+ * Device ID Register F001h (R)
+ */
+#define DEVICE_IS_FLEXONENAND (1 << 9)
+#define FLEXONENAND_PI_MASK (0x3ff)
+#define FLEXONENAND_PI_UNLOCK_SHIFT (14)
+#define ONENAND_DEVICE_DENSITY_MASK (0xf)
+#define ONENAND_DEVICE_DENSITY_SHIFT (4)
+#define ONENAND_DEVICE_IS_DDP (1 << 3)
+#define ONENAND_DEVICE_IS_DEMUX (1 << 2)
+#define ONENAND_DEVICE_VCC_MASK (0x3)
+
+#define ONENAND_DEVICE_DENSITY_512Mb (0x002)
+#define ONENAND_DEVICE_DENSITY_1Gb (0x003)
+#define ONENAND_DEVICE_DENSITY_2Gb (0x004)
+#define ONENAND_DEVICE_DENSITY_4Gb (0x005)
+
+/*
+ * Version ID Register F002h (R)
+ */
+#define ONENAND_VERSION_PROCESS_SHIFT (8)
+
+/*
+ * Technology Register F006h (R)
+ */
+#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0)
+
+/*
+ * Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W)
+ */
+#define ONENAND_DDP_SHIFT (15)
+#define ONENAND_DDP_CHIP0 (0)
+#define ONENAND_DDP_CHIP1 (1 << ONENAND_DDP_SHIFT)
+
+/*
+ * Start Address 8 F107h (R/W)
+ */
+/* Note: It's actually 0x3f in case of SLC */
+#define ONENAND_FPA_MASK (0x7f)
+#define ONENAND_FPA_SHIFT (2)
+#define ONENAND_FSA_MASK (0x03)
+
+/*
+ * Start Buffer Register F200h (R/W)
+ */
+#define ONENAND_BSA_MASK (0x03)
+#define ONENAND_BSA_SHIFT (8)
+#define ONENAND_BSA_BOOTRAM (0 << 2)
+#define ONENAND_BSA_DATARAM0 (2 << 2)
+#define ONENAND_BSA_DATARAM1 (3 << 2)
+/* Note: It's actually 0x03 in case of SLC */
+#define ONENAND_BSC_MASK (0x07)
+
+/*
+ * Command Register F220h (R/W)
+ */
+#define ONENAND_CMD_READ (0x00)
+#define ONENAND_CMD_READOOB (0x13)
+#define ONENAND_CMD_PROG (0x80)
+#define ONENAND_CMD_PROGOOB (0x1A)
+#define ONENAND_CMD_2X_PROG (0x7D)
+#define ONENAND_CMD_2X_CACHE_PROG (0x7F)
+#define ONENAND_CMD_UNLOCK (0x23)
+#define ONENAND_CMD_LOCK (0x2A)
+#define ONENAND_CMD_LOCK_TIGHT (0x2C)
+#define ONENAND_CMD_UNLOCK_ALL (0x27)
+#define ONENAND_CMD_ERASE (0x94)
+#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95)
+#define ONENAND_CMD_ERASE_VERIFY (0x71)
+#define ONENAND_CMD_RESET (0xF0)
+#define ONENAND_CMD_OTP_ACCESS (0x65)
+#define ONENAND_CMD_READID (0x90)
+#define FLEXONENAND_CMD_PI_UPDATE (0x05)
+#define FLEXONENAND_CMD_PI_ACCESS (0x66)
+#define FLEXONENAND_CMD_RECOVER_LSB (0x05)
+
+/* NOTE: Those are not *REAL* commands */
+#define ONENAND_CMD_BUFFERRAM (0x1978)
+#define FLEXONENAND_CMD_READ_PI (0x1985)
+
+/*
+ * System Configuration 1 Register F221h (R, R/W)
+ */
+#define ONENAND_SYS_CFG1_SYNC_READ (1 << 15)
+#define ONENAND_SYS_CFG1_BRL_7 (7 << 12)
+#define ONENAND_SYS_CFG1_BRL_6 (6 << 12)
+#define ONENAND_SYS_CFG1_BRL_5 (5 << 12)
+#define ONENAND_SYS_CFG1_BRL_4 (4 << 12)
+#define ONENAND_SYS_CFG1_BRL_3 (3 << 12)
+#define ONENAND_SYS_CFG1_BRL_10 (2 << 12)
+#define ONENAND_SYS_CFG1_BRL_9 (1 << 12)
+#define ONENAND_SYS_CFG1_BRL_8 (0 << 12)
+#define ONENAND_SYS_CFG1_BRL_SHIFT (12)
+#define ONENAND_SYS_CFG1_BL_32 (4 << 9)
+#define ONENAND_SYS_CFG1_BL_16 (3 << 9)
+#define ONENAND_SYS_CFG1_BL_8 (2 << 9)
+#define ONENAND_SYS_CFG1_BL_4 (1 << 9)
+#define ONENAND_SYS_CFG1_BL_CONT (0 << 9)
+#define ONENAND_SYS_CFG1_BL_SHIFT (9)
+#define ONENAND_SYS_CFG1_NO_ECC (1 << 8)
+#define ONENAND_SYS_CFG1_RDY (1 << 7)
+#define ONENAND_SYS_CFG1_INT (1 << 6)
+#define ONENAND_SYS_CFG1_IOBE (1 << 5)
+#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4)
+#define ONENAND_SYS_CFG1_VHF (1 << 3)
+#define ONENAND_SYS_CFG1_HF (1 << 2)
+#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1)
+
+/*
+ * Controller Status Register F240h (R)
+ */
+#define ONENAND_CTRL_ONGO (1 << 15)
+#define ONENAND_CTRL_LOCK (1 << 14)
+#define ONENAND_CTRL_LOAD (1 << 13)
+#define ONENAND_CTRL_PROGRAM (1 << 12)
+#define ONENAND_CTRL_ERASE (1 << 11)
+#define ONENAND_CTRL_ERROR (1 << 10)
+#define ONENAND_CTRL_RSTB (1 << 7)
+#define ONENAND_CTRL_OTP_L (1 << 6)
+#define ONENAND_CTRL_OTP_BL (1 << 5)
+
+/*
+ * Interrupt Status Register F241h (R)
+ */
+#define ONENAND_INT_MASTER (1 << 15)
+#define ONENAND_INT_READ (1 << 7)
+#define ONENAND_INT_WRITE (1 << 6)
+#define ONENAND_INT_ERASE (1 << 5)
+#define ONENAND_INT_RESET (1 << 4)
+#define ONENAND_INT_CLEAR (0 << 0)
+
+/*
+ * NAND Flash Write Protection Status Register F24Eh (R)
+ */
+#define ONENAND_WP_US (1 << 2)
+#define ONENAND_WP_LS (1 << 1)
+#define ONENAND_WP_LTS (1 << 0)
+
+/*
+ * ECC Status Register FF00h (R)
+ */
+#define ONENAND_ECC_1BIT (1 << 0)
+#define ONENAND_ECC_1BIT_ALL (0x5555)
+#define ONENAND_ECC_2BIT (1 << 1)
+#define ONENAND_ECC_2BIT_ALL (0xAAAA)
+#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010)
+#define ONENAND_ECC_3BIT (1 << 2)
+#define ONENAND_ECC_4BIT (1 << 3)
+#define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010)
+
+/*
+ * One-Time Programmable (OTP)
+ */
+#define FLEXONENAND_OTP_LOCK_OFFSET (2048)
+#define ONENAND_OTP_LOCK_OFFSET (14)
+
+#endif /* __ONENAND_REG_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
new file mode 100644
index 000000000..6a35e6de5
--- /dev/null
+++ b/include/linux/mtd/partitions.h
@@ -0,0 +1,88 @@
+/*
+ * MTD partitioning layer definitions
+ *
+ * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
+ *
+ * This code is GPL
+ */
+
+#ifndef MTD_PARTITIONS_H
+#define MTD_PARTITIONS_H
+
+#include <linux/types.h>
+
+
+/*
+ * Partition definition structure:
+ *
+ * An array of struct partition is passed along with a MTD object to
+ * mtd_device_register() to create them.
+ *
+ * For each partition, these fields are available:
+ * name: string that will be used to label the partition's MTD device.
+ * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition
+ * will extend to the end of the master MTD device.
+ * offset: absolute starting position within the master MTD device; if
+ * defined as MTDPART_OFS_APPEND, the partition will start where the
+ * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block;
+ * if MTDPART_OFS_RETAIN, consume as much as possible, leaving size
+ * after the end of partition.
+ * mask_flags: contains flags that have to be masked (removed) from the
+ * master MTD flag set for the corresponding MTD partition.
+ * For example, to force a read-only partition, simply adding
+ * MTD_WRITEABLE to the mask_flags will do the trick.
+ *
+ * Note: writeable partitions require their size and offset be
+ * erasesize aligned (e.g. use MTDPART_OFS_NXTBLK).
+ */
+
+struct mtd_partition {
+ const char *name; /* identifier string */
+ uint64_t size; /* partition size */
+ uint64_t offset; /* offset within the master MTD space */
+ uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */
+};
+
+#define MTDPART_OFS_RETAIN (-3)
+#define MTDPART_OFS_NXTBLK (-2)
+#define MTDPART_OFS_APPEND (-1)
+#define MTDPART_SIZ_FULL (0)
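+
+/*
+ * Illustrative sketch (not part of the upstream header): a static partition
+ * table as a board file might pass to mtd_device_register().  Names and
+ * sizes are made up; MTD_WRITEABLE comes from mtd/mtd-abi.h (pulled in via
+ * linux/mtd/mtd.h).
+ */
+static const struct mtd_partition example_parts[] = {
+	{
+		.name		= "bootloader",
+		.offset		= 0,
+		.size		= 1 * 1024 * 1024,	/* 1 MiB */
+		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
+	},
+	{
+		.name		= "rootfs",
+		.offset		= MTDPART_OFS_APPEND,	/* right after "bootloader" */
+		.size		= MTDPART_SIZ_FULL,	/* to the end of the device */
+	},
+};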
+
+
+struct mtd_info;
+struct device_node;
+
+/**
+ * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
+ * @origin: for RedBoot, start address of MTD device
+ * @of_node: for OF parsers, device node containing partitioning information
+ */
+struct mtd_part_parser_data {
+ unsigned long origin;
+ struct device_node *of_node;
+};
+
+
+/*
+ * Functions dealing with the various ways of partitioning the space
+ */
+
+struct mtd_part_parser {
+ struct list_head list;
+ struct module *owner;
+ const char *name;
+ int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
+ struct mtd_part_parser_data *);
+};
+
+extern void register_mtd_parser(struct mtd_part_parser *parser);
+extern void deregister_mtd_parser(struct mtd_part_parser *parser);
+
+int mtd_is_partition(const struct mtd_info *mtd);
+int mtd_add_partition(struct mtd_info *master, const char *name,
+ long long offset, long long length);
+int mtd_del_partition(struct mtd_info *master, int partno);
+uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+
+#endif
diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h
new file mode 100644
index 000000000..42ff7ff09
--- /dev/null
+++ b/include/linux/mtd/pfow.h
@@ -0,0 +1,156 @@
+/* Primary function overlay window definitions
+ * and service functions used by LPDDR chips
+ */
+#ifndef __LINUX_MTD_PFOW_H
+#define __LINUX_MTD_PFOW_H
+
+#include <linux/mtd/qinfo.h>
+
+/* PFOW registers addressing */
+/* Address of symbol "P" */
+#define PFOW_QUERY_STRING_P 0x0000
+/* Address of symbol "F" */
+#define PFOW_QUERY_STRING_F 0x0002
+/* Address of symbol "O" */
+#define PFOW_QUERY_STRING_O 0x0004
+/* Address of symbol "W" */
+#define PFOW_QUERY_STRING_W 0x0006
+/* Identification info for LPDDR chip */
+#define PFOW_MANUFACTURER_ID 0x0020
+#define PFOW_DEVICE_ID 0x0022
+/* Address in PFOW where the program buffer can be found */
+#define PFOW_PROGRAM_BUFFER_OFFSET 0x0040
+/* Size of program buffer in words */
+#define PFOW_PROGRAM_BUFFER_SIZE 0x0042
+/* Address command code register */
+#define PFOW_COMMAND_CODE 0x0080
+/* command data register */
+#define PFOW_COMMAND_DATA 0x0084
+/* command address register lower address bits */
+#define PFOW_COMMAND_ADDRESS_L 0x0088
+/* command address register upper address bits */
+#define PFOW_COMMAND_ADDRESS_H 0x008a
+/* number of bytes to be programmed, lower address bits */
+#define PFOW_DATA_COUNT_L 0x0090
+/* number of bytes to be programmed, higher address bits */
+#define PFOW_DATA_COUNT_H 0x0092
+/* command execution register, the only possible value is 0x01 */
+#define PFOW_COMMAND_EXECUTE 0x00c0
+/* 0x01 should be written at this address to clear buffer */
+#define PFOW_CLEAR_PROGRAM_BUFFER 0x00c4
+/* device program/erase suspend register */
+#define PFOW_PROGRAM_ERASE_SUSPEND 0x00c8
+/* device status register */
+#define PFOW_DSR 0x00cc
+
+/* LPDDR memory device command codes */
+/* They are possible values of PFOW command code register */
+#define LPDDR_WORD_PROGRAM 0x0041
+#define LPDDR_BUFF_PROGRAM 0x00E9
+#define LPDDR_BLOCK_ERASE 0x0020
+#define LPDDR_LOCK_BLOCK 0x0061
+#define LPDDR_UNLOCK_BLOCK 0x0062
+#define LPDDR_READ_BLOCK_LOCK_STATUS 0x0065
+#define LPDDR_INFO_QUERY 0x0098
+#define LPDDR_READ_OTP 0x0097
+#define LPDDR_PROG_OTP 0x00C0
+#define LPDDR_RESUME 0x00D0
+
+/* Defines possible value of PFOW command execution register */
+#define LPDDR_START_EXECUTION 0x0001
+
+/* Defines possible value of PFOW program/erase suspend register */
+#define LPDDR_SUSPEND 0x0001
+
+/* Possible values of PFOW device status register */
+/* access R - read; RC read & clearable */
+#define DSR_DPS (1<<1) /* RC; device protect status
+ * 0 - not protected 1 - locked */
+#define DSR_PSS (1<<2) /* R; program suspend status;
+ * 0-prog in progress/completed,
+ * 1- prog suspended */
+#define DSR_VPPS (1<<3) /* RC; 0-Vpp OK, * 1-Vpp low */
+#define DSR_PROGRAM_STATUS (1<<4) /* RC; 0-successful, 1-error */
+#define DSR_ERASE_STATUS (1<<5) /* RC; erase or blank check status;
+ * 0-success erase/blank check,
+ * 1 blank check error */
+#define DSR_ESS (1<<6) /* R; erase suspend status;
+ * 0-erase in progress/complete,
+ * 1 erase suspended */
+#define DSR_READY_STATUS (1<<7) /* R; Device status
+ * 0-busy,
+ * 1-ready */
+#define DSR_RPS (0x3<<8) /* RC; region program status
+ * 00 - Success,
+ * 01-re-program attempt in region with
+ * object mode data,
+ * 10-object mode program w attempt in
+ * region with control mode data
+ * 11-attempt to program invalid half
+ * with 0x41 command */
+#define DSR_AOS (1<<12) /* RC; 1- AO related failure */
+#define DSR_AVAILABLE (1<<15) /* R; Device availability
+ * 1 - Device available
+ * 0 - not available */
+
+/* The superset of all possible error bits in DSR */
+#define DSR_ERR 0x133A
+
+static inline void send_pfow_command(struct map_info *map,
+ unsigned long cmd_code, unsigned long adr,
+ unsigned long len, map_word *datum)
+{
+ int bits_per_chip = map_bankwidth(map) * 8;
+
+ map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE);
+ map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)),
+ map->pfow_base + PFOW_COMMAND_ADDRESS_L);
+ map_write(map, CMD(adr>>bits_per_chip),
+ map->pfow_base + PFOW_COMMAND_ADDRESS_H);
+ if (len) {
+ map_write(map, CMD(len & ((1<<bits_per_chip) - 1)),
+ map->pfow_base + PFOW_DATA_COUNT_L);
+ map_write(map, CMD(len>>bits_per_chip),
+ map->pfow_base + PFOW_DATA_COUNT_H);
+ }
+ if (datum)
+ map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA);
+
+ /* Command execution start */
+ map_write(map, CMD(LPDDR_START_EXECUTION),
+ map->pfow_base + PFOW_COMMAND_EXECUTE);
+}
+
+static inline void print_drs_error(unsigned dsr)
+{
+ int prog_status = (dsr & DSR_RPS) >> 8;
+
+ if (!(dsr & DSR_AVAILABLE))
+ printk(KERN_NOTICE"DSR.15: (0) Device not Available\n");
+	if ((prog_status & 0x03) == 0x03)
+ printk(KERN_NOTICE"DSR.9,8: (11) Attempt to program invalid "
+ "half with 41h command\n");
+ else if (prog_status & 0x02)
+ printk(KERN_NOTICE"DSR.9,8: (10) Object Mode Program attempt "
+ "in region with Control Mode data\n");
+ else if (prog_status & 0x01)
+ printk(KERN_NOTICE"DSR.9,8: (01) Program attempt in region "
+ "with Object Mode data\n");
+ if (!(dsr & DSR_READY_STATUS))
+ printk(KERN_NOTICE"DSR.7: (0) Device is Busy\n");
+ if (dsr & DSR_ESS)
+ printk(KERN_NOTICE"DSR.6: (1) Erase Suspended\n");
+ if (dsr & DSR_ERASE_STATUS)
+ printk(KERN_NOTICE"DSR.5: (1) Erase/Blank check error\n");
+ if (dsr & DSR_PROGRAM_STATUS)
+ printk(KERN_NOTICE"DSR.4: (1) Program Error\n");
+ if (dsr & DSR_VPPS)
+ printk(KERN_NOTICE"DSR.3: (1) Vpp low detect, operation "
+ "aborted\n");
+ if (dsr & DSR_PSS)
+ printk(KERN_NOTICE"DSR.2: (1) Program suspended\n");
+ if (dsr & DSR_DPS)
+ printk(KERN_NOTICE"DSR.1: (1) Aborted Erase/Program attempt "
+ "on locked block\n");
+}
+#endif /* __LINUX_MTD_PFOW_H */
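
Illustration (not part of the patch): a rough sketch of how an LPDDR chip driver might use send_pfow_command() and the DSR bits above to erase a block. CMDVAL() and map_read() come from the qinfo/map headers this file includes; the busy-wait loop is deliberately simplified (real code polls with a timeout and may suspend), and example_erase_block is an invented name.

#include <linux/errno.h>
#include <linux/mtd/pfow.h>

/* Issue a block erase at @adr through the overlay window and decode errors. */
static int example_erase_block(struct map_info *map, unsigned long adr)
{
	unsigned int dsr;

	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);

	/* Wait until the device reports ready (simplified: no timeout). */
	do {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
	} while (!(dsr & DSR_READY_STATUS));

	if (dsr & DSR_ERR) {
		print_drs_error(dsr);
		return -EIO;
	}
	return 0;
}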
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
new file mode 100644
index 000000000..aa6a2633c
--- /dev/null
+++ b/include/linux/mtd/physmap.h
@@ -0,0 +1,36 @@
+/*
+ * For boards with physically mapped flash and using
+ * drivers/mtd/maps/physmap.c mapping driver.
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MTD_PHYSMAP__
+#define __LINUX_MTD_PHYSMAP__
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+struct map_info;
+struct platform_device;
+
+struct physmap_flash_data {
+ unsigned int width;
+ int (*init)(struct platform_device *);
+ void (*exit)(struct platform_device *);
+ void (*set_vpp)(struct platform_device *, int);
+ unsigned int nr_parts;
+ unsigned int pfow_base;
+ char *probe_type;
+ struct mtd_partition *parts;
+ const char * const *part_probe_types;
+};
+
+#endif /* __LINUX_MTD_PHYSMAP__ */
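
Illustration (not part of the patch): a minimal sketch of platform registration for this mapping driver, assuming a 16-bit NOR chip at a made-up physical address. The device name "physmap-flash" matches drivers/mtd/maps/physmap.c; every example_ identifier is invented, and "cmdlinepart" is used here only as one possible partition parser.

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/mtd/physmap.h>

static const char * const example_probe_types[] = { "cmdlinepart", NULL };

static struct physmap_flash_data example_flash_data = {
	.width			= 2,			/* bank width in bytes */
	.part_probe_types	= example_probe_types,	/* let a parser find the partitions */
};

static struct resource example_flash_resource = {
	.start	= 0x20000000,				/* hypothetical base address */
	.end	= 0x20000000 + SZ_64M - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device example_flash_device = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= { .platform_data = &example_flash_data },
	.resource	= &example_flash_resource,
	.num_resources	= 1,
};

The .parts/.nr_parts fields could instead carry a static table such as the partitions.h sketch above; the board code would then call platform_device_register(&example_flash_device).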
diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h
new file mode 100644
index 000000000..8dfb7e142
--- /dev/null
+++ b/include/linux/mtd/pismo.h
@@ -0,0 +1,17 @@
+/*
+ * PISMO memory driver - http://www.pismoworld.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ */
+#ifndef __LINUX_MTD_PISMO_H
+#define __LINUX_MTD_PISMO_H
+
+struct pismo_pdata {
+ void (*set_vpp)(void *, int);
+ void *vpp_data;
+ phys_addr_t cs_addrs[5];
+};
+
+#endif
diff --git a/include/linux/mtd/plat-ram.h b/include/linux/mtd/plat-ram.h
new file mode 100644
index 000000000..44212d65a
--- /dev/null
+++ b/include/linux/mtd/plat-ram.h
@@ -0,0 +1,34 @@
+/* linux/include/linux/mtd/plat-ram.h
+ *
+ * (c) 2004 Simtec Electronics
+ * http://www.simtec.co.uk/products/SWLINUX/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Generic platform device based RAM map
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_MTD_PLATRAM_H
+#define __LINUX_MTD_PLATRAM_H __FILE__
+
+#define PLATRAM_RO (0)
+#define PLATRAM_RW (1)
+
+struct platdata_mtd_ram {
+ const char *mapname;
+ const char * const *map_probes;
+ const char * const *probes;
+ struct mtd_partition *partitions;
+ int nr_partitions;
+ int bankwidth;
+
+ /* control callbacks */
+
+ void (*set_rw)(struct device *dev, int to);
+};
+
+#endif /* __LINUX_MTD_PLATRAM_H */
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
new file mode 100644
index 000000000..7b3d487d8
--- /dev/null
+++ b/include/linux/mtd/qinfo.h
@@ -0,0 +1,91 @@
+#ifndef __LINUX_MTD_QINFO_H
+#define __LINUX_MTD_QINFO_H
+
+#include <linux/mtd/map.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/partitions.h>
+
+/* lpddr_private describes an LPDDR flash chip in the memory map
+ * @ManufactId - Chip Manufacturer ID
+ * @DevId - Chip Device ID
+ * @qinfo - pointer to qinfo records describing the chip
+ * @numchips - number of chips including virtual RWW partitions
+ * @chipshift - Chip/partition size 2^chipshift
+ * @chips - per-chip data structure
+ */
+struct lpddr_private {
+ uint16_t ManufactId;
+ uint16_t DevId;
+ struct qinfo_chip *qinfo;
+ int numchips;
+ unsigned long chipshift;
+ struct flchip chips[0];
+};
+
+/* qinfo_query_info structure contains request information for
+ * each qinfo record
+ * @major - major number of qinfo record
+ * @minor - minor number of qinfo record
+ * @id_str - descriptive string to access the record
+ * @desc - detailed description for the qinfo record
+ */
+struct qinfo_query_info {
+ uint8_t major;
+ uint8_t minor;
+ char *id_str;
+ char *desc;
+};
+
+/*
+ * qinfo_chip structure contains necessary qinfo records data
+ * @DevSizeShift - Device size 2^n bytes
+ * @BufSizeShift - Program buffer size 2^n bytes
+ * @TotalBlocksNum - Total number of blocks
+ * @UniformBlockSizeShift - Uniform block size 2^UniformBlockSizeShift bytes
+ * @HWPartsNum - Number of hardware partitions
+ * @SuspEraseSupp - Suspend erase supported
+ * @SingleWordProgTime - Single word program 2^SingleWordProgTime u-sec
+ * @ProgBufferTime - Program buffer write 2^ProgBufferTime u-sec
+ * @BlockEraseTime - Block erase 2^BlockEraseTime m-sec
+ */
+struct qinfo_chip {
+ /* General device info */
+ uint16_t DevSizeShift;
+ uint16_t BufSizeShift;
+ /* Erase block information */
+ uint16_t TotalBlocksNum;
+ uint16_t UniformBlockSizeShift;
+ /* Partition information */
+ uint16_t HWPartsNum;
+ /* Optional features */
+ uint16_t SuspEraseSupp;
+ /* Operation typical time */
+ uint16_t SingleWordProgTime;
+ uint16_t ProgBufferTime;
+ uint16_t BlockEraseTime;
+};
+
+/* defines for fixup usage */
+#define LPDDR_MFR_ANY 0xffff
+#define LPDDR_ID_ANY 0xffff
+#define NUMONYX_MFGR_ID 0x0089
+#define R18_DEVICE_ID_1G 0x893c
+
+static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map)
+{
+ map_word val = { {0} };
+ val.x[0] = cmd;
+ return val;
+}
+
+#define CMD(x) lpddr_build_cmd(x, map)
+#define CMDVAL(cmd) cmd.x[0]
+
+struct mtd_info *lpddr_cmdset(struct map_info *);
+
+#endif
+
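
Illustration (not part of the patch): the qinfo fields encode sizes as powers of two, so a driver turns them into byte counts by shifting. A small sketch (example_qinfo_sizes is an invented helper):

#include <linux/printk.h>
#include <linux/mtd/qinfo.h>

static void example_qinfo_sizes(const struct qinfo_chip *q)
{
	unsigned long long dev_size = 1ULL << q->DevSizeShift;
	unsigned long buf_size      = 1UL << q->BufSizeShift;
	unsigned long block_size    = 1UL << q->UniformBlockSizeShift;

	pr_info("device %llu bytes, %u blocks of %lu bytes, %lu-byte program buffer\n",
		dev_size, q->TotalBlocksNum, block_size, buf_size);
}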
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
new file mode 100644
index 000000000..1c28f8879
--- /dev/null
+++ b/include/linux/mtd/sh_flctl.h
@@ -0,0 +1,192 @@
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright © 2008 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __SH_FLCTL_H__
+#define __SH_FLCTL_H__
+
+#include <linux/completion.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/pm_qos.h>
+
+/* FLCTL registers */
+#define FLCMNCR(f) (f->reg + 0x0)
+#define FLCMDCR(f) (f->reg + 0x4)
+#define FLCMCDR(f) (f->reg + 0x8)
+#define FLADR(f) (f->reg + 0xC)
+#define FLADR2(f) (f->reg + 0x3C)
+#define FLDATAR(f) (f->reg + 0x10)
+#define FLDTCNTR(f) (f->reg + 0x14)
+#define FLINTDMACR(f) (f->reg + 0x18)
+#define FLBSYTMR(f) (f->reg + 0x1C)
+#define FLBSYCNT(f) (f->reg + 0x20)
+#define FLDTFIFO(f) (f->reg + 0x24)
+#define FLECFIFO(f) (f->reg + 0x28)
+#define FLTRCR(f) (f->reg + 0x2C)
+#define FLHOLDCR(f) (f->reg + 0x38)
+#define FL4ECCRESULT0(f) (f->reg + 0x80)
+#define FL4ECCRESULT1(f) (f->reg + 0x84)
+#define FL4ECCRESULT2(f) (f->reg + 0x88)
+#define FL4ECCRESULT3(f) (f->reg + 0x8C)
+#define FL4ECCCR(f) (f->reg + 0x90)
+#define FL4ECCCNT(f) (f->reg + 0x94)
+#define FLERRADR(f) (f->reg + 0x98)
+
+/* FLCMNCR control bits */
+#define _4ECCCNTEN (0x1 << 24)
+#define _4ECCEN (0x1 << 23)
+#define _4ECCCORRECT (0x1 << 22)
+#define SHBUSSEL (0x1 << 20)
+#define SEL_16BIT (0x1 << 19)
+#define SNAND_E (0x1 << 18) /* SNAND (0=512 1=2048)*/
+#define QTSEL_E (0x1 << 17)
+#define ENDIAN (0x1 << 16) /* 1 = little endian */
+#define FCKSEL_E (0x1 << 15)
+#define ACM_SACCES_MODE (0x01 << 10)
+#define NANWF_E (0x1 << 9)
+#define SE_D (0x1 << 8) /* Spare area disable */
+#define CE1_ENABLE (0x1 << 4) /* Chip Enable 1 */
+#define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */
+#define TYPESEL_SET (0x1 << 0)
+
+/*
+ * Clock settings using the PULSEx registers from FLCMNCR
+ *
+ * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E
+ * to control the clock divider used between the High-Speed Peripheral Clock
+ * and the FLCTL internal clock. If so, use CLK_8B_xxx for connecting 8-bit
+ * and CLK_16B_xxx for connecting 16-bit bus bandwidth NAND chips. For the 16-
+ * bit version the divider is separate for the pulse width of high and low
+ * signals.
+ */
+#define PULSE3 (0x1 << 27)
+#define PULSE2 (0x1 << 17)
+#define PULSE1 (0x1 << 15)
+#define PULSE0 (0x1 << 9)
+#define CLK_8B_0_5 PULSE1
+#define CLK_8B_1 0x0
+#define CLK_8B_1_5 (PULSE1 | PULSE2)
+#define CLK_8B_2 PULSE0
+#define CLK_8B_3 (PULSE0 | PULSE1 | PULSE2)
+#define CLK_8B_4 (PULSE0 | PULSE2)
+#define CLK_16B_6L_2H PULSE0
+#define CLK_16B_9L_3H (PULSE0 | PULSE1 | PULSE2)
+#define CLK_16B_12L_4H (PULSE0 | PULSE2)
+
+/* FLCMDCR control bits */
+#define ADRCNT2_E (0x1 << 31) /* 5byte address enable */
+#define ADRMD_E (0x1 << 26) /* Sector address access */
+#define CDSRC_E (0x1 << 25) /* Data buffer selection */
+#define DOSR_E (0x1 << 24) /* Status read check */
+#define SELRW (0x1 << 21) /* 0:read 1:write */
+#define DOADR_E (0x1 << 20) /* Address stage execute */
+#define ADRCNT_1 (0x00 << 18) /* Address data bytes: 1byte */
+#define ADRCNT_2 (0x01 << 18) /* Address data bytes: 2byte */
+#define ADRCNT_3 (0x02 << 18) /* Address data bytes: 3byte */
+#define ADRCNT_4 (0x03 << 18) /* Address data bytes: 4byte */
+#define DOCMD2_E (0x1 << 17) /* 2nd cmd stage execute */
+#define DOCMD1_E (0x1 << 16) /* 1st cmd stage execute */
+
+/* FLINTDMACR control bits */
+#define ESTERINTE (0x1 << 24) /* ECC error interrupt enable */
+#define AC1CLR (0x1 << 19) /* ECC FIFO clear */
+#define AC0CLR (0x1 << 18) /* Data FIFO clear */
+#define DREQ0EN (0x1 << 16) /* FLDTFIFODMA Request Enable */
+#define ECERB (0x1 << 9) /* ECC error */
+#define STERB (0x1 << 8) /* Status error */
+#define STERINTE (0x1 << 4) /* Status error enable */
+
+/* FLTRCR control bits */
+#define TRSTRT (0x1 << 0) /* translation start */
+#define TREND (0x1 << 1) /* translation end */
+
+/*
+ * FLHOLDCR control bits
+ *
+ * HOLDEN: Bus Occupancy Enable (inverted)
+ * Enable this bit when the external bus might be used in between transfers.
+ * If not set and the bus gets used by other modules, a deadlock occurs.
+ */
+#define HOLDEN (0x1 << 0)
+
+/* FL4ECCCR control bits */
+#define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */
+#define _4ECCEND (0x1 << 1) /* 4 symbols end */
+#define _4ECCEXST (0x1 << 0) /* 4 symbols exist */
+
+#define LOOP_TIMEOUT_MAX 0x00010000
+
+enum flctl_ecc_res_t {
+ FL_SUCCESS,
+ FL_REPAIRABLE,
+ FL_ERROR,
+ FL_TIMEOUT
+};
+
+struct dma_chan;
+
+struct sh_flctl {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct platform_device *pdev;
+ struct dev_pm_qos_request pm_qos;
+ void __iomem *reg;
+
+ uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
+ int read_bytes;
+ unsigned int index;
+ int seqin_column; /* column in SEQIN cmd */
+ int seqin_page_addr; /* page_addr in SEQIN cmd */
+ uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */
+ int erase1_page_addr; /* page_addr in ERASE1 cmd */
+ uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */
+ uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */
+ uint32_t flcmncr_base; /* base value of FLCMNCR */
+ uint32_t flintdmacr_base; /* irq enable bits */
+
+ unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */
+ unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */
+ unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */
+ unsigned qos_request:1; /* QoS request to prevent deep power shutdown */
+
+ /* DMA related objects */
+ struct dma_chan *chan_fifo0_rx;
+ struct dma_chan *chan_fifo0_tx;
+ struct completion dma_complete;
+};
+
+struct sh_flctl_platform_data {
+ struct mtd_partition *parts;
+ int nr_parts;
+ unsigned long flcmncr_val;
+
+ unsigned has_hwecc:1;
+ unsigned use_holden:1;
+
+ unsigned int slave_id_fifo0_tx;
+ unsigned int slave_id_fifo0_rx;
+};
+
+static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo)
+{
+ return container_of(mtdinfo, struct sh_flctl, mtd);
+}
+
+#endif /* __SH_FLCTL_H__ */
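
Illustration (not part of the patch): a sketch of the platform data a board might pass to the FLCTL driver, combining the FLCMNCR bits and clock macros above. The exact value is board- and clock-tree-specific; this particular combination is only an assumption.

#include <linux/mtd/sh_flctl.h>

static struct sh_flctl_platform_data example_flctl_pdata = {
	/* 8-bit NAND, clock divider of 4, bus and type selects as assumed here. */
	.flcmncr_val	= CLK_8B_4 | TYPESEL_SET | NANWF_E | SHBUSSEL,
	.has_hwecc	= 1,	/* use the controller's 4-symbol ECC */
	.use_holden	= 0,
};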
diff --git a/include/linux/mtd/sharpsl.h b/include/linux/mtd/sharpsl.h
new file mode 100644
index 000000000..25f4d2a84
--- /dev/null
+++ b/include/linux/mtd/sharpsl.h
@@ -0,0 +1,20 @@
+/*
+ * SharpSL NAND support
+ *
+ * Copyright (C) 2008 Dmitry Baryshkov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+struct sharpsl_nand_platform_data {
+ struct nand_bbt_descr *badblock_pattern;
+ struct nand_ecclayout *ecc_layout;
+ struct mtd_partition *partitions;
+ unsigned int nr_partitions;
+};
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h
new file mode 100644
index 000000000..581603ac1
--- /dev/null
+++ b/include/linux/mtd/spear_smi.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2010 ST Microelectronics
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MTD_SPEAR_SMI_H
+#define __MTD_SPEAR_SMI_H
+
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+/* max possible slots for serial-nor flash chip in the SMI controller */
+#define MAX_NUM_FLASH_CHIP 4
+
+/* macro to define partitions for flash devices */
+#define DEFINE_PARTS(n, of, s) \
+{ \
+ .name = n, \
+ .offset = of, \
+ .size = s, \
+}
+
+/**
+ * struct spear_smi_flash_info - platform structure for passing flash
+ * information
+ *
+ * name: name of the serial nor flash for identification
+ * mem_base: the memory base on which the flash is mapped
+ * size: size of the flash in bytes
+ * partitions: partition details
+ * nr_partitions: number of partitions
+ * fast_mode: whether flash supports fast mode
+ */
+
+struct spear_smi_flash_info {
+ char *name;
+ unsigned long mem_base;
+ unsigned long size;
+ struct mtd_partition *partitions;
+ int nr_partitions;
+ u8 fast_mode;
+};
+
+/**
+ * struct spear_smi_plat_data - platform structure for configuring smi
+ *
+ * clk_rate: clk rate at which SMI must operate
+ * num_flashes: number of flashes present on board
+ * board_flash_info: specific details of each flash present on board
+ */
+struct spear_smi_plat_data {
+ unsigned long clk_rate;
+ int num_flashes;
+ struct spear_smi_flash_info *board_flash_info;
+ struct device_node *np[MAX_NUM_FLASH_CHIP];
+};
+
+#endif /* __MTD_SPEAR_SMI_H */
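
Illustration (not part of the patch): a sketch of board data using the DEFINE_PARTS helper above. The chip name, base address, sizes, clock rate, and every example_ identifier are assumptions.

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/mtd/spear_smi.h>

static struct mtd_partition example_smi_parts[] = {
	DEFINE_PARTS("firmware", 0, SZ_1M),
	DEFINE_PARTS("data", SZ_1M, SZ_8M - SZ_1M),
};

static struct spear_smi_flash_info example_smi_flash[] = {
	{
		.name		= "m25p64",		/* hypothetical serial NOR */
		.mem_base	= 0xf8000000,		/* hypothetical mapping */
		.size		= SZ_8M,
		.partitions	= example_smi_parts,
		.nr_partitions	= ARRAY_SIZE(example_smi_parts),
	},
};

static struct spear_smi_plat_data example_smi_pdata = {
	.clk_rate		= 50000000,		/* 50 MHz, assumed */
	.num_flashes		= ARRAY_SIZE(example_smi_flash),
	.board_flash_info	= example_smi_flash,
};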
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
new file mode 100644
index 000000000..e5409524b
--- /dev/null
+++ b/include/linux/mtd/spi-nor.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_H
+#define __LINUX_MTD_SPI_NOR_H
+
+/*
+ * Note on opcode nomenclature: some opcodes have a format like
+ * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
+ * of I/O lines used for the opcode, address, and data (respectively). The
+ * FUNCTION has an optional suffix of '4', to represent an opcode which
+ * requires a 4-byte (32-bit) address.
+ */
+
+/* Flash opcodes. */
+#define SPINOR_OP_WREN 0x06 /* Write enable */
+#define SPINOR_OP_RDSR 0x05 /* Read status register */
+#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
+#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual SPI) */
+#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad SPI) */
+#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */
+#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */
+#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */
+#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */
+#define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */
+#define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */
+#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */
+#define SPINOR_OP_RDCR 0x35 /* Read configuration register */
+#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */
+
+/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
+#define SPINOR_OP_READ4 0x13 /* Read data bytes (low frequency) */
+#define SPINOR_OP_READ4_FAST 0x0c /* Read data bytes (high frequency) */
+#define SPINOR_OP_READ4_1_1_2 0x3c /* Read data bytes (Dual SPI) */
+#define SPINOR_OP_READ4_1_1_4 0x6c /* Read data bytes (Quad SPI) */
+#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */
+#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */
+
+/* Used for SST flashes only. */
+#define SPINOR_OP_BP 0x02 /* Byte program */
+#define SPINOR_OP_WRDI 0x04 /* Write disable */
+#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
+
+/* Used for Macronix and Winbond flashes. */
+#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
+#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
+
+/* Used for Spansion flashes only. */
+#define SPINOR_OP_BRWR 0x17 /* Bank register write */
+
+/* Used for Micron flashes only. */
+#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */
+#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */
+
+/* Status Register bits. */
+#define SR_WIP 1 /* Write in progress */
+#define SR_WEL 2 /* Write enable latch */
+/* meaning of other SR_* bits may differ between vendors */
+#define SR_BP0 4 /* Block protect 0 */
+#define SR_BP1 8 /* Block protect 1 */
+#define SR_BP2 0x10 /* Block protect 2 */
+#define SR_SRWD 0x80 /* SR write protect */
+
+#define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */
+
+/* Enhanced Volatile Configuration Register bits */
+#define EVCR_QUAD_EN_MICRON 0x80 /* Micron Quad I/O */
+
+/* Flag Status Register bits */
+#define FSR_READY 0x80
+
+/* Configuration Register bits. */
+#define CR_QUAD_EN_SPAN 0x2 /* Spansion Quad I/O */
+
+enum read_mode {
+ SPI_NOR_NORMAL = 0,
+ SPI_NOR_FAST,
+ SPI_NOR_DUAL,
+ SPI_NOR_QUAD,
+};
+
+/**
+ * struct spi_nor_xfer_cfg - Structure for defining a Serial Flash transfer
+ * @wren: command for "Write Enable", or 0x00 for not required
+ * @cmd: command for operation
+ * @cmd_pins: number of pins to send @cmd (1, 2, 4)
+ * @addr: address for operation
+ * @addr_pins: number of pins to send @addr (1, 2, 4)
+ * @addr_width: number of address bytes
+ * (3,4, or 0 for address not required)
+ * @mode: mode data
+ * @mode_pins: number of pins to send @mode (1, 2, 4)
+ * @mode_cycles: number of mode cycles (0 for mode not required)
+ * @dummy_cycles: number of dummy cycles (0 for dummy not required)
+ */
+struct spi_nor_xfer_cfg {
+ u8 wren;
+ u8 cmd;
+ u8 cmd_pins;
+ u32 addr;
+ u8 addr_pins;
+ u8 addr_width;
+ u8 mode;
+ u8 mode_pins;
+ u8 mode_cycles;
+ u8 dummy_cycles;
+};
+
+#define SPI_NOR_MAX_CMD_SIZE 8
+enum spi_nor_ops {
+ SPI_NOR_OPS_READ = 0,
+ SPI_NOR_OPS_WRITE,
+ SPI_NOR_OPS_ERASE,
+ SPI_NOR_OPS_LOCK,
+ SPI_NOR_OPS_UNLOCK,
+};
+
+enum spi_nor_option_flags {
+ SNOR_F_USE_FSR = BIT(0),
+};
+
+/**
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd: pointer to an mtd_info structure
+ * @lock: the lock for the read/write/erase/lock/unlock operations
+ * @dev: pointer to a SPI device, or a SPI NOR controller device.
+ * @page_size: the page size of the SPI NOR
+ * @addr_width: number of address bytes
+ * @erase_opcode: the opcode for erasing a sector
+ * @read_opcode: the read opcode
+ * @read_dummy: the dummy needed by the read operation
+ * @program_opcode: the program opcode
+ * @flash_read: the mode of the read
+ * @sst_write_second: used by the SST write operation
+ * @flags: flag options for the current SPI-NOR (SNOR_F_*)
+ * @cfg: used by the read_xfer/write_xfer
+ * @cmd_buf: used by the write_reg
+ * @prepare: [OPTIONAL] do some preparations for the
+ * read/write/erase/lock/unlock operations
+ * @unprepare: [OPTIONAL] do some post work after the
+ * read/write/erase/lock/unlock operations
+ * @read_xfer: [OPTIONAL] the read fundamental primitive
+ * @write_xfer: [OPTIONAL] the write fundamental primitive
+ * @read_reg: [DRIVER-SPECIFIC] read out the register
+ * @write_reg: [DRIVER-SPECIFIC] write data to the register
+ * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
+ * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
+ * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
+ * at the offset @offs
+ * @lock: [FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR
+ * @priv: the private data
+ */
+struct spi_nor {
+ struct mtd_info *mtd;
+ struct mutex lock;
+ struct device *dev;
+ u32 page_size;
+ u8 addr_width;
+ u8 erase_opcode;
+ u8 read_opcode;
+ u8 read_dummy;
+ u8 program_opcode;
+ enum read_mode flash_read;
+ bool sst_write_second;
+ u32 flags;
+ struct spi_nor_xfer_cfg cfg;
+ u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+
+ int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+ void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+ int (*read_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg,
+ u8 *buf, size_t len);
+ int (*write_xfer)(struct spi_nor *nor, struct spi_nor_xfer_cfg *cfg,
+ u8 *buf, size_t len);
+ int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+ int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
+ int write_enable);
+
+ int (*read)(struct spi_nor *nor, loff_t from,
+ size_t len, size_t *retlen, u_char *read_buf);
+ void (*write)(struct spi_nor *nor, loff_t to,
+ size_t len, size_t *retlen, const u_char *write_buf);
+ int (*erase)(struct spi_nor *nor, loff_t offs);
+
+ int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+ int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+
+ void *priv;
+};
+
+/**
+ * spi_nor_scan() - scan the SPI NOR
+ * @nor: the spi_nor structure
+ * @name: the chip type name
+ * @mode: the read mode supported by the driver
+ *
+ * Drivers can use this function to scan the SPI NOR.
+ * In the scanning, it will try to get all the necessary information to
+ * fill the mtd_info{} and the spi_nor{}.
+ *
+ * The chip type name can be provided through the @name parameter.
+ *
+ * Return: 0 for success, others for failure.
+ */
+int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode);
+
+#endif
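
Illustration (not part of the patch): once a controller driver has filled in its read/write/erase callbacks, nor->dev and the backing mtd_info, probing the attached chip comes down to spi_nor_scan() plus MTD registration. A hedged sketch (example_spi_nor_probe is an invented helper; passing NULL as the name relies on JEDEC ID detection):

#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>

static int example_spi_nor_probe(struct spi_nor *nor, struct mtd_info *mtd,
				 const char *chip_name)
{
	int ret;

	nor->mtd = mtd;

	ret = spi_nor_scan(nor, chip_name, SPI_NOR_QUAD);
	if (ret)
		return ret;

	/* spi_nor_scan() has filled in @mtd; hand it to the MTD core. */
	return mtd_device_register(mtd, NULL, 0);
}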
diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h
new file mode 100644
index 000000000..f456230f9
--- /dev/null
+++ b/include/linux/mtd/super.h
@@ -0,0 +1,29 @@
+/* MTD-based superblock handling
+ *
+ * Copyright © 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __MTD_SUPER_H__
+#define __MTD_SUPER_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mtd/mtd.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+
+extern struct dentry *mount_mtd(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data,
+ int (*fill_super)(struct super_block *, void *, int));
+extern void kill_mtd_super(struct super_block *sb);
+
+
+#endif /* __KERNEL__ */
+
+#endif /* __MTD_SUPER_H__ */
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
new file mode 100644
index 000000000..1e271cb55
--- /dev/null
+++ b/include/linux/mtd/ubi.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Artem Bityutskiy (Битюцкий Артём)
+ */
+
+#ifndef __LINUX_UBI_H__
+#define __LINUX_UBI_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <mtd/ubi-user.h>
+
+/* All volumes/LEBs */
+#define UBI_ALL -1
+
+/*
+ * Maximum number of scatter gather list entries,
+ * we use only 64 to have a lower memory footprint.
+ */
+#define UBI_MAX_SG_COUNT 64
+
+/*
+ * enum ubi_open_mode - UBI volume open mode constants.
+ *
+ * UBI_READONLY: read-only mode
+ * UBI_READWRITE: read-write mode
+ * UBI_EXCLUSIVE: exclusive mode
+ * UBI_METAONLY: modify only the volume meta-data,
+ * i.e. the data stored in the volume table, but not in any of the volume LEBs.
+ */
+enum {
+ UBI_READONLY = 1,
+ UBI_READWRITE,
+ UBI_EXCLUSIVE,
+ UBI_METAONLY
+};
+
+/**
+ * struct ubi_volume_info - UBI volume description data structure.
+ * @vol_id: volume ID
+ * @ubi_num: UBI device number this volume belongs to
+ * @size: how many physical eraseblocks are reserved for this volume
+ * @used_bytes: how many bytes of data this volume contains
+ * @used_ebs: how many physical eraseblocks of this volume actually contain any
+ * data
+ * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
+ * @corrupted: non-zero if the volume is corrupted (static volumes only)
+ * @upd_marker: non-zero if the volume has update marker set
+ * @alignment: volume alignment
+ * @usable_leb_size: how many bytes are available in logical eraseblocks of
+ * this volume
+ * @name_len: volume name length
+ * @name: volume name
+ * @cdev: UBI volume character device major and minor numbers
+ *
+ * The @corrupted flag is only relevant to static volumes and is always zero
+ * for dynamic ones. This is because UBI does not care about dynamic volume
+ * data protection and only cares about protecting static volume data.
+ *
+ * The @upd_marker flag is set if the volume update operation was interrupted.
+ * Before touching the volume data during the update operation, UBI first sets
+ * the update marker flag for this volume. If the volume update operation was
+ * further interrupted, the update marker indicates this. If the update marker
+ * is set, the contents of the volume is certainly damaged and a new volume
+ * update operation has to be started.
+ *
+ * To put it differently, @corrupted and @upd_marker fields have different
+ * semantics:
+ * o the @corrupted flag means that this static volume is corrupted for some
+ *   reason, but not because of an interrupted volume update
+ * o the @upd_marker field means that the volume is damaged because of an
+ * interrupted update operation.
+ *
+ * I.e., the @corrupted flag is never set if the @upd_marker flag is set.
+ *
+ * The @used_bytes and @used_ebs fields are only really needed for static
+ * volumes and contain the number of bytes stored in this static volume and how
+ * many eraseblocks this data occupies. In case of dynamic volumes, the
+ * @used_bytes field is equivalent to @size*@usable_leb_size, and the @used_ebs
+ * field is equivalent to @size.
+ *
+ * In general, logical eraseblock size is a property of the UBI device, not
+ * of the UBI volume. Indeed, the logical eraseblock size depends on the
+ * physical eraseblock size and on how many bytes UBI headers consume. But
+ * because of the volume alignment (@alignment), the usable size of logical
+ * eraseblocks of a volume may be less. The following equation is true:
+ * @usable_leb_size = LEB size - (LEB size mod @alignment),
+ * where LEB size is the logical eraseblock size defined by the UBI device.
+ *
+ * The alignment is a multiple of the minimal flash input/output unit size, or %1
+ * if all the available space is used.
+ *
+ * To put this differently, alignment may be considered a way to change
+ * volume logical eraseblock sizes.
+ */
+struct ubi_volume_info {
+ int ubi_num;
+ int vol_id;
+ int size;
+ long long used_bytes;
+ int used_ebs;
+ int vol_type;
+ int corrupted;
+ int upd_marker;
+ int alignment;
+ int usable_leb_size;
+ int name_len;
+ const char *name;
+ dev_t cdev;
+};
+
+/**
+ * struct ubi_sgl - UBI scatter gather list data structure.
+ * @list_pos: current position in @sg[]
+ * @page_pos: current position in @sg[@list_pos]
+ * @sg: the scatter gather list itself
+ *
+ * ubi_sgl is a wrapper around a scatter list which keeps track of the
+ * current position in the list and the current list item such that
+ * it can be used across multiple ubi_leb_read_sg() calls.
+ */
+struct ubi_sgl {
+ int list_pos;
+ int page_pos;
+ struct scatterlist sg[UBI_MAX_SG_COUNT];
+};
+
+/**
+ * ubi_sgl_init - initialize an UBI scatter gather list data structure.
+ * @usgl: the UBI scatter gather struct itself
+ *
+ * Please note that you still have to use sg_init_table() or any adequate
+ * function to initialize the underlying struct scatterlist.
+ */
+static inline void ubi_sgl_init(struct ubi_sgl *usgl)
+{
+ usgl->list_pos = 0;
+ usgl->page_pos = 0;
+}
+
+/**
+ * struct ubi_device_info - UBI device description data structure.
+ * @ubi_num: ubi device number
+ * @leb_size: logical eraseblock size on this UBI device
+ * @leb_start: starting offset of logical eraseblocks within physical
+ * eraseblocks
+ * @min_io_size: minimal I/O unit size
+ * @max_write_size: maximum amount of bytes the underlying flash can write at a
+ * time (MTD write buffer size)
+ * @ro_mode: if this device is in read-only mode
+ * @cdev: UBI character device major and minor numbers
+ *
+ * Note, @leb_size is the logical eraseblock size offered by the UBI device.
+ * Volumes of this UBI device may have smaller logical eraseblock size if their
+ * alignment is not equivalent to %1.
+ *
+ * The @max_write_size field describes the maximum write unit of the flash. For
+ * example, NOR flash allows for changing individual bytes, so @min_io_size is
+ * %1. However, it does not mean that NOR flash has to write data byte-by-byte.
+ * Instead, CFI NOR flashes have a write-buffer of, e.g., 64 bytes, and when
+ * writing large chunks of data, they write 64 bytes at a time. Obviously, this
+ * improves write throughput.
+ *
+ * Also, the MTD device may have N interleaved (striped) flash chips
+ * underneath, in which case @min_io_size can be the physical min. I/O size of
+ * a single flash chip, while @max_write_size can be N * @min_io_size.
+ *
+ * The @max_write_size field is always greater than or equal to @min_io_size.
+ * E.g., some NOR flashes may have (@min_io_size = 1, @max_write_size = 64). In
+ * contrast, NAND flashes usually have @min_io_size = @max_write_size = NAND
+ * page size.
+ */
+struct ubi_device_info {
+ int ubi_num;
+ int leb_size;
+ int leb_start;
+ int min_io_size;
+ int max_write_size;
+ int ro_mode;
+ dev_t cdev;
+};
+
+/*
+ * Volume notification types.
+ * @UBI_VOLUME_ADDED: a volume has been added (an UBI device was attached or a
+ * volume was created)
+ * @UBI_VOLUME_REMOVED: a volume has been removed (an UBI device was detached
+ * or a volume was removed)
+ * @UBI_VOLUME_RESIZED: a volume has been re-sized
+ * @UBI_VOLUME_RENAMED: a volume has been re-named
+ * @UBI_VOLUME_UPDATED: data has been written to a volume
+ *
+ * These constants define which type of event has happened when a volume
+ * notification function is invoked.
+ */
+enum {
+ UBI_VOLUME_ADDED,
+ UBI_VOLUME_REMOVED,
+ UBI_VOLUME_RESIZED,
+ UBI_VOLUME_RENAMED,
+ UBI_VOLUME_UPDATED,
+};
+
+/*
+ * struct ubi_notification - UBI notification description structure.
+ * @di: UBI device description object
+ * @vi: UBI volume description object
+ *
+ * UBI notifiers are called with a pointer to an object of this type. The
+ * object describes the notification. Namely, it provides a description of the
+ * UBI device and UBI volume the notification informs about.
+ */
+struct ubi_notification {
+ struct ubi_device_info di;
+ struct ubi_volume_info vi;
+};
+
+/* UBI descriptor given to users when they open UBI volumes */
+struct ubi_volume_desc;
+
+int ubi_get_device_info(int ubi_num, struct ubi_device_info *di);
+void ubi_get_volume_info(struct ubi_volume_desc *desc,
+ struct ubi_volume_info *vi);
+struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode);
+struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
+ int mode);
+struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode);
+
+int ubi_register_volume_notifier(struct notifier_block *nb,
+ int ignore_existing);
+int ubi_unregister_volume_notifier(struct notifier_block *nb);
+
+void ubi_close_volume(struct ubi_volume_desc *desc);
+int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
+ int len, int check);
+int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl,
+ int offset, int len, int check);
+int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int offset, int len);
+int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
+ int len);
+int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum);
+int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
+int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
+int ubi_sync(int ubi_num);
+int ubi_flush(int ubi_num, int vol_id, int lnum);
+
+/*
+ * This function is the same as the 'ubi_leb_read()' function, but it does not
+ * provide the checking capability.
+ */
+static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf,
+ int offset, int len)
+{
+ return ubi_leb_read(desc, lnum, buf, offset, len, 0);
+}
+
+/*
+ * This function is the same as the 'ubi_leb_read_sg()' function, but it does
+ * not provide the checking capability.
+ */
+static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum,
+ struct ubi_sgl *sgl, int offset, int len)
+{
+ return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0);
+}
+#endif /* !__LINUX_UBI_H__ */
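
Illustration (not part of the patch): a minimal sketch of the kernel-space API above, opening a volume read-only and reading the start of LEB 0 through the non-checking ubi_read() wrapper. example_ubi_read_leb0, and the choice of LEB 0, are assumptions.

#include <linux/err.h>
#include <linux/mtd/ubi.h>

static int example_ubi_read_leb0(int ubi_num, int vol_id, char *buf, int len)
{
	struct ubi_volume_desc *desc;
	int err;

	desc = ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	err = ubi_read(desc, 0, buf, 0, len);	/* LEB 0, offset 0 */

	ubi_close_volume(desc);
	return err;
}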
diff --git a/include/linux/mtd/xip.h b/include/linux/mtd/xip.h
new file mode 100644
index 000000000..abed4dec5
--- /dev/null
+++ b/include/linux/mtd/xip.h
@@ -0,0 +1,99 @@
+/*
+ * MTD primitives for XIP support
+ *
+ * Author: Nicolas Pitre
+ * Created: Nov 2, 2004
+ * Copyright: (C) 2004 MontaVista Software, Inc.
+ *
+ * This XIP support for MTD has been loosely inspired
+ * by an earlier patch authored by David Woodhouse.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_XIP_H__
+#define __LINUX_MTD_XIP_H__
+
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * We really don't want gcc to guess anything.
+ * We absolutely _need_ proper inlining.
+ */
+#include <linux/compiler.h>
+
+/*
+ * Functions that modify the flash state away from array mode must
+ * obviously not run from flash. The __xipram attribute therefore marks
+ * those functions so they get relocated to RAM.
+ */
+#define __xipram noinline __attribute__ ((__section__ (".data")))
+
+/*
+ * Each architecture has to provide the following macros. They must access
+ * the hardware directly and not rely on any other (XIP) functions since they
+ * won't be available when used (flash not in array mode).
+ *
+ * xip_irqpending()
+ *
+ * return non zero when any hardware interrupt is pending.
+ *
+ * xip_currtime()
+ *
+ * return a platform specific time reference to be used with
+ * xip_elapsed_since().
+ *
+ * xip_elapsed_since(x)
+ *
+ * return in usecs the elapsed time between now and the reference x as
+ * returned by xip_currtime().
+ *
+ * note 1: conversion to usec can be approximated, as long as the
+ * returned value is <= the real elapsed time.
+ * note 2: this should be able to cope with a few seconds without
+ * overflowing.
+ *
+ * xip_iprefetch()
+ *
+ * Macro to fill instruction prefetch
+ * e.g. a series of nops: asm volatile (".rep 8; nop; .endr");
+ */
+
+#include <asm/mtd-xip.h>
+
+#ifndef xip_irqpending
+
+#warning "missing IRQ and timer primitives for XIP MTD support"
+#warning "some of the XIP MTD support code will be disabled"
+#warning "your system will therefore be unresponsive when writing or erasing flash"
+
+#define xip_irqpending() (0)
+#define xip_currtime() (0)
+#define xip_elapsed_since(x) (0)
+
+#endif
+
+#ifndef xip_iprefetch
+#define xip_iprefetch() do { } while (0)
+#endif
+
+/*
+ * xip_cpu_idle() is used when waiting for a delay equal or larger than
+ * the system timer tick period. This should put the CPU into idle mode
+ * to save power and to be woken up only when some interrupts are pending.
+ * This should not rely upon standard kernel code.
+ */
+#ifndef xip_cpu_idle
+#define xip_cpu_idle() do { } while (0)
+#endif
+
+#else
+
+#define __xipram
+
+#endif /* CONFIG_MTD_XIP */
+
+#endif /* __LINUX_MTD_XIP_H__ */
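
Illustration (not part of the patch): the architecture contract above boils down to three macros that read hardware directly. A hedged sketch of what an <asm/mtd-xip.h> could provide, with entirely invented register addresses for a free-running microsecond counter and an IRQ-pending register:

#include <linux/io.h>

#define EXAMPLE_IRQ_PENDING	((void __iomem *)0xfe000010)	/* invented address */
#define EXAMPLE_US_COUNTER	((void __iomem *)0xfe000020)	/* invented address */

/* All three expand to inline MMIO reads, so nothing here executes from flash. */
#define xip_irqpending()	readl(EXAMPLE_IRQ_PENDING)
#define xip_currtime()		readl(EXAMPLE_US_COUNTER)
#define xip_elapsed_since(x)	(readl(EXAMPLE_US_COUNTER) - (x))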
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
new file mode 100644
index 000000000..4ac8b1977
--- /dev/null
+++ b/include/linux/mutex-debug.h
@@ -0,0 +1,24 @@
+#ifndef __LINUX_MUTEX_DEBUG_H
+#define __LINUX_MUTEX_DEBUG_H
+
+#include <linux/linkage.h>
+#include <linux/lockdep.h>
+#include <linux/debug_locks.h>
+
+/*
+ * Mutexes - debugging helpers:
+ */
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+ , .magic = &lockname
+
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
+
+extern void mutex_destroy(struct mutex *lock);
+
+#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
new file mode 100644
index 000000000..2cb7531e7
--- /dev/null
+++ b/include/linux/mutex.h
@@ -0,0 +1,178 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains the main data structure and API definitions.
+ */
+#ifndef __LINUX_MUTEX_H
+#define __LINUX_MUTEX_H
+
+#include <asm/current.h>
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+#include <linux/linkage.h>
+#include <linux/lockdep.h>
+#include <linux/atomic.h>
+#include <asm/processor.h>
+#include <linux/osq_lock.h>
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in hardware or software interrupt
+ * contexts such as tasklets and timers
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ * locks and tasks (and only those tasks)
+ */
+struct mutex {
+ /* 1: unlocked, 0: locked, negative: locked, possible waiters */
+ atomic_t count;
+ spinlock_t wait_lock;
+ struct list_head wait_list;
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
+ struct task_struct *owner;
+#endif
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
+ void *magic;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+/*
+ * This is the control structure for tasks blocked on mutex,
+ * which resides on the blocked task's kernel stack:
+ */
+struct mutex_waiter {
+ struct list_head list;
+ struct task_struct *task;
+#ifdef CONFIG_DEBUG_MUTEXES
+ void *magic;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_MUTEXES
+# include <linux/mutex-debug.h>
+#else
+# define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+# define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
+static inline void mutex_destroy(struct mutex *lock) {}
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#define __MUTEX_INITIALIZER(lockname) \
+ { .count = ATOMIC_INIT(1) \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
+ __DEBUG_MUTEX_INITIALIZER(lockname) \
+ __DEP_MAP_MUTEX_INITIALIZER(lockname) }
+
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+
+/**
+ * mutex_is_locked - is the mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline int mutex_is_locked(struct mutex *lock)
+{
+ return atomic_read(&lock->count) != 1;
+}
+
+/*
+ * See kernel/locking/mutex.c for detailed documentation of these APIs.
+ * Also see Documentation/locking/mutex-design.txt.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
+extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
+ unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+ unsigned int subclass);
+
+#define mutex_lock(lock) mutex_lock_nested(lock, 0)
+#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+
+#define mutex_lock_nest_lock(lock, nest_lock) \
+do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+} while (0)
+
+#else
+extern void mutex_lock(struct mutex *lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock);
+
+# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+#endif
+
+/*
+ * NOTE: mutex_trylock() follows the spin_trylock() convention,
+ * not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
+ */
+extern int mutex_trylock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock);
+
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+#endif /* __LINUX_MUTEX_H */
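
Illustration (not part of the patch): the canonical usage pattern for the API above, with a statically initialized mutex guarding a shared counter. example_lock and example_count are invented; the interruptible variant is shown because it is the usual choice on paths reachable from user space.

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_count;

static int example_increment(void)
{
	if (mutex_lock_interruptible(&example_lock))
		return -ERESTARTSYS;	/* a signal interrupted the wait */

	example_count++;		/* critical section */

	mutex_unlock(&example_lock);
	return 0;
}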
diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h
new file mode 100644
index 000000000..69327b7b4
--- /dev/null
+++ b/include/linux/mv643xx.h
@@ -0,0 +1,979 @@
+/*
+ * mv643xx.h - MV-643XX Internal registers definition file.
+ *
+ * Copyright 2002 Momentum Computer, Inc.
+ * Author: Matthew Dharm <mdharm@momenco.com>
+ * Copyright 2002 GALILEO TECHNOLOGY, LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __ASM_MV643XX_H
+#define __ASM_MV643XX_H
+
+#include <asm/types.h>
+#include <linux/mv643xx_eth.h>
+#include <linux/mv643xx_i2c.h>
+
+/****************************************/
+/* Processor Address Space */
+/****************************************/
+
+/* DDR SDRAM BAR and size registers */
+
+#define MV64340_CS_0_BASE_ADDR 0x008
+#define MV64340_CS_0_SIZE 0x010
+#define MV64340_CS_1_BASE_ADDR 0x208
+#define MV64340_CS_1_SIZE 0x210
+#define MV64340_CS_2_BASE_ADDR 0x018
+#define MV64340_CS_2_SIZE 0x020
+#define MV64340_CS_3_BASE_ADDR 0x218
+#define MV64340_CS_3_SIZE 0x220
+
+/* Devices BAR and size registers */
+
+#define MV64340_DEV_CS0_BASE_ADDR 0x028
+#define MV64340_DEV_CS0_SIZE 0x030
+#define MV64340_DEV_CS1_BASE_ADDR 0x228
+#define MV64340_DEV_CS1_SIZE 0x230
+#define MV64340_DEV_CS2_BASE_ADDR 0x248
+#define MV64340_DEV_CS2_SIZE 0x250
+#define MV64340_DEV_CS3_BASE_ADDR 0x038
+#define MV64340_DEV_CS3_SIZE 0x040
+#define MV64340_BOOTCS_BASE_ADDR 0x238
+#define MV64340_BOOTCS_SIZE 0x240
+
+/* PCI 0 BAR and size registers */
+
+#define MV64340_PCI_0_IO_BASE_ADDR 0x048
+#define MV64340_PCI_0_IO_SIZE 0x050
+#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058
+#define MV64340_PCI_0_MEMORY0_SIZE 0x060
+#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080
+#define MV64340_PCI_0_MEMORY1_SIZE 0x088
+#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258
+#define MV64340_PCI_0_MEMORY2_SIZE 0x260
+#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280
+#define MV64340_PCI_0_MEMORY3_SIZE 0x288
+
+/* PCI 1 BAR and size registers */
+#define MV64340_PCI_1_IO_BASE_ADDR 0x090
+#define MV64340_PCI_1_IO_SIZE 0x098
+#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0
+#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8
+#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0
+#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8
+#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0
+#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8
+#define MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0
+#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8
+
+/* SRAM base address */
+#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268
+
+/* internal registers space base address */
+#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068
+
+/* Enables the CS, DEV_CS, PCI 0 and PCI 1
+ windows above */
+#define MV64340_BASE_ADDR_ENABLE 0x278
+
+/****************************************/
+/* PCI remap registers */
+/****************************************/
+ /* PCI 0 */
+#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0
+#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8
+#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320
+#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100
+#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328
+#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8
+#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330
+#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300
+#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338
+ /* PCI 1 */
+#define MV64340_PCI_1_IO_ADDR_REMAP 0x108
+#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110
+#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340
+#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118
+#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348
+#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310
+#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350
+#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318
+#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358
+
+#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0
+#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8
+#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0
+#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8
+#define MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0
+#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 0x3d8
+#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0
+#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8
+
+/****************************************/
+/* CPU Control Registers */
+/****************************************/
+
+#define MV64340_CPU_CONFIG 0x000
+#define MV64340_CPU_MODE 0x120
+#define MV64340_CPU_MASTER_CONTROL 0x160
+#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150
+#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158
+#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168
+
+/****************************************/
+/* SMP Registers */
+/****************************************/
+
+#define MV64340_SMP_WHO_AM_I 0x200
+#define MV64340_SMP_CPU0_DOORBELL 0x214
+#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C
+#define MV64340_SMP_CPU1_DOORBELL 0x224
+#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C
+#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234
+#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C
+#define MV64340_SMP_SEMAPHOR0 0x244
+#define MV64340_SMP_SEMAPHOR1 0x24c
+#define MV64340_SMP_SEMAPHOR2 0x254
+#define MV64340_SMP_SEMAPHOR3 0x25c
+#define MV64340_SMP_SEMAPHOR4 0x264
+#define MV64340_SMP_SEMAPHOR5 0x26c
+#define MV64340_SMP_SEMAPHOR6 0x274
+#define MV64340_SMP_SEMAPHOR7 0x27c
+
+/****************************************/
+/* CPU Sync Barrier Register */
+/****************************************/
+
+#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0
+#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8
+#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0
+#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8
+
+/****************************************/
+/* CPU Access Protect */
+/****************************************/
+
+#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180
+#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188
+#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 0x190
+#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198
+#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0
+#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8
+#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0
+#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8
+
+
+/****************************************/
+/* CPU Error Report */
+/****************************************/
+
+#define MV64340_CPU_ERROR_ADDR_LOW 0x070
+#define MV64340_CPU_ERROR_ADDR_HIGH 0x078
+#define MV64340_CPU_ERROR_DATA_LOW 0x128
+#define MV64340_CPU_ERROR_DATA_HIGH 0x130
+#define MV64340_CPU_ERROR_PARITY 0x138
+#define MV64340_CPU_ERROR_CAUSE 0x140
+#define MV64340_CPU_ERROR_MASK 0x148
+
+/****************************************/
+/* CPU Interface Debug Registers */
+/****************************************/
+
+#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360
+#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368
+#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370
+#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378
+#define MV64340_PUNIT_MMASK 0x3e4
+
+/****************************************/
+/* Integrated SRAM Registers */
+/****************************************/
+
+#define MV64340_SRAM_CONFIG 0x380
+#define MV64340_SRAM_TEST_MODE 0X3F4
+#define MV64340_SRAM_ERROR_CAUSE 0x388
+#define MV64340_SRAM_ERROR_ADDR 0x390
+#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8
+#define MV64340_SRAM_ERROR_DATA_LOW 0x398
+#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0
+#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8
+
+/****************************************/
+/* SDRAM Configuration */
+/****************************************/
+
+#define MV64340_SDRAM_CONFIG 0x1400
+#define MV64340_D_UNIT_CONTROL_LOW 0x1404
+#define MV64340_D_UNIT_CONTROL_HIGH 0x1424
+#define MV64340_SDRAM_TIMING_CONTROL_LOW 0x1408
+#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c
+#define MV64340_SDRAM_ADDR_CONTROL 0x1410
+#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414
+#define MV64340_SDRAM_OPERATION 0x1418
+#define MV64340_SDRAM_MODE 0x141c
+#define MV64340_EXTENDED_DRAM_MODE 0x1420
+#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430
+#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434
+#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438
+#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0
+#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4
+
+/****************************************/
+/* SDRAM Error Report */
+/****************************************/
+
+#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444
+#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440
+#define MV64340_SDRAM_ERROR_ADDR 0x1450
+#define MV64340_SDRAM_RECEIVED_ECC 0x1448
+#define MV64340_SDRAM_CALCULATED_ECC 0x144c
+#define MV64340_SDRAM_ECC_CONTROL 0x1454
+#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458
+
+/******************************************/
+/* Controlled Delay Line (CDL) Registers */
+/******************************************/
+
+#define MV64340_DFCDL_CONFIG0 0x1480
+#define MV64340_DFCDL_CONFIG1 0x1484
+#define MV64340_DLL_WRITE 0x1488
+#define MV64340_DLL_READ 0x148c
+#define MV64340_SRAM_ADDR 0x1490
+#define MV64340_SRAM_DATA0 0x1494
+#define MV64340_SRAM_DATA1 0x1498
+#define MV64340_SRAM_DATA2 0x149c
+#define MV64340_DFCL_PROBE 0x14a0
+
+/******************************************/
+/* Debug Registers */
+/******************************************/
+
+#define MV64340_DUNIT_DEBUG_LOW 0x1460
+#define MV64340_DUNIT_DEBUG_HIGH 0x1464
+#define MV64340_DUNIT_MMASK 0x1b40
+
+/****************************************/
+/* Device Parameters */
+/****************************************/
+
+#define MV64340_DEVICE_BANK0_PARAMETERS 0x45c
+#define MV64340_DEVICE_BANK1_PARAMETERS 0x460
+#define MV64340_DEVICE_BANK2_PARAMETERS 0x464
+#define MV64340_DEVICE_BANK3_PARAMETERS 0x468
+#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c
+#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0
+#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8
+#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc
+#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4
+
+/****************************************/
+/* Device interrupt registers */
+/****************************************/
+
+#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0
+#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4
+#define MV64340_DEVICE_ERROR_ADDR 0x4d8
+#define MV64340_DEVICE_ERROR_DATA 0x4dc
+#define MV64340_DEVICE_ERROR_PARITY 0x4e0
+
+/****************************************/
+/* Device debug registers */
+/****************************************/
+
+#define MV64340_DEVICE_DEBUG_LOW 0x4e4
+#define MV64340_DEVICE_DEBUG_HIGH 0x4e8
+#define MV64340_RUNIT_MMASK 0x4f0
+
+/****************************************/
+/* PCI Slave Address Decoding registers */
+/****************************************/
+
+#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08
+#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88
+#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08
+#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88
+#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c
+#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c
+#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c
+#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c
+#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10
+#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90
+#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10
+#define MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90
+#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18
+#define MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98
+#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14
+#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94
+#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14
+#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94
+#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c
+#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c
+#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20
+#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0
+#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24
+#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4
+#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28
+#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8
+#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00
+#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80
+#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c
+#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c
+#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c
+#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc
+#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48
+#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8
+#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48
+#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8
+#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c
+#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc
+#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c
+#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc
+#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04
+#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84
+#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08
+#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88
+#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C
+#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C
+#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10
+#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90
+#define MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50
+#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0
+#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50
+#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0
+#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58
+#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8
+#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54
+#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4
+#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54
+#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4
+#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c
+#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc
+#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60
+#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0
+#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64
+#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4
+#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68
+#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8
+#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c
+#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec
+#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70
+#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0
+#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74
+#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4
+#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00
+#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80
+#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38
+#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8
+#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c
+#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc
+#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 0xF40
+#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0
+#define MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44
+#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4
+#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48
+#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8
+
+/***********************************/
+/* PCI Control Register Map */
+/***********************************/
+
+#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20
+#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0
+#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C
+#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C
+#define MV64340_PCI_0_COMMAND 0xc00
+#define MV64340_PCI_1_COMMAND 0xc80
+#define MV64340_PCI_0_MODE 0xd00
+#define MV64340_PCI_1_MODE 0xd80
+#define MV64340_PCI_0_RETRY 0xc04
+#define MV64340_PCI_1_RETRY 0xc84
+#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04
+#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84
+#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38
+#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8
+#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00
+#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80
+#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08
+#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88
+#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c
+#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c
+#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04
+#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84
+#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18
+#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98
+#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10
+#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90
+#define MV64340_PCI_0_P2P_CONFIG 0x1d14
+#define MV64340_PCI_1_P2P_CONFIG 0x1d94
+
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04
+#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14
+#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24
+#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34
+#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44
+#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50
+#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54
+#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58
+
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84
+#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94
+#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4
+#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4
+#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4
+#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0
+#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4
+#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8
+
+/****************************************/
+/* PCI Configuration Access Registers */
+/****************************************/
+
+#define MV64340_PCI_0_CONFIG_ADDR 0xcf8
+#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc
+#define MV64340_PCI_1_CONFIG_ADDR 0xc78
+#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c
+#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34
+#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4
+
+/****************************************/
+/* PCI Error Report Registers */
+/****************************************/
+
+#define MV64340_PCI_0_SERR_MASK 0xc28
+#define MV64340_PCI_1_SERR_MASK 0xca8
+#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40
+#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0
+#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44
+#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4
+#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48
+#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8
+#define MV64340_PCI_0_ERROR_COMMAND 0x1d50
+#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0
+#define MV64340_PCI_0_ERROR_CAUSE 0x1d58
+#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8
+#define MV64340_PCI_0_ERROR_MASK 0x1d5c
+#define MV64340_PCI_1_ERROR_MASK 0x1ddc
+
+/****************************************/
+/* PCI Debug Registers */
+/****************************************/
+
+#define MV64340_PCI_0_MMASK 0x1d24
+#define MV64340_PCI_1_MMASK 0x1da4
+
+/*********************************************/
+/* PCI Configuration, Function 0, Registers */
+/*********************************************/
+
+#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000
+#define MV64340_PCI_STATUS_AND_COMMAND 0x004
+#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008
+#define MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C
+
+#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010
+#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014
+#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018
+#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C
+#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020
+#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024
+#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c
+#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030
+#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034
+#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C
+ /* capability list */
+#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040
+#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044
+#define MV64340_PCI_VPD_ADDR 0x048
+#define MV64340_PCI_VPD_DATA 0x04c
+#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050
+#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054
+#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058
+#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c
+#define MV64340_PCI_X_COMMAND 0x060
+#define MV64340_PCI_X_STATUS 0x064
+#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068
+
+/***********************************************/
+/* PCI Configuration, Function 1, Registers */
+/***********************************************/
+
+#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110
+#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114
+#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118
+#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c
+#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120
+#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124
+
+/***********************************************/
+/* PCI Configuration, Function 2, Registers */
+/***********************************************/
+
+#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 0x210
+#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214
+#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218
+#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c
+#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220
+#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224
+
+/***********************************************/
+/* PCI Configuration, Function 3, Registers */
+/***********************************************/
+
+#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310
+#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314
+#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318
+#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c
+#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220
+#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224
+
+/***********************************************/
+/* PCI Configuration, Function 4, Registers */
+/***********************************************/
+
+#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410
+#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414
+#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418
+#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c
+#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420
+#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424
+
+/****************************************/
+/* Messaging Unit Registers (I2O) */
+/****************************************/
+
+#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010
+#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C
+#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020
+#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024
+#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028
+#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C
+#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030
+#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034
+#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040
+#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044
+#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050
+#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054
+#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060
+#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064
+#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068
+#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C
+#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070
+#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074
+#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8
+#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC
+
+#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090
+#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C
+#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0
+#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4
+#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8
+#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC
+#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0
+#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4
+#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0
+#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4
+#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0
+#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4
+#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0
+#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4
+#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8
+#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC
+#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0
+#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4
+#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078
+#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C
+
+#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10
+#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C
+#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20
+#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24
+#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28
+#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C
+#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30
+#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34
+#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40
+#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44
+#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50
+#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54
+#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60
+#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64
+#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68
+#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C
+#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70
+#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74
+#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8
+#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC
+#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90
+#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98
+#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C
+#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0
+#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4
+#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8
+#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC
+#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0
+#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4
+#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0
+#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4
+#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0
+#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4
+#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0
+#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4
+#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8
+#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC
+#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0
+#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4
+#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78
+#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C
+
+/****************************************/
+/* Ethernet Unit Registers */
+/****************************************/
+
+/*******************************************/
+/* CUNIT Registers */
+/*******************************************/
+
+ /* Address Decoding Register Map */
+
+#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200
+#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208
+#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210
+#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218
+#define MV64340_CUNIT_SIZE0 0xf204
+#define MV64340_CUNIT_SIZE1 0xf20c
+#define MV64340_CUNIT_SIZE2 0xf214
+#define MV64340_CUNIT_SIZE3 0xf21c
+#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240
+#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244
+#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250
+#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254
+#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258
+#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C
+
+ /* Error Report Registers */
+
+#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310
+#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314
+#define MV64340_CUNIT_ERROR_ADDR 0xf318
+
+ /* Cunit Control Registers */
+
+#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300
+#define MV64340_CUNIT_CONFIG_REG 0xb40c
+#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304
+
+ /* Cunit Debug Registers */
+
+#define MV64340_CUNIT_DEBUG_LOW 0xf340
+#define MV64340_CUNIT_DEBUG_HIGH 0xf344
+#define MV64340_CUNIT_MMASK 0xf380
+
+ /* MPSCs Clocks Routing Registers */
+
+#define MV64340_MPSC_ROUTING_REG 0xb400
+#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404
+#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408
+
+ /* MPSCs Interrupts Registers */
+
+#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port<<3))
+#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port<<3))
+
+#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port<<12))
+#define MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port<<12))
+#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port<<12))
+#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port<<12))
+
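The per-port MPSC macros above fold the port number into the register offset. A minimal sketch (the helper name and the `base` pointer are assumptions, not from this header) of how a driver might use them; the (port << 12) term spaces the per-port blocks 0x1000 apart, so port 0 reads from 0x8000 and port 1 from 0x9000:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical helper: read an MPSC port's low main-config register.
 * `base` is assumed to be the ioremap()ed MV64340 internal register window. */
static u32 mpsc_read_main_config_low(void __iomem *base, int port)
{
        return readl(base + MV64340_MPSC_MAIN_CONFIG_LOW(port));
}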
+ /* MPSC0 Registers */
+
+
+/***************************************/
+/* SDMA Registers */
+/***************************************/
+
+#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel<<13))
+#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel<<13))
+#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel<<13))
+#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel<<13))
+#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel<<13))
+
+#define MV64340_SDMA_CAUSE_REG 0xb800
+#define MV64340_SDMA_MASK_REG 0xb880
+
+/* BRG Interrupts */
+
+#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg<<3))
+#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg<<3))
+#define MV64340_BRG_CAUSE_REG 0xb834
+#define MV64340_BRG_MASK_REG 0xb8b4
+
+/****************************************/
+/* DMA Channel Control */
+/****************************************/
+
+#define MV64340_DMA_CHANNEL0_CONTROL 0x840
+#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880
+#define MV64340_DMA_CHANNEL1_CONTROL 0x844
+#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884
+#define MV64340_DMA_CHANNEL2_CONTROL 0x848
+#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888
+#define MV64340_DMA_CHANNEL3_CONTROL 0x84C
+#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C
+
+
+/****************************************/
+/* IDMA Registers */
+/****************************************/
+
+#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800
+#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804
+#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808
+#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C
+#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810
+#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814
+#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818
+#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c
+#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820
+#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824
+#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828
+#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C
+#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830
+#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834
+#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838
+#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C
+#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870
+#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874
+#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878
+#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C
+
+ /* IDMA Address Decoding Base Address Registers */
+
+#define MV64340_DMA_BASE_ADDR_REG0 0xa00
+#define MV64340_DMA_BASE_ADDR_REG1 0xa08
+#define MV64340_DMA_BASE_ADDR_REG2 0xa10
+#define MV64340_DMA_BASE_ADDR_REG3 0xa18
+#define MV64340_DMA_BASE_ADDR_REG4 0xa20
+#define MV64340_DMA_BASE_ADDR_REG5 0xa28
+#define MV64340_DMA_BASE_ADDR_REG6 0xa30
+#define MV64340_DMA_BASE_ADDR_REG7 0xa38
+
+ /* IDMA Address Decoding Size Address Register */
+
+#define MV64340_DMA_SIZE_REG0 0xa04
+#define MV64340_DMA_SIZE_REG1 0xa0c
+#define MV64340_DMA_SIZE_REG2 0xa14
+#define MV64340_DMA_SIZE_REG3 0xa1c
+#define MV64340_DMA_SIZE_REG4 0xa24
+#define MV64340_DMA_SIZE_REG5 0xa2c
+#define MV64340_DMA_SIZE_REG6 0xa34
+#define MV64340_DMA_SIZE_REG7 0xa3C
+
+ /* IDMA Address Decoding High Address Remap and Access
+ Protection Registers */
+
+#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60
+#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64
+#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68
+#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C
+#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80
+#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70
+#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74
+#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78
+#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c
+#define MV64340_DMA_ARBITER_CONTROL 0x860
+#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0
+
+ /* IDMA Headers Retarget Registers */
+
+#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84
+#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88
+
+ /* IDMA Interrupt Register */
+
+#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0
+#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4
+#define MV64340_DMA_ERROR_ADDR 0x8c8
+#define MV64340_DMA_ERROR_SELECT 0x8cc
+
+ /* IDMA Debug Register (for internal use) */
+
+#define MV64340_DMA_DEBUG_LOW 0x8e0
+#define MV64340_DMA_DEBUG_HIGH 0x8e4
+#define MV64340_DMA_SPARE 0xA8C
+
+/****************************************/
+/* Timer_Counter */
+/****************************************/
+
+#define MV64340_TIMER_COUNTER0 0x850
+#define MV64340_TIMER_COUNTER1 0x854
+#define MV64340_TIMER_COUNTER2 0x858
+#define MV64340_TIMER_COUNTER3 0x85C
+#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864
+#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868
+#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c
+
+/****************************************/
+/* Watchdog registers */
+/****************************************/
+
+#define MV64340_WATCHDOG_CONFIG_REG 0xb410
+#define MV64340_WATCHDOG_VALUE_REG 0xb414
+
+/****************************************/
+/* I2C Registers */
+/****************************************/
+
+#define MV64XXX_I2C_OFFSET 0xc000
+#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020
+
+/****************************************/
+/* GPP Interface Registers */
+/****************************************/
+
+#define MV64340_GPP_IO_CONTROL 0xf100
+#define MV64340_GPP_LEVEL_CONTROL 0xf110
+#define MV64340_GPP_VALUE 0xf104
+#define MV64340_GPP_INTERRUPT_CAUSE 0xf108
+#define MV64340_GPP_INTERRUPT_MASK0 0xf10c
+#define MV64340_GPP_INTERRUPT_MASK1 0xf114
+#define MV64340_GPP_VALUE_SET 0xf118
+#define MV64340_GPP_VALUE_CLEAR 0xf11c
+
+/****************************************/
+/* Interrupt Controller Registers */
+/****************************************/
+
+/****************************************/
+/* Interrupts */
+/****************************************/
+
+#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004
+#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c
+#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014
+#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c
+#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024
+#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034
+#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c
+#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044
+#define MV64340_INTERRUPT0_MASK_0_LOW 0x054
+#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c
+#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064
+#define MV64340_INTERRUPT1_MASK_0_LOW 0x074
+#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c
+#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084
+
+/****************************************/
+/* MPP Interface Registers */
+/****************************************/
+
+#define MV64340_MPP_CONTROL0 0xf000
+#define MV64340_MPP_CONTROL1 0xf004
+#define MV64340_MPP_CONTROL2 0xf008
+#define MV64340_MPP_CONTROL3 0xf00c
+
+/****************************************/
+/* Serial Initialization registers */
+/****************************************/
+
+#define MV64340_SERIAL_INIT_LAST_DATA 0xf324
+#define MV64340_SERIAL_INIT_CONTROL 0xf328
+#define MV64340_SERIAL_INIT_STATUS 0xf32c
+
+extern void mv64340_irq_init(unsigned int base);
+
+/* MPSC Platform Device, Driver Data (Shared register regions) */
+#define MPSC_SHARED_NAME "mpsc_shared"
+
+#define MPSC_ROUTING_BASE_ORDER 0
+#define MPSC_SDMA_INTR_BASE_ORDER 1
+
+#define MPSC_ROUTING_REG_BLOCK_SIZE 0x000c
+#define MPSC_SDMA_INTR_REG_BLOCK_SIZE 0x0084
+
+struct mpsc_shared_pdata {
+ u32 mrr_val; /* MPSC routing register (MRR) value */
+ u32 rcrr_val; /* RX clock routing register (RCRR) value */
+ u32 tcrr_val; /* TX clock routing register (TCRR) value */
+ u32 intr_cause_val; /* SDMA interrupt cause register value */
+ u32 intr_mask_val; /* SDMA interrupt mask register value */
+};
+
+/* MPSC Platform Device, Driver Data */
+#define MPSC_CTLR_NAME "mpsc"
+
+#define MPSC_BASE_ORDER 0
+#define MPSC_SDMA_BASE_ORDER 1
+#define MPSC_BRG_BASE_ORDER 2
+
+#define MPSC_REG_BLOCK_SIZE 0x0038
+#define MPSC_SDMA_REG_BLOCK_SIZE 0x0c18
+#define MPSC_BRG_REG_BLOCK_SIZE 0x0008
+
+struct mpsc_pdata {
+ u8 mirror_regs;
+ u8 cache_mgmt;
+ u8 max_idle;
+ int default_baud;
+ int default_bits;
+ int default_parity;
+ int default_flow;
+ u32 chr_1_val;
+ u32 chr_2_val;
+ u32 chr_10_val;
+ u32 mpcr_val;
+ u32 bcr_val;
+ u8 brg_can_tune;
+ u8 brg_clk_src;
+ u32 brg_clk_freq;
+};
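A board-file-style sketch of how this structure might be populated; every value below is a hypothetical example rather than something taken from the source, and the structure would normally be handed to the driver as the platform_data of a device named MPSC_CTLR_NAME:

static struct mpsc_pdata example_mpsc0_pdata = {
        .mirror_regs    = 1,
        .default_baud   = 9600,
        .default_bits   = 8,
        .default_parity = 'n',          /* assumed 'n'/'o'/'e' convention */
        .default_flow   = 'n',
        .brg_can_tune   = 0,
        .brg_clk_src    = 8,            /* assumed clock-source selector */
        .brg_clk_freq   = 133333333,    /* Hz, assumed system clock */
};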
+
+/* Watchdog Platform Device, Driver Data */
+#define MV64x60_WDT_NAME "mv64x60_wdt"
+
+struct mv64x60_wdt_pdata {
+ int timeout; /* watchdog expiry in seconds, default 10 */
+ int bus_clk; /* bus clock in MHz, default 133 */
+};
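A minimal sketch, for a hypothetical board, of overriding the documented defaults (10 second timeout, 133 MHz bus clock):

static struct mv64x60_wdt_pdata example_wdt_pdata = {
        .timeout = 30,          /* seconds before the watchdog expires */
        .bus_clk = 133,         /* bus clock in MHz */
};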
+
+#endif /* __ASM_MV643XX_H */
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
new file mode 100644
index 000000000..61a0da38d
--- /dev/null
+++ b/include/linux/mv643xx_eth.h
@@ -0,0 +1,86 @@
+/*
+ * MV-643XX ethernet platform device data definition file.
+ */
+
+#ifndef __LINUX_MV643XX_ETH_H
+#define __LINUX_MV643XX_ETH_H
+
+#include <linux/mbus.h>
+#include <linux/if_ether.h>
+
+#define MV643XX_ETH_SHARED_NAME "mv643xx_eth"
+#define MV643XX_ETH_NAME "mv643xx_eth_port"
+#define MV643XX_ETH_SHARED_REGS 0x2000
+#define MV643XX_ETH_SHARED_REGS_SIZE 0x2000
+#define MV643XX_ETH_BAR_4 0x2220
+#define MV643XX_ETH_SIZE_REG_4 0x2224
+#define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290
+
+#define MV643XX_TX_CSUM_DEFAULT_LIMIT 0
+
+struct mv643xx_eth_shared_platform_data {
+ struct mbus_dram_target_info *dram;
+ /*
+ * Max packet size for Tx IP/Layer 4 checksum offload; when set to 0, a
+ * default limit of 9KiB is used.
+ */
+ int tx_csum_limit;
+};
+
+#define MV643XX_ETH_PHY_ADDR_DEFAULT 0
+#define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x))
+#define MV643XX_ETH_PHY_NONE 0xff
+
+struct device_node;
+struct mv643xx_eth_platform_data {
+ /*
+ * Pointer back to our parent instance, and our port number.
+ */
+ struct platform_device *shared;
+ int port_number;
+
+ /*
+ * Whether a PHY is present, and if yes, at which address.
+ */
+ int phy_addr;
+ struct device_node *phy_node;
+
+ /*
+ * Use this MAC address if it is valid, overriding the
+ * address that is already in the hardware.
+ */
+ u8 mac_addr[ETH_ALEN];
+
+ /*
+ * If speed is 0, autonegotiation is enabled.
+ * Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000.
+ * Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL.
+ */
+ int speed;
+ int duplex;
+
+ /*
+ * How many RX/TX queues to use.
+ */
+ int rx_queue_count;
+ int tx_queue_count;
+
+ /*
+ * Override default RX/TX queue sizes if nonzero.
+ */
+ int rx_queue_size;
+ int tx_queue_size;
+
+ /*
+ * Use on-chip SRAM for RX/TX descriptors if size is nonzero
+ * and sufficient to contain all descriptors for the requested
+ * ring sizes.
+ */
+ unsigned long rx_sram_addr;
+ int rx_sram_size;
+ unsigned long tx_sram_addr;
+ int tx_sram_size;
+};
+
+
+#endif
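A sketch of board-setup code built on the definitions above; the device ids, the PHY address, and the wiring of the port device back to the shared controller device through .shared are illustrative assumptions, not taken from this header:

#include <linux/platform_device.h>

static struct mv643xx_eth_shared_platform_data example_eth_shared_pdata = {
        .tx_csum_limit = MV643XX_TX_CSUM_DEFAULT_LIMIT, /* 0 = driver default */
};

static struct platform_device example_eth_shared_dev = {
        .name = MV643XX_ETH_SHARED_NAME,
        .id   = 0,
        .dev  = { .platform_data = &example_eth_shared_pdata },
};

static struct mv643xx_eth_platform_data example_eth_port_pdata = {
        .shared      = &example_eth_shared_dev,
        .port_number = 0,
        .phy_addr    = MV643XX_ETH_PHY_ADDR(8),  /* PHY at MDIO address 8 */
        .speed       = 0,                        /* 0 = autonegotiate */
};

static struct platform_device example_eth_port_dev = {
        .name = MV643XX_ETH_NAME,
        .id   = 0,
        .dev  = { .platform_data = &example_eth_port_pdata },
};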
diff --git a/include/linux/mv643xx_i2c.h b/include/linux/mv643xx_i2c.h
new file mode 100644
index 000000000..5db5152e9
--- /dev/null
+++ b/include/linux/mv643xx_i2c.h
@@ -0,0 +1,22 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _MV64XXX_I2C_H_
+#define _MV64XXX_I2C_H_
+
+#include <linux/types.h>
+
+#define MV64XXX_I2C_CTLR_NAME "mv64xxx_i2c"
+
+/* i2c Platform Device, Driver Data */
+struct mv64xxx_i2c_pdata {
+ u32 freq_m;
+ u32 freq_n;
+ u32 timeout; /* In milliseconds */
+};
+
+#endif /*_MV64XXX_I2C_H_*/
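A short sketch, with assumed divider values that are not from this header, of the platform data a board might attach to a device named MV64XXX_I2C_CTLR_NAME:

static struct mv64xxx_i2c_pdata example_i2c_pdata = {
        .freq_m  = 8,           /* assumed M divider */
        .freq_n  = 3,           /* assumed N divider */
        .timeout = 1000,        /* milliseconds, as documented above */
};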
diff --git a/include/linux/mvebu-pmsu.h b/include/linux/mvebu-pmsu.h
new file mode 100644
index 000000000..b918d07ef
--- /dev/null
+++ b/include/linux/mvebu-pmsu.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MVEBU_PMSU_H__
+#define __MVEBU_PMSU_H__
+
+#ifdef CONFIG_MACH_MVEBU_V7
+int mvebu_pmsu_dfs_request(int cpu);
+#else
+static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; }
+#endif
+
+#endif /* __MVEBU_PMSU_H__ */
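A usage sketch (the wrapper is hypothetical): because the inline stub above returns -ENODEV when CONFIG_MACH_MVEBU_V7 is not set, callers can issue the request unconditionally and treat that error as "feature not present":

#include <linux/errno.h>
#include <linux/printk.h>

static int example_request_cpu_dfs(int cpu)
{
        int err = mvebu_pmsu_dfs_request(cpu);

        if (err == -ENODEV)
                pr_debug("PMSU DFS not available on this platform\n");
        return err;
}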
diff --git a/include/linux/mxm-wmi.h b/include/linux/mxm-wmi.h
new file mode 100644
index 000000000..617a29505
--- /dev/null
+++ b/include/linux/mxm-wmi.h
@@ -0,0 +1,33 @@
+/*
+ * MXM WMI driver
+ *
+ * Copyright(C) 2010 Red Hat.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef MXM_WMI_H
+#define MXM_WMI_H
+
+/* discrete adapters */
+#define MXM_MXDS_ADAPTER_0 0x0
+#define MXM_MXDS_ADAPTER_1 0x0
+/* integrated adapter */
+#define MXM_MXDS_ADAPTER_IGD 0x10
+int mxm_wmi_call_mxds(int adapter);
+int mxm_wmi_call_mxmx(int adapter);
+bool mxm_wmi_supported(void);
+
+#endif
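A usage sketch (the wrapper is hypothetical): check that MXM WMI is present before asking the firmware to switch to the integrated adapter.

#include <linux/errno.h>

static int example_select_integrated_gpu(void)
{
        if (!mxm_wmi_supported())
                return -ENODEV;

        return mxm_wmi_call_mxds(MXM_MXDS_ADAPTER_IGD);
}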
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h
new file mode 100644
index 000000000..5d0b2a1de
--- /dev/null
+++ b/include/linux/n_r3964.h
@@ -0,0 +1,177 @@
+/* r3964 line discipline for Linux
+ *
+ * -----------------------------------------------------------
+ * Copyright by
+ * Philips Automation Projects
+ * Kassel (Germany)
+ * -----------------------------------------------------------
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License, incorporated herein by reference.
+ *
+ * Author:
+ * L. Haag
+ *
+ * $Log: r3964.h,v $
+ * Revision 1.4 2005/12/21 19:54:24 Kurt Huwig <kurt huwig de>
+ * Fixed HZ usage on 2.6 kernels
+ * Removed unnecessary include
+ *
+ * Revision 1.3 2001/03/18 13:02:24 dwmw2
+ * Fix timer usage, use spinlocks properly.
+ *
+ * Revision 1.2 2001/03/18 12:53:15 dwmw2
+ * Merge changes in 2.4.2
+ *
+ * Revision 1.1.1.1 1998/10/13 16:43:14 dwmw2
+ * This'll screw the version control
+ *
+ * Revision 1.6 1998/09/30 00:40:38 dwmw2
+ * Updated to use kernel's N_R3964 if available
+ *
+ * Revision 1.4 1998/04/02 20:29:44 lhaag
+ * select, blocking, ...
+ *
+ * Revision 1.3 1998/02/12 18:58:43 root
+ * fixed some memory leaks
+ * calculation of checksum characters
+ *
+ * Revision 1.2 1998/02/07 13:03:17 root
+ * ioctl read_telegram
+ *
+ * Revision 1.1 1998/02/06 19:19:43 root
+ * Initial revision
+ *
+ *
+ */
+#ifndef __LINUX_N_R3964_H__
+#define __LINUX_N_R3964_H__
+
+
+#include <linux/param.h>
+#include <uapi/linux/n_r3964.h>
+
+/*
+ * Common ASCII handshake characters:
+ */
+
+#define STX 0x02
+#define ETX 0x03
+#define DLE 0x10
+#define NAK 0x15
+
+/*
+ * Timeouts (from milliseconds to jiffies)
+ */
+
+#define R3964_TO_QVZ ((550)*HZ/1000)
+#define R3964_TO_ZVZ ((220)*HZ/1000)
+#define R3964_TO_NO_BUF ((400)*HZ/1000)
+#define R3964_NO_TX_ROOM ((100)*HZ/1000)
+#define R3964_TO_RX_PANIC ((4000)*HZ/1000)
+#define R3964_MAX_RETRIES 5
+
+
+enum { R3964_IDLE,
+ R3964_TX_REQUEST, R3964_TRANSMITTING,
+ R3964_WAIT_ZVZ_BEFORE_TX_RETRY, R3964_WAIT_FOR_TX_ACK,
+ R3964_WAIT_FOR_RX_BUF,
+ R3964_RECEIVING, R3964_WAIT_FOR_BCC, R3964_WAIT_FOR_RX_REPEAT
+ };
+
+/*
+ * All open file-handles are 'clients' and are stored in a linked list:
+ */
+
+struct r3964_message;
+
+struct r3964_client_info {
+ spinlock_t lock;
+ struct pid *pid;
+ unsigned int sig_flags;
+
+ struct r3964_client_info *next;
+
+ struct r3964_message *first_msg;
+ struct r3964_message *last_msg;
+ struct r3964_block_header *next_block_to_read;
+ int msg_count;
+};
+
+
+
+struct r3964_block_header;
+
+/* internal version of client_message: */
+struct r3964_message {
+ int msg_id;
+ int arg;
+ int error_code;
+ struct r3964_block_header *block;
+ struct r3964_message *next;
+};
+
+/*
+ * Header of received block in rx_buf/tx_buf:
+ */
+
+struct r3964_block_header
+{
+ unsigned int length; /* length in chars without header */
+ unsigned char *data; /* usually data is located
+ immediately behind this struct */
+ unsigned int locks; /* only used in rx_buffer */
+
+ struct r3964_block_header *next;
+ struct r3964_client_info *owner; /* =NULL in rx_buffer */
+};
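A sketch, not taken from the driver, of the layout the comment above describes: header and payload live in a single allocation, with the data bytes placed immediately behind the header.

#include <linux/slab.h>

static struct r3964_block_header *example_alloc_block(unsigned int length)
{
        struct r3964_block_header *blk;

        blk = kmalloc(sizeof(*blk) + length, GFP_KERNEL);
        if (!blk)
                return NULL;

        blk->length = length;
        blk->data   = (unsigned char *)(blk + 1);  /* bytes right behind the header */
        blk->locks  = 0;
        blk->next   = NULL;
        blk->owner  = NULL;                        /* NULL while in rx_buffer */
        return blk;
}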
+
+/*
+ * If rx_buf doesn't have enough space to store R3964_MTU chars,
+ * we will reject all incoming STX requests by sending NAK.
+ */
+
+#define RX_BUF_SIZE 4000
+#define TX_BUF_SIZE 4000
+#define R3964_MAX_BLOCKS_IN_RX_QUEUE 100
+
+#define R3964_PARITY 0x0001
+#define R3964_FRAME 0x0002
+#define R3964_OVERRUN 0x0004
+#define R3964_UNKNOWN 0x0008
+#define R3964_BREAK 0x0010
+#define R3964_CHECKSUM 0x0020
+#define R3964_ERROR 0x003f
+#define R3964_BCC 0x4000
+#define R3964_DEBUG 0x8000
+
+
+struct r3964_info {
+ spinlock_t lock;
+ struct tty_struct *tty;
+ unsigned char priority;
+ unsigned char *rx_buf; /* ring buffer */
+ unsigned char *tx_buf;
+
+ wait_queue_head_t read_wait;
+ //struct wait_queue *read_wait;
+
+ struct r3964_block_header *rx_first;
+ struct r3964_block_header *rx_last;
+ struct r3964_block_header *tx_first;
+ struct r3964_block_header *tx_last;
+ unsigned int tx_position;
+ unsigned int rx_position;
+ unsigned char last_rx;
+ unsigned char bcc;
+ unsigned int blocks_in_rx_queue;
+
+
+ struct r3964_client_info *firstClient;
+ unsigned int state;
+ unsigned int flags;
+
+ struct timer_list tmr;
+ int nRetry;
+};
+
+#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
new file mode 100644
index 000000000..c8990779f
--- /dev/null
+++ b/include/linux/namei.h
@@ -0,0 +1,98 @@
+#ifndef _LINUX_NAMEI_H
+#define _LINUX_NAMEI_H
+
+#include <linux/dcache.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/path.h>
+
+struct vfsmount;
+struct nameidata;
+
+enum { MAX_NESTED_LINKS = 8 };
+
+/*
+ * Type of the last component on LOOKUP_PARENT
+ */
+enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
+
+/*
+ * The bitmask for a lookup event:
+ * - follow links at the end
+ * - require a directory
+ * - ending slashes ok even for nonexistent files
+ * - internal "there are more path components" flag
+ * - dentry cache is untrusted; force a real lookup
+ * - suppress terminal automount
+ */
+#define LOOKUP_FOLLOW 0x0001
+#define LOOKUP_DIRECTORY 0x0002
+#define LOOKUP_AUTOMOUNT 0x0004
+
+#define LOOKUP_PARENT 0x0010
+#define LOOKUP_REVAL 0x0020
+#define LOOKUP_RCU 0x0040
+
+/*
+ * Intent data
+ */
+#define LOOKUP_OPEN 0x0100
+#define LOOKUP_CREATE 0x0200
+#define LOOKUP_EXCL 0x0400
+#define LOOKUP_RENAME_TARGET 0x0800
+
+#define LOOKUP_JUMPED 0x1000
+#define LOOKUP_ROOT 0x2000
+#define LOOKUP_EMPTY 0x4000
+
+extern int user_path_at(int, const char __user *, unsigned, struct path *);
+extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
+
+#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
+#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
+#define user_path_dir(name, path) \
+ user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
+
+extern int kern_path(const char *, unsigned, struct path *);
+
+extern struct dentry *kern_path_create(int, const char *, struct path *, unsigned int);
+extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int);
+extern void done_path_create(struct path *, struct dentry *);
+extern struct dentry *kern_path_locked(const char *, struct path *);
+extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
+
+extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
+
+extern int follow_down_one(struct path *);
+extern int follow_down(struct path *);
+extern int follow_up(struct path *);
+
+extern struct dentry *lock_rename(struct dentry *, struct dentry *);
+extern void unlock_rename(struct dentry *, struct dentry *);
+
+extern void nd_jump_link(struct nameidata *nd, struct path *path);
+extern void nd_set_link(struct nameidata *nd, char *path);
+extern char *nd_get_link(struct nameidata *nd);
+
+static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
+{
+ ((char *) name)[min(len, maxlen)] = '\0';
+}
+
+/**
+ * retry_estale - determine whether the caller should retry an operation
+ * @error: the error that would currently be returned
+ * @flags: flags being used for next lookup attempt
+ *
+ * Check to see if the error code was -ESTALE, and then determine whether
+ * to retry the call based on whether "flags" already has LOOKUP_REVAL set.
+ *
+ * Returns true if the caller should try the operation again.
+ */
+static inline bool
+retry_estale(const long error, const unsigned int flags)
+{
+ return error == -ESTALE && !(flags & LOOKUP_REVAL);
+}
+
+#endif /* _LINUX_NAMEI_H */
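A sketch of the retry pattern retry_estale() is meant to support (the wrapper and its arguments are illustrative): redo a lookup once with LOOKUP_REVAL when the first pass comes back with -ESTALE.

static int example_lookup(int dfd, const char __user *filename,
                          struct path *path)
{
        unsigned int lookup_flags = LOOKUP_FOLLOW;
        int error;

retry:
        error = user_path_at(dfd, filename, lookup_flags, path);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
        return error;
}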
diff --git a/include/linux/net.h b/include/linux/net.h
new file mode 100644
index 000000000..738ea48be
--- /dev/null
+++ b/include/linux/net.h
@@ -0,0 +1,299 @@
+/*
+ * NET An implementation of the SOCKET network access protocol.
+ * This is the master header file for the Linux NET layer,
+ * or, in plain English: the networking handling part of the
+ * kernel.
+ *
+ * Version: @(#)net.h 1.0.3 05/25/93
+ *
+ * Authors: Orest Zborowski, <obz@Kodak.COM>
+ * Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NET_H
+#define _LINUX_NET_H
+
+#include <linux/stringify.h>
+#include <linux/random.h>
+#include <linux/wait.h>
+#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
+#include <linux/kmemcheck.h>
+#include <linux/rcupdate.h>
+#include <linux/jump_label.h>
+#include <uapi/linux/net.h>
+
+struct poll_table_struct;
+struct pipe_inode_info;
+struct inode;
+struct file;
+struct net;
+
+#define SOCK_ASYNC_NOSPACE 0
+#define SOCK_ASYNC_WAITDATA 1
+#define SOCK_NOSPACE 2
+#define SOCK_PASSCRED 3
+#define SOCK_PASSSEC 4
+#define SOCK_EXTERNALLY_ALLOCATED 5
+
+#ifndef ARCH_HAS_SOCKET_TYPES
+/**
+ * enum sock_type - Socket types
+ * @SOCK_STREAM: stream (connection) socket
+ * @SOCK_DGRAM: datagram (conn.less) socket
+ * @SOCK_RAW: raw socket
+ * @SOCK_RDM: reliably-delivered message
+ * @SOCK_SEQPACKET: sequential packet socket
+ * @SOCK_DCCP: Datagram Congestion Control Protocol socket
+ * @SOCK_PACKET: linux specific way of getting packets at the dev level.
+ * For writing rarp and other similar things on the user level.
+ *
+ * When adding some new socket type please
+ * grep ARCH_HAS_SOCKET_TYPES include/asm-* /socket.h, at least MIPS
+ * overrides this enum for binary compat reasons.
+ */
+enum sock_type {
+ SOCK_STREAM = 1,
+ SOCK_DGRAM = 2,
+ SOCK_RAW = 3,
+ SOCK_RDM = 4,
+ SOCK_SEQPACKET = 5,
+ SOCK_DCCP = 6,
+ SOCK_PACKET = 10,
+};
+
+#define SOCK_MAX (SOCK_PACKET + 1)
+/* Mask which covers at least up to SOCK_MAX-1. The
+ * remaining bits are used as flags. */
+#define SOCK_TYPE_MASK 0xf
+
+/* Flags for socket, socketpair, accept4 */
+#define SOCK_CLOEXEC O_CLOEXEC
+#ifndef SOCK_NONBLOCK
+#define SOCK_NONBLOCK O_NONBLOCK
+#endif
+
+#endif /* ARCH_HAS_SOCKET_TYPES */
+
+enum sock_shutdown_cmd {
+ SHUT_RD,
+ SHUT_WR,
+ SHUT_RDWR,
+};
+
+struct socket_wq {
+ /* Note: wait MUST be first field of socket_wq */
+ wait_queue_head_t wait;
+ struct fasync_struct *fasync_list;
+ struct rcu_head rcu;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct socket - general BSD socket
+ * @state: socket state (%SS_CONNECTED, etc)
+ * @type: socket type (%SOCK_STREAM, etc)
+ * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
+ * @ops: protocol specific socket operations
+ * @file: File back pointer for gc
+ * @sk: internal networking protocol agnostic socket representation
+ * @wq: wait queue for several uses
+ */
+struct socket {
+ socket_state state;
+
+ kmemcheck_bitfield_begin(type);
+ short type;
+ kmemcheck_bitfield_end(type);
+
+ unsigned long flags;
+
+ struct socket_wq __rcu *wq;
+
+ struct file *file;
+ struct sock *sk;
+ const struct proto_ops *ops;
+};
+
+struct vm_area_struct;
+struct page;
+struct sockaddr;
+struct msghdr;
+struct module;
+
+struct proto_ops {
+ int family;
+ struct module *owner;
+ int (*release) (struct socket *sock);
+ int (*bind) (struct socket *sock,
+ struct sockaddr *myaddr,
+ int sockaddr_len);
+ int (*connect) (struct socket *sock,
+ struct sockaddr *vaddr,
+ int sockaddr_len, int flags);
+ int (*socketpair)(struct socket *sock1,
+ struct socket *sock2);
+ int (*accept) (struct socket *sock,
+ struct socket *newsock, int flags);
+ int (*getname) (struct socket *sock,
+ struct sockaddr *addr,
+ int *sockaddr_len, int peer);
+ unsigned int (*poll) (struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+ int (*ioctl) (struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+#ifdef CONFIG_COMPAT
+ int (*compat_ioctl) (struct socket *sock, unsigned int cmd,
+ unsigned long arg);
+#endif
+ int (*listen) (struct socket *sock, int len);
+ int (*shutdown) (struct socket *sock, int flags);
+ int (*setsockopt)(struct socket *sock, int level,
+ int optname, char __user *optval, unsigned int optlen);
+ int (*getsockopt)(struct socket *sock, int level,
+ int optname, char __user *optval, int __user *optlen);
+#ifdef CONFIG_COMPAT
+ int (*compat_setsockopt)(struct socket *sock, int level,
+ int optname, char __user *optval, unsigned int optlen);
+ int (*compat_getsockopt)(struct socket *sock, int level,
+ int optname, char __user *optval, int __user *optlen);
+#endif
+ int (*sendmsg) (struct socket *sock, struct msghdr *m,
+ size_t total_len);
+ /* Notes for implementing recvmsg:
+ * ===============================
+ * msg->msg_namelen should get updated by the recvmsg handlers
+ * iff msg_name != NULL. It is by default 0 to prevent
+ * returning uninitialized memory to user space. The recvfrom
+ * handlers can assume that msg.msg_name is either NULL or has
+ * a minimum size of sizeof(struct sockaddr_storage).
+ */
+ int (*recvmsg) (struct socket *sock, struct msghdr *m,
+ size_t total_len, int flags);
+ int (*mmap) (struct file *file, struct socket *sock,
+ struct vm_area_struct * vma);
+ ssize_t (*sendpage) (struct socket *sock, struct page *page,
+ int offset, size_t size, int flags);
+ ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len, unsigned int flags);
+ int (*set_peek_off)(struct sock *sk, int val);
+};
+
+#define DECLARE_SOCKADDR(type, dst, src) \
+ type dst = ({ __sockaddr_check_size(sizeof(*dst)); (type) src; })
+
+struct net_proto_family {
+ int family;
+ int (*create)(struct net *net, struct socket *sock,
+ int protocol, int kern);
+ struct module *owner;
+};
+
+struct iovec;
+struct kvec;
+
+enum {
+ SOCK_WAKE_IO,
+ SOCK_WAKE_WAITD,
+ SOCK_WAKE_SPACE,
+ SOCK_WAKE_URG,
+};
+
+int sock_wake_async(struct socket *sk, int how, int band);
+int sock_register(const struct net_proto_family *fam);
+void sock_unregister(int family);
+int __sock_create(struct net *net, int family, int type, int proto,
+ struct socket **res, int kern);
+int sock_create(int family, int type, int proto, struct socket **res);
+int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_lite(int family, int type, int proto, struct socket **res);
+void sock_release(struct socket *sock);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
+struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
+struct socket *sockfd_lookup(int fd, int *err);
+struct socket *sock_from_file(struct file *file, int *err);
+#define sockfd_put(sock) fput(sock->file)
+int net_ratelimit(void);
+
+#define net_ratelimited_function(function, ...) \
+do { \
+ if (net_ratelimit()) \
+ function(__VA_ARGS__); \
+} while (0)
+
+#define net_emerg_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_emerg, fmt, ##__VA_ARGS__)
+#define net_alert_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_alert, fmt, ##__VA_ARGS__)
+#define net_crit_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_crit, fmt, ##__VA_ARGS__)
+#define net_err_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_err, fmt, ##__VA_ARGS__)
+#define net_notice_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_notice, fmt, ##__VA_ARGS__)
+#define net_warn_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
+#define net_info_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#define net_dbg_ratelimited(fmt, ...) \
+ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+ struct static_key *done_key);
+
+#define net_get_random_once(buf, nbytes) \
+ ({ \
+ bool ___ret = false; \
+ static bool ___done = false; \
+ static struct static_key ___once_key = \
+ STATIC_KEY_INIT_TRUE; \
+ if (static_key_true(&___once_key)) \
+ ___ret = __net_get_random_once(buf, \
+ nbytes, \
+ &___done, \
+ &___once_key); \
+ ___ret; \
+ })
+
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+ size_t num, size_t len);
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+ size_t num, size_t len, int flags);
+
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_listen(struct socket *sock, int backlog);
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+ int flags);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+ int *addrlen);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+ int *addrlen);
+int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
+ int *optlen);
+int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
+ unsigned int optlen);
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags);
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
+
+#define MODULE_ALIAS_NETPROTO(proto) \
+ MODULE_ALIAS("net-pf-" __stringify(proto))
+
+#define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \
+ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto))
+
+#define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type) \
+ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
+ "-type-" __stringify(type))
+
+#define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \
+ MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
+ name)
+#endif /* _LINUX_NET_H */
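A minimal sketch, assuming kernel-module context, of the in-kernel socket helpers declared above sending a single UDP datagram; the port, destination address, and payload are made up for illustration.

#include <linux/in.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

static int example_udp_send(void)
{
        struct socket *sock;
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port   = htons(12345),                        /* hypothetical port */
                .sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
        };
        struct msghdr msg = { .msg_name = &dst, .msg_namelen = sizeof(dst) };
        char payload[] = "ping";
        struct kvec vec = { .iov_base = payload, .iov_len = sizeof(payload) };
        int err;

        err = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
        if (err)
                return err;

        /* kernel_sendmsg() maps the kvec into msg and performs the send */
        err = kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
        sock_release(sock);

        return err < 0 ? err : 0;
}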
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
new file mode 100644
index 000000000..7d59dc6ab
--- /dev/null
+++ b/include/linux/netdev_features.h
@@ -0,0 +1,190 @@
+/*
+ * Network device features.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_NETDEV_FEATURES_H
+#define _LINUX_NETDEV_FEATURES_H
+
+#include <linux/types.h>
+
+typedef u64 netdev_features_t;
+
+enum {
+ NETIF_F_SG_BIT, /* Scatter/gather IO. */
+ NETIF_F_IP_CSUM_BIT, /* Can checksum TCP/UDP over IPv4. */
+ __UNUSED_NETIF_F_1,
+ NETIF_F_HW_CSUM_BIT, /* Can checksum all the packets. */
+ NETIF_F_IPV6_CSUM_BIT, /* Can checksum TCP/UDP over IPV6 */
+ NETIF_F_HIGHDMA_BIT, /* Can DMA to high memory. */
+ NETIF_F_FRAGLIST_BIT, /* Scatter/gather IO. */
+ NETIF_F_HW_VLAN_CTAG_TX_BIT, /* Transmit VLAN CTAG HW acceleration */
+ NETIF_F_HW_VLAN_CTAG_RX_BIT, /* Receive VLAN CTAG HW acceleration */
+ NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */
+ NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */
+ NETIF_F_GSO_BIT, /* Enable software GSO. */
+ NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */
+ /* do not use LLTX in new drivers */
+ NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */
+ NETIF_F_GRO_BIT, /* Generic receive offload */
+ NETIF_F_LRO_BIT, /* large receive offload */
+
+ /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */
+ NETIF_F_TSO_BIT /* ... TCPv4 segmentation */
+ = NETIF_F_GSO_SHIFT,
+ NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */
+ NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */
+ NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */
+ NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
+ NETIF_F_FSO_BIT, /* ... FCoE segmentation */
+ NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */
+ NETIF_F_GSO_GRE_CSUM_BIT, /* ... GRE with csum with TSO */
+ NETIF_F_GSO_IPIP_BIT, /* ... IPIP tunnel with TSO */
+ NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
+ NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
+ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
+ NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
+ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
+ NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
+
+ NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
+ NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
+ NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/
+ NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */
+ NETIF_F_RXHASH_BIT, /* Receive hashing offload */
+ NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */
+ NETIF_F_NOCACHE_COPY_BIT, /* Use no-cache copyfromuser */
+ NETIF_F_LOOPBACK_BIT, /* Enable loopback */
+ NETIF_F_RXFCS_BIT, /* Append FCS to skb pkt data */
+ NETIF_F_RXALL_BIT, /* Receive errored frames too */
+ NETIF_F_HW_VLAN_STAG_TX_BIT, /* Transmit VLAN STAG HW acceleration */
+ NETIF_F_HW_VLAN_STAG_RX_BIT, /* Receive VLAN STAG HW acceleration */
+ NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
+ NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
+ NETIF_F_BUSY_POLL_BIT, /* Busy poll */
+ NETIF_F_HW_SWITCH_OFFLOAD_BIT, /* HW switch offload */
+
+ /*
+ * Add your fresh new feature above and remember to update
+ * netdev_features_strings[] in net/core/ethtool.c and maybe
+ * some feature mask #defines below. Please also describe it
+ * in Documentation/networking/netdev-features.txt.
+ */
+
+ /**/NETDEV_FEATURE_COUNT
+};
+
+/* copy'n'paste compression ;) */
+#define __NETIF_F_BIT(bit) ((netdev_features_t)1 << (bit))
+#define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT)
+
+#define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC)
+#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU)
+#define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST)
+#define NETIF_F_FSO __NETIF_F(FSO)
+#define NETIF_F_GRO __NETIF_F(GRO)
+#define NETIF_F_GSO __NETIF_F(GSO)
+#define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST)
+#define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA)
+#define NETIF_F_HW_CSUM __NETIF_F(HW_CSUM)
+#define NETIF_F_HW_VLAN_CTAG_FILTER __NETIF_F(HW_VLAN_CTAG_FILTER)
+#define NETIF_F_HW_VLAN_CTAG_RX __NETIF_F(HW_VLAN_CTAG_RX)
+#define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX)
+#define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM)
+#define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM)
+#define NETIF_F_LLTX __NETIF_F(LLTX)
+#define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK)
+#define NETIF_F_LRO __NETIF_F(LRO)
+#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL)
+#define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY)
+#define NETIF_F_NTUPLE __NETIF_F(NTUPLE)
+#define NETIF_F_RXCSUM __NETIF_F(RXCSUM)
+#define NETIF_F_RXHASH __NETIF_F(RXHASH)
+#define NETIF_F_SCTP_CSUM __NETIF_F(SCTP_CSUM)
+#define NETIF_F_SG __NETIF_F(SG)
+#define NETIF_F_TSO6 __NETIF_F(TSO6)
+#define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN)
+#define NETIF_F_TSO __NETIF_F(TSO)
+#define NETIF_F_UFO __NETIF_F(UFO)
+#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
+#define NETIF_F_RXFCS __NETIF_F(RXFCS)
+#define NETIF_F_RXALL __NETIF_F(RXALL)
+#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
+#define NETIF_F_GSO_GRE_CSUM __NETIF_F(GSO_GRE_CSUM)
+#define NETIF_F_GSO_IPIP __NETIF_F(GSO_IPIP)
+#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
+#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
+#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
+#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
+#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
+#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
+#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
+#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+#define NETIF_F_HW_SWITCH_OFFLOAD __NETIF_F(HW_SWITCH_OFFLOAD)
+
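+/*
+ * Illustrative sketch (not part of this header): a driver typically
+ * advertises the subset of these features that its hardware supports by
+ * filling in hw_features and features at probe time, for example:
+ *
+ *	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ *	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
+ *
+ * The exact set is device specific; this is only an example.
+ */
+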
+/* Features valid for ethtool to change */
+/* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
+ NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+
+/* remember that ((t)1 << t_BITS) is undefined in C99 */
+#define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \
+ (__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) - 1)) & \
+ ~NETIF_F_NEVER_CHANGE)
+
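+/*
+ * Worked example (illustrative only): with a 64-bit netdev_features_t and a
+ * hypothetical NETDEV_FEATURE_COUNT of 64, ((netdev_features_t)1 << 64)
+ * would be undefined, so the all-ones mask is instead built as
+ * __NETIF_F_BIT(63) | (__NETIF_F_BIT(63) - 1), which sets every bit
+ * without ever shifting by the full width of the type.
+ */
+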
+/* Segmentation offload feature mask */
+#define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \
+ __NETIF_F_BIT(NETIF_F_GSO_SHIFT))
+
+/* List of features with software fallbacks. */
+#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
+ NETIF_F_TSO6 | NETIF_F_UFO)
+
+#define NETIF_F_GEN_CSUM NETIF_F_HW_CSUM
+#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
+#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
+#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+
+#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+ NETIF_F_FSO)
+
+/*
+ * If one device supports one of these features, then enable them
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
+ NETIF_F_SG | NETIF_F_HIGHDMA | \
+ NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED | \
+ NETIF_F_HW_SWITCH_OFFLOAD)
+
+/*
+ * If one device doesn't support one of these features, then disable it
+ * for all in netdev_increment_features.
+ */
+#define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
+
+/* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+
+#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPIP | \
+ NETIF_F_GSO_SIT | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
new file mode 100644
index 000000000..05b9a694e
--- /dev/null
+++ b/include/linux/netdevice.h
@@ -0,0 +1,3961 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Interfaces handler.
+ *
+ * Version: @(#)dev.h 1.0.10 08/12/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
+ * Alan Cox, <alan@lxorguk.ukuu.org.uk>
+ * Bjorn Ekwall. <bj0rn@blox.se>
+ * Pekka Riikonen <priikone@poseidon.pspt.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Moved to /usr/include/linux for NET3
+ */
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#include <linux/timer.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/prefetch.h>
+#include <asm/cache.h>
+#include <asm/byteorder.h>
+
+#include <linux/percpu.h>
+#include <linux/rculist.h>
+#include <linux/dmaengine.h>
+#include <linux/workqueue.h>
+#include <linux/dynamic_queue_limits.h>
+
+#include <linux/ethtool.h>
+#include <net/net_namespace.h>
+#include <net/dsa.h>
+#ifdef CONFIG_DCB
+#include <net/dcbnl.h>
+#endif
+#include <net/netprio_cgroup.h>
+
+#include <linux/netdev_features.h>
+#include <linux/neighbour.h>
+#include <uapi/linux/netdevice.h>
+#include <uapi/linux/if_bonding.h>
+
+struct netpoll_info;
+struct device;
+struct phy_device;
+/* 802.11 specific */
+struct wireless_dev;
+/* 802.15.4 specific */
+struct wpan_dev;
+struct mpls_dev;
+
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+ const struct ethtool_ops *ops);
+
+/* Backlog congestion levels */
+#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
+#define NET_RX_DROP 1 /* packet dropped */
+
+/*
+ * Transmit return codes: transmit return codes originate from three different
+ * namespaces:
+ *
+ * - qdisc return codes
+ * - driver transmit return codes
+ * - errno values
+ *
+ * Drivers are allowed to return any one of those in their hard_start_xmit()
+ * function. Real network devices commonly used with qdiscs should only return
+ * the driver transmit return codes though - when qdiscs are used, the actual
+ * transmission happens asynchronously, so the value is not propagated to
+ * higher layers. Virtual network devices transmit synchronously, in this case
+ * the driver transmit return codes are consumed by dev_queue_xmit(), all
+ * others are propagated to higher layers.
+ */
+
+/* qdisc ->enqueue() return codes. */
+#define NET_XMIT_SUCCESS 0x00
+#define NET_XMIT_DROP 0x01 /* skb dropped */
+#define NET_XMIT_CN 0x02 /* congestion notification */
+#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
+#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
+
+/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
+ * indicates that the device will soon be dropping packets, or already drops
+ * some packets of the same priority; prompting us to send less aggressively. */
+#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
+#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
+
+/* Driver transmit return codes */
+#define NETDEV_TX_MASK 0xf0
+
+enum netdev_tx {
+ __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
+ NETDEV_TX_OK = 0x00, /* driver took care of packet */
+ NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
+ NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
+};
+typedef enum netdev_tx netdev_tx_t;
+
+/*
+ * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
+ * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
+ */
+static inline bool dev_xmit_complete(int rc)
+{
+ /*
+ * Positive cases with an skb consumed by a driver:
+ * - successful transmission (rc == NETDEV_TX_OK)
+ * - error while transmitting (rc < 0)
+ * - error while queueing to a different device (rc & NET_XMIT_MASK)
+ */
+ if (likely(rc < NET_XMIT_MASK))
+ return true;
+
+ return false;
+}
+
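+/*
+ * Illustrative sketch (not part of this header): a hypothetical drop-only
+ * device shows the expected ndo_start_xmit() contract: consume the skb and
+ * return NETDEV_TX_OK, and stop the queue early enough that NETDEV_TX_BUSY
+ * is never needed.
+ *
+ *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
+ *					  struct net_device *dev)
+ *	{
+ *		dev->stats.tx_dropped++;
+ *		dev_kfree_skb_any(skb);
+ *		return NETDEV_TX_OK;
+ *	}
+ */
+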
+/*
+ * Compute the worst case header length according to the protocols
+ * used.
+ */
+
+#if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
+# if defined(CONFIG_MAC80211_MESH)
+# define LL_MAX_HEADER 128
+# else
+# define LL_MAX_HEADER 96
+# endif
+#else
+# define LL_MAX_HEADER 32
+#endif
+
+#if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
+ !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
+#define MAX_HEADER LL_MAX_HEADER
+#else
+#define MAX_HEADER (LL_MAX_HEADER + 48)
+#endif
+
+/*
+ * Old network device statistics. Fields are native words
+ * (unsigned long) so they can be read and written atomically.
+ */
+
+struct net_device_stats {
+ unsigned long rx_packets;
+ unsigned long tx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_bytes;
+ unsigned long rx_errors;
+ unsigned long tx_errors;
+ unsigned long rx_dropped;
+ unsigned long tx_dropped;
+ unsigned long multicast;
+ unsigned long collisions;
+ unsigned long rx_length_errors;
+ unsigned long rx_over_errors;
+ unsigned long rx_crc_errors;
+ unsigned long rx_frame_errors;
+ unsigned long rx_fifo_errors;
+ unsigned long rx_missed_errors;
+ unsigned long tx_aborted_errors;
+ unsigned long tx_carrier_errors;
+ unsigned long tx_fifo_errors;
+ unsigned long tx_heartbeat_errors;
+ unsigned long tx_window_errors;
+ unsigned long rx_compressed;
+ unsigned long tx_compressed;
+};
+
+
+#include <linux/cache.h>
+#include <linux/skbuff.h>
+
+#ifdef CONFIG_RPS
+#include <linux/static_key.h>
+extern struct static_key rps_needed;
+#endif
+
+struct neighbour;
+struct neigh_parms;
+struct sk_buff;
+
+struct netdev_hw_addr {
+ struct list_head list;
+ unsigned char addr[MAX_ADDR_LEN];
+ unsigned char type;
+#define NETDEV_HW_ADDR_T_LAN 1
+#define NETDEV_HW_ADDR_T_SAN 2
+#define NETDEV_HW_ADDR_T_SLAVE 3
+#define NETDEV_HW_ADDR_T_UNICAST 4
+#define NETDEV_HW_ADDR_T_MULTICAST 5
+ bool global_use;
+ int sync_cnt;
+ int refcount;
+ int synced;
+ struct rcu_head rcu_head;
+};
+
+struct netdev_hw_addr_list {
+ struct list_head list;
+ int count;
+};
+
+#define netdev_hw_addr_list_count(l) ((l)->count)
+#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
+#define netdev_hw_addr_list_for_each(ha, l) \
+ list_for_each_entry(ha, &(l)->list, list)
+
+#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
+#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
+#define netdev_for_each_uc_addr(ha, dev) \
+ netdev_hw_addr_list_for_each(ha, &(dev)->uc)
+
+#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
+#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
+#define netdev_for_each_mc_addr(ha, dev) \
+ netdev_hw_addr_list_for_each(ha, &(dev)->mc)
+
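+/*
+ * Illustrative sketch (not part of this header): a driver's
+ * ndo_set_rx_mode() implementation typically walks these lists to program
+ * its hardware filters; foo_hw_add_mc_filter() below is a hypothetical
+ * device-specific helper.
+ *
+ *	static void foo_set_rx_mode(struct net_device *dev)
+ *	{
+ *		struct netdev_hw_addr *ha;
+ *
+ *		netdev_for_each_mc_addr(ha, dev)
+ *			foo_hw_add_mc_filter(dev, ha->addr);
+ *	}
+ */
+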
+struct hh_cache {
+ u16 hh_len;
+ u16 __pad;
+ seqlock_t hh_lock;
+
+ /* cached hardware header; allow for machine alignment needs. */
+#define HH_DATA_MOD 16
+#define HH_DATA_OFF(__len) \
+ (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
+#define HH_DATA_ALIGN(__len) \
+ (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
+ unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
+};
+
+/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
+ * Alternative is:
+ * dev->hard_header_len ? (dev->hard_header_len +
+ * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
+ *
+ * We could use other alignment values, but we must maintain the
+ * relationship HH alignment <= LL alignment.
+ */
+#define LL_RESERVED_SPACE(dev) \
+ ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
+ ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+
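+/*
+ * Illustrative sketch (not part of this header): code that builds an
+ * outgoing frame usually reserves LL_RESERVED_SPACE(dev) of headroom so the
+ * link layer header fits without reallocation, for example:
+ *
+ *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
+ *	if (skb)
+ *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
+ */
+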
+struct header_ops {
+ int (*create) (struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len);
+ int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
+ int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
+ void (*cache_update)(struct hh_cache *hh,
+ const struct net_device *dev,
+ const unsigned char *haddr);
+};
+
+/* These flag bits are private to the generic network queueing
+ * layer, they may not be explicitly referenced by any other
+ * code.
+ */
+
+enum netdev_state_t {
+ __LINK_STATE_START,
+ __LINK_STATE_PRESENT,
+ __LINK_STATE_NOCARRIER,
+ __LINK_STATE_LINKWATCH_PENDING,
+ __LINK_STATE_DORMANT,
+};
+
+
+/*
+ * This structure holds at boot time configured netdevice settings. They
+ * are then used in the device probing.
+ */
+struct netdev_boot_setup {
+ char name[IFNAMSIZ];
+ struct ifmap map;
+};
+#define NETDEV_BOOT_SETUP_MAX 8
+
+int __init netdev_boot_setup(char *str);
+
+/*
+ * Structure for NAPI scheduling similar to tasklet but with weighting
+ */
+struct napi_struct {
+ /* The poll_list must only be managed by the entity which
+ * changes the state of the NAPI_STATE_SCHED bit. This means
+ * whoever atomically sets that bit can add this napi_struct
+ * to the per-cpu poll_list, and whoever clears that bit
+ * can remove from the list right before clearing the bit.
+ */
+ struct list_head poll_list;
+
+ unsigned long state;
+ int weight;
+ unsigned int gro_count;
+ int (*poll)(struct napi_struct *, int);
+#ifdef CONFIG_NETPOLL
+ spinlock_t poll_lock;
+ int poll_owner;
+#endif
+ struct net_device *dev;
+ struct sk_buff *gro_list;
+ struct sk_buff *skb;
+ struct hrtimer timer;
+ struct list_head dev_list;
+ struct hlist_node napi_hash_node;
+ unsigned int napi_id;
+};
+
+enum {
+ NAPI_STATE_SCHED, /* Poll is scheduled */
+ NAPI_STATE_DISABLE, /* Disable pending */
+ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
+ NAPI_STATE_HASHED, /* In NAPI hash */
+};
+
+enum gro_result {
+ GRO_MERGED,
+ GRO_MERGED_FREE,
+ GRO_HELD,
+ GRO_NORMAL,
+ GRO_DROP,
+};
+typedef enum gro_result gro_result_t;
+
+/*
+ * enum rx_handler_result - Possible return values for rx_handlers.
+ * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
+ * further.
+ * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
+ * case skb->dev was changed by rx_handler.
+ * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
+ * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
+ *
+ * rx_handlers are functions called from inside __netif_receive_skb(), to do
+ * special processing of the skb, prior to delivery to protocol handlers.
+ *
+ * Currently, a net_device can only have a single rx_handler registered. Trying
+ * to register a second rx_handler will return -EBUSY.
+ *
+ * To register a rx_handler on a net_device, use netdev_rx_handler_register().
+ * To unregister a rx_handler on a net_device, use
+ * netdev_rx_handler_unregister().
+ *
+ * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
+ * do with the skb.
+ *
+ * If the rx_handler consumed the skb in some way, it should return
+ * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
+ * the skb to be delivered in some other way.
+ *
+ * If the rx_handler changed skb->dev, to divert the skb to another
+ * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
+ * new device will be called if it exists.
+ *
+ * If the rx_handler considers that the skb should be ignored, it should return
+ * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
+ * are registered on exact device (ptype->dev == skb->dev).
+ *
+ * If the rx_handler didn't change skb->dev, but wants the skb to be normally
+ * delivered, it should return RX_HANDLER_PASS.
+ *
+ * A device without a registered rx_handler will behave as if rx_handler
+ * returned RX_HANDLER_PASS.
+ */
+
+enum rx_handler_result {
+ RX_HANDLER_CONSUMED,
+ RX_HANDLER_ANOTHER,
+ RX_HANDLER_EXACT,
+ RX_HANDLER_PASS,
+};
+typedef enum rx_handler_result rx_handler_result_t;
+typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
+
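+/*
+ * Illustrative sketch (not part of this header): a toy rx_handler,
+ * registered with netdev_rx_handler_register(), that passes loopback
+ * frames through and consumes (drops) everything else:
+ *
+ *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
+ *	{
+ *		struct sk_buff *skb = *pskb;
+ *
+ *		if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ *			return RX_HANDLER_PASS;
+ *
+ *		kfree_skb(skb);
+ *		return RX_HANDLER_CONSUMED;
+ *	}
+ */
+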
+void __napi_schedule(struct napi_struct *n);
+void __napi_schedule_irqoff(struct napi_struct *n);
+
+static inline bool napi_disable_pending(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_DISABLE, &n->state);
+}
+
+/**
+ * napi_schedule_prep - check if napi can be scheduled
+ * @n: napi context
+ *
+ * Test if NAPI routine is already running, and if not mark
+ * it as running. This is used as a condition variable to
+ * ensure that only one NAPI poll instance runs. We also make
+ * sure there is no pending NAPI disable.
+ */
+static inline bool napi_schedule_prep(struct napi_struct *n)
+{
+ return !napi_disable_pending(n) &&
+ !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+}
+
+/**
+ * napi_schedule - schedule NAPI poll
+ * @n: napi context
+ *
+ * Schedule NAPI poll routine to be called if it is not already
+ * running.
+ */
+static inline void napi_schedule(struct napi_struct *n)
+{
+ if (napi_schedule_prep(n))
+ __napi_schedule(n);
+}
+
+/**
+ * napi_schedule_irqoff - schedule NAPI poll
+ * @n: napi context
+ *
+ * Variant of napi_schedule(), assuming hard irqs are masked.
+ */
+static inline void napi_schedule_irqoff(struct napi_struct *n)
+{
+ if (napi_schedule_prep(n))
+ __napi_schedule_irqoff(n);
+}
+
+/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
+static inline bool napi_reschedule(struct napi_struct *napi)
+{
+ if (napi_schedule_prep(napi)) {
+ __napi_schedule(napi);
+ return true;
+ }
+ return false;
+}
+
+void __napi_complete(struct napi_struct *n);
+void napi_complete_done(struct napi_struct *n, int work_done);
+/**
+ * napi_complete - NAPI processing complete
+ * @n: napi context
+ *
+ * Mark NAPI processing as complete.
+ * Consider using napi_complete_done() instead.
+ */
+static inline void napi_complete(struct napi_struct *n)
+{
+ return napi_complete_done(n, 0);
+}
+
+/**
+ * napi_by_id - lookup a NAPI by napi_id
+ * @napi_id: hashed napi_id
+ *
+ * Look up @napi_id in the napi_hash table;
+ * must be called under rcu_read_lock()
+ */
+struct napi_struct *napi_by_id(unsigned int napi_id);
+
+/**
+ * napi_hash_add - add a NAPI to global hashtable
+ * @napi: napi context
+ *
+ * generate a new napi_id and store a @napi under it in napi_hash
+ */
+void napi_hash_add(struct napi_struct *napi);
+
+/**
+ * napi_hash_del - remove a NAPI from global table
+ * @napi: napi context
+ *
+ * Warning: caller must observe rcu grace period
+ * before freeing memory containing @napi
+ */
+void napi_hash_del(struct napi_struct *napi);
+
+/**
+ * napi_disable - prevent NAPI from scheduling
+ * @n: napi context
+ *
+ * Stop NAPI from being scheduled on this context.
+ * Waits till any outstanding processing completes.
+ */
+void napi_disable(struct napi_struct *n);
+
+/**
+ * napi_enable - enable NAPI scheduling
+ * @n: napi context
+ *
+ * Resume scheduling of NAPI on this context.
+ * Must be paired with napi_disable.
+ */
+static inline void napi_enable(struct napi_struct *n)
+{
+ BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+ smp_mb__before_atomic();
+ clear_bit(NAPI_STATE_SCHED, &n->state);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * napi_synchronize - wait until NAPI is not running
+ * @n: napi context
+ *
+ * Wait until NAPI is done being scheduled on this context.
+ * Waits till any outstanding processing completes but
+ * does not disable future activations.
+ */
+static inline void napi_synchronize(const struct napi_struct *n)
+{
+ while (test_bit(NAPI_STATE_SCHED, &n->state))
+ msleep(1);
+}
+#else
+# define napi_synchronize(n) barrier()
+#endif
+
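+/*
+ * Illustrative sketch (not part of this header): the usual driver pattern
+ * is to call napi_schedule() from the interrupt handler and
+ * napi_complete_done() from the poll routine when it has not exhausted its
+ * budget. foo_disable_rx_irq(), foo_process_rx() and foo_enable_rx_irq()
+ * are hypothetical device-specific helpers.
+ *
+ *	static irqreturn_t foo_interrupt(int irq, void *data)
+ *	{
+ *		struct foo_priv *priv = data;
+ *
+ *		foo_disable_rx_irq(priv);
+ *		napi_schedule(&priv->napi);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	static int foo_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
+ *		int done = foo_process_rx(priv, budget);
+ *
+ *		if (done < budget) {
+ *			napi_complete_done(napi, done);
+ *			foo_enable_rx_irq(priv);
+ *		}
+ *		return done;
+ *	}
+ */
+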
+enum netdev_queue_state_t {
+ __QUEUE_STATE_DRV_XOFF,
+ __QUEUE_STATE_STACK_XOFF,
+ __QUEUE_STATE_FROZEN,
+};
+
+#define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF)
+#define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF)
+#define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN)
+
+#define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF)
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
+ QUEUE_STATE_FROZEN)
+#define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \
+ QUEUE_STATE_FROZEN)
+
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
+ * netif_tx_* functions below are used to manipulate this flag. The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently. The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or stack (either
+ * of the XOFF bits are set in the state). Drivers should not need to call
+ * netif_xmit*stopped functions, they should only be using netif_tx_*.
+ */
+
+struct netdev_queue {
+/*
+ * read mostly part
+ */
+ struct net_device *dev;
+ struct Qdisc __rcu *qdisc;
+ struct Qdisc *qdisc_sleeping;
+#ifdef CONFIG_SYSFS
+ struct kobject kobj;
+#endif
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ int numa_node;
+#endif
+/*
+ * write mostly part
+ */
+ spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
+ int xmit_lock_owner;
+ /*
+ * please use this field instead of dev->trans_start
+ */
+ unsigned long trans_start;
+
+ /*
+ * Number of TX timeouts for this queue
+ * (/sys/class/net/DEV/Q/trans_timeout)
+ */
+ unsigned long trans_timeout;
+
+ unsigned long state;
+
+#ifdef CONFIG_BQL
+ struct dql dql;
+#endif
+ unsigned long tx_maxrate;
+} ____cacheline_aligned_in_smp;
+
+static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ return q->numa_node;
+#else
+ return NUMA_NO_NODE;
+#endif
+}
+
+static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+ q->numa_node = node;
+#endif
+}
+
+#ifdef CONFIG_RPS
+/*
+ * This structure holds an RPS map which can be of variable length. The
+ * map is an array of CPUs.
+ */
+struct rps_map {
+ unsigned int len;
+ struct rcu_head rcu;
+ u16 cpus[0];
+};
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
+
+/*
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
+ */
+struct rps_dev_flow {
+ u16 cpu;
+ u16 filter;
+ unsigned int last_qtail;
+};
+#define RPS_NO_FILTER 0xffff
+
+/*
+ * The rps_dev_flow_table structure contains a table of flow mappings.
+ */
+struct rps_dev_flow_table {
+ unsigned int mask;
+ struct rcu_head rcu;
+ struct rps_dev_flow flows[0];
+};
+#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
+ ((_num) * sizeof(struct rps_dev_flow)))
+
+/*
+ * The rps_sock_flow_table contains mappings of flows to the last CPU
+ * on which they were processed by the application (set in recvmsg).
+ * Each entry is a 32bit value. Upper part is the high order bits
+ * of flow hash, lower part is cpu number.
+ * rps_cpu_mask is used to partition the space, depending on number of
+ * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f,
+ * meaning we use 32-6=26 bits for the hash.
+ */
+struct rps_sock_flow_table {
+ u32 mask;
+
+ u32 ents[0] ____cacheline_aligned_in_smp;
+};
+#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num]))
+
+#define RPS_NO_CPU 0xffff
+
+extern u32 rps_cpu_mask;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+
+static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
+ u32 hash)
+{
+ if (table && hash) {
+ unsigned int index = hash & table->mask;
+ u32 val = hash & ~rps_cpu_mask;
+
+ /* We only give a hint, preemption can change cpu under us */
+ val |= raw_smp_processor_id();
+
+ if (table->ents[index] != val)
+ table->ents[index] = val;
+ }
+}
+
+#ifdef CONFIG_RFS_ACCEL
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
+ u16 filter_id);
+#endif
+#endif /* CONFIG_RPS */
+
+/* This structure contains an instance of an RX queue. */
+struct netdev_rx_queue {
+#ifdef CONFIG_RPS
+ struct rps_map __rcu *rps_map;
+ struct rps_dev_flow_table __rcu *rps_flow_table;
+#endif
+ struct kobject kobj;
+ struct net_device *dev;
+} ____cacheline_aligned_in_smp;
+
+/*
+ * RX queue sysfs structures and functions.
+ */
+struct rx_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr, char *buf);
+ ssize_t (*store)(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr, const char *buf, size_t len);
+};
+
+#ifdef CONFIG_XPS
+/*
+ * This structure holds an XPS map which can be of variable length. The
+ * map is an array of queues.
+ */
+struct xps_map {
+ unsigned int len;
+ unsigned int alloc_len;
+ struct rcu_head rcu;
+ u16 queues[0];
+};
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
+#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
+ / sizeof(u16))
+
+/*
+ * This structure holds all XPS maps for device. Maps are indexed by CPU.
+ */
+struct xps_dev_maps {
+ struct rcu_head rcu;
+ struct xps_map __rcu *cpu_map[0];
+};
+#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
+ (nr_cpu_ids * sizeof(struct xps_map *)))
+#endif /* CONFIG_XPS */
+
+#define TC_MAX_QUEUE 16
+#define TC_BITMASK 15
+/* HW offloaded queuing disciplines txq count and offset maps */
+struct netdev_tc_txq {
+ u16 count;
+ u16 offset;
+};
+
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+/*
+ * This structure is to hold information about the device
+ * configured to run FCoE protocol stack.
+ */
+struct netdev_fcoe_hbainfo {
+ char manufacturer[64];
+ char serial_number[64];
+ char hardware_version[64];
+ char driver_version[64];
+ char optionrom_version[64];
+ char firmware_version[64];
+ char model[256];
+ char model_description[256];
+};
+#endif
+
+#define MAX_PHYS_ITEM_ID_LEN 32
+
+/* This structure holds a unique identifier to identify some
+ * physical item (port for example) used by a netdevice.
+ */
+struct netdev_phys_item_id {
+ unsigned char id[MAX_PHYS_ITEM_ID_LEN];
+ unsigned char id_len;
+};
+
+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+ struct sk_buff *skb);
+
+/*
+ * This structure defines the management hooks for network devices.
+ * The following hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer.
+ *
+ * int (*ndo_init)(struct net_device *dev);
+ * This function is called once when network device is registered.
+ * The network device can use this for any late stage initialization
+ * or semantic validation. It can fail with an error code which will
+ * be propagated back to register_netdev.
+ *
+ * void (*ndo_uninit)(struct net_device *dev);
+ * This function is called when device is unregistered or when registration
+ * fails. It is not called if init fails.
+ *
+ * int (*ndo_open)(struct net_device *dev);
+ * This function is called when the network device transitions to the up
+ * state.
+ *
+ * int (*ndo_stop)(struct net_device *dev);
+ * This function is called when the network device transitions to the down
+ * state.
+ *
+ * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
+ * struct net_device *dev);
+ * Called when a packet needs to be transmitted.
+ * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop
+ * the queue before that can happen; it's for obsolete devices and weird
+ * corner cases, but the stack really does a non-trivial amount
+ * of useless work if you return NETDEV_TX_BUSY.
+ * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
+ * Required; cannot be NULL.
+ *
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ * void *accel_priv, select_queue_fallback_t fallback);
+ * Called to decide which queue to use when the device supports multiple
+ * transmit queues.
+ *
+ * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
+ * This function is called to allow the device receiver to make
+ * changes to its configuration when multicast or promiscuous mode is enabled.
+ *
+ * void (*ndo_set_rx_mode)(struct net_device *dev);
+ * This function is called when the device changes its address list filtering.
+ * If driver handles unicast address filtering, it should set
+ * IFF_UNICAST_FLT to its priv_flags.
+ *
+ * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
+ * This function is called when the Media Access Control address
+ * needs to be changed. If this interface is not defined, the
+ * MAC address cannot be changed.
+ *
+ * int (*ndo_validate_addr)(struct net_device *dev);
+ * Test if Media Access Control address is valid for the device.
+ *
+ * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
+ * Called when a user requests an ioctl which can't be handled by
+ * the generic interface code. If not defined, ioctls return a
+ * not-supported error code.
+ *
+ * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
+ * Used to set network device bus interface parameters. This interface
+ * is retained for legacy reasons; new devices should use the bus
+ * interface (PCI) for low level management.
+ *
+ * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
+ * Called when a user wants to change the Maximum Transfer Unit
+ * of a device. If not defined, any request to change MTU will
+ * return an error.
+ *
+ * void (*ndo_tx_timeout)(struct net_device *dev);
+ * Callback used when the transmitter has not made any progress
+ * for dev->watchdog ticks.
+ *
+ * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+ * struct rtnl_link_stats64 *storage);
+ * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
+ * Called when a user wants to get the network device usage
+ * statistics. Drivers must do one of the following:
+ * 1. Define @ndo_get_stats64 to fill in a zero-initialised
+ * rtnl_link_stats64 structure passed by the caller.
+ * 2. Define @ndo_get_stats to update a net_device_stats structure
+ * (which should normally be dev->stats) and return a pointer to
+ * it. The structure may be changed asynchronously only if each
+ * field is written atomically.
+ * 3. Update dev->stats asynchronously and atomically, and define
+ * neither operation.
+ *
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
+ * If the device supports VLAN filtering, this function is called when a
+ * VLAN id is registered.
+ *
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
+ * If the device supports VLAN filtering, this function is called when a
+ * VLAN id is unregistered.
+ *
+ * void (*ndo_poll_controller)(struct net_device *dev);
+ *
+ * SR-IOV management functions.
+ * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
+ * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
+ * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
+ * int max_tx_rate);
+ * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
+ * int (*ndo_get_vf_config)(struct net_device *dev,
+ * int vf, struct ifla_vf_info *ivf);
+ * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
+ * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
+ * struct nlattr *port[]);
+ *
+ * Enable or disable the VF ability to query its RSS Redirection Table and
+ * Hash Key. This is needed since on some devices the VF shares this
+ * information with the PF and querying it may introduce a theoretical
+ * security risk.
+ * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
+ * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
+ * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
+ * Called to setup 'tc' number of traffic classes in the net device. This
+ * is always called from the stack with the rtnl lock held and netif tx
+ * queues stopped. This allows the netdevice to perform queue management
+ * safely.
+ *
+ * Fiber Channel over Ethernet (FCoE) offload functions.
+ * int (*ndo_fcoe_enable)(struct net_device *dev);
+ * Called when the FCoE protocol stack wants to start using LLD for FCoE
+ * so the underlying device can perform whatever needed configuration or
+ * initialization to support acceleration of FCoE traffic.
+ *
+ * int (*ndo_fcoe_disable)(struct net_device *dev);
+ * Called when the FCoE protocol stack wants to stop using LLD for FCoE
+ * so the underlying device can perform whatever needed clean-ups to
+ * stop supporting acceleration of FCoE traffic.
+ *
+ * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
+ * struct scatterlist *sgl, unsigned int sgc);
+ * Called when the FCoE Initiator wants to initialize an I/O that
+ * is a possible candidate for Direct Data Placement (DDP). The LLD can
+ * perform necessary setup and returns 1 to indicate the device is set up
+ * successfully to perform DDP on this I/O, otherwise this returns 0.
+ *
+ * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
+ * Called when the FCoE Initiator/Target is done with the DDPed I/O as
+ * indicated by the FC exchange id 'xid', so the underlying device can
+ * clean up and reuse resources for later DDP requests.
+ *
+ * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
+ * struct scatterlist *sgl, unsigned int sgc);
+ * Called when the FCoE Target wants to initialize an I/O that
+ * is a possible candidate for Direct Data Placement (DDP). The LLD can
+ * perform necessary setup and returns 1 to indicate the device is set up
+ * successfully to perform DDP on this I/O, otherwise this returns 0.
+ *
+ * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
+ * struct netdev_fcoe_hbainfo *hbainfo);
+ * Called when the FCoE Protocol stack wants information on the underlying
+ * device. This information is utilized by the FCoE protocol stack to
+ * register attributes with Fiber Channel management service as per the
+ * FC-GS Fabric Device Management Information (FDMI) specification.
+ *
+ * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
+ * Called when the underlying device wants to override default World Wide
+ * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
+ * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
+ * protocol stack to use.
+ *
+ * RFS acceleration.
+ * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
+ * u16 rxq_index, u32 flow_id);
+ * Set hardware filter for RFS. rxq_index is the target queue index;
+ * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
+ * Return the filter ID on success, or a negative error code.
+ *
+ * Slave management functions (for bridge, bonding, etc).
+ * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
+ * Called to make another netdev an underling.
+ *
+ * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
+ * Called to release previously enslaved netdev.
+ *
+ * Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ * netdev_features_t features);
+ * Adjusts the requested feature flags according to device-specific
+ * constraints, and returns the resulting flags. Must not modify
+ * the device state.
+ *
+ * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
+ * Called to update device configuration to new features. Passed
+ * feature set might be less than what was returned by ndo_fix_features().
+ * Must return >0 or -errno if it changed dev->features itself.
+ *
+ * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
+ * struct net_device *dev,
+ * const unsigned char *addr, u16 vid, u16 flags)
+ * Adds an FDB entry to dev for addr.
+ * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
+ * struct net_device *dev,
+ * const unsigned char *addr, u16 vid)
+ * Deletes the FDB entry from dev corresponding to addr.
+ * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
+ * struct net_device *dev, struct net_device *filter_dev,
+ * int idx)
+ * Used to add FDB entries to dump requests. Implementers should add
+ * entries to skb and update idx with the number of entries.
+ *
+ * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
+ * u16 flags)
+ * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
+ * struct net_device *dev, u32 filter_mask,
+ * int nlflags)
+ * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
+ * u16 flags);
+ *
+ * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
+ * Called to change device carrier. Soft-devices (like dummy, team, etc)
+ * which do not represent real hardware may define this to allow their
+ * userspace components to manage their virtual carrier state. Devices
+ * that determine carrier state from physical hardware properties (eg
+ * network cables) or protocol-dependent mechanisms (eg
+ * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
+ *
+ * int (*ndo_get_phys_port_id)(struct net_device *dev,
+ * struct netdev_phys_item_id *ppid);
+ * Called to get ID of physical port of this device. If driver does
+ * not implement this, it is assumed that the hw is not able to have
+ * multiple net devices on single physical port.
+ * multiple net devices on a single physical port.
+ * void (*ndo_add_vxlan_port)(struct net_device *dev,
+ * sa_family_t sa_family, __be16 port);
+ * Called by vxlan to notify a driver about the UDP port and socket
+ * address family that vxlan is listening to. It is called only when
+ * a new port starts listening. The operation is protected by the
+ * vxlan_net->sock_lock.
+ *
+ * void (*ndo_del_vxlan_port)(struct net_device *dev,
+ * sa_family_t sa_family, __be16 port);
+ * Called by vxlan to notify the driver about a UDP port and socket
+ * address family that vxlan is not listening to anymore. The operation
+ * is protected by the vxlan_net->sock_lock.
+ *
+ * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
+ * struct net_device *dev)
+ * Called by upper layer devices to accelerate switching or other
+ * station functionality into hardware. 'pdev' is the lowerdev
+ * to use for the offload and 'dev' is the net device that will
+ * back the offload. Returns a pointer to the private structure
+ * the upper layer will maintain.
+ * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
+ * Called by upper layer device to delete the station created
+ * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
+ * the station and priv is the structure returned by the add
+ * operation.
+ * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
+ * struct net_device *dev,
+ * void *priv);
+ * Callback to use for xmit over the accelerated station. This
+ * is used in place of ndo_start_xmit on accelerated net
+ * devices.
+ * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ * struct net_device *dev,
+ * netdev_features_t features);
+ * Called by core transmit path to determine if device is capable of
+ * performing offload operations on a given packet. This is to give
+ * the device an opportunity to implement any restrictions that cannot
+ * be otherwise expressed by feature flags. The check is called with
+ * the set of features that the stack has calculated and it returns
+ * those the driver believes to be appropriate.
+ * int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ * int queue_index, u32 maxrate);
+ * Called when a user wants to set a max-rate limitation of specific
+ * TX queue.
+ * int (*ndo_get_iflink)(const struct net_device *dev);
+ * Called to get the iflink value of this device.
+ */
+struct net_device_ops {
+ int (*ndo_init)(struct net_device *dev);
+ void (*ndo_uninit)(struct net_device *dev);
+ int (*ndo_open)(struct net_device *dev);
+ int (*ndo_stop)(struct net_device *dev);
+ netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
+ struct net_device *dev);
+ u16 (*ndo_select_queue)(struct net_device *dev,
+ struct sk_buff *skb,
+ void *accel_priv,
+ select_queue_fallback_t fallback);
+ void (*ndo_change_rx_flags)(struct net_device *dev,
+ int flags);
+ void (*ndo_set_rx_mode)(struct net_device *dev);
+ int (*ndo_set_mac_address)(struct net_device *dev,
+ void *addr);
+ int (*ndo_validate_addr)(struct net_device *dev);
+ int (*ndo_do_ioctl)(struct net_device *dev,
+ struct ifreq *ifr, int cmd);
+ int (*ndo_set_config)(struct net_device *dev,
+ struct ifmap *map);
+ int (*ndo_change_mtu)(struct net_device *dev,
+ int new_mtu);
+ int (*ndo_neigh_setup)(struct net_device *dev,
+ struct neigh_parms *);
+ void (*ndo_tx_timeout) (struct net_device *dev);
+
+ struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
+ struct rtnl_link_stats64 *storage);
+ struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
+
+ int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
+ __be16 proto, u16 vid);
+ int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
+ __be16 proto, u16 vid);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ void (*ndo_poll_controller)(struct net_device *dev);
+ int (*ndo_netpoll_setup)(struct net_device *dev,
+ struct netpoll_info *info);
+ void (*ndo_netpoll_cleanup)(struct net_device *dev);
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ int (*ndo_busy_poll)(struct napi_struct *dev);
+#endif
+ int (*ndo_set_vf_mac)(struct net_device *dev,
+ int queue, u8 *mac);
+ int (*ndo_set_vf_vlan)(struct net_device *dev,
+ int queue, u16 vlan, u8 qos);
+ int (*ndo_set_vf_rate)(struct net_device *dev,
+ int vf, int min_tx_rate,
+ int max_tx_rate);
+ int (*ndo_set_vf_spoofchk)(struct net_device *dev,
+ int vf, bool setting);
+ int (*ndo_get_vf_config)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_info *ivf);
+ int (*ndo_set_vf_link_state)(struct net_device *dev,
+ int vf, int link_state);
+ int (*ndo_set_vf_port)(struct net_device *dev,
+ int vf,
+ struct nlattr *port[]);
+ int (*ndo_get_vf_port)(struct net_device *dev,
+ int vf, struct sk_buff *skb);
+ int (*ndo_set_vf_rss_query_en)(
+ struct net_device *dev,
+ int vf, bool setting);
+ int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
+#if IS_ENABLED(CONFIG_FCOE)
+ int (*ndo_fcoe_enable)(struct net_device *dev);
+ int (*ndo_fcoe_disable)(struct net_device *dev);
+ int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
+ u16 xid,
+ struct scatterlist *sgl,
+ unsigned int sgc);
+ int (*ndo_fcoe_ddp_done)(struct net_device *dev,
+ u16 xid);
+ int (*ndo_fcoe_ddp_target)(struct net_device *dev,
+ u16 xid,
+ struct scatterlist *sgl,
+ unsigned int sgc);
+ int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
+ struct netdev_fcoe_hbainfo *hbainfo);
+#endif
+
+#if IS_ENABLED(CONFIG_LIBFCOE)
+#define NETDEV_FCOE_WWNN 0
+#define NETDEV_FCOE_WWPN 1
+ int (*ndo_fcoe_get_wwn)(struct net_device *dev,
+ u64 *wwn, int type);
+#endif
+
+#ifdef CONFIG_RFS_ACCEL
+ int (*ndo_rx_flow_steer)(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 rxq_index,
+ u32 flow_id);
+#endif
+ int (*ndo_add_slave)(struct net_device *dev,
+ struct net_device *slave_dev);
+ int (*ndo_del_slave)(struct net_device *dev,
+ struct net_device *slave_dev);
+ netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ netdev_features_t features);
+ int (*ndo_set_features)(struct net_device *dev,
+ netdev_features_t features);
+ int (*ndo_neigh_construct)(struct neighbour *n);
+ void (*ndo_neigh_destroy)(struct neighbour *n);
+
+ int (*ndo_fdb_add)(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid,
+ u16 flags);
+ int (*ndo_fdb_del)(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid);
+ int (*ndo_fdb_dump)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev,
+ int idx);
+
+ int (*ndo_bridge_setlink)(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags);
+ int (*ndo_bridge_getlink)(struct sk_buff *skb,
+ u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 filter_mask,
+ int nlflags);
+ int (*ndo_bridge_dellink)(struct net_device *dev,
+ struct nlmsghdr *nlh,
+ u16 flags);
+ int (*ndo_change_carrier)(struct net_device *dev,
+ bool new_carrier);
+ int (*ndo_get_phys_port_id)(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
+ int (*ndo_get_phys_port_name)(struct net_device *dev,
+ char *name, size_t len);
+ void (*ndo_add_vxlan_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __be16 port);
+ void (*ndo_del_vxlan_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __be16 port);
+
+ void* (*ndo_dfwd_add_station)(struct net_device *pdev,
+ struct net_device *dev);
+ void (*ndo_dfwd_del_station)(struct net_device *pdev,
+ void *priv);
+
+ netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
+ struct net_device *dev,
+ void *priv);
+ int (*ndo_get_lock_subclass)(struct net_device *dev);
+ netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+ int (*ndo_set_tx_maxrate)(struct net_device *dev,
+ int queue_index,
+ u32 maxrate);
+ int (*ndo_get_iflink)(const struct net_device *dev);
+};
+
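+/*
+ * Illustrative sketch (not part of this header): a simple driver fills in
+ * only the hooks it needs and leaves the rest NULL. foo_open(), foo_stop()
+ * and foo_start_xmit() are hypothetical driver functions; eth_mac_addr()
+ * and eth_validate_addr() are the stock Ethernet helpers many drivers
+ * reuse.
+ *
+ *	static const struct net_device_ops foo_netdev_ops = {
+ *		.ndo_open		= foo_open,
+ *		.ndo_stop		= foo_stop,
+ *		.ndo_start_xmit		= foo_start_xmit,
+ *		.ndo_set_mac_address	= eth_mac_addr,
+ *		.ndo_validate_addr	= eth_validate_addr,
+ *	};
+ */
+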
+/**
+ * enum net_device_priv_flags - &struct net_device priv_flags
+ *
+ * These are the &struct net_device priv_flags; they are only set internally
+ * by drivers and used in the kernel. These flags are invisible to
+ * userspace; this means that the order of these flags can change
+ * during any kernel release.
+ *
+ * You should have a pretty good reason to be extending these flags.
+ *
+ * @IFF_802_1Q_VLAN: 802.1Q VLAN device
+ * @IFF_EBRIDGE: Ethernet bridging device
+ * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
+ * @IFF_MASTER_8023AD: bonding master, 802.3ad
+ * @IFF_MASTER_ALB: bonding master, balance-alb
+ * @IFF_BONDING: bonding master or slave
+ * @IFF_SLAVE_NEEDARP: need ARPs for validation
+ * @IFF_ISATAP: ISATAP interface (RFC4214)
+ * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
+ * @IFF_WAN_HDLC: WAN HDLC device
+ * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
+ * release skb->dst
+ * @IFF_DONT_BRIDGE: disallow bridging this ether dev
+ * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
+ * @IFF_MACVLAN_PORT: device used as macvlan port
+ * @IFF_BRIDGE_PORT: device used as bridge port
+ * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
+ * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
+ * @IFF_UNICAST_FLT: Supports unicast filtering
+ * @IFF_TEAM_PORT: device used as team port
+ * @IFF_SUPP_NOFCS: device supports sending custom FCS
+ * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
+ * change when it's running
+ * @IFF_MACVLAN: Macvlan device
+ */
+enum netdev_priv_flags {
+ IFF_802_1Q_VLAN = 1<<0,
+ IFF_EBRIDGE = 1<<1,
+ IFF_SLAVE_INACTIVE = 1<<2,
+ IFF_MASTER_8023AD = 1<<3,
+ IFF_MASTER_ALB = 1<<4,
+ IFF_BONDING = 1<<5,
+ IFF_SLAVE_NEEDARP = 1<<6,
+ IFF_ISATAP = 1<<7,
+ IFF_MASTER_ARPMON = 1<<8,
+ IFF_WAN_HDLC = 1<<9,
+ IFF_XMIT_DST_RELEASE = 1<<10,
+ IFF_DONT_BRIDGE = 1<<11,
+ IFF_DISABLE_NETPOLL = 1<<12,
+ IFF_MACVLAN_PORT = 1<<13,
+ IFF_BRIDGE_PORT = 1<<14,
+ IFF_OVS_DATAPATH = 1<<15,
+ IFF_TX_SKB_SHARING = 1<<16,
+ IFF_UNICAST_FLT = 1<<17,
+ IFF_TEAM_PORT = 1<<18,
+ IFF_SUPP_NOFCS = 1<<19,
+ IFF_LIVE_ADDR_CHANGE = 1<<20,
+ IFF_MACVLAN = 1<<21,
+ IFF_XMIT_DST_RELEASE_PERM = 1<<22,
+ IFF_IPVLAN_MASTER = 1<<23,
+ IFF_IPVLAN_SLAVE = 1<<24,
+};
+
+#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
+#define IFF_EBRIDGE IFF_EBRIDGE
+#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
+#define IFF_MASTER_8023AD IFF_MASTER_8023AD
+#define IFF_MASTER_ALB IFF_MASTER_ALB
+#define IFF_BONDING IFF_BONDING
+#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
+#define IFF_ISATAP IFF_ISATAP
+#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
+#define IFF_WAN_HDLC IFF_WAN_HDLC
+#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
+#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
+#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
+#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
+#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
+#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
+#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
+#define IFF_UNICAST_FLT IFF_UNICAST_FLT
+#define IFF_TEAM_PORT IFF_TEAM_PORT
+#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
+#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
+#define IFF_MACVLAN IFF_MACVLAN
+#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
+#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
+#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
+
+/**
+ * struct net_device - The DEVICE structure.
+ * Actually, this whole structure is a big mistake. It mixes I/O
+ * data with strictly "high-level" data, and it has to know about
+ * almost every data structure used in the INET module.
+ *
+ * @name: This is the first field of the "visible" part of this structure
+ * (i.e. as seen by users in the "Space.c" file). It is the name
+ * of the interface.
+ *
+ * @name_hlist: Device name hash chain, please keep it close to name[]
+ * @ifalias: SNMP alias
+ * @mem_end: Shared memory end
+ * @mem_start: Shared memory start
+ * @base_addr: Device I/O address
+ * @irq: Device IRQ number
+ *
+ * @carrier_changes: Stats to monitor carrier on<->off transitions
+ *
+ * @state: Generic network queuing layer state, see netdev_state_t
+ * @dev_list: The global list of network devices
+ * @napi_list: List entry used for polling NAPI devices
+ * @unreg_list: List entry used when we are unregistering the
+ * device, see the function unregister_netdev
+ * @close_list: List entry used when we are closing the device
+ *
+ * @adj_list: Directly linked devices, like slaves for bonding
+ * @all_adj_list: All linked devices, *including* neighbours
+ * @features: Currently active device features
+ * @hw_features: User-changeable features
+ *
+ * @wanted_features: User-requested features
+ * @vlan_features: Mask of features inheritable by VLAN devices
+ *
+ * @hw_enc_features: Mask of features inherited by encapsulating devices
+ * This field indicates what encapsulation
+ * offloads the hardware is capable of doing,
+ * and drivers will need to set them appropriately.
+ *
+ * @mpls_features: Mask of features inheritable by MPLS
+ *
+ * @ifindex: interface index
+ * @group: The group, that the device belongs to
+ *
+ * @stats: Statistics struct, which was left as a legacy, use
+ * rtnl_link_stats64 instead
+ *
+ * @rx_dropped: Dropped packets by core network,
+ * do not use this in drivers
+ * @tx_dropped: Dropped packets by core network,
+ * do not use this in drivers
+ *
+ * @wireless_handlers: List of functions to handle Wireless Extensions,
+ * instead of ioctl,
+ * see <net/iw_handler.h> for details.
+ * @wireless_data: Instance data managed by the core of wireless extensions
+ *
+ * @netdev_ops: Includes several pointers to callbacks,
+ * if one wants to override the ndo_*() functions
+ * @ethtool_ops: Management operations
+ * @header_ops: Includes callbacks for creating, parsing and caching
+ * of Layer 2 headers.
+ *
+ * @flags: Interface flags (a la BSD)
+ * @priv_flags: Like 'flags' but invisible to userspace,
+ * see if.h for the definitions
+ * @gflags: Global flags ( kept as legacy )
+ * @padded: How much padding added by alloc_netdev()
+ * @operstate: RFC2863 operstate
+ * @link_mode: Mapping policy to operstate
+ * @if_port: Selectable AUI, TP, ...
+ * @dma: DMA channel
+ * @mtu: Interface MTU value
+ * @type: Interface hardware type
+ * @hard_header_len: Hardware header length
+ *
+ * @needed_headroom: Extra headroom the hardware may need, but not in all
+ * cases can this be guaranteed
+ * @needed_tailroom: Extra tailroom the hardware may need, but not in all
+ * cases can this be guaranteed. Some cases also use
+ * LL_MAX_HEADER instead to allocate the skb
+ *
+ * interface address info:
+ *
+ * @perm_addr: Permanent hw address
+ * @addr_assign_type: Hw address assignment type
+ * @addr_len: Hardware address length
+ * @neigh_priv_len: Used in neigh_alloc(),
+ * initialized only in atm/clip.c
+ * @dev_id: Used to differentiate devices that share
+ * the same link layer address
+ * @dev_port: Used to differentiate devices that share
+ * the same function
+ * @addr_list_lock: XXX: need comments on this one
+ * @uc_promisc: Counter that indicates that promiscuous mode
+ * has been enabled due to the need to listen to
+ * additional unicast addresses in a device that
+ * does not implement ndo_set_rx_mode()
+ * @uc: unicast mac addresses
+ * @mc: multicast mac addresses
+ * @dev_addrs: list of device hw addresses
+ * @queues_kset: Group of all Kobjects in the Tx and RX queues
+ * @promiscuity: Number of times the NIC is told to work in
+ * promiscuous mode; if it becomes 0 the NIC will
+ * exit from working in promiscuous mode
+ * @allmulti: Counter, enables or disables allmulticast mode
+ *
+ * @vlan_info: VLAN info
+ * @dsa_ptr: dsa specific data
+ * @tipc_ptr: TIPC specific data
+ * @atalk_ptr: AppleTalk link
+ * @ip_ptr: IPv4 specific data
+ * @dn_ptr: DECnet specific data
+ * @ip6_ptr: IPv6 specific data
+ * @ax25_ptr: AX.25 specific data
+ * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
+ *
+ * @last_rx: Time of last Rx
+ * @dev_addr: Hw address (before bcast,
+ * because most packets are unicast)
+ *
+ * @_rx: Array of RX queues
+ * @num_rx_queues: Number of RX queues
+ * allocated at register_netdev() time
+ * @real_num_rx_queues: Number of RX queues currently active in device
+ *
+ * @rx_handler: handler for received packets
+ * @rx_handler_data: XXX: need comments on this one
+ * @ingress_queue: XXX: need comments on this one
+ * @broadcast: hw bcast address
+ *
+ * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
+ * indexed by RX queue number. Assigned by driver.
+ * This must only be set if the ndo_rx_flow_steer
+ * operation is defined
+ * @index_hlist: Device index hash chain
+ *
+ * @_tx: Array of TX queues
+ * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time
+ * @real_num_tx_queues: Number of TX queues currently active in device
+ * @qdisc: Root qdisc from userspace point of view
+ * @tx_queue_len: Max frames per queue allowed
+ * @tx_global_lock: XXX: need comments on this one
+ *
+ * @xps_maps: XXX: need comments on this one
+ *
+ * @trans_start: Time (in jiffies) of last Tx
+ * @watchdog_timeo: Represents the timeout that is used by
+ * the watchdog ( see dev_watchdog() )
+ * @watchdog_timer: List of timers
+ *
+ * @pcpu_refcnt: Number of references to this device
+ * @todo_list: Delayed register/unregister
+ * @link_watch_list: XXX: need comments on this one
+ *
+ * @reg_state: Register/unregister state machine
+ * @dismantle: Device is going to be freed
+ * @rtnl_link_state: This enum represents the phases of creating
+ * a new link
+ *
+ * @destructor: Called from unregister,
+ * can be used to call free_netdev
+ * @npinfo: XXX: need comments on this one
+ * @nd_net: Network namespace this network device is inside
+ *
+ * @ml_priv: Mid-layer private
+ * @lstats: Loopback statistics
+ * @tstats: Tunnel statistics
+ * @dstats: Dummy statistics
+ * @vstats: Virtual ethernet statistics
+ *
+ * @garp_port: GARP
+ * @mrp_port: MRP
+ *
+ * @dev: Class/net/name entry
+ * @sysfs_groups: Space for optional device, statistics and wireless
+ * sysfs groups
+ *
+ * @sysfs_rx_queue_group: Space for optional per-rx queue attributes
+ * @rtnl_link_ops: Rtnl_link_ops
+ *
+ * @gso_max_size: Maximum size of generic segmentation offload
+ * @gso_max_segs: Maximum number of segments that can be passed to the
+ * NIC for GSO
+ * @gso_min_segs: Minimum number of segments that can be passed to the
+ * NIC for GSO
+ *
+ * @dcbnl_ops: Data Center Bridging netlink ops
+ * @num_tc: Number of traffic classes in the net device
+ * @tc_to_txq: XXX: need comments on this one
+ * @prio_tc_map: XXX: need comments on this one
+ *
+ * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp
+ *
+ * @priomap: XXX: need comments on this one
+ * @phydev: Physical device may attach itself
+ * for hardware timestamping
+ *
+ * @qdisc_tx_busylock: XXX: need comments on this one
+ *
+ * FIXME: cleanup struct net_device such that network protocol info
+ * moves out.
+ */
+
+struct net_device {
+ char name[IFNAMSIZ];
+ struct hlist_node name_hlist;
+ char *ifalias;
+ /*
+ * I/O specific fields
+ * FIXME: Merge these and struct ifmap into one
+ */
+ unsigned long mem_end;
+ unsigned long mem_start;
+ unsigned long base_addr;
+ int irq;
+
+ atomic_t carrier_changes;
+
+ /*
+ * Some hardware also needs these fields (state,dev_list,
+ * napi_list,unreg_list,close_list) but they are not
+ * part of the usual set specified in Space.c.
+ */
+
+ unsigned long state;
+
+ struct list_head dev_list;
+ struct list_head napi_list;
+ struct list_head unreg_list;
+ struct list_head close_list;
+ struct list_head ptype_all;
+ struct list_head ptype_specific;
+
+ struct {
+ struct list_head upper;
+ struct list_head lower;
+ } adj_list;
+
+ struct {
+ struct list_head upper;
+ struct list_head lower;
+ } all_adj_list;
+
+ netdev_features_t features;
+ netdev_features_t hw_features;
+ netdev_features_t wanted_features;
+ netdev_features_t vlan_features;
+ netdev_features_t hw_enc_features;
+ netdev_features_t mpls_features;
+
+ int ifindex;
+ int group;
+
+ struct net_device_stats stats;
+
+ atomic_long_t rx_dropped;
+ atomic_long_t tx_dropped;
+
+#ifdef CONFIG_WIRELESS_EXT
+ const struct iw_handler_def * wireless_handlers;
+ struct iw_public_data * wireless_data;
+#endif
+ const struct net_device_ops *netdev_ops;
+ const struct ethtool_ops *ethtool_ops;
+#ifdef CONFIG_NET_SWITCHDEV
+ const struct swdev_ops *swdev_ops;
+#endif
+
+ const struct header_ops *header_ops;
+
+ unsigned int flags;
+ unsigned int priv_flags;
+
+ unsigned short gflags;
+ unsigned short padded;
+
+ unsigned char operstate;
+ unsigned char link_mode;
+
+ unsigned char if_port;
+ unsigned char dma;
+
+ unsigned int mtu;
+ unsigned short type;
+ unsigned short hard_header_len;
+
+ unsigned short needed_headroom;
+ unsigned short needed_tailroom;
+
+ /* Interface address info. */
+ unsigned char perm_addr[MAX_ADDR_LEN];
+ unsigned char addr_assign_type;
+ unsigned char addr_len;
+ unsigned short neigh_priv_len;
+ unsigned short dev_id;
+ unsigned short dev_port;
+ spinlock_t addr_list_lock;
+ unsigned char name_assign_type;
+ bool uc_promisc;
+ struct netdev_hw_addr_list uc;
+ struct netdev_hw_addr_list mc;
+ struct netdev_hw_addr_list dev_addrs;
+
+#ifdef CONFIG_SYSFS
+ struct kset *queues_kset;
+#endif
+ unsigned int promiscuity;
+ unsigned int allmulti;
+
+
+ /* Protocol specific pointers */
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ struct vlan_info __rcu *vlan_info;
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_switch_tree *dsa_ptr;
+#endif
+#if IS_ENABLED(CONFIG_TIPC)
+ struct tipc_bearer __rcu *tipc_ptr;
+#endif
+ void *atalk_ptr;
+ struct in_device __rcu *ip_ptr;
+ struct dn_dev __rcu *dn_ptr;
+ struct inet6_dev __rcu *ip6_ptr;
+ void *ax25_ptr;
+ struct wireless_dev *ieee80211_ptr;
+ struct wpan_dev *ieee802154_ptr;
+#if IS_ENABLED(CONFIG_MPLS_ROUTING)
+ struct mpls_dev __rcu *mpls_ptr;
+#endif
+
+/*
+ * Cache lines mostly used on receive path (including eth_type_trans())
+ */
+ unsigned long last_rx;
+
+ /* Interface address info used in eth_type_trans() */
+ unsigned char *dev_addr;
+
+
+#ifdef CONFIG_SYSFS
+ struct netdev_rx_queue *_rx;
+
+ unsigned int num_rx_queues;
+ unsigned int real_num_rx_queues;
+
+#endif
+
+ unsigned long gro_flush_timeout;
+ rx_handler_func_t __rcu *rx_handler;
+ void __rcu *rx_handler_data;
+
+ struct netdev_queue __rcu *ingress_queue;
+ unsigned char broadcast[MAX_ADDR_LEN];
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rx_cpu_rmap;
+#endif
+ struct hlist_node index_hlist;
+
+/*
+ * Cache lines mostly used on transmit path
+ */
+ struct netdev_queue *_tx ____cacheline_aligned_in_smp;
+ unsigned int num_tx_queues;
+ unsigned int real_num_tx_queues;
+ struct Qdisc *qdisc;
+ unsigned long tx_queue_len;
+ spinlock_t tx_global_lock;
+ int watchdog_timeo;
+
+#ifdef CONFIG_XPS
+ struct xps_dev_maps __rcu *xps_maps;
+#endif
+
+ /* These may be needed for future network-power-down code. */
+
+ /*
+ * trans_start here is expensive for high speed devices on SMP,
+ * please use netdev_queue->trans_start instead.
+ */
+ unsigned long trans_start;
+
+ struct timer_list watchdog_timer;
+
+ int __percpu *pcpu_refcnt;
+ struct list_head todo_list;
+
+ struct list_head link_watch_list;
+
+ enum { NETREG_UNINITIALIZED=0,
+ NETREG_REGISTERED, /* completed register_netdevice */
+ NETREG_UNREGISTERING, /* called unregister_netdevice */
+ NETREG_UNREGISTERED, /* completed unregister todo */
+ NETREG_RELEASED, /* called free_netdev */
+ NETREG_DUMMY, /* dummy device for NAPI poll */
+ } reg_state:8;
+
+ bool dismantle;
+
+ enum {
+ RTNL_LINK_INITIALIZED,
+ RTNL_LINK_INITIALIZING,
+ } rtnl_link_state:16;
+
+ void (*destructor)(struct net_device *dev);
+
+#ifdef CONFIG_NETPOLL
+ struct netpoll_info __rcu *npinfo;
+#endif
+
+ possible_net_t nd_net;
+
+ /* mid-layer private */
+ union {
+ void *ml_priv;
+ struct pcpu_lstats __percpu *lstats;
+ struct pcpu_sw_netstats __percpu *tstats;
+ struct pcpu_dstats __percpu *dstats;
+ struct pcpu_vstats __percpu *vstats;
+ };
+
+ struct garp_port __rcu *garp_port;
+ struct mrp_port __rcu *mrp_port;
+
+ struct device dev;
+ const struct attribute_group *sysfs_groups[4];
+ const struct attribute_group *sysfs_rx_queue_group;
+
+ const struct rtnl_link_ops *rtnl_link_ops;
+
+ /* for setting kernel sock attribute on TCP connection setup */
+#define GSO_MAX_SIZE 65536
+ unsigned int gso_max_size;
+#define GSO_MAX_SEGS 65535
+ u16 gso_max_segs;
+ u16 gso_min_segs;
+#ifdef CONFIG_DCB
+ const struct dcbnl_rtnl_ops *dcbnl_ops;
+#endif
+ u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ u8 prio_tc_map[TC_BITMASK + 1];
+
+#if IS_ENABLED(CONFIG_FCOE)
+ unsigned int fcoe_ddp_xid;
+#endif
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+ struct netprio_map __rcu *priomap;
+#endif
+ struct phy_device *phydev;
+ struct lock_class_key *qdisc_tx_busylock;
+};
+#define to_net_dev(d) container_of(d, struct net_device, dev)
+
+#define NETDEV_ALIGN 32
+
+static inline
+int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
+{
+ return dev->prio_tc_map[prio & TC_BITMASK];
+}
+
+static inline
+int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
+{
+ if (tc >= dev->num_tc)
+ return -EINVAL;
+
+ dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
+ return 0;
+}
+
+static inline
+void netdev_reset_tc(struct net_device *dev)
+{
+ dev->num_tc = 0;
+ memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+ memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+
+static inline
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+ if (tc >= dev->num_tc)
+ return -EINVAL;
+
+ dev->tc_to_txq[tc].count = count;
+ dev->tc_to_txq[tc].offset = offset;
+ return 0;
+}
+
+static inline
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+ if (num_tc > TC_MAX_QUEUE)
+ return -EINVAL;
+
+ dev->num_tc = num_tc;
+ return 0;
+}
+
+static inline
+int netdev_get_num_tc(struct net_device *dev)
+{
+ return dev->num_tc;
+}
+
+static inline
+struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
+ unsigned int index)
+{
+ return &dev->_tx[index];
+}
+
+static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
+ const struct sk_buff *skb)
+{
+ return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+}
+
+static inline void netdev_for_each_tx_queue(struct net_device *dev,
+ void (*f)(struct net_device *,
+ struct netdev_queue *,
+ void *),
+ void *arg)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ f(dev, &dev->_tx[i], arg);
+}
+
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb,
+ void *accel_priv);
+
+/*
+ * Net namespace inlines
+ */
+static inline
+struct net *dev_net(const struct net_device *dev)
+{
+ return read_pnet(&dev->nd_net);
+}
+
+static inline
+void dev_net_set(struct net_device *dev, struct net *net)
+{
+ write_pnet(&dev->nd_net, net);
+}
+
+static inline bool netdev_uses_dsa(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ if (dev->dsa_ptr != NULL)
+ return dsa_uses_tagged_protocol(dev->dsa_ptr);
+#endif
+ return false;
+}
+
+/**
+ * netdev_priv - access network device private data
+ * @dev: network device
+ *
+ * Get network device private data
+ */
+static inline void *netdev_priv(const struct net_device *dev)
+{
+ return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
+}
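+
+/* Example (illustrative sketch; "struct my_priv" and my_open() are
+ * hypothetical names): a driver that allocated its netdev with a private
+ * area, e.g. alloc_netdev(sizeof(struct my_priv), ...), reaches that area
+ * with netdev_priv():
+ *
+ *    struct my_priv {
+ *        struct napi_struct napi;
+ *        void __iomem *regs;
+ *    };
+ *
+ *    static int my_open(struct net_device *dev)
+ *    {
+ *        struct my_priv *priv = netdev_priv(dev);
+ *
+ *        ... priv points just past struct net_device, NETDEV_ALIGN aligned ...
+ *        return 0;
+ *    }
+ */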
+
+/* Set the sysfs physical device reference for the network logical device;
+ * if set prior to registration, a symlink is created during initialization.
+ */
+#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
+
+/* Set the sysfs device type for the network logical device to allow
+ * fine-grained identification of different network device types. For
+ * example Ethernet, Wireless LAN, Bluetooth, WiMAX, etc.
+ */
+#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
+
+/* Default NAPI poll() weight
+ * Device drivers are strongly advised not to use a bigger value
+ */
+#define NAPI_POLL_WEIGHT 64
+
+/**
+ * netif_napi_add - initialize a napi context
+ * @dev: network device
+ * @napi: napi context
+ * @poll: polling function
+ * @weight: default weight
+ *
+ * netif_napi_add() must be used to initialize a napi context prior to calling
+ * *any* of the other napi related functions.
+ */
+void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight);
+
+/**
+ * netif_napi_del - remove a napi context
+ * @napi: napi context
+ *
+ * netif_napi_del() removes a napi context from the network device napi list
+ */
+void netif_napi_del(struct napi_struct *napi);
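+
+/* Example (illustrative sketch; my_poll() and the driver-private struct
+ * holding the napi_struct are hypothetical): a driver registers its poll
+ * handler with netif_napi_add() and removes it again on teardown:
+ *
+ *    static int my_poll(struct napi_struct *napi, int budget)
+ *    {
+ *        int done = 0;
+ *
+ *        ... process up to budget received packets, counting them in done ...
+ *        if (done < budget)
+ *            napi_complete(napi);
+ *        return done;
+ *    }
+ *
+ *    netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
+ *    ...
+ *    netif_napi_del(&priv->napi);
+ */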
+
+struct napi_gro_cb {
+ /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
+ void *frag0;
+
+ /* Length of frag0. */
+ unsigned int frag0_len;
+
+ /* This indicates where we are processing relative to skb->data. */
+ int data_offset;
+
+ /* This is non-zero if the packet cannot be merged with the new skb. */
+ u16 flush;
+
+ /* Save the IP ID here and check when we get to the transport layer */
+ u16 flush_id;
+
+ /* Number of segments aggregated. */
+ u16 count;
+
+ /* Start offset for remote checksum offload */
+ u16 gro_remcsum_start;
+
+ /* jiffies when first packet was created/queued */
+ unsigned long age;
+
+ /* Used in ipv6_gro_receive() and foo-over-udp */
+ u16 proto;
+
+ /* This is non-zero if the packet may be of the same flow. */
+ u8 same_flow:1;
+
+ /* Used in udp_gro_receive */
+ u8 udp_mark:1;
+
+ /* GRO checksum is valid */
+ u8 csum_valid:1;
+
+ /* Number of checksums via CHECKSUM_UNNECESSARY */
+ u8 csum_cnt:3;
+
+ /* Free the skb? */
+ u8 free:2;
+#define NAPI_GRO_FREE 1
+#define NAPI_GRO_FREE_STOLEN_HEAD 2
+
+ /* Used in foo-over-udp, set in udp[46]_gro_receive */
+ u8 is_ipv6:1;
+
+ /* 7 bit hole */
+
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
+
+ /* used in skb_gro_receive() slow path */
+ struct sk_buff *last;
+};
+
+#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+
+struct packet_type {
+ __be16 type; /* This is really htons(ether_type). */
+ struct net_device *dev; /* NULL is wildcarded here */
+ int (*func) (struct sk_buff *,
+ struct net_device *,
+ struct packet_type *,
+ struct net_device *);
+ bool (*id_match)(struct packet_type *ptype,
+ struct sock *sk);
+ void *af_packet_priv;
+ struct list_head list;
+};
+
+struct offload_callbacks {
+ struct sk_buff *(*gso_segment)(struct sk_buff *skb,
+ netdev_features_t features);
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb);
+ int (*gro_complete)(struct sk_buff *skb, int nhoff);
+};
+
+struct packet_offload {
+ __be16 type; /* This is really htons(ether_type). */
+ struct offload_callbacks callbacks;
+ struct list_head list;
+};
+
+struct udp_offload;
+
+struct udp_offload_callbacks {
+ struct sk_buff **(*gro_receive)(struct sk_buff **head,
+ struct sk_buff *skb,
+ struct udp_offload *uoff);
+ int (*gro_complete)(struct sk_buff *skb,
+ int nhoff,
+ struct udp_offload *uoff);
+};
+
+struct udp_offload {
+ __be16 port;
+ u8 ipproto;
+ struct udp_offload_callbacks callbacks;
+};
+
+/* Often-modified stats are per-cpu; others are shared (netdev->stats) */
+struct pcpu_sw_netstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+#define netdev_alloc_pcpu_stats(type) \
+({ \
+ typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
+ if (pcpu_stats) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
+})
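+
+/* Example (illustrative sketch; the enclosing ndo_init() context and error
+ * handling are assumptions): typical use of netdev_alloc_pcpu_stats() for
+ * the per-cpu tunnel stats pointer in struct net_device above:
+ *
+ *    dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ *    if (!dev->tstats)
+ *        return -ENOMEM;
+ *    ...
+ *    free_percpu(dev->tstats);
+ */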
+
+#include <linux/notifier.h>
+
+/* netdevice notifier chain. Please remember to update the rtnetlink
+ * notification exclusion list in rtnetlink_event() when adding new
+ * types.
+ */
+#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
+#define NETDEV_DOWN 0x0002
+#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+ detected a hardware crash and restarted
+ - we can use this eg to kick tcp sessions
+ once done */
+#define NETDEV_CHANGE 0x0004 /* Notify device state change */
+#define NETDEV_REGISTER 0x0005
+#define NETDEV_UNREGISTER 0x0006
+#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
+#define NETDEV_CHANGEADDR 0x0008
+#define NETDEV_GOING_DOWN 0x0009
+#define NETDEV_CHANGENAME 0x000A
+#define NETDEV_FEAT_CHANGE 0x000B
+#define NETDEV_BONDING_FAILOVER 0x000C
+#define NETDEV_PRE_UP 0x000D
+#define NETDEV_PRE_TYPE_CHANGE 0x000E
+#define NETDEV_POST_TYPE_CHANGE 0x000F
+#define NETDEV_POST_INIT 0x0010
+#define NETDEV_UNREGISTER_FINAL 0x0011
+#define NETDEV_RELEASE 0x0012
+#define NETDEV_NOTIFY_PEERS 0x0013
+#define NETDEV_JOIN 0x0014
+#define NETDEV_CHANGEUPPER 0x0015
+#define NETDEV_RESEND_IGMP 0x0016
+#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
+#define NETDEV_CHANGEINFODATA 0x0018
+#define NETDEV_BONDING_INFO 0x0019
+
+int register_netdevice_notifier(struct notifier_block *nb);
+int unregister_netdevice_notifier(struct notifier_block *nb);
+
+struct netdev_notifier_info {
+ struct net_device *dev;
+};
+
+struct netdev_notifier_change_info {
+ struct netdev_notifier_info info; /* must be first */
+ unsigned int flags_changed;
+};
+
+static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
+ struct net_device *dev)
+{
+ info->dev = dev;
+}
+
+static inline struct net_device *
+netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
+{
+ return info->dev;
+}
+
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
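+
+/* Example (illustrative sketch; my_netdev_event() and my_nb are hypothetical
+ * names): a subsystem can watch device events with a notifier block:
+ *
+ *    static int my_netdev_event(struct notifier_block *nb,
+ *                               unsigned long event, void *ptr)
+ *    {
+ *        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ *
+ *        if (event == NETDEV_UP)
+ *            ... dev just came up ...
+ *        return NOTIFY_DONE;
+ *    }
+ *
+ *    static struct notifier_block my_nb = {
+ *        .notifier_call = my_netdev_event,
+ *    };
+ *
+ *    register_netdevice_notifier(&my_nb);
+ */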
+
+
+extern rwlock_t dev_base_lock; /* Device list lock */
+
+#define for_each_netdev(net, d) \
+ list_for_each_entry(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_reverse(net, d) \
+ list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_rcu(net, d) \
+ list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_safe(net, d, n) \
+ list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_continue(net, d) \
+ list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_continue_rcu(net, d) \
+ list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_in_bond_rcu(bond, slave) \
+ for_each_netdev_rcu(&init_net, slave) \
+ if (netdev_master_upper_dev_get_rcu(slave) == (bond))
+#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
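+
+/* Example (illustrative sketch; "net" is assumed to be a valid struct net
+ * pointer, e.g. &init_net): walking the device list of a namespace under
+ * RCU with the iterators above:
+ *
+ *    struct net_device *d;
+ *
+ *    rcu_read_lock();
+ *    for_each_netdev_rcu(net, d)
+ *        pr_info("%s\n", d->name);
+ *    rcu_read_unlock();
+ *
+ * The non-RCU variants must instead be called with RTNL or dev_base_lock held.
+ */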
+
+static inline struct net_device *next_net_device(struct net_device *dev)
+{
+ struct list_head *lh;
+ struct net *net;
+
+ net = dev_net(dev);
+ lh = dev->dev_list.next;
+ return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
+static inline struct net_device *next_net_device_rcu(struct net_device *dev)
+{
+ struct list_head *lh;
+ struct net *net;
+
+ net = dev_net(dev);
+ lh = rcu_dereference(list_next_rcu(&dev->dev_list));
+ return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
+static inline struct net_device *first_net_device(struct net *net)
+{
+ return list_empty(&net->dev_base_head) ? NULL :
+ net_device_entry(net->dev_base_head.next);
+}
+
+static inline struct net_device *first_net_device_rcu(struct net *net)
+{
+ struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
+
+ return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
+int netdev_boot_setup_check(struct net_device *dev);
+unsigned long netdev_boot_base(const char *prefix, int unit);
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+ const char *hwaddr);
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
+void dev_add_pack(struct packet_type *pt);
+void dev_remove_pack(struct packet_type *pt);
+void __dev_remove_pack(struct packet_type *pt);
+void dev_add_offload(struct packet_offload *po);
+void dev_remove_offload(struct packet_offload *po);
+
+int dev_get_iflink(const struct net_device *dev);
+struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
+ unsigned short mask);
+struct net_device *dev_get_by_name(struct net *net, const char *name);
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
+struct net_device *__dev_get_by_name(struct net *net, const char *name);
+int dev_alloc_name(struct net_device *dev, const char *name);
+int dev_open(struct net_device *dev);
+int dev_close(struct net_device *dev);
+int dev_close_many(struct list_head *head, bool unlink);
+void dev_disable_lro(struct net_device *dev);
+int dev_loopback_xmit(struct sock *sk, struct sk_buff *newskb);
+int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb);
+static inline int dev_queue_xmit(struct sk_buff *skb)
+{
+ return dev_queue_xmit_sk(skb->sk, skb);
+}
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int register_netdevice(struct net_device *dev);
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+void unregister_netdevice_many(struct list_head *head);
+static inline void unregister_netdevice(struct net_device *dev)
+{
+ unregister_netdevice_queue(dev, NULL);
+}
+
+int netdev_refcnt_read(const struct net_device *dev);
+void free_netdev(struct net_device *dev);
+void netdev_freemem(struct net_device *dev);
+void synchronize_net(void);
+int init_dummy_netdev(struct net_device *dev);
+
+DECLARE_PER_CPU(int, xmit_recursion);
+static inline int dev_recursion_level(void)
+{
+ return this_cpu_read(xmit_recursion);
+}
+
+struct net_device *dev_get_by_index(struct net *net, int ifindex);
+struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_restart(struct net_device *dev);
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+
+static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
+{
+ return NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline unsigned int skb_gro_len(const struct sk_buff *skb)
+{
+ return skb->len - NAPI_GRO_CB(skb)->data_offset;
+}
+
+static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
+{
+ NAPI_GRO_CB(skb)->data_offset += len;
+}
+
+static inline void *skb_gro_header_fast(struct sk_buff *skb,
+ unsigned int offset)
+{
+ return NAPI_GRO_CB(skb)->frag0 + offset;
+}
+
+static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
+{
+ return NAPI_GRO_CB(skb)->frag0_len < hlen;
+}
+
+static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
+ unsigned int offset)
+{
+ if (!pskb_may_pull(skb, hlen))
+ return NULL;
+
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+ return skb->data + offset;
+}
+
+static inline void *skb_gro_network_header(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
+ skb_network_offset(skb);
+}
+
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid)
+ NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
+ csum_partial(start, len, 0));
+}
+
+/* GRO checksum functions. These are logical equivalents of the normal
+ * checksum functions (in skbuff.h) except that they operate on the GRO
+ * offsets and fields in sk_buff.
+ */
+
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
+
+static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
+ skb_gro_offset(skb));
+}
+
+static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
+ bool zero_okay,
+ __sum16 check)
+{
+ return ((skb->ip_summed != CHECKSUM_PARTIAL ||
+ skb_checksum_start_offset(skb) <
+ skb_gro_offset(skb)) &&
+ !skb_at_gro_remcsum_start(skb) &&
+ NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ (!zero_okay || check));
+}
+
+static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
+ __wsum psum)
+{
+ if (NAPI_GRO_CB(skb)->csum_valid &&
+ !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
+ return 0;
+
+ NAPI_GRO_CB(skb)->csum = psum;
+
+ return __skb_gro_checksum_complete(skb);
+}
+
+static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
+{
+ if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
+ /* Consume a checksum from CHECKSUM_UNNECESSARY */
+ NAPI_GRO_CB(skb)->csum_cnt--;
+ } else {
+ /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
+ * verified a new top level checksum or an encapsulated one
+ * during GRO. This saves work if we fall back to the normal path.
+ */
+ __skb_incr_checksum_unnecessary(skb);
+ }
+}
+
+#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
+ compute_pseudo) \
+({ \
+ __sum16 __ret = 0; \
+ if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
+ __ret = __skb_gro_checksum_validate_complete(skb, \
+ compute_pseudo(skb, proto)); \
+ if (__ret) \
+ __skb_mark_checksum_bad(skb); \
+ else \
+ skb_gro_incr_csum_unnecessary(skb); \
+ __ret; \
+})
+
+#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
+
+#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
+ compute_pseudo) \
+ __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
+
+#define skb_gro_checksum_simple_validate(skb) \
+ __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
+
+static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
+{
+ return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ !NAPI_GRO_CB(skb)->csum_valid);
+}
+
+static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
+ __sum16 check, __wsum pseudo)
+{
+ NAPI_GRO_CB(skb)->csum = ~pseudo;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+}
+
+#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
+do { \
+ if (__skb_gro_checksum_convert_check(skb)) \
+ __skb_gro_checksum_convert(skb, check, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
+struct gro_remcsum {
+ int offset;
+ __wsum delta;
+};
+
+static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
+{
+ grc->offset = 0;
+ grc->delta = 0;
+}
+
+static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+ int start, int offset,
+ struct gro_remcsum *grc,
+ bool nopartial)
+{
+ __wsum delta;
+
+ BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
+
+ if (!nopartial) {
+ NAPI_GRO_CB(skb)->gro_remcsum_start =
+ ((unsigned char *)ptr + start) - skb->head;
+ return;
+ }
+
+ delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
+
+ grc->offset = (ptr + offset) - (void *)skb->head;
+ grc->delta = delta;
+}
+
+static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
+ struct gro_remcsum *grc)
+{
+ if (!grc->delta)
+ return;
+
+ remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+}
+
+static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ if (!dev->header_ops || !dev->header_ops->create)
+ return 0;
+
+ return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
+}
+
+static inline int dev_parse_header(const struct sk_buff *skb,
+ unsigned char *haddr)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->parse)
+ return 0;
+ return dev->header_ops->parse(skb, haddr);
+}
+
+typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
+static inline int unregister_gifconf(unsigned int family)
+{
+ return register_gifconf(family, NULL);
+}
+
+#ifdef CONFIG_NET_FLOW_LIMIT
+#define FLOW_LIMIT_HISTORY (1 << 7) /* must be a power of 2 and must not overflow the buckets */
+struct sd_flow_limit {
+ u64 count;
+ unsigned int num_buckets;
+ unsigned int history_head;
+ u16 history[FLOW_LIMIT_HISTORY];
+ u8 buckets[];
+};
+
+extern int netdev_flow_limit_table_len;
+#endif /* CONFIG_NET_FLOW_LIMIT */
+
+/*
+ * Incoming packets are placed on per-cpu queues
+ */
+struct softnet_data {
+ struct list_head poll_list;
+ struct sk_buff_head process_queue;
+
+ /* stats */
+ unsigned int processed;
+ unsigned int time_squeeze;
+ unsigned int cpu_collision;
+ unsigned int received_rps;
+#ifdef CONFIG_RPS
+ struct softnet_data *rps_ipi_list;
+#endif
+#ifdef CONFIG_NET_FLOW_LIMIT
+ struct sd_flow_limit __rcu *flow_limit;
+#endif
+ struct Qdisc *output_queue;
+ struct Qdisc **output_queue_tailp;
+ struct sk_buff *completion_queue;
+
+#ifdef CONFIG_RPS
+ /* Elements below can be accessed between CPUs for RPS */
+ struct call_single_data csd ____cacheline_aligned_in_smp;
+ struct softnet_data *rps_ipi_next;
+ unsigned int cpu;
+ unsigned int input_queue_head;
+ unsigned int input_queue_tail;
+#endif
+ unsigned int dropped;
+ struct sk_buff_head input_pkt_queue;
+ struct napi_struct backlog;
+
+};
+
+static inline void input_queue_head_incr(struct softnet_data *sd)
+{
+#ifdef CONFIG_RPS
+ sd->input_queue_head++;
+#endif
+}
+
+static inline void input_queue_tail_incr_save(struct softnet_data *sd,
+ unsigned int *qtail)
+{
+#ifdef CONFIG_RPS
+ *qtail = ++sd->input_queue_tail;
+#endif
+}
+
+DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+
+void __netif_schedule(struct Qdisc *q);
+void netif_schedule_queue(struct netdev_queue *txq);
+
+static inline void netif_tx_schedule_all(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ netif_schedule_queue(netdev_get_tx_queue(dev, i));
+}
+
+static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+{
+ clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
+}
+
+/**
+ * netif_start_queue - allow transmit
+ * @dev: network device
+ *
+ * Allow upper layers to call the device hard_start_xmit routine.
+ */
+static inline void netif_start_queue(struct net_device *dev)
+{
+ netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_start_all_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_start_queue(txq);
+ }
+}
+
+void netif_tx_wake_queue(struct netdev_queue *dev_queue);
+
+/**
+ * netif_wake_queue - restart transmit
+ * @dev: network device
+ *
+ * Allow upper layers to call the device hard_start_xmit routine.
+ * Used for flow control when transmit resources are available.
+ */
+static inline void netif_wake_queue(struct net_device *dev)
+{
+ netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_wake_all_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_wake_queue(txq);
+ }
+}
+
+static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+{
+ if (WARN_ON(!dev_queue)) {
+ pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
+ return;
+ }
+ set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
+}
+
+/**
+ * netif_stop_queue - stop transmit
+ * @dev: network device
+ *
+ * Stop upper layers from calling the device hard_start_xmit routine.
+ * Used for flow control when transmit resources are unavailable.
+ */
+static inline void netif_stop_queue(struct net_device *dev)
+{
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
+}
+
+static inline void netif_tx_stop_all_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ netif_tx_stop_queue(txq);
+ }
+}
+
+static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+{
+ return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
+}
+
+/**
+ * netif_queue_stopped - test if transmit queue is flow-blocked
+ * @dev: network device
+ *
+ * Test if transmit queue on device is currently unable to send.
+ */
+static inline bool netif_queue_stopped(const struct net_device *dev)
+{
+ return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
+}
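+
+/* Example (illustrative sketch; my_ring_full() and my_ring_has_room() are
+ * hypothetical driver predicates): the classic single-queue flow-control
+ * pattern built from the helpers above:
+ *
+ *    In ndo_start_xmit():
+ *        if (my_ring_full(priv))
+ *            netif_stop_queue(dev);
+ *
+ *    In the TX completion handler:
+ *        if (netif_queue_stopped(dev) && my_ring_has_room(priv))
+ *            netif_wake_queue(dev);
+ */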
+
+static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
+{
+ return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline bool
+netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+ return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
+}
+
+static inline bool
+netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
+{
+ return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
+}
+
+/**
+ * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
+ * @dev_queue: pointer to transmit queue
+ *
+ * BQL enabled drivers might use this helper in their ndo_start_xmit(),
+ * to give an appropriate hint to the CPU.
+ */
+static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_BQL
+ prefetchw(&dev_queue->dql.num_queued);
+#endif
+}
+
+/**
+ * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
+ * @dev_queue: pointer to transmit queue
+ *
+ * BQL enabled drivers might use this helper in their TX completion path,
+ * to give an appropriate hint to the CPU.
+ */
+static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_BQL
+ prefetchw(&dev_queue->dql.limit);
+#endif
+}
+
+static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes)
+{
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+
+ if (likely(dql_avail(&dev_queue->dql) >= 0))
+ return;
+
+ set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+
+ /*
+ * The XOFF flag must be set before checking the dql_avail below,
+ * because in netdev_tx_completed_queue we update the dql_completed
+ * before checking the XOFF flag.
+ */
+ smp_mb();
+
+ /* check again in case another CPU has just made room avail */
+ if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+ clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+#endif
+}
+
+/**
+ * netdev_sent_queue - report the number of bytes queued to hardware
+ * @dev: network device
+ * @bytes: number of bytes queued to the hardware device queue
+ *
+ * Report the number of bytes queued for sending/completion to the network
+ * device hardware queue. @bytes should be a good approximation and should
+ * exactly match the @bytes reported later via netdev_completed_queue().
+ */
+static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
+{
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
+}
+
+static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
+ unsigned int pkts, unsigned int bytes)
+{
+#ifdef CONFIG_BQL
+ if (unlikely(!bytes))
+ return;
+
+ dql_completed(&dev_queue->dql, bytes);
+
+ /*
+ * Without the memory barrier there is a small possibility that
+ * netdev_tx_sent_queue will miss the update and cause the queue to
+ * be stopped forever
+ */
+ smp_mb();
+
+ if (dql_avail(&dev_queue->dql) < 0)
+ return;
+
+ if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
+ netif_schedule_queue(dev_queue);
+#endif
+}
+
+/**
+ * netdev_completed_queue - report bytes and packets completed by device
+ * @dev: network device
+ * @pkts: actual number of packets sent over the medium
+ * @bytes: actual number of bytes sent over the medium
+ *
+ * Report the number of bytes and packets transmitted by the network device
+ * hardware queue over the physical medium; @bytes must exactly match the
+ * @bytes amount passed to netdev_sent_queue()
+ */
+static inline void netdev_completed_queue(struct net_device *dev,
+ unsigned int pkts, unsigned int bytes)
+{
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
+}
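+
+/* Example (illustrative sketch; the pkts_completed/bytes_completed
+ * bookkeeping is an assumption about the driver): how a single-queue driver
+ * feeds BQL with the two helpers above:
+ *
+ *    In ndo_start_xmit(), once the skb is posted to the hardware ring:
+ *        netdev_sent_queue(dev, skb->len);
+ *
+ *    In the TX completion path, after reclaiming descriptors:
+ *        netdev_completed_queue(dev, pkts_completed, bytes_completed);
+ *
+ * Multiqueue drivers use netdev_tx_sent_queue()/netdev_tx_completed_queue()
+ * on the individual struct netdev_queue instead.
+ */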
+
+static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+{
+#ifdef CONFIG_BQL
+ clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
+ dql_reset(&q->dql);
+#endif
+}
+
+/**
+ * netdev_reset_queue - reset the packets and bytes count of a network device
+ * @dev_queue: network device
+ *
+ * Reset the bytes and packet count of a network device and clear the
+ * software flow control OFF bit for this network device
+ */
+static inline void netdev_reset_queue(struct net_device *dev_queue)
+{
+ netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
+}
+
+/**
+ * netdev_cap_txqueue - check if selected tx queue exceeds device queues
+ * @dev: network device
+ * @queue_index: given tx queue index
+ *
+ * Returns 0 if given tx queue index >= number of device tx queues,
+ * otherwise returns the originally passed tx queue index.
+ */
+static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+ if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+ net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+ dev->name, queue_index,
+ dev->real_num_tx_queues);
+ return 0;
+ }
+
+ return queue_index;
+}
+
+/**
+ * netif_running - test if up
+ * @dev: network device
+ *
+ * Test if the device has been brought up.
+ */
+static inline bool netif_running(const struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_START, &dev->state);
+}
+
+/*
+ * Routines to manage the subqueues on a device. We only need start,
+ * stop, and a check whether a subqueue is stopped. All other device management is
+ * done at the overall netdevice level.
+ * Also test the device if we're multiqueue.
+ */
+
+/**
+ * netif_start_subqueue - allow sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Start individual transmit queue of a device with multiple transmit queues.
+ */
+static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+ netif_tx_start_queue(txq);
+}
+
+/**
+ * netif_stop_subqueue - stop sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Stop individual transmit queue of a device with multiple transmit queues.
+ */
+static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+ netif_tx_stop_queue(txq);
+}
+
+/**
+ * netif_subqueue_stopped - test status of subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Check individual transmit queue of a device with multiple transmit queues.
+ */
+static inline bool __netif_subqueue_stopped(const struct net_device *dev,
+ u16 queue_index)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+ return netif_tx_queue_stopped(txq);
+}
+
+static inline bool netif_subqueue_stopped(const struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
+}
+
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
+
+#ifdef CONFIG_XPS
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+ u16 index);
+#else
+static inline int netif_set_xps_queue(struct net_device *dev,
+ const struct cpumask *mask,
+ u16 index)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Returns a Tx hash for the given packet, using dev->real_num_tx_queues
+ * as the distribution range limit for the returned value.
+ */
+static inline u16 skb_tx_hash(const struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
+}
+
+/**
+ * netif_is_multiqueue - test if device has multiple transmit queues
+ * @dev: network device
+ *
+ * Check if device has multiple transmit queues
+ */
+static inline bool netif_is_multiqueue(const struct net_device *dev)
+{
+ return dev->num_tx_queues > 1;
+}
+
+int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
+
+#ifdef CONFIG_SYSFS
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
+#else
+static inline int netif_set_real_num_rx_queues(struct net_device *dev,
+ unsigned int rxq)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SYSFS
+static inline unsigned int get_netdev_rx_queue_index(
+ struct netdev_rx_queue *queue)
+{
+ struct net_device *dev = queue->dev;
+ int index = queue - dev->_rx;
+
+ BUG_ON(index >= dev->num_rx_queues);
+ return index;
+}
+#endif
+
+#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
+int netif_get_num_default_rss_queues(void);
+
+enum skb_free_reason {
+ SKB_REASON_CONSUMED,
+ SKB_REASON_DROPPED,
+};
+
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
+
+/*
+ * It is not allowed to call kfree_skb() or consume_skb() from hardware
+ * interrupt context or with hardware interrupts being disabled.
+ * (in_irq() || irqs_disabled())
+ *
+ * We provide four helpers that can be used in the following contexts:
+ *
+ * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
+ * replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
+ * Typically used in place of consume_skb(skb) in TX completion path
+ *
+ * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
+ * replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
+ * and consumed a packet. Used in place of consume_skb(skb)
+ */
+static inline void dev_kfree_skb_irq(struct sk_buff *skb)
+{
+ __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_irq(struct sk_buff *skb)
+{
+ __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+}
+
+static inline void dev_kfree_skb_any(struct sk_buff *skb)
+{
+ __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_any(struct sk_buff *skb)
+{
+ __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+}
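+
+/* Example (illustrative sketch; tx_ok is a hypothetical status flag):
+ * choosing among the helpers above in a TX completion handler that may run
+ * in hard-irq context:
+ *
+ *    if (tx_ok)
+ *        dev_consume_skb_any(skb);    ... transmitted successfully ...
+ *    else
+ *        dev_kfree_skb_any(skb);      ... counts as a drop ...
+ */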
+
+int netif_rx(struct sk_buff *skb);
+int netif_rx_ni(struct sk_buff *skb);
+int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb);
+static inline int netif_receive_skb(struct sk_buff *skb)
+{
+ return netif_receive_skb_sk(skb->sk, skb);
+}
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
+void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+struct sk_buff *napi_get_frags(struct napi_struct *napi);
+gro_result_t napi_gro_frags(struct napi_struct *napi);
+struct packet_offload *gro_find_receive_by_type(__be16 type);
+struct packet_offload *gro_find_complete_by_type(__be16 type);
+
+static inline void napi_free_frags(struct napi_struct *napi)
+{
+ kfree_skb(napi->skb);
+ napi->skb = NULL;
+}
+
+int netdev_rx_handler_register(struct net_device *dev,
+ rx_handler_func_t *rx_handler,
+ void *rx_handler_data);
+void netdev_rx_handler_unregister(struct net_device *dev);
+
+bool dev_valid_name(const char *name);
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ethtool(struct net *net, struct ifreq *);
+unsigned int dev_get_flags(const struct net_device *);
+int __dev_change_flags(struct net_device *, unsigned int flags);
+int dev_change_flags(struct net_device *, unsigned int);
+void __dev_notify_flags(struct net_device *, unsigned int old_flags,
+ unsigned int gchanges);
+int dev_change_name(struct net_device *, const char *);
+int dev_set_alias(struct net_device *, const char *, size_t);
+int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int dev_set_mtu(struct net_device *, int);
+void dev_set_group(struct net_device *, int);
+int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_change_carrier(struct net_device *, bool new_carrier);
+int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
+int dev_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len);
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, int *ret);
+int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
+bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
+
+extern int netdev_budget;
+
+/* Called by rtnetlink.c:rtnl_unlock() */
+void netdev_run_todo(void);
+
+/**
+ * dev_put - release reference to device
+ * @dev: network device
+ *
+ * Release reference to device to allow it to be freed.
+ */
+static inline void dev_put(struct net_device *dev)
+{
+ this_cpu_dec(*dev->pcpu_refcnt);
+}
+
+/**
+ * dev_hold - get reference to device
+ * @dev: network device
+ *
+ * Hold reference to device to keep it from being freed.
+ */
+static inline void dev_hold(struct net_device *dev)
+{
+ this_cpu_inc(*dev->pcpu_refcnt);
+}
+
+/* Carrier loss detection, dial on demand. The functions netif_carrier_on
+ * and _off may be called from IRQ context, but it is the caller
+ * who is responsible for serialization of these calls.
+ *
+ * The name carrier is inappropriate, these functions should really be
+ * called netif_lowerlayer_*() because they represent the state of any
+ * kind of lower layer not just hardware media.
+ */
+
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_fire_event(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
+
+/**
+ * netif_carrier_ok - test if carrier present
+ * @dev: network device
+ *
+ * Check if carrier is present on device
+ */
+static inline bool netif_carrier_ok(const struct net_device *dev)
+{
+ return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
+}
+
+unsigned long dev_trans_start(struct net_device *dev);
+
+void __netdev_watchdog_up(struct net_device *dev);
+
+void netif_carrier_on(struct net_device *dev);
+
+void netif_carrier_off(struct net_device *dev);
+
+/**
+ * netif_dormant_on - mark device as dormant.
+ * @dev: network device
+ *
+ * Mark device as dormant (as per RFC2863).
+ *
+ * The dormant state indicates that the relevant interface is not
+ * actually in a condition to pass packets (i.e., it is not 'up') but is
+ * in a "pending" state, waiting for some external event. For "on-
+ * demand" interfaces, this new state identifies the situation where the
+ * interface is waiting for events to place it in the up state.
+ *
+ */
+static inline void netif_dormant_on(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
+ linkwatch_fire_event(dev);
+}
+
+/**
+ * netif_dormant_off - set device as not dormant.
+ * @dev: network device
+ *
+ * Clear the dormant state of the device.
+ */
+static inline void netif_dormant_off(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
+ linkwatch_fire_event(dev);
+}
+
+/**
+ * netif_dormant - test if device is dormant
+ * @dev: network device
+ *
+ * Check if the device is in the dormant state
+ */
+static inline bool netif_dormant(const struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_DORMANT, &dev->state);
+}
+
+
+/**
+ * netif_oper_up - test if device is operational
+ * @dev: network device
+ *
+ * Check if the device is operational
+ */
+static inline bool netif_oper_up(const struct net_device *dev)
+{
+ return (dev->operstate == IF_OPER_UP ||
+ dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
+}
+
+/**
+ * netif_device_present - is device available or removed
+ * @dev: network device
+ *
+ * Check if device has not been removed from system.
+ */
+static inline bool netif_device_present(struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_PRESENT, &dev->state);
+}
+
+void netif_device_detach(struct net_device *dev);
+
+void netif_device_attach(struct net_device *dev);
+
+/*
+ * Network interface message level settings
+ */
+
+enum {
+ NETIF_MSG_DRV = 0x0001,
+ NETIF_MSG_PROBE = 0x0002,
+ NETIF_MSG_LINK = 0x0004,
+ NETIF_MSG_TIMER = 0x0008,
+ NETIF_MSG_IFDOWN = 0x0010,
+ NETIF_MSG_IFUP = 0x0020,
+ NETIF_MSG_RX_ERR = 0x0040,
+ NETIF_MSG_TX_ERR = 0x0080,
+ NETIF_MSG_TX_QUEUED = 0x0100,
+ NETIF_MSG_INTR = 0x0200,
+ NETIF_MSG_TX_DONE = 0x0400,
+ NETIF_MSG_RX_STATUS = 0x0800,
+ NETIF_MSG_PKTDATA = 0x1000,
+ NETIF_MSG_HW = 0x2000,
+ NETIF_MSG_WOL = 0x4000,
+};
+
+#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
+#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
+#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
+#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
+#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
+#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
+#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
+#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
+#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
+#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
+#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
+#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
+#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
+
+static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
+{
+ /* use default */
+ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+ return default_msg_enable_bits;
+ if (debug_value == 0) /* no output */
+ return 0;
+ /* set low N bits */
+ return (1 << debug_value) - 1;
+}
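+
+/* Example (illustrative sketch; "debug", "priv" and the default mask are
+ * assumptions): a driver commonly seeds msg_enable from a module parameter
+ * and then gates its messages with the netif_msg_*() macros above:
+ *
+ *    priv->msg_enable = netif_msg_init(debug,
+ *                                      NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ *                                      NETIF_MSG_LINK);
+ *
+ *    if (netif_msg_link(priv))
+ *        netdev_info(dev, "link up\n");
+ */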
+
+static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+{
+ spin_lock(&txq->_xmit_lock);
+ txq->xmit_lock_owner = cpu;
+}
+
+static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+{
+ spin_lock_bh(&txq->_xmit_lock);
+ txq->xmit_lock_owner = smp_processor_id();
+}
+
+static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+{
+ bool ok = spin_trylock(&txq->_xmit_lock);
+ if (likely(ok))
+ txq->xmit_lock_owner = smp_processor_id();
+ return ok;
+}
+
+static inline void __netif_tx_unlock(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+ spin_unlock(&txq->_xmit_lock);
+}
+
+static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+{
+ txq->xmit_lock_owner = -1;
+ spin_unlock_bh(&txq->_xmit_lock);
+}
+
+static inline void txq_trans_update(struct netdev_queue *txq)
+{
+ if (txq->xmit_lock_owner != -1)
+ txq->trans_start = jiffies;
+}
+
+/**
+ * netif_tx_lock - grab network device transmit lock
+ * @dev: network device
+ *
+ * Get network device transmit lock
+ */
+static inline void netif_tx_lock(struct net_device *dev)
+{
+ unsigned int i;
+ int cpu;
+
+ spin_lock(&dev->tx_global_lock);
+ cpu = smp_processor_id();
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* We are the only thread of execution doing a
+ * freeze, but we have to grab the _xmit_lock in
+ * order to synchronize with threads which are in
+ * the ->hard_start_xmit() handler and already
+ * checked the frozen bit.
+ */
+ __netif_tx_lock(txq, cpu);
+ set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ __netif_tx_unlock(txq);
+ }
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+ local_bh_disable();
+ netif_tx_lock(dev);
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* No need to grab the _xmit_lock here. If the
+ * queue is not stopped for another reason, we
+ * force a schedule.
+ */
+ clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ netif_schedule_queue(txq);
+ }
+ spin_unlock(&dev->tx_global_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+ netif_tx_unlock(dev);
+ local_bh_enable();
+}
+
+#define HARD_TX_LOCK(dev, txq, cpu) { \
+ if ((dev->features & NETIF_F_LLTX) == 0) { \
+ __netif_tx_lock(txq, cpu); \
+ } \
+}
+
+#define HARD_TX_TRYLOCK(dev, txq) \
+ (((dev->features & NETIF_F_LLTX) == 0) ? \
+ __netif_tx_trylock(txq) : \
+ true )
+
+#define HARD_TX_UNLOCK(dev, txq) { \
+ if ((dev->features & NETIF_F_LLTX) == 0) { \
+ __netif_tx_unlock(txq); \
+ } \
+}
+
+static inline void netif_tx_disable(struct net_device *dev)
+{
+ unsigned int i;
+ int cpu;
+
+ local_bh_disable();
+ cpu = smp_processor_id();
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ __netif_tx_lock(txq, cpu);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock(txq);
+ }
+ local_bh_enable();
+}
+
+static inline void netif_addr_lock(struct net_device *dev)
+{
+ spin_lock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_lock_nested(struct net_device *dev)
+{
+ int subclass = SINGLE_DEPTH_NESTING;
+
+ if (dev->netdev_ops->ndo_get_lock_subclass)
+ subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+
+ spin_lock_nested(&dev->addr_list_lock, subclass);
+}
+
+static inline void netif_addr_lock_bh(struct net_device *dev)
+{
+ spin_lock_bh(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock(struct net_device *dev)
+{
+ spin_unlock(&dev->addr_list_lock);
+}
+
+static inline void netif_addr_unlock_bh(struct net_device *dev)
+{
+ spin_unlock_bh(&dev->addr_list_lock);
+}
+
+/*
+ * dev_addrs walker. Should be used only for read access. Call with
+ * rcu_read_lock held.
+ */
+#define for_each_dev_addr(dev, ha) \
+ list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
+
+/* These functions live elsewhere (drivers/net/net_init.c), but are related */
+
+void ether_setup(struct net_device *dev);
+
+/* Support for loadable net-drivers */
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *),
+ unsigned int txqs, unsigned int rxqs);
+#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+ alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
+
+#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
+ alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
+ count)
+
+int register_netdev(struct net_device *dev);
+void unregister_netdev(struct net_device *dev);
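+
+/* Example (illustrative sketch; "struct my_priv" and the "myeth%d" name
+ * template are hypothetical): the usual allocate / register / unregister /
+ * free life cycle for an Ethernet-style device:
+ *
+ *    struct net_device *dev;
+ *    int err;
+ *
+ *    dev = alloc_netdev(sizeof(struct my_priv), "myeth%d",
+ *                       NET_NAME_UNKNOWN, ether_setup);
+ *    if (!dev)
+ *        return -ENOMEM;
+ *    err = register_netdev(dev);
+ *    if (err) {
+ *        free_netdev(dev);
+ *        return err;
+ *    }
+ *    ...
+ *    unregister_netdev(dev);
+ *    free_netdev(dev);
+ */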
+
+/* General hardware address lists handling functions */
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list, int addr_len);
+int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *, const unsigned char *),
+ int (*unsync)(struct net_device *,
+ const unsigned char *));
+void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *));
+void __hw_addr_init(struct netdev_hw_addr_list *list);
+
+/* Functions used for device addresses handling */
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
+ unsigned char addr_type);
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
+ unsigned char addr_type);
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
+
+/* Functions used for unicast addresses handling */
+int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_uc_del(struct net_device *dev, const unsigned char *addr);
+int dev_uc_sync(struct net_device *to, struct net_device *from);
+int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_uc_unsync(struct net_device *to, struct net_device *from);
+void dev_uc_flush(struct net_device *dev);
+void dev_uc_init(struct net_device *dev);
+
+/**
+ * __dev_uc_sync - Synchronize device's unicast list
+ * @dev: device to sync
+ * @sync: function to call if address should be added
+ * @unsync: function to call if address should be removed
+ *
+ * Add newly added addresses to the interface, and release
+ * addresses that have been deleted.
+ **/
+static inline int __dev_uc_sync(struct net_device *dev,
+ int (*sync)(struct net_device *,
+ const unsigned char *),
+ int (*unsync)(struct net_device *,
+ const unsigned char *))
+{
+ return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
+}
+
+/**
+ * __dev_uc_unsync - Remove synchronized addresses from device
+ * @dev: device to sync
+ * @unsync: function to call if address should be removed
+ *
+ * Remove all addresses that were added to the device by dev_uc_sync().
+ **/
+static inline void __dev_uc_unsync(struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *))
+{
+ __hw_addr_unsync_dev(&dev->uc, dev, unsync);
+}
+
+/* Functions used for multicast addresses handling */
+int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_sync(struct net_device *to, struct net_device *from);
+int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_mc_unsync(struct net_device *to, struct net_device *from);
+void dev_mc_flush(struct net_device *dev);
+void dev_mc_init(struct net_device *dev);
+
+/**
+ * __dev_mc_sync - Synchronize device's multicast list
+ * @dev: device to sync
+ * @sync: function to call if address should be added
+ * @unsync: function to call if address should be removed
+ *
+ * Add newly added addresses to the interface, and release
+ * addresses that have been deleted.
+ **/
+static inline int __dev_mc_sync(struct net_device *dev,
+ int (*sync)(struct net_device *,
+ const unsigned char *),
+ int (*unsync)(struct net_device *,
+ const unsigned char *))
+{
+ return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
+}
+
+/**
+ * __dev_mc_unsync - Remove synchronized addresses from device
+ * @dev: device to sync
+ * @unsync: function to call if address should be removed
+ *
+ * Remove all addresses that were added to the device by dev_mc_sync().
+ **/
+static inline void __dev_mc_unsync(struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *))
+{
+ __hw_addr_unsync_dev(&dev->mc, dev, unsync);
+}
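+
+/* Example (illustrative sketch; my_mc_add()/my_mc_del() are hypothetical
+ * callbacks that program or remove one address in the hardware filter): a
+ * driver's ndo_set_rx_mode() pushing its multicast list to hardware with
+ * __dev_mc_sync():
+ *
+ *    static int my_mc_add(struct net_device *dev, const unsigned char *addr)
+ *    {
+ *        ... program addr into the hardware filter ...
+ *        return 0;
+ *    }
+ *
+ *    static void my_set_rx_mode(struct net_device *dev)
+ *    {
+ *        __dev_mc_sync(dev, my_mc_add, my_mc_del);
+ *    }
+ */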
+
+/* Functions used for secondary unicast and multicast support */
+void dev_set_rx_mode(struct net_device *dev);
+void __dev_set_rx_mode(struct net_device *dev);
+int dev_set_promiscuity(struct net_device *dev, int inc);
+int dev_set_allmulti(struct net_device *dev, int inc);
+void netdev_state_change(struct net_device *dev);
+void netdev_notify_peers(struct net_device *dev);
+void netdev_features_change(struct net_device *dev);
+/* Load a device via the kmod */
+void dev_load(struct net *net, const char *name);
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *storage);
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+ const struct net_device_stats *netdev_stats);
+
+extern int netdev_max_backlog;
+extern int netdev_tstamp_prequeue;
+extern int weight_p;
+extern int bpf_jit_enable;
+
+bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter);
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter);
+
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \
+ for (iter = &(dev)->adj_list.upper, \
+ updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+ updev; \
+ updev = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
+ for (iter = &(dev)->all_adj_list.upper, \
+ updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
+ updev; \
+ updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
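+
+/*
+ * Illustrative sketch, not part of the original header: walking the devices
+ * stacked on top of @dev.  The walk must sit inside an RCU read-side
+ * critical section.
+ *
+ *	struct net_device *upper;
+ *	struct list_head *iter;
+ *
+ *	rcu_read_lock();
+ *	netdev_for_each_upper_dev_rcu(dev, upper, iter)
+ *		pr_info("%s is an upper device of %s\n",
+ *			upper->name, dev->name);
+ *	rcu_read_unlock();
+ */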
+
+void *netdev_lower_get_next_private(struct net_device *dev,
+ struct list_head **iter);
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+ struct list_head **iter);
+
+#define netdev_for_each_lower_private(dev, priv, iter) \
+ for (iter = (dev)->adj_list.lower.next, \
+ priv = netdev_lower_get_next_private(dev, &(iter)); \
+ priv; \
+ priv = netdev_lower_get_next_private(dev, &(iter)))
+
+#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
+ for (iter = &(dev)->adj_list.lower, \
+ priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
+ priv; \
+ priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+
+void *netdev_lower_get_next(struct net_device *dev,
+ struct list_head **iter);
+#define netdev_for_each_lower_dev(dev, ldev, iter) \
+ for (iter = &(dev)->adj_list.lower, \
+ ldev = netdev_lower_get_next(dev, &(iter)); \
+ ldev; \
+ ldev = netdev_lower_get_next(dev, &(iter)))
+
+void *netdev_adjacent_get_private(struct list_head *adj_list);
+void *netdev_lower_get_first_private_rcu(struct net_device *dev);
+struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
+struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
+int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
+int netdev_master_upper_dev_link(struct net_device *dev,
+ struct net_device *upper_dev);
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+ struct net_device *upper_dev,
+ void *private);
+void netdev_upper_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev);
+void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
+void *netdev_lower_dev_get_private(struct net_device *dev,
+ struct net_device *lower_dev);
+
+/* RSS keys are 40 or 52 bytes long */
+#define NETDEV_RSS_KEY_LEN 52
+extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+void netdev_rss_key_fill(void *buffer, size_t len);
+
+int dev_get_nest_level(struct net_device *dev,
+ bool (*type_check)(struct net_device *dev));
+int skb_checksum_help(struct sk_buff *skb);
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ netdev_features_t features, bool tx_path);
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+ netdev_features_t features);
+
+struct netdev_bonding_info {
+ ifslave slave;
+ ifbond master;
+};
+
+struct netdev_notifier_bonding_info {
+ struct netdev_notifier_info info; /* must be first */
+ struct netdev_bonding_info bonding_info;
+};
+
+void netdev_bonding_info_change(struct net_device *dev,
+ struct netdev_bonding_info *bonding_info);
+
+static inline
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
+{
+ return __skb_gso_segment(skb, features, true);
+}
+__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
+
+static inline bool can_checksum_protocol(netdev_features_t features,
+ __be16 protocol)
+{
+ return ((features & NETIF_F_GEN_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
+ protocol == htons(ETH_P_IP)) ||
+ ((features & NETIF_F_V6_CSUM) &&
+ protocol == htons(ETH_P_IPV6)) ||
+ ((features & NETIF_F_FCOE_CRC) &&
+ protocol == htons(ETH_P_FCOE)));
+}
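+
+/*
+ * Illustrative sketch, not part of the original header: a device advertising
+ * only NETIF_F_V4_CSUM passes this check for ETH_P_IP but fails it for
+ * ETH_P_IPV6, in which case the stack falls back to software checksumming:
+ *
+ *	if (!can_checksum_protocol(features, skb->protocol))
+ *		err = skb_checksum_help(skb);
+ */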
+
+#ifdef CONFIG_BUG
+void netdev_rx_csum_fault(struct net_device *dev);
+#else
+static inline void netdev_rx_csum_fault(struct net_device *dev)
+{
+}
+#endif
+/* rx skb timestamps */
+void net_enable_timestamp(void);
+void net_disable_timestamp(void);
+
+#ifdef CONFIG_PROC_FS
+int __init dev_proc_init(void);
+#else
+#define dev_proc_init() 0
+#endif
+
+static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
+ struct sk_buff *skb, struct net_device *dev,
+ bool more)
+{
+ skb->xmit_more = more ? 1 : 0;
+ return ops->ndo_start_xmit(skb, dev);
+}
+
+static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, bool more)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int rc;
+
+ rc = __netdev_start_xmit(ops, skb, dev, more);
+ if (rc == NETDEV_TX_OK)
+ txq_trans_update(txq);
+
+ return rc;
+}
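+
+/*
+ * Illustrative sketch, not part of the original header: the @more flag ends
+ * up in skb->xmit_more, which lets a driver defer its doorbell write until
+ * the last packet of a burst.  foo_*() are hypothetical driver helpers.
+ *
+ *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
+ *					  struct net_device *dev)
+ *	{
+ *		foo_queue_descriptor(dev, skb);
+ *		if (!skb->xmit_more)
+ *			foo_ring_doorbell(dev);
+ *		return NETDEV_TX_OK;
+ *	}
+ */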
+
+int netdev_class_create_file_ns(struct class_attribute *class_attr,
+ const void *ns);
+void netdev_class_remove_file_ns(struct class_attribute *class_attr,
+ const void *ns);
+
+static inline int netdev_class_create_file(struct class_attribute *class_attr)
+{
+ return netdev_class_create_file_ns(class_attr, NULL);
+}
+
+static inline void netdev_class_remove_file(struct class_attribute *class_attr)
+{
+ netdev_class_remove_file_ns(class_attr, NULL);
+}
+
+extern struct kobj_ns_type_operations net_ns_type_operations;
+
+const char *netdev_drivername(const struct net_device *dev);
+
+void linkwatch_run_queue(void);
+
+static inline netdev_features_t netdev_intersect_features(netdev_features_t f1,
+ netdev_features_t f2)
+{
+ if (f1 & NETIF_F_GEN_CSUM)
+ f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+ if (f2 & NETIF_F_GEN_CSUM)
+ f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+ f1 &= f2;
+ if (f1 & NETIF_F_GEN_CSUM)
+ f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
+
+ return f1;
+}
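+
+/*
+ * Worked example, not part of the original header: if f1 carries the generic
+ * NETIF_F_GEN_CSUM bit and f2 carries only NETIF_F_IP_CSUM, f1 is first
+ * widened to every protocol-specific checksum bit, the AND keeps
+ * NETIF_F_IP_CSUM, and nothing is stripped afterwards because the generic
+ * bit did not survive the intersection.  The combined path can therefore
+ * checksum IPv4 packets only.
+ */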
+
+static inline netdev_features_t netdev_get_wanted_features(
+ struct net_device *dev)
+{
+ return (dev->features & ~dev->hw_features) | dev->wanted_features;
+}
+netdev_features_t netdev_increment_features(netdev_features_t all,
+ netdev_features_t one, netdev_features_t mask);
+
+/* Allow TSO to be used on stacked devices:
+ * performing the GSO segmentation before the last device
+ * is a performance improvement.
+ */
+static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
+ netdev_features_t mask)
+{
+ return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
+}
+
+int __netdev_update_features(struct net_device *dev);
+void netdev_update_features(struct net_device *dev);
+void netdev_change_features(struct net_device *dev);
+
+void netif_stacked_transfer_operstate(const struct net_device *rootdev,
+ struct net_device *dev);
+
+netdev_features_t passthru_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+netdev_features_t netif_skb_features(struct sk_buff *skb);
+
+static inline bool net_gso_ok(netdev_features_t features, int gso_type)
+{
+ netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+
+ /* check flags correspondence */
+ BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
+
+ return (features & feature) == feature;
+}
+
+static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
+{
+ return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
+ (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
+}
+
+static inline bool netif_needs_gso(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+ unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
+ (skb->ip_summed != CHECKSUM_UNNECESSARY)));
+}
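+
+/*
+ * Illustrative sketch, not part of the original header: the transmit path
+ * uses these helpers roughly as follows (error handling omitted).
+ *
+ *	netdev_features_t features = netif_skb_features(skb);
+ *
+ *	if (netif_needs_gso(skb, features)) {
+ *		struct sk_buff *segs = skb_gso_segment(skb, features);
+ *
+ *		... transmit the list in segs instead of skb ...
+ *	}
+ */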
+
+static inline void netif_set_gso_max_size(struct net_device *dev,
+ unsigned int size)
+{
+ dev->gso_max_size = size;
+}
+
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+ int pulled_hlen, u16 mac_offset,
+ int mac_len)
+{
+ skb->protocol = protocol;
+ skb->encapsulation = 1;
+ skb_push(skb, pulled_hlen);
+ skb_reset_transport_header(skb);
+ skb->mac_header = mac_offset;
+ skb->network_header = skb->mac_header + mac_len;
+ skb->mac_len = mac_len;
+}
+
+static inline bool netif_is_macvlan(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_MACVLAN;
+}
+
+static inline bool netif_is_macvlan_port(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_MACVLAN_PORT;
+}
+
+static inline bool netif_is_ipvlan(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_IPVLAN_SLAVE;
+}
+
+static inline bool netif_is_ipvlan_port(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_IPVLAN_MASTER;
+}
+
+static inline bool netif_is_bond_master(struct net_device *dev)
+{
+ return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
+}
+
+static inline bool netif_is_bond_slave(struct net_device *dev)
+{
+ return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
+}
+
+static inline bool netif_supports_nofcs(struct net_device *dev)
+{
+ return dev->priv_flags & IFF_SUPP_NOFCS;
+}
+
+/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
+static inline void netif_keep_dst(struct net_device *dev)
+{
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
+}
+
+extern struct pernet_operations __net_initdata loopback_net_ops;
+
+/* Logging, debugging and troubleshooting/diagnostic helpers. */
+
+/* netdev_printk helpers, similar to dev_printk */
+
+static inline const char *netdev_name(const struct net_device *dev)
+{
+ if (!dev->name[0] || strchr(dev->name, '%'))
+ return "(unnamed net_device)";
+ return dev->name;
+}
+
+static inline const char *netdev_reg_state(const struct net_device *dev)
+{
+ switch (dev->reg_state) {
+ case NETREG_UNINITIALIZED: return " (uninitialized)";
+ case NETREG_REGISTERED: return "";
+ case NETREG_UNREGISTERING: return " (unregistering)";
+ case NETREG_UNREGISTERED: return " (unregistered)";
+ case NETREG_RELEASED: return " (released)";
+ case NETREG_DUMMY: return " (dummy)";
+ }
+
+ WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
+ return " (unknown)";
+}
+
+__printf(3, 4)
+void netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...);
+__printf(2, 3)
+void netdev_emerg(const struct net_device *dev, const char *format, ...);
+__printf(2, 3)
+void netdev_alert(const struct net_device *dev, const char *format, ...);
+__printf(2, 3)
+void netdev_crit(const struct net_device *dev, const char *format, ...);
+__printf(2, 3)
+void netdev_err(const struct net_device *dev, const char *format, ...);
+__printf(2, 3)
+void netdev_warn(const struct net_device *dev, const char *format, ...);
+__printf(2, 3)
+void netdev_notice(const struct net_device *dev, const char *format, ...);
+__printf(2, 3)
+void netdev_info(const struct net_device *dev, const char *format, ...);
+
+#define MODULE_ALIAS_NETDEV(device) \
+ MODULE_ALIAS("netdev-" device)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_netdev_dbg(__dev, format, ##args); \
+} while (0)
+#elif defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#else
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+})
+#endif
+
+#if defined(VERBOSE_DEBUG)
+#define netdev_vdbg netdev_dbg
+#else
+
+#define netdev_vdbg(dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/*
+ * netdev_WARN() acts like dev_printk(), but with the key difference
+ * of using a WARN/WARN_ON to get the message out, including the
+ * file/line information and a backtrace.
+ */
+#define netdev_WARN(dev, format, args...) \
+ WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \
+ netdev_reg_state(dev), ##args)
+
+/* netif printk helpers, similar to netdev_printk */
+
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#define netif_level(level, priv, type, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_##level(dev, fmt, ##args); \
+} while (0)
+
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_level(emerg, priv, type, dev, fmt, ##args)
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_level(alert, priv, type, dev, fmt, ##args)
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_level(crit, priv, type, dev, fmt, ##args)
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_level(err, priv, type, dev, fmt, ##args)
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_level(warn, priv, type, dev, fmt, ##args)
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_level(notice, priv, type, dev, fmt, ##args)
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_level(info, priv, type, dev, fmt, ##args)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define netif_dbg(priv, type, netdev, format, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ dynamic_netdev_dbg(netdev, format, ##args); \
+} while (0)
+#elif defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
+#else
+#define netif_dbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+#if defined(VERBOSE_DEBUG)
+#define netif_vdbg netif_dbg
+#else
+#define netif_vdbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ *
+ * Why 16? Because with 16 the only overlap we get on a hash of the
+ * low nibble of the protocol value is RARP/SNAP/X.25.
+ *
+ * NOTE: That is no longer true with the addition of VLAN tags. Not
+ * sure which should go first, but I bet it won't make much
+ * difference if we are running VLANs. The good news is that
+ * this protocol won't be in the list unless compiled in, so
+ * the average user (w/out VLANs) will not be adversely affected.
+ * --BLG
+ *
+ * 0800 IP
+ * 8100 802.1Q VLAN
+ * 0001 802.3
+ * 0002 AX.25
+ * 0004 802.2
+ * 8035 RARP
+ * 0005 SNAP
+ * 0805 X.25
+ * 0806 ARP
+ * 8137 IPX
+ * 0009 Localtalk
+ * 86DD IPv6
+ */
+#define PTYPE_HASH_SIZE (16)
+#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
+
+#endif /* _LINUX_NETDEVICE_H */
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
new file mode 100644
index 000000000..63560d0a8
--- /dev/null
+++ b/include/linux/netfilter.h
@@ -0,0 +1,379 @@
+#ifndef __LINUX_NETFILTER_H
+#define __LINUX_NETFILTER_H
+
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/static_key.h>
+#include <uapi/linux/netfilter.h>
+#ifdef CONFIG_NETFILTER
+static inline int NF_DROP_GETERR(int verdict)
+{
+ return -(verdict >> NF_VERDICT_QBITS);
+}
+
+static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
+ const union nf_inet_addr *a2)
+{
+ return a1->all[0] == a2->all[0] &&
+ a1->all[1] == a2->all[1] &&
+ a1->all[2] == a2->all[2] &&
+ a1->all[3] == a2->all[3];
+}
+
+static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
+ union nf_inet_addr *result,
+ const union nf_inet_addr *mask)
+{
+ result->all[0] = a1->all[0] & mask->all[0];
+ result->all[1] = a1->all[1] & mask->all[1];
+ result->all[2] = a1->all[2] & mask->all[2];
+ result->all[3] = a1->all[3] & mask->all[3];
+}
+
+int netfilter_init(void);
+
+/* Largest hook number + 1 */
+#define NF_MAX_HOOKS 8
+
+struct sk_buff;
+
+struct nf_hook_ops;
+
+struct sock;
+
+struct nf_hook_state {
+ unsigned int hook;
+ int thresh;
+ u_int8_t pf;
+ struct net_device *in;
+ struct net_device *out;
+ struct sock *sk;
+ int (*okfn)(struct sock *, struct sk_buff *);
+};
+
+static inline void nf_hook_state_init(struct nf_hook_state *p,
+ unsigned int hook,
+ int thresh, u_int8_t pf,
+ struct net_device *indev,
+ struct net_device *outdev,
+ struct sock *sk,
+ int (*okfn)(struct sock *, struct sk_buff *))
+{
+ p->hook = hook;
+ p->thresh = thresh;
+ p->pf = pf;
+ p->in = indev;
+ p->out = outdev;
+ p->sk = sk;
+ p->okfn = okfn;
+}
+
+typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
+
+struct nf_hook_ops {
+ struct list_head list;
+
+ /* User fills in from here down. */
+ nf_hookfn *hook;
+ struct module *owner;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
+ /* Hooks are ordered in ascending priority. */
+ int priority;
+};
+
+struct nf_sockopt_ops {
+ struct list_head list;
+
+ u_int8_t pf;
+
+ /* Non-inclusive ranges: use 0/0/NULL to never get called. */
+ int set_optmin;
+ int set_optmax;
+ int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
+#ifdef CONFIG_COMPAT
+ int (*compat_set)(struct sock *sk, int optval,
+ void __user *user, unsigned int len);
+#endif
+ int get_optmin;
+ int get_optmax;
+ int (*get)(struct sock *sk, int optval, void __user *user, int *len);
+#ifdef CONFIG_COMPAT
+ int (*compat_get)(struct sock *sk, int optval,
+ void __user *user, int *len);
+#endif
+ /* Use the module struct to lock set/get code in place */
+ struct module *owner;
+};
+
+/* Function to register/unregister hook points. */
+int nf_register_hook(struct nf_hook_ops *reg);
+void nf_unregister_hook(struct nf_hook_ops *reg);
+int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
+void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
+
+/* Functions to register get/setsockopt ranges (non-inclusive). You
+ need to check permissions yourself! */
+int nf_register_sockopt(struct nf_sockopt_ops *reg);
+void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
+
+extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+
+#ifdef HAVE_JUMP_LABEL
+extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+ if (__builtin_constant_p(pf) &&
+ __builtin_constant_p(hook))
+ return static_key_false(&nf_hooks_needed[pf][hook]);
+
+ return !list_empty(&nf_hooks[pf][hook]);
+}
+#else
+static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
+{
+ return !list_empty(&nf_hooks[pf][hook]);
+}
+#endif
+
+int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
+
+/**
+ * nf_hook_thresh - call a netfilter hook
+ *
+ * Returns 1 if the hook has allowed the packet to pass. The function
+ * okfn must be invoked by the caller in this case. Any other return
+ * value indicates the packet has been consumed by the hook.
+ */
+static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *),
+ int thresh)
+{
+ if (nf_hooks_active(pf, hook)) {
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, hook, thresh, pf,
+ indev, outdev, sk, okfn);
+ return nf_hook_slow(skb, &state);
+ }
+ return 1;
+}
+
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *))
+{
+ return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN);
+}
+
+/* Activate hook; either okfn or kfree_skb called, unless a hook
+ returns NF_STOLEN (in which case, it's up to the hook to deal with
+ the consequences).
+
+ Returns -ERRNO if packet dropped. Zero means queued, stolen or
+ accepted.
+*/
+
+/* RR:
+ > I don't want nf_hook to return anything because people might forget
+ > about async and trust the return value to mean "packet was ok".
+
+ AK:
+ Just document it clearly, then you can expect some sense from kernel
+ coders :)
+*/
+
+static inline int
+NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in,
+ struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *), int thresh)
+{
+ int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh);
+ if (ret == 1)
+ ret = okfn(sk, skb);
+ return ret;
+}
+
+static inline int
+NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *), bool cond)
+{
+ int ret;
+
+ if (!cond ||
+ ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1))
+ ret = okfn(sk, skb);
+ return ret;
+}
+
+static inline int
+NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb,
+ struct net_device *in, struct net_device *out,
+ int (*okfn)(struct sock *, struct sk_buff *))
+{
+ return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN);
+}
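+
+/*
+ * Illustrative sketch, not part of the original header: a protocol's output
+ * path typically runs its hook like this, with a hypothetical
+ * foo_finish_output() invoked as the okfn once the verdict is NF_ACCEPT.
+ *
+ *	static int foo_finish_output(struct sock *sk, struct sk_buff *skb)
+ *	{
+ *		return dev_queue_xmit(skb);
+ *	}
+ *
+ *	static int foo_local_out(struct sock *sk, struct sk_buff *skb)
+ *	{
+ *		return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, sk, skb,
+ *			       NULL, skb_dst(skb)->dev, foo_finish_output);
+ *	}
+ */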
+
+/* Call setsockopt() */
+int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
+ unsigned int len);
+int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
+ int *len);
+#ifdef CONFIG_COMPAT
+int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
+ char __user *opt, unsigned int len);
+int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
+ char __user *opt, int *len);
+#endif
+
+/* Call this before modifying an existing packet: ensures it is
+ modifiable and linear to the point you care about (writable_len).
+ Returns true or false. */
+int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
+
+struct flowi;
+struct nf_queue_entry;
+
+struct nf_afinfo {
+ unsigned short family;
+ __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+ __sum16 (*checksum_partial)(struct sk_buff *skb,
+ unsigned int hook,
+ unsigned int dataoff,
+ unsigned int len,
+ u_int8_t protocol);
+ int (*route)(struct net *net, struct dst_entry **dst,
+ struct flowi *fl, bool strict);
+ void (*saveroute)(const struct sk_buff *skb,
+ struct nf_queue_entry *entry);
+ int (*reroute)(struct sk_buff *skb,
+ const struct nf_queue_entry *entry);
+ int route_key_size;
+};
+
+extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
+static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
+{
+ return rcu_dereference(nf_afinfo[family]);
+}
+
+static inline __sum16
+nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
+ u_int8_t protocol, unsigned short family)
+{
+ const struct nf_afinfo *afinfo;
+ __sum16 csum = 0;
+
+ rcu_read_lock();
+ afinfo = nf_get_afinfo(family);
+ if (afinfo)
+ csum = afinfo->checksum(skb, hook, dataoff, protocol);
+ rcu_read_unlock();
+ return csum;
+}
+
+static inline __sum16
+nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, unsigned int len,
+ u_int8_t protocol, unsigned short family)
+{
+ const struct nf_afinfo *afinfo;
+ __sum16 csum = 0;
+
+ rcu_read_lock();
+ afinfo = nf_get_afinfo(family);
+ if (afinfo)
+ csum = afinfo->checksum_partial(skb, hook, dataoff, len,
+ protocol);
+ rcu_read_unlock();
+ return csum;
+}
+
+int nf_register_afinfo(const struct nf_afinfo *afinfo);
+void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+
+#include <net/flow.h>
+extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
+
+static inline void
+nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
+{
+#ifdef CONFIG_NF_NAT_NEEDED
+ void (*decodefn)(struct sk_buff *, struct flowi *);
+
+ rcu_read_lock();
+ decodefn = rcu_dereference(nf_nat_decode_session_hook);
+ if (decodefn)
+ decodefn(skb, fl);
+ rcu_read_unlock();
+#endif
+}
+
+#else /* !CONFIG_NETFILTER */
+#define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
+#define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb)
+static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
+ struct sock *sk,
+ struct sk_buff *skb,
+ struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *sk, struct sk_buff *), int thresh)
+{
+ return okfn(sk, skb);
+}
+static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct sock *, struct sk_buff *))
+{
+ return 1;
+}
+struct flowi;
+static inline void
+nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
+{
+}
+#endif /*CONFIG_NETFILTER*/
+
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
+void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
+
+struct nf_conn;
+enum ip_conntrack_info;
+struct nlattr;
+
+struct nfq_ct_hook {
+ size_t (*build_size)(const struct nf_conn *ct);
+ int (*build)(struct sk_buff *skb, struct nf_conn *ct);
+ int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
+ int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
+ u32 portid, u32 report);
+ void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, s32 off);
+};
+extern struct nfq_ct_hook __rcu *nfq_ct_hook;
+#else
+static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+#endif
+
+#endif /*__LINUX_NETFILTER_H*/
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
new file mode 100644
index 000000000..34b172301
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -0,0 +1,573 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/stringify.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+#include <uapi/linux/netfilter/ipset/ip_set.h>
+
+#define _IP_SET_MODULE_DESC(a, b, c) \
+ MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c)
+#define IP_SET_MODULE_DESC(a, b, c) \
+ _IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c))
+
+/* Set features */
+enum ip_set_feature {
+ IPSET_TYPE_IP_FLAG = 0,
+ IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
+ IPSET_TYPE_PORT_FLAG = 1,
+ IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
+ IPSET_TYPE_MAC_FLAG = 2,
+ IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
+ IPSET_TYPE_IP2_FLAG = 3,
+ IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
+ IPSET_TYPE_NAME_FLAG = 4,
+ IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
+ IPSET_TYPE_IFACE_FLAG = 5,
+ IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
+ IPSET_TYPE_MARK_FLAG = 6,
+ IPSET_TYPE_MARK = (1 << IPSET_TYPE_MARK_FLAG),
+ IPSET_TYPE_NOMATCH_FLAG = 7,
+ IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG),
+ /* Strictly speaking not a feature, but a flag for dumping:
+ * this settype must be dumped last */
+ IPSET_DUMP_LAST_FLAG = 8,
+ IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
+};
+
+/* Set extensions */
+enum ip_set_extension {
+ IPSET_EXT_BIT_TIMEOUT = 0,
+ IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
+ IPSET_EXT_BIT_COUNTER = 1,
+ IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
+ IPSET_EXT_BIT_COMMENT = 2,
+ IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
+ IPSET_EXT_BIT_SKBINFO = 3,
+ IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO),
+ /* Mark set with an extension which needs to call destroy */
+ IPSET_EXT_BIT_DESTROY = 7,
+ IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
+};
+
+#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
+#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
+#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT)
+#define SET_WITH_SKBINFO(s) ((s)->extensions & IPSET_EXT_SKBINFO)
+#define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD)
+
+/* Extension id, in size order */
+enum ip_set_ext_id {
+ IPSET_EXT_ID_COUNTER = 0,
+ IPSET_EXT_ID_TIMEOUT,
+ IPSET_EXT_ID_SKBINFO,
+ IPSET_EXT_ID_COMMENT,
+ IPSET_EXT_ID_MAX,
+};
+
+/* Extension type */
+struct ip_set_ext_type {
+ /* Destroy extension private data (can be NULL) */
+ void (*destroy)(void *ext);
+ enum ip_set_extension type;
+ enum ipset_cadt_flags flag;
+ /* Size and minimal alignment */
+ u8 len;
+ u8 align;
+};
+
+extern const struct ip_set_ext_type ip_set_extensions[];
+
+struct ip_set_ext {
+ u64 packets;
+ u64 bytes;
+ u32 timeout;
+ u32 skbmark;
+ u32 skbmarkmask;
+ u32 skbprio;
+ u16 skbqueue;
+ char *comment;
+};
+
+struct ip_set_counter {
+ atomic64_t bytes;
+ atomic64_t packets;
+};
+
+struct ip_set_comment {
+ char *str;
+};
+
+struct ip_set_skbinfo {
+ u32 skbmark;
+ u32 skbmarkmask;
+ u32 skbprio;
+ u16 skbqueue;
+};
+
+struct ip_set;
+
+#define ext_timeout(e, s) \
+(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+#define ext_counter(e, s) \
+(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+#define ext_comment(e, s) \
+(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+#define ext_skbinfo(e, s) \
+(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
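+
+/*
+ * Illustrative sketch, not part of the original header: extensions live
+ * after each element at set-specific offsets, so for an element e of a set s
+ * created with timeout support the stored expiry is reached as:
+ *
+ *	unsigned long *timeout = ext_timeout(e, s);
+ *
+ *	if (ip_set_timeout_expired(timeout))
+ *		... the element is stale and may be garbage collected ...
+ */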
+
+typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 cmdflags);
+
+/* Kernel API function options */
+struct ip_set_adt_opt {
+ u8 family; /* Actual protocol family */
+ u8 dim; /* Dimension of match/target */
+ u8 flags; /* Direction and negation flags */
+ u32 cmdflags; /* Command-like flags */
+ struct ip_set_ext ext; /* Extensions */
+};
+
+/* Set type, variant-specific part */
+struct ip_set_type_variant {
+ /* Kernelspace: test/add/del entries
+ * returns negative error code,
+ * zero for no match/success to add/delete
+ * positive for matching element */
+ int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ enum ipset_adt adt, struct ip_set_adt_opt *opt);
+
+ /* Userspace: test/add/del entries
+ * returns negative error code,
+ * zero for no match/success to add/delete
+ * positive for matching element */
+ int (*uadt)(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
+
+ /* Low level add/del/test functions */
+ ipset_adtfn adt[IPSET_ADT_MAX];
+
+ /* When adding entries and set is full, try to resize the set */
+ int (*resize)(struct ip_set *set, bool retried);
+ /* Destroy the set */
+ void (*destroy)(struct ip_set *set);
+ /* Flush the elements */
+ void (*flush)(struct ip_set *set);
+ /* Expire entries before listing */
+ void (*expire)(struct ip_set *set);
+ /* List set header data */
+ int (*head)(struct ip_set *set, struct sk_buff *skb);
+ /* List elements */
+ int (*list)(const struct ip_set *set, struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+	/* Return true if set "b" is the same as set "a"
+	 * according to the set creation parameters */
+ bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+};
+
+/* The core set type structure */
+struct ip_set_type {
+ struct list_head list;
+
+ /* Typename */
+ char name[IPSET_MAXNAMELEN];
+ /* Protocol version */
+ u8 protocol;
+ /* Set type dimension */
+ u8 dimension;
+ /*
+ * Supported family: may be NFPROTO_UNSPEC for both
+ * NFPROTO_IPV4/NFPROTO_IPV6.
+ */
+ u8 family;
+ /* Type revisions */
+ u8 revision_min, revision_max;
+ /* Set features to control swapping */
+ u16 features;
+
+ /* Create set */
+ int (*create)(struct net *net, struct ip_set *set,
+ struct nlattr *tb[], u32 flags);
+
+ /* Attribute policies */
+ const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
+ const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+};
+
+/* register and unregister set type */
+extern int ip_set_type_register(struct ip_set_type *set_type);
+extern void ip_set_type_unregister(struct ip_set_type *set_type);
+
+/* A generic IP set */
+struct ip_set {
+ /* The name of the set */
+ char name[IPSET_MAXNAMELEN];
+ /* Lock protecting the set data */
+ rwlock_t lock;
+ /* References to the set */
+ u32 ref;
+ /* The core set type */
+ struct ip_set_type *type;
+ /* The type variant doing the real job */
+ const struct ip_set_type_variant *variant;
+ /* The actual INET family of the set */
+ u8 family;
+ /* The type revision */
+ u8 revision;
+ /* Extensions */
+ u8 extensions;
+ /* Create flags */
+ u8 flags;
+ /* Default timeout value, if enabled */
+ u32 timeout;
+ /* Element data size */
+ size_t dsize;
+ /* Offsets to extensions in elements */
+ size_t offset[IPSET_EXT_ID_MAX];
+ /* The type specific data */
+ void *data;
+};
+
+static inline void
+ip_set_ext_destroy(struct ip_set *set, void *data)
+{
+ /* Check that the extension is enabled for the set and
+	 * call its destroy function for its extension part in data.
+ */
+ if (SET_WITH_COMMENT(set))
+ ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
+ ext_comment(data, set));
+}
+
+static inline int
+ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+{
+ u32 cadt_flags = 0;
+
+ if (SET_WITH_TIMEOUT(set))
+ if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(set->timeout))))
+ return -EMSGSIZE;
+ if (SET_WITH_COUNTER(set))
+ cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
+ if (SET_WITH_COMMENT(set))
+ cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+ if (SET_WITH_SKBINFO(set))
+ cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
+ if (SET_WITH_FORCEADD(set))
+ cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
+
+ if (!cadt_flags)
+ return 0;
+ return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+}
+
+static inline void
+ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
+{
+ atomic64_add((long long)bytes, &(counter)->bytes);
+}
+
+static inline void
+ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
+{
+ atomic64_add((long long)packets, &(counter)->packets);
+}
+
+static inline u64
+ip_set_get_bytes(const struct ip_set_counter *counter)
+{
+ return (u64)atomic64_read(&(counter)->bytes);
+}
+
+static inline u64
+ip_set_get_packets(const struct ip_set_counter *counter)
+{
+ return (u64)atomic64_read(&(counter)->packets);
+}
+
+static inline void
+ip_set_update_counter(struct ip_set_counter *counter,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ if (ext->packets != ULLONG_MAX &&
+ !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
+ ip_set_add_bytes(ext->bytes, counter);
+ ip_set_add_packets(ext->packets, counter);
+ }
+ if (flags & IPSET_FLAG_MATCH_COUNTERS) {
+ mext->packets = ip_set_get_packets(counter);
+ mext->bytes = ip_set_get_bytes(counter);
+ }
+}
+
+static inline void
+ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags)
+{
+ mext->skbmark = skbinfo->skbmark;
+ mext->skbmarkmask = skbinfo->skbmarkmask;
+ mext->skbprio = skbinfo->skbprio;
+ mext->skbqueue = skbinfo->skbqueue;
+}
+static inline bool
+ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
+{
+ /* Send nonzero parameters only */
+ return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
+ nla_put_net64(skb, IPSET_ATTR_SKBMARK,
+ cpu_to_be64((u64)skbinfo->skbmark << 32 |
+ skbinfo->skbmarkmask))) ||
+ (skbinfo->skbprio &&
+ nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
+ cpu_to_be32(skbinfo->skbprio))) ||
+ (skbinfo->skbqueue &&
+ nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
+ cpu_to_be16(skbinfo->skbqueue)));
+
+}
+
+static inline void
+ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
+ const struct ip_set_ext *ext)
+{
+ skbinfo->skbmark = ext->skbmark;
+ skbinfo->skbmarkmask = ext->skbmarkmask;
+ skbinfo->skbprio = ext->skbprio;
+ skbinfo->skbqueue = ext->skbqueue;
+}
+
+static inline bool
+ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
+{
+ return nla_put_net64(skb, IPSET_ATTR_BYTES,
+ cpu_to_be64(ip_set_get_bytes(counter))) ||
+ nla_put_net64(skb, IPSET_ATTR_PACKETS,
+ cpu_to_be64(ip_set_get_packets(counter)));
+}
+
+static inline void
+ip_set_init_counter(struct ip_set_counter *counter,
+ const struct ip_set_ext *ext)
+{
+ if (ext->bytes != ULLONG_MAX)
+ atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
+ if (ext->packets != ULLONG_MAX)
+ atomic64_set(&(counter)->packets, (long long)(ext->packets));
+}
+
+/* Netlink CB args */
+enum {
+ IPSET_CB_NET = 0,
+ IPSET_CB_DUMP,
+ IPSET_CB_INDEX,
+ IPSET_CB_ARG0,
+ IPSET_CB_ARG1,
+ IPSET_CB_ARG2,
+};
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(struct net *net,
+ const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
+extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
+
+extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ struct ip_set_adt_opt *opt);
+extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ struct ip_set_adt_opt *opt);
+extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ struct ip_set_adt_opt *opt);
+
+/* Utility functions */
+extern void *ip_set_alloc(size_t size);
+extern void ip_set_free(void *members);
+extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
+extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
+ size_t len);
+extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
+ struct ip_set_ext *ext);
+
+static inline int
+ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
+{
+ __be32 ip;
+ int ret = ip_set_get_ipaddr4(nla, &ip);
+
+ if (ret)
+ return ret;
+ *ipaddr = ntohl(ip);
+ return 0;
+}
+
+/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
+static inline bool
+ip_set_eexist(int ret, u32 flags)
+{
+ return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
+}
+
+/* Match elements marked with nomatch */
+static inline bool
+ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
+{
+ return adt == IPSET_TEST &&
+ (set->type->features & IPSET_TYPE_NOMATCH) &&
+ ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
+ (ret > 0 || ret == -ENOTEMPTY);
+}
+
+/* Check the NLA_F_NET_BYTEORDER flag */
+static inline bool
+ip_set_attr_netorder(struct nlattr *tb[], int type)
+{
+ return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+static inline bool
+ip_set_optattr_netorder(struct nlattr *tb[], int type)
+{
+ return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+/* Useful converters */
+static inline u32
+ip_set_get_h32(const struct nlattr *attr)
+{
+ return ntohl(nla_get_be32(attr));
+}
+
+static inline u16
+ip_set_get_h16(const struct nlattr *attr)
+{
+ return ntohs(nla_get_be16(attr));
+}
+
+#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
+#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
+
+static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
+{
+ struct nlattr *__nested = ipset_nest_start(skb, type);
+ int ret;
+
+ if (!__nested)
+ return -EMSGSIZE;
+ ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
+ if (!ret)
+ ipset_nest_end(skb, __nested);
+ return ret;
+}
+
+static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
+ const struct in6_addr *ipaddrptr)
+{
+ struct nlattr *__nested = ipset_nest_start(skb, type);
+ int ret;
+
+ if (!__nested)
+ return -EMSGSIZE;
+ ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
+ if (!ret)
+ ipset_nest_end(skb, __nested);
+ return ret;
+}
+
+/* Get address from skbuff */
+static inline __be32
+ip4addr(const struct sk_buff *skb, bool src)
+{
+ return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
+{
+ *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
+{
+ memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
+ sizeof(*addr));
+}
+
+/* Calculate the bytes required to store the inclusive range of a-b */
+static inline int
+bitmap_bytes(u32 a, u32 b)
+{
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
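+
+/*
+ * Worked example, not part of the original header: for the inclusive range
+ * 0..65535, (65535 - 0 + 8) / 8 = 8192 with integer division, already a
+ * multiple of 4, so bitmap_bytes(0, 65535) == 8192: one bit per element,
+ * rounded up to a 32-bit boundary.
+ */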
+
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_comment.h>
+
+static inline int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+ const void *e, bool active)
+{
+ if (SET_WITH_TIMEOUT(set)) {
+ unsigned long *timeout = ext_timeout(e, set);
+
+ if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(active ? ip_set_timeout_get(timeout)
+ : *timeout)))
+ return -EMSGSIZE;
+ }
+ if (SET_WITH_COUNTER(set) &&
+ ip_set_put_counter(skb, ext_counter(e, set)))
+ return -EMSGSIZE;
+ if (SET_WITH_COMMENT(set) &&
+ ip_set_put_comment(skb, ext_comment(e, set)))
+ return -EMSGSIZE;
+ if (SET_WITH_SKBINFO(set) &&
+ ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+#define IP_SET_INIT_KEXT(skb, opt, set) \
+ { .bytes = (skb)->len, .packets = 1, \
+ .timeout = ip_set_adt_opt_timeout(opt, set) }
+
+#define IP_SET_INIT_UEXT(set) \
+ { .bytes = ULLONG_MAX, .packets = ULLONG_MAX, \
+ .timeout = (set)->timeout }
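+
+/*
+ * Illustrative sketch, not part of the original header: a set type's kadt
+ * handler typically builds the kernel-side extension values from the packet
+ * and the match options before calling the low-level add/del/test function
+ * (adtfn and e stand for the variant callback and a type-specific element).
+ *
+ *	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+ *
+ *	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+ */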
+
+#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
+
+#define IPSET_CONCAT(a, b) a##b
+#define IPSET_TOKEN(a, b) IPSET_CONCAT(a, b)
+
+#endif /*_IP_SET_H */
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
new file mode 100644
index 000000000..5e4662a71
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -0,0 +1,28 @@
+#ifndef __IP_SET_BITMAP_H
+#define __IP_SET_BITMAP_H
+
+#include <uapi/linux/netfilter/ipset/ip_set_bitmap.h>
+
+#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF
+
+enum {
+ IPSET_ADD_FAILED = 1,
+ IPSET_ADD_STORE_PLAIN_TIMEOUT,
+ IPSET_ADD_START_STORED_TIMEOUT,
+};
+
+/* Common functions */
+
+static inline u32
+range_to_mask(u32 from, u32 to, u8 *bits)
+{
+ u32 mask = 0xFFFFFFFE;
+
+ *bits = 32;
+ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
+
+ return mask;
+}
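+
+/*
+ * Worked example, not part of the original header: for the host-order range
+ * 0xC0A80000..0xC0A800FF (192.168.0.0 - 192.168.0.255) the loop stops at
+ * mask 0xFFFFFF00 with *bits == 24, i.e. the range is exactly the /24
+ * network 192.168.0.0/24.
+ */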
+
+#endif /* __IP_SET_BITMAP_H */
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
new file mode 100644
index 000000000..21217ea00
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -0,0 +1,57 @@
+#ifndef _IP_SET_COMMENT_H
+#define _IP_SET_COMMENT_H
+
+/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline char*
+ip_set_comment_uget(struct nlattr *tb)
+{
+ return nla_data(tb);
+}
+
+static inline void
+ip_set_init_comment(struct ip_set_comment *comment,
+ const struct ip_set_ext *ext)
+{
+ size_t len = ext->comment ? strlen(ext->comment) : 0;
+
+ if (unlikely(comment->str)) {
+ kfree(comment->str);
+ comment->str = NULL;
+ }
+ if (!len)
+ return;
+ if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
+ len = IPSET_MAX_COMMENT_SIZE;
+ comment->str = kzalloc(len + 1, GFP_ATOMIC);
+ if (unlikely(!comment->str))
+ return;
+ strlcpy(comment->str, ext->comment, len + 1);
+}
+
+static inline int
+ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+{
+ if (!comment->str)
+ return 0;
+ return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+}
+
+static inline void
+ip_set_comment_free(struct ip_set_comment *comment)
+{
+ if (unlikely(!comment->str))
+ return;
+ kfree(comment->str);
+ comment->str = NULL;
+}
+
+#endif
+#endif
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
new file mode 100644
index 000000000..90d09300e
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -0,0 +1,33 @@
+#ifndef _IP_SET_GETPORT_H
+#define _IP_SET_GETPORT_H
+
+extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto);
+#else
+static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ return false;
+}
+#endif
+
+extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
+ __be16 *port);
+
+static inline bool ip_set_proto_with_ports(u8 proto)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ return true;
+ }
+ return false;
+}
+
+#endif /*_IP_SET_GETPORT_H*/
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
new file mode 100644
index 000000000..f98ddfb09
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_hash.h
@@ -0,0 +1,13 @@
+#ifndef __IP_SET_HASH_H
+#define __IP_SET_HASH_H
+
+#include <uapi/linux/netfilter/ipset/ip_set_hash.h>
+
+
+#define IPSET_DEFAULT_HASHSIZE 1024
+#define IPSET_MIMINAL_HASHSIZE 64
+#define IPSET_DEFAULT_MAXELEM 65536
+#define IPSET_DEFAULT_PROBES 4
+#define IPSET_DEFAULT_RESIZE 100
+
+#endif /* __IP_SET_HASH_H */
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
new file mode 100644
index 000000000..fe2622a00
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -0,0 +1,11 @@
+#ifndef __IP_SET_LIST_H
+#define __IP_SET_LIST_H
+
+#include <uapi/linux/netfilter/ipset/ip_set_list.h>
+
+
+#define IP_SET_LIST_DEFAULT_SIZE 8
+#define IP_SET_LIST_MIN_SIZE 4
+#define IP_SET_LIST_MAX_SIZE 65536
+
+#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
new file mode 100644
index 000000000..83c2f9e08
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -0,0 +1,78 @@
+#ifndef _IP_SET_TIMEOUT_H
+#define _IP_SET_TIMEOUT_H
+
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+/* How often should the gc be run by default */
+#define IPSET_GC_TIME (3 * 60)
+
+/* Timeout period depending on the timeout value of the given set */
+#define IPSET_GC_PERIOD(timeout) \
+ ((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
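+
+/*
+ * Worked example, not part of the original header: a set with a 600 second
+ * default timeout gets IPSET_GC_PERIOD(600) == min_t(u32, 200, 180) == 180,
+ * so its garbage collector runs every three minutes, while a set with a
+ * 3 second timeout is collected every second.
+ */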
+
+/* Entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT 0
+
+/* Set is defined with timeout support: timeout value may be 0 */
+#define IPSET_NO_TIMEOUT UINT_MAX
+
+#define ip_set_adt_opt_timeout(opt, set) \
+((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
+
+static inline unsigned int
+ip_set_timeout_uget(struct nlattr *tb)
+{
+ unsigned int timeout = ip_set_get_h32(tb);
+
+ /* Normalize to fit into jiffies */
+ if (timeout > UINT_MAX/MSEC_PER_SEC)
+ timeout = UINT_MAX/MSEC_PER_SEC;
+
+ /* Userspace supplied TIMEOUT parameter: adjust crazy size */
+ return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+}
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ||
+ time_is_after_jiffies(timeout);
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long *timeout)
+{
+ return *timeout != IPSET_ELEM_PERMANENT &&
+ time_is_before_jiffies(*timeout);
+}
+
+static inline void
+ip_set_timeout_set(unsigned long *timeout, u32 t)
+{
+ if (!t) {
+ *timeout = IPSET_ELEM_PERMANENT;
+ return;
+ }
+
+ *timeout = msecs_to_jiffies(t * 1000) + jiffies;
+ if (*timeout == IPSET_ELEM_PERMANENT)
+ /* Bingo! :-) */
+ (*timeout)--;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long *timeout)
+{
+ return *timeout == IPSET_ELEM_PERMANENT ? 0 :
+ jiffies_to_msecs(*timeout - jiffies)/1000;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _IP_SET_TIMEOUT_H */
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
new file mode 100644
index 000000000..1afbb94b4
--- /dev/null
+++ b/include/linux/netfilter/ipset/pfxlen.h
@@ -0,0 +1,53 @@
+#ifndef _PFXLEN_H
+#define _PFXLEN_H
+
+#include <asm/byteorder.h>
+#include <linux/netfilter.h>
+#include <net/tcp.h>
+
+/* Prefixlen maps, by Jan Engelhardt */
+extern const union nf_inet_addr ip_set_netmask_map[];
+extern const union nf_inet_addr ip_set_hostmask_map[];
+
+static inline __be32
+ip_set_netmask(u8 pfxlen)
+{
+ return ip_set_netmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_netmask6(u8 pfxlen)
+{
+ return &ip_set_netmask_map[pfxlen].ip6[0];
+}
+
+static inline u32
+ip_set_hostmask(u8 pfxlen)
+{
+ return (__force u32) ip_set_hostmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_hostmask6(u8 pfxlen)
+{
+ return &ip_set_hostmask_map[pfxlen].ip6[0];
+}
+
+extern u32 ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr);
+
+#define ip_set_mask_from_to(from, to, cidr) \
+do { \
+ from &= ip_set_hostmask(cidr); \
+ to = from | ~ip_set_hostmask(cidr); \
+} while (0)
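+
+/*
+ * Worked example, not part of the original header: with from == 0xC0A80139
+ * (192.168.1.57 in host order) and cidr == 24, ip_set_hostmask(24) is
+ * 0xFFFFFF00, so the macro rewrites from to 0xC0A80100 (192.168.1.0) and
+ * to to 0xC0A801FF (192.168.1.255), the first and last address of the /24.
+ */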
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+#endif /*_PFXLEN_H */
diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h
new file mode 100644
index 000000000..4b59a1584
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_amanda.h
@@ -0,0 +1,11 @@
+#ifndef _NF_CONNTRACK_AMANDA_H
+#define _NF_CONNTRACK_AMANDA_H
+/* AMANDA tracking. */
+
+extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct nf_conntrack_expect *exp);
+#endif /* _NF_CONNTRACK_AMANDA_H */
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
new file mode 100644
index 000000000..275505792
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -0,0 +1,28 @@
+#ifndef _NF_CONNTRACK_COMMON_H
+#define _NF_CONNTRACK_COMMON_H
+
+#include <uapi/linux/netfilter/nf_conntrack_common.h>
+
+struct ip_conntrack_stat {
+ unsigned int searched;
+ unsigned int found;
+ unsigned int new;
+ unsigned int invalid;
+ unsigned int ignore;
+ unsigned int delete;
+ unsigned int delete_list;
+ unsigned int insert;
+ unsigned int insert_failed;
+ unsigned int drop;
+ unsigned int early_drop;
+ unsigned int error;
+ unsigned int expect_new;
+ unsigned int expect_create;
+ unsigned int expect_delete;
+ unsigned int search_restart;
+};
+
+/* call to create an explicit dependency on nf_conntrack. */
+void need_conntrack(void);
+
+#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/nf_conntrack_dccp.h b/include/linux/netfilter/nf_conntrack_dccp.h
new file mode 100644
index 000000000..40dcc8205
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_dccp.h
@@ -0,0 +1,40 @@
+#ifndef _NF_CONNTRACK_DCCP_H
+#define _NF_CONNTRACK_DCCP_H
+
+/* Exposed to userspace over nfnetlink */
+enum ct_dccp_states {
+ CT_DCCP_NONE,
+ CT_DCCP_REQUEST,
+ CT_DCCP_RESPOND,
+ CT_DCCP_PARTOPEN,
+ CT_DCCP_OPEN,
+ CT_DCCP_CLOSEREQ,
+ CT_DCCP_CLOSING,
+ CT_DCCP_TIMEWAIT,
+ CT_DCCP_IGNORE,
+ CT_DCCP_INVALID,
+ __CT_DCCP_MAX
+};
+#define CT_DCCP_MAX (__CT_DCCP_MAX - 1)
+
+enum ct_dccp_roles {
+ CT_DCCP_ROLE_CLIENT,
+ CT_DCCP_ROLE_SERVER,
+ __CT_DCCP_ROLE_MAX
+};
+#define CT_DCCP_ROLE_MAX (__CT_DCCP_ROLE_MAX - 1)
+
+#ifdef __KERNEL__
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+struct nf_ct_dccp {
+ u_int8_t role[IP_CT_DIR_MAX];
+ u_int8_t state;
+ u_int8_t last_pkt;
+ u_int8_t last_dir;
+ u_int64_t handshake_seq;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_DCCP_H */
diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h
new file mode 100644
index 000000000..5f818b01e
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_ftp.h
@@ -0,0 +1,33 @@
+#ifndef _NF_CONNTRACK_FTP_H
+#define _NF_CONNTRACK_FTP_H
+
+#include <uapi/linux/netfilter/nf_conntrack_ftp.h>
+
+
+#define FTP_PORT 21
+
+#define NF_CT_FTP_SEQ_PICKUP (1 << 0)
+
+#define NUM_SEQ_TO_REMEMBER 2
+/* This structure exists only once per master */
+struct nf_ct_ftp_master {
+ /* Valid seq positions for cmd matching after newline */
+ u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
+ /* 0 means seq_match_aft_nl not set */
+ u_int16_t seq_aft_nl_num[IP_CT_DIR_MAX];
+ /* pickup sequence tracking, useful for conntrackd */
+ u_int16_t flags[IP_CT_DIR_MAX];
+};
+
+struct nf_conntrack_expect;
+
+/* For NAT to hook in when we find a packet which describes what other
+ * connection we should expect. */
+extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ enum nf_ct_ftp_type type,
+ unsigned int protoff,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct nf_conntrack_expect *exp);
+#endif /* _NF_CONNTRACK_FTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h
new file mode 100644
index 000000000..858d9b214
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_h323.h
@@ -0,0 +1,97 @@
+#ifndef _NF_CONNTRACK_H323_H
+#define _NF_CONNTRACK_H323_H
+
+#ifdef __KERNEL__
+
+#include <linux/netfilter/nf_conntrack_h323_asn1.h>
+
+#define RAS_PORT 1719
+#define Q931_PORT 1720
+#define H323_RTP_CHANNEL_MAX 4 /* Audio, video, FAX and other */
+
+/* This structure exists only once per master */
+struct nf_ct_h323_master {
+
+ /* Original and NATed Q.931 or H.245 signal ports */
+ __be16 sig_port[IP_CT_DIR_MAX];
+
+ /* Original and NATed RTP ports */
+ __be16 rtp_port[H323_RTP_CHANNEL_MAX][IP_CT_DIR_MAX];
+
+ union {
+ /* RAS connection timeout */
+ u_int32_t timeout;
+
+ /* Next TPKT length (for separate TPKT header and data) */
+ u_int16_t tpkt_len[IP_CT_DIR_MAX];
+ };
+};
+
+struct nf_conn;
+
+int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+ TransportAddress *taddr, union nf_inet_addr *addr,
+ __be16 *port);
+void nf_conntrack_h245_expect(struct nf_conn *new,
+ struct nf_conntrack_expect *this);
+void nf_conntrack_q931_expect(struct nf_conn *new,
+ struct nf_conntrack_expect *this);
+extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr,
+ union nf_inet_addr *addr,
+ __be16 port);
+extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr,
+ union nf_inet_addr *addr,
+ __be16 port);
+extern int (*set_sig_addr_hook) (struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+extern int (*set_ras_addr_hook) (struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ TransportAddress *taddr, int count);
+extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned char **data,
+ int dataoff,
+ H245_TransportAddress *taddr,
+ __be16 port, __be16 rtp_port,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp);
+extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ H245_TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr, __be16 port,
+ struct nf_conntrack_expect *exp);
+extern int (*nat_callforwarding_hook) (struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, int dataoff,
+ TransportAddress *taddr,
+ __be16 port,
+ struct nf_conntrack_expect *exp);
+extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned char **data, TransportAddress *taddr,
+ int idx, __be16 port,
+ struct nf_conntrack_expect *exp);
+
+#endif
+
+#endif
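As a small illustration of the per-master state above, which keeps one entry per conntrack direction (the original and the NATed value), the hypothetical sketch below just prints the tracked signalling ports; kernel context is assumed.

#include <linux/kernel.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/nf_conntrack_h323.h>

/* illustrative only: dump the original and NATed Q.931/H.245 signal ports */
static void example_h323_dump_ports(const struct nf_ct_h323_master *info)
{
	pr_debug("h323: sig port orig=%u reply=%u\n",
		 ntohs(info->sig_port[IP_CT_DIR_ORIGINAL]),
		 ntohs(info->sig_port[IP_CT_DIR_REPLY]));
}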
diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h
new file mode 100644
index 000000000..3176a277e
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h
@@ -0,0 +1,98 @@
+/****************************************************************************
+ * ip_conntrack_h323_asn1.h - BER and PER decoding library for H.323
+ * conntrack/NAT module.
+ *
+ * Copyright (c) 2006 by Jing Min Zhao <zhaojingmin@users.sourceforge.net>
+ *
+ * This source code is licensed under the General Public License version 2.
+ *
+ *
+ * This library is based on H.225 version 4, H.235 version 2 and H.245
+ * version 7. It is extremely optimized to decode only the absolutely
+ * necessary objects in a signal for Linux kernel NAT module use, so don't
+ * expect it to be a full ASN.1 library.
+ *
+ * Features:
+ *
+ * 1. Small. The total size of code plus data is less than 20 KB (IA32).
+ * 2. Fast. Decoding Netmeeting's Setup signal 1 million times on a PIII 866
+ * takes only 3.9 seconds.
+ * 3. No memory allocation. It uses a static object. No need to initialize or
+ *    clean up.
+ * 4. Thread safe.
+ * 5. Supports embedded architectures that have no misaligned memory access
+ *    support.
+ *
+ * Limitations:
+ *
+ * 1. At most 30 faststart entries. In practice this is limited by Ethernet's
+ *    MTU: if a Setup signal contains more than 30 faststart entries, the
+ *    packet size will very likely exceed the MTU and the TPKT will be
+ *    fragmented, which this Netfilter module does not know how to handle
+ *    (help welcome). That said, 30 is enough for most cases.
+ * 2. IPv4 addresses only.
+ *
+ ****************************************************************************/
+
+#ifndef _NF_CONNTRACK_HELPER_H323_ASN1_H_
+#define _NF_CONNTRACK_HELPER_H323_ASN1_H_
+
+/*****************************************************************************
+ * H.323 Types
+ ****************************************************************************/
+#include <linux/netfilter/nf_conntrack_h323_types.h>
+
+typedef struct {
+ enum {
+ Q931_NationalEscape = 0x00,
+ Q931_Alerting = 0x01,
+ Q931_CallProceeding = 0x02,
+ Q931_Connect = 0x07,
+ Q931_ConnectAck = 0x0F,
+ Q931_Progress = 0x03,
+ Q931_Setup = 0x05,
+ Q931_SetupAck = 0x0D,
+ Q931_Resume = 0x26,
+ Q931_ResumeAck = 0x2E,
+ Q931_ResumeReject = 0x22,
+ Q931_Suspend = 0x25,
+ Q931_SuspendAck = 0x2D,
+ Q931_SuspendReject = 0x21,
+ Q931_UserInformation = 0x20,
+ Q931_Disconnect = 0x45,
+ Q931_Release = 0x4D,
+ Q931_ReleaseComplete = 0x5A,
+ Q931_Restart = 0x46,
+ Q931_RestartAck = 0x4E,
+ Q931_Segment = 0x60,
+ Q931_CongestionCtrl = 0x79,
+ Q931_Information = 0x7B,
+ Q931_Notify = 0x6E,
+ Q931_Status = 0x7D,
+ Q931_StatusEnquiry = 0x75,
+ Q931_Facility = 0x62
+ } MessageType;
+ H323_UserInformation UUIE;
+} Q931;
+
+/*****************************************************************************
+ * Decode Functions Return Codes
+ ****************************************************************************/
+
+#define H323_ERROR_NONE 0 /* Decoded successfully */
+#define H323_ERROR_STOP 1 /* Decoding stopped, not really an error */
+#define H323_ERROR_BOUND -1
+#define H323_ERROR_RANGE -2
+
+
+/*****************************************************************************
+ * Decode Functions
+ ****************************************************************************/
+
+int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage * ras);
+int DecodeQ931(unsigned char *buf, size_t sz, Q931 * q931);
+int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz,
+ MultimediaSystemControlMessage *
+ mscm);
+
+#endif
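A rough usage sketch for the decoder entry points above: decode one TPKT payload and look only at Setup messages. It is illustrative (kernel context assumed; the locking that must serialize access to the static Q931 object, and most error handling, are elided).

#include <linux/netfilter/nf_conntrack_h323_asn1.h>

static int example_handle_q931(unsigned char *data, size_t datalen)
{
	static Q931 q931;	/* decoder output; serialize access in real code */
	int ret = DecodeQ931(data, datalen, &q931);

	if (ret < 0)		/* H323_ERROR_BOUND or H323_ERROR_RANGE */
		return ret;
	if (q931.MessageType == Q931_Setup) {
		/* q931.UUIE.h323_uu_pdu now holds the decoded Setup UUIE */
	}
	return 0;
}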
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
new file mode 100644
index 000000000..b0821f45f
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -0,0 +1,934 @@
+/* Generated by Jing Min Zhao's ASN.1 parser, May 16 2007
+ *
+ * Copyright (c) 2006 Jing Min Zhao <zhaojingmin@users.sourceforge.net>
+ *
+ * This source code is licensed under the General Public License version 2.
+ */
+
+typedef struct TransportAddress_ipAddress { /* SEQUENCE */
+ int options; /* No use */
+ unsigned int ip;
+} TransportAddress_ipAddress;
+
+typedef struct TransportAddress_ip6Address { /* SEQUENCE */
+ int options; /* No use */
+ unsigned int ip;
+} TransportAddress_ip6Address;
+
+typedef struct TransportAddress { /* CHOICE */
+ enum {
+ eTransportAddress_ipAddress,
+ eTransportAddress_ipSourceRoute,
+ eTransportAddress_ipxAddress,
+ eTransportAddress_ip6Address,
+ eTransportAddress_netBios,
+ eTransportAddress_nsap,
+ eTransportAddress_nonStandardAddress,
+ } choice;
+ union {
+ TransportAddress_ipAddress ipAddress;
+ TransportAddress_ip6Address ip6Address;
+ };
+} TransportAddress;
+
+typedef struct DataProtocolCapability { /* CHOICE */
+ enum {
+ eDataProtocolCapability_nonStandard,
+ eDataProtocolCapability_v14buffered,
+ eDataProtocolCapability_v42lapm,
+ eDataProtocolCapability_hdlcFrameTunnelling,
+ eDataProtocolCapability_h310SeparateVCStack,
+ eDataProtocolCapability_h310SingleVCStack,
+ eDataProtocolCapability_transparent,
+ eDataProtocolCapability_segmentationAndReassembly,
+ eDataProtocolCapability_hdlcFrameTunnelingwSAR,
+ eDataProtocolCapability_v120,
+ eDataProtocolCapability_separateLANStack,
+ eDataProtocolCapability_v76wCompression,
+ eDataProtocolCapability_tcp,
+ eDataProtocolCapability_udp,
+ } choice;
+} DataProtocolCapability;
+
+typedef struct DataApplicationCapability_application { /* CHOICE */
+ enum {
+ eDataApplicationCapability_application_nonStandard,
+ eDataApplicationCapability_application_t120,
+ eDataApplicationCapability_application_dsm_cc,
+ eDataApplicationCapability_application_userData,
+ eDataApplicationCapability_application_t84,
+ eDataApplicationCapability_application_t434,
+ eDataApplicationCapability_application_h224,
+ eDataApplicationCapability_application_nlpid,
+ eDataApplicationCapability_application_dsvdControl,
+ eDataApplicationCapability_application_h222DataPartitioning,
+ eDataApplicationCapability_application_t30fax,
+ eDataApplicationCapability_application_t140,
+ eDataApplicationCapability_application_t38fax,
+ eDataApplicationCapability_application_genericDataCapability,
+ } choice;
+ union {
+ DataProtocolCapability t120;
+ };
+} DataApplicationCapability_application;
+
+typedef struct DataApplicationCapability { /* SEQUENCE */
+ int options; /* No use */
+ DataApplicationCapability_application application;
+} DataApplicationCapability;
+
+typedef struct DataType { /* CHOICE */
+ enum {
+ eDataType_nonStandard,
+ eDataType_nullData,
+ eDataType_videoData,
+ eDataType_audioData,
+ eDataType_data,
+ eDataType_encryptionData,
+ eDataType_h235Control,
+ eDataType_h235Media,
+ eDataType_multiplexedStream,
+ } choice;
+ union {
+ DataApplicationCapability data;
+ };
+} DataType;
+
+typedef struct UnicastAddress_iPAddress { /* SEQUENCE */
+ int options; /* No use */
+ unsigned int network;
+} UnicastAddress_iPAddress;
+
+typedef struct UnicastAddress_iP6Address { /* SEQUENCE */
+ int options; /* No use */
+ unsigned int network;
+} UnicastAddress_iP6Address;
+
+typedef struct UnicastAddress { /* CHOICE */
+ enum {
+ eUnicastAddress_iPAddress,
+ eUnicastAddress_iPXAddress,
+ eUnicastAddress_iP6Address,
+ eUnicastAddress_netBios,
+ eUnicastAddress_iPSourceRouteAddress,
+ eUnicastAddress_nsap,
+ eUnicastAddress_nonStandardAddress,
+ } choice;
+ union {
+ UnicastAddress_iPAddress iPAddress;
+ UnicastAddress_iP6Address iP6Address;
+ };
+} UnicastAddress;
+
+typedef struct H245_TransportAddress { /* CHOICE */
+ enum {
+ eH245_TransportAddress_unicastAddress,
+ eH245_TransportAddress_multicastAddress,
+ } choice;
+ union {
+ UnicastAddress unicastAddress;
+ };
+} H245_TransportAddress;
+
+typedef struct H2250LogicalChannelParameters { /* SEQUENCE */
+ enum {
+ eH2250LogicalChannelParameters_nonStandard = (1 << 31),
+ eH2250LogicalChannelParameters_associatedSessionID =
+ (1 << 30),
+ eH2250LogicalChannelParameters_mediaChannel = (1 << 29),
+ eH2250LogicalChannelParameters_mediaGuaranteedDelivery =
+ (1 << 28),
+ eH2250LogicalChannelParameters_mediaControlChannel =
+ (1 << 27),
+ eH2250LogicalChannelParameters_mediaControlGuaranteedDelivery
+ = (1 << 26),
+ eH2250LogicalChannelParameters_silenceSuppression = (1 << 25),
+ eH2250LogicalChannelParameters_destination = (1 << 24),
+ eH2250LogicalChannelParameters_dynamicRTPPayloadType =
+ (1 << 23),
+ eH2250LogicalChannelParameters_mediaPacketization = (1 << 22),
+ eH2250LogicalChannelParameters_transportCapability =
+ (1 << 21),
+ eH2250LogicalChannelParameters_redundancyEncoding = (1 << 20),
+ eH2250LogicalChannelParameters_source = (1 << 19),
+ } options;
+ H245_TransportAddress mediaChannel;
+ H245_TransportAddress mediaControlChannel;
+} H2250LogicalChannelParameters;
+
+typedef struct OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters { /* CHOICE */
+ enum {
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
+ eOpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters_none,
+ } choice;
+ union {
+ H2250LogicalChannelParameters h2250LogicalChannelParameters;
+ };
+} OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters;
+
+typedef struct OpenLogicalChannel_forwardLogicalChannelParameters { /* SEQUENCE */
+ enum {
+ eOpenLogicalChannel_forwardLogicalChannelParameters_portNumber
+ = (1 << 31),
+ eOpenLogicalChannel_forwardLogicalChannelParameters_forwardLogicalChannelDependency
+ = (1 << 30),
+ eOpenLogicalChannel_forwardLogicalChannelParameters_replacementFor
+ = (1 << 29),
+ } options;
+ DataType dataType;
+ OpenLogicalChannel_forwardLogicalChannelParameters_multiplexParameters
+ multiplexParameters;
+} OpenLogicalChannel_forwardLogicalChannelParameters;
+
+typedef struct OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters { /* CHOICE */
+ enum {
+ eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h223LogicalChannelParameters,
+ eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_v76LogicalChannelParameters,
+ eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
+ } choice;
+ union {
+ H2250LogicalChannelParameters h2250LogicalChannelParameters;
+ };
+} OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters;
+
+typedef struct OpenLogicalChannel_reverseLogicalChannelParameters { /* SEQUENCE */
+ enum {
+ eOpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters
+ = (1 << 31),
+ eOpenLogicalChannel_reverseLogicalChannelParameters_reverseLogicalChannelDependency
+ = (1 << 30),
+ eOpenLogicalChannel_reverseLogicalChannelParameters_replacementFor
+ = (1 << 29),
+ } options;
+ OpenLogicalChannel_reverseLogicalChannelParameters_multiplexParameters
+ multiplexParameters;
+} OpenLogicalChannel_reverseLogicalChannelParameters;
+
+typedef struct NetworkAccessParameters_networkAddress { /* CHOICE */
+ enum {
+ eNetworkAccessParameters_networkAddress_q2931Address,
+ eNetworkAccessParameters_networkAddress_e164Address,
+ eNetworkAccessParameters_networkAddress_localAreaAddress,
+ } choice;
+ union {
+ H245_TransportAddress localAreaAddress;
+ };
+} NetworkAccessParameters_networkAddress;
+
+typedef struct NetworkAccessParameters { /* SEQUENCE */
+ enum {
+ eNetworkAccessParameters_distribution = (1 << 31),
+ eNetworkAccessParameters_externalReference = (1 << 30),
+ eNetworkAccessParameters_t120SetupProcedure = (1 << 29),
+ } options;
+ NetworkAccessParameters_networkAddress networkAddress;
+} NetworkAccessParameters;
+
+typedef struct OpenLogicalChannel { /* SEQUENCE */
+ enum {
+ eOpenLogicalChannel_reverseLogicalChannelParameters =
+ (1 << 31),
+ eOpenLogicalChannel_separateStack = (1 << 30),
+ eOpenLogicalChannel_encryptionSync = (1 << 29),
+ } options;
+ OpenLogicalChannel_forwardLogicalChannelParameters
+ forwardLogicalChannelParameters;
+ OpenLogicalChannel_reverseLogicalChannelParameters
+ reverseLogicalChannelParameters;
+ NetworkAccessParameters separateStack;
+} OpenLogicalChannel;
+
+typedef struct Setup_UUIE_fastStart { /* SEQUENCE OF */
+ int count;
+ OpenLogicalChannel item[30];
+} Setup_UUIE_fastStart;
+
+typedef struct Setup_UUIE { /* SEQUENCE */
+ enum {
+ eSetup_UUIE_h245Address = (1 << 31),
+ eSetup_UUIE_sourceAddress = (1 << 30),
+ eSetup_UUIE_destinationAddress = (1 << 29),
+ eSetup_UUIE_destCallSignalAddress = (1 << 28),
+ eSetup_UUIE_destExtraCallInfo = (1 << 27),
+ eSetup_UUIE_destExtraCRV = (1 << 26),
+ eSetup_UUIE_callServices = (1 << 25),
+ eSetup_UUIE_sourceCallSignalAddress = (1 << 24),
+ eSetup_UUIE_remoteExtensionAddress = (1 << 23),
+ eSetup_UUIE_callIdentifier = (1 << 22),
+ eSetup_UUIE_h245SecurityCapability = (1 << 21),
+ eSetup_UUIE_tokens = (1 << 20),
+ eSetup_UUIE_cryptoTokens = (1 << 19),
+ eSetup_UUIE_fastStart = (1 << 18),
+ eSetup_UUIE_mediaWaitForConnect = (1 << 17),
+ eSetup_UUIE_canOverlapSend = (1 << 16),
+ eSetup_UUIE_endpointIdentifier = (1 << 15),
+ eSetup_UUIE_multipleCalls = (1 << 14),
+ eSetup_UUIE_maintainConnection = (1 << 13),
+ eSetup_UUIE_connectionParameters = (1 << 12),
+ eSetup_UUIE_language = (1 << 11),
+ eSetup_UUIE_presentationIndicator = (1 << 10),
+ eSetup_UUIE_screeningIndicator = (1 << 9),
+ eSetup_UUIE_serviceControl = (1 << 8),
+ eSetup_UUIE_symmetricOperationRequired = (1 << 7),
+ eSetup_UUIE_capacity = (1 << 6),
+ eSetup_UUIE_circuitInfo = (1 << 5),
+ eSetup_UUIE_desiredProtocols = (1 << 4),
+ eSetup_UUIE_neededFeatures = (1 << 3),
+ eSetup_UUIE_desiredFeatures = (1 << 2),
+ eSetup_UUIE_supportedFeatures = (1 << 1),
+ eSetup_UUIE_parallelH245Control = (1 << 0),
+ } options;
+ TransportAddress h245Address;
+ TransportAddress destCallSignalAddress;
+ TransportAddress sourceCallSignalAddress;
+ Setup_UUIE_fastStart fastStart;
+} Setup_UUIE;
+
+typedef struct CallProceeding_UUIE_fastStart { /* SEQUENCE OF */
+ int count;
+ OpenLogicalChannel item[30];
+} CallProceeding_UUIE_fastStart;
+
+typedef struct CallProceeding_UUIE { /* SEQUENCE */
+ enum {
+ eCallProceeding_UUIE_h245Address = (1 << 31),
+ eCallProceeding_UUIE_callIdentifier = (1 << 30),
+ eCallProceeding_UUIE_h245SecurityMode = (1 << 29),
+ eCallProceeding_UUIE_tokens = (1 << 28),
+ eCallProceeding_UUIE_cryptoTokens = (1 << 27),
+ eCallProceeding_UUIE_fastStart = (1 << 26),
+ eCallProceeding_UUIE_multipleCalls = (1 << 25),
+ eCallProceeding_UUIE_maintainConnection = (1 << 24),
+ eCallProceeding_UUIE_fastConnectRefused = (1 << 23),
+ eCallProceeding_UUIE_featureSet = (1 << 22),
+ } options;
+ TransportAddress h245Address;
+ CallProceeding_UUIE_fastStart fastStart;
+} CallProceeding_UUIE;
+
+typedef struct Connect_UUIE_fastStart { /* SEQUENCE OF */
+ int count;
+ OpenLogicalChannel item[30];
+} Connect_UUIE_fastStart;
+
+typedef struct Connect_UUIE { /* SEQUENCE */
+ enum {
+ eConnect_UUIE_h245Address = (1 << 31),
+ eConnect_UUIE_callIdentifier = (1 << 30),
+ eConnect_UUIE_h245SecurityMode = (1 << 29),
+ eConnect_UUIE_tokens = (1 << 28),
+ eConnect_UUIE_cryptoTokens = (1 << 27),
+ eConnect_UUIE_fastStart = (1 << 26),
+ eConnect_UUIE_multipleCalls = (1 << 25),
+ eConnect_UUIE_maintainConnection = (1 << 24),
+ eConnect_UUIE_language = (1 << 23),
+ eConnect_UUIE_connectedAddress = (1 << 22),
+ eConnect_UUIE_presentationIndicator = (1 << 21),
+ eConnect_UUIE_screeningIndicator = (1 << 20),
+ eConnect_UUIE_fastConnectRefused = (1 << 19),
+ eConnect_UUIE_serviceControl = (1 << 18),
+ eConnect_UUIE_capacity = (1 << 17),
+ eConnect_UUIE_featureSet = (1 << 16),
+ } options;
+ TransportAddress h245Address;
+ Connect_UUIE_fastStart fastStart;
+} Connect_UUIE;
+
+typedef struct Alerting_UUIE_fastStart { /* SEQUENCE OF */
+ int count;
+ OpenLogicalChannel item[30];
+} Alerting_UUIE_fastStart;
+
+typedef struct Alerting_UUIE { /* SEQUENCE */
+ enum {
+ eAlerting_UUIE_h245Address = (1 << 31),
+ eAlerting_UUIE_callIdentifier = (1 << 30),
+ eAlerting_UUIE_h245SecurityMode = (1 << 29),
+ eAlerting_UUIE_tokens = (1 << 28),
+ eAlerting_UUIE_cryptoTokens = (1 << 27),
+ eAlerting_UUIE_fastStart = (1 << 26),
+ eAlerting_UUIE_multipleCalls = (1 << 25),
+ eAlerting_UUIE_maintainConnection = (1 << 24),
+ eAlerting_UUIE_alertingAddress = (1 << 23),
+ eAlerting_UUIE_presentationIndicator = (1 << 22),
+ eAlerting_UUIE_screeningIndicator = (1 << 21),
+ eAlerting_UUIE_fastConnectRefused = (1 << 20),
+ eAlerting_UUIE_serviceControl = (1 << 19),
+ eAlerting_UUIE_capacity = (1 << 18),
+ eAlerting_UUIE_featureSet = (1 << 17),
+ } options;
+ TransportAddress h245Address;
+ Alerting_UUIE_fastStart fastStart;
+} Alerting_UUIE;
+
+typedef struct FacilityReason { /* CHOICE */
+ enum {
+ eFacilityReason_routeCallToGatekeeper,
+ eFacilityReason_callForwarded,
+ eFacilityReason_routeCallToMC,
+ eFacilityReason_undefinedReason,
+ eFacilityReason_conferenceListChoice,
+ eFacilityReason_startH245,
+ eFacilityReason_noH245,
+ eFacilityReason_newTokens,
+ eFacilityReason_featureSetUpdate,
+ eFacilityReason_forwardedElements,
+ eFacilityReason_transportedInformation,
+ } choice;
+} FacilityReason;
+
+typedef struct Facility_UUIE_fastStart { /* SEQUENCE OF */
+ int count;
+ OpenLogicalChannel item[30];
+} Facility_UUIE_fastStart;
+
+typedef struct Facility_UUIE { /* SEQUENCE */
+ enum {
+ eFacility_UUIE_alternativeAddress = (1 << 31),
+ eFacility_UUIE_alternativeAliasAddress = (1 << 30),
+ eFacility_UUIE_conferenceID = (1 << 29),
+ eFacility_UUIE_callIdentifier = (1 << 28),
+ eFacility_UUIE_destExtraCallInfo = (1 << 27),
+ eFacility_UUIE_remoteExtensionAddress = (1 << 26),
+ eFacility_UUIE_tokens = (1 << 25),
+ eFacility_UUIE_cryptoTokens = (1 << 24),
+ eFacility_UUIE_conferences = (1 << 23),
+ eFacility_UUIE_h245Address = (1 << 22),
+ eFacility_UUIE_fastStart = (1 << 21),
+ eFacility_UUIE_multipleCalls = (1 << 20),
+ eFacility_UUIE_maintainConnection = (1 << 19),
+ eFacility_UUIE_fastConnectRefused = (1 << 18),
+ eFacility_UUIE_serviceControl = (1 << 17),
+ eFacility_UUIE_circuitInfo = (1 << 16),
+ eFacility_UUIE_featureSet = (1 << 15),
+ eFacility_UUIE_destinationInfo = (1 << 14),
+ eFacility_UUIE_h245SecurityMode = (1 << 13),
+ } options;
+ TransportAddress alternativeAddress;
+ FacilityReason reason;
+ TransportAddress h245Address;
+ Facility_UUIE_fastStart fastStart;
+} Facility_UUIE;
+
+typedef struct Progress_UUIE_fastStart { /* SEQUENCE OF */
+ int count;
+ OpenLogicalChannel item[30];
+} Progress_UUIE_fastStart;
+
+typedef struct Progress_UUIE { /* SEQUENCE */
+ enum {
+ eProgress_UUIE_h245Address = (1 << 31),
+ eProgress_UUIE_h245SecurityMode = (1 << 30),
+ eProgress_UUIE_tokens = (1 << 29),
+ eProgress_UUIE_cryptoTokens = (1 << 28),
+ eProgress_UUIE_fastStart = (1 << 27),
+ eProgress_UUIE_multipleCalls = (1 << 26),
+ eProgress_UUIE_maintainConnection = (1 << 25),
+ eProgress_UUIE_fastConnectRefused = (1 << 24),
+ } options;
+ TransportAddress h245Address;
+ Progress_UUIE_fastStart fastStart;
+} Progress_UUIE;
+
+typedef struct H323_UU_PDU_h323_message_body { /* CHOICE */
+ enum {
+ eH323_UU_PDU_h323_message_body_setup,
+ eH323_UU_PDU_h323_message_body_callProceeding,
+ eH323_UU_PDU_h323_message_body_connect,
+ eH323_UU_PDU_h323_message_body_alerting,
+ eH323_UU_PDU_h323_message_body_information,
+ eH323_UU_PDU_h323_message_body_releaseComplete,
+ eH323_UU_PDU_h323_message_body_facility,
+ eH323_UU_PDU_h323_message_body_progress,
+ eH323_UU_PDU_h323_message_body_empty,
+ eH323_UU_PDU_h323_message_body_status,
+ eH323_UU_PDU_h323_message_body_statusInquiry,
+ eH323_UU_PDU_h323_message_body_setupAcknowledge,
+ eH323_UU_PDU_h323_message_body_notify,
+ } choice;
+ union {
+ Setup_UUIE setup;
+ CallProceeding_UUIE callProceeding;
+ Connect_UUIE connect;
+ Alerting_UUIE alerting;
+ Facility_UUIE facility;
+ Progress_UUIE progress;
+ };
+} H323_UU_PDU_h323_message_body;
+
+typedef struct RequestMessage { /* CHOICE */
+ enum {
+ eRequestMessage_nonStandard,
+ eRequestMessage_masterSlaveDetermination,
+ eRequestMessage_terminalCapabilitySet,
+ eRequestMessage_openLogicalChannel,
+ eRequestMessage_closeLogicalChannel,
+ eRequestMessage_requestChannelClose,
+ eRequestMessage_multiplexEntrySend,
+ eRequestMessage_requestMultiplexEntry,
+ eRequestMessage_requestMode,
+ eRequestMessage_roundTripDelayRequest,
+ eRequestMessage_maintenanceLoopRequest,
+ eRequestMessage_communicationModeRequest,
+ eRequestMessage_conferenceRequest,
+ eRequestMessage_multilinkRequest,
+ eRequestMessage_logicalChannelRateRequest,
+ } choice;
+ union {
+ OpenLogicalChannel openLogicalChannel;
+ };
+} RequestMessage;
+
+typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters { /* CHOICE */
+ enum {
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h222LogicalChannelParameters,
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters_h2250LogicalChannelParameters,
+ } choice;
+ union {
+ H2250LogicalChannelParameters h2250LogicalChannelParameters;
+ };
+} OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters;
+
+typedef struct OpenLogicalChannelAck_reverseLogicalChannelParameters { /* SEQUENCE */
+ enum {
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_portNumber
+ = (1 << 31),
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters
+ = (1 << 30),
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters_replacementFor
+ = (1 << 29),
+ } options;
+ OpenLogicalChannelAck_reverseLogicalChannelParameters_multiplexParameters
+ multiplexParameters;
+} OpenLogicalChannelAck_reverseLogicalChannelParameters;
+
+typedef struct H2250LogicalChannelAckParameters { /* SEQUENCE */
+ enum {
+ eH2250LogicalChannelAckParameters_nonStandard = (1 << 31),
+ eH2250LogicalChannelAckParameters_sessionID = (1 << 30),
+ eH2250LogicalChannelAckParameters_mediaChannel = (1 << 29),
+ eH2250LogicalChannelAckParameters_mediaControlChannel =
+ (1 << 28),
+ eH2250LogicalChannelAckParameters_dynamicRTPPayloadType =
+ (1 << 27),
+ eH2250LogicalChannelAckParameters_flowControlToZero =
+ (1 << 26),
+ eH2250LogicalChannelAckParameters_portNumber = (1 << 25),
+ } options;
+ H245_TransportAddress mediaChannel;
+ H245_TransportAddress mediaControlChannel;
+} H2250LogicalChannelAckParameters;
+
+typedef struct OpenLogicalChannelAck_forwardMultiplexAckParameters { /* CHOICE */
+ enum {
+ eOpenLogicalChannelAck_forwardMultiplexAckParameters_h2250LogicalChannelAckParameters,
+ } choice;
+ union {
+ H2250LogicalChannelAckParameters
+ h2250LogicalChannelAckParameters;
+ };
+} OpenLogicalChannelAck_forwardMultiplexAckParameters;
+
+typedef struct OpenLogicalChannelAck { /* SEQUENCE */
+ enum {
+ eOpenLogicalChannelAck_reverseLogicalChannelParameters =
+ (1 << 31),
+ eOpenLogicalChannelAck_separateStack = (1 << 30),
+ eOpenLogicalChannelAck_forwardMultiplexAckParameters =
+ (1 << 29),
+ eOpenLogicalChannelAck_encryptionSync = (1 << 28),
+ } options;
+ OpenLogicalChannelAck_reverseLogicalChannelParameters
+ reverseLogicalChannelParameters;
+ NetworkAccessParameters separateStack;
+ OpenLogicalChannelAck_forwardMultiplexAckParameters
+ forwardMultiplexAckParameters;
+} OpenLogicalChannelAck;
+
+typedef struct ResponseMessage { /* CHOICE */
+ enum {
+ eResponseMessage_nonStandard,
+ eResponseMessage_masterSlaveDeterminationAck,
+ eResponseMessage_masterSlaveDeterminationReject,
+ eResponseMessage_terminalCapabilitySetAck,
+ eResponseMessage_terminalCapabilitySetReject,
+ eResponseMessage_openLogicalChannelAck,
+ eResponseMessage_openLogicalChannelReject,
+ eResponseMessage_closeLogicalChannelAck,
+ eResponseMessage_requestChannelCloseAck,
+ eResponseMessage_requestChannelCloseReject,
+ eResponseMessage_multiplexEntrySendAck,
+ eResponseMessage_multiplexEntrySendReject,
+ eResponseMessage_requestMultiplexEntryAck,
+ eResponseMessage_requestMultiplexEntryReject,
+ eResponseMessage_requestModeAck,
+ eResponseMessage_requestModeReject,
+ eResponseMessage_roundTripDelayResponse,
+ eResponseMessage_maintenanceLoopAck,
+ eResponseMessage_maintenanceLoopReject,
+ eResponseMessage_communicationModeResponse,
+ eResponseMessage_conferenceResponse,
+ eResponseMessage_multilinkResponse,
+ eResponseMessage_logicalChannelRateAcknowledge,
+ eResponseMessage_logicalChannelRateReject,
+ } choice;
+ union {
+ OpenLogicalChannelAck openLogicalChannelAck;
+ };
+} ResponseMessage;
+
+typedef struct MultimediaSystemControlMessage { /* CHOICE */
+ enum {
+ eMultimediaSystemControlMessage_request,
+ eMultimediaSystemControlMessage_response,
+ eMultimediaSystemControlMessage_command,
+ eMultimediaSystemControlMessage_indication,
+ } choice;
+ union {
+ RequestMessage request;
+ ResponseMessage response;
+ };
+} MultimediaSystemControlMessage;
+
+typedef struct H323_UU_PDU_h245Control { /* SEQUENCE OF */
+ int count;
+ MultimediaSystemControlMessage item[4];
+} H323_UU_PDU_h245Control;
+
+typedef struct H323_UU_PDU { /* SEQUENCE */
+ enum {
+ eH323_UU_PDU_nonStandardData = (1 << 31),
+ eH323_UU_PDU_h4501SupplementaryService = (1 << 30),
+ eH323_UU_PDU_h245Tunneling = (1 << 29),
+ eH323_UU_PDU_h245Control = (1 << 28),
+ eH323_UU_PDU_nonStandardControl = (1 << 27),
+ eH323_UU_PDU_callLinkage = (1 << 26),
+ eH323_UU_PDU_tunnelledSignallingMessage = (1 << 25),
+ eH323_UU_PDU_provisionalRespToH245Tunneling = (1 << 24),
+ eH323_UU_PDU_stimulusControl = (1 << 23),
+ eH323_UU_PDU_genericData = (1 << 22),
+ } options;
+ H323_UU_PDU_h323_message_body h323_message_body;
+ H323_UU_PDU_h245Control h245Control;
+} H323_UU_PDU;
+
+typedef struct H323_UserInformation { /* SEQUENCE */
+ enum {
+ eH323_UserInformation_user_data = (1 << 31),
+ } options;
+ H323_UU_PDU h323_uu_pdu;
+} H323_UserInformation;
+
+typedef struct GatekeeperRequest { /* SEQUENCE */
+ enum {
+ eGatekeeperRequest_nonStandardData = (1 << 31),
+ eGatekeeperRequest_gatekeeperIdentifier = (1 << 30),
+ eGatekeeperRequest_callServices = (1 << 29),
+ eGatekeeperRequest_endpointAlias = (1 << 28),
+ eGatekeeperRequest_alternateEndpoints = (1 << 27),
+ eGatekeeperRequest_tokens = (1 << 26),
+ eGatekeeperRequest_cryptoTokens = (1 << 25),
+ eGatekeeperRequest_authenticationCapability = (1 << 24),
+ eGatekeeperRequest_algorithmOIDs = (1 << 23),
+ eGatekeeperRequest_integrity = (1 << 22),
+ eGatekeeperRequest_integrityCheckValue = (1 << 21),
+ eGatekeeperRequest_supportsAltGK = (1 << 20),
+ eGatekeeperRequest_featureSet = (1 << 19),
+ eGatekeeperRequest_genericData = (1 << 18),
+ } options;
+ TransportAddress rasAddress;
+} GatekeeperRequest;
+
+typedef struct GatekeeperConfirm { /* SEQUENCE */
+ enum {
+ eGatekeeperConfirm_nonStandardData = (1 << 31),
+ eGatekeeperConfirm_gatekeeperIdentifier = (1 << 30),
+ eGatekeeperConfirm_alternateGatekeeper = (1 << 29),
+ eGatekeeperConfirm_authenticationMode = (1 << 28),
+ eGatekeeperConfirm_tokens = (1 << 27),
+ eGatekeeperConfirm_cryptoTokens = (1 << 26),
+ eGatekeeperConfirm_algorithmOID = (1 << 25),
+ eGatekeeperConfirm_integrity = (1 << 24),
+ eGatekeeperConfirm_integrityCheckValue = (1 << 23),
+ eGatekeeperConfirm_featureSet = (1 << 22),
+ eGatekeeperConfirm_genericData = (1 << 21),
+ } options;
+ TransportAddress rasAddress;
+} GatekeeperConfirm;
+
+typedef struct RegistrationRequest_callSignalAddress { /* SEQUENCE OF */
+ int count;
+ TransportAddress item[10];
+} RegistrationRequest_callSignalAddress;
+
+typedef struct RegistrationRequest_rasAddress { /* SEQUENCE OF */
+ int count;
+ TransportAddress item[10];
+} RegistrationRequest_rasAddress;
+
+typedef struct RegistrationRequest { /* SEQUENCE */
+ enum {
+ eRegistrationRequest_nonStandardData = (1 << 31),
+ eRegistrationRequest_terminalAlias = (1 << 30),
+ eRegistrationRequest_gatekeeperIdentifier = (1 << 29),
+ eRegistrationRequest_alternateEndpoints = (1 << 28),
+ eRegistrationRequest_timeToLive = (1 << 27),
+ eRegistrationRequest_tokens = (1 << 26),
+ eRegistrationRequest_cryptoTokens = (1 << 25),
+ eRegistrationRequest_integrityCheckValue = (1 << 24),
+ eRegistrationRequest_keepAlive = (1 << 23),
+ eRegistrationRequest_endpointIdentifier = (1 << 22),
+ eRegistrationRequest_willSupplyUUIEs = (1 << 21),
+ eRegistrationRequest_maintainConnection = (1 << 20),
+ eRegistrationRequest_alternateTransportAddresses = (1 << 19),
+ eRegistrationRequest_additiveRegistration = (1 << 18),
+ eRegistrationRequest_terminalAliasPattern = (1 << 17),
+ eRegistrationRequest_supportsAltGK = (1 << 16),
+ eRegistrationRequest_usageReportingCapability = (1 << 15),
+ eRegistrationRequest_multipleCalls = (1 << 14),
+ eRegistrationRequest_supportedH248Packages = (1 << 13),
+ eRegistrationRequest_callCreditCapability = (1 << 12),
+ eRegistrationRequest_capacityReportingCapability = (1 << 11),
+ eRegistrationRequest_capacity = (1 << 10),
+ eRegistrationRequest_featureSet = (1 << 9),
+ eRegistrationRequest_genericData = (1 << 8),
+ } options;
+ RegistrationRequest_callSignalAddress callSignalAddress;
+ RegistrationRequest_rasAddress rasAddress;
+ unsigned int timeToLive;
+} RegistrationRequest;
+
+typedef struct RegistrationConfirm_callSignalAddress { /* SEQUENCE OF */
+ int count;
+ TransportAddress item[10];
+} RegistrationConfirm_callSignalAddress;
+
+typedef struct RegistrationConfirm { /* SEQUENCE */
+ enum {
+ eRegistrationConfirm_nonStandardData = (1 << 31),
+ eRegistrationConfirm_terminalAlias = (1 << 30),
+ eRegistrationConfirm_gatekeeperIdentifier = (1 << 29),
+ eRegistrationConfirm_alternateGatekeeper = (1 << 28),
+ eRegistrationConfirm_timeToLive = (1 << 27),
+ eRegistrationConfirm_tokens = (1 << 26),
+ eRegistrationConfirm_cryptoTokens = (1 << 25),
+ eRegistrationConfirm_integrityCheckValue = (1 << 24),
+ eRegistrationConfirm_willRespondToIRR = (1 << 23),
+ eRegistrationConfirm_preGrantedARQ = (1 << 22),
+ eRegistrationConfirm_maintainConnection = (1 << 21),
+ eRegistrationConfirm_serviceControl = (1 << 20),
+ eRegistrationConfirm_supportsAdditiveRegistration = (1 << 19),
+ eRegistrationConfirm_terminalAliasPattern = (1 << 18),
+ eRegistrationConfirm_supportedPrefixes = (1 << 17),
+ eRegistrationConfirm_usageSpec = (1 << 16),
+ eRegistrationConfirm_featureServerAlias = (1 << 15),
+ eRegistrationConfirm_capacityReportingSpec = (1 << 14),
+ eRegistrationConfirm_featureSet = (1 << 13),
+ eRegistrationConfirm_genericData = (1 << 12),
+ } options;
+ RegistrationConfirm_callSignalAddress callSignalAddress;
+ unsigned int timeToLive;
+} RegistrationConfirm;
+
+typedef struct UnregistrationRequest_callSignalAddress { /* SEQUENCE OF */
+ int count;
+ TransportAddress item[10];
+} UnregistrationRequest_callSignalAddress;
+
+typedef struct UnregistrationRequest { /* SEQUENCE */
+ enum {
+ eUnregistrationRequest_endpointAlias = (1 << 31),
+ eUnregistrationRequest_nonStandardData = (1 << 30),
+ eUnregistrationRequest_endpointIdentifier = (1 << 29),
+ eUnregistrationRequest_alternateEndpoints = (1 << 28),
+ eUnregistrationRequest_gatekeeperIdentifier = (1 << 27),
+ eUnregistrationRequest_tokens = (1 << 26),
+ eUnregistrationRequest_cryptoTokens = (1 << 25),
+ eUnregistrationRequest_integrityCheckValue = (1 << 24),
+ eUnregistrationRequest_reason = (1 << 23),
+ eUnregistrationRequest_endpointAliasPattern = (1 << 22),
+ eUnregistrationRequest_supportedPrefixes = (1 << 21),
+ eUnregistrationRequest_alternateGatekeeper = (1 << 20),
+ eUnregistrationRequest_genericData = (1 << 19),
+ } options;
+ UnregistrationRequest_callSignalAddress callSignalAddress;
+} UnregistrationRequest;
+
+typedef struct AdmissionRequest { /* SEQUENCE */
+ enum {
+ eAdmissionRequest_callModel = (1 << 31),
+ eAdmissionRequest_destinationInfo = (1 << 30),
+ eAdmissionRequest_destCallSignalAddress = (1 << 29),
+ eAdmissionRequest_destExtraCallInfo = (1 << 28),
+ eAdmissionRequest_srcCallSignalAddress = (1 << 27),
+ eAdmissionRequest_nonStandardData = (1 << 26),
+ eAdmissionRequest_callServices = (1 << 25),
+ eAdmissionRequest_canMapAlias = (1 << 24),
+ eAdmissionRequest_callIdentifier = (1 << 23),
+ eAdmissionRequest_srcAlternatives = (1 << 22),
+ eAdmissionRequest_destAlternatives = (1 << 21),
+ eAdmissionRequest_gatekeeperIdentifier = (1 << 20),
+ eAdmissionRequest_tokens = (1 << 19),
+ eAdmissionRequest_cryptoTokens = (1 << 18),
+ eAdmissionRequest_integrityCheckValue = (1 << 17),
+ eAdmissionRequest_transportQOS = (1 << 16),
+ eAdmissionRequest_willSupplyUUIEs = (1 << 15),
+ eAdmissionRequest_callLinkage = (1 << 14),
+ eAdmissionRequest_gatewayDataRate = (1 << 13),
+ eAdmissionRequest_capacity = (1 << 12),
+ eAdmissionRequest_circuitInfo = (1 << 11),
+ eAdmissionRequest_desiredProtocols = (1 << 10),
+ eAdmissionRequest_desiredTunnelledProtocol = (1 << 9),
+ eAdmissionRequest_featureSet = (1 << 8),
+ eAdmissionRequest_genericData = (1 << 7),
+ } options;
+ TransportAddress destCallSignalAddress;
+ TransportAddress srcCallSignalAddress;
+} AdmissionRequest;
+
+typedef struct AdmissionConfirm { /* SEQUENCE */
+ enum {
+ eAdmissionConfirm_irrFrequency = (1 << 31),
+ eAdmissionConfirm_nonStandardData = (1 << 30),
+ eAdmissionConfirm_destinationInfo = (1 << 29),
+ eAdmissionConfirm_destExtraCallInfo = (1 << 28),
+ eAdmissionConfirm_destinationType = (1 << 27),
+ eAdmissionConfirm_remoteExtensionAddress = (1 << 26),
+ eAdmissionConfirm_alternateEndpoints = (1 << 25),
+ eAdmissionConfirm_tokens = (1 << 24),
+ eAdmissionConfirm_cryptoTokens = (1 << 23),
+ eAdmissionConfirm_integrityCheckValue = (1 << 22),
+ eAdmissionConfirm_transportQOS = (1 << 21),
+ eAdmissionConfirm_willRespondToIRR = (1 << 20),
+ eAdmissionConfirm_uuiesRequested = (1 << 19),
+ eAdmissionConfirm_language = (1 << 18),
+ eAdmissionConfirm_alternateTransportAddresses = (1 << 17),
+ eAdmissionConfirm_useSpecifiedTransport = (1 << 16),
+ eAdmissionConfirm_circuitInfo = (1 << 15),
+ eAdmissionConfirm_usageSpec = (1 << 14),
+ eAdmissionConfirm_supportedProtocols = (1 << 13),
+ eAdmissionConfirm_serviceControl = (1 << 12),
+ eAdmissionConfirm_multipleCalls = (1 << 11),
+ eAdmissionConfirm_featureSet = (1 << 10),
+ eAdmissionConfirm_genericData = (1 << 9),
+ } options;
+ TransportAddress destCallSignalAddress;
+} AdmissionConfirm;
+
+typedef struct LocationRequest { /* SEQUENCE */
+ enum {
+ eLocationRequest_endpointIdentifier = (1 << 31),
+ eLocationRequest_nonStandardData = (1 << 30),
+ eLocationRequest_sourceInfo = (1 << 29),
+ eLocationRequest_canMapAlias = (1 << 28),
+ eLocationRequest_gatekeeperIdentifier = (1 << 27),
+ eLocationRequest_tokens = (1 << 26),
+ eLocationRequest_cryptoTokens = (1 << 25),
+ eLocationRequest_integrityCheckValue = (1 << 24),
+ eLocationRequest_desiredProtocols = (1 << 23),
+ eLocationRequest_desiredTunnelledProtocol = (1 << 22),
+ eLocationRequest_featureSet = (1 << 21),
+ eLocationRequest_genericData = (1 << 20),
+ eLocationRequest_hopCount = (1 << 19),
+ eLocationRequest_circuitInfo = (1 << 18),
+ } options;
+ TransportAddress replyAddress;
+} LocationRequest;
+
+typedef struct LocationConfirm { /* SEQUENCE */
+ enum {
+ eLocationConfirm_nonStandardData = (1 << 31),
+ eLocationConfirm_destinationInfo = (1 << 30),
+ eLocationConfirm_destExtraCallInfo = (1 << 29),
+ eLocationConfirm_destinationType = (1 << 28),
+ eLocationConfirm_remoteExtensionAddress = (1 << 27),
+ eLocationConfirm_alternateEndpoints = (1 << 26),
+ eLocationConfirm_tokens = (1 << 25),
+ eLocationConfirm_cryptoTokens = (1 << 24),
+ eLocationConfirm_integrityCheckValue = (1 << 23),
+ eLocationConfirm_alternateTransportAddresses = (1 << 22),
+ eLocationConfirm_supportedProtocols = (1 << 21),
+ eLocationConfirm_multipleCalls = (1 << 20),
+ eLocationConfirm_featureSet = (1 << 19),
+ eLocationConfirm_genericData = (1 << 18),
+ eLocationConfirm_circuitInfo = (1 << 17),
+ eLocationConfirm_serviceControl = (1 << 16),
+ } options;
+ TransportAddress callSignalAddress;
+ TransportAddress rasAddress;
+} LocationConfirm;
+
+typedef struct InfoRequestResponse_callSignalAddress { /* SEQUENCE OF */
+ int count;
+ TransportAddress item[10];
+} InfoRequestResponse_callSignalAddress;
+
+typedef struct InfoRequestResponse { /* SEQUENCE */
+ enum {
+ eInfoRequestResponse_nonStandardData = (1 << 31),
+ eInfoRequestResponse_endpointAlias = (1 << 30),
+ eInfoRequestResponse_perCallInfo = (1 << 29),
+ eInfoRequestResponse_tokens = (1 << 28),
+ eInfoRequestResponse_cryptoTokens = (1 << 27),
+ eInfoRequestResponse_integrityCheckValue = (1 << 26),
+ eInfoRequestResponse_needResponse = (1 << 25),
+ eInfoRequestResponse_capacity = (1 << 24),
+ eInfoRequestResponse_irrStatus = (1 << 23),
+ eInfoRequestResponse_unsolicited = (1 << 22),
+ eInfoRequestResponse_genericData = (1 << 21),
+ } options;
+ TransportAddress rasAddress;
+ InfoRequestResponse_callSignalAddress callSignalAddress;
+} InfoRequestResponse;
+
+typedef struct RasMessage { /* CHOICE */
+ enum {
+ eRasMessage_gatekeeperRequest,
+ eRasMessage_gatekeeperConfirm,
+ eRasMessage_gatekeeperReject,
+ eRasMessage_registrationRequest,
+ eRasMessage_registrationConfirm,
+ eRasMessage_registrationReject,
+ eRasMessage_unregistrationRequest,
+ eRasMessage_unregistrationConfirm,
+ eRasMessage_unregistrationReject,
+ eRasMessage_admissionRequest,
+ eRasMessage_admissionConfirm,
+ eRasMessage_admissionReject,
+ eRasMessage_bandwidthRequest,
+ eRasMessage_bandwidthConfirm,
+ eRasMessage_bandwidthReject,
+ eRasMessage_disengageRequest,
+ eRasMessage_disengageConfirm,
+ eRasMessage_disengageReject,
+ eRasMessage_locationRequest,
+ eRasMessage_locationConfirm,
+ eRasMessage_locationReject,
+ eRasMessage_infoRequest,
+ eRasMessage_infoRequestResponse,
+ eRasMessage_nonStandardMessage,
+ eRasMessage_unknownMessageResponse,
+ eRasMessage_requestInProgress,
+ eRasMessage_resourcesAvailableIndicate,
+ eRasMessage_resourcesAvailableConfirm,
+ eRasMessage_infoRequestAck,
+ eRasMessage_infoRequestNak,
+ eRasMessage_serviceControlIndication,
+ eRasMessage_serviceControlResponse,
+ } choice;
+ union {
+ GatekeeperRequest gatekeeperRequest;
+ GatekeeperConfirm gatekeeperConfirm;
+ RegistrationRequest registrationRequest;
+ RegistrationConfirm registrationConfirm;
+ UnregistrationRequest unregistrationRequest;
+ AdmissionRequest admissionRequest;
+ AdmissionConfirm admissionConfirm;
+ LocationRequest locationRequest;
+ LocationConfirm locationConfirm;
+ InfoRequestResponse infoRequestResponse;
+ };
+} RasMessage;
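All of the SEQUENCE types above publish optional-field presence through their options bitmask, so a consumer must test the matching e*_ flag before reading the member. A minimal, illustrative check (assuming the types above are in scope via nf_conntrack_h323_asn1.h):

#include <linux/netfilter/nf_conntrack_h323_asn1.h>

/* illustrative only: an optional field is valid only if its bit is set */
static int example_setup_has_h245_addr(const Setup_UUIE *setup)
{
	return (setup->options & eSetup_UUIE_h245Address) != 0;
}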
diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h
new file mode 100644
index 000000000..4bb9bae67
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_irc.h
@@ -0,0 +1,16 @@
+#ifndef _NF_CONNTRACK_IRC_H
+#define _NF_CONNTRACK_IRC_H
+
+#ifdef __KERNEL__
+
+#define IRC_PORT 6667
+
+extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct nf_conntrack_expect *exp);
+
+#endif /* __KERNEL__ */
+#endif /* _NF_CONNTRACK_IRC_H */
diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h
new file mode 100644
index 000000000..2ab283031
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_pptp.h
@@ -0,0 +1,326 @@
+/* PPTP constants and structs */
+#ifndef _NF_CONNTRACK_PPTP_H
+#define _NF_CONNTRACK_PPTP_H
+
+#include <linux/netfilter/nf_conntrack_common.h>
+
+extern const char *const pptp_msg_name[];
+
+/* state of the control session */
+enum pptp_ctrlsess_state {
+ PPTP_SESSION_NONE, /* no session present */
+ PPTP_SESSION_ERROR, /* some session error */
+ PPTP_SESSION_STOPREQ, /* stop_sess request seen */
+ PPTP_SESSION_REQUESTED, /* start_sess request seen */
+ PPTP_SESSION_CONFIRMED, /* session established */
+};
+
+/* state of the call inside the control session */
+enum pptp_ctrlcall_state {
+ PPTP_CALL_NONE,
+ PPTP_CALL_ERROR,
+ PPTP_CALL_OUT_REQ,
+ PPTP_CALL_OUT_CONF,
+ PPTP_CALL_IN_REQ,
+ PPTP_CALL_IN_REP,
+ PPTP_CALL_IN_CONF,
+ PPTP_CALL_CLEAR_REQ,
+};
+
+/* conntrack private data */
+struct nf_ct_pptp_master {
+ enum pptp_ctrlsess_state sstate; /* session state */
+ enum pptp_ctrlcall_state cstate; /* call state */
+ __be16 pac_call_id; /* call id of PAC */
+ __be16 pns_call_id; /* call id of PNS */
+
+ /* in pre-2.6.11 this used to be per-expect. Now it is per-conntrack
+ * and therefore imposes a fixed limit on the number of maps */
+ struct nf_ct_gre_keymap *keymap[IP_CT_DIR_MAX];
+};
+
+struct nf_nat_pptp {
+ __be16 pns_call_id; /* NAT'ed PNS call id */
+ __be16 pac_call_id; /* NAT'ed PAC call id */
+};
+
+#ifdef __KERNEL__
+
+#define PPTP_CONTROL_PORT 1723
+
+#define PPTP_PACKET_CONTROL 1
+#define PPTP_PACKET_MGMT 2
+
+#define PPTP_MAGIC_COOKIE 0x1a2b3c4d
+
+struct pptp_pkt_hdr {
+ __u16 packetLength;
+ __be16 packetType;
+ __be32 magicCookie;
+};
+
+/* PptpControlMessageType values */
+#define PPTP_START_SESSION_REQUEST 1
+#define PPTP_START_SESSION_REPLY 2
+#define PPTP_STOP_SESSION_REQUEST 3
+#define PPTP_STOP_SESSION_REPLY 4
+#define PPTP_ECHO_REQUEST 5
+#define PPTP_ECHO_REPLY 6
+#define PPTP_OUT_CALL_REQUEST 7
+#define PPTP_OUT_CALL_REPLY 8
+#define PPTP_IN_CALL_REQUEST 9
+#define PPTP_IN_CALL_REPLY 10
+#define PPTP_IN_CALL_CONNECT 11
+#define PPTP_CALL_CLEAR_REQUEST 12
+#define PPTP_CALL_DISCONNECT_NOTIFY 13
+#define PPTP_WAN_ERROR_NOTIFY 14
+#define PPTP_SET_LINK_INFO 15
+
+#define PPTP_MSG_MAX 15
+
+/* PptpGeneralError values */
+#define PPTP_ERROR_CODE_NONE 0
+#define PPTP_NOT_CONNECTED 1
+#define PPTP_BAD_FORMAT 2
+#define PPTP_BAD_VALUE 3
+#define PPTP_NO_RESOURCE 4
+#define PPTP_BAD_CALLID 5
+#define PPTP_REMOVE_DEVICE_ERROR 6
+
+struct PptpControlHeader {
+ __be16 messageType;
+ __u16 reserved;
+};
+
+/* FramingCapability Bitmap Values */
+#define PPTP_FRAME_CAP_ASYNC 0x1
+#define PPTP_FRAME_CAP_SYNC 0x2
+
+/* BearerCapability Bitmap Values */
+#define PPTP_BEARER_CAP_ANALOG 0x1
+#define PPTP_BEARER_CAP_DIGITAL 0x2
+
+struct PptpStartSessionRequest {
+ __be16 protocolVersion;
+ __u16 reserved1;
+ __be32 framingCapability;
+ __be32 bearerCapability;
+ __be16 maxChannels;
+ __be16 firmwareRevision;
+ __u8 hostName[64];
+ __u8 vendorString[64];
+};
+
+/* PptpStartSessionResultCode Values */
+#define PPTP_START_OK 1
+#define PPTP_START_GENERAL_ERROR 2
+#define PPTP_START_ALREADY_CONNECTED 3
+#define PPTP_START_NOT_AUTHORIZED 4
+#define PPTP_START_UNKNOWN_PROTOCOL 5
+
+struct PptpStartSessionReply {
+ __be16 protocolVersion;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __be32 framingCapability;
+ __be32 bearerCapability;
+ __be16 maxChannels;
+ __be16 firmwareRevision;
+ __u8 hostName[64];
+ __u8 vendorString[64];
+};
+
+/* PptpStopReasons */
+#define PPTP_STOP_NONE 1
+#define PPTP_STOP_PROTOCOL 2
+#define PPTP_STOP_LOCAL_SHUTDOWN 3
+
+struct PptpStopSessionRequest {
+ __u8 reason;
+ __u8 reserved1;
+ __u16 reserved2;
+};
+
+/* PptpStopSessionResultCode */
+#define PPTP_STOP_OK 1
+#define PPTP_STOP_GENERAL_ERROR 2
+
+struct PptpStopSessionReply {
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __u16 reserved1;
+};
+
+struct PptpEchoRequest {
+ __be32 identNumber;
+};
+
+/* PptpEchoReplyResultCode */
+#define PPTP_ECHO_OK 1
+#define PPTP_ECHO_GENERAL_ERROR 2
+
+struct PptpEchoReply {
+ __be32 identNumber;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __u16 reserved;
+};
+
+/* PptpFramingType */
+#define PPTP_ASYNC_FRAMING 1
+#define PPTP_SYNC_FRAMING 2
+#define PPTP_DONT_CARE_FRAMING 3
+
+/* PptpCallBearerType */
+#define PPTP_ANALOG_TYPE 1
+#define PPTP_DIGITAL_TYPE 2
+#define PPTP_DONT_CARE_BEARER_TYPE 3
+
+struct PptpOutCallRequest {
+ __be16 callID;
+ __be16 callSerialNumber;
+ __be32 minBPS;
+ __be32 maxBPS;
+ __be32 bearerType;
+ __be32 framingType;
+ __be16 packetWindow;
+ __be16 packetProcDelay;
+ __be16 phoneNumberLength;
+ __u16 reserved1;
+ __u8 phoneNumber[64];
+ __u8 subAddress[64];
+};
+
+/* PptpCallResultCode */
+#define PPTP_OUTCALL_CONNECT 1
+#define PPTP_OUTCALL_GENERAL_ERROR 2
+#define PPTP_OUTCALL_NO_CARRIER 3
+#define PPTP_OUTCALL_BUSY 4
+#define PPTP_OUTCALL_NO_DIAL_TONE 5
+#define PPTP_OUTCALL_TIMEOUT 6
+#define PPTP_OUTCALL_DONT_ACCEPT 7
+
+struct PptpOutCallReply {
+ __be16 callID;
+ __be16 peersCallID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __be16 causeCode;
+ __be32 connectSpeed;
+ __be16 packetWindow;
+ __be16 packetProcDelay;
+ __be32 physChannelID;
+};
+
+struct PptpInCallRequest {
+ __be16 callID;
+ __be16 callSerialNumber;
+ __be32 callBearerType;
+ __be32 physChannelID;
+ __be16 dialedNumberLength;
+ __be16 dialingNumberLength;
+ __u8 dialedNumber[64];
+ __u8 dialingNumber[64];
+ __u8 subAddress[64];
+};
+
+/* PptpInCallResultCode */
+#define PPTP_INCALL_ACCEPT 1
+#define PPTP_INCALL_GENERAL_ERROR 2
+#define PPTP_INCALL_DONT_ACCEPT 3
+
+struct PptpInCallReply {
+ __be16 callID;
+ __be16 peersCallID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __be16 packetWindow;
+ __be16 packetProcDelay;
+ __u16 reserved;
+};
+
+struct PptpInCallConnected {
+ __be16 peersCallID;
+ __u16 reserved;
+ __be32 connectSpeed;
+ __be16 packetWindow;
+ __be16 packetProcDelay;
+ __be32 callFramingType;
+};
+
+struct PptpClearCallRequest {
+ __be16 callID;
+ __u16 reserved;
+};
+
+struct PptpCallDisconnectNotify {
+ __be16 callID;
+ __u8 resultCode;
+ __u8 generalErrorCode;
+ __be16 causeCode;
+ __u16 reserved;
+ __u8 callStatistics[128];
+};
+
+struct PptpWanErrorNotify {
+ __be16 peersCallID;
+ __u16 reserved;
+ __be32 crcErrors;
+ __be32 framingErrors;
+ __be32 hardwareOverRuns;
+ __be32 bufferOverRuns;
+ __be32 timeoutErrors;
+ __be32 alignmentErrors;
+};
+
+struct PptpSetLinkInfo {
+ __be16 peersCallID;
+ __u16 reserved;
+ __be32 sendAccm;
+ __be32 recvAccm;
+};
+
+union pptp_ctrl_union {
+ struct PptpStartSessionRequest sreq;
+ struct PptpStartSessionReply srep;
+ struct PptpStopSessionRequest streq;
+ struct PptpStopSessionReply strep;
+ struct PptpOutCallRequest ocreq;
+ struct PptpOutCallReply ocack;
+ struct PptpInCallRequest icreq;
+ struct PptpInCallReply icack;
+ struct PptpInCallConnected iccon;
+ struct PptpClearCallRequest clrreq;
+ struct PptpCallDisconnectNotify disc;
+ struct PptpWanErrorNotify wanerr;
+ struct PptpSetLinkInfo setlink;
+};
+
+/* crap needed for nf_conntrack_compat.h */
+struct nf_conn;
+struct nf_conntrack_expect;
+
+extern int
+(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+
+extern int
+(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ struct PptpControlHeader *ctlh,
+ union pptp_ctrl_union *pptpReq);
+
+extern void
+(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig,
+ struct nf_conntrack_expect *exp_reply);
+
+extern void
+(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct,
+ struct nf_conntrack_expect *exp);
+
+#endif /* __KERNEL__ */
+#endif /* _NF_CONNTRACK_PPTP_H */
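A condensed, hypothetical sketch of how a caller dispatches on the control message type and then reads the matching member of pptp_ctrl_union (kernel context assumed; packet-length and magic-cookie validation elided):

#include <linux/netfilter/nf_conntrack_pptp.h>

static void example_pptp_dispatch(const struct PptpControlHeader *ctlh,
				  const union pptp_ctrl_union *req)
{
	switch (ntohs(ctlh->messageType)) {
	case PPTP_OUT_CALL_REQUEST:
		/* req->ocreq.callID is the PNS call id to remember */
		break;
	case PPTP_OUT_CALL_REPLY:
		/* req->ocack.callID (PAC) and req->ocack.peersCallID (PNS)
		 * are the ids a NAT helper has to rewrite consistently */
		break;
	case PPTP_CALL_DISCONNECT_NOTIFY:
		/* req->disc.callID identifies the call being torn down */
		break;
	}
}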
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
new file mode 100644
index 000000000..df78dc2b5
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -0,0 +1,93 @@
+#ifndef _CONNTRACK_PROTO_GRE_H
+#define _CONNTRACK_PROTO_GRE_H
+#include <asm/byteorder.h>
+
+/* GRE PROTOCOL HEADER */
+
+/* GRE Version field */
+#define GRE_VERSION_1701 0x0
+#define GRE_VERSION_PPTP 0x1
+
+/* GRE Protocol field */
+#define GRE_PROTOCOL_PPTP 0x880B
+
+/* GRE Flags */
+#define GRE_FLAG_C 0x80
+#define GRE_FLAG_R 0x40
+#define GRE_FLAG_K 0x20
+#define GRE_FLAG_S 0x10
+#define GRE_FLAG_A 0x80
+
+#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
+#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
+#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
+#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
+#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
+
+/* GRE is a mess: Four different standards */
+struct gre_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 rec:3,
+ srr:1,
+ seq:1,
+ key:1,
+ routing:1,
+ csum:1,
+ version:3,
+ reserved:4,
+ ack:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 csum:1,
+ routing:1,
+ key:1,
+ seq:1,
+ srr:1,
+ rec:3,
+ ack:1,
+ reserved:4,
+ version:3;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+ __be16 protocol;
+};
+
+/* modified GRE header for PPTP */
+struct gre_hdr_pptp {
+ __u8 flags; /* bitfield */
+ __u8 version; /* should be GRE_VERSION_PPTP */
+ __be16 protocol; /* should be GRE_PROTOCOL_PPTP */
+ __be16 payload_len; /* size of ppp payload, not inc. gre header */
+ __be16 call_id; /* peer's call_id for this session */
+ __be32 seq; /* sequence number. Present if S==1 */
+ __be32 ack; /* seq number of highest packet received by */
+ /* sender in this session */
+};
+
+struct nf_ct_gre {
+ unsigned int stream_timeout;
+ unsigned int timeout;
+};
+
+#ifdef __KERNEL__
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+struct nf_conn;
+
+/* structure for original <-> reply keymap */
+struct nf_ct_gre_keymap {
+ struct list_head list;
+ struct nf_conntrack_tuple tuple;
+};
+
+/* add new tuple->key_reply pair to keymap */
+int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
+ struct nf_conntrack_tuple *t);
+
+/* delete keymap entries */
+void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
+
+void nf_nat_need_gre(void);
+
+#endif /* __KERNEL__ */
+#endif /* _CONNTRACK_PROTO_GRE_H */
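As a small sketch, the flag macros above operate on the first byte of the PPTP-style header; PPTP GRE always carries a key, which doubles as the call id. Illustrative only (kernel context assumed, header already linear in memory):

#include <linux/types.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>

static bool example_gre_pptp_keyed(const struct gre_hdr_pptp *pgreh)
{
	return pgreh->protocol == htons(GRE_PROTOCOL_PPTP) &&
	       GRE_IS_K(pgreh->flags);	/* key (call_id) must be present */
}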
diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h
new file mode 100644
index 000000000..4767d6e23
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sane.h
@@ -0,0 +1,21 @@
+#ifndef _NF_CONNTRACK_SANE_H
+#define _NF_CONNTRACK_SANE_H
+/* SANE tracking. */
+
+#ifdef __KERNEL__
+
+#define SANE_PORT 6566
+
+enum sane_state {
+ SANE_STATE_NORMAL,
+ SANE_STATE_START_REQUESTED,
+};
+
+/* This structure exists only once per master */
+struct nf_ct_sane_master {
+ enum sane_state state;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_SANE_H */
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
new file mode 100644
index 000000000..d5af3c27f
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -0,0 +1,199 @@
+#ifndef __NF_CONNTRACK_SIP_H__
+#define __NF_CONNTRACK_SIP_H__
+#ifdef __KERNEL__
+
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#include <linux/types.h>
+
+#define SIP_PORT 5060
+#define SIP_TIMEOUT 3600
+
+struct nf_ct_sip_master {
+ unsigned int register_cseq;
+ unsigned int invite_cseq;
+ __be16 forced_dport;
+};
+
+enum sip_expectation_classes {
+ SIP_EXPECT_SIGNALLING,
+ SIP_EXPECT_AUDIO,
+ SIP_EXPECT_VIDEO,
+ SIP_EXPECT_IMAGE,
+ __SIP_EXPECT_MAX
+};
+#define SIP_EXPECT_MAX (__SIP_EXPECT_MAX - 1)
+
+struct sdp_media_type {
+ const char *name;
+ unsigned int len;
+ enum sip_expectation_classes class;
+};
+
+#define SDP_MEDIA_TYPE(__name, __class) \
+{ \
+ .name = (__name), \
+ .len = sizeof(__name) - 1, \
+ .class = (__class), \
+}
+
+struct sip_handler {
+ const char *method;
+ unsigned int len;
+ int (*request)(struct sk_buff *skb, unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr, unsigned int *datalen,
+ unsigned int cseq);
+ int (*response)(struct sk_buff *skb, unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr, unsigned int *datalen,
+ unsigned int cseq, unsigned int code);
+};
+
+#define SIP_HANDLER(__method, __request, __response) \
+{ \
+ .method = (__method), \
+ .len = sizeof(__method) - 1, \
+ .request = (__request), \
+ .response = (__response), \
+}
+
+struct sip_header {
+ const char *name;
+ const char *cname;
+ const char *search;
+ unsigned int len;
+ unsigned int clen;
+ unsigned int slen;
+ int (*match_len)(const struct nf_conn *ct,
+ const char *dptr, const char *limit,
+ int *shift);
+};
+
+#define __SIP_HDR(__name, __cname, __search, __match) \
+{ \
+ .name = (__name), \
+ .len = sizeof(__name) - 1, \
+ .cname = (__cname), \
+ .clen = (__cname) ? sizeof(__cname) - 1 : 0, \
+ .search = (__search), \
+ .slen = (__search) ? sizeof(__search) - 1 : 0, \
+ .match_len = (__match), \
+}
+
+#define SIP_HDR(__name, __cname, __search, __match) \
+ __SIP_HDR(__name, __cname, __search, __match)
+
+#define SDP_HDR(__name, __search, __match) \
+ __SIP_HDR(__name, NULL, __search, __match)
+
+enum sip_header_types {
+ SIP_HDR_CSEQ,
+ SIP_HDR_FROM,
+ SIP_HDR_TO,
+ SIP_HDR_CONTACT,
+ SIP_HDR_VIA_UDP,
+ SIP_HDR_VIA_TCP,
+ SIP_HDR_EXPIRES,
+ SIP_HDR_CONTENT_LENGTH,
+ SIP_HDR_CALL_ID,
+};
+
+enum sdp_header_types {
+ SDP_HDR_UNSPEC,
+ SDP_HDR_VERSION,
+ SDP_HDR_OWNER,
+ SDP_HDR_CONNECTION,
+ SDP_HDR_MEDIA,
+};
+
+struct nf_nat_sip_hooks {
+ unsigned int (*msg)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen);
+
+ void (*seq_adjust)(struct sk_buff *skb,
+ unsigned int protoff, s16 off);
+
+ unsigned int (*expect)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ struct nf_conntrack_expect *exp,
+ unsigned int matchoff,
+ unsigned int matchlen);
+
+ unsigned int (*sdp_addr)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int sdpoff,
+ enum sdp_header_types type,
+ enum sdp_header_types term,
+ const union nf_inet_addr *addr);
+
+ unsigned int (*sdp_port)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ u_int16_t port);
+
+ unsigned int (*sdp_session)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ unsigned int sdpoff,
+ const union nf_inet_addr *addr);
+
+ unsigned int (*sdp_media)(struct sk_buff *skb,
+ unsigned int protoff,
+ unsigned int dataoff,
+ const char **dptr,
+ unsigned int *datalen,
+ struct nf_conntrack_expect *rtp_exp,
+ struct nf_conntrack_expect *rtcp_exp,
+ unsigned int mediaoff,
+ unsigned int medialen,
+ union nf_inet_addr *rtp_addr);
+};
+extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+
+int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
+ unsigned int datalen, unsigned int *matchoff,
+ unsigned int *matchlen, union nf_inet_addr *addr,
+ __be16 *port);
+int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ enum sip_header_types type, unsigned int *matchoff,
+ unsigned int *matchlen);
+int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
+ unsigned int *dataoff, unsigned int datalen,
+ enum sip_header_types type, int *in_header,
+ unsigned int *matchoff, unsigned int *matchlen,
+ union nf_inet_addr *addr, __be16 *port);
+int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ const char *name, unsigned int *matchoff,
+ unsigned int *matchlen, union nf_inet_addr *addr,
+ bool delim);
+int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+ unsigned int off, unsigned int datalen,
+ const char *name, unsigned int *matchoff,
+ unsigned int *matchen, unsigned int *val);
+
+int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ enum sdp_header_types type,
+ enum sdp_header_types term,
+ unsigned int *matchoff, unsigned int *matchlen);
+
+#endif /* __KERNEL__ */
+#endif /* __NF_CONNTRACK_SIP_H__ */
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
new file mode 100644
index 000000000..064bc63a5
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_snmp.h
@@ -0,0 +1,9 @@
+#ifndef _NF_CONNTRACK_SNMP_H
+#define _NF_CONNTRACK_SNMP_H
+
+extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+
+#endif /* _NF_CONNTRACK_SNMP_H */
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
new file mode 100644
index 000000000..22db9614b
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -0,0 +1,32 @@
+#ifndef _NF_CONNTRACK_TCP_H
+#define _NF_CONNTRACK_TCP_H
+
+#include <uapi/linux/netfilter/nf_conntrack_tcp.h>
+
+
+struct ip_ct_tcp_state {
+ u_int32_t td_end; /* max of seq + len */
+ u_int32_t td_maxend; /* max of ack + max(win, 1) */
+ u_int32_t td_maxwin; /* max(win) */
+ u_int32_t td_maxack; /* max of ack */
+ u_int8_t td_scale; /* window scale factor */
+ u_int8_t flags; /* per direction options */
+};
+
+struct ip_ct_tcp {
+ struct ip_ct_tcp_state seen[2]; /* connection parameters per direction */
+ u_int8_t state; /* state of the connection (enum tcp_conntrack) */
+ /* For detecting stale connections */
+ u_int8_t last_dir; /* Direction of the last packet (enum ip_conntrack_dir) */
+ u_int8_t retrans; /* Number of retransmitted packets */
+ u_int8_t last_index; /* Index of the last packet */
+ u_int32_t last_seq; /* Last sequence number seen in dir */
+ u_int32_t last_ack; /* Last sequence number seen in opposite dir */
+ u_int32_t last_end; /* Last seq + len */
+ u_int16_t last_win; /* Last window advertisement seen in dir */
+ /* For SYN packets while we may be out-of-sync */
+ u_int8_t last_wscale; /* Last window scaling factor seen */
+ u_int8_t last_flags; /* Last flags set */
+};
+
+#endif /* _NF_CONNTRACK_TCP_H */
diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h
new file mode 100644
index 000000000..c78d38fdb
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_tftp.h
@@ -0,0 +1,20 @@
+#ifndef _NF_CONNTRACK_TFTP_H
+#define _NF_CONNTRACK_TFTP_H
+
+#define TFTP_PORT 69
+
+struct tftphdr {
+ __be16 opcode;
+};
+
+#define TFTP_OPCODE_READ 1
+#define TFTP_OPCODE_WRITE 2
+#define TFTP_OPCODE_DATA 3
+#define TFTP_OPCODE_ACK 4
+#define TFTP_OPCODE_ERROR 5
+
+extern unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ struct nf_conntrack_expect *exp);
+
+#endif /* _NF_CONNTRACK_TFTP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
new file mode 100644
index 000000000..e955d4730
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink.h
@@ -0,0 +1,72 @@
+#ifndef _NFNETLINK_H
+#define _NFNETLINK_H
+
+
+#include <linux/netlink.h>
+#include <linux/capability.h>
+#include <net/netlink.h>
+#include <uapi/linux/netfilter/nfnetlink.h>
+
+struct nfnl_callback {
+ int (*call)(struct sock *nl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[]);
+ int (*call_rcu)(struct sock *nl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[]);
+ int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[]);
+ const struct nla_policy *policy; /* netlink attribute policy */
+ const u_int16_t attr_count; /* number of nlattr's */
+};
+
+struct nfnetlink_subsystem {
+ const char *name;
+ __u8 subsys_id; /* nfnetlink subsystem ID */
+ __u8 cb_count; /* number of callbacks */
+ const struct nfnl_callback *cb; /* callback for individual types */
+ int (*commit)(struct sk_buff *skb);
+ int (*abort)(struct sk_buff *skb);
+};
+
+int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
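+/*
+ * Illustrative sketch (not part of the original header): a subsystem bundles
+ * one nfnl_callback per message type and registers the set at module init.
+ * All names and constants below are placeholders.
+ *
+ *	static const struct nfnl_callback example_cb[EXAMPLE_MSG_MAX] = {
+ *		[EXAMPLE_MSG_GET] = {
+ *			.call		= example_get,
+ *			.policy		= example_policy,
+ *			.attr_count	= EXAMPLE_ATTR_MAX,
+ *		},
+ *	};
+ *
+ *	static const struct nfnetlink_subsystem example_subsys = {
+ *		.name		= "example",
+ *		.subsys_id	= EXAMPLE_SUBSYS_ID,
+ *		.cb_count	= EXAMPLE_MSG_MAX,
+ *		.cb		= example_cb,
+ *	};
+ *
+ *	err = nfnetlink_subsys_register(&example_subsys);
+ */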
+
+int nfnetlink_has_listeners(struct net *net, unsigned int group);
+struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
+ u32 dst_portid, gfp_t gfp_mask);
+int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
+ unsigned int group, int echo, gfp_t flags);
+int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
+ int flags);
+
+void nfnl_lock(__u8 subsys_id);
+void nfnl_unlock(__u8 subsys_id);
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_nfnl_is_held(__u8 subsys_id);
+#else
+static inline int lockdep_nfnl_is_held(__u8 subsys_id)
+{
+ return 1;
+}
+#endif /* CONFIG_PROVE_LOCKING */
+
+/*
+ * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
+ *
+ * @p: The pointer to read, prior to dereferencing
+ * @ss: The nfnetlink subsystem ID
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because the
+ * caller holds the NFNL subsystem mutex.
+ */
+#define nfnl_dereference(p, ss) \
+ rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
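+/*
+ * Illustrative sketch (not part of the original header): an update-side path
+ * that already holds its subsystem mutex can read an RCU-protected hook
+ * pointer without the usual RCU read-side machinery.  "example_hook" and the
+ * subsystem ID are placeholders.
+ *
+ *	nfnl_lock(EXAMPLE_SUBSYS_ID);
+ *	hook = nfnl_dereference(example_hook, EXAMPLE_SUBSYS_ID);
+ *	if (hook)
+ *		hook->fn(...);
+ *	nfnl_unlock(EXAMPLE_SUBSYS_ID);
+ */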
+
+#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
+ MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
+
+#endif /* _NFNETLINK_H */
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
new file mode 100644
index 000000000..6ec975748
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -0,0 +1,19 @@
+#ifndef _NFNL_ACCT_H_
+#define _NFNL_ACCT_H_
+
+#include <uapi/linux/netfilter/nfnetlink_acct.h>
+
+enum {
+ NFACCT_NO_QUOTA = -1,
+ NFACCT_UNDERQUOTA,
+ NFACCT_OVERQUOTA,
+};
+
+struct nf_acct;
+
+struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+void nfnl_acct_put(struct nf_acct *acct);
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+extern int nfnl_acct_overquota(const struct sk_buff *skb,
+ struct nf_acct *nfacct);
+#endif /* _NFNL_ACCT_H_ */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
new file mode 100644
index 000000000..a3e215bb0
--- /dev/null
+++ b/include/linux/netfilter/x_tables.h
@@ -0,0 +1,436 @@
+#ifndef _X_TABLES_H
+#define _X_TABLES_H
+
+
+#include <linux/netdevice.h>
+#include <uapi/linux/netfilter/x_tables.h>
+
+/**
+ * struct xt_action_param - parameters for matches/targets
+ *
+ * @match: the match extension
+ * @target: the target extension
+ * @matchinfo: per-match data
+ * @targinfo: per-target data
+ * @in: input netdevice
+ * @out: output netdevice
+ * @fragoff: packet is a fragment, this is the data offset
+ * @thoff: position of transport header relative to skb->data
+ * @hooknum: hook number the packet came from
+ * @family: Actual NFPROTO_* through which the function is invoked
+ * (helpful when match->family == NFPROTO_UNSPEC)
+ *
+ * Fields written to by extensions:
+ *
+ * @hotdrop: drop packet if we had inspection problems
+ * Network namespace obtainable using dev_net(in/out)
+ */
+struct xt_action_param {
+ union {
+ const struct xt_match *match;
+ const struct xt_target *target;
+ };
+ union {
+ const void *matchinfo, *targinfo;
+ };
+ const struct net_device *in, *out;
+ int fragoff;
+ unsigned int thoff;
+ unsigned int hooknum;
+ u_int8_t family;
+ bool hotdrop;
+};
+
+/**
+ * struct xt_mtchk_param - parameters for match extensions'
+ * checkentry functions
+ *
+ * @net: network namespace through which the check was invoked
+ * @table: table the rule is tried to be inserted into
+ * @entryinfo: the family-specific rule data
+ * (struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
+ * @match: struct xt_match through which this function was invoked
+ * @matchinfo: per-match data
+ * @hook_mask: via which hooks the new rule is reachable
+ * Other fields as above.
+ */
+struct xt_mtchk_param {
+ struct net *net;
+ const char *table;
+ const void *entryinfo;
+ const struct xt_match *match;
+ void *matchinfo;
+ unsigned int hook_mask;
+ u_int8_t family;
+};
+
+/**
+ * struct xt_mtdtor_param - match destructor parameters
+ * Fields as above.
+ */
+struct xt_mtdtor_param {
+ struct net *net;
+ const struct xt_match *match;
+ void *matchinfo;
+ u_int8_t family;
+};
+
+/**
+ * struct xt_tgchk_param - parameters for target extensions'
+ * checkentry functions
+ *
+ * @entryinfo: the family-specific rule data
+ * (struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
+ *
+ * Other fields: see above.
+ */
+struct xt_tgchk_param {
+ struct net *net;
+ const char *table;
+ const void *entryinfo;
+ const struct xt_target *target;
+ void *targinfo;
+ unsigned int hook_mask;
+ u_int8_t family;
+};
+
+/* Target destructor parameters */
+struct xt_tgdtor_param {
+ struct net *net;
+ const struct xt_target *target;
+ void *targinfo;
+ u_int8_t family;
+};
+
+struct xt_match {
+ struct list_head list;
+
+ const char name[XT_EXTENSION_MAXNAMELEN];
+ u_int8_t revision;
+
+ /* Return true or false: return FALSE and set *hotdrop = 1 to
+ force immediate packet drop. */
+ /* Arguments changed since 2.6.9, as this must now handle
+ non-linear skb, using skb_header_pointer and
+ skb_ip_make_writable. */
+ bool (*match)(const struct sk_buff *skb,
+ struct xt_action_param *);
+
+ /* Called when user tries to insert an entry of this type. */
+ int (*checkentry)(const struct xt_mtchk_param *);
+
+ /* Called when entry of this type deleted. */
+ void (*destroy)(const struct xt_mtdtor_param *);
+#ifdef CONFIG_COMPAT
+	/* Called when the userspace alignment differs from the kernel-space one */
+ void (*compat_from_user)(void *dst, const void *src);
+ int (*compat_to_user)(void __user *dst, const void *src);
+#endif
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+
+ const char *table;
+ unsigned int matchsize;
+#ifdef CONFIG_COMPAT
+ unsigned int compatsize;
+#endif
+ unsigned int hooks;
+ unsigned short proto;
+
+ unsigned short family;
+};
+
+/* Registration hooks for targets. */
+struct xt_target {
+ struct list_head list;
+
+ const char name[XT_EXTENSION_MAXNAMELEN];
+ u_int8_t revision;
+
+ /* Returns verdict. Argument order changed since 2.6.9, as this
+ must now handle non-linear skbs, using skb_copy_bits and
+ skb_ip_make_writable. */
+ unsigned int (*target)(struct sk_buff *skb,
+ const struct xt_action_param *);
+
+ /* Called when user tries to insert an entry of this type:
+ hook_mask is a bitmask of hooks from which it can be
+ called. */
+ /* Should return 0 on success or an error code otherwise (-Exxxx). */
+ int (*checkentry)(const struct xt_tgchk_param *);
+
+ /* Called when entry of this type deleted. */
+ void (*destroy)(const struct xt_tgdtor_param *);
+#ifdef CONFIG_COMPAT
+	/* Called when the userspace alignment differs from the kernel-space one */
+ void (*compat_from_user)(void *dst, const void *src);
+ int (*compat_to_user)(void __user *dst, const void *src);
+#endif
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+
+ const char *table;
+ unsigned int targetsize;
+#ifdef CONFIG_COMPAT
+ unsigned int compatsize;
+#endif
+ unsigned int hooks;
+ unsigned short proto;
+
+ unsigned short family;
+};
+
+/* Furniture shopping... */
+struct xt_table {
+ struct list_head list;
+
+ /* What hooks you will enter on */
+ unsigned int valid_hooks;
+
+ /* Man behind the curtain... */
+ struct xt_table_info *private;
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+
+ u_int8_t af; /* address/protocol family */
+ int priority; /* hook order */
+
+ /* A unique name... */
+ const char name[XT_TABLE_MAXNAMELEN];
+};
+
+#include <linux/netfilter_ipv4.h>
+
+/* The table itself */
+struct xt_table_info {
+ /* Size per table */
+ unsigned int size;
+ /* Number of entries: FIXME. --RR */
+ unsigned int number;
+ /* Initial number of entries. Needed for module usage count */
+ unsigned int initial_entries;
+
+ /* Entry points and underflows */
+ unsigned int hook_entry[NF_INET_NUMHOOKS];
+ unsigned int underflow[NF_INET_NUMHOOKS];
+
+ /*
+ * Number of user chains. Since tables cannot have loops, at most
+ * @stacksize jumps (number of user chains) can possibly be made.
+ */
+ unsigned int stacksize;
+ unsigned int __percpu *stackptr;
+ void ***jumpstack;
+ /* ipt_entry tables: one per CPU */
+ /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
+ void *entries[1];
+};
+
+#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
+ + nr_cpu_ids * sizeof(char *))
+int xt_register_target(struct xt_target *target);
+void xt_unregister_target(struct xt_target *target);
+int xt_register_targets(struct xt_target *target, unsigned int n);
+void xt_unregister_targets(struct xt_target *target, unsigned int n);
+
+int xt_register_match(struct xt_match *target);
+void xt_unregister_match(struct xt_match *target);
+int xt_register_matches(struct xt_match *match, unsigned int n);
+void xt_unregister_matches(struct xt_match *match, unsigned int n);
+
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ bool inv_proto);
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+ bool inv_proto);
+
+struct xt_table *xt_register_table(struct net *net,
+ const struct xt_table *table,
+ struct xt_table_info *bootstrap,
+ struct xt_table_info *newinfo);
+void *xt_unregister_table(struct xt_table *table);
+
+struct xt_table_info *xt_replace_table(struct xt_table *table,
+ unsigned int num_counters,
+ struct xt_table_info *newinfo,
+ int *error);
+
+struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
+int xt_find_revision(u8 af, const char *name, u8 revision, int target,
+ int *err);
+
+struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
+ const char *name);
+void xt_table_unlock(struct xt_table *t);
+
+int xt_proto_init(struct net *net, u_int8_t af);
+void xt_proto_fini(struct net *net, u_int8_t af);
+
+struct xt_table_info *xt_alloc_table_info(unsigned int size);
+void xt_free_table_info(struct xt_table_info *info);
+
+/**
+ * xt_recseq - recursive seqcount for netfilter use
+ *
+ * Packet processing changes the seqcount only if no recursion happened.
+ * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
+ * because we use the normal seqcount convention:
+ * low-order bit set to 1 if a writer is active.
+ */
+DECLARE_PER_CPU(seqcount_t, xt_recseq);
+
+/**
+ * xt_write_recseq_begin - start of a write section
+ *
+ * Begin packet processing: all readers must wait until the end.
+ * 1) Must be called with preemption disabled
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
+ * Returns:
+ * 1 if no recursion on this cpu
+ * 0 if recursion detected
+ */
+static inline unsigned int xt_write_recseq_begin(void)
+{
+ unsigned int addend;
+
+ /*
+ * Low order bit of sequence is set if we already
+ * called xt_write_recseq_begin().
+ */
+ addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;
+
+ /*
+ * This is kind of a write_seqcount_begin(), but addend is 0 or 1
+	 * We don't check the addend value, to avoid a test and conditional jump,
+	 * since addend is most likely 1.
+ */
+ __this_cpu_add(xt_recseq.sequence, addend);
+ smp_wmb();
+
+ return addend;
+}
+
+/**
+ * xt_write_recseq_end - end of a write section
+ * @addend: return value from previous xt_write_recseq_begin()
+ *
+ * End packet processing: all readers can proceed.
+ * 1) Must be called with preemption disabled
+ * 2) softirqs must be disabled too (or we should use this_cpu_add())
+ */
+static inline void xt_write_recseq_end(unsigned int addend)
+{
+ /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
+ smp_wmb();
+ __this_cpu_add(xt_recseq.sequence, addend);
+}
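+/*
+ * Illustrative pairing of the two helpers above (not part of the original
+ * header): a table-traversal path disables softirqs (which also disables
+ * preemption), brackets the traversal with the recursion seqcount, and then
+ * re-enables them.
+ *
+ *	local_bh_disable();
+ *	addend = xt_write_recseq_begin();
+ *	... traverse the ruleset, update per-cpu counters ...
+ *	xt_write_recseq_end(addend);
+ *	local_bh_enable();
+ */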
+
+/*
+ * This helper is performance critical and must be inlined
+ */
+static inline unsigned long ifname_compare_aligned(const char *_a,
+ const char *_b,
+ const char *_mask)
+{
+ const unsigned long *a = (const unsigned long *)_a;
+ const unsigned long *b = (const unsigned long *)_b;
+ const unsigned long *mask = (const unsigned long *)_mask;
+ unsigned long ret;
+
+ ret = (a[0] ^ b[0]) & mask[0];
+ if (IFNAMSIZ > sizeof(unsigned long))
+ ret |= (a[1] ^ b[1]) & mask[1];
+ if (IFNAMSIZ > 2 * sizeof(unsigned long))
+ ret |= (a[2] ^ b[2]) & mask[2];
+ if (IFNAMSIZ > 3 * sizeof(unsigned long))
+ ret |= (a[3] ^ b[3]) & mask[3];
+ BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+ return ret;
+}
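+/*
+ * Illustrative use (not part of the original header): interface-name matching
+ * against a rule's name/mask pair, assuming all three buffers are IFNAMSIZ
+ * bytes and long-aligned, as they are in the *_tables rule structures.
+ *
+ *	matched = ifname_compare_aligned(indev, info->iniface,
+ *					 info->iniface_mask) == 0;
+ */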
+
+struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
+void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+
+#ifdef CONFIG_COMPAT
+#include <net/compat.h>
+
+struct compat_xt_entry_match {
+ union {
+ struct {
+ u_int16_t match_size;
+ char name[XT_FUNCTION_MAXNAMELEN - 1];
+ u_int8_t revision;
+ } user;
+ struct {
+ u_int16_t match_size;
+ compat_uptr_t match;
+ } kernel;
+ u_int16_t match_size;
+ } u;
+ unsigned char data[0];
+};
+
+struct compat_xt_entry_target {
+ union {
+ struct {
+ u_int16_t target_size;
+ char name[XT_FUNCTION_MAXNAMELEN - 1];
+ u_int8_t revision;
+ } user;
+ struct {
+ u_int16_t target_size;
+ compat_uptr_t target;
+ } kernel;
+ u_int16_t target_size;
+ } u;
+ unsigned char data[0];
+};
+
+/* FIXME: this works only on 32 bit tasks
+ * need to change whole approach in order to calculate align as function of
+ * current task alignment */
+
+struct compat_xt_counters {
+ compat_u64 pcnt, bcnt; /* Packet and byte counters */
+};
+
+struct compat_xt_counters_info {
+ char name[XT_TABLE_MAXNAMELEN];
+ compat_uint_t num_counters;
+ struct compat_xt_counters counters[0];
+};
+
+struct _compat_xt_align {
+ __u8 u8;
+ __u16 u16;
+ __u32 u32;
+ compat_u64 u64;
+};
+
+#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
+
+void xt_compat_lock(u_int8_t af);
+void xt_compat_unlock(u_int8_t af);
+
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+void xt_compat_flush_offsets(u_int8_t af);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+
+int xt_compat_match_offset(const struct xt_match *match);
+int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ unsigned int *size);
+int xt_compat_match_to_user(const struct xt_entry_match *m,
+ void __user **dstptr, unsigned int *size);
+
+int xt_compat_target_offset(const struct xt_target *target);
+void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ unsigned int *size);
+int xt_compat_target_to_user(const struct xt_entry_target *t,
+ void __user **dstptr, unsigned int *size);
+
+#endif /* CONFIG_COMPAT */
+#endif /* _X_TABLES_H */
diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h
new file mode 100644
index 000000000..074790c0c
--- /dev/null
+++ b/include/linux/netfilter/xt_hashlimit.h
@@ -0,0 +1,9 @@
+#ifndef _XT_HASHLIMIT_H
+#define _XT_HASHLIMIT_H
+
+#include <uapi/linux/netfilter/xt_hashlimit.h>
+
+#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
+ XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
+ XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES)
+#endif /*_XT_HASHLIMIT_H*/
diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h
new file mode 100644
index 000000000..5b5e41716
--- /dev/null
+++ b/include/linux/netfilter/xt_physdev.h
@@ -0,0 +1,7 @@
+#ifndef _XT_PHYSDEV_H
+#define _XT_PHYSDEV_H
+
+#include <linux/if.h>
+#include <uapi/linux/netfilter/xt_physdev.h>
+
+#endif /*_XT_PHYSDEV_H*/
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
new file mode 100644
index 000000000..c22a7fb8d
--- /dev/null
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -0,0 +1,79 @@
+/*
+ * Format of an ARP firewall descriptor
+ *
+ * src, tgt, src_mask, tgt_mask, arpop, arpop_mask are always stored in
+ * network byte order.
+ * flags are stored in host byte order (of course).
+ */
+#ifndef _ARPTABLES_H
+#define _ARPTABLES_H
+
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <uapi/linux/netfilter_arp/arp_tables.h>
+
+/* Standard entry. */
+struct arpt_standard {
+ struct arpt_entry entry;
+ struct xt_standard_target target;
+};
+
+struct arpt_error {
+ struct arpt_entry entry;
+ struct xt_error_target target;
+};
+
+#define ARPT_ENTRY_INIT(__size) \
+{ \
+ .target_offset = sizeof(struct arpt_entry), \
+ .next_offset = (__size), \
+}
+
+#define ARPT_STANDARD_INIT(__verdict) \
+{ \
+ .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \
+ .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \
+ sizeof(struct xt_standard_target)), \
+ .target.verdict = -(__verdict) - 1, \
+}
+
+#define ARPT_ERROR_INIT \
+{ \
+ .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \
+ .target = XT_TARGET_INIT(XT_ERROR_TARGET, \
+ sizeof(struct xt_error_target)), \
+ .target.errorname = "ERROR", \
+}
+
+extern void *arpt_alloc_initial_table(const struct xt_table *);
+extern struct xt_table *arpt_register_table(struct net *net,
+ const struct xt_table *table,
+ const struct arpt_replace *repl);
+extern void arpt_unregister_table(struct xt_table *table);
+extern unsigned int arpt_do_table(struct sk_buff *skb,
+ unsigned int hook,
+ const struct nf_hook_state *state,
+ struct xt_table *table);
+
+#ifdef CONFIG_COMPAT
+#include <net/compat.h>
+
+struct compat_arpt_entry {
+ struct arpt_arp arp;
+ __u16 target_offset;
+ __u16 next_offset;
+ compat_uint_t comefrom;
+ struct compat_xt_counters counters;
+ unsigned char elems[0];
+};
+
+static inline struct xt_entry_target *
+compat_arpt_get_target(struct compat_arpt_entry *e)
+{
+ return (void *)e + e->target_offset;
+}
+
+#endif /* CONFIG_COMPAT */
+#endif /* _ARPTABLES_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
new file mode 100644
index 000000000..f2fdb5a52
--- /dev/null
+++ b/include/linux/netfilter_bridge.h
@@ -0,0 +1,77 @@
+#ifndef __LINUX_BRIDGE_NETFILTER_H
+#define __LINUX_BRIDGE_NETFILTER_H
+
+#include <uapi/linux/netfilter_bridge.h>
+#include <linux/skbuff.h>
+
+enum nf_br_hook_priorities {
+ NF_BR_PRI_FIRST = INT_MIN,
+ NF_BR_PRI_NAT_DST_BRIDGED = -300,
+ NF_BR_PRI_FILTER_BRIDGED = -200,
+ NF_BR_PRI_BRNF = 0,
+ NF_BR_PRI_NAT_DST_OTHER = 100,
+ NF_BR_PRI_FILTER_OTHER = 200,
+ NF_BR_PRI_NAT_SRC = 300,
+ NF_BR_PRI_LAST = INT_MAX,
+};
+
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+
+#define BRNF_BRIDGED_DNAT 0x02
+#define BRNF_NF_BRIDGE_PREROUTING 0x08
+
+static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
+{
+ if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
+ return PPPOE_SES_HLEN;
+ return 0;
+}
+
+int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
+
+static inline void br_drop_fake_rtable(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && (dst->flags & DST_FAKE_RTABLE))
+ skb_dst_drop(skb);
+}
+
+static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
+{
+ struct nf_bridge_info *nf_bridge;
+
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
+}
+
+static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
+{
+ struct nf_bridge_info *nf_bridge;
+
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
+}
+
+static inline struct net_device *
+nf_bridge_get_physindev(const struct sk_buff *skb)
+{
+ return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
+}
+
+static inline struct net_device *
+nf_bridge_get_physoutdev(const struct sk_buff *skb)
+{
+ return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
+}
+#else
+#define br_drop_fake_rtable(skb) do { } while (0)
+#endif /* CONFIG_BRIDGE_NETFILTER */
+
+#endif
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
new file mode 100644
index 000000000..e17e8bfb4
--- /dev/null
+++ b/include/linux/netfilter_bridge/ebt_802_3.h
@@ -0,0 +1,11 @@
+#ifndef __LINUX_BRIDGE_EBT_802_3_H
+#define __LINUX_BRIDGE_EBT_802_3_H
+
+#include <linux/skbuff.h>
+#include <uapi/linux/netfilter_bridge/ebt_802_3.h>
+
+static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
+{
+ return (struct ebt_802_3_hdr *)skb_mac_header(skb);
+}
+#endif
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
new file mode 100644
index 000000000..f1bd3962e
--- /dev/null
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -0,0 +1,128 @@
+/*
+ * ebtables
+ *
+ * Authors:
+ * Bart De Schuymer <bdschuym@pandora.be>
+ *
+ * ebtables.c,v 2.0, April, 2002
+ *
+ * This code is strongly inspired by the iptables code which is
+ * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ */
+#ifndef __LINUX_BRIDGE_EFF_H
+#define __LINUX_BRIDGE_EFF_H
+
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <uapi/linux/netfilter_bridge/ebtables.h>
+
+/* return values for match() functions */
+#define EBT_MATCH 0
+#define EBT_NOMATCH 1
+
+struct ebt_match {
+ struct list_head list;
+ const char name[EBT_FUNCTION_MAXNAMELEN];
+ bool (*match)(const struct sk_buff *skb, const struct net_device *in,
+ const struct net_device *out, const struct xt_match *match,
+ const void *matchinfo, int offset, unsigned int protoff,
+ bool *hotdrop);
+ bool (*checkentry)(const char *table, const void *entry,
+ const struct xt_match *match, void *matchinfo,
+ unsigned int hook_mask);
+ void (*destroy)(const struct xt_match *match, void *matchinfo);
+ unsigned int matchsize;
+ u_int8_t revision;
+ u_int8_t family;
+ struct module *me;
+};
+
+struct ebt_watcher {
+ struct list_head list;
+ const char name[EBT_FUNCTION_MAXNAMELEN];
+ unsigned int (*target)(struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int hook_num, const struct xt_target *target,
+ const void *targinfo);
+ bool (*checkentry)(const char *table, const void *entry,
+ const struct xt_target *target, void *targinfo,
+ unsigned int hook_mask);
+ void (*destroy)(const struct xt_target *target, void *targinfo);
+ unsigned int targetsize;
+ u_int8_t revision;
+ u_int8_t family;
+ struct module *me;
+};
+
+struct ebt_target {
+ struct list_head list;
+ const char name[EBT_FUNCTION_MAXNAMELEN];
+ /* returns one of the standard EBT_* verdicts */
+ unsigned int (*target)(struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int hook_num, const struct xt_target *target,
+ const void *targinfo);
+ bool (*checkentry)(const char *table, const void *entry,
+ const struct xt_target *target, void *targinfo,
+ unsigned int hook_mask);
+ void (*destroy)(const struct xt_target *target, void *targinfo);
+ unsigned int targetsize;
+ u_int8_t revision;
+ u_int8_t family;
+ struct module *me;
+};
+
+/* used for jumping from and into user defined chains (udc) */
+struct ebt_chainstack {
+ struct ebt_entries *chaininfo; /* pointer to chain data */
+ struct ebt_entry *e; /* pointer to entry data */
+ unsigned int n; /* n'th entry */
+};
+
+struct ebt_table_info {
+ /* total size of the entries */
+ unsigned int entries_size;
+ unsigned int nentries;
+ /* pointers to the start of the chains */
+ struct ebt_entries *hook_entry[NF_BR_NUMHOOKS];
+ /* room to maintain the stack used for jumping from and into udc */
+ struct ebt_chainstack **chainstack;
+ char *entries;
+ struct ebt_counter counters[0] ____cacheline_aligned;
+};
+
+struct ebt_table {
+ struct list_head list;
+ char name[EBT_TABLE_MAXNAMELEN];
+ struct ebt_replace_kernel *table;
+ unsigned int valid_hooks;
+ rwlock_t lock;
+	/* e.g. the table could explicitly allow only certain
+	 * matches, targets, ...; 0 == let it in */
+ int (*check)(const struct ebt_table_info *info,
+ unsigned int valid_hooks);
+ /* the data used by the kernel */
+ struct ebt_table_info *private;
+ struct module *me;
+};
+
+#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
+ ~(__alignof__(struct _xt_align)-1))
+extern struct ebt_table *ebt_register_table(struct net *net,
+ const struct ebt_table *table);
+extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
+extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ struct ebt_table *table);
+
+/* Used in the kernel match() functions */
+#define FWINV(bool,invflg) ((bool) ^ !!(info->invflags & invflg))
+/* True if the hook mask denotes that the rule is in a base chain,
+ * used in the check() functions */
+#define BASE_CHAIN (par->hook_mask & (1 << NF_BR_NUMHOOKS))
+/* Clear the bit in the hook mask that tells if the rule is on a base chain */
+#define CLEAR_BASE_CHAIN_BIT (par->hook_mask &= ~(1 << NF_BR_NUMHOOKS))
+/* True if the target is not a standard target */
+#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
+
+#endif
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
new file mode 100644
index 000000000..6e4591bb5
--- /dev/null
+++ b/include/linux/netfilter_ipv4.h
@@ -0,0 +1,12 @@
+/* IPv4-specific defines for netfilter.
+ * (C)1998 Rusty Russell -- This code is GPL.
+ */
+#ifndef __LINUX_IP_NETFILTER_H
+#define __LINUX_IP_NETFILTER_H
+
+#include <uapi/linux/netfilter_ipv4.h>
+
+int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+#endif /*__LINUX_IP_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
new file mode 100644
index 000000000..4073510da
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -0,0 +1,92 @@
+/*
+ * 25-Jul-1998 Major changes to allow for ip chain table
+ *
+ * 3-Jan-2000 Named tables to allow packet selection for different uses.
+ */
+
+/*
+ * Format of an IP firewall descriptor
+ *
+ * src, dst, src_mask, dst_mask are always stored in network byte order.
+ * flags are stored in host byte order (of course).
+ * Port numbers are stored in HOST byte order.
+ */
+#ifndef _IPTABLES_H
+#define _IPTABLES_H
+
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+
+#include <linux/init.h>
+#include <uapi/linux/netfilter_ipv4/ip_tables.h>
+
+extern void ipt_init(void) __init;
+
+extern struct xt_table *ipt_register_table(struct net *net,
+ const struct xt_table *table,
+ const struct ipt_replace *repl);
+extern void ipt_unregister_table(struct net *net, struct xt_table *table);
+
+/* Standard entry. */
+struct ipt_standard {
+ struct ipt_entry entry;
+ struct xt_standard_target target;
+};
+
+struct ipt_error {
+ struct ipt_entry entry;
+ struct xt_error_target target;
+};
+
+#define IPT_ENTRY_INIT(__size) \
+{ \
+ .target_offset = sizeof(struct ipt_entry), \
+ .next_offset = (__size), \
+}
+
+#define IPT_STANDARD_INIT(__verdict) \
+{ \
+ .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \
+ .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \
+ sizeof(struct xt_standard_target)), \
+ .target.verdict = -(__verdict) - 1, \
+}
+
+#define IPT_ERROR_INIT \
+{ \
+ .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \
+ .target = XT_TARGET_INIT(XT_ERROR_TARGET, \
+ sizeof(struct xt_error_target)), \
+ .target.errorname = "ERROR", \
+}
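+/*
+ * Illustrative sketch (not part of the original header): built-in tables use
+ * the initializers above to lay out their default ruleset, typically one
+ * accepting standard entry per valid hook followed by a terminating error
+ * entry.
+ *
+ *	.entries = {
+ *		IPT_STANDARD_INIT(NF_ACCEPT),	(one per valid hook)
+ *		...
+ *	},
+ *	.term = IPT_ERROR_INIT,
+ */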
+
+extern void *ipt_alloc_initial_table(const struct xt_table *);
+extern unsigned int ipt_do_table(struct sk_buff *skb,
+ unsigned int hook,
+ const struct nf_hook_state *state,
+ struct xt_table *table);
+
+#ifdef CONFIG_COMPAT
+#include <net/compat.h>
+
+struct compat_ipt_entry {
+ struct ipt_ip ip;
+ compat_uint_t nfcache;
+ __u16 target_offset;
+ __u16 next_offset;
+ compat_uint_t comefrom;
+ struct compat_xt_counters counters;
+ unsigned char elems[0];
+};
+
+/* Helper functions */
+static inline struct xt_entry_target *
+compat_ipt_get_target(struct compat_ipt_entry *e)
+{
+ return (void *)e + e->target_offset;
+}
+
+#endif /* CONFIG_COMPAT */
+#endif /* _IPTABLES_H */
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
new file mode 100644
index 000000000..64dad1cc1
--- /dev/null
+++ b/include/linux/netfilter_ipv6.h
@@ -0,0 +1,41 @@
+/* IPv6-specific defines for netfilter.
+ * (C)1998 Rusty Russell -- This code is GPL.
+ * (C)1999 David Jeffery
+ * this header was blatantly ripped from netfilter_ipv4.h
+ * it's amazing what adding a bunch of 6s can do =8^)
+ */
+#ifndef __LINUX_IP6_NETFILTER_H
+#define __LINUX_IP6_NETFILTER_H
+
+#include <uapi/linux/netfilter_ipv6.h>
+
+
+#ifdef CONFIG_NETFILTER
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+ unsigned int dataoff, u_int8_t protocol);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
+/*
+ * Hook functions for ipv6 to allow xt_* modules to be built-in even
+ * if IPv6 is a module.
+ */
+struct nf_ipv6_ops {
+ int (*chk_addr)(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict);
+};
+
+extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
+{
+ return rcu_dereference(nf_ipv6_ops);
+}
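+/*
+ * Illustrative use (not part of the original header): callers must check that
+ * the ops have been registered (i.e. that the IPv6 code is loaded) before
+ * dereferencing them.
+ *
+ *	const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+ *
+ *	if (v6ops)
+ *		ret = v6ops->chk_addr(net, addr, dev, strict);
+ */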
+
+#else /* CONFIG_NETFILTER */
+static inline int ipv6_netfilter_init(void) { return 0; }
+static inline void ipv6_netfilter_fini(void) { return; }
+#endif /* CONFIG_NETFILTER */
+
+#endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
new file mode 100644
index 000000000..b40d2b635
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -0,0 +1,69 @@
+/*
+ * 25-Jul-1998 Major changes to allow for ip chain table
+ *
+ * 3-Jan-2000 Named tables to allow packet selection for different uses.
+ */
+
+/*
+ * Format of an IP6 firewall descriptor
+ *
+ * src, dst, src_mask, dst_mask are always stored in network byte order.
+ * flags are stored in host byte order (of course).
+ * Port numbers are stored in HOST byte order.
+ */
+#ifndef _IP6_TABLES_H
+#define _IP6_TABLES_H
+
+#include <linux/if.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/skbuff.h>
+
+#include <linux/init.h>
+#include <uapi/linux/netfilter_ipv6/ip6_tables.h>
+
+extern void ip6t_init(void) __init;
+
+extern void *ip6t_alloc_initial_table(const struct xt_table *);
+extern struct xt_table *ip6t_register_table(struct net *net,
+ const struct xt_table *table,
+ const struct ip6t_replace *repl);
+extern void ip6t_unregister_table(struct net *net, struct xt_table *table);
+extern unsigned int ip6t_do_table(struct sk_buff *skb,
+ unsigned int hook,
+ const struct nf_hook_state *state,
+ struct xt_table *table);
+
+/* Check for an extension */
+static inline int
+ip6t_ext_hdr(u8 nexthdr)
+{
+	return (nexthdr == IPPROTO_HOPOPTS) ||
+	       (nexthdr == IPPROTO_ROUTING) ||
+	       (nexthdr == IPPROTO_FRAGMENT) ||
+	       (nexthdr == IPPROTO_ESP) ||
+	       (nexthdr == IPPROTO_AH) ||
+	       (nexthdr == IPPROTO_NONE) ||
+	       (nexthdr == IPPROTO_DSTOPTS);
+}
+
+#ifdef CONFIG_COMPAT
+#include <net/compat.h>
+
+struct compat_ip6t_entry {
+ struct ip6t_ip6 ipv6;
+ compat_uint_t nfcache;
+ __u16 target_offset;
+ __u16 next_offset;
+ compat_uint_t comefrom;
+ struct compat_xt_counters counters;
+ unsigned char elems[0];
+};
+
+static inline struct xt_entry_target *
+compat_ip6t_get_target(struct compat_ip6t_entry *e)
+{
+ return (void *)e + e->target_offset;
+}
+
+#endif /* CONFIG_COMPAT */
+#endif /* _IP6_TABLES_H */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
new file mode 100644
index 000000000..6835c1279
--- /dev/null
+++ b/include/linux/netlink.h
@@ -0,0 +1,181 @@
+#ifndef __LINUX_NETLINK_H
+#define __LINUX_NETLINK_H
+
+
+#include <linux/capability.h>
+#include <linux/skbuff.h>
+#include <linux/export.h>
+#include <net/scm.h>
+#include <uapi/linux/netlink.h>
+
+struct net;
+
+static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
+{
+ return (struct nlmsghdr *)skb->data;
+}
+
+enum netlink_skb_flags {
+ NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */
+ NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */
+ NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */
+ NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */
+};
+
+struct netlink_skb_parms {
+ struct scm_creds creds; /* Skb credentials */
+ __u32 portid;
+ __u32 dst_group;
+ __u32 flags;
+ struct sock *sk;
+};
+
+#define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb))
+#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
+
+
+extern void netlink_table_grab(void);
+extern void netlink_table_ungrab(void);
+
+#define NL_CFG_F_NONROOT_RECV (1 << 0)
+#define NL_CFG_F_NONROOT_SEND (1 << 1)
+
+/* optional Netlink kernel configuration parameters */
+struct netlink_kernel_cfg {
+ unsigned int groups;
+ unsigned int flags;
+ void (*input)(struct sk_buff *skb);
+ struct mutex *cb_mutex;
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
+ bool (*compare)(struct net *net, struct sock *sk);
+};
+
+extern struct sock *__netlink_kernel_create(struct net *net, int unit,
+ struct module *module,
+ struct netlink_kernel_cfg *cfg);
+static inline struct sock *
+netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
+{
+ return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
+}
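+/*
+ * Illustrative sketch (not part of the original header): creating an
+ * in-kernel netlink socket with a receive callback.  The protocol number and
+ * callback name are placeholders.
+ *
+ *	struct netlink_kernel_cfg cfg = {
+ *		.input	= example_rcv_msg,
+ *	};
+ *
+ *	nlsk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
+ */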
+
+extern void netlink_kernel_release(struct sock *sk);
+extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
+extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
+extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
+extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
+extern int netlink_has_listeners(struct sock *sk, unsigned int group);
+extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
+ u32 dst_portid, gfp_t gfp_mask);
+extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
+extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
+ __u32 group, gfp_t allocation);
+extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
+ __u32 portid, __u32 group, gfp_t allocation,
+ int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+ void *filter_data);
+extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
+extern int netlink_register_notifier(struct notifier_block *nb);
+extern int netlink_unregister_notifier(struct notifier_block *nb);
+
+/* finegrained unicast helpers: */
+struct sock *netlink_getsockbyfilp(struct file *filp);
+int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
+ long *timeo, struct sock *ssk);
+void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
+int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
+
+static inline struct sk_buff *
+netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
+{
+ struct sk_buff *nskb;
+
+ nskb = skb_clone(skb, gfp_mask);
+ if (!nskb)
+ return NULL;
+
+ /* This is a large skb, set destructor callback to release head */
+ if (is_vmalloc_addr(skb->head))
+ nskb->destructor = skb->destructor;
+
+ return nskb;
+}
+
+/*
+ * skb should fit one page. This choice is good for headerless malloc.
+ * But we should limit to 8K so that userspace does not have to
+ * use enormous buffer sizes on recvmsg() calls just to avoid
+ * MSG_TRUNC when PAGE_SIZE is very large.
+ */
+#if PAGE_SIZE < 8192UL
+#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(PAGE_SIZE)
+#else
+#define NLMSG_GOODSIZE SKB_WITH_OVERHEAD(8192UL)
+#endif
+
+#define NLMSG_DEFAULT_SIZE (NLMSG_GOODSIZE - NLMSG_HDRLEN)
+
+
+struct netlink_callback {
+ struct sk_buff *skb;
+ const struct nlmsghdr *nlh;
+ int (*dump)(struct sk_buff * skb,
+ struct netlink_callback *cb);
+ int (*done)(struct netlink_callback *cb);
+ void *data;
+	/* the module that the dump function belongs to */
+ struct module *module;
+ u16 family;
+ u16 min_dump_alloc;
+ unsigned int prev_seq, seq;
+ long args[6];
+};
+
+struct netlink_notify {
+ struct net *net;
+ u32 portid;
+ int protocol;
+};
+
+struct nlmsghdr *
+__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
+
+struct netlink_dump_control {
+ int (*dump)(struct sk_buff *skb, struct netlink_callback *);
+ int (*done)(struct netlink_callback *);
+ void *data;
+ struct module *module;
+ u16 min_dump_alloc;
+};
+
+extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control);
+static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control)
+{
+ if (!control->module)
+ control->module = THIS_MODULE;
+
+ return __netlink_dump_start(ssk, skb, nlh, control);
+}
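+/*
+ * Illustrative sketch (not part of the original header): a request handler
+ * hands a multi-part GET request off to the dump machinery.  The socket and
+ * callback names are placeholders.
+ *
+ *	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ *		struct netlink_dump_control c = {
+ *			.dump = example_dump,
+ *			.done = example_done,
+ *		};
+ *		return netlink_dump_start(example_sk, skb, nlh, &c);
+ *	}
+ */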
+
+struct netlink_tap {
+ struct net_device *dev;
+ struct module *module;
+ struct list_head list;
+};
+
+extern int netlink_add_tap(struct netlink_tap *nt);
+extern int netlink_remove_tap(struct netlink_tap *nt);
+
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+ struct user_namespace *ns, int cap);
+bool netlink_ns_capable(const struct sk_buff *skb,
+ struct user_namespace *ns, int cap);
+bool netlink_capable(const struct sk_buff *skb, int cap);
+bool netlink_net_capable(const struct sk_buff *skb, int cap);
+
+#endif /* __LINUX_NETLINK_H */
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
new file mode 100644
index 000000000..b25ee9ffd
--- /dev/null
+++ b/include/linux/netpoll.h
@@ -0,0 +1,120 @@
+/*
+ * Common code for low-level network console, dump, and debugger code
+ *
+ * Derived from netconsole, kgdb-over-ethernet, and netdump patches
+ */
+
+#ifndef _LINUX_NETPOLL_H
+#define _LINUX_NETPOLL_H
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/list.h>
+
+union inet_addr {
+ __u32 all[4];
+ __be32 ip;
+ __be32 ip6[4];
+ struct in_addr in;
+ struct in6_addr in6;
+};
+
+struct netpoll {
+ struct net_device *dev;
+ char dev_name[IFNAMSIZ];
+ const char *name;
+
+ union inet_addr local_ip, remote_ip;
+ bool ipv6;
+ u16 local_port, remote_port;
+ u8 remote_mac[ETH_ALEN];
+
+ struct work_struct cleanup_work;
+};
+
+struct netpoll_info {
+ atomic_t refcnt;
+
+ struct semaphore dev_lock;
+
+ struct sk_buff_head txq;
+
+ struct delayed_work tx_work;
+
+ struct netpoll *netpoll;
+ struct rcu_head rcu;
+};
+
+#ifdef CONFIG_NETPOLL
+extern void netpoll_poll_disable(struct net_device *dev);
+extern void netpoll_poll_enable(struct net_device *dev);
+#else
+static inline void netpoll_poll_disable(struct net_device *dev) { return; }
+static inline void netpoll_poll_enable(struct net_device *dev) { return; }
+#endif
+
+void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
+void netpoll_print_options(struct netpoll *np);
+int netpoll_parse_options(struct netpoll *np, char *opt);
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
+int netpoll_setup(struct netpoll *np);
+void __netpoll_cleanup(struct netpoll *np);
+void __netpoll_free_async(struct netpoll *np);
+void netpoll_cleanup(struct netpoll *np);
+void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
+ struct net_device *dev);
+static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ netpoll_send_skb_on_dev(np, skb, np->dev);
+ local_irq_restore(flags);
+}
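+/*
+ * Illustrative sketch (not part of the original header): a netconsole-style
+ * client parses a target specification, binds the netpoll instance to its
+ * device and then transmits UDP messages through it.  "opt" and "msg" are
+ * placeholders.
+ *
+ *	np.name = "example";
+ *	strlcpy(np.dev_name, "eth0", IFNAMSIZ);
+ *	if (!netpoll_parse_options(&np, opt) && !netpoll_setup(&np))
+ *		netpoll_send_udp(&np, msg, strlen(msg));
+ */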
+
+#ifdef CONFIG_NETPOLL
+static inline void *netpoll_poll_lock(struct napi_struct *napi)
+{
+ struct net_device *dev = napi->dev;
+
+ if (dev && dev->npinfo) {
+ spin_lock(&napi->poll_lock);
+ napi->poll_owner = smp_processor_id();
+ return napi;
+ }
+ return NULL;
+}
+
+static inline void netpoll_poll_unlock(void *have)
+{
+ struct napi_struct *napi = have;
+
+ if (napi) {
+ napi->poll_owner = -1;
+ spin_unlock(&napi->poll_lock);
+ }
+}
+
+static inline bool netpoll_tx_running(struct net_device *dev)
+{
+ return irqs_disabled();
+}
+
+#else
+static inline void *netpoll_poll_lock(struct napi_struct *napi)
+{
+ return NULL;
+}
+static inline void netpoll_poll_unlock(void *have)
+{
+}
+static inline void netpoll_netdev_init(struct net_device *dev)
+{
+}
+static inline bool netpoll_tx_running(struct net_device *dev)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
new file mode 100644
index 000000000..610af5155
--- /dev/null
+++ b/include/linux/nfs.h
@@ -0,0 +1,54 @@
+/*
+ * NFS protocol definitions
+ *
+ * This file contains constants mostly for Version 2 of the protocol,
+ * but also has a couple of NFSv3 bits in (notably the error codes).
+ */
+#ifndef _LINUX_NFS_H
+#define _LINUX_NFS_H
+
+#include <linux/sunrpc/msg_prot.h>
+#include <linux/string.h>
+#include <uapi/linux/nfs.h>
+
+/*
+ * This is the kernel NFS client file handle representation
+ */
+#define NFS_MAXFHSIZE 128
+struct nfs_fh {
+ unsigned short size;
+ unsigned char data[NFS_MAXFHSIZE];
+};
+
+/*
+ * Returns zero iff the size and data fields match.
+ * Checks only "size" bytes in the data field.
+ */
+static inline int nfs_compare_fh(const struct nfs_fh *a, const struct nfs_fh *b)
+{
+ return a->size != b->size || memcmp(a->data, b->data, a->size) != 0;
+}
+
+static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *source)
+{
+ target->size = source->size;
+ memcpy(target->data, source->data, source->size);
+}
+
+
+/*
+ * This is really a general kernel constant, but since nothing like
+ * this is defined in the kernel headers, I have to do it here.
+ */
+#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1))
+
+
+enum nfs3_stable_how {
+ NFS_UNSTABLE = 0,
+ NFS_DATA_SYNC = 1,
+ NFS_FILE_SYNC = 2,
+
+ /* used by direct.c to mark verf as invalid */
+ NFS_INVALID_STABLE_HOW = -1
+};
+#endif /* _LINUX_NFS_H */
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
new file mode 100644
index 000000000..a778ad8e3
--- /dev/null
+++ b/include/linux/nfs3.h
@@ -0,0 +1,13 @@
+/*
+ * NFSv3 protocol definitions
+ */
+#ifndef _LINUX_NFS3_H
+#define _LINUX_NFS3_H
+
+#include <uapi/linux/nfs3.h>
+
+
+/* Number of 32bit words in post_op_attr */
+#define NFS3_POST_OP_ATTR_WORDS 22
+
+#endif /* _LINUX_NFS3_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
new file mode 100644
index 000000000..32201c269
--- /dev/null
+++ b/include/linux/nfs4.h
@@ -0,0 +1,573 @@
+/*
+ * include/linux/nfs4.h
+ *
+ * NFSv4 protocol definitions.
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ * Andy Adamson <andros@umich.edu>
+ */
+#ifndef _LINUX_NFS4_H
+#define _LINUX_NFS4_H
+
+#include <linux/list.h>
+#include <linux/uidgid.h>
+#include <uapi/linux/nfs4.h>
+
+enum nfs4_acl_whotype {
+ NFS4_ACL_WHO_NAMED = 0,
+ NFS4_ACL_WHO_OWNER,
+ NFS4_ACL_WHO_GROUP,
+ NFS4_ACL_WHO_EVERYONE,
+};
+
+struct nfs4_ace {
+ uint32_t type;
+ uint32_t flag;
+ uint32_t access_mask;
+ int whotype;
+ union {
+ kuid_t who_uid;
+ kgid_t who_gid;
+ };
+};
+
+struct nfs4_acl {
+ uint32_t naces;
+ struct nfs4_ace aces[0];
+};
+
+#define NFS4_MAXLABELLEN 2048
+
+struct nfs4_label {
+ uint32_t lfs;
+ uint32_t pi;
+ u32 len;
+ char *label;
+};
+
+typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
+
+struct nfs_stateid4 {
+ __be32 seqid;
+ char other[NFS4_STATEID_OTHER_SIZE];
+} __attribute__ ((packed));
+
+typedef struct nfs_stateid4 nfs4_stateid;
+
+enum nfs_opnum4 {
+ OP_ACCESS = 3,
+ OP_CLOSE = 4,
+ OP_COMMIT = 5,
+ OP_CREATE = 6,
+ OP_DELEGPURGE = 7,
+ OP_DELEGRETURN = 8,
+ OP_GETATTR = 9,
+ OP_GETFH = 10,
+ OP_LINK = 11,
+ OP_LOCK = 12,
+ OP_LOCKT = 13,
+ OP_LOCKU = 14,
+ OP_LOOKUP = 15,
+ OP_LOOKUPP = 16,
+ OP_NVERIFY = 17,
+ OP_OPEN = 18,
+ OP_OPENATTR = 19,
+ OP_OPEN_CONFIRM = 20,
+ OP_OPEN_DOWNGRADE = 21,
+ OP_PUTFH = 22,
+ OP_PUTPUBFH = 23,
+ OP_PUTROOTFH = 24,
+ OP_READ = 25,
+ OP_READDIR = 26,
+ OP_READLINK = 27,
+ OP_REMOVE = 28,
+ OP_RENAME = 29,
+ OP_RENEW = 30,
+ OP_RESTOREFH = 31,
+ OP_SAVEFH = 32,
+ OP_SECINFO = 33,
+ OP_SETATTR = 34,
+ OP_SETCLIENTID = 35,
+ OP_SETCLIENTID_CONFIRM = 36,
+ OP_VERIFY = 37,
+ OP_WRITE = 38,
+ OP_RELEASE_LOCKOWNER = 39,
+
+ /* nfs41 */
+ OP_BACKCHANNEL_CTL = 40,
+ OP_BIND_CONN_TO_SESSION = 41,
+ OP_EXCHANGE_ID = 42,
+ OP_CREATE_SESSION = 43,
+ OP_DESTROY_SESSION = 44,
+ OP_FREE_STATEID = 45,
+ OP_GET_DIR_DELEGATION = 46,
+ OP_GETDEVICEINFO = 47,
+ OP_GETDEVICELIST = 48,
+ OP_LAYOUTCOMMIT = 49,
+ OP_LAYOUTGET = 50,
+ OP_LAYOUTRETURN = 51,
+ OP_SECINFO_NO_NAME = 52,
+ OP_SEQUENCE = 53,
+ OP_SET_SSV = 54,
+ OP_TEST_STATEID = 55,
+ OP_WANT_DELEGATION = 56,
+ OP_DESTROY_CLIENTID = 57,
+ OP_RECLAIM_COMPLETE = 58,
+
+ /* nfs42 */
+ OP_ALLOCATE = 59,
+ OP_COPY = 60,
+ OP_COPY_NOTIFY = 61,
+ OP_DEALLOCATE = 62,
+ OP_IO_ADVISE = 63,
+ OP_LAYOUTERROR = 64,
+ OP_LAYOUTSTATS = 65,
+ OP_OFFLOAD_CANCEL = 66,
+ OP_OFFLOAD_STATUS = 67,
+ OP_READ_PLUS = 68,
+ OP_SEEK = 69,
+ OP_WRITE_SAME = 70,
+
+ OP_ILLEGAL = 10044,
+};
+
+/* Define the first and last NFS4 operations implemented.
+ * Needs to be updated if more operations are defined in the future. */
+
+#define FIRST_NFS4_OP OP_ACCESS
+#define LAST_NFS4_OP OP_WRITE_SAME
+#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
+#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
+#define LAST_NFS42_OP OP_WRITE_SAME
+
+enum nfsstat4 {
+ NFS4_OK = 0,
+ NFS4ERR_PERM = 1,
+ NFS4ERR_NOENT = 2,
+ NFS4ERR_IO = 5,
+ NFS4ERR_NXIO = 6,
+ NFS4ERR_ACCESS = 13,
+ NFS4ERR_EXIST = 17,
+ NFS4ERR_XDEV = 18,
+ /* Unused/reserved 19 */
+ NFS4ERR_NOTDIR = 20,
+ NFS4ERR_ISDIR = 21,
+ NFS4ERR_INVAL = 22,
+ NFS4ERR_FBIG = 27,
+ NFS4ERR_NOSPC = 28,
+ NFS4ERR_ROFS = 30,
+ NFS4ERR_MLINK = 31,
+ NFS4ERR_NAMETOOLONG = 63,
+ NFS4ERR_NOTEMPTY = 66,
+ NFS4ERR_DQUOT = 69,
+ NFS4ERR_STALE = 70,
+ NFS4ERR_BADHANDLE = 10001,
+ NFS4ERR_BAD_COOKIE = 10003,
+ NFS4ERR_NOTSUPP = 10004,
+ NFS4ERR_TOOSMALL = 10005,
+ NFS4ERR_SERVERFAULT = 10006,
+ NFS4ERR_BADTYPE = 10007,
+ NFS4ERR_DELAY = 10008,
+ NFS4ERR_SAME = 10009,
+ NFS4ERR_DENIED = 10010,
+ NFS4ERR_EXPIRED = 10011,
+ NFS4ERR_LOCKED = 10012,
+ NFS4ERR_GRACE = 10013,
+ NFS4ERR_FHEXPIRED = 10014,
+ NFS4ERR_SHARE_DENIED = 10015,
+ NFS4ERR_WRONGSEC = 10016,
+ NFS4ERR_CLID_INUSE = 10017,
+ NFS4ERR_RESOURCE = 10018,
+ NFS4ERR_MOVED = 10019,
+ NFS4ERR_NOFILEHANDLE = 10020,
+ NFS4ERR_MINOR_VERS_MISMATCH = 10021,
+ NFS4ERR_STALE_CLIENTID = 10022,
+ NFS4ERR_STALE_STATEID = 10023,
+ NFS4ERR_OLD_STATEID = 10024,
+ NFS4ERR_BAD_STATEID = 10025,
+ NFS4ERR_BAD_SEQID = 10026,
+ NFS4ERR_NOT_SAME = 10027,
+ NFS4ERR_LOCK_RANGE = 10028,
+ NFS4ERR_SYMLINK = 10029,
+ NFS4ERR_RESTOREFH = 10030,
+ NFS4ERR_LEASE_MOVED = 10031,
+ NFS4ERR_ATTRNOTSUPP = 10032,
+ NFS4ERR_NO_GRACE = 10033,
+ NFS4ERR_RECLAIM_BAD = 10034,
+ NFS4ERR_RECLAIM_CONFLICT = 10035,
+ NFS4ERR_BADXDR = 10036,
+ NFS4ERR_LOCKS_HELD = 10037,
+ NFS4ERR_OPENMODE = 10038,
+ NFS4ERR_BADOWNER = 10039,
+ NFS4ERR_BADCHAR = 10040,
+ NFS4ERR_BADNAME = 10041,
+ NFS4ERR_BAD_RANGE = 10042,
+ NFS4ERR_LOCK_NOTSUPP = 10043,
+ NFS4ERR_OP_ILLEGAL = 10044,
+ NFS4ERR_DEADLOCK = 10045,
+ NFS4ERR_FILE_OPEN = 10046,
+ NFS4ERR_ADMIN_REVOKED = 10047,
+ NFS4ERR_CB_PATH_DOWN = 10048,
+
+ /* nfs41 */
+ NFS4ERR_BADIOMODE = 10049,
+ NFS4ERR_BADLAYOUT = 10050,
+ NFS4ERR_BAD_SESSION_DIGEST = 10051,
+ NFS4ERR_BADSESSION = 10052,
+ NFS4ERR_BADSLOT = 10053,
+ NFS4ERR_COMPLETE_ALREADY = 10054,
+ NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055,
+ NFS4ERR_DELEG_ALREADY_WANTED = 10056,
+ NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */
+ NFS4ERR_LAYOUTTRYLATER = 10058,
+ NFS4ERR_LAYOUTUNAVAILABLE = 10059,
+ NFS4ERR_NOMATCHING_LAYOUT = 10060,
+ NFS4ERR_RECALLCONFLICT = 10061,
+ NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062,
+ NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */
+ NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */
+ NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */
+ NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */
+ NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */
+ NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */
+ NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */
+ NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */
+ NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */
+ NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */
+ /* Error 10073 is unused. */
+ NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */
+ NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */
+ NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not original */
+ NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */
+ NFS4ERR_DEADSESSION = 10078, /* persistent session dead */
+ NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */
+ NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */
+ NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */
+ NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */
+ NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */
+ NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */
+ NFS4ERR_REJECT_DELEG = 10085, /* on callback */
+ NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */
+ NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */
+
+ /* nfs42 */
+ NFS4ERR_PARTNER_NOTSUPP = 10088,
+ NFS4ERR_PARTNER_NO_AUTH = 10089,
+ NFS4ERR_UNION_NOTSUPP = 10090,
+ NFS4ERR_OFFLOAD_DENIED = 10091,
+ NFS4ERR_WRONG_LFS = 10092,
+ NFS4ERR_BADLABEL = 10093,
+ NFS4ERR_OFFLOAD_NO_REQS = 10094,
+};
+
+static inline bool seqid_mutating_err(u32 err)
+{
+ /* rfc 3530 section 8.1.5: */
+ switch (err) {
+ case NFS4ERR_STALE_CLIENTID:
+ case NFS4ERR_STALE_STATEID:
+ case NFS4ERR_BAD_STATEID:
+ case NFS4ERR_BAD_SEQID:
+ case NFS4ERR_BADXDR:
+ case NFS4ERR_RESOURCE:
+ case NFS4ERR_NOFILEHANDLE:
+ return false;
+	}
+ return true;
+}
+
+/*
+ * Note: NF4BAD is not actually part of the protocol; it is just used
+ * internally by nfsd.
+ */
+enum nfs_ftype4 {
+ NF4BAD = 0,
+ NF4REG = 1, /* Regular File */
+ NF4DIR = 2, /* Directory */
+ NF4BLK = 3, /* Special File - block device */
+ NF4CHR = 4, /* Special File - character device */
+ NF4LNK = 5, /* Symbolic Link */
+ NF4SOCK = 6, /* Special File - socket */
+ NF4FIFO = 7, /* Special File - fifo */
+ NF4ATTRDIR = 8, /* Attribute Directory */
+ NF4NAMEDATTR = 9 /* Named Attribute */
+};
+
+enum open_claim_type4 {
+ NFS4_OPEN_CLAIM_NULL = 0,
+ NFS4_OPEN_CLAIM_PREVIOUS = 1,
+ NFS4_OPEN_CLAIM_DELEGATE_CUR = 2,
+ NFS4_OPEN_CLAIM_DELEGATE_PREV = 3,
+ NFS4_OPEN_CLAIM_FH = 4, /* 4.1 */
+ NFS4_OPEN_CLAIM_DELEG_CUR_FH = 5, /* 4.1 */
+ NFS4_OPEN_CLAIM_DELEG_PREV_FH = 6, /* 4.1 */
+};
+
+enum opentype4 {
+ NFS4_OPEN_NOCREATE = 0,
+ NFS4_OPEN_CREATE = 1
+};
+
+enum createmode4 {
+ NFS4_CREATE_UNCHECKED = 0,
+ NFS4_CREATE_GUARDED = 1,
+ NFS4_CREATE_EXCLUSIVE = 2,
+ /*
+ * New to NFSv4.1. If session is persistent,
+ * GUARDED4 MUST be used. Otherwise, use
+ * EXCLUSIVE4_1 instead of EXCLUSIVE4.
+ */
+ NFS4_CREATE_EXCLUSIVE4_1 = 3
+};
+
+enum limit_by4 {
+ NFS4_LIMIT_SIZE = 1,
+ NFS4_LIMIT_BLOCKS = 2
+};
+
+enum open_delegation_type4 {
+ NFS4_OPEN_DELEGATE_NONE = 0,
+ NFS4_OPEN_DELEGATE_READ = 1,
+ NFS4_OPEN_DELEGATE_WRITE = 2,
+ NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */
+};
+
+enum why_no_delegation4 { /* new to v4.1 */
+ WND4_NOT_WANTED = 0,
+ WND4_CONTENTION = 1,
+ WND4_RESOURCE = 2,
+ WND4_NOT_SUPP_FTYPE = 3,
+ WND4_WRITE_DELEG_NOT_SUPP_FTYPE = 4,
+ WND4_NOT_SUPP_UPGRADE = 5,
+ WND4_NOT_SUPP_DOWNGRADE = 6,
+ WND4_CANCELLED = 7,
+ WND4_IS_DIR = 8,
+};
+
+enum lock_type4 {
+ NFS4_UNLOCK_LT = 0,
+ NFS4_READ_LT = 1,
+ NFS4_WRITE_LT = 2,
+ NFS4_READW_LT = 3,
+ NFS4_WRITEW_LT = 4
+};
+
+
+/* Mandatory Attributes */
+#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
+#define FATTR4_WORD0_TYPE (1UL << 1)
+#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2)
+#define FATTR4_WORD0_CHANGE (1UL << 3)
+#define FATTR4_WORD0_SIZE (1UL << 4)
+#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5)
+#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6)
+#define FATTR4_WORD0_NAMED_ATTR (1UL << 7)
+#define FATTR4_WORD0_FSID (1UL << 8)
+#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
+#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
+#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
+/* Mandatory in NFSv4.1 */
+#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
+
+/* Recommended Attributes */
+#define FATTR4_WORD0_ACL (1UL << 12)
+#define FATTR4_WORD0_ACLSUPPORT (1UL << 13)
+#define FATTR4_WORD0_ARCHIVE (1UL << 14)
+#define FATTR4_WORD0_CANSETTIME (1UL << 15)
+#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16)
+#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17)
+#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18)
+#define FATTR4_WORD0_FILEHANDLE (1UL << 19)
+#define FATTR4_WORD0_FILEID (1UL << 20)
+#define FATTR4_WORD0_FILES_AVAIL (1UL << 21)
+#define FATTR4_WORD0_FILES_FREE (1UL << 22)
+#define FATTR4_WORD0_FILES_TOTAL (1UL << 23)
+#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24)
+#define FATTR4_WORD0_HIDDEN (1UL << 25)
+#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26)
+#define FATTR4_WORD0_MAXFILESIZE (1UL << 27)
+#define FATTR4_WORD0_MAXLINK (1UL << 28)
+#define FATTR4_WORD0_MAXNAME (1UL << 29)
+#define FATTR4_WORD0_MAXREAD (1UL << 30)
+#define FATTR4_WORD0_MAXWRITE (1UL << 31)
+#define FATTR4_WORD1_MIMETYPE (1UL << 0)
+#define FATTR4_WORD1_MODE (1UL << 1)
+#define FATTR4_WORD1_NO_TRUNC (1UL << 2)
+#define FATTR4_WORD1_NUMLINKS (1UL << 3)
+#define FATTR4_WORD1_OWNER (1UL << 4)
+#define FATTR4_WORD1_OWNER_GROUP (1UL << 5)
+#define FATTR4_WORD1_QUOTA_HARD (1UL << 6)
+#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7)
+#define FATTR4_WORD1_QUOTA_USED (1UL << 8)
+#define FATTR4_WORD1_RAWDEV (1UL << 9)
+#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10)
+#define FATTR4_WORD1_SPACE_FREE (1UL << 11)
+#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12)
+#define FATTR4_WORD1_SPACE_USED (1UL << 13)
+#define FATTR4_WORD1_SYSTEM (1UL << 14)
+#define FATTR4_WORD1_TIME_ACCESS (1UL << 15)
+#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16)
+#define FATTR4_WORD1_TIME_BACKUP (1UL << 17)
+#define FATTR4_WORD1_TIME_CREATE (1UL << 18)
+#define FATTR4_WORD1_TIME_DELTA (1UL << 19)
+#define FATTR4_WORD1_TIME_METADATA (1UL << 20)
+#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
+#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
+#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
+#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
+#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
+#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
+#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
+#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
+
+/* MDS threshold bitmap bits */
+#define THRESHOLD_RD (1UL << 0)
+#define THRESHOLD_WR (1UL << 1)
+#define THRESHOLD_RD_IO (1UL << 2)
+#define THRESHOLD_WR_IO (1UL << 3)
+
+#define NFSPROC4_NULL 0
+#define NFSPROC4_COMPOUND 1
+#define NFS4_VERSION 4
+#define NFS4_MINOR_VERSION 0
+
+#define NFS4_DEBUG 1
+
+/* Index of predefined Linux client operations */
+
+enum {
+ NFSPROC4_CLNT_NULL = 0, /* Unused */
+ NFSPROC4_CLNT_READ,
+ NFSPROC4_CLNT_WRITE,
+ NFSPROC4_CLNT_COMMIT,
+ NFSPROC4_CLNT_OPEN,
+ NFSPROC4_CLNT_OPEN_CONFIRM,
+ NFSPROC4_CLNT_OPEN_NOATTR,
+ NFSPROC4_CLNT_OPEN_DOWNGRADE,
+ NFSPROC4_CLNT_CLOSE,
+ NFSPROC4_CLNT_SETATTR,
+ NFSPROC4_CLNT_FSINFO,
+ NFSPROC4_CLNT_RENEW,
+ NFSPROC4_CLNT_SETCLIENTID,
+ NFSPROC4_CLNT_SETCLIENTID_CONFIRM,
+ NFSPROC4_CLNT_LOCK,
+ NFSPROC4_CLNT_LOCKT,
+ NFSPROC4_CLNT_LOCKU,
+ NFSPROC4_CLNT_ACCESS,
+ NFSPROC4_CLNT_GETATTR,
+ NFSPROC4_CLNT_LOOKUP,
+ NFSPROC4_CLNT_LOOKUP_ROOT,
+ NFSPROC4_CLNT_REMOVE,
+ NFSPROC4_CLNT_RENAME,
+ NFSPROC4_CLNT_LINK,
+ NFSPROC4_CLNT_SYMLINK,
+ NFSPROC4_CLNT_CREATE,
+ NFSPROC4_CLNT_PATHCONF,
+ NFSPROC4_CLNT_STATFS,
+ NFSPROC4_CLNT_READLINK,
+ NFSPROC4_CLNT_READDIR,
+ NFSPROC4_CLNT_SERVER_CAPS,
+ NFSPROC4_CLNT_DELEGRETURN,
+ NFSPROC4_CLNT_GETACL,
+ NFSPROC4_CLNT_SETACL,
+ NFSPROC4_CLNT_FS_LOCATIONS,
+ NFSPROC4_CLNT_RELEASE_LOCKOWNER,
+ NFSPROC4_CLNT_SECINFO,
+ NFSPROC4_CLNT_FSID_PRESENT,
+
+ /* nfs41 */
+ NFSPROC4_CLNT_EXCHANGE_ID,
+ NFSPROC4_CLNT_CREATE_SESSION,
+ NFSPROC4_CLNT_DESTROY_SESSION,
+ NFSPROC4_CLNT_SEQUENCE,
+ NFSPROC4_CLNT_GET_LEASE_TIME,
+ NFSPROC4_CLNT_RECLAIM_COMPLETE,
+ NFSPROC4_CLNT_LAYOUTGET,
+ NFSPROC4_CLNT_GETDEVICEINFO,
+ NFSPROC4_CLNT_LAYOUTCOMMIT,
+ NFSPROC4_CLNT_LAYOUTRETURN,
+ NFSPROC4_CLNT_SECINFO_NO_NAME,
+ NFSPROC4_CLNT_TEST_STATEID,
+ NFSPROC4_CLNT_FREE_STATEID,
+ NFSPROC4_CLNT_GETDEVICELIST,
+ NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
+ NFSPROC4_CLNT_DESTROY_CLIENTID,
+
+ /* nfs42 */
+ NFSPROC4_CLNT_SEEK,
+ NFSPROC4_CLNT_ALLOCATE,
+ NFSPROC4_CLNT_DEALLOCATE,
+};
+
+/* nfs41 types */
+struct nfs4_sessionid {
+ unsigned char data[NFS4_MAX_SESSIONID_LEN];
+};
+
+/* Create Session Flags */
+#define SESSION4_PERSIST 0x001
+#define SESSION4_BACK_CHAN 0x002
+#define SESSION4_RDMA 0x004
+
+#define SESSION4_FLAG_MASK_A 0x007
+
+enum state_protect_how4 {
+ SP4_NONE = 0,
+ SP4_MACH_CRED = 1,
+ SP4_SSV = 2
+};
+
+enum pnfs_layouttype {
+ LAYOUT_NFSV4_1_FILES = 1,
+ LAYOUT_OSD2_OBJECTS = 2,
+ LAYOUT_BLOCK_VOLUME = 3,
+ LAYOUT_FLEX_FILES = 4,
+ LAYOUT_TYPE_MAX
+};
+
+/* used for both layout return and recall */
+enum pnfs_layoutreturn_type {
+ RETURN_FILE = 1,
+ RETURN_FSID = 2,
+ RETURN_ALL = 3
+};
+
+enum pnfs_iomode {
+ IOMODE_READ = 1,
+ IOMODE_RW = 2,
+ IOMODE_ANY = 3,
+};
+
+enum pnfs_notify_deviceid_type4 {
+ NOTIFY_DEVICEID4_CHANGE = 1 << 1,
+ NOTIFY_DEVICEID4_DELETE = 1 << 2,
+};
+
+#define NFL4_UFLG_MASK 0x0000003F
+#define NFL4_UFLG_DENSE 0x00000001
+#define NFL4_UFLG_COMMIT_THRU_MDS 0x00000002
+#define NFL4_UFLG_STRIPE_UNIT_SIZE_MASK 0xFFFFFFC0
+
+/* Encoded in the loh_body field of type layouthint4 */
+enum filelayout_hint_care4 {
+ NFLH4_CARE_DENSE = NFL4_UFLG_DENSE,
+ NFLH4_CARE_COMMIT_THRU_MDS = NFL4_UFLG_COMMIT_THRU_MDS,
+ NFLH4_CARE_STRIPE_UNIT_SIZE = 0x00000040,
+ NFLH4_CARE_STRIPE_COUNT = 0x00000080
+};
+
+#define NFS4_DEVICEID4_SIZE 16
+
+struct nfs4_deviceid {
+ char data[NFS4_DEVICEID4_SIZE];
+};
+
+enum data_content4 {
+ NFS4_CONTENT_DATA = 0,
+ NFS4_CONTENT_HOLE = 1,
+};
+
+#endif
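The FATTR4_WORD0/1/2 macros above split the NFSv4 attribute bitmap across three 32-bit words, so a bit number is only meaningful together with its word: bit 11 of word 0 is RDATTR_ERROR while bit 11 of word 2 is SUPPATTR_EXCLCREAT. The following standalone sketch (plain userspace C, not part of the header; the local macro and variable names are illustrative) shows how such a multi-word bitmap is typically built and tested.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for a few of the FATTR4_WORD* bits above. */
#define WORD0_TYPE (1UL << 1)
#define WORD0_SIZE (1UL << 4)
#define WORD1_MODE (1UL << 1)

int main(void)
{
	uint32_t bitmap[3] = { 0, 0, 0 };

	/* Request the file type and size (word 0) and the mode (word 1). */
	bitmap[0] |= WORD0_TYPE | WORD0_SIZE;
	bitmap[1] |= WORD1_MODE;

	/* Each word is tested independently against its own masks. */
	if (bitmap[0] & WORD0_SIZE)
		printf("size attribute requested\n");
	if (bitmap[1] & WORD1_MODE)
		printf("mode attribute requested\n");
	return 0;
}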
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
new file mode 100644
index 000000000..b95f914ce
--- /dev/null
+++ b/include/linux/nfs_fs.h
@@ -0,0 +1,568 @@
+/*
+ * linux/include/linux/nfs_fs.h
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ *
+ * OS-specific nfs filesystem definitions and declarations
+ */
+#ifndef _LINUX_NFS_FS_H
+#define _LINUX_NFS_FS_H
+
+#include <uapi/linux/nfs_fs.h>
+
+
+/*
+ * Enable dprintk() debugging support for nfs client.
+ */
+#ifdef CONFIG_NFS_DEBUG
+# define NFS_DEBUG
+#endif
+
+#include <linux/in.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/wait.h>
+
+#include <linux/sunrpc/debug.h>
+#include <linux/sunrpc/auth.h>
+#include <linux/sunrpc/clnt.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_xdr.h>
+#include <linux/nfs_fs_sb.h>
+
+#include <linux/mempool.h>
+
+/*
+ * These are the default flags for swap requests
+ */
+#define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS)
+
+/*
+ * NFSv3/v4 Access mode cache entry
+ */
+struct nfs_access_entry {
+ struct rb_node rb_node;
+ struct list_head lru;
+ unsigned long jiffies;
+ struct rpc_cred * cred;
+ int mask;
+ struct rcu_head rcu_head;
+};
+
+struct nfs_lockowner {
+ fl_owner_t l_owner;
+ pid_t l_pid;
+};
+
+#define NFS_IO_INPROGRESS 0
+struct nfs_io_counter {
+ unsigned long flags;
+ atomic_t io_count;
+};
+
+struct nfs_lock_context {
+ atomic_t count;
+ struct list_head list;
+ struct nfs_open_context *open_context;
+ struct nfs_lockowner lockowner;
+ struct nfs_io_counter io_count;
+};
+
+struct nfs4_state;
+struct nfs_open_context {
+ struct nfs_lock_context lock_context;
+ struct dentry *dentry;
+ struct rpc_cred *cred;
+ struct nfs4_state *state;
+ fmode_t mode;
+
+ unsigned long flags;
+#define NFS_CONTEXT_ERROR_WRITE (0)
+#define NFS_CONTEXT_RESEND_WRITES (1)
+#define NFS_CONTEXT_BAD (2)
+ int error;
+
+ struct list_head list;
+ struct nfs4_threshold *mdsthreshold;
+};
+
+struct nfs_open_dir_context {
+ struct list_head list;
+ struct rpc_cred *cred;
+ unsigned long attr_gencount;
+ __u64 dir_cookie;
+ __u64 dup_cookie;
+ signed char duped;
+};
+
+/*
+ * NFSv4 delegation
+ */
+struct nfs_delegation;
+
+struct posix_acl;
+
+/*
+ * nfs fs inode data in memory
+ */
+struct nfs_inode {
+ /*
+ * The 64bit 'inode number'
+ */
+ __u64 fileid;
+
+ /*
+ * NFS file handle
+ */
+ struct nfs_fh fh;
+
+ /*
+ * Various flags
+ */
+ unsigned long flags; /* atomic bit ops */
+ unsigned long cache_validity; /* bit mask */
+
+ /*
+ * read_cache_jiffies is when we started read-caching this inode.
+ * attrtimeo is for how long the cached information is assumed
+ * to be valid. A successful attribute revalidation doubles
+ * attrtimeo (up to acregmax/acdirmax), a failure resets it to
+ * acregmin/acdirmin.
+ *
+ * We need to revalidate the cached attrs for this inode if
+ *
+ * jiffies - read_cache_jiffies >= attrtimeo
+ *
+ * Please note the comparison is greater than or equal
+ * so that zero timeout values can be specified.
+ */
+ unsigned long read_cache_jiffies;
+ unsigned long attrtimeo;
+ unsigned long attrtimeo_timestamp;
+
+ unsigned long attr_gencount;
+ /* "Generation counter" for the attribute cache. This is
+ * bumped whenever we update the metadata on the
+ * server.
+ */
+ unsigned long cache_change_attribute;
+
+ struct rb_root access_cache;
+ struct list_head access_cache_entry_lru;
+ struct list_head access_cache_inode_lru;
+
+ /*
+ * This is the cookie verifier used for NFSv3 readdir
+ * operations
+ */
+ __be32 cookieverf[2];
+
+ unsigned long nrequests;
+ struct nfs_mds_commit_info commit_info;
+
+ /* Open contexts for shared mmap writes */
+ struct list_head open_files;
+
+ /* Number of in-flight sillydelete RPC calls */
+ atomic_t silly_count;
+ /* List of deferred sillydelete requests */
+ struct hlist_head silly_list;
+ wait_queue_head_t waitqueue;
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+ struct nfs4_cached_acl *nfs4_acl;
+ /* NFSv4 state */
+ struct list_head open_states;
+ struct nfs_delegation __rcu *delegation;
+ struct rw_semaphore rwsem;
+
+ /* pNFS layout information */
+ struct pnfs_layout_hdr *layout;
+#endif /* CONFIG_NFS_V4 */
+ /* how many bytes have been written/read and how many bytes queued up */
+ __u64 write_io;
+ __u64 read_io;
+#ifdef CONFIG_NFS_FSCACHE
+ struct fscache_cookie *fscache;
+#endif
+ struct inode vfs_inode;
+};
+
+/*
+ * Cache validity bit flags
+ */
+#define NFS_INO_INVALID_ATTR 0x0001 /* cached attrs are invalid */
+#define NFS_INO_INVALID_DATA 0x0002 /* cached data is invalid */
+#define NFS_INO_INVALID_ATIME 0x0004 /* cached atime is invalid */
+#define NFS_INO_INVALID_ACCESS 0x0008 /* cached access cred invalid */
+#define NFS_INO_INVALID_ACL 0x0010 /* cached acls are invalid */
+#define NFS_INO_REVAL_PAGECACHE 0x0020 /* must revalidate pagecache */
+#define NFS_INO_REVAL_FORCED 0x0040 /* force revalidation ignoring a delegation */
+#define NFS_INO_INVALID_LABEL 0x0080 /* cached label is invalid */
+
+/*
+ * Bit offsets in flags field
+ */
+#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
+#define NFS_INO_STALE (1) /* possible stale inode */
+#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
+#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
+#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
+#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
+#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
+#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */
+#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
+#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
+
+static inline struct nfs_inode *NFS_I(const struct inode *inode)
+{
+ return container_of(inode, struct nfs_inode, vfs_inode);
+}
+
+static inline struct nfs_server *NFS_SB(const struct super_block *s)
+{
+ return (struct nfs_server *)(s->s_fs_info);
+}
+
+static inline struct nfs_fh *NFS_FH(const struct inode *inode)
+{
+ return &NFS_I(inode)->fh;
+}
+
+static inline struct nfs_server *NFS_SERVER(const struct inode *inode)
+{
+ return NFS_SB(inode->i_sb);
+}
+
+static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode)
+{
+ return NFS_SERVER(inode)->client;
+}
+
+static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
+{
+ return NFS_SERVER(inode)->nfs_client->rpc_ops;
+}
+
+static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
+{
+ struct nfs_server *nfss = NFS_SERVER(inode);
+ return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin;
+}
+
+static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode)
+{
+ struct nfs_server *nfss = NFS_SERVER(inode);
+ return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax;
+}
+
+static inline int NFS_STALE(const struct inode *inode)
+{
+ return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+}
+
+static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
+{
+#ifdef CONFIG_NFS_FSCACHE
+ return NFS_I(inode)->fscache;
+#else
+ return NULL;
+#endif
+}
+
+static inline __u64 NFS_FILEID(const struct inode *inode)
+{
+ return NFS_I(inode)->fileid;
+}
+
+static inline void set_nfs_fileid(struct inode *inode, __u64 fileid)
+{
+ NFS_I(inode)->fileid = fileid;
+}
+
+static inline void nfs_mark_for_revalidate(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ spin_lock(&inode->i_lock);
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS;
+ if (S_ISDIR(inode->i_mode))
+ nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+ spin_unlock(&inode->i_lock);
+}
+
+static inline int nfs_server_capable(struct inode *inode, int cap)
+{
+ return NFS_SERVER(inode)->caps & cap;
+}
+
+static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
+{
+ dentry->d_time = verf;
+}
+
+/**
+ * nfs_save_change_attribute - Returns the inode attribute change cookie
+ * @dir - pointer to parent directory inode
+ * The "change attribute" is updated every time we finish an operation
+ * that will result in a metadata change on the server.
+ */
+static inline unsigned long nfs_save_change_attribute(struct inode *dir)
+{
+ return NFS_I(dir)->cache_change_attribute;
+}
+
+/**
+ * nfs_verify_change_attribute - Detects NFS remote directory changes
+ * @dir - pointer to parent directory inode
+ * @chattr - previously saved change attribute
+ * Return "false" if the verifiers doesn't match the change attribute.
+ * This would usually indicate that the directory contents have changed on
+ * the server, and that any dentries need revalidating.
+ */
+static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr)
+{
+ return chattr == NFS_I(dir)->cache_change_attribute;
+}
+
+/*
+ * linux/fs/nfs/inode.c
+ */
+extern int nfs_sync_mapping(struct address_space *mapping);
+extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
+extern void nfs_zap_caches(struct inode *);
+extern void nfs_invalidate_atime(struct inode *);
+extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
+ struct nfs_fattr *, struct nfs4_label *);
+extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr);
+extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr);
+extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
+extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
+extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
+extern int nfs_permission(struct inode *, int);
+extern int nfs_open(struct inode *, struct file *);
+extern int nfs_release(struct inode *, struct file *);
+extern int nfs_attribute_timeout(struct inode *inode);
+extern int nfs_attribute_cache_expired(struct inode *inode);
+extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
+extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode);
+extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
+extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping);
+extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping);
+extern int nfs_setattr(struct dentry *, struct iattr *);
+extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *);
+extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
+ struct nfs4_label *label);
+extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
+extern void put_nfs_open_context(struct nfs_open_context *ctx);
+extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
+extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode);
+extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx);
+extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
+extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx);
+extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx);
+extern u64 nfs_compat_user_ino64(u64 fileid);
+extern void nfs_fattr_init(struct nfs_fattr *fattr);
+extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr);
+extern unsigned long nfs_inc_attr_generation_counter(void);
+
+extern struct nfs_fattr *nfs_alloc_fattr(void);
+
+static inline void nfs_free_fattr(const struct nfs_fattr *fattr)
+{
+ kfree(fattr);
+}
+
+extern struct nfs_fh *nfs_alloc_fhandle(void);
+
+static inline void nfs_free_fhandle(const struct nfs_fh *fh)
+{
+ kfree(fh);
+}
+
+#ifdef NFS_DEBUG
+extern u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh);
+static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh)
+{
+ return _nfs_display_fhandle_hash(fh);
+}
+extern void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption);
+#define nfs_display_fhandle(fh, caption) \
+ do { \
+ if (unlikely(nfs_debug & NFSDBG_FACILITY)) \
+ _nfs_display_fhandle(fh, caption); \
+ } while (0)
+#else
+static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh)
+{
+ return 0;
+}
+static inline void nfs_display_fhandle(const struct nfs_fh *fh,
+ const char *caption)
+{
+}
+#endif
+
+/*
+ * linux/fs/nfs/nfsroot.c
+ */
+extern int nfs_root_data(char **root_device, char **root_data); /*__init*/
+/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
+extern __be32 root_nfs_parse_addr(char *name); /*__init*/
+
+/*
+ * linux/fs/nfs/file.c
+ */
+extern const struct file_operations nfs_file_operations;
+#if IS_ENABLED(CONFIG_NFS_V4)
+extern const struct file_operations nfs4_file_operations;
+#endif /* CONFIG_NFS_V4 */
+extern const struct address_space_operations nfs_file_aops;
+extern const struct address_space_operations nfs_dir_aops;
+
+static inline struct nfs_open_context *nfs_file_open_context(struct file *filp)
+{
+ return filp->private_data;
+}
+
+static inline struct rpc_cred *nfs_file_cred(struct file *file)
+{
+ if (file != NULL) {
+ struct nfs_open_context *ctx =
+ nfs_file_open_context(file);
+ if (ctx)
+ return ctx->cred;
+ }
+ return NULL;
+}
+
+/*
+ * linux/fs/nfs/direct.c
+ */
+extern ssize_t nfs_direct_IO(struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
+ struct iov_iter *iter,
+ loff_t pos);
+extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
+ struct iov_iter *iter);
+
+/*
+ * linux/fs/nfs/dir.c
+ */
+extern const struct file_operations nfs_dir_operations;
+extern const struct dentry_operations nfs_dentry_operations;
+
+extern void nfs_force_lookup_revalidate(struct inode *dir);
+extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
+ struct nfs_fattr *fattr, struct nfs4_label *label);
+extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags);
+extern void nfs_access_zap_cache(struct inode *inode);
+
+/*
+ * linux/fs/nfs/symlink.c
+ */
+extern const struct inode_operations nfs_symlink_inode_operations;
+
+/*
+ * linux/fs/nfs/sysctl.c
+ */
+#ifdef CONFIG_SYSCTL
+extern int nfs_register_sysctl(void);
+extern void nfs_unregister_sysctl(void);
+#else
+#define nfs_register_sysctl() 0
+#define nfs_unregister_sysctl() do { } while(0)
+#endif
+
+/*
+ * linux/fs/nfs/namespace.c
+ */
+extern const struct inode_operations nfs_mountpoint_inode_operations;
+extern const struct inode_operations nfs_referral_inode_operations;
+extern int nfs_mountpoint_expiry_timeout;
+extern void nfs_release_automount_timer(void);
+
+/*
+ * linux/fs/nfs/unlink.c
+ */
+extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
+extern void nfs_wait_on_sillyrename(struct dentry *dentry);
+extern void nfs_block_sillyrename(struct dentry *dentry);
+extern void nfs_unblock_sillyrename(struct dentry *dentry);
+
+/*
+ * linux/fs/nfs/write.c
+ */
+extern int nfs_congestion_kb;
+extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
+extern int nfs_writepages(struct address_space *, struct writeback_control *);
+extern int nfs_flush_incompatible(struct file *file, struct page *page);
+extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
+
+/*
+ * Try to write back everything synchronously (but check the
+ * return value!)
+ */
+extern int nfs_sync_inode(struct inode *inode);
+extern int nfs_wb_all(struct inode *inode);
+extern int nfs_wb_page(struct inode *inode, struct page* page);
+extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
+extern int nfs_commit_inode(struct inode *, int);
+extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+extern void nfs_commit_free(struct nfs_commit_data *data);
+
+static inline int
+nfs_have_writebacks(struct inode *inode)
+{
+ return NFS_I(inode)->nrequests != 0;
+}
+
+/*
+ * linux/fs/nfs/read.c
+ */
+extern int nfs_readpage(struct file *, struct page *);
+extern int nfs_readpages(struct file *, struct address_space *,
+ struct list_head *, unsigned);
+extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
+ struct page *);
+
+/*
+ * inline functions
+ */
+
+static inline loff_t nfs_size_to_loff_t(__u64 size)
+{
+ if (size > (__u64) OFFSET_MAX - 1)
+ return OFFSET_MAX - 1;
+ return (loff_t) size;
+}
+
+static inline ino_t
+nfs_fileid_to_ino_t(u64 fileid)
+{
+ ino_t ino = (ino_t) fileid;
+ if (sizeof(ino_t) < sizeof(u64))
+ ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8;
+ return ino;
+}
+
+#define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
+
+
+# undef ifdebug
+# ifdef NFS_DEBUG
+# define ifdebug(fac) if (unlikely(nfs_debug & NFSDBG_##fac))
+# define NFS_IFDEBUG(x) x
+# else
+# define ifdebug(fac) if (0)
+# define NFS_IFDEBUG(x)
+# endif
+#endif
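The comment inside struct nfs_inode above describes the attribute-cache policy: revalidate once jiffies - read_cache_jiffies >= attrtimeo, double attrtimeo (up to acregmax/acdirmax) after a successful revalidation, and reset it to acregmin/acdirmin after a failure. The standalone sketch below (plain userspace C; the struct and helper names are illustrative, not kernel APIs) restates that policy under those assumptions.

#include <stdbool.h>
#include <stdio.h>

struct attr_cache {
	unsigned long read_cache_jiffies;	/* when caching started */
	unsigned long attrtimeo;		/* current validity window */
	unsigned long acmin, acmax;		/* acregmin/acregmax analogues */
};

static bool needs_revalidation(const struct attr_cache *c, unsigned long now)
{
	/* ">=" so that a zero timeout always forces revalidation. */
	return now - c->read_cache_jiffies >= c->attrtimeo;
}

static void revalidation_done(struct attr_cache *c, unsigned long now, bool ok)
{
	c->read_cache_jiffies = now;
	if (ok) {
		c->attrtimeo *= 2;		/* back off on success */
		if (c->attrtimeo > c->acmax)
			c->attrtimeo = c->acmax;
	} else {
		c->attrtimeo = c->acmin;	/* be aggressive after failure */
	}
}

int main(void)
{
	struct attr_cache c = { .read_cache_jiffies = 0, .attrtimeo = 3,
				.acmin = 3, .acmax = 60 };

	if (needs_revalidation(&c, 5))
		revalidation_done(&c, 5, true);
	printf("attrtimeo=%lu\n", c.attrtimeo);	/* prints 6 */
	return 0;
}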
diff --git a/include/linux/nfs_fs_i.h b/include/linux/nfs_fs_i.h
new file mode 100644
index 000000000..a5c50d973
--- /dev/null
+++ b/include/linux/nfs_fs_i.h
@@ -0,0 +1,20 @@
+#ifndef _NFS_FS_I
+#define _NFS_FS_I
+
+struct nlm_lockowner;
+
+/*
+ * NFS lock info
+ */
+struct nfs_lock_info {
+ u32 state;
+ struct nlm_lockowner *owner;
+ struct list_head list;
+};
+
+struct nfs4_lock_state;
+struct nfs4_lock_info {
+ struct nfs4_lock_state *owner;
+};
+
+#endif
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
new file mode 100644
index 000000000..5e1273d4d
--- /dev/null
+++ b/include/linux/nfs_fs_sb.h
@@ -0,0 +1,241 @@
+#ifndef _NFS_FS_SB
+#define _NFS_FS_SB
+
+#include <linux/list.h>
+#include <linux/backing-dev.h>
+#include <linux/idr.h>
+#include <linux/wait.h>
+#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/xprt.h>
+
+#include <linux/atomic.h>
+
+struct nfs4_session;
+struct nfs_iostats;
+struct nlm_host;
+struct nfs4_sequence_args;
+struct nfs4_sequence_res;
+struct nfs_server;
+struct nfs4_minor_version_ops;
+struct nfs41_server_scope;
+struct nfs41_impl_id;
+
+/*
+ * The nfs_client identifies our client state to the server.
+ */
+struct nfs_client {
+ atomic_t cl_count;
+ atomic_t cl_mds_count;
+ int cl_cons_state; /* current construction state (-ve: init error) */
+#define NFS_CS_READY 0 /* ready to be used */
+#define NFS_CS_INITING 1 /* busy initialising */
+#define NFS_CS_SESSION_INITING 2 /* busy initialising session */
+ unsigned long cl_res_state; /* NFS resources state */
+#define NFS_CS_CALLBACK 1 /* - callback started */
+#define NFS_CS_IDMAP 2 /* - idmap started */
+#define NFS_CS_RENEWD 3 /* - renewd started */
+#define NFS_CS_STOP_RENEW 4 /* no more state to renew */
+#define NFS_CS_CHECK_LEASE_TIME 5 /* need to check lease time */
+ unsigned long cl_flags; /* behavior switches */
+#define NFS_CS_NORESVPORT 0 /* - use ephemeral src port */
+#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */
+#define NFS_CS_MIGRATION 2 /* - transparent state migr */
+#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */
+#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */
+ struct sockaddr_storage cl_addr; /* server identifier */
+ size_t cl_addrlen;
+ char * cl_hostname; /* hostname of server */
+ char * cl_acceptor; /* GSSAPI acceptor name */
+ struct list_head cl_share_link; /* link in global client list */
+ struct list_head cl_superblocks; /* List of nfs_server structs */
+
+ struct rpc_clnt * cl_rpcclient;
+ const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
+ int cl_proto; /* Network transport protocol */
+ struct nfs_subversion * cl_nfs_mod; /* pointer to nfs version module */
+
+ u32 cl_minorversion;/* NFSv4 minorversion */
+ struct rpc_cred *cl_machine_cred;
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+ struct list_head cl_ds_clients; /* auth flavor data servers */
+ u64 cl_clientid; /* constant */
+ nfs4_verifier cl_confirm; /* Clientid verifier */
+ unsigned long cl_state;
+
+ spinlock_t cl_lock;
+
+ unsigned long cl_lease_time;
+ unsigned long cl_last_renewal;
+ struct delayed_work cl_renewd;
+
+ struct rpc_wait_queue cl_rpcwaitq;
+
+ /* idmapper */
+ struct idmap * cl_idmap;
+
+ /* Client owner identifier */
+ const char * cl_owner_id;
+
+ u32 cl_cb_ident; /* v4.0 callback identifier */
+ const struct nfs4_minor_version_ops *cl_mvops;
+ unsigned long cl_mig_gen;
+
+ /* NFSv4.0 transport blocking */
+ struct nfs4_slot_table *cl_slot_tbl;
+
+ /* The sequence id to use for the next CREATE_SESSION */
+ u32 cl_seqid;
+ /* The flags used for obtaining the clientid during EXCHANGE_ID */
+ u32 cl_exchange_flags;
+ struct nfs4_session *cl_session; /* shared session */
+ bool cl_preserve_clid;
+ struct nfs41_server_owner *cl_serverowner;
+ struct nfs41_server_scope *cl_serverscope;
+ struct nfs41_impl_id *cl_implid;
+ /* nfs 4.1+ state protection modes: */
+ unsigned long cl_sp4_flags;
+#define NFS_SP4_MACH_CRED_MINIMAL 1 /* Minimal sp4_mach_cred - state ops
+ * must use machine cred */
+#define NFS_SP4_MACH_CRED_CLEANUP 2 /* CLOSE and LOCKU */
+#define NFS_SP4_MACH_CRED_SECINFO 3 /* SECINFO and SECINFO_NO_NAME */
+#define NFS_SP4_MACH_CRED_STATEID 4 /* TEST_STATEID and FREE_STATEID */
+#define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */
+#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
+#endif /* CONFIG_NFS_V4 */
+
+ /* Our own IP address, as a null-terminated string.
+ * This is used to generate the mv0 callback address.
+ */
+ char cl_ipaddr[48];
+
+#ifdef CONFIG_NFS_FSCACHE
+ struct fscache_cookie *fscache; /* client index cache cookie */
+#endif
+
+ struct net *cl_net;
+};
+
+/*
+ * NFS client parameters stored in the superblock.
+ */
+struct nfs_server {
+ struct nfs_client * nfs_client; /* shared client and NFS4 state */
+ struct list_head client_link; /* List of other nfs_server structs
+ * that share the same client
+ */
+ struct list_head master_link; /* link in master servers list */
+ struct rpc_clnt * client; /* RPC client handle */
+ struct rpc_clnt * client_acl; /* ACL RPC client handle */
+ struct nlm_host *nlm_host; /* NLM client handle */
+ struct nfs_iostats __percpu *io_stats; /* I/O statistics */
+ struct backing_dev_info backing_dev_info;
+ atomic_long_t writeback; /* number of writeback pages */
+ int flags; /* various flags */
+ unsigned int caps; /* server capabilities */
+ unsigned int rsize; /* read size */
+ unsigned int rpages; /* read size (in pages) */
+ unsigned int wsize; /* write size */
+ unsigned int wpages; /* write size (in pages) */
+ unsigned int wtmult; /* server disk block size */
+ unsigned int dtsize; /* readdir size */
+ unsigned short port; /* "port=" setting */
+ unsigned int bsize; /* server block size */
+ unsigned int acregmin; /* attr cache timeouts */
+ unsigned int acregmax;
+ unsigned int acdirmin;
+ unsigned int acdirmax;
+ unsigned int namelen;
+ unsigned int options; /* extra options enabled by mount */
+#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
+#define NFS_OPTION_MIGRATION 0x00000002 /* - NFSv4 migration enabled */
+
+ struct nfs_fsid fsid;
+ __u64 maxfilesize; /* maximum file size */
+ struct timespec time_delta; /* smallest time granularity */
+ unsigned long mount_time; /* when this fs was mounted */
+ struct super_block *super; /* VFS super block */
+ dev_t s_dev; /* superblock dev numbers */
+ struct nfs_auth_info auth_info; /* parsed auth flavors */
+
+#ifdef CONFIG_NFS_FSCACHE
+ struct nfs_fscache_key *fscache_key; /* unique key for superblock */
+ struct fscache_cookie *fscache; /* superblock cookie */
+#endif
+
+ u32 pnfs_blksize; /* layout_blksize attr */
+#if IS_ENABLED(CONFIG_NFS_V4)
+ u32 attr_bitmask[3];/* V4 bitmask representing the set
+ of attributes supported on this
+ filesystem */
+ u32 attr_bitmask_nl[3];
+ /* V4 bitmask representing the
+ set of attributes supported
+ on this filesystem excluding
+ the label support bit. */
+ u32 cache_consistency_bitmask[3];
+ /* V4 bitmask representing the subset
+ of change attribute, size, ctime
+ and mtime attributes supported by
+ the server */
+ u32 acl_bitmask; /* V4 bitmask representing the ACEs
+ that are supported on this
+ filesystem */
+ u32 fh_expire_type; /* V4 bitmask representing file
+ handle volatility type for
+ this filesystem */
+ struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */
+ struct rpc_wait_queue roc_rpcwaitq;
+ void *pnfs_ld_data; /* per mount point data */
+
+ /* the following fields are protected by nfs_client->cl_lock */
+ struct rb_root state_owners;
+#endif
+ struct ida openowner_id;
+ struct ida lockowner_id;
+ struct list_head state_owners_lru;
+ struct list_head layouts;
+ struct list_head delegations;
+
+ unsigned long mig_gen;
+ unsigned long mig_status;
+#define NFS_MIG_IN_TRANSITION (1)
+#define NFS_MIG_FAILED (2)
+
+ void (*destroy)(struct nfs_server *);
+
+ atomic_t active; /* Keep track of any activity to this server */
+
+ /* mountd-related mount options */
+ struct sockaddr_storage mountd_address;
+ size_t mountd_addrlen;
+ u32 mountd_version;
+ unsigned short mountd_port;
+ unsigned short mountd_protocol;
+};
+
+/* Server capabilities */
+#define NFS_CAP_READDIRPLUS (1U << 0)
+#define NFS_CAP_HARDLINKS (1U << 1)
+#define NFS_CAP_SYMLINKS (1U << 2)
+#define NFS_CAP_ACLS (1U << 3)
+#define NFS_CAP_ATOMIC_OPEN (1U << 4)
+#define NFS_CAP_CHANGE_ATTR (1U << 5)
+#define NFS_CAP_FILEID (1U << 6)
+#define NFS_CAP_MODE (1U << 7)
+#define NFS_CAP_NLINK (1U << 8)
+#define NFS_CAP_OWNER (1U << 9)
+#define NFS_CAP_OWNER_GROUP (1U << 10)
+#define NFS_CAP_ATIME (1U << 11)
+#define NFS_CAP_CTIME (1U << 12)
+#define NFS_CAP_MTIME (1U << 13)
+#define NFS_CAP_POSIX_LOCK (1U << 14)
+#define NFS_CAP_UIDGID_NOMAP (1U << 15)
+#define NFS_CAP_STATEID_NFSV41 (1U << 16)
+#define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17)
+#define NFS_CAP_SECURITY_LABEL (1U << 18)
+#define NFS_CAP_SEEK (1U << 19)
+#define NFS_CAP_ALLOCATE (1U << 20)
+#define NFS_CAP_DEALLOCATE (1U << 21)
+
+#endif
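The NFS_CAP_* values above are single bits in nfs_server->caps, which is why nfs_server_capable() in nfs_fs.h reduces to a bitwise AND. A minimal standalone sketch of that pattern (plain userspace C; the CAP_* names are illustrative stand-ins, not the kernel macros):

#include <stdio.h>

#define CAP_HARDLINKS (1U << 1)	/* mirrors NFS_CAP_HARDLINKS */
#define CAP_SYMLINKS  (1U << 2)	/* mirrors NFS_CAP_SYMLINKS  */
#define CAP_ACLS      (1U << 3)	/* mirrors NFS_CAP_ACLS      */

int main(void)
{
	unsigned int caps = CAP_HARDLINKS | CAP_SYMLINKS;

	if (caps & CAP_ACLS)
		printf("server advertises ACL support\n");
	else
		printf("no ACL support; fall back to mode bits only\n");
	return 0;
}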
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
new file mode 100644
index 000000000..9dcbbe9a5
--- /dev/null
+++ b/include/linux/nfs_iostat.h
@@ -0,0 +1,133 @@
+/*
+ * User-space visible declarations for NFS client per-mount
+ * point statistics
+ *
+ * Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com>
+ *
+ * NFS client per-mount statistics provide information about the
+ * health of the NFS client and the health of each NFS mount point.
+ * Generally these are not for detailed problem diagnosis, but
+ * simply to indicate that there is a problem.
+ *
+ * These counters are not meant to be human-readable, but are meant
+ * to be integrated into system monitoring tools such as "sar" and
+ * "iostat". As such, the counters are sampled by the tools over
+ * time, and are never zeroed after a file system is mounted.
+ * Moving averages can be computed by the tools by taking the
+ * difference between two instantaneous samples and dividing that
+ * by the time between the samples.
+ */
+
+#ifndef _LINUX_NFS_IOSTAT
+#define _LINUX_NFS_IOSTAT
+
+#define NFS_IOSTAT_VERS "1.1"
+
+/*
+ * NFS byte counters
+ *
+ * 1. SERVER - the number of payload bytes read from or written
+ * to the server by the NFS client via an NFS READ or WRITE
+ * request.
+ *
+ * 2. NORMAL - the number of bytes read or written by applications
+ * via the read(2) and write(2) system call interfaces.
+ *
+ * 3. DIRECT - the number of bytes read or written from files
+ * opened with the O_DIRECT flag.
+ *
+ * These counters give a view of the data throughput into and out
+ * of the NFS client. Comparing the number of bytes requested by
+ * an application with the number of bytes the client requests from
+ * the server can provide an indication of client efficiency
+ * (per-op, cache hits, etc).
+ *
+ * These counters can also help characterize which access methods
+ * are in use. DIRECT by itself shows whether there is any O_DIRECT
+ * traffic. NORMAL + DIRECT shows how much data is going through
+ * the system call interface. A large amount of SERVER traffic
+ * without much NORMAL or DIRECT traffic shows that applications
+ * are using mapped files.
+ *
+ * NFS page counters
+ *
+ * These count the number of pages read or written via nfs_readpage(),
+ * nfs_readpages(), or their write equivalents.
+ *
+ * NB: When adding new byte counters, please include the measured
+ * units in the name of each byte counter to help users of this
+ * interface determine what exactly is being counted.
+ */
+enum nfs_stat_bytecounters {
+ NFSIOS_NORMALREADBYTES = 0,
+ NFSIOS_NORMALWRITTENBYTES,
+ NFSIOS_DIRECTREADBYTES,
+ NFSIOS_DIRECTWRITTENBYTES,
+ NFSIOS_SERVERREADBYTES,
+ NFSIOS_SERVERWRITTENBYTES,
+ NFSIOS_READPAGES,
+ NFSIOS_WRITEPAGES,
+ __NFSIOS_BYTESMAX,
+};
+
+/*
+ * NFS event counters
+ *
+ * These counters provide a low-overhead way of monitoring client
+ * activity without enabling NFS trace debugging. The counters
+ * show the rate at which VFS requests are made, and how often the
+ * client invalidates its data and attribute caches. This allows
+ * system administrators to monitor such things as how close-to-open
+ * is working, and answer questions such as "why are there so many
+ * GETATTR requests on the wire?"
+ *
+ * They also count anomalous events such as short reads and writes,
+ * silly renames due to close-after-delete, and operations that
+ * change the size of a file (such operations can often be the
+ * source of data corruption if applications aren't using file
+ * locking properly).
+ */
+enum nfs_stat_eventcounters {
+ NFSIOS_INODEREVALIDATE = 0,
+ NFSIOS_DENTRYREVALIDATE,
+ NFSIOS_DATAINVALIDATE,
+ NFSIOS_ATTRINVALIDATE,
+ NFSIOS_VFSOPEN,
+ NFSIOS_VFSLOOKUP,
+ NFSIOS_VFSACCESS,
+ NFSIOS_VFSUPDATEPAGE,
+ NFSIOS_VFSREADPAGE,
+ NFSIOS_VFSREADPAGES,
+ NFSIOS_VFSWRITEPAGE,
+ NFSIOS_VFSWRITEPAGES,
+ NFSIOS_VFSGETDENTS,
+ NFSIOS_VFSSETATTR,
+ NFSIOS_VFSFLUSH,
+ NFSIOS_VFSFSYNC,
+ NFSIOS_VFSLOCK,
+ NFSIOS_VFSRELEASE,
+ NFSIOS_CONGESTIONWAIT,
+ NFSIOS_SETATTRTRUNC,
+ NFSIOS_EXTENDWRITE,
+ NFSIOS_SILLYRENAME,
+ NFSIOS_SHORTREAD,
+ NFSIOS_SHORTWRITE,
+ NFSIOS_DELAY,
+ NFSIOS_PNFS_READ,
+ NFSIOS_PNFS_WRITE,
+ __NFSIOS_COUNTSMAX,
+};
+
+/*
+ * NFS local caching servicing counters
+ */
+enum nfs_stat_fscachecounters {
+ NFSIOS_FSCACHE_PAGES_READ_OK,
+ NFSIOS_FSCACHE_PAGES_READ_FAIL,
+ NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
+ NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
+ NFSIOS_FSCACHE_PAGES_UNCACHED,
+ __NFSIOS_FSCACHEMAX,
+};
+
+#endif /* _LINUX_NFS_IOSTAT */
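The header comment explains that these counters increase monotonically and are never zeroed, so monitoring tools derive rates by differencing two samples and dividing by the sampling interval. A standalone sketch of that calculation (plain userspace C; the sample values and the ten-second interval are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two samples of a SERVER read-byte counter, taken 10 s apart. */
	uint64_t bytes_t0 = 1048576;
	uint64_t bytes_t1 = 5242880;
	double interval = 10.0;	/* seconds between samples */

	double rate = (double)(bytes_t1 - bytes_t0) / interval;
	printf("read throughput: %.0f bytes/s\n", rate);	/* 419430 */
	return 0;
}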
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
new file mode 100644
index 000000000..3eb072dbc
--- /dev/null
+++ b/include/linux/nfs_page.h
@@ -0,0 +1,191 @@
+/*
+ * linux/include/linux/nfs_page.h
+ *
+ * Copyright (C) 2000 Trond Myklebust
+ *
+ * NFS page cache wrapper.
+ */
+
+#ifndef _LINUX_NFS_PAGE_H
+#define _LINUX_NFS_PAGE_H
+
+
+#include <linux/list.h>
+#include <linux/pagemap.h>
+#include <linux/wait.h>
+#include <linux/sunrpc/auth.h>
+#include <linux/nfs_xdr.h>
+
+#include <linux/kref.h>
+
+/*
+ * Valid flags for a dirty buffer
+ */
+enum {
+ PG_BUSY = 0, /* nfs_{un}lock_request */
+ PG_MAPPED, /* page private set for buffered io */
+ PG_CLEAN, /* write succeeded */
+ PG_COMMIT_TO_DS, /* used by pnfs layouts */
+ PG_INODE_REF, /* extra ref held by inode when in writeback */
+ PG_HEADLOCK, /* page group lock of wb_head */
+ PG_TEARDOWN, /* page group sync for destroy */
+ PG_UNLOCKPAGE, /* page group sync bit in read path */
+ PG_UPTODATE, /* page group sync bit in read path */
+ PG_WB_END, /* page group sync bit in write path */
+ PG_REMOVE, /* page group sync bit in write path */
+};
+
+struct nfs_inode;
+struct nfs_page {
+ struct list_head wb_list; /* Defines state of page: */
+ struct page *wb_page; /* page to read in/write out */
+ struct nfs_open_context *wb_context; /* File state context info */
+ struct nfs_lock_context *wb_lock_context; /* lock context info */
+ pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */
+ unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
+ wb_pgbase, /* Start of page data */
+ wb_bytes; /* Length of request */
+ struct kref wb_kref; /* reference count */
+ unsigned long wb_flags;
+ struct nfs_write_verifier wb_verf; /* Commit cookie */
+ struct nfs_page *wb_this_page; /* list of reqs for this page */
+ struct nfs_page *wb_head; /* head pointer for req list */
+};
+
+struct nfs_pageio_descriptor;
+struct nfs_pageio_ops {
+ void (*pg_init)(struct nfs_pageio_descriptor *, struct nfs_page *);
+ size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *,
+ struct nfs_page *);
+ int (*pg_doio)(struct nfs_pageio_descriptor *);
+ unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *,
+ struct nfs_page *);
+ void (*pg_cleanup)(struct nfs_pageio_descriptor *);
+};
+
+struct nfs_rw_ops {
+ const fmode_t rw_mode;
+ struct nfs_pgio_header *(*rw_alloc_header)(void);
+ void (*rw_free_header)(struct nfs_pgio_header *);
+ void (*rw_release)(struct nfs_pgio_header *);
+ int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *,
+ struct inode *);
+ void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *);
+ void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *,
+ const struct nfs_rpc_ops *,
+ struct rpc_task_setup *, int);
+};
+
+struct nfs_pgio_mirror {
+ struct list_head pg_list;
+ unsigned long pg_bytes_written;
+ size_t pg_count;
+ size_t pg_bsize;
+ unsigned int pg_base;
+ unsigned char pg_recoalesce : 1;
+};
+
+struct nfs_pageio_descriptor {
+ unsigned char pg_moreio : 1;
+ struct inode *pg_inode;
+ const struct nfs_pageio_ops *pg_ops;
+ const struct nfs_rw_ops *pg_rw_ops;
+ int pg_ioflags;
+ int pg_error;
+ const struct rpc_call_ops *pg_rpc_callops;
+ const struct nfs_pgio_completion_ops *pg_completion_ops;
+ struct pnfs_layout_segment *pg_lseg;
+ struct nfs_direct_req *pg_dreq;
+ void *pg_layout_private;
+ unsigned int pg_bsize; /* default bsize for mirrors */
+
+ u32 pg_mirror_count;
+ struct nfs_pgio_mirror *pg_mirrors;
+ struct nfs_pgio_mirror pg_mirrors_static[1];
+ struct nfs_pgio_mirror *pg_mirrors_dynamic;
+ u32 pg_mirror_idx; /* current mirror */
+};
+
+/* arbitrarily selected limit to number of mirrors */
+#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16
+
+#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
+
+extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
+ struct page *page,
+ struct nfs_page *last,
+ unsigned int offset,
+ unsigned int count);
+extern void nfs_release_request(struct nfs_page *);
+
+
+extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+ struct inode *inode,
+ const struct nfs_pageio_ops *pg_ops,
+ const struct nfs_pgio_completion_ops *compl_ops,
+ const struct nfs_rw_ops *rw_ops,
+ size_t bsize,
+ int how);
+extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
+ struct nfs_page *);
+extern int nfs_pageio_resend(struct nfs_pageio_descriptor *,
+ struct nfs_pgio_header *);
+extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
+extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
+extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *prev,
+ struct nfs_page *req);
+extern int nfs_wait_on_request(struct nfs_page *);
+extern void nfs_unlock_request(struct nfs_page *req);
+extern void nfs_unlock_and_release_request(struct nfs_page *);
+extern int nfs_page_group_lock(struct nfs_page *, bool);
+extern void nfs_page_group_lock_wait(struct nfs_page *);
+extern void nfs_page_group_unlock(struct nfs_page *);
+extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+
+/*
+ * Lock the page of an asynchronous request
+ */
+static inline int
+nfs_lock_request(struct nfs_page *req)
+{
+ return !test_and_set_bit(PG_BUSY, &req->wb_flags);
+}
+
+/**
+ * nfs_list_add_request - Insert a request into a list
+ * @req: request
+ * @head: head of list into which to insert the request.
+ */
+static inline void
+nfs_list_add_request(struct nfs_page *req, struct list_head *head)
+{
+ list_add_tail(&req->wb_list, head);
+}
+
+
+/**
+ * nfs_list_remove_request - Remove a request from its wb_list
+ * @req: request
+ */
+static inline void
+nfs_list_remove_request(struct nfs_page *req)
+{
+ if (list_empty(&req->wb_list))
+ return;
+ list_del_init(&req->wb_list);
+}
+
+static inline struct nfs_page *
+nfs_list_entry(struct list_head *head)
+{
+ return list_entry(head, struct nfs_page, wb_list);
+}
+
+static inline
+loff_t req_offset(struct nfs_page *req)
+{
+ return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset;
+}
+
+#endif /* _LINUX_NFS_PAGE_H */
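req_offset() above converts a request back into an absolute file offset: the page index shifted left by the page-size shift, plus the offset within that page. The standalone sketch below assumes a 4 KiB page size (shift of 12) purely for illustration; PAGE_CACHE_SHIFT is whatever the architecture actually defines.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_EXAMPLE 12	/* assume 4 KiB pages */

int main(void)
{
	uint64_t wb_index = 3;		/* fourth page of the file */
	unsigned int wb_offset = 512;	/* bytes into that page */

	uint64_t offset = (wb_index << PAGE_SHIFT_EXAMPLE) + wb_offset;
	printf("request starts at byte %llu\n",
	       (unsigned long long)offset);	/* 12288 + 512 = 12800 */
	return 0;
}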
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
new file mode 100644
index 000000000..e9e9a8dcf
--- /dev/null
+++ b/include/linux/nfs_xdr.h
@@ -0,0 +1,1541 @@
+#ifndef _LINUX_NFS_XDR_H
+#define _LINUX_NFS_XDR_H
+
+#include <linux/nfsacl.h>
+#include <linux/sunrpc/gss_api.h>
+
+/*
+ * To change the maximum rsize and wsize supported by the NFS client, adjust
+ * NFS_MAX_FILE_IO_SIZE. 64KB is a typical maximum, but some servers can
+ * support a megabyte or more. The default is left at 4096 bytes, which is
+ * reasonable for NFS over UDP.
+ */
+#define NFS_MAX_FILE_IO_SIZE (1048576U)
+#define NFS_DEF_FILE_IO_SIZE (4096U)
+#define NFS_MIN_FILE_IO_SIZE (1024U)
+
+struct nfs4_string {
+ unsigned int len;
+ char *data;
+};
+
+struct nfs_fsid {
+ uint64_t major;
+ uint64_t minor;
+};
+
+/*
+ * Helper for checking equality between 2 fsids.
+ */
+static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid *b)
+{
+ return a->major == b->major && a->minor == b->minor;
+}
+
+struct nfs4_threshold {
+ __u32 bm;
+ __u32 l_type;
+ __u64 rd_sz;
+ __u64 wr_sz;
+ __u64 rd_io_sz;
+ __u64 wr_io_sz;
+};
+
+struct nfs_fattr {
+ unsigned int valid; /* which fields are valid */
+ umode_t mode;
+ __u32 nlink;
+ kuid_t uid;
+ kgid_t gid;
+ dev_t rdev;
+ __u64 size;
+ union {
+ struct {
+ __u32 blocksize;
+ __u32 blocks;
+ } nfs2;
+ struct {
+ __u64 used;
+ } nfs3;
+ } du;
+ struct nfs_fsid fsid;
+ __u64 fileid;
+ __u64 mounted_on_fileid;
+ struct timespec atime;
+ struct timespec mtime;
+ struct timespec ctime;
+ __u64 change_attr; /* NFSv4 change attribute */
+ __u64 pre_change_attr;/* pre-op NFSv4 change attribute */
+ __u64 pre_size; /* pre_op_attr.size */
+ struct timespec pre_mtime; /* pre_op_attr.mtime */
+ struct timespec pre_ctime; /* pre_op_attr.ctime */
+ unsigned long time_start;
+ unsigned long gencount;
+ struct nfs4_string *owner_name;
+ struct nfs4_string *group_name;
+ struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */
+};
+
+#define NFS_ATTR_FATTR_TYPE (1U << 0)
+#define NFS_ATTR_FATTR_MODE (1U << 1)
+#define NFS_ATTR_FATTR_NLINK (1U << 2)
+#define NFS_ATTR_FATTR_OWNER (1U << 3)
+#define NFS_ATTR_FATTR_GROUP (1U << 4)
+#define NFS_ATTR_FATTR_RDEV (1U << 5)
+#define NFS_ATTR_FATTR_SIZE (1U << 6)
+#define NFS_ATTR_FATTR_PRESIZE (1U << 7)
+#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8)
+#define NFS_ATTR_FATTR_SPACE_USED (1U << 9)
+#define NFS_ATTR_FATTR_FSID (1U << 10)
+#define NFS_ATTR_FATTR_FILEID (1U << 11)
+#define NFS_ATTR_FATTR_ATIME (1U << 12)
+#define NFS_ATTR_FATTR_MTIME (1U << 13)
+#define NFS_ATTR_FATTR_CTIME (1U << 14)
+#define NFS_ATTR_FATTR_PREMTIME (1U << 15)
+#define NFS_ATTR_FATTR_PRECTIME (1U << 16)
+#define NFS_ATTR_FATTR_CHANGE (1U << 17)
+#define NFS_ATTR_FATTR_PRECHANGE (1U << 18)
+#define NFS_ATTR_FATTR_V4_LOCATIONS (1U << 19)
+#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 20)
+#define NFS_ATTR_FATTR_MOUNTPOINT (1U << 21)
+#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22)
+#define NFS_ATTR_FATTR_OWNER_NAME (1U << 23)
+#define NFS_ATTR_FATTR_GROUP_NAME (1U << 24)
+#define NFS_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25)
+
+#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
+ | NFS_ATTR_FATTR_MODE \
+ | NFS_ATTR_FATTR_NLINK \
+ | NFS_ATTR_FATTR_OWNER \
+ | NFS_ATTR_FATTR_GROUP \
+ | NFS_ATTR_FATTR_RDEV \
+ | NFS_ATTR_FATTR_SIZE \
+ | NFS_ATTR_FATTR_FSID \
+ | NFS_ATTR_FATTR_FILEID \
+ | NFS_ATTR_FATTR_ATIME \
+ | NFS_ATTR_FATTR_MTIME \
+ | NFS_ATTR_FATTR_CTIME \
+ | NFS_ATTR_FATTR_CHANGE)
+#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
+ | NFS_ATTR_FATTR_BLOCKS_USED)
+#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
+ | NFS_ATTR_FATTR_SPACE_USED)
+#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
+ | NFS_ATTR_FATTR_SPACE_USED \
+ | NFS_ATTR_FATTR_V4_SECURITY_LABEL)
+
+/*
+ * Info on the file system
+ */
+struct nfs_fsinfo {
+ struct nfs_fattr *fattr; /* Post-op attributes */
+ __u32 rtmax; /* max. read transfer size */
+ __u32 rtpref; /* pref. read transfer size */
+ __u32 rtmult; /* reads should be multiple of this */
+ __u32 wtmax; /* max. write transfer size */
+ __u32 wtpref; /* pref. write transfer size */
+ __u32 wtmult; /* writes should be multiple of this */
+ __u32 dtpref; /* pref. readdir transfer size */
+ __u64 maxfilesize;
+ struct timespec time_delta; /* server time granularity */
+ __u32 lease_time; /* in seconds */
+ __u32 layouttype; /* supported pnfs layout driver */
+ __u32 blksize; /* preferred pnfs io block size */
+};
+
+struct nfs_fsstat {
+ struct nfs_fattr *fattr; /* Post-op attributes */
+ __u64 tbytes; /* total size in bytes */
+ __u64 fbytes; /* # of free bytes */
+ __u64 abytes; /* # of bytes available to user */
+ __u64 tfiles; /* # of files */
+ __u64 ffiles; /* # of free files */
+ __u64 afiles; /* # of files available to user */
+};
+
+struct nfs2_fsstat {
+ __u32 tsize; /* Server transfer size */
+ __u32 bsize; /* Filesystem block size */
+ __u32 blocks; /* No. of "bsize" blocks on filesystem */
+ __u32 bfree; /* No. of free "bsize" blocks */
+ __u32 bavail; /* No. of available "bsize" blocks */
+};
+
+struct nfs_pathconf {
+ struct nfs_fattr *fattr; /* Post-op attributes */
+ __u32 max_link; /* max # of hard links */
+ __u32 max_namelen; /* max name length */
+};
+
+struct nfs4_change_info {
+ u32 atomic;
+ u64 before;
+ u64 after;
+};
+
+struct nfs_seqid;
+
+/* nfs41 sessions channel attributes */
+struct nfs4_channel_attrs {
+ u32 max_rqst_sz;
+ u32 max_resp_sz;
+ u32 max_resp_sz_cached;
+ u32 max_ops;
+ u32 max_reqs;
+};
+
+struct nfs4_slot;
+struct nfs4_sequence_args {
+ struct nfs4_slot *sa_slot;
+ u8 sa_cache_this : 1,
+ sa_privileged : 1;
+};
+
+struct nfs4_sequence_res {
+ struct nfs4_slot *sr_slot; /* slot used to send request */
+ unsigned long sr_timestamp;
+ int sr_status; /* sequence operation status */
+ u32 sr_status_flags;
+ u32 sr_highest_slotid;
+ u32 sr_target_highest_slotid;
+};
+
+struct nfs4_get_lease_time_args {
+ struct nfs4_sequence_args la_seq_args;
+};
+
+struct nfs4_get_lease_time_res {
+ struct nfs4_sequence_res lr_seq_res;
+ struct nfs_fsinfo *lr_fsinfo;
+};
+
+#define PNFS_LAYOUT_MAXSIZE 4096
+
+struct nfs4_layoutdriver_data {
+ struct page **pages;
+ __u32 pglen;
+ __u32 len;
+};
+
+struct pnfs_layout_range {
+ u32 iomode;
+ u64 offset;
+ u64 length;
+};
+
+struct nfs4_layoutget_args {
+ struct nfs4_sequence_args seq_args;
+ __u32 type;
+ struct pnfs_layout_range range;
+ __u64 minlength;
+ __u32 maxcount;
+ struct inode *inode;
+ struct nfs_open_context *ctx;
+ nfs4_stateid stateid;
+ unsigned long timestamp;
+ struct nfs4_layoutdriver_data layout;
+};
+
+struct nfs4_layoutget_res {
+ struct nfs4_sequence_res seq_res;
+ __u32 return_on_close;
+ struct pnfs_layout_range range;
+ __u32 type;
+ nfs4_stateid stateid;
+ struct nfs4_layoutdriver_data *layoutp;
+};
+
+struct nfs4_layoutget {
+ struct nfs4_layoutget_args args;
+ struct nfs4_layoutget_res res;
+ struct rpc_cred *cred;
+ gfp_t gfp_flags;
+};
+
+struct nfs4_getdeviceinfo_args {
+ struct nfs4_sequence_args seq_args;
+ struct pnfs_device *pdev;
+ __u32 notify_types;
+};
+
+struct nfs4_getdeviceinfo_res {
+ struct nfs4_sequence_res seq_res;
+ struct pnfs_device *pdev;
+ __u32 notification;
+};
+
+struct nfs4_layoutcommit_args {
+ struct nfs4_sequence_args seq_args;
+ nfs4_stateid stateid;
+ __u64 lastbytewritten;
+ struct inode *inode;
+ const u32 *bitmask;
+ size_t layoutupdate_len;
+ struct page *layoutupdate_page;
+ struct page **layoutupdate_pages;
+};
+
+struct nfs4_layoutcommit_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fattr *fattr;
+ const struct nfs_server *server;
+ int status;
+};
+
+struct nfs4_layoutcommit_data {
+ struct rpc_task task;
+ struct nfs_fattr fattr;
+ struct list_head lseg_list;
+ struct rpc_cred *cred;
+ struct inode *inode;
+ struct nfs4_layoutcommit_args args;
+ struct nfs4_layoutcommit_res res;
+};
+
+struct nfs4_layoutreturn_args {
+ struct nfs4_sequence_args seq_args;
+ struct pnfs_layout_hdr *layout;
+ struct inode *inode;
+ struct pnfs_layout_range range;
+ nfs4_stateid stateid;
+ __u32 layout_type;
+};
+
+struct nfs4_layoutreturn_res {
+ struct nfs4_sequence_res seq_res;
+ u32 lrs_present;
+ nfs4_stateid stateid;
+};
+
+struct nfs4_layoutreturn {
+ struct nfs4_layoutreturn_args args;
+ struct nfs4_layoutreturn_res res;
+ struct rpc_cred *cred;
+ struct nfs_client *clp;
+ struct inode *inode;
+ int rpc_status;
+};
+
+struct stateowner_id {
+ __u64 create_time;
+ __u32 uniquifier;
+};
+
+/*
+ * Arguments to the open call.
+ */
+struct nfs_openargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ struct nfs_seqid * seqid;
+ int open_flags;
+ fmode_t fmode;
+ u32 share_access;
+ u32 access;
+ __u64 clientid;
+ struct stateowner_id id;
+ union {
+ struct {
+ struct iattr * attrs; /* UNCHECKED, GUARDED */
+ nfs4_verifier verifier; /* EXCLUSIVE */
+ };
+ nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
+ fmode_t delegation_type; /* CLAIM_PREVIOUS */
+ } u;
+ const struct qstr * name;
+ const struct nfs_server *server; /* Needed for ID mapping */
+ const u32 * bitmask;
+ const u32 * open_bitmap;
+ __u32 claim;
+ enum createmode4 createmode;
+ const struct nfs4_label *label;
+};
+
+struct nfs_openres {
+ struct nfs4_sequence_res seq_res;
+ nfs4_stateid stateid;
+ struct nfs_fh fh;
+ struct nfs4_change_info cinfo;
+ __u32 rflags;
+ struct nfs_fattr * f_attr;
+ struct nfs4_label *f_label;
+ struct nfs_seqid * seqid;
+ const struct nfs_server *server;
+ fmode_t delegation_type;
+ nfs4_stateid delegation;
+ __u32 do_recall;
+ __u64 maxsize;
+ __u32 attrset[NFS4_BITMAP_SIZE];
+ struct nfs4_string *owner;
+ struct nfs4_string *group_owner;
+ __u32 access_request;
+ __u32 access_supported;
+ __u32 access_result;
+};
+
+/*
+ * Arguments to the open_confirm call.
+ */
+struct nfs_open_confirmargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ nfs4_stateid * stateid;
+ struct nfs_seqid * seqid;
+};
+
+struct nfs_open_confirmres {
+ struct nfs4_sequence_res seq_res;
+ nfs4_stateid stateid;
+ struct nfs_seqid * seqid;
+};
+
+/*
+ * Arguments to the close call.
+ */
+struct nfs_closeargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh * fh;
+ nfs4_stateid stateid;
+ struct nfs_seqid * seqid;
+ fmode_t fmode;
+ u32 share_access;
+ const u32 * bitmask;
+};
+
+struct nfs_closeres {
+ struct nfs4_sequence_res seq_res;
+ nfs4_stateid stateid;
+ struct nfs_fattr * fattr;
+ struct nfs_seqid * seqid;
+ const struct nfs_server *server;
+};
+/*
+ * Arguments to the lock, lockt, and locku calls.
+ */
+struct nfs_lowner {
+ __u64 clientid;
+ __u64 id;
+ dev_t s_dev;
+};
+
+struct nfs_lock_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh * fh;
+ struct file_lock * fl;
+ struct nfs_seqid * lock_seqid;
+ nfs4_stateid lock_stateid;
+ struct nfs_seqid * open_seqid;
+ nfs4_stateid open_stateid;
+ struct nfs_lowner lock_owner;
+ unsigned char block : 1;
+ unsigned char reclaim : 1;
+ unsigned char new_lock : 1;
+ unsigned char new_lock_owner : 1;
+};
+
+struct nfs_lock_res {
+ struct nfs4_sequence_res seq_res;
+ nfs4_stateid stateid;
+ struct nfs_seqid * lock_seqid;
+ struct nfs_seqid * open_seqid;
+};
+
+struct nfs_locku_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh * fh;
+ struct file_lock * fl;
+ struct nfs_seqid * seqid;
+ nfs4_stateid stateid;
+};
+
+struct nfs_locku_res {
+ struct nfs4_sequence_res seq_res;
+ nfs4_stateid stateid;
+ struct nfs_seqid * seqid;
+};
+
+struct nfs_lockt_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh * fh;
+ struct file_lock * fl;
+ struct nfs_lowner lock_owner;
+};
+
+struct nfs_lockt_res {
+ struct nfs4_sequence_res seq_res;
+ struct file_lock * denied; /* LOCK, LOCKT failed */
+};
+
+struct nfs_release_lockowner_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_lowner lock_owner;
+};
+
+struct nfs_release_lockowner_res {
+ struct nfs4_sequence_res seq_res;
+};
+
+struct nfs4_delegreturnargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fhandle;
+ const nfs4_stateid *stateid;
+ const u32 * bitmask;
+};
+
+struct nfs4_delegreturnres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fattr * fattr;
+ const struct nfs_server *server;
+};
+
+/*
+ * Arguments to the write call.
+ */
+struct nfs_write_verifier {
+ char data[8];
+};
+
+struct nfs_writeverf {
+ struct nfs_write_verifier verifier;
+ enum nfs3_stable_how committed;
+};
+
+/*
+ * Arguments shared by the read and write call.
+ */
+struct nfs_pgio_args {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh * fh;
+ struct nfs_open_context *context;
+ struct nfs_lock_context *lock_context;
+ nfs4_stateid stateid;
+ __u64 offset;
+ __u32 count;
+ unsigned int pgbase;
+ struct page ** pages;
+ const u32 * bitmask; /* used by write */
+ enum nfs3_stable_how stable; /* used by write */
+};
+
+struct nfs_pgio_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fattr * fattr;
+ __u32 count;
+ __u32 op_status;
+ int eof; /* used by read */
+ struct nfs_writeverf * verf; /* used by write */
+ const struct nfs_server *server; /* used by write */
+
+};
+
+/*
+ * Arguments to the commit call.
+ */
+struct nfs_commitargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fh;
+ __u64 offset;
+ __u32 count;
+ const u32 *bitmask;
+};
+
+struct nfs_commitres {
+ struct nfs4_sequence_res seq_res;
+ __u32 op_status;
+ struct nfs_fattr *fattr;
+ struct nfs_writeverf *verf;
+ const struct nfs_server *server;
+};
+
+/*
+ * Common arguments to the unlink call
+ */
+struct nfs_removeargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fh;
+ struct qstr name;
+};
+
+struct nfs_removeres {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server *server;
+ struct nfs_fattr *dir_attr;
+ struct nfs4_change_info cinfo;
+};
+
+/*
+ * Common arguments to the rename call
+ */
+struct nfs_renameargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *old_dir;
+ const struct nfs_fh *new_dir;
+ const struct qstr *old_name;
+ const struct qstr *new_name;
+};
+
+struct nfs_renameres {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server *server;
+ struct nfs4_change_info old_cinfo;
+ struct nfs_fattr *old_fattr;
+ struct nfs4_change_info new_cinfo;
+ struct nfs_fattr *new_fattr;
+};
+
+/* parsed sec= options */
+#define NFS_AUTH_INFO_MAX_FLAVORS 12 /* see fs/nfs/super.c */
+struct nfs_auth_info {
+ unsigned int flavor_len;
+ rpc_authflavor_t flavors[NFS_AUTH_INFO_MAX_FLAVORS];
+};
+
+/*
+ * Argument struct for decode_entry function
+ */
+struct nfs_entry {
+ __u64 ino;
+ __u64 cookie,
+ prev_cookie;
+ const char * name;
+ unsigned int len;
+ int eof;
+ struct nfs_fh * fh;
+ struct nfs_fattr * fattr;
+ struct nfs4_label *label;
+ unsigned char d_type;
+ struct nfs_server * server;
+};
+
+/*
+ * The following types are for NFSv2 only.
+ */
+struct nfs_sattrargs {
+ struct nfs_fh * fh;
+ struct iattr * sattr;
+};
+
+struct nfs_diropargs {
+ struct nfs_fh * fh;
+ const char * name;
+ unsigned int len;
+};
+
+struct nfs_createargs {
+ struct nfs_fh * fh;
+ const char * name;
+ unsigned int len;
+ struct iattr * sattr;
+};
+
+struct nfs_setattrargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh * fh;
+ nfs4_stateid stateid;
+ struct iattr * iap;
+ const struct nfs_server * server; /* Needed for name mapping */
+ const u32 * bitmask;
+ const struct nfs4_label *label;
+};
+
+struct nfs_setaclargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh * fh;
+ size_t acl_len;
+ unsigned int acl_pgbase;
+ struct page ** acl_pages;
+};
+
+struct nfs_setaclres {
+ struct nfs4_sequence_res seq_res;
+};
+
+struct nfs_getaclargs {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh * fh;
+ size_t acl_len;
+ unsigned int acl_pgbase;
+ struct page ** acl_pages;
+};
+
+/* getxattr ACL interface flags */
+#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */
+struct nfs_getaclres {
+ struct nfs4_sequence_res seq_res;
+ size_t acl_len;
+ size_t acl_data_offset;
+ int acl_flags;
+ struct page * acl_scratch;
+};
+
+struct nfs_setattrres {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fattr * fattr;
+ struct nfs4_label *label;
+ const struct nfs_server * server;
+};
+
+struct nfs_linkargs {
+ struct nfs_fh * fromfh;
+ struct nfs_fh * tofh;
+ const char * toname;
+ unsigned int tolen;
+};
+
+struct nfs_symlinkargs {
+ struct nfs_fh * fromfh;
+ const char * fromname;
+ unsigned int fromlen;
+ struct page ** pages;
+ unsigned int pathlen;
+ struct iattr * sattr;
+};
+
+struct nfs_readdirargs {
+ struct nfs_fh * fh;
+ __u32 cookie;
+ unsigned int count;
+ struct page ** pages;
+};
+
+struct nfs3_getaclargs {
+ struct nfs_fh * fh;
+ int mask;
+ struct page ** pages;
+};
+
+struct nfs3_setaclargs {
+ struct inode * inode;
+ int mask;
+ struct posix_acl * acl_access;
+ struct posix_acl * acl_default;
+ size_t len;
+ unsigned int npages;
+ struct page ** pages;
+};
+
+struct nfs_diropok {
+ struct nfs_fh * fh;
+ struct nfs_fattr * fattr;
+};
+
+struct nfs_readlinkargs {
+ struct nfs_fh * fh;
+ unsigned int pgbase;
+ unsigned int pglen;
+ struct page ** pages;
+};
+
+struct nfs3_sattrargs {
+ struct nfs_fh * fh;
+ struct iattr * sattr;
+ unsigned int guard;
+ struct timespec guardtime;
+};
+
+struct nfs3_diropargs {
+ struct nfs_fh * fh;
+ const char * name;
+ unsigned int len;
+};
+
+struct nfs3_accessargs {
+ struct nfs_fh * fh;
+ __u32 access;
+};
+
+struct nfs3_createargs {
+ struct nfs_fh * fh;
+ const char * name;
+ unsigned int len;
+ struct iattr * sattr;
+ enum nfs3_createmode createmode;
+ __be32 verifier[2];
+};
+
+struct nfs3_mkdirargs {
+ struct nfs_fh * fh;
+ const char * name;
+ unsigned int len;
+ struct iattr * sattr;
+};
+
+struct nfs3_symlinkargs {
+ struct nfs_fh * fromfh;
+ const char * fromname;
+ unsigned int fromlen;
+ struct page ** pages;
+ unsigned int pathlen;
+ struct iattr * sattr;
+};
+
+struct nfs3_mknodargs {
+ struct nfs_fh * fh;
+ const char * name;
+ unsigned int len;
+ enum nfs3_ftype type;
+ struct iattr * sattr;
+ dev_t rdev;
+};
+
+struct nfs3_linkargs {
+ struct nfs_fh * fromfh;
+ struct nfs_fh * tofh;
+ const char * toname;
+ unsigned int tolen;
+};
+
+struct nfs3_readdirargs {
+ struct nfs_fh * fh;
+ __u64 cookie;
+ __be32 verf[2];
+ int plus;
+ unsigned int count;
+ struct page ** pages;
+};
+
+struct nfs3_diropres {
+ struct nfs_fattr * dir_attr;
+ struct nfs_fh * fh;
+ struct nfs_fattr * fattr;
+};
+
+struct nfs3_accessres {
+ struct nfs_fattr * fattr;
+ __u32 access;
+};
+
+struct nfs3_readlinkargs {
+ struct nfs_fh * fh;
+ unsigned int pgbase;
+ unsigned int pglen;
+ struct page ** pages;
+};
+
+struct nfs3_linkres {
+ struct nfs_fattr * dir_attr;
+ struct nfs_fattr * fattr;
+};
+
+struct nfs3_readdirres {
+ struct nfs_fattr * dir_attr;
+ __be32 * verf;
+ int plus;
+};
+
+struct nfs3_getaclres {
+ struct nfs_fattr * fattr;
+ int mask;
+ unsigned int acl_access_count;
+ unsigned int acl_default_count;
+ struct posix_acl * acl_access;
+ struct posix_acl * acl_default;
+};
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+
+typedef u64 clientid4;
+
+struct nfs4_accessargs {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ const u32 * bitmask;
+ u32 access;
+};
+
+struct nfs4_accessres {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server * server;
+ struct nfs_fattr * fattr;
+ u32 supported;
+ u32 access;
+};
+
+struct nfs4_create_arg {
+ struct nfs4_sequence_args seq_args;
+ u32 ftype;
+ union {
+ struct {
+ struct page ** pages;
+ unsigned int len;
+ } symlink; /* NF4LNK */
+ struct {
+ u32 specdata1;
+ u32 specdata2;
+ } device; /* NF4BLK, NF4CHR */
+ } u;
+ const struct qstr * name;
+ const struct nfs_server * server;
+ const struct iattr * attrs;
+ const struct nfs_fh * dir_fh;
+ const u32 * bitmask;
+ const struct nfs4_label *label;
+};
+
+struct nfs4_create_res {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server * server;
+ struct nfs_fh * fh;
+ struct nfs_fattr * fattr;
+ struct nfs4_label *label;
+ struct nfs4_change_info dir_cinfo;
+};
+
+struct nfs4_fsinfo_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ const u32 * bitmask;
+};
+
+struct nfs4_fsinfo_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fsinfo *fsinfo;
+};
+
+struct nfs4_getattr_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ const u32 * bitmask;
+};
+
+struct nfs4_getattr_res {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server * server;
+ struct nfs_fattr * fattr;
+ struct nfs4_label *label;
+};
+
+struct nfs4_link_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ const struct nfs_fh * dir_fh;
+ const struct qstr * name;
+ const u32 * bitmask;
+};
+
+struct nfs4_link_res {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server * server;
+ struct nfs_fattr * fattr;
+ struct nfs4_label *label;
+ struct nfs4_change_info cinfo;
+ struct nfs_fattr * dir_attr;
+};
+
+
+struct nfs4_lookup_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * dir_fh;
+ const struct qstr * name;
+ const u32 * bitmask;
+};
+
+struct nfs4_lookup_res {
+ struct nfs4_sequence_res seq_res;
+ const struct nfs_server * server;
+ struct nfs_fattr * fattr;
+ struct nfs_fh * fh;
+ struct nfs4_label *label;
+};
+
+struct nfs4_lookup_root_arg {
+ struct nfs4_sequence_args seq_args;
+ const u32 * bitmask;
+};
+
+struct nfs4_pathconf_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ const u32 * bitmask;
+};
+
+struct nfs4_pathconf_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_pathconf *pathconf;
+};
+
+struct nfs4_readdir_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ u64 cookie;
+ nfs4_verifier verifier;
+ u32 count;
+ struct page ** pages; /* zero-copy data */
+ unsigned int pgbase; /* zero-copy data */
+ const u32 * bitmask;
+ int plus;
+};
+
+struct nfs4_readdir_res {
+ struct nfs4_sequence_res seq_res;
+ nfs4_verifier verifier;
+ unsigned int pgbase;
+};
+
+struct nfs4_readlink {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ unsigned int pgbase;
+ unsigned int pglen; /* zero-copy data */
+ struct page ** pages; /* zero-copy data */
+};
+
+struct nfs4_readlink_res {
+ struct nfs4_sequence_res seq_res;
+};
+
+#define NFS4_SETCLIENTID_NAMELEN (127)
+struct nfs4_setclientid {
+ const nfs4_verifier * sc_verifier;
+ unsigned int sc_name_len;
+ char sc_name[NFS4_SETCLIENTID_NAMELEN + 1];
+ u32 sc_prog;
+ unsigned int sc_netid_len;
+ char sc_netid[RPCBIND_MAXNETIDLEN + 1];
+ unsigned int sc_uaddr_len;
+ char sc_uaddr[RPCBIND_MAXUADDRLEN + 1];
+ u32 sc_cb_ident;
+ struct rpc_cred *sc_cred;
+};
+
+struct nfs4_setclientid_res {
+ u64 clientid;
+ nfs4_verifier confirm;
+};
+
+struct nfs4_statfs_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh * fh;
+ const u32 * bitmask;
+};
+
+struct nfs4_statfs_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fsstat *fsstat;
+};
+
+struct nfs4_server_caps_arg {
+ struct nfs4_sequence_args seq_args;
+ struct nfs_fh *fhandle;
+};
+
+struct nfs4_server_caps_res {
+ struct nfs4_sequence_res seq_res;
+ u32 attr_bitmask[3];
+ u32 acl_bitmask;
+ u32 has_links;
+ u32 has_symlinks;
+ u32 fh_expire_type;
+};
+
+#define NFS4_PATHNAME_MAXCOMPONENTS 512
+struct nfs4_pathname {
+ unsigned int ncomponents;
+ struct nfs4_string components[NFS4_PATHNAME_MAXCOMPONENTS];
+};
+
+#define NFS4_FS_LOCATION_MAXSERVERS 10
+struct nfs4_fs_location {
+ unsigned int nservers;
+ struct nfs4_string servers[NFS4_FS_LOCATION_MAXSERVERS];
+ struct nfs4_pathname rootpath;
+};
+
+#define NFS4_FS_LOCATIONS_MAXENTRIES 10
+struct nfs4_fs_locations {
+ struct nfs_fattr fattr;
+ const struct nfs_server *server;
+ struct nfs4_pathname fs_path;
+ int nlocations;
+ struct nfs4_fs_location locations[NFS4_FS_LOCATIONS_MAXENTRIES];
+};
+
+struct nfs4_fs_locations_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *dir_fh;
+ const struct nfs_fh *fh;
+ const struct qstr *name;
+ struct page *page;
+ const u32 *bitmask;
+ clientid4 clientid;
+ unsigned char migration:1, renew:1;
+};
+
+struct nfs4_fs_locations_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs4_fs_locations *fs_locations;
+ unsigned char migration:1, renew:1;
+};
+
+struct nfs4_secinfo4 {
+ u32 flavor;
+ struct rpcsec_gss_info flavor_info;
+};
+
+struct nfs4_secinfo_flavors {
+ unsigned int num_flavors;
+ struct nfs4_secinfo4 flavors[0];
+};
+
+struct nfs4_secinfo_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *dir_fh;
+ const struct qstr *name;
+};
+
+struct nfs4_secinfo_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs4_secinfo_flavors *flavors;
+};
+
+struct nfs4_fsid_present_arg {
+ struct nfs4_sequence_args seq_args;
+ const struct nfs_fh *fh;
+ clientid4 clientid;
+ unsigned char renew:1;
+};
+
+struct nfs4_fsid_present_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs_fh *fh;
+ unsigned char renew:1;
+};
+
+#endif /* CONFIG_NFS_V4 */
+
+struct nfstime4 {
+ u64 seconds;
+ u32 nseconds;
+};
+
+#ifdef CONFIG_NFS_V4_1
+
+struct pnfs_commit_bucket {
+ struct list_head written;
+ struct list_head committing;
+ struct pnfs_layout_segment *wlseg;
+ struct pnfs_layout_segment *clseg;
+ struct nfs_writeverf direct_verf;
+};
+
+struct pnfs_ds_commit_info {
+ int nwritten;
+ int ncommitting;
+ int nbuckets;
+ struct pnfs_commit_bucket *buckets;
+};
+
+#define NFS4_OP_MAP_NUM_LONGS \
+ DIV_ROUND_UP(LAST_NFS4_OP, 8 * sizeof(unsigned long))
+#define NFS4_OP_MAP_NUM_WORDS \
+ (NFS4_OP_MAP_NUM_LONGS * sizeof(unsigned long) / sizeof(u32))
+struct nfs4_op_map {
+ union {
+ unsigned long longs[NFS4_OP_MAP_NUM_LONGS];
+ u32 words[NFS4_OP_MAP_NUM_WORDS];
+ } u;
+};
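+
+/*
+ * Illustrative sizing note: the map carries one bit per NFSv4 operation, so
+ * NFS4_OP_MAP_NUM_LONGS rounds LAST_NFS4_OP up to whole machine words and the
+ * u32 view exposes the same storage for XDR purposes.  If LAST_NFS4_OP were,
+ * say, 58 on a 64-bit machine, this would give 1 long and 2 words.
+ */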
+
+struct nfs41_state_protection {
+ u32 how;
+ struct nfs4_op_map enforce;
+ struct nfs4_op_map allow;
+};
+
+#define NFS4_EXCHANGE_ID_LEN (127)
+struct nfs41_exchange_id_args {
+ struct nfs_client *client;
+ nfs4_verifier *verifier;
+ unsigned int id_len;
+ char id[NFS4_EXCHANGE_ID_LEN];
+ u32 flags;
+ struct nfs41_state_protection state_protect;
+};
+
+struct nfs41_server_owner {
+ uint64_t minor_id;
+ uint32_t major_id_sz;
+ char major_id[NFS4_OPAQUE_LIMIT];
+};
+
+struct nfs41_server_scope {
+ uint32_t server_scope_sz;
+ char server_scope[NFS4_OPAQUE_LIMIT];
+};
+
+struct nfs41_impl_id {
+ char domain[NFS4_OPAQUE_LIMIT + 1];
+ char name[NFS4_OPAQUE_LIMIT + 1];
+ struct nfstime4 date;
+};
+
+struct nfs41_bind_conn_to_session_args {
+ struct nfs_client *client;
+ struct nfs4_sessionid sessionid;
+ u32 dir;
+ bool use_conn_in_rdma_mode;
+};
+
+struct nfs41_bind_conn_to_session_res {
+ struct nfs4_sessionid sessionid;
+ u32 dir;
+ bool use_conn_in_rdma_mode;
+};
+
+struct nfs41_exchange_id_res {
+ u64 clientid;
+ u32 seqid;
+ u32 flags;
+ struct nfs41_server_owner *server_owner;
+ struct nfs41_server_scope *server_scope;
+ struct nfs41_impl_id *impl_id;
+ struct nfs41_state_protection state_protect;
+};
+
+struct nfs41_create_session_args {
+ struct nfs_client *client;
+ u64 clientid;
+ uint32_t seqid;
+ uint32_t flags;
+ uint32_t cb_program;
+ struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
+ struct nfs4_channel_attrs bc_attrs; /* Back Channel */
+};
+
+struct nfs41_create_session_res {
+ struct nfs4_sessionid sessionid;
+ uint32_t seqid;
+ uint32_t flags;
+ struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
+ struct nfs4_channel_attrs bc_attrs; /* Back Channel */
+};
+
+struct nfs41_reclaim_complete_args {
+ struct nfs4_sequence_args seq_args;
+ /* In the future extend to include curr_fh for use with migration */
+ unsigned char one_fs:1;
+};
+
+struct nfs41_reclaim_complete_res {
+ struct nfs4_sequence_res seq_res;
+};
+
+#define SECINFO_STYLE_CURRENT_FH 0
+#define SECINFO_STYLE_PARENT 1
+struct nfs41_secinfo_no_name_args {
+ struct nfs4_sequence_args seq_args;
+ int style;
+};
+
+struct nfs41_test_stateid_args {
+ struct nfs4_sequence_args seq_args;
+ nfs4_stateid *stateid;
+};
+
+struct nfs41_test_stateid_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int status;
+};
+
+struct nfs41_free_stateid_args {
+ struct nfs4_sequence_args seq_args;
+ nfs4_stateid stateid;
+};
+
+struct nfs41_free_stateid_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int status;
+};
+
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+ kfree(cinfo->buckets);
+}
+
+#else
+
+struct pnfs_ds_commit_info {
+};
+
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+#ifdef CONFIG_NFS_V4_2
+struct nfs42_falloc_args {
+ struct nfs4_sequence_args seq_args;
+
+ struct nfs_fh *falloc_fh;
+ nfs4_stateid falloc_stateid;
+ u64 falloc_offset;
+ u64 falloc_length;
+ const u32 *falloc_bitmask;
+};
+
+struct nfs42_falloc_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int status;
+
+ struct nfs_fattr *falloc_fattr;
+ const struct nfs_server *falloc_server;
+};
+
+struct nfs42_seek_args {
+ struct nfs4_sequence_args seq_args;
+
+ struct nfs_fh *sa_fh;
+ nfs4_stateid sa_stateid;
+ u64 sa_offset;
+ u32 sa_what;
+};
+
+struct nfs42_seek_res {
+ struct nfs4_sequence_res seq_res;
+ unsigned int status;
+
+ u32 sr_eof;
+ u64 sr_offset;
+};
+#endif
+
+struct nfs_page;
+
+#define NFS_PAGEVEC_SIZE (8U)
+
+struct nfs_page_array {
+ struct page **pagevec;
+ unsigned int npages; /* Max length of pagevec */
+ struct page *page_array[NFS_PAGEVEC_SIZE];
+};
+
+/* used as flag bits in nfs_pgio_header */
+enum {
+ NFS_IOHDR_ERROR = 0,
+ NFS_IOHDR_EOF,
+ NFS_IOHDR_REDO,
+};
+
+struct nfs_pgio_header {
+ struct inode *inode;
+ struct rpc_cred *cred;
+ struct list_head pages;
+ struct nfs_page *req;
+ struct nfs_writeverf verf; /* Used for writes */
+ struct pnfs_layout_segment *lseg;
+ loff_t io_start;
+ const struct rpc_call_ops *mds_ops;
+ void (*release) (struct nfs_pgio_header *hdr);
+ const struct nfs_pgio_completion_ops *completion_ops;
+ const struct nfs_rw_ops *rw_ops;
+ struct nfs_direct_req *dreq;
+ void *layout_private;
+ spinlock_t lock;
+ /* fields protected by lock */
+ int pnfs_error;
+ int error; /* merge with pnfs_error */
+ unsigned long good_bytes; /* boundary of good data */
+ unsigned long flags;
+
+ /*
+ * rpc data
+ */
+ struct rpc_task task;
+ struct nfs_fattr fattr;
+ struct nfs_pgio_args args; /* argument struct */
+ struct nfs_pgio_res res; /* result struct */
+ unsigned long timestamp; /* For lease renewal */
+ int (*pgio_done_cb)(struct rpc_task *, struct nfs_pgio_header *);
+ __u64 mds_offset; /* Filelayout dense stripe */
+ struct nfs_page_array page_array;
+ struct nfs_client *ds_clp; /* pNFS data server */
+ int ds_commit_idx; /* ds index if ds_clp is set */
+ int pgio_mirror_idx;/* mirror index in pgio layer */
+};
+
+struct nfs_mds_commit_info {
+ atomic_t rpcs_out;
+ unsigned long ncommit;
+ struct list_head list;
+};
+
+struct nfs_commit_data;
+struct nfs_inode;
+struct nfs_commit_completion_ops {
+ void (*error_cleanup) (struct nfs_inode *nfsi);
+ void (*completion) (struct nfs_commit_data *data);
+};
+
+struct nfs_commit_info {
+ spinlock_t *lock; /* inode->i_lock */
+ struct nfs_mds_commit_info *mds;
+ struct pnfs_ds_commit_info *ds;
+ struct nfs_direct_req *dreq; /* O_DIRECT request */
+ const struct nfs_commit_completion_ops *completion_ops;
+};
+
+struct nfs_commit_data {
+ struct rpc_task task;
+ struct inode *inode;
+ struct rpc_cred *cred;
+ struct nfs_fattr fattr;
+ struct nfs_writeverf verf;
+ struct list_head pages; /* Coalesced requests we wish to flush */
+ struct list_head list; /* lists of struct nfs_write_data */
+ struct nfs_direct_req *dreq; /* O_DIRECT request */
+ struct nfs_commitargs args; /* argument struct */
+ struct nfs_commitres res; /* result struct */
+ struct nfs_open_context *context;
+ struct pnfs_layout_segment *lseg;
+ struct nfs_client *ds_clp; /* pNFS data server */
+ int ds_commit_index;
+ loff_t lwb;
+ const struct rpc_call_ops *mds_ops;
+ const struct nfs_commit_completion_ops *completion_ops;
+ int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
+};
+
+struct nfs_pgio_completion_ops {
+ void (*error_cleanup)(struct list_head *head);
+ void (*init_hdr)(struct nfs_pgio_header *hdr);
+ void (*completion)(struct nfs_pgio_header *hdr);
+};
+
+struct nfs_unlinkdata {
+ struct hlist_node list;
+ struct nfs_removeargs args;
+ struct nfs_removeres res;
+ struct inode *dir;
+ struct rpc_cred *cred;
+ struct nfs_fattr dir_attr;
+ long timeout;
+};
+
+struct nfs_renamedata {
+ struct nfs_renameargs args;
+ struct nfs_renameres res;
+ struct rpc_cred *cred;
+ struct inode *old_dir;
+ struct dentry *old_dentry;
+ struct nfs_fattr old_fattr;
+ struct inode *new_dir;
+ struct dentry *new_dentry;
+ struct nfs_fattr new_fattr;
+ void (*complete)(struct rpc_task *, struct nfs_renamedata *);
+ long timeout;
+};
+
+struct nfs_access_entry;
+struct nfs_client;
+struct rpc_timeout;
+struct nfs_subversion;
+struct nfs_mount_info;
+struct nfs_client_initdata;
+struct nfs_pageio_descriptor;
+
+/*
+ * RPC procedure vector for NFSv2/NFSv3 demuxing
+ */
+struct nfs_rpc_ops {
+ u32 version; /* Protocol version */
+ const struct dentry_operations *dentry_ops;
+ const struct inode_operations *dir_inode_ops;
+ const struct inode_operations *file_inode_ops;
+ const struct file_operations *file_ops;
+
+ int (*getroot) (struct nfs_server *, struct nfs_fh *,
+ struct nfs_fsinfo *);
+ struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
+ struct nfs_fh *, struct nfs_fattr *);
+ struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *,
+ struct nfs_subversion *);
+ int (*getattr) (struct nfs_server *, struct nfs_fh *,
+ struct nfs_fattr *, struct nfs4_label *);
+ int (*setattr) (struct dentry *, struct nfs_fattr *,
+ struct iattr *);
+ int (*lookup) (struct inode *, struct qstr *,
+ struct nfs_fh *, struct nfs_fattr *,
+ struct nfs4_label *);
+ int (*access) (struct inode *, struct nfs_access_entry *);
+ int (*readlink)(struct inode *, struct page *, unsigned int,
+ unsigned int);
+ int (*create) (struct inode *, struct dentry *,
+ struct iattr *, int);
+ int (*remove) (struct inode *, struct qstr *);
+ void (*unlink_setup) (struct rpc_message *, struct inode *dir);
+ void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
+ int (*unlink_done) (struct rpc_task *, struct inode *);
+ void (*rename_setup) (struct rpc_message *msg, struct inode *dir);
+ void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
+ int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
+ int (*link) (struct inode *, struct inode *, struct qstr *);
+ int (*symlink) (struct inode *, struct dentry *, struct page *,
+ unsigned int, struct iattr *);
+ int (*mkdir) (struct inode *, struct dentry *, struct iattr *);
+ int (*rmdir) (struct inode *, struct qstr *);
+ int (*readdir) (struct dentry *, struct rpc_cred *,
+ u64, struct page **, unsigned int, int);
+ int (*mknod) (struct inode *, struct dentry *, struct iattr *,
+ dev_t);
+ int (*statfs) (struct nfs_server *, struct nfs_fh *,
+ struct nfs_fsstat *);
+ int (*fsinfo) (struct nfs_server *, struct nfs_fh *,
+ struct nfs_fsinfo *);
+ int (*pathconf) (struct nfs_server *, struct nfs_fh *,
+ struct nfs_pathconf *);
+ int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
+ int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
+ int (*pgio_rpc_prepare)(struct rpc_task *,
+ struct nfs_pgio_header *);
+ void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *);
+ int (*read_done)(struct rpc_task *, struct nfs_pgio_header *);
+ void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *);
+ int (*write_done)(struct rpc_task *, struct nfs_pgio_header *);
+ void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
+ void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
+ int (*commit_done) (struct rpc_task *, struct nfs_commit_data *);
+ int (*lock)(struct file *, int, struct file_lock *);
+ int (*lock_check_bounds)(const struct file_lock *);
+ void (*clear_acl_cache)(struct inode *);
+ void (*close_context)(struct nfs_open_context *ctx, int);
+ struct inode * (*open_context) (struct inode *dir,
+ struct nfs_open_context *ctx,
+ int open_flags,
+ struct iattr *iattr,
+ int *);
+ int (*have_delegation)(struct inode *, fmode_t);
+ int (*return_delegation)(struct inode *);
+ struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
+ struct nfs_client *
+ (*init_client) (struct nfs_client *, const struct rpc_timeout *,
+ const char *);
+ void (*free_client) (struct nfs_client *);
+ struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
+ struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
+ struct nfs_fattr *, rpc_authflavor_t);
+};
+
+/*
+ * NFS_CALL(getattr, inode, (fattr));
+ * expands to
+ * NFS_PROTO(inode)->getattr(fattr);
+ */
+#define NFS_CALL(op, inode, args) NFS_PROTO(inode)->op args
+
+/*
+ * Function vectors etc. for the NFS client
+ */
+extern const struct nfs_rpc_ops nfs_v2_clientops;
+extern const struct nfs_rpc_ops nfs_v3_clientops;
+extern const struct nfs_rpc_ops nfs_v4_clientops;
+extern const struct rpc_version nfs_version2;
+extern const struct rpc_version nfs_version3;
+extern const struct rpc_version nfs_version4;
+
+extern const struct rpc_version nfsacl_version3;
+extern const struct rpc_program nfsacl_program;
+
+#endif
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
new file mode 100644
index 000000000..5e69e67b3
--- /dev/null
+++ b/include/linux/nfsacl.h
@@ -0,0 +1,41 @@
+/*
+ * File: linux/nfsacl.h
+ *
+ * (C) 2003 Andreas Gruenbacher <agruen@suse.de>
+ */
+#ifndef __LINUX_NFSACL_H
+#define __LINUX_NFSACL_H
+
+
+#include <linux/posix_acl.h>
+#include <linux/sunrpc/xdr.h>
+#include <uapi/linux/nfsacl.h>
+
+/* Maximum number of ACL entries over NFS */
+#define NFS_ACL_MAX_ENTRIES 1024
+
+#define NFSACL_MAXWORDS (2*(2+3*NFS_ACL_MAX_ENTRIES))
+#define NFSACL_MAXPAGES ((2*(8+12*NFS_ACL_MAX_ENTRIES) + PAGE_SIZE-1) \
+ >> PAGE_SHIFT)
+
+#define NFS_ACL_MAX_ENTRIES_INLINE (5)
+#define NFS_ACL_INLINE_BUFSIZE ((2*(2+3*NFS_ACL_MAX_ENTRIES_INLINE)) << 2)
+
+static inline unsigned int
+nfsacl_size(struct posix_acl *acl_access, struct posix_acl *acl_default)
+{
+ unsigned int w = 16;
+ w += max(acl_access ? (int)acl_access->a_count : 3, 4) * 12;
+ if (acl_default)
+ w += max((int)acl_default->a_count, 4) * 12;
+ return w;
+}
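+
+/*
+ * Worked example (illustrative): for an access ACL with three entries and no
+ * default ACL, nfsacl_size() returns 16 + max(3, 4) * 12 = 64 bytes; a
+ * four-entry default ACL would add a further max(4, 4) * 12 = 48 bytes.
+ */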
+
+extern int
+nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
+ struct posix_acl *acl, int encode_entries, int typeflag);
+extern int
+nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
+ struct posix_acl **pacl);
+
+#endif /* __LINUX_NFSACL_H */
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
new file mode 100644
index 000000000..9abb763e4
--- /dev/null
+++ b/include/linux/nilfs2_fs.h
@@ -0,0 +1,919 @@
+/*
+ * nilfs2_fs.h - NILFS2 on-disk structures and common declarations.
+ *
+ * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Written by Koji Sato <koji@osrg.net>
+ * Ryusuke Konishi <ryusuke@osrg.net>
+ */
+/*
+ * linux/include/linux/ext2_fs.h
+ *
+ * Copyright (C) 1992, 1993, 1994, 1995
+ * Remy Card (card@masi.ibp.fr)
+ * Laboratoire MASI - Institut Blaise Pascal
+ * Universite Pierre et Marie Curie (Paris VI)
+ *
+ * from
+ *
+ * linux/include/linux/minix_fs.h
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#ifndef _LINUX_NILFS_FS_H
+#define _LINUX_NILFS_FS_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/magic.h>
+#include <linux/bug.h>
+
+
+#define NILFS_INODE_BMAP_SIZE 7
+/**
+ * struct nilfs_inode - structure of an inode on disk
+ * @i_blocks: blocks count
+ * @i_size: size in bytes
+ * @i_ctime: creation time (seconds)
+ * @i_mtime: modification time (seconds)
+ * @i_ctime_nsec: creation time (nano seconds)
+ * @i_mtime_nsec: modification time (nano seconds)
+ * @i_uid: user id
+ * @i_gid: group id
+ * @i_mode: file mode
+ * @i_links_count: links count
+ * @i_flags: file flags
+ * @i_bmap: block mapping
+ * @i_xattr: extended attributes
+ * @i_generation: file generation (for NFS)
+ * @i_pad: padding
+ */
+struct nilfs_inode {
+ __le64 i_blocks;
+ __le64 i_size;
+ __le64 i_ctime;
+ __le64 i_mtime;
+ __le32 i_ctime_nsec;
+ __le32 i_mtime_nsec;
+ __le32 i_uid;
+ __le32 i_gid;
+ __le16 i_mode;
+ __le16 i_links_count;
+ __le32 i_flags;
+ __le64 i_bmap[NILFS_INODE_BMAP_SIZE];
+#define i_device_code i_bmap[0]
+ __le64 i_xattr;
+ __le32 i_generation;
+ __le32 i_pad;
+};
+
+#define NILFS_MIN_INODE_SIZE 128
+
+/**
+ * struct nilfs_super_root - structure of super root
+ * @sr_sum: check sum
+ * @sr_bytes: byte count of the structure
+ * @sr_flags: flags (reserved)
+ * @sr_nongc_ctime: write time of the last segment not for cleaner operation
+ * @sr_dat: DAT file inode
+ * @sr_cpfile: checkpoint file inode
+ * @sr_sufile: segment usage file inode
+ */
+struct nilfs_super_root {
+ __le32 sr_sum;
+ __le16 sr_bytes;
+ __le16 sr_flags;
+ __le64 sr_nongc_ctime;
+ struct nilfs_inode sr_dat;
+ struct nilfs_inode sr_cpfile;
+ struct nilfs_inode sr_sufile;
+};
+
+#define NILFS_SR_MDT_OFFSET(inode_size, i) \
+ ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \
+ (inode_size) * (i))
+#define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0)
+#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1)
+#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2)
+#define NILFS_SR_BYTES(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 3)
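+
+/*
+ * Layout sketch (illustrative): NILFS_SR_MDT_OFFSET(sz, i) is the offset of
+ * sr_dat plus i * sz.  Assuming the usual ABI layout (sr_dat at byte 16) and
+ * the minimum 128-byte on-disk inode, the DAT, cpfile and sufile inodes start
+ * at offsets 16, 144 and 272, and NILFS_SR_BYTES(128) evaluates to 400.
+ */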
+
+/*
+ * Maximal mount counts
+ */
+#define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */
+
+/*
+ * File system states (sbp->s_state, nilfs->ns_mount_state)
+ */
+#define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */
+#define NILFS_ERROR_FS 0x0002 /* Errors detected */
+#define NILFS_RESIZE_FS 0x0004 /* Resize required */
+
+/*
+ * Mount flags (sbi->s_mount_opt)
+ */
+#define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */
+#define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
+#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */
+#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
+#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */
+#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order
+ semantics also for data */
+#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during
+ mount-time recovery */
+#define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */
+
+
+/**
+ * struct nilfs_super_block - structure of super block on disk
+ */
+struct nilfs_super_block {
+/*00*/ __le32 s_rev_level; /* Revision level */
+ __le16 s_minor_rev_level; /* minor revision level */
+ __le16 s_magic; /* Magic signature */
+
+ __le16 s_bytes; /* Bytes count of CRC calculation
+ for this structure. s_reserved
+ is excluded. */
+ __le16 s_flags; /* flags */
+ __le32 s_crc_seed; /* Seed value of CRC calculation */
+/*10*/ __le32 s_sum; /* Check sum of super block */
+
+ __le32 s_log_block_size; /* Block size represented as follows
+ blocksize =
+ 1 << (s_log_block_size + 10) */
+ __le64 s_nsegments; /* Number of segments in filesystem */
+/*20*/ __le64 s_dev_size; /* block device size in bytes */
+ __le64 s_first_data_block; /* 1st seg disk block number */
+/*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */
+ __le32 s_r_segments_percentage; /* Reserved segments percentage */
+
+ __le64 s_last_cno; /* Last checkpoint number */
+/*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */
+ __le64 s_last_seq; /* seq. number of seg written last */
+/*50*/ __le64 s_free_blocks_count; /* Free blocks count */
+
+ __le64 s_ctime; /* Creation time (execution time of
+ newfs) */
+/*60*/ __le64 s_mtime; /* Mount time */
+ __le64 s_wtime; /* Write time */
+/*70*/ __le16 s_mnt_count; /* Mount count */
+ __le16 s_max_mnt_count; /* Maximal mount count */
+ __le16 s_state; /* File system state */
+ __le16 s_errors; /* Behaviour when detecting errors */
+ __le64 s_lastcheck; /* time of last check */
+
+/*80*/ __le32 s_checkinterval; /* max. time between checks */
+ __le32 s_creator_os; /* OS */
+ __le16 s_def_resuid; /* Default uid for reserved blocks */
+ __le16 s_def_resgid; /* Default gid for reserved blocks */
+ __le32 s_first_ino; /* First non-reserved inode */
+
+/*90*/ __le16 s_inode_size; /* Size of an inode */
+ __le16 s_dat_entry_size; /* Size of a dat entry */
+ __le16 s_checkpoint_size; /* Size of a checkpoint */
+ __le16 s_segment_usage_size; /* Size of a segment usage */
+
+/*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
+/*A8*/ char s_volume_name[80]; /* volume name */
+
+/*F8*/ __le32 s_c_interval; /* Commit interval of segment */
+ __le32 s_c_block_max; /* Threshold of data amount for
+ the segment construction */
+/*100*/ __le64 s_feature_compat; /* Compatible feature set */
+ __le64 s_feature_compat_ro; /* Read-only compatible feature set */
+ __le64 s_feature_incompat; /* Incompatible feature set */
+ __u32 s_reserved[186]; /* padding to the end of the block */
+};
+
+/*
+ * Codes for operating systems
+ */
+#define NILFS_OS_LINUX 0
+/* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */
+
+/*
+ * Revision levels
+ */
+#define NILFS_CURRENT_REV 2 /* current major revision */
+#define NILFS_MINOR_REV 0 /* minor revision */
+#define NILFS_MIN_SUPP_REV 2 /* minimum supported revision */
+
+/*
+ * Feature set definitions
+ *
+ * If there is a bit set in the incompatible feature set that the kernel
+ * doesn't know about, it should refuse to mount the filesystem.
+ */
+#define NILFS_FEATURE_COMPAT_RO_BLOCK_COUNT 0x00000001ULL
+
+#define NILFS_FEATURE_COMPAT_SUPP 0ULL
+#define NILFS_FEATURE_COMPAT_RO_SUPP NILFS_FEATURE_COMPAT_RO_BLOCK_COUNT
+#define NILFS_FEATURE_INCOMPAT_SUPP 0ULL
+
+/*
+ * Bytes count of super_block for CRC-calculation
+ */
+#define NILFS_SB_BYTES \
+ ((long)&((struct nilfs_super_block *)0)->s_reserved)
+
+/*
+ * Special inode number
+ */
+#define NILFS_ROOT_INO 2 /* Root file inode */
+#define NILFS_DAT_INO 3 /* DAT file */
+#define NILFS_CPFILE_INO 4 /* checkpoint file */
+#define NILFS_SUFILE_INO 5 /* segment usage file */
+#define NILFS_IFILE_INO 6 /* ifile */
+#define NILFS_ATIME_INO 7 /* Atime file (reserved) */
+#define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */
+#define NILFS_SKETCH_INO 10 /* Sketch file */
+#define NILFS_USER_INO 11 /* First user's file inode number */
+
+#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */
+
+#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in
+ a full segment */
+#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in
+ a partial segment */
+#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved
+ segments */
+
+/*
+ * We call DAT, cpfile, and sufile root metadata files. Inodes of
+ * these files are written in super root block instead of ifile, and
+ * garbage collector doesn't keep any past versions of these files.
+ */
+#define NILFS_ROOT_METADATA_FILE(ino) \
+ ((ino) >= NILFS_DAT_INO && (ino) <= NILFS_SUFILE_INO)
+
+/*
+ * bytes offset of secondary super block
+ */
+#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12)
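+
+/*
+ * Worked example (illustrative): for a 1 GiB device,
+ * NILFS_SB2_OFFSET_BYTES(1 << 30) = (((1 << 30) >> 12) - 1) << 12,
+ * i.e. 1 GiB - 4096: the secondary super block occupies the last
+ * 4 KiB-aligned position on the device.
+ */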
+
+/*
+ * Maximal count of links to a file
+ */
+#define NILFS_LINK_MAX 32000
+
+/*
+ * Structure of a directory entry
+ * (Same as ext2)
+ */
+
+#define NILFS_NAME_LEN 255
+
+/*
+ * Block size limitations
+ */
+#define NILFS_MIN_BLOCK_SIZE 1024
+#define NILFS_MAX_BLOCK_SIZE 65536
+
+/*
+ * The new version of the directory entry. Since V0 structures are
+ * stored in Intel byte order, and the name_len field could never be
+ * bigger than 255 chars, it's safe to reclaim the extra byte for the
+ * file_type field.
+ */
+struct nilfs_dir_entry {
+ __le64 inode; /* Inode number */
+ __le16 rec_len; /* Directory entry length */
+ __u8 name_len; /* Name length */
+ __u8 file_type; /* Dir entry type (file, dir, etc) */
+ char name[NILFS_NAME_LEN]; /* File name */
+ char pad;
+};
+
+/*
+ * NILFS directory file types. Only the low 3 bits are used. The
+ * other bits are reserved for now.
+ */
+enum {
+ NILFS_FT_UNKNOWN,
+ NILFS_FT_REG_FILE,
+ NILFS_FT_DIR,
+ NILFS_FT_CHRDEV,
+ NILFS_FT_BLKDEV,
+ NILFS_FT_FIFO,
+ NILFS_FT_SOCK,
+ NILFS_FT_SYMLINK,
+ NILFS_FT_MAX
+};
+
+/*
+ * NILFS_DIR_PAD defines the directory entries boundaries
+ *
+ * NOTE: It must be a multiple of 8
+ */
+#define NILFS_DIR_PAD 8
+#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1)
+#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \
+ ~NILFS_DIR_ROUND)
+#define NILFS_MAX_REC_LEN ((1<<16)-1)
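+
+/*
+ * Worked example (illustrative): an entry consists of a 12-byte fixed header
+ * (inode, rec_len, name_len, file_type) plus the name, rounded up to a
+ * multiple of NILFS_DIR_PAD, so NILFS_DIR_REC_LEN(1) == 16 and
+ * NILFS_DIR_REC_LEN(11) == 24.
+ */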
+
+static inline unsigned nilfs_rec_len_from_disk(__le16 dlen)
+{
+ unsigned len = le16_to_cpu(dlen);
+
+#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+ if (len == NILFS_MAX_REC_LEN)
+ return 1 << 16;
+#endif
+ return len;
+}
+
+static inline __le16 nilfs_rec_len_to_disk(unsigned len)
+{
+#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536)
+ if (len == (1 << 16))
+ return cpu_to_le16(NILFS_MAX_REC_LEN);
+ else if (len > (1 << 16))
+ BUG();
+#endif
+ return cpu_to_le16(len);
+}
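+
+/*
+ * Note (illustrative): rec_len is a 16-bit field on disk, so an entry that
+ * spans a full 64 KiB block cannot store the value 65536 directly; it is
+ * written as NILFS_MAX_REC_LEN (0xffff) by nilfs_rec_len_to_disk() and mapped
+ * back to 1 << 16 by nilfs_rec_len_from_disk() on configurations where such
+ * blocks can occur.
+ */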
+
+/**
+ * struct nilfs_finfo - file information
+ * @fi_ino: inode number
+ * @fi_cno: checkpoint number
+ * @fi_nblocks: number of blocks (including intermediate blocks)
+ * @fi_ndatablk: number of file data blocks
+ */
+struct nilfs_finfo {
+ __le64 fi_ino;
+ __le64 fi_cno;
+ __le32 fi_nblocks;
+ __le32 fi_ndatablk;
+ /* array of virtual block numbers */
+};
+
+/**
+ * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned
+ * @bi_vblocknr: virtual block number
+ * @bi_blkoff: block offset
+ */
+struct nilfs_binfo_v {
+ __le64 bi_vblocknr;
+ __le64 bi_blkoff;
+};
+
+/**
+ * struct nilfs_binfo_dat - information for the block which belongs to the DAT file
+ * @bi_blkoff: block offset
+ * @bi_level: level
+ * @bi_pad: padding
+ */
+struct nilfs_binfo_dat {
+ __le64 bi_blkoff;
+ __u8 bi_level;
+ __u8 bi_pad[7];
+};
+
+/**
+ * union nilfs_binfo - block information
+ * @bi_v: nilfs_binfo_v structure
+ * @bi_dat: nilfs_binfo_dat structure
+ */
+union nilfs_binfo {
+ struct nilfs_binfo_v bi_v;
+ struct nilfs_binfo_dat bi_dat;
+};
+
+/**
+ * struct nilfs_segment_summary - segment summary header
+ * @ss_datasum: checksum of data
+ * @ss_sumsum: checksum of segment summary
+ * @ss_magic: magic number
+ * @ss_bytes: size of this structure in bytes
+ * @ss_flags: flags
+ * @ss_seq: sequence number
+ * @ss_create: creation timestamp
+ * @ss_next: next segment
+ * @ss_nblocks: number of blocks
+ * @ss_nfinfo: number of finfo structures
+ * @ss_sumbytes: total size of segment summary in bytes
+ * @ss_pad: padding
+ * @ss_cno: checkpoint number
+ */
+struct nilfs_segment_summary {
+ __le32 ss_datasum;
+ __le32 ss_sumsum;
+ __le32 ss_magic;
+ __le16 ss_bytes;
+ __le16 ss_flags;
+ __le64 ss_seq;
+ __le64 ss_create;
+ __le64 ss_next;
+ __le32 ss_nblocks;
+ __le32 ss_nfinfo;
+ __le32 ss_sumbytes;
+ __le32 ss_pad;
+ __le64 ss_cno;
+ /* array of finfo structures */
+};
+
+#define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */
+
+/*
+ * Segment summary flags
+ */
+#define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */
+#define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */
+#define NILFS_SS_SR 0x0004 /* has super root */
+#define NILFS_SS_SYNDT 0x0008 /* includes data only updates */
+#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */
+
+/**
+ * struct nilfs_btree_node - B-tree node
+ * @bn_flags: flags
+ * @bn_level: level
+ * @bn_nchildren: number of children
+ * @bn_pad: padding
+ */
+struct nilfs_btree_node {
+ __u8 bn_flags;
+ __u8 bn_level;
+ __le16 bn_nchildren;
+ __le32 bn_pad;
+};
+
+/* flags */
+#define NILFS_BTREE_NODE_ROOT 0x01
+
+/* level */
+#define NILFS_BTREE_LEVEL_DATA 0
+#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
+#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
+
+/**
+ * struct nilfs_palloc_group_desc - block group descriptor
+ * @pg_nfrees: number of free entries in block group
+ */
+struct nilfs_palloc_group_desc {
+ __le32 pg_nfrees;
+};
+
+/**
+ * struct nilfs_dat_entry - disk address translation entry
+ * @de_blocknr: block number
+ * @de_start: start checkpoint number
+ * @de_end: end checkpoint number
+ * @de_rsv: reserved for future use
+ */
+struct nilfs_dat_entry {
+ __le64 de_blocknr;
+ __le64 de_start;
+ __le64 de_end;
+ __le64 de_rsv;
+};
+
+#define NILFS_MIN_DAT_ENTRY_SIZE 32
+
+/**
+ * struct nilfs_snapshot_list - snapshot list
+ * @ssl_next: next checkpoint number on snapshot list
+ * @ssl_prev: previous checkpoint number on snapshot list
+ */
+struct nilfs_snapshot_list {
+ __le64 ssl_next;
+ __le64 ssl_prev;
+};
+
+/**
+ * struct nilfs_checkpoint - checkpoint structure
+ * @cp_flags: flags
+ * @cp_checkpoints_count: checkpoints count in a block
+ * @cp_snapshot_list: snapshot list
+ * @cp_cno: checkpoint number
+ * @cp_create: creation timestamp
+ * @cp_nblk_inc: number of blocks incremented by this checkpoint
+ * @cp_inodes_count: inodes count
+ * @cp_blocks_count: blocks count
+ * @cp_ifile_inode: inode of ifile
+ */
+struct nilfs_checkpoint {
+ __le32 cp_flags;
+ __le32 cp_checkpoints_count;
+ struct nilfs_snapshot_list cp_snapshot_list;
+ __le64 cp_cno;
+ __le64 cp_create;
+ __le64 cp_nblk_inc;
+ __le64 cp_inodes_count;
+ __le64 cp_blocks_count;
+
+ /* Do not change the byte offset of ifile inode.
+ To preserve compatibility of the on-disk format,
+ additional fields should be added after cp_ifile_inode. */
+ struct nilfs_inode cp_ifile_inode;
+};
+
+#define NILFS_MIN_CHECKPOINT_SIZE (64 + NILFS_MIN_INODE_SIZE)
+
+/* checkpoint flags */
+enum {
+ NILFS_CHECKPOINT_SNAPSHOT,
+ NILFS_CHECKPOINT_INVALID,
+ NILFS_CHECKPOINT_SKETCH,
+ NILFS_CHECKPOINT_MINOR,
+};
+
+#define NILFS_CHECKPOINT_FNS(flag, name) \
+static inline void \
+nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
+{ \
+ cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
+} \
+static inline void \
+nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
+{ \
+ cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
+ ~(1UL << NILFS_CHECKPOINT_##flag)); \
+} \
+static inline int \
+nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
+{ \
+ return !!(le32_to_cpu(cp->cp_flags) & \
+ (1UL << NILFS_CHECKPOINT_##flag)); \
+}
+
+NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot)
+NILFS_CHECKPOINT_FNS(INVALID, invalid)
+NILFS_CHECKPOINT_FNS(MINOR, minor)
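+
+/*
+ * For illustration: each NILFS_CHECKPOINT_FNS() invocation above generates a
+ * set/clear/test triple; NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot), for
+ * example, yields nilfs_checkpoint_set_snapshot(),
+ * nilfs_checkpoint_clear_snapshot() and nilfs_checkpoint_snapshot(), all of
+ * which operate on the little-endian cp_flags word.
+ */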
+
+/**
+ * struct nilfs_cpinfo - checkpoint information
+ * @ci_flags: flags
+ * @ci_pad: padding
+ * @ci_cno: checkpoint number
+ * @ci_create: creation timestamp
+ * @ci_nblk_inc: number of blocks incremented by this checkpoint
+ * @ci_inodes_count: inodes count
+ * @ci_blocks_count: blocks count
+ * @ci_next: next checkpoint number in snapshot list
+ */
+struct nilfs_cpinfo {
+ __u32 ci_flags;
+ __u32 ci_pad;
+ __u64 ci_cno;
+ __u64 ci_create;
+ __u64 ci_nblk_inc;
+ __u64 ci_inodes_count;
+ __u64 ci_blocks_count;
+ __u64 ci_next;
+};
+
+#define NILFS_CPINFO_FNS(flag, name) \
+static inline int \
+nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \
+{ \
+ return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \
+}
+
+NILFS_CPINFO_FNS(SNAPSHOT, snapshot)
+NILFS_CPINFO_FNS(INVALID, invalid)
+NILFS_CPINFO_FNS(MINOR, minor)
+
+
+/**
+ * struct nilfs_cpfile_header - checkpoint file header
+ * @ch_ncheckpoints: number of checkpoints
+ * @ch_nsnapshots: number of snapshots
+ * @ch_snapshot_list: snapshot list
+ */
+struct nilfs_cpfile_header {
+ __le64 ch_ncheckpoints;
+ __le64 ch_nsnapshots;
+ struct nilfs_snapshot_list ch_snapshot_list;
+};
+
+#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \
+ ((sizeof(struct nilfs_cpfile_header) + \
+ sizeof(struct nilfs_checkpoint) - 1) / \
+ sizeof(struct nilfs_checkpoint))
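+
+/*
+ * Illustrative note: the expression above is a ceiling division, giving the
+ * index of the first checkpoint-sized slot that lies entirely beyond the
+ * header; with the structure sizes declared here it evaluates to 1, so slot 0
+ * of the cpfile is reserved for the header.
+ */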
+
+/**
+ * struct nilfs_segment_usage - segment usage
+ * @su_lastmod: last modified timestamp
+ * @su_nblocks: number of blocks in segment
+ * @su_flags: flags
+ */
+struct nilfs_segment_usage {
+ __le64 su_lastmod;
+ __le32 su_nblocks;
+ __le32 su_flags;
+};
+
+#define NILFS_MIN_SEGMENT_USAGE_SIZE 16
+
+/* segment usage flag */
+enum {
+ NILFS_SEGMENT_USAGE_ACTIVE,
+ NILFS_SEGMENT_USAGE_DIRTY,
+ NILFS_SEGMENT_USAGE_ERROR,
+
+ /* ... */
+};
+
+#define NILFS_SEGMENT_USAGE_FNS(flag, name) \
+static inline void \
+nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
+{ \
+ su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
+ (1UL << NILFS_SEGMENT_USAGE_##flag));\
+} \
+static inline void \
+nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
+{ \
+ su->su_flags = \
+ cpu_to_le32(le32_to_cpu(su->su_flags) & \
+ ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
+} \
+static inline int \
+nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
+{ \
+ return !!(le32_to_cpu(su->su_flags) & \
+ (1UL << NILFS_SEGMENT_USAGE_##flag)); \
+}
+
+NILFS_SEGMENT_USAGE_FNS(ACTIVE, active)
+NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty)
+NILFS_SEGMENT_USAGE_FNS(ERROR, error)
+
+static inline void
+nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
+{
+ su->su_lastmod = cpu_to_le64(0);
+ su->su_nblocks = cpu_to_le32(0);
+ su->su_flags = cpu_to_le32(0);
+}
+
+static inline int
+nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
+{
+ return !le32_to_cpu(su->su_flags);
+}
+
+/**
+ * struct nilfs_sufile_header - segment usage file header
+ * @sh_ncleansegs: number of clean segments
+ * @sh_ndirtysegs: number of dirty segments
+ * @sh_last_alloc: last allocated segment number
+ */
+struct nilfs_sufile_header {
+ __le64 sh_ncleansegs;
+ __le64 sh_ndirtysegs;
+ __le64 sh_last_alloc;
+ /* ... */
+};
+
+#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \
+ ((sizeof(struct nilfs_sufile_header) + \
+ sizeof(struct nilfs_segment_usage) - 1) / \
+ sizeof(struct nilfs_segment_usage))
+
+/**
+ * struct nilfs_suinfo - segment usage information
+ * @sui_lastmod: timestamp of last modification
+ * @sui_nblocks: number of written blocks in segment
+ * @sui_flags: segment usage flags
+ */
+struct nilfs_suinfo {
+ __u64 sui_lastmod;
+ __u32 sui_nblocks;
+ __u32 sui_flags;
+};
+
+#define NILFS_SUINFO_FNS(flag, name) \
+static inline int \
+nilfs_suinfo_##name(const struct nilfs_suinfo *si) \
+{ \
+ return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \
+}
+
+NILFS_SUINFO_FNS(ACTIVE, active)
+NILFS_SUINFO_FNS(DIRTY, dirty)
+NILFS_SUINFO_FNS(ERROR, error)
+
+static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
+{
+ return !si->sui_flags;
+}
+
+/* ioctl */
+/**
+ * struct nilfs_suinfo_update - segment usage information update
+ * @sup_segnum: segment number
+ * @sup_flags: flags for which fields are active in sup_sui
+ * @sup_reserved: reserved for alignment
+ * @sup_sui: segment usage information
+ */
+struct nilfs_suinfo_update {
+ __u64 sup_segnum;
+ __u32 sup_flags;
+ __u32 sup_reserved;
+ struct nilfs_suinfo sup_sui;
+};
+
+enum {
+ NILFS_SUINFO_UPDATE_LASTMOD,
+ NILFS_SUINFO_UPDATE_NBLOCKS,
+ NILFS_SUINFO_UPDATE_FLAGS,
+ __NR_NILFS_SUINFO_UPDATE_FIELDS,
+};
+
+#define NILFS_SUINFO_UPDATE_FNS(flag, name) \
+static inline void \
+nilfs_suinfo_update_set_##name(struct nilfs_suinfo_update *sup) \
+{ \
+ sup->sup_flags |= 1UL << NILFS_SUINFO_UPDATE_##flag; \
+} \
+static inline void \
+nilfs_suinfo_update_clear_##name(struct nilfs_suinfo_update *sup) \
+{ \
+ sup->sup_flags &= ~(1UL << NILFS_SUINFO_UPDATE_##flag); \
+} \
+static inline int \
+nilfs_suinfo_update_##name(const struct nilfs_suinfo_update *sup) \
+{ \
+ return !!(sup->sup_flags & (1UL << NILFS_SUINFO_UPDATE_##flag));\
+}
+
+NILFS_SUINFO_UPDATE_FNS(LASTMOD, lastmod)
+NILFS_SUINFO_UPDATE_FNS(NBLOCKS, nblocks)
+NILFS_SUINFO_UPDATE_FNS(FLAGS, flags)
+
+enum {
+ NILFS_CHECKPOINT,
+ NILFS_SNAPSHOT,
+};
+
+/**
+ * struct nilfs_cpmode - change checkpoint mode structure
+ * @cm_cno: checkpoint number
+ * @cm_mode: mode of checkpoint
+ * @cm_pad: padding
+ */
+struct nilfs_cpmode {
+ __u64 cm_cno;
+ __u32 cm_mode;
+ __u32 cm_pad;
+};
+
+/**
+ * struct nilfs_argv - argument vector
+ * @v_base: pointer to the data array from userspace
+ * @v_nmembs: number of members in data array
+ * @v_size: size of data array in bytes
+ * @v_flags: flags
+ * @v_index: start number of target data items
+ */
+struct nilfs_argv {
+ __u64 v_base;
+ __u32 v_nmembs; /* number of members */
+ __u16 v_size; /* size of members */
+ __u16 v_flags;
+ __u64 v_index;
+};
+
+/**
+ * struct nilfs_period - period of checkpoint numbers
+ * @p_start: start checkpoint number (inclusive)
+ * @p_end: end checkpoint number (exclusive)
+ */
+struct nilfs_period {
+ __u64 p_start;
+ __u64 p_end;
+};
+
+/**
+ * struct nilfs_cpstat - checkpoint statistics
+ * @cs_cno: checkpoint number
+ * @cs_ncps: number of checkpoints
+ * @cs_nsss: number of snapshots
+ */
+struct nilfs_cpstat {
+ __u64 cs_cno;
+ __u64 cs_ncps;
+ __u64 cs_nsss;
+};
+
+/**
+ * struct nilfs_sustat - segment usage statistics
+ * @ss_nsegs: number of segments
+ * @ss_ncleansegs: number of clean segments
+ * @ss_ndirtysegs: number of dirty segments
+ * @ss_ctime: creation time of the last segment
+ * @ss_nongc_ctime: creation time of the last segment not for GC
+ * @ss_prot_seq: least sequence number of segments which must not be reclaimed
+ */
+struct nilfs_sustat {
+ __u64 ss_nsegs;
+ __u64 ss_ncleansegs;
+ __u64 ss_ndirtysegs;
+ __u64 ss_ctime;
+ __u64 ss_nongc_ctime;
+ __u64 ss_prot_seq;
+};
+
+/**
+ * struct nilfs_vinfo - virtual block number information
+ * @vi_vblocknr: virtual block number
+ * @vi_start: start checkpoint number (inclusive)
+ * @vi_end: end checkpoint number (exclusive)
+ * @vi_blocknr: disk block number
+ */
+struct nilfs_vinfo {
+ __u64 vi_vblocknr;
+ __u64 vi_start;
+ __u64 vi_end;
+ __u64 vi_blocknr;
+};
+
+/**
+ * struct nilfs_vdesc - descriptor of virtual block number
+ * @vd_ino: inode number
+ * @vd_cno: checkpoint number
+ * @vd_vblocknr: virtual block number
+ * @vd_period: period of checkpoint numbers
+ * @vd_blocknr: disk block number
+ * @vd_offset: logical block offset inside a file
+ * @vd_flags: flags (data or node block)
+ * @vd_pad: padding
+ */
+struct nilfs_vdesc {
+ __u64 vd_ino;
+ __u64 vd_cno;
+ __u64 vd_vblocknr;
+ struct nilfs_period vd_period;
+ __u64 vd_blocknr;
+ __u64 vd_offset;
+ __u32 vd_flags;
+ __u32 vd_pad;
+};
+
+/**
+ * struct nilfs_bdesc - descriptor of disk block number
+ * @bd_ino: inode number
+ * @bd_oblocknr: disk block address (for skipping dead blocks)
+ * @bd_blocknr: disk block address
+ * @bd_offset: logical block offset inside a file
+ * @bd_level: level in the b-tree organization
+ * @bd_pad: padding
+ */
+struct nilfs_bdesc {
+ __u64 bd_ino;
+ __u64 bd_oblocknr;
+ __u64 bd_blocknr;
+ __u64 bd_offset;
+ __u32 bd_level;
+ __u32 bd_pad;
+};
+
+#define NILFS_IOCTL_IDENT 'n'
+
+#define NILFS_IOCTL_CHANGE_CPMODE \
+ _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode)
+#define NILFS_IOCTL_DELETE_CHECKPOINT \
+ _IOW(NILFS_IOCTL_IDENT, 0x81, __u64)
+#define NILFS_IOCTL_GET_CPINFO \
+ _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv)
+#define NILFS_IOCTL_GET_CPSTAT \
+ _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat)
+#define NILFS_IOCTL_GET_SUINFO \
+ _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv)
+#define NILFS_IOCTL_GET_SUSTAT \
+ _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat)
+#define NILFS_IOCTL_GET_VINFO \
+ _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv)
+#define NILFS_IOCTL_GET_BDESCS \
+ _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv)
+#define NILFS_IOCTL_CLEAN_SEGMENTS \
+ _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5])
+#define NILFS_IOCTL_SYNC \
+ _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
+#define NILFS_IOCTL_RESIZE \
+ _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
+#define NILFS_IOCTL_SET_ALLOC_RANGE \
+ _IOW(NILFS_IOCTL_IDENT, 0x8C, __u64[2])
+#define NILFS_IOCTL_SET_SUINFO \
+ _IOW(NILFS_IOCTL_IDENT, 0x8D, struct nilfs_argv)
+
+#endif /* _LINUX_NILFS_FS_H */
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
new file mode 100644
index 000000000..167342c2c
--- /dev/null
+++ b/include/linux/nl802154.h
@@ -0,0 +1,178 @@
+/*
+ * nl802154.h
+ *
+ * Copyright (C) 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef NL802154_H
+#define NL802154_H
+
+#define IEEE802154_NL_NAME "802.15.4 MAC"
+#define IEEE802154_MCAST_COORD_NAME "coordinator"
+#define IEEE802154_MCAST_BEACON_NAME "beacon"
+
+enum {
+ __IEEE802154_ATTR_INVALID,
+
+ IEEE802154_ATTR_DEV_NAME,
+ IEEE802154_ATTR_DEV_INDEX,
+
+ IEEE802154_ATTR_STATUS,
+
+ IEEE802154_ATTR_SHORT_ADDR,
+ IEEE802154_ATTR_HW_ADDR,
+ IEEE802154_ATTR_PAN_ID,
+
+ IEEE802154_ATTR_CHANNEL,
+
+ IEEE802154_ATTR_COORD_SHORT_ADDR,
+ IEEE802154_ATTR_COORD_HW_ADDR,
+ IEEE802154_ATTR_COORD_PAN_ID,
+
+ IEEE802154_ATTR_SRC_SHORT_ADDR,
+ IEEE802154_ATTR_SRC_HW_ADDR,
+ IEEE802154_ATTR_SRC_PAN_ID,
+
+ IEEE802154_ATTR_DEST_SHORT_ADDR,
+ IEEE802154_ATTR_DEST_HW_ADDR,
+ IEEE802154_ATTR_DEST_PAN_ID,
+
+ IEEE802154_ATTR_CAPABILITY,
+ IEEE802154_ATTR_REASON,
+ IEEE802154_ATTR_SCAN_TYPE,
+ IEEE802154_ATTR_CHANNELS,
+ IEEE802154_ATTR_DURATION,
+ IEEE802154_ATTR_ED_LIST,
+ IEEE802154_ATTR_BCN_ORD,
+ IEEE802154_ATTR_SF_ORD,
+ IEEE802154_ATTR_PAN_COORD,
+ IEEE802154_ATTR_BAT_EXT,
+ IEEE802154_ATTR_COORD_REALIGN,
+ IEEE802154_ATTR_SEC,
+
+ IEEE802154_ATTR_PAGE,
+ IEEE802154_ATTR_CHANNEL_PAGE_LIST,
+
+ IEEE802154_ATTR_PHY_NAME,
+ IEEE802154_ATTR_DEV_TYPE,
+
+ IEEE802154_ATTR_TXPOWER,
+ IEEE802154_ATTR_LBT_ENABLED,
+ IEEE802154_ATTR_CCA_MODE,
+ IEEE802154_ATTR_CCA_ED_LEVEL,
+ IEEE802154_ATTR_CSMA_RETRIES,
+ IEEE802154_ATTR_CSMA_MIN_BE,
+ IEEE802154_ATTR_CSMA_MAX_BE,
+
+ IEEE802154_ATTR_FRAME_RETRIES,
+
+ IEEE802154_ATTR_LLSEC_ENABLED,
+ IEEE802154_ATTR_LLSEC_SECLEVEL,
+ IEEE802154_ATTR_LLSEC_KEY_MODE,
+ IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT,
+ IEEE802154_ATTR_LLSEC_KEY_SOURCE_EXTENDED,
+ IEEE802154_ATTR_LLSEC_KEY_ID,
+ IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+ IEEE802154_ATTR_LLSEC_KEY_BYTES,
+ IEEE802154_ATTR_LLSEC_KEY_USAGE_FRAME_TYPES,
+ IEEE802154_ATTR_LLSEC_KEY_USAGE_COMMANDS,
+ IEEE802154_ATTR_LLSEC_FRAME_TYPE,
+ IEEE802154_ATTR_LLSEC_CMD_FRAME_ID,
+ IEEE802154_ATTR_LLSEC_SECLEVELS,
+ IEEE802154_ATTR_LLSEC_DEV_OVERRIDE,
+ IEEE802154_ATTR_LLSEC_DEV_KEY_MODE,
+
+ __IEEE802154_ATTR_MAX,
+};
+
+#define IEEE802154_ATTR_MAX (__IEEE802154_ATTR_MAX - 1)
+
+extern const struct nla_policy ieee802154_policy[];
+
+/* commands */
+/* REQ should be responded with CONF
+ * and INDIC with RESP
+ */
+enum {
+ __IEEE802154_COMMAND_INVALID,
+
+ IEEE802154_ASSOCIATE_REQ,
+ IEEE802154_ASSOCIATE_CONF,
+ IEEE802154_DISASSOCIATE_REQ,
+ IEEE802154_DISASSOCIATE_CONF,
+ IEEE802154_GET_REQ,
+ IEEE802154_GET_CONF,
+ IEEE802154_RESET_REQ,
+ IEEE802154_RESET_CONF,
+ IEEE802154_SCAN_REQ,
+ IEEE802154_SCAN_CONF,
+ IEEE802154_SET_REQ,
+ IEEE802154_SET_CONF,
+ IEEE802154_START_REQ,
+ IEEE802154_START_CONF,
+ IEEE802154_SYNC_REQ,
+ IEEE802154_POLL_REQ,
+ IEEE802154_POLL_CONF,
+
+ IEEE802154_ASSOCIATE_INDIC,
+ IEEE802154_ASSOCIATE_RESP,
+ IEEE802154_DISASSOCIATE_INDIC,
+ IEEE802154_BEACON_NOTIFY_INDIC,
+ IEEE802154_ORPHAN_INDIC,
+ IEEE802154_ORPHAN_RESP,
+ IEEE802154_COMM_STATUS_INDIC,
+ IEEE802154_SYNC_LOSS_INDIC,
+
+ IEEE802154_GTS_REQ, /* Not supported yet */
+ IEEE802154_GTS_INDIC, /* Not supported yet */
+ IEEE802154_GTS_CONF, /* Not supported yet */
+ IEEE802154_RX_ENABLE_REQ, /* Not supported yet */
+ IEEE802154_RX_ENABLE_CONF, /* Not supported yet */
+
+ IEEE802154_LIST_IFACE,
+ IEEE802154_LIST_PHY,
+ IEEE802154_ADD_IFACE,
+ IEEE802154_DEL_IFACE,
+
+ IEEE802154_SET_MACPARAMS,
+
+ IEEE802154_LLSEC_GETPARAMS,
+ IEEE802154_LLSEC_SETPARAMS,
+ IEEE802154_LLSEC_LIST_KEY,
+ IEEE802154_LLSEC_ADD_KEY,
+ IEEE802154_LLSEC_DEL_KEY,
+ IEEE802154_LLSEC_LIST_DEV,
+ IEEE802154_LLSEC_ADD_DEV,
+ IEEE802154_LLSEC_DEL_DEV,
+ IEEE802154_LLSEC_LIST_DEVKEY,
+ IEEE802154_LLSEC_ADD_DEVKEY,
+ IEEE802154_LLSEC_DEL_DEVKEY,
+ IEEE802154_LLSEC_LIST_SECLEVEL,
+ IEEE802154_LLSEC_ADD_SECLEVEL,
+ IEEE802154_LLSEC_DEL_SECLEVEL,
+
+ __IEEE802154_CMD_MAX,
+};
+
+#define IEEE802154_CMD_MAX (__IEEE802154_CMD_MAX - 1)
+
+enum {
+ __IEEE802154_DEV_INVALID = -1,
+
+ IEEE802154_DEV_WPAN,
+ IEEE802154_DEV_MONITOR,
+
+ __IEEE802154_DEV_MAX,
+};
+
+#endif
diff --git a/include/linux/nls.h b/include/linux/nls.h
new file mode 100644
index 000000000..520681b68
--- /dev/null
+++ b/include/linux/nls.h
@@ -0,0 +1,108 @@
+#ifndef _LINUX_NLS_H
+#define _LINUX_NLS_H
+
+#include <linux/init.h>
+
+/* Unicode has changed over the years. Unicode code points no longer
+ * fit into 16 bits; as of Unicode 5 valid code points range from 0
+ * to 0x10ffff (17 planes, where each plane holds 65536 code points).
+ *
+ * The original decision to represent Unicode characters as 16-bit
+ * wchar_t values is now outdated. But plane 0 still includes the
+ * most commonly used characters, so we will retain it. The newer
+ * 32-bit unicode_t type can be used when it is necessary to
+ * represent the full Unicode character set.
+ */
+
+/* Plane-0 Unicode character */
+typedef u16 wchar_t;
+#define MAX_WCHAR_T 0xffff
+
+/* Arbitrary Unicode character */
+typedef u32 unicode_t;
+
+struct nls_table {
+ const char *charset;
+ const char *alias;
+ int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen);
+ int (*char2uni) (const unsigned char *rawstring, int boundlen,
+ wchar_t *uni);
+ const unsigned char *charset2lower;
+ const unsigned char *charset2upper;
+ struct module *owner;
+ struct nls_table *next;
+};
+
+/* this value holds the maximum number of octets per character in any charset */
+#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
+
+/* Byte order for UTF-16 strings */
+enum utf16_endian {
+ UTF16_HOST_ENDIAN,
+ UTF16_LITTLE_ENDIAN,
+ UTF16_BIG_ENDIAN
+};
+
+/* nls_base.c */
+extern int __register_nls(struct nls_table *, struct module *);
+extern int unregister_nls(struct nls_table *);
+extern struct nls_table *load_nls(char *);
+extern void unload_nls(struct nls_table *);
+extern struct nls_table *load_nls_default(void);
+#define register_nls(nls) __register_nls((nls), THIS_MODULE)
+
+extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
+extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
+extern int utf8s_to_utf16s(const u8 *s, int len,
+ enum utf16_endian endian, wchar_t *pwcs, int maxlen);
+extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
+ enum utf16_endian endian, u8 *s, int maxlen);
+
+static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c)
+{
+ unsigned char nc = t->charset2lower[c];
+
+ return nc ? nc : c;
+}
+
+static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c)
+{
+ unsigned char nc = t->charset2upper[c];
+
+ return nc ? nc : c;
+}
+
+static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1,
+ const unsigned char *s2, int len)
+{
+ while (len--) {
+ if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++))
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * nls_nullsize - return length of null character for codepage
+ * @codepage - codepage for which to return length of NULL terminator
+ *
+ * Since we can't guarantee that the null terminator will be a particular
+ * length, we have to check against the codepage. If there's a problem
+ * determining it, assume a single-byte NULL terminator.
+ */
+static inline int
+nls_nullsize(const struct nls_table *codepage)
+{
+ int charlen;
+ char tmp[NLS_MAX_CHARSET_SIZE];
+
+ charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE);
+
+ return charlen > 0 ? charlen : 1;
+}
+
+#define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name))
+
+#endif /* _LINUX_NLS_H */
+
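
A minimal usage sketch for the NLS helpers above, as a filesystem might use them for case-insensitive name comparison. The charset name "cp437" and the function name example_nls_casecmp are illustrative assumptions, not part of nls.h:

static int example_nls_casecmp(const unsigned char *a, const unsigned char *b,
                               int len)
{
        struct nls_table *t = load_nls("cp437");  /* NULL if that codepage is unavailable */
        int ret;

        if (!t)
                t = load_nls_default();           /* always returns a usable table */
        ret = nls_strnicmp(t, a, b, len);         /* 0 on match, 1 otherwise */
        unload_nls(t);
        return ret;
}
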
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
new file mode 100644
index 000000000..3d46fb470
--- /dev/null
+++ b/include/linux/nmi.h
@@ -0,0 +1,86 @@
+/*
+ * linux/include/linux/nmi.h
+ */
+#ifndef LINUX_NMI_H
+#define LINUX_NMI_H
+
+#include <linux/sched.h>
+#include <asm/irq.h>
+
+/**
+ * touch_nmi_watchdog - restart NMI watchdog timeout.
+ *
+ * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
+ * may be used to reset the timeout - for code which intentionally
+ * disables interrupts for a long time. This call is stateless.
+ */
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#include <asm/nmi.h>
+extern void touch_nmi_watchdog(void);
+#else
+static inline void touch_nmi_watchdog(void)
+{
+ touch_softlockup_watchdog();
+}
+#endif
+
+#if defined(CONFIG_HARDLOCKUP_DETECTOR)
+extern void hardlockup_detector_disable(void);
+#else
+static inline void hardlockup_detector_disable(void)
+{
+}
+#endif
+
+/*
+ * Create trigger_all_cpu_backtrace() out of the arch-provided
+ * base function. Return whether such support was available,
+ * to allow calling code to fall back to some other mechanism:
+ */
+#ifdef arch_trigger_all_cpu_backtrace
+static inline bool trigger_all_cpu_backtrace(void)
+{
+ arch_trigger_all_cpu_backtrace(true);
+
+ return true;
+}
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+ arch_trigger_all_cpu_backtrace(false);
+ return true;
+}
+#else
+static inline bool trigger_all_cpu_backtrace(void)
+{
+ return false;
+}
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+ return false;
+}
+#endif
+
+#ifdef CONFIG_LOCKUP_DETECTOR
+int hw_nmi_is_cpu_stuck(struct pt_regs *);
+u64 hw_nmi_get_sample_period(int watchdog_thresh);
+extern int nmi_watchdog_enabled;
+extern int soft_watchdog_enabled;
+extern int watchdog_user_enabled;
+extern int watchdog_thresh;
+extern int sysctl_softlockup_all_cpu_backtrace;
+struct ctl_table;
+extern int proc_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_nmi_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_soft_watchdog(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+extern int proc_watchdog_thresh(struct ctl_table *, int ,
+ void __user *, size_t *, loff_t *);
+#endif
+
+#ifdef CONFIG_HAVE_ACPI_APEI_NMI
+#include <asm/nmi.h>
+#endif
+
+#endif
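
A minimal sketch of the pattern touch_nmi_watchdog() exists for: a polling loop that deliberately runs with interrupts disabled for longer than the watchdog threshold. The status register and bit polled here are hypothetical, and <linux/io.h> is assumed for readl():

static void example_poll_with_irqs_off(void __iomem *status_reg)
{
        /* the caller is assumed to have disabled interrupts */
        while (!(readl(status_reg) & 0x1)) {
                touch_nmi_watchdog();   /* keep the lockup detectors quiet */
                cpu_relax();
        }
}
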
diff --git a/include/linux/node.h b/include/linux/node.h
new file mode 100644
index 000000000..2115ad5d6
--- /dev/null
+++ b/include/linux/node.h
@@ -0,0 +1,84 @@
+/*
+ * include/linux/node.h - generic node definition
+ *
+ * This is mainly for topological representation. We define the
+ * basic 'struct node' here, which can be embedded in per-arch
+ * definitions of processors.
+ *
+ * Basic handling of the devices is done in drivers/base/node.c
+ * and system devices are handled in drivers/base/sys.c.
+ *
+ * Nodes are exported via sysfs in the /sys/devices/system/node/
+ * directory.
+ */
+#ifndef _LINUX_NODE_H_
+#define _LINUX_NODE_H_
+
+#include <linux/device.h>
+#include <linux/cpumask.h>
+#include <linux/workqueue.h>
+
+struct node {
+ struct device dev;
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+ struct work_struct node_work;
+#endif
+};
+
+struct memory_block;
+extern struct node *node_devices[];
+typedef void (*node_registration_func_t)(struct node *);
+
+extern void unregister_node(struct node *node);
+#ifdef CONFIG_NUMA
+extern int register_one_node(int nid);
+extern void unregister_one_node(int nid);
+extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
+extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
+extern int register_mem_sect_under_node(struct memory_block *mem_blk,
+ int nid);
+extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
+ unsigned long phys_index);
+
+#ifdef CONFIG_HUGETLBFS
+extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
+ node_registration_func_t unregister);
+#endif
+#else
+static inline int register_one_node(int nid)
+{
+ return 0;
+}
+static inline int unregister_one_node(int nid)
+{
+ return 0;
+}
+static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ return 0;
+}
+static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ return 0;
+}
+static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
+ int nid)
+{
+ return 0;
+}
+static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
+ unsigned long phys_index)
+{
+ return 0;
+}
+
+static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
+ node_registration_func_t unreg)
+{
+}
+#endif
+
+#define to_node(device) container_of(device, struct node, dev)
+
+#endif /* _LINUX_NODE_H_ */
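
A minimal sketch of how an architecture's NUMA setup code might use the registration helpers above; the node and CPU ids are illustrative values, not taken from real topology:

static int __init example_register_node0(void)
{
        int err;

        err = register_one_node(0);             /* creates /sys/devices/system/node/node0 */
        if (err)
                return err;
        return register_cpu_under_node(0, 0);   /* links cpu0 under node0 */
}
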
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
new file mode 100644
index 000000000..6e85889cf
--- /dev/null
+++ b/include/linux/nodemask.h
@@ -0,0 +1,527 @@
+#ifndef __LINUX_NODEMASK_H
+#define __LINUX_NODEMASK_H
+
+/*
+ * Nodemasks provide a bitmap suitable for representing the
+ * set of Nodes in a system, one bit position per Node number.
+ *
+ * See detailed comments in the file linux/bitmap.h describing the
+ * data type on which these nodemasks are based.
+ *
+ * For details of nodemask_parse_user(), see bitmap_parse_user() in
+ * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(),
+ * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in
+ * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in
+ * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in
+ * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in
+ * lib/bitmap.c.
+ *
+ * The available nodemask operations are:
+ *
+ * void node_set(node, mask) turn on bit 'node' in mask
+ * void node_clear(node, mask) turn off bit 'node' in mask
+ * void nodes_setall(mask) set all bits
+ * void nodes_clear(mask) clear all bits
+ * int node_isset(node, mask) true iff bit 'node' set in mask
+ * int node_test_and_set(node, mask) test and set bit 'node' in mask
+ *
+ * void nodes_and(dst, src1, src2) dst = src1 & src2 [intersection]
+ * void nodes_or(dst, src1, src2) dst = src1 | src2 [union]
+ * void nodes_xor(dst, src1, src2) dst = src1 ^ src2
+ * void nodes_andnot(dst, src1, src2) dst = src1 & ~src2
+ * void nodes_complement(dst, src) dst = ~src
+ *
+ * int nodes_equal(mask1, mask2) Does mask1 == mask2?
+ * int nodes_intersects(mask1, mask2) Do mask1 and mask2 intersect?
+ * int nodes_subset(mask1, mask2) Is mask1 a subset of mask2?
+ * int nodes_empty(mask) Is mask empty (no bits set)?
+ * int nodes_full(mask) Is mask full (all bits set)?
+ * int nodes_weight(mask) Hamming weight - number of set bits
+ *
+ * void nodes_shift_right(dst, src, n) Shift right
+ * void nodes_shift_left(dst, src, n) Shift left
+ *
+ * int first_node(mask) Number lowest set bit, or MAX_NUMNODES
+ * int next_node(node, mask) Next node past 'node', or MAX_NUMNODES
+ * int first_unset_node(mask) First node not set in mask, or
+ * MAX_NUMNODES.
+ *
+ * nodemask_t nodemask_of_node(node) Return nodemask with bit 'node' set
+ * NODE_MASK_ALL Initializer - all bits set
+ * NODE_MASK_NONE Initializer - no bits set
+ * unsigned long *nodes_addr(mask) Array of unsigned long's in mask
+ *
+ * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask
+ * int nodelist_parse(buf, map) Parse ascii string as nodelist
+ * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit)
+ * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src)
+ * void nodes_onto(dst, orig, relmap) *dst = orig relative to relmap
+ * void nodes_fold(dst, orig, sz) dst bits = orig bits mod sz
+ *
+ * for_each_node_mask(node, mask) for-loop node over mask
+ *
+ * int num_online_nodes() Number of online Nodes
+ * int num_possible_nodes() Number of all possible Nodes
+ *
+ * int node_random(mask) Random node with set bit in mask
+ *
+ * int node_online(node) Is some node online?
+ * int node_possible(node) Is some node possible?
+ *
+ * node_set_online(node) set bit 'node' in node_online_map
+ * node_set_offline(node) clear bit 'node' in node_online_map
+ *
+ * for_each_node(node) for-loop node over node_possible_map
+ * for_each_online_node(node) for-loop node over node_online_map
+ *
+ * Subtlety:
+ * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
+ * to generate slightly worse code. So use a simple one-line #define
+ * for node_isset(), instead of wrapping an inline inside a macro, the
+ * way we do the other calls.
+ *
+ * NODEMASK_SCRATCH
+ * When doing the above logical AND, OR, XOR and remap operations, callers tend to
+ * need temporary nodemask_t's on the stack. But if NODES_SHIFT is large,
+ * nodemask_t's consume too much stack space. NODEMASK_SCRATCH is a helper
+ * for such situations. See below and CPUMASK_ALLOC also.
+ */
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/bitmap.h>
+#include <linux/numa.h>
+
+typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
+extern nodemask_t _unused_nodemask_arg_;
+
+/**
+ * nodemask_pr_args - printf args to output a nodemask
+ * @maskp: nodemask to be printed
+ *
+ * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
+ */
+#define nodemask_pr_args(maskp) MAX_NUMNODES, (maskp)->bits
+
+/*
+ * The inline keyword gives the compiler room to decide to inline, or
+ * not inline a function as it sees best. However, as these functions
+ * are called in both __init and non-__init functions, if they are not
+ * inlined we will end up with a section mis-match error (of the type of
+ * freeable items not being freed). So we must use __always_inline here
+ * to fix the problem. If other functions in the future also end up in
+ * this situation they will also need to be annotated as __always_inline
+ */
+#define node_set(node, dst) __node_set((node), &(dst))
+static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
+{
+ set_bit(node, dstp->bits);
+}
+
+#define node_clear(node, dst) __node_clear((node), &(dst))
+static inline void __node_clear(int node, volatile nodemask_t *dstp)
+{
+ clear_bit(node, dstp->bits);
+}
+
+#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
+static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
+{
+ bitmap_fill(dstp->bits, nbits);
+}
+
+#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
+static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
+{
+ bitmap_zero(dstp->bits, nbits);
+}
+
+/* No static inline type checking - see Subtlety (1) above. */
+#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)
+
+#define node_test_and_set(node, nodemask) \
+ __node_test_and_set((node), &(nodemask))
+static inline int __node_test_and_set(int node, nodemask_t *addr)
+{
+ return test_and_set_bit(node, addr->bits);
+}
+
+#define nodes_and(dst, src1, src2) \
+ __nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, unsigned int nbits)
+{
+ bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_or(dst, src1, src2) \
+ __nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, unsigned int nbits)
+{
+ bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_xor(dst, src1, src2) \
+ __nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, unsigned int nbits)
+{
+ bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_andnot(dst, src1, src2) \
+ __nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
+static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
+ const nodemask_t *src2p, unsigned int nbits)
+{
+ bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_complement(dst, src) \
+ __nodes_complement(&(dst), &(src), MAX_NUMNODES)
+static inline void __nodes_complement(nodemask_t *dstp,
+ const nodemask_t *srcp, unsigned int nbits)
+{
+ bitmap_complement(dstp->bits, srcp->bits, nbits);
+}
+
+#define nodes_equal(src1, src2) \
+ __nodes_equal(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_equal(const nodemask_t *src1p,
+ const nodemask_t *src2p, unsigned int nbits)
+{
+ return bitmap_equal(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_intersects(src1, src2) \
+ __nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_intersects(const nodemask_t *src1p,
+ const nodemask_t *src2p, unsigned int nbits)
+{
+ return bitmap_intersects(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_subset(src1, src2) \
+ __nodes_subset(&(src1), &(src2), MAX_NUMNODES)
+static inline int __nodes_subset(const nodemask_t *src1p,
+ const nodemask_t *src2p, unsigned int nbits)
+{
+ return bitmap_subset(src1p->bits, src2p->bits, nbits);
+}
+
+#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
+static inline int __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
+{
+ return bitmap_empty(srcp->bits, nbits);
+}
+
+#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
+static inline int __nodes_full(const nodemask_t *srcp, unsigned int nbits)
+{
+ return bitmap_full(srcp->bits, nbits);
+}
+
+#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
+static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
+{
+ return bitmap_weight(srcp->bits, nbits);
+}
+
+#define nodes_shift_right(dst, src, n) \
+ __nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
+static inline void __nodes_shift_right(nodemask_t *dstp,
+ const nodemask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
+}
+
+#define nodes_shift_left(dst, src, n) \
+ __nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
+static inline void __nodes_shift_left(nodemask_t *dstp,
+ const nodemask_t *srcp, int n, int nbits)
+{
+ bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
+}
+
+/* FIXME: better would be to fix all architectures to never return
+ > MAX_NUMNODES, then the silly min_ts could be dropped. */
+
+#define first_node(src) __first_node(&(src))
+static inline int __first_node(const nodemask_t *srcp)
+{
+ return min_t(int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
+}
+
+#define next_node(n, src) __next_node((n), &(src))
+static inline int __next_node(int n, const nodemask_t *srcp)
+{
+ return min_t(int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n + 1));
+}
+
+static inline void init_nodemask_of_node(nodemask_t *mask, int node)
+{
+ nodes_clear(*mask);
+ node_set(node, *mask);
+}
+
+#define nodemask_of_node(node) \
+({ \
+ typeof(_unused_nodemask_arg_) m; \
+ if (sizeof(m) == sizeof(unsigned long)) { \
+ m.bits[0] = 1UL << (node); \
+ } else { \
+ init_nodemask_of_node(&m, (node)); \
+ } \
+ m; \
+})
+
+#define first_unset_node(mask) __first_unset_node(&(mask))
+static inline int __first_unset_node(const nodemask_t *maskp)
+{
+ return min_t(int, MAX_NUMNODES,
+ find_first_zero_bit(maskp->bits, MAX_NUMNODES));
+}
+
+#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)
+
+#if MAX_NUMNODES <= BITS_PER_LONG
+
+#define NODE_MASK_ALL \
+((nodemask_t) { { \
+ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \
+} })
+
+#else
+
+#define NODE_MASK_ALL \
+((nodemask_t) { { \
+ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL, \
+ [BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD \
+} })
+
+#endif
+
+#define NODE_MASK_NONE \
+((nodemask_t) { { \
+ [0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL \
+} })
+
+#define nodes_addr(src) ((src).bits)
+
+#define nodemask_parse_user(ubuf, ulen, dst) \
+ __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
+static inline int __nodemask_parse_user(const char __user *buf, int len,
+ nodemask_t *dstp, int nbits)
+{
+ return bitmap_parse_user(buf, len, dstp->bits, nbits);
+}
+
+#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
+static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
+{
+ return bitmap_parselist(buf, dstp->bits, nbits);
+}
+
+#define node_remap(oldbit, old, new) \
+ __node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
+static inline int __node_remap(int oldbit,
+ const nodemask_t *oldp, const nodemask_t *newp, int nbits)
+{
+ return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
+}
+
+#define nodes_remap(dst, src, old, new) \
+ __nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
+static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
+ const nodemask_t *oldp, const nodemask_t *newp, int nbits)
+{
+ bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
+}
+
+#define nodes_onto(dst, orig, relmap) \
+ __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
+static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+ const nodemask_t *relmapp, int nbits)
+{
+ bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
+}
+
+#define nodes_fold(dst, orig, sz) \
+ __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
+static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+ int sz, int nbits)
+{
+ bitmap_fold(dstp->bits, origp->bits, sz, nbits);
+}
+
+#if MAX_NUMNODES > 1
+#define for_each_node_mask(node, mask) \
+ for ((node) = first_node(mask); \
+ (node) < MAX_NUMNODES; \
+ (node) = next_node((node), (mask)))
+#else /* MAX_NUMNODES == 1 */
+#define for_each_node_mask(node, mask) \
+ if (!nodes_empty(mask)) \
+ for ((node) = 0; (node) < 1; (node)++)
+#endif /* MAX_NUMNODES */
+
+/*
+ * Bitmasks that are kept for all the nodes.
+ */
+enum node_states {
+ N_POSSIBLE, /* The node could become online at some point */
+ N_ONLINE, /* The node is online */
+ N_NORMAL_MEMORY, /* The node has regular memory */
+#ifdef CONFIG_HIGHMEM
+ N_HIGH_MEMORY, /* The node has regular or high memory */
+#else
+ N_HIGH_MEMORY = N_NORMAL_MEMORY,
+#endif
+#ifdef CONFIG_MOVABLE_NODE
+ N_MEMORY, /* The node has memory (regular, high, movable) */
+#else
+ N_MEMORY = N_HIGH_MEMORY,
+#endif
+ N_CPU, /* The node has one or more cpus */
+ NR_NODE_STATES
+};
+
+/*
+ * The following particular system nodemasks and operations
+ * on them manage all possible and online nodes.
+ */
+
+extern nodemask_t node_states[NR_NODE_STATES];
+
+#if MAX_NUMNODES > 1
+static inline int node_state(int node, enum node_states state)
+{
+ return node_isset(node, node_states[state]);
+}
+
+static inline void node_set_state(int node, enum node_states state)
+{
+ __node_set(node, &node_states[state]);
+}
+
+static inline void node_clear_state(int node, enum node_states state)
+{
+ __node_clear(node, &node_states[state]);
+}
+
+static inline int num_node_state(enum node_states state)
+{
+ return nodes_weight(node_states[state]);
+}
+
+#define for_each_node_state(__node, __state) \
+ for_each_node_mask((__node), node_states[__state])
+
+#define first_online_node first_node(node_states[N_ONLINE])
+#define first_memory_node first_node(node_states[N_MEMORY])
+static inline int next_online_node(int nid)
+{
+ return next_node(nid, node_states[N_ONLINE]);
+}
+static inline int next_memory_node(int nid)
+{
+ return next_node(nid, node_states[N_MEMORY]);
+}
+
+extern int nr_node_ids;
+extern int nr_online_nodes;
+
+static inline void node_set_online(int nid)
+{
+ node_set_state(nid, N_ONLINE);
+ nr_online_nodes = num_node_state(N_ONLINE);
+}
+
+static inline void node_set_offline(int nid)
+{
+ node_clear_state(nid, N_ONLINE);
+ nr_online_nodes = num_node_state(N_ONLINE);
+}
+
+#else
+
+static inline int node_state(int node, enum node_states state)
+{
+ return node == 0;
+}
+
+static inline void node_set_state(int node, enum node_states state)
+{
+}
+
+static inline void node_clear_state(int node, enum node_states state)
+{
+}
+
+static inline int num_node_state(enum node_states state)
+{
+ return 1;
+}
+
+#define for_each_node_state(node, __state) \
+ for ( (node) = 0; (node) == 0; (node) = 1)
+
+#define first_online_node 0
+#define first_memory_node 0
+#define next_online_node(nid) (MAX_NUMNODES)
+#define nr_node_ids 1
+#define nr_online_nodes 1
+
+#define node_set_online(node) node_set_state((node), N_ONLINE)
+#define node_set_offline(node) node_clear_state((node), N_ONLINE)
+
+#endif
+
+#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
+extern int node_random(const nodemask_t *maskp);
+#else
+static inline int node_random(const nodemask_t *mask)
+{
+ return 0;
+}
+#endif
+
+#define node_online_map node_states[N_ONLINE]
+#define node_possible_map node_states[N_POSSIBLE]
+
+#define num_online_nodes() num_node_state(N_ONLINE)
+#define num_possible_nodes() num_node_state(N_POSSIBLE)
+#define node_online(node) node_state((node), N_ONLINE)
+#define node_possible(node) node_state((node), N_POSSIBLE)
+
+#define for_each_node(node) for_each_node_state(node, N_POSSIBLE)
+#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
+
+/*
+ * For the nodemask scratch area.
+ * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
+ * name.
+ */
+#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
+#define NODEMASK_ALLOC(type, name, gfp_flags) \
+ type *name = kmalloc(sizeof(*name), gfp_flags)
+#define NODEMASK_FREE(m) kfree(m)
+#else
+#define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name
+#define NODEMASK_FREE(m) do {} while (0)
+#endif
+
+/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */
+struct nodemask_scratch {
+ nodemask_t mask1;
+ nodemask_t mask2;
+};
+
+#define NODEMASK_SCRATCH(x) \
+ NODEMASK_ALLOC(struct nodemask_scratch, x, \
+ GFP_KERNEL | __GFP_NORETRY)
+#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
+
+
+#endif /* __LINUX_NODEMASK_H */
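
A minimal sketch of the nodemask operations documented at the top of nodemask.h; the bits set here are arbitrary illustration values and pr_info() is assumed to be available:

static void example_nodemask_usage(void)
{
        nodemask_t mask = NODE_MASK_NONE;
        int node;

        node_set(0, mask);                      /* mask = { 0 } */
        node_set(2, mask);                      /* mask = { 0, 2 } */
        nodes_and(mask, mask, node_online_map); /* drop any offline nodes */

        for_each_node_mask(node, mask)
                pr_info("node %d is set and online\n", node);
}
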
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
new file mode 100644
index 000000000..d14a4c362
--- /dev/null
+++ b/include/linux/notifier.h
@@ -0,0 +1,215 @@
+/*
+ * Routines to manage notifier chains for passing status changes to any
+ * interested routines. We need this instead of hard coded call lists so
+ * that modules can poke their nose into the innards. The network devices
+ * needed them so here they are for the rest of you.
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+
+#ifndef _LINUX_NOTIFIER_H
+#define _LINUX_NOTIFIER_H
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/srcu.h>
+
+/*
+ * Notifier chains are of four types:
+ *
+ * Atomic notifier chains: Chain callbacks run in interrupt/atomic
+ * context. Callouts are not allowed to block.
+ * Blocking notifier chains: Chain callbacks run in process context.
+ * Callouts are allowed to block.
+ * Raw notifier chains: There are no restrictions on callbacks,
+ * registration, or unregistration. All locking and protection
+ * must be provided by the caller.
+ * SRCU notifier chains: A variant of blocking notifier chains, with
+ * the same restrictions.
+ *
+ * atomic_notifier_chain_register() may be called from an atomic context,
+ * but blocking_notifier_chain_register() and srcu_notifier_chain_register()
+ * must be called from a process context. Ditto for the corresponding
+ * _unregister() routines.
+ *
+ * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(),
+ * and srcu_notifier_chain_unregister() _must not_ be called from within
+ * the call chain.
+ *
+ * SRCU notifier chains are an alternative form of blocking notifier chains.
+ * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for
+ * protection of the chain links. This means there is _very_ low overhead
+ * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
+ * As compensation, srcu_notifier_chain_unregister() is rather expensive.
+ * SRCU notifier chains should be used when the chain will be called very
+ * often but notifier_blocks will seldom be removed. Also, SRCU notifier
+ * chains are slightly more difficult to use because they require special
+ * runtime initialization.
+ */
+
+typedef int (*notifier_fn_t)(struct notifier_block *nb,
+ unsigned long action, void *data);
+
+struct notifier_block {
+ notifier_fn_t notifier_call;
+ struct notifier_block __rcu *next;
+ int priority;
+};
+
+struct atomic_notifier_head {
+ spinlock_t lock;
+ struct notifier_block __rcu *head;
+};
+
+struct blocking_notifier_head {
+ struct rw_semaphore rwsem;
+ struct notifier_block __rcu *head;
+};
+
+struct raw_notifier_head {
+ struct notifier_block __rcu *head;
+};
+
+struct srcu_notifier_head {
+ struct mutex mutex;
+ struct srcu_struct srcu;
+ struct notifier_block __rcu *head;
+};
+
+#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
+ spin_lock_init(&(name)->lock); \
+ (name)->head = NULL; \
+ } while (0)
+#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \
+ init_rwsem(&(name)->rwsem); \
+ (name)->head = NULL; \
+ } while (0)
+#define RAW_INIT_NOTIFIER_HEAD(name) do { \
+ (name)->head = NULL; \
+ } while (0)
+
+/* srcu_notifier_heads must be initialized and cleaned up dynamically */
+extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
+#define srcu_cleanup_notifier_head(name) \
+ cleanup_srcu_struct(&(name)->srcu);
+
+#define ATOMIC_NOTIFIER_INIT(name) { \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .head = NULL }
+#define BLOCKING_NOTIFIER_INIT(name) { \
+ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \
+ .head = NULL }
+#define RAW_NOTIFIER_INIT(name) { \
+ .head = NULL }
+/* srcu_notifier_heads cannot be initialized statically */
+
+#define ATOMIC_NOTIFIER_HEAD(name) \
+ struct atomic_notifier_head name = \
+ ATOMIC_NOTIFIER_INIT(name)
+#define BLOCKING_NOTIFIER_HEAD(name) \
+ struct blocking_notifier_head name = \
+ BLOCKING_NOTIFIER_INIT(name)
+#define RAW_NOTIFIER_HEAD(name) \
+ struct raw_notifier_head name = \
+ RAW_NOTIFIER_INIT(name)
+
+#ifdef __KERNEL__
+
+extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+ struct notifier_block *nb);
+extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
+ struct notifier_block *nb);
+extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
+ struct notifier_block *nb);
+extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
+ struct notifier_block *nb);
+
+extern int blocking_notifier_chain_cond_register(
+ struct blocking_notifier_head *nh,
+ struct notifier_block *nb);
+
+extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ struct notifier_block *nb);
+extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
+ struct notifier_block *nb);
+extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
+ struct notifier_block *nb);
+extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
+ struct notifier_block *nb);
+
+extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v);
+extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v);
+extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v);
+extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v);
+extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
+ unsigned long val, void *v, int nr_to_call, int *nr_calls);
+
+#define NOTIFY_DONE 0x0000 /* Don't care */
+#define NOTIFY_OK 0x0001 /* Suits me */
+#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
+ /* Bad/Veto action */
+/*
+ * Clean way to return from the notifier and stop further calls.
+ */
+#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
+
+/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
+static inline int notifier_from_errno(int err)
+{
+ if (err)
+ return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
+
+ return NOTIFY_OK;
+}
+
+/* Restore (negative) errno value from notify return value. */
+static inline int notifier_to_errno(int ret)
+{
+ ret &= ~NOTIFY_STOP_MASK;
+ return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
+}
+
+/*
+ * Declared notifiers so far. I can imagine quite a few more chains
+ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+
+/* CPU notifiers are defined in include/linux/cpu.h. */
+
+/* netdevice notifiers are defined in include/linux/netdevice.h */
+
+/* reboot notifiers are defined in include/linux/reboot.h. */
+
+/* Hibernation and suspend events are defined in include/linux/suspend.h. */
+
+/* Virtual Terminal events are defined in include/linux/vt.h. */
+
+#define NETLINK_URELEASE 0x0001 /* Unicast netlink socket released */
+
+/* Console keyboard events.
+ * Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and
+ * KBD_KEYSYM. */
+#define KBD_KEYCODE 0x0001 /* Keyboard keycode, called before any other */
+#define KBD_UNBOUND_KEYCODE 0x0002 /* Keyboard keycode which is not bound to any other */
+#define KBD_UNICODE 0x0003 /* Keyboard unicode */
+#define KBD_KEYSYM 0x0004 /* Keyboard keysym */
+#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */
+
+extern struct blocking_notifier_head reboot_notifier_list;
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_NOTIFIER_H */
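
A minimal sketch of a blocking notifier chain with one publisher and one subscriber. The chain name example_chain, the event value 1 and the callback names are illustrative assumptions:

static BLOCKING_NOTIFIER_HEAD(example_chain);

static int example_event_cb(struct notifier_block *nb, unsigned long action,
                            void *data)
{
        if (action != 1)
                return NOTIFY_DONE;     /* not our event - don't care */
        return NOTIFY_OK;               /* handled; let the chain continue */
}

static struct notifier_block example_nb = {
        .notifier_call  = example_event_cb,
        .priority       = 0,
};

static int example_publish(void *payload)
{
        int ret;

        blocking_notifier_chain_register(&example_chain, &example_nb);
        ret = blocking_notifier_call_chain(&example_chain, 1, payload);
        blocking_notifier_chain_unregister(&example_chain, &example_nb);
        return notifier_to_errno(ret);  /* 0 unless a callback vetoed via notifier_from_errno() */
}
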
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
new file mode 100644
index 000000000..85a5c8c16
--- /dev/null
+++ b/include/linux/ns_common.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_NS_COMMON_H
+#define _LINUX_NS_COMMON_H
+
+struct proc_ns_operations;
+
+struct ns_common {
+ atomic_long_t stashed;
+ const struct proc_ns_operations *ops;
+ unsigned int inum;
+};
+
+#endif
diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h
new file mode 100644
index 000000000..7da0cf370
--- /dev/null
+++ b/include/linux/nsc_gpio.h
@@ -0,0 +1,40 @@
+/**
+ nsc_gpio.c
+
+ National Semiconductor GPIO common access methods.
+
+ struct nsc_gpio_ops abstracts the low-level access
+ operations for the GPIO units on two NSC chip families: the GEODE
+ integrated CPU and the PC-8736[03456] integrated PC-peripheral
+ chips.
+
+ The GPIO units on these chips have the same pin architecture, but
+ the access methods differ. Thus, scx200_gpio and pc8736x_gpio
+ implement their own versions of these routines, and use the common
+ file-operations routines implemented in the nsc_gpio module.
+
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+
+ NB: this work was tested on the Geode SC-1100 and PC-87366 chips.
+ NSC sold the GEODE line to AMD, and the PC-8736x line to Winbond.
+*/
+
+struct nsc_gpio_ops {
+ struct module* owner;
+ u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits);
+ void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor);
+ int (*gpio_get) (unsigned iminor);
+ void (*gpio_set) (unsigned iminor, int state);
+ void (*gpio_change) (unsigned iminor);
+ int (*gpio_current) (unsigned iminor);
+ struct device* dev; /* for dev_dbg() support, set in init */
+};
+
+extern ssize_t nsc_gpio_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos);
+
+extern ssize_t nsc_gpio_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+
+extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index);
+
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
new file mode 100644
index 000000000..35fa08fd7
--- /dev/null
+++ b/include/linux/nsproxy.h
@@ -0,0 +1,85 @@
+#ifndef _LINUX_NSPROXY_H
+#define _LINUX_NSPROXY_H
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+
+struct mnt_namespace;
+struct uts_namespace;
+struct ipc_namespace;
+struct pid_namespace;
+struct fs_struct;
+
+/*
+ * A structure to contain pointers to all per-process
+ * namespaces - fs (mount), uts, network, sysvipc, etc.
+ *
+ * The pid namespace is an exception -- it's accessed using
+ * task_active_pid_ns. The pid namespace here is the
+ * namespace that children will use.
+ *
+ * 'count' is the number of tasks holding a reference.
+ * The count for each namespace, then, will be the number
+ * of nsproxies pointing to it, not the number of tasks.
+ *
+ * The nsproxy is shared by tasks which share all namespaces.
+ * As soon as a single namespace is cloned or unshared, the
+ * nsproxy is copied.
+ */
+struct nsproxy {
+ atomic_t count;
+ struct uts_namespace *uts_ns;
+ struct ipc_namespace *ipc_ns;
+ struct mnt_namespace *mnt_ns;
+ struct pid_namespace *pid_ns_for_children;
+ struct net *net_ns;
+};
+extern struct nsproxy init_nsproxy;
+
+/*
+ * The namespace access rules are:
+ *
+ * 1. Only the current task is allowed to change its tsk->nsproxy pointer or
+ * any pointer on the nsproxy itself. Current must hold the task_lock
+ * when changing tsk->nsproxy.
+ *
+ * 2. When accessing (i.e. reading) the current task's namespaces, no
+ * precautions need to be taken - just dereference the pointers.
+ *
+ * 3. Access to another task's namespaces is performed like this:
+ * task_lock(task);
+ * nsproxy = task->nsproxy;
+ * if (nsproxy != NULL) {
+ * / *
+ * * work with the namespaces here
+ * * e.g. get the reference on one of them
+ * * /
+ * } / *
+ * * NULL task->nsproxy means that this task is
+ * * almost dead (zombie)
+ * * /
+ * task_unlock(task);
+ *
+ */
+
+int copy_namespaces(unsigned long flags, struct task_struct *tsk);
+void exit_task_namespaces(struct task_struct *tsk);
+void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
+void free_nsproxy(struct nsproxy *ns);
+int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
+ struct cred *, struct fs_struct *);
+int __init nsproxy_cache_init(void);
+
+static inline void put_nsproxy(struct nsproxy *ns)
+{
+ if (atomic_dec_and_test(&ns->count)) {
+ free_nsproxy(ns);
+ }
+}
+
+static inline void get_nsproxy(struct nsproxy *ns)
+{
+ atomic_inc(&ns->count);
+}
+
+#endif
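
A minimal sketch of access rule 3 above: pinning another task's nsproxy before inspecting its namespaces. The function name is illustrative, and the caller is expected to pair it with put_nsproxy():

static struct nsproxy *example_grab_nsproxy(struct task_struct *task)
{
        struct nsproxy *ns;

        task_lock(task);
        ns = task->nsproxy;
        if (ns)
                get_nsproxy(ns);        /* hold it across the upcoming accesses */
        task_unlock(task);

        return ns;                      /* NULL means the task is almost dead (zombie) */
}
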
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
new file mode 100644
index 000000000..9ac1a62fc
--- /dev/null
+++ b/include/linux/ntb.h
@@ -0,0 +1,88 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+struct ntb_transport_qp;
+
+struct ntb_client {
+ struct device_driver driver;
+ int (*probe)(struct pci_dev *pdev);
+ void (*remove)(struct pci_dev *pdev);
+};
+
+enum {
+ NTB_LINK_DOWN = 0,
+ NTB_LINK_UP,
+};
+
+int ntb_register_client(struct ntb_client *drvr);
+void ntb_unregister_client(struct ntb_client *drvr);
+int ntb_register_client_dev(char *device_name);
+void ntb_unregister_client_dev(char *device_name);
+
+struct ntb_queue_handlers {
+ void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+ void *data, int len);
+ void (*event_handler)(void *data, int status);
+};
+
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+ const struct ntb_queue_handlers *handlers);
+void ntb_transport_free_queue(struct ntb_transport_qp *qp);
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len);
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+ unsigned int len);
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
+void ntb_transport_link_up(struct ntb_transport_qp *qp);
+void ntb_transport_link_down(struct ntb_transport_qp *qp);
+bool ntb_transport_link_query(struct ntb_transport_qp *qp);
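
A minimal sketch of registering an NTB client with the interface above; the driver name and the empty callbacks are hypothetical placeholders:

static int example_ntb_probe(struct pci_dev *pdev)
{
        return 0;       /* a real client would create transport queues here */
}

static void example_ntb_remove(struct pci_dev *pdev)
{
}

static struct ntb_client example_ntb_client = {
        .driver = { .name = "example_ntb" },
        .probe  = example_ntb_probe,
        .remove = example_ntb_remove,
};

/* module init calls ntb_register_client(&example_ntb_client);
 * module exit calls ntb_unregister_client(&example_ntb_client). */
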
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
new file mode 100644
index 000000000..6165b2c62
--- /dev/null
+++ b/include/linux/nubus.h
@@ -0,0 +1,134 @@
+/*
+ nubus.h: various definitions and prototypes for NuBus drivers to use.
+
+ Originally written by Alan Cox.
+
+ Hacked to death by C. Scott Ananian and David Huggins-Daines.
+
+ Some of the constants in here are from the corresponding
+ NetBSD/OpenBSD header file, by Allen Briggs. We figured out the
+ rest of them on our own. */
+#ifndef LINUX_NUBUS_H
+#define LINUX_NUBUS_H
+
+#include <asm/nubus.h>
+#include <uapi/linux/nubus.h>
+
+struct nubus_board {
+ struct nubus_board* next;
+ struct nubus_dev* first_dev;
+
+ /* Only 9-E actually exist, though 0-8 are also theoretically
+ possible, and 0 is a special case which represents the
+ motherboard and onboard peripherals (Ethernet, video) */
+ int slot;
+ /* For slot 0, this is bogus. */
+ char name[64];
+
+ /* Format block */
+ unsigned char* fblock;
+ /* Root directory (does *not* always equal fblock + doffset!) */
+ unsigned char* directory;
+
+ unsigned long slot_addr;
+ /* Offset to root directory (sometimes) */
+ unsigned long doffset;
+ /* Length over which to compute the crc */
+ unsigned long rom_length;
+ /* Completely useless most of the time */
+ unsigned long crc;
+ unsigned char rev;
+ unsigned char format;
+ unsigned char lanes;
+};
+
+struct nubus_dev {
+ /* Next link in device list */
+ struct nubus_dev* next;
+ /* Directory entry in /proc/bus/nubus */
+ struct proc_dir_entry* procdir;
+
+ /* The functional resource ID of this device */
+ unsigned char resid;
+ /* These are mostly here for convenience; we could always read
+ them from the ROMs if we wanted to */
+ unsigned short category;
+ unsigned short type;
+ unsigned short dr_sw;
+ unsigned short dr_hw;
+ /* This is the device's name rather than the board's.
+ Sometimes they are different. Usually the board name is
+ more correct. */
+ char name[64];
+ /* MacOS driver (I kid you not) */
+ unsigned char* driver;
+ /* Actually this is an offset */
+ unsigned long iobase;
+ unsigned long iosize;
+ unsigned char flags, hwdevid;
+
+ /* Functional directory */
+ unsigned char* directory;
+ /* Much of our info comes from here */
+ struct nubus_board* board;
+};
+
+/* This is all NuBus devices (used to find devices later on) */
+extern struct nubus_dev* nubus_devices;
+/* This is all NuBus cards */
+extern struct nubus_board* nubus_boards;
+
+/* Generic NuBus interface functions, modelled after the PCI interface */
+void nubus_scan_bus(void);
+#ifdef CONFIG_PROC_FS
+extern void nubus_proc_init(void);
+#else
+static inline void nubus_proc_init(void) {}
+#endif
+int get_nubus_list(char *buf);
+int nubus_proc_attach_device(struct nubus_dev *dev);
+/* If we need more precision we can add some more of these */
+struct nubus_dev* nubus_find_device(unsigned short category,
+ unsigned short type,
+ unsigned short dr_hw,
+ unsigned short dr_sw,
+ const struct nubus_dev* from);
+struct nubus_dev* nubus_find_type(unsigned short category,
+ unsigned short type,
+ const struct nubus_dev* from);
+/* Might have more than one device in a slot, you know... */
+struct nubus_dev* nubus_find_slot(unsigned int slot,
+ const struct nubus_dev* from);
+
+/* These are somewhat more NuBus-specific. They all return 0 for
+ success and -1 for failure, as you'd expect. */
+
+/* The root directory which contains the board and functional
+ directories */
+int nubus_get_root_dir(const struct nubus_board* board,
+ struct nubus_dir* dir);
+/* The board directory */
+int nubus_get_board_dir(const struct nubus_board* board,
+ struct nubus_dir* dir);
+/* The functional directory */
+int nubus_get_func_dir(const struct nubus_dev* dev,
+ struct nubus_dir* dir);
+
+/* These work on any directory gotten via the above */
+int nubus_readdir(struct nubus_dir* dir,
+ struct nubus_dirent* ent);
+int nubus_find_rsrc(struct nubus_dir* dir,
+ unsigned char rsrc_type,
+ struct nubus_dirent* ent);
+int nubus_rewinddir(struct nubus_dir* dir);
+
+/* Things to do with directory entries */
+int nubus_get_subdir(const struct nubus_dirent* ent,
+ struct nubus_dir* dir);
+void nubus_get_rsrc_mem(void* dest,
+ const struct nubus_dirent *dirent,
+ int len);
+void nubus_get_rsrc_str(void* dest,
+ const struct nubus_dirent *dirent,
+ int maxlen);
+#endif /* LINUX_NUBUS_H */
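
A minimal sketch of walking all NuBus devices of one functional type with the find helpers above, assuming the NUBUS_CAT_DISPLAY and NUBUS_TYPE_VIDEO constants from <uapi/linux/nubus.h>:

static void example_list_video_cards(void)
{
        struct nubus_dev *dev = NULL;

        while ((dev = nubus_find_type(NUBUS_CAT_DISPLAY, NUBUS_TYPE_VIDEO, dev)))
                pr_info("slot %d: %s\n", dev->board->slot, dev->name);
}
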
diff --git a/include/linux/numa.h b/include/linux/numa.h
new file mode 100644
index 000000000..3aaa31603
--- /dev/null
+++ b/include/linux/numa.h
@@ -0,0 +1,15 @@
+#ifndef _LINUX_NUMA_H
+#define _LINUX_NUMA_H
+
+
+#ifdef CONFIG_NODES_SHIFT
+#define NODES_SHIFT CONFIG_NODES_SHIFT
+#else
+#define NODES_SHIFT 0
+#endif
+
+#define MAX_NUMNODES (1 << NODES_SHIFT)
+
+#define NUMA_NO_NODE (-1)
+
+#endif /* _LINUX_NUMA_H */
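
Worked example of the arithmetic above: with CONFIG_NODES_SHIFT=6 (a typical 64-bit default, assumed here for illustration), MAX_NUMNODES is 1 << 6 = 64, so a nodemask_t from nodemask.h needs 64 bits - a single unsigned long on 64-bit builds; without CONFIG_NODES_SHIFT it collapses to 1 << 0 = 1 node. NUMA_NO_NODE (-1) is the sentinel callers pass when no node preference applies.
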
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
new file mode 100644
index 000000000..8dbd05e70
--- /dev/null
+++ b/include/linux/nvme.h
@@ -0,0 +1,179 @@
+/*
+ * Definitions for the NVM Express interface
+ * Copyright (c) 2011-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _LINUX_NVME_H
+#define _LINUX_NVME_H
+
+#include <uapi/linux/nvme.h>
+#include <linux/pci.h>
+#include <linux/kref.h>
+#include <linux/blk-mq.h>
+
+struct nvme_bar {
+ __u64 cap; /* Controller Capabilities */
+ __u32 vs; /* Version */
+ __u32 intms; /* Interrupt Mask Set */
+ __u32 intmc; /* Interrupt Mask Clear */
+ __u32 cc; /* Controller Configuration */
+ __u32 rsvd1; /* Reserved */
+ __u32 csts; /* Controller Status */
+ __u32 rsvd2; /* Reserved */
+ __u32 aqa; /* Admin Queue Attributes */
+ __u64 asq; /* Admin SQ Base Address */
+ __u64 acq; /* Admin CQ Base Address */
+};
+
+#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
+#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+
+enum {
+ NVME_CC_ENABLE = 1 << 0,
+ NVME_CC_CSS_NVM = 0 << 4,
+ NVME_CC_MPS_SHIFT = 7,
+ NVME_CC_ARB_RR = 0 << 11,
+ NVME_CC_ARB_WRRU = 1 << 11,
+ NVME_CC_ARB_VS = 7 << 11,
+ NVME_CC_SHN_NONE = 0 << 14,
+ NVME_CC_SHN_NORMAL = 1 << 14,
+ NVME_CC_SHN_ABRUPT = 2 << 14,
+ NVME_CC_SHN_MASK = 3 << 14,
+ NVME_CC_IOSQES = 6 << 16,
+ NVME_CC_IOCQES = 4 << 20,
+ NVME_CSTS_RDY = 1 << 0,
+ NVME_CSTS_CFS = 1 << 1,
+ NVME_CSTS_SHST_NORMAL = 0 << 2,
+ NVME_CSTS_SHST_OCCUR = 1 << 2,
+ NVME_CSTS_SHST_CMPLT = 2 << 2,
+ NVME_CSTS_SHST_MASK = 3 << 2,
+};
+
+extern unsigned char nvme_io_timeout;
+#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
+
+/*
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+ struct list_head node;
+ struct nvme_queue **queues;
+ struct request_queue *admin_q;
+ struct blk_mq_tag_set tagset;
+ struct blk_mq_tag_set admin_tagset;
+ u32 __iomem *dbs;
+ struct pci_dev *pci_dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ int instance;
+ unsigned queue_count;
+ unsigned online_queues;
+ unsigned max_qid;
+ int q_depth;
+ u32 db_stride;
+ u32 ctrl_config;
+ struct msix_entry *entry;
+ struct nvme_bar __iomem *bar;
+ struct list_head namespaces;
+ struct kref kref;
+ struct device *device;
+ work_func_t reset_workfn;
+ struct work_struct reset_work;
+ struct work_struct probe_work;
+ char name[12];
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+ u32 max_hw_sectors;
+ u32 stripe_size;
+ u32 page_size;
+ u16 oncs;
+ u16 abort_limit;
+ u8 event_limit;
+ u8 vwc;
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+ struct list_head list;
+
+ struct nvme_dev *dev;
+ struct request_queue *queue;
+ struct gendisk *disk;
+
+ unsigned ns_id;
+ int lba_shift;
+ u16 ms;
+ bool ext;
+ u8 pi_type;
+ u64 mode_select_num_blocks;
+ u32 mode_select_block_len;
+};
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries. You can't see it in this data structure because C doesn't let
+ * me express that. Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+ unsigned long private; /* For the use of the submitter of the I/O */
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int offset; /* Of PRP list */
+ int nents; /* Used in scatterlist */
+ int length; /* Of data, in bytes */
+ dma_addr_t first_dma;
+ struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
+ struct scatterlist sg[0];
+};
+
+static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
+{
+ return (sector >> (ns->lba_shift - 9));
+}
+
+/**
+ * nvme_free_iod - frees an nvme_iod
+ * @dev: The device that the I/O was submitted to
+ * @iod: The memory to free
+ */
+void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
+
+int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
+struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, unsigned length);
+void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+ struct nvme_iod *iod);
+int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
+ struct nvme_command *, u32 *);
+int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
+int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
+ u32 *result);
+int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
+ dma_addr_t dma_addr);
+int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+ dma_addr_t dma_addr, u32 *result);
+int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
+ dma_addr_t dma_addr, u32 *result);
+
+struct sg_io_hdr;
+
+int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
+int nvme_sg_get_version_num(int __user *ip);
+
+#endif /* _LINUX_NVME_H */
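
A minimal sketch of decoding the controller CAP register with the NVME_CAP_* helpers above, roughly the way the PCI driver fills in struct nvme_dev. The 1024 queue-depth ceiling is an arbitrary illustration, and readq() assumes a 64-bit build:

static void example_decode_cap(struct nvme_dev *dev)
{
        u64 cap = readq(&dev->bar->cap);

        dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, 1024); /* MQES is zero-based */
        dev->db_stride = 1 << NVME_CAP_STRIDE(cap);              /* doorbell stride, in 4-byte units */
}
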
diff --git a/include/linux/nvram.h b/include/linux/nvram.h
new file mode 100644
index 000000000..cf0ff555a
--- /dev/null
+++ b/include/linux/nvram.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_NVRAM_H
+#define _LINUX_NVRAM_H
+
+#include <uapi/linux/nvram.h>
+
+/* __foo is foo without grabbing the rtc_lock - get it yourself */
+extern unsigned char __nvram_read_byte(int i);
+extern unsigned char nvram_read_byte(int i);
+extern void __nvram_write_byte(unsigned char c, int i);
+extern void nvram_write_byte(unsigned char c, int i);
+extern int __nvram_check_checksum(void);
+extern int nvram_check_checksum(void);
+#endif /* _LINUX_NVRAM_H */
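
A minimal sketch of the locked vs. unlocked accessors above: the __nvram_* forms assume the caller already holds rtc_lock (declared in <linux/mc146818rtc.h>); the two-byte read is an illustrative use case:

static unsigned char example_read_two_bytes(int i, unsigned char *second)
{
        unsigned char first;

        spin_lock_irq(&rtc_lock);
        first = __nvram_read_byte(i);
        *second = __nvram_read_byte(i + 1);
        spin_unlock_irq(&rtc_lock);

        return first;
}
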
diff --git a/include/linux/nwpserial.h b/include/linux/nwpserial.h
new file mode 100644
index 000000000..9acb21572
--- /dev/null
+++ b/include/linux/nwpserial.h
@@ -0,0 +1,18 @@
+/*
+ * Serial Port driver for a NWP uart device
+ *
+ * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _NWPSERIAL_H
+#define _NWPSERIAL_H
+
+int nwpserial_register_port(struct uart_port *port);
+void nwpserial_unregister_port(int line);
+
+#endif /* _NWPSERIAL_H */
diff --git a/include/linux/nx842.h b/include/linux/nx842.h
new file mode 100644
index 000000000..a4d324c64
--- /dev/null
+++ b/include/linux/nx842.h
@@ -0,0 +1,11 @@
+#ifndef __NX842_H__
+#define __NX842_H__
+
+int nx842_get_workmem_size(void);
+int nx842_get_workmem_size_aligned(void);
+int nx842_compress(const unsigned char *in, unsigned int in_len,
+ unsigned char *out, unsigned int *out_len, void *wrkmem);
+int nx842_decompress(const unsigned char *in, unsigned int in_len,
+ unsigned char *out, unsigned int *out_len, void *wrkmem);
+
+#endif
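
A minimal sketch of driving the 842 compression interface above; error handling is trimmed and <linux/slab.h> is assumed for kmalloc()/kfree():

static int example_compress(const unsigned char *in, unsigned int in_len,
                            unsigned char *out, unsigned int *out_len)
{
        void *wrkmem = kmalloc(nx842_get_workmem_size(), GFP_KERNEL);
        int ret;

        if (!wrkmem)
                return -ENOMEM;
        ret = nx842_compress(in, in_len, out, out_len, wrkmem);
        kfree(wrkmem);
        return ret;
}
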
diff --git a/include/linux/of.h b/include/linux/of.h
new file mode 100644
index 000000000..8135d507d
--- /dev/null
+++ b/include/linux/of.h
@@ -0,0 +1,1099 @@
+#ifndef _LINUX_OF_H
+#define _LINUX_OF_H
+/*
+ * Definitions for talking to the Open Firmware PROM on
+ * Power Macintosh and other computers.
+ *
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
+ * Updates for SPARC64 by David S. Miller
+ * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kobject.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spinlock.h>
+#include <linux/topology.h>
+#include <linux/notifier.h>
+#include <linux/property.h>
+#include <linux/list.h>
+
+#include <asm/byteorder.h>
+#include <asm/errno.h>
+
+typedef u32 phandle;
+typedef u32 ihandle;
+
+struct property {
+ char *name;
+ int length;
+ void *value;
+ struct property *next;
+ unsigned long _flags;
+ unsigned int unique_id;
+ struct bin_attribute attr;
+};
+
+#if defined(CONFIG_SPARC)
+struct of_irq_controller;
+#endif
+
+struct device_node {
+ const char *name;
+ const char *type;
+ phandle phandle;
+ const char *full_name;
+ struct fwnode_handle fwnode;
+
+ struct property *properties;
+ struct property *deadprops; /* removed properties */
+ struct device_node *parent;
+ struct device_node *child;
+ struct device_node *sibling;
+ struct kobject kobj;
+ unsigned long _flags;
+ void *data;
+#if defined(CONFIG_SPARC)
+ const char *path_component_name;
+ unsigned int unique_id;
+ struct of_irq_controller *irq_trans;
+#endif
+};
+
+#define MAX_PHANDLE_ARGS 16
+struct of_phandle_args {
+ struct device_node *np;
+ int args_count;
+ uint32_t args[MAX_PHANDLE_ARGS];
+};
+
+struct of_reconfig_data {
+ struct device_node *dn;
+ struct property *prop;
+ struct property *old_prop;
+};
+
+/* initialize a node */
+extern struct kobj_type of_node_ktype;
+static inline void of_node_init(struct device_node *node)
+{
+ kobject_init(&node->kobj, &of_node_ktype);
+ node->fwnode.type = FWNODE_OF;
+}
+
+/* true when node is initialized */
+static inline int of_node_is_initialized(struct device_node *node)
+{
+ return node && node->kobj.state_initialized;
+}
+
+/* true when node is attached (i.e. present on sysfs) */
+static inline int of_node_is_attached(struct device_node *node)
+{
+ return node && node->kobj.state_in_sysfs;
+}
+
+#ifdef CONFIG_OF_DYNAMIC
+extern struct device_node *of_node_get(struct device_node *node);
+extern void of_node_put(struct device_node *node);
+#else /* CONFIG_OF_DYNAMIC */
+/* Dummy ref counting routines - to be implemented later */
+static inline struct device_node *of_node_get(struct device_node *node)
+{
+ return node;
+}
+static inline void of_node_put(struct device_node *node) { }
+#endif /* !CONFIG_OF_DYNAMIC */
+
+/* Pointer for first entry in chain of all nodes. */
+extern struct device_node *of_root;
+extern struct device_node *of_chosen;
+extern struct device_node *of_aliases;
+extern struct device_node *of_stdout;
+extern raw_spinlock_t devtree_lock;
+
+#ifdef CONFIG_OF
+void of_core_init(void);
+
+static inline bool is_of_node(struct fwnode_handle *fwnode)
+{
+ return fwnode && fwnode->type == FWNODE_OF;
+}
+
+static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+{
+ return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
+}
+
+static inline bool of_have_populated_dt(void)
+{
+ return of_root != NULL;
+}
+
+static inline bool of_node_is_root(const struct device_node *node)
+{
+ return node && (node->parent == NULL);
+}
+
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+ return test_bit(flag, &n->_flags);
+}
+
+static inline int of_node_test_and_set_flag(struct device_node *n,
+ unsigned long flag)
+{
+ return test_and_set_bit(flag, &n->_flags);
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+ set_bit(flag, &n->_flags);
+}
+
+static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
+{
+ clear_bit(flag, &n->_flags);
+}
+
+static inline int of_property_check_flag(struct property *p, unsigned long flag)
+{
+ return test_bit(flag, &p->_flags);
+}
+
+static inline void of_property_set_flag(struct property *p, unsigned long flag)
+{
+ set_bit(flag, &p->_flags);
+}
+
+static inline void of_property_clear_flag(struct property *p, unsigned long flag)
+{
+ clear_bit(flag, &p->_flags);
+}
+
+extern struct device_node *__of_find_all_nodes(struct device_node *prev);
+extern struct device_node *of_find_all_nodes(struct device_node *prev);
+
+/*
+ * OF address retrieval & translation
+ */
+
+/* Helper to read a big number; size is in cells (not bytes) */
+static inline u64 of_read_number(const __be32 *cell, int size)
+{
+ u64 r = 0;
+ while (size--)
+ r = (r << 32) | be32_to_cpu(*(cell++));
+ return r;
+}
+
+/* Like of_read_number, but we want an unsigned long result */
+static inline unsigned long of_read_ulong(const __be32 *cell, int size)
+{
+ /* toss away upper bits if unsigned long is smaller than u64 */
+ return of_read_number(cell, size);
+}
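+
+/*
+ * A minimal usage sketch, assuming a valid node pointer "np" whose "reg"
+ * property starts with a two-cell (64-bit) address; the variable names are
+ * made up for the example:
+ *
+ *	const __be32 *reg = of_get_property(np, "reg", NULL);
+ *	u64 base;
+ *
+ *	if (reg)
+ *		base = of_read_number(reg, 2);
+ */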
+
+#if defined(CONFIG_SPARC)
+#include <asm/prom.h>
+#endif
+
+/* Default #address and #size cells. Allow arch asm/prom.h to override */
+#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
+#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
+#endif
+
+/* Default string compare functions. Allow arch asm/prom.h to override */
+#if !defined(of_compat_cmp)
+#define of_compat_cmp(s1, s2, l) strcasecmp((s1), (s2))
+#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
+#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
+#endif
+
+/* flag descriptions */
+#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */
+#define OF_DETACHED 2 /* node has been detached from the device tree */
+#define OF_POPULATED 3 /* device already created for the node */
+#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */
+
+#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
+#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
+
+#define OF_BAD_ADDR ((u64)-1)
+
+static inline const char *of_node_full_name(const struct device_node *np)
+{
+ return np ? np->full_name : "<no-node>";
+}
+
+#define for_each_of_allnodes_from(from, dn) \
+ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn))
+#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn)
+extern struct device_node *of_find_node_by_name(struct device_node *from,
+ const char *name);
+extern struct device_node *of_find_node_by_type(struct device_node *from,
+ const char *type);
+extern struct device_node *of_find_compatible_node(struct device_node *from,
+ const char *type, const char *compat);
+extern struct device_node *of_find_matching_node_and_match(
+ struct device_node *from,
+ const struct of_device_id *matches,
+ const struct of_device_id **match);
+
+extern struct device_node *of_find_node_opts_by_path(const char *path,
+ const char **opts);
+static inline struct device_node *of_find_node_by_path(const char *path)
+{
+ return of_find_node_opts_by_path(path, NULL);
+}
+
+extern struct device_node *of_find_node_by_phandle(phandle handle);
+extern struct device_node *of_get_parent(const struct device_node *node);
+extern struct device_node *of_get_next_parent(struct device_node *node);
+extern struct device_node *of_get_next_child(const struct device_node *node,
+ struct device_node *prev);
+extern struct device_node *of_get_next_available_child(
+ const struct device_node *node, struct device_node *prev);
+
+extern struct device_node *of_get_child_by_name(const struct device_node *node,
+ const char *name);
+
+/* cache lookup */
+extern struct device_node *of_find_next_cache_node(const struct device_node *);
+extern struct device_node *of_find_node_with_property(
+ struct device_node *from, const char *prop_name);
+
+extern struct property *of_find_property(const struct device_node *np,
+ const char *name,
+ int *lenp);
+extern int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size);
+extern int of_property_read_u32_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u32 *out_value);
+extern int of_property_read_u8_array(const struct device_node *np,
+ const char *propname, u8 *out_values, size_t sz);
+extern int of_property_read_u16_array(const struct device_node *np,
+ const char *propname, u16 *out_values, size_t sz);
+extern int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values,
+ size_t sz);
+extern int of_property_read_u64(const struct device_node *np,
+ const char *propname, u64 *out_value);
+extern int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values,
+ size_t sz);
+
+extern int of_property_read_string(struct device_node *np,
+ const char *propname,
+ const char **out_string);
+extern int of_property_match_string(struct device_node *np,
+ const char *propname,
+ const char *string);
+extern int of_property_read_string_helper(struct device_node *np,
+ const char *propname,
+ const char **out_strs, size_t sz, int index);
+extern int of_device_is_compatible(const struct device_node *device,
+ const char *);
+extern bool of_device_is_available(const struct device_node *device);
+extern bool of_device_is_big_endian(const struct device_node *device);
+extern const void *of_get_property(const struct device_node *node,
+ const char *name,
+ int *lenp);
+extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+#define for_each_property_of_node(dn, pp) \
+ for (pp = dn->properties; pp != NULL; pp = pp->next)
+
+extern int of_n_addr_cells(struct device_node *np);
+extern int of_n_size_cells(struct device_node *np);
+extern const struct of_device_id *of_match_node(
+ const struct of_device_id *matches, const struct device_node *node);
+extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
+extern struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name,
+ int index);
+extern int of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name, const char *cells_name, int index,
+ struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cells_count, int index,
+ struct of_phandle_args *out_args);
+extern int of_count_phandle_with_args(const struct device_node *np,
+ const char *list_name, const char *cells_name);
+
+extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align));
+extern int of_alias_get_id(struct device_node *np, const char *stem);
+extern int of_alias_get_highest_id(const char *stem);
+
+extern int of_machine_is_compatible(const char *compat);
+
+extern int of_add_property(struct device_node *np, struct property *prop);
+extern int of_remove_property(struct device_node *np, struct property *prop);
+extern int of_update_property(struct device_node *np, struct property *newprop);
+
+/* For updating the device tree at runtime */
+#define OF_RECONFIG_ATTACH_NODE 0x0001
+#define OF_RECONFIG_DETACH_NODE 0x0002
+#define OF_RECONFIG_ADD_PROPERTY 0x0003
+#define OF_RECONFIG_REMOVE_PROPERTY 0x0004
+#define OF_RECONFIG_UPDATE_PROPERTY 0x0005
+
+extern int of_attach_node(struct device_node *);
+extern int of_detach_node(struct device_node *);
+
+#define of_match_ptr(_ptr) (_ptr)
+
+/*
+ * struct property *prop;
+ * const __be32 *p;
+ * u32 u;
+ *
+ * of_property_for_each_u32(np, "propname", prop, p, u)
+ * printk("U32 value: %x\n", u);
+ */
+const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
+ u32 *pu);
+/*
+ * struct property *prop;
+ * const char *s;
+ *
+ * of_property_for_each_string(np, "propname", prop, s)
+ * printk("String value: %s\n", s);
+ */
+const char *of_prop_next_string(struct property *prop, const char *cur);
+
+bool of_console_check(struct device_node *dn, char *name, int index);
+
+#else /* CONFIG_OF */
+
+static inline void of_core_init(void)
+{
+}
+
+static inline bool is_of_node(struct fwnode_handle *fwnode)
+{
+ return false;
+}
+
+static inline struct device_node *of_node(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline const char* of_node_full_name(const struct device_node *np)
+{
+ return "<no-node>";
+}
+
+static inline struct device_node *of_find_node_by_name(struct device_node *from,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_find_node_by_type(struct device_node *from,
+ const char *type)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_find_matching_node_and_match(
+ struct device_node *from,
+ const struct of_device_id *matches,
+ const struct of_device_id **match)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_find_node_by_path(const char *path)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_find_node_opts_by_path(const char *path,
+ const char **opts)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_get_parent(const struct device_node *node)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_get_next_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_get_next_available_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_find_node_with_property(
+ struct device_node *from, const char *prop_name)
+{
+ return NULL;
+}
+
+static inline bool of_have_populated_dt(void)
+{
+ return false;
+}
+
+static inline struct device_node *of_get_child_by_name(
+ const struct device_node *node,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline int of_device_is_compatible(const struct device_node *device,
+ const char *name)
+{
+ return 0;
+}
+
+static inline bool of_device_is_available(const struct device_node *device)
+{
+ return false;
+}
+
+static inline bool of_device_is_big_endian(const struct device_node *device)
+{
+ return false;
+}
+
+static inline struct property *of_find_property(const struct device_node *np,
+ const char *name,
+ int *lenp)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_find_compatible_node(
+ struct device_node *from,
+ const char *type,
+ const char *compat)
+{
+ return NULL;
+}
+
+static inline int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_u32_index(const struct device_node *np,
+ const char *propname, u32 index, u32 *out_value)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_u8_array(const struct device_node *np,
+ const char *propname, u8 *out_values, size_t sz)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_u16_array(const struct device_node *np,
+ const char *propname, u16 *out_values, size_t sz)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values, size_t sz)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_string(struct device_node *np,
+ const char *propname,
+ const char **out_string)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_string_helper(struct device_node *np,
+ const char *propname,
+ const char **out_strs, size_t sz, int index)
+{
+ return -ENOSYS;
+}
+
+static inline const void *of_get_property(const struct device_node *node,
+ const char *name,
+ int *lenp)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_get_cpu_node(int cpu,
+ unsigned int *thread)
+{
+ return NULL;
+}
+
+static inline int of_property_read_u64(const struct device_node *np,
+ const char *propname, u64 *out_value)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_match_string(struct device_node *np,
+ const char *propname,
+ const char *string)
+{
+ return -ENOSYS;
+}
+
+static inline struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name,
+ int index)
+{
+ return NULL;
+}
+
+static inline int of_parse_phandle_with_args(struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return -ENOSYS;
+}
+
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name, int cells_count, int index,
+ struct of_phandle_args *out_args)
+{
+ return -ENOSYS;
+}
+
+static inline int of_count_phandle_with_args(struct device_node *np,
+ const char *list_name,
+ const char *cells_name)
+{
+ return -ENOSYS;
+}
+
+static inline int of_alias_get_id(struct device_node *np, const char *stem)
+{
+ return -ENOSYS;
+}
+
+static inline int of_alias_get_highest_id(const char *stem)
+{
+ return -ENOSYS;
+}
+
+static inline int of_machine_is_compatible(const char *compat)
+{
+ return 0;
+}
+
+static inline bool of_console_check(const struct device_node *dn, const char *name, int index)
+{
+ return false;
+}
+
+static inline const __be32 *of_prop_next_u32(struct property *prop,
+ const __be32 *cur, u32 *pu)
+{
+ return NULL;
+}
+
+static inline const char *of_prop_next_string(struct property *prop,
+ const char *cur)
+{
+ return NULL;
+}
+
+static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
+{
+ return 0;
+}
+
+static inline int of_node_test_and_set_flag(struct device_node *n,
+ unsigned long flag)
+{
+ return 0;
+}
+
+static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
+{
+}
+
+static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
+{
+}
+
+static inline int of_property_check_flag(struct property *p, unsigned long flag)
+{
+ return 0;
+}
+
+static inline void of_property_set_flag(struct property *p, unsigned long flag)
+{
+}
+
+static inline void of_property_clear_flag(struct property *p, unsigned long flag)
+{
+}
+
+#define of_match_ptr(_ptr) NULL
+#define of_match_node(_matches, _node) NULL
+#endif /* CONFIG_OF */
+
+#if defined(CONFIG_OF) && defined(CONFIG_NUMA)
+extern int of_node_to_nid(struct device_node *np);
+#else
+static inline int of_node_to_nid(struct device_node *device)
+{
+ return NUMA_NO_NODE;
+}
+#endif
+
+static inline struct device_node *of_find_matching_node(
+ struct device_node *from,
+ const struct of_device_id *matches)
+{
+ return of_find_matching_node_and_match(from, matches, NULL);
+}
+
+/**
+ * of_property_count_u8_elems - Count the number of u8 elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u8 elements
+ * in it. Returns the number of elements on success, -EINVAL if the property
+ * does not exist or its length is not a multiple of u8, and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u8_elems(const struct device_node *np,
+ const char *propname)
+{
+ return of_property_count_elems_of_size(np, propname, sizeof(u8));
+}
+
+/**
+ * of_property_count_u16_elems - Count the number of u16 elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u16 elements
+ * in it. Returns the number of elements on success, -EINVAL if the property
+ * does not exist or its length is not a multiple of u16, and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u16_elems(const struct device_node *np,
+ const char *propname)
+{
+ return of_property_count_elems_of_size(np, propname, sizeof(u16));
+}
+
+/**
+ * of_property_count_u32_elems - Count the number of u32 elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u32 elements
+ * in it. Returns the number of elements on success, -EINVAL if the property
+ * does not exist or its length is not a multiple of u32, and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u32_elems(const struct device_node *np,
+ const char *propname)
+{
+ return of_property_count_elems_of_size(np, propname, sizeof(u32));
+}
+
+/**
+ * of_property_count_u64_elems - Count the number of u64 elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u64 elements
+ * in it. Returns the number of elements on success, -EINVAL if the property
+ * does not exist or its length is not a multiple of u64, and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u64_elems(const struct device_node *np,
+ const char *propname)
+{
+ return of_property_count_elems_of_size(np, propname, sizeof(u64));
+}
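+
+/*
+ * A minimal usage sketch for the element counters above, assuming a node
+ * "np", a hypothetical "brightness-levels" u32 property and a caller-provided
+ * "levels" buffer large enough for the result:
+ *
+ *	int n = of_property_count_u32_elems(np, "brightness-levels");
+ *
+ *	if (n > 0)
+ *		err = of_property_read_u32_array(np, "brightness-levels",
+ *						 levels, n);
+ */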
+
+/**
+ * of_property_read_string_array() - Read an array of strings from a multiple
+ * strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_strs: output array of string pointers.
+ * @sz: number of array elements to read.
+ *
+ * Search for a property in a device tree node and retrieve a list of
+ * null-terminated string values (pointers into the property data, not
+ * copies) from that property.
+ *
+ * If @out_strs is NULL, the number of strings in the property is returned.
+ */
+static inline int of_property_read_string_array(struct device_node *np,
+ const char *propname, const char **out_strs,
+ size_t sz)
+{
+ return of_property_read_string_helper(np, propname, out_strs, sz, 0);
+}
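+
+/*
+ * A minimal usage sketch, assuming a node "np" with a "clock-output-names"
+ * style string-list property. The returned pointers reference the property
+ * data itself, so the node must stay valid while they are in use:
+ *
+ *	const char *names[4];
+ *	int n = of_property_read_string_array(np, "clock-output-names",
+ *					      names, ARRAY_SIZE(names));
+ */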
+
+/**
+ * of_property_count_strings() - Find and return the number of strings from a
+ * multiple strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device tree node and retrieve the number of
+ * null-terminated strings contained in it. Returns the number of strings on
+ * success, -EINVAL if the property does not exist, -ENODATA if the property
+ * does not have a value, and -EILSEQ if a string is not null-terminated
+ * within the length of the property data.
+ */
+static inline int of_property_count_strings(struct device_node *np,
+ const char *propname)
+{
+ return of_property_read_string_helper(np, propname, NULL, 0, 0);
+}
+
+/**
+ * of_property_read_string_index() - Find and read a string from a multiple
+ * strings property.
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the string in the list of strings
+ * @out_string: pointer to null terminated return string, modified only if
+ * return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy) in the list of strings
+ * contained in that property.
+ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ * property does not have a value, and -EILSEQ if the string is not
+ * null-terminated within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+static inline int of_property_read_string_index(struct device_node *np,
+ const char *propname,
+ int index, const char **output)
+{
+ int rc = of_property_read_string_helper(np, propname, output, 1, index);
+ return rc < 0 ? rc : 0;
+}
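+
+/*
+ * A minimal usage sketch, assuming a node "np" with a "reg-names" style
+ * string-list property:
+ *
+ *	const char *name;
+ *
+ *	if (!of_property_read_string_index(np, "reg-names", 1, &name))
+ *		pr_info("second region: %s\n", name);
+ */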
+
+/**
+ * of_property_read_bool - Find a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ *
+ * Search for a property in a device node.
+ * Returns true if the property exists, false otherwise.
+ */
+static inline bool of_property_read_bool(const struct device_node *np,
+ const char *propname)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ return prop ? true : false;
+}
+
+static inline int of_property_read_u8(const struct device_node *np,
+ const char *propname,
+ u8 *out_value)
+{
+ return of_property_read_u8_array(np, propname, out_value, 1);
+}
+
+static inline int of_property_read_u16(const struct device_node *np,
+ const char *propname,
+ u16 *out_value)
+{
+ return of_property_read_u16_array(np, propname, out_value, 1);
+}
+
+static inline int of_property_read_u32(const struct device_node *np,
+ const char *propname,
+ u32 *out_value)
+{
+ return of_property_read_u32_array(np, propname, out_value, 1);
+}
+
+static inline int of_property_read_s32(const struct device_node *np,
+ const char *propname,
+ s32 *out_value)
+{
+ return of_property_read_u32(np, propname, (u32*) out_value);
+}
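+
+/*
+ * A minimal usage sketch for the boolean and scalar helpers, assuming a node
+ * "np" and hypothetical "wakeup-source"/"clock-frequency" properties. "freq"
+ * keeps its default if the property is absent, because the array readers
+ * modify the output only when a valid value is decoded:
+ *
+ *	u32 freq = 100000;
+ *	bool can_wake = of_property_read_bool(np, "wakeup-source");
+ *
+ *	of_property_read_u32(np, "clock-frequency", &freq);
+ */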
+
+#define of_property_for_each_u32(np, propname, prop, p, u) \
+ for (prop = of_find_property(np, propname, NULL), \
+ p = of_prop_next_u32(prop, NULL, &u); \
+ p; \
+ p = of_prop_next_u32(prop, p, &u))
+
+#define of_property_for_each_string(np, propname, prop, s) \
+ for (prop = of_find_property(np, propname, NULL), \
+ s = of_prop_next_string(prop, NULL); \
+ s; \
+ s = of_prop_next_string(prop, s))
+
+#define for_each_node_by_name(dn, name) \
+ for (dn = of_find_node_by_name(NULL, name); dn; \
+ dn = of_find_node_by_name(dn, name))
+#define for_each_node_by_type(dn, type) \
+ for (dn = of_find_node_by_type(NULL, type); dn; \
+ dn = of_find_node_by_type(dn, type))
+#define for_each_compatible_node(dn, type, compatible) \
+ for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
+ dn = of_find_compatible_node(dn, type, compatible))
+#define for_each_matching_node(dn, matches) \
+ for (dn = of_find_matching_node(NULL, matches); dn; \
+ dn = of_find_matching_node(dn, matches))
+#define for_each_matching_node_and_match(dn, matches, match) \
+ for (dn = of_find_matching_node_and_match(NULL, matches, match); \
+ dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
+#define for_each_child_of_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
+#define for_each_available_child_of_node(parent, child) \
+ for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+ child = of_get_next_available_child(parent, child))
+
+#define for_each_node_with_property(dn, prop_name) \
+ for (dn = of_find_node_with_property(NULL, prop_name); dn; \
+ dn = of_find_node_with_property(dn, prop_name))
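+
+/*
+ * A minimal iteration sketch, assuming a hypothetical "acme,uart" compatible
+ * string. The iterators drop the reference of the previous node on each
+ * step; when breaking out early, the current node must be released with
+ * of_node_put():
+ *
+ *	struct device_node *dn;
+ *
+ *	for_each_compatible_node(dn, NULL, "acme,uart")
+ *		pr_info("found %s\n", of_node_full_name(dn));
+ */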
+
+static inline int of_get_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
+#ifdef CONFIG_OF
+#define _OF_DECLARE(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __used __section(__##table##_of_table) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+#else
+#define _OF_DECLARE(table, name, compat, fn, fn_type) \
+ static const struct of_device_id __of_table_##name \
+ __attribute__((unused)) \
+ = { .compatible = compat, \
+ .data = (fn == (fn_type)NULL) ? fn : fn }
+#endif
+
+typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
+typedef void (*of_init_fn_1)(struct device_node *);
+
+#define OF_DECLARE_1(table, name, compat, fn) \
+ _OF_DECLARE(table, name, compat, fn, of_init_fn_1)
+#define OF_DECLARE_2(table, name, compat, fn) \
+ _OF_DECLARE(table, name, compat, fn, of_init_fn_2)
+
+/**
+ * struct of_changeset_entry - Holds a changeset entry
+ *
+ * @node: list_head for the log list
+ * @action: notifier action
+ * @np: pointer to the device node affected
+ * @prop: pointer to the property affected
+ * @old_prop: hold a pointer to the original property
+ *
+ * Every modification of the device tree during a changeset
+ * is held in a list of of_changeset_entry structures.
+ * That way we can recover from a partial application, or we can
+ * revert the changeset.
+ */
+struct of_changeset_entry {
+ struct list_head node;
+ unsigned long action;
+ struct device_node *np;
+ struct property *prop;
+ struct property *old_prop;
+};
+
+/**
+ * struct of_changeset - changeset tracker structure
+ *
+ * @entries: list_head for the changeset entries
+ *
+ * Changesets are a convenient way to apply bulk changes to the
+ * live tree. In case of an error, changes are rolled back.
+ * Changesets live on after initial application, and if not
+ * destroyed after use, they can be reverted in one single call.
+ */
+struct of_changeset {
+ struct list_head entries;
+};
+
+enum of_reconfig_change {
+ OF_RECONFIG_NO_CHANGE = 0,
+ OF_RECONFIG_CHANGE_ADD,
+ OF_RECONFIG_CHANGE_REMOVE,
+};
+
+#ifdef CONFIG_OF_DYNAMIC
+extern int of_reconfig_notifier_register(struct notifier_block *);
+extern int of_reconfig_notifier_unregister(struct notifier_block *);
+extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
+extern int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg);
+
+extern void of_changeset_init(struct of_changeset *ocs);
+extern void of_changeset_destroy(struct of_changeset *ocs);
+extern int of_changeset_apply(struct of_changeset *ocs);
+extern int of_changeset_revert(struct of_changeset *ocs);
+extern int of_changeset_action(struct of_changeset *ocs,
+ unsigned long action, struct device_node *np,
+ struct property *prop);
+
+static inline int of_changeset_attach_node(struct of_changeset *ocs,
+ struct device_node *np)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_ATTACH_NODE, np, NULL);
+}
+
+static inline int of_changeset_detach_node(struct of_changeset *ocs,
+ struct device_node *np)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_DETACH_NODE, np, NULL);
+}
+
+static inline int of_changeset_add_property(struct of_changeset *ocs,
+ struct device_node *np, struct property *prop)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_ADD_PROPERTY, np, prop);
+}
+
+static inline int of_changeset_remove_property(struct of_changeset *ocs,
+ struct device_node *np, struct property *prop)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_REMOVE_PROPERTY, np, prop);
+}
+
+static inline int of_changeset_update_property(struct of_changeset *ocs,
+ struct device_node *np, struct property *prop)
+{
+ return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
+}
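+
+/*
+ * A minimal changeset sketch, assuming a node "np" and a caller-built
+ * property "prop"; error handling and the locking expected by the changeset
+ * core are not shown:
+ *
+ *	struct of_changeset ocs;
+ *
+ *	of_changeset_init(&ocs);
+ *	err = of_changeset_update_property(&ocs, np, prop);
+ *	if (!err)
+ *		err = of_changeset_apply(&ocs);
+ */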
+#else /* CONFIG_OF_DYNAMIC */
+static inline int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_notify(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+static inline int of_reconfig_get_state_change(unsigned long action,
+ struct of_reconfig_data *arg)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+/* CONFIG_OF_RESOLVE api */
+extern int of_resolve_phandles(struct device_node *tree);
+
+/**
+ * of_device_is_system_power_controller - Tell if a "system-power-controller" property is present on device_node
+ * @np: Pointer to the given device_node
+ *
+ * Returns true if present, false otherwise.
+ */
+static inline bool of_device_is_system_power_controller(const struct device_node *np)
+{
+ return of_property_read_bool(np, "system-power-controller");
+}
+
+/**
+ * Overlay support
+ */
+
+#ifdef CONFIG_OF_OVERLAY
+
+/* ID based overlays; the API for external users */
+int of_overlay_create(struct device_node *tree);
+int of_overlay_destroy(int id);
+int of_overlay_destroy_all(void);
+
+#else
+
+static inline int of_overlay_create(struct device_node *tree)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy(int id)
+{
+ return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy_all(void)
+{
+ return -ENOTSUPP;
+}
+
+#endif
+
+#endif /* _LINUX_OF_H */
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
new file mode 100644
index 000000000..d88e81be6
--- /dev/null
+++ b/include/linux/of_address.h
@@ -0,0 +1,161 @@
+#ifndef __OF_ADDRESS_H
+#define __OF_ADDRESS_H
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+
+struct of_pci_range_parser {
+ struct device_node *node;
+ const __be32 *range;
+ const __be32 *end;
+ int np;
+ int pna;
+};
+
+struct of_pci_range {
+ u32 pci_space;
+ u64 pci_addr;
+ u64 cpu_addr;
+ u64 size;
+ u32 flags;
+};
+
+#define for_each_of_pci_range(parser, range) \
+ for (; of_pci_range_parser_one(parser, range);)
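+
+/*
+ * A minimal usage sketch, assuming "np" is a PCI host bridge node with a
+ * "ranges" property; error handling is omitted:
+ *
+ *	struct of_pci_range_parser parser;
+ *	struct of_pci_range range;
+ *
+ *	if (!of_pci_range_parser_init(&parser, np))
+ *		for_each_of_pci_range(&parser, &range)
+ *			pr_info("cpu %llx -> pci %llx (%llx bytes)\n",
+ *				range.cpu_addr, range.pci_addr, range.size);
+ */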
+
+/* Translate a DMA address from device space to CPU space */
+extern u64 of_translate_dma_address(struct device_node *dev,
+ const __be32 *in_addr);
+
+#ifdef CONFIG_OF_ADDRESS
+extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+extern struct device_node *of_find_matching_node_by_address(
+ struct device_node *from,
+ const struct of_device_id *matches,
+ u64 base_address);
+extern void __iomem *of_iomap(struct device_node *device, int index);
+
+/* Extract an address from a device, returns the region size and
+ * the address space flags too. The PCI version uses a BAR number
+ * instead of an absolute index.
+ */
+extern const __be32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags);
+
+extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
+extern unsigned long pci_address_to_pio(phys_addr_t addr);
+extern phys_addr_t pci_pio_to_address(unsigned long pio);
+
+extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node);
+extern struct of_pci_range *of_pci_range_parser_one(
+ struct of_pci_range_parser *parser,
+ struct of_pci_range *range);
+extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
+ u64 *paddr, u64 *size);
+extern bool of_dma_is_coherent(struct device_node *np);
+#else /* CONFIG_OF_ADDRESS */
+static inline struct device_node *of_find_matching_node_by_address(
+ struct device_node *from,
+ const struct of_device_id *matches,
+ u64 base_address)
+{
+ return NULL;
+}
+
+static inline const __be32 *of_get_address(struct device_node *dev, int index,
+ u64 *size, unsigned int *flags)
+{
+ return NULL;
+}
+
+static inline phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+ return 0;
+}
+
+static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ return -1;
+}
+
+static inline struct of_pci_range *of_pci_range_parser_one(
+ struct of_pci_range_parser *parser,
+ struct of_pci_range *range)
+{
+ return NULL;
+}
+
+static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
+ u64 *paddr, u64 *size)
+{
+ return -ENODEV;
+}
+
+static inline bool of_dma_is_coherent(struct device_node *np)
+{
+ return false;
+}
+#endif /* CONFIG_OF_ADDRESS */
+
+#ifdef CONFIG_OF
+extern int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+void __iomem *of_iomap(struct device_node *node, int index);
+void __iomem *of_io_request_and_map(struct device_node *device,
+ int index, const char *name);
+#else
+
+#include <linux/io.h>
+
+static inline int of_address_to_resource(struct device_node *dev, int index,
+ struct resource *r)
+{
+ return -EINVAL;
+}
+
+static inline void __iomem *of_iomap(struct device_node *device, int index)
+{
+ return NULL;
+}
+
+static inline void __iomem *of_io_request_and_map(struct device_node *device,
+ int index, const char *name)
+{
+ return IOMEM_ERR_PTR(-EINVAL);
+}
+#endif
+
+#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
+extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
+ u64 *size, unsigned int *flags);
+extern int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r);
+extern int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res);
+#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
+static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
+ struct resource *r)
+{
+ return -ENOSYS;
+}
+
+static inline const __be32 *of_get_pci_address(struct device_node *dev,
+ int bar_no, u64 *size, unsigned int *flags)
+{
+ return NULL;
+}
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
+
+#endif /* __OF_ADDRESS_H */
+
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
new file mode 100644
index 000000000..22801b10c
--- /dev/null
+++ b/include/linux/of_device.h
@@ -0,0 +1,98 @@
+#ifndef _LINUX_OF_DEVICE_H
+#define _LINUX_OF_DEVICE_H
+
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h> /* temporary until merge */
+
+#include <linux/of.h>
+#include <linux/mod_devicetable.h>
+
+struct device;
+
+#ifdef CONFIG_OF
+extern const struct of_device_id *of_match_device(
+ const struct of_device_id *matches, const struct device *dev);
+extern void of_device_make_bus_id(struct device *dev);
+
+/**
+ * of_driver_match_device - Tell if a driver's of_match_table matches a device.
+ * @dev: the device structure to match against
+ * @drv: the device_driver structure to test
+ */
+static inline int of_driver_match_device(struct device *dev,
+ const struct device_driver *drv)
+{
+ return of_match_device(drv->of_match_table, dev) != NULL;
+}
+
+extern struct platform_device *of_dev_get(struct platform_device *dev);
+extern void of_dev_put(struct platform_device *dev);
+
+extern int of_device_add(struct platform_device *pdev);
+extern int of_device_register(struct platform_device *ofdev);
+extern void of_device_unregister(struct platform_device *ofdev);
+
+extern ssize_t of_device_get_modalias(struct device *dev,
+ char *str, ssize_t len);
+
+extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
+extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env);
+
+static inline void of_device_node_put(struct device *dev)
+{
+ of_node_put(dev->of_node);
+}
+
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+ struct device *cpu_dev;
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return NULL;
+ return of_node_get(cpu_dev->of_node);
+}
+
+void of_dma_configure(struct device *dev, struct device_node *np);
+#else /* CONFIG_OF */
+
+static inline int of_driver_match_device(struct device *dev,
+ struct device_driver *drv)
+{
+ return 0;
+}
+
+static inline void of_device_uevent(struct device *dev,
+ struct kobj_uevent_env *env) { }
+
+static inline int of_device_get_modalias(struct device *dev,
+ char *str, ssize_t len)
+{
+ return -ENODEV;
+}
+
+static inline int of_device_uevent_modalias(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ return -ENODEV;
+}
+
+static inline void of_device_node_put(struct device *dev) { }
+
+static inline const struct of_device_id *__of_match_device(
+ const struct of_device_id *matches, const struct device *dev)
+{
+ return NULL;
+}
+#define of_match_device(matches, dev) \
+ __of_match_device(of_match_ptr(matches), (dev))
+
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+ return NULL;
+}
+static inline void of_dma_configure(struct device *dev, struct device_node *np)
+{}
+#endif /* CONFIG_OF */
+
+#endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
new file mode 100644
index 000000000..56bc026c1
--- /dev/null
+++ b/include/linux/of_dma.h
@@ -0,0 +1,75 @@
+/*
+ * OF helpers for DMA request / controller
+ *
+ * Based on of_gpio.h
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OF_DMA_H
+#define __LINUX_OF_DMA_H
+
+#include <linux/of.h>
+#include <linux/dmaengine.h>
+
+struct device_node;
+
+struct of_dma {
+ struct list_head of_dma_controllers;
+ struct device_node *of_node;
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *);
+ void *of_dma_data;
+};
+
+struct of_dma_filter_info {
+ dma_cap_mask_t dma_cap;
+ dma_filter_fn filter_fn;
+};
+
+#ifdef CONFIG_OF
+extern int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data);
+extern void of_dma_controller_free(struct device_node *np);
+extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ const char *name);
+extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma);
+extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma);
+#else
+static inline int of_dma_controller_register(struct device_node *np,
+ struct dma_chan *(*of_dma_xlate)
+ (struct of_phandle_args *, struct of_dma *),
+ void *data)
+{
+ return -ENODEV;
+}
+
+static inline void of_dma_controller_free(struct device_node *np)
+{
+}
+
+static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ return NULL;
+}
+
+#define of_dma_xlate_by_chan_id NULL
+
+#endif
+
+#endif /* __LINUX_OF_DMA_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
new file mode 100644
index 000000000..587ee5079
--- /dev/null
+++ b/include/linux/of_fdt.h
@@ -0,0 +1,100 @@
+/*
+ * Definitions for working with the Flattened Device Tree data format
+ *
+ * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
+ * benh@kernel.crashing.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_OF_FDT_H
+#define _LINUX_OF_FDT_H
+
+#include <linux/types.h>
+#include <linux/init.h>
+
+/* Definitions used by the flattened device tree */
+#define OF_DT_HEADER 0xd00dfeed /* marker */
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_OF_FLATTREE)
+
+struct device_node;
+
+/* For scanning an arbitrary device-tree at any time */
+extern char *of_fdt_get_string(const void *blob, u32 offset);
+extern void *of_fdt_get_property(const void *blob,
+ unsigned long node,
+ const char *name,
+ int *size);
+extern int of_fdt_is_compatible(const void *blob,
+ unsigned long node,
+ const char *compat);
+extern bool of_fdt_is_big_endian(const void *blob,
+ unsigned long node);
+extern int of_fdt_match(const void *blob, unsigned long node,
+ const char *const *compat);
+extern void of_fdt_unflatten_tree(unsigned long *blob,
+ struct device_node **mynodes);
+
+/* TBD: Temporary export of fdt globals - remove when code fully merged */
+extern int __initdata dt_root_addr_cells;
+extern int __initdata dt_root_size_cells;
+extern void *initial_boot_params;
+
+extern char __dtb_start[];
+extern char __dtb_end[];
+
+/* For scanning the flat device-tree at boot time */
+extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
+ int depth, void *data),
+ void *data);
+extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
+ int *size);
+extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
+extern int of_flat_dt_match(unsigned long node, const char *const *matches);
+extern unsigned long of_get_flat_dt_root(void);
+extern int of_get_flat_dt_size(void);
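+
+/*
+ * A minimal early-boot scan sketch; the callback name is made up for the
+ * example and a non-zero return value stops the walk:
+ *
+ *	static int __init find_chosen(unsigned long node, const char *uname,
+ *				      int depth, void *data)
+ *	{
+ *		return depth == 1 && !strcmp(uname, "chosen");
+ *	}
+ *
+ *	of_scan_flat_dt(find_chosen, NULL);
+ */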
+
+extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
+ int depth, void *data);
+extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
+ int depth, void *data);
+extern void early_init_fdt_scan_reserved_mem(void);
+extern void early_init_dt_add_memory_arch(u64 base, u64 size);
+extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+ bool no_map);
+extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align);
+extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
+
+/* Early flat tree scan hooks */
+extern int early_init_dt_scan_root(unsigned long node, const char *uname,
+ int depth, void *data);
+
+extern bool early_init_dt_scan(void *params);
+extern bool early_init_dt_verify(void *params);
+extern void early_init_dt_scan_nodes(void);
+
+extern const char *of_flat_dt_get_machine_name(void);
+extern const void *of_flat_dt_match_machine(const void *default_match,
+ const void * (*get_next_compat)(const char * const**));
+
+/* Other Prototypes */
+extern void unflatten_device_tree(void);
+extern void unflatten_and_copy_device_tree(void);
+extern void early_init_devtree(void *);
+extern void early_get_first_memblock_info(void *, phys_addr_t *);
+extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern void of_fdt_limit_memory(int limit);
+#else /* CONFIG_OF_FLATTREE */
+static inline void early_init_fdt_scan_reserved_mem(void) {}
+static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
+static inline void unflatten_device_tree(void) {}
+static inline void unflatten_and_copy_device_tree(void) {}
+#endif /* CONFIG_OF_FLATTREE */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_OF_FDT_H */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
new file mode 100644
index 000000000..69dbe312b
--- /dev/null
+++ b/include/linux/of_gpio.h
@@ -0,0 +1,154 @@
+/*
+ * OF helpers for the GPIO API
+ *
+ * Copyright (c) 2007-2008 MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_OF_GPIO_H
+#define __LINUX_OF_GPIO_H
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+
+struct device_node;
+
+/*
+ * These are Linux-specific flags. By default the controller's and Linux's
+ * mappings match, but GPIO controllers are free to translate their own flags
+ * to Linux-specific ones in their .xlate callback, though a 1:1 mapping is
+ * recommended.
+ */
+enum of_gpio_flags {
+ OF_GPIO_ACTIVE_LOW = 0x1,
+};
+
+#ifdef CONFIG_OF_GPIO
+
+/*
+ * OF GPIO chip for memory mapped banks
+ */
+struct of_mm_gpio_chip {
+ struct gpio_chip gc;
+ void (*save_regs)(struct of_mm_gpio_chip *mm_gc);
+ void __iomem *regs;
+};
+
+static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct of_mm_gpio_chip, gc);
+}
+
+extern int of_get_named_gpio_flags(struct device_node *np,
+ const char *list_name, int index, enum of_gpio_flags *flags);
+
+extern int of_mm_gpiochip_add(struct device_node *np,
+ struct of_mm_gpio_chip *mm_gc);
+extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
+
+extern void of_gpiochip_add(struct gpio_chip *gc);
+extern void of_gpiochip_remove(struct gpio_chip *gc);
+extern int of_gpio_simple_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags);
+
+#else /* CONFIG_OF_GPIO */
+
+/* Drivers may not strictly depend on the GPIO support, so let them link. */
+static inline int of_get_named_gpio_flags(struct device_node *np,
+ const char *list_name, int index, enum of_gpio_flags *flags)
+{
+ return -ENOSYS;
+}
+
+static inline int of_gpio_simple_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ return -ENOSYS;
+}
+
+static inline void of_gpiochip_add(struct gpio_chip *gc) { }
+static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
+
+#endif /* CONFIG_OF_GPIO */
+
+/**
+ * of_gpio_named_count() - Count GPIOs for a device
+ * @np: device node to count GPIOs for
+ * @propname: property name containing gpio specifier(s)
+ *
+ * The function returns the count of GPIOs specified for a node.
+ * Note that empty GPIO specifiers count too. Returns either
+ * the number of GPIOs defined in the property,
+ * -EINVAL for an incorrectly formed gpios property, or
+ * -ENOENT for a missing gpios property.
+ *
+ * Example:
+ * gpios = <0
+ * &gpio1 1 2
+ * 0
+ * &gpio2 3 4>;
+ *
+ * The above example defines four GPIOs, two of which are not specified.
+ * This function will return '4'.
+ */
+static inline int of_gpio_named_count(struct device_node *np, const char* propname)
+{
+ return of_count_phandle_with_args(np, propname, "#gpio-cells");
+}
+
+/**
+ * of_gpio_count() - Count GPIOs for a device
+ * @np: device node to count GPIOs for
+ *
+ * Same as of_gpio_named_count, but hard coded to use the 'gpios' property
+ */
+static inline int of_gpio_count(struct device_node *np)
+{
+ return of_gpio_named_count(np, "gpios");
+}
+
+static inline int of_get_gpio_flags(struct device_node *np, int index,
+ enum of_gpio_flags *flags)
+{
+ return of_get_named_gpio_flags(np, "gpios", index, flags);
+}
+
+/**
+ * of_get_named_gpio() - Get a GPIO number to use with GPIO API
+ * @np: device node to get GPIO from
+ * @propname: Name of property containing gpio specifier(s)
+ * @index: index of the GPIO
+ *
+ * Returns the GPIO number to use with the Linux generic GPIO API, or a
+ * negative errno value on error.
+ */
+static inline int of_get_named_gpio(struct device_node *np,
+ const char *propname, int index)
+{
+ return of_get_named_gpio_flags(np, propname, index, NULL);
+}
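+
+/*
+ * A minimal usage sketch, assuming a node "np" with a hypothetical
+ * "reset-gpios" property; returning -EPROBE_DEFER lets the caller handle a
+ * GPIO controller that is not yet available:
+ *
+ *	int gpio = of_get_named_gpio(np, "reset-gpios", 0);
+ *
+ *	if (gpio == -EPROBE_DEFER)
+ *		return gpio;
+ *	if (!gpio_is_valid(gpio))
+ *		return -ENODEV;
+ */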
+
+/**
+ * of_get_gpio() - Get a GPIO number to use with GPIO API
+ * @np: device node to get GPIO from
+ * @index: index of the GPIO
+ *
+ * Returns the GPIO number to use with the Linux generic GPIO API, or a
+ * negative errno value on error.
+ */
+static inline int of_get_gpio(struct device_node *np, int index)
+{
+ return of_get_gpio_flags(np, index, NULL);
+}
+
+#endif /* __LINUX_OF_GPIO_H */
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
new file mode 100644
index 000000000..7bc92e050
--- /dev/null
+++ b/include/linux/of_graph.h
@@ -0,0 +1,86 @@
+/*
+ * OF graph binding parsing helpers
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OF_GRAPH_H
+#define __LINUX_OF_GRAPH_H
+
+#include <linux/types.h>
+
+/**
+ * struct of_endpoint - the OF graph endpoint data structure
+ * @port: identifier (value of reg property) of a port this endpoint belongs to
+ * @id: identifier (value of reg property) of this endpoint
+ * @local_node: pointer to device_node of this endpoint
+ */
+struct of_endpoint {
+ unsigned int port;
+ unsigned int id;
+ const struct device_node *local_node;
+};
+
+/**
+ * for_each_endpoint_of_node - iterate over every endpoint in a device node
+ * @parent: parent device node containing ports and endpoints
+ * @child: loop variable pointing to the current endpoint node
+ *
+ * When breaking out of the loop, of_node_put(child) has to be called manually.
+ */
+#define for_each_endpoint_of_node(parent, child) \
+ for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \
+ child = of_graph_get_next_endpoint(parent, child))
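+
+/*
+ * A minimal iteration sketch, assuming "np" is a node carrying a graph of
+ * ports and endpoints:
+ *
+ *	struct device_node *ep;
+ *	struct of_endpoint endpoint;
+ *
+ *	for_each_endpoint_of_node(np, ep)
+ *		of_graph_parse_endpoint(ep, &endpoint);
+ */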
+
+#ifdef CONFIG_OF
+int of_graph_parse_endpoint(const struct device_node *node,
+ struct of_endpoint *endpoint);
+struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id);
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+ struct device_node *previous);
+struct device_node *of_graph_get_remote_port_parent(
+ const struct device_node *node);
+struct device_node *of_graph_get_remote_port(const struct device_node *node);
+#else
+
+static inline int of_graph_parse_endpoint(const struct device_node *node,
+ struct of_endpoint *endpoint)
+{
+ return -ENOSYS;
+}
+
+static inline struct device_node *of_graph_get_port_by_id(
+ struct device_node *node, u32 id)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_next_endpoint(
+ const struct device_node *parent,
+ struct device_node *previous)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_remote_port_parent(
+ const struct device_node *node)
+{
+ return NULL;
+}
+
+static inline struct device_node *of_graph_get_remote_port(
+ const struct device_node *node)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_OF */
+
+#endif /* __LINUX_OF_GRAPH_H */
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
new file mode 100644
index 000000000..ffbe4707d
--- /dev/null
+++ b/include/linux/of_iommu.h
@@ -0,0 +1,46 @@
+#ifndef __OF_IOMMU_H
+#define __OF_IOMMU_H
+
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_OF_IOMMU
+
+extern int of_get_dma_window(struct device_node *dn, const char *prefix,
+ int index, unsigned long *busno, dma_addr_t *addr,
+ size_t *size);
+
+extern void of_iommu_init(void);
+extern struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np);
+
+#else
+
+static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
+ int index, unsigned long *busno, dma_addr_t *addr,
+ size_t *size)
+{
+ return -EINVAL;
+}
+
+static inline void of_iommu_init(void) { }
+static inline struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_OF_IOMMU */
+
+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
+struct iommu_ops *of_iommu_get_ops(struct device_node *np);
+
+extern struct of_device_id __iommu_of_table;
+
+typedef int (*of_iommu_init_fn)(struct device_node *);
+
+#define IOMMU_OF_DECLARE(name, compat, fn) \
+ _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
+
+#endif /* __OF_IOMMU_H */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
new file mode 100644
index 000000000..d884929a7
--- /dev/null
+++ b/include/linux/of_irq.h
@@ -0,0 +1,91 @@
+#ifndef __OF_IRQ_H
+#define __OF_IRQ_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+
+typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
+
+/*
+ * Workarounds only applied to 32bit powermac machines
+ */
+#define OF_IMAP_OLDWORLD_MAC 0x00000001
+#define OF_IMAP_NO_PHANDLE 0x00000002
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
+extern unsigned int of_irq_workarounds;
+extern struct device_node *of_irq_dflt_pic;
+extern int of_irq_parse_oldworld(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
+#else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
+#define of_irq_workarounds (0)
+#define of_irq_dflt_pic (NULL)
+static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+ struct of_phandle_args *out_irq)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
+
+extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
+extern int of_irq_parse_one(struct device_node *device, int index,
+ struct of_phandle_args *out_irq);
+extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
+extern int of_irq_to_resource(struct device_node *dev, int index,
+ struct resource *r);
+
+extern void of_irq_init(const struct of_device_id *matches);
+
+#ifdef CONFIG_OF_IRQ
+extern int of_irq_count(struct device_node *dev);
+extern int of_irq_get(struct device_node *dev, int index);
+extern int of_irq_get_byname(struct device_node *dev, const char *name);
+extern int of_irq_to_resource_table(struct device_node *dev,
+ struct resource *res, int nr_irqs);
+#else
+static inline int of_irq_count(struct device_node *dev)
+{
+ return 0;
+}
+static inline int of_irq_get(struct device_node *dev, int index)
+{
+ return 0;
+}
+static inline int of_irq_get_byname(struct device_node *dev, const char *name)
+{
+ return 0;
+}
+static inline int of_irq_to_resource_table(struct device_node *dev,
+ struct resource *res, int nr_irqs)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF-enabled platforms, but SPARC
+ * implements it differently. However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
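+
+/*
+ * A minimal usage sketch, assuming a node "np" with at least one interrupt
+ * specifier; request_irq() comes from <linux/interrupt.h> and the handler
+ * and cookie are made up for the example:
+ *
+ *	unsigned int virq = irq_of_parse_and_map(np, 0);
+ *
+ *	if (virq)
+ *		err = request_irq(virq, my_handler, 0, "my-device", priv);
+ */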
+
+#else /* !CONFIG_OF */
+static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
+ int index)
+{
+ return 0;
+}
+
+static inline void *of_irq_find_parent(struct device_node *child)
+{
+ return NULL;
+}
+#endif /* !CONFIG_OF */
+
+#endif /* __OF_IRQ_H */
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
new file mode 100644
index 000000000..8f2237eb3
--- /dev/null
+++ b/include/linux/of_mdio.h
@@ -0,0 +1,87 @@
+/*
+ * OF helpers for the MDIO (Ethernet PHY) API
+ *
+ * Copyright (c) 2009 Secret Lab Technologies, Ltd.
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_OF_MDIO_H
+#define __LINUX_OF_MDIO_H
+
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#ifdef CONFIG_OF
+extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
+extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
+extern struct phy_device *of_phy_connect(struct net_device *dev,
+ struct device_node *phy_np,
+ void (*hndlr)(struct net_device *),
+ u32 flags, phy_interface_t iface);
+struct phy_device *of_phy_attach(struct net_device *dev,
+ struct device_node *phy_np, u32 flags,
+ phy_interface_t iface);
+
+extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np);
+
+#else /* CONFIG_OF */
+static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+{
+ /*
+ * Fall back to the non-DT function to register a bus.
+ * This way, we don't have to keep compat bits around in drivers.
+ */
+
+ return mdiobus_register(mdio);
+}
+
+static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
+{
+ return NULL;
+}
+
+static inline struct phy_device *of_phy_connect(struct net_device *dev,
+ struct device_node *phy_np,
+ void (*hndlr)(struct net_device *),
+ u32 flags, phy_interface_t iface)
+{
+ return NULL;
+}
+
+static inline struct phy_device *of_phy_attach(struct net_device *dev,
+ struct device_node *phy_np,
+ u32 flags, phy_interface_t iface)
+{
+ return NULL;
+}
+
+static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
+{
+ return NULL;
+}
+
+static inline int of_mdio_parse_addr(struct device *dev,
+ const struct device_node *np)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_OF */
+
+#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
+extern int of_phy_register_fixed_link(struct device_node *np);
+extern bool of_phy_is_fixed_link(struct device_node *np);
+#else
+static inline int of_phy_register_fixed_link(struct device_node *np)
+{
+ return -ENOSYS;
+}
+static inline bool of_phy_is_fixed_link(struct device_node *np)
+{
+ return false;
+}
+#endif
+
+
+#endif /* __LINUX_OF_MDIO_H */
diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h
new file mode 100644
index 000000000..e266caa36
--- /dev/null
+++ b/include/linux/of_mtd.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ *
+ * OF helpers for mtd.
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_OF_MTD_H
+#define __LINUX_OF_MTD_H
+
+#ifdef CONFIG_OF_MTD
+
+#include <linux/of.h>
+int of_get_nand_ecc_mode(struct device_node *np);
+int of_get_nand_ecc_step_size(struct device_node *np);
+int of_get_nand_ecc_strength(struct device_node *np);
+int of_get_nand_bus_width(struct device_node *np);
+bool of_get_nand_on_flash_bbt(struct device_node *np);
+
+#else /* CONFIG_OF_MTD */
+
+static inline int of_get_nand_ecc_mode(struct device_node *np)
+{
+ return -ENOSYS;
+}
+
+static inline int of_get_nand_ecc_step_size(struct device_node *np)
+{
+ return -ENOSYS;
+}
+
+static inline int of_get_nand_ecc_strength(struct device_node *np)
+{
+ return -ENOSYS;
+}
+
+static inline int of_get_nand_bus_width(struct device_node *np)
+{
+ return -ENOSYS;
+}
+
+static inline bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+ return false;
+}
+
+#endif /* CONFIG_OF_MTD */
+
+#endif /* __LINUX_OF_MTD_H */
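A NAND controller driver would typically call these helpers while parsing its node and fall back to driver defaults when a property is missing. A hedged sketch (the config struct and default values are invented for illustration):

#include <linux/of.h>
#include <linux/of_mtd.h>

/* Hypothetical driver-local configuration. */
struct example_nand_cfg {
	int	ecc_mode;
	int	bus_width;
	bool	flash_bbt;
};

static void example_nand_parse_dt(struct device_node *np,
				  struct example_nand_cfg *cfg)
{
	int ret;

	ret = of_get_nand_ecc_mode(np);
	cfg->ecc_mode = ret < 0 ? 0 : ret;	/* default on error/absence */

	ret = of_get_nand_bus_width(np);
	cfg->bus_width = ret < 0 ? 8 : ret;

	cfg->flash_bbt = of_get_nand_on_flash_bbt(np);
}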
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
new file mode 100644
index 000000000..9cd72aab7
--- /dev/null
+++ b/include/linux/of_net.h
@@ -0,0 +1,34 @@
+/*
+ * OF helpers for network devices.
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_OF_NET_H
+#define __LINUX_OF_NET_H
+
+#ifdef CONFIG_OF_NET
+#include <linux/of.h>
+
+struct net_device;
+extern int of_get_phy_mode(struct device_node *np);
+extern const void *of_get_mac_address(struct device_node *np);
+extern struct net_device *of_find_net_device_by_node(struct device_node *np);
+#else
+static inline int of_get_phy_mode(struct device_node *np)
+{
+ return -ENODEV;
+}
+
+static inline const void *of_get_mac_address(struct device_node *np)
+{
+ return NULL;
+}
+
+static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __LINUX_OF_NET_H */
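The usual pattern in an Ethernet driver's probe path is sketched below; the helper name is hypothetical and the random-MAC fallback is this example's policy:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static int example_net_parse_dt(struct net_device *ndev,
				struct device_node *np, int *phy_mode)
{
	const void *mac = of_get_mac_address(np);

	if (mac)
		ether_addr_copy(ndev->dev_addr, mac);
	else
		eth_hw_addr_random(ndev);	/* no "mac-address" property */

	*phy_mode = of_get_phy_mode(np);	/* negative if unspecified */
	return *phy_mode < 0 ? *phy_mode : 0;
}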
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
new file mode 100644
index 000000000..29fd3fe1c
--- /dev/null
+++ b/include/linux/of_pci.h
@@ -0,0 +1,75 @@
+#ifndef __OF_PCI_H
+#define __OF_PCI_H
+
+#include <linux/pci.h>
+#include <linux/msi.h>
+
+struct pci_dev;
+struct of_phandle_args;
+struct device_node;
+
+#ifdef CONFIG_OF
+int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
+struct device_node *of_pci_find_child_device(struct device_node *parent,
+ unsigned int devfn);
+int of_pci_get_devfn(struct device_node *np);
+int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
+int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+int of_get_pci_domain_nr(struct device_node *node);
+void of_pci_dma_configure(struct pci_dev *pci_dev);
+#else
+static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
+{
+ return 0;
+}
+
+static inline struct device_node *of_pci_find_child_device(struct device_node *parent,
+ unsigned int devfn)
+{
+ return NULL;
+}
+
+static inline int of_pci_get_devfn(struct device_node *np)
+{
+ return -EINVAL;
+}
+
+static inline int
+of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return 0;
+}
+
+static inline int
+of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+{
+ return -EINVAL;
+}
+
+static inline int
+of_get_pci_domain_nr(struct device_node *node)
+{
+ return -1;
+}
+
+static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { }
+#endif
+
+#if defined(CONFIG_OF_ADDRESS)
+int of_pci_get_host_bridge_resources(struct device_node *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base);
+#endif
+
+#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
+int of_pci_msi_chip_add(struct msi_controller *chip);
+void of_pci_msi_chip_remove(struct msi_controller *chip);
+struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
+#else
+static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
+static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
+static inline struct msi_controller *
+of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
+#endif
+
+#endif
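Two pieces a DT-based PCI host driver typically wires up are sketched below; the defaults chosen when bus-range parsing fails are illustrative only:

#include <linux/of_pci.h>

/* Used as the host bridge's .map_irq hook for legacy INTx. */
static int example_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return of_irq_parse_and_map_pci(dev, slot, pin);
}

static void example_get_bus_range(struct device_node *np,
				  struct resource *bus_range)
{
	if (of_pci_parse_bus_range(np, bus_range) < 0) {
		bus_range->start = 0;
		bus_range->end = 255;
		bus_range->flags = IORESOURCE_BUS;
	}
}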
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
new file mode 100644
index 000000000..7e09244bb
--- /dev/null
+++ b/include/linux/of_pdt.h
@@ -0,0 +1,44 @@
+/*
+ * Definitions for building a device tree by calling into the
+ * Open Firmware PROM.
+ *
+ * Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_OF_PDT_H
+#define _LINUX_OF_PDT_H
+
+/* overridable operations for calling into the PROM */
+struct of_pdt_ops {
+ /*
+ * buf should be 32 bytes; return 0 on success.
+ * If prev is NULL, the first property will be returned.
+ */
+ int (*nextprop)(phandle node, char *prev, char *buf);
+
+ /* for both functions, return proplen on success; -1 on error */
+ int (*getproplen)(phandle node, const char *prop);
+ int (*getproperty)(phandle node, const char *prop, char *buf,
+ int bufsize);
+
+ /* phandles are 0 if no child or sibling exists */
+ phandle (*getchild)(phandle parent);
+ phandle (*getsibling)(phandle node);
+
+ /* return 0 on success; fill in 'len' with number of bytes in path */
+ int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
+};
+
+extern void *prom_early_alloc(unsigned long size);
+
+/* for building the device tree */
+extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops);
+
+extern void (*of_pdt_build_more)(struct device_node *dp);
+
+#endif /* _LINUX_OF_PDT_H */
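A platform using this interface wraps its PROM entry points in of_pdt_ops and then asks the core to walk the firmware tree. Everything below named example_* is a placeholder for real firmware calls:

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pdt.h>

static int example_nextprop(phandle node, char *prev, char *buf)
{
	return -1;		/* ask the PROM for the next property name */
}

static int example_getproplen(phandle node, const char *prop)
{
	return -1;		/* ask the PROM for the property length */
}

static int example_getproperty(phandle node, const char *prop, char *buf,
			       int bufsize)
{
	return -1;		/* copy the property value from the PROM */
}

static phandle example_getchild(phandle parent) { return 0; }
static phandle example_getsibling(phandle node) { return 0; }

static struct of_pdt_ops example_pdt_ops = {
	.nextprop	= example_nextprop,
	.getproplen	= example_getproplen,
	.getproperty	= example_getproperty,
	.getchild	= example_getchild,
	.getsibling	= example_getsibling,
};

void __init example_build_devicetree(phandle root)
{
	of_pdt_build_devicetree(root, &example_pdt_ops);
}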
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
new file mode 100644
index 000000000..611a69114
--- /dev/null
+++ b/include/linux/of_platform.h
@@ -0,0 +1,93 @@
+#ifndef _LINUX_OF_PLATFORM_H
+#define _LINUX_OF_PLATFORM_H
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct of_dev_auxdata - lookup table entry for device names & platform_data
+ * @compatible: compatible value of node to match against node
+ * @phys_addr: Start address of registers to match against node
+ * @name: Name to assign for matching nodes
+ * @platform_data: platform_data to assign for matching nodes
+ *
+ * This lookup table allows the caller of of_platform_populate() to override
+ * the names of devices when creating devices from the device tree. The table
+ * should be terminated with an empty entry. It also allows the platform_data
+ * pointer to be set.
+ *
+ * The reason for this functionality is that some Linux infrastructure uses
+ * the device name to look up a specific device, but the Linux-specific names
+ * are not encoded into the device tree, so the kernel needs to provide specific
+ * values.
+ *
+ * Note: Using an auxdata lookup table should be considered a last resort when
+ * converting a platform to use the DT. Normally the automatically generated
+ * device name will not matter, and drivers should obtain data from the device
+ * node instead of from an anonymous platform_data pointer.
+ */
+struct of_dev_auxdata {
+ char *compatible;
+ resource_size_t phys_addr;
+ char *name;
+ void *platform_data;
+};
+
+/* Macro to simplify populating a lookup table */
+#define OF_DEV_AUXDATA(_compat,_phys,_name,_pdata) \
+ { .compatible = _compat, .phys_addr = _phys, .name = _name, \
+ .platform_data = _pdata }
+
+extern const struct of_device_id of_default_bus_match_table[];
+
+/* Platform drivers register/unregister */
+extern struct platform_device *of_device_alloc(struct device_node *np,
+ const char *bus_id,
+ struct device *parent);
+extern struct platform_device *of_find_device_by_node(struct device_node *np);
+
+/* Platform devices and busses creation */
+extern struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent);
+
+extern int of_platform_bus_probe(struct device_node *root,
+ const struct of_device_id *matches,
+ struct device *parent);
+#ifdef CONFIG_OF_ADDRESS
+extern int of_platform_populate(struct device_node *root,
+ const struct of_device_id *matches,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent);
+extern void of_platform_depopulate(struct device *parent);
+#else
+static inline int of_platform_populate(struct device_node *root,
+ const struct of_device_id *matches,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent)
+{
+ return -ENODEV;
+}
+static inline void of_platform_depopulate(struct device *parent) { }
+#endif
+
+#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
+extern void of_platform_register_reconfig_notifier(void);
+#else
+static inline void of_platform_register_reconfig_notifier(void) { }
+#endif
+
+#endif /* _LINUX_OF_PLATFORM_H */
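A board or SoC init sketch using the auxdata mechanism described above; the compatible string, address, device name and platform_data are invented for illustration:

#include <linux/init.h>
#include <linux/of_platform.h>

/* Hypothetical legacy platform_data carried over during DT conversion. */
static struct example_uart_pdata {
	int	id;
} example_uart_pdata = { .id = 0 };

static const struct of_dev_auxdata example_auxdata[] __initconst = {
	OF_DEV_AUXDATA("acme,example-uart", 0x48020000, "example-uart.0",
		       &example_uart_pdata),
	{ /* sentinel */ }
};

static int __init example_init_machine(void)
{
	/* NULL root means "start at the root node". */
	return of_platform_populate(NULL, of_default_bus_match_table,
				    example_auxdata, NULL);
}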
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
new file mode 100644
index 000000000..ad2f67054
--- /dev/null
+++ b/include/linux/of_reserved_mem.h
@@ -0,0 +1,49 @@
+#ifndef __OF_RESERVED_MEM_H
+#define __OF_RESERVED_MEM_H
+
+struct device;
+struct of_phandle_args;
+struct reserved_mem_ops;
+
+struct reserved_mem {
+ const char *name;
+ unsigned long fdt_node;
+ unsigned long phandle;
+ const struct reserved_mem_ops *ops;
+ phys_addr_t base;
+ phys_addr_t size;
+ void *priv;
+};
+
+struct reserved_mem_ops {
+ int (*device_init)(struct reserved_mem *rmem,
+ struct device *dev);
+ void (*device_release)(struct reserved_mem *rmem,
+ struct device *dev);
+};
+
+typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
+
+#define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+ _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
+
+#ifdef CONFIG_OF_RESERVED_MEM
+int of_reserved_mem_device_init(struct device *dev);
+void of_reserved_mem_device_release(struct device *dev);
+
+void fdt_init_reserved_mem(void);
+void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
+ phys_addr_t base, phys_addr_t size);
+#else
+static inline int of_reserved_mem_device_init(struct device *dev)
+{
+ return -ENOSYS;
+}
+static inline void of_reserved_mem_device_release(struct device *pdev) { }
+
+static inline void fdt_init_reserved_mem(void) { }
+static inline void fdt_reserved_mem_save_node(unsigned long node,
+ const char *uname, phys_addr_t base, phys_addr_t size) { }
+#endif
+
+#endif /* __OF_RESERVED_MEM_H */
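From a driver's point of view the API is just the init/release pair; a hedged sketch (treating a missing region as non-fatal is this example's policy, not something the API mandates):

#include <linux/device.h>
#include <linux/of_reserved_mem.h>

static int example_probe(struct device *dev)
{
	int ret = of_reserved_mem_device_init(dev);

	if (ret)
		dev_info(dev, "no dedicated reserved memory (%d), using defaults\n",
			 ret);
	/* ... allocate DMA buffers as usual ... */
	return 0;
}

static void example_teardown(struct device *dev)
{
	of_reserved_mem_device_release(dev);
}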
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
new file mode 100644
index 000000000..c2bbf672b
--- /dev/null
+++ b/include/linux/oid_registry.h
@@ -0,0 +1,98 @@
+/* ASN.1 Object identifier (OID) registry
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_OID_REGISTRY_H
+#define _LINUX_OID_REGISTRY_H
+
+#include <linux/types.h>
+
+/*
+ * OIDs are turned into these values if possible, or OID__NR if not held here.
+ *
+ * NOTE! Do not mess with the format of each line as this is read by
+ * build_OID_registry.pl to generate the data for look_up_OID().
+ */
+enum OID {
+ OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
+ OID_id_dsa, /* 1.2.840.10040.4.1 */
+ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
+ OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
+
+ /* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
+ OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
+ OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */
+ OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */
+ OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */
+ OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
+ OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
+ OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
+ OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */
+ OID_sha224WithRSAEncryption, /* 1.2.840.113549.1.1.14 */
+ /* PKCS#7 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-7(7)} */
+ OID_data, /* 1.2.840.113549.1.7.1 */
+ OID_signed_data, /* 1.2.840.113549.1.7.2 */
+ /* PKCS#9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-9(9)} */
+ OID_email_address, /* 1.2.840.113549.1.9.1 */
+ OID_content_type, /* 1.2.840.113549.1.9.3 */
+ OID_messageDigest, /* 1.2.840.113549.1.9.4 */
+ OID_signingTime, /* 1.2.840.113549.1.9.5 */
+ OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
+ OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */
+
+ /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */
+ OID_md2, /* 1.2.840.113549.2.2 */
+ OID_md4, /* 1.2.840.113549.2.4 */
+ OID_md5, /* 1.2.840.113549.2.5 */
+
+ /* Microsoft Authenticode & Software Publishing */
+ OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */
+ OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */
+ OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */
+ OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
+
+ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
+ OID_sha1, /* 1.3.14.3.2.26 */
+ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
+
+ /* Distinguished Name attribute IDs [RFC 2256] */
+ OID_commonName, /* 2.5.4.3 */
+ OID_surname, /* 2.5.4.4 */
+ OID_countryName, /* 2.5.4.6 */
+ OID_locality, /* 2.5.4.7 */
+ OID_stateOrProvinceName, /* 2.5.4.8 */
+ OID_organizationName, /* 2.5.4.10 */
+ OID_organizationUnitName, /* 2.5.4.11 */
+ OID_title, /* 2.5.4.12 */
+ OID_description, /* 2.5.4.13 */
+ OID_name, /* 2.5.4.41 */
+ OID_givenName, /* 2.5.4.42 */
+ OID_initials, /* 2.5.4.43 */
+ OID_generationalQualifier, /* 2.5.4.44 */
+
+ /* Certificate extension IDs */
+ OID_subjectKeyIdentifier, /* 2.5.29.14 */
+ OID_keyUsage, /* 2.5.29.15 */
+ OID_subjectAltName, /* 2.5.29.17 */
+ OID_issuerAltName, /* 2.5.29.18 */
+ OID_basicConstraints, /* 2.5.29.19 */
+ OID_crlDistributionPoints, /* 2.5.29.31 */
+ OID_certPolicies, /* 2.5.29.32 */
+ OID_authorityKeyIdentifier, /* 2.5.29.35 */
+ OID_extKeyUsage, /* 2.5.29.37 */
+
+ OID__NR
+};
+
+extern enum OID look_up_OID(const void *data, size_t datasize);
+extern int sprint_oid(const void *, size_t, char *, size_t);
+extern int sprint_OID(enum OID, char *, size_t);
+
+#endif /* _LINUX_OID_REGISTRY_H */
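A sketch of how a certificate or signature parser might use the registry; the buffer size and the pr_debug() reporting are illustrative:

#include <linux/kernel.h>
#include <linux/oid_registry.h>

static bool example_is_rsa(const void *oid_data, size_t oid_len)
{
	enum OID oid = look_up_OID(oid_data, oid_len);
	char buf[64];

	if (oid == OID__NR) {
		sprint_oid(oid_data, oid_len, buf, sizeof(buf));
		pr_debug("unrecognised OID %s\n", buf);
		return false;
	}
	return oid == OID_rsaEncryption;
}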
diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h
new file mode 100644
index 000000000..2925df3ce
--- /dev/null
+++ b/include/linux/olpc-ec.h
@@ -0,0 +1,42 @@
+#ifndef _LINUX_OLPC_EC_H
+#define _LINUX_OLPC_EC_H
+
+/* XO-1 EC commands */
+#define EC_FIRMWARE_REV 0x08
+#define EC_WRITE_SCI_MASK 0x1b
+#define EC_WAKE_UP_WLAN 0x24
+#define EC_WLAN_LEAVE_RESET 0x25
+#define EC_DCON_POWER_MODE 0x26
+#define EC_READ_EB_MODE 0x2a
+#define EC_SET_SCI_INHIBIT 0x32
+#define EC_SET_SCI_INHIBIT_RELEASE 0x34
+#define EC_WLAN_ENTER_RESET 0x35
+#define EC_WRITE_EXT_SCI_MASK 0x38
+#define EC_SCI_QUERY 0x84
+#define EC_EXT_SCI_QUERY 0x85
+
+struct platform_device;
+
+struct olpc_ec_driver {
+ int (*probe)(struct platform_device *);
+ int (*suspend)(struct platform_device *);
+ int (*resume)(struct platform_device *);
+
+ int (*ec_cmd)(u8, u8 *, size_t, u8 *, size_t, void *);
+};
+
+#ifdef CONFIG_OLPC
+
+extern void olpc_ec_driver_register(struct olpc_ec_driver *drv, void *arg);
+
+extern int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
+ size_t outlen);
+
+#else
+
+static inline int olpc_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
+ size_t outlen) { return -ENODEV; }
+
+#endif /* CONFIG_OLPC */
+
+#endif /* _LINUX_OLPC_EC_H */
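A minimal query through olpc_ec_cmd(); the one-byte response assumed for EC_FIRMWARE_REV mirrors how the XO platform code uses this command, but treat the sketch as illustrative:

#include <linux/kernel.h>
#include <linux/olpc-ec.h>

static int example_read_ec_rev(void)
{
	u8 rev;
	int ret = olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, &rev, 1);

	if (ret)
		return ret;
	pr_info("OLPC EC firmware revision: %u\n", rev);
	return 0;
}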
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
new file mode 100644
index 000000000..e5a70132a
--- /dev/null
+++ b/include/linux/omap-dma.h
@@ -0,0 +1,352 @@
+#ifndef __LINUX_OMAP_DMA_H
+#define __LINUX_OMAP_DMA_H
+#include <linux/omap-dmaengine.h>
+
+/*
+ * Legacy OMAP DMA handling defines and functions
+ *
+ * NOTE: Do not use these any longer.
+ *
+ * Use the generic dmaengine functions as defined in
+ * include/linux/dmaengine.h.
+ *
+ * Copyright (C) 2003 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+
+#define INT_DMA_LCD 25
+
+#define OMAP1_DMA_TOUT_IRQ (1 << 0)
+#define OMAP_DMA_DROP_IRQ (1 << 1)
+#define OMAP_DMA_HALF_IRQ (1 << 2)
+#define OMAP_DMA_FRAME_IRQ (1 << 3)
+#define OMAP_DMA_LAST_IRQ (1 << 4)
+#define OMAP_DMA_BLOCK_IRQ (1 << 5)
+#define OMAP1_DMA_SYNC_IRQ (1 << 6)
+#define OMAP2_DMA_PKT_IRQ (1 << 7)
+#define OMAP2_DMA_TRANS_ERR_IRQ (1 << 8)
+#define OMAP2_DMA_SECURE_ERR_IRQ (1 << 9)
+#define OMAP2_DMA_SUPERVISOR_ERR_IRQ (1 << 10)
+#define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
+
+#define OMAP_DMA_CCR_EN (1 << 7)
+#define OMAP_DMA_CCR_RD_ACTIVE (1 << 9)
+#define OMAP_DMA_CCR_WR_ACTIVE (1 << 10)
+#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24)
+#define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25)
+
+#define OMAP_DMA_DATA_TYPE_S8 0x00
+#define OMAP_DMA_DATA_TYPE_S16 0x01
+#define OMAP_DMA_DATA_TYPE_S32 0x02
+
+#define OMAP_DMA_SYNC_ELEMENT 0x00
+#define OMAP_DMA_SYNC_FRAME 0x01
+#define OMAP_DMA_SYNC_BLOCK 0x02
+#define OMAP_DMA_SYNC_PACKET 0x03
+
+#define OMAP_DMA_DST_SYNC_PREFETCH 0x02
+#define OMAP_DMA_SRC_SYNC 0x01
+#define OMAP_DMA_DST_SYNC 0x00
+
+#define OMAP_DMA_PORT_EMIFF 0x00
+#define OMAP_DMA_PORT_EMIFS 0x01
+#define OMAP_DMA_PORT_OCP_T1 0x02
+#define OMAP_DMA_PORT_TIPB 0x03
+#define OMAP_DMA_PORT_OCP_T2 0x04
+#define OMAP_DMA_PORT_MPUI 0x05
+
+#define OMAP_DMA_AMODE_CONSTANT 0x00
+#define OMAP_DMA_AMODE_POST_INC 0x01
+#define OMAP_DMA_AMODE_SINGLE_IDX 0x02
+#define OMAP_DMA_AMODE_DOUBLE_IDX 0x03
+
+#define DMA_DEFAULT_FIFO_DEPTH 0x10
+#define DMA_DEFAULT_ARB_RATE 0x01
+/* Pass THREAD_RESERVE ORed with THREAD_FIFO for tparams */
+#define DMA_THREAD_RESERVE_NORM (0x00 << 12) /* Def */
+#define DMA_THREAD_RESERVE_ONET (0x01 << 12)
+#define DMA_THREAD_RESERVE_TWOT (0x02 << 12)
+#define DMA_THREAD_RESERVE_THREET (0x03 << 12)
+#define DMA_THREAD_FIFO_NONE (0x00 << 14) /* Def */
+#define DMA_THREAD_FIFO_75 (0x01 << 14)
+#define DMA_THREAD_FIFO_25 (0x02 << 14)
+#define DMA_THREAD_FIFO_50 (0x03 << 14)
+
+/* DMA4_OCP_SYSCONFIG bits */
+#define DMA_SYSCONFIG_MIDLEMODE_MASK (3 << 12)
+#define DMA_SYSCONFIG_CLOCKACTIVITY_MASK (3 << 8)
+#define DMA_SYSCONFIG_EMUFREE (1 << 5)
+#define DMA_SYSCONFIG_SIDLEMODE_MASK (3 << 3)
+#define DMA_SYSCONFIG_SOFTRESET (1 << 2)
+#define DMA_SYSCONFIG_AUTOIDLE (1 << 0)
+
+#define DMA_SYSCONFIG_MIDLEMODE(n) ((n) << 12)
+#define DMA_SYSCONFIG_SIDLEMODE(n) ((n) << 3)
+
+#define DMA_IDLEMODE_SMARTIDLE 0x2
+#define DMA_IDLEMODE_NO_IDLE 0x1
+#define DMA_IDLEMODE_FORCE_IDLE 0x0
+
+/* Chaining modes */
+#ifndef CONFIG_ARCH_OMAP1
+#define OMAP_DMA_STATIC_CHAIN 0x1
+#define OMAP_DMA_DYNAMIC_CHAIN 0x2
+#define OMAP_DMA_CHAIN_ACTIVE 0x1
+#define OMAP_DMA_CHAIN_INACTIVE 0x0
+#endif
+
+#define DMA_CH_PRIO_HIGH 0x1
+#define DMA_CH_PRIO_LOW 0x0 /* Def */
+
+/* Errata handling */
+#define IS_DMA_ERRATA(id) (errata & (id))
+#define SET_DMA_ERRATA(id) (errata |= (id))
+
+#define DMA_ERRATA_IFRAME_BUFFERING BIT(0x0)
+#define DMA_ERRATA_PARALLEL_CHANNELS BIT(0x1)
+#define DMA_ERRATA_i378 BIT(0x2)
+#define DMA_ERRATA_i541 BIT(0x3)
+#define DMA_ERRATA_i88 BIT(0x4)
+#define DMA_ERRATA_3_3 BIT(0x5)
+#define DMA_ROMCODE_BUG BIT(0x6)
+
+/* Attributes for OMAP DMA Controller */
+#define DMA_LINKED_LCH BIT(0x0)
+#define GLOBAL_PRIORITY BIT(0x1)
+#define RESERVE_CHANNEL BIT(0x2)
+#define IS_CSSA_32 BIT(0x3)
+#define IS_CDSA_32 BIT(0x4)
+#define IS_RW_PRIORITY BIT(0x5)
+#define ENABLE_1510_MODE BIT(0x6)
+#define SRC_PORT BIT(0x7)
+#define DST_PORT BIT(0x8)
+#define SRC_INDEX BIT(0x9)
+#define DST_INDEX BIT(0xa)
+#define IS_BURST_ONLY4 BIT(0xb)
+#define CLEAR_CSR_ON_READ BIT(0xc)
+#define IS_WORD_16 BIT(0xd)
+#define ENABLE_16XX_MODE BIT(0xe)
+#define HS_CHANNELS_RESERVED BIT(0xf)
+#define DMA_ENGINE_HANDLE_IRQ BIT(0x10)
+
+/* Defines for DMA Capabilities */
+#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18)
+#define DMA_HAS_CONSTANT_FILL_CAPS (0x1 << 19)
+#define DMA_HAS_DESCRIPTOR_CAPS (0x3 << 20)
+
+enum omap_reg_offsets {
+
+GCR, GSCR, GRST1, HW_ID,
+PCH2_ID, PCH0_ID, PCH1_ID, PCHG_ID,
+PCHD_ID, CAPS_0, CAPS_1, CAPS_2,
+CAPS_3, CAPS_4, PCH2_SR, PCH0_SR,
+PCH1_SR, PCHD_SR, REVISION, IRQSTATUS_L0,
+IRQSTATUS_L1, IRQSTATUS_L2, IRQSTATUS_L3, IRQENABLE_L0,
+IRQENABLE_L1, IRQENABLE_L2, IRQENABLE_L3, SYSSTATUS,
+OCP_SYSCONFIG,
+
+/* omap1+ specific */
+CPC, CCR2, LCH_CTRL,
+
+/* Common registers for all omap's */
+CSDP, CCR, CICR, CSR,
+CEN, CFN, CSFI, CSEI,
+CSAC, CDAC, CDEI,
+CDFI, CLNK_CTRL,
+
+/* Channel specific registers */
+CSSA, CDSA, COLOR,
+CCEN, CCFN,
+
+/* omap3630 and omap4 specific */
+CDP, CNDP, CCDN,
+
+};
+
+enum omap_dma_burst_mode {
+ OMAP_DMA_DATA_BURST_DIS = 0,
+ OMAP_DMA_DATA_BURST_4,
+ OMAP_DMA_DATA_BURST_8,
+ OMAP_DMA_DATA_BURST_16,
+};
+
+enum end_type {
+ OMAP_DMA_LITTLE_ENDIAN = 0,
+ OMAP_DMA_BIG_ENDIAN
+};
+
+enum omap_dma_color_mode {
+ OMAP_DMA_COLOR_DIS = 0,
+ OMAP_DMA_CONSTANT_FILL,
+ OMAP_DMA_TRANSPARENT_COPY
+};
+
+enum omap_dma_write_mode {
+ OMAP_DMA_WRITE_NON_POSTED = 0,
+ OMAP_DMA_WRITE_POSTED,
+ OMAP_DMA_WRITE_LAST_NON_POSTED
+};
+
+enum omap_dma_channel_mode {
+ OMAP_DMA_LCH_2D = 0,
+ OMAP_DMA_LCH_G,
+ OMAP_DMA_LCH_P,
+ OMAP_DMA_LCH_PD
+};
+
+struct omap_dma_channel_params {
+ int data_type; /* data type 8,16,32 */
+ int elem_count; /* number of elements in a frame */
+ int frame_count; /* number of frames in an element */
+
+ int src_port; /* Only on OMAP1 REVISIT: Is this needed? */
+ int src_amode; /* constant, post increment, indexed,
+ double indexed */
+ unsigned long src_start; /* source address : physical */
+ int src_ei; /* source element index */
+ int src_fi; /* source frame index */
+
+ int dst_port; /* Only on OMAP1 REVISIT: Is this needed? */
+ int dst_amode; /* constant, post increment, indexed,
+ double indexed */
+ unsigned long dst_start; /* destination address : physical */
+ int dst_ei; /* destination element index */
+ int dst_fi; /* destination frame index */
+
+ int trigger; /* trigger attached if the channel is
+ synchronized */
+ int sync_mode; /* sync on element, frame, block or packet */
+ int src_or_dst_synch; /* source synch(1) or destination synch(0) */
+
+ int ie; /* interrupt enabled */
+
+ unsigned char read_prio;/* read priority */
+ unsigned char write_prio;/* write priority */
+
+#ifndef CONFIG_ARCH_OMAP1
+ enum omap_dma_burst_mode burst_mode; /* Burst mode 4/8/16 words */
+#endif
+};
+
+struct omap_dma_lch {
+ int next_lch;
+ int dev_id;
+ u16 saved_csr;
+ u16 enabled_irqs;
+ const char *dev_name;
+ void (*callback)(int lch, u16 ch_status, void *data);
+ void *data;
+ long flags;
+ /* required for Dynamic chaining */
+ int prev_linked_ch;
+ int next_linked_ch;
+ int state;
+ int chain_id;
+ int status;
+};
+
+struct omap_dma_dev_attr {
+ u32 dev_caps;
+ u16 lch_count;
+ u16 chan_count;
+};
+
+enum {
+ OMAP_DMA_REG_NONE,
+ OMAP_DMA_REG_16BIT,
+ OMAP_DMA_REG_2X16BIT,
+ OMAP_DMA_REG_32BIT,
+};
+
+struct omap_dma_reg {
+ u16 offset;
+ u8 stride;
+ u8 type;
+};
+
+/* System DMA platform data structure */
+struct omap_system_dma_plat_info {
+ const struct omap_dma_reg *reg_map;
+ unsigned channel_stride;
+ struct omap_dma_dev_attr *dma_attr;
+ u32 errata;
+ void (*show_dma_caps)(void);
+ void (*clear_lch_regs)(int lch);
+ void (*clear_dma)(int lch);
+ void (*dma_write)(u32 val, int reg, int lch);
+ u32 (*dma_read)(int reg, int lch);
+};
+
+#ifdef CONFIG_ARCH_OMAP2PLUS
+#define dma_omap2plus() 1
+#else
+#define dma_omap2plus() 0
+#endif
+#define dma_omap1() (!dma_omap2plus())
+#define __dma_omap15xx(d) (dma_omap1() && (d)->dev_caps & ENABLE_1510_MODE)
+#define __dma_omap16xx(d) (dma_omap1() && (d)->dev_caps & ENABLE_16XX_MODE)
+#define dma_omap15xx() __dma_omap15xx(d)
+#define dma_omap16xx() __dma_omap16xx(d)
+
+extern struct omap_system_dma_plat_info *omap_get_plat_info(void);
+
+extern void omap_set_dma_priority(int lch, int dst_port, int priority);
+extern int omap_request_dma(int dev_id, const char *dev_name,
+ void (*callback)(int lch, u16 ch_status, void *data),
+ void *data, int *dma_ch);
+extern void omap_enable_dma_irq(int ch, u16 irq_bits);
+extern void omap_disable_dma_irq(int ch, u16 irq_bits);
+extern void omap_free_dma(int ch);
+extern void omap_start_dma(int lch);
+extern void omap_stop_dma(int lch);
+extern void omap_set_dma_transfer_params(int lch, int data_type,
+ int elem_count, int frame_count,
+ int sync_mode,
+ int dma_trigger, int src_or_dst_synch);
+extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
+extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
+
+extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
+ unsigned long src_start,
+ int src_ei, int src_fi);
+extern void omap_set_dma_src_data_pack(int lch, int enable);
+extern void omap_set_dma_src_burst_mode(int lch,
+ enum omap_dma_burst_mode burst_mode);
+
+extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
+ unsigned long dest_start,
+ int dst_ei, int dst_fi);
+extern void omap_set_dma_dest_data_pack(int lch, int enable);
+extern void omap_set_dma_dest_burst_mode(int lch,
+ enum omap_dma_burst_mode burst_mode);
+
+extern void omap_set_dma_params(int lch,
+ struct omap_dma_channel_params *params);
+
+extern void omap_dma_link_lch(int lch_head, int lch_queue);
+
+extern int omap_set_dma_callback(int lch,
+ void (*callback)(int lch, u16 ch_status, void *data),
+ void *data);
+extern dma_addr_t omap_get_dma_src_pos(int lch);
+extern dma_addr_t omap_get_dma_dst_pos(int lch);
+extern int omap_get_dma_active_status(int lch);
+extern int omap_dma_running(void);
+extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
+ int tparams);
+void omap_dma_global_context_save(void);
+void omap_dma_global_context_restore(void);
+
+#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
+#include <mach/lcd_dma.h>
+#else
+static inline int omap_lcd_dma_running(void)
+{
+ return 0;
+}
+#endif
+
+#endif /* __LINUX_OMAP_DMA_H */
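For completeness, a sketch of the legacy channel flow this header still exports; as the note at the top says, new code should use the dmaengine API instead, and all values below are placeholders:

#include <linux/omap-dma.h>

static void example_dma_cb(int lch, u16 ch_status, void *data)
{
	/* completion / error handling */
}

static int example_legacy_dma(int dev_id, unsigned long src_phys,
			      unsigned long dst_phys, int elems)
{
	int lch, ret;

	ret = omap_request_dma(dev_id, "example", example_dma_cb, NULL, &lch);
	if (ret)
		return ret;

	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32, elems, 1,
				     OMAP_DMA_SYNC_ELEMENT, dev_id, 0);
	omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
				OMAP_DMA_AMODE_POST_INC, src_phys, 0, 0);
	omap_set_dma_dest_params(lch, OMAP_DMA_PORT_EMIFF,
				 OMAP_DMA_AMODE_POST_INC, dst_phys, 0, 0);
	omap_start_dma(lch);
	return lch;
}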
diff --git a/include/linux/omap-dmaengine.h b/include/linux/omap-dmaengine.h
new file mode 100644
index 000000000..8e6906c72
--- /dev/null
+++ b/include/linux/omap-dmaengine.h
@@ -0,0 +1,21 @@
+/*
+ * OMAP DMA Engine support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OMAP_DMAENGINE_H
+#define __LINUX_OMAP_DMAENGINE_H
+
+struct dma_chan;
+
+#if defined(CONFIG_DMA_OMAP) || (defined(CONFIG_DMA_OMAP_MODULE) && defined(MODULE))
+bool omap_dma_filter_fn(struct dma_chan *, void *);
+#else
+static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
+{
+ return false;
+}
+#endif
+#endif /* __LINUX_OMAP_DMAENGINE_H */
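How a client driver typically feeds omap_dma_filter_fn() to the dmaengine core; the request-line argument is assumed to come from the peripheral's resources:

#include <linux/dmaengine.h>
#include <linux/omap-dmaengine.h>

static struct dma_chan *example_request_chan(unsigned int dma_req)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* The filter matches the channel whose request line equals *param. */
	return dma_request_channel(mask, omap_dma_filter_fn, &dma_req);
}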
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
new file mode 100644
index 000000000..7dee00143
--- /dev/null
+++ b/include/linux/omap-gpmc.h
@@ -0,0 +1,200 @@
+/*
+ * OMAP GPMC (General Purpose Memory Controller) defines
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/* Maximum Number of Chip Selects */
+#define GPMC_CS_NUM 8
+
+#define GPMC_CONFIG_WP 0x00000005
+
+#define GPMC_IRQ_FIFOEVENTENABLE 0x01
+#define GPMC_IRQ_COUNT_EVENT 0x02
+
+#define GPMC_BURST_4 4 /* 4 word burst */
+#define GPMC_BURST_8 8 /* 8 word burst */
+#define GPMC_BURST_16 16 /* 16 word burst */
+#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */
+#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */
+#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
+#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
+
+/* bool type time settings */
+struct gpmc_bool_timings {
+ bool cycle2cyclediffcsen;
+ bool cycle2cyclesamecsen;
+ bool we_extra_delay;
+ bool oe_extra_delay;
+ bool adv_extra_delay;
+ bool cs_extra_delay;
+ bool time_para_granularity;
+};
+
+/*
+ * Note that all values in this struct are in nanoseconds except sync_clk
+ * (which is in picoseconds), while the register values are in gpmc_fck cycles.
+ */
+struct gpmc_timings {
+ /* Minimum clock period for synchronous mode (in picoseconds) */
+ u32 sync_clk;
+
+ /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
+ u32 cs_on; /* Assertion time */
+ u32 cs_rd_off; /* Read deassertion time */
+ u32 cs_wr_off; /* Write deassertion time */
+
+ /* ADV signal timings corresponding to GPMC_CONFIG3 */
+ u32 adv_on; /* Assertion time */
+ u32 adv_rd_off; /* Read deassertion time */
+ u32 adv_wr_off; /* Write deassertion time */
+
+ /* WE signals timings corresponding to GPMC_CONFIG4 */
+ u32 we_on; /* WE assertion time */
+ u32 we_off; /* WE deassertion time */
+
+ /* OE signals timings corresponding to GPMC_CONFIG4 */
+ u32 oe_on; /* OE assertion time */
+ u32 oe_off; /* OE deassertion time */
+
+ /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
+ u32 page_burst_access; /* Multiple access word delay */
+ u32 access; /* Start-cycle to first data valid delay */
+ u32 rd_cycle; /* Total read cycle time */
+ u32 wr_cycle; /* Total write cycle time */
+
+ u32 bus_turnaround;
+ u32 cycle2cycle_delay;
+
+ u32 wait_monitoring;
+ u32 clk_activation;
+
+ /* The following are only on OMAP3430 */
+ u32 wr_access; /* WRACCESSTIME */
+ u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */
+
+ struct gpmc_bool_timings bool_timings;
+};
+
+/* Device timings in picoseconds */
+struct gpmc_device_timings {
+ u32 t_ceasu; /* address setup to CS valid */
+ u32 t_avdasu; /* address setup to ADV valid */
+ /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is
+ * of tusb using these timings even for sync whilst
+ * ideally for adv_rd/(wr)_off it should have considered
+ * t_avdh instead. This indirectly necessitates r/w
+ * variations of t_avdp as it is possible to have one
+ * sync & other async
+ */
+ u32 t_avdp_r; /* ADV low time (what about t_cer ?) */
+ u32 t_avdp_w;
+ u32 t_aavdh; /* address hold time */
+ u32 t_oeasu; /* address setup to OE valid */
+ u32 t_aa; /* access time from ADV assertion */
+ u32 t_iaa; /* initial access time */
+ u32 t_oe; /* access time from OE assertion */
+ u32 t_ce; /* access time from CS assertion */
+ u32 t_rd_cycle; /* read cycle time */
+ u32 t_cez_r; /* read CS deassertion to high Z */
+ u32 t_cez_w; /* write CS deassertion to high Z */
+ u32 t_oez; /* OE deassertion to high Z */
+ u32 t_weasu; /* address setup to WE valid */
+ u32 t_wpl; /* write assertion time */
+ u32 t_wph; /* write deassertion time */
+ u32 t_wr_cycle; /* write cycle time */
+
+ u32 clk;
+ u32 t_bacc; /* burst access valid clock to output delay */
+ u32 t_ces; /* CS setup time to clk */
+ u32 t_avds; /* ADV setup time to clk */
+ u32 t_avdh; /* ADV hold time from clk */
+ u32 t_ach; /* address hold time from clk */
+ u32 t_rdyo; /* clk to ready valid */
+
+ u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */
+ u32 t_ce_avd; /* CS on to ADV on delay */
+
+ /* XXX: check the possibility of combining
+ * cyc_aavdh_oe & cyc_aavdh_we
+ */
+ u8 cyc_aavdh_oe;/* read address hold time in cycles */
+ u8 cyc_aavdh_we;/* write address hold time in cycles */
+ u8 cyc_oe; /* access time from OE assertion in cycles */
+ u8 cyc_wpl; /* write deassertion time in cycles */
+ u32 cyc_iaa; /* initial access time in cycles */
+
+ /* extra delays */
+ bool ce_xdelay;
+ bool avd_xdelay;
+ bool oe_xdelay;
+ bool we_xdelay;
+};
+
+struct gpmc_settings {
+ bool burst_wrap; /* enables wrap bursting */
+ bool burst_read; /* enables read page/burst mode */
+ bool burst_write; /* enables write page/burst mode */
+ bool device_nand; /* device is NAND */
+ bool sync_read; /* enables synchronous reads */
+ bool sync_write; /* enables synchronous writes */
+ bool wait_on_read; /* monitor wait on reads */
+ bool wait_on_write; /* monitor wait on writes */
+ u32 burst_len; /* page/burst length */
+ u32 device_width; /* device bus width (8 or 16 bit) */
+ u32 mux_add_data; /* multiplex address & data */
+ u32 wait_pin; /* wait-pin to be used */
+};
+
+extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_settings *gpmc_s,
+ struct gpmc_device_timings *dev_t);
+
+struct gpmc_nand_regs;
+struct device_node;
+
+extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
+extern int gpmc_get_client_irq(unsigned irq_config);
+
+extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
+
+extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
+extern int gpmc_calc_divider(unsigned int sync_clk);
+extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
+ const struct gpmc_settings *s);
+extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p);
+extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
+extern void gpmc_cs_free(int cs);
+extern int gpmc_configure(int cmd, int wval);
+extern void gpmc_read_settings_dt(struct device_node *np,
+ struct gpmc_settings *p);
+
+extern void omap3_gpmc_save_context(void);
+extern void omap3_gpmc_restore_context(void);
+
+struct gpmc_timings;
+struct omap_nand_platform_data;
+struct omap_onenand_platform_data;
+
+#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
+extern int gpmc_nand_init(struct omap_nand_platform_data *d,
+ struct gpmc_timings *gpmc_t);
+#else
+static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
+ struct gpmc_timings *gpmc_t)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
+extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
+#else
+#define board_onenand_data NULL
+static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
+{
+}
+#endif
diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h
new file mode 100644
index 000000000..c1aede467
--- /dev/null
+++ b/include/linux/omap-iommu.h
@@ -0,0 +1,19 @@
+/*
+ * omap iommu: simple virtual address space management
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OMAP_IOMMU_H_
+#define _OMAP_IOMMU_H_
+
+extern void omap_iommu_save_ctx(struct device *dev);
+extern void omap_iommu_restore_ctx(struct device *dev);
+
+#endif
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
new file mode 100644
index 000000000..587bbdd31
--- /dev/null
+++ b/include/linux/omap-mailbox.h
@@ -0,0 +1,29 @@
+/*
+ * omap-mailbox: interprocessor communication module for OMAP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef OMAP_MAILBOX_H
+#define OMAP_MAILBOX_H
+
+typedef u32 mbox_msg_t;
+
+typedef int __bitwise omap_mbox_irq_t;
+#define IRQ_TX ((__force omap_mbox_irq_t) 1)
+#define IRQ_RX ((__force omap_mbox_irq_t) 2)
+
+struct mbox_chan;
+struct mbox_client;
+
+struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
+ const char *chan_name);
+
+void omap_mbox_save_ctx(struct mbox_chan *chan);
+void omap_mbox_restore_ctx(struct mbox_chan *chan);
+void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
+void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
+
+#endif /* OMAP_MAILBOX_H */
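A client-side sketch: request a named channel on behalf of a filled-in mbox_client and enable its receive interrupt (error handling is kept minimal for illustration):

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/omap-mailbox.h>

static struct mbox_chan *example_open_mbox(struct mbox_client *cl,
					   const char *chan_name)
{
	struct mbox_chan *chan = omap_mbox_request_channel(cl, chan_name);

	if (IS_ERR(chan))
		return chan;
	omap_mbox_enable_irq(chan, IRQ_RX);
	return chan;
}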
diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h
new file mode 100644
index 000000000..d1f4dccae
--- /dev/null
+++ b/include/linux/omapfb.h
@@ -0,0 +1,42 @@
+/*
+ * File: include/linux/omapfb.h
+ *
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __LINUX_OMAPFB_H__
+#define __LINUX_OMAPFB_H__
+
+#include <uapi/linux/omapfb.h>
+
+
+struct omap_lcd_config {
+ char panel_name[16];
+ char ctrl_name[16];
+ s16 nreset_gpio;
+ u8 data_lines;
+};
+
+struct omapfb_platform_data {
+ struct omap_lcd_config lcd;
+};
+
+void __init omapfb_set_lcd_config(const struct omap_lcd_config *config);
+
+#endif /* __LINUX_OMAPFB_H__ */
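A board-file sketch handing an LCD configuration to omapfb; panel and controller names are placeholders:

#include <linux/init.h>
#include <linux/omapfb.h>

static const struct omap_lcd_config example_lcd_config __initconst = {
	.panel_name	= "example-panel",
	.ctrl_name	= "internal",
	.nreset_gpio	= -1,		/* no reset GPIO wired up */
	.data_lines	= 16,
};

static void __init example_board_display_init(void)
{
	omapfb_set_lcd_config(&example_lcd_config);
}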
diff --git a/include/linux/oom.h b/include/linux/oom.h
new file mode 100644
index 000000000..44b2f6f7b
--- /dev/null
+++ b/include/linux/oom.h
@@ -0,0 +1,102 @@
+#ifndef __INCLUDE_LINUX_OOM_H
+#define __INCLUDE_LINUX_OOM_H
+
+
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/nodemask.h>
+#include <uapi/linux/oom.h>
+
+struct zonelist;
+struct notifier_block;
+struct mem_cgroup;
+struct task_struct;
+
+/*
+ * Types of limitations to the nodes from which allocations may occur
+ */
+enum oom_constraint {
+ CONSTRAINT_NONE,
+ CONSTRAINT_CPUSET,
+ CONSTRAINT_MEMORY_POLICY,
+ CONSTRAINT_MEMCG,
+};
+
+enum oom_scan_t {
+ OOM_SCAN_OK, /* scan thread and find its badness */
+ OOM_SCAN_CONTINUE, /* do not consider thread for oom kill */
+ OOM_SCAN_ABORT, /* abort the iteration and return */
+ OOM_SCAN_SELECT, /* always select this thread first */
+};
+
+/* Thread is the potential origin of an oom condition; kill first on oom */
+#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
+
+static inline void set_current_oom_origin(void)
+{
+ current->signal->oom_flags |= OOM_FLAG_ORIGIN;
+}
+
+static inline void clear_current_oom_origin(void)
+{
+ current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
+}
+
+static inline bool oom_task_origin(const struct task_struct *p)
+{
+ return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
+}
+
+extern void mark_tsk_oom_victim(struct task_struct *tsk);
+
+extern void unmark_oom_victim(void);
+
+extern unsigned long oom_badness(struct task_struct *p,
+ struct mem_cgroup *memcg, const nodemask_t *nodemask,
+ unsigned long totalpages);
+
+extern int oom_kills_count(void);
+extern void note_oom_kill(void);
+extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+ unsigned int points, unsigned long totalpages,
+ struct mem_cgroup *memcg, nodemask_t *nodemask,
+ const char *message);
+
+extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
+extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
+
+extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
+ int order, const nodemask_t *nodemask,
+ struct mem_cgroup *memcg);
+
+extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
+ unsigned long totalpages, const nodemask_t *nodemask,
+ bool force_kill);
+
+extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
+ int order, nodemask_t *mask, bool force_kill);
+extern int register_oom_notifier(struct notifier_block *nb);
+extern int unregister_oom_notifier(struct notifier_block *nb);
+
+extern bool oom_killer_disabled;
+extern bool oom_killer_disable(void);
+extern void oom_killer_enable(void);
+
+extern struct task_struct *find_lock_task_mm(struct task_struct *p);
+
+static inline bool task_will_free_mem(struct task_struct *task)
+{
+ /*
+ * A coredumping process may sleep for an extended period in exit_mm(),
+ * so the oom killer cannot assume that the process will promptly exit
+ * and release memory.
+ */
+ return (task->flags & PF_EXITING) &&
+ !(task->signal->flags & SIGNAL_GROUP_COREDUMP);
+}
+
+/* sysctls */
+extern int sysctl_oom_dump_tasks;
+extern int sysctl_oom_kill_allocating_task;
+extern int sysctl_panic_on_oom;
+#endif /* __INCLUDE_LINUX_OOM_H */
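Drivers with large discardable caches occasionally register an OOM notifier; the callback receives a pointer to a running "pages freed" counter. A hedged sketch, with all example_* names hypothetical:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/oom.h>

static int example_oom_notify(struct notifier_block *nb,
			      unsigned long unused, void *parm)
{
	unsigned long *freed = parm;

	*freed += 0;	/* add the number of pages actually reclaimed */
	return NOTIFY_OK;
}

static struct notifier_block example_oom_nb = {
	.notifier_call = example_oom_notify,
};

static int __init example_oom_init(void)
{
	return register_oom_notifier(&example_oom_nb);
}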
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
new file mode 100644
index 000000000..e6b240b61
--- /dev/null
+++ b/include/linux/openvswitch.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2007-2011 Nicira Networks.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _LINUX_OPENVSWITCH_H
+#define _LINUX_OPENVSWITCH_H 1
+
+#include <uapi/linux/openvswitch.h>
+
+#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
new file mode 100644
index 000000000..b2a0f15f1
--- /dev/null
+++ b/include/linux/oprofile.h
@@ -0,0 +1,209 @@
+/**
+ * @file oprofile.h
+ *
+ * API for machine-specific interrupts to interface
+ * to oprofile.
+ *
+ * @remark Copyright 2002 OProfile authors
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
+ */
+
+#ifndef OPROFILE_H
+#define OPROFILE_H
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/atomic.h>
+
+/* Each escaped entry is prefixed by ESCAPE_CODE
+ * then one of the following codes, then the
+ * relevant data.
+ * These #defines live in this file so that arch-specific
+ * buffer sync'ing code can access them.
+ */
+#define ESCAPE_CODE ~0UL
+#define CTX_SWITCH_CODE 1
+#define CPU_SWITCH_CODE 2
+#define COOKIE_SWITCH_CODE 3
+#define KERNEL_ENTER_SWITCH_CODE 4
+#define KERNEL_EXIT_SWITCH_CODE 5
+#define MODULE_LOADED_CODE 6
+#define CTX_TGID_CODE 7
+#define TRACE_BEGIN_CODE 8
+#define TRACE_END_CODE 9
+#define XEN_ENTER_SWITCH_CODE 10
+#define SPU_PROFILING_CODE 11
+#define SPU_CTX_SWITCH_CODE 12
+#define IBS_FETCH_CODE 13
+#define IBS_OP_CODE 14
+
+struct dentry;
+struct file_operations;
+struct pt_regs;
+
+/* Operations structure to be filled in */
+struct oprofile_operations {
+ /* create any necessary configuration files in the oprofile fs.
+ * Optional. */
+ int (*create_files)(struct dentry * root);
+ /* Do any necessary interrupt setup. Optional. */
+ int (*setup)(void);
+ /* Do any necessary interrupt shutdown. Optional. */
+ void (*shutdown)(void);
+ /* Start delivering interrupts. */
+ int (*start)(void);
+ /* Stop delivering interrupts. */
+ void (*stop)(void);
+ /* Arch-specific buffer sync functions.
+ * Return value = 0: Success
+ * Return value = -1: Failure
+ * Return value = 1: Run generic sync function
+ */
+ int (*sync_start)(void);
+ int (*sync_stop)(void);
+
+ /* Initiate a stack backtrace. Optional. */
+ void (*backtrace)(struct pt_regs * const regs, unsigned int depth);
+
+ /* Multiplex between different events. Optional. */
+ int (*switch_events)(void);
+ /* CPU identification string. */
+ char * cpu_type;
+};
+
+/**
+ * One-time initialisation. *ops must be set to a filled-in
+ * operations structure. This is called even in timer interrupt
+ * mode so an arch can set a backtrace callback.
+ *
+ * If an error occurs, the fields should be left untouched.
+ */
+int oprofile_arch_init(struct oprofile_operations * ops);
+
+/**
+ * One-time exit/cleanup for the arch.
+ */
+void oprofile_arch_exit(void);
+
+/**
+ * Add a sample. This may be called from any context.
+ */
+void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
+
+/**
+ * Add an extended sample. Use this when the PC is not from the regs, and
+ * we cannot determine if we're in kernel mode from the regs.
+ *
+ * This function does perform a backtrace.
+ *
+ */
+void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
+ unsigned long event, int is_kernel);
+
+/**
+ * Add a hardware sample.
+ */
+void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs,
+ unsigned long event, int is_kernel,
+ struct task_struct *task);
+
+/* Use this instead when the PC value is not from the regs. Doesn't
+ * backtrace. */
+void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event);
+
+/* add a backtrace entry, to be called from the ->backtrace callback */
+void oprofile_add_trace(unsigned long eip);
+
+
+/**
+ * Create a file of the given name as a child of the given root, with
+ * the specified file operations.
+ */
+int oprofilefs_create_file(struct dentry * root,
+ char const * name, const struct file_operations * fops);
+
+int oprofilefs_create_file_perm(struct dentry * root,
+ char const * name, const struct file_operations * fops, int perm);
+
+/** Create a file for read/write access to an unsigned long. */
+int oprofilefs_create_ulong(struct dentry * root,
+ char const * name, ulong * val);
+
+/** Create a file for read-only access to an unsigned long. */
+int oprofilefs_create_ro_ulong(struct dentry * root,
+ char const * name, ulong * val);
+
+/** Create a file for read-only access to an atomic_t. */
+int oprofilefs_create_ro_atomic(struct dentry * root,
+ char const * name, atomic_t * val);
+
+/** create a directory */
+struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
+
+/**
+ * Write the given asciz string to the given user buffer @buf, updating *offset
+ * appropriately. Returns bytes written or -EFAULT.
+ */
+ssize_t oprofilefs_str_to_user(char const * str, char __user * buf, size_t count, loff_t * offset);
+
+/**
+ * Convert an unsigned long value into ASCII and copy it to the user buffer @buf,
+ * updating *offset appropriately. Returns bytes written or -EFAULT.
+ */
+ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t count, loff_t * offset);
+
+/**
+ * Read an ASCII string for a number from a userspace buffer and fill *val on success.
+ * Returns 0 on success, < 0 on error.
+ */
+int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
+
+/** lock for read/write safety */
+extern raw_spinlock_t oprofilefs_lock;
+
+/**
+ * Add the contents of a circular buffer to the event buffer.
+ */
+void oprofile_put_buff(unsigned long *buf, unsigned int start,
+ unsigned int stop, unsigned int max);
+
+unsigned long oprofile_get_cpu_buffer_size(void);
+void oprofile_cpu_buffer_inc_smpl_lost(void);
+
+/* cpu buffer functions */
+
+struct op_sample;
+
+struct op_entry {
+ struct ring_buffer_event *event;
+ struct op_sample *sample;
+ unsigned long size;
+ unsigned long *data;
+};
+
+void oprofile_write_reserve(struct op_entry *entry,
+ struct pt_regs * const regs,
+ unsigned long pc, int code, int size);
+int oprofile_add_data(struct op_entry *entry, unsigned long val);
+int oprofile_add_data64(struct op_entry *entry, u64 val);
+int oprofile_write_commit(struct op_entry *entry);
+
+#ifdef CONFIG_HW_PERF_EVENTS
+int __init oprofile_perf_init(struct oprofile_operations *ops);
+void oprofile_perf_exit(void);
+char *op_name_from_perf_id(void);
+#else
+static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+ pr_info("oprofile: hardware counters not available\n");
+ return -ENODEV;
+}
+static inline void oprofile_perf_exit(void) { }
+#endif /* CONFIG_HW_PERF_EVENTS */
+
+#endif /* OPROFILE_H */
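An architecture hook is expected to fill in oprofile_operations as described above; the sketch below exposes one tunable via oprofilefs and wires up trivial start/stop handlers (all example_* pieces are hypothetical):

#include <linux/oprofile.h>

static unsigned long example_event;

static int example_create_files(struct dentry *root)
{
	return oprofilefs_create_ulong(root, "example_event", &example_event);
}

static int example_start(void)	{ return 0; }	/* program counters here */
static void example_stop(void)	{ }		/* stop counters here */

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	ops->create_files = example_create_files;
	ops->start	  = example_start;
	ops->stop	  = example_stop;
	ops->cpu_type	  = "example/cpu";
	return 0;
}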
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000..3a6490e81
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,35 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+struct optimistic_spin_node {
+ struct optimistic_spin_node *next, *prev;
+ int locked; /* 1 if lock acquired */
+ int cpu; /* encoded CPU # + 1 value */
+};
+
+struct optimistic_spin_queue {
+ /*
+ * Stores an encoded value of the CPU # of the tail node in the queue.
+ * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+ */
+ atomic_t tail;
+};
+
+#define OSQ_UNLOCKED_VAL (0)
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+ atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
+
+#endif
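A sketch of how a sleeping-lock implementation embeds the OSQ for its optimistic-spin fast path; the surrounding lock type and owner encoding are invented for illustration:

#include <linux/atomic.h>
#include <linux/osq_lock.h>

struct example_sleeping_lock {
	atomic_t			owner;	/* 0 == unlocked */
	struct optimistic_spin_queue	osq;
};

static void example_lock_init(struct example_sleeping_lock *lock)
{
	atomic_set(&lock->owner, 0);
	osq_lock_init(&lock->osq);
}

static bool example_try_spin(struct example_sleeping_lock *lock)
{
	bool taken;

	if (!osq_lock(&lock->osq))
		return false;	/* OSQ contended: fall back to the slow path */

	/* Normally we would spin here while the lock owner is still running. */
	taken = atomic_cmpxchg(&lock->owner, 0, 1) == 0;

	osq_unlock(&lock->osq);
	return taken;
}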
diff --git a/include/linux/oxu210hp.h b/include/linux/oxu210hp.h
new file mode 100644
index 000000000..0bf96eae5
--- /dev/null
+++ b/include/linux/oxu210hp.h
@@ -0,0 +1,7 @@
+/* platform data for the OXU210HP HCD */
+
+struct oxu210hp_platform_data {
+ unsigned int bus16:1;
+ unsigned int use_hcd_otg:1;
+ unsigned int use_hcd_sph:1;
+};
diff --git a/include/linux/padata.h b/include/linux/padata.h
new file mode 100644
index 000000000..438694650
--- /dev/null
+++ b/include/linux/padata.h
@@ -0,0 +1,189 @@
+/*
+ * padata.h - header for the padata parallelization interface
+ *
+ * Copyright (C) 2008, 2009 secunet Security Networks AG
+ * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef PADATA_H
+#define PADATA_H
+
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/kobject.h>
+
+#define PADATA_CPU_SERIAL 0x01
+#define PADATA_CPU_PARALLEL 0x02
+
+/**
+ * struct padata_priv - Embedded in the user's data structure.
+ *
+ * @list: List entry, to attach to the padata lists.
+ * @pd: Pointer to the internal control structure.
+ * @cb_cpu: Callback cpu for serialization.
+ * @info: Used to pass information from the parallel to the serial function.
+ * @parallel: Parallel execution function.
+ * @serial: Serial complete function.
+ */
+struct padata_priv {
+ struct list_head list;
+ struct parallel_data *pd;
+ int cb_cpu;
+ int info;
+ void (*parallel)(struct padata_priv *padata);
+ void (*serial)(struct padata_priv *padata);
+};
+
+/**
+ * struct padata_list
+ *
+ * @list: List head.
+ * @lock: List lock.
+ */
+struct padata_list {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+/**
+* struct padata_serial_queue - The percpu padata serial queue
+*
+* @serial: List to wait for serialization after reordering.
+* @work: work struct for serialization.
+* @pd: Backpointer to the internal control structure.
+*/
+struct padata_serial_queue {
+ struct padata_list serial;
+ struct work_struct work;
+ struct parallel_data *pd;
+};
+
+/**
+ * struct padata_parallel_queue - The percpu padata parallel queue
+ *
+ * @parallel: List to wait for parallelization.
+ * @reorder: List to wait for reordering after parallel processing.
+ * @pd: Backpointer to the internal control structure.
+ * @work: work struct for parallelization.
+ * @num_obj: Number of objects that are processed by this cpu.
+ * @cpu_index: Index of the cpu.
+ */
+struct padata_parallel_queue {
+ struct padata_list parallel;
+ struct padata_list reorder;
+ struct parallel_data *pd;
+ struct work_struct work;
+ atomic_t num_obj;
+ int cpu_index;
+};
+
+/**
+ * struct padata_cpumask - The cpumasks for the parallel/serial workers
+ *
+ * @pcpu: cpumask for the parallel workers.
+ * @cbcpu: cpumask for the serial (callback) workers.
+ */
+struct padata_cpumask {
+ cpumask_var_t pcpu;
+ cpumask_var_t cbcpu;
+};
+
+/**
+ * struct parallel_data - Internal control structure, covers everything
+ * that depends on the cpumask in use.
+ *
+ * @pinst: padata instance.
+ * @pqueue: percpu padata queues used for parallelization.
+ * @squeue: percpu padata queues used for serialization.
+ * @reorder_objects: Number of objects waiting in the reorder queues.
+ * @refcnt: Number of objects holding a reference on this parallel_data.
+ * @seq_nr: Sequence number counter for the parallelized objects.
+ * @cpumask: The cpumasks in use for parallel and serial workers.
+ * @lock: Reorder lock.
+ * @processed: Number of already processed objects.
+ * @timer: Reorder timer.
+ */
+struct parallel_data {
+ struct padata_instance *pinst;
+ struct padata_parallel_queue __percpu *pqueue;
+ struct padata_serial_queue __percpu *squeue;
+ atomic_t reorder_objects;
+ atomic_t refcnt;
+ atomic_t seq_nr;
+ struct padata_cpumask cpumask;
+ spinlock_t lock ____cacheline_aligned;
+ unsigned int processed;
+ struct timer_list timer;
+};
+
+/**
+ * struct padata_instance - The overall control structure.
+ *
+ * @cpu_notifier: cpu hotplug notifier.
+ * @wq: The workqueue in use.
+ * @pd: The internal control structure.
+ * @cpumask: User supplied cpumasks for the parallel and serial workers.
+ * @cpumask_change_notifier: Notifiers chain for user-defined notify
+ * callbacks that will be called when either @pcpu or @cbcpu
+ * or both cpumasks change.
+ * @kobj: padata instance kernel object.
+ * @lock: padata instance lock.
+ * @flags: padata flags.
+ */
+struct padata_instance {
+ struct notifier_block cpu_notifier;
+ struct workqueue_struct *wq;
+ struct parallel_data *pd;
+ struct padata_cpumask cpumask;
+ struct blocking_notifier_head cpumask_change_notifier;
+ struct kobject kobj;
+ struct mutex lock;
+ u8 flags;
+#define PADATA_INIT 1
+#define PADATA_RESET 2
+#define PADATA_INVALID 4
+};
+
+extern struct padata_instance *padata_alloc_possible(
+ struct workqueue_struct *wq);
+extern struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+ const struct cpumask *pcpumask,
+ const struct cpumask *cbcpumask);
+extern void padata_free(struct padata_instance *pinst);
+extern int padata_do_parallel(struct padata_instance *pinst,
+ struct padata_priv *padata, int cb_cpu);
+extern void padata_do_serial(struct padata_priv *padata);
+extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
+ cpumask_var_t cpumask);
+extern int padata_set_cpumasks(struct padata_instance *pinst,
+ cpumask_var_t pcpumask,
+ cpumask_var_t cbcpumask);
+extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
+extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
+extern int padata_start(struct padata_instance *pinst);
+extern void padata_stop(struct padata_instance *pinst);
+extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
+ struct notifier_block *nblock);
+extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
+ struct notifier_block *nblock);
+#endif
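A sketch of one object going through padata: the user embeds padata_priv, supplies the parallel/serial callbacks, and submits with a callback CPU taken from the instance's serial cpumask (all example_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/padata.h>

struct example_job {
	struct padata_priv	padata;
	int			result;
};

static void example_parallel(struct padata_priv *padata)
{
	struct example_job *job =
		container_of(padata, struct example_job, padata);

	job->result = 42;		/* the expensive, parallelizable work */
	padata_do_serial(padata);	/* hand the object back for ordering */
}

static void example_serial(struct padata_priv *padata)
{
	/* runs in submission order on the chosen callback CPU */
}

static int example_submit(struct padata_instance *pinst,
			  struct example_job *job, int cb_cpu)
{
	job->padata.parallel = example_parallel;
	job->padata.serial   = example_serial;
	return padata_do_parallel(pinst, &job->padata, cb_cpu);
}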
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
new file mode 100644
index 000000000..da5236615
--- /dev/null
+++ b/include/linux/page-flags-layout.h
@@ -0,0 +1,94 @@
+#ifndef PAGE_FLAGS_LAYOUT_H
+#define PAGE_FLAGS_LAYOUT_H
+
+#include <linux/numa.h>
+#include <generated/bounds.h>
+
+/*
+ * When a memory allocation must conform to specific limitations (such
+ * as being suitable for DMA) the caller will pass in hints to the
+ * allocator in the gfp_mask, in the zone modifier bits. These bits
+ * are used to select a priority ordered list of memory zones which
+ * match the requested limits. See gfp_zone() in include/linux/gfp.h
+ */
+#if MAX_NR_ZONES < 2
+#define ZONES_SHIFT 0
+#elif MAX_NR_ZONES <= 2
+#define ZONES_SHIFT 1
+#elif MAX_NR_ZONES <= 4
+#define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured, adjust calculation
+#endif
+
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
+
+/* SECTIONS_SHIFT: number of bits required to store a section number */
+#define SECTIONS_SHIFT (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)
+
+#endif /* CONFIG_SPARSEMEM */
+
+/*
+ * page->flags layout:
+ *
+ * There are five possibilities for how page->flags get laid out. The first
+ * pair is for the normal case without sparsemem. The second pair is for
+ * sparsemem when there is plenty of space for node and section information.
+ * The last is when there is insufficient space in page->flags and a separate
+ * lookup is necessary.
+ *
+ * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
+ * " plus space for last_cpupid: | NODE | ZONE | LAST_CPUPID ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
+ * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
+ */
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#define SECTIONS_WIDTH SECTIONS_SHIFT
+#else
+#define SECTIONS_WIDTH 0
+#endif
+
+#define ZONES_WIDTH ZONES_SHIFT
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define NODES_WIDTH NODES_SHIFT
+#else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
+#define NODES_WIDTH 0
+#endif
+
+#ifdef CONFIG_NUMA_BALANCING
+#define LAST__PID_SHIFT 8
+#define LAST__PID_MASK ((1 << LAST__PID_SHIFT)-1)
+
+#define LAST__CPU_SHIFT NR_CPUS_BITS
+#define LAST__CPU_MASK ((1 << LAST__CPU_SHIFT)-1)
+
+#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
+#else
+#define LAST_CPUPID_SHIFT 0
+#endif
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
+#else
+#define LAST_CPUPID_WIDTH 0
+#endif
+
+/*
+ * We are going to use the flags for the page-to-node mapping if it's in
+ * there. This includes the case where there is no node, so it is implicit.
+ */
+#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#define NODE_NOT_IN_PAGE_FLAGS
+#endif
+
+#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
+#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#endif
+
+#endif /* PAGE_FLAGS_LAYOUT_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
new file mode 100644
index 000000000..6d4f3fc5e
--- /dev/null
+++ b/include/linux/page-flags.h
@@ -0,0 +1,682 @@
+/*
+ * Macros for manipulating and testing page->flags
+ */
+
+#ifndef PAGE_FLAGS_H
+#define PAGE_FLAGS_H
+
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/mmdebug.h>
+#ifndef __GENERATING_BOUNDS_H
+#include <linux/mm_types.h>
+#include <generated/bounds.h>
+#endif /* !__GENERATING_BOUNDS_H */
+
+/*
+ * Various page->flags bits:
+ *
+ * PG_reserved is set for special pages, which can never be swapped out. Some
+ * of them might not even exist (eg empty_bad_page)...
+ *
+ * The PG_private bitflag is set on pagecache pages if they contain filesystem
+ * specific data (which is normally at page->private). It can also be used
+ * by private allocations for their own purposes.
+ *
+ * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
+ * and cleared when writeback _starts_ or when read _completes_. PG_writeback
+ * is set before writeback starts and cleared when it finishes.
+ *
+ * PG_locked also pins a page in pagecache, and blocks truncation of the file
+ * while it is held.
+ *
+ * page_waitqueue(page) is a wait queue of all tasks waiting for the page
+ * to become unlocked.
+ *
+ * PG_uptodate tells whether the page's contents are valid. When a read
+ * completes, the page becomes uptodate, unless a disk I/O error happened.
+ *
+ * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
+ * file-backed pagecache (see mm/vmscan.c).
+ *
+ * PG_error is set to indicate that an I/O error occurred on this page.
+ *
+ * PG_arch_1 is an architecture specific page state bit. The generic code
+ * guarantees that this bit is cleared for a page when it first is entered into
+ * the page cache.
+ *
+ * PG_highmem pages are not permanently mapped into the kernel virtual address
+ * space; they need to be kmapped separately for doing IO on the pages. The
+ * struct page itself (which carries these flag bits) is always mapped into
+ * kernel address space...
+ *
+ * PG_hwpoison indicates that a page got corrupted in hardware and contains
+ * data with incorrect ECC bits that triggered a machine check. Accessing is
+ * not safe since it may cause another machine check. Don't touch!
+ */
+
+/*
+ * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
+ * locked- and dirty-page accounting.
+ *
+ * The page flags field is split into two parts, the main flags area
+ * which extends from the low bits upwards, and the fields area which
+ * extends from the high bits downwards.
+ *
+ * | FIELD | ... | FLAGS |
+ * N-1 ^ 0
+ * (NR_PAGEFLAGS)
+ *
+ * The fields area is reserved for fields mapping zone, node (for NUMA) and
+ * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
+ * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
+ */
+enum pageflags {
+ PG_locked, /* Page is locked. Don't touch. */
+ PG_error,
+ PG_referenced,
+ PG_uptodate,
+ PG_dirty,
+ PG_lru,
+ PG_active,
+ PG_slab,
+ PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
+ PG_arch_1,
+ PG_reserved,
+ PG_private, /* If pagecache, has fs-private data */
+ PG_private_2, /* If pagecache, has fs aux data */
+ PG_writeback, /* Page is under writeback */
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+ PG_head, /* A head page */
+ PG_tail, /* A tail page */
+#else
+ PG_compound, /* A compound page */
+#endif
+ PG_swapcache, /* Swap page: swp_entry_t in private */
+ PG_mappedtodisk, /* Has blocks allocated on-disk */
+ PG_reclaim, /* To be reclaimed asap */
+ PG_swapbacked, /* Page is backed by RAM/swap */
+ PG_unevictable, /* Page is "unevictable" */
+#ifdef CONFIG_MMU
+ PG_mlocked, /* Page is vma mlocked */
+#endif
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
+ PG_uncached, /* Page has been mapped as uncached */
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ PG_hwpoison, /* hardware poisoned page. Don't touch */
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ PG_compound_lock,
+#endif
+#ifdef CONFIG_TOI_INCREMENTAL
+ PG_toi_untracked, /* Don't track dirtiness of this page - assume always dirty */
+ PG_toi_ro, /* Page was made RO by TOI */
+ PG_toi_cbw, /* Copy the page before it is written to */
+ PG_toi_dirty, /* Page has been modified */
+#endif
+ __NR_PAGEFLAGS,
+
+ /* Filesystems */
+ PG_checked = PG_owner_priv_1,
+
+ /* Two page bits are conscripted by FS-Cache to maintain local caching
+ * state. These bits are set on pages belonging to the netfs's inodes
+ * when those inodes are being locally cached.
+ */
+ PG_fscache = PG_private_2, /* page backed by cache */
+
+ /* XEN */
+ /* Pinned in Xen as a read-only pagetable page. */
+ PG_pinned = PG_owner_priv_1,
+ /* Pinned as part of domain save (see xen_mm_pin_all()). */
+ PG_savepinned = PG_dirty,
+ /* Has a grant mapping of another (foreign) domain's page. */
+ PG_foreign = PG_owner_priv_1,
+
+ /* SLOB */
+ PG_slob_free = PG_private,
+};
+
+#ifndef __GENERATING_BOUNDS_H
+
+/*
+ * Macros to create function definitions for page flags
+ */
+#define TESTPAGEFLAG(uname, lname) \
+static inline int Page##uname(const struct page *page) \
+ { return test_bit(PG_##lname, &page->flags); }
+
+#define SETPAGEFLAG(uname, lname) \
+static inline void SetPage##uname(struct page *page) \
+ { set_bit(PG_##lname, &page->flags); }
+
+#define CLEARPAGEFLAG(uname, lname) \
+static inline void ClearPage##uname(struct page *page) \
+ { clear_bit(PG_##lname, &page->flags); }
+
+#define __SETPAGEFLAG(uname, lname) \
+static inline void __SetPage##uname(struct page *page) \
+ { __set_bit(PG_##lname, &page->flags); }
+
+#define __CLEARPAGEFLAG(uname, lname) \
+static inline void __ClearPage##uname(struct page *page) \
+ { __clear_bit(PG_##lname, &page->flags); }
+
+#define TESTSETFLAG(uname, lname) \
+static inline int TestSetPage##uname(struct page *page) \
+ { return test_and_set_bit(PG_##lname, &page->flags); }
+
+#define TESTCLEARFLAG(uname, lname) \
+static inline int TestClearPage##uname(struct page *page) \
+ { return test_and_clear_bit(PG_##lname, &page->flags); }
+
+#define __TESTCLEARFLAG(uname, lname) \
+static inline int __TestClearPage##uname(struct page *page) \
+ { return __test_and_clear_bit(PG_##lname, &page->flags); }
+
+#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
+ SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
+
+#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
+ __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)
+
+#define TESTSCFLAG(uname, lname) \
+ TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
+
+#define TESTPAGEFLAG_FALSE(uname) \
+static inline int Page##uname(const struct page *page) { return 0; }
+
+#define SETPAGEFLAG_NOOP(uname) \
+static inline void SetPage##uname(struct page *page) { }
+
+#define CLEARPAGEFLAG_NOOP(uname) \
+static inline void ClearPage##uname(struct page *page) { }
+
+#define __CLEARPAGEFLAG_NOOP(uname) \
+static inline void __ClearPage##uname(struct page *page) { }
+
+#define TESTSETFLAG_FALSE(uname) \
+static inline int TestSetPage##uname(struct page *page) { return 0; }
+
+#define TESTCLEARFLAG_FALSE(uname) \
+static inline int TestClearPage##uname(struct page *page) { return 0; }
+
+#define __TESTCLEARFLAG_FALSE(uname) \
+static inline int __TestClearPage##uname(struct page *page) { return 0; }
+
+#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname) \
+ SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)
+
+#define TESTSCFLAG_FALSE(uname) \
+ TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
+
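+/*
+ * Editorial note: as a concrete example, the single invocation
+ * "PAGEFLAG(Dirty, dirty)" further down expands (besides the extra helpers
+ * generated on the same line) to:
+ *
+ *      static inline int PageDirty(const struct page *page)
+ *      { return test_bit(PG_dirty, &page->flags); }
+ *      static inline void SetPageDirty(struct page *page)
+ *      { set_bit(PG_dirty, &page->flags); }
+ *      static inline void ClearPageDirty(struct page *page)
+ *      { clear_bit(PG_dirty, &page->flags); }
+ */
+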
+struct page; /* forward declaration */
+
+TESTPAGEFLAG(Locked, locked)
+PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
+PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
+ __SETPAGEFLAG(Referenced, referenced)
+PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
+PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
+PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+ TESTCLEARFLAG(Active, active)
+__PAGEFLAG(Slab, slab)
+PAGEFLAG(Checked, checked) /* Used by some filesystems */
+PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
+PAGEFLAG(SavePinned, savepinned); /* Xen */
+PAGEFLAG(Foreign, foreign); /* Xen */
+PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
+PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
+ __SETPAGEFLAG(SwapBacked, swapbacked)
+
+__PAGEFLAG(SlobFree, slob_free)
+
+/*
+ * Private page markings that may be used by the filesystem that owns the page
+ * for its own purposes.
+ * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ */
+PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
+ __CLEARPAGEFLAG(Private, private)
+PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
+PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
+
+/*
+ * Only test-and-set exist for PG_writeback. The unconditional operators are
+ * risky: they bypass page accounting.
+ */
+TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
+PAGEFLAG(MappedToDisk, mappedtodisk)
+
+/* PG_readahead is only used for reads; PG_reclaim is only for writes */
+PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
+PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * Must use a macro here due to header dependency issues. page_zone() is not
+ * available at this point.
+ */
+#define PageHighMem(__p) is_highmem(page_zone(__p))
+#else
+PAGEFLAG_FALSE(HighMem)
+#endif
+
+#ifdef CONFIG_SWAP
+PAGEFLAG(SwapCache, swapcache)
+#else
+PAGEFLAG_FALSE(SwapCache)
+#endif
+
+PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
+ TESTCLEARFLAG(Unevictable, unevictable)
+
+#ifdef CONFIG_MMU
+PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
+ TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
+#else
+PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
+ TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
+#endif
+
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
+PAGEFLAG(Uncached, uncached)
+#else
+PAGEFLAG_FALSE(Uncached)
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+PAGEFLAG(HWPoison, hwpoison)
+TESTSCFLAG(HWPoison, hwpoison)
+#define __PG_HWPOISON (1UL << PG_hwpoison)
+#else
+PAGEFLAG_FALSE(HWPoison)
+#define __PG_HWPOISON 0
+#endif
+#ifdef CONFIG_TOI_INCREMENTAL
+PAGEFLAG(TOI_RO, toi_ro)
+PAGEFLAG(TOI_Dirty, toi_dirty)
+PAGEFLAG(TOI_Untracked, toi_untracked)
+PAGEFLAG(TOI_CBW, toi_cbw)
+#else
+PAGEFLAG_FALSE(TOI_RO)
+PAGEFLAG_FALSE(TOI_Dirty)
+PAGEFLAG_FALSE(TOI_Untracked)
+PAGEFLAG_FALSE(TOI_CBW)
+#endif
+
+/*
+ * On an anonymous page mapped into a user virtual memory area,
+ * page->mapping points to its anon_vma, not to a struct address_space;
+ * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ *
+ * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
+ * and then page->mapping points, not to an anon_vma, but to a private
+ * structure which KSM associates with that merged page. See ksm.h.
+ *
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ *
+ * Please note that, confusingly, "page_mapping" refers to the inode
+ * address_space which maps the page from disk; whereas "page_mapped"
+ * refers to user virtual address space into which the page is mapped.
+ */
+#define PAGE_MAPPING_ANON 1
+#define PAGE_MAPPING_KSM 2
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+
+static inline int PageAnon(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
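+/*
+ * Editorial sketch: code that needs the anon_vma behind an anonymous page
+ * strips the low mapping bits, roughly as mm/rmap.c does (assumes the
+ * struct anon_vma definition from <linux/rmap.h>):
+ *
+ *      struct anon_vma *anon_vma = (struct anon_vma *)
+ *              ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+ */
+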
+#ifdef CONFIG_KSM
+/*
+ * A KSM page is one of those write-protected "shared pages" or "merged pages"
+ * which KSM maps into multiple mms, wherever identical anonymous page content
+ * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
+ */
+static inline int PageKsm(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+#else
+TESTPAGEFLAG_FALSE(Ksm)
+#endif
+
+u64 stable_page_flags(struct page *page);
+
+static inline int PageUptodate(struct page *page)
+{
+ int ret = test_bit(PG_uptodate, &(page)->flags);
+
+ /*
+ * Must ensure that the data we read out of the page is loaded
+ * _after_ we've loaded page->flags to check for PageUptodate.
+ * We can skip the barrier if the page is not uptodate, because
+ * we wouldn't be reading anything from it.
+ *
+ * See SetPageUptodate() for the other side of the story.
+ */
+ if (ret)
+ smp_rmb();
+
+ return ret;
+}
+
+static inline void __SetPageUptodate(struct page *page)
+{
+ smp_wmb();
+ __set_bit(PG_uptodate, &(page)->flags);
+}
+
+static inline void SetPageUptodate(struct page *page)
+{
+ /*
+ * Memory barrier must be issued before setting the PG_uptodate bit,
+ * so that all previous stores issued in order to bring the page
+ * uptodate are actually visible before PageUptodate becomes true.
+ */
+ smp_wmb();
+ set_bit(PG_uptodate, &(page)->flags);
+}
+
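+/*
+ * Editorial sketch of the intended pairing: the writer publishes the data
+ * before setting the flag, the reader checks the flag before touching the
+ * data; the barriers are supplied by the helpers above.
+ *
+ *      // writer (e.g. read completion):
+ *      copy or DMA the data into the page;
+ *      SetPageUptodate(page);          // smp_wmb() happens first
+ *
+ *      // reader:
+ *      if (PageUptodate(page))         // smp_rmb() happens after the test
+ *              use the page contents;
+ */
+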
+CLEARPAGEFLAG(Uptodate, uptodate)
+
+int test_clear_page_writeback(struct page *page);
+int __test_set_page_writeback(struct page *page, bool keep_write);
+
+#define test_set_page_writeback(page) \
+ __test_set_page_writeback(page, false)
+#define test_set_page_writeback_keepwrite(page) \
+ __test_set_page_writeback(page, true)
+
+static inline void set_page_writeback(struct page *page)
+{
+ test_set_page_writeback(page);
+}
+
+static inline void set_page_writeback_keepwrite(struct page *page)
+{
+ test_set_page_writeback_keepwrite(page);
+}
+
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+/*
+ * System with lots of page flags available. This allows separate
+ * flags for PageHead() and PageTail() checks of compound pages so that bit
+ * tests can be used in performance sensitive paths. PageCompound is
+ * generally not used in hot code paths except arch/powerpc/mm/init_64.c
+ * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
+ * and avoid handling those in real mode.
+ */
+__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
+__PAGEFLAG(Tail, tail)
+
+static inline int PageCompound(struct page *page)
+{
+ return page->flags & ((1L << PG_head) | (1L << PG_tail));
+
+}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+ BUG_ON(!PageHead(page));
+ ClearPageHead(page);
+}
+#endif
+
+#define PG_head_mask ((1L << PG_head))
+
+#else
+/*
+ * Reduce page flag use as much as possible by overlapping
+ * compound page flags with the flags used for page cache pages. Possible
+ * because PageCompound is always set for compound pages and not for
+ * pages on the LRU and/or pagecache.
+ */
+TESTPAGEFLAG(Compound, compound)
+__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
+
+/*
+ * PG_reclaim is used in combination with PG_compound to mark the
+ * head and tail of a compound page. This saves one page flag
+ * but makes it impossible to use compound pages for the page cache.
+ * The PG_reclaim bit would have to be used for reclaim or readahead
+ * if compound pages enter the page cache.
+ *
+ * PG_compound & PG_reclaim => Tail page
+ * PG_compound & ~PG_reclaim => Head page
+ */
+#define PG_head_mask ((1L << PG_compound))
+#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
+
+static inline int PageHead(struct page *page)
+{
+ return ((page->flags & PG_head_tail_mask) == PG_head_mask);
+}
+
+static inline int PageTail(struct page *page)
+{
+ return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
+}
+
+static inline void __SetPageTail(struct page *page)
+{
+ page->flags |= PG_head_tail_mask;
+}
+
+static inline void __ClearPageTail(struct page *page)
+{
+ page->flags &= ~PG_head_tail_mask;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+ BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
+ clear_bit(PG_compound, &page->flags);
+}
+#endif
+
+#endif /* !PAGEFLAGS_EXTENDED */
+
+#ifdef CONFIG_HUGETLB_PAGE
+int PageHuge(struct page *page);
+int PageHeadHuge(struct page *page);
+bool page_huge_active(struct page *page);
+#else
+TESTPAGEFLAG_FALSE(Huge)
+TESTPAGEFLAG_FALSE(HeadHuge)
+
+static inline bool page_huge_active(struct page *page)
+{
+	return false;
+}
+#endif
+
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * PageHuge() only returns true for hugetlbfs pages, but not for
+ * normal or transparent huge pages.
+ *
+ * PageTransHuge() returns true for both transparent huge and
+ * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
+ * called in the core VM paths where hugetlbfs pages can't exist.
+ */
+static inline int PageTransHuge(struct page *page)
+{
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ return PageHead(page);
+}
+
+/*
+ * PageTransCompound returns true for both transparent huge pages
+ * and hugetlbfs pages, so it should only be called when it's known
+ * that hugetlbfs pages aren't involved.
+ */
+static inline int PageTransCompound(struct page *page)
+{
+ return PageCompound(page);
+}
+
+/*
+ * PageTransTail returns true for both transparent huge pages
+ * and hugetlbfs pages, so it should only be called when it's known
+ * that hugetlbfs pages aren't involved.
+ */
+static inline int PageTransTail(struct page *page)
+{
+ return PageTail(page);
+}
+
+#else
+
+static inline int PageTransHuge(struct page *page)
+{
+ return 0;
+}
+
+static inline int PageTransCompound(struct page *page)
+{
+ return 0;
+}
+
+static inline int PageTransTail(struct page *page)
+{
+ return 0;
+}
+#endif
+
+/*
+ * PageBuddy() indicates that the page is free and in the buddy system
+ * (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
+ * -2 so that an underflow of the page_mapcount() won't be mistaken
+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
+ */
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
+static inline int PageBuddy(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBuddy(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBuddy(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBuddy(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
+#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
+
+static inline int PageBalloon(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBalloon(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
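+/*
+ * Editorial sketch: the buddy allocator (mm/page_alloc.c) parks the
+ * sentinel in _mapcount while a page sits on a free list and clears it
+ * again on allocation; shown here only to illustrate the encoding:
+ *
+ *      __SetPageBuddy(page);           // page enters a free list
+ *      ...
+ *      if (PageBuddy(page))            // e.g. a compaction scanner
+ *              ...;
+ *      __ClearPageBuddy(page);         // page leaves the free list
+ */
+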
+/*
+ * If network-based swap is enabled, sl*b must keep track of whether pages
+ * were allocated from pfmemalloc reserves.
+ */
+static inline int PageSlabPfmemalloc(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageSlab(page), page);
+ return PageActive(page);
+}
+
+static inline void SetPageSlabPfmemalloc(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageSlab(page), page);
+ SetPageActive(page);
+}
+
+static inline void __ClearPageSlabPfmemalloc(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageSlab(page), page);
+ __ClearPageActive(page);
+}
+
+static inline void ClearPageSlabPfmemalloc(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageSlab(page), page);
+ ClearPageActive(page);
+}
+
+#ifdef CONFIG_MMU
+#define __PG_MLOCKED (1 << PG_mlocked)
+#else
+#define __PG_MLOCKED 0
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __PG_COMPOUND_LOCK (1 << PG_compound_lock)
+#else
+#define __PG_COMPOUND_LOCK 0
+#endif
+
+/*
+ * Flags checked when a page is freed. Pages being freed should not have
+ * these flags set. If they are, there is a problem.
+ */
+#define PAGE_FLAGS_CHECK_AT_FREE \
+ (1 << PG_lru | 1 << PG_locked | \
+ 1 << PG_private | 1 << PG_private_2 | \
+ 1 << PG_writeback | 1 << PG_reserved | \
+ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
+ 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+ __PG_COMPOUND_LOCK)
+
+/*
+ * Flags checked when a page is prepped for return by the page allocator.
+ * Pages being prepped should not have any flags set. If they are set,
+ * there has been a kernel bug or struct page corruption.
+ */
+#ifdef CONFIG_TOI_INCREMENTAL
+#define PAGE_FLAGS_CHECK_AT_PREP (((1 << NR_PAGEFLAGS) - 1) & \
+ ~((1 << PG_toi_dirty) | (1 << PG_toi_ro)))
+#else
+#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
+#endif
+
+#define PAGE_FLAGS_PRIVATE \
+ (1 << PG_private | 1 << PG_private_2)
+/**
+ * page_has_private - Determine if page has private stuff
+ * @page: The page to be checked
+ *
+ * Determine if a page has private stuff, indicating that release routines
+ * should be invoked upon it.
+ */
+static inline int page_has_private(struct page *page)
+{
+ return !!(page->flags & PAGE_FLAGS_PRIVATE);
+}
+
+#endif /* !__GENERATING_BOUNDS_H */
+
+#endif /* PAGE_FLAGS_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
new file mode 100644
index 000000000..2dc1e1697
--- /dev/null
+++ b/include/linux/page-isolation.h
@@ -0,0 +1,76 @@
+#ifndef __LINUX_PAGEISOLATION_H
+#define __LINUX_PAGEISOLATION_H
+
+#ifdef CONFIG_MEMORY_ISOLATION
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+ return zone->nr_isolate_pageblock;
+}
+static inline bool is_migrate_isolate_page(struct page *page)
+{
+ return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
+}
+static inline bool is_migrate_isolate(int migratetype)
+{
+ return migratetype == MIGRATE_ISOLATE;
+}
+#else
+static inline bool has_isolate_pageblock(struct zone *zone)
+{
+ return false;
+}
+static inline bool is_migrate_isolate_page(struct page *page)
+{
+ return false;
+}
+static inline bool is_migrate_isolate(int migratetype)
+{
+ return false;
+}
+#endif
+
+bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
+ bool skip_hwpoisoned_pages);
+void set_pageblock_migratetype(struct page *page, int migratetype);
+int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype);
+int move_freepages(struct zone *zone,
+ struct page *start_page, struct page *end_page,
+ int migratetype);
+
+/*
+ * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
+ * If the specified range includes migrate types other than MOVABLE or CMA,
+ * this will fail with -EBUSY.
+ *
+ * To finally isolate all pages in the range, the caller has to free
+ * all pages in the range. test_pages_isolated() can be used to verify
+ * that this has happened.
+ */
+int
+start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype, bool skip_hwpoisoned_pages);
+
+/*
+ * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
+ * The target range is [start_pfn, end_pfn).
+ */
+int
+undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype);
+
+/*
+ * Test whether all pages in [start_pfn, end_pfn) are isolated.
+ */
+int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
+ bool skip_hwpoisoned_pages);
+
+/*
+ * Internal functions. Change a pageblock's migrate type.
+ */
+int set_migratetype_isolate(struct page *page, bool skip_hwpoisoned_pages);
+void unset_migratetype_isolate(struct page *page, unsigned migratetype);
+struct page *alloc_migrate_target(struct page *page, unsigned long private,
+ int **resultp);
+
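+/*
+ * Editorial usage sketch (roughly what alloc_contig_range() in
+ * mm/page_alloc.c does), guarded out since it is not part of the original
+ * header: isolate a pfn range, check that it really ended up isolated,
+ * and undo the isolation on the way out.
+ */
+#if 0 /* editorial example, not compiled */
+static int example_claim_range(unsigned long start_pfn, unsigned long end_pfn)
+{
+        int ret;
+
+        ret = start_isolate_page_range(start_pfn, end_pfn,
+                                       MIGRATE_MOVABLE, false);
+        if (ret)
+                return ret;
+
+        /* ... migrate or free every page in the range ... */
+
+        if (test_pages_isolated(start_pfn, end_pfn, false))
+                ret = -EBUSY;
+
+        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+        return ret;
+}
+#endif /* editorial example */
+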
+#endif
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
new file mode 100644
index 000000000..17fa4f8de
--- /dev/null
+++ b/include/linux/page_counter.h
@@ -0,0 +1,52 @@
+#ifndef _LINUX_PAGE_COUNTER_H
+#define _LINUX_PAGE_COUNTER_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+struct page_counter {
+ atomic_long_t count;
+ unsigned long limit;
+ struct page_counter *parent;
+
+ /* legacy */
+ unsigned long watermark;
+ unsigned long failcnt;
+};
+
+#if BITS_PER_LONG == 32
+#define PAGE_COUNTER_MAX LONG_MAX
+#else
+#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
+#endif
+
+static inline void page_counter_init(struct page_counter *counter,
+ struct page_counter *parent)
+{
+ atomic_long_set(&counter->count, 0);
+ counter->limit = PAGE_COUNTER_MAX;
+ counter->parent = parent;
+}
+
+static inline unsigned long page_counter_read(struct page_counter *counter)
+{
+ return atomic_long_read(&counter->count);
+}
+
+void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_try_charge(struct page_counter *counter,
+ unsigned long nr_pages,
+ struct page_counter **fail);
+void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
+int page_counter_limit(struct page_counter *counter, unsigned long limit);
+int page_counter_memparse(const char *buf, const char *max,
+ unsigned long *nr_pages);
+
+static inline void page_counter_reset_watermark(struct page_counter *counter)
+{
+ counter->watermark = page_counter_read(counter);
+}
+
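+/*
+ * Editorial usage sketch (guarded out, names are illustrative): a child
+ * counter charged against its parent's limit; on failure, try_charge
+ * reports which counter in the ancestry rejected the charge.
+ */
+#if 0 /* editorial example, not compiled */
+static struct page_counter example_parent, example_child;
+
+static int example_account(unsigned long nr_pages)
+{
+        struct page_counter *fail;
+        int ret;
+
+        page_counter_init(&example_parent, NULL);
+        page_counter_init(&example_child, &example_parent);
+
+        ret = page_counter_try_charge(&example_child, nr_pages, &fail);
+        if (ret)
+                return ret;     /* 'fail' points at the limited counter */
+
+        /* ... use the accounted memory ... */
+
+        page_counter_uncharge(&example_child, nr_pages);
+        return 0;
+}
+#endif /* editorial example */
+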
+#endif /* _LINUX_PAGE_COUNTER_H */
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
new file mode 100644
index 000000000..c42981cd9
--- /dev/null
+++ b/include/linux/page_ext.h
@@ -0,0 +1,84 @@
+#ifndef __LINUX_PAGE_EXT_H
+#define __LINUX_PAGE_EXT_H
+
+#include <linux/types.h>
+#include <linux/stacktrace.h>
+
+struct pglist_data;
+struct page_ext_operations {
+ bool (*need)(void);
+ void (*init)(void);
+};
+
+#ifdef CONFIG_PAGE_EXTENSION
+
+/*
+ * page_ext->flags bits:
+ *
+ * PAGE_EXT_DEBUG_POISON is set for poisoned pages. It is used to
+ * implement the generic debug pagealloc feature: pages are filled with
+ * a poison pattern and the flag is set after free_pages(). Before
+ * alloc_pages(), the poisoned pages are checked for pattern corruption
+ * and the flag is cleared.
+ */
+
+enum page_ext_flags {
+ PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
+ PAGE_EXT_DEBUG_GUARD,
+ PAGE_EXT_OWNER,
+};
+
+/*
+ * Page Extension can be considered an extended mem_map.
+ * A page_ext structure is associated with every page descriptor and
+ * lets us attach more information to the page.
+ * All page_ext structures are allocated at boot or on memory hotplug,
+ * so the page_ext for a pfn always exists.
+ */
+struct page_ext {
+ unsigned long flags;
+#ifdef CONFIG_PAGE_OWNER
+ unsigned int order;
+ gfp_t gfp_mask;
+ unsigned int nr_entries;
+ unsigned long trace_entries[8];
+#endif
+};
+
+extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void page_ext_init_flatmem(void)
+{
+}
+extern void page_ext_init(void);
+#else
+extern void page_ext_init_flatmem(void);
+static inline void page_ext_init(void)
+{
+}
+#endif
+
+struct page_ext *lookup_page_ext(struct page *page);
+
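+/*
+ * Editorial sketch: a client such as page_owner advertises itself through
+ * a struct page_ext_operations entry (.need/.init above) and then reaches
+ * its per-page state at runtime, e.g.:
+ *
+ *      struct page_ext *page_ext = lookup_page_ext(page);
+ *
+ *      if (page_ext)
+ *              __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+ */
+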
+#else /* !CONFIG_PAGE_EXTENSION */
+struct page_ext;
+
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+{
+}
+
+static inline struct page_ext *lookup_page_ext(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_init(void)
+{
+}
+
+static inline void page_ext_init_flatmem(void)
+{
+}
+#endif /* CONFIG_PAGE_EXTENSION */
+#endif /* __LINUX_PAGE_EXT_H */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
new file mode 100644
index 000000000..b48c3471c
--- /dev/null
+++ b/include/linux/page_owner.h
@@ -0,0 +1,38 @@
+#ifndef __LINUX_PAGE_OWNER_H
+#define __LINUX_PAGE_OWNER_H
+
+#ifdef CONFIG_PAGE_OWNER
+extern bool page_owner_inited;
+extern struct page_ext_operations page_owner_ops;
+
+extern void __reset_page_owner(struct page *page, unsigned int order);
+extern void __set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask);
+
+static inline void reset_page_owner(struct page *page, unsigned int order)
+{
+ if (likely(!page_owner_inited))
+ return;
+
+ __reset_page_owner(page, order);
+}
+
+static inline void set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask)
+{
+ if (likely(!page_owner_inited))
+ return;
+
+ __set_page_owner(page, order, gfp_mask);
+}
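+
+/*
+ * Editorial note: the page allocator calls these hooks when a page is
+ * handed out and when it is freed (see mm/page_alloc.c); both collapse
+ * to a test of page_owner_inited until the machinery is set up:
+ *
+ *      set_page_owner(page, order, gfp_mask);  // post-allocation path
+ *      ...
+ *      reset_page_owner(page, order);          // free path
+ */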
+#else
+static inline void reset_page_owner(struct page *page, unsigned int order)
+{
+}
+static inline void set_page_owner(struct page *page,
+ unsigned int order, gfp_t gfp_mask)
+{
+}
+
+#endif /* CONFIG_PAGE_OWNER */
+#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
new file mode 100644
index 000000000..2baeee12f
--- /dev/null
+++ b/include/linux/pageblock-flags.h
@@ -0,0 +1,101 @@
+/*
+ * Macros for manipulating and testing flags related to a
+ * pageblock_nr_pages number of pages.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Original author, Mel Gorman
+ * Major cleanups and reduction of bit operations, Andy Whitcroft
+ */
+#ifndef PAGEBLOCK_FLAGS_H
+#define PAGEBLOCK_FLAGS_H
+
+#include <linux/types.h>
+
+/* Bit indices that affect a whole block of pages */
+enum pageblock_bits {
+ PB_migrate,
+ PB_migrate_end = PB_migrate + 3 - 1,
+ /* 3 bits required for migrate types */
+ PB_migrate_skip,/* If set the block is skipped by compaction */
+
+ /*
+ * Assume the bits will always align on a word. If this assumption
+ * changes then get/set pageblock needs updating.
+ */
+ NR_PAGEBLOCK_BITS
+};
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+
+/* Huge page sizes are variable */
+extern int pageblock_order;
+
+#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+/* Huge pages are a constant size */
+#define pageblock_order HUGETLB_PAGE_ORDER
+
+#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+
+#else /* CONFIG_HUGETLB_PAGE */
+
+/* If huge pages are not used, group by MAX_ORDER_NR_PAGES */
+#define pageblock_order (MAX_ORDER-1)
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#define pageblock_nr_pages (1UL << pageblock_order)
+
+/* Forward declaration */
+struct page;
+
+unsigned long get_pfnblock_flags_mask(struct page *page,
+ unsigned long pfn,
+ unsigned long end_bitidx,
+ unsigned long mask);
+
+void set_pfnblock_flags_mask(struct page *page,
+ unsigned long flags,
+ unsigned long pfn,
+ unsigned long end_bitidx,
+ unsigned long mask);
+
+/* Declarations for getting and setting flags. See mm/page_alloc.c */
+#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
+ get_pfnblock_flags_mask(page, page_to_pfn(page), \
+ end_bitidx, \
+ (1 << (end_bitidx - start_bitidx + 1)) - 1)
+#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
+ set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
+ end_bitidx, \
+ (1 << (end_bitidx - start_bitidx + 1)) - 1)
+
+#ifdef CONFIG_COMPACTION
+#define get_pageblock_skip(page) \
+ get_pageblock_flags_group(page, PB_migrate_skip, \
+ PB_migrate_skip)
+#define clear_pageblock_skip(page) \
+ set_pageblock_flags_group(page, 0, PB_migrate_skip, \
+ PB_migrate_skip)
+#define set_pageblock_skip(page) \
+ set_pageblock_flags_group(page, 1, PB_migrate_skip, \
+ PB_migrate_skip)
+#endif /* CONFIG_COMPACTION */
+
+#endif /* PAGEBLOCK_FLAGS_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
new file mode 100644
index 000000000..4b3736f70
--- /dev/null
+++ b/include/linux/pagemap.h
@@ -0,0 +1,673 @@
+#ifndef _LINUX_PAGEMAP_H
+#define _LINUX_PAGEMAP_H
+
+/*
+ * Copyright 1995 Linus Torvalds
+ */
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/compiler.h>
+#include <asm/uaccess.h>
+#include <linux/gfp.h>
+#include <linux/bitops.h>
+#include <linux/hardirq.h> /* for in_interrupt() */
+#include <linux/hugetlb_inline.h>
+
+/*
+ * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
+ * allocation mode flags.
+ */
+enum mapping_flags {
+ AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
+ AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
+ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
+ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
+ AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
+};
+
+static inline void mapping_set_error(struct address_space *mapping, int error)
+{
+ if (unlikely(error)) {
+ if (error == -ENOSPC)
+ set_bit(AS_ENOSPC, &mapping->flags);
+ else
+ set_bit(AS_EIO, &mapping->flags);
+ }
+}
+
+static inline void mapping_set_unevictable(struct address_space *mapping)
+{
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+ clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ if (mapping)
+ return test_bit(AS_UNEVICTABLE, &mapping->flags);
+ return !!mapping;
+}
+
+static inline void mapping_set_exiting(struct address_space *mapping)
+{
+ set_bit(AS_EXITING, &mapping->flags);
+}
+
+static inline int mapping_exiting(struct address_space *mapping)
+{
+ return test_bit(AS_EXITING, &mapping->flags);
+}
+
+static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+{
+ return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
+}
+
+/*
+ * This is non-atomic. Only to be used before the mapping is activated.
+ * Probably needs a barrier...
+ */
+static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
+{
+ m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
+ (__force unsigned long)mask;
+}
+
+/*
+ * The page cache can be done in larger chunks than
+ * one page, because it allows for more efficient
+ * throughput (it can then be mapped into user
+ * space in smaller chunks for same flexibility).
+ *
+ * Or rather, it _will_ be done in larger chunks.
+ */
+#define PAGE_CACHE_SHIFT PAGE_SHIFT
+#define PAGE_CACHE_SIZE PAGE_SIZE
+#define PAGE_CACHE_MASK PAGE_MASK
+#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
+
+#define page_cache_get(page) get_page(page)
+#define page_cache_release(page) put_page(page)
+void release_pages(struct page **pages, int nr, bool cold);
+
+/*
+ * speculatively take a reference to a page.
+ * If the page is free (_count == 0), then _count is untouched, and 0
+ * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ *
+ * This function must be called inside the same rcu_read_lock() section as has
+ * been used to lookup the page in the pagecache radix-tree (or page table):
+ * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ *
+ * Unless an RCU grace period has passed, the count of all pages coming out
+ * of the allocator must be considered unstable. page_count may return higher
+ * than expected, and put_page must be able to do the right thing when the
+ * page has been finished with, no matter what it is subsequently allocated
+ * for (because put_page is what is used here to drop an invalid speculative
+ * reference).
+ *
+ * This is the interesting part of the lockless pagecache (and lockless
+ * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
+ * has the following pattern:
+ * 1. find page in radix tree
+ * 2. conditionally increment refcount
+ * 3. check the page is still in pagecache (if no, goto 1)
+ *
+ * Remove-side that cares about stability of _count (eg. reclaim) has the
+ * following (with tree_lock held for write):
+ * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
+ * B. remove page from pagecache
+ * C. free the page
+ *
+ * There are 2 critical interleavings that matter:
+ * - 2 runs before A: in this case, A sees elevated refcount and bails out
+ * - A runs before 2: in this case, 2 sees zero refcount and retries;
+ * subsequently, B will complete and 1 will find no page, causing the
+ * lookup to return NULL.
+ *
+ * It is possible that between 1 and 2, the page is removed then the exact same
+ * page is inserted into the same position in pagecache. That's OK: the
+ * old find_get_page using tree_lock could equally have run before or after
+ * such a re-insertion, depending on order that locks are granted.
+ *
+ * Lookups racing against pagecache insertion aren't a big problem: either 1
+ * will find the page or it will not. Likewise, the old find_get_page could run
+ * either before the insertion or afterwards, depending on timing.
+ */
+static inline int page_cache_get_speculative(struct page *page)
+{
+ VM_BUG_ON(in_interrupt());
+
+#ifdef CONFIG_TINY_RCU
+# ifdef CONFIG_PREEMPT_COUNT
+ VM_BUG_ON(!in_atomic());
+# endif
+ /*
+ * Preempt must be disabled here - we rely on rcu_read_lock doing
+ * this for us.
+ *
+ * Pagecache won't be truncated from interrupt context, so if we have
+ * found a page in the radix tree here, we have pinned its refcount by
+ * disabling preempt, and hence no need for the "speculative get" that
+ * SMP requires.
+ */
+ VM_BUG_ON_PAGE(page_count(page) == 0, page);
+ atomic_inc(&page->_count);
+
+#else
+ if (unlikely(!get_page_unless_zero(page))) {
+ /*
+ * Either the page has been freed, or will be freed.
+ * In either case, retry here and the caller should
+ * do the right thing (see comments above).
+ */
+ return 0;
+ }
+#endif
+ VM_BUG_ON_PAGE(PageTail(page), page);
+
+ return 1;
+}
+
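+/*
+ * Editorial sketch of the 1-2-3 lookup pattern described above, roughly
+ * what find_get_page()/mm/filemap.c does (guarded out; the real code uses
+ * radix tree slots and deref helpers, omitted here for clarity):
+ */
+#if 0 /* editorial example, not compiled */
+static struct page *example_lookup(struct address_space *mapping,
+                                   pgoff_t index)
+{
+        struct page *page;
+
+        rcu_read_lock();
+repeat:
+        page = radix_tree_lookup(&mapping->page_tree, index);  /* step 1 */
+        if (page) {
+                if (!page_cache_get_speculative(page))          /* step 2 */
+                        goto repeat;
+                /* step 3: recheck that the page was not truncated/reused */
+                if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
+                                                       index))) {
+                        page_cache_release(page);
+                        goto repeat;
+                }
+        }
+        rcu_read_unlock();
+        return page;
+}
+#endif /* editorial example */
+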
+/*
+ * Same as above, but add instead of inc (could just be merged)
+ */
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+ VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
+# ifdef CONFIG_PREEMPT_COUNT
+ VM_BUG_ON(!in_atomic());
+# endif
+ VM_BUG_ON_PAGE(page_count(page) == 0, page);
+ atomic_add(count, &page->_count);
+
+#else
+ if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+ return 0;
+#endif
+ VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
+
+ return 1;
+}
+
+static inline int page_freeze_refs(struct page *page, int count)
+{
+ return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+}
+
+static inline void page_unfreeze_refs(struct page *page, int count)
+{
+ VM_BUG_ON_PAGE(page_count(page) != 0, page);
+ VM_BUG_ON(count == 0);
+
+ atomic_set(&page->_count, count);
+}
+
+#ifdef CONFIG_NUMA
+extern struct page *__page_cache_alloc(gfp_t gfp);
+#else
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+ return alloc_pages(gfp, 0);
+}
+#endif
+
+static inline struct page *page_cache_alloc(struct address_space *x)
+{
+ return __page_cache_alloc(mapping_gfp_mask(x));
+}
+
+static inline struct page *page_cache_alloc_cold(struct address_space *x)
+{
+ return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
+}
+
+static inline struct page *page_cache_alloc_readahead(struct address_space *x)
+{
+ return __page_cache_alloc(mapping_gfp_mask(x) |
+ __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
+}
+
+typedef int filler_t(void *, struct page *);
+
+pgoff_t page_cache_next_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan);
+pgoff_t page_cache_prev_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan);
+
+#define FGP_ACCESSED 0x00000001
+#define FGP_LOCK 0x00000002
+#define FGP_CREAT 0x00000004
+#define FGP_WRITE 0x00000008
+#define FGP_NOFS 0x00000010
+#define FGP_NOWAIT 0x00000020
+
+struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+ int fgp_flags, gfp_t cache_gfp_mask);
+
+/**
+ * find_get_page - find and get a page reference
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned with an increased refcount.
+ *
+ * Otherwise, %NULL is returned.
+ */
+static inline struct page *find_get_page(struct address_space *mapping,
+ pgoff_t offset)
+{
+ return pagecache_get_page(mapping, offset, 0, 0);
+}
+
+static inline struct page *find_get_page_flags(struct address_space *mapping,
+ pgoff_t offset, int fgp_flags)
+{
+ return pagecache_get_page(mapping, offset, fgp_flags, 0);
+}
+
+/**
+ * find_lock_page - locate, pin and lock a pagecache page
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Looks up the page cache slot at @mapping & @offset. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
+ *
+ * Otherwise, %NULL is returned.
+ *
+ * find_lock_page() may sleep.
+ */
+static inline struct page *find_lock_page(struct address_space *mapping,
+ pgoff_t offset)
+{
+ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
+}
+
+/**
+ * find_or_create_page - locate or add a pagecache page
+ * @mapping: the page's address_space
+ * @index: the page's index into the mapping
+ * @gfp_mask: page allocation mode
+ *
+ * Looks up the page cache slot at @mapping & @index. If there is a
+ * page cache page, it is returned locked and with an increased
+ * refcount.
+ *
+ * If the page is not present, a new page is allocated using @gfp_mask
+ * and added to the page cache and the VM's LRU list. The page is
+ * returned locked and with an increased refcount.
+ *
+ * On memory exhaustion, %NULL is returned.
+ *
+ * find_or_create_page() may sleep, even if @gfp_mask specifies an
+ * atomic allocation!
+ */
+static inline struct page *find_or_create_page(struct address_space *mapping,
+ pgoff_t offset, gfp_t gfp_mask)
+{
+ return pagecache_get_page(mapping, offset,
+ FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
+ gfp_mask);
+}
+
+/**
+ * grab_cache_page_nowait - returns locked page at given index in given cache
+ * @mapping: target address_space
+ * @index: the page index
+ *
+ * Same as grab_cache_page(), but do not wait if the page is unavailable.
+ * This is intended for speculative data generators, where the data can
+ * be regenerated if the page couldn't be grabbed. This routine should
+ * be safe to call while holding the lock for another page.
+ *
+ * Clear __GFP_FS when allocating the page to avoid recursion into the fs
+ * and deadlock against the caller's locked page.
+ */
+static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
+ pgoff_t index)
+{
+ return pagecache_get_page(mapping, index,
+ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+ mapping_gfp_mask(mapping));
+}
+
+struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
+struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
+unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_entries, struct page **entries,
+ pgoff_t *indices);
+unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
+ int tag, unsigned int nr_pages, struct page **pages);
+
+struct page *grab_cache_page_write_begin(struct address_space *mapping,
+ pgoff_t index, unsigned flags);
+
+/*
+ * Returns locked page at given index in given cache, creating it if needed.
+ */
+static inline struct page *grab_cache_page(struct address_space *mapping,
+ pgoff_t index)
+{
+ return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
+}
+
+extern struct page * read_cache_page(struct address_space *mapping,
+ pgoff_t index, filler_t *filler, void *data);
+extern struct page * read_cache_page_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+extern int read_cache_pages(struct address_space *mapping,
+ struct list_head *pages, filler_t *filler, void *data);
+
+static inline struct page *read_mapping_page(struct address_space *mapping,
+ pgoff_t index, void *data)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page(mapping, index, filler, data);
+}
+
+/*
+ * Get the offset in PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+ if (unlikely(PageHeadHuge(page)))
+ return page->index << compound_order(page);
+ else
+ return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
+/*
+ * Return byte-offset into filesystem object for page.
+ */
+static inline loff_t page_offset(struct page *page)
+{
+ return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+}
+
+static inline loff_t page_file_offset(struct page *page)
+{
+ return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
+}
+
+extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
+ unsigned long address);
+
+static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
+ unsigned long address)
+{
+ pgoff_t pgoff;
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return linear_hugepage_index(vma, address);
+ pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
+ pgoff += vma->vm_pgoff;
+ return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
+extern void __lock_page(struct page *page);
+extern int __lock_page_killable(struct page *page);
+extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+ unsigned int flags);
+extern void unlock_page(struct page *page);
+
+static inline void __set_page_locked(struct page *page)
+{
+ __set_bit(PG_locked, &page->flags);
+}
+
+static inline void __clear_page_locked(struct page *page)
+{
+ __clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+ return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+}
+
+/*
+ * lock_page may only be called if we have the page's inode pinned.
+ */
+static inline void lock_page(struct page *page)
+{
+ might_sleep();
+ if (!trylock_page(page))
+ __lock_page(page);
+}
+
+/*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals. It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+ might_sleep();
+ if (!trylock_page(page))
+ return __lock_page_killable(page);
+ return 0;
+}
+
+/*
+ * lock_page_or_retry - Lock the page, unless this would block and the
+ * caller indicated that it can handle a retry.
+ *
+ * Return value and mmap_sem implications depend on flags; see
+ * __lock_page_or_retry().
+ */
+static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+ unsigned int flags)
+{
+ might_sleep();
+ return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
+}
+
+/*
+ * This is exported only for wait_on_page_locked/wait_on_page_writeback,
+ * and for filesystems which need to wait on PG_private.
+ */
+extern void wait_on_page_bit(struct page *page, int bit_nr);
+
+extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
+extern int wait_on_page_bit_killable_timeout(struct page *page,
+ int bit_nr, unsigned long timeout);
+
+static inline int wait_on_page_locked_killable(struct page *page)
+{
+ if (PageLocked(page))
+ return wait_on_page_bit_killable(page, PG_locked);
+ return 0;
+}
+
+extern wait_queue_head_t *page_waitqueue(struct page *page);
+static inline void wake_up_page(struct page *page, int bit)
+{
+ __wake_up_bit(page_waitqueue(page), &page->flags, bit);
+}
+
+/*
+ * Wait for a page to be unlocked.
+ *
+ * This must be called with the caller "holding" the page,
+ * i.e. with an increased "page->count" so that the page won't
+ * go away during the wait.
+ */
+static inline void wait_on_page_locked(struct page *page)
+{
+ if (PageLocked(page))
+ wait_on_page_bit(page, PG_locked);
+}
+
+/*
+ * Wait for a page to complete writeback
+ */
+static inline void wait_on_page_writeback(struct page *page)
+{
+ if (PageWriteback(page))
+ wait_on_page_bit(page, PG_writeback);
+}
+
+extern void end_page_writeback(struct page *page);
+void wait_for_stable_page(struct page *page);
+
+void page_endio(struct page *page, int rw, int err);
+
+/*
+ * Add an arbitrary waiter to a page's wait queue
+ */
+extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
+
+/*
+ * Fault a userspace page into pagetables. Return non-zero on a fault.
+ *
+ * This assumes that two userspace pages are always sufficient. That's
+ * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ */
+static inline int fault_in_pages_writeable(char __user *uaddr, int size)
+{
+ int ret;
+
+ if (unlikely(size == 0))
+ return 0;
+
+ /*
+ * Writing zeroes into userspace here is OK, because we know that if
+ * the zero gets there, we'll be overwriting it.
+ */
+ ret = __put_user(0, uaddr);
+ if (ret == 0) {
+ char __user *end = uaddr + size - 1;
+
+ /*
+ * If the page was already mapped, this will get a cache miss
+ * for sure, so try to avoid doing it.
+ */
+ if (((unsigned long)uaddr & PAGE_MASK) !=
+ ((unsigned long)end & PAGE_MASK))
+ ret = __put_user(0, end);
+ }
+ return ret;
+}
+
+static inline int fault_in_pages_readable(const char __user *uaddr, int size)
+{
+ volatile char c;
+ int ret;
+
+ if (unlikely(size == 0))
+ return 0;
+
+ ret = __get_user(c, uaddr);
+ if (ret == 0) {
+ const char __user *end = uaddr + size - 1;
+
+ if (((unsigned long)uaddr & PAGE_MASK) !=
+ ((unsigned long)end & PAGE_MASK)) {
+ ret = __get_user(c, end);
+ (void)c;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Multipage variants of the above prefault helpers, useful if more than
+ * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
+ * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
+ * filemap.c hotpaths.
+ */
+static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+{
+ int ret = 0;
+ char __user *end = uaddr + size - 1;
+
+ if (unlikely(size == 0))
+ return ret;
+
+ /*
+ * Writing zeroes into userspace here is OK, because we know that if
+ * the zero gets there, we'll be overwriting it.
+ */
+ while (uaddr <= end) {
+ ret = __put_user(0, uaddr);
+ if (ret != 0)
+ return ret;
+ uaddr += PAGE_SIZE;
+ }
+
+ /* Check whether the range spilled into the next page. */
+ if (((unsigned long)uaddr & PAGE_MASK) ==
+ ((unsigned long)end & PAGE_MASK))
+ ret = __put_user(0, end);
+
+ return ret;
+}
+
+static inline int fault_in_multipages_readable(const char __user *uaddr,
+ int size)
+{
+ volatile char c;
+ int ret = 0;
+ const char __user *end = uaddr + size - 1;
+
+ if (unlikely(size == 0))
+ return ret;
+
+ while (uaddr <= end) {
+ ret = __get_user(c, uaddr);
+ if (ret != 0)
+ return ret;
+ uaddr += PAGE_SIZE;
+ }
+
+ /* Check whether the range spilled into the next page. */
+ if (((unsigned long)uaddr & PAGE_MASK) ==
+ ((unsigned long)end & PAGE_MASK)) {
+ ret = __get_user(c, end);
+ (void)c;
+ }
+
+ return ret;
+}
+
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+extern void delete_from_page_cache(struct page *page);
+extern void __delete_from_page_cache(struct page *page, void *shadow);
+int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run __set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+ struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+ int error;
+
+ __set_page_locked(page);
+ error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+ if (unlikely(error))
+ __clear_page_locked(page);
+ return error;
+}
+
+#endif /* _LINUX_PAGEMAP_H */
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
new file mode 100644
index 000000000..b45d391b4
--- /dev/null
+++ b/include/linux/pagevec.h
@@ -0,0 +1,72 @@
+/*
+ * include/linux/pagevec.h
+ *
+ * In many places it is efficient to batch an operation up against multiple
+ * pages. A pagevec is a multipage container which is used for that.
+ */
+
+#ifndef _LINUX_PAGEVEC_H
+#define _LINUX_PAGEVEC_H
+
+/* 14 pointers + two longs align the pagevec structure to a power of two */
+#define PAGEVEC_SIZE 14
+
+struct page;
+struct address_space;
+
+struct pagevec {
+ unsigned long nr;
+ unsigned long cold;
+ struct page *pages[PAGEVEC_SIZE];
+};
+
+void __pagevec_release(struct pagevec *pvec);
+void __pagevec_lru_add(struct pagevec *pvec);
+unsigned pagevec_lookup_entries(struct pagevec *pvec,
+ struct address_space *mapping,
+ pgoff_t start, unsigned nr_entries,
+ pgoff_t *indices);
+void pagevec_remove_exceptionals(struct pagevec *pvec);
+unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
+ pgoff_t start, unsigned nr_pages);
+unsigned pagevec_lookup_tag(struct pagevec *pvec,
+ struct address_space *mapping, pgoff_t *index, int tag,
+ unsigned nr_pages);
+
+static inline void pagevec_init(struct pagevec *pvec, int cold)
+{
+ pvec->nr = 0;
+ pvec->cold = cold;
+}
+
+static inline void pagevec_reinit(struct pagevec *pvec)
+{
+ pvec->nr = 0;
+}
+
+static inline unsigned pagevec_count(struct pagevec *pvec)
+{
+ return pvec->nr;
+}
+
+static inline unsigned pagevec_space(struct pagevec *pvec)
+{
+ return PAGEVEC_SIZE - pvec->nr;
+}
+
+/*
+ * Add a page to a pagevec. Returns the number of slots still available.
+ */
+static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
+{
+ pvec->pages[pvec->nr++] = page;
+ return pagevec_space(pvec);
+}
+
+static inline void pagevec_release(struct pagevec *pvec)
+{
+ if (pagevec_count(pvec))
+ __pagevec_release(pvec);
+}
+
+#endif /* _LINUX_PAGEVEC_H */
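As a brief aside on the interface above: the usual pattern is to keep adding pages until pagevec_add() reports that no slots remain, flush, and continue. A minimal sketch under that assumption follows; the helper name release_pages_batched and the premise that the caller already holds a reference on each page are illustrative, not part of this header.

#include <linux/pagevec.h>

static void release_pages_batched(struct page **pages, int nr)
{
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec, 0);                 /* cold = 0: treat the pages as cache-hot */
        for (i = 0; i < nr; i++) {
                /* pagevec_add() returns the slots still free; flush when full */
                if (!pagevec_add(&pvec, pages[i]))
                        pagevec_release(&pvec);
        }
        pagevec_release(&pvec);                 /* release whatever is left over */
}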
diff --git a/include/linux/parport.h b/include/linux/parport.h
new file mode 100644
index 000000000..c22f12547
--- /dev/null
+++ b/include/linux/parport.h
@@ -0,0 +1,481 @@
+/*
+ * Any part of this program may be used in documents licensed under
+ * the GNU Free Documentation License, Version 1.1 or any later version
+ * published by the Free Software Foundation.
+ */
+#ifndef _PARPORT_H_
+#define _PARPORT_H_
+
+
+#include <linux/jiffies.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/irqreturn.h>
+#include <linux/semaphore.h>
+#include <asm/ptrace.h>
+#include <uapi/linux/parport.h>
+
+/* Define this later. */
+struct parport;
+struct pardevice;
+
+struct pc_parport_state {
+ unsigned int ctr;
+ unsigned int ecr;
+};
+
+struct ax_parport_state {
+ unsigned int ctr;
+ unsigned int ecr;
+ unsigned int dcsr;
+};
+
+/* used by both parport_amiga and parport_mfc3 */
+struct amiga_parport_state {
+ unsigned char data; /* ciaa.prb */
+ unsigned char datadir; /* ciaa.ddrb */
+ unsigned char status; /* ciab.pra & 7 */
+ unsigned char statusdir;/* ciab.ddrb & 7 */
+};
+
+struct ax88796_parport_state {
+ unsigned char cpr;
+};
+
+struct ip32_parport_state {
+ unsigned int dcr;
+ unsigned int ecr;
+};
+
+struct parport_state {
+ union {
+ struct pc_parport_state pc;
+ /* ARC has no state. */
+ struct ax_parport_state ax;
+ struct amiga_parport_state amiga;
+ struct ax88796_parport_state ax88796;
+ /* Atari has no state. */
+ struct ip32_parport_state ip32;
+ void *misc;
+ } u;
+};
+
+struct parport_operations {
+ /* IBM PC-style virtual registers. */
+ void (*write_data)(struct parport *, unsigned char);
+ unsigned char (*read_data)(struct parport *);
+
+ void (*write_control)(struct parport *, unsigned char);
+ unsigned char (*read_control)(struct parport *);
+ unsigned char (*frob_control)(struct parport *, unsigned char mask,
+ unsigned char val);
+
+ unsigned char (*read_status)(struct parport *);
+
+ /* IRQs. */
+ void (*enable_irq)(struct parport *);
+ void (*disable_irq)(struct parport *);
+
+ /* Data direction. */
+ void (*data_forward) (struct parport *);
+ void (*data_reverse) (struct parport *);
+
+ /* For core parport code. */
+ void (*init_state)(struct pardevice *, struct parport_state *);
+ void (*save_state)(struct parport *, struct parport_state *);
+ void (*restore_state)(struct parport *, struct parport_state *);
+
+ /* Block read/write */
+ size_t (*epp_write_data) (struct parport *port, const void *buf,
+ size_t len, int flags);
+ size_t (*epp_read_data) (struct parport *port, void *buf, size_t len,
+ int flags);
+ size_t (*epp_write_addr) (struct parport *port, const void *buf,
+ size_t len, int flags);
+ size_t (*epp_read_addr) (struct parport *port, void *buf, size_t len,
+ int flags);
+
+ size_t (*ecp_write_data) (struct parport *port, const void *buf,
+ size_t len, int flags);
+ size_t (*ecp_read_data) (struct parport *port, void *buf, size_t len,
+ int flags);
+ size_t (*ecp_write_addr) (struct parport *port, const void *buf,
+ size_t len, int flags);
+
+ size_t (*compat_write_data) (struct parport *port, const void *buf,
+ size_t len, int flags);
+ size_t (*nibble_read_data) (struct parport *port, void *buf,
+ size_t len, int flags);
+ size_t (*byte_read_data) (struct parport *port, void *buf,
+ size_t len, int flags);
+ struct module *owner;
+};
+
+struct parport_device_info {
+ parport_device_class class;
+ const char *class_name;
+ const char *mfr;
+ const char *model;
+ const char *cmdset;
+ const char *description;
+};
+
+/* Each device can have two callback functions:
+ * 1) a preemption function, called by the resource manager to request
+ * that the driver relinquish control of the port. The driver should
+ * return zero if it agrees to release the port, and nonzero if it
+ * refuses. Do not call parport_release() - the kernel will do this
+ * implicitly.
+ *
+ * 2) a wake-up function, called by the resource manager to tell drivers
+ * that the port is available to be claimed. If a driver wants to use
+ * the port, it should call parport_claim() here.
+ */
+
+/* A parallel port device */
+struct pardevice {
+ const char *name;
+ struct parport *port;
+ int daisy;
+ int (*preempt)(void *);
+ void (*wakeup)(void *);
+ void *private;
+ void (*irq_func)(void *);
+ unsigned int flags;
+ struct pardevice *next;
+ struct pardevice *prev;
+ struct parport_state *state; /* saved status over preemption */
+ wait_queue_head_t wait_q;
+ unsigned long int time;
+ unsigned long int timeslice;
+ volatile long int timeout;
+ unsigned long waiting; /* long req'd for set_bit --RR */
+ struct pardevice *waitprev;
+ struct pardevice *waitnext;
+ void * sysctl_table;
+};
+
+/* IEEE1284 information */
+
+/* IEEE1284 phases. These are exposed to userland through ppdev IOCTL
+ * PP[GS]ETPHASE, so do not change existing values. */
+enum ieee1284_phase {
+ IEEE1284_PH_FWD_DATA,
+ IEEE1284_PH_FWD_IDLE,
+ IEEE1284_PH_TERMINATE,
+ IEEE1284_PH_NEGOTIATION,
+ IEEE1284_PH_HBUSY_DNA,
+ IEEE1284_PH_REV_IDLE,
+ IEEE1284_PH_HBUSY_DAVAIL,
+ IEEE1284_PH_REV_DATA,
+ IEEE1284_PH_ECP_SETUP,
+ IEEE1284_PH_ECP_FWD_TO_REV,
+ IEEE1284_PH_ECP_REV_TO_FWD,
+ IEEE1284_PH_ECP_DIR_UNKNOWN,
+};
+struct ieee1284_info {
+ int mode;
+ volatile enum ieee1284_phase phase;
+ struct semaphore irq;
+};
+
+/* A parallel port */
+struct parport {
+ unsigned long base; /* base address */
+ unsigned long base_hi; /* base address (hi - ECR) */
+ unsigned int size; /* IO extent */
+ const char *name;
+ unsigned int modes;
+ int irq; /* interrupt (or -1 for none) */
+ int dma;
+ int muxport; /* which muxport (if any) this is */
+ int portnum; /* which physical parallel port (not mux) */
+ struct device *dev; /* Physical device associated with IO/DMA.
+ * This may unfortunately be null if the
+ * port has a legacy driver.
+ */
+
+ struct parport *physport;
+ /* If this is a non-default mux
+ parport, i.e. we're a clone of a real
+ physical port, this is a pointer to that
+ port. The locking is only done in the
+ real port. For a clone port, the
+ following structure members are
+ meaningless: devices, cad, muxsel,
+ waithead, waittail, flags, pdir,
+ dev, ieee1284, *_lock.
+
+ If this is a default mux parport, or
+ there is no mux involved, this points to
+ ourself. */
+
+ struct pardevice *devices;
+ struct pardevice *cad; /* port owner */
+ int daisy; /* currently selected daisy addr */
+ int muxsel; /* currently selected mux port */
+
+ struct pardevice *waithead;
+ struct pardevice *waittail;
+
+ struct list_head list;
+ unsigned int flags;
+
+ void *sysctl_table;
+ struct parport_device_info probe_info[5]; /* 0-3 + non-IEEE1284.3 */
+ struct ieee1284_info ieee1284;
+
+ struct parport_operations *ops;
+ void *private_data; /* for lowlevel driver */
+
+ int number; /* port index - the `n' in `parportn' */
+ spinlock_t pardevice_lock;
+ spinlock_t waitlist_lock;
+ rwlock_t cad_lock;
+
+ int spintime;
+ atomic_t ref_count;
+
+ unsigned long devflags;
+#define PARPORT_DEVPROC_REGISTERED 0
+ struct pardevice *proc_device; /* Currently registered proc device */
+
+ struct list_head full_list;
+ struct parport *slaves[3];
+};
+
+#define DEFAULT_SPIN_TIME 500 /* us */
+
+struct parport_driver {
+ const char *name;
+ void (*attach) (struct parport *);
+ void (*detach) (struct parport *);
+ struct list_head list;
+};
+
+/* parport_register_port registers a new parallel port at the given
+ address (if one does not already exist) and returns a pointer to it.
+ This entails claiming the I/O region, IRQ and DMA. NULL is returned
+ if initialisation fails. */
+struct parport *parport_register_port(unsigned long base, int irq, int dma,
+ struct parport_operations *ops);
+
+/* Once a registered port is ready for high-level drivers to use, the
+ low-level driver that registered it should announce it. This will
+ call the high-level drivers' attach() functions (after things like
+ determining the IEEE 1284.3 topology of the port and collecting
+ DeviceIDs). */
+void parport_announce_port (struct parport *port);
+
+/* Unregister a port. */
+extern void parport_remove_port(struct parport *port);
+
+/* Register a new high-level driver. */
+extern int parport_register_driver (struct parport_driver *);
+
+/* Unregister a high-level driver. */
+extern void parport_unregister_driver (struct parport_driver *);
+
+/* If parport_register_driver doesn't fit your needs, perhaps
+ * parport_find_xxx does. */
+extern struct parport *parport_find_number (int);
+extern struct parport *parport_find_base (unsigned long);
+
+/* generic irq handler, if it suits your needs */
+extern irqreturn_t parport_irq_handler(int irq, void *dev_id);
+
+/* Reference counting for ports. */
+extern struct parport *parport_get_port (struct parport *);
+extern void parport_put_port (struct parport *);
+
+/* parport_register_device declares that a device is connected to a
+ port, and tells the kernel all it needs to know.
+ - pf is the preemption function (may be NULL for no callback)
+ - kf is the wake-up function (may be NULL for no callback)
+ - irq_func is the interrupt handler (may be NULL for no interrupts)
+ - handle is a user pointer that gets handed to callback functions. */
+struct pardevice *parport_register_device(struct parport *port,
+ const char *name,
+ int (*pf)(void *), void (*kf)(void *),
+ void (*irq_func)(void *),
+ int flags, void *handle);
+
+/* parport_unregister unlinks a device from the chain. */
+extern void parport_unregister_device(struct pardevice *dev);
+
+/* parport_claim tries to gain ownership of the port for a particular
+ driver. This may fail (return non-zero) if another driver is busy.
+ If this driver has registered an interrupt handler, it will be
+ enabled. */
+extern int parport_claim(struct pardevice *dev);
+
+/* parport_claim_or_block is the same, but sleeps if the port cannot
+ be claimed. Return value is 1 if it slept, 0 normally and -errno
+ on error. */
+extern int parport_claim_or_block(struct pardevice *dev);
+
+/* parport_release reverses a previous parport_claim. This can never
+ fail, though the effects are undefined (except that they are bad)
+ if you didn't previously own the port. Once you have released the
+ port you should make sure that neither your code nor the hardware
+ on the port tries to initiate any communication without first
+ re-claiming the port. If you mess with the port state (enabling
+ ECP for example) you should clean up before releasing the port. */
+
+extern void parport_release(struct pardevice *dev);
+
+/**
+ * parport_yield - relinquish a parallel port temporarily
+ * @dev: a device on the parallel port
+ *
+ * This function relinquishes the port if it would be helpful to other
+ * drivers to do so. Afterwards it tries to reclaim the port using
+ * parport_claim(), and the return value is the same as for
+ * parport_claim(). If it fails, the port is left unclaimed and it is
+ * the driver's responsibility to reclaim the port.
+ *
+ * The parport_yield() and parport_yield_blocking() functions are for
+ * marking points in the driver at which other drivers may claim the
+ * port and use their devices. Yielding the port is similar to
+ * releasing it and reclaiming it, but is more efficient because no
+ * action is taken if there are no other devices needing the port. In
+ * fact, nothing is done even if there are other devices waiting but
+ * the current device is still within its "timeslice". The default
+ * timeslice is half a second, but it can be adjusted via the /proc
+ * interface.
+ **/
+static __inline__ int parport_yield(struct pardevice *dev)
+{
+ unsigned long int timeslip = (jiffies - dev->time);
+ if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice))
+ return 0;
+ parport_release(dev);
+ return parport_claim(dev);
+}
+
+/**
+ * parport_yield_blocking - relinquish a parallel port temporarily
+ * @dev: a device on the parallel port
+ *
+ * This function relinquishes the port if it would be helpful to other
+ * drivers to do so. Afterwards it tries to reclaim the port using
+ * parport_claim_or_block(), and the return value is the same as for
+ * parport_claim_or_block().
+ **/
+static __inline__ int parport_yield_blocking(struct pardevice *dev)
+{
+ unsigned long int timeslip = (jiffies - dev->time);
+ if ((dev->port->waithead == NULL) || (timeslip < dev->timeslice))
+ return 0;
+ parport_release(dev);
+ return parport_claim_or_block(dev);
+}
+
+/* Flags used to identify what a device does. */
+#define PARPORT_DEV_TRAN 0 /* WARNING !! DEPRECATED !! */
+#define PARPORT_DEV_LURK (1<<0) /* WARNING !! DEPRECATED !! */
+#define PARPORT_DEV_EXCL (1<<1) /* Need exclusive access. */
+
+#define PARPORT_FLAG_EXCL (1<<1) /* EXCL driver registered. */
+
+/* IEEE1284 functions */
+extern void parport_ieee1284_interrupt (void *);
+extern int parport_negotiate (struct parport *, int mode);
+extern ssize_t parport_write (struct parport *, const void *buf, size_t len);
+extern ssize_t parport_read (struct parport *, void *buf, size_t len);
+
+#define PARPORT_INACTIVITY_O_NONBLOCK 1
+extern long parport_set_timeout (struct pardevice *, long inactivity);
+
+extern int parport_wait_event (struct parport *, long timeout);
+extern int parport_wait_peripheral (struct parport *port,
+ unsigned char mask,
+ unsigned char val);
+extern int parport_poll_peripheral (struct parport *port,
+ unsigned char mask,
+ unsigned char val,
+ int usec);
+
+/* For architectural drivers */
+extern size_t parport_ieee1284_write_compat (struct parport *,
+ const void *, size_t, int);
+extern size_t parport_ieee1284_read_nibble (struct parport *,
+ void *, size_t, int);
+extern size_t parport_ieee1284_read_byte (struct parport *,
+ void *, size_t, int);
+extern size_t parport_ieee1284_ecp_read_data (struct parport *,
+ void *, size_t, int);
+extern size_t parport_ieee1284_ecp_write_data (struct parport *,
+ const void *, size_t, int);
+extern size_t parport_ieee1284_ecp_write_addr (struct parport *,
+ const void *, size_t, int);
+extern size_t parport_ieee1284_epp_write_data (struct parport *,
+ const void *, size_t, int);
+extern size_t parport_ieee1284_epp_read_data (struct parport *,
+ void *, size_t, int);
+extern size_t parport_ieee1284_epp_write_addr (struct parport *,
+ const void *, size_t, int);
+extern size_t parport_ieee1284_epp_read_addr (struct parport *,
+ void *, size_t, int);
+
+/* IEEE1284.3 functions */
+extern int parport_daisy_init (struct parport *port);
+extern void parport_daisy_fini (struct parport *port);
+extern struct pardevice *parport_open (int devnum, const char *name);
+extern void parport_close (struct pardevice *dev);
+extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
+extern void parport_daisy_deselect_all (struct parport *port);
+extern int parport_daisy_select (struct parport *port, int daisy, int mode);
+
+/* Lowlevel drivers _can_ call this support function to handle irqs. */
+static inline void parport_generic_irq(struct parport *port)
+{
+ parport_ieee1284_interrupt (port);
+ read_lock(&port->cad_lock);
+ if (port->cad && port->cad->irq_func)
+ port->cad->irq_func(port->cad->private);
+ read_unlock(&port->cad_lock);
+}
+
+/* Prototypes from parport_procfs */
+extern int parport_proc_register(struct parport *pp);
+extern int parport_proc_unregister(struct parport *pp);
+extern int parport_device_proc_register(struct pardevice *device);
+extern int parport_device_proc_unregister(struct pardevice *device);
+
+/* If PC hardware is the only type supported, we can optimise a bit. */
+#if !defined(CONFIG_PARPORT_NOT_PC)
+
+#include <linux/parport_pc.h>
+#define parport_write_data(p,x) parport_pc_write_data(p,x)
+#define parport_read_data(p) parport_pc_read_data(p)
+#define parport_write_control(p,x) parport_pc_write_control(p,x)
+#define parport_read_control(p) parport_pc_read_control(p)
+#define parport_frob_control(p,m,v) parport_pc_frob_control(p,m,v)
+#define parport_read_status(p) parport_pc_read_status(p)
+#define parport_enable_irq(p) parport_pc_enable_irq(p)
+#define parport_disable_irq(p) parport_pc_disable_irq(p)
+#define parport_data_forward(p) parport_pc_data_forward(p)
+#define parport_data_reverse(p) parport_pc_data_reverse(p)
+
+#else /* !CONFIG_PARPORT_NOT_PC */
+
+/* Generic operations vector through the dispatch table. */
+#define parport_write_data(p,x) (p)->ops->write_data(p,x)
+#define parport_read_data(p) (p)->ops->read_data(p)
+#define parport_write_control(p,x) (p)->ops->write_control(p,x)
+#define parport_read_control(p) (p)->ops->read_control(p)
+#define parport_frob_control(p,m,v) (p)->ops->frob_control(p,m,v)
+#define parport_read_status(p) (p)->ops->read_status(p)
+#define parport_enable_irq(p) (p)->ops->enable_irq(p)
+#define parport_disable_irq(p) (p)->ops->disable_irq(p)
+#define parport_data_forward(p) (p)->ops->data_forward(p)
+#define parport_data_reverse(p) (p)->ops->data_reverse(p)
+
+#endif /* !CONFIG_PARPORT_NOT_PC */
+
+extern unsigned long parport_default_timeslice;
+extern int parport_default_spintime;
+
+#endif /* _PARPORT_H_ */
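To tie the declarations above together, here is a hedged sketch of the usual driver flow: register a pardevice with preemption and wake-up callbacks, claim the port (possibly sleeping), do the I/O, then release. All my_* identifiers and the device name "mydev" are illustrative, not part of this header.

#include <linux/parport.h>
#include <linux/errno.h>

static int my_preempt(void *handle)
{
        return 0;       /* agree to give the port up; parport_release() is not called here */
}

static void my_wakeup(void *handle)
{
        /* the port is free again; a driver wanting it back would call parport_claim() */
}

static int my_attach_and_poke(struct parport *port)
{
        struct pardevice *dev;

        dev = parport_register_device(port, "mydev", my_preempt, my_wakeup,
                                      NULL /* no interrupt handler */, 0, NULL);
        if (!dev)
                return -ENOMEM;

        if (parport_claim_or_block(dev) < 0) {  /* may sleep; < 0 means an error */
                parport_unregister_device(dev);
                return -EBUSY;
        }

        parport_write_data(port, 0x55);         /* drive the data lines while we own the port */
        parport_release(dev);
        parport_unregister_device(dev);
        return 0;
}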
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
new file mode 100644
index 000000000..cc1767f5c
--- /dev/null
+++ b/include/linux/parport_pc.h
@@ -0,0 +1,238 @@
+#ifndef __LINUX_PARPORT_PC_H
+#define __LINUX_PARPORT_PC_H
+
+#include <asm/io.h>
+
+/* --- register definitions ------------------------------- */
+
+#define ECONTROL(p) ((p)->base_hi + 0x2)
+#define CONFIGB(p) ((p)->base_hi + 0x1)
+#define CONFIGA(p) ((p)->base_hi + 0x0)
+#define FIFO(p) ((p)->base_hi + 0x0)
+#define EPPDATA(p) ((p)->base + 0x4)
+#define EPPADDR(p) ((p)->base + 0x3)
+#define CONTROL(p) ((p)->base + 0x2)
+#define STATUS(p) ((p)->base + 0x1)
+#define DATA(p) ((p)->base + 0x0)
+
+struct parport_pc_private {
+ /* Contents of CTR. */
+ unsigned char ctr;
+
+ /* Bitmask of writable CTR bits. */
+ unsigned char ctr_writable;
+
+ /* Whether or not there's an ECR. */
+ int ecr;
+
+ /* Number of PWords that FIFO will hold. */
+ int fifo_depth;
+
+ /* Number of bytes per portword. */
+ int pword;
+
+ /* Not used yet. */
+ int readIntrThreshold;
+ int writeIntrThreshold;
+
+ /* buffer suitable for DMA, if DMA enabled */
+ char *dma_buf;
+ dma_addr_t dma_handle;
+ struct list_head list;
+ struct parport *port;
+};
+
+struct parport_pc_via_data
+{
+ /* ISA PnP IRQ routing register 1 */
+ u8 via_pci_parport_irq_reg;
+ /* ISA PnP DMA request routing register */
+ u8 via_pci_parport_dma_reg;
+ /* Register and value to enable SuperIO configuration access */
+ u8 via_pci_superio_config_reg;
+ u8 via_pci_superio_config_data;
+ /* SuperIO function register number */
+ u8 viacfg_function;
+ /* parallel port control register number */
+ u8 viacfg_parport_control;
+ /* Parallel port base address register */
+ u8 viacfg_parport_base;
+};
+
+static __inline__ void parport_pc_write_data(struct parport *p, unsigned char d)
+{
+#ifdef DEBUG_PARPORT
+ printk (KERN_DEBUG "parport_pc_write_data(%p,0x%02x)\n", p, d);
+#endif
+ outb(d, DATA(p));
+}
+
+static __inline__ unsigned char parport_pc_read_data(struct parport *p)
+{
+ unsigned char val = inb (DATA (p));
+#ifdef DEBUG_PARPORT
+ printk (KERN_DEBUG "parport_pc_read_data(%p) = 0x%02x\n",
+ p, val);
+#endif
+ return val;
+}
+
+#ifdef DEBUG_PARPORT
+static inline void dump_parport_state (char *str, struct parport *p)
+{
+ /* here's hoping that reading these ports won't side-effect anything underneath */
+ unsigned char ecr = inb (ECONTROL (p));
+ unsigned char dcr = inb (CONTROL (p));
+ unsigned char dsr = inb (STATUS (p));
+ static const char *const ecr_modes[] = {"SPP", "PS2", "PPFIFO", "ECP", "xXx", "yYy", "TST", "CFG"};
+ const struct parport_pc_private *priv = p->physport->private_data;
+ int i;
+
+ printk (KERN_DEBUG "*** parport state (%s): ecr=[%s", str, ecr_modes[(ecr & 0xe0) >> 5]);
+ if (ecr & 0x10) printk (",nErrIntrEn");
+ if (ecr & 0x08) printk (",dmaEn");
+ if (ecr & 0x04) printk (",serviceIntr");
+ if (ecr & 0x02) printk (",f_full");
+ if (ecr & 0x01) printk (",f_empty");
+ for (i=0; i<2; i++) {
+ printk ("] dcr(%s)=[", i ? "soft" : "hard");
+ dcr = i ? priv->ctr : inb (CONTROL (p));
+
+ if (dcr & 0x20) {
+ printk ("rev");
+ } else {
+ printk ("fwd");
+ }
+ if (dcr & 0x10) printk (",ackIntEn");
+ if (!(dcr & 0x08)) printk (",N-SELECT-IN");
+ if (dcr & 0x04) printk (",N-INIT");
+ if (!(dcr & 0x02)) printk (",N-AUTOFD");
+ if (!(dcr & 0x01)) printk (",N-STROBE");
+ }
+ printk ("] dsr=[");
+ if (!(dsr & 0x80)) printk ("BUSY");
+ if (dsr & 0x40) printk (",N-ACK");
+ if (dsr & 0x20) printk (",PERROR");
+ if (dsr & 0x10) printk (",SELECT");
+ if (dsr & 0x08) printk (",N-FAULT");
+ printk ("]\n");
+ return;
+}
+#else /* !DEBUG_PARPORT */
+#define dump_parport_state(args...)
+#endif /* !DEBUG_PARPORT */
+
+/* __parport_pc_frob_control differs from parport_pc_frob_control in that
+ * it doesn't do any extra masking. */
+static __inline__ unsigned char __parport_pc_frob_control (struct parport *p,
+ unsigned char mask,
+ unsigned char val)
+{
+ struct parport_pc_private *priv = p->physport->private_data;
+ unsigned char ctr = priv->ctr;
+#ifdef DEBUG_PARPORT
+ printk (KERN_DEBUG
+ "__parport_pc_frob_control(%02x,%02x): %02x -> %02x\n",
+ mask, val, ctr, ((ctr & ~mask) ^ val) & priv->ctr_writable);
+#endif
+ ctr = (ctr & ~mask) ^ val;
+ ctr &= priv->ctr_writable; /* only write writable bits. */
+ outb (ctr, CONTROL (p));
+ priv->ctr = ctr; /* Update soft copy */
+ return ctr;
+}
+
+static __inline__ void parport_pc_data_reverse (struct parport *p)
+{
+ __parport_pc_frob_control (p, 0x20, 0x20);
+}
+
+static __inline__ void parport_pc_data_forward (struct parport *p)
+{
+ __parport_pc_frob_control (p, 0x20, 0x00);
+}
+
+static __inline__ void parport_pc_write_control (struct parport *p,
+ unsigned char d)
+{
+ const unsigned char wm = (PARPORT_CONTROL_STROBE |
+ PARPORT_CONTROL_AUTOFD |
+ PARPORT_CONTROL_INIT |
+ PARPORT_CONTROL_SELECT);
+
+ /* Take this out when drivers have adapted to newer interface. */
+ if (d & 0x20) {
+ printk (KERN_DEBUG "%s (%s): use data_reverse for this!\n",
+ p->name, p->cad->name);
+ parport_pc_data_reverse (p);
+ }
+
+ __parport_pc_frob_control (p, wm, d & wm);
+}
+
+static __inline__ unsigned char parport_pc_read_control(struct parport *p)
+{
+ const unsigned char rm = (PARPORT_CONTROL_STROBE |
+ PARPORT_CONTROL_AUTOFD |
+ PARPORT_CONTROL_INIT |
+ PARPORT_CONTROL_SELECT);
+ const struct parport_pc_private *priv = p->physport->private_data;
+ return priv->ctr & rm; /* Use soft copy */
+}
+
+static __inline__ unsigned char parport_pc_frob_control (struct parport *p,
+ unsigned char mask,
+ unsigned char val)
+{
+ const unsigned char wm = (PARPORT_CONTROL_STROBE |
+ PARPORT_CONTROL_AUTOFD |
+ PARPORT_CONTROL_INIT |
+ PARPORT_CONTROL_SELECT);
+
+ /* Take this out when drivers have adapted to newer interface. */
+ if (mask & 0x20) {
+ printk (KERN_DEBUG "%s (%s): use data_%s for this!\n",
+ p->name, p->cad->name,
+ (val & 0x20) ? "reverse" : "forward");
+ if (val & 0x20)
+ parport_pc_data_reverse (p);
+ else
+ parport_pc_data_forward (p);
+ }
+
+ /* Restrict mask and val to control lines. */
+ mask &= wm;
+ val &= wm;
+
+ return __parport_pc_frob_control (p, mask, val);
+}
+
+static __inline__ unsigned char parport_pc_read_status(struct parport *p)
+{
+ return inb(STATUS(p));
+}
+
+
+static __inline__ void parport_pc_disable_irq(struct parport *p)
+{
+ __parport_pc_frob_control (p, 0x10, 0x00);
+}
+
+static __inline__ void parport_pc_enable_irq(struct parport *p)
+{
+ __parport_pc_frob_control (p, 0x10, 0x10);
+}
+
+extern void parport_pc_release_resources(struct parport *p);
+
+extern int parport_pc_claim_resources(struct parport *p);
+
+/* PCMCIA code will want to get us to look at a port. Provide a mechanism. */
+extern struct parport *parport_pc_probe_port(unsigned long base,
+ unsigned long base_hi,
+ int irq, int dma,
+ struct device *dev,
+ int irqflags);
+extern void parport_pc_unregister_port(struct parport *p);
+
+#endif
diff --git a/include/linux/parser.h b/include/linux/parser.h
new file mode 100644
index 000000000..39d5b7955
--- /dev/null
+++ b/include/linux/parser.h
@@ -0,0 +1,34 @@
+/*
+ * linux/include/linux/parser.h
+ *
+ * Header for lib/parser.c
+ * Intended use of these functions is parsing filesystem argument lists,
+ * but they could potentially be used anywhere else that simple option=arg
+ * parsing is required.
+ */
+
+
+/* associates an integer enumerator with a pattern string. */
+struct match_token {
+ int token;
+ const char *pattern;
+};
+
+typedef struct match_token match_table_t[];
+
+/* Maximum number of arguments that match_token will find in a pattern */
+enum {MAX_OPT_ARGS = 3};
+
+/* Describe the location within a string of a substring */
+typedef struct {
+ char *from;
+ char *to;
+} substring_t;
+
+int match_token(char *, const match_table_t table, substring_t args[]);
+int match_int(substring_t *, int *result);
+int match_octal(substring_t *, int *result);
+int match_hex(substring_t *, int *result);
+bool match_wildcard(const char *pattern, const char *str);
+size_t match_strlcpy(char *, const substring_t *, size_t);
+char *match_strdup(const substring_t *);
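Since the token-table API above is terse, a short sketch of the intended mount-option style usage may help; the option names, the parse_options() helper and its parameters are made up for illustration.

#include <linux/parser.h>
#include <linux/string.h>       /* strsep() */
#include <linux/errno.h>

enum { Opt_uid, Opt_ro, Opt_err };

static const match_table_t tokens = {
        { Opt_uid, "uid=%d" },
        { Opt_ro,  "ro" },
        { Opt_err, NULL }
};

static int parse_options(char *options, int *uid, int *readonly)
{
        substring_t args[MAX_OPT_ARGS];
        char *p;

        /* options is a comma-separated, writable string such as "uid=42,ro" */
        while ((p = strsep(&options, ",")) != NULL) {
                if (!*p)
                        continue;
                switch (match_token(p, tokens, args)) {
                case Opt_uid:
                        if (match_int(&args[0], uid))
                                return -EINVAL;
                        break;
                case Opt_ro:
                        *readonly = 1;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}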
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h
new file mode 100644
index 000000000..3cc21c9cc
--- /dev/null
+++ b/include/linux/pata_arasan_cf_data.h
@@ -0,0 +1,47 @@
+/*
+ * include/linux/pata_arasan_cf_data.h
+ *
+ * Arasan Compact Flash host controller platform data header file
+ *
+ * Copyright (C) 2011 ST Microelectronics
+ * Viresh Kumar <viresh.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _PATA_ARASAN_CF_DATA_H
+#define _PATA_ARASAN_CF_DATA_H
+
+#include <linux/platform_device.h>
+
+struct arasan_cf_pdata {
+ u8 cf_if_clk;
+ #define CF_IF_CLK_100M (0x0)
+ #define CF_IF_CLK_75M (0x1)
+ #define CF_IF_CLK_66M (0x2)
+ #define CF_IF_CLK_50M (0x3)
+ #define CF_IF_CLK_40M (0x4)
+ #define CF_IF_CLK_33M (0x5)
+ #define CF_IF_CLK_25M (0x6)
+ #define CF_IF_CLK_125M (0x7)
+ #define CF_IF_CLK_150M (0x8)
+ #define CF_IF_CLK_166M (0x9)
+ #define CF_IF_CLK_200M (0xA)
+ /*
+ * Platform specific limitations of the CF controller are handled via
+ * quirks
+ */
+ u32 quirk;
+ #define CF_BROKEN_PIO (1)
+ #define CF_BROKEN_MWDMA (1 << 1)
+ #define CF_BROKEN_UDMA (1 << 2)
+};
+
+static inline void
+set_arasan_cf_pdata(struct platform_device *pdev, struct arasan_cf_pdata *data)
+{
+ pdev->dev.platform_data = data;
+}
+#endif /* _PATA_ARASAN_CF_DATA_H */
diff --git a/include/linux/patchkey.h b/include/linux/patchkey.h
new file mode 100644
index 000000000..97a919fc9
--- /dev/null
+++ b/include/linux/patchkey.h
@@ -0,0 +1,25 @@
+/*
+ * <linux/patchkey.h> -- definition of _PATCHKEY macro
+ *
+ * Copyright (C) 2005 Stuart Brady
+ *
+ * This exists because awe_voice.h defined its own _PATCHKEY and it wasn't
+ * clear whether removing this would break anything in userspace.
+ *
+ * Do not include this file directly. Please use <sys/soundcard.h> instead.
+ * For kernel code, use <linux/soundcard.h>
+ */
+#ifndef _LINUX_PATCHKEY_H
+#define _LINUX_PATCHKEY_H
+
+# include <asm/byteorder.h>
+#include <uapi/linux/patchkey.h>
+
+# if defined(__BIG_ENDIAN)
+# define _PATCHKEY(id) (0xfd00|id)
+# elif defined(__LITTLE_ENDIAN)
+# define _PATCHKEY(id) ((id<<8)|0x00fd)
+# else
+# error "could not determine byte order"
+# endif
+#endif /* _LINUX_PATCHKEY_H */
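Worked through the definitions above, _PATCHKEY(0x04) expands to 0xfd04 when __BIG_ENDIAN is defined and to 0x04fd when __LITTLE_ENDIAN is defined; in both cases the 0xfd byte is the fixed marker and the other byte carries the id.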
diff --git a/include/linux/path.h b/include/linux/path.h
new file mode 100644
index 000000000..d1372186f
--- /dev/null
+++ b/include/linux/path.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_PATH_H
+#define _LINUX_PATH_H
+
+struct dentry;
+struct vfsmount;
+
+struct path {
+ struct vfsmount *mnt;
+ struct dentry *dentry;
+};
+
+extern void path_get(const struct path *);
+extern void path_put(const struct path *);
+
+static inline int path_equal(const struct path *path1, const struct path *path2)
+{
+ return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
+}
+
+#endif /* _LINUX_PATH_H */
diff --git a/include/linux/pch_dma.h b/include/linux/pch_dma.h
new file mode 100644
index 000000000..fdafe529e
--- /dev/null
+++ b/include/linux/pch_dma.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef PCH_DMA_H
+#define PCH_DMA_H
+
+#include <linux/dmaengine.h>
+
+enum pch_dma_width {
+ PCH_DMA_WIDTH_1_BYTE,
+ PCH_DMA_WIDTH_2_BYTES,
+ PCH_DMA_WIDTH_4_BYTES,
+};
+
+struct pch_dma_slave {
+ struct device *dma_dev;
+ unsigned int chan_id;
+ dma_addr_t tx_reg;
+ dma_addr_t rx_reg;
+ enum pch_dma_width width;
+};
+
+#endif
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
new file mode 100644
index 000000000..a965efa52
--- /dev/null
+++ b/include/linux/pci-acpi.h
@@ -0,0 +1,96 @@
+/*
+ * File pci-acpi.h
+ *
+ * Copyright (C) 2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ */
+
+#ifndef _PCI_ACPI_H_
+#define _PCI_ACPI_H_
+
+#include <linux/acpi.h>
+
+#ifdef CONFIG_ACPI
+extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev);
+static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
+{
+ return acpi_remove_pm_notifier(dev);
+}
+extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
+ struct pci_dev *pci_dev);
+static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
+{
+ return acpi_remove_pm_notifier(dev);
+}
+extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
+
+static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
+{
+ struct pci_bus *pbus = pdev->bus;
+
+ /* Find a PCI root bus */
+ while (!pci_is_root_bus(pbus))
+ pbus = pbus->parent;
+
+ return ACPI_HANDLE(pbus->bridge);
+}
+
+static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
+{
+ struct device *dev;
+
+ if (pci_is_root_bus(pbus))
+ dev = pbus->bridge;
+ else {
+ /* If pbus is a virtual bus, there is no bridge to it */
+ if (!pbus->self)
+ return NULL;
+
+ dev = &pbus->self->dev;
+ }
+
+ return ACPI_HANDLE(dev);
+}
+
+void acpi_pci_add_bus(struct pci_bus *bus);
+void acpi_pci_remove_bus(struct pci_bus *bus);
+
+#ifdef CONFIG_ACPI_PCI_SLOT
+void acpi_pci_slot_init(void);
+void acpi_pci_slot_enumerate(struct pci_bus *bus);
+void acpi_pci_slot_remove(struct pci_bus *bus);
+#else
+static inline void acpi_pci_slot_init(void) { }
+static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { }
+static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
+#endif
+
+#ifdef CONFIG_HOTPLUG_PCI_ACPI
+void acpiphp_init(void);
+void acpiphp_enumerate_slots(struct pci_bus *bus);
+void acpiphp_remove_slots(struct pci_bus *bus);
+void acpiphp_check_host_bridge(struct acpi_device *adev);
+#else
+static inline void acpiphp_init(void) { }
+static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { }
+static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
+static inline void acpiphp_check_host_bridge(struct acpi_device *adev) { }
+#endif
+
+extern const u8 pci_acpi_dsm_uuid[];
+#define DEVICE_LABEL_DSM 0x07
+#define RESET_DELAY_DSM 0x08
+#define FUNCTION_DELAY_DSM 0x09
+
+#else /* CONFIG_ACPI */
+static inline void acpi_pci_add_bus(struct pci_bus *bus) { }
+static inline void acpi_pci_remove_bus(struct pci_bus *bus) { }
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_ACPI_APEI
+extern bool aer_acpi_firmware_first(void);
+#else
+static inline bool aer_acpi_firmware_first(void) { return false; }
+#endif
+
+#endif /* _PCI_ACPI_H_ */
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h
new file mode 100644
index 000000000..207c561fb
--- /dev/null
+++ b/include/linux/pci-aspm.h
@@ -0,0 +1,65 @@
+/*
+ * aspm.h
+ *
+ * PCI Express ASPM defines and function prototypes
+ *
+ * Copyright (C) 2007 Intel Corp.
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ * Shaohua Li (shaohua.li@intel.com)
+ *
+ * For more information, please consult the following manuals (look at
+ * http://www.pcisig.com/ for how to get them):
+ *
+ * PCI Express Specification
+ */
+
+#ifndef LINUX_ASPM_H
+#define LINUX_ASPM_H
+
+#include <linux/pci.h>
+
+#define PCIE_LINK_STATE_L0S 1
+#define PCIE_LINK_STATE_L1 2
+#define PCIE_LINK_STATE_CLKPM 4
+
+#ifdef CONFIG_PCIEASPM
+void pcie_aspm_init_link_state(struct pci_dev *pdev);
+void pcie_aspm_exit_link_state(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
+void pci_disable_link_state(struct pci_dev *pdev, int state);
+void pci_disable_link_state_locked(struct pci_dev *pdev, int state);
+void pcie_no_aspm(void);
+#else
+static inline void pcie_aspm_init_link_state(struct pci_dev *pdev)
+{
+}
+static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+{
+}
+static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+{
+}
+static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+{
+}
+static inline void pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+}
+static inline void pcie_no_aspm(void)
+{
+}
+#endif
+
+#ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */
+void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
+void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
+#else
+static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
+{
+}
+static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
+{
+}
+#endif
+#endif /* LINUX_ASPM_H */
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
new file mode 100644
index 000000000..72031785f
--- /dev/null
+++ b/include/linux/pci-ats.h
@@ -0,0 +1,110 @@
+#ifndef LINUX_PCI_ATS_H
+#define LINUX_PCI_ATS_H
+
+#include <linux/pci.h>
+
+/* Address Translation Service */
+struct pci_ats {
+ int pos; /* capability position */
+ int stu; /* Smallest Translation Unit */
+ int qdep; /* Invalidate Queue Depth */
+ int ref_cnt; /* Physical Function reference count */
+ unsigned int is_enabled:1; /* Enable bit is set */
+};
+
+#ifdef CONFIG_PCI_ATS
+
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+
+/**
+ * pci_ats_enabled - query the ATS status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ATS capability is enabled, or 0 if not.
+ */
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return dev->ats && dev->ats->is_enabled;
+}
+
+#else /* CONFIG_PCI_ATS */
+
+static inline int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ return -ENODEV;
+}
+
+static inline void pci_disable_ats(struct pci_dev *dev)
+{
+}
+
+static inline int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_ATS */
+
+#ifdef CONFIG_PCI_PRI
+
+int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
+void pci_disable_pri(struct pci_dev *pdev);
+int pci_reset_pri(struct pci_dev *pdev);
+
+#else /* CONFIG_PCI_PRI */
+
+static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
+{
+ return -ENODEV;
+}
+
+static inline void pci_disable_pri(struct pci_dev *pdev)
+{
+}
+
+static inline int pci_reset_pri(struct pci_dev *pdev)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_PCI_PRI */
+
+#ifdef CONFIG_PCI_PASID
+
+int pci_enable_pasid(struct pci_dev *pdev, int features);
+void pci_disable_pasid(struct pci_dev *pdev);
+int pci_pasid_features(struct pci_dev *pdev);
+int pci_max_pasids(struct pci_dev *pdev);
+
+#else /* CONFIG_PCI_PASID */
+
+static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
+{
+ return -EINVAL;
+}
+
+static inline void pci_disable_pasid(struct pci_dev *pdev)
+{
+}
+
+static inline int pci_pasid_features(struct pci_dev *pdev)
+{
+ return -EINVAL;
+}
+
+static inline int pci_max_pasids(struct pci_dev *pdev)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_PCI_PASID */
+
+
+#endif /* LINUX_PCI_ATS_H*/
diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h
new file mode 100644
index 000000000..549a041f9
--- /dev/null
+++ b/include/linux/pci-dma.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_PCI_DMA_H
+#define _LINUX_PCI_DMA_H
+
+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME);
+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME);
+#define pci_unmap_addr dma_unmap_addr
+#define pci_unmap_addr_set dma_unmap_addr_set
+#define pci_unmap_len dma_unmap_len
+#define pci_unmap_len_set dma_unmap_len_set
+
+#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
new file mode 100644
index 000000000..3ef3a5206
--- /dev/null
+++ b/include/linux/pci.h
@@ -0,0 +1,1910 @@
+/*
+ * pci.h
+ *
+ * PCI defines and function prototypes
+ * Copyright 1994, Drew Eckhardt
+ * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
+ *
+ * For more information, please consult the following manuals (look at
+ * http://www.pcisig.com/ for how to get them):
+ *
+ * PCI BIOS Specification
+ * PCI Local Bus Specification
+ * PCI to PCI Bridge Specification
+ * PCI System Design Guide
+ */
+#ifndef LINUX_PCI_H
+#define LINUX_PCI_H
+
+
+#include <linux/mod_devicetable.h>
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/kobject.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/resource_ext.h>
+#include <uapi/linux/pci.h>
+
+#include <linux/pci_ids.h>
+
+/*
+ * The PCI interface treats multi-function devices as independent
+ * devices. The slot/function address of each device is encoded
+ * in a single byte as follows:
+ *
+ * 7:3 = slot
+ * 2:0 = function
+ *
+ * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
+ * In the interest of not exposing interfaces to user-space unnecessarily,
+ * the following kernel-only defines are being added here.
+ */
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
+/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
+#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
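To make the encoding above concrete, a purely illustrative example for a device at bus 0x00, slot 0x1f, function 3, assuming the uapi PCI_DEVFN()/PCI_SLOT()/PCI_FUNC() helpers mentioned in the comment:

static void pci_addr_example(void)
{
        u8  devfn = PCI_DEVFN(0x1f, 3);         /* (0x1f << 3) | 3 = 0xfb */
        u16 devid = PCI_DEVID(0x00, devfn);     /* (0x00 << 8) | 0xfb = 0x00fb */

        /* ...and decoding again: */
        u8 bus  = PCI_BUS_NUM(devid);           /* (0x00fb >> 8) & 0xff = 0x00 */
        u8 slot = PCI_SLOT(devfn);              /* 0xfb >> 3 = 0x1f */
        u8 func = PCI_FUNC(devfn);              /* 0xfb & 7 = 3 */
}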
+
+/* pci_slot represents a physical slot */
+struct pci_slot {
+ struct pci_bus *bus; /* The bus this slot is on */
+ struct list_head list; /* node in list of slots on this bus */
+ struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
+ unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
+ struct kobject kobj;
+};
+
+static inline const char *pci_slot_name(const struct pci_slot *slot)
+{
+ return kobject_name(&slot->kobj);
+}
+
+/* File state for mmap()s on /proc/bus/pci/X/Y */
+enum pci_mmap_state {
+ pci_mmap_io,
+ pci_mmap_mem
+};
+
+/* This defines the direction arg to the DMA mapping routines. */
+#define PCI_DMA_BIDIRECTIONAL 0
+#define PCI_DMA_TODEVICE 1
+#define PCI_DMA_FROMDEVICE 2
+#define PCI_DMA_NONE 3
+
+/*
+ * For PCI devices, the region numbers are assigned this way:
+ */
+enum {
+ /* #0-5: standard PCI resources */
+ PCI_STD_RESOURCES,
+ PCI_STD_RESOURCE_END = 5,
+
+ /* #6: expansion ROM resource */
+ PCI_ROM_RESOURCE,
+
+ /* device specific resources */
+#ifdef CONFIG_PCI_IOV
+ PCI_IOV_RESOURCES,
+ PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
+#endif
+
+ /* resources assigned to buses behind the bridge */
+#define PCI_BRIDGE_RESOURCE_NUM 4
+
+ PCI_BRIDGE_RESOURCES,
+ PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
+ PCI_BRIDGE_RESOURCE_NUM - 1,
+
+ /* total resources associated with a PCI device */
+ PCI_NUM_RESOURCES,
+
+ /* preserve this for compatibility */
+ DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
+};
+
+typedef int __bitwise pci_power_t;
+
+#define PCI_D0 ((pci_power_t __force) 0)
+#define PCI_D1 ((pci_power_t __force) 1)
+#define PCI_D2 ((pci_power_t __force) 2)
+#define PCI_D3hot ((pci_power_t __force) 3)
+#define PCI_D3cold ((pci_power_t __force) 4)
+#define PCI_UNKNOWN ((pci_power_t __force) 5)
+#define PCI_POWER_ERROR ((pci_power_t __force) -1)
+
+/* Remember to update this when the list above changes! */
+extern const char *pci_power_names[];
+
+static inline const char *pci_power_name(pci_power_t state)
+{
+ return pci_power_names[1 + (int) state];
+}
+
+#define PCI_PM_D2_DELAY 200
+#define PCI_PM_D3_WAIT 10
+#define PCI_PM_D3COLD_WAIT 100
+#define PCI_PM_BUS_WAIT 50
+
+/** The pci_channel state describes connectivity between the CPU and
+ * the pci device. If some PCI bus between here and the pci device
+ * has crashed or locked up, this info is reflected here.
+ */
+typedef unsigned int __bitwise pci_channel_state_t;
+
+enum pci_channel_state {
+ /* I/O channel is in normal state */
+ pci_channel_io_normal = (__force pci_channel_state_t) 1,
+
+ /* I/O to channel is blocked */
+ pci_channel_io_frozen = (__force pci_channel_state_t) 2,
+
+ /* PCI card is dead */
+ pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
+};
+
+typedef unsigned int __bitwise pcie_reset_state_t;
+
+enum pcie_reset_state {
+ /* Reset is NOT asserted (Use to deassert reset) */
+ pcie_deassert_reset = (__force pcie_reset_state_t) 1,
+
+ /* Use #PERST to reset PCIe device */
+ pcie_warm_reset = (__force pcie_reset_state_t) 2,
+
+ /* Use PCIe Hot Reset to reset device */
+ pcie_hot_reset = (__force pcie_reset_state_t) 3
+};
+
+typedef unsigned short __bitwise pci_dev_flags_t;
+enum pci_dev_flags {
+ /* INTX_DISABLE in PCI_COMMAND register disables MSI
+ * generation too.
+ */
+ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
+ /* Device configuration is irrevocably lost if disabled into D3 */
+ PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
+ /* Provide indication device is assigned by a Virtual Machine Manager */
+ PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
+ /* Flag for quirk use to store if quirk-specific ACS is enabled */
+ PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
+ /* Flag to indicate the device uses dma_alias_devfn */
+ PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
+ /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
+ PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
+ /* Do not use bus resets for device */
+ PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
+ /* Do not use PM reset even if device advertises NoSoftRst- */
+ PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
+};
+
+enum pci_irq_reroute_variant {
+ INTEL_IRQ_REROUTE_VARIANT = 1,
+ MAX_IRQ_REROUTE_VARIANTS = 3
+};
+
+typedef unsigned short __bitwise pci_bus_flags_t;
+enum pci_bus_flags {
+ PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
+ PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
+};
+
+/* These values come from the PCI Express Spec */
+enum pcie_link_width {
+ PCIE_LNK_WIDTH_RESRV = 0x00,
+ PCIE_LNK_X1 = 0x01,
+ PCIE_LNK_X2 = 0x02,
+ PCIE_LNK_X4 = 0x04,
+ PCIE_LNK_X8 = 0x08,
+ PCIE_LNK_X12 = 0x0C,
+ PCIE_LNK_X16 = 0x10,
+ PCIE_LNK_X32 = 0x20,
+ PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+};
+
+/* Based on the PCI Hotplug Spec, but some values are made up by us */
+enum pci_bus_speed {
+ PCI_SPEED_33MHz = 0x00,
+ PCI_SPEED_66MHz = 0x01,
+ PCI_SPEED_66MHz_PCIX = 0x02,
+ PCI_SPEED_100MHz_PCIX = 0x03,
+ PCI_SPEED_133MHz_PCIX = 0x04,
+ PCI_SPEED_66MHz_PCIX_ECC = 0x05,
+ PCI_SPEED_100MHz_PCIX_ECC = 0x06,
+ PCI_SPEED_133MHz_PCIX_ECC = 0x07,
+ PCI_SPEED_66MHz_PCIX_266 = 0x09,
+ PCI_SPEED_100MHz_PCIX_266 = 0x0a,
+ PCI_SPEED_133MHz_PCIX_266 = 0x0b,
+ AGP_UNKNOWN = 0x0c,
+ AGP_1X = 0x0d,
+ AGP_2X = 0x0e,
+ AGP_4X = 0x0f,
+ AGP_8X = 0x10,
+ PCI_SPEED_66MHz_PCIX_533 = 0x11,
+ PCI_SPEED_100MHz_PCIX_533 = 0x12,
+ PCI_SPEED_133MHz_PCIX_533 = 0x13,
+ PCIE_SPEED_2_5GT = 0x14,
+ PCIE_SPEED_5_0GT = 0x15,
+ PCIE_SPEED_8_0GT = 0x16,
+ PCI_SPEED_UNKNOWN = 0xff,
+};
+
+struct pci_cap_saved_data {
+ u16 cap_nr;
+ bool cap_extended;
+ unsigned int size;
+ u32 data[0];
+};
+
+struct pci_cap_saved_state {
+ struct hlist_node next;
+ struct pci_cap_saved_data cap;
+};
+
+struct pcie_link_state;
+struct pci_vpd;
+struct pci_sriov;
+struct pci_ats;
+
+/*
+ * The pci_dev structure is used to describe PCI devices.
+ */
+struct pci_dev {
+ struct list_head bus_list; /* node in per-bus list */
+ struct pci_bus *bus; /* bus this device is on */
+ struct pci_bus *subordinate; /* bus this device bridges to */
+
+ void *sysdata; /* hook for sys-specific extension */
+ struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
+ struct pci_slot *slot; /* Physical slot this device is in */
+
+ unsigned int devfn; /* encoded device & function index */
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short subsystem_vendor;
+ unsigned short subsystem_device;
+ unsigned int class; /* 3 bytes: (base,sub,prog-if) */
+ u8 revision; /* PCI revision, low byte of class word */
+ u8 hdr_type; /* PCI header type (`multi' flag masked out) */
+ u8 pcie_cap; /* PCIe capability offset */
+ u8 msi_cap; /* MSI capability offset */
+ u8 msix_cap; /* MSI-X capability offset */
+ u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
+ u8 rom_base_reg; /* which config register controls the ROM */
+ u8 pin; /* which interrupt pin this device uses */
+ u16 pcie_flags_reg; /* cached PCIe Capabilities Register */
+ u8 dma_alias_devfn;/* devfn of DMA alias, if any */
+
+ struct pci_driver *driver; /* which driver has allocated this device */
+ u64 dma_mask; /* Mask of the bits of bus address this
+ device implements. Normally this is
+ 0xffffffff. You only need to change
+ this if your device has broken DMA
+ or supports 64-bit transfers. */
+
+ struct device_dma_parameters dma_parms;
+
+ pci_power_t current_state; /* Current operating state. In ACPI-speak,
+ this is D0-D3, D0 being fully functional,
+ and D3 being off. */
+ u8 pm_cap; /* PM capability offset */
+ unsigned int pme_support:5; /* Bitmask of states from which PME#
+ can be generated */
+ unsigned int pme_interrupt:1;
+ unsigned int pme_poll:1; /* Poll device's PME status bit */
+ unsigned int d1_support:1; /* Low power state D1 is supported */
+ unsigned int d2_support:1; /* Low power state D2 is supported */
+ unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
+ unsigned int no_d3cold:1; /* D3cold is forbidden */
+ unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
+ unsigned int mmio_always_on:1; /* disallow turning off io/mem
+ decoding during bar sizing */
+ unsigned int wakeup_prepared:1;
+ unsigned int runtime_d3cold:1; /* whether to go through runtime
+ D3cold, not set for devices
+ powered on/off by the
+ corresponding bridge */
+ unsigned int ignore_hotplug:1; /* Ignore hotplug events */
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
+ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
+
+#ifdef CONFIG_PCIEASPM
+ struct pcie_link_state *link_state; /* ASPM link state */
+#endif
+
+ pci_channel_state_t error_state; /* current connectivity state */
+ struct device dev; /* Generic device interface */
+
+ int cfg_size; /* Size of configuration space */
+
+ /*
+ * Instead of touching interrupt line and base address registers
+ * directly, use the values stored here. They might be different!
+ */
+ unsigned int irq;
+ struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+
+ bool match_driver; /* Skip attaching driver */
+ /* These fields are used by common fixups */
+ unsigned int transparent:1; /* Subtractive decode PCI bridge */
+ unsigned int multifunction:1;/* Part of multi-function device */
+ /* keep track of device state */
+ unsigned int is_added:1;
+ unsigned int is_busmaster:1; /* device is busmaster */
+ unsigned int no_msi:1; /* device may not use msi */
+ unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
+ unsigned int block_cfg_access:1; /* config space access is blocked */
+ unsigned int broken_parity_status:1; /* Device generates false positive parity */
+ unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
+ unsigned int msi_enabled:1;
+ unsigned int msix_enabled:1;
+ unsigned int ari_enabled:1; /* ARI forwarding */
+ unsigned int is_managed:1;
+ unsigned int needs_freset:1; /* Dev requires fundamental reset */
+ unsigned int state_saved:1;
+ unsigned int is_physfn:1;
+ unsigned int is_virtfn:1;
+ unsigned int reset_fn:1;
+ unsigned int is_hotplug_bridge:1;
+ unsigned int __aer_firmware_first_valid:1;
+ unsigned int __aer_firmware_first:1;
+ unsigned int broken_intx_masking:1;
+ unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
+ unsigned int irq_managed:1;
+ pci_dev_flags_t dev_flags;
+ atomic_t enable_cnt; /* pci_enable_device has been called */
+
+ u32 saved_config_space[16]; /* config space saved at suspend time */
+ struct hlist_head saved_cap_space;
+ struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
+ int rom_attr_enabled; /* has display of the rom attribute been enabled? */
+ struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
+ struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
+#ifdef CONFIG_PCI_MSI
+ struct list_head msi_list;
+ const struct attribute_group **msi_irq_groups;
+#endif
+ struct pci_vpd *vpd;
+#ifdef CONFIG_PCI_ATS
+ union {
+ struct pci_sriov *sriov; /* SR-IOV capability related */
+ struct pci_dev *physfn; /* the PF this VF is associated with */
+ };
+ struct pci_ats *ats; /* Address Translation Service */
+#endif
+ phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
+ size_t romlen; /* Length of ROM if it's not from the BAR */
+ char *driver_override; /* Driver name to force a match */
+};
+
+static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_IOV
+ if (dev->is_virtfn)
+ dev = dev->physfn;
+#endif
+ return dev;
+}
+
+struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
+
+#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
+#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
+
+static inline int pci_channel_offline(struct pci_dev *pdev)
+{
+ return (pdev->error_state != pci_channel_io_normal);
+}
+
+struct pci_host_bridge {
+ struct device dev;
+ struct pci_bus *bus; /* root bus */
+ struct list_head windows; /* resource_entry */
+ void (*release_fn)(struct pci_host_bridge *);
+ void *release_data;
+ unsigned int ignore_reset_delay:1; /* for entire hierarchy */
+};
+
+#define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
+void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
+ void (*release_fn)(struct pci_host_bridge *),
+ void *release_data);
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
+
+/*
+ * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond
+ * to P2P or CardBus bridge windows) go in a table. Additional ones (for
+ * buses below host bridges or subtractive decode bridges) go in the list.
+ * Use pci_bus_for_each_resource() to iterate through all the resources.
+ */
+
+/*
+ * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly
+ * and there's no way to program the bridge with the details of the window.
+ * This does not apply to ACPI _CRS windows, even with the _DEC subtractive-
+ * decode bit set, because they are explicit and can be programmed with _SRS.
+ */
+#define PCI_SUBTRACTIVE_DECODE 0x1
+
+struct pci_bus_resource {
+ struct list_head list;
+ struct resource *res;
+ unsigned int flags;
+};
+
+#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
+
+struct pci_bus {
+ struct list_head node; /* node in list of buses */
+ struct pci_bus *parent; /* parent bus this bridge is on */
+ struct list_head children; /* list of child buses */
+ struct list_head devices; /* list of devices on this bus */
+ struct pci_dev *self; /* bridge device as seen by parent */
+ struct list_head slots; /* list of slots on this bus */
+ struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
+ struct list_head resources; /* address space routed to this bus */
+ struct resource busn_res; /* bus numbers routed to this bus */
+
+ struct pci_ops *ops; /* configuration access functions */
+ struct msi_controller *msi; /* MSI controller */
+ void *sysdata; /* hook for sys-specific extension */
+ struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
+
+ unsigned char number; /* bus number */
+ unsigned char primary; /* number of primary bridge */
+ unsigned char max_bus_speed; /* enum pci_bus_speed */
+ unsigned char cur_bus_speed; /* enum pci_bus_speed */
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ int domain_nr;
+#endif
+
+ char name[48];
+
+ unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
+ pci_bus_flags_t bus_flags; /* inherited by child buses */
+ struct device *bridge;
+ struct device dev;
+ struct bin_attribute *legacy_io; /* legacy I/O for this bus */
+ struct bin_attribute *legacy_mem; /* legacy mem */
+ unsigned int is_added:1;
+};
+
+#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
+
+/*
+ * Returns true if the PCI bus is root (behind host-PCI bridge),
+ * false otherwise
+ *
+ * Some code assumes that "bus->self == NULL" means that bus is a root bus.
+ * This is incorrect because "virtual" buses added for SR-IOV (via
+ * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
+ */
+static inline bool pci_is_root_bus(struct pci_bus *pbus)
+{
+ return !(pbus->parent);
+}
+
+/**
+ * pci_is_bridge - check if the PCI device is a bridge
+ * @dev: PCI device
+ *
+ * Return true if the PCI device is a bridge, whether or not it has a
+ * subordinate bus.
+ */
+static inline bool pci_is_bridge(struct pci_dev *dev)
+{
+ return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
+}
+
+static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
+{
+ dev = pci_physfn(dev);
+ if (pci_is_root_bus(dev->bus))
+ return NULL;
+
+ return dev->bus->self;
+}
+
+struct device *pci_get_host_bridge_device(struct pci_dev *dev);
+void pci_put_host_bridge_device(struct device *dev);
+
+#ifdef CONFIG_PCI_MSI
+static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
+{
+ return pci_dev->msi_enabled || pci_dev->msix_enabled;
+}
+#else
+static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
+#endif
+
+/*
+ * Error values that may be returned by PCI functions.
+ */
+#define PCIBIOS_SUCCESSFUL 0x00
+#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
+#define PCIBIOS_BAD_VENDOR_ID 0x83
+#define PCIBIOS_DEVICE_NOT_FOUND 0x86
+#define PCIBIOS_BAD_REGISTER_NUMBER 0x87
+#define PCIBIOS_SET_FAILED 0x88
+#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+
+/*
+ * Translate above to generic errno for passing back through non-PCI code.
+ */
+static inline int pcibios_err_to_errno(int err)
+{
+ if (err <= PCIBIOS_SUCCESSFUL)
+ return err; /* Assume already errno */
+
+ switch (err) {
+ case PCIBIOS_FUNC_NOT_SUPPORTED:
+ return -ENOENT;
+ case PCIBIOS_BAD_VENDOR_ID:
+ return -ENOTTY;
+ case PCIBIOS_DEVICE_NOT_FOUND:
+ return -ENODEV;
+ case PCIBIOS_BAD_REGISTER_NUMBER:
+ return -EFAULT;
+ case PCIBIOS_SET_FAILED:
+ return -EIO;
+ case PCIBIOS_BUFFER_TOO_SMALL:
+ return -ENOSPC;
+ }
+
+ return -ERANGE;
+}
+
+/* Low-level architecture-dependent routines */
+
+struct pci_ops {
+ void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
+ int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
+ int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
+};
+
+/*
+ * ACPI needs to be able to access PCI config space before we've done a
+ * PCI bus scan and created pci_bus structures.
+ */
+int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val);
+int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 val);
+
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+typedef u64 pci_bus_addr_t;
+#else
+typedef u32 pci_bus_addr_t;
+#endif
+
+struct pci_bus_region {
+ pci_bus_addr_t start;
+ pci_bus_addr_t end;
+};
+
+struct pci_dynids {
+ spinlock_t lock; /* protects list, index */
+ struct list_head list; /* for IDs added at runtime */
+};
+
+
+/*
+ * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
+ * a set of callbacks in struct pci_error_handlers, that device driver
+ * will be notified of PCI bus errors, and will be driven to recovery
+ * when an error occurs.
+ */
+
+typedef unsigned int __bitwise pci_ers_result_t;
+
+enum pci_ers_result {
+ /* no result/none/not supported in device driver */
+ PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
+
+ /* Device driver can recover without slot reset */
+ PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
+
+ /* Device driver wants slot to be reset. */
+ PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
+
+ /* Device has completely failed, is unrecoverable */
+ PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
+
+ /* Device driver is fully recovered and operational */
+ PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
+
+ /* No AER capabilities registered for the driver */
+ PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
+};
+
+/* PCI bus error event callbacks */
+struct pci_error_handlers {
+ /* PCI bus error detected on this device */
+ pci_ers_result_t (*error_detected)(struct pci_dev *dev,
+ enum pci_channel_state error);
+
+ /* MMIO has been re-enabled, but not DMA */
+ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
+
+ /* PCI Express link has been reset */
+ pci_ers_result_t (*link_reset)(struct pci_dev *dev);
+
+ /* PCI slot has been reset */
+ pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
+
+ /* PCI function reset prepare or completed */
+ void (*reset_notify)(struct pci_dev *dev, bool prepare);
+
+ /* Device driver may resume normal operations */
+ void (*resume)(struct pci_dev *dev);
+};
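+
+/*
+ * Illustrative sketch: a driver opting into error recovery fills in a subset
+ * of these callbacks and points its struct pci_driver ->err_handler at the
+ * table.  The "foo" names are hypothetical:
+ *
+ *     static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
+ *                                                enum pci_channel_state state)
+ *     {
+ *             if (state == pci_channel_io_perm_failure)
+ *                     return PCI_ERS_RESULT_DISCONNECT;
+ *             return PCI_ERS_RESULT_NEED_RESET;
+ *     }
+ *
+ *     static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
+ *     {
+ *             return pci_enable_device(pdev) ?
+ *                     PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+ *     }
+ *
+ *     static const struct pci_error_handlers foo_err_handler = {
+ *             .error_detected = foo_error_detected,
+ *             .slot_reset     = foo_slot_reset,
+ *     };
+ */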
+
+
+struct module;
+struct pci_driver {
+ struct list_head node;
+ const char *name;
+ const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
+ int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
+ void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
+ int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
+ int (*suspend_late) (struct pci_dev *dev, pm_message_t state);
+ int (*resume_early) (struct pci_dev *dev);
+ int (*resume) (struct pci_dev *dev); /* Device woken up */
+ void (*shutdown) (struct pci_dev *dev);
+ int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
+ const struct pci_error_handlers *err_handler;
+ struct device_driver driver;
+ struct pci_dynids dynids;
+};
+
+#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
+
+/**
+ * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
+ * @_table: device table name
+ *
+ * This macro is deprecated and should not be used in new code.
+ */
+#define DEFINE_PCI_DEVICE_TABLE(_table) \
+ const struct pci_device_id _table[]
+
+/**
+ * PCI_DEVICE - macro used to describe a specific pci device
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * PCI_ANY_ID.
+ */
+#define PCI_DEVICE(vend,dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+
+/**
+ * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
+ * @vend: the 16 bit PCI Vendor ID
+ * @dev: the 16 bit PCI Device ID
+ * @subvend: the 16 bit PCI Subvendor ID
+ * @subdev: the 16 bit PCI Subdevice ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific device with subsystem information.
+ */
+#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = (subvend), .subdevice = (subdev)
+
+/**
+ * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
+ * @dev_class: the class, subclass, prog-if triple for this device
+ * @dev_class_mask: the class mask for this device
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific PCI class. The vendor, device, subvendor, and subdevice
+ * fields will be set to PCI_ANY_ID.
+ */
+#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
+ .class = (dev_class), .class_mask = (dev_class_mask), \
+ .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+
+/**
+ * PCI_VDEVICE - macro used to describe a specific pci device in short form
+ * @vend: the vendor name
+ * @dev: the 16 bit PCI Device ID
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific PCI device. The subvendor and subdevice fields will be set
+ * to PCI_ANY_ID. The macro allows the next field to follow as the device
+ * private data.
+ */
+
+#define PCI_VDEVICE(vend, dev) \
+ .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
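+
+/*
+ * Illustrative sketch: the ID macros above are normally used to build a
+ * device table that is then exported for module autoloading.  The vendor,
+ * device and driver_data values below are made up; the trailing 1 in the
+ * PCI_VDEVICE entry lands in driver_data:
+ *
+ *     static const struct pci_device_id foo_ids[] = {
+ *             { PCI_DEVICE(0x1234, 0x5678) },
+ *             { PCI_VDEVICE(INTEL, 0x9abc), 1 },
+ *             { }
+ *     };
+ *     MODULE_DEVICE_TABLE(pci, foo_ids);
+ */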
+
+/* these external functions are only available when PCI support is enabled */
+#ifdef CONFIG_PCI
+
+void pcie_bus_configure_settings(struct pci_bus *bus);
+
+enum pcie_bus_config_types {
+ PCIE_BUS_TUNE_OFF,
+ PCIE_BUS_SAFE,
+ PCIE_BUS_PERFORMANCE,
+ PCIE_BUS_PEER2PEER,
+};
+
+extern enum pcie_bus_config_types pcie_bus_config;
+
+extern struct bus_type pci_bus_type;
+
+/* Do NOT directly access these two variables, unless you are arch-specific PCI
+ * code, or PCI core code. */
+extern struct list_head pci_root_buses; /* list of all known PCI buses */
+/* Some device drivers need to know if PCI is initialized */
+int no_pci_devices(void);
+
+void pcibios_resource_survey_bus(struct pci_bus *bus);
+void pcibios_add_bus(struct pci_bus *bus);
+void pcibios_remove_bus(struct pci_bus *bus);
+void pcibios_fixup_bus(struct pci_bus *);
+int __must_check pcibios_enable_device(struct pci_dev *, int mask);
+/* Architecture-specific versions may override this (weak) */
+char *pcibios_setup(char *str);
+
+/* Used only when drivers/pci/setup.c is used */
+resource_size_t pcibios_align_resource(void *, const struct resource *,
+ resource_size_t,
+ resource_size_t);
+void pcibios_update_irq(struct pci_dev *, int irq);
+
+/* Weak but can be overridden by arch */
+void pci_fixup_cardbus(struct pci_bus *);
+
+/* Generic PCI functions used internally */
+
+void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
+ struct resource *res);
+void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
+ struct pci_bus_region *region);
+void pcibios_scan_specific_bus(int busn);
+struct pci_bus *pci_find_bus(int domain, int busnr);
+void pci_bus_add_devices(const struct pci_bus *bus);
+struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata);
+struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources);
+int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
+int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
+void pci_bus_release_busn_res(struct pci_bus *b);
+struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata,
+ struct list_head *resources);
+struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
+ int busnr);
+void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
+struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
+ const char *name,
+ struct hotplug_slot *hotplug);
+void pci_destroy_slot(struct pci_slot *slot);
+int pci_scan_slot(struct pci_bus *bus, int devfn);
+struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
+void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
+unsigned int pci_scan_child_bus(struct pci_bus *bus);
+void pci_bus_add_device(struct pci_dev *dev);
+void pci_read_bridge_bases(struct pci_bus *child);
+struct resource *pci_find_parent_resource(const struct pci_dev *dev,
+ struct resource *res);
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
+struct pci_dev *pci_dev_get(struct pci_dev *dev);
+void pci_dev_put(struct pci_dev *dev);
+void pci_remove_bus(struct pci_bus *b);
+void pci_stop_and_remove_bus_device(struct pci_dev *dev);
+void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
+void pci_stop_root_bus(struct pci_bus *bus);
+void pci_remove_root_bus(struct pci_bus *bus);
+void pci_setup_cardbus(struct pci_bus *bus);
+void pci_sort_breadthfirst(void);
+#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
+#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
+#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
+
+/* Generic PCI functions exported to card drivers */
+
+enum pci_lost_interrupt_reason {
+ PCI_LOST_IRQ_NO_INFORMATION = 0,
+ PCI_LOST_IRQ_DISABLE_MSI,
+ PCI_LOST_IRQ_DISABLE_MSIX,
+ PCI_LOST_IRQ_DISABLE_ACPI,
+};
+enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
+int pci_find_capability(struct pci_dev *dev, int cap);
+int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
+int pci_find_ext_capability(struct pci_dev *dev, int cap);
+int pci_find_next_ext_capability(struct pci_dev *dev, int pos, int cap);
+int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
+int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
+struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
+
+struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
+ struct pci_dev *from);
+struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from);
+struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
+struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
+ unsigned int devfn);
+static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
+ unsigned int devfn)
+{
+ return pci_get_domain_bus_and_slot(0, bus, devfn);
+}
+struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
+int pci_dev_present(const struct pci_device_id *ids);
+
+int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
+ int where, u8 *val);
+int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
+ int where, u16 *val);
+int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 *val);
+int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
+ int where, u8 val);
+int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
+ int where, u16 val);
+int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
+ int where, u32 val);
+
+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val);
+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val);
+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val);
+
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
+
+static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
+{
+ return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
+}
+static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
+{
+ return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
+}
+static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
+ u32 *val)
+{
+ return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
+{
+ return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
+{
+ return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
+}
+static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
+ u32 val)
+{
+ return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
+}
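+
+/*
+ * Illustrative sketch: a read-modify-write of the command register through
+ * the per-device wrappers above.
+ *
+ *     u16 cmd;
+ *
+ *     pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ *     pci_write_config_word(pdev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);
+ */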
+
+int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
+int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
+int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
+int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
+int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
+int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
+ u32 clear, u32 set);
+
+static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
+ u16 set)
+{
+ return pcie_capability_clear_and_set_word(dev, pos, 0, set);
+}
+
+static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
+ u32 set)
+{
+ return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
+}
+
+static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
+ u16 clear)
+{
+ return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
+}
+
+static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
+ u32 clear)
+{
+ return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
+}
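+
+/*
+ * Illustrative sketch: the "pos" argument is the register offset within the
+ * PCIe capability, e.g. clearing and later restoring an ASPM bit in Link
+ * Control:
+ *
+ *     pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPM_L0S);
+ *     ...
+ *     pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPM_L0S);
+ */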
+
+/* user-space driven config access */
+int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
+int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
+int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
+int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
+int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
+int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
+
+int __must_check pci_enable_device(struct pci_dev *dev);
+int __must_check pci_enable_device_io(struct pci_dev *dev);
+int __must_check pci_enable_device_mem(struct pci_dev *dev);
+int __must_check pci_reenable_device(struct pci_dev *);
+int __must_check pcim_enable_device(struct pci_dev *pdev);
+void pcim_pin_device(struct pci_dev *pdev);
+
+static inline int pci_is_enabled(struct pci_dev *pdev)
+{
+ return (atomic_read(&pdev->enable_cnt) > 0);
+}
+
+static inline int pci_is_managed(struct pci_dev *pdev)
+{
+ return pdev->is_managed;
+}
+
+void pci_disable_device(struct pci_dev *dev);
+
+extern unsigned int pcibios_max_latency;
+void pci_set_master(struct pci_dev *dev);
+void pci_clear_master(struct pci_dev *dev);
+
+int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
+int pci_set_cacheline_size(struct pci_dev *dev);
+#define HAVE_PCI_SET_MWI
+int __must_check pci_set_mwi(struct pci_dev *dev);
+int pci_try_set_mwi(struct pci_dev *dev);
+void pci_clear_mwi(struct pci_dev *dev);
+void pci_intx(struct pci_dev *dev, int enable);
+bool pci_intx_mask_supported(struct pci_dev *dev);
+bool pci_check_and_mask_intx(struct pci_dev *dev);
+bool pci_check_and_unmask_intx(struct pci_dev *dev);
+void pci_msi_off(struct pci_dev *dev);
+int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
+int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
+int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
+int pci_wait_for_pending_transaction(struct pci_dev *dev);
+int pcix_get_max_mmrbc(struct pci_dev *dev);
+int pcix_get_mmrbc(struct pci_dev *dev);
+int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
+int pcie_get_readrq(struct pci_dev *dev);
+int pcie_set_readrq(struct pci_dev *dev, int rq);
+int pcie_get_mps(struct pci_dev *dev);
+int pcie_set_mps(struct pci_dev *dev, int mps);
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width);
+int __pci_reset_function(struct pci_dev *dev);
+int __pci_reset_function_locked(struct pci_dev *dev);
+int pci_reset_function(struct pci_dev *dev);
+int pci_try_reset_function(struct pci_dev *dev);
+int pci_probe_reset_slot(struct pci_slot *slot);
+int pci_reset_slot(struct pci_slot *slot);
+int pci_try_reset_slot(struct pci_slot *slot);
+int pci_probe_reset_bus(struct pci_bus *bus);
+int pci_reset_bus(struct pci_bus *bus);
+int pci_try_reset_bus(struct pci_bus *bus);
+void pci_reset_secondary_bus(struct pci_dev *dev);
+void pcibios_reset_secondary_bus(struct pci_dev *dev);
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
+void pci_update_resource(struct pci_dev *dev, int resno);
+int __must_check pci_assign_resource(struct pci_dev *dev, int i);
+int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
+int pci_select_bars(struct pci_dev *dev, unsigned long flags);
+bool pci_device_is_present(struct pci_dev *pdev);
+void pci_ignore_hotplug(struct pci_dev *dev);
+
+/* ROM control related routines */
+int pci_enable_rom(struct pci_dev *pdev);
+void pci_disable_rom(struct pci_dev *pdev);
+void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
+void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
+size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
+void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
+
+/* Power management related routines */
+int pci_save_state(struct pci_dev *dev);
+void pci_restore_state(struct pci_dev *dev);
+struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
+int pci_load_saved_state(struct pci_dev *dev,
+ struct pci_saved_state *state);
+int pci_load_and_free_saved_state(struct pci_dev *dev,
+ struct pci_saved_state **state);
+struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
+struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
+ u16 cap);
+int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
+int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
+ u16 cap, unsigned int size);
+int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
+bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
+void pci_pme_active(struct pci_dev *dev, bool enable);
+int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ bool runtime, bool enable);
+int pci_wake_from_d3(struct pci_dev *dev, bool enable);
+int pci_prepare_to_sleep(struct pci_dev *dev);
+int pci_back_from_sleep(struct pci_dev *dev);
+bool pci_dev_run_wake(struct pci_dev *dev);
+bool pci_check_pme_status(struct pci_dev *dev);
+void pci_pme_wakeup_bus(struct pci_bus *bus);
+
+static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ bool enable)
+{
+ return __pci_enable_wake(dev, state, false, enable);
+}
+
+/* PCI Virtual Channel */
+int pci_save_vc_state(struct pci_dev *dev);
+void pci_restore_vc_state(struct pci_dev *dev);
+void pci_allocate_vc_save_buffers(struct pci_dev *dev);
+
+/* For use by arch with custom probe code */
+void set_pcie_port_type(struct pci_dev *pdev);
+void set_pcie_hotplug_bridge(struct pci_dev *pdev);
+
+/* Functions for PCI Hotplug drivers to use */
+int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
+unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
+unsigned int pci_rescan_bus(struct pci_bus *bus);
+void pci_lock_rescan_remove(void);
+void pci_unlock_rescan_remove(void);
+
+/* Vital product data routines */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+
+/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
+resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
+void pci_bus_assign_resources(const struct pci_bus *bus);
+void pci_bus_size_bridges(struct pci_bus *bus);
+int pci_claim_resource(struct pci_dev *, int);
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
+void pci_assign_unassigned_resources(void);
+void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
+void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
+void pdev_enable_device(struct pci_dev *);
+int pci_enable_resources(struct pci_dev *, int mask);
+void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
+ int (*)(const struct pci_dev *, u8, u8));
+#define HAVE_PCI_REQ_REGIONS 2
+int __must_check pci_request_regions(struct pci_dev *, const char *);
+int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
+void pci_release_regions(struct pci_dev *);
+int __must_check pci_request_region(struct pci_dev *, int, const char *);
+int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
+void pci_release_region(struct pci_dev *, int);
+int pci_request_selected_regions(struct pci_dev *, int, const char *);
+int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
+void pci_release_selected_regions(struct pci_dev *, int);
+
+/* drivers/pci/bus.c */
+struct pci_bus *pci_bus_get(struct pci_bus *bus);
+void pci_bus_put(struct pci_bus *bus);
+void pci_add_resource(struct list_head *resources, struct resource *res);
+void pci_add_resource_offset(struct list_head *resources, struct resource *res,
+ resource_size_t offset);
+void pci_free_resource_list(struct list_head *resources);
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags);
+struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
+void pci_bus_remove_resources(struct pci_bus *bus);
+
+#define pci_bus_for_each_resource(bus, res, i) \
+ for (i = 0; \
+ (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
+ i++)
+
+int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
+ struct resource *res, resource_size_t size,
+ resource_size_t align, resource_size_t min,
+ unsigned long type_mask,
+ resource_size_t (*alignf)(void *,
+ const struct resource *,
+ resource_size_t,
+ resource_size_t),
+ void *alignf_data);
+
+
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+
+static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
+{
+ struct pci_bus_region region;
+
+ pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
+ return region.start;
+}
+
+/* Proper probing supporting hot-pluggable devices */
+int __must_check __pci_register_driver(struct pci_driver *, struct module *,
+ const char *mod_name);
+
+/*
+ * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
+ */
+#define pci_register_driver(driver) \
+ __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+
+void pci_unregister_driver(struct pci_driver *dev);
+
+/**
+ * module_pci_driver() - Helper macro for registering a PCI driver
+ * @__pci_driver: pci_driver struct
+ *
+ * Helper macro for PCI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_pci_driver(__pci_driver) \
+ module_driver(__pci_driver, pci_register_driver, \
+ pci_unregister_driver)
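+
+/*
+ * Illustrative sketch: a driver with no extra module init/exit work boils
+ * down to the declarations below (the foo_* symbols are hypothetical):
+ *
+ *     static struct pci_driver foo_pci_driver = {
+ *             .name     = "foo",
+ *             .id_table = foo_ids,
+ *             .probe    = foo_probe,
+ *             .remove   = foo_remove,
+ *     };
+ *     module_pci_driver(foo_pci_driver);
+ */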
+
+struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
+int pci_add_dynid(struct pci_driver *drv,
+ unsigned int vendor, unsigned int device,
+ unsigned int subvendor, unsigned int subdevice,
+ unsigned int class, unsigned int class_mask,
+ unsigned long driver_data);
+const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
+ struct pci_dev *dev);
+int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
+ int pass);
+
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata);
+int pci_cfg_space_size(struct pci_dev *dev);
+unsigned char pci_bus_max_busnr(struct pci_bus *bus);
+void pci_setup_bridge(struct pci_bus *bus);
+resource_size_t pcibios_window_alignment(struct pci_bus *bus,
+ unsigned long type);
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
+
+#define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
+#define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
+
+int pci_set_vga_state(struct pci_dev *pdev, bool decode,
+ unsigned int command_bits, u32 flags);
+/* kmem_cache style wrapper around pci_alloc_consistent() */
+
+#include <linux/pci-dma.h>
+#include <linux/dmapool.h>
+
+#define pci_pool dma_pool
+#define pci_pool_create(name, pdev, size, align, allocation) \
+ dma_pool_create(name, &pdev->dev, size, align, allocation)
+#define pci_pool_destroy(pool) dma_pool_destroy(pool)
+#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
+#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
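+
+/*
+ * Illustrative sketch: the legacy pci_pool names map one-to-one onto the
+ * dma_pool API, e.g.
+ *
+ *     struct pci_pool *pool;
+ *     dma_addr_t dma;
+ *     void *vaddr;
+ *
+ *     pool  = pci_pool_create("foo", pdev, 64, 8, 0);
+ *     vaddr = pci_pool_alloc(pool, GFP_KERNEL, &dma);
+ *     ...
+ *     pci_pool_free(pool, vaddr, dma);
+ *     pci_pool_destroy(pool);
+ */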
+
+enum pci_dma_burst_strategy {
+ PCI_DMA_BURST_INFINITY, /* make bursts as large as possible,
+ strategy_parameter is N/A */
+ PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter
+ byte boundaries */
+ PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of
+ strategy_parameter byte boundaries */
+};
+
+struct msix_entry {
+ u32 vector; /* kernel uses to write allocated vector */
+ u16 entry; /* driver uses to specify entry, OS writes */
+};
+
+
+#ifdef CONFIG_PCI_MSI
+int pci_msi_vec_count(struct pci_dev *dev);
+void pci_msi_shutdown(struct pci_dev *dev);
+void pci_disable_msi(struct pci_dev *dev);
+int pci_msix_vec_count(struct pci_dev *dev);
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
+void pci_msix_shutdown(struct pci_dev *dev);
+void pci_disable_msix(struct pci_dev *dev);
+void pci_restore_msi_state(struct pci_dev *dev);
+int pci_msi_enabled(void);
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
+static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+{
+ int rc = pci_enable_msi_range(dev, nvec, nvec);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+ int minvec, int maxvec);
+static inline int pci_enable_msix_exact(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{
+ int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+#else
+static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
+static inline void pci_msi_shutdown(struct pci_dev *dev) { }
+static inline void pci_disable_msi(struct pci_dev *dev) { }
+static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
+static inline int pci_enable_msix(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{ return -ENOSYS; }
+static inline void pci_msix_shutdown(struct pci_dev *dev) { }
+static inline void pci_disable_msix(struct pci_dev *dev) { }
+static inline void pci_restore_msi_state(struct pci_dev *dev) { }
+static inline int pci_msi_enabled(void) { return 0; }
+static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
+ int maxvec)
+{ return -ENOSYS; }
+static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+{ return -ENOSYS; }
+static inline int pci_enable_msix_range(struct pci_dev *dev,
+ struct msix_entry *entries, int minvec, int maxvec)
+{ return -ENOSYS; }
+static inline int pci_enable_msix_exact(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{ return -ENOSYS; }
+#endif
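+
+/*
+ * Illustrative sketch: a driver asks for a range of MSI-X vectors (each
+ * entries[i].entry index must be filled in beforehand) and falls back to a
+ * single MSI if MSI-X is unavailable; both calls return the number of
+ * vectors allocated or a negative errno.
+ *
+ *     nvec = pci_enable_msix_range(pdev, entries, 1, 8);
+ *     if (nvec < 0)
+ *             nvec = pci_enable_msi_range(pdev, 1, 1);
+ */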
+
+#ifdef CONFIG_PCIEPORTBUS
+extern bool pcie_ports_disabled;
+extern bool pcie_ports_auto;
+#else
+#define pcie_ports_disabled true
+#define pcie_ports_auto false
+#endif
+
+#ifdef CONFIG_PCIEASPM
+bool pcie_aspm_support_enabled(void);
+#else
+static inline bool pcie_aspm_support_enabled(void) { return false; }
+#endif
+
+#ifdef CONFIG_PCIEAER
+void pci_no_aer(void);
+bool pci_aer_available(void);
+#else
+static inline void pci_no_aer(void) { }
+static inline bool pci_aer_available(void) { return false; }
+#endif
+
+#ifdef CONFIG_PCIE_ECRC
+void pcie_set_ecrc_checking(struct pci_dev *dev);
+void pcie_ecrc_get_policy(char *str);
+#else
+static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
+static inline void pcie_ecrc_get_policy(char *str) { }
+#endif
+
+#define pci_enable_msi(pdev) pci_enable_msi_exact(pdev, 1)
+
+#ifdef CONFIG_HT_IRQ
+/* The functions a driver should call */
+int ht_create_irq(struct pci_dev *dev, int idx);
+void ht_destroy_irq(unsigned int irq);
+#endif /* CONFIG_HT_IRQ */
+
+void pci_cfg_access_lock(struct pci_dev *dev);
+bool pci_cfg_access_trylock(struct pci_dev *dev);
+void pci_cfg_access_unlock(struct pci_dev *dev);
+
+/*
+ * PCI domain support. Sometimes called PCI segment (e.g. by ACPI),
+ * a PCI domain is defined to be a set of PCI buses which share
+ * configuration space.
+ */
+#ifdef CONFIG_PCI_DOMAINS
+extern int pci_domains_supported;
+int pci_get_new_domain_nr(void);
+#else
+enum { pci_domains_supported = 0 };
+static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
+static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
+static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
+#endif /* CONFIG_PCI_DOMAINS */
+
+/*
+ * Generic implementation for PCI domain support. If your
+ * architecture does not need custom management of PCI
+ * domains, then this implementation will be used.
+ */
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline int pci_domain_nr(struct pci_bus *bus)
+{
+ return bus->domain_nr;
+}
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
+#else
+static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
+ struct device *parent)
+{
+}
+#endif
+
+/* some architectures require additional setup to direct VGA traffic */
+typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
+ unsigned int command_bits, u32 flags);
+void pci_register_set_vga_state(arch_set_vga_state_t func);
+
+#else /* CONFIG_PCI is not enabled */
+
+/*
+ * If the system does not have PCI, clearly these return errors. Define
+ * these as simple inline functions to avoid hair in drivers.
+ */
+
+#define _PCI_NOP(o, s, t) \
+ static inline int pci_##o##_config_##s(struct pci_dev *dev, \
+ int where, t val) \
+ { return PCIBIOS_FUNC_NOT_SUPPORTED; }
+
+#define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
+ _PCI_NOP(o, word, u16 x) \
+ _PCI_NOP(o, dword, u32 x)
+_PCI_NOP_ALL(read, *)
+_PCI_NOP_ALL(write,)
+
+static inline struct pci_dev *pci_get_device(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
+ unsigned int device,
+ unsigned int ss_vendor,
+ unsigned int ss_device,
+ struct pci_dev *from)
+{ return NULL; }
+
+static inline struct pci_dev *pci_get_class(unsigned int class,
+ struct pci_dev *from)
+{ return NULL; }
+
+#define pci_dev_present(ids) (0)
+#define no_pci_devices() (1)
+#define pci_dev_put(dev) do { } while (0)
+
+static inline void pci_set_master(struct pci_dev *dev) { }
+static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
+static inline void pci_disable_device(struct pci_dev *dev) { }
+static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{ return -EIO; }
+static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{ return -EIO; }
+static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
+ unsigned int size)
+{ return -EIO; }
+static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
+ unsigned long mask)
+{ return -EIO; }
+static inline int pci_assign_resource(struct pci_dev *dev, int i)
+{ return -EBUSY; }
+static inline int __pci_register_driver(struct pci_driver *drv,
+ struct module *owner)
+{ return 0; }
+static inline int pci_register_driver(struct pci_driver *drv)
+{ return 0; }
+static inline void pci_unregister_driver(struct pci_driver *drv) { }
+static inline int pci_find_capability(struct pci_dev *dev, int cap)
+{ return 0; }
+static inline int pci_find_next_capability(struct pci_dev *dev, u8 post,
+ int cap)
+{ return 0; }
+static inline int pci_find_ext_capability(struct pci_dev *dev, int cap)
+{ return 0; }
+
+/* Power management related routines */
+static inline int pci_save_state(struct pci_dev *dev) { return 0; }
+static inline void pci_restore_state(struct pci_dev *dev) { }
+static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{ return 0; }
+static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{ return 0; }
+static inline pci_power_t pci_choose_state(struct pci_dev *dev,
+ pm_message_t state)
+{ return PCI_D0; }
+static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ int enable)
+{ return 0; }
+
+static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
+{ return -EIO; }
+static inline void pci_release_regions(struct pci_dev *dev) { }
+
+#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0)
+
+static inline void pci_block_cfg_access(struct pci_dev *dev) { }
+static inline int pci_block_cfg_access_in_atomic(struct pci_dev *dev)
+{ return 0; }
+static inline void pci_unblock_cfg_access(struct pci_dev *dev) { }
+
+static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
+{ return NULL; }
+static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
+ unsigned int devfn)
+{ return NULL; }
+static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
+ unsigned int devfn)
+{ return NULL; }
+
+static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
+static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
+static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
+
+#define dev_is_pci(d) (false)
+#define dev_is_pf(d) (false)
+#define dev_num_vf(d) (0)
+#endif /* CONFIG_PCI */
+
+/* Include architecture-dependent settings and functions */
+
+#include <asm/pci.h>
+
+/* these helpers provide future and backwards compatibility
+ * for accessing popular PCI BAR info */
+#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
+#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end)
+#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags)
+#define pci_resource_len(dev,bar) \
+ ((pci_resource_start((dev), (bar)) == 0 && \
+ pci_resource_end((dev), (bar)) == \
+ pci_resource_start((dev), (bar))) ? 0 : \
+ \
+ (pci_resource_end((dev), (bar)) - \
+ pci_resource_start((dev), (bar)) + 1))
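+
+/*
+ * Illustrative sketch: mapping BAR 0 with the helpers above (pci_ioremap_bar()
+ * wraps the same pattern with extra checking).
+ *
+ *     void __iomem *regs = NULL;
+ *
+ *     if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM)
+ *             regs = ioremap(pci_resource_start(pdev, 0),
+ *                            pci_resource_len(pdev, 0));
+ */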
+
+/* Similar to the helpers above, these manipulate per-pci_dev
+ * driver-specific data. They are really just a wrapper around
+ * the generic device structure functions of these calls.
+ */
+static inline void *pci_get_drvdata(struct pci_dev *pdev)
+{
+ return dev_get_drvdata(&pdev->dev);
+}
+
+static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
+{
+ dev_set_drvdata(&pdev->dev, data);
+}
+
+/* If you want to know what to call your pci_dev, ask this function.
+ * Again, it's a wrapper around the generic device.
+ */
+static inline const char *pci_name(const struct pci_dev *pdev)
+{
+ return dev_name(&pdev->dev);
+}
+
+
+/* Some archs don't want to expose struct resource to userland as-is
+ * in sysfs and /proc
+ */
+#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER
+static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc, resource_size_t *start,
+ resource_size_t *end)
+{
+ *start = rsrc->start;
+ *end = rsrc->end;
+}
+#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */
+
+
+/*
+ * The world is not perfect and supplies us with broken PCI devices.
+ * For at least a part of these bugs we need a work-around, so both
+ * generic (drivers/pci/quirks.c) and per-architecture code can define
+ * fixup hooks to be called for particular buggy devices.
+ */
+
+struct pci_fixup {
+ u16 vendor; /* You can use PCI_ANY_ID here of course */
+ u16 device; /* You can use PCI_ANY_ID here of course */
+ u32 class; /* You can use PCI_ANY_ID here too */
+ unsigned int class_shift; /* should be 0, 8, 16 */
+ void (*hook)(struct pci_dev *dev);
+};
+
+enum pci_fixup_pass {
+ pci_fixup_early, /* Before probing BARs */
+ pci_fixup_header, /* After reading configuration header */
+ pci_fixup_final, /* Final phase of device fixups */
+ pci_fixup_enable, /* pci_enable_device() time */
+ pci_fixup_resume, /* pci_device_resume() */
+ pci_fixup_suspend, /* pci_device_suspend() */
+ pci_fixup_resume_early, /* pci_device_resume_early() */
+ pci_fixup_suspend_late, /* pci_device_suspend_late() */
+};
+
+/* Anonymous variables would be nice... */
+#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
+ class_shift, hook) \
+ static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
+ __attribute__((__section__(#section), aligned((sizeof(void *))))) \
+ = { vendor, device, class, class_shift, hook };
+
+#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
+ class_shift, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
+ hook, vendor, device, class, class_shift, hook)
+#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
+ class_shift, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
+ hook, vendor, device, class, class_shift, hook)
+#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
+ class_shift, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
+ hook, vendor, device, class, class_shift, hook)
+#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
+ class_shift, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
+ hook, vendor, device, class, class_shift, hook)
+#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
+ class_shift, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
+ resume##hook, vendor, device, class, \
+ class_shift, hook)
+#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
+ class_shift, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
+ resume_early##hook, vendor, device, \
+ class, class_shift, hook)
+#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
+ class_shift, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
+ suspend##hook, vendor, device, class, \
+ class_shift, hook)
+#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
+ class_shift, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
+ suspend_late##hook, vendor, device, \
+ class, class_shift, hook)
+
+#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
+#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
+#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
+#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
+#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
+ resume##hook, vendor, device, \
+ PCI_ANY_ID, 0, hook)
+#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
+ resume_early##hook, vendor, device, \
+ PCI_ANY_ID, 0, hook)
+#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
+ suspend##hook, vendor, device, \
+ PCI_ANY_ID, 0, hook)
+#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
+ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
+ suspend_late##hook, vendor, device, \
+ PCI_ANY_ID, 0, hook)
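+
+/*
+ * Illustrative sketch: a quirk lives next to its hook and is run at the pass
+ * named by the macro.  The device ID and the flag tweaked below are purely
+ * for illustration:
+ *
+ *     static void quirk_foo(struct pci_dev *dev)
+ *     {
+ *             dev->broken_parity_status = 1;
+ *     }
+ *     DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234, quirk_foo);
+ */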
+
+#ifdef CONFIG_PCI_QUIRKS
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
+int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
+void pci_dev_specific_enable_acs(struct pci_dev *dev);
+#else
+static inline void pci_fixup_device(enum pci_fixup_pass pass,
+ struct pci_dev *dev) { }
+static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
+ u16 acs_flags)
+{
+ return -ENOTTY;
+}
+static inline void pci_dev_specific_enable_acs(struct pci_dev *dev) { }
+#endif
+
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
+int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
+ const char *name);
+void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
+
+extern int pci_pci_problems;
+#define PCIPCI_FAIL 1 /* No PCI PCI DMA */
+#define PCIPCI_TRITON 2
+#define PCIPCI_NATOMA 4
+#define PCIPCI_VIAETBF 8
+#define PCIPCI_VSFX 16
+#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
+#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
+
+extern unsigned long pci_cardbus_io_size;
+extern unsigned long pci_cardbus_mem_size;
+extern u8 pci_dfl_cache_line_size;
+extern u8 pci_cache_line_size;
+
+extern unsigned long pci_hotplug_io_size;
+extern unsigned long pci_hotplug_mem_size;
+
+/* Architecture-specific versions may override these (weak) */
+void pcibios_disable_device(struct pci_dev *dev);
+void pcibios_set_master(struct pci_dev *dev);
+int pcibios_set_pcie_reset_state(struct pci_dev *dev,
+ enum pcie_reset_state state);
+int pcibios_add_device(struct pci_dev *dev);
+void pcibios_release_device(struct pci_dev *dev);
+void pcibios_penalize_isa_irq(int irq, int active);
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+extern struct dev_pm_ops pcibios_pm_ops;
+#endif
+
+#ifdef CONFIG_PCI_MMCONFIG
+void __init pci_mmcfg_early_init(void);
+void __init pci_mmcfg_late_init(void);
+#else
+static inline void pci_mmcfg_early_init(void) { }
+static inline void pci_mmcfg_late_init(void) { }
+#endif
+
+int pci_ext_cfg_avail(void);
+
+void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
+
+#ifdef CONFIG_PCI_IOV
+int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
+int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
+
+int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
+void pci_disable_sriov(struct pci_dev *dev);
+int pci_num_vf(struct pci_dev *dev);
+int pci_vfs_assigned(struct pci_dev *dev);
+int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
+int pci_sriov_get_totalvfs(struct pci_dev *dev);
+resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
+#else
+static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
+{
+ return -ENOSYS;
+}
+static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
+{
+ return -ENOSYS;
+}
+static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
+{ return -ENODEV; }
+static inline void pci_disable_sriov(struct pci_dev *dev) { }
+static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
+static inline int pci_vfs_assigned(struct pci_dev *dev)
+{ return 0; }
+static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
+{ return 0; }
+static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
+{ return 0; }
+static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
+{ return 0; }
+#endif
+
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+void pci_hp_create_module_link(struct pci_slot *pci_slot);
+void pci_hp_remove_module_link(struct pci_slot *pci_slot);
+#endif
+
+/**
+ * pci_pcie_cap - get the saved PCIe capability offset
+ * @dev: PCI device
+ *
+ * PCIe capability offset is calculated at PCI device initialization
+ * time and saved in the data structure. This function returns saved
+ * PCIe capability offset. Using this instead of pci_find_capability()
+ * reduces unnecessary search in the PCI configuration space. If you
+ * need to calculate the PCIe capability offset from a raw device for some
+ * reason, please use pci_find_capability() instead.
+ */
+static inline int pci_pcie_cap(struct pci_dev *dev)
+{
+ return dev->pcie_cap;
+}
+
+/**
+ * pci_is_pcie - check if the PCI device is PCI Express capable
+ * @dev: PCI device
+ *
+ * Returns: true if the PCI device is PCI Express capable, false otherwise.
+ */
+static inline bool pci_is_pcie(struct pci_dev *dev)
+{
+ return pci_pcie_cap(dev);
+}
+
+/**
+ * pcie_caps_reg - get the PCIe Capabilities Register
+ * @dev: PCI device
+ */
+static inline u16 pcie_caps_reg(const struct pci_dev *dev)
+{
+ return dev->pcie_flags_reg;
+}
+
+/**
+ * pci_pcie_type - get the PCIe device/port type
+ * @dev: PCI device
+ */
+static inline int pci_pcie_type(const struct pci_dev *dev)
+{
+ return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
+}
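+
+/*
+ * Illustrative sketch: the helpers above combine naturally, e.g. to test for
+ * a PCIe root port:
+ *
+ *     if (pci_is_pcie(pdev) && pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ *             ...
+ */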
+
+void pci_request_acs(void);
+bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+bool pci_acs_path_enabled(struct pci_dev *start,
+ struct pci_dev *end, u16 acs_flags);
+
+#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
+#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
+
+/* Large Resource Data Type Tag Item Names */
+#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
+#define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */
+#define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */
+
+#define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
+#define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
+#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
+
+/* Small Resource Data Type Tag Item Names */
+#define PCI_VPD_STIN_END 0x78 /* End */
+
+#define PCI_VPD_SRDT_END PCI_VPD_STIN_END
+
+#define PCI_VPD_SRDT_TIN_MASK 0x78
+#define PCI_VPD_SRDT_LEN_MASK 0x07
+
+#define PCI_VPD_LRDT_TAG_SIZE 3
+#define PCI_VPD_SRDT_TAG_SIZE 1
+
+#define PCI_VPD_INFO_FLD_HDR_SIZE 3
+
+#define PCI_VPD_RO_KEYWORD_PARTNO "PN"
+#define PCI_VPD_RO_KEYWORD_MFR_ID "MN"
+#define PCI_VPD_RO_KEYWORD_VENDOR0 "V0"
+#define PCI_VPD_RO_KEYWORD_CHKSUM "RV"
+
+/**
+ * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length
+ * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
+ *
+ * Returns the extracted Large Resource Data Type length.
+ */
+static inline u16 pci_vpd_lrdt_size(const u8 *lrdt)
+{
+ return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
+}
+
+/**
+ * pci_vpd_srdt_size - Extracts the Small Resource Data Type length
+ * @srdt: Pointer to the beginning of the Small Resource Data Type tag
+ *
+ * Returns the extracted Small Resource Data Type length.
+ */
+static inline u8 pci_vpd_srdt_size(const u8 *srdt)
+{
+ return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
+}
+
+/**
+ * pci_vpd_info_field_size - Extracts the information field length
+ * @info_field: Pointer to the beginning of an information field header
+ *
+ * Returns the extracted information field length.
+ */
+static inline u8 pci_vpd_info_field_size(const u8 *info_field)
+{
+ return info_field[2];
+}
+
+/**
+ * pci_vpd_find_tag - Locates the Resource Data Type tag provided
+ * @buf: Pointer to buffered vpd data
+ * @off: The offset into the buffer at which to begin the search
+ * @len: The length of the vpd buffer
+ * @rdt: The Resource Data Type to search for
+ *
+ * Returns the index where the Resource Data Type was found or
+ * -ENOENT otherwise.
+ */
+int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
+
+/**
+ * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD
+ * @buf: Pointer to buffered vpd data
+ * @off: The offset into the buffer at which to begin the search
+ * @len: The length of the buffer area, relative to off, in which to search
+ * @kw: The keyword to search for
+ *
+ * Returns the index where the information field keyword was found or
+ * -ENOENT otherwise.
+ */
+int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
+ unsigned int len, const char *kw);
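+
+/*
+ * Illustrative sketch: walking a VPD image previously read with
+ * pci_read_vpd() to locate the part number keyword:
+ *
+ *     i = pci_vpd_find_tag(buf, 0, len, PCI_VPD_LRDT_RO_DATA);
+ *     if (i >= 0) {
+ *             unsigned int ro_len = pci_vpd_lrdt_size(&buf[i]);
+ *
+ *             i = pci_vpd_find_info_keyword(buf, i + PCI_VPD_LRDT_TAG_SIZE,
+ *                                           ro_len, PCI_VPD_RO_KEYWORD_PARTNO);
+ *     }
+ */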
+
+/* PCI <-> OF binding helpers */
+#ifdef CONFIG_OF
+struct device_node;
+void pci_set_of_node(struct pci_dev *dev);
+void pci_release_of_node(struct pci_dev *dev);
+void pci_set_bus_of_node(struct pci_bus *bus);
+void pci_release_bus_of_node(struct pci_bus *bus);
+
+/* Arch may override this (weak) */
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
+
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev)
+{
+ return pdev ? pdev->dev.of_node : NULL;
+}
+
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+ return bus ? bus->dev.of_node : NULL;
+}
+
+#else /* CONFIG_OF */
+static inline void pci_set_of_node(struct pci_dev *dev) { }
+static inline void pci_release_of_node(struct pci_dev *dev) { }
+static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
+static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
+static inline struct device_node *
+pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
+#endif /* CONFIG_OF */
+
+#ifdef CONFIG_EEH
+static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
+{
+ return pdev->dev.archdata.edev;
+}
+#endif
+
+int pci_for_each_dma_alias(struct pci_dev *pdev,
+ int (*fn)(struct pci_dev *pdev,
+ u16 alias, void *data), void *data);
+
+/* helper functions for operation of device flag */
+static inline void pci_set_dev_assigned(struct pci_dev *pdev)
+{
+ pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
+{
+ pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
+{
+ return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
+}
+#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
new file mode 100644
index 000000000..8c7895061
--- /dev/null
+++ b/include/linux/pci_hotplug.h
@@ -0,0 +1,189 @@
+/*
+ * PCI HotPlug Core Functions
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <kristen.c.accardi@intel.com>
+ *
+ */
+#ifndef _PCI_HOTPLUG_H
+#define _PCI_HOTPLUG_H
+
+/**
+ * struct hotplug_slot_ops - the callbacks that the hotplug pci core can use
+ * @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
+ * @enable_slot: Called when the user wants to enable a specific pci slot
+ * @disable_slot: Called when the user wants to disable a specific pci slot
+ * @set_attention_status: Called to set the specific slot's attention LED to
+ * the specified value
+ * @hardware_test: Called to run a specified hardware test on the specified
+ * slot.
+ * @get_power_status: Called to get the current power status of a slot.
+ * If this field is NULL, the value passed in the struct hotplug_slot_info
+ * will be used when this value is requested by a user.
+ * @get_attention_status: Called to get the current attention status of a slot.
+ * If this field is NULL, the value passed in the struct hotplug_slot_info
+ * will be used when this value is requested by a user.
+ * @get_latch_status: Called to get the current latch status of a slot.
+ * If this field is NULL, the value passed in the struct hotplug_slot_info
+ * will be used when this value is requested by a user.
+ * @get_adapter_status: Called to see if an adapter is present in the slot or not.
+ * If this field is NULL, the value passed in the struct hotplug_slot_info
+ * will be used when this value is requested by a user.
+ * @reset_slot: Optional interface to allow override of a bus reset for the
+ * slot for cases where a secondary bus reset can result in spurious
+ * hotplug events or where a slot can be reset independent of the bus.
+ *
+ * The table of function pointers that is passed to the hotplug pci core by a
+ * hotplug pci driver. These functions are called by the hotplug pci core when
+ * the user wants to do something to a specific slot (query it for information,
+ * set an LED, enable / disable power, etc.)
+ */
+struct hotplug_slot_ops {
+ struct module *owner;
+ const char *mod_name;
+ int (*enable_slot) (struct hotplug_slot *slot);
+ int (*disable_slot) (struct hotplug_slot *slot);
+ int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
+ int (*hardware_test) (struct hotplug_slot *slot, u32 value);
+ int (*get_power_status) (struct hotplug_slot *slot, u8 *value);
+ int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
+ int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
+ int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
+ int (*reset_slot) (struct hotplug_slot *slot, int probe);
+};
+
+/**
+ * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
+ * @power_status: if power is enabled or not (1/0)
+ * @attention_status: if the attention light is enabled or not (1/0)
+ * @latch_status: if the latch (if any) is open or closed (1/0)
+ * @adapter_status: if there is a pci board present in the slot or not (1/0)
+ *
+ * Used to notify the hotplug pci core of the status of a specific slot.
+ */
+struct hotplug_slot_info {
+ u8 power_status;
+ u8 attention_status;
+ u8 latch_status;
+ u8 adapter_status;
+};
+
+/**
+ * struct hotplug_slot - used to register a physical slot with the hotplug pci core
+ * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
+ * @info: pointer to the &struct hotplug_slot_info for the initial values for
+ * this slot.
+ * @release: called during pci_hp_deregister to free memory allocated in a
+ * hotplug_slot structure.
+ * @private: used by the hotplug pci controller driver to store whatever it
+ * needs.
+ */
+struct hotplug_slot {
+ struct hotplug_slot_ops *ops;
+ struct hotplug_slot_info *info;
+ void (*release) (struct hotplug_slot *slot);
+ void *private;
+
+ /* Variables below this are for use only by the hotplug pci core. */
+ struct list_head slot_list;
+ struct pci_slot *pci_slot;
+};
+
+static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
+{
+ return pci_slot_name(slot->pci_slot);
+}
+
+int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, int nr,
+ const char *name, struct module *owner,
+ const char *mod_name);
+int pci_hp_deregister(struct hotplug_slot *slot);
+int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
+ struct hotplug_slot_info *info);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define pci_hp_register(slot, pbus, devnr, name) \
+ __pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
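+
+/*
+ * Illustrative sketch: a hotplug controller driver fills in the structures
+ * above and registers each physical slot; the wrapper macro supplies
+ * THIS_MODULE and KBUILD_MODNAME so the core can record the owning module.
+ * The foo_* names are hypothetical:
+ *
+ *     static struct hotplug_slot_ops foo_hotplug_slot_ops = {
+ *             .enable_slot      = foo_enable_slot,
+ *             .disable_slot     = foo_disable_slot,
+ *             .get_power_status = foo_get_power_status,
+ *     };
+ *
+ *     hp_slot->ops  = &foo_hotplug_slot_ops;
+ *     hp_slot->info = &foo_slot_info;
+ *     ret = pci_hp_register(hp_slot, pdev->subordinate, 0, "foo-slot1");
+ */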
+
+/* PCI Setting Record (Type 0) */
+struct hpp_type0 {
+ u32 revision;
+ u8 cache_line_size;
+ u8 latency_timer;
+ u8 enable_serr;
+ u8 enable_perr;
+};
+
+/* PCI-X Setting Record (Type 1) */
+struct hpp_type1 {
+ u32 revision;
+ u8 max_mem_read;
+ u8 avg_max_split;
+ u16 tot_max_split;
+};
+
+/* PCI Express Setting Record (Type 2) */
+struct hpp_type2 {
+ u32 revision;
+ u32 unc_err_mask_and;
+ u32 unc_err_mask_or;
+ u32 unc_err_sever_and;
+ u32 unc_err_sever_or;
+ u32 cor_err_mask_and;
+ u32 cor_err_mask_or;
+ u32 adv_err_cap_and;
+ u32 adv_err_cap_or;
+ u16 pci_exp_devctl_and;
+ u16 pci_exp_devctl_or;
+ u16 pci_exp_lnkctl_and;
+ u16 pci_exp_lnkctl_or;
+ u32 sec_unc_err_sever_and;
+ u32 sec_unc_err_sever_or;
+ u32 sec_unc_err_mask_and;
+ u32 sec_unc_err_mask_or;
+};
+
+struct hotplug_params {
+ struct hpp_type0 *t0; /* Type0: NULL if not available */
+ struct hpp_type1 *t1; /* Type1: NULL if not available */
+ struct hpp_type2 *t2; /* Type2: NULL if not available */
+ struct hpp_type0 type0_data;
+ struct hpp_type1 type1_data;
+ struct hpp_type2 type2_data;
+};
+
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
+int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
+int acpi_pci_detect_ejectable(acpi_handle handle);
+#else
+static inline int pci_get_hp_params(struct pci_dev *dev,
+ struct hotplug_params *hpp)
+{
+ return -ENODEV;
+}
+#endif
+#endif
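For the firmware settings above, a hotplug driver typically asks the firmware (via the ACPI methods behind pci_get_hp_params()) for the records of a newly enabled device and programs whichever record types came back non-NULL. The fragment below is a minimal sketch of that flow; the mydrv_apply_fw_settings() name and the subset of fields written are illustrative, not taken from this patch.

#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/string.h>

static void mydrv_apply_fw_settings(struct pci_dev *dev)
{
	struct hotplug_params hpp;

	memset(&hpp, 0, sizeof(hpp));
	if (pci_get_hp_params(dev, &hpp))	/* -ENODEV when no ACPI data */
		return;

	if (hpp.t0) {				/* PCI Setting Record (Type 0) */
		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
				      hpp.t0->cache_line_size);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER,
				      hpp.t0->latency_timer);
	}
	/* hpp.t1 (PCI-X) and hpp.t2 (PCIe) would be handled the same way. */
}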
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
new file mode 100644
index 000000000..2f7b9a40f
--- /dev/null
+++ b/include/linux/pci_ids.h
@@ -0,0 +1,2995 @@
+/*
+ * PCI Class, Vendor and Device IDs
+ *
+ * Please keep sorted.
+ *
+ * Do not add new entries to this file unless the definitions
+ * are shared between multiple drivers.
+ */
+#ifndef _LINUX_PCI_IDS_H
+#define _LINUX_PCI_IDS_H
+
+/* Device classes and subclasses */
+
+#define PCI_CLASS_NOT_DEFINED 0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA 0x0001
+
+#define PCI_BASE_CLASS_STORAGE 0x01
+#define PCI_CLASS_STORAGE_SCSI 0x0100
+#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_FLOPPY 0x0102
+#define PCI_CLASS_STORAGE_IPI 0x0103
+#define PCI_CLASS_STORAGE_RAID 0x0104
+#define PCI_CLASS_STORAGE_SATA 0x0106
+#define PCI_CLASS_STORAGE_SATA_AHCI 0x010601
+#define PCI_CLASS_STORAGE_SAS 0x0107
+#define PCI_CLASS_STORAGE_OTHER 0x0180
+
+#define PCI_BASE_CLASS_NETWORK 0x02
+#define PCI_CLASS_NETWORK_ETHERNET 0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING 0x0201
+#define PCI_CLASS_NETWORK_FDDI 0x0202
+#define PCI_CLASS_NETWORK_ATM 0x0203
+#define PCI_CLASS_NETWORK_OTHER 0x0280
+
+#define PCI_BASE_CLASS_DISPLAY 0x03
+#define PCI_CLASS_DISPLAY_VGA 0x0300
+#define PCI_CLASS_DISPLAY_XGA 0x0301
+#define PCI_CLASS_DISPLAY_3D 0x0302
+#define PCI_CLASS_DISPLAY_OTHER 0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA 0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
+#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
+#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
+
+#define PCI_BASE_CLASS_MEMORY 0x05
+#define PCI_CLASS_MEMORY_RAM 0x0500
+#define PCI_CLASS_MEMORY_FLASH 0x0501
+#define PCI_CLASS_MEMORY_OTHER 0x0580
+
+#define PCI_BASE_CLASS_BRIDGE 0x06
+#define PCI_CLASS_BRIDGE_HOST 0x0600
+#define PCI_CLASS_BRIDGE_ISA 0x0601
+#define PCI_CLASS_BRIDGE_EISA 0x0602
+#define PCI_CLASS_BRIDGE_MC 0x0603
+#define PCI_CLASS_BRIDGE_PCI 0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA 0x0605
+#define PCI_CLASS_BRIDGE_NUBUS 0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS 0x0607
+#define PCI_CLASS_BRIDGE_RACEWAY 0x0608
+#define PCI_CLASS_BRIDGE_OTHER 0x0680
+
+#define PCI_BASE_CLASS_COMMUNICATION 0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702
+#define PCI_CLASS_COMMUNICATION_MODEM 0x0703
+#define PCI_CLASS_COMMUNICATION_OTHER 0x0780
+
+#define PCI_BASE_CLASS_SYSTEM 0x08
+#define PCI_CLASS_SYSTEM_PIC 0x0800
+#define PCI_CLASS_SYSTEM_PIC_IOAPIC 0x080010
+#define PCI_CLASS_SYSTEM_PIC_IOXAPIC 0x080020
+#define PCI_CLASS_SYSTEM_DMA 0x0801
+#define PCI_CLASS_SYSTEM_TIMER 0x0802
+#define PCI_CLASS_SYSTEM_RTC 0x0803
+#define PCI_CLASS_SYSTEM_PCI_HOTPLUG 0x0804
+#define PCI_CLASS_SYSTEM_SDHCI 0x0805
+#define PCI_CLASS_SYSTEM_OTHER 0x0880
+
+#define PCI_BASE_CLASS_INPUT 0x09
+#define PCI_CLASS_INPUT_KEYBOARD 0x0900
+#define PCI_CLASS_INPUT_PEN 0x0901
+#define PCI_CLASS_INPUT_MOUSE 0x0902
+#define PCI_CLASS_INPUT_SCANNER 0x0903
+#define PCI_CLASS_INPUT_GAMEPORT 0x0904
+#define PCI_CLASS_INPUT_OTHER 0x0980
+
+#define PCI_BASE_CLASS_DOCKING 0x0a
+#define PCI_CLASS_DOCKING_GENERIC 0x0a00
+#define PCI_CLASS_DOCKING_OTHER 0x0a80
+
+#define PCI_BASE_CLASS_PROCESSOR 0x0b
+#define PCI_CLASS_PROCESSOR_386 0x0b00
+#define PCI_CLASS_PROCESSOR_486 0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA 0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC 0x0b20
+#define PCI_CLASS_PROCESSOR_MIPS 0x0b30
+#define PCI_CLASS_PROCESSOR_CO 0x0b40
+
+#define PCI_BASE_CLASS_SERIAL 0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE 0x0c00
+#define PCI_CLASS_SERIAL_FIREWIRE_OHCI 0x0c0010
+#define PCI_CLASS_SERIAL_ACCESS 0x0c01
+#define PCI_CLASS_SERIAL_SSA 0x0c02
+#define PCI_CLASS_SERIAL_USB 0x0c03
+#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300
+#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
+#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
+#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
+#define PCI_CLASS_SERIAL_FIBER 0x0c04
+#define PCI_CLASS_SERIAL_SMBUS 0x0c05
+
+#define PCI_BASE_CLASS_WIRELESS 0x0d
+#define PCI_CLASS_WIRELESS_RF_CONTROLLER 0x0d10
+#define PCI_CLASS_WIRELESS_WHCI 0x0d1010
+
+#define PCI_BASE_CLASS_INTELLIGENT 0x0e
+#define PCI_CLASS_INTELLIGENT_I2O 0x0e00
+
+#define PCI_BASE_CLASS_SATELLITE 0x0f
+#define PCI_CLASS_SATELLITE_TV 0x0f00
+#define PCI_CLASS_SATELLITE_AUDIO 0x0f01
+#define PCI_CLASS_SATELLITE_VOICE 0x0f03
+#define PCI_CLASS_SATELLITE_DATA 0x0f04
+
+#define PCI_BASE_CLASS_CRYPT 0x10
+#define PCI_CLASS_CRYPT_NETWORK 0x1000
+#define PCI_CLASS_CRYPT_ENTERTAINMENT 0x1001
+#define PCI_CLASS_CRYPT_OTHER 0x1080
+
+#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11
+#define PCI_CLASS_SP_DPIO 0x1100
+#define PCI_CLASS_SP_OTHER 0x1180
+
+#define PCI_CLASS_OTHERS 0xff
+
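The class and ID constants in this file are consumed almost exclusively by driver match tables; as the header comment says, an ID only belongs here once more than one driver needs it. A minimal sketch of typical usage follows (the mydrv_ids table is hypothetical; PCI_DEVICE(), PCI_DEVICE_CLASS() and MODULE_DEVICE_TABLE() come from <linux/pci.h> and <linux/module.h>, not from this file):

#include <linux/module.h>
#include <linux/pci.h>		/* also pulls in <linux/pci_ids.h> */

/* Match one exact vendor/device pair plus any AHCI-class controller. */
static const struct pci_device_id mydrv_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139) },
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, mydrv_ids);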
+/* Vendors and devices. Sort key: vendor first, device next. */
+
+#define PCI_VENDOR_ID_TTTECH 0x0357
+#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
+
+#define PCI_VENDOR_ID_DYNALINK 0x0675
+#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702
+
+#define PCI_VENDOR_ID_BERKOM 0x0871
+#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1
+#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2
+#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
+#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+
+#define PCI_VENDOR_ID_COMPAQ 0x0e11
+#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
+#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
+#define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10
+#define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32
+#define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34
+#define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35
+#define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40
+#define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43
+#define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011
+#define PCI_DEVICE_ID_COMPAQ_CISS 0xb060
+#define PCI_DEVICE_ID_COMPAQ_CISSB 0xb178
+#define PCI_DEVICE_ID_COMPAQ_CISSC 0x46
+#define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150
+
+#define PCI_VENDOR_ID_NCR 0x1000
+#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
+#define PCI_DEVICE_ID_NCR_53C810 0x0001
+#define PCI_DEVICE_ID_NCR_53C820 0x0002
+#define PCI_DEVICE_ID_NCR_53C825 0x0003
+#define PCI_DEVICE_ID_NCR_53C815 0x0004
+#define PCI_DEVICE_ID_LSI_53C810AP 0x0005
+#define PCI_DEVICE_ID_NCR_53C860 0x0006
+#define PCI_DEVICE_ID_LSI_53C1510 0x000a
+#define PCI_DEVICE_ID_NCR_53C896 0x000b
+#define PCI_DEVICE_ID_NCR_53C895 0x000c
+#define PCI_DEVICE_ID_NCR_53C885 0x000d
+#define PCI_DEVICE_ID_NCR_53C875 0x000f
+#define PCI_DEVICE_ID_NCR_53C1510 0x0010
+#define PCI_DEVICE_ID_LSI_53C895A 0x0012
+#define PCI_DEVICE_ID_LSI_53C875A 0x0013
+#define PCI_DEVICE_ID_LSI_53C1010_33 0x0020
+#define PCI_DEVICE_ID_LSI_53C1010_66 0x0021
+#define PCI_DEVICE_ID_LSI_53C1030 0x0030
+#define PCI_DEVICE_ID_LSI_1030_53C1035 0x0032
+#define PCI_DEVICE_ID_LSI_53C1035 0x0040
+#define PCI_DEVICE_ID_NCR_53C875J 0x008f
+#define PCI_DEVICE_ID_LSI_FC909 0x0621
+#define PCI_DEVICE_ID_LSI_FC929 0x0622
+#define PCI_DEVICE_ID_LSI_FC929_LAN 0x0623
+#define PCI_DEVICE_ID_LSI_FC919 0x0624
+#define PCI_DEVICE_ID_LSI_FC919_LAN 0x0625
+#define PCI_DEVICE_ID_LSI_FC929X 0x0626
+#define PCI_DEVICE_ID_LSI_FC939X 0x0642
+#define PCI_DEVICE_ID_LSI_FC949X 0x0640
+#define PCI_DEVICE_ID_LSI_FC949ES 0x0646
+#define PCI_DEVICE_ID_LSI_FC919X 0x0628
+#define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701
+#define PCI_DEVICE_ID_LSI_61C102 0x0901
+#define PCI_DEVICE_ID_LSI_63C815 0x1000
+#define PCI_DEVICE_ID_LSI_SAS1064 0x0050
+#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411
+#define PCI_DEVICE_ID_LSI_SAS1066 0x005E
+#define PCI_DEVICE_ID_LSI_SAS1068 0x0054
+#define PCI_DEVICE_ID_LSI_SAS1064A 0x005C
+#define PCI_DEVICE_ID_LSI_SAS1064E 0x0056
+#define PCI_DEVICE_ID_LSI_SAS1066E 0x005A
+#define PCI_DEVICE_ID_LSI_SAS1068E 0x0058
+#define PCI_DEVICE_ID_LSI_SAS1078 0x0060
+
+#define PCI_VENDOR_ID_ATI 0x1002
+/* Mach64 */
+#define PCI_DEVICE_ID_ATI_68800 0x4158
+#define PCI_DEVICE_ID_ATI_215CT222 0x4354
+#define PCI_DEVICE_ID_ATI_210888CX 0x4358
+#define PCI_DEVICE_ID_ATI_215ET222 0x4554
+/* Mach64 / Rage */
+#define PCI_DEVICE_ID_ATI_215GB 0x4742
+#define PCI_DEVICE_ID_ATI_215GD 0x4744
+#define PCI_DEVICE_ID_ATI_215GI 0x4749
+#define PCI_DEVICE_ID_ATI_215GP 0x4750
+#define PCI_DEVICE_ID_ATI_215GQ 0x4751
+#define PCI_DEVICE_ID_ATI_215XL 0x4752
+#define PCI_DEVICE_ID_ATI_215GT 0x4754
+#define PCI_DEVICE_ID_ATI_215GTB 0x4755
+#define PCI_DEVICE_ID_ATI_215_IV 0x4756
+#define PCI_DEVICE_ID_ATI_215_IW 0x4757
+#define PCI_DEVICE_ID_ATI_215_IZ 0x475A
+#define PCI_DEVICE_ID_ATI_210888GX 0x4758
+#define PCI_DEVICE_ID_ATI_215_LB 0x4c42
+#define PCI_DEVICE_ID_ATI_215_LD 0x4c44
+#define PCI_DEVICE_ID_ATI_215_LG 0x4c47
+#define PCI_DEVICE_ID_ATI_215_LI 0x4c49
+#define PCI_DEVICE_ID_ATI_215_LM 0x4c4D
+#define PCI_DEVICE_ID_ATI_215_LN 0x4c4E
+#define PCI_DEVICE_ID_ATI_215_LR 0x4c52
+#define PCI_DEVICE_ID_ATI_215_LS 0x4c53
+#define PCI_DEVICE_ID_ATI_264_LT 0x4c54
+/* Mach64 VT */
+#define PCI_DEVICE_ID_ATI_264VT 0x5654
+#define PCI_DEVICE_ID_ATI_264VU 0x5655
+#define PCI_DEVICE_ID_ATI_264VV 0x5656
+/* Rage128 GL */
+#define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245
+#define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246
+#define PCI_DEVICE_ID_ATI_RAGE128_RG 0x5247
+/* Rage128 VR */
+#define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b
+#define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c
+#define PCI_DEVICE_ID_ATI_RAGE128_SE 0x5345
+#define PCI_DEVICE_ID_ATI_RAGE128_SF 0x5346
+#define PCI_DEVICE_ID_ATI_RAGE128_SG 0x5347
+#define PCI_DEVICE_ID_ATI_RAGE128_SH 0x5348
+#define PCI_DEVICE_ID_ATI_RAGE128_SK 0x534b
+#define PCI_DEVICE_ID_ATI_RAGE128_SL 0x534c
+#define PCI_DEVICE_ID_ATI_RAGE128_SM 0x534d
+#define PCI_DEVICE_ID_ATI_RAGE128_SN 0x534e
+/* Rage128 Ultra */
+#define PCI_DEVICE_ID_ATI_RAGE128_TF 0x5446
+#define PCI_DEVICE_ID_ATI_RAGE128_TL 0x544c
+#define PCI_DEVICE_ID_ATI_RAGE128_TR 0x5452
+#define PCI_DEVICE_ID_ATI_RAGE128_TS 0x5453
+#define PCI_DEVICE_ID_ATI_RAGE128_TT 0x5454
+#define PCI_DEVICE_ID_ATI_RAGE128_TU 0x5455
+/* Rage128 M3 */
+#define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45
+#define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46
+/* Rage128 M4 */
+#define PCI_DEVICE_ID_ATI_RAGE128_MF 0x4d46
+#define PCI_DEVICE_ID_ATI_RAGE128_ML 0x4d4c
+/* Rage128 Pro GL */
+#define PCI_DEVICE_ID_ATI_RAGE128_PA 0x5041
+#define PCI_DEVICE_ID_ATI_RAGE128_PB 0x5042
+#define PCI_DEVICE_ID_ATI_RAGE128_PC 0x5043
+#define PCI_DEVICE_ID_ATI_RAGE128_PD 0x5044
+#define PCI_DEVICE_ID_ATI_RAGE128_PE 0x5045
+#define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046
+/* Rage128 Pro VR */
+#define PCI_DEVICE_ID_ATI_RAGE128_PG 0x5047
+#define PCI_DEVICE_ID_ATI_RAGE128_PH 0x5048
+#define PCI_DEVICE_ID_ATI_RAGE128_PI 0x5049
+#define PCI_DEVICE_ID_ATI_RAGE128_PJ 0x504A
+#define PCI_DEVICE_ID_ATI_RAGE128_PK 0x504B
+#define PCI_DEVICE_ID_ATI_RAGE128_PL 0x504C
+#define PCI_DEVICE_ID_ATI_RAGE128_PM 0x504D
+#define PCI_DEVICE_ID_ATI_RAGE128_PN 0x504E
+#define PCI_DEVICE_ID_ATI_RAGE128_PO 0x504F
+#define PCI_DEVICE_ID_ATI_RAGE128_PP 0x5050
+#define PCI_DEVICE_ID_ATI_RAGE128_PQ 0x5051
+#define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052
+#define PCI_DEVICE_ID_ATI_RAGE128_PS 0x5053
+#define PCI_DEVICE_ID_ATI_RAGE128_PT 0x5054
+#define PCI_DEVICE_ID_ATI_RAGE128_PU 0x5055
+#define PCI_DEVICE_ID_ATI_RAGE128_PV 0x5056
+#define PCI_DEVICE_ID_ATI_RAGE128_PW 0x5057
+#define PCI_DEVICE_ID_ATI_RAGE128_PX 0x5058
+/* Rage128 M4 */
+/* Radeon R100 */
+#define PCI_DEVICE_ID_ATI_RADEON_QD 0x5144
+#define PCI_DEVICE_ID_ATI_RADEON_QE 0x5145
+#define PCI_DEVICE_ID_ATI_RADEON_QF 0x5146
+#define PCI_DEVICE_ID_ATI_RADEON_QG 0x5147
+/* Radeon RV100 (VE) */
+#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
+#define PCI_DEVICE_ID_ATI_RADEON_QZ 0x515a
+/* Radeon R200 (8500) */
+#define PCI_DEVICE_ID_ATI_RADEON_QL 0x514c
+#define PCI_DEVICE_ID_ATI_RADEON_QN 0x514e
+#define PCI_DEVICE_ID_ATI_RADEON_QO 0x514f
+#define PCI_DEVICE_ID_ATI_RADEON_Ql 0x516c
+#define PCI_DEVICE_ID_ATI_RADEON_BB 0x4242
+/* Radeon R200 (9100) */
+#define PCI_DEVICE_ID_ATI_RADEON_QM 0x514d
+/* Radeon RV200 (7500) */
+#define PCI_DEVICE_ID_ATI_RADEON_QW 0x5157
+#define PCI_DEVICE_ID_ATI_RADEON_QX 0x5158
+/* Radeon NV-100 */
+/* Radeon RV250 (9000) */
+#define PCI_DEVICE_ID_ATI_RADEON_Id 0x4964
+#define PCI_DEVICE_ID_ATI_RADEON_Ie 0x4965
+#define PCI_DEVICE_ID_ATI_RADEON_If 0x4966
+#define PCI_DEVICE_ID_ATI_RADEON_Ig 0x4967
+/* Radeon RV280 (9200) */
+#define PCI_DEVICE_ID_ATI_RADEON_Ya 0x5961
+#define PCI_DEVICE_ID_ATI_RADEON_Yd 0x5964
+/* Radeon R300 (9500) */
+/* Radeon R300 (9700) */
+#define PCI_DEVICE_ID_ATI_RADEON_ND 0x4e44
+#define PCI_DEVICE_ID_ATI_RADEON_NE 0x4e45
+#define PCI_DEVICE_ID_ATI_RADEON_NF 0x4e46
+#define PCI_DEVICE_ID_ATI_RADEON_NG 0x4e47
+/* Radeon R350 (9800) */
+/* Radeon RV350 (9600) */
+/* Radeon M6 */
+#define PCI_DEVICE_ID_ATI_RADEON_LY 0x4c59
+#define PCI_DEVICE_ID_ATI_RADEON_LZ 0x4c5a
+/* Radeon M7 */
+#define PCI_DEVICE_ID_ATI_RADEON_LW 0x4c57
+#define PCI_DEVICE_ID_ATI_RADEON_LX 0x4c58
+/* Radeon M9 */
+#define PCI_DEVICE_ID_ATI_RADEON_Ld 0x4c64
+#define PCI_DEVICE_ID_ATI_RADEON_Le 0x4c65
+#define PCI_DEVICE_ID_ATI_RADEON_Lf 0x4c66
+#define PCI_DEVICE_ID_ATI_RADEON_Lg 0x4c67
+/* Radeon */
+/* RadeonIGP */
+#define PCI_DEVICE_ID_ATI_RS100 0xcab0
+#define PCI_DEVICE_ID_ATI_RS200 0xcab2
+#define PCI_DEVICE_ID_ATI_RS200_B 0xcbb2
+#define PCI_DEVICE_ID_ATI_RS250 0xcab3
+#define PCI_DEVICE_ID_ATI_RS300_100 0x5830
+#define PCI_DEVICE_ID_ATI_RS300_133 0x5831
+#define PCI_DEVICE_ID_ATI_RS300_166 0x5832
+#define PCI_DEVICE_ID_ATI_RS300_200 0x5833
+#define PCI_DEVICE_ID_ATI_RS350_100 0x7830
+#define PCI_DEVICE_ID_ATI_RS350_133 0x7831
+#define PCI_DEVICE_ID_ATI_RS350_166 0x7832
+#define PCI_DEVICE_ID_ATI_RS350_200 0x7833
+#define PCI_DEVICE_ID_ATI_RS400_100 0x5a30
+#define PCI_DEVICE_ID_ATI_RS400_133 0x5a31
+#define PCI_DEVICE_ID_ATI_RS400_166 0x5a32
+#define PCI_DEVICE_ID_ATI_RS400_200 0x5a33
+#define PCI_DEVICE_ID_ATI_RS480 0x5950
+/* ATI IXP Chipset */
+#define PCI_DEVICE_ID_ATI_IXP200_IDE 0x4349
+#define PCI_DEVICE_ID_ATI_IXP200_SMBUS 0x4353
+#define PCI_DEVICE_ID_ATI_IXP300_SMBUS 0x4363
+#define PCI_DEVICE_ID_ATI_IXP300_IDE 0x4369
+#define PCI_DEVICE_ID_ATI_IXP300_SATA 0x436e
+#define PCI_DEVICE_ID_ATI_IXP400_SMBUS 0x4372
+#define PCI_DEVICE_ID_ATI_IXP400_IDE 0x4376
+#define PCI_DEVICE_ID_ATI_IXP400_SATA 0x4379
+#define PCI_DEVICE_ID_ATI_IXP400_SATA2 0x437a
+#define PCI_DEVICE_ID_ATI_IXP600_SATA 0x4380
+#define PCI_DEVICE_ID_ATI_SBX00_SMBUS 0x4385
+#define PCI_DEVICE_ID_ATI_IXP600_IDE 0x438c
+#define PCI_DEVICE_ID_ATI_IXP700_SATA 0x4390
+#define PCI_DEVICE_ID_ATI_IXP700_IDE 0x439c
+
+#define PCI_VENDOR_ID_VLSI 0x1004
+#define PCI_DEVICE_ID_VLSI_82C592 0x0005
+#define PCI_DEVICE_ID_VLSI_82C593 0x0006
+#define PCI_DEVICE_ID_VLSI_82C594 0x0007
+#define PCI_DEVICE_ID_VLSI_82C597 0x0009
+#define PCI_DEVICE_ID_VLSI_82C541 0x000c
+#define PCI_DEVICE_ID_VLSI_82C543 0x000d
+#define PCI_DEVICE_ID_VLSI_82C532 0x0101
+#define PCI_DEVICE_ID_VLSI_82C534 0x0102
+#define PCI_DEVICE_ID_VLSI_82C535 0x0104
+#define PCI_DEVICE_ID_VLSI_82C147 0x0105
+#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
+
+#define PCI_VENDOR_ID_ADL 0x1005
+#define PCI_DEVICE_ID_ADL_2301 0x2301
+
+#define PCI_VENDOR_ID_NS 0x100b
+#define PCI_DEVICE_ID_NS_87415 0x0002
+#define PCI_DEVICE_ID_NS_87560_LIO 0x000e
+#define PCI_DEVICE_ID_NS_87560_USB 0x0012
+#define PCI_DEVICE_ID_NS_83815 0x0020
+#define PCI_DEVICE_ID_NS_83820 0x0022
+#define PCI_DEVICE_ID_NS_CS5535_ISA 0x002b
+#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
+#define PCI_DEVICE_ID_NS_CS5535_AUDIO 0x002e
+#define PCI_DEVICE_ID_NS_CS5535_USB 0x002f
+#define PCI_DEVICE_ID_NS_GX_VIDEO 0x0030
+#define PCI_DEVICE_ID_NS_SATURN 0x0035
+#define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500
+#define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501
+#define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502
+#define PCI_DEVICE_ID_NS_SCx200_AUDIO 0x0503
+#define PCI_DEVICE_ID_NS_SCx200_VIDEO 0x0504
+#define PCI_DEVICE_ID_NS_SCx200_XBUS 0x0505
+#define PCI_DEVICE_ID_NS_SC1100_BRIDGE 0x0510
+#define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511
+#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515
+#define PCI_DEVICE_ID_NS_87410 0xd001
+
+#define PCI_DEVICE_ID_NS_GX_HOST_BRIDGE 0x0028
+
+#define PCI_VENDOR_ID_TSENG 0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b 0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c 0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d 0x3207
+#define PCI_DEVICE_ID_TSENG_ET6000 0x3208
+
+#define PCI_VENDOR_ID_WEITEK 0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000 0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100 0x9100
+
+#define PCI_VENDOR_ID_DEC 0x1011
+#define PCI_DEVICE_ID_DEC_BRD 0x0001
+#define PCI_DEVICE_ID_DEC_TULIP 0x0002
+#define PCI_DEVICE_ID_DEC_TGA 0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009
+#define PCI_DEVICE_ID_DEC_TGA2 0x000D
+#define PCI_DEVICE_ID_DEC_FDDI 0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014
+#define PCI_DEVICE_ID_DEC_21142 0x0019
+#define PCI_DEVICE_ID_DEC_21052 0x0021
+#define PCI_DEVICE_ID_DEC_21150 0x0022
+#define PCI_DEVICE_ID_DEC_21152 0x0024
+#define PCI_DEVICE_ID_DEC_21153 0x0025
+#define PCI_DEVICE_ID_DEC_21154 0x0026
+#define PCI_DEVICE_ID_DEC_21285 0x1065
+#define PCI_DEVICE_ID_COMPAQ_42XX 0x0046
+
+#define PCI_VENDOR_ID_CIRRUS 0x1013
+#define PCI_DEVICE_ID_CIRRUS_7548 0x0038
+#define PCI_DEVICE_ID_CIRRUS_5430 0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436 0x00ac
+#define PCI_DEVICE_ID_CIRRUS_5446 0x00b8
+#define PCI_DEVICE_ID_CIRRUS_5480 0x00bc
+#define PCI_DEVICE_ID_CIRRUS_5462 0x00d0
+#define PCI_DEVICE_ID_CIRRUS_5464 0x00d4
+#define PCI_DEVICE_ID_CIRRUS_5465 0x00d6
+#define PCI_DEVICE_ID_CIRRUS_6729 0x1100
+#define PCI_DEVICE_ID_CIRRUS_6832 0x1110
+#define PCI_DEVICE_ID_CIRRUS_7543 0x1202
+#define PCI_DEVICE_ID_CIRRUS_4610 0x6001
+#define PCI_DEVICE_ID_CIRRUS_4612 0x6003
+#define PCI_DEVICE_ID_CIRRUS_4615 0x6004
+
+#define PCI_VENDOR_ID_IBM 0x1014
+#define PCI_DEVICE_ID_IBM_TR 0x0018
+#define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e
+#define PCI_DEVICE_ID_IBM_CPC710_PCI64 0x00fc
+#define PCI_DEVICE_ID_IBM_SNIPE 0x0180
+#define PCI_DEVICE_ID_IBM_CITRINE 0x028C
+#define PCI_DEVICE_ID_IBM_GEMSTONE 0xB166
+#define PCI_DEVICE_ID_IBM_OBSIDIAN 0x02BD
+#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1 0x0031
+#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2 0x0219
+#define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX 0x021A
+#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM 0x0251
+#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
+#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
+
+#define PCI_SUBVENDOR_ID_IBM 0x1014
+#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4
+
+#define PCI_VENDOR_ID_UNISYS 0x1018
+#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C
+
+#define PCI_VENDOR_ID_COMPEX2 0x101a /* pci.ids says "AT&T GIS (NCR)" */
+#define PCI_DEVICE_ID_COMPEX2_100VG 0x0005
+
+#define PCI_VENDOR_ID_WD 0x101c
+#define PCI_DEVICE_ID_WD_90C 0xc24a
+
+#define PCI_VENDOR_ID_AMI 0x101e
+#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960
+#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010
+#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD_K8_NB 0x1100
+#define PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP 0x1101
+#define PCI_DEVICE_ID_AMD_K8_NB_MEMCTL 0x1102
+#define PCI_DEVICE_ID_AMD_K8_NB_MISC 0x1103
+#define PCI_DEVICE_ID_AMD_10H_NB_HT 0x1200
+#define PCI_DEVICE_ID_AMD_10H_NB_MAP 0x1201
+#define PCI_DEVICE_ID_AMD_10H_NB_DRAM 0x1202
+#define PCI_DEVICE_ID_AMD_10H_NB_MISC 0x1203
+#define PCI_DEVICE_ID_AMD_10H_NB_LINK 0x1204
+#define PCI_DEVICE_ID_AMD_11H_NB_HT 0x1300
+#define PCI_DEVICE_ID_AMD_11H_NB_MAP 0x1301
+#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
+#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
+#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
+#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
+#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573
+#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574
+#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
+#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
+#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605
+#define PCI_DEVICE_ID_AMD_16H_NB_F3 0x1533
+#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
+#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583
+#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584
+#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
+#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
+#define PCI_DEVICE_ID_AMD_SERENADE 0x36c0
+#define PCI_DEVICE_ID_AMD_FE_GATE_7006 0x7006
+#define PCI_DEVICE_ID_AMD_FE_GATE_7007 0x7007
+#define PCI_DEVICE_ID_AMD_FE_GATE_700C 0x700C
+#define PCI_DEVICE_ID_AMD_FE_GATE_700E 0x700E
+#define PCI_DEVICE_ID_AMD_COBRA_7401 0x7401
+#define PCI_DEVICE_ID_AMD_VIPER_7409 0x7409
+#define PCI_DEVICE_ID_AMD_VIPER_740B 0x740B
+#define PCI_DEVICE_ID_AMD_VIPER_7410 0x7410
+#define PCI_DEVICE_ID_AMD_VIPER_7411 0x7411
+#define PCI_DEVICE_ID_AMD_VIPER_7413 0x7413
+#define PCI_DEVICE_ID_AMD_VIPER_7440 0x7440
+#define PCI_DEVICE_ID_AMD_OPUS_7441 0x7441
+#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
+#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
+#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
+#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
+#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
+#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
+#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a
+#define PCI_DEVICE_ID_AMD_8111_SMBUS 0x746b
+#define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d
+#define PCI_DEVICE_ID_AMD_8151_0 0x7454
+#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
+#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
+#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
+#define PCI_DEVICE_ID_AMD_NL_USB 0x7912
+#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
+#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
+#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
+#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093
+#define PCI_DEVICE_ID_AMD_CS5536_OHC 0x2094
+#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
+#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
+#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
+#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
+#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
+#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800
+#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b
+#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c
+
+#define PCI_VENDOR_ID_TRIDENT 0x1023
+#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
+#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001
+#define PCI_DEVICE_ID_TRIDENT_9320 0x9320
+#define PCI_DEVICE_ID_TRIDENT_9388 0x9388
+#define PCI_DEVICE_ID_TRIDENT_9397 0x9397
+#define PCI_DEVICE_ID_TRIDENT_939A 0x939A
+#define PCI_DEVICE_ID_TRIDENT_9520 0x9520
+#define PCI_DEVICE_ID_TRIDENT_9525 0x9525
+#define PCI_DEVICE_ID_TRIDENT_9420 0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440 0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660 0x9660
+#define PCI_DEVICE_ID_TRIDENT_9750 0x9750
+#define PCI_DEVICE_ID_TRIDENT_9850 0x9850
+#define PCI_DEVICE_ID_TRIDENT_9880 0x9880
+#define PCI_DEVICE_ID_TRIDENT_8400 0x8400
+#define PCI_DEVICE_ID_TRIDENT_8420 0x8420
+#define PCI_DEVICE_ID_TRIDENT_8500 0x8500
+
+#define PCI_VENDOR_ID_AI 0x1025
+#define PCI_DEVICE_ID_AI_M1435 0x1435
+
+#define PCI_VENDOR_ID_DELL 0x1028
+#define PCI_DEVICE_ID_DELL_RACIII 0x0008
+#define PCI_DEVICE_ID_DELL_RAC4 0x0012
+#define PCI_DEVICE_ID_DELL_PERC5 0x0015
+
+#define PCI_VENDOR_ID_MATROX 0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2 0x0518
+#define PCI_DEVICE_ID_MATROX_MIL 0x0519
+#define PCI_DEVICE_ID_MATROX_MYS 0x051A
+#define PCI_DEVICE_ID_MATROX_MIL_2 0x051b
+#define PCI_DEVICE_ID_MATROX_MYS_AGP 0x051e
+#define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f
+#define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10
+#define PCI_DEVICE_ID_MATROX_G100_MM 0x1000
+#define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001
+#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520
+#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521
+#define PCI_DEVICE_ID_MATROX_G400 0x0525
+#define PCI_DEVICE_ID_MATROX_G200EV_PCI 0x0530
+#define PCI_DEVICE_ID_MATROX_G550 0x2527
+#define PCI_DEVICE_ID_MATROX_VIA 0x4536
+
+#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2
+
+#define PCI_VENDOR_ID_CT 0x102c
+#define PCI_DEVICE_ID_CT_69000 0x00c0
+#define PCI_DEVICE_ID_CT_65545 0x00d8
+#define PCI_DEVICE_ID_CT_65548 0x00dc
+#define PCI_DEVICE_ID_CT_65550 0x00e0
+#define PCI_DEVICE_ID_CT_65554 0x00e4
+#define PCI_DEVICE_ID_CT_65555 0x00e5
+
+#define PCI_VENDOR_ID_MIRO 0x1031
+#define PCI_DEVICE_ID_MIRO_36050 0x5601
+#define PCI_DEVICE_ID_MIRO_DC10PLUS 0x7efe
+#define PCI_DEVICE_ID_MIRO_DC30PLUS 0xd801
+
+#define PCI_VENDOR_ID_NEC 0x1033
+#define PCI_DEVICE_ID_NEC_CBUS_1 0x0001 /* PCI-Cbus Bridge */
+#define PCI_DEVICE_ID_NEC_LOCAL 0x0002 /* Local Bridge */
+#define PCI_DEVICE_ID_NEC_ATM 0x0003 /* ATM LAN Controller */
+#define PCI_DEVICE_ID_NEC_R4000 0x0004 /* R4000 Bridge */
+#define PCI_DEVICE_ID_NEC_486 0x0005 /* 486 Like Peripheral Bus Bridge */
+#define PCI_DEVICE_ID_NEC_ACCEL_1 0x0006 /* Graphic Accelerator */
+#define PCI_DEVICE_ID_NEC_UXBUS 0x0007 /* UX-Bus Bridge */
+#define PCI_DEVICE_ID_NEC_ACCEL_2 0x0008 /* Graphic Accelerator */
+#define PCI_DEVICE_ID_NEC_GRAPH 0x0009 /* PCI-CoreGraph Bridge */
+#define PCI_DEVICE_ID_NEC_VL 0x0016 /* PCI-VL Bridge */
+#define PCI_DEVICE_ID_NEC_STARALPHA2 0x002c /* STAR ALPHA2 */
+#define PCI_DEVICE_ID_NEC_CBUS_2 0x002d /* PCI-Cbus Bridge */
+#define PCI_DEVICE_ID_NEC_USB 0x0035 /* PCI-USB Host */
+#define PCI_DEVICE_ID_NEC_CBUS_3 0x003b
+#define PCI_DEVICE_ID_NEC_NAPCCARD 0x003e
+#define PCI_DEVICE_ID_NEC_PCX2 0x0046 /* PowerVR */
+#define PCI_DEVICE_ID_NEC_VRC5476 0x009b
+#define PCI_DEVICE_ID_NEC_VRC4173 0x00a5
+#define PCI_DEVICE_ID_NEC_VRC5477_AC97 0x00a6
+#define PCI_DEVICE_ID_NEC_PC9821CS01 0x800c /* PC-9821-CS01 */
+#define PCI_DEVICE_ID_NEC_PC9821NRB06 0x800d /* PC-9821NR-B06 */
+
+#define PCI_VENDOR_ID_FD 0x1036
+#define PCI_DEVICE_ID_FD_36C70 0x0000
+
+#define PCI_VENDOR_ID_SI 0x1039
+#define PCI_DEVICE_ID_SI_5591_AGP 0x0001
+#define PCI_DEVICE_ID_SI_6202 0x0002
+#define PCI_DEVICE_ID_SI_503 0x0008
+#define PCI_DEVICE_ID_SI_ACPI 0x0009
+#define PCI_DEVICE_ID_SI_SMBUS 0x0016
+#define PCI_DEVICE_ID_SI_LPC 0x0018
+#define PCI_DEVICE_ID_SI_5597_VGA 0x0200
+#define PCI_DEVICE_ID_SI_6205 0x0205
+#define PCI_DEVICE_ID_SI_501 0x0406
+#define PCI_DEVICE_ID_SI_496 0x0496
+#define PCI_DEVICE_ID_SI_300 0x0300
+#define PCI_DEVICE_ID_SI_315H 0x0310
+#define PCI_DEVICE_ID_SI_315 0x0315
+#define PCI_DEVICE_ID_SI_315PRO 0x0325
+#define PCI_DEVICE_ID_SI_530 0x0530
+#define PCI_DEVICE_ID_SI_540 0x0540
+#define PCI_DEVICE_ID_SI_550 0x0550
+#define PCI_DEVICE_ID_SI_540_VGA 0x5300
+#define PCI_DEVICE_ID_SI_550_VGA 0x5315
+#define PCI_DEVICE_ID_SI_620 0x0620
+#define PCI_DEVICE_ID_SI_630 0x0630
+#define PCI_DEVICE_ID_SI_633 0x0633
+#define PCI_DEVICE_ID_SI_635 0x0635
+#define PCI_DEVICE_ID_SI_640 0x0640
+#define PCI_DEVICE_ID_SI_645 0x0645
+#define PCI_DEVICE_ID_SI_646 0x0646
+#define PCI_DEVICE_ID_SI_648 0x0648
+#define PCI_DEVICE_ID_SI_650 0x0650
+#define PCI_DEVICE_ID_SI_651 0x0651
+#define PCI_DEVICE_ID_SI_655 0x0655
+#define PCI_DEVICE_ID_SI_661 0x0661
+#define PCI_DEVICE_ID_SI_730 0x0730
+#define PCI_DEVICE_ID_SI_733 0x0733
+#define PCI_DEVICE_ID_SI_630_VGA 0x6300
+#define PCI_DEVICE_ID_SI_735 0x0735
+#define PCI_DEVICE_ID_SI_740 0x0740
+#define PCI_DEVICE_ID_SI_741 0x0741
+#define PCI_DEVICE_ID_SI_745 0x0745
+#define PCI_DEVICE_ID_SI_746 0x0746
+#define PCI_DEVICE_ID_SI_755 0x0755
+#define PCI_DEVICE_ID_SI_760 0x0760
+#define PCI_DEVICE_ID_SI_900 0x0900
+#define PCI_DEVICE_ID_SI_961 0x0961
+#define PCI_DEVICE_ID_SI_962 0x0962
+#define PCI_DEVICE_ID_SI_963 0x0963
+#define PCI_DEVICE_ID_SI_965 0x0965
+#define PCI_DEVICE_ID_SI_966 0x0966
+#define PCI_DEVICE_ID_SI_968 0x0968
+#define PCI_DEVICE_ID_SI_1180 0x1180
+#define PCI_DEVICE_ID_SI_5511 0x5511
+#define PCI_DEVICE_ID_SI_5513 0x5513
+#define PCI_DEVICE_ID_SI_5517 0x5517
+#define PCI_DEVICE_ID_SI_5518 0x5518
+#define PCI_DEVICE_ID_SI_5571 0x5571
+#define PCI_DEVICE_ID_SI_5581 0x5581
+#define PCI_DEVICE_ID_SI_5582 0x5582
+#define PCI_DEVICE_ID_SI_5591 0x5591
+#define PCI_DEVICE_ID_SI_5596 0x5596
+#define PCI_DEVICE_ID_SI_5597 0x5597
+#define PCI_DEVICE_ID_SI_5598 0x5598
+#define PCI_DEVICE_ID_SI_5600 0x5600
+#define PCI_DEVICE_ID_SI_7012 0x7012
+#define PCI_DEVICE_ID_SI_7013 0x7013
+#define PCI_DEVICE_ID_SI_7016 0x7016
+#define PCI_DEVICE_ID_SI_7018 0x7018
+
+#define PCI_VENDOR_ID_HP 0x103c
+#define PCI_VENDOR_ID_HP_3PAR 0x1590
+#define PCI_DEVICE_ID_HP_VISUALIZE_EG 0x1005
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX6 0x1006
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX4 0x1008
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX2 0x100a
+#define PCI_DEVICE_ID_HP_TACHYON 0x1028
+#define PCI_DEVICE_ID_HP_TACHLITE 0x1029
+#define PCI_DEVICE_ID_HP_J2585A 0x1030
+#define PCI_DEVICE_ID_HP_J2585B 0x1031
+#define PCI_DEVICE_ID_HP_J2973A 0x1040
+#define PCI_DEVICE_ID_HP_J2970A 0x1042
+#define PCI_DEVICE_ID_HP_DIVA 0x1048
+#define PCI_DEVICE_ID_HP_DIVA_TOSCA1 0x1049
+#define PCI_DEVICE_ID_HP_DIVA_TOSCA2 0x104A
+#define PCI_DEVICE_ID_HP_DIVA_MAESTRO 0x104B
+#define PCI_DEVICE_ID_HP_REO_IOC 0x10f1
+#define PCI_DEVICE_ID_HP_VISUALIZE_FXE 0x108b
+#define PCI_DEVICE_ID_HP_DIVA_HALFDOME 0x1223
+#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE 0x1226
+#define PCI_DEVICE_ID_HP_DIVA_POWERBAR 0x1227
+#define PCI_DEVICE_ID_HP_ZX1_IOC 0x122a
+#define PCI_DEVICE_ID_HP_PCIX_LBA 0x122e
+#define PCI_DEVICE_ID_HP_SX1000_IOC 0x127c
+#define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282
+#define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290
+#define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301
+#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a
+#define PCI_DEVICE_ID_HP_CISSA 0x3220
+#define PCI_DEVICE_ID_HP_CISSC 0x3230
+#define PCI_DEVICE_ID_HP_CISSD 0x3238
+#define PCI_DEVICE_ID_HP_CISSE 0x323a
+#define PCI_DEVICE_ID_HP_CISSF 0x323b
+#define PCI_DEVICE_ID_HP_CISSH 0x323c
+#define PCI_DEVICE_ID_HP_CISSI 0x3239
+#define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031
+
+#define PCI_VENDOR_ID_PCTECH 0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000
+#define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
+
+#define PCI_VENDOR_ID_ASUSTEK 0x1043
+#define PCI_DEVICE_ID_ASUSTEK_0675 0x0675
+
+#define PCI_VENDOR_ID_DPT 0x1044
+#define PCI_DEVICE_ID_DPT 0xa400
+
+#define PCI_VENDOR_ID_OPTI 0x1045
+#define PCI_DEVICE_ID_OPTI_82C558 0xc558
+#define PCI_DEVICE_ID_OPTI_82C621 0xc621
+#define PCI_DEVICE_ID_OPTI_82C700 0xc700
+#define PCI_DEVICE_ID_OPTI_82C825 0xd568
+
+#define PCI_VENDOR_ID_ELSA 0x1048
+#define PCI_DEVICE_ID_ELSA_MICROLINK 0x1000
+#define PCI_DEVICE_ID_ELSA_QS3000 0x3000
+
+#define PCI_VENDOR_ID_STMICRO 0x104A
+#define PCI_DEVICE_ID_STMICRO_USB_HOST 0xCC00
+#define PCI_DEVICE_ID_STMICRO_USB_OHCI 0xCC01
+#define PCI_DEVICE_ID_STMICRO_USB_OTG 0xCC02
+#define PCI_DEVICE_ID_STMICRO_UART_HWFC 0xCC03
+#define PCI_DEVICE_ID_STMICRO_UART_NO_HWFC 0xCC04
+#define PCI_DEVICE_ID_STMICRO_SOC_DMA 0xCC05
+#define PCI_DEVICE_ID_STMICRO_SATA 0xCC06
+#define PCI_DEVICE_ID_STMICRO_I2C 0xCC07
+#define PCI_DEVICE_ID_STMICRO_SPI_HS 0xCC08
+#define PCI_DEVICE_ID_STMICRO_MAC 0xCC09
+#define PCI_DEVICE_ID_STMICRO_SDIO_EMMC 0xCC0A
+#define PCI_DEVICE_ID_STMICRO_SDIO 0xCC0B
+#define PCI_DEVICE_ID_STMICRO_GPIO 0xCC0C
+#define PCI_DEVICE_ID_STMICRO_VIP 0xCC0D
+#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_DMA 0xCC0E
+#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_SRCS 0xCC0F
+#define PCI_DEVICE_ID_STMICRO_AUDIO_ROUTER_MSPS 0xCC10
+#define PCI_DEVICE_ID_STMICRO_CAN 0xCC11
+#define PCI_DEVICE_ID_STMICRO_MLB 0xCC12
+#define PCI_DEVICE_ID_STMICRO_DBP 0xCC13
+#define PCI_DEVICE_ID_STMICRO_SATA_PHY 0xCC14
+#define PCI_DEVICE_ID_STMICRO_ESRAM 0xCC15
+#define PCI_DEVICE_ID_STMICRO_VIC 0xCC16
+
+#define PCI_VENDOR_ID_BUSLOGIC 0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130
+
+#define PCI_VENDOR_ID_TI 0x104c
+#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
+#define PCI_DEVICE_ID_TI_4450 0x8011
+#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
+#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
+#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
+#define PCI_DEVICE_ID_TI_X515 0x8036
+#define PCI_DEVICE_ID_TI_XX12 0x8039
+#define PCI_DEVICE_ID_TI_XX12_FM 0x803b
+#define PCI_DEVICE_ID_TI_XIO2000A 0x8231
+#define PCI_DEVICE_ID_TI_1130 0xac12
+#define PCI_DEVICE_ID_TI_1031 0xac13
+#define PCI_DEVICE_ID_TI_1131 0xac15
+#define PCI_DEVICE_ID_TI_1250 0xac16
+#define PCI_DEVICE_ID_TI_1220 0xac17
+#define PCI_DEVICE_ID_TI_1221 0xac19
+#define PCI_DEVICE_ID_TI_1210 0xac1a
+#define PCI_DEVICE_ID_TI_1450 0xac1b
+#define PCI_DEVICE_ID_TI_1225 0xac1c
+#define PCI_DEVICE_ID_TI_1251A 0xac1d
+#define PCI_DEVICE_ID_TI_1211 0xac1e
+#define PCI_DEVICE_ID_TI_1251B 0xac1f
+#define PCI_DEVICE_ID_TI_4410 0xac41
+#define PCI_DEVICE_ID_TI_4451 0xac42
+#define PCI_DEVICE_ID_TI_4510 0xac44
+#define PCI_DEVICE_ID_TI_4520 0xac46
+#define PCI_DEVICE_ID_TI_7510 0xac47
+#define PCI_DEVICE_ID_TI_7610 0xac48
+#define PCI_DEVICE_ID_TI_7410 0xac49
+#define PCI_DEVICE_ID_TI_1410 0xac50
+#define PCI_DEVICE_ID_TI_1420 0xac51
+#define PCI_DEVICE_ID_TI_1451A 0xac52
+#define PCI_DEVICE_ID_TI_1620 0xac54
+#define PCI_DEVICE_ID_TI_1520 0xac55
+#define PCI_DEVICE_ID_TI_1510 0xac56
+#define PCI_DEVICE_ID_TI_X620 0xac8d
+#define PCI_DEVICE_ID_TI_X420 0xac8e
+#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+
+#define PCI_VENDOR_ID_SONY 0x104d
+
+/* Winbond have two vendor IDs! See 0x10ad as well */
+#define PCI_VENDOR_ID_WINBOND2 0x1050
+#define PCI_DEVICE_ID_WINBOND2_89C940F 0x5a5a
+#define PCI_DEVICE_ID_WINBOND2_6692 0x6692
+
+#define PCI_VENDOR_ID_ANIGMA 0x1051
+#define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100
+
+#define PCI_VENDOR_ID_EFAR 0x1055
+#define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130
+#define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463
+
+#define PCI_VENDOR_ID_MOTOROLA 0x1057
+#define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001
+#define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002
+#define PCI_DEVICE_ID_MOTOROLA_MPC107 0x0004
+#define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801
+#define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802
+#define PCI_DEVICE_ID_MOTOROLA_HAWK 0x4803
+#define PCI_DEVICE_ID_MOTOROLA_HARRIER 0x480b
+#define PCI_DEVICE_ID_MOTOROLA_MPC5200 0x5803
+#define PCI_DEVICE_ID_MOTOROLA_MPC5200B 0x5809
+
+#define PCI_VENDOR_ID_PROMISE 0x105a
+#define PCI_DEVICE_ID_PROMISE_20265 0x0d30
+#define PCI_DEVICE_ID_PROMISE_20267 0x4d30
+#define PCI_DEVICE_ID_PROMISE_20246 0x4d33
+#define PCI_DEVICE_ID_PROMISE_20262 0x4d38
+#define PCI_DEVICE_ID_PROMISE_20263 0x0D38
+#define PCI_DEVICE_ID_PROMISE_20268 0x4d68
+#define PCI_DEVICE_ID_PROMISE_20269 0x4d69
+#define PCI_DEVICE_ID_PROMISE_20270 0x6268
+#define PCI_DEVICE_ID_PROMISE_20271 0x6269
+#define PCI_DEVICE_ID_PROMISE_20275 0x1275
+#define PCI_DEVICE_ID_PROMISE_20276 0x5275
+#define PCI_DEVICE_ID_PROMISE_20277 0x7275
+
+#define PCI_VENDOR_ID_FOXCONN 0x105b
+
+#define PCI_VENDOR_ID_UMC 0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
+#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A 0x886a
+
+#define PCI_VENDOR_ID_PICOPOWER 0x1066
+#define PCI_DEVICE_ID_PICOPOWER_PT86C523 0x0002
+#define PCI_DEVICE_ID_PICOPOWER_PT86C523BBP 0x8002
+
+#define PCI_VENDOR_ID_MYLEX 0x1069
+#define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001
+#define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002
+#define PCI_DEVICE_ID_MYLEX_DAC960_PG 0x0010
+#define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020
+#define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050
+#define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56
+#define PCI_DEVICE_ID_MYLEX_DAC960_GEM 0xB166
+
+#define PCI_VENDOR_ID_APPLE 0x106b
+#define PCI_DEVICE_ID_APPLE_BANDIT 0x0001
+#define PCI_DEVICE_ID_APPLE_HYDRA 0x000e
+#define PCI_DEVICE_ID_APPLE_UNI_N_FW 0x0018
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP 0x0020
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC 0x0021
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMACP 0x0024
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P 0x0027
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP15 0x002d
+#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15 0x002e
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2 0x0032
+#define PCI_DEVICE_ID_APPLE_UNI_N_ATA 0x0033
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2 0x0034
+#define PCI_DEVICE_ID_APPLE_IPID_ATA100 0x003b
+#define PCI_DEVICE_ID_APPLE_K2_ATA100 0x0043
+#define PCI_DEVICE_ID_APPLE_U3_AGP 0x004b
+#define PCI_DEVICE_ID_APPLE_K2_GMAC 0x004c
+#define PCI_DEVICE_ID_APPLE_SH_ATA 0x0050
+#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051
+#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058
+#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059
+#define PCI_DEVICE_ID_APPLE_U4_PCIE 0x005b
+#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066
+#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069
+#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a
+#define PCI_DEVICE_ID_APPLE_IPID2_GMAC 0x006b
+#define PCI_DEVICE_ID_APPLE_TIGON3 0x1645
+
+#define PCI_VENDOR_ID_YAMAHA 0x1073
+#define PCI_DEVICE_ID_YAMAHA_724 0x0004
+#define PCI_DEVICE_ID_YAMAHA_724F 0x000d
+#define PCI_DEVICE_ID_YAMAHA_740 0x000a
+#define PCI_DEVICE_ID_YAMAHA_740C 0x000c
+#define PCI_DEVICE_ID_YAMAHA_744 0x0010
+#define PCI_DEVICE_ID_YAMAHA_754 0x0012
+
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP10160 0x1016
+#define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP1080 0x1080
+#define PCI_DEVICE_ID_QLOGIC_ISP12160 0x1216
+#define PCI_DEVICE_ID_QLOGIC_ISP1240 0x1240
+#define PCI_DEVICE_ID_QLOGIC_ISP1280 0x1280
+#define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100
+#define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200
+#define PCI_DEVICE_ID_QLOGIC_ISP2300 0x2300
+#define PCI_DEVICE_ID_QLOGIC_ISP2312 0x2312
+#define PCI_DEVICE_ID_QLOGIC_ISP2322 0x2322
+#define PCI_DEVICE_ID_QLOGIC_ISP6312 0x6312
+#define PCI_DEVICE_ID_QLOGIC_ISP6322 0x6322
+#define PCI_DEVICE_ID_QLOGIC_ISP2422 0x2422
+#define PCI_DEVICE_ID_QLOGIC_ISP2432 0x2432
+#define PCI_DEVICE_ID_QLOGIC_ISP2512 0x2512
+#define PCI_DEVICE_ID_QLOGIC_ISP2522 0x2522
+#define PCI_DEVICE_ID_QLOGIC_ISP5422 0x5422
+#define PCI_DEVICE_ID_QLOGIC_ISP5432 0x5432
+
+#define PCI_VENDOR_ID_CYRIX 0x1078
+#define PCI_DEVICE_ID_CYRIX_5510 0x0000
+#define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001
+#define PCI_DEVICE_ID_CYRIX_5520 0x0002
+#define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100
+#define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102
+#define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103
+#define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104
+
+#define PCI_VENDOR_ID_CONTAQ 0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C693 0xc693
+
+#define PCI_VENDOR_ID_OLICOM 0x108d
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+
+#define PCI_VENDOR_ID_SUN 0x108e
+#define PCI_DEVICE_ID_SUN_EBUS 0x1000
+#define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001
+#define PCI_DEVICE_ID_SUN_RIO_EBUS 0x1100
+#define PCI_DEVICE_ID_SUN_RIO_GEM 0x1101
+#define PCI_DEVICE_ID_SUN_RIO_1394 0x1102
+#define PCI_DEVICE_ID_SUN_RIO_USB 0x1103
+#define PCI_DEVICE_ID_SUN_GEM 0x2bad
+#define PCI_DEVICE_ID_SUN_SIMBA 0x5000
+#define PCI_DEVICE_ID_SUN_PBM 0x8000
+#define PCI_DEVICE_ID_SUN_SCHIZO 0x8001
+#define PCI_DEVICE_ID_SUN_SABRE 0xa000
+#define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001
+#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801
+#define PCI_DEVICE_ID_SUN_CASSINI 0xabba
+
+#define PCI_VENDOR_ID_NI 0x1093
+#define PCI_DEVICE_ID_NI_PCI2322 0xd130
+#define PCI_DEVICE_ID_NI_PCI2324 0xd140
+#define PCI_DEVICE_ID_NI_PCI2328 0xd150
+#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190
+#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0
+#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0
+#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0
+#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0
+#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1
+#define PCI_DEVICE_ID_NI_PCI2322I 0xd250
+#define PCI_DEVICE_ID_NI_PCI2324I 0xd270
+#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0
+#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080
+#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db
+#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd
+#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df
+#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2
+#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4
+#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6
+#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7
+#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8
+#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea
+#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec
+#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee
+
+#define PCI_VENDOR_ID_CMD 0x1095
+#define PCI_DEVICE_ID_CMD_643 0x0643
+#define PCI_DEVICE_ID_CMD_646 0x0646
+#define PCI_DEVICE_ID_CMD_648 0x0648
+#define PCI_DEVICE_ID_CMD_649 0x0649
+
+#define PCI_DEVICE_ID_SII_680 0x0680
+#define PCI_DEVICE_ID_SII_3112 0x3112
+#define PCI_DEVICE_ID_SII_1210SA 0x0240
+
+#define PCI_VENDOR_ID_BROOKTREE 0x109e
+#define PCI_DEVICE_ID_BROOKTREE_878 0x0878
+#define PCI_DEVICE_ID_BROOKTREE_879 0x0879
+
+#define PCI_VENDOR_ID_SGI 0x10a9
+#define PCI_DEVICE_ID_SGI_IOC3 0x0003
+#define PCI_DEVICE_ID_SGI_LITHIUM 0x1002
+#define PCI_DEVICE_ID_SGI_IOC4 0x100a
+
+#define PCI_VENDOR_ID_WINBOND 0x10ad
+#define PCI_DEVICE_ID_WINBOND_82C105 0x0105
+#define PCI_DEVICE_ID_WINBOND_83C553 0x0565
+
+#define PCI_VENDOR_ID_PLX 0x10b5
+#define PCI_DEVICE_ID_PLX_R685 0x1030
+#define PCI_DEVICE_ID_PLX_ROMULUS 0x106a
+#define PCI_DEVICE_ID_PLX_SPCOM800 0x1076
+#define PCI_DEVICE_ID_PLX_1077 0x1077
+#define PCI_DEVICE_ID_PLX_SPCOM200 0x1103
+#define PCI_DEVICE_ID_PLX_DJINN_ITOO 0x1151
+#define PCI_DEVICE_ID_PLX_R753 0x1152
+#define PCI_DEVICE_ID_PLX_OLITEC 0x1187
+#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
+#define PCI_DEVICE_ID_PLX_9030 0x9030
+#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9056 0x9056
+#define PCI_DEVICE_ID_PLX_9080 0x9080
+#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
+
+#define PCI_VENDOR_ID_MADGE 0x10b6
+#define PCI_DEVICE_ID_MADGE_MK2 0x0002
+
+#define PCI_VENDOR_ID_3COM 0x10b7
+#define PCI_DEVICE_ID_3COM_3C985 0x0001
+#define PCI_DEVICE_ID_3COM_3C940 0x1700
+#define PCI_DEVICE_ID_3COM_3C339 0x3390
+#define PCI_DEVICE_ID_3COM_3C359 0x3590
+#define PCI_DEVICE_ID_3COM_3C940B 0x80eb
+#define PCI_DEVICE_ID_3COM_3CR990 0x9900
+#define PCI_DEVICE_ID_3COM_3CR990_TX_95 0x9902
+#define PCI_DEVICE_ID_3COM_3CR990_TX_97 0x9903
+#define PCI_DEVICE_ID_3COM_3CR990B 0x9904
+#define PCI_DEVICE_ID_3COM_3CR990_FX 0x9905
+#define PCI_DEVICE_ID_3COM_3CR990SVR95 0x9908
+#define PCI_DEVICE_ID_3COM_3CR990SVR97 0x9909
+#define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a
+
+#define PCI_VENDOR_ID_AL 0x10b9
+#define PCI_DEVICE_ID_AL_M1533 0x1533
+#define PCI_DEVICE_ID_AL_M1535 0x1535
+#define PCI_DEVICE_ID_AL_M1541 0x1541
+#define PCI_DEVICE_ID_AL_M1563 0x1563
+#define PCI_DEVICE_ID_AL_M1621 0x1621
+#define PCI_DEVICE_ID_AL_M1631 0x1631
+#define PCI_DEVICE_ID_AL_M1632 0x1632
+#define PCI_DEVICE_ID_AL_M1641 0x1641
+#define PCI_DEVICE_ID_AL_M1644 0x1644
+#define PCI_DEVICE_ID_AL_M1647 0x1647
+#define PCI_DEVICE_ID_AL_M1651 0x1651
+#define PCI_DEVICE_ID_AL_M1671 0x1671
+#define PCI_DEVICE_ID_AL_M1681 0x1681
+#define PCI_DEVICE_ID_AL_M1683 0x1683
+#define PCI_DEVICE_ID_AL_M1689 0x1689
+#define PCI_DEVICE_ID_AL_M5219 0x5219
+#define PCI_DEVICE_ID_AL_M5228 0x5228
+#define PCI_DEVICE_ID_AL_M5229 0x5229
+#define PCI_DEVICE_ID_AL_M5451 0x5451
+#define PCI_DEVICE_ID_AL_M7101 0x7101
+
+#define PCI_VENDOR_ID_NEOMAGIC 0x10c8
+#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005
+#define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006
+#define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016
+
+#define PCI_VENDOR_ID_TCONRAD 0x10da
+#define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508
+
+#define PCI_VENDOR_ID_NVIDIA 0x10de
+#define PCI_DEVICE_ID_NVIDIA_TNT 0x0020
+#define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028
+#define PCI_DEVICE_ID_NVIDIA_UTNT2 0x0029
+#define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN 0x002a
+#define PCI_DEVICE_ID_NVIDIA_VTNT2 0x002C
+#define PCI_DEVICE_ID_NVIDIA_UVTNT2 0x002D
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE 0x0042
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x0045
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000 0x004E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS 0x0052
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055
+#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059
+#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065
+#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069
+#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085
+#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089
+#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800 0x0098
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099
+#define PCI_DEVICE_ID_NVIDIA_ITNT2 0x00A0
+#define PCI_DEVICE_ID_GEFORCE_6800A 0x00c1
+#define PCI_DEVICE_ID_GEFORCE_6800A_LE 0x00c2
+#define PCI_DEVICE_ID_GEFORCE_GO_6800 0x00c8
+#define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA 0x00c9
+#define PCI_DEVICE_ID_QUADRO_FX_GO1400 0x00cc
+#define PCI_DEVICE_ID_QUADRO_FX_1400 0x00ce
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5
+#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9
+#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5
+#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT1 0x00f1
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3
+#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9
+#define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101
+#define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX 0x0110
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2 0x0111
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO 0x0112
+#define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR 0x0113
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT 0x0140
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600 0x0141
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL 0x0145
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540 0x014E
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200 0x014F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS 0x0150
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2 0x0151
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA 0x0152
+#define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO 0x0153
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200 0x0164
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250 0x0166
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1 0x0167
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1 0x0168
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460 0x0170
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440 0x0171
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420 0x0172
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE 0x0173
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO 0x0174
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO 0x0175
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO 0x0177
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL 0x0178
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_200 0x017A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL 0x017B
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL 0x017C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_4000 0x0185
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO 0x0186
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO 0x0187
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL 0x0188
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC 0x0189
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS 0x018A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL 0x018B
+#define PCI_DEVICE_ID_NVIDIA_IGEFORCE2 0x01a0
+#define PCI_DEVICE_ID_NVIDIA_NFORCE 0x01a4
+#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc
+#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2 0x0202
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC 0x0203
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B 0x0211
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE 0x0212
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT 0x0215
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600 0x0250
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400 0x0251
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200 0x0253
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL 0x0258
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL 0x0259
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL 0x025B
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS 0x0264
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE 0x0265
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS 0x0368
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO 0x0286
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL 0x0288
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL 0x0289
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL 0x028C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA 0x0301
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800 0x0302
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000 0x0308
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000 0x0309
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA 0x0311
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600 0x0312
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE 0x0314
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600 0x031A
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650 0x031B
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700 0x031C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200 0x0320
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA 0x0321
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1 0x0322
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE 0x0323
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200 0x0324
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250 0x0325
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500 0x0326
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100 0x0327
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32 0x0328
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200 0x0329
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI 0x032A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500 0x032B
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300 0x032C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100 0x032D
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA 0x0330
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900 0x0331
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT 0x0332
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA 0x0333
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT 0x0334
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000 0x0338
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700 0x033F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA 0x0341
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700 0x0342
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE 0x0343
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE 0x0344
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1 0x0347
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
+#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0 0x0360
+#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4 0x0364
+#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85
+
+#define PCI_VENDOR_ID_IMS 0x10e0
+#define PCI_DEVICE_ID_IMS_TT128 0x9128
+#define PCI_DEVICE_ID_IMS_TT3D 0x9135
+
+#define PCI_VENDOR_ID_AMCC 0x10e8
+
+#define PCI_VENDOR_ID_INTERG 0x10ea
+#define PCI_DEVICE_ID_INTERG_1682 0x1682
+#define PCI_DEVICE_ID_INTERG_2000 0x2000
+#define PCI_DEVICE_ID_INTERG_2010 0x2010
+#define PCI_DEVICE_ID_INTERG_5000 0x5000
+#define PCI_DEVICE_ID_INTERG_5050 0x5050
+
+#define PCI_VENDOR_ID_REALTEK 0x10ec
+#define PCI_DEVICE_ID_REALTEK_8139 0x8139
+
+#define PCI_VENDOR_ID_XILINX 0x10ee
+#define PCI_DEVICE_ID_RME_DIGI96 0x3fc0
+#define PCI_DEVICE_ID_RME_DIGI96_8 0x3fc1
+#define PCI_DEVICE_ID_RME_DIGI96_8_PRO 0x3fc2
+#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3
+#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5
+#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6
+
+#define PCI_VENDOR_ID_INIT 0x1101
+
+#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */
+#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002
+#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005
+#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b
+#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043
+#define PCI_SUBDEVICE_ID_CREATIVE_SB1270 0x0062
+#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000
+
+#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */
+#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938
+
+#define PCI_VENDOR_ID_TTI 0x1103
+#define PCI_DEVICE_ID_TTI_HPT343 0x0003
+#define PCI_DEVICE_ID_TTI_HPT366 0x0004
+#define PCI_DEVICE_ID_TTI_HPT372 0x0005
+#define PCI_DEVICE_ID_TTI_HPT302 0x0006
+#define PCI_DEVICE_ID_TTI_HPT371 0x0007
+#define PCI_DEVICE_ID_TTI_HPT374 0x0008
+#define PCI_DEVICE_ID_TTI_HPT372N 0x0009 /* apparently a 372N variant? */
+
+#define PCI_VENDOR_ID_VIA 0x1106
+#define PCI_DEVICE_ID_VIA_8763_0 0x0198
+#define PCI_DEVICE_ID_VIA_8380_0 0x0204
+#define PCI_DEVICE_ID_VIA_3238_0 0x0238
+#define PCI_DEVICE_ID_VIA_PT880 0x0258
+#define PCI_DEVICE_ID_VIA_PT880ULTRA 0x0308
+#define PCI_DEVICE_ID_VIA_PX8X0_0 0x0259
+#define PCI_DEVICE_ID_VIA_3269_0 0x0269
+#define PCI_DEVICE_ID_VIA_K8T800PRO_0 0x0282
+#define PCI_DEVICE_ID_VIA_3296_0 0x0296
+#define PCI_DEVICE_ID_VIA_8363_0 0x0305
+#define PCI_DEVICE_ID_VIA_P4M800CE 0x0314
+#define PCI_DEVICE_ID_VIA_P4M890 0x0327
+#define PCI_DEVICE_ID_VIA_VT3324 0x0324
+#define PCI_DEVICE_ID_VIA_VT3336 0x0336
+#define PCI_DEVICE_ID_VIA_VT3351 0x0351
+#define PCI_DEVICE_ID_VIA_VT3364 0x0364
+#define PCI_DEVICE_ID_VIA_8371_0 0x0391
+#define PCI_DEVICE_ID_VIA_6415 0x0415
+#define PCI_DEVICE_ID_VIA_8501_0 0x0501
+#define PCI_DEVICE_ID_VIA_82C561 0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1 0x0571
+#define PCI_DEVICE_ID_VIA_82C576 0x0576
+#define PCI_DEVICE_ID_VIA_82C586_0 0x0586
+#define PCI_DEVICE_ID_VIA_82C596 0x0596
+#define PCI_DEVICE_ID_VIA_82C597_0 0x0597
+#define PCI_DEVICE_ID_VIA_82C598_0 0x0598
+#define PCI_DEVICE_ID_VIA_8601_0 0x0601
+#define PCI_DEVICE_ID_VIA_8605_0 0x0605
+#define PCI_DEVICE_ID_VIA_82C686 0x0686
+#define PCI_DEVICE_ID_VIA_82C691_0 0x0691
+#define PCI_DEVICE_ID_VIA_82C576_1 0x1571
+#define PCI_DEVICE_ID_VIA_82C586_2 0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3 0x3040
+#define PCI_DEVICE_ID_VIA_82C596_3 0x3050
+#define PCI_DEVICE_ID_VIA_82C596B_3 0x3051
+#define PCI_DEVICE_ID_VIA_82C686_4 0x3057
+#define PCI_DEVICE_ID_VIA_82C686_5 0x3058
+#define PCI_DEVICE_ID_VIA_8233_5 0x3059
+#define PCI_DEVICE_ID_VIA_8233_0 0x3074
+#define PCI_DEVICE_ID_VIA_8633_0 0x3091
+#define PCI_DEVICE_ID_VIA_8367_0 0x3099
+#define PCI_DEVICE_ID_VIA_8653_0 0x3101
+#define PCI_DEVICE_ID_VIA_8622 0x3102
+#define PCI_DEVICE_ID_VIA_8235_USB_2 0x3104
+#define PCI_DEVICE_ID_VIA_8233C_0 0x3109
+#define PCI_DEVICE_ID_VIA_8361 0x3112
+#define PCI_DEVICE_ID_VIA_XM266 0x3116
+#define PCI_DEVICE_ID_VIA_612X 0x3119
+#define PCI_DEVICE_ID_VIA_862X_0 0x3123
+#define PCI_DEVICE_ID_VIA_8753_0 0x3128
+#define PCI_DEVICE_ID_VIA_8233A 0x3147
+#define PCI_DEVICE_ID_VIA_8703_51_0 0x3148
+#define PCI_DEVICE_ID_VIA_8237_SATA 0x3149
+#define PCI_DEVICE_ID_VIA_XN266 0x3156
+#define PCI_DEVICE_ID_VIA_6410 0x3164
+#define PCI_DEVICE_ID_VIA_8754C_0 0x3168
+#define PCI_DEVICE_ID_VIA_8235 0x3177
+#define PCI_DEVICE_ID_VIA_8385_0 0x3188
+#define PCI_DEVICE_ID_VIA_8377_0 0x3189
+#define PCI_DEVICE_ID_VIA_8378_0 0x3205
+#define PCI_DEVICE_ID_VIA_8783_0 0x3208
+#define PCI_DEVICE_ID_VIA_8237 0x3227
+#define PCI_DEVICE_ID_VIA_8251 0x3287
+#define PCI_DEVICE_ID_VIA_8261 0x3402
+#define PCI_DEVICE_ID_VIA_8237A 0x3337
+#define PCI_DEVICE_ID_VIA_8237S 0x3372
+#define PCI_DEVICE_ID_VIA_SATA_EIDE 0x5324
+#define PCI_DEVICE_ID_VIA_8231 0x8231
+#define PCI_DEVICE_ID_VIA_8231_4 0x8235
+#define PCI_DEVICE_ID_VIA_8365_1 0x8305
+#define PCI_DEVICE_ID_VIA_CX700 0x8324
+#define PCI_DEVICE_ID_VIA_CX700_IDE 0x0581
+#define PCI_DEVICE_ID_VIA_VX800 0x8353
+#define PCI_DEVICE_ID_VIA_VX855 0x8409
+#define PCI_DEVICE_ID_VIA_VX900 0x8410
+#define PCI_DEVICE_ID_VIA_8371_1 0x8391
+#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
+#define PCI_DEVICE_ID_VIA_838X_1 0xB188
+#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198
+#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409
+#define PCI_DEVICE_ID_VIA_ANON 0xFFFF
+
+#define PCI_VENDOR_ID_SIEMENS 0x110A
+#define PCI_DEVICE_ID_SIEMENS_DSCC4 0x2102
+
+#define PCI_VENDOR_ID_VORTEX 0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105
+
+#define PCI_VENDOR_ID_EF 0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002
+#define PCI_DEVICE_ID_EF_ATM_LANAI2 0x0003
+#define PCI_DEVICE_ID_EF_ATM_LANAIHB 0x0005
+
+#define PCI_VENDOR_ID_IDT 0x111d
+#define PCI_DEVICE_ID_IDT_IDT77201 0x0001
+
+#define PCI_VENDOR_ID_FORE 0x1127
+#define PCI_DEVICE_ID_FORE_PCA200E 0x0300
+
+#define PCI_VENDOR_ID_PHILIPS 0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146
+#define PCI_DEVICE_ID_PHILIPS_SAA9730 0x9730
+
+#define PCI_VENDOR_ID_EICON 0x1133
+#define PCI_DEVICE_ID_EICON_DIVA20 0xe002
+#define PCI_DEVICE_ID_EICON_DIVA20_U 0xe004
+#define PCI_DEVICE_ID_EICON_DIVA201 0xe005
+#define PCI_DEVICE_ID_EICON_DIVA202 0xe00b
+#define PCI_DEVICE_ID_EICON_MAESTRA 0xe010
+#define PCI_DEVICE_ID_EICON_MAESTRAQ 0xe012
+#define PCI_DEVICE_ID_EICON_MAESTRAQ_U 0xe013
+#define PCI_DEVICE_ID_EICON_MAESTRAP 0xe014
+
+#define PCI_VENDOR_ID_CISCO 0x1137
+
+#define PCI_VENDOR_ID_ZIATECH 0x1138
+#define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550
+
+#define PCI_VENDOR_ID_SYSKONNECT 0x1148
+#define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200
+#define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300
+#define PCI_DEVICE_ID_SYSKONNECT_YU 0x4320
+#define PCI_DEVICE_ID_SYSKONNECT_9DXX 0x4400
+#define PCI_DEVICE_ID_SYSKONNECT_9MXX 0x4500
+
+#define PCI_VENDOR_ID_DIGI 0x114f
+#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E 0x0070
+#define PCI_DEVICE_ID_DIGI_DF_M_E 0x0071
+#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A 0x0072
+#define PCI_DEVICE_ID_DIGI_DF_M_A 0x0073
+#define PCI_DEVICE_ID_DIGI_NEO_8 0x00B1
+#define PCI_DEVICE_ID_NEO_2DB9 0x00C8
+#define PCI_DEVICE_ID_NEO_2DB9PRI 0x00C9
+#define PCI_DEVICE_ID_NEO_2RJ45 0x00CA
+#define PCI_DEVICE_ID_NEO_2RJ45PRI 0x00CB
+#define PCIE_DEVICE_ID_NEO_4_IBM 0x00F4
+
+#define PCI_VENDOR_ID_XIRCOM 0x115d
+#define PCI_DEVICE_ID_XIRCOM_RBM56G 0x0101
+#define PCI_DEVICE_ID_XIRCOM_X3201_MDM 0x0103
+
+#define PCI_VENDOR_ID_SERVERWORKS 0x1166
+#define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008
+#define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009
+#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017
+#define PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB 0x0036
+#define PCI_DEVICE_ID_SERVERWORKS_EPB 0x0103
+#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4 0x0200
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6 0x0203
+#define PCI_DEVICE_ID_SERVERWORKS_HT1000SB 0x0205
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
+#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
+#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408
+
+#define PCI_VENDOR_ID_SBE 0x1176
+#define PCI_DEVICE_ID_SBE_WANXL100 0x0301
+#define PCI_DEVICE_ID_SBE_WANXL200 0x0302
+#define PCI_DEVICE_ID_SBE_WANXL400 0x0104
+#define PCI_SUBDEVICE_ID_SBE_T3E3 0x0009
+#define PCI_SUBDEVICE_ID_SBE_2T3E3_P0 0x0901
+#define PCI_SUBDEVICE_ID_SBE_2T3E3_P1 0x0902
+
+#define PCI_VENDOR_ID_TOSHIBA 0x1179
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1 0x0101
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2 0x0102
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_3 0x0103
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_5 0x0105
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC100 0x0617
+
+#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
+#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030
+#define PCI_DEVICE_ID_TOSHIBA_TC35815_NWU 0x0031
+#define PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939 0x0032
+#define PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE 0x0105
+#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108
+#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3
+
+#define PCI_VENDOR_ID_ATTO 0x117c
+
+#define PCI_VENDOR_ID_RICOH 0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465
+#define PCI_DEVICE_ID_RICOH_RL5C466 0x0466
+#define PCI_DEVICE_ID_RICOH_RL5C475 0x0475
+#define PCI_DEVICE_ID_RICOH_RL5C476 0x0476
+#define PCI_DEVICE_ID_RICOH_RL5C478 0x0478
+#define PCI_DEVICE_ID_RICOH_R5C822 0x0822
+#define PCI_DEVICE_ID_RICOH_R5CE822 0xe822
+#define PCI_DEVICE_ID_RICOH_R5CE823 0xe823
+#define PCI_DEVICE_ID_RICOH_R5C832 0x0832
+#define PCI_DEVICE_ID_RICOH_R5C843 0x0843
+
+#define PCI_VENDOR_ID_DLINK 0x1186
+#define PCI_DEVICE_ID_DLINK_DGE510T 0x4c00
+
+#define PCI_VENDOR_ID_ARTOP 0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005
+#define PCI_DEVICE_ID_ARTOP_ATP860 0x0006
+#define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007
+#define PCI_DEVICE_ID_ARTOP_ATP865 0x0008
+#define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009
+#define PCI_DEVICE_ID_ARTOP_ATP867A 0x000A
+#define PCI_DEVICE_ID_ARTOP_ATP867B 0x000B
+#define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002
+#define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010
+#define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020
+#define PCI_DEVICE_ID_ARTOP_AEC7612S 0x8030
+#define PCI_DEVICE_ID_ARTOP_AEC7612D 0x8040
+#define PCI_DEVICE_ID_ARTOP_AEC7612SUW 0x8050
+#define PCI_DEVICE_ID_ARTOP_8060 0x8060
+
+#define PCI_VENDOR_ID_ZEITNET 0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221 0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225 0x0002
+
+#define PCI_VENDOR_ID_FUJITSU_ME 0x119e
+#define PCI_DEVICE_ID_FUJITSU_FS155 0x0001
+#define PCI_DEVICE_ID_FUJITSU_FS50 0x0003
+
+#define PCI_SUBVENDOR_ID_KEYSPAN 0x11a9
+#define PCI_SUBDEVICE_ID_KEYSPAN_SX2 0x5334
+
+#define PCI_VENDOR_ID_MARVELL 0x11ab
+#define PCI_VENDOR_ID_MARVELL_EXT 0x1b4b
+#define PCI_DEVICE_ID_MARVELL_GT64111 0x4146
+#define PCI_DEVICE_ID_MARVELL_GT64260 0x6430
+#define PCI_DEVICE_ID_MARVELL_MV64360 0x6460
+#define PCI_DEVICE_ID_MARVELL_MV64460 0x6480
+#define PCI_DEVICE_ID_MARVELL_88ALP01_NAND 0x4100
+#define PCI_DEVICE_ID_MARVELL_88ALP01_SD 0x4101
+#define PCI_DEVICE_ID_MARVELL_88ALP01_CCIC 0x4102
+
+#define PCI_VENDOR_ID_V3 0x11b0
+#define PCI_DEVICE_ID_V3_V960 0x0001
+#define PCI_DEVICE_ID_V3_V351 0x0002
+
+#define PCI_VENDOR_ID_ATT 0x11c1
+#define PCI_DEVICE_ID_ATT_VENUS_MODEM 0x480
+
+#define PCI_VENDOR_ID_SPECIALIX 0x11cb
+#define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004
+
+#define PCI_VENDOR_ID_ANALOG_DEVICES 0x11d4
+#define PCI_DEVICE_ID_AD1889JS 0x1889
+
+#define PCI_DEVICE_ID_SEGA_BBA 0x1234
+
+#define PCI_VENDOR_ID_ZORAN 0x11de
+#define PCI_DEVICE_ID_ZORAN_36057 0x6057
+#define PCI_DEVICE_ID_ZORAN_36120 0x6120
+
+#define PCI_VENDOR_ID_COMPEX 0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
+
+#define PCI_VENDOR_ID_PMC_Sierra 0x11f8
+
+#define PCI_VENDOR_ID_RP 0x11fe
+#define PCI_DEVICE_ID_RP32INTF 0x0001
+#define PCI_DEVICE_ID_RP8INTF 0x0002
+#define PCI_DEVICE_ID_RP16INTF 0x0003
+#define PCI_DEVICE_ID_RP4QUAD 0x0004
+#define PCI_DEVICE_ID_RP8OCTA 0x0005
+#define PCI_DEVICE_ID_RP8J 0x0006
+#define PCI_DEVICE_ID_RP4J 0x0007
+#define PCI_DEVICE_ID_RP8SNI 0x0008
+#define PCI_DEVICE_ID_RP16SNI 0x0009
+#define PCI_DEVICE_ID_RPP4 0x000A
+#define PCI_DEVICE_ID_RPP8 0x000B
+#define PCI_DEVICE_ID_RP4M 0x000D
+#define PCI_DEVICE_ID_RP2_232 0x000E
+#define PCI_DEVICE_ID_RP2_422 0x000F
+#define PCI_DEVICE_ID_URP32INTF 0x0801
+#define PCI_DEVICE_ID_URP8INTF 0x0802
+#define PCI_DEVICE_ID_URP16INTF 0x0803
+#define PCI_DEVICE_ID_URP8OCTA 0x0805
+#define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C
+#define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D
+#define PCI_DEVICE_ID_CRP16INTF 0x0903
+
+#define PCI_VENDOR_ID_CYCLADES 0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101
+#define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102
+#define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103
+#define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104
+#define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201
+#define PCI_DEVICE_ID_PC300_RX_2 0x0300
+#define PCI_DEVICE_ID_PC300_RX_1 0x0301
+#define PCI_DEVICE_ID_PC300_TE_2 0x0310
+#define PCI_DEVICE_ID_PC300_TE_1 0x0311
+#define PCI_DEVICE_ID_PC300_TE_M_2 0x0320
+#define PCI_DEVICE_ID_PC300_TE_M_1 0x0321
+
+#define PCI_VENDOR_ID_ESSENTIAL 0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001
+
+#define PCI_VENDOR_ID_O2 0x1217
+#define PCI_DEVICE_ID_O2_6729 0x6729
+#define PCI_DEVICE_ID_O2_6730 0x673a
+#define PCI_DEVICE_ID_O2_6832 0x6832
+#define PCI_DEVICE_ID_O2_6836 0x6836
+#define PCI_DEVICE_ID_O2_6812 0x6872
+#define PCI_DEVICE_ID_O2_6933 0x6933
+#define PCI_DEVICE_ID_O2_8120 0x8120
+#define PCI_DEVICE_ID_O2_8220 0x8220
+#define PCI_DEVICE_ID_O2_8221 0x8221
+#define PCI_DEVICE_ID_O2_8320 0x8320
+#define PCI_DEVICE_ID_O2_8321 0x8321
+
+#define PCI_VENDOR_ID_3DFX 0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002
+#define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003
+#define PCI_DEVICE_ID_3DFX_VOODOO3 0x0005
+#define PCI_DEVICE_ID_3DFX_VOODOO5 0x0009
+
+#define PCI_VENDOR_ID_AVM 0x1244
+#define PCI_DEVICE_ID_AVM_B1 0x0700
+#define PCI_DEVICE_ID_AVM_C4 0x0800
+#define PCI_DEVICE_ID_AVM_A1 0x0a00
+#define PCI_DEVICE_ID_AVM_A1_V2 0x0e00
+#define PCI_DEVICE_ID_AVM_C2 0x1100
+#define PCI_DEVICE_ID_AVM_T1 0x1200
+
+#define PCI_VENDOR_ID_STALLION 0x124d
+
+/* Allied Telesyn */
+#define PCI_VENDOR_ID_AT 0x1259
+#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
+#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
+
+#define PCI_VENDOR_ID_ESS 0x125d
+#define PCI_DEVICE_ID_ESS_ESS1968 0x1968
+#define PCI_DEVICE_ID_ESS_ESS1978 0x1978
+#define PCI_DEVICE_ID_ESS_ALLEGRO_1 0x1988
+#define PCI_DEVICE_ID_ESS_ALLEGRO 0x1989
+#define PCI_DEVICE_ID_ESS_CANYON3D_2LE 0x1990
+#define PCI_DEVICE_ID_ESS_CANYON3D_2 0x1992
+#define PCI_DEVICE_ID_ESS_MAESTRO3 0x1998
+#define PCI_DEVICE_ID_ESS_MAESTRO3_1 0x1999
+#define PCI_DEVICE_ID_ESS_MAESTRO3_HW 0x199a
+#define PCI_DEVICE_ID_ESS_MAESTRO3_2 0x199b
+
+#define PCI_VENDOR_ID_SATSAGEM 0x1267
+#define PCI_DEVICE_ID_SATSAGEM_NICCY 0x1016
+
+#define PCI_VENDOR_ID_ENSONIQ 0x1274
+#define PCI_DEVICE_ID_ENSONIQ_CT5880 0x5880
+#define PCI_DEVICE_ID_ENSONIQ_ES1370 0x5000
+#define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371
+
+#define PCI_VENDOR_ID_TRANSMETA 0x1279
+#define PCI_DEVICE_ID_EFFICEON 0x0060
+
+#define PCI_VENDOR_ID_ROCKWELL 0x127A
+
+#define PCI_VENDOR_ID_ITE 0x1283
+#define PCI_DEVICE_ID_ITE_8172 0x8172
+#define PCI_DEVICE_ID_ITE_8211 0x8211
+#define PCI_DEVICE_ID_ITE_8212 0x8212
+#define PCI_DEVICE_ID_ITE_8213 0x8213
+#define PCI_DEVICE_ID_ITE_8152 0x8152
+#define PCI_DEVICE_ID_ITE_8872 0x8872
+#define PCI_DEVICE_ID_ITE_IT8330G_0 0xe886
+
+/* formerly Platform Tech */
+#define PCI_DEVICE_ID_ESS_ESS0100 0x0100
+
+#define PCI_VENDOR_ID_ALTEON 0x12ae
+
+#define PCI_SUBVENDOR_ID_CONNECT_TECH 0x12c4
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 0x0001
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232 0x0002
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232 0x0003
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485 0x0004
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4 0x0005
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485 0x0006
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2 0x0007
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485 0x0008
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6 0x0009
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1 0x000A
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1 0x000B
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_20MHZ 0x000C
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_PTM 0x000D
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_NT960PCI 0x0100
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_2 0x0201
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_TITAN_4 0x0202
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_232 0x0300
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_232 0x0301
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_232 0x0302
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_1_1 0x0310
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_2 0x0311
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4 0x0312
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2 0x0320
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4 0x0321
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8 0x0322
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_485 0x0330
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_485 0x0331
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485 0x0332
+
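/*
 * Editor's note: a hedged sketch (not part of the imported header) of how
 * subsystem IDs like the Connect Tech entries above are matched.  Boards
 * built around a generic bridge or UART chip are told apart by the
 * subvendor/subdevice fields of struct pci_device_id, with vendor/device
 * often left as PCI_ANY_ID or set to the bridge chip's own IDs.  The table
 * name below is hypothetical.
 */
#include <linux/pci.h>

static const struct pci_device_id connect_tech_example_ids[] = {
	{ .vendor    = PCI_ANY_ID,
	  .device    = PCI_ANY_ID,
	  .subvendor = PCI_SUBVENDOR_ID_CONNECT_TECH,
	  .subdevice = PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232 },
	{ 0, }	/* all-zero terminator */
};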
+#define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16 0x0011
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC 0x0041
+#define PCI_SUBVENDOR_ID_CHASE_PCIRAS 0x124D
+#define PCI_SUBDEVICE_ID_CHASE_PCIRAS4 0xF001
+#define PCI_SUBDEVICE_ID_CHASE_PCIRAS8 0xF010
+
+#define PCI_VENDOR_ID_AUREAL 0x12eb
+#define PCI_DEVICE_ID_AUREAL_VORTEX_1 0x0001
+#define PCI_DEVICE_ID_AUREAL_VORTEX_2 0x0002
+#define PCI_DEVICE_ID_AUREAL_ADVANTAGE 0x0003
+
+#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8
+#define PCI_DEVICE_ID_LML_33R10 0x8a02
+
+#define PCI_VENDOR_ID_ESDGMBH 0x12fe
+#define PCI_DEVICE_ID_ESDGMBH_CPCIASIO4 0x0111
+
+#define PCI_VENDOR_ID_CB 0x1307 /* Measurement Computing */
+
+#define PCI_VENDOR_ID_SIIG 0x131f
+#define PCI_SUBVENDOR_ID_SIIG 0x131f
+#define PCI_DEVICE_ID_SIIG_1S_10x_550 0x1000
+#define PCI_DEVICE_ID_SIIG_1S_10x_650 0x1001
+#define PCI_DEVICE_ID_SIIG_1S_10x_850 0x1002
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012
+#define PCI_DEVICE_ID_SIIG_1P_10x 0x1020
+#define PCI_DEVICE_ID_SIIG_2P_10x 0x1021
+#define PCI_DEVICE_ID_SIIG_2S_10x_550 0x1030
+#define PCI_DEVICE_ID_SIIG_2S_10x_650 0x1031
+#define PCI_DEVICE_ID_SIIG_2S_10x_850 0x1032
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036
+#define PCI_DEVICE_ID_SIIG_4S_10x_550 0x1050
+#define PCI_DEVICE_ID_SIIG_4S_10x_650 0x1051
+#define PCI_DEVICE_ID_SIIG_4S_10x_850 0x1052
+#define PCI_DEVICE_ID_SIIG_1S_20x_550 0x2000
+#define PCI_DEVICE_ID_SIIG_1S_20x_650 0x2001
+#define PCI_DEVICE_ID_SIIG_1S_20x_850 0x2002
+#define PCI_DEVICE_ID_SIIG_1P_20x 0x2020
+#define PCI_DEVICE_ID_SIIG_2P_20x 0x2021
+#define PCI_DEVICE_ID_SIIG_2S_20x_550 0x2030
+#define PCI_DEVICE_ID_SIIG_2S_20x_650 0x2031
+#define PCI_DEVICE_ID_SIIG_2S_20x_850 0x2032
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012
+#define PCI_DEVICE_ID_SIIG_4S_20x_550 0x2050
+#define PCI_DEVICE_ID_SIIG_4S_20x_650 0x2051
+#define PCI_DEVICE_ID_SIIG_4S_20x_850 0x2052
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062
+#define PCI_DEVICE_ID_SIIG_8S_20x_550 0x2080
+#define PCI_DEVICE_ID_SIIG_8S_20x_650 0x2081
+#define PCI_DEVICE_ID_SIIG_8S_20x_850 0x2082
+#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL 0x2050
+
+#define PCI_VENDOR_ID_RADISYS 0x1331
+
+#define PCI_VENDOR_ID_MICRO_MEMORY 0x1332
+#define PCI_DEVICE_ID_MICRO_MEMORY_5415CN 0x5415
+#define PCI_DEVICE_ID_MICRO_MEMORY_5425CN 0x5425
+#define PCI_DEVICE_ID_MICRO_MEMORY_6155 0x6155
+
+#define PCI_VENDOR_ID_DOMEX 0x134a
+#define PCI_DEVICE_ID_DOMEX_DMX3191D 0x0001
+
+#define PCI_VENDOR_ID_INTASHIELD 0x135a
+#define PCI_DEVICE_ID_INTASHIELD_IS200 0x0d80
+#define PCI_DEVICE_ID_INTASHIELD_IS400 0x0dc0
+
+#define PCI_VENDOR_ID_QUATECH 0x135C
+#define PCI_DEVICE_ID_QUATECH_QSC100 0x0010
+#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020
+#define PCI_DEVICE_ID_QUATECH_DSC200 0x0030
+#define PCI_DEVICE_ID_QUATECH_QSC200 0x0040
+#define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050
+#define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060
+#define PCI_DEVICE_ID_QUATECH_QSCP100 0x0120
+#define PCI_DEVICE_ID_QUATECH_DSCP100 0x0130
+#define PCI_DEVICE_ID_QUATECH_QSCP200 0x0140
+#define PCI_DEVICE_ID_QUATECH_DSCP200 0x0150
+#define PCI_DEVICE_ID_QUATECH_QSCLP100 0x0170
+#define PCI_DEVICE_ID_QUATECH_DSCLP100 0x0180
+#define PCI_DEVICE_ID_QUATECH_DSC100E 0x0181
+#define PCI_DEVICE_ID_QUATECH_SSCLP100 0x0190
+#define PCI_DEVICE_ID_QUATECH_QSCLP200 0x01A0
+#define PCI_DEVICE_ID_QUATECH_DSCLP200 0x01B0
+#define PCI_DEVICE_ID_QUATECH_DSC200E 0x01B1
+#define PCI_DEVICE_ID_QUATECH_SSCLP200 0x01C0
+#define PCI_DEVICE_ID_QUATECH_ESCLP100 0x01E0
+#define PCI_DEVICE_ID_QUATECH_SPPXP_100 0x0278
+
+#define PCI_VENDOR_ID_SEALEVEL 0x135e
+#define PCI_DEVICE_ID_SEALEVEL_U530 0x7101
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM2 0x7201
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM422 0x7402
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM232 0x7202
+#define PCI_DEVICE_ID_SEALEVEL_COMM4 0x7401
+#define PCI_DEVICE_ID_SEALEVEL_COMM8 0x7801
+#define PCI_DEVICE_ID_SEALEVEL_7803 0x7803
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM8 0x7804
+
+#define PCI_VENDOR_ID_HYPERCOPE 0x1365
+#define PCI_DEVICE_ID_HYPERCOPE_PLX 0x9050
+#define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO 0x0104
+#define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO 0x0106
+#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107
+#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108
+
+#define PCI_VENDOR_ID_DIGIGRAM 0x1369
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002
+
+#define PCI_VENDOR_ID_KAWASAKI 0x136b
+#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
+
+#define PCI_VENDOR_ID_CNET 0x1371
+#define PCI_DEVICE_ID_CNET_GIGACARD 0x434e
+
+#define PCI_VENDOR_ID_LMC 0x1376
+#define PCI_DEVICE_ID_LMC_HSSI 0x0003
+#define PCI_DEVICE_ID_LMC_DS3 0x0004
+#define PCI_DEVICE_ID_LMC_SSI 0x0005
+#define PCI_DEVICE_ID_LMC_T1 0x0006
+
+#define PCI_VENDOR_ID_NETGEAR 0x1385
+#define PCI_DEVICE_ID_NETGEAR_GA620 0x620a
+
+#define PCI_VENDOR_ID_APPLICOM 0x1389
+#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001
+#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
+#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
+
+#define PCI_VENDOR_ID_MOXA 0x1393
+#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
+#define PCI_DEVICE_ID_MOXA_CP102 0x1020
+#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
+#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
+#define PCI_DEVICE_ID_MOXA_C104 0x1040
+#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
+#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
+#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
+#define PCI_DEVICE_ID_MOXA_CT114 0x1140
+#define PCI_DEVICE_ID_MOXA_CP114 0x1141
+#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
+#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
+#define PCI_DEVICE_ID_MOXA_CP132 0x1320
+#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
+#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
+#define PCI_DEVICE_ID_MOXA_C168 0x1680
+#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
+#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
+#define PCI_DEVICE_ID_MOXA_CP204J 0x2040
+#define PCI_DEVICE_ID_MOXA_C218 0x2180
+#define PCI_DEVICE_ID_MOXA_C320 0x3200
+
+#define PCI_VENDOR_ID_CCD 0x1397
+#define PCI_DEVICE_ID_CCD_HFC4S 0x08B4
+#define PCI_SUBDEVICE_ID_CCD_PMX2S 0x1234
+#define PCI_DEVICE_ID_CCD_HFC8S 0x16B8
+#define PCI_DEVICE_ID_CCD_2BD0 0x2bd0
+#define PCI_DEVICE_ID_CCD_HFCE1 0x30B1
+#define PCI_SUBDEVICE_ID_CCD_SPD4S 0x3136
+#define PCI_SUBDEVICE_ID_CCD_SPDE1 0x3137
+#define PCI_DEVICE_ID_CCD_B000 0xb000
+#define PCI_DEVICE_ID_CCD_B006 0xb006
+#define PCI_DEVICE_ID_CCD_B007 0xb007
+#define PCI_DEVICE_ID_CCD_B008 0xb008
+#define PCI_DEVICE_ID_CCD_B009 0xb009
+#define PCI_DEVICE_ID_CCD_B00A 0xb00a
+#define PCI_DEVICE_ID_CCD_B00B 0xb00b
+#define PCI_DEVICE_ID_CCD_B00C 0xb00c
+#define PCI_DEVICE_ID_CCD_B100 0xb100
+#define PCI_SUBDEVICE_ID_CCD_IOB4ST 0xB520
+#define PCI_SUBDEVICE_ID_CCD_IOB8STR 0xB521
+#define PCI_SUBDEVICE_ID_CCD_IOB8ST 0xB522
+#define PCI_SUBDEVICE_ID_CCD_IOB1E1 0xB523
+#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540
+#define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550
+#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552
+#define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553
+#define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B
+#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560
+#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562
+#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563
+#define PCI_SUBDEVICE_ID_CCD_BNE1D 0xB564
+#define PCI_SUBDEVICE_ID_CCD_BNE1DP 0xB565
+#define PCI_SUBDEVICE_ID_CCD_BN2S 0xB566
+#define PCI_SUBDEVICE_ID_CCD_BN1SM 0xB567
+#define PCI_SUBDEVICE_ID_CCD_BN4SM 0xB568
+#define PCI_SUBDEVICE_ID_CCD_BN2SM 0xB569
+#define PCI_SUBDEVICE_ID_CCD_BNE1M 0xB56A
+#define PCI_SUBDEVICE_ID_CCD_BN8SP 0xB56B
+#define PCI_SUBDEVICE_ID_CCD_HFC4S 0xB620
+#define PCI_SUBDEVICE_ID_CCD_HFC8S 0xB622
+#define PCI_DEVICE_ID_CCD_B700 0xb700
+#define PCI_DEVICE_ID_CCD_B701 0xb701
+#define PCI_SUBDEVICE_ID_CCD_HFCE1 0xC523
+#define PCI_SUBDEVICE_ID_CCD_OV2S 0xE884
+#define PCI_SUBDEVICE_ID_CCD_OV4S 0xE888
+#define PCI_SUBDEVICE_ID_CCD_OV8S 0xE998
+
+#define PCI_VENDOR_ID_EXAR 0x13a8
+#define PCI_DEVICE_ID_EXAR_XR17C152 0x0152
+#define PCI_DEVICE_ID_EXAR_XR17C154 0x0154
+#define PCI_DEVICE_ID_EXAR_XR17C158 0x0158
+#define PCI_DEVICE_ID_EXAR_XR17V352 0x0352
+#define PCI_DEVICE_ID_EXAR_XR17V354 0x0354
+#define PCI_DEVICE_ID_EXAR_XR17V358 0x0358
+
+#define PCI_VENDOR_ID_MICROGATE 0x13c0
+#define PCI_DEVICE_ID_MICROGATE_USC 0x0010
+#define PCI_DEVICE_ID_MICROGATE_SCA 0x0030
+
+#define PCI_VENDOR_ID_3WARE 0x13C1
+#define PCI_DEVICE_ID_3WARE_1000 0x1000
+#define PCI_DEVICE_ID_3WARE_7000 0x1001
+#define PCI_DEVICE_ID_3WARE_9000 0x1002
+
+#define PCI_VENDOR_ID_IOMEGA 0x13ca
+#define PCI_DEVICE_ID_IOMEGA_BUZ 0x4231
+
+#define PCI_VENDOR_ID_ABOCOM 0x13D1
+#define PCI_DEVICE_ID_ABOCOM_2BD1 0x2BD1
+
+#define PCI_VENDOR_ID_SUNDANCE 0x13f0
+
+#define PCI_VENDOR_ID_CMEDIA 0x13f6
+#define PCI_DEVICE_ID_CMEDIA_CM8338A 0x0100
+#define PCI_DEVICE_ID_CMEDIA_CM8338B 0x0101
+#define PCI_DEVICE_ID_CMEDIA_CM8738 0x0111
+#define PCI_DEVICE_ID_CMEDIA_CM8738B 0x0112
+
+#define PCI_VENDOR_ID_ADVANTECH 0x13fe
+
+#define PCI_VENDOR_ID_MEILHAUS 0x1402
+
+#define PCI_VENDOR_ID_LAVA 0x1407
+#define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */
+#define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */
+#define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */
+#define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */
+#define PCI_DEVICE_ID_LAVA_QUAD_A 0x0201 /* 2x 16650, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUAD_B 0x0202 /* 2x 16650, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_SSERIAL 0x0500 /* 1x 16550 */
+#define PCI_DEVICE_ID_LAVA_PORT_650 0x0600 /* 1x 16650 */
+#define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */
+#define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR 0x8800
+
+#define PCI_VENDOR_ID_TIMEDIA 0x1409
+#define PCI_DEVICE_ID_TIMEDIA_1889 0x7168
+
+#define PCI_VENDOR_ID_ICE 0x1412
+#define PCI_DEVICE_ID_ICE_1712 0x1712
+#define PCI_DEVICE_ID_VT1724 0x1724
+
+#define PCI_VENDOR_ID_OXSEMI 0x1415
+#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
+#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
+#define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118
+#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C
+#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
+#define PCI_DEVICE_ID_OXSEMI_C950 0x950B
+#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
+#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
+#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
+#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523
+#define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define PCI_VENDOR_ID_ADLINK 0x144a
+
+#define PCI_VENDOR_ID_SAMSUNG 0x144d
+
+#define PCI_VENDOR_ID_GIGABYTE 0x1458
+
+#define PCI_VENDOR_ID_AMBIT 0x1468
+
+#define PCI_VENDOR_ID_MYRICOM 0x14c1
+
+#define PCI_VENDOR_ID_TITAN 0x14D2
+#define PCI_DEVICE_ID_TITAN_010L 0x8001
+#define PCI_DEVICE_ID_TITAN_100L 0x8010
+#define PCI_DEVICE_ID_TITAN_110L 0x8011
+#define PCI_DEVICE_ID_TITAN_200L 0x8020
+#define PCI_DEVICE_ID_TITAN_210L 0x8021
+#define PCI_DEVICE_ID_TITAN_400L 0x8040
+#define PCI_DEVICE_ID_TITAN_800L 0x8080
+#define PCI_DEVICE_ID_TITAN_100 0xA001
+#define PCI_DEVICE_ID_TITAN_200 0xA005
+#define PCI_DEVICE_ID_TITAN_400 0xA003
+#define PCI_DEVICE_ID_TITAN_800B 0xA004
+
+#define PCI_VENDOR_ID_PANACOM 0x14d4
+#define PCI_DEVICE_ID_PANACOM_QUADMODEM 0x0400
+#define PCI_DEVICE_ID_PANACOM_DUALMODEM 0x0402
+
+#define PCI_VENDOR_ID_SIPACKETS 0x14d9
+#define PCI_DEVICE_ID_SP1011 0x0010
+
+#define PCI_VENDOR_ID_AFAVLAB 0x14db
+#define PCI_DEVICE_ID_AFAVLAB_P028 0x2180
+#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182
+#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150
+
+#define PCI_VENDOR_ID_AMPLICON 0x14dc
+
+#define PCI_VENDOR_ID_BCM_GVC 0x14a4
+#define PCI_VENDOR_ID_BROADCOM 0x14e4
+#define PCI_DEVICE_ID_TIGON3_5752 0x1600
+#define PCI_DEVICE_ID_TIGON3_5752M 0x1601
+#define PCI_DEVICE_ID_NX2_5709 0x1639
+#define PCI_DEVICE_ID_NX2_5709S 0x163a
+#define PCI_DEVICE_ID_TIGON3_5700 0x1644
+#define PCI_DEVICE_ID_TIGON3_5701 0x1645
+#define PCI_DEVICE_ID_TIGON3_5702 0x1646
+#define PCI_DEVICE_ID_TIGON3_5703 0x1647
+#define PCI_DEVICE_ID_TIGON3_5704 0x1648
+#define PCI_DEVICE_ID_TIGON3_5704S_2 0x1649
+#define PCI_DEVICE_ID_NX2_5706 0x164a
+#define PCI_DEVICE_ID_NX2_5708 0x164c
+#define PCI_DEVICE_ID_TIGON3_5702FE 0x164d
+#define PCI_DEVICE_ID_NX2_57710 0x164e
+#define PCI_DEVICE_ID_NX2_57711 0x164f
+#define PCI_DEVICE_ID_NX2_57711E 0x1650
+#define PCI_DEVICE_ID_TIGON3_5705 0x1653
+#define PCI_DEVICE_ID_TIGON3_5705_2 0x1654
+#define PCI_DEVICE_ID_TIGON3_5719 0x1657
+#define PCI_DEVICE_ID_TIGON3_5721 0x1659
+#define PCI_DEVICE_ID_TIGON3_5722 0x165a
+#define PCI_DEVICE_ID_TIGON3_5723 0x165b
+#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
+#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
+#define PCI_DEVICE_ID_NX2_57712 0x1662
+#define PCI_DEVICE_ID_NX2_57712E 0x1663
+#define PCI_DEVICE_ID_NX2_57712_MF 0x1663
+#define PCI_DEVICE_ID_TIGON3_5714 0x1668
+#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
+#define PCI_DEVICE_ID_TIGON3_5780 0x166a
+#define PCI_DEVICE_ID_TIGON3_5780S 0x166b
+#define PCI_DEVICE_ID_TIGON3_5705F 0x166e
+#define PCI_DEVICE_ID_NX2_57712_VF 0x166f
+#define PCI_DEVICE_ID_TIGON3_5754M 0x1672
+#define PCI_DEVICE_ID_TIGON3_5755M 0x1673
+#define PCI_DEVICE_ID_TIGON3_5756 0x1674
+#define PCI_DEVICE_ID_TIGON3_5750 0x1676
+#define PCI_DEVICE_ID_TIGON3_5751 0x1677
+#define PCI_DEVICE_ID_TIGON3_5715 0x1678
+#define PCI_DEVICE_ID_TIGON3_5715S 0x1679
+#define PCI_DEVICE_ID_TIGON3_5754 0x167a
+#define PCI_DEVICE_ID_TIGON3_5755 0x167b
+#define PCI_DEVICE_ID_TIGON3_5751M 0x167d
+#define PCI_DEVICE_ID_TIGON3_5751F 0x167e
+#define PCI_DEVICE_ID_TIGON3_5787F 0x167f
+#define PCI_DEVICE_ID_TIGON3_5761E 0x1680
+#define PCI_DEVICE_ID_TIGON3_5761 0x1681
+#define PCI_DEVICE_ID_TIGON3_5764 0x1684
+#define PCI_DEVICE_ID_NX2_57800 0x168a
+#define PCI_DEVICE_ID_NX2_57840 0x168d
+#define PCI_DEVICE_ID_NX2_57810 0x168e
+#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
+#define PCI_DEVICE_ID_TIGON3_5782 0x1696
+#define PCI_DEVICE_ID_TIGON3_5784 0x1698
+#define PCI_DEVICE_ID_TIGON3_5786 0x169a
+#define PCI_DEVICE_ID_TIGON3_5787 0x169b
+#define PCI_DEVICE_ID_TIGON3_5788 0x169c
+#define PCI_DEVICE_ID_TIGON3_5789 0x169d
+#define PCI_DEVICE_ID_NX2_57840_4_10 0x16a1
+#define PCI_DEVICE_ID_NX2_57840_2_20 0x16a2
+#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
+#define PCI_DEVICE_ID_NX2_57800_MF 0x16a5
+#define PCI_DEVICE_ID_TIGON3_5702X 0x16a6
+#define PCI_DEVICE_ID_TIGON3_5703X 0x16a7
+#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
+#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
+#define PCI_DEVICE_ID_NX2_5706S 0x16aa
+#define PCI_DEVICE_ID_NX2_5708S 0x16ac
+#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
+#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
+#define PCI_DEVICE_ID_NX2_57810_VF 0x16af
+#define PCI_DEVICE_ID_TIGON3_5702A3 0x16c6
+#define PCI_DEVICE_ID_TIGON3_5703A3 0x16c7
+#define PCI_DEVICE_ID_TIGON3_5781 0x16dd
+#define PCI_DEVICE_ID_TIGON3_5753 0x16f7
+#define PCI_DEVICE_ID_TIGON3_5753M 0x16fd
+#define PCI_DEVICE_ID_TIGON3_5753F 0x16fe
+#define PCI_DEVICE_ID_TIGON3_5901 0x170d
+#define PCI_DEVICE_ID_BCM4401B1 0x170c
+#define PCI_DEVICE_ID_TIGON3_5901_2 0x170e
+#define PCI_DEVICE_ID_TIGON3_5906 0x1712
+#define PCI_DEVICE_ID_TIGON3_5906M 0x1713
+#define PCI_DEVICE_ID_BCM4401 0x4401
+#define PCI_DEVICE_ID_BCM4401B0 0x4402
+
+#define PCI_VENDOR_ID_TOPIC 0x151f
+#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
+
+#define PCI_VENDOR_ID_MAINPINE 0x1522
+#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100
+#define PCI_VENDOR_ID_ENE 0x1524
+#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510
+#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
+#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551
+#define PCI_DEVICE_ID_ENE_CB714_SD 0x0750
+#define PCI_DEVICE_ID_ENE_CB714_SD_2 0x0751
+#define PCI_DEVICE_ID_ENE_1211 0x1211
+#define PCI_DEVICE_ID_ENE_1225 0x1225
+#define PCI_DEVICE_ID_ENE_1410 0x1410
+#define PCI_DEVICE_ID_ENE_710 0x1411
+#define PCI_DEVICE_ID_ENE_712 0x1412
+#define PCI_DEVICE_ID_ENE_1420 0x1420
+#define PCI_DEVICE_ID_ENE_720 0x1421
+#define PCI_DEVICE_ID_ENE_722 0x1422
+
+#define PCI_SUBVENDOR_ID_PERLE 0x155f
+#define PCI_SUBDEVICE_ID_PCI_RAS4 0xf001
+#define PCI_SUBDEVICE_ID_PCI_RAS8 0xf010
+
+#define PCI_VENDOR_ID_SYBA 0x1592
+#define PCI_DEVICE_ID_SYBA_2P_EPP 0x0782
+#define PCI_DEVICE_ID_SYBA_1P_ECP 0x0783
+
+#define PCI_VENDOR_ID_MORETON 0x15aa
+#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
+
+#define PCI_VENDOR_ID_VMWARE 0x15ad
+
+#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
+#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
+
+#define PCI_VENDOR_ID_MELLANOX 0x15b3
+#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44
+#define PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE 0x5a46
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
+#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
+#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
+
+#define PCI_VENDOR_ID_DFI 0x15bd
+
+#define PCI_VENDOR_ID_QUICKNET 0x15e2
+#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500
+
+/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+#define PCI_VENDOR_ID_ADDIDATA 0x15B8
+#define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000
+#define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001
+#define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002
+#define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009
+#define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A
+#define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B
+#define PCI_DEVICE_ID_ADDIDATA_APCI7500_3 0x700C
+#define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D
+#define PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E
+#define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F
+#define PCI_DEVICE_ID_ADDIDATA_APCIe7300 0x7010
+#define PCI_DEVICE_ID_ADDIDATA_APCIe7420 0x7011
+#define PCI_DEVICE_ID_ADDIDATA_APCIe7500 0x7012
+#define PCI_DEVICE_ID_ADDIDATA_APCIe7800 0x7013
+
+#define PCI_VENDOR_ID_PDC 0x15e9
+
+#define PCI_VENDOR_ID_FARSITE 0x1619
+#define PCI_DEVICE_ID_FARSITE_T2P 0x0400
+#define PCI_DEVICE_ID_FARSITE_T4P 0x0440
+#define PCI_DEVICE_ID_FARSITE_T1U 0x0610
+#define PCI_DEVICE_ID_FARSITE_T2U 0x0620
+#define PCI_DEVICE_ID_FARSITE_T4U 0x0640
+#define PCI_DEVICE_ID_FARSITE_TE1 0x1610
+#define PCI_DEVICE_ID_FARSITE_TE1C 0x1612
+
+#define PCI_VENDOR_ID_ARIMA 0x161f
+
+#define PCI_VENDOR_ID_BROCADE 0x1657
+#define PCI_DEVICE_ID_BROCADE_CT 0x0014
+#define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017
+#define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021
+
+#define PCI_VENDOR_ID_SIBYTE 0x166d
+#define PCI_DEVICE_ID_BCM1250_PCI 0x0001
+#define PCI_DEVICE_ID_BCM1250_HT 0x0002
+
+#define PCI_VENDOR_ID_ATHEROS 0x168c
+
+#define PCI_VENDOR_ID_NETCELL 0x169c
+#define PCI_DEVICE_ID_REVOLUTION 0x0044
+
+#define PCI_VENDOR_ID_CENATEK 0x16CA
+#define PCI_DEVICE_ID_CENATEK_IDE 0x0001
+
+#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+
+#define PCI_VENDOR_ID_VITESSE 0x1725
+#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
+
+#define PCI_VENDOR_ID_LINKSYS 0x1737
+#define PCI_DEVICE_ID_LINKSYS_EG1064 0x1064
+
+#define PCI_VENDOR_ID_ALTIMA 0x173b
+#define PCI_DEVICE_ID_ALTIMA_AC1000 0x03e8
+#define PCI_DEVICE_ID_ALTIMA_AC1001 0x03e9
+#define PCI_DEVICE_ID_ALTIMA_AC9100 0x03ea
+#define PCI_DEVICE_ID_ALTIMA_AC1003 0x03eb
+
+#define PCI_VENDOR_ID_BELKIN 0x1799
+#define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f
+
+#define PCI_VENDOR_ID_RDC 0x17f3
+#define PCI_DEVICE_ID_RDC_R6020 0x6020
+#define PCI_DEVICE_ID_RDC_R6030 0x6030
+#define PCI_DEVICE_ID_RDC_R6040 0x6040
+#define PCI_DEVICE_ID_RDC_R6060 0x6060
+#define PCI_DEVICE_ID_RDC_R6061 0x6061
+#define PCI_DEVICE_ID_RDC_D1010 0x1010
+
+#define PCI_VENDOR_ID_LENOVO 0x17aa
+
+#define PCI_VENDOR_ID_ARECA 0x17d3
+#define PCI_DEVICE_ID_ARECA_1110 0x1110
+#define PCI_DEVICE_ID_ARECA_1120 0x1120
+#define PCI_DEVICE_ID_ARECA_1130 0x1130
+#define PCI_DEVICE_ID_ARECA_1160 0x1160
+#define PCI_DEVICE_ID_ARECA_1170 0x1170
+#define PCI_DEVICE_ID_ARECA_1200 0x1200
+#define PCI_DEVICE_ID_ARECA_1201 0x1201
+#define PCI_DEVICE_ID_ARECA_1202 0x1202
+#define PCI_DEVICE_ID_ARECA_1210 0x1210
+#define PCI_DEVICE_ID_ARECA_1220 0x1220
+#define PCI_DEVICE_ID_ARECA_1230 0x1230
+#define PCI_DEVICE_ID_ARECA_1260 0x1260
+#define PCI_DEVICE_ID_ARECA_1270 0x1270
+#define PCI_DEVICE_ID_ARECA_1280 0x1280
+#define PCI_DEVICE_ID_ARECA_1380 0x1380
+#define PCI_DEVICE_ID_ARECA_1381 0x1381
+#define PCI_DEVICE_ID_ARECA_1680 0x1680
+#define PCI_DEVICE_ID_ARECA_1681 0x1681
+
+#define PCI_VENDOR_ID_S2IO 0x17d5
+#define PCI_DEVICE_ID_S2IO_WIN 0x5731
+#define PCI_DEVICE_ID_S2IO_UNI 0x5831
+#define PCI_DEVICE_ID_HERC_WIN 0x5732
+#define PCI_DEVICE_ID_HERC_UNI 0x5832
+
+#define PCI_VENDOR_ID_SITECOM 0x182d
+#define PCI_DEVICE_ID_SITECOM_DC105V2 0x3069
+
+#define PCI_VENDOR_ID_TOPSPIN 0x1867
+
+#define PCI_VENDOR_ID_COMMTECH 0x18f7
+
+#define PCI_VENDOR_ID_SILAN 0x1904
+
+#define PCI_VENDOR_ID_RENESAS 0x1912
+#define PCI_DEVICE_ID_RENESAS_SH7781 0x0001
+#define PCI_DEVICE_ID_RENESAS_SH7780 0x0002
+#define PCI_DEVICE_ID_RENESAS_SH7763 0x0004
+#define PCI_DEVICE_ID_RENESAS_SH7785 0x0007
+#define PCI_DEVICE_ID_RENESAS_SH7786 0x0010
+
+#define PCI_VENDOR_ID_SOLARFLARE 0x1924
+#define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0 0x0703
+#define PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1 0x6703
+#define PCI_DEVICE_ID_SOLARFLARE_SFC4000B 0x0710
+
+#define PCI_VENDOR_ID_TDI 0x192E
+#define PCI_DEVICE_ID_TDI_EHCI 0x0101
+
+#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_DEVICE_ID_MPC8308 0xc006
+#define PCI_DEVICE_ID_MPC8315E 0x00b4
+#define PCI_DEVICE_ID_MPC8315 0x00b5
+#define PCI_DEVICE_ID_MPC8314E 0x00b6
+#define PCI_DEVICE_ID_MPC8314 0x00b7
+#define PCI_DEVICE_ID_MPC8378E 0x00c4
+#define PCI_DEVICE_ID_MPC8378 0x00c5
+#define PCI_DEVICE_ID_MPC8377E 0x00c6
+#define PCI_DEVICE_ID_MPC8377 0x00c7
+#define PCI_DEVICE_ID_MPC8548E 0x0012
+#define PCI_DEVICE_ID_MPC8548 0x0013
+#define PCI_DEVICE_ID_MPC8543E 0x0014
+#define PCI_DEVICE_ID_MPC8543 0x0015
+#define PCI_DEVICE_ID_MPC8547E 0x0018
+#define PCI_DEVICE_ID_MPC8545E 0x0019
+#define PCI_DEVICE_ID_MPC8545 0x001a
+#define PCI_DEVICE_ID_MPC8569E 0x0061
+#define PCI_DEVICE_ID_MPC8569 0x0060
+#define PCI_DEVICE_ID_MPC8568E 0x0020
+#define PCI_DEVICE_ID_MPC8568 0x0021
+#define PCI_DEVICE_ID_MPC8567E 0x0022
+#define PCI_DEVICE_ID_MPC8567 0x0023
+#define PCI_DEVICE_ID_MPC8533E 0x0030
+#define PCI_DEVICE_ID_MPC8533 0x0031
+#define PCI_DEVICE_ID_MPC8544E 0x0032
+#define PCI_DEVICE_ID_MPC8544 0x0033
+#define PCI_DEVICE_ID_MPC8572E 0x0040
+#define PCI_DEVICE_ID_MPC8572 0x0041
+#define PCI_DEVICE_ID_MPC8536E 0x0050
+#define PCI_DEVICE_ID_MPC8536 0x0051
+#define PCI_DEVICE_ID_P2020E 0x0070
+#define PCI_DEVICE_ID_P2020 0x0071
+#define PCI_DEVICE_ID_P2010E 0x0078
+#define PCI_DEVICE_ID_P2010 0x0079
+#define PCI_DEVICE_ID_P1020E 0x0100
+#define PCI_DEVICE_ID_P1020 0x0101
+#define PCI_DEVICE_ID_P1021E 0x0102
+#define PCI_DEVICE_ID_P1021 0x0103
+#define PCI_DEVICE_ID_P1011E 0x0108
+#define PCI_DEVICE_ID_P1011 0x0109
+#define PCI_DEVICE_ID_P1022E 0x0110
+#define PCI_DEVICE_ID_P1022 0x0111
+#define PCI_DEVICE_ID_P1013E 0x0118
+#define PCI_DEVICE_ID_P1013 0x0119
+#define PCI_DEVICE_ID_P4080E 0x0400
+#define PCI_DEVICE_ID_P4080 0x0401
+#define PCI_DEVICE_ID_P4040E 0x0408
+#define PCI_DEVICE_ID_P4040 0x0409
+#define PCI_DEVICE_ID_P2040E 0x0410
+#define PCI_DEVICE_ID_P2040 0x0411
+#define PCI_DEVICE_ID_P3041E 0x041E
+#define PCI_DEVICE_ID_P3041 0x041F
+#define PCI_DEVICE_ID_P5020E 0x0420
+#define PCI_DEVICE_ID_P5020 0x0421
+#define PCI_DEVICE_ID_P5010E 0x0428
+#define PCI_DEVICE_ID_P5010 0x0429
+#define PCI_DEVICE_ID_MPC8641 0x7010
+#define PCI_DEVICE_ID_MPC8641D 0x7011
+#define PCI_DEVICE_ID_MPC8610 0x7018
+
+#define PCI_VENDOR_ID_PASEMI 0x1959
+
+#define PCI_VENDOR_ID_ATTANSIC 0x1969
+#define PCI_DEVICE_ID_ATTANSIC_L1 0x1048
+#define PCI_DEVICE_ID_ATTANSIC_L2 0x2048
+
+#define PCI_VENDOR_ID_JMICRON 0x197B
+#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
+#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
+#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362
+#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
+#define PCI_DEVICE_ID_JMICRON_JMB364 0x2364
+#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
+#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
+#define PCI_DEVICE_ID_JMICRON_JMB368 0x2368
+#define PCI_DEVICE_ID_JMICRON_JMB369 0x2369
+#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381
+#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
+#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383
+#define PCI_DEVICE_ID_JMICRON_JMB385_MS 0x2388
+#define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391
+#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392
+#define PCI_DEVICE_ID_JMICRON_JMB390_MS 0x2393
+
+#define PCI_VENDOR_ID_KORENIX 0x1982
+#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
+#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
+#define PCI_DEVICE_ID_KORENIX_JETCARDF2 0x1700
+#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
+
+#define PCI_VENDOR_ID_QMI 0x1a32
+
+#define PCI_VENDOR_ID_AZWAVE 0x1a3b
+
+#define PCI_VENDOR_ID_ASMEDIA 0x1b21
+
+#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
+#define PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD 0x0001
+
+#define PCI_VENDOR_ID_TEKRAM 0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
+
+#define PCI_VENDOR_ID_TEHUTI 0x1fc9
+#define PCI_DEVICE_ID_TEHUTI_3009 0x3009
+#define PCI_DEVICE_ID_TEHUTI_3010 0x3010
+#define PCI_DEVICE_ID_TEHUTI_3014 0x3014
+
+#define PCI_VENDOR_ID_HINT 0x3388
+#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013
+
+#define PCI_VENDOR_ID_3DLABS 0x3d3d
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009
+
+#define PCI_VENDOR_ID_NETXEN 0x4040
+#define PCI_DEVICE_ID_NX2031_10GXSR 0x0001
+#define PCI_DEVICE_ID_NX2031_10GCX4 0x0002
+#define PCI_DEVICE_ID_NX2031_4GCU 0x0003
+#define PCI_DEVICE_ID_NX2031_IMEZ 0x0004
+#define PCI_DEVICE_ID_NX2031_HMEZ 0x0005
+#define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024
+#define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025
+#define PCI_DEVICE_ID_NX3031 0x0100
+
+#define PCI_VENDOR_ID_AKS 0x416c
+#define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100
+
+#define PCI_VENDOR_ID_ACCESSIO 0x494f
+#define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0
+
+#define PCI_VENDOR_ID_S3 0x5333
+#define PCI_DEVICE_ID_S3_TRIO 0x8811
+#define PCI_DEVICE_ID_S3_868 0x8880
+#define PCI_DEVICE_ID_S3_968 0x88f0
+#define PCI_DEVICE_ID_S3_SAVAGE4 0x8a25
+#define PCI_DEVICE_ID_S3_PROSAVAGE8 0x8d04
+#define PCI_DEVICE_ID_S3_SONICVIBES 0xca00
+
+#define PCI_VENDOR_ID_DUNORD 0x5544
+#define PCI_DEVICE_ID_DUNORD_I3000 0x0001
+
+#define PCI_VENDOR_ID_DCI 0x6666
+#define PCI_DEVICE_ID_DCI_PCCOM4 0x0001
+#define PCI_DEVICE_ID_DCI_PCCOM8 0x0002
+#define PCI_DEVICE_ID_DCI_PCCOM2 0x0004
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
+#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
+#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
+#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
+#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
+#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
+#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
+#define PCI_DEVICE_ID_INTEL_80333_1 0x0372
+#define PCI_DEVICE_ID_INTEL_82375 0x0482
+#define PCI_DEVICE_ID_INTEL_82424 0x0483
+#define PCI_DEVICE_ID_INTEL_82378 0x0484
+#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
+#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
+#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
+#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821
+#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
+#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
+#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
+#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+#define PCI_DEVICE_ID_INTEL_I960 0x0960
+#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
+#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
+#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
+#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
+#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
+#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
+#define PCI_DEVICE_ID_INTEL_82437 0x122d
+#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
+#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX 0x1234
+#define PCI_DEVICE_ID_INTEL_82441 0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB 0x124b
+#define PCI_DEVICE_ID_INTEL_82439 0x1250
+#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
+#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
+#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
+#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
+#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
+#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
+#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
+#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
+#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
+#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
+#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
+#define PCI_DEVICE_ID_INTEL_82801AA_5 0x2415
+#define PCI_DEVICE_ID_INTEL_82801AA_6 0x2416
+#define PCI_DEVICE_ID_INTEL_82801AA_8 0x2418
+#define PCI_DEVICE_ID_INTEL_82801AB_0 0x2420
+#define PCI_DEVICE_ID_INTEL_82801AB_1 0x2421
+#define PCI_DEVICE_ID_INTEL_82801AB_3 0x2423
+#define PCI_DEVICE_ID_INTEL_82801AB_5 0x2425
+#define PCI_DEVICE_ID_INTEL_82801AB_6 0x2426
+#define PCI_DEVICE_ID_INTEL_82801AB_8 0x2428
+#define PCI_DEVICE_ID_INTEL_82801BA_0 0x2440
+#define PCI_DEVICE_ID_INTEL_82801BA_2 0x2443
+#define PCI_DEVICE_ID_INTEL_82801BA_4 0x2445
+#define PCI_DEVICE_ID_INTEL_82801BA_6 0x2448
+#define PCI_DEVICE_ID_INTEL_82801BA_8 0x244a
+#define PCI_DEVICE_ID_INTEL_82801BA_9 0x244b
+#define PCI_DEVICE_ID_INTEL_82801BA_10 0x244c
+#define PCI_DEVICE_ID_INTEL_82801BA_11 0x244e
+#define PCI_DEVICE_ID_INTEL_82801E_0 0x2450
+#define PCI_DEVICE_ID_INTEL_82801E_11 0x245b
+#define PCI_DEVICE_ID_INTEL_82801CA_0 0x2480
+#define PCI_DEVICE_ID_INTEL_82801CA_3 0x2483
+#define PCI_DEVICE_ID_INTEL_82801CA_5 0x2485
+#define PCI_DEVICE_ID_INTEL_82801CA_6 0x2486
+#define PCI_DEVICE_ID_INTEL_82801CA_10 0x248a
+#define PCI_DEVICE_ID_INTEL_82801CA_11 0x248b
+#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c
+#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
+#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
+#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2
+#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
+#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
+#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
+#define PCI_DEVICE_ID_INTEL_82801DB_9 0x24c9
+#define PCI_DEVICE_ID_INTEL_82801DB_10 0x24ca
+#define PCI_DEVICE_ID_INTEL_82801DB_11 0x24cb
+#define PCI_DEVICE_ID_INTEL_82801DB_12 0x24cc
+#define PCI_DEVICE_ID_INTEL_82801EB_0 0x24d0
+#define PCI_DEVICE_ID_INTEL_82801EB_1 0x24d1
+#define PCI_DEVICE_ID_INTEL_82801EB_3 0x24d3
+#define PCI_DEVICE_ID_INTEL_82801EB_5 0x24d5
+#define PCI_DEVICE_ID_INTEL_82801EB_6 0x24d6
+#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
+#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc
+#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
+#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
+#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
+#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
+#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
+#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
+#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
+#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
+#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
+#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
+#define PCI_DEVICE_ID_INTEL_82865_IG 0x2572
+#define PCI_DEVICE_ID_INTEL_82875_HB 0x2578
+#define PCI_DEVICE_ID_INTEL_82915G_HB 0x2580
+#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
+#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
+#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
+#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0
+#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5
+#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6
+#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
+#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
+#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
+#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
+#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a
+#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d
+#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e
+#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f
+#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
+#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
+#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
+#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
+#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
+#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
+#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
+#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc
+#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd
+#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da
+#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd
+#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de
+#define PCI_DEVICE_ID_INTEL_ICH7_21 0x27df
+#define PCI_DEVICE_ID_INTEL_ICH8_0 0x2810
+#define PCI_DEVICE_ID_INTEL_ICH8_1 0x2811
+#define PCI_DEVICE_ID_INTEL_ICH8_2 0x2812
+#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814
+#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
+#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
+#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
+#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
+#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
+#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
+#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913
+#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
+#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
+#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
+#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18
+#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19
+#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a
+#define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_SAD 0x2c81
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0 0x2c90
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC 0x2ca3
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL 0x2ca8
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR 0x2ca9
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK 0x2caa
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC 0x2cab
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2 0x2d98
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2 0x2d99
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2 0x2d9a
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2 0x2d9c
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2 0x2da0
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2 0x2da1
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2 0x2da2
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2 0x2da3
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2 0x2da8
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2 0x2da9
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2 0x2daa
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2 0x2dab
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2 0x2db0
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3
+#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c
+#define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
+#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
+#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
+#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
+#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
+#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
+#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
+#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
+#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592
+#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595
+#define PCI_DEVICE_ID_INTEL_MCH_PA1 0x3596
+#define PCI_DEVICE_ID_INTEL_MCH_PB 0x3597
+#define PCI_DEVICE_ID_INTEL_MCH_PB1 0x3598
+#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
+#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
+#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
+#define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c
+#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f
+#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610
+#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
+#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
+#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
+#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
+#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
+#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
+#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
+#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB3 0x3c23
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB4 0x3c24
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB5 0x3c25
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB6 0x3c26
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f
+#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
+#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
+#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
+#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
+#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
+#define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41
+#define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42
+#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
+#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
+#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
+#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
+#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
+#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
+#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
+#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
+#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
+#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
+#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
+#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
+#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
+#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
+#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
+#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX 0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX 0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB 0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113
+#define PCI_DEVICE_ID_INTEL_82810_MC1 0x7120
+#define PCI_DEVICE_ID_INTEL_82810_IG1 0x7121
+#define PCI_DEVICE_ID_INTEL_82810_MC3 0x7122
+#define PCI_DEVICE_ID_INTEL_82810_IG3 0x7123
+#define PCI_DEVICE_ID_INTEL_82810E_MC 0x7124
+#define PCI_DEVICE_ID_INTEL_82810E_IG 0x7125
+#define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192
+#define PCI_DEVICE_ID_INTEL_440MX 0x7195
+#define PCI_DEVICE_ID_INTEL_440MX_6 0x7196
+#define PCI_DEVICE_ID_INTEL_82443MX_0 0x7198
+#define PCI_DEVICE_ID_INTEL_82443MX_1 0x7199
+#define PCI_DEVICE_ID_INTEL_82443MX_3 0x719b
+#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
+#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
+#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
+#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
+#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
+#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
+#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186
+#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
+#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
+#define PCI_DEVICE_ID_INTEL_82451NX 0x84ca
+#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb
+#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
+#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
+#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+
+#define PCI_VENDOR_ID_SCALEMP 0x8686
+#define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010
+
+#define PCI_VENDOR_ID_COMPUTONE 0x8e0e
+#define PCI_DEVICE_ID_COMPUTONE_PG 0x0302
+#define PCI_SUBVENDOR_ID_COMPUTONE 0x8e0e
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG4 0x0001
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG8 0x0002
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG6 0x0003
+
+#define PCI_VENDOR_ID_KTI 0x8e2e
+
+#define PCI_VENDOR_ID_ADAPTEC 0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810 0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7821 0x2178
+#define PCI_DEVICE_ID_ADAPTEC_38602 0x3860
+#define PCI_DEVICE_ID_ADAPTEC_7850 0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855 0x5578
+#define PCI_DEVICE_ID_ADAPTEC_3860 0x6038
+#define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860 0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861 0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870 0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871 0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872 0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873 0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874 0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895 0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880 0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881 0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882 0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883 0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884 0x8478
+#define PCI_DEVICE_ID_ADAPTEC_7885 0x8578
+#define PCI_DEVICE_ID_ADAPTEC_7886 0x8678
+#define PCI_DEVICE_ID_ADAPTEC_7887 0x8778
+#define PCI_DEVICE_ID_ADAPTEC_7888 0x8878
+
+#define PCI_VENDOR_ID_ADAPTEC2 0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011
+#define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013
+#define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051
+#define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f
+#define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080
+#define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081
+#define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083
+#define PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f
+#define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0
+#define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1
+#define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3
+#define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf
+#define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN 0x0500
+#define PCI_DEVICE_ID_ADAPTEC2_SCAMP 0x0503
+
+#define PCI_VENDOR_ID_HOLTEK 0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565 0x6565
+
+#define PCI_VENDOR_ID_NETMOS 0x9710
+#define PCI_DEVICE_ID_NETMOS_9705 0x9705
+#define PCI_DEVICE_ID_NETMOS_9715 0x9715
+#define PCI_DEVICE_ID_NETMOS_9735 0x9735
+#define PCI_DEVICE_ID_NETMOS_9745 0x9745
+#define PCI_DEVICE_ID_NETMOS_9755 0x9755
+#define PCI_DEVICE_ID_NETMOS_9805 0x9805
+#define PCI_DEVICE_ID_NETMOS_9815 0x9815
+#define PCI_DEVICE_ID_NETMOS_9835 0x9835
+#define PCI_DEVICE_ID_NETMOS_9845 0x9845
+#define PCI_DEVICE_ID_NETMOS_9855 0x9855
+#define PCI_DEVICE_ID_NETMOS_9865 0x9865
+#define PCI_DEVICE_ID_NETMOS_9900 0x9900
+#define PCI_DEVICE_ID_NETMOS_9901 0x9901
+#define PCI_DEVICE_ID_NETMOS_9904 0x9904
+#define PCI_DEVICE_ID_NETMOS_9912 0x9912
+#define PCI_DEVICE_ID_NETMOS_9922 0x9922
+
+#define PCI_VENDOR_ID_3COM_2 0xa727
+
+#define PCI_VENDOR_ID_DIGIUM 0xd161
+#define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410
+
+#define PCI_SUBVENDOR_ID_EXSYS 0xd84d
+#define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014
+#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055
+
+#define PCI_VENDOR_ID_TIGERJET 0xe159
+#define PCI_DEVICE_ID_TIGERJET_300 0x0001
+#define PCI_DEVICE_ID_TIGERJET_100 0x0002
+
+#define PCI_VENDOR_ID_XILINX_RME 0xea60
+#define PCI_DEVICE_ID_RME_DIGI32 0x9896
+#define PCI_DEVICE_ID_RME_DIGI32_PRO 0x9897
+#define PCI_DEVICE_ID_RME_DIGI32_8 0x9898
+
+#define PCI_VENDOR_ID_XEN 0x5853
+#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001
+
+#define PCI_VENDOR_ID_OCZ 0x1b85
+
+#endif /* _LINUX_PCI_IDS_H */
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
new file mode 100644
index 000000000..4f1089f2c
--- /dev/null
+++ b/include/linux/pcieport_if.h
@@ -0,0 +1,68 @@
+/*
+ * File: pcieport_if.h
+ * Purpose: PCI Express Port Bus Driver's IF Data Structure
+ *
+ * Copyright (C) 2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ */
+
+#ifndef _PCIEPORT_IF_H_
+#define _PCIEPORT_IF_H_
+
+/* Port Type */
+#define PCIE_ANY_PORT (~0)
+
+/* Service Type */
+#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
+#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
+#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
+#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
+#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
+#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
+#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
+#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
+
+struct pcie_device {
+ int irq; /* Service IRQ/MSI/MSI-X Vector */
+ struct pci_dev *port; /* Root/Upstream/Downstream Port */
+ u32 service; /* Port service this device represents */
+ void *priv_data; /* Service Private Data */
+ struct device device; /* Generic Device Interface */
+};
+#define to_pcie_device(d) container_of(d, struct pcie_device, device)
+
+static inline void set_service_data(struct pcie_device *dev, void *data)
+{
+ dev->priv_data = data;
+}
+
+static inline void* get_service_data(struct pcie_device *dev)
+{
+ return dev->priv_data;
+}
+
+struct pcie_port_service_driver {
+ const char *name;
+ int (*probe) (struct pcie_device *dev);
+ void (*remove) (struct pcie_device *dev);
+ int (*suspend) (struct pcie_device *dev);
+ int (*resume) (struct pcie_device *dev);
+
+ /* Service Error Recovery Handler */
+ const struct pci_error_handlers *err_handler;
+
+ /* Link Reset Capability - AER service driver specific */
+ pci_ers_result_t (*reset_link) (struct pci_dev *dev);
+
+ int port_type; /* Type of the port this driver can handle */
+ u32 service; /* Port service this device represents */
+
+ struct device_driver driver;
+};
+#define to_service_driver(d) \
+ container_of(d, struct pcie_port_service_driver, driver)
+
+int pcie_port_service_register(struct pcie_port_service_driver *new);
+void pcie_port_service_unregister(struct pcie_port_service_driver *new);
+
+#endif /* _PCIEPORT_IF_H_ */
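
The interface above is what the AER, PME, hotplug and VC service drivers build on. The following is only a rough sketch of how such a driver is put together; the my_pme_* names and the "my_pme" string are hypothetical and not part of this patch.

#include <linux/module.h>
#include <linux/pcieport_if.h>

static int my_pme_probe(struct pcie_device *dev)
{
	/* per-service state would be allocated and stashed here */
	set_service_data(dev, NULL);
	return 0;
}

static void my_pme_remove(struct pcie_device *dev)
{
	/* undo whatever probe set up */
}

static struct pcie_port_service_driver my_pme_driver = {
	.name		= "my_pme",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_PME,
	.probe		= my_pme_probe,
	.remove		= my_pme_remove,
};

static int __init my_pme_init(void)
{
	return pcie_port_service_register(&my_pme_driver);
}

static void __exit my_pme_exit(void)
{
	pcie_port_service_unregister(&my_pme_driver);
}

module_init(my_pme_init);
module_exit(my_pme_exit);
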
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
new file mode 100644
index 000000000..2bb62bf29
--- /dev/null
+++ b/include/linux/pda_power.h
@@ -0,0 +1,42 @@
+/*
+ * Common power driver for PDAs and phones with one or two external
+ * power supplies (AC/USB) connected to main and backup batteries,
+ * and an optional built-in charger.
+ *
+ * Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PDA_POWER_H__
+#define __PDA_POWER_H__
+
+#define PDA_POWER_CHARGE_AC (1 << 0)
+#define PDA_POWER_CHARGE_USB (1 << 1)
+
+struct device;
+
+struct pda_power_pdata {
+ int (*init)(struct device *dev);
+ int (*is_ac_online)(void);
+ int (*is_usb_online)(void);
+ void (*set_charge)(int flags);
+ void (*exit)(struct device *dev);
+ int (*suspend)(pm_message_t state);
+ int (*resume)(void);
+
+ char **supplied_to;
+ size_t num_supplicants;
+
+ unsigned int wait_for_status; /* msecs, default is 500 */
+ unsigned int wait_for_charger; /* msecs, default is 500 */
+ unsigned int polling_interval; /* msecs, default is 2000 */
+
+ unsigned long ac_max_uA; /* current to draw when on AC */
+
+ bool use_otg_notifier;
+};
+
+#endif /* __PDA_POWER_H__ */
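
Board code fills this structure in and hands it to the pda_power platform driver as platform data. The sketch below is illustrative only: the my_* symbols are invented, it assumes the driver matches a platform device named "pda-power", and it simply reports AC as always online.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pda_power.h>

/* illustrative: pretend AC power is always present */
static int my_is_ac_online(void)
{
	return 1;
}

static char *my_batteries[] = { "main-battery" };

static struct pda_power_pdata my_power_pdata = {
	.is_ac_online		= my_is_ac_online,
	.supplied_to		= my_batteries,
	.num_supplicants	= ARRAY_SIZE(my_batteries),
	.polling_interval	= 2000,	/* msecs, the documented default */
};

static struct platform_device my_power_device = {
	.name	= "pda-power",	/* assumed driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &my_power_pdata,
	},
};

/* board init code would then call platform_device_register(&my_power_device) */
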
diff --git a/include/linux/pe.h b/include/linux/pe.h
new file mode 100644
index 000000000..e170b95e7
--- /dev/null
+++ b/include/linux/pe.h
@@ -0,0 +1,448 @@
+/*
+ * Copyright 2011 Red Hat, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author(s): Peter Jones <pjones@redhat.com>
+ */
+#ifndef __LINUX_PE_H
+#define __LINUX_PE_H
+
+#include <linux/types.h>
+
+#define MZ_MAGIC 0x5a4d /* "MZ" */
+
+struct mz_hdr {
+ uint16_t magic; /* MZ_MAGIC */
+ uint16_t lbsize; /* size of last used block */
+ uint16_t blocks; /* pages in file, 0x3 */
+ uint16_t relocs; /* relocations */
+ uint16_t hdrsize; /* header size in "paragraphs" */
+ uint16_t min_extra_pps; /* .bss */
+ uint16_t max_extra_pps; /* runtime limit for the arena size */
+ uint16_t ss; /* relative stack segment */
+ uint16_t sp; /* initial %sp register */
+ uint16_t checksum; /* word checksum */
+ uint16_t ip; /* initial %ip register */
+ uint16_t cs; /* initial %cs relative to load segment */
+ uint16_t reloc_table_offset; /* offset of the first relocation */
+ uint16_t overlay_num; /* overlay number. set to 0. */
+ uint16_t reserved0[4]; /* reserved */
+ uint16_t oem_id; /* oem identifier */
+ uint16_t oem_info; /* oem specific */
+ uint16_t reserved1[10]; /* reserved */
+ uint32_t peaddr; /* address of pe header */
+ char message[64]; /* message to print */
+};
+
+struct mz_reloc {
+ uint16_t offset;
+ uint16_t segment;
+};
+
+#define PE_MAGIC 0x00004550 /* "PE\0\0" */
+#define PE_OPT_MAGIC_PE32 0x010b
+#define PE_OPT_MAGIC_PE32_ROM 0x0107
+#define PE_OPT_MAGIC_PE32PLUS 0x020b
+
+/* machine type */
+#define IMAGE_FILE_MACHINE_UNKNOWN 0x0000
+#define IMAGE_FILE_MACHINE_AM33 0x01d3
+#define IMAGE_FILE_MACHINE_AMD64 0x8664
+#define IMAGE_FILE_MACHINE_ARM 0x01c0
+#define IMAGE_FILE_MACHINE_ARMV7 0x01c4
+#define IMAGE_FILE_MACHINE_EBC 0x0ebc
+#define IMAGE_FILE_MACHINE_I386 0x014c
+#define IMAGE_FILE_MACHINE_IA64 0x0200
+#define IMAGE_FILE_MACHINE_M32R 0x9041
+#define IMAGE_FILE_MACHINE_MIPS16 0x0266
+#define IMAGE_FILE_MACHINE_MIPSFPU 0x0366
+#define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466
+#define IMAGE_FILE_MACHINE_POWERPC 0x01f0
+#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1
+#define IMAGE_FILE_MACHINE_R4000 0x0166
+#define IMAGE_FILE_MACHINE_SH3 0x01a2
+#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3
+#define IMAGE_FILE_MACHINE_SH3E 0x01a4
+#define IMAGE_FILE_MACHINE_SH4 0x01a6
+#define IMAGE_FILE_MACHINE_SH5 0x01a8
+#define IMAGE_FILE_MACHINE_THUMB 0x01c2
+#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169
+
+/* flags */
+#define IMAGE_FILE_RELOCS_STRIPPED 0x0001
+#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002
+#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004
+#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008
+#define IMAGE_FILE_AGGRESSIVE_WS_TRIM 0x0010
+#define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020
+#define IMAGE_FILE_16BIT_MACHINE 0x0040
+#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080
+#define IMAGE_FILE_32BIT_MACHINE 0x0100
+#define IMAGE_FILE_DEBUG_STRIPPED 0x0200
+#define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400
+#define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800
+#define IMAGE_FILE_SYSTEM 0x1000
+#define IMAGE_FILE_DLL 0x2000
+#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000
+#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000
+
+struct pe_hdr {
+ uint32_t magic; /* PE magic */
+ uint16_t machine; /* machine type */
+ uint16_t sections; /* number of sections */
+ uint32_t timestamp; /* time_t */
+ uint32_t symbol_table; /* symbol table offset */
+ uint32_t symbols; /* number of symbols */
+ uint16_t opt_hdr_size; /* size of optional header */
+ uint16_t flags; /* flags */
+};
+
+#define IMAGE_FILE_OPT_ROM_MAGIC 0x107
+#define IMAGE_FILE_OPT_PE32_MAGIC 0x10b
+#define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b
+
+#define IMAGE_SUBSYSTEM_UNKNOWN 0
+#define IMAGE_SUBSYSTEM_NATIVE 1
+#define IMAGE_SUBSYSTEM_WINDOWS_GUI 2
+#define IMAGE_SUBSYSTEM_WINDOWS_CUI 3
+#define IMAGE_SUBSYSTEM_POSIX_CUI 7
+#define IMAGE_SUBSYSTEM_WINDOWS_CE_GUI 9
+#define IMAGE_SUBSYSTEM_EFI_APPLICATION 10
+#define IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER 11
+#define IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER 12
+#define IMAGE_SUBSYSTEM_EFI_ROM_IMAGE 13
+#define IMAGE_SUBSYSTEM_XBOX 14
+
+#define IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE 0x0040
+#define IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY 0x0080
+#define IMAGE_DLL_CHARACTERISTICS_NX_COMPAT 0x0100
+#define IMAGE_DLLCHARACTERISTICS_NO_ISOLATION 0x0200
+#define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400
+#define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800
+#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000
+#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000
+
+/* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't
+ * work right. vomit. */
+struct pe32_opt_hdr {
+ /* "standard" header */
+ uint16_t magic; /* file type */
+ uint8_t ld_major; /* linker major version */
+ uint8_t ld_minor; /* linker minor version */
+ uint32_t text_size; /* size of text section(s) */
+ uint32_t data_size; /* size of data section(s) */
+ uint32_t bss_size; /* size of bss section(s) */
+ uint32_t entry_point; /* file offset of entry point */
+ uint32_t code_base; /* relative code addr in ram */
+ uint32_t data_base; /* relative data addr in ram */
+ /* "windows" header */
+ uint32_t image_base; /* preferred load address */
+ uint32_t section_align; /* alignment in bytes */
+ uint32_t file_align; /* file alignment in bytes */
+ uint16_t os_major; /* major OS version */
+ uint16_t os_minor; /* minor OS version */
+ uint16_t image_major; /* major image version */
+ uint16_t image_minor; /* minor image version */
+ uint16_t subsys_major; /* major subsystem version */
+ uint16_t subsys_minor; /* minor subsystem version */
+ uint32_t win32_version; /* reserved, must be 0 */
+ uint32_t image_size; /* image size */
+ uint32_t header_size; /* header size rounded up to
+ file_align */
+ uint32_t csum; /* checksum */
+ uint16_t subsys; /* subsystem */
+ uint16_t dll_flags; /* more flags! */
+ uint32_t stack_size_req;/* amt of stack requested */
+ uint32_t stack_size; /* amt of stack required */
+ uint32_t heap_size_req; /* amt of heap requested */
+ uint32_t heap_size; /* amt of heap required */
+ uint32_t loader_flags; /* reserved, must be 0 */
+ uint32_t data_dirs; /* number of data dir entries */
+};
+
+struct pe32plus_opt_hdr {
+ uint16_t magic; /* file type */
+ uint8_t ld_major; /* linker major version */
+ uint8_t ld_minor; /* linker minor version */
+ uint32_t text_size; /* size of text section(s) */
+ uint32_t data_size; /* size of data section(s) */
+ uint32_t bss_size; /* size of bss section(s) */
+ uint32_t entry_point; /* file offset of entry point */
+ uint32_t code_base; /* relative code addr in ram */
+ /* "windows" header */
+ uint64_t image_base; /* preferred load address */
+ uint32_t section_align; /* alignment in bytes */
+ uint32_t file_align; /* file alignment in bytes */
+ uint16_t os_major; /* major OS version */
+ uint16_t os_minor; /* minor OS version */
+ uint16_t image_major; /* major image version */
+ uint16_t image_minor; /* minor image version */
+ uint16_t subsys_major; /* major subsystem version */
+ uint16_t subsys_minor; /* minor subsystem version */
+ uint32_t win32_version; /* reserved, must be 0 */
+ uint32_t image_size; /* image size */
+ uint32_t header_size; /* header size rounded up to
+ file_align */
+ uint32_t csum; /* checksum */
+ uint16_t subsys; /* subsystem */
+ uint16_t dll_flags; /* more flags! */
+ uint64_t stack_size_req;/* amt of stack requested */
+ uint64_t stack_size; /* amt of stack required */
+ uint64_t heap_size_req; /* amt of heap requested */
+ uint64_t heap_size; /* amt of heap required */
+ uint32_t loader_flags; /* reserved, must be 0 */
+ uint32_t data_dirs; /* number of data dir entries */
+};
+
+struct data_dirent {
+ uint32_t virtual_address; /* relative to load address */
+ uint32_t size;
+};
+
+struct data_directory {
+ struct data_dirent exports; /* .edata */
+ struct data_dirent imports; /* .idata */
+ struct data_dirent resources; /* .rsrc */
+ struct data_dirent exceptions; /* .pdata */
+ struct data_dirent certs; /* certs */
+ struct data_dirent base_relocations; /* .reloc */
+ struct data_dirent debug; /* .debug */
+	struct data_dirent arch;	/* reserved */
+ struct data_dirent global_ptr; /* global pointer reg. Size=0 */
+ struct data_dirent tls; /* .tls */
+ struct data_dirent load_config; /* load configuration structure */
+ struct data_dirent bound_imports; /* no idea */
+ struct data_dirent import_addrs; /* import address table */
+ struct data_dirent delay_imports; /* delay-load import table */
+ struct data_dirent clr_runtime_hdr; /* .cor (object only) */
+ struct data_dirent reserved;
+};
+
+struct section_header {
+ char name[8]; /* name or "/12\0" string tbl offset */
+ uint32_t virtual_size; /* size of loaded section in ram */
+ uint32_t virtual_address; /* relative virtual address */
+ uint32_t raw_data_size; /* size of the section */
+ uint32_t data_addr; /* file pointer to first page of sec */
+ uint32_t relocs; /* file pointer to relocation entries */
+ uint32_t line_numbers; /* line numbers! */
+ uint16_t num_relocs; /* number of relocations */
+ uint16_t num_lin_numbers; /* srsly. */
+ uint32_t flags;
+};
+
+/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
+#define IMAGE_SCN_RESERVED_0 0x00000001
+#define IMAGE_SCN_RESERVED_1 0x00000002
+#define IMAGE_SCN_RESERVED_2 0x00000004
+#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
+#define IMAGE_SCN_RESERVED_3 0x00000010
+#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
+#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
+#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
+#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
+#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
+#define IMAGE_SCN_RESERVED_4 0x00000400
+#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
+#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
+#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
+#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
+#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
+/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
+#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
+#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
+#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
+#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
+/* and here they just stuck a 1-byte integer in the middle of a bitfield */
+#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
+#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
+#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
+#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
+#define IMAGE_SCN_ALIGN_16BYTES 0x00500000
+#define IMAGE_SCN_ALIGN_32BYTES 0x00600000
+#define IMAGE_SCN_ALIGN_64BYTES 0x00700000
+#define IMAGE_SCN_ALIGN_128BYTES 0x00800000
+#define IMAGE_SCN_ALIGN_256BYTES 0x00900000
+#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000
+#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000
+#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
+#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
+#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
+#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
+#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
+#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
+#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
+#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */
+#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */
+#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
+#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
+
+enum x64_coff_reloc_type {
+ IMAGE_REL_AMD64_ABSOLUTE = 0,
+ IMAGE_REL_AMD64_ADDR64,
+ IMAGE_REL_AMD64_ADDR32,
+ IMAGE_REL_AMD64_ADDR32N,
+ IMAGE_REL_AMD64_REL32,
+ IMAGE_REL_AMD64_REL32_1,
+ IMAGE_REL_AMD64_REL32_2,
+ IMAGE_REL_AMD64_REL32_3,
+ IMAGE_REL_AMD64_REL32_4,
+ IMAGE_REL_AMD64_REL32_5,
+ IMAGE_REL_AMD64_SECTION,
+ IMAGE_REL_AMD64_SECREL,
+ IMAGE_REL_AMD64_SECREL7,
+ IMAGE_REL_AMD64_TOKEN,
+ IMAGE_REL_AMD64_SREL32,
+ IMAGE_REL_AMD64_PAIR,
+ IMAGE_REL_AMD64_SSPAN32,
+};
+
+enum arm_coff_reloc_type {
+ IMAGE_REL_ARM_ABSOLUTE,
+ IMAGE_REL_ARM_ADDR32,
+ IMAGE_REL_ARM_ADDR32N,
+ IMAGE_REL_ARM_BRANCH2,
+ IMAGE_REL_ARM_BRANCH1,
+ IMAGE_REL_ARM_SECTION,
+ IMAGE_REL_ARM_SECREL,
+};
+
+enum sh_coff_reloc_type {
+ IMAGE_REL_SH3_ABSOLUTE,
+ IMAGE_REL_SH3_DIRECT16,
+ IMAGE_REL_SH3_DIRECT32,
+ IMAGE_REL_SH3_DIRECT8,
+ IMAGE_REL_SH3_DIRECT8_WORD,
+ IMAGE_REL_SH3_DIRECT8_LONG,
+ IMAGE_REL_SH3_DIRECT4,
+ IMAGE_REL_SH3_DIRECT4_WORD,
+ IMAGE_REL_SH3_DIRECT4_LONG,
+ IMAGE_REL_SH3_PCREL8_WORD,
+ IMAGE_REL_SH3_PCREL8_LONG,
+ IMAGE_REL_SH3_PCREL12_WORD,
+ IMAGE_REL_SH3_STARTOF_SECTION,
+ IMAGE_REL_SH3_SIZEOF_SECTION,
+ IMAGE_REL_SH3_SECTION,
+ IMAGE_REL_SH3_SECREL,
+ IMAGE_REL_SH3_DIRECT32_NB,
+ IMAGE_REL_SH3_GPREL4_LONG,
+ IMAGE_REL_SH3_TOKEN,
+ IMAGE_REL_SHM_PCRELPT,
+ IMAGE_REL_SHM_REFLO,
+ IMAGE_REL_SHM_REFHALF,
+ IMAGE_REL_SHM_RELLO,
+ IMAGE_REL_SHM_RELHALF,
+ IMAGE_REL_SHM_PAIR,
+ IMAGE_REL_SHM_NOMODE,
+};
+
+enum ppc_coff_reloc_type {
+ IMAGE_REL_PPC_ABSOLUTE,
+ IMAGE_REL_PPC_ADDR64,
+ IMAGE_REL_PPC_ADDR32,
+ IMAGE_REL_PPC_ADDR24,
+ IMAGE_REL_PPC_ADDR16,
+ IMAGE_REL_PPC_ADDR14,
+ IMAGE_REL_PPC_REL24,
+ IMAGE_REL_PPC_REL14,
+ IMAGE_REL_PPC_ADDR32N,
+ IMAGE_REL_PPC_SECREL,
+ IMAGE_REL_PPC_SECTION,
+ IMAGE_REL_PPC_SECREL16,
+ IMAGE_REL_PPC_REFHI,
+ IMAGE_REL_PPC_REFLO,
+ IMAGE_REL_PPC_PAIR,
+ IMAGE_REL_PPC_SECRELLO,
+ IMAGE_REL_PPC_GPREL,
+ IMAGE_REL_PPC_TOKEN,
+};
+
+enum x86_coff_reloc_type {
+ IMAGE_REL_I386_ABSOLUTE,
+ IMAGE_REL_I386_DIR16,
+ IMAGE_REL_I386_REL16,
+ IMAGE_REL_I386_DIR32,
+ IMAGE_REL_I386_DIR32NB,
+ IMAGE_REL_I386_SEG12,
+ IMAGE_REL_I386_SECTION,
+ IMAGE_REL_I386_SECREL,
+ IMAGE_REL_I386_TOKEN,
+ IMAGE_REL_I386_SECREL7,
+ IMAGE_REL_I386_REL32,
+};
+
+enum ia64_coff_reloc_type {
+ IMAGE_REL_IA64_ABSOLUTE,
+ IMAGE_REL_IA64_IMM14,
+ IMAGE_REL_IA64_IMM22,
+ IMAGE_REL_IA64_IMM64,
+ IMAGE_REL_IA64_DIR32,
+ IMAGE_REL_IA64_DIR64,
+ IMAGE_REL_IA64_PCREL21B,
+ IMAGE_REL_IA64_PCREL21M,
+ IMAGE_REL_IA64_PCREL21F,
+ IMAGE_REL_IA64_GPREL22,
+ IMAGE_REL_IA64_LTOFF22,
+ IMAGE_REL_IA64_SECTION,
+ IMAGE_REL_IA64_SECREL22,
+ IMAGE_REL_IA64_SECREL64I,
+ IMAGE_REL_IA64_SECREL32,
+ IMAGE_REL_IA64_DIR32NB,
+ IMAGE_REL_IA64_SREL14,
+ IMAGE_REL_IA64_SREL22,
+ IMAGE_REL_IA64_SREL32,
+ IMAGE_REL_IA64_UREL32,
+ IMAGE_REL_IA64_PCREL60X,
+ IMAGE_REL_IA64_PCREL60B,
+ IMAGE_REL_IA64_PCREL60F,
+ IMAGE_REL_IA64_PCREL60I,
+ IMAGE_REL_IA64_PCREL60M,
+ IMAGE_REL_IA64_IMMGPREL6,
+ IMAGE_REL_IA64_TOKEN,
+ IMAGE_REL_IA64_GPREL32,
+ IMAGE_REL_IA64_ADDEND,
+};
+
+struct coff_reloc {
+ uint32_t virtual_address;
+ uint32_t symbol_table_index;
+ union {
+ enum x64_coff_reloc_type x64_type;
+ enum arm_coff_reloc_type arm_type;
+ enum sh_coff_reloc_type sh_type;
+ enum ppc_coff_reloc_type ppc_type;
+ enum x86_coff_reloc_type x86_type;
+ enum ia64_coff_reloc_type ia64_type;
+ uint16_t data;
+ };
+};
+
+/*
+ * Definitions for the contents of the certs data block
+ */
+#define WIN_CERT_TYPE_PKCS_SIGNED_DATA 0x0002
+#define WIN_CERT_TYPE_EFI_OKCS115 0x0EF0
+#define WIN_CERT_TYPE_EFI_GUID 0x0EF1
+
+#define WIN_CERT_REVISION_1_0 0x0100
+#define WIN_CERT_REVISION_2_0 0x0200
+
+struct win_certificate {
+ uint32_t length;
+ uint16_t revision;
+ uint16_t cert_type;
+};
+
+#endif /* __LINUX_PE_H */
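
As a hypothetical illustration of how the headers above chain together (find_pe_header() is not part of this file), mz_hdr.peaddr gives the offset of the PE header, whose magic can then be verified; byte-order conversion is omitted.

#include <linux/pe.h>

/*
 * Illustrative only: walk from the MZ header to the PE header of an
 * image that is fully mapped at @image.
 */
static struct pe_hdr *find_pe_header(void *image)
{
	struct mz_hdr *mz = image;
	struct pe_hdr *pe;

	if (mz->magic != MZ_MAGIC)
		return NULL;

	/* mz->peaddr holds the file offset of the "PE\0\0" header */
	pe = image + mz->peaddr;
	if (pe->magic != PE_MAGIC)
		return NULL;

	return pe;
}
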
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
new file mode 100644
index 000000000..57f3a1c55
--- /dev/null
+++ b/include/linux/percpu-defs.h
@@ -0,0 +1,516 @@
+/*
+ * linux/percpu-defs.h - basic definitions for percpu areas
+ *
+ * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
+ *
+ * This file is separate from linux/percpu.h to avoid cyclic inclusion
+ * dependency from arch header files. Only to be included from
+ * asm/percpu.h.
+ *
+ * This file includes macros necessary to declare percpu sections and
+ * variables, and definitions of percpu accessors and operations. It
+ * should provide enough percpu features to arch header files even when
+ * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
+ */
+
+#ifndef _LINUX_PERCPU_DEFS_H
+#define _LINUX_PERCPU_DEFS_H
+
+#ifdef CONFIG_SMP
+
+#ifdef MODULE
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
+#else
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#endif
+#define PER_CPU_FIRST_SECTION "..first"
+
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
+/*
+ * Base implementations of per-CPU variable declarations and definitions, where
+ * the section in which the variable is to be placed is provided by the
+ * 'sec' argument. This may be used to affect the parameters governing the
+ * variable's storage.
+ *
+ * NOTE! The sections for the DECLARE and for the DEFINE must match, lest
+ * linkage errors occur due to the compiler generating the wrong code to access
+ * that section.
+ */
+#define __PCPU_ATTRS(sec) \
+ __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \
+ PER_CPU_ATTRIBUTES
+
+#define __PCPU_DUMMY_ATTRS \
+ __attribute__((section(".discard"), unused))
+
+/*
+ * s390 and alpha modules require percpu variables to be defined as
+ * weak to force the compiler to generate GOT based external
+ * references for them. This is necessary because percpu sections
+ * will be located outside of the usually addressable area.
+ *
+ * This definition puts the following two extra restrictions when
+ * defining percpu variables.
+ *
+ * 1. The symbol must be globally unique, even the static ones.
+ * 2. Static percpu variables cannot be defined inside a function.
+ *
+ * Archs which need weak percpu definitions should define
+ * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ *
+ * To ensure that the generic code observes the above two
+ * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
+ * definition is used for all cases.
+ */
+#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+/*
+ * __pcpu_scope_* dummy variable is used to enforce scope. It
+ * receives the static modifier when it's used in front of
+ * DEFINE_PER_CPU() and will trigger build failure if
+ * DECLARE_PER_CPU() is used for the same variable.
+ *
+ * __pcpu_unique_* dummy variable is used to enforce symbol uniqueness
+ * such that hidden weak symbol collision, which will cause unrelated
+ * variables to share the same address, can be detected during build.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, sec) \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_ATTRS(sec) __typeof__(type) name
+
+#define DEFINE_PER_CPU_SECTION(type, name, sec) \
+ __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
+ extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
+ __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
+ extern __PCPU_ATTRS(sec) __typeof__(type) name; \
+ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
+ __typeof__(type) name
+#else
+/*
+ * Normal declaration and definition macros.
+ */
+#define DECLARE_PER_CPU_SECTION(type, name, sec) \
+ extern __PCPU_ATTRS(sec) __typeof__(type) name
+
+#define DEFINE_PER_CPU_SECTION(type, name, sec) \
+ __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
+ __typeof__(type) name
+#endif
+
+/*
+ * Variant on the per-CPU variable declaration/definition theme used for
+ * ordinary per-CPU variables.
+ */
+#define DECLARE_PER_CPU(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "")
+
+#define DEFINE_PER_CPU(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "")
+
+/*
+ * Declaration/definition used for per-CPU variables that must come first in
+ * the set of variables.
+ */
+#define DECLARE_PER_CPU_FIRST(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+
+#define DEFINE_PER_CPU_FIRST(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
+
+/*
+ * Declaration/definition used for per-CPU variables that must be cacheline
+ * aligned under SMP conditions so that, whilst a particular instance of the
+ * data corresponds to a particular CPU, inefficiencies due to direct access by
+ * other CPUs are reduced by preventing the data from unnecessarily spanning
+ * cachelines.
+ *
+ * An example of this would be statistical data, where each CPU's set of data
+ * is updated by that CPU alone, but the data from across all CPUs is collated
+ * by a CPU processing a read from a proc file.
+ */
+#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+ ____cacheline_aligned_in_smp
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+ ____cacheline_aligned_in_smp
+
+#define DECLARE_PER_CPU_ALIGNED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \
+ ____cacheline_aligned
+
+#define DEFINE_PER_CPU_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \
+ ____cacheline_aligned
+
+/*
+ * Declaration/definition used for per-CPU variables that must be page aligned.
+ */
+#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \
+ __aligned(PAGE_SIZE)
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \
+ __aligned(PAGE_SIZE)
+
+/*
+ * Declaration/definition used for per-CPU variables that must be read mostly.
+ */
+#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
+ DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
+
+#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
+ DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
+
+/*
+ * Intermodule exports for per-CPU variables. sparse forgets about the
+ * address space across EXPORT_SYMBOL(), so make EXPORT_SYMBOL() a
+ * no-op when __CHECKER__ is defined.
+ */
+#ifndef __CHECKER__
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
+#else
+#define EXPORT_PER_CPU_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var)
+#endif
+
+/*
+ * Accessors and operations.
+ */
+#ifndef __ASSEMBLY__
+
+/*
+ * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
+ * @ptr and is invoked once before a percpu area is accessed by all
+ * accessors and operations. This is performed in the generic part of
+ * percpu and arch overrides don't need to worry about it; however, if an
+ * arch wants to implement an arch-specific percpu accessor or operation,
+ * it may use __verify_pcpu_ptr() to verify the parameters.
+ *
+ * + 0 is required in order to convert the pointer type from a
+ * potential array type to a pointer to a single item of the array.
+ */
+#define __verify_pcpu_ptr(ptr) \
+do { \
+ const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
+ (void)__vpp_verify; \
+} while (0)
+
+#ifdef CONFIG_SMP
+
+/*
+ * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE()
+ * to prevent the compiler from making incorrect assumptions about the
+ * pointer value. The weird cast keeps both GCC and sparse happy.
+ */
+#define SHIFT_PERCPU_PTR(__p, __offset) \
+ RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+ __verify_pcpu_ptr(ptr); \
+ SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \
+})
+
+#define raw_cpu_ptr(ptr) \
+({ \
+ __verify_pcpu_ptr(ptr); \
+ arch_raw_cpu_ptr(ptr); \
+})
+
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr) \
+({ \
+ __verify_pcpu_ptr(ptr); \
+ SHIFT_PERCPU_PTR(ptr, my_cpu_offset); \
+})
+#else
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+#endif
+
+#else /* CONFIG_SMP */
+
+#define VERIFY_PERCPU_PTR(__p) \
+({ \
+ __verify_pcpu_ptr(__p); \
+ (typeof(*(__p)) __kernel __force *)(__p); \
+})
+
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
+#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+
+#endif /* CONFIG_SMP */
+
+#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
+
+/*
+ * Must be an lvalue. Since @var must be a simple identifier,
+ * we force a syntax error here if it isn't.
+ */
+#define get_cpu_var(var) \
+(*({ \
+ preempt_disable(); \
+ this_cpu_ptr(&var); \
+}))
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var) \
+do { \
+ (void)&(var); \
+ preempt_enable(); \
+} while (0)
+
+#define get_cpu_ptr(var) \
+({ \
+ preempt_disable(); \
+ this_cpu_ptr(var); \
+})
+
+#define put_cpu_ptr(var) \
+do { \
+ (void)(var); \
+ preempt_enable(); \
+} while (0)
+
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void __this_cpu_preempt_check(const char *op);
+#else
+static inline void __this_cpu_preempt_check(const char *op) { }
+#endif
+
+#define __pcpu_size_call_return(stem, variable) \
+({ \
+ typeof(variable) pscr_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
+ switch(sizeof(variable)) { \
+ case 1: pscr_ret__ = stem##1(variable); break; \
+ case 2: pscr_ret__ = stem##2(variable); break; \
+ case 4: pscr_ret__ = stem##4(variable); break; \
+ case 8: pscr_ret__ = stem##8(variable); break; \
+ default: \
+ __bad_size_call_parameter(); break; \
+ } \
+ pscr_ret__; \
+})
+
+#define __pcpu_size_call_return2(stem, variable, ...) \
+({ \
+ typeof(variable) pscr2_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
+ switch(sizeof(variable)) { \
+ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
+ case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
+ case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
+ case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
+ default: \
+ __bad_size_call_parameter(); break; \
+ } \
+ pscr2_ret__; \
+})
+
+/*
+ * Special handling for cmpxchg_double. cmpxchg_double is passed two
+ * percpu variables. The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
+({ \
+ bool pdcrb_ret__; \
+ __verify_pcpu_ptr(&(pcp1)); \
+ BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \
+ VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \
+ VM_BUG_ON((unsigned long)(&(pcp2)) != \
+ (unsigned long)(&(pcp1)) + sizeof(pcp1)); \
+ switch(sizeof(pcp1)) { \
+ case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \
+ case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \
+ case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \
+ case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \
+ default: \
+ __bad_size_call_parameter(); break; \
+ } \
+ pdcrb_ret__; \
+})
+
+#define __pcpu_size_call(stem, variable, ...) \
+do { \
+ __verify_pcpu_ptr(&(variable)); \
+ switch(sizeof(variable)) { \
+ case 1: stem##1(variable, __VA_ARGS__);break; \
+ case 2: stem##2(variable, __VA_ARGS__);break; \
+ case 4: stem##4(variable, __VA_ARGS__);break; \
+ case 8: stem##8(variable, __VA_ARGS__);break; \
+ default: \
+ __bad_size_call_parameter();break; \
+ } \
+} while (0)
+
+/*
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ *
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables.
+ *
+ * These operations guarantee exclusivity of access with respect to other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The arch code can provide an optimized implementation by defining macros
+ * for certain scalar sizes. For example, this_cpu_add_2() provides per-cpu
+ * atomic operations for 2 byte sized RMW actions. If arch code does
+ * not provide operations for a scalar size then the fallback in the
+ * generic code will be used.
+ *
+ * cmpxchg_double replaces two adjacent scalars at once. The first two
+ * parameters are per cpu variables which have to be of the same size. A
+ * truth value is returned to indicate success or failure (since a double
+ * register result is difficult to handle). There is very limited hardware
+ * support for these operations, so only certain sizes may work.
+ */
+
+/*
+ * Operations for contexts where we do not want to do any checks for
+ * preemptions. Unless strictly necessary, always use [__]this_cpu_*()
+ * instead.
+ *
+ * If there is no other protection through preempt disable and/or disabling
+ * interrupts then one of these RMW operations can show unexpected behavior
+ * because the execution thread was rescheduled on another processor or an
+ * interrupt occurred and the same percpu variable was modified from the
+ * interrupt context.
+ */
+#define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp)
+#define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val)
+#define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val)
+#define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val)
+#define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val)
+#define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
+#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
+#define raw_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
+#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
+
+#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val))
+#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1)
+#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1)
+#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1)
+#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1)
+
+/*
+ * Operations for contexts that are safe from preemption/interrupts. These
+ * operations verify that preemption is disabled.
+ */
+#define __this_cpu_read(pcp) \
+({ \
+ __this_cpu_preempt_check("read"); \
+ raw_cpu_read(pcp); \
+})
+
+#define __this_cpu_write(pcp, val) \
+({ \
+ __this_cpu_preempt_check("write"); \
+ raw_cpu_write(pcp, val); \
+})
+
+#define __this_cpu_add(pcp, val) \
+({ \
+ __this_cpu_preempt_check("add"); \
+ raw_cpu_add(pcp, val); \
+})
+
+#define __this_cpu_and(pcp, val) \
+({ \
+ __this_cpu_preempt_check("and"); \
+ raw_cpu_and(pcp, val); \
+})
+
+#define __this_cpu_or(pcp, val) \
+({ \
+ __this_cpu_preempt_check("or"); \
+ raw_cpu_or(pcp, val); \
+})
+
+#define __this_cpu_add_return(pcp, val) \
+({ \
+ __this_cpu_preempt_check("add_return"); \
+ raw_cpu_add_return(pcp, val); \
+})
+
+#define __this_cpu_xchg(pcp, nval) \
+({ \
+ __this_cpu_preempt_check("xchg"); \
+ raw_cpu_xchg(pcp, nval); \
+})
+
+#define __this_cpu_cmpxchg(pcp, oval, nval) \
+({ \
+ __this_cpu_preempt_check("cmpxchg"); \
+ raw_cpu_cmpxchg(pcp, oval, nval); \
+})
+
+#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({ __this_cpu_preempt_check("cmpxchg_double"); \
+ raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \
+})
+
+#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1)
+#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1)
+#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
+
+/*
+ * Operations with implied preemption protection. These operations can be
+ * used without worrying about preemption. Note that interrupts may still
+ * occur while an operation is in progress and if the interrupt modifies
+ * the variable too then RMW actions may not be reliable.
+ */
+#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
+#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
+#define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val)
+#define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val)
+#define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val)
+#define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
+#define this_cpu_cmpxchg(pcp, oval, nval) \
+ __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+ __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
+
+#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val))
+#define this_cpu_inc(pcp) this_cpu_add(pcp, 1)
+#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1)
+#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_PERCPU_DEFS_H */
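
A short illustrative use of the declaration and accessor macros above (the my_* names are invented): a per-CPU counter defined with DEFINE_PER_CPU(), bumped locally with this_cpu_inc(), and summed over all CPUs with per_cpu().

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* illustrative per-CPU event counter */
static DEFINE_PER_CPU(unsigned long, my_event_count);

static void my_count_event(void)
{
	/* preemption-safe increment of this CPU's copy */
	this_cpu_inc(my_event_count);
}

static unsigned long my_total_events(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(my_event_count, cpu);

	return sum;
}
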
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
new file mode 100644
index 000000000..12c9b485b
--- /dev/null
+++ b/include/linux/percpu-refcount.h
@@ -0,0 +1,328 @@
+/*
+ * Percpu refcounts:
+ * (C) 2012 Google, Inc.
+ * Author: Kent Overstreet <koverstreet@google.com>
+ *
+ * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
+ * atomic_dec_and_test() - but percpu.
+ *
+ * There's one important difference between percpu refs and normal atomic_t
+ * refcounts; you have to keep track of your initial refcount, and then when you
+ * start shutting down you call percpu_ref_kill() _before_ dropping the initial
+ * refcount.
+ *
+ * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
+ * than an atomic_t - this is because of the way shutdown works, see
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
+ *
+ * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
+ * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
+ * puts the ref back in single atomic_t mode, collecting the per cpu refs and
+ * issuing the appropriate barriers, and then marks the ref as shutting down so
+ * that percpu_ref_put() will check for the ref hitting 0. After it returns,
+ * it's safe to drop the initial ref.
+ *
+ * USAGE:
+ *
+ * See fs/aio.c for some example usage; it's used there for struct kioctx, which
+ * is created when userspace calls io_setup(), and destroyed when userspace
+ * calls io_destroy() or the process exits.
+ *
+ * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
+ * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
+ * the kioctx from the process's list of kioctxs - after that, there can't be
+ * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
+ * the initial ref with percpu_ref_put().
+ *
+ * Code that does a two stage shutdown like this often needs some kind of
+ * explicit synchronization to ensure the initial refcount can only be dropped
+ * once - percpu_ref_kill() does this for you, it returns true once and false if
+ * someone else already called it. The aio code uses it this way, but it's not
+ * necessary if the code has some other mechanism to synchronize teardown.
+ */
+
+#ifndef _LINUX_PERCPU_REFCOUNT_H
+#define _LINUX_PERCPU_REFCOUNT_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/gfp.h>
+
+struct percpu_ref;
+typedef void (percpu_ref_func_t)(struct percpu_ref *);
+
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+ __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
+ __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */
+ __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+ __PERCPU_REF_FLAG_BITS = 2,
+};
+
+/* @flags for percpu_ref_init() */
+enum {
+ /*
+ * Start w/ ref == 1 in atomic mode. Can be switched to percpu
+ * operation using percpu_ref_switch_to_percpu(). If initialized
+ * with this flag, the ref will stay in atomic mode until
+ * percpu_ref_switch_to_percpu() is invoked on it.
+ */
+ PERCPU_REF_INIT_ATOMIC = 1 << 0,
+
+ /*
+ * Start dead w/ ref == 0 in atomic mode. Must be revived with
+ * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+ */
+ PERCPU_REF_INIT_DEAD = 1 << 1,
+};
+
+struct percpu_ref {
+ atomic_long_t count;
+ /*
+ * The low bit of the pointer indicates whether the ref is in percpu
+ * mode; if set, then get/put will manipulate the atomic_t.
+ */
+ unsigned long percpu_count_ptr;
+ percpu_ref_func_t *release;
+ percpu_ref_func_t *confirm_switch;
+ bool force_atomic:1;
+ struct rcu_head rcu;
+};
+
+int __must_check percpu_ref_init(struct percpu_ref *ref,
+ percpu_ref_func_t *release, unsigned int flags,
+ gfp_t gfp);
+void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
+void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_kill);
+void percpu_ref_reinit(struct percpu_ref *ref);
+
+/**
+ * percpu_ref_kill - drop the initial ref
+ * @ref: percpu_ref to kill
+ *
+ * Must be used to drop the initial ref on a percpu refcount; must be called
+ * precisely once before shutdown.
+ *
+ * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
+ * percpu counters and dropping the initial ref.
+ */
+static inline void percpu_ref_kill(struct percpu_ref *ref)
+{
+ return percpu_ref_kill_and_confirm(ref, NULL);
+}
+
+/*
+ * Internal helper. Don't use outside percpu-refcount proper. The
+ * function doesn't return the pointer and let the caller test it for NULL
+ * because doing so forces the compiler to generate two conditional
+ * branches as it can't assume that @ref->percpu_count is not NULL.
+ */
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+ unsigned long __percpu **percpu_countp)
+{
+ unsigned long percpu_ptr;
+
+ /*
+ * The value of @ref->percpu_count_ptr is tested for
+ * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
+ * used as a pointer. If the compiler generates a separate fetch
+ * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
+	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
+	 * between the fetches, contaminating the pointer value, meaning that
+ *
+ * Also, we need a data dependency barrier to be paired with
+ * smp_store_release() in __percpu_ref_switch_to_percpu().
+ *
+ * Use lockless deref which contains both.
+ */
+ percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+
+ /*
+ * Theoretically, the following could test just ATOMIC; however,
+ * then we'd have to mask off DEAD separately as DEAD may be
+ * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
+ * implies ATOMIC anyway. Test them together.
+ */
+ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
+ return false;
+
+ *percpu_countp = (unsigned long __percpu *)percpu_ptr;
+ return true;
+}
+
+/**
+ * percpu_ref_get_many - increment a percpu refcount
+ * @ref: percpu_ref to get
+ * @nr: number of references to get
+ *
+ * Analogous to atomic_long_add().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
+{
+ unsigned long __percpu *percpu_count;
+
+ rcu_read_lock_sched();
+
+ if (__ref_is_percpu(ref, &percpu_count))
+ this_cpu_add(*percpu_count, nr);
+ else
+ atomic_long_add(nr, &ref->count);
+
+ rcu_read_unlock_sched();
+}
+
+/**
+ * percpu_ref_get - increment a percpu refcount
+ * @ref: percpu_ref to get
+ *
+ * Analogous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_get(struct percpu_ref *ref)
+{
+ percpu_ref_get_many(ref, 1);
+}
+
+/**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ int ret;
+
+ rcu_read_lock_sched();
+
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_inc(*percpu_count);
+ ret = true;
+ } else {
+ ret = atomic_long_inc_not_zero(&ref->count);
+ }
+
+ rcu_read_unlock_sched();
+
+ return ret;
+}
+
+/**
+ * percpu_ref_tryget_live - try to increment a live percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless it has already been killed. Returns
+ * %true on success; %false on failure.
+ *
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used. After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+ int ret = false;
+
+ rcu_read_lock_sched();
+
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_inc(*percpu_count);
+ ret = true;
+ } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->count);
+ }
+
+ rcu_read_unlock_sched();
+
+ return ret;
+}
+
+/**
+ * percpu_ref_put_many - decrement a percpu refcount
+ * @ref: percpu_ref to put
+ * @nr: number of references to put
+ *
+ * Decrement the refcount, and if 0, call the release function (which was passed
+ * to percpu_ref_init()).
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
+{
+ unsigned long __percpu *percpu_count;
+
+ rcu_read_lock_sched();
+
+ if (__ref_is_percpu(ref, &percpu_count))
+ this_cpu_sub(*percpu_count, nr);
+ else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
+ ref->release(ref);
+
+ rcu_read_unlock_sched();
+}
+
+/**
+ * percpu_ref_put - decrement a percpu refcount
+ * @ref: percpu_ref to put
+ *
+ * Decrement the refcount, and if 0, call the release function (which was passed
+ * to percpu_ref_init()).
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline void percpu_ref_put(struct percpu_ref *ref)
+{
+ percpu_ref_put_many(ref, 1);
+}
+
+/**
+ * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref is dying or dead.
+ *
+ * This function is safe to call as long as @ref is between init and exit
+ * and the caller is responsible for synchronizing against state changes.
+ */
+static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
+{
+ return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
+}
+
+/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
+
+ if (__ref_is_percpu(ref, &percpu_count))
+ return false;
+ return !atomic_long_read(&ref->count);
+}
+
+#endif
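
Tying the usage description at the top of this header to code, a hypothetical object with two-stage teardown (names are illustrative, not taken from the tree) initializes the ref with a release callback, takes and drops references around use, and finally drops the initial ref with percpu_ref_kill().

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_object {
	struct percpu_ref ref;
	/* ... payload ... */
};

static void my_object_release(struct percpu_ref *ref)
{
	struct my_object *obj = container_of(ref, struct my_object, ref);

	percpu_ref_exit(&obj->ref);
	kfree(obj);
}

static struct my_object *my_object_create(void)
{
	struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (percpu_ref_init(&obj->ref, my_object_release, 0, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

static void my_object_use(struct my_object *obj)
{
	if (percpu_ref_tryget_live(&obj->ref)) {
		/* ... safe to use obj here ... */
		percpu_ref_put(&obj->ref);
	}
}

static void my_object_destroy(struct my_object *obj)
{
	/* drop the initial ref; release runs once all other refs are gone */
	percpu_ref_kill(&obj->ref);
}
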
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
new file mode 100644
index 000000000..3e88c9a7d
--- /dev/null
+++ b/include/linux/percpu-rwsem.h
@@ -0,0 +1,34 @@
+#ifndef _LINUX_PERCPU_RWSEM_H
+#define _LINUX_PERCPU_RWSEM_H
+
+#include <linux/atomic.h>
+#include <linux/rwsem.h>
+#include <linux/percpu.h>
+#include <linux/wait.h>
+#include <linux/lockdep.h>
+
+struct percpu_rw_semaphore {
+ unsigned int __percpu *fast_read_ctr;
+ atomic_t write_ctr;
+ struct rw_semaphore rw_sem;
+ atomic_t slow_read_ctr;
+ wait_queue_head_t write_waitq;
+};
+
+extern void percpu_down_read(struct percpu_rw_semaphore *);
+extern void percpu_up_read(struct percpu_rw_semaphore *);
+
+extern void percpu_down_write(struct percpu_rw_semaphore *);
+extern void percpu_up_write(struct percpu_rw_semaphore *);
+
+extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
+ const char *, struct lock_class_key *);
+extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
+
+#define percpu_init_rwsem(brw) \
+({ \
+ static struct lock_class_key rwsem_key; \
+ __percpu_init_rwsem(brw, #brw, &rwsem_key); \
+})
+
+#endif
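
A minimal hypothetical sketch of the API above (my_* names are invented): percpu_init_rwsem() sets up the semaphore, frequent readers take the cheap percpu_down_read()/percpu_up_read() path, and rare writers use percpu_down_write()/percpu_up_write().

#include <linux/init.h>
#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_state_sem;

static int __init my_state_init(void)
{
	/* allocates the per-CPU fast-path counter */
	return percpu_init_rwsem(&my_state_sem);
}

static void my_read_state(void)
{
	percpu_down_read(&my_state_sem);
	/* ... read the shared state; many readers may run concurrently ... */
	percpu_up_read(&my_state_sem);
}

static void my_change_state(void)
{
	percpu_down_write(&my_state_sem);
	/* ... modify the shared state with all readers excluded ... */
	percpu_up_write(&my_state_sem);
}
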
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
new file mode 100644
index 000000000..caebf2a75
--- /dev/null
+++ b/include/linux/percpu.h
@@ -0,0 +1,141 @@
+#ifndef __LINUX_PERCPU_H
+#define __LINUX_PERCPU_H
+
+#include <linux/mmdebug.h>
+#include <linux/preempt.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/printk.h>
+#include <linux/pfn.h>
+#include <linux/init.h>
+
+#include <asm/percpu.h>
+
+/* enough to cover all DEFINE_PER_CPUs in modules */
+#ifdef CONFIG_MODULES
+#define PERCPU_MODULE_RESERVE (8 << 10)
+#else
+#define PERCPU_MODULE_RESERVE 0
+#endif
+
+#ifndef PERCPU_ENOUGH_ROOM
+#define PERCPU_ENOUGH_ROOM \
+ (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+ PERCPU_MODULE_RESERVE)
+#endif
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
+
+/*
+ * The percpu allocator can serve percpu allocations before slab is
+ * initialized, which allows slab to depend on the percpu allocator.
+ * The following two parameters decide how much resource to
+ * preallocate for this. Keep PERCPU_DYNAMIC_RESERVE equal to or
+ * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+ */
+#define PERCPU_DYNAMIC_EARLY_SLOTS 128
+#define PERCPU_DYNAMIC_EARLY_SIZE (12 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32. A more
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE (28 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+extern const unsigned long *pcpu_unit_offsets;
+
+struct pcpu_group_info {
+ int nr_units; /* aligned # of units */
+ unsigned long base_offset; /* base address offset */
+ unsigned int *cpu_map; /* unit->cpu map, empty
+ * entries contain NR_CPUS */
+};
+
+struct pcpu_alloc_info {
+ size_t static_size;
+ size_t reserved_size;
+ size_t dyn_size;
+ size_t unit_size;
+ size_t atom_size;
+ size_t alloc_size;
+ size_t __ai_size; /* internal, don't use */
+ int nr_groups; /* 0 if grouping unnecessary */
+ struct pcpu_group_info groups[];
+};
+
+enum pcpu_fc {
+ PCPU_FC_AUTO,
+ PCPU_FC_EMBED,
+ PCPU_FC_PAGE,
+
+ PCPU_FC_NR,
+};
+extern const char * const pcpu_fc_names[PCPU_FC_NR];
+
+extern enum pcpu_fc pcpu_chosen_fc;
+
+typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
+ size_t align);
+typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
+typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
+typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
+
+extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
+ int nr_units);
+extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
+
+extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ void *base_addr);
+
+#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
+extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
+ size_t atom_size,
+ pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn);
+#endif
+
+#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+extern int __init pcpu_page_first_chunk(size_t reserved_size,
+ pcpu_fc_alloc_fn_t alloc_fn,
+ pcpu_fc_free_fn_t free_fn,
+ pcpu_fc_populate_pte_fn_t populate_pte_fn);
+#endif
+
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern bool is_kernel_percpu_address(unsigned long addr);
+
+#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
+extern void __init setup_per_cpu_areas(void);
+#endif
+extern void __init percpu_init_late(void);
+
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
+#define alloc_percpu_gfp(type, gfp) \
+ (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
+ __alignof__(type), gfp)
+#define alloc_percpu(type) \
+ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
+ __alignof__(type))
+
+/* To avoid include hell, as printk can not declare this, we declare it here */
+DECLARE_PER_CPU(printk_func_t, printk_func);
+
+#endif /* __LINUX_PERCPU_H */
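
A short sketch of the dynamic per-cpu allocation API above (illustrative only; the per_cpu_ptr()/this_cpu_inc() accessors are assumed from <linux/percpu-defs.h>, pulled in via <asm/percpu.h>, and <linux/errno.h> is assumed for -ENOMEM):

static unsigned long __percpu *hits;

static int hits_init(void)
{
	hits = alloc_percpu(unsigned long);	/* zeroed slot for every possible CPU */
	return hits ? 0 : -ENOMEM;
}

static void hits_record(void)
{
	this_cpu_inc(*hits);			/* update this CPU's slot, no locking */
}

static unsigned long hits_total(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(hits, cpu);	/* fold all slots for a readout */
	return sum;
}

static void hits_exit(void)
{
	free_percpu(hits);
}
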
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
new file mode 100644
index 000000000..84a109449
--- /dev/null
+++ b/include/linux/percpu_counter.h
@@ -0,0 +1,190 @@
+#ifndef _LINUX_PERCPU_COUNTER_H
+#define _LINUX_PERCPU_COUNTER_H
+/*
+ * A simple "approximate counter" for use in ext2 and ext3 superblocks.
+ *
+ * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+#include <linux/gfp.h>
+
+#ifdef CONFIG_SMP
+
+struct percpu_counter {
+ raw_spinlock_t lock;
+ s64 count;
+#ifdef CONFIG_HOTPLUG_CPU
+ struct list_head list; /* All percpu_counters are on a list */
+#endif
+ s32 __percpu *counters;
+};
+
+extern int percpu_counter_batch;
+
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
+ struct lock_class_key *key);
+
+#define percpu_counter_init(fbc, value, gfp) \
+ ({ \
+ static struct lock_class_key __key; \
+ \
+ __percpu_counter_init(fbc, value, gfp, &__key); \
+ })
+
+void percpu_counter_destroy(struct percpu_counter *fbc);
+void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
+void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+ return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}
+
+static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+ __percpu_counter_add(fbc, amount, percpu_counter_batch);
+}
+
+static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+ s64 ret = __percpu_counter_sum(fbc);
+ return ret < 0 ? 0 : ret;
+}
+
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc);
+}
+
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+/*
+ * It is possible for percpu_counter_read() to return a small negative
+ * number for a counter which should never be negative.
+ */
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+ s64 ret = fbc->count;
+
+ barrier(); /* Prevent reloads of fbc->count */
+ if (ret >= 0)
+ return ret;
+ return 0;
+}
+
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+ return (fbc->counters != NULL);
+}
+
+#else /* !CONFIG_SMP */
+
+struct percpu_counter {
+ s64 count;
+};
+
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp)
+{
+ fbc->count = amount;
+ return 0;
+}
+
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+}
+
+static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
+{
+ fbc->count = amount;
+}
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+ if (fbc->count > rhs)
+ return 1;
+ else if (fbc->count < rhs)
+ return -1;
+ else
+ return 0;
+}
+
+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+ return percpu_counter_compare(fbc, rhs);
+}
+
+static inline void
+percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+ preempt_disable();
+ fbc->count += amount;
+ preempt_enable();
+}
+
+static inline void
+__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+ percpu_counter_add(fbc, amount);
+}
+
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+/*
+ * percpu_counter is intended to track positive numbers. In the UP case the
+ * number should never be negative.
+ */
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+ return percpu_counter_read_positive(fbc);
+}
+
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return percpu_counter_read(fbc);
+}
+
+static inline int percpu_counter_initialized(struct percpu_counter *fbc)
+{
+ return 1;
+}
+
+#endif /* CONFIG_SMP */
+
+static inline void percpu_counter_inc(struct percpu_counter *fbc)
+{
+ percpu_counter_add(fbc, 1);
+}
+
+static inline void percpu_counter_dec(struct percpu_counter *fbc)
+{
+ percpu_counter_add(fbc, -1);
+}
+
+static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, -amount);
+}
+
+#endif /* _LINUX_PERCPU_COUNTER_H */
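
A short sketch of the percpu_counter API above (illustrative only; nr_requests and the request_* helpers are hypothetical):

static struct percpu_counter nr_requests;

static int requests_init(void)
{
	return percpu_counter_init(&nr_requests, 0, GFP_KERNEL);
}

static void request_start(void)
{
	percpu_counter_inc(&nr_requests);
}

static void request_done(void)
{
	percpu_counter_dec(&nr_requests);
}

static s64 requests_estimate(void)
{
	/* cheap: reads the central count, may lag the per-cpu deltas */
	return percpu_counter_read_positive(&nr_requests);
}

static s64 requests_exact(void)
{
	/* slow: folds every CPU's delta under the lock */
	return percpu_counter_sum(&nr_requests);
}

static void requests_exit(void)
{
	percpu_counter_destroy(&nr_requests);
}
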
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
new file mode 100644
index 000000000..f5cfdd6a5
--- /dev/null
+++ b/include/linux/percpu_ida.h
@@ -0,0 +1,82 @@
+#ifndef __PERCPU_IDA_H__
+#define __PERCPU_IDA_H__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/spinlock_types.h>
+#include <linux/wait.h>
+#include <linux/cpumask.h>
+
+struct percpu_ida_cpu;
+
+struct percpu_ida {
+ /*
+ * number of tags available to be allocated, as passed to
+ * percpu_ida_init()
+ */
+ unsigned nr_tags;
+ unsigned percpu_max_size;
+ unsigned percpu_batch_size;
+
+ struct percpu_ida_cpu __percpu *tag_cpu;
+
+ /*
+ * Bitmap of cpus that (may) have tags on their percpu freelists:
+ * steal_tags() uses this to decide when to steal tags, and which cpus
+ * to try stealing from.
+ *
+ * It's ok for a freelist to be empty when its bit is set - steal_tags()
+ * will just keep looking - but the bitmap _must_ be set whenever a
+ * percpu freelist does have tags.
+ */
+ cpumask_t cpus_have_tags;
+
+ struct {
+ spinlock_t lock;
+ /*
+ * When we go to steal tags from another cpu (see steal_tags()),
+ * we want to pick a cpu at random. Cycling through them every
+ * time we steal is a bit easier and more or less equivalent:
+ */
+ unsigned cpu_last_stolen;
+
+ /* For sleeping on allocation failure */
+ wait_queue_head_t wait;
+
+ /*
+ * Global freelist - it's a stack where nr_free points to the
+ * top
+ */
+ unsigned nr_free;
+ unsigned *freelist;
+ } ____cacheline_aligned_in_smp;
+};
+
+/*
+ * Number of tags we move between the percpu freelist and the global freelist at
+ * a time
+ */
+#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
+/* Max size of percpu freelist */
+#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
+
+int percpu_ida_alloc(struct percpu_ida *pool, int state);
+void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
+
+void percpu_ida_destroy(struct percpu_ida *pool);
+int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
+ unsigned long max_size, unsigned long batch_size);
+static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
+{
+ return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
+ IDA_DEFAULT_PCPU_BATCH_MOVE);
+}
+
+typedef int (*percpu_ida_cb)(unsigned, void *);
+int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
+ void *data);
+
+unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
+#endif /* __PERCPU_IDA_H__ */
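
A short sketch of the percpu_ida tag allocator above (illustrative only; the pool size and cmd_* names are hypothetical):

static struct percpu_ida cmd_tags;

static int cmd_tags_init(void)
{
	return percpu_ida_init(&cmd_tags, 128);	/* tags 0..127 */
}

static int cmd_tag_get(void)
{
	/* TASK_RUNNING: return a negative error instead of sleeping when empty */
	return percpu_ida_alloc(&cmd_tags, TASK_RUNNING);
}

static void cmd_tag_put(unsigned tag)
{
	percpu_ida_free(&cmd_tags, tag);
}

static void cmd_tags_exit(void)
{
	percpu_ida_destroy(&cmd_tags);
}
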
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
new file mode 100644
index 000000000..d8a82a89f
--- /dev/null
+++ b/include/linux/perf_event.h
@@ -0,0 +1,1050 @@
+/*
+ * Performance events:
+ *
+ * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
+ *
+ * Data type definitions, declarations, prototypes.
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_PERF_EVENT_H
+#define _LINUX_PERF_EVENT_H
+
+#include <uapi/linux/perf_event.h>
+
+/*
+ * Kernel-internal data types and definitions:
+ */
+
+#ifdef CONFIG_PERF_EVENTS
+# include <asm/perf_event.h>
+# include <asm/local64.h>
+#endif
+
+struct perf_guest_info_callbacks {
+ int (*is_in_guest)(void);
+ int (*is_user_mode)(void);
+ unsigned long (*get_guest_ip)(void);
+};
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+#include <asm/hw_breakpoint.h>
+#endif
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/fs.h>
+#include <linux/pid_namespace.h>
+#include <linux/workqueue.h>
+#include <linux/ftrace.h>
+#include <linux/cpu.h>
+#include <linux/irq_work.h>
+#include <linux/static_key.h>
+#include <linux/jump_label_ratelimit.h>
+#include <linux/atomic.h>
+#include <linux/sysfs.h>
+#include <linux/perf_regs.h>
+#include <linux/workqueue.h>
+#include <linux/cgroup.h>
+#include <asm/local.h>
+
+struct perf_callchain_entry {
+ __u64 nr;
+ __u64 ip[PERF_MAX_STACK_DEPTH];
+};
+
+struct perf_raw_record {
+ u32 size;
+ void *data;
+};
+
+/*
+ * branch stack layout:
+ * nr: number of taken branches stored in entries[]
+ *
+ * Note that nr can vary from sample to sample
+ * branches (to, from) are stored from most recent
+ * to least recent, i.e., entries[0] contains the most
+ * recent branch.
+ */
+struct perf_branch_stack {
+ __u64 nr;
+ struct perf_branch_entry entries[0];
+};
+
+struct task_struct;
+
+/*
+ * extra PMU register associated with an event
+ */
+struct hw_perf_event_extra {
+ u64 config; /* register value */
+ unsigned int reg; /* register address or index */
+ int alloc; /* extra register already allocated */
+ int idx; /* index in shared_regs->regs[] */
+};
+
+/**
+ * struct hw_perf_event - performance event hardware details:
+ */
+struct hw_perf_event {
+#ifdef CONFIG_PERF_EVENTS
+ union {
+ struct { /* hardware */
+ u64 config;
+ u64 last_tag;
+ unsigned long config_base;
+ unsigned long event_base;
+ int event_base_rdpmc;
+ int idx;
+ int last_cpu;
+ int flags;
+
+ struct hw_perf_event_extra extra_reg;
+ struct hw_perf_event_extra branch_reg;
+ };
+ struct { /* software */
+ struct hrtimer hrtimer;
+ };
+ struct { /* tracepoint */
+ /* for tp_event->class */
+ struct list_head tp_list;
+ };
+ struct { /* intel_cqm */
+ int cqm_state;
+ int cqm_rmid;
+ struct list_head cqm_events_entry;
+ struct list_head cqm_groups_entry;
+ struct list_head cqm_group_entry;
+ };
+ struct { /* itrace */
+ int itrace_started;
+ };
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ struct { /* breakpoint */
+ /*
+ * Crufty hack to avoid the chicken and egg
+ * problem hw_breakpoint has with context
+ * creation and event initialization.
+ */
+ struct arch_hw_breakpoint info;
+ struct list_head bp_list;
+ };
+#endif
+ };
+ struct task_struct *target;
+ int state;
+ local64_t prev_count;
+ u64 sample_period;
+ u64 last_period;
+ local64_t period_left;
+ u64 interrupts_seq;
+ u64 interrupts;
+
+ u64 freq_time_stamp;
+ u64 freq_count_stamp;
+#endif
+};
+
+/*
+ * hw_perf_event::state flags
+ */
+#define PERF_HES_STOPPED 0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH 0x04
+
+struct perf_event;
+
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
+
+/**
+ * pmu::capabilities flags
+ */
+#define PERF_PMU_CAP_NO_INTERRUPT 0x01
+#define PERF_PMU_CAP_NO_NMI 0x02
+#define PERF_PMU_CAP_AUX_NO_SG 0x04
+#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
+#define PERF_PMU_CAP_EXCLUSIVE 0x10
+#define PERF_PMU_CAP_ITRACE 0x20
+
+/**
+ * struct pmu - generic performance monitoring unit
+ */
+struct pmu {
+ struct list_head entry;
+
+ struct module *module;
+ struct device *dev;
+ const struct attribute_group **attr_groups;
+ const char *name;
+ int type;
+
+ /*
+ * various common per-pmu feature flags
+ */
+ int capabilities;
+
+ int * __percpu pmu_disable_count;
+ struct perf_cpu_context * __percpu pmu_cpu_context;
+ atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
+ int task_ctx_nr;
+ int hrtimer_interval_ms;
+
+ /*
+ * Fully disable/enable this PMU, can be used to protect from the PMI
+ * as well as for lazy/batch writing of the MSRs.
+ */
+ void (*pmu_enable) (struct pmu *pmu); /* optional */
+ void (*pmu_disable) (struct pmu *pmu); /* optional */
+
+ /*
+ * Try and initialize the event for this PMU.
+ * Should return -ENOENT when the @event doesn't match this PMU.
+ */
+ int (*event_init) (struct perf_event *event);
+
+ /*
+ * Notification that the event was mapped or unmapped. Called
+ * in the context of the mapping task.
+ */
+ void (*event_mapped) (struct perf_event *event); /*optional*/
+ void (*event_unmapped) (struct perf_event *event); /*optional*/
+
+#define PERF_EF_START 0x01 /* start the counter when adding */
+#define PERF_EF_RELOAD 0x02 /* reload the counter when starting */
+#define PERF_EF_UPDATE 0x04 /* update the counter when stopping */
+
+ /*
+ * Adds/Removes a counter to/from the PMU, can be done inside
+ * a transaction, see the ->*_txn() methods.
+ */
+ int (*add) (struct perf_event *event, int flags);
+ void (*del) (struct perf_event *event, int flags);
+
+ /*
+ * Starts/Stops a counter present on the PMU. The PMI handler
+ * should stop the counter when perf_event_overflow() returns
+ * !0. ->start() will be used to continue.
+ */
+ void (*start) (struct perf_event *event, int flags);
+ void (*stop) (struct perf_event *event, int flags);
+
+ /*
+ * Updates the counter value of the event.
+ */
+ void (*read) (struct perf_event *event);
+
+ /*
+ * Group events scheduling is treated as a transaction, add
+ * group events as a whole and perform one schedulability test.
+ * If the test fails, roll back the whole group
+ *
+ * Start the transaction, after this ->add() doesn't need to
+ * do schedulability tests.
+ */
+ void (*start_txn) (struct pmu *pmu); /* optional */
+ /*
+ * If ->start_txn() disabled the ->add() schedulability test
+ * then ->commit_txn() is required to perform one. On success
+ * the transaction is closed. On error the transaction is kept
+ * open until ->cancel_txn() is called.
+ */
+ int (*commit_txn) (struct pmu *pmu); /* optional */
+ /*
+ * Will cancel the transaction, assumes ->del() is called
+ * for each successful ->add() during the transaction.
+ */
+ void (*cancel_txn) (struct pmu *pmu); /* optional */
+
+ /*
+ * Will return the value for perf_event_mmap_page::index for this event,
+ * if no implementation is provided it will default to: event->hw.idx + 1.
+ */
+ int (*event_idx) (struct perf_event *event); /*optional */
+
+ /*
+ * context-switches callback
+ */
+ void (*sched_task) (struct perf_event_context *ctx,
+ bool sched_in);
+ /*
+ * PMU specific data size
+ */
+ size_t task_ctx_size;
+
+
+ /*
+ * Return the count value for a counter.
+ */
+ u64 (*count) (struct perf_event *event); /*optional*/
+
+ /*
+ * Set up pmu-private data structures for an AUX area
+ */
+ void *(*setup_aux) (int cpu, void **pages,
+ int nr_pages, bool overwrite);
+ /* optional */
+
+ /*
+ * Free pmu-private AUX data structures
+ */
+ void (*free_aux) (void *aux); /* optional */
+};
+
+/**
+ * enum perf_event_active_state - the states of an event
+ */
+enum perf_event_active_state {
+ PERF_EVENT_STATE_EXIT = -3,
+ PERF_EVENT_STATE_ERROR = -2,
+ PERF_EVENT_STATE_OFF = -1,
+ PERF_EVENT_STATE_INACTIVE = 0,
+ PERF_EVENT_STATE_ACTIVE = 1,
+};
+
+struct file;
+struct perf_sample_data;
+
+typedef void (*perf_overflow_handler_t)(struct perf_event *,
+ struct perf_sample_data *,
+ struct pt_regs *regs);
+
+enum perf_group_flag {
+ PERF_GROUP_SOFTWARE = 0x1,
+};
+
+#define SWEVENT_HLIST_BITS 8
+#define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS)
+
+struct swevent_hlist {
+ struct hlist_head heads[SWEVENT_HLIST_SIZE];
+ struct rcu_head rcu_head;
+};
+
+#define PERF_ATTACH_CONTEXT 0x01
+#define PERF_ATTACH_GROUP 0x02
+#define PERF_ATTACH_TASK 0x04
+#define PERF_ATTACH_TASK_DATA 0x08
+
+struct perf_cgroup;
+struct ring_buffer;
+
+/**
+ * struct perf_event - performance event kernel representation:
+ */
+struct perf_event {
+#ifdef CONFIG_PERF_EVENTS
+ /*
+ * entry onto perf_event_context::event_list;
+ * modifications require ctx->lock
+ * RCU safe iterations.
+ */
+ struct list_head event_entry;
+
+ /*
+ * XXX: group_entry and sibling_list should be mutually exclusive;
+ * either you're a sibling in a group, or you're the group leader.
+ * Rework the code to always use the same list element.
+ *
+ * Locked for modification by both ctx->mutex and ctx->lock; holding
+ * either suffices for read.
+ */
+ struct list_head group_entry;
+ struct list_head sibling_list;
+
+ /*
+ * We need storage to track the entries in perf_pmu_migrate_context; we
+ * cannot use the event_entry because of RCU and we want to keep the
+ * group intact, which avoids using the other two entries.
+ */
+ struct list_head migrate_entry;
+
+ struct hlist_node hlist_entry;
+ struct list_head active_entry;
+ int nr_siblings;
+ int group_flags;
+ struct perf_event *group_leader;
+ struct pmu *pmu;
+
+ enum perf_event_active_state state;
+ unsigned int attach_state;
+ local64_t count;
+ atomic64_t child_count;
+
+ /*
+ * These are the total time in nanoseconds that the event
+ * has been enabled (i.e. eligible to run, and the task has
+ * been scheduled in, if this is a per-task event)
+ * and running (scheduled onto the CPU), respectively.
+ *
+ * They are computed from tstamp_enabled, tstamp_running and
+ * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
+ */
+ u64 total_time_enabled;
+ u64 total_time_running;
+
+ /*
+ * These are timestamps used for computing total_time_enabled
+ * and total_time_running when the event is in INACTIVE or
+ * ACTIVE state, measured in nanoseconds from an arbitrary point
+ * in time.
+ * tstamp_enabled: the notional time when the event was enabled
+ * tstamp_running: the notional time when the event was scheduled on
+ * tstamp_stopped: in INACTIVE state, the notional time when the
+ * event was scheduled off.
+ */
+ u64 tstamp_enabled;
+ u64 tstamp_running;
+ u64 tstamp_stopped;
+
+ /*
+ * timestamp shadows the actual context timing but it can
+ * be safely used in NMI interrupt context. It reflects the
+ * context time as it was when the event was last scheduled in.
+ *
+ * ctx_time already accounts for ctx->timestamp. Therefore to
+ * compute ctx_time for a sample, simply add perf_clock().
+ */
+ u64 shadow_ctx_time;
+
+ struct perf_event_attr attr;
+ u16 header_size;
+ u16 id_header_size;
+ u16 read_size;
+ struct hw_perf_event hw;
+
+ struct perf_event_context *ctx;
+ atomic_long_t refcount;
+
+ /*
+ * These accumulate total time (in nanoseconds) that children
+ * events have been enabled and running, respectively.
+ */
+ atomic64_t child_total_time_enabled;
+ atomic64_t child_total_time_running;
+
+ /*
+ * Protect attach/detach and child_list:
+ */
+ struct mutex child_mutex;
+ struct list_head child_list;
+ struct perf_event *parent;
+
+ int oncpu;
+ int cpu;
+
+ struct list_head owner_entry;
+ struct task_struct *owner;
+
+ /* mmap bits */
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+
+ struct ring_buffer *rb;
+ struct list_head rb_entry;
+ unsigned long rcu_batches;
+ int rcu_pending;
+
+ /* poll related */
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
+
+ /* delayed work for NMIs and such */
+ int pending_wakeup;
+ int pending_kill;
+ int pending_disable;
+ struct irq_work pending;
+
+ atomic_t event_limit;
+
+ void (*destroy)(struct perf_event *);
+ struct rcu_head rcu_head;
+
+ struct pid_namespace *ns;
+ u64 id;
+
+ u64 (*clock)(void);
+ perf_overflow_handler_t overflow_handler;
+ void *overflow_handler_context;
+
+#ifdef CONFIG_EVENT_TRACING
+ struct ftrace_event_call *tp_event;
+ struct event_filter *filter;
+#ifdef CONFIG_FUNCTION_TRACER
+ struct ftrace_ops ftrace_ops;
+#endif
+#endif
+
+#ifdef CONFIG_CGROUP_PERF
+ struct perf_cgroup *cgrp; /* cgroup the event is attached to */
+ int cgrp_defer_enabled;
+#endif
+
+#endif /* CONFIG_PERF_EVENTS */
+};
+
+/**
+ * struct perf_event_context - event context structure
+ *
+ * Used as a container for task events and CPU events as well:
+ */
+struct perf_event_context {
+ struct pmu *pmu;
+ /*
+ * Protect the states of the events in the list,
+ * nr_active, and the list:
+ */
+ raw_spinlock_t lock;
+ /*
+ * Protect the list of events. Locking either mutex or lock
+ * is sufficient to ensure the list doesn't change; to change
+ * the list you need to lock both the mutex and the spinlock.
+ */
+ struct mutex mutex;
+
+ struct list_head active_ctx_list;
+ struct list_head pinned_groups;
+ struct list_head flexible_groups;
+ struct list_head event_list;
+ int nr_events;
+ int nr_active;
+ int is_active;
+ int nr_stat;
+ int nr_freq;
+ int rotate_disable;
+ atomic_t refcount;
+ struct task_struct *task;
+
+ /*
+ * Context clock, runs when context enabled.
+ */
+ u64 time;
+ u64 timestamp;
+
+ /*
+ * These fields let us detect when two contexts have both
+ * been cloned (inherited) from a common ancestor.
+ */
+ struct perf_event_context *parent_ctx;
+ u64 parent_gen;
+ u64 generation;
+ int pin_count;
+ int nr_cgroups; /* cgroup evts */
+ void *task_ctx_data; /* pmu specific data */
+ struct rcu_head rcu_head;
+
+ struct delayed_work orphans_remove;
+ bool orphans_remove_sched;
+};
+
+/*
+ * Number of contexts where an event can trigger:
+ * task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS 4
+
+/**
+ * struct perf_cpu_context - per-cpu event context structure
+ */
+struct perf_cpu_context {
+ struct perf_event_context ctx;
+ struct perf_event_context *task_ctx;
+ int active_oncpu;
+ int exclusive;
+ struct hrtimer hrtimer;
+ ktime_t hrtimer_interval;
+ struct pmu *unique_pmu;
+ struct perf_cgroup *cgrp;
+};
+
+struct perf_output_handle {
+ struct perf_event *event;
+ struct ring_buffer *rb;
+ unsigned long wakeup;
+ unsigned long size;
+ union {
+ void *addr;
+ unsigned long head;
+ };
+ int page;
+};
+
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+ u64 time;
+ u64 timestamp;
+};
+
+struct perf_cgroup {
+ struct cgroup_subsys_state css;
+ struct perf_cgroup_info __percpu *info;
+};
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+ return container_of(task_css(task, perf_event_cgrp_id),
+ struct perf_cgroup, css);
+}
+#endif /* CONFIG_CGROUP_PERF */
+
+#ifdef CONFIG_PERF_EVENTS
+
+extern void *perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event);
+extern void perf_aux_output_end(struct perf_output_handle *handle,
+ unsigned long size, bool truncated);
+extern int perf_aux_output_skip(struct perf_output_handle *handle,
+ unsigned long size);
+extern void *perf_get_aux(struct perf_output_handle *handle);
+
+extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
+extern void perf_pmu_unregister(struct pmu *pmu);
+
+extern int perf_num_counters(void);
+extern const char *perf_pmu_name(void);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+ struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+ struct task_struct *next);
+extern int perf_event_init_task(struct task_struct *child);
+extern void perf_event_exit_task(struct task_struct *child);
+extern void perf_event_free_task(struct task_struct *task);
+extern void perf_event_delayed_put(struct task_struct *task);
+extern void perf_event_print_debug(void);
+extern void perf_pmu_disable(struct pmu *pmu);
+extern void perf_pmu_enable(struct pmu *pmu);
+extern void perf_sched_cb_dec(struct pmu *pmu);
+extern void perf_sched_cb_inc(struct pmu *pmu);
+extern int perf_event_task_disable(void);
+extern int perf_event_task_enable(void);
+extern int perf_event_refresh(struct perf_event *event, int refresh);
+extern void perf_event_update_userpage(struct perf_event *event);
+extern int perf_event_release_kernel(struct perf_event *event);
+extern struct perf_event *
+perf_event_create_kernel_counter(struct perf_event_attr *attr,
+ int cpu,
+ struct task_struct *task,
+ perf_overflow_handler_t callback,
+ void *context);
+extern void perf_pmu_migrate_context(struct pmu *pmu,
+ int src_cpu, int dst_cpu);
+extern u64 perf_event_read_value(struct perf_event *event,
+ u64 *enabled, u64 *running);
+
+
+struct perf_sample_data {
+ /*
+ * Fields set by perf_sample_data_init(), group so as to
+ * minimize the cachelines touched.
+ */
+ u64 addr;
+ struct perf_raw_record *raw;
+ struct perf_branch_stack *br_stack;
+ u64 period;
+ u64 weight;
+ u64 txn;
+ union perf_mem_data_src data_src;
+
+ /*
+ * The other fields, optionally {set,used} by
+ * perf_{prepare,output}_sample().
+ */
+ u64 type;
+ u64 ip;
+ struct {
+ u32 pid;
+ u32 tid;
+ } tid_entry;
+ u64 time;
+ u64 id;
+ u64 stream_id;
+ struct {
+ u32 cpu;
+ u32 reserved;
+ } cpu_entry;
+ struct perf_callchain_entry *callchain;
+
+ /*
+ * regs_user may point to task_pt_regs or to regs_user_copy, depending
+ * on arch details.
+ */
+ struct perf_regs regs_user;
+ struct pt_regs regs_user_copy;
+
+ struct perf_regs regs_intr;
+ u64 stack_user_size;
+} ____cacheline_aligned;
+
+/* default value for data source */
+#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
+ PERF_MEM_S(LVL, NA) |\
+ PERF_MEM_S(SNOOP, NA) |\
+ PERF_MEM_S(LOCK, NA) |\
+ PERF_MEM_S(TLB, NA))
+
+static inline void perf_sample_data_init(struct perf_sample_data *data,
+ u64 addr, u64 period)
+{
+ /* remaining struct members initialized in perf_prepare_sample() */
+ data->addr = addr;
+ data->raw = NULL;
+ data->br_stack = NULL;
+ data->period = period;
+ data->weight = 0;
+ data->data_src.val = PERF_MEM_NA;
+ data->txn = 0;
+}
+
+extern void perf_output_sample(struct perf_output_handle *handle,
+ struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event);
+extern void perf_prepare_sample(struct perf_event_header *header,
+ struct perf_sample_data *data,
+ struct perf_event *event,
+ struct pt_regs *regs);
+
+extern int perf_event_overflow(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs);
+
+static inline bool is_sampling_event(struct perf_event *event)
+{
+ return event->attr.sample_period != 0;
+}
+
+/*
+ * Return 1 for a software event, 0 for a hardware event
+ */
+static inline int is_software_event(struct perf_event *event)
+{
+ return event->pmu->task_ctx_nr == perf_sw_context;
+}
+
+extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
+extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
+
+#ifndef perf_arch_fetch_caller_regs
+static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
+#endif
+
+/*
+ * Take a snapshot of the regs. Skip ip and frame pointer to
+ * the nth caller. We only need a few of the regs:
+ * - ip for PERF_SAMPLE_IP
+ * - cs for user_mode() tests
+ * - bp for callchains
+ * - eflags, for future purposes, just in case
+ */
+static inline void perf_fetch_caller_regs(struct pt_regs *regs)
+{
+ memset(regs, 0, sizeof(*regs));
+
+ perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
+}
+
+static __always_inline void
+perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+{
+ if (static_key_false(&perf_swevent_enabled[event_id]))
+ __perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
+
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
+ if (static_key_false(&perf_swevent_enabled[event_id])) {
+ struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+ perf_fetch_caller_regs(regs);
+ ___perf_sw_event(event_id, nr, regs, addr);
+ }
+}
+
+extern struct static_key_deferred perf_sched_events;
+
+static inline void perf_event_task_sched_in(struct task_struct *prev,
+ struct task_struct *task)
+{
+ if (static_key_false(&perf_sched_events.key))
+ __perf_event_task_sched_in(prev, task);
+}
+
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+ struct task_struct *next)
+{
+ perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
+
+ if (static_key_false(&perf_sched_events.key))
+ __perf_event_task_sched_out(prev, next);
+}
+
+static inline u64 __perf_event_count(struct perf_event *event)
+{
+ return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
+
+extern void perf_event_mmap(struct vm_area_struct *vma);
+extern struct perf_guest_info_callbacks *perf_guest_cbs;
+extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+
+extern void perf_event_exec(void);
+extern void perf_event_comm(struct task_struct *tsk, bool exec);
+extern void perf_event_fork(struct task_struct *tsk);
+
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+
+static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+ if (entry->nr < PERF_MAX_STACK_DEPTH)
+ entry->ip[entry->nr++] = ip;
+}
+
+extern int sysctl_perf_event_paranoid;
+extern int sysctl_perf_event_mlock;
+extern int sysctl_perf_event_sample_rate;
+extern int sysctl_perf_cpu_time_max_percent;
+
+extern void perf_sample_event_took(u64 sample_len_ns);
+
+extern int perf_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+
+static inline bool perf_paranoid_tracepoint_raw(void)
+{
+ return sysctl_perf_event_paranoid > -1;
+}
+
+static inline bool perf_paranoid_cpu(void)
+{
+ return sysctl_perf_event_paranoid > 0;
+}
+
+static inline bool perf_paranoid_kernel(void)
+{
+ return sysctl_perf_event_paranoid > 1;
+}
+
+extern void perf_event_init(void);
+extern void perf_tp_event(u64 addr, u64 count, void *record,
+ int entry_size, struct pt_regs *regs,
+ struct hlist_head *head, int rctx,
+ struct task_struct *task);
+extern void perf_bp_event(struct perf_event *event, void *data);
+
+#ifndef perf_misc_flags
+# define perf_misc_flags(regs) \
+ (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL)
+# define perf_instruction_pointer(regs) instruction_pointer(regs)
+#endif
+
+static inline bool has_branch_stack(struct perf_event *event)
+{
+ return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
+}
+
+static inline bool needs_branch_stack(struct perf_event *event)
+{
+ return event->attr.branch_sample_type != 0;
+}
+
+static inline bool has_aux(struct perf_event *event)
+{
+ return event->pmu->setup_aux;
+}
+
+extern int perf_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event, unsigned int size);
+extern void perf_output_end(struct perf_output_handle *handle);
+extern unsigned int perf_output_copy(struct perf_output_handle *handle,
+ const void *buf, unsigned int len);
+extern unsigned int perf_output_skip(struct perf_output_handle *handle,
+ unsigned int len);
+extern int perf_swevent_get_recursion_context(void);
+extern void perf_swevent_put_recursion_context(int rctx);
+extern u64 perf_swevent_set_period(struct perf_event *event);
+extern void perf_event_enable(struct perf_event *event);
+extern void perf_event_disable(struct perf_event *event);
+extern int __perf_event_disable(void *info);
+extern void perf_event_task_tick(void);
+#else /* !CONFIG_PERF_EVENTS: */
+static inline void *
+perf_aux_output_begin(struct perf_output_handle *handle,
+ struct perf_event *event) { return NULL; }
+static inline void
+perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ bool truncated) { }
+static inline int
+perf_aux_output_skip(struct perf_output_handle *handle,
+ unsigned long size) { return -EINVAL; }
+static inline void *
+perf_get_aux(struct perf_output_handle *handle) { return NULL; }
+static inline void
+perf_event_task_sched_in(struct task_struct *prev,
+ struct task_struct *task) { }
+static inline void
+perf_event_task_sched_out(struct task_struct *prev,
+ struct task_struct *next) { }
+static inline int perf_event_init_task(struct task_struct *child) { return 0; }
+static inline void perf_event_exit_task(struct task_struct *child) { }
+static inline void perf_event_free_task(struct task_struct *task) { }
+static inline void perf_event_delayed_put(struct task_struct *task) { }
+static inline void perf_event_print_debug(void) { }
+static inline int perf_event_task_disable(void) { return -EINVAL; }
+static inline int perf_event_task_enable(void) { return -EINVAL; }
+static inline int perf_event_refresh(struct perf_event *event, int refresh)
+{
+ return -EINVAL;
+}
+
+static inline void
+perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
+static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { }
+static inline void
+perf_bp_event(struct perf_event *event, void *data) { }
+
+static inline int perf_register_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+static inline int perf_unregister_guest_info_callbacks
+(struct perf_guest_info_callbacks *callbacks) { return 0; }
+
+static inline void perf_event_mmap(struct vm_area_struct *vma) { }
+static inline void perf_event_exec(void) { }
+static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
+static inline void perf_event_fork(struct task_struct *tsk) { }
+static inline void perf_event_init(void) { }
+static inline int perf_swevent_get_recursion_context(void) { return -1; }
+static inline void perf_swevent_put_recursion_context(int rctx) { }
+static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; }
+static inline void perf_event_enable(struct perf_event *event) { }
+static inline void perf_event_disable(struct perf_event *event) { }
+static inline int __perf_event_disable(void *info) { return -1; }
+static inline void perf_event_task_tick(void) { }
+#endif
+
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
+extern bool perf_event_can_stop_tick(void);
+#else
+static inline bool perf_event_can_stop_tick(void) { return true; }
+#endif
+
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+extern void perf_restore_debug_store(void);
+#else
+static inline void perf_restore_debug_store(void) { }
+#endif
+
+#define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
+
+/*
+ * This has to have a higher priority than migration_notifier in sched/core.c.
+ */
+#define perf_cpu_notifier(fn) \
+do { \
+ static struct notifier_block fn##_nb = \
+ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
+ unsigned long cpu = smp_processor_id(); \
+ unsigned long flags; \
+ \
+ cpu_notifier_register_begin(); \
+ fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \
+ (void *)(unsigned long)cpu); \
+ local_irq_save(flags); \
+ fn(&fn##_nb, (unsigned long)CPU_STARTING, \
+ (void *)(unsigned long)cpu); \
+ local_irq_restore(flags); \
+ fn(&fn##_nb, (unsigned long)CPU_ONLINE, \
+ (void *)(unsigned long)cpu); \
+ __register_cpu_notifier(&fn##_nb); \
+ cpu_notifier_register_done(); \
+} while (0)
+
+/*
+ * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
+ * callback for already online CPUs.
+ */
+#define __perf_cpu_notifier(fn) \
+do { \
+ static struct notifier_block fn##_nb = \
+ { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
+ \
+ __register_cpu_notifier(&fn##_nb); \
+} while (0)
+
+struct perf_pmu_events_attr {
+ struct device_attribute attr;
+ u64 id;
+ const char *event_str;
+};
+
+ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
+ char *page);
+
+#define PMU_EVENT_ATTR(_name, _var, _id, _show) \
+static struct perf_pmu_events_attr _var = { \
+ .attr = __ATTR(_name, 0444, _show, NULL), \
+ .id = _id, \
+};
+
+#define PMU_EVENT_ATTR_STRING(_name, _var, _str) \
+static struct perf_pmu_events_attr _var = { \
+ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = _str, \
+};
+
+#define PMU_FORMAT_ATTR(_name, _format) \
+static ssize_t \
+_name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+ \
+static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
+
+#endif /* _LINUX_PERF_EVENT_H */
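
A hedged sketch of the in-kernel counter API declared above (illustrative only; the sample period, CPU choice and cycle_* names are arbitrary, and <linux/err.h> is assumed for IS_ERR()/PTR_ERR()):

static struct perf_event *cycle_event;

static void cycle_overflow(struct perf_event *event,
			   struct perf_sample_data *data,
			   struct pt_regs *regs)
{
	/* runs in NMI/IRQ context each time sample_period cycles elapse */
}

static int cycle_counter_start(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
		.pinned		= 1,
	};

	cycle_event = perf_event_create_kernel_counter(&attr, cpu, NULL,
						       cycle_overflow, NULL);
	return IS_ERR(cycle_event) ? PTR_ERR(cycle_event) : 0;
}

static u64 cycle_counter_read(void)
{
	u64 enabled, running;

	return perf_event_read_value(cycle_event, &enabled, &running);
}

static void cycle_counter_stop(void)
{
	perf_event_release_kernel(cycle_event);
}
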
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
new file mode 100644
index 000000000..a5f98d53d
--- /dev/null
+++ b/include/linux/perf_regs.h
@@ -0,0 +1,41 @@
+#ifndef _LINUX_PERF_REGS_H
+#define _LINUX_PERF_REGS_H
+
+struct perf_regs {
+ __u64 abi;
+ struct pt_regs *regs;
+};
+
+#ifdef CONFIG_HAVE_PERF_REGS
+#include <asm/perf_regs.h>
+u64 perf_reg_value(struct pt_regs *regs, int idx);
+int perf_reg_validate(u64 mask);
+u64 perf_reg_abi(struct task_struct *task);
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy);
+#else
+static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ return 0;
+}
+
+static inline int perf_reg_validate(u64 mask)
+{
+ return mask ? -ENOSYS : 0;
+}
+
+static inline u64 perf_reg_abi(struct task_struct *task)
+{
+ return PERF_SAMPLE_REGS_ABI_NONE;
+}
+
+static inline void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
+#endif /* CONFIG_HAVE_PERF_REGS */
+#endif /* _LINUX_PERF_REGS_H */
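
A hedged sketch of how a sample writer consumes perf_reg_value(), modeled on the core sample-output path (illustrative only; for_each_set_bit() and BITS_PER_BYTE are assumed from <linux/bitops.h>):

static void emit_sampled_regs(struct perf_output_handle *handle,
			      struct pt_regs *regs, u64 mask)
{
	int bit;

	for_each_set_bit(bit, (const unsigned long *)&mask,
			 sizeof(mask) * BITS_PER_BYTE) {
		u64 val = perf_reg_value(regs, bit);	/* arch-specific lookup */

		perf_output_put(handle, val);
	}
}
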
diff --git a/include/linux/personality.h b/include/linux/personality.h
new file mode 100644
index 000000000..aeb7892b2
--- /dev/null
+++ b/include/linux/personality.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_PERSONALITY_H
+#define _LINUX_PERSONALITY_H
+
+#include <uapi/linux/personality.h>
+
+/*
+ * Return the base personality without flags.
+ */
+#define personality(pers) (pers & PER_MASK)
+
+/*
+ * Change personality of the currently running process.
+ */
+#define set_personality(pers) (current->personality = (pers))
+
+#endif /* _LINUX_PERSONALITY_H */
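
A tiny sketch of the two helpers above (illustrative only; assumes <linux/sched.h> for current and the PER_* constants from the uapi header):

static void force_linux_personality(void)
{
	unsigned int pers = current->personality;

	if (personality(pers) != PER_LINUX)	/* base type, flag bits stripped */
		set_personality(PER_LINUX | (pers & ~PER_MASK));	/* keep flags */
}
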
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
new file mode 100644
index 000000000..764663722
--- /dev/null
+++ b/include/linux/pfn.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_PFN_H_
+#define _LINUX_PFN_H_
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+
+#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
+
+#endif
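
A tiny worked example of the macros above (illustrative only):

static unsigned long pages_spanned(phys_addr_t start, phys_addr_t end)
{
	/* with 4K pages, start = 0x1800 and end = 0x3200 span PFNs 1..3 */
	return PFN_UP(end) - PFN_DOWN(start);
}
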
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
new file mode 100644
index 000000000..f691b04fc
--- /dev/null
+++ b/include/linux/phonet.h
@@ -0,0 +1,40 @@
+/**
+ * file phonet.h
+ *
+ * Phonet sockets kernel interface
+ *
+ * Copyright (C) 2008 Nokia Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+#ifndef LINUX_PHONET_H
+#define LINUX_PHONET_H
+
+#include <uapi/linux/phonet.h>
+
+#define SIOCPNGAUTOCONF (SIOCDEVPRIVATE + 0)
+
+struct if_phonet_autoconf {
+ uint8_t device;
+};
+
+struct if_phonet_req {
+ char ifr_phonet_name[16];
+ union {
+ struct if_phonet_autoconf ifru_phonet_autoconf;
+ } ifr_ifru;
+};
+#define ifr_phonet_autoconf ifr_ifru.ifru_phonet_autoconf
+#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
new file mode 100644
index 000000000..685809835
--- /dev/null
+++ b/include/linux/phy.h
@@ -0,0 +1,813 @@
+/*
+ * Framework and drivers for configuring and reading different PHYs
+ * Based on code in sungem_phy.c and gianfar_phy.c
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __PHY_H
+#define __PHY_H
+
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/mod_devicetable.h>
+
+#include <linux/atomic.h>
+
+#define PHY_DEFAULT_FEATURES (SUPPORTED_Autoneg | \
+ SUPPORTED_TP | \
+ SUPPORTED_MII)
+
+#define PHY_10BT_FEATURES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full)
+
+#define PHY_100BT_FEATURES (SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full)
+
+#define PHY_1000BT_FEATURES (SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full)
+
+#define PHY_BASIC_FEATURES (PHY_10BT_FEATURES | \
+ PHY_100BT_FEATURES | \
+ PHY_DEFAULT_FEATURES)
+
+#define PHY_GBIT_FEATURES (PHY_BASIC_FEATURES | \
+ PHY_1000BT_FEATURES)
+
+
+/*
+ * Set phydev->irq to PHY_POLL if interrupts are not supported,
+ * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
+ * the attached driver handles the interrupt
+ */
+#define PHY_POLL -1
+#define PHY_IGNORE_INTERRUPT -2
+
+#define PHY_HAS_INTERRUPT 0x00000001
+#define PHY_HAS_MAGICANEG 0x00000002
+#define PHY_IS_INTERNAL 0x00000004
+
+/* Interface Mode definitions */
+typedef enum {
+ PHY_INTERFACE_MODE_NA,
+ PHY_INTERFACE_MODE_MII,
+ PHY_INTERFACE_MODE_GMII,
+ PHY_INTERFACE_MODE_SGMII,
+ PHY_INTERFACE_MODE_TBI,
+ PHY_INTERFACE_MODE_REVMII,
+ PHY_INTERFACE_MODE_RMII,
+ PHY_INTERFACE_MODE_RGMII,
+ PHY_INTERFACE_MODE_RGMII_ID,
+ PHY_INTERFACE_MODE_RGMII_RXID,
+ PHY_INTERFACE_MODE_RGMII_TXID,
+ PHY_INTERFACE_MODE_RTBI,
+ PHY_INTERFACE_MODE_SMII,
+ PHY_INTERFACE_MODE_XGMII,
+ PHY_INTERFACE_MODE_MOCA,
+ PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_MAX,
+} phy_interface_t;
+
+/**
+ * It maps 'enum phy_interface_t' found in include/linux/phy.h
+ * into the device tree binding of 'phy-mode', so that Ethernet
+ * device drivers can get the PHY interface mode from the device tree.
+ */
+static inline const char *phy_modes(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_NA:
+ return "";
+ case PHY_INTERFACE_MODE_MII:
+ return "mii";
+ case PHY_INTERFACE_MODE_GMII:
+ return "gmii";
+ case PHY_INTERFACE_MODE_SGMII:
+ return "sgmii";
+ case PHY_INTERFACE_MODE_TBI:
+ return "tbi";
+ case PHY_INTERFACE_MODE_REVMII:
+ return "rev-mii";
+ case PHY_INTERFACE_MODE_RMII:
+ return "rmii";
+ case PHY_INTERFACE_MODE_RGMII:
+ return "rgmii";
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ return "rgmii-id";
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ return "rgmii-rxid";
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return "rgmii-txid";
+ case PHY_INTERFACE_MODE_RTBI:
+ return "rtbi";
+ case PHY_INTERFACE_MODE_SMII:
+ return "smii";
+ case PHY_INTERFACE_MODE_XGMII:
+ return "xgmii";
+ case PHY_INTERFACE_MODE_MOCA:
+ return "moca";
+ case PHY_INTERFACE_MODE_QSGMII:
+ return "qsgmii";
+ default:
+ return "unknown";
+ }
+}
+
+
+#define PHY_INIT_TIMEOUT 100000
+#define PHY_STATE_TIME 1
+#define PHY_FORCE_TIMEOUT 10
+#define PHY_AN_TIMEOUT 10
+
+#define PHY_MAX_ADDR 32
+
+/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
+#define PHY_ID_FMT "%s:%02x"
+
+/*
+ * Need to be a little smaller than phydev->dev.bus_id to leave room
+ * for the ":%02x"
+ */
+#define MII_BUS_ID_SIZE (20 - 3)
+
+/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
+ IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
+#define MII_ADDR_C45 (1<<30)
+
+struct device;
+struct sk_buff;
+
+/*
+ * The Bus class for PHYs. Devices which provide access to
+ * PHYs should register using this structure
+ */
+struct mii_bus {
+ const char *name;
+ char id[MII_BUS_ID_SIZE];
+ void *priv;
+ int (*read)(struct mii_bus *bus, int phy_id, int regnum);
+ int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
+ int (*reset)(struct mii_bus *bus);
+
+ /*
+ * A lock to ensure that only one thing can read/write
+ * the MDIO bus at a time
+ */
+ struct mutex mdio_lock;
+
+ struct device *parent;
+ enum {
+ MDIOBUS_ALLOCATED = 1,
+ MDIOBUS_REGISTERED,
+ MDIOBUS_UNREGISTERED,
+ MDIOBUS_RELEASED,
+ } state;
+ struct device dev;
+
+ /* list of all PHYs on bus */
+ struct phy_device *phy_map[PHY_MAX_ADDR];
+
+ /* PHY addresses to be ignored when probing */
+ u32 phy_mask;
+
+ /*
+ * Pointer to an array of interrupts, each PHY's
+ * interrupt at the index matching its address
+ */
+ int *irq;
+};
+#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
+
+struct mii_bus *mdiobus_alloc_size(size_t);
+static inline struct mii_bus *mdiobus_alloc(void)
+{
+ return mdiobus_alloc_size(0);
+}
+
+int mdiobus_register(struct mii_bus *bus);
+void mdiobus_unregister(struct mii_bus *bus);
+void mdiobus_free(struct mii_bus *bus);
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
+static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
+{
+ return devm_mdiobus_alloc_size(dev, 0);
+}
+
+void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
+struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
+int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
+int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
+
+
+#define PHY_INTERRUPT_DISABLED 0x0
+#define PHY_INTERRUPT_ENABLED 0x80000000
+
+/* PHY state machine states:
+ *
+ * DOWN: PHY device and driver are not ready for anything. probe
+ * should be called if and only if the PHY is in this state,
+ * given that the PHY device exists.
+ * - PHY driver probe function will, depending on the PHY, set
+ * the state to STARTING or READY
+ *
+ * STARTING: PHY device is coming up, and the ethernet driver is
+ * not ready. PHY drivers may set this in the probe function.
+ * If they do, they are responsible for making sure the state is
+ * eventually set to indicate whether the PHY is UP or READY,
+ * depending on the state when the PHY is done starting up.
+ * - PHY driver will set the state to READY
+ * - start will set the state to PENDING
+ *
+ * READY: PHY is ready to send and receive packets, but the
+ * controller is not. By default, PHYs which do not implement
+ * probe will be set to this state by phy_probe(). If the PHY
+ * driver knows the PHY is ready, and the PHY state is STARTING,
+ * then it sets this STATE.
+ * - start will set the state to UP
+ *
+ * PENDING: PHY device is coming up, but the ethernet driver is
+ * ready. phy_start will set this state if the PHY state is
+ * STARTING.
+ * - PHY driver will set the state to UP when the PHY is ready
+ *
+ * UP: The PHY and attached device are ready to do work.
+ * Interrupts should be started here.
+ * - timer moves to AN
+ *
+ * AN: The PHY is currently negotiating the link state. Link is
+ * therefore down for now. phy_timer will set this state when it
+ * detects the state is UP. config_aneg will set this state
+ * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
+ * - If autonegotiation finishes, but there's no link, it sets
+ * the state to NOLINK.
+ * - If aneg finishes with link, it sets the state to RUNNING,
+ * and calls adjust_link
+ * - If autonegotiation did not finish after an arbitrary amount
+ * of time, autonegotiation should be tried again if the PHY
+ * supports "magic" autonegotiation (back to AN)
+ * - If it didn't finish, and no magic_aneg, move to FORCING.
+ *
+ * NOLINK: PHY is up, but not currently plugged in.
+ * - If the timer notes that the link comes back, we move to RUNNING
+ * - config_aneg moves to AN
+ * - phy_stop moves to HALTED
+ *
+ * FORCING: PHY is being configured with forced settings
+ * - if link is up, move to RUNNING
+ * - If link is down, we drop to the next highest setting, and
+ * retry (FORCING) after a timeout
+ * - phy_stop moves to HALTED
+ *
+ * RUNNING: PHY is currently up, running, and possibly sending
+ * and/or receiving packets
+ * - timer will set CHANGELINK if we're polling (this ensures the
+ * link state is polled every other cycle of this state machine,
+ * which makes it every other second)
+ * - irq will set CHANGELINK
+ * - config_aneg will set AN
+ * - phy_stop moves to HALTED
+ *
+ * CHANGELINK: PHY experienced a change in link state
+ * - timer moves to RUNNING if link
+ * - timer moves to NOLINK if the link is down
+ * - phy_stop moves to HALTED
+ *
+ * HALTED: PHY is up, but no polling or interrupts are done. Or
+ * PHY is in an error state.
+ *
+ * - phy_start moves to RESUMING
+ *
+ * RESUMING: PHY was halted, but now wants to run again.
+ * - If we are forcing, or aneg is done, timer moves to RUNNING
+ * - If aneg is not done, timer moves to AN
+ * - phy_stop moves to HALTED
+ */
+enum phy_state {
+ PHY_DOWN = 0,
+ PHY_STARTING,
+ PHY_READY,
+ PHY_PENDING,
+ PHY_UP,
+ PHY_AN,
+ PHY_RUNNING,
+ PHY_NOLINK,
+ PHY_FORCING,
+ PHY_CHANGELINK,
+ PHY_HALTED,
+ PHY_RESUMING
+};
+
+/**
+ * struct phy_c45_device_ids - 802.3-c45 Device Identifiers
+ * @devices_in_package: Bit vector of devices present.
+ * @device_ids: The device identifier for each present device.
+ */
+struct phy_c45_device_ids {
+ u32 devices_in_package;
+ u32 device_ids[8];
+};
+
+/* phy_device: An instance of a PHY
+ *
+ * drv: Pointer to the driver for this PHY instance
+ * bus: Pointer to the bus this PHY is on
+ * dev: driver model device structure for this PHY
+ * phy_id: UID for this device found during discovery
+ * c45_ids: 802.3-c45 Device Identifiers if is_c45.
+ * is_c45: Set to true if this phy uses clause 45 addressing.
+ * is_internal: Set to true if this phy is internal to a MAC.
+ * has_fixups: Set to true if this phy has fixups/quirks.
+ * suspended: Set to true if this phy has been suspended successfully.
+ * state: state of the PHY for management purposes
+ * dev_flags: Device-specific flags used by the PHY driver.
+ * addr: Bus address of PHY
+ * link_timeout: The number of timer firings to wait before giving up
+ * on the current attempt at acquiring a link
+ * irq: IRQ number of the PHY's interrupt (-1 if none)
+ * phy_timer: The timer for handling the state machine
+ * phy_queue: A work_queue for the interrupt
+ * attached_dev: The attached enet driver's device instance ptr
+ * adjust_link: Callback for the enet controller to respond to
+ * changes in the link state.
+ *
+ * speed, duplex, pause, supported, advertising, lp_advertising,
+ * and autoneg are used like in mii_if_info
+ *
+ * interrupts currently only supports enabled or disabled,
+ * but could be changed in the future to support enabling
+ * and disabling specific interrupts
+ *
+ * Contains some infrastructure for polling and interrupt
+ * handling, as well as handling shifts in PHY hardware state
+ */
+struct phy_device {
+ /* Information about the PHY type */
+ /* And management functions */
+ struct phy_driver *drv;
+
+ struct mii_bus *bus;
+
+ struct device dev;
+
+ u32 phy_id;
+
+ struct phy_c45_device_ids c45_ids;
+ bool is_c45;
+ bool is_internal;
+ bool has_fixups;
+ bool suspended;
+
+ enum phy_state state;
+
+ u32 dev_flags;
+
+ phy_interface_t interface;
+
+ /* Bus address of the PHY (0-31) */
+ int addr;
+
+ /*
+ * forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+
+ /* The most recently read link state */
+ int link;
+
+ /* Enabled Interrupts */
+ u32 interrupts;
+
+ /* Union of PHY and Attached devices' supported modes */
+ /* See mii.h for more info */
+ u32 supported;
+ u32 advertising;
+ u32 lp_advertising;
+
+ int autoneg;
+
+ int link_timeout;
+
+ /*
+ * Interrupt number for this PHY
+ * -1 means no interrupt
+ */
+ int irq;
+
+ /* private data pointer */
+ /* For use by PHYs to maintain extra state */
+ void *priv;
+
+ /* Interrupt and Polling infrastructure */
+ struct work_struct phy_queue;
+ struct delayed_work state_queue;
+ atomic_t irq_disable;
+
+ struct mutex lock;
+
+ struct net_device *attached_dev;
+
+ void (*adjust_link)(struct net_device *dev);
+};
+#define to_phy_device(d) container_of(d, struct phy_device, dev)
+
+/* struct phy_driver: Driver structure for a particular PHY type
+ *
+ * phy_id: The result of reading the UID registers of this PHY
+ * type, and ANDing them with the phy_id_mask. This driver
+ * only works for PHYs with IDs which match this field
+ * name: The friendly name of this PHY type
+ * phy_id_mask: Defines the important bits of the phy_id
+ * features: A list of features (speed, duplex, etc) supported
+ * by this PHY
+ * flags: A bitfield defining certain other features this PHY
+ * supports (like interrupts)
+ * driver_data: static driver data
+ *
+ * The drivers must implement config_aneg and read_status. All
+ * other functions are optional. Note that none of these
+ * functions should be called in interrupt context. The goal is
+ * for the bus read/write functions to be able to block when the
+ * bus transaction is happening, and be freed up by an interrupt
+ * (The MPC85xx has this ability, though it is not currently
+ * supported in the driver).
+ */
+struct phy_driver {
+ u32 phy_id;
+ char *name;
+ unsigned int phy_id_mask;
+ u32 features;
+ u32 flags;
+ const void *driver_data;
+
+ /*
+ * Called to issue a PHY software reset
+ */
+ int (*soft_reset)(struct phy_device *phydev);
+
+ /*
+ * Called to initialize the PHY,
+ * including after a reset
+ */
+ int (*config_init)(struct phy_device *phydev);
+
+ /*
+ * Called during discovery. Used to set
+ * up device-specific structures, if any
+ */
+ int (*probe)(struct phy_device *phydev);
+
+ /* PHY Power Management */
+ int (*suspend)(struct phy_device *phydev);
+ int (*resume)(struct phy_device *phydev);
+
+ /*
+ * Configures the advertisement and resets
+ * autonegotiation if phydev->autoneg is on,
+ * forces the speed to the current settings in phydev
+ * if phydev->autoneg is off
+ */
+ int (*config_aneg)(struct phy_device *phydev);
+
+ /* Determines the auto negotiation result */
+ int (*aneg_done)(struct phy_device *phydev);
+
+ /* Determines the negotiated speed and duplex */
+ int (*read_status)(struct phy_device *phydev);
+
+ /* Clears any pending interrupts */
+ int (*ack_interrupt)(struct phy_device *phydev);
+
+ /* Enables or disables interrupts */
+ int (*config_intr)(struct phy_device *phydev);
+
+ /*
+ * Checks if the PHY generated an interrupt.
+ * For multi-PHY devices with shared PHY interrupt pin
+ */
+ int (*did_interrupt)(struct phy_device *phydev);
+
+ /* Clears up any memory if needed */
+ void (*remove)(struct phy_device *phydev);
+
+ /* Returns true if this is a suitable driver for the given
+ * phydev. If NULL, matching is based on phy_id and
+ * phy_id_mask.
+ */
+ int (*match_phy_device)(struct phy_device *phydev);
+
+ /* Handles ethtool queries for hardware time stamping. */
+ int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
+
+ /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
+ int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
+
+ /*
+ * Requests an Rx timestamp for 'skb'. If the skb is accepted,
+ * the phy driver promises to deliver it using netif_rx() as
+ * soon as a timestamp becomes available. One of the
+ * PTP_CLASS_ values is passed in 'type'. The function must
+ * return true if the skb is accepted for delivery.
+ */
+ bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
+
+ /*
+ * Requests a Tx timestamp for 'skb'. The phy driver promises
+ * to deliver it using skb_complete_tx_timestamp() as soon as a
+ * timestamp becomes available. One of the PTP_CLASS_ values
+ * is passed in 'type'.
+ */
+ void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
+
+ /* Some devices (e.g. qnap TS-119P II) require PHY register changes to
+ * enable Wake on LAN, so set_wol is provided to be called in the
+ * ethernet driver's set_wol function. */
+ int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
+
+ /* See set_wol, but for checking whether Wake on LAN is enabled. */
+ void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
+
+ /*
+ * Called to inform a PHY device driver when the core is about to
+ * change the link state. This callback is supposed to be used as
+ * a fixup hook for drivers that need to take action when the link
+ * state changes. Drivers are by no means allowed to mess with the
+ * PHY device structure in their implementations.
+ */
+ void (*link_change_notify)(struct phy_device *dev);
+
+ /* A function provided by a phy-specific driver to override the
+ * PHY driver framework support for reading an MMD register
+ * from the PHY. If not supported, return -1. This function is
+ * optional for phy-specific drivers; if not provided, the
+ * default MMD read function is used by the PHY framework.
+ */
+ int (*read_mmd_indirect)(struct phy_device *dev, int ptrad,
+ int devnum, int regnum);
+
+ /* A function provided by a phy-specific driver to override the
+ * PHY driver framework support for writing an MMD register
+ * on the PHY. This function is optional for phy-specific drivers;
+ * if not provided, the default MMD write function is used by
+ * the PHY framework.
+ */
+ void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
+ int devnum, int regnum, u32 val);
+
+ /* Get the size and type of the eeprom contained within a plug-in
+ * module */
+ int (*module_info)(struct phy_device *dev,
+ struct ethtool_modinfo *modinfo);
+
+ /* Get the eeprom information from the plug-in module */
+ int (*module_eeprom)(struct phy_device *dev,
+ struct ethtool_eeprom *ee, u8 *data);
+
+ struct device_driver driver;
+};
+#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
+
+#define PHY_ANY_ID "MATCH ANY PHY"
+#define PHY_ANY_UID 0xffffffff
+
+/* A Structure for boards to register fixups with the PHY Lib */
+struct phy_fixup {
+ struct list_head list;
+ char bus_id[20];
+ u32 phy_uid;
+ u32 phy_uid_mask;
+ int (*run)(struct phy_device *phydev);
+};
+
+/**
+ * phy_read_mmd - Convenience function for reading a register
+ * from an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to read from
+ * @regnum: The register on the MMD to read
+ *
+ * Same rules as for phy_read().
+ */
+static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
+{
+ if (!phydev->is_c45)
+ return -EOPNOTSUPP;
+
+ return mdiobus_read(phydev->bus, phydev->addr,
+ MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff));
+}
+
+/**
+ * phy_read_mmd_indirect - reads data from the MMD registers
+ * @phydev: The PHY device
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ *
+ * Description: reads data from the MMD registers (clause 22 to access
+ * clause 45) of the specified phy address.
+ */
+int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
+ int devad, int addr);
+
+/**
+ * phy_read - Convenience function for reading a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to read
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+static inline int phy_read(struct phy_device *phydev, u32 regnum)
+{
+ return mdiobus_read(phydev->bus, phydev->addr, regnum);
+}
+
+/**
+ * phy_write - Convenience function for writing a given PHY register
+ * @phydev: the phy_device struct
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+ return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
+}
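+
+/*
+ * Example (illustrative only, assuming a valid phydev and process
+ * context): read-modify-write of the MII control register.
+ *
+ *	int bmcr = phy_read(phydev, MII_BMCR);
+ *
+ *	if (bmcr >= 0)
+ *		phy_write(phydev, MII_BMCR, bmcr | BMCR_ANENABLE);
+ */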
+
+/**
+ * phy_interrupt_is_valid - Convenience function for testing a given PHY irq
+ * @phydev: the phy_device struct
+ *
+ * NOTE: must be kept in sync with addition/removal of PHY_POLL and
+ * PHY_IGNORE_INTERRUPT
+ */
+static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
+{
+ return phydev->irq != PHY_POLL && phydev->irq != PHY_IGNORE_INTERRUPT;
+}
+
+/**
+ * phy_is_internal - Convenience function for testing if a PHY is internal
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_is_internal(struct phy_device *phydev)
+{
+ return phydev->is_internal;
+}
+
+/**
+ * phy_write_mmd - Convenience function for writing a register
+ * on an MMD on a given PHY.
+ * @phydev: The phy_device struct
+ * @devad: The MMD to write to
+ * @regnum: The register on the MMD to write
+ * @val: value to write to @regnum
+ *
+ * Same rules as for phy_write().
+ */
+static inline int phy_write_mmd(struct phy_device *phydev, int devad,
+ u32 regnum, u16 val)
+{
+ if (!phydev->is_c45)
+ return -EOPNOTSUPP;
+
+ regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
+
+ return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
+}
+
+/**
+ * phy_write_mmd_indirect - writes data to the MMD registers
+ * @phydev: The PHY device
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ * @data: data to write in the MMD register
+ *
+ * Description: Write data to the MMD registers of the specified
+ * phy address.
+ */
+void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
+ int devad, int addr, u32 data);
+
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+ bool is_c45,
+ struct phy_c45_device_ids *c45_ids);
+struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
+int phy_device_register(struct phy_device *phy);
+int phy_init_hw(struct phy_device *phydev);
+int phy_suspend(struct phy_device *phydev);
+int phy_resume(struct phy_device *phydev);
+struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
+ phy_interface_t interface);
+struct phy_device *phy_find_first(struct mii_bus *bus);
+int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface);
+int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
+ void (*handler)(struct net_device *),
+ phy_interface_t interface);
+struct phy_device *phy_connect(struct net_device *dev, const char *bus_id,
+ void (*handler)(struct net_device *),
+ phy_interface_t interface);
+void phy_disconnect(struct phy_device *phydev);
+void phy_detach(struct phy_device *phydev);
+void phy_start(struct phy_device *phydev);
+void phy_stop(struct phy_device *phydev);
+int phy_start_aneg(struct phy_device *phydev);
+
+int phy_stop_interrupts(struct phy_device *phydev);
+
+static inline int phy_read_status(struct phy_device *phydev)
+{
+ return phydev->drv->read_status(phydev);
+}
+
+int genphy_config_init(struct phy_device *phydev);
+int genphy_setup_forced(struct phy_device *phydev);
+int genphy_restart_aneg(struct phy_device *phydev);
+int genphy_config_aneg(struct phy_device *phydev);
+int genphy_aneg_done(struct phy_device *phydev);
+int genphy_update_link(struct phy_device *phydev);
+int genphy_read_status(struct phy_device *phydev);
+int genphy_suspend(struct phy_device *phydev);
+int genphy_resume(struct phy_device *phydev);
+int genphy_soft_reset(struct phy_device *phydev);
+void phy_driver_unregister(struct phy_driver *drv);
+void phy_drivers_unregister(struct phy_driver *drv, int n);
+int phy_driver_register(struct phy_driver *new_driver);
+int phy_drivers_register(struct phy_driver *new_driver, int n);
+void phy_state_machine(struct work_struct *work);
+void phy_change(struct work_struct *work);
+void phy_mac_interrupt(struct phy_device *phydev, int new_link);
+void phy_start_machine(struct phy_device *phydev);
+void phy_stop_machine(struct phy_device *phydev);
+int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
+int phy_start_interrupts(struct phy_device *phydev);
+void phy_print_status(struct phy_device *phydev);
+void phy_device_free(struct phy_device *phydev);
+
+int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *));
+int phy_register_fixup_for_id(const char *bus_id,
+ int (*run)(struct phy_device *));
+int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *));
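+
+/*
+ * Example (illustrative; the UID, mask and register write below are
+ * hypothetical): a board registers a fixup that runs for matching PHYs
+ * when they are found on the bus.
+ *
+ *	static int board_phy_fixup(struct phy_device *phydev)
+ *	{
+ *		return phy_write(phydev, 0x17, 0x1234);
+ *	}
+ *
+ *	static int __init board_init(void)
+ *	{
+ *		return phy_register_fixup_for_uid(0x01234567, 0xfffffff0,
+ *						  board_phy_fixup);
+ *	}
+ */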
+
+int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
+int phy_get_eee_err(struct phy_device *phydev);
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
+int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
+void phy_ethtool_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol);
+
+int __init mdio_bus_init(void);
+void mdio_bus_exit(void);
+
+extern struct bus_type mdio_bus_type;
+
+/**
+ * module_phy_driver() - Helper macro for registering PHY drivers
+ * @__phy_drivers: array of PHY drivers to register
+ *
+ * Helper macro for PHY drivers which do not do anything special in module
+ * init/exit. Each module may only use this macro once, and calling it
+ * replaces module_init() and module_exit().
+ */
+#define phy_module_driver(__phy_drivers, __count) \
+static int __init phy_module_init(void) \
+{ \
+ return phy_drivers_register(__phy_drivers, __count); \
+} \
+module_init(phy_module_init); \
+static void __exit phy_module_exit(void) \
+{ \
+ phy_drivers_unregister(__phy_drivers, __count); \
+} \
+module_exit(phy_module_exit)
+
+#define module_phy_driver(__phy_drivers) \
+ phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
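+
+/*
+ * Example (illustrative; the IDs and strings are hypothetical): a minimal
+ * PHY driver that relies on the genphy helpers declared above and
+ * registers itself with module_phy_driver().
+ *
+ *	static struct phy_driver example_phy_drivers[] = { {
+ *		.phy_id		= 0x01234567,
+ *		.phy_id_mask	= 0xfffffff0,
+ *		.name		= "Example PHY",
+ *		.features	= PHY_BASIC_FEATURES,
+ *		.config_aneg	= genphy_config_aneg,
+ *		.read_status	= genphy_read_status,
+ *		.driver		= { .owner = THIS_MODULE },
+ *	} };
+ *
+ *	module_phy_driver(example_phy_drivers);
+ */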
+
+#endif /* __PHY_H */
diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h
new file mode 100644
index 000000000..eb7d4a135
--- /dev/null
+++ b/include/linux/phy/omap_control_phy.h
@@ -0,0 +1,99 @@
+/*
+ * omap_control_phy.h - Header file for the PHY part of control module.
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __OMAP_CONTROL_PHY_H__
+#define __OMAP_CONTROL_PHY_H__
+
+enum omap_control_phy_type {
+ OMAP_CTRL_TYPE_OTGHS = 1, /* Mailbox OTGHS_CONTROL */
+ OMAP_CTRL_TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */
+ OMAP_CTRL_TYPE_PIPE3, /* PIPE3 PHY, DPLL & separate Rx/Tx power */
+ OMAP_CTRL_TYPE_PCIE, /* RX TX control of ACSPCIE */
+ OMAP_CTRL_TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */
+ OMAP_CTRL_TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */
+};
+
+struct omap_control_phy {
+ struct device *dev;
+
+ u32 __iomem *otghs_control;
+ u32 __iomem *power;
+ u32 __iomem *power_aux;
+ u32 __iomem *pcie_pcs;
+
+ struct clk *sys_clk;
+
+ enum omap_control_phy_type type;
+};
+
+enum omap_control_usb_mode {
+ USB_MODE_UNDEFINED = 0,
+ USB_MODE_HOST,
+ USB_MODE_DEVICE,
+ USB_MODE_DISCONNECT,
+};
+
+#define OMAP_CTRL_DEV_PHY_PD BIT(0)
+
+#define OMAP_CTRL_DEV_AVALID BIT(0)
+#define OMAP_CTRL_DEV_BVALID BIT(1)
+#define OMAP_CTRL_DEV_VBUSVALID BIT(2)
+#define OMAP_CTRL_DEV_SESSEND BIT(3)
+#define OMAP_CTRL_DEV_IDDIG BIT(4)
+
+#define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_MASK 0x003FC000
+#define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_CMD_SHIFT 0xE
+
+#define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_MASK 0xFFC00000
+#define OMAP_CTRL_PIPE3_PHY_PWRCTL_CLK_FREQ_SHIFT 0x16
+
+#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWERON 0x3
+#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0
+
+#define OMAP_CTRL_PCIE_PCS_MASK 0xff
+#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16
+
+#define OMAP_CTRL_USB2_PHY_PD BIT(28)
+
+#define AM437X_CTRL_USB2_PHY_PD BIT(0)
+#define AM437X_CTRL_USB2_OTG_PD BIT(1)
+#define AM437X_CTRL_USB2_OTGVDET_EN BIT(19)
+#define AM437X_CTRL_USB2_OTGSESSEND_EN BIT(20)
+
+#if IS_ENABLED(CONFIG_OMAP_CONTROL_PHY)
+void omap_control_phy_power(struct device *dev, int on);
+void omap_control_usb_set_mode(struct device *dev,
+ enum omap_control_usb_mode mode);
+void omap_control_pcie_pcs(struct device *dev, u8 delay);
+#else
+
+static inline void omap_control_phy_power(struct device *dev, int on)
+{
+}
+
+static inline void omap_control_usb_set_mode(struct device *dev,
+ enum omap_control_usb_mode mode)
+{
+}
+
+static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
+{
+}
+#endif
+
+#endif /* __OMAP_CONTROL_PHY_H__ */
diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h
new file mode 100644
index 000000000..dc2c541a6
--- /dev/null
+++ b/include/linux/phy/omap_usb.h
@@ -0,0 +1,77 @@
+/*
+ * omap_usb.h -- omap usb2 phy header file
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_OMAP_USB2_H
+#define __DRIVERS_OMAP_USB2_H
+
+#include <linux/io.h>
+#include <linux/usb/otg.h>
+
+struct usb_dpll_params {
+ u16 m;
+ u8 n;
+ u8 freq:3;
+ u8 sd;
+ u32 mf;
+};
+
+struct omap_usb {
+ struct usb_phy phy;
+ struct phy_companion *comparator;
+ void __iomem *pll_ctrl_base;
+ void __iomem *phy_base;
+ struct device *dev;
+ struct device *control_dev;
+ struct clk *wkupclk;
+ struct clk *optclk;
+ u8 flags;
+};
+
+struct usb_phy_data {
+ const char *label;
+ u8 flags;
+};
+
+/* Driver Flags */
+#define OMAP_USB2_HAS_START_SRP (1 << 0)
+#define OMAP_USB2_HAS_SET_VBUS (1 << 1)
+#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2)
+
+#define phy_to_omapusb(x) container_of((x), struct omap_usb, phy)
+
+#if defined(CONFIG_OMAP_USB2) || defined(CONFIG_OMAP_USB2_MODULE)
+extern int omap_usb2_set_comparator(struct phy_companion *comparator);
+#else
+static inline int omap_usb2_set_comparator(struct phy_companion *comparator)
+{
+ return -ENODEV;
+}
+#endif
+
+static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset)
+{
+ return __raw_readl(addr + offset);
+}
+
+static inline void omap_usb_writel(void __iomem *addr, unsigned offset,
+ u32 data)
+{
+ __raw_writel(data, addr + offset);
+}
+
+#endif /* __DRIVERS_OMAP_USB2_H */
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
new file mode 100644
index 000000000..9d18e9f94
--- /dev/null
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef PHY_QCOM_UFS_H_
+#define PHY_QCOM_UFS_H_
+
+#include "phy.h"
+
+/**
+ * ufs_qcom_phy_enable_ref_clk() - Enable the phy
+ * ref clock.
+ * @phy: reference to a generic phy
+ *
+ * returns 0 for success, and non-zero for error.
+ */
+int ufs_qcom_phy_enable_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_disable_ref_clk() - Disable the phy
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_disable_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
+
+/**
+ * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
+ * ref clock.
+ * @phy: reference to a generic phy.
+ */
+void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
+
+int ufs_qcom_phy_enable_iface_clk(struct phy *phy);
+void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
+int ufs_qcom_phy_start_serdes(struct phy *phy);
+int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
+int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
+int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
+void ufs_qcom_phy_save_controller_version(struct phy *phy,
+ u8 major, u16 minor, u16 step);
+
+#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
new file mode 100644
index 000000000..a0197fa1b
--- /dev/null
+++ b/include/linux/phy/phy.h
@@ -0,0 +1,336 @@
+/*
+ * phy.h -- generic phy header file
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __DRIVERS_PHY_H
+#define __DRIVERS_PHY_H
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+struct phy;
+
+/**
+ * struct phy_ops - set of function pointers for performing phy operations
+ * @init: operation to be performed for initializing phy
+ * @exit: operation to be performed while exiting
+ * @power_on: powering on the phy
+ * @power_off: powering off the phy
+ * @owner: the module owner containing the ops
+ */
+struct phy_ops {
+ int (*init)(struct phy *phy);
+ int (*exit)(struct phy *phy);
+ int (*power_on)(struct phy *phy);
+ int (*power_off)(struct phy *phy);
+ struct module *owner;
+};
+
+/**
+ * struct phy_attrs - represents phy attributes
+ * @bus_width: Data path width implemented by PHY
+ */
+struct phy_attrs {
+ u32 bus_width;
+};
+
+/**
+ * struct phy - represents the phy device
+ * @dev: phy device
+ * @id: id of the phy device
+ * @ops: function pointers for performing phy operations
+ * @mutex: mutex to protect phy_ops
+ * @init_count: used to protect when the PHY is used by multiple consumers
+ * @power_count: used to protect when the PHY is used by multiple consumers
+ * @attrs: used to specify PHY specific attributes
+ * @pwr: power regulator associated with the phy
+ */
+struct phy {
+ struct device dev;
+ int id;
+ const struct phy_ops *ops;
+ struct mutex mutex;
+ int init_count;
+ int power_count;
+ struct phy_attrs attrs;
+ struct regulator *pwr;
+};
+
+/**
+ * struct phy_provider - represents the phy provider
+ * @dev: phy provider device
+ * @owner: the module owner of this phy provider
+ * @of_xlate: function pointer to obtain phy instance from phandle arguments
+ * @list: to maintain a linked list of PHY providers
+ */
+struct phy_provider {
+ struct device *dev;
+ struct module *owner;
+ struct list_head list;
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args);
+};
+
+struct phy_lookup {
+ struct list_head node;
+ const char *dev_id;
+ const char *con_id;
+ struct phy *phy;
+};
+
+#define to_phy(a) (container_of((a), struct phy, dev))
+
+#define of_phy_provider_register(dev, xlate) \
+ __of_phy_provider_register((dev), THIS_MODULE, (xlate))
+
+#define devm_of_phy_provider_register(dev, xlate) \
+ __devm_of_phy_provider_register((dev), THIS_MODULE, (xlate))
+
+static inline void phy_set_drvdata(struct phy *phy, void *data)
+{
+ dev_set_drvdata(&phy->dev, data);
+}
+
+static inline void *phy_get_drvdata(struct phy *phy)
+{
+ return dev_get_drvdata(&phy->dev);
+}
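+
+/*
+ * Typical consumer sequence (illustrative only; "usb2-phy" is a
+ * hypothetical consumer name): obtain, initialize and power on a phy.
+ *
+ *	phy = devm_phy_get(dev, "usb2-phy");
+ *	if (IS_ERR(phy))
+ *		return PTR_ERR(phy);
+ *
+ *	ret = phy_init(phy);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = phy_power_on(phy);
+ */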
+
+#if IS_ENABLED(CONFIG_GENERIC_PHY)
+int phy_pm_runtime_get(struct phy *phy);
+int phy_pm_runtime_get_sync(struct phy *phy);
+int phy_pm_runtime_put(struct phy *phy);
+int phy_pm_runtime_put_sync(struct phy *phy);
+void phy_pm_runtime_allow(struct phy *phy);
+void phy_pm_runtime_forbid(struct phy *phy);
+int phy_init(struct phy *phy);
+int phy_exit(struct phy *phy);
+int phy_power_on(struct phy *phy);
+int phy_power_off(struct phy *phy);
+static inline int phy_get_bus_width(struct phy *phy)
+{
+ return phy->attrs.bus_width;
+}
+static inline void phy_set_bus_width(struct phy *phy, int bus_width)
+{
+ phy->attrs.bus_width = bus_width;
+}
+struct phy *phy_get(struct device *dev, const char *string);
+struct phy *phy_optional_get(struct device *dev, const char *string);
+struct phy *devm_phy_get(struct device *dev, const char *string);
+struct phy *devm_phy_optional_get(struct device *dev, const char *string);
+struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
+ const char *con_id);
+void phy_put(struct phy *phy);
+void devm_phy_put(struct device *dev, struct phy *phy);
+struct phy *of_phy_get(struct device_node *np, const char *con_id);
+struct phy *of_phy_simple_xlate(struct device *dev,
+ struct of_phandle_args *args);
+struct phy *phy_create(struct device *dev, struct device_node *node,
+ const struct phy_ops *ops);
+struct phy *devm_phy_create(struct device *dev, struct device_node *node,
+ const struct phy_ops *ops);
+void phy_destroy(struct phy *phy);
+void devm_phy_destroy(struct device *dev, struct phy *phy);
+struct phy_provider *__of_phy_provider_register(struct device *dev,
+ struct module *owner, struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
+struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
+ struct module *owner, struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args));
+void of_phy_provider_unregister(struct phy_provider *phy_provider);
+void devm_of_phy_provider_unregister(struct device *dev,
+ struct phy_provider *phy_provider);
+int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id);
+void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id);
+#else
+static inline int phy_pm_runtime_get(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_pm_runtime_get_sync(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_pm_runtime_put(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_pm_runtime_put_sync(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline void phy_pm_runtime_allow(struct phy *phy)
+{
+ return;
+}
+
+static inline void phy_pm_runtime_forbid(struct phy *phy)
+{
+ return;
+}
+
+static inline int phy_init(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_exit(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_power_on(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_power_off(struct phy *phy)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
+static inline int phy_get_bus_width(struct phy *phy)
+{
+ return -ENOSYS;
+}
+
+static inline void phy_set_bus_width(struct phy *phy, int bus_width)
+{
+ return;
+}
+
+static inline struct phy *phy_get(struct device *dev, const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *phy_optional_get(struct device *dev,
+ const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *devm_phy_get(struct device *dev, const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *devm_phy_optional_get(struct device *dev,
+ const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *devm_of_phy_get(struct device *dev,
+ struct device_node *np,
+ const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void phy_put(struct phy *phy)
+{
+}
+
+static inline void devm_phy_put(struct device *dev, struct phy *phy)
+{
+}
+
+static inline struct phy *of_phy_get(struct device_node *np, const char *con_id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *of_phy_simple_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *phy_create(struct device *dev,
+ struct device_node *node,
+ const struct phy_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy *devm_phy_create(struct device *dev,
+ struct device_node *node,
+ const struct phy_ops *ops)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void phy_destroy(struct phy *phy)
+{
+}
+
+static inline void devm_phy_destroy(struct device *dev, struct phy *phy)
+{
+}
+
+static inline struct phy_provider *__of_phy_provider_register(
+ struct device *dev, struct module *owner, struct phy * (*of_xlate)(
+ struct device *dev, struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct phy_provider *__devm_of_phy_provider_register(struct device
+ *dev, struct module *owner, struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline void of_phy_provider_unregister(struct phy_provider *phy_provider)
+{
+}
+
+static inline void devm_of_phy_provider_unregister(struct device *dev,
+ struct phy_provider *phy_provider)
+{
+}
+static inline int
+phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
+{
+ return 0;
+}
+static inline void phy_remove_lookup(struct phy *phy, const char *con_id,
+ const char *dev_id) { }
+#endif
+
+#endif /* __DRIVERS_PHY_H */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
new file mode 100644
index 000000000..fe5732d53
--- /dev/null
+++ b/include/linux/phy_fixed.h
@@ -0,0 +1,57 @@
+#ifndef __PHY_FIXED_H
+#define __PHY_FIXED_H
+
+struct fixed_phy_status {
+ int link;
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+};
+
+struct device_node;
+
+#if IS_ENABLED(CONFIG_FIXED_PHY)
+extern int fixed_phy_add(unsigned int irq, int phy_id,
+ struct fixed_phy_status *status);
+extern struct phy_device *fixed_phy_register(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct device_node *np);
+extern void fixed_phy_del(int phy_addr);
+extern int fixed_phy_set_link_update(struct phy_device *phydev,
+ int (*link_update)(struct net_device *,
+ struct fixed_phy_status *));
+extern int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed);
+#else
+static inline int fixed_phy_add(unsigned int irq, int phy_id,
+ struct fixed_phy_status *status)
+{
+ return -ENODEV;
+}
+static inline struct phy_device *fixed_phy_register(unsigned int irq,
+ struct fixed_phy_status *status,
+ struct device_node *np)
+{
+ return ERR_PTR(-ENODEV);
+}
+static inline void fixed_phy_del(int phy_addr)
+{
+}
+static inline int fixed_phy_set_link_update(struct phy_device *phydev,
+ int (*link_update)(struct net_device *,
+ struct fixed_phy_status *))
+{
+ return -ENODEV;
+}
+static inline int fixed_phy_update_state(struct phy_device *phydev,
+ const struct fixed_phy_status *status,
+ const struct fixed_phy_status *changed)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_FIXED_PHY */
+
+#endif /* __PHY_FIXED_H */
diff --git a/include/linux/pid.h b/include/linux/pid.h
new file mode 100644
index 000000000..23705a53a
--- /dev/null
+++ b/include/linux/pid.h
@@ -0,0 +1,200 @@
+#ifndef _LINUX_PID_H
+#define _LINUX_PID_H
+
+#include <linux/rcupdate.h>
+
+enum pid_type
+{
+ PIDTYPE_PID,
+ PIDTYPE_PGID,
+ PIDTYPE_SID,
+ PIDTYPE_MAX
+};
+
+/*
+ * What is struct pid?
+ *
+ * A struct pid is the kernel's internal notion of a process identifier.
+ * It refers to individual tasks, process groups, and sessions. While
+ * there are processes attached to it the struct pid lives in a hash
+ * table, so it and then the processes that it refers to can be found
+ * quickly from the numeric pid value. The attached processes may be
+ * quickly accessed by following pointers from struct pid.
+ *
+ * Storing pid_t values in the kernel and referring to them later has a
+ * problem. The process originally with that pid may have exited and the
+ * pid allocator wrapped, and another process could have come along
+ * and been assigned that pid.
+ *
+ * Referring to user space processes by holding a reference to struct
+ * task_struct has a problem. When the user space process exits
+ * the now useless task_struct is still kept. A task_struct plus a
+ * stack consumes around 10K of low kernel memory. More precisely
+ * this is THREAD_SIZE + sizeof(struct task_struct). By comparison
+ * a struct pid is about 64 bytes.
+ *
+ * Holding a reference to struct pid solves both of these problems.
+ * It is small so holding a reference does not consume a lot of
+ * resources, and since a new struct pid is allocated when the numeric pid
+ * value is reused (when pids wrap around) we don't mistakenly refer to new
+ * processes.
+ */
+
+
+/*
+ * struct upid is used to get the id of the struct pid, as it is
+ * seen in a particular namespace. Later the struct pid is found with
+ * find_pid_ns() using the int nr and struct pid_namespace *ns.
+ */
+
+struct upid {
+ /* Try to keep pid_chain in the same cacheline as nr for find_vpid */
+ int nr;
+ struct pid_namespace *ns;
+ struct hlist_node pid_chain;
+};
+
+struct pid
+{
+ atomic_t count;
+ unsigned int level;
+ /* lists of tasks that use this pid */
+ struct hlist_head tasks[PIDTYPE_MAX];
+ struct rcu_head rcu;
+ struct upid numbers[1];
+};
+
+extern struct pid init_struct_pid;
+
+struct pid_link
+{
+ struct hlist_node node;
+ struct pid *pid;
+};
+
+static inline struct pid *get_pid(struct pid *pid)
+{
+ if (pid)
+ atomic_inc(&pid->count);
+ return pid;
+}
+
+extern void put_pid(struct pid *pid);
+extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
+extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
+
+extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
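+
+/*
+ * Example (illustrative): as described above, holding a struct pid
+ * reference is safer than caching a pid_t or a task_struct pointer.
+ *
+ *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
+ *
+ *	... later, possibly long after the task has exited ...
+ *
+ *	struct task_struct *tsk = get_pid_task(pid, PIDTYPE_PID);
+ *	if (tsk) {
+ *		... use tsk ...
+ *		put_task_struct(tsk);
+ *	}
+ *	put_pid(pid);
+ */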
+
+/*
+ * these helpers must be called with the tasklist_lock write-held.
+ */
+extern void attach_pid(struct task_struct *task, enum pid_type);
+extern void detach_pid(struct task_struct *task, enum pid_type);
+extern void change_pid(struct task_struct *task, enum pid_type,
+ struct pid *pid);
+extern void transfer_pid(struct task_struct *old, struct task_struct *new,
+ enum pid_type);
+
+struct pid_namespace;
+extern struct pid_namespace init_pid_ns;
+
+/*
+ * look up a PID in the hash table. Must be called with the tasklist_lock
+ * or rcu_read_lock() held.
+ *
+ * find_pid_ns() finds the pid in the namespace specified
+ * find_vpid() finds the pid by its virtual id, i.e. in the current namespace
+ *
+ * see also find_task_by_vpid() set in include/linux/sched.h
+ */
+extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns);
+extern struct pid *find_vpid(int nr);
+
+/*
+ * Lookup a PID in the hash table, and return with its count elevated.
+ */
+extern struct pid *find_get_pid(int nr);
+extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last);
+
+extern struct pid *alloc_pid(struct pid_namespace *ns);
+extern void free_pid(struct pid *pid);
+extern void disable_pid_allocation(struct pid_namespace *ns);
+
+/*
+ * ns_of_pid() returns the pid namespace in which the specified pid was
+ * allocated.
+ *
+ * NOTE:
+ * ns_of_pid() is expected to be called for a process (task) that has
+ * an attached 'struct pid' (see attach_pid(), detach_pid()), i.e. @pid
+ * is expected to be non-NULL. If @pid is NULL, the caller should handle
+ * the resulting NULL pid-ns.
+ */
+static inline struct pid_namespace *ns_of_pid(struct pid *pid)
+{
+ struct pid_namespace *ns = NULL;
+ if (pid)
+ ns = pid->numbers[pid->level].ns;
+ return ns;
+}
+
+/*
+ * is_child_reaper returns true if the pid is the init process
+ * of the current namespace. As this may be checked before
+ * pid_ns->child_reaper is assigned in copy_process(), we check
+ * against the pid number instead.
+ */
+static inline bool is_child_reaper(struct pid *pid)
+{
+ return pid->numbers[pid->level].nr == 1;
+}
+
+/*
+ * the helpers to get the pid's id seen from different namespaces
+ *
+ * pid_nr() : global id, i.e. the id seen from the init namespace;
+ * pid_vnr() : virtual id, i.e. the id seen from the pid namespace of
+ * current.
+ * pid_nr_ns() : id seen from the ns specified.
+ *
+ * see also task_xid_nr() etc in include/linux/sched.h
+ */
+
+static inline pid_t pid_nr(struct pid *pid)
+{
+ pid_t nr = 0;
+ if (pid)
+ nr = pid->numbers[0].nr;
+ return nr;
+}
+
+pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
+pid_t pid_vnr(struct pid *pid);
+
+#define do_each_pid_task(pid, type, task) \
+ do { \
+ if ((pid) != NULL) \
+ hlist_for_each_entry_rcu((task), \
+ &(pid)->tasks[type], pids[type].node) {
+
+ /*
+ * Both old and new leaders may be attached to
+ * the same pid in the middle of de_thread().
+ */
+#define while_each_pid_task(pid, type, task) \
+ if (type == PIDTYPE_PID) \
+ break; \
+ } \
+ } while (0)
+
+#define do_each_pid_thread(pid, type, task) \
+ do_each_pid_task(pid, type, task) { \
+ struct task_struct *tg___ = task; \
+ do {
+
+#define while_each_pid_thread(pid, type, task) \
+ } while_each_thread(tg___, task); \
+ task = tg___; \
+ } while_each_pid_task(pid, type, task)
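+
+/*
+ * Example (illustrative): walk every task attached to a process group
+ * pid under RCU, as the macros above expect.
+ *
+ *	rcu_read_lock();
+ *	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+ *		... act on task p ...
+ *	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
+ *	rcu_read_unlock();
+ */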
+#endif /* _LINUX_PID_H */
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
new file mode 100644
index 000000000..918b117a7
--- /dev/null
+++ b/include/linux/pid_namespace.h
@@ -0,0 +1,103 @@
+#ifndef _LINUX_PID_NS_H
+#define _LINUX_PID_NS_H
+
+#include <linux/sched.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/threads.h>
+#include <linux/nsproxy.h>
+#include <linux/kref.h>
+#include <linux/ns_common.h>
+
+struct pidmap {
+ atomic_t nr_free;
+ void *page;
+};
+
+#define BITS_PER_PAGE (PAGE_SIZE * 8)
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+#define PIDMAP_ENTRIES ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE)
+
+struct fs_pin;
+
+struct pid_namespace {
+ struct kref kref;
+ struct pidmap pidmap[PIDMAP_ENTRIES];
+ struct rcu_head rcu;
+ int last_pid;
+ unsigned int nr_hashed;
+ struct task_struct *child_reaper;
+ struct kmem_cache *pid_cachep;
+ unsigned int level;
+ struct pid_namespace *parent;
+#ifdef CONFIG_PROC_FS
+ struct vfsmount *proc_mnt;
+ struct dentry *proc_self;
+ struct dentry *proc_thread_self;
+#endif
+#ifdef CONFIG_BSD_PROCESS_ACCT
+ struct fs_pin *bacct;
+#endif
+ struct user_namespace *user_ns;
+ struct work_struct proc_work;
+ kgid_t pid_gid;
+ int hide_pid;
+ int reboot; /* group exit code if this pidns was rebooted */
+ struct ns_common ns;
+};
+
+extern struct pid_namespace init_pid_ns;
+
+#define PIDNS_HASH_ADDING (1U << 31)
+
+#ifdef CONFIG_PID_NS
+static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
+{
+ if (ns != &init_pid_ns)
+ kref_get(&ns->kref);
+ return ns;
+}
+
+extern struct pid_namespace *copy_pid_ns(unsigned long flags,
+ struct user_namespace *user_ns, struct pid_namespace *ns);
+extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
+extern int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd);
+extern void put_pid_ns(struct pid_namespace *ns);
+
+#else /* !CONFIG_PID_NS */
+#include <linux/err.h>
+
+static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
+{
+ return ns;
+}
+
+static inline struct pid_namespace *copy_pid_ns(unsigned long flags,
+ struct user_namespace *user_ns, struct pid_namespace *ns)
+{
+ if (flags & CLONE_NEWPID)
+ ns = ERR_PTR(-EINVAL);
+ return ns;
+}
+
+static inline void put_pid_ns(struct pid_namespace *ns)
+{
+}
+
+static inline void zap_pid_ns_processes(struct pid_namespace *ns)
+{
+ BUG();
+}
+
+static inline int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
+{
+ return 0;
+}
+#endif /* CONFIG_PID_NS */
+
+extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
+void pidhash_init(void);
+void pidmap_init(void);
+
+#endif /* _LINUX_PID_NS_H */
diff --git a/include/linux/pim.h b/include/linux/pim.h
new file mode 100644
index 000000000..252bf6644
--- /dev/null
+++ b/include/linux/pim.h
@@ -0,0 +1,27 @@
+#ifndef __LINUX_PIM_H
+#define __LINUX_PIM_H
+
+#include <asm/byteorder.h>
+
+/* Message types - V1 */
+#define PIM_V1_VERSION cpu_to_be32(0x10000000)
+#define PIM_V1_REGISTER 1
+
+/* Message types - V2 */
+#define PIM_VERSION 2
+#define PIM_REGISTER 1
+
+#define PIM_NULL_REGISTER cpu_to_be32(0x40000000)
+
+/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps) */
+struct pimreghdr
+{
+ __u8 type;
+ __u8 reserved;
+ __be16 csum;
+ __be32 flags;
+};
+
+struct sk_buff;
+extern int pim_rcv_v1(struct sk_buff *);
+#endif
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
new file mode 100644
index 000000000..18eccefea
--- /dev/null
+++ b/include/linux/pinctrl/consumer.h
@@ -0,0 +1,195 @@
+/*
+ * Consumer interface to the pin control subsystem
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_CONSUMER_H
+#define __LINUX_PINCTRL_CONSUMER_H
+
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/pinctrl-state.h>
+
+/* This struct is private to the core and should be regarded as a cookie */
+struct pinctrl;
+struct pinctrl_state;
+struct device;
+
+#ifdef CONFIG_PINCTRL
+
+/* External interface to pin control */
+extern int pinctrl_request_gpio(unsigned gpio);
+extern void pinctrl_free_gpio(unsigned gpio);
+extern int pinctrl_gpio_direction_input(unsigned gpio);
+extern int pinctrl_gpio_direction_output(unsigned gpio);
+
+extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
+extern void pinctrl_put(struct pinctrl *p);
+extern struct pinctrl_state * __must_check pinctrl_lookup_state(
+ struct pinctrl *p,
+ const char *name);
+extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
+
+extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
+extern void devm_pinctrl_put(struct pinctrl *p);
+
+#ifdef CONFIG_PM
+extern int pinctrl_pm_select_default_state(struct device *dev);
+extern int pinctrl_pm_select_sleep_state(struct device *dev);
+extern int pinctrl_pm_select_idle_state(struct device *dev);
+#else
+static inline int pinctrl_pm_select_default_state(struct device *dev)
+{
+ return 0;
+}
+static inline int pinctrl_pm_select_sleep_state(struct device *dev)
+{
+ return 0;
+}
+static inline int pinctrl_pm_select_idle_state(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#else /* !CONFIG_PINCTRL */
+
+static inline int pinctrl_request_gpio(unsigned gpio)
+{
+ return 0;
+}
+
+static inline void pinctrl_free_gpio(unsigned gpio)
+{
+}
+
+static inline int pinctrl_gpio_direction_input(unsigned gpio)
+{
+ return 0;
+}
+
+static inline int pinctrl_gpio_direction_output(unsigned gpio)
+{
+ return 0;
+}
+
+static inline struct pinctrl * __must_check pinctrl_get(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void pinctrl_put(struct pinctrl *p)
+{
+}
+
+static inline struct pinctrl_state * __must_check pinctrl_lookup_state(
+ struct pinctrl *p,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline int pinctrl_select_state(struct pinctrl *p,
+ struct pinctrl_state *s)
+{
+ return 0;
+}
+
+static inline struct pinctrl * __must_check devm_pinctrl_get(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void devm_pinctrl_put(struct pinctrl *p)
+{
+}
+
+static inline int pinctrl_pm_select_default_state(struct device *dev)
+{
+ return 0;
+}
+
+static inline int pinctrl_pm_select_sleep_state(struct device *dev)
+{
+ return 0;
+}
+
+static inline int pinctrl_pm_select_idle_state(struct device *dev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PINCTRL */
+
+static inline struct pinctrl * __must_check pinctrl_get_select(
+ struct device *dev, const char *name)
+{
+ struct pinctrl *p;
+ struct pinctrl_state *s;
+ int ret;
+
+ p = pinctrl_get(dev);
+ if (IS_ERR(p))
+ return p;
+
+ s = pinctrl_lookup_state(p, name);
+ if (IS_ERR(s)) {
+ pinctrl_put(p);
+ return ERR_PTR(PTR_ERR(s));
+ }
+
+ ret = pinctrl_select_state(p, s);
+ if (ret < 0) {
+ pinctrl_put(p);
+ return ERR_PTR(ret);
+ }
+
+ return p;
+}
+
+static inline struct pinctrl * __must_check pinctrl_get_select_default(
+ struct device *dev)
+{
+ return pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
+}
+
+static inline struct pinctrl * __must_check devm_pinctrl_get_select(
+ struct device *dev, const char *name)
+{
+ struct pinctrl *p;
+ struct pinctrl_state *s;
+ int ret;
+
+ p = devm_pinctrl_get(dev);
+ if (IS_ERR(p))
+ return p;
+
+ s = pinctrl_lookup_state(p, name);
+ if (IS_ERR(s)) {
+ devm_pinctrl_put(p);
+ return ERR_CAST(s);
+ }
+
+ ret = pinctrl_select_state(p, s);
+ if (ret < 0) {
+ devm_pinctrl_put(p);
+ return ERR_PTR(ret);
+ }
+
+ return p;
+}
+
+static inline struct pinctrl * __must_check devm_pinctrl_get_select_default(
+ struct device *dev)
+{
+ return devm_pinctrl_get_select(dev, PINCTRL_STATE_DEFAULT);
+}
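+
+/*
+ * Typical driver usage (illustrative only): grab and apply the default
+ * pin state during probe; the devm_ variant is released automatically.
+ *
+ *	struct pinctrl *p = devm_pinctrl_get_select_default(&pdev->dev);
+ *
+ *	if (IS_ERR(p))
+ *		return PTR_ERR(p);
+ */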
+
+#endif /* __LINUX_PINCTRL_CONSUMER_H */
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h
new file mode 100644
index 000000000..281cb91dd
--- /dev/null
+++ b/include/linux/pinctrl/devinfo.h
@@ -0,0 +1,49 @@
+/*
+ * Per-device information from the pin control system.
+ * This is the stuff that gets included into the device
+ * core.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef PINCTRL_DEVINFO_H
+#define PINCTRL_DEVINFO_H
+
+#ifdef CONFIG_PINCTRL
+
+/* The device core acts as a consumer toward pinctrl */
+#include <linux/pinctrl/consumer.h>
+
+/**
+ * struct dev_pin_info - pin state container for devices
+ * @p: pinctrl handle for the containing device
+ * @default_state: the default state for the handle, if found
+ */
+struct dev_pin_info {
+ struct pinctrl *p;
+ struct pinctrl_state *default_state;
+#ifdef CONFIG_PM
+ struct pinctrl_state *sleep_state;
+ struct pinctrl_state *idle_state;
+#endif
+};
+
+extern int pinctrl_bind_pins(struct device *dev);
+
+#else
+
+/* Stubs if we're not using pinctrl */
+
+static inline int pinctrl_bind_pins(struct device *dev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PINCTRL */
+#endif /* PINCTRL_DEVINFO_H */
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
new file mode 100644
index 000000000..e5b1716f9
--- /dev/null
+++ b/include/linux/pinctrl/machine.h
@@ -0,0 +1,170 @@
+/*
+ * Machine interface for the pinctrl subsystem.
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_MACHINE_H
+#define __LINUX_PINCTRL_MACHINE_H
+
+#include <linux/bug.h>
+
+#include <linux/pinctrl/pinctrl-state.h>
+
+enum pinctrl_map_type {
+ PIN_MAP_TYPE_INVALID,
+ PIN_MAP_TYPE_DUMMY_STATE,
+ PIN_MAP_TYPE_MUX_GROUP,
+ PIN_MAP_TYPE_CONFIGS_PIN,
+ PIN_MAP_TYPE_CONFIGS_GROUP,
+};
+
+/**
+ * struct pinctrl_map_mux - mapping table content for MAP_TYPE_MUX_GROUP
+ * @group: the name of the group whose mux function is to be configured. This
+ * field may be left NULL, and the first applicable group for the function
+ * will be used.
+ * @function: the mux function to select for the group
+ */
+struct pinctrl_map_mux {
+ const char *group;
+ const char *function;
+};
+
+/**
+ * struct pinctrl_map_configs - mapping table content for MAP_TYPE_CONFIGS_*
+ * @group_or_pin: the name of the pin or group whose configuration parameters
+ * are to be configured.
+ * @configs: a pointer to an array of config parameters/values to program into
+ * hardware. Each individual pin controller defines the format and meaning
+ * of config parameters.
+ * @num_configs: the number of entries in array @configs
+ */
+struct pinctrl_map_configs {
+ const char *group_or_pin;
+ unsigned long *configs;
+ unsigned num_configs;
+};
+
+/**
+ * struct pinctrl_map - boards/machines shall provide this map for devices
+ * @dev_name: the name of the device using this specific mapping, the name
+ * must be the same as in your struct device*. If this name is set to the
+ * same name as the pin controllers own dev_name(), the map entry will be
+ * hogged by the driver itself upon registration
+ * @name: the name of this specific map entry for the particular machine.
+ * This is the parameter passed to pinmux_lookup_state()
+ * @type: the type of mapping table entry
+ * @ctrl_dev_name: the name of the device controlling this specific mapping,
+ * the name must be the same as in your struct device*. This field is not
+ * used for PIN_MAP_TYPE_DUMMY_STATE
+ * @data: Data specific to the mapping type
+ */
+struct pinctrl_map {
+ const char *dev_name;
+ const char *name;
+ enum pinctrl_map_type type;
+ const char *ctrl_dev_name;
+ union {
+ struct pinctrl_map_mux mux;
+ struct pinctrl_map_configs configs;
+ } data;
+};
+
+/* Convenience macros to create mapping table entries */
+
+#define PIN_MAP_DUMMY_STATE(dev, state) \
+ { \
+ .dev_name = dev, \
+ .name = state, \
+ .type = PIN_MAP_TYPE_DUMMY_STATE, \
+ }
+
+#define PIN_MAP_MUX_GROUP(dev, state, pinctrl, grp, func) \
+ { \
+ .dev_name = dev, \
+ .name = state, \
+ .type = PIN_MAP_TYPE_MUX_GROUP, \
+ .ctrl_dev_name = pinctrl, \
+ .data.mux = { \
+ .group = grp, \
+ .function = func, \
+ }, \
+ }
+
+#define PIN_MAP_MUX_GROUP_DEFAULT(dev, pinctrl, grp, func) \
+ PIN_MAP_MUX_GROUP(dev, PINCTRL_STATE_DEFAULT, pinctrl, grp, func)
+
+#define PIN_MAP_MUX_GROUP_HOG(dev, state, grp, func) \
+ PIN_MAP_MUX_GROUP(dev, state, dev, grp, func)
+
+#define PIN_MAP_MUX_GROUP_HOG_DEFAULT(dev, grp, func) \
+ PIN_MAP_MUX_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, func)
+
+#define PIN_MAP_CONFIGS_PIN(dev, state, pinctrl, pin, cfgs) \
+ { \
+ .dev_name = dev, \
+ .name = state, \
+ .type = PIN_MAP_TYPE_CONFIGS_PIN, \
+ .ctrl_dev_name = pinctrl, \
+ .data.configs = { \
+ .group_or_pin = pin, \
+ .configs = cfgs, \
+ .num_configs = ARRAY_SIZE(cfgs), \
+ }, \
+ }
+
+#define PIN_MAP_CONFIGS_PIN_DEFAULT(dev, pinctrl, pin, cfgs) \
+ PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_DEFAULT, pinctrl, pin, cfgs)
+
+#define PIN_MAP_CONFIGS_PIN_HOG(dev, state, pin, cfgs) \
+ PIN_MAP_CONFIGS_PIN(dev, state, dev, pin, cfgs)
+
+#define PIN_MAP_CONFIGS_PIN_HOG_DEFAULT(dev, pin, cfgs) \
+ PIN_MAP_CONFIGS_PIN(dev, PINCTRL_STATE_DEFAULT, dev, pin, cfgs)
+
+#define PIN_MAP_CONFIGS_GROUP(dev, state, pinctrl, grp, cfgs) \
+ { \
+ .dev_name = dev, \
+ .name = state, \
+ .type = PIN_MAP_TYPE_CONFIGS_GROUP, \
+ .ctrl_dev_name = pinctrl, \
+ .data.configs = { \
+ .group_or_pin = grp, \
+ .configs = cfgs, \
+ .num_configs = ARRAY_SIZE(cfgs), \
+ }, \
+ }
+
+#define PIN_MAP_CONFIGS_GROUP_DEFAULT(dev, pinctrl, grp, cfgs) \
+ PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, pinctrl, grp, cfgs)
+
+#define PIN_MAP_CONFIGS_GROUP_HOG(dev, state, grp, cfgs) \
+ PIN_MAP_CONFIGS_GROUP(dev, state, dev, grp, cfgs)
+
+#define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \
+ PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs)
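+
+/*
+ * Example (illustrative; all names are hypothetical): a board file
+ * declaring that device "foo-i2c.0" uses group "i2c0_grp" in function
+ * "i2c0" on the "pinctrl-foo" controller, registered with
+ * pinctrl_register_mappings() below.
+ *
+ *	static const struct pinctrl_map board_pinmap[] = {
+ *		PIN_MAP_MUX_GROUP_DEFAULT("foo-i2c.0", "pinctrl-foo",
+ *					  "i2c0_grp", "i2c0"),
+ *	};
+ *
+ *	pinctrl_register_mappings(board_pinmap, ARRAY_SIZE(board_pinmap));
+ */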
+
+#ifdef CONFIG_PINCTRL
+
+extern int pinctrl_register_mappings(struct pinctrl_map const *map,
+ unsigned num_maps);
+extern void pinctrl_provide_dummies(void);
+#else
+
+static inline int pinctrl_register_mappings(struct pinctrl_map const *map,
+ unsigned num_maps)
+{
+ return 0;
+}
+
+static inline void pinctrl_provide_dummies(void)
+{
+}
+#endif /* !CONFIG_PINCTRL */
+#endif
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
new file mode 100644
index 000000000..fe65962b2
--- /dev/null
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -0,0 +1,210 @@
+/*
+ * Interface to the generic pinconfig portions of the pinctrl subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINCONF_GENERIC_H
+#define __LINUX_PINCTRL_PINCONF_GENERIC_H
+
+/*
+ * You shouldn't even be able to compile with these enums etc unless you're
+ * using generic pin config. That is why this is defined out.
+ */
+#ifdef CONFIG_GENERIC_PINCONF
+
+/**
+ * enum pin_config_param - possible pin configuration parameters
+ * @PIN_CONFIG_BIAS_DISABLE: disable any pin bias on the pin, a
+ * transition from say pull-up to pull-down implies that you disable
+ * pull-up in the process, this setting disables all biasing.
+ * @PIN_CONFIG_BIAS_HIGH_IMPEDANCE: the pin will be set to a high impedance
+ * mode, also known as "third-state" (tristate) or "high-Z" or "floating".
+ * On output pins this effectively disconnects the pin, which is useful
+ * if for example some other pin is going to drive the signal connected
+ * to it for a while. Pins used for input are usually always high
+ * impedance.
+ * @PIN_CONFIG_BIAS_BUS_HOLD: the pin will be set to weakly latch so that it
+ * weakly drives the last value on a tristate bus, also known as a "bus
+ * holder", "bus keeper" or "repeater". This allows another device on the
+ * bus to change the value by driving the bus high or low and switching to
+ * tristate. The argument is ignored.
+ * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
+ * impedance to VDD). If the argument is != 0 pull-up is enabled,
+ * if it is 0, pull-up is total, i.e. the pin is connected to VDD.
+ * @PIN_CONFIG_BIAS_PULL_DOWN: the pin will be pulled down (usually with high
+ * impedance to GROUND). If the argument is != 0 pull-down is enabled,
+ * if it is 0, pull-down is total, i.e. the pin is connected to GROUND.
+ * @PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: the pin will be pulled up or down based
+ * on embedded knowledge of the controller hardware, like current mux
+ * function. The pull direction and possibly strength too will normally
+ * be decided completely inside the hardware block and not be readable
+ * from the kernel side.
+ * If the argument is != 0 pull up/down is enabled, if it is 0, the
+ * configuration is ignored. The proper way to disable it is to use
+ * @PIN_CONFIG_BIAS_DISABLE.
+ * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
+ * low, this is the most typical case and is typically achieved with two
+ * active transistors on the output. Setting this config will enable
+ * push-pull mode, the argument is ignored.
+ * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
+ * collector) which means it is usually wired with other output ports
+ * which are then pulled up with an external resistor. Setting this
+ * config will enable open drain mode, the argument is ignored.
+ * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
+ * (open emitter). Setting this config will enable open source mode, the
+ * argument is ignored.
+ * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
+ * passed as argument. The argument is in mA.
+ * @PIN_CONFIG_INPUT_ENABLE: enable the pin's input. Note that this does not
+ * affect the pin's ability to drive output. 1 enables input, 0 disables
+ * input.
+ * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
+ * If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
+ * schmitt-trigger mode is disabled.
+ * @PIN_CONFIG_INPUT_SCHMITT: this will configure an input pin to run in
+ * schmitt-trigger mode. If the schmitt-trigger has adjustable hysteresis,
+ * the threshold value is given in a custom format as argument when
+ * setting pins to this mode.
+ * @PIN_CONFIG_INPUT_DEBOUNCE: this will configure the pin to debounce mode,
+ * which means it will wait for signals to settle when reading inputs. The
+ * argument gives the debounce time in usecs. Setting the
+ * argument to zero turns debouncing off.
+ * @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
+ * supplies, the argument to this parameter (in a custom format) tells
+ * the driver which alternative power source to use.
+ * @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
+ * this parameter (in a custom format) tells the driver which alternative
+ * slew rate to use.
+ * @PIN_CONFIG_LOW_POWER_MODE: this will configure the pin for low power
+ * operation, if several modes of operation are supported these can be
+ * passed in the argument in a custom format, else just use argument 1
+ * to indicate low power mode, argument 0 turns low power mode off.
+ * @PIN_CONFIG_OUTPUT: this will configure the pin as an output. Use argument
+ * 1 to indicate high level, argument 0 to indicate low level. (Please
+ * see Documentation/pinctrl.txt, section "GPIO mode pitfalls" for a
+ * discussion around this parameter.)
+ * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if
+ * you need to pass in custom configurations to the pin controller, use
+ * PIN_CONFIG_END+1 as the base offset.
+ */
+enum pin_config_param {
+ PIN_CONFIG_BIAS_DISABLE,
+ PIN_CONFIG_BIAS_HIGH_IMPEDANCE,
+ PIN_CONFIG_BIAS_BUS_HOLD,
+ PIN_CONFIG_BIAS_PULL_UP,
+ PIN_CONFIG_BIAS_PULL_DOWN,
+ PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
+ PIN_CONFIG_DRIVE_PUSH_PULL,
+ PIN_CONFIG_DRIVE_OPEN_DRAIN,
+ PIN_CONFIG_DRIVE_OPEN_SOURCE,
+ PIN_CONFIG_DRIVE_STRENGTH,
+ PIN_CONFIG_INPUT_ENABLE,
+ PIN_CONFIG_INPUT_SCHMITT_ENABLE,
+ PIN_CONFIG_INPUT_SCHMITT,
+ PIN_CONFIG_INPUT_DEBOUNCE,
+ PIN_CONFIG_POWER_SOURCE,
+ PIN_CONFIG_SLEW_RATE,
+ PIN_CONFIG_LOW_POWER_MODE,
+ PIN_CONFIG_OUTPUT,
+ PIN_CONFIG_END = 0x7FFF,
+};
+
+#ifdef CONFIG_DEBUG_FS
+#define PCONFDUMP(a, b, c, d) { .param = a, .display = b, .format = c, \
+ .has_arg = d }
+
+struct pin_config_item {
+ const enum pin_config_param param;
+ const char * const display;
+ const char * const format;
+ bool has_arg;
+};
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * Helpful configuration macro to be used in tables etc.
+ */
+#define PIN_CONF_PACKED(p, a) (((unsigned long) (a) << 16) | ((unsigned long) (p) & 0xffffUL))
+
+/*
+ * The following inline functions stuff a configuration parameter and data value
+ * into and out of an unsigned long argument, as used by the generic pin config
+ * system. We put the parameter in the lower 16 bits and the argument in the
+ * upper 16 bits.
+ */
+
+static inline enum pin_config_param pinconf_to_config_param(unsigned long config)
+{
+ return (enum pin_config_param) (config & 0xffffUL);
+}
+
+static inline u16 pinconf_to_config_argument(unsigned long config)
+{
+ return (u16) ((config >> 16) & 0xffffUL);
+}
+
+static inline unsigned long pinconf_to_config_packed(enum pin_config_param param,
+ u16 argument)
+{
+ return PIN_CONF_PACKED(param, argument);
+}
+
+#ifdef CONFIG_OF
+
+#include <linux/device.h>
+#include <linux/pinctrl/machine.h>
+struct pinctrl_dev;
+struct pinctrl_map;
+
+struct pinconf_generic_params {
+ const char * const property;
+ enum pin_config_param param;
+ u32 default_value;
+};
+
+int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np, struct pinctrl_map **map,
+ unsigned *reserved_maps, unsigned *num_maps,
+ enum pinctrl_map_type type);
+int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned *num_maps, enum pinctrl_map_type type);
+
+static inline int pinconf_generic_dt_node_to_map_group(
+ struct pinctrl_dev *pctldev, struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
+}
+
+static inline int pinconf_generic_dt_node_to_map_pin(
+ struct pinctrl_dev *pctldev, struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_CONFIGS_PIN);
+}
+
+static inline int pinconf_generic_dt_node_to_map_all(
+ struct pinctrl_dev *pctldev, struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ /*
+ * passing the type as PIN_MAP_TYPE_INVALID causes the underlying parser
+ * to infer the map type from the DT properties used.
+ */
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_INVALID);
+}
+#endif
+
+#endif /* CONFIG_GENERIC_PINCONF */
+
+#endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */
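A minimal sketch of how the helpers above pack and unpack a configuration value, assuming CONFIG_GENERIC_PINCONF is enabled; the foo_* names and the chosen values are illustrative only:

#include <linux/pinctrl/pinconf-generic.h>

/* Parameter lives in bits 0-15, argument in bits 16-31. */
static const unsigned long foo_configs[] = {
	PIN_CONF_PACKED(PIN_CONFIG_DRIVE_STRENGTH, 20),	/* 20 mA */
	PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 1),
};

static void foo_unpack_example(void)
{
	enum pin_config_param param = pinconf_to_config_param(foo_configs[0]);
	u16 arg = pinconf_to_config_argument(foo_configs[0]);

	/* Here param == PIN_CONFIG_DRIVE_STRENGTH and arg == 20. */
	(void)param;
	(void)arg;
}
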
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
new file mode 100644
index 000000000..09eb80f25
--- /dev/null
+++ b/include/linux/pinctrl/pinconf.h
@@ -0,0 +1,75 @@
+/*
+ * Interface to the pinconfig portions of the pinctrl subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINCONF_H
+#define __LINUX_PINCTRL_PINCONF_H
+
+#ifdef CONFIG_PINCONF
+
+#include <linux/pinctrl/machine.h>
+
+struct pinctrl_dev;
+struct seq_file;
+
+/**
+ * struct pinconf_ops - pin config operations, to be implemented by
+ * pin configuration capable drivers.
+ * @is_generic: for pin controllers that want to use the generic interface,
+ * this flag tells the framework that it's generic.
+ * @pin_config_get: get the config of a certain pin. If the requested config
+ * is not available on this controller, this should return -ENOTSUPP;
+ * if it is available but disabled, it should return -EINVAL
+ * @pin_config_set: configure an individual pin
+ * @pin_config_group_get: get configurations for an entire pin group
+ * @pin_config_group_set: configure all pins in a group
+ * @pin_config_dbg_parse_modify: optional debugfs hook to parse and modify a pin configuration
+ * @pin_config_dbg_show: optional debugfs display hook that will provide
+ * per-device info for a certain pin in debugfs
+ * @pin_config_group_dbg_show: optional debugfs display hook that will provide
+ * per-device info for a certain group in debugfs
+ * @pin_config_config_dbg_show: optional debugfs display hook that will decode
+ * and display a driver's pin configuration parameter
+ */
+struct pinconf_ops {
+#ifdef CONFIG_GENERIC_PINCONF
+ bool is_generic;
+#endif
+ int (*pin_config_get) (struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *config);
+ int (*pin_config_set) (struct pinctrl_dev *pctldev,
+ unsigned pin,
+ unsigned long *configs,
+ unsigned num_configs);
+ int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long *config);
+ int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ unsigned long *configs,
+ unsigned num_configs);
+ int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev,
+ const char *arg,
+ unsigned long *config);
+ void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned offset);
+ void (*pin_config_group_dbg_show) (struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned selector);
+ void (*pin_config_config_dbg_show) (struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned long config);
+};
+
+#endif
+
+#endif /* __LINUX_PINCTRL_PINCONF_H */
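A hedged sketch of how a driver could fill this vtable; the foo_* names are placeholders and the bodies only indicate where the hardware access would go:

static int foo_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
			      unsigned long *config)
{
	/* Read the hardware and pack the result; -ENOTSUPP if not expressible. */
	return -ENOTSUPP;
}

static int foo_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
			      unsigned long *configs, unsigned num_configs)
{
	/* Apply each packed config in configs[0..num_configs-1] to the pin. */
	return 0;
}

static const struct pinconf_ops foo_pinconf_ops = {
	.is_generic = true,	/* field only present with CONFIG_GENERIC_PINCONF */
	.pin_config_get = foo_pin_config_get,
	.pin_config_set = foo_pin_config_set,
};
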
diff --git a/include/linux/pinctrl/pinctrl-state.h b/include/linux/pinctrl/pinctrl-state.h
new file mode 100644
index 000000000..b5919f8e6
--- /dev/null
+++ b/include/linux/pinctrl/pinctrl-state.h
@@ -0,0 +1,24 @@
+/*
+ * Standard pin control state definitions
+ */
+
+/**
+ * @PINCTRL_STATE_DEFAULT: the state the pinctrl handle shall be put
+ * into as default; usually this means the pins are up and ready to
+ * be used by the device driver. This state is commonly used by
+ * hogs to configure muxing and pins at boot, and also as a state
+ * to go into when returning from sleep and idle in
+ * .pm_runtime_resume() or ordinary .resume() for example.
+ * @PINCTRL_STATE_IDLE: the state the pinctrl handle shall be put into
+ * when the pins are idle. This is a state where the system is relaxed
+ * but not fully sleeping - some power may be on but clocks gated for
+ * example. Could typically be set from a pm_runtime_suspend() or
+ * pm_runtime_idle() operation.
+ * @PINCTRL_STATE_SLEEP: the state the pinctrl handle shall be put into
+ * when the pins are sleeping. This is a state where the system is in
+ * its lowest sleep state. Could typically be set from an
+ * ordinary .suspend() function.
+ */
+#define PINCTRL_STATE_DEFAULT "default"
+#define PINCTRL_STATE_IDLE "idle"
+#define PINCTRL_STATE_SLEEP "sleep"
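A consumer driver usually selects these states from its PM callbacks through the pinctrl consumer API (declared in linux/pinctrl/consumer.h, not shown here); a minimal sketch, assuming the handle was obtained at probe time:

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

/* "pctrl" is assumed to come from devm_pinctrl_get(dev) at probe time. */
static void foo_enter_idle(struct pinctrl *pctrl)
{
	struct pinctrl_state *idle;

	idle = pinctrl_lookup_state(pctrl, PINCTRL_STATE_IDLE);
	if (!IS_ERR(idle))
		pinctrl_select_state(pctrl, idle);
}
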
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
new file mode 100644
index 000000000..66e469751
--- /dev/null
+++ b/include/linux/pinctrl/pinctrl.h
@@ -0,0 +1,190 @@
+/*
+ * Interface to the pinctrl subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * This interface is used in the core to keep track of pins.
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINCTRL_H
+#define __LINUX_PINCTRL_PINCTRL_H
+
+#ifdef CONFIG_PINCTRL
+
+#include <linux/radix-tree.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/pinctrl-state.h>
+
+struct device;
+struct pinctrl_dev;
+struct pinctrl_map;
+struct pinmux_ops;
+struct pinconf_ops;
+struct pin_config_item;
+struct gpio_chip;
+struct device_node;
+
+/**
+ * struct pinctrl_pin_desc - boards/machines provide information on their
+ * pins, pads or other muxable units in this struct
+ * @number: unique pin number from the global pin number space
+ * @name: a name for this pin
+ * @drv_data: driver-defined per-pin data. pinctrl core does not touch this
+ */
+struct pinctrl_pin_desc {
+ unsigned number;
+ const char *name;
+ void *drv_data;
+};
+
+/* Convenience macro to define a single named or anonymous pin descriptor */
+#define PINCTRL_PIN(a, b) { .number = a, .name = b }
+#define PINCTRL_PIN_ANON(a) { .number = a }
+
+/**
+ * struct pinctrl_gpio_range - each pin controller can provide subranges of
+ * the GPIO number space to be handled by the controller
+ * @node: list node for internal use
+ * @name: a name for the chip in this range
+ * @id: an ID number for the chip in this range
+ * @base: base offset of the GPIO range
+ * @pin_base: base pin number of the GPIO range if pins == NULL
+ * @pins: enumeration of pins in GPIO range or NULL
+ * @npins: number of pins in the GPIO range, including the base number
+ * @gc: an optional pointer to a gpio_chip
+ */
+struct pinctrl_gpio_range {
+ struct list_head node;
+ const char *name;
+ unsigned int id;
+ unsigned int base;
+ unsigned int pin_base;
+ unsigned const *pins;
+ unsigned int npins;
+ struct gpio_chip *gc;
+};
+
+/**
+ * struct pinctrl_ops - global pin control operations, to be implemented by
+ * pin controller drivers.
+ * @get_groups_count: Returns the total number of groups registered.
+ * @get_group_name: return the group name of the pin group
+ * @get_group_pins: return an array of pins corresponding to a certain
+ * group selector in @pins, and the size of the array in @num_pins
+ * @pin_dbg_show: optional debugfs display hook that will provide per-device
+ * info for a certain pin in debugfs
+ * @dt_node_to_map: parse a device tree "pin configuration node", and create
+ * mapping table entries for it. These are returned through the @map and
+ * @num_maps output parameters. This function is optional, and may be
+ * omitted for pinctrl drivers that do not support device tree.
+ * @dt_free_map: free mapping table entries created via @dt_node_to_map. The
+ * top-level @map pointer must be freed, along with any dynamically
+ * allocated members of the mapping table entries themselves. This
+ * function is optional, and may be omitted for pinctrl drivers that do
+ * not support device tree.
+ */
+struct pinctrl_ops {
+ int (*get_groups_count) (struct pinctrl_dev *pctldev);
+ const char *(*get_group_name) (struct pinctrl_dev *pctldev,
+ unsigned selector);
+ int (*get_group_pins) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const unsigned **pins,
+ unsigned *num_pins);
+ void (*pin_dbg_show) (struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset);
+ int (*dt_node_to_map) (struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps);
+ void (*dt_free_map) (struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps);
+};
+
+/**
+ * struct pinctrl_desc - pin controller descriptor, register this to pin
+ * control subsystem
+ * @name: name for the pin controller
+ * @pins: an array of pin descriptors describing all the pins handled by
+ * this pin controller
+ * @npins: number of descriptors in the array, usually just ARRAY_SIZE()
+ * of the pins field above
+ * @pctlops: pin control operation vtable, to support global concepts like
+ * grouping of pins; this is optional.
+ * @pmxops: pinmux operations vtable, if you support pinmuxing in your driver
+ * @confops: pin config operations vtable, if you support pin configuration in
+ * your driver
+ * @owner: module providing the pin controller, used for refcounting
+ * @num_custom_params: Number of driver-specific custom parameters to be parsed
+ * from the hardware description
+ * @custom_params: List of driver-specific custom parameters to be parsed from
+ * the hardware description
+ * @custom_conf_items: Information on how to print @custom_params in debugfs; must be
+ * the same size as the @custom_params, i.e. @num_custom_params
+ */
+struct pinctrl_desc {
+ const char *name;
+ struct pinctrl_pin_desc const *pins;
+ unsigned int npins;
+ const struct pinctrl_ops *pctlops;
+ const struct pinmux_ops *pmxops;
+ const struct pinconf_ops *confops;
+ struct module *owner;
+#ifdef CONFIG_GENERIC_PINCONF
+ unsigned int num_custom_params;
+ const struct pinconf_generic_params *custom_params;
+ const struct pin_config_item *custom_conf_items;
+#endif
+};
+
+/* External interface to pin controller */
+extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
+ struct device *dev, void *driver_data);
+extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
+extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin);
+extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range);
+extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *ranges,
+ unsigned nranges);
+extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range);
+
+extern struct pinctrl_dev *pinctrl_find_and_add_gpio_range(const char *devname,
+ struct pinctrl_gpio_range *range);
+extern struct pinctrl_gpio_range *
+pinctrl_find_gpio_range_from_pin(struct pinctrl_dev *pctldev,
+ unsigned int pin);
+extern int pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ const char *pin_group, const unsigned **pins,
+ unsigned *num_pins);
+
+#ifdef CONFIG_OF
+extern struct pinctrl_dev *of_pinctrl_get(struct device_node *np);
+#else
+static inline
+struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
+extern const char *pinctrl_dev_get_devname(struct pinctrl_dev *pctldev);
+extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
+#else
+
+struct pinctrl_dev;
+
+/* Sufficiently stupid default functions when pinctrl is not in use */
+static inline bool pin_is_valid(struct pinctrl_dev *pctldev, int pin)
+{
+ return pin >= 0;
+}
+
+#endif /* !CONFIG_PINCTRL */
+
+#endif /* __LINUX_PINCTRL_PINCTRL_H */
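To make the registration flow concrete, here is a minimal, hypothetical controller built only from the declarations above; the foo_* names are placeholders and the NULL-on-failure check reflects the pinctrl_register() convention of this version:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>

static const struct pinctrl_pin_desc foo_pins[] = {
	PINCTRL_PIN(0, "GPIO0"),
	PINCTRL_PIN(1, "GPIO1"),
	PINCTRL_PIN(2, "SPI0_CLK"),
};

static int foo_get_groups_count(struct pinctrl_dev *pctldev)
{
	return 0;	/* no pin groups in this toy example */
}

static const char *foo_get_group_name(struct pinctrl_dev *pctldev,
				      unsigned selector)
{
	return NULL;
}

static int foo_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
			      const unsigned **pins, unsigned *num_pins)
{
	return -EINVAL;
}

static const struct pinctrl_ops foo_pctrl_ops = {
	.get_groups_count = foo_get_groups_count,
	.get_group_name = foo_get_group_name,
	.get_group_pins = foo_get_group_pins,
};

static struct pinctrl_desc foo_desc = {
	.name = "foo-pinctrl",
	.pins = foo_pins,
	.npins = ARRAY_SIZE(foo_pins),
	.pctlops = &foo_pctrl_ops,
	.owner = THIS_MODULE,
};

static int foo_probe(struct platform_device *pdev)
{
	struct pinctrl_dev *pctldev;

	pctldev = pinctrl_register(&foo_desc, &pdev->dev, NULL);
	if (!pctldev)
		return -EINVAL;

	platform_set_drvdata(pdev, pctldev);
	return 0;
}
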
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
new file mode 100644
index 000000000..511bda9ed
--- /dev/null
+++ b/include/linux/pinctrl/pinmux.h
@@ -0,0 +1,86 @@
+/*
+ * Interface to the pinmux subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __LINUX_PINCTRL_PINMUX_H
+#define __LINUX_PINCTRL_PINMUX_H
+
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#ifdef CONFIG_PINMUX
+
+struct pinctrl_dev;
+
+/**
+ * struct pinmux_ops - pinmux operations, to be implemented by pin controller
+ * drivers that support pinmuxing
+ * @request: called by the core to see if a certain pin can be made
+ * available for muxing. The core calls this to acquire the pins before
+ * selecting any actual mux setting for a function. The driver is
+ * allowed to answer "no" by returning a negative error code
+ * @free: the reverse function of the request() callback, frees a pin after
+ * being requested
+ * @get_functions_count: returns number of selectable named functions available
+ * in this pinmux driver
+ * @get_function_name: return the function name of the muxing selector,
+ * called by the core to figure out which mux setting it shall map a
+ * certain device to
+ * @get_function_groups: return an array of group names (in turn
+ * referencing pins) connected to a certain function selector. The group
+ * name can be used with the generic @pinctrl_ops to retrieve the
+ * actual pins affected. The applicable groups will be returned in
+ * @groups and the number of groups in @num_groups
+ * @set_mux: enable a certain muxing function with a certain pin group. The
+ * driver does not need to figure out whether enabling this function
+ * conflicts with some other use of the pins in that group; such collisions
+ * are handled by the pinmux subsystem. The @func_selector selects a
+ * certain function whereas @group_selector selects a certain set of pins
+ * to be used. On simple controllers the latter argument may be ignored
+ * @gpio_request_enable: requests and enables GPIO on a certain pin.
+ * Implement this only if you can mux every pin individually as GPIO. The
+ * affected GPIO range is passed along with an offset (pin number) into that
+ * specific GPIO range - function selectors and pin groups are orthogonal
+ * to this, the core will however make sure the pins do not collide.
+ * @gpio_disable_free: free up GPIO muxing on a certain pin, the reverse of
+ * @gpio_request_enable
+ * @gpio_set_direction: Since controllers may need different configurations
+ * depending on whether the GPIO is configured as input or output,
+ * a direction selector function may be implemented as a backing
+ * to the GPIO controllers that need pin muxing.
+ */
+struct pinmux_ops {
+ int (*request) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*free) (struct pinctrl_dev *pctldev, unsigned offset);
+ int (*get_functions_count) (struct pinctrl_dev *pctldev);
+ const char *(*get_function_name) (struct pinctrl_dev *pctldev,
+ unsigned selector);
+ int (*get_function_groups) (struct pinctrl_dev *pctldev,
+ unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups);
+ int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
+ unsigned group_selector);
+ int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset);
+ void (*gpio_disable_free) (struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset);
+ int (*gpio_set_direction) (struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset,
+ bool input);
+};
+
+#endif /* CONFIG_PINMUX */
+
+#endif /* __LINUX_PINCTRL_PINMUX_H */
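A corresponding pinmux_ops skeleton might look as follows; again, the foo_* names and the two function names are purely illustrative:

static const char * const foo_functions[] = { "uart0", "spi0" };

static int foo_get_functions_count(struct pinctrl_dev *pctldev)
{
	return ARRAY_SIZE(foo_functions);
}

static const char *foo_get_function_name(struct pinctrl_dev *pctldev,
					 unsigned selector)
{
	return foo_functions[selector];
}

static int foo_get_function_groups(struct pinctrl_dev *pctldev,
				   unsigned selector,
				   const char * const **groups,
				   unsigned * const num_groups)
{
	*groups = NULL;	/* a real driver returns its group names here */
	*num_groups = 0;
	return 0;
}

static int foo_set_mux(struct pinctrl_dev *pctldev, unsigned func_selector,
		       unsigned group_selector)
{
	/* Program the mux registers for the selected function and group. */
	return 0;
}

static const struct pinmux_ops foo_pinmux_ops = {
	.get_functions_count = foo_get_functions_count,
	.get_function_name = foo_get_function_name,
	.get_function_groups = foo_get_function_groups,
	.set_mux = foo_set_mux,
};
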
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
new file mode 100644
index 000000000..eb8b8ac6d
--- /dev/null
+++ b/include/linux/pipe_fs_i.h
@@ -0,0 +1,149 @@
+#ifndef _LINUX_PIPE_FS_I_H
+#define _LINUX_PIPE_FS_I_H
+
+#define PIPE_DEF_BUFFERS 16
+
+#define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */
+#define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */
+#define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */
+#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */
+
+/**
+ * struct pipe_buffer - a linux kernel pipe buffer
+ * @page: the page containing the data for the pipe buffer
+ * @offset: offset of data inside the @page
+ * @len: length of data inside the @page
+ * @ops: operations associated with this buffer. See @pipe_buf_operations.
+ * @flags: pipe buffer flags. See above.
+ * @private: private data owned by the ops.
+ **/
+struct pipe_buffer {
+ struct page *page;
+ unsigned int offset, len;
+ const struct pipe_buf_operations *ops;
+ unsigned int flags;
+ unsigned long private;
+};
+
+/**
+ * struct pipe_inode_info - a linux kernel pipe
+ * @mutex: mutex protecting the whole thing
+ * @wait: reader/writer wait point in case of empty/full pipe
+ * @nrbufs: the number of non-empty pipe buffers in this pipe
+ * @buffers: total number of buffers (should be a power of 2)
+ * @curbuf: the current pipe buffer entry
+ * @tmp_page: cached released page
+ * @readers: number of current readers of this pipe
+ * @writers: number of current writers of this pipe
+ * @files: number of struct file referring to this pipe (protected by ->i_lock)
+ * @waiting_writers: number of writers blocked waiting for room
+ * @r_counter: reader counter
+ * @w_counter: writer counter
+ * @fasync_readers: reader side fasync
+ * @fasync_writers: writer side fasync
+ * @bufs: the circular array of pipe buffers
+ **/
+struct pipe_inode_info {
+ struct mutex mutex;
+ wait_queue_head_t wait;
+ unsigned int nrbufs, curbuf, buffers;
+ unsigned int readers;
+ unsigned int writers;
+ unsigned int files;
+ unsigned int waiting_writers;
+ unsigned int r_counter;
+ unsigned int w_counter;
+ struct page *tmp_page;
+ struct fasync_struct *fasync_readers;
+ struct fasync_struct *fasync_writers;
+ struct pipe_buffer *bufs;
+};
+
+/*
+ * Note on the nesting of these functions:
+ *
+ * ->confirm()
+ * ->steal()
+ *
+ * That is, ->steal() must be called on a confirmed buffer. See
+ * below for the meaning of each operation. Also see kerneldoc in
+ * fs/pipe.c for the pipe and generic variants of these hooks.
+ */
+struct pipe_buf_operations {
+ /*
+ * This is set to 1 if the generic pipe read/write may coalesce
+ * data into an existing buffer. If this is set to 0, a new pipe
+ * page segment is always used for new data.
+ */
+ int can_merge;
+
+ /*
+ * ->confirm() verifies that the data in the pipe buffer is there
+ * and that the contents are good. If the pages in the pipe belong
+ * to a file system, we may need to wait for IO completion in this
+ * hook. Returns 0 for good, or a negative error value in case of
+ * error.
+ */
+ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
+
+ /*
+ * When the contents of this pipe buffer have been completely
+ * consumed by a reader, ->release() is called.
+ */
+ void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
+
+ /*
+ * Attempt to take ownership of the pipe buffer and its contents.
+ * ->steal() returns 0 for success, in which case the contents
+ * of the pipe (the buf->page) are locked and now completely owned
+ * by the caller. The page may then be transferred to a different
+ * mapping; the most common case is insertion into a different
+ * file's address space cache.
+ */
+ int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
+
+ /*
+ * Get a reference to the pipe buffer.
+ */
+ void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+};
+
+/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
+ memory allocation, whereas PIPE_BUF makes atomicity guarantees. */
+#define PIPE_SIZE PAGE_SIZE
+
+/* Pipe lock and unlock operations */
+void pipe_lock(struct pipe_inode_info *);
+void pipe_unlock(struct pipe_inode_info *);
+void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
+
+extern unsigned int pipe_max_size, pipe_min_size;
+int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+
+
+/* Drop the inode semaphore and wait for a pipe event, atomically */
+void pipe_wait(struct pipe_inode_info *pipe);
+
+struct pipe_inode_info *alloc_pipe_info(void);
+void free_pipe_info(struct pipe_inode_info *);
+
+/* Generic pipe buffer ops functions */
+void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+
+extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+
+/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+struct pipe_inode_info *get_pipe_info(struct file *file);
+
+int create_pipe_files(struct file **, int);
+
+#endif
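Many buffer types can be assembled entirely from the generic helpers declared above; a hedged sketch of such an operations table:

#include <linux/pipe_fs_i.h>

static const struct pipe_buf_operations foo_pipe_buf_ops = {
	.can_merge = 0,				/* never coalesce writes into these buffers */
	.confirm = generic_pipe_buf_confirm,	/* no-op: data is assumed resident */
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
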
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
new file mode 100644
index 000000000..93d142ad1
--- /dev/null
+++ b/include/linux/pktcdvd.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices.
+ *
+ */
+#ifndef __PKTCDVD_H
+#define __PKTCDVD_H
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/mempool.h>
+#include <uapi/linux/pktcdvd.h>
+
+/* default bio write queue congestion marks */
+#define PKT_WRITE_CONGESTION_ON 10000
+#define PKT_WRITE_CONGESTION_OFF 9000
+
+
+struct packet_settings
+{
+ __u32 size; /* packet size in (512 byte) sectors */
+ __u8 fp; /* fixed packets */
+ __u8 link_loss; /* the rest is specified
+ * as per Mt Fuji */
+ __u8 write_type;
+ __u8 track_mode;
+ __u8 block_mode;
+};
+
+/*
+ * Very crude stats for now
+ */
+struct packet_stats
+{
+ unsigned long pkt_started;
+ unsigned long pkt_ended;
+ unsigned long secs_w;
+ unsigned long secs_rg;
+ unsigned long secs_r;
+};
+
+struct packet_cdrw
+{
+ struct list_head pkt_free_list;
+ struct list_head pkt_active_list;
+ spinlock_t active_list_lock; /* Serialize access to pkt_active_list */
+ struct task_struct *thread;
+ atomic_t pending_bios;
+};
+
+/*
+ * Switch to high speed reading after reading this many kilobytes
+ * with no interspersed writes.
+ */
+#define HI_SPEED_SWITCH 512
+
+struct packet_iosched
+{
+ atomic_t attention; /* Set to non-zero when queue processing is needed */
+ int writing; /* Non-zero when writing, zero when reading */
+ spinlock_t lock; /* Protecting read/write queue manipulations */
+ struct bio_list read_queue;
+ struct bio_list write_queue;
+ sector_t last_write; /* The sector where the last write ended */
+ int successive_reads;
+};
+
+/*
+ * 32 buffers of 2048 bytes
+ */
+#if (PAGE_SIZE % CD_FRAMESIZE) != 0
+#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE"
+#endif
+#define PACKET_MAX_SIZE 128
+#define FRAMES_PER_PAGE (PAGE_SIZE / CD_FRAMESIZE)
+#define PACKET_MAX_SECTORS (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
+
+enum packet_data_state {
+ PACKET_IDLE_STATE, /* Not used at the moment */
+ PACKET_WAITING_STATE, /* Waiting for more bios to arrive, so */
+ /* we don't have to do as much */
+ /* data gathering */
+ PACKET_READ_WAIT_STATE, /* Waiting for reads to fill in holes */
+ PACKET_WRITE_WAIT_STATE, /* Waiting for the write to complete */
+ PACKET_RECOVERY_STATE, /* Recover after read/write errors */
+ PACKET_FINISHED_STATE, /* After write has finished */
+
+ PACKET_NUM_STATES /* Number of possible states */
+};
+
+/*
+ * Information needed for writing a single packet
+ */
+struct pktcdvd_device;
+
+struct packet_data
+{
+ struct list_head list;
+
+ spinlock_t lock; /* Lock protecting state transitions and */
+ /* orig_bios list */
+
+ struct bio_list orig_bios; /* Original bios passed to pkt_make_request */
+ /* that will be handled by this packet */
+ int write_size; /* Total size of all bios in the orig_bios */
+ /* list, measured in number of frames */
+
+ struct bio *w_bio; /* The bio we will send to the real CD */
+ /* device once we have all data for the */
+ /* packet we are going to write */
+ sector_t sector; /* First sector in this packet */
+ int frames; /* Number of frames in this packet */
+
+ enum packet_data_state state; /* Current state */
+ atomic_t run_sm; /* Incremented whenever the state */
+ /* machine needs to be run */
+ long sleep_time; /* Set this to non-zero to make the state */
+ /* machine run after this many jiffies. */
+
+ atomic_t io_wait; /* Number of pending IO operations */
+ atomic_t io_errors; /* Number of read/write errors during IO */
+
+ struct bio *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
+ struct page *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
+
+ int cache_valid; /* If non-zero, the data for the zone defined */
+ /* by the sector variable is completely cached */
+ /* in the pages[] vector. */
+
+ int id; /* ID number for debugging */
+ struct pktcdvd_device *pd;
+};
+
+struct pkt_rb_node {
+ struct rb_node rb_node;
+ struct bio *bio;
+};
+
+struct packet_stacked_data
+{
+ struct bio *bio; /* Original read request bio */
+ struct pktcdvd_device *pd;
+};
+#define PSD_POOL_SIZE 64
+
+struct pktcdvd_kobj
+{
+ struct kobject kobj;
+ struct pktcdvd_device *pd;
+};
+#define to_pktcdvdkobj(_k) \
+ ((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj))
+
+struct pktcdvd_device
+{
+ struct block_device *bdev; /* dev attached */
+ dev_t pkt_dev; /* our dev */
+ char name[20];
+ struct packet_settings settings;
+ struct packet_stats stats;
+ int refcnt; /* Open count */
+ int write_speed; /* current write speed, kB/s */
+ int read_speed; /* current read speed, kB/s */
+ unsigned long offset; /* start offset */
+ __u8 mode_offset; /* 0 / 8 */
+ __u8 type;
+ unsigned long flags;
+ __u16 mmc3_profile;
+ __u32 nwa; /* next writable address */
+ __u32 lra; /* last recorded address */
+ struct packet_cdrw cdrw;
+ wait_queue_head_t wqueue;
+
+ spinlock_t lock; /* Serialize access to bio_queue */
+ struct rb_root bio_queue; /* Work queue of bios we need to handle */
+ int bio_queue_size; /* Number of nodes in bio_queue */
+ sector_t current_sector; /* Keep track of where the elevator is */
+ atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
+ /* needs to be run. */
+ mempool_t *rb_pool; /* mempool for pkt_rb_node allocations */
+
+ struct packet_iosched iosched;
+ struct gendisk *disk;
+
+ int write_congestion_off;
+ int write_congestion_on;
+
+ struct device *dev; /* sysfs pktcdvd[0-7] dev */
+ struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */
+ struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */
+
+ struct dentry *dfs_d_root; /* debugfs: devname directory */
+ struct dentry *dfs_f_info; /* debugfs: info file */
+};
+
+#endif /* __PKTCDVD_H */
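For orientation, the packet geometry above works out as follows on a typical 4 KiB PAGE_SIZE system: FRAMES_PER_PAGE = 4096 / 2048 = 2, so the pages[] array holds 128 / 2 = 64 pages per packet, and PACKET_MAX_SECTORS = 128 * 2048 / 512 = 512 sectors, i.e. a maximal packet covers 256 KiB of the disc.
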
diff --git a/include/linux/pl320-ipc.h b/include/linux/pl320-ipc.h
new file mode 100644
index 000000000..5161f63ec
--- /dev/null
+++ b/include/linux/pl320-ipc.h
@@ -0,0 +1,17 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+int pl320_ipc_transmit(u32 *data);
+int pl320_ipc_register_notifier(struct notifier_block *nb);
+int pl320_ipc_unregister_notifier(struct notifier_block *nb);
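A hedged sketch of a client hooking into this notifier interface; the callback name and the assumption about what the notifier passes in are illustrative:

#include <linux/init.h>
#include <linux/notifier.h>

static int foo_pl320_notify(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	/* "data" is assumed to carry the received mailbox payload. */
	return NOTIFY_OK;
}

static struct notifier_block foo_pl320_nb = {
	.notifier_call = foo_pl320_notify,
};

static int __init foo_pl320_client_init(void)
{
	return pl320_ipc_register_notifier(&foo_pl320_nb);
}
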
diff --git a/include/linux/platform_data/ad5449.h b/include/linux/platform_data/ad5449.h
new file mode 100644
index 000000000..bd712bd4b
--- /dev/null
+++ b/include/linux/platform_data/ad5449.h
@@ -0,0 +1,40 @@
+/*
+ * AD5415, AD5426, AD5429, AD5432, AD5439, AD5443, AD5449 Digital to Analog
+ * Converter driver.
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_AD5449_H__
+#define __LINUX_PLATFORM_DATA_AD5449_H__
+
+/**
+ * enum ad5449_sdo_mode - AD5449 SDO pin configuration
+ * @AD5449_SDO_DRIVE_FULL: Drive the SDO pin with full strength.
+ * @AD5449_SDO_DRIVE_WEAK: Drive the SDO pin with reduced (weak) strength.
+ * @AD5449_SDO_OPEN_DRAIN: Operate the SDO pin in open-drain mode.
+ * @AD5449_SDO_DISABLED: Disable the SDO pin; in this mode it is not possible to
+ * read back from the device.
+ */
+enum ad5449_sdo_mode {
+ AD5449_SDO_DRIVE_FULL = 0x0,
+ AD5449_SDO_DRIVE_WEAK = 0x1,
+ AD5449_SDO_OPEN_DRAIN = 0x2,
+ AD5449_SDO_DISABLED = 0x3,
+};
+
+/**
+ * struct ad5449_platform_data - Platform data for the ad5449 DAC driver
+ * @sdo_mode: SDO pin mode
+ * @hardware_clear_to_midscale: Whether asserting the hardware CLR pin sets the
+ * outputs to midscale (true) or to zero scale (false).
+ */
+struct ad5449_platform_data {
+ enum ad5449_sdo_mode sdo_mode;
+ bool hardware_clear_to_midscale;
+};
+
+#endif
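A board file would typically hand this structure to the SPI core; a minimal sketch with made-up bus, chip-select and speed values:

#include <linux/spi/spi.h>
#include <linux/platform_data/ad5449.h>

static const struct ad5449_platform_data foo_ad5449_pdata = {
	.sdo_mode = AD5449_SDO_DRIVE_FULL,
	.hardware_clear_to_midscale = true,
};

static struct spi_board_info foo_spi_devices[] __initdata = {
	{
		.modalias = "ad5449",
		.max_speed_hz = 1000000,
		.bus_num = 0,
		.chip_select = 1,
		.platform_data = &foo_ad5449_pdata,
	},
};

The array would then be passed to spi_register_board_info() from the board init code.
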
diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h
new file mode 100644
index 000000000..a5a1cb751
--- /dev/null
+++ b/include/linux/platform_data/ad5755.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AD5755_H__
+#define __LINUX_PLATFORM_DATA_AD5755_H__
+
+enum ad5755_mode {
+ AD5755_MODE_VOLTAGE_0V_5V = 0,
+ AD5755_MODE_VOLTAGE_0V_10V = 1,
+ AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
+ AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
+ AD5755_MODE_CURRENT_4mA_20mA = 4,
+ AD5755_MODE_CURRENT_0mA_20mA = 5,
+ AD5755_MODE_CURRENT_0mA_24mA = 6,
+};
+
+enum ad5755_dc_dc_phase {
+ AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
+ AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
+ AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
+ AD5755_DC_DC_PHASE_90_DEGREE = 3,
+};
+
+enum ad5755_dc_dc_freq {
+ AD5755_DC_DC_FREQ_250kHZ = 0,
+ AD5755_DC_DC_FREQ_410kHZ = 1,
+ AD5755_DC_DC_FREQ_650kHZ = 2,
+};
+
+enum ad5755_dc_dc_maxv {
+ AD5755_DC_DC_MAXV_23V = 0,
+ AD5755_DC_DC_MAXV_24V5 = 1,
+ AD5755_DC_DC_MAXV_27V = 2,
+ AD5755_DC_DC_MAXV_29V5 = 3,
+};
+
+enum ad5755_slew_rate {
+ AD5755_SLEW_RATE_64k = 0,
+ AD5755_SLEW_RATE_32k = 1,
+ AD5755_SLEW_RATE_16k = 2,
+ AD5755_SLEW_RATE_8k = 3,
+ AD5755_SLEW_RATE_4k = 4,
+ AD5755_SLEW_RATE_2k = 5,
+ AD5755_SLEW_RATE_1k = 6,
+ AD5755_SLEW_RATE_500 = 7,
+ AD5755_SLEW_RATE_250 = 8,
+ AD5755_SLEW_RATE_125 = 9,
+ AD5755_SLEW_RATE_64 = 10,
+ AD5755_SLEW_RATE_32 = 11,
+ AD5755_SLEW_RATE_16 = 12,
+ AD5755_SLEW_RATE_8 = 13,
+ AD5755_SLEW_RATE_4 = 14,
+ AD5755_SLEW_RATE_0_5 = 15,
+};
+
+enum ad5755_slew_step_size {
+ AD5755_SLEW_STEP_SIZE_1 = 0,
+ AD5755_SLEW_STEP_SIZE_2 = 1,
+ AD5755_SLEW_STEP_SIZE_4 = 2,
+ AD5755_SLEW_STEP_SIZE_8 = 3,
+ AD5755_SLEW_STEP_SIZE_16 = 4,
+ AD5755_SLEW_STEP_SIZE_32 = 5,
+ AD5755_SLEW_STEP_SIZE_64 = 6,
+ AD5755_SLEW_STEP_SIZE_128 = 7,
+ AD5755_SLEW_STEP_SIZE_256 = 8,
+};
+
+/**
+ * struct ad5755_platform_data - AD5755 DAC driver platform data
+ * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
+ * compensation resistor is used.
+ * @dc_dc_phase: DC-DC converter phase.
+ * @dc_dc_freq: DC-DC converter frequency.
+ * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
+ * @dac.mode: The mode to be used for the DAC output.
+ * @dac.ext_current_sense_resistor: Whether an external current sense resistor
+ * is used.
+ * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
+ * @dac.slew.enable: Whether to enable digital slew.
+ * @dac.slew.rate: Slew rate of the digital slew.
+ * @dac.slew.step_size: Slew step size of the digital slew.
+ **/
+struct ad5755_platform_data {
+ bool ext_dc_dc_compenstation_resistor;
+ enum ad5755_dc_dc_phase dc_dc_phase;
+ enum ad5755_dc_dc_freq dc_dc_freq;
+ enum ad5755_dc_dc_maxv dc_dc_maxv;
+
+ struct {
+ enum ad5755_mode mode;
+ bool ext_current_sense_resistor;
+ bool enable_voltage_overrange;
+ struct {
+ bool enable;
+ enum ad5755_slew_rate rate;
+ enum ad5755_slew_step_size step_size;
+ } slew;
+ } dac[4];
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7266.h b/include/linux/platform_data/ad7266.h
new file mode 100644
index 000000000..eabfdcb26
--- /dev/null
+++ b/include/linux/platform_data/ad7266.h
@@ -0,0 +1,54 @@
+/*
+ * AD7266/65 SPI ADC driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __IIO_ADC_AD7266_H__
+#define __IIO_ADC_AD7266_H__
+
+/**
+ * enum ad7266_range - AD7266 reference voltage range
+ * @AD7266_RANGE_VREF: Device is configured for input range 0V - VREF
+ * (RANGE pin set to low)
+ * @AD7266_RANGE_2VREF: Device is configured for input range 0V - 2VREF
+ * (RANGE pin set to high)
+ */
+enum ad7266_range {
+ AD7266_RANGE_VREF,
+ AD7266_RANGE_2VREF,
+};
+
+/**
+ * enum ad7266_mode - AD7266 sample mode
+ * @AD7266_MODE_DIFF: Device is configured for full differential mode
+ * (SGL/DIFF pin set to low, AD0 pin set to low)
+ * @AD7266_MODE_PSEUDO_DIFF: Device is configured for pseudo differential mode
+ * (SGL/DIFF pin set to low, AD0 pin set to high)
+ * @AD7266_MODE_SINGLE_ENDED: Device is configured for single-ended mode
+ * (SGL/DIFF pin set to high)
+ */
+enum ad7266_mode {
+ AD7266_MODE_DIFF,
+ AD7266_MODE_PSEUDO_DIFF,
+ AD7266_MODE_SINGLE_ENDED,
+};
+
+/**
+ * struct ad7266_platform_data - Platform data for the AD7266 driver
+ * @range: Reference voltage range the device is configured for
+ * @mode: Sample mode the device is configured for
+ * @fixed_addr: Whether the address pins are hard-wired
+ * @addr_gpios: GPIOs used for controlling the address pins, only used if
+ * fixed_addr is set to false.
+ */
+struct ad7266_platform_data {
+ enum ad7266_range range;
+ enum ad7266_mode mode;
+ bool fixed_addr;
+ unsigned int addr_gpios[3];
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7291.h b/include/linux/platform_data/ad7291.h
new file mode 100644
index 000000000..bbd89fa51
--- /dev/null
+++ b/include/linux/platform_data/ad7291.h
@@ -0,0 +1,12 @@
+#ifndef __IIO_AD7291_H__
+#define __IIO_AD7291_H__
+
+/**
+ * struct ad7291_platform_data - AD7291 platform data
+ * @use_external_ref: Whether to use an external or internal reference voltage
+ */
+struct ad7291_platform_data {
+ bool use_external_ref;
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7298.h b/include/linux/platform_data/ad7298.h
new file mode 100644
index 000000000..fbf8adf13
--- /dev/null
+++ b/include/linux/platform_data/ad7298.h
@@ -0,0 +1,20 @@
+/*
+ * AD7298 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_AD7298_H__
+#define __LINUX_PLATFORM_DATA_AD7298_H__
+
+/**
+ * struct ad7298_platform_data - Platform data for the ad7298 ADC driver
+ * @ext_ref: Whether to use an external reference voltage.
+ **/
+struct ad7298_platform_data {
+ bool ext_ref;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_AD7298_H__ */
diff --git a/include/linux/platform_data/ad7303.h b/include/linux/platform_data/ad7303.h
new file mode 100644
index 000000000..de6a7a6b4
--- /dev/null
+++ b/include/linux/platform_data/ad7303.h
@@ -0,0 +1,21 @@
+/*
+ * Analog Devices AD7303 DAC driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __IIO_ADC_AD7303_H__
+#define __IIO_ADC_AD7303_H__
+
+/**
+ * struct ad7303_platform_data - AD7303 platform data
+ * @use_external_ref: If set to true use an external voltage reference connected
+ * to the REF pin, otherwise use the internal reference derived from Vdd.
+ */
+struct ad7303_platform_data {
+ bool use_external_ref;
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7791.h b/include/linux/platform_data/ad7791.h
new file mode 100644
index 000000000..f9e4db1b8
--- /dev/null
+++ b/include/linux/platform_data/ad7791.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_PLATFORM_DATA_AD7791__
+#define __LINUX_PLATFORM_DATA_AD7791__
+
+/**
+ * struct ad7791_platform_data - AD7791 device platform data
+ * @buffered: If set to true configure the device for buffered input mode.
+ * @burnout_current: If set to true the 100nA burnout current is enabled.
+ * @unipolar: If set to true sample in unipolar mode, if set to false sample in
+ * bipolar mode.
+ */
+struct ad7791_platform_data {
+ bool buffered;
+ bool burnout_current;
+ bool unipolar;
+};
+
+#endif
diff --git a/include/linux/platform_data/ad7793.h b/include/linux/platform_data/ad7793.h
new file mode 100644
index 000000000..7ea6751aa
--- /dev/null
+++ b/include/linux/platform_data/ad7793.h
@@ -0,0 +1,112 @@
+/*
+ * AD7792/AD7793 SPI ADC driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AD7793_H__
+#define __LINUX_PLATFORM_DATA_AD7793_H__
+
+/**
+ * enum ad7793_clock_source - AD7793 clock source selection
+ * @AD7793_CLK_SRC_INT: Internal 64 kHz clock, not available at the CLK pin.
+ * @AD7793_CLK_SRC_INT_CO: Internal 64 kHz clock, available at the CLK pin.
+ * @AD7793_CLK_SRC_EXT: Use external clock.
+ * @AD7793_CLK_SRC_EXT_DIV2: Use external clock divided by 2.
+ */
+enum ad7793_clock_source {
+ AD7793_CLK_SRC_INT,
+ AD7793_CLK_SRC_INT_CO,
+ AD7793_CLK_SRC_EXT,
+ AD7793_CLK_SRC_EXT_DIV2,
+};
+
+/**
+ * enum ad7793_bias_voltage - AD7793 bias voltage selection
+ * @AD7793_BIAS_VOLTAGE_DISABLED: Bias voltage generator disabled
+ * @AD7793_BIAS_VOLTAGE_AIN1: Bias voltage connected to AIN1(-).
+ * @AD7793_BIAS_VOLTAGE_AIN2: Bias voltage connected to AIN2(-).
+ * @AD7793_BIAS_VOLTAGE_AIN3: Bias voltage connected to AIN3(-).
+ * Only valid for AD7795/AD7796.
+ */
+enum ad7793_bias_voltage {
+ AD7793_BIAS_VOLTAGE_DISABLED,
+ AD7793_BIAS_VOLTAGE_AIN1,
+ AD7793_BIAS_VOLTAGE_AIN2,
+ AD7793_BIAS_VOLTAGE_AIN3,
+};
+
+/**
+ * enum ad7793_refsel - AD7793 reference voltage selection
+ * @AD7793_REFSEL_REFIN1: External reference applied between REFIN1(+)
+ * and REFIN1(-).
+ * @AD7793_REFSEL_REFIN2: External reference applied between REFIN2(+) and
+ * REFIN1(-). Only valid for AD7795/AD7796.
+ * @AD7793_REFSEL_INTERNAL: Internal 1.17 V reference.
+ */
+enum ad7793_refsel {
+ AD7793_REFSEL_REFIN1 = 0,
+ AD7793_REFSEL_REFIN2 = 1,
+ AD7793_REFSEL_INTERNAL = 2,
+};
+
+/**
+ * enum ad7793_current_source_direction - AD7793 excitation current direction
+ * @AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2: Current source IEXC1 connected to pin
+ * IOUT1, current source IEXC2 connected to pin IOUT2.
+ * @AD7793_IEXEC1_IOUT2_IEXEC2_IOUT1: Current source IEXC2 connected to pin
+ * IOUT1, current source IEXC1 connected to pin IOUT2.
+ * @AD7793_IEXEC1_IEXEC2_IOUT1: Both current sources connected to pin IOUT1.
+ * Only valid when the current sources are set to 10 uA or 210 uA.
+ * @AD7793_IEXEC1_IEXEC2_IOUT2: Both current sources connected to Pin IOUT2.
+ * Only valid when the current sources are set to 10 uA or 210 uA.
+ */
+enum ad7793_current_source_direction {
+ AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2 = 0,
+ AD7793_IEXEC1_IOUT2_IEXEC2_IOUT1 = 1,
+ AD7793_IEXEC1_IEXEC2_IOUT1 = 2,
+ AD7793_IEXEC1_IEXEC2_IOUT2 = 3,
+};
+
+/**
+ * enum ad7793_excitation_current - AD7793 excitation current selection
+ * @AD7793_IX_DISABLED: Excitation current disabled.
+ * @AD7793_IX_10uA: Enable 10 micro-ampere excitation current.
+ * @AD7793_IX_210uA: Enable 210 micro-ampere excitation current.
+ * @AD7793_IX_1mA: Enable 1 milli-ampere excitation current.
+ */
+enum ad7793_excitation_current {
+ AD7793_IX_DISABLED = 0,
+ AD7793_IX_10uA = 1,
+ AD7793_IX_210uA = 2,
+ AD7793_IX_1mA = 3,
+};
+
+/**
+ * struct ad7793_platform_data - AD7793 platform data
+ * @clock_src: Clock source selection
+ * @burnout_current: If set to true the 100nA burnout current is enabled.
+ * @boost_enable: Enable boost for the bias voltage generator.
+ * @buffered: If set to true configure the device for buffered input mode.
+ * @unipolar: If set to true sample in unipolar mode, if set to false sample in
+ * bipolar mode.
+ * @refsel: Reference voltage selection
+ * @bias_voltage: Bias voltage selection
+ * @exitation_current: Excitation current selection
+ * @current_source_direction: Excitation current direction selection
+ */
+struct ad7793_platform_data {
+ enum ad7793_clock_source clock_src;
+ bool burnout_current;
+ bool boost_enable;
+ bool buffered;
+ bool unipolar;
+
+ enum ad7793_refsel refsel;
+ enum ad7793_bias_voltage bias_voltage;
+ enum ad7793_excitation_current exitation_current;
+ enum ad7793_current_source_direction current_source_direction;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_AD7793_H__ */
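A hedged example of filling this structure in board code; the values are arbitrary and only show which enum goes with which field (the exitation_current spelling follows the structure member above):

#include <linux/platform_data/ad7793.h>

static const struct ad7793_platform_data foo_ad7793_pdata = {
	.clock_src = AD7793_CLK_SRC_INT,
	.burnout_current = false,
	.boost_enable = false,
	.buffered = true,
	.unipolar = false,
	.refsel = AD7793_REFSEL_INTERNAL,
	.bias_voltage = AD7793_BIAS_VOLTAGE_DISABLED,
	.exitation_current = AD7793_IX_DISABLED,
	.current_source_direction = AD7793_IEXEC1_IOUT1_IEXEC2_IOUT2,
};
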
diff --git a/include/linux/platform_data/ad7887.h b/include/linux/platform_data/ad7887.h
new file mode 100644
index 000000000..1e06eac31
--- /dev/null
+++ b/include/linux/platform_data/ad7887.h
@@ -0,0 +1,26 @@
+/*
+ * AD7887 SPI ADC driver
+ *
+ * Copyright 2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+#ifndef IIO_ADC_AD7887_H_
+#define IIO_ADC_AD7887_H_
+
+/**
+ * struct ad7887_platform_data - AD7887 ADC driver platform data
+ * @en_dual: Whether to use dual channel mode. If set to true AIN1 becomes the
+ * second input channel, and Vref is internally connected to Vdd. If set to
+ * false the device is used in single channel mode and AIN1/Vref is used as
+ * VREF input.
+ * @use_onchip_ref: Whether to use the onchip reference. If set to true the
+ * internal 2.5V reference is used. If set to false an external reference is
+ * used.
+ */
+struct ad7887_platform_data {
+ bool en_dual;
+ bool use_onchip_ref;
+};
+
+#endif /* IIO_ADC_AD7887_H_ */
diff --git a/include/linux/platform_data/adau17x1.h b/include/linux/platform_data/adau17x1.h
new file mode 100644
index 000000000..a81766cae
--- /dev/null
+++ b/include/linux/platform_data/adau17x1.h
@@ -0,0 +1,109 @@
+/*
+ * Driver for ADAU1361/ADAU1461/ADAU1761/ADAU1961/ADAU1381/ADAU1781 codecs
+ *
+ * Copyright 2011-2014 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_ADAU17X1_H__
+#define __LINUX_PLATFORM_DATA_ADAU17X1_H__
+
+/**
+ * enum adau17x1_micbias_voltage - Microphone bias voltage
+ * @ADAU17X1_MICBIAS_0_90_AVDD: 0.9 * AVDD
+ * @ADAU17X1_MICBIAS_0_65_AVDD: 0.65 * AVDD
+ */
+enum adau17x1_micbias_voltage {
+ ADAU17X1_MICBIAS_0_90_AVDD = 0,
+ ADAU17X1_MICBIAS_0_65_AVDD = 1,
+};
+
+/**
+ * enum adau1761_digmic_jackdet_pin_mode - Configuration of the JACKDET/MICIN pin
+ * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE: Disable the pin
+ * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC: Configure the pin for usage as
+ * digital microphone input.
+ * @ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT: Configure the pin for jack
+ * insertion detection.
+ */
+enum adau1761_digmic_jackdet_pin_mode {
+ ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE,
+ ADAU1761_DIGMIC_JACKDET_PIN_MODE_DIGMIC,
+ ADAU1761_DIGMIC_JACKDET_PIN_MODE_JACKDETECT,
+};
+
+/**
+ * enum adau1761_jackdetect_debounce_time - Jack insertion detection debounce time
+ * @ADAU1761_JACKDETECT_DEBOUNCE_5MS: 5 milliseconds
+ * @ADAU1761_JACKDETECT_DEBOUNCE_10MS: 10 milliseconds
+ * @ADAU1761_JACKDETECT_DEBOUNCE_20MS: 20 milliseconds
+ * @ADAU1761_JACKDETECT_DEBOUNCE_40MS: 40 milliseconds
+ */
+enum adau1761_jackdetect_debounce_time {
+ ADAU1761_JACKDETECT_DEBOUNCE_5MS = 0,
+ ADAU1761_JACKDETECT_DEBOUNCE_10MS = 1,
+ ADAU1761_JACKDETECT_DEBOUNCE_20MS = 2,
+ ADAU1761_JACKDETECT_DEBOUNCE_40MS = 3,
+};
+
+/**
+ * enum adau1761_output_mode - Output mode configuration
+ * @ADAU1761_OUTPUT_MODE_HEADPHONE: Headphone output
+ * @ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS: Capless headphone output
+ * @ADAU1761_OUTPUT_MODE_LINE: Line output
+ */
+enum adau1761_output_mode {
+ ADAU1761_OUTPUT_MODE_HEADPHONE,
+ ADAU1761_OUTPUT_MODE_HEADPHONE_CAPLESS,
+ ADAU1761_OUTPUT_MODE_LINE,
+};
+
+/**
+ * struct adau1761_platform_data - ADAU1761 Codec driver platform data
+ * @input_differential: If true the input pins will be configured in
+ * differential mode.
+ * @lineout_mode: Output mode for the LOUT/ROUT pins
+ * @headphone_mode: Output mode for the LHP/RHP pins
+ * @digmic_jackdetect_pin_mode: JACKDET/MICIN pin configuration
+ * @jackdetect_debounce_time: Jack insertion detection debounce time.
+ * Note: This value will only be used if the JACKDET/MICIN pin is configured
+ * for jack insertion detection.
+ * @jackdetect_active_low: If true the jack insertion detection is active low.
+ * Otherwise it will be active high.
+ * @micbias_voltage: Microphone voltage bias
+ */
+struct adau1761_platform_data {
+ bool input_differential;
+ enum adau1761_output_mode lineout_mode;
+ enum adau1761_output_mode headphone_mode;
+
+ enum adau1761_digmic_jackdet_pin_mode digmic_jackdetect_pin_mode;
+
+ enum adau1761_jackdetect_debounce_time jackdetect_debounce_time;
+ bool jackdetect_active_low;
+
+ enum adau17x1_micbias_voltage micbias_voltage;
+};
+
+/**
+ * struct adau1781_platform_data - ADAU1781 Codec driver platform data
+ * @left_input_differential: If true configure the left input as
+ * differential input.
+ * @right_input_differential: If true configure the right input as differential
+ * input.
+ * @use_dmic: If true configure the MIC pins as digital microphone pins instead
+ * of analog microphone pins.
+ * @micbias_voltage: Microphone voltage bias
+ */
+struct adau1781_platform_data {
+ bool left_input_differential;
+ bool right_input_differential;
+
+ bool use_dmic;
+
+ enum adau17x1_micbias_voltage micbias_voltage;
+};
+
+#endif
diff --git a/include/linux/platform_data/adau1977.h b/include/linux/platform_data/adau1977.h
new file mode 100644
index 000000000..bed11d908
--- /dev/null
+++ b/include/linux/platform_data/adau1977.h
@@ -0,0 +1,45 @@
+/*
+ * ADAU1977/ADAU1978/ADAU1979 driver
+ *
+ * Copyright 2014 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_ADAU1977_H__
+#define __LINUX_PLATFORM_DATA_ADAU1977_H__
+
+/**
+ * enum adau1977_micbias - ADAU1977 MICBIAS pin voltage setting
+ * @ADAU1977_MICBIAS_5V0: MICBIAS is set to 5.0 V
+ * @ADAU1977_MICBIAS_5V5: MICBIAS is set to 5.5 V
+ * @ADAU1977_MICBIAS_6V0: MICBIAS is set to 6.0 V
+ * @ADAU1977_MICBIAS_6V5: MICBIAS is set to 6.5 V
+ * @ADAU1977_MICBIAS_7V0: MICBIAS is set to 7.0 V
+ * @ADAU1977_MICBIAS_7V5: MICBIAS is set to 7.5 V
+ * @ADAU1977_MICBIAS_8V0: MICBIAS is set to 8.0 V
+ * @ADAU1977_MICBIAS_8V5: MICBIAS is set to 8.5 V
+ * @ADAU1977_MICBIAS_9V0: MICBIAS is set to 9.0 V
+ */
+enum adau1977_micbias {
+ ADAU1977_MICBIAS_5V0 = 0x0,
+ ADAU1977_MICBIAS_5V5 = 0x1,
+ ADAU1977_MICBIAS_6V0 = 0x2,
+ ADAU1977_MICBIAS_6V5 = 0x3,
+ ADAU1977_MICBIAS_7V0 = 0x4,
+ ADAU1977_MICBIAS_7V5 = 0x5,
+ ADAU1977_MICBIAS_8V0 = 0x6,
+ ADAU1977_MICBIAS_8V5 = 0x7,
+ ADAU1977_MICBIAS_9V0 = 0x8,
+};
+
+/**
+ * struct adau1977_platform_data - Platform configuration data for the ADAU1977
+ * @micbias: Specifies the voltage for the MICBIAS pin
+ */
+struct adau1977_platform_data {
+ enum adau1977_micbias micbias;
+};
+
+#endif
diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h
new file mode 100644
index 000000000..3245f45f9
--- /dev/null
+++ b/include/linux/platform_data/ads7828.h
@@ -0,0 +1,29 @@
+/*
+ * TI ADS7828 A/D Converter platform data definition
+ *
+ * Copyright (c) 2012 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * For further information, see the Documentation/hwmon/ads7828 file.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PDATA_ADS7828_H
+#define _PDATA_ADS7828_H
+
+/**
+ * struct ads7828_platform_data - optional ADS7828 connectivity info
+ * @diff_input: Differential input mode.
+ * @ext_vref: Use an external voltage reference.
+ * @vref_mv: Voltage reference value, if external.
+ */
+struct ads7828_platform_data {
+ bool diff_input;
+ bool ext_vref;
+ unsigned int vref_mv;
+};
+
+#endif /* _PDATA_ADS7828_H */
diff --git a/include/linux/platform_data/arm-ux500-pm.h b/include/linux/platform_data/arm-ux500-pm.h
new file mode 100644
index 000000000..8dff64b29
--- /dev/null
+++ b/include/linux/platform_data/arm-ux500-pm.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010-2013
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com> for
+ * ST-Ericsson.
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org> for Linaro.
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ */
+
+#ifndef ARM_UX500_PM_H
+#define ARM_UX500_PM_H
+
+int prcmu_gic_decouple(void);
+int prcmu_gic_recouple(void);
+bool prcmu_gic_pending_irq(void);
+bool prcmu_pending_irq(void);
+bool prcmu_is_cpu_in_wfi(int cpu);
+int prcmu_copy_gic_settings(void);
+void ux500_pm_init(u32 phy_base, u32 size);
+
+#endif /* ARM_UX500_PM_H */
diff --git a/include/linux/platform_data/asoc-imx-ssi.h b/include/linux/platform_data/asoc-imx-ssi.h
new file mode 100644
index 000000000..92c7fd72f
--- /dev/null
+++ b/include/linux/platform_data/asoc-imx-ssi.h
@@ -0,0 +1,23 @@
+#ifndef __MACH_SSI_H
+#define __MACH_SSI_H
+
+struct snd_ac97;
+
+extern unsigned char imx_ssi_fiq_start, imx_ssi_fiq_end;
+extern unsigned long imx_ssi_fiq_base, imx_ssi_fiq_tx_buffer, imx_ssi_fiq_rx_buffer;
+
+struct imx_ssi_platform_data {
+ unsigned int flags;
+#define IMX_SSI_DMA (1 << 0)
+#define IMX_SSI_USE_AC97 (1 << 1)
+#define IMX_SSI_NET (1 << 2)
+#define IMX_SSI_SYN (1 << 3)
+#define IMX_SSI_USE_I2S_SLAVE (1 << 4)
+ void (*ac97_reset) (struct snd_ac97 *ac97);
+ void (*ac97_warm_reset)(struct snd_ac97 *ac97);
+};
+
+extern int mxc_set_irq_fiq(unsigned int irq, unsigned int type);
+
+#endif /* __MACH_SSI_H */
+
diff --git a/include/linux/platform_data/asoc-kirkwood.h b/include/linux/platform_data/asoc-kirkwood.h
new file mode 100644
index 000000000..d6a55bd2e
--- /dev/null
+++ b/include/linux/platform_data/asoc-kirkwood.h
@@ -0,0 +1,7 @@
+#ifndef __PLAT_AUDIO_H
+#define __PLAT_AUDIO_H
+
+struct kirkwood_asoc_platform_data {
+ int burst;
+};
+#endif
diff --git a/include/linux/platform_data/asoc-mx27vis.h b/include/linux/platform_data/asoc-mx27vis.h
new file mode 100644
index 000000000..409adcd04
--- /dev/null
+++ b/include/linux/platform_data/asoc-mx27vis.h
@@ -0,0 +1,11 @@
+#ifndef __PLATFORM_DATA_ASOC_MX27VIS_H
+#define __PLATFORM_DATA_ASOC_MX27VIS_H
+
+struct snd_mx27vis_platform_data {
+ int amp_gain0_gpio;
+ int amp_gain1_gpio;
+ int amp_mutel_gpio;
+ int amp_muter_gpio;
+};
+
+#endif /* __PLATFORM_DATA_ASOC_MX27VIS_H */
diff --git a/include/linux/platform_data/asoc-palm27x.h b/include/linux/platform_data/asoc-palm27x.h
new file mode 100644
index 000000000..58afb30d5
--- /dev/null
+++ b/include/linux/platform_data/asoc-palm27x.h
@@ -0,0 +1,8 @@
+#ifndef _INCLUDE_PALMASOC_H_
+#define _INCLUDE_PALMASOC_H_
+
+struct palm27x_asoc_info {
+ int jack_gpio;
+};
+
+#endif
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
new file mode 100644
index 000000000..5e0bc779e
--- /dev/null
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2009 Samsung Electronics Co. Ltd
+ * Author: Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The machine init code calls s3c*_ac97_setup_gpio with
+ * one of these defines in order to select appropriate bank
+ * of GPIO for AC97 pins
+ */
+#define S3C64XX_AC97_GPD 0
+#define S3C64XX_AC97_GPE 1
+extern void s3c64xx_ac97_setup_gpio(int);
+
+struct samsung_i2s {
+/* If the Primary DAI has 5.1 Channels */
+#define QUIRK_PRI_6CHAN (1 << 0)
+/* If the I2S block has a Stereo Overlay Channel */
+#define QUIRK_SEC_DAI (1 << 1)
+/*
+ * If the I2S block has no internal prescaler or MUX (I2SMOD[10] bit),
+ * the machine driver must provide a suitably configured clock to the I2S block.
+ */
+#define QUIRK_NO_MUXPSR (1 << 2)
+#define QUIRK_NEED_RSTCLR (1 << 3)
+#define QUIRK_SUPPORTS_TDM (1 << 4)
+#define QUIRK_SUPPORTS_IDMA (1 << 5)
+ /* Quirks of the I2S controller */
+ u32 quirks;
+ dma_addr_t idma_addr;
+};
+
+/**
+ * struct s3c_audio_pdata - common platform data for audio device drivers
+ * @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode
+ */
+struct s3c_audio_pdata {
+ int (*cfg_gpio)(struct platform_device *);
+ union {
+ struct samsung_i2s i2s;
+ } type;
+};
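A hedged sketch of how machine code might fill in s3c_audio_pdata for an I2S controller; the pin-mux helper and the quirk selection are hypothetical and board dependent.

#include <linux/platform_device.h>
#include <linux/platform_data/asoc-s3c.h>

/* hypothetical helper that muxes the I2S pins; returns 0 on success */
static int board_i2s_cfg_gpio(struct platform_device *pdev)
{
	return 0;
}

static struct s3c_audio_pdata board_i2s_pdata = {
	.cfg_gpio = board_i2s_cfg_gpio,
	.type = {
		.i2s = {
			.quirks = QUIRK_PRI_6CHAN | QUIRK_NEED_RSTCLR,
		},
	},
};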
diff --git a/include/linux/platform_data/asoc-s3c24xx_simtec.h b/include/linux/platform_data/asoc-s3c24xx_simtec.h
new file mode 100644
index 000000000..d220e5412
--- /dev/null
+++ b/include/linux/platform_data/asoc-s3c24xx_simtec.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Simtec Audio support.
+*/
+
+/**
+ * struct s3c24xx_audio_simtec_pdata - platform data for simtec audio
+ * @use_mpllin: Select codec clock from MPLLin
+ * @output_cdclk: Need to output CDCLK to the codec
+ * @have_mic: Set if we have a MIC socket
+ * @have_lout: Set if we have a LineOut socket
+ * @amp_gpio: GPIO pin to enable the AMP
+ * @amp_gain: Optional GPIO to control AMP gain
+ */
+struct s3c24xx_audio_simtec_pdata {
+ unsigned int use_mpllin:1;
+ unsigned int output_cdclk:1;
+
+ unsigned int have_mic:1;
+ unsigned int have_lout:1;
+
+ int amp_gpio;
+ int amp_gain[2];
+
+ void (*startup)(void);
+};
diff --git a/include/linux/platform_data/asoc-ti-mcbsp.h b/include/linux/platform_data/asoc-ti-mcbsp.h
new file mode 100644
index 000000000..3c73c045f
--- /dev/null
+++ b/include/linux/platform_data/asoc-ti-mcbsp.h
@@ -0,0 +1,58 @@
+/*
+ * Defines for Multi-Channel Buffered Serial Port
+ *
+ * Copyright (C) 2002 RidgeRun, Inc.
+ * Author: Steve Johnson
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __ASOC_TI_MCBSP_H
+#define __ASOC_TI_MCBSP_H
+
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+
+#define MCBSP_CONFIG_TYPE2 0x2
+#define MCBSP_CONFIG_TYPE3 0x3
+#define MCBSP_CONFIG_TYPE4 0x4
+
+/* Platform specific configuration */
+struct omap_mcbsp_ops {
+ void (*request)(unsigned int);
+ void (*free)(unsigned int);
+};
+
+struct omap_mcbsp_platform_data {
+ struct omap_mcbsp_ops *ops;
+ u16 buffer_size;
+ u8 reg_size;
+ u8 reg_step;
+
+ /* McBSP platform and instance specific features */
+ bool has_wakeup; /* Wakeup capability */
+ bool has_ccr; /* Transceiver has configuration control registers */
+ int (*enable_st_clock)(unsigned int, bool);
+};
+
+/**
+ * omap_mcbsp_dev_attr - OMAP McBSP device attributes for omap_hwmod
+ * @sidetone: name of the sidetone device
+ */
+struct omap_mcbsp_dev_attr {
+ const char *sidetone;
+};
+
+#endif
diff --git a/include/linux/platform_data/asoc-ux500-msp.h b/include/linux/platform_data/asoc-ux500-msp.h
new file mode 100644
index 000000000..2f34bb98f
--- /dev/null
+++ b/include/linux/platform_data/asoc-ux500-msp.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL), version 2.
+ */
+
+#ifndef __MSP_H
+#define __MSP_H
+
+#include <linux/platform_data/dma-ste-dma40.h>
+
+/* Platform data structure for a MSP I2S-device */
+struct msp_i2s_platform_data {
+ int id;
+ struct stedma40_chan_cfg *msp_i2s_dma_rx;
+ struct stedma40_chan_cfg *msp_i2s_dma_tx;
+};
+
+#endif
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
new file mode 100644
index 000000000..c42aa89d3
--- /dev/null
+++ b/include/linux/platform_data/at24.h
@@ -0,0 +1,55 @@
+/*
+ * at24.h - platform_data for the at24 (generic eeprom) driver
+ * (C) Copyright 2008 by Pengutronix
+ * (C) Copyright 2012 by Wolfram Sang
+ * same license as the driver
+ */
+
+#ifndef _LINUX_AT24_H
+#define _LINUX_AT24_H
+
+#include <linux/types.h>
+#include <linux/memory.h>
+
+/**
+ * struct at24_platform_data - data to set up at24 (generic eeprom) driver
+ * @byte_len: size of eeprom in bytes
+ * @page_size: number of bytes which can be written in one go
+ * @flags: tunable options, check AT24_FLAG_* defines
+ * @setup: an optional callback invoked after eeprom is probed; enables kernel
+ * code to access eeprom via memory_accessor, see example
+ * @context: optional parameter passed to setup()
+ *
+ * If you set up a custom eeprom type, please double-check the parameters.
+ * Especially page_size needs extra care, as you risk data loss if your value
+ * is bigger than what the chip actually supports!
+ *
+ * An example in pseudo code for a setup() callback:
+ *
+ * void get_mac_addr(struct memory_accessor *mem_acc, void *context)
+ * {
+ * u8 *mac_addr = ethernet_pdata->mac_addr;
+ * off_t offset = context;
+ *
+ * // Read MAC addr from EEPROM
+ * if (mem_acc->read(mem_acc, mac_addr, offset, ETH_ALEN) == ETH_ALEN)
+ * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
+ * }
+ *
+ * This function pointer and context can now be set up in at24_platform_data.
+ */
+
+struct at24_platform_data {
+ u32 byte_len; /* size (sum of all addr) */
+ u16 page_size; /* for writes */
+ u8 flags;
+#define AT24_FLAG_ADDR16 0x80 /* address pointer is 16 bit */
+#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */
+#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
+#define AT24_FLAG_TAKE8ADDR 0x10 /* always take 8 addresses (24c00) */
+
+ void (*setup)(struct memory_accessor *, void *context);
+ void *context;
+};
+
+#endif /* _LINUX_AT24_H */
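Complementing the pseudo-code above, a minimal sketch of declaring an AT24 EEPROM from board code; the chip ("24c32" at address 0x50) and the sizes are illustrative.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/platform_data/at24.h>

static struct at24_platform_data board_eeprom_pdata = {
	.byte_len  = 4096,		/* 24c32: 32 kbit = 4096 bytes */
	.page_size = 32,
	.flags     = AT24_FLAG_ADDR16,
};

static struct i2c_board_info board_i2c1_devices[] __initdata = {
	{
		I2C_BOARD_INFO("24c32", 0x50),
		.platform_data = &board_eeprom_pdata,
	},
};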
diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h
new file mode 100644
index 000000000..7819fc787
--- /dev/null
+++ b/include/linux/platform_data/at91_adc.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2011 Free Electrons
+ *
+ * Licensed under the GPLv2 or later.
+ */
+
+#ifndef _AT91_ADC_H_
+#define _AT91_ADC_H_
+
+enum atmel_adc_ts_type {
+ ATMEL_ADC_TOUCHSCREEN_NONE = 0,
+ ATMEL_ADC_TOUCHSCREEN_4WIRE = 4,
+ ATMEL_ADC_TOUCHSCREEN_5WIRE = 5,
+};
+
+/**
+ * struct at91_adc_trigger - description of triggers
+ * @name: name of the trigger advertised to the user
+ * @value: value to set in the ADC's trigger setup register
+ * to enable the trigger
+ * @is_external: Does the trigger rely on an external pin?
+ */
+struct at91_adc_trigger {
+ const char *name;
+ u8 value;
+ bool is_external;
+};
+
+/**
+ * struct at91_adc_data - platform data for ADC driver
+ * @channels_used: channels in use on the board as a bitmask
+ * @startup_time: startup time of the ADC in microseconds
+ * @trigger_list: Triggers available in the ADC
+ * @trigger_number: Number of triggers available in the ADC
+ * @use_external_triggers: does the board have external triggers available
+ * @vref: Reference voltage for the ADC in millivolts
+ * @touchscreen_type: If a touchscreen is connected, its type (4 or 5 wires)
+ */
+struct at91_adc_data {
+ unsigned long channels_used;
+ u8 startup_time;
+ struct at91_adc_trigger *trigger_list;
+ u8 trigger_number;
+ bool use_external_triggers;
+ u16 vref;
+ enum atmel_adc_ts_type touchscreen_type;
+};
+
+extern void __init at91_add_device_adc(struct at91_adc_data *data);
+#endif
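A sketch of board data for this driver; the channel mask, startup time, trigger register value and reference voltage are placeholders that would come from the board schematic and the SoC datasheet.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/platform_data/at91_adc.h>

static struct at91_adc_trigger board_adc_triggers[] = {
	{
		.name        = "external-rising",
		.value       = 0x1,		/* SoC-specific trigger setup value */
		.is_external = true,
	},
};

static struct at91_adc_data board_adc_data = {
	.channels_used         = BIT(0) | BIT(1),
	.startup_time          = 15,		/* microseconds */
	.trigger_list          = board_adc_triggers,
	.trigger_number        = ARRAY_SIZE(board_adc_triggers),
	.use_external_triggers = true,
	.vref                  = 3300,		/* millivolts */
	.touchscreen_type      = ATMEL_ADC_TOUCHSCREEN_NONE,
};

/* passed to the SoC code, e.g. at91_add_device_adc(&board_adc_data); */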
diff --git a/include/linux/platform_data/ata-pxa.h b/include/linux/platform_data/ata-pxa.h
new file mode 100644
index 000000000..6cf7df1d5
--- /dev/null
+++ b/include/linux/platform_data/ata-pxa.h
@@ -0,0 +1,33 @@
+/*
+ * Generic PXA PATA driver
+ *
+ * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MACH_PATA_PXA_H__
+#define __MACH_PATA_PXA_H__
+
+struct pata_pxa_pdata {
+ /* PXA DMA DREQ<0:2> pin */
+ uint32_t dma_dreq;
+ /* Register shift */
+ uint32_t reg_shift;
+ /* IRQ flags */
+ uint32_t irq_flags;
+};
+
+#endif /* __MACH_PATA_PXA_H__ */
diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h
new file mode 100644
index 000000000..748e71642
--- /dev/null
+++ b/include/linux/platform_data/ata-samsung_cf.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Samsung CF-ATA platform_device info
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ATA_SAMSUNG_CF_H
+#define __ATA_SAMSUNG_CF_H __FILE__
+
+/**
+ * struct s3c_ide_platdata - S3C IDE driver platform data.
+ * @setup_gpio: Setup the external GPIO pins to the right state for data
+ * transfer in true-ide mode.
+ */
+struct s3c_ide_platdata {
+ void (*setup_gpio)(void);
+};
+
+/*
+ * s3c_ide_set_platdata() - Setup the platform specific data for IDE driver.
+ * @pdata: Platform data for IDE driver.
+ */
+extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata);
+
+/* architecture-specific IDE configuration */
+extern void s3c64xx_ide_setup_gpio(void);
+extern void s5pv210_ide_setup_gpio(void);
+
+#endif /*__ATA_SAMSUNG_CF_H */
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
new file mode 100644
index 000000000..4b452c6a2
--- /dev/null
+++ b/include/linux/platform_data/atmel.h
@@ -0,0 +1,97 @@
+/*
+ * atmel platform data
+ *
+ * GPL v2 Only
+ */
+
+#ifndef __ATMEL_H__
+#define __ATMEL_H__
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/spi/spi.h>
+#include <linux/usb/atmel_usba_udc.h>
+#include <linux/atmel-mci.h>
+#include <sound/atmel-ac97c.h>
+#include <linux/serial.h>
+#include <linux/platform_data/macb.h>
+
+/*
+ * at91: 6 USARTs and one DBGU port (SAM9260)
+ * avr32: 4
+ */
+#define ATMEL_MAX_UART 7
+
+ /* USB Device */
+struct at91_udc_data {
+ int vbus_pin; /* high == host powering us */
+ u8 vbus_active_low; /* vbus polarity */
+ u8 vbus_polled; /* Use polling, not interrupt */
+ int pullup_pin; /* active == D+ pulled up */
+ u8 pullup_active_low; /* true == pullup_pin is active low */
+};
+
+ /* Compact Flash */
+struct at91_cf_data {
+ int irq_pin; /* I/O IRQ */
+ int det_pin; /* Card detect */
+ int vcc_pin; /* power switching */
+ int rst_pin; /* card reset */
+ u8 chipselect; /* EBI Chip Select number */
+ u8 flags;
+#define AT91_CF_TRUE_IDE 0x01
+#define AT91_IDE_SWAP_A0_A2 0x02
+};
+
+ /* USB Host */
+#define AT91_MAX_USBH_PORTS 3
+struct at91_usbh_data {
+ int vbus_pin[AT91_MAX_USBH_PORTS]; /* port power-control pin */
+ int overcurrent_pin[AT91_MAX_USBH_PORTS];
+ u8 ports; /* number of ports on root hub */
+ u8 overcurrent_supported;
+ u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
+ u8 overcurrent_status[AT91_MAX_USBH_PORTS];
+ u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
+};
+
+ /* NAND / SmartMedia */
+struct atmel_nand_data {
+ int enable_pin; /* chip enable */
+ int det_pin; /* card detect */
+ int rdy_pin; /* ready/busy */
+ u8 rdy_pin_active_low; /* rdy_pin value is inverted */
+ u8 ale; /* address line number connected to ALE */
+ u8 cle; /* address line number connected to CLE */
+ u8 bus_width_16; /* buswidth is 16 bit */
+ u8 ecc_mode; /* ecc mode */
+ u8 on_flash_bbt; /* bbt on flash */
+ struct mtd_partition *parts;
+ unsigned int num_parts;
+ bool has_dma; /* support dma transfer */
+
+ /* default is false, only for at32ap7000 chip is true */
+ bool need_reset_workaround;
+};
+
+ /* Serial */
+struct atmel_uart_data {
+ int num; /* port num */
+ short use_dma_tx; /* use transmit DMA? */
+ short use_dma_rx; /* use receive DMA? */
+ void __iomem *regs; /* virt. base address, if any */
+ struct serial_rs485 rs485; /* rs485 settings */
+};
+
+/* CAN */
+struct at91_can_data {
+ void (*transceiver_switch)(int on);
+};
+
+/* FIXME: this needs a better location, but gets stuff building again */
+extern int at91_suspend_entering_slow_clock(void);
+
+#endif /* __ATMEL_H__ */
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h
new file mode 100644
index 000000000..26af54321
--- /dev/null
+++ b/include/linux/platform_data/bcmgenet.h
@@ -0,0 +1,18 @@
+#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__
+#define __LINUX_PLATFORM_DATA_BCMGENET_H__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+
+struct bcmgenet_platform_data {
+ bool mdio_enabled;
+ phy_interface_t phy_interface;
+ int phy_address;
+ int phy_speed;
+ int phy_duplex;
+ u8 mac_address[ETH_ALEN];
+ int genet_version;
+};
+
+#endif
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
new file mode 100644
index 000000000..671d6502d
--- /dev/null
+++ b/include/linux/platform_data/bd6107.h
@@ -0,0 +1,19 @@
+/*
+ * bd6107.h - Rohm BD6107 LEDs Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __BD6107_H__
+#define __BD6107_H__
+
+struct device;
+
+struct bd6107_platform_data {
+ struct device *fbdev;
+ int reset; /* Reset GPIO */
+ unsigned int def_value;
+};
+
+#endif
diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
new file mode 100644
index 000000000..98829370f
--- /dev/null
+++ b/include/linux/platform_data/bfin_rotary.h
@@ -0,0 +1,117 @@
+/*
+ * Board initialization should put one of these structures into platform_data
+ * and register the bfin-rotary device on the platform bus under the name "bfin-rotary".
+ *
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _BFIN_ROTARY_H
+#define _BFIN_ROTARY_H
+
+/* mode bitmasks */
+#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */
+#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */
+#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */
+#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */
+
+#define ROT_DEBE DEBE /* Debounce Enable */
+
+#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */
+#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */
+#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */
+
+struct bfin_rotary_platform_data {
+ /* set to a KEY_### or BTN_### code if you prefer bfin-rotary to
+ * send EV_KEY events for rotary UP, otherwise set to 0
+ */
+ unsigned int rotary_up_key;
+ /* set to a KEY_### or BTN_### code if you prefer bfin-rotary to
+ * send EV_KEY events for rotary DOWN, otherwise set to 0
+ */
+ unsigned int rotary_down_key;
+ /* set to a KEY_### or BTN_### code for the rotary BUTTON */
+ unsigned int rotary_button_key;
+ /* set to a relative axis REL_### code if you prefer bfin-rotary to
+ * send EV_REL events, otherwise set to 0
+ */
+ unsigned int rotary_rel_code;
+ unsigned short debounce; /* 0..17 */
+ unsigned short mode;
+ unsigned short pm_wakeup;
+ unsigned short *pin_list;
+};
+
+/* CNT_CONFIG bitmasks */
+#define CNTE (1 << 0) /* Counter Enable */
+#define DEBE (1 << 1) /* Debounce Enable */
+#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */
+#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */
+#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */
+#define CNTMODE_SHIFT 8
+#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */
+#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */
+#define BNDMODE_SHIFT 12
+#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */
+#define INPDIS (1 << 15) /* CUG and CDG Input Disable */
+
+#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */
+#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */
+#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */
+#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */
+#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */
+
+#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */
+#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */
+#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */
+#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */
+
+/* CNT_IMASK bitmasks */
+#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */
+#define UCIE (1 << 1) /* Up count Interrupt Enable */
+#define DCIE (1 << 2) /* Down count Interrupt Enable */
+#define MINCIE (1 << 3) /* Min Count Interrupt Enable */
+#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */
+#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */
+#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */
+#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */
+#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */
+#define CZMEIE (1 << 9) /* CZM Error Interrupt Enable */
+#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */
+
+/* CNT_STATUS bitmasks */
+#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */
+#define UCII (1 << 1) /* Up count Interrupt Identifier */
+#define DCII (1 << 2) /* Down count Interrupt Identifier */
+#define MINCII (1 << 3) /* Min Count Interrupt Identifier */
+#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */
+#define COV31II (1 << 5) /* Bit 31 Overflow Interrupt Identifier */
+#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */
+#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */
+#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */
+#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */
+#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */
+
+/* CNT_COMMAND bitmasks */
+#define W1LCNT 0xf /* Load Counter Register */
+#define W1LMIN 0xf0 /* Load Min Register */
+#define W1LMAX 0xf00 /* Load Max Register */
+#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */
+
+#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */
+#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */
+#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */
+
+#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */
+#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */
+#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */
+
+#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */
+#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */
+#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */
+
+/* CNT_DEBOUNCE bitmasks */
+#define DPRESCALE 0xf /* Debounce prescale */
+
+#endif
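Following the note at the top of the file, a hedged example of board code that fills the platform data and registers a "bfin-rotary" platform device; the key and axis codes are arbitrary choices.

#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/platform_data/bfin_rotary.h>

static struct bfin_rotary_platform_data board_rotary_pdata = {
	.rotary_up_key     = 0,			/* 0: report EV_REL instead of keys */
	.rotary_down_key   = 0,
	.rotary_button_key = KEY_ENTER,
	.rotary_rel_code   = REL_WHEEL,
	.debounce          = 10,
	.mode              = ROT_QUAD_ENC | ROT_DEBE,
};

static struct platform_device board_rotary_device = {
	.name = "bfin-rotary",
	.id   = -1,
	.dev  = {
		.platform_data = &board_rotary_pdata,
	},
};

/* registered from board init with platform_device_register(&board_rotary_device) */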
diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h
new file mode 100644
index 000000000..e75dcbf2b
--- /dev/null
+++ b/include/linux/platform_data/brcmfmac-sdio.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _LINUX_BRCMFMAC_PLATFORM_H
+#define _LINUX_BRCMFMAC_PLATFORM_H
+
+/*
+ * Platform specific driver functions and data. Through this platform specific
+ * device data, functions can be provided that help the brcmfmac driver
+ * operate with the device in combination with the platform in use.
+ *
+ * Use the platform data in the following (similar) way:
+ *
+ *
+#include <brcmfmac_platform.h>
+
+
+static void brcmfmac_power_on(void)
+{
+}
+
+static void brcmfmac_power_off(void)
+{
+}
+
+static void brcmfmac_reset(void)
+{
+}
+
+static struct brcmfmac_sdio_platform_data brcmfmac_sdio_pdata = {
+ .power_on = brcmfmac_power_on,
+ .power_off = brcmfmac_power_off,
+ .reset = brcmfmac_reset
+};
+
+static struct platform_device brcmfmac_device = {
+ .name = BRCMFMAC_SDIO_PDATA_NAME,
+ .id = PLATFORM_DEVID_NONE,
+ .dev.platform_data = &brcmfmac_sdio_pdata
+};
+
+void __init brcmfmac_init_pdata(void)
+{
+ brcmfmac_sdio_pdata.oob_irq_supported = true;
+ brcmfmac_sdio_pdata.oob_irq_nr = gpio_to_irq(GPIO_BRCMF_SDIO_OOB);
+ brcmfmac_sdio_pdata.oob_irq_flags = IORESOURCE_IRQ |
+ IORESOURCE_IRQ_HIGHLEVEL;
+ platform_device_register(&brcmfmac_device);
+}
+ *
+ *
+ * Note: the brcmfmac can be loaded as a module or be statically built into
+ * the kernel. If built-in, note that it uses module_init (and module_exit)
+ * routines, which are equivalent to device_initcall. So if you intend to
+ * create a module with the platform specific data for the brcmfmac and have
+ * it built into the kernel, then use a higher initcall than device_initcall
+ * (see init.h). If this is not done, brcmfmac will load without problems
+ * but will not pick up the platform data.
+ *
+ * When the driver does not "detect" platform driver data, it will continue
+ * without reporting anything and just assume there is no data needed, which is
+ * probably true for most platforms.
+ *
+ * Explanation of the platform_data fields:
+ *
+ * drive_strength: is the preferred drive_strength to be used for the SDIO
+ * pins. If 0 then a default value will be used. This is the target drive
+ * strength, the exact drive strength which will be used depends on the
+ * capabilities of the device.
+ *
+ * oob_irq_supported: does the board have support for OOB interrupts. SDIO
+ * in-band interrupts are relatively slow, so to reduce interrupt-processing
+ * overhead an out-of-band interrupt can be used. If the HW supports this,
+ * enable it by setting this field to true and configure the oob related
+ * fields.
+ *
+ * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are
+ * used for registering the irq using request_irq function.
+ *
+ * broken_sg_support: flag for broken sg list support of SDIO host controller.
+ * Set this to true if the SDIO host controller has higher align requirement
+ * than 32 bytes for each scatterlist item.
+ *
+ * sd_head_align: alignment requirement for start of data buffer
+ *
+ * sd_sgentry_align: length alignment requirement for each sg entry
+ *
+ * power_on: This function is called by the brcmfmac when the module gets
+ * loaded. This can be particularly useful for low power devices. The platform
+ * specific routine may for example decide to power up the complete device.
+ * If there is no use-case for this function then provide NULL.
+ *
+ * power_off: This function is called by the brcmfmac when the module gets
+ * unloaded. At this point the device can be powered down or otherwise be reset.
+ * So if an actual power_off is not supported but reset is then reset the device
+ * when this function gets called. This can be particularly useful for low power
+ * devices. If there is no use-case for this function (either power-down or
+ * reset) then provide NULL.
+ *
+ * reset: This function can get called if the device communication breaks down.
+ * This functionality is particularly useful in case of SDIO type devices. It is
+ * possible to reset a dongle via the sdio data interface, but that requires the
+ * interface to be fully functional. This function is chip/module specific and
+ * should return only after the complete reset has finished.
+ */
+
+#define BRCMFMAC_SDIO_PDATA_NAME "brcmfmac_sdio"
+
+struct brcmfmac_sdio_platform_data {
+ unsigned int drive_strength;
+ bool oob_irq_supported;
+ unsigned int oob_irq_nr;
+ unsigned long oob_irq_flags;
+ bool broken_sg_support;
+ unsigned short sd_head_align;
+ unsigned short sd_sgentry_align;
+ void (*power_on)(void);
+ void (*power_off)(void);
+ void (*reset)(void);
+};
+
+#endif /* _LINUX_BRCMFMAC_PLATFORM_H */
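Tying in with the initcall note above, a sketch of running the registration routine from the comment's own example at an earlier initcall level than device_initcall, so a built-in driver still sees the data; the wrapper name is made up.

#include <linux/init.h>

static int __init board_brcmfmac_pdata_setup(void)
{
	brcmfmac_init_pdata();		/* the routine from the example above */
	return 0;
}
subsys_initcall(board_brcmfmac_pdata_setup);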
diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h
new file mode 100644
index 000000000..30d169dfa
--- /dev/null
+++ b/include/linux/platform_data/bt-nokia-h4p.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of Nokia H4P bluetooth driver
+ *
+ * Copyright (C) 2010 Nokia Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+
+/**
+ * struct hci_h4p_platform_data - hci_h4p platform data structure
+ */
+struct hci_h4p_platform_data {
+ int chip_type;
+ int bt_sysclk;
+ unsigned int bt_wakeup_gpio;
+ unsigned int host_wakeup_gpio;
+ unsigned int reset_gpio;
+ int reset_gpio_shared;
+ unsigned int uart_irq;
+ phys_addr_t uart_base;
+ const char *uart_iclk;
+ const char *uart_fclk;
+ void (*set_pm_limits)(struct device *dev, bool set);
+};
diff --git a/include/linux/platform_data/camera-mx2.h b/include/linux/platform_data/camera-mx2.h
new file mode 100644
index 000000000..7ded6f1f7
--- /dev/null
+++ b/include/linux/platform_data/camera-mx2.h
@@ -0,0 +1,44 @@
+/*
+ * mx2-cam.h - i.MX27/i.MX25 camera driver header file
+ *
+ * Copyright (C) 2003, Intel Corporation
+ * Copyright (C) 2008, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (C) 2010, Baruch Siach <baruch@tkos.co.il>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef __MACH_MX2_CAM_H_
+#define __MACH_MX2_CAM_H_
+
+#define MX2_CAMERA_EXT_VSYNC (1 << 1)
+#define MX2_CAMERA_CCIR (1 << 2)
+#define MX2_CAMERA_CCIR_INTERLACE (1 << 3)
+#define MX2_CAMERA_HSYNC_HIGH (1 << 4)
+#define MX2_CAMERA_GATED_CLOCK (1 << 5)
+#define MX2_CAMERA_INV_DATA (1 << 6)
+#define MX2_CAMERA_PCLK_SAMPLE_RISING (1 << 7)
+
+/**
+ * struct mx2_camera_platform_data - optional platform data for mx2_camera
+ * @flags: any combination of MX2_CAMERA_*
+ * @clk: clock rate of the csi block / 2
+ */
+struct mx2_camera_platform_data {
+ unsigned long flags;
+ unsigned long clk;
+};
+
+#endif /* __MACH_MX2_CAM_H_ */
diff --git a/include/linux/platform_data/camera-mx3.h b/include/linux/platform_data/camera-mx3.h
new file mode 100644
index 000000000..a910dadc8
--- /dev/null
+++ b/include/linux/platform_data/camera-mx3.h
@@ -0,0 +1,52 @@
+/*
+ * mx3_camera.h - i.MX3x camera driver header file
+ *
+ * Copyright (C) 2008, Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MX3_CAMERA_H_
+#define _MX3_CAMERA_H_
+
+#include <linux/device.h>
+
+#define MX3_CAMERA_CLK_SRC 1
+#define MX3_CAMERA_EXT_VSYNC 2
+#define MX3_CAMERA_DP 4
+#define MX3_CAMERA_PCP 8
+#define MX3_CAMERA_HSP 0x10
+#define MX3_CAMERA_VSP 0x20
+#define MX3_CAMERA_DATAWIDTH_4 0x40
+#define MX3_CAMERA_DATAWIDTH_8 0x80
+#define MX3_CAMERA_DATAWIDTH_10 0x100
+#define MX3_CAMERA_DATAWIDTH_15 0x200
+
+#define MX3_CAMERA_DATAWIDTH_MASK (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | \
+ MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15)
+
+struct v4l2_async_subdev;
+
+/**
+ * struct mx3_camera_pdata - i.MX3x camera platform data
+ * @flags: MX3_CAMERA_* flags
+ * @mclk_10khz: master clock frequency in 10kHz units
+ * @dma_dev: IPU DMA device to match against in channel allocation
+ */
+struct mx3_camera_pdata {
+ unsigned long flags;
+ unsigned long mclk_10khz;
+ struct device *dma_dev;
+ struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
+ int *asd_sizes; /* 0-terminated array of asd group sizes */
+};
+
+#endif
diff --git a/include/linux/platform_data/camera-pxa.h b/include/linux/platform_data/camera-pxa.h
new file mode 100644
index 000000000..6709b1cd7
--- /dev/null
+++ b/include/linux/platform_data/camera-pxa.h
@@ -0,0 +1,44 @@
+/*
+ camera.h - PXA camera driver header file
+
+ Copyright (C) 2003, Intel Corporation
+ Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __ASM_ARCH_CAMERA_H_
+#define __ASM_ARCH_CAMERA_H_
+
+#define PXA_CAMERA_MASTER 1
+#define PXA_CAMERA_DATAWIDTH_4 2
+#define PXA_CAMERA_DATAWIDTH_5 4
+#define PXA_CAMERA_DATAWIDTH_8 8
+#define PXA_CAMERA_DATAWIDTH_9 0x10
+#define PXA_CAMERA_DATAWIDTH_10 0x20
+#define PXA_CAMERA_PCLK_EN 0x40
+#define PXA_CAMERA_MCLK_EN 0x80
+#define PXA_CAMERA_PCP 0x100
+#define PXA_CAMERA_HSP 0x200
+#define PXA_CAMERA_VSP 0x400
+
+struct pxacamera_platform_data {
+ unsigned long flags;
+ unsigned long mclk_10khz;
+};
+
+extern void pxa_set_camera_info(struct pxacamera_platform_data *);
+
+#endif /* __ASM_ARCH_CAMERA_H_ */
diff --git a/include/linux/platform_data/camera-rcar.h b/include/linux/platform_data/camera-rcar.h
new file mode 100644
index 000000000..dfc83c581
--- /dev/null
+++ b/include/linux/platform_data/camera-rcar.h
@@ -0,0 +1,25 @@
+/*
+ * Platform data for Renesas R-Car VIN soc-camera driver
+ *
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __CAMERA_RCAR_H_
+#define __CAMERA_RCAR_H_
+
+#define RCAR_VIN_HSYNC_ACTIVE_LOW (1 << 0)
+#define RCAR_VIN_VSYNC_ACTIVE_LOW (1 << 1)
+#define RCAR_VIN_BT601 (1 << 2)
+#define RCAR_VIN_BT656 (1 << 3)
+
+struct rcar_vin_platform_data {
+ unsigned int flags;
+};
+
+#endif /* __CAMERA_RCAR_H_ */
diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h
new file mode 100644
index 000000000..addd48cac
--- /dev/null
+++ b/include/linux/platform_data/clk-integrator.h
@@ -0,0 +1,2 @@
+void integrator_impd1_clk_init(void __iomem *base, unsigned int id);
+void integrator_impd1_clk_exit(unsigned int id);
diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/clk-lpss.h
new file mode 100644
index 000000000..23901992b
--- /dev/null
+++ b/include/linux/platform_data/clk-lpss.h
@@ -0,0 +1,23 @@
+/*
+ * Intel Low Power Subsystem clocks.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CLK_LPSS_H
+#define __CLK_LPSS_H
+
+struct lpss_clk_data {
+ const char *name;
+ struct clk *clk;
+};
+
+extern int lpt_clk_init(void);
+
+#endif /* __CLK_LPSS_H */
diff --git a/include/linux/platform_data/clk-realview.h b/include/linux/platform_data/clk-realview.h
new file mode 100644
index 000000000..2e426a7db
--- /dev/null
+++ b/include/linux/platform_data/clk-realview.h
@@ -0,0 +1 @@
+void realview_clk_init(void __iomem *sysbase, bool is_pb1176);
diff --git a/include/linux/platform_data/clk-u300.h b/include/linux/platform_data/clk-u300.h
new file mode 100644
index 000000000..8429e7391
--- /dev/null
+++ b/include/linux/platform_data/clk-u300.h
@@ -0,0 +1 @@
+void __init u300_clk_init(void __iomem *base);
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
new file mode 100644
index 000000000..97baf831e
--- /dev/null
+++ b/include/linux/platform_data/clk-ux500.h
@@ -0,0 +1,23 @@
+/*
+ * Clock definitions for ux500 platforms
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Author: Ulf Hansson <ulf.hansson@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __CLK_UX500_H
+#define __CLK_UX500_H
+
+void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base);
+
+void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base);
+void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base);
+void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
+ u32 clkrst5_base, u32 clkrst6_base);
+
+#endif /* __CLK_UX500_H */
diff --git a/include/linux/platform_data/coda.h b/include/linux/platform_data/coda.h
new file mode 100644
index 000000000..6ad4410d9
--- /dev/null
+++ b/include/linux/platform_data/coda.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2013 Philipp Zabel, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef PLATFORM_CODA_H
+#define PLATFORM_CODA_H
+
+struct device;
+
+struct coda_platform_data {
+ struct device *iram_dev;
+};
+
+#endif
diff --git a/include/linux/platform_data/cpuidle-exynos.h b/include/linux/platform_data/cpuidle-exynos.h
new file mode 100644
index 000000000..bfa40e4c5
--- /dev/null
+++ b/include/linux/platform_data/cpuidle-exynos.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __CPUIDLE_EXYNOS_H
+#define __CPUIDLE_EXYNOS_H
+
+struct cpuidle_exynos_data {
+ int (*cpu0_enter_aftr)(void);
+ int (*cpu1_powerdown)(void);
+ void (*pre_enter_aftr)(void);
+ void (*post_enter_aftr)(void);
+};
+
+#endif
diff --git a/include/linux/platform_data/crypto-atmel.h b/include/linux/platform_data/crypto-atmel.h
new file mode 100644
index 000000000..b46e0d906
--- /dev/null
+++ b/include/linux/platform_data/crypto-atmel.h
@@ -0,0 +1,22 @@
+#ifndef __LINUX_CRYPTO_ATMEL_H
+#define __LINUX_CRYPTO_ATMEL_H
+
+#include <linux/platform_data/dma-atmel.h>
+
+/**
+ * struct crypto_dma_data - DMA data for AES/TDES/SHA
+ */
+struct crypto_dma_data {
+ struct at_dma_slave txdata;
+ struct at_dma_slave rxdata;
+};
+
+/**
+ * struct crypto_platform_data - board-specific AES/TDES/SHA configuration
+ * @dma_slave: DMA slave interface to use in data transfers.
+ */
+struct crypto_platform_data {
+ struct crypto_dma_data *dma_slave;
+};
+
+#endif /* __LINUX_CRYPTO_ATMEL_H */
diff --git a/include/linux/platform_data/crypto-ux500.h b/include/linux/platform_data/crypto-ux500.h
new file mode 100644
index 000000000..94df96d9a
--- /dev/null
+++ b/include/linux/platform_data/crypto-ux500.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef _CRYPTO_UX500_H
+#define _CRYPTO_UX500_H
+#include <linux/dmaengine.h>
+#include <linux/platform_data/dma-ste-dma40.h>
+
+struct hash_platform_data {
+ void *mem_to_engine;
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+struct cryp_platform_data {
+ struct stedma40_chan_cfg mem_to_engine;
+ struct stedma40_chan_cfg engine_to_mem;
+};
+
+#endif
diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h
new file mode 100644
index 000000000..6eba54aff
--- /dev/null
+++ b/include/linux/platform_data/cyttsp4.h
@@ -0,0 +1,76 @@
+/*
+ * Header file for:
+ * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers.
+ * For use with Cypress Txx3xx parts.
+ * Supported parts include:
+ * CY8CTST341
+ * CY8CTMA340
+ *
+ * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc.
+ * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, and only version 2, as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com)
+ *
+ */
+#ifndef _CYTTSP4_H_
+#define _CYTTSP4_H_
+
+#define CYTTSP4_MT_NAME "cyttsp4_mt"
+#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter"
+#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter"
+
+#define CY_TOUCH_SETTINGS_MAX 32
+
+struct touch_framework {
+ const uint16_t *abs;
+ uint8_t size;
+ uint8_t enable_vkeys;
+} __packed;
+
+struct cyttsp4_mt_platform_data {
+ struct touch_framework *frmwrk;
+ unsigned short flags;
+ char const *inp_dev_name;
+};
+
+struct touch_settings {
+ const uint8_t *data;
+ uint32_t size;
+ uint8_t tag;
+} __packed;
+
+struct cyttsp4_core_platform_data {
+ int irq_gpio;
+ int rst_gpio;
+ int level_irq_udelay;
+ int (*xres)(struct cyttsp4_core_platform_data *pdata,
+ struct device *dev);
+ int (*init)(struct cyttsp4_core_platform_data *pdata,
+ int on, struct device *dev);
+ int (*power)(struct cyttsp4_core_platform_data *pdata,
+ int on, struct device *dev, atomic_t *ignore_irq);
+ int (*irq_stat)(struct cyttsp4_core_platform_data *pdata,
+ struct device *dev);
+ struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX];
+};
+
+struct cyttsp4_platform_data {
+ struct cyttsp4_core_platform_data *core_pdata;
+ struct cyttsp4_mt_platform_data *mt_pdata;
+};
+
+#endif /* _CYTTSP4_H_ */
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
new file mode 100644
index 000000000..85ad68f92
--- /dev/null
+++ b/include/linux/platform_data/davinci_asp.h
@@ -0,0 +1,112 @@
+/*
+ * TI DaVinci Audio Serial Port support
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DAVINCI_ASP_H
+#define __DAVINCI_ASP_H
+
+#include <linux/genalloc.h>
+
+struct davinci_mcasp_pdata {
+ u32 tx_dma_offset;
+ u32 rx_dma_offset;
+ int asp_chan_q; /* event queue number for ASP channel */
+ int ram_chan_q; /* event queue number for RAM channel */
+ /*
+ * Allowing this is more efficient and eliminates left and right swaps
+ * caused by underruns, but will swap the left and right channels
+ * when compared to previous behavior.
+ */
+ unsigned enable_channel_combine:1;
+ unsigned sram_size_playback;
+ unsigned sram_size_capture;
+ struct gen_pool *sram_pool;
+
+ /*
+ * If the McBSP peripheral gets its clock from an external pin,
+ * there are three choices: MCBSP_CLKX, MCBSP_CLKR
+ * and MCBSP_CLKS.
+ * Depending on the hardware connections, this setting can be
+ * used to change the behaviour of the McBSP driver.
+ */
+ int clk_input_pin;
+
+ /*
+ * This flag applies when both the clock and FS are outputs for the CPU
+ * and makes the clock more accurate (FS is not symmetrical and the
+ * clock is very fast).
+ * The faster clock is called the
+ * i2s continuous serial clock (I2S_SCK) and it is an externally
+ * visible bit clock.
+ *
+ * first line : WordSelect
+ * second line : ContinuousSerialClock
+ * third line: SerialData
+ *
+ * SYMMETRICAL APPROACH:
+ * _______________________ LEFT
+ * _| RIGHT |______________________|
+ * _ _ _ _ _ _ _ _
+ * _| |_| |_ x16 _| |_| |_| |_| |_ x16 _| |_| |_
+ * _ _ _ _ _ _ _ _
+ * _/ \_/ \_ ... _/ \_/ \_/ \_/ \_ ... _/ \_/ \_
+ * \_/ \_/ \_/ \_/ \_/ \_/ \_/ \_/
+ *
+ * ACCURATE CLOCK APPROACH:
+ * ______________ LEFT
+ * _| RIGHT |_______________________________|
+ * _ _ _ _ _ _ _ _ _
+ * _| |_ x16 _| |_| |_ x16 _| |_| |_| |_| |_| |_| |
+ * _ _ _ _ dummy cycles
+ * _/ \_ ... _/ \_/ \_ ... _/ \__________________
+ * \_/ \_/ \_/ \_/
+ *
+ */
+ bool i2s_accurate_sck;
+
+ /* McASP specific fields */
+ int tdm_slots;
+ u8 op_mode;
+ u8 num_serializer;
+ u8 *serial_dir;
+ u8 version;
+ u8 txnumevt;
+ u8 rxnumevt;
+ int tx_dma_channel;
+ int rx_dma_channel;
+};
+/* TODO: Fix arch/arm/mach-davinci/ users and remove this define */
+#define snd_platform_data davinci_mcasp_pdata
+
+enum {
+ MCASP_VERSION_1 = 0, /* DM646x */
+ MCASP_VERSION_2, /* DA8xx/OMAPL1x */
+ MCASP_VERSION_3, /* TI81xx/AM33xx */
+ MCASP_VERSION_4, /* DRA7xxx */
+};
+
+enum mcbsp_clk_input_pin {
+ MCBSP_CLKR = 0, /* as in DM365 */
+ MCBSP_CLKS,
+};
+
+#define INACTIVE_MODE 0
+#define TX_MODE 1
+#define RX_MODE 2
+
+#define DAVINCI_MCASP_IIS_MODE 0
+#define DAVINCI_MCASP_DIT_MODE 1
+
+#endif
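A hedged sketch of McASP platform data as a board file might define it; the DMA offsets, serializer layout and event counts are placeholders taken from no particular SoC.

#include <linux/kernel.h>
#include <linux/platform_data/davinci_asp.h>

static u8 board_mcasp_serial_dir[] = {
	[0] = TX_MODE,			/* AXR0: playback */
	[1] = RX_MODE,			/* AXR1: capture */
};

static struct davinci_mcasp_pdata board_mcasp_pdata = {
	.tx_dma_offset  = 0x400,	/* placeholder FIFO offsets */
	.rx_dma_offset  = 0x400,
	.op_mode        = DAVINCI_MCASP_IIS_MODE,
	.tdm_slots      = 2,
	.serial_dir     = board_mcasp_serial_dir,
	.num_serializer = ARRAY_SIZE(board_mcasp_serial_dir),
	.version        = MCASP_VERSION_2,
	.txnumevt       = 1,
	.rxnumevt       = 1,
};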
diff --git a/include/linux/platform_data/db8500_thermal.h b/include/linux/platform_data/db8500_thermal.h
new file mode 100644
index 000000000..3bf60902e
--- /dev/null
+++ b/include/linux/platform_data/db8500_thermal.h
@@ -0,0 +1,38 @@
+/*
+ * db8500_thermal.h - DB8500 Thermal Management Implementation
+ *
+ * Copyright (C) 2012 ST-Ericsson
+ * Copyright (C) 2012 Linaro Ltd.
+ *
+ * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DB8500_THERMAL_H_
+#define _DB8500_THERMAL_H_
+
+#include <linux/thermal.h>
+
+#define COOLING_DEV_MAX 8
+
+struct db8500_trip_point {
+ unsigned long temp;
+ enum thermal_trip_type type;
+ char cdev_name[COOLING_DEV_MAX][THERMAL_NAME_LENGTH];
+};
+
+struct db8500_thsens_platform_data {
+ struct db8500_trip_point trip_points[THERMAL_MAX_TRIPS];
+ int num_trips;
+};
+
+#endif /* _DB8500_THERMAL_H_ */
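A sketch of how the trip points might be populated; the temperatures (assumed to be in millidegrees Celsius), the trip types and the cooling device name are illustrative.

#include <linux/thermal.h>
#include <linux/platform_data/db8500_thermal.h>

static struct db8500_thsens_platform_data board_thsens_pdata = {
	.trip_points = {
		[0] = {
			.temp      = 70000,
			.type      = THERMAL_TRIP_ACTIVE,
			.cdev_name = { "thermal-cpufreq-0" },
		},
		[1] = {
			.temp = 85000,
			.type = THERMAL_TRIP_CRITICAL,
		},
	},
	.num_trips = 2,
};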
diff --git a/include/linux/platform_data/dma-atmel.h b/include/linux/platform_data/dma-atmel.h
new file mode 100644
index 000000000..e95f19c65
--- /dev/null
+++ b/include/linux/platform_data/dma-atmel.h
@@ -0,0 +1,65 @@
+/*
+ * Header file for the Atmel AHB DMA Controller driver
+ *
+ * Copyright (C) 2008 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef AT_HDMAC_H
+#define AT_HDMAC_H
+
+#include <linux/dmaengine.h>
+
+/**
+ * struct at_dma_platform_data - Controller configuration parameters
+ * @nr_channels: Number of channels supported by hardware (max 8)
+ * @cap_mask: dma_capability flags supported by the platform
+ */
+struct at_dma_platform_data {
+ unsigned int nr_channels;
+ dma_cap_mask_t cap_mask;
+};
+
+/**
+ * struct at_dma_slave - Controller-specific information about a slave
+ * @dma_dev: required DMA master device
+ * @cfg: Platform-specific initializer for the CFG register
+ */
+struct at_dma_slave {
+ struct device *dma_dev;
+ u32 cfg;
+};
+
+
+/* Platform-configurable bits in CFG */
+#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */
+
+#define ATC_SRC_PER(h) (0xFU & (h)) /* Channel src rq associated with periph handshaking ifc h */
+#define ATC_DST_PER(h) ((0xFU & (h)) << 4) /* Channel dst rq associated with periph handshaking ifc h */
+#define ATC_SRC_REP (0x1 << 8) /* Source Replay Mode */
+#define ATC_SRC_H2SEL (0x1 << 9) /* Source Handshaking Mode */
+#define ATC_SRC_H2SEL_SW (0x0 << 9)
+#define ATC_SRC_H2SEL_HW (0x1 << 9)
+#define ATC_SRC_PER_MSB(h) (ATC_PER_MSB(h) << 10) /* Channel src rq (most significant bits) */
+#define ATC_DST_REP (0x1 << 12) /* Destination Replay Mode */
+#define ATC_DST_H2SEL (0x1 << 13) /* Destination Handshaking Mode */
+#define ATC_DST_H2SEL_SW (0x0 << 13)
+#define ATC_DST_H2SEL_HW (0x1 << 13)
+#define ATC_DST_PER_MSB(h) (ATC_PER_MSB(h) << 14) /* Channel dst rq (most significant bits) */
+#define ATC_SOD (0x1 << 16) /* Stop On Done */
+#define ATC_LOCK_IF (0x1 << 20) /* Interface Lock */
+#define ATC_LOCK_B (0x1 << 21) /* AHB Bus Lock */
+#define ATC_LOCK_IF_L (0x1 << 22) /* Master Interface Arbiter Lock */
+#define ATC_LOCK_IF_L_CHUNK (0x0 << 22)
+#define ATC_LOCK_IF_L_BUFFER (0x1 << 22)
+#define ATC_AHB_PROT_MASK (0x7 << 24) /* AHB Protection */
+#define ATC_FIFOCFG_MASK (0x3 << 28) /* FIFO Request Configuration */
+#define ATC_FIFOCFG_LARGESTBURST (0x0 << 28)
+#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
+#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
+
+
+#endif /* AT_HDMAC_H */
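A sketch of composing the CFG word for a slave channel with the helpers above; the handshaking interface number (3) is a placeholder, and .dma_dev would normally point at the DMA controller's struct device.

#include <linux/platform_data/dma-atmel.h>

static struct at_dma_slave board_usart_dma_slave = {
	/* .dma_dev normally points at the DMA controller's struct device */
	.cfg = ATC_FIFOCFG_HALFFIFO |
	       ATC_SRC_H2SEL_HW | ATC_SRC_PER(3) | ATC_SRC_PER_MSB(3) |
	       ATC_DST_H2SEL_HW | ATC_DST_PER(3) | ATC_DST_PER_MSB(3),
};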
diff --git a/include/linux/platform_data/dma-coh901318.h b/include/linux/platform_data/dma-coh901318.h
new file mode 100644
index 000000000..c4cb9590d
--- /dev/null
+++ b/include/linux/platform_data/dma-coh901318.h
@@ -0,0 +1,72 @@
+/*
+ * Platform data for the COH901318 DMA controller
+ * Copyright (C) 2007-2013 ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef PLAT_COH901318_H
+#define PLAT_COH901318_H
+
+#ifdef CONFIG_COH901318
+
+/* We only support the U300 DMA channels */
+#define U300_DMA_MSL_TX_0 0
+#define U300_DMA_MSL_TX_1 1
+#define U300_DMA_MSL_TX_2 2
+#define U300_DMA_MSL_TX_3 3
+#define U300_DMA_MSL_TX_4 4
+#define U300_DMA_MSL_TX_5 5
+#define U300_DMA_MSL_TX_6 6
+#define U300_DMA_MSL_RX_0 7
+#define U300_DMA_MSL_RX_1 8
+#define U300_DMA_MSL_RX_2 9
+#define U300_DMA_MSL_RX_3 10
+#define U300_DMA_MSL_RX_4 11
+#define U300_DMA_MSL_RX_5 12
+#define U300_DMA_MSL_RX_6 13
+#define U300_DMA_MMCSD_RX_TX 14
+#define U300_DMA_MSPRO_TX 15
+#define U300_DMA_MSPRO_RX 16
+#define U300_DMA_UART0_TX 17
+#define U300_DMA_UART0_RX 18
+#define U300_DMA_APEX_TX 19
+#define U300_DMA_APEX_RX 20
+#define U300_DMA_PCM_I2S0_TX 21
+#define U300_DMA_PCM_I2S0_RX 22
+#define U300_DMA_PCM_I2S1_TX 23
+#define U300_DMA_PCM_I2S1_RX 24
+#define U300_DMA_XGAM_CDI 25
+#define U300_DMA_XGAM_PDI 26
+#define U300_DMA_SPI_TX 27
+#define U300_DMA_SPI_RX 28
+#define U300_DMA_GENERAL_PURPOSE_0 29
+#define U300_DMA_GENERAL_PURPOSE_1 30
+#define U300_DMA_GENERAL_PURPOSE_2 31
+#define U300_DMA_GENERAL_PURPOSE_3 32
+#define U300_DMA_GENERAL_PURPOSE_4 33
+#define U300_DMA_GENERAL_PURPOSE_5 34
+#define U300_DMA_GENERAL_PURPOSE_6 35
+#define U300_DMA_GENERAL_PURPOSE_7 36
+#define U300_DMA_GENERAL_PURPOSE_8 37
+#define U300_DMA_UART1_TX 38
+#define U300_DMA_UART1_RX 39
+
+#define U300_DMA_DEVICE_CHANNELS 32
+#define U300_DMA_CHANNELS 40
+
+/**
+ * coh901318_filter_id() - DMA channel filter function
+ * @chan: dma channel handle
+ * @chan_id: id of the dma channel to be filtered for
+ *
+ * Used as the filter function for dma_request_channel() to request a
+ * specific channel id
+ */
+bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
+#else
+static inline bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ return false;
+}
+#endif
+
+#endif /* PLAT_COH901318_H */
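A sketch of requesting a specific channel with coh901318_filter_id() through the standard dmaengine API, here using the U300 UART0 TX line as an example.

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-coh901318.h>

static struct dma_chan *board_request_uart0_tx_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* ask for exactly the U300_DMA_UART0_TX channel */
	return dma_request_channel(mask, coh901318_filter_id,
				   (void *)U300_DMA_UART0_TX);
}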
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
new file mode 100644
index 000000000..87ac14c58
--- /dev/null
+++ b/include/linux/platform_data/dma-dw.h
@@ -0,0 +1,61 @@
+/*
+ * Driver for the Synopsys DesignWare DMA Controller
+ *
+ * Copyright (C) 2007 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _PLATFORM_DATA_DMA_DW_H
+#define _PLATFORM_DATA_DMA_DW_H
+
+#include <linux/device.h>
+
+#define DW_DMA_MAX_NR_MASTERS 4
+
+/**
+ * struct dw_dma_slave - Controller-specific information about a slave
+ *
+ * @dma_dev: required DMA master device
+ * @src_id: src request line
+ * @dst_id: dst request line
+ * @src_master: src master for transfers on allocated channel.
+ * @dst_master: dest master for transfers on allocated channel.
+ */
+struct dw_dma_slave {
+ struct device *dma_dev;
+ u8 src_id;
+ u8 dst_id;
+ u8 src_master;
+ u8 dst_master;
+};
+
+/**
+ * struct dw_dma_platform_data - Controller configuration parameters
+ * @nr_channels: Number of channels supported by hardware (max 8)
+ * @is_private: The device channels should be marked as private and not for
+ * use by the general purpose DMA channel allocator.
+ * @chan_allocation_order: Allocate channels starting from 0 or 7
+ * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
+ * @block_size: Maximum block size supported by the controller
+ * @nr_masters: Number of AHB masters supported by the controller
+ * @data_width: Maximum data width supported by hardware per AHB master
+ * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
+ */
+struct dw_dma_platform_data {
+ unsigned int nr_channels;
+ bool is_private;
+#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
+#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
+ unsigned char chan_allocation_order;
+#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
+#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
+ unsigned char chan_priority;
+ unsigned short block_size;
+ unsigned char nr_masters;
+ unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
+};
+
+#endif /* _PLATFORM_DATA_DMA_DW_H */
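A hedged sketch of controller configuration as a platform might provide it; the channel count, block size and master widths are placeholders that would come from the SoC integration.

#include <linux/platform_data/dma-dw.h>

static struct dw_dma_platform_data board_dw_dma_pdata = {
	.nr_channels           = 8,
	.is_private            = true,
	.chan_allocation_order = CHAN_ALLOCATION_ASCENDING,
	.chan_priority         = CHAN_PRIORITY_ASCENDING,
	.block_size            = 0xfff,		/* placeholder */
	.nr_masters            = 2,
	.data_width            = { 3, 3 },	/* 64-bit AHB masters */
};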
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
new file mode 100644
index 000000000..e82c642fa
--- /dev/null
+++ b/include/linux/platform_data/dma-ep93xx.h
@@ -0,0 +1,93 @@
+#ifndef __ASM_ARCH_DMA_H
+#define __ASM_ARCH_DMA_H
+
+#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+/*
+ * M2P channels.
+ *
+ * Note that these values are also directly used for setting the PPALLOC
+ * register.
+ */
+#define EP93XX_DMA_I2S1 0
+#define EP93XX_DMA_I2S2 1
+#define EP93XX_DMA_AAC1 2
+#define EP93XX_DMA_AAC2 3
+#define EP93XX_DMA_AAC3 4
+#define EP93XX_DMA_I2S3 5
+#define EP93XX_DMA_UART1 6
+#define EP93XX_DMA_UART2 7
+#define EP93XX_DMA_UART3 8
+#define EP93XX_DMA_IRDA 9
+/* M2M channels */
+#define EP93XX_DMA_SSP 10
+#define EP93XX_DMA_IDE 11
+
+/**
+ * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
+ * @port: peripheral which is requesting the channel
+ * @direction: TX/RX channel
+ * @name: optional name for the channel, this is displayed in /proc/interrupts
+ *
+ * This information is passed as private channel parameter in a filter
+ * function. Note that this is only needed for slave/cyclic channels. For
+ * memcpy channels %NULL data should be passed.
+ */
+struct ep93xx_dma_data {
+ int port;
+ enum dma_transfer_direction direction;
+ const char *name;
+};
+
+/**
+ * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
+ * @name: name of the channel, used for getting the right clock for the channel
+ * @base: mapped registers
+ * @irq: interrupt number used by this channel
+ */
+struct ep93xx_dma_chan_data {
+ const char *name;
+ void __iomem *base;
+ int irq;
+};
+
+/**
+ * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
+ * @channels: array of channels which are passed to the driver
+ * @num_channels: number of channels in the array
+ *
+ * This structure is passed to the DMA engine driver via platform data. For
+ * M2P channels, contract is that even channels are for TX and odd for RX.
+ * There is no requirement for the M2M channels.
+ */
+struct ep93xx_dma_platform_data {
+ struct ep93xx_dma_chan_data *channels;
+ size_t num_channels;
+};
+
+static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
+}
+
+/**
+ * ep93xx_dma_chan_direction - returns the direction in which the channel can be used
+ * @chan: channel
+ *
+ * This function can be used in filter functions to find out whether the
+ * channel supports given DMA direction. Only M2P channels have such
+ * limitation, for M2M channels the direction is configurable.
+ */
+static inline enum dma_transfer_direction
+ep93xx_dma_chan_direction(struct dma_chan *chan)
+{
+ if (!ep93xx_dma_chan_is_m2p(chan))
+ return DMA_NONE;
+
+ /* even channels are for TX, odd for RX */
+ return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+}
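+
+/*
+ * Example (sketch): a client filter function of the kind described above.
+ * The function name is hypothetical. For slave/cyclic requests the
+ * ep93xx_dma_data is matched against the channel and attached as the
+ * channel's private pointer; memcpy requests pass NULL data.
+ */
+static inline bool ep93xx_dma_example_filter(struct dma_chan *chan, void *filter_param)
+{
+	struct ep93xx_dma_data *data = filter_param;
+
+	/* memcpy: any M2M channel will do */
+	if (!data)
+		return !ep93xx_dma_chan_is_m2p(chan);
+
+	/* M2P channels only work in one direction */
+	if (ep93xx_dma_chan_is_m2p(chan) &&
+	    data->direction != ep93xx_dma_chan_direction(chan))
+		return false;
+
+	chan->private = data;
+	return true;
+}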
+
+#endif /* __ASM_ARCH_DMA_H */
diff --git a/include/linux/platform_data/dma-hsu.h b/include/linux/platform_data/dma-hsu.h
new file mode 100644
index 000000000..8a1f6a492
--- /dev/null
+++ b/include/linux/platform_data/dma-hsu.h
@@ -0,0 +1,25 @@
+/*
+ * Driver for the High Speed UART DMA
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PLATFORM_DATA_DMA_HSU_H
+#define _PLATFORM_DATA_DMA_HSU_H
+
+#include <linux/device.h>
+
+struct hsu_dma_slave {
+ struct device *dma_dev;
+ int chan_id;
+};
+
+struct hsu_dma_platform_data {
+ unsigned short nr_channels;
+};
+
+#endif /* _PLATFORM_DATA_DMA_HSU_H */
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
new file mode 100644
index 000000000..2d0881672
--- /dev/null
+++ b/include/linux/platform_data/dma-imx-sdma.h
@@ -0,0 +1,67 @@
+#ifndef __MACH_MXC_SDMA_H__
+#define __MACH_MXC_SDMA_H__
+
+/**
+ * struct sdma_script_start_addrs - SDMA script start pointers
+ *
+ * start addresses of the different functions in the physical
+ * address space of the SDMA engine.
+ */
+struct sdma_script_start_addrs {
+ s32 ap_2_ap_addr;
+ s32 ap_2_bp_addr;
+ s32 ap_2_ap_fixed_addr;
+ s32 bp_2_ap_addr;
+ s32 loopback_on_dsp_side_addr;
+ s32 mcu_interrupt_only_addr;
+ s32 firi_2_per_addr;
+ s32 firi_2_mcu_addr;
+ s32 per_2_firi_addr;
+ s32 mcu_2_firi_addr;
+ s32 uart_2_per_addr;
+ s32 uart_2_mcu_addr;
+ s32 per_2_app_addr;
+ s32 mcu_2_app_addr;
+ s32 per_2_per_addr;
+ s32 uartsh_2_per_addr;
+ s32 uartsh_2_mcu_addr;
+ s32 per_2_shp_addr;
+ s32 mcu_2_shp_addr;
+ s32 ata_2_mcu_addr;
+ s32 mcu_2_ata_addr;
+ s32 app_2_per_addr;
+ s32 app_2_mcu_addr;
+ s32 shp_2_per_addr;
+ s32 shp_2_mcu_addr;
+ s32 mshc_2_mcu_addr;
+ s32 mcu_2_mshc_addr;
+ s32 spdif_2_mcu_addr;
+ s32 mcu_2_spdif_addr;
+ s32 asrc_2_mcu_addr;
+ s32 ext_mem_2_ipu_addr;
+ s32 descrambler_addr;
+ s32 dptc_dvfs_addr;
+ s32 utra_addr;
+ s32 ram_code_start_addr;
+ /* End of v1 array */
+ s32 mcu_2_ssish_addr;
+ s32 ssish_2_mcu_addr;
+ s32 hdmi_dma_addr;
+ /* End of v2 array */
+ s32 zcanfd_2_mcu_addr;
+ s32 zqspi_2_mcu_addr;
+ /* End of v3 array */
+};
+
+/**
+ * struct sdma_platform_data - platform specific data for SDMA engine
+ *
+ * @fw_name: The firmware name
+ * @script_addrs: SDMA script addresses in SDMA ROM
+ */
+struct sdma_platform_data {
+ char *fw_name;
+ struct sdma_script_start_addrs *script_addrs;
+};
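+
+/*
+ * Example (sketch): board code provides this structure as platform data.
+ * The firmware name below is hypothetical; real names are SoC specific.
+ */
+static inline void sdma_example_fill_pdata(struct sdma_platform_data *pdata,
+					   struct sdma_script_start_addrs *rom_addrs)
+{
+	pdata->fw_name = "sdma-example.bin";
+	pdata->script_addrs = rom_addrs;
+}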
+
+#endif /* __MACH_MXC_SDMA_H__ */
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
new file mode 100644
index 000000000..7d964e787
--- /dev/null
+++ b/include/linux/platform_data/dma-imx.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MXC_DMA_H__
+#define __ASM_ARCH_MXC_DMA_H__
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+ IMX_DMATYPE_SSI, /* MCU domain SSI */
+ IMX_DMATYPE_SSI_SP, /* Shared SSI */
+ IMX_DMATYPE_MMC, /* MMC */
+ IMX_DMATYPE_SDHC, /* SDHC */
+ IMX_DMATYPE_UART, /* MCU domain UART */
+ IMX_DMATYPE_UART_SP, /* Shared UART */
+ IMX_DMATYPE_FIRI, /* FIRI */
+ IMX_DMATYPE_CSPI, /* MCU domain CSPI */
+ IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
+ IMX_DMATYPE_SIM, /* SIM */
+ IMX_DMATYPE_ATA, /* ATA */
+ IMX_DMATYPE_CCM, /* CCM */
+ IMX_DMATYPE_EXT, /* External peripheral */
+ IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
+ IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
+ IMX_DMATYPE_DSP, /* DSP */
+ IMX_DMATYPE_MEMORY, /* Memory */
+ IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+ IMX_DMATYPE_SPDIF, /* SPDIF */
+ IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
+ IMX_DMATYPE_ASRC, /* ASRC */
+ IMX_DMATYPE_ESAI, /* ESAI */
+ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
+ IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
+ IMX_DMATYPE_SAI, /* SAI */
+};
+
+enum imx_dma_prio {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MEDIUM = 1,
+ DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+ int dma_request; /* DMA request line */
+ int dma_request2; /* secondary DMA request line */
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+ return !strcmp(chan->device->dev->driver->name, "imx-sdma") ||
+ !strcmp(chan->device->dev->driver->name, "imx-dma");
+}
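+
+/*
+ * Example (sketch): a client filter function using the helpers above.
+ * The function name is hypothetical; the pattern is to accept only the
+ * general purpose engines and attach the imx_dma_data as the channel's
+ * private pointer so the driver can pick the right request line.
+ */
+static inline bool imx_dma_example_filter(struct dma_chan *chan, void *param)
+{
+	struct imx_dma_data *data = param;
+
+	if (!imx_dma_is_general_purpose(chan))
+		return false;
+
+	chan->private = data;
+	return true;
+}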
+
+#endif
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
new file mode 100644
index 000000000..0c7288603
--- /dev/null
+++ b/include/linux/platform_data/dma-mmp_tdma.h
@@ -0,0 +1,40 @@
+/*
+ * SRAM Memory Management
+ *
+ * Copyright (c) 2011 Marvell Semiconductors Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DMA_MMP_TDMA_H
+#define __DMA_MMP_TDMA_H
+
+#include <linux/genalloc.h>
+
+/* ARBITRARY: SRAM allocations are multiples of this 2^N size */
+#define SRAM_GRANULARITY 512
+
+enum sram_type {
+ MMP_SRAM_UNDEFINED = 0,
+ MMP_ASRAM,
+ MMP_ISRAM,
+};
+
+struct sram_platdata {
+ char *pool_name;
+ int granularity;
+};
+
+#ifdef CONFIG_ARM
+extern struct gen_pool *sram_get_gpool(char *pool_name);
+#else
+static inline struct gen_pool *sram_get_gpool(char *pool_name)
+{
+ return NULL;
+}
+#endif
+
+#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h
new file mode 100644
index 000000000..92ffd3245
--- /dev/null
+++ b/include/linux/platform_data/dma-mv_xor.h
@@ -0,0 +1,21 @@
+/*
+ * Marvell XOR platform device data definition file.
+ */
+
+#ifndef __DMA_MV_XOR_H
+#define __DMA_MV_XOR_H
+
+#include <linux/dmaengine.h>
+#include <linux/mbus.h>
+
+#define MV_XOR_NAME "mv_xor"
+
+struct mv_xor_channel_data {
+ dma_cap_mask_t cap_mask;
+};
+
+struct mv_xor_platform_data {
+ struct mv_xor_channel_data *channels;
+};
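+
+/*
+ * Example (sketch): board code describes each XOR channel by the DMA
+ * capabilities it should expose. The capability choice is illustrative.
+ */
+static inline void mv_xor_example_channel_caps(struct mv_xor_channel_data *cd)
+{
+	dma_cap_zero(cd->cap_mask);
+	dma_cap_set(DMA_MEMCPY, cd->cap_mask);
+	dma_cap_set(DMA_XOR, cd->cap_mask);
+}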
+
+#endif
diff --git a/include/linux/platform_data/dma-rcar-audmapp.h b/include/linux/platform_data/dma-rcar-audmapp.h
new file mode 100644
index 000000000..471fffebb
--- /dev/null
+++ b/include/linux/platform_data/dma-rcar-audmapp.h
@@ -0,0 +1,34 @@
+/*
+ * This is for Renesas R-Car Audio-DMAC-peri-peri.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This file is based on the include/linux/sh_dma.h
+ *
+ * Header for the new SH dmaengine driver
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SH_AUDMAPP_H
+#define SH_AUDMAPP_H
+
+#include <linux/dmaengine.h>
+
+struct audmapp_slave_config {
+ int slave_id;
+ dma_addr_t src;
+ dma_addr_t dst;
+ u32 chcr;
+};
+
+struct audmapp_pdata {
+ struct audmapp_slave_config *slave;
+ int slave_num;
+};
+
+#endif /* SH_AUDMAPP_H */
diff --git a/include/linux/platform_data/dma-rcar-hpbdma.h b/include/linux/platform_data/dma-rcar-hpbdma.h
new file mode 100644
index 000000000..648b8ea61
--- /dev/null
+++ b/include/linux/platform_data/dma-rcar-hpbdma.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011-2013 Renesas Electronics Corporation
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_RCAR_HPBDMA_H
+#define __DMA_RCAR_HPBDMA_H
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+/* Transmit sizes and respective register values */
+enum {
+ XMIT_SZ_8BIT = 0,
+ XMIT_SZ_16BIT = 1,
+ XMIT_SZ_32BIT = 2,
+ XMIT_SZ_MAX
+};
+
+/* DMA control register (DCR) bits */
+#define HPB_DMAE_DCR_DTAMD (1u << 26)
+#define HPB_DMAE_DCR_DTAC (1u << 25)
+#define HPB_DMAE_DCR_DTAU (1u << 24)
+#define HPB_DMAE_DCR_DTAU1 (1u << 23)
+#define HPB_DMAE_DCR_SWMD (1u << 22)
+#define HPB_DMAE_DCR_BTMD (1u << 21)
+#define HPB_DMAE_DCR_PKMD (1u << 20)
+#define HPB_DMAE_DCR_CT (1u << 18)
+#define HPB_DMAE_DCR_ACMD (1u << 17)
+#define HPB_DMAE_DCR_DIP (1u << 16)
+#define HPB_DMAE_DCR_SMDL (1u << 13)
+#define HPB_DMAE_DCR_SPDAM (1u << 12)
+#define HPB_DMAE_DCR_SDRMD_MASK (3u << 10)
+#define HPB_DMAE_DCR_SDRMD_MOD (0u << 10)
+#define HPB_DMAE_DCR_SDRMD_AUTO (1u << 10)
+#define HPB_DMAE_DCR_SDRMD_TIMER (2u << 10)
+#define HPB_DMAE_DCR_SPDS_MASK (3u << 8)
+#define HPB_DMAE_DCR_SPDS_8BIT (0u << 8)
+#define HPB_DMAE_DCR_SPDS_16BIT (1u << 8)
+#define HPB_DMAE_DCR_SPDS_32BIT (2u << 8)
+#define HPB_DMAE_DCR_DMDL (1u << 5)
+#define HPB_DMAE_DCR_DPDAM (1u << 4)
+#define HPB_DMAE_DCR_DDRMD_MASK (3u << 2)
+#define HPB_DMAE_DCR_DDRMD_MOD (0u << 2)
+#define HPB_DMAE_DCR_DDRMD_AUTO (1u << 2)
+#define HPB_DMAE_DCR_DDRMD_TIMER (2u << 2)
+#define HPB_DMAE_DCR_DPDS_MASK (3u << 0)
+#define HPB_DMAE_DCR_DPDS_8BIT (0u << 0)
+#define HPB_DMAE_DCR_DPDS_16BIT (1u << 0)
+#define HPB_DMAE_DCR_DPDS_32BIT (2u << 0)
+
+/* Asynchronous reset register (ASYNCRSTR) bits */
+#define HPB_DMAE_ASYNCRSTR_ASRST41 BIT(10)
+#define HPB_DMAE_ASYNCRSTR_ASRST40 BIT(9)
+#define HPB_DMAE_ASYNCRSTR_ASRST39 BIT(8)
+#define HPB_DMAE_ASYNCRSTR_ASRST27 BIT(7)
+#define HPB_DMAE_ASYNCRSTR_ASRST26 BIT(6)
+#define HPB_DMAE_ASYNCRSTR_ASRST25 BIT(5)
+#define HPB_DMAE_ASYNCRSTR_ASRST24 BIT(4)
+#define HPB_DMAE_ASYNCRSTR_ASRST23 BIT(3)
+#define HPB_DMAE_ASYNCRSTR_ASRST22 BIT(2)
+#define HPB_DMAE_ASYNCRSTR_ASRST21 BIT(1)
+#define HPB_DMAE_ASYNCRSTR_ASRST20 BIT(0)
+
+struct hpb_dmae_slave_config {
+ unsigned int id;
+ dma_addr_t addr;
+ u32 dcr;
+ u32 port;
+ u32 rstr;
+ u32 mdr;
+ u32 mdm;
+ u32 flags;
+#define HPB_DMAE_SET_ASYNC_RESET BIT(0)
+#define HPB_DMAE_SET_ASYNC_MODE BIT(1)
+ u32 dma_ch;
+};
+
+#define HPB_DMAE_CHANNEL(_irq, _s_id) \
+{ \
+ .ch_irq = _irq, \
+ .s_id = _s_id, \
+}
+
+struct hpb_dmae_channel {
+ unsigned int ch_irq;
+ unsigned int s_id;
+};
+
+struct hpb_dmae_pdata {
+ const struct hpb_dmae_slave_config *slaves;
+ int num_slaves;
+ const struct hpb_dmae_channel *channels;
+ int num_channels;
+ const unsigned int ts_shift[XMIT_SZ_MAX];
+ int num_hw_channels;
+};
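+
+/*
+ * Example (sketch): how board code might describe one slave and one
+ * channel. All numbers are hypothetical; the DCR value is simply composed
+ * from the bit definitions above (32-bit data size on the source and
+ * destination side here), and the remaining fields are left for real
+ * board code to fill in from the hardware documentation.
+ */
+static inline void hpb_dmae_example_slave(struct hpb_dmae_slave_config *cfg,
+					  struct hpb_dmae_channel *ch)
+{
+	cfg->id = 1;
+	cfg->addr = 0xffe00000;		/* hypothetical FIFO address */
+	cfg->dcr = HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT;
+	cfg->dma_ch = 0;
+
+	/* channel tables are usually built with HPB_DMAE_CHANNEL(irq, id) */
+	ch->ch_irq = 123;		/* hypothetical interrupt number */
+	ch->s_id = cfg->id;
+}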
+
+#endif
diff --git a/include/linux/platform_data/dma-s3c24xx.h b/include/linux/platform_data/dma-s3c24xx.h
new file mode 100644
index 000000000..89ba1b0c9
--- /dev/null
+++ b/include/linux/platform_data/dma-s3c24xx.h
@@ -0,0 +1,46 @@
+/*
+ * S3C24XX DMA handling
+ *
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/* Helper to encode the source selection constraints for early s3c socs. */
+#define S3C24XX_DMA_CHANREQ(src, chan) ((BIT(3) | src) << chan * 4)
+
+enum s3c24xx_dma_bus {
+ S3C24XX_DMA_APB,
+ S3C24XX_DMA_AHB,
+};
+
+/**
+ * struct s3c24xx_dma_channel - description of one virtual channel
+ * @bus: the bus on which the peripheral resides - AHB or APB.
+ * @handshake: whether a handshake with the peripheral is necessary
+ * @chansel: channel selection information, depending on variant; reqsel for
+ * s3c2443 and later and channel-selection map for earlier SoCs,
+ * see the CHANSEL documentation in s3c2443-dma.c
+ */
+struct s3c24xx_dma_channel {
+ enum s3c24xx_dma_bus bus;
+ bool handshake;
+ u16 chansel;
+};
+
+/**
+ * struct s3c24xx_dma_platdata - platform specific settings
+ * @num_phy_channels: number of physical channels
+ * @channels: array of virtual channel descriptions
+ * @num_channels: number of virtual channels
+ */
+struct s3c24xx_dma_platdata {
+ int num_phy_channels;
+ struct s3c24xx_dma_channel *channels;
+ int num_channels;
+};
+
+struct dma_chan;
+bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);
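+
+/*
+ * Example (sketch): describing one virtual channel. The request source and
+ * hardware channel numbers are hypothetical; on earlier SoCs @chansel is
+ * built with S3C24XX_DMA_CHANREQ(), on s3c2443 and later it is the plain
+ * reqsel value.
+ */
+static inline void s3c24xx_dma_example_channel(struct s3c24xx_dma_channel *ch)
+{
+	ch->bus = S3C24XX_DMA_APB;
+	ch->handshake = true;
+	ch->chansel = S3C24XX_DMA_CHANREQ(2, 0); /* source 2 on hw channel 0 */
+}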
diff --git a/include/linux/platform_data/dma-ste-dma40.h b/include/linux/platform_data/dma-ste-dma40.h
new file mode 100644
index 000000000..1bb9b1852
--- /dev/null
+++ b/include/linux/platform_data/dma-ste-dma40.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+
+#ifndef STE_DMA40_H
+#define STE_DMA40_H
+
+#include <linux/dmaengine.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+
+/*
+ * Maximum size for a single DMA descriptor.
+ * Size is limited to 16 bits.
+ * Size is in units of addr-widths (1, 2, 4, 8 bytes).
+ * Larger transfers will be split up into multiple linked descriptors.
+ */
+#define STEDMA40_MAX_SEG_SIZE 0xFFFF
+
+/* dev types for memcpy */
+#define STEDMA40_DEV_DST_MEMORY (-1)
+#define STEDMA40_DEV_SRC_MEMORY (-1)
+
+enum stedma40_mode {
+ STEDMA40_MODE_LOGICAL = 0,
+ STEDMA40_MODE_PHYSICAL,
+ STEDMA40_MODE_OPERATION,
+};
+
+enum stedma40_mode_opt {
+ STEDMA40_PCHAN_BASIC_MODE = 0,
+ STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
+ STEDMA40_PCHAN_MODULO_MODE,
+ STEDMA40_PCHAN_DOUBLE_DST_MODE,
+ STEDMA40_LCHAN_SRC_PHY_DST_LOG,
+ STEDMA40_LCHAN_SRC_LOG_DST_PHY,
+};
+
+#define STEDMA40_ESIZE_8_BIT 0x0
+#define STEDMA40_ESIZE_16_BIT 0x1
+#define STEDMA40_ESIZE_32_BIT 0x2
+#define STEDMA40_ESIZE_64_BIT 0x3
+
+/* The value 4 indicates that PEN-reg shall be set to 0 */
+#define STEDMA40_PSIZE_PHY_1 0x4
+#define STEDMA40_PSIZE_PHY_2 0x0
+#define STEDMA40_PSIZE_PHY_4 0x1
+#define STEDMA40_PSIZE_PHY_8 0x2
+#define STEDMA40_PSIZE_PHY_16 0x3
+
+/*
+ * The number of elements differs between logical and
+ * physical mode
+ */
+#define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2
+#define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4
+#define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8
+#define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
+
+/* Maximum number of possible physical channels */
+#define STEDMA40_MAX_PHYS 32
+
+enum stedma40_flow_ctrl {
+ STEDMA40_NO_FLOW_CTRL,
+ STEDMA40_FLOW_CTRL,
+};
+
+/**
+ * struct stedma40_half_channel_info - dst/src channel configuration
+ *
+ * @big_endian: true if the src/dst should be read as big endian
+ * @data_width: Data width of the src/dst hardware
+ * @psize: Burst size
+ * @flow_ctrl: Flow control on/off.
+ */
+struct stedma40_half_channel_info {
+ bool big_endian;
+ enum dma_slave_buswidth data_width;
+ int psize;
+ enum stedma40_flow_ctrl flow_ctrl;
+};
+
+/**
+ * struct stedma40_chan_cfg - Structure to be filled by client drivers.
+ *
+ * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
+ * @high_priority: true if high-priority
+ * @realtime: true if realtime mode is to be enabled. Only available on DMA40
+ * version 3+, i.e DB8500v2+
+ * @mode: channel mode: physical, logical, or operation
+ * @mode_opt: options for the chosen channel mode
+ * @dev_type: src/dst device type (driver uses dir to figure out which)
+ * @src_info: Parameters for the src half channel
+ * @dst_info: Parameters for the dst half channel
+ * @use_fixed_channel: if true, use physical channel specified by phy_channel
+ * @phy_channel: physical channel to use, only if use_fixed_channel is true
+ *
+ * This structure has to be filled by the client drivers.
+ * It is recommended to do all dma configuration for clients in the machine (board) code.
+ *
+ */
+struct stedma40_chan_cfg {
+ enum dma_transfer_direction dir;
+ bool high_priority;
+ bool realtime;
+ enum stedma40_mode mode;
+ enum stedma40_mode_opt mode_opt;
+ int dev_type;
+ struct stedma40_half_channel_info src_info;
+ struct stedma40_half_channel_info dst_info;
+
+ bool use_fixed_channel;
+ int phy_channel;
+};
+
+/**
+ * struct stedma40_platform_data - Configuration struct for the dma device.
+ *
+ * @dev_tx: mapping between destination event line and io address
+ * @dev_rx: mapping between source event line and io address
+ * @disabled_channels: A vector, ending with -1, that marks physical channels
+ * that are, for various reasons, not available to the driver.
+ * @soft_lli_chans: A vector that marks the physical channels that will use
+ * LLI by SW, which avoids a HW bug that exists in some versions of the
+ * controller. SoftLLI introduces relink overhead that could impact
+ * performance for certain use cases.
+ * @num_of_soft_lli_chans: The number of channels that need to be configured
+ * to use SoftLLI.
+ * @use_esram_lcla: flag for mapping the lcla into esram region
+ * @num_of_memcpy_chans: The number of channels reserved for memcpy.
+ * @num_of_phy_chans: The number of physical channels implemented in HW.
+ * 0 means reading the number of channels from DMA HW but this is only valid
+ * for 'multiple of 4' channels, like 8.
+ */
+struct stedma40_platform_data {
+ int disabled_channels[STEDMA40_MAX_PHYS];
+ int *soft_lli_chans;
+ int num_of_soft_lli_chans;
+ bool use_esram_lcla;
+ int num_of_memcpy_chans;
+ int num_of_phy_chans;
+};
+
+#ifdef CONFIG_STE_DMA40
+
+/**
+ * stedma40_filter() - Provides a stedma40_chan_cfg to the
+ * ste_dma40 dma driver via the dmaengine framework and
+ * does some checking of what is provided.
+ *
+ * Never called directly by a client; it is used by dmaengine.
+ * @chan: dmaengine handle.
+ * @data: Must be of type struct stedma40_chan_cfg and is
+ * the configuration for the framework.
+ *
+ *
+ */
+
+bool stedma40_filter(struct dma_chan *chan, void *data);
+
+/**
+ * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
+ * (=device)
+ *
+ * @chan: dmaengine handle
+ * @addr: source or destination physical address.
+ * @size: bytes to transfer
+ * @direction: direction of transfer
+ * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
+ */
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+ dma_addr_t addr,
+ unsigned int size,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct scatterlist sg;
+ sg_init_table(&sg, 1);
+ sg.dma_address = addr;
+ sg.length = size;
+
+ return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
+}
+
+#else
+static inline bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+ return false;
+}
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+ dma_addr_t addr,
+ unsigned int size,
+ enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ return NULL;
+}
+#endif
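+
+/*
+ * Example (sketch): how a client might request a channel. The helper name
+ * is hypothetical; @cfg is a stedma40_chan_cfg the client has filled in
+ * (dir, dev_type, src_info/dst_info, ...).
+ */
+static inline struct dma_chan *
+stedma40_example_request_channel(struct stedma40_chan_cfg *cfg)
+{
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	/* stedma40_filter() checks @cfg and applies it to the channel */
+	return dma_request_channel(mask, stedma40_filter, cfg);
+}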
+
+#endif
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
new file mode 100644
index 000000000..a19b78d82
--- /dev/null
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -0,0 +1,31 @@
+/*
+ * DMTIMER platform data for TI OMAP platforms
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Jon Hunter <jon-hunter@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __PLATFORM_DATA_DMTIMER_OMAP_H__
+#define __PLATFORM_DATA_DMTIMER_OMAP_H__
+
+struct dmtimer_platform_data {
+ /* set_timer_src - Only used for OMAP1 devices */
+ int (*set_timer_src)(struct platform_device *pdev, int source);
+ u32 timer_capability;
+ u32 timer_errata;
+ int (*get_context_loss_count)(struct device *);
+};
+
+#endif /* __PLATFORM_DATA_DMTIMER_OMAP_H__ */
diff --git a/include/linux/platform_data/drv260x-pdata.h b/include/linux/platform_data/drv260x-pdata.h
new file mode 100644
index 000000000..0a03b0944
--- /dev/null
+++ b/include/linux/platform_data/drv260x-pdata.h
@@ -0,0 +1,28 @@
+/*
+ * Platform data for DRV260X haptics driver family
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2014 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _LINUX_DRV260X_PDATA_H
+#define _LINUX_DRV260X_PDATA_H
+
+struct drv260x_platform_data {
+ u32 library_selection;
+ u32 mode;
+ u32 vib_rated_voltage;
+ u32 vib_overdrive_voltage;
+};
+
+#endif
diff --git a/include/linux/platform_data/dwc3-omap.h b/include/linux/platform_data/dwc3-omap.h
new file mode 100644
index 000000000..1d36ca874
--- /dev/null
+++ b/include/linux/platform_data/dwc3-omap.h
@@ -0,0 +1,43 @@
+/**
+ * dwc3-omap.h - OMAP Specific Glue layer, header.
+ *
+ * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
+ * All rights reserved.
+ *
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+enum dwc3_omap_utmi_mode {
+ DWC3_OMAP_UTMI_MODE_UNKNOWN = 0,
+ DWC3_OMAP_UTMI_MODE_HW,
+ DWC3_OMAP_UTMI_MODE_SW,
+};
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h
new file mode 100644
index 000000000..bdb2710e2
--- /dev/null
+++ b/include/linux/platform_data/edma.h
@@ -0,0 +1,179 @@
+/*
+ * TI EDMA definitions
+ *
+ * Copyright (C) 2006-2013 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/*
+ * This EDMA3 programming framework exposes two basic kinds of resource:
+ *
+ * Channel Triggers transfers, usually from a hardware event but
+ * also manually or by "chaining" from DMA completions.
+ * Each channel is coupled to a Parameter RAM (PaRAM) slot.
+ *
+ * Slot Each PaRAM slot holds a DMA transfer descriptor (PaRAM
+ * "set"), source and destination addresses, a link to a
+ * next PaRAM slot (if any), options for the transfer, and
+ * instructions for updating those addresses. There are
+ * more than twice as many slots as event channels.
+ *
+ * Each PaRAM set describes a sequence of transfers, either for one large
+ * buffer or for several discontiguous smaller buffers. An EDMA transfer
+ * is driven only from a channel, which performs the transfers specified
+ * in its PaRAM slot until there are no more transfers. When that last
+ * transfer completes, the "link" field may be used to reload the channel's
+ * PaRAM slot with a new transfer descriptor.
+ *
+ * The EDMA Channel Controller (CC) maps requests from channels into physical
+ * Transfer Controller (TC) requests when the channel triggers (by hardware
+ * or software events, or by chaining). The two physical DMA channels provided
+ * by the TCs are thus shared by many logical channels.
+ *
+ * DaVinci hardware also has a "QDMA" mechanism which is not currently
+ * supported through this interface. (DSP firmware uses it though.)
+ */
+
+#ifndef EDMA_H_
+#define EDMA_H_
+
+/* PaRAM slots are laid out like this */
+struct edmacc_param {
+ u32 opt;
+ u32 src;
+ u32 a_b_cnt;
+ u32 dst;
+ u32 src_dst_bidx;
+ u32 link_bcntrld;
+ u32 src_dst_cidx;
+ u32 ccnt;
+} __packed;
+
+/* fields in edmacc_param.opt */
+#define SAM BIT(0)
+#define DAM BIT(1)
+#define SYNCDIM BIT(2)
+#define STATIC BIT(3)
+#define EDMA_FWID (0x07 << 8)
+#define TCCMODE BIT(11)
+#define EDMA_TCC(t) ((t) << 12)
+#define TCINTEN BIT(20)
+#define ITCINTEN BIT(21)
+#define TCCHEN BIT(22)
+#define ITCCHEN BIT(23)
+
+/* ch_status parameter of the callback function: possible values */
+#define EDMA_DMA_COMPLETE 1
+#define EDMA_DMA_CC_ERROR 2
+#define EDMA_DMA_TC1_ERROR 3
+#define EDMA_DMA_TC2_ERROR 4
+
+enum address_mode {
+ INCR = 0,
+ FIFO = 1
+};
+
+enum fifo_width {
+ W8BIT = 0,
+ W16BIT = 1,
+ W32BIT = 2,
+ W64BIT = 3,
+ W128BIT = 4,
+ W256BIT = 5
+};
+
+enum dma_event_q {
+ EVENTQ_0 = 0,
+ EVENTQ_1 = 1,
+ EVENTQ_2 = 2,
+ EVENTQ_3 = 3,
+ EVENTQ_DEFAULT = -1
+};
+
+enum sync_dimension {
+ ASYNC = 0,
+ ABSYNC = 1
+};
+
+#define EDMA_CTLR_CHAN(ctlr, chan) (((ctlr) << 16) | (chan))
+#define EDMA_CTLR(i) ((i) >> 16)
+#define EDMA_CHAN_SLOT(i) ((i) & 0xffff)
+
+#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */
+#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */
+#define EDMA_CONT_PARAMS_ANY 1001
+#define EDMA_CONT_PARAMS_FIXED_EXACT 1002
+#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
+
+#define EDMA_MAX_CC 2
+
+/* alloc/free DMA channels and their dedicated parameter RAM slots */
+int edma_alloc_channel(int channel,
+ void (*callback)(unsigned channel, u16 ch_status, void *data),
+ void *data, enum dma_event_q);
+void edma_free_channel(unsigned channel);
+
+/* alloc/free parameter RAM slots */
+int edma_alloc_slot(unsigned ctlr, int slot);
+void edma_free_slot(unsigned slot);
+
+/* alloc/free a set of contiguous parameter RAM slots */
+int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count);
+int edma_free_cont_slots(unsigned slot, int count);
+
+/* calls that operate on part of a parameter RAM slot */
+void edma_set_src(unsigned slot, dma_addr_t src_port,
+ enum address_mode mode, enum fifo_width);
+void edma_set_dest(unsigned slot, dma_addr_t dest_port,
+ enum address_mode mode, enum fifo_width);
+dma_addr_t edma_get_position(unsigned slot, bool dst);
+void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx);
+void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx);
+void edma_set_transfer_params(unsigned slot, u16 acnt, u16 bcnt, u16 ccnt,
+ u16 bcnt_rld, enum sync_dimension sync_mode);
+void edma_link(unsigned from, unsigned to);
+void edma_unlink(unsigned from);
+
+/* calls that operate on an entire parameter RAM slot */
+void edma_write_slot(unsigned slot, const struct edmacc_param *params);
+void edma_read_slot(unsigned slot, struct edmacc_param *params);
+
+/* channel control operations */
+int edma_start(unsigned channel);
+void edma_stop(unsigned channel);
+void edma_clean_channel(unsigned channel);
+void edma_clear_event(unsigned channel);
+void edma_pause(unsigned channel);
+void edma_resume(unsigned channel);
+
+void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no);
+
+struct edma_rsv_info {
+
+ const s16 (*rsv_chans)[2];
+ const s16 (*rsv_slots)[2];
+};
+
+/* platform_data for EDMA driver */
+struct edma_soc_info {
+ /*
+ * Default queue is expected to be a low-priority queue.
+ * This way, long transfers on the default queue started
+ * by the codec engine will not cause audio defects.
+ */
+ enum dma_event_q default_queue;
+
+ /* Resource reservation for other cores */
+ struct edma_rsv_info *rsv;
+
+ s8 (*queue_priority_mapping)[2];
+ const s16 (*xbar_chans)[2];
+};
+
+int edma_trigger_channel(unsigned);
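+
+/*
+ * Example (sketch): minimal use of the legacy channel API declared above.
+ * A channel allocated with edma_alloc_channel() is programmed through its
+ * coupled PaRAM slot (same index as the channel in the default mapping).
+ * The helper name and the A-synchronized geometry below are illustrative.
+ */
+static inline int edma_example_setup(dma_addr_t src, dma_addr_t dst,
+				     u16 acnt, u16 bcnt,
+				     void (*callback)(unsigned channel,
+						      u16 ch_status, void *data),
+				     void *data)
+{
+	int ch = edma_alloc_channel(EDMA_CHANNEL_ANY, callback, data,
+				    EVENTQ_DEFAULT);
+
+	if (ch < 0)
+		return ch;
+
+	edma_set_src(ch, src, INCR, W8BIT);
+	edma_set_dest(ch, dst, INCR, W8BIT);
+	edma_set_src_index(ch, acnt, 0);
+	edma_set_dest_index(ch, acnt, 0);
+	/* A-synchronized: one ACNT-sized array per event, BCNT arrays total */
+	edma_set_transfer_params(ch, acnt, bcnt, 1, bcnt, ASYNC);
+
+	return edma_start(ch);
+}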
+
+#endif
diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h
new file mode 100644
index 000000000..31b19ca1d
--- /dev/null
+++ b/include/linux/platform_data/efm32-spi.h
@@ -0,0 +1,14 @@
+#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__
+#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__
+
+#include <linux/types.h>
+
+/**
+ * struct efm32_spi_pdata
+ * @location: pinmux location for the I/O pins (to be written to the ROUTE
+ * register)
+ */
+struct efm32_spi_pdata {
+ u8 location;
+};
+#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */
diff --git a/include/linux/platform_data/efm32-uart.h b/include/linux/platform_data/efm32-uart.h
new file mode 100644
index 000000000..ed0e975b3
--- /dev/null
+++ b/include/linux/platform_data/efm32-uart.h
@@ -0,0 +1,18 @@
+/*
+ *
+ *
+ */
+#ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__
+#define __LINUX_PLATFORM_DATA_EFM32_UART_H__
+
+#include <linux/types.h>
+
+/**
+ * struct efm32_uart_pdata
+ * @location: pinmux location for the I/O pins (to be written to the ROUTE
+ * register)
+ */
+struct efm32_uart_pdata {
+ u8 location;
+};
+#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_UART_H__ */
diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h
new file mode 100644
index 000000000..5c15a738e
--- /dev/null
+++ b/include/linux/platform_data/ehci-sh.h
@@ -0,0 +1,28 @@
+/*
+ * EHCI SuperH driver platform data
+ *
+ * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __USB_EHCI_SH_H
+#define __USB_EHCI_SH_H
+
+struct ehci_sh_platdata {
+ void (*phy_init)(void); /* Phy init function */
+};
+
+#endif /* __USB_EHCI_SH_H */
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
new file mode 100644
index 000000000..b8686c00f
--- /dev/null
+++ b/include/linux/platform_data/elm.h
@@ -0,0 +1,65 @@
+/*
+ * BCH Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ELM_H
+#define __ELM_H
+
+enum bch_ecc {
+ BCH4_ECC = 0,
+ BCH8_ECC,
+ BCH16_ECC,
+};
+
+/* The ELM supports 8 error syndrome processes */
+#define ERROR_VECTOR_MAX 8
+
+/**
+ * struct elm_errorvec - error vector for elm
+ * @error_reported: set true for vectors where an error is reported
+ * @error_uncorrectable: set true if the errors in this vector cannot be corrected
+ * @error_count: number of correctable errors in the sector
+ * @error_loc: buffer for error location
+ *
+ */
+struct elm_errorvec {
+ bool error_reported;
+ bool error_uncorrectable;
+ int error_count;
+ int error_loc[16];
+};
+
+#if IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec);
+int elm_config(struct device *dev, enum bch_ecc bch_type,
+ int ecc_steps, int ecc_step_size, int ecc_syndrome_size);
+#else
+static inline void
+elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+}
+
+static inline int elm_config(struct device *dev, enum bch_ecc bch_type,
+ int ecc_steps, int ecc_step_size,
+ int ecc_syndrome_size)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
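+
+/*
+ * Example (sketch): configuring the ELM for BCH8 on a 2KiB-page NAND.
+ * The step geometry (4 steps of 512 bytes, 14 syndrome bytes per step)
+ * is illustrative and must match the NAND driver's ECC layout.
+ */
+static inline int elm_example_config_bch8(struct device *dev)
+{
+	return elm_config(dev, BCH8_ECC, 4, 512, 14);
+}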
+
+#endif /* __ELM_H */
diff --git a/include/linux/platform_data/emif_plat.h b/include/linux/platform_data/emif_plat.h
new file mode 100644
index 000000000..5c19a2a64
--- /dev/null
+++ b/include/linux/platform_data/emif_plat.h
@@ -0,0 +1,129 @@
+/*
+ * Definitions for TI EMIF device platform data
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * Aneesh V <aneesh@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __EMIF_PLAT_H
+#define __EMIF_PLAT_H
+
+/* Low power modes - EMIF_PWR_MGMT_CTRL */
+#define EMIF_LP_MODE_DISABLE 0
+#define EMIF_LP_MODE_CLOCK_STOP 1
+#define EMIF_LP_MODE_SELF_REFRESH 2
+#define EMIF_LP_MODE_PWR_DN 4
+
+/* Hardware capabilities */
+#define EMIF_HW_CAPS_LL_INTERFACE 0x00000001
+
+/*
+ * EMIF IP Revisions
+ * EMIF4D - Used in OMAP4
+ * EMIF4D5 - Used in OMAP5
+ */
+#define EMIF_4D 1
+#define EMIF_4D5 2
+
+/*
+ * PHY types
+ * ATTILAPHY - Used in OMAP4
+ * INTELLIPHY - Used in OMAP5
+ */
+#define EMIF_PHY_TYPE_ATTILAPHY 1
+#define EMIF_PHY_TYPE_INTELLIPHY 2
+
+/* Custom config requests */
+#define EMIF_CUSTOM_CONFIG_LPMODE 0x00000001
+#define EMIF_CUSTOM_CONFIG_TEMP_ALERT_POLL_INTERVAL 0x00000002
+#define EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART 0x00000004
+
+#ifndef __ASSEMBLY__
+/**
+ * struct ddr_device_info - All information about the DDR device except AC
+ * timing parameters
+ * @type: Device type (LPDDR2-S4, LPDDR2-S2 etc)
+ * @density: Device density
+ * @io_width: Bus width
+ * @cs1_used: Whether there is a DDR device attached to the second
+ * chip-select (CS1) of this EMIF instance
+ * @cal_resistors_per_cs: Whether there is one calibration resistor per
+ * chip-select or whether it's a single one for both
+ * @manufacturer: Manufacturer name string
+ */
+struct ddr_device_info {
+ u32 type;
+ u32 density;
+ u32 io_width;
+ u32 cs1_used;
+ u32 cal_resistors_per_cs;
+ char manufacturer[10];
+};
+
+/**
+ * struct emif_custom_configs - Custom configuration parameters/policies
+ * passed from the platform layer
+ * @mask: Mask to indicate which configs are requested
+ * @lpmode: LPMODE to be used in PWR_MGMT_CTRL register
+ * @lpmode_timeout_performance: Timeout before LPMODE entry when higher
+ * performance is desired at the cost of power (typically
+ * at higher OPPs)
+ * @lpmode_timeout_power: Timeout before LPMODE entry when better power
+ * savings is desired and performance is not important
+ * (typically at lower loads indicated by lower OPPs)
+ * @lpmode_freq_threshold: The DDR frequency threshold to identify between
+ * the above two cases:
+ * timeout = (freq >= lpmode_freq_threshold) ?
+ * lpmode_timeout_performance :
+ * lpmode_timeout_power;
+ * @temp_alert_poll_interval_ms: LPDDR2 MR4 polling interval at nominal
+ * temperature (in milliseconds). When the temperature is high,
+ * polling is done 4 times as frequently.
+ */
+struct emif_custom_configs {
+ u32 mask;
+ u32 lpmode;
+ u32 lpmode_timeout_performance;
+ u32 lpmode_timeout_power;
+ u32 lpmode_freq_threshold;
+ u32 temp_alert_poll_interval_ms;
+};
+
+/**
+ * struct emif_platform_data - Platform data passed on EMIF platform
+ * device creation. Used by the driver.
+ * @hw_caps: Hw capabilities of the EMIF IP in the respective SoC
+ * @device_info: Device info structure containing information such
+ * as type, bus width, density etc
+ * @timings: Timings information from device datasheet passed
+ * as an array of 'struct lpddr2_timings'. Can be NULL
+ * if default timings are ok
+ * @timings_arr_size: Size of the timings array. Depends on the number
+ * of different frequencies for which timings data
+ * is provided
+ * @min_tck: Minimum value of some timing parameters in terms
+ * of number of cycles. Can be NULL if default values
+ * are ok
+ * @custom_configs: Custom configurations requested by SoC or board
+ * code and the data for them. Can be NULL if default
+ * configurations done by the driver are ok. See
+ * documentation for 'struct emif_custom_configs' for
+ * more details
+ */
+struct emif_platform_data {
+ u32 hw_caps;
+ struct ddr_device_info *device_info;
+ const struct lpddr2_timings *timings;
+ u32 timings_arr_size;
+ const struct lpddr2_min_tck *min_tck;
+ struct emif_custom_configs *custom_configs;
+ u32 ip_rev;
+ u32 phy_type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* __EMIF_PLAT_H */
diff --git a/include/linux/platform_data/eth-netx.h b/include/linux/platform_data/eth-netx.h
new file mode 100644
index 000000000..a39515972
--- /dev/null
+++ b/include/linux/platform_data/eth-netx.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ETH_NETX_H
+#define __ETH_NETX_H
+
+struct netxeth_platform_data {
+ unsigned int xcno; /* number of xmac/xpec engine this eth uses */
+};
+
+#endif
diff --git a/include/linux/platform_data/fsa9480.h b/include/linux/platform_data/fsa9480.h
new file mode 100644
index 000000000..72dddcb4b
--- /dev/null
+++ b/include/linux/platform_data/fsa9480.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics
+ * Minkyu Kang <mk7.kang@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _FSA9480_H_
+#define _FSA9480_H_
+
+#define FSA9480_ATTACHED 1
+#define FSA9480_DETACHED 0
+
+struct fsa9480_platform_data {
+ void (*cfg_gpio) (void);
+ void (*usb_cb) (u8 attached);
+ void (*uart_cb) (u8 attached);
+ void (*charger_cb) (u8 attached);
+ void (*jig_cb) (u8 attached);
+ void (*reset_cb) (void);
+ void (*usb_power) (u8 on);
+ int wakeup;
+};
+
+#endif /* _FSA9480_H_ */
diff --git a/include/linux/platform_data/g762.h b/include/linux/platform_data/g762.h
new file mode 100644
index 000000000..d3c512837
--- /dev/null
+++ b/include/linux/platform_data/g762.h
@@ -0,0 +1,37 @@
+/*
+ * Platform data structure for g762 fan controller driver
+ *
+ * Copyright (C) 2013, Arnaud EBALARD <arno@natisbad.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __LINUX_PLATFORM_DATA_G762_H__
+#define __LINUX_PLATFORM_DATA_G762_H__
+
+/*
+ * The following structure can be used to set g762 driver platform-specific
+ * data during board init. Note that passing a sparse structure is possible,
+ * but it will result in non-specified attributes being set to their default
+ * values, hence overriding those installed during boot (e.g. by u-boot).
+ */
+
+struct g762_platform_data {
+ u32 fan_startv;
+ u32 fan_gear_mode;
+ u32 pwm_polarity;
+ u32 clk_freq;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_G762_H__ */
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
new file mode 100644
index 000000000..6ace3fd32
--- /dev/null
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -0,0 +1,55 @@
+/*
+ * DaVinci GPIO Platform Related Defines
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DAVINCI_GPIO_PLATFORM_H
+#define __DAVINCI_GPIO_PLATFORM_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <asm-generic/gpio.h>
+
+struct davinci_gpio_platform_data {
+ u32 ngpio;
+ u32 gpio_unbanked;
+};
+
+
+struct davinci_gpio_controller {
+ struct gpio_chip chip;
+ struct irq_domain *irq_domain;
+ /* Serialize access to GPIO registers */
+ spinlock_t lock;
+ void __iomem *regs;
+ void __iomem *set_data;
+ void __iomem *clr_data;
+ void __iomem *in_data;
+ int gpio_unbanked;
+ unsigned gpio_irq;
+};
+
+/*
+ * basic gpio routines
+ */
+#define GPIO(X) (X) /* 0 <= X <= (DAVINCI_N_GPIO - 1) */
+
+/* Convert GPIO signal to GPIO pin number */
+#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio))
+
+static inline u32 __gpio_mask(unsigned gpio)
+{
+ return 1 << (gpio % 32);
+}
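+
+/*
+ * Example (sketch): bank 2, offset 11 is GPIO number 43; its bit within
+ * the 32-bit bank registers is computed with __gpio_mask().
+ */
+static inline u32 davinci_gpio_example_bit(void)
+{
+	return __gpio_mask(GPIO_TO_PIN(2, 11));	/* 1 << 11 */
+}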
+#endif
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
new file mode 100644
index 000000000..28702c849
--- /dev/null
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef GPIO_DW_APB_H
+#define GPIO_DW_APB_H
+
+struct dwapb_port_property {
+ struct device_node *node;
+ const char *name;
+ unsigned int idx;
+ unsigned int ngpio;
+ unsigned int gpio_base;
+ unsigned int irq;
+ bool irq_shared;
+};
+
+struct dwapb_platform_data {
+ struct dwapb_port_property *properties;
+ unsigned int nports;
+};
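+
+/*
+ * Example (sketch): describing a single port for a non-DT platform.
+ * The name and all numbers below are hypothetical.
+ */
+static inline void dwapb_example_port(struct dwapb_port_property *pp)
+{
+	pp->node = NULL;		/* no device tree node */
+	pp->name = "portA";
+	pp->idx = 0;			/* port A of the block */
+	pp->ngpio = 32;
+	pp->gpio_base = 0;
+	pp->irq = 17;			/* hypothetical interrupt line */
+	pp->irq_shared = false;
+}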
+
+#endif
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
new file mode 100644
index 000000000..7c5a519d2
--- /dev/null
+++ b/include/linux/platform_data/gpio-em.h
@@ -0,0 +1,11 @@
+#ifndef __GPIO_EM_H__
+#define __GPIO_EM_H__
+
+struct gpio_em_config {
+ unsigned int gpio_base;
+ unsigned int irq_base;
+ unsigned int number_of_pins;
+ const char *pctl_name;
+};
+
+#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/gpio-lpc32xx.h b/include/linux/platform_data/gpio-lpc32xx.h
new file mode 100644
index 000000000..a544e962a
--- /dev/null
+++ b/include/linux/platform_data/gpio-lpc32xx.h
@@ -0,0 +1,50 @@
+/*
+ * Author: Kevin Wells <kevin.wells@nxp.com>
+ *
+ * Copyright (C) 2010 NXP Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MACH_GPIO_LPC32XX_H
+#define __MACH_GPIO_LPC32XX_H
+
+/*
+ * Note!
+ * Muxed GP pins need to be set up to the GP state in the board level
+ * code prior to using this driver.
+ * GPI pins : 28xP3 group
+ * GPO pins : 24xP3 group
+ * GPIO pins: 8xP0 group, 24xP1 group, 13xP2 group, 6xP3 group
+ */
+
+#define LPC32XX_GPIO_P0_MAX 8
+#define LPC32XX_GPIO_P1_MAX 24
+#define LPC32XX_GPIO_P2_MAX 13
+#define LPC32XX_GPIO_P3_MAX 6
+#define LPC32XX_GPI_P3_MAX 29
+#define LPC32XX_GPO_P3_MAX 24
+
+#define LPC32XX_GPIO_P0_GRP 0
+#define LPC32XX_GPIO_P1_GRP (LPC32XX_GPIO_P0_GRP + LPC32XX_GPIO_P0_MAX)
+#define LPC32XX_GPIO_P2_GRP (LPC32XX_GPIO_P1_GRP + LPC32XX_GPIO_P1_MAX)
+#define LPC32XX_GPIO_P3_GRP (LPC32XX_GPIO_P2_GRP + LPC32XX_GPIO_P2_MAX)
+#define LPC32XX_GPI_P3_GRP (LPC32XX_GPIO_P3_GRP + LPC32XX_GPIO_P3_MAX)
+#define LPC32XX_GPO_P3_GRP (LPC32XX_GPI_P3_GRP + LPC32XX_GPI_P3_MAX)
+
+/*
+ * A specific GPIO can be selected with this macro,
+ * e.g. GPIO_05 can be selected with LPC32XX_GPIO(LPC32XX_GPIO_P3_GRP, 5).
+ * See the LPC32x0 User's Guide for GPIO group numbers.
+ */
+#define LPC32XX_GPIO(x, y) ((x) + (y))
+
+#endif /* __MACH_GPIO_LPC32XX_H */
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
new file mode 100644
index 000000000..5d50b25a7
--- /dev/null
+++ b/include/linux/platform_data/gpio-omap.h
@@ -0,0 +1,216 @@
+/*
+ * OMAP GPIO handling defines and functions
+ *
+ * Copyright (C) 2003-2005 Nokia Corporation
+ *
+ * Written by Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __ASM_ARCH_OMAP_GPIO_H
+#define __ASM_ARCH_OMAP_GPIO_H
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#define OMAP1_MPUIO_BASE 0xfffb5000
+
+/*
+ * These are the omap15xx/16xx offsets. The omap7xx offsets are
+ * OMAP_MPUIO_ / 2 offsets below.
+ */
+#define OMAP_MPUIO_INPUT_LATCH 0x00
+#define OMAP_MPUIO_OUTPUT 0x04
+#define OMAP_MPUIO_IO_CNTL 0x08
+#define OMAP_MPUIO_KBR_LATCH 0x10
+#define OMAP_MPUIO_KBC 0x14
+#define OMAP_MPUIO_GPIO_EVENT_MODE 0x18
+#define OMAP_MPUIO_GPIO_INT_EDGE 0x1c
+#define OMAP_MPUIO_KBD_INT 0x20
+#define OMAP_MPUIO_GPIO_INT 0x24
+#define OMAP_MPUIO_KBD_MASKIT 0x28
+#define OMAP_MPUIO_GPIO_MASKIT 0x2c
+#define OMAP_MPUIO_GPIO_DEBOUNCING 0x30
+#define OMAP_MPUIO_LATCH 0x34
+
+#define OMAP34XX_NR_GPIOS 6
+
+/*
+ * OMAP1510 GPIO registers
+ */
+#define OMAP1510_GPIO_DATA_INPUT 0x00
+#define OMAP1510_GPIO_DATA_OUTPUT 0x04
+#define OMAP1510_GPIO_DIR_CONTROL 0x08
+#define OMAP1510_GPIO_INT_CONTROL 0x0c
+#define OMAP1510_GPIO_INT_MASK 0x10
+#define OMAP1510_GPIO_INT_STATUS 0x14
+#define OMAP1510_GPIO_PIN_CONTROL 0x18
+
+#define OMAP1510_IH_GPIO_BASE 64
+
+/*
+ * OMAP1610 specific GPIO registers
+ */
+#define OMAP1610_GPIO_REVISION 0x0000
+#define OMAP1610_GPIO_SYSCONFIG 0x0010
+#define OMAP1610_GPIO_SYSSTATUS 0x0014
+#define OMAP1610_GPIO_IRQSTATUS1 0x0018
+#define OMAP1610_GPIO_IRQENABLE1 0x001c
+#define OMAP1610_GPIO_WAKEUPENABLE 0x0028
+#define OMAP1610_GPIO_DATAIN 0x002c
+#define OMAP1610_GPIO_DATAOUT 0x0030
+#define OMAP1610_GPIO_DIRECTION 0x0034
+#define OMAP1610_GPIO_EDGE_CTRL1 0x0038
+#define OMAP1610_GPIO_EDGE_CTRL2 0x003c
+#define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c
+#define OMAP1610_GPIO_CLEAR_WAKEUPENA 0x00a8
+#define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0
+#define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc
+#define OMAP1610_GPIO_SET_WAKEUPENA 0x00e8
+#define OMAP1610_GPIO_SET_DATAOUT 0x00f0
+
+/*
+ * OMAP7XX specific GPIO registers
+ */
+#define OMAP7XX_GPIO_DATA_INPUT 0x00
+#define OMAP7XX_GPIO_DATA_OUTPUT 0x04
+#define OMAP7XX_GPIO_DIR_CONTROL 0x08
+#define OMAP7XX_GPIO_INT_CONTROL 0x0c
+#define OMAP7XX_GPIO_INT_MASK 0x10
+#define OMAP7XX_GPIO_INT_STATUS 0x14
+
+/*
+ * omap2+ specific GPIO registers
+ */
+#define OMAP24XX_GPIO_REVISION 0x0000
+#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
+#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
+#define OMAP24XX_GPIO_IRQENABLE2 0x002c
+#define OMAP24XX_GPIO_IRQENABLE1 0x001c
+#define OMAP24XX_GPIO_WAKE_EN 0x0020
+#define OMAP24XX_GPIO_CTRL 0x0030
+#define OMAP24XX_GPIO_OE 0x0034
+#define OMAP24XX_GPIO_DATAIN 0x0038
+#define OMAP24XX_GPIO_DATAOUT 0x003c
+#define OMAP24XX_GPIO_LEVELDETECT0 0x0040
+#define OMAP24XX_GPIO_LEVELDETECT1 0x0044
+#define OMAP24XX_GPIO_RISINGDETECT 0x0048
+#define OMAP24XX_GPIO_FALLINGDETECT 0x004c
+#define OMAP24XX_GPIO_DEBOUNCE_EN 0x0050
+#define OMAP24XX_GPIO_DEBOUNCE_VAL 0x0054
+#define OMAP24XX_GPIO_CLEARIRQENABLE1 0x0060
+#define OMAP24XX_GPIO_SETIRQENABLE1 0x0064
+#define OMAP24XX_GPIO_CLEARWKUENA 0x0080
+#define OMAP24XX_GPIO_SETWKUENA 0x0084
+#define OMAP24XX_GPIO_CLEARDATAOUT 0x0090
+#define OMAP24XX_GPIO_SETDATAOUT 0x0094
+
+#define OMAP4_GPIO_REVISION 0x0000
+#define OMAP4_GPIO_EOI 0x0020
+#define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
+#define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
+#define OMAP4_GPIO_IRQSTATUS0 0x002c
+#define OMAP4_GPIO_IRQSTATUS1 0x0030
+#define OMAP4_GPIO_IRQSTATUSSET0 0x0034
+#define OMAP4_GPIO_IRQSTATUSSET1 0x0038
+#define OMAP4_GPIO_IRQSTATUSCLR0 0x003c
+#define OMAP4_GPIO_IRQSTATUSCLR1 0x0040
+#define OMAP4_GPIO_IRQWAKEN0 0x0044
+#define OMAP4_GPIO_IRQWAKEN1 0x0048
+#define OMAP4_GPIO_IRQENABLE1 0x011c
+#define OMAP4_GPIO_WAKE_EN 0x0120
+#define OMAP4_GPIO_IRQSTATUS2 0x0128
+#define OMAP4_GPIO_IRQENABLE2 0x012c
+#define OMAP4_GPIO_CTRL 0x0130
+#define OMAP4_GPIO_OE 0x0134
+#define OMAP4_GPIO_DATAIN 0x0138
+#define OMAP4_GPIO_DATAOUT 0x013c
+#define OMAP4_GPIO_LEVELDETECT0 0x0140
+#define OMAP4_GPIO_LEVELDETECT1 0x0144
+#define OMAP4_GPIO_RISINGDETECT 0x0148
+#define OMAP4_GPIO_FALLINGDETECT 0x014c
+#define OMAP4_GPIO_DEBOUNCENABLE 0x0150
+#define OMAP4_GPIO_DEBOUNCINGTIME 0x0154
+#define OMAP4_GPIO_CLEARIRQENABLE1 0x0160
+#define OMAP4_GPIO_SETIRQENABLE1 0x0164
+#define OMAP4_GPIO_CLEARWKUENA 0x0180
+#define OMAP4_GPIO_SETWKUENA 0x0184
+#define OMAP4_GPIO_CLEARDATAOUT 0x0190
+#define OMAP4_GPIO_SETDATAOUT 0x0194
+
+#define OMAP_MAX_GPIO_LINES 192
+
+#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
+#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
+
+struct omap_gpio_dev_attr {
+ int bank_width; /* GPIO bank width */
+ bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
+};
+
+struct omap_gpio_reg_offs {
+ u16 revision;
+ u16 direction;
+ u16 datain;
+ u16 dataout;
+ u16 set_dataout;
+ u16 clr_dataout;
+ u16 irqstatus;
+ u16 irqstatus2;
+ u16 irqstatus_raw0;
+ u16 irqstatus_raw1;
+ u16 irqenable;
+ u16 irqenable2;
+ u16 set_irqenable;
+ u16 clr_irqenable;
+ u16 debounce;
+ u16 debounce_en;
+ u16 ctrl;
+ u16 wkup_en;
+ u16 leveldetect0;
+ u16 leveldetect1;
+ u16 risingdetect;
+ u16 fallingdetect;
+ u16 irqctrl;
+ u16 edgectrl1;
+ u16 edgectrl2;
+ u16 pinctrl;
+
+ bool irqenable_inv;
+};
+
+struct omap_gpio_platform_data {
+ int bank_type;
+ int bank_width; /* GPIO bank width */
+ int bank_stride; /* Only needed for omap1 MPUIO */
+ bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
+ bool loses_context; /* whether the bank would ever lose context */
+ bool is_mpuio; /* whether the bank is of type MPUIO */
+ u32 non_wakeup_gpios;
+
+ struct omap_gpio_reg_offs *regs;
+
+ /* Return context loss count due to PM states changing */
+ int (*get_context_loss_count)(struct device *dev);
+};
+
+extern void omap2_gpio_prepare_for_idle(int off_mode);
+extern void omap2_gpio_resume_after_idle(void);
+extern void omap_set_gpio_debounce(int gpio, int enable);
+extern void omap_set_gpio_debounce_time(int gpio, int enable);
+
+#endif
diff --git a/include/linux/platform_data/gpio-rcar.h b/include/linux/platform_data/gpio-rcar.h
new file mode 100644
index 000000000..2d8d69432
--- /dev/null
+++ b/include/linux/platform_data/gpio-rcar.h
@@ -0,0 +1,29 @@
+/*
+ * Renesas R-Car GPIO Support
+ *
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __GPIO_RCAR_H__
+#define __GPIO_RCAR_H__
+
+struct gpio_rcar_config {
+ int gpio_base;
+ unsigned int irq_base;
+ unsigned int number_of_pins;
+ const char *pctl_name;
+ unsigned has_both_edge_trigger:1;
+};
+
+#define RCAR_GP_PIN(bank, pin) (((bank) * 32) + (pin))
+
+#endif /* __GPIO_RCAR_H__ */
diff --git a/include/linux/platform_data/gpio-ts5500.h b/include/linux/platform_data/gpio-ts5500.h
new file mode 100644
index 000000000..b10d11c9b
--- /dev/null
+++ b/include/linux/platform_data/gpio-ts5500.h
@@ -0,0 +1,27 @@
+/*
+ * GPIO (DIO) header for Technologic Systems TS-5500
+ *
+ * Copyright (c) 2012 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PDATA_GPIO_TS5500_H
+#define _PDATA_GPIO_TS5500_H
+
+/**
+ * struct ts5500_dio_platform_data - TS-5500 pin block configuration
+ * @base: The GPIO base number to use.
+ * @strap: The only pin connected to an interrupt in a block is input-only.
+ * If you need a bidirectional line which can trigger an IRQ, you
+ * may strap it with an in/out pin. This flag indicates this case.
+ */
+struct ts5500_dio_platform_data {
+ int base;
+ bool strap;
+};
+
+#endif /* _PDATA_GPIO_TS5500_H */
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
new file mode 100644
index 000000000..5ae0d9c80
--- /dev/null
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -0,0 +1,21 @@
+/*
+ * gpio_backlight.h - Simple GPIO-controlled backlight
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __GPIO_BACKLIGHT_H__
+#define __GPIO_BACKLIGHT_H__
+
+struct device;
+
+struct gpio_backlight_platform_data {
+ struct device *fbdev;
+ int gpio;
+ int def_value;
+ bool active_low;
+ const char *name;
+};
+
+#endif
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
new file mode 100644
index 000000000..8e981be2e
--- /dev/null
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -0,0 +1,88 @@
+/*
+ * MMC definitions for OMAP2
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * struct omap_hsmmc_dev_attr.flags possibilities
+ *
+ * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can
+ * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag
+ * should be set if this is the case. See for example Section 22.5.3
+ * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia
+ * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R).
+ *
+ * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers
+ * don't work correctly on some MMC controller instances on some
+ * OMAP3 SoCs; this flag should be set if this is the case. See
+ * for example Advisory 2.1.1.128 "MMC: Multiple Block Read
+ * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_
+ * Revision F (October 2010) (SPRZ278F).
+ */
+#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0)
+#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1)
+#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2)
+
+struct omap_hsmmc_dev_attr {
+ u8 flags;
+};
+
+struct mmc_card;
+
+struct omap_hsmmc_platform_data {
+ /* back-link to device */
+ struct device *dev;
+
+ /* set if your board has components or wiring that limits the
+ * maximum frequency on the MMC bus */
+ unsigned int max_freq;
+
+ /* Integrating attributes from the omap_hwmod layer */
+ u8 controller_flags;
+
+ /* Register offset deviation */
+ u16 reg_offset;
+
+ /*
+ * 4/8 wires and any additional host capabilities
+ * need to be OR'd together (ref. linux/mmc/host.h)
+ */
+ u32 caps; /* Used for the MMC driver on 2430 and later */
+ u32 pm_caps; /* PM capabilities of the mmc */
+
+ /* use the internal clock */
+ unsigned internal_clock:1;
+
+ /* nonremovable e.g. eMMC */
+ unsigned nonremovable:1;
+
+ /* eMMC does not handle power off when not in sleep state */
+ unsigned no_regulator_off_init:1;
+
+ /* we can put the features above into this variable */
+#define HSMMC_HAS_PBIAS (1 << 0)
+#define HSMMC_HAS_UPDATED_RESET (1 << 1)
+#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
+ unsigned features;
+
+ int gpio_cd; /* gpio (card detect) */
+ int gpio_cod; /* gpio (cover detect) */
+ int gpio_wp; /* gpio (write protect) */
+
+ int (*set_power)(struct device *dev, int power_on, int vdd);
+ void (*remux)(struct device *dev, int power_on);
+ /* Call back before enabling / disabling regulators */
+ void (*before_set_reg)(struct device *dev, int power_on, int vdd);
+ /* Call back after enabling / disabling regulators */
+ void (*after_set_reg)(struct device *dev, int power_on, int vdd);
+ /* if we have special card, init it using this callback */
+ void (*init_card)(struct mmc_card *card);
+
+ const char *name;
+ u32 ocr_mask;
+};
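A minimal sketch of how a board file might fill this structure for a soldered-down eMMC on an 8-bit bus; the frequency cap, the capability flags (from linux/mmc/host.h) and the -EINVAL GPIO convention are illustrative assumptions:

static struct omap_hsmmc_platform_data emmc_pdata = {
        .max_freq      = 48000000,      /* board wiring limit, in Hz */
        .caps          = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
        .nonremovable  = 1,
        .gpio_cd       = -EINVAL,       /* no card-detect line */
        .gpio_cod      = -EINVAL,       /* no cover-detect line */
        .gpio_wp       = -EINVAL,       /* no write-protect line */
};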
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h
new file mode 100644
index 000000000..0e3cce130
--- /dev/null
+++ b/include/linux/platform_data/hwmon-s3c.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2005 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * S3C - HWMon interface for ADC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __HWMON_S3C_H__
+#define __HWMON_S3C_H__
+
+/**
+ * s3c_hwmon_chcfg - channel configuration
+ * @name: The name to give this channel.
+ * @mult: Multiply the ADC value read by this.
+ * @div: Divide the value from the ADC by this.
+ *
+ * The value read from the ADC is converted to a value that
+ * hwmon expects (mV) by result = (value_read * @mult) / @div.
+ */
+struct s3c_hwmon_chcfg {
+ const char *name;
+ unsigned int mult;
+ unsigned int div;
+};
+
+/**
+ * s3c_hwmon_pdata - HWMON platform data
+ * @in: One configuration for each possible channel used.
+ */
+struct s3c_hwmon_pdata {
+ struct s3c_hwmon_chcfg *in[8];
+};
+
+/**
+ * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device
+ * @pd: Platform data to register to device.
+ *
+ * Register the given platform data for use with the S3C HWMON device.
+ * The call will copy the platform data, so the board definitions can
+ * make the structure itself __initdata.
+ */
+extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd);
+
+#endif /* __HWMON_S3C_H__ */
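Tying the mult/div formula above to board code, a hypothetical channel whose external divider halves the measured rail could be registered like this (channel name and scaling factors are made up):

static struct s3c_hwmon_chcfg vin0_cfg = {
        .name = "vin0",
        .mult = 2,      /* reported mV = value_read * mult / div */
        .div  = 1,
};

static struct s3c_hwmon_pdata hwmon_pdata = {
        .in[0] = &vin0_cfg,
};

/* board init: s3c_hwmon_set_platdata(&hwmon_pdata); */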
diff --git a/include/linux/platform_data/i2c-cbus-gpio.h b/include/linux/platform_data/i2c-cbus-gpio.h
new file mode 100644
index 000000000..6faa992a9
--- /dev/null
+++ b/include/linux/platform_data/i2c-cbus-gpio.h
@@ -0,0 +1,27 @@
+/*
+ * i2c-cbus-gpio.h - CBUS I2C platform_data definition
+ *
+ * Copyright (C) 2004-2009 Nokia Corporation
+ *
+ * Written by Felipe Balbi and Aaro Koskinen.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __INCLUDE_LINUX_I2C_CBUS_GPIO_H
+#define __INCLUDE_LINUX_I2C_CBUS_GPIO_H
+
+struct i2c_cbus_platform_data {
+ int dat_gpio;
+ int clk_gpio;
+ int sel_gpio;
+};
+
+#endif /* __INCLUDE_LINUX_I2C_CBUS_GPIO_H */
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h
new file mode 100644
index 000000000..89fd34727
--- /dev/null
+++ b/include/linux/platform_data/i2c-davinci.h
@@ -0,0 +1,27 @@
+/*
+ * DaVinci I2C controller platform_device info
+ *
+ * Author: Vladimir Barinov, MontaVista Software, Inc. <source@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+*/
+
+#ifndef __ASM_ARCH_I2C_H
+#define __ASM_ARCH_I2C_H
+
+/* All frequencies are expressed in kHz */
+struct davinci_i2c_platform_data {
+ unsigned int bus_freq; /* standard bus frequency (kHz) */
+ unsigned int bus_delay; /* post-transaction delay (usec) */
+ unsigned int sda_pin; /* GPIO pin ID to use for SDA */
+ unsigned int scl_pin; /* GPIO pin ID to use for SCL */
+	bool has_pfunc;		/* chip has an ICPFUNC register */
+};
+
+/* for board setup code */
+void davinci_init_i2c(struct davinci_i2c_platform_data *);
+
+#endif /* __ASM_ARCH_I2C_H */
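A minimal sketch of board code filling the kHz/usec fields above; the values are placeholders:

static struct davinci_i2c_platform_data i2c_pdata = {
        .bus_freq  = 100,       /* kHz, i.e. standard-mode 100 kHz */
        .bus_delay = 0,         /* usec of post-transaction delay */
};

/* board setup: davinci_init_i2c(&i2c_pdata); */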
diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h
new file mode 100644
index 000000000..7a61fb27c
--- /dev/null
+++ b/include/linux/platform_data/i2c-designware.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef I2C_DESIGNWARE_H
+#define I2C_DESIGNWARE_H
+
+struct dw_i2c_platform_data {
+ unsigned int i2c_scl_freq;
+};
+
+#endif
diff --git a/include/linux/platform_data/i2c-imx.h b/include/linux/platform_data/i2c-imx.h
new file mode 100644
index 000000000..8289d915e
--- /dev/null
+++ b/include/linux/platform_data/i2c-imx.h
@@ -0,0 +1,21 @@
+/*
+ * i2c.h - i.MX I2C driver header file
+ *
+ * Copyright (c) 2008, Darius Augulis <augulis.darius@gmail.com>
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __ASM_ARCH_I2C_H_
+#define __ASM_ARCH_I2C_H_
+
+/**
+ * struct imxi2c_platform_data - structure of platform data for MXC I2C driver
+ * @bitrate: Bus speed measured in Hz
+ *
+ **/
+struct imxi2c_platform_data {
+ u32 bitrate;
+};
+
+#endif /* __ASM_ARCH_I2C_H_ */
diff --git a/include/linux/platform_data/i2c-nuc900.h b/include/linux/platform_data/i2c-nuc900.h
new file mode 100644
index 000000000..9ffb12d06
--- /dev/null
+++ b/include/linux/platform_data/i2c-nuc900.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_ARCH_NUC900_I2C_H
+#define __ASM_ARCH_NUC900_I2C_H
+
+struct nuc900_platform_i2c {
+ int bus_num;
+ unsigned long bus_freq;
+};
+
+#endif /* __ASM_ARCH_NUC900_I2C_H */
diff --git a/include/linux/platform_data/i2c-s3c2410.h b/include/linux/platform_data/i2c-s3c2410.h
new file mode 100644
index 000000000..05af66b84
--- /dev/null
+++ b/include/linux/platform_data/i2c-s3c2410.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2004-2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - I2C Controller platform_device info
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __I2C_S3C2410_H
+#define __I2C_S3C2410_H __FILE__
+
+#define S3C_IICFLG_FILTER (1<<0) /* enable s3c2440 filter */
+
+struct platform_device;
+
+/**
+ * struct s3c2410_platform_i2c - Platform data for s3c I2C.
+ * @bus_num: The bus number to use (if possible).
+ * @flags: Any flags for the I2C bus (e.g. S3C_IICFLG_FILTER).
+ * @slave_addr: The I2C address for the slave device (if enabled).
+ * @frequency: The desired frequency in Hz of the bus. This is
+ * guaranteed to not be exceeded. If the caller does
+ * not care, use zero and the driver will select a
+ * useful default.
+ * @sda_delay: The delay (in ns) applied to SDA edges.
+ * @cfg_gpio: A callback to configure the pins for I2C operation.
+ */
+struct s3c2410_platform_i2c {
+ int bus_num;
+ unsigned int flags;
+ unsigned int slave_addr;
+ unsigned long frequency;
+ unsigned int sda_delay;
+
+ void (*cfg_gpio)(struct platform_device *dev);
+};
+
+/**
+ * s3c_i2c0_set_platdata - set platform data for i2c0 device
+ * @i2c: The platform data to set, or NULL for default data.
+ *
+ * Register the given platform data for use with the i2c0 device. This
+ * call copies the platform data, so the caller can use __initdata for
+ * their copy.
+ *
+ * If cfg_gpio is NULL, this call will set it to the default platform
+ * implementation.
+ *
+ * Any user of s3c_device_i2c0 should call this, even if it is with
+ * NULL to ensure that the device is given the default platform data
+ * as the driver will no longer carry defaults.
+ */
+extern void s3c_i2c0_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c1_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c2_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c3_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c4_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c5_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c6_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s3c_i2c7_set_platdata(struct s3c2410_platform_i2c *i2c);
+extern void s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *i2c);
+
+/* defined by architecture to configure gpio */
+extern void s3c_i2c0_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c1_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c2_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c3_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c4_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c5_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c6_cfg_gpio(struct platform_device *dev);
+extern void s3c_i2c7_cfg_gpio(struct platform_device *dev);
+
+extern struct s3c2410_platform_i2c default_i2c_data;
+
+#endif /* __I2C_S3C2410_H */
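As a sketch of the calling convention documented above (platform data is copied, a NULL cfg_gpio falls back to the default), a machine file might do something like the following; all numbers are illustrative:

static struct s3c2410_platform_i2c i2c0_pdata __initdata = {
        .bus_num    = 0,
        .flags      = 0,
        .slave_addr = 0x10,
        .frequency  = 100 * 1000,       /* Hz ceiling, never exceeded */
        .sda_delay  = 100,              /* ns */
        /* .cfg_gpio left NULL: the default implementation is used */
};

/* machine init: s3c_i2c0_set_platdata(&i2c0_pdata); */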
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
new file mode 100644
index 000000000..9abc0ca72
--- /dev/null
+++ b/include/linux/platform_data/ina2xx.h
@@ -0,0 +1,19 @@
+/*
+ * Driver for Texas Instruments INA219, INA226 power monitor chips
+ *
+ * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * For further information, see the Documentation/hwmon/ina2xx file.
+ */
+
+/**
+ * struct ina2xx_platform_data - ina2xx info
+ * @shunt_uohms: shunt resistance in microohms
+ */
+struct ina2xx_platform_data {
+ long shunt_uohms;
+};
diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/intel-mid_wdt.h
new file mode 100644
index 000000000..b98253466
--- /dev/null
+++ b/include/linux/platform_data/intel-mid_wdt.h
@@ -0,0 +1,22 @@
+/*
+ * intel-mid_wdt: generic Intel MID SCU watchdog driver
+ *
+ * Copyright (C) 2014 Intel Corporation. All rights reserved.
+ * Contact: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ */
+
+#ifndef __INTEL_MID_WDT_H__
+#define __INTEL_MID_WDT_H__
+
+#include <linux/platform_device.h>
+
+struct intel_mid_wdt_pdata {
+ int irq;
+ int (*probe)(struct platform_device *pdev);
+};
+
+#endif /*__INTEL_MID_WDT_H__*/
diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h
new file mode 100644
index 000000000..ad3aa7b95
--- /dev/null
+++ b/include/linux/platform_data/invensense_mpu6050.h
@@ -0,0 +1,31 @@
+/*
+* Copyright (C) 2012 Invensense, Inc.
+*
+* This software is licensed under the terms of the GNU General Public
+* License version 2, as published by the Free Software Foundation, and
+* may be copied, distributed, and modified under those terms.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef __INV_MPU6050_PLATFORM_H_
+#define __INV_MPU6050_PLATFORM_H_
+
+/**
+ * struct inv_mpu6050_platform_data - Platform data for the mpu driver
+ * @orientation: Orientation matrix of the chip
+ *
+ * Contains platform specific information on how to configure the MPU6050 to
+ * work on this platform. The orientation matrices are 3x3 rotation matrices
+ * that are applied to the data to rotate from the mounting orientation to the
+ * platform orientation. The values must be one of 0, 1, or -1 and each row and
+ * column should have exactly 1 non-zero value.
+ */
+struct inv_mpu6050_platform_data {
+ __s8 orientation[9];
+};
+
+#endif
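For a chip mounted in the platform's reference orientation the rotation matrix reduces to the identity, which satisfies the 0/1/-1 row/column constraint described above:

static struct inv_mpu6050_platform_data mpu_pdata = {
        .orientation = {  1, 0, 0,
                          0, 1, 0,
                          0, 0, 1 },
};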
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
new file mode 100644
index 000000000..54a0a9582
--- /dev/null
+++ b/include/linux/platform_data/iommu-omap.h
@@ -0,0 +1,48 @@
+/*
+ * omap iommu: main structures
+ *
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+
+#define MMU_REG_SIZE 256
+
+/**
+ * struct iommu_arch_data - omap iommu private data
+ * @name: name of the iommu device
+ * @iommu_dev: handle of the iommu device
+ *
+ * This is an omap iommu private data object, which binds an iommu user
+ * to its iommu device. This object should be placed at the iommu user's
+ * dev_archdata so that the generic IOMMU API can be used without having to
+ * utilize omap-specific plumbing anymore.
+ */
+struct omap_iommu_arch_data {
+ const char *name;
+ struct omap_iommu *iommu_dev;
+};
+
+/**
+ * struct omap_mmu_dev_attr - OMAP mmu device attributes for omap_hwmod
+ * @nr_tlb_entries: number of entries supported by the translation
+ * look-aside buffer (TLB).
+ */
+struct omap_mmu_dev_attr {
+ int nr_tlb_entries;
+};
+
+struct iommu_platform_data {
+ const char *name;
+ const char *reset_name;
+ int nr_tlb_entries;
+
+ int (*assert_reset)(struct platform_device *pdev, const char *name);
+ int (*deassert_reset)(struct platform_device *pdev, const char *name);
+};
diff --git a/include/linux/platform_data/irda-pxaficp.h b/include/linux/platform_data/irda-pxaficp.h
new file mode 100644
index 000000000..3cd41f77d
--- /dev/null
+++ b/include/linux/platform_data/irda-pxaficp.h
@@ -0,0 +1,25 @@
+#ifndef ASMARM_ARCH_IRDA_H
+#define ASMARM_ARCH_IRDA_H
+
+/* board specific transceiver capabilities */
+
+#define IR_OFF 1
+#define IR_SIRMODE 2
+#define IR_FIRMODE 4
+
+struct pxaficp_platform_data {
+ int transceiver_cap;
+ void (*transceiver_mode)(struct device *dev, int mode);
+ int (*startup)(struct device *dev);
+ void (*shutdown)(struct device *dev);
+ int gpio_pwdown; /* powerdown GPIO for the IrDA chip */
+ bool gpio_pwdown_inverted; /* gpio_pwdown is inverted */
+};
+
+extern void pxa_set_ficp_info(struct pxaficp_platform_data *info);
+
+#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
+void pxa2xx_transceiver_mode(struct device *dev, int mode);
+#endif
+
+#endif
diff --git a/include/linux/platform_data/irda-sa11x0.h b/include/linux/platform_data/irda-sa11x0.h
new file mode 100644
index 000000000..38f77b5e5
--- /dev/null
+++ b/include/linux/platform_data/irda-sa11x0.h
@@ -0,0 +1,20 @@
+/*
+ * arch/arm/include/asm/mach/irda.h
+ *
+ * Copyright (C) 2004 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARM_MACH_IRDA_H
+#define __ASM_ARM_MACH_IRDA_H
+
+struct irda_platform_data {
+ int (*startup)(struct device *);
+ void (*shutdown)(struct device *);
+ int (*set_power)(struct device *, unsigned int state);
+ void (*set_speed)(struct device *, unsigned int speed);
+};
+
+#endif
diff --git a/include/linux/platform_data/irq-renesas-intc-irqpin.h b/include/linux/platform_data/irq-renesas-intc-irqpin.h
new file mode 100644
index 000000000..e4cb91106
--- /dev/null
+++ b/include/linux/platform_data/irq-renesas-intc-irqpin.h
@@ -0,0 +1,29 @@
+/*
+ * Renesas INTC External IRQ Pin Driver
+ *
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __IRQ_RENESAS_INTC_IRQPIN_H__
+#define __IRQ_RENESAS_INTC_IRQPIN_H__
+
+struct renesas_intc_irqpin_config {
+ unsigned int sense_bitfield_width;
+ unsigned int irq_base;
+ bool control_parent;
+};
+
+#endif /* __IRQ_RENESAS_INTC_IRQPIN_H__ */
diff --git a/include/linux/platform_data/irq-renesas-irqc.h b/include/linux/platform_data/irq-renesas-irqc.h
new file mode 100644
index 000000000..3ae17b3e0
--- /dev/null
+++ b/include/linux/platform_data/irq-renesas-irqc.h
@@ -0,0 +1,27 @@
+/*
+ * Renesas IRQC Driver
+ *
+ * Copyright (C) 2013 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __IRQ_RENESAS_IRQC_H__
+#define __IRQ_RENESAS_IRQC_H__
+
+struct renesas_irqc_config {
+ unsigned int irq_base;
+};
+
+#endif /* __IRQ_RENESAS_IRQC_H__ */
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
new file mode 100644
index 000000000..1419133fa
--- /dev/null
+++ b/include/linux/platform_data/isl9305.h
@@ -0,0 +1,30 @@
+/*
+ * isl9305 - Intersil ISL9305 DCDC regulator
+ *
+ * Copyright 2014 Linaro Ltd
+ *
+ * Author: Mark Brown <broonie@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ISL9305_H
+#define __ISL9305_H
+
+#define ISL9305_DCD1 0
+#define ISL9305_DCD2 1
+#define ISL9305_LDO1 2
+#define ISL9305_LDO2 3
+
+#define ISL9305_MAX_REGULATOR ISL9305_LDO2
+
+struct regulator_init_data;
+
+struct isl9305_pdata {
+ struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
+};
+
+#endif
diff --git a/include/linux/platform_data/keyboard-pxa930_rotary.h b/include/linux/platform_data/keyboard-pxa930_rotary.h
new file mode 100644
index 000000000..053587caf
--- /dev/null
+++ b/include/linux/platform_data/keyboard-pxa930_rotary.h
@@ -0,0 +1,20 @@
+#ifndef __ASM_ARCH_PXA930_ROTARY_H
+#define __ASM_ARCH_PXA930_ROTARY_H
+
+/* NOTE:
+ *
+ * The rotary can be interpreted either as a relative input event (e.g.
+ * REL_WHEEL or REL_HWHEEL) or as specific key events (e.g. UP/DOWN or
+ * LEFT/RIGHT), depending on whether up_key & down_key or rel_code is
+ * assigned a non-zero value. When all are non-zero, up_key and down_key
+ * are preferred.
+ */
+struct pxa930_rotary_platform_data {
+ int up_key;
+ int down_key;
+ int rel_code;
+};
+
+void __init pxa930_set_rotarykey_info(struct pxa930_rotary_platform_data *info);
+
+#endif /* __ASM_ARCH_PXA930_ROTARY_H */
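Following the note above, leaving up_key/down_key at zero and assigning rel_code makes the rotary report relative wheel events; REL_WHEEL comes from <linux/input.h> and the setup is purely illustrative:

static struct pxa930_rotary_platform_data rotary_pdata = {
        .up_key   = 0,          /* unused: rel_code takes effect */
        .down_key = 0,
        .rel_code = REL_WHEEL,
};

/* board init: pxa930_set_rotarykey_info(&rotary_pdata); */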
diff --git a/include/linux/platform_data/keyboard-spear.h b/include/linux/platform_data/keyboard-spear.h
new file mode 100644
index 000000000..9248e3a7e
--- /dev/null
+++ b/include/linux/platform_data/keyboard-spear.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2010 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __PLAT_KEYBOARD_H
+#define __PLAT_KEYBOARD_H
+
+#include <linux/bitops.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/types.h>
+
+#define DECLARE_9x9_KEYMAP(_name) \
+int _name[] = { \
+ KEY(0, 0, KEY_ESC), \
+ KEY(0, 1, KEY_1), \
+ KEY(0, 2, KEY_2), \
+ KEY(0, 3, KEY_3), \
+ KEY(0, 4, KEY_4), \
+ KEY(0, 5, KEY_5), \
+ KEY(0, 6, KEY_6), \
+ KEY(0, 7, KEY_7), \
+ KEY(0, 8, KEY_8), \
+ KEY(1, 0, KEY_9), \
+ KEY(1, 1, KEY_MINUS), \
+ KEY(1, 2, KEY_EQUAL), \
+ KEY(1, 3, KEY_BACKSPACE), \
+ KEY(1, 4, KEY_TAB), \
+ KEY(1, 5, KEY_Q), \
+ KEY(1, 6, KEY_W), \
+ KEY(1, 7, KEY_E), \
+ KEY(1, 8, KEY_R), \
+ KEY(2, 0, KEY_T), \
+ KEY(2, 1, KEY_Y), \
+ KEY(2, 2, KEY_U), \
+ KEY(2, 3, KEY_I), \
+ KEY(2, 4, KEY_O), \
+ KEY(2, 5, KEY_P), \
+ KEY(2, 6, KEY_LEFTBRACE), \
+ KEY(2, 7, KEY_RIGHTBRACE), \
+ KEY(2, 8, KEY_ENTER), \
+ KEY(3, 0, KEY_LEFTCTRL), \
+ KEY(3, 1, KEY_A), \
+ KEY(3, 2, KEY_S), \
+ KEY(3, 3, KEY_D), \
+ KEY(3, 4, KEY_F), \
+ KEY(3, 5, KEY_G), \
+ KEY(3, 6, KEY_H), \
+ KEY(3, 7, KEY_J), \
+ KEY(3, 8, KEY_K), \
+ KEY(4, 0, KEY_L), \
+ KEY(4, 1, KEY_SEMICOLON), \
+ KEY(4, 2, KEY_APOSTROPHE), \
+ KEY(4, 3, KEY_GRAVE), \
+ KEY(4, 4, KEY_LEFTSHIFT), \
+ KEY(4, 5, KEY_BACKSLASH), \
+ KEY(4, 6, KEY_Z), \
+ KEY(4, 7, KEY_X), \
+ KEY(4, 8, KEY_C), \
+ KEY(5, 0, KEY_V), \
+ KEY(5, 1, KEY_B), \
+ KEY(5, 2, KEY_N), \
+ KEY(5, 3, KEY_M), \
+ KEY(5, 4, KEY_COMMA), \
+ KEY(5, 5, KEY_DOT), \
+ KEY(5, 6, KEY_SLASH), \
+ KEY(5, 7, KEY_RIGHTSHIFT), \
+ KEY(5, 8, KEY_KPASTERISK), \
+ KEY(6, 0, KEY_LEFTALT), \
+ KEY(6, 1, KEY_SPACE), \
+ KEY(6, 2, KEY_CAPSLOCK), \
+ KEY(6, 3, KEY_F1), \
+ KEY(6, 4, KEY_F2), \
+ KEY(6, 5, KEY_F3), \
+ KEY(6, 6, KEY_F4), \
+ KEY(6, 7, KEY_F5), \
+ KEY(6, 8, KEY_F6), \
+ KEY(7, 0, KEY_F7), \
+ KEY(7, 1, KEY_F8), \
+ KEY(7, 2, KEY_F9), \
+ KEY(7, 3, KEY_F10), \
+ KEY(7, 4, KEY_NUMLOCK), \
+ KEY(7, 5, KEY_SCROLLLOCK), \
+ KEY(7, 6, KEY_KP7), \
+ KEY(7, 7, KEY_KP8), \
+ KEY(7, 8, KEY_KP9), \
+ KEY(8, 0, KEY_KPMINUS), \
+ KEY(8, 1, KEY_KP4), \
+ KEY(8, 2, KEY_KP5), \
+ KEY(8, 3, KEY_KP6), \
+ KEY(8, 4, KEY_KPPLUS), \
+ KEY(8, 5, KEY_KP1), \
+ KEY(8, 6, KEY_KP2), \
+ KEY(8, 7, KEY_KP3), \
+ KEY(8, 8, KEY_KP0), \
+}
+
+#define DECLARE_6x6_KEYMAP(_name) \
+int _name[] = { \
+ KEY(0, 0, KEY_RESERVED), \
+ KEY(0, 1, KEY_1), \
+ KEY(0, 2, KEY_2), \
+ KEY(0, 3, KEY_3), \
+ KEY(0, 4, KEY_4), \
+ KEY(0, 5, KEY_5), \
+ KEY(1, 0, KEY_Q), \
+ KEY(1, 1, KEY_W), \
+ KEY(1, 2, KEY_E), \
+ KEY(1, 3, KEY_R), \
+ KEY(1, 4, KEY_T), \
+ KEY(1, 5, KEY_Y), \
+ KEY(2, 0, KEY_D), \
+ KEY(2, 1, KEY_F), \
+ KEY(2, 2, KEY_G), \
+ KEY(2, 3, KEY_H), \
+ KEY(2, 4, KEY_J), \
+ KEY(2, 5, KEY_K), \
+ KEY(3, 0, KEY_B), \
+ KEY(3, 1, KEY_N), \
+ KEY(3, 2, KEY_M), \
+ KEY(3, 3, KEY_COMMA), \
+ KEY(3, 4, KEY_DOT), \
+ KEY(3, 5, KEY_SLASH), \
+ KEY(4, 0, KEY_F6), \
+ KEY(4, 1, KEY_F7), \
+ KEY(4, 2, KEY_F8), \
+ KEY(4, 3, KEY_F9), \
+ KEY(4, 4, KEY_F10), \
+ KEY(4, 5, KEY_NUMLOCK), \
+ KEY(5, 0, KEY_KP2), \
+ KEY(5, 1, KEY_KP3), \
+ KEY(5, 2, KEY_KP0), \
+ KEY(5, 3, KEY_KPDOT), \
+ KEY(5, 4, KEY_RO), \
+ KEY(5, 5, KEY_ZENKAKUHANKAKU), \
+}
+
+#define KEYPAD_9x9 0
+#define KEYPAD_6x6 1
+#define KEYPAD_2x2 2
+
+/**
+ * struct kbd_platform_data - spear keyboard platform data
+ * @keymap: pointer to keymap data (table and size)
+ * @rep: enables key autorepeat
+ * @mode: selected keyboard matrix (9x9, 6x6 or 2x2)
+ * @suspended_rate: rate at which the keyboard operates in suspended mode
+ *
+ * This structure is supposed to be used by platform code to supply
+ * keymaps to drivers that implement keyboards.
+ */
+struct kbd_platform_data {
+ const struct matrix_keymap_data *keymap;
+ bool rep;
+ unsigned int mode;
+ unsigned int suspended_rate;
+};
+
+#endif /* __PLAT_KEYBOARD_H */
diff --git a/include/linux/platform_data/keypad-ep93xx.h b/include/linux/platform_data/keypad-ep93xx.h
new file mode 100644
index 000000000..adccee25b
--- /dev/null
+++ b/include/linux/platform_data/keypad-ep93xx.h
@@ -0,0 +1,31 @@
+#ifndef __KEYPAD_EP93XX_H
+#define __KEYPAD_EP93XX_H
+
+struct matrix_keymap_data;
+
+/* flags for the ep93xx_keypad driver */
+#define EP93XX_KEYPAD_DISABLE_3_KEY (1<<0) /* disable 3-key reset */
+#define EP93XX_KEYPAD_DIAG_MODE (1<<1) /* diagnostic mode */
+#define EP93XX_KEYPAD_BACK_DRIVE (1<<2) /* back driving mode */
+#define EP93XX_KEYPAD_TEST_MODE (1<<3) /* scan only column 0 */
+#define EP93XX_KEYPAD_KDIV (1<<4) /* 1/4 clock or 1/16 clock */
+#define EP93XX_KEYPAD_AUTOREPEAT (1<<5) /* enable key autorepeat */
+
+/**
+ * struct ep93xx_keypad_platform_data - platform specific device structure
+ * @keymap_data: pointer to &matrix_keymap_data
+ * @debounce: debounce start count; terminal count is 0xff
+ * @prescale: row/column counter pre-scaler load value
+ * @flags: see above
+ */
+struct ep93xx_keypad_platform_data {
+ struct matrix_keymap_data *keymap_data;
+ unsigned int debounce;
+ unsigned int prescale;
+ unsigned int flags;
+};
+
+#define EP93XX_MATRIX_ROWS (8)
+#define EP93XX_MATRIX_COLS (8)
+
+#endif /* __KEYPAD_EP93XX_H */
diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h
new file mode 100644
index 000000000..31382fbc0
--- /dev/null
+++ b/include/linux/platform_data/keypad-nomadik-ske.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com>
+ *
+ * ux500 Scroll key and Keypad Encoder (SKE) header
+ */
+
+#ifndef __SKE_H
+#define __SKE_H
+
+#include <linux/input/matrix_keypad.h>
+
+/* register definitions for SKE peripheral */
+#define SKE_CR 0x00
+#define SKE_VAL0 0x04
+#define SKE_VAL1 0x08
+#define SKE_DBCR 0x0C
+#define SKE_IMSC 0x10
+#define SKE_RIS 0x14
+#define SKE_MIS 0x18
+#define SKE_ICR 0x1C
+
+/*
+ * Keypad module
+ */
+
+/**
+ * struct keypad_platform_data - structure for platform specific data
+ * @init: pointer to keypad init function
+ * @exit: pointer to keypad deinitialisation function
+ * @keymap_data: matrix scan code table for keycodes
+ * @krow: maximum number of rows
+ * @kcol: maximum number of columns
+ * @debounce_ms: platform specific debounce time
+ * @no_autorepeat: true to disable key autorepeat
+ * @wakeup_enable: allow waking up the system
+ */
+struct ske_keypad_platform_data {
+ int (*init)(void);
+ int (*exit)(void);
+ const struct matrix_keymap_data *keymap_data;
+ u8 krow;
+ u8 kcol;
+ u8 debounce_ms;
+ bool no_autorepeat;
+ bool wakeup_enable;
+};
+#endif /* __SKE_H */
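A hypothetical board description for an 8x8 SKE keypad might look like the sketch below; board_keymap_data stands for a matrix_keymap_data table assumed to be defined elsewhere in the board file:

static struct ske_keypad_platform_data ske_pdata = {
        .keymap_data    = &board_keymap_data,   /* hypothetical table */
        .krow           = 8,
        .kcol           = 8,
        .debounce_ms    = 10,
        .no_autorepeat  = true,
        .wakeup_enable  = true,
};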
diff --git a/include/linux/platform_data/keypad-omap.h b/include/linux/platform_data/keypad-omap.h
new file mode 100644
index 000000000..c3a3abae9
--- /dev/null
+++ b/include/linux/platform_data/keypad-omap.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2006 Komal Shah <komal_shah802003@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __KEYPAD_OMAP_H
+#define __KEYPAD_OMAP_H
+
+#ifndef CONFIG_ARCH_OMAP1
+#warning Please update the board to use matrix-keypad driver
+#define omap_readw(reg) 0
+#define omap_writew(val, reg) do {} while (0)
+#endif
+#include <linux/input/matrix_keypad.h>
+
+struct omap_kp_platform_data {
+ int rows;
+ int cols;
+ const struct matrix_keymap_data *keymap_data;
+ bool rep;
+ unsigned long delay;
+ bool dbounce;
+ /* specific to OMAP242x*/
+ unsigned int *row_gpios;
+ unsigned int *col_gpios;
+};
+
+/* Group (0..3) -- when multiple keys are pressed, only the
+ * keys pressed in the same group are considered as pressed. This is
+ * in order to work around certain crappy HW designs that produce ghost
+ * keypresses. Two free bits, used by neither row/col nor keynum,
+ * must be available for use as group bits. The below GROUP_SHIFT
+ * macro definition is based on some prior knowledge of the
+ * matrix_keypad defined KEY() macro internals.
+ */
+#define GROUP_SHIFT 14
+#define GROUP_0 (0 << GROUP_SHIFT)
+#define GROUP_1 (1 << GROUP_SHIFT)
+#define GROUP_2 (2 << GROUP_SHIFT)
+#define GROUP_3 (3 << GROUP_SHIFT)
+#define GROUP_MASK GROUP_3
+#if KEY_MAX & GROUP_MASK
+#error Group bits in conflict with keynum bits
+#endif
+
+
+#endif
+
diff --git a/include/linux/platform_data/keypad-pxa27x.h b/include/linux/platform_data/keypad-pxa27x.h
new file mode 100644
index 000000000..24625569d
--- /dev/null
+++ b/include/linux/platform_data/keypad-pxa27x.h
@@ -0,0 +1,72 @@
+#ifndef __ASM_ARCH_PXA27x_KEYPAD_H
+#define __ASM_ARCH_PXA27x_KEYPAD_H
+
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+
+#define MAX_MATRIX_KEY_ROWS (8)
+#define MAX_MATRIX_KEY_COLS (8)
+#define MATRIX_ROW_SHIFT (3)
+#define MAX_DIRECT_KEY_NUM (8)
+
+/* pxa3xx keypad platform specific parameters
+ *
+ * NOTE:
+ * 1. direct_key_num indicates the number of keys in the direct keypad
+ *    _plus_ the number of rotary-encoder sensor inputs; it can be
+ *    left as 0 if only rotary encoders are enabled, in which case the
+ *    driver will calculate it automatically
+ *
+ * 2. direct_key_map is the key code map for the direct keys, if rotary
+ * encoder(s) are enabled, direct key 0/1(2/3) will be ignored
+ *
+ * 3. rotary can be either interpreted as a relative input event (e.g.
+ * REL_WHEEL/REL_HWHEEL) or specific keys (e.g. UP/DOWN/LEFT/RIGHT)
+ *
+ * 4. matrix key and direct key will use the same debounce_interval by
+ * default, which should be sufficient in most cases
+ *
+ * pxa168 keypad platform specific parameter
+ *
+ * NOTE:
+ * clear_wakeup_event callback is a workaround required to clear the
+ * keypad interrupt. The keypad wake must be cleared in addition to
+ * reading the MI/DI bits in the KPC register.
+ */
+struct pxa27x_keypad_platform_data {
+
+ /* code map for the matrix keys */
+ const struct matrix_keymap_data *matrix_keymap_data;
+ unsigned int matrix_key_rows;
+ unsigned int matrix_key_cols;
+
+ /* direct keys */
+ int direct_key_num;
+ unsigned int direct_key_map[MAX_DIRECT_KEY_NUM];
+ /* the key output may be low active */
+ int direct_key_low_active;
+ /* give board a chance to choose the start direct key */
+ unsigned int direct_key_mask;
+
+ /* rotary encoders 0 */
+ int enable_rotary0;
+ int rotary0_rel_code;
+ int rotary0_up_key;
+ int rotary0_down_key;
+
+ /* rotary encoders 1 */
+ int enable_rotary1;
+ int rotary1_rel_code;
+ int rotary1_up_key;
+ int rotary1_down_key;
+
+ /* key debounce interval */
+ unsigned int debounce_interval;
+
+ /* clear wakeup event requirement for pxa168 */
+ void (*clear_wakeup_event)(void);
+};
+
+extern void pxa_set_keypad_info(struct pxa27x_keypad_platform_data *info);
+
+#endif /* __ASM_ARCH_PXA27x_KEYPAD_H */
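Pulling the notes above together, a board with a small matrix plus rotary encoder 0 acting as a scroll wheel might supply something like this; the keymap table and all numeric values are assumptions:

static struct pxa27x_keypad_platform_data keypad_pdata = {
        .matrix_keymap_data     = &board_keymap_data,   /* hypothetical */
        .matrix_key_rows        = 4,
        .matrix_key_cols        = 4,

        .enable_rotary0         = 1,
        .rotary0_rel_code       = REL_WHEEL,    /* relative event, no up/down keys */

        .debounce_interval      = 30,   /* shared by matrix and direct keys */
};

/* board init: pxa_set_keypad_info(&keypad_pdata); */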
diff --git a/include/linux/platform_data/keypad-w90p910.h b/include/linux/platform_data/keypad-w90p910.h
new file mode 100644
index 000000000..556778e8d
--- /dev/null
+++ b/include/linux/platform_data/keypad-w90p910.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_ARCH_W90P910_KEYPAD_H
+#define __ASM_ARCH_W90P910_KEYPAD_H
+
+#include <linux/input/matrix_keypad.h>
+
+extern void mfp_set_groupi(struct device *dev);
+
+struct w90p910_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+
+ unsigned int prescale;
+ unsigned int debounce;
+};
+
+#endif /* __ASM_ARCH_W90P910_KEYPAD_H */
diff --git a/include/linux/platform_data/keyscan-davinci.h b/include/linux/platform_data/keyscan-davinci.h
new file mode 100644
index 000000000..7a560e05b
--- /dev/null
+++ b/include/linux/platform_data/keyscan-davinci.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2009 Texas Instruments, Inc
+ *
+ * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef DAVINCI_KEYSCAN_H
+#define DAVINCI_KEYSCAN_H
+
+#include <linux/io.h>
+
+enum davinci_matrix_types {
+ DAVINCI_KEYSCAN_MATRIX_4X4,
+ DAVINCI_KEYSCAN_MATRIX_5X3,
+};
+
+struct davinci_ks_platform_data {
+ int (*device_enable)(struct device *dev);
+ unsigned short *keymap;
+ u32 keymapsize;
+ u8 rep:1;
+ u8 strobe;
+ u8 interval;
+ u8 matrix_type;
+};
+
+#endif
+
diff --git a/include/linux/platform_data/lcd-mipid.h b/include/linux/platform_data/lcd-mipid.h
new file mode 100644
index 000000000..8e52c6572
--- /dev/null
+++ b/include/linux/platform_data/lcd-mipid.h
@@ -0,0 +1,29 @@
+#ifndef __LCD_MIPID_H
+#define __LCD_MIPID_H
+
+enum mipid_test_num {
+ MIPID_TEST_RGB_LINES,
+};
+
+enum mipid_test_result {
+ MIPID_TEST_SUCCESS,
+ MIPID_TEST_INVALID,
+ MIPID_TEST_FAILED,
+};
+
+#ifdef __KERNEL__
+
+struct mipid_platform_data {
+ int nreset_gpio;
+ int data_lines;
+
+ void (*shutdown)(struct mipid_platform_data *pdata);
+ void (*set_bklight_level)(struct mipid_platform_data *pdata,
+ int level);
+ int (*get_bklight_level)(struct mipid_platform_data *pdata);
+ int (*get_bklight_max)(struct mipid_platform_data *pdata);
+};
+
+#endif
+
+#endif
diff --git a/include/linux/platform_data/leds-kirkwood-netxbig.h b/include/linux/platform_data/leds-kirkwood-netxbig.h
new file mode 100644
index 000000000..d2be19a51
--- /dev/null
+++ b/include/linux/platform_data/leds-kirkwood-netxbig.h
@@ -0,0 +1,53 @@
+/*
+ * Platform data structure for netxbig LED driver
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LEDS_KIRKWOOD_NETXBIG_H
+#define __LEDS_KIRKWOOD_NETXBIG_H
+
+struct netxbig_gpio_ext {
+ unsigned *addr;
+ int num_addr;
+ unsigned *data;
+ int num_data;
+ unsigned enable;
+};
+
+enum netxbig_led_mode {
+ NETXBIG_LED_OFF,
+ NETXBIG_LED_ON,
+ NETXBIG_LED_SATA,
+ NETXBIG_LED_TIMER1,
+ NETXBIG_LED_TIMER2,
+ NETXBIG_LED_MODE_NUM,
+};
+
+#define NETXBIG_LED_INVALID_MODE NETXBIG_LED_MODE_NUM
+
+struct netxbig_led_timer {
+ unsigned long delay_on;
+ unsigned long delay_off;
+ enum netxbig_led_mode mode;
+};
+
+struct netxbig_led {
+ const char *name;
+ const char *default_trigger;
+ int mode_addr;
+ int *mode_val;
+ int bright_addr;
+};
+
+struct netxbig_led_platform_data {
+ struct netxbig_gpio_ext *gpio_ext;
+ struct netxbig_led_timer *timer;
+ int num_timer;
+ struct netxbig_led *leds;
+ int num_leds;
+};
+
+#endif /* __LEDS_KIRKWOOD_NETXBIG_H */
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h
new file mode 100644
index 000000000..6a9fed57f
--- /dev/null
+++ b/include/linux/platform_data/leds-kirkwood-ns2.h
@@ -0,0 +1,24 @@
+/*
+ * Platform data structure for Network Space v2 LED driver
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LEDS_KIRKWOOD_NS2_H
+#define __LEDS_KIRKWOOD_NS2_H
+
+struct ns2_led {
+ const char *name;
+ const char *default_trigger;
+ unsigned cmd;
+ unsigned slow;
+};
+
+struct ns2_led_platform_data {
+ int num_leds;
+ struct ns2_led *leds;
+};
+
+#endif /* __LEDS_KIRKWOOD_NS2_H */
diff --git a/include/linux/platform_data/leds-lm355x.h b/include/linux/platform_data/leds-lm355x.h
new file mode 100644
index 000000000..b88724bb0
--- /dev/null
+++ b/include/linux/platform_data/leds-lm355x.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Simple driver for Texas Instruments LM355x LED driver chip
+ *
+ * Author: G.Shark Jeong <gshark.jeong@gmail.com>
+ * Daniel Jeong <daniel.jeong@ti.com>
+ */
+
+#define LM355x_NAME "leds-lm355x"
+#define LM3554_NAME "leds-lm3554"
+#define LM3556_NAME "leds-lm3556"
+
+/* lm3554 : strobe def. on */
+enum lm355x_strobe {
+ LM355x_PIN_STROBE_DISABLE = 0x00,
+ LM355x_PIN_STROBE_ENABLE = 0x01,
+};
+
+enum lm355x_torch {
+ LM355x_PIN_TORCH_DISABLE = 0,
+ LM3554_PIN_TORCH_ENABLE = 0x80,
+ LM3556_PIN_TORCH_ENABLE = 0x10,
+};
+
+enum lm355x_tx2 {
+ LM355x_PIN_TX_DISABLE = 0,
+ LM3554_PIN_TX_ENABLE = 0x20,
+ LM3556_PIN_TX_ENABLE = 0x40,
+};
+
+enum lm355x_ntc {
+ LM355x_PIN_NTC_DISABLE = 0,
+ LM3554_PIN_NTC_ENABLE = 0x08,
+ LM3556_PIN_NTC_ENABLE = 0x80,
+};
+
+enum lm355x_pmode {
+ LM355x_PMODE_DISABLE = 0,
+ LM355x_PMODE_ENABLE = 0x04,
+};
+
+/*
+ * struct lm355x_platform_data
+ * @pin_strobe: strobe input
+ * @pin_tx1 : torch input pin
+ * lm3554-tx1/torch/gpio1
+ * lm3556-torch
+ * @pin_tx2 : input pin
+ * lm3554-envm/tx2/gpio2
+ * lm3556-tx pin
+ * @ntc_pin : output pin
+ * lm3554-ledi/ntc
+ * lm3556-temp pin
+ * @pass_mode : pass mode
+ */
+struct lm355x_platform_data {
+ enum lm355x_strobe pin_strobe;
+ enum lm355x_torch pin_tx1;
+ enum lm355x_tx2 pin_tx2;
+ enum lm355x_ntc ntc_pin;
+
+ enum lm355x_pmode pass_mode;
+};
diff --git a/include/linux/platform_data/leds-lm3642.h b/include/linux/platform_data/leds-lm3642.h
new file mode 100644
index 000000000..72d6ee6ad
--- /dev/null
+++ b/include/linux/platform_data/leds-lm3642.h
@@ -0,0 +1,38 @@
+/*
+* Copyright (C) 2012 Texas Instruments
+*
+* License Terms: GNU General Public License v2
+*
+* Simple driver for Texas Instruments LM3642 LED driver chip
+*
+* Author: G.Shark Jeong <gshark.jeong@gmail.com>
+* Daniel Jeong <daniel.jeong@ti.com>
+*/
+
+#ifndef __LINUX_LM3642_H
+#define __LINUX_LM3642_H
+
+#define LM3642_NAME "leds-lm3642"
+
+enum lm3642_torch_pin_enable {
+ LM3642_TORCH_PIN_DISABLE = 0x00,
+ LM3642_TORCH_PIN_ENABLE = 0x10,
+};
+
+enum lm3642_strobe_pin_enable {
+ LM3642_STROBE_PIN_DISABLE = 0x00,
+ LM3642_STROBE_PIN_ENABLE = 0x20,
+};
+
+enum lm3642_tx_pin_enable {
+ LM3642_TX_PIN_DISABLE = 0x00,
+ LM3642_TX_PIN_ENABLE = 0x40,
+};
+
+struct lm3642_platform_data {
+ enum lm3642_torch_pin_enable torch_pin;
+ enum lm3642_strobe_pin_enable strobe_pin;
+ enum lm3642_tx_pin_enable tx_pin;
+};
+
+#endif /* __LINUX_LM3642_H */
diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h
new file mode 100644
index 000000000..624ff9eda
--- /dev/null
+++ b/include/linux/platform_data/leds-lp55xx.h
@@ -0,0 +1,81 @@
+/*
+ * LP55XX Platform Data Header
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Derived from leds-lp5521.h, leds-lp5523.h
+ */
+
+#ifndef _LEDS_LP55XX_H
+#define _LEDS_LP55XX_H
+
+/* Clock configuration */
+#define LP55XX_CLOCK_AUTO 0
+#define LP55XX_CLOCK_INT 1
+#define LP55XX_CLOCK_EXT 2
+
+struct lp55xx_led_config {
+ const char *name;
+ const char *default_trigger;
+ u8 chan_nr;
+ u8 led_current; /* mA x10, 0 if led is not connected */
+ u8 max_current;
+};
+
+struct lp55xx_predef_pattern {
+ const u8 *r;
+ const u8 *g;
+ const u8 *b;
+ u8 size_r;
+ u8 size_g;
+ u8 size_b;
+};
+
+enum lp8501_pwr_sel {
+ LP8501_ALL_VDD, /* D1~9 are connected to VDD */
+ LP8501_6VDD_3VOUT, /* D1~6 with VDD, D7~9 with VOUT */
+ LP8501_3VDD_6VOUT, /* D1~6 with VOUT, D7~9 with VDD */
+ LP8501_ALL_VOUT, /* D1~9 are connected to VOUT */
+};
+
+/*
+ * struct lp55xx_platform_data
+ * @led_config : Configurable led class device
+ * @num_channels : Number of LED channels
+ * @label : Used for naming LEDs
+ * @clock_mode : Input clock mode. LP55XX_CLOCK_AUTO or _INT or _EXT
+ * @setup_resources : Platform specific function before enabling the chip
+ * @release_resources : Platform specific function after disabling the chip
+ * @enable : EN pin control by platform side
+ * @patterns : Predefined pattern data for RGB channels
+ * @num_patterns : Number of patterns
+ * @update_config : Value of CONFIG register
+ */
+struct lp55xx_platform_data {
+
+ /* LED channel configuration */
+ struct lp55xx_led_config *led_config;
+ u8 num_channels;
+ const char *label;
+
+ /* Clock configuration */
+ u8 clock_mode;
+
+ /* optional enable GPIO */
+ int enable_gpio;
+
+ /* Predefined pattern data */
+ struct lp55xx_predef_pattern *patterns;
+ unsigned int num_patterns;
+
+ /* LP8501 specific */
+ enum lp8501_pwr_sel pwr_sel;
+};
+
+#endif /* _LEDS_LP55XX_H */
diff --git a/include/linux/platform_data/leds-omap.h b/include/linux/platform_data/leds-omap.h
new file mode 100644
index 000000000..56c9b2a0a
--- /dev/null
+++ b/include/linux/platform_data/leds-omap.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2006 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASMARM_ARCH_LED_H
+#define ASMARM_ARCH_LED_H
+
+struct omap_led_config {
+ struct led_classdev cdev;
+ s16 gpio;
+};
+
+struct omap_led_platform_data {
+ s16 nr_leds;
+ struct omap_led_config *leds;
+};
+
+#endif
diff --git a/include/linux/platform_data/leds-pca963x.h b/include/linux/platform_data/leds-pca963x.h
new file mode 100644
index 000000000..e731f0036
--- /dev/null
+++ b/include/linux/platform_data/leds-pca963x.h
@@ -0,0 +1,42 @@
+/*
+ * PCA963X LED chip driver.
+ *
+ * Copyright 2012 bct electronic GmbH
+ * Copyright 2013 Qtechnology A/S
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_PCA963X_H
+#define __LINUX_PCA963X_H
+#include <linux/leds.h>
+
+enum pca963x_outdrv {
+ PCA963X_OPEN_DRAIN,
+ PCA963X_TOTEM_POLE, /* aka push-pull */
+};
+
+enum pca963x_blink_type {
+ PCA963X_SW_BLINK,
+ PCA963X_HW_BLINK,
+};
+
+struct pca963x_platform_data {
+ struct led_platform_data leds;
+ enum pca963x_outdrv outdrv;
+ enum pca963x_blink_type blink_type;
+};
+
+#endif /* __LINUX_PCA963X_H*/
diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h
new file mode 100644
index 000000000..441a6f290
--- /dev/null
+++ b/include/linux/platform_data/leds-s3c24xx.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2006 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX - LEDs GPIO connector
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __LEDS_S3C24XX_H
+#define __LEDS_S3C24XX_H
+
+#define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */
+#define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */
+
+struct s3c24xx_led_platdata {
+ unsigned int gpio;
+ unsigned int flags;
+
+ char *name;
+ char *def_trigger;
+};
+
+#endif /* __LEDS_S3C24XX_H */
diff --git a/include/linux/platform_data/lm3630a_bl.h b/include/linux/platform_data/lm3630a_bl.h
new file mode 100644
index 000000000..7538e38e2
--- /dev/null
+++ b/include/linux/platform_data/lm3630a_bl.h
@@ -0,0 +1,65 @@
+/*
+* Simple driver for Texas Instruments LM3630A backlight driver chip
+* Copyright (C) 2012 Texas Instruments
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+*/
+
+#ifndef __LINUX_LM3630A_H
+#define __LINUX_LM3630A_H
+
+#define LM3630A_NAME "lm3630a_bl"
+
+enum lm3630a_pwm_ctrl {
+ LM3630A_PWM_DISABLE = 0x00,
+ LM3630A_PWM_BANK_A,
+ LM3630A_PWM_BANK_B,
+ LM3630A_PWM_BANK_ALL,
+ LM3630A_PWM_BANK_A_ACT_LOW = 0x05,
+ LM3630A_PWM_BANK_B_ACT_LOW,
+ LM3630A_PWM_BANK_ALL_ACT_LOW,
+};
+
+enum lm3630a_leda_ctrl {
+ LM3630A_LEDA_DISABLE = 0x00,
+ LM3630A_LEDA_ENABLE = 0x04,
+ LM3630A_LEDA_ENABLE_LINEAR = 0x14,
+};
+
+enum lm3630a_ledb_ctrl {
+ LM3630A_LEDB_DISABLE = 0x00,
+ LM3630A_LEDB_ON_A = 0x01,
+ LM3630A_LEDB_ENABLE = 0x02,
+ LM3630A_LEDB_ENABLE_LINEAR = 0x0A,
+};
+
+#define LM3630A_MAX_BRIGHTNESS 255
+/*
+ * @leda_init_brt : LED A initial brightness, 4~255
+ * @leda_max_brt : LED A max brightness, 4~255
+ * @leda_ctrl : LED A disable, enable linear, enable exponential
+ * @ledb_init_brt : LED B initial brightness, 4~255
+ * @ledb_max_brt : LED B max brightness, 4~255
+ * @ledb_ctrl : LED B disable, enable linear, enable exponential
+ * @pwm_period : PWM period
+ * @pwm_ctrl : PWM disable, bank A or B, active high or low
+ */
+struct lm3630a_platform_data {
+
+ /* led a config. */
+ int leda_init_brt;
+ int leda_max_brt;
+ enum lm3630a_leda_ctrl leda_ctrl;
+ /* led b config. */
+ int ledb_init_brt;
+ int ledb_max_brt;
+ enum lm3630a_ledb_ctrl ledb_ctrl;
+ /* pwm config. */
+ unsigned int pwm_period;
+ enum lm3630a_pwm_ctrl pwm_ctrl;
+};
+
+#endif /* __LINUX_LM3630A_H */
diff --git a/include/linux/platform_data/lm3639_bl.h b/include/linux/platform_data/lm3639_bl.h
new file mode 100644
index 000000000..5234cd5ed
--- /dev/null
+++ b/include/linux/platform_data/lm3639_bl.h
@@ -0,0 +1,69 @@
+/*
+* Simple driver for Texas Instruments LM3639 LED Flash driver chip
+* Copyright (C) 2012 Texas Instruments
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+*/
+
+#ifndef __LINUX_LM3639_H
+#define __LINUX_LM3639_H
+
+#define LM3639_NAME "lm3639_bl"
+
+enum lm3639_pwm {
+ LM3639_PWM_DISABLE = 0x00,
+ LM3639_PWM_EN_ACTLOW = 0x48,
+ LM3639_PWM_EN_ACTHIGH = 0x40,
+};
+
+enum lm3639_strobe {
+ LM3639_STROBE_DISABLE = 0x00,
+ LM3639_STROBE_EN_ACTLOW = 0x10,
+ LM3639_STROBE_EN_ACTHIGH = 0x30,
+};
+
+enum lm3639_txpin {
+ LM3639_TXPIN_DISABLE = 0x00,
+ LM3639_TXPIN_EN_ACTLOW = 0x04,
+ LM3639_TXPIN_EN_ACTHIGH = 0x0C,
+};
+
+enum lm3639_fleds {
+ LM3639_FLED_DIASBLE_ALL = 0x00,
+ LM3639_FLED_EN_1 = 0x40,
+ LM3639_FLED_EN_2 = 0x20,
+ LM3639_FLED_EN_ALL = 0x60,
+};
+
+enum lm3639_bleds {
+ LM3639_BLED_DIASBLE_ALL = 0x00,
+ LM3639_BLED_EN_1 = 0x10,
+ LM3639_BLED_EN_2 = 0x08,
+ LM3639_BLED_EN_ALL = 0x18,
+};
+enum lm3639_bled_mode {
+ LM3639_BLED_MODE_EXPONETIAL = 0x00,
+ LM3639_BLED_MODE_LINEAR = 0x10,
+};
+
+struct lm3639_platform_data {
+ unsigned int max_brt_led;
+ unsigned int init_brt_led;
+
+ /* input pins */
+ enum lm3639_pwm pin_pwm;
+ enum lm3639_strobe pin_strobe;
+ enum lm3639_txpin pin_tx;
+
+ /* output pins */
+ enum lm3639_fleds fled_pins;
+ enum lm3639_bleds bled_pins;
+ enum lm3639_bled_mode bled_mode;
+
+ void (*pwm_set_intensity) (int brightness, int max_brightness);
+ int (*pwm_get_intensity) (void);
+};
+#endif /* __LINUX_LM3639_H */
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
new file mode 100644
index 000000000..9c7fd1efe
--- /dev/null
+++ b/include/linux/platform_data/lp855x.h
@@ -0,0 +1,151 @@
+/*
+ * LP855x Backlight Driver
+ *
+ * Copyright (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _LP855X_H
+#define _LP855X_H
+
+#define BL_CTL_SHFT (0)
+#define BRT_MODE_SHFT (1)
+#define BRT_MODE_MASK (0x06)
+
+/* Enable backlight. Only valid when BRT_MODE=10(I2C only) */
+#define ENABLE_BL (1)
+#define DISABLE_BL (0)
+
+#define I2C_CONFIG(id) id ## _I2C_CONFIG
+#define PWM_CONFIG(id) id ## _PWM_CONFIG
+
+/* DEVICE CONTROL register - LP8550 */
+#define LP8550_PWM_CONFIG (LP8550_PWM_ONLY << BRT_MODE_SHFT)
+#define LP8550_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \
+ (LP8550_I2C_ONLY << BRT_MODE_SHFT))
+
+/* DEVICE CONTROL register - LP8551 */
+#define LP8551_PWM_CONFIG LP8550_PWM_CONFIG
+#define LP8551_I2C_CONFIG LP8550_I2C_CONFIG
+
+/* DEVICE CONTROL register - LP8552 */
+#define LP8552_PWM_CONFIG LP8550_PWM_CONFIG
+#define LP8552_I2C_CONFIG LP8550_I2C_CONFIG
+
+/* DEVICE CONTROL register - LP8553 */
+#define LP8553_PWM_CONFIG LP8550_PWM_CONFIG
+#define LP8553_I2C_CONFIG LP8550_I2C_CONFIG
+
+/* CONFIG register - LP8555 */
+#define LP8555_PWM_STANDBY BIT(7)
+#define LP8555_PWM_FILTER BIT(6)
+#define LP8555_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset
+ when the backlight turns on */
+#define LP8555_OFF_OPENLEDS BIT(2)
+#define LP8555_PWM_CONFIG LP8555_PWM_ONLY
+#define LP8555_I2C_CONFIG LP8555_I2C_ONLY
+#define LP8555_COMB1_CONFIG LP8555_COMBINED1
+#define LP8555_COMB2_CONFIG LP8555_COMBINED2
+
+/* DEVICE CONTROL register - LP8556 */
+#define LP8556_PWM_CONFIG (LP8556_PWM_ONLY << BRT_MODE_SHFT)
+#define LP8556_COMB1_CONFIG (LP8556_COMBINED1 << BRT_MODE_SHFT)
+#define LP8556_I2C_CONFIG ((ENABLE_BL << BL_CTL_SHFT) | \
+ (LP8556_I2C_ONLY << BRT_MODE_SHFT))
+#define LP8556_COMB2_CONFIG (LP8556_COMBINED2 << BRT_MODE_SHFT)
+#define LP8556_FAST_CONFIG BIT(7) /* use it if EPROMs should be maintained
+ when exiting the low power mode */
+
+/* CONFIG register - LP8557 */
+#define LP8557_PWM_STANDBY BIT(7)
+#define LP8557_PWM_FILTER BIT(6)
+#define LP8557_RELOAD_EPROM BIT(3) /* use it if EPROMs should be reset
+ when the backlight turns on */
+#define LP8557_OFF_OPENLEDS BIT(2)
+#define LP8557_PWM_CONFIG LP8557_PWM_ONLY
+#define LP8557_I2C_CONFIG LP8557_I2C_ONLY
+#define LP8557_COMB1_CONFIG LP8557_COMBINED1
+#define LP8557_COMB2_CONFIG LP8557_COMBINED2
+
+enum lp855x_chip_id {
+ LP8550,
+ LP8551,
+ LP8552,
+ LP8553,
+ LP8555,
+ LP8556,
+ LP8557,
+};
+
+enum lp8550_brighntess_source {
+ LP8550_PWM_ONLY,
+ LP8550_I2C_ONLY = 2,
+};
+
+enum lp8551_brighntess_source {
+ LP8551_PWM_ONLY = LP8550_PWM_ONLY,
+ LP8551_I2C_ONLY = LP8550_I2C_ONLY,
+};
+
+enum lp8552_brighntess_source {
+ LP8552_PWM_ONLY = LP8550_PWM_ONLY,
+ LP8552_I2C_ONLY = LP8550_I2C_ONLY,
+};
+
+enum lp8553_brighntess_source {
+ LP8553_PWM_ONLY = LP8550_PWM_ONLY,
+ LP8553_I2C_ONLY = LP8550_I2C_ONLY,
+};
+
+enum lp8555_brightness_source {
+ LP8555_PWM_ONLY,
+ LP8555_I2C_ONLY,
+ LP8555_COMBINED1, /* Brightness register with shaped PWM */
+ LP8555_COMBINED2, /* PWM with shaped brightness register */
+};
+
+enum lp8556_brightness_source {
+ LP8556_PWM_ONLY,
+ LP8556_COMBINED1, /* pwm + i2c before the shaper block */
+ LP8556_I2C_ONLY,
+ LP8556_COMBINED2, /* pwm + i2c after the shaper block */
+};
+
+enum lp8557_brightness_source {
+ LP8557_PWM_ONLY,
+ LP8557_I2C_ONLY,
+ LP8557_COMBINED1, /* pwm + i2c after the shaper block */
+ LP8557_COMBINED2, /* pwm + i2c before the shaper block */
+};
+
+struct lp855x_rom_data {
+ u8 addr;
+ u8 val;
+};
+
+/**
+ * struct lp855x_platform_data
+ * @name : Backlight driver name. If it is not defined, default name is set.
+ * @device_control : value of DEVICE CONTROL register
+ * @initial_brightness : initial value of backlight brightness
+ * @period_ns : platform specific PWM period value, in nanoseconds.
+ *              Only valid when the brightness mode is PWM based.
+ * @size_program : total size of lp855x_rom_data
+ * @rom_data : list of new eeprom/eprom registers
+ * @supply : regulator that supplies 3V input
+ */
+struct lp855x_platform_data {
+ const char *name;
+ u8 device_control;
+ u8 initial_brightness;
+ unsigned int period_ns;
+ int size_program;
+ struct lp855x_rom_data *rom_data;
+ struct regulator *supply;
+};
+
+#endif
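As an illustration of the DEVICE CONTROL macros above, an LP8556 driven purely over I2C could be configured as follows; the name and initial brightness are placeholder values:

static struct lp855x_platform_data lp8556_pdata = {
        .name                   = "lcd-backlight",
        .device_control         = LP8556_I2C_CONFIG,
        .initial_brightness     = 0x7f,
        /* period_ns not needed: brightness is not PWM based here */
};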
diff --git a/include/linux/platform_data/lp8727.h b/include/linux/platform_data/lp8727.h
new file mode 100644
index 000000000..47128a50e
--- /dev/null
+++ b/include/linux/platform_data/lp8727.h
@@ -0,0 +1,68 @@
+/*
+ * LP8727 Micro/Mini USB IC with integrated charger
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 National Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LP8727_H
+#define _LP8727_H
+
+enum lp8727_eoc_level {
+ LP8727_EOC_5P,
+ LP8727_EOC_10P,
+ LP8727_EOC_16P,
+ LP8727_EOC_20P,
+ LP8727_EOC_25P,
+ LP8727_EOC_33P,
+ LP8727_EOC_50P,
+};
+
+enum lp8727_ichg {
+ LP8727_ICHG_90mA,
+ LP8727_ICHG_100mA,
+ LP8727_ICHG_400mA,
+ LP8727_ICHG_450mA,
+ LP8727_ICHG_500mA,
+ LP8727_ICHG_600mA,
+ LP8727_ICHG_700mA,
+ LP8727_ICHG_800mA,
+ LP8727_ICHG_900mA,
+ LP8727_ICHG_1000mA,
+};
+
+/**
+ * struct lp8727_chg_param
+ * @eoc_level : end of charge level setting
+ * @ichg : charging current
+ */
+struct lp8727_chg_param {
+ enum lp8727_eoc_level eoc_level;
+ enum lp8727_ichg ichg;
+};
+
+/**
+ * struct lp8727_platform_data
+ * @get_batt_present : check whether a battery is present
+ * @get_batt_level : get battery voltage (mV)
+ * @get_batt_capacity : get battery capacity (%)
+ * @get_batt_temp : get battery temperature
+ * @ac : charging parameters for AC type charger
+ * @usb : charging parameters for USB type charger
+ * @debounce_msec : interrupt debounce time
+ */
+struct lp8727_platform_data {
+ u8 (*get_batt_present)(void);
+ u16 (*get_batt_level)(void);
+ u8 (*get_batt_capacity)(void);
+ u8 (*get_batt_temp)(void);
+ struct lp8727_chg_param *ac;
+ struct lp8727_chg_param *usb;
+ unsigned int debounce_msec;
+};
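+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * the board file supplies battery helpers and per-charger parameters.
+ * The helper function names and current limits below are hypothetical.
+ *
+ *	static struct lp8727_chg_param myboard_ac_param = {
+ *		.eoc_level	= LP8727_EOC_10P,
+ *		.ichg		= LP8727_ICHG_700mA,
+ *	};
+ *
+ *	static struct lp8727_chg_param myboard_usb_param = {
+ *		.eoc_level	= LP8727_EOC_10P,
+ *		.ichg		= LP8727_ICHG_500mA,
+ *	};
+ *
+ *	static struct lp8727_platform_data myboard_lp8727_pdata = {
+ *		.get_batt_present	= myboard_batt_present,
+ *		.get_batt_level		= myboard_batt_level,
+ *		.get_batt_capacity	= myboard_batt_capacity,
+ *		.get_batt_temp		= myboard_batt_temp,
+ *		.ac			= &myboard_ac_param,
+ *		.usb			= &myboard_usb_param,
+ *		.debounce_msec		= 270,
+ *	};
+ */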
+
+#endif
diff --git a/include/linux/platform_data/lp8755.h b/include/linux/platform_data/lp8755.h
new file mode 100644
index 000000000..a7fd0776c
--- /dev/null
+++ b/include/linux/platform_data/lp8755.h
@@ -0,0 +1,71 @@
+/*
+ * LP8755 High Performance Power Management Unit Driver: System Interface Driver
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Author: Daniel(Geon Si) Jeong <daniel.jeong@ti.com>
+ * G.Shark Jeong <gshark.jeong@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _LP8755_H
+#define _LP8755_H
+
+#include <linux/regulator/consumer.h>
+
+#define LP8755_NAME "lp8755-regulator"
+/*
+ * PWR FAULT : power fault detected
+ * OCP : over current protect activated
+ * OVP : over voltage protect activated
+ * TEMP_WARN : thermal warning
+ * TEMP_SHDN : thermal shutdown detected
+ * I_LOAD : current measured
+ */
+#define LP8755_EVENT_PWR_FAULT REGULATOR_EVENT_FAIL
+#define LP8755_EVENT_OCP REGULATOR_EVENT_OVER_CURRENT
+#define LP8755_EVENT_OVP 0x10000
+#define LP8755_EVENT_TEMP_WARN 0x2000
+#define LP8755_EVENT_TEMP_SHDN REGULATOR_EVENT_OVER_TEMP
+#define LP8755_EVENT_I_LOAD 0x40000
+
+enum lp8755_bucks {
+ LP8755_BUCK0 = 0,
+ LP8755_BUCK1,
+ LP8755_BUCK2,
+ LP8755_BUCK3,
+ LP8755_BUCK4,
+ LP8755_BUCK5,
+ LP8755_BUCK_MAX,
+};
+
+/**
+ * multiphase configuration options
+ */
+enum lp8755_mphase_config {
+ MPHASE_CONF0,
+ MPHASE_CONF1,
+ MPHASE_CONF2,
+ MPHASE_CONF3,
+ MPHASE_CONF4,
+ MPHASE_CONF5,
+ MPHASE_CONF6,
+ MPHASE_CONF7,
+ MPHASE_CONF8,
+ MPHASE_CONF_MAX
+};
+
+/**
+ * struct lp8755_platform_data
+ * @mphase : multiphase switcher configuration (see enum lp8755_mphase_config)
+ * @buck_data : regulator init data for buck0~buck5, initial voltage in uV
+ */
+struct lp8755_platform_data {
+ int mphase;
+ struct regulator_init_data *buck_data[LP8755_BUCK_MAX];
+};
+#endif
diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h
new file mode 100644
index 000000000..7dc4d9a21
--- /dev/null
+++ b/include/linux/platform_data/lv5207lp.h
@@ -0,0 +1,19 @@
+/*
+ * lv5207lp.h - Sanyo LV5207LP LEDs Driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LV5207LP_H__
+#define __LV5207LP_H__
+
+struct device;
+
+struct lv5207lp_platform_data {
+ struct device *fbdev;
+ unsigned int max_value;
+ unsigned int def_value;
+};
+
+#endif
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
new file mode 100644
index 000000000..044a124bf
--- /dev/null
+++ b/include/linux/platform_data/macb.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACB_PDATA_H__
+#define __MACB_PDATA_H__
+
+struct macb_platform_data {
+ u32 phy_mask;
+ int phy_irq_pin; /* PHY IRQ */
+ u8 is_rmii; /* using RMII interface? */
+ u8 rev_eth_addr; /* reverse Ethernet address byte order */
+};
+
+#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/mailbox-omap.h b/include/linux/platform_data/mailbox-omap.h
new file mode 100644
index 000000000..4631dbb42
--- /dev/null
+++ b/include/linux/platform_data/mailbox-omap.h
@@ -0,0 +1,58 @@
+/*
+ * mailbox-omap.h
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PLAT_MAILBOX_H
+#define _PLAT_MAILBOX_H
+
+/* Interrupt register configuration types */
+#define MBOX_INTR_CFG_TYPE1 (0)
+#define MBOX_INTR_CFG_TYPE2 (1)
+
+/**
+ * struct omap_mbox_dev_info - OMAP mailbox device attribute info
+ * @name: name of the mailbox device
+ * @tx_id: mailbox queue id used for transmitting messages
+ * @rx_id: mailbox queue id on which messages are received
+ * @irq_id: irq identifier number to use from the hwmod data
+ * @usr_id: mailbox user id identifying the interrupt line into
+ *	the MPU interrupt controller.
+ */
+struct omap_mbox_dev_info {
+ const char *name;
+ u32 tx_id;
+ u32 rx_id;
+ u32 irq_id;
+ u32 usr_id;
+};
+
+/**
+ * struct omap_mbox_pdata - OMAP mailbox platform data
+ * @intr_type: type of interrupt configuration registers used
+ *	while programming mailbox queue interrupts
+ * @num_users: number of users (processor devices) that the mailbox
+ * h/w block can interrupt
+ * @num_fifos: number of h/w fifos within the mailbox h/w block
+ * @info_cnt: number of mailbox devices for the platform
+ * @info: array of mailbox device attributes
+ */
+struct omap_mbox_pdata {
+ u32 intr_type;
+ u32 num_users;
+ u32 num_fifos;
+ u32 info_cnt;
+ struct omap_mbox_dev_info *info;
+};
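+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * SoC code describes each sub-mailbox and then wraps the array in the
+ * platform data.  The queue/irq/user ids below are illustrative only.
+ *
+ *	static struct omap_mbox_dev_info myboard_mbox_info[] = {
+ *		{ .name = "dsp", .tx_id = 0, .rx_id = 1,
+ *		  .irq_id = 0, .usr_id = 0 },
+ *	};
+ *
+ *	static struct omap_mbox_pdata myboard_mbox_pdata = {
+ *		.intr_type	= MBOX_INTR_CFG_TYPE2,
+ *		.num_users	= 4,
+ *		.num_fifos	= 8,
+ *		.info_cnt	= ARRAY_SIZE(myboard_mbox_info),
+ *		.info		= myboard_mbox_info,
+ *	};
+ */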
+
+#endif /* _PLAT_MAILBOX_H */
diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h
new file mode 100644
index 000000000..8da8f94ee
--- /dev/null
+++ b/include/linux/platform_data/max197.h
@@ -0,0 +1,26 @@
+/*
+ * Maxim MAX197 A/D Converter Driver
+ *
+ * Copyright (c) 2012 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * For further information, see the Documentation/hwmon/max197 file.
+ */
+
+#ifndef _PDATA_MAX197_H
+#define _PDATA_MAX197_H
+
+/**
+ * struct max197_platform_data - MAX197 connectivity info
+ * @convert: Function used to start a conversion with control byte ctrl.
+ * It must return the raw data, or a negative error code.
+ */
+struct max197_platform_data {
+ int (*convert)(u8 ctrl);
+};
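+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * the board provides a convert() helper that writes the control byte to
+ * the chip and returns the raw conversion result or a negative errno.
+ * The bus-access helper below is a placeholder for the real board glue.
+ *
+ *	static int myboard_max197_convert(u8 ctrl)
+ *	{
+ *		return myboard_adc_xfer(ctrl);
+ *	}
+ *
+ *	static struct max197_platform_data myboard_max197_pdata = {
+ *		.convert = myboard_max197_convert,
+ *	};
+ */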
+
+#endif /* _PDATA_MAX197_H */
diff --git a/include/linux/platform_data/max3421-hcd.h b/include/linux/platform_data/max3421-hcd.h
new file mode 100644
index 000000000..0303d1970
--- /dev/null
+++ b/include/linux/platform_data/max3421-hcd.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2014 eGauge Systems LLC
+ * Contributed by David Mosberger-Tang <davidm@egauge.net>
+ *
+ * Platform-data structure for MAX3421 USB HCD driver.
+ *
+ */
+#ifndef MAX3421_HCD_PLAT_H_INCLUDED
+#define MAX3421_HCD_PLAT_H_INCLUDED
+
+/*
+ * This structure defines the mapping of certain auxiliary functions to the
+ * MAX3421E GPIO pins. The chip has eight GP inputs and eight GP outputs.
+ * A value of 0 indicates that the pin is not used/wired to anything.
+ *
+ * At this point, the only control the max3421-hcd driver cares about is
+ * to control Vbus (5V to the peripheral).
+ */
+struct max3421_hcd_platform_data {
+ u8 vbus_gpout; /* pin controlling Vbus */
+ u8 vbus_active_level; /* level that turns on power */
+};
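+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * a board wiring Vbus control to GPOUT3, active high, would pass:
+ *
+ *	static struct max3421_hcd_platform_data myboard_max3421_pdata = {
+ *		.vbus_gpout		= 3,
+ *		.vbus_active_level	= 1,
+ *	};
+ *
+ * The GPOUT number is illustrative; 0 means Vbus is not controlled.
+ */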
+
+#endif /* MAX3421_HCD_PLAT_H_INCLUDED */
diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h
new file mode 100644
index 000000000..ed9d3b3da
--- /dev/null
+++ b/include/linux/platform_data/max6697.h
@@ -0,0 +1,36 @@
+/*
+ * max6697.h
+ * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MAX6697_H
+#define MAX6697_H
+
+#include <linux/types.h>
+
+/*
+ * For all bit masks:
+ * bit 0: local temperature
+ * bit 1..7: remote temperatures
+ */
+struct max6697_platform_data {
+ bool smbus_timeout_disable; /* set to disable SMBus timeouts */
+ bool extended_range_enable; /* set to enable extended temp range */
+ bool beta_compensation; /* set to enable beta compensation */
+ u8 alert_mask; /* set bit to 1 to disable alert */
+ u8 over_temperature_mask; /* set bit to 1 to disable */
+ u8 resistance_cancellation; /* set bit to 0 to disable
+ * bit mask for MAX6581,
+ * boolean for other chips
+ */
+ u8 ideality_mask; /* set bit to 0 to disable */
+ u8 ideality_value; /* transistor ideality as per
+ * MAX6581 datasheet
+ */
+};
+
+#endif /* MAX6697_H */
diff --git a/include/linux/platform_data/mfd-mcp-sa11x0.h b/include/linux/platform_data/mfd-mcp-sa11x0.h
new file mode 100644
index 000000000..747cd6baf
--- /dev/null
+++ b/include/linux/platform_data/mfd-mcp-sa11x0.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2005 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MFD_MCP_SA11X0_H
+#define __MFD_MCP_SA11X0_H
+
+#include <linux/types.h>
+
+struct mcp_plat_data {
+ u32 mccr0;
+ u32 mccr1;
+ unsigned int sclk_rate;
+ void *codec_pdata;
+};
+
+#endif
diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h
new file mode 100644
index 000000000..cfda59b22
--- /dev/null
+++ b/include/linux/platform_data/microread.h
@@ -0,0 +1,35 @@
+/*
+ * Driver include for the microread NFC chip.
+ *
+ * Copyright (C) 2011 Tieto Poland
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _MICROREAD_H
+#define _MICROREAD_H
+
+#include <linux/i2c.h>
+
+#define MICROREAD_DRIVER_NAME "microread"
+
+/* board config platform data for microread */
+struct microread_nfc_platform_data {
+ unsigned int rst_gpio;
+ unsigned int irq_gpio;
+ unsigned int ioh_gpio;
+};
+
+#endif /* _MICROREAD_H */
diff --git a/include/linux/platform_data/mmc-atmel-mci.h b/include/linux/platform_data/mmc-atmel-mci.h
new file mode 100644
index 000000000..399a2d5a1
--- /dev/null
+++ b/include/linux/platform_data/mmc-atmel-mci.h
@@ -0,0 +1,22 @@
+#ifndef __MMC_ATMEL_MCI_H
+#define __MMC_ATMEL_MCI_H
+
+#include <linux/platform_data/dma-atmel.h>
+#include <linux/platform_data/dma-dw.h>
+
+/**
+ * struct mci_dma_data - DMA data for MCI interface
+ */
+struct mci_dma_data {
+#ifdef CONFIG_ARM
+ struct at_dma_slave sdata;
+#else
+ struct dw_dma_slave sdata;
+#endif
+};
+
+/* accessor macros */
+#define slave_data_ptr(s) (&(s)->sdata)
+#define find_slave_dev(s) ((s)->sdata.dma_dev)
+
+#endif /* __MMC_ATMEL_MCI_H */
diff --git a/include/linux/platform_data/mmc-davinci.h b/include/linux/platform_data/mmc-davinci.h
new file mode 100644
index 000000000..9cea4ee37
--- /dev/null
+++ b/include/linux/platform_data/mmc-davinci.h
@@ -0,0 +1,36 @@
+/*
+ * Board-specific MMC configuration
+ */
+
+#ifndef _DAVINCI_MMC_H
+#define _DAVINCI_MMC_H
+
+#include <linux/types.h>
+#include <linux/mmc/host.h>
+
+struct davinci_mmc_config {
+ /* get_cd()/get_ro() may sleep */
+ int (*get_cd)(int module);
+ int (*get_ro)(int module);
+
+ void (*set_power)(int module, bool on);
+
+ /* wires == 0 is equivalent to wires == 4 (4-bit parallel) */
+ u8 wires;
+
+ u32 max_freq;
+
+ /* any additional host capabilities: OR'd in to mmc->caps */
+ u32 caps;
+
+ /* Number of sg segments */
+ u8 nr_sg;
+};
+void davinci_setup_mmc(int module, struct davinci_mmc_config *config);
+
+enum {
+ MMC_CTLR_VERSION_1 = 0, /* DM644x and DM355 */
+ MMC_CTLR_VERSION_2, /* DA830 */
+};
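+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * a board with a 4-bit slot and a GPIO-backed card-detect helper might
+ * register itself as below; the helper name and limits are illustrative.
+ *
+ *	static struct davinci_mmc_config myboard_mmc_config = {
+ *		.get_cd		= myboard_mmc_get_cd,
+ *		.wires		= 4,
+ *		.max_freq	= 50000000,
+ *		.caps		= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
+ *	};
+ *
+ *	davinci_setup_mmc(0, &myboard_mmc_config);
+ */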
+
+#endif
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
new file mode 100644
index 000000000..75f70f6ac
--- /dev/null
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2010 Wolfram Sang <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#ifndef __ASM_ARCH_IMX_ESDHC_H
+#define __ASM_ARCH_IMX_ESDHC_H
+
+#include <linux/types.h>
+
+enum wp_types {
+ ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
+ ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
+ ESDHC_WP_GPIO, /* external gpio pin for WP */
+};
+
+enum cd_types {
+ ESDHC_CD_NONE, /* no CD, neither controller nor gpio */
+ ESDHC_CD_CONTROLLER, /* mmc controller internal CD */
+ ESDHC_CD_GPIO, /* external gpio pin for CD */
+ ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */
+};
+
+/**
+ * struct esdhc_platform_data - platform data for esdhc on i.MX
+ *
+ * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35.
+ *
+ * @wp_gpio: gpio for write_protect
+ * @cd_gpio: gpio for card_detect interrupt
+ * @wp_type: type of write_protect method (see wp_types enum above)
+ * @cd_type: type of card_detect method (see cd_types enum above)
+ * @support_vsel: indicates that 1.8 V signal voltage switching is supported
+ */
+
+struct esdhc_platform_data {
+ unsigned int wp_gpio;
+ unsigned int cd_gpio;
+ enum wp_types wp_type;
+ enum cd_types cd_type;
+ int max_bus_width;
+ unsigned int f_max;
+ bool support_vsel;
+ unsigned int delay_line;
+};
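+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * a board using GPIOs for both write-protect and card-detect could pass
+ * something like the following; the GPIO numbers are illustrative.
+ *
+ *	static struct esdhc_platform_data myboard_esdhc_pdata = {
+ *		.wp_gpio	= 29,
+ *		.cd_gpio	= 30,
+ *		.wp_type	= ESDHC_WP_GPIO,
+ *		.cd_type	= ESDHC_CD_GPIO,
+ *		.max_bus_width	= 4,
+ *	};
+ */
+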
+#endif /* __ASM_ARCH_IMX_ESDHC_H */
diff --git a/include/linux/platform_data/mmc-mvsdio.h b/include/linux/platform_data/mmc-mvsdio.h
new file mode 100644
index 000000000..d02704cd3
--- /dev/null
+++ b/include/linux/platform_data/mmc-mvsdio.h
@@ -0,0 +1,18 @@
+/*
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MMC_MVSDIO_H
+#define __MMC_MVSDIO_H
+
+#include <linux/mbus.h>
+
+struct mvsdio_platform_data {
+ unsigned int clock;
+ int gpio_card_detect;
+ int gpio_write_protect;
+};
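+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * board code typically fills in the controller clock and the optional
+ * card-detect/write-protect GPIOs; all values below are illustrative.
+ *
+ *	static struct mvsdio_platform_data myboard_mvsdio_pdata = {
+ *		.clock			= 200000000,
+ *		.gpio_card_detect	= 38,
+ *		.gpio_write_protect	= 37,
+ *	};
+ */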
+
+#endif
diff --git a/include/linux/platform_data/mmc-mxcmmc.h b/include/linux/platform_data/mmc-mxcmmc.h
new file mode 100644
index 000000000..29115f405
--- /dev/null
+++ b/include/linux/platform_data/mmc-mxcmmc.h
@@ -0,0 +1,39 @@
+#ifndef ASMARM_ARCH_MMC_H
+#define ASMARM_ARCH_MMC_H
+
+#include <linux/interrupt.h>
+#include <linux/mmc/host.h>
+
+struct device;
+
+/* board specific SDHC data, optional.
+ * If not present, a writable card supplied with 3.3 V is assumed.
+ */
+struct imxmmc_platform_data {
+ /* Return values for the get_ro callback should be:
+ * 0 for a read/write card
+ * 1 for a read-only card
+ * -ENOSYS when not supported (equal to NULL callback)
+ * or a negative errno value when something bad happened
+ */
+ int (*get_ro)(struct device *);
+
+ /* board specific hook to (de)initialize the SD slot.
+ * The board code can call 'handler' on a card detection
+ * change giving data as argument.
+ */
+ int (*init)(struct device *dev, irq_handler_t handler, void *data);
+ void (*exit)(struct device *dev, void *data);
+
+ /* available voltages. If not given, assume
+ * MMC_VDD_32_33 | MMC_VDD_33_34
+ */
+ unsigned int ocr_avail;
+
+ /* adjust slot voltage */
+ void (*setpower)(struct device *, unsigned int vdd);
+
+ /* enable card detect using DAT3 */
+ int dat3_card_detect;
+};
+
+#endif
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
new file mode 100644
index 000000000..929469291
--- /dev/null
+++ b/include/linux/platform_data/mmc-omap.h
@@ -0,0 +1,121 @@
+/*
+ * MMC definitions for OMAP2
+ *
+ * Copyright (C) 2006 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define OMAP_MMC_MAX_SLOTS 2
+
+struct mmc_card;
+
+struct omap_mmc_platform_data {
+ /* back-link to device */
+ struct device *dev;
+
+ /* number of slots per controller */
+ unsigned nr_slots:2;
+
+ /* set if your board has components or wiring that limits the
+ * maximum frequency on the MMC bus */
+ unsigned int max_freq;
+
+ /* switch the bus to a new slot */
+ int (*switch_slot)(struct device *dev, int slot);
+ /* initialize board-specific MMC functionality, can be NULL if
+ * not supported */
+ int (*init)(struct device *dev);
+ void (*cleanup)(struct device *dev);
+ void (*shutdown)(struct device *dev);
+
+ /* Return context loss count due to PM states changing */
+ int (*get_context_loss_count)(struct device *dev);
+
+ /* Integrating attributes from the omap_hwmod layer */
+ u8 controller_flags;
+
+ /* Register offset deviation */
+ u16 reg_offset;
+
+ struct omap_mmc_slot_data {
+
+ /*
+ * 4/8 wires and any additional host capabilities
+ * need to be OR'd together (see linux/mmc/host.h)
+ */
+ u8 wires; /* Used for the MMC driver on omap1 and 2420 */
+ u32 caps; /* Used for the MMC driver on 2430 and later */
+ u32 pm_caps; /* PM capabilities of the mmc */
+
+ /*
+ * nomux means "standard" muxing is wrong on this board, and
+ * that board-specific code handled it before common init logic.
+ */
+ unsigned nomux:1;
+
+ /* switch pin can be for card detect (default) or card cover */
+ unsigned cover:1;
+
+ /* use the internal clock */
+ unsigned internal_clock:1;
+
+ /* nonremovable e.g. eMMC */
+ unsigned nonremovable:1;
+
+ /* Try to sleep or power off when possible */
+ unsigned power_saving:1;
+
+ /* If using power_saving and the MMC power is not to go off */
+ unsigned no_off:1;
+
+ /* eMMC does not handle power off when not in sleep state */
+ unsigned no_regulator_off_init:1;
+
+ /* Regulator off remapped to sleep */
+ unsigned vcc_aux_disable_is_sleep:1;
+
+ /* the chip/feature flags defined below can be OR'd into this variable */
+#define MMC_OMAP7XX (1 << 3)
+#define MMC_OMAP15XX (1 << 4)
+#define MMC_OMAP16XX (1 << 5)
+ unsigned features;
+
+ int switch_pin; /* gpio (card detect) */
+ int gpio_wp; /* gpio (write protect) */
+
+ int (*set_bus_mode)(struct device *dev, int slot, int bus_mode);
+ int (*set_power)(struct device *dev, int slot,
+ int power_on, int vdd);
+ int (*get_ro)(struct device *dev, int slot);
+ void (*remux)(struct device *dev, int slot, int power_on);
+ /* Call back before enabling / disabling regulators */
+ void (*before_set_reg)(struct device *dev, int slot,
+ int power_on, int vdd);
+ /* Call back after enabling / disabling regulators */
+ void (*after_set_reg)(struct device *dev, int slot,
+ int power_on, int vdd);
+ /* if we have special card, init it using this callback */
+ void (*init_card)(struct mmc_card *card);
+
+ /* return MMC cover switch state, can be NULL if not supported.
+ *
+ * possible return values:
+ * 0 - closed
+ * 1 - open
+ */
+ int (*get_cover_state)(struct device *dev, int slot);
+
+ const char *name;
+ u32 ocr_mask;
+
+ /* Card detection IRQs */
+ int card_detect_irq;
+ int (*card_detect)(struct device *dev, int slot);
+
+ unsigned int ban_openended:1;
+
+ } slots[OMAP_MMC_MAX_SLOTS];
+};
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
new file mode 100644
index 000000000..1706b3597
--- /dev/null
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -0,0 +1,28 @@
+#ifndef ASMARM_ARCH_MMC_H
+#define ASMARM_ARCH_MMC_H
+
+#include <linux/mmc/host.h>
+#include <linux/interrupt.h>
+
+struct device;
+struct mmc_host;
+
+struct pxamci_platform_data {
+ unsigned int ocr_mask; /* available voltages */
+ unsigned long detect_delay_ms; /* delay in milliseconds before detecting cards after interrupt */
+ int (*init)(struct device *, irq_handler_t , void *);
+ int (*get_ro)(struct device *);
+ int (*setpower)(struct device *, unsigned int);
+ void (*exit)(struct device *, void *);
+ int gpio_card_detect; /* gpio detecting card insertion */
+ int gpio_card_ro; /* gpio detecting read only toggle */
+ bool gpio_card_ro_invert; /* gpio ro is inverted */
+ int gpio_power; /* gpio powering up MMC bus */
+ bool gpio_power_invert; /* gpio power is inverted */
+};
+
+extern void pxa_set_mci_info(struct pxamci_platform_data *info);
+extern void pxa3xx_set_mci2_info(struct pxamci_platform_data *info);
+extern void pxa3xx_set_mci3_info(struct pxamci_platform_data *info);
+
+#endif
diff --git a/include/linux/platform_data/mmc-s3cmci.h b/include/linux/platform_data/mmc-s3cmci.h
new file mode 100644
index 000000000..c42d31711
--- /dev/null
+++ b/include/linux/platform_data/mmc-s3cmci.h
@@ -0,0 +1,52 @@
+#ifndef _ARCH_MCI_H
+#define _ARCH_MCI_H
+
+/**
+ * struct s3c24xx_mci_pdata - sd/mmc controller platform data
+ * @no_wprotect: Set this to indicate there is no write-protect switch.
+ * @no_detect: Set this if there is no detect switch.
+ * @wprotect_invert: Invert the default sense of the write protect switch.
+ * @detect_invert: Invert the default sense of the card detect switch.
+ * @use_dma: Set to allow the use of DMA.
+ * @gpio_detect: GPIO number for the card detect line.
+ * @gpio_wprotect: GPIO number for the write protect line.
+ * @ocr_avail: The mask of the available power states, non-zero to use.
+ * @set_power: Callback to control the power mode.
+ *
+ * The @gpio_detect is used for card detection when @no_detect is unset,
+ * and the default sense is that 0 returned from gpio_get_value() means
+ * that a card is inserted. If @detect_invert is set, then the value from
+ * gpio_get_value() is inverted, which makes 1 mean card inserted.
+ *
+ * The driver will use @gpio_wprotect to signal whether the card is write
+ * protected if @no_wprotect is not set. A 0 returned from gpio_get_value()
+ * means the card is read/write, and 1 means read-only. The @wprotect_invert
+ * will invert the value returned from gpio_get_value().
+ *
+ * Card power is set by @ocr_avail, using MMC_VDD_ constants if it is set
+ * to a non-zero value, otherwise the default of 3.2-3.4V is used.
+ */
+struct s3c24xx_mci_pdata {
+ unsigned int no_wprotect:1;
+ unsigned int no_detect:1;
+ unsigned int wprotect_invert:1;
+ unsigned int detect_invert:1; /* set => detect active high */
+ unsigned int use_dma:1;
+
+ unsigned int gpio_detect;
+ unsigned int gpio_wprotect;
+ unsigned long ocr_avail;
+ void (*set_power)(unsigned char power_mode,
+ unsigned short vdd);
+};
+
+/**
+ * s3c24xx_mci_set_platdata - set platform data for mmc/sdi device
+ * @pdata: The platform data
+ *
+ * Copy the platform data supplied by @pdata so that this can be marked
+ * __initdata.
+ */
+extern void s3c24xx_mci_set_platdata(struct s3c24xx_mci_pdata *pdata);
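+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * a board with card-detect and write-protect GPIOs could register its
+ * configuration as below; the GPIO numbers are illustrative.
+ *
+ *	static struct s3c24xx_mci_pdata myboard_mci_pdata = {
+ *		.gpio_detect	= 58,
+ *		.gpio_wprotect	= 59,
+ *		.ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34,
+ *		.use_dma	= 1,
+ *	};
+ *
+ *	s3c24xx_mci_set_platdata(&myboard_mci_pdata);
+ */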
+
+#endif /* _ARCH_MCI_H */
diff --git a/include/linux/platform_data/mmc-sdhci-s3c.h b/include/linux/platform_data/mmc-sdhci-s3c.h
new file mode 100644
index 000000000..249f02387
--- /dev/null
+++ b/include/linux/platform_data/mmc-sdhci-s3c.h
@@ -0,0 +1,56 @@
+#ifndef __PLATFORM_DATA_SDHCI_S3C_H
+#define __PLATFORM_DATA_SDHCI_S3C_H
+
+struct platform_device;
+
+enum cd_types {
+ S3C_SDHCI_CD_INTERNAL, /* use mmc internal CD line */
+ S3C_SDHCI_CD_EXTERNAL, /* use external callback */
+ S3C_SDHCI_CD_GPIO, /* use external gpio pin for CD line */
+ S3C_SDHCI_CD_NONE, /* no CD line, use polling to detect card */
+ S3C_SDHCI_CD_PERMANENT, /* no CD line, card permanently wired to host */
+};
+
+/**
+ * struct s3c_sdhci_platdata - Platform device data for Samsung SDHCI
+ * @max_width: The maximum number of data bits supported.
+ * @host_caps: Standard MMC host capabilities bit field.
+ * @host_caps2: The second standard MMC host capabilities bit field.
+ * @pm_caps: MMC host power-management capabilities bit field.
+ * @cd_type: Type of Card Detection method (see cd_types enum above)
+ * @ext_cd_init: Initialize external card detect subsystem. Called on
+ * sdhci-s3c driver probe when cd_type == S3C_SDHCI_CD_EXTERNAL.
+ * notify_func argument is a callback to the sdhci-s3c driver
+ * that triggers the card detection event. Callback arguments:
+ * dev is pointer to platform device of the host controller,
+ * state is new state of the card (0 - removed, 1 - inserted).
+ * @ext_cd_cleanup: Cleanup external card detect subsystem. Called on
+ * sdhci-s3c driver remove when cd_type == S3C_SDHCI_CD_EXTERNAL.
+ * notify_func argument is the same callback as for ext_cd_init.
+ * @ext_cd_gpio: gpio pin used for external CD line, valid only if
+ * cd_type == S3C_SDHCI_CD_GPIO
+ * @ext_cd_gpio_invert: invert values for external CD gpio line
+ * @cfg_gpio: Configure the GPIO for a specific card bit-width
+ *
+ * Initialisation data specific to the machine or platform, used by the
+ * device driver (directly or via callbacks) when configuring gpio or
+ * card speed information.
+ */
+struct s3c_sdhci_platdata {
+ unsigned int max_width;
+ unsigned int host_caps;
+ unsigned int host_caps2;
+ unsigned int pm_caps;
+ enum cd_types cd_type;
+
+ int ext_cd_gpio;
+ bool ext_cd_gpio_invert;
+ int (*ext_cd_init)(void (*notify_func)(struct platform_device *,
+ int state));
+ int (*ext_cd_cleanup)(void (*notify_func)(struct platform_device *,
+ int state));
+
+ void (*cfg_gpio)(struct platform_device *dev, int width);
+};
+
+
+#endif /* __PLATFORM_DATA_SDHCI_S3C_H */
diff --git a/include/linux/platform_data/mmp_audio.h b/include/linux/platform_data/mmp_audio.h
new file mode 100644
index 000000000..0f25d165a
--- /dev/null
+++ b/include/linux/platform_data/mmp_audio.h
@@ -0,0 +1,22 @@
+/*
+ * MMP Platform AUDIO Management
+ *
+ * Copyright (c) 2011 Marvell Semiconductors Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef MMP_AUDIO_H
+#define MMP_AUDIO_H
+
+struct mmp_audio_platdata {
+ u32 period_max_capture;
+ u32 buffer_max_capture;
+ u32 period_max_playback;
+ u32 buffer_max_playback;
+};
+
+#endif /* MMP_AUDIO_H */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
new file mode 100644
index 000000000..2a330ec9e
--- /dev/null
+++ b/include/linux/platform_data/mmp_dma.h
@@ -0,0 +1,19 @@
+/*
+ * MMP Platform DMA Management
+ *
+ * Copyright (c) 2011 Marvell Semiconductors Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef MMP_DMA_H
+#define MMP_DMA_H
+
+struct mmp_dma_platdata {
+ int dma_channels;
+};
+
+#endif /* MMP_DMA_H */
diff --git a/include/linux/platform_data/mouse-pxa930_trkball.h b/include/linux/platform_data/mouse-pxa930_trkball.h
new file mode 100644
index 000000000..5e0789bc4
--- /dev/null
+++ b/include/linux/platform_data/mouse-pxa930_trkball.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_ARCH_PXA930_TRKBALL_H
+#define __ASM_ARCH_PXA930_TRKBALL_H
+
+struct pxa930_trkball_platform_data {
+ int x_filter;
+ int y_filter;
+};
+
+#endif /* __ASM_ARCH_PXA930_TRKBALL_H */
+
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
new file mode 100644
index 000000000..97948ac2b
--- /dev/null
+++ b/include/linux/platform_data/mtd-davinci-aemif.h
@@ -0,0 +1,37 @@
+/*
+ * TI DaVinci AEMIF support
+ *
+ * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef _MACH_DAVINCI_AEMIF_H
+#define _MACH_DAVINCI_AEMIF_H
+
+#include <linux/platform_device.h>
+
+#define NRCSR_OFFSET 0x00
+#define AWCCR_OFFSET 0x04
+#define A1CR_OFFSET 0x10
+
+#define ACR_ASIZE_MASK 0x3
+#define ACR_EW_MASK BIT(30)
+#define ACR_SS_MASK BIT(31)
+
+/* All timings in nanoseconds */
+struct davinci_aemif_timing {
+ u8 wsetup;
+ u8 wstrobe;
+ u8 whold;
+
+ u8 rsetup;
+ u8 rstrobe;
+ u8 rhold;
+
+ u8 ta;
+};
+
+int davinci_aemif_setup(struct platform_device *pdev);
+#endif
diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h
new file mode 100644
index 000000000..1cf555aef
--- /dev/null
+++ b/include/linux/platform_data/mtd-davinci.h
@@ -0,0 +1,90 @@
+/*
+ * mach-davinci/nand.h
+ *
+ * Copyright © 2006 Texas Instruments.
+ *
+ * Ported to 2.6.23 Copyright © 2008 by
+ * Sander Huijsen <Shuijsen@optelecom-nkf.com>
+ * Troy Kisky <troy.kisky@boundarydevices.com>
+ * Dirk Behme <Dirk.Behme@gmail.com>
+ *
+ * --------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ARCH_ARM_DAVINCI_NAND_H
+#define __ARCH_ARM_DAVINCI_NAND_H
+
+#include <linux/mtd/nand.h>
+
+#define NANDFCR_OFFSET 0x60
+#define NANDFSR_OFFSET 0x64
+#define NANDF1ECC_OFFSET 0x70
+
+/* 4-bit ECC syndrome registers */
+#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc
+#define NAND_4BIT_ECC1_OFFSET 0xc0
+#define NAND_4BIT_ECC2_OFFSET 0xc4
+#define NAND_4BIT_ECC3_OFFSET 0xc8
+#define NAND_4BIT_ECC4_OFFSET 0xcc
+#define NAND_ERR_ADD1_OFFSET 0xd0
+#define NAND_ERR_ADD2_OFFSET 0xd4
+#define NAND_ERR_ERRVAL1_OFFSET 0xd8
+#define NAND_ERR_ERRVAL2_OFFSET 0xdc
+
+/* NOTE: boards don't need to use these address bits
+ * for ALE/CLE unless they support booting from NAND.
+ * They're used unless platform data overrides them.
+ */
+#define MASK_ALE 0x08
+#define MASK_CLE 0x10
+
+struct davinci_nand_pdata { /* platform_data */
+ uint32_t mask_ale;
+ uint32_t mask_cle;
+
+ /* for packages using two chipselects */
+ uint32_t mask_chipsel;
+
+ /* board's default static partition info */
+ struct mtd_partition *parts;
+ unsigned nr_parts;
+
+ /* none == NAND_ECC_NONE (strongly *not* advised!!)
+ * soft == NAND_ECC_SOFT
+ * else == NAND_ECC_HW, according to ecc_bits
+ *
+ * All DaVinci-family chips support 1-bit hardware ECC.
+ * Newer ones also support 4-bit ECC, but using it
+ * with large page chips is awkward.
+ */
+ nand_ecc_modes_t ecc_mode;
+ u8 ecc_bits;
+
+ /* e.g. NAND_BUSWIDTH_16 */
+ unsigned options;
+ /* e.g. NAND_BBT_USE_FLASH */
+ unsigned bbt_options;
+
+ /* Main and mirror bbt descriptor overrides */
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+
+ /* Access timings */
+ struct davinci_aemif_timing *timing;
+};
+
+#endif /* __ARCH_ARM_DAVINCI_NAND_H */
diff --git a/include/linux/platform_data/mtd-mxc_nand.h b/include/linux/platform_data/mtd-mxc_nand.h
new file mode 100644
index 000000000..6bb96ef16
--- /dev/null
+++ b/include/linux/platform_data/mtd-mxc_nand.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#ifndef __ASM_ARCH_NAND_H
+#define __ASM_ARCH_NAND_H
+
+#include <linux/mtd/partitions.h>
+
+struct mxc_nand_platform_data {
+ unsigned int width; /* data bus width in bytes */
+ unsigned int hw_ecc:1; /* set to 0 to disable hardware ECC */
+ unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */
+ struct mtd_partition *parts; /* partition table */
+ int nr_parts; /* size of parts */
+};
+#endif /* __ASM_ARCH_NAND_H */
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
new file mode 100644
index 000000000..090bbab01
--- /dev/null
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2006 Micron Technology Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MTD_NAND_OMAP2_H
+#define _MTD_NAND_OMAP2_H
+
+#include <linux/mtd/partitions.h>
+
+#define GPMC_BCH_NUM_REMAINDER 8
+
+enum nand_io {
+ NAND_OMAP_PREFETCH_POLLED = 0, /* prefetch polled mode, default */
+ NAND_OMAP_POLLED, /* polled mode, without prefetch */
+ NAND_OMAP_PREFETCH_DMA, /* prefetch enabled sDMA mode */
+ NAND_OMAP_PREFETCH_IRQ /* prefetch enabled irq mode */
+};
+
+enum omap_ecc {
+ /*
+ * 1-bit ECC: calculation and correction by SW
+ * ECC stored at end of spare area
+ */
+ OMAP_ECC_HAM1_CODE_SW = 0,
+
+ /*
+ * 1-bit ECC: calculation by GPMC, Error detection by Software
+ * ECC layout compatible with ROM code layout
+ */
+ OMAP_ECC_HAM1_CODE_HW,
+ /* 4-bit ECC calculation by GPMC, Error detection by Software */
+ OMAP_ECC_BCH4_CODE_HW_DETECTION_SW,
+ /* 4-bit ECC calculation by GPMC, Error detection by ELM */
+ OMAP_ECC_BCH4_CODE_HW,
+ /* 8-bit ECC calculation by GPMC, Error detection by Software */
+ OMAP_ECC_BCH8_CODE_HW_DETECTION_SW,
+ /* 8-bit ECC calculation by GPMC, Error detection by ELM */
+ OMAP_ECC_BCH8_CODE_HW,
+ /* 16-bit ECC calculation by GPMC, Error detection by ELM */
+ OMAP_ECC_BCH16_CODE_HW,
+};
+
+struct gpmc_nand_regs {
+ void __iomem *gpmc_status;
+ void __iomem *gpmc_nand_command;
+ void __iomem *gpmc_nand_address;
+ void __iomem *gpmc_nand_data;
+ void __iomem *gpmc_prefetch_config1;
+ void __iomem *gpmc_prefetch_config2;
+ void __iomem *gpmc_prefetch_control;
+ void __iomem *gpmc_prefetch_status;
+ void __iomem *gpmc_ecc_config;
+ void __iomem *gpmc_ecc_control;
+ void __iomem *gpmc_ecc_size_config;
+ void __iomem *gpmc_ecc1_result;
+ void __iomem *gpmc_bch_result0[GPMC_BCH_NUM_REMAINDER];
+ void __iomem *gpmc_bch_result1[GPMC_BCH_NUM_REMAINDER];
+ void __iomem *gpmc_bch_result2[GPMC_BCH_NUM_REMAINDER];
+ void __iomem *gpmc_bch_result3[GPMC_BCH_NUM_REMAINDER];
+ void __iomem *gpmc_bch_result4[GPMC_BCH_NUM_REMAINDER];
+ void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER];
+ void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER];
+};
+
+struct omap_nand_platform_data {
+ int cs;
+ struct mtd_partition *parts;
+ int nr_parts;
+ bool dev_ready;
+ bool flash_bbt;
+ enum nand_io xfer_type;
+ int devsize;
+ enum omap_ecc ecc_opt;
+ struct gpmc_nand_regs reg;
+
+ /* for passing the partitions */
+ struct device_node *of_node;
+ struct device_node *elm_of_node;
+};
+#endif
diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h
new file mode 100644
index 000000000..ac4ea2e64
--- /dev/null
+++ b/include/linux/platform_data/mtd-nand-pxa3xx.h
@@ -0,0 +1,72 @@
+#ifndef __ASM_ARCH_PXA3XX_NAND_H
+#define __ASM_ARCH_PXA3XX_NAND_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+struct pxa3xx_nand_timing {
+ unsigned int tCH; /* Enable signal hold time */
+ unsigned int tCS; /* Enable signal setup time */
+ unsigned int tWH; /* ND_nWE high duration */
+ unsigned int tWP; /* ND_nWE pulse time */
+ unsigned int tRH; /* ND_nRE high duration */
+ unsigned int tRP; /* ND_nRE pulse width */
+ unsigned int tR; /* ND_nWE high to ND_nRE low for read */
+ unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
+ unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
+};
+
+struct pxa3xx_nand_flash {
+ char *name;
+ uint32_t chip_id;
+ unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */
+ unsigned int page_size; /* Page size in bytes (PAGE_SZ) */
+ unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
+ unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
+ unsigned int num_blocks; /* Number of physical blocks in Flash */
+
+ struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
+};
+
+/*
+ * The current pxa3xx_nand controller has two chip selects, and both
+ * of them are usable.
+ *
+ * Note that when this feature is used, the keep-configuration feature
+ * must not be enabled: the two chip selects may be attached to
+ * different NAND chips, whose differing page sizes and timing
+ * requirements make keeping a single configuration impossible.
+ */
+
+/* The maximum number of chip selects currently supported */
+#define NUM_CHIP_SELECT (2)
+struct pxa3xx_nand_platform_data {
+
+ /* the data flash bus is shared between the Static Memory
+ * Controller and the Data Flash Controller, the arbiter
+ * controls the ownership of the bus
+ */
+ int enable_arbiter;
+
+ /* allow platform code to keep OBM/bootloader defined NFC config */
+ int keep_config;
+
+ /* indicate how many chip selects will be used */
+ int num_cs;
+
+ /* use a flash-based bad block table */
+ bool flash_bbt;
+
+ /* requested ECC strength and ECC step size */
+ int ecc_strength, ecc_step_size;
+
+ const struct mtd_partition *parts[NUM_CHIP_SELECT];
+ unsigned int nr_parts[NUM_CHIP_SELECT];
+
+ const struct pxa3xx_nand_flash * flash;
+ size_t num_flash;
+};
+
+extern void pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info);
+#endif /* __ASM_ARCH_PXA3XX_NAND_H */
diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h
new file mode 100644
index 000000000..36bb92172
--- /dev/null
+++ b/include/linux/platform_data/mtd-nand-s3c2410.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2004 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 - NAND device controller platform_device info
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __MTD_NAND_S3C2410_H
+#define __MTD_NAND_S3C2410_H
+
+/**
+ * struct s3c2410_nand_set - define a set of one or more nand chips
+ * @disable_ecc: Entirely disable ECC - Dangerous
+ * @flash_bbt: Openmoko u-boot can create a Bad Block Table
+ * Setting this flag will allow the kernel to
+ * look for it at boot time and also skip the NAND
+ * scan.
+ * @options: Default value to set into 'struct nand_chip' options.
+ * @nr_chips: Number of chips in this set
+ * @nr_partitions: Number of partitions pointed to by @partitions
+ * @name: Name of set (optional)
+ * @nr_map: Map for low-layer logical to physical chip numbers (optional)
+ * @partitions: The mtd partition list
+ *
+ * Defines a set of one or more NAND chips registered with a single MTD. Also
+ * allows flags to be passed to the underlying NAND layer; 'disable_ecc' will
+ * trigger a warning at boot time.
+ */
+struct s3c2410_nand_set {
+ unsigned int disable_ecc:1;
+ unsigned int flash_bbt:1;
+
+ unsigned int options;
+ int nr_chips;
+ int nr_partitions;
+ char *name;
+ int *nr_map;
+ struct mtd_partition *partitions;
+ struct nand_ecclayout *ecc_layout;
+};
+
+struct s3c2410_platform_nand {
+ /* timing information for controller, all times in nanoseconds */
+
+ int tacls; /* time for active CLE/ALE to nWE/nOE */
+ int twrph0; /* active time for nWE/nOE */
+ int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */
+
+ unsigned int ignore_unset_ecc:1;
+
+ int nr_sets;
+ struct s3c2410_nand_set *sets;
+
+ void (*select_chip)(struct s3c2410_nand_set *,
+ int chip);
+};
+
+/**
+ * s3c_nand_set_platdata() - register NAND platform data.
+ * @nand: The NAND platform data to register with s3c_device_nand.
+ *
+ * This function copies the given NAND platform data, @nand and registers
+ * it with the s3c_device_nand. This allows @nand to be __initdata.
+*/
+extern void s3c_nand_set_platdata(struct s3c2410_platform_nand *nand);
+
+#endif /*__MTD_NAND_S3C2410_H */
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h
new file mode 100644
index 000000000..56ff0e6f5
--- /dev/null
+++ b/include/linux/platform_data/mtd-onenand-omap2.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2006 Nokia Corporation
+ * Author: Juha Yrjola
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MTD_ONENAND_OMAP2_H
+#define __MTD_ONENAND_OMAP2_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#define ONENAND_SYNC_READ (1 << 0)
+#define ONENAND_SYNC_READWRITE (1 << 1)
+#define ONENAND_IN_OMAP34XX (1 << 2)
+
+struct omap_onenand_platform_data {
+ int cs;
+ int gpio_irq;
+ struct mtd_partition *parts;
+ int nr_parts;
+ int (*onenand_setup)(void __iomem *, int *freq_ptr);
+ int dma_channel;
+ u8 flags;
+ u8 regulator_can_sleep;
+ u8 skip_initial_unlocking;
+
+ /* for passing the partitions */
+ struct device_node *of_node;
+};
+#endif
diff --git a/include/linux/platform_data/mtd-orion_nand.h b/include/linux/platform_data/mtd-orion_nand.h
new file mode 100644
index 000000000..a7ce77c7c
--- /dev/null
+++ b/include/linux/platform_data/mtd-orion_nand.h
@@ -0,0 +1,24 @@
+/*
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MTD_ORION_NAND_H
+#define __MTD_ORION_NAND_H
+
+/*
+ * Device bus NAND private data
+ */
+struct orion_nand_data {
+ struct mtd_partition *parts;
+ int (*dev_ready)(struct mtd_info *mtd);
+ u32 nr_parts;
+ u8 ale; /* address line number connected to ALE */
+ u8 cle; /* address line number connected to CLE */
+ u8 width; /* buswidth */
+ u8 chip_delay;
+};
+
+
+#endif
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
new file mode 100644
index 000000000..98b7925f1
--- /dev/null
+++ b/include/linux/platform_data/mv_usb.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MV_PLATFORM_USB_H
+#define __MV_PLATFORM_USB_H
+
+enum pxa_ehci_type {
+ EHCI_UNDEFINED = 0,
+ PXA_U2OEHCI, /* pxa 168, 9xx */
+ PXA_SPH, /* pxa 168, 9xx SPH */
+ MMP3_HSIC, /* mmp3 hsic */
+ MMP3_FSIC, /* mmp3 fsic */
+};
+
+enum {
+ MV_USB_MODE_OTG,
+ MV_USB_MODE_HOST,
+};
+
+enum {
+ VBUS_LOW = 0,
+ VBUS_HIGH = 1 << 0,
+};
+
+struct mv_usb_addon_irq {
+ unsigned int irq;
+ int (*poll)(void);
+};
+
+struct mv_usb_platform_data {
+ struct mv_usb_addon_irq *id; /* only valid for OTG; ID pin change */
+ struct mv_usb_addon_irq *vbus; /* valid for OTG/UDC; VBUS change */
+
+ /* only valid for HCD; OTG or Host only */
+ unsigned int mode;
+
+ /* set when OTG needs the ID pin checked, so its clock must not be gated */
+ unsigned int disable_otg_clock_gating:1;
+ /* Force a_bus_req to be asserted */
+ unsigned int otg_force_a_bus_req:1;
+
+ int (*phy_init)(void __iomem *regbase);
+ void (*phy_deinit)(void __iomem *regbase);
+ int (*set_vbus)(unsigned int vbus);
+ int (*private_init)(void __iomem *opregs, void __iomem *phyregs);
+};
+#endif
diff --git a/include/linux/platform_data/net-cw1200.h b/include/linux/platform_data/net-cw1200.h
new file mode 100644
index 000000000..c6fbc3ce4
--- /dev/null
+++ b/include/linux/platform_data/net-cw1200.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef CW1200_PLAT_H_INCLUDED
+#define CW1200_PLAT_H_INCLUDED
+
+struct cw1200_platform_data_spi {
+ u8 spi_bits_per_word; /* REQUIRED */
+ u16 ref_clk; /* REQUIRED (in kHz) */
+
+ /* All others are optional */
+ bool have_5ghz;
+ int reset; /* GPIO to RSTn signal (0 disables) */
+ int powerup; /* GPIO to POWERUP signal (0 disables) */
+ int (*power_ctrl)(const struct cw1200_platform_data_spi *pdata,
+ bool enable); /* Control 3v3 / 1v8 supply */
+ int (*clk_ctrl)(const struct cw1200_platform_data_spi *pdata,
+ bool enable); /* Control CLK32K */
+ const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */
+ const char *sdd_file; /* if NULL, will use default for detected hw type */
+};
+
+struct cw1200_platform_data_sdio {
+ u16 ref_clk; /* REQUIRED (in kHz) */
+
+ /* All others are optional */
+ bool have_5ghz;
+ bool no_nptb; /* SDIO hardware does not support non-power-of-2-blocksizes */
+ int reset; /* GPIO to RSTn signal (0 disables) */
+ int powerup; /* GPIO to POWERUP signal (0 disables) */
+ int irq; /* IRQ line or 0 to use SDIO IRQ */
+ int (*power_ctrl)(const struct cw1200_platform_data_sdio *pdata,
+ bool enable); /* Control 3v3 / 1v8 supply */
+ int (*clk_ctrl)(const struct cw1200_platform_data_sdio *pdata,
+ bool enable); /* Control CLK32K */
+ const u8 *macaddr; /* if NULL, use cw1200_mac_template module parameter */
+ const char *sdd_file; /* if NULL, will use default for detected hw type */
+};
+
+
+/* An example of SPI support in your board setup file:
+
+ static struct cw1200_platform_data_spi cw1200_platform_data = {
+ .ref_clk = 38400,
+ .spi_bits_per_word = 16,
+ .reset = GPIO_RF_RESET,
+ .powerup = GPIO_RF_POWERUP,
+ .macaddr = wifi_mac_addr,
+ .sdd_file = "sdd_sagrad_1091_1098.bin",
+ };
+ static struct spi_board_info myboard_spi_devices[] __initdata = {
+ {
+ .modalias = "cw1200_wlan_spi",
+ .max_speed_hz = 52000000,
+ .bus_num = 0,
+ .irq = WIFI_IRQ,
+ .platform_data = &cw1200_platform_data,
+ .chip_select = 0,
+ },
+ };
+
+ */
+
+/* An example of SDIO support in your board setup file:
+
+ static struct cw1200_platform_data_sdio my_cw1200_platform_data = {
+ .ref_clk = 38400,
+ .have_5ghz = false,
+ .sdd_file = "sdd_myplatform.bin",
+ };
+ cw1200_sdio_set_platform_data(&my_cw1200_platform_data);
+
+ */
+
+void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata);
+
+#endif /* CW1200_PLAT_H_INCLUDED */
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
new file mode 100644
index 000000000..0a6de4ca4
--- /dev/null
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -0,0 +1,60 @@
+/*
+ * ntc_thermistor.h - NTC Thermistors
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _LINUX_NTC_H
+#define _LINUX_NTC_H
+
+struct iio_channel;
+
+enum ntc_thermistor_type {
+ TYPE_NCPXXWB473,
+ TYPE_NCPXXWL333,
+ TYPE_B57330V2103,
+};
+
+struct ntc_thermistor_platform_data {
+ /*
+ * Exactly one of read_uv and read_ohm should be provided.
+ * Both functions should return a negative value in the error case.
+ *
+ * pullup_uv, pullup_ohm, pulldown_ohm, and connect are required to use
+ * read_uv()
+ *
+ * How to setup pullup_ohm, pulldown_ohm, and connect is
+ * described at Documentation/hwmon/ntc_thermistor
+ *
+ * pullup/down_ohm: 0 for infinite / not-connected
+ *
+ * chan: iio_channel pointer to communicate with the ADC which the
+ * thermistor is using for conversion of the analog values.
+ */
+ int (*read_uv)(struct ntc_thermistor_platform_data *);
+ unsigned int pullup_uv;
+
+ unsigned int pullup_ohm;
+ unsigned int pulldown_ohm;
+ enum { NTC_CONNECTED_POSITIVE, NTC_CONNECTED_GROUND } connect;
+ struct iio_channel *chan;
+
+ int (*read_ohm)(void);
+};
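+
+/*
+ * Usage sketch (editorial example, not part of the original header):
+ * when the board can read the thermistor resistance directly, only
+ * read_ohm needs to be supplied.  The helper names are hypothetical.
+ *
+ *	static int myboard_read_thermistor_ohm(void)
+ *	{
+ *		return myboard_adc_to_ohm();
+ *	}
+ *
+ *	static struct ntc_thermistor_platform_data myboard_ntc_pdata = {
+ *		.read_ohm = myboard_read_thermistor_ohm,
+ *	};
+ */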
+
+#endif /* _LINUX_NTC_H */
diff --git a/include/linux/platform_data/nxp-nci.h b/include/linux/platform_data/nxp-nci.h
new file mode 100644
index 000000000..d6ed28679
--- /dev/null
+++ b/include/linux/platform_data/nxp-nci.h
@@ -0,0 +1,27 @@
+/*
+ * Generic platform data for the NXP NCI NFC chips.
+ *
+ * Copyright (C) 2014 NXP Semiconductors All rights reserved.
+ *
+ * Authors: Clément Perrochaud <clement.perrochaud@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NXP_NCI_H_
+#define _NXP_NCI_H_
+
+struct nxp_nci_nfc_platform_data {
+ unsigned int gpio_en;
+ unsigned int gpio_fw;
+ unsigned int irq;
+};
+
+#endif /* _NXP_NCI_H_ */
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
new file mode 100644
index 000000000..ee60ef79d
--- /dev/null
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -0,0 +1,58 @@
+/**
+ * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030
+ * codec, header.
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * All rights reserved.
+ *
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef _OMAP_TWL4030_H_
+#define _OMAP_TWL4030_H_
+
+/* To select if only one channel is connected in a stereo port */
+#define OMAP_TWL4030_LEFT (1 << 0)
+#define OMAP_TWL4030_RIGHT (1 << 1)
+
+struct omap_tw4030_pdata {
+ const char *card_name;
+ /* Voice port is connected to McBSP3 */
+ bool voice_connected;
+
+ /* The driver will parse the connection flags if this flag is set */
+ bool custom_routing;
+ /* Flags to indicate connected audio ports. */
+ u8 has_hs;
+ u8 has_hf;
+ u8 has_predriv;
+ u8 has_carkit;
+ bool has_ear;
+
+ bool has_mainmic;
+ bool has_submic;
+ bool has_hsmic;
+ bool has_carkitmic;
+ bool has_digimic0;
+ bool has_digimic1;
+ u8 has_linein;
+
+ /* Jack detect GPIO or <= 0 if it is not implemented */
+ int jack_detect;
+};
+
+#endif /* _OMAP_TWL4030_H_ */
diff --git a/include/linux/platform_data/omap-wd-timer.h b/include/linux/platform_data/omap-wd-timer.h
new file mode 100644
index 000000000..d75f5f802
--- /dev/null
+++ b/include/linux/platform_data/omap-wd-timer.h
@@ -0,0 +1,38 @@
+/*
+ * OMAP2+ WDTIMER-specific function prototypes
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_OMAP_WD_TIMER_H
+#define __LINUX_PLATFORM_DATA_OMAP_WD_TIMER_H
+
+#include <linux/types.h>
+
+/*
+ * Standardized OMAP reset source bits
+ *
+ * This is a subset of the ones listed in arch/arm/mach-omap2/prm.h
+ * and are the only ones needed in the watchdog driver.
+ */
+#define OMAP_MPU_WD_RST_SRC_ID_SHIFT 3
+
+/**
+ * struct omap_wd_timer_platform_data - WDTIMER integration to the host SoC
+ * @read_reset_sources - fn ptr for the SoC to indicate the last reset cause
+ *
+ * The function pointed to by @read_reset_sources must return its data
+ * in a standard format - search for RST_SRC_ID_SHIFT in
+ * arch/arm/mach-omap2
+ */
+struct omap_wd_timer_platform_data {
+ u32 (*read_reset_sources)(void);
+};
+
+#endif
diff --git a/include/linux/platform_data/omap1_bl.h b/include/linux/platform_data/omap1_bl.h
new file mode 100644
index 000000000..881a8e92d
--- /dev/null
+++ b/include/linux/platform_data/omap1_bl.h
@@ -0,0 +1,11 @@
+#ifndef __OMAP1_BL_H__
+#define __OMAP1_BL_H__
+
+#include <linux/device.h>
+
+struct omap_backlight_config {
+ int default_intensity;
+ int (*set_power)(struct device *dev, int state);
+};
+
+#endif
diff --git a/include/linux/platform_data/omap_drm.h b/include/linux/platform_data/omap_drm.h
new file mode 100644
index 000000000..f4e4a237e
--- /dev/null
+++ b/include/linux/platform_data/omap_drm.h
@@ -0,0 +1,53 @@
+/*
+ * DRM/KMS platform data for TI OMAP platforms
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __PLATFORM_DATA_OMAP_DRM_H__
+#define __PLATFORM_DATA_OMAP_DRM_H__
+
+/*
+ * Optional platform data to configure the default configuration of which
+ * pipes/overlays/CRTCs are used. If this is not provided, then instead the
+ * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
+ * one manager, with priority given to managers that are connected to
+ * detected devices. Remaining overlays are used as video planes. This
+ * should be a good default behavior for most cases, but there may still
+ * be times when you wish to do something different.
+ */
+struct omap_kms_platform_data {
+ /* overlays to use as CRTCs: */
+ int ovl_cnt;
+ const int *ovl_ids;
+
+ /* overlays to use as video planes: */
+ int pln_cnt;
+ const int *pln_ids;
+
+ int mgr_cnt;
+ const int *mgr_ids;
+
+ int dev_cnt;
+ const char **dev_names;
+};
+
+struct omap_drm_platform_data {
+ uint32_t omaprev;
+ struct omap_kms_platform_data *kms_pdata;
+};
+
+#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */
diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h
new file mode 100644
index 000000000..3c98dd4f9
--- /dev/null
+++ b/include/linux/platform_data/pca953x.h
@@ -0,0 +1,30 @@
+#ifndef _LINUX_PCA953X_H
+#define _LINUX_PCA953X_H
+
+#include <linux/types.h>
+#include <linux/i2c.h>
+
+/* platform data for the PCA9539 16-bit I/O expander driver */
+
+struct pca953x_platform_data {
+ /* number of the first GPIO */
+ unsigned gpio_base;
+
+ /* initial polarity inversion setting */
+ u32 invert;
+
+ /* interrupt base */
+ int irq_base;
+
+ void *context; /* param to setup/teardown */
+
+ int (*setup)(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio,
+ void *context);
+ const char *const *names;
+};
+
+#endif /* _LINUX_PCA953X_H */
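A hedged usage sketch: the chip type, I2C address, GPIO base and line names below are made up, and only show how the hooks and the pdata attach to an i2c_board_info:

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/platform_data/pca953x.h>

/* Optional hook, run once the expander GPIOs have been registered. */
static int my_expander_setup(struct i2c_client *client, unsigned gpio,
			     unsigned ngpio, void *context)
{
	/* Request or initialise board-specific expander lines here. */
	return 0;
}

static const char *const my_expander_names[] = { "status-led", "codec-reset" };

static struct pca953x_platform_data my_expander_pdata = {
	.gpio_base	= 240,			/* assumed free GPIO range */
	.invert		= 0,
	.setup		= my_expander_setup,
	.names		= my_expander_names,
};

static struct i2c_board_info my_expander_info __initdata = {
	I2C_BOARD_INFO("pca9539", 0x74),	/* assumed chip and address */
	.platform_data = &my_expander_pdata,
};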
diff --git a/include/linux/platform_data/pcmcia-pxa2xx_viper.h b/include/linux/platform_data/pcmcia-pxa2xx_viper.h
new file mode 100644
index 000000000..d428be4db
--- /dev/null
+++ b/include/linux/platform_data/pcmcia-pxa2xx_viper.h
@@ -0,0 +1,11 @@
+#ifndef __ARCOM_PCMCIA_H
+#define __ARCOM_PCMCIA_H
+
+struct arcom_pcmcia_pdata {
+ int cd_gpio;
+ int rdy_gpio;
+ int pwr_gpio;
+ void (*reset)(int state);
+};
+
+#endif
diff --git a/include/linux/platform_data/pinctrl-adi2.h b/include/linux/platform_data/pinctrl-adi2.h
new file mode 100644
index 000000000..8f9130061
--- /dev/null
+++ b/include/linux/platform_data/pinctrl-adi2.h
@@ -0,0 +1,40 @@
+/*
+ * Pinctrl Driver for ADI GPIO2 controller
+ *
+ * Copyright 2007-2013 Analog Devices Inc.
+ *
+ * Licensed under the GPLv2 or later
+ */
+
+
+#ifndef PINCTRL_ADI2_H
+#define PINCTRL_ADI2_H
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+/**
+ * struct adi_pinctrl_gpio_platform_data - Pinctrl gpio platform data
+ * for ADI GPIO2 device.
+ *
+ * @port_gpio_base: Optional global GPIO index of the GPIO bank.
+ * 0 means driver decides.
+ * @port_pin_base: Pin index of the pin controller device.
+ * @port_width: PIN number of the GPIO bank device
+ * @pint_id: GPIO PINT device id that this GPIO bank should map to.
+ * @pint_assign: The 32-bit GPIO PINT registers can be divided into 2 parts. A
+ * GPIO bank can be mapped into either low 16 bits[0] or high 16
+ * bits[1] of each PINT register.
+ * @pint_map: GPIO bank mapping code in the PINT device
+ */
+struct adi_pinctrl_gpio_platform_data {
+ unsigned int port_gpio_base;
+ unsigned int port_pin_base;
+ unsigned int port_width;
+ u8 pinctrl_id;
+ u8 pint_id;
+ bool pint_assign;
+ u8 pint_map;
+};
+
+#endif
diff --git a/include/linux/platform_data/pinctrl-single.h b/include/linux/platform_data/pinctrl-single.h
new file mode 100644
index 000000000..72eacda9b
--- /dev/null
+++ b/include/linux/platform_data/pinctrl-single.h
@@ -0,0 +1,12 @@
+/**
+ * irq: optional wake-up interrupt
+ * rearm: optional soc specific rearm function
+ *
+ * Note that the irq and rearm setup should come from device
+ * tree except for omap where there are still some dependencies
+ * to the legacy PRM code.
+ */
+struct pcs_pdata {
+ int irq;
+ void (*rearm)(void);
+};
diff --git a/include/linux/platform_data/pn544.h b/include/linux/platform_data/pn544.h
new file mode 100644
index 000000000..5ce1ab983
--- /dev/null
+++ b/include/linux/platform_data/pn544.h
@@ -0,0 +1,43 @@
+/*
+ * Driver include for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PN544_H_
+#define _PN544_H_
+
+#include <linux/i2c.h>
+
+enum {
+ NFC_GPIO_ENABLE,
+ NFC_GPIO_FW_RESET,
+ NFC_GPIO_IRQ
+};
+
+/* board config */
+struct pn544_nfc_platform_data {
+ int (*request_resources) (struct i2c_client *client);
+ void (*free_resources) (void);
+ void (*enable) (int fw);
+ int (*test) (void);
+ void (*disable) (void);
+ int (*get_gpio)(int type);
+};
+
+#endif /* _PN544_H_ */
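The get_gpio() hook maps the enum above onto board wiring; a sketch with made-up GPIO numbers:

#include <linux/errno.h>
#include <linux/platform_data/pn544.h>

/* Assumed board wiring for the three NFC lines. */
#define MY_NFC_GPIO_EN		78
#define MY_NFC_GPIO_FW_RESET	79
#define MY_NFC_GPIO_IRQ		80

static int my_pn544_get_gpio(int type)
{
	switch (type) {
	case NFC_GPIO_ENABLE:
		return MY_NFC_GPIO_EN;
	case NFC_GPIO_FW_RESET:
		return MY_NFC_GPIO_FW_RESET;
	case NFC_GPIO_IRQ:
		return MY_NFC_GPIO_IRQ;
	default:
		return -EINVAL;
	}
}

static struct pn544_nfc_platform_data my_pn544_pdata = {
	.get_gpio = my_pn544_get_gpio,
};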
diff --git a/include/linux/platform_data/pxa2xx_udc.h b/include/linux/platform_data/pxa2xx_udc.h
new file mode 100644
index 000000000..c6c5e98b5
--- /dev/null
+++ b/include/linux/platform_data/pxa2xx_udc.h
@@ -0,0 +1,27 @@
+/*
+ * This supports machine-specific differences in how the PXA2xx
+ * USB Device Controller (UDC) is wired.
+ *
+ * It is set in linux/arch/arm/mach-pxa/<machine>.c or in
+ * linux/arch/mach-ixp4xx/<machine>.c and used in
+ * the probe routine of linux/drivers/usb/gadget/pxa2xx_udc.c
+ */
+#ifndef PXA2XX_UDC_H
+#define PXA2XX_UDC_H
+
+struct pxa2xx_udc_mach_info {
+ int (*udc_is_connected)(void); /* do we see host? */
+ void (*udc_command)(int cmd);
+#define PXA2XX_UDC_CMD_CONNECT 0 /* let host see us */
+#define PXA2XX_UDC_CMD_DISCONNECT 1 /* so host won't see us */
+
+ /* Boards following the design guidelines in the developer's manual,
+ * with on-chip GPIOs not Lubbock's weird hardware, can have a sane
+ * VBUS IRQ and omit the methods above. Store the GPIO number
+ * here. Note that sometimes the signals go through inverters...
+ */
+ bool gpio_pullup_inverted;
+ int gpio_pullup; /* high == pullup activated */
+};
+
+#endif
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
new file mode 100644
index 000000000..9e20c2fb4
--- /dev/null
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -0,0 +1,58 @@
+/*
+ * include/linux/platform_data/pxa_sdhci.h
+ *
+ * Copyright 2010 Marvell
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ *
+ * PXA Platform - SDHCI platform data definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PXA_SDHCI_H_
+#define _PXA_SDHCI_H_
+
+/* pxa specific flag */
+/* Require clock free running */
+#define PXA_FLAG_ENABLE_CLOCK_GATING (1<<0)
+/* card always wired to host, like on-chip emmc */
+#define PXA_FLAG_CARD_PERMANENT (1<<1)
+/* Board design supports 8-bit data on SD/SDIO BUS */
+#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2)
+
+/*
+ * struct sdhci_pxa_platdata - Platform device data for PXA SDHCI
+ * @flags: flags for platform requirement
+ * @clk_delay_cycles:
+ * mmp2: each step is roughly 100ps, 5bits width
+ * pxa910: each step is 1ns, 4bits width
+ * @clk_delay_sel: select clk_delay, used on pxa910
+ * 0: choose feedback clk
+ * 1: choose feedback clk + delay value
+ * 2: choose internal clk
+ * @clk_delay_enable: enable clk_delay or not, used on pxa910
+ * @ext_cd_gpio: gpio pin used for external CD line
+ * @ext_cd_gpio_invert: invert values for external CD gpio line
+ * @max_speed: the maximum speed supported
+ * @host_caps: Standard MMC host capabilities bit field.
+ * @quirks: quirks of platform
+ * @quirks2: quirks2 of platform
+ * @pm_caps: pm_caps of platform
+ */
+struct sdhci_pxa_platdata {
+ unsigned int flags;
+ unsigned int clk_delay_cycles;
+ unsigned int clk_delay_sel;
+ bool clk_delay_enable;
+ unsigned int ext_cd_gpio;
+ bool ext_cd_gpio_invert;
+ unsigned int max_speed;
+ u32 host_caps;
+ u32 host_caps2;
+ unsigned int quirks;
+ unsigned int quirks2;
+ unsigned int pm_caps;
+};
+#endif /* _PXA_SDHCI_H_ */
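Illustrative values only: an assumed on-board eMMC slot that is permanently wired and 8-bit capable, with a made-up clock delay:

#include <linux/types.h>
#include <linux/platform_data/pxa_sdhci.h>

static struct sdhci_pxa_platdata my_emmc_pdata = {
	.flags			= PXA_FLAG_CARD_PERMANENT |
				  PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
	.clk_delay_cycles	= 0x1f,		/* assumed tuning value */
};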
diff --git a/include/linux/platform_data/regulator-haptic.h b/include/linux/platform_data/regulator-haptic.h
new file mode 100644
index 000000000..5658e58e0
--- /dev/null
+++ b/include/linux/platform_data/regulator-haptic.h
@@ -0,0 +1,29 @@
+/*
+ * Regulator Haptic Platform Data
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Hyunhee Kim <hyunhee.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGULATOR_HAPTIC_H
+#define _REGULATOR_HAPTIC_H
+
+/*
+ * struct regulator_haptic_data - Platform device data
+ *
+ * @max_volt: maximum voltage value supplied to the haptic motor,
+ *	in microvolts.
+ * @min_volt: minimum voltage value supplied to the haptic motor,
+ *	in microvolts.
+ */
+struct regulator_haptic_data {
+ unsigned int max_volt;
+ unsigned int min_volt;
+};
+
+#endif /* _REGULATOR_HAPTIC_H */
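A sketch of the pdata with voltages given in microvolts; the 1.1 V to 2.7 V range and the "regulator-haptic" device name are assumptions:

#include <linux/platform_device.h>
#include <linux/platform_data/regulator-haptic.h>

static struct regulator_haptic_data my_haptic_pdata = {
	.min_volt = 1100000,		/* 1.1 V, in microvolts */
	.max_volt = 2700000,		/* 2.7 V, in microvolts */
};

static struct platform_device my_haptic_dev = {
	.name	= "regulator-haptic",
	.id	= -1,
	.dev	= {
		.platform_data = &my_haptic_pdata,
	},
};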
diff --git a/include/linux/platform_data/remoteproc-omap.h b/include/linux/platform_data/remoteproc-omap.h
new file mode 100644
index 000000000..bfbd12b41
--- /dev/null
+++ b/include/linux/platform_data/remoteproc-omap.h
@@ -0,0 +1,59 @@
+/*
+ * Remote Processor - omap-specific bits
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PLAT_REMOTEPROC_H
+#define _PLAT_REMOTEPROC_H
+
+struct rproc_ops;
+struct platform_device;
+
+/*
+ * struct omap_rproc_pdata - omap remoteproc's platform data
+ * @name: the remoteproc's name
+ * @oh_name: omap hwmod device
+ * @oh_name_opt: optional, secondary omap hwmod device
+ * @firmware: name of firmware file to load
+ * @mbox_name: name of omap mailbox device to use with this rproc
+ * @ops: start/stop rproc handlers
+ * @device_enable: omap-specific handler for enabling a device
+ * @device_shutdown: omap-specific handler for shutting down a device
+ * @set_bootaddr: omap-specific handler for setting the rproc boot address
+ */
+struct omap_rproc_pdata {
+ const char *name;
+ const char *oh_name;
+ const char *oh_name_opt;
+ const char *firmware;
+ const char *mbox_name;
+ const struct rproc_ops *ops;
+ int (*device_enable) (struct platform_device *pdev);
+ int (*device_shutdown) (struct platform_device *pdev);
+ void(*set_bootaddr)(u32);
+};
+
+#if defined(CONFIG_OMAP_REMOTEPROC) || defined(CONFIG_OMAP_REMOTEPROC_MODULE)
+
+void __init omap_rproc_reserve_cma(void);
+
+#else
+
+static inline void __init omap_rproc_reserve_cma(void)
+{
+}
+
+#endif
+
+#endif /* _PLAT_REMOTEPROC_H */
diff --git a/include/linux/platform_data/s3c-hsotg.h b/include/linux/platform_data/s3c-hsotg.h
new file mode 100644
index 000000000..3f1cbf95e
--- /dev/null
+++ b/include/linux/platform_data/s3c-hsotg.h
@@ -0,0 +1,42 @@
+/* include/linux/platform_data/s3c-hsotg.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * S3C USB2.0 High-speed / OtG platform information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __LINUX_USB_S3C_HSOTG_H
+#define __LINUX_USB_S3C_HSOTG_H
+
+struct platform_device;
+
+enum s3c_hsotg_dmamode {
+ S3C_HSOTG_DMA_NONE, /* do not use DMA at-all */
+ S3C_HSOTG_DMA_ONLY, /* always use DMA */
+ S3C_HSOTG_DMA_DRV, /* DMA is chosen by driver */
+};
+
+/**
+ * struct s3c_hsotg_plat - platform data for high-speed otg/udc
+ * @dma: Whether to use DMA or not.
+ * @is_osc: The clock source is an oscillator, not a crystal
+ */
+struct s3c_hsotg_plat {
+ enum s3c_hsotg_dmamode dma;
+ unsigned int is_osc:1;
+ int phy_type;
+
+ int (*phy_init)(struct platform_device *pdev, int type);
+ int (*phy_exit)(struct platform_device *pdev, int type);
+};
+
+extern void s3c_hsotg_set_platdata(struct s3c_hsotg_plat *pd);
+
+#endif /* __LINUX_USB_S3C_HSOTG_H */
diff --git a/include/linux/platform_data/s3c-hsudc.h b/include/linux/platform_data/s3c-hsudc.h
new file mode 100644
index 000000000..6fa109339
--- /dev/null
+++ b/include/linux/platform_data/s3c-hsudc.h
@@ -0,0 +1,34 @@
+/*
+ * S3C24XX USB 2.0 High-speed USB controller gadget driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * The S3C24XX USB 2.0 high-speed USB controller supports up to 9 endpoints.
+ * Each endpoint can be configured as either an IN or an OUT endpoint. Endpoints
+ * can be configured for Bulk or Interrupt transfer mode.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __LINUX_USB_S3C_HSUDC_H
+#define __LINUX_USB_S3C_HSUDC_H
+
+/**
+ * s3c24xx_hsudc_platdata - Platform data for USB High-Speed gadget controller.
+ * @epnum: Number of endpoints to be instantiated by the controller driver.
+ * @gpio_init: Platform specific USB related GPIO initialization.
+ * @gpio_uninit: Platform specific USB related GPIO uninitialization.
+ *
+ * Representation of platform data for the S3C24XX USB 2.0 High Speed gadget
+ * controllers.
+ */
+struct s3c24xx_hsudc_platdata {
+ unsigned int epnum;
+ void (*gpio_init)(void);
+ void (*gpio_uninit)(void);
+};
+
+#endif /* __LINUX_USB_S3C_HSUDC_H */
diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h
new file mode 100644
index 000000000..4504d5d59
--- /dev/null
+++ b/include/linux/platform_data/sa11x0-serial.h
@@ -0,0 +1,33 @@
+/*
+ * Author: Nicolas Pitre
+ *
+ * Moved and changed lots, Russell King
+ *
+ * Low level machine dependent UART functions.
+ */
+#ifndef SA11X0_SERIAL_H
+#define SA11X0_SERIAL_H
+
+struct uart_port;
+struct uart_info;
+
+/*
+ * This is a temporary structure for registering these
+ * functions; it is intended to be discarded after boot.
+ */
+struct sa1100_port_fns {
+ void (*set_mctrl)(struct uart_port *, u_int);
+ u_int (*get_mctrl)(struct uart_port *);
+ void (*pm)(struct uart_port *, u_int, u_int);
+ int (*set_wake)(struct uart_port *, u_int);
+};
+
+#ifdef CONFIG_SERIAL_SA1100
+void sa1100_register_uart_fns(struct sa1100_port_fns *fns);
+void sa1100_register_uart(int idx, int port);
+#else
+#define sa1100_register_uart_fns(fns) do { } while (0)
+#define sa1100_register_uart(idx,port) do { } while (0)
+#endif
+
+#endif
diff --git a/include/linux/platform_data/sc18is602.h b/include/linux/platform_data/sc18is602.h
new file mode 100644
index 000000000..997b06634
--- /dev/null
+++ b/include/linux/platform_data/sc18is602.h
@@ -0,0 +1,19 @@
+/*
+ * Platform data for NXP SC18IS602/603
+ *
+ * Copyright (C) 2012 Guenter Roeck <linux@roeck-us.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * For further information, see the Documentation/spi/sc18is602 file.
+ */
+
+/**
+ * struct sc18is602_platform_data - sc18is602 info
+ * @clock_frequency: SC18IS603 oscillator frequency
+ */
+struct sc18is602_platform_data {
+ u32 clock_frequency;
+};
diff --git a/include/linux/platform_data/serial-imx.h b/include/linux/platform_data/serial-imx.h
new file mode 100644
index 000000000..a938eba2f
--- /dev/null
+++ b/include/linux/platform_data/serial-imx.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#ifndef ASMARM_ARCH_UART_H
+#define ASMARM_ARCH_UART_H
+
+#define IMXUART_HAVE_RTSCTS (1<<0)
+
+struct imxuart_platform_data {
+ unsigned int flags;
+};
+
+#endif
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
new file mode 100644
index 000000000..d09275f3c
--- /dev/null
+++ b/include/linux/platform_data/serial-omap.h
@@ -0,0 +1,46 @@
+/*
+ * Driver for OMAP-UART controller.
+ * Based on drivers/serial/8250.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Authors:
+ * Govindraj R <govindraj.raja@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __OMAP_SERIAL_H__
+#define __OMAP_SERIAL_H__
+
+#include <linux/serial_core.h>
+#include <linux/device.h>
+#include <linux/pm_qos.h>
+
+#define DRIVER_NAME "omap_uart"
+
+/*
+ * The tty device name is ttyO [O -> OMAP].
+ * In the bootargs, specify console=ttyO0 if UART1
+ * is used as the console UART.
+ */
+#define OMAP_SERIAL_NAME "ttyO"
+
+struct omap_uart_port_info {
+ bool dma_enabled; /* To specify DMA Mode */
+ unsigned int uartclk; /* UART clock rate */
+ upf_t flags; /* UPF_* flags */
+ unsigned int dma_rx_buf_size;
+ unsigned int dma_rx_timeout;
+ unsigned int autosuspend_timeout;
+ unsigned int dma_rx_poll_rate;
+
+ int (*get_context_loss_count)(struct device *);
+ void (*enable_wakeup)(struct device *, bool);
+};
+
+#endif /* __OMAP_SERIAL_H__ */
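A per-port sketch with assumed values (48 MHz functional clock, DMA off, 3 s autosuspend):

#include <linux/serial_core.h>
#include <linux/platform_data/serial-omap.h>

static struct omap_uart_port_info my_uart1_info = {
	.dma_enabled		= false,
	.uartclk		= 48000000,
	.flags			= UPF_BOOT_AUTOCONF,
	.autosuspend_timeout	= 3000,		/* assumed policy */
};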
diff --git a/include/linux/platform_data/serial-sccnxp.h b/include/linux/platform_data/serial-sccnxp.h
new file mode 100644
index 000000000..af0c8c3b8
--- /dev/null
+++ b/include/linux/platform_data/serial-sccnxp.h
@@ -0,0 +1,88 @@
+/*
+ * NXP (Philips) SCC+++(SCN+++) serial driver
+ *
+ * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * Based on sc26xx.c, by Thomas Bogendörfer (tsbogend@alpha.franken.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _PLATFORM_DATA_SERIAL_SCCNXP_H_
+#define _PLATFORM_DATA_SERIAL_SCCNXP_H_
+
+#define SCCNXP_MAX_UARTS 2
+
+/* Output lines */
+#define LINE_OP0 1
+#define LINE_OP1 2
+#define LINE_OP2 3
+#define LINE_OP3 4
+#define LINE_OP4 5
+#define LINE_OP5 6
+#define LINE_OP6 7
+#define LINE_OP7 8
+
+/* Input lines */
+#define LINE_IP0 9
+#define LINE_IP1 10
+#define LINE_IP2 11
+#define LINE_IP3 12
+#define LINE_IP4 13
+#define LINE_IP5 14
+#define LINE_IP6 15
+
+/* Signals */
+#define DTR_OP 0 /* DTR */
+#define RTS_OP 4 /* RTS */
+#define DSR_IP 8 /* DSR */
+#define CTS_IP 12 /* CTS */
+#define DCD_IP 16 /* DCD */
+#define RNG_IP 20 /* RNG */
+
+#define DIR_OP 24 /* Special signal for controlling RS-485.
+ * Goes high during transmit,
+ * then goes low.
+ */
+
+/* Routing control signal 'sig' to line 'line' */
+#define MCTRL_SIG(sig, line) ((line) << (sig))
+
+/*
+ * Example board initialization data:
+ *
+ * static struct resource sc2892_resources[] = {
+ * DEFINE_RES_MEM(UART_PHYS_START, 0x10),
+ * DEFINE_RES_IRQ(IRQ_EXT2),
+ * };
+ *
+ * static struct sccnxp_pdata sc2892_info = {
+ * .mctrl_cfg[0] = MCTRL_SIG(DIR_OP, LINE_OP0),
+ * .mctrl_cfg[1] = MCTRL_SIG(DIR_OP, LINE_OP1),
+ * };
+ *
+ * static struct platform_device sc2892 = {
+ * .name = "sc2892",
+ * .id = -1,
+ * .resource = sc2892_resources,
+ * .num_resources = ARRAY_SIZE(sc2892_resources),
+ * .dev = {
+ * .platform_data = &sc2892_info,
+ * },
+ * };
+ */
+
+/* SCCNXP platform data structure */
+struct sccnxp_pdata {
+ /* Shift for A0 line */
+ const u8 reg_shift;
+ /* Modem control lines configuration */
+ const u32 mctrl_cfg[SCCNXP_MAX_UARTS];
+ /* Timer value for polling mode (usecs) */
+ const unsigned int poll_time_us;
+};
+
+#endif
diff --git a/include/linux/platform_data/sh_ipmmu.h b/include/linux/platform_data/sh_ipmmu.h
new file mode 100644
index 000000000..39f7405cd
--- /dev/null
+++ b/include/linux/platform_data/sh_ipmmu.h
@@ -0,0 +1,18 @@
+/* sh_ipmmu.h
+ *
+ * Copyright (C) 2012 Hideki EIRAKU
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __SH_IPMMU_H__
+#define __SH_IPMMU_H__
+
+struct shmobile_ipmmu_platform_data {
+ const char * const *dev_names;
+ unsigned int num_dev_names;
+};
+
+#endif /* __SH_IPMMU_H__ */
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
new file mode 100644
index 000000000..7c686d335
--- /dev/null
+++ b/include/linux/platform_data/shmob_drm.h
@@ -0,0 +1,99 @@
+/*
+ * shmob_drm.h -- SH Mobile DRM driver
+ *
+ * Copyright (C) 2012 Renesas Corporation
+ *
+ * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __SHMOB_DRM_H__
+#define __SHMOB_DRM_H__
+
+#include <linux/kernel.h>
+
+#include <drm/drm_mode.h>
+
+struct sh_mobile_meram_cfg;
+struct sh_mobile_meram_info;
+
+enum shmob_drm_clk_source {
+ SHMOB_DRM_CLK_BUS,
+ SHMOB_DRM_CLK_PERIPHERAL,
+ SHMOB_DRM_CLK_EXTERNAL,
+};
+
+enum shmob_drm_interface {
+ SHMOB_DRM_IFACE_RGB8, /* 24bpp, 8:8:8 */
+ SHMOB_DRM_IFACE_RGB9, /* 18bpp, 9:9 */
+ SHMOB_DRM_IFACE_RGB12A, /* 24bpp, 12:12 */
+ SHMOB_DRM_IFACE_RGB12B, /* 12bpp */
+ SHMOB_DRM_IFACE_RGB16, /* 16bpp */
+ SHMOB_DRM_IFACE_RGB18, /* 18bpp */
+ SHMOB_DRM_IFACE_RGB24, /* 24bpp */
+ SHMOB_DRM_IFACE_YUV422, /* 16bpp */
+ SHMOB_DRM_IFACE_SYS8A, /* 24bpp, 8:8:8 */
+ SHMOB_DRM_IFACE_SYS8B, /* 18bpp, 8:8:2 */
+ SHMOB_DRM_IFACE_SYS8C, /* 18bpp, 2:8:8 */
+ SHMOB_DRM_IFACE_SYS8D, /* 16bpp, 8:8 */
+ SHMOB_DRM_IFACE_SYS9, /* 18bpp, 9:9 */
+ SHMOB_DRM_IFACE_SYS12, /* 24bpp, 12:12 */
+ SHMOB_DRM_IFACE_SYS16A, /* 16bpp */
+ SHMOB_DRM_IFACE_SYS16B, /* 18bpp, 16:2 */
+ SHMOB_DRM_IFACE_SYS16C, /* 18bpp, 2:16 */
+ SHMOB_DRM_IFACE_SYS18, /* 18bpp */
+ SHMOB_DRM_IFACE_SYS24, /* 24bpp */
+};
+
+struct shmob_drm_backlight_data {
+ const char *name;
+ int max_brightness;
+ int (*get_brightness)(void);
+ int (*set_brightness)(int brightness);
+};
+
+struct shmob_drm_panel_data {
+ unsigned int width_mm; /* Panel width in mm */
+ unsigned int height_mm; /* Panel height in mm */
+ struct drm_mode_modeinfo mode;
+};
+
+struct shmob_drm_sys_interface_data {
+ unsigned int read_latch:6;
+ unsigned int read_setup:8;
+ unsigned int read_cycle:8;
+ unsigned int read_strobe:8;
+ unsigned int write_setup:8;
+ unsigned int write_cycle:8;
+ unsigned int write_strobe:8;
+ unsigned int cs_setup:3;
+ unsigned int vsync_active_high:1;
+ unsigned int vsync_dir_input:1;
+};
+
+#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
+#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
+#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
+#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
+#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
+
+struct shmob_drm_interface_data {
+ enum shmob_drm_interface interface;
+ struct shmob_drm_sys_interface_data sys;
+ unsigned int clk_div;
+ unsigned int flags;
+};
+
+struct shmob_drm_platform_data {
+ enum shmob_drm_clk_source clk_source;
+ struct shmob_drm_interface_data iface;
+ struct shmob_drm_panel_data panel;
+ struct shmob_drm_backlight_data backlight;
+ const struct sh_mobile_meram_cfg *meram;
+};
+
+#endif /* __SHMOB_DRM_H__ */
diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h
new file mode 100644
index 000000000..12289c1e9
--- /dev/null
+++ b/include/linux/platform_data/sht15.h
@@ -0,0 +1,38 @@
+/*
+ * sht15.h - support for the SHT15 Temperature and Humidity Sensor
+ *
+ * Copyright (c) 2009 Jonathan Cameron
+ *
+ * Copyright (c) 2007 Wouter Horre
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * For further information, see the Documentation/hwmon/sht15 file.
+ */
+
+#ifndef _PDATA_SHT15_H
+#define _PDATA_SHT15_H
+
+/**
+ * struct sht15_platform_data - sht15 connectivity info
+ * @gpio_data: no. of gpio to which bidirectional data line is
+ * connected.
+ * @gpio_sck: no. of gpio to which the data clock is connected.
+ * @supply_mv: supply voltage in mv. Overridden by regulator if
+ * available.
+ * @checksum: flag to indicate the checksum should be validated.
+ * @no_otp_reload: flag to indicate no reload from OTP.
+ * @low_resolution: flag to indicate the temp/humidity resolution to use.
+ */
+struct sht15_platform_data {
+ int gpio_data;
+ int gpio_sck;
+ int supply_mv;
+ bool checksum;
+ bool no_otp_reload;
+ bool low_resolution;
+};
+
+#endif /* _PDATA_SHT15_H */
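For illustration, an assumed wiring with the data line on GPIO 100, the clock on GPIO 101 and a 3.3 V supply:

#include <linux/types.h>
#include <linux/platform_data/sht15.h>

static struct sht15_platform_data my_sht15_pdata = {
	.gpio_data	= 100,
	.gpio_sck	= 101,
	.supply_mv	= 3300,
	.checksum	= true,
	.no_otp_reload	= false,
};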
diff --git a/include/linux/platform_data/shtc1.h b/include/linux/platform_data/shtc1.h
new file mode 100644
index 000000000..7b8c353f7
--- /dev/null
+++ b/include/linux/platform_data/shtc1.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Sensirion AG, Switzerland
+ * Author: Johannes Winkelmann <johannes.winkelmann@sensirion.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SHTC1_H_
+#define __SHTC1_H_
+
+struct shtc1_platform_data {
+ bool blocking_io;
+ bool high_precision;
+};
+#endif /* __SHTC1_H_ */
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h
new file mode 100644
index 000000000..533d9807e
--- /dev/null
+++ b/include/linux/platform_data/si5351.h
@@ -0,0 +1,112 @@
+/*
+ * Si5351A/B/C programmable clock generator platform_data.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_SI5351_H__
+#define __LINUX_PLATFORM_DATA_SI5351_H__
+
+/**
+ * enum si5351_pll_src - Si5351 pll clock source
+ * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
+ * @SI5351_PLL_SRC_XTAL: pll source clock is XTAL input
+ * @SI5351_PLL_SRC_CLKIN: pll source clock is CLKIN input (Si5351C only)
+ */
+enum si5351_pll_src {
+ SI5351_PLL_SRC_DEFAULT = 0,
+ SI5351_PLL_SRC_XTAL = 1,
+ SI5351_PLL_SRC_CLKIN = 2,
+};
+
+/**
+ * enum si5351_multisynth_src - Si5351 multisynth clock source
+ * @SI5351_MULTISYNTH_SRC_DEFAULT: default, do not change eeprom config
+ * @SI5351_MULTISYNTH_SRC_VCO0: multisynth source clock is VCO0
+ * @SI5351_MULTISYNTH_SRC_VCO1: multisynth source clock is VCO1/VXCO
+ */
+enum si5351_multisynth_src {
+ SI5351_MULTISYNTH_SRC_DEFAULT = 0,
+ SI5351_MULTISYNTH_SRC_VCO0 = 1,
+ SI5351_MULTISYNTH_SRC_VCO1 = 2,
+};
+
+/**
+ * enum si5351_clkout_src - Si5351 clock output clock source
+ * @SI5351_CLKOUT_SRC_DEFAULT: default, do not change eeprom config
+ * @SI5351_CLKOUT_SRC_MSYNTH_N: clkout N source clock is multisynth N
+ * @SI5351_CLKOUT_SRC_MSYNTH_0_4: clkout N source clock is multisynth 0 (N<4)
+ * or 4 (N>=4)
+ * @SI5351_CLKOUT_SRC_XTAL: clkout N source clock is XTAL
+ * @SI5351_CLKOUT_SRC_CLKIN: clkout N source clock is CLKIN (Si5351C only)
+ */
+enum si5351_clkout_src {
+ SI5351_CLKOUT_SRC_DEFAULT = 0,
+ SI5351_CLKOUT_SRC_MSYNTH_N = 1,
+ SI5351_CLKOUT_SRC_MSYNTH_0_4 = 2,
+ SI5351_CLKOUT_SRC_XTAL = 3,
+ SI5351_CLKOUT_SRC_CLKIN = 4,
+};
+
+/**
+ * enum si5351_drive_strength - Si5351 clock output drive strength
+ * @SI5351_DRIVE_DEFAULT: default, do not change eeprom config
+ * @SI5351_DRIVE_2MA: 2mA clock output drive strength
+ * @SI5351_DRIVE_4MA: 4mA clock output drive strength
+ * @SI5351_DRIVE_6MA: 6mA clock output drive strength
+ * @SI5351_DRIVE_8MA: 8mA clock output drive strength
+ */
+enum si5351_drive_strength {
+ SI5351_DRIVE_DEFAULT = 0,
+ SI5351_DRIVE_2MA = 2,
+ SI5351_DRIVE_4MA = 4,
+ SI5351_DRIVE_6MA = 6,
+ SI5351_DRIVE_8MA = 8,
+};
+
+/**
+ * enum si5351_disable_state - Si5351 clock output disable state
+ * @SI5351_DISABLE_DEFAULT: default, do not change eeprom config
+ * @SI5351_DISABLE_LOW: CLKx is set to a LOW state when disabled
+ * @SI5351_DISABLE_HIGH: CLKx is set to a HIGH state when disabled
+ * @SI5351_DISABLE_FLOATING: CLKx is set to a FLOATING state when
+ * disabled
+ * @SI5351_DISABLE_NEVER: CLKx is NEVER disabled
+ */
+enum si5351_disable_state {
+ SI5351_DISABLE_DEFAULT = 0,
+ SI5351_DISABLE_LOW,
+ SI5351_DISABLE_HIGH,
+ SI5351_DISABLE_FLOATING,
+ SI5351_DISABLE_NEVER,
+};
+
+/**
+ * struct si5351_clkout_config - Si5351 clock output configuration
+ * @clkout: clkout number
+ * @multisynth_src: multisynth source clock
+ * @clkout_src: clkout source clock
+ * @pll_master: if true, clkout can also change pll rate
+ * @drive: output drive strength
+ * @rate: initial clkout rate, or default if 0
+ */
+struct si5351_clkout_config {
+ enum si5351_multisynth_src multisynth_src;
+ enum si5351_clkout_src clkout_src;
+ enum si5351_drive_strength drive;
+ enum si5351_disable_state disable_state;
+ bool pll_master;
+ unsigned long rate;
+};
+
+/**
+ * struct si5351_platform_data - Platform data for the Si5351 clock driver
+ * @clk_xtal: xtal input clock
+ * @clk_clkin: clkin input clock
+ * @pll_src: array of pll source clock setting
+ * @clkout: array of clkout configuration
+ */
+struct si5351_platform_data {
+ enum si5351_pll_src pll_src[2];
+ struct si5351_clkout_config clkout[8];
+};
+
+#endif
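A configuration sketch exercising the enums above: both PLLs run from the crystal and only CLK0 is configured, the remaining outputs keep their EEPROM defaults. All values are illustrative:

#include <linux/types.h>
#include <linux/platform_data/si5351.h>

static struct si5351_platform_data my_si5351_pdata = {
	.pll_src = {
		SI5351_PLL_SRC_XTAL,
		SI5351_PLL_SRC_XTAL,
	},
	.clkout = {
		[0] = {
			.multisynth_src	= SI5351_MULTISYNTH_SRC_VCO0,
			.clkout_src	= SI5351_CLKOUT_SRC_MSYNTH_N,
			.drive		= SI5351_DRIVE_8MA,
			.disable_state	= SI5351_DISABLE_LOW,
			.pll_master	= true,
			.rate		= 25000000,	/* 25 MHz, assumed */
		},
	},
};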
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
new file mode 100644
index 000000000..077303ced
--- /dev/null
+++ b/include/linux/platform_data/simplefb.h
@@ -0,0 +1,64 @@
+/*
+ * simplefb.h - Simple Framebuffer Device
+ *
+ * Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PLATFORM_DATA_SIMPLEFB_H__
+#define __PLATFORM_DATA_SIMPLEFB_H__
+
+#include <drm/drm_fourcc.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+
+/* format array, use it to initialize a "struct simplefb_format" array */
+#define SIMPLEFB_FORMATS \
+{ \
+ { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \
+ { "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \
+ { "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \
+ { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
+ { "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \
+ { "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \
+ { "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8}, DRM_FORMAT_ABGR8888 }, \
+ { "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \
+ { "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \
+}
+
+/*
+ * Data-Format for Simple-Framebuffers
+ * @name: unique 0-terminated name that can be used to identify the mode
+ * @red,green,blue: Offsets and sizes of the single RGB parts
+ * @transp: Offset and size of the alpha bits. length=0 means no alpha
+ * @fourcc: 32bit DRM four-CC code (see drm_fourcc.h)
+ */
+struct simplefb_format {
+ const char *name;
+ u32 bits_per_pixel;
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+ u32 fourcc;
+};
+
+/*
+ * Simple-Framebuffer description
+ * If the arch-boot code creates simple-framebuffers without DT support, it
+ * can pass the width, height, stride and format via this platform-data object.
+ * The framebuffer location must be given as IORESOURCE_MEM resource.
+ * @format must be a format as described in "struct simplefb_format" above.
+ */
+struct simplefb_platform_data {
+ u32 width;
+ u32 height;
+ u32 stride;
+ const char *format;
+};
+
+#endif /* __PLATFORM_DATA_SIMPLEFB_H__ */
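A boot-code sketch for an assumed 800x480 RGB565 framebuffer left behind by the bootloader; the physical address and the "simple-framebuffer" device name are assumptions:

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/simplefb.h>

static struct simplefb_platform_data my_fb_pdata = {
	.width	= 800,
	.height	= 480,
	.stride	= 800 * 2,		/* bytes per line for r5g6b5 */
	.format	= "r5g6b5",
};

/* 0x8f000000 is a made-up physical address for the scanout memory. */
static struct resource my_fb_res = DEFINE_RES_MEM(0x8f000000, 800 * 480 * 2);

static struct platform_device my_fb_dev = {
	.name		= "simple-framebuffer",
	.id		= -1,
	.resource	= &my_fb_res,
	.num_resources	= 1,
	.dev = {
		.platform_data = &my_fb_pdata,
	},
};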
diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h
new file mode 100644
index 000000000..1231e9bb0
--- /dev/null
+++ b/include/linux/platform_data/sky81452-backlight.h
@@ -0,0 +1,46 @@
+/*
+ * sky81452.h SKY81452 backlight driver
+ *
+ * Copyright 2014 Skyworks Solutions Inc.
+ * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SKY81452_BACKLIGHT_H
+#define _SKY81452_BACKLIGHT_H
+
+/**
+ * struct sky81452_bl_platform_data
+ * @name: backlight driver name.
+ *	If it is not defined, the default name is "lcd-backlight".
+ * @gpio_enable: GPIO number which controls the EN pin
+ * @enable: Enable mask for current sink channels 1, 2, 3, 4, 5 and 6.
+ * @ignore_pwm: true if DPWMI should be ignored.
+ * @dpwm_mode: true for DPWM dimming mode, otherwise analog dimming mode.
+ * @phase_shift: true for phase shift mode.
+ * @short_detection_threshold: It should be one of 4, 5, 6 or 7 V.
+ * @boost_current_limit: It should be one of 2300 or 2750 mA.
+ */
+struct sky81452_bl_platform_data {
+ const char *name;
+ int gpio_enable;
+ unsigned int enable;
+ bool ignore_pwm;
+ bool dpwm_mode;
+ bool phase_shift;
+ unsigned int short_detection_threshold;
+ unsigned int boost_current_limit;
+};
+
+#endif
diff --git a/include/linux/platform_data/spi-clps711x.h b/include/linux/platform_data/spi-clps711x.h
new file mode 100644
index 000000000..301956e63
--- /dev/null
+++ b/include/linux/platform_data/spi-clps711x.h
@@ -0,0 +1,21 @@
+/*
+ * CLPS711X SPI bus driver definitions
+ *
+ * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
+#define ____LINUX_PLATFORM_DATA_SPI_CLPS711X_H
+
+/* Board specific platform_data */
+struct spi_clps711x_pdata {
+ int *chipselect; /* Array of GPIO-numbers */
+ int num_chipselect; /* Total count of GPIOs */
+};
+
+#endif
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
new file mode 100644
index 000000000..8dc2fa47a
--- /dev/null
+++ b/include/linux/platform_data/spi-davinci.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2009 Texas Instruments.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __ARCH_ARM_DAVINCI_SPI_H
+#define __ARCH_ARM_DAVINCI_SPI_H
+
+#include <linux/platform_data/edma.h>
+
+#define SPI_INTERN_CS 0xFF
+
+enum {
+ SPI_VERSION_1, /* For DM355/DM365/DM6467 */
+ SPI_VERSION_2, /* For DA8xx */
+};
+
+/**
+ * davinci_spi_platform_data - Platform data for SPI master device on DaVinci
+ *
+ * @version: version of the SPI IP. Different DaVinci devices have slightly
+ * varying versions of the same IP.
+ * @num_chipselect: number of chipselects supported by this SPI master
+ * @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt
+ * controller within the SoC. Possible values are 0 and 1.
+ * @chip_sel: list of GPIOs which can act as chip-selects for the SPI.
+ * SPI_INTERN_CS denotes internal SPI chip-select. Not necessary
+ * to populate if all chip-selects are internal.
+ * @cshold_bug: set this to true if the SPI controller on your chip requires
+ * a write to CSHOLD bit in between transfers (like in DM355).
+ * @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
+ * device on the bus.
+ */
+struct davinci_spi_platform_data {
+ u8 version;
+ u8 num_chipselect;
+ u8 intr_line;
+ u8 *chip_sel;
+ bool cshold_bug;
+ enum dma_event_q dma_event_q;
+};
+
+/**
+ * davinci_spi_config - Per-chip-select configuration for SPI slave devices
+ *
+ * @wdelay: amount of delay between transmissions. Measured in number of
+ * SPI module clocks.
+ * @odd_parity: polarity of parity flag at the end of transmit data stream.
+ * 0 - odd parity, 1 - even parity.
+ * @parity_enable: enable transmission of parity at end of each transmit
+ * data stream.
+ * @io_type: type of IO transfer. Choose between polled, interrupt and DMA.
+ * @timer_disable: disable chip-select timers (setup and hold)
+ * @c2tdelay: chip-select setup time. Measured in number of SPI module clocks.
+ * @t2cdelay: chip-select hold time. Measured in number of SPI module clocks.
+ * @t2edelay: transmit data finished to SPI ENAn pin inactive time. Measured
+ * in number of SPI clocks.
+ * @c2edelay: chip-select active to SPI ENAn signal active time. Measured in
+ * number of SPI clocks.
+ */
+struct davinci_spi_config {
+ u8 wdelay;
+ u8 odd_parity;
+ u8 parity_enable;
+#define SPI_IO_TYPE_INTR 0
+#define SPI_IO_TYPE_POLL 1
+#define SPI_IO_TYPE_DMA 2
+ u8 io_type;
+ u8 timer_disable;
+ u8 c2tdelay;
+ u8 t2cdelay;
+ u8 t2edelay;
+ u8 c2edelay;
+};
+
+#endif /* __ARCH_ARM_DAVINCI_SPI_H */
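A sketch with made-up wiring: CS0 is the internal chip-select, CS1 is GPIO 42, and one slave uses DMA with assumed setup/hold delays:

#include <linux/kernel.h>
#include <linux/platform_data/spi-davinci.h>

static u8 my_spi1_chip_sel[] = { SPI_INTERN_CS, 42 };

static struct davinci_spi_platform_data my_spi1_pdata = {
	.version	= SPI_VERSION_2,
	.num_chipselect	= ARRAY_SIZE(my_spi1_chip_sel),
	.intr_line	= 1,
	.chip_sel	= my_spi1_chip_sel,
};

/* Pointed to by spi_board_info.controller_data for the slave in question. */
static struct davinci_spi_config my_spi_flash_cfg = {
	.io_type	= SPI_IO_TYPE_DMA,
	.c2tdelay	= 8,
	.t2cdelay	= 8,
};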
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
new file mode 100644
index 000000000..9bb63ac13
--- /dev/null
+++ b/include/linux/platform_data/spi-ep93xx.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_MACH_EP93XX_SPI_H
+#define __ASM_MACH_EP93XX_SPI_H
+
+struct spi_device;
+
+/**
+ * struct ep93xx_spi_info - EP93xx specific SPI descriptor
+ * @num_chipselect: number of chip selects on this board, must be
+ * at least one
+ * @use_dma: use DMA for the transfers
+ */
+struct ep93xx_spi_info {
+ int num_chipselect;
+ bool use_dma;
+};
+
+/**
+ * struct ep93xx_spi_chip_ops - operation callbacks for SPI slave device
+ * @setup: setup the chip select mechanism
+ * @cleanup: cleanup the chip select mechanism
+ * @cs_control: control the device chip select
+ */
+struct ep93xx_spi_chip_ops {
+ int (*setup)(struct spi_device *spi);
+ void (*cleanup)(struct spi_device *spi);
+ void (*cs_control)(struct spi_device *spi, int value);
+};
+
+#endif /* __ASM_MACH_EP93XX_SPI_H */
diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h
new file mode 100644
index 000000000..08be445e8
--- /dev/null
+++ b/include/linux/platform_data/spi-imx.h
@@ -0,0 +1,27 @@
+
+#ifndef __MACH_SPI_H_
+#define __MACH_SPI_H_
+
+/*
+ * struct spi_imx_master - device.platform_data for SPI controller devices.
+ * @chipselect: Array of chipselects for this master. Numbers >= 0 mean gpio
+ * pins, numbers < 0 mean internal CSPI chipselects according
+ * to MXC_SPI_CS(). Normally you want to use gpio based chip
+ * selects as the CSPI module tries to be intelligent about
+ * when to assert the chipselect: The CSPI module deasserts the
+ * chipselect once it runs out of input data. The other problem
+ * is that it is not possible to mix between high active and low
+ * active chipselects on one single bus using the internal
+ * chipselects. Unfortunately Freescale decided to put some
+ * chipselects on dedicated pins which are not usable as gpios,
+ * so we have to support the internal chipselects.
+ * @num_chipselect: ARRAY_SIZE(chipselect)
+ */
+struct spi_imx_master {
+ int *chipselect;
+ int num_chipselect;
+};
+
+#define MXC_SPI_CS(no) ((no) - 32)
+
+#endif /* __MACH_SPI_H_*/
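A sketch mixing GPIO and internal chip-selects as the comment above describes; the GPIO numbers are assumptions:

#include <linux/kernel.h>
#include <linux/platform_data/spi-imx.h>

/* Two GPIO chip-selects plus internal CSPI chip-select 0. */
static int my_spi0_cs[] = { 25, 26, MXC_SPI_CS(0) };

static struct spi_imx_master my_spi0_pdata = {
	.chipselect	= my_spi0_cs,
	.num_chipselect	= ARRAY_SIZE(my_spi0_cs),
};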
diff --git a/include/linux/platform_data/spi-nuc900.h b/include/linux/platform_data/spi-nuc900.h
new file mode 100644
index 000000000..4b3f46832
--- /dev/null
+++ b/include/linux/platform_data/spi-nuc900.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation;version 2 of the License.
+ *
+ */
+
+#ifndef __SPI_NUC900_H
+#define __SPI_NUC900_H
+
+extern void mfp_set_groupg(struct device *dev, const char *subname);
+
+struct nuc900_spi_info {
+ unsigned int num_cs;
+ unsigned int lsb;
+ unsigned int txneg;
+ unsigned int rxneg;
+ unsigned int divider;
+ unsigned int sleep;
+ unsigned int txnum;
+ unsigned int txbitlen;
+ int bus_num;
+};
+
+struct nuc900_spi_chip {
+ unsigned char bits_per_word;
+};
+
+#endif /* __SPI_NUC900_H */
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
new file mode 100644
index 000000000..c100456ea
--- /dev/null
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -0,0 +1,30 @@
+#ifndef _OMAP2_MCSPI_H
+#define _OMAP2_MCSPI_H
+
+#define OMAP2_MCSPI_REV 0
+#define OMAP3_MCSPI_REV 1
+#define OMAP4_MCSPI_REV 2
+
+#define OMAP4_MCSPI_REG_OFFSET 0x100
+
+#define MCSPI_PINDIR_D0_IN_D1_OUT 0
+#define MCSPI_PINDIR_D0_OUT_D1_IN 1
+
+struct omap2_mcspi_platform_config {
+ unsigned short num_cs;
+ unsigned int regs_offset;
+ unsigned int pin_dir:1;
+};
+
+struct omap2_mcspi_dev_attr {
+ unsigned short num_chipselect;
+};
+
+struct omap2_mcspi_device_config {
+ unsigned turbo_mode:1;
+
+ /* toggle chip select after every word */
+ unsigned cs_per_word:1;
+};
+
+#endif
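For illustration, an assumed OMAP4-style controller instance with four chip-selects:

#include <linux/platform_data/spi-omap2-mcspi.h>

static struct omap2_mcspi_platform_config my_mcspi_cfg = {
	.num_cs		= 4,
	.regs_offset	= OMAP4_MCSPI_REG_OFFSET,
};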
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
new file mode 100644
index 000000000..d3889b98a
--- /dev/null
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2009 Samsung Electronics Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SPI_S3C64XX_H
+#define __SPI_S3C64XX_H
+
+#include <linux/dmaengine.h>
+
+struct platform_device;
+
+/**
+ * struct s3c64xx_spi_csinfo - ChipSelect description
+ * @fb_delay: Slave specific feedback delay.
+ * Refer to FB_CLK_SEL register definition in SPI chapter.
+ * @line: Custom 'identity' of the CS line.
+ *
+ * This is per SPI-Slave Chipselect information.
+ * Allocate and initialize one in machine init code and make the
+ * spi_board_info.controller_data point to it.
+ */
+struct s3c64xx_spi_csinfo {
+ u8 fb_delay;
+ unsigned line;
+};
+
+/**
+ * struct s3c64xx_spi_info - SPI Controller defining structure
+ * @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field.
+ * @num_cs: Number of CS this controller emulates.
+ * @cfg_gpio: Configure pins for this SPI controller.
+ */
+struct s3c64xx_spi_info {
+ int src_clk_nr;
+ int num_cs;
+ int (*cfg_gpio)(void);
+ dma_filter_fn filter;
+};
+
+/**
+ * s3c64xx_spi_set_platdata - SPI controller configuration call made from the
+ * board initialization code.
+ * @cfg_gpio: Pointer to gpio setup function.
+ * @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks.
+ * @num_cs: Number of elements in the 'cs' array.
+ *
+ * Call this from machine init code for each SPI Controller that
+ * has some chips attached to it.
+ */
+extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs);
+extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs);
+extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs);
+
+/* defined by architecture to configure gpio */
+extern int s3c64xx_spi0_cfg_gpio(void);
+extern int s3c64xx_spi1_cfg_gpio(void);
+extern int s3c64xx_spi2_cfg_gpio(void);
+
+extern struct s3c64xx_spi_info s3c64xx_spi0_pdata;
+extern struct s3c64xx_spi_info s3c64xx_spi1_pdata;
+extern struct s3c64xx_spi_info s3c64xx_spi2_pdata;
+#endif /*__SPI_S3C64XX_H */
diff --git a/include/linux/platform_data/ssm2518.h b/include/linux/platform_data/ssm2518.h
new file mode 100644
index 000000000..9a8e3ea28
--- /dev/null
+++ b/include/linux/platform_data/ssm2518.h
@@ -0,0 +1,22 @@
+/*
+ * SSM2518 amplifier audio driver
+ *
+ * Copyright 2013 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_SSM2518_H__
+#define __LINUX_PLATFORM_DATA_SSM2518_H__
+
+/**
+ * struct ssm2518_platform_data - Platform data for the ssm2518 driver
+ * @enable_gpio: GPIO connected to the nSD pin. Set to -1 if the nSD pin is
+ * hardwired.
+ */
+struct ssm2518_platform_data {
+ int enable_gpio;
+};
+
+#endif
diff --git a/include/linux/platform_data/st1232_pdata.h b/include/linux/platform_data/st1232_pdata.h
new file mode 100644
index 000000000..cac3e7b4c
--- /dev/null
+++ b/include/linux/platform_data/st1232_pdata.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_ST1232_PDATA_H
+#define _LINUX_ST1232_PDATA_H
+
+/*
+ * Optional platform data
+ *
+ * Use this if you want the driver to drive the reset pin.
+ */
+struct st1232_pdata {
+ int reset_gpio;
+};
+
+#endif
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
new file mode 100644
index 000000000..cc2bdafb0
--- /dev/null
+++ b/include/linux/platform_data/st21nfca.h
@@ -0,0 +1,33 @@
+/*
+ * Driver include for the ST21NFCA NFC chip.
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ST21NFCA_HCI_H_
+#define _ST21NFCA_HCI_H_
+
+#include <linux/i2c.h>
+
+#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
+
+struct st21nfca_nfc_platform_data {
+ unsigned int gpio_ena;
+ unsigned int irq_polarity;
+ bool is_ese_present;
+ bool is_uicc_present;
+};
+
+#endif /* _ST21NFCA_HCI_H_ */
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
new file mode 100644
index 000000000..b023373d9
--- /dev/null
+++ b/include/linux/platform_data/st21nfcb.h
@@ -0,0 +1,29 @@
+/*
+ * Driver include for the ST21NFCB NFC chip.
+ *
+ * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ST21NFCB_NCI_H_
+#define _ST21NFCB_NCI_H_
+
+#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
+
+struct st21nfcb_nfc_platform_data {
+ unsigned int gpio_reset;
+ unsigned int irq_polarity;
+};
+
+#endif /* _ST21NFCB_NCI_H_ */
diff --git a/include/linux/platform_data/st33zp24.h b/include/linux/platform_data/st33zp24.h
new file mode 100644
index 000000000..817dfdb37
--- /dev/null
+++ b/include/linux/platform_data/st33zp24.h
@@ -0,0 +1,28 @@
+/*
+ * STMicroelectronics TPM Linux driver for TPM 1.2 ST33ZP24
+ * Copyright (C) 2009 - 2015 STMicroelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ST33ZP24_H__
+#define __ST33ZP24_H__
+
+#define TPM_ST33_I2C "st33zp24-i2c"
+#define TPM_ST33_SPI "st33zp24-spi"
+
+struct st33zp24_platform_data {
+ int io_lpcpd;
+};
+
+#endif /* __ST33ZP24_H__ */
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
new file mode 100644
index 000000000..753839187
--- /dev/null
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -0,0 +1,24 @@
+/*
+ * STMicroelectronics sensors platform-data driver
+ *
+ * Copyright 2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_SENSORS_PDATA_H
+#define ST_SENSORS_PDATA_H
+
+/**
+ * struct st_sensors_platform_data - Platform data for the ST sensors
+ * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
+ * Available only for accelerometer and pressure sensors.
+ * Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ */
+struct st_sensors_platform_data {
+ u8 drdy_int_pin;
+};
+
+#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/platform_data/syscon.h b/include/linux/platform_data/syscon.h
new file mode 100644
index 000000000..2354c6fa3
--- /dev/null
+++ b/include/linux/platform_data/syscon.h
@@ -0,0 +1,8 @@
+#ifndef PLATFORM_DATA_SYSCON_H
+#define PLATFORM_DATA_SYSCON_H
+
+struct syscon_platform_data {
+ const char *label;
+};
+
+#endif
diff --git a/include/linux/platform_data/touchscreen-s3c2410.h b/include/linux/platform_data/touchscreen-s3c2410.h
new file mode 100644
index 000000000..58dc7c5ae
--- /dev/null
+++ b/include/linux/platform_data/touchscreen-s3c2410.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __TOUCHSCREEN_S3C2410_H
+#define __TOUCHSCREEN_S3C2410_H
+
+struct s3c2410_ts_mach_info {
+ int delay;
+ int presc;
+ int oversampling_shift;
+ void (*cfg_gpio)(struct platform_device *dev);
+};
+
+extern void s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *);
+
+/* defined by architecture to configure gpio */
+extern void s3c24xx_ts_cfg_gpio(struct platform_device *dev);
+
+#endif /*__TOUCHSCREEN_S3C2410_H */
diff --git a/include/linux/platform_data/tsl2563.h b/include/linux/platform_data/tsl2563.h
new file mode 100644
index 000000000..c90d7a09d
--- /dev/null
+++ b/include/linux/platform_data/tsl2563.h
@@ -0,0 +1,8 @@
+#ifndef __LINUX_TSL2563_H
+#define __LINUX_TSL2563_H
+
+struct tsl2563_platform_data {
+ int cover_comp_gain;
+};
+
+#endif /* __LINUX_TSL2563_H */
diff --git a/include/linux/platform_data/uio_dmem_genirq.h b/include/linux/platform_data/uio_dmem_genirq.h
new file mode 100644
index 000000000..973c1bb32
--- /dev/null
+++ b/include/linux/platform_data/uio_dmem_genirq.h
@@ -0,0 +1,26 @@
+/*
+ * include/linux/platform_data/uio_dmem_genirq.h
+ *
+ * Copyright (C) 2012 Damian Hobson-Garcia
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UIO_DMEM_GENIRQ_H
+#define _UIO_DMEM_GENIRQ_H
+
+#include <linux/uio_driver.h>
+
+struct uio_dmem_genirq_pdata {
+ struct uio_info uioinfo;
+ unsigned int *dynamic_region_sizes;
+ unsigned int num_dynamic_regions;
+};
+#endif /* _UIO_DMEM_GENIRQ_H */
diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h
new file mode 100644
index 000000000..3d47d2198
--- /dev/null
+++ b/include/linux/platform_data/uio_pruss.h
@@ -0,0 +1,26 @@
+/*
+ * include/linux/platform_data/uio_pruss.h
+ *
+ * Platform data for uio_pruss driver
+ *
+ * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UIO_PRUSS_H_
+#define _UIO_PRUSS_H_
+
+/* To configure the PRUSS INTC base offset for UIO driver */
+struct uio_pruss_pdata {
+ u32 pintc_base;
+ struct gen_pool *sram_pool;
+};
+#endif /* _UIO_PRUSS_H_ */
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h
new file mode 100644
index 000000000..e0bc4abe6
--- /dev/null
+++ b/include/linux/platform_data/usb-davinci.h
@@ -0,0 +1,59 @@
+/*
+ * USB related definitions
+ *
+ * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_USB_H
+#define __ASM_ARCH_USB_H
+
+/* DA8xx CFGCHIP2 (USB 2.0 PHY Control) register bits */
+#define CFGCHIP2_PHYCLKGD (1 << 17)
+#define CFGCHIP2_VBUSSENSE (1 << 16)
+#define CFGCHIP2_RESET (1 << 15)
+#define CFGCHIP2_OTGMODE (3 << 13)
+#define CFGCHIP2_NO_OVERRIDE (0 << 13)
+#define CFGCHIP2_FORCE_HOST (1 << 13)
+#define CFGCHIP2_FORCE_DEVICE (2 << 13)
+#define CFGCHIP2_FORCE_HOST_VBUS_LOW (3 << 13)
+#define CFGCHIP2_USB1PHYCLKMUX (1 << 12)
+#define CFGCHIP2_USB2PHYCLKMUX (1 << 11)
+#define CFGCHIP2_PHYPWRDN (1 << 10)
+#define CFGCHIP2_OTGPWRDN (1 << 9)
+#define CFGCHIP2_DATPOL (1 << 8)
+#define CFGCHIP2_USB1SUSPENDM (1 << 7)
+#define CFGCHIP2_PHY_PLLON (1 << 6) /* override PLL suspend */
+#define CFGCHIP2_SESENDEN (1 << 5) /* Vsess_end comparator */
+#define CFGCHIP2_VBDTCTEN (1 << 4) /* Vbus comparator */
+#define CFGCHIP2_REFFREQ (0xf << 0)
+#define CFGCHIP2_REFFREQ_12MHZ (1 << 0)
+#define CFGCHIP2_REFFREQ_24MHZ (2 << 0)
+#define CFGCHIP2_REFFREQ_48MHZ (3 << 0)
+
+struct da8xx_ohci_root_hub;
+
+typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub,
+ unsigned port);
+
+/* Passed as the platform data to the OHCI driver */
+struct da8xx_ohci_root_hub {
+ /* Switch the port power on/off */
+ int (*set_power)(unsigned port, int on);
+ /* Read the port power status */
+ int (*get_power)(unsigned port);
+ /* Read the port over-current indicator */
+ int (*get_oci)(unsigned port);
+ /* Over-current indicator change notification (pass NULL to disable) */
+ int (*ocic_notify)(da8xx_ocic_handler_t handler);
+
+ /* Time from power on to power good (in 2 ms units) */
+ u8 potpgt;
+};
+
+void davinci_setup_usb(unsigned mA, unsigned potpgt_ms);
+
+#endif /* ifndef __ASM_ARCH_USB_H */
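
The root-hub callbacks above are normally backed by board-specific GPIO or regulator code. A hedged, hypothetical sketch of the platform data a DA8xx board might hand to the OHCI driver (the example_* helpers and the 250 ms power-good time are placeholders, not taken from any real board):

#include <linux/platform_data/usb-davinci.h>

static int example_ohci_set_power(unsigned port, int on)
{
	/* Switch the board's per-port VBUS supply here. */
	return 0;
}

static int example_ohci_get_power(unsigned port)
{
	return 1;	/* pretend port power is always good */
}

static int example_ohci_get_oci(unsigned port)
{
	return 0;	/* no over-current condition reported */
}

static struct da8xx_ohci_root_hub example_ohci_pdata = {
	.set_power	= example_ohci_set_power,
	.get_power	= example_ohci_get_power,
	.get_oci	= example_ohci_get_oci,
	.potpgt		= (250 + 1) / 2,	/* 250 ms, in 2 ms units */
};
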
diff --git a/include/linux/platform_data/usb-ehci-mxc.h b/include/linux/platform_data/usb-ehci-mxc.h
new file mode 100644
index 000000000..157e71f79
--- /dev/null
+++ b/include/linux/platform_data/usb-ehci-mxc.h
@@ -0,0 +1,13 @@
+#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
+#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
+
+struct mxc_usbh_platform_data {
+ int (*init)(struct platform_device *pdev);
+ int (*exit)(struct platform_device *pdev);
+
+ unsigned int portsc;
+ struct usb_phy *otg;
+};
+
+#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
+
diff --git a/include/linux/platform_data/usb-ehci-orion.h b/include/linux/platform_data/usb-ehci-orion.h
new file mode 100644
index 000000000..52b0acb35
--- /dev/null
+++ b/include/linux/platform_data/usb-ehci-orion.h
@@ -0,0 +1,24 @@
+/*
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __USB_EHCI_ORION_H
+#define __USB_EHCI_ORION_H
+
+#include <linux/mbus.h>
+
+enum orion_ehci_phy_ver {
+ EHCI_PHY_ORION,
+ EHCI_PHY_DD,
+ EHCI_PHY_KW,
+ EHCI_PHY_NA,
+};
+
+struct orion_ehci_data {
+ enum orion_ehci_phy_ver phy_version;
+};
+
+
+#endif
diff --git a/include/linux/platform_data/usb-musb-ux500.h b/include/linux/platform_data/usb-musb-ux500.h
new file mode 100644
index 000000000..dd9c83ac7
--- /dev/null
+++ b/include/linux/platform_data/usb-musb-ux500.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#ifndef __ASM_ARCH_USB_H
+#define __ASM_ARCH_USB_H
+
+#include <linux/dmaengine.h>
+
+#define UX500_MUSB_DMA_NUM_RX_TX_CHANNELS 8
+
+struct ux500_musb_board_data {
+ void **dma_rx_param_array;
+ void **dma_tx_param_array;
+ bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
+};
+
+void ux500_add_usb(struct device *parent, resource_size_t base,
+ int irq, int *dma_rx_cfg, int *dma_tx_cfg);
+#endif
diff --git a/include/linux/platform_data/usb-mx2.h b/include/linux/platform_data/usb-mx2.h
new file mode 100644
index 000000000..22d0b5962
--- /dev/null
+++ b/include/linux/platform_data/usb-mx2.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2009 Martin Fuzzey <mfuzzey@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MX21_USBH
+#define __ASM_ARCH_MX21_USBH
+
+enum mx21_usbh_xcvr {
+ /* Values below as used by hardware (HWMODE register) */
+ MX21_USBXCVR_TXDIF_RXDIF = 0,
+ MX21_USBXCVR_TXDIF_RXSE = 1,
+ MX21_USBXCVR_TXSE_RXDIF = 2,
+ MX21_USBXCVR_TXSE_RXSE = 3,
+};
+
+struct mx21_usbh_platform_data {
+ enum mx21_usbh_xcvr host_xcvr; /* transceiver mode host 1,2 ports */
+ enum mx21_usbh_xcvr otg_xcvr; /* transceiver mode otg (as host) port */
+ u16 enable_host1:1,
+ enable_host2:1,
+ enable_otg_host:1, /* enable "OTG" port (as host) */
+ host1_xcverless:1, /* transceiverless host1 port */
+ host1_txenoe:1, /* output enable host1 transmit enable */
+ otg_ext_xcvr:1, /* external transceiver for OTG port */
+ unused:10;
+};
+
+#endif /* __ASM_ARCH_MX21_USBH */
diff --git a/include/linux/platform_data/usb-ohci-pxa27x.h b/include/linux/platform_data/usb-ohci-pxa27x.h
new file mode 100644
index 000000000..95b6e2a6e
--- /dev/null
+++ b/include/linux/platform_data/usb-ohci-pxa27x.h
@@ -0,0 +1,36 @@
+#ifndef ASMARM_ARCH_OHCI_H
+#define ASMARM_ARCH_OHCI_H
+
+struct device;
+
+struct pxaohci_platform_data {
+ int (*init)(struct device *);
+ void (*exit)(struct device *);
+
+ unsigned long flags;
+#define ENABLE_PORT1 (1 << 0)
+#define ENABLE_PORT2 (1 << 1)
+#define ENABLE_PORT3 (1 << 2)
+#define ENABLE_PORT_ALL (ENABLE_PORT1 | ENABLE_PORT2 | ENABLE_PORT3)
+
+#define POWER_SENSE_LOW (1 << 3)
+#define POWER_CONTROL_LOW (1 << 4)
+#define NO_OC_PROTECTION (1 << 5)
+#define OC_MODE_GLOBAL (0 << 6)
+#define OC_MODE_PERPORT (1 << 6)
+
+ int power_on_delay; /* Power On to Power Good time - in ms
+ * HCD must wait for this duration before
+ * accessing a powered on port
+ */
+ int port_mode;
+#define PMM_NPS_MODE 1
+#define PMM_GLOBAL_MODE 2
+#define PMM_PERPORT_MODE 3
+
+ int power_budget;
+};
+
+extern void pxa_set_ohci_info(struct pxaohci_platform_data *info);
+
+#endif
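
The flags, port_mode and power_budget fields are normally filled in by a board file and registered with pxa_set_ohci_info(). A minimal, hypothetical sketch (example_ohci_init() and all values are illustrative only):

#include <linux/platform_data/usb-ohci-pxa27x.h>

static int example_ohci_init(struct device *dev)
{
	/* Board-specific setup, e.g. enabling the USB power rail. */
	return 0;
}

static struct pxaohci_platform_data example_ohci_info = {
	.init		= example_ohci_init,
	.flags		= ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW,
	.port_mode	= PMM_PERPORT_MODE,
	.power_on_delay	= 10,	/* 10 ms from power on to power good */
	.power_budget	= 500,	/* mA */
};

static void __init example_board_init(void)
{
	pxa_set_ohci_info(&example_ohci_info);
}
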
diff --git a/include/linux/platform_data/usb-ohci-s3c2410.h b/include/linux/platform_data/usb-ohci-s3c2410.h
new file mode 100644
index 000000000..7fa1fbefc
--- /dev/null
+++ b/include/linux/platform_data/usb-ohci-s3c2410.h
@@ -0,0 +1,43 @@
+/* arch/arm/plat-samsung/include/plat/usb-control.h
+ *
+ * Copyright (c) 2004 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB host port information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_USBCONTROL_H
+#define __ASM_ARCH_USBCONTROL_H
+
+#define S3C_HCDFLG_USED (1)
+
+struct s3c2410_hcd_port {
+ unsigned char flags;
+ unsigned char power;
+ unsigned char oc_status;
+ unsigned char oc_changed;
+};
+
+struct s3c2410_hcd_info {
+ struct usb_hcd *hcd;
+ struct s3c2410_hcd_port port[2];
+
+ void (*power_control)(int port, int to);
+ void (*enable_oc)(struct s3c2410_hcd_info *, int on);
+ void (*report_oc)(struct s3c2410_hcd_info *, int ports);
+};
+
+static inline void s3c2410_usb_report_oc(struct s3c2410_hcd_info *info, int ports)
+{
+ if (info->report_oc != NULL) {
+ (info->report_oc)(info, ports);
+ }
+}
+
+extern void s3c_ohci_set_platdata(struct s3c2410_hcd_info *info);
+
+#endif /*__ASM_ARCH_USBCONTROL_H */
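
A board supplies an s3c2410_hcd_info with its VBUS switching callback and registers it through s3c_ohci_set_platdata(); the hcd pointer and the per-port status bytes are maintained by the host driver itself. A hedged sketch (the callback body is illustrative):

#include <linux/platform_data/usb-ohci-s3c2410.h>

static void example_usb_power_control(int port, int to)
{
	/* Switch the board's per-port VBUS supply; 'to' is non-zero for on. */
}

static struct s3c2410_hcd_info example_usb_info __initdata = {
	.port[0]	= { .flags = S3C_HCDFLG_USED },
	.port[1]	= { .flags = S3C_HCDFLG_USED },
	.power_control	= example_usb_power_control,
};

static void __init example_board_init(void)
{
	s3c_ohci_set_platdata(&example_usb_info);
}
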
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
new file mode 100644
index 000000000..fa579b4c6
--- /dev/null
+++ b/include/linux/platform_data/usb-omap.h
@@ -0,0 +1,88 @@
+/*
+ * usb-omap.h - Platform data for the various OMAP USB IPs
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * This software is distributed under the terms of the GNU General Public
+ * License ("GPL") version 2, as published by the Free Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define OMAP3_HS_USB_PORTS 3
+
+enum usbhs_omap_port_mode {
+ OMAP_USBHS_PORT_MODE_UNUSED,
+ OMAP_EHCI_PORT_MODE_PHY,
+ OMAP_EHCI_PORT_MODE_TLL,
+ OMAP_EHCI_PORT_MODE_HSIC,
+ OMAP_OHCI_PORT_MODE_PHY_6PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_PHY_6PIN_DPDM,
+ OMAP_OHCI_PORT_MODE_PHY_3PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_PHY_4PIN_DPDM,
+ OMAP_OHCI_PORT_MODE_TLL_6PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_TLL_6PIN_DPDM,
+ OMAP_OHCI_PORT_MODE_TLL_3PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_TLL_4PIN_DPDM,
+ OMAP_OHCI_PORT_MODE_TLL_2PIN_DATSE0,
+ OMAP_OHCI_PORT_MODE_TLL_2PIN_DPDM
+};
+
+struct usbtll_omap_platform_data {
+ enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS];
+};
+
+struct ehci_hcd_omap_platform_data {
+ enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS];
+ int reset_gpio_port[OMAP3_HS_USB_PORTS];
+ struct regulator *regulator[OMAP3_HS_USB_PORTS];
+ unsigned phy_reset:1;
+};
+
+struct ohci_hcd_omap_platform_data {
+ enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS];
+ unsigned es2_compatibility:1;
+};
+
+struct usbhs_omap_platform_data {
+ int nports;
+ enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS];
+ int reset_gpio_port[OMAP3_HS_USB_PORTS];
+ struct regulator *regulator[OMAP3_HS_USB_PORTS];
+
+ struct ehci_hcd_omap_platform_data *ehci_data;
+ struct ohci_hcd_omap_platform_data *ohci_data;
+
+ /* OMAP3 <= ES2.1 have a single ulpi bypass control bit */
+ unsigned single_ulpi_bypass:1;
+ unsigned es2_compatibility:1;
+ unsigned phy_reset:1;
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct omap_musb_board_data {
+ u8 interface_type;
+ u8 mode;
+ u16 power;
+ unsigned extvbus:1;
+ void (*set_phy_power)(u8 on);
+ void (*clear_irq)(void);
+ void (*set_mode)(u8 mode);
+ void (*reset)(void);
+};
+
+enum musb_interface {
+ MUSB_INTERFACE_ULPI,
+ MUSB_INTERFACE_UTMI
+};
diff --git a/include/linux/platform_data/usb-omap1.h b/include/linux/platform_data/usb-omap1.h
new file mode 100644
index 000000000..43b5ce139
--- /dev/null
+++ b/include/linux/platform_data/usb-omap1.h
@@ -0,0 +1,53 @@
+/*
+ * Platform data for OMAP1 USB
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive for
+ * more details.
+ */
+#ifndef __LINUX_USB_OMAP1_H
+#define __LINUX_USB_OMAP1_H
+
+#include <linux/platform_device.h>
+
+struct omap_usb_config {
+ /* Configure drivers according to the connectors on your board:
+ * - "A" connector (rectagular)
+ * ... for host/OHCI use, set "register_host".
+ * - "B" connector (squarish) or "Mini-B"
+ * ... for device/gadget use, set "register_dev".
+ * - "Mini-AB" connector (very similar to Mini-B)
+ * ... for OTG use as device OR host, initialize "otg"
+ */
+ unsigned register_host:1;
+ unsigned register_dev:1;
+ u8 otg; /* port number, 1-based: usb1 == 2 */
+
+ const char *extcon; /* extcon device for OTG */
+
+ u8 hmc_mode;
+
+ /* implicitly true if otg: host supports remote wakeup? */
+ u8 rwc;
+
+ /* signaling pins used to talk to transceiver on usbN:
+ * 0 == usbN unused
+ * 2 == usb0-only, using internal transceiver
+ * 3 == 3 wire bidirectional
+ * 4 == 4 wire bidirectional
+ * 6 == 6 wire unidirectional (or TLL)
+ */
+ u8 pins[3];
+
+ struct platform_device *udc_device;
+ struct platform_device *ohci_device;
+ struct platform_device *otg_device;
+
+ u32 (*usb0_init)(unsigned nwires, unsigned is_device);
+ u32 (*usb1_init)(unsigned nwires);
+ u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
+
+ int (*ocpi_enable)(void);
+};
+
+#endif /* __LINUX_USB_OMAP1_H */
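
The structure above is filled in per board according to the connector notes and then consumed by the mach-omap1 USB setup code (the init helper itself is not declared in this header). A hypothetical host-only configuration, with placeholder pin counts and hmc_mode:

#include <linux/platform_data/usb-omap1.h>

static struct omap_usb_config example_usb_config __initdata = {
	.register_host	= 1,	/* "A" connector, OHCI host */
	.register_dev	= 0,
	.hmc_mode	= 16,	/* board-specific host/device mux setting */
	.pins[0]	= 2,	/* usb0 on the internal transceiver */
	.pins[1]	= 0,	/* usb1 unused */
	.pins[2]	= 0,	/* usb2 unused */
};
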
diff --git a/include/linux/platform_data/usb-pxa3xx-ulpi.h b/include/linux/platform_data/usb-pxa3xx-ulpi.h
new file mode 100644
index 000000000..9d82cb65e
--- /dev/null
+++ b/include/linux/platform_data/usb-pxa3xx-ulpi.h
@@ -0,0 +1,35 @@
+/*
+ * PXA3xx U2D header
+ *
+ * Copyright (C) 2010 CompuLab Ltd.
+ *
+ * Igor Grinberg <grinberg@compulab.co.il>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __PXA310_U2D__
+#define __PXA310_U2D__
+
+#include <linux/usb/ulpi.h>
+
+struct pxa3xx_u2d_platform_data {
+
+#define ULPI_SER_6PIN (1 << 0)
+#define ULPI_SER_3PIN (1 << 1)
+ unsigned int ulpi_mode;
+
+ int (*init)(struct device *);
+ void (*exit)(struct device *);
+};
+
+
+/* Start PXA3xx U2D host */
+int pxa3xx_u2d_start_hc(struct usb_bus *host);
+/* Stop PXA3xx U2D host */
+void pxa3xx_u2d_stop_hc(struct usb_bus *host);
+
+extern void pxa3xx_set_u2d_info(struct pxa3xx_u2d_platform_data *info);
+
+#endif /* __PXA310_U2D__ */
diff --git a/include/linux/platform_data/usb-rcar-gen2-phy.h b/include/linux/platform_data/usb-rcar-gen2-phy.h
new file mode 100644
index 000000000..dd3ba46c0
--- /dev/null
+++ b/include/linux/platform_data/usb-rcar-gen2-phy.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __USB_RCAR_GEN2_PHY_H
+#define __USB_RCAR_GEN2_PHY_H
+
+#include <linux/types.h>
+
+struct rcar_gen2_phy_platform_data {
+ /* USB channel 0 configuration */
+ bool chan0_pci:1; /* true: PCI USB host 0, false: USBHS */
+ /* USB channel 2 configuration */
+ bool chan2_pci:1; /* true: PCI USB host 2, false: USBSS */
+};
+
+#endif
diff --git a/include/linux/platform_data/usb-rcar-phy.h b/include/linux/platform_data/usb-rcar-phy.h
new file mode 100644
index 000000000..8ec6964a3
--- /dev/null
+++ b/include/linux/platform_data/usb-rcar-phy.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __USB_RCAR_PHY_H
+#define __USB_RCAR_PHY_H
+
+#include <linux/types.h>
+
+struct rcar_phy_platform_data {
+ bool ferrite_bead:1; /* (R8A7778 only) */
+
+ bool port1_func:1; /* true: port 1 used by function, false: host */
+ unsigned penc1:1; /* Output of the PENC1 pin in function mode */
+ struct { /* Overcurrent pin control for ports 0..2 */
+ bool select_3_3v:1; /* true: USB_OVCn pin, false: OVCn pin */
+ /* Set to false on port 1 in function mode */
+ bool active_high:1; /* true: active high, false: active low */
+ /* Set to true on port 1 in function mode */
+ } ovc_pin[3]; /* (R8A7778 only has 2 ports) */
+};
+
+#endif /* __USB_RCAR_PHY_H */
diff --git a/include/linux/platform_data/usb-s3c2410_udc.h b/include/linux/platform_data/usb-s3c2410_udc.h
new file mode 100644
index 000000000..de8e2288a
--- /dev/null
+++ b/include/linux/platform_data/usb-s3c2410_udc.h
@@ -0,0 +1,44 @@
+/* arch/arm/plat-samsung/include/plat/udc.h
+ *
+ * Copyright (c) 2005 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Changelog:
+ * 14-Mar-2005 RTP Created file
+ * 02-Aug-2005 RTP File rename
+ * 07-Sep-2005 BJD Minor cleanups, changed cmd to enum
+ * 18-Jan-2007 HMW Add per-platform vbus_draw function
+*/
+
+#ifndef __ASM_ARM_ARCH_UDC_H
+#define __ASM_ARM_ARCH_UDC_H
+
+enum s3c2410_udc_cmd_e {
+ S3C2410_UDC_P_ENABLE = 1, /* Pull-up enable */
+ S3C2410_UDC_P_DISABLE = 2, /* Pull-up disable */
+ S3C2410_UDC_P_RESET = 3, /* UDC reset */
+};
+
+struct s3c2410_udc_mach_info {
+ void (*udc_command)(enum s3c2410_udc_cmd_e);
+ void (*vbus_draw)(unsigned int ma);
+
+ unsigned int pullup_pin;
+ unsigned int pullup_pin_inverted;
+
+ unsigned int vbus_pin;
+ unsigned char vbus_pin_inverted;
+};
+
+extern void __init s3c24xx_udc_set_platdata(struct s3c2410_udc_mach_info *);
+
+struct s3c24xx_hsudc_platdata;
+
+extern void __init s3c24xx_hsudc_set_platdata(struct s3c24xx_hsudc_platdata *pd);
+
+#endif /* __ASM_ARM_ARCH_UDC_H */
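
A board typically implements udc_command() (or fills in the pull-up GPIO fields) and registers the structure with s3c24xx_udc_set_platdata(). A hedged sketch; the GPIO comments stand in for real board code:

#include <linux/platform_data/usb-s3c2410_udc.h>

static void example_udc_command(enum s3c2410_udc_cmd_e cmd)
{
	switch (cmd) {
	case S3C2410_UDC_P_ENABLE:
		/* drive the D+ pull-up GPIO high on the real board */
		break;
	case S3C2410_UDC_P_DISABLE:
		/* drive the D+ pull-up GPIO low */
		break;
	case S3C2410_UDC_P_RESET:
	default:
		break;
	}
}

static struct s3c2410_udc_mach_info example_udc_cfg __initdata = {
	.udc_command	= example_udc_command,
};

static void __init example_board_init(void)
{
	s3c24xx_udc_set_platdata(&example_udc_cfg);
}
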
diff --git a/include/linux/platform_data/usb3503.h b/include/linux/platform_data/usb3503.h
new file mode 100644
index 000000000..1d1b6ef87
--- /dev/null
+++ b/include/linux/platform_data/usb3503.h
@@ -0,0 +1,24 @@
+#ifndef __USB3503_H__
+#define __USB3503_H__
+
+#define USB3503_I2C_NAME "usb3503"
+
+#define USB3503_OFF_PORT1 (1 << 1)
+#define USB3503_OFF_PORT2 (1 << 2)
+#define USB3503_OFF_PORT3 (1 << 3)
+
+enum usb3503_mode {
+ USB3503_MODE_UNKNOWN,
+ USB3503_MODE_HUB,
+ USB3503_MODE_STANDBY,
+};
+
+struct usb3503_platform_data {
+ enum usb3503_mode initial_mode;
+ u8 port_off_mask;
+ int gpio_intn;
+ int gpio_connect;
+ int gpio_reset;
+};
+
+#endif
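
The hub is an I2C device, so the platform data is usually attached to the i2c_board_info registered under USB3503_I2C_NAME. A sketch under that assumption; the bus number, I2C address and GPIO numbers are placeholders:

#include <linux/kernel.h>
#include <linux/i2c.h>
#include <linux/platform_data/usb3503.h>

static struct usb3503_platform_data example_usb3503_pdata = {
	.initial_mode	= USB3503_MODE_HUB,
	.port_off_mask	= USB3503_OFF_PORT3,	/* keep port 3 powered down */
	.gpio_intn	= -1,			/* INT_N not wired up */
	.gpio_connect	= -1,
	.gpio_reset	= 100,			/* placeholder GPIO number */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO(USB3503_I2C_NAME, 0x08),
		.platform_data	= &example_usb3503_pdata,
	},
};

static void __init example_board_init(void)
{
	i2c_register_board_info(0, example_i2c_devs,
				ARRAY_SIZE(example_i2c_devs));
}
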
diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h
new file mode 100644
index 000000000..1689ff4c3
--- /dev/null
+++ b/include/linux/platform_data/ux500_wdt.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST Ericsson SA 2011
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * STE Ux500 Watchdog platform data
+ */
+#ifndef __UX500_WDT_H
+#define __UX500_WDT_H
+
+/**
+ * struct ux500_wdt_data - ux500 watchdog platform data
+ * @timeout: watchdog timeout in seconds
+ * @has_28_bits_resolution: set if the watchdog counter has 28-bit resolution
+ */
+struct ux500_wdt_data {
+ unsigned int timeout;
+ bool has_28_bits_resolution;
+};
+
+#endif /* __UX500_WDT_H */
diff --git a/include/linux/platform_data/video-clcd-versatile.h b/include/linux/platform_data/video-clcd-versatile.h
new file mode 100644
index 000000000..09ccf182a
--- /dev/null
+++ b/include/linux/platform_data/video-clcd-versatile.h
@@ -0,0 +1,27 @@
+#ifndef PLAT_CLCD_H
+#define PLAT_CLCD_H
+
+#ifdef CONFIG_PLAT_VERSATILE_CLCD
+struct clcd_panel *versatile_clcd_get_panel(const char *);
+int versatile_clcd_setup_dma(struct clcd_fb *, unsigned long);
+int versatile_clcd_mmap_dma(struct clcd_fb *, struct vm_area_struct *);
+void versatile_clcd_remove_dma(struct clcd_fb *);
+#else
+static inline struct clcd_panel *versatile_clcd_get_panel(const char *s)
+{
+ return NULL;
+}
+static inline int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
+{
+ return -ENODEV;
+}
+static inline int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vm)
+{
+ return -ENODEV;
+}
+static inline void versatile_clcd_remove_dma(struct clcd_fb *fb)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h
new file mode 100644
index 000000000..92fc2b223
--- /dev/null
+++ b/include/linux/platform_data/video-ep93xx.h
@@ -0,0 +1,52 @@
+#ifndef __VIDEO_EP93XX_H
+#define __VIDEO_EP93XX_H
+
+struct platform_device;
+struct fb_videomode;
+struct fb_info;
+
+#define EP93XXFB_USE_MODEDB 0
+
+/* VideoAttributes flags */
+#define EP93XXFB_STATE_MACHINE_ENABLE (1 << 0)
+#define EP93XXFB_PIXEL_CLOCK_ENABLE (1 << 1)
+#define EP93XXFB_VSYNC_ENABLE (1 << 2)
+#define EP93XXFB_PIXEL_DATA_ENABLE (1 << 3)
+#define EP93XXFB_COMPOSITE_SYNC (1 << 4)
+#define EP93XXFB_SYNC_VERT_HIGH (1 << 5)
+#define EP93XXFB_SYNC_HORIZ_HIGH (1 << 6)
+#define EP93XXFB_SYNC_BLANK_HIGH (1 << 7)
+#define EP93XXFB_PCLK_FALLING (1 << 8)
+#define EP93XXFB_ENABLE_AC (1 << 9)
+#define EP93XXFB_ENABLE_LCD (1 << 10)
+#define EP93XXFB_ENABLE_CCIR (1 << 12)
+#define EP93XXFB_USE_PARALLEL_INTERFACE (1 << 13)
+#define EP93XXFB_ENABLE_INTERRUPT (1 << 14)
+#define EP93XXFB_USB_INTERLACE (1 << 16)
+#define EP93XXFB_USE_EQUALIZATION (1 << 17)
+#define EP93XXFB_USE_DOUBLE_HORZ (1 << 18)
+#define EP93XXFB_USE_DOUBLE_VERT (1 << 19)
+#define EP93XXFB_USE_BLANK_PIXEL (1 << 20)
+#define EP93XXFB_USE_SDCSN0 (0 << 21)
+#define EP93XXFB_USE_SDCSN1 (1 << 21)
+#define EP93XXFB_USE_SDCSN2 (2 << 21)
+#define EP93XXFB_USE_SDCSN3 (3 << 21)
+
+#define EP93XXFB_ENABLE (EP93XXFB_STATE_MACHINE_ENABLE | \
+ EP93XXFB_PIXEL_CLOCK_ENABLE | \
+ EP93XXFB_VSYNC_ENABLE | \
+ EP93XXFB_PIXEL_DATA_ENABLE)
+
+struct ep93xxfb_mach_info {
+ unsigned int num_modes;
+ const struct fb_videomode *modes;
+ const struct fb_videomode *default_mode;
+ int bpp;
+ unsigned int flags;
+
+ int (*setup)(struct platform_device *pdev);
+ void (*teardown)(struct platform_device *pdev);
+ void (*blank)(int blank_mode, struct fb_info *info);
+};
+
+#endif /* __VIDEO_EP93XX_H */
diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h
new file mode 100644
index 000000000..18e908324
--- /dev/null
+++ b/include/linux/platform_data/video-imxfb.h
@@ -0,0 +1,72 @@
+/*
+ * This structure describes the machine which we are running on.
+ */
+#ifndef __MACH_IMXFB_H__
+#define __MACH_IMXFB_H__
+
+#include <linux/fb.h>
+
+#define PCR_TFT (1 << 31)
+#define PCR_COLOR (1 << 30)
+#define PCR_PBSIZ_1 (0 << 28)
+#define PCR_PBSIZ_2 (1 << 28)
+#define PCR_PBSIZ_4 (2 << 28)
+#define PCR_PBSIZ_8 (3 << 28)
+#define PCR_BPIX_1 (0 << 25)
+#define PCR_BPIX_2 (1 << 25)
+#define PCR_BPIX_4 (2 << 25)
+#define PCR_BPIX_8 (3 << 25)
+#define PCR_BPIX_12 (4 << 25)
+#define PCR_BPIX_16 (5 << 25)
+#define PCR_BPIX_18 (6 << 25)
+#define PCR_PIXPOL (1 << 24)
+#define PCR_FLMPOL (1 << 23)
+#define PCR_LPPOL (1 << 22)
+#define PCR_CLKPOL (1 << 21)
+#define PCR_OEPOL (1 << 20)
+#define PCR_SCLKIDLE (1 << 19)
+#define PCR_END_SEL (1 << 18)
+#define PCR_END_BYTE_SWAP (1 << 17)
+#define PCR_REV_VS (1 << 16)
+#define PCR_ACD_SEL (1 << 15)
+#define PCR_ACD(x) (((x) & 0x7f) << 8)
+#define PCR_SCLK_SEL (1 << 7)
+#define PCR_SHARP (1 << 6)
+#define PCR_PCD(x) ((x) & 0x3f)
+
+#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
+#define PWMR_LDMSK (1 << 15)
+#define PWMR_SCR1 (1 << 10)
+#define PWMR_SCR0 (1 << 9)
+#define PWMR_CC_EN (1 << 8)
+#define PWMR_PW(x) ((x) & 0xff)
+
+#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
+#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
+#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
+#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
+#define LSCR1_GRAY1(x) (((x) & 0xf))
+
+#define DMACR_BURST (1 << 31)
+#define DMACR_HM(x) (((x) & 0xf) << 16)
+#define DMACR_TM(x) ((x) & 0xf)
+
+struct imx_fb_videomode {
+ struct fb_videomode mode;
+ u32 pcr;
+ unsigned char bpp;
+};
+
+struct imx_fb_platform_data {
+ struct imx_fb_videomode *mode;
+ int num_modes;
+
+ u_int pwmr;
+ u_int lscr1;
+ u_int dmacr;
+
+ int (*init)(struct platform_device *);
+ void (*exit)(struct platform_device *);
+};
+
+#endif /* ifndef __MACH_IMXFB_H__ */
diff --git a/include/linux/platform_data/video-msm_fb.h b/include/linux/platform_data/video-msm_fb.h
new file mode 100644
index 000000000..31449be3e
--- /dev/null
+++ b/include/linux/platform_data/video-msm_fb.h
@@ -0,0 +1,146 @@
+/*
+ * Internal shared definitions for various MSM framebuffer parts.
+ *
+ * Copyright (C) 2007 Google Incorporated
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_FB_H_
+#define _MSM_FB_H_
+
+#include <linux/device.h>
+
+struct mddi_info;
+
+struct msm_fb_data {
+ int xres; /* x resolution in pixels */
+ int yres; /* y resolution in pixels */
+ int width; /* display width in mm */
+ int height; /* display height in mm */
+ unsigned output_format;
+};
+
+struct msmfb_callback {
+ void (*func)(struct msmfb_callback *);
+};
+
+enum {
+ MSM_MDDI_PMDH_INTERFACE,
+ MSM_MDDI_EMDH_INTERFACE,
+ MSM_EBI2_INTERFACE,
+};
+
+#define MSMFB_CAP_PARTIAL_UPDATES (1 << 0)
+
+struct msm_panel_data {
+ /* turns off the fb memory */
+ int (*suspend)(struct msm_panel_data *);
+ /* turns on the fb memory */
+ int (*resume)(struct msm_panel_data *);
+ /* turns off the panel */
+ int (*blank)(struct msm_panel_data *);
+ /* turns on the panel */
+ int (*unblank)(struct msm_panel_data *);
+ void (*wait_vsync)(struct msm_panel_data *);
+ void (*request_vsync)(struct msm_panel_data *, struct msmfb_callback *);
+ void (*clear_vsync)(struct msm_panel_data *);
+ /* from the enum above */
+ unsigned interface_type;
+ /* data to be passed to the fb driver */
+ struct msm_fb_data *fb_data;
+
+ /* capabilities supported by the panel */
+ uint32_t caps;
+};
+
+struct msm_mddi_client_data {
+ void (*suspend)(struct msm_mddi_client_data *);
+ void (*resume)(struct msm_mddi_client_data *);
+ void (*activate_link)(struct msm_mddi_client_data *);
+ void (*remote_write)(struct msm_mddi_client_data *, uint32_t val,
+ uint32_t reg);
+ uint32_t (*remote_read)(struct msm_mddi_client_data *, uint32_t reg);
+ void (*auto_hibernate)(struct msm_mddi_client_data *, int);
+ /* custom data that needs to be passed from the board file to a
+ * particular client */
+ void *private_client_data;
+ struct resource *fb_resource;
+ /* from the list above */
+ unsigned interface_type;
+};
+
+struct msm_mddi_platform_data {
+ unsigned int clk_rate;
+ void (*power_client)(struct msm_mddi_client_data *, int on);
+
+ /* fixup the mfr name, product id */
+ void (*fixup)(uint16_t *mfr_name, uint16_t *product_id);
+
+ struct resource *fb_resource; /*optional*/
+ /* number of clients in the list that follows */
+ int num_clients;
+ /* array of client information of clients */
+ struct {
+ unsigned product_id; /* mfr id in top 16 bits, product id
+ * in lower 16 bits
+ */
+ char *name; /* the device name will be the platform
+ * device name registered for the client,
+ * it should match the name of the associated
+ * driver
+ */
+ unsigned id; /* id for mddi client device node, will also
+ * be used as device id of panel devices, if
+ * the client device will have multiple panels
+ * space must be left here for them
+ */
+ void *client_data; /* required private client data */
+ unsigned int clk_rate; /* optional: if the client requires a
+ * different mddi clk rate
+ */
+ } client_platform_data[];
+};
+
+struct mdp_blit_req;
+struct fb_info;
+struct mdp_device {
+ struct device dev;
+ void (*dma)(struct mdp_device *mpd, uint32_t addr,
+ uint32_t stride, uint32_t w, uint32_t h, uint32_t x,
+ uint32_t y, struct msmfb_callback *callback, int interface);
+ void (*dma_wait)(struct mdp_device *mdp);
+ int (*blit)(struct mdp_device *mdp, struct fb_info *fb,
+ struct mdp_blit_req *req);
+ void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
+};
+
+struct class_interface;
+int register_mdp_client(struct class_interface *class_intf);
+
+/**** private client data structs go below this line ***/
+
+struct msm_mddi_bridge_platform_data {
+ /* from board file */
+ int (*init)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ int (*uninit)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ /* passed to panel for use by the fb driver */
+ int (*blank)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ int (*unblank)(struct msm_mddi_bridge_platform_data *,
+ struct msm_mddi_client_data *);
+ struct msm_fb_data fb_data;
+};
+
+
+
+#endif
diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h
new file mode 100644
index 000000000..fdbe60001
--- /dev/null
+++ b/include/linux/platform_data/video-mx3fb.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2008
+ * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MX3FB_H__
+#define __ASM_ARCH_MX3FB_H__
+
+#include <linux/device.h>
+#include <linux/fb.h>
+
+/* Proprietary FB_SYNC_ flags */
+#define FB_SYNC_OE_ACT_HIGH 0x80000000
+#define FB_SYNC_CLK_INVERT 0x40000000
+#define FB_SYNC_DATA_INVERT 0x20000000
+#define FB_SYNC_CLK_IDLE_EN 0x10000000
+#define FB_SYNC_SHARP_MODE 0x08000000
+#define FB_SYNC_SWAP_RGB 0x04000000
+#define FB_SYNC_CLK_SEL_EN 0x02000000
+
+/*
+ * Specify the way your display is connected. The IPU can arbitrarily
+ * map the internal colors to the external data lines. We only support
+ * the following mappings at the moment.
+ */
+enum disp_data_mapping {
+ /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
+ IPU_DISP_DATA_MAPPING_RGB666,
+ /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
+ IPU_DISP_DATA_MAPPING_RGB565,
+ /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
+ IPU_DISP_DATA_MAPPING_RGB888,
+};
+
+/**
+ * struct mx3fb_platform_data - mx3fb platform data
+ *
+ * @dma_dev: pointer to the dma-device, used for dma-slave connection
+ * @mode: pointer to a platform-provided per mxc_register_fb() videomode
+ */
+struct mx3fb_platform_data {
+ struct device *dma_dev;
+ const char *name;
+ const struct fb_videomode *mode;
+ int num_modes;
+ enum disp_data_mapping disp_data_fmt;
+};
+
+#endif
diff --git a/include/linux/platform_data/video-nuc900fb.h b/include/linux/platform_data/video-nuc900fb.h
new file mode 100644
index 000000000..cec5ece76
--- /dev/null
+++ b/include/linux/platform_data/video-nuc900fb.h
@@ -0,0 +1,83 @@
+/* linux/include/asm/arch-nuc900/fb.h
+ *
+ * Copyright (c) 2008 Nuvoton technology corporation
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Changelog:
+ *
+ * 2008/08/26 vincen.zswan modify this file for LCD.
+ */
+
+#ifndef __ASM_ARM_FB_H
+#define __ASM_ARM_FB_H
+
+
+
+/* LCD Controller Hardware Desc */
+struct nuc900fb_hw {
+ unsigned int lcd_dccs;
+ unsigned int lcd_device_ctrl;
+ unsigned int lcd_mpulcd_cmd;
+ unsigned int lcd_int_cs;
+ unsigned int lcd_crtc_size;
+ unsigned int lcd_crtc_dend;
+ unsigned int lcd_crtc_hr;
+ unsigned int lcd_crtc_hsync;
+ unsigned int lcd_crtc_vr;
+ unsigned int lcd_va_baddr0;
+ unsigned int lcd_va_baddr1;
+ unsigned int lcd_va_fbctrl;
+ unsigned int lcd_va_scale;
+ unsigned int lcd_va_test;
+ unsigned int lcd_va_win;
+ unsigned int lcd_va_stuff;
+};
+
+/* LCD Display Description */
+struct nuc900fb_display {
+ /* LCD Image type */
+ unsigned type;
+
+ /* LCD Screen Size */
+ unsigned short width;
+ unsigned short height;
+
+ /* LCD Screen Info */
+ unsigned short xres;
+ unsigned short yres;
+ unsigned short bpp;
+
+ unsigned long pixclock;
+ unsigned short left_margin;
+ unsigned short right_margin;
+ unsigned short hsync_len;
+ unsigned short upper_margin;
+ unsigned short lower_margin;
+ unsigned short vsync_len;
+
+ /* hardware special register value */
+ unsigned int dccs;
+ unsigned int devctl;
+ unsigned int fbctrl;
+ unsigned int scale;
+};
+
+struct nuc900fb_mach_info {
+ struct nuc900fb_display *displays;
+ unsigned num_displays;
+ unsigned default_display;
+ /* GPIO Setting Info */
+ unsigned gpio_dir;
+ unsigned gpio_dir_mask;
+ unsigned gpio_data;
+ unsigned gpio_data_mask;
+};
+
+extern void __init nuc900_fb_set_platdata(struct nuc900fb_mach_info *);
+
+#endif /* __ASM_ARM_FB_H */
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
new file mode 100644
index 000000000..07c6c1e15
--- /dev/null
+++ b/include/linux/platform_data/video-pxafb.h
@@ -0,0 +1,173 @@
+/*
+ * Support for the xscale frame buffer.
+ *
+ * Author: Jean-Frederic Clere
+ * Created: Sep 22, 2003
+ * Copyright: jfclere@sinix.net
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fb.h>
+#include <mach/regs-lcd.h>
+
+/*
+ * Supported LCD connections
+ *
+ * bits 0 - 3: for LCD panel type:
+ *
+ * STN - for passive matrix
+ * DSTN - for dual scan passive matrix
+ * TFT - for active matrix
+ *
+ * bits 4 - 9 : for bus width
+ * bits 10-17 : for AC Bias Pin Frequency
+ * bit 18 : for output enable polarity
+ * bit 19 : for pixel clock edge
+ * bit 20 : for output pixel format when base is RGBT16
+ */
+#define LCD_CONN_TYPE(_x) ((_x) & 0x0f)
+#define LCD_CONN_WIDTH(_x) (((_x) >> 4) & 0x1f)
+
+#define LCD_TYPE_MASK 0xf
+#define LCD_TYPE_UNKNOWN 0
+#define LCD_TYPE_MONO_STN 1
+#define LCD_TYPE_MONO_DSTN 2
+#define LCD_TYPE_COLOR_STN 3
+#define LCD_TYPE_COLOR_DSTN 4
+#define LCD_TYPE_COLOR_TFT 5
+#define LCD_TYPE_SMART_PANEL 6
+#define LCD_TYPE_MAX 7
+
+#define LCD_MONO_STN_4BPP ((4 << 4) | LCD_TYPE_MONO_STN)
+#define LCD_MONO_STN_8BPP ((8 << 4) | LCD_TYPE_MONO_STN)
+#define LCD_MONO_DSTN_8BPP ((8 << 4) | LCD_TYPE_MONO_DSTN)
+#define LCD_COLOR_STN_8BPP ((8 << 4) | LCD_TYPE_COLOR_STN)
+#define LCD_COLOR_DSTN_16BPP ((16 << 4) | LCD_TYPE_COLOR_DSTN)
+#define LCD_COLOR_TFT_8BPP ((8 << 4) | LCD_TYPE_COLOR_TFT)
+#define LCD_COLOR_TFT_16BPP ((16 << 4) | LCD_TYPE_COLOR_TFT)
+#define LCD_COLOR_TFT_18BPP ((18 << 4) | LCD_TYPE_COLOR_TFT)
+#define LCD_SMART_PANEL_8BPP ((8 << 4) | LCD_TYPE_SMART_PANEL)
+#define LCD_SMART_PANEL_16BPP ((16 << 4) | LCD_TYPE_SMART_PANEL)
+#define LCD_SMART_PANEL_18BPP ((18 << 4) | LCD_TYPE_SMART_PANEL)
+
+#define LCD_AC_BIAS_FREQ(x) (((x) & 0xff) << 10)
+#define LCD_BIAS_ACTIVE_HIGH (0 << 18)
+#define LCD_BIAS_ACTIVE_LOW (1 << 18)
+#define LCD_PCLK_EDGE_RISE (0 << 19)
+#define LCD_PCLK_EDGE_FALL (1 << 19)
+#define LCD_ALTERNATE_MAPPING (1 << 20)
+
+/*
+ * This structure describes the machine which we are running on.
+ * It is set in linux/arch/arm/mach-pxa/machine_name.c and used in the probe routine
+ * of linux/drivers/video/pxafb.c
+ */
+struct pxafb_mode_info {
+ u_long pixclock;
+
+ u_short xres;
+ u_short yres;
+
+ u_char bpp;
+ u_int cmap_greyscale:1,
+ depth:8,
+ transparency:1,
+ unused:22;
+
+ /* Parallel Mode Timing */
+ u_char hsync_len;
+ u_char left_margin;
+ u_char right_margin;
+
+ u_char vsync_len;
+ u_char upper_margin;
+ u_char lower_margin;
+ u_char sync;
+
+ /* Smart Panel Mode Timing - see PXA27x DM 7.4.15.0.3 for details
+ * Note:
+ * 1. all parameters are in nanoseconds (ns)
+ * 2. a0cs{rd,wr}_set_hld are controlled by the same register bits
+ * in pxa27x and pxa3xx; initialize them to the same value, otherwise
+ * the larger one will be used
+ * 3. the same applies to {rd,wr}_pulse_width
+ *
+ * 4. LCD_PCLK_EDGE_{RISE,FALL} controls the L_PCLK_WR polarity
+ * 5. sync & FB_SYNC_HOR_HIGH_ACT controls the L_LCLK_A0
+ * 6. sync & FB_SYNC_VERT_HIGH_ACT controls the L_LCLK_RD
+ */
+ unsigned a0csrd_set_hld; /* A0 and CS Setup/Hold Time before/after L_FCLK_RD */
+ unsigned a0cswr_set_hld; /* A0 and CS Setup/Hold Time before/after L_PCLK_WR */
+ unsigned wr_pulse_width; /* L_PCLK_WR pulse width */
+ unsigned rd_pulse_width; /* L_FCLK_RD pulse width */
+ unsigned cmd_inh_time; /* Command Inhibit time between two writes */
+ unsigned op_hold_time; /* Output Hold time from L_FCLK_RD negation */
+};
+
+struct pxafb_mach_info {
+ struct pxafb_mode_info *modes;
+ unsigned int num_modes;
+
+ unsigned int lcd_conn;
+ unsigned long video_mem_size;
+
+ u_int fixed_modes:1,
+ cmap_inverse:1,
+ cmap_static:1,
+ acceleration_enabled:1,
+ unused:28;
+
+ /* The following should be defined in LCCR0
+ * LCCR0_Act or LCCR0_Pas Active or Passive
+ * LCCR0_Sngl or LCCR0_Dual Single/Dual panel
+ * LCCR0_Mono or LCCR0_Color Mono/Color
+ * LCCR0_4PixMono or LCCR0_8PixMono (in mono single mode)
+ * LCCR0_DMADel(Tcpu) (optional) DMA request delay
+ *
+ * The following should not be defined in LCCR0:
+ * LCCR0_OUM, LCCR0_BM, LCCR0_QDM, LCCR0_DIS, LCCR0_EFM
+ * LCCR0_IUM, LCCR0_SFM, LCCR0_LDM, LCCR0_ENB
+ */
+ u_int lccr0;
+ /* The following should be defined in LCCR3
+ * LCCR3_OutEnH or LCCR3_OutEnL Output enable polarity
+ * LCCR3_PixRsEdg or LCCR3_PixFlEdg Pixel clock edge type
+ * LCCR3_Acb(X) AC Bias pin frequency
+ * LCCR3_DPC (optional) Double Pixel Clock mode (untested)
+ *
+ * The following should not be defined in LCCR3
+ * LCCR3_HSP, LCCR3_VSP, LCCR0_Pcd(x), LCCR3_Bpp
+ */
+ u_int lccr3;
+ /* The following should be defined in LCCR4
+ * LCCR4_PAL_FOR_0 or LCCR4_PAL_FOR_1 or LCCR4_PAL_FOR_2
+ *
+ * All other bits in LCCR4 should be left alone.
+ */
+ u_int lccr4;
+ void (*pxafb_backlight_power)(int);
+ void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *);
+ void (*smart_update)(struct fb_info *);
+};
+
+void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
+unsigned long pxafb_get_hsync_time(struct device *dev);
+
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
+extern int pxafb_smart_flush(struct fb_info *info);
+#else
+static inline int pxafb_smart_queue(struct fb_info *info,
+ uint16_t *cmds, int n)
+{
+ return 0;
+}
+
+static inline int pxafb_smart_flush(struct fb_info *info)
+{
+ return 0;
+}
+#endif
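
Putting the definitions above together, a board describes its panel with one or more pxafb_mode_info entries, wraps them in a pxafb_mach_info and hands the result to pxa_set_fb_info(). A hypothetical 640x480 TFT sketch; every timing value is a placeholder:

#include <linux/platform_data/video-pxafb.h>

static struct pxafb_mode_info example_vga_mode = {
	.pixclock	= 39722,	/* ps, placeholder */
	.xres		= 640,
	.yres		= 480,
	.bpp		= 16,
	.hsync_len	= 63,
	.left_margin	= 16,
	.right_margin	= 81,
	.vsync_len	= 2,
	.upper_margin	= 12,
	.lower_margin	= 31,
};

static struct pxafb_mach_info example_fb_info = {
	.modes		= &example_vga_mode,
	.num_modes	= 1,
	.lcd_conn	= LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
};

static void __init example_board_init(void)
{
	pxa_set_fb_info(NULL, &example_fb_info);
}
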
diff --git a/include/linux/platform_data/video_s3c.h b/include/linux/platform_data/video_s3c.h
new file mode 100644
index 000000000..48883995f
--- /dev/null
+++ b/include/linux/platform_data/video_s3c.h
@@ -0,0 +1,54 @@
+#ifndef __PLATFORM_DATA_VIDEO_S3C
+#define __PLATFORM_DATA_VIDEO_S3C
+
+/* S3C_FB_MAX_WIN
+ * Set to the maximum number of windows that any of the supported hardware
+ * can use. Since the platform data uses this for an array size, it is safe
+ * to set it to the maximum that any supported version of the hardware can use.
+ */
+#define S3C_FB_MAX_WIN (5)
+
+/**
+ * struct s3c_fb_pd_win - per window setup data
+ * @default_bpp: The default bits per pixel for the window.
+ * @max_bpp: The maximum bits per pixel supported by the window.
+ * @xres : The window X size.
+ * @yres : The window Y size.
+ * @virtual_x: The virtual X size.
+ * @virtual_y: The virtual Y size.
+ */
+struct s3c_fb_pd_win {
+ unsigned short default_bpp;
+ unsigned short max_bpp;
+ unsigned short xres;
+ unsigned short yres;
+ unsigned short virtual_x;
+ unsigned short virtual_y;
+};
+
+/**
+ * struct s3c_fb_platdata - S3C driver platform specific information
+ * @setup_gpio: Setup the external GPIO pins to the right state to transfer
+ * the data from the display system to the connected display
+ * device.
+ * @vidcon0: The base vidcon0 values to control the panel data format.
+ * @vidcon1: The base vidcon1 values to control the panel data output.
+ * @vtiming: Video timing when connected to a RGB type panel.
+ * @win: The setup data for each hardware window, or NULL for unused.
+ * @display_mode: The LCD output display mode.
+ *
+ * The platform data supplies the video driver with all the information
+ * it requires to work with the display(s) attached to the machine. It
+ * controls the initial mode, the number of display windows (0 is always
+ * the base framebuffer) that are initialised etc.
+ *
+ */
+struct s3c_fb_platdata {
+ void (*setup_gpio)(void);
+
+ struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN];
+ struct fb_videomode *vtiming;
+
+ u32 vidcon0;
+ u32 vidcon1;
+};
+
+#endif
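
As the kernel-doc above describes, a machine provides one s3c_fb_pd_win per hardware window plus the shared timing and control fields. A hedged single-window sketch; example_fb_gpio_setup() and all timings are placeholders, and vidcon0/vidcon1 would normally be built from the SoC's register definitions:

#include <linux/fb.h>
#include <linux/platform_data/video_s3c.h>

static void example_fb_gpio_setup(void)
{
	/* Route the LCD data and control pins on the real board here. */
}

static struct s3c_fb_pd_win example_fb_win0 = {
	.default_bpp	= 16,
	.max_bpp	= 32,
	.xres		= 480,
	.yres		= 272,
};

static struct fb_videomode example_lcd_timing = {
	.pixclock	= 111111,	/* ps, placeholder */
	.xres		= 480,
	.yres		= 272,
	.hsync_len	= 41,
	.left_margin	= 8,
	.right_margin	= 13,
	.vsync_len	= 10,
	.upper_margin	= 7,
	.lower_margin	= 5,
};

static struct s3c_fb_platdata example_fb_platdata = {
	.setup_gpio	= example_fb_gpio_setup,
	.win[0]		= &example_fb_win0,
	.vtiming	= &example_lcd_timing,
};
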
diff --git a/include/linux/platform_data/voltage-omap.h b/include/linux/platform_data/voltage-omap.h
new file mode 100644
index 000000000..5be4d5def
--- /dev/null
+++ b/include/linux/platform_data/voltage-omap.h
@@ -0,0 +1,39 @@
+/*
+ * OMAP Voltage Management Routines
+ *
+ * Copyright (C) 2011, Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_OMAP_VOLTAGE_H
+#define __ARCH_ARM_OMAP_VOLTAGE_H
+
+/**
+ * struct omap_volt_data - OMAP voltage specific data.
+ * @volt_nominal: The possible voltage value in uV
+ * @sr_efuse_offs: The offset of the efuse register (from the system
+ * control module base address) from where to read
+ * the n-target value for the smartreflex module.
+ * @sr_errminlimit: Error min limit value for smartreflex. This value
+ * differs at different OPPs and is thus linked
+ * with voltage.
+ * @vp_errgain: Error gain value for the voltage processor. This
+ * field also differs according to the voltage/opp.
+ */
+struct omap_volt_data {
+ u32 volt_nominal;
+ u32 sr_efuse_offs;
+ u8 sr_errminlimit;
+ u8 vp_errgain;
+};
+struct voltagedomain;
+
+struct voltagedomain *voltdm_lookup(const char *name);
+int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt);
+unsigned long voltdm_get_voltage(struct voltagedomain *voltdm);
+struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
+ unsigned long volt);
+#endif
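
Higher-level power management code uses the lookup/scale API above roughly as follows; this is a hedged sketch, with the "mpu" domain name and the 1.2 V target chosen purely for illustration:

#include <linux/errno.h>
#include <linux/platform_data/voltage-omap.h>

static int example_scale_mpu_voltage(void)
{
	struct voltagedomain *voltdm;
	unsigned long cur, target = 1200000;	/* 1.2 V, in uV */

	voltdm = voltdm_lookup("mpu");
	if (!voltdm)
		return -ENODEV;

	cur = voltdm_get_voltage(voltdm);
	if (cur == target)
		return 0;

	return voltdm_scale(voltdm, target);
}
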
diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h
new file mode 100644
index 000000000..b5d8c192d
--- /dev/null
+++ b/include/linux/platform_data/wiznet.h
@@ -0,0 +1,24 @@
+/*
+ * Ethernet driver for the WIZnet W5x00 chip.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef PLATFORM_DATA_WIZNET_H
+#define PLATFORM_DATA_WIZNET_H
+
+#include <linux/if_ether.h>
+
+struct wiznet_platform_data {
+ int link_gpio;
+ u8 mac_addr[ETH_ALEN];
+};
+
+#ifndef CONFIG_WIZNET_BUS_SHIFT
+#define CONFIG_WIZNET_BUS_SHIFT 0
+#endif
+
+#define W5100_BUS_DIRECT_SIZE (0x8000 << CONFIG_WIZNET_BUS_SHIFT)
+#define W5300_BUS_DIRECT_SIZE (0x0400 << CONFIG_WIZNET_BUS_SHIFT)
+
+#endif /* PLATFORM_DATA_WIZNET_H */
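
A board with a memory-mapped W5x00 registers a platform device carrying this structure as platform_data, plus a memory resource sized with the W5100_BUS_DIRECT_SIZE/W5300_BUS_DIRECT_SIZE macros. A hypothetical sketch assuming the "w5100" platform driver name; the bus address, IRQ and MAC address are placeholders:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/platform_data/wiznet.h>

static struct wiznet_platform_data example_w5100_pdata = {
	.link_gpio	= -1,	/* no link-status GPIO wired up */
	.mac_addr	= { 0x00, 0x08, 0xdc, 0x00, 0x00, 0x01 },
};

static struct resource example_w5100_resources[] = {
	{
		.start	= 0x10000000,	/* placeholder bus address */
		.end	= 0x10000000 + W5100_BUS_DIRECT_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,		/* placeholder IRQ number */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_w5100_device = {
	.name		= "w5100",
	.id		= -1,
	.resource	= example_w5100_resources,
	.num_resources	= ARRAY_SIZE(example_w5100_resources),
	.dev		= {
		.platform_data	= &example_w5100_pdata,
	},
};
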
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
new file mode 100644
index 000000000..0472ab2f6
--- /dev/null
+++ b/include/linux/platform_data/zforce_ts.h
@@ -0,0 +1,26 @@
+/* drivers/input/touchscreen/zforce.c
+ *
+ * Copyright (C) 2012-2013 MundoReader S.L.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_INPUT_ZFORCE_TS_H
+#define _LINUX_INPUT_ZFORCE_TS_H
+
+struct zforce_ts_platdata {
+ int gpio_int;
+ int gpio_rst;
+
+ unsigned int x_max;
+ unsigned int y_max;
+};
+
+#endif /* _LINUX_INPUT_ZFORCE_TS_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
new file mode 100644
index 000000000..58f1e75ba
--- /dev/null
+++ b/include/linux/platform_device.h
@@ -0,0 +1,334 @@
+/*
+ * platform_device.h - generic, centralized driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ *
+ * This file is released under the GPLv2
+ *
+ * See Documentation/driver-model/ for more information.
+ */
+
+#ifndef _PLATFORM_DEVICE_H_
+#define _PLATFORM_DEVICE_H_
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+#define PLATFORM_DEVID_NONE (-1)
+#define PLATFORM_DEVID_AUTO (-2)
+
+struct mfd_cell;
+
+struct platform_device {
+ const char *name;
+ int id;
+ bool id_auto;
+ struct device dev;
+ u32 num_resources;
+ struct resource *resource;
+
+ const struct platform_device_id *id_entry;
+ char *driver_override; /* Driver name to force a match */
+
+ /* MFD cell pointer */
+ struct mfd_cell *mfd_cell;
+
+ /* arch specific additions */
+ struct pdev_archdata archdata;
+};
+
+#define platform_get_device_id(pdev) ((pdev)->id_entry)
+
+#define to_platform_device(x) container_of((x), struct platform_device, dev)
+
+extern int platform_device_register(struct platform_device *);
+extern void platform_device_unregister(struct platform_device *);
+
+extern struct bus_type platform_bus_type;
+extern struct device platform_bus;
+
+extern void arch_setup_pdev_archdata(struct platform_device *);
+extern struct resource *platform_get_resource(struct platform_device *,
+ unsigned int, unsigned int);
+extern int platform_get_irq(struct platform_device *, unsigned int);
+extern struct resource *platform_get_resource_byname(struct platform_device *,
+ unsigned int,
+ const char *);
+extern int platform_get_irq_byname(struct platform_device *, const char *);
+extern int platform_add_devices(struct platform_device **, int);
+
+struct platform_device_info {
+ struct device *parent;
+ struct fwnode_handle *fwnode;
+
+ const char *name;
+ int id;
+
+ const struct resource *res;
+ unsigned int num_res;
+
+ const void *data;
+ size_t size_data;
+ u64 dma_mask;
+};
+extern struct platform_device *platform_device_register_full(
+ const struct platform_device_info *pdevinfo);
+
+/**
+ * platform_device_register_resndata - add a platform-level device with
+ * resources and platform-specific data
+ *
+ * @parent: parent device for the device we're adding
+ * @name: base name of the device we're adding
+ * @id: instance id
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+static inline struct platform_device *platform_device_register_resndata(
+ struct device *parent, const char *name, int id,
+ const struct resource *res, unsigned int num,
+ const void *data, size_t size)
+{
+ struct platform_device_info pdevinfo = {
+ .parent = parent,
+ .name = name,
+ .id = id,
+ .res = res,
+ .num_res = num,
+ .data = data,
+ .size_data = size,
+ .dma_mask = 0,
+ };
+
+ return platform_device_register_full(&pdevinfo);
+}
+
+/**
+ * platform_device_register_simple - add a platform-level device and its resources
+ * @name: base name of the device we're adding
+ * @id: instance id
+ * @res: set of resources that needs to be allocated for the device
+ * @num: number of resources
+ *
+ * This function creates a simple platform device that requires minimal
+ * resource and memory management. Canned release function freeing memory
+ * allocated for the device allows drivers using such devices to be
+ * unloaded without waiting for the last reference to the device to be
+ * dropped.
+ *
+ * This interface is primarily intended for use with legacy drivers which
+ * probe hardware directly. Because such drivers create sysfs device nodes
+ * themselves, rather than letting system infrastructure handle such device
+ * enumeration tasks, they don't fully conform to the Linux driver model.
+ * In particular, when such drivers are built as modules, they can't be
+ * "hotplugged".
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+static inline struct platform_device *platform_device_register_simple(
+ const char *name, int id,
+ const struct resource *res, unsigned int num)
+{
+ return platform_device_register_resndata(NULL, name, id,
+ res, num, NULL, 0);
+}
+
+/**
+ * platform_device_register_data - add a platform-level device with platform-specific data
+ * @parent: parent device for the device we're adding
+ * @name: base name of the device we're adding
+ * @id: instance id
+ * @data: platform specific data for this platform device
+ * @size: size of platform specific data
+ *
+ * This function creates a simple platform device that requires minimal
+ * resource and memory management. Canned release function freeing memory
+ * allocated for the device allows drivers using such devices to be
+ * unloaded without waiting for the last reference to the device to be
+ * dropped.
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
+ */
+static inline struct platform_device *platform_device_register_data(
+ struct device *parent, const char *name, int id,
+ const void *data, size_t size)
+{
+ return platform_device_register_resndata(parent, name, id,
+ NULL, 0, data, size);
+}
+
+extern struct platform_device *platform_device_alloc(const char *name, int id);
+extern int platform_device_add_resources(struct platform_device *pdev,
+ const struct resource *res,
+ unsigned int num);
+extern int platform_device_add_data(struct platform_device *pdev,
+ const void *data, size_t size);
+extern int platform_device_add(struct platform_device *pdev);
+extern void platform_device_del(struct platform_device *pdev);
+extern void platform_device_put(struct platform_device *pdev);
+
+struct platform_driver {
+ int (*probe)(struct platform_device *);
+ int (*remove)(struct platform_device *);
+ void (*shutdown)(struct platform_device *);
+ int (*suspend)(struct platform_device *, pm_message_t state);
+ int (*resume)(struct platform_device *);
+ struct device_driver driver;
+ const struct platform_device_id *id_table;
+ bool prevent_deferred_probe;
+};
+
+#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
+ driver))
+
+/*
+ * use a macro to avoid include chaining to get THIS_MODULE
+ */
+#define platform_driver_register(drv) \
+ __platform_driver_register(drv, THIS_MODULE)
+extern int __platform_driver_register(struct platform_driver *,
+ struct module *);
+extern void platform_driver_unregister(struct platform_driver *);
+
+/* non-hotpluggable platform devices may use this so that probe() and
+ * its support may live in __init sections, conserving runtime memory.
+ */
+#define platform_driver_probe(drv, probe) \
+ __platform_driver_probe(drv, probe, THIS_MODULE)
+extern int __platform_driver_probe(struct platform_driver *driver,
+ int (*probe)(struct platform_device *), struct module *module);
+
+static inline void *platform_get_drvdata(const struct platform_device *pdev)
+{
+ return dev_get_drvdata(&pdev->dev);
+}
+
+static inline void platform_set_drvdata(struct platform_device *pdev,
+ void *data)
+{
+ dev_set_drvdata(&pdev->dev, data);
+}
+
+/* module_platform_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_platform_driver(__platform_driver) \
+ module_driver(__platform_driver, platform_driver_register, \
+ platform_driver_unregister)
+
+/* module_platform_driver_probe() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_platform_driver_probe(__platform_driver, __platform_probe) \
+static int __init __platform_driver##_init(void) \
+{ \
+ return platform_driver_probe(&(__platform_driver), \
+ __platform_probe); \
+} \
+module_init(__platform_driver##_init); \
+static void __exit __platform_driver##_exit(void) \
+{ \
+ platform_driver_unregister(&(__platform_driver)); \
+} \
+module_exit(__platform_driver##_exit);
+
+#define platform_create_bundle(driver, probe, res, n_res, data, size) \
+ __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
+extern struct platform_device *__platform_create_bundle(
+ struct platform_driver *driver, int (*probe)(struct platform_device *),
+ struct resource *res, unsigned int n_res,
+ const void *data, size_t size, struct module *module);
+
+/* early platform driver interface */
+struct early_platform_driver {
+ const char *class_str;
+ struct platform_driver *pdrv;
+ struct list_head list;
+ int requested_id;
+ char *buffer;
+ int bufsize;
+};
+
+#define EARLY_PLATFORM_ID_UNSET -2
+#define EARLY_PLATFORM_ID_ERROR -3
+
+extern int early_platform_driver_register(struct early_platform_driver *epdrv,
+ char *buf);
+extern void early_platform_add_devices(struct platform_device **devs, int num);
+
+static inline int is_early_platform_device(struct platform_device *pdev)
+{
+ return !pdev->dev.driver;
+}
+
+extern void early_platform_driver_register_all(char *class_str);
+extern int early_platform_driver_probe(char *class_str,
+ int nr_probe, int user_only);
+extern void early_platform_cleanup(void);
+
+#define early_platform_init(class_string, platdrv) \
+ early_platform_init_buffer(class_string, platdrv, NULL, 0)
+
+#ifndef MODULE
+#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
+static __initdata struct early_platform_driver early_driver = { \
+ .class_str = class_string, \
+ .buffer = buf, \
+ .bufsize = bufsiz, \
+ .pdrv = platdrv, \
+ .requested_id = EARLY_PLATFORM_ID_UNSET, \
+}; \
+static int __init early_platform_driver_setup_func(char *buffer) \
+{ \
+ return early_platform_driver_register(&early_driver, buffer); \
+} \
+early_param(class_string, early_platform_driver_setup_func)
+#else /* MODULE */
+#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
+static inline char *early_platform_driver_setup_func(void) \
+{ \
+ return bufsiz ? buf : NULL; \
+}
+#endif /* MODULE */
+
+#ifdef CONFIG_SUSPEND
+extern int platform_pm_suspend(struct device *dev);
+extern int platform_pm_resume(struct device *dev);
+#else
+#define platform_pm_suspend NULL
+#define platform_pm_resume NULL
+#endif
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+extern int platform_pm_freeze(struct device *dev);
+extern int platform_pm_thaw(struct device *dev);
+extern int platform_pm_poweroff(struct device *dev);
+extern int platform_pm_restore(struct device *dev);
+#else
+#define platform_pm_freeze NULL
+#define platform_pm_thaw NULL
+#define platform_pm_poweroff NULL
+#define platform_pm_restore NULL
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#define USE_PLATFORM_PM_SLEEP_OPS \
+ .suspend = platform_pm_suspend, \
+ .resume = platform_pm_resume, \
+ .freeze = platform_pm_freeze, \
+ .thaw = platform_pm_thaw, \
+ .poweroff = platform_pm_poweroff, \
+ .restore = platform_pm_restore,
+#else
+#define USE_PLATFORM_PM_SLEEP_OPS
+#endif
+
+#endif /* _PLATFORM_DEVICE_H_ */
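
The usual consumer of this header is a platform driver registered with module_platform_driver(), probing against resources supplied by a board file or device tree. A minimal, hypothetical skeleton (the "example-device" name and example_priv structure are made up):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {
	void __iomem *base;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	platform_set_drvdata(pdev, priv);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* devm-managed resources are released automatically */
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example-device",
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
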
diff --git a/include/linux/plist.h b/include/linux/plist.h
new file mode 100644
index 000000000..97883604a
--- /dev/null
+++ b/include/linux/plist.h
@@ -0,0 +1,300 @@
+/*
+ * Descending-priority-sorted double-linked list
+ *
+ * (C) 2002-2003 Intel Corp
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
+ *
+ * 2001-2005 (c) MontaVista Software, Inc.
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Licensed under the FSF's GNU Public License v2 or later.
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This is a priority-sorted list of nodes; each node has a
+ * priority from INT_MIN (highest) to INT_MAX (lowest).
+ *
+ * Addition is O(K), removal is O(1), change of priority of a node is
+ * O(K) and K is the number of RT priority levels used in the system.
+ * (1 <= K <= 99)
+ *
+ * This list is really a list of lists:
+ *
+ * - The tier 1 list is the prio_list, different priority nodes.
+ *
+ * - The tier 2 list is the node_list, serialized nodes.
+ *
+ * Simple ASCII art explanation:
+ *
+ * pl:prio_list (only for plist_node)
+ * nl:node_list
+ * HEAD| NODE(S)
+ * |
+ * ||------------------------------------|
+ * ||->|pl|<->|pl|<--------------->|pl|<-|
+ * | |10| |21| |21| |21| |40| (prio)
+ * | | | | | | | | | | |
+ * | | | | | | | | | | |
+ * |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
+ * |-------------------------------------------|
+ *
+ * The nodes on the prio_list list are sorted by priority to simplify
+ * the insertion of new nodes. There are no nodes with duplicate
+ * priorities on the list.
+ *
+ * The nodes on the node_list are ordered by priority and can contain
+ * entries which have the same priority. Those entries are ordered
+ * FIFO
+ *
+ * Addition means: look for the prio_list node in the prio_list
+ * for the priority of the node and insert it before the node_list
+ * entry of the next prio_list node. If it is the first node of
+ * that priority, add it to the prio_list in the right position and
+ * insert it into the serialized node_list list
+ *
+ * Removal means remove it from the node_list and remove it from
+ * the prio_list if the node_list list_head is non-empty. In case
+ * of removal from the prio_list it must be checked whether other
+ * entries of the same priority are on the list or not. If there
+ * is another entry of the same priority then that entry has to
+ * replace the removed entry on the prio_list. If the entry which
+ * is removed is the only entry of this priority then a simple
+ * removal from both lists is sufficient.
+ *
+ * INT_MIN is the highest priority, 0 is in the middle, and INT_MAX
+ * is the lowest priority.
+ *
+ * No locking is done; that is up to the caller.
+ *
+ */
+#ifndef _LINUX_PLIST_H_
+#define _LINUX_PLIST_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+struct plist_head {
+ struct list_head node_list;
+};
+
+struct plist_node {
+ int prio;
+ struct list_head prio_list;
+ struct list_head node_list;
+};
+
+/**
+ * PLIST_HEAD_INIT - static struct plist_head initializer
+ * @head: struct plist_head variable name
+ */
+#define PLIST_HEAD_INIT(head) \
+{ \
+ .node_list = LIST_HEAD_INIT((head).node_list) \
+}
+
+/**
+ * PLIST_HEAD - declare and init plist_head
+ * @head: name for struct plist_head variable
+ */
+#define PLIST_HEAD(head) \
+ struct plist_head head = PLIST_HEAD_INIT(head)
+
+/**
+ * PLIST_NODE_INIT - static struct plist_node initializer
+ * @node: struct plist_node variable name
+ * @__prio: initial node priority
+ */
+#define PLIST_NODE_INIT(node, __prio) \
+{ \
+ .prio = (__prio), \
+ .prio_list = LIST_HEAD_INIT((node).prio_list), \
+ .node_list = LIST_HEAD_INIT((node).node_list), \
+}
+
+/**
+ * plist_head_init - dynamic struct plist_head initializer
+ * @head: &struct plist_head pointer
+ */
+static inline void
+plist_head_init(struct plist_head *head)
+{
+ INIT_LIST_HEAD(&head->node_list);
+}
+
+/**
+ * plist_node_init - Dynamic struct plist_node initializer
+ * @node: &struct plist_node pointer
+ * @prio: initial node priority
+ */
+static inline void plist_node_init(struct plist_node *node, int prio)
+{
+ node->prio = prio;
+ INIT_LIST_HEAD(&node->prio_list);
+ INIT_LIST_HEAD(&node->node_list);
+}
+
+extern void plist_add(struct plist_node *node, struct plist_head *head);
+extern void plist_del(struct plist_node *node, struct plist_head *head);
+
+extern void plist_requeue(struct plist_node *node, struct plist_head *head);
+
+/**
+ * plist_for_each - iterate over the plist
+ * @pos: the type * to use as a loop counter
+ * @head: the head for your list
+ */
+#define plist_for_each(pos, head) \
+ list_for_each_entry(pos, &(head)->node_list, node_list)
+
+/**
+ * plist_for_each_continue - continue iteration over the plist
+ * @pos: the type * to use as a loop cursor
+ * @head: the head for your list
+ *
+ * Continue to iterate over plist, continuing after the current position.
+ */
+#define plist_for_each_continue(pos, head) \
+ list_for_each_entry_continue(pos, &(head)->node_list, node_list)
+
+/**
+ * plist_for_each_safe - iterate safely over a plist of given type
+ * @pos: the type * to use as a loop counter
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list
+ *
+ * Iterate over a plist of given type, safe against removal of list entry.
+ */
+#define plist_for_each_safe(pos, n, head) \
+ list_for_each_entry_safe(pos, n, &(head)->node_list, node_list)
+
+/**
+ * plist_for_each_entry - iterate over list of given type
+ * @pos: the type * to use as a loop counter
+ * @head: the head for your list
+ * @mem: the name of the plist_node within the struct
+ */
+#define plist_for_each_entry(pos, head, mem) \
+ list_for_each_entry(pos, &(head)->node_list, mem.node_list)
+
+/**
+ * plist_for_each_entry_continue - continue iteration over list of given type
+ * @pos: the type * to use as a loop cursor
+ * @head: the head for your list
+ * @m: the name of the plist_node within the struct
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define plist_for_each_entry_continue(pos, head, m) \
+ list_for_each_entry_continue(pos, &(head)->node_list, m.node_list)
+
+/**
+ * plist_for_each_entry_safe - iterate safely over list of given type
+ * @pos: the type * to use as a loop counter
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list
+ * @m: the name of the plist_node within the struct
+ *
+ * Iterate over list of given type, safe against removal of list entry.
+ */
+#define plist_for_each_entry_safe(pos, n, head, m) \
+ list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list)
+
+/**
+ * plist_head_empty - return !0 if a plist_head is empty
+ * @head: &struct plist_head pointer
+ */
+static inline int plist_head_empty(const struct plist_head *head)
+{
+ return list_empty(&head->node_list);
+}
+
+/**
+ * plist_node_empty - return !0 if plist_node is not on a list
+ * @node: &struct plist_node pointer
+ */
+static inline int plist_node_empty(const struct plist_node *node)
+{
+ return list_empty(&node->node_list);
+}
+
+/* All functions below assume the plist_head is not empty. */
+
+/**
+ * plist_first_entry - get the struct for the first entry
+ * @head: the &struct plist_head pointer
+ * @type: the type of the struct this is embedded in
+ * @member: the name of the plist_node within the struct
+ */
+#ifdef CONFIG_DEBUG_PI_LIST
+# define plist_first_entry(head, type, member) \
+({ \
+ WARN_ON(plist_head_empty(head)); \
+ container_of(plist_first(head), type, member); \
+})
+#else
+# define plist_first_entry(head, type, member) \
+ container_of(plist_first(head), type, member)
+#endif
+
+/**
+ * plist_last_entry - get the struct for the last entry
+ * @head: the &struct plist_head pointer
+ * @type: the type of the struct this is embedded in
+ * @member: the name of the plist_node within the struct
+ */
+#ifdef CONFIG_DEBUG_PI_LIST
+# define plist_last_entry(head, type, member) \
+({ \
+ WARN_ON(plist_head_empty(head)); \
+ container_of(plist_last(head), type, member); \
+})
+#else
+# define plist_last_entry(head, type, member) \
+ container_of(plist_last(head), type, member)
+#endif
+
+/**
+ * plist_next - get the next entry in list
+ * @pos: the type * to cursor
+ */
+#define plist_next(pos) \
+ list_next_entry(pos, node_list)
+
+/**
+ * plist_prev - get the prev entry in list
+ * @pos: the type * to cursor
+ */
+#define plist_prev(pos) \
+ list_prev_entry(pos, node_list)
+
+/**
+ * plist_first - return the first node (and thus, highest priority)
+ * @head: the &struct plist_head pointer
+ *
+ * Assumes the plist is _not_ empty.
+ */
+static inline struct plist_node *plist_first(const struct plist_head *head)
+{
+ return list_entry(head->node_list.next,
+ struct plist_node, node_list);
+}
+
+/**
+ * plist_last - return the last node (and thus, lowest priority)
+ * @head: the &struct plist_head pointer
+ *
+ * Assumes the plist is _not_ empty.
+ */
+static inline struct plist_node *plist_last(const struct plist_head *head)
+{
+ return list_entry(head->node_list.prev,
+ struct plist_node, node_list);
+}
+
+#endif
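
The add/iterate/remove flow described in the plist.h header comment looks roughly
like this in use. struct demo_item and demo_plist_usage() are hypothetical names,
and any locking is left to the caller, as noted above:

    #include <linux/plist.h>
    #include <linux/slab.h>

    struct demo_item {
    	int id;
    	struct plist_node node;		/* plist linkage embedded in the item */
    };

    static PLIST_HEAD(demo_head);	/* statically initialized plist head */

    static void demo_plist_usage(void)
    {
    	struct demo_item *item, *pos;

    	item = kzalloc(sizeof(*item), GFP_KERNEL);
    	if (!item)
    		return;

    	item->id = 1;
    	plist_node_init(&item->node, 10);	/* smaller prio value = higher priority */
    	plist_add(&item->node, &demo_head);	/* O(K) insertion, K = distinct priorities */

    	/* Walk in priority order; entries of equal priority come out FIFO. */
    	plist_for_each_entry(pos, &demo_head, node)
    		pr_info("item %d, prio %d\n", pos->id, pos->node.prio);

    	plist_del(&item->node, &demo_head);	/* O(1) removal */
    	kfree(item);
    }
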
diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h
new file mode 100644
index 000000000..ecbde7a55
--- /dev/null
+++ b/include/linux/pm-trace.h
@@ -0,0 +1,35 @@
+#ifndef PM_TRACE_H
+#define PM_TRACE_H
+
+#ifdef CONFIG_PM_TRACE
+#include <asm/pm-trace.h>
+#include <linux/types.h>
+
+extern int pm_trace_enabled;
+
+static inline int pm_trace_is_enabled(void)
+{
+ return pm_trace_enabled;
+}
+
+struct device;
+extern void set_trace_device(struct device *);
+extern void generate_pm_trace(const void *tracedata, unsigned int user);
+extern int show_trace_dev_match(char *buf, size_t size);
+
+#define TRACE_DEVICE(dev) do { \
+ if (pm_trace_enabled) \
+ set_trace_device(dev); \
+ } while (0)
+
+#else
+
+static inline int pm_trace_is_enabled(void) { return 0; }
+
+#define TRACE_DEVICE(dev) do { } while (0)
+#define TRACE_RESUME(dev) do { } while (0)
+#define TRACE_SUSPEND(dev) do { } while (0)
+
+#endif
+
+#endif
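
TRACE_DEVICE() is intended to be placed in the suspend/resume paths so that, when
pm_trace is enabled, the device being handled at the moment of a hang can be
identified after reboot. A minimal sketch; demo_run_callback() is a hypothetical
helper, not an API from this header:

    #include <linux/device.h>
    #include <linux/pm-trace.h>

    static int demo_run_callback(struct device *dev, int (*cb)(struct device *))
    {
    	TRACE_DEVICE(dev);	/* records dev only if pm_trace_enabled is set */
    	return cb ? cb(dev) : 0;
    }
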
diff --git a/include/linux/pm.h b/include/linux/pm.h
new file mode 100644
index 000000000..2d29c64f8
--- /dev/null
+++ b/include/linux/pm.h
@@ -0,0 +1,773 @@
+/*
+ * pm.h - Power management interface
+ *
+ * Copyright (C) 2000 Andrew Henroid
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_PM_H
+#define _LINUX_PM_H
+
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+/*
+ * Callbacks for platform drivers to implement.
+ */
+extern void (*pm_power_off)(void);
+extern void (*pm_power_off_prepare)(void);
+
+struct device; /* we have a circular dep with device.h */
+#ifdef CONFIG_VT_CONSOLE_SLEEP
+extern void pm_vt_switch_required(struct device *dev, bool required);
+extern void pm_vt_switch_unregister(struct device *dev);
+#else
+static inline void pm_vt_switch_required(struct device *dev, bool required)
+{
+}
+static inline void pm_vt_switch_unregister(struct device *dev)
+{
+}
+#endif /* CONFIG_VT_CONSOLE_SLEEP */
+
+/*
+ * Device power management
+ */
+
+struct device;
+
+#ifdef CONFIG_PM
+extern const char power_group_name[]; /* = "power" */
+#else
+#define power_group_name NULL
+#endif
+
+typedef struct pm_message {
+ int event;
+} pm_message_t;
+
+/**
+ * struct dev_pm_ops - device PM callbacks
+ *
+ * Several device power state transitions are externally visible, affecting
+ * the state of pending I/O queues and (for drivers that touch hardware)
+ * interrupts, wakeups, DMA, and other hardware state. There may also be
+ * internal transitions to various low-power modes which are transparent
+ * to the rest of the driver stack (such as a driver that's ON gating off
+ * clocks which are not in active use).
+ *
+ * The externally visible transitions are handled with the help of callbacks
+ * included in this structure in such a way that two levels of callbacks are
+ * involved. First, the PM core executes callbacks provided by PM domains,
+ * device types, classes and bus types. They are the subsystem-level callbacks
+ * supposed to execute callbacks provided by device drivers, although they may
+ * choose not to do that. If the driver callbacks are executed, they have to
+ * collaborate with the subsystem-level callbacks to achieve the goals
+ * appropriate for the given system transition, given transition phase and the
+ * subsystem the device belongs to.
+ *
+ * @prepare: The principal role of this callback is to prevent new children of
+ * the device from being registered after it has returned (the driver's
+ * subsystem and generally the rest of the kernel is supposed to prevent
+ * new calls to the probe method from being made too once @prepare() has
+ * succeeded). If @prepare() detects a situation it cannot handle (e.g.
+ * registration of a child already in progress), it may return -EAGAIN, so
+ * that the PM core can execute it once again (e.g. after a new child has
+ * been registered) to recover from the race condition.
+ * This method is executed for all kinds of suspend transitions and is
+ * followed by one of the suspend callbacks: @suspend(), @freeze(), or
+ * @poweroff(). If the transition is a suspend to memory or standby (that
+ * is, not related to hibernation), the return value of @prepare() may be
+ * used to indicate to the PM core to leave the device in runtime suspend
+ * if applicable. Namely, if @prepare() returns a positive number, the PM
+ * core will understand that as a declaration that the device appears to be
+ * runtime-suspended and it may be left in that state during the entire
+ * transition and during the subsequent resume if all of its descendants
+ * are left in runtime suspend too. If that happens, @complete() will be
+ * executed directly after @prepare() and it must ensure the proper
+ * functioning of the device after the system resume.
+ * The PM core executes subsystem-level @prepare() for all devices before
+ * starting to invoke suspend callbacks for any of them, so generally
+ * devices may be assumed to be functional or to respond to runtime resume
+ * requests while @prepare() is being executed. However, device drivers
+ * may NOT assume anything about the availability of user space at that
+ * time and it is NOT valid to request firmware from within @prepare()
+ * (it's too late to do that). It also is NOT valid to allocate
+ * substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
+ * [To work around these limitations, drivers may register suspend and
+ * hibernation notifiers to be executed before the freezing of tasks.]
+ *
+ * @complete: Undo the changes made by @prepare(). This method is executed for
+ * all kinds of resume transitions, following one of the resume callbacks:
+ * @resume(), @thaw(), @restore(). Also called if the state transition
+ * fails before the driver's suspend callback: @suspend(), @freeze() or
+ * @poweroff(), can be executed (e.g. if the suspend callback fails for one
+ * of the other devices that the PM core has unsuccessfully attempted to
+ * suspend earlier).
+ * The PM core executes subsystem-level @complete() after it has executed
+ * the appropriate resume callbacks for all devices. If the corresponding
+ * @prepare() at the beginning of the suspend transition returned a
+ * positive number and the device was left in runtime suspend (without
+ * executing any suspend and resume callbacks for it), @complete() will be
+ * the only callback executed for the device during resume. In that case,
+ * @complete() must be prepared to do whatever is necessary to ensure the
+ * proper functioning of the device after the system resume. To this end,
+ * @complete() can check the power.direct_complete flag of the device to
+ * learn whether (unset) or not (set) the previous suspend and resume
+ * callbacks have been executed for it.
+ *
+ * @suspend: Executed before putting the system into a sleep state in which the
+ * contents of main memory are preserved. The exact action to perform
+ * depends on the device's subsystem (PM domain, device type, class or bus
+ * type), but generally the device must be quiescent after subsystem-level
+ * @suspend() has returned, so that it doesn't do any I/O or DMA.
+ * Subsystem-level @suspend() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
+ *
+ * @suspend_late: Continue operations started by @suspend(). For a number of
+ * devices @suspend_late() may point to the same callback routine as the
+ * runtime suspend callback.
+ *
+ * @resume: Executed after waking the system up from a sleep state in which the
+ * contents of main memory were preserved. The exact action to perform
+ * depends on the device's subsystem, but generally the driver is expected
+ * to start working again, responding to hardware events and software
+ * requests (the device itself may be left in a low-power state, waiting
+ * for a runtime resume to occur). The state of the device at the time its
+ * driver's @resume() callback is run depends on the platform and subsystem
+ * the device belongs to. On most platforms, there are no restrictions on
+ * availability of resources like clocks during @resume().
+ * Subsystem-level @resume() is executed for all devices after invoking
+ * subsystem-level @resume_noirq() for all of them.
+ *
+ * @resume_early: Prepare to execute @resume(). For a number of devices
+ * @resume_early() may point to the same callback routine as the runtime
+ * resume callback.
+ *
+ * @freeze: Hibernation-specific, executed before creating a hibernation image.
+ * Analogous to @suspend(), but it should not enable the device to signal
+ * wakeup events or change its power state. The majority of subsystems
+ * (with the notable exception of the PCI bus type) expect the driver-level
+ * @freeze() to save the device settings in memory to be used by @restore()
+ * during the subsequent resume from hibernation.
+ * Subsystem-level @freeze() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
+ *
+ * @freeze_late: Continue operations started by @freeze(). Analogous to
+ * @suspend_late(), but it should not enable the device to signal wakeup
+ * events or change its power state.
+ *
+ * @thaw: Hibernation-specific, executed after creating a hibernation image OR
+ * if the creation of an image has failed. Also executed after a failing
+ * attempt to restore the contents of main memory from such an image.
+ * Undo the changes made by the preceding @freeze(), so the device can be
+ * operated in the same way as immediately before the call to @freeze().
+ * Subsystem-level @thaw() is executed for all devices after invoking
+ * subsystem-level @thaw_noirq() for all of them. It also may be executed
+ * directly after @freeze() in case of a transition error.
+ *
+ * @thaw_early: Prepare to execute @thaw(). Undo the changes made by the
+ * preceding @freeze_late().
+ *
+ * @poweroff: Hibernation-specific, executed after saving a hibernation image.
+ * Analogous to @suspend(), but it need not save the device's settings in
+ * memory.
+ * Subsystem-level @poweroff() is executed for all devices after invoking
+ * subsystem-level @prepare() for all of them.
+ *
+ * @poweroff_late: Continue operations started by @poweroff(). Analogous to
+ * @suspend_late(), but it need not save the device's settings in memory.
+ *
+ * @restore: Hibernation-specific, executed after restoring the contents of main
+ * memory from a hibernation image, analogous to @resume().
+ *
+ * @restore_early: Prepare to execute @restore(), analogous to @resume_early().
+ *
+ * @suspend_noirq: Complete the actions started by @suspend(). Carry out any
+ * additional operations required for suspending the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @suspend_noirq() is being executed.
+ * It generally is expected that the device will be in a low-power state
+ * (appropriate for the target system sleep state) after subsystem-level
+ * @suspend_noirq() has returned successfully. If the device can generate
+ * system wakeup signals and is enabled to wake up the system, it should be
+ * configured to do so at that time. However, depending on the platform
+ * and device's subsystem, @suspend() or @suspend_late() may be allowed to
+ * put the device into the low-power state and configure it to generate
+ * wakeup signals, in which case it generally is not necessary to define
+ * @suspend_noirq().
+ *
+ * @resume_noirq: Prepare for the execution of @resume() by carrying out any
+ * operations required for resuming the device that might be racing with
+ * its driver's interrupt handler, which is guaranteed not to run while
+ * @resume_noirq() is being executed.
+ *
+ * @freeze_noirq: Complete the actions started by @freeze(). Carry out any
+ * additional operations required for freezing the device that might be
+ * racing with its driver's interrupt handler, which is guaranteed not to
+ * run while @freeze_noirq() is being executed.
+ * The power state of the device should not be changed by either @freeze(),
+ * or @freeze_late(), or @freeze_noirq() and it should not be configured to
+ * signal system wakeup by any of these callbacks.
+ *
+ * @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @thaw_noirq() is being executed.
+ *
+ * @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
+ * @suspend_noirq(), but it need not save the device's settings in memory.
+ *
+ * @restore_noirq: Prepare for the execution of @restore() by carrying out any
+ * operations required for thawing the device that might be racing with its
+ * driver's interrupt handler, which is guaranteed not to run while
+ * @restore_noirq() is being executed. Analogous to @resume_noirq().
+ *
+ * All of the above callbacks, except for @complete(), return error codes.
+ * However, the error codes returned by the resume operations, @resume(),
+ * @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
+ * not cause the PM core to abort the resume transition during which they are
+ * returned. The error codes returned in those cases are only printed by the PM
+ * core to the system logs for debugging purposes. Still, it is recommended
+ * that drivers only return error codes from their resume methods in case of an
+ * unrecoverable failure (i.e. when the device being handled refuses to resume
+ * and becomes unusable) to allow us to modify the PM core in the future, so
+ * that it can avoid attempting to handle devices that failed to resume and
+ * their children.
+ *
+ * It is allowed to unregister devices while the above callbacks are being
+ * executed. However, a callback routine must NOT try to unregister the device
+ * it was called for, although it may unregister children of that device (for
+ * example, if it detects that a child was unplugged while the system was
+ * asleep).
+ *
+ * Refer to Documentation/power/devices.txt for more information about the role
+ * of the above callbacks in the system suspend process.
+ *
+ * There also are callbacks related to runtime power management of devices.
+ * Again, these callbacks are executed by the PM core only for subsystems
+ * (PM domains, device types, classes and bus types) and the subsystem-level
+ * callbacks are supposed to invoke the driver callbacks. Moreover, the exact
+ * actions to be performed by a device driver's callbacks generally depend on
+ * the platform and subsystem the device belongs to.
+ *
+ * @runtime_suspend: Prepare the device for a condition in which it won't be
+ * able to communicate with the CPU(s) and RAM due to power management.
+ * This need not mean that the device should be put into a low-power state.
+ * For example, if the device is behind a link which is about to be turned
+ * off, the device may remain at full power. If the device does go to low
+ * power and is capable of generating runtime wakeup events, remote wakeup
+ * (i.e., a hardware mechanism allowing the device to request a change of
+ * its power state via an interrupt) should be enabled for it.
+ *
+ * @runtime_resume: Put the device into the fully active state in response to a
+ * wakeup event generated by hardware or at the request of software. If
+ * necessary, put the device into the full-power state and restore its
+ * registers, so that it is fully operational.
+ *
+ * @runtime_idle: Device appears to be inactive and it might be put into a
+ * low-power state if all of the necessary conditions are satisfied.
+ * Check these conditions, and return 0 if it's appropriate to let the PM
+ * core queue a suspend request for the device.
+ *
+ * Refer to Documentation/power/runtime_pm.txt for more information about the
+ * role of the above callbacks in device runtime power management.
+ *
+ */
+
+struct dev_pm_ops {
+ int (*prepare)(struct device *dev);
+ void (*complete)(struct device *dev);
+ int (*suspend)(struct device *dev);
+ int (*resume)(struct device *dev);
+ int (*freeze)(struct device *dev);
+ int (*thaw)(struct device *dev);
+ int (*poweroff)(struct device *dev);
+ int (*restore)(struct device *dev);
+ int (*suspend_late)(struct device *dev);
+ int (*resume_early)(struct device *dev);
+ int (*freeze_late)(struct device *dev);
+ int (*thaw_early)(struct device *dev);
+ int (*poweroff_late)(struct device *dev);
+ int (*restore_early)(struct device *dev);
+ int (*suspend_noirq)(struct device *dev);
+ int (*resume_noirq)(struct device *dev);
+ int (*freeze_noirq)(struct device *dev);
+ int (*thaw_noirq)(struct device *dev);
+ int (*poweroff_noirq)(struct device *dev);
+ int (*restore_noirq)(struct device *dev);
+ int (*runtime_suspend)(struct device *dev);
+ int (*runtime_resume)(struct device *dev);
+ int (*runtime_idle)(struct device *dev);
+};
+
+#ifdef CONFIG_PM_SLEEP
+#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend = suspend_fn, \
+ .resume = resume_fn, \
+ .freeze = suspend_fn, \
+ .thaw = resume_fn, \
+ .poweroff = suspend_fn, \
+ .restore = resume_fn,
+#else
+#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ .suspend_late = suspend_fn, \
+ .resume_early = resume_fn, \
+ .freeze_late = suspend_fn, \
+ .thaw_early = resume_fn, \
+ .poweroff_late = suspend_fn, \
+ .restore_early = resume_fn,
+#else
+#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
+#endif
+
+#ifdef CONFIG_PM
+#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+ .runtime_suspend = suspend_fn, \
+ .runtime_resume = resume_fn, \
+ .runtime_idle = idle_fn,
+#else
+#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
+#endif
+
+/*
+ * Use this if you want to use the same suspend and resume callbacks for suspend
+ * to RAM and hibernation.
+ */
+#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+const struct dev_pm_ops name = { \
+ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+}
+
+/*
+ * Use this for defining a set of PM operations to be used in all situations
+ * (system suspend, hibernation or runtime PM).
+ * NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
+ * be different from the corresponding runtime PM callbacks, .runtime_suspend(),
+ * and .runtime_resume(), because .runtime_suspend() always works on an already
+ * quiescent device, while .suspend() should assume that the device may be doing
+ * something when it is called (it should ensure that the device will be
+ * quiescent after it has returned). Therefore it's better to point the "late"
+ * suspend and "early" resume callback pointers, .suspend_late() and
+ * .resume_early(), to the same routines as .runtime_suspend() and
+ * .runtime_resume(), respectively (and analogously for hibernation).
+ */
+#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+const struct dev_pm_ops name = { \
+ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
+}
+
+/**
+ * PM_EVENT_ messages
+ *
+ * The following PM_EVENT_ messages are defined for the internal use of the PM
+ * core, in order to provide a mechanism allowing the high level suspend and
+ * hibernation code to convey the necessary information to the device PM core
+ * code:
+ *
+ * ON No transition.
+ *
+ * FREEZE System is going to hibernate, call ->prepare() and ->freeze()
+ * for all devices.
+ *
+ * SUSPEND System is going to suspend, call ->prepare() and ->suspend()
+ * for all devices.
+ *
+ * HIBERNATE Hibernation image has been saved, call ->prepare() and
+ * ->poweroff() for all devices.
+ *
+ * QUIESCE Contents of main memory are going to be restored from a (loaded)
+ * hibernation image, call ->prepare() and ->freeze() for all
+ * devices.
+ *
+ * RESUME System is resuming, call ->resume() and ->complete() for all
+ * devices.
+ *
+ * THAW Hibernation image has been created, call ->thaw() and
+ * ->complete() for all devices.
+ *
+ * RESTORE Contents of main memory have been restored from a hibernation
+ * image, call ->restore() and ->complete() for all devices.
+ *
+ * RECOVER Creation of a hibernation image or restoration of the main
+ * memory contents from a hibernation image has failed, call
+ * ->thaw() and ->complete() for all devices.
+ *
+ * The following PM_EVENT_ messages are defined for internal use by
+ * kernel subsystems. They are never issued by the PM core.
+ *
+ * USER_SUSPEND Manual selective suspend was issued by userspace.
+ *
+ * USER_RESUME Manual selective resume was issued by userspace.
+ *
+ * REMOTE_WAKEUP Remote-wakeup request was received from the device.
+ *
+ * AUTO_SUSPEND Automatic (device idle) runtime suspend was
+ * initiated by the subsystem.
+ *
+ * AUTO_RESUME Automatic (device needed) runtime resume was
+ * requested by a driver.
+ */
+
+#define PM_EVENT_INVALID (-1)
+#define PM_EVENT_ON 0x0000
+#define PM_EVENT_FREEZE 0x0001
+#define PM_EVENT_SUSPEND 0x0002
+#define PM_EVENT_HIBERNATE 0x0004
+#define PM_EVENT_QUIESCE 0x0008
+#define PM_EVENT_RESUME 0x0010
+#define PM_EVENT_THAW 0x0020
+#define PM_EVENT_RESTORE 0x0040
+#define PM_EVENT_RECOVER 0x0080
+#define PM_EVENT_USER 0x0100
+#define PM_EVENT_REMOTE 0x0200
+#define PM_EVENT_AUTO 0x0400
+
+#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
+#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND)
+#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME)
+#define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME)
+#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
+#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME)
+
+#define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, })
+#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
+#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
+#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
+#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
+#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
+#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
+#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
+#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
+#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
+#define PMSG_USER_SUSPEND ((struct pm_message) \
+ { .event = PM_EVENT_USER_SUSPEND, })
+#define PMSG_USER_RESUME ((struct pm_message) \
+ { .event = PM_EVENT_USER_RESUME, })
+#define PMSG_REMOTE_RESUME ((struct pm_message) \
+ { .event = PM_EVENT_REMOTE_RESUME, })
+#define PMSG_AUTO_SUSPEND ((struct pm_message) \
+ { .event = PM_EVENT_AUTO_SUSPEND, })
+#define PMSG_AUTO_RESUME ((struct pm_message) \
+ { .event = PM_EVENT_AUTO_RESUME, })
+
+#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0)
+
+/**
+ * Device run-time power management status.
+ *
+ * These status labels are used internally by the PM core to indicate the
+ * current status of a device with respect to the PM core operations. They do
+ * not reflect the actual power state of the device or its status as seen by the
+ * driver.
+ *
+ * RPM_ACTIVE Device is fully operational. Indicates that the device
+ * bus type's ->runtime_resume() callback has completed
+ * successfully.
+ *
+ * RPM_SUSPENDED Device bus type's ->runtime_suspend() callback has
+ * completed successfully. The device is regarded as
+ * suspended.
+ *
+ * RPM_RESUMING Device bus type's ->runtime_resume() callback is being
+ * executed.
+ *
+ * RPM_SUSPENDING Device bus type's ->runtime_suspend() callback is being
+ * executed.
+ */
+
+enum rpm_status {
+ RPM_ACTIVE = 0,
+ RPM_RESUMING,
+ RPM_SUSPENDED,
+ RPM_SUSPENDING,
+};
+
+/**
+ * Device run-time power management request types.
+ *
+ * RPM_REQ_NONE Do nothing.
+ *
+ * RPM_REQ_IDLE Run the device bus type's ->runtime_idle() callback
+ *
+ * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback
+ *
+ * RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has
+ * been inactive for as long as power.autosuspend_delay
+ *
+ * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback
+ */
+
+enum rpm_request {
+ RPM_REQ_NONE = 0,
+ RPM_REQ_IDLE,
+ RPM_REQ_SUSPEND,
+ RPM_REQ_AUTOSUSPEND,
+ RPM_REQ_RESUME,
+};
+
+struct wakeup_source;
+struct pm_domain_data;
+
+struct pm_subsys_data {
+ spinlock_t lock;
+ unsigned int refcount;
+#ifdef CONFIG_PM_CLK
+ struct list_head clock_list;
+#endif
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+ struct pm_domain_data *domain_data;
+#endif
+};
+
+struct dev_pm_info {
+ pm_message_t power_state;
+ unsigned int can_wakeup:1;
+ unsigned int async_suspend:1;
+ bool is_prepared:1; /* Owned by the PM core */
+ bool is_suspended:1; /* Ditto */
+ bool is_noirq_suspended:1;
+ bool is_late_suspended:1;
+ bool ignore_children:1;
+ bool early_init:1; /* Owned by the PM core */
+ bool direct_complete:1; /* Owned by the PM core */
+ spinlock_t lock;
+#ifdef CONFIG_PM_SLEEP
+ struct list_head entry;
+ struct completion completion;
+ struct wakeup_source *wakeup;
+ bool wakeup_path:1;
+ bool syscore:1;
+#else
+ unsigned int should_wakeup:1;
+#endif
+#ifdef CONFIG_PM
+ struct timer_list suspend_timer;
+ unsigned long timer_expires;
+ struct work_struct work;
+ wait_queue_head_t wait_queue;
+ atomic_t usage_count;
+ atomic_t child_count;
+ unsigned int disable_depth:3;
+ unsigned int idle_notification:1;
+ unsigned int request_pending:1;
+ unsigned int deferred_resume:1;
+ unsigned int run_wake:1;
+ unsigned int runtime_auto:1;
+ unsigned int no_callbacks:1;
+ unsigned int irq_safe:1;
+ unsigned int use_autosuspend:1;
+ unsigned int timer_autosuspends:1;
+ unsigned int memalloc_noio:1;
+ enum rpm_request request;
+ enum rpm_status runtime_status;
+ int runtime_error;
+ int autosuspend_delay;
+ unsigned long last_busy;
+ unsigned long active_jiffies;
+ unsigned long suspended_jiffies;
+ unsigned long accounting_timestamp;
+#endif
+ struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
+ void (*set_latency_tolerance)(struct device *, s32);
+ struct dev_pm_qos *qos;
+};
+
+extern void update_pm_runtime_accounting(struct device *dev);
+extern int dev_pm_get_subsys_data(struct device *dev);
+extern void dev_pm_put_subsys_data(struct device *dev);
+
+/*
+ * Power domains provide callbacks that are executed during system suspend,
+ * hibernation, system resume and during runtime PM transitions along with
+ * subsystem-level and driver-level callbacks.
+ *
+ * @detach: Called when removing a device from the domain.
+ * @activate: Called before executing probe routines for bus types and drivers.
+ * @sync: Called after successful driver probe.
+ * @dismiss: Called after unsuccessful driver probe and after driver removal.
+ */
+struct dev_pm_domain {
+ struct dev_pm_ops ops;
+ void (*detach)(struct device *dev, bool power_off);
+ int (*activate)(struct device *dev);
+ void (*sync)(struct device *dev);
+ void (*dismiss)(struct device *dev);
+};
+
+/*
+ * The PM_EVENT_ messages are also used by drivers implementing the legacy
+ * suspend framework, based on the ->suspend() and ->resume() callbacks common
+ * for suspend and hibernation transitions, according to the rules below.
+ */
+
+/* Necessary, because several drivers use PM_EVENT_PRETHAW */
+#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
+
+/*
+ * One transition is triggered by resume(), after a suspend() call; the
+ * message is implicit:
+ *
+ * ON Driver starts working again, responding to hardware events
+ * and software requests. The hardware may have gone through
+ * a power-off reset, or it may have maintained state from the
+ * previous suspend() which the driver will rely on while
+ * resuming. On most platforms, there are no restrictions on
+ * availability of resources like clocks during resume().
+ *
+ * Other transitions are triggered by messages sent using suspend(). All
+ * these transitions quiesce the driver, so that I/O queues are inactive.
+ * That commonly entails turning off IRQs and DMA; there may be rules
+ * about how to quiesce that are specific to the bus or the device's type.
+ * (For example, network drivers mark the link state.) Other details may
+ * differ according to the message:
+ *
+ * SUSPEND Quiesce, enter a low power device state appropriate for
+ * the upcoming system state (such as PCI_D3hot), and enable
+ * wakeup events as appropriate.
+ *
+ * HIBERNATE Enter a low power device state appropriate for the hibernation
+ * state (e.g. ACPI S4) and enable wakeup events as appropriate.
+ *
+ * FREEZE Quiesce operations so that a consistent image can be saved;
+ * but do NOT otherwise enter a low power device state, and do
+ * NOT emit system wakeup events.
+ *
+ * PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring
+ * the system from a snapshot taken after an earlier FREEZE.
+ * Some drivers will need to reset their hardware state instead
+ * of preserving it, to ensure that it's never mistaken for the
+ * state which that earlier snapshot had set up.
+ *
+ * A minimally power-aware driver treats all messages as SUSPEND, fully
+ * reinitializes its device during resume() -- whether or not it was reset
+ * during the suspend/resume cycle -- and can't issue wakeup events.
+ *
+ * More power-aware drivers may also use low power states at runtime as
+ * well as during system sleep states like PM_SUSPEND_STANDBY. They may
+ * be able to use wakeup events to exit from runtime low-power states,
+ * or from system low-power states such as standby or suspend-to-RAM.
+ */
+
+#ifdef CONFIG_PM_SLEEP
+extern void device_pm_lock(void);
+extern void dpm_resume_start(pm_message_t state);
+extern void dpm_resume_end(pm_message_t state);
+extern void dpm_resume_noirq(pm_message_t state);
+extern void dpm_resume_early(pm_message_t state);
+extern void dpm_resume(pm_message_t state);
+extern void dpm_complete(pm_message_t state);
+
+extern void device_pm_unlock(void);
+extern int dpm_suspend_end(pm_message_t state);
+extern int dpm_suspend_start(pm_message_t state);
+extern int dpm_suspend_noirq(pm_message_t state);
+extern int dpm_suspend_late(pm_message_t state);
+extern int dpm_suspend(pm_message_t state);
+extern int dpm_prepare(pm_message_t state);
+
+extern void __suspend_report_result(const char *function, void *fn, int ret);
+
+#define suspend_report_result(fn, ret) \
+ do { \
+ __suspend_report_result(__func__, fn, ret); \
+ } while (0)
+
+extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
+extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));
+
+extern int pm_generic_prepare(struct device *dev);
+extern int pm_generic_suspend_late(struct device *dev);
+extern int pm_generic_suspend_noirq(struct device *dev);
+extern int pm_generic_suspend(struct device *dev);
+extern int pm_generic_resume_early(struct device *dev);
+extern int pm_generic_resume_noirq(struct device *dev);
+extern int pm_generic_resume(struct device *dev);
+extern int pm_generic_freeze_noirq(struct device *dev);
+extern int pm_generic_freeze_late(struct device *dev);
+extern int pm_generic_freeze(struct device *dev);
+extern int pm_generic_thaw_noirq(struct device *dev);
+extern int pm_generic_thaw_early(struct device *dev);
+extern int pm_generic_thaw(struct device *dev);
+extern int pm_generic_restore_noirq(struct device *dev);
+extern int pm_generic_restore_early(struct device *dev);
+extern int pm_generic_restore(struct device *dev);
+extern int pm_generic_poweroff_noirq(struct device *dev);
+extern int pm_generic_poweroff_late(struct device *dev);
+extern int pm_generic_poweroff(struct device *dev);
+extern void pm_generic_complete(struct device *dev);
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define device_pm_lock() do {} while (0)
+#define device_pm_unlock() do {} while (0)
+
+static inline int dpm_suspend_start(pm_message_t state)
+{
+ return 0;
+}
+
+#define suspend_report_result(fn, ret) do {} while (0)
+
+static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
+{
+ return 0;
+}
+
+static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
+{
+}
+
+#define pm_generic_prepare NULL
+#define pm_generic_suspend_late NULL
+#define pm_generic_suspend_noirq NULL
+#define pm_generic_suspend NULL
+#define pm_generic_resume_early NULL
+#define pm_generic_resume_noirq NULL
+#define pm_generic_resume NULL
+#define pm_generic_freeze_noirq NULL
+#define pm_generic_freeze_late NULL
+#define pm_generic_freeze NULL
+#define pm_generic_thaw_noirq NULL
+#define pm_generic_thaw_early NULL
+#define pm_generic_thaw NULL
+#define pm_generic_restore_noirq NULL
+#define pm_generic_restore_early NULL
+#define pm_generic_restore NULL
+#define pm_generic_poweroff_noirq NULL
+#define pm_generic_poweroff_late NULL
+#define pm_generic_poweroff NULL
+#define pm_generic_complete NULL
+#endif /* !CONFIG_PM_SLEEP */
+
+/* How to reorder dpm_list after device_move() */
+enum dpm_order {
+ DPM_ORDER_NONE,
+ DPM_ORDER_DEV_AFTER_PARENT,
+ DPM_ORDER_PARENT_BEFORE_DEV,
+ DPM_ORDER_DEV_LAST,
+};
+
+#endif /* _LINUX_PM_H */
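
The SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() helpers above are normally
combined in a driver along the lines of the sketch below; the foo_*() callbacks
are hypothetical stubs standing in for real quiesce/reinit code:

    #include <linux/pm.h>

    static int foo_suspend(struct device *dev)	       { return 0; }	/* quiesce I/O */
    static int foo_resume(struct device *dev)	       { return 0; }	/* restart I/O */
    static int foo_runtime_suspend(struct device *dev) { return 0; }	/* gate clocks */
    static int foo_runtime_resume(struct device *dev)  { return 0; }	/* ungate clocks */

    static const struct dev_pm_ops foo_pm_ops = {
    	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
    	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

    /* When one suspend/resume pair covers both suspend-to-RAM and hibernation
     * and no runtime PM callbacks are needed, the one-liner is enough: */
    static SIMPLE_DEV_PM_OPS(foo_simple_pm_ops, foo_suspend, foo_resume);

As the UNIVERSAL_DEV_PM_OPS comment notes, drivers that reuse their runtime
callbacks for system sleep usually wire them to the "late"/"early" phases rather
than to .suspend()/.resume().
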
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
new file mode 100644
index 000000000..85c16defe
--- /dev/null
+++ b/include/linux/pm2301_charger.h
@@ -0,0 +1,61 @@
+/*
+ * PM2301 charger driver.
+ *
+ * Copyright (C) 2012 ST Ericsson Corporation
+ *
+ * Contact: Olivier LAUNAY (olivier.launay@stericsson.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_PM2301_H
+#define __LINUX_PM2301_H
+
+/**
+ * struct pm2xxx_bm_charger_parameters - Charger specific parameters
+ * @ac_volt_max: maximum allowed AC charger voltage in mV
+ * @ac_curr_max: maximum allowed AC charger current in mA
+ */
+struct pm2xxx_bm_charger_parameters {
+ int ac_volt_max;
+ int ac_curr_max;
+};
+
+/**
+ * struct pm2xxx_bm_data - pm2xxx battery management data
+ * @enable_overshoot: flag to enable VBAT overshoot control
+ * @chg_params: charger parameters
+ */
+struct pm2xxx_bm_data {
+ bool enable_overshoot;
+ const struct pm2xxx_bm_charger_parameters *chg_params;
+};
+
+struct pm2xxx_charger_platform_data {
+ char **supplied_to;
+ size_t num_supplicants;
+ int i2c_bus;
+ const char *label;
+ int gpio_irq_number;
+ unsigned int lpn_gpio;
+ int irq_type;
+};
+
+struct pm2xxx_platform_data {
+ struct pm2xxx_charger_platform_data *wall_charger;
+ struct pm2xxx_bm_data *battery;
+};
+
+#endif /* __LINUX_PM2301_H */
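
Board code hands these structures to the charger driver as platform data. A sketch
with illustrative values; every number, GPIO/IRQ line and name below is made up:

    #include <linux/interrupt.h>
    #include <linux/pm2301_charger.h>

    static const struct pm2xxx_bm_charger_parameters demo_charger_params = {
    	.ac_volt_max	= 7500,			/* mV, illustrative */
    	.ac_curr_max	= 1500,			/* mA, illustrative */
    };

    static struct pm2xxx_bm_data demo_bm_data = {
    	.enable_overshoot = false,
    	.chg_params	  = &demo_charger_params,
    };

    static struct pm2xxx_charger_platform_data demo_charger_pdata = {
    	.label		 = "pm2301",
    	.gpio_irq_number = 64,			/* board specific */
    	.lpn_gpio	 = 152,			/* board specific */
    	.irq_type	 = IRQF_TRIGGER_FALLING,
    };

    static struct pm2xxx_platform_data demo_pm2xxx_pdata = {
    	.wall_charger	= &demo_charger_pdata,
    	.battery	= &demo_bm_data,
    };
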
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
new file mode 100644
index 000000000..0b0039634
--- /dev/null
+++ b/include/linux/pm_clock.h
@@ -0,0 +1,79 @@
+/*
+ * pm_clock.h - Definitions and headers related to device clocks.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_CLOCK_H
+#define _LINUX_PM_CLOCK_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+struct pm_clk_notifier_block {
+ struct notifier_block nb;
+ struct dev_pm_domain *pm_domain;
+ char *con_ids[];
+};
+
+struct clk;
+
+#ifdef CONFIG_PM_CLK
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+ return dev && dev->power.subsys_data
+ && list_empty(&dev->power.subsys_data->clock_list);
+}
+
+extern void pm_clk_init(struct device *dev);
+extern int pm_clk_create(struct device *dev);
+extern void pm_clk_destroy(struct device *dev);
+extern int pm_clk_add(struct device *dev, const char *con_id);
+extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
+extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern int pm_clk_suspend(struct device *dev);
+extern int pm_clk_resume(struct device *dev);
+#else
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+ return true;
+}
+static inline void pm_clk_init(struct device *dev)
+{
+}
+static inline int pm_clk_create(struct device *dev)
+{
+ return -EINVAL;
+}
+static inline void pm_clk_destroy(struct device *dev)
+{
+}
+static inline int pm_clk_add(struct device *dev, const char *con_id)
+{
+ return -EINVAL;
+}
+
+static inline int pm_clk_add_clk(struct device *dev, struct clk *clk)
+{
+ return -EINVAL;
+}
+static inline void pm_clk_remove(struct device *dev, const char *con_id)
+{
+}
+#define pm_clk_suspend NULL
+#define pm_clk_resume NULL
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+extern void pm_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb);
+#else
+static inline void pm_clk_add_notifier(struct bus_type *bus,
+ struct pm_clk_notifier_block *clknb)
+{
+}
+#endif
+
+#endif
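
A driver (or, more commonly, a PM domain or bus) builds the per-device clock list
in probe and tears it down in remove; pm_clk_suspend()/pm_clk_resume() can then be
wired up as runtime PM callbacks to gate those clocks. A sketch with hypothetical
names (foo_probe, the "bus" connection id):

    #include <linux/platform_device.h>
    #include <linux/pm_clock.h>

    static int foo_probe(struct platform_device *pdev)
    {
    	int ret;

    	ret = pm_clk_create(&pdev->dev);	/* allocate the clock list */
    	if (ret)
    		return ret;

    	ret = pm_clk_add(&pdev->dev, "bus");	/* look up and track the "bus" clock */
    	if (ret)
    		pm_clk_destroy(&pdev->dev);

    	return ret;
    }

    static int foo_remove(struct platform_device *pdev)
    {
    	pm_clk_destroy(&pdev->dev);		/* drop and put all tracked clocks */
    	return 0;
    }
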
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
new file mode 100644
index 000000000..681ccb053
--- /dev/null
+++ b/include/linux/pm_domain.h
@@ -0,0 +1,326 @@
+/*
+ * pm_domain.h - Definitions and headers related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_DOMAIN_H
+#define _LINUX_PM_DOMAIN_H
+
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/notifier.h>
+#include <linux/cpuidle.h>
+
+/* Defines used for the flags field in the struct generic_pm_domain */
+#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
+
+enum gpd_status {
+ GPD_STATE_ACTIVE = 0, /* PM domain is active */
+ GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
+ GPD_STATE_BUSY, /* Something is happening to the PM domain */
+ GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
+ GPD_STATE_POWER_OFF, /* PM domain is off */
+};
+
+struct dev_power_governor {
+ bool (*power_down_ok)(struct dev_pm_domain *domain);
+ bool (*stop_ok)(struct device *dev);
+};
+
+struct gpd_dev_ops {
+ int (*start)(struct device *dev);
+ int (*stop)(struct device *dev);
+ int (*save_state)(struct device *dev);
+ int (*restore_state)(struct device *dev);
+ bool (*active_wakeup)(struct device *dev);
+};
+
+struct gpd_cpuidle_data {
+ unsigned int saved_exit_latency;
+ struct cpuidle_state *idle_state;
+};
+
+struct generic_pm_domain {
+ struct dev_pm_domain domain; /* PM domain operations */
+ struct list_head gpd_list_node; /* Node in the global PM domains list */
+ struct list_head master_links; /* Links with PM domain as a master */
+ struct list_head slave_links; /* Links with PM domain as a slave */
+ struct list_head dev_list; /* List of devices */
+ struct mutex lock;
+ struct dev_power_governor *gov;
+ struct work_struct power_off_work;
+ const char *name;
+ unsigned int in_progress; /* Number of devices being suspended now */
+ atomic_t sd_count; /* Number of subdomains with power "on" */
+ enum gpd_status status; /* Current state of the domain */
+ wait_queue_head_t status_wait_queue;
+ struct task_struct *poweroff_task; /* Powering off task */
+ unsigned int resume_count; /* Number of devices being resumed */
+ unsigned int device_count; /* Number of devices */
+ unsigned int suspended_count; /* System suspend device counter */
+ unsigned int prepared_count; /* Suspend counter of prepared devices */
+ bool suspend_power_off; /* Power status before system suspend */
+ int (*power_off)(struct generic_pm_domain *domain);
+ s64 power_off_latency_ns;
+ int (*power_on)(struct generic_pm_domain *domain);
+ s64 power_on_latency_ns;
+ struct gpd_dev_ops dev_ops;
+ s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
+ bool max_off_time_changed;
+ bool cached_power_down_ok;
+ struct gpd_cpuidle_data *cpuidle_data;
+ int (*attach_dev)(struct generic_pm_domain *domain,
+ struct device *dev);
+ void (*detach_dev)(struct generic_pm_domain *domain,
+ struct device *dev);
+ unsigned int flags; /* Bit field of configs for genpd */
+};
+
+static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
+{
+ return container_of(pd, struct generic_pm_domain, domain);
+}
+
+struct gpd_link {
+ struct generic_pm_domain *master;
+ struct list_head master_node;
+ struct generic_pm_domain *slave;
+ struct list_head slave_node;
+};
+
+struct gpd_timing_data {
+ s64 stop_latency_ns;
+ s64 start_latency_ns;
+ s64 save_state_latency_ns;
+ s64 restore_state_latency_ns;
+ s64 effective_constraint_ns;
+ bool constraint_changed;
+ bool cached_stop_ok;
+};
+
+struct pm_domain_data {
+ struct list_head list_node;
+ struct device *dev;
+};
+
+struct generic_pm_domain_data {
+ struct pm_domain_data base;
+ struct gpd_timing_data td;
+ struct notifier_block nb;
+ int need_restore;
+};
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
+{
+ return container_of(pdd, struct generic_pm_domain_data, base);
+}
+
+static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+{
+ return to_gpd_data(dev->power.subsys_data->domain_data);
+}
+
+extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev);
+extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev,
+ struct gpd_timing_data *td);
+
+extern int __pm_genpd_name_add_device(const char *domain_name,
+ struct device *dev,
+ struct gpd_timing_data *td);
+
+extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev);
+extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_subdomain);
+extern int pm_genpd_add_subdomain_names(const char *master_name,
+ const char *subdomain_name);
+extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target);
+extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
+extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
+extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_detach_cpuidle(const char *name);
+extern void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off);
+
+extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+extern int pm_genpd_name_poweron(const char *domain_name);
+extern void pm_genpd_poweroff_unused(void);
+
+extern struct dev_power_governor simple_qos_governor;
+extern struct dev_power_governor pm_domain_always_on_gov;
+#else
+
+static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
+{
+ return NULL;
+}
+static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev,
+ struct gpd_timing_data *td)
+{
+ return -ENOSYS;
+}
+static inline int __pm_genpd_name_add_device(const char *domain_name,
+ struct device *dev,
+ struct gpd_timing_data *td)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_sd)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_add_subdomain_names(const char *master_name,
+ const char *subdomain_name)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_name_detach_cpuidle(const char *name)
+{
+ return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
+{
+}
+static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_name_poweron(const char *domain_name)
+{
+ return -ENOSYS;
+}
+static inline void pm_genpd_poweroff_unused(void) {}
+#define simple_qos_governor NULL
+#define pm_domain_always_on_gov NULL
+#endif
+
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return __pm_genpd_add_device(genpd, dev, NULL);
+}
+
+static inline int pm_genpd_name_add_device(const char *domain_name,
+ struct device *dev)
+{
+ return __pm_genpd_name_add_device(domain_name, dev, NULL);
+}
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
+extern void pm_genpd_syscore_poweroff(struct device *dev);
+extern void pm_genpd_syscore_poweron(struct device *dev);
+#else
+static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
+static inline void pm_genpd_syscore_poweron(struct device *dev) {}
+#endif
+
+/* OF PM domain providers */
+struct of_device_id;
+
+struct genpd_onecell_data {
+ struct generic_pm_domain **domains;
+ unsigned int num_domains;
+};
+
+typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
+ void *data);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+ void *data);
+void of_genpd_del_provider(struct device_node *np);
+struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec);
+
+struct generic_pm_domain *__of_genpd_xlate_simple(
+ struct of_phandle_args *genpdspec,
+ void *data);
+struct generic_pm_domain *__of_genpd_xlate_onecell(
+ struct of_phandle_args *genpdspec,
+ void *data);
+
+int genpd_dev_pm_attach(struct device *dev);
+#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
+static inline int __of_genpd_add_provider(struct device_node *np,
+ genpd_xlate_t xlate, void *data)
+{
+ return 0;
+}
+static inline void of_genpd_del_provider(struct device_node *np) {}
+
+static inline struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ return NULL;
+}
+
+#define __of_genpd_xlate_simple NULL
+#define __of_genpd_xlate_onecell NULL
+
+static inline int genpd_dev_pm_attach(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
+
+static inline int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd)
+{
+ return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
+}
+static inline int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data)
+{
+ return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
+}
+
+#ifdef CONFIG_PM
+extern int dev_pm_domain_attach(struct device *dev, bool power_on);
+extern void dev_pm_domain_detach(struct device *dev, bool power_off);
+#else
+static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
+{
+ return -ENODEV;
+}
+static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+#endif
+
+#endif /* _LINUX_PM_DOMAIN_H */
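
Platform code typically fills a struct generic_pm_domain with power_on/power_off
handlers, registers it with pm_genpd_init(), and then attaches devices. A sketch
with hypothetical handlers and names:

    #include <linux/pm_domain.h>

    static int demo_pd_power_on(struct generic_pm_domain *pd)
    {
    	/* enable the power rail / controller for this island */
    	return 0;
    }

    static int demo_pd_power_off(struct generic_pm_domain *pd)
    {
    	/* cut power once genpd decides the domain may go down */
    	return 0;
    }

    static struct generic_pm_domain demo_pd = {
    	.name		= "demo-pd",
    	.power_on	= demo_pd_power_on,
    	.power_off	= demo_pd_power_off,
    };

    static void demo_pd_register(struct device *dev)
    {
    	pm_genpd_init(&demo_pd, NULL, true);	/* no governor, start powered off */
    	if (pm_genpd_add_device(&demo_pd, dev))	/* wrapper around __pm_genpd_add_device() */
    		pr_warn("demo-pd: failed to add %s\n", dev_name(dev));
    }
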
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
new file mode 100644
index 000000000..cec2d4540
--- /dev/null
+++ b/include/linux/pm_opp.h
@@ -0,0 +1,129 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OPP_H__
+#define __LINUX_OPP_H__
+
+#include <linux/err.h>
+#include <linux/notifier.h>
+
+struct dev_pm_opp;
+struct device;
+
+enum dev_pm_opp_event {
+ OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
+#if defined(CONFIG_PM_OPP)
+
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+
+int dev_pm_opp_get_opp_count(struct device *dev);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq,
+ bool available);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq);
+
+int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt);
+void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+
+int dev_pm_opp_enable(struct device *dev, unsigned long freq);
+
+int dev_pm_opp_disable(struct device *dev, unsigned long freq);
+
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+#else
+static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+ return 0;
+}
+
+static inline int dev_pm_opp_get_opp_count(struct device *dev)
+{
+ return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+ unsigned long freq, bool available)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+ unsigned long *freq)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+ unsigned long *freq)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ return -EINVAL;
+}
+
+static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
+{
+}
+
+static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
+{
+ return 0;
+}
+
+static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
+{
+ return 0;
+}
+
+static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
+ struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_PM_OPP */
+
+#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
+int of_init_opp_table(struct device *dev);
+void of_free_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+ return -EINVAL;
+}
+
+static inline void of_free_opp_table(struct device *dev)
+{
+}
+#endif
+
+#endif /* __LINUX_OPP_H__ */
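A hedged sketch of the lookup helpers in use: in this tree the OPP table is RCU-protected, so the find and get accessors are called under rcu_read_lock(). example_pick_opp() and target_hz are illustrative names, and the clock/regulator programming the result would feed is omitted.

#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Round target_hz up to the nearest available OPP and report the voltage
 * that OPP asks for; the caller would then program the clk/regulator. */
static int example_pick_opp(struct device *dev, unsigned long target_hz,
			    unsigned long *volt_uv)
{
	struct dev_pm_opp *opp;
	unsigned long freq = target_hz;

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_ceil(dev, &freq);	/* freq is rounded up */
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		return PTR_ERR(opp);
	}
	*volt_uv = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}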
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
new file mode 100644
index 000000000..7b3ae0cff
--- /dev/null
+++ b/include/linux/pm_qos.h
@@ -0,0 +1,237 @@
+#ifndef _LINUX_PM_QOS_H
+#define _LINUX_PM_QOS_H
+/* interface for the pm_qos_power infrastructure of the linux kernel.
+ *
+ * Mark Gross <mgross@linux.intel.com>
+ */
+#include <linux/plist.h>
+#include <linux/notifier.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+
+enum {
+ PM_QOS_RESERVED = 0,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_NETWORK_LATENCY,
+ PM_QOS_NETWORK_THROUGHPUT,
+ PM_QOS_MEMORY_BANDWIDTH,
+
+ /* insert new class ID */
+ PM_QOS_NUM_CLASSES,
+};
+
+enum pm_qos_flags_status {
+ PM_QOS_FLAGS_UNDEFINED = -1,
+ PM_QOS_FLAGS_NONE,
+ PM_QOS_FLAGS_SOME,
+ PM_QOS_FLAGS_ALL,
+};
+
+#define PM_QOS_DEFAULT_VALUE -1
+
+#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
+#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
+#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
+#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
+#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
+#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
+
+#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
+#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
+
+struct pm_qos_request {
+ struct plist_node node;
+ int pm_qos_class;
+ struct delayed_work work; /* for pm_qos_update_request_timeout */
+};
+
+struct pm_qos_flags_request {
+ struct list_head node;
+ s32 flags; /* Do not change to 64 bit */
+};
+
+enum dev_pm_qos_req_type {
+ DEV_PM_QOS_RESUME_LATENCY = 1,
+ DEV_PM_QOS_LATENCY_TOLERANCE,
+ DEV_PM_QOS_FLAGS,
+};
+
+struct dev_pm_qos_request {
+ enum dev_pm_qos_req_type type;
+ union {
+ struct plist_node pnode;
+ struct pm_qos_flags_request flr;
+ } data;
+ struct device *dev;
+};
+
+enum pm_qos_type {
+ PM_QOS_UNITIALIZED,
+ PM_QOS_MAX, /* return the largest value */
+ PM_QOS_MIN, /* return the smallest value */
+ PM_QOS_SUM /* return the sum */
+};
+
+/*
+ * Note: The lockless read path depends on the CPU accessing target_value
+ * or effective_flags atomically. Atomic access is only guaranteed on all CPU
+ * types Linux supports for 32-bit quantities.
+ */
+struct pm_qos_constraints {
+ struct plist_head list;
+ s32 target_value; /* Do not change to 64 bit */
+ s32 default_value;
+ s32 no_constraint_value;
+ enum pm_qos_type type;
+ struct blocking_notifier_head *notifiers;
+};
+
+struct pm_qos_flags {
+ struct list_head list;
+ s32 effective_flags; /* Do not change to 64 bit */
+};
+
+struct dev_pm_qos {
+ struct pm_qos_constraints resume_latency;
+ struct pm_qos_constraints latency_tolerance;
+ struct pm_qos_flags flags;
+ struct dev_pm_qos_request *resume_latency_req;
+ struct dev_pm_qos_request *latency_tolerance_req;
+ struct dev_pm_qos_request *flags_req;
+};
+
+/* Action requested to pm_qos_update_target */
+enum pm_qos_req_action {
+ PM_QOS_ADD_REQ, /* Add a new request */
+ PM_QOS_UPDATE_REQ, /* Update an existing request */
+ PM_QOS_REMOVE_REQ /* Remove an existing request */
+};
+
+static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
+{
+ return req->dev != NULL;
+}
+
+int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+ enum pm_qos_req_action action, int value);
+bool pm_qos_update_flags(struct pm_qos_flags *pqf,
+ struct pm_qos_flags_request *req,
+ enum pm_qos_req_action action, s32 val);
+void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
+ s32 value);
+void pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value);
+void pm_qos_update_request_timeout(struct pm_qos_request *req,
+ s32 new_value, unsigned long timeout_us);
+void pm_qos_remove_request(struct pm_qos_request *req);
+
+int pm_qos_request(int pm_qos_class);
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_request_active(struct pm_qos_request *req);
+s32 pm_qos_read_value(struct pm_qos_constraints *c);
+
+#ifdef CONFIG_PM
+enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
+enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
+s32 __dev_pm_qos_read_value(struct device *dev);
+s32 dev_pm_qos_read_value(struct device *dev);
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value);
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
+int dev_pm_qos_add_notifier(struct device *dev,
+ struct notifier_block *notifier);
+int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier);
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
+void dev_pm_qos_constraints_init(struct device *dev);
+void dev_pm_qos_constraints_destroy(struct device *dev);
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type, s32 value);
+int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
+void dev_pm_qos_hide_latency_limit(struct device *dev);
+int dev_pm_qos_expose_flags(struct device *dev, s32 value);
+void dev_pm_qos_hide_flags(struct device *dev);
+int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
+
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
+{
+ return dev->power.qos->resume_latency_req->data.pnode.prio;
+}
+
+static inline s32 dev_pm_qos_requested_flags(struct device *dev)
+{
+ return dev->power.qos->flags_req->data.flr.flags;
+}
+#else
+static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
+ s32 mask)
+ { return PM_QOS_FLAGS_UNDEFINED; }
+static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
+ s32 mask)
+ { return PM_QOS_FLAGS_UNDEFINED; }
+static inline s32 __dev_pm_qos_read_value(struct device *dev)
+ { return 0; }
+static inline s32 dev_pm_qos_read_value(struct device *dev)
+ { return 0; }
+static inline int dev_pm_qos_add_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type,
+ s32 value)
+ { return 0; }
+static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+ s32 new_value)
+ { return 0; }
+static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+ { return 0; }
+static inline int dev_pm_qos_add_notifier(struct device *dev,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_remove_notifier(struct device *dev,
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_add_global_notifier(
+ struct notifier_block *notifier)
+ { return 0; }
+static inline int dev_pm_qos_remove_global_notifier(
+ struct notifier_block *notifier)
+ { return 0; }
+static inline void dev_pm_qos_constraints_init(struct device *dev)
+{
+ dev->power.power_state = PMSG_ON;
+}
+static inline void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+ dev->power.power_state = PMSG_INVALID;
+}
+static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req,
+ enum dev_pm_qos_req_type type,
+ s32 value)
+ { return 0; }
+static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
+ { return 0; }
+static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
+ { return 0; }
+static inline void dev_pm_qos_hide_flags(struct device *dev) {}
+static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
+ { return 0; }
+static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+ { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
+static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+ { return 0; }
+
+static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
+static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
+#endif
+
+#endif
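As a usage sketch (all example_* names are hypothetical), a driver that cannot tolerate long CPU wake-up latencies during a transfer might pin the class-wide CPU DMA latency as below; the per-device dev_pm_qos_* calls above follow the same add/update/remove pattern.

#include <linux/pm_qos.h>

static struct pm_qos_request example_qos_req;

static void example_transfer_start(void)
{
	/* Requests aggregate: PM_QOS_CPU_DMA_LATENCY is a MIN class, so the
	 * effective constraint is the smallest active request (in usecs). */
	pm_qos_add_request(&example_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void example_transfer_done(void)
{
	pm_qos_remove_request(&example_qos_req);
}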
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
new file mode 100644
index 000000000..30e84d48b
--- /dev/null
+++ b/include/linux/pm_runtime.h
@@ -0,0 +1,283 @@
+/*
+ * pm_runtime.h - Device run-time power management helper functions.
+ *
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_RUNTIME_H
+#define _LINUX_PM_RUNTIME_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/pm.h>
+
+#include <linux/jiffies.h>
+
+/* Runtime PM flag argument bits */
+#define RPM_ASYNC 0x01 /* Request is asynchronous */
+#define RPM_NOWAIT 0x02 /* Don't wait for concurrent
+ state change */
+#define RPM_GET_PUT 0x04 /* Increment/decrement the
+ usage_count */
+#define RPM_AUTO 0x08 /* Use autosuspend_delay */
+
+#ifdef CONFIG_PM
+extern struct workqueue_struct *pm_wq;
+
+static inline bool queue_pm_work(struct work_struct *work)
+{
+ return queue_work(pm_wq, work);
+}
+
+extern int pm_generic_runtime_suspend(struct device *dev);
+extern int pm_generic_runtime_resume(struct device *dev);
+extern int pm_runtime_force_suspend(struct device *dev);
+extern int pm_runtime_force_resume(struct device *dev);
+
+extern int __pm_runtime_idle(struct device *dev, int rpmflags);
+extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
+extern int __pm_runtime_resume(struct device *dev, int rpmflags);
+extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
+extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
+extern int pm_runtime_barrier(struct device *dev);
+extern void pm_runtime_enable(struct device *dev);
+extern void __pm_runtime_disable(struct device *dev, bool check_resume);
+extern void pm_runtime_allow(struct device *dev);
+extern void pm_runtime_forbid(struct device *dev);
+extern void pm_runtime_no_callbacks(struct device *dev);
+extern void pm_runtime_irq_safe(struct device *dev);
+extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
+extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
+extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern void pm_runtime_update_max_time_suspended(struct device *dev,
+ s64 delta_ns);
+extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
+
+static inline bool pm_children_suspended(struct device *dev)
+{
+ return dev->power.ignore_children
+ || !atomic_read(&dev->power.child_count);
+}
+
+static inline void pm_runtime_get_noresume(struct device *dev)
+{
+ atomic_inc(&dev->power.usage_count);
+}
+
+static inline void pm_runtime_put_noidle(struct device *dev)
+{
+ atomic_add_unless(&dev->power.usage_count, -1, 0);
+}
+
+static inline bool device_run_wake(struct device *dev)
+{
+ return dev->power.run_wake;
+}
+
+static inline void device_set_run_wake(struct device *dev, bool enable)
+{
+ dev->power.run_wake = enable;
+}
+
+static inline bool pm_runtime_suspended(struct device *dev)
+{
+ return dev->power.runtime_status == RPM_SUSPENDED
+ && !dev->power.disable_depth;
+}
+
+static inline bool pm_runtime_active(struct device *dev)
+{
+ return dev->power.runtime_status == RPM_ACTIVE
+ || dev->power.disable_depth;
+}
+
+static inline bool pm_runtime_status_suspended(struct device *dev)
+{
+ return dev->power.runtime_status == RPM_SUSPENDED;
+}
+
+static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
+{
+ return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
+}
+
+static inline bool pm_runtime_enabled(struct device *dev)
+{
+ return !dev->power.disable_depth;
+}
+
+static inline bool pm_runtime_callbacks_present(struct device *dev)
+{
+ return !dev->power.no_callbacks;
+}
+
+static inline void pm_runtime_mark_last_busy(struct device *dev)
+{
+ ACCESS_ONCE(dev->power.last_busy) = jiffies;
+}
+
+static inline bool pm_runtime_is_irq_safe(struct device *dev)
+{
+ return dev->power.irq_safe;
+}
+
+#else /* !CONFIG_PM */
+
+static inline bool queue_pm_work(struct work_struct *work) { return false; }
+
+static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
+static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
+static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
+static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
+
+static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
+{
+ return -ENOSYS;
+}
+static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
+{
+ return -ENOSYS;
+}
+static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
+{
+ return 1;
+}
+static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
+{
+ return -ENOSYS;
+}
+static inline int __pm_runtime_set_status(struct device *dev,
+ unsigned int status) { return 0; }
+static inline int pm_runtime_barrier(struct device *dev) { return 0; }
+static inline void pm_runtime_enable(struct device *dev) {}
+static inline void __pm_runtime_disable(struct device *dev, bool c) {}
+static inline void pm_runtime_allow(struct device *dev) {}
+static inline void pm_runtime_forbid(struct device *dev) {}
+
+static inline bool pm_children_suspended(struct device *dev) { return false; }
+static inline void pm_runtime_get_noresume(struct device *dev) {}
+static inline void pm_runtime_put_noidle(struct device *dev) {}
+static inline bool device_run_wake(struct device *dev) { return false; }
+static inline void device_set_run_wake(struct device *dev, bool enable) {}
+static inline bool pm_runtime_suspended(struct device *dev) { return false; }
+static inline bool pm_runtime_active(struct device *dev) { return true; }
+static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
+static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
+static inline bool pm_runtime_enabled(struct device *dev) { return false; }
+
+static inline void pm_runtime_no_callbacks(struct device *dev) {}
+static inline void pm_runtime_irq_safe(struct device *dev) {}
+static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
+
+static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
+static inline void pm_runtime_mark_last_busy(struct device *dev) {}
+static inline void __pm_runtime_use_autosuspend(struct device *dev,
+ bool use) {}
+static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
+ int delay) {}
+static inline unsigned long pm_runtime_autosuspend_expiration(
+ struct device *dev) { return 0; }
+static inline void pm_runtime_set_memalloc_noio(struct device *dev,
+ bool enable){}
+
+#endif /* !CONFIG_PM */
+
+static inline int pm_runtime_idle(struct device *dev)
+{
+ return __pm_runtime_idle(dev, 0);
+}
+
+static inline int pm_runtime_suspend(struct device *dev)
+{
+ return __pm_runtime_suspend(dev, 0);
+}
+
+static inline int pm_runtime_autosuspend(struct device *dev)
+{
+ return __pm_runtime_suspend(dev, RPM_AUTO);
+}
+
+static inline int pm_runtime_resume(struct device *dev)
+{
+ return __pm_runtime_resume(dev, 0);
+}
+
+static inline int pm_request_idle(struct device *dev)
+{
+ return __pm_runtime_idle(dev, RPM_ASYNC);
+}
+
+static inline int pm_request_resume(struct device *dev)
+{
+ return __pm_runtime_resume(dev, RPM_ASYNC);
+}
+
+static inline int pm_request_autosuspend(struct device *dev)
+{
+ return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
+}
+
+static inline int pm_runtime_get(struct device *dev)
+{
+ return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
+}
+
+static inline int pm_runtime_get_sync(struct device *dev)
+{
+ return __pm_runtime_resume(dev, RPM_GET_PUT);
+}
+
+static inline int pm_runtime_put(struct device *dev)
+{
+ return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
+}
+
+static inline int pm_runtime_put_autosuspend(struct device *dev)
+{
+ return __pm_runtime_suspend(dev,
+ RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
+}
+
+static inline int pm_runtime_put_sync(struct device *dev)
+{
+ return __pm_runtime_idle(dev, RPM_GET_PUT);
+}
+
+static inline int pm_runtime_put_sync_suspend(struct device *dev)
+{
+ return __pm_runtime_suspend(dev, RPM_GET_PUT);
+}
+
+static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
+{
+ return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
+}
+
+static inline int pm_runtime_set_active(struct device *dev)
+{
+ return __pm_runtime_set_status(dev, RPM_ACTIVE);
+}
+
+static inline void pm_runtime_set_suspended(struct device *dev)
+{
+ __pm_runtime_set_status(dev, RPM_SUSPENDED);
+}
+
+static inline void pm_runtime_disable(struct device *dev)
+{
+ __pm_runtime_disable(dev, true);
+}
+
+static inline void pm_runtime_use_autosuspend(struct device *dev)
+{
+ __pm_runtime_use_autosuspend(dev, true);
+}
+
+static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
+{
+ __pm_runtime_use_autosuspend(dev, false);
+}
+
+#endif
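The wrappers above combine into a common pattern. The sketch below is illustrative only (the example_* hooks are not from any in-tree driver) and shows the usual get / mark-busy / put-autosuspend discipline around hardware access.

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 50);	/* ms, illustrative */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
	return 0;
}

static int example_do_io(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* resumes the device if needed */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage count */
		return ret;
	}

	/* ... touch the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* may suspend after the delay */
	return 0;
}

static void example_remove(struct device *dev)
{
	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
}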
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
new file mode 100644
index 000000000..a0f70808d
--- /dev/null
+++ b/include/linux/pm_wakeup.h
@@ -0,0 +1,195 @@
+/*
+ * pm_wakeup.h - Power management wakeup interface
+ *
+ * Copyright (C) 2008 Alan Stern
+ * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_PM_WAKEUP_H
+#define _LINUX_PM_WAKEUP_H
+
+#ifndef _DEVICE_H_
+# error "please don't include this file directly"
+#endif
+
+#include <linux/types.h>
+
+/**
+ * struct wakeup_source - Representation of wakeup sources
+ *
+ * @total_time: Total time this wakeup source has been active.
+ * @max_time: Maximum time this wakeup source has been continuously active.
+ * @last_time: Monotonic clock when the wakeup source was last touched.
+ * @prevent_sleep_time: Total time this source has been preventing autosleep.
+ * @event_count: Number of signaled wakeup events.
+ * @active_count: Number of times the wakeup source was activated.
+ * @relax_count: Number of times the wakeup source was deactivated.
+ * @expire_count: Number of times the wakeup source's timeout has expired.
+ * @wakeup_count: Number of times the wakeup source might abort suspend.
+ * @active: Status of the wakeup source.
+ * @has_timeout: The wakeup source has been activated with a timeout.
+ */
+struct wakeup_source {
+ const char *name;
+ struct list_head entry;
+ spinlock_t lock;
+ struct timer_list timer;
+ unsigned long timer_expires;
+ ktime_t total_time;
+ ktime_t max_time;
+ ktime_t last_time;
+ ktime_t start_prevent_time;
+ ktime_t prevent_sleep_time;
+ unsigned long event_count;
+ unsigned long active_count;
+ unsigned long relax_count;
+ unsigned long expire_count;
+ unsigned long wakeup_count;
+ bool active:1;
+ bool autosleep_enabled:1;
+};
+
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * Changes to device_may_wakeup take effect on the next pm state change.
+ */
+
+static inline bool device_can_wakeup(struct device *dev)
+{
+ return dev->power.can_wakeup;
+}
+
+static inline bool device_may_wakeup(struct device *dev)
+{
+ return dev->power.can_wakeup && !!dev->power.wakeup;
+}
+
+/* drivers/base/power/wakeup.c */
+extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name);
+extern struct wakeup_source *wakeup_source_create(const char *name);
+extern void wakeup_source_drop(struct wakeup_source *ws);
+extern void wakeup_source_destroy(struct wakeup_source *ws);
+extern void wakeup_source_add(struct wakeup_source *ws);
+extern void wakeup_source_remove(struct wakeup_source *ws);
+extern struct wakeup_source *wakeup_source_register(const char *name);
+extern void wakeup_source_unregister(struct wakeup_source *ws);
+extern int device_wakeup_enable(struct device *dev);
+extern int device_wakeup_disable(struct device *dev);
+extern void device_set_wakeup_capable(struct device *dev, bool capable);
+extern int device_init_wakeup(struct device *dev, bool val);
+extern int device_set_wakeup_enable(struct device *dev, bool enable);
+extern void __pm_stay_awake(struct wakeup_source *ws);
+extern void pm_stay_awake(struct device *dev);
+extern void __pm_relax(struct wakeup_source *ws);
+extern void pm_relax(struct device *dev);
+extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec);
+extern void pm_wakeup_event(struct device *dev, unsigned int msec);
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline void device_set_wakeup_capable(struct device *dev, bool capable)
+{
+ dev->power.can_wakeup = capable;
+}
+
+static inline bool device_can_wakeup(struct device *dev)
+{
+ return dev->power.can_wakeup;
+}
+
+static inline void wakeup_source_prepare(struct wakeup_source *ws,
+ const char *name) {}
+
+static inline struct wakeup_source *wakeup_source_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void wakeup_source_drop(struct wakeup_source *ws) {}
+
+static inline void wakeup_source_destroy(struct wakeup_source *ws) {}
+
+static inline void wakeup_source_add(struct wakeup_source *ws) {}
+
+static inline void wakeup_source_remove(struct wakeup_source *ws) {}
+
+static inline struct wakeup_source *wakeup_source_register(const char *name)
+{
+ return NULL;
+}
+
+static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
+
+static inline int device_wakeup_enable(struct device *dev)
+{
+ dev->power.should_wakeup = true;
+ return 0;
+}
+
+static inline int device_wakeup_disable(struct device *dev)
+{
+ dev->power.should_wakeup = false;
+ return 0;
+}
+
+static inline int device_set_wakeup_enable(struct device *dev, bool enable)
+{
+ dev->power.should_wakeup = enable;
+ return 0;
+}
+
+static inline int device_init_wakeup(struct device *dev, bool val)
+{
+ device_set_wakeup_capable(dev, val);
+ device_set_wakeup_enable(dev, val);
+ return 0;
+}
+
+static inline bool device_may_wakeup(struct device *dev)
+{
+ return dev->power.can_wakeup && dev->power.should_wakeup;
+}
+
+static inline void __pm_stay_awake(struct wakeup_source *ws) {}
+
+static inline void pm_stay_awake(struct device *dev) {}
+
+static inline void __pm_relax(struct wakeup_source *ws) {}
+
+static inline void pm_relax(struct device *dev) {}
+
+static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {}
+
+static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static inline void wakeup_source_init(struct wakeup_source *ws,
+ const char *name)
+{
+ wakeup_source_prepare(ws, name);
+ wakeup_source_add(ws);
+}
+
+static inline void wakeup_source_trash(struct wakeup_source *ws)
+{
+ wakeup_source_remove(ws);
+ wakeup_source_drop(ws);
+}
+
+#endif /* _LINUX_PM_WAKEUP_H */
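A small, hypothetical sketch of the usual flow: declare wakeup capability at probe time, then report events from the wake interrupt path. It assumes, as the _DEVICE_H_ guard above suggests, that these declarations are already visible through linux/device.h.

#include <linux/device.h>	/* assumed to provide this header's declarations */

static int example_probe(struct device *dev)
{
	/* Mark the device wakeup-capable and enable wakeup by default. */
	return device_init_wakeup(dev, true);
}

static void example_handle_wake_irq(struct device *dev)
{
	if (device_may_wakeup(dev))
		/* Hold off suspend for ~100 ms so the event gets processed. */
		pm_wakeup_event(dev, 100);
}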
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
new file mode 100644
index 000000000..99b400b8a
--- /dev/null
+++ b/include/linux/pmu.h
@@ -0,0 +1,85 @@
+/*
+ * Definitions for talking to the PMU. The PMU is a microcontroller
+ * which controls battery charging and system power on PowerBook 3400
+ * and 2400 models as well as the RTC and various other things.
+ *
+ * Copyright (C) 1998 Paul Mackerras.
+ */
+#ifndef _LINUX_PMU_H
+#define _LINUX_PMU_H
+
+#include <uapi/linux/pmu.h>
+
+
+extern int find_via_pmu(void);
+
+extern int pmu_request(struct adb_request *req,
+ void (*done)(struct adb_request *), int nbytes, ...);
+extern int pmu_queue_request(struct adb_request *req);
+extern void pmu_poll(void);
+extern void pmu_poll_adb(void); /* For use by xmon */
+extern void pmu_wait_complete(struct adb_request *req);
+
+/* For use before switching interrupts off for a long time;
+ * warning: not stackable
+ */
+#if defined(CONFIG_ADB_PMU)
+extern void pmu_suspend(void);
+extern void pmu_resume(void);
+#else
+static inline void pmu_suspend(void)
+{}
+static inline void pmu_resume(void)
+{}
+#endif
+
+extern void pmu_enable_irled(int on);
+
+extern void pmu_restart(void);
+extern void pmu_shutdown(void);
+extern void pmu_unlock(void);
+
+extern int pmu_present(void);
+extern int pmu_get_model(void);
+
+extern void pmu_backlight_set_sleep(int sleep);
+
+#define PMU_MAX_BATTERIES 2
+
+/* values for pmu_power_flags */
+#define PMU_PWR_AC_PRESENT 0x00000001
+
+/* values for pmu_battery_info.flags */
+#define PMU_BATT_PRESENT 0x00000001
+#define PMU_BATT_CHARGING 0x00000002
+#define PMU_BATT_TYPE_MASK 0x000000f0
+#define PMU_BATT_TYPE_SMART 0x00000010 /* Smart battery */
+#define PMU_BATT_TYPE_HOOPER 0x00000020 /* 3400/3500 */
+#define PMU_BATT_TYPE_COMET 0x00000030 /* 2400 */
+
+struct pmu_battery_info
+{
+ unsigned int flags;
+ unsigned int charge; /* current charge */
+ unsigned int max_charge; /* maximum charge */
+ signed int amperage; /* current, positive if charging */
+ unsigned int voltage; /* voltage */
+ unsigned int time_remaining; /* remaining time */
+};
+
+extern int pmu_battery_count;
+extern struct pmu_battery_info pmu_batteries[PMU_MAX_BATTERIES];
+extern unsigned int pmu_power_flags;
+
+/* Backlight */
+extern void pmu_backlight_init(void);
+
+/* some code needs to know if the PMU was suspended for hibernation */
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
+extern int pmu_sys_suspended;
+#else
+/* if power management is not configured it can't be suspended */
+#define pmu_sys_suspended 0
+#endif
+
+#endif /* _LINUX_PMU_H */
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
new file mode 100644
index 000000000..17d7d0d20
--- /dev/null
+++ b/include/linux/pnfs_osd_xdr.h
@@ -0,0 +1,317 @@
+/*
+ * pNFS-osd on-the-wire data structures
+ *
+ * Copyright (C) 2007 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <ooo@electrozaur.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __PNFS_OSD_XDR_H__
+#define __PNFS_OSD_XDR_H__
+
+#include <linux/nfs_fs.h>
+
+/*
+ * draft-ietf-nfsv4-minorversion-22
+ * draft-ietf-nfsv4-pnfs-obj-12
+ */
+
+/* Layout Structure */
+
+enum pnfs_osd_raid_algorithm4 {
+ PNFS_OSD_RAID_0 = 1,
+ PNFS_OSD_RAID_4 = 2,
+ PNFS_OSD_RAID_5 = 3,
+ PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */
+};
+
+/* struct pnfs_osd_data_map4 {
+ * uint32_t odm_num_comps;
+ * length4 odm_stripe_unit;
+ * uint32_t odm_group_width;
+ * uint32_t odm_group_depth;
+ * uint32_t odm_mirror_cnt;
+ * pnfs_osd_raid_algorithm4 odm_raid_algorithm;
+ * };
+ */
+struct pnfs_osd_data_map {
+ u32 odm_num_comps;
+ u64 odm_stripe_unit;
+ u32 odm_group_width;
+ u32 odm_group_depth;
+ u32 odm_mirror_cnt;
+ u32 odm_raid_algorithm;
+};
+
+/* struct pnfs_osd_objid4 {
+ * deviceid4 oid_device_id;
+ * uint64_t oid_partition_id;
+ * uint64_t oid_object_id;
+ * };
+ */
+struct pnfs_osd_objid {
+ struct nfs4_deviceid oid_device_id;
+ u64 oid_partition_id;
+ u64 oid_object_id;
+};
+
+/* For printout, use e.g.:
+ * printk("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer));
+ * (values are printed in big-endian byte order)
+ */
+#define _DEVID_LO(oid_device_id) \
+ (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data)
+
+#define _DEVID_HI(oid_device_id) \
+ (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1)
+
+enum pnfs_osd_version {
+ PNFS_OSD_MISSING = 0,
+ PNFS_OSD_VERSION_1 = 1,
+ PNFS_OSD_VERSION_2 = 2
+};
+
+struct pnfs_osd_opaque_cred {
+ u32 cred_len;
+ void *cred;
+};
+
+enum pnfs_osd_cap_key_sec {
+ PNFS_OSD_CAP_KEY_SEC_NONE = 0,
+ PNFS_OSD_CAP_KEY_SEC_SSV = 1,
+};
+
+/* struct pnfs_osd_object_cred4 {
+ * pnfs_osd_objid4 oc_object_id;
+ * pnfs_osd_version4 oc_osd_version;
+ * pnfs_osd_cap_key_sec4 oc_cap_key_sec;
+ * opaque oc_capability_key<>;
+ * opaque oc_capability<>;
+ * };
+ */
+struct pnfs_osd_object_cred {
+ struct pnfs_osd_objid oc_object_id;
+ u32 oc_osd_version;
+ u32 oc_cap_key_sec;
+ struct pnfs_osd_opaque_cred oc_cap_key;
+ struct pnfs_osd_opaque_cred oc_cap;
+};
+
+/* struct pnfs_osd_layout4 {
+ * pnfs_osd_data_map4 olo_map;
+ * uint32_t olo_comps_index;
+ * pnfs_osd_object_cred4 olo_components<>;
+ * };
+ */
+struct pnfs_osd_layout {
+ struct pnfs_osd_data_map olo_map;
+ u32 olo_comps_index;
+ u32 olo_num_comps;
+ struct pnfs_osd_object_cred *olo_comps;
+};
+
+/* Device Address */
+enum pnfs_osd_targetid_type {
+ OBJ_TARGET_ANON = 1,
+ OBJ_TARGET_SCSI_NAME = 2,
+ OBJ_TARGET_SCSI_DEVICE_ID = 3,
+};
+
+/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) {
+ * case OBJ_TARGET_SCSI_NAME:
+ * string oti_scsi_name<>;
+ *
+ * case OBJ_TARGET_SCSI_DEVICE_ID:
+ * opaque oti_scsi_device_id<>;
+ *
+ * default:
+ * void;
+ * };
+ *
+ * union pnfs_osd_targetaddr4 switch (bool ota_available) {
+ * case TRUE:
+ * netaddr4 ota_netaddr;
+ * case FALSE:
+ * void;
+ * };
+ *
+ * struct pnfs_osd_deviceaddr4 {
+ * pnfs_osd_targetid4 oda_targetid;
+ * pnfs_osd_targetaddr4 oda_targetaddr;
+ * uint64_t oda_lun;
+ * opaque oda_systemid<>;
+ * pnfs_osd_object_cred4 oda_root_obj_cred;
+ * opaque oda_osdname<>;
+ * };
+ */
+struct pnfs_osd_targetid {
+ u32 oti_type;
+ struct nfs4_string oti_scsi_device_id;
+};
+
+/* struct netaddr4 {
+ * // see struct rpcb in RFC1833
+ * string r_netid<>; // network id
+ * string r_addr<>; // universal address
+ * };
+ */
+struct pnfs_osd_net_addr {
+ struct nfs4_string r_netid;
+ struct nfs4_string r_addr;
+};
+
+struct pnfs_osd_targetaddr {
+ u32 ota_available;
+ struct pnfs_osd_net_addr ota_netaddr;
+};
+
+struct pnfs_osd_deviceaddr {
+ struct pnfs_osd_targetid oda_targetid;
+ struct pnfs_osd_targetaddr oda_targetaddr;
+ u8 oda_lun[8];
+ struct nfs4_string oda_systemid;
+ struct pnfs_osd_object_cred oda_root_obj_cred;
+ struct nfs4_string oda_osdname;
+};
+
+/* LAYOUTCOMMIT: layoutupdate */
+
+/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) {
+ * case TRUE:
+ * int64_t dsu_delta;
+ * case FALSE:
+ * void;
+ * };
+ *
+ * struct pnfs_osd_layoutupdate4 {
+ * pnfs_osd_deltaspaceused4 olu_delta_space_used;
+ * bool olu_ioerr_flag;
+ * };
+ */
+struct pnfs_osd_layoutupdate {
+ u32 dsu_valid;
+ s64 dsu_delta;
+ u32 olu_ioerr_flag;
+};
+
+/* LAYOUTRETURN: I/O Error Report */
+
+enum pnfs_osd_errno {
+ PNFS_OSD_ERR_EIO = 1,
+ PNFS_OSD_ERR_NOT_FOUND = 2,
+ PNFS_OSD_ERR_NO_SPACE = 3,
+ PNFS_OSD_ERR_BAD_CRED = 4,
+ PNFS_OSD_ERR_NO_ACCESS = 5,
+ PNFS_OSD_ERR_UNREACHABLE = 6,
+ PNFS_OSD_ERR_RESOURCE = 7
+};
+
+/* struct pnfs_osd_ioerr4 {
+ * pnfs_osd_objid4 oer_component;
+ * length4 oer_comp_offset;
+ * length4 oer_comp_length;
+ * bool oer_iswrite;
+ * pnfs_osd_errno4 oer_errno;
+ * };
+ */
+struct pnfs_osd_ioerr {
+ struct pnfs_osd_objid oer_component;
+ u64 oer_comp_offset;
+ u64 oer_comp_length;
+ u32 oer_iswrite;
+ u32 oer_errno;
+};
+
+/* OSD XDR Client API */
+/* Layout helpers */
+/* Layout decoding is done in two parts:
+ * 1. First call pnfs_osd_xdr_decode_layout_map() to read in only the header part
+ * of the layout. @iter members need not be initialized.
+ * Returned:
+ * @layout members are set. (@layout->olo_comps set to NULL).
+ *
+ * Zero on success, or negative error if passed xdr is broken.
+ *
+ * 2. Then call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns
+ * false, to decode the next component.
+ * Returned:
+ * true if there is more to decode, or false when done or on error.
+ *
+ * Example:
+ * struct pnfs_osd_xdr_decode_layout_iter iter;
+ * struct pnfs_osd_layout layout;
+ * struct pnfs_osd_object_cred comp;
+ * int status;
+ *
+ * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
+ * if (unlikely(status))
+ * goto err;
+ * while (pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) {
+ * // All of the @comp strings point into the xdr_buffer
+ * // or scratch buffer. Copy them out to user memory, e.g.:
+ * copy_single_comp(dest_comp++, &comp);
+ * }
+ * if (unlikely(status))
+ * goto err;
+ */
+
+struct pnfs_osd_xdr_decode_layout_iter {
+ unsigned total_comps;
+ unsigned decoded_comps;
+};
+
+extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
+ struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr);
+
+extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
+ struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
+ int *err);
+
+/* Device Info helpers */
+
+/* Note: All strings inside @deviceaddr point to space inside @p.
+ * @p should stay valid while @deviceaddr is in use.
+ */
+extern void pnfs_osd_xdr_decode_deviceaddr(
+ struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p);
+
+/* layoutupdate (layout_commit) xdr helpers */
+extern int
+pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
+ struct pnfs_osd_layoutupdate *lou);
+
+/* osd_ioerror encoding (layout_return) */
+extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr);
+extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr);
+
+#endif /* __PNFS_OSD_XDR_H__ */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
new file mode 100644
index 000000000..5df733b8f
--- /dev/null
+++ b/include/linux/pnp.h
@@ -0,0 +1,525 @@
+/*
+ * Linux Plug and Play Support
+ * Copyright by Adam Belay <ambx1@neo.rr.com>
+ * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ */
+
+#ifndef _LINUX_PNP_H
+#define _LINUX_PNP_H
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mod_devicetable.h>
+#include <linux/console.h>
+
+#define PNP_NAME_LEN 50
+
+struct pnp_protocol;
+struct pnp_dev;
+
+/*
+ * Resource Management
+ */
+#ifdef CONFIG_PNP
+struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned long type,
+ unsigned int num);
+#else
+static inline struct resource *pnp_get_resource(struct pnp_dev *dev,
+ unsigned long type, unsigned int num)
+{
+ return NULL;
+}
+#endif
+
+static inline int pnp_resource_valid(struct resource *res)
+{
+ if (res)
+ return 1;
+ return 0;
+}
+
+static inline int pnp_resource_enabled(struct resource *res)
+{
+ if (res && !(res->flags & IORESOURCE_DISABLED))
+ return 1;
+ return 0;
+}
+
+static inline resource_size_t pnp_resource_len(struct resource *res)
+{
+ if (res->start == 0 && res->end == 0)
+ return 0;
+ return resource_size(res);
+}
+
+
+static inline resource_size_t pnp_port_start(struct pnp_dev *dev,
+ unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return 0;
+}
+
+static inline resource_size_t pnp_port_end(struct pnp_dev *dev,
+ unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->end;
+ return 0;
+}
+
+static inline unsigned long pnp_port_flags(struct pnp_dev *dev,
+ unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_IO | IORESOURCE_AUTO;
+}
+
+static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
+{
+ return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IO, bar));
+}
+
+static inline resource_size_t pnp_port_len(struct pnp_dev *dev,
+ unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IO, bar);
+
+ if (pnp_resource_valid(res))
+ return pnp_resource_len(res);
+ return 0;
+}
+
+
+static inline resource_size_t pnp_mem_start(struct pnp_dev *dev,
+ unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return 0;
+}
+
+static inline resource_size_t pnp_mem_end(struct pnp_dev *dev,
+ unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->end;
+ return 0;
+}
+
+static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_MEM | IORESOURCE_AUTO;
+}
+
+static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
+{
+ return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_MEM, bar));
+}
+
+static inline resource_size_t pnp_mem_len(struct pnp_dev *dev,
+ unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_MEM, bar);
+
+ if (pnp_resource_valid(res))
+ return pnp_resource_len(res);
+ return 0;
+}
+
+
+static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return -1;
+}
+
+static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_IRQ, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_IRQ | IORESOURCE_AUTO;
+}
+
+static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)
+{
+ return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IRQ, bar));
+}
+
+
+static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);
+
+ if (pnp_resource_valid(res))
+ return res->start;
+ return -1;
+}
+
+static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar)
+{
+ struct resource *res = pnp_get_resource(dev, IORESOURCE_DMA, bar);
+
+ if (pnp_resource_valid(res))
+ return res->flags;
+ return IORESOURCE_DMA | IORESOURCE_AUTO;
+}
+
+static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
+{
+ return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_DMA, bar));
+}
+
+
+/*
+ * Device Management
+ */
+
+struct pnp_card {
+ struct device dev; /* Driver Model device interface */
+ unsigned char number; /* used as an index, must be unique */
+ struct list_head global_list; /* node in global list of cards */
+ struct list_head protocol_list; /* node in protocol's list of cards */
+ struct list_head devices; /* devices attached to the card */
+
+ struct pnp_protocol *protocol;
+ struct pnp_id *id; /* contains supported EISA IDs */
+
+ char name[PNP_NAME_LEN]; /* contains a human-readable name */
+ unsigned char pnpver; /* Plug & Play version */
+ unsigned char productver; /* product version */
+ unsigned int serial; /* serial number */
+ unsigned char checksum; /* if zero - checksum passed */
+ struct proc_dir_entry *procdir; /* directory entry in /proc/bus/isapnp */
+};
+
+#define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list)
+#define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list)
+#define to_pnp_card(n) container_of(n, struct pnp_card, dev)
+#define pnp_for_each_card(card) \
+ for((card) = global_to_pnp_card(pnp_cards.next); \
+ (card) != global_to_pnp_card(&pnp_cards); \
+ (card) = global_to_pnp_card((card)->global_list.next))
+
+struct pnp_card_link {
+ struct pnp_card *card;
+ struct pnp_card_driver *driver;
+ void *driver_data;
+ pm_message_t pm_state;
+};
+
+static inline void *pnp_get_card_drvdata(struct pnp_card_link *pcard)
+{
+ return pcard->driver_data;
+}
+
+static inline void pnp_set_card_drvdata(struct pnp_card_link *pcard, void *data)
+{
+ pcard->driver_data = data;
+}
+
+struct pnp_dev {
+ struct device dev; /* Driver Model device interface */
+ u64 dma_mask;
+ unsigned int number; /* used as an index, must be unique */
+ int status;
+
+ struct list_head global_list; /* node in global list of devices */
+ struct list_head protocol_list; /* node in protocol's list of devices */
+ struct list_head card_list; /* node in card's list of devices */
+ struct list_head rdev_list; /* node in card's list of requested devices */
+
+ struct pnp_protocol *protocol;
+ struct pnp_card *card; /* card the device is attached to, none if NULL */
+ struct pnp_driver *driver;
+ struct pnp_card_link *card_link;
+
+ struct pnp_id *id; /* supported EISA IDs */
+
+ int active;
+ int capabilities;
+ unsigned int num_dependent_sets;
+ struct list_head resources;
+ struct list_head options;
+
+ char name[PNP_NAME_LEN]; /* contains a human-readable name */
+ int flags; /* used by protocols */
+ struct proc_dir_entry *procent; /* device entry in /proc/bus/isapnp */
+ void *data;
+};
+
+#define global_to_pnp_dev(n) list_entry(n, struct pnp_dev, global_list)
+#define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list)
+#define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list)
+#define to_pnp_dev(n) container_of(n, struct pnp_dev, dev)
+#define pnp_for_each_dev(dev) \
+ for((dev) = global_to_pnp_dev(pnp_global.next); \
+ (dev) != global_to_pnp_dev(&pnp_global); \
+ (dev) = global_to_pnp_dev((dev)->global_list.next))
+#define card_for_each_dev(card,dev) \
+ for((dev) = card_to_pnp_dev((card)->devices.next); \
+ (dev) != card_to_pnp_dev(&(card)->devices); \
+ (dev) = card_to_pnp_dev((dev)->card_list.next))
+#define pnp_dev_name(dev) (dev)->name
+
+static inline void *pnp_get_drvdata(struct pnp_dev *pdev)
+{
+ return dev_get_drvdata(&pdev->dev);
+}
+
+static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
+{
+ dev_set_drvdata(&pdev->dev, data);
+}
+
+struct pnp_fixup {
+ char id[7];
+ void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
+};
+
+/* config parameters */
+#define PNP_CONFIG_NORMAL 0x0001
+#define PNP_CONFIG_FORCE 0x0002 /* disables validity checking */
+
+/* capabilities */
+#define PNP_READ 0x0001
+#define PNP_WRITE 0x0002
+#define PNP_DISABLE 0x0004
+#define PNP_CONFIGURABLE 0x0008
+#define PNP_REMOVABLE 0x0010
+#define PNP_CONSOLE 0x0020
+
+#define pnp_can_read(dev) (((dev)->protocol->get) && \
+ ((dev)->capabilities & PNP_READ))
+#define pnp_can_write(dev) (((dev)->protocol->set) && \
+ ((dev)->capabilities & PNP_WRITE))
+#define pnp_can_disable(dev) (((dev)->protocol->disable) && \
+ ((dev)->capabilities & PNP_DISABLE) && \
+ (!((dev)->capabilities & PNP_CONSOLE) || \
+ console_suspend_enabled))
+#define pnp_can_configure(dev) ((!(dev)->active) && \
+ ((dev)->capabilities & PNP_CONFIGURABLE))
+#define pnp_can_suspend(dev) (((dev)->protocol->suspend) && \
+ (!((dev)->capabilities & PNP_CONSOLE) || \
+ console_suspend_enabled))
+
+
+#ifdef CONFIG_ISAPNP
+extern struct pnp_protocol isapnp_protocol;
+#define pnp_device_is_isapnp(dev) ((dev)->protocol == (&isapnp_protocol))
+#else
+#define pnp_device_is_isapnp(dev) 0
+#endif
+extern struct mutex pnp_res_mutex;
+
+#ifdef CONFIG_PNPBIOS
+extern struct pnp_protocol pnpbios_protocol;
+#define pnp_device_is_pnpbios(dev) ((dev)->protocol == (&pnpbios_protocol))
+#else
+#define pnp_device_is_pnpbios(dev) 0
+#endif
+
+#ifdef CONFIG_PNPACPI
+extern struct pnp_protocol pnpacpi_protocol;
+
+static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev)
+{
+ if (dev->protocol == &pnpacpi_protocol)
+ return dev->data;
+ return NULL;
+}
+#else
+#define pnp_acpi_device(dev) 0
+#endif
+
+/* status */
+#define PNP_READY 0x0000
+#define PNP_ATTACHED 0x0001
+#define PNP_BUSY 0x0002
+#define PNP_FAULTY 0x0004
+
+/* isapnp specific macros */
+
+#define isapnp_card_number(dev) ((dev)->card ? (dev)->card->number : -1)
+#define isapnp_csn_number(dev) ((dev)->number)
+
+/*
+ * Driver Management
+ */
+
+struct pnp_id {
+ char id[PNP_ID_LEN];
+ struct pnp_id *next;
+};
+
+struct pnp_driver {
+ char *name;
+ const struct pnp_device_id *id_table;
+ unsigned int flags;
+ int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id);
+ void (*remove) (struct pnp_dev *dev);
+ void (*shutdown) (struct pnp_dev *dev);
+ int (*suspend) (struct pnp_dev *dev, pm_message_t state);
+ int (*resume) (struct pnp_dev *dev);
+ struct device_driver driver;
+};
+
+#define to_pnp_driver(drv) container_of(drv, struct pnp_driver, driver)
+
+struct pnp_card_driver {
+ struct list_head global_list;
+ char *name;
+ const struct pnp_card_device_id *id_table;
+ unsigned int flags;
+ int (*probe) (struct pnp_card_link *card,
+ const struct pnp_card_device_id *card_id);
+ void (*remove) (struct pnp_card_link *card);
+ int (*suspend) (struct pnp_card_link *card, pm_message_t state);
+ int (*resume) (struct pnp_card_link *card);
+ struct pnp_driver link;
+};
+
+#define to_pnp_card_driver(drv) container_of(drv, struct pnp_card_driver, link)
+
+/* pnp driver flags */
+#define PNP_DRIVER_RES_DO_NOT_CHANGE 0x0001 /* do not change the state of the device */
+#define PNP_DRIVER_RES_DISABLE 0x0003 /* ensure the device is disabled */
+
+/*
+ * Protocol Management
+ */
+
+struct pnp_protocol {
+ struct list_head protocol_list;
+ char *name;
+
+ /* resource control functions */
+ int (*get) (struct pnp_dev *dev);
+ int (*set) (struct pnp_dev *dev);
+ int (*disable) (struct pnp_dev *dev);
+
+ /* protocol specific suspend/resume */
+ bool (*can_wakeup) (struct pnp_dev *dev);
+ int (*suspend) (struct pnp_dev * dev, pm_message_t state);
+ int (*resume) (struct pnp_dev * dev);
+
+ /* used by pnp layer only (look but don't touch) */
+ unsigned char number; /* protocol number */
+ struct device dev; /* link to driver model */
+ struct list_head cards;
+ struct list_head devices;
+};
+
+#define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list)
+#define protocol_for_each_card(protocol,card) \
+ for((card) = protocol_to_pnp_card((protocol)->cards.next); \
+ (card) != protocol_to_pnp_card(&(protocol)->cards); \
+ (card) = protocol_to_pnp_card((card)->protocol_list.next))
+#define protocol_for_each_dev(protocol,dev) \
+ for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \
+ (dev) != protocol_to_pnp_dev(&(protocol)->devices); \
+ (dev) = protocol_to_pnp_dev((dev)->protocol_list.next))
+
+extern struct bus_type pnp_bus_type;
+
+#if defined(CONFIG_PNP)
+
+/* device management */
+int pnp_device_attach(struct pnp_dev *pnp_dev);
+void pnp_device_detach(struct pnp_dev *pnp_dev);
+extern struct list_head pnp_global;
+extern int pnp_platform_devices;
+
+/* multidevice card support */
+struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink,
+ const char *id, struct pnp_dev *from);
+void pnp_release_card_device(struct pnp_dev *dev);
+int pnp_register_card_driver(struct pnp_card_driver *drv);
+void pnp_unregister_card_driver(struct pnp_card_driver *drv);
+extern struct list_head pnp_cards;
+
+/* resource management */
+int pnp_possible_config(struct pnp_dev *dev, int type, resource_size_t base,
+ resource_size_t size);
+int pnp_auto_config_dev(struct pnp_dev *dev);
+int pnp_start_dev(struct pnp_dev *dev);
+int pnp_stop_dev(struct pnp_dev *dev);
+int pnp_activate_dev(struct pnp_dev *dev);
+int pnp_disable_dev(struct pnp_dev *dev);
+int pnp_range_reserved(resource_size_t start, resource_size_t end);
+
+/* protocol helpers */
+int pnp_is_active(struct pnp_dev *dev);
+int compare_pnp_id(struct pnp_id *pos, const char *id);
+int pnp_register_driver(struct pnp_driver *drv);
+void pnp_unregister_driver(struct pnp_driver *drv);
+
+#else
+
+/* device management */
+static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; }
+static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { }
+
+#define pnp_platform_devices 0
+
+/* multidevice card support */
+static inline struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, const char *id, struct pnp_dev *from) { return NULL; }
+static inline void pnp_release_card_device(struct pnp_dev *dev) { }
+static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return -ENODEV; }
+static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { }
+
+/* resource management */
+static inline int pnp_possible_config(struct pnp_dev *dev, int type,
+ resource_size_t base,
+ resource_size_t size) { return 0; }
+static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; }
+static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
+static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
+static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
+static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
+static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;}
+
+/* protocol helpers */
+static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
+static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -ENODEV; }
+static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
+static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
+
+#endif /* CONFIG_PNP */
+
+/**
+ * module_pnp_driver() - Helper macro for registering a PnP driver
+ * @__pnp_driver: pnp_driver struct
+ *
+ * Helper macro for PnP drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_pnp_driver(__pnp_driver) \
+ module_driver(__pnp_driver, pnp_register_driver, \
+ pnp_unregister_driver)
+
+#endif /* _LINUX_PNP_H */
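module_pnp_driver() is easiest to see with a skeleton driver. The EISA ID and the example_* names below are placeholders; a real driver matches the IDs its hardware reports and actually claims the resources it needs.

#include <linux/module.h>
#include <linux/pnp.h>

/* Placeholder ID table; "ABC0001" is not a real EISA ID. */
static const struct pnp_device_id example_pnp_ids[] = {
	{ .id = "ABC0001", .driver_data = 0 },
	{ .id = "" }	/* terminator */
};
MODULE_DEVICE_TABLE(pnp, example_pnp_ids);

static int example_pnp_probe(struct pnp_dev *dev,
			     const struct pnp_device_id *dev_id)
{
	/* Resources were assigned by the PnP core; just read them out. */
	if (!pnp_port_valid(dev, 0) || !pnp_irq_valid(dev, 0))
		return -ENODEV;

	dev_info(&dev->dev, "io %#llx irq %llu\n",
		 (unsigned long long)pnp_port_start(dev, 0),
		 (unsigned long long)pnp_irq(dev, 0));
	return 0;
}

static void example_pnp_remove(struct pnp_dev *dev)
{
	/* Nothing to undo in this sketch. */
}

static struct pnp_driver example_pnp_driver = {
	.name		= "example_pnp",
	.id_table	= example_pnp_ids,
	.probe		= example_pnp_probe,
	.remove		= example_pnp_remove,
};
module_pnp_driver(example_pnp_driver);

MODULE_LICENSE("GPL");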
diff --git a/include/linux/poison.h b/include/linux/poison.h
new file mode 100644
index 000000000..2110a81c5
--- /dev/null
+++ b/include/linux/poison.h
@@ -0,0 +1,89 @@
+#ifndef _LINUX_POISON_H
+#define _LINUX_POISON_H
+
+/********** include/linux/list.h **********/
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
+
+/********** include/linux/timer.h **********/
+/*
+ * Magic number "tsta" to indicate a static timer initializer
+ * for the object debugging code.
+ */
+#define TIMER_ENTRY_STATIC ((void *) 0x74737461)
+
+/********** mm/debug-pagealloc.c **********/
+#define PAGE_POISON 0xaa
+
+/********** mm/slab.c **********/
+/*
+ * Magic nums for obj red zoning.
+ * Placed in the first word before and the first word after an obj.
+ */
+#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
+#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
+
+#define SLUB_RED_INACTIVE 0xbb
+#define SLUB_RED_ACTIVE 0xcc
+
+/* ...and for poisoning */
+#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
+#define POISON_FREE 0x6b /* for use-after-free poisoning */
+#define POISON_END 0xa5 /* end-byte of poisoning */
+
+/********** arch/$ARCH/mm/init.c **********/
+#define POISON_FREE_INITMEM 0xcc
+
+/********** arch/ia64/hp/common/sba_iommu.c **********/
+/*
+ * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
+ * value of "SBAIOMMU POISON\0" for spill-over poisoning.
+ */
+
+/********** fs/jbd/journal.c **********/
+#define JBD_POISON_FREE 0x5b
+#define JBD2_POISON_FREE 0x5c
+
+/********** drivers/base/dmapool.c **********/
+#define POOL_POISON_FREED 0xa7 /* !inuse */
+#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
+
+/********** drivers/atm/ **********/
+#define ATM_POISON_FREE 0x12
+#define ATM_POISON 0xdeadbeef
+
+/********** net/ **********/
+#define NEIGHBOR_DEAD 0xdeadbeef
+#define NETFILTER_LINK_POISON 0xdead57ac
+
+/********** kernel/mutexes **********/
+#define MUTEX_DEBUG_INIT 0x11
+#define MUTEX_DEBUG_FREE 0x22
+
+/********** lib/flex_array.c **********/
+#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
+
+/********** security/ **********/
+#define KEY_DESTROY 0xbd
+
+/********** sound/oss/ **********/
+#define OSS_POISON_FREE 0xAB
+
+#endif
diff --git a/include/linux/poll.h b/include/linux/poll.h
new file mode 100644
index 000000000..c08386fb3
--- /dev/null
+++ b/include/linux/poll.h
@@ -0,0 +1,164 @@
+#ifndef _LINUX_POLL_H
+#define _LINUX_POLL_H
+
+
+#include <linux/compiler.h>
+#include <linux/ktime.h>
+#include <linux/wait.h>
+#include <linux/string.h>
+#include <linux/fs.h>
+#include <linux/sysctl.h>
+#include <asm/uaccess.h>
+#include <uapi/linux/poll.h>
+
+extern struct ctl_table epoll_table[]; /* for sysctl */
+/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
+ additional memory. */
+#define MAX_STACK_ALLOC 832
+#define FRONTEND_STACK_ALLOC 256
+#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
+#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
+#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
+#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
+
+#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
+
+struct poll_table_struct;
+
+/*
+ * structures and helpers for f_op->poll implementations
+ */
+typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
+
+/*
+ * Do not touch the structure directly; use the access functions
+ * poll_does_not_wait() and poll_requested_events() instead.
+ */
+typedef struct poll_table_struct {
+ poll_queue_proc _qproc;
+ unsigned long _key;
+} poll_table;
+
+static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
+{
+ if (p && p->_qproc && wait_address)
+ p->_qproc(filp, wait_address, p);
+}
+
+/*
+ * Return true if it is guaranteed that poll will not wait. This is the case
+ * if the poll() of another file descriptor in the set got an event, so there
+ * is no need for waiting.
+ */
+static inline bool poll_does_not_wait(const poll_table *p)
+{
+ return p == NULL || p->_qproc == NULL;
+}
+
+/*
+ * Return the set of events that the application wants to poll for.
+ * This is useful for drivers that need to know whether a DMA transfer has
+ * to be started implicitly on poll(). You typically only want to do that
+ * if the application is actually polling for POLLIN and/or POLLOUT.
+ */
+static inline unsigned long poll_requested_events(const poll_table *p)
+{
+ return p ? p->_key : ~0UL;
+}
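+
+/*
+ * Minimal usage sketch for an f_op->poll implementation.  The foo_dev type,
+ * foo_start_rx() and the flags below are hypothetical names used only for
+ * illustration; poll_requested_events() lets the driver start its receive
+ * path only when the caller actually polls for input.
+ *
+ *	static unsigned int foo_poll(struct file *file, poll_table *wait)
+ *	{
+ *		struct foo_dev *foo = file->private_data;
+ *		unsigned int mask = 0;
+ *
+ *		if (poll_requested_events(wait) & (POLLIN | POLLRDNORM))
+ *			foo_start_rx(foo);
+ *
+ *		poll_wait(file, &foo->waitq, wait);
+ *		if (foo->data_ready)
+ *			mask |= POLLIN | POLLRDNORM;
+ *		if (foo->can_write)
+ *			mask |= POLLOUT | POLLWRNORM;
+ *		return mask;
+ *	}
+ */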
+
+static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
+{
+ pt->_qproc = qproc;
+ pt->_key = ~0UL; /* all events enabled */
+}
+
+struct poll_table_entry {
+ struct file *filp;
+ unsigned long key;
+ wait_queue_t wait;
+ wait_queue_head_t *wait_address;
+};
+
+/*
+ * Structures and helpers for select/poll syscall
+ */
+struct poll_wqueues {
+ poll_table pt;
+ struct poll_table_page *table;
+ struct task_struct *polling_task;
+ int triggered;
+ int error;
+ int inline_index;
+ struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
+};
+
+extern void poll_initwait(struct poll_wqueues *pwq);
+extern void poll_freewait(struct poll_wqueues *pwq);
+extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
+ ktime_t *expires, unsigned long slack);
+extern long select_estimate_accuracy(struct timespec *tv);
+
+
+static inline int poll_schedule(struct poll_wqueues *pwq, int state)
+{
+ return poll_schedule_timeout(pwq, state, NULL, 0);
+}
+
+/*
+ * Scalable version of the fd_set.
+ */
+
+typedef struct {
+ unsigned long *in, *out, *ex;
+ unsigned long *res_in, *res_out, *res_ex;
+} fd_set_bits;
+
+/*
+ * How many longwords for "nr" bits?
+ */
+#define FDS_BITPERLONG (8*sizeof(long))
+#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
+#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
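+
+/*
+ * Worked example: on a 64-bit kernel (sizeof(long) == 8, FDS_BITPERLONG == 64)
+ * a 100-bit fd_set needs FDS_LONGS(100) == 2 longwords, so each of the six
+ * bitmaps referenced by an fd_set_bits occupies FDS_BYTES(100) == 16 bytes.
+ */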
+
+/*
+ * We do a VERIFY_WRITE here even though we are only reading this time:
+ * we'll write to it eventually..
+ *
+ * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
+ */
+static inline
+int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
+{
+ nr = FDS_BYTES(nr);
+ if (ufdset)
+ return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;
+
+ memset(fdset, 0, nr);
+ return 0;
+}
+
+static inline unsigned long __must_check
+set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
+{
+ if (ufdset)
+ return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
+ return 0;
+}
+
+static inline
+void zero_fd_set(unsigned long nr, unsigned long *fdset)
+{
+ memset(fdset, 0, FDS_BYTES(nr));
+}
+
+#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
+
+extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
+extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
+ struct timespec *end_time);
+extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ fd_set __user *exp, struct timespec *end_time);
+
+extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
+
+#endif /* _LINUX_POLL_H */
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
new file mode 100644
index 000000000..34c4498b8
--- /dev/null
+++ b/include/linux/posix-clock.h
@@ -0,0 +1,151 @@
+/*
+ * posix-clock.h - support for dynamic clock devices
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _LINUX_POSIX_CLOCK_H_
+#define _LINUX_POSIX_CLOCK_H_
+
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/posix-timers.h>
+#include <linux/rwsem.h>
+
+struct posix_clock;
+
+/**
+ * struct posix_clock_operations - functional interface to the clock
+ *
+ * Every posix clock is represented by a character device. Drivers may
+ * optionally offer extended capabilities by implementing the
+ * character device methods. The character device file operations are
+ * first handled by the clock device layer, then passed on to the
+ * driver by calling these functions.
+ *
+ * @owner: The clock driver should set to THIS_MODULE
+ * @clock_adjtime: Adjust the clock
+ * @clock_gettime: Read the current time
+ * @clock_getres: Get the clock resolution
+ * @clock_settime: Set the current time value
+ * @timer_create: Create a new timer
+ * @timer_delete: Remove a previously created timer
+ * @timer_gettime: Get remaining time and interval of a timer
+ * @timer_settime: Set a timer's initial expiration and interval
+ * @fasync: Optional character device fasync method
+ * @mmap: Optional character device mmap method
+ * @open: Optional character device open method
+ * @release: Optional character device release method
+ * @ioctl: Optional character device ioctl method
+ * @read: Optional character device read method
+ * @poll: Optional character device poll method
+ */
+struct posix_clock_operations {
+ struct module *owner;
+
+ int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
+
+ int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts);
+
+ int (*clock_getres) (struct posix_clock *pc, struct timespec *ts);
+
+ int (*clock_settime)(struct posix_clock *pc,
+ const struct timespec *ts);
+
+ int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
+
+ int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
+
+ void (*timer_gettime)(struct posix_clock *pc,
+ struct k_itimer *kit, struct itimerspec *tsp);
+
+ int (*timer_settime)(struct posix_clock *pc,
+ struct k_itimer *kit, int flags,
+ struct itimerspec *tsp, struct itimerspec *old);
+ /*
+ * Optional character device methods:
+ */
+ int (*fasync) (struct posix_clock *pc,
+ int fd, struct file *file, int on);
+
+ long (*ioctl) (struct posix_clock *pc,
+ unsigned int cmd, unsigned long arg);
+
+ int (*mmap) (struct posix_clock *pc,
+ struct vm_area_struct *vma);
+
+ int (*open) (struct posix_clock *pc, fmode_t f_mode);
+
+ uint (*poll) (struct posix_clock *pc,
+ struct file *file, poll_table *wait);
+
+ int (*release) (struct posix_clock *pc);
+
+ ssize_t (*read) (struct posix_clock *pc,
+ uint flags, char __user *buf, size_t cnt);
+};
+
+/**
+ * struct posix_clock - represents a dynamic posix clock
+ *
+ * @ops: Functional interface to the clock
+ * @cdev: Character device instance for this clock
+ * @kref: Reference count.
+ * @rwsem: Protects the 'zombie' field from concurrent access.
+ * @zombie: If 'zombie' is true, then the hardware has disappeared.
+ * @release: A function to free the structure when the reference count reaches
+ * zero. May be NULL if structure is statically allocated.
+ *
+ * Drivers should embed their struct posix_clock within a private
+ * structure, obtaining a reference to it during callbacks using
+ * container_of().
+ */
+struct posix_clock {
+ struct posix_clock_operations ops;
+ struct cdev cdev;
+ struct kref kref;
+ struct rw_semaphore rwsem;
+ bool zombie;
+ void (*release)(struct posix_clock *clk);
+};
+
+/**
+ * posix_clock_register() - register a new clock
+ * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
+ * @devid: Allocated device id
+ *
+ * A clock driver calls this function to register itself with the
+ * clock device subsystem. If 'clk' points to dynamically allocated
+ * memory, then the caller must provide a 'release' function to free
+ * that memory.
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+int posix_clock_register(struct posix_clock *clk, dev_t devid);
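+
+/*
+ * Registration sketch (foo_clock, foo_clock_ops and foo_clock_release are
+ * hypothetical names; foo_clock_ops is a struct posix_clock_operations
+ * filled in by the driver):
+ *
+ *	struct foo_clock {
+ *		struct posix_clock clock;
+ *	};
+ *
+ *	static void foo_clock_release(struct posix_clock *pc)
+ *	{
+ *		kfree(container_of(pc, struct foo_clock, clock));
+ *	}
+ *
+ *	foo->clock.ops = foo_clock_ops;
+ *	foo->clock.release = foo_clock_release;
+ *	err = posix_clock_register(&foo->clock, devid);
+ */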
+
+/**
+ * posix_clock_unregister() - unregister a clock
+ * @clk: Clock instance previously registered via posix_clock_register()
+ *
+ * A clock driver calls this function to remove itself from the clock
+ * device subsystem. The posix_clock itself will remain (in an
+ * inactive state) until its reference count drops to zero, at which
+ * point it will be deallocated with its 'release' method.
+ */
+void posix_clock_unregister(struct posix_clock *clk);
+
+#endif
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
new file mode 100644
index 000000000..907f3fd19
--- /dev/null
+++ b/include/linux/posix-timers.h
@@ -0,0 +1,141 @@
+#ifndef _linux_POSIX_TIMERS_H
+#define _linux_POSIX_TIMERS_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/timex.h>
+#include <linux/alarmtimer.h>
+
+
+static inline unsigned long long cputime_to_expires(cputime_t expires)
+{
+ return (__force unsigned long long)expires;
+}
+
+static inline cputime_t expires_to_cputime(unsigned long long expires)
+{
+ return (__force cputime_t)expires;
+}
+
+struct cpu_timer_list {
+ struct list_head entry;
+ unsigned long long expires, incr;
+ struct task_struct *task;
+ int firing;
+};
+
+/*
+ * Bit fields within a clockid:
+ *
+ * The most significant 29 bits hold either a pid or a file descriptor.
+ *
+ * Bit 2 indicates whether a cpu clock refers to a thread or a process.
+ *
+ * Bits 1 and 0 give the type: PROF=0, VIRT=1, SCHED=2, or FD=3.
+ *
+ * A clockid is invalid if bits 2, 1, and 0 are all set.
+ */
+#define CPUCLOCK_PID(clock) ((pid_t) ~((clock) >> 3))
+#define CPUCLOCK_PERTHREAD(clock) \
+ (((clock) & (clockid_t) CPUCLOCK_PERTHREAD_MASK) != 0)
+
+#define CPUCLOCK_PERTHREAD_MASK 4
+#define CPUCLOCK_WHICH(clock) ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
+#define CPUCLOCK_CLOCK_MASK 3
+#define CPUCLOCK_PROF 0
+#define CPUCLOCK_VIRT 1
+#define CPUCLOCK_SCHED 2
+#define CPUCLOCK_MAX 3
+#define CLOCKFD CPUCLOCK_MAX
+#define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK)
+
+#define MAKE_PROCESS_CPUCLOCK(pid, clock) \
+ ((~(clockid_t) (pid) << 3) | (clockid_t) (clock))
+#define MAKE_THREAD_CPUCLOCK(tid, clock) \
+ MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK)
+
+#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
+#define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3))
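+
+/*
+ * Worked example: for a dynamic clock opened on file descriptor 3,
+ * FD_TO_CLOCKID(3) == (~3 << 3) | CLOCKFD == 0xffffffe3 (-29 as a signed
+ * clockid_t), and CLOCKID_TO_FD() recovers 3 from that value.
+ */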
+
+/* POSIX.1b interval timer structure. */
+struct k_itimer {
+ struct list_head list; /* free/ allocate list */
+ struct hlist_node t_hash;
+ spinlock_t it_lock;
+ clockid_t it_clock; /* which timer type */
+ timer_t it_id; /* timer id */
+ int it_overrun; /* overrun on pending signal */
+ int it_overrun_last; /* overrun on last delivered signal */
+ int it_requeue_pending; /* waiting to requeue this timer */
+#define REQUEUE_PENDING 1
+ int it_sigev_notify; /* notify word of sigevent struct */
+ struct signal_struct *it_signal;
+ union {
+ struct pid *it_pid; /* pid of process to send signal to */
+ struct task_struct *it_process; /* for clock_nanosleep */
+ };
+ struct sigqueue *sigq; /* signal queue entry. */
+ union {
+ struct {
+ struct hrtimer timer;
+ ktime_t interval;
+ } real;
+ struct cpu_timer_list cpu;
+ struct {
+ unsigned int clock;
+ unsigned int node;
+ unsigned long incr;
+ unsigned long expires;
+ } mmtimer;
+ struct {
+ struct alarm alarmtimer;
+ ktime_t interval;
+ } alarm;
+ struct rcu_head rcu;
+ } it;
+};
+
+struct k_clock {
+ int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
+ int (*clock_set) (const clockid_t which_clock,
+ const struct timespec *tp);
+ int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
+ int (*clock_adj) (const clockid_t which_clock, struct timex *tx);
+ int (*timer_create) (struct k_itimer *timer);
+ int (*nsleep) (const clockid_t which_clock, int flags,
+ struct timespec *, struct timespec __user *);
+ long (*nsleep_restart) (struct restart_block *restart_block);
+ int (*timer_set) (struct k_itimer * timr, int flags,
+ struct itimerspec * new_setting,
+ struct itimerspec * old_setting);
+ int (*timer_del) (struct k_itimer * timr);
+#define TIMER_RETRY 1
+ void (*timer_get) (struct k_itimer * timr,
+ struct itimerspec * cur_setting);
+};
+
+extern struct k_clock clock_posix_cpu;
+extern struct k_clock clock_posix_dynamic;
+
+void posix_timers_register_clock(const clockid_t clock_id, struct k_clock *new_clock);
+
+/* function to call to trigger timer event */
+int posix_timer_event(struct k_itimer *timr, int si_private);
+
+void posix_cpu_timer_schedule(struct k_itimer *timer);
+
+void run_posix_cpu_timers(struct task_struct *task);
+void posix_cpu_timers_exit(struct task_struct *task);
+void posix_cpu_timers_exit_group(struct task_struct *task);
+
+bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk);
+
+void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
+ cputime_t *newval, cputime_t *oldval);
+
+long clock_nanosleep_restart(struct restart_block *restart_block);
+
+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
+
+#endif
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
new file mode 100644
index 000000000..3e96a6a76
--- /dev/null
+++ b/include/linux/posix_acl.h
@@ -0,0 +1,144 @@
+/*
+ File: linux/posix_acl.h
+
+ (C) 2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
+*/
+
+
+#ifndef __LINUX_POSIX_ACL_H
+#define __LINUX_POSIX_ACL_H
+
+#include <linux/bug.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#define ACL_UNDEFINED_ID (-1)
+
+/* a_type field in acl_user_posix_entry_t */
+#define ACL_TYPE_ACCESS (0x8000)
+#define ACL_TYPE_DEFAULT (0x4000)
+
+/* e_tag entry in struct posix_acl_entry */
+#define ACL_USER_OBJ (0x01)
+#define ACL_USER (0x02)
+#define ACL_GROUP_OBJ (0x04)
+#define ACL_GROUP (0x08)
+#define ACL_MASK (0x10)
+#define ACL_OTHER (0x20)
+
+/* permissions in the e_perm field */
+#define ACL_READ (0x04)
+#define ACL_WRITE (0x02)
+#define ACL_EXECUTE (0x01)
+//#define ACL_ADD (0x08)
+//#define ACL_DELETE (0x10)
+
+struct posix_acl_entry {
+ short e_tag;
+ unsigned short e_perm;
+ union {
+ kuid_t e_uid;
+ kgid_t e_gid;
+ };
+};
+
+struct posix_acl {
+ union {
+ atomic_t a_refcount;
+ struct rcu_head a_rcu;
+ };
+ unsigned int a_count;
+ struct posix_acl_entry a_entries[0];
+};
+
+#define FOREACH_ACL_ENTRY(pa, acl, pe) \
+ for(pa=(acl)->a_entries, pe=pa+(acl)->a_count; pa<pe; pa++)
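+
+/*
+ * Usage sketch (illustrative only): walking every entry of an ACL,
+ * e.g. to pick out the ACL_MASK permissions:
+ *
+ *	struct posix_acl_entry *pa, *pe;
+ *
+ *	FOREACH_ACL_ENTRY(pa, acl, pe) {
+ *		if (pa->e_tag == ACL_MASK)
+ *			mask_perm = pa->e_perm;
+ *	}
+ */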
+
+
+/*
+ * Duplicate an ACL handle.
+ */
+static inline struct posix_acl *
+posix_acl_dup(struct posix_acl *acl)
+{
+ if (acl)
+ atomic_inc(&acl->a_refcount);
+ return acl;
+}
+
+/*
+ * Free an ACL handle.
+ */
+static inline void
+posix_acl_release(struct posix_acl *acl)
+{
+ if (acl && atomic_dec_and_test(&acl->a_refcount))
+ kfree_rcu(acl, a_rcu);
+}
+
+
+/* posix_acl.c */
+
+extern void posix_acl_init(struct posix_acl *, int);
+extern struct posix_acl *posix_acl_alloc(int, gfp_t);
+extern int posix_acl_valid(const struct posix_acl *);
+extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
+extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
+extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *);
+extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
+extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
+
+extern struct posix_acl *get_posix_acl(struct inode *, int);
+extern int set_posix_acl(struct inode *, int, struct posix_acl *);
+
+#ifdef CONFIG_FS_POSIX_ACL
+extern int posix_acl_chmod(struct inode *, umode_t);
+extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
+ struct posix_acl **);
+
+extern int simple_set_acl(struct inode *, struct posix_acl *, int);
+extern int simple_acl_create(struct inode *, struct inode *);
+
+struct posix_acl **acl_by_type(struct inode *inode, int type);
+struct posix_acl *get_cached_acl(struct inode *inode, int type);
+struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type);
+void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl);
+void forget_cached_acl(struct inode *inode, int type);
+void forget_all_cached_acls(struct inode *inode);
+
+static inline void cache_no_acl(struct inode *inode)
+{
+ inode->i_acl = NULL;
+ inode->i_default_acl = NULL;
+}
+#else
+static inline int posix_acl_chmod(struct inode *inode, umode_t mode)
+{
+ return 0;
+}
+
+#define simple_set_acl NULL
+
+static inline int simple_acl_create(struct inode *dir, struct inode *inode)
+{
+ return 0;
+}
+static inline void cache_no_acl(struct inode *inode)
+{
+}
+
+static inline int posix_acl_create(struct inode *inode, umode_t *mode,
+ struct posix_acl **default_acl, struct posix_acl **acl)
+{
+ *default_acl = *acl = NULL;
+ return 0;
+}
+
+static inline void forget_all_cached_acls(struct inode *inode)
+{
+}
+#endif /* CONFIG_FS_POSIX_ACL */
+
+struct posix_acl *get_acl(struct inode *inode, int type);
+
+#endif /* __LINUX_POSIX_ACL_H */
diff --git a/include/linux/posix_acl_xattr.h b/include/linux/posix_acl_xattr.h
new file mode 100644
index 000000000..6f14ee295
--- /dev/null
+++ b/include/linux/posix_acl_xattr.h
@@ -0,0 +1,75 @@
+/*
+ File: linux/posix_acl_xattr.h
+
+ Extended attribute system call representation of Access Control Lists.
+
+ Copyright (C) 2000 by Andreas Gruenbacher <a.gruenbacher@computer.org>
+ Copyright (C) 2002 SGI - Silicon Graphics, Inc <linux-xfs@oss.sgi.com>
+ */
+#ifndef _POSIX_ACL_XATTR_H
+#define _POSIX_ACL_XATTR_H
+
+#include <linux/posix_acl.h>
+
+/* Extended attribute names */
+#define POSIX_ACL_XATTR_ACCESS "system.posix_acl_access"
+#define POSIX_ACL_XATTR_DEFAULT "system.posix_acl_default"
+
+/* Supported ACL a_version fields */
+#define POSIX_ACL_XATTR_VERSION 0x0002
+
+
+/* An undefined entry e_id value */
+#define ACL_UNDEFINED_ID (-1)
+
+typedef struct {
+ __le16 e_tag;
+ __le16 e_perm;
+ __le32 e_id;
+} posix_acl_xattr_entry;
+
+typedef struct {
+ __le32 a_version;
+ posix_acl_xattr_entry a_entries[0];
+} posix_acl_xattr_header;
+
+
+static inline size_t
+posix_acl_xattr_size(int count)
+{
+ return (sizeof(posix_acl_xattr_header) +
+ (count * sizeof(posix_acl_xattr_entry)));
+}
+
+static inline int
+posix_acl_xattr_count(size_t size)
+{
+ if (size < sizeof(posix_acl_xattr_header))
+ return -1;
+ size -= sizeof(posix_acl_xattr_header);
+ if (size % sizeof(posix_acl_xattr_entry))
+ return -1;
+ return size / sizeof(posix_acl_xattr_entry);
+}
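+
+/*
+ * Worked example: a three-entry ACL occupies
+ * posix_acl_xattr_size(3) == 4 + 3 * 8 == 28 bytes in the xattr value,
+ * and posix_acl_xattr_count(28) recovers the entry count of 3.
+ */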
+
+#ifdef CONFIG_FS_POSIX_ACL
+void posix_acl_fix_xattr_from_user(void *value, size_t size);
+void posix_acl_fix_xattr_to_user(void *value, size_t size);
+#else
+static inline void posix_acl_fix_xattr_from_user(void *value, size_t size)
+{
+}
+static inline void posix_acl_fix_xattr_to_user(void *value, size_t size)
+{
+}
+#endif
+
+struct posix_acl *posix_acl_from_xattr(struct user_namespace *user_ns,
+ const void *value, size_t size);
+int posix_acl_to_xattr(struct user_namespace *user_ns,
+ const struct posix_acl *acl, void *buffer, size_t size);
+
+extern const struct xattr_handler posix_acl_access_xattr_handler;
+extern const struct xattr_handler posix_acl_default_xattr_handler;
+
+#endif /* _POSIX_ACL_XATTR_H */
diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h
new file mode 100644
index 000000000..cdbb6c2a8
--- /dev/null
+++ b/include/linux/power/ab8500.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) ST-Ericsson 2013
+ * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
+ * License terms: GNU General Public License v2
+ */
+
+#ifndef PWR_AB8500_H
+#define PWR_AB8500_H
+
+extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[];
+extern const int ab8500_temp_tbl_a_size;
+
+extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[];
+extern const int ab8500_temp_tbl_b_size;
+
+#endif /* PWR_AB8500_H */
diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h
new file mode 100644
index 000000000..50762af8b
--- /dev/null
+++ b/include/linux/power/bq2415x_charger.h
@@ -0,0 +1,58 @@
+/*
+ * bq2415x charger driver
+ *
+ * Copyright (C) 2011-2013 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef BQ2415X_CHARGER_H
+#define BQ2415X_CHARGER_H
+
+/*
+ * This is the platform data for the bq2415x chip. It contains default board
+ * voltages and currents, which can also be configured later via sysfs. If a
+ * value is -1, the default chip value (specified in the datasheet) will be
+ * used.
+ *
+ * The resistor_sense value is needed for configuring the charge and
+ * termination current. If it is less than or equal to zero, configuring
+ * the charge and termination current will not be possible.
+ *
+ * For automode support, the name of the power supply device must be
+ * provided in notify_device. The device driver must report the
+ * POWER_SUPPLY_PROP_CURRENT_MAX property immediately when the current
+ * changes.
+ */
+
+/* Supported modes with maximal current limit */
+enum bq2415x_mode {
+ BQ2415X_MODE_OFF, /* offline mode (charger disabled) */
+ BQ2415X_MODE_NONE, /* unknown charger (100mA) */
+ BQ2415X_MODE_HOST_CHARGER, /* usb host/hub charger (500mA) */
+ BQ2415X_MODE_DEDICATED_CHARGER, /* dedicated charger (unlimited) */
+ BQ2415X_MODE_BOOST, /* boost mode (charging disabled) */
+};
+
+struct bq2415x_platform_data {
+ int current_limit; /* mA */
+ int weak_battery_voltage; /* mV */
+ int battery_regulation_voltage; /* mV */
+ int charge_current; /* mA */
+ int termination_current; /* mA */
+ int resistor_sense; /* m ohm */
+ const char *notify_device; /* name */
+};
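+
+/*
+ * Board-file sketch; the values below are made up for illustration (units
+ * follow the field comments above) and -1 would keep the chip's datasheet
+ * default for a field:
+ *
+ *	static struct bq2415x_platform_data foo_bq24150_pdata = {
+ *		.current_limit			= 500,
+ *		.weak_battery_voltage		= 3400,
+ *		.battery_regulation_voltage	= 4200,
+ *		.charge_current			= 650,
+ *		.termination_current		= 100,
+ *		.resistor_sense			= 68,
+ *		.notify_device			= NULL,
+ *	};
+ */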
+
+#endif
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
new file mode 100644
index 000000000..9f0283721
--- /dev/null
+++ b/include/linux/power/bq24190_charger.h
@@ -0,0 +1,16 @@
+/*
+ * Platform data for the TI bq24190 battery charger driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _BQ24190_CHARGER_H_
+#define _BQ24190_CHARGER_H_
+
+struct bq24190_platform_data {
+ unsigned int gpio_int; /* GPIO pin that's connected to INT# */
+};
+
+#endif
diff --git a/include/linux/power/bq24735-charger.h b/include/linux/power/bq24735-charger.h
new file mode 100644
index 000000000..f536164a6
--- /dev/null
+++ b/include/linux/power/bq24735-charger.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __CHARGER_BQ24735_H_
+#define __CHARGER_BQ24735_H_
+
+#include <linux/types.h>
+#include <linux/power_supply.h>
+
+struct bq24735_platform {
+ uint32_t charge_current;
+ uint32_t charge_voltage;
+ uint32_t input_current;
+
+ const char *name;
+
+ int status_gpio;
+ int status_gpio_active_low;
+ bool status_gpio_valid;
+
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+#endif /* __CHARGER_BQ24735_H_ */
diff --git a/include/linux/power/bq27x00_battery.h b/include/linux/power/bq27x00_battery.h
new file mode 100644
index 000000000..a857f719b
--- /dev/null
+++ b/include/linux/power/bq27x00_battery.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_BQ27X00_BATTERY_H__
+#define __LINUX_BQ27X00_BATTERY_H__
+
+/**
+ * struct bq27000_platform_data - Platform data for bq27000 devices
+ * @name: Name of the battery. If NULL the driver will fall back to "bq27000".
+ * @read: HDQ read callback.
+ * This function should provide access to the HDQ bus the battery is
+ * connected to.
+ * The first parameter is a pointer to the battery device, the second the
+ * register to be read. The return value should either be the content of
+ * the passed register or an error value.
+ */
+struct bq27000_platform_data {
+ const char *name;
+ int (*read)(struct device *dev, unsigned int);
+};
+
+#endif
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
new file mode 100644
index 000000000..eadf28cb2
--- /dev/null
+++ b/include/linux/power/charger-manager.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * MyungJoo.Ham <myungjoo.ham@samsung.com>
+ *
+ * Charger Manager.
+ * This framework makes it possible to control multiple chargers and to
+ * monitor charging, even in the context of suspend-to-RAM, through an
+ * interface that combines the chargers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+**/
+
+#ifndef _CHARGER_MANAGER_H
+#define _CHARGER_MANAGER_H
+
+#include <linux/power_supply.h>
+#include <linux/extcon.h>
+#include <linux/alarmtimer.h>
+
+enum data_source {
+ CM_BATTERY_PRESENT,
+ CM_NO_BATTERY,
+ CM_FUEL_GAUGE,
+ CM_CHARGER_STAT,
+};
+
+enum polling_modes {
+ CM_POLL_DISABLE = 0,
+ CM_POLL_ALWAYS,
+ CM_POLL_EXTERNAL_POWER_ONLY,
+ CM_POLL_CHARGING_ONLY,
+};
+
+enum cm_event_types {
+ CM_EVENT_UNKNOWN = 0,
+ CM_EVENT_BATT_FULL,
+ CM_EVENT_BATT_IN,
+ CM_EVENT_BATT_OUT,
+ CM_EVENT_BATT_OVERHEAT,
+ CM_EVENT_BATT_COLD,
+ CM_EVENT_EXT_PWR_IN_OUT,
+ CM_EVENT_CHG_START_STOP,
+ CM_EVENT_OTHERS,
+};
+
+/**
+ * struct charger_cable
+ * @extcon_name: the name of extcon device.
+ * @name: the name of charger cable(external connector).
+ * @extcon_dev: the extcon device.
+ * @wq: the workqueue to control charger according to the state of
+ * charger cable. If charger cable is attached, enable charger.
+ * But if charger cable is detached, disable charger.
+ * @nb: the notifier block to receive changed state from EXTCON
+ * (External Connector) when charger cable is attached/detached.
+ * @attached: the state of charger cable.
+ * true: the charger cable is attached
+ * false: the charger cable is detached
+ * @charger: the instance of struct charger_regulator.
+ * @cm: the Charger Manager representing the battery.
+ */
+struct charger_cable {
+ const char *extcon_name;
+ const char *name;
+
+	/* The charger-manager uses the extcon framework */
+ struct extcon_specific_cable_nb extcon_dev;
+ struct work_struct wq;
+ struct notifier_block nb;
+
+ /* The state of charger cable */
+ bool attached;
+
+ struct charger_regulator *charger;
+
+ /*
+	 * Set the min/max current of the regulator to protect against
+	 * over-current, according to the kind of charger cable attached.
+ */
+ int min_uA;
+ int max_uA;
+
+ struct charger_manager *cm;
+};
+
+/**
+ * struct charger_regulator
+ * @regulator_name: the name of regulator for using charger.
+ * @consumer: the regulator consumer for the charger.
+ * @externally_control:
+ * Set if the charger-manager cannot control charger,
+ * the charger will be maintained with disabled state.
+ * @cables:
+ *	the array of charger cables used to enable/disable the charger
+ *	and to set the current limit according to the constraint data of
+ *	struct charger_cable whenever a charger cable included in the
+ *	array is attached or detached.
+ * @num_cables: the number of charger cables.
+ * @attr_g: Attribute group for the charger(regulator)
+ * @attr_name: "name" sysfs entry
+ * @attr_state: "state" sysfs entry
+ * @attr_externally_control: "externally_control" sysfs entry
+ * @attrs: Arrays pointing to attr_name/state/externally_control for attr_g
+ */
+struct charger_regulator {
+ /* The name of regulator for charging */
+ const char *regulator_name;
+ struct regulator *consumer;
+
+ /* charger never on when system is on */
+ int externally_control;
+
+ /*
+	 * Store constraint information related to the current limit;
+	 * each cable has different conditions for charging.
+ */
+ struct charger_cable *cables;
+ int num_cables;
+
+ struct attribute_group attr_g;
+ struct device_attribute attr_name;
+ struct device_attribute attr_state;
+ struct device_attribute attr_externally_control;
+ struct attribute *attrs[4];
+
+ struct charger_manager *cm;
+};
+
+/**
+ * struct charger_desc
+ * @psy_name: the name of power-supply-class for charger manager
+ * @polling_mode:
+ * Determine which polling mode will be used
+ * @fullbatt_vchkdrop_ms:
+ * @fullbatt_vchkdrop_uV:
+ * Check voltage drop after the battery is fully charged.
+ * If it has dropped more than fullbatt_vchkdrop_uV after
+ * fullbatt_vchkdrop_ms, CM will restart charging.
+ * @fullbatt_uV: voltage in microvolt
+ * If VBATT >= fullbatt_uV, it is assumed to be full.
+ * @fullbatt_soc: state of Charge in %
+ * If state of Charge >= fullbatt_soc, it is assumed to be full.
+ * @fullbatt_full_capacity: full capacity measure
+ * If full capacity of battery >= fullbatt_full_capacity,
+ * it is assumed to be full.
+ * @polling_interval_ms: interval in milliseconds at which the
+ *	charger manager will monitor battery health
+ * @battery_present:
+ *	Specify where information on the existence of the battery can be obtained
+ * @psy_charger_stat: the names of power-supply for chargers
+ * @num_charger_regulator: the number of entries in charger_regulators
+ * @charger_regulators: array of charger regulators
+ * @psy_fuel_gauge: the name of power-supply for fuel gauge
+ * @thermal_zone: the name of the thermal zone for the battery
+ * @temp_min: Minimum battery temperature for charging.
+ * @temp_max: Maximum battery temperature for charging.
+ * @temp_diff: Temperature differential to restart charging.
+ * @measure_battery_temp:
+ * true: measure battery temperature
+ * false: measure ambient temperature
+ * @charging_max_duration_ms: Maximum possible duration for charging.
+ *	If the total charging duration exceeds 'charging_max_duration_ms',
+ *	cm stops charging.
+ * @discharging_max_duration_ms:
+ *	Maximum possible duration for discharging with the charger cable
+ *	after full-batt. If the discharging duration exceeds
+ *	'discharging_max_duration_ms', cm restarts charging.
+ */
+struct charger_desc {
+ const char *psy_name;
+
+ enum polling_modes polling_mode;
+ unsigned int polling_interval_ms;
+
+ unsigned int fullbatt_vchkdrop_ms;
+ unsigned int fullbatt_vchkdrop_uV;
+ unsigned int fullbatt_uV;
+ unsigned int fullbatt_soc;
+ unsigned int fullbatt_full_capacity;
+
+ enum data_source battery_present;
+
+ const char **psy_charger_stat;
+
+ int num_charger_regulators;
+ struct charger_regulator *charger_regulators;
+
+ const char *psy_fuel_gauge;
+
+ const char *thermal_zone;
+
+ int temp_min;
+ int temp_max;
+ int temp_diff;
+
+ bool measure_battery_temp;
+
+ u32 charging_max_duration_ms;
+ u32 discharging_max_duration_ms;
+};
+
+#define PSY_NAME_MAX 30
+
+/**
+ * struct charger_manager
+ * @entry: entry for list
+ * @dev: device pointer
+ * @desc: instance of charger_desc
+ * @fuel_gauge: power_supply for fuel gauge
+ * @charger_stat: array of power_supply for chargers
+ * @tzd_batt : thermal zone device for battery
+ * @charger_enabled: the state of charger
+ * @fullbatt_vchk_jiffies_at:
+ * jiffies at the time full battery check will occur.
+ * @fullbatt_vchk_work: work queue for full battery check
+ * @emergency_stop:
+ *	When set, charging is stopped.
+ * @psy_name_buf: the name of power-supply-class for charger manager
+ * @charger_psy: power_supply for charger manager
+ * @status_save_ext_pwr_inserted:
+ * saved status of external power before entering suspend-to-RAM
+ * @status_save_batt:
+ * saved status of battery before entering suspend-to-RAM
+ * @charging_start_time: saved start time of enabling charging
+ * @charging_end_time: saved end time of disabling charging
+ */
+struct charger_manager {
+ struct list_head entry;
+ struct device *dev;
+ struct charger_desc *desc;
+
+#ifdef CONFIG_THERMAL
+ struct thermal_zone_device *tzd_batt;
+#endif
+ bool charger_enabled;
+
+ unsigned long fullbatt_vchk_jiffies_at;
+ struct delayed_work fullbatt_vchk_work;
+
+ int emergency_stop;
+
+ char psy_name_buf[PSY_NAME_MAX + 1];
+ struct power_supply_desc charger_psy_desc;
+ struct power_supply *charger_psy;
+
+ u64 charging_start_time;
+ u64 charging_end_time;
+};
+
+#ifdef CONFIG_CHARGER_MANAGER
+extern void cm_notify_event(struct power_supply *psy,
+ enum cm_event_types type, char *msg);
+#else
+static inline void cm_notify_event(struct power_supply *psy,
+ enum cm_event_types type, char *msg) { }
+#endif
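+
+/*
+ * Notification sketch (illustrative only): a fuel-gauge or charger driver
+ * reports a fully charged battery to the charger manager with
+ *
+ *	cm_notify_event(psy, CM_EVENT_BATT_FULL, NULL);
+ *
+ * where psy is that driver's own power_supply instance and a NULL msg lets
+ * the charger manager fall back to its default message for the event.
+ */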
+#endif /* _CHARGER_MANAGER_H */
diff --git a/include/linux/power/generic-adc-battery.h b/include/linux/power/generic-adc-battery.h
new file mode 100644
index 000000000..b1ebe0853
--- /dev/null
+++ b/include/linux/power/generic-adc-battery.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012, Anish Kumar <anish198519851985@gmail.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef GENERIC_ADC_BATTERY_H
+#define GENERIC_ADC_BATTERY_H
+
+/**
+ * struct gab_platform_data - platform_data for generic adc iio battery driver.
+ * @battery_info: recommended structure to specify static power supply
+ * parameters
+ * @cal_charge: calculate charge level.
+ * @gpio_charge_finished: gpio for the charger.
+ * @gpio_inverted:	Should be 1 if the GPIO is active low, otherwise 0
+ * @jitter_delay:	delay required after the interrupt to check battery
+ *			status. The default is 10 ms.
+ */
+struct gab_platform_data {
+ struct power_supply_info battery_info;
+ int (*cal_charge)(long value);
+ int gpio_charge_finished;
+ bool gpio_inverted;
+ int jitter_delay;
+};
+
+#endif /* GENERIC_ADC_BATTERY_H */
diff --git a/include/linux/power/gpio-charger.h b/include/linux/power/gpio-charger.h
new file mode 100644
index 000000000..de1dfe09a
--- /dev/null
+++ b/include/linux/power/gpio-charger.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __LINUX_POWER_GPIO_CHARGER_H__
+#define __LINUX_POWER_GPIO_CHARGER_H__
+
+#include <linux/power_supply.h>
+#include <linux/types.h>
+
+/**
+ * struct gpio_charger_platform_data - platform_data for gpio_charger devices
+ * @name: Name for the charger's power_supply device
+ * @type: Type of the charger
+ * @gpio: GPIO which is used to indicate the charger's status
+ * @gpio_active_low: Should be set to 1 if the GPIO is active low, otherwise 0
+ * @supplied_to: Array of battery names to which this charger supplies power
+ * @num_supplicants: Number of entries in the supplied_to array
+ */
+struct gpio_charger_platform_data {
+ const char *name;
+ enum power_supply_type type;
+
+ int gpio;
+ int gpio_active_low;
+
+ char **supplied_to;
+ size_t num_supplicants;
+};
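+
+/*
+ * Board-file sketch (the names and the GPIO number are hypothetical):
+ *
+ *	static char *foo_batteries[] = { "foo-battery" };
+ *
+ *	static struct gpio_charger_platform_data foo_charger_pdata = {
+ *		.name			= "ac",
+ *		.type			= POWER_SUPPLY_TYPE_MAINS,
+ *		.gpio			= 42,
+ *		.gpio_active_low	= 0,
+ *		.supplied_to		= foo_batteries,
+ *		.num_supplicants	= ARRAY_SIZE(foo_batteries),
+ *	};
+ */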
+
+#endif
diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h
new file mode 100644
index 000000000..0105d9e7a
--- /dev/null
+++ b/include/linux/power/isp1704_charger.h
@@ -0,0 +1,30 @@
+/*
+ * ISP1704 USB Charger Detection driver
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#ifndef __ISP1704_CHARGER_H
+#define __ISP1704_CHARGER_H
+
+struct isp1704_charger_data {
+ void (*set_power)(bool on);
+ int enable_gpio;
+};
+
+#endif
diff --git a/include/linux/power/jz4740-battery.h b/include/linux/power/jz4740-battery.h
new file mode 100644
index 000000000..19c9610c7
--- /dev/null
+++ b/include/linux/power/jz4740-battery.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2009, Jiejing Zhang <kzjeef@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __JZ4740_BATTERY_H
+#define __JZ4740_BATTERY_H
+
+struct jz_battery_platform_data {
+ struct power_supply_info info;
+ int gpio_charge; /* GPIO port of Charger state */
+ int gpio_charge_active_low;
+};
+
+#endif
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
new file mode 100644
index 000000000..cf112b407
--- /dev/null
+++ b/include/linux/power/max17042_battery.h
@@ -0,0 +1,220 @@
+/*
+ * Fuel gauge driver for Maxim 17042 / 8966 / 8997
+ * Note that Maxim 8966 and 8997 are MFDs and this driver is one of their subdevices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __MAX17042_BATTERY_H_
+#define __MAX17042_BATTERY_H_
+
+#define MAX17042_STATUS_BattAbsent (1 << 3)
+#define MAX17042_BATTERY_FULL (100)
+#define MAX17042_DEFAULT_SNS_RESISTOR (10000)
+
+#define MAX17042_CHARACTERIZATION_DATA_SIZE 48
+
+enum max17042_register {
+ MAX17042_STATUS = 0x00,
+ MAX17042_VALRT_Th = 0x01,
+ MAX17042_TALRT_Th = 0x02,
+ MAX17042_SALRT_Th = 0x03,
+ MAX17042_AtRate = 0x04,
+ MAX17042_RepCap = 0x05,
+ MAX17042_RepSOC = 0x06,
+ MAX17042_Age = 0x07,
+ MAX17042_TEMP = 0x08,
+ MAX17042_VCELL = 0x09,
+ MAX17042_Current = 0x0A,
+ MAX17042_AvgCurrent = 0x0B,
+
+ MAX17042_SOC = 0x0D,
+ MAX17042_AvSOC = 0x0E,
+ MAX17042_RemCap = 0x0F,
+ MAX17042_FullCAP = 0x10,
+ MAX17042_TTE = 0x11,
+ MAX17042_V_empty = 0x12,
+
+ MAX17042_RSLOW = 0x14,
+
+ MAX17042_AvgTA = 0x16,
+ MAX17042_Cycles = 0x17,
+ MAX17042_DesignCap = 0x18,
+ MAX17042_AvgVCELL = 0x19,
+ MAX17042_MinMaxTemp = 0x1A,
+ MAX17042_MinMaxVolt = 0x1B,
+ MAX17042_MinMaxCurr = 0x1C,
+ MAX17042_CONFIG = 0x1D,
+ MAX17042_ICHGTerm = 0x1E,
+ MAX17042_AvCap = 0x1F,
+ MAX17042_ManName = 0x20,
+ MAX17042_DevName = 0x21,
+
+ MAX17042_FullCAPNom = 0x23,
+ MAX17042_TempNom = 0x24,
+ MAX17042_TempLim = 0x25,
+ MAX17042_TempHot = 0x26,
+ MAX17042_AIN = 0x27,
+ MAX17042_LearnCFG = 0x28,
+ MAX17042_FilterCFG = 0x29,
+ MAX17042_RelaxCFG = 0x2A,
+ MAX17042_MiscCFG = 0x2B,
+ MAX17042_TGAIN = 0x2C,
+ MAx17042_TOFF = 0x2D,
+ MAX17042_CGAIN = 0x2E,
+ MAX17042_COFF = 0x2F,
+
+ MAX17042_MaskSOC = 0x32,
+ MAX17042_SOC_empty = 0x33,
+ MAX17042_T_empty = 0x34,
+
+ MAX17042_FullCAP0 = 0x35,
+ MAX17042_LAvg_empty = 0x36,
+ MAX17042_FCTC = 0x37,
+ MAX17042_RCOMP0 = 0x38,
+ MAX17042_TempCo = 0x39,
+ MAX17042_EmptyTempCo = 0x3A,
+ MAX17042_K_empty0 = 0x3B,
+ MAX17042_TaskPeriod = 0x3C,
+ MAX17042_FSTAT = 0x3D,
+
+ MAX17042_SHDNTIMER = 0x3F,
+
+ MAX17042_dQacc = 0x45,
+ MAX17042_dPacc = 0x46,
+
+ MAX17042_VFSOC0 = 0x48,
+
+ MAX17042_QH = 0x4D,
+ MAX17042_QL = 0x4E,
+
+ MAX17042_VFSOC0Enable = 0x60,
+ MAX17042_MLOCKReg1 = 0x62,
+ MAX17042_MLOCKReg2 = 0x63,
+
+ MAX17042_MODELChrTbl = 0x80,
+
+ MAX17042_OCV = 0xEE,
+
+ MAX17042_OCVInternal = 0xFB,
+
+ MAX17042_VFSOC = 0xFF,
+};
+
+/* Registers specific to max17047/50 */
+enum max17047_register {
+ MAX17047_QRTbl00 = 0x12,
+ MAX17047_FullSOCThr = 0x13,
+ MAX17047_QRTbl10 = 0x22,
+ MAX17047_QRTbl20 = 0x32,
+ MAX17047_V_empty = 0x3A,
+ MAX17047_QRTbl30 = 0x42,
+};
+
+enum max170xx_chip_type {
+ MAXIM_DEVICE_TYPE_UNKNOWN = 0,
+ MAXIM_DEVICE_TYPE_MAX17042,
+ MAXIM_DEVICE_TYPE_MAX17047,
+ MAXIM_DEVICE_TYPE_MAX17050,
+
+ MAXIM_DEVICE_TYPE_NUM
+};
+
+/*
+ * used for setting a register to a desired value
+ * addr : address for a register
+ * data : setting value for the register
+ */
+struct max17042_reg_data {
+ u8 addr;
+ u16 data;
+};
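+
+/*
+ * Sketch of an init_data table (the register names are defined above; the
+ * values and the pdata pointer are placeholders used only for illustration):
+ *
+ *	static struct max17042_reg_data foo_max17042_init[] = {
+ *		{ MAX17042_CONFIG,	0x2210 },
+ *		{ MAX17042_FilterCFG,	0x87a4 },
+ *	};
+ *
+ *	pdata->init_data = foo_max17042_init;
+ *	pdata->num_init_data = ARRAY_SIZE(foo_max17042_init);
+ */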
+
+struct max17042_config_data {
+ /* External current sense resistor value in milli-ohms */
+ u32 cur_sense_val;
+
+ /* A/D measurement */
+ u16 tgain; /* 0x2C */
+ u16 toff; /* 0x2D */
+ u16 cgain; /* 0x2E */
+ u16 coff; /* 0x2F */
+
+ /* Alert / Status */
+ u16 valrt_thresh; /* 0x01 */
+ u16 talrt_thresh; /* 0x02 */
+ u16 soc_alrt_thresh; /* 0x03 */
+ u16 config; /* 0x01D */
+ u16 shdntimer; /* 0x03F */
+
+ /* App data */
+ u16 full_soc_thresh; /* 0x13 */
+ u16 design_cap; /* 0x18 */
+ u16 ichgt_term; /* 0x1E */
+
+ /* MG3 config */
+ u16 at_rate; /* 0x04 */
+ u16 learn_cfg; /* 0x28 */
+ u16 filter_cfg; /* 0x29 */
+ u16 relax_cfg; /* 0x2A */
+ u16 misc_cfg; /* 0x2B */
+ u16 masksoc; /* 0x32 */
+
+ /* MG3 save and restore */
+ u16 fullcap; /* 0x10 */
+ u16 fullcapnom; /* 0x23 */
+ u16 socempty; /* 0x33 */
+ u16 lavg_empty; /* 0x36 */
+ u16 dqacc; /* 0x45 */
+ u16 dpacc; /* 0x46 */
+ u16 qrtbl00; /* 0x12 */
+ u16 qrtbl10; /* 0x22 */
+ u16 qrtbl20; /* 0x32 */
+ u16 qrtbl30; /* 0x42 */
+
+ /* Cell technology from power_supply.h */
+ u16 cell_technology;
+
+ /* Cell Data */
+ u16 vempty; /* 0x12 */
+ u16 temp_nom; /* 0x24 */
+ u16 temp_lim; /* 0x25 */
+ u16 fctc; /* 0x37 */
+ u16 rcomp0; /* 0x38 */
+ u16 tcompc0; /* 0x39 */
+ u16 empty_tempco; /* 0x3A */
+ u16 kempty0; /* 0x3B */
+ u16 cell_char_tbl[MAX17042_CHARACTERIZATION_DATA_SIZE];
+} __packed;
+
+struct max17042_platform_data {
+ struct max17042_reg_data *init_data;
+ struct max17042_config_data *config_data;
+	int num_init_data;	/* Number of entries in init_data array */
+ bool enable_current_sense;
+ bool enable_por_init; /* Use POR init from Maxim appnote */
+
+ /*
+ * R_sns in micro-ohms.
+	 * default 10000 (if r_sns = 0), as it is the value recommended by
+	 * the datasheet, although it can be changed by board designers.
+ */
+ unsigned int r_sns;
+};
+
+#endif /* __MAX17042_BATTERY_H_ */
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
new file mode 100644
index 000000000..24f51db8a
--- /dev/null
+++ b/include/linux/power/max8903_charger.h
@@ -0,0 +1,57 @@
+/*
+ * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __MAX8903_CHARGER_H__
+#define __MAX8903_CHARGER_H__
+
+struct max8903_pdata {
+ /*
+ * GPIOs
+ * cen, chg, flt, and usus are optional.
+	 * Whether dok, dcm, and uok are required depends on the status of
+	 * dc_valid and usb_valid.
+ */
+ int cen; /* Charger Enable input */
+ int dok; /* DC(Adapter) Power OK output */
+ int uok; /* USB Power OK output */
+ int chg; /* Charger status output */
+ int flt; /* Fault output */
+ int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */
+ int usus; /* USB Suspend Input (1: suspended) */
+
+ /*
+ * DC(Adapter/TA) is wired
+ * When dc_valid is true,
+ * dok and dcm should be valid.
+ *
+ * At least one of dc_valid or usb_valid should be true.
+ */
+ bool dc_valid;
+ /*
+ * USB is wired
+ * When usb_valid is true,
+ * uok should be valid.
+ */
+ bool usb_valid;
+};
+
+#endif /* __MAX8903_CHARGER_H__ */
diff --git a/include/linux/power/sbs-battery.h b/include/linux/power/sbs-battery.h
new file mode 100644
index 000000000..2b0a9d9ff
--- /dev/null
+++ b/include/linux/power/sbs-battery.h
@@ -0,0 +1,42 @@
+/*
+ * Gas Gauge driver for SBS Compliant Gas Gauges
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __LINUX_POWER_SBS_BATTERY_H_
+#define __LINUX_POWER_SBS_BATTERY_H_
+
+#include <linux/power_supply.h>
+#include <linux/types.h>
+
+/**
+ * struct sbs_platform_data - platform data for sbs devices
+ * @battery_detect: GPIO which is used to detect battery presence
+ * @battery_detect_present: gpio state when battery is present (0 / 1)
+ * @i2c_retry_count: # of times to retry on i2c IO failure
+ * @poll_retry_count: # of times to retry looking for new status after
+ * external change notification
+ */
+struct sbs_platform_data {
+ int battery_detect;
+ int battery_detect_present;
+ int i2c_retry_count;
+ int poll_retry_count;
+};
+
+#endif
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
new file mode 100644
index 000000000..d8b187c39
--- /dev/null
+++ b/include/linux/power/smartreflex.h
@@ -0,0 +1,318 @@
+/*
+ * OMAP Smartreflex Defines and Routines
+ *
+ * Author: Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __POWER_SMARTREFLEX_H
+#define __POWER_SMARTREFLEX_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/platform_data/voltage-omap.h>
+
+/*
+ * Different Smartreflex IP versions. The v1 is the 65nm version used in
+ * OMAP3430. The v2 is the update for the 45nm version of the IP
+ * used in OMAP3630 and OMAP4430
+ */
+#define SR_TYPE_V1 1
+#define SR_TYPE_V2 2
+
+/* SMART REFLEX REG ADDRESS OFFSET */
+#define SRCONFIG 0x00
+#define SRSTATUS 0x04
+#define SENVAL 0x08
+#define SENMIN 0x0C
+#define SENMAX 0x10
+#define SENAVG 0x14
+#define AVGWEIGHT 0x18
+#define NVALUERECIPROCAL 0x1c
+#define SENERROR_V1 0x20
+#define ERRCONFIG_V1 0x24
+#define IRQ_EOI 0x20
+#define IRQSTATUS_RAW 0x24
+#define IRQSTATUS 0x28
+#define IRQENABLE_SET 0x2C
+#define IRQENABLE_CLR 0x30
+#define SENERROR_V2 0x34
+#define ERRCONFIG_V2 0x38
+
+/* Bit/Shift Positions */
+
+/* SRCONFIG */
+#define SRCONFIG_ACCUMDATA_SHIFT 22
+#define SRCONFIG_SRCLKLENGTH_SHIFT 12
+#define SRCONFIG_SENNENABLE_V1_SHIFT 5
+#define SRCONFIG_SENPENABLE_V1_SHIFT 3
+#define SRCONFIG_SENNENABLE_V2_SHIFT 1
+#define SRCONFIG_SENPENABLE_V2_SHIFT 0
+#define SRCONFIG_CLKCTRL_SHIFT 0
+
+#define SRCONFIG_ACCUMDATA_MASK (0x3ff << 22)
+
+#define SRCONFIG_SRENABLE BIT(11)
+#define SRCONFIG_SENENABLE BIT(10)
+#define SRCONFIG_ERRGEN_EN BIT(9)
+#define SRCONFIG_MINMAXAVG_EN BIT(8)
+#define SRCONFIG_DELAYCTRL BIT(2)
+
+/* AVGWEIGHT */
+#define AVGWEIGHT_SENPAVGWEIGHT_SHIFT 2
+#define AVGWEIGHT_SENNAVGWEIGHT_SHIFT 0
+
+/* NVALUERECIPROCAL */
+#define NVALUERECIPROCAL_SENPGAIN_SHIFT 20
+#define NVALUERECIPROCAL_SENNGAIN_SHIFT 16
+#define NVALUERECIPROCAL_RNSENP_SHIFT 8
+#define NVALUERECIPROCAL_RNSENN_SHIFT 0
+
+/* ERRCONFIG */
+#define ERRCONFIG_ERRWEIGHT_SHIFT 16
+#define ERRCONFIG_ERRMAXLIMIT_SHIFT 8
+#define ERRCONFIG_ERRMINLIMIT_SHIFT 0
+
+#define SR_ERRWEIGHT_MASK (0x07 << 16)
+#define SR_ERRMAXLIMIT_MASK (0xff << 8)
+#define SR_ERRMINLIMIT_MASK (0xff << 0)
+
+#define ERRCONFIG_VPBOUNDINTEN_V1 BIT(31)
+#define ERRCONFIG_VPBOUNDINTST_V1 BIT(30)
+#define ERRCONFIG_MCUACCUMINTEN BIT(29)
+#define ERRCONFIG_MCUACCUMINTST BIT(28)
+#define ERRCONFIG_MCUVALIDINTEN BIT(27)
+#define ERRCONFIG_MCUVALIDINTST BIT(26)
+#define ERRCONFIG_MCUBOUNDINTEN BIT(25)
+#define ERRCONFIG_MCUBOUNDINTST BIT(24)
+#define ERRCONFIG_MCUDISACKINTEN BIT(23)
+#define ERRCONFIG_VPBOUNDINTST_V2 BIT(23)
+#define ERRCONFIG_MCUDISACKINTST BIT(22)
+#define ERRCONFIG_VPBOUNDINTEN_V2 BIT(22)
+
+#define ERRCONFIG_STATUS_V1_MASK (ERRCONFIG_VPBOUNDINTST_V1 | \
+ ERRCONFIG_MCUACCUMINTST | \
+ ERRCONFIG_MCUVALIDINTST | \
+ ERRCONFIG_MCUBOUNDINTST | \
+ ERRCONFIG_MCUDISACKINTST)
+/* IRQSTATUS */
+#define IRQSTATUS_MCUACCUMINT BIT(3)
+#define IRQSTATUS_MCVALIDINT BIT(2)
+#define IRQSTATUS_MCBOUNDSINT BIT(1)
+#define IRQSTATUS_MCUDISABLEACKINT BIT(0)
+
+/* IRQENABLE_SET and IRQENABLE_CLEAR */
+#define IRQENABLE_MCUACCUMINT BIT(3)
+#define IRQENABLE_MCUVALIDINT BIT(2)
+#define IRQENABLE_MCUBOUNDSINT BIT(1)
+#define IRQENABLE_MCUDISABLEACKINT BIT(0)
+
+/* Common Bit values */
+
+#define SRCLKLENGTH_12MHZ_SYSCLK 0x3c
+#define SRCLKLENGTH_13MHZ_SYSCLK 0x41
+#define SRCLKLENGTH_19MHZ_SYSCLK 0x60
+#define SRCLKLENGTH_26MHZ_SYSCLK 0x82
+#define SRCLKLENGTH_38MHZ_SYSCLK 0xC0
+
+/*
+ * 3430 specific values. Maybe these should be passed from board file or
+ * pmic structures.
+ */
+#define OMAP3430_SR_ACCUMDATA 0x1f4
+
+#define OMAP3430_SR1_SENPAVGWEIGHT 0x03
+#define OMAP3430_SR1_SENNAVGWEIGHT 0x03
+
+#define OMAP3430_SR2_SENPAVGWEIGHT 0x01
+#define OMAP3430_SR2_SENNAVGWEIGHT 0x01
+
+#define OMAP3430_SR_ERRWEIGHT 0x04
+#define OMAP3430_SR_ERRMAXLIMIT 0x02
+
+struct omap_sr {
+ char *name;
+ struct list_head node;
+ struct platform_device *pdev;
+ struct omap_sr_nvalue_table *nvalue_table;
+ struct voltagedomain *voltdm;
+ struct dentry *dbg_dir;
+ unsigned int irq;
+ int srid;
+ int ip_type;
+ int nvalue_count;
+ bool autocomp_active;
+ u32 clk_length;
+ u32 err_weight;
+ u32 err_minlimit;
+ u32 err_maxlimit;
+ u32 accum_data;
+ u32 senn_avgweight;
+ u32 senp_avgweight;
+ u32 senp_mod;
+ u32 senn_mod;
+ void __iomem *base;
+};
+
+/**
+ * test_cond_timeout - busy-loop, testing a condition
+ * @cond: condition to test until it evaluates to true
+ * @timeout: maximum number of microseconds in the timeout
+ * @index: loop index (integer)
+ *
+ * Loop waiting for @cond to become true or until at least @timeout
+ * microseconds have passed. To use, define some integer @index in the
+ * calling code. After running, if @index == @timeout, then the loop has
+ * timed out.
+ *
+ * Copied from omap_test_timeout */
+#define sr_test_cond_timeout(cond, timeout, index) \
+({ \
+ for (index = 0; index < timeout; index++) { \
+ if (cond) \
+ break; \
+ udelay(1); \
+ } \
+})
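+
+/*
+ * Usage sketch (which bit is polled is illustrative only): wait up to 100
+ * microseconds for the MCU disable-acknowledge status bit, then check the
+ * loop index for a timeout.
+ *
+ *	int i;
+ *
+ *	sr_test_cond_timeout(readl_relaxed(sr->base + ERRCONFIG_V1) &
+ *			     ERRCONFIG_MCUDISACKINTST, 100, i);
+ *	if (i == 100)
+ *		return -ETIMEDOUT;
+ */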
+
+/**
+ * struct omap_sr_pmic_data - Structure to be populated by pmic code to pass
+ * pmic specific info to smartreflex driver
+ *
+ * @sr_pmic_init: API to initialize smartreflex on the PMIC side.
+ */
+struct omap_sr_pmic_data {
+ void (*sr_pmic_init) (void);
+};
+
+/**
+ * struct omap_smartreflex_dev_attr - Smartreflex Device attribute.
+ *
+ * @sensor_voltdm_name: Name of voltdomain of SR instance
+ */
+struct omap_smartreflex_dev_attr {
+ const char *sensor_voltdm_name;
+};
+
+#ifdef CONFIG_POWER_AVS_OMAP
+/*
+ * The smartreflex driver supports CLASS1, CLASS2 and CLASS3 SR.
+ * The smartreflex class driver should pass the class type.
+ * Should be used to populate the class_type field of the
+ * omap_smartreflex_class_data structure.
+ */
+#define SR_CLASS1 0x1
+#define SR_CLASS2 0x2
+#define SR_CLASS3 0x3
+
+/**
+ * struct omap_sr_class_data - Smartreflex class driver info
+ *
+ * @enable:		API to enable a particular class smartreflex.
+ * @disable: API to disable a particular class smartreflex.
+ * @configure: API to configure a particular class smartreflex.
+ * @notify: API to notify the class driver about an event in SR.
+ * Not needed for class3.
+ * @notify_flags: specify the events to be notified to the class driver
+ * @class_type: specify which smartreflex class.
+ * Can be used by the SR driver to take any class
+ * based decisions.
+ */
+struct omap_sr_class_data {
+ int (*enable)(struct omap_sr *sr);
+ int (*disable)(struct omap_sr *sr, int is_volt_reset);
+ int (*configure)(struct omap_sr *sr);
+ int (*notify)(struct omap_sr *sr, u32 status);
+ u8 notify_flags;
+ u8 class_type;
+};
+
+/**
+ * struct omap_sr_nvalue_table - Smartreflex n-target value info
+ *
+ * @efuse_offs: The offset of the efuse where n-target values are stored.
+ * @nvalue: The n-target value.
+ * @errminlimit: The value of the ERRMINLIMIT bitfield for this n-target
+ * @volt_nominal: microvolts DC that the VDD is initially programmed to
+ */
+struct omap_sr_nvalue_table {
+ u32 efuse_offs;
+ u32 nvalue;
+ u32 errminlimit;
+ unsigned long volt_nominal;
+};
+
+/**
+ * struct omap_sr_data - Smartreflex platform data.
+ *
+ * @name: instance name
+ * @ip_type: Smartreflex IP type.
+ * @senp_mod: SENPENABLE value of the sr CONFIG register
+ * @senn_mod: SENNENABLE value for sr CONFIG register
+ * @err_weight:		ERRWEIGHT value of the sr ERRCONFIG register
+ * @err_maxlimit:	ERRMAXLIMIT value of the sr ERRCONFIG register
+ * @accum_data:		ACCUMDATA value of the sr CONFIG register
+ * @senn_avgweight:	SENNAVGWEIGHT value of the sr AVGWEIGHT register
+ * @senp_avgweight:	SENPAVGWEIGHT value of the sr AVGWEIGHT register
+ * @nvalue_count: Number of distinct nvalues in the nvalue table
+ * @enable_on_init: whether this sr module needs to be enabled at
+ * boot up or not.
+ * @nvalue_table: table containing the efuse offsets and nvalues
+ * corresponding to them.
+ * @voltdm: Pointer to the voltage domain associated with the SR
+ */
+struct omap_sr_data {
+ const char *name;
+ int ip_type;
+ u32 senp_mod;
+ u32 senn_mod;
+ u32 err_weight;
+ u32 err_maxlimit;
+ u32 accum_data;
+ u32 senn_avgweight;
+ u32 senp_avgweight;
+ int nvalue_count;
+ bool enable_on_init;
+ struct omap_sr_nvalue_table *nvalue_table;
+ struct voltagedomain *voltdm;
+};
+
+/* Smartreflex module enable/disable interface */
+void omap_sr_enable(struct voltagedomain *voltdm);
+void omap_sr_disable(struct voltagedomain *voltdm);
+void omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
+
+/* API to register the pmic specific data with the smartreflex driver. */
+void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
+
+/* Smartreflex driver hooks to be called from Smartreflex class driver */
+int sr_enable(struct omap_sr *sr, unsigned long volt);
+void sr_disable(struct omap_sr *sr);
+int sr_configure_errgen(struct omap_sr *sr);
+int sr_disable_errgen(struct omap_sr *sr);
+int sr_configure_minmax(struct omap_sr *sr);
+
+/* API to register the smartreflex class driver with the smartreflex driver */
+int sr_register_class(struct omap_sr_class_data *class_data);
+#else
+static inline void omap_sr_enable(struct voltagedomain *voltdm) {}
+static inline void omap_sr_disable(struct voltagedomain *voltdm) {}
+static inline void omap_sr_disable_reset_volt(
+ struct voltagedomain *voltdm) {}
+static inline void omap_sr_register_pmic(
+ struct omap_sr_pmic_data *pmic_data) {}
+#endif
+#endif
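
The class-driver hooks declared above are easiest to read with a concrete caller. Below is a minimal, hypothetical sketch (not the in-tree class driver): the include path, the my_sr_*/example_* names, the placeholder OPP voltage and the register/bit polled by the busy-wait helper are all assumptions made for illustration.

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/power/smartreflex.h>	/* assumed path of this header */

/* Hypothetical class-3 style driver; callback bodies are placeholders. */
static int my_sr_class_enable(struct omap_sr *sr)
{
	unsigned long nominal_uv = 1200000;	/* placeholder OPP voltage */

	return sr_enable(sr, nominal_uv);
}

static int my_sr_class_disable(struct omap_sr *sr, int is_volt_reset)
{
	sr_disable(sr);
	return 0;
}

static int my_sr_class_configure(struct omap_sr *sr)
{
	/* Class 3 uses the error generator; a minmax-based class would
	 * call sr_configure_minmax() here instead. */
	return sr_configure_errgen(sr);
}

static struct omap_sr_class_data my_sr_class = {
	.enable		= my_sr_class_enable,
	.disable	= my_sr_class_disable,
	.configure	= my_sr_class_configure,
	.class_type	= SR_CLASS3,	/* ->notify not needed for class 3 */
};

static int __init my_sr_class_init(void)
{
	/* Typically registered early, before the SR instances are probed. */
	return sr_register_class(&my_sr_class);
}

/* The sr_test_cond_timeout() helper above is used like this: after the
 * loop, index == timeout means the condition never became true. The
 * status register and bit are placeholders. */
static int example_wait_ready(void __iomem *status_reg)
{
	int i;

	sr_test_cond_timeout(readl(status_reg) & BIT(0), 100, i);
	return (i == 100) ? -ETIMEDOUT : 0;
}
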
diff --git a/include/linux/power/smb347-charger.h b/include/linux/power/smb347-charger.h
new file mode 100644
index 000000000..b3cb20dab
--- /dev/null
+++ b/include/linux/power/smb347-charger.h
@@ -0,0 +1,117 @@
+/*
+ * Summit Microelectronics SMB347 Battery Charger Driver
+ *
+ * Copyright (C) 2011, Intel Corporation
+ *
+ * Authors: Bruce E. Robertson <bruce.e.robertson@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SMB347_CHARGER_H
+#define SMB347_CHARGER_H
+
+#include <linux/types.h>
+#include <linux/power_supply.h>
+
+enum {
+ /* use the default compensation method */
+ SMB347_SOFT_TEMP_COMPENSATE_DEFAULT = -1,
+
+ SMB347_SOFT_TEMP_COMPENSATE_NONE,
+ SMB347_SOFT_TEMP_COMPENSATE_CURRENT,
+ SMB347_SOFT_TEMP_COMPENSATE_VOLTAGE,
+};
+
+/* Use default factory programmed value for hard/soft temperature limit */
+#define SMB347_TEMP_USE_DEFAULT -273
+
+/*
+ * Charging enable can be controlled by software (via i2c) by
+ * smb347-charger driver or by EN pin (active low/high).
+ */
+enum smb347_chg_enable {
+ SMB347_CHG_ENABLE_SW,
+ SMB347_CHG_ENABLE_PIN_ACTIVE_LOW,
+ SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH,
+};
+
+/**
+ * struct smb347_charger_platform_data - platform data for SMB347 charger
+ * @battery_info: Information about the battery
+ * @max_charge_current: maximum current (in uA) the battery can be charged
+ * @max_charge_voltage: maximum voltage (in uV) the battery can be charged
+ * @pre_charge_current: current (in uA) to use in pre-charging phase
+ * @termination_current: current (in uA) used to determine when the
+ * charging cycle terminates
+ * @pre_to_fast_voltage: voltage (in uV) threshold used for transitioning
+ * from pre-charge to fast charge mode
+ * @mains_current_limit: maximum input current drawn from AC/DC input (in uA)
+ * @usb_hc_current_limit: maximum input high current (in uA) drawn from USB
+ * input
+ * @chip_temp_threshold: die temperature where device starts limiting charge
+ * current [%100 - %130] (in degree C)
+ * @soft_cold_temp_limit: soft cold temperature limit [%0 - %15] (in degree C),
+ * granularity is 5 deg C.
+ * @soft_hot_temp_limit: soft hot temperature limit [%40 - %55] (in degree C),
+ * granularity is 5 deg C.
+ * @hard_cold_temp_limit: hard cold temperature limit [%-5 - %10] (in degree C),
+ * granularity is 5 deg C.
+ * @hard_hot_temp_limit: hard hot temperature limit [%50 - %65] (in degree C),
+ * granularity is 5 deg C.
+ * @suspend_on_hard_temp_limit: suspend charging when hard limit is hit
+ * @soft_temp_limit_compensation: compensation method when soft temperature
+ * limit is hit
+ * @charge_current_compensation: current (in uA) for charging compensation
+ * current when temperature hits soft limits
+ * @use_mains: AC/DC input can be used
+ * @use_usb: USB input can be used
+ * @use_usb_otg: USB OTG output can be used (not implemented yet)
+ * @irq_gpio: GPIO number used for interrupts (%-1 if not used)
+ * @enable_control: how charging enable/disable is controlled
+ * (driver/pin controls)
+ *
+ * @use_mains, @use_usb, and @use_usb_otg are means to enable/disable
+ * hardware support for these. This is useful when we want to have, for
+ * example, OTG charging controlled via the OTG transceiver driver and not
+ * by the SMB347 hardware.
+ *
+ * Hard and soft temperature limit values are given as described in the
+ * device data sheet and assuming NTC beta value is %3750. Even if this is
+ * not the case, these values should be used. They can be mapped to the
+ * corresponding NTC beta values with the help of table %2 in the data
+ * sheet. So for example if NTC beta is %3375 and we want to program hard
+ * hot limit to be %53 deg C, @hard_hot_temp_limit should be set to %50.
+ *
+ * If a zero value is given in any of the current and voltage values, the
+ * factory programmed default will be used. For soft/hard temperature
+ * values, pass in %SMB347_TEMP_USE_DEFAULT instead.
+ */
+struct smb347_charger_platform_data {
+ struct power_supply_info battery_info;
+ unsigned int max_charge_current;
+ unsigned int max_charge_voltage;
+ unsigned int pre_charge_current;
+ unsigned int termination_current;
+ unsigned int pre_to_fast_voltage;
+ unsigned int mains_current_limit;
+ unsigned int usb_hc_current_limit;
+ unsigned int chip_temp_threshold;
+ int soft_cold_temp_limit;
+ int soft_hot_temp_limit;
+ int hard_cold_temp_limit;
+ int hard_hot_temp_limit;
+ bool suspend_on_hard_temp_limit;
+ unsigned int soft_temp_limit_compensation;
+ unsigned int charge_current_compensation;
+ bool use_mains;
+ bool use_usb;
+ bool use_usb_otg;
+ int irq_gpio;
+ enum smb347_chg_enable enable_control;
+};
+
+#endif /* SMB347_CHARGER_H */
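
As a usage illustration, here is a hypothetical board-level table populating struct smb347_charger_platform_data. All current, voltage and capacity figures are invented for the example; fields left at zero fall back to the factory defaults, and the temperature limits use %SMB347_TEMP_USE_DEFAULT as described above.

#include <linux/power/smb347-charger.h>

/* Hypothetical board configuration; every limit below is an example value. */
static struct smb347_charger_platform_data example_smb347_pdata = {
	.battery_info = {
		.name			= "example-battery",
		.technology		= POWER_SUPPLY_TECHNOLOGY_LION,
		.voltage_max_design	= 4200000,	/* 4.2 V in uV */
		.voltage_min_design	= 3300000,	/* uV */
		.charge_full_design	= 1500000,	/* 1500 mAh in uAh */
	},
	.max_charge_current		= 1000000,	/* 1 A in uA */
	.max_charge_voltage		= 4200000,	/* uV */
	.termination_current		= 100000,	/* uA */
	.mains_current_limit		= 1800000,	/* uA */
	.usb_hc_current_limit		= 500000,	/* uA */
	/* Currents/voltages left at zero use the factory defaults. */
	.soft_cold_temp_limit		= SMB347_TEMP_USE_DEFAULT,
	.soft_hot_temp_limit		= SMB347_TEMP_USE_DEFAULT,
	.hard_cold_temp_limit		= SMB347_TEMP_USE_DEFAULT,
	.hard_hot_temp_limit		= SMB347_TEMP_USE_DEFAULT,
	.use_mains			= true,
	.use_usb			= true,
	.irq_gpio			= -1,	/* interrupt line not wired */
	.enable_control			= SMB347_CHG_ENABLE_SW,
};
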
diff --git a/include/linux/power/twl4030_madc_battery.h b/include/linux/power/twl4030_madc_battery.h
new file mode 100644
index 000000000..23110dc77
--- /dev/null
+++ b/include/linux/power/twl4030_madc_battery.h
@@ -0,0 +1,39 @@
+/*
+ * Dumb driver for LiIon batteries using TWL4030 madc.
+ *
+ * Copyright 2013 Golden Delicious Computers
+ * Nikolaus Schaller <hns@goldelico.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TWL4030_MADC_BATTERY_H
+#define __TWL4030_MADC_BATTERY_H
+
+/*
+ * Usually we can assume 100% @ 4.15V and 0% @ 3.3V but curves differ for
+ * charging and discharging!
+ */
+
+struct twl4030_madc_bat_calibration {
+ short voltage; /* in mV - specify -1 for end of list */
+ short level; /* in percent (0 .. 100%) */
+};
+
+struct twl4030_madc_bat_platform_data {
+ unsigned int capacity; /* total capacity in uAh */
+ struct twl4030_madc_bat_calibration *charging;
+ int charging_size;
+ struct twl4030_madc_bat_calibration *discharging;
+ int discharging_size;
+};
+
+#endif
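
A hedged example of how a board might describe its battery with this platform data: the voltage/level pairs are invented, each calibration list runs from highest voltage down and is terminated by the -1 sentinel noted in the struct comment, and the example_* names are hypothetical.

#include <linux/kernel.h>
#include <linux/power/twl4030_madc_battery.h>

/* Made-up discharge curve: highest voltage first, -1 terminates the list. */
static struct twl4030_madc_bat_calibration example_discharging[] = {
	{ .voltage = 4150, .level = 100 },
	{ .voltage = 3900, .level =  75 },
	{ .voltage = 3700, .level =  50 },
	{ .voltage = 3500, .level =  25 },
	{ .voltage = 3300, .level =   0 },
	{ .voltage = -1 },			/* end of list */
};

static struct twl4030_madc_bat_calibration example_charging[] = {
	{ .voltage = 4200, .level = 100 },
	{ .voltage = 4000, .level =  70 },
	{ .voltage = 3600, .level =  20 },
	{ .voltage = 3300, .level =   0 },
	{ .voltage = -1 },
};

static struct twl4030_madc_bat_platform_data example_madc_bat_pdata = {
	.capacity		= 1200000,	/* 1200 mAh in uAh */
	.charging		= example_charging,
	.charging_size		= ARRAY_SIZE(example_charging),
	.discharging		= example_discharging,
	.discharging_size	= ARRAY_SIZE(example_discharging),
};
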
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
new file mode 100644
index 000000000..a80f1fd01
--- /dev/null
+++ b/include/linux/power_supply.h
@@ -0,0 +1,390 @@
+/*
+ * Universal power supply monitor class
+ *
+ * Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
+ * Copyright © 2004 Szabolcs Gyurko
+ * Copyright © 2003 Ian Molton <spyro@f2s.com>
+ *
+ * Modified: 2004, Oct Szabolcs Gyurko
+ *
+ * You may use this code as per GPL version 2
+ */
+
+#ifndef __LINUX_POWER_SUPPLY_H__
+#define __LINUX_POWER_SUPPLY_H__
+
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+
+/*
+ * All voltages, currents, charges, energies, time and temperatures in uV,
+ * µA, µAh, µWh, seconds and tenths of degree Celsius unless otherwise
+ * stated. It is the driver's job to convert its raw values to the units in which
+ * this class operates.
+ */
+
+/*
+ * For systems where the charger determines the maximum battery capacity
+ * the min and max fields should be used to present these values to user
+ * space. Unused/unknown fields will not appear in sysfs.
+ */
+
+enum {
+ POWER_SUPPLY_STATUS_UNKNOWN = 0,
+ POWER_SUPPLY_STATUS_CHARGING,
+ POWER_SUPPLY_STATUS_DISCHARGING,
+ POWER_SUPPLY_STATUS_NOT_CHARGING,
+ POWER_SUPPLY_STATUS_FULL,
+};
+
+enum {
+ POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0,
+ POWER_SUPPLY_CHARGE_TYPE_NONE,
+ POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
+ POWER_SUPPLY_CHARGE_TYPE_FAST,
+};
+
+enum {
+ POWER_SUPPLY_HEALTH_UNKNOWN = 0,
+ POWER_SUPPLY_HEALTH_GOOD,
+ POWER_SUPPLY_HEALTH_OVERHEAT,
+ POWER_SUPPLY_HEALTH_DEAD,
+ POWER_SUPPLY_HEALTH_OVERVOLTAGE,
+ POWER_SUPPLY_HEALTH_UNSPEC_FAILURE,
+ POWER_SUPPLY_HEALTH_COLD,
+ POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
+ POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
+};
+
+enum {
+ POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0,
+ POWER_SUPPLY_TECHNOLOGY_NiMH,
+ POWER_SUPPLY_TECHNOLOGY_LION,
+ POWER_SUPPLY_TECHNOLOGY_LIPO,
+ POWER_SUPPLY_TECHNOLOGY_LiFe,
+ POWER_SUPPLY_TECHNOLOGY_NiCd,
+ POWER_SUPPLY_TECHNOLOGY_LiMn,
+};
+
+enum {
+ POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0,
+ POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL,
+ POWER_SUPPLY_CAPACITY_LEVEL_LOW,
+ POWER_SUPPLY_CAPACITY_LEVEL_NORMAL,
+ POWER_SUPPLY_CAPACITY_LEVEL_HIGH,
+ POWER_SUPPLY_CAPACITY_LEVEL_FULL,
+};
+
+enum {
+ POWER_SUPPLY_SCOPE_UNKNOWN = 0,
+ POWER_SUPPLY_SCOPE_SYSTEM,
+ POWER_SUPPLY_SCOPE_DEVICE,
+};
+
+enum power_supply_property {
+ /* Properties of type `int' */
+ POWER_SUPPLY_PROP_STATUS = 0,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_AUTHENTIC,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_BOOT,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CURRENT_BOOT,
+ POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_POWER_AVG,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_EMPTY,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_AVG,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN,
+ POWER_SUPPLY_PROP_ENERGY_FULL,
+ POWER_SUPPLY_PROP_ENERGY_EMPTY,
+ POWER_SUPPLY_PROP_ENERGY_NOW,
+ POWER_SUPPLY_PROP_ENERGY_AVG,
+ POWER_SUPPLY_PROP_CAPACITY, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TEMP_MAX,
+ POWER_SUPPLY_PROP_TEMP_MIN,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
+ POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_CALIBRATE,
+ /* Properties of type `const char *' */
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
+};
+
+enum power_supply_type {
+ POWER_SUPPLY_TYPE_UNKNOWN = 0,
+ POWER_SUPPLY_TYPE_BATTERY,
+ POWER_SUPPLY_TYPE_UPS,
+ POWER_SUPPLY_TYPE_MAINS,
+ POWER_SUPPLY_TYPE_USB, /* Standard Downstream Port */
+ POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */
+ POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */
+ POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */
+};
+
+enum power_supply_notifier_events {
+ PSY_EVENT_PROP_CHANGED,
+};
+
+union power_supply_propval {
+ int intval;
+ const char *strval;
+};
+
+struct device_node;
+struct power_supply;
+
+/* Run-time specific power supply configuration */
+struct power_supply_config {
+ struct device_node *of_node;
+ /* Driver private data */
+ void *drv_data;
+
+ char **supplied_to;
+ size_t num_supplicants;
+};
+
+/* Description of power supply */
+struct power_supply_desc {
+ const char *name;
+ enum power_supply_type type;
+ enum power_supply_property *properties;
+ size_t num_properties;
+
+ /*
+ * Functions for drivers implementing power supply class.
+ * These shouldn't be called directly by other drivers for accessing
+ * this power supply. Instead use power_supply_*() functions (for
+ * example power_supply_get_property()).
+ */
+ int (*get_property)(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+ int (*set_property)(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val);
+ int (*property_is_writeable)(struct power_supply *psy,
+ enum power_supply_property psp);
+ void (*external_power_changed)(struct power_supply *psy);
+ void (*set_charged)(struct power_supply *psy);
+
+ /*
+ * Set if thermal zone should not be created for this power supply.
+ * For example for virtual supplies forwarding calls to actual
+ * sensors or other supplies.
+ */
+ bool no_thermal;
+ /* For APM emulation, think legacy userspace. */
+ int use_for_apm;
+};
+
+struct power_supply {
+ const struct power_supply_desc *desc;
+
+ char **supplied_to;
+ size_t num_supplicants;
+
+ char **supplied_from;
+ size_t num_supplies;
+ struct device_node *of_node;
+
+ /* Driver private data */
+ void *drv_data;
+
+ /* private */
+ struct device dev;
+ struct work_struct changed_work;
+ struct delayed_work deferred_register_work;
+ spinlock_t changed_lock;
+ bool changed;
+ atomic_t use_cnt;
+#ifdef CONFIG_THERMAL
+ struct thermal_zone_device *tzd;
+ struct thermal_cooling_device *tcd;
+#endif
+
+#ifdef CONFIG_LEDS_TRIGGERS
+ struct led_trigger *charging_full_trig;
+ char *charging_full_trig_name;
+ struct led_trigger *charging_trig;
+ char *charging_trig_name;
+ struct led_trigger *full_trig;
+ char *full_trig_name;
+ struct led_trigger *online_trig;
+ char *online_trig_name;
+ struct led_trigger *charging_blink_full_solid_trig;
+ char *charging_blink_full_solid_trig_name;
+#endif
+};
+
+/*
+ * This is the recommended structure for specifying static power supply
+ * parameters. It is generic and parametrizable for different power supplies.
+ * The power supply class itself does not use it, but it is what most
+ * platform drivers implementing this class should try to reuse for
+ * consistency.
+ */
+
+struct power_supply_info {
+ const char *name;
+ int technology;
+ int voltage_max_design;
+ int voltage_min_design;
+ int charge_full_design;
+ int charge_empty_design;
+ int energy_full_design;
+ int energy_empty_design;
+ int use_for_apm;
+};
+
+extern struct atomic_notifier_head power_supply_notifier;
+extern int power_supply_reg_notifier(struct notifier_block *nb);
+extern void power_supply_unreg_notifier(struct notifier_block *nb);
+extern struct power_supply *power_supply_get_by_name(const char *name);
+extern void power_supply_put(struct power_supply *psy);
+#ifdef CONFIG_OF
+extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
+ const char *property);
+#else /* !CONFIG_OF */
+static inline struct power_supply *
+power_supply_get_by_phandle(struct device_node *np, const char *property)
+{ return NULL; }
+#endif /* CONFIG_OF */
+extern void power_supply_changed(struct power_supply *psy);
+extern int power_supply_am_i_supplied(struct power_supply *psy);
+extern int power_supply_set_battery_charged(struct power_supply *psy);
+
+#ifdef CONFIG_POWER_SUPPLY
+extern int power_supply_is_system_supplied(void);
+#else
+static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
+#endif
+
+extern int power_supply_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+extern int power_supply_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val);
+extern int power_supply_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp);
+extern void power_supply_external_power_changed(struct power_supply *psy);
+
+extern struct power_supply *__must_check
+power_supply_register(struct device *parent,
+ const struct power_supply_desc *desc,
+ const struct power_supply_config *cfg);
+extern struct power_supply *__must_check
+power_supply_register_no_ws(struct device *parent,
+ const struct power_supply_desc *desc,
+ const struct power_supply_config *cfg);
+extern struct power_supply *__must_check
+devm_power_supply_register(struct device *parent,
+ const struct power_supply_desc *desc,
+ const struct power_supply_config *cfg);
+extern struct power_supply *__must_check
+devm_power_supply_register_no_ws(struct device *parent,
+ const struct power_supply_desc *desc,
+ const struct power_supply_config *cfg);
+extern void power_supply_unregister(struct power_supply *psy);
+extern int power_supply_powers(struct power_supply *psy, struct device *dev);
+
+extern void *power_supply_get_drvdata(struct power_supply *psy);
+/* For APM emulation, think legacy userspace. */
+extern struct class *power_supply_class;
+
+static inline bool power_supply_is_amp_property(enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ case POWER_SUPPLY_PROP_CHARGE_EMPTY:
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ case POWER_SUPPLY_PROP_CHARGE_AVG:
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ case POWER_SUPPLY_PROP_CURRENT_BOOT:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline bool power_supply_is_watt_property(enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN:
+ case POWER_SUPPLY_PROP_ENERGY_FULL:
+ case POWER_SUPPLY_PROP_ENERGY_EMPTY:
+ case POWER_SUPPLY_PROP_ENERGY_NOW:
+ case POWER_SUPPLY_PROP_ENERGY_AVG:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ case POWER_SUPPLY_PROP_VOLTAGE_BOOT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_POWER_NOW:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* __LINUX_POWER_SUPPLY_H__ */
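
To show how the descriptor/config split above is meant to be used, here is a minimal, hypothetical driver fragment: a read-only battery supply exposing two properties, registered with devm_power_supply_register(). The example_* names and the hard-coded readings are placeholders, not a real driver.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>

static enum power_supply_property example_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_CAPACITY,
};

static int example_get_property(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		return 0;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = 50;	/* percent; placeholder reading */
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct power_supply_desc example_desc = {
	.name		= "example-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= example_props,
	.num_properties	= ARRAY_SIZE(example_props),
	.get_property	= example_get_property,
};

static int example_probe(struct platform_device *pdev)
{
	struct power_supply_config cfg = { .of_node = pdev->dev.of_node };
	struct power_supply *psy;

	/* Device-managed: unregistered automatically on driver detach. */
	psy = devm_power_supply_register(&pdev->dev, &example_desc, &cfg);
	return PTR_ERR_OR_ZERO(psy);
}
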
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
new file mode 100644
index 000000000..4e250417e
--- /dev/null
+++ b/include/linux/powercap.h
@@ -0,0 +1,325 @@
+/*
+ * powercap.h: Data types and headers for sysfs power capping interface
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#ifndef __POWERCAP_H__
+#define __POWERCAP_H__
+
+#include <linux/device.h>
+#include <linux/idr.h>
+
+/*
+ * A power cap class device can contain multiple powercap control_types.
+ * Each control_type can have multiple power zones, which can be independently
+ * controlled. Each power zone can have one or more constraints.
+ */
+
+struct powercap_control_type;
+struct powercap_zone;
+struct powercap_zone_constraint;
+
+/**
+ * struct powercap_control_type_ops - Define control type callbacks
+ * @set_enable: Enable/Disable the whole control type.
+ * Default is enabled, but this callback allows all zones
+ * to be put in the disabled state and removes any applied
+ * power limits. If disabled, a power zone can only be
+ * monitored, not controlled.
+ * @get_enable: Get Enable/Disable status.
+ * @release: Callback to inform that the last reference to this
+ * control type is closed, so it is safe to free the data
+ * structure associated with this control type.
+ * This callback is mandatory if the client owns the memory
+ * for the control type.
+ *
+ * This structure defines control type callbacks to be implemented by client
+ * drivers
+ */
+struct powercap_control_type_ops {
+ int (*set_enable) (struct powercap_control_type *, bool mode);
+ int (*get_enable) (struct powercap_control_type *, bool *mode);
+ int (*release) (struct powercap_control_type *);
+};
+
+/**
+ * struct powercap_control_type - Defines a powercap control_type
+ * @dev: device for this control_type
+ * @idr: idr to have unique id for its child power zones
+ * @nr_zones: counter for the number of zones of this type
+ * @ops: Pointer to callback struct
+ * @lock: mutex for control type
+ * @allocated: It is possible that the client owns the memory
+ * used by this structure. In this case
+ * this flag is set to false by the framework to
+ * prevent deallocation during the release process.
+ * Otherwise this flag is set to true.
+ * @node: link to the control_type list
+ *
+ * Defines powercap control_type. This acts as a container for power
+ * zones, which use the same method to control power, e.g. RAPL, RAPL-PCI etc.
+ * All fields are private and should not be used by client drivers.
+ */
+struct powercap_control_type {
+ struct device dev;
+ struct idr idr;
+ int nr_zones;
+ const struct powercap_control_type_ops *ops;
+ struct mutex lock;
+ bool allocated;
+ struct list_head node;
+};
+
+/**
+ * struct powercap_zone_ops - Define power zone callbacks
+ * @get_max_energy_range_uj: Get maximum range of energy counter in
+ * micro-joules.
+ * @get_energy_uj: Get current energy counter in micro-joules.
+ * @reset_energy_uj: Reset micro-joules energy counter.
+ * @get_max_power_range_uw: Get maximum range of power counter in
+ * micro-watts.
+ * @get_power_uw: Get current power counter in micro-watts.
+ * @set_enable: Enable/Disable power zone controls.
+ * Default is enabled.
+ * @get_enable: Get Enable/Disable status.
+ * @release: Callback to inform that the last reference to this
+ * power zone is closed, so it is safe to free the
+ * data structure associated with this
+ * zone. Mandatory if the client driver owns
+ * the power_zone memory.
+ *
+ * This structure defines zone callbacks to be implemented by client drivers.
+ * Client drivers can define both energy- and power-related callbacks, but at
+ * least one type (either power or energy) is mandatory. Client drivers
+ * should handle mutual exclusion, if required, in callbacks.
+ */
+struct powercap_zone_ops {
+ int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *);
+ int (*get_energy_uj) (struct powercap_zone *, u64 *);
+ int (*reset_energy_uj) (struct powercap_zone *);
+ int (*get_max_power_range_uw) (struct powercap_zone *, u64 *);
+ int (*get_power_uw) (struct powercap_zone *, u64 *);
+ int (*set_enable) (struct powercap_zone *, bool mode);
+ int (*get_enable) (struct powercap_zone *, bool *mode);
+ int (*release) (struct powercap_zone *);
+};
+
+#define POWERCAP_ZONE_MAX_ATTRS 6
+#define POWERCAP_CONSTRAINTS_ATTRS 8
+#define MAX_CONSTRAINTS_PER_ZONE 10
+/**
+ * struct powercap_zone - Defines an instance of a power cap zone
+ * @id: Unique id
+ * @name: Power zone name.
+ * @control_type_inst: Control type instance for this zone.
+ * @ops: Pointer to the zone operation structure.
+ * @dev: Instance of a device.
+ * @const_id_cnt: Number of constraints defined.
+ * @idr: Instance to an idr entry for children zones.
+ * @parent_idr: To remove reference from the parent idr.
+ * @private_data: Private data pointer if any for this zone.
+ * @zone_dev_attrs: Attributes associated with this device.
+ * @zone_attr_count: Attribute count.
+ * @dev_zone_attr_group: Attribute group for attributes.
+ * @dev_attr_groups: Attribute group store to register with device.
+ * @allocated: It is possible that the client owns the memory
+ * used by this structure. In this case
+ * this flag is set to false by the framework to
+ * prevent deallocation during the release process.
+ * Otherwise this flag is set to true.
+ * @constraint_ptr: List of constraints for this zone.
+ *
+ * This defines a power zone instance. The fields of this structure are
+ * private, and should not be used by client drivers.
+ */
+struct powercap_zone {
+ int id;
+ char *name;
+ void *control_type_inst;
+ const struct powercap_zone_ops *ops;
+ struct device dev;
+ int const_id_cnt;
+ struct idr idr;
+ struct idr *parent_idr;
+ void *private_data;
+ struct attribute **zone_dev_attrs;
+ int zone_attr_count;
+ struct attribute_group dev_zone_attr_group;
+ const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */
+ bool allocated;
+ struct powercap_zone_constraint *constraints;
+};
+
+/**
+ * struct powercap_zone_constraint_ops - Define constraint callbacks
+ * @set_power_limit_uw: Set power limit in micro-watts.
+ * @get_power_limit_uw: Get power limit in micro-watts.
+ * @set_time_window_us: Set time window in micro-seconds.
+ * @get_time_window_us: Get time window in micro-seconds.
+ * @get_max_power_uw: Get max power allowed in micro-watts.
+ * @get_min_power_uw: Get min power allowed in micro-watts.
+ * @get_max_time_window_us: Get max time window allowed in micro-seconds.
+ * @get_min_time_window_us: Get min time window allowed in micro-seconds.
+ * @get_name: Get the name of constraint
+ *
+ * This structure is used to define the constraint callbacks for the client
+ * drivers. The following callbacks are mandatory and can't be NULL:
+ * set_power_limit_uw
+ * get_power_limit_uw
+ * set_time_window_us
+ * get_time_window_us
+ * get_name
+ * Client drivers should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_constraint_ops {
+ int (*set_power_limit_uw) (struct powercap_zone *, int, u64);
+ int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *);
+ int (*set_time_window_us) (struct powercap_zone *, int, u64);
+ int (*get_time_window_us) (struct powercap_zone *, int, u64 *);
+ int (*get_max_power_uw) (struct powercap_zone *, int, u64 *);
+ int (*get_min_power_uw) (struct powercap_zone *, int, u64 *);
+ int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *);
+ int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *);
+ const char *(*get_name) (struct powercap_zone *, int);
+};
+
+/**
+ * struct powercap_zone_constraint - Defines an instance of a constraint
+ * @id: Instance Id of this constraint.
+ * @power_zone: Pointer to the power zone for this constraint.
+ * @ops: Pointer to the constraint callbacks.
+ *
+ * This defines a constraint instance.
+ */
+struct powercap_zone_constraint {
+ int id;
+ struct powercap_zone *power_zone;
+ struct powercap_zone_constraint_ops *ops;
+};
+
+
+/* For clients to get their device pointer, may be used for dev_dbgs */
+#define POWERCAP_GET_DEV(power_zone) (&power_zone->dev)
+
+/**
+* powercap_set_zone_data() - Set private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+* @pdata: A pointer to the user private data.
+*
+* Allows client drivers to associate some private data with a zone instance.
+*/
+static inline void powercap_set_zone_data(struct powercap_zone *power_zone,
+ void *pdata)
+{
+ if (power_zone)
+ power_zone->private_data = pdata;
+}
+
+/**
+* powercap_get_zone_data() - Get private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+*
+* Allows client drivers to get the private data associated with a zone,
+* previously set using a call to powercap_set_zone_data().
+*/
+static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
+{
+ if (power_zone)
+ return power_zone->private_data;
+ return NULL;
+}
+
+/**
+* powercap_register_control_type() - Register a control_type with framework
+* @control_type: Pointer to client allocated memory for the control type
+* structure storage. If this is NULL, powercap framework
+* will allocate memory and own it.
+* Advantage of this parameter is that client can embed
+* this data in its data structures and allocate in a
+* single call, preventing multiple allocations.
+* @name: The name of this control_type, which will be shown
+* in the sysfs interface.
+* @ops: Callbacks for control type. This parameter is optional.
+*
+* Used to create a control_type with the power capping class. Here control_type
+* can represent a type of technology, which can control a range of power zones.
+* For example, a control_type can be RAPL (Running Average Power Limit) on
+* Intel® 64 and IA-32 processor architectures. The name can be any string
+* but must be unique; otherwise this function returns NULL.
+* A pointer to the control_type instance is returned on success.
+*/
+struct powercap_control_type *powercap_register_control_type(
+ struct powercap_control_type *control_type,
+ const char *name,
+ const struct powercap_control_type_ops *ops);
+
+/**
+* powercap_unregister_control_type() - Unregister a control_type from framework
+* @instance: A pointer to the valid control_type instance.
+*
+* Used to unregister a control_type with the power capping class.
+* All power zones registered under this control type have to be unregistered
+* before calling this function, or it will fail with an error code.
+*/
+int powercap_unregister_control_type(struct powercap_control_type *instance);
+
+/* Zone register/unregister API */
+
+/**
+* powercap_register_zone() - Register a power zone
+* @power_zone: Pointer to client allocated memory for the power zone structure
+* storage. If this is NULL, powercap framework will allocate
+* memory and own it. Advantage of this parameter is that client
+* can embed this data in its data structures and allocate in a
+* single call, preventing multiple allocations.
+* @control_type: A control_type instance under which this zone operates.
+* @name: A name for this zone.
+* @parent: A pointer to the parent power zone instance if any or NULL
+* @ops: Pointer to zone operation callback structure.
+* @nr_constraints: Number of constraints for this zone
+* @const_ops: Pointer to constraint callback structure
+*
+* Register a power zone under a given control type. A power zone must register
+* a pointer to a structure representing zone callbacks.
+* A power zone can be located under a parent power zone, in which case @parent
+* should point to it. Otherwise, if @parent is NULL, the new power zone will
+* be located directly under the given control type.
+* For each power zone there may be a number of constraints that appear in the
+* sysfs under that zone as attributes with unique numeric IDs.
+* Returns pointer to the power_zone on success.
+*/
+struct powercap_zone *powercap_register_zone(
+ struct powercap_zone *power_zone,
+ struct powercap_control_type *control_type,
+ const char *name,
+ struct powercap_zone *parent,
+ const struct powercap_zone_ops *ops,
+ int nr_constraints,
+ struct powercap_zone_constraint_ops *const_ops);
+
+/**
+* powercap_unregister_zone() - Unregister a zone device
+* @control_type: A pointer to the valid instance of a control_type.
+* @power_zone: A pointer to the valid zone instance for a control_type
+*
+* Used to unregister a zone device for a control_type. Caller should
+* make sure that children for this zone are unregistered first.
+*/
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+ struct powercap_zone *power_zone);
+
+#endif
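
A compressed, hypothetical sketch of the registration flow documented above: one control type and one child zone exposing only energy callbacks, with no constraints. The example_* names, the counter values and the simplified error handling (treating both NULL and error pointers as failure) are assumptions for illustration only.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/powercap.h>

static int example_get_energy_uj(struct powercap_zone *zone, u64 *energy)
{
	*energy = 123456ULL;	/* placeholder: read a real counter here */
	return 0;
}

static int example_get_max_energy_range_uj(struct powercap_zone *zone, u64 *max)
{
	*max = ~0ULL;		/* placeholder counter range */
	return 0;
}

static const struct powercap_zone_ops example_zone_ops = {
	.get_energy_uj			= example_get_energy_uj,
	.get_max_energy_range_uj	= example_get_max_energy_range_uj,
};

static struct powercap_control_type *example_ct;
static struct powercap_zone *example_zone;

static int __init example_powercap_init(void)
{
	/* NULL first argument: let the framework allocate and own the memory. */
	example_ct = powercap_register_control_type(NULL, "example", NULL);
	if (IS_ERR_OR_NULL(example_ct))
		return -ENODEV;		/* error reporting simplified */

	example_zone = powercap_register_zone(NULL, example_ct, "package-0",
					      NULL, &example_zone_ops, 0, NULL);
	if (IS_ERR_OR_NULL(example_zone)) {
		powercap_unregister_control_type(example_ct);
		return -ENODEV;
	}
	return 0;
}
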
diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
new file mode 100644
index 000000000..4ea1d377e
--- /dev/null
+++ b/include/linux/ppp-comp.h
@@ -0,0 +1,106 @@
+/*
+ * ppp-comp.h - Definitions for doing PPP packet compression.
+ *
+ * Copyright 1994-1998 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#ifndef _NET_PPP_COMP_H
+#define _NET_PPP_COMP_H
+
+#include <uapi/linux/ppp-comp.h>
+
+
+struct module;
+
+/*
+ * The following symbols control whether we include code for
+ * various compression methods.
+ */
+
+#ifndef DO_BSD_COMPRESS
+#define DO_BSD_COMPRESS 1 /* by default, include BSD-Compress */
+#endif
+#ifndef DO_DEFLATE
+#define DO_DEFLATE 1 /* by default, include Deflate */
+#endif
+#define DO_PREDICTOR_1 0
+#define DO_PREDICTOR_2 0
+
+/*
+ * Structure giving methods for compression/decompression.
+ */
+
+struct compressor {
+ int compress_proto; /* CCP compression protocol number */
+
+ /* Allocate space for a compressor (transmit side) */
+ void *(*comp_alloc) (unsigned char *options, int opt_len);
+
+ /* Free space used by a compressor */
+ void (*comp_free) (void *state);
+
+ /* Initialize a compressor */
+ int (*comp_init) (void *state, unsigned char *options,
+ int opt_len, int unit, int opthdr, int debug);
+
+ /* Reset a compressor */
+ void (*comp_reset) (void *state);
+
+ /* Compress a packet */
+ int (*compress) (void *state, unsigned char *rptr,
+ unsigned char *obuf, int isize, int osize);
+
+ /* Return compression statistics */
+ void (*comp_stat) (void *state, struct compstat *stats);
+
+ /* Allocate space for a decompressor (receive side) */
+ void *(*decomp_alloc) (unsigned char *options, int opt_len);
+
+ /* Free space used by a decompressor */
+ void (*decomp_free) (void *state);
+
+ /* Initialize a decompressor */
+ int (*decomp_init) (void *state, unsigned char *options,
+ int opt_len, int unit, int opthdr, int mru,
+ int debug);
+
+ /* Reset a decompressor */
+ void (*decomp_reset) (void *state);
+
+ /* Decompress a packet. */
+ int (*decompress) (void *state, unsigned char *ibuf, int isize,
+ unsigned char *obuf, int osize);
+
+ /* Update state for an incompressible packet received */
+ void (*incomp) (void *state, unsigned char *ibuf, int icnt);
+
+ /* Return decompression statistics */
+ void (*decomp_stat) (void *state, struct compstat *stats);
+
+ /* Used in locking compressor modules */
+ struct module *owner;
+ /* Extra skb space needed by the compressor algorithm */
+ unsigned int comp_extra;
+};
+
+/*
+ * The return value from decompress routine is the length of the
+ * decompressed packet if successful, otherwise DECOMP_ERROR
+ * or DECOMP_FATALERROR if an error occurred.
+ *
+ * We need to make this distinction so that we can disable certain
+ * useful functionality, namely sending a CCP reset-request as a result
+ * of an error detected after decompression. This is to avoid infringing
+ * a patent held by Motorola.
+ * Don't you just lurve software patents.
+ */
+
+#define DECOMP_ERROR -1 /* error detected before decomp. */
+#define DECOMP_FATALERROR -2 /* error detected after decomp. */
+
+extern int ppp_register_compressor(struct compressor *);
+extern void ppp_unregister_compressor(struct compressor *);
+#endif /* _NET_PPP_COMP_H */
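
For orientation, a skeletal and purely hypothetical compressor module: only a few transmit-side hooks are stubbed out, the CCP protocol number is a placeholder, and a real compressor must also provide the decompressor callbacks plus comp_reset/comp_stat before CCP can negotiate it.

#include <linux/module.h>
#include <linux/ppp-comp.h>
#include <linux/slab.h>

/* Hypothetical skeleton; every callback body is a stub. */
static void *example_comp_alloc(unsigned char *options, int opt_len)
{
	return kzalloc(16, GFP_KERNEL);		/* placeholder state */
}

static void example_comp_free(void *state)
{
	kfree(state);
}

static int example_comp_init(void *state, unsigned char *options,
			     int opt_len, int unit, int opthdr, int debug)
{
	return 1;	/* non-zero: accept the negotiated options */
}

static int example_compress(void *state, unsigned char *rptr,
			    unsigned char *obuf, int isize, int osize)
{
	return 0;	/* 0: send this packet uncompressed */
}

static struct compressor example_compressor = {
	.compress_proto	= CI_DEFLATE,	/* placeholder CCP protocol number */
	.comp_alloc	= example_comp_alloc,
	.comp_free	= example_comp_free,
	.comp_init	= example_comp_init,
	.compress	= example_compress,
	/* .decomp_*, .comp_reset, .comp_stat omitted in this sketch. */
	.owner		= THIS_MODULE,
};

static int __init example_ppp_comp_init(void)
{
	return ppp_register_compressor(&example_compressor);
}
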
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
new file mode 100644
index 000000000..5d87f810a
--- /dev/null
+++ b/include/linux/ppp_channel.h
@@ -0,0 +1,88 @@
+#ifndef _PPP_CHANNEL_H_
+#define _PPP_CHANNEL_H_
+/*
+ * Definitions for the interface between the generic PPP code
+ * and a PPP channel.
+ *
+ * A PPP channel provides a way for the generic PPP code to send
+ * and receive packets over some sort of communications medium.
+ * Packets are stored in sk_buffs and have the 2-byte PPP protocol
+ * number at the start, but not the address and control bytes.
+ *
+ * Copyright 1999 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * ==FILEVERSION 20000322==
+ */
+
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/poll.h>
+#include <net/net_namespace.h>
+
+struct ppp_channel;
+
+struct ppp_channel_ops {
+ /* Send a packet (or multilink fragment) on this channel.
+ Returns 1 if it was accepted, 0 if not. */
+ int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
+ /* Handle an ioctl call that has come in via /dev/ppp. */
+ int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+};
+
+struct ppp_channel {
+ void *private; /* channel private data */
+ const struct ppp_channel_ops *ops; /* operations for this channel */
+ int mtu; /* max transmit packet size */
+ int hdrlen; /* amount of headroom channel needs */
+ void *ppp; /* opaque to channel */
+ int speed; /* transfer rate (bytes/second) */
+ /* the following is not used at present */
+ int latency; /* overhead time in milliseconds */
+};
+
+#ifdef __KERNEL__
+/* Called by the channel when it can send some more data. */
+extern void ppp_output_wakeup(struct ppp_channel *);
+
+/* Called by the channel to process a received PPP packet.
+ The packet should have just the 2-byte PPP protocol header. */
+extern void ppp_input(struct ppp_channel *, struct sk_buff *);
+
+/* Called by the channel when an input error occurs, indicating
+ that we may have missed a packet. */
+extern void ppp_input_error(struct ppp_channel *, int code);
+
+/* Attach a channel to a given PPP unit in specified net. */
+extern int ppp_register_net_channel(struct net *, struct ppp_channel *);
+
+/* Attach a channel to a given PPP unit. */
+extern int ppp_register_channel(struct ppp_channel *);
+
+/* Detach a channel from its PPP unit (e.g. on hangup). */
+extern void ppp_unregister_channel(struct ppp_channel *);
+
+/* Get the channel number for a channel */
+extern int ppp_channel_index(struct ppp_channel *);
+
+/* Get the unit number associated with a channel, or -1 if none */
+extern int ppp_unit_number(struct ppp_channel *);
+
+/* Get the device name associated with a channel, or NULL if none */
+extern char *ppp_dev_name(struct ppp_channel *);
+
+/*
+ * SMP locking notes:
+ * The channel code must ensure that when it calls ppp_unregister_channel,
+ * nothing is executing in any of the procedures above, for that
+ * channel. The generic layer will ensure that nothing is executing
+ * in the start_xmit and ioctl routines for the channel by the time
+ * that ppp_unregister_channel returns.
+ */
+
+#endif /* __KERNEL__ */
+#endif
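
As a usage sketch (hypothetical, not a real channel driver), this is how a serial-like driver could describe itself to the generic PPP layer: start_xmit consumes the skb and returns 1 when it accepts the packet, and the MTU/headroom values are placeholders.

#include <linux/module.h>
#include <linux/ppp_channel.h>
#include <linux/skbuff.h>

static int example_chan_start_xmit(struct ppp_channel *chan,
				   struct sk_buff *skb)
{
	/* Hand the frame to the underlying medium here. */
	kfree_skb(skb);		/* placeholder: pretend it was transmitted */
	return 1;		/* 1 = accepted; 0 = try again later */
}

static int example_chan_ioctl(struct ppp_channel *chan,
			      unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* no channel-specific ioctls */
}

static const struct ppp_channel_ops example_chan_ops = {
	.start_xmit	= example_chan_start_xmit,
	.ioctl		= example_chan_ioctl,
};

static struct ppp_channel example_chan = {
	.ops	= &example_chan_ops,
	.mtu	= 1500,		/* placeholder */
	.hdrlen	= 2,		/* headroom the driver needs before the frame */
};

static int __init example_chan_init(void)
{
	/* Registers in the init net namespace; use
	 * ppp_register_net_channel() to pick a specific one. */
	return ppp_register_channel(&example_chan);
}
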
diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h
new file mode 100644
index 000000000..28aa0237c
--- /dev/null
+++ b/include/linux/ppp_defs.h
@@ -0,0 +1,17 @@
+/*
+ * ppp_defs.h - PPP definitions.
+ *
+ * Copyright 1994-2000 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#ifndef _PPP_DEFS_H_
+#define _PPP_DEFS_H_
+
+#include <linux/crc-ccitt.h>
+#include <uapi/linux/ppp_defs.h>
+
+#define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c)
+#endif /* _PPP_DEFS_H_ */
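
PPP_FCS() folds one byte into a running CRC-CCITT frame check sequence. A small, hedged helper showing the usual pattern, assuming the PPP_INITFCS/PPP_GOODFCS constants from the UAPI ppp_defs.h included above:

#include <linux/ppp_defs.h>
#include <linux/types.h>

/* Illustrative helper: compute the FCS over a buffer of PPP bytes. */
static u16 example_ppp_fcs(const u8 *buf, size_t len)
{
	u16 fcs = PPP_INITFCS;		/* 0xffff */
	size_t i;

	for (i = 0; i < len; i++)
		fcs = PPP_FCS(fcs, buf[i]);

	/* On receive, a frame is intact when the FCS computed over the
	 * data plus the trailing FCS bytes equals PPP_GOODFCS. */
	return fcs;
}
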
diff --git a/include/linux/pps-gpio.h b/include/linux/pps-gpio.h
new file mode 100644
index 000000000..0035abe41
--- /dev/null
+++ b/include/linux/pps-gpio.h
@@ -0,0 +1,32 @@
+/*
+ * pps-gpio.h -- PPS client for GPIOs
+ *
+ *
+ * Copyright (C) 2011 James Nuss <jamesnuss@nanometrics.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PPS_GPIO_H
+#define _PPS_GPIO_H
+
+struct pps_gpio_platform_data {
+ bool assert_falling_edge;
+ bool capture_clear;
+ unsigned int gpio_pin;
+ const char *gpio_label;
+};
+
+#endif
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
new file mode 100644
index 000000000..1d2cd2124
--- /dev/null
+++ b/include/linux/pps_kernel.h
@@ -0,0 +1,140 @@
+/*
+ * PPS API kernel header
+ *
+ * Copyright (C) 2009 Rodolfo Giometti <giometti@linux.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_PPS_KERNEL_H
+#define LINUX_PPS_KERNEL_H
+
+#include <linux/pps.h>
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/time.h>
+
+/*
+ * Global defines
+ */
+
+struct pps_device;
+
+/* The specific PPS source info */
+struct pps_source_info {
+ char name[PPS_MAX_NAME_LEN]; /* symbolic name */
+ char path[PPS_MAX_NAME_LEN]; /* path of connected device */
+ int mode; /* PPS's allowed mode */
+
+ void (*echo)(struct pps_device *pps,
+ int event, void *data); /* PPS echo function */
+
+ struct module *owner;
+ struct device *dev; /* Parent device for device_create */
+};
+
+struct pps_event_time {
+#ifdef CONFIG_NTP_PPS
+ struct timespec ts_raw;
+#endif /* CONFIG_NTP_PPS */
+ struct timespec ts_real;
+};
+
+/* The main struct */
+struct pps_device {
+ struct pps_source_info info; /* PPS source info */
+
+ struct pps_kparams params; /* PPS's current params */
+
+ __u32 assert_sequence; /* PPS' assert event seq # */
+ __u32 clear_sequence; /* PPS' clear event seq # */
+ struct pps_ktime assert_tu;
+ struct pps_ktime clear_tu;
+ int current_mode; /* PPS mode at event time */
+
+ unsigned int last_ev; /* last PPS event id */
+ wait_queue_head_t queue; /* PPS event queue */
+
+ unsigned int id; /* PPS source unique ID */
+ void const *lookup_cookie; /* pps_lookup_dev only */
+ struct cdev cdev;
+ struct device *dev;
+ struct fasync_struct *async_queue; /* fasync method */
+ spinlock_t lock;
+};
+
+/*
+ * Global variables
+ */
+
+extern const struct attribute_group *pps_groups[];
+
+/*
+ * Internal functions.
+ *
+ * These are not actually part of the exported API, but this is a
+ * convenient header file to put them in.
+ */
+
+extern int pps_register_cdev(struct pps_device *pps);
+extern void pps_unregister_cdev(struct pps_device *pps);
+
+/*
+ * Exported functions
+ */
+
+extern struct pps_device *pps_register_source(
+ struct pps_source_info *info, int default_params);
+extern void pps_unregister_source(struct pps_device *pps);
+extern void pps_event(struct pps_device *pps,
+ struct pps_event_time *ts, int event, void *data);
+/* Look up a pps device by magic cookie */
+struct pps_device *pps_lookup_dev(void const *cookie);
+
+static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
+ struct timespec ts)
+{
+ kt->sec = ts.tv_sec;
+ kt->nsec = ts.tv_nsec;
+}
+
+#ifdef CONFIG_NTP_PPS
+
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+ getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+}
+
+#else /* CONFIG_NTP_PPS */
+
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+ getnstimeofday(&ts->ts_real);
+}
+
+#endif /* CONFIG_NTP_PPS */
+
+/* Subtract known time delay from PPS event time(s) */
+static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
+{
+ ts->ts_real = timespec_sub(ts->ts_real, delta);
+#ifdef CONFIG_NTP_PPS
+ ts->ts_raw = timespec_sub(ts->ts_raw, delta);
+#endif
+}
+
+#endif /* LINUX_PPS_KERNEL_H */
+
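
To tie the pieces together, a hypothetical client: it registers a PPS source and reports an assert edge from an interrupt handler (assumed to be wired up with request_irq() elsewhere). The info fields, default parameters and error handling are illustrative only.

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pps_kernel.h>

static struct pps_device *example_pps;

static struct pps_source_info example_pps_info = {
	.name	= "example-pps",
	.path	= "",			/* no backing device node */
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
		  PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.owner	= THIS_MODULE,
};

static irqreturn_t example_pps_irq(int irq, void *data)
{
	struct pps_event_time ts;

	/* Grab the timestamp as early as possible, then queue the event. */
	pps_get_ts(&ts);
	pps_event(example_pps, &ts, PPS_CAPTUREASSERT, NULL);
	return IRQ_HANDLED;
}

static int __init example_pps_init(void)
{
	example_pps = pps_register_source(&example_pps_info,
					  PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	if (IS_ERR_OR_NULL(example_pps))
		return -ENODEV;		/* error reporting simplified */
	return 0;
}
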
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
new file mode 100644
index 000000000..de83b4eb1
--- /dev/null
+++ b/include/linux/preempt.h
@@ -0,0 +1,195 @@
+#ifndef __LINUX_PREEMPT_H
+#define __LINUX_PREEMPT_H
+
+/*
+ * include/linux/preempt.h - macros for accessing and manipulating
+ * preempt_count (used for kernel preemption, interrupt count, etc.)
+ */
+
+#include <linux/linkage.h>
+#include <linux/list.h>
+
+/*
+ * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
+ * the other bits -- can't include that header due to inclusion hell.
+ */
+#define PREEMPT_NEED_RESCHED 0x80000000
+
+#include <asm/preempt.h>
+
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+extern void preempt_count_add(int val);
+extern void preempt_count_sub(int val);
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#else
+#define preempt_count_add(val) __preempt_count_add(val)
+#define preempt_count_sub(val) __preempt_count_sub(val)
+#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
+#endif
+
+#define __preempt_count_inc() __preempt_count_add(1)
+#define __preempt_count_dec() __preempt_count_sub(1)
+
+#define preempt_count_inc() preempt_count_add(1)
+#define preempt_count_dec() preempt_count_sub(1)
+
+#ifdef CONFIG_PREEMPT_COUNT
+
+#define preempt_disable() \
+do { \
+ preempt_count_inc(); \
+ barrier(); \
+} while (0)
+
+#define sched_preempt_enable_no_resched() \
+do { \
+ barrier(); \
+ preempt_count_dec(); \
+} while (0)
+
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+
+#ifdef CONFIG_PREEMPT
+#define preempt_enable() \
+do { \
+ barrier(); \
+ if (unlikely(preempt_count_dec_and_test())) \
+ __preempt_schedule(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+ if (should_resched()) \
+ __preempt_schedule(); \
+} while (0)
+
+#else
+#define preempt_enable() \
+do { \
+ barrier(); \
+ preempt_count_dec(); \
+} while (0)
+#define preempt_check_resched() do { } while (0)
+#endif
+
+#define preempt_disable_notrace() \
+do { \
+ __preempt_count_inc(); \
+ barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched_notrace() \
+do { \
+ barrier(); \
+ __preempt_count_dec(); \
+} while (0)
+
+#ifdef CONFIG_PREEMPT
+
+#ifndef CONFIG_CONTEXT_TRACKING
+#define __preempt_schedule_context() __preempt_schedule()
+#endif
+
+#define preempt_enable_notrace() \
+do { \
+ barrier(); \
+ if (unlikely(__preempt_count_dec_and_test())) \
+ __preempt_schedule_context(); \
+} while (0)
+#else
+#define preempt_enable_notrace() \
+do { \
+ barrier(); \
+ __preempt_count_dec(); \
+} while (0)
+#endif
+
+#else /* !CONFIG_PREEMPT_COUNT */
+
+/*
+ * Even if we don't have any preemption, we need preempt disable/enable
+ * to be barriers, so that we don't have things like get_user/put_user
+ * that can cause faults and scheduling migrate into our preempt-protected
+ * region.
+ */
+#define preempt_disable() barrier()
+#define sched_preempt_enable_no_resched() barrier()
+#define preempt_enable_no_resched() barrier()
+#define preempt_enable() barrier()
+#define preempt_check_resched() do { } while (0)
+
+#define preempt_disable_notrace() barrier()
+#define preempt_enable_no_resched_notrace() barrier()
+#define preempt_enable_notrace() barrier()
+
+#endif /* CONFIG_PREEMPT_COUNT */
+
+#ifdef MODULE
+/*
+ * Modules have no business playing preemption tricks.
+ */
+#undef sched_preempt_enable_no_resched
+#undef preempt_enable_no_resched
+#undef preempt_enable_no_resched_notrace
+#undef preempt_check_resched
+#endif
+
+#define preempt_set_need_resched() \
+do { \
+ set_preempt_need_resched(); \
+} while (0)
+#define preempt_fold_need_resched() \
+do { \
+ if (tif_need_resched()) \
+ set_preempt_need_resched(); \
+} while (0)
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+struct preempt_notifier;
+
+/**
+ * preempt_ops - notifiers called when a task is preempted and rescheduled
+ * @sched_in: we're about to be rescheduled:
+ * notifier: struct preempt_notifier for the task being scheduled
+ * cpu: cpu we're scheduled on
+ * @sched_out: we've just been preempted
+ * notifier: struct preempt_notifier for the task being preempted
+ * next: the task that's kicking us out
+ *
+ * Please note that sched_in and out are called under different
+ * contexts. sched_out is called with rq lock held and irq disabled
+ * while sched_in is called without rq lock and irq enabled. This
+ * difference is intentional and depended upon by its users.
+ */
+struct preempt_ops {
+ void (*sched_in)(struct preempt_notifier *notifier, int cpu);
+ void (*sched_out)(struct preempt_notifier *notifier,
+ struct task_struct *next);
+};
+
+/**
+ * preempt_notifier - key for installing preemption notifiers
+ * @link: internal use
+ * @ops: defines the notifier functions to be called
+ *
+ * Usually used in conjunction with container_of().
+ */
+struct preempt_notifier {
+ struct hlist_node link;
+ struct preempt_ops *ops;
+};
+
+void preempt_notifier_register(struct preempt_notifier *notifier);
+void preempt_notifier_unregister(struct preempt_notifier *notifier);
+
+static inline void preempt_notifier_init(struct preempt_notifier *notifier,
+ struct preempt_ops *ops)
+{
+ INIT_HLIST_NODE(&notifier->link);
+ notifier->ops = ops;
+}
+
+#endif
+
+#endif /* __LINUX_PREEMPT_H */
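
A small, hypothetical illustration of the notifier interface above: a per-task notifier whose callbacks just trace scheduling transitions. The example_* names are invented; registration attaches the notifier to the task that calls it.

#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>

#ifdef CONFIG_PREEMPT_NOTIFIERS
static void example_sched_in(struct preempt_notifier *notifier, int cpu)
{
	pr_debug("example: scheduled back in on cpu %d\n", cpu);
}

static void example_sched_out(struct preempt_notifier *notifier,
			      struct task_struct *next)
{
	pr_debug("example: preempted in favour of %s\n", next->comm);
}

static struct preempt_ops example_preempt_ops = {
	.sched_in	= example_sched_in,
	.sched_out	= example_sched_out,
};

static struct preempt_notifier example_notifier;

static void example_watch_current_task(void)
{
	/* Attaches to the calling task (i.e. current). */
	preempt_notifier_init(&example_notifier, &example_preempt_ops);
	preempt_notifier_register(&example_notifier);
}
#endif
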
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
new file mode 100644
index 000000000..dbeec4d4a
--- /dev/null
+++ b/include/linux/preempt_mask.h
@@ -0,0 +1,117 @@
+#ifndef LINUX_PREEMPT_MASK_H
+#define LINUX_PREEMPT_MASK_H
+
+#include <linux/preempt.h>
+
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count could in theory be the same as the number of
+ * interrupts in the system, but we run all interrupt handlers with
+ * interrupts disabled, so we cannot have nesting interrupts. Though
+ * there are a few palaeontologic drivers which reenable interrupts in
+ * the handler, so we need more than one bit here.
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x000f0000
+ * NMI_MASK: 0x00100000
+ * PREEMPT_ACTIVE: 0x00200000
+ */
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
+#define NMI_BITS 1
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
+
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
+#define PREEMPT_ACTIVE_BITS 1
+#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * The preempt_count offset needed for things like:
+ *
+ * spin_lock_bh()
+ *
+ * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ * spin_unlock();
+ * local_bh_enable();
+ *
+ * Work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+#define in_atomic_preempt_off() \
+ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
+#ifdef CONFIG_PREEMPT_COUNT
+# define preemptible() (preempt_count() == 0 && !irqs_disabled())
+#else
+# define preemptible() 0
+#endif
+
+#endif /* LINUX_PREEMPT_MASK_H */
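
The mask layout above is easiest to read with a concrete decode. A hedged helper that classifies the current context using only the predicates defined in this header (the function name and string labels are invented):

#include <linux/preempt_mask.h>

/* Illustrative only: decode the context encoded in preempt_count(). */
static const char *example_context_name(void)
{
	if (in_nmi())
		return "nmi";			/* NMI_MASK bit set */
	if (in_irq())
		return "hardirq";		/* HARDIRQ_MASK bits set */
	if (in_serving_softirq())
		return "softirq";		/* SOFTIRQ_OFFSET bit of the count */
	if (in_atomic())
		return "atomic (preemption disabled or bh off)";
	return "task context";
}
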
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
new file mode 100644
index 000000000..a3bfbdf63
--- /dev/null
+++ b/include/linux/prefetch.h
@@ -0,0 +1,64 @@
+/*
+ * Generic cache management functions. Everything is arch-specific,
+ * but this header exists to make sure the defines/functions can be
+ * used in a generic way.
+ *
+ * 2000-11-13 Arjan van de Ven <arjan@fenrus.demon.nl>
+ *
+ */
+
+#ifndef _LINUX_PREFETCH_H
+#define _LINUX_PREFETCH_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+
+/*
+ prefetch(x) attempts to pre-emptively get the memory pointed to
+ by address "x" into the CPU L1 cache.
+ prefetch(x) should not cause any kind of exception, prefetch(0) is
+ specifically ok.
+
+ prefetch() should be defined by the architecture, if not, the
+ #define below provides a no-op define.
+
+ There are 3 prefetch() macros:
+
+ prefetch(x) - prefetches the cacheline at "x" for read
+ prefetchw(x) - prefetches the cacheline at "x" for write
+ spin_lock_prefetch(x) - prefetches the spinlock *x for taking
+
+ there is also PREFETCH_STRIDE which is the architecture-preferred
+ "lookahead" size for prefetching streamed operations.
+
+*/
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(x) __builtin_prefetch(x)
+#endif
+
+#ifndef ARCH_HAS_PREFETCHW
+#define prefetchw(x) __builtin_prefetch(x,1)
+#endif
+
+#ifndef ARCH_HAS_SPINLOCK_PREFETCH
+#define spin_lock_prefetch(x) prefetchw(x)
+#endif
+
+#ifndef PREFETCH_STRIDE
+#define PREFETCH_STRIDE (4*L1_CACHE_BYTES)
+#endif
+
+static inline void prefetch_range(void *addr, size_t len)
+{
+#ifdef ARCH_HAS_PREFETCH
+ char *cp;
+ char *end = addr + len;
+
+ for (cp = addr; cp < end; cp += PREFETCH_STRIDE)
+ prefetch(cp);
+#endif
+}
+
+#endif
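A short usage sketch (editor's illustration, not part of the patch): issuing a prefetch for the next list node while the current one is being processed. The example_item structure and example_sum() function are hypothetical.

#include <linux/list.h>
#include <linux/prefetch.h>

struct example_item {
	struct list_head node;
	int value;
};

static int example_sum(struct list_head *head)
{
	struct example_item *item;
	int sum = 0;

	list_for_each_entry(item, head, node) {
		/* hint the CPU to start loading the next node early */
		prefetch(item->node.next);
		sum += item->value;
	}
	return sum;
}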
diff --git a/include/linux/printk.h b/include/linux/printk.h
new file mode 100644
index 000000000..9b30871c9
--- /dev/null
+++ b/include/linux/printk.h
@@ -0,0 +1,464 @@
+#ifndef __KERNEL_PRINTK__
+#define __KERNEL_PRINTK__
+
+#include <stdarg.h>
+#include <linux/init.h>
+#include <linux/kern_levels.h>
+#include <linux/linkage.h>
+#include <linux/cache.h>
+
+extern const char linux_banner[];
+extern const char linux_proc_banner[];
+
+static inline int printk_get_level(const char *buffer)
+{
+ if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
+ switch (buffer[1]) {
+ case '0' ... '7':
+ case 'd': /* KERN_DEFAULT */
+ return buffer[1];
+ }
+ }
+ return 0;
+}
+
+static inline const char *printk_skip_level(const char *buffer)
+{
+ if (printk_get_level(buffer))
+ return buffer + 2;
+
+ return buffer;
+}
+
+/* printks without a loglevel use this. */
+#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
+
+/* We show everything that is MORE important than this.. */
+#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
+#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
+#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */
+#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */
+#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
+#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
+
+extern int console_printk[];
+
+#define console_loglevel (console_printk[0])
+#define default_message_loglevel (console_printk[1])
+#define minimum_console_loglevel (console_printk[2])
+#define default_console_loglevel (console_printk[3])
+
+static inline void console_silent(void)
+{
+ console_loglevel = CONSOLE_LOGLEVEL_SILENT;
+}
+
+static inline void console_verbose(void)
+{
+ if (console_loglevel)
+ console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
+}
+
+struct va_format {
+ const char *fmt;
+ va_list *va;
+};
+
+/*
+ * FW_BUG
+ * Add this to a message where you are sure the firmware is buggy or behaves
+ * really stupidly or out of spec. Be aware that the responsible BIOS developer
+ * should be able to fix this issue or at least get a concrete idea of the
+ * problem by reading your message without needing to look at the kernel
+ * code.
+ *
+ * Use it for definite and high priority BIOS bugs.
+ *
+ * FW_WARN
+ * Use it for less clear-cut cases (e.g. could the kernel have messed things
+ * up already?) and medium priority BIOS bugs.
+ *
+ * FW_INFO
+ * Use this one if you want to tell the user or vendor about something
+ * suspicious, but generally harmless, related to the firmware.
+ *
+ * Use it for information or very low priority BIOS bugs.
+ */
+#define FW_BUG "[Firmware Bug]: "
+#define FW_WARN "[Firmware Warn]: "
+#define FW_INFO "[Firmware Info]: "
+
+/*
+ * HW_ERR
+ * Add this to a message for hardware errors, so that the user can report
+ * it to the hardware vendor instead of to LKML or the software vendor.
+ */
+#define HW_ERR "[Hardware Error]: "
+
+/*
+ * DEPRECATED
+ * Add this to a message whenever you want to warn user space about the use
+ * of a deprecated aspect of an API so that users can stop relying on it.
+ */
+#define DEPRECATED "[Deprecated]: "
+
+/*
+ * Dummy printk for disabled debugging statements to use whilst maintaining
+ * gcc's format and side-effect checking.
+ */
+static inline __printf(1, 2)
+int no_printk(const char *fmt, ...)
+{
+ return 0;
+}
+
+#ifdef CONFIG_EARLY_PRINTK
+extern asmlinkage __printf(1, 2)
+void early_printk(const char *fmt, ...);
+#else
+static inline __printf(1, 2) __cold
+void early_printk(const char *s, ...) { }
+#endif
+
+typedef int(*printk_func_t)(const char *fmt, va_list args);
+
+#ifdef CONFIG_PRINTK
+asmlinkage __printf(5, 0)
+int vprintk_emit(int facility, int level,
+ const char *dict, size_t dictlen,
+ const char *fmt, va_list args);
+
+asmlinkage __printf(1, 0)
+int vprintk(const char *fmt, va_list args);
+
+asmlinkage __printf(5, 6) __cold
+int printk_emit(int facility, int level,
+ const char *dict, size_t dictlen,
+ const char *fmt, ...);
+
+asmlinkage __printf(1, 2) __cold
+int printk(const char *fmt, ...);
+
+/*
+ * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
+ */
+__printf(1, 2) __cold int printk_deferred(const char *fmt, ...);
+
+/*
+ * Please don't use printk_ratelimit(), because it shares ratelimiting state
+ * with all other unrelated printk_ratelimit() callsites. Instead use
+ * printk_ratelimited() or plain old __ratelimit().
+ */
+extern int __printk_ratelimit(const char *func);
+#define printk_ratelimit() __printk_ratelimit(__func__)
+extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+ unsigned int interval_msec);
+
+extern int printk_delay_msec;
+extern int dmesg_restrict;
+extern int kptr_restrict;
+
+extern void wake_up_klogd(void);
+
+char *log_buf_addr_get(void);
+u32 log_buf_len_get(void);
+void log_buf_kexec_setup(void);
+void __init setup_log_buf(int early);
+void dump_stack_set_arch_desc(const char *fmt, ...);
+void dump_stack_print_info(const char *log_lvl);
+void show_regs_print_info(const char *log_lvl);
+#else
+static inline __printf(1, 0)
+int vprintk(const char *s, va_list args)
+{
+ return 0;
+}
+static inline __printf(1, 2) __cold
+int printk(const char *s, ...)
+{
+ return 0;
+}
+static inline __printf(1, 2) __cold
+int printk_deferred(const char *s, ...)
+{
+ return 0;
+}
+static inline int printk_ratelimit(void)
+{
+ return 0;
+}
+static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+ unsigned int interval_msec)
+{
+ return false;
+}
+
+static inline void wake_up_klogd(void)
+{
+}
+
+static inline char *log_buf_addr_get(void)
+{
+ return NULL;
+}
+
+static inline u32 log_buf_len_get(void)
+{
+ return 0;
+}
+
+static inline void log_buf_kexec_setup(void)
+{
+}
+
+static inline void setup_log_buf(int early)
+{
+}
+
+static inline void dump_stack_set_arch_desc(const char *fmt, ...)
+{
+}
+
+static inline void dump_stack_print_info(const char *log_lvl)
+{
+}
+
+static inline void show_regs_print_info(const char *log_lvl)
+{
+}
+#endif
+
+extern asmlinkage void dump_stack(void) __cold;
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+/*
+ * These can be used to print at the various log levels.
+ * All of these will print unconditionally, although note that pr_debug()
+ * and other debug macros are compiled out unless either DEBUG is defined
+ * or CONFIG_DYNAMIC_DEBUG is set.
+ */
+#define pr_emerg(fmt, ...) \
+ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn pr_warning
+#define pr_notice(fmt, ...) \
+ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/*
+ * Like KERN_CONT, pr_cont() should only be used when continuing
+ * a line with no newline ('\n') enclosed. Otherwise it defaults
+ * back to KERN_DEFAULT.
+ */
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
+
+/* pr_devel() should produce zero code unless DEBUG is defined */
+#ifdef DEBUG
+#define pr_devel(fmt, ...) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_devel(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+#include <linux/dynamic_debug.h>
+
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
+#define pr_debug(fmt, ...) \
+ dynamic_pr_debug(fmt, ##__VA_ARGS__)
+#elif defined(DEBUG)
+#define pr_debug(fmt, ...) \
+ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+
+#ifdef CONFIG_PRINTK
+#define printk_once(fmt, ...) \
+({ \
+ static bool __print_once __read_mostly; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ printk(fmt, ##__VA_ARGS__); \
+ } \
+})
+#define printk_deferred_once(fmt, ...) \
+({ \
+ static bool __print_once __read_mostly; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ printk_deferred(fmt, ##__VA_ARGS__); \
+ } \
+})
+#else
+#define printk_once(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
+#define printk_deferred_once(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
+#endif
+
+#define pr_emerg_once(fmt, ...) \
+ printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_once(fmt, ...) \
+ printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_once(fmt, ...) \
+ printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_once(fmt, ...) \
+ printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn_once(fmt, ...) \
+ printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_once(fmt, ...) \
+ printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_once(fmt, ...) \
+ printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont_once(fmt, ...) \
+ printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__)
+
+#if defined(DEBUG)
+#define pr_devel_once(fmt, ...) \
+ printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_devel_once(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_once(fmt, ...) \
+ printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_once(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+/*
+ * ratelimited messages with local ratelimit_state,
+ * no local ratelimit_state used in the !PRINTK case
+ */
+#ifdef CONFIG_PRINTK
+#define printk_ratelimited(fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ \
+ if (__ratelimit(&_rs)) \
+ printk(fmt, ##__VA_ARGS__); \
+})
+#else
+#define printk_ratelimited(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
+#endif
+
+#define pr_emerg_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+/* no pr_cont_ratelimited, don't do that... */
+
+#if defined(DEBUG)
+#define pr_devel_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_devel_ratelimited(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define pr_debug_ratelimited(fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ __ratelimit(&_rs)) \
+ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define pr_debug_ratelimited(fmt, ...) \
+ printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_ratelimited(fmt, ...) \
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+extern const struct file_operations kmsg_fops;
+
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, char *linebuf, size_t linebuflen,
+ bool ascii);
+#ifdef CONFIG_PRINTK
+extern void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define print_hex_dump_bytes(prefix_str, prefix_type, buf, len) \
+ dynamic_hex_dump(prefix_str, prefix_type, 16, 1, buf, len, true)
+#else
+extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+ const void *buf, size_t len);
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+#else
+static inline void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+}
+static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+ const void *buf, size_t len)
+{
+}
+
+#endif
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#else
+#define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii) \
+ print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
+ groupsize, buf, len, ascii)
+#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
+
+#endif
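A usage sketch (editor's illustration, not part of the patch) of the pr_*() helpers: a driver defines pr_fmt() before its includes so every message gets a common prefix. The "mydrv" prefix and example_init() are hypothetical.

#define pr_fmt(fmt) "mydrv: " fmt	/* must come before the includes */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/printk.h>

static int example_init(int channels)
{
	pr_info("initialising with %d channels\n", channels);

	if (channels <= 0) {
		pr_err_once("invalid channel count %d\n", channels);
		return -EINVAL;
	}

	/* compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled */
	pr_debug("channel 0 ready\n");
	return 0;
}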
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
new file mode 100644
index 000000000..b97bf2ef9
--- /dev/null
+++ b/include/linux/proc_fs.h
@@ -0,0 +1,85 @@
+/*
+ * The proc filesystem constants/structures
+ */
+#ifndef _LINUX_PROC_FS_H
+#define _LINUX_PROC_FS_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+
+struct proc_dir_entry;
+
+#ifdef CONFIG_PROC_FS
+
+extern void proc_root_init(void);
+extern void proc_flush_task(struct task_struct *);
+
+extern struct proc_dir_entry *proc_symlink(const char *,
+ struct proc_dir_entry *, const char *);
+extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
+extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
+ struct proc_dir_entry *, void *);
+extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t,
+ struct proc_dir_entry *);
+
+extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
+ struct proc_dir_entry *,
+ const struct file_operations *,
+ void *);
+
+static inline struct proc_dir_entry *proc_create(
+ const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct file_operations *proc_fops)
+{
+ return proc_create_data(name, mode, parent, proc_fops, NULL);
+}
+
+extern void proc_set_size(struct proc_dir_entry *, loff_t);
+extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
+extern void *PDE_DATA(const struct inode *);
+extern void *proc_get_parent_data(const struct inode *);
+extern void proc_remove(struct proc_dir_entry *);
+extern void remove_proc_entry(const char *, struct proc_dir_entry *);
+extern int remove_proc_subtree(const char *, struct proc_dir_entry *);
+
+#else /* CONFIG_PROC_FS */
+
+static inline void proc_root_init(void)
+{
+}
+
+static inline void proc_flush_task(struct task_struct *task)
+{
+}
+
+static inline struct proc_dir_entry *proc_symlink(const char *name,
+ struct proc_dir_entry *parent,const char *dest) { return NULL;}
+static inline struct proc_dir_entry *proc_mkdir(const char *name,
+ struct proc_dir_entry *parent) {return NULL;}
+static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
+ umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
+static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
+ umode_t mode, struct proc_dir_entry *parent) { return NULL; }
+#define proc_create(name, mode, parent, proc_fops) ({NULL;})
+#define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;})
+
+static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
+static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
+static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;}
+static inline void *proc_get_parent_data(const struct inode *inode) { BUG(); return NULL; }
+
+static inline void proc_remove(struct proc_dir_entry *de) {}
+#define remove_proc_entry(name, parent) do {} while (0)
+static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) { return 0; }
+
+#endif /* CONFIG_PROC_FS */
+
+struct net;
+
+static inline struct proc_dir_entry *proc_net_mkdir(
+ struct net *net, const char *name, struct proc_dir_entry *parent)
+{
+ return proc_mkdir_data(name, 0, parent, net);
+}
+
+#endif /* _LINUX_PROC_FS_H */
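A usage sketch (editor's illustration, not part of the patch): exposing a read-only /proc/example file through proc_create() and the seq_file helpers. All example_* names are hypothetical.

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from /proc/example\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	return proc_create("example", 0444, NULL, &example_proc_fops) ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");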
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
new file mode 100644
index 000000000..42dfc615d
--- /dev/null
+++ b/include/linux/proc_ns.h
@@ -0,0 +1,77 @@
+/*
+ * procfs namespace bits
+ */
+#ifndef _LINUX_PROC_NS_H
+#define _LINUX_PROC_NS_H
+
+#include <linux/ns_common.h>
+
+struct pid_namespace;
+struct nsproxy;
+struct path;
+
+struct proc_ns_operations {
+ const char *name;
+ int type;
+ struct ns_common *(*get)(struct task_struct *task);
+ void (*put)(struct ns_common *ns);
+ int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
+};
+
+extern const struct proc_ns_operations netns_operations;
+extern const struct proc_ns_operations utsns_operations;
+extern const struct proc_ns_operations ipcns_operations;
+extern const struct proc_ns_operations pidns_operations;
+extern const struct proc_ns_operations userns_operations;
+extern const struct proc_ns_operations mntns_operations;
+
+/*
+ * We always define these enumerators
+ */
+enum {
+ PROC_ROOT_INO = 1,
+ PROC_IPC_INIT_INO = 0xEFFFFFFFU,
+ PROC_UTS_INIT_INO = 0xEFFFFFFEU,
+ PROC_USER_INIT_INO = 0xEFFFFFFDU,
+ PROC_PID_INIT_INO = 0xEFFFFFFCU,
+};
+
+#ifdef CONFIG_PROC_FS
+
+extern int pid_ns_prepare_proc(struct pid_namespace *ns);
+extern void pid_ns_release_proc(struct pid_namespace *ns);
+extern int proc_alloc_inum(unsigned int *pino);
+extern void proc_free_inum(unsigned int inum);
+
+#else /* CONFIG_PROC_FS */
+
+static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; }
+static inline void pid_ns_release_proc(struct pid_namespace *ns) {}
+
+static inline int proc_alloc_inum(unsigned int *inum)
+{
+ *inum = 1;
+ return 0;
+}
+static inline void proc_free_inum(unsigned int inum) {}
+
+#endif /* CONFIG_PROC_FS */
+
+static inline int ns_alloc_inum(struct ns_common *ns)
+{
+ atomic_long_set(&ns->stashed, 0);
+ return proc_alloc_inum(&ns->inum);
+}
+
+#define ns_free_inum(ns) proc_free_inum((ns)->inum)
+
+extern struct file *proc_ns_fget(int fd);
+#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
+extern void *ns_get_path(struct path *path, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+
+extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
+ const struct proc_ns_operations *ns_ops);
+extern void nsfs_init(void);
+
+#endif /* _LINUX_PROC_NS_H */
diff --git a/include/linux/profile.h b/include/linux/profile.h
new file mode 100644
index 000000000..b537a25ff
--- /dev/null
+++ b/include/linux/profile.h
@@ -0,0 +1,138 @@
+#ifndef _LINUX_PROFILE_H
+#define _LINUX_PROFILE_H
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/cache.h>
+
+#include <asm/errno.h>
+
+#define CPU_PROFILING 1
+#define SCHED_PROFILING 2
+#define SLEEP_PROFILING 3
+#define KVM_PROFILING 4
+
+struct proc_dir_entry;
+struct pt_regs;
+struct notifier_block;
+
+#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
+void create_prof_cpu_mask(void);
+int create_proc_profile(void);
+#else
+static inline void create_prof_cpu_mask(void)
+{
+}
+
+static inline int create_proc_profile(void)
+{
+ return 0;
+}
+#endif
+
+enum profile_type {
+ PROFILE_TASK_EXIT,
+ PROFILE_MUNMAP
+};
+
+#ifdef CONFIG_PROFILING
+
+extern int prof_on __read_mostly;
+
+/* init basic kernel profiler */
+int profile_init(void);
+int profile_setup(char *str);
+void profile_tick(int type);
+int setup_profiling_timer(unsigned int multiplier);
+
+/*
+ * Add multiple profiler hits to a given address:
+ */
+void profile_hits(int type, void *ip, unsigned int nr_hits);
+
+/*
+ * Single profiler hit:
+ */
+static inline void profile_hit(int type, void *ip)
+{
+ /*
+ * Speedup for the common (no profiling enabled) case:
+ */
+ if (unlikely(prof_on == type))
+ profile_hits(type, ip, 1);
+}
+
+struct task_struct;
+struct mm_struct;
+
+/* task is in do_exit() */
+void profile_task_exit(struct task_struct * task);
+
+/* task is dead; free the task struct? Returns 1 if
+ * the task was taken over, 0 if the task should be freed.
+ */
+int profile_handoff_task(struct task_struct * task);
+
+/* sys_munmap */
+void profile_munmap(unsigned long addr);
+
+int task_handoff_register(struct notifier_block * n);
+int task_handoff_unregister(struct notifier_block * n);
+
+int profile_event_register(enum profile_type, struct notifier_block * n);
+int profile_event_unregister(enum profile_type, struct notifier_block * n);
+
+struct pt_regs;
+
+#else
+
+#define prof_on 0
+
+static inline int profile_init(void)
+{
+ return 0;
+}
+
+static inline void profile_tick(int type)
+{
+ return;
+}
+
+static inline void profile_hits(int type, void *ip, unsigned int nr_hits)
+{
+ return;
+}
+
+static inline void profile_hit(int type, void *ip)
+{
+ return;
+}
+
+static inline int task_handoff_register(struct notifier_block * n)
+{
+ return -ENOSYS;
+}
+
+static inline int task_handoff_unregister(struct notifier_block * n)
+{
+ return -ENOSYS;
+}
+
+static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
+{
+ return -ENOSYS;
+}
+
+static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
+{
+ return -ENOSYS;
+}
+
+#define profile_task_exit(a) do { } while (0)
+#define profile_handoff_task(a) (0)
+#define profile_munmap(a) do { } while (0)
+
+#endif /* CONFIG_PROFILING */
+
+#endif /* _LINUX_PROFILE_H */
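A usage sketch (editor's illustration, not part of the patch): registering a notifier for PROFILE_TASK_EXIT events so a module is told when tasks exit. The callback and naming are hypothetical.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/sched.h>

static int example_task_exit(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct task_struct *task = data;

	pr_info("task %d (%s) is exiting\n", task->pid, task->comm);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_task_exit,
};

static int __init example_init(void)
{
	return profile_event_register(PROFILE_TASK_EXIT, &example_nb);
}

static void __exit example_exit(void)
{
	profile_event_unregister(PROFILE_TASK_EXIT, &example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");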
diff --git a/include/linux/projid.h b/include/linux/projid.h
new file mode 100644
index 000000000..8c1f2c552
--- /dev/null
+++ b/include/linux/projid.h
@@ -0,0 +1,89 @@
+#ifndef _LINUX_PROJID_H
+#define _LINUX_PROJID_H
+
+/*
+ * A set of types for the internal kernel types representing project ids.
+ *
+ * The types defined in this header allow distinguishing which project ids in
+ * the kernel are values used by userspace and which project id values are
+ * the internal kernel values. With the addition of user namespaces the values
+ * can be different. Using the type system makes it possible for the compiler
+ * to detect when we overlook these differences.
+ *
+ */
+#include <linux/types.h>
+
+struct user_namespace;
+extern struct user_namespace init_user_ns;
+
+typedef __kernel_uid32_t projid_t;
+
+typedef struct {
+ projid_t val;
+} kprojid_t;
+
+static inline projid_t __kprojid_val(kprojid_t projid)
+{
+ return projid.val;
+}
+
+#define KPROJIDT_INIT(value) (kprojid_t){ value }
+
+#define INVALID_PROJID KPROJIDT_INIT(-1)
+#define OVERFLOW_PROJID 65534
+
+static inline bool projid_eq(kprojid_t left, kprojid_t right)
+{
+ return __kprojid_val(left) == __kprojid_val(right);
+}
+
+static inline bool projid_lt(kprojid_t left, kprojid_t right)
+{
+ return __kprojid_val(left) < __kprojid_val(right);
+}
+
+static inline bool projid_valid(kprojid_t projid)
+{
+ return !projid_eq(projid, INVALID_PROJID);
+}
+
+#ifdef CONFIG_USER_NS
+
+extern kprojid_t make_kprojid(struct user_namespace *from, projid_t projid);
+
+extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid);
+extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid);
+
+static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid)
+{
+ return from_kprojid(ns, projid) != (projid_t)-1;
+}
+
+#else
+
+static inline kprojid_t make_kprojid(struct user_namespace *from, projid_t projid)
+{
+ return KPROJIDT_INIT(projid);
+}
+
+static inline projid_t from_kprojid(struct user_namespace *to, kprojid_t kprojid)
+{
+ return __kprojid_val(kprojid);
+}
+
+static inline projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t kprojid)
+{
+ projid_t projid = from_kprojid(to, kprojid);
+ if (projid == (projid_t)-1)
+ projid = OVERFLOW_PROJID;
+ return projid;
+}
+
+static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid)
+{
+ return true;
+}
+
+#endif /* CONFIG_USER_NS */
+
+#endif /* _LINUX_PROJID_H */
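A usage sketch (editor's illustration, not part of the patch): mapping a userspace-supplied project id into the kernel-internal kprojid_t through the caller's user namespace before storing it. The helper name is hypothetical.

#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/projid.h>

static int example_set_projid(projid_t user_value, kprojid_t *out)
{
	kprojid_t kprojid;

	/* translate the raw value via the current user namespace */
	kprojid = make_kprojid(current_user_ns(), user_value);
	if (!projid_valid(kprojid))
		return -EINVAL;

	*out = kprojid;
	return 0;
}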
diff --git a/include/linux/property.h b/include/linux/property.h
new file mode 100644
index 000000000..de8bdf417
--- /dev/null
+++ b/include/linux/property.h
@@ -0,0 +1,167 @@
+/*
+ * property.h - Unified device property interface.
+ *
+ * Copyright (C) 2014, Intel Corporation
+ * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_PROPERTY_H_
+#define _LINUX_PROPERTY_H_
+
+#include <linux/fwnode.h>
+#include <linux/types.h>
+
+struct device;
+
+enum dev_prop_type {
+ DEV_PROP_U8,
+ DEV_PROP_U16,
+ DEV_PROP_U32,
+ DEV_PROP_U64,
+ DEV_PROP_STRING,
+ DEV_PROP_MAX,
+};
+
+bool device_property_present(struct device *dev, const char *propname);
+int device_property_read_u8_array(struct device *dev, const char *propname,
+ u8 *val, size_t nval);
+int device_property_read_u16_array(struct device *dev, const char *propname,
+ u16 *val, size_t nval);
+int device_property_read_u32_array(struct device *dev, const char *propname,
+ u32 *val, size_t nval);
+int device_property_read_u64_array(struct device *dev, const char *propname,
+ u64 *val, size_t nval);
+int device_property_read_string_array(struct device *dev, const char *propname,
+ const char **val, size_t nval);
+int device_property_read_string(struct device *dev, const char *propname,
+ const char **val);
+
+bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
+int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
+ const char *propname, u8 *val,
+ size_t nval);
+int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
+ const char *propname, u16 *val,
+ size_t nval);
+int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
+ const char *propname, u32 *val,
+ size_t nval);
+int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
+ const char *propname, u64 *val,
+ size_t nval);
+int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+ const char *propname, const char **val,
+ size_t nval);
+int fwnode_property_read_string(struct fwnode_handle *fwnode,
+ const char *propname, const char **val);
+
+struct fwnode_handle *device_get_next_child_node(struct device *dev,
+ struct fwnode_handle *child);
+
+#define device_for_each_child_node(dev, child) \
+ for (child = device_get_next_child_node(dev, NULL); child; \
+ child = device_get_next_child_node(dev, child))
+
+void fwnode_handle_put(struct fwnode_handle *fwnode);
+
+unsigned int device_get_child_node_count(struct device *dev);
+
+static inline bool device_property_read_bool(struct device *dev,
+ const char *propname)
+{
+ return device_property_present(dev, propname);
+}
+
+static inline int device_property_read_u8(struct device *dev,
+ const char *propname, u8 *val)
+{
+ return device_property_read_u8_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u16(struct device *dev,
+ const char *propname, u16 *val)
+{
+ return device_property_read_u16_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u32(struct device *dev,
+ const char *propname, u32 *val)
+{
+ return device_property_read_u32_array(dev, propname, val, 1);
+}
+
+static inline int device_property_read_u64(struct device *dev,
+ const char *propname, u64 *val)
+{
+ return device_property_read_u64_array(dev, propname, val, 1);
+}
+
+static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return fwnode_property_present(fwnode, propname);
+}
+
+static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode,
+ const char *propname, u8 *val)
+{
+ return fwnode_property_read_u8_array(fwnode, propname, val, 1);
+}
+
+static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode,
+ const char *propname, u16 *val)
+{
+ return fwnode_property_read_u16_array(fwnode, propname, val, 1);
+}
+
+static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode,
+ const char *propname, u32 *val)
+{
+ return fwnode_property_read_u32_array(fwnode, propname, val, 1);
+}
+
+static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
+ const char *propname, u64 *val)
+{
+ return fwnode_property_read_u64_array(fwnode, propname, val, 1);
+}
+
+/**
+ * struct property_entry - "Built-in" device property representation.
+ * @name: Name of the property.
+ * @type: Type of the property.
+ * @nval: Number of items of type @type making up the value.
+ * @value: Value of the property (an array of @nval items of type @type).
+ */
+struct property_entry {
+ const char *name;
+ enum dev_prop_type type;
+ size_t nval;
+ union {
+ void *raw_data;
+ u8 *u8_data;
+ u16 *u16_data;
+ u32 *u32_data;
+ u64 *u64_data;
+ const char **str;
+ } value;
+};
+
+/**
+ * struct property_set - Collection of "built-in" device properties.
+ * @fwnode: Handle to be pointed to by the fwnode field of struct device.
+ * @properties: Array of properties terminated with a null entry.
+ */
+struct property_set {
+ struct fwnode_handle fwnode;
+ struct property_entry *properties;
+};
+
+void device_add_property_set(struct device *dev, struct property_set *pset);
+
+#endif /* _LINUX_PROPERTY_H_ */
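A usage sketch (editor's illustration, not part of the patch): a probe path reading firmware-supplied properties through the unified API, so the same code works whether the device is described by DT or ACPI. Property names, the default value and the example_config structure are hypothetical.

#include <linux/device.h>
#include <linux/property.h>

struct example_config {
	u32 clock_hz;
	bool wakeup;
};

static int example_read_config(struct device *dev, struct example_config *cfg)
{
	/* fall back to a default when the property is absent */
	if (device_property_read_u32(dev, "clock-frequency", &cfg->clock_hz))
		cfg->clock_hz = 1000000;

	cfg->wakeup = device_property_read_bool(dev, "wakeup-source");
	return 0;
}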
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
new file mode 100644
index 000000000..00e8e8fa7
--- /dev/null
+++ b/include/linux/proportions.h
@@ -0,0 +1,137 @@
+/*
+ * Floating proportions
+ *
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * This file contains the public data structure and API definitions.
+ */
+
+#ifndef _LINUX_PROPORTIONS_H
+#define _LINUX_PROPORTIONS_H
+
+#include <linux/percpu_counter.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/gfp.h>
+
+struct prop_global {
+ /*
+ * The period over which we differentiate
+ *
+ * period = 2^shift
+ */
+ int shift;
+ /*
+ * The total event counter aka 'time'.
+ *
+ * Treated as an unsigned long; the lower 'shift - 1' bits are the
+ * counter bits, the remaining upper bits the period counter.
+ */
+ struct percpu_counter events;
+};
+
+/*
+ * global proportion descriptor
+ *
+ * This is needed to consistently flip prop_global structures.
+ */
+struct prop_descriptor {
+ int index;
+ struct prop_global pg[2];
+ struct mutex mutex; /* serialize the prop_global switch */
+};
+
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
+void prop_change_shift(struct prop_descriptor *pd, int new_shift);
+
+/*
+ * ----- PERCPU ------
+ */
+
+struct prop_local_percpu {
+ /*
+ * the local events counter
+ */
+ struct percpu_counter events;
+
+ /*
+ * snapshot of the last seen global state
+ */
+ int shift;
+ unsigned long period;
+ raw_spinlock_t lock; /* protect the snapshot state */
+};
+
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
+void prop_local_destroy_percpu(struct prop_local_percpu *pl);
+void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
+void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
+ long *numerator, long *denominator);
+
+static inline
+void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __prop_inc_percpu(pd, pl);
+ local_irq_restore(flags);
+}
+
+/*
+ * Limit the time part in order to ensure there are some bits left for the
+ * cycle counter and fraction multiply.
+ */
+#if BITS_PER_LONG == 32
+#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
+#else
+#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
+#endif
+
+#define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
+#define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT)
+
+void __prop_inc_percpu_max(struct prop_descriptor *pd,
+ struct prop_local_percpu *pl, long frac);
+
+
+/*
+ * ----- SINGLE ------
+ */
+
+struct prop_local_single {
+ /*
+ * the local events counter
+ */
+ unsigned long events;
+
+ /*
+ * snapshot of the last seen global state
+ * and a lock protecting this state
+ */
+ unsigned long period;
+ int shift;
+ raw_spinlock_t lock; /* protect the snapshot state */
+};
+
+#define INIT_PROP_LOCAL_SINGLE(name) \
+{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+}
+
+int prop_local_init_single(struct prop_local_single *pl);
+void prop_local_destroy_single(struct prop_local_single *pl);
+void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
+void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
+ long *numerator, long *denominator);
+
+static inline
+void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __prop_inc_single(pd, pl);
+ local_irq_restore(flags);
+}
+
+#endif /* _LINUX_PROPORTIONS_H */
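A usage sketch (editor's illustration, not part of the patch) of the percpu flavour: one global descriptor plus a local counter per event source, queried for that source's share of recent events. All names are hypothetical.

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/proportions.h>

static struct prop_descriptor example_prop;
static struct prop_local_percpu example_src;

static int example_setup(void)
{
	int err;

	/* differentiate over a period of 2^10 events */
	err = prop_descriptor_init(&example_prop, 10, GFP_KERNEL);
	if (err)
		return err;
	return prop_local_init_percpu(&example_src, GFP_KERNEL);
}

static void example_event(void)
{
	/* charge one event to this source */
	prop_inc_percpu(&example_prop, &example_src);
}

static void example_report(void)
{
	long num, den;

	prop_fraction_percpu(&example_prop, &example_src, &num, &den);
	pr_info("source contributed %ld/%ld of recent events\n", num, den);
}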
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
new file mode 100644
index 000000000..8e7a25b06
--- /dev/null
+++ b/include/linux/pstore.h
@@ -0,0 +1,94 @@
+/*
+ * Persistent Storage - pstore.h
+ *
+ * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
+ *
+ * This code is the generic layer to export data records from platform
+ * level persistent storage via a file system.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _LINUX_PSTORE_H
+#define _LINUX_PSTORE_H
+
+#include <linux/time.h>
+#include <linux/kmsg_dump.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+
+/* types */
+enum pstore_type_id {
+ PSTORE_TYPE_DMESG = 0,
+ PSTORE_TYPE_MCE = 1,
+ PSTORE_TYPE_CONSOLE = 2,
+ PSTORE_TYPE_FTRACE = 3,
+ /* PPC64 partition types */
+ PSTORE_TYPE_PPC_RTAS = 4,
+ PSTORE_TYPE_PPC_OF = 5,
+ PSTORE_TYPE_PPC_COMMON = 6,
+ PSTORE_TYPE_PMSG = 7,
+ PSTORE_TYPE_PPC_OPAL = 8,
+ PSTORE_TYPE_UNKNOWN = 255
+};
+
+struct module;
+
+struct pstore_info {
+ struct module *owner;
+ char *name;
+ spinlock_t buf_lock; /* serialize access to 'buf' */
+ char *buf;
+ size_t bufsize;
+ struct mutex read_mutex; /* serialize open/read/close */
+ int flags;
+ int (*open)(struct pstore_info *psi);
+ int (*close)(struct pstore_info *psi);
+ ssize_t (*read)(u64 *id, enum pstore_type_id *type,
+ int *count, struct timespec *time, char **buf,
+ bool *compressed, struct pstore_info *psi);
+ int (*write)(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
+ unsigned int part, int count, bool compressed,
+ size_t size, struct pstore_info *psi);
+ int (*write_buf)(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
+ unsigned int part, const char *buf, bool compressed,
+ size_t size, struct pstore_info *psi);
+ int (*erase)(enum pstore_type_id type, u64 id,
+ int count, struct timespec time,
+ struct pstore_info *psi);
+ void *data;
+};
+
+#define PSTORE_FLAGS_FRAGILE 1
+
+#ifdef CONFIG_PSTORE
+extern int pstore_register(struct pstore_info *);
+extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
+#else
+static inline int
+pstore_register(struct pstore_info *psi)
+{
+ return -ENODEV;
+}
+static inline bool
+pstore_cannot_block_path(enum kmsg_dump_reason reason)
+{
+ return false;
+}
+#endif
+
+#endif /*_LINUX_PSTORE_H*/
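A registration sketch (editor's illustration, not part of the patch): the skeleton of a pstore backend. The read/write/erase bodies are stubs; a real backend would move records to and from its persistent storage there. All example_* names are hypothetical, and the explicit lock/mutex initialisation is an assumption about what a backend sets up before registering.

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pstore.h>
#include <linux/spinlock.h>

static char example_buf[1024];

static ssize_t example_read(u64 *id, enum pstore_type_id *type,
			    int *count, struct timespec *time, char **buf,
			    bool *compressed, struct pstore_info *psi)
{
	return 0;			/* 0 bytes: no more records */
}

static int example_write(enum pstore_type_id type,
			 enum kmsg_dump_reason reason, u64 *id,
			 unsigned int part, int count, bool compressed,
			 size_t size, struct pstore_info *psi)
{
	/* psi->buf holds 'size' bytes of the record to persist */
	return 0;
}

static int example_erase(enum pstore_type_id type, u64 id, int count,
			 struct timespec time, struct pstore_info *psi)
{
	return 0;
}

static struct pstore_info example_pstore = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.buf		= example_buf,
	.bufsize	= sizeof(example_buf),
	.read		= example_read,
	.write		= example_write,
	.erase		= example_erase,
};

static int __init example_init(void)
{
	spin_lock_init(&example_pstore.buf_lock);
	mutex_init(&example_pstore.read_mutex);
	return pstore_register(&example_pstore);
}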
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
new file mode 100644
index 000000000..9c9d6c154
--- /dev/null
+++ b/include/linux/pstore_ram.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
+ * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_PSTORE_RAM_H__
+#define __LINUX_PSTORE_RAM_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+struct persistent_ram_buffer;
+struct rs_control;
+
+struct persistent_ram_ecc_info {
+ int block_size;
+ int ecc_size;
+ int symsize;
+ int poly;
+};
+
+struct persistent_ram_zone {
+ phys_addr_t paddr;
+ size_t size;
+ void *vaddr;
+ struct persistent_ram_buffer *buffer;
+ size_t buffer_size;
+
+ /* ECC correction */
+ char *par_buffer;
+ char *par_header;
+ struct rs_control *rs_decoder;
+ int corrected_bytes;
+ int bad_blocks;
+ struct persistent_ram_ecc_info ecc_info;
+
+ char *old_log;
+ size_t old_log_size;
+};
+
+struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
+ unsigned int memtype);
+void persistent_ram_free(struct persistent_ram_zone *prz);
+void persistent_ram_zap(struct persistent_ram_zone *prz);
+
+int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
+ unsigned int count);
+
+void persistent_ram_save_old(struct persistent_ram_zone *prz);
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
+void *persistent_ram_old(struct persistent_ram_zone *prz);
+void persistent_ram_free_old(struct persistent_ram_zone *prz);
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+ char *str, size_t len);
+
+/*
+ * Ramoops platform data
+ * @mem_size memory size for ramoops
+ * @mem_address physical memory address to contain ramoops
+ */
+
+struct ramoops_platform_data {
+ unsigned long mem_size;
+ unsigned long mem_address;
+ unsigned int mem_type;
+ unsigned long record_size;
+ unsigned long console_size;
+ unsigned long ftrace_size;
+ unsigned long pmsg_size;
+ int dump_oops;
+ struct persistent_ram_ecc_info ecc_info;
+};
+
+#endif
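A board-code sketch (editor's illustration, not part of the patch): describing a ramoops region with ramoops_platform_data and registering the matching platform device. The address and sizes are hypothetical and must point at RAM reserved for this purpose.

#include <linux/platform_device.h>
#include <linux/pstore_ram.h>
#include <linux/sizes.h>

static struct ramoops_platform_data example_ramoops_data = {
	.mem_size	= SZ_1M,
	.mem_address	= 0x8f000000,	/* hypothetical reserved region */
	.record_size	= SZ_64K,
	.console_size	= SZ_64K,
	.dump_oops	= 1,
};

static struct platform_device example_ramoops_dev = {
	.name	= "ramoops",
	.dev	= {
		.platform_data = &example_ramoops_data,
	},
};

static int __init example_board_init(void)
{
	return platform_device_register(&example_ramoops_dev);
}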
diff --git a/include/linux/pti.h b/include/linux/pti.h
new file mode 100644
index 000000000..b3ea01a31
--- /dev/null
+++ b/include/linux/pti.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) Intel 2011
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The PTI (Parallel Trace Interface) driver directs trace data routed from
+ * various parts in the system out through the Intel Penwell PTI port and
+ * out of the mobile device for analysis with a debugging tool
+ * (Lauterbach, Fido). This is part of a solution for the MIPI P1149.7,
+ * compact JTAG, standard.
+ *
+ * This header file allows other parts of the OS to use the
+ * interface to write out their contents for debugging a mobile system.
+ */
+
+#ifndef PTI_H_
+#define PTI_H_
+
+/* offset for last dword of any PTI message. Part of MIPI P1149.7 */
+#define PTI_LASTDWORD_DTS 0x30
+
+/* basic structure used as a write address to the PTI HW */
+struct pti_masterchannel {
+ u8 master;
+ u8 channel;
+};
+
+/* the following functions are defined in misc/pti.c */
+void pti_writedata(struct pti_masterchannel *mc, u8 *buf, int count);
+struct pti_masterchannel *pti_request_masterchannel(u8 type,
+ const char *thread_name);
+void pti_release_masterchannel(struct pti_masterchannel *mc);
+
+#endif /*PTI_H_*/
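A usage sketch (editor's illustration, not part of the patch): requesting a PTI master/channel pair and writing a small buffer through it. The type value passed to pti_request_masterchannel() and the names are hypothetical.

#include <linux/pti.h>
#include <linux/string.h>
#include <linux/types.h>

static void example_trace(const char *msg)
{
	struct pti_masterchannel *mc;

	mc = pti_request_masterchannel(0, "example");	/* 0: hypothetical type */
	if (!mc)
		return;

	pti_writedata(mc, (u8 *)msg, strlen(msg));
	pti_release_masterchannel(mc);
}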
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
new file mode 100644
index 000000000..159c987b1
--- /dev/null
+++ b/include/linux/ptp_classify.h
@@ -0,0 +1,78 @@
+/*
+ * PTP 1588 support
+ *
+ * This file implements a BPF that recognizes PTP event messages.
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PTP_CLASSIFY_H_
+#define _PTP_CLASSIFY_H_
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+
+#define PTP_CLASS_NONE 0x00 /* not a PTP event message */
+#define PTP_CLASS_V1 0x01 /* protocol version 1 */
+#define PTP_CLASS_V2 0x02 /* protocol version 2 */
+#define PTP_CLASS_VMASK 0x0f /* max protocol version is 15 */
+#define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */
+#define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */
+#define PTP_CLASS_L2 0x30 /* event in a L2 packet */
+#define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */
+#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged packet */
+
+#define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
+#define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
+#define PTP_CLASS_V2_IPV4 (PTP_CLASS_V2 | PTP_CLASS_IPV4)
+#define PTP_CLASS_V2_IPV6 (PTP_CLASS_V2 | PTP_CLASS_IPV6)
+#define PTP_CLASS_V2_L2 (PTP_CLASS_V2 | PTP_CLASS_L2)
+#define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN)
+
+#define PTP_EV_PORT 319
+#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
+
+#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
+#define OFF_PTP_SEQUENCE_ID 30
+#define OFF_PTP_CONTROL 32 /* PTPv1 only */
+
+/* The defines below should actually be removed at some point in time. */
+#define IP6_HLEN 40
+#define UDP_HLEN 8
+#define OFF_IHL 14
+#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
+
+#if defined(CONFIG_NET_PTP_CLASSIFY)
+/**
+ * ptp_classify_raw - classify a PTP packet
+ * @skb: buffer
+ *
+ * Runs a minimal BPF dissector to classify a network packet to
+ * determine the PTP class. In case the skb does not contain any
+ * PTP protocol data, PTP_CLASS_NONE will be returned, otherwise
+ * PTP_CLASS_V1_IPV{4,6}, PTP_CLASS_V2_IPV{4,6} or
+ * PTP_CLASS_V2_{L2,VLAN}, depending on the packet content.
+ */
+unsigned int ptp_classify_raw(const struct sk_buff *skb);
+
+void __init ptp_classifier_init(void);
+#else
+static inline void ptp_classifier_init(void)
+{
+}
+#endif
+#endif /* _PTP_CLASSIFY_H_ */
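A usage sketch (editor's illustration, not part of the patch): a MAC driver checking whether an skb is a PTP event message that should get a hardware timestamp. The function name and the version-2-only policy are hypothetical.

#include <linux/ptp_classify.h>
#include <linux/skbuff.h>

static bool example_needs_hwtstamp(struct sk_buff *skb)
{
	unsigned int class = ptp_classify_raw(skb);

	if (class == PTP_CLASS_NONE)
		return false;

	/* this (hypothetical) hardware only timestamps PTPv2 event messages */
	return (class & PTP_CLASS_VMASK) == PTP_CLASS_V2;
}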
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
new file mode 100644
index 000000000..b8b73066d
--- /dev/null
+++ b/include/linux/ptp_clock_kernel.h
@@ -0,0 +1,192 @@
+/*
+ * PTP 1588 clock support
+ *
+ * Copyright (C) 2010 OMICRON electronics GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PTP_CLOCK_KERNEL_H_
+#define _PTP_CLOCK_KERNEL_H_
+
+#include <linux/device.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock.h>
+
+
+struct ptp_clock_request {
+ enum {
+ PTP_CLK_REQ_EXTTS,
+ PTP_CLK_REQ_PEROUT,
+ PTP_CLK_REQ_PPS,
+ } type;
+ union {
+ struct ptp_extts_request extts;
+ struct ptp_perout_request perout;
+ };
+};
+
+/**
+ * struct ptp_clock_info - describes a PTP hardware clock
+ *
+ * @owner: The clock driver should set to THIS_MODULE.
+ * @name: A short "friendly name" to identify the clock and to
+ * help distinguish PHY based devices from MAC based ones.
+ * The string is not meant to be a unique id.
+ * @max_adj: The maximum possible frequency adjustment, in parts per billion.
+ * @n_alarm: The number of programmable alarms.
+ * @n_ext_ts: The number of external time stamp channels.
+ * @n_per_out: The number of programmable periodic signals.
+ * @n_pins: The number of programmable pins.
+ * @pps: Indicates whether the clock supports a PPS callback.
+ * @pin_config: Array of length 'n_pins'. If the number of
+ * programmable pins is nonzero, then drivers must
+ * allocate and initialize this array.
+ *
+ * clock operations
+ *
+ * @adjfreq: Adjusts the frequency of the hardware clock.
+ * parameter delta: Desired frequency offset from nominal frequency
+ * in parts per billion
+ *
+ * @adjtime: Shifts the time of the hardware clock.
+ * parameter delta: Desired change in nanoseconds.
+ *
+ * @gettime64: Reads the current time from the hardware clock.
+ * parameter ts: Holds the result.
+ *
+ * @settime64: Set the current time on the hardware clock.
+ * parameter ts: Time value to set.
+ *
+ * @enable: Request driver to enable or disable an ancillary feature.
+ * parameter request: Desired resource to enable or disable.
+ * parameter on: Caller passes one to enable or zero to disable.
+ *
+ * @verify: Confirm that a pin can perform a given function. The PTP
+ * Hardware Clock subsystem maintains the 'pin_config'
+ * array on behalf of the drivers, but the PHC subsystem
+ * assumes that every pin can perform every function. This
+ * hook gives drivers a way of telling the core about
+ * limitations on specific pins. This function must return
+ * zero if the function can be assigned to this pin, and
+ * nonzero otherwise.
+ * parameter pin: index of the pin in question.
+ * parameter func: the desired function to use.
+ * parameter chan: the function channel index to use.
+ *
+ * Drivers should embed their ptp_clock_info within a private
+ * structure, obtaining a reference to it using container_of().
+ *
+ * The callbacks must all return zero on success, non-zero otherwise.
+ */
+
+struct ptp_clock_info {
+ struct module *owner;
+ char name[16];
+ s32 max_adj;
+ int n_alarm;
+ int n_ext_ts;
+ int n_per_out;
+ int n_pins;
+ int pps;
+ struct ptp_pin_desc *pin_config;
+ int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
+ int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
+ int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
+ int (*enable)(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on);
+ int (*verify)(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan);
+};
+
+struct ptp_clock;
+
+/**
+ * ptp_clock_register() - register a PTP hardware clock driver
+ *
+ * @info: Structure describing the new clock.
+ * @parent: Pointer to the parent device of the new clock.
+ */
+
+extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent);
+
+/**
+ * ptp_clock_unregister() - unregister a PTP hardware clock driver
+ *
+ * @ptp: The clock to remove from service.
+ */
+
+extern int ptp_clock_unregister(struct ptp_clock *ptp);
+
+
+enum ptp_clock_events {
+ PTP_CLOCK_ALARM,
+ PTP_CLOCK_EXTTS,
+ PTP_CLOCK_PPS,
+ PTP_CLOCK_PPSUSR,
+};
+
+/**
+ * struct ptp_clock_event - describes a PTP hardware clock event
+ *
+ * @type: One of the ptp_clock_events enumeration values.
+ * @index: Identifies the source of the event.
+ * @timestamp: When the event occurred (%PTP_CLOCK_EXTTS only).
+ * @pps_times: When the event occurred (%PTP_CLOCK_PPSUSR only).
+ */
+
+struct ptp_clock_event {
+ int type;
+ int index;
+ union {
+ u64 timestamp;
+ struct pps_event_time pps_times;
+ };
+};
+
+/**
+ * ptp_clock_event() - notify the PTP layer about an event
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ * @event: Message structure describing the event.
+ */
+
+extern void ptp_clock_event(struct ptp_clock *ptp,
+ struct ptp_clock_event *event);
+
+/**
+ * ptp_clock_index() - obtain the device index of a PTP clock
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ */
+
+extern int ptp_clock_index(struct ptp_clock *ptp);
+
+/**
+ * ptp_find_pin() - obtain the pin index of a given auxiliary function
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ * @func: One of the ptp_pin_function enumerated values.
+ * @chan: The particular functional channel to find.
+ * Return: Pin index in the range of zero to ptp_clock_caps.n_pins - 1,
+ * or -1 if the auxiliary function cannot be found.
+ */
+
+int ptp_find_pin(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan);
+
+#endif
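A registration sketch (editor's illustration, not part of the patch): the skeleton of a PHC driver filling in ptp_clock_info and calling ptp_clock_register(). The clock operations are stubs; a real driver would program its timer hardware in them. All example_* names are hypothetical.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

static int example_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	return 0;	/* would scale the hardware counter by ppb */
}

static int example_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	return 0;	/* would add delta nanoseconds to the counter */
}

static int example_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
	ts->tv_sec = 0;	/* would read the hardware counter */
	ts->tv_nsec = 0;
	return 0;
}

static int example_settime64(struct ptp_clock_info *ptp,
			     const struct timespec64 *ts)
{
	return 0;	/* would load the hardware counter from ts */
}

static int example_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;	/* no ancillary features */
}

static struct ptp_clock_info example_caps = {
	.owner		= THIS_MODULE,
	.name		= "example phc",
	.max_adj	= 1000000,
	.adjfreq	= example_adjfreq,
	.adjtime	= example_adjtime,
	.gettime64	= example_gettime64,
	.settime64	= example_settime64,
	.enable		= example_enable,
};

static struct ptp_clock *example_clock;

static int example_register(struct device *parent)
{
	example_clock = ptp_clock_register(&example_caps, parent);
	return IS_ERR(example_clock) ? PTR_ERR(example_clock) : 0;
}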
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
new file mode 100644
index 000000000..987a73a40
--- /dev/null
+++ b/include/linux/ptrace.h
@@ -0,0 +1,386 @@
+#ifndef _LINUX_PTRACE_H
+#define _LINUX_PTRACE_H
+
+#include <linux/compiler.h> /* For unlikely. */
+#include <linux/sched.h> /* For struct task_struct. */
+#include <linux/err.h> /* for IS_ERR_VALUE */
+#include <linux/bug.h> /* For BUG_ON. */
+#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
+#include <uapi/linux/ptrace.h>
+
+/*
+ * Ptrace flags
+ *
+ * The ownership rules for task->ptrace, which holds the ptrace
+ * flags, are simple. When a task is running it owns its task->ptrace
+ * flags. When a task is stopped the ptracer owns task->ptrace.
+ */
+
+#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
+#define PT_PTRACED 0x00000001
+#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
+#define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */
+
+#define PT_OPT_FLAG_SHIFT 3
+/* PT_TRACE_* event enable flags */
+#define PT_EVENT_FLAG(event) (1 << (PT_OPT_FLAG_SHIFT + (event)))
+#define PT_TRACESYSGOOD PT_EVENT_FLAG(0)
+#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK)
+#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
+#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
+#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
+#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
+#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
+#define PT_TRACE_SECCOMP PT_EVENT_FLAG(PTRACE_EVENT_SECCOMP)
+
+#define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT)
+
+/* single stepping state bits (used on ARM and PA-RISC) */
+#define PT_SINGLESTEP_BIT 31
+#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
+#define PT_BLOCKSTEP_BIT 30
+#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)
+
+extern long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
+extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
+extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
+extern void ptrace_disable(struct task_struct *);
+extern int ptrace_request(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data);
+extern void ptrace_notify(int exit_code);
+extern void __ptrace_link(struct task_struct *child,
+ struct task_struct *new_parent);
+extern void __ptrace_unlink(struct task_struct *child);
+extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
+#define PTRACE_MODE_READ 0x01
+#define PTRACE_MODE_ATTACH 0x02
+#define PTRACE_MODE_NOAUDIT 0x04
+/* Returns true on success, false on denial. */
+extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
+
+static inline int ptrace_reparented(struct task_struct *child)
+{
+ return !same_thread_group(child->real_parent, child->parent);
+}
+
+static inline void ptrace_unlink(struct task_struct *child)
+{
+ if (unlikely(child->ptrace))
+ __ptrace_unlink(child);
+}
+
+int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
+ unsigned long data);
+int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
+ unsigned long data);
+
+/**
+ * ptrace_parent - return the task that is tracing the given task
+ * @task: task to consider
+ *
+ * Returns %NULL if no one is tracing @task, or the &struct task_struct
+ * pointer to its tracer.
+ *
+ * Must be called under rcu_read_lock(). The pointer returned might be kept
+ * live only by RCU. During exec, this may be called with task_lock() held
+ * on @task, still held from when check_unsafe_exec() was called.
+ */
+static inline struct task_struct *ptrace_parent(struct task_struct *task)
+{
+ if (unlikely(task->ptrace))
+ return rcu_dereference(task->parent);
+ return NULL;
+}
+
+/**
+ * ptrace_event_enabled - test whether a ptrace event is enabled
+ * @task: ptracee of interest
+ * @event: %PTRACE_EVENT_* to test
+ *
+ * Test whether @event is enabled for ptracee @task.
+ *
+ * Returns %true if @event is enabled, %false otherwise.
+ */
+static inline bool ptrace_event_enabled(struct task_struct *task, int event)
+{
+ return task->ptrace & PT_EVENT_FLAG(event);
+}
+
+/**
+ * ptrace_event - possibly stop for a ptrace event notification
+ * @event: %PTRACE_EVENT_* value to report
+ * @message: value for %PTRACE_GETEVENTMSG to return
+ *
+ * Check whether @event is enabled and, if so, report @event and @message
+ * to the ptrace parent.
+ *
+ * Called without locks.
+ */
+static inline void ptrace_event(int event, unsigned long message)
+{
+ if (unlikely(ptrace_event_enabled(current, event))) {
+ current->ptrace_message = message;
+ ptrace_notify((event << 8) | SIGTRAP);
+ } else if (event == PTRACE_EVENT_EXEC) {
+ /* legacy EXEC report via SIGTRAP */
+ if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
+ send_sig(SIGTRAP, current, 0);
+ }
+}
+
+/**
+ * ptrace_event_pid - possibly stop for a ptrace event notification
+ * @event: %PTRACE_EVENT_* value to report
+ * @pid: process identifier for %PTRACE_GETEVENTMSG to return
+ *
+ * Check whether @event is enabled and, if so, report @event and @pid
+ * to the ptrace parent. @pid is reported as the pid_t seen from the
+ * ptrace parent's pid namespace.
+ *
+ * Called without locks.
+ */
+static inline void ptrace_event_pid(int event, struct pid *pid)
+{
+ /*
+ * FIXME: There's a potential race if a ptracer in a different pid
+ * namespace than parent attaches between computing message below and
+ * when we acquire tasklist_lock in ptrace_stop(). If this happens,
+ * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
+ */
+ unsigned long message = 0;
+ struct pid_namespace *ns;
+
+ rcu_read_lock();
+ ns = task_active_pid_ns(rcu_dereference(current->parent));
+ if (ns)
+ message = pid_nr_ns(pid, ns);
+ rcu_read_unlock();
+
+ ptrace_event(event, message);
+}
+
+/**
+ * ptrace_init_task - initialize ptrace state for a new child
+ * @child: new child task
+ * @ptrace: true if child should be ptrace'd by parent's tracer
+ *
+ * This is called immediately after adding @child to its parent's children
+ * list. @ptrace is false in the normal case, and true to ptrace @child.
+ *
+ * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
+ */
+static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
+{
+ INIT_LIST_HEAD(&child->ptrace_entry);
+ INIT_LIST_HEAD(&child->ptraced);
+ child->jobctl = 0;
+ child->ptrace = 0;
+ child->parent = child->real_parent;
+
+ if (unlikely(ptrace) && current->ptrace) {
+ child->ptrace = current->ptrace;
+ __ptrace_link(child, current->parent);
+
+ if (child->ptrace & PT_SEIZED)
+ task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
+ else
+ sigaddset(&child->pending.signal, SIGSTOP);
+
+ set_tsk_thread_flag(child, TIF_SIGPENDING);
+ }
+}
+
+/**
+ * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
+ * @task: task in %EXIT_DEAD state
+ *
+ * Called with write_lock(&tasklist_lock) held.
+ */
+static inline void ptrace_release_task(struct task_struct *task)
+{
+ BUG_ON(!list_empty(&task->ptraced));
+ ptrace_unlink(task);
+ BUG_ON(!list_empty(&task->ptrace_entry));
+}
+
+#ifndef force_successful_syscall_return
+/*
+ * System call handlers that, upon successful completion, need to return a
+ * negative value should call force_successful_syscall_return() right before
+ * returning. On architectures where the syscall convention provides for a
+ * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
+ * others), this macro can be used to ensure that the error flag will not get
+ * set. On architectures which do not support a separate error flag, the macro
+ * is a no-op and the spurious error condition needs to be filtered out by some
+ * other means (e.g., in user-level, by passing an extra argument to the
+ * syscall handler, or something along those lines).
+ */
+#define force_successful_syscall_return() do { } while (0)
+#endif
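+
+/*
+ * Illustrative sketch (editorial, not part of the upstream header): a
+ * handler whose successful result is legitimately negative calls the
+ * macro right before returning; compute_value() is a placeholder:
+ *
+ *    long ret = compute_value();
+ *    force_successful_syscall_return();
+ *    return ret;
+ */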
+
+#ifndef is_syscall_success
+/*
+ * On most systems we can tell if a syscall is a success based on if the retval
+ * is an error value. On some systems like ia64 and powerpc they have different
+ * indicators of success/failure and must define their own.
+ */
+#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
+#endif
+
+/*
+ * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
+ *
+ * These do-nothing inlines are used when the arch does not
+ * implement single-step. The kerneldoc comments are here
+ * to document the interface for all arch definitions.
+ */
+
+#ifndef arch_has_single_step
+/**
+ * arch_has_single_step - does this CPU support user-mode single-step?
+ *
+ * If this is defined, then there must be function declarations or
+ * inlines for user_enable_single_step() and user_disable_single_step().
+ * arch_has_single_step() should evaluate to nonzero iff the machine
+ * supports instruction single-step for user mode.
+ * It can be a constant or it can test a CPU feature bit.
+ */
+#define arch_has_single_step() (0)
+
+/**
+ * user_enable_single_step - single-step in user-mode task
+ * @task: either current or a task stopped in %TASK_TRACED
+ *
+ * This can only be called when arch_has_single_step() has returned nonzero.
+ * Set @task so that when it returns to user mode, it will trap after the
+ * next single instruction executes. If arch_has_block_step() is defined,
+ * this must clear the effects of user_enable_block_step() too.
+ */
+static inline void user_enable_single_step(struct task_struct *task)
+{
+ BUG(); /* This can never be called. */
+}
+
+/**
+ * user_disable_single_step - cancel user-mode single-step
+ * @task: either current or a task stopped in %TASK_TRACED
+ *
+ * Clear @task of the effects of user_enable_single_step() and
+ * user_enable_block_step(). This can be called whether or not either
+ * of those was ever called on @task, and even if arch_has_single_step()
+ * returned zero.
+ */
+static inline void user_disable_single_step(struct task_struct *task)
+{
+}
+#else
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
+#endif /* arch_has_single_step */
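+
+/*
+ * Usage sketch (editorial illustration): generic code can use these
+ * helpers without per-arch #ifdefs, e.g. when handling PTRACE_SINGLESTEP:
+ *
+ *    if (!arch_has_single_step())
+ *            return -EIO;
+ *    user_enable_single_step(child);
+ */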
+
+#ifndef arch_has_block_step
+/**
+ * arch_has_block_step - does this CPU support user-mode block-step?
+ *
+ * If this is defined, then there must be a function declaration or inline
+ * for user_enable_block_step(), and arch_has_single_step() must be defined
+ * too. arch_has_block_step() should evaluate to nonzero iff the machine
+ * supports step-until-branch for user mode. It can be a constant or it
+ * can test a CPU feature bit.
+ */
+#define arch_has_block_step() (0)
+
+/**
+ * user_enable_block_step - step until branch in user-mode task
+ * @task: either current or a task stopped in %TASK_TRACED
+ *
+ * This can only be called when arch_has_block_step() has returned nonzero,
+ * and will never be called when single-instruction stepping is being used.
+ * Set @task so that when it returns to user mode, it will trap after the
+ * next branch or trap taken.
+ */
+static inline void user_enable_block_step(struct task_struct *task)
+{
+ BUG(); /* This can never be called. */
+}
+#else
+extern void user_enable_block_step(struct task_struct *);
+#endif /* arch_has_block_step */
+
+#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
+extern void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info);
+#else
+static inline void user_single_step_siginfo(struct task_struct *tsk,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ memset(info, 0, sizeof(*info));
+ info->si_signo = SIGTRAP;
+}
+#endif
+
+#ifndef arch_ptrace_stop_needed
+/**
+ * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
+ * @code: current->exit_code value ptrace will stop with
+ * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
+ *
+ * This is called with the siglock held, to decide whether or not it's
+ * necessary to release the siglock and call arch_ptrace_stop() with the
+ * same @code and @info arguments. It can be defined to a constant if
+ * arch_ptrace_stop() is never required, or always is. On machines where
+ * this makes sense, it should be defined to a quick test to optimize out
+ * calling arch_ptrace_stop() when it would be superfluous. For example,
+ * if the thread has not been back to user mode since the last stop, the
+ * thread state might indicate that nothing needs to be done.
+ *
+ * arch_ptrace_stop_needed() is guaranteed to be consulted once before a task
+ * stops for ptrace, so any arch-specific work required prior to the stop can
+ * be gated here and carried out by arch_ptrace_stop().
+ */
+#define arch_ptrace_stop_needed(code, info) (0)
+#endif
+
+#ifndef arch_ptrace_stop
+/**
+ * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
+ * @code: current->exit_code value ptrace will stop with
+ * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
+ *
+ * This is called with no locks held when arch_ptrace_stop_needed() has
+ * just returned nonzero. It is allowed to block, e.g. for user memory
+ * access. The arch can have machine-specific work to be done before
+ * ptrace stops. On ia64, register backing store gets written back to user
+ * memory here. Since this can be costly (requires dropping the siglock),
+ * we only do it when the arch requires it for this particular stop, as
+ * indicated by arch_ptrace_stop_needed().
+ */
+#define arch_ptrace_stop(code, info) do { } while (0)
+#endif
+
+#ifndef current_pt_regs
+#define current_pt_regs() task_pt_regs(current)
+#endif
+
+#ifndef ptrace_signal_deliver
+#define ptrace_signal_deliver() ((void)0)
+#endif
+
+/*
+ * unlike current_pt_regs(), this one is equal to task_pt_regs(current)
+ * on *all* architectures; the only reason to have a per-arch definition
+ * is optimisation.
+ */
+#ifndef signal_pt_regs
+#define signal_pt_regs() task_pt_regs(current)
+#endif
+
+#ifndef current_user_stack_pointer
+#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
+#endif
+
+extern int task_current_syscall(struct task_struct *target, long *callno,
+ unsigned long args[6], unsigned int maxargs,
+ unsigned long *sp, unsigned long *pc);
+
+#endif
diff --git a/include/linux/pvclock_gtod.h b/include/linux/pvclock_gtod.h
new file mode 100644
index 000000000..a71d2dbd3
--- /dev/null
+++ b/include/linux/pvclock_gtod.h
@@ -0,0 +1,16 @@
+#ifndef _PVCLOCK_GTOD_H
+#define _PVCLOCK_GTOD_H
+
+#include <linux/notifier.h>
+
+/*
+ * The pvclock gtod notifier is called when the system time is updated
+ * and is used to keep guest time synchronized with host time.
+ *
+ * The 'action' parameter passed to the notifier function is true
+ * (non-zero) if the system time was stepped, and false (0) otherwise.
+ */
+extern int pvclock_gtod_register_notifier(struct notifier_block *nb);
+extern int pvclock_gtod_unregister_notifier(struct notifier_block *nb);
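+
+/*
+ * Usage sketch (editorial illustration; my_gtod_update and my_gtod_nb are
+ * made-up names): a guest clock driver registers a notifier like this:
+ *
+ *    static int my_gtod_update(struct notifier_block *nb,
+ *                              unsigned long stepped, void *priv)
+ *    {
+ *            return NOTIFY_OK;
+ *    }
+ *
+ *    static struct notifier_block my_gtod_nb = {
+ *            .notifier_call = my_gtod_update,
+ *    };
+ *
+ *    pvclock_gtod_register_notifier(&my_gtod_nb);
+ */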
+
+#endif /* _PVCLOCK_GTOD_H */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
new file mode 100644
index 000000000..e90628cac
--- /dev/null
+++ b/include/linux/pwm.h
@@ -0,0 +1,312 @@
+#ifndef __LINUX_PWM_H
+#define __LINUX_PWM_H
+
+#include <linux/err.h>
+#include <linux/of.h>
+
+struct pwm_device;
+struct seq_file;
+
+#if IS_ENABLED(CONFIG_PWM)
+/*
+ * pwm_request - request a PWM device
+ */
+struct pwm_device *pwm_request(int pwm_id, const char *label);
+
+/*
+ * pwm_free - free a PWM device
+ */
+void pwm_free(struct pwm_device *pwm);
+
+/*
+ * pwm_config - change a PWM device configuration
+ */
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
+
+/*
+ * pwm_enable - start a PWM output toggling
+ */
+int pwm_enable(struct pwm_device *pwm);
+
+/*
+ * pwm_disable - stop a PWM output toggling
+ */
+void pwm_disable(struct pwm_device *pwm);
+#else
+static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void pwm_free(struct pwm_device *pwm)
+{
+}
+
+static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ return -EINVAL;
+}
+
+static inline int pwm_enable(struct pwm_device *pwm)
+{
+ return -EINVAL;
+}
+
+static inline void pwm_disable(struct pwm_device *pwm)
+{
+}
+#endif
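+
+/*
+ * Consumer usage sketch (editorial illustration; the PWM id, label and
+ * timing values are made up): request PWM 0, program a 1 ms period with
+ * a 50% duty cycle and start it:
+ *
+ *    struct pwm_device *pwm = pwm_request(0, "example");
+ *
+ *    if (!IS_ERR(pwm)) {
+ *            pwm_config(pwm, 500000, 1000000);
+ *            pwm_enable(pwm);
+ *    }
+ */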
+
+struct pwm_chip;
+
+/**
+ * enum pwm_polarity - polarity of a PWM signal
+ * @PWM_POLARITY_NORMAL: a high signal for the duration of the duty-
+ * cycle, followed by a low signal for the remainder of the pulse
+ * period
+ * @PWM_POLARITY_INVERSED: a low signal for the duration of the duty-
+ * cycle, followed by a high signal for the remainder of the pulse
+ * period
+ */
+enum pwm_polarity {
+ PWM_POLARITY_NORMAL,
+ PWM_POLARITY_INVERSED,
+};
+
+enum {
+ PWMF_REQUESTED = 1 << 0,
+ PWMF_ENABLED = 1 << 1,
+ PWMF_EXPORTED = 1 << 2,
+};
+
+struct pwm_device {
+ const char *label;
+ unsigned long flags;
+ unsigned int hwpwm;
+ unsigned int pwm;
+ struct pwm_chip *chip;
+ void *chip_data;
+
+ unsigned int period; /* in nanoseconds */
+ unsigned int duty_cycle; /* in nanoseconds */
+ enum pwm_polarity polarity;
+};
+
+static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
+{
+ if (pwm)
+ pwm->period = period;
+}
+
+static inline unsigned int pwm_get_period(struct pwm_device *pwm)
+{
+ return pwm ? pwm->period : 0;
+}
+
+static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
+{
+ if (pwm)
+ pwm->duty_cycle = duty;
+}
+
+static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
+{
+ return pwm ? pwm->duty_cycle : 0;
+}
+
+/*
+ * pwm_set_polarity - configure the polarity of a PWM signal
+ */
+int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
+
+/**
+ * struct pwm_ops - PWM controller operations
+ * @request: optional hook for requesting a PWM
+ * @free: optional hook for freeing a PWM
+ * @config: configure duty cycles and period length for this PWM
+ * @set_polarity: configure the polarity of this PWM
+ * @enable: enable PWM output toggling
+ * @disable: disable PWM output toggling
+ * @dbg_show: optional routine to show contents in debugfs
+ * @owner: helps prevent removal of modules exporting active PWMs
+ */
+struct pwm_ops {
+ int (*request)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ void (*free)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ int (*config)(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ int duty_ns, int period_ns);
+ int (*set_polarity)(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity);
+ int (*enable)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+ void (*disable)(struct pwm_chip *chip,
+ struct pwm_device *pwm);
+#ifdef CONFIG_DEBUG_FS
+ void (*dbg_show)(struct pwm_chip *chip,
+ struct seq_file *s);
+#endif
+ struct module *owner;
+};
+
+/**
+ * struct pwm_chip - abstract a PWM controller
+ * @dev: device providing the PWMs
+ * @list: list node for internal use
+ * @ops: callbacks for this PWM controller
+ * @base: number of first PWM controlled by this chip
+ * @npwm: number of PWMs controlled by this chip
+ * @pwms: array of PWM devices allocated by the framework
+ * @can_sleep: must be true if the .config(), .enable() or .disable()
+ * operations may sleep
+ */
+struct pwm_chip {
+ struct device *dev;
+ struct list_head list;
+ const struct pwm_ops *ops;
+ int base;
+ unsigned int npwm;
+
+ struct pwm_device *pwms;
+
+ struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+ unsigned int of_pwm_n_cells;
+ bool can_sleep;
+};
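+
+/*
+ * Provider-side sketch (editorial illustration; foo_pwm_ops is a
+ * placeholder): a driver fills in a pwm_chip and registers it with
+ * pwmchip_add(); base = -1 requests dynamic PWM numbering:
+ *
+ *    chip->dev = &pdev->dev;
+ *    chip->ops = &foo_pwm_ops;
+ *    chip->base = -1;
+ *    chip->npwm = 2;
+ *    ret = pwmchip_add(chip);
+ */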
+
+#if IS_ENABLED(CONFIG_PWM)
+int pwm_set_chip_data(struct pwm_device *pwm, void *data);
+void *pwm_get_chip_data(struct pwm_device *pwm);
+
+int pwmchip_add(struct pwm_chip *chip);
+int pwmchip_remove(struct pwm_chip *chip);
+struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label);
+
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+ const struct of_phandle_args *args);
+
+struct pwm_device *pwm_get(struct device *dev, const char *con_id);
+struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id);
+void pwm_put(struct pwm_device *pwm);
+
+struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
+struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
+ const char *con_id);
+void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
+
+bool pwm_can_sleep(struct pwm_device *pwm);
+#else
+static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
+{
+ return -EINVAL;
+}
+
+static inline void *pwm_get_chip_data(struct pwm_device *pwm)
+{
+ return NULL;
+}
+
+static inline int pwmchip_add(struct pwm_chip *chip)
+{
+ return -EINVAL;
+}
+
+static inline int pwmchip_remove(struct pwm_chip *chip)
+{
+ return -EINVAL;
+}
+
+static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
+ unsigned int index,
+ const char *label)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct pwm_device *pwm_get(struct device *dev,
+ const char *consumer)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct pwm_device *of_pwm_get(struct device_node *np,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void pwm_put(struct pwm_device *pwm)
+{
+}
+
+static inline struct pwm_device *devm_pwm_get(struct device *dev,
+ const char *consumer)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
+ struct device_node *np,
+ const char *con_id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
+{
+}
+
+static inline bool pwm_can_sleep(struct pwm_device *pwm)
+{
+ return false;
+}
+#endif
+
+struct pwm_lookup {
+ struct list_head list;
+ const char *provider;
+ unsigned int index;
+ const char *dev_id;
+ const char *con_id;
+ unsigned int period;
+ enum pwm_polarity polarity;
+};
+
+#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \
+ { \
+ .provider = _provider, \
+ .index = _index, \
+ .dev_id = _dev_id, \
+ .con_id = _con_id, \
+ .period = _period, \
+ .polarity = _polarity \
+ }
+
+#if IS_ENABLED(CONFIG_PWM)
+void pwm_add_table(struct pwm_lookup *table, size_t num);
+#else
+static inline void pwm_add_table(struct pwm_lookup *table, size_t num)
+{
+}
+#endif
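+
+/*
+ * Board-file sketch (editorial illustration; the provider and device
+ * names are made up): map PWM 0 of "pwm-controller.0" to the "backlight"
+ * consumer of a pwm-backlight device, with a 5 ms period:
+ *
+ *    static struct pwm_lookup board_pwm_lookup[] = {
+ *            PWM_LOOKUP("pwm-controller.0", 0, "pwm-backlight.0",
+ *                       "backlight", 5000000, PWM_POLARITY_NORMAL),
+ *    };
+ *
+ *    pwm_add_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup));
+ */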
+
+#ifdef CONFIG_PWM_SYSFS
+void pwmchip_sysfs_export(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+#else
+static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
+{
+}
+
+static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
+{
+}
+#endif /* CONFIG_PWM_SYSFS */
+
+#endif /* __LINUX_PWM_H */
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
new file mode 100644
index 000000000..efdd9227a
--- /dev/null
+++ b/include/linux/pwm_backlight.h
@@ -0,0 +1,25 @@
+/*
+ * Generic PWM backlight driver data - see drivers/video/backlight/pwm_bl.c
+ */
+#ifndef __LINUX_PWM_BACKLIGHT_H
+#define __LINUX_PWM_BACKLIGHT_H
+
+#include <linux/backlight.h>
+
+struct platform_pwm_backlight_data {
+ int pwm_id;
+ unsigned int max_brightness;
+ unsigned int dft_brightness;
+ unsigned int lth_brightness;
+ unsigned int pwm_period_ns;
+ unsigned int *levels;
+ /* TODO remove once all users are switched to gpiod_* API */
+ int enable_gpio;
+ int (*init)(struct device *dev);
+ int (*notify)(struct device *dev, int brightness);
+ void (*notify_after)(struct device *dev, int brightness);
+ void (*exit)(struct device *dev);
+ int (*check_fb)(struct device *dev, struct fb_info *info);
+};
+
+#endif
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
new file mode 100644
index 000000000..e1ab6e86c
--- /dev/null
+++ b/include/linux/pxa168_eth.h
@@ -0,0 +1,33 @@
+/*
+ * pxa168 ethernet platform device data definition file.
+ */
+#ifndef __LINUX_PXA168_ETH_H
+#define __LINUX_PXA168_ETH_H
+
+#include <linux/phy.h>
+
+struct pxa168_eth_platform_data {
+ int port_number;
+ int phy_addr;
+
+ /*
+ * If speed is 0, then speed and duplex are autonegotiated.
+ */
+ int speed; /* 0, SPEED_10, SPEED_100 */
+ int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
+ phy_interface_t intf;
+
+ /*
+ * Override default RX/TX queue sizes if nonzero.
+ */
+ int rx_queue_size;
+ int tx_queue_size;
+
+ /*
+ * init callback is used for board-specific initialization,
+ * e.g. on Aspenite it is used to initialize the PHY transceiver.
+ */
+ int (*init)(void);
+};
+
+#endif /* __LINUX_PXA168_ETH_H */
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
new file mode 100644
index 000000000..dab545bb6
--- /dev/null
+++ b/include/linux/pxa2xx_ssp.h
@@ -0,0 +1,261 @@
+/*
+ * pxa2xx_ssp.h
+ *
+ * Copyright (C) 2003 Russell King, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This driver supports the following PXA CPU/SSP ports:-
+ *
+ * PXA250 SSP
+ * PXA255 SSP, NSSP
+ * PXA26x SSP, NSSP, ASSP
+ * PXA27x SSP1, SSP2, SSP3
+ * PXA3xx SSP1, SSP2, SSP3, SSP4
+ */
+
+#ifndef __LINUX_SSP_H
+#define __LINUX_SSP_H
+
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+
+/*
+ * SSP Serial Port Registers
+ * PXA250, PXA255, PXA26x and PXA27x SSP controllers are all slightly different.
+ * PXA255, PXA26x and PXA27x have extra ports, registers and bits.
+ */
+
+#define SSCR0 (0x00) /* SSP Control Register 0 */
+#define SSCR1 (0x04) /* SSP Control Register 1 */
+#define SSSR (0x08) /* SSP Status Register */
+#define SSITR (0x0C) /* SSP Interrupt Test Register */
+#define SSDR (0x10) /* SSP Data Write/Data Read Register */
+
+#define SSTO (0x28) /* SSP Time Out Register */
+#define DDS_RATE (0x28) /* SSP DDS Clock Rate Register (Intel Quark) */
+#define SSPSP (0x2C) /* SSP Programmable Serial Protocol */
+#define SSTSA (0x30) /* SSP Tx Timeslot Active */
+#define SSRSA (0x34) /* SSP Rx Timeslot Active */
+#define SSTSS (0x38) /* SSP Timeslot Status */
+#define SSACD (0x3C) /* SSP Audio Clock Divider */
+#define SSACDD (0x40) /* SSP Audio Clock Dither Divider */
+
+/* Common PXA2xx bits first */
+#define SSCR0_DSS (0x0000000f) /* Data Size Select (mask) */
+#define SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..16] */
+#define SSCR0_FRF (0x00000030) /* FRame Format (mask) */
+#define SSCR0_Motorola (0x0 << 4) /* Motorola's Serial Peripheral Interface (SPI) */
+#define SSCR0_TI (0x1 << 4) /* Texas Instruments' Synchronous Serial Protocol (SSP) */
+#define SSCR0_National (0x2 << 4) /* National Microwire */
+#define SSCR0_ECS (1 << 6) /* External clock select */
+#define SSCR0_SSE (1 << 7) /* Synchronous Serial Port Enable */
+#define SSCR0_SCR(x) ((x) << 8) /* Serial Clock Rate (mask) */
+
+/* PXA27x, PXA3xx */
+#define SSCR0_EDSS (1 << 20) /* Extended data size select */
+#define SSCR0_NCS (1 << 21) /* Network clock select */
+#define SSCR0_RIM (1 << 22) /* Receive FIFO overrun interrupt mask */
+#define SSCR0_TUM (1 << 23) /* Transmit FIFO underrun interrupt mask */
+#define SSCR0_FRDC (0x07000000) /* Frame rate divider control (mask) */
+#define SSCR0_SlotsPerFrm(x) (((x) - 1) << 24) /* Time slots per frame [1..8] */
+#define SSCR0_FPCKE (1 << 29) /* FIFO packing enable */
+#define SSCR0_ACS (1 << 30) /* Audio clock select */
+#define SSCR0_MOD (1 << 31) /* Mode (normal or network) */
+
+
+#define SSCR1_RIE (1 << 0) /* Receive FIFO Interrupt Enable */
+#define SSCR1_TIE (1 << 1) /* Transmit FIFO Interrupt Enable */
+#define SSCR1_LBM (1 << 2) /* Loop-Back Mode */
+#define SSCR1_SPO (1 << 3) /* Motorola SPI SSPSCLK polarity setting */
+#define SSCR1_SPH (1 << 4) /* Motorola SPI SSPSCLK phase setting */
+#define SSCR1_MWDS (1 << 5) /* Microwire Transmit Data Size */
+
+#define SSSR_ALT_FRM_MASK 3 /* Masks the SFRM signal number */
+#define SSSR_TNF (1 << 2) /* Transmit FIFO Not Full */
+#define SSSR_RNE (1 << 3) /* Receive FIFO Not Empty */
+#define SSSR_BSY (1 << 4) /* SSP Busy */
+#define SSSR_TFS (1 << 5) /* Transmit FIFO Service Request */
+#define SSSR_RFS (1 << 6) /* Receive FIFO Service Request */
+#define SSSR_ROR (1 << 7) /* Receive FIFO Overrun */
+
+#ifdef CONFIG_ARCH_PXA
+#define RX_THRESH_DFLT 8
+#define TX_THRESH_DFLT 8
+
+#define SSSR_TFL_MASK (0xf << 8) /* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK (0xf << 12) /* Receive FIFO Level mask */
+
+#define SSCR1_TFT (0x000003c0) /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..16] */
+#define SSCR1_RFT (0x00003c00) /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..16] */
+
+#else
+
+#define RX_THRESH_DFLT 2
+#define TX_THRESH_DFLT 2
+
+#define SSSR_TFL_MASK (0x3 << 8) /* Transmit FIFO Level mask */
+#define SSSR_RFL_MASK (0x3 << 12) /* Receive FIFO Level mask */
+
+#define SSCR1_TFT (0x000000c0) /* Transmit FIFO Threshold (mask) */
+#define SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..4] */
+#define SSCR1_RFT (0x00000c00) /* Receive FIFO Threshold (mask) */
+#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
+#endif
+
+/* QUARK_X1000 SSCR0 bit definition */
+#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */
+#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
+#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
+#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
+
+#define RX_THRESH_QUARK_X1000_DFLT 1
+#define TX_THRESH_QUARK_X1000_DFLT 16
+
+#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */
+#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */
+
+#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
+#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
+#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
+#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
+#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
+
+/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
+#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
+#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
+#define SSCR1_TTELP (1 << 31) /* TXD Tristate Enable Last Phase */
+#define SSCR1_TTE (1 << 30) /* TXD Tristate Enable */
+#define SSCR1_EBCEI (1 << 29) /* Enable Bit Count Error interrupt */
+#define SSCR1_SCFR (1 << 28) /* Slave Clock free Running */
+#define SSCR1_ECRA (1 << 27) /* Enable Clock Request A */
+#define SSCR1_ECRB (1 << 26) /* Enable Clock request B */
+#define SSCR1_SCLKDIR (1 << 25) /* Serial Bit Rate Clock Direction */
+#define SSCR1_SFRMDIR (1 << 24) /* Frame Direction */
+#define SSCR1_RWOT (1 << 23) /* Receive Without Transmit */
+#define SSCR1_TRAIL (1 << 22) /* Trailing Byte */
+#define SSCR1_TSRE (1 << 21) /* Transmit Service Request Enable */
+#define SSCR1_RSRE (1 << 20) /* Receive Service Request Enable */
+#define SSCR1_TINTE (1 << 19) /* Receiver Time-out Interrupt enable */
+#define SSCR1_PINTE (1 << 18) /* Peripheral Trailing Byte Interrupt Enable */
+#define SSCR1_IFS (1 << 16) /* Invert Frame Signal */
+#define SSCR1_STRF (1 << 15) /* Select FIFO or EFWR */
+#define SSCR1_EFWR (1 << 14) /* Enable FIFO Write/Read */
+
+#define SSSR_BCE (1 << 23) /* Bit Count Error */
+#define SSSR_CSS (1 << 22) /* Clock Synchronisation Status */
+#define SSSR_TUR (1 << 21) /* Transmit FIFO Under Run */
+#define SSSR_EOC (1 << 20) /* End Of Chain */
+#define SSSR_TINT (1 << 19) /* Receiver Time-out Interrupt */
+#define SSSR_PINT (1 << 18) /* Peripheral Trailing Byte Interrupt */
+
+
+#define SSPSP_SCMODE(x) ((x) << 0) /* Serial Bit Rate Clock Mode */
+#define SSPSP_SFRMP (1 << 2) /* Serial Frame Polarity */
+#define SSPSP_ETDS (1 << 3) /* End of Transfer data State */
+#define SSPSP_STRTDLY(x) ((x) << 4) /* Start Delay */
+#define SSPSP_DMYSTRT(x) ((x) << 7) /* Dummy Start */
+#define SSPSP_SFRMDLY(x) ((x) << 9) /* Serial Frame Delay */
+#define SSPSP_SFRMWDTH(x) ((x) << 16) /* Serial Frame Width */
+#define SSPSP_DMYSTOP(x) ((x) << 23) /* Dummy Stop */
+#define SSPSP_FSRT (1 << 25) /* Frame Sync Relative Timing */
+
+/* PXA3xx */
+#define SSPSP_EDMYSTRT(x) ((x) << 26) /* Extended Dummy Start */
+#define SSPSP_EDMYSTOP(x) ((x) << 28) /* Extended Dummy Stop */
+#define SSPSP_TIMING_MASK (0x7f8001f0)
+
+#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
+#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
+#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
+#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
+
+/* LPSS SSP */
+#define SSITF 0x44 /* TX FIFO trigger level */
+#define SSITF_TxLoThresh(x) (((x) - 1) << 8)
+#define SSITF_TxHiThresh(x) ((x) - 1)
+
+#define SSIRF 0x48 /* RX FIFO trigger level */
+#define SSIRF_RxThresh(x) ((x) - 1)
+
+enum pxa_ssp_type {
+ SSP_UNDEFINED = 0,
+ PXA25x_SSP, /* pxa 210, 250, 255, 26x */
+ PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
+ PXA27x_SSP,
+ PXA3xx_SSP,
+ PXA168_SSP,
+ PXA910_SSP,
+ CE4100_SSP,
+ LPSS_SSP,
+ QUARK_X1000_SSP,
+};
+
+struct ssp_device {
+ struct platform_device *pdev;
+ struct list_head node;
+
+ struct clk *clk;
+ void __iomem *mmio_base;
+ unsigned long phys_base;
+
+ const char *label;
+ int port_id;
+ int type;
+ int use_count;
+ int irq;
+ int drcmr_rx;
+ int drcmr_tx;
+
+ struct device_node *of_node;
+};
+
+/**
+ * pxa_ssp_write_reg - Write to a SSP register
+ *
+ * @dev: SSP device to access
+ * @reg: Register to write to
+ * @val: Value to be written.
+ */
+static inline void pxa_ssp_write_reg(struct ssp_device *dev, u32 reg, u32 val)
+{
+ __raw_writel(val, dev->mmio_base + reg);
+}
+
+/**
+ * pxa_ssp_read_reg - Read from a SSP register
+ *
+ * @dev: SSP device to access
+ * @reg: Register to read from
+ */
+static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
+{
+ return __raw_readl(dev->mmio_base + reg);
+}
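+
+/*
+ * Read-modify-write sketch (editorial illustration): enable the port by
+ * setting SSE in SSCR0 with the helpers above:
+ *
+ *    u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
+ *
+ *    pxa_ssp_write_reg(ssp, SSCR0, sscr0 | SSCR0_SSE);
+ */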
+
+#if IS_ENABLED(CONFIG_PXA_SSP)
+struct ssp_device *pxa_ssp_request(int port, const char *label);
+void pxa_ssp_free(struct ssp_device *);
+struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
+ const char *label);
+#else
+static inline struct ssp_device *pxa_ssp_request(int port, const char *label)
+{
+ return NULL;
+}
+static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
+ const char *name)
+{
+ return NULL;
+}
+static inline void pxa_ssp_free(struct ssp_device *ssp) {}
+#endif
+
+#endif
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
new file mode 100644
index 000000000..d7a974d5f
--- /dev/null
+++ b/include/linux/qcom_scm.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_SCM_H
+#define __QCOM_SCM_H
+
+extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
+extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
+
+#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
+#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
+
+extern void qcom_scm_cpu_power_down(u32 flags);
+
+#define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+
+extern u32 qcom_scm_get_version(void);
+
+#endif
diff --git a/include/linux/qnx6_fs.h b/include/linux/qnx6_fs.h
new file mode 100644
index 000000000..26049eab9
--- /dev/null
+++ b/include/linux/qnx6_fs.h
@@ -0,0 +1,134 @@
+/*
+ * Name : qnx6_fs.h
+ * Author : Kai Bankett
+ * Function : qnx6 global filesystem definitions
+ * History : 17-01-2012 created
+ */
+#ifndef _LINUX_QNX6_FS_H
+#define _LINUX_QNX6_FS_H
+
+#include <linux/types.h>
+#include <linux/magic.h>
+
+#define QNX6_ROOT_INO 1
+
+/* for di_status */
+#define QNX6_FILE_DIRECTORY 0x01
+#define QNX6_FILE_DELETED 0x02
+#define QNX6_FILE_NORMAL 0x03
+
+#define QNX6_SUPERBLOCK_SIZE 0x200 /* superblock always is 512 bytes */
+#define QNX6_SUPERBLOCK_AREA 0x1000 /* area reserved for superblock */
+#define QNX6_BOOTBLOCK_SIZE 0x2000 /* heading bootblock area */
+#define QNX6_DIR_ENTRY_SIZE 0x20 /* dir entry size of 32 bytes */
+#define QNX6_INODE_SIZE 0x80 /* each inode is 128 bytes */
+#define QNX6_INODE_SIZE_BITS 7 /* inode entry size shift */
+
+#define QNX6_NO_DIRECT_POINTERS 16 /* 16 blockptrs in sbl/inode */
+#define QNX6_PTR_MAX_LEVELS 5 /* maximum indirect levels */
+
+/* for filenames */
+#define QNX6_SHORT_NAME_MAX 27
+#define QNX6_LONG_NAME_MAX 510
+
+/* list of mount options */
+#define QNX6_MOUNT_MMI_FS 0x010000 /* mount as Audi MMI 3G fs */
+
+/*
+ * This is the original qnx6 inode layout on disk.
+ * Each inode is 128 byte long.
+ */
+struct qnx6_inode_entry {
+ __fs64 di_size;
+ __fs32 di_uid;
+ __fs32 di_gid;
+ __fs32 di_ftime;
+ __fs32 di_mtime;
+ __fs32 di_atime;
+ __fs32 di_ctime;
+ __fs16 di_mode;
+ __fs16 di_ext_mode;
+ __fs32 di_block_ptr[QNX6_NO_DIRECT_POINTERS];
+ __u8 di_filelevels;
+ __u8 di_status;
+ __u8 di_unknown2[2];
+ __fs32 di_zero2[6];
+};
+
+/*
+ * Each directory entry is at most 32 bytes long.
+ * If more characters or special characters are required, the name is
+ * stored in the longfilenames structure.
+ */
+struct qnx6_dir_entry {
+ __fs32 de_inode;
+ __u8 de_size;
+ char de_fname[QNX6_SHORT_NAME_MAX];
+};
+
+/*
+ * Longfilename direntries have a different structure
+ */
+struct qnx6_long_dir_entry {
+ __fs32 de_inode;
+ __u8 de_size;
+ __u8 de_unknown[3];
+ __fs32 de_long_inode;
+ __fs32 de_checksum;
+};
+
+struct qnx6_long_filename {
+ __fs16 lf_size;
+ __u8 lf_fname[QNX6_LONG_NAME_MAX];
+};
+
+struct qnx6_root_node {
+ __fs64 size;
+ __fs32 ptr[QNX6_NO_DIRECT_POINTERS];
+ __u8 levels;
+ __u8 mode;
+ __u8 spare[6];
+};
+
+struct qnx6_super_block {
+ __fs32 sb_magic;
+ __fs32 sb_checksum;
+ __fs64 sb_serial;
+ __fs32 sb_ctime; /* time the fs was created */
+ __fs32 sb_atime; /* last access time */
+ __fs32 sb_flags;
+ __fs16 sb_version1; /* filesystem version information */
+ __fs16 sb_version2; /* filesystem version information */
+ __u8 sb_volumeid[16];
+ __fs32 sb_blocksize;
+ __fs32 sb_num_inodes;
+ __fs32 sb_free_inodes;
+ __fs32 sb_num_blocks;
+ __fs32 sb_free_blocks;
+ __fs32 sb_allocgroup;
+ struct qnx6_root_node Inode;
+ struct qnx6_root_node Bitmap;
+ struct qnx6_root_node Longfile;
+ struct qnx6_root_node Unknown;
+};
+
+/* Audi MMI 3G superblock layout is different to plain qnx6 */
+struct qnx6_mmi_super_block {
+ __fs32 sb_magic;
+ __fs32 sb_checksum;
+ __fs64 sb_serial;
+ __u8 sb_spare0[12];
+ __u8 sb_id[12];
+ __fs32 sb_blocksize;
+ __fs32 sb_num_inodes;
+ __fs32 sb_free_inodes;
+ __fs32 sb_num_blocks;
+ __fs32 sb_free_blocks;
+ __u8 sb_spare1[4];
+ struct qnx6_root_node Inode;
+ struct qnx6_root_node Bitmap;
+ struct qnx6_root_node Longfile;
+ struct qnx6_root_node Unknown;
+};
+
+#endif
diff --git a/include/linux/quicklist.h b/include/linux/quicklist.h
new file mode 100644
index 000000000..bd466439c
--- /dev/null
+++ b/include/linux/quicklist.h
@@ -0,0 +1,93 @@
+#ifndef LINUX_QUICKLIST_H
+#define LINUX_QUICKLIST_H
+/*
+ * Fast allocation and disposal of pages. Pages must already be in the
+ * state expected after allocation when they are freed. Per-cpu lists of
+ * pages are kept that contain only node-local pages.
+ *
+ * (C) 2007, SGI. Christoph Lameter <clameter@sgi.com>
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/percpu.h>
+
+#ifdef CONFIG_QUICKLIST
+
+struct quicklist {
+ void *page;
+ int nr_pages;
+};
+
+DECLARE_PER_CPU(struct quicklist, quicklist)[CONFIG_NR_QUICK];
+
+/*
+ * The two key functions quicklist_alloc and quicklist_free are inline so
+ * that they may be custom compiled for the platform.
+ * Specifying a NULL ctor removes constructor support. Specifying
+ * a constant quicklist number allows the exact address in the per-cpu
+ * area to be determined at compile time.
+ *
+ * The fast path in quicklist_alloc touches only a per-cpu cacheline and
+ * the first cacheline of the page itself, so the overhead is minimal.
+ */
+static inline void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *))
+{
+ struct quicklist *q;
+ void **p = NULL;
+
+ q = &get_cpu_var(quicklist)[nr];
+ p = q->page;
+ if (likely(p)) {
+ q->page = p[0];
+ p[0] = NULL;
+ q->nr_pages--;
+ }
+ put_cpu_var(quicklist);
+ if (likely(p))
+ return p;
+
+ p = (void *)__get_free_page(flags | __GFP_ZERO);
+ if (ctor && p)
+ ctor(p);
+ return p;
+}
+
+static inline void __quicklist_free(int nr, void (*dtor)(void *), void *p,
+ struct page *page)
+{
+ struct quicklist *q;
+
+ q = &get_cpu_var(quicklist)[nr];
+ *(void **)p = q->page;
+ q->page = p;
+ q->nr_pages++;
+ put_cpu_var(quicklist);
+}
+
+static inline void quicklist_free(int nr, void (*dtor)(void *), void *pp)
+{
+ __quicklist_free(nr, dtor, pp, virt_to_page(pp));
+}
+
+static inline void quicklist_free_page(int nr, void (*dtor)(void *),
+ struct page *page)
+{
+ __quicklist_free(nr, dtor, page_address(page), page);
+}
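+
+/*
+ * Usage sketch (editorial illustration; list 0 and the NULL ctor/dtor are
+ * arbitrary choices): arch page-table code typically wraps these helpers:
+ *
+ *    pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, NULL);
+ *
+ *    quicklist_free(0, NULL, pgd);
+ */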
+
+void quicklist_trim(int nr, void (*dtor)(void *),
+ unsigned long min_pages, unsigned long max_free);
+
+unsigned long quicklist_total_size(void);
+
+#else
+
+static inline unsigned long quicklist_total_size(void)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* LINUX_QUICKLIST_H */
+
diff --git a/include/linux/quota.h b/include/linux/quota.h
new file mode 100644
index 000000000..b2505acfd
--- /dev/null
+++ b/include/linux/quota.h
@@ -0,0 +1,528 @@
+/*
+ * Copyright (c) 1982, 1986 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Robert Elz at The University of Melbourne.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _LINUX_QUOTA_
+#define _LINUX_QUOTA_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/percpu_counter.h>
+
+#include <linux/dqblk_xfs.h>
+#include <linux/dqblk_v1.h>
+#include <linux/dqblk_v2.h>
+
+#include <linux/atomic.h>
+#include <linux/uidgid.h>
+#include <linux/projid.h>
+#include <uapi/linux/quota.h>
+
+#undef USRQUOTA
+#undef GRPQUOTA
+#undef PRJQUOTA
+enum quota_type {
+ USRQUOTA = 0, /* element used for user quotas */
+ GRPQUOTA = 1, /* element used for group quotas */
+ PRJQUOTA = 2, /* element used for project quotas */
+};
+
+/* Masks for quota types when used as a bitmask */
+#define QTYPE_MASK_USR (1 << USRQUOTA)
+#define QTYPE_MASK_GRP (1 << GRPQUOTA)
+#define QTYPE_MASK_PRJ (1 << PRJQUOTA)
+
+typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
+typedef long long qsize_t; /* Type in which we store sizes */
+
+struct kqid { /* Type in which we store the quota identifier */
+ union {
+ kuid_t uid;
+ kgid_t gid;
+ kprojid_t projid;
+ };
+ enum quota_type type; /* USRQUOTA (uid) or GRPQUOTA (gid) or PRJQUOTA (projid) */
+};
+
+extern bool qid_eq(struct kqid left, struct kqid right);
+extern bool qid_lt(struct kqid left, struct kqid right);
+extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
+extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
+extern bool qid_valid(struct kqid qid);
+
+/**
+ * make_kqid - Map a user-namespace, type, qid tuple into a kqid.
+ * @from: User namespace that the qid is in
+ * @type: The type of quota
+ * @qid: Quota identifier
+ *
+ * Maps a user-namespace, type, qid tuple into a kernel-internal
+ * kqid, and returns that kqid.
+ *
+ * When there is no mapping defined for the user-namespace, type,
+ * qid tuple, an invalid kqid is returned. Callers are expected to
+ * test for and handle invalid kqids being returned.
+ * Invalid kqids may be tested for using qid_valid().
+ */
+static inline struct kqid make_kqid(struct user_namespace *from,
+ enum quota_type type, qid_t qid)
+{
+ struct kqid kqid;
+
+ kqid.type = type;
+ switch (type) {
+ case USRQUOTA:
+ kqid.uid = make_kuid(from, qid);
+ break;
+ case GRPQUOTA:
+ kqid.gid = make_kgid(from, qid);
+ break;
+ case PRJQUOTA:
+ kqid.projid = make_kprojid(from, qid);
+ break;
+ default:
+ BUG();
+ }
+ return kqid;
+}
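+
+/*
+ * Usage sketch (editorial illustration): convert a raw uid-style id from
+ * userspace into a kqid and reject it if the current user namespace has
+ * no mapping for it:
+ *
+ *    struct kqid qid = make_kqid(current_user_ns(), USRQUOTA, id);
+ *
+ *    if (!qid_valid(qid))
+ *            return -EINVAL;
+ */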
+
+/**
+ * make_kqid_invalid - Explicitly make an invalid kqid
+ * @type: The type of quota identifier
+ *
+ * Returns an invalid kqid with the specified type.
+ */
+static inline struct kqid make_kqid_invalid(enum quota_type type)
+{
+ struct kqid kqid;
+
+ kqid.type = type;
+ switch (type) {
+ case USRQUOTA:
+ kqid.uid = INVALID_UID;
+ break;
+ case GRPQUOTA:
+ kqid.gid = INVALID_GID;
+ break;
+ case PRJQUOTA:
+ kqid.projid = INVALID_PROJID;
+ break;
+ default:
+ BUG();
+ }
+ return kqid;
+}
+
+/**
+ * make_kqid_uid - Make a kqid from a kuid
+ * @uid: The kuid to make the quota identifier from
+ */
+static inline struct kqid make_kqid_uid(kuid_t uid)
+{
+ struct kqid kqid;
+ kqid.type = USRQUOTA;
+ kqid.uid = uid;
+ return kqid;
+}
+
+/**
+ * make_kqid_gid - Make a kqid from a kgid
+ * @gid: The kgid to make the quota identifier from
+ */
+static inline struct kqid make_kqid_gid(kgid_t gid)
+{
+ struct kqid kqid;
+ kqid.type = GRPQUOTA;
+ kqid.gid = gid;
+ return kqid;
+}
+
+/**
+ * make_kqid_projid - Make a kqid from a projid
+ * @projid: The kprojid to make the quota identifier from
+ */
+static inline struct kqid make_kqid_projid(kprojid_t projid)
+{
+ struct kqid kqid;
+ kqid.type = PRJQUOTA;
+ kqid.projid = projid;
+ return kqid;
+}
+
+
+extern spinlock_t dq_data_lock;
+
+/* Maximal numbers of writes for quota operation (insert/delete/update)
+ * (over VFS all formats) */
+#define DQUOT_INIT_ALLOC max(V1_INIT_ALLOC, V2_INIT_ALLOC)
+#define DQUOT_INIT_REWRITE max(V1_INIT_REWRITE, V2_INIT_REWRITE)
+#define DQUOT_DEL_ALLOC max(V1_DEL_ALLOC, V2_DEL_ALLOC)
+#define DQUOT_DEL_REWRITE max(V1_DEL_REWRITE, V2_DEL_REWRITE)
+
+/*
+ * Data for one user/group kept in memory
+ */
+struct mem_dqblk {
+ qsize_t dqb_bhardlimit; /* absolute limit on disk blks alloc */
+ qsize_t dqb_bsoftlimit; /* preferred limit on disk blks */
+ qsize_t dqb_curspace; /* current used space */
+ qsize_t dqb_rsvspace; /* current reserved space for delalloc*/
+ qsize_t dqb_ihardlimit; /* absolute limit on allocated inodes */
+ qsize_t dqb_isoftlimit; /* preferred inode limit */
+ qsize_t dqb_curinodes; /* current # allocated inodes */
+ time_t dqb_btime; /* time limit for excessive disk use */
+ time_t dqb_itime; /* time limit for excessive inode use */
+};
+
+/*
+ * Data for one quotafile kept in memory
+ */
+struct quota_format_type;
+
+struct mem_dqinfo {
+ struct quota_format_type *dqi_format;
+ int dqi_fmt_id; /* Id of the dqi_format - used when turning
+ * quotas on after remount RW */
+ struct list_head dqi_dirty_list; /* List of dirty dquots */
+ unsigned long dqi_flags;
+ unsigned int dqi_bgrace;
+ unsigned int dqi_igrace;
+ qsize_t dqi_max_spc_limit;
+ qsize_t dqi_max_ino_limit;
+ void *dqi_priv;
+};
+
+struct super_block;
+
+/* Mask for flags passed to userspace */
+#define DQF_GETINFO_MASK (DQF_ROOT_SQUASH | DQF_SYS_FILE)
+/* Mask for flags modifiable from userspace */
+#define DQF_SETINFO_MASK DQF_ROOT_SQUASH
+
+enum {
+ DQF_INFO_DIRTY_B = DQF_PRIVATE,
+};
+#define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
+
+extern void mark_info_dirty(struct super_block *sb, int type);
+static inline int info_dirty(struct mem_dqinfo *info)
+{
+ return test_bit(DQF_INFO_DIRTY_B, &info->dqi_flags);
+}
+
+enum {
+ DQST_LOOKUPS,
+ DQST_DROPS,
+ DQST_READS,
+ DQST_WRITES,
+ DQST_CACHE_HITS,
+ DQST_ALLOC_DQUOTS,
+ DQST_FREE_DQUOTS,
+ DQST_SYNCS,
+ _DQST_DQSTAT_LAST
+};
+
+struct dqstats {
+ int stat[_DQST_DQSTAT_LAST];
+ struct percpu_counter counter[_DQST_DQSTAT_LAST];
+};
+
+extern struct dqstats *dqstats_pcpu;
+extern struct dqstats dqstats;
+
+static inline void dqstats_inc(unsigned int type)
+{
+ percpu_counter_inc(&dqstats.counter[type]);
+}
+
+static inline void dqstats_dec(unsigned int type)
+{
+ percpu_counter_dec(&dqstats.counter[type]);
+}
+
+#define DQ_MOD_B 0 /* dquot modified since read */
+#define DQ_BLKS_B 1 /* uid/gid has been warned about blk limit */
+#define DQ_INODES_B 2 /* uid/gid has been warned about inode limit */
+#define DQ_FAKE_B 3 /* no limits only usage */
+#define DQ_READ_B 4 /* dquot was read into memory */
+#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */
+#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\
+ * for the mask of entries set via SETQUOTA\
+ * quotactl. They are set under dq_data_lock\
+ * and the quota format handling dquot can\
+ * clear them when it sees fit. */
+
+struct dquot {
+ struct hlist_node dq_hash; /* Hash list in memory */
+ struct list_head dq_inuse; /* List of all quotas */
+ struct list_head dq_free; /* Free list element */
+ struct list_head dq_dirty; /* List of dirty dquots */
+ struct mutex dq_lock; /* dquot IO lock */
+ atomic_t dq_count; /* Use count */
+ wait_queue_head_t dq_wait_unused; /* Wait queue for dquot to become unused */
+ struct super_block *dq_sb; /* superblock this applies to */
+ struct kqid dq_id; /* ID this applies to (uid, gid, projid) */
+ loff_t dq_off; /* Offset of dquot on disk */
+ unsigned long dq_flags; /* See DQ_* */
+ struct mem_dqblk dq_dqb; /* Diskquota usage */
+};
+
+/* Operations which must be implemented by each quota format */
+struct quota_format_ops {
+ int (*check_quota_file)(struct super_block *sb, int type); /* Detect whether file is in our format */
+ int (*read_file_info)(struct super_block *sb, int type); /* Read main info about file - called on quotaon() */
+ int (*write_file_info)(struct super_block *sb, int type); /* Write main info about file */
+ int (*free_file_info)(struct super_block *sb, int type); /* Called on quotaoff() */
+ int (*read_dqblk)(struct dquot *dquot); /* Read structure for one user */
+ int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */
+ int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot is being dropped */
+};
+
+/* Operations working with dquots */
+struct dquot_operations {
+ int (*write_dquot) (struct dquot *); /* Ordinary dquot write */
+ struct dquot *(*alloc_dquot)(struct super_block *, int); /* Allocate memory for new dquot */
+ void (*destroy_dquot)(struct dquot *); /* Free memory for dquot */
+ int (*acquire_dquot) (struct dquot *); /* Quota is going to be created on disk */
+ int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
+ int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
+ int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
+ /* get reserved quota for delayed alloc, value returned is managed by
+ * quota code only */
+ qsize_t *(*get_reserved_space) (struct inode *);
+ int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
+};
+
+struct path;
+
+/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
+struct qc_dqblk {
+ int d_fieldmask; /* mask of fields to change in ->set_dqblk() */
+ u64 d_spc_hardlimit; /* absolute limit on used space */
+ u64 d_spc_softlimit; /* preferred limit on used space */
+ u64 d_ino_hardlimit; /* maximum # allocated inodes */
+ u64 d_ino_softlimit; /* preferred inode limit */
+ u64 d_space; /* Space owned by the user */
+ u64 d_ino_count; /* # inodes owned by the user */
+ s64 d_ino_timer; /* zero if within inode limits */
+ /* if not, we refuse service */
+ s64 d_spc_timer; /* similar to above; for space */
+ int d_ino_warns; /* # warnings issued wrt num inodes */
+ int d_spc_warns; /* # warnings issued wrt used space */
+ u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
+ u64 d_rt_spc_softlimit; /* preferred limit on RT space */
+ u64 d_rt_space; /* realtime space owned */
+ s64 d_rt_spc_timer; /* similar to above; for RT space */
+ int d_rt_spc_warns; /* # warnings issued wrt RT space */
+};
+
+/*
+ * Field specifiers for ->set_dqblk() in struct qc_dqblk and also for
+ * ->set_info() in struct qc_info
+ */
+#define QC_INO_SOFT (1<<0)
+#define QC_INO_HARD (1<<1)
+#define QC_SPC_SOFT (1<<2)
+#define QC_SPC_HARD (1<<3)
+#define QC_RT_SPC_SOFT (1<<4)
+#define QC_RT_SPC_HARD (1<<5)
+#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
+ QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
+#define QC_SPC_TIMER (1<<6)
+#define QC_INO_TIMER (1<<7)
+#define QC_RT_SPC_TIMER (1<<8)
+#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
+#define QC_SPC_WARNS (1<<9)
+#define QC_INO_WARNS (1<<10)
+#define QC_RT_SPC_WARNS (1<<11)
+#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
+#define QC_SPACE (1<<12)
+#define QC_INO_COUNT (1<<13)
+#define QC_RT_SPACE (1<<14)
+#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+#define QC_FLAGS (1<<15)
+
+#define QCI_SYSFILE (1 << 0) /* Quota file is hidden from userspace */
+#define QCI_ROOT_SQUASH (1 << 1) /* Root squash turned on */
+#define QCI_ACCT_ENABLED (1 << 2) /* Quota accounting enabled */
+#define QCI_LIMITS_ENFORCED (1 << 3) /* Quota limits enforced */
+
+/* Structures for communicating via ->get_state */
+struct qc_type_state {
+ unsigned int flags; /* Flags QCI_* */
+ unsigned int spc_timelimit; /* Time after which space softlimit is
+ * enforced */
+ unsigned int ino_timelimit; /* Ditto for inode softlimit */
+ unsigned int rt_spc_timelimit; /* Ditto for real-time space */
+ unsigned int spc_warnlimit; /* Limit for number of space warnings */
+ unsigned int ino_warnlimit; /* Ditto for inodes */
+ unsigned int rt_spc_warnlimit; /* Ditto for real-time space */
+ unsigned long long ino; /* Inode number of quota file */
+ blkcnt_t blocks; /* Number of 512-byte blocks in the file */
+ blkcnt_t nextents; /* Number of extents in the file */
+};
+
+struct qc_state {
+ unsigned int s_incoredqs; /* Number of dquots in core */
+ /*
+ * Per quota type information. The array should really have
+ * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
+ * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough. Once VFS
+ * supports project quotas, this can be changed to MAXQUOTAS
+ */
+ struct qc_type_state s_state[XQM_MAXQUOTAS];
+};
+
+/* Structure for communicating via ->set_info */
+struct qc_info {
+ int i_fieldmask; /* mask of fields to change in ->set_info() */
+ unsigned int i_flags; /* Flags QCI_* */
+ unsigned int i_spc_timelimit; /* Time after which space softlimit is
+ * enforced */
+ unsigned int i_ino_timelimit; /* Ditto for inode softlimit */
+ unsigned int i_rt_spc_timelimit;/* Ditto for real-time space */
+ unsigned int i_spc_warnlimit; /* Limit for number of space warnings */
+ unsigned int i_ino_warnlimit; /* Limit for number of inode warnings */
+ unsigned int i_rt_spc_warnlimit; /* Ditto for real-time space */
+};
+
+/* Operations handling requests from userspace */
+struct quotactl_ops {
+ int (*quota_on)(struct super_block *, int, int, struct path *);
+ int (*quota_off)(struct super_block *, int);
+ int (*quota_enable)(struct super_block *, unsigned int);
+ int (*quota_disable)(struct super_block *, unsigned int);
+ int (*quota_sync)(struct super_block *, int);
+ int (*set_info)(struct super_block *, int, struct qc_info *);
+ int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*get_state)(struct super_block *, struct qc_state *);
+ int (*rm_xquota)(struct super_block *, unsigned int);
+};
+
+struct quota_format_type {
+ int qf_fmt_id; /* Quota format id */
+ const struct quota_format_ops *qf_ops; /* Operations of format */
+ struct module *qf_owner; /* Module implementing quota format */
+ struct quota_format_type *qf_next;
+};
+
+/**
+ * Quota state flags - they actually come in two flavors - for users and groups.
+ *
+ * Actual typed flags layout:
+ * USRQUOTA GRPQUOTA
+ * DQUOT_USAGE_ENABLED 0x0001 0x0002
+ * DQUOT_LIMITS_ENABLED 0x0004 0x0008
+ * DQUOT_SUSPENDED 0x0010 0x0020
+ *
+ * Following bits are used for non-typed flags:
+ * DQUOT_QUOTA_SYS_FILE 0x0040
+ * DQUOT_NEGATIVE_USAGE 0x0080
+ */
+enum {
+ _DQUOT_USAGE_ENABLED = 0, /* Track disk usage for users */
+ _DQUOT_LIMITS_ENABLED, /* Enforce quota limits for users */
+ _DQUOT_SUSPENDED, /* User diskquotas are off, but
+ * we have necessary info in
+ * memory to turn them on */
+ _DQUOT_STATE_FLAGS
+};
+#define DQUOT_USAGE_ENABLED (1 << _DQUOT_USAGE_ENABLED * MAXQUOTAS)
+#define DQUOT_LIMITS_ENABLED (1 << _DQUOT_LIMITS_ENABLED * MAXQUOTAS)
+#define DQUOT_SUSPENDED (1 << _DQUOT_SUSPENDED * MAXQUOTAS)
+#define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \
+ DQUOT_SUSPENDED)
+/* Other quota flags */
+#define DQUOT_STATE_LAST (_DQUOT_STATE_FLAGS * MAXQUOTAS)
+#define DQUOT_QUOTA_SYS_FILE (1 << DQUOT_STATE_LAST)
+ /* Quota file is a special
+ * system file and user cannot
+ * touch it. Filesystem is
+ * responsible for setting
+ * S_NOQUOTA, S_NOATIME flags
+ */
+#define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1))
+ /* Allow negative quota usage */
+static inline unsigned int dquot_state_flag(unsigned int flags, int type)
+{
+ return flags << type;
+}
+
+static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
+{
+ return (flags >> type) & DQUOT_STATE_FLAGS;
+}
+
+/* Bitmap of quota types where flag is set in flags */
+static __always_inline unsigned dquot_state_types(unsigned flags, unsigned flag)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(flag);
+ return (flags / flag) & ((1 << MAXQUOTAS) - 1);
+}
+
+#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
+extern void quota_send_warning(struct kqid qid, dev_t dev,
+ const char warntype);
+#else
+static inline void quota_send_warning(struct kqid qid, dev_t dev,
+ const char warntype)
+{
+ return;
+}
+#endif /* CONFIG_QUOTA_NETLINK_INTERFACE */
+
+struct quota_info {
+ unsigned int flags; /* Flags for diskquotas on this device */
+ struct mutex dqio_mutex; /* lock device while I/O in progress */
+ struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */
+ struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */
+ struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */
+ const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */
+};
+
+int register_quota_format(struct quota_format_type *fmt);
+void unregister_quota_format(struct quota_format_type *fmt);
+
+struct quota_module_name {
+ int qm_fmt_id;
+ char *qm_mod_name;
+};
+
+#define INIT_QUOTA_MODULE_NAMES {\
+ {QFMT_VFS_OLD, "quota_v1"},\
+ {QFMT_VFS_V0, "quota_v2"},\
+ {QFMT_VFS_V1, "quota_v2"},\
+ {0, NULL}}
+
+#endif /* _LINUX_QUOTA_ */
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
new file mode 100644
index 000000000..77ca6601f
--- /dev/null
+++ b/include/linux/quotaops.h
@@ -0,0 +1,386 @@
+/*
+ * Definitions for diskquota-operations. When diskquota is configured these
+ * macros expand to the right source-code.
+ *
+ * Author: Marco van Wieringen <mvw@planets.elm.net>
+ */
+#ifndef _LINUX_QUOTAOPS_
+#define _LINUX_QUOTAOPS_
+
+#include <linux/fs.h>
+
+#define DQUOT_SPACE_WARN 0x1
+#define DQUOT_SPACE_RESERVE 0x2
+#define DQUOT_SPACE_NOFAIL 0x4
+
+static inline struct quota_info *sb_dqopt(struct super_block *sb)
+{
+ return &sb->s_dquot;
+}
+
+/* i_mutex must be held */
+static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
+{
+ return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
+ (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
+ (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
+}
+
+#if defined(CONFIG_QUOTA)
+
+#define quota_error(sb, fmt, args...) \
+ __quota_error((sb), __func__, fmt , ## args)
+
+extern __printf(3, 4)
+void __quota_error(struct super_block *sb, const char *func,
+ const char *fmt, ...);
+
+/*
+ * declaration of quota_function calls in kernel.
+ */
+void inode_add_rsv_space(struct inode *inode, qsize_t number);
+void inode_claim_rsv_space(struct inode *inode, qsize_t number);
+void inode_sub_rsv_space(struct inode *inode, qsize_t number);
+void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
+
+void dquot_initialize(struct inode *inode);
+void dquot_drop(struct inode *inode);
+struct dquot *dqget(struct super_block *sb, struct kqid qid);
+static inline struct dquot *dqgrab(struct dquot *dquot)
+{
+ /* Make sure someone else has active reference to dquot */
+ WARN_ON_ONCE(!atomic_read(&dquot->dq_count));
+ WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
+ atomic_inc(&dquot->dq_count);
+ return dquot;
+}
+void dqput(struct dquot *dquot);
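+
+/*
+ * Reference-handling sketch (editorial illustration; error checking is
+ * elided): a filesystem looks up a dquot, uses it, and drops the
+ * reference when it is done:
+ *
+ *    struct dquot *dq = dqget(sb, make_kqid_uid(inode->i_uid));
+ *    ...
+ *    dqput(dq);
+ */
+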
+int dquot_scan_active(struct super_block *sb,
+ int (*fn)(struct dquot *dquot, unsigned long priv),
+ unsigned long priv);
+struct dquot *dquot_alloc(struct super_block *sb, int type);
+void dquot_destroy(struct dquot *dquot);
+
+int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags);
+void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
+
+int dquot_alloc_inode(struct inode *inode);
+
+int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
+void dquot_free_inode(struct inode *inode);
+void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
+
+int dquot_disable(struct super_block *sb, int type, unsigned int flags);
+/* Suspend quotas on remount RO */
+static inline int dquot_suspend(struct super_block *sb, int type)
+{
+ return dquot_disable(sb, type, DQUOT_SUSPENDED);
+}
+int dquot_resume(struct super_block *sb, int type);
+
+int dquot_commit(struct dquot *dquot);
+int dquot_acquire(struct dquot *dquot);
+int dquot_release(struct dquot *dquot);
+int dquot_commit_info(struct super_block *sb, int type);
+int dquot_mark_dquot_dirty(struct dquot *dquot);
+
+int dquot_file_open(struct inode *inode, struct file *file);
+
+int dquot_enable(struct inode *inode, int type, int format_id,
+ unsigned int flags);
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
+ struct path *path);
+int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
+ int format_id, int type);
+int dquot_quota_off(struct super_block *sb, int type);
+int dquot_writeback_dquots(struct super_block *sb, int type);
+int dquot_quota_sync(struct super_block *sb, int type);
+int dquot_get_state(struct super_block *sb, struct qc_state *state);
+int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
+int dquot_get_dqblk(struct super_block *sb, struct kqid id,
+ struct qc_dqblk *di);
+int dquot_set_dqblk(struct super_block *sb, struct kqid id,
+ struct qc_dqblk *di);
+
+int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
+int dquot_transfer(struct inode *inode, struct iattr *iattr);
+
+static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
+{
+ return sb_dqopt(sb)->info + type;
+}
+
+/*
+ * Functions for checking status of quota
+ */
+
+static inline bool sb_has_quota_usage_enabled(struct super_block *sb, int type)
+{
+ return sb_dqopt(sb)->flags &
+ dquot_state_flag(DQUOT_USAGE_ENABLED, type);
+}
+
+static inline bool sb_has_quota_limits_enabled(struct super_block *sb, int type)
+{
+ return sb_dqopt(sb)->flags &
+ dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
+}
+
+static inline bool sb_has_quota_suspended(struct super_block *sb, int type)
+{
+ return sb_dqopt(sb)->flags &
+ dquot_state_flag(DQUOT_SUSPENDED, type);
+}
+
+static inline unsigned sb_any_quota_suspended(struct super_block *sb)
+{
+ return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_SUSPENDED);
+}
+
+/* Does kernel know about any quota information for given sb + type? */
+static inline bool sb_has_quota_loaded(struct super_block *sb, int type)
+{
+ /* Currently if anything is on, then quota usage is on as well */
+ return sb_has_quota_usage_enabled(sb, type);
+}
+
+static inline unsigned sb_any_quota_loaded(struct super_block *sb)
+{
+ return dquot_state_types(sb_dqopt(sb)->flags, DQUOT_USAGE_ENABLED);
+}
+
+static inline bool sb_has_quota_active(struct super_block *sb, int type)
+{
+ return sb_has_quota_loaded(sb, type) &&
+ !sb_has_quota_suspended(sb, type);
+}
+
+/*
+ * Operations supported for diskquotas.
+ */
+extern const struct dquot_operations dquot_operations;
+extern const struct quotactl_ops dquot_quotactl_ops;
+extern const struct quotactl_ops dquot_quotactl_sysfile_ops;
+
+#else
+
+static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+static inline int sb_has_quota_limits_enabled(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+static inline int sb_has_quota_suspended(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+static inline int sb_any_quota_suspended(struct super_block *sb)
+{
+ return 0;
+}
+
+/* Does kernel know about any quota information for given sb + type? */
+static inline int sb_has_quota_loaded(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+static inline int sb_any_quota_loaded(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int sb_has_quota_active(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+static inline void dquot_initialize(struct inode *inode)
+{
+}
+
+static inline void dquot_drop(struct inode *inode)
+{
+}
+
+static inline int dquot_alloc_inode(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void dquot_free_inode(struct inode *inode)
+{
+}
+
+static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
+{
+ return 0;
+}
+
+static inline int __dquot_alloc_space(struct inode *inode, qsize_t number,
+ int flags)
+{
+ if (!(flags & DQUOT_SPACE_RESERVE))
+ inode_add_bytes(inode, number);
+ return 0;
+}
+
+static inline void __dquot_free_space(struct inode *inode, qsize_t number,
+ int flags)
+{
+ if (!(flags & DQUOT_SPACE_RESERVE))
+ inode_sub_bytes(inode, number);
+}
+
+static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
+{
+ inode_add_bytes(inode, number);
+ return 0;
+}
+
+static inline int dquot_reclaim_space_nodirty(struct inode *inode,
+ qsize_t number)
+{
+ inode_sub_bytes(inode, number);
+ return 0;
+}
+
+static inline int dquot_disable(struct super_block *sb, int type,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static inline int dquot_suspend(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+static inline int dquot_resume(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+#define dquot_file_open generic_file_open
+
+static inline int dquot_writeback_dquots(struct super_block *sb, int type)
+{
+ return 0;
+}
+
+#endif /* CONFIG_QUOTA */
+
+static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
+{
+ return __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN);
+}
+
+static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr)
+{
+ __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL);
+ mark_inode_dirty_sync(inode);
+}
+
+static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
+{
+ int ret;
+
+ ret = dquot_alloc_space_nodirty(inode, nr);
+ if (!ret) {
+ /*
+ * Mark inode fully dirty. Since we are allocating blocks, inode
+ * would become fully dirty soon anyway and it reportedly
+ * reduces lock contention.
+ */
+ mark_inode_dirty(inode);
+ }
+ return ret;
+}
+
+static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr)
+{
+ return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits);
+}
+
+static inline void dquot_alloc_block_nofail(struct inode *inode, qsize_t nr)
+{
+ dquot_alloc_space_nofail(inode, nr << inode->i_blkbits);
+}
+
+static inline int dquot_alloc_block(struct inode *inode, qsize_t nr)
+{
+ return dquot_alloc_space(inode, nr << inode->i_blkbits);
+}
+
+static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
+{
+ return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0);
+}
+
+static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
+{
+ int ret;
+
+ ret = dquot_prealloc_block_nodirty(inode, nr);
+ if (!ret)
+ mark_inode_dirty_sync(inode);
+ return ret;
+}
+
+static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
+{
+ return __dquot_alloc_space(inode, nr << inode->i_blkbits,
+ DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE);
+}
+
+static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
+{
+ int ret;
+
+ ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
+ if (!ret)
+ mark_inode_dirty_sync(inode);
+ return ret;
+}
+
+static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
+{
+ dquot_reclaim_space_nodirty(inode, nr << inode->i_blkbits);
+ mark_inode_dirty_sync(inode);
+}
+
+static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
+{
+ __dquot_free_space(inode, nr, 0);
+}
+
+static inline void dquot_free_space(struct inode *inode, qsize_t nr)
+{
+ dquot_free_space_nodirty(inode, nr);
+ mark_inode_dirty_sync(inode);
+}
+
+static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr)
+{
+ dquot_free_space_nodirty(inode, nr << inode->i_blkbits);
+}
+
+static inline void dquot_free_block(struct inode *inode, qsize_t nr)
+{
+ dquot_free_space(inode, nr << inode->i_blkbits);
+}
+
+static inline void dquot_release_reservation_block(struct inode *inode,
+ qsize_t nr)
+{
+ __dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE);
+}
+
+unsigned int qtype_enforce_flag(int type);
+
+#endif /* _LINUX_QUOTAOPS_ */
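For orientation, a minimal usage sketch (not part of the imported header) of how a filesystem's block allocator might charge and release quota with the helpers above; the myfs_* names are hypothetical and everything outside the quota calls is elided.

	#include <linux/fs.h>
	#include <linux/quotaops.h>

	static int myfs_alloc_blocks(struct inode *inode, unsigned int nr_blocks)
	{
		int err;

		/* Charge the quota (and i_blocks) before committing the allocation. */
		err = dquot_alloc_block(inode, nr_blocks);
		if (err)
			return err;	/* over quota or accounting failure */

		/* ... perform the on-disk allocation here ... */

		return 0;
	}

	static void myfs_free_blocks(struct inode *inode, unsigned int nr_blocks)
	{
		/* ... release the on-disk blocks here ... */

		/* Give the space back to the quota accounting. */
		dquot_free_block(inode, nr_blocks);
	}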
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
new file mode 100644
index 000000000..33170dbd9
--- /dev/null
+++ b/include/linux/radix-tree.h
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2001 Momchil Velikov
+ * Portions Copyright (C) 2001 Christoph Hellwig
+ * Copyright (C) 2006 Nick Piggin
+ * Copyright (C) 2012 Konstantin Khlebnikov
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef _LINUX_RADIX_TREE_H
+#define _LINUX_RADIX_TREE_H
+
+#include <linux/preempt.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+
+/*
+ * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
+ * than a data item) is signalled by the low bit set in the root->rnode
+ * pointer.
+ *
+ * In this case root->height is > 0, but the indirect pointer tests are
+ * needed for RCU lookups (because root->height is unreliable). The only
+ * time callers need worry about this is when doing a lookup_slot under
+ * RCU.
+ *
+ * The indirect pointer is in fact also used to tag the last pointer of a
+ * node when it is shrunk, before we RCU-free the node. See the shrink code
+ * for details.
+ */
+#define RADIX_TREE_INDIRECT_PTR 1
+/*
+ * A common use of the radix tree is to store pointers to struct pages;
+ * but shmem/tmpfs also needs to store swap entries in the same tree:
+ * those are marked as exceptional entries to distinguish them.
+ * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
+ */
+#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
+#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
+
+static inline int radix_tree_is_indirect_ptr(void *ptr)
+{
+ return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
+}
+
+/*** radix-tree API starts here ***/
+
+#define RADIX_TREE_MAX_TAGS 3
+
+#ifdef __KERNEL__
+#define RADIX_TREE_MAP_SHIFT (CONFIG_BASE_SMALL ? 4 : 6)
+#else
+#define RADIX_TREE_MAP_SHIFT 3 /* For more stressful testing */
+#endif
+
+#define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT)
+#define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1)
+
+#define RADIX_TREE_TAG_LONGS \
+ ((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+#define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
+#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+
+/* Height component in node->path */
+#define RADIX_TREE_HEIGHT_SHIFT (RADIX_TREE_MAX_PATH + 1)
+#define RADIX_TREE_HEIGHT_MASK ((1UL << RADIX_TREE_HEIGHT_SHIFT) - 1)
+
+/* Internally used bits of node->count */
+#define RADIX_TREE_COUNT_SHIFT (RADIX_TREE_MAP_SHIFT + 1)
+#define RADIX_TREE_COUNT_MASK ((1UL << RADIX_TREE_COUNT_SHIFT) - 1)
+
+struct radix_tree_node {
+ unsigned int path; /* Offset in parent & height from the bottom */
+ unsigned int count;
+ union {
+ struct {
+ /* Used when ascending tree */
+ struct radix_tree_node *parent;
+ /* For tree user */
+ void *private_data;
+ };
+ /* Used when freeing node */
+ struct rcu_head rcu_head;
+ };
+ /* For tree user */
+ struct list_head private_list;
+ void __rcu *slots[RADIX_TREE_MAP_SIZE];
+ unsigned long tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
+};
+
+/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
+struct radix_tree_root {
+ unsigned int height;
+ gfp_t gfp_mask;
+ struct radix_tree_node __rcu *rnode;
+};
+
+#define RADIX_TREE_INIT(mask) { \
+ .height = 0, \
+ .gfp_mask = (mask), \
+ .rnode = NULL, \
+}
+
+#define RADIX_TREE(name, mask) \
+ struct radix_tree_root name = RADIX_TREE_INIT(mask)
+
+#define INIT_RADIX_TREE(root, mask) \
+do { \
+ (root)->height = 0; \
+ (root)->gfp_mask = (mask); \
+ (root)->rnode = NULL; \
+} while (0)
+
+/**
+ * Radix-tree synchronization
+ *
+ * The radix-tree API requires that users provide all synchronisation (with
+ * specific exceptions, noted below).
+ *
+ * Synchronization of access to the data items being stored in the tree, and
+ * management of their lifetimes must be completely managed by API users.
+ *
+ * For API usage, in general,
+ * - any function _modifying_ the tree or tags (inserting or deleting
+ * items, setting or clearing tags) must exclude other modifications, and
+ * exclude any functions reading the tree.
+ * - any function _reading_ the tree or tags (looking up items or tags,
+ * gang lookups) must exclude modifications to the tree, but may occur
+ * concurrently with other readers.
+ *
+ * The notable exceptions to this rule are the following functions:
+ * __radix_tree_lookup
+ * radix_tree_lookup
+ * radix_tree_lookup_slot
+ * radix_tree_tag_get
+ * radix_tree_gang_lookup
+ * radix_tree_gang_lookup_slot
+ * radix_tree_gang_lookup_tag
+ * radix_tree_gang_lookup_tag_slot
+ * radix_tree_tagged
+ *
+ * The first 7 functions are able to be called locklessly, using RCU. The
+ * caller must ensure calls to these functions are made within rcu_read_lock()
+ * regions. Other readers (lock-free or otherwise) and modifications may be
+ * running concurrently.
+ *
+ * It is still required that the caller manage the synchronization and lifetimes
+ * of the items. So if RCU lock-free lookups are used, typically this would mean
+ * that the items have their own locks, or are amenable to lock-free access; and
+ * that the items are freed by RCU (or only freed after having been deleted from
+ * the radix tree *and* a synchronize_rcu() grace period).
+ *
+ * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
+ * access to data items when inserting into or looking up from the radix tree)
+ *
+ * Note that the value returned by radix_tree_tag_get() may not be relied upon
+ * if only the RCU read lock is held. Functions to set/clear tags and to
+ * delete nodes running concurrently with it may affect its result such that
+ * two consecutive reads in the same locked section may return different
+ * values. If reliability is required, modification functions must also be
+ * excluded from concurrency.
+ *
+ * radix_tree_tagged is able to be called without locking or RCU.
+ */
+
+/**
+ * radix_tree_deref_slot - dereference a slot
+ * @pslot: pointer to slot, returned by radix_tree_lookup_slot
+ * Returns: item that was stored in that slot with any direct pointer flag
+ * removed.
+ *
+ * For use with radix_tree_lookup_slot(). Caller must hold tree at least read
+ * locked across slot lookup and dereference. Not required if write lock is
+ * held (ie. items cannot be concurrently inserted).
+ *
+ * radix_tree_deref_retry must be used to confirm validity of the pointer if
+ * only the read lock is held.
+ */
+static inline void *radix_tree_deref_slot(void **pslot)
+{
+ return rcu_dereference(*pslot);
+}
+
+/**
+ * radix_tree_deref_slot_protected - dereference a slot without RCU lock but with tree lock held
+ * @pslot: pointer to slot, returned by radix_tree_lookup_slot
+ * Returns: item that was stored in that slot with any direct pointer flag
+ * removed.
+ *
+ * Similar to radix_tree_deref_slot but only used during migration when a page's
+ * mapping is being moved. The caller does not hold the RCU read lock but it
+ * must hold the tree lock to prevent parallel updates.
+ */
+static inline void *radix_tree_deref_slot_protected(void **pslot,
+ spinlock_t *treelock)
+{
+ return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
+}
+
+/**
+ * radix_tree_deref_retry - check radix_tree_deref_slot
+ * @arg: pointer returned by radix_tree_deref_slot
+ * Returns: 0 if retry is not required, otherwise retry is required
+ *
+ * radix_tree_deref_retry must be used with radix_tree_deref_slot.
+ */
+static inline int radix_tree_deref_retry(void *arg)
+{
+ return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
+}
+
+/**
+ * radix_tree_exceptional_entry - radix_tree_deref_slot gave exceptional entry?
+ * @arg: value returned by radix_tree_deref_slot
+ * Returns: 0 if well-aligned pointer, non-0 if exceptional entry.
+ */
+static inline int radix_tree_exceptional_entry(void *arg)
+{
+ /* Not unlikely because radix_tree_exception often tested first */
+ return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
+}
+
+/**
+ * radix_tree_exception - radix_tree_deref_slot returned either exception?
+ * @arg: value returned by radix_tree_deref_slot
+ * Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
+ */
+static inline int radix_tree_exception(void *arg)
+{
+ return unlikely((unsigned long)arg &
+ (RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
+}
+
+/**
+ * radix_tree_replace_slot - replace item in a slot
+ * @pslot: pointer to slot, returned by radix_tree_lookup_slot
+ * @item: new item to store in the slot.
+ *
+ * For use with radix_tree_lookup_slot(). Caller must hold tree write locked
+ * across slot lookup and replacement.
+ */
+static inline void radix_tree_replace_slot(void **pslot, void *item)
+{
+ BUG_ON(radix_tree_is_indirect_ptr(item));
+ rcu_assign_pointer(*pslot, item);
+}
+
+int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
+ struct radix_tree_node **nodep, void ***slotp);
+int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
+void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
+ struct radix_tree_node **nodep, void ***slotp);
+void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
+void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
+bool __radix_tree_delete_node(struct radix_tree_root *root,
+ struct radix_tree_node *node);
+void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
+void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+unsigned int
+radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+ unsigned long first_index, unsigned int max_items);
+unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
+ void ***results, unsigned long *indices,
+ unsigned long first_index, unsigned int max_items);
+int radix_tree_preload(gfp_t gfp_mask);
+int radix_tree_maybe_preload(gfp_t gfp_mask);
+void radix_tree_init(void);
+void *radix_tree_tag_set(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+void *radix_tree_tag_clear(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+int radix_tree_tag_get(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+unsigned int
+radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag);
+unsigned int
+radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag);
+unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
+ unsigned long *first_indexp, unsigned long last_index,
+ unsigned long nr_to_tag,
+ unsigned int fromtag, unsigned int totag);
+int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
+unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
+
+static inline void radix_tree_preload_end(void)
+{
+ preempt_enable();
+}
+
+/**
+ * struct radix_tree_iter - radix tree iterator state
+ *
+ * @index: index of current slot
+ * @next_index: next-to-last index for this chunk
+ * @tags: bit-mask for tag-iterating
+ *
+ * This radix tree iterator works in terms of "chunks" of slots. A chunk is a
+ * subinterval of slots contained within one radix tree leaf node. It is
+ * described by a pointer to its first slot and a struct radix_tree_iter
+ * which holds the chunk's position in the tree and its size. For tagged
+ * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
+ * radix tree tag.
+ */
+struct radix_tree_iter {
+ unsigned long index;
+ unsigned long next_index;
+ unsigned long tags;
+};
+
+#define RADIX_TREE_ITER_TAG_MASK 0x00FF /* tag index in lower byte */
+#define RADIX_TREE_ITER_TAGGED 0x0100 /* lookup tagged slots */
+#define RADIX_TREE_ITER_CONTIG 0x0200 /* stop at first hole */
+
+/**
+ * radix_tree_iter_init - initialize radix tree iterator
+ *
+ * @iter: pointer to iterator state
+ * @start: iteration starting index
+ * Returns: NULL
+ */
+static __always_inline void **
+radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
+{
+ /*
+ * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
+ * in the case of a successful tagged chunk lookup. If the lookup was
+ * unsuccessful or non-tagged then nobody cares about ->tags.
+ *
+ * Set index to zero to bypass next_index overflow protection.
+ * See the comment in radix_tree_next_chunk() for details.
+ */
+ iter->index = 0;
+ iter->next_index = start;
+ return NULL;
+}
+
+/**
+ * radix_tree_next_chunk - find next chunk of slots for iteration
+ *
+ * @root: radix tree root
+ * @iter: iterator state
+ * @flags: RADIX_TREE_ITER_* flags and tag index
+ * Returns: pointer to chunk first slot, or NULL if there are no more left
+ *
+ * This function looks up the next chunk in the radix tree starting from
+ * @iter->next_index. It returns a pointer to the chunk's first slot.
+ * Also it fills @iter with data about chunk: position in the tree (index),
+ * its end (next_index), and constructs a bit mask for tagged iterating (tags).
+ */
+void **radix_tree_next_chunk(struct radix_tree_root *root,
+ struct radix_tree_iter *iter, unsigned flags);
+
+/**
+ * radix_tree_chunk_size - get current chunk size
+ *
+ * @iter: pointer to radix tree iterator
+ * Returns: current chunk size
+ */
+static __always_inline unsigned
+radix_tree_chunk_size(struct radix_tree_iter *iter)
+{
+ return iter->next_index - iter->index;
+}
+
+/**
+ * radix_tree_next_slot - find next slot in chunk
+ *
+ * @slot: pointer to current slot
+ * @iter: pointer to iterator state
+ * @flags: RADIX_TREE_ITER_*, should be constant
+ * Returns: pointer to next slot, or NULL if there are no more left
+ *
+ * This function updates @iter->index in the case of a successful lookup.
+ * For tagged lookup it also eats @iter->tags.
+ */
+static __always_inline void **
+radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
+{
+ if (flags & RADIX_TREE_ITER_TAGGED) {
+ iter->tags >>= 1;
+ if (likely(iter->tags & 1ul)) {
+ iter->index++;
+ return slot + 1;
+ }
+ if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) {
+ unsigned offset = __ffs(iter->tags);
+
+ iter->tags >>= offset;
+ iter->index += offset + 1;
+ return slot + offset + 1;
+ }
+ } else {
+ unsigned size = radix_tree_chunk_size(iter) - 1;
+
+ while (size--) {
+ slot++;
+ iter->index++;
+ if (likely(*slot))
+ return slot;
+ if (flags & RADIX_TREE_ITER_CONTIG) {
+ /* forbid switching to the next chunk */
+ iter->next_index = 0;
+ break;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * radix_tree_for_each_chunk - iterate over chunks
+ *
+ * @slot: the void** variable for pointer to chunk first slot
+ * @root: the struct radix_tree_root pointer
+ * @iter: the struct radix_tree_iter pointer
+ * @start: iteration starting index
+ * @flags: RADIX_TREE_ITER_* and tag index
+ *
+ * Locks can be released and reacquired between iterations.
+ */
+#define radix_tree_for_each_chunk(slot, root, iter, start, flags) \
+ for (slot = radix_tree_iter_init(iter, start) ; \
+ (slot = radix_tree_next_chunk(root, iter, flags)) ;)
+
+/**
+ * radix_tree_for_each_chunk_slot - iterate over slots in one chunk
+ *
+ * @slot: the void** variable, at the beginning points to chunk first slot
+ * @iter: the struct radix_tree_iter pointer
+ * @flags: RADIX_TREE_ITER_*, should be constant
+ *
+ * This macro is designed to be nested inside radix_tree_for_each_chunk().
+ * @slot points to the radix tree slot, @iter->index contains its index.
+ */
+#define radix_tree_for_each_chunk_slot(slot, iter, flags) \
+ for (; slot ; slot = radix_tree_next_slot(slot, iter, flags))
+
+/**
+ * radix_tree_for_each_slot - iterate over non-empty slots
+ *
+ * @slot: the void** variable for pointer to slot
+ * @root: the struct radix_tree_root pointer
+ * @iter: the struct radix_tree_iter pointer
+ * @start: iteration starting index
+ *
+ * @slot points to radix tree slot, @iter->index contains its index.
+ */
+#define radix_tree_for_each_slot(slot, root, iter, start) \
+ for (slot = radix_tree_iter_init(iter, start) ; \
+ slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \
+ slot = radix_tree_next_slot(slot, iter, 0))
+
+/**
+ * radix_tree_for_each_contig - iterate over contiguous slots
+ *
+ * @slot: the void** variable for pointer to slot
+ * @root: the struct radix_tree_root pointer
+ * @iter: the struct radix_tree_iter pointer
+ * @start: iteration starting index
+ *
+ * @slot points to radix tree slot, @iter->index contains its index.
+ */
+#define radix_tree_for_each_contig(slot, root, iter, start) \
+ for (slot = radix_tree_iter_init(iter, start) ; \
+ slot || (slot = radix_tree_next_chunk(root, iter, \
+ RADIX_TREE_ITER_CONTIG)) ; \
+ slot = radix_tree_next_slot(slot, iter, \
+ RADIX_TREE_ITER_CONTIG))
+
+/**
+ * radix_tree_for_each_tagged - iterate over tagged slots
+ *
+ * @slot: the void** variable for pointer to slot
+ * @root: the struct radix_tree_root pointer
+ * @iter: the struct radix_tree_iter pointer
+ * @start: iteration starting index
+ * @tag: tag index
+ *
+ * @slot points to radix tree slot, @iter->index contains its index.
+ */
+#define radix_tree_for_each_tagged(slot, root, iter, start, tag) \
+ for (slot = radix_tree_iter_init(iter, start) ; \
+ slot || (slot = radix_tree_next_chunk(root, iter, \
+ RADIX_TREE_ITER_TAGGED | tag)) ; \
+ slot = radix_tree_next_slot(slot, iter, \
+ RADIX_TREE_ITER_TAGGED))
+
+#endif /* _LINUX_RADIX_TREE_H */
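As a usage illustration (a sketch, not part of the imported header) of the locking pattern described in the synchronization comment above: modifications are serialized by a spinlock with preloaded nodes, while lookups and iteration run locklessly under RCU. The my_* names are hypothetical and item lifetime management is left to the caller.

	#include <linux/radix-tree.h>
	#include <linux/spinlock.h>

	static RADIX_TREE(my_tree, GFP_ATOMIC);	/* tree plus gfp mask for node allocation */
	static DEFINE_SPINLOCK(my_tree_lock);	/* serializes modifications */

	static int my_store(unsigned long index, void *item)
	{
		int err;

		/* Preallocate nodes with GFP_KERNEL, then insert under the lock. */
		err = radix_tree_preload(GFP_KERNEL);
		if (err)
			return err;
		spin_lock(&my_tree_lock);
		err = radix_tree_insert(&my_tree, index, item);
		spin_unlock(&my_tree_lock);
		radix_tree_preload_end();
		return err;
	}

	static void *my_lookup(unsigned long index)
	{
		void *item;

		/* Lock-free lookup: must run inside an RCU read-side section. */
		rcu_read_lock();
		item = radix_tree_lookup(&my_tree, index);
		rcu_read_unlock();
		return item;
	}

	static void my_dump(void)
	{
		struct radix_tree_iter iter;
		void **slot;

		rcu_read_lock();
		radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
			pr_info("index %lu -> %p\n", iter.index,
				radix_tree_deref_slot(slot));
		rcu_read_unlock();
	}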
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
new file mode 100644
index 000000000..358c04bfb
--- /dev/null
+++ b/include/linux/raid/md_u.h
@@ -0,0 +1,20 @@
+/*
+ md_u.h : user <=> kernel API between Linux raidtools and RAID drivers
+ Copyright (C) 1998 Ingo Molnar
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ You should have received a copy of the GNU General Public License
+ (for example /usr/src/linux/COPYING); if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+#ifndef _MD_U_H
+#define _MD_U_H
+
+#include <uapi/linux/raid/md_u.h>
+
+extern int mdp_major;
+#endif
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
new file mode 100644
index 000000000..a7a06d1dc
--- /dev/null
+++ b/include/linux/raid/pq.h
@@ -0,0 +1,178 @@
+/* -*- linux-c -*- ------------------------------------------------------- *
+ *
+ * Copyright 2003 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+#ifndef LINUX_RAID_RAID6_H
+#define LINUX_RAID_RAID6_H
+
+#ifdef __KERNEL__
+
+/* Set to 1 to use kernel-wide empty_zero_page */
+#define RAID6_USE_EMPTY_ZERO_PAGE 0
+#include <linux/blkdev.h>
+
+/* We need a pre-zeroed page... if we don't want to use the kernel-provided
+ one, define it here */
+#if RAID6_USE_EMPTY_ZERO_PAGE
+# define raid6_empty_zero_page empty_zero_page
+#else
+extern const char raid6_empty_zero_page[PAGE_SIZE];
+#endif
+
+#else /* ! __KERNEL__ */
+/* Used for testing in user space */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+/* Not standard, but glibc defines it */
+#define BITS_PER_LONG __WORDSIZE
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+#ifndef PAGE_SIZE
+# define PAGE_SIZE 4096
+#endif
+extern const char raid6_empty_zero_page[PAGE_SIZE];
+
+#define __init
+#define __exit
+#define __attribute_const__ __attribute__((const))
+#define noinline __attribute__((noinline))
+
+#define preempt_enable()
+#define preempt_disable()
+#define cpu_has_feature(x) 1
+#define enable_kernel_altivec()
+#define disable_kernel_altivec()
+
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
+#define MODULE_LICENSE(licence)
+#define MODULE_DESCRIPTION(desc)
+#define subsys_initcall(x)
+#define module_exit(x)
+#endif /* __KERNEL__ */
+
+/* Routine choices */
+struct raid6_calls {
+ void (*gen_syndrome)(int, size_t, void **);
+ void (*xor_syndrome)(int, int, int, size_t, void **);
+ int (*valid)(void); /* Returns 1 if this routine set is usable */
+ const char *name; /* Name of this routine set */
+ int prefer; /* Has special performance attribute */
+};
+
+/* Selected algorithm */
+extern struct raid6_calls raid6_call;
+
+/* Various routine sets */
+extern const struct raid6_calls raid6_intx1;
+extern const struct raid6_calls raid6_intx2;
+extern const struct raid6_calls raid6_intx4;
+extern const struct raid6_calls raid6_intx8;
+extern const struct raid6_calls raid6_intx16;
+extern const struct raid6_calls raid6_intx32;
+extern const struct raid6_calls raid6_mmxx1;
+extern const struct raid6_calls raid6_mmxx2;
+extern const struct raid6_calls raid6_sse1x1;
+extern const struct raid6_calls raid6_sse1x2;
+extern const struct raid6_calls raid6_sse2x1;
+extern const struct raid6_calls raid6_sse2x2;
+extern const struct raid6_calls raid6_sse2x4;
+extern const struct raid6_calls raid6_altivec1;
+extern const struct raid6_calls raid6_altivec2;
+extern const struct raid6_calls raid6_altivec4;
+extern const struct raid6_calls raid6_altivec8;
+extern const struct raid6_calls raid6_avx2x1;
+extern const struct raid6_calls raid6_avx2x2;
+extern const struct raid6_calls raid6_avx2x4;
+extern const struct raid6_calls raid6_tilegx8;
+
+struct raid6_recov_calls {
+ void (*data2)(int, size_t, int, int, void **);
+ void (*datap)(int, size_t, int, void **);
+ int (*valid)(void);
+ const char *name;
+ int priority;
+};
+
+extern const struct raid6_recov_calls raid6_recov_intx1;
+extern const struct raid6_recov_calls raid6_recov_ssse3;
+extern const struct raid6_recov_calls raid6_recov_avx2;
+
+extern const struct raid6_calls raid6_neonx1;
+extern const struct raid6_calls raid6_neonx2;
+extern const struct raid6_calls raid6_neonx4;
+extern const struct raid6_calls raid6_neonx8;
+
+/* Algorithm list */
+extern const struct raid6_calls * const raid6_algos[];
+extern const struct raid6_recov_calls *const raid6_recov_algos[];
+int raid6_select_algo(void);
+
+/* Return values from chk_syndrome */
+#define RAID6_OK 0
+#define RAID6_P_BAD 1
+#define RAID6_Q_BAD 2
+#define RAID6_PQ_BAD 3
+
+/* Galois field tables */
+extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
+extern const u8 raid6_vgfmul[256][32] __attribute__((aligned(256)));
+extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
+extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
+extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
+
+/* Recovery routines */
+extern void (*raid6_2data_recov)(int disks, size_t bytes, int faila, int failb,
+ void **ptrs);
+extern void (*raid6_datap_recov)(int disks, size_t bytes, int faila,
+ void **ptrs);
+void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
+ void **ptrs);
+
+/* Some definitions to allow code to be compiled for testing in userspace */
+#ifndef __KERNEL__
+
+# define jiffies raid6_jiffies()
+# define printk printf
+# define GFP_KERNEL 0
+# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
+ PROT_READ|PROT_WRITE, \
+ MAP_PRIVATE|MAP_ANONYMOUS,\
+ 0, 0))
+# define free_pages(x, y) munmap((void *)(x), PAGE_SIZE << (y))
+
+static inline void cpu_relax(void)
+{
+ /* Nothing */
+}
+
+#undef HZ
+#define HZ 1000
+static inline uint32_t raid6_jiffies(void)
+{
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return tv.tv_sec*1000 + tv.tv_usec/1000;
+}
+
+#endif /* ! __KERNEL__ */
+
+#endif /* LINUX_RAID_RAID6_H */
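A brief usage sketch (not part of the imported header): computing the P/Q syndrome with the routine set selected by raid6_select_algo(), which the raid6_pq module normally runs once at init. The layout assumption follows the md/raid6 convention: ptrs[0..disks-3] are data blocks, ptrs[disks-2] receives P and ptrs[disks-1] receives Q, all 'bytes' long; the my_* names are hypothetical.

	#include <linux/raid/pq.h>

	static void my_update_parity(int disks, size_t bytes, void **ptrs)
	{
		/* Recompute P and Q from the data blocks in one pass. */
		raid6_call.gen_syndrome(disks, bytes, ptrs);
	}

	static void my_recover_two(int disks, size_t bytes, int faila, int failb,
				   void **ptrs)
	{
		/* Rebuild two lost data blocks in place from the surviving ones. */
		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
	}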
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
new file mode 100644
index 000000000..5a210959e
--- /dev/null
+++ b/include/linux/raid/xor.h
@@ -0,0 +1,22 @@
+#ifndef _XOR_H
+#define _XOR_H
+
+#define MAX_XOR_BLOCKS 4
+
+extern void xor_blocks(unsigned int count, unsigned int bytes,
+ void *dest, void **srcs);
+
+struct xor_block_template {
+ struct xor_block_template *next;
+ const char *name;
+ int speed;
+ void (*do_2)(unsigned long, unsigned long *, unsigned long *);
+ void (*do_3)(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *);
+ void (*do_4)(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *);
+ void (*do_5)(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *, unsigned long *);
+};
+
+#endif
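A brief usage sketch (not part of the imported header): XOR-ing source buffers into a destination, as the RAID code does when computing parity. Buffer allocation is the caller's responsibility and my_xor_parity is a hypothetical name.

	#include <linux/raid/xor.h>

	static void my_xor_parity(void *dest, void *src1, void *src2,
				  unsigned int bytes)
	{
		void *srcs[2] = { src1, src2 };

		/* dest ^= src1 ^ src2, using the fastest template probed at boot. */
		xor_blocks(2, bytes, dest, srcs);
	}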
diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h
new file mode 100644
index 000000000..31e1ff69e
--- /dev/null
+++ b/include/linux/raid_class.h
@@ -0,0 +1,83 @@
+/*
+ * raid_class.h - a generic raid visualisation class
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ *
+ * This file is licensed under GPLv2
+ */
+#include <linux/transport_class.h>
+
+struct raid_template {
+ struct transport_container raid_attrs;
+};
+
+struct raid_function_template {
+ void *cookie;
+ int (*is_raid)(struct device *);
+ void (*get_resync)(struct device *);
+ void (*get_state)(struct device *);
+};
+
+enum raid_state {
+ RAID_STATE_UNKNOWN = 0,
+ RAID_STATE_ACTIVE,
+ RAID_STATE_DEGRADED,
+ RAID_STATE_RESYNCING,
+ RAID_STATE_OFFLINE,
+};
+
+enum raid_level {
+ RAID_LEVEL_UNKNOWN = 0,
+ RAID_LEVEL_LINEAR,
+ RAID_LEVEL_0,
+ RAID_LEVEL_1,
+ RAID_LEVEL_10,
+ RAID_LEVEL_1E,
+ RAID_LEVEL_3,
+ RAID_LEVEL_4,
+ RAID_LEVEL_5,
+ RAID_LEVEL_50,
+ RAID_LEVEL_6,
+};
+
+struct raid_data {
+ struct list_head component_list;
+ int component_count;
+ enum raid_level level;
+ enum raid_state state;
+ int resync;
+};
+
+/* resync complete goes from 0 to this */
+#define RAID_MAX_RESYNC (10000)
+
+#define DEFINE_RAID_ATTRIBUTE(type, attr) \
+static inline void \
+raid_set_##attr(struct raid_template *r, struct device *dev, type value) { \
+ struct device *device = \
+ attribute_container_find_class_device(&r->raid_attrs.ac, dev);\
+ struct raid_data *rd; \
+ BUG_ON(!device); \
+ rd = dev_get_drvdata(device); \
+ rd->attr = value; \
+} \
+static inline type \
+raid_get_##attr(struct raid_template *r, struct device *dev) { \
+ struct device *device = \
+ attribute_container_find_class_device(&r->raid_attrs.ac, dev);\
+ struct raid_data *rd; \
+ BUG_ON(!device); \
+ rd = dev_get_drvdata(device); \
+ return rd->attr; \
+}
+
+DEFINE_RAID_ATTRIBUTE(enum raid_level, level)
+DEFINE_RAID_ATTRIBUTE(int, resync)
+DEFINE_RAID_ATTRIBUTE(enum raid_state, state)
+
+struct raid_template *raid_class_attach(struct raid_function_template *);
+void raid_class_release(struct raid_template *);
+
+int __must_check raid_component_add(struct raid_template *, struct device *,
+ struct device *);
+
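A brief sketch (not part of the imported header) of how a RAID HBA driver might publish state through the raid_set_* accessors generated by DEFINE_RAID_ATTRIBUTE above, after attaching its template with raid_class_attach(). my_report_resync is a hypothetical helper and dev is assumed to be a device already known to the raid class.

	#include <linux/raid_class.h>

	static void my_report_resync(struct raid_template *tmpl, struct device *dev,
				     int percent)
	{
		/* Resync progress is reported on a 0..RAID_MAX_RESYNC scale. */
		raid_set_resync(tmpl, dev, percent * RAID_MAX_RESYNC / 100);
		raid_set_state(tmpl, dev, RAID_STATE_RESYNCING);
	}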
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
new file mode 100644
index 000000000..ecc730977
--- /dev/null
+++ b/include/linux/ramfs.h
@@ -0,0 +1,25 @@
+#ifndef _LINUX_RAMFS_H
+#define _LINUX_RAMFS_H
+
+struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
+ umode_t mode, dev_t dev);
+extern struct dentry *ramfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data);
+
+#ifdef CONFIG_MMU
+static inline int
+ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
+{
+ return 0;
+}
+#else
+extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
+#endif
+
+extern const struct file_operations ramfs_file_operations;
+extern const struct vm_operations_struct generic_file_vm_ops;
+extern int __init init_ramfs_fs(void);
+
+int ramfs_fill_super(struct super_block *sb, void *data, int silent);
+
+#endif
diff --git a/include/linux/random.h b/include/linux/random.h
new file mode 100644
index 000000000..b05856e16
--- /dev/null
+++ b/include/linux/random.h
@@ -0,0 +1,115 @@
+/*
+ * include/linux/random.h
+ *
+ * Include file for the random number generator.
+ */
+#ifndef _LINUX_RANDOM_H
+#define _LINUX_RANDOM_H
+
+#include <uapi/linux/random.h>
+
+extern void add_device_randomness(const void *, unsigned int);
+extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+extern void add_interrupt_randomness(int irq, int irq_flags);
+
+extern void get_random_bytes(void *buf, int nbytes);
+extern void get_random_bytes_arch(void *buf, int nbytes);
+void generate_random_uuid(unsigned char uuid_out[16]);
+extern int random_int_secret_init(void);
+
+#ifndef MODULE
+extern const struct file_operations random_fops, urandom_fops;
+#endif
+
+unsigned int get_random_int(void);
+unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+
+u32 prandom_u32(void);
+void prandom_bytes(void *buf, size_t nbytes);
+void prandom_seed(u32 seed);
+void prandom_reseed_late(void);
+
+struct rnd_state {
+ __u32 s1, s2, s3, s4;
+};
+
+u32 prandom_u32_state(struct rnd_state *state);
+void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+
+/**
+ * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
+ * @ep_ro: right open interval endpoint
+ *
+ * Returns a pseudo-random number that is in interval [0, ep_ro). Note
+ * that the result depends on PRNG being well distributed in [0, ~0U]
+ * u32 space. Here we use maximally equidistributed combined Tausworthe
+ * generator, that is, prandom_u32(). This is useful when requesting a
+ * random index of an array containing ep_ro elements, for example.
+ *
+ * Returns: pseudo-random number in interval [0, ep_ro)
+ */
+static inline u32 prandom_u32_max(u32 ep_ro)
+{
+ return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
+}
+
+/*
+ * Handle minimum values for seeds
+ */
+static inline u32 __seed(u32 x, u32 m)
+{
+ return (x < m) ? x + m : x;
+}
+
+/**
+ * prandom_seed_state - set seed for prandom_u32_state().
+ * @state: pointer to state structure to receive the seed.
+ * @seed: arbitrary 64-bit value to use as a seed.
+ */
+static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
+{
+ u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
+
+ state->s1 = __seed(i, 2U);
+ state->s2 = __seed(i, 8U);
+ state->s3 = __seed(i, 16U);
+ state->s4 = __seed(i, 128U);
+}
+
+#ifdef CONFIG_ARCH_RANDOM
+# include <asm/archrandom.h>
+#else
+static inline int arch_get_random_long(unsigned long *v)
+{
+ return 0;
+}
+static inline int arch_get_random_int(unsigned int *v)
+{
+ return 0;
+}
+static inline int arch_has_random(void)
+{
+ return 0;
+}
+static inline int arch_get_random_seed_long(unsigned long *v)
+{
+ return 0;
+}
+static inline int arch_get_random_seed_int(unsigned int *v)
+{
+ return 0;
+}
+static inline int arch_has_random_seed(void)
+{
+ return 0;
+}
+#endif
+
+/* Pseudo random number generator from numerical recipes. */
+static inline u32 next_pseudo_random32(u32 seed)
+{
+ return seed * 1664525 + 1013904223;
+}
+
+#endif /* _LINUX_RANDOM_H */
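A brief usage sketch (not part of the imported header) of the prandom_* helpers declared above: a bounded index drawn from the global generator and a reproducible per-caller stream. The my_* names are hypothetical.

	#include <linux/random.h>

	static u32 my_pick_index(u32 array_len)
	{
		/* Pseudo-random index in [0, array_len); not for cryptographic use. */
		return prandom_u32_max(array_len);
	}

	static void my_fill_sequence(u64 seed, u32 *out, int n)
	{
		struct rnd_state state;
		int i;

		prandom_seed_state(&state, seed);	/* same seed => same sequence */
		for (i = 0; i < n; i++)
			out[i] = prandom_u32_state(&state);
	}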
diff --git a/include/linux/range.h b/include/linux/range.h
new file mode 100644
index 000000000..bd184a5db
--- /dev/null
+++ b/include/linux/range.h
@@ -0,0 +1,30 @@
+#ifndef _LINUX_RANGE_H
+#define _LINUX_RANGE_H
+
+struct range {
+ u64 start;
+ u64 end;
+};
+
+int add_range(struct range *range, int az, int nr_range,
+ u64 start, u64 end);
+
+
+int add_range_with_merge(struct range *range, int az, int nr_range,
+ u64 start, u64 end);
+
+void subtract_range(struct range *range, int az, u64 start, u64 end);
+
+int clean_sort_range(struct range *range, int az);
+
+void sort_range(struct range *range, int nr_range);
+
+#define MAX_RESOURCE ((resource_size_t)~0)
+static inline resource_size_t cap_resource(u64 val)
+{
+ if (val > MAX_RESOURCE)
+ return MAX_RESOURCE;
+
+ return val;
+}
+#endif
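A brief sketch (not part of the imported header) of building and compacting a small range table. MY_MAX_RANGES and the addresses are made-up values, and the ranges array is assumed to be zero-initialized by the caller so empty slots can be detected.

	#include <linux/range.h>

	#define MY_MAX_RANGES 8

	static int my_build_ranges(struct range *ranges)
	{
		int nr = 0;

		nr = add_range_with_merge(ranges, MY_MAX_RANGES, nr, 0x1000, 0x2000);
		nr = add_range_with_merge(ranges, MY_MAX_RANGES, nr, 0x2000, 0x3000);
		/* Carve a hole out of the merged range, possibly splitting it. */
		subtract_range(ranges, MY_MAX_RANGES, 0x1800, 0x1c00);

		/* Drop emptied slots and sort by start; returns the new count. */
		return clean_sort_range(ranges, MY_MAX_RANGES);
	}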
diff --git a/include/linux/ras.h b/include/linux/ras.h
new file mode 100644
index 000000000..2aceeafd6
--- /dev/null
+++ b/include/linux/ras.h
@@ -0,0 +1,14 @@
+#ifndef __RAS_H__
+#define __RAS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int ras_userspace_consumers(void);
+void ras_debugfs_init(void);
+int ras_add_daemon_trace(void);
+#else
+static inline int ras_userspace_consumers(void) { return 0; }
+static inline void ras_debugfs_init(void) { return; }
+static inline int ras_add_daemon_trace(void) { return 0; }
+#endif
+
+#endif
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
new file mode 100644
index 000000000..181025292
--- /dev/null
+++ b/include/linux/ratelimit.h
@@ -0,0 +1,81 @@
+#ifndef _LINUX_RATELIMIT_H
+#define _LINUX_RATELIMIT_H
+
+#include <linux/param.h>
+#include <linux/spinlock.h>
+
+#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
+#define DEFAULT_RATELIMIT_BURST 10
+
+struct ratelimit_state {
+ raw_spinlock_t lock; /* protect the state */
+
+ int interval;
+ int burst;
+ int printed;
+ int missed;
+ unsigned long begin;
+};
+
+#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .interval = interval_init, \
+ .burst = burst_init, \
+ }
+
+#define RATELIMIT_STATE_INIT_DISABLED \
+ RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
+
+#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
+ \
+ struct ratelimit_state name = \
+ RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
+
+static inline void ratelimit_state_init(struct ratelimit_state *rs,
+ int interval, int burst)
+{
+ raw_spin_lock_init(&rs->lock);
+ rs->interval = interval;
+ rs->burst = burst;
+ rs->printed = 0;
+ rs->missed = 0;
+ rs->begin = 0;
+}
+
+extern struct ratelimit_state printk_ratelimit_state;
+
+extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
+#define __ratelimit(state) ___ratelimit(state, __func__)
+
+#ifdef CONFIG_PRINTK
+
+#define WARN_ON_RATELIMIT(condition, state) \
+ WARN_ON((condition) && __ratelimit(state))
+
+#define WARN_RATELIMIT(condition, format, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ int rtn = !!(condition); \
+ \
+ if (unlikely(rtn && __ratelimit(&_rs))) \
+ WARN(rtn, format, ##__VA_ARGS__); \
+ \
+ rtn; \
+})
+
+#else
+
+#define WARN_ON_RATELIMIT(condition, state) \
+ WARN_ON(condition)
+
+#define WARN_RATELIMIT(condition, format, ...) \
+({ \
+ int rtn = WARN(condition, format, ##__VA_ARGS__); \
+ rtn; \
+})
+
+#endif
+
+#endif /* _LINUX_RATELIMIT_H */
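A brief usage sketch (not part of the imported header): limiting a noisy diagnostic to DEFAULT_RATELIMIT_BURST messages per DEFAULT_RATELIMIT_INTERVAL. my_report_error and the message text are hypothetical.

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	static void my_report_error(int err)
	{
		static DEFINE_RATELIMIT_STATE(my_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

		/* __ratelimit() returns non-zero while printing is still allowed. */
		if (__ratelimit(&my_rs))
			pr_err("my_driver: transient error %d\n", err);
	}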
diff --git a/include/linux/rational.h b/include/linux/rational.h
new file mode 100644
index 000000000..bfa6a2bcf
--- /dev/null
+++ b/include/linux/rational.h
@@ -0,0 +1,19 @@
+/*
+ * rational fractions
+ *
+ * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <oskar@scara.com>
+ *
+ * helper functions when coping with rational numbers,
+ * e.g. when calculating optimum numerator/denominator pairs for
+ * pll configuration taking into account restricted register size
+ */
+
+#ifndef _LINUX_RATIONAL_H
+#define _LINUX_RATIONAL_H
+
+void rational_best_approximation(
+ unsigned long given_numerator, unsigned long given_denominator,
+ unsigned long max_numerator, unsigned long max_denominator,
+ unsigned long *best_numerator, unsigned long *best_denominator);
+
+#endif /* _LINUX_RATIONAL_H */
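A brief usage sketch (not part of the imported header): approximating a clock ratio with numerator and denominator constrained to 8-bit divider fields, as one might when programming a PLL. The limits and the function name are hypothetical.

	#include <linux/rational.h>

	static void my_pick_dividers(unsigned long rate_out, unsigned long rate_in,
				     unsigned long *n, unsigned long *d)
	{
		/* Best n/d approximating rate_out/rate_in with n, d <= 255. */
		rational_best_approximation(rate_out, rate_in, 255, 255, n, d);
	}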
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
new file mode 100644
index 000000000..fb31765e9
--- /dev/null
+++ b/include/linux/rbtree.h
@@ -0,0 +1,108 @@
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/include/linux/rbtree.h
+
+ To use rbtrees you'll have to implement your own insert and search cores.
+ This avoids having to use callbacks, which would drop performance dramatically.
+ I know it's not the cleanest way, but in C (not in C++) this is how you get
+ performance and genericity...
+
+ See Documentation/rbtree.txt for documentation and samples.
+*/
+
+#ifndef _LINUX_RBTREE_H
+#define _LINUX_RBTREE_H
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+struct rb_node {
+ unsigned long __rb_parent_color;
+ struct rb_node *rb_right;
+ struct rb_node *rb_left;
+} __attribute__((aligned(sizeof(long))));
+ /* The alignment might seem pointless, but allegedly CRIS needs it */
+
+struct rb_root {
+ struct rb_node *rb_node;
+};
+
+
+#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
+
+#define RB_ROOT (struct rb_root) { NULL, }
+#define rb_entry(ptr, type, member) container_of(ptr, type, member)
+
+#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+
+/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
+#define RB_EMPTY_NODE(node) \
+ ((node)->__rb_parent_color == (unsigned long)(node))
+#define RB_CLEAR_NODE(node) \
+ ((node)->__rb_parent_color = (unsigned long)(node))
+
+
+extern void rb_insert_color(struct rb_node *, struct rb_root *);
+extern void rb_erase(struct rb_node *, struct rb_root *);
+
+
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(const struct rb_node *);
+extern struct rb_node *rb_prev(const struct rb_node *);
+extern struct rb_node *rb_first(const struct rb_root *);
+extern struct rb_node *rb_last(const struct rb_root *);
+
+/* Postorder iteration - always visit the parent after its children */
+extern struct rb_node *rb_first_postorder(const struct rb_root *);
+extern struct rb_node *rb_next_postorder(const struct rb_node *);
+
+/* Fast replacement of a single node without remove/rebalance/add/rebalance */
+extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
+ struct rb_root *root);
+
+static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
+ struct rb_node ** rb_link)
+{
+ node->__rb_parent_color = (unsigned long)parent;
+ node->rb_left = node->rb_right = NULL;
+
+ *rb_link = node;
+}
+
+#define rb_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+ ____ptr ? rb_entry(____ptr, type, member) : NULL; \
+ })
+
+/**
+ * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
+ * given type safe against removal of rb_node entry
+ *
+ * @pos: the 'type *' to use as a loop cursor.
+ * @n: another 'type *' to use as temporary storage
+ * @root: 'rb_root *' of the rbtree.
+ * @field: the name of the rb_node field within 'type'.
+ */
+#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
+ for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
+ pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
+ typeof(*pos), field); 1; }); \
+ pos = n)
+
+#endif /* _LINUX_RBTREE_H */
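As the header comment says, callers implement their own insert and search cores; below is a minimal sketch of that pattern (not part of the imported header), keyed by an unsigned long, with my_node as a hypothetical container type.

	#include <linux/rbtree.h>
	#include <linux/types.h>

	struct my_node {
		struct rb_node rb;
		unsigned long key;
	};

	static struct my_node *my_search(struct rb_root *root, unsigned long key)
	{
		struct rb_node *node = root->rb_node;

		while (node) {
			struct my_node *this = rb_entry(node, struct my_node, rb);

			if (key < this->key)
				node = node->rb_left;
			else if (key > this->key)
				node = node->rb_right;
			else
				return this;
		}
		return NULL;
	}

	static bool my_insert(struct rb_root *root, struct my_node *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;

		/* Walk down to the insertion point, remembering the parent. */
		while (*link) {
			struct my_node *this = rb_entry(*link, struct my_node, rb);

			parent = *link;
			if (new->key < this->key)
				link = &(*link)->rb_left;
			else if (new->key > this->key)
				link = &(*link)->rb_right;
			else
				return false;	/* duplicate key */
		}

		/* Link the new node, then rebalance and recolor. */
		rb_link_node(&new->rb, parent, link);
		rb_insert_color(&new->rb, root);
		return true;
	}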
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
new file mode 100644
index 000000000..378c5ee75
--- /dev/null
+++ b/include/linux/rbtree_augmented.h
@@ -0,0 +1,242 @@
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+ (C) 2002 David Woodhouse <dwmw2@infradead.org>
+ (C) 2012 Michel Lespinasse <walken@google.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/include/linux/rbtree_augmented.h
+*/
+
+#ifndef _LINUX_RBTREE_AUGMENTED_H
+#define _LINUX_RBTREE_AUGMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+
+/*
+ * Please note - only struct rb_augment_callbacks and the prototypes for
+ * rb_insert_augmented() and rb_erase_augmented() are intended to be public.
+ * The rest are implementation details you are not expected to depend on.
+ *
+ * See Documentation/rbtree.txt for documentation and samples.
+ */
+
+struct rb_augment_callbacks {
+ void (*propagate)(struct rb_node *node, struct rb_node *stop);
+ void (*copy)(struct rb_node *old, struct rb_node *new);
+ void (*rotate)(struct rb_node *old, struct rb_node *new);
+};
+
+extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+/*
+ * Fixup the rbtree and update the augmented information when rebalancing.
+ *
+ * On insertion, the user must update the augmented information on the path
+ * leading to the inserted node, then call rb_link_node() as usual and
+ * rb_insert_augmented() instead of the usual rb_insert_color() call.
+ * If rb_insert_augmented() rebalances the rbtree, it will call back into
+ * a user-provided function to update the augmented information on the
+ * affected subtrees.
+ */
+static inline void
+rb_insert_augmented(struct rb_node *node, struct rb_root *root,
+ const struct rb_augment_callbacks *augment)
+{
+ __rb_insert_augmented(node, root, augment->rotate);
+}
+
+#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
+ rbtype, rbaugmented, rbcompute) \
+static inline void \
+rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
+{ \
+ while (rb != stop) { \
+ rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
+ rbtype augmented = rbcompute(node); \
+ if (node->rbaugmented == augmented) \
+ break; \
+ node->rbaugmented = augmented; \
+ rb = rb_parent(&node->rbfield); \
+ } \
+} \
+static inline void \
+rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
+{ \
+ rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
+ rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
+ new->rbaugmented = old->rbaugmented; \
+} \
+static void \
+rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
+{ \
+ rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
+ rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
+ new->rbaugmented = old->rbaugmented; \
+ old->rbaugmented = rbcompute(old); \
+} \
+rbstatic const struct rb_augment_callbacks rbname = { \
+ rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
+};
+
+
+#define RB_RED 0
+#define RB_BLACK 1
+
+#define __rb_parent(pc) ((struct rb_node *)(pc & ~3))
+
+#define __rb_color(pc) ((pc) & 1)
+#define __rb_is_black(pc) __rb_color(pc)
+#define __rb_is_red(pc) (!__rb_color(pc))
+#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
+#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
+#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
+
+static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
+{
+ rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+}
+
+static inline void rb_set_parent_color(struct rb_node *rb,
+ struct rb_node *p, int color)
+{
+ rb->__rb_parent_color = (unsigned long)p | color;
+}
+
+static inline void
+__rb_change_child(struct rb_node *old, struct rb_node *new,
+ struct rb_node *parent, struct rb_root *root)
+{
+ if (parent) {
+ if (parent->rb_left == old)
+ parent->rb_left = new;
+ else
+ parent->rb_right = new;
+ } else
+ root->rb_node = new;
+}
+
+extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
+ void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
+
+static __always_inline struct rb_node *
+__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *child = node->rb_right, *tmp = node->rb_left;
+ struct rb_node *parent, *rebalance;
+ unsigned long pc;
+
+ if (!tmp) {
+ /*
+ * Case 1: node to erase has no more than 1 child (easy!)
+ *
+ * Note that if there is one child it must be red due to 5)
+ * and node must be black due to 4). We adjust colors locally
+ * so as to bypass __rb_erase_color() later on.
+ */
+ pc = node->__rb_parent_color;
+ parent = __rb_parent(pc);
+ __rb_change_child(node, child, parent, root);
+ if (child) {
+ child->__rb_parent_color = pc;
+ rebalance = NULL;
+ } else
+ rebalance = __rb_is_black(pc) ? parent : NULL;
+ tmp = parent;
+ } else if (!child) {
+ /* Still case 1, but this time the child is node->rb_left */
+ tmp->__rb_parent_color = pc = node->__rb_parent_color;
+ parent = __rb_parent(pc);
+ __rb_change_child(node, tmp, parent, root);
+ rebalance = NULL;
+ tmp = parent;
+ } else {
+ struct rb_node *successor = child, *child2;
+ tmp = child->rb_left;
+ if (!tmp) {
+ /*
+ * Case 2: node's successor is its right child
+ *
+ * (n) (s)
+ * / \ / \
+ * (x) (s) -> (x) (c)
+ * \
+ * (c)
+ */
+ parent = successor;
+ child2 = successor->rb_right;
+ augment->copy(node, successor);
+ } else {
+ /*
+ * Case 3: node's successor is leftmost under
+ * node's right child subtree
+ *
+ * (n) (s)
+ * / \ / \
+ * (x) (y) -> (x) (y)
+ * / /
+ * (p) (p)
+ * / /
+ * (s) (c)
+ * \
+ * (c)
+ */
+ do {
+ parent = successor;
+ successor = tmp;
+ tmp = tmp->rb_left;
+ } while (tmp);
+ parent->rb_left = child2 = successor->rb_right;
+ successor->rb_right = child;
+ rb_set_parent(child, successor);
+ augment->copy(node, successor);
+ augment->propagate(parent, successor);
+ }
+
+ successor->rb_left = tmp = node->rb_left;
+ rb_set_parent(tmp, successor);
+
+ pc = node->__rb_parent_color;
+ tmp = __rb_parent(pc);
+ __rb_change_child(node, successor, tmp, root);
+ if (child2) {
+ successor->__rb_parent_color = pc;
+ rb_set_parent_color(child2, parent, RB_BLACK);
+ rebalance = NULL;
+ } else {
+ unsigned long pc2 = successor->__rb_parent_color;
+ successor->__rb_parent_color = pc;
+ rebalance = __rb_is_black(pc2) ? parent : NULL;
+ }
+ tmp = successor;
+ }
+
+ augment->propagate(tmp, NULL);
+ return rebalance;
+}
+
+static __always_inline void
+rb_erase_augmented(struct rb_node *node, struct rb_root *root,
+ const struct rb_augment_callbacks *augment)
+{
+ struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
+ if (rebalance)
+ __rb_erase_color(rebalance, root, augment->rotate);
+}
+
+#endif /* _LINUX_RBTREE_AUGMENTED_H */
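A compact sketch (not part of the imported header) of the augmented pattern described above: each node caches the maximum value in its subtree, RB_DECLARE_CALLBACKS generates the propagate/copy/rotate callbacks, and insertion updates the cached maxima on the way down before calling rb_insert_augmented(). All my_* names are hypothetical.

	#include <linux/rbtree_augmented.h>

	struct my_node {
		struct rb_node rb;
		unsigned long value;
		unsigned long subtree_max;	/* max of value over this subtree */
	};

	static unsigned long my_compute_max(struct my_node *node)
	{
		unsigned long max = node->value;

		if (node->rb.rb_left) {
			struct my_node *l = rb_entry(node->rb.rb_left,
						     struct my_node, rb);
			if (l->subtree_max > max)
				max = l->subtree_max;
		}
		if (node->rb.rb_right) {
			struct my_node *r = rb_entry(node->rb.rb_right,
						     struct my_node, rb);
			if (r->subtree_max > max)
				max = r->subtree_max;
		}
		return max;
	}

	RB_DECLARE_CALLBACKS(static, my_augment_cb, struct my_node, rb,
			     unsigned long, subtree_max, my_compute_max)

	static void my_insert(struct rb_root *root, struct my_node *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;
		unsigned long value = new->value;

		/* Walk down, keeping subtree_max on the path up to date. */
		while (*link) {
			struct my_node *this = rb_entry(*link, struct my_node, rb);

			if (this->subtree_max < value)
				this->subtree_max = value;
			parent = *link;
			if (value < this->value)
				link = &(*link)->rb_left;
			else
				link = &(*link)->rb_right;
		}

		new->subtree_max = value;
		rb_link_node(&new->rb, parent, link);
		rb_insert_augmented(&new->rb, root, &my_augment_cb);
	}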
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
new file mode 100644
index 000000000..a18b16f1d
--- /dev/null
+++ b/include/linux/rculist.h
@@ -0,0 +1,556 @@
+#ifndef _LINUX_RCULIST_H
+#define _LINUX_RCULIST_H
+
+#ifdef __KERNEL__
+
+/*
+ * RCU-protected list version
+ */
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+
+/*
+ * Why is there no list_empty_rcu()? Because list_empty() serves this
+ * purpose. The list_empty() function fetches the RCU-protected pointer
+ * and compares it to the address of the list head, but neither dereferences
+ * this pointer itself nor provides this pointer to the caller. Therefore,
+ * it is not necessary to use rcu_dereference(), so that list_empty() can
+ * be used anywhere you would want to use a list_empty_rcu().
+ */
+
+/*
+ * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
+ * @list: list to be initialized
+ *
+ * You should instead use INIT_LIST_HEAD() for normal initialization and
+ * cleanup tasks, when readers have no access to the list being initialized.
+ * However, if the list being initialized is visible to readers, you
+ * need to keep the compiler from being too mischievous.
+ */
+static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
+{
+ ACCESS_ONCE(list->next) = list;
+ ACCESS_ONCE(list->prev) = list;
+}
+
+/*
+ * return the ->next pointer of a list_head in an rcu safe
+ * way, we must not access it directly
+ */
+#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+#ifndef CONFIG_DEBUG_LIST
+static inline void __list_add_rcu(struct list_head *new,
+ struct list_head *prev, struct list_head *next)
+{
+ new->next = next;
+ new->prev = prev;
+ rcu_assign_pointer(list_next_rcu(prev), new);
+ next->prev = new;
+}
+#else
+void __list_add_rcu(struct list_head *new,
+ struct list_head *prev, struct list_head *next);
+#endif
+
+/**
+ * list_add_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_rcu(struct list_head *new, struct list_head *head)
+{
+ __list_add_rcu(new, head, head->next);
+}
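+
+/*
+ * Illustrative sketch (hypothetical "struct foo", "foo_list" and "foo_lock"
+ * names, not part of this header): a writer publishes a new element while
+ * holding a lock to exclude other writers; readers may traverse the list
+ * concurrently under rcu_read_lock():
+ *
+ *    struct foo {
+ *        int key;
+ *        struct list_head list;
+ *    };
+ *
+ *    struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ *
+ *    p->key = key;
+ *    spin_lock(&foo_lock);
+ *    list_add_rcu(&p->list, &foo_list);
+ *    spin_unlock(&foo_lock);
+ *
+ * The lock serializes writers only; it is not needed by readers.
+ */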
+
+/**
+ * list_add_tail_rcu - add a new entry to rcu-protected list
+ * @new: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_add_tail_rcu()
+ * or list_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ */
+static inline void list_add_tail_rcu(struct list_head *new,
+ struct list_head *head)
+{
+ __list_add_rcu(new, head->prev, head);
+}
+
+/**
+ * list_del_rcu - deletes entry from list without re-initialization
+ * @entry: the element to delete from the list.
+ *
+ * Note: list_empty() on entry does not return true after this;
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as list_del_rcu()
+ * or list_add_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * list_for_each_entry_rcu().
+ *
+ * Note that the caller is not permitted to immediately free
+ * the newly deleted entry. Instead, either synchronize_rcu()
+ * or call_rcu() must be used to defer freeing until an RCU
+ * grace period has elapsed.
+ */
+static inline void list_del_rcu(struct list_head *entry)
+{
+ __list_del_entry(entry);
+ entry->prev = LIST_POISON2;
+}
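+
+/*
+ * Illustrative sketch (hypothetical names): unlink an element and defer its
+ * freeing until all pre-existing readers have finished:
+ *
+ *    spin_lock(&foo_lock);
+ *    list_del_rcu(&p->list);
+ *    spin_unlock(&foo_lock);
+ *    synchronize_rcu();
+ *    kfree(p);
+ *
+ * A non-blocking alternative is to pass a callback to call_rcu() and do the
+ * kfree() there.
+ */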
+
+/**
+ * hlist_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: list_unhashed() on the node returns true after this. It is
+ * useful for RCU based read lockfree traversal if the writer side
+ * must know if the list entry is still hashed or already unhashed.
+ *
+ * In particular, it means that we can not poison the forward pointers
+ * that may still be used for walking the hash list and we can only
+ * zero the pprev pointer so list_unhashed() will return true after
+ * this.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another
+ * list-mutation primitive, such as hlist_add_head_rcu() or
+ * hlist_del_rcu(), running on this same list. However, it is
+ * perfectly legal to run concurrently with the _rcu list-traversal
+ * primitives, such as hlist_for_each_entry_rcu().
+ */
+static inline void hlist_del_init_rcu(struct hlist_node *n)
+{
+ if (!hlist_unhashed(n)) {
+ __hlist_del(n);
+ n->pprev = NULL;
+ }
+}
+
+/**
+ * list_replace_rcu - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * The @old entry will be replaced with the @new entry atomically.
+ * Note: @old should not be empty.
+ */
+static inline void list_replace_rcu(struct list_head *old,
+ struct list_head *new)
+{
+ new->next = old->next;
+ new->prev = old->prev;
+ rcu_assign_pointer(list_next_rcu(new->prev), new);
+ new->next->prev = new;
+ old->prev = LIST_POISON2;
+}
+
+/**
+ * list_splice_init_rcu - splice an RCU-protected list into an existing list.
+ * @list: the RCU-protected list to splice
+ * @head: the place in the list to splice the first list into
+ * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
+ *
+ * @head can be RCU-read traversed concurrently with this function.
+ *
+ * Note that this function blocks.
+ *
+ * Important note: the caller must take whatever action is necessary to
+ * prevent any other updates to @head. In principle, it is possible
+ * to modify the list as soon as sync() begins execution.
+ * If this sort of thing becomes necessary, an alternative version
+ * based on call_rcu() could be created. But only if -really-
+ * needed -- there is no shortage of RCU API members.
+ */
+static inline void list_splice_init_rcu(struct list_head *list,
+ struct list_head *head,
+ void (*sync)(void))
+{
+ struct list_head *first = list->next;
+ struct list_head *last = list->prev;
+ struct list_head *at = head->next;
+
+ if (list_empty(list))
+ return;
+
+ /*
+ * "first" and "last" tracking list, so initialize it. RCU readers
+ * have access to this list, so we must use INIT_LIST_HEAD_RCU()
+ * instead of INIT_LIST_HEAD().
+ */
+
+ INIT_LIST_HEAD_RCU(list);
+
+ /*
+ * At this point, the list body still points to the source list.
+ * Wait for any readers to finish using the list before splicing
+ * the list body into the new list. Any new readers will see
+ * an empty list.
+ */
+
+ sync();
+
+ /*
+ * Readers are finished with the source list, so perform splice.
+ * The order is important if the new list is global and accessible
+ * to concurrent RCU readers. Note that RCU readers are not
+ * permitted to traverse the prev pointers without excluding
+ * this function.
+ */
+
+ last->next = at;
+ rcu_assign_pointer(list_next_rcu(head), first);
+ first->prev = head;
+ at->prev = last;
+}
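+
+/*
+ * Illustrative sketch (hypothetical "tmp_list" and "global_list" names):
+ * splice a privately built list onto a globally visible, RCU-read-traversed
+ * list, passing the grace-period primitive that matches the readers:
+ *
+ *    list_splice_init_rcu(&tmp_list, &global_list, synchronize_rcu);
+ */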
+
+/**
+ * list_entry_rcu - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_entry_rcu(ptr, type, member) \
+({ \
+ typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
+ container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
+})
+
+/**
+ * Where are list_empty_rcu() and list_first_entry_rcu()?
+ *
+ * Implementing those functions following their counterparts list_empty() and
+ * list_first_entry() is not advisable because they lead to subtle race
+ * conditions as the following snippet shows:
+ *
+ * if (!list_empty_rcu(mylist)) {
+ * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
+ * do_something(bar);
+ * }
+ *
+ * The list may not be empty when list_empty_rcu checks it, but it may be when
+ * list_first_entry_rcu rereads the ->next pointer.
+ *
+ * Rereading the ->next pointer is not a problem for list_empty() and
+ * list_first_entry() because they would be protected by a lock that blocks
+ * writers.
+ *
+ * See list_first_or_null_rcu for an alternative.
+ */
+
+/**
+ * list_first_or_null_rcu - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_first_or_null_rcu(ptr, type, member) \
+({ \
+ struct list_head *__ptr = (ptr); \
+ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+ likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
+})
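+
+/*
+ * Illustrative sketch, reusing the hypothetical names from the comment
+ * above: the race is avoided because the head's ->next pointer is read
+ * exactly once:
+ *
+ *    rcu_read_lock();
+ *    bar = list_first_or_null_rcu(&mylist, struct foo, list_member);
+ *    if (bar)
+ *        do_something(bar);
+ *    rcu_read_unlock();
+ */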
+
+/**
+ * list_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as list_add_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define list_for_each_entry_rcu(pos, head, member) \
+ for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
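+
+/*
+ * Illustrative sketch (hypothetical "foo_list"/"struct foo" names): a
+ * lock-free reader walks the list entirely within an RCU read-side
+ * critical section:
+ *
+ *    rcu_read_lock();
+ *    list_for_each_entry_rcu(p, &foo_list, list) {
+ *        if (p->key == key) {
+ *            do_something(p);
+ *            break;
+ *        }
+ *    }
+ *    rcu_read_unlock();
+ */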
+
+/**
+ * list_for_each_entry_continue_rcu - continue iteration over list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the list_head within the struct.
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define list_for_each_entry_continue_rcu(pos, head, member) \
+ for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
+
+/**
+ * hlist_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: list_unhashed() on entry does not return true after this;
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry().
+ */
+static inline void hlist_del_rcu(struct hlist_node *n)
+{
+ __hlist_del(n);
+ n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_replace_rcu - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * The @old entry will be replaced with the @new entry atomically.
+ */
+static inline void hlist_replace_rcu(struct hlist_node *old,
+ struct hlist_node *new)
+{
+ struct hlist_node *next = old->next;
+
+ new->next = next;
+ new->pprev = old->pprev;
+ rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
+ if (next)
+ new->next->pprev = &new->next;
+ old->pprev = LIST_POISON2;
+}
+
+/*
+ * return the first or the next element in an RCU protected hlist
+ */
+#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
+#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
+#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
+
+/**
+ * hlist_add_head_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_head_rcu(struct hlist_node *n,
+ struct hlist_head *h)
+{
+ struct hlist_node *first = h->first;
+
+ n->next = first;
+ n->pprev = &h->first;
+ rcu_assign_pointer(hlist_first_rcu(h), n);
+ if (first)
+ first->pprev = &n->next;
+}
+
+/**
+ * hlist_add_before_rcu
+ * @n: the new element to add to the hash list.
+ * @next: the existing element to add the new element before.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist
+ * before the specified node while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_before_rcu(struct hlist_node *n,
+ struct hlist_node *next)
+{
+ n->pprev = next->pprev;
+ n->next = next;
+ rcu_assign_pointer(hlist_pprev_rcu(n), n);
+ next->pprev = &n->next;
+}
+
+/**
+ * hlist_add_behind_rcu
+ * @n: the new element to add to the hash list.
+ * @prev: the existing element to add the new element after.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist
+ * after the specified node while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_behind_rcu(struct hlist_node *n,
+ struct hlist_node *prev)
+{
+ n->next = prev->next;
+ n->pprev = &prev->next;
+ rcu_assign_pointer(hlist_next_rcu(prev), n);
+ if (n->next)
+ n->next->pprev = &n->next;
+}
+
+#define __hlist_for_each_rcu(pos, head) \
+ for (pos = rcu_dereference(hlist_first_rcu(head)); \
+ pos; \
+ pos = rcu_dereference(hlist_next_rcu(pos)))
+
+/**
+ * hlist_for_each_entry_rcu - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu(pos, head, member) \
+ for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
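+
+/*
+ * Illustrative sketch (hypothetical hash-table names): look up an entry in
+ * one bucket of an RCU-protected hash table:
+ *
+ *    rcu_read_lock();
+ *    hlist_for_each_entry_rcu(p, &hashtable[hash], hash_node) {
+ *        if (p->key == key) {
+ *            do_something(p);
+ *            break;
+ *        }
+ *    }
+ *    rcu_read_unlock();
+ */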
+
+/**
+ * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ *
+ * This is the same as hlist_for_each_entry_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
+ for (pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
+ * @pos: the type * to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu_bh(pos, head, member) \
+ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
+ typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
+ &(pos)->member)), typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue_rcu(pos, member) \
+ for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
+ for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member); \
+ pos; \
+ pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
+ &(pos)->member)), typeof(*(pos)), member))
+
+/**
+ * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
+ * @pos: the type * to use as a loop cursor.
+ * @member: the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from_rcu(pos, member) \
+ for (; pos; \
+ pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
+ typeof(*(pos)), member))
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
new file mode 100644
index 000000000..4f216c59e
--- /dev/null
+++ b/include/linux/rculist_bl.h
@@ -0,0 +1,128 @@
+#ifndef _LINUX_RCULIST_BL_H
+#define _LINUX_RCULIST_BL_H
+
+/*
+ * RCU-protected bl list version. See include/linux/list_bl.h.
+ */
+#include <linux/list_bl.h>
+#include <linux/rcupdate.h>
+
+static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
+ struct hlist_bl_node *n)
+{
+ LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
+ LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
+ LIST_BL_LOCKMASK);
+ rcu_assign_pointer(h->first,
+ (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
+}
+
+static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
+{
+ return (struct hlist_bl_node *)
+ ((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK);
+}
+
+/**
+ * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_bl_unhashed() on the node returns true after this. It is
+ * useful for RCU based read lockfree traversal if the writer side
+ * must know if the list entry is still hashed or already unhashed.
+ *
+ * In particular, it means that we can not poison the forward pointers
+ * that may still be used for walking the hash list and we can only
+ * zero the pprev pointer so list_unhashed() will return true after
+ * this.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another
+ * list-mutation primitive, such as hlist_bl_add_head_rcu() or
+ * hlist_bl_del_rcu(), running on this same list. However, it is
+ * perfectly legal to run concurrently with the _rcu list-traversal
+ * primitives, such as hlist_bl_for_each_entry_rcu().
+ */
+static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
+{
+ if (!hlist_bl_unhashed(n)) {
+ __hlist_bl_del(n);
+ n->pprev = NULL;
+ }
+}
+
+/**
+ * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_bl_unhashed() on entry does not return true after this;
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
+ * or hlist_bl_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_bl_for_each_entry().
+ */
+static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
+{
+ __hlist_bl_del(n);
+ n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_bl_add_head_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_bl,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
+ * or hlist_bl_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
+ struct hlist_bl_head *h)
+{
+ struct hlist_bl_node *first;
+
+ /* don't need hlist_bl_first_rcu because we're under lock */
+ first = hlist_bl_first(h);
+
+ n->next = first;
+ if (first)
+ first->pprev = &n->next;
+ n->pprev = &h->first;
+
+ /* need _rcu because we can have concurrent lock free readers */
+ hlist_bl_set_first_rcu(h, n);
+}
+/**
+ * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_bl_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_bl_node within the struct.
+ *
+ */
+#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member) \
+ for (pos = hlist_bl_first_rcu(head); \
+ pos && \
+ ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_raw(pos->next))
+
+#endif
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
new file mode 100644
index 000000000..1c33dd7da
--- /dev/null
+++ b/include/linux/rculist_nulls.h
@@ -0,0 +1,121 @@
+#ifndef _LINUX_RCULIST_NULLS_H
+#define _LINUX_RCULIST_NULLS_H
+
+#ifdef __KERNEL__
+
+/*
+ * RCU-protected list version
+ */
+#include <linux/list_nulls.h>
+#include <linux/rcupdate.h>
+
+/**
+ * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_nulls_unhashed() on the node returns true after this. It is
+ * useful for RCU based read lockfree traversal if the writer side
+ * must know if the list entry is still hashed or already unhashed.
+ *
+ * In particular, it means that we can not poison the forward pointers
+ * that may still be used for walking the hash list and we can only
+ * zero the pprev pointer so list_unhashed() will return true after
+ * this.
+ *
+ * The caller must take whatever precautions are necessary (such as
+ * holding appropriate locks) to avoid racing with another
+ * list-mutation primitive, such as hlist_nulls_add_head_rcu() or
+ * hlist_nulls_del_rcu(), running on this same list. However, it is
+ * perfectly legal to run concurrently with the _rcu list-traversal
+ * primitives, such as hlist_nulls_for_each_entry_rcu().
+ */
+static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
+{
+ if (!hlist_nulls_unhashed(n)) {
+ __hlist_nulls_del(n);
+ n->pprev = NULL;
+ }
+}
+
+#define hlist_nulls_first_rcu(head) \
+ (*((struct hlist_nulls_node __rcu __force **)&(head)->first))
+
+#define hlist_nulls_next_rcu(node) \
+ (*((struct hlist_nulls_node __rcu __force **)&(node)->next))
+
+/**
+ * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: hlist_nulls_unhashed() on entry does not return true after this;
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry().
+ */
+static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
+{
+ __hlist_nulls_del(n);
+ n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_nulls_add_head_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *first = h->first;
+
+ n->next = first;
+ n->pprev = &h->first;
+ rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
+ if (!is_a_nulls(first))
+ first->pprev = &n->next;
+}
+/**
+ * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct hlist_nulls_node to use as a loop cursor.
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ *
+ * The barrier() is needed to make sure the compiler doesn't cache the
+ * first element [1], as this loop can be restarted [2].
+ * [1] Documentation/atomic_ops.txt around line 114
+ * [2] Documentation/RCU/rculist_nulls.txt around line 146
+ */
+#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
+ for (({barrier();}), \
+ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
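+
+/*
+ * Illustrative sketch (hypothetical names, following the pattern in
+ * Documentation/RCU/rculist_nulls.txt): a lookup rechecks the final nulls
+ * value and restarts if the entry was moved to another chain while the
+ * traversal was in progress:
+ *
+ *    rcu_read_lock();
+ * begin:
+ *    hlist_nulls_for_each_entry_rcu(obj, node, &table[slot], hash_node) {
+ *        if (obj->key == key) {
+ *            do_something(obj);
+ *            goto out;
+ *        }
+ *    }
+ *    if (get_nulls_value(node) != slot)
+ *        goto begin;
+ * out:
+ *    rcu_read_unlock();
+ */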
+
+#endif
+#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
new file mode 100644
index 000000000..573a5afd5
--- /dev/null
+++ b/include/linux/rcupdate.h
@@ -0,0 +1,1191 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2001
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * http://lse.sourceforge.net/locking/rcupdate.html
+ *
+ */
+
+#ifndef __LINUX_RCUPDATE_H
+#define __LINUX_RCUPDATE_H
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+#include <linux/lockdep.h>
+#include <linux/completion.h>
+#include <linux/debugobjects.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+extern int rcu_expedited; /* for sysctl */
+
+#ifdef CONFIG_TINY_RCU
+/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
+static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */
+{
+ return false;
+}
+
+static inline void rcu_expedite_gp(void)
+{
+}
+
+static inline void rcu_unexpedite_gp(void)
+{
+}
+#else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_gp_is_expedited(void); /* Internal RCU use. */
+void rcu_expedite_gp(void);
+void rcu_unexpedite_gp(void);
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+enum rcutorture_type {
+ RCU_FLAVOR,
+ RCU_BH_FLAVOR,
+ RCU_SCHED_FLAVOR,
+ RCU_TASKS_FLAVOR,
+ SRCU_FLAVOR,
+ INVALID_RCU_FLAVOR
+};
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
+ unsigned long *gpnum, unsigned long *completed);
+void rcutorture_record_test_transition(void);
+void rcutorture_record_progress(unsigned long vernum);
+void do_trace_rcu_torture_read(const char *rcutorturename,
+ struct rcu_head *rhp,
+ unsigned long secs,
+ unsigned long c_old,
+ unsigned long c);
+#else
+static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
+ int *flags,
+ unsigned long *gpnum,
+ unsigned long *completed)
+{
+ *flags = 0;
+ *gpnum = 0;
+ *completed = 0;
+}
+static inline void rcutorture_record_test_transition(void)
+{
+}
+static inline void rcutorture_record_progress(unsigned long vernum)
+{
+}
+#ifdef CONFIG_RCU_TRACE
+void do_trace_rcu_torture_read(const char *rcutorturename,
+ struct rcu_head *rhp,
+ unsigned long secs,
+ unsigned long c_old,
+ unsigned long c);
+#else
+#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
+ do { } while (0)
+#endif
+#endif
+
+#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b))
+#define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b))
+#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
+#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
+#define ulong2long(a) (*(long *)(&(a)))
+
+/* Exported common interfaces */
+
+#ifdef CONFIG_PREEMPT_RCU
+
+/**
+ * call_rcu() - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all pre-existing RCU read-side
+ * critical sections have completed. However, the callback function
+ * might well execute concurrently with RCU read-side critical sections
+ * that started after call_rcu() was invoked. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ *
+ * Note that all CPUs must agree that the grace period extended beyond
+ * all pre-existing RCU read-side critical sections. On systems with more
+ * than one CPU, this means that when "func()" is invoked, each CPU is
+ * guaranteed to have executed a full memory barrier since the end of its
+ * last RCU read-side critical section whose beginning preceded the call
+ * to call_rcu(). It also means that each CPU executing an RCU read-side
+ * critical section that continues beyond the start of "func()" must have
+ * executed a memory barrier after the call_rcu() but before the beginning
+ * of that RCU read-side critical section. Note that these guarantees
+ * include CPUs that are offline, idle, or executing in user mode, as
+ * well as CPUs that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * resulting RCU callback function "func()", then both CPU A and CPU B are
+ * guaranteed to execute a full memory barrier during the time interval
+ * between the call to call_rcu() and the invocation of "func()" -- even
+ * if CPU A and CPU B are the same CPU (but again only if the system has
+ * more than one CPU).
+ */
+void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
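+
+/*
+ * Illustrative sketch (hypothetical "struct foo" with an embedded
+ * struct rcu_head named "rcu"): the callback recovers the enclosing object
+ * with container_of() and frees it after the grace period:
+ *
+ *    static void foo_free_rcu(struct rcu_head *head)
+ *    {
+ *        struct foo *p = container_of(head, struct foo, rcu);
+ *
+ *        kfree(p);
+ *    }
+ *
+ *    list_del_rcu(&p->list);
+ *    call_rcu(&p->rcu, foo_free_rcu);
+ */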
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/* In classic RCU, call_rcu() is just call_rcu_sched(). */
+#define call_rcu call_rcu_sched
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/**
+ * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by :
+ * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
+ * OR
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+
+/**
+ * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_sched() assumes
+ * that the read-side critical sections end on enabling of preemption
+ * or on voluntary preemption.
+ * RCU read-side critical sections are delimited by :
+ * - rcu_read_lock_sched() and rcu_read_unlock_sched(),
+ * OR
+ * anything that disables preemption.
+ * These may be nested.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_sched(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu));
+
+void synchronize_sched(void);
+
+/*
+ * Structure allowing asynchronous waiting on RCU.
+ */
+struct rcu_synchronize {
+ struct rcu_head head;
+ struct completion completion;
+};
+void wakeme_after_rcu(struct rcu_head *head);
+
+/**
+ * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution. As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void synchronize_rcu_tasks(void);
+void rcu_barrier_tasks(void);
+
+#ifdef CONFIG_PREEMPT_RCU
+
+void __rcu_read_lock(void);
+void __rcu_read_unlock(void);
+void rcu_read_unlock_special(struct task_struct *t);
+void synchronize_rcu(void);
+
+/*
+ * Defined as a macro as it is a very low level header included from
+ * areas that don't even know about current. This gives the rcu_read_lock()
+ * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+static inline void __rcu_read_lock(void)
+{
+ preempt_disable();
+}
+
+static inline void __rcu_read_unlock(void)
+{
+ preempt_enable();
+}
+
+static inline void synchronize_rcu(void)
+{
+ synchronize_sched();
+}
+
+static inline int rcu_preempt_depth(void)
+{
+ return 0;
+}
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/* Internal to kernel */
+void rcu_init(void);
+void rcu_end_inkernel_boot(void);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
+void rcu_check_callbacks(int user);
+struct notifier_block;
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
+int rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu);
+
+#ifdef CONFIG_RCU_STALL_COMMON
+void rcu_sysrq_start(void);
+void rcu_sysrq_end(void);
+#else /* #ifdef CONFIG_RCU_STALL_COMMON */
+static inline void rcu_sysrq_start(void)
+{
+}
+static inline void rcu_sysrq_end(void)
+{
+}
+#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
+
+#ifdef CONFIG_RCU_USER_QS
+void rcu_user_enter(void);
+void rcu_user_exit(void);
+#else
+static inline void rcu_user_enter(void) { }
+static inline void rcu_user_exit(void) { }
+static inline void rcu_user_hooks_switch(struct task_struct *prev,
+ struct task_struct *next) { }
+#endif /* CONFIG_RCU_USER_QS */
+
+#ifdef CONFIG_RCU_NOCB_CPU
+void rcu_init_nohz(void);
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+static inline void rcu_init_nohz(void)
+{
+}
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
+/**
+ * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
+ * @a: Code that RCU needs to pay attention to.
+ *
+ * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
+ * in the inner idle loop, that is, between the rcu_idle_enter() and
+ * the rcu_idle_exit() -- RCU will happily ignore any such read-side
+ * critical sections. However, things like powertop need tracepoints
+ * in the inner idle loop.
+ *
+ * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, a call to the do_something_with_RCU() function),
+ * and then tell RCU to go back to ignoring this CPU. It is permissible
+ * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
+ * quite limited. If deeper nesting is required, it will be necessary
+ * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ */
+#define RCU_NONIDLE(a) \
+ do { \
+ rcu_irq_enter(); \
+ do { a; } while (0); \
+ rcu_irq_exit(); \
+ } while (0)
+
+/*
+ * Note a voluntary context switch for RCU-tasks benefit. This is a
+ * macro rather than an inline function to avoid #include hell.
+ */
+#ifdef CONFIG_TASKS_RCU
+#define TASKS_RCU(x) x
+extern struct srcu_struct tasks_rcu_exit_srcu;
+#define rcu_note_voluntary_context_switch(t) \
+ do { \
+ rcu_all_qs(); \
+ if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
+ ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+ } while (0)
+#else /* #ifdef CONFIG_TASKS_RCU */
+#define TASKS_RCU(x) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/**
+ * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ *
+ * This macro resembles cond_resched(), except that it is defined to
+ * report potential quiescent states to RCU-tasks even if the cond_resched()
+ * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ */
+#define cond_resched_rcu_qs() \
+do { \
+ if (!cond_resched()) \
+ rcu_note_voluntary_context_switch(current); \
+} while (0)
+
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
+bool __rcu_is_watching(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
+
+/*
+ * Infrastructure to implement the synchronize_() primitives in
+ * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
+ */
+
+typedef void call_rcu_func_t(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+void wait_rcu_gp(call_rcu_func_t crf);
+
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#include <linux/rcutree.h>
+#elif defined(CONFIG_TINY_RCU)
+#include <linux/rcutiny.h>
+#else
+#error "Unknown RCU implementation specified to kernel configuration"
+#endif
+
+/*
+ * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
+ * initialization and destruction of rcu_head on the stack. rcu_head structures
+ * allocated dynamically in the heap or defined statically don't need any
+ * initialization.
+ */
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
+void init_rcu_head_on_stack(struct rcu_head *head);
+void destroy_rcu_head_on_stack(struct rcu_head *head);
+#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void init_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
+{
+}
+#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
+bool rcu_lockdep_current_cpu_online(void);
+#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+static inline bool rcu_lockdep_current_cpu_online(void)
+{
+ return true;
+}
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+static inline void rcu_lock_acquire(struct lockdep_map *map)
+{
+ lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
+}
+
+static inline void rcu_lock_release(struct lockdep_map *map)
+{
+ lock_release(map, 1, _THIS_IP_);
+}
+
+extern struct lockdep_map rcu_lock_map;
+extern struct lockdep_map rcu_bh_lock_map;
+extern struct lockdep_map rcu_sched_lock_map;
+extern struct lockdep_map rcu_callback_map;
+int debug_lockdep_rcu_enabled(void);
+
+int rcu_read_lock_held(void);
+int rcu_read_lock_bh_held(void);
+
+/**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section. In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise. Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section. This is useful for debug checks in functions
+ * that require that they be called within an RCU-sched read-side
+ * critical section.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of
+ * view (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
+ * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
+ * that are in such a section, considering these as in extended quiescent
+ * state, so such a CPU is effectively never in an RCU read-side critical
+ * section regardless of what RCU primitives it invokes. This state of
+ * affairs is required --- we need to keep an RCU-free window in idle
+ * where the CPU may possibly enter into low power mode. This way we can
+ * report an extended quiescent state to other CPUs that started a grace
+ * period. Otherwise we would delay any grace period as long as we run in
+ * the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+static inline int rcu_read_lock_sched_held(void)
+{
+ int lockdep_opinion = 0;
+
+ if (!debug_lockdep_rcu_enabled())
+ return 1;
+ if (!rcu_is_watching())
+ return 0;
+ if (!rcu_lockdep_current_cpu_online())
+ return 0;
+ if (debug_locks)
+ lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+ return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
+}
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
+static inline int rcu_read_lock_sched_held(void)
+{
+ return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+# define rcu_lock_acquire(a) do { } while (0)
+# define rcu_lock_release(a) do { } while (0)
+
+static inline int rcu_read_lock_held(void)
+{
+ return 1;
+}
+
+static inline int rcu_read_lock_bh_held(void)
+{
+ return 1;
+}
+
+#ifdef CONFIG_PREEMPT_COUNT
+static inline int rcu_read_lock_sched_held(void)
+{
+ return preempt_count() != 0 || irqs_disabled();
+}
+#else /* #ifdef CONFIG_PREEMPT_COUNT */
+static inline int rcu_read_lock_sched_held(void)
+{
+ return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#ifdef CONFIG_PROVE_RCU
+
+/**
+ * rcu_lockdep_assert - emit lockdep splat if specified condition not met
+ * @c: condition to check
+ * @s: informative message
+ */
+#define rcu_lockdep_assert(c, s) \
+ do { \
+ static bool __section(.data.unlikely) __warned; \
+ if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
+ __warned = true; \
+ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
+ } \
+ } while (0)
+
+#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
+static inline void rcu_preempt_sleep_check(void)
+{
+ rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
+ "Illegal context switch in RCU read-side critical section");
+}
+#else /* #ifdef CONFIG_PROVE_RCU */
+static inline void rcu_preempt_sleep_check(void)
+{
+}
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_sleep_check() \
+ do { \
+ rcu_preempt_sleep_check(); \
+ rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh read-side critical section"); \
+ rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched read-side critical section"); \
+ } while (0)
+
+#else /* #ifdef CONFIG_PROVE_RCU */
+
+#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_sleep_check() do { } while (0)
+
+#endif /* #else #ifdef CONFIG_PROVE_RCU */
+
+/*
+ * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
+ * and rcu_assign_pointer(). Some of these could be folded into their
+ * callers, but they are left separate in order to ease introduction of
+ * multiple flavors of pointers to match the multiple flavors of RCU
+ * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
+ * the future.
+ */
+
+#ifdef __CHECKER__
+#define rcu_dereference_sparse(p, space) \
+ ((void)(((typeof(*p) space *)p) == p))
+#else /* #ifdef __CHECKER__ */
+#define rcu_dereference_sparse(p, space)
+#endif /* #else #ifdef __CHECKER__ */
+
+#define __rcu_access_pointer(p, space) \
+({ \
+ typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+ rcu_dereference_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(_________p1)); \
+})
+#define __rcu_dereference_check(p, c, space) \
+({ \
+ /* Dependency order vs. p above. */ \
+ typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
+ rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+ rcu_dereference_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(________p1)); \
+})
+#define __rcu_dereference_protected(p, c, space) \
+({ \
+ rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+ rcu_dereference_sparse(p, space); \
+ ((typeof(*p) __force __kernel *)(p)); \
+})
+
+#define __rcu_access_index(p, space) \
+({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ rcu_dereference_sparse(p, space); \
+ (_________p1); \
+})
+#define __rcu_dereference_index_check(p, c) \
+({ \
+ /* Dependency order vs. p above. */ \
+ typeof(p) _________p1 = lockless_dereference(p); \
+ rcu_lockdep_assert(c, \
+ "suspicious rcu_dereference_index_check() usage"); \
+ (_________p1); \
+})
+
+/**
+ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+ * @v: The value to statically initialize with.
+ */
+#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+
+/**
+ * lockless_dereference() - safely load a pointer for later dereference
+ * @p: The pointer to load
+ *
+ * Similar to rcu_dereference(), but for situations where the pointed-to
+ * object's lifetime is managed by something other than RCU. That
+ * "something other" might be reference counting or simple immortality.
+ */
+#define lockless_dereference(p) \
+({ \
+ typeof(p) _________p1 = ACCESS_ONCE(p); \
+ smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
+ (_________p1); \
+})
+
+/**
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
+ *
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment. More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption. So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
+ *
+ * Note that rcu_assign_pointer() evaluates each of its arguments only
+ * once, appearances notwithstanding. One of the "extra" evaluations
+ * is in typeof() and the other visible only to sparse (__CHECKER__),
+ * neither of which actually execute the argument. As with most cpp
+ * macros, this execute-arguments-only-once property is important, so
+ * please be careful when making changes to rcu_assign_pointer() and the
+ * other macros that it invokes.
+ */
+#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
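+
+/*
+ * Illustrative sketch (hypothetical "gp" pointer and "struct foo"): fully
+ * initialize the new object, then publish it; readers that see the new
+ * pointer via rcu_dereference() are guaranteed to also see the
+ * initialization:
+ *
+ *    struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ *
+ *    p->a = 1;
+ *    p->b = 2;
+ *    rcu_assign_pointer(gp, p);
+ */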
+
+/**
+ * rcu_access_pointer() - fetch RCU pointer with no dereferencing
+ * @p: The pointer to read
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL. Although rcu_access_pointer() may also be used in cases where
+ * update-side locks prevent the value of the pointer from changing, you
+ * should instead use rcu_dereference_protected() for this use case.
+ *
+ * It is also permissible to use rcu_access_pointer() when read-side
+ * access to the pointer was removed at least one grace period ago, as
+ * is the case in the context of the RCU callback that is freeing up
+ * the data, or after a synchronize_rcu() returns. This can be useful
+ * when tearing down multi-linked structures after a grace period
+ * has elapsed.
+ */
+#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
+
+/**
+ * rcu_dereference_check() - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Do an rcu_dereference(), but check that the conditions under which the
+ * dereference will take place are correct. Typically the conditions
+ * indicate the various locking conditions that should be held at that
+ * point. The check should return true if the conditions are satisfied.
+ * An implicit check for being in an RCU read-side critical section
+ * (rcu_read_lock()) is included.
+ *
+ * For example:
+ *
+ * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
+ *
+ * could be used to indicate to lockdep that foo->bar may only be dereferenced
+ * if either rcu_read_lock() is held, or that the lock required to replace
+ * the bar struct at foo->bar is held.
+ *
+ * Note that the list of conditions may also include indications of when a lock
+ * need not be held, for example during initialisation or destruction of the
+ * target struct:
+ *
+ * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
+ * atomic_read(&foo->usage) == 0);
+ *
+ * Inserts memory barriers on architectures that require them
+ * (currently only the Alpha), prevents the compiler from refetching
+ * (and from merging fetches), and, more importantly, documents exactly
+ * which pointers are protected by RCU and checks that the pointer is
+ * annotated as __rcu.
+ */
+#define rcu_dereference_check(p, c) \
+ __rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
+
+/**
+ * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-bh counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_bh_check(p, c) \
+ __rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
+
+/**
+ * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * This is the RCU-sched counterpart to rcu_dereference_check().
+ */
+#define rcu_dereference_sched_check(p, c) \
+ __rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
+ __rcu)
+
+#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
+
+/*
+ * The tracing infrastructure traces RCU (we want that), but unfortunately
+ * some of the RCU checks causes tracing to lock up the system.
+ *
+ * The tracing version of rcu_dereference_raw() must not call
+ * rcu_read_lock_held().
+ */
+#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+
+/**
+ * rcu_access_index() - fetch RCU index with no dereferencing
+ * @p: The index to read
+ *
+ * Return the value of the specified RCU-protected index, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
+ * when the value of this index is accessed, but the index is not
+ * dereferenced, for example, when testing an RCU-protected index against
+ * -1. Although rcu_access_index() may also be used in cases where
+ * update-side locks prevent the value of the index from changing, you
+ * should instead use rcu_dereference_index_protected() for this use case.
+ */
+#define rcu_access_index(p) __rcu_access_index((p), __rcu)
+
+/**
+ * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Similar to rcu_dereference_check(), but omits the sparse checking.
+ * This allows rcu_dereference_index_check() to be used on integers,
+ * which can then be used as array indices. Attempting to use
+ * rcu_dereference_check() on an integer will give compiler warnings
+ * because the sparse address-space mechanism relies on dereferencing
+ * the RCU-protected pointer. Dereferencing integers is not something
+ * that even gcc will put up with.
+ *
+ * Note that this function does not implicitly check for RCU read-side
+ * critical sections. If this function gains lots of uses, it might
+ * make sense to provide versions for each flavor of RCU, but it does
+ * not make sense as of early 2010.
+ */
+#define rcu_dereference_index_check(p, c) \
+ __rcu_dereference_index_check((p), (c))
+
+/**
+ * rcu_dereference_protected() - fetch RCU pointer when updates prevented
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
+ * is useful in cases where update-side locks prevent the value of the
+ * pointer from changing. Please note that this primitive does -not-
+ * prevent the compiler from repeating this reference or combining it
+ * with other references, so it should not be used without protection
+ * of appropriate locks.
+ *
+ * This function is only for update-side use. Using this function
+ * when protected only by rcu_read_lock() will result in infrequent
+ * but very ugly failures.
+ */
+#define rcu_dereference_protected(p, c) \
+ __rcu_dereference_protected((p), (c), __rcu)
+
+
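+/*
+ * Update-side sketch (editorial; "gp", "gp_lock", "newp" and "oldp" are
+ * hypothetical):
+ *
+ *	spin_lock(&gp_lock);
+ *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
+ *	rcu_assign_pointer(gp, newp);
+ *	spin_unlock(&gp_lock);
+ *	synchronize_rcu();
+ *	kfree(oldp);
+ */
+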
+/**
+ * rcu_dereference() - fetch RCU-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * This is a simple wrapper around rcu_dereference_check().
+ */
+#define rcu_dereference(p) rcu_dereference_check(p, 0)
+
+/**
+ * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
+
+/**
+ * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Makes rcu_dereference_check() do the dirty work.
+ */
+#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
+
+/**
+ * rcu_read_lock() - mark the beginning of an RCU read-side critical section
+ *
+ * When synchronize_rcu() is invoked on one CPU while other CPUs
+ * are within RCU read-side critical sections, then the
+ * synchronize_rcu() is guaranteed to block until after all the other
+ * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
+ * on one CPU while other CPUs are within RCU read-side critical
+ * sections, invocation of the corresponding RCU callback is deferred
+ * until after all the other CPUs exit their critical sections.
+ *
+ * Note, however, that RCU callbacks are permitted to run concurrently
+ * with new RCU read-side critical sections. One way that this can happen
+ * is via the following sequence of events: (1) CPU 0 enters an RCU
+ * read-side critical section, (2) CPU 1 invokes call_rcu() to register
+ * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
+ * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
+ * callback is invoked. This is legal, because the RCU read-side critical
+ * section that was running concurrently with the call_rcu() (and which
+ * therefore might be referencing something that the corresponding RCU
+ * callback would free up) has completed before the corresponding
+ * RCU callback is invoked.
+ *
+ * RCU read-side critical sections may be nested. Any deferred actions
+ * will be deferred until the outermost RCU read-side critical section
+ * completes.
+ *
+ * You can avoid reading and understanding the next paragraph by
+ * following this rule: don't put anything in an rcu_read_lock() RCU
+ * read-side critical section that would block in a !PREEMPT kernel.
+ * But if you want the full story, read on!
+ *
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * it is illegal to block while in an RCU read-side critical section.
+ * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
+ * kernel builds, RCU read-side critical sections may be preempted,
+ * but explicit blocking is illegal. Finally, in preemptible RCU
+ * implementations in real-time (with -rt patchset) kernel builds, RCU
+ * read-side critical sections may be preempted and they may also block, but
+ * only when acquiring spinlocks that are subject to priority inheritance.
+ */
+static inline void rcu_read_lock(void)
+{
+ __rcu_read_lock();
+ __acquire(RCU);
+ rcu_lock_acquire(&rcu_lock_map);
+ rcu_lockdep_assert(rcu_is_watching(),
+ "rcu_read_lock() used illegally while idle");
+}
+
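+/*
+ * Reader-side sketch (editorial; "gp" and do_something_with() are
+ * hypothetical):
+ *
+ *	rcu_read_lock();
+ *	p = rcu_dereference(gp);
+ *	if (p)
+ *		do_something_with(p->a);
+ *	rcu_read_unlock();
+ */
+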
+/*
+ * So where is rcu_write_lock()? It does not exist, as there is no
+ * way for writers to lock out RCU readers. This is a feature, not
+ * a bug -- this property is what provides RCU's performance benefits.
+ * Of course, writers must coordinate with each other. The normal
+ * spinlock primitives work well for this, but any other technique may be
+ * used as well. RCU does not care how the writers keep out of each
+ * others' way, as long as they do so.
+ */
+
+/**
+ * rcu_read_unlock() - marks the end of an RCU read-side critical section.
+ *
+ * In most situations, rcu_read_unlock() is immune from deadlock.
+ * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
+ * is responsible for deboosting, which it does via rt_mutex_unlock().
+ * Unfortunately, this function acquires the scheduler's runqueue and
+ * priority-inheritance spinlocks. This means that deadlock could result
+ * if the caller of rcu_read_unlock() already holds one of these locks or
+ * any lock that is ever acquired while holding them; or any lock which
+ * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
+ * does not disable irqs while taking ->wait_lock.
+ *
+ * That said, RCU readers are never priority boosted unless they were
+ * preempted. Therefore, one way to avoid deadlock is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with one of
+ * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
+ * a number of ways, for example, by invoking preempt_disable() before
+ * the critical section's outermost rcu_read_lock().
+ *
+ * Given that the set of locks acquired by rt_mutex_unlock() might change
+ * at any time, a somewhat more future-proofed approach is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with irqs disabled.
+ * This approach relies on the fact that rt_mutex_unlock() currently only
+ * acquires irq-disabled locks.
+ *
+ * The second of these two approaches is best in most situations,
+ * however, the first approach can also be useful, at least to those
+ * developers willing to keep abreast of the set of locks acquired by
+ * rt_mutex_unlock().
+ *
+ * See rcu_read_lock() for more information.
+ */
+static inline void rcu_read_unlock(void)
+{
+ rcu_lockdep_assert(rcu_is_watching(),
+ "rcu_read_unlock() used illegally while idle");
+ __release(RCU);
+ __rcu_read_unlock();
+ rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
+}
+
+/**
+ * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
+ *
+ * This is the equivalent of rcu_read_lock(), but is to be used when updates
+ * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
+ * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
+ * softirq handler to be a quiescent state, a process in an RCU read-side
+ * critical section must be protected by disabling softirqs. Read-side
+ * critical sections in interrupt context can use just rcu_read_lock(),
+ * though this should at least be commented to avoid confusing people
+ * reading the code.
+ *
+ * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
+ * was invoked from some other task.
+ */
+static inline void rcu_read_lock_bh(void)
+{
+ local_bh_disable();
+ __acquire(RCU_BH);
+ rcu_lock_acquire(&rcu_bh_lock_map);
+ rcu_lockdep_assert(rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
+}
+
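+/*
+ * Reader-side sketch (editorial; "gp" is a hypothetical pointer whose
+ * updaters use call_rcu_bh() or synchronize_rcu_bh()):
+ *
+ *	rcu_read_lock_bh();
+ *	p = rcu_dereference_bh(gp);
+ *	if (p)
+ *		do_something_with(p->a);
+ *	rcu_read_unlock_bh();
+ */
+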
+/*
+ * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
+ *
+ * See rcu_read_lock_bh() for more information.
+ */
+static inline void rcu_read_unlock_bh(void)
+{
+ rcu_lockdep_assert(rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
+ rcu_lock_release(&rcu_bh_lock_map);
+ __release(RCU_BH);
+ local_bh_enable();
+}
+
+/**
+ * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
+ *
+ * This is the equivalent of rcu_read_lock(), but is to be used when updates
+ * are being done using call_rcu_sched() or synchronize_rcu_sched().
+ * Read-side critical sections can also be introduced by anything that
+ * disables preemption, including local_irq_disable() and friends.
+ *
+ * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
+ * must occur in the same context, for example, it is illegal to invoke
+ * rcu_read_unlock_sched() from process context if the matching
+ * rcu_read_lock_sched() was invoked from an NMI handler.
+ */
+static inline void rcu_read_lock_sched(void)
+{
+ preempt_disable();
+ __acquire(RCU_SCHED);
+ rcu_lock_acquire(&rcu_sched_lock_map);
+ rcu_lockdep_assert(rcu_is_watching(),
+ "rcu_read_lock_sched() used illegally while idle");
+}
+
+/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
+static inline notrace void rcu_read_lock_sched_notrace(void)
+{
+ preempt_disable_notrace();
+ __acquire(RCU_SCHED);
+}
+
+/*
+ * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
+ *
+ * See rcu_read_lock_sched for more information.
+ */
+static inline void rcu_read_unlock_sched(void)
+{
+ rcu_lockdep_assert(rcu_is_watching(),
+ "rcu_read_unlock_sched() used illegally while idle");
+ rcu_lock_release(&rcu_sched_lock_map);
+ __release(RCU_SCHED);
+ preempt_enable();
+}
+
+/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
+static inline notrace void rcu_read_unlock_sched_notrace(void)
+{
+ __release(RCU_SCHED);
+ preempt_enable_notrace();
+}
+
+/**
+ * RCU_INIT_POINTER() - initialize an RCU protected pointer
+ *
+ * Initialize an RCU-protected pointer in special cases where readers
+ * do not need ordering constraints on the CPU or the compiler. These
+ * special cases are:
+ *
+ * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 2. The caller has taken whatever steps are required to prevent
+ * RCU readers from concurrently accessing this pointer -or-
+ * 3. The referenced data structure has already been exposed to
+ * readers either at compile time or via rcu_assign_pointer() -and-
+ * a. You have not made -any- reader-visible changes to
+ * this structure since then -or-
+ * b. It is OK for readers accessing this structure from its
+ * new location to see the old state of the structure. (For
+ * example, the changes were to statistical counters or to
+ * other state where exact synchronization is not required.)
+ *
+ * Failure to follow these rules governing use of RCU_INIT_POINTER() will
+ * result in impossible-to-diagnose memory corruption: the structures
+ * will look OK in crash dumps, but any concurrent RCU readers might
+ * see pre-initialized values of the referenced data structure. So
+ * please be very careful how you use RCU_INIT_POINTER()!!!
+ *
+ * If you are creating an RCU-protected linked structure that is accessed
+ * by a single external-to-structure RCU-protected pointer, then you may
+ * use RCU_INIT_POINTER() to initialize the internal RCU-protected
+ * pointers, but you must use rcu_assign_pointer() to initialize the
+ * external-to-structure pointer -after- you have completely initialized
+ * the reader-accessible portions of the linked structure.
+ *
+ * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
+ * ordering guarantees for either the CPU or the compiler.
+ */
+#define RCU_INIT_POINTER(p, v) \
+ do { \
+ rcu_dereference_sparse(p, __rcu); \
+ p = RCU_INITIALIZER(v); \
+ } while (0)
+
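+/*
+ * Sketch (editorial; "p", "head" and "gp" are hypothetical): internal
+ * links of a not-yet-published structure may use RCU_INIT_POINTER(),
+ * but publication itself must use rcu_assign_pointer():
+ *
+ *	RCU_INIT_POINTER(p->next, head);
+ *	rcu_assign_pointer(gp, p);
+ */
+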
+/**
+ * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ *
+ * GCC-style initialization for an RCU-protected pointer in a structure field.
+ */
+#define RCU_POINTER_INITIALIZER(p, v) \
+ .p = RCU_INITIALIZER(v)
+
+/*
+ * Does the specified offset indicate that the corresponding rcu_head
+ * structure can be handled by kfree_rcu()?
+ */
+#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
+
+/*
+ * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
+ */
+#define __kfree_rcu(head, offset) \
+ do { \
+ BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
+ kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+ } while (0)
+
+/**
+ * kfree_rcu() - kfree an object after a grace period.
+ * @ptr: pointer to kfree
+ * @rcu_head: the name of the struct rcu_head within the type of @ptr.
+ *
+ * Many RCU callback functions just call kfree() on the base structure.
+ * These functions are trivial, but their size adds up, and furthermore
+ * when they are used in a kernel module, that module must invoke the
+ * high-latency rcu_barrier() function at module-unload time.
+ *
+ * The kfree_rcu() function handles this issue. Rather than encoding a
+ * function address in the embedded rcu_head structure, kfree_rcu() instead
+ * encodes the offset of the rcu_head structure within the base structure.
+ * Because the functions are not allowed in the low-order 4096 bytes of
+ * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
+ * If the offset is larger than 4095 bytes, a compile-time error will
+ * be generated in __kfree_rcu(). If this error is triggered, you can
+ * either fall back to use of call_rcu() or rearrange the structure to
+ * position the rcu_head structure into the first 4096 bytes.
+ *
+ * Note that the allowable offset might decrease in the future, for example,
+ * to allow something like kmem_cache_free_rcu().
+ *
+ * The BUILD_BUG_ON check must not involve any function calls, hence the
+ * checks are done in macros here.
+ */
+#define kfree_rcu(ptr, rcu_head) \
+ __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
+
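+/*
+ * Sketch (editorial; "struct foo" with an embedded rcu_head named "rcu"
+ * and the pointer "fp" are hypothetical):
+ *
+ *	struct foo {
+ *		int a;
+ *		struct rcu_head rcu;
+ *	};
+ *
+ *	kfree_rcu(fp, rcu);	frees *fp after a grace period
+ */
+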
+#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
+static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
+{
+ *delta_jiffies = ULONG_MAX;
+ return 0;
+}
+#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+
+#if defined(CONFIG_RCU_NOCB_CPU_ALL)
+static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
+#elif defined(CONFIG_RCU_NOCB_CPU)
+bool rcu_is_nocb_cpu(int cpu);
+#else
+static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
+#endif
+
+
+/* Only for use by adaptive-ticks code. */
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+bool rcu_sys_is_idle(void);
+void rcu_sysidle_force_exit(void);
+#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+static inline bool rcu_sys_is_idle(void)
+{
+ return false;
+}
+
+static inline void rcu_sysidle_force_exit(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+
+#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
new file mode 100644
index 000000000..937edaeb1
--- /dev/null
+++ b/include/linux/rcutiny.h
@@ -0,0 +1,195 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ */
+#ifndef __LINUX_TINY_H
+#define __LINUX_TINY_H
+
+#include <linux/cache.h>
+
+static inline unsigned long get_state_synchronize_rcu(void)
+{
+ return 0;
+}
+
+static inline void cond_synchronize_rcu(unsigned long oldstate)
+{
+ might_sleep();
+}
+
+static inline void rcu_barrier_bh(void)
+{
+ wait_rcu_gp(call_rcu_bh);
+}
+
+static inline void rcu_barrier_sched(void)
+{
+ wait_rcu_gp(call_rcu_sched);
+}
+
+static inline void synchronize_rcu_expedited(void)
+{
+ synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */
+}
+
+static inline void rcu_barrier(void)
+{
+ rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */
+}
+
+static inline void synchronize_rcu_bh(void)
+{
+ synchronize_sched();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_sched();
+}
+
+static inline void synchronize_sched_expedited(void)
+{
+ synchronize_sched();
+}
+
+static inline void kfree_call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ call_rcu(head, func);
+}
+
+static inline void rcu_note_context_switch(void)
+{
+ rcu_sched_qs();
+}
+
+/*
+ * Take advantage of the fact that there is only one CPU, which
+ * allows us to ignore virtualization-based context switches.
+ */
+static inline void rcu_virt_note_context_switch(int cpu)
+{
+}
+
+/*
+ * Return the number of grace periods started.
+ */
+static inline unsigned long rcu_batches_started(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods started.
+ */
+static inline unsigned long rcu_batches_started_bh(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_bh(void)
+{
+ return 0;
+}
+
+/*
+ * Return the number of sched grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_sched(void)
+{
+ return 0;
+}
+
+static inline void rcu_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_bh_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_sched_force_quiescent_state(void)
+{
+}
+
+static inline void show_rcu_gp_kthreads(void)
+{
+}
+
+static inline void rcu_cpu_stall_reset(void)
+{
+}
+
+static inline void exit_rcu(void)
+{
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern int rcu_scheduler_active __read_mostly;
+void rcu_scheduler_starting(void);
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+static inline void rcu_scheduler_starting(void)
+{
+}
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
+
+static inline bool rcu_is_watching(void)
+{
+ return __rcu_is_watching();
+}
+
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+ return true;
+}
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline void rcu_all_qs(void)
+{
+}
+
+#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
new file mode 100644
index 000000000..d2e583a6a
--- /dev/null
+++ b/include/linux/rcutree.h
@@ -0,0 +1,105 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2008
+ *
+ * Author: Dipankar Sarma <dipankar@in.ibm.com>
+ * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ */
+
+#ifndef __LINUX_RCUTREE_H
+#define __LINUX_RCUTREE_H
+
+void rcu_note_context_switch(void);
+#ifndef CONFIG_RCU_NOCB_CPU_ALL
+int rcu_needs_cpu(unsigned long *delta_jiffies);
+#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
+void rcu_cpu_stall_reset(void);
+
+/*
+ * Note a virtualization-based context switch. This is simply a
+ * wrapper around rcu_note_context_switch(), which allows TINY_RCU
+ * to save a few bytes.
+ */
+static inline void rcu_virt_note_context_switch(int cpu)
+{
+ rcu_note_context_switch();
+}
+
+void synchronize_rcu_bh(void);
+void synchronize_sched_expedited(void);
+void synchronize_rcu_expedited(void);
+
+void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
+
+/**
+ * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
+ *
+ * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly. This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * and is thus not recommended for any sort of common-case code. In fact,
+ * if you are using synchronize_rcu_bh_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_rcu_bh() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier. And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier. Failing to observe
+ * these restrictions will result in deadlock.
+ */
+static inline void synchronize_rcu_bh_expedited(void)
+{
+ synchronize_sched_expedited();
+}
+
+void rcu_barrier(void);
+void rcu_barrier_bh(void);
+void rcu_barrier_sched(void);
+unsigned long get_state_synchronize_rcu(void);
+void cond_synchronize_rcu(unsigned long oldstate);
+
+extern unsigned long rcutorture_testseq;
+extern unsigned long rcutorture_vernum;
+unsigned long rcu_batches_started(void);
+unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_started_sched(void);
+unsigned long rcu_batches_completed(void);
+unsigned long rcu_batches_completed_bh(void);
+unsigned long rcu_batches_completed_sched(void);
+void show_rcu_gp_kthreads(void);
+
+void rcu_force_quiescent_state(void);
+void rcu_bh_force_quiescent_state(void);
+void rcu_sched_force_quiescent_state(void);
+
+void exit_rcu(void);
+
+void rcu_scheduler_starting(void);
+extern int rcu_scheduler_active __read_mostly;
+
+bool rcu_is_watching(void);
+
+void rcu_all_qs(void);
+
+#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
new file mode 100644
index 000000000..a7ff409f3
--- /dev/null
+++ b/include/linux/reboot.h
@@ -0,0 +1,83 @@
+#ifndef _LINUX_REBOOT_H
+#define _LINUX_REBOOT_H
+
+
+#include <linux/notifier.h>
+#include <uapi/linux/reboot.h>
+
+#define SYS_DOWN 0x0001 /* Notify of system down */
+#define SYS_RESTART SYS_DOWN
+#define SYS_HALT 0x0002 /* Notify of system halt */
+#define SYS_POWER_OFF 0x0003 /* Notify of system power off */
+
+enum reboot_mode {
+ REBOOT_COLD = 0,
+ REBOOT_WARM,
+ REBOOT_HARD,
+ REBOOT_SOFT,
+ REBOOT_GPIO,
+};
+extern enum reboot_mode reboot_mode;
+
+enum reboot_type {
+ BOOT_TRIPLE = 't',
+ BOOT_KBD = 'k',
+ BOOT_BIOS = 'b',
+ BOOT_ACPI = 'a',
+ BOOT_EFI = 'e',
+ BOOT_CF9_FORCE = 'p',
+ BOOT_CF9_SAFE = 'q',
+};
+extern enum reboot_type reboot_type;
+
+extern int reboot_default;
+extern int reboot_cpu;
+extern int reboot_force;
+
+
+extern int register_reboot_notifier(struct notifier_block *);
+extern int unregister_reboot_notifier(struct notifier_block *);
+
+extern int register_restart_handler(struct notifier_block *);
+extern int unregister_restart_handler(struct notifier_block *);
+extern void do_kernel_restart(char *cmd);
+
+/*
+ * Architecture-specific implementations of sys_reboot commands.
+ */
+
+extern void migrate_to_reboot_cpu(void);
+extern void machine_restart(char *cmd);
+extern void machine_halt(void);
+extern void machine_power_off(void);
+
+extern void machine_shutdown(void);
+struct pt_regs;
+extern void machine_crash_shutdown(struct pt_regs *);
+
+/*
+ * Architecture-independent implementations of sys_reboot commands.
+ */
+
+extern void kernel_restart_prepare(char *cmd);
+extern void kernel_restart(char *cmd);
+extern void kernel_halt(void);
+extern void kernel_power_off(void);
+
+extern int C_A_D; /* for sysctl */
+void ctrl_alt_del(void);
+
+#define POWEROFF_CMD_PATH_LEN 256
+extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
+
+extern void orderly_poweroff(bool force);
+extern void orderly_reboot(void);
+
+/*
+ * Emergency restart, callable from an interrupt handler.
+ */
+
+extern void emergency_restart(void);
+#include <asm/emergency-restart.h>
+
+#endif /* _LINUX_REBOOT_H */
diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h
new file mode 100644
index 000000000..8c5a3fb6c
--- /dev/null
+++ b/include/linux/reciprocal_div.h
@@ -0,0 +1,35 @@
+#ifndef _LINUX_RECIPROCAL_DIV_H
+#define _LINUX_RECIPROCAL_DIV_H
+
+#include <linux/types.h>
+
+/*
+ * This algorithm is based on the paper "Division by Invariant
+ * Integers Using Multiplication" by Torbjörn Granlund and Peter
+ * L. Montgomery.
+ *
+ * The assembler implementation from Agner Fog, which this code is
+ * based on, can be found here:
+ * http://www.agner.org/optimize/asmlib.zip
+ *
+ * This optimization for A/B is helpful if the divisor B is mostly
+ * runtime invariant. The reciprocal of B is calculated in the
+ * slow-path with reciprocal_value(). The fast-path can then just use
+ * a much faster multiplication operation with a variable dividend A
+ * to calculate the division A/B.
+ */
+
+struct reciprocal_value {
+ u32 m;
+ u8 sh1, sh2;
+};
+
+struct reciprocal_value reciprocal_value(u32 d);
+
+static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
+{
+ u32 t = (u32)(((u64)a * R.m) >> 32);
+ return (t + ((a - t) >> R.sh1)) >> R.sh2;
+}
+
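+/*
+ * Usage sketch (editorial; A and B are hypothetical u32 values, with B
+ * mostly runtime-invariant): compute the reciprocal once in the slow
+ * path, then divide cheaply in the fast path.
+ *
+ *	struct reciprocal_value R = reciprocal_value(B);
+ *
+ *	q = reciprocal_divide(A, R);	computes A / B
+ */
+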
+#endif /* _LINUX_RECIPROCAL_DIV_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
new file mode 100644
index 000000000..116655d92
--- /dev/null
+++ b/include/linux/regmap.h
@@ -0,0 +1,748 @@
+#ifndef __LINUX_REGMAP_H
+#define __LINUX_REGMAP_H
+
+/*
+ * Register map access API
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/err.h>
+#include <linux/bug.h>
+
+struct module;
+struct device;
+struct i2c_client;
+struct irq_domain;
+struct spi_device;
+struct spmi_device;
+struct regmap;
+struct regmap_range_cfg;
+struct regmap_field;
+struct snd_ac97;
+
+/* An enum of all the supported cache types */
+enum regcache_type {
+ REGCACHE_NONE,
+ REGCACHE_RBTREE,
+ REGCACHE_COMPRESSED,
+ REGCACHE_FLAT,
+};
+
+/**
+ * Default value for a register. We use an array of structs rather
+ * than a simple array as many modern devices have very sparse
+ * register maps.
+ *
+ * @reg: Register address.
+ * @def: Register default value.
+ */
+struct reg_default {
+ unsigned int reg;
+ unsigned int def;
+};
+
+#ifdef CONFIG_REGMAP
+
+enum regmap_endian {
+ /* Unspecified -> 0 -> Backwards compatible default */
+ REGMAP_ENDIAN_DEFAULT = 0,
+ REGMAP_ENDIAN_BIG,
+ REGMAP_ENDIAN_LITTLE,
+ REGMAP_ENDIAN_NATIVE,
+};
+
+/**
+ * A register range, used for access related checks
+ * (readable/writeable/volatile/precious checks)
+ *
+ * @range_min: address of first register
+ * @range_max: address of last register
+ */
+struct regmap_range {
+ unsigned int range_min;
+ unsigned int range_max;
+};
+
+#define regmap_reg_range(low, high) { .range_min = low, .range_max = high, }
+
+/*
+ * A table of ranges including some yes ranges and some no ranges.
+ * If a register belongs to a no_range, the corresponding check function
+ * will return false. If a register belongs to a yes range, the corresponding
+ * check function will return true. "no_ranges" are searched first.
+ *
+ * @yes_ranges : pointer to an array of regmap ranges used as "yes ranges"
+ * @n_yes_ranges: size of the above array
+ * @no_ranges: pointer to an array of regmap ranges used as "no ranges"
+ * @n_no_ranges: size of the above array
+ */
+struct regmap_access_table {
+ const struct regmap_range *yes_ranges;
+ unsigned int n_yes_ranges;
+ const struct regmap_range *no_ranges;
+ unsigned int n_no_ranges;
+};
+
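+/*
+ * Sketch (editorial; the addresses are hypothetical): registers 0x00-0x0f
+ * are readable except for 0x08.
+ *
+ *	static const struct regmap_range foo_rd_yes_ranges[] = {
+ *		regmap_reg_range(0x00, 0x0f),
+ *	};
+ *	static const struct regmap_range foo_rd_no_ranges[] = {
+ *		regmap_reg_range(0x08, 0x08),
+ *	};
+ *	static const struct regmap_access_table foo_rd_table = {
+ *		.yes_ranges = foo_rd_yes_ranges,
+ *		.n_yes_ranges = ARRAY_SIZE(foo_rd_yes_ranges),
+ *		.no_ranges = foo_rd_no_ranges,
+ *		.n_no_ranges = ARRAY_SIZE(foo_rd_no_ranges),
+ *	};
+ */
+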
+typedef void (*regmap_lock)(void *);
+typedef void (*regmap_unlock)(void *);
+
+/**
+ * Configuration for the register map of a device.
+ *
+ * @name: Optional name of the regmap. Useful when a device has multiple
+ * register regions.
+ *
+ * @reg_bits: Number of bits in a register address, mandatory.
+ * @reg_stride: The register address stride. Valid register addresses are a
+ * multiple of this value. If set to 0, a value of 1 will be
+ * used.
+ * @pad_bits: Number of bits of padding between register and value.
+ * @val_bits: Number of bits in a register value, mandatory.
+ *
+ * @writeable_reg: Optional callback returning true if the register
+ * can be written to. If this field is NULL but wr_table
+ * (see below) is not, the check is performed on such table
+ * (a register is writeable if it belongs to one of the ranges
+ * specified by wr_table).
+ * @readable_reg: Optional callback returning true if the register
+ * can be read from. If this field is NULL but rd_table
+ * (see below) is not, the check is performed on such table
+ * (a register is readable if it belongs to one of the ranges
+ * specified by rd_table).
+ * @volatile_reg: Optional callback returning true if the register
+ * value can't be cached. If this field is NULL but
+ * volatile_table (see below) is not, the check is performed on
+ * such table (a register is volatile if it belongs to one of
+ * the ranges specified by volatile_table).
+ * @precious_reg: Optional callback returning true if the register
+ * should not be read outside of a call from the driver
+ * (e.g., a clear on read interrupt status register). If this
+ * field is NULL but precious_table (see below) is not, the
+ * check is performed on such table (a register is precious if
+ * it belongs to one of the ranges specified by precious_table).
+ * @lock: Optional lock callback (overrides regmap's default lock
+ * function, based on spinlock or mutex).
+ * @unlock: As above for unlocking.
+ * @lock_arg: this field is passed as the only argument of lock/unlock
+ * functions (ignored in case regular lock/unlock functions
+ * are not overridden).
+ * @reg_read: Optional callback that if filled will be used to perform
+ * all the reads from the registers. Should only be provided for
+ * devices whose read operation cannot be represented as a simple
+ * read operation on a bus such as SPI, I2C, etc. Most of the
+ * devices do not need this.
+ * @reg_write: Same as above for writing.
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ * to perform locking. This field is ignored if custom lock/unlock
+ * functions are used (see fields lock/unlock of struct regmap_config).
+ * This field is a duplicate of a similar field in
+ * 'struct regmap_bus' and serves exactly the same purpose.
+ * Use it only for "no-bus" cases.
+ * @max_register: Optional, specifies the maximum valid register index.
+ * @wr_table: Optional, points to a struct regmap_access_table specifying
+ * valid ranges for write access.
+ * @rd_table: As above, for read access.
+ * @volatile_table: As above, for volatile registers.
+ * @precious_table: As above, for precious registers.
+ * @reg_defaults: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults: Number of elements in reg_defaults.
+ *
+ * @read_flag_mask: Mask to be set in the top byte of the register when doing
+ * a read.
+ * @write_flag_mask: Mask to be set in the top byte of the register when doing
+ * a write. If both read_flag_mask and write_flag_mask are
+ * empty the regmap_bus default masks are used.
+ * @use_single_rw: If set, converts the bulk read and write operations into
+ * a series of single read and write operations. This is useful
+ * for devices that do not support bulk read and write.
+ * @can_multi_write: If set, the device supports the multi write mode of bulk
+ * write operations; if clear, multi write requests will be
+ * split into individual write operations
+ *
+ * @cache_type: The actual cache type.
+ * @reg_defaults_raw: Power on reset values for registers (for use with
+ * register cache support).
+ * @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
+ * @reg_format_endian: Endianness for formatted register addresses. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ * @val_format_endian: Endianness for formatted register values. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ *
+ * @ranges: Array of configuration entries for virtual address ranges.
+ * @num_ranges: Number of range configuration entries.
+ */
+struct regmap_config {
+ const char *name;
+
+ int reg_bits;
+ int reg_stride;
+ int pad_bits;
+ int val_bits;
+
+ bool (*writeable_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
+ bool (*volatile_reg)(struct device *dev, unsigned int reg);
+ bool (*precious_reg)(struct device *dev, unsigned int reg);
+ regmap_lock lock;
+ regmap_unlock unlock;
+ void *lock_arg;
+
+ int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
+ int (*reg_write)(void *context, unsigned int reg, unsigned int val);
+
+ bool fast_io;
+
+ unsigned int max_register;
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+ const struct regmap_access_table *volatile_table;
+ const struct regmap_access_table *precious_table;
+ const struct reg_default *reg_defaults;
+ unsigned int num_reg_defaults;
+ enum regcache_type cache_type;
+ const void *reg_defaults_raw;
+ unsigned int num_reg_defaults_raw;
+
+ u8 read_flag_mask;
+ u8 write_flag_mask;
+
+ bool use_single_rw;
+ bool can_multi_write;
+
+ enum regmap_endian reg_format_endian;
+ enum regmap_endian val_format_endian;
+
+ const struct regmap_range_cfg *ranges;
+ unsigned int num_ranges;
+};
+
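+/*
+ * Sketch (editorial; the values and FOO_MAX_REGISTER are hypothetical):
+ * a minimal configuration for a device with 8-bit register addresses
+ * and 8-bit register values.
+ *
+ *	static const struct regmap_config foo_regmap_config = {
+ *		.reg_bits = 8,
+ *		.val_bits = 8,
+ *		.max_register = FOO_MAX_REGISTER,
+ *		.cache_type = REGCACHE_RBTREE,
+ *	};
+ */
+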
+/**
+ * Configuration for indirectly accessed or paged registers.
+ * Registers, mapped to this virtual range, are accessed in two steps:
+ * 1. page selector register update;
+ * 2. access through data window registers.
+ *
+ * @name: Descriptive name for diagnostics
+ *
+ * @range_min: Address of the lowest register address in virtual range.
+ * @range_max: Address of the highest register in virtual range.
+ *
+ * @selector_reg: Register with selector field.
+ * @selector_mask: Bit mask for selector value.
+ * @selector_shift: Bit shift for selector value.
+ *
+ * @window_start: Address of first (lowest) register in data window.
+ * @window_len: Number of registers in data window.
+ */
+struct regmap_range_cfg {
+ const char *name;
+
+ /* Registers of virtual address range */
+ unsigned int range_min;
+ unsigned int range_max;
+
+ /* Page selector for indirect addressing */
+ unsigned int selector_reg;
+ unsigned int selector_mask;
+ int selector_shift;
+
+ /* Data window (per each page) */
+ unsigned int window_start;
+ unsigned int window_len;
+};
+
+struct regmap_async;
+
+typedef int (*regmap_hw_write)(void *context, const void *data,
+ size_t count);
+typedef int (*regmap_hw_gather_write)(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len);
+typedef int (*regmap_hw_async_write)(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len,
+ struct regmap_async *async);
+typedef int (*regmap_hw_read)(void *context,
+ const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size);
+typedef int (*regmap_hw_reg_read)(void *context, unsigned int reg,
+ unsigned int *val);
+typedef int (*regmap_hw_reg_write)(void *context, unsigned int reg,
+ unsigned int val);
+typedef struct regmap_async *(*regmap_hw_async_alloc)(void);
+typedef void (*regmap_hw_free_context)(void *context);
+
+/**
+ * Description of a hardware bus for the register map infrastructure.
+ *
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ * to perform locking. This field is ignored if custom lock/unlock
+ * functions are used (see fields lock/unlock of
+ * struct regmap_config).
+ * @write: Write operation.
+ * @gather_write: Write operation with split register/value, return -ENOTSUPP
+ * if not implemented on a given device.
+ * @async_write: Write operation which completes asynchronously, optional and
+ * must serialise with respect to non-async I/O.
+ * @read: Read operation. Data is returned in the buffer used to transmit
+ * data.
+ * @async_alloc: Allocate a regmap_async() structure.
+ * @read_flag_mask: Mask to be set in the top byte of the register when doing
+ * a read.
+ * @reg_format_endian_default: Default endianness for formatted register
+ * addresses. Used when the regmap_config specifies DEFAULT. If this is
+ * DEFAULT, BIG is assumed.
+ * @val_format_endian_default: Default endianness for formatted register
+ * values. Used when the regmap_config specifies DEFAULT. If this is
+ * DEFAULT, BIG is assumed.
+ * @async_size: Size of struct used for async work.
+ */
+struct regmap_bus {
+ bool fast_io;
+ regmap_hw_write write;
+ regmap_hw_gather_write gather_write;
+ regmap_hw_async_write async_write;
+ regmap_hw_reg_write reg_write;
+ regmap_hw_read read;
+ regmap_hw_reg_read reg_read;
+ regmap_hw_free_context free_context;
+ regmap_hw_async_alloc async_alloc;
+ u8 read_flag_mask;
+ enum regmap_endian reg_format_endian_default;
+ enum regmap_endian val_format_endian_default;
+};
+
+struct regmap *regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config);
+int regmap_attach_dev(struct device *dev, struct regmap *map,
+ const struct regmap_config *config);
+struct regmap *regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config);
+struct regmap *regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config);
+struct regmap *regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config);
+struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config);
+struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config);
+struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config);
+
+struct regmap *devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ void *bus_context,
+ const struct regmap_config *config);
+struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config);
+struct regmap *devm_regmap_init_spi(struct spi_device *dev,
+ const struct regmap_config *config);
+struct regmap *devm_regmap_init_spmi_base(struct spmi_device *dev,
+ const struct regmap_config *config);
+struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
+ const struct regmap_config *config);
+struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
+ void __iomem *regs,
+ const struct regmap_config *config);
+struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
+ const struct regmap_config *config);
+
+bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
+
+/**
+ * regmap_init_mmio(): Initialise register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+static inline struct regmap *regmap_init_mmio(struct device *dev,
+ void __iomem *regs,
+ const struct regmap_config *config)
+{
+ return regmap_init_mmio_clk(dev, NULL, regs, config);
+}
+
+/**
+ * devm_regmap_init_mmio(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @regs: Pointer to memory-mapped IO region
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+static inline struct regmap *devm_regmap_init_mmio(struct device *dev,
+ void __iomem *regs,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init_mmio_clk(dev, NULL, regs, config);
+}
+
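+/*
+ * Usage sketch (editorial; "pdev", "res" and "foo_regmap_config" are
+ * hypothetical driver-local names):
+ *
+ *	regs = devm_ioremap_resource(&pdev->dev, res);
+ *	if (IS_ERR(regs))
+ *		return PTR_ERR(regs);
+ *	map = devm_regmap_init_mmio(&pdev->dev, regs, &foo_regmap_config);
+ *	if (IS_ERR(map))
+ *		return PTR_ERR(map);
+ */
+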
+void regmap_exit(struct regmap *map);
+int regmap_reinit_cache(struct regmap *map,
+ const struct regmap_config *config);
+struct regmap *dev_get_regmap(struct device *dev, const char *name);
+struct device *regmap_get_device(struct regmap *map);
+int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ size_t val_count);
+int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+ int num_regs);
+int regmap_multi_reg_write_bypassed(struct regmap *map,
+ const struct reg_default *regs,
+ int num_regs);
+int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len);
+int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
+int regmap_raw_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
+int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+ size_t val_count);
+int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val);
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val);
+int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change);
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change);
+int regmap_get_val_bytes(struct regmap *map);
+int regmap_async_complete(struct regmap *map);
+bool regmap_can_raw_write(struct regmap *map);
+
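+/*
+ * Usage sketch (editorial; the register and bit names are hypothetical):
+ *
+ *	ret = regmap_write(map, FOO_REG_CTRL, FOO_CTRL_RESET);
+ *	ret = regmap_read(map, FOO_REG_STATUS, &val);
+ *	ret = regmap_update_bits(map, FOO_REG_CTRL, FOO_CTRL_EN_MASK,
+ *				 FOO_CTRL_EN);
+ */
+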
+int regcache_sync(struct regmap *map);
+int regcache_sync_region(struct regmap *map, unsigned int min,
+ unsigned int max);
+int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max);
+void regcache_cache_only(struct regmap *map, bool enable);
+void regcache_cache_bypass(struct regmap *map, bool enable);
+void regcache_mark_dirty(struct regmap *map);
+
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+ const struct regmap_access_table *table);
+
+int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+ int num_regs);
+int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val);
+
+static inline bool regmap_reg_in_range(unsigned int reg,
+ const struct regmap_range *range)
+{
+ return reg >= range->range_min && reg <= range->range_max;
+}
+
+bool regmap_reg_in_ranges(unsigned int reg,
+ const struct regmap_range *ranges,
+ unsigned int nranges);
+
+/**
+ * Description of a register field
+ *
+ * @reg: Offset of the register within the regmap bank
+ * @lsb: lsb of the register field.
+ * @msb: msb of the register field.
+ * @id_size: port size, if the field has multiple ports
+ * @id_offset: address offset for each port
+ */
+struct reg_field {
+ unsigned int reg;
+ unsigned int lsb;
+ unsigned int msb;
+ unsigned int id_size;
+ unsigned int id_offset;
+};
+
+#define REG_FIELD(_reg, _lsb, _msb) { \
+ .reg = _reg, \
+ .lsb = _lsb, \
+ .msb = _msb, \
+ }
+
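+/*
+ * Sketch (editorial; FOO_REG_CTRL and the field layout are hypothetical):
+ * describe bits 4..7 of a control register and update them through the
+ * field API.
+ *
+ *	static const struct reg_field foo_rate_field = REG_FIELD(FOO_REG_CTRL, 4, 7);
+ *
+ *	field = devm_regmap_field_alloc(dev, map, foo_rate_field);
+ *	if (IS_ERR(field))
+ *		return PTR_ERR(field);
+ *	regmap_field_write(field, 3);
+ */
+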
+struct regmap_field *regmap_field_alloc(struct regmap *regmap,
+ struct reg_field reg_field);
+void regmap_field_free(struct regmap_field *field);
+
+struct regmap_field *devm_regmap_field_alloc(struct device *dev,
+ struct regmap *regmap, struct reg_field reg_field);
+void devm_regmap_field_free(struct device *dev, struct regmap_field *field);
+
+int regmap_field_read(struct regmap_field *field, unsigned int *val);
+int regmap_field_write(struct regmap_field *field, unsigned int val);
+int regmap_field_update_bits(struct regmap_field *field,
+ unsigned int mask, unsigned int val);
+
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+ unsigned int val);
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+ unsigned int *val);
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+ unsigned int mask, unsigned int val);
+
+/**
+ * Description of an IRQ for the generic regmap irq_chip.
+ *
+ * @reg_offset: Offset of the status/mask register within the bank
+ * @mask: Mask used to flag/control the register.
+ */
+struct regmap_irq {
+ unsigned int reg_offset;
+ unsigned int mask;
+};
+
+/**
+ * Description of a generic regmap irq_chip. This is not intended to
+ * handle every possible interrupt controller, but it should handle a
+ * substantial proportion of those that are found in the wild.
+ *
+ * @name: Descriptive name for IRQ controller.
+ *
+ * @status_base: Base status register address.
+ * @mask_base: Base mask register address.
+ * @ack_base: Base ack address. If zero then the chip is clear on read.
+ * Using a zero value is possible with the @use_ack bit.
+ * @wake_base: Base address for wake enables. If zero unsupported.
+ * @irq_reg_stride: Stride to use for chips where registers are not contiguous.
+ * @init_ack_masked: Ack all masked interrupts once during initialization.
+ * @mask_invert: Inverted mask register: cleared bits are masked out.
+ * @use_ack: Use @ack register even if it is zero.
+ * @wake_invert: Inverted wake register: cleared bits are wake enabled.
+ * @runtime_pm: Hold a runtime PM lock on the device when accessing it.
+ *
+ * @num_regs: Number of registers in each control bank.
+ * @irqs: Descriptors for individual IRQs. Interrupt numbers are
+ * assigned based on the index in the array of the interrupt.
+ * @num_irqs: Number of descriptors.
+ */
+struct regmap_irq_chip {
+ const char *name;
+
+ unsigned int status_base;
+ unsigned int mask_base;
+ unsigned int ack_base;
+ unsigned int wake_base;
+ unsigned int irq_reg_stride;
+ bool init_ack_masked:1;
+ bool mask_invert:1;
+ bool use_ack:1;
+ bool wake_invert:1;
+ bool runtime_pm:1;
+
+ int num_regs;
+
+ const struct regmap_irq *irqs;
+ int num_irqs;
+};
+
+struct regmap_irq_chip_data;
+
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, const struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data);
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
+int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
+struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
+
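+/*
+ * Sketch (editorial; "foo_irqs" and the register addresses are
+ * hypothetical): a single status/mask register pair with one interrupt.
+ *
+ *	static const struct regmap_irq foo_irqs[] = {
+ *		{ .reg_offset = 0, .mask = BIT(0) },
+ *	};
+ *	static const struct regmap_irq_chip foo_irq_chip = {
+ *		.name = "foo",
+ *		.status_base = FOO_REG_IRQ_STATUS,
+ *		.mask_base = FOO_REG_IRQ_MASK,
+ *		.num_regs = 1,
+ *		.irqs = foo_irqs,
+ *		.num_irqs = ARRAY_SIZE(foo_irqs),
+ *	};
+ */
+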
+#else
+
+/*
+ * These stubs should only ever be called by generic code which has
+ * regmap based facilities, if they ever get called at runtime
+ * something is going wrong and something probably needs to select
+ * REGMAP.
+ */
+
+static inline int regmap_write(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_write_async(struct regmap *map, unsigned int reg,
+ unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_bulk_write(struct regmap *map, unsigned int reg,
+ const void *val, size_t val_count)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_read(struct regmap *map, unsigned int reg,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_count)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_async(struct regmap *map,
+ unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_check(struct regmap *map,
+ unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_update_bits_check_async(struct regmap *map,
+ unsigned int reg,
+ unsigned int mask,
+ unsigned int val,
+ bool *change)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_get_val_bytes(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regcache_sync(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regcache_sync_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regcache_drop_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline void regcache_cache_only(struct regmap *map, bool enable)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regcache_cache_bypass(struct regmap *map, bool enable)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regcache_mark_dirty(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline void regmap_async_complete(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+}
+
+static inline int regmap_register_patch(struct regmap *map,
+ const struct reg_default *regs,
+ int num_regs)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline int regmap_parse_val(struct regmap *map, const void *buf,
+ unsigned int *val)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
+static inline struct regmap *dev_get_regmap(struct device *dev,
+ const char *name)
+{
+ return NULL;
+}
+
+static inline struct device *regmap_get_device(struct regmap *map)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return NULL;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/regset.h b/include/linux/regset.h
new file mode 100644
index 000000000..8e0c9febf
--- /dev/null
+++ b/include/linux/regset.h
@@ -0,0 +1,375 @@
+/*
+ * User-mode machine state access
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * Red Hat Author: Roland McGrath.
+ */
+
+#ifndef _LINUX_REGSET_H
+#define _LINUX_REGSET_H 1
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/uaccess.h>
+struct task_struct;
+struct user_regset;
+
+
+/**
+ * user_regset_active_fn - type of @active function in &struct user_regset
+ * @target: thread being examined
+ * @regset: regset being examined
+ *
+ * Return -%ENODEV if not available on the hardware found.
+ * Return %0 if no interesting state in this thread.
+ * Return >%0 number of @size units of interesting state.
+ * Any get call fetching state beyond that number will
+ * see the default initialization state for this data,
+ * so a caller that knows what the default state is need
+ * not copy it all out.
+ * This call is optional; the pointer is %NULL if there
+ * is no inexpensive check to yield a value < @n.
+ */
+typedef int user_regset_active_fn(struct task_struct *target,
+ const struct user_regset *regset);
+
+/**
+ * user_regset_get_fn - type of @get function in &struct user_regset
+ * @target: thread being examined
+ * @regset: regset being examined
+ * @pos: offset into the regset data to access, in bytes
+ * @count: amount of data to copy, in bytes
+ * @kbuf: if not %NULL, a kernel-space pointer to copy into
+ * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into
+ *
+ * Fetch register values. Return %0 on success; -%EIO or -%ENODEV
+ * are usual failure returns. The @pos and @count values are in
+ * bytes, but must be properly aligned. If @kbuf is non-null, that
+ * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then
+ * ubuf gives a userland pointer to access directly, and an -%EFAULT
+ * return value is possible.
+ */
+typedef int user_regset_get_fn(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf);
+
+/**
+ * user_regset_set_fn - type of @set function in &struct user_regset
+ * @target: thread being examined
+ * @regset: regset being examined
+ * @pos: offset into the regset data to access, in bytes
+ * @count: amount of data to copy, in bytes
+ * @kbuf: if not %NULL, a kernel-space pointer to copy from
+ * @ubuf: if @kbuf is %NULL, a user-space pointer to copy from
+ *
+ * Store register values. Return %0 on success; -%EIO or -%ENODEV
+ * are usual failure returns. The @pos and @count values are in
+ * bytes, but must be properly aligned. If @kbuf is non-null, that
+ * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then
+ * ubuf gives a userland pointer to access directly, and an -%EFAULT
+ * return value is possible.
+ */
+typedef int user_regset_set_fn(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/**
+ * user_regset_writeback_fn - type of @writeback function in &struct user_regset
+ * @target: thread being examined
+ * @regset: regset being examined
+ * @immediate: zero if writeback at completion of next context switch is OK
+ *
+ * This call is optional; usually the pointer is %NULL. When
+ * provided, there is some user memory associated with this regset's
+ * hardware, such as memory backing cached register data on register
+ * window machines; the regset's data controls what user memory is
+ * used (e.g. via the stack pointer value).
+ *
+ * Write register data back to user memory. If the @immediate flag
+ * is nonzero, it must be written to the user memory so uaccess or
+ * access_process_vm() can see it when this call returns; if zero,
+ * then it must be written back by the time the task completes a
+ * context switch (as synchronized with wait_task_inactive()).
+ * Return %0 on success or if there was nothing to do, -%EFAULT for
+ * a memory problem (bad stack pointer or whatever), or -%EIO for a
+ * hardware problem.
+ */
+typedef int user_regset_writeback_fn(struct task_struct *target,
+ const struct user_regset *regset,
+ int immediate);
+
+/**
+ * struct user_regset - accessible thread CPU state
+ * @n: Number of slots (registers).
+ * @size: Size in bytes of a slot (register).
+ * @align: Required alignment, in bytes.
+ * @bias: Bias from natural indexing.
+ * @core_note_type: ELF note @n_type value used in core dumps.
+ * @get: Function to fetch values.
+ * @set: Function to store values.
+ * @active: Function to report if regset is active, or %NULL.
+ * @writeback: Function to write data back to user memory, or %NULL.
+ *
+ * This data structure describes a machine resource we call a register set.
+ * This is part of the state of an individual thread, not necessarily
+ * actual CPU registers per se. A register set consists of a number of
+ * similar slots, given by @n. Each slot is @size bytes, and aligned to
+ * @align bytes (which is at least @size).
+ *
+ * These functions must be called only on the current thread or on a
+ * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are
+ * guaranteed will not be woken up and return to user mode, and that we
+ * have called wait_task_inactive() on. (The target thread always might
+ * wake up for SIGKILL while these functions are working, in which case
+ * that thread's user_regset state might be scrambled.)
+ *
+ * The @pos argument must be aligned according to @align; the @count
+ * argument must be a multiple of @size. These functions are not
+ * responsible for checking for invalid arguments.
+ *
+ * When there is a natural value to use as an index, @bias gives the
+ * difference between the natural index and the slot index for the
+ * register set. For example, x86 GDT segment descriptors form a regset;
+ * the segment selector produces a natural index, but only a subset of
+ * that index space is available as a regset (the TLS slots); subtracting
+ * @bias from a segment selector index value computes the regset slot.
+ *
+ * If nonzero, @core_note_type gives the n_type field (NT_* value)
+ * of the core file note in which this regset's data appears.
+ * NT_PRSTATUS is a special case in that the regset data starts at
+ * offsetof(struct elf_prstatus, pr_reg) into the note data; that is
+ * part of the per-machine ELF formats userland knows about. In
+ * other cases, the core file note contains exactly the whole regset
+ * (@n * @size) and nothing else. The core file note is normally
+ * omitted when there is an @active function and it returns zero.
+ */
+struct user_regset {
+ user_regset_get_fn *get;
+ user_regset_set_fn *set;
+ user_regset_active_fn *active;
+ user_regset_writeback_fn *writeback;
+ unsigned int n;
+ unsigned int size;
+ unsigned int align;
+ unsigned int bias;
+ unsigned int core_note_type;
+};
+
+/**
+ * struct user_regset_view - available regsets
+ * @name: Identifier, e.g. UTS_MACHINE string.
+ * @regsets: Array of @n regsets available in this view.
+ * @n: Number of elements in @regsets.
+ * @e_machine: ELF header @e_machine %EM_* value written in core dumps.
+ * @e_flags: ELF header @e_flags value written in core dumps.
+ * @ei_osabi: ELF header @e_ident[%EI_OSABI] value written in core dumps.
+ *
+ * A regset view is a collection of regsets (&struct user_regset,
+ * above). This describes all the state of a thread that can be seen
+ * from a given architecture/ABI environment. More than one view might
+ * refer to the same &struct user_regset, or more than one regset
+ * might refer to the same machine-specific state in the thread. For
+ * example, a 32-bit thread's state could be examined from the 32-bit
+ * view or from the 64-bit view. Either method reaches the same thread
+ * register state, doing appropriate widening or truncation.
+ */
+struct user_regset_view {
+ const char *name;
+ const struct user_regset *regsets;
+ unsigned int n;
+ u32 e_flags;
+ u16 e_machine;
+ u8 ei_osabi;
+};
+
+/*
+ * This is documented here rather than at the definition sites because its
+ * implementation is machine-dependent but its interface is universal.
+ */
+/**
+ * task_user_regset_view - Return the process's native regset view.
+ * @tsk: a thread of the process in question
+ *
+ * Return the &struct user_regset_view that is native for the given process.
+ * For example, what it would access when it called ptrace().
+ * Throughout the life of the process, this only changes at exec.
+ */
+const struct user_regset_view *task_user_regset_view(struct task_struct *tsk);
+
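+/*
+ * Example (illustrative sketch only, not part of this interface): how an
+ * architecture might describe its general-purpose registers as a regset and
+ * expose them through a regset view.  The names foo_gpr_get(), foo_gpr_set()
+ * and struct foo_user_regs are hypothetical.
+ *
+ *	static const struct user_regset foo_regsets[] = {
+ *		[0] = {
+ *			.core_note_type	= NT_PRSTATUS,
+ *			.n		= sizeof(struct foo_user_regs) / sizeof(long),
+ *			.size		= sizeof(long),
+ *			.align		= sizeof(long),
+ *			.get		= foo_gpr_get,
+ *			.set		= foo_gpr_set,
+ *		},
+ *	};
+ *
+ *	static const struct user_regset_view foo_view = {
+ *		.name		= "foo",
+ *		.e_machine	= EM_NONE,
+ *		.regsets	= foo_regsets,
+ *		.n		= ARRAY_SIZE(foo_regsets),
+ *	};
+ *
+ *	const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
+ *	{
+ *		return &foo_view;
+ *	}
+ */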
+
+/*
+ * These are helpers for writing regset get/set functions in arch code.
+ * Because @start_pos and @end_pos are always compile-time constants,
+ * these are inlined into very little code though they look large.
+ *
+ * Use one or more calls sequentially for each chunk of regset data stored
+ * contiguously in memory. Call with constants for @start_pos and @end_pos,
+ * giving the range of byte positions in the regset that data corresponds
+ * to; @end_pos can be -1 if this chunk is at the end of the regset layout.
+ * Each call updates the arguments to point past its chunk.
+ */
+
+static inline int user_regset_copyout(unsigned int *pos, unsigned int *count,
+ void **kbuf,
+ void __user **ubuf, const void *data,
+ const int start_pos, const int end_pos)
+{
+ if (*count == 0)
+ return 0;
+ BUG_ON(*pos < start_pos);
+ if (end_pos < 0 || *pos < end_pos) {
+ unsigned int copy = (end_pos < 0 ? *count
+ : min(*count, end_pos - *pos));
+ data += *pos - start_pos;
+ if (*kbuf) {
+ memcpy(*kbuf, data, copy);
+ *kbuf += copy;
+ } else if (__copy_to_user(*ubuf, data, copy))
+ return -EFAULT;
+ else
+ *ubuf += copy;
+ *pos += copy;
+ *count -= copy;
+ }
+ return 0;
+}
+
+static inline int user_regset_copyin(unsigned int *pos, unsigned int *count,
+ const void **kbuf,
+ const void __user **ubuf, void *data,
+ const int start_pos, const int end_pos)
+{
+ if (*count == 0)
+ return 0;
+ BUG_ON(*pos < start_pos);
+ if (end_pos < 0 || *pos < end_pos) {
+ unsigned int copy = (end_pos < 0 ? *count
+ : min(*count, end_pos - *pos));
+ data += *pos - start_pos;
+ if (*kbuf) {
+ memcpy(data, *kbuf, copy);
+ *kbuf += copy;
+ } else if (__copy_from_user(data, *ubuf, copy))
+ return -EFAULT;
+ else
+ *ubuf += copy;
+ *pos += copy;
+ *count -= copy;
+ }
+ return 0;
+}
+
+/*
+ * These two parallel the two above, but for portions of a regset layout
+ * that always read as all-zero or for which writes are ignored.
+ */
+static inline int user_regset_copyout_zero(unsigned int *pos,
+ unsigned int *count,
+ void **kbuf, void __user **ubuf,
+ const int start_pos,
+ const int end_pos)
+{
+ if (*count == 0)
+ return 0;
+ BUG_ON(*pos < start_pos);
+ if (end_pos < 0 || *pos < end_pos) {
+ unsigned int copy = (end_pos < 0 ? *count
+ : min(*count, end_pos - *pos));
+ if (*kbuf) {
+ memset(*kbuf, 0, copy);
+ *kbuf += copy;
+ } else if (__clear_user(*ubuf, copy))
+ return -EFAULT;
+ else
+ *ubuf += copy;
+ *pos += copy;
+ *count -= copy;
+ }
+ return 0;
+}
+
+static inline int user_regset_copyin_ignore(unsigned int *pos,
+ unsigned int *count,
+ const void **kbuf,
+ const void __user **ubuf,
+ const int start_pos,
+ const int end_pos)
+{
+ if (*count == 0)
+ return 0;
+ BUG_ON(*pos < start_pos);
+ if (end_pos < 0 || *pos < end_pos) {
+ unsigned int copy = (end_pos < 0 ? *count
+ : min(*count, end_pos - *pos));
+ if (*kbuf)
+ *kbuf += copy;
+ else
+ *ubuf += copy;
+ *pos += copy;
+ *count -= copy;
+ }
+ return 0;
+}
+
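+/*
+ * Example (illustrative sketch only): a regset @get function built from the
+ * helpers above.  It copies out a hypothetical register block held in the
+ * thread and pads the remainder of the regset with zeroes.  task_foo_regs()
+ * and struct foo_user_regs are made-up names; @start_pos/@end_pos must be
+ * compile-time constants as noted above.
+ *
+ *	static int foo_gpr_get(struct task_struct *target,
+ *			       const struct user_regset *regset,
+ *			       unsigned int pos, unsigned int count,
+ *			       void *kbuf, void __user *ubuf)
+ *	{
+ *		const struct foo_user_regs *regs = task_foo_regs(target);
+ *		int ret;
+ *
+ *		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
+ *					  0, sizeof(*regs));
+ *		if (ret)
+ *			return ret;
+ *
+ *		return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ *						sizeof(*regs), -1);
+ *	}
+ */
+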
+/**
+ * copy_regset_to_user - fetch a thread's user_regset data into user memory
+ * @target: thread to be examined
+ * @view: &struct user_regset_view describing user thread machine state
+ * @setno: index in @view->regsets
+ * @offset: offset into the regset data, in bytes
+ * @size: amount of data to copy, in bytes
+ * @data: user-mode pointer to copy into
+ */
+static inline int copy_regset_to_user(struct task_struct *target,
+ const struct user_regset_view *view,
+ unsigned int setno,
+ unsigned int offset, unsigned int size,
+ void __user *data)
+{
+ const struct user_regset *regset = &view->regsets[setno];
+
+ if (!regset->get)
+ return -EOPNOTSUPP;
+
+ if (!access_ok(VERIFY_WRITE, data, size))
+ return -EFAULT;
+
+ return regset->get(target, regset, offset, size, NULL, data);
+}
+
+/**
+ * copy_regset_from_user - store into thread's user_regset data from user memory
+ * @target: thread to be examined
+ * @view: &struct user_regset_view describing user thread machine state
+ * @setno: index in @view->regsets
+ * @offset: offset into the regset data, in bytes
+ * @size: amount of data to copy, in bytes
+ * @data: user-mode pointer to copy from
+ */
+static inline int copy_regset_from_user(struct task_struct *target,
+ const struct user_regset_view *view,
+ unsigned int setno,
+ unsigned int offset, unsigned int size,
+ const void __user *data)
+{
+ const struct user_regset *regset = &view->regsets[setno];
+
+ if (!regset->set)
+ return -EOPNOTSUPP;
+
+ if (!access_ok(VERIFY_READ, data, size))
+ return -EFAULT;
+
+ return regset->set(target, regset, offset, size, NULL, data);
+}
+
+
+#endif /* <linux/regset.h> */
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
new file mode 100644
index 000000000..d8ecefaf6
--- /dev/null
+++ b/include/linux/regulator/ab8500.h
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ * Daniel Willerud <daniel.willerud@stericsson.com> for ST-Ericsson
+ */
+
+#ifndef __LINUX_MFD_AB8500_REGULATOR_H
+#define __LINUX_MFD_AB8500_REGULATOR_H
+
+#include <linux/platform_device.h>
+
+/* AB8500 regulators */
+enum ab8500_regulator_id {
+ AB8500_LDO_AUX1,
+ AB8500_LDO_AUX2,
+ AB8500_LDO_AUX3,
+ AB8500_LDO_INTCORE,
+ AB8500_LDO_TVOUT,
+ AB8500_LDO_AUDIO,
+ AB8500_LDO_ANAMIC1,
+ AB8500_LDO_ANAMIC2,
+ AB8500_LDO_DMIC,
+ AB8500_LDO_ANA,
+ AB8500_NUM_REGULATORS,
+};
+
+/* AB8505 regulators */
+enum ab8505_regulator_id {
+ AB8505_LDO_AUX1,
+ AB8505_LDO_AUX2,
+ AB8505_LDO_AUX3,
+ AB8505_LDO_AUX4,
+ AB8505_LDO_AUX5,
+ AB8505_LDO_AUX6,
+ AB8505_LDO_INTCORE,
+ AB8505_LDO_ADC,
+ AB8505_LDO_USB,
+ AB8505_LDO_AUDIO,
+ AB8505_LDO_ANAMIC1,
+ AB8505_LDO_ANAMIC2,
+ AB8505_LDO_AUX8,
+ AB8505_LDO_ANA,
+ AB8505_SYSCLKREQ_2,
+ AB8505_SYSCLKREQ_4,
+ AB8505_NUM_REGULATORS,
+};
+
+/* AB9540 regulators */
+enum ab9540_regulator_id {
+ AB9540_LDO_AUX1,
+ AB9540_LDO_AUX2,
+ AB9540_LDO_AUX3,
+ AB9540_LDO_AUX4,
+ AB9540_LDO_INTCORE,
+ AB9540_LDO_TVOUT,
+ AB9540_LDO_USB,
+ AB9540_LDO_AUDIO,
+ AB9540_LDO_ANAMIC1,
+ AB9540_LDO_ANAMIC2,
+ AB9540_LDO_DMIC,
+ AB9540_LDO_ANA,
+ AB9540_SYSCLKREQ_2,
+ AB9540_SYSCLKREQ_4,
+ AB9540_NUM_REGULATORS,
+};
+
+/* AB8540 regulators */
+enum ab8540_regulator_id {
+ AB8540_LDO_AUX1,
+ AB8540_LDO_AUX2,
+ AB8540_LDO_AUX3,
+ AB8540_LDO_AUX4,
+ AB8540_LDO_AUX5,
+ AB8540_LDO_AUX6,
+ AB8540_LDO_INTCORE,
+ AB8540_LDO_TVOUT,
+ AB8540_LDO_AUDIO,
+ AB8540_LDO_ANAMIC1,
+ AB8540_LDO_ANAMIC2,
+ AB8540_LDO_DMIC,
+ AB8540_LDO_ANA,
+ AB8540_LDO_SDIO,
+ AB8540_SYSCLKREQ_2,
+ AB8540_SYSCLKREQ_4,
+ AB8540_NUM_REGULATORS,
+};
+
+/* AB8500, AB8505, and AB9540 register initialization */
+struct ab8500_regulator_reg_init {
+ int id;
+ u8 mask;
+ u8 value;
+};
+
+#define INIT_REGULATOR_REGISTER(_id, _mask, _value) \
+ { \
+ .id = _id, \
+ .mask = _mask, \
+ .value = _value, \
+ }
+
+/* AB8500 registers */
+enum ab8500_regulator_reg {
+ AB8500_REGUREQUESTCTRL2,
+ AB8500_REGUREQUESTCTRL3,
+ AB8500_REGUREQUESTCTRL4,
+ AB8500_REGUSYSCLKREQ1HPVALID1,
+ AB8500_REGUSYSCLKREQ1HPVALID2,
+ AB8500_REGUHWHPREQ1VALID1,
+ AB8500_REGUHWHPREQ1VALID2,
+ AB8500_REGUHWHPREQ2VALID1,
+ AB8500_REGUHWHPREQ2VALID2,
+ AB8500_REGUSWHPREQVALID1,
+ AB8500_REGUSWHPREQVALID2,
+ AB8500_REGUSYSCLKREQVALID1,
+ AB8500_REGUSYSCLKREQVALID2,
+ AB8500_REGUMISC1,
+ AB8500_VAUDIOSUPPLY,
+ AB8500_REGUCTRL1VAMIC,
+ AB8500_VPLLVANAREGU,
+ AB8500_VREFDDR,
+ AB8500_EXTSUPPLYREGU,
+ AB8500_VAUX12REGU,
+ AB8500_VRF1VAUX3REGU,
+ AB8500_VAUX1SEL,
+ AB8500_VAUX2SEL,
+ AB8500_VRF1VAUX3SEL,
+ AB8500_REGUCTRL2SPARE,
+ AB8500_REGUCTRLDISCH,
+ AB8500_REGUCTRLDISCH2,
+ AB8500_NUM_REGULATOR_REGISTERS,
+};
+
+/* AB8505 registers */
+enum ab8505_regulator_reg {
+ AB8505_REGUREQUESTCTRL1,
+ AB8505_REGUREQUESTCTRL2,
+ AB8505_REGUREQUESTCTRL3,
+ AB8505_REGUREQUESTCTRL4,
+ AB8505_REGUSYSCLKREQ1HPVALID1,
+ AB8505_REGUSYSCLKREQ1HPVALID2,
+ AB8505_REGUHWHPREQ1VALID1,
+ AB8505_REGUHWHPREQ1VALID2,
+ AB8505_REGUHWHPREQ2VALID1,
+ AB8505_REGUHWHPREQ2VALID2,
+ AB8505_REGUSWHPREQVALID1,
+ AB8505_REGUSWHPREQVALID2,
+ AB8505_REGUSYSCLKREQVALID1,
+ AB8505_REGUSYSCLKREQVALID2,
+ AB8505_REGUVAUX4REQVALID,
+ AB8505_REGUMISC1,
+ AB8505_VAUDIOSUPPLY,
+ AB8505_REGUCTRL1VAMIC,
+ AB8505_VSMPSAREGU,
+ AB8505_VSMPSBREGU,
+ AB8505_VSAFEREGU, /* NOTE! PRCMU register */
+ AB8505_VPLLVANAREGU,
+ AB8505_EXTSUPPLYREGU,
+ AB8505_VAUX12REGU,
+ AB8505_VRF1VAUX3REGU,
+ AB8505_VSMPSASEL1,
+ AB8505_VSMPSASEL2,
+ AB8505_VSMPSASEL3,
+ AB8505_VSMPSBSEL1,
+ AB8505_VSMPSBSEL2,
+ AB8505_VSMPSBSEL3,
+ AB8505_VSAFESEL1, /* NOTE! PRCMU register */
+ AB8505_VSAFESEL2, /* NOTE! PRCMU register */
+ AB8505_VSAFESEL3, /* NOTE! PRCMU register */
+ AB8505_VAUX1SEL,
+ AB8505_VAUX2SEL,
+ AB8505_VRF1VAUX3SEL,
+ AB8505_VAUX4REQCTRL,
+ AB8505_VAUX4REGU,
+ AB8505_VAUX4SEL,
+ AB8505_REGUCTRLDISCH,
+ AB8505_REGUCTRLDISCH2,
+ AB8505_REGUCTRLDISCH3,
+ AB8505_CTRLVAUX5,
+ AB8505_CTRLVAUX6,
+ AB8505_NUM_REGULATOR_REGISTERS,
+};
+
+/* AB9540 registers */
+enum ab9540_regulator_reg {
+ AB9540_REGUREQUESTCTRL1,
+ AB9540_REGUREQUESTCTRL2,
+ AB9540_REGUREQUESTCTRL3,
+ AB9540_REGUREQUESTCTRL4,
+ AB9540_REGUSYSCLKREQ1HPVALID1,
+ AB9540_REGUSYSCLKREQ1HPVALID2,
+ AB9540_REGUHWHPREQ1VALID1,
+ AB9540_REGUHWHPREQ1VALID2,
+ AB9540_REGUHWHPREQ2VALID1,
+ AB9540_REGUHWHPREQ2VALID2,
+ AB9540_REGUSWHPREQVALID1,
+ AB9540_REGUSWHPREQVALID2,
+ AB9540_REGUSYSCLKREQVALID1,
+ AB9540_REGUSYSCLKREQVALID2,
+ AB9540_REGUVAUX4REQVALID,
+ AB9540_REGUMISC1,
+ AB9540_VAUDIOSUPPLY,
+ AB9540_REGUCTRL1VAMIC,
+ AB9540_VSMPS1REGU,
+ AB9540_VSMPS2REGU,
+ AB9540_VSMPS3REGU, /* NOTE! PRCMU register */
+ AB9540_VPLLVANAREGU,
+ AB9540_EXTSUPPLYREGU,
+ AB9540_VAUX12REGU,
+ AB9540_VRF1VAUX3REGU,
+ AB9540_VSMPS1SEL1,
+ AB9540_VSMPS1SEL2,
+ AB9540_VSMPS1SEL3,
+ AB9540_VSMPS2SEL1,
+ AB9540_VSMPS2SEL2,
+ AB9540_VSMPS2SEL3,
+ AB9540_VSMPS3SEL1, /* NOTE! PRCMU register */
+ AB9540_VSMPS3SEL2, /* NOTE! PRCMU register */
+ AB9540_VAUX1SEL,
+ AB9540_VAUX2SEL,
+ AB9540_VRF1VAUX3SEL,
+ AB9540_REGUCTRL2SPARE,
+ AB9540_VAUX4REQCTRL,
+ AB9540_VAUX4REGU,
+ AB9540_VAUX4SEL,
+ AB9540_REGUCTRLDISCH,
+ AB9540_REGUCTRLDISCH2,
+ AB9540_REGUCTRLDISCH3,
+ AB9540_NUM_REGULATOR_REGISTERS,
+};
+
+/* AB8540 registers */
+enum ab8540_regulator_reg {
+ AB8540_REGUREQUESTCTRL1,
+ AB8540_REGUREQUESTCTRL2,
+ AB8540_REGUREQUESTCTRL3,
+ AB8540_REGUREQUESTCTRL4,
+ AB8540_REGUSYSCLKREQ1HPVALID1,
+ AB8540_REGUSYSCLKREQ1HPVALID2,
+ AB8540_REGUHWHPREQ1VALID1,
+ AB8540_REGUHWHPREQ1VALID2,
+ AB8540_REGUHWHPREQ2VALID1,
+ AB8540_REGUHWHPREQ2VALID2,
+ AB8540_REGUSWHPREQVALID1,
+ AB8540_REGUSWHPREQVALID2,
+ AB8540_REGUSYSCLKREQVALID1,
+ AB8540_REGUSYSCLKREQVALID2,
+ AB8540_REGUVAUX4REQVALID,
+ AB8540_REGUVAUX5REQVALID,
+ AB8540_REGUVAUX6REQVALID,
+ AB8540_REGUVCLKBREQVALID,
+ AB8540_REGUVRF1REQVALID,
+ AB8540_REGUMISC1,
+ AB8540_VAUDIOSUPPLY,
+ AB8540_REGUCTRL1VAMIC,
+ AB8540_VHSIC,
+ AB8540_VSDIO,
+ AB8540_VSMPS1REGU,
+ AB8540_VSMPS2REGU,
+ AB8540_VSMPS3REGU,
+ AB8540_VPLLVANAREGU,
+ AB8540_EXTSUPPLYREGU,
+ AB8540_VAUX12REGU,
+ AB8540_VRF1VAUX3REGU,
+ AB8540_VSMPS1SEL1,
+ AB8540_VSMPS1SEL2,
+ AB8540_VSMPS1SEL3,
+ AB8540_VSMPS2SEL1,
+ AB8540_VSMPS2SEL2,
+ AB8540_VSMPS2SEL3,
+ AB8540_VSMPS3SEL1,
+ AB8540_VSMPS3SEL2,
+ AB8540_VAUX1SEL,
+ AB8540_VAUX2SEL,
+ AB8540_VRF1VAUX3SEL,
+ AB8540_REGUCTRL2SPARE,
+ AB8540_VAUX4REQCTRL,
+ AB8540_VAUX4REGU,
+ AB8540_VAUX4SEL,
+ AB8540_VAUX5REQCTRL,
+ AB8540_VAUX5REGU,
+ AB8540_VAUX5SEL,
+ AB8540_VAUX6REQCTRL,
+ AB8540_VAUX6REGU,
+ AB8540_VAUX6SEL,
+ AB8540_VCLKBREQCTRL,
+ AB8540_VCLKBREGU,
+ AB8540_VCLKBSEL,
+ AB8540_VRF1REQCTRL,
+ AB8540_REGUCTRLDISCH,
+ AB8540_REGUCTRLDISCH2,
+ AB8540_REGUCTRLDISCH3,
+ AB8540_REGUCTRLDISCH4,
+ AB8540_VSIMSYSCLKCTRL,
+ AB8540_VANAVPLLSEL,
+ AB8540_NUM_REGULATOR_REGISTERS,
+};
+
+/* AB8500 external regulators */
+struct ab8500_ext_regulator_cfg {
+ bool hwreq; /* requires hw mode or high power mode */
+};
+
+enum ab8500_ext_regulator_id {
+ AB8500_EXT_SUPPLY1,
+ AB8500_EXT_SUPPLY2,
+ AB8500_EXT_SUPPLY3,
+ AB8500_NUM_EXT_REGULATORS,
+};
+
+/* AB8500 regulator platform data */
+struct ab8500_regulator_platform_data {
+ int num_reg_init;
+ struct ab8500_regulator_reg_init *reg_init;
+ int num_regulator;
+ struct regulator_init_data *regulator;
+ int num_ext_regulator;
+ struct regulator_init_data *ext_regulator;
+};
+
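+/*
+ * Example (illustrative sketch only): board code could tweak a couple of
+ * regulator registers and hand the configuration to the ab8500 regulator
+ * driver through platform data.  The masks/values and the
+ * board_regulator_init_data array (a struct regulator_init_data array of
+ * AB8500_NUM_REGULATORS entries) are made up for illustration.
+ *
+ *	static struct ab8500_regulator_reg_init board_reg_init[] = {
+ *		INIT_REGULATOR_REGISTER(AB8500_REGUMISC1, 0x02, 0x02),
+ *		INIT_REGULATOR_REGISTER(AB8500_VAUX1SEL, 0x0f, 0x08),
+ *	};
+ *
+ *	static struct ab8500_regulator_platform_data board_regulator_plat = {
+ *		.reg_init	= board_reg_init,
+ *		.num_reg_init	= ARRAY_SIZE(board_reg_init),
+ *		.regulator	= board_regulator_init_data,
+ *		.num_regulator	= AB8500_NUM_REGULATORS,
+ *	};
+ */
+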
+#endif
diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h
new file mode 100644
index 000000000..15fa8f2d3
--- /dev/null
+++ b/include/linux/regulator/act8865.h
@@ -0,0 +1,88 @@
+/*
+ * act8865.h -- Voltage regulation for active-semi act88xx PMUs
+ *
+ * Copyright (C) 2013 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_ACT8865_H
+#define __LINUX_REGULATOR_ACT8865_H
+
+#include <linux/regulator/machine.h>
+
+enum {
+ ACT8600_ID_DCDC1,
+ ACT8600_ID_DCDC2,
+ ACT8600_ID_DCDC3,
+ ACT8600_ID_SUDCDC4,
+ ACT8600_ID_LDO5,
+ ACT8600_ID_LDO6,
+ ACT8600_ID_LDO7,
+ ACT8600_ID_LDO8,
+ ACT8600_ID_LDO9,
+ ACT8600_ID_LDO10,
+};
+
+enum {
+ ACT8865_ID_DCDC1,
+ ACT8865_ID_DCDC2,
+ ACT8865_ID_DCDC3,
+ ACT8865_ID_LDO1,
+ ACT8865_ID_LDO2,
+ ACT8865_ID_LDO3,
+ ACT8865_ID_LDO4,
+ ACT8865_REG_NUM,
+};
+
+enum {
+ ACT8846_ID_REG1,
+ ACT8846_ID_REG2,
+ ACT8846_ID_REG3,
+ ACT8846_ID_REG4,
+ ACT8846_ID_REG5,
+ ACT8846_ID_REG6,
+ ACT8846_ID_REG7,
+ ACT8846_ID_REG8,
+ ACT8846_ID_REG9,
+ ACT8846_ID_REG10,
+ ACT8846_ID_REG11,
+ ACT8846_ID_REG12,
+ ACT8846_REG_NUM,
+};
+
+enum {
+ ACT8600,
+ ACT8865,
+ ACT8846,
+};
+
+/**
+ * act8865_regulator_data - regulator data
+ * @id: regulator id
+ * @name: regulator name
+ * @platform_data: regulator init data
+ */
+struct act8865_regulator_data {
+ int id;
+ const char *name;
+ struct regulator_init_data *platform_data;
+};
+
+/**
+ * act8865_platform_data - platform data for act8865
+ * @num_regulators: number of regulators used
+ * @regulators: pointer to regulators used
+ */
+struct act8865_platform_data {
+ int num_regulators;
+ struct act8865_regulator_data *regulators;
+};
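+
+/*
+ * Example (illustrative sketch only): wiring one ACT8865 output into the
+ * platform data above.  The constraints, names and values are made up.
+ *
+ *	static struct regulator_init_data board_dcdc1_data = {
+ *		.constraints = {
+ *			.name		= "vcc-core",
+ *			.min_uV		= 1200000,
+ *			.max_uV		= 1200000,
+ *			.always_on	= 1,
+ *		},
+ *	};
+ *
+ *	static struct act8865_regulator_data board_act8865_regulators[] = {
+ *		{
+ *			.id		= ACT8865_ID_DCDC1,
+ *			.name		= "DCDC_REG1",
+ *			.platform_data	= &board_dcdc1_data,
+ *		},
+ *	};
+ *
+ *	static struct act8865_platform_data board_act8865_pdata = {
+ *		.regulators	= board_act8865_regulators,
+ *		.num_regulators	= ARRAY_SIZE(board_act8865_regulators),
+ *	};
+ */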
+#endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
new file mode 100644
index 000000000..f8a689ed6
--- /dev/null
+++ b/include/linux/regulator/consumer.h
@@ -0,0 +1,573 @@
+/*
+ * consumer.h -- SoC Regulator consumer support.
+ *
+ * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Regulator Consumer Interface.
+ *
+ * A Power Management Regulator framework for SoC based devices.
+ * Features:-
+ * o Voltage and current level control.
+ * o Operating mode control.
+ * o Regulator status.
+ * o sysfs entries for showing client devices and status
+ *
+ * EXPERIMENTAL FEATURES:
+ * Dynamic Regulator operating Mode Switching (DRMS) - allows regulators
+ * to use most efficient operating mode depending upon voltage and load and
+ * is transparent to client drivers.
+ *
+ * e.g. Devices x,y,z share regulator r. Device x and y draw 20mA each during
+ * IO and 1mA at idle. Device z draws 100mA when under load and 5mA when
+ * idling. Regulator r has > 90% efficiency in NORMAL mode at loads > 100mA
+ * but this drops rapidly to 60% when below 100mA. Regulator r has > 90%
+ * efficiency in IDLE mode at loads < 10mA. Thus regulator r will operate
+ * in normal mode for loads > 10mA and in IDLE mode for load <= 10mA.
+ *
+ */
+
+#ifndef __LINUX_REGULATOR_CONSUMER_H_
+#define __LINUX_REGULATOR_CONSUMER_H_
+
+#include <linux/err.h>
+
+struct device;
+struct notifier_block;
+struct regmap;
+
+/*
+ * Regulator operating modes.
+ *
+ * Regulators can run in a variety of different operating modes depending on
+ * output load. This allows further system power savings by selecting the
+ * best (and most efficient) regulator mode for a desired load.
+ *
+ * Most drivers will only care about NORMAL. The modes below are generic and
+ * will probably not match the naming convention of your regulator data sheet
+ * but should match the use cases in the datasheet.
+ *
+ * In order of power efficiency (least efficient at top).
+ *
+ * Mode Description
+ * FAST Regulator can handle fast changes in its load.
+ * e.g. useful in CPU voltage & frequency scaling where
+ * load can quickly increase with CPU frequency increases.
+ *
+ * NORMAL Normal regulator power supply mode. Most drivers will
+ * use this mode.
+ *
+ * IDLE Regulator runs in a more efficient mode for light
+ * loads. Can be used for devices that have a low power
+ * requirement during periods of inactivity. This mode
+ * may be more noisy than NORMAL and may not be able
+ * to handle fast load switching.
+ *
+ * STANDBY Regulator runs in the most efficient mode for very
+ * light loads. Can be used by devices when they are
+ * in a sleep/standby state. This mode is likely to be
+ * the most noisy and may not be able to handle fast load
+ * switching.
+ *
+ * NOTE: Most regulators will only support a subset of these modes. Some
+ * will only just support NORMAL.
+ *
+ * These modes can be OR'ed together to make up a mask of valid regulator modes.
+ */
+
+#define REGULATOR_MODE_FAST 0x1
+#define REGULATOR_MODE_NORMAL 0x2
+#define REGULATOR_MODE_IDLE 0x4
+#define REGULATOR_MODE_STANDBY 0x8
+
+/*
+ * Regulator notifier events.
+ *
+ * UNDER_VOLTAGE Regulator output is under voltage.
+ * OVER_CURRENT Regulator output current is too high.
+ * REGULATION_OUT Regulator output is out of regulation.
+ * FAIL Regulator output has failed.
+ * OVER_TEMP Regulator over temp.
+ * FORCE_DISABLE Regulator forcibly shut down by software.
+ * VOLTAGE_CHANGE Regulator voltage changed.
+ * Data passed is old voltage cast to (void *).
+ * DISABLE Regulator was disabled.
+ * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
+ * Data passed is "struct pre_voltage_change_data"
+ * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
+ * Data passed is old voltage cast to (void *).
+ * PRE_DISABLE Regulator is about to be disabled
+ * ABORT_DISABLE Regulator disable failed for some reason
+ *
+ * NOTE: These events can be OR'ed together when passed into handler.
+ */
+
+#define REGULATOR_EVENT_UNDER_VOLTAGE 0x01
+#define REGULATOR_EVENT_OVER_CURRENT 0x02
+#define REGULATOR_EVENT_REGULATION_OUT 0x04
+#define REGULATOR_EVENT_FAIL 0x08
+#define REGULATOR_EVENT_OVER_TEMP 0x10
+#define REGULATOR_EVENT_FORCE_DISABLE 0x20
+#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+#define REGULATOR_EVENT_DISABLE 0x80
+#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
+#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
+#define REGULATOR_EVENT_PRE_DISABLE 0x400
+#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+
+/**
+ * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
+ *
+ * @old_uV: Current voltage before change.
+ * @min_uV: Min voltage we'll change to.
+ * @max_uV: Max voltage we'll change to.
+ */
+struct pre_voltage_change_data {
+ unsigned long old_uV;
+ unsigned long min_uV;
+ unsigned long max_uV;
+};
+
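+/*
+ * Example (illustrative sketch only): a consumer notifier callback looking at
+ * the PRE_VOLTAGE_CHANGE payload.  It would be hooked up with
+ * regulator_register_notifier(), declared further down; the function name is
+ * hypothetical.
+ *
+ *	static int foo_regulator_event(struct notifier_block *nb,
+ *				       unsigned long event, void *data)
+ *	{
+ *		struct pre_voltage_change_data *pvc = data;
+ *
+ *		if (event & REGULATOR_EVENT_PRE_VOLTAGE_CHANGE)
+ *			pr_info("supply moving from %lu uV to %lu-%lu uV\n",
+ *				pvc->old_uV, pvc->min_uV, pvc->max_uV);
+ *
+ *		return NOTIFY_OK;
+ *	}
+ */
+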
+struct regulator;
+
+/**
+ * struct regulator_bulk_data - Data used for bulk regulator operations.
+ *
+ * @supply: The name of the supply. Initialised by the user before
+ * using the bulk regulator APIs.
+ * @consumer: The regulator consumer for the supply. This will be managed
+ * by the bulk API.
+ *
+ * The regulator APIs provide a series of regulator_bulk_() API calls as
+ * a convenience to consumers which require multiple supplies. This
+ * structure is used to manage data for these calls.
+ */
+struct regulator_bulk_data {
+ const char *supply;
+ struct regulator *consumer;
+
+ /* private: Internal use */
+ int ret;
+};
+
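+/*
+ * Example (illustrative sketch only): a driver that needs two supplies can
+ * manage them with the bulk helpers declared below.  The supply names and the
+ * foo_priv structure (holding a struct regulator_bulk_data supplies[2]) are
+ * hypothetical.
+ *
+ *	static const char * const foo_supplies[] = { "vdd", "vddio" };
+ *
+ *	static int foo_enable_supplies(struct device *dev, struct foo_priv *priv)
+ *	{
+ *		int i, ret;
+ *
+ *		for (i = 0; i < ARRAY_SIZE(foo_supplies); i++)
+ *			priv->supplies[i].supply = foo_supplies[i];
+ *
+ *		ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(foo_supplies),
+ *					      priv->supplies);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return regulator_bulk_enable(ARRAY_SIZE(foo_supplies),
+ *					     priv->supplies);
+ *	}
+ */
+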
+#if defined(CONFIG_REGULATOR)
+
+/* regulator get and put */
+struct regulator *__must_check regulator_get(struct device *dev,
+ const char *id);
+struct regulator *__must_check devm_regulator_get(struct device *dev,
+ const char *id);
+struct regulator *__must_check regulator_get_exclusive(struct device *dev,
+ const char *id);
+struct regulator *__must_check devm_regulator_get_exclusive(struct device *dev,
+ const char *id);
+struct regulator *__must_check regulator_get_optional(struct device *dev,
+ const char *id);
+struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
+ const char *id);
+void regulator_put(struct regulator *regulator);
+void devm_regulator_put(struct regulator *regulator);
+
+int regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id);
+void regulator_unregister_supply_alias(struct device *dev, const char *id);
+
+int regulator_bulk_register_supply_alias(struct device *dev,
+ const char *const *id,
+ struct device *alias_dev,
+ const char *const *alias_id,
+ int num_id);
+void regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char * const *id, int num_id);
+
+int devm_regulator_register_supply_alias(struct device *dev, const char *id,
+ struct device *alias_dev,
+ const char *alias_id);
+void devm_regulator_unregister_supply_alias(struct device *dev,
+ const char *id);
+
+int devm_regulator_bulk_register_supply_alias(struct device *dev,
+ const char *const *id,
+ struct device *alias_dev,
+ const char *const *alias_id,
+ int num_id);
+void devm_regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char *const *id,
+ int num_id);
+
+/* regulator output control and status */
+int __must_check regulator_enable(struct regulator *regulator);
+int regulator_disable(struct regulator *regulator);
+int regulator_force_disable(struct regulator *regulator);
+int regulator_is_enabled(struct regulator *regulator);
+int regulator_disable_deferred(struct regulator *regulator, int ms);
+
+int __must_check regulator_bulk_get(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers);
+int __must_check devm_regulator_bulk_get(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers);
+int __must_check regulator_bulk_enable(int num_consumers,
+ struct regulator_bulk_data *consumers);
+int regulator_bulk_disable(int num_consumers,
+ struct regulator_bulk_data *consumers);
+int regulator_bulk_force_disable(int num_consumers,
+ struct regulator_bulk_data *consumers);
+void regulator_bulk_free(int num_consumers,
+ struct regulator_bulk_data *consumers);
+
+int regulator_can_change_voltage(struct regulator *regulator);
+int regulator_count_voltages(struct regulator *regulator);
+int regulator_list_voltage(struct regulator *regulator, unsigned selector);
+int regulator_is_supported_voltage(struct regulator *regulator,
+ int min_uV, int max_uV);
+unsigned int regulator_get_linear_step(struct regulator *regulator);
+int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
+int regulator_set_voltage_time(struct regulator *regulator,
+ int old_uV, int new_uV);
+int regulator_get_voltage(struct regulator *regulator);
+int regulator_sync_voltage(struct regulator *regulator);
+int regulator_set_current_limit(struct regulator *regulator,
+ int min_uA, int max_uA);
+int regulator_get_current_limit(struct regulator *regulator);
+
+int regulator_set_mode(struct regulator *regulator, unsigned int mode);
+unsigned int regulator_get_mode(struct regulator *regulator);
+int regulator_set_load(struct regulator *regulator, int load_uA);
+
+int regulator_allow_bypass(struct regulator *regulator, bool allow);
+
+struct regmap *regulator_get_regmap(struct regulator *regulator);
+int regulator_get_hardware_vsel_register(struct regulator *regulator,
+ unsigned *vsel_reg,
+ unsigned *vsel_mask);
+int regulator_list_hardware_vsel(struct regulator *regulator,
+ unsigned selector);
+
+/* regulator notifier block */
+int regulator_register_notifier(struct regulator *regulator,
+ struct notifier_block *nb);
+int devm_regulator_register_notifier(struct regulator *regulator,
+ struct notifier_block *nb);
+int regulator_unregister_notifier(struct regulator *regulator,
+ struct notifier_block *nb);
+void devm_regulator_unregister_notifier(struct regulator *regulator,
+ struct notifier_block *nb);
+
+/* driver data - core doesn't touch */
+void *regulator_get_drvdata(struct regulator *regulator);
+void regulator_set_drvdata(struct regulator *regulator, void *data);
+
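+/*
+ * Example (illustrative sketch only): typical consumer usage of the calls
+ * above from a driver probe path.  The "vcc" supply name, the 1.8V request
+ * and the simplified error handling are for illustration only.
+ *
+ *	struct regulator *vcc;
+ *	int ret;
+ *
+ *	vcc = devm_regulator_get(&pdev->dev, "vcc");
+ *	if (IS_ERR(vcc))
+ *		return PTR_ERR(vcc);
+ *
+ *	ret = regulator_set_voltage(vcc, 1800000, 1800000);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = regulator_enable(vcc);
+ *	if (ret)
+ *		return ret;
+ */
+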
+#else
+
+/*
+ * Make sure client drivers will still build on systems with no software
+ * controllable voltage or current regulators.
+ */
+static inline struct regulator *__must_check regulator_get(struct device *dev,
+ const char *id)
+{
+ /* Nothing except the stubbed out regulator API should be
+ * looking at the value except to check if it is an error
+ * value. Drivers are free to handle NULL specifically by
+ * skipping all regulator API calls, but they don't have to.
+ * Drivers which don't, should make sure they properly handle
+ * corner cases of the API, such as regulator_get_voltage()
+ * returning 0.
+ */
+ return NULL;
+}
+
+static inline struct regulator *__must_check
+devm_regulator_get(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
+static inline struct regulator *__must_check
+regulator_get_exclusive(struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct regulator *__must_check
+regulator_get_optional(struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+
+static inline struct regulator *__must_check
+devm_regulator_get_optional(struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void regulator_put(struct regulator *regulator)
+{
+}
+
+static inline void devm_regulator_put(struct regulator *regulator)
+{
+}
+
+static inline int regulator_register_supply_alias(struct device *dev,
+ const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ return 0;
+}
+
+static inline void regulator_unregister_supply_alias(struct device *dev,
+ const char *id)
+{
+}
+
+static inline int regulator_bulk_register_supply_alias(struct device *dev,
+ const char *const *id,
+ struct device *alias_dev,
+ const char * const *alias_id,
+ int num_id)
+{
+ return 0;
+}
+
+static inline void regulator_bulk_unregister_supply_alias(struct device *dev,
+ const char * const *id,
+ int num_id)
+{
+}
+
+static inline int devm_regulator_register_supply_alias(struct device *dev,
+ const char *id,
+ struct device *alias_dev,
+ const char *alias_id)
+{
+ return 0;
+}
+
+static inline void devm_regulator_unregister_supply_alias(struct device *dev,
+ const char *id)
+{
+}
+
+static inline int devm_regulator_bulk_register_supply_alias(struct device *dev,
+ const char *const *id,
+ struct device *alias_dev,
+ const char *const *alias_id,
+ int num_id)
+{
+ return 0;
+}
+
+static inline void devm_regulator_bulk_unregister_supply_alias(
+ struct device *dev, const char *const *id, int num_id)
+{
+}
+
+static inline int regulator_enable(struct regulator *regulator)
+{
+ return 0;
+}
+
+static inline int regulator_disable(struct regulator *regulator)
+{
+ return 0;
+}
+
+static inline int regulator_force_disable(struct regulator *regulator)
+{
+ return 0;
+}
+
+static inline int regulator_disable_deferred(struct regulator *regulator,
+ int ms)
+{
+ return 0;
+}
+
+static inline int regulator_is_enabled(struct regulator *regulator)
+{
+ return 1;
+}
+
+static inline int regulator_bulk_get(struct device *dev,
+ int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ return 0;
+}
+
+static inline int devm_regulator_bulk_get(struct device *dev, int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ return 0;
+}
+
+static inline int regulator_bulk_enable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ return 0;
+}
+
+static inline int regulator_bulk_disable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ return 0;
+}
+
+static inline int regulator_bulk_force_disable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ return 0;
+}
+
+static inline void regulator_bulk_free(int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+}
+
+static inline int regulator_can_change_voltage(struct regulator *regulator)
+{
+ return 0;
+}
+
+static inline int regulator_set_voltage(struct regulator *regulator,
+ int min_uV, int max_uV)
+{
+ return 0;
+}
+
+static inline int regulator_set_voltage_time(struct regulator *regulator,
+ int old_uV, int new_uV)
+{
+ return 0;
+}
+
+static inline int regulator_get_voltage(struct regulator *regulator)
+{
+ return -EINVAL;
+}
+
+static inline int regulator_is_supported_voltage(struct regulator *regulator,
+ int min_uV, int max_uV)
+{
+ return 0;
+}
+
+static inline int regulator_set_current_limit(struct regulator *regulator,
+ int min_uA, int max_uA)
+{
+ return 0;
+}
+
+static inline int regulator_get_current_limit(struct regulator *regulator)
+{
+ return 0;
+}
+
+static inline int regulator_set_mode(struct regulator *regulator,
+ unsigned int mode)
+{
+ return 0;
+}
+
+static inline unsigned int regulator_get_mode(struct regulator *regulator)
+{
+ return REGULATOR_MODE_NORMAL;
+}
+
+static inline int regulator_set_load(struct regulator *regulator, int load_uA)
+{
+ return 0;
+}
+
+static inline int regulator_allow_bypass(struct regulator *regulator,
+ bool allow)
+{
+ return 0;
+}
+
+static inline struct regmap *regulator_get_regmap(struct regulator *regulator)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int regulator_get_hardware_vsel_register(struct regulator *regulator,
+ unsigned *vsel_reg,
+ unsigned *vsel_mask)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int regulator_list_hardware_vsel(struct regulator *regulator,
+ unsigned selector)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int regulator_register_notifier(struct regulator *regulator,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int devm_regulator_register_notifier(struct regulator *regulator,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int regulator_unregister_notifier(struct regulator *regulator,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void devm_regulator_unregister_notifier(struct regulator *regulator,
+ struct notifier_block *nb)
+{
+}
+
+static inline void *regulator_get_drvdata(struct regulator *regulator)
+{
+ return NULL;
+}
+
+static inline void regulator_set_drvdata(struct regulator *regulator,
+ void *data)
+{
+}
+
+static inline int regulator_count_voltages(struct regulator *regulator)
+{
+ return 0;
+}
+#endif
+
+static inline int regulator_set_voltage_tol(struct regulator *regulator,
+ int new_uV, int tol_uV)
+{
+ if (regulator_set_voltage(regulator, new_uV, new_uV + tol_uV) == 0)
+ return 0;
+ else
+ return regulator_set_voltage(regulator,
+ new_uV - tol_uV, new_uV + tol_uV);
+}
+
+static inline int regulator_is_supported_voltage_tol(struct regulator *regulator,
+ int target_uV, int tol_uV)
+{
+ return regulator_is_supported_voltage(regulator,
+ target_uV - tol_uV,
+ target_uV + tol_uV);
+}
+
+#endif
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
new file mode 100644
index 000000000..5dd65acc2
--- /dev/null
+++ b/include/linux/regulator/da9211.h
@@ -0,0 +1,39 @@
+/*
+ * da9211.h - Regulator device driver for DA9211/DA9213
+ * Copyright (C) 2014 Dialog Semiconductor Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_DA9211_H
+#define __LINUX_REGULATOR_DA9211_H
+
+#include <linux/regulator/machine.h>
+
+#define DA9211_MAX_REGULATORS 2
+
+enum da9211_chip_id {
+ DA9211,
+ DA9213,
+};
+
+struct da9211_pdata {
+ /*
+ * Number of bucks:
+ * 1 : one 4-phase buck
+ * 2 : two 2-phase bucks
+ */
+ int num_buck;
+ int gpio_ren[DA9211_MAX_REGULATORS];
+ struct device_node *reg_node[DA9211_MAX_REGULATORS];
+ struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
+};
+#endif
diff --git a/include/linux/regulator/db8500-prcmu.h b/include/linux/regulator/db8500-prcmu.h
new file mode 100644
index 000000000..612062313
--- /dev/null
+++ b/include/linux/regulator/db8500-prcmu.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * Interface to power domain regulators on DB8500
+ */
+
+#ifndef __REGULATOR_H__
+#define __REGULATOR_H__
+
+/* Number of DB8500 regulators and regulator enumeration */
+enum db8500_regulator_id {
+ DB8500_REGULATOR_VAPE,
+ DB8500_REGULATOR_VARM,
+ DB8500_REGULATOR_VMODEM,
+ DB8500_REGULATOR_VPLL,
+ DB8500_REGULATOR_VSMPS1,
+ DB8500_REGULATOR_VSMPS2,
+ DB8500_REGULATOR_VSMPS3,
+ DB8500_REGULATOR_VRF1,
+ DB8500_REGULATOR_SWITCH_SVAMMDSP,
+ DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
+ DB8500_REGULATOR_SWITCH_SVAPIPE,
+ DB8500_REGULATOR_SWITCH_SIAMMDSP,
+ DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
+ DB8500_REGULATOR_SWITCH_SIAPIPE,
+ DB8500_REGULATOR_SWITCH_SGA,
+ DB8500_REGULATOR_SWITCH_B2R2_MCDE,
+ DB8500_REGULATOR_SWITCH_ESRAM12,
+ DB8500_REGULATOR_SWITCH_ESRAM12RET,
+ DB8500_REGULATOR_SWITCH_ESRAM34,
+ DB8500_REGULATOR_SWITCH_ESRAM34RET,
+ DB8500_NUM_REGULATORS
+};
+
+/*
+ * Exported interface for CPUIdle only. This function is called with all
+ * interrupts turned off.
+ */
+int power_state_active_is_enabled(void);
+
+#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
new file mode 100644
index 000000000..fffa688ac
--- /dev/null
+++ b/include/linux/regulator/driver.h
@@ -0,0 +1,436 @@
+/*
+ * driver.h -- SoC Regulator driver support.
+ *
+ * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Regulator Driver Interface.
+ */
+
+#ifndef __LINUX_REGULATOR_DRIVER_H_
+#define __LINUX_REGULATOR_DRIVER_H_
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/regulator/consumer.h>
+
+struct regmap;
+struct regulator_dev;
+struct regulator_config;
+struct regulator_init_data;
+struct regulator_enable_gpio;
+
+enum regulator_status {
+ REGULATOR_STATUS_OFF,
+ REGULATOR_STATUS_ON,
+ REGULATOR_STATUS_ERROR,
+ /* fast/normal/idle/standby are flavors of "on" */
+ REGULATOR_STATUS_FAST,
+ REGULATOR_STATUS_NORMAL,
+ REGULATOR_STATUS_IDLE,
+ REGULATOR_STATUS_STANDBY,
+ /* The regulator is enabled but not regulating */
+ REGULATOR_STATUS_BYPASS,
+ /* in case that any other status doesn't apply */
+ REGULATOR_STATUS_UNDEFINED,
+};
+
+/**
+ * struct regulator_linear_range - specify linear voltage ranges
+ *
+ * Specify a range of voltages for regulator_map_voltage_linear_range() and
+ * regulator_list_voltage_linear_range().
+ *
+ * @min_uV: Lowest voltage in range
+ * @min_sel: Lowest selector for range
+ * @max_sel: Highest selector for range
+ * @uV_step: Step size
+ */
+struct regulator_linear_range {
+ unsigned int min_uV;
+ unsigned int min_sel;
+ unsigned int max_sel;
+ unsigned int uV_step;
+};
+
+/* Initialize struct regulator_linear_range */
+#define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \
+{ \
+ .min_uV = _min_uV, \
+ .min_sel = _min_sel, \
+ .max_sel = _max_sel, \
+ .uV_step = _step_uV, \
+}
+
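+/*
+ * Example (illustrative sketch only): a hypothetical regulator whose
+ * selectors 0-10 step in 50mV from 700mV and whose selectors 11-15 step in
+ * 100mV from 1.3V could describe itself as:
+ *
+ *	static const struct regulator_linear_range foo_ranges[] = {
+ *		REGULATOR_LINEAR_RANGE(700000,   0, 10,  50000),
+ *		REGULATOR_LINEAR_RANGE(1300000, 11, 15, 100000),
+ *	};
+ *
+ * and point .linear_ranges/.n_linear_ranges of its regulator_desc at this
+ * table so that regulator_list_voltage_linear_range() and
+ * regulator_map_voltage_linear_range() can be used.
+ */
+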
+/**
+ * struct regulator_ops - regulator operations.
+ *
+ * @enable: Configure the regulator as enabled.
+ * @disable: Configure the regulator as disabled.
+ * @is_enabled: Return 1 if the regulator is enabled, 0 if not.
+ * May also return negative errno.
+ *
+ * @set_voltage: Set the voltage for the regulator within the range specified.
+ * The driver should select the voltage closest to min_uV.
+ * @set_voltage_sel: Set the voltage for the regulator using the specified
+ * selector.
+ * @map_voltage: Convert a voltage into a selector
+ * @get_voltage: Return the currently configured voltage for the regulator.
+ * @get_voltage_sel: Return the currently configured voltage selector for the
+ * regulator.
+ * @list_voltage: Return one of the supported voltages, in microvolts; zero
+ * if the selector indicates a voltage that is unusable on this system;
+ * or negative errno. Selectors range from zero to one less than
+ * regulator_desc.n_voltages. Voltages may be reported in any order.
+ *
+ * @set_current_limit: Configure a limit for a current-limited regulator.
+ * The driver should select the current closest to max_uA.
+ * @get_current_limit: Get the configured limit for a current-limited regulator.
+ *
+ * @set_mode: Set the configured operating mode for the regulator.
+ * @get_mode: Get the configured operating mode for the regulator.
+ * @get_status: Return actual (not as-configured) status of regulator, as a
+ * REGULATOR_STATUS value (or negative errno)
+ * @get_optimum_mode: Get the most efficient operating mode for the regulator
+ * when running with the specified parameters.
+ * @set_load: Set the load for the regulator.
+ *
+ * @set_bypass: Set the regulator in bypass mode.
+ * @get_bypass: Get the regulator bypass mode state.
+ *
+ * @enable_time: Time taken for the regulator output voltage to
+ * stabilise after being enabled, in microseconds.
+ * @set_ramp_delay: Set the ramp delay for the regulator. The driver should
+ * select the closest supported ramp delay that does not exceed ramp_delay.
+ * @set_voltage_time_sel: Time taken for the regulator output voltage
+ * to stabilise after being set to a new value, in microseconds.
+ * The function is given the old and new voltage selectors and
+ * should return the worst-case time.
+ *
+ * @set_suspend_voltage: Set the voltage for the regulator when the system
+ * is suspended.
+ * @set_suspend_enable: Mark the regulator as enabled when the system is
+ * suspended.
+ * @set_suspend_disable: Mark the regulator as disabled when the system is
+ * suspended.
+ * @set_suspend_mode: Set the operating mode for the regulator when the
+ * system is suspended.
+ *
+ * This struct describes regulator operations which can be implemented by
+ * regulator chip drivers.
+ */
+struct regulator_ops {
+
+ /* enumerate supported voltages */
+ int (*list_voltage) (struct regulator_dev *, unsigned selector);
+
+ /* get/set regulator voltage */
+ int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
+ unsigned *selector);
+ int (*map_voltage)(struct regulator_dev *, int min_uV, int max_uV);
+ int (*set_voltage_sel) (struct regulator_dev *, unsigned selector);
+ int (*get_voltage) (struct regulator_dev *);
+ int (*get_voltage_sel) (struct regulator_dev *);
+
+ /* get/set regulator current */
+ int (*set_current_limit) (struct regulator_dev *,
+ int min_uA, int max_uA);
+ int (*get_current_limit) (struct regulator_dev *);
+
+ /* enable/disable regulator */
+ int (*enable) (struct regulator_dev *);
+ int (*disable) (struct regulator_dev *);
+ int (*is_enabled) (struct regulator_dev *);
+
+ /* get/set regulator operating mode (defined in consumer.h) */
+ int (*set_mode) (struct regulator_dev *, unsigned int mode);
+ unsigned int (*get_mode) (struct regulator_dev *);
+
+ /* Time taken to enable or set voltage on the regulator */
+ int (*enable_time) (struct regulator_dev *);
+ int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay);
+ int (*set_voltage_time_sel) (struct regulator_dev *,
+ unsigned int old_selector,
+ unsigned int new_selector);
+
+ /* report regulator status ... most other accessors report
+ * control inputs, this reports results of combining inputs
+ * from Linux (and other sources) with the actual load.
+ * returns REGULATOR_STATUS_* or negative errno.
+ */
+ int (*get_status)(struct regulator_dev *);
+
+ /* get most efficient regulator operating mode for load */
+ unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV,
+ int output_uV, int load_uA);
+ /* set the load on the regulator */
+ int (*set_load)(struct regulator_dev *, int load_uA);
+
+ /* control and report on bypass mode */
+ int (*set_bypass)(struct regulator_dev *dev, bool enable);
+ int (*get_bypass)(struct regulator_dev *dev, bool *enable);
+
+ /* the operations below are for configuration of regulator state when
+ * its parent PMIC enters a global STANDBY/HIBERNATE state */
+
+ /* set regulator suspend voltage */
+ int (*set_suspend_voltage) (struct regulator_dev *, int uV);
+
+ /* enable/disable regulator in suspend state */
+ int (*set_suspend_enable) (struct regulator_dev *);
+ int (*set_suspend_disable) (struct regulator_dev *);
+
+ /* set regulator suspend operating mode (defined in consumer.h) */
+ int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
+};
+
+/*
+ * Regulators can either control voltage or current.
+ */
+enum regulator_type {
+ REGULATOR_VOLTAGE,
+ REGULATOR_CURRENT,
+};
+
+/**
+ * struct regulator_desc - Static regulator descriptor
+ *
+ * Each regulator registered with the core is described with a
+ * structure of this type and a struct regulator_config. This
+ * structure contains the non-varying parts of the regulator
+ * description.
+ *
+ * @name: Identifying name for the regulator.
+ * @supply_name: Identifying name for the regulator supply
+ * @of_match: Name used to identify regulator in DT.
+ * @regulators_node: Name of node containing regulator definitions in DT.
+ * @of_parse_cb: Optional callback called only if of_match is present.
+ * Will be called for each regulator parsed from DT, during
+ * init_data parsing.
+ * The regulator_config passed as argument to the callback will
+ * be a copy of config passed to regulator_register, valid only
+ * for this particular call. Callback may freely change the
+ * config but it cannot store it for later usage.
+ * Callback should return 0 on success or negative ERRNO
+ * indicating failure.
+ * @id: Numerical identifier for the regulator.
+ * @ops: Regulator operations table.
+ * @irq: Interrupt number for the regulator.
+ * @type: Indicates if the regulator is a voltage or current regulator.
+ * @owner: Module providing the regulator, used for refcounting.
+ *
+ * @continuous_voltage_range: Indicates if the regulator can set any
+ * voltage within the constraints range.
+ * @n_voltages: Number of selectors available for ops.list_voltage().
+ *
+ * @min_uV: Voltage given by the lowest selector (if linear mapping)
+ * @uV_step: Voltage increase with each selector (if linear mapping)
+ * @linear_min_sel: Minimal selector for starting linear mapping
+ * @fixed_uV: Fixed voltage of rails.
+ * @ramp_delay: Ramp rate used to derive the settling time after a voltage change (unit: uV/us)
+ * @linear_ranges: A constant table of possible voltage ranges.
+ * @n_linear_ranges: Number of entries in the @linear_ranges table.
+ * @volt_table: Voltage mapping table (if table based mapping)
+ *
+ * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
+ * @vsel_mask: Mask for register bitfield used for selector
+ * @apply_reg: Register used to initiate a voltage change on the output when
+ * using regulator_set_voltage_sel_regmap
+ * @apply_bit: Register bitfield used to initiate a voltage change on the
+ * output when using regulator_set_voltage_sel_regmap
+ * @enable_reg: Register for control when using regmap enable/disable ops
+ * @enable_mask: Mask for control when using regmap enable/disable ops
+ * @enable_val: Enabling value for control when using regmap enable/disable ops
+ * @disable_val: Disabling value for control when using regmap enable/disable ops
+ * @enable_is_inverted: A flag to indicate that setting the enable_mask bits
+ * disables the regulator (and clearing them enables it) when using
+ * regulator_enable_regmap and friends.
+ * @bypass_reg: Register for control when using regmap set_bypass
+ * @bypass_mask: Mask for control when using regmap set_bypass
+ * @bypass_val_on: Enabling value for control when using regmap set_bypass
+ * @bypass_val_off: Disabling value for control when using regmap set_bypass
+ *
+ * @enable_time: Time taken for initial enable of regulator (in uS).
+ * @off_on_delay: guard time (in uS), before re-enabling a regulator
+ *
+ * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode
+ */
+struct regulator_desc {
+ const char *name;
+ const char *supply_name;
+ const char *of_match;
+ const char *regulators_node;
+ int (*of_parse_cb)(struct device_node *,
+ const struct regulator_desc *,
+ struct regulator_config *);
+ int id;
+ bool continuous_voltage_range;
+ unsigned n_voltages;
+ const struct regulator_ops *ops;
+ int irq;
+ enum regulator_type type;
+ struct module *owner;
+
+ unsigned int min_uV;
+ unsigned int uV_step;
+ unsigned int linear_min_sel;
+ int fixed_uV;
+ unsigned int ramp_delay;
+
+ const struct regulator_linear_range *linear_ranges;
+ int n_linear_ranges;
+
+ const unsigned int *volt_table;
+
+ unsigned int vsel_reg;
+ unsigned int vsel_mask;
+ unsigned int apply_reg;
+ unsigned int apply_bit;
+ unsigned int enable_reg;
+ unsigned int enable_mask;
+ unsigned int enable_val;
+ unsigned int disable_val;
+ bool enable_is_inverted;
+ unsigned int bypass_reg;
+ unsigned int bypass_mask;
+ unsigned int bypass_val_on;
+ unsigned int bypass_val_off;
+
+ unsigned int enable_time;
+
+ unsigned int off_on_delay;
+
+ unsigned int (*of_map_mode)(unsigned int mode);
+};
+
+/**
+ * struct regulator_config - Dynamic regulator descriptor
+ *
+ * Each regulator registered with the core is described with a
+ * structure of this type and a struct regulator_desc. This structure
+ * contains the runtime variable parts of the regulator description.
+ *
+ * @dev: struct device for the regulator
+ * @init_data: platform provided init data, passed through by driver
+ * @driver_data: private regulator data
+ * @of_node: OpenFirmware node to parse for device tree bindings (may be
+ * NULL).
+ * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is
+ * insufficient.
+ * @ena_gpio_initialized: GPIO controlling regulator enable was properly
+ * initialized, meaning that >= 0 is a valid gpio
+ * identifier and < 0 is a nonexistent gpio.
+ * @ena_gpio: GPIO controlling regulator enable.
+ * @ena_gpio_invert: Sense for GPIO enable control.
+ * @ena_gpio_flags: Flags to use when calling gpio_request_one()
+ */
+struct regulator_config {
+ struct device *dev;
+ const struct regulator_init_data *init_data;
+ void *driver_data;
+ struct device_node *of_node;
+ struct regmap *regmap;
+
+ bool ena_gpio_initialized;
+ int ena_gpio;
+ unsigned int ena_gpio_invert:1;
+ unsigned int ena_gpio_flags;
+};
+
+/*
+ * struct regulator_dev
+ *
+ * Voltage / Current regulator class device. One for each
+ * regulator.
+ *
+ * This should *not* be used directly by anything except the regulator
+ * core and notification injection (which should take the mutex and do
+ * no other direct access).
+ */
+struct regulator_dev {
+ const struct regulator_desc *desc;
+ int exclusive;
+ u32 use_count;
+ u32 open_count;
+ u32 bypass_count;
+
+ /* lists we belong to */
+ struct list_head list; /* list of all regulators */
+
+ /* lists we own */
+ struct list_head consumer_list; /* consumers we supply */
+
+ struct blocking_notifier_head notifier;
+ struct mutex mutex; /* consumer lock */
+ struct module *owner;
+ struct device dev;
+ struct regulation_constraints *constraints;
+ struct regulator *supply; /* for tree */
+ const char *supply_name;
+ struct regmap *regmap;
+
+ struct delayed_work disable_work;
+ int deferred_disables;
+
+ void *reg_data; /* regulator_dev data */
+
+ struct dentry *debugfs;
+
+ struct regulator_enable_gpio *ena_pin;
+ unsigned int ena_gpio_state:1;
+
+ /* time when this regulator was disabled last time */
+ unsigned long last_off_jiffy;
+};
+
+struct regulator_dev *
+regulator_register(const struct regulator_desc *regulator_desc,
+ const struct regulator_config *config);
+struct regulator_dev *
+devm_regulator_register(struct device *dev,
+ const struct regulator_desc *regulator_desc,
+ const struct regulator_config *config);
+void regulator_unregister(struct regulator_dev *rdev);
+void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev);
+
+int regulator_notifier_call_chain(struct regulator_dev *rdev,
+ unsigned long event, void *data);
+
+void *rdev_get_drvdata(struct regulator_dev *rdev);
+struct device *rdev_get_dev(struct regulator_dev *rdev);
+int rdev_get_id(struct regulator_dev *rdev);
+
+int regulator_mode_to_status(unsigned int);
+
+int regulator_list_voltage_linear(struct regulator_dev *rdev,
+ unsigned int selector);
+int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
+ unsigned int selector);
+int regulator_list_voltage_table(struct regulator_dev *rdev,
+ unsigned int selector);
+int regulator_map_voltage_linear(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
+int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
+int regulator_map_voltage_iterate(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
+int regulator_map_voltage_ascend(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
+int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev);
+int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel);
+int regulator_is_enabled_regmap(struct regulator_dev *rdev);
+int regulator_enable_regmap(struct regulator_dev *rdev);
+int regulator_disable_regmap(struct regulator_dev *rdev);
+int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector);
+int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable);
+int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable);
+
+void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
+
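+/*
+ * Example (illustrative sketch only): a minimal regmap-backed voltage
+ * regulator driver built from the descriptor and helpers above.  The
+ * register addresses, masks, voltage map and "foo" naming are hypothetical.
+ *
+ *	static const struct regulator_ops foo_ops = {
+ *		.list_voltage		= regulator_list_voltage_linear,
+ *		.map_voltage		= regulator_map_voltage_linear,
+ *		.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+ *		.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+ *		.enable			= regulator_enable_regmap,
+ *		.disable		= regulator_disable_regmap,
+ *		.is_enabled		= regulator_is_enabled_regmap,
+ *	};
+ *
+ *	static const struct regulator_desc foo_desc = {
+ *		.name		= "foo-ldo",
+ *		.ops		= &foo_ops,
+ *		.type		= REGULATOR_VOLTAGE,
+ *		.owner		= THIS_MODULE,
+ *		.n_voltages	= 16,
+ *		.min_uV		= 800000,
+ *		.uV_step	= 50000,
+ *		.vsel_reg	= 0x10,
+ *		.vsel_mask	= 0x0f,
+ *		.enable_reg	= 0x11,
+ *		.enable_mask	= 0x01,
+ *	};
+ *
+ *	static int foo_regulator_probe(struct platform_device *pdev)
+ *	{
+ *		struct regulator_config config = { };
+ *		struct regulator_dev *rdev;
+ *
+ *		config.dev = &pdev->dev;
+ *		config.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ *		config.init_data = dev_get_platdata(&pdev->dev);
+ *
+ *		rdev = devm_regulator_register(&pdev->dev, &foo_desc, &config);
+ *		return PTR_ERR_OR_ZERO(rdev);
+ *	}
+ */
+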
+#endif
diff --git a/include/linux/regulator/fan53555.h b/include/linux/regulator/fan53555.h
new file mode 100644
index 000000000..f13880e84
--- /dev/null
+++ b/include/linux/regulator/fan53555.h
@@ -0,0 +1,61 @@
+/*
+ * fan53555.h - Fairchild Regulator FAN53555 Driver
+ *
+ * Copyright (C) 2012 Marvell Technology Ltd.
+ * Yunfan Zhang <yfzhang@marvell.com>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __FAN53555_H__
+#define __FAN53555_H__
+
+/* VSEL ID */
+enum {
+ FAN53555_VSEL_ID_0 = 0,
+ FAN53555_VSEL_ID_1,
+};
+
+/* Transition slew rate limiting from a low to high voltage.
+ * -----------------------
+ * Bin |Slew Rate(mV/uS)
+ * ------|----------------
+ * 000 | 64.00
+ * ------|----------------
+ * 001 | 32.00
+ * ------|----------------
+ * 010 | 16.00
+ * ------|----------------
+ * 011 | 8.00
+ * ------|----------------
+ * 100 | 4.00
+ * ------|----------------
+ * 101 | 2.00
+ * ------|----------------
+ * 110 | 1.00
+ * ------|----------------
+ * 111 | 0.50
+ * -----------------------
+ */
+enum {
+ FAN53555_SLEW_RATE_64MV = 0,
+ FAN53555_SLEW_RATE_32MV,
+ FAN53555_SLEW_RATE_16MV,
+ FAN53555_SLEW_RATE_8MV,
+ FAN53555_SLEW_RATE_4MV,
+ FAN53555_SLEW_RATE_2MV,
+ FAN53555_SLEW_RATE_1MV,
+ FAN53555_SLEW_RATE_0_5MV,
+};
+
+struct fan53555_platform_data {
+ struct regulator_init_data *regulator;
+ unsigned int slew_rate;
+ /* Sleep VSEL ID */
+ unsigned int sleep_vsel_id;
+};
+
+#endif /* __FAN53555_H__ */
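
As a usage sketch, a board file could fill the platform data like this; the constraints, slew rate and sleep VSEL choice are illustrative only.

#include <linux/regulator/fan53555.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data board_fan53555_init_data = {
	.constraints = {
		.min_uV		= 800000,
		.max_uV		= 1350000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS,
	},
};

static struct fan53555_platform_data board_fan53555_pdata = {
	.regulator	= &board_fan53555_init_data,
	.slew_rate	= FAN53555_SLEW_RATE_8MV,	/* 8 mV/us ramp */
	.sleep_vsel_id	= FAN53555_VSEL_ID_1,		/* VSEL1 holds the sleep voltage */
};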
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
new file mode 100644
index 000000000..48918be64
--- /dev/null
+++ b/include/linux/regulator/fixed.h
@@ -0,0 +1,76 @@
+/*
+ * fixed.h
+ *
+ * Copyright 2008 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * Copyright (c) 2009 Nokia Corporation
+ * Roger Quadros <ext-roger.quadros@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#ifndef __REGULATOR_FIXED_H
+#define __REGULATOR_FIXED_H
+
+struct regulator_init_data;
+
+/**
+ * struct fixed_voltage_config - fixed_voltage_config structure
+ * @supply_name: Name of the regulator supply
+ * @input_supply: Name of the input regulator supply
+ * @microvolts: Output voltage of regulator
+ * @gpio: GPIO to use for enable control
+ * set to -EINVAL if not used
+ * @startup_delay: Start-up time in microseconds
+ * @gpio_is_open_drain: GPIO pin is open drain or normal type.
+ *		  If it is open drain, HIGH is set by switching the
+ *		  GPIO to input and letting the pull-up raise the line,
+ *		  and LOW is set by driving the GPIO output low. For the
+ *		  non-open-drain case, the GPIO stays an output and is
+ *		  driven low or high accordingly.
+ * @enable_high: Polarity of enable GPIO
+ * 1 = Active high, 0 = Active low
+ * @enabled_at_boot: Whether regulator has been enabled at
+ * boot or not. 1 = Yes, 0 = No
+ * This is used to keep the regulator at
+ * the default state
+ * @init_data: regulator_init_data
+ *
+ * This structure contains fixed voltage regulator configuration
+ * information that must be passed by platform code to the fixed
+ * voltage regulator driver.
+ */
+struct fixed_voltage_config {
+ const char *supply_name;
+ const char *input_supply;
+ int microvolts;
+ int gpio;
+ unsigned startup_delay;
+ unsigned gpio_is_open_drain:1;
+ unsigned enable_high:1;
+ unsigned enabled_at_boot:1;
+ struct regulator_init_data *init_data;
+};
+
+struct regulator_consumer_supply;
+
+#if IS_ENABLED(CONFIG_REGULATOR)
+struct platform_device *regulator_register_always_on(int id, const char *name,
+ struct regulator_consumer_supply *supplies, int num_supplies, int uv);
+#else
+static inline struct platform_device *regulator_register_always_on(int id, const char *name,
+ struct regulator_consumer_supply *supplies, int num_supplies, int uv)
+{
+ return NULL;
+}
+#endif
+
+#define regulator_register_fixed(id, s, ns) regulator_register_always_on(id, \
+ "fixed-dummy", s, ns, 0)
+
+#endif
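
A board would normally describe a fixed rail and its consumers roughly as follows. The consumer device name, GPIO number and voltage are invented, and the "reg-fixed-voltage" platform device name is assumed to match the fixed-voltage driver.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply board_vcc_sd_consumers[] = {
	REGULATOR_SUPPLY("vmmc", "mmc0"),	/* hypothetical consumer */
};

static struct regulator_init_data board_vcc_sd_init_data = {
	.constraints = {
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(board_vcc_sd_consumers),
	.consumer_supplies	= board_vcc_sd_consumers,
};

static struct fixed_voltage_config board_vcc_sd_config = {
	.supply_name	= "vcc_sd",
	.microvolts	= 3300000,	/* fixed 3.3 V rail */
	.gpio		= 42,		/* enable GPIO; -EINVAL if none */
	.enable_high	= 1,
	.startup_delay	= 5000,		/* 5 ms ramp */
	.init_data	= &board_vcc_sd_init_data,
};

static struct platform_device board_vcc_sd_device = {
	.name	= "reg-fixed-voltage",	/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data = &board_vcc_sd_config,
	},
};

For the simpler case of an always-on dummy rail with no GPIO control, the regulator_register_fixed() helper above can be used instead.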
diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h
new file mode 100644
index 000000000..19fbd2674
--- /dev/null
+++ b/include/linux/regulator/gpio-regulator.h
@@ -0,0 +1,87 @@
+/*
+ * gpio-regulator.h
+ *
+ * Copyright 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on fixed.h
+ *
+ * Copyright 2008 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * Copyright (c) 2009 Nokia Corporation
+ * Roger Quadros <ext-roger.quadros@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ */
+
+#ifndef __REGULATOR_GPIO_H
+#define __REGULATOR_GPIO_H
+
+struct regulator_init_data;
+
+enum regulator_type;
+
+/**
+ * struct gpio_regulator_state - state description
+ * @value: microvolts or microamps
+ * @gpios: bitfield of gpio target-states for the value
+ *
+ * This structure describes a supported setting of the regulator
+ * and the necessary gpio-state to achieve it.
+ *
+ * The n-th bit in the bitfield describes the state of the n-th GPIO
+ * from the gpios-array defined in gpio_regulator_config below.
+ */
+struct gpio_regulator_state {
+ int value;
+ int gpios;
+};
+
+/**
+ * struct gpio_regulator_config - config structure
+ * @supply_name: Name of the regulator supply
+ * @enable_gpio: GPIO to use for enable control
+ * set to -EINVAL if not used
+ * @enable_high: Polarity of enable GPIO
+ * 1 = Active high, 0 = Active low
+ * @enabled_at_boot: Whether regulator has been enabled at
+ * boot or not. 1 = Yes, 0 = No
+ * This is used to keep the regulator at
+ * the default state
+ * @startup_delay: Start-up time in microseconds
+ * @gpios: Array containing the gpios needed to control
+ * the setting of the regulator
+ * @nr_gpios: Number of gpios
+ * @states: Array of gpio_regulator_state entries describing
+ * the gpio state for specific voltages
+ * @nr_states: Number of states available
+ * @type:		either REGULATOR_CURRENT or REGULATOR_VOLTAGE
+ * @init_data: regulator_init_data
+ *
+ * This structure contains gpio-voltage regulator configuration
+ * information that must be passed by platform code to the
+ * gpio-voltage regulator driver.
+ */
+struct gpio_regulator_config {
+ const char *supply_name;
+
+ int enable_gpio;
+ unsigned enable_high:1;
+ unsigned enabled_at_boot:1;
+ unsigned startup_delay;
+
+ struct gpio *gpios;
+ int nr_gpios;
+
+ struct gpio_regulator_state *states;
+ int nr_states;
+
+ enum regulator_type type;
+ struct regulator_init_data *init_data;
+};
+
+#endif
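
The following sketch shows how a board might describe a two-GPIO, four-state voltage regulator; GPIO numbers, voltages and the per-state gpio bitfields are invented.

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/gpio-regulator.h>
#include <linux/regulator/machine.h>

/* Two control GPIOs give four selectable output voltages. */
static struct gpio board_vddcore_gpios[] = {
	{ .gpio = 10, .flags = GPIOF_OUT_INIT_LOW, .label = "vddcore-sel0" },
	{ .gpio = 11, .flags = GPIOF_OUT_INIT_LOW, .label = "vddcore-sel1" },
};

/* Bit n of .gpios is the target state of board_vddcore_gpios[n]. */
static struct gpio_regulator_state board_vddcore_states[] = {
	{ .value = 1000000, .gpios = 0x0 },
	{ .value = 1100000, .gpios = 0x1 },
	{ .value = 1200000, .gpios = 0x2 },
	{ .value = 1300000, .gpios = 0x3 },
};

static struct regulator_init_data board_vddcore_init_data = {
	.constraints = {
		.min_uV		= 1000000,
		.max_uV		= 1300000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct gpio_regulator_config board_vddcore_config = {
	.supply_name	= "vdd_core",
	.enable_gpio	= -EINVAL,		/* no enable control */
	.gpios		= board_vddcore_gpios,
	.nr_gpios	= ARRAY_SIZE(board_vddcore_gpios),
	.states		= board_vddcore_states,
	.nr_states	= ARRAY_SIZE(board_vddcore_states),
	.type		= REGULATOR_VOLTAGE,
	.init_data	= &board_vddcore_init_data,
};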
diff --git a/include/linux/regulator/lp3971.h b/include/linux/regulator/lp3971.h
new file mode 100644
index 000000000..61401649f
--- /dev/null
+++ b/include/linux/regulator/lp3971.h
@@ -0,0 +1,51 @@
+/*
+ * National Semiconductors LP3971 PMIC chip client interface
+ *
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Based on wm8400.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_REGULATOR_LP3971_H
+#define __LINUX_REGULATOR_LP3971_H
+
+#include <linux/regulator/machine.h>
+
+#define LP3971_LDO1 0
+#define LP3971_LDO2 1
+#define LP3971_LDO3 2
+#define LP3971_LDO4 3
+#define LP3971_LDO5 4
+
+#define LP3971_DCDC1 5
+#define LP3971_DCDC2 6
+#define LP3971_DCDC3 7
+
+#define LP3971_NUM_REGULATORS 8
+
+struct lp3971_regulator_subdev {
+ int id;
+ struct regulator_init_data *initdata;
+};
+
+struct lp3971_platform_data {
+ int num_regulators;
+ struct lp3971_regulator_subdev *regulators;
+};
+
+#endif
diff --git a/include/linux/regulator/lp3972.h b/include/linux/regulator/lp3972.h
new file mode 100644
index 000000000..9bb7389b7
--- /dev/null
+++ b/include/linux/regulator/lp3972.h
@@ -0,0 +1,48 @@
+/*
+ * National Semiconductors LP3972 PMIC chip client interface
+ *
+ * Based on lp3971.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_REGULATOR_LP3972_H
+#define __LINUX_REGULATOR_LP3972_H
+
+#include <linux/regulator/machine.h>
+
+#define LP3972_LDO1 0
+#define LP3972_LDO2 1
+#define LP3972_LDO3 2
+#define LP3972_LDO4 3
+#define LP3972_LDO5 4
+
+#define LP3972_DCDC1 5
+#define LP3972_DCDC2 6
+#define LP3972_DCDC3 7
+
+#define LP3972_NUM_REGULATORS 8
+
+struct lp3972_regulator_subdev {
+ int id;
+ struct regulator_init_data *initdata;
+};
+
+struct lp3972_platform_data {
+ int num_regulators;
+ struct lp3972_regulator_subdev *regulators;
+};
+
+#endif
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
new file mode 100644
index 000000000..132e05c46
--- /dev/null
+++ b/include/linux/regulator/lp872x.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LP872X_REGULATOR_H__
+#define __LP872X_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#define LP872X_MAX_REGULATORS 9
+
+enum lp872x_regulator_id {
+ LP8720_ID_BASE,
+ LP8720_ID_LDO1 = LP8720_ID_BASE,
+ LP8720_ID_LDO2,
+ LP8720_ID_LDO3,
+ LP8720_ID_LDO4,
+ LP8720_ID_LDO5,
+ LP8720_ID_BUCK,
+
+ LP8725_ID_BASE,
+ LP8725_ID_LDO1 = LP8725_ID_BASE,
+ LP8725_ID_LDO2,
+ LP8725_ID_LDO3,
+ LP8725_ID_LDO4,
+ LP8725_ID_LDO5,
+ LP8725_ID_LILO1,
+ LP8725_ID_LILO2,
+ LP8725_ID_BUCK1,
+ LP8725_ID_BUCK2,
+
+ LP872X_ID_MAX,
+};
+
+enum lp872x_dvs_state {
+ DVS_LOW = GPIOF_OUT_INIT_LOW,
+ DVS_HIGH = GPIOF_OUT_INIT_HIGH,
+};
+
+enum lp872x_dvs_sel {
+ SEL_V1,
+ SEL_V2,
+};
+
+/**
+ * lp872x_dvs
+ * @gpio : gpio pin number for dvs control
+ * @vsel : dvs selector for buck v1 or buck v2 register
+ * @init_state : initial dvs pin state
+ */
+struct lp872x_dvs {
+ int gpio;
+ enum lp872x_dvs_sel vsel;
+ enum lp872x_dvs_state init_state;
+};
+
+/**
+ * lp872x_regdata
+ * @id : regulator id
+ * @init_data : init data for each regulator
+ */
+struct lp872x_regulator_data {
+ enum lp872x_regulator_id id;
+ struct regulator_init_data *init_data;
+};
+
+/**
+ * lp872x_platform_data
+ * @general_config : the value of LP872X_GENERAL_CFG register
+ * @update_config : set true if the LP872X_GENERAL_CFG register should be updated
+ * @regulator_data : platform regulator id and init data
+ * @dvs : dvs data for buck voltage control
+ */
+struct lp872x_platform_data {
+ u8 general_config;
+ bool update_config;
+ struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
+ struct lp872x_dvs *dvs;
+};
+
+#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
new file mode 100644
index 000000000..b07562e08
--- /dev/null
+++ b/include/linux/regulator/machine.h
@@ -0,0 +1,212 @@
+/*
+ * machine.h -- SoC Regulator support, machine/board driver API.
+ *
+ * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Regulator Machine/Board Interface.
+ */
+
+#ifndef __LINUX_REGULATOR_MACHINE_H_
+#define __LINUX_REGULATOR_MACHINE_H_
+
+#include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
+
+struct regulator;
+
+/*
+ * Regulator operation constraint flags. These flags are used to enable
+ * certain regulator operations and can be OR'ed together.
+ *
+ * VOLTAGE: Regulator output voltage can be changed by software on this
+ * board/machine.
+ * CURRENT: Regulator output current can be changed by software on this
+ * board/machine.
+ * MODE: Regulator operating mode can be changed by software on this
+ * board/machine.
+ * STATUS: Regulator can be enabled and disabled.
+ * DRMS: Dynamic Regulator Mode Switching is enabled for this regulator.
+ * BYPASS: Regulator can be put into bypass mode
+ */
+
+#define REGULATOR_CHANGE_VOLTAGE 0x1
+#define REGULATOR_CHANGE_CURRENT 0x2
+#define REGULATOR_CHANGE_MODE 0x4
+#define REGULATOR_CHANGE_STATUS 0x8
+#define REGULATOR_CHANGE_DRMS 0x10
+#define REGULATOR_CHANGE_BYPASS 0x20
+
+/**
+ * struct regulator_state - regulator state during low power system states
+ *
+ * This describes a regulator's state during a system-wide low power
+ * state.  One of enabled or disabled must be set for the
+ * configuration to be applied.
+ *
+ * @uV: Operating voltage during suspend.
+ * @mode: Operating mode during suspend.
+ * @enabled: Enabled during suspend.
+ * @disabled: Disabled during suspend.
+ */
+struct regulator_state {
+	int uV;	/* suspend voltage */
+	unsigned int mode; /* suspend regulator operating mode */
+	int enabled; /* is regulator enabled in this suspend state */
+	int disabled; /* is the regulator disabled in this suspend state */
+};
+
+/**
+ * struct regulation_constraints - regulator operating constraints.
+ *
+ * This struct describes regulator and board/machine specific constraints.
+ *
+ * @name: Descriptive name for the constraints, used for display purposes.
+ *
+ * @min_uV: Smallest voltage consumers may set.
+ * @max_uV: Largest voltage consumers may set.
+ * @uV_offset: Offset applied to voltages from consumer to compensate for
+ * voltage drops.
+ *
+ * @min_uA: Smallest current consumers may set.
+ * @max_uA: Largest current consumers may set.
+ *
+ * @valid_modes_mask: Mask of modes which may be configured by consumers.
+ * @valid_ops_mask: Operations which may be performed by consumers.
+ *
+ * @always_on: Set if the regulator should never be disabled.
+ * @boot_on: Set if the regulator is enabled when the system is initially
+ * started. If the regulator is not enabled by the hardware or
+ * bootloader then it will be enabled when the constraints are
+ * applied.
+ * @apply_uV: Apply the voltage constraint when initialising.
+ * @ramp_disable: Disable ramp delay when initialising or when setting voltage.
+ *
+ * @input_uV: Input voltage for regulator when supplied by another regulator.
+ *
+ * @state_disk: State for regulator when system is suspended in disk mode.
+ * @state_mem: State for regulator when system is suspended in mem mode.
+ * @state_standby: State for regulator when system is suspended in standby
+ * mode.
+ * @initial_state: Suspend state to set by default.
+ * @initial_mode: Mode to set at startup.
+ * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @enable_time: Turn-on time of the rails (unit: microseconds)
+ */
+struct regulation_constraints {
+
+ const char *name;
+
+ /* voltage output range (inclusive) - for voltage control */
+ int min_uV;
+ int max_uV;
+
+ int uV_offset;
+
+ /* current output range (inclusive) - for current control */
+ int min_uA;
+ int max_uA;
+
+ /* valid regulator operating modes for this machine */
+ unsigned int valid_modes_mask;
+
+ /* valid operations for regulator on this machine */
+ unsigned int valid_ops_mask;
+
+ /* regulator input voltage - only if supply is another regulator */
+ int input_uV;
+
+ /* regulator suspend states for global PMIC STANDBY/HIBERNATE */
+ struct regulator_state state_disk;
+ struct regulator_state state_mem;
+ struct regulator_state state_standby;
+ suspend_state_t initial_state; /* suspend state to set at init */
+
+ /* mode to set on startup */
+ unsigned int initial_mode;
+
+ unsigned int ramp_delay;
+ unsigned int enable_time;
+
+ /* constraint flags */
+ unsigned always_on:1; /* regulator never off when system is on */
+ unsigned boot_on:1; /* bootloader/firmware enabled regulator */
+ unsigned apply_uV:1; /* apply uV constraint if min == max */
+ unsigned ramp_disable:1; /* disable ramp delay */
+};
+
+/**
+ * struct regulator_consumer_supply - supply -> device mapping
+ *
+ * This maps a supply name to a device. Use of dev_name allows support for
+ * buses which make struct device available late such as I2C.
+ *
+ * @dev_name: Result of dev_name() for the consumer.
+ * @supply: Name for the supply.
+ */
+struct regulator_consumer_supply {
+ const char *dev_name; /* dev_name() for consumer */
+ const char *supply; /* consumer supply - e.g. "vcc" */
+};
+
+/* Initialize struct regulator_consumer_supply */
+#define REGULATOR_SUPPLY(_name, _dev_name) \
+{ \
+ .supply = _name, \
+ .dev_name = _dev_name, \
+}
+
+/**
+ * struct regulator_init_data - regulator platform initialisation data.
+ *
+ * Initialisation constraints, our supply and consumer supplies.
+ *
+ * @supply_regulator: Parent regulator. Specified using the regulator name
+ * as it appears in the name field in sysfs, which can
+ * be explicitly set using the constraints field 'name'.
+ *
+ * @constraints: Constraints. These must be specified for the regulator to
+ * be usable.
+ * @num_consumer_supplies: Number of consumer device supplies.
+ * @consumer_supplies: Consumer device supply configuration.
+ *
+ * @regulator_init: Callback invoked when the regulator has been registered.
+ * @driver_data: Data passed to regulator_init.
+ */
+struct regulator_init_data {
+ const char *supply_regulator; /* or NULL for system supply */
+
+ struct regulation_constraints constraints;
+
+ int num_consumer_supplies;
+ struct regulator_consumer_supply *consumer_supplies;
+
+ /* optional regulator machine specific init */
+ int (*regulator_init)(void *driver_data);
+ void *driver_data; /* core does not touch this */
+};
+
+#ifdef CONFIG_REGULATOR
+void regulator_has_full_constraints(void);
+int regulator_suspend_prepare(suspend_state_t state);
+int regulator_suspend_finish(void);
+#else
+static inline void regulator_has_full_constraints(void)
+{
+}
+static inline int regulator_suspend_prepare(suspend_state_t state)
+{
+ return 0;
+}
+static inline int regulator_suspend_finish(void)
+{
+ return 0;
+}
+#endif
+
+#endif
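
Putting the structures above together, board code typically describes one supply like this (the codec device name and supply names are hypothetical):

#include <linux/kernel.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply board_vaud_consumers[] = {
	REGULATOR_SUPPLY("AVDD", "1-001a"),	/* hypothetical codec at I2C 0x1a */
	REGULATOR_SUPPLY("DBVDD", "1-001a"),
};

static struct regulator_init_data board_vaud_init_data = {
	.constraints = {
		.name		= "VAUD",
		.min_uV		= 1800000,
		.max_uV		= 3300000,
		.boot_on	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE |
				  REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(board_vaud_consumers),
	.consumer_supplies	= board_vaud_consumers,
};

The PMIC driver receives this through its platform data and hands it to the regulator core at registration time; consumers can then claim the supply by name, e.g. with regulator_get(dev, "AVDD").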
diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h
new file mode 100644
index 000000000..cedd0febe
--- /dev/null
+++ b/include/linux/regulator/max1586.h
@@ -0,0 +1,63 @@
+/*
+ * max1586.h -- Voltage regulation for the Maxim 1586
+ *
+ * Copyright (C) 2008 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef REGULATOR_MAX1586
+#define REGULATOR_MAX1586
+
+#include <linux/regulator/machine.h>
+
+#define MAX1586_V3 0
+#define MAX1586_V6 1
+
+/* precalculated values for v3_gain */
+#define MAX1586_GAIN_NO_R24 1000000 /* 700000 .. 1475000 uV */
+#define MAX1586_GAIN_R24_3k32 1051098 /* 735768 .. 1550369 uV */
+#define MAX1586_GAIN_R24_5k11 1078648 /* 755053 .. 1591005 uV */
+#define MAX1586_GAIN_R24_7k5 1115432 /* 780802 .. 1645262 uV */
+
+/**
+ * max1586_subdev_data - regulator data
+ * @id: regulator Id (either MAX1586_V3 or MAX1586_V6)
+ * @name: regulator cute name (example for V3: "vcc_core")
+ * @platform_data: regulator init data (constraints, supplies, ...)
+ */
+struct max1586_subdev_data {
+ int id;
+ const char *name;
+ struct regulator_init_data *platform_data;
+};
+
+/**
+ * max1586_platform_data - platform data for max1586
+ * @num_subdevs: number of regulators used (may be 1 or 2)
+ * @subdevs: regulators used
+ * At most, there will be a regulator for V3 and one for V6 voltages.
+ * @v3_gain: gain on the V3 voltage output multiplied by 1e6.
+ * This can be calculated as ((1 + R24/R25 + R24/185.5kOhm) * 1e6)
+ * for an external resistor configuration as described in the
+ * data sheet (R25=100kOhm).
+ */
+struct max1586_platform_data {
+ int num_subdevs;
+ struct max1586_subdev_data *subdevs;
+ int v3_gain;
+};
+
+#endif
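
For example, a board with R24 = 3.32 kOhm on V3 and only the V3 output in use might pass platform data like the sketch below; the voltages chosen are illustrative.

#include <linux/kernel.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max1586.h>

static struct regulator_init_data board_vcc_core_init_data = {
	.constraints = {
		.min_uV		= 800000,
		.max_uV		= 1500000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct max1586_subdev_data board_max1586_subdevs[] = {
	{
		.id		= MAX1586_V3,
		.name		= "vcc_core",
		.platform_data	= &board_vcc_core_init_data,
	},
};

static struct max1586_platform_data board_max1586_pdata = {
	.num_subdevs	= ARRAY_SIZE(board_max1586_subdevs),
	.subdevs	= board_max1586_subdevs,
	/* R24 = 3.32 kOhm, R25 = 100 kOhm: (1 + R24/R25 + R24/185.5k) * 1e6 */
	.v3_gain	= MAX1586_GAIN_R24_3k32,
};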
diff --git a/include/linux/regulator/max8649.h b/include/linux/regulator/max8649.h
new file mode 100644
index 000000000..417d14ecd
--- /dev/null
+++ b/include/linux/regulator/max8649.h
@@ -0,0 +1,44 @@
+/*
+ * Interface of Maxim max8649
+ *
+ * Copyright (C) 2009-2010 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_REGULATOR_MAX8649_H
+#define __LINUX_REGULATOR_MAX8649_H
+
+#include <linux/regulator/machine.h>
+
+enum {
+ MAX8649_EXTCLK_26MHZ = 0,
+ MAX8649_EXTCLK_13MHZ,
+ MAX8649_EXTCLK_19MHZ, /* 19.2MHz */
+};
+
+enum {
+ MAX8649_RAMP_32MV = 0,
+ MAX8649_RAMP_16MV,
+ MAX8649_RAMP_8MV,
+ MAX8649_RAMP_4MV,
+ MAX8649_RAMP_2MV,
+ MAX8649_RAMP_1MV,
+ MAX8649_RAMP_0_5MV,
+ MAX8649_RAMP_0_25MV,
+};
+
+struct max8649_platform_data {
+ struct regulator_init_data *regulator;
+
+ unsigned mode:2; /* bit[1:0] = VID1,VID0 */
+ unsigned extclk_freq:2;
+ unsigned extclk:1;
+ unsigned ramp_timing:3;
+ unsigned ramp_down:1;
+};
+
+#endif /* __LINUX_REGULATOR_MAX8649_H */
diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h
new file mode 100644
index 000000000..f8a6a4844
--- /dev/null
+++ b/include/linux/regulator/max8660.h
@@ -0,0 +1,57 @@
+/*
+ * max8660.h -- Voltage regulation for the Maxim 8660/8661
+ *
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_REGULATOR_MAX8660_H
+#define __LINUX_REGULATOR_MAX8660_H
+
+#include <linux/regulator/machine.h>
+
+enum {
+ MAX8660_V3,
+ MAX8660_V4,
+ MAX8660_V5,
+ MAX8660_V6,
+ MAX8660_V7,
+ MAX8660_V_END,
+};
+
+/**
+ * max8660_subdev_data - regulator subdev data
+ * @id: regulator id
+ * @name: regulator name
+ * @platform_data: regulator init data
+ */
+struct max8660_subdev_data {
+ int id;
+ const char *name;
+ struct regulator_init_data *platform_data;
+};
+
+/**
+ * max8660_platform_data - platform data for max8660
+ * @num_subdevs: number of regulators used
+ * @subdevs: pointer to regulators used
+ * @en34_is_high: if EN34 is driven high, regulators cannot be en-/disabled.
+ */
+struct max8660_platform_data {
+ int num_subdevs;
+ struct max8660_subdev_data *subdevs;
+ unsigned en34_is_high:1;
+};
+#endif
diff --git a/include/linux/regulator/max8952.h b/include/linux/regulator/max8952.h
new file mode 100644
index 000000000..4dbb63a1d
--- /dev/null
+++ b/include/linux/regulator/max8952.h
@@ -0,0 +1,135 @@
+/*
+ * max8952.h - Voltage regulation for the Maxim 8952
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef REGULATOR_MAX8952
+#define REGULATOR_MAX8952
+
+#include <linux/regulator/machine.h>
+
+enum {
+ MAX8952_DVS_MODE0,
+ MAX8952_DVS_MODE1,
+ MAX8952_DVS_MODE2,
+ MAX8952_DVS_MODE3,
+};
+
+enum {
+ MAX8952_DVS_770mV = 0,
+ MAX8952_DVS_780mV,
+ MAX8952_DVS_790mV,
+ MAX8952_DVS_800mV,
+ MAX8952_DVS_810mV,
+ MAX8952_DVS_820mV,
+ MAX8952_DVS_830mV,
+ MAX8952_DVS_840mV,
+ MAX8952_DVS_850mV,
+ MAX8952_DVS_860mV,
+ MAX8952_DVS_870mV,
+ MAX8952_DVS_880mV,
+ MAX8952_DVS_890mV,
+ MAX8952_DVS_900mV,
+ MAX8952_DVS_910mV,
+ MAX8952_DVS_920mV,
+ MAX8952_DVS_930mV,
+ MAX8952_DVS_940mV,
+ MAX8952_DVS_950mV,
+ MAX8952_DVS_960mV,
+ MAX8952_DVS_970mV,
+ MAX8952_DVS_980mV,
+ MAX8952_DVS_990mV,
+ MAX8952_DVS_1000mV,
+ MAX8952_DVS_1010mV,
+ MAX8952_DVS_1020mV,
+ MAX8952_DVS_1030mV,
+ MAX8952_DVS_1040mV,
+ MAX8952_DVS_1050mV,
+ MAX8952_DVS_1060mV,
+ MAX8952_DVS_1070mV,
+ MAX8952_DVS_1080mV,
+ MAX8952_DVS_1090mV,
+ MAX8952_DVS_1100mV,
+ MAX8952_DVS_1110mV,
+ MAX8952_DVS_1120mV,
+ MAX8952_DVS_1130mV,
+ MAX8952_DVS_1140mV,
+ MAX8952_DVS_1150mV,
+ MAX8952_DVS_1160mV,
+ MAX8952_DVS_1170mV,
+ MAX8952_DVS_1180mV,
+ MAX8952_DVS_1190mV,
+ MAX8952_DVS_1200mV,
+ MAX8952_DVS_1210mV,
+ MAX8952_DVS_1220mV,
+ MAX8952_DVS_1230mV,
+ MAX8952_DVS_1240mV,
+ MAX8952_DVS_1250mV,
+ MAX8952_DVS_1260mV,
+ MAX8952_DVS_1270mV,
+ MAX8952_DVS_1280mV,
+ MAX8952_DVS_1290mV,
+ MAX8952_DVS_1300mV,
+ MAX8952_DVS_1310mV,
+ MAX8952_DVS_1320mV,
+ MAX8952_DVS_1330mV,
+ MAX8952_DVS_1340mV,
+ MAX8952_DVS_1350mV,
+ MAX8952_DVS_1360mV,
+ MAX8952_DVS_1370mV,
+ MAX8952_DVS_1380mV,
+ MAX8952_DVS_1390mV,
+ MAX8952_DVS_1400mV,
+};
+
+enum {
+ MAX8952_SYNC_FREQ_26MHZ, /* Default */
+ MAX8952_SYNC_FREQ_13MHZ,
+ MAX8952_SYNC_FREQ_19_2MHZ,
+};
+
+enum {
+ MAX8952_RAMP_32mV_us = 0, /* Default */
+ MAX8952_RAMP_16mV_us,
+ MAX8952_RAMP_8mV_us,
+ MAX8952_RAMP_4mV_us,
+ MAX8952_RAMP_2mV_us,
+ MAX8952_RAMP_1mV_us,
+ MAX8952_RAMP_0_5mV_us,
+ MAX8952_RAMP_0_25mV_us,
+};
+
+#define MAX8952_NUM_DVS_MODE 4
+
+struct max8952_platform_data {
+ int gpio_vid0;
+ int gpio_vid1;
+ int gpio_en;
+
+ u32 default_mode;
+ u32 dvs_mode[MAX8952_NUM_DVS_MODE]; /* MAX8952_DVS_MODEx_XXXXmV */
+
+ u32 sync_freq;
+ u32 ramp_speed;
+
+ struct regulator_init_data *reg_data;
+};
+
+
+#endif /* REGULATOR_MAX8952 */
diff --git a/include/linux/regulator/max8973-regulator.h b/include/linux/regulator/max8973-regulator.h
new file mode 100644
index 000000000..f8acc052e
--- /dev/null
+++ b/include/linux/regulator/max8973-regulator.h
@@ -0,0 +1,72 @@
+/*
+ * max8973-regulator.h -- MAXIM 8973 regulator
+ *
+ * Interface for regulator driver for MAXIM 8973 DC-DC step-down
+ * switching regulator.
+ *
+ * Copyright (C) 2012 NVIDIA Corporation
+
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_REGULATOR_MAX8973_H
+#define __LINUX_REGULATOR_MAX8973_H
+
+/*
+ * Control flags for configuring the device.
+ * Clients pass the required flags ORed together.
+ */
+#define MAX8973_CONTROL_REMOTE_SENSE_ENABLE 0x00000001
+#define MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE 0x00000002
+#define MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE 0x00000004
+#define MAX8973_CONTROL_BIAS_ENABLE 0x00000008
+#define MAX8973_CONTROL_PULL_DOWN_ENABLE 0x00000010
+#define MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE 0x00000020
+
+#define MAX8973_CONTROL_CLKADV_TRIP_DISABLED 0x00000000
+#define MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US 0x00010000
+#define MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US 0x00020000
+#define MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US_HIST_DIS 0x00030000
+
+#define MAX8973_CONTROL_INDUCTOR_VALUE_NOMINAL 0x00000000
+#define MAX8973_CONTROL_INDUCTOR_VALUE_MINUS_30_PER 0x00100000
+#define MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_30_PER 0x00200000
+#define MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_60_PER 0x00300000
+
+/*
+ * struct max8973_regulator_platform_data - max8973 regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @control_flags: ORed combination of the control flags above, used to
+ *		configure the device.
+ * @enable_ext_control: Enable/disable the output voltage via the external
+ *		control signal on the EN input pin. If false, the output
+ *		is enabled/disabled through the EN bit of the device
+ *		register.
+ * @dvs_gpio: GPIO used for DVS. Set to -1 if DVS is tied to fixed logic.
+ * @dvs_def_state: Default state of the DVS pin: 1 if high, else 0.
+ */
+struct max8973_regulator_platform_data {
+ struct regulator_init_data *reg_init_data;
+ unsigned long control_flags;
+ bool enable_ext_control;
+ int dvs_gpio;
+ unsigned dvs_def_state:1;
+};
+
+#endif /* __LINUX_REGULATOR_MAX8973_H */
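
A board might combine the control flags above like this; the flag selection, constraints and external-control choice are illustrative.

#include <linux/regulator/machine.h>
#include <linux/regulator/max8973-regulator.h>

static struct regulator_init_data board_vdd_cpu_init_data = {
	.constraints = {
		.min_uV		= 800000,
		.max_uV		= 1200000,
		.always_on	= 1,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct max8973_regulator_platform_data board_max8973_pdata = {
	.reg_init_data		= &board_vdd_cpu_init_data,
	/* ORed control flags, per the list above */
	.control_flags		= MAX8973_CONTROL_REMOTE_SENSE_ENABLE |
				  MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE |
				  MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US,
	.enable_ext_control	= true,		/* EN pin gates the output */
	.dvs_gpio		= -1,		/* DVS pin tied to fixed logic */
};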
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644
index 000000000..30cc5963e
--- /dev/null
+++ b/include/linux/regulator/mt6397-regulator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu <flora.fu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6397_H
+#define __LINUX_REGULATOR_MT6397_H
+
+enum {
+ MT6397_ID_VPCA15 = 0,
+ MT6397_ID_VPCA7,
+ MT6397_ID_VSRAMCA15,
+ MT6397_ID_VSRAMCA7,
+ MT6397_ID_VCORE,
+ MT6397_ID_VGPU,
+ MT6397_ID_VDRM,
+ MT6397_ID_VIO18 = 7,
+ MT6397_ID_VTCXO,
+ MT6397_ID_VA28,
+ MT6397_ID_VCAMA,
+ MT6397_ID_VIO28,
+ MT6397_ID_VUSB,
+ MT6397_ID_VMC,
+ MT6397_ID_VMCH,
+ MT6397_ID_VEMC3V3,
+ MT6397_ID_VGP1,
+ MT6397_ID_VGP2,
+ MT6397_ID_VGP3,
+ MT6397_ID_VGP4,
+ MT6397_ID_VGP5,
+ MT6397_ID_VGP6,
+ MT6397_ID_VIBR,
+ MT6397_ID_RG_MAX,
+};
+
+#define MT6397_MAX_REGULATOR MT6397_ID_RG_MAX
+#define MT6397_REGULATOR_ID97 0x97
+#define MT6397_REGULATOR_ID91 0x91
+
+#endif /* __LINUX_REGULATOR_MT6397_H */
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h
new file mode 100644
index 000000000..763953f7e
--- /dev/null
+++ b/include/linux/regulator/of_regulator.h
@@ -0,0 +1,45 @@
+/*
+ * OpenFirmware regulator support routines
+ *
+ */
+
+#ifndef __LINUX_OF_REG_H
+#define __LINUX_OF_REG_H
+
+struct regulator_desc;
+
+struct of_regulator_match {
+ const char *name;
+ void *driver_data;
+ struct regulator_init_data *init_data;
+ struct device_node *of_node;
+ const struct regulator_desc *desc;
+};
+
+#if defined(CONFIG_OF)
+extern struct regulator_init_data
+ *of_get_regulator_init_data(struct device *dev,
+ struct device_node *node,
+ const struct regulator_desc *desc);
+extern int of_regulator_match(struct device *dev, struct device_node *node,
+ struct of_regulator_match *matches,
+ unsigned int num_matches);
+#else
+static inline struct regulator_init_data
+ *of_get_regulator_init_data(struct device *dev,
+ struct device_node *node,
+ const struct regulator_desc *desc)
+{
+ return NULL;
+}
+
+static inline int of_regulator_match(struct device *dev,
+ struct device_node *node,
+ struct of_regulator_match *matches,
+ unsigned int num_matches)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+#endif /* __LINUX_OF_REG_H */
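
A PMIC driver typically calls these helpers from probe() to pair the children of its "regulators" device-tree node with its regulator descriptions. The sketch below assumes a hypothetical my_pmic driver with two outputs named "buck" and "ldo".

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>

/* Descriptions provided elsewhere by the (hypothetical) driver. */
extern const struct regulator_desc my_pmic_buck_desc;
extern const struct regulator_desc my_pmic_ldo_desc;

static struct of_regulator_match my_pmic_matches[] = {
	{ .name = "buck", .desc = &my_pmic_buck_desc },
	{ .name = "ldo",  .desc = &my_pmic_ldo_desc  },
};

static int my_pmic_parse_dt(struct device *dev)
{
	struct device_node *np;
	int ret;

	np = of_get_child_by_name(dev->of_node, "regulators");
	if (!np)
		return -ENODEV;

	/* Fills .init_data and .of_node of each matched entry. */
	ret = of_regulator_match(dev, np, my_pmic_matches,
				 ARRAY_SIZE(my_pmic_matches));
	of_node_put(np);

	return ret < 0 ? ret : 0;
}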
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
new file mode 100644
index 000000000..70c6c66c5
--- /dev/null
+++ b/include/linux/regulator/pfuze100.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __LINUX_REG_PFUZE100_H
+#define __LINUX_REG_PFUZE100_H
+
+#define PFUZE100_SW1AB 0
+#define PFUZE100_SW1C 1
+#define PFUZE100_SW2 2
+#define PFUZE100_SW3A 3
+#define PFUZE100_SW3B 4
+#define PFUZE100_SW4 5
+#define PFUZE100_SWBST 6
+#define PFUZE100_VSNVS 7
+#define PFUZE100_VREFDDR 8
+#define PFUZE100_VGEN1 9
+#define PFUZE100_VGEN2 10
+#define PFUZE100_VGEN3 11
+#define PFUZE100_VGEN4 12
+#define PFUZE100_VGEN5 13
+#define PFUZE100_VGEN6 14
+#define PFUZE100_MAX_REGULATOR 15
+
+#define PFUZE200_SW1AB 0
+#define PFUZE200_SW2 1
+#define PFUZE200_SW3A 2
+#define PFUZE200_SW3B 3
+#define PFUZE200_SWBST 4
+#define PFUZE200_VSNVS 5
+#define PFUZE200_VREFDDR 6
+#define PFUZE200_VGEN1 7
+#define PFUZE200_VGEN2 8
+#define PFUZE200_VGEN3 9
+#define PFUZE200_VGEN4 10
+#define PFUZE200_VGEN5 11
+#define PFUZE200_VGEN6 12
+
+#define PFUZE3000_SW1A 0
+#define PFUZE3000_SW1B 1
+#define PFUZE3000_SW2 2
+#define PFUZE3000_SW3 3
+#define PFUZE3000_SWBST 4
+#define PFUZE3000_VSNVS 5
+#define PFUZE3000_VREFDDR 6
+#define PFUZE3000_VLDO1 7
+#define PFUZE3000_VLDO2 8
+#define PFUZE3000_VCCSD 9
+#define PFUZE3000_V33 10
+#define PFUZE3000_VLDO3 11
+#define PFUZE3000_VLDO4 12
+
+struct regulator_init_data;
+
+struct pfuze_regulator_platform_data {
+ struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR];
+};
+
+#endif /* __LINUX_REG_PFUZE100_H */
diff --git a/include/linux/regulator/tps51632-regulator.h b/include/linux/regulator/tps51632-regulator.h
new file mode 100644
index 000000000..d00841e1a
--- /dev/null
+++ b/include/linux/regulator/tps51632-regulator.h
@@ -0,0 +1,47 @@
+/*
+ * tps51632-regulator.h -- TPS51632 regulator
+ *
+ * Interface for regulator driver for TPS51632 3-2-1 Phase D-Cap Step Down
+ * Driverless Controller with serial VID control and DVFS.
+ *
+ * Copyright (C) 2012 NVIDIA Corporation
+
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_REGULATOR_TPS51632_H
+#define __LINUX_REGULATOR_TPS51632_H
+
+/*
+ * struct tps51632_regulator_platform_data - tps51632 regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @enable_pwm_dvfs: Enable PWM DVFS or not.
+ * @dvfs_step_20mV: Step for DVFS is 20mV or 10mV.
+ * @max_voltage_uV: Maximum possible voltage in PWM-DVFS mode.
+ * @base_voltage_uV: Base voltage when PWM-DVFS enabled.
+ */
+struct tps51632_regulator_platform_data {
+ struct regulator_init_data *reg_init_data;
+ bool enable_pwm_dvfs;
+ bool dvfs_step_20mV;
+ int max_voltage_uV;
+ int base_voltage_uV;
+};
+
+#endif /* __LINUX_REGULATOR_TPS51632_H */
diff --git a/include/linux/regulator/tps62360.h b/include/linux/regulator/tps62360.h
new file mode 100644
index 000000000..a4c49394c
--- /dev/null
+++ b/include/linux/regulator/tps62360.h
@@ -0,0 +1,53 @@
+/*
+ * tps62360.h -- TI tps62360
+ *
+ * Interface for regulator driver for TI TPS62360 Processor core supply
+ *
+ * Copyright (C) 2012 NVIDIA Corporation
+
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+#ifndef __LINUX_REGULATOR_TPS62360_H
+#define __LINUX_REGULATOR_TPS62360_H
+
+/*
+ * struct tps62360_regulator_platform_data - tps62360 regulator platform data.
+ *
+ * @reg_init_data: The regulator init data.
+ * @en_discharge: Enable discharging of the output capacitor via the
+ *		internal register.
+ * @en_internal_pulldn: Whether to enable the internal pull-down.
+ * @vsel0_gpio: GPIO number for VSEL0. Set to -1 if it is tied to
+ *		fixed logic.
+ * @vsel1_gpio: GPIO number for VSEL1. Set to -1 if it is tied to
+ *		fixed logic.
+ * @vsel0_def_state: Default state of VSEL0: 1 if high, else 0.
+ * @vsel1_def_state: Default state of VSEL1: 1 if high, else 0.
+ */
+struct tps62360_regulator_platform_data {
+ struct regulator_init_data *reg_init_data;
+ bool en_discharge;
+ bool en_internal_pulldn;
+ int vsel0_gpio;
+ int vsel1_gpio;
+ int vsel0_def_state;
+ int vsel1_def_state;
+};
+
+#endif /* __LINUX_REGULATOR_TPS62360_H */
diff --git a/include/linux/regulator/tps6507x.h b/include/linux/regulator/tps6507x.h
new file mode 100644
index 000000000..4892f591b
--- /dev/null
+++ b/include/linux/regulator/tps6507x.h
@@ -0,0 +1,32 @@
+/*
+ * tps6507x.h -- Voltage regulation for the Texas Instruments TPS6507X
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef REGULATOR_TPS6507X
+#define REGULATOR_TPS6507X
+
+/**
+ * tps6507x_reg_platform_data - platform data for tps6507x
+ * @defdcdc_default: Defines whether the DCDC high or low register controls
+ *	the output voltage by default. Valid for the DCDC2 and DCDC3 outputs only.
+ */
+struct tps6507x_reg_platform_data {
+ bool defdcdc_default;
+};
+
+#endif
diff --git a/include/linux/regulator/userspace-consumer.h b/include/linux/regulator/userspace-consumer.h
new file mode 100644
index 000000000..b4554ce9d
--- /dev/null
+++ b/include/linux/regulator/userspace-consumer.h
@@ -0,0 +1,25 @@
+#ifndef __REGULATOR_PLATFORM_CONSUMER_H_
+#define __REGULATOR_PLATFORM_CONSUMER_H_
+
+struct regulator_consumer_supply;
+
+/**
+ * struct regulator_userspace_consumer_data - line consumer
+ * initialisation data.
+ *
+ * @name: Name for the consumer line
+ * @num_supplies: Number of supplies feeding the line
+ * @supplies: Supplies configuration.
+ * @init_on: Set if the regulators supplying the line should be
+ * enabled during initialisation
+ */
+struct regulator_userspace_consumer_data {
+ const char *name;
+
+ int num_supplies;
+ struct regulator_bulk_data *supplies;
+
+ bool init_on;
+};
+
+#endif /* __REGULATOR_PLATFORM_CONSUMER_H_ */
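
A board exposing a switchable supply to userspace would describe it roughly like this; the supply names are invented and the "reg-userspace-consumer" device name is assumed to match the corresponding platform driver.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/userspace-consumer.h>

static struct regulator_bulk_data board_lcd_supplies[] = {
	{ .supply = "vcc_lcd" },	/* hypothetical supply names */
	{ .supply = "vio_lcd" },
};

static struct regulator_userspace_consumer_data board_lcd_consumer_data = {
	.name		= "lcd-power",
	.num_supplies	= ARRAY_SIZE(board_lcd_supplies),
	.supplies	= board_lcd_supplies,
	.init_on	= false,
};

static struct platform_device board_lcd_consumer_device = {
	.name	= "reg-userspace-consumer",	/* assumed driver name */
	.id	= 0,
	.dev	= {
		.platform_data = &board_lcd_consumer_data,
	},
};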
diff --git a/include/linux/relay.h b/include/linux/relay.h
new file mode 100644
index 000000000..d7c835969
--- /dev/null
+++ b/include/linux/relay.h
@@ -0,0 +1,289 @@
+/*
+ * linux/include/linux/relay.h
+ *
+ * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
+ * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
+ *
+ * CONFIG_RELAY definitions and declarations
+ */
+
+#ifndef _LINUX_RELAY_H
+#define _LINUX_RELAY_H
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/kref.h>
+
+/*
+ * Tracks changes to rchan/rchan_buf structs
+ */
+#define RELAYFS_CHANNEL_VERSION 7
+
+/*
+ * Per-cpu relay channel buffer
+ */
+struct rchan_buf
+{
+ void *start; /* start of channel buffer */
+ void *data; /* start of current sub-buffer */
+ size_t offset; /* current offset into sub-buffer */
+ size_t subbufs_produced; /* count of sub-buffers produced */
+ size_t subbufs_consumed; /* count of sub-buffers consumed */
+ struct rchan *chan; /* associated channel */
+ wait_queue_head_t read_wait; /* reader wait queue */
+ struct timer_list timer; /* reader wake-up timer */
+ struct dentry *dentry; /* channel file dentry */
+ struct kref kref; /* channel buffer refcount */
+ struct page **page_array; /* array of current buffer pages */
+ unsigned int page_count; /* number of current buffer pages */
+ unsigned int finalized; /* buffer has been finalized */
+ size_t *padding; /* padding counts per sub-buffer */
+ size_t prev_padding; /* temporary variable */
+ size_t bytes_consumed; /* bytes consumed in cur read subbuf */
+ size_t early_bytes; /* bytes consumed before VFS inited */
+ unsigned int cpu; /* this buf's cpu */
+} ____cacheline_aligned;
+
+/*
+ * Relay channel data structure
+ */
+struct rchan
+{
+ u32 version; /* the version of this struct */
+ size_t subbuf_size; /* sub-buffer size */
+ size_t n_subbufs; /* number of sub-buffers per buffer */
+ size_t alloc_size; /* total buffer size allocated */
+ struct rchan_callbacks *cb; /* client callbacks */
+ struct kref kref; /* channel refcount */
+ void *private_data; /* for user-defined data */
+ size_t last_toobig; /* tried to log event > subbuf size */
+ struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
+ int is_global; /* One global buffer ? */
+ struct list_head list; /* for channel list */
+ struct dentry *parent; /* parent dentry passed to open */
+ int has_base_filename; /* has a filename associated? */
+ char base_filename[NAME_MAX]; /* saved base filename */
+};
+
+/*
+ * Relay channel client callbacks
+ */
+struct rchan_callbacks
+{
+ /*
+ * subbuf_start - called on buffer-switch to a new sub-buffer
+ * @buf: the channel buffer containing the new sub-buffer
+ * @subbuf: the start of the new sub-buffer
+ * @prev_subbuf: the start of the previous sub-buffer
+ * @prev_padding: unused space at the end of previous sub-buffer
+ *
+ * The client should return 1 to continue logging, 0 to stop
+ * logging.
+ *
+ * NOTE: subbuf_start will also be invoked when the buffer is
+ * created, so that the first sub-buffer can be initialized
+ * if necessary. In this case, prev_subbuf will be NULL.
+ *
+ * NOTE: the client can reserve bytes at the beginning of the new
+ * sub-buffer by calling subbuf_start_reserve() in this callback.
+ */
+ int (*subbuf_start) (struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding);
+
+ /*
+ * buf_mapped - relay buffer mmap notification
+ * @buf: the channel buffer
+ * @filp: relay file pointer
+ *
+ * Called when a relay file is successfully mmapped
+ */
+ void (*buf_mapped)(struct rchan_buf *buf,
+ struct file *filp);
+
+ /*
+ * buf_unmapped - relay buffer unmap notification
+ * @buf: the channel buffer
+ * @filp: relay file pointer
+ *
+ * Called when a relay file is successfully unmapped
+ */
+ void (*buf_unmapped)(struct rchan_buf *buf,
+ struct file *filp);
+ /*
+ * create_buf_file - create file to represent a relay channel buffer
+ * @filename: the name of the file to create
+ * @parent: the parent of the file to create
+ * @mode: the mode of the file to create
+ * @buf: the channel buffer
+ * @is_global: outparam - set non-zero if the buffer should be global
+ *
+ * Called during relay_open(), once for each per-cpu buffer,
+ * to allow the client to create a file to be used to
+ * represent the corresponding channel buffer. If the file is
+ * created outside of relay, the parent must also exist in
+ * that filesystem.
+ *
+ * The callback should return the dentry of the file created
+ * to represent the relay buffer.
+ *
+ * Setting the is_global outparam to a non-zero value will
+ * cause relay_open() to create a single global buffer rather
+ * than the default set of per-cpu buffers.
+ *
+ * See Documentation/filesystems/relay.txt for more info.
+ */
+ struct dentry *(*create_buf_file)(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global);
+
+ /*
+ * remove_buf_file - remove file representing a relay channel buffer
+ * @dentry: the dentry of the file to remove
+ *
+ * Called during relay_close(), once for each per-cpu buffer,
+ * to allow the client to remove a file used to represent a
+ * channel buffer.
+ *
+ * The callback should return 0 if successful, negative if not.
+ */
+ int (*remove_buf_file)(struct dentry *dentry);
+};
+
+/*
+ * CONFIG_RELAY kernel API, kernel/relay.c
+ */
+
+struct rchan *relay_open(const char *base_filename,
+ struct dentry *parent,
+ size_t subbuf_size,
+ size_t n_subbufs,
+ struct rchan_callbacks *cb,
+ void *private_data);
+extern int relay_late_setup_files(struct rchan *chan,
+ const char *base_filename,
+ struct dentry *parent);
+extern void relay_close(struct rchan *chan);
+extern void relay_flush(struct rchan *chan);
+extern void relay_subbufs_consumed(struct rchan *chan,
+ unsigned int cpu,
+ size_t consumed);
+extern void relay_reset(struct rchan *chan);
+extern int relay_buf_full(struct rchan_buf *buf);
+
+extern size_t relay_switch_subbuf(struct rchan_buf *buf,
+ size_t length);
+
+/**
+ * relay_write - write data into the channel
+ * @chan: relay channel
+ * @data: data to be written
+ * @length: number of bytes to write
+ *
+ * Writes data into the current cpu's channel buffer.
+ *
+ * Protects the buffer by disabling interrupts. Use this
+ * if you might be logging from interrupt context. Try
+ * __relay_write() if you know you won't be logging from
+ * interrupt context.
+ */
+static inline void relay_write(struct rchan *chan,
+ const void *data,
+ size_t length)
+{
+ unsigned long flags;
+ struct rchan_buf *buf;
+
+ local_irq_save(flags);
+ buf = chan->buf[smp_processor_id()];
+ if (unlikely(buf->offset + length > chan->subbuf_size))
+ length = relay_switch_subbuf(buf, length);
+ memcpy(buf->data + buf->offset, data, length);
+ buf->offset += length;
+ local_irq_restore(flags);
+}
+
+/**
+ * __relay_write - write data into the channel
+ * @chan: relay channel
+ * @data: data to be written
+ * @length: number of bytes to write
+ *
+ * Writes data into the current cpu's channel buffer.
+ *
+ * Protects the buffer by disabling preemption. Use
+ * relay_write() if you might be logging from interrupt
+ * context.
+ */
+static inline void __relay_write(struct rchan *chan,
+ const void *data,
+ size_t length)
+{
+ struct rchan_buf *buf;
+
+ buf = chan->buf[get_cpu()];
+ if (unlikely(buf->offset + length > buf->chan->subbuf_size))
+ length = relay_switch_subbuf(buf, length);
+ memcpy(buf->data + buf->offset, data, length);
+ buf->offset += length;
+ put_cpu();
+}
+
+/**
+ * relay_reserve - reserve slot in channel buffer
+ * @chan: relay channel
+ * @length: number of bytes to reserve
+ *
+ * Returns pointer to reserved slot, NULL if full.
+ *
+ * Reserves a slot in the current cpu's channel buffer.
+ * Does not protect the buffer at all - caller must provide
+ * appropriate synchronization.
+ */
+static inline void *relay_reserve(struct rchan *chan, size_t length)
+{
+ void *reserved;
+ struct rchan_buf *buf = chan->buf[smp_processor_id()];
+
+ if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
+ length = relay_switch_subbuf(buf, length);
+ if (!length)
+ return NULL;
+ }
+ reserved = buf->data + buf->offset;
+ buf->offset += length;
+
+ return reserved;
+}
+
+/**
+ * subbuf_start_reserve - reserve bytes at the start of a sub-buffer
+ * @buf: relay channel buffer
+ * @length: number of bytes to reserve
+ *
+ * Helper function used to reserve bytes at the beginning of
+ * a sub-buffer in the subbuf_start() callback.
+ */
+static inline void subbuf_start_reserve(struct rchan_buf *buf,
+ size_t length)
+{
+ BUG_ON(length >= buf->chan->subbuf_size - 1);
+ buf->offset = length;
+}
+
+/*
+ * exported relay file operations, kernel/relay.c
+ */
+extern const struct file_operations relay_file_operations;
+
+#endif /* _LINUX_RELAY_H */
+
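A minimal relay client, sketched below, wires relay_file_operations into debugfs through the create_buf_file()/remove_buf_file() callbacks, opens a channel with 8 sub-buffers of 16 KiB, and logs fixed-size records with relay_write(). The file name and record format are invented for illustration.

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/relay.h>

static struct rchan *example_chan;

static struct dentry *example_create_buf_file(const char *filename,
					      struct dentry *parent,
					      umode_t mode,
					      struct rchan_buf *buf,
					      int *is_global)
{
	/* Expose each per-cpu buffer as a debugfs file. */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int example_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks example_relay_callbacks = {
	.create_buf_file	= example_create_buf_file,
	.remove_buf_file	= example_remove_buf_file,
};

static int example_relay_init(void)
{
	/* 8 sub-buffers of 16 KiB each, rooted in debugfs. */
	example_chan = relay_open("example", NULL, 16 * 1024, 8,
				  &example_relay_callbacks, NULL);
	return example_chan ? 0 : -ENOMEM;
}

static void example_log_event(u32 id, u64 timestamp)
{
	struct { u32 id; u64 ts; } rec = { .id = id, .ts = timestamp };

	/* Safe from interrupt context: relay_write() disables interrupts. */
	relay_write(example_chan, &rec, sizeof(rec));
}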
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
new file mode 100644
index 000000000..78b8a9b9d
--- /dev/null
+++ b/include/linux/remoteproc.h
@@ -0,0 +1,507 @@
+/*
+ * Remote Processor Framework
+ *
+ * Copyright(c) 2011 Texas Instruments, Inc.
+ * Copyright(c) 2011 Google, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef REMOTEPROC_H
+#define REMOTEPROC_H
+
+#include <linux/types.h>
+#include <linux/klist.h>
+#include <linux/mutex.h>
+#include <linux/virtio.h>
+#include <linux/completion.h>
+#include <linux/idr.h>
+
+/**
+ * struct resource_table - firmware resource table header
+ * @ver: version number
+ * @num: number of resource entries
+ * @reserved: reserved (must be zero)
+ * @offset: array of offsets pointing at the various resource entries
+ *
+ * A resource table is essentially a list of system resources required
+ * by the remote processor. It may also include configuration entries.
+ * If needed, the remote processor firmware should contain this table
+ * as a dedicated ".resource_table" ELF section.
+ *
+ * Some resource entries are mere announcements, where the host is informed
+ * of specific remoteproc configuration. Other entries require the host to
+ * do something (e.g. allocate a system resource). Sometimes a negotiation
+ * is expected, where the firmware requests a resource, and once allocated,
+ * the host should provide back its details (e.g. address of an allocated
+ * memory region).
+ *
+ * The header of the resource table, as expressed by this structure,
+ * contains a version number (should we need to change this format in the
+ * future), the number of available resource entries, and their offsets
+ * in the table.
+ *
+ * Immediately following this header are the resource entries themselves,
+ * each of which begins with a resource entry header (as described below).
+ */
+struct resource_table {
+ u32 ver;
+ u32 num;
+ u32 reserved[2];
+ u32 offset[0];
+} __packed;
+
+/**
+ * struct fw_rsc_hdr - firmware resource entry header
+ * @type: resource type
+ * @data: resource data
+ *
+ * Every resource entry begins with a 'struct fw_rsc_hdr' header providing
+ * its @type. The content of the entry itself will immediately follow
+ * this header, and it should be parsed according to the resource type.
+ */
+struct fw_rsc_hdr {
+ u32 type;
+ u8 data[0];
+} __packed;
+
+/**
+ * enum fw_resource_type - types of resource entries
+ *
+ * @RSC_CARVEOUT: request for allocation of a physically contiguous
+ * memory region.
+ * @RSC_DEVMEM: request to iommu_map a memory-based peripheral.
+ * @RSC_TRACE: announces the availability of a trace buffer into which
+ * the remote processor will be writing logs.
+ * @RSC_VDEV: declare support for a virtio device, and serve as its
+ * virtio header.
+ * @RSC_LAST: just keep this one at the end
+ *
+ * For more details regarding a specific resource type, please see its
+ * dedicated structure below.
+ *
+ * Please note that these values are used as indices to the rproc_handle_rsc
+ * lookup table, so please keep them sane. Moreover, @RSC_LAST is used to
+ * check the validity of an index before the lookup table is accessed, so
+ * please update it as needed.
+ */
+enum fw_resource_type {
+ RSC_CARVEOUT = 0,
+ RSC_DEVMEM = 1,
+ RSC_TRACE = 2,
+ RSC_VDEV = 3,
+ RSC_LAST = 4,
+};
+
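/*
 * Editor's note -- illustrative sketch only, not part of the original
 * header.  Given the layout described above, a host-side parser walks
 * the table roughly like this (the per-type handling is elided):
 */
static inline void example_walk_resource_table(struct resource_table *table)
{
	u32 i;

	for (i = 0; i < table->num; i++) {
		struct fw_rsc_hdr *hdr = (void *)table + table->offset[i];
		void *rsc = (void *)hdr + sizeof(*hdr);	/* type-specific body */

		/* RSC_LAST bounds the valid types before any lookup table. */
		if (hdr->type >= RSC_LAST)
			continue;

		switch (hdr->type) {
		case RSC_CARVEOUT:	/* allocate contiguous memory, fill da/pa */
		case RSC_DEVMEM:	/* iommu_map() a peripheral region */
		case RSC_TRACE:		/* expose the trace buffer */
		case RSC_VDEV:		/* register a virtio device */
		default:
			break;
		}
		(void)rsc;	/* cast to the matching fw_rsc_* struct as needed */
	}
}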
+#define FW_RSC_ADDR_ANY (0xFFFFFFFFFFFFFFFF)
+
+/**
+ * struct fw_rsc_carveout - physically contiguous memory request
+ * @da: device address
+ * @pa: physical address
+ * @len: length (in bytes)
+ * @flags: iommu protection flags
+ * @reserved: reserved (must be zero)
+ * @name: human-readable name of the requested memory region
+ *
+ * This resource entry requests the host to allocate a physically contiguous
+ * memory region.
+ *
+ * These request entries should precede other firmware resource entries,
+ * as other entries might request placing other data objects inside
+ * these memory regions (e.g. data/code segments, trace resource entries, ...).
+ *
+ * Allocating memory this way helps utilize the reserved physical memory
+ * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
+ * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
+ * pressure is important; it may have a substantial impact on performance.
+ *
+ * If the firmware is compiled with static addresses, then @da should specify
+ * the expected device address of this memory region. If @da is set to
+ * FW_RSC_ADDR_ANY, then the host will dynamically allocate it, and then
+ * overwrite @da with the dynamically allocated address.
+ *
+ * We will always use @da to negotiate the device addresses, even if the
+ * remote processor isn't using an iommu. In that case, though, @da will
+ * simply contain physical addresses.
+ *
+ * Some remote processors need to know the allocated physical address
+ * even if they do use an iommu. This is needed, e.g., if they control
+ * hardware accelerators which access the physical memory directly (this
+ * is the case with OMAP4 for instance). In that case, the host will
+ * overwrite @pa with the dynamically allocated physical address.
+ * Generally we don't want to expose physical addresses if we don't have to
+ * (remote processors are generally _not_ trusted), so we might want to
+ * change this to happen _only_ when explicitly required by the hardware.
+ *
+ * @flags is used to provide IOMMU protection flags, and @name should
+ * (optionally) contain a human readable name of this carveout region
+ * (mainly for debugging purposes).
+ */
+struct fw_rsc_carveout {
+ u32 da;
+ u32 pa;
+ u32 len;
+ u32 flags;
+ u32 reserved;
+ u8 name[32];
+} __packed;
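+
+/*
+ * Illustrative sketch (not part of the original header): a carveout entry
+ * as firmware built with static addresses might declare it. In the actual
+ * table this entry is preceded by a 'struct fw_rsc_hdr' with type
+ * RSC_CARVEOUT; all values and the name are examples only.
+ *
+ *   static struct fw_rsc_carveout example_carveout = {
+ *     .da = 0x78000000,
+ *     .pa = 0,
+ *     .len = 0x100000,
+ *     .flags = 0,
+ *     .name = "text",
+ *   };
+ */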
+
+/**
+ * struct fw_rsc_devmem - iommu mapping request
+ * @da: device address
+ * @pa: physical address
+ * @len: length (in bytes)
+ * @flags: iommu protection flags
+ * @reserved: reserved (must be zero)
+ * @name: human-readable name of the requested region to be mapped
+ *
+ * This resource entry requests the host to iommu map a physically contiguous
+ * memory region. This is needed in case the remote processor requires
+ * access to certain memory-based peripherals; _never_ use it to access
+ * regular memory.
+ *
+ * This is obviously only needed if the remote processor is accessing memory
+ * via an iommu.
+ *
+ * @da should specify the required device address, @pa should specify
+ * the physical address we want to map, @len should specify the size of
+ * the mapping and @flags is the IOMMU protection flags. As always, @name may
+ * (optionally) contain a human readable name of this mapping (mainly for
+ * debugging purposes).
+ *
+ * Note: at this point we just "trust" those devmem entries to contain valid
+ * physical addresses, but this isn't safe and will be changed: eventually we
+ * want remoteproc implementations to provide us ranges of physical addresses
+ * the firmware is allowed to request, and not allow firmwares to request
+ * access to physical addresses that are outside those ranges.
+ */
+struct fw_rsc_devmem {
+ u32 da;
+ u32 pa;
+ u32 len;
+ u32 flags;
+ u32 reserved;
+ u8 name[32];
+} __packed;
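+
+/*
+ * Illustrative sketch (not part of the original header): a devmem entry
+ * asking the host to map a hypothetical memory-mapped peripheral block.
+ * In a real table @flags would typically carry IOMMU protection bits
+ * (e.g. IOMMU_READ | IOMMU_WRITE from <linux/iommu.h>).
+ *
+ *   static struct fw_rsc_devmem example_devmem = {
+ *     .da = 0x4a000000,
+ *     .pa = 0x4a000000,
+ *     .len = 0x100000,
+ *     .flags = 0,
+ *     .name = "l4-periph",
+ *   };
+ */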
+
+/**
+ * struct fw_rsc_trace - trace buffer declaration
+ * @da: device address
+ * @len: length (in bytes)
+ * @reserved: reserved (must be zero)
+ * @name: human-readable name of the trace buffer
+ *
+ * This resource entry provides the host information about a trace buffer
+ * into which the remote processor will write log messages.
+ *
+ * @da specifies the device address of the buffer, @len specifies
+ * its size, and @name may contain a human readable name of the trace buffer.
+ *
+ * After booting the remote processor, the trace buffers are exposed to the
+ * user via debugfs entries (called trace0, trace1, etc..).
+ */
+struct fw_rsc_trace {
+ u32 da;
+ u32 len;
+ u32 reserved;
+ u8 name[32];
+} __packed;
+
+/**
+ * struct fw_rsc_vdev_vring - vring descriptor entry
+ * @da: device address
+ * @align: the alignment between the consumer and producer parts of the vring
+ * @num: num of buffers supported by this vring (must be power of two)
+ * @notifyid: a unique rproc-wide notify index for this vring. This notify
+ * index is used when kicking a remote processor, to let it know that this
+ * vring is triggered.
+ * @reserved: reserved (must be zero)
+ *
+ * This descriptor is not a resource entry by itself; it is part of the
+ * vdev resource type (see below).
+ *
+ * Note that @da should either contain the device address where
+ * the remote processor is expecting the vring, or indicate that
+ * dynamic allocation of the vring's device address is supported.
+ */
+struct fw_rsc_vdev_vring {
+ u32 da;
+ u32 align;
+ u32 num;
+ u32 notifyid;
+ u32 reserved;
+} __packed;
+
+/**
+ * struct fw_rsc_vdev - virtio device header
+ * @id: virtio device id (as in virtio_ids.h)
+ * @notifyid: a unique rproc-wide notify index for this vdev. This notify
+ * index is used when kicking a remote processor, to let it know that the
+ * status/features of this vdev have changed.
+ * @dfeatures: the virtio device features supported by the firmware
+ * @gfeatures: a place holder used by the host to write back the
+ * negotiated features that are supported by both sides.
+ * @config_len: the size of the virtio config space of this vdev. The config
+ * space lies in the resource table immediately after this vdev header.
+ * @status: a place holder where the host will indicate its virtio progress.
+ * @num_of_vrings: indicates how many vrings are described in this vdev header
+ * @reserved: reserved (must be zero)
+ * @vring: an array of @num_of_vrings entries of 'struct fw_rsc_vdev_vring'.
+ *
+ * This resource is a virtio device header: it provides information about
+ * the vdev, and is then used by the host and its peer remote processors
+ * to negotiate and share certain virtio properties.
+ *
+ * By providing this resource entry, the firmware essentially asks remoteproc
+ * to statically allocate a vdev upon registration of the rproc (dynamic vdev
+ * allocation is not yet supported).
+ *
+ * Note: unlike virtualization systems, the term 'host' here means
+ * the Linux side which is running remoteproc to control the remote
+ * processors. We use the name 'gfeatures' to comply with virtio's terms,
+ * though there isn't really any virtualized guest OS here: it's the host
+ * which is responsible for negotiating the final features.
+ * Yeah, it's a bit confusing.
+ *
+ * Note: immediately following this structure is the virtio config space for
+ * this vdev (which is specific to the vdev; for more info, read the virtio
+ * spec). the size of the config space is specified by @config_len.
+ */
+struct fw_rsc_vdev {
+ u32 id;
+ u32 notifyid;
+ u32 dfeatures;
+ u32 gfeatures;
+ u32 config_len;
+ u8 status;
+ u8 num_of_vrings;
+ u8 reserved[2];
+ struct fw_rsc_vdev_vring vring[0];
+} __packed;
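+
+/*
+ * Illustrative sketch (not part of the original header): how firmware
+ * might statically declare a resource table containing a single vdev with
+ * two vrings. The wrapper struct, section name and values are examples
+ * only (.id = 7 assumes the rpmsg virtio id); a real table may carry any
+ * mix of entry types.
+ *
+ *   struct example_resource_table {
+ *     struct resource_table hdr;
+ *     u32 offset[1];
+ *     struct fw_rsc_hdr vdev_hdr;
+ *     struct fw_rsc_vdev vdev;
+ *     struct fw_rsc_vdev_vring vring[2];
+ *   } __packed;
+ *
+ *   static struct example_resource_table table
+ *   __attribute__((section(".resource_table"))) = {
+ *     .hdr = { .ver = 1, .num = 1 },
+ *     .offset = { offsetof(struct example_resource_table, vdev_hdr) },
+ *     .vdev_hdr = { .type = RSC_VDEV },
+ *     .vdev = { .id = 7, .num_of_vrings = 2 },
+ *     .vring = {
+ *       { .da = 0x60000000, .align = 4096, .num = 256 },
+ *       { .da = 0x60004000, .align = 4096, .num = 256 },
+ *     },
+ *   };
+ */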
+
+/**
+ * struct rproc_mem_entry - memory entry descriptor
+ * @va: virtual address
+ * @dma: dma address
+ * @len: length, in bytes
+ * @da: device address
+ * @priv: associated data
+ * @node: list node
+ */
+struct rproc_mem_entry {
+ void *va;
+ dma_addr_t dma;
+ int len;
+ u32 da;
+ void *priv;
+ struct list_head node;
+};
+
+struct rproc;
+
+/**
+ * struct rproc_ops - platform-specific device handlers
+ * @start: power on the device and boot it
+ * @stop: power off the device
+ * @kick: kick a virtqueue (virtqueue id given as a parameter)
+ */
+struct rproc_ops {
+ int (*start)(struct rproc *rproc);
+ int (*stop)(struct rproc *rproc);
+ void (*kick)(struct rproc *rproc, int vqid);
+};
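+
+/*
+ * Illustrative sketch (not part of the original header): a minimal set of
+ * handlers as a platform-specific driver might provide them. The my_*
+ * helpers and the reset/mailbox details are hypothetical.
+ *
+ *   static int my_rproc_start(struct rproc *rproc)
+ *   {
+ *     return my_release_reset(rproc->bootaddr);
+ *   }
+ *
+ *   static int my_rproc_stop(struct rproc *rproc)
+ *   {
+ *     return my_assert_reset();
+ *   }
+ *
+ *   static void my_rproc_kick(struct rproc *rproc, int vqid)
+ *   {
+ *     my_mailbox_send(vqid);
+ *   }
+ *
+ *   static const struct rproc_ops my_rproc_ops = {
+ *     .start = my_rproc_start,
+ *     .stop = my_rproc_stop,
+ *     .kick = my_rproc_kick,
+ *   };
+ */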
+
+/**
+ * enum rproc_state - remote processor states
+ * @RPROC_OFFLINE: device is powered off
+ * @RPROC_SUSPENDED: device is suspended; needs to be woken up to receive
+ * a message.
+ * @RPROC_RUNNING: device is up and running
+ * @RPROC_CRASHED: device has crashed; need to start recovery
+ * @RPROC_LAST: just keep this one at the end
+ *
+ * Please note that the values of these states are used as indices
+ * to rproc_state_string, a state-to-name lookup table,
+ * so please keep the two synchronized. @RPROC_LAST is used to check
+ * the validity of an index before the lookup table is accessed, so
+ * please update it as needed too.
+ */
+enum rproc_state {
+ RPROC_OFFLINE = 0,
+ RPROC_SUSPENDED = 1,
+ RPROC_RUNNING = 2,
+ RPROC_CRASHED = 3,
+ RPROC_LAST = 4,
+};
+
+/**
+ * enum rproc_crash_type - remote processor crash types
+ * @RPROC_MMUFAULT: iommu fault
+ *
+ * Each element of the enum is used as an array index, so the values of
+ * the elements should always be kept sane.
+ *
+ * Feel free to add more types when needed.
+ */
+enum rproc_crash_type {
+ RPROC_MMUFAULT,
+};
+
+/**
+ * struct rproc - represents a physical remote processor device
+ * @node: klist node of this rproc object
+ * @domain: iommu domain
+ * @name: human readable name of the rproc
+ * @firmware: name of firmware file to be loaded
+ * @priv: private data which belongs to the platform-specific rproc module
+ * @ops: platform-specific start/stop rproc handlers
+ * @dev: virtual device for refcounting and common remoteproc behavior
+ * @fw_ops: firmware-specific handlers
+ * @power: refcount of users who need this rproc powered up
+ * @state: state of the device
+ * @lock: lock which protects concurrent manipulations of the rproc
+ * @dbg_dir: debugfs directory of this rproc device
+ * @traces: list of trace buffers
+ * @num_traces: number of trace buffers
+ * @carveouts: list of physically contiguous memory allocations
+ * @mappings: list of iommu mappings we initiated, needed on shutdown
+ * @firmware_loading_complete: marks the end of asynchronous firmware loading
+ * @bootaddr: address of first instruction to boot rproc with (optional)
+ * @rvdevs: list of remote virtio devices
+ * @notifyids: idr for dynamically assigning rproc-wide unique notify ids
+ * @index: index of this rproc device
+ * @crash_handler: workqueue for handling a crash
+ * @crash_cnt: crash counter
+ * @crash_comp: completion used to sync crash handler and the rproc reload
+ * @recovery_disabled: flag that states if recovery was disabled
+ * @max_notifyid: largest allocated notify id.
+ * @table_ptr: pointer to the resource table in effect
+ * @cached_table: copy of the resource table
+ * @table_csum: checksum of the resource table
+ * @has_iommu: flag to indicate if remote processor is behind an MMU
+ */
+struct rproc {
+ struct klist_node node;
+ struct iommu_domain *domain;
+ const char *name;
+ const char *firmware;
+ void *priv;
+ const struct rproc_ops *ops;
+ struct device dev;
+ const struct rproc_fw_ops *fw_ops;
+ atomic_t power;
+ unsigned int state;
+ struct mutex lock;
+ struct dentry *dbg_dir;
+ struct list_head traces;
+ int num_traces;
+ struct list_head carveouts;
+ struct list_head mappings;
+ struct completion firmware_loading_complete;
+ u32 bootaddr;
+ struct list_head rvdevs;
+ struct idr notifyids;
+ int index;
+ struct work_struct crash_handler;
+ unsigned crash_cnt;
+ struct completion crash_comp;
+ bool recovery_disabled;
+ int max_notifyid;
+ struct resource_table *table_ptr;
+ struct resource_table *cached_table;
+ u32 table_csum;
+ bool has_iommu;
+};
+
+/* we currently support only two vrings per rvdev */
+
+#define RVDEV_NUM_VRINGS 2
+
+/**
+ * struct rproc_vring - remoteproc vring state
+ * @va: virtual address
+ * @dma: dma address
+ * @len: length, in bytes
+ * @da: device address
+ * @align: vring alignment
+ * @notifyid: rproc-specific unique vring index
+ * @rvdev: remote vdev
+ * @vq: the virtqueue of this vring
+ */
+struct rproc_vring {
+ void *va;
+ dma_addr_t dma;
+ int len;
+ u32 da;
+ u32 align;
+ int notifyid;
+ struct rproc_vdev *rvdev;
+ struct virtqueue *vq;
+};
+
+/**
+ * struct rproc_vdev - remoteproc state for a supported virtio device
+ * @node: list node
+ * @rproc: the rproc handle
+ * @vdev: the virtio device
+ * @vring: the vrings for this vdev
+ * @rsc_offset: offset of the vdev's resource entry
+ */
+struct rproc_vdev {
+ struct list_head node;
+ struct rproc *rproc;
+ struct virtio_device vdev;
+ struct rproc_vring vring[RVDEV_NUM_VRINGS];
+ u32 rsc_offset;
+};
+
+struct rproc *rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len);
+void rproc_put(struct rproc *rproc);
+int rproc_add(struct rproc *rproc);
+int rproc_del(struct rproc *rproc);
+
+int rproc_boot(struct rproc *rproc);
+void rproc_shutdown(struct rproc *rproc);
+void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
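+
+/*
+ * Illustrative sketch (not part of the original header): typical
+ * registration flow in a platform driver's probe path. The device name,
+ * firmware name, 'my_rproc_ops' and 'struct my_priv' are examples only.
+ *
+ *   rproc = rproc_alloc(dev, "example-rproc", &my_rproc_ops,
+ *                       "example-fw.elf", sizeof(struct my_priv));
+ *   if (!rproc)
+ *     return -ENOMEM;
+ *
+ *   ret = rproc_add(rproc);
+ *   if (ret) {
+ *     rproc_put(rproc);
+ *     return ret;
+ *   }
+ *
+ * rproc_boot()/rproc_shutdown() may later be used to power the remote
+ * processor up and down, and rproc_report_crash() to trigger recovery.
+ */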
+
+static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
+{
+ return container_of(vdev, struct rproc_vdev, vdev);
+}
+
+static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+
+ return rvdev->rproc;
+}
+
+#endif /* REMOTEPROC_H */
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
new file mode 100644
index 000000000..5a0b64cf6
--- /dev/null
+++ b/include/linux/reservation.h
@@ -0,0 +1,142 @@
+/*
+ * Header file for reservations for dma-buf and ttm
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Copyright (C) 2012-2013 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _LINUX_RESERVATION_H
+#define _LINUX_RESERVATION_H
+
+#include <linux/ww_mutex.h>
+#include <linux/fence.h>
+#include <linux/slab.h>
+#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
+
+extern struct ww_class reservation_ww_class;
+extern struct lock_class_key reservation_seqcount_class;
+extern const char reservation_seqcount_string[];
+
+struct reservation_object_list {
+ struct rcu_head rcu;
+ u32 shared_count, shared_max;
+ struct fence __rcu *shared[];
+};
+
+struct reservation_object {
+ struct ww_mutex lock;
+ seqcount_t seq;
+
+ struct fence __rcu *fence_excl;
+ struct reservation_object_list __rcu *fence;
+ struct reservation_object_list *staged;
+};
+
+#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define reservation_object_assert_held(obj) \
+ lockdep_assert_held(&(obj)->lock.base)
+
+static inline void
+reservation_object_init(struct reservation_object *obj)
+{
+ ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+ __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
+ RCU_INIT_POINTER(obj->fence, NULL);
+ RCU_INIT_POINTER(obj->fence_excl, NULL);
+ obj->staged = NULL;
+}
+
+static inline void
+reservation_object_fini(struct reservation_object *obj)
+{
+ int i;
+ struct reservation_object_list *fobj;
+ struct fence *excl;
+
+ /*
+ * This object should be dead and all references to it must have
+ * been released, so there is no need for RCU protection here.
+ */
+ excl = rcu_dereference_protected(obj->fence_excl, 1);
+ if (excl)
+ fence_put(excl);
+
+ fobj = rcu_dereference_protected(obj->fence, 1);
+ if (fobj) {
+ for (i = 0; i < fobj->shared_count; ++i)
+ fence_put(rcu_dereference_protected(fobj->shared[i], 1));
+
+ kfree(fobj);
+ }
+ kfree(obj->staged);
+
+ ww_mutex_destroy(&obj->lock);
+}
+
+static inline struct reservation_object_list *
+reservation_object_get_list(struct reservation_object *obj)
+{
+ return rcu_dereference_protected(obj->fence,
+ reservation_object_held(obj));
+}
+
+static inline struct fence *
+reservation_object_get_excl(struct reservation_object *obj)
+{
+ return rcu_dereference_protected(obj->fence_excl,
+ reservation_object_held(obj));
+}
+
+int reservation_object_reserve_shared(struct reservation_object *obj);
+void reservation_object_add_shared_fence(struct reservation_object *obj,
+ struct fence *fence);
+
+void reservation_object_add_excl_fence(struct reservation_object *obj,
+ struct fence *fence);
+
+int reservation_object_get_fences_rcu(struct reservation_object *obj,
+ struct fence **pfence_excl,
+ unsigned *pshared_count,
+ struct fence ***pshared);
+
+long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
+ bool wait_all, bool intr,
+ unsigned long timeout);
+
+bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
+ bool test_all);
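+
+/*
+ * Illustrative sketch (not part of this header): basic usage of a
+ * reservation object. 'fence' is assumed to come from the caller's fence
+ * context, error handling is omitted, and the final call waits up to one
+ * second, interruptibly, for all fences to signal.
+ *
+ *   reservation_object_init(&obj);
+ *
+ *   ww_mutex_lock(&obj.lock, NULL);
+ *   reservation_object_add_excl_fence(&obj, fence);
+ *   ww_mutex_unlock(&obj.lock);
+ *
+ *   ret = reservation_object_wait_timeout_rcu(&obj, true, true, HZ);
+ */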
+
+#endif /* _LINUX_RESERVATION_H */
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
new file mode 100644
index 000000000..ce6b962ff
--- /dev/null
+++ b/include/linux/reset-controller.h
@@ -0,0 +1,54 @@
+#ifndef _LINUX_RESET_CONTROLLER_H_
+#define _LINUX_RESET_CONTROLLER_H_
+
+#include <linux/list.h>
+
+struct reset_controller_dev;
+
+/**
+ * struct reset_control_ops
+ *
+ * @reset: for self-deasserting resets, does all necessary
+ * things to reset the device
+ * @assert: manually assert the reset line, if supported
+ * @deassert: manually deassert the reset line, if supported
+ * @status: return the status of the reset line, if supported
+ */
+struct reset_control_ops {
+ int (*reset)(struct reset_controller_dev *rcdev, unsigned long id);
+ int (*assert)(struct reset_controller_dev *rcdev, unsigned long id);
+ int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id);
+ int (*status)(struct reset_controller_dev *rcdev, unsigned long id);
+};
+
+struct module;
+struct device_node;
+struct of_phandle_args;
+
+/**
+ * struct reset_controller_dev - reset controller entity that might
+ * provide multiple reset controls
+ * @ops: a pointer to device specific struct reset_control_ops
+ * @owner: kernel module of the reset controller driver
+ * @list: internal list of reset controller devices
+ * @of_node: corresponding device tree node as phandle target
+ * @of_reset_n_cells: number of cells in reset line specifiers
+ * @of_xlate: translation function to translate from specifier as found in the
+ * device tree to id as given to the reset control ops
+ * @nr_resets: number of reset controls in this reset controller device
+ */
+struct reset_controller_dev {
+ struct reset_control_ops *ops;
+ struct module *owner;
+ struct list_head list;
+ struct device_node *of_node;
+ int of_reset_n_cells;
+ int (*of_xlate)(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec);
+ unsigned int nr_resets;
+};
+
+int reset_controller_register(struct reset_controller_dev *rcdev);
+void reset_controller_unregister(struct reset_controller_dev *rcdev);
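+
+/*
+ * Illustrative sketch (not part of this header): a controller driver with
+ * manually asserted/deasserted reset lines, one per register bit. The
+ * example_* names and the register access are hypothetical.
+ *
+ *   static int example_reset_assert(struct reset_controller_dev *rcdev,
+ *                                   unsigned long id)
+ *   {
+ *     return example_write_bit(id, 1);
+ *   }
+ *
+ *   static int example_reset_deassert(struct reset_controller_dev *rcdev,
+ *                                     unsigned long id)
+ *   {
+ *     return example_write_bit(id, 0);
+ *   }
+ *
+ *   static struct reset_control_ops example_ops = {
+ *     .assert = example_reset_assert,
+ *     .deassert = example_reset_deassert,
+ *   };
+ *
+ *   static struct reset_controller_dev example_rcdev = {
+ *     .ops = &example_ops,
+ *     .owner = THIS_MODULE,
+ *     .nr_resets = 32,
+ *   };
+ *
+ * After setting .of_node (and, if needed, .of_reset_n_cells or a custom
+ * .of_xlate), the driver calls reset_controller_register(&example_rcdev).
+ */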
+
+#endif
diff --git a/include/linux/reset.h b/include/linux/reset.h
new file mode 100644
index 000000000..da5602bd7
--- /dev/null
+++ b/include/linux/reset.h
@@ -0,0 +1,97 @@
+#ifndef _LINUX_RESET_H_
+#define _LINUX_RESET_H_
+
+struct device;
+struct device_node;
+struct reset_control;
+
+#ifdef CONFIG_RESET_CONTROLLER
+
+int reset_control_reset(struct reset_control *rstc);
+int reset_control_assert(struct reset_control *rstc);
+int reset_control_deassert(struct reset_control *rstc);
+int reset_control_status(struct reset_control *rstc);
+
+struct reset_control *reset_control_get(struct device *dev, const char *id);
+void reset_control_put(struct reset_control *rstc);
+struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
+
+int __must_check device_reset(struct device *dev);
+
+static inline int device_reset_optional(struct device *dev)
+{
+ return device_reset(dev);
+}
+
+static inline struct reset_control *reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return reset_control_get(dev, id);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return devm_reset_control_get(dev, id);
+}
+
+struct reset_control *of_reset_control_get(struct device_node *node,
+ const char *id);
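+
+/*
+ * Illustrative consumer-side sketch (not part of this header); "ahb" is a
+ * hypothetical reset name taken from the consumer's device tree node.
+ *
+ *   rstc = devm_reset_control_get(&pdev->dev, "ahb");
+ *   if (IS_ERR(rstc))
+ *     return PTR_ERR(rstc);
+ *
+ *   reset_control_assert(rstc);
+ *   usleep_range(10, 20);
+ *   reset_control_deassert(rstc);
+ */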
+
+#else
+
+static inline int reset_control_reset(struct reset_control *rstc)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int reset_control_assert(struct reset_control *rstc)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int reset_control_deassert(struct reset_control *rstc)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline int reset_control_status(struct reset_control *rstc)
+{
+ WARN_ON(1);
+ return 0;
+}
+
+static inline void reset_control_put(struct reset_control *rstc)
+{
+ WARN_ON(1);
+}
+
+static inline int device_reset_optional(struct device *dev)
+{
+ return -ENOSYS;
+}
+
+static inline struct reset_control *reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct reset_control *devm_reset_control_get_optional(
+ struct device *dev, const char *id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline struct reset_control *of_reset_control_get(
+ struct device_node *node, const char *id)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+#endif /* CONFIG_RESET_CONTROLLER */
+
+#endif
diff --git a/include/linux/resource.h b/include/linux/resource.h
new file mode 100644
index 000000000..5bc3116e6
--- /dev/null
+++ b/include/linux/resource.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_RESOURCE_H
+#define _LINUX_RESOURCE_H
+
+#include <uapi/linux/resource.h>
+
+
+struct task_struct;
+
+int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
+int do_prlimit(struct task_struct *tsk, unsigned int resource,
+ struct rlimit *new_rlim, struct rlimit *old_rlim);
+
+#endif
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
new file mode 100644
index 000000000..e2bf63d88
--- /dev/null
+++ b/include/linux/resource_ext.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015, Intel Corporation
+ * Author: Jiang Liu <jiang.liu@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _LINUX_RESOURCE_EXT_H
+#define _LINUX_RESOURCE_EXT_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+/* Represent resource window for bridge devices */
+struct resource_win {
+ struct resource res; /* In master (CPU) address space */
+ resource_size_t offset; /* Translation offset for bridge */
+};
+
+/*
+ * Common resource list management data structure and interfaces to support
+ * ACPI, PNP and PCI host bridge etc.
+ */
+struct resource_entry {
+ struct list_head node;
+ struct resource *res; /* In master (CPU) address space */
+ resource_size_t offset; /* Translation offset for bridge */
+ struct resource __res; /* Default storage for res */
+};
+
+extern struct resource_entry *
+resource_list_create_entry(struct resource *res, size_t extra_size);
+extern void resource_list_free(struct list_head *head);
+
+static inline void resource_list_add(struct resource_entry *entry,
+ struct list_head *head)
+{
+ list_add(&entry->node, head);
+}
+
+static inline void resource_list_add_tail(struct resource_entry *entry,
+ struct list_head *head)
+{
+ list_add_tail(&entry->node, head);
+}
+
+static inline void resource_list_del(struct resource_entry *entry)
+{
+ list_del(&entry->node);
+}
+
+static inline void resource_list_free_entry(struct resource_entry *entry)
+{
+ kfree(entry);
+}
+
+static inline void
+resource_list_destroy_entry(struct resource_entry *entry)
+{
+ resource_list_del(entry);
+ resource_list_free_entry(entry);
+}
+
+#define resource_list_for_each_entry(entry, list) \
+ list_for_each_entry((entry), (list), node)
+
+#define resource_list_for_each_entry_safe(entry, tmp, list) \
+ list_for_each_entry_safe((entry), (tmp), (list), node)
+
+#endif /* _LINUX_RESOURCE_EXT_H */
diff --git a/include/linux/rfkill-gpio.h b/include/linux/rfkill-gpio.h
new file mode 100644
index 000000000..20bcb5549
--- /dev/null
+++ b/include/linux/rfkill-gpio.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+
+#ifndef __RFKILL_GPIO_H
+#define __RFKILL_GPIO_H
+
+#include <linux/types.h>
+#include <linux/rfkill.h>
+
+/**
+ * struct rfkill_gpio_platform_data - platform data for rfkill gpio device.
+ * for unused GPIOs, the expected value is -1.
+ * @name: name for the gpio rf kill instance
+ * @type: type of the rfkill switch (RFKILL_TYPE_*)
+ */
+
+struct rfkill_gpio_platform_data {
+ char *name;
+ enum rfkill_type type;
+};
+
+#endif /* __RFKILL_GPIO_H */
diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h
new file mode 100644
index 000000000..aca36bc83
--- /dev/null
+++ b/include/linux/rfkill-regulator.h
@@ -0,0 +1,48 @@
+/*
+ * rfkill-regulator.c - Regulator consumer driver for rfkill
+ *
+ * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com>
+ * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_RFKILL_REGULATOR_H
+#define __LINUX_RFKILL_REGULATOR_H
+
+/*
+ * Use "vrfkill" as supply id when declaring the regulator consumer:
+ *
+ * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = {
+ * { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" },
+ * };
+ *
+ * If you have several regulator-driven rfkill devices, you can append a numerical id to
+ * .dev_name as done above, and use the same id when declaring the platform
+ * device:
+ *
+ * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = {
+ * .name = "ezx-bluetooth",
+ * .type = RFKILL_TYPE_BLUETOOTH,
+ * };
+ *
+ * static struct platform_device a910_rfkill = {
+ * .name = "rfkill-regulator",
+ * .id = 0,
+ * .dev = {
+ * .platform_data = &ezx_rfkill_bt_data,
+ * },
+ * };
+ */
+
+#include <linux/rfkill.h>
+
+struct rfkill_regulator_platform_data {
+ char *name; /* the name for the rfkill switch */
+ enum rfkill_type type; /* the type as specified in rfkill.h */
+};
+
+#endif /* __LINUX_RFKILL_REGULATOR_H */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
new file mode 100644
index 000000000..d9010789b
--- /dev/null
+++ b/include/linux/rfkill.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2006 - 2007 Ivo van Doorn
+ * Copyright (C) 2007 Dmitry Torokhov
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef __RFKILL_H
+#define __RFKILL_H
+
+#include <uapi/linux/rfkill.h>
+
+/* don't allow anyone to use these in the kernel */
+enum rfkill_user_states {
+ RFKILL_USER_STATE_SOFT_BLOCKED = RFKILL_STATE_SOFT_BLOCKED,
+ RFKILL_USER_STATE_UNBLOCKED = RFKILL_STATE_UNBLOCKED,
+ RFKILL_USER_STATE_HARD_BLOCKED = RFKILL_STATE_HARD_BLOCKED,
+};
+#undef RFKILL_STATE_SOFT_BLOCKED
+#undef RFKILL_STATE_UNBLOCKED
+#undef RFKILL_STATE_HARD_BLOCKED
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+
+struct device;
+/* this is opaque */
+struct rfkill;
+
+/**
+ * struct rfkill_ops - rfkill driver methods
+ *
+ * @poll: poll the rfkill block state(s) -- only assign this method
+ * when you need polling. When called, simply call one of the
+ * rfkill_set{,_hw,_sw}_state family of functions. If the hw
+ * is getting unblocked you need to take into account the return
+ * value of those functions to make sure the software block is
+ * properly used.
+ * @query: query the rfkill block state(s) and call exactly one of the
+ * rfkill_set{,_hw,_sw}_state family of functions. Assign this
+ * method if input events can cause hardware state changes to make
+ * the rfkill core query your driver before setting a requested
+ * block.
+ * @set_block: turn the transmitter on (blocked == false) or off
+ * (blocked == true) -- ignore and return 0 when hard blocked.
+ * This callback must be assigned.
+ */
+struct rfkill_ops {
+ void (*poll)(struct rfkill *rfkill, void *data);
+ void (*query)(struct rfkill *rfkill, void *data);
+ int (*set_block)(void *data, bool blocked);
+};
+
+#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
+/**
+ * rfkill_alloc - allocate rfkill structure
+ * @name: name of the struct -- the string is not copied internally
+ * @parent: device that has rf switch on it
+ * @type: type of the switch (RFKILL_TYPE_*)
+ * @ops: rfkill methods
+ * @ops_data: data passed to each method
+ *
+ * This function should be called by the transmitter driver to allocate an
+ * rfkill structure. Returns %NULL on failure.
+ */
+struct rfkill * __must_check rfkill_alloc(const char *name,
+ struct device *parent,
+ const enum rfkill_type type,
+ const struct rfkill_ops *ops,
+ void *ops_data);
+
+/**
+ * rfkill_register - Register a rfkill structure.
+ * @rfkill: rfkill structure to be registered
+ *
+ * This function should be called by the transmitter driver to register
+ * the rfkill structure. Before calling this function the driver needs
+ * to be ready to service method calls from rfkill.
+ *
+ * If rfkill_init_sw_state() is not called before registration,
+ * set_block() will be called to initialize the software blocked state
+ * to a default value.
+ *
+ * If the hardware blocked state is not set before registration,
+ * it is assumed to be unblocked.
+ */
+int __must_check rfkill_register(struct rfkill *rfkill);
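+
+/*
+ * Illustrative sketch (not part of this header): a minimal driver that
+ * allocates and registers an rfkill instance. The set_block handler, the
+ * my_* names and 'priv' are hypothetical.
+ *
+ *   static int my_set_block(void *data, bool blocked)
+ *   {
+ *     return my_radio_set_power(data, !blocked);
+ *   }
+ *
+ *   static const struct rfkill_ops my_rfkill_ops = {
+ *     .set_block = my_set_block,
+ *   };
+ *
+ *   rfkill = rfkill_alloc("my-wlan", dev, RFKILL_TYPE_WLAN,
+ *                         &my_rfkill_ops, priv);
+ *   if (!rfkill)
+ *     return -ENOMEM;
+ *
+ *   err = rfkill_register(rfkill);
+ *   if (err) {
+ *     rfkill_destroy(rfkill);
+ *     return err;
+ *   }
+ */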
+
+/**
+ * rfkill_pause_polling(struct rfkill *rfkill)
+ *
+ * Pause polling -- say transmitter is off for other reasons.
+ * NOTE: not necessary for suspend/resume -- in that case the
+ * core stops polling anyway
+ */
+void rfkill_pause_polling(struct rfkill *rfkill);
+
+/**
+ * rfkill_resume_polling(struct rfkill *rfkill)
+ *
+ * Resume polling.
+ * NOTE: not necessary for suspend/resume -- in that case the
+ * core stops polling anyway
+ */
+void rfkill_resume_polling(struct rfkill *rfkill);
+
+
+/**
+ * rfkill_unregister - Unregister a rfkill structure.
+ * @rfkill: rfkill structure to be unregistered
+ *
+ * This function should be called by the network driver during device
+ * teardown to destroy rfkill structure. Until it returns, the driver
+ * needs to be able to service method calls.
+ */
+void rfkill_unregister(struct rfkill *rfkill);
+
+/**
+ * rfkill_destroy - free rfkill structure
+ * @rfkill: rfkill structure to be destroyed
+ *
+ * Destroys the rfkill structure.
+ */
+void rfkill_destroy(struct rfkill *rfkill);
+
+/**
+ * rfkill_set_hw_state - Set the internal rfkill hardware block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current hardware block state to set
+ *
+ * rfkill drivers that get events when the hard-blocked state changes
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of the current state. They should also use this after
+ * resume if the state could have changed.
+ *
+ * You need not (but may) call this function if poll_state is assigned.
+ *
+ * This function can be called in any context, even from within rfkill
+ * callbacks.
+ *
+ * The function returns the combined block state (true if transmitter
+ * should be blocked) so that drivers need not keep track of the soft
+ * block state -- which they might not be able to.
+ */
+bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
+
+/**
+ * rfkill_set_sw_state - Set the internal rfkill software block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current software block state to set
+ *
+ * rfkill drivers that get events when the soft-blocked state changes
+ * (yes, some platforms directly act on input but allow changing again)
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of the current state.
+ *
+ * Drivers should also call this function after resume if the state has
+ * been changed by the user. This only makes sense for "persistent"
+ * devices (see rfkill_init_sw_state()).
+ *
+ * This function can be called in any context, even from within rfkill
+ * callbacks.
+ *
+ * The function returns the combined block state (true if transmitter
+ * should be blocked).
+ */
+bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
+
+/**
+ * rfkill_init_sw_state - Initialize persistent software block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @blocked: the current software block state to set
+ *
+ * rfkill drivers that preserve their software block state over power off
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of their initial state. It should only be used before
+ * registration.
+ *
+ * In addition, it marks the device as "persistent", an attribute which
+ * can be read by userspace. Persistent devices are expected to preserve
+ * their own state when suspended.
+ */
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
+
+/**
+ * rfkill_set_states - Set the internal rfkill block states
+ * @rfkill: pointer to the rfkill class to modify.
+ * @sw: the current software block state to set
+ * @hw: the current hardware block state to set
+ *
+ * This function can be called in any context, even from within rfkill
+ * callbacks.
+ */
+void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
+
+/**
+ * rfkill_blocked - query rfkill block
+ *
+ * @rfkill: rfkill struct to query
+ */
+bool rfkill_blocked(struct rfkill *rfkill);
+#else /* !RFKILL */
+static inline struct rfkill * __must_check
+rfkill_alloc(const char *name,
+ struct device *parent,
+ const enum rfkill_type type,
+ const struct rfkill_ops *ops,
+ void *ops_data)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline int __must_check rfkill_register(struct rfkill *rfkill)
+{
+ if (rfkill == ERR_PTR(-ENODEV))
+ return 0;
+ return -EINVAL;
+}
+
+static inline void rfkill_pause_polling(struct rfkill *rfkill)
+{
+}
+
+static inline void rfkill_resume_polling(struct rfkill *rfkill)
+{
+}
+
+static inline void rfkill_unregister(struct rfkill *rfkill)
+{
+}
+
+static inline void rfkill_destroy(struct rfkill *rfkill)
+{
+}
+
+static inline bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
+{
+ return blocked;
+}
+
+static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
+{
+ return blocked;
+}
+
+static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+}
+
+static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
+{
+}
+
+static inline bool rfkill_blocked(struct rfkill *rfkill)
+{
+ return false;
+}
+#endif /* RFKILL || RFKILL_MODULE */
+
+
+#ifdef CONFIG_RFKILL_LEDS
+/**
+ * rfkill_get_led_trigger_name - Get the LED trigger name for the button's LED.
+ * @rfkill: rfkill struct
+ *
+ * This function might return a NULL pointer if registering of the
+ * LED trigger failed. Use this as "default_trigger" for the LED.
+ */
+const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
+
+/**
+ * rfkill_set_led_trigger_name -- set the LED trigger name
+ * @rfkill: rfkill struct
+ * @name: LED trigger name
+ *
+ * This function sets the LED trigger name of the radio LED
+ * trigger that rfkill creates. It is optional, but if called
+ * must be called before rfkill_register() to be effective.
+ */
+void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name);
+#else
+static inline const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
+{
+ return NULL;
+}
+
+static inline void
+rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
+{
+}
+#endif
+
+#endif /* RFKILL_H */
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
new file mode 100644
index 000000000..843ceca9a
--- /dev/null
+++ b/include/linux/rhashtable.h
@@ -0,0 +1,822 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Code partially derived from nft_hash
+ * Rewritten with rehash code from br_multicast plus single list
+ * pointer as suggested by Josh Triplett
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_RHASHTABLE_H
+#define _LINUX_RHASHTABLE_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/jhash.h>
+#include <linux/list_nulls.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+
+/*
+ * The end of the chain is marked with a special nulls marks which has
+ * the following format:
+ *
+ * +-------+-----------------------------------------------------+-+
+ * | Base | Hash |1|
+ * +-------+-----------------------------------------------------+-+
+ *
+ * Base (4 bits) : Reserved to distinguish between multiple tables.
+ * Specified via &struct rhashtable_params.nulls_base.
+ * Hash (27 bits): Full hash (unmasked) of first element added to bucket
+ * 1 (1 bit) : Nulls marker (always set)
+ *
+ * The remaining bits of the next pointer remain unused for now.
+ */
+#define RHT_BASE_BITS 4
+#define RHT_HASH_BITS 27
+#define RHT_BASE_SHIFT RHT_HASH_BITS
+
+/* Base bits plus 1 bit for nulls marker */
+#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
+
+struct rhash_head {
+ struct rhash_head __rcu *next;
+};
+
+/**
+ * struct bucket_table - Table of hash buckets
+ * @size: Number of hash buckets
+ * @rehash: Current bucket being rehashed
+ * @hash_rnd: Random seed to fold into hash
+ * @locks_mask: Mask to apply before accessing locks[]
+ * @locks: Array of spinlocks protecting individual buckets
+ * @walkers: List of active walkers
+ * @rcu: RCU structure for freeing the table
+ * @future_tbl: Table under construction during rehashing
+ * @buckets: size * hash buckets
+ */
+struct bucket_table {
+ unsigned int size;
+ unsigned int rehash;
+ u32 hash_rnd;
+ unsigned int locks_mask;
+ spinlock_t *locks;
+ struct list_head walkers;
+ struct rcu_head rcu;
+
+ struct bucket_table __rcu *future_tbl;
+
+ struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+};
+
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
+typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
+
+struct rhashtable;
+
+/**
+ * struct rhashtable_params - Hash table construction parameters
+ * @nelem_hint: Hint on number of elements, should be 75% of desired size
+ * @key_len: Length of key
+ * @key_offset: Offset of key in struct to be hashed
+ * @head_offset: Offset of rhash_head in struct to be hashed
+ * @insecure_max_entries: Maximum number of entries (may be exceeded)
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
+ * @nulls_base: Base value to generate nulls marker
+ * @insecure_elasticity: Set to true to disable chain length checks
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
+ * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
+ */
+struct rhashtable_params {
+ size_t nelem_hint;
+ size_t key_len;
+ size_t key_offset;
+ size_t head_offset;
+ unsigned int insecure_max_entries;
+ unsigned int max_size;
+ unsigned int min_size;
+ u32 nulls_base;
+ bool insecure_elasticity;
+ bool automatic_shrinking;
+ size_t locks_mul;
+ rht_hashfn_t hashfn;
+ rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
+};
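+
+/*
+ * Illustrative sketch (not part of this header): parameters for hashing a
+ * hypothetical object keyed by a u32 id.
+ *
+ *   struct example_obj {
+ *     u32 id;
+ *     struct rhash_head node;
+ *   };
+ *
+ *   static const struct rhashtable_params example_params = {
+ *     .key_len = sizeof(u32),
+ *     .key_offset = offsetof(struct example_obj, id),
+ *     .head_offset = offsetof(struct example_obj, node),
+ *     .automatic_shrinking = true,
+ *   };
+ */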
+
+/**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+ * @nelems: Number of elements in table
+ * @key_len: Key length for hashfn
+ * @elasticity: Maximum chain length before rehash
+ * @p: Configuration parameters
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
+ */
+struct rhashtable {
+ struct bucket_table __rcu *tbl;
+ atomic_t nelems;
+ unsigned int key_len;
+ unsigned int elasticity;
+ struct rhashtable_params p;
+ struct work_struct run_work;
+ struct mutex mutex;
+ spinlock_t lock;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @tbl: The table that we were walking over
+ */
+struct rhashtable_walker {
+ struct list_head list;
+ struct bucket_table *tbl;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhashtable_walker *walker;
+ unsigned int slot;
+ unsigned int skip;
+};
+
+static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
+{
+ return NULLS_MARKER(ht->p.nulls_base + hash);
+}
+
+#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
+ ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+
+static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
+{
+ return ((unsigned long) ptr & 1);
+}
+
+static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
+{
+ return ((unsigned long) ptr) >> 1;
+}
+
+static inline void *rht_obj(const struct rhashtable *ht,
+ const struct rhash_head *he)
+{
+ return (char *)he - ht->p.head_offset;
+}
+
+static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+}
+
+static inline unsigned int rht_key_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const void *key, const struct rhashtable_params params)
+{
+ unsigned int hash;
+
+ /* params must be equal to ht->p if it isn't constant. */
+ if (!__builtin_constant_p(params.key_len))
+ hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+ else if (params.key_len) {
+ unsigned int key_len = params.key_len;
+
+ if (params.hashfn)
+ hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ else if (key_len & (sizeof(u32) - 1))
+ hash = jhash(key, key_len, tbl->hash_rnd);
+ else
+ hash = jhash2(key, key_len / sizeof(u32),
+ tbl->hash_rnd);
+ } else {
+ unsigned int key_len = ht->p.key_len;
+
+ if (params.hashfn)
+ hash = params.hashfn(key, key_len, tbl->hash_rnd);
+ else
+ hash = jhash(key, key_len, tbl->hash_rnd);
+ }
+
+ return rht_bucket_index(tbl, hash);
+}
+
+static inline unsigned int rht_head_hashfn(
+ struct rhashtable *ht, const struct bucket_table *tbl,
+ const struct rhash_head *he, const struct rhashtable_params params)
+{
+ const char *ptr = rht_obj(ht, he);
+
+ return likely(params.obj_hashfn) ?
+ rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
+ ht->p.key_len,
+ tbl->hash_rnd)) :
+ rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_75(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Expand table when exceeding 75% load */
+ return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_shrink_below_30(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ /* Shrink table beneath 30% load */
+ return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+ tbl->size > ht->p.min_size;
+}
+
+/**
+ * rht_grow_above_100 - returns true if nelems > table-size
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_100(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ return atomic_read(&ht->nelems) > tbl->size &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
+}
+
+/**
+ * rht_grow_above_max - returns true if table is above maximum
+ * @ht: hash table
+ * @tbl: current table
+ */
+static inline bool rht_grow_above_max(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
+{
+ return ht->p.insecure_max_entries &&
+ atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+}
+
+/* The bucket lock is selected based on the hash and protects mutations
+ * on a group of hash buckets.
+ *
+ * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
+ * a single lock always covers both buckets which may both contain
+ * entries which link to the same bucket of the old table during resizing.
+ * This simplifies the locking, as locking the bucket in both tables
+ * during a resize always guarantees protection.
+ *
+ * IMPORTANT: When holding the bucket lock of both the old and new table
+ * during expansions and shrinking, the old bucket lock must always be
+ * acquired first.
+ */
+static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
+ unsigned int hash)
+{
+ return &tbl->locks[hash & tbl->locks_mask];
+}
+
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_rht_mutex_is_held(struct rhashtable *ht);
+int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
+#else
+static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
+{
+ return 1;
+}
+
+static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
+ u32 hash)
+{
+ return 1;
+}
+#endif /* CONFIG_PROVE_LOCKING */
+
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
+
+int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj,
+ struct bucket_table *old_tbl);
+int rhashtable_insert_rehash(struct rhashtable *ht);
+
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
+void rhashtable_walk_exit(struct rhashtable_iter *iter);
+int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
+
+void rhashtable_free_and_destroy(struct rhashtable *ht,
+ void (*free_fn)(void *ptr, void *arg),
+ void *arg);
+void rhashtable_destroy(struct rhashtable *ht);
+
+#define rht_dereference(p, ht) \
+ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
+
+#define rht_dereference_rcu(p, ht) \
+ rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
+
+#define rht_dereference_bucket(p, tbl, hash) \
+ rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
+
+#define rht_dereference_bucket_rcu(p, tbl, hash) \
+ rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
+
+#define rht_entry(tpos, pos, member) \
+ ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
+
+/**
+ * rht_for_each_continue - continue iterating over hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ */
+#define rht_for_each_continue(pos, head, tbl, hash) \
+ for (pos = rht_dereference_bucket(head, tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rht_dereference_bucket((pos)->next, tbl, hash))
+
+/**
+ * rht_for_each - iterate over hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ */
+#define rht_for_each(pos, tbl, hash) \
+ rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_continue - continue iterating over hash chain
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ */
+#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
+ for (pos = rht_dereference_bucket(head, tbl, hash); \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = rht_dereference_bucket((pos)->next, tbl, hash))
+
+/**
+ * rht_for_each_entry - iterate over hash chain of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ */
+#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
+ tbl, hash, member)
+
+/**
+ * rht_for_each_entry_safe - safely iterate over hash chain of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @next: the &struct rhash_head to use as next in loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive allows for the looped code to
+ * remove the loop cursor from the list.
+ */
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
+ for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = next, \
+ next = !rht_is_a_nulls(pos) ? \
+ rht_dereference_bucket(pos->next, tbl, hash) : NULL)
+
+/**
+ * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
+ for (({barrier(); }), \
+ pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rcu_dereference_raw(pos->next))
+
+/**
+ * rht_for_each_rcu - iterate over rcu hash chain
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu(pos, tbl, hash) \
+ rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+
+/**
+ * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @head: the previous &struct rhash_head to continue from
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+ for (({barrier(); }), \
+ pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
+ pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
+
+/**
+ * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhash_head to use as a loop cursor.
+ * @tbl: the &struct bucket_table
+ * @hash: the hash value / bucket index
+ * @member: name of the &struct rhash_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu mutation primitives such as rhashtable_insert() as long as the
+ * traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
+ rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+ tbl, hash, member)
+
+static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ struct rhashtable *ht = arg->ht;
+ const char *ptr = obj;
+
+ return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, inlined version
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Returns the first entry whose key matches, or NULL if none is found.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ const struct bucket_table *tbl;
+ struct rhash_head *he;
+ unsigned int hash;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+restart:
+ hash = rht_key_hashfn(ht, tbl, key, params);
+ rht_for_each_rcu(he, tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ rcu_read_unlock();
+ return rht_obj(ht, he);
+ }
+
+ /* Ensure we see any new tables. */
+ smp_rmb();
+
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(tbl))
+ goto restart;
+ rcu_read_unlock();
+
+ return NULL;
+}
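+
+/*
+ * Illustrative sketch (not part of this header): looking up an object by
+ * key, continuing the example_obj/example_params sketch above; 'ht' is
+ * assumed to be an already initialized table.
+ *
+ *   struct example_obj *obj;
+ *   u32 key = 42;
+ *
+ *   obj = rhashtable_lookup_fast(&ht, &key, example_params);
+ *   if (obj)
+ *     pr_debug("found object with id %u\n", obj->id);
+ */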
+
+/* Internal function, please use rhashtable_insert_fast() instead */
+static inline int __rhashtable_insert_fast(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct rhashtable_compare_arg arg = {
+ .ht = ht,
+ .key = key,
+ };
+ struct bucket_table *tbl, *new_tbl;
+ struct rhash_head *head;
+ spinlock_t *lock;
+ unsigned int elasticity;
+ unsigned int hash;
+ int err;
+
+restart:
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* All insertions must grab the oldest table containing
+ * the hashed bucket that is yet to be rehashed.
+ */
+ for (;;) {
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
+
+ if (tbl->rehash <= hash)
+ break;
+
+ spin_unlock_bh(lock);
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ }
+
+ new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ if (unlikely(new_tbl)) {
+ err = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ if (err == -EAGAIN)
+ goto slow_path;
+ goto out;
+ }
+
+ err = -E2BIG;
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto out;
+
+ if (unlikely(rht_grow_above_100(ht, tbl))) {
+slow_path:
+ spin_unlock_bh(lock);
+ err = rhashtable_insert_rehash(ht);
+ rcu_read_unlock();
+ if (err)
+ return err;
+
+ goto restart;
+ }
+
+ err = -EEXIST;
+ elasticity = ht->elasticity;
+ rht_for_each(head, tbl, hash) {
+ if (key &&
+ unlikely(!(params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head)))))
+ goto out;
+ if (!--elasticity)
+ goto slow_path;
+ }
+
+ err = 0;
+
+ head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+
+ RCU_INIT_POINTER(obj->next, head);
+
+ rcu_assign_pointer(tbl->buckets[hash], obj);
+
+ atomic_inc(&ht->nelems);
+ if (rht_grow_above_75(ht, tbl))
+ schedule_work(&ht->run_work);
+
+out:
+ spin_unlock_bh(lock);
+ rcu_read_unlock();
+
+ return err;
+}
+
+/**
+ * rhashtable_insert_fast - insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per-bucket spinlock to protect against concurrent mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark (75% residency, as checked by rht_grow_above_75()).
+ */
+static inline int rhashtable_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_insert_fast(ht, NULL, obj, params);
+}
+
+/**
+ * rhashtable_lookup_insert_fast - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * This lookup function may only be used for a fixed key hash table (key_len
+ * parameter set). It will BUG() if used inappropriately.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark (75% residency, as checked by rht_grow_above_75()).
+ */
+static inline int rhashtable_lookup_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(ht, obj);
+
+ BUG_ON(ht->p.obj_hashfn);
+
+ return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
+ params);
+}
+
+/**
+ * rhashtable_lookup_insert_key - search and insert object to hash table
+ * with explicit key
+ * @ht: hash table
+ * @key: key
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Locks down the bucket chain in both the old and new table if a resize
+ * is in progress to ensure that writers can't remove from the old table
+ * and can't insert to the new table during the atomic operation of search
+ * and insertion. Searches for duplicates in both the old and new table if
+ * a resize is in progress.
+ *
+ * Lookups may occur in parallel with hashtable mutations and resizing.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark (75% residency, as checked by rht_grow_above_75()).
+ *
+ * Returns zero on success.
+ */
+static inline int rhashtable_lookup_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ return __rhashtable_insert_fast(ht, key, obj, params);
+}
+
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
+ struct rhashtable *ht, struct bucket_table *tbl,
+ struct rhash_head *obj, const struct rhashtable_params params)
+{
+ struct rhash_head __rcu **pprev;
+ struct rhash_head *he;
+ spinlock_t *lock;
+ unsigned int hash;
+ int err = -ENOENT;
+
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+
+ spin_lock_bh(lock);
+
+ pprev = &tbl->buckets[hash];
+ rht_for_each(he, tbl, hash) {
+ if (he != obj) {
+ pprev = &he->next;
+ continue;
+ }
+
+ rcu_assign_pointer(*pprev, obj->next);
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(lock);
+
+ return err;
+}
+
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table if permitted when residency drops
+ * below 30% (see the automatic_shrinking parameter of rhashtable_init()).
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ struct bucket_table *tbl;
+ int err;
+
+ rcu_read_lock();
+
+ tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* Because we have already taken (and released) the bucket
+ * lock in old_tbl, if we find that future_tbl is not yet
+ * visible then the entry is guaranteed to still be in
+ * the old tbl if it exists.
+ */
+ while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
+ ;
+
+ if (err)
+ goto out;
+
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+
+out:
+ rcu_read_unlock();
+
+ return err;
+}
+
+#endif /* _LINUX_RHASHTABLE_H */
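
To show how the fast-path helpers above fit together, here is a minimal usage sketch; struct test_obj, test_params and example() are hypothetical names, and rhashtable_init()/rhashtable_destroy() are the set-up/tear-down routines declared earlier in this header:

#include <linux/rhashtable.h>
#include <linux/slab.h>

struct test_obj {
	int			key;
	struct rhash_head	node;
};

static const struct rhashtable_params test_params = {
	.key_len	= sizeof(int),
	.key_offset	= offsetof(struct test_obj, key),
	.head_offset	= offsetof(struct test_obj, node),
	.automatic_shrinking = true,
};

static int example(void)
{
	struct rhashtable ht;
	struct test_obj *obj, *found;
	int key = 42;
	int err;

	err = rhashtable_init(&ht, &test_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		err = -ENOMEM;
		goto out;
	}
	obj->key = key;

	/* Passing the params struct by value lets the fast paths inline the hash. */
	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
	if (err) {
		kfree(obj);
		goto out;
	}

	rcu_read_lock();
	found = rhashtable_lookup_fast(&ht, &key, test_params);
	rcu_read_unlock();

	if (found) {
		rhashtable_remove_fast(&ht, &found->node, test_params);
		kfree(found);	/* concurrent lookups would require kfree_rcu() instead */
	}
out:
	rhashtable_destroy(&ht);
	return err;
}
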
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
new file mode 100644
index 000000000..e2c13cd86
--- /dev/null
+++ b/include/linux/ring_buffer.h
@@ -0,0 +1,201 @@
+#ifndef _LINUX_RING_BUFFER_H
+#define _LINUX_RING_BUFFER_H
+
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h>
+#include <linux/poll.h>
+
+struct ring_buffer;
+struct ring_buffer_iter;
+
+/*
+ * Don't refer to this struct directly, use functions below.
+ */
+struct ring_buffer_event {
+ kmemcheck_bitfield_begin(bitfield);
+ u32 type_len:5, time_delta:27;
+ kmemcheck_bitfield_end(bitfield);
+
+ u32 array[];
+};
+
+/**
+ * enum ring_buffer_type - internal ring buffer types
+ *
+ * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
+ * If time_delta is 0:
+ * array is ignored
+ * size is variable depending on how much
+ * padding is needed
+ * If time_delta is non zero:
+ * array[0] holds the actual length
+ * size = 4 + length (bytes)
+ *
+ * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
+ * array[0] = time delta (28 .. 59)
+ * size = 8 bytes
+ *
+ * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
+ * array[0] = tv_nsec
+ * array[1..2] = tv_sec
+ * size = 16 bytes
+ *
+ * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
+ * Data record
+ * If type_len is zero:
+ * array[0] holds the actual length
+ * array[1..(length+3)/4] holds data
+ * size = 4 + length (bytes)
+ * else
+ * length = type_len << 2
+ * array[0..(length+3)/4-1] holds data
+ * size = 4 + length (bytes)
+ */
+enum ring_buffer_type {
+ RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
+ RINGBUF_TYPE_PADDING,
+ RINGBUF_TYPE_TIME_EXTEND,
+ /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
+ RINGBUF_TYPE_TIME_STAMP,
+};
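
The size rules documented above can be condensed into a tiny illustrative helper. It peeks at the bitfields only to demonstrate the encoding of data records; real users should stick to ring_buffer_event_length()/ring_buffer_event_data() declared below, and example_event_size() is a hypothetical name:

/* Sketch only: applies to data records (type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX). */
static inline unsigned int example_event_size(struct ring_buffer_event *ev)
{
	unsigned int length;

	if (ev->type_len)		/* e.g. type_len == 3 encodes a 12-byte payload */
		length = ev->type_len << 2;
	else				/* extended record: length is kept in array[0] */
		length = ev->array[0];

	return 4 + length;		/* 4-byte header plus payload, as documented above */
}
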
+
+unsigned ring_buffer_event_length(struct ring_buffer_event *event);
+void *ring_buffer_event_data(struct ring_buffer_event *event);
+
+/*
+ * ring_buffer_discard_commit will remove an event that has not
+ * been committed yet. If this is used, then ring_buffer_unlock_commit
+ * must not be called on the discarded event. This function
+ * will try to remove the event from the ring buffer completely
+ * if another event has not been written after it.
+ *
+ * Example use:
+ *
+ * if (some_condition)
+ * ring_buffer_discard_commit(buffer, event);
+ * else
+ * ring_buffer_unlock_commit(buffer, event);
+ */
+void ring_buffer_discard_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+
+/*
+ * size is in bytes for each per CPU buffer.
+ */
+struct ring_buffer *
+__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc(size, flags) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc((size), (flags), &__key); \
+})
+
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
+int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+ struct file *filp, poll_table *poll_table);
+
+
+#define RING_BUFFER_ALL_CPUS -1
+
+void ring_buffer_free(struct ring_buffer *buffer);
+
+int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
+
+void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+ unsigned long length);
+int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+int ring_buffer_write(struct ring_buffer *buffer,
+ unsigned long length, void *data);
+
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ unsigned long *lost_events);
+struct ring_buffer_event *
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ unsigned long *lost_events);
+
+struct ring_buffer_iter *
+ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
+void ring_buffer_read_prepare_sync(void);
+void ring_buffer_read_start(struct ring_buffer_iter *iter);
+void ring_buffer_read_finish(struct ring_buffer_iter *iter);
+
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
+struct ring_buffer_event *
+ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
+void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
+int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
+
+unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
+
+void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_reset(struct ring_buffer *buffer);
+
+#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
+int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+ struct ring_buffer *buffer_b, int cpu);
+#else
+static inline int
+ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+ struct ring_buffer *buffer_b, int cpu)
+{
+ return -ENODEV;
+}
+#endif
+
+int ring_buffer_empty(struct ring_buffer *buffer);
+int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
+
+void ring_buffer_record_disable(struct ring_buffer *buffer);
+void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_off(struct ring_buffer *buffer);
+void ring_buffer_record_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_on(struct ring_buffer *buffer);
+void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+
+u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_entries(struct ring_buffer *buffer);
+unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
+
+u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+ int cpu, u64 *ts);
+void ring_buffer_set_clock(struct ring_buffer *buffer,
+ u64 (*clock)(void));
+
+size_t ring_buffer_page_len(void *page);
+
+
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
+int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+ size_t len, int cpu, int full);
+
+struct trace_seq;
+
+int ring_buffer_print_entry_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_seq *s);
+
+enum ring_buffer_flags {
+ RB_FL_OVERWRITE = 1 << 0,
+};
+
+#endif /* _LINUX_RING_BUFFER_H */
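
A brief producer/consumer sketch of the API above; example(), my_record and the values written are purely illustrative, and the events are per CPU, so the consumer side simply reads back from CPU 0:

#include <linux/kernel.h>
#include <linux/ring_buffer.h>

struct my_record {
	u64	ts;
	int	val;
};

static void example(void)
{
	struct ring_buffer *rb;
	struct ring_buffer_event *event;
	struct my_record *rec;
	u64 ts;

	/* One page per CPU; oldest events are overwritten when a buffer fills. */
	rb = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!rb)
		return;

	/* Producer: reserve space, fill it in, commit. */
	event = ring_buffer_lock_reserve(rb, sizeof(*rec));
	if (event) {
		rec = ring_buffer_event_data(event);
		rec->ts = 0;
		rec->val = 42;
		ring_buffer_unlock_commit(rb, event);
	}

	/* Consumer: pop the oldest event on CPU 0, if any. */
	event = ring_buffer_consume(rb, 0, &ts, NULL);
	if (event) {
		rec = ring_buffer_event_data(event);
		pr_info("consumed val=%d\n", rec->val);
	}

	ring_buffer_free(rb);
}
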
diff --git a/include/linux/rio.h b/include/linux/rio.h
new file mode 100644
index 000000000..6bda06f21
--- /dev/null
+++ b/include/linux/rio.h
@@ -0,0 +1,485 @@
+/*
+ * RapidIO interconnect services
+ * (RapidIO Interconnect Specification, http://www.rapidio.org)
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef LINUX_RIO_H
+#define LINUX_RIO_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/rio_regs.h>
+#include <linux/mod_devicetable.h>
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+#include <linux/dmaengine.h>
+#endif
+
+#define RIO_NO_HOPCOUNT -1
+#define RIO_INVALID_DESTID 0xffff
+
+#define RIO_MAX_MPORTS 8
+#define RIO_MAX_MPORT_RESOURCES 16
+#define RIO_MAX_DEV_RESOURCES 16
+#define RIO_MAX_MPORT_NAME 40
+
+#define RIO_GLOBAL_TABLE 0xff /* Indicates access of a switch's
+ global routing table if it
+ has multiple (or per port)
+ tables */
+
+#define RIO_INVALID_ROUTE 0xff /* Indicates that a route table
+ entry is invalid (no route
+ exists for the device ID) */
+
+#define RIO_MAX_ROUTE_ENTRIES(size) (size ? (1 << 16) : (1 << 8))
+#define RIO_ANY_DESTID(size) (size ? 0xffff : 0xff)
+
+#define RIO_MAX_MBOX 4
+#define RIO_MAX_MSG_SIZE 0x1000
+
+/*
+ * Error values that may be returned by RIO functions.
+ */
+#define RIO_SUCCESSFUL 0x00
+#define RIO_BAD_SIZE 0x81
+
+/*
+ * For RIO devices, the region numbers are assigned this way:
+ *
+ * 0 RapidIO outbound doorbells
+ * 1-15 RapidIO memory regions
+ *
+ * For RIO master ports, the region numbers are assigned this way:
+ *
+ * 0 RapidIO inbound doorbells
+ * 1 RapidIO inbound mailboxes
+ * 2 RapidIO outbound mailboxes
+ */
+#define RIO_DOORBELL_RESOURCE 0
+#define RIO_INB_MBOX_RESOURCE 1
+#define RIO_OUTB_MBOX_RESOURCE 2
+
+#define RIO_PW_MSG_SIZE 64
+
+/*
+ * A component tag value (stored in the component tag CSR) is used as a device's
+ * unique identifier assigned during enumeration. Besides being used for
+ * identifying switches (which do not have a device ID register), it is also used
+ * by error management notifications and therefore has to be assigned
+ * to endpoints as well.
+ */
+#define RIO_CTAG_RESRVD 0xfffe0000 /* Reserved */
+#define RIO_CTAG_UDEVID 0x0001ffff /* Unique device identifier */
+
+extern struct bus_type rio_bus_type;
+extern struct class rio_mport_class;
+
+struct rio_mport;
+struct rio_dev;
+union rio_pw_msg;
+
+/**
+ * struct rio_switch - RIO switch info
+ * @node: Node in global list of switches
+ * @route_table: Copy of switch routing table
+ * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
+ * @ops: pointer to switch-specific operations
+ * @lock: lock to serialize operations updates
+ * @nextdev: Array of per-port pointers to the next attached device
+ */
+struct rio_switch {
+ struct list_head node;
+ u8 *route_table;
+ u32 port_ok;
+ struct rio_switch_ops *ops;
+ spinlock_t lock;
+ struct rio_dev *nextdev[0];
+};
+
+/**
+ * struct rio_switch_ops - Per-switch operations
+ * @owner: The module owner of this structure
+ * @add_entry: Callback for switch-specific route add function
+ * @get_entry: Callback for switch-specific route get function
+ * @clr_table: Callback for switch-specific clear route table function
+ * @set_domain: Callback for switch-specific domain setting function
+ * @get_domain: Callback for switch-specific domain get function
+ * @em_init: Callback for switch-specific error management init function
+ * @em_handle: Callback for switch-specific error management handler function
+ *
+ * Defines the operations that are necessary to initialize/control
+ * a particular RIO switch device.
+ */
+struct rio_switch_ops {
+ struct module *owner;
+ int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port);
+ int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port);
+ int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table);
+ int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 sw_domain);
+ int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 *sw_domain);
+ int (*em_init) (struct rio_dev *dev);
+ int (*em_handle) (struct rio_dev *dev, u8 swport);
+};
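
As an illustration of the callback signatures above, a hypothetical switch driver might populate the route-programming hook roughly like this; my_sw_add_entry and my_sw_ops are invented names, the maintenance accessors come from <linux/rio_drv.h> and the standard route CSRs from <linux/rio_regs.h>:

#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_regs.h>

static int my_sw_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
			   u16 table, u16 route_destid, u8 route_port)
{
	if (table != RIO_GLOBAL_TABLE)
		return -EINVAL;		/* this sketch only programs the global table */

	/* Standard route programming: select the destID, then assign its egress port. */
	rio_mport_write_config_32(mport, destid, hopcount,
				  RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
	rio_mport_write_config_32(mport, destid, hopcount,
				  RIO_STD_RTE_CONF_PORT_SEL_CSR, route_port);
	return 0;
}

static struct rio_switch_ops my_sw_ops = {
	.owner     = THIS_MODULE,
	.add_entry = my_sw_add_entry,
};
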
+
+/**
+ * struct rio_dev - RIO device info
+ * @global_list: Node in list of all RIO devices
+ * @net_list: Node in list of RIO devices in a network
+ * @net: Network this device is a part of
+ * @do_enum: Enumeration flag
+ * @did: Device ID
+ * @vid: Vendor ID
+ * @device_rev: Device revision
+ * @asm_did: Assembly device ID
+ * @asm_vid: Assembly vendor ID
+ * @asm_rev: Assembly revision
+ * @efptr: Extended feature pointer
+ * @pef: Processing element features
+ * @swpinfo: Switch port info
+ * @src_ops: Source operation capabilities
+ * @dst_ops: Destination operation capabilities
+ * @comp_tag: RIO component tag
+ * @phys_efptr: RIO device extended features pointer
+ * @em_efptr: RIO Error Management features pointer
+ * @dma_mask: Mask of bits of RIO address this device implements
+ * @driver: Driver claiming this device
+ * @dev: Device model device
+ * @riores: RIO resources this device owns
+ * @pwcback: port-write callback function for this device
+ * @destid: Network destination ID (or associated destid for switch)
+ * @hopcount: Hopcount to this device
+ * @prev: Previous RIO device connected to the current one
+ * @rswitch: struct rio_switch (if valid for this device)
+ */
+struct rio_dev {
+ struct list_head global_list; /* node in list of all RIO devices */
+ struct list_head net_list; /* node in per net list */
+ struct rio_net *net; /* RIO net this device resides in */
+ bool do_enum;
+ u16 did;
+ u16 vid;
+ u32 device_rev;
+ u16 asm_did;
+ u16 asm_vid;
+ u16 asm_rev;
+ u16 efptr;
+ u32 pef;
+ u32 swpinfo;
+ u32 src_ops;
+ u32 dst_ops;
+ u32 comp_tag;
+ u32 phys_efptr;
+ u32 em_efptr;
+ u64 dma_mask;
+ struct rio_driver *driver; /* RIO driver claiming this device */
+ struct device dev; /* LDM device structure */
+ struct resource riores[RIO_MAX_DEV_RESOURCES];
+ int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
+ u16 destid;
+ u8 hopcount;
+ struct rio_dev *prev;
+ struct rio_switch rswitch[0]; /* RIO switch info */
+};
+
+#define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
+#define rio_dev_f(n) list_entry(n, struct rio_dev, net_list)
+#define to_rio_dev(n) container_of(n, struct rio_dev, dev)
+#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
+#define to_rio_mport(n) container_of(n, struct rio_mport, dev)
+
+/**
+ * struct rio_msg - RIO message event
+ * @res: Mailbox resource
+ * @mcback: Message event callback
+ */
+struct rio_msg {
+ struct resource *res;
+ void (*mcback) (struct rio_mport * mport, void *dev_id, int mbox, int slot);
+};
+
+/**
+ * struct rio_dbell - RIO doorbell event
+ * @node: Node in list of doorbell events
+ * @res: Doorbell resource
+ * @dinb: Doorbell event callback
+ * @dev_id: Device specific pointer to pass on event
+ */
+struct rio_dbell {
+ struct list_head node;
+ struct resource *res;
+ void (*dinb) (struct rio_mport *mport, void *dev_id, u16 src, u16 dst, u16 info);
+ void *dev_id;
+};
+
+enum rio_phy_type {
+ RIO_PHY_PARALLEL,
+ RIO_PHY_SERIAL,
+};
+
+/**
+ * struct rio_mport - RIO master port info
+ * @dbells: List of doorbell events
+ * @node: Node in global list of master ports
+ * @nnode: Node in network list of master ports
+ * @iores: I/O mem resource that this master port interface owns
+ * @riores: RIO resources that this master port interface owns
+ * @inb_msg: RIO inbound message event descriptors
+ * @outb_msg: RIO outbound message event descriptors
+ * @host_deviceid: Host device ID associated with this master port
+ * @ops: configuration space functions
+ * @id: Port ID, unique among all ports
+ * @index: Port index, unique among all port interfaces of the same type
+ * @sys_size: RapidIO common transport system size
+ * @phy_type: RapidIO phy type
+ * @phys_efptr: RIO port extended features pointer
+ * @name: Port name string
+ * @dev: device structure associated with an mport
+ * @priv: Master port private data
+ * @dma: DMA device associated with mport
+ * @nscan: RapidIO network enumeration/discovery operations
+ */
+struct rio_mport {
+ struct list_head dbells; /* list of doorbell events */
+ struct list_head node; /* node in global list of ports */
+ struct list_head nnode; /* node in net list of ports */
+ struct resource iores;
+ struct resource riores[RIO_MAX_MPORT_RESOURCES];
+ struct rio_msg inb_msg[RIO_MAX_MBOX];
+ struct rio_msg outb_msg[RIO_MAX_MBOX];
+ int host_deviceid; /* Host device ID */
+ struct rio_ops *ops; /* low-level architecture-dependent routines */
+ unsigned char id; /* port ID, unique among all ports */
+ unsigned char index; /* port index, unique among all port
+ interfaces of the same type */
+ unsigned int sys_size; /* RapidIO common transport system size.
+ * 0 - Small size, 256 devices.
+ * 1 - Large size, 65536 devices.
+ */
+ enum rio_phy_type phy_type; /* RapidIO phy type */
+ u32 phys_efptr;
+ unsigned char name[RIO_MAX_MPORT_NAME];
+ struct device dev;
+ void *priv; /* Master port private data */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ struct dma_device dma;
+#endif
+ struct rio_scan *nscan;
+};
+
+/*
+ * Enumeration/discovery control flags
+ */
+#define RIO_SCAN_ENUM_NO_WAIT 0x00000001 /* Do not wait for enum completed */
+
+struct rio_id_table {
+ u16 start; /* logical minimal id */
+ u32 max; /* max number of IDs in table */
+ spinlock_t lock;
+ unsigned long *table;
+};
+
+/**
+ * struct rio_net - RIO network info
+ * @node: Node in global list of RIO networks
+ * @devices: List of devices in this network
+ * @switches: List of switches in this network
+ * @mports: List of master ports accessing this network
+ * @hport: Default port for accessing this network
+ * @id: RIO network ID
+ * @destid_table: destID allocation table
+ */
+struct rio_net {
+ struct list_head node; /* node in list of networks */
+ struct list_head devices; /* list of devices in this net */
+ struct list_head switches; /* list of switches in this net */
+ struct list_head mports; /* list of ports accessing net */
+ struct rio_mport *hport; /* primary port for accessing net */
+ unsigned char id; /* RIO network ID */
+ struct rio_id_table destid_table; /* destID allocation table */
+};
+
+/* Low-level architecture-dependent routines */
+
+/**
+ * struct rio_ops - Low-level RIO configuration space operations
+ * @lcread: Callback to perform local (master port) read of config space.
+ * @lcwrite: Callback to perform local (master port) write of config space.
+ * @cread: Callback to perform network read of config space.
+ * @cwrite: Callback to perform network write of config space.
+ * @dsend: Callback to send a doorbell message.
+ * @pwenable: Callback to enable/disable port-write message handling.
+ * @open_outb_mbox: Callback to initialize outbound mailbox.
+ * @close_outb_mbox: Callback to shut down outbound mailbox.
+ * @open_inb_mbox: Callback to initialize inbound mailbox.
+ * @close_inb_mbox: Callback to shut down inbound mailbox.
+ * @add_outb_message: Callback to add a message to an outbound mailbox queue.
+ * @add_inb_buffer: Callback to add a buffer to an inbound mailbox queue.
+ * @get_inb_message: Callback to get a message from an inbound mailbox queue.
+ * @map_inb: Callback to map RapidIO address region into local memory space.
+ * @unmap_inb: Callback to unmap RapidIO address region mapped with map_inb().
+ */
+struct rio_ops {
+ int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len,
+ u32 *data);
+ int (*lcwrite) (struct rio_mport *mport, int index, u32 offset, int len,
+ u32 data);
+ int (*cread) (struct rio_mport *mport, int index, u16 destid,
+ u8 hopcount, u32 offset, int len, u32 *data);
+ int (*cwrite) (struct rio_mport *mport, int index, u16 destid,
+ u8 hopcount, u32 offset, int len, u32 data);
+ int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data);
+ int (*pwenable) (struct rio_mport *mport, int enable);
+ int (*open_outb_mbox)(struct rio_mport *mport, void *dev_id,
+ int mbox, int entries);
+ void (*close_outb_mbox)(struct rio_mport *mport, int mbox);
+ int (*open_inb_mbox)(struct rio_mport *mport, void *dev_id,
+ int mbox, int entries);
+ void (*close_inb_mbox)(struct rio_mport *mport, int mbox);
+ int (*add_outb_message)(struct rio_mport *mport, struct rio_dev *rdev,
+ int mbox, void *buffer, size_t len);
+ int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
+ void *(*get_inb_message)(struct rio_mport *mport, int mbox);
+ int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
+ u64 rstart, u32 size, u32 flags);
+ void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
+};
+
+#define RIO_RESOURCE_MEM 0x00000100
+#define RIO_RESOURCE_DOORBELL 0x00000200
+#define RIO_RESOURCE_MAILBOX 0x00000400
+
+#define RIO_RESOURCE_CACHEABLE 0x00010000
+#define RIO_RESOURCE_PCI 0x00020000
+
+#define RIO_RESOURCE_BUSY 0x80000000
+
+/**
+ * struct rio_driver - RIO driver info
+ * @node: Node in list of drivers
+ * @name: RIO driver name
+ * @id_table: RIO device ids to be associated with this driver
+ * @probe: RIO device inserted
+ * @remove: RIO device removed
+ * @suspend: RIO device suspended
+ * @resume: RIO device awakened
+ * @enable_wake: RIO device enable wake event
+ * @driver: LDM driver struct
+ *
+ * Provides info on a RIO device driver for insertion/removal and
+ * power management purposes.
+ */
+struct rio_driver {
+ struct list_head node;
+ char *name;
+ const struct rio_device_id *id_table;
+ int (*probe) (struct rio_dev * dev, const struct rio_device_id * id);
+ void (*remove) (struct rio_dev * dev);
+ int (*suspend) (struct rio_dev * dev, u32 state);
+ int (*resume) (struct rio_dev * dev);
+ int (*enable_wake) (struct rio_dev * dev, u32 state, int enable);
+ struct device_driver driver;
+};
+
+#define to_rio_driver(drv) container_of(drv,struct rio_driver, driver)
+
+union rio_pw_msg {
+ struct {
+ u32 comptag; /* Component Tag CSR */
+ u32 errdetect; /* Port N Error Detect CSR */
+ u32 is_port; /* Implementation specific + PortID */
+ u32 ltlerrdet; /* LTL Error Detect CSR */
+ u32 padding[12];
+ } em;
+ u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
+};
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+/*
+ * enum rio_write_type - RIO write transaction types used in DMA transfers
+ *
+ * Note: The RapidIO specification defines write (NWRITE) and
+ * write-with-response (NWRITE_R) data transfer operations.
+ * Existing DMA controllers that service RapidIO may use one of these operations
+ * for the entire data transfer, or a combination in which only the last data
+ * packet requires a response.
+ */
+enum rio_write_type {
+ RDW_DEFAULT, /* default method used by DMA driver */
+ RDW_ALL_NWRITE, /* all packets use NWRITE */
+ RDW_ALL_NWRITE_R, /* all packets use NWRITE_R */
+ RDW_LAST_NWRITE_R, /* last packet uses NWRITE_R, others - NWRITE */
+};
+
+struct rio_dma_ext {
+ u16 destid;
+ u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */
+ u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */
+ enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+struct rio_dma_data {
+ /* Local data (as scatterlist) */
+ struct scatterlist *sg; /* I/O scatter list */
+ unsigned int sg_len; /* size of scatter list */
+ /* Remote device address (flat buffer) */
+ u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */
+ u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */
+ enum rio_write_type wr_type; /* preferred RIO write operation type */
+};
+
+static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
+{
+ return container_of(ddev, struct rio_mport, dma);
+}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+/**
+ * struct rio_scan - RIO enumeration and discovery operations
+ * @owner: The module owner of this structure
+ * @enumerate: Callback to perform RapidIO fabric enumeration.
+ * @discover: Callback to perform RapidIO fabric discovery.
+ */
+struct rio_scan {
+ struct module *owner;
+ int (*enumerate)(struct rio_mport *mport, u32 flags);
+ int (*discover)(struct rio_mport *mport, u32 flags);
+};
+
+/**
+ * struct rio_scan_node - list node to register RapidIO enumeration and
+ * discovery methods with RapidIO core.
+ * @mport_id: ID of an mport (net) serviced by this enumerator
+ * @node: node in global list of registered enumerators
+ * @ops: RIO enumeration and discovery operations
+ */
+struct rio_scan_node {
+ int mport_id;
+ struct list_head node;
+ struct rio_scan *ops;
+};
+
+/* Architecture and hardware-specific functions */
+extern int rio_register_mport(struct rio_mport *);
+extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
+extern void rio_close_inb_mbox(struct rio_mport *, int);
+extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
+extern void rio_close_outb_mbox(struct rio_mport *, int);
+
+#endif /* LINUX_RIO_H */
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
new file mode 100644
index 000000000..9fc2f213e
--- /dev/null
+++ b/include/linux/rio_drv.h
@@ -0,0 +1,443 @@
+/*
+ * RapidIO driver services
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef LINUX_RIO_DRV_H
+#define LINUX_RIO_DRV_H
+
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/rio.h>
+
+extern int __rio_local_read_config_32(struct rio_mport *port, u32 offset,
+ u32 * data);
+extern int __rio_local_write_config_32(struct rio_mport *port, u32 offset,
+ u32 data);
+extern int __rio_local_read_config_16(struct rio_mport *port, u32 offset,
+ u16 * data);
+extern int __rio_local_write_config_16(struct rio_mport *port, u32 offset,
+ u16 data);
+extern int __rio_local_read_config_8(struct rio_mport *port, u32 offset,
+ u8 * data);
+extern int __rio_local_write_config_8(struct rio_mport *port, u32 offset,
+ u8 data);
+
+extern int rio_mport_read_config_32(struct rio_mport *port, u16 destid,
+ u8 hopcount, u32 offset, u32 * data);
+extern int rio_mport_write_config_32(struct rio_mport *port, u16 destid,
+ u8 hopcount, u32 offset, u32 data);
+extern int rio_mport_read_config_16(struct rio_mport *port, u16 destid,
+ u8 hopcount, u32 offset, u16 * data);
+extern int rio_mport_write_config_16(struct rio_mport *port, u16 destid,
+ u8 hopcount, u32 offset, u16 data);
+extern int rio_mport_read_config_8(struct rio_mport *port, u16 destid,
+ u8 hopcount, u32 offset, u8 * data);
+extern int rio_mport_write_config_8(struct rio_mport *port, u16 destid,
+ u8 hopcount, u32 offset, u8 data);
+
+/**
+ * rio_local_read_config_32 - Read 32 bits from local configuration space
+ * @port: Master port
+ * @offset: Offset into local configuration space
+ * @data: Pointer to read data into
+ *
+ * Reads 32 bits of data from the specified offset within the local
+ * device's configuration space.
+ */
+static inline int rio_local_read_config_32(struct rio_mport *port, u32 offset,
+ u32 * data)
+{
+ return __rio_local_read_config_32(port, offset, data);
+}
+
+/**
+ * rio_local_write_config_32 - Write 32 bits to local configuration space
+ * @port: Master port
+ * @offset: Offset into local configuration space
+ * @data: Data to be written
+ *
+ * Writes 32 bits of data to the specified offset within the local
+ * device's configuration space.
+ */
+static inline int rio_local_write_config_32(struct rio_mport *port, u32 offset,
+ u32 data)
+{
+ return __rio_local_write_config_32(port, offset, data);
+}
+
+/**
+ * rio_local_read_config_16 - Read 16 bits from local configuration space
+ * @port: Master port
+ * @offset: Offset into local configuration space
+ * @data: Pointer to read data into
+ *
+ * Reads 16 bits of data from the specified offset within the local
+ * device's configuration space.
+ */
+static inline int rio_local_read_config_16(struct rio_mport *port, u32 offset,
+ u16 * data)
+{
+ return __rio_local_read_config_16(port, offset, data);
+}
+
+/**
+ * rio_local_write_config_16 - Write 16 bits to local configuration space
+ * @port: Master port
+ * @offset: Offset into local configuration space
+ * @data: Data to be written
+ *
+ * Writes 16 bits of data to the specified offset within the local
+ * device's configuration space.
+ */
+
+static inline int rio_local_write_config_16(struct rio_mport *port, u32 offset,
+ u16 data)
+{
+ return __rio_local_write_config_16(port, offset, data);
+}
+
+/**
+ * rio_local_read_config_8 - Read 8 bits from local configuration space
+ * @port: Master port
+ * @offset: Offset into local configuration space
+ * @data: Pointer to read data into
+ *
+ * Reads 8 bits of data from the specified offset within the local
+ * device's configuration space.
+ */
+static inline int rio_local_read_config_8(struct rio_mport *port, u32 offset,
+ u8 * data)
+{
+ return __rio_local_read_config_8(port, offset, data);
+}
+
+/**
+ * rio_local_write_config_8 - Write 8 bits to local configuration space
+ * @port: Master port
+ * @offset: Offset into local configuration space
+ * @data: Data to be written
+ *
+ * Writes 8 bits of data to the specified offset within the local
+ * device's configuration space.
+ */
+static inline int rio_local_write_config_8(struct rio_mport *port, u32 offset,
+ u8 data)
+{
+ return __rio_local_write_config_8(port, offset, data);
+}
+
+/**
+ * rio_read_config_32 - Read 32 bits from configuration space
+ * @rdev: RIO device
+ * @offset: Offset into device configuration space
+ * @data: Pointer to read data into
+ *
+ * Reads 32 bits of data from the specified offset within the
+ * RIO device's configuration space.
+ */
+static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset,
+ u32 * data)
+{
+ return rio_mport_read_config_32(rdev->net->hport, rdev->destid,
+ rdev->hopcount, offset, data);
+};
+
+/**
+ * rio_write_config_32 - Write 32 bits to configuration space
+ * @rdev: RIO device
+ * @offset: Offset into device configuration space
+ * @data: Data to be written
+ *
+ * Writes 32 bits of data to the specified offset within the
+ * RIO device's configuration space.
+ */
+static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset,
+ u32 data)
+{
+ return rio_mport_write_config_32(rdev->net->hport, rdev->destid,
+ rdev->hopcount, offset, data);
+};
+
+/**
+ * rio_read_config_16 - Read 16 bits from configuration space
+ * @rdev: RIO device
+ * @offset: Offset into device configuration space
+ * @data: Pointer to read data into
+ *
+ * Reads 16 bits of data from the specified offset within the
+ * RIO device's configuration space.
+ */
+static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset,
+ u16 * data)
+{
+ return rio_mport_read_config_16(rdev->net->hport, rdev->destid,
+ rdev->hopcount, offset, data);
+};
+
+/**
+ * rio_write_config_16 - Write 16 bits to configuration space
+ * @rdev: RIO device
+ * @offset: Offset into device configuration space
+ * @data: Data to be written
+ *
+ * Writes 16 bits of data to the specified offset within the
+ * RIO device's configuration space.
+ */
+static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset,
+ u16 data)
+{
+ return rio_mport_write_config_16(rdev->net->hport, rdev->destid,
+ rdev->hopcount, offset, data);
+};
+
+/**
+ * rio_read_config_8 - Read 8 bits from configuration space
+ * @rdev: RIO device
+ * @offset: Offset into device configuration space
+ * @data: Pointer to read data into
+ *
+ * Reads 8 bits of data from the specified offset within the
+ * RIO device's configuration space.
+ */
+static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data)
+{
+ return rio_mport_read_config_8(rdev->net->hport, rdev->destid,
+ rdev->hopcount, offset, data);
+};
+
+/**
+ * rio_write_config_8 - Write 8 bits to configuration space
+ * @rdev: RIO device
+ * @offset: Offset into device configuration space
+ * @data: Data to be written
+ *
+ * Writes 8 bits of data to the specified offset within the
+ * RIO device's configuration space.
+ */
+static inline int rio_write_config_8(struct rio_dev *rdev, u32 offset, u8 data)
+{
+ return rio_mport_write_config_8(rdev->net->hport, rdev->destid,
+ rdev->hopcount, offset, data);
+};
+
+extern int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid,
+ u16 data);
+
+/**
+ * rio_send_doorbell - Send a doorbell message to a device
+ * @rdev: RIO device
+ * @data: Doorbell message data
+ *
+ * Send a doorbell message to a RIO device. The doorbell message
+ * has a 16-bit info field provided by the @data argument.
+ */
+static inline int rio_send_doorbell(struct rio_dev *rdev, u16 data)
+{
+ return rio_mport_send_doorbell(rdev->net->hport, rdev->destid, data);
+};
+
+/**
+ * rio_init_mbox_res - Initialize a RIO mailbox resource
+ * @res: resource struct
+ * @start: start of mailbox range
+ * @end: end of mailbox range
+ *
+ * This function is used to initialize the fields of a resource
+ * for use as a mailbox resource. It initializes a range of
+ * mailboxes using the start and end arguments.
+ */
+static inline void rio_init_mbox_res(struct resource *res, int start, int end)
+{
+ memset(res, 0, sizeof(struct resource));
+ res->start = start;
+ res->end = end;
+ res->flags = RIO_RESOURCE_MAILBOX;
+}
+
+/**
+ * rio_init_dbell_res - Initialize a RIO doorbell resource
+ * @res: resource struct
+ * @start: start of doorbell range
+ * @end: end of doorbell range
+ *
+ * This function is used to initialize the fields of a resource
+ * for use as a doorbell resource. It initializes a range of
+ * doorbell messages using the start and end arguments.
+ */
+static inline void rio_init_dbell_res(struct resource *res, u16 start, u16 end)
+{
+ memset(res, 0, sizeof(struct resource));
+ res->start = start;
+ res->end = end;
+ res->flags = RIO_RESOURCE_DOORBELL;
+}
+
+/**
+ * RIO_DEVICE - macro used to describe a specific RIO device
+ * @dev: the 16 bit RIO device ID
+ * @ven: the 16 bit RIO vendor ID
+ *
+ * This macro is used to create a struct rio_device_id that matches a
+ * specific device. The assembly vendor and assembly device fields
+ * will be set to %RIO_ANY_ID.
+ */
+#define RIO_DEVICE(dev,ven) \
+ .did = (dev), .vid = (ven), \
+ .asm_did = RIO_ANY_ID, .asm_vid = RIO_ANY_ID
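
For illustration, a hypothetical driver matching the IDT Tsi721 endpoint could build its ID table with this macro and register it with rio_register_driver() (declared further below); all example_* names are invented:

#include <linux/kernel.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/rio_ids.h>

static const struct rio_device_id example_id_table[] = {
	{ RIO_DEVICE(RIO_DID_TSI721, RIO_VID_IDT) },
	{ 0, }			/* terminating entry */
};

static int example_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	pr_info("example: bound to %s\n", rio_name(rdev));
	return 0;
}

static void example_remove(struct rio_dev *rdev)
{
}

static struct rio_driver example_driver = {
	.name     = "example_rio_drv",
	.id_table = example_id_table,
	.probe    = example_probe,
	.remove   = example_remove,
};

/* Typically done from module_init()/module_exit():
 *	rio_register_driver(&example_driver);
 *	rio_unregister_driver(&example_driver);
 */
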
+
+/* Mailbox management */
+extern int rio_request_outb_mbox(struct rio_mport *, void *, int, int,
+ void (*)(struct rio_mport *, void *,int, int));
+extern int rio_release_outb_mbox(struct rio_mport *, int);
+
+/**
+ * rio_add_outb_message - Add RIO message to an outbound mailbox queue
+ * @mport: RIO master port containing the outbound queue
+ * @rdev: RIO device the message is to be sent to
+ * @mbox: The outbound mailbox queue
+ * @buffer: Pointer to the message buffer
+ * @len: Length of the message buffer
+ *
+ * Adds a RIO message buffer to an outbound mailbox queue for
+ * transmission. Returns 0 on success.
+ */
+static inline int rio_add_outb_message(struct rio_mport *mport,
+ struct rio_dev *rdev, int mbox,
+ void *buffer, size_t len)
+{
+ return mport->ops->add_outb_message(mport, rdev, mbox,
+ buffer, len);
+}
+
+extern int rio_request_inb_mbox(struct rio_mport *, void *, int, int,
+ void (*)(struct rio_mport *, void *, int, int));
+extern int rio_release_inb_mbox(struct rio_mport *, int);
+
+/**
+ * rio_add_inb_buffer - Add buffer to an inbound mailbox queue
+ * @mport: Master port containing the inbound mailbox
+ * @mbox: The inbound mailbox number
+ * @buffer: Pointer to the message buffer
+ *
+ * Adds a buffer to an inbound mailbox queue for reception. Returns
+ * 0 on success.
+ */
+static inline int rio_add_inb_buffer(struct rio_mport *mport, int mbox,
+ void *buffer)
+{
+ return mport->ops->add_inb_buffer(mport, mbox, buffer);
+}
+
+/**
+ * rio_get_inb_message - Get a RIO message from an inbound mailbox queue
+ * @mport: Master port containing the inbound mailbox
+ * @mbox: The inbound mailbox number
+ *
+ * Get a RIO message from an inbound mailbox queue. Returns a pointer to the
+ * message on success or %NULL on failure.
+ */
+static inline void *rio_get_inb_message(struct rio_mport *mport, int mbox)
+{
+ return mport->ops->get_inb_message(mport, mbox);
+}
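
A hypothetical sketch tying the mailbox helpers above together; my_tx_done, example_setup and example_send are invented names, and a real driver would keep the mailbox from probe until remove (releasing it with rio_release_outb_mbox()):

#include <linux/rio_drv.h>

static void my_tx_done(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	/* Invoked once the message queued in @slot of @mbox has been transmitted. */
}

static int example_setup(struct rio_mport *mport)
{
	/* Claim outbound mailbox 0 with room for 32 queued messages. */
	return rio_request_outb_mbox(mport, NULL, 0, 32, my_tx_done);
}

static int example_send(struct rio_mport *mport, struct rio_dev *rdev,
			void *buf, size_t len)
{
	if (len > RIO_MAX_MSG_SIZE)
		return -EINVAL;

	return rio_add_outb_message(mport, rdev, 0, buf, len);
}
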
+
+/* Doorbell management */
+extern int rio_request_inb_dbell(struct rio_mport *, void *, u16, u16,
+ void (*)(struct rio_mport *, void *, u16, u16, u16));
+extern int rio_release_inb_dbell(struct rio_mport *, u16, u16);
+extern struct resource *rio_request_outb_dbell(struct rio_dev *, u16, u16);
+extern int rio_release_outb_dbell(struct rio_dev *, struct resource *);
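
And a matching doorbell sketch; my_dbell_handler and example_dbell are invented names. The inbound side claims a range of doorbell info values on the local mport, the outbound side rings one on a remote device:

#include <linux/kernel.h>
#include <linux/rio_drv.h>

static void my_dbell_handler(struct rio_mport *mport, void *dev_id,
			     u16 src, u16 dst, u16 info)
{
	pr_info("doorbell 0x%04x from destid 0x%04x\n", info, src);
}

static int example_dbell(struct rio_mport *mport, struct rio_dev *rdev)
{
	int ret;

	/* Claim inbound doorbell info values 0x0000-0x000f on this mport. */
	ret = rio_request_inb_dbell(mport, NULL, 0x0000, 0x000f, my_dbell_handler);
	if (ret)
		return ret;

	/* Ring doorbell 0x0001 on the remote device. */
	return rio_send_doorbell(rdev, 0x0001);
}
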
+
+/* Memory region management */
+int rio_claim_resource(struct rio_dev *, int);
+int rio_request_regions(struct rio_dev *, char *);
+void rio_release_regions(struct rio_dev *);
+int rio_request_region(struct rio_dev *, int, char *);
+void rio_release_region(struct rio_dev *, int);
+
+/* Memory mapping functions */
+extern int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
+ u64 rbase, u32 size, u32 rflags);
+extern void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart);
+
+/* Port-Write management */
+extern int rio_request_inb_pwrite(struct rio_dev *,
+ int (*)(struct rio_dev *, union rio_pw_msg*, int));
+extern int rio_release_inb_pwrite(struct rio_dev *);
+extern int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg);
+
+/* LDM support */
+int rio_register_driver(struct rio_driver *);
+void rio_unregister_driver(struct rio_driver *);
+struct rio_dev *rio_dev_get(struct rio_dev *);
+void rio_dev_put(struct rio_dev *);
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
+extern struct dma_chan *rio_request_mport_dma(struct rio_mport *mport);
+extern void rio_release_dma(struct dma_chan *dchan);
+extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
+ struct rio_dev *rdev, struct dma_chan *dchan,
+ struct rio_dma_data *data,
+ enum dma_transfer_direction direction, unsigned long flags);
+extern struct dma_async_tx_descriptor *rio_dma_prep_xfer(
+ struct dma_chan *dchan, u16 destid,
+ struct rio_dma_data *data,
+ enum dma_transfer_direction direction, unsigned long flags);
+#endif
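
Under CONFIG_RAPIDIO_DMA_ENGINE a transfer is described by struct rio_dma_data (see <linux/rio.h>) and driven through the generic dmaengine API. The following sketch is illustrative only: example_rio_write(), its synchronous wait and its error handling are assumptions, not an established pattern from this header:

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/rio_drv.h>
#include <linux/scatterlist.h>

static int example_rio_write(struct rio_dev *rdev, void *buf, size_t len,
			     u64 rio_addr)
{
	struct dma_chan *dchan;
	struct dma_async_tx_descriptor *tx;
	struct rio_dma_data data = { 0 };
	struct scatterlist sg;
	dma_cookie_t cookie;
	int ret = 0;

	dchan = rio_request_dma(rdev);	/* DMA channel able to reach rdev's mport */
	if (!dchan)
		return -ENODEV;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(dchan->device->dev, &sg, 1, DMA_TO_DEVICE)) {
		ret = -EIO;
		goto out_release;
	}

	data.sg		= &sg;
	data.sg_len	= 1;
	data.rio_addr	= rio_addr;		/* low 64 bits of the 66-bit address */
	data.rio_addr_u	= 0;
	data.wr_type	= RDW_LAST_NWRITE_R;	/* response only for the final packet */

	tx = rio_dma_prep_slave_sg(rdev, dchan, &data, DMA_MEM_TO_DEV, 0);
	if (!tx) {
		ret = -EIO;
		goto out_unmap;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(dchan);
	if (dma_sync_wait(dchan, cookie) != DMA_COMPLETE)
		ret = -EIO;

out_unmap:
	dma_unmap_sg(dchan->device->dev, &sg, 1, DMA_TO_DEVICE);
out_release:
	rio_release_dma(dchan);
	return ret;
}
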
+
+/**
+ * rio_name - Get the unique RIO device identifier
+ * @rdev: RIO device
+ *
+ * Get the unique RIO device identifier. Returns the device
+ * identifier string.
+ */
+static inline const char *rio_name(struct rio_dev *rdev)
+{
+ return dev_name(&rdev->dev);
+}
+
+/**
+ * rio_get_drvdata - Get RIO driver specific data
+ * @rdev: RIO device
+ *
+ * Get RIO driver specific data. Returns a pointer to the
+ * driver specific data.
+ */
+static inline void *rio_get_drvdata(struct rio_dev *rdev)
+{
+ return dev_get_drvdata(&rdev->dev);
+}
+
+/**
+ * rio_set_drvdata - Set RIO driver specific data
+ * @rdev: RIO device
+ * @data: Pointer to driver specific data
+ *
+ * Set RIO driver specific data. The device struct's driver data pointer
+ * is set to the @data argument.
+ */
+static inline void rio_set_drvdata(struct rio_dev *rdev, void *data)
+{
+ dev_set_drvdata(&rdev->dev, data);
+}
+
+/* Misc driver helpers */
+extern u16 rio_local_get_device_id(struct rio_mport *port);
+extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
+extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
+ struct rio_dev *from);
+extern int rio_init_mports(void);
+
+#endif /* LINUX_RIO_DRV_H */
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
new file mode 100644
index 000000000..2543bc163
--- /dev/null
+++ b/include/linux/rio_ids.h
@@ -0,0 +1,42 @@
+/*
+ * RapidIO devices
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef LINUX_RIO_IDS_H
+#define LINUX_RIO_IDS_H
+
+#define RIO_VID_FREESCALE 0x0002
+#define RIO_DID_MPC8560 0x0003
+
+#define RIO_VID_TUNDRA 0x000d
+#define RIO_DID_TSI500 0x0500
+#define RIO_DID_TSI568 0x0568
+#define RIO_DID_TSI572 0x0572
+#define RIO_DID_TSI574 0x0574
+#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */
+#define RIO_DID_TSI577 0x0577
+#define RIO_DID_TSI578 0x0578
+
+#define RIO_VID_IDT 0x0038
+#define RIO_DID_IDT70K200 0x0310
+#define RIO_DID_IDTCPS8 0x035c
+#define RIO_DID_IDTCPS12 0x035d
+#define RIO_DID_IDTCPS16 0x035b
+#define RIO_DID_IDTCPS6Q 0x035f
+#define RIO_DID_IDTCPS10Q 0x035e
+#define RIO_DID_IDTCPS1848 0x0374
+#define RIO_DID_IDTCPS1432 0x0375
+#define RIO_DID_IDTCPS1616 0x0379
+#define RIO_DID_IDTVPS1616 0x0377
+#define RIO_DID_IDTSPS1616 0x0378
+#define RIO_DID_TSI721 0x80ab
+
+#endif /* LINUX_RIO_IDS_H */
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
new file mode 100644
index 000000000..218168a2b
--- /dev/null
+++ b/include/linux/rio_regs.h
@@ -0,0 +1,295 @@
+/*
+ * RapidIO register definitions
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef LINUX_RIO_REGS_H
+#define LINUX_RIO_REGS_H
+
+/*
+ * In RapidIO, each device has a 16MB configuration space that is
+ * accessed via maintenance transactions. Portions of configuration
+ * space are standardized and/or reserved.
+ */
+#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO maintenance space */
+
+#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */
+#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */
+#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */
+#define RIO_ASM_ID_MASK 0xffff0000 /* [I] Asm ID Mask */
+#define RIO_ASM_VEN_ID_MASK 0x0000ffff /* [I] Asm Vend Mask */
+
+#define RIO_ASM_INFO_CAR 0x0c /* [I] Assembly Information CAR */
+#define RIO_ASM_REV_MASK 0xffff0000 /* [I] Asm Rev Mask */
+#define RIO_EXT_FTR_PTR_MASK 0x0000ffff /* [I] EF_PTR Mask */
+
+#define RIO_PEF_CAR 0x10 /* [I] Processing Element Features CAR */
+#define RIO_PEF_BRIDGE 0x80000000 /* [I] Bridge */
+#define RIO_PEF_MEMORY 0x40000000 /* [I] MMIO */
+#define RIO_PEF_PROCESSOR 0x20000000 /* [I] Processor */
+#define RIO_PEF_SWITCH 0x10000000 /* [I] Switch */
+#define RIO_PEF_MULTIPORT 0x08000000 /* [VI, 2.1] Multiport */
+#define RIO_PEF_INB_MBOX 0x00f00000 /* [II, <= 1.2] Mailboxes */
+#define RIO_PEF_INB_MBOX0 0x00800000 /* [II, <= 1.2] Mailbox 0 */
+#define RIO_PEF_INB_MBOX1 0x00400000 /* [II, <= 1.2] Mailbox 1 */
+#define RIO_PEF_INB_MBOX2 0x00200000 /* [II, <= 1.2] Mailbox 2 */
+#define RIO_PEF_INB_MBOX3 0x00100000 /* [II, <= 1.2] Mailbox 3 */
+#define RIO_PEF_INB_DOORBELL 0x00080000 /* [II, <= 1.2] Doorbells */
+#define RIO_PEF_EXT_RT 0x00000200 /* [III, 1.3] Extended route table support */
+#define RIO_PEF_STD_RT 0x00000100 /* [III, 1.3] Standard route table support */
+#define RIO_PEF_CTLS 0x00000010 /* [III] CTLS */
+#define RIO_PEF_EXT_FEATURES 0x00000008 /* [I] EFT_PTR valid */
+#define RIO_PEF_ADDR_66 0x00000004 /* [I] 66 bits */
+#define RIO_PEF_ADDR_50 0x00000002 /* [I] 50 bits */
+#define RIO_PEF_ADDR_34 0x00000001 /* [I] 34 bits */
+
+#define RIO_SWP_INFO_CAR 0x14 /* [I] Switch Port Information CAR */
+#define RIO_SWP_INFO_PORT_TOTAL_MASK 0x0000ff00 /* [I] Total number of ports */
+#define RIO_SWP_INFO_PORT_NUM_MASK 0x000000ff /* [I] Maintenance transaction port number */
+#define RIO_GET_TOTAL_PORTS(x) ((x & RIO_SWP_INFO_PORT_TOTAL_MASK) >> 8)
+#define RIO_GET_PORT_NUM(x) (x & RIO_SWP_INFO_PORT_NUM_MASK)
+
+#define RIO_SRC_OPS_CAR 0x18 /* [I] Source Operations CAR */
+#define RIO_SRC_OPS_READ 0x00008000 /* [I] Read op */
+#define RIO_SRC_OPS_WRITE 0x00004000 /* [I] Write op */
+#define RIO_SRC_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */
+#define RIO_SRC_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */
+#define RIO_SRC_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */
+#define RIO_SRC_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */
+#define RIO_SRC_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */
+#define RIO_SRC_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */
+#define RIO_SRC_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */
+#define RIO_SRC_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */
+#define RIO_SRC_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */
+#define RIO_SRC_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */
+
+#define RIO_DST_OPS_CAR 0x1c /* Destination Operations CAR */
+#define RIO_DST_OPS_READ 0x00008000 /* [I] Read op */
+#define RIO_DST_OPS_WRITE 0x00004000 /* [I] Write op */
+#define RIO_DST_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */
+#define RIO_DST_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */
+#define RIO_DST_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */
+#define RIO_DST_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */
+#define RIO_DST_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */
+#define RIO_DST_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */
+#define RIO_DST_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */
+#define RIO_DST_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */
+#define RIO_DST_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */
+#define RIO_DST_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */
+
+#define RIO_OPS_READ 0x00008000 /* [I] Read op */
+#define RIO_OPS_WRITE 0x00004000 /* [I] Write op */
+#define RIO_OPS_STREAM_WRITE 0x00002000 /* [I] Str-write op */
+#define RIO_OPS_WRITE_RESPONSE 0x00001000 /* [I] Write/resp op */
+#define RIO_OPS_DATA_MSG 0x00000800 /* [II] Data msg op */
+#define RIO_OPS_DOORBELL 0x00000400 /* [II] Doorbell op */
+#define RIO_OPS_ATOMIC_TST_SWP 0x00000100 /* [I] Atomic TAS op */
+#define RIO_OPS_ATOMIC_INC 0x00000080 /* [I] Atomic inc op */
+#define RIO_OPS_ATOMIC_DEC 0x00000040 /* [I] Atomic dec op */
+#define RIO_OPS_ATOMIC_SET 0x00000020 /* [I] Atomic set op */
+#define RIO_OPS_ATOMIC_CLR 0x00000010 /* [I] Atomic clr op */
+#define RIO_OPS_PORT_WRITE 0x00000004 /* [I] Port-write op */
+
+ /* 0x20-0x30 *//* Reserved */
+
+#define RIO_SWITCH_RT_LIMIT 0x34 /* [III, 1.3] Switch Route Table Destination ID Limit CAR */
+#define RIO_RT_MAX_DESTID 0x0000ffff
+
+#define RIO_MBOX_CSR 0x40 /* [II, <= 1.2] Mailbox CSR */
+#define RIO_MBOX0_AVAIL 0x80000000 /* [II] Mbox 0 avail */
+#define RIO_MBOX0_FULL 0x40000000 /* [II] Mbox 0 full */
+#define RIO_MBOX0_EMPTY 0x20000000 /* [II] Mbox 0 empty */
+#define RIO_MBOX0_BUSY 0x10000000 /* [II] Mbox 0 busy */
+#define RIO_MBOX0_FAIL 0x08000000 /* [II] Mbox 0 fail */
+#define RIO_MBOX0_ERROR 0x04000000 /* [II] Mbox 0 error */
+#define RIO_MBOX1_AVAIL 0x00800000 /* [II] Mbox 1 avail */
+#define RIO_MBOX1_FULL 0x00200000 /* [II] Mbox 1 full */
+#define RIO_MBOX1_EMPTY 0x00200000 /* [II] Mbox 1 empty */
+#define RIO_MBOX1_BUSY 0x00100000 /* [II] Mbox 1 busy */
+#define RIO_MBOX1_FAIL 0x00080000 /* [II] Mbox 1 fail */
+#define RIO_MBOX1_ERROR 0x00040000 /* [II] Mbox 1 error */
+#define RIO_MBOX2_AVAIL 0x00008000 /* [II] Mbox 2 avail */
+#define RIO_MBOX2_FULL 0x00004000 /* [II] Mbox 2 full */
+#define RIO_MBOX2_EMPTY 0x00002000 /* [II] Mbox 2 empty */
+#define RIO_MBOX2_BUSY 0x00001000 /* [II] Mbox 2 busy */
+#define RIO_MBOX2_FAIL 0x00000800 /* [II] Mbox 2 fail */
+#define RIO_MBOX2_ERROR 0x00000400 /* [II] Mbox 2 error */
+#define RIO_MBOX3_AVAIL 0x00000080 /* [II] Mbox 3 avail */
+#define RIO_MBOX3_FULL 0x00000040 /* [II] Mbox 3 full */
+#define RIO_MBOX3_EMPTY 0x00000020 /* [II] Mbox 3 empty */
+#define RIO_MBOX3_BUSY 0x00000010 /* [II] Mbox 3 busy */
+#define RIO_MBOX3_FAIL 0x00000008 /* [II] Mbox 3 fail */
+#define RIO_MBOX3_ERROR 0x00000004 /* [II] Mbox 3 error */
+
+#define RIO_WRITE_PORT_CSR 0x44 /* [I, <= 1.2] Write Port CSR */
+#define RIO_DOORBELL_CSR 0x44 /* [II, <= 1.2] Doorbell CSR */
+#define RIO_DOORBELL_AVAIL 0x80000000 /* [II] Doorbell avail */
+#define RIO_DOORBELL_FULL 0x40000000 /* [II] Doorbell full */
+#define RIO_DOORBELL_EMPTY 0x20000000 /* [II] Doorbell empty */
+#define RIO_DOORBELL_BUSY 0x10000000 /* [II] Doorbell busy */
+#define RIO_DOORBELL_FAILED 0x08000000 /* [II] Doorbell failed */
+#define RIO_DOORBELL_ERROR 0x04000000 /* [II] Doorbell error */
+#define RIO_WRITE_PORT_AVAILABLE 0x00000080 /* [I] Write Port Available */
+#define RIO_WRITE_PORT_FULL 0x00000040 /* [I] Write Port Full */
+#define RIO_WRITE_PORT_EMPTY 0x00000020 /* [I] Write Port Empty */
+#define RIO_WRITE_PORT_BUSY 0x00000010 /* [I] Write Port Busy */
+#define RIO_WRITE_PORT_FAILED 0x00000008 /* [I] Write Port Failed */
+#define RIO_WRITE_PORT_ERROR 0x00000004 /* [I] Write Port Error */
+
+ /* 0x48 *//* Reserved */
+
+#define RIO_PELL_CTRL_CSR 0x4c /* [I] PE Logical Layer Control CSR */
+#define RIO_PELL_ADDR_66 0x00000004 /* [I] 66-bit addr */
+#define RIO_PELL_ADDR_50 0x00000002 /* [I] 50-bit addr */
+#define RIO_PELL_ADDR_34 0x00000001 /* [I] 34-bit addr */
+
+ /* 0x50-0x54 *//* Reserved */
+
+#define RIO_LCSH_BA 0x58 /* [I] LCS High Base Address */
+#define RIO_LCSL_BA 0x5c /* [I] LCS Base Address */
+
+#define RIO_DID_CSR 0x60 /* [III] Base Device ID CSR */
+
+ /* 0x64 *//* Reserved */
+
+#define RIO_HOST_DID_LOCK_CSR 0x68 /* [III] Host Base Device ID Lock CSR */
+#define RIO_COMPONENT_TAG_CSR 0x6c /* [III] Component Tag CSR */
+
+#define RIO_STD_RTE_CONF_DESTID_SEL_CSR 0x70
+#define RIO_STD_RTE_CONF_EXTCFGEN 0x80000000
+#define RIO_STD_RTE_CONF_PORT_SEL_CSR 0x74
+#define RIO_STD_RTE_DEFAULT_PORT 0x78
+
+ /* 0x7c-0xf8 *//* Reserved */
+ /* 0x100-0xfff8 *//* [I] Extended Features Space */
+ /* 0x10000-0xfffff8 *//* [I] Implementation-defined Space */
+
+/*
+ * Extended Features Space is a configuration space area where
+ * functionality is mapped into extended feature blocks via a
+ * singly linked list of extended feature pointers (EFT_PTR).
+ *
+ * Each extended feature block can be identified/located in
+ * Extended Features Space by walking the extended feature
+ * list starting with the Extended Feature Pointer located
+ * in the Assembly Information CAR.
+ *
+ * Extended Feature Blocks (EFBs) are identified with an assigned
+ * EFB ID. Extended feature block offsets in the definitions are
+ * relative to the offset of the EFB within the Extended Features
+ * Space.
+ */
+
+/* Helper macros to parse the Extended Feature Block header */
+#define RIO_EFB_PTR_MASK 0xffff0000
+#define RIO_EFB_ID_MASK 0x0000ffff
+#define RIO_GET_BLOCK_PTR(x) ((x & RIO_EFB_PTR_MASK) >> 16)
+#define RIO_GET_BLOCK_ID(x) (x & RIO_EFB_ID_MASK)
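
/*
 * Illustrative sketch (not part of the original header): walking the
 * extended feature list with the helper macros above.  "read_efb_header"
 * stands in for whatever config-space read accessor the caller already
 * has; it is an assumption of this example, not an API defined here.
 */
static u32 rio_example_find_efb(u32 (*read_efb_header)(u32 offset),
				u32 first_ptr, u16 wanted_id)
{
	u32 ptr = first_ptr;

	while (ptr) {
		u32 header = read_efb_header(ptr);

		if (RIO_GET_BLOCK_ID(header) == wanted_id)
			return ptr;			/* offset of the matching EFB */
		ptr = RIO_GET_BLOCK_PTR(header);	/* next block; 0 ends the list */
	}
	return 0;					/* not found */
}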
+
+/* Extended Feature Block IDs */
+#define RIO_EFB_PAR_EP_ID 0x0001 /* [IV] LP/LVDS EP Devices */
+#define RIO_EFB_PAR_EP_REC_ID 0x0002 /* [IV] LP/LVDS EP Recovery Devices */
+#define RIO_EFB_PAR_EP_FREE_ID 0x0003 /* [IV] LP/LVDS EP Free Devices */
+#define RIO_EFB_SER_EP_ID_V13P 0x0001 /* [VI] LP/Serial EP Devices, RapidIO Spec ver 1.3 and above */
+#define RIO_EFB_SER_EP_REC_ID_V13P 0x0002 /* [VI] LP/Serial EP Recovery Devices, RapidIO Spec ver 1.3 and above */
+#define RIO_EFB_SER_EP_FREE_ID_V13P 0x0003 /* [VI] LP/Serial EP Free Devices, RapidIO Spec ver 1.3 and above */
+#define RIO_EFB_SER_EP_ID 0x0004 /* [VI] LP/Serial EP Devices */
+#define RIO_EFB_SER_EP_REC_ID 0x0005 /* [VI] LP/Serial EP Recovery Devices */
+#define RIO_EFB_SER_EP_FREE_ID 0x0006 /* [VI] LP/Serial EP Free Devices */
+#define RIO_EFB_SER_EP_FREC_ID 0x0009 /* [VI] LP/Serial EP Free Recovery Devices */
+#define RIO_EFB_ERR_MGMNT 0x0007 /* [VIII] Error Management Extensions */
+
+/*
+ * Physical 8/16 LP-LVDS
+ * ID=0x0001, Generic End Point Devices
+ * ID=0x0002, Generic End Point Devices, software assisted recovery option
+ * ID=0x0003, Generic End Point Free Devices
+ *
+ * Physical LP-Serial
+ * ID=0x0004, Generic End Point Devices
+ * ID=0x0005, Generic End Point Devices, software assisted recovery option
+ * ID=0x0006, Generic End Point Free Devices
+ */
+#define RIO_PORT_MNT_HEADER 0x0000
+#define RIO_PORT_REQ_CTL_CSR 0x0020
+#define RIO_PORT_RSP_CTL_CSR 0x0024 /* 0x0001/0x0002 */
+#define RIO_PORT_LINKTO_CTL_CSR 0x0020 /* Serial */
+#define RIO_PORT_RSPTO_CTL_CSR 0x0024 /* Serial */
+#define RIO_PORT_GEN_CTL_CSR 0x003c
+#define RIO_PORT_GEN_HOST 0x80000000
+#define RIO_PORT_GEN_MASTER 0x40000000
+#define RIO_PORT_GEN_DISCOVERED 0x20000000
+#define RIO_PORT_N_MNT_REQ_CSR(x) (0x0040 + x*0x20) /* 0x0002 */
+#define RIO_MNT_REQ_CMD_RD 0x03 /* Reset-device command */
+#define RIO_MNT_REQ_CMD_IS 0x04 /* Input-status command */
+#define RIO_PORT_N_MNT_RSP_CSR(x) (0x0044 + x*0x20) /* 0x0002 */
+#define RIO_PORT_N_MNT_RSP_RVAL 0x80000000 /* Response Valid */
+#define RIO_PORT_N_MNT_RSP_ASTAT 0x000007e0 /* ackID Status */
+#define RIO_PORT_N_MNT_RSP_LSTAT 0x0000001f /* Link Status */
+#define RIO_PORT_N_ACK_STS_CSR(x) (0x0048 + x*0x20) /* 0x0002 */
+#define RIO_PORT_N_ACK_CLEAR 0x80000000
+#define RIO_PORT_N_ACK_INBOUND 0x3f000000
+#define RIO_PORT_N_ACK_OUTSTAND 0x00003f00
+#define RIO_PORT_N_ACK_OUTBOUND 0x0000003f
+#define RIO_PORT_N_ERR_STS_CSR(x) (0x0058 + x*0x20)
+#define RIO_PORT_N_ERR_STS_PW_OUT_ES 0x00010000 /* Output Error-stopped */
+#define RIO_PORT_N_ERR_STS_PW_INP_ES 0x00000100 /* Input Error-stopped */
+#define RIO_PORT_N_ERR_STS_PW_PEND 0x00000010 /* Port-Write Pending */
+#define RIO_PORT_N_ERR_STS_PORT_ERR 0x00000004
+#define RIO_PORT_N_ERR_STS_PORT_OK 0x00000002
+#define RIO_PORT_N_ERR_STS_PORT_UNINIT 0x00000001
+#define RIO_PORT_N_CTL_CSR(x) (0x005c + x*0x20)
+#define RIO_PORT_N_CTL_PWIDTH 0xc0000000
+#define RIO_PORT_N_CTL_PWIDTH_1 0x00000000
+#define RIO_PORT_N_CTL_PWIDTH_4 0x40000000
+#define RIO_PORT_N_CTL_P_TYP_SER 0x00000001
+#define RIO_PORT_N_CTL_LOCKOUT 0x00000002
+#define RIO_PORT_N_CTL_EN_RX_SER 0x00200000
+#define RIO_PORT_N_CTL_EN_TX_SER 0x00400000
+#define RIO_PORT_N_CTL_EN_RX_PAR 0x08000000
+#define RIO_PORT_N_CTL_EN_TX_PAR 0x40000000
+
+/*
+ * Error Management Extensions (RapidIO 1.3+, Part 8)
+ *
+ * Extended Features Block ID=0x0007
+ */
+
+/* General EM Registers (Common for all Ports) */
+
+#define RIO_EM_EFB_HEADER 0x000 /* Error Management Extensions Block Header */
+#define RIO_EM_LTL_ERR_DETECT 0x008 /* Logical/Transport Layer Error Detect CSR */
+#define RIO_EM_LTL_ERR_EN 0x00c /* Logical/Transport Layer Error Enable CSR */
+#define REM_LTL_ERR_ILLTRAN 0x08000000 /* Illegal Transaction decode */
+#define REM_LTL_ERR_UNSOLR 0x00800000 /* Unsolicited Response */
+#define REM_LTL_ERR_UNSUPTR 0x00400000 /* Unsupported Transaction */
+#define REM_LTL_ERR_IMPSPEC 0x000000ff /* Implementation Specific */
+#define RIO_EM_LTL_HIADDR_CAP 0x010 /* Logical/Transport Layer High Address Capture CSR */
+#define RIO_EM_LTL_ADDR_CAP 0x014 /* Logical/Transport Layer Address Capture CSR */
+#define RIO_EM_LTL_DEVID_CAP 0x018 /* Logical/Transport Layer Device ID Capture CSR */
+#define RIO_EM_LTL_CTRL_CAP 0x01c /* Logical/Transport Layer Control Capture CSR */
+#define RIO_EM_PW_TGT_DEVID 0x028 /* Port-write Target deviceID CSR */
+#define RIO_EM_PKT_TTL 0x02c /* Packet Time-to-live CSR */
+
+/* Per-Port EM Registers */
+
+#define RIO_EM_PN_ERR_DETECT(x) (0x040 + x*0x40) /* Port N Error Detect CSR */
+#define REM_PED_IMPL_SPEC 0x80000000
+#define REM_PED_LINK_TO 0x00000001
+#define RIO_EM_PN_ERRRATE_EN(x) (0x044 + x*0x40) /* Port N Error Rate Enable CSR */
+#define RIO_EM_PN_ATTRIB_CAP(x) (0x048 + x*0x40) /* Port N Attributes Capture CSR */
+#define RIO_EM_PN_PKT_CAP_0(x) (0x04c + x*0x40) /* Port N Packet/Control Symbol Capture 0 CSR */
+#define RIO_EM_PN_PKT_CAP_1(x) (0x050 + x*0x40) /* Port N Packet Capture 1 CSR */
+#define RIO_EM_PN_PKT_CAP_2(x) (0x054 + x*0x40) /* Port N Packet Capture 2 CSR */
+#define RIO_EM_PN_PKT_CAP_3(x) (0x058 + x*0x40) /* Port N Packet Capture 3 CSR */
+#define RIO_EM_PN_ERRRATE(x) (0x068 + x*0x40) /* Port N Error Rate CSR */
+#define RIO_EM_PN_ERRRATE_TR(x) (0x06c + x*0x40) /* Port N Error Rate Threshold CSR */
+
+#endif /* LINUX_RIO_REGS_H */
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
new file mode 100644
index 000000000..c89c53a11
--- /dev/null
+++ b/include/linux/rmap.h
@@ -0,0 +1,287 @@
+#ifndef _LINUX_RMAP_H
+#define _LINUX_RMAP_H
+/*
+ * Declarations for Reverse Mapping functions in mm/rmap.c
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/rwsem.h>
+#include <linux/memcontrol.h>
+
+/*
+ * The anon_vma heads a list of private "related" vmas, to scan if
+ * an anonymous page pointing to this anon_vma needs to be unmapped:
+ * the vmas on the list will be related by forking, or by splitting.
+ *
+ * Since vmas come and go as they are split and merged (particularly
+ * in mprotect), the mapping field of an anonymous page cannot point
+ * directly to a vma: instead it points to an anon_vma, on whose list
+ * the related vmas can be easily linked or unlinked.
+ *
+ * After unlinking the last vma on the list, we must garbage collect
+ * the anon_vma object itself: we're guaranteed no page can be
+ * pointing to this anon_vma once its vma list is empty.
+ */
+struct anon_vma {
+ struct anon_vma *root; /* Root of this anon_vma tree */
+ struct rw_semaphore rwsem; /* W: modification, R: walking the list */
+ /*
+ * The refcount is taken on an anon_vma when there is no
+ * guarantee that the vma of page tables will exist for
+ * the duration of the operation. A caller that takes
+ * the reference is responsible for clearing up the
+ * anon_vma if they are the last user on release
+ */
+ atomic_t refcount;
+
+ /*
+ * Count of child anon_vmas and VMAs which point to this anon_vma.
+ *
+ * This counter is used when deciding whether to reuse an anon_vma
+ * instead of forking a new one. See comments in function anon_vma_clone.
+ */
+ unsigned degree;
+
+ struct anon_vma *parent; /* Parent of this anon_vma */
+
+ /*
+ * NOTE: the LSB of the rb_root.rb_node is set by
+ * mm_take_all_locks() _after_ taking the above lock. So the
+ * rb_root must only be read/written after taking the above lock
+ * to be sure to see a valid next pointer. The LSB bit itself
+ * is serialized by a system wide lock only visible to
+ * mm_take_all_locks() (mm_all_locks_mutex).
+ */
+ struct rb_root rb_root; /* Interval tree of private "related" vmas */
+};
+
+/*
+ * The copy-on-write semantics of fork mean that an anon_vma
+ * can become associated with multiple processes. Furthermore,
+ * each child process will have its own anon_vma, where new
+ * pages for that process are instantiated.
+ *
+ * This structure allows us to find the anon_vmas associated
+ * with a VMA, or the VMAs associated with an anon_vma.
+ * The "same_vma" list contains the anon_vma_chains linking
+ * all the anon_vmas associated with this VMA.
+ * The "rb" field indexes on an interval tree the anon_vma_chains
+ * which link all the VMAs associated with this anon_vma.
+ */
+struct anon_vma_chain {
+ struct vm_area_struct *vma;
+ struct anon_vma *anon_vma;
+ struct list_head same_vma; /* locked by mmap_sem & page_table_lock */
+ struct rb_node rb; /* locked by anon_vma->rwsem */
+ unsigned long rb_subtree_last;
+#ifdef CONFIG_DEBUG_VM_RB
+ unsigned long cached_vma_start, cached_vma_last;
+#endif
+};
+
+enum ttu_flags {
+ TTU_UNMAP = 1, /* unmap mode */
+ TTU_MIGRATION = 2, /* migration mode */
+ TTU_MUNLOCK = 4, /* munlock mode */
+
+ TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
+ TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
+ TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
+};
+
+#ifdef CONFIG_MMU
+static inline void get_anon_vma(struct anon_vma *anon_vma)
+{
+ atomic_inc(&anon_vma->refcount);
+}
+
+void __put_anon_vma(struct anon_vma *anon_vma);
+
+static inline void put_anon_vma(struct anon_vma *anon_vma)
+{
+ if (atomic_dec_and_test(&anon_vma->refcount))
+ __put_anon_vma(anon_vma);
+}
+
+static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma *anon_vma = vma->anon_vma;
+ if (anon_vma)
+ down_write(&anon_vma->root->rwsem);
+}
+
+static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
+{
+ struct anon_vma *anon_vma = vma->anon_vma;
+ if (anon_vma)
+ up_write(&anon_vma->root->rwsem);
+}
+
+static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
+{
+ down_write(&anon_vma->root->rwsem);
+}
+
+static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
+{
+ up_write(&anon_vma->root->rwsem);
+}
+
+static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
+{
+ down_read(&anon_vma->root->rwsem);
+}
+
+static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
+{
+ up_read(&anon_vma->root->rwsem);
+}
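
/*
 * Illustrative sketch (not part of the original header): a caller with no
 * guarantee that the VMA (and hence the anon_vma) stays around pins the
 * anon_vma with a reference before taking the read lock, and drops both
 * again when it is done.
 */
static void example_inspect_anon_vma(struct anon_vma *anon_vma)
{
	get_anon_vma(anon_vma);
	anon_vma_lock_read(anon_vma);
	/* ... walk anon_vma->rb_root under the read lock ... */
	anon_vma_unlock_read(anon_vma);
	put_anon_vma(anon_vma);
}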
+
+
+/*
+ * anon_vma helper functions.
+ */
+void anon_vma_init(void); /* create anon_vma_cachep */
+int anon_vma_prepare(struct vm_area_struct *);
+void unlink_anon_vmas(struct vm_area_struct *);
+int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
+int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
+
+static inline void anon_vma_merge(struct vm_area_struct *vma,
+ struct vm_area_struct *next)
+{
+ VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
+ unlink_anon_vmas(next);
+}
+
+struct anon_vma *page_get_anon_vma(struct page *page);
+
+/*
+ * rmap interfaces called when adding or removing pte of page
+ */
+void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
+ unsigned long, int);
+void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_add_file_rmap(struct page *);
+void page_remove_rmap(struct page *);
+
+void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+ unsigned long);
+void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
+ unsigned long);
+
+static inline void page_dup_rmap(struct page *page)
+{
+ atomic_inc(&page->_mapcount);
+}
+
+/*
+ * Called from mm/vmscan.c to handle paging out
+ */
+int page_referenced(struct page *, int is_locked,
+ struct mem_cgroup *memcg, unsigned long *vm_flags);
+
+#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
+
+int try_to_unmap(struct page *, enum ttu_flags flags);
+
+/*
+ * Used by uprobes to replace a userspace page safely
+ */
+pte_t *__page_check_address(struct page *, struct mm_struct *,
+ unsigned long, spinlock_t **, int);
+
+static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+ unsigned long address,
+ spinlock_t **ptlp, int sync)
+{
+ pte_t *ptep;
+
+ __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
+ ptlp, sync));
+ return ptep;
+}
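
/*
 * Illustrative sketch (not part of the original header): typical use of
 * page_check_address() - on success the mapped pte is returned with the
 * page-table lock held through *ptlp, so the caller must pte_unmap_unlock()
 * when finished.
 */
static bool example_page_mapped_at(struct page *page, struct mm_struct *mm,
				   unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return false;
	/* ... inspect or modify the pte here ... */
	pte_unmap_unlock(pte, ptl);
	return true;
}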
+
+/*
+ * Used by swapoff to help locate where page is expected in vma.
+ */
+unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
+
+/*
+ * Cleans the PTEs of shared mappings.
+ * (and since clean PTEs should also be readonly, write protects them too)
+ *
+ * returns the number of cleaned PTEs.
+ */
+int page_mkclean(struct page *);
+
+/*
+ * called in munlock()/munmap() path to check for other vmas holding
+ * the page mlocked.
+ */
+int try_to_munlock(struct page *);
+
+/*
+ * Called by memory-failure.c to kill processes.
+ */
+struct anon_vma *page_lock_anon_vma_read(struct page *page);
+void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
+int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
+
+/*
+ * rmap_walk_control: To control rmap traversing for specific needs
+ *
+ * arg: passed to rmap_one() and invalid_vma()
+ * rmap_one: executed on each vma where page is mapped
+ * done: checks the termination condition for the traversal
+ * anon_lock: takes the anon_vma lock in an optimized way instead of the default
+ * invalid_vma: skips vmas that are not of interest to the walk
+ */
+struct rmap_walk_control {
+ void *arg;
+ int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, void *arg);
+ int (*done)(struct page *page);
+ struct anon_vma *(*anon_lock)(struct page *page);
+ bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
+};
+
+int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
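
/*
 * Illustrative sketch (not part of the original header): a minimal reverse-map
 * walk that visits every mapping of a page.  Returning SWAP_AGAIN (defined at
 * the end of this header) from rmap_one keeps the traversal going; "arg" is
 * handed back to the callbacks unchanged.  The counting is example-only work.
 */
static int example_rmap_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long addr, void *arg)
{
	unsigned long *mappings = arg;

	(*mappings)++;			/* example per-mapping work */
	return SWAP_AGAIN;		/* continue the walk */
}

static unsigned long example_count_mappings(struct page *page)
{
	unsigned long mappings = 0;
	struct rmap_walk_control rwc = {
		.arg		= &mappings,
		.rmap_one	= example_rmap_one,
	};

	rmap_walk(page, &rwc);
	return mappings;
}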
+
+#else /* !CONFIG_MMU */
+
+#define anon_vma_init() do {} while (0)
+#define anon_vma_prepare(vma) (0)
+#define anon_vma_link(vma) do {} while (0)
+
+static inline int page_referenced(struct page *page, int is_locked,
+ struct mem_cgroup *memcg,
+ unsigned long *vm_flags)
+{
+ *vm_flags = 0;
+ return 0;
+}
+
+#define try_to_unmap(page, refs) SWAP_FAIL
+
+static inline int page_mkclean(struct page *page)
+{
+ return 0;
+}
+
+
+#endif /* CONFIG_MMU */
+
+/*
+ * Return values of try_to_unmap
+ */
+#define SWAP_SUCCESS 0
+#define SWAP_AGAIN 1
+#define SWAP_FAIL 2
+#define SWAP_MLOCK 3
+
+#endif /* _LINUX_RMAP_H */
diff --git a/include/linux/rndis.h b/include/linux/rndis.h
new file mode 100644
index 000000000..93c0a64ae
--- /dev/null
+++ b/include/linux/rndis.h
@@ -0,0 +1,391 @@
+/*
+ * Remote Network Driver Interface Specification (RNDIS)
+ * definitions of the magic numbers used by this protocol
+ */
+
+/* Remote NDIS Versions */
+#define RNDIS_MAJOR_VERSION 0x00000001
+#define RNDIS_MINOR_VERSION 0x00000000
+
+/* Device Flags */
+#define RNDIS_DF_CONNECTIONLESS 0x00000001U
+#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002U
+#define RNDIS_DF_RAW_DATA 0x00000004U
+
+/*
+ * Codes for "msg_type" field of rndis messages;
+ * only the data channel uses packet messages (maybe batched);
+ * everything else goes on the control channel.
+ */
+#define RNDIS_MSG_COMPLETION 0x80000000
+#define RNDIS_MSG_PACKET 0x00000001 /* 1-N packets */
+#define RNDIS_MSG_INIT 0x00000002
+#define RNDIS_MSG_INIT_C (RNDIS_MSG_INIT|RNDIS_MSG_COMPLETION)
+#define RNDIS_MSG_HALT 0x00000003
+#define RNDIS_MSG_QUERY 0x00000004
+#define RNDIS_MSG_QUERY_C (RNDIS_MSG_QUERY|RNDIS_MSG_COMPLETION)
+#define RNDIS_MSG_SET 0x00000005
+#define RNDIS_MSG_SET_C (RNDIS_MSG_SET|RNDIS_MSG_COMPLETION)
+#define RNDIS_MSG_RESET 0x00000006
+#define RNDIS_MSG_RESET_C (RNDIS_MSG_RESET|RNDIS_MSG_COMPLETION)
+#define RNDIS_MSG_INDICATE 0x00000007
+#define RNDIS_MSG_KEEPALIVE 0x00000008
+#define RNDIS_MSG_KEEPALIVE_C (RNDIS_MSG_KEEPALIVE|RNDIS_MSG_COMPLETION)
+/*
+ * Reserved message type for private communication between lower-layer host
+ * driver and remote device, if necessary.
+ */
+#define RNDIS_MSG_BUS 0xff000001
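
/*
 * Illustrative sketch (not part of the original header, assumes
 * <linux/types.h>): the completion code for a control request is simply the
 * request code with RNDIS_MSG_COMPLETION set, so a response can be matched
 * to the request it answers with a single compare.
 */
static inline bool rndis_msg_is_completion_of(u32 msg_type, u32 request)
{
	return msg_type == (request | RNDIS_MSG_COMPLETION);
}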
+
+/* codes for "status" field of completion messages */
+#define RNDIS_STATUS_SUCCESS 0x00000000
+#define RNDIS_STATUS_PENDING 0x00000103
+
+/* Status codes */
+#define RNDIS_STATUS_NOT_RECOGNIZED 0x00010001
+#define RNDIS_STATUS_NOT_COPIED 0x00010002
+#define RNDIS_STATUS_NOT_ACCEPTED 0x00010003
+#define RNDIS_STATUS_CALL_ACTIVE 0x00010007
+
+#define RNDIS_STATUS_ONLINE 0x40010003
+#define RNDIS_STATUS_RESET_START 0x40010004
+#define RNDIS_STATUS_RESET_END 0x40010005
+#define RNDIS_STATUS_RING_STATUS 0x40010006
+#define RNDIS_STATUS_CLOSED 0x40010007
+#define RNDIS_STATUS_WAN_LINE_UP 0x40010008
+#define RNDIS_STATUS_WAN_LINE_DOWN 0x40010009
+#define RNDIS_STATUS_WAN_FRAGMENT 0x4001000A
+#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000B
+#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000C
+#define RNDIS_STATUS_HARDWARE_LINE_UP 0x4001000D
+#define RNDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E
+#define RNDIS_STATUS_INTERFACE_UP 0x4001000F
+#define RNDIS_STATUS_INTERFACE_DOWN 0x40010010
+#define RNDIS_STATUS_MEDIA_BUSY 0x40010011
+#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012
+#define RNDIS_STATUS_WW_INDICATION RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION
+#define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013L
+#define RNDIS_STATUS_NETWORK_CHANGE 0x40010018
+
+#define RNDIS_STATUS_NOT_RESETTABLE 0x80010001
+#define RNDIS_STATUS_SOFT_ERRORS 0x80010003
+#define RNDIS_STATUS_HARD_ERRORS 0x80010004
+#define RNDIS_STATUS_BUFFER_OVERFLOW 0x80000005
+
+#define RNDIS_STATUS_FAILURE 0xC0000001
+#define RNDIS_STATUS_RESOURCES 0xC000009A
+#define RNDIS_STATUS_NOT_SUPPORTED 0xc00000BB
+#define RNDIS_STATUS_CLOSING 0xC0010002
+#define RNDIS_STATUS_BAD_VERSION 0xC0010004
+#define RNDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005
+#define RNDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006
+#define RNDIS_STATUS_OPEN_FAILED 0xC0010007
+#define RNDIS_STATUS_DEVICE_FAILED 0xC0010008
+#define RNDIS_STATUS_MULTICAST_FULL 0xC0010009
+#define RNDIS_STATUS_MULTICAST_EXISTS 0xC001000A
+#define RNDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B
+#define RNDIS_STATUS_REQUEST_ABORTED 0xC001000C
+#define RNDIS_STATUS_RESET_IN_PROGRESS 0xC001000D
+#define RNDIS_STATUS_CLOSING_INDICATING 0xC001000E
+#define RNDIS_STATUS_INVALID_PACKET 0xC001000F
+#define RNDIS_STATUS_OPEN_LIST_FULL 0xC0010010
+#define RNDIS_STATUS_ADAPTER_NOT_READY 0xC0010011
+#define RNDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012
+#define RNDIS_STATUS_NOT_INDICATING 0xC0010013
+#define RNDIS_STATUS_INVALID_LENGTH 0xC0010014
+#define RNDIS_STATUS_INVALID_DATA 0xC0010015
+#define RNDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016
+#define RNDIS_STATUS_INVALID_OID 0xC0010017
+#define RNDIS_STATUS_ADAPTER_REMOVED 0xC0010018
+#define RNDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019
+#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE 0xC001001A
+#define RNDIS_STATUS_FILE_NOT_FOUND 0xC001001B
+#define RNDIS_STATUS_ERROR_READING_FILE 0xC001001C
+#define RNDIS_STATUS_ALREADY_MAPPED 0xC001001D
+#define RNDIS_STATUS_RESOURCE_CONFLICT 0xC001001E
+#define RNDIS_STATUS_NO_CABLE 0xC001001F
+
+#define RNDIS_STATUS_INVALID_SAP 0xC0010020
+#define RNDIS_STATUS_SAP_IN_USE 0xC0010021
+#define RNDIS_STATUS_INVALID_ADDRESS 0xC0010022
+#define RNDIS_STATUS_VC_NOT_ACTIVATED 0xC0010023
+#define RNDIS_STATUS_DEST_OUT_OF_ORDER 0xC0010024
+#define RNDIS_STATUS_VC_NOT_AVAILABLE 0xC0010025
+#define RNDIS_STATUS_CELLRATE_NOT_AVAILABLE 0xC0010026
+#define RNDIS_STATUS_INCOMPATABLE_QOS 0xC0010027
+#define RNDIS_STATUS_AAL_PARAMS_UNSUPPORTED 0xC0010028
+#define RNDIS_STATUS_NO_ROUTE_TO_DESTINATION 0xC0010029
+
+#define RNDIS_STATUS_TOKEN_RING_OPEN_ERROR 0xC0011000
+
+/* codes for RNDIS_OID_GEN_PHYSICAL_MEDIUM */
+#define RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED 0x00000000
+#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN 0x00000001
+#define RNDIS_PHYSICAL_MEDIUM_CABLE_MODEM 0x00000002
+#define RNDIS_PHYSICAL_MEDIUM_PHONE_LINE 0x00000003
+#define RNDIS_PHYSICAL_MEDIUM_POWER_LINE 0x00000004
+#define RNDIS_PHYSICAL_MEDIUM_DSL 0x00000005
+#define RNDIS_PHYSICAL_MEDIUM_FIBRE_CHANNEL 0x00000006
+#define RNDIS_PHYSICAL_MEDIUM_1394 0x00000007
+#define RNDIS_PHYSICAL_MEDIUM_WIRELESS_WAN 0x00000008
+#define RNDIS_PHYSICAL_MEDIUM_MAX 0x00000009
+
+/* Remote NDIS medium types. */
+#define RNDIS_MEDIUM_UNSPECIFIED 0x00000000
+#define RNDIS_MEDIUM_802_3 0x00000000
+#define RNDIS_MEDIUM_802_5 0x00000001
+#define RNDIS_MEDIUM_FDDI 0x00000002
+#define RNDIS_MEDIUM_WAN 0x00000003
+#define RNDIS_MEDIUM_LOCAL_TALK 0x00000004
+#define RNDIS_MEDIUM_ARCNET_RAW 0x00000006
+#define RNDIS_MEDIUM_ARCNET_878_2 0x00000007
+#define RNDIS_MEDIUM_ATM 0x00000008
+#define RNDIS_MEDIUM_WIRELESS_LAN 0x00000009
+#define RNDIS_MEDIUM_IRDA 0x0000000A
+#define RNDIS_MEDIUM_BPC 0x0000000B
+#define RNDIS_MEDIUM_CO_WAN 0x0000000C
+#define RNDIS_MEDIUM_1394 0x0000000D
+/* Not a real medium, defined as an upper-bound */
+#define RNDIS_MEDIUM_MAX 0x0000000E
+
+/* Remote NDIS medium connection states. */
+#define RNDIS_MEDIA_STATE_CONNECTED 0x00000000
+#define RNDIS_MEDIA_STATE_DISCONNECTED 0x00000001
+
+/* packet filter bits used by RNDIS_OID_GEN_CURRENT_PACKET_FILTER */
+#define RNDIS_PACKET_TYPE_DIRECTED 0x00000001
+#define RNDIS_PACKET_TYPE_MULTICAST 0x00000002
+#define RNDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
+#define RNDIS_PACKET_TYPE_BROADCAST 0x00000008
+#define RNDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
+#define RNDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
+#define RNDIS_PACKET_TYPE_SMT 0x00000040
+#define RNDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
+#define RNDIS_PACKET_TYPE_GROUP 0x00001000
+#define RNDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000
+#define RNDIS_PACKET_TYPE_FUNCTIONAL 0x00004000
+#define RNDIS_PACKET_TYPE_MAC_FRAME 0x00008000
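
/*
 * Illustrative sketch (not part of the original header): hosts program
 * RNDIS_OID_GEN_CURRENT_PACKET_FILTER with an OR of the bits above; the
 * combination below (unicast + broadcast + all-multicast) is shown purely
 * as an example value.
 */
#define RNDIS_EXAMPLE_PACKET_FILTER	(RNDIS_PACKET_TYPE_DIRECTED | \
					 RNDIS_PACKET_TYPE_BROADCAST | \
					 RNDIS_PACKET_TYPE_ALL_MULTICAST)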
+
+/* RNDIS_OID_GEN_MINIPORT_INFO constants */
+#define RNDIS_MINIPORT_BUS_MASTER 0x00000001
+#define RNDIS_MINIPORT_WDM_DRIVER 0x00000002
+#define RNDIS_MINIPORT_SG_LIST 0x00000004
+#define RNDIS_MINIPORT_SUPPORTS_MEDIA_QUERY 0x00000008
+#define RNDIS_MINIPORT_INDICATES_PACKETS 0x00000010
+#define RNDIS_MINIPORT_IGNORE_PACKET_QUEUE 0x00000020
+#define RNDIS_MINIPORT_IGNORE_REQUEST_QUEUE 0x00000040
+#define RNDIS_MINIPORT_IGNORE_TOKEN_RING_ERRORS 0x00000080
+#define RNDIS_MINIPORT_INTERMEDIATE_DRIVER 0x00000100
+#define RNDIS_MINIPORT_IS_NDIS_5 0x00000200
+#define RNDIS_MINIPORT_IS_CO 0x00000400
+#define RNDIS_MINIPORT_DESERIALIZE 0x00000800
+#define RNDIS_MINIPORT_REQUIRES_MEDIA_POLLING 0x00001000
+#define RNDIS_MINIPORT_SUPPORTS_MEDIA_SENSE 0x00002000
+#define RNDIS_MINIPORT_NETBOOT_CARD 0x00004000
+#define RNDIS_MINIPORT_PM_SUPPORTED 0x00008000
+#define RNDIS_MINIPORT_SUPPORTS_MAC_ADDRESS_OVERWRITE 0x00010000
+#define RNDIS_MINIPORT_USES_SAFE_BUFFER_APIS 0x00020000
+#define RNDIS_MINIPORT_HIDDEN 0x00040000
+#define RNDIS_MINIPORT_SWENUM 0x00080000
+#define RNDIS_MINIPORT_SURPRISE_REMOVE_OK 0x00100000
+#define RNDIS_MINIPORT_NO_HALT_ON_SUSPEND 0x00200000
+#define RNDIS_MINIPORT_HARDWARE_DEVICE 0x00400000
+#define RNDIS_MINIPORT_SUPPORTS_CANCEL_SEND_PACKETS 0x00800000
+#define RNDIS_MINIPORT_64BITS_DMA 0x01000000
+
+#define RNDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA 0x00000001
+#define RNDIS_MAC_OPTION_RECEIVE_SERIALIZED 0x00000002
+#define RNDIS_MAC_OPTION_TRANSFERS_NOT_PEND 0x00000004
+#define RNDIS_MAC_OPTION_NO_LOOPBACK 0x00000008
+#define RNDIS_MAC_OPTION_FULL_DUPLEX 0x00000010
+#define RNDIS_MAC_OPTION_EOTX_INDICATION 0x00000020
+#define RNDIS_MAC_OPTION_8021P_PRIORITY 0x00000040
+#define RNDIS_MAC_OPTION_RESERVED 0x80000000
+
+/* Object Identifiers used by NdisRequest Query/Set Information */
+/* General (Required) Objects */
+#define RNDIS_OID_GEN_SUPPORTED_LIST 0x00010101
+#define RNDIS_OID_GEN_HARDWARE_STATUS 0x00010102
+#define RNDIS_OID_GEN_MEDIA_SUPPORTED 0x00010103
+#define RNDIS_OID_GEN_MEDIA_IN_USE 0x00010104
+#define RNDIS_OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
+#define RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
+#define RNDIS_OID_GEN_LINK_SPEED 0x00010107
+#define RNDIS_OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
+#define RNDIS_OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
+#define RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
+#define RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
+#define RNDIS_OID_GEN_VENDOR_ID 0x0001010C
+#define RNDIS_OID_GEN_VENDOR_DESCRIPTION 0x0001010D
+#define RNDIS_OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
+#define RNDIS_OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
+#define RNDIS_OID_GEN_DRIVER_VERSION 0x00010110
+#define RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
+#define RNDIS_OID_GEN_PROTOCOL_OPTIONS 0x00010112
+#define RNDIS_OID_GEN_MAC_OPTIONS 0x00010113
+#define RNDIS_OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
+#define RNDIS_OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
+#define RNDIS_OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
+#define RNDIS_OID_GEN_SUPPORTED_GUIDS 0x00010117
+#define RNDIS_OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
+#define RNDIS_OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
+#define RNDIS_OID_GEN_PHYSICAL_MEDIUM 0x00010202
+#define RNDIS_OID_GEN_MACHINE_NAME 0x0001021A
+#define RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
+#define RNDIS_OID_GEN_VLAN_ID 0x0001021C
+
+/* Optional OIDs */
+#define RNDIS_OID_GEN_MEDIA_CAPABILITIES 0x00010201
+
+/* Required statistics OIDs */
+#define RNDIS_OID_GEN_XMIT_OK 0x00020101
+#define RNDIS_OID_GEN_RCV_OK 0x00020102
+#define RNDIS_OID_GEN_XMIT_ERROR 0x00020103
+#define RNDIS_OID_GEN_RCV_ERROR 0x00020104
+#define RNDIS_OID_GEN_RCV_NO_BUFFER 0x00020105
+
+/* Optional statistics OIDs */
+#define RNDIS_OID_GEN_DIRECTED_BYTES_XMIT 0x00020201
+#define RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
+#define RNDIS_OID_GEN_MULTICAST_BYTES_XMIT 0x00020203
+#define RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
+#define RNDIS_OID_GEN_BROADCAST_BYTES_XMIT 0x00020205
+#define RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
+#define RNDIS_OID_GEN_DIRECTED_BYTES_RCV 0x00020207
+#define RNDIS_OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
+#define RNDIS_OID_GEN_MULTICAST_BYTES_RCV 0x00020209
+#define RNDIS_OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
+#define RNDIS_OID_GEN_BROADCAST_BYTES_RCV 0x0002020B
+#define RNDIS_OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
+
+#define RNDIS_OID_GEN_RCV_CRC_ERROR 0x0002020D
+#define RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
+
+#define RNDIS_OID_GEN_GET_TIME_CAPS 0x0002020F
+#define RNDIS_OID_GEN_GET_NETCARD_TIME 0x00020210
+
+#define RNDIS_OID_GEN_NETCARD_LOAD 0x00020211
+#define RNDIS_OID_GEN_DEVICE_PROFILE 0x00020212
+#define RNDIS_OID_GEN_INIT_TIME_MS 0x00020213
+#define RNDIS_OID_GEN_RESET_COUNTS 0x00020214
+#define RNDIS_OID_GEN_MEDIA_SENSE_COUNTS 0x00020215
+#define RNDIS_OID_GEN_FRIENDLY_NAME 0x00020216
+#define RNDIS_OID_GEN_MINIPORT_INFO 0x00020217
+#define RNDIS_OID_GEN_RESET_VERIFY_PARAMETERS 0x00020218
+
+/* These are connection-oriented general OIDs. */
+/* These replace the above OIDs for connection-oriented media. */
+#define RNDIS_OID_GEN_CO_SUPPORTED_LIST 0x00010101
+#define RNDIS_OID_GEN_CO_HARDWARE_STATUS 0x00010102
+#define RNDIS_OID_GEN_CO_MEDIA_SUPPORTED 0x00010103
+#define RNDIS_OID_GEN_CO_MEDIA_IN_USE 0x00010104
+#define RNDIS_OID_GEN_CO_LINK_SPEED 0x00010105
+#define RNDIS_OID_GEN_CO_VENDOR_ID 0x00010106
+#define RNDIS_OID_GEN_CO_VENDOR_DESCRIPTION 0x00010107
+#define RNDIS_OID_GEN_CO_DRIVER_VERSION 0x00010108
+#define RNDIS_OID_GEN_CO_PROTOCOL_OPTIONS 0x00010109
+#define RNDIS_OID_GEN_CO_MAC_OPTIONS 0x0001010A
+#define RNDIS_OID_GEN_CO_MEDIA_CONNECT_STATUS 0x0001010B
+#define RNDIS_OID_GEN_CO_VENDOR_DRIVER_VERSION 0x0001010C
+#define RNDIS_OID_GEN_CO_MINIMUM_LINK_SPEED 0x0001010D
+
+#define RNDIS_OID_GEN_CO_GET_TIME_CAPS 0x00010201
+#define RNDIS_OID_GEN_CO_GET_NETCARD_TIME 0x00010202
+
+/* These are connection-oriented statistics OIDs. */
+#define RNDIS_OID_GEN_CO_XMIT_PDUS_OK 0x00020101
+#define RNDIS_OID_GEN_CO_RCV_PDUS_OK 0x00020102
+#define RNDIS_OID_GEN_CO_XMIT_PDUS_ERROR 0x00020103
+#define RNDIS_OID_GEN_CO_RCV_PDUS_ERROR 0x00020104
+#define RNDIS_OID_GEN_CO_RCV_PDUS_NO_BUFFER 0x00020105
+
+
+#define RNDIS_OID_GEN_CO_RCV_CRC_ERROR 0x00020201
+#define RNDIS_OID_GEN_CO_TRANSMIT_QUEUE_LENGTH 0x00020202
+#define RNDIS_OID_GEN_CO_BYTES_XMIT 0x00020203
+#define RNDIS_OID_GEN_CO_BYTES_RCV 0x00020204
+#define RNDIS_OID_GEN_CO_BYTES_XMIT_OUTSTANDING 0x00020205
+#define RNDIS_OID_GEN_CO_NETCARD_LOAD 0x00020206
+
+/* These are objects for Connection-oriented media call-managers. */
+#define RNDIS_OID_CO_ADD_PVC 0xFF000001
+#define RNDIS_OID_CO_DELETE_PVC 0xFF000002
+#define RNDIS_OID_CO_GET_CALL_INFORMATION 0xFF000003
+#define RNDIS_OID_CO_ADD_ADDRESS 0xFF000004
+#define RNDIS_OID_CO_DELETE_ADDRESS 0xFF000005
+#define RNDIS_OID_CO_GET_ADDRESSES 0xFF000006
+#define RNDIS_OID_CO_ADDRESS_CHANGE 0xFF000007
+#define RNDIS_OID_CO_SIGNALING_ENABLED 0xFF000008
+#define RNDIS_OID_CO_SIGNALING_DISABLED 0xFF000009
+
+/* 802.3 Objects (Ethernet) */
+#define RNDIS_OID_802_3_PERMANENT_ADDRESS 0x01010101
+#define RNDIS_OID_802_3_CURRENT_ADDRESS 0x01010102
+#define RNDIS_OID_802_3_MULTICAST_LIST 0x01010103
+#define RNDIS_OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
+#define RNDIS_OID_802_3_MAC_OPTIONS 0x01010105
+
+#define RNDIS_802_3_MAC_OPTION_PRIORITY 0x00000001
+
+#define RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
+#define RNDIS_OID_802_3_XMIT_ONE_COLLISION 0x01020102
+#define RNDIS_OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
+
+#define RNDIS_OID_802_3_XMIT_DEFERRED 0x01020201
+#define RNDIS_OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
+#define RNDIS_OID_802_3_RCV_OVERRUN 0x01020203
+#define RNDIS_OID_802_3_XMIT_UNDERRUN 0x01020204
+#define RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
+#define RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
+#define RNDIS_OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
+
+#define RNDIS_OID_802_11_BSSID 0x0d010101
+#define RNDIS_OID_802_11_SSID 0x0d010102
+#define RNDIS_OID_802_11_INFRASTRUCTURE_MODE 0x0d010108
+#define RNDIS_OID_802_11_ADD_WEP 0x0d010113
+#define RNDIS_OID_802_11_REMOVE_WEP 0x0d010114
+#define RNDIS_OID_802_11_DISASSOCIATE 0x0d010115
+#define RNDIS_OID_802_11_AUTHENTICATION_MODE 0x0d010118
+#define RNDIS_OID_802_11_PRIVACY_FILTER 0x0d010119
+#define RNDIS_OID_802_11_BSSID_LIST_SCAN 0x0d01011a
+#define RNDIS_OID_802_11_ENCRYPTION_STATUS 0x0d01011b
+#define RNDIS_OID_802_11_ADD_KEY 0x0d01011d
+#define RNDIS_OID_802_11_REMOVE_KEY 0x0d01011e
+#define RNDIS_OID_802_11_ASSOCIATION_INFORMATION 0x0d01011f
+#define RNDIS_OID_802_11_CAPABILITY 0x0d010122
+#define RNDIS_OID_802_11_PMKID 0x0d010123
+#define RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED 0x0d010203
+#define RNDIS_OID_802_11_NETWORK_TYPE_IN_USE 0x0d010204
+#define RNDIS_OID_802_11_TX_POWER_LEVEL 0x0d010205
+#define RNDIS_OID_802_11_RSSI 0x0d010206
+#define RNDIS_OID_802_11_RSSI_TRIGGER 0x0d010207
+#define RNDIS_OID_802_11_FRAGMENTATION_THRESHOLD 0x0d010209
+#define RNDIS_OID_802_11_RTS_THRESHOLD 0x0d01020a
+#define RNDIS_OID_802_11_SUPPORTED_RATES 0x0d01020e
+#define RNDIS_OID_802_11_CONFIGURATION 0x0d010211
+#define RNDIS_OID_802_11_POWER_MODE 0x0d010216
+#define RNDIS_OID_802_11_BSSID_LIST 0x0d010217
+
+/* Plug and Play capabilities */
+#define RNDIS_OID_PNP_CAPABILITIES 0xFD010100
+#define RNDIS_OID_PNP_SET_POWER 0xFD010101
+#define RNDIS_OID_PNP_QUERY_POWER 0xFD010102
+#define RNDIS_OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103
+#define RNDIS_OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104
+#define RNDIS_OID_PNP_ENABLE_WAKE_UP 0xFD010106
+
+/* RNDIS_PNP_CAPABILITIES.Flags constants */
+#define RNDIS_DEVICE_WAKE_UP_ENABLE 0x00000001
+#define RNDIS_DEVICE_WAKE_ON_PATTERN_MATCH_ENABLE 0x00000002
+#define RNDIS_DEVICE_WAKE_ON_MAGIC_PACKET_ENABLE 0x00000004
+
+#define REMOTE_CONDIS_MP_CREATE_VC_MSG 0x00008001
+#define REMOTE_CONDIS_MP_DELETE_VC_MSG 0x00008002
+#define REMOTE_CONDIS_MP_ACTIVATE_VC_MSG 0x00008005
+#define REMOTE_CONDIS_MP_DEACTIVATE_VC_MSG 0x00008006
+#define REMOTE_CONDIS_INDICATE_STATUS_MSG 0x00008007
+
+#define REMOTE_CONDIS_MP_CREATE_VC_CMPLT 0x80008001
+#define REMOTE_CONDIS_MP_DELETE_VC_CMPLT 0x80008002
+#define REMOTE_CONDIS_MP_ACTIVATE_VC_CMPLT 0x80008005
+#define REMOTE_CONDIS_MP_DEACTIVATE_VC_CMPLT 0x80008006
diff --git a/include/linux/root_dev.h b/include/linux/root_dev.h
new file mode 100644
index 000000000..ed241aad7
--- /dev/null
+++ b/include/linux/root_dev.h
@@ -0,0 +1,23 @@
+#ifndef _ROOT_DEV_H_
+#define _ROOT_DEV_H_
+
+#include <linux/major.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+enum {
+ Root_NFS = MKDEV(UNNAMED_MAJOR, 255),
+ Root_RAM0 = MKDEV(RAMDISK_MAJOR, 0),
+ Root_RAM1 = MKDEV(RAMDISK_MAJOR, 1),
+ Root_FD0 = MKDEV(FLOPPY_MAJOR, 0),
+ Root_HDA1 = MKDEV(IDE0_MAJOR, 1),
+ Root_HDA2 = MKDEV(IDE0_MAJOR, 2),
+ Root_SDA1 = MKDEV(SCSI_DISK0_MAJOR, 1),
+ Root_SDA2 = MKDEV(SCSI_DISK0_MAJOR, 2),
+ Root_HDC1 = MKDEV(IDE1_MAJOR, 1),
+ Root_SR0 = MKDEV(SCSI_CDROM_MAJOR, 0),
+};
+
+extern dev_t ROOT_DEV;
+
+#endif
diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h
new file mode 100644
index 000000000..3f594dce5
--- /dev/null
+++ b/include/linux/rotary_encoder.h
@@ -0,0 +1,16 @@
+#ifndef __ROTARY_ENCODER_H__
+#define __ROTARY_ENCODER_H__
+
+struct rotary_encoder_platform_data {
+ unsigned int steps;
+ unsigned int axis;
+ unsigned int gpio_a;
+ unsigned int gpio_b;
+ unsigned int inverted_a;
+ unsigned int inverted_b;
+ bool relative_axis;
+ bool rollover;
+ bool half_period;
+};
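
/*
 * Illustrative sketch (not part of the original header): board code fills
 * this structure to describe how the encoder is wired and how it should be
 * reported.  The GPIO numbers and the ABS_X axis are example values only
 * and assume <linux/input.h> for the axis code.
 */
static struct rotary_encoder_platform_data example_encoder_pdata = {
	.steps		= 24,		/* detents per full turn */
	.axis		= ABS_X,
	.relative_axis	= false,
	.rollover	= false,
	.gpio_a		= 19,
	.gpio_b		= 20,
	.inverted_a	= 0,
	.inverted_b	= 1,
};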
+
+#endif /* __ROTARY_ENCODER_H__ */
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
new file mode 100644
index 000000000..82a673905
--- /dev/null
+++ b/include/linux/rpmsg.h
@@ -0,0 +1,332 @@
+/*
+ * Remote processor messaging
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011 Google, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Texas Instruments nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_RPMSG_H
+#define _LINUX_RPMSG_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+
+/* The feature bitmap for virtio rpmsg */
+#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
+
+/**
+ * struct rpmsg_hdr - common header for all rpmsg messages
+ * @src: source address
+ * @dst: destination address
+ * @reserved: reserved for future use
+ * @len: length of payload (in bytes)
+ * @flags: message flags
+ * @data: @len bytes of message payload data
+ *
+ * Every message sent(/received) on the rpmsg bus begins with this header.
+ */
+struct rpmsg_hdr {
+ u32 src;
+ u32 dst;
+ u32 reserved;
+ u16 len;
+ u16 flags;
+ u8 data[0];
+} __packed;
+
+/**
+ * struct rpmsg_ns_msg - dynamic name service announcement message
+ * @name: name of remote service that is published
+ * @addr: address of remote service that is published
+ * @flags: indicates whether service is created or destroyed
+ *
+ * This message is sent across to publish a new service, or to announce
+ * its removal. When we receive these messages, an appropriate
+ * rpmsg channel (i.e. device) is created/destroyed. In turn, the ->probe()
+ * or ->remove() handler of the appropriate rpmsg driver will be invoked
+ * (if/as-soon-as one is registered).
+ */
+struct rpmsg_ns_msg {
+ char name[RPMSG_NAME_SIZE];
+ u32 addr;
+ u32 flags;
+} __packed;
+
+/**
+ * enum rpmsg_ns_flags - dynamic name service announcement flags
+ *
+ * @RPMSG_NS_CREATE: a new remote service was just created
+ * @RPMSG_NS_DESTROY: a known remote service was just destroyed
+ */
+enum rpmsg_ns_flags {
+ RPMSG_NS_CREATE = 0,
+ RPMSG_NS_DESTROY = 1,
+};
+
+#define RPMSG_ADDR_ANY 0xFFFFFFFF
+
+struct virtproc_info;
+
+/**
+ * rpmsg_channel - devices that belong to the rpmsg bus are called channels
+ * @vrp: the remote processor this channel belongs to
+ * @dev: the device struct
+ * @id: device id (used to match between rpmsg drivers and devices)
+ * @src: local address
+ * @dst: destination address
+ * @ept: the rpmsg endpoint of this channel
+ * @announce: if set, rpmsg will announce the creation/removal of this channel
+ */
+struct rpmsg_channel {
+ struct virtproc_info *vrp;
+ struct device dev;
+ struct rpmsg_device_id id;
+ u32 src;
+ u32 dst;
+ struct rpmsg_endpoint *ept;
+ bool announce;
+};
+
+typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
+
+/**
+ * struct rpmsg_endpoint - binds a local rpmsg address to its user
+ * @rpdev: rpmsg channel device
+ * @refcount: when this drops to zero, the ept is deallocated
+ * @cb: rx callback handler
+ * @cb_lock: must be taken before accessing/changing @cb
+ * @addr: local rpmsg address
+ * @priv: private data for the driver's use
+ *
+ * In essence, an rpmsg endpoint represents a listener on the rpmsg bus, as
+ * it binds an rpmsg address with an rx callback handler.
+ *
+ * Simple rpmsg drivers shouldn't use this struct directly, because
+ * things just work: every rpmsg driver provides an rx callback upon
+ * registering to the bus, and that callback is then bound to its rpmsg
+ * address when the driver is probed. When relevant inbound messages arrive
+ * (i.e. messages whose dst address equals the src address of
+ * the rpmsg channel), the driver's handler is invoked to process them.
+ *
+ * More complicated drivers though, that do need to allocate additional rpmsg
+ * addresses, and bind them to different rx callbacks, must explicitly
+ * create additional endpoints by themselves (see rpmsg_create_ept()).
+ */
+struct rpmsg_endpoint {
+ struct rpmsg_channel *rpdev;
+ struct kref refcount;
+ rpmsg_rx_cb_t cb;
+ struct mutex cb_lock;
+ u32 addr;
+ void *priv;
+};
+
+/**
+ * struct rpmsg_driver - rpmsg driver struct
+ * @drv: underlying device driver
+ * @id_table: rpmsg ids serviced by this driver
+ * @probe: invoked when a matching rpmsg channel (i.e. device) is found
+ * @remove: invoked when the rpmsg channel is removed
+ * @callback: invoked when an inbound message is received on the channel
+ */
+struct rpmsg_driver {
+ struct device_driver drv;
+ const struct rpmsg_device_id *id_table;
+ int (*probe)(struct rpmsg_channel *dev);
+ void (*remove)(struct rpmsg_channel *dev);
+ void (*callback)(struct rpmsg_channel *, void *, int, void *, u32);
+};
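
/*
 * Illustrative sketch (not part of the original header): the skeleton of a
 * minimal rpmsg driver.  The channel name "rpmsg-example" and the handlers
 * are invented for this example; a real driver does its work in the callback
 * and registers itself with register_rpmsg_driver() from module init.
 */
static void example_rpmsg_cb(struct rpmsg_channel *rpdev, void *data, int len,
			     void *priv, u32 src)
{
	dev_info(&rpdev->dev, "received %d bytes from 0x%x\n", len, src);
}

static int example_rpmsg_probe(struct rpmsg_channel *rpdev)
{
	dev_info(&rpdev->dev, "channel 0x%x -> 0x%x\n", rpdev->src, rpdev->dst);
	return 0;
}

static void example_rpmsg_remove(struct rpmsg_channel *rpdev)
{
}

static const struct rpmsg_device_id example_rpmsg_id_table[] = {
	{ .name = "rpmsg-example" },
	{ },
};

static struct rpmsg_driver example_rpmsg_driver = {
	.drv		= { .name = "rpmsg_example" },
	.id_table	= example_rpmsg_id_table,
	.probe		= example_rpmsg_probe,
	.callback	= example_rpmsg_cb,
	.remove		= example_rpmsg_remove,
};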
+
+int register_rpmsg_device(struct rpmsg_channel *dev);
+void unregister_rpmsg_device(struct rpmsg_channel *dev);
+int register_rpmsg_driver(struct rpmsg_driver *drv);
+void unregister_rpmsg_driver(struct rpmsg_driver *drv);
+void rpmsg_destroy_ept(struct rpmsg_endpoint *);
+struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *,
+ rpmsg_rx_cb_t cb, void *priv, u32 addr);
+int
+rpmsg_send_offchannel_raw(struct rpmsg_channel *, u32, u32, void *, int, bool);
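
/*
 * Illustrative sketch (not part of the original header): a driver that needs
 * a second local rx address binds it to its own callback with
 * rpmsg_create_ept().  The address 0x400 and the callback are arbitrary
 * choices made for the example.
 */
static void example_ept_cb(struct rpmsg_channel *rpdev, void *data, int len,
			   void *priv, u32 src)
{
	/* messages addressed to local endpoint 0x400 arrive here */
}

static struct rpmsg_endpoint *example_bind_extra_addr(struct rpmsg_channel *rpdev)
{
	return rpmsg_create_ept(rpdev, example_ept_cb, NULL, 0x400);
}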
+
+/**
+ * rpmsg_send() - send a message across to the remote processor
+ * @rpdev: the rpmsg channel
+ * @data: payload of message
+ * @len: length of payload
+ *
+ * This function sends @data of length @len on the @rpdev channel.
+ * The message will be sent to the remote processor which the @rpdev
+ * channel belongs to, using @rpdev's source and destination addresses.
+ * In case there are no TX buffers available, the function will block until
+ * one becomes available, or a timeout of 15 seconds elapses. When the latter
+ * happens, -ERESTARTSYS is returned.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+static inline int rpmsg_send(struct rpmsg_channel *rpdev, void *data, int len)
+{
+ u32 src = rpdev->src, dst = rpdev->dst;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
+}
+
+/**
+ * rpmsg_sendto() - send a message across to the remote processor, specify dst
+ * @rpdev: the rpmsg channel
+ * @data: payload of message
+ * @len: length of payload
+ * @dst: destination address
+ *
+ * This function sends @data of length @len to the remote @dst address.
+ * The message will be sent to the remote processor which the @rpdev
+ * channel belongs to, using @rpdev's source address.
+ * In case there are no TX buffers available, the function will block until
+ * one becomes available, or a timeout of 15 seconds elapses. When the latter
+ * happens, -ERESTARTSYS is returned.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+static inline
+int rpmsg_sendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
+{
+ u32 src = rpdev->src;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
+}
+
+/**
+ * rpmsg_send_offchannel() - send a message using explicit src/dst addresses
+ * @rpdev: the rpmsg channel
+ * @src: source address
+ * @dst: destination address
+ * @data: payload of message
+ * @len: length of payload
+ *
+ * This function sends @data of length @len to the remote @dst address,
+ * and uses @src as the source address.
+ * The message will be sent to the remote processor which the @rpdev
+ * channel belongs to.
+ * In case there are no TX buffers available, the function will block until
+ * one becomes available, or a timeout of 15 seconds elapses. When the latter
+ * happens, -ERESTARTSYS is returned.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+static inline
+int rpmsg_send_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
+ void *data, int len)
+{
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, true);
+}
+
+/**
+ * rpmsg_trysend() - send a message across to the remote processor
+ * @rpdev: the rpmsg channel
+ * @data: payload of message
+ * @len: length of payload
+ *
+ * This function sends @data of length @len on the @rpdev channel.
+ * The message will be sent to the remote processor which the @rpdev
+ * channel belongs to, using @rpdev's source and destination addresses.
+ * In case there are no TX buffers available, the function will immediately
+ * return -ENOMEM without waiting until one becomes available.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+static inline
+int rpmsg_trysend(struct rpmsg_channel *rpdev, void *data, int len)
+{
+ u32 src = rpdev->src, dst = rpdev->dst;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
+}
+
+/**
+ * rpmsg_trysendto() - send a message across to the remote processor, specify dst
+ * @rpdev: the rpmsg channel
+ * @data: payload of message
+ * @len: length of payload
+ * @dst: destination address
+ *
+ * This function sends @data of length @len to the remote @dst address.
+ * The message will be sent to the remote processor which the @rpdev
+ * channel belongs to, using @rpdev's source address.
+ * In case there are no TX buffers available, the function will immediately
+ * return -ENOMEM without waiting until one becomes available.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+static inline
+int rpmsg_trysendto(struct rpmsg_channel *rpdev, void *data, int len, u32 dst)
+{
+ u32 src = rpdev->src;
+
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
+}
+
+/**
+ * rpmsg_trysend_offchannel() - send a message using explicit src/dst addresses
+ * @rpdev: the rpmsg channel
+ * @src: source address
+ * @dst: destination address
+ * @data: payload of message
+ * @len: length of payload
+ *
+ * This function sends @data of length @len to the remote @dst address,
+ * and uses @src as the source address.
+ * The message will be sent to the remote processor which the @rpdev
+ * channel belongs to.
+ * In case there are no TX buffers available, the function will immediately
+ * return -ENOMEM without waiting until one becomes available.
+ *
+ * Can only be called from process context (for now).
+ *
+ * Returns 0 on success and an appropriate error value on failure.
+ */
+static inline
+int rpmsg_trysend_offchannel(struct rpmsg_channel *rpdev, u32 src, u32 dst,
+ void *data, int len)
+{
+ return rpmsg_send_offchannel_raw(rpdev, src, dst, data, len, false);
+}
+
+#endif /* _LINUX_RPMSG_H */
diff --git a/include/linux/rslib.h b/include/linux/rslib.h
new file mode 100644
index 000000000..746580c19
--- /dev/null
+++ b/include/linux/rslib.h
@@ -0,0 +1,109 @@
+/*
+ * include/linux/rslib.h
+ *
+ * Overview:
+ * Generic Reed Solomon encoder / decoder library
+ *
+ * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * RS code lifted from reed solomon library written by Phil Karn
+ * Copyright 2002 Phil Karn, KA9Q
+ *
+ * $Id: rslib.h,v 1.4 2005/11/07 11:14:52 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _RSLIB_H_
+#define _RSLIB_H_
+
+#include <linux/list.h>
+
+/**
+ * struct rs_control - rs control structure
+ *
+ * @mm: Bits per symbol
+ * @nn: Symbols per block (= (1<<mm)-1)
+ * @alpha_to: log lookup table
+ * @index_of: Antilog lookup table
+ * @genpoly: Generator polynomial
+ * @nroots: Number of generator roots = number of parity symbols
+ * @fcr: First consecutive root, index form
+ * @prim: Primitive element, index form
+ * @iprim: prim-th root of 1, index form
+ * @gfpoly: The primitive generator polynomial
+ * @gffunc: Function to generate the field, if non-canonical representation
+ * @users: Users of this structure
+ * @list: List entry for the rs control list
+*/
+struct rs_control {
+ int mm;
+ int nn;
+ uint16_t *alpha_to;
+ uint16_t *index_of;
+ uint16_t *genpoly;
+ int nroots;
+ int fcr;
+ int prim;
+ int iprim;
+ int gfpoly;
+ int (*gffunc)(int);
+ int users;
+ struct list_head list;
+};
+
+/* General purpose RS codec, 8-bit data width, symbol width 1-15 bit */
+#ifdef CONFIG_REED_SOLOMON_ENC8
+int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
+ uint16_t invmsk);
+#endif
+#ifdef CONFIG_REED_SOLOMON_DEC8
+int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
+ uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
+ uint16_t *corr);
+#endif
+
+/* General purpose RS codec, 16-bit data width, symbol width 1-15 bit */
+#ifdef CONFIG_REED_SOLOMON_ENC16
+int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par,
+ uint16_t invmsk);
+#endif
+#ifdef CONFIG_REED_SOLOMON_DEC16
+int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
+ uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
+ uint16_t *corr);
+#endif
+
+/* Create or get a matching rs control structure */
+struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
+ int nroots);
+struct rs_control *init_rs_non_canonical(int symsize, int (*func)(int),
+ int fcr, int prim, int nroots);
+
+/* Release a rs control structure */
+void free_rs(struct rs_control *rs);
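
/*
 * Illustrative sketch (not part of the original header): typical life cycle
 * of the 8-bit codec (assumes CONFIG_REED_SOLOMON_ENC8).  The GF(2^8)
 * polynomial 0x11d and the four parity symbols are example parameters only;
 * note that the parity buffer must be zeroed before encode_rs8().
 */
static int example_rs_encode(uint8_t *data, int len)
{
	uint16_t par[4] = { 0, 0, 0, 0 };	/* nroots = 4 parity symbols */
	struct rs_control *rs;

	rs = init_rs(8, 0x11d, 0, 1, 4);	/* symsize, gfpoly, fcr, prim, nroots */
	if (!rs)
		return -ENOMEM;

	encode_rs8(rs, data, len, par, 0);	/* invmsk = 0 */
	/* ... transmit or store data[] plus par[] ... */

	free_rs(rs);
	return 0;
}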
+
+/** modulo replacement for Galois field arithmetic
+ *
+ * @rs: the rs control structure
+ * @x: the value to reduce
+ *
+ * where
+ * rs->mm = number of bits per symbol
+ * rs->nn = (2^rs->mm) - 1
+ *
+ * Simple arithmetic modulo would return a wrong result for values
+ * >= 3 * rs->nn
+*/
+static inline int rs_modnn(struct rs_control *rs, int x)
+{
+ while (x >= rs->nn) {
+ x -= rs->nn;
+ x = (x >> rs->mm) + (x & rs->nn);
+ }
+ return x;
+}
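
/*
 * Worked example (illustrative): with rs->mm = 8 and rs->nn = 255, reducing
 * x = 510 takes two passes: 510 -> 255, (255 >> 8) + (255 & 255) = 255,
 * then 255 -> 0, giving 510 mod 255 = 0 without a division.
 */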
+
+#endif
diff --git a/include/linux/rtc-ds2404.h b/include/linux/rtc-ds2404.h
new file mode 100644
index 000000000..22c538255
--- /dev/null
+++ b/include/linux/rtc-ds2404.h
@@ -0,0 +1,20 @@
+/*
+ * ds2404.h - platform data structure for the DS2404 RTC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
+ */
+
+#ifndef __LINUX_DS2404_H
+#define __LINUX_DS2404_H
+
+struct ds2404_platform_data {
+
+ unsigned int gpio_rst;
+ unsigned int gpio_clk;
+ unsigned int gpio_dq;
+};
+#endif
diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h
new file mode 100644
index 000000000..e55d82ceb
--- /dev/null
+++ b/include/linux/rtc-v3020.h
@@ -0,0 +1,41 @@
+/*
+ * v3020.h - Registers definition and platform data structure for the v3020 RTC.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006, 8D Technologies inc.
+ */
+#ifndef __LINUX_V3020_H
+#define __LINUX_V3020_H
+
+/* The v3020 has only one data pin but which one
+ * is used depends on the board. */
+struct v3020_platform_data {
+ int leftshift; /* (1<<(leftshift)) & readl() */
+
+ unsigned int use_gpio:1;
+ unsigned int gpio_cs;
+ unsigned int gpio_wr;
+ unsigned int gpio_rd;
+ unsigned int gpio_io;
+};
+
+#define V3020_STATUS_0 0x00
+#define V3020_STATUS_1 0x01
+#define V3020_SECONDS 0x02
+#define V3020_MINUTES 0x03
+#define V3020_HOURS 0x04
+#define V3020_MONTH_DAY 0x05
+#define V3020_MONTH 0x06
+#define V3020_YEAR 0x07
+#define V3020_WEEK_DAY 0x08
+#define V3020_WEEK 0x09
+
+#define V3020_IS_COMMAND(val) ((val)>=0x0E)
+
+#define V3020_CMD_RAM2CLOCK 0x0E
+#define V3020_CMD_CLOCK2RAM 0x0F
+
+#endif /* __LINUX_V3020_H */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
new file mode 100644
index 000000000..8dcf6825f
--- /dev/null
+++ b/include/linux/rtc.h
@@ -0,0 +1,218 @@
+/*
+ * Generic RTC interface.
+ * This version contains the part of the user interface to the Real Time Clock
+ * service. It is used with both the legacy mc146818 and also EFI.
+ * Struct rtc_time and the first 12 ioctls by Paul Gortmaker, 1996 - separated out
+ * from <linux/mc146818rtc.h> to this file for 2.4 kernels.
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co.
+ * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ */
+#ifndef _LINUX_RTC_H_
+#define _LINUX_RTC_H_
+
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <uapi/linux/rtc.h>
+
+extern int rtc_month_days(unsigned int month, unsigned int year);
+extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year);
+extern int rtc_valid_tm(struct rtc_time *tm);
+extern time64_t rtc_tm_to_time64(struct rtc_time *tm);
+extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm);
+ktime_t rtc_tm_to_ktime(struct rtc_time tm);
+struct rtc_time rtc_ktime_to_tm(ktime_t kt);
+
+/**
+ * Deprecated. Use rtc_time64_to_tm().
+ */
+static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
+{
+ rtc_time64_to_tm(time, tm);
+}
+
+/**
+ * Deprecated. Use rtc_tm_to_time64().
+ */
+static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
+{
+ *time = rtc_tm_to_time64(tm);
+
+ return 0;
+}
+
+#include <linux/device.h>
+#include <linux/seq_file.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/mutex.h>
+#include <linux/timerqueue.h>
+#include <linux/workqueue.h>
+
+extern struct class *rtc_class;
+
+/*
+ * For these RTC methods the device parameter is the physical device
+ * on whatever bus holds the hardware (I2C, Platform, SPI, etc), which
+ * was passed to rtc_device_register(). Its driver_data normally holds
+ * device state, including the rtc_device pointer for the RTC.
+ *
+ * Most of these methods are called with rtc_device.ops_lock held,
+ * through the rtc_*(struct rtc_device *, ...) calls.
+ *
+ * The (current) exceptions are mostly filesystem hooks:
+ * - the proc() hook for procfs
+ * - non-ioctl() chardev hooks: open(), release(), read_callback()
+ *
+ * REVISIT those periodic irq calls *do* have ops_lock when they're
+ * issued through ioctl() ...
+ */
+struct rtc_class_ops {
+ int (*open)(struct device *);
+ void (*release)(struct device *);
+ int (*ioctl)(struct device *, unsigned int, unsigned long);
+ int (*read_time)(struct device *, struct rtc_time *);
+ int (*set_time)(struct device *, struct rtc_time *);
+ int (*read_alarm)(struct device *, struct rtc_wkalrm *);
+ int (*set_alarm)(struct device *, struct rtc_wkalrm *);
+ int (*proc)(struct device *, struct seq_file *);
+ int (*set_mmss64)(struct device *, time64_t secs);
+ int (*set_mmss)(struct device *, unsigned long secs);
+ int (*read_callback)(struct device *, int data);
+ int (*alarm_irq_enable)(struct device *, unsigned int enabled);
+};
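
/*
 * Illustrative sketch (not part of the original header): the minimum a simple
 * RTC driver provides is read_time/set_time hooks wired into an rtc_class_ops.
 * The "example_rtc_*" functions are hypothetical; a real driver talks to its
 * hardware here and registers the ops with (devm_)rtc_device_register() from
 * its probe routine.
 */
static int example_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	/* hypothetical: read the hardware clock into *tm */
	return 0;
}

static int example_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	/* hypothetical: program the hardware clock from *tm */
	return 0;
}

static const struct rtc_class_ops example_rtc_ops = {
	.read_time	= example_rtc_read_time,
	.set_time	= example_rtc_set_time,
};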
+
+#define RTC_DEVICE_NAME_SIZE 20
+typedef struct rtc_task {
+ void (*func)(void *private_data);
+ void *private_data;
+} rtc_task_t;
+
+
+struct rtc_timer {
+ struct rtc_task task;
+ struct timerqueue_node node;
+ ktime_t period;
+ int enabled;
+};
+
+
+/* flags */
+#define RTC_DEV_BUSY 0
+
+struct rtc_device
+{
+ struct device dev;
+ struct module *owner;
+
+ int id;
+ char name[RTC_DEVICE_NAME_SIZE];
+
+ const struct rtc_class_ops *ops;
+ struct mutex ops_lock;
+
+ struct cdev char_dev;
+ unsigned long flags;
+
+ unsigned long irq_data;
+ spinlock_t irq_lock;
+ wait_queue_head_t irq_queue;
+ struct fasync_struct *async_queue;
+
+ struct rtc_task *irq_task;
+ spinlock_t irq_task_lock;
+ int irq_freq;
+ int max_user_freq;
+
+ struct timerqueue_head timerqueue;
+ struct rtc_timer aie_timer;
+ struct rtc_timer uie_rtctimer;
+ struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
+ int pie_enabled;
+ struct work_struct irqwork;
+ /* Some hardware can't support UIE mode */
+ int uie_unsupported;
+
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ struct work_struct uie_task;
+ struct timer_list uie_timer;
+ /* Those fields are protected by rtc->irq_lock */
+ unsigned int oldsecs;
+ unsigned int uie_irq_active:1;
+ unsigned int stop_uie_polling:1;
+ unsigned int uie_task_active:1;
+ unsigned int uie_timer_active:1;
+#endif
+};
+#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
+
+extern struct rtc_device *rtc_device_register(const char *name,
+ struct device *dev,
+ const struct rtc_class_ops *ops,
+ struct module *owner);
+extern struct rtc_device *devm_rtc_device_register(struct device *dev,
+ const char *name,
+ const struct rtc_class_ops *ops,
+ struct module *owner);
+extern void rtc_device_unregister(struct rtc_device *rtc);
+extern void devm_rtc_device_unregister(struct device *dev,
+ struct rtc_device *rtc);
+
+extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
+extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
+extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
+extern int rtc_set_ntp_time(struct timespec64 now);
+int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
+extern int rtc_read_alarm(struct rtc_device *rtc,
+ struct rtc_wkalrm *alrm);
+extern int rtc_set_alarm(struct rtc_device *rtc,
+ struct rtc_wkalrm *alrm);
+extern int rtc_initialize_alarm(struct rtc_device *rtc,
+ struct rtc_wkalrm *alrm);
+extern void rtc_update_irq(struct rtc_device *rtc,
+ unsigned long num, unsigned long events);
+
+extern struct rtc_device *rtc_class_open(const char *name);
+extern void rtc_class_close(struct rtc_device *rtc);
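
/*
 * Illustrative sketch (not part of the original header): an in-kernel user
 * opens an RTC by name, reads the time and closes the handle again.  "rtc0"
 * is simply the conventional name of the first registered device.
 */
static int example_read_rtc0(struct rtc_time *tm)
{
	struct rtc_device *rtc = rtc_class_open("rtc0");
	int err;

	if (!rtc)
		return -ENODEV;
	err = rtc_read_time(rtc, tm);
	rtc_class_close(rtc);
	return err;
}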
+
+extern int rtc_irq_register(struct rtc_device *rtc,
+ struct rtc_task *task);
+extern void rtc_irq_unregister(struct rtc_device *rtc,
+ struct rtc_task *task);
+extern int rtc_irq_set_state(struct rtc_device *rtc,
+ struct rtc_task *task, int enabled);
+extern int rtc_irq_set_freq(struct rtc_device *rtc,
+ struct rtc_task *task, int freq);
+extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled);
+extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
+extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
+ unsigned int enabled);
+
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode);
+void rtc_aie_update_irq(void *private);
+void rtc_uie_update_irq(void *private);
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
+
+int rtc_register(rtc_task_t *task);
+int rtc_unregister(rtc_task_t *task);
+int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
+
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+ ktime_t expires, ktime_t period);
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer);
+void rtc_timer_do_work(struct work_struct *work);
+
+static inline bool is_leap_year(unsigned int year)
+{
+ return (!(year % 4) && (year % 100)) || !(year % 400);
+}
+
+#ifdef CONFIG_RTC_HCTOSYS_DEVICE
+extern int rtc_hctosys_ret;
+#else
+#define rtc_hctosys_ret -ENODEV
+#endif
+
+#endif /* _LINUX_RTC_H_ */
diff --git a/include/linux/rtc/ds1307.h b/include/linux/rtc/ds1307.h
new file mode 100644
index 000000000..291b1c490
--- /dev/null
+++ b/include/linux/rtc/ds1307.h
@@ -0,0 +1,22 @@
+/*
+ * ds1307.h - platform_data for the ds1307 (and variants) rtc driver
+ * (C) Copyright 2012 by Wolfram Sang, Pengutronix e.K.
+ * same license as the driver
+ */
+
+#ifndef _LINUX_DS1307_H
+#define _LINUX_DS1307_H
+
+#include <linux/types.h>
+
+#define DS1307_TRICKLE_CHARGER_250_OHM 0x01
+#define DS1307_TRICKLE_CHARGER_2K_OHM 0x02
+#define DS1307_TRICKLE_CHARGER_4K_OHM 0x03
+#define DS1307_TRICKLE_CHARGER_NO_DIODE 0x04
+#define DS1307_TRICKLE_CHARGER_DIODE 0x08
+
+struct ds1307_platform_data {
+ u8 trickle_charger_setup;
+};
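+
+/*
+ * Board-code sketch (illustration only; device address and names are made
+ * up): enable the trickle charger with the 250 ohm resistor and the series
+ * diode, and hand the platform data to the I2C core.
+ *
+ *	static struct ds1307_platform_data foo_ds1339_pdata = {
+ *		.trickle_charger_setup = DS1307_TRICKLE_CHARGER_DIODE |
+ *					 DS1307_TRICKLE_CHARGER_250_OHM,
+ *	};
+ *
+ *	static struct i2c_board_info foo_i2c_devs[] __initdata = {
+ *		{
+ *			I2C_BOARD_INFO("ds1339", 0x68),
+ *			.platform_data = &foo_ds1339_pdata,
+ *		},
+ *	};
+ */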
+
+#endif /* _LINUX_DS1307_H */
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
new file mode 100644
index 000000000..e6337a56d
--- /dev/null
+++ b/include/linux/rtc/ds1685.h
@@ -0,0 +1,375 @@
+/*
+ * Definitions for the registers, addresses, and platform data of the
+ * DS1685/DS1687-series RTC chips.
+ *
+ * This driver also works for the DS17X85/DS17X87 RTC chips. Functionally
+ * similar to the DS1685/DS1687, they support a few extra features which
+ * include larger, battery-backed NV-SRAM, burst-mode access, and an RTC
+ * write counter.
+ *
+ * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>.
+ * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>.
+ *
+ * References:
+ * DS1685/DS1687 3V/5V Real-Time Clocks, 19-5215, Rev 4/10.
+ * DS17x85/DS17x87 3V/5V Real-Time Clocks, 19-5222, Rev 4/10.
+ * DS1689/DS1693 3V/5V Serialized Real-Time Clocks, Rev 112105.
+ * Application Note 90, Using the Multiplex Bus RTC Extended Features.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_RTC_DS1685_H_
+#define _LINUX_RTC_DS1685_H_
+
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct ds1685_priv - DS1685 private data structure.
+ * @dev: pointer to the rtc_device structure.
+ * @regs: iomapped base address pointer of the RTC registers.
+ * @regstep: padding/step size between registers (optional).
+ * @baseaddr: base address of the RTC device.
+ * @size: resource size.
+ * @lock: private lock variable for spin locking/unlocking.
+ * @work: private workqueue.
+ * @irq_num: IRQ number assigned to the RTC device.
+ * @bcd_mode: true if the RTC operates in BCD (DM = 0) data mode.
+ * @no_irq: true if the RTC has no usable IRQ line.
+ * @uie_unsupported: true if update-ended interrupts cannot be used.
+ * @alloc_io_resources: true if the driver should claim the I/O region itself.
+ * @read: register read accessor supplied by the platform.
+ * @write: register write accessor supplied by the platform.
+ * @prepare_poweroff: pointer to platform pre-poweroff function.
+ * @wake_alarm: pointer to platform wake alarm function.
+ * @post_ram_clear: pointer to platform post ram-clear function.
+ */
+struct ds1685_priv {
+ struct rtc_device *dev;
+ void __iomem *regs;
+ u32 regstep;
+ resource_size_t baseaddr;
+ size_t size;
+ spinlock_t lock;
+ struct work_struct work;
+ int irq_num;
+ bool bcd_mode;
+ bool no_irq;
+ bool uie_unsupported;
+ bool alloc_io_resources;
+ u8 (*read)(struct ds1685_priv *, int);
+ void (*write)(struct ds1685_priv *, int, u8);
+ void (*prepare_poweroff)(void);
+ void (*wake_alarm)(void);
+ void (*post_ram_clear)(void);
+};
+
+
+/**
+ * struct ds1685_rtc_platform_data - platform data structure.
+ * @plat_prepare_poweroff: platform-specific pre-poweroff function.
+ * @plat_wake_alarm: platform-specific wake alarm function.
+ * @plat_post_ram_clear: platform-specific post ram-clear function.
+ *
+ * If your platform needs to use a custom padding/step size between
+ * registers, or uses one or more of the extended interrupts and needs special
+ * handling, then include this header file in your platform definition and
+ * set regstep and the plat_* pointers as appropriate.
+ */
+struct ds1685_rtc_platform_data {
+ const u32 regstep;
+ const bool bcd_mode;
+ const bool no_irq;
+ const bool uie_unsupported;
+ const bool alloc_io_resources;
+ u8 (*plat_read)(struct ds1685_priv *, int);
+ void (*plat_write)(struct ds1685_priv *, int, u8);
+ void (*plat_prepare_poweroff)(void);
+ void (*plat_wake_alarm)(void);
+ void (*plat_post_ram_clear)(void);
+};
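+
+/*
+ * Platform-definition sketch (illustration only; the foo_* names are
+ * hypothetical): a platform with the RTC registers spaced four bytes apart
+ * supplies regstep and its own accessors.
+ *
+ *	static u8 foo_rtc_read(struct ds1685_priv *rtc, int reg)
+ *	{
+ *		return readb(rtc->regs + (reg * rtc->regstep));
+ *	}
+ *
+ *	static void foo_rtc_write(struct ds1685_priv *rtc, int reg, u8 value)
+ *	{
+ *		writeb(value, rtc->regs + (reg * rtc->regstep));
+ *	}
+ *
+ *	static struct ds1685_rtc_platform_data foo_rtc_pdata = {
+ *		.regstep	= 4,
+ *		.plat_read	= foo_rtc_read,
+ *		.plat_write	= foo_rtc_write,
+ *	};
+ */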
+
+
+/*
+ * Time Registers.
+ */
+#define RTC_SECS 0x00 /* Seconds 00-59 */
+#define RTC_SECS_ALARM 0x01 /* Alarm Seconds 00-59 */
+#define RTC_MINS 0x02 /* Minutes 00-59 */
+#define RTC_MINS_ALARM 0x03 /* Alarm Minutes 00-59 */
+#define RTC_HRS 0x04 /* Hours 01-12 AM/PM || 00-23 */
+#define RTC_HRS_ALARM 0x05 /* Alarm Hours 01-12 AM/PM || 00-23 */
+#define RTC_WDAY 0x06 /* Day of Week 01-07 */
+#define RTC_MDAY 0x07 /* Day of Month 01-31 */
+#define RTC_MONTH 0x08 /* Month 01-12 */
+#define RTC_YEAR 0x09 /* Year 00-99 */
+#define RTC_CENTURY 0x48 /* Century 00-99 */
+#define RTC_MDAY_ALARM 0x49 /* Alarm Day of Month 01-31 */
+
+
+/*
+ * Bit masks for the Time registers in BCD Mode (DM = 0).
+ */
+#define RTC_SECS_BCD_MASK 0x7f /* - x x x x x x x */
+#define RTC_MINS_BCD_MASK 0x7f /* - x x x x x x x */
+#define RTC_HRS_12_BCD_MASK 0x1f /* - - - x x x x x */
+#define RTC_HRS_24_BCD_MASK 0x3f /* - - x x x x x x */
+#define RTC_MDAY_BCD_MASK 0x3f /* - - x x x x x x */
+#define RTC_MONTH_BCD_MASK 0x1f /* - - - x x x x x */
+#define RTC_YEAR_BCD_MASK 0xff /* x x x x x x x x */
+
+/*
+ * Bit masks for the Time registers in BIN Mode (DM = 1).
+ */
+#define RTC_SECS_BIN_MASK 0x3f /* - - x x x x x x */
+#define RTC_MINS_BIN_MASK 0x3f /* - - x x x x x x */
+#define RTC_HRS_12_BIN_MASK 0x0f /* - - - - x x x x */
+#define RTC_HRS_24_BIN_MASK 0x1f /* - - - x x x x x */
+#define RTC_MDAY_BIN_MASK 0x1f /* - - - x x x x x */
+#define RTC_MONTH_BIN_MASK 0x0f /* - - - - x x x x */
+#define RTC_YEAR_BIN_MASK 0x7f /* - x x x x x x x */
+
+/*
+ * Bit masks common for the Time registers in BCD or BIN Mode.
+ */
+#define RTC_WDAY_MASK 0x07 /* - - - - - x x x */
+#define RTC_CENTURY_MASK 0xff /* x x x x x x x x */
+#define RTC_MDAY_ALARM_MASK 0xff /* x x x x x x x x */
+#define RTC_HRS_AMPM_MASK BIT(7) /* Mask for the AM/PM bit */
+
+
+
+/*
+ * Control Registers.
+ */
+#define RTC_CTRL_A 0x0a /* Control Register A */
+#define RTC_CTRL_B 0x0b /* Control Register B */
+#define RTC_CTRL_C 0x0c /* Control Register C */
+#define RTC_CTRL_D 0x0d /* Control Register D */
+#define RTC_EXT_CTRL_4A 0x4a /* Extended Control Register 4A */
+#define RTC_EXT_CTRL_4B 0x4b /* Extended Control Register 4B */
+
+
+/*
+ * Bit names in Control Register A.
+ */
+#define RTC_CTRL_A_UIP BIT(7) /* Update In Progress */
+#define RTC_CTRL_A_DV2 BIT(6) /* Countdown Chain */
+#define RTC_CTRL_A_DV1 BIT(5) /* Oscillator Enable */
+#define RTC_CTRL_A_DV0 BIT(4) /* Bank Select */
+#define RTC_CTRL_A_RS2 BIT(2) /* Rate-Selection Bit 2 */
+#define RTC_CTRL_A_RS3 BIT(3) /* Rate-Selection Bit 3 */
+#define RTC_CTRL_A_RS1 BIT(1) /* Rate-Selection Bit 1 */
+#define RTC_CTRL_A_RS0 BIT(0) /* Rate-Selection Bit 0 */
+#define RTC_CTRL_A_RS_MASK 0x0f /* RS3 + RS2 + RS1 + RS0 */
+
+/*
+ * Bit names in Control Register B.
+ */
+#define RTC_CTRL_B_SET BIT(7) /* SET Bit */
+#define RTC_CTRL_B_PIE BIT(6) /* Periodic-Interrupt Enable */
+#define RTC_CTRL_B_AIE BIT(5) /* Alarm-Interrupt Enable */
+#define RTC_CTRL_B_UIE BIT(4) /* Update-Ended Interrupt-Enable */
+#define RTC_CTRL_B_SQWE BIT(3) /* Square-Wave Enable */
+#define RTC_CTRL_B_DM BIT(2) /* Data Mode */
+#define RTC_CTRL_B_2412 BIT(1) /* 12-Hr/24-Hr Mode */
+#define RTC_CTRL_B_DSE BIT(0) /* Daylight Savings Enable */
+#define RTC_CTRL_B_PAU_MASK 0x70 /* PIE + AIE + UIE */
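+
+/*
+ * Decoding sketch (illustration only): the DM bit in Control Register B
+ * selects between BCD and binary data, so a raw seconds value is interpreted
+ * with the matching mask.  The accessors come from struct ds1685_priv above
+ * and bcd2bin() from <linux/bcd.h>.
+ *
+ *	u8 ctrlb = rtc->read(rtc, RTC_CTRL_B);
+ *	u8 raw   = rtc->read(rtc, RTC_SECS);
+ *	int secs;
+ *
+ *	if (ctrlb & RTC_CTRL_B_DM)
+ *		secs = raw & RTC_SECS_BIN_MASK;
+ *	else
+ *		secs = bcd2bin(raw & RTC_SECS_BCD_MASK);
+ */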
+
+
+/*
+ * Bit names in Control Register C.
+ *
+ * BIT(0), BIT(1), BIT(2), & BIT(3) are unused, always return 0, and cannot
+ * be written to.
+ */
+#define RTC_CTRL_C_IRQF BIT(7) /* Interrupt-Request Flag */
+#define RTC_CTRL_C_PF BIT(6) /* Periodic-Interrupt Flag */
+#define RTC_CTRL_C_AF BIT(5) /* Alarm-Interrupt Flag */
+#define RTC_CTRL_C_UF BIT(4) /* Update-Ended Interrupt Flag */
+#define RTC_CTRL_C_PAU_MASK 0x70 /* PF + AF + UF */
+
+
+/*
+ * Bit names in Control Register D.
+ *
+ * BIT(0) through BIT(6) are unused, always return 0, and cannot
+ * be written to.
+ */
+#define RTC_CTRL_D_VRT BIT(7) /* Valid RAM and Time */
+
+
+/*
+ * Bit names in Extended Control Register 4A.
+ *
+ * On the DS1685/DS1687/DS1689/DS1693, BIT(4) and BIT(5) are reserved for
+ * future use. They can be read from and written to, but have no effect
+ * on the RTC's operation.
+ *
+ * On the DS17x85/DS17x87, BIT(5) is Burst-Mode Enable (BME), and allows
+ * access to the extended NV-SRAM by automatically incrementing the address
+ * register when they are read from or written to.
+ */
+#define RTC_CTRL_4A_VRT2 BIT(7) /* Auxiliary Battery Status */
+#define RTC_CTRL_4A_INCR BIT(6) /* Increment-in-Progress Status */
+#define RTC_CTRL_4A_PAB BIT(3) /* Power-Active Bar Control */
+#define RTC_CTRL_4A_RF BIT(2) /* RAM-Clear Flag */
+#define RTC_CTRL_4A_WF BIT(1) /* Wake-Up Alarm Flag */
+#define RTC_CTRL_4A_KF BIT(0) /* Kickstart Flag */
+#if !defined(CONFIG_RTC_DRV_DS1685) && !defined(CONFIG_RTC_DRV_DS1689)
+#define RTC_CTRL_4A_BME BIT(5) /* Burst-Mode Enable */
+#endif
+#define RTC_CTRL_4A_RWK_MASK 0x07 /* RF + WF + KF */
+
+
+/*
+ * Bit names in Extended Control Register 4B.
+ */
+#define RTC_CTRL_4B_ABE BIT(7) /* Auxiliary Battery Enable */
+#define RTC_CTRL_4B_E32K BIT(6) /* Enable 32.768kHz output on SQW Pin */
+#define RTC_CTRL_4B_CS BIT(5) /* Crystal Select */
+#define RTC_CTRL_4B_RCE BIT(4) /* RAM Clear-Enable */
+#define RTC_CTRL_4B_PRS BIT(3) /* PAB Reset-Select */
+#define RTC_CTRL_4B_RIE BIT(2) /* RAM Clear-Interrupt Enable */
+#define RTC_CTRL_4B_WIE BIT(1) /* Wake-Up Alarm-Interrupt Enable */
+#define RTC_CTRL_4B_KSE BIT(0) /* Kickstart Interrupt-Enable */
+#define RTC_CTRL_4B_RWK_MASK 0x07 /* RIE + WIE + KSE */
+
+
+/*
+ * Misc register names in Bank 1.
+ *
+ * The DV0 bit in Control Register A must be set to 1 for these registers
+ * to become available, including Extended Control Registers 4A & 4B.
+ */
+#define RTC_BANK1_SSN_MODEL 0x40 /* Model Number */
+#define RTC_BANK1_SSN_BYTE_1 0x41 /* 1st Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_2 0x42 /* 2nd Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_3 0x43 /* 3rd Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_4 0x44 /* 4th Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_5 0x45 /* 5th Byte of Serial Number */
+#define RTC_BANK1_SSN_BYTE_6 0x46 /* 6th Byte of Serial Number */
+#define RTC_BANK1_SSN_CRC 0x47 /* Serial CRC Byte */
+#define RTC_BANK1_RAM_DATA_PORT 0x53 /* Extended RAM Data Port */
+
+
+/*
+ * Model-specific registers in Bank 1.
+ *
+ * The addresses below differ depending on the model of the RTC chip
+ * selected in the kernel configuration. Not all of these features are
+ * supported in the main driver at present.
+ *
+ * DS1685/DS1687 - Extended NV-SRAM address (LSB only).
+ * DS1689/DS1693 - Vcc, Vbat, Pwr Cycle Counters & Customer-specific S/N.
+ * DS17x85/DS17x87 - Extended NV-SRAM addresses (MSB & LSB) & Write counter.
+ */
+#if defined(CONFIG_RTC_DRV_DS1685)
+#define RTC_BANK1_RAM_ADDR 0x50 /* NV-SRAM Addr */
+#elif defined(CONFIG_RTC_DRV_DS1689)
+#define RTC_BANK1_VCC_CTR_LSB 0x54 /* Vcc Counter Addr (LSB) */
+#define RTC_BANK1_VCC_CTR_MSB 0x57 /* Vcc Counter Addr (MSB) */
+#define RTC_BANK1_VBAT_CTR_LSB 0x58 /* Vbat Counter Addr (LSB) */
+#define RTC_BANK1_VBAT_CTR_MSB 0x5b /* Vbat Counter Addr (MSB) */
+#define RTC_BANK1_PWR_CTR_LSB 0x5c /* Pwr Cycle Counter Addr (LSB) */
+#define RTC_BANK1_PWR_CTR_MSB 0x5d /* Pwr Cycle Counter Addr (MSB) */
+#define RTC_BANK1_UNIQ_SN 0x60 /* Customer-specific S/N */
+#else /* DS17x85/DS17x87 */
+#define RTC_BANK1_RAM_ADDR_LSB 0x50 /* NV-SRAM Addr (LSB) */
+#define RTC_BANK1_RAM_ADDR_MSB 0x51 /* NV-SRAM Addr (MSB) */
+#define RTC_BANK1_WRITE_CTR 0x5e /* RTC Write Counter */
+#endif
+
+
+/*
+ * Model numbers.
+ *
+ * The DS1688/DS1691 and DS1689/DS1693 chips share the same model number
+ * and the manual doesn't indicate any major differences. As such, they
+ * are regarded as the same chip in this driver.
+ */
+#define RTC_MODEL_DS1685 0x71 /* DS1685/DS1687 */
+#define RTC_MODEL_DS17285 0x72 /* DS17285/DS17287 */
+#define RTC_MODEL_DS1689 0x73 /* DS1688/DS1691/DS1689/DS1693 */
+#define RTC_MODEL_DS17485 0x74 /* DS17485/DS17487 */
+#define RTC_MODEL_DS17885 0x78 /* DS17885/DS17887 */
+
+
+/*
+ * Periodic Interrupt Rates / Square-Wave Output Frequency
+ *
+ * Periodic rates are selected by setting the RS3-RS0 bits in Control
+ * Register A and enabled via either the E32K bit in Extended Control
+ * Register 4B or the SQWE bit in Control Register B.
+ *
+ * E32K overrides the settings of RS3-RS0 and outputs a frequency of 32768Hz
+ * on the SQW pin of the RTC chip. While there are 16 possible selections,
+ * the 1-of-16 decoder is only able to divide the base 32768Hz signal into 13
+ * smaller frequencies. The values 0x01 and 0x02 are not used and are
+ * synonymous with 0x08 and 0x09, respectively.
+ *
+ * When E32K is set to a logic 1, periodic interrupts are disabled and reading
+ * /dev/rtc will return -EINVAL. This also applies if the periodic interrupt
+ * frequency is set to 0Hz.
+ *
+ * Not currently used by the rtc-ds1685 driver because the RTC core removed
+ * support for hardware-generated periodic-interrupts in favour of
+ * hrtimer-generated interrupts. But these defines are kept around for use
+ * in userland, as documentation of the hardware, and for possible future use
+ * if hardware-generated periodic interrupts are ever added back.
+ */
+ /* E32K RS3 RS2 RS1 RS0 */
+#define RTC_SQW_8192HZ 0x03 /* 0 0 0 1 1 */
+#define RTC_SQW_4096HZ 0x04 /* 0 0 1 0 0 */
+#define RTC_SQW_2048HZ 0x05 /* 0 0 1 0 1 */
+#define RTC_SQW_1024HZ 0x06 /* 0 0 1 1 0 */
+#define RTC_SQW_512HZ 0x07 /* 0 0 1 1 1 */
+#define RTC_SQW_256HZ 0x08 /* 0 1 0 0 0 */
+#define RTC_SQW_128HZ 0x09 /* 0 1 0 0 1 */
+#define RTC_SQW_64HZ 0x0a /* 0 1 0 1 0 */
+#define RTC_SQW_32HZ 0x0b /* 0 1 0 1 1 */
+#define RTC_SQW_16HZ 0x0c /* 0 1 1 0 0 */
+#define RTC_SQW_8HZ 0x0d /* 0 1 1 0 1 */
+#define RTC_SQW_4HZ 0x0e /* 0 1 1 1 0 */
+#define RTC_SQW_2HZ 0x0f /* 0 1 1 1 1 */
+#define RTC_SQW_0HZ 0x00 /* 0 0 0 0 0 */
+#define RTC_SQW_32768HZ 32768 /* 1 - - - - */
+#define RTC_MAX_USER_FREQ 8192
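+
+/*
+ * Rate-selection sketch (illustration only): program an 8192Hz square wave by
+ * writing the RS bits into Control Register A and enabling SQWE in Control
+ * Register B, again using the accessors from struct ds1685_priv.
+ *
+ *	u8 ctrla = rtc->read(rtc, RTC_CTRL_A);
+ *
+ *	ctrla &= ~RTC_CTRL_A_RS_MASK;
+ *	ctrla |= RTC_SQW_8192HZ;
+ *	rtc->write(rtc, RTC_CTRL_A, ctrla);
+ *	rtc->write(rtc, RTC_CTRL_B,
+ *		   rtc->read(rtc, RTC_CTRL_B) | RTC_CTRL_B_SQWE);
+ */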
+
+
+/*
+ * NVRAM data & addresses:
+ * - 50 bytes of NVRAM are available just past the clock registers.
+ * - 64 additional bytes are available in Bank0.
+ *
+ * Extended, battery-backed NV-SRAM:
+ * - DS1685/DS1687 - 128 bytes.
+ * - DS1689/DS1693 - 0 bytes.
+ * - DS17285/DS17287 - 2048 bytes.
+ * - DS17485/DS17487 - 4096 bytes.
+ * - DS17885/DS17887 - 8192 bytes.
+ */
+#define NVRAM_TIME_BASE 0x0e /* NVRAM Addr in Time regs */
+#define NVRAM_BANK0_BASE 0x40 /* NVRAM Addr in Bank0 regs */
+#define NVRAM_SZ_TIME 50
+#define NVRAM_SZ_BANK0 64
+#if defined(CONFIG_RTC_DRV_DS1685)
+# define NVRAM_SZ_EXTND 128
+#elif defined(CONFIG_RTC_DRV_DS1689)
+# define NVRAM_SZ_EXTND 0
+#elif defined(CONFIG_RTC_DRV_DS17285)
+# define NVRAM_SZ_EXTND 2048
+#elif defined(CONFIG_RTC_DRV_DS17485)
+# define NVRAM_SZ_EXTND 4096
+#elif defined(CONFIG_RTC_DRV_DS17885)
+# define NVRAM_SZ_EXTND 8192
+#endif
+#define NVRAM_TOTAL_SZ_BANK0 (NVRAM_SZ_TIME + NVRAM_SZ_BANK0)
+#define NVRAM_TOTAL_SZ (NVRAM_TOTAL_SZ_BANK0 + NVRAM_SZ_EXTND)
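+
+/*
+ * Worked example (illustration only): with CONFIG_RTC_DRV_DS17285 selected,
+ * NVRAM_TOTAL_SZ_BANK0 = 50 + 64 = 114 bytes and
+ * NVRAM_TOTAL_SZ = 114 + 2048 = 2162 bytes.
+ */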
+
+
+/*
+ * Function Prototypes.
+ */
+extern void __noreturn
+ds1685_rtc_poweroff(struct platform_device *pdev);
+
+#endif /* _LINUX_RTC_DS1685_H_ */
diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h
new file mode 100644
index 000000000..6fc961459
--- /dev/null
+++ b/include/linux/rtc/m48t59.h
@@ -0,0 +1,64 @@
+/*
+ * include/linux/rtc/m48t59.h
+ *
+ * Definitions for the platform data of m48t59 RTC chip driver.
+ *
+ * Copyright (c) 2007 Wind River Systems, Inc.
+ *
+ * Mark Zhan <rongkai.zhan@windriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_RTC_M48T59_H_
+#define _LINUX_RTC_M48T59_H_
+
+/*
+ * M48T59 Register Offset
+ */
+#define M48T59_YEAR 0xf
+#define M48T59_MONTH 0xe
+#define M48T59_MDAY 0xd /* Day of Month */
+#define M48T59_WDAY 0xc /* Day of Week */
+#define M48T59_WDAY_CB 0x20 /* Century Bit */
+#define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */
+#define M48T59_HOUR 0xb
+#define M48T59_MIN 0xa
+#define M48T59_SEC 0x9
+#define M48T59_CNTL 0x8
+#define M48T59_CNTL_READ 0x40
+#define M48T59_CNTL_WRITE 0x80
+#define M48T59_WATCHDOG 0x7
+#define M48T59_INTR 0x6
+#define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */
+#define M48T59_INTR_ABE 0x20
+#define M48T59_ALARM_DATE 0x5
+#define M48T59_ALARM_HOUR 0x4
+#define M48T59_ALARM_MIN 0x3
+#define M48T59_ALARM_SEC 0x2
+#define M48T59_UNUSED 0x1
+#define M48T59_FLAGS 0x0
+#define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */
+#define M48T59_FLAGS_AF 0x40 /* alarm */
+#define M48T59_FLAGS_BF 0x10 /* low battery */
+
+#define M48T59RTC_TYPE_M48T59 0 /* to keep compatibility */
+#define M48T59RTC_TYPE_M48T02 1
+#define M48T59RTC_TYPE_M48T08 2
+
+struct m48t59_plat_data {
+ /* The method to access M48T59 registers */
+ void (*write_byte)(struct device *dev, u32 ofs, u8 val);
+ unsigned char (*read_byte)(struct device *dev, u32 ofs);
+
+ int type; /* RTC model */
+
+ /* ioaddr mapped externally */
+ void __iomem *ioaddr;
+ /* offset to RTC registers, automatically set according to the type */
+ unsigned int offset;
+};
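+
+/*
+ * Board-code sketch (illustration only; the foo_* names are hypothetical):
+ * the accessors fetch the platform data back from the device and poke the
+ * ioremapped window directly.
+ *
+ *	static unsigned char foo_m48t59_read(struct device *dev, u32 ofs)
+ *	{
+ *		struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ *
+ *		return readb(pdata->ioaddr + ofs);
+ *	}
+ *
+ *	static void foo_m48t59_write(struct device *dev, u32 ofs, u8 val)
+ *	{
+ *		struct m48t59_plat_data *pdata = dev_get_platdata(dev);
+ *
+ *		writeb(val, pdata->ioaddr + ofs);
+ *	}
+ *
+ *	static struct m48t59_plat_data foo_m48t59_pdata = {
+ *		.type		= M48T59RTC_TYPE_M48T59,
+ *		.read_byte	= foo_m48t59_read,
+ *		.write_byte	= foo_m48t59_write,
+ *	};
+ */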
+
+#endif /* _LINUX_RTC_M48T59_H_ */
diff --git a/include/linux/rtc/sirfsoc_rtciobrg.h b/include/linux/rtc/sirfsoc_rtciobrg.h
new file mode 100644
index 000000000..2c92e1c8e
--- /dev/null
+++ b/include/linux/rtc/sirfsoc_rtciobrg.h
@@ -0,0 +1,18 @@
+/*
+ * RTC I/O Bridge interfaces for CSR SiRFprimaII
+ * The ARM core accesses the registers of SYSRTC, GPSRTC and PWRC through this module
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+#ifndef _SIRFSOC_RTC_IOBRG_H_
+#define _SIRFSOC_RTC_IOBRG_H_
+
+extern void sirfsoc_rtc_iobrg_besyncing(void);
+
+extern u32 sirfsoc_rtc_iobrg_readl(u32 addr);
+
+extern void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr);
+
+#endif
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
new file mode 100644
index 000000000..1abba5ce2
--- /dev/null
+++ b/include/linux/rtmutex.h
@@ -0,0 +1,101 @@
+/*
+ * RT Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains the public data structure and API definitions.
+ */
+
+#ifndef __LINUX_RT_MUTEX_H
+#define __LINUX_RT_MUTEX_H
+
+#include <linux/linkage.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock_types.h>
+
+extern int max_lock_depth; /* for sysctl */
+
+/**
+ * The rt_mutex structure
+ *
+ * @wait_lock: spinlock to protect the structure
+ * @waiters: rbtree root to enqueue waiters in priority order
+ * @waiters_leftmost: top waiter
+ * @owner: the mutex owner
+ */
+struct rt_mutex {
+ raw_spinlock_t wait_lock;
+ struct rb_root waiters;
+ struct rb_node *waiters_leftmost;
+ struct task_struct *owner;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ int save_state;
+ const char *name, *file;
+ int line;
+ void *magic;
+#endif
+};
+
+struct rt_mutex_waiter;
+struct hrtimer_sleeper;
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ extern int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len);
+ extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
+#else
+ static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len)
+ {
+ return 0;
+ }
+# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ , .name = #mutexname, .file = __FILE__, .line = __LINE__
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+#else
+# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
+# define rt_mutex_debug_task_free(t) do { } while (0)
+#endif
+
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .waiters = RB_ROOT \
+ , .owner = NULL \
+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+
+#define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+
+/**
+ * rt_mutex_is_locked - is the mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline int rt_mutex_is_locked(struct rt_mutex *lock)
+{
+ return lock->owner != NULL;
+}
+
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void rt_mutex_destroy(struct rt_mutex *lock);
+
+extern void rt_mutex_lock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout);
+
+extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+extern void rt_mutex_unlock(struct rt_mutex *lock);
+
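+/*
+ * Usage sketch (illustration only): a statically initialized PI mutex
+ * guarding a hypothetical foo_list.
+ *
+ *	static DEFINE_RT_MUTEX(foo_lock);
+ *
+ *	void foo_add(struct foo *f)
+ *	{
+ *		rt_mutex_lock(&foo_lock);
+ *		list_add(&f->node, &foo_list);
+ *		rt_mutex_unlock(&foo_lock);
+ *	}
+ */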
+#endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
new file mode 100644
index 000000000..7b8e260c4
--- /dev/null
+++ b/include/linux/rtnetlink.h
@@ -0,0 +1,126 @@
+#ifndef __LINUX_RTNETLINK_H
+#define __LINUX_RTNETLINK_H
+
+
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <uapi/linux/rtnetlink.h>
+
+extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
+extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
+extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
+ u32 group, struct nlmsghdr *nlh, gfp_t flags);
+extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
+extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
+extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
+ u32 id, long expires, u32 error);
+
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
+struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
+ unsigned change, gfp_t flags);
+void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
+ gfp_t flags);
+
+
+/* RTNL is used as a global lock for all changes to network configuration */
+extern void rtnl_lock(void);
+extern void rtnl_unlock(void);
+extern int rtnl_trylock(void);
+extern int rtnl_is_locked(void);
+
+extern wait_queue_head_t netdev_unregistering_wq;
+extern struct mutex net_mutex;
+
+#ifdef CONFIG_PROVE_LOCKING
+extern int lockdep_rtnl_is_held(void);
+#else
+static inline int lockdep_rtnl_is_held(void)
+{
+ return 1;
+}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
+/**
+ * rcu_dereference_rtnl - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Do an rcu_dereference(p), but check caller either holds rcu_read_lock()
+ * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference()
+ */
+#define rcu_dereference_rtnl(p) \
+ rcu_dereference_check(p, lockdep_rtnl_is_held())
+
+/**
+ * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
+ * @p: The pointer to read, prior to dereference
+ *
+ * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh()
+ * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh()
+ */
+#define rcu_dereference_bh_rtnl(p) \
+ rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
+
+/**
+ * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
+ * @p: The pointer to read, prior to dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
+ * caller holds RTNL.
+ */
+#define rtnl_dereference(p) \
+ rcu_dereference_protected(p, lockdep_rtnl_is_held())
+
+static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev)
+{
+ return rtnl_dereference(dev->ingress_queue);
+}
+
+struct netdev_queue *dev_ingress_queue_create(struct net_device *dev);
+
+#ifdef CONFIG_NET_CLS_ACT
+void net_inc_ingress_queue(void);
+void net_dec_ingress_queue(void);
+#else
+static inline void net_inc_ingress_queue(void)
+{
+}
+
+static inline void net_dec_ingress_queue(void)
+{
+}
+#endif
+
+extern void rtnetlink_init(void);
+extern void __rtnl_unlock(void);
+
+#define ASSERT_RTNL() do { \
+ if (unlikely(!rtnl_is_locked())) { \
+ printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \
+ __FILE__, __LINE__); \
+ dump_stack(); \
+ } \
+} while(0)
+
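+/*
+ * Usage sketch (illustration only): configuration paths take the global RTNL
+ * lock, may then use rtnl_dereference() for RCU pointers that only change
+ * under RTNL, and can assert the lock is held in helpers.
+ *
+ *	rtnl_lock();
+ *	ASSERT_RTNL();
+ *	txq = rtnl_dereference(dev->ingress_queue);
+ *	...
+ *	rtnl_unlock();
+ */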
+extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ struct net_device *filter_dev,
+ int idx);
+extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid,
+ u16 flags);
+extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid);
+
+extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u16 mode,
+ u32 flags, u32 mask, int nlflags);
+#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 000000000..bc2994ed6
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
+#ifndef __LINUX_RWLOCK_H
+#define __LINUX_RWLOCK_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * rwlock related methods
+ *
+ * split out from spinlock.h
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
+#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
+ extern int do_raw_read_trylock(rwlock_t *lock);
+ extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+ extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
+#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
+ extern int do_raw_write_trylock(rwlock_t *lock);
+ extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
+#else
+# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
+# define do_raw_read_lock_flags(lock, flags) \
+ do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
+# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
+# define do_raw_write_lock_flags(lock, flags) \
+ do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
+# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+#endif
+
+#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
+
+#define write_lock(lock) _raw_write_lock(lock)
+#define read_lock(lock) _raw_read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_write_lock_irqsave(lock); \
+ } while (0)
+
+#else
+
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_lock_irqsave(lock, flags); \
+ } while (0)
+
+#endif
+
+#define read_lock_irq(lock) _raw_read_lock_irq(lock)
+#define read_lock_bh(lock) _raw_read_lock_bh(lock)
+#define write_lock_irq(lock) _raw_write_lock_irq(lock)
+#define write_lock_bh(lock) _raw_write_lock_bh(lock)
+#define read_unlock(lock) _raw_read_unlock(lock)
+#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
+#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
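+/*
+ * Usage sketch (illustration only): readers may run concurrently, writers are
+ * exclusive; the IRQ-saving variants are used when the lock is also taken
+ * from interrupt context.
+ *
+ *	static DEFINE_RWLOCK(foo_lock);
+ *	unsigned long flags;
+ *
+ *	read_lock(&foo_lock);
+ *	...read-side critical section...
+ *	read_unlock(&foo_lock);
+ *
+ *	write_lock_irqsave(&foo_lock, flags);
+ *	...write-side critical section...
+ *	write_unlock_irqrestore(&foo_lock, flags);
+ */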
+#endif /* __LINUX_RWLOCK_H */
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 000000000..5b9b84b20
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,278 @@
+#ifndef __LINUX_RWLOCK_API_SMP_H
+#define __LINUX_RWLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/rwlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
+unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
+ __acquires(lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock);
+void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+void __lockfunc
+_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+#ifdef CONFIG_INLINE_READ_LOCK
+#define _raw_read_lock(lock) __raw_read_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK
+#define _raw_write_lock(lock) __raw_write_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_BH
+#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_BH
+#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQ
+#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
+#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
+#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
+#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_TRYLOCK
+#define _raw_read_trylock(lock) __raw_read_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_TRYLOCK
+#define _raw_write_trylock(lock) __raw_write_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK
+#define _raw_read_unlock(lock) __raw_read_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK
+#define _raw_write_unlock(lock) __raw_write_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_BH
+#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
+#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
+#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
+#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __raw_read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __raw_write_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_read_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_read_trylock(lock)) {
+ rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+static inline int __raw_write_trylock(rwlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_write_trylock(lock)) {
+ rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __raw_read_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
+ do_raw_read_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_read_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline void __raw_read_lock_bh(rwlock_t *lock)
+{
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+ rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
+}
+
+static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
+ do_raw_write_lock_flags, &flags);
+ return flags;
+}
+
+static inline void __raw_write_lock_irq(rwlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock_bh(rwlock_t *lock)
+{
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+static inline void __raw_write_lock(rwlock_t *lock)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __raw_write_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ preempt_enable();
+}
+
+static inline void
+__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_read_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_read_unlock(lock);
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+}
+
+static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_irq(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_write_unlock_bh(rwlock_t *lock)
+{
+ rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_write_unlock(lock);
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+}
+
+#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 000000000..cc0072e93
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,48 @@
+#ifndef __LINUX_RWLOCK_TYPES_H
+#define __LINUX_RWLOCK_TYPES_H
+
+/*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+typedef struct {
+ arch_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
new file mode 100644
index 000000000..561e86155
--- /dev/null
+++ b/include/linux/rwsem-spinlock.h
@@ -0,0 +1,45 @@
+/* rwsem-spinlock.h: fallback C implementation
+ *
+ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
+ * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
+ * - Derived also from comments by Linus
+ */
+
+#ifndef _LINUX_RWSEM_SPINLOCK_H
+#define _LINUX_RWSEM_SPINLOCK_H
+
+#ifndef _LINUX_RWSEM_H
+#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
+#endif
+
+#ifdef __KERNEL__
+/*
+ * the rw-semaphore definition
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
+ * - if wait_list is not empty, then there are processes waiting for the semaphore
+ */
+struct rw_semaphore {
+ __s32 count;
+ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000
+
+extern void __down_read(struct rw_semaphore *sem);
+extern int __down_read_trylock(struct rw_semaphore *sem);
+extern void __down_write(struct rw_semaphore *sem);
+extern void __down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int __down_write_trylock(struct rw_semaphore *sem);
+extern void __up_read(struct rw_semaphore *sem);
+extern void __up_write(struct rw_semaphore *sem);
+extern void __downgrade_write(struct rw_semaphore *sem);
+extern int rwsem_is_locked(struct rw_semaphore *sem);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
new file mode 100644
index 000000000..8f498cdde
--- /dev/null
+++ b/include/linux/rwsem.h
@@ -0,0 +1,180 @@
+/* rwsem.h: R/W semaphores, public interface
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ * Derived from asm-i386/semaphore.h
+ */
+
+#ifndef _LINUX_RWSEM_H
+#define _LINUX_RWSEM_H
+
+#include <linux/linkage.h>
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
+
+struct rw_semaphore;
+
+#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+#include <linux/rwsem-spinlock.h> /* use a generic implementation */
+#else
+/* All arch specific implementations share the same struct */
+struct rw_semaphore {
+ long count;
+ struct list_head wait_list;
+ raw_spinlock_t wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ struct optimistic_spin_queue osq; /* spinner MCS lock */
+ /*
+ * Write owner. Used as a speculative check to see
+ * if the owner is running on the cpu.
+ */
+ struct task_struct *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+/* Include the arch specific part */
+#include <asm/rwsem.h>
+
+/* In all implementations count != 0 means locked */
+static inline int rwsem_is_locked(struct rw_semaphore *sem)
+{
+ return sem->count != 0;
+}
+
+#endif
+
+/* Common initializer macros and functions */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
+#else
+#define __RWSEM_OPT_INIT(lockname)
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+ { .count = RWSEM_UNLOCKED_VALUE, \
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
+ __RWSEM_OPT_INIT(name) \
+ __RWSEM_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(name) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_rwsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_rwsem((sem), #sem, &__key); \
+} while (0)
+
+/*
+ * This is the same regardless of which rwsem implementation is being used.
+ * It is just a heuristic meant to be called by somebody already holding the
+ * rwsem to see if somebody from an incompatible type is wanting access to
+ * the lock.
+ */
+static inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+ return !list_empty(&sem->wait_list);
+}
+
+/*
+ * lock for reading
+ */
+extern void down_read(struct rw_semaphore *sem);
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+extern int down_read_trylock(struct rw_semaphore *sem);
+
+/*
+ * lock for writing
+ */
+extern void down_write(struct rw_semaphore *sem);
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+extern int down_write_trylock(struct rw_semaphore *sem);
+
+/*
+ * release a read lock
+ */
+extern void up_read(struct rw_semaphore *sem);
+
+/*
+ * release a write lock
+ */
+extern void up_write(struct rw_semaphore *sem);
+
+/*
+ * downgrade write lock to read lock
+ */
+extern void downgrade_write(struct rw_semaphore *sem);
+
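+/*
+ * Usage sketch (illustration only): sleeping reader/writer exclusion around a
+ * hypothetical foo_list; callers may block, so this must not be used from
+ * atomic context.
+ *
+ *	static DECLARE_RWSEM(foo_sem);
+ *
+ *	down_read(&foo_sem);
+ *	...walk foo_list...
+ *	up_read(&foo_sem);
+ *
+ *	down_write(&foo_sem);
+ *	...modify foo_list...
+ *	up_write(&foo_sem);
+ */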
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * nested locking. NOTE: rwsems are not allowed to recurse
+ * (which occurs if the same task tries to acquire the same
+ * lock instance multiple times), but multiple locks of the
+ * same lock class might be taken, if the order of the locks
+ * is always the same. This ordering rule can be expressed
+ * to lockdep via the _nested() APIs, but enumerating the
+ * subclasses that are used. (If the nesting relationship is
+ * static then another method for expressing nested locking is
+ * the explicit definition of lock class keys and the use of
+ * lockdep_set_class() at lock initialization time.
+ * See Documentation/locking/lockdep-design.txt for more details.)
+ */
+extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
+
+# define down_write_nest_lock(sem, nest_lock) \
+do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+ _down_write_nest_lock(sem, &(nest_lock)->dep_map); \
+} while (0);
+
+/*
+ * Take/release a lock when the task that acquires it is not the one that
+ * will release it.
+ *
+ * [ This API should be avoided as much as possible - the
+ * proper abstraction for this case is completions. ]
+ */
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
+#else
+# define down_read_nested(sem, subclass) down_read(sem)
+# define down_write_nest_lock(sem, nest_lock) down_write(sem)
+# define down_write_nested(sem, subclass) down_write(sem)
+# define down_read_non_owner(sem) down_read(sem)
+# define up_read_non_owner(sem) up_read(sem)
+#endif
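+
+/*
+ * Nesting sketch (illustration only; the foo_* names are hypothetical): when
+ * two rwsems of the same lock class are always taken parent before child, the
+ * inner acquisition is annotated with a distinct subclass so lockdep does not
+ * flag it as recursion.
+ *
+ *	enum { FOO_SEM_PARENT, FOO_SEM_CHILD };
+ *
+ *	down_write(&parent->sem);
+ *	down_write_nested(&child->sem, FOO_SEM_CHILD);
+ *	...
+ *	up_write(&child->sem);
+ *	up_write(&parent->sem);
+ */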
+
+#endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h
new file mode 100644
index 000000000..a53915cd5
--- /dev/null
+++ b/include/linux/rxrpc.h
@@ -0,0 +1,69 @@
+/* AF_RXRPC parameters
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_RXRPC_H
+#define _LINUX_RXRPC_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+
+/*
+ * RxRPC socket address
+ */
+struct sockaddr_rxrpc {
+ sa_family_t srx_family; /* address family */
+ u16 srx_service; /* service desired */
+ u16 transport_type; /* type of transport socket (SOCK_DGRAM) */
+ u16 transport_len; /* length of transport address */
+ union {
+ sa_family_t family; /* transport address family */
+ struct sockaddr_in sin; /* IPv4 transport address */
+ struct sockaddr_in6 sin6; /* IPv6 transport address */
+ } transport;
+};
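+
+/*
+ * Address sketch (illustration only, userspace style, assuming the usual
+ * socket headers; the port number is arbitrary): bind a client socket to a
+ * UDP/IPv4 transport.  srx_service is left at 0 for a client; a server would
+ * put its service ID here instead.
+ *
+ *	struct sockaddr_rxrpc srx = {
+ *		.srx_family		= AF_RXRPC,
+ *		.srx_service		= 0,
+ *		.transport_type		= SOCK_DGRAM,
+ *		.transport_len		= sizeof(srx.transport.sin),
+ *		.transport.sin.sin_family = AF_INET,
+ *		.transport.sin.sin_port	= htons(7000),
+ *	};
+ *
+ *	int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);
+ *	bind(fd, (struct sockaddr *)&srx, sizeof(srx));
+ */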
+
+/*
+ * RxRPC socket options
+ */
+#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */
+#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */
+#define RXRPC_EXCLUSIVE_CONNECTION 3 /* [clnt] use exclusive RxRPC connection */
+#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */
+
+/*
+ * RxRPC control messages
+ * - terminal messages mean that a user call ID tag can be recycled
+ */
+#define RXRPC_USER_CALL_ID 1 /* user call ID specifier */
+#define RXRPC_ABORT 2 /* abort request / notification [terminal] */
+#define RXRPC_ACK 3 /* [Server] RPC op final ACK received [terminal] */
+#define RXRPC_NET_ERROR 5 /* network error received [terminal] */
+#define RXRPC_BUSY 6 /* server busy received [terminal] */
+#define RXRPC_LOCAL_ERROR 7 /* local error generated [terminal] */
+#define RXRPC_NEW_CALL 8 /* [Server] new incoming call notification */
+#define RXRPC_ACCEPT 9 /* [Server] accept request */
+
+/*
+ * RxRPC security levels
+ */
+#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */
+#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */
+#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */
+
+/*
+ * RxRPC security indices
+ */
+#define RXRPC_SECURITY_NONE 0 /* no security protocol */
+#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */
+#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */
+#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */
+
+#endif /* _LINUX_RXRPC_H */
diff --git a/include/linux/s3c_adc_battery.h b/include/linux/s3c_adc_battery.h
new file mode 100644
index 000000000..99dadbffd
--- /dev/null
+++ b/include/linux/s3c_adc_battery.h
@@ -0,0 +1,41 @@
+#ifndef _S3C_ADC_BATTERY_H
+#define _S3C_ADC_BATTERY_H
+
+struct s3c_adc_bat_thresh {
+ int volt; /* mV */
+ int cur; /* mA */
+ int level; /* percent */
+};
+
+struct s3c_adc_bat_pdata {
+ int (*init)(void);
+ void (*exit)(void);
+ void (*enable_charger)(void);
+ void (*disable_charger)(void);
+
+ int gpio_charge_finished;
+ int gpio_inverted;
+
+ const struct s3c_adc_bat_thresh *lut_noac;
+ unsigned int lut_noac_cnt;
+ const struct s3c_adc_bat_thresh *lut_acin;
+ unsigned int lut_acin_cnt;
+
+ const unsigned int volt_channel;
+ const unsigned int current_channel;
+ const unsigned int backup_volt_channel;
+
+ const unsigned int volt_samples;
+ const unsigned int current_samples;
+ const unsigned int backup_volt_samples;
+
+ const unsigned int volt_mult;
+ const unsigned int current_mult;
+ const unsigned int backup_volt_mult;
+ const unsigned int internal_impedance;
+
+ const unsigned int backup_volt_max;
+ const unsigned int backup_volt_min;
+};
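+
+/*
+ * Board-code sketch (illustration only; all values and the foo_* names are
+ * made up): the driver maps an ADC reading to a charge level through the
+ * lookup tables supplied here.
+ *
+ *	static const struct s3c_adc_bat_thresh foo_lut_noac[] = {
+ *		{ .volt = 4100, .cur = 156, .level = 100 },
+ *		{ .volt = 3900, .cur = 540, .level = 60 },
+ *		{ .volt = 3600, .cur = 620, .level = 10 },
+ *	};
+ *
+ *	static struct s3c_adc_bat_pdata foo_bat_pdata = {
+ *		.lut_noac	= foo_lut_noac,
+ *		.lut_noac_cnt	= ARRAY_SIZE(foo_lut_noac),
+ *		.volt_channel	= 0,
+ *		.current_channel = 1,
+ *		.volt_samples	= 10,
+ *		.current_samples = 10,
+ *		.volt_mult	= 1000,
+ *		.current_mult	= 1000,
+ *	};
+ */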
+
+#endif
diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h
new file mode 100644
index 000000000..65839a58b
--- /dev/null
+++ b/include/linux/sa11x0-dma.h
@@ -0,0 +1,24 @@
+/*
+ * SA11x0 DMA Engine support
+ *
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_SA11X0_DMA_H
+#define __LINUX_SA11X0_DMA_H
+
+struct dma_chan;
+
+#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE)
+bool sa11x0_dma_filter_fn(struct dma_chan *, void *);
+#else
+static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d)
+{
+ return false;
+}
+#endif
+
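+/*
+ * Usage sketch (illustration only; the channel name is an assumption): a
+ * peripheral driver pairs the filter with the generic dmaengine request API
+ * to pick the SA11x0 DMA channel serving a given device.
+ *
+ *	dma_cap_mask_t mask;
+ *	struct dma_chan *chan;
+ *
+ *	dma_cap_zero(mask);
+ *	dma_cap_set(DMA_SLAVE, mask);
+ *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
+ */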
+#endif
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
new file mode 100644
index 000000000..ed8f9e70d
--- /dev/null
+++ b/include/linux/scatterlist.h
@@ -0,0 +1,352 @@
+#ifndef _LINUX_SCATTERLIST_H
+#define _LINUX_SCATTERLIST_H
+
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+
+#include <asm/types.h>
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+
+struct sg_table {
+ struct scatterlist *sgl; /* the list */
+ unsigned int nents; /* number of mapped entries */
+ unsigned int orig_nents; /* original size of list */
+};
+
+/*
+ * Notes on SG table design.
+ *
+ * Architectures must provide an unsigned long page_link field in the
+ * scatterlist struct. We use that to place the page pointer AND encode
+ * information about the sg table as well. The two lower bits are reserved
+ * for this information.
+ *
+ * If bit 0 is set, then the page_link contains a pointer to the next sg
+ * table list. Otherwise the next entry is at sg + 1.
+ *
+ * If bit 1 is set, then this sg entry is the last element in a list.
+ *
+ * See sg_next().
+ *
+ */
+
+#define SG_MAGIC 0x87654321
+
+/*
+ * We overload the LSB of the page pointer to indicate whether it's
+ * a valid sg entry, or whether it points to the start of a new scatterlist.
+ * Those low bits are there for everyone! (thanks mason :-)
+ */
+#define sg_is_chain(sg) ((sg)->page_link & 0x01)
+#define sg_is_last(sg) ((sg)->page_link & 0x02)
+#define sg_chain_ptr(sg) \
+ ((struct scatterlist *) ((sg)->page_link & ~0x03))
+
+/**
+ * sg_assign_page - Assign a given page to an SG entry
+ * @sg: SG entry
+ * @page: The page
+ *
+ * Description:
+ * Assign page to sg entry. Also see sg_set_page(), the most commonly used
+ * variant.
+ *
+ **/
+static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+ unsigned long page_link = sg->page_link & 0x3;
+
+ /*
+ * In order for the low bit stealing approach to work, pages
+ * must be aligned at a 32-bit boundary as a minimum.
+ */
+ BUG_ON((unsigned long) page & 0x03);
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sg->sg_magic != SG_MAGIC);
+ BUG_ON(sg_is_chain(sg));
+#endif
+ sg->page_link = page_link | (unsigned long) page;
+}
+
+/**
+ * sg_set_page - Set sg entry to point at given page
+ * @sg: SG entry
+ * @page: The page
+ * @len: Length of data
+ * @offset: Offset into page
+ *
+ * Description:
+ * Use this function to set an sg entry pointing at a page, never assign
+ * the page directly. We encode sg table information in the lower bits
+ * of the page pointer. See sg_page() for looking up the page belonging
+ * to an sg entry.
+ *
+ **/
+static inline void sg_set_page(struct scatterlist *sg, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ sg_assign_page(sg, page);
+ sg->offset = offset;
+ sg->length = len;
+}
+
+static inline struct page *sg_page(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sg->sg_magic != SG_MAGIC);
+ BUG_ON(sg_is_chain(sg));
+#endif
+ return (struct page *)((sg)->page_link & ~0x3);
+}
+
+/**
+ * sg_set_buf - Set sg entry to point at given data
+ * @sg: SG entry
+ * @buf: Data
+ * @buflen: Data length
+ *
+ **/
+static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
+ unsigned int buflen)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(!virt_addr_valid(buf));
+#endif
+ sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+}
+
+/*
+ * Loop over each sg element, following the pointer to a new list if necessary
+ */
+#define for_each_sg(sglist, sg, nr, __i) \
+ for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+
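+/*
+ * Usage sketch (illustration only; hdr and payload are hypothetical buffers):
+ * build a two-entry table and walk it.  sg_init_table() also terminates the
+ * last entry via sg_mark_end().
+ *
+ *	struct scatterlist sgl[2], *sg;
+ *	int i;
+ *
+ *	sg_init_table(sgl, ARRAY_SIZE(sgl));
+ *	sg_set_buf(&sgl[0], hdr, sizeof(*hdr));
+ *	sg_set_buf(&sgl[1], payload, payload_len);
+ *
+ *	for_each_sg(sgl, sg, ARRAY_SIZE(sgl), i)
+ *		pr_info("%u bytes at %p\n", sg->length, sg_virt(sg));
+ */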
+/**
+ * sg_chain - Chain two sglists together
+ * @prv: First scatterlist
+ * @prv_nents: Number of entries in prv
+ * @sgl: Second scatterlist
+ *
+ * Description:
+ * Links @prv@ and @sgl@ together, to form a longer scatterlist.
+ *
+ **/
+static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
+ struct scatterlist *sgl)
+{
+#ifndef CONFIG_ARCH_HAS_SG_CHAIN
+ BUG();
+#endif
+
+ /*
+ * offset and length are unused for chain entry. Clear them.
+ */
+ prv[prv_nents - 1].offset = 0;
+ prv[prv_nents - 1].length = 0;
+
+ /*
+ * Set lowest bit to indicate a link pointer, and make sure to clear
+ * the termination bit if it happens to be set.
+ */
+ prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
+}
+
+/**
+ * sg_mark_end - Mark the end of the scatterlist
+ * @sg: SG entry
+ *
+ * Description:
+ * Marks the passed in sg entry as the termination point for the sg
+ * table. A call to sg_next() on this entry will return NULL.
+ *
+ **/
+static inline void sg_mark_end(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+ /*
+ * Set termination bit, clear potential chain bit
+ */
+ sg->page_link |= 0x02;
+ sg->page_link &= ~0x01;
+}
+
+/**
+ * sg_unmark_end - Undo setting the end of the scatterlist
+ * @sg: SG entry
+ *
+ * Description:
+ * Removes the termination marker from the given entry of the scatterlist.
+ *
+ **/
+static inline void sg_unmark_end(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+ sg->page_link &= ~0x02;
+}
+
+/**
+ * sg_phys - Return physical address of an sg entry
+ * @sg: SG entry
+ *
+ * Description:
+ * This calls page_to_phys() on the page in this sg entry, and adds the
+ * sg offset. The caller must know that it is legal to call page_to_phys()
+ * on the sg page.
+ *
+ **/
+static inline dma_addr_t sg_phys(struct scatterlist *sg)
+{
+ return page_to_phys(sg_page(sg)) + sg->offset;
+}
+
+/**
+ * sg_virt - Return virtual address of an sg entry
+ * @sg: SG entry
+ *
+ * Description:
+ * This calls page_address() on the page in this sg entry, and adds the
+ * sg offset. The caller must know that the sg page has a valid virtual
+ * mapping.
+ *
+ **/
+static inline void *sg_virt(struct scatterlist *sg)
+{
+ return page_address(sg_page(sg)) + sg->offset;
+}
+
+int sg_nents(struct scatterlist *sg);
+struct scatterlist *sg_next(struct scatterlist *);
+struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
+void sg_init_table(struct scatterlist *, unsigned int);
+void sg_init_one(struct scatterlist *, const void *, unsigned int);
+
+typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
+typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
+
+void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
+void sg_free_table(struct sg_table *);
+int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
+ struct scatterlist *, gfp_t, sg_alloc_fn *);
+int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+ struct page **pages, unsigned int n_pages,
+ unsigned long offset, unsigned long size,
+ gfp_t gfp_mask);
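For lists too large to sit on the stack, the sg_table helpers above allocate the entries (chaining internally when more than a page of them is needed). A minimal allocate/populate/free sketch; nents and the GFP flags are arbitrary choices:

#include <linux/scatterlist.h>

static int table_example(unsigned int nents)
{
	struct sg_table table;
	int ret;

	ret = sg_alloc_table(&table, nents, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... populate table.sgl via sg_set_page()/sg_set_buf() ... */

	sg_free_table(&table);
	return 0;
}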
+
+size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen);
+size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen);
+
+size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, off_t skip);
+size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
+ void *buf, size_t buflen, off_t skip);
+
+/*
+ * Maximum number of entries that will be allocated in one piece, if
+ * a list larger than this is required then chaining will be utilized.
+ */
+#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
+
+/*
+ * sg page iterator
+ *
+ * Iterates over sg entries page-by-page. On each successful iteration,
+ * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
+ * to get the current page and its dma address. @piter->sg will point to the
+ * sg holding this page and @piter->sg_pgoffset to the page's page offset
+ * within the sg. The iteration stops either when a maximum number of sg
+ * entries has been reached or a terminating sg (sg_last(sg) == true) is hit.
+ */
+struct sg_page_iter {
+ struct scatterlist *sg; /* sg holding the page */
+ unsigned int sg_pgoffset; /* page offset within the sg */
+
+ /* these are internal states, keep away */
+ unsigned int __nents; /* remaining sg entries */
+ int __pg_advance; /* nr pages to advance at the
+ * next step */
+};
+
+bool __sg_page_iter_next(struct sg_page_iter *piter);
+void __sg_page_iter_start(struct sg_page_iter *piter,
+ struct scatterlist *sglist, unsigned int nents,
+ unsigned long pgoffset);
+/**
+ * sg_page_iter_page - get the current page held by the page iterator
+ * @piter: page iterator holding the page
+ */
+static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
+{
+ return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+}
+
+/**
+ * sg_page_iter_dma_address - get the dma address of the current page held by
+ * the page iterator.
+ * @piter: page iterator holding the page
+ */
+static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
+{
+ return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
+}
+
+/**
+ * for_each_sg_page - iterate over the pages of the given sg list
+ * @sglist: sglist to iterate over
+ * @piter: page iterator to hold current page, sg, sg_pgoffset
+ * @nents: maximum number of sg entries to iterate over
+ * @pgoffset: starting page offset
+ */
+#define for_each_sg_page(sglist, piter, nents, pgoffset) \
+ for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
+ __sg_page_iter_next(piter);)
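A short sketch of the page iterator over an already-populated list: grab the struct page behind every page-sized chunk the entries reference. count_sg_pages() is an invented name; a starting pgoffset of 0 begins at the first page.

#include <linux/scatterlist.h>

static unsigned int count_sg_pages(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int n = 0;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		(void)page;	/* e.g. get_page(page) or kmap() it here */
		n++;
	}
	return n;
}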
+
+/*
+ * Mapping sg iterator
+ *
+ * Iterates over sg entries mapping page-by-page. On each successful
+ * iteration, @miter->page points to the mapped page and
+ * @miter->length bytes of data can be accessed at @miter->addr. As
+ * long as an iteration is enclosed between start and stop, the user
+ * is free to choose control structure and when to stop.
+ *
+ * @miter->consumed is set to @miter->length on each iteration. It
+ * can be adjusted if the user can't consume all the bytes in one go.
+ * Also, a stopped iteration can be resumed by calling next on it.
+ * This is useful when iteration needs to release all resources and
+ * continue later (e.g. at the next interrupt).
+ */
+
+#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
+#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
+#define SG_MITER_FROM_SG (1 << 2) /* nop */
+
+struct sg_mapping_iter {
+ /* the following three fields can be accessed directly */
+ struct page *page; /* currently mapped page */
+ void *addr; /* pointer to the mapped area */
+ size_t length; /* length of the mapped area */
+ size_t consumed; /* number of consumed bytes */
+ struct sg_page_iter piter; /* page iterator */
+
+ /* these are internal states, keep away */
+ unsigned int __offset; /* offset within page */
+ unsigned int __remaining; /* remaining bytes on page */
+ unsigned int __flags;
+};
+
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+ unsigned int nents, unsigned int flags);
+bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
+bool sg_miter_next(struct sg_mapping_iter *miter);
+void sg_miter_stop(struct sg_mapping_iter *miter);
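The mapping iterator is typically driven as below: start it over the list, consume one mapped chunk per sg_miter_next(), and stop when done. This is roughly what sg_copy_to_buffer() does internally; treat it as a sketch, with the destination buffer and length supplied by the caller. SG_MITER_FROM_SG matches data flowing out of the scatterlist.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static size_t copy_out(struct scatterlist *sgl, unsigned int nents,
		       void *dst, size_t len)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (copied < len && sg_miter_next(&miter)) {
		size_t chunk = min(miter.length, len - copied);

		memcpy((char *)dst + copied, miter.addr, chunk);
		copied += chunk;
	}
	sg_miter_stop(&miter);

	return copied;
}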
+
+#endif /* _LINUX_SCATTERLIST_H */
diff --git a/include/linux/scc.h b/include/linux/scc.h
new file mode 100644
index 000000000..c5a004962
--- /dev/null
+++ b/include/linux/scc.h
@@ -0,0 +1,85 @@
+/* $Id: scc.h,v 1.29 1997/04/02 14:56:45 jreuter Exp jreuter $ */
+#ifndef _SCC_H
+#define _SCC_H
+
+#include <uapi/linux/scc.h>
+
+
+enum {TX_OFF, TX_ON}; /* command for scc_key_trx() */
+
+/* Vector masks in RR2B */
+
+#define VECTOR_MASK 0x06
+#define TXINT 0x00
+#define EXINT 0x02
+#define RXINT 0x04
+#define SPINT 0x06
+
+#ifdef CONFIG_SCC_DELAY
+#define Inb(port) inb_p(port)
+#define Outb(port, val) outb_p(val, port)
+#else
+#define Inb(port) inb(port)
+#define Outb(port, val) outb(val, port)
+#endif
+
+/* SCC channel control structure for KISS */
+
+struct scc_kiss {
+ unsigned char txdelay; /* Transmit Delay 10 ms/cnt */
+ unsigned char persist; /* Persistence (0-255) as a % */
+ unsigned char slottime; /* Delay to wait on persistence hit */
+ unsigned char tailtime; /* Delay after last byte written */
+ unsigned char fulldup; /* Full Duplex mode 0=CSMA 1=DUP 2=ALWAYS KEYED */
+ unsigned char waittime; /* Waittime before any transmit attempt */
+ unsigned int maxkeyup; /* Maximum time to transmit (seconds) */
+ unsigned int mintime; /* Minimal offtime after MAXKEYUP timeout (seconds) */
+ unsigned int idletime; /* Maximum idle time in ALWAYS KEYED mode (seconds) */
+ unsigned int maxdefer; /* Timer for CSMA channel busy limit */
+ unsigned char tx_inhibit; /* Transmit is not allowed when set */
+ unsigned char group; /* Group ID for AX.25 TX interlocking */
+ unsigned char mode; /* 'normal' or 'hwctrl' mode (unused) */
+ unsigned char softdcd; /* Use DPLL instead of DCD pin for carrier detect */
+};
+
+
+/* SCC channel structure */
+
+struct scc_channel {
+ int init; /* channel exists? */
+
+ struct net_device *dev; /* link to device control structure */
+ struct net_device_stats dev_stat;/* device statistics */
+
+ char brand; /* manufacturer of the board */
+ long clock; /* used clock */
+
+ io_port ctrl; /* I/O address of CONTROL register */
+ io_port data; /* I/O address of DATA register */
+ io_port special; /* I/O address of special function port */
+ int irq; /* Number of Interrupt */
+
+ char option;
+ char enhanced; /* Enhanced SCC support */
+
+ unsigned char wreg[16]; /* Copy of last written value in WRx */
+ unsigned char status; /* Copy of R0 at last external interrupt */
+ unsigned char dcd; /* DCD status */
+
+ struct scc_kiss kiss; /* control structure for KISS params */
+ struct scc_stat stat; /* statistical information */
+ struct scc_modem modem; /* modem information */
+
+ struct sk_buff_head tx_queue; /* next tx buffer */
+ struct sk_buff *rx_buff; /* pointer to frame currently received */
+ struct sk_buff *tx_buff; /* pointer to frame currently transmitted */
+
+ /* Timer */
+ struct timer_list tx_t; /* tx timer for this channel */
+ struct timer_list tx_wdog; /* tx watchdogs */
+
+ /* Channel lock */
+ spinlock_t lock; /* Channel guard lock */
+};
+
+#endif /* defined(_SCC_H) */
diff --git a/include/linux/sched.h b/include/linux/sched.h
new file mode 100644
index 000000000..5083de9c1
--- /dev/null
+++ b/include/linux/sched.h
@@ -0,0 +1,3179 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+#include <uapi/linux/sched.h>
+
+#include <linux/sched/prio.h>
+
+
+struct sched_param {
+ int sched_priority;
+};
+
+#include <asm/param.h> /* for HZ */
+
+#include <linux/capability.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timex.h>
+#include <linux/jiffies.h>
+#include <linux/plist.h>
+#include <linux/rbtree.h>
+#include <linux/thread_info.h>
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/nodemask.h>
+#include <linux/mm_types.h>
+#include <linux/preempt_mask.h>
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <linux/cputime.h>
+
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/shm.h>
+#include <linux/signal.h>
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/pid.h>
+#include <linux/percpu.h>
+#include <linux/topology.h>
+#include <linux/proportions.h>
+#include <linux/seccomp.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <linux/rtmutex.h>
+
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/task_io_accounting.h>
+#include <linux/latencytop.h>
+#include <linux/cred.h>
+#include <linux/llist.h>
+#include <linux/uidgid.h>
+#include <linux/gfp.h>
+#include <linux/magic.h>
+
+#include <asm/processor.h>
+
+#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
+
+/*
+ * Extended scheduling parameters data structure.
+ *
+ * This is needed because the original struct sched_param can not be
+ * altered without introducing ABI issues with legacy applications
+ * (e.g., in sched_getparam()).
+ *
+ * However, the possibility of specifying more than just a priority for
+ * the tasks may be useful for a wide variety of application fields, e.g.,
+ * multimedia, streaming, automation and control, and many others.
+ *
+ * This variant (sched_attr) is meant to describe a so-called
+ * sporadic time-constrained task. In such a model a task is specified by:
+ * - the activation period or minimum instance inter-arrival time;
+ * - the maximum (or average, depending on the actual scheduling
+ * discipline) computation time of all instances, a.k.a. runtime;
+ * - the deadline (relative to the actual activation time) of each
+ * instance.
+ * Very briefly, a periodic (sporadic) task asks for the execution of
+ * some specific computation --which is typically called an instance--
+ * (at most) every period. Moreover, each instance typically lasts no more
+ * than the runtime and must be completed by time instant t equal to
+ * the instance activation time + the deadline.
+ *
+ * This is reflected by the actual fields of the sched_attr structure:
+ *
+ * @size size of the structure, for fwd/bwd compat.
+ *
+ * @sched_policy task's scheduling policy
+ * @sched_flags for customizing the scheduler behaviour
+ * @sched_nice task's nice value (SCHED_NORMAL/BATCH)
+ * @sched_priority task's static priority (SCHED_FIFO/RR)
+ * @sched_deadline representative of the task's deadline
+ * @sched_runtime representative of the task's runtime
+ * @sched_period representative of the task's period
+ *
+ * Given this task model, there is a multiplicity of scheduling algorithms
+ * and policies that can be used to ensure all the tasks will meet their
+ * timing constraints.
+ *
+ * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
+ * only user of this new interface. More information about the algorithm is
+ * available in the scheduling class file or in Documentation/.
+ */
+struct sched_attr {
+ u32 size;
+
+ u32 sched_policy;
+ u64 sched_flags;
+
+ /* SCHED_NORMAL, SCHED_BATCH */
+ s32 sched_nice;
+
+ /* SCHED_FIFO, SCHED_RR */
+ u32 sched_priority;
+
+ /* SCHED_DEADLINE */
+ u64 sched_runtime;
+ u64 sched_deadline;
+ u64 sched_period;
+};
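As a hedged userspace illustration of the sporadic-task model just described: request a 10 ms runtime every 100 ms for the calling thread via sched_setattr(2). The layout below mirrors the struct above; since no libc wrapper or uapi definition of struct sched_attr exists at this point, both the struct and the policy constant are spelled out by hand (as the deadline documentation does), and __NR_sched_setattr is assumed to come from sufficiently new kernel headers. The numbers are arbitrary examples.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE	6

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int set_deadline_self(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	    = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  10 * 1000 * 1000ULL;	/*  10 ms, in ns */
	attr.sched_deadline = 100 * 1000 * 1000ULL;	/* 100 ms */
	attr.sched_period   = 100 * 1000 * 1000ULL;	/* 100 ms */

	return syscall(__NR_sched_setattr, 0 /* self */, &attr, 0 /* flags */);
}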
+
+struct futex_pi_state;
+struct robust_list_head;
+struct bio_list;
+struct fs_struct;
+struct perf_event_context;
+struct blk_plug;
+struct filename;
+
+#define VMACACHE_BITS 2
+#define VMACACHE_SIZE (1U << VMACACHE_BITS)
+#define VMACACHE_MASK (VMACACHE_SIZE - 1)
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
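To make the fixed-point arithmetic concrete, a standalone sketch of one 5-second update step (not kernel code): with FIXED_1 = 2048, a stored value of 2048 represents a load of 1.00, and the sample n is the number of runnable tasks scaled by FIXED_1.

#define FSHIFT	11
#define FIXED_1	(1 << FSHIFT)	/* 1.0 in fixed point == 2048 */
#define EXP_1	1884		/* 1/exp(5s/1min) in fixed point */

static unsigned long calc_load_step(unsigned long load, unsigned long active)
{
	unsigned long n = active * FIXED_1;	/* runnable tasks, fixed point */

	load *= EXP_1;
	load += n * (FIXED_1 - EXP_1);
	load >>= FSHIFT;
	return load;
}

/*
 * Starting from load = 0 with one runnable task, a single step gives
 * (0 * 1884 + 2048 * (2048 - 1884)) >> 11 = 164, i.e. about 0.08 --
 * the familiar slow climb of the 1-minute average toward 1.00.
 */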
+
+extern unsigned long total_forks;
+extern int nr_threads;
+DECLARE_PER_CPU(unsigned long, process_counts);
+extern int nr_processes(void);
+extern unsigned long nr_running(void);
+extern bool single_task_running(void);
+extern unsigned long nr_iowait(void);
+extern unsigned long nr_iowait_cpu(int cpu);
+extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
+
+extern void calc_global_load(unsigned long ticks);
+extern void update_cpu_load_nohz(void);
+
+extern unsigned long get_parent_ip(unsigned long addr);
+
+extern void dump_cpu_task(int cpu);
+
+struct seq_file;
+struct cfs_rq;
+struct task_group;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
+#endif
+
+/*
+ * Task state bitmask. NOTE! These bits are also
+ * encoded in fs/proc/array.c: get_task_state().
+ *
+ * We have two separate sets of flags: task->state
+ * is about runnability, while task->exit_state are
+ * about the task exiting. Confusing, but this way
+ * modifying one set can't modify the other one by
+ * mistake.
+ */
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define __TASK_STOPPED 4
+#define __TASK_TRACED 8
+/* in tsk->exit_state */
+#define EXIT_DEAD 16
+#define EXIT_ZOMBIE 32
+#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
+/* in tsk->state again */
+#define TASK_DEAD 64
+#define TASK_WAKEKILL 128
+#define TASK_WAKING 256
+#define TASK_PARKED 512
+#define TASK_STATE_MAX 1024
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
+
+extern char ___assert_task_state[1 - 2*!!(
+ sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
+ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+ __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+
+#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) \
+ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_contributes_to_load(task) \
+ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
+ (task->flags & PF_FROZEN) == 0)
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+
+#define __set_task_state(tsk, state_value) \
+ do { \
+ (tsk)->task_state_change = _THIS_IP_; \
+ (tsk)->state = (state_value); \
+ } while (0)
+#define set_task_state(tsk, state_value) \
+ do { \
+ (tsk)->task_state_change = _THIS_IP_; \
+ set_mb((tsk)->state, (state_value)); \
+ } while (0)
+
+/*
+ * set_current_state() includes a barrier so that the write of current->state
+ * is correctly serialised wrt the caller's subsequent test of whether to
+ * actually sleep:
+ *
+ * set_current_state(TASK_UNINTERRUPTIBLE);
+ * if (do_i_need_to_sleep())
+ * schedule();
+ *
+ * If the caller does not need such serialisation then use __set_current_state()
+ */
+#define __set_current_state(state_value) \
+ do { \
+ current->task_state_change = _THIS_IP_; \
+ current->state = (state_value); \
+ } while (0)
+#define set_current_state(state_value) \
+ do { \
+ current->task_state_change = _THIS_IP_; \
+ set_mb(current->state, (state_value)); \
+ } while (0)
+
+#else
+
+#define __set_task_state(tsk, state_value) \
+ do { (tsk)->state = (state_value); } while (0)
+#define set_task_state(tsk, state_value) \
+ set_mb((tsk)->state, (state_value))
+
+/*
+ * set_current_state() includes a barrier so that the write of current->state
+ * is correctly serialised wrt the caller's subsequent test of whether to
+ * actually sleep:
+ *
+ * set_current_state(TASK_UNINTERRUPTIBLE);
+ * if (do_i_need_to_sleep())
+ * schedule();
+ *
+ * If the caller does not need such serialisation then use __set_current_state()
+ */
+#define __set_current_state(state_value) \
+ do { current->state = (state_value); } while (0)
+#define set_current_state(state_value) \
+ set_mb(current->state, (state_value))
+
+#endif
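Expanding the comment's pattern into a complete (if simplified) wait loop: the state is set before the condition is re-checked, so a wakeup that arrives between the check and schedule() is not lost. condition_met() and the code on the wakeup side (typically a wait queue plus wake_up()) are placeholders.

/* Sketch of the classic sleep loop built on set_current_state(). */
static void wait_for_condition(int (*condition_met)(void))
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (condition_met())
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}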
+
+/* Task command name length */
+#define TASK_COMM_LEN 16
+
+#include <linux/spinlock.h>
+
+/*
+ * This serializes "schedule()" and also protects
+ * the run-queue from deletions/modifications (but
+ * _adding_ to the beginning of the run-queue has
+ * a separate lock).
+ */
+extern rwlock_t tasklist_lock;
+extern spinlock_t mmlist_lock;
+
+struct task_struct;
+
+#ifdef CONFIG_PROVE_RCU
+extern int lockdep_tasklist_lock_is_held(void);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
+extern void sched_init(void);
+extern void sched_init_smp(void);
+extern asmlinkage void schedule_tail(struct task_struct *prev);
+extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
+
+extern cpumask_var_t cpu_isolated_map;
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void nohz_balance_enter_idle(int cpu);
+extern void set_cpu_sd_state_idle(void);
+extern int get_nohz_timer_target(int pinned);
+#else
+static inline void nohz_balance_enter_idle(int cpu) { }
+static inline void set_cpu_sd_state_idle(void) { }
+static inline int get_nohz_timer_target(int pinned)
+{
+ return smp_processor_id();
+}
+#endif
+
+/*
+ * Only dump TASK_* tasks. (0 for all tasks)
+ */
+extern void show_state_filter(unsigned long state_filter);
+
+static inline void show_state(void)
+{
+ show_state_filter(0);
+}
+
+extern void show_regs(struct pt_regs *);
+
+/*
+ * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
+ * task), SP is the stack pointer of the first frame that should be shown in the back
+ * trace (or NULL if the entire call-chain of the task should be shown).
+ */
+extern void show_stack(struct task_struct *task, unsigned long *sp);
+
+extern void cpu_init (void);
+extern void trap_init(void);
+extern void update_process_times(int user);
+extern void scheduler_tick(void);
+
+extern void sched_show_task(struct task_struct *p);
+
+#ifdef CONFIG_LOCKUP_DETECTOR
+extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
+extern void touch_all_softlockup_watchdogs(void);
+extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+extern unsigned int softlockup_panic;
+void lockup_detector_init(void);
+#else
+static inline void touch_softlockup_watchdog(void)
+{
+}
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
+static inline void touch_all_softlockup_watchdogs(void)
+{
+}
+static inline void lockup_detector_init(void)
+{
+}
+#endif
+
+#ifdef CONFIG_DETECT_HUNG_TASK
+void reset_hung_task_detector(void);
+#else
+static inline void reset_hung_task_detector(void)
+{
+}
+#endif
+
+/* Attach to any functions which should be ignored in wchan output. */
+#define __sched __attribute__((__section__(".sched.text")))
+
+/* Linker adds these: start and end of __sched functions */
+extern char __sched_text_start[], __sched_text_end[];
+
+/* Is this address in the __sched functions? */
+extern int in_sched_functions(unsigned long addr);
+
+#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+extern signed long schedule_timeout(signed long timeout);
+extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
+extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void schedule(void);
+extern void schedule_preempt_disabled(void);
+
+extern long io_schedule_timeout(long timeout);
+
+static inline void io_schedule(void)
+{
+ io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+}
+
+struct nsproxy;
+struct user_namespace;
+
+#ifdef CONFIG_MMU
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
+extern unsigned long
+arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+extern unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
+#endif
+
+#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
+#define SUID_DUMP_USER 1 /* Dump as user of process */
+#define SUID_DUMP_ROOT 2 /* Dump as root */
+
+/* mm flags */
+
+/* for SUID_DUMP_* above */
+#define MMF_DUMPABLE_BITS 2
+#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
+
+extern void set_dumpable(struct mm_struct *mm, int value);
+/*
+ * This returns the actual value of the suid_dumpable flag. Code that
+ * uses this to check for privilege transitions must test against
+ * SUID_DUMP_USER rather than treating the result as a boolean value.
+ */
+static inline int __get_dumpable(unsigned long mm_flags)
+{
+ return mm_flags & MMF_DUMPABLE_MASK;
+}
+
+static inline int get_dumpable(struct mm_struct *mm)
+{
+ return __get_dumpable(mm->flags);
+}
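The warning in the comment above, in code form (a hedged sketch; the surrounding policy check is invented): SUID_DUMP_ROOT is non-zero, so a plain truth test would wrongly treat a root-only dumpable mm as if it dumped as the user.

/* Illustration only: deny an operation unless the mm dumps as the user. */
static int may_access_mm(struct mm_struct *mm)
{
	if (get_dumpable(mm) != SUID_DUMP_USER)	/* not: if (!get_dumpable(mm)) */
		return -EPERM;
	return 0;
}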
+
+/* coredump filter bits */
+#define MMF_DUMP_ANON_PRIVATE 2
+#define MMF_DUMP_ANON_SHARED 3
+#define MMF_DUMP_MAPPED_PRIVATE 4
+#define MMF_DUMP_MAPPED_SHARED 5
+#define MMF_DUMP_ELF_HEADERS 6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED 8
+
+#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
+#define MMF_DUMP_FILTER_BITS 7
+#define MMF_DUMP_FILTER_MASK \
+ (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
+#define MMF_DUMP_FILTER_DEFAULT \
+ ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
+ (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF 0
+#endif
+ /* leave room for more dump flags */
+#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
+#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
+#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
+
+#define MMF_HAS_UPROBES 19 /* has uprobes */
+#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
+
+#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
+
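These filter bits live in mm->flags above the two dumpable bits; what userspace reads from /proc/<pid>/coredump_filter is the same mask shifted down by MMF_DUMP_FILTER_SHIFT. A small sketch of that translation (the helper name is invented):

/* Illustration: mm->flags filter bits as the /proc coredump_filter value. */
static unsigned long coredump_filter_of(const struct mm_struct *mm)
{
	return (mm->flags & MMF_DUMP_FILTER_MASK) >> MMF_DUMP_FILTER_SHIFT;
}

/*
 * With the default above (anon private + anon shared + hugetlb private)
 * this reads back as 0x23, or 0x33 when
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS adds the ELF-headers bit.
 */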
+struct sighand_struct {
+ atomic_t count;
+ struct k_sigaction action[_NSIG];
+ spinlock_t siglock;
+ wait_queue_head_t signalfd_wqh;
+};
+
+struct pacct_struct {
+ int ac_flag;
+ long ac_exitcode;
+ unsigned long ac_mem;
+ cputime_t ac_utime, ac_stime;
+ unsigned long ac_minflt, ac_majflt;
+};
+
+struct cpu_itimer {
+ cputime_t expires;
+ cputime_t incr;
+ u32 error;
+ u32 incr_error;
+};
+
+/**
+ * struct cputime - snapshot of system and user cputime
+ * @utime: time spent in user mode
+ * @stime: time spent in system mode
+ *
+ * Gathers a generic snapshot of user and system time.
+ */
+struct cputime {
+ cputime_t utime;
+ cputime_t stime;
+};
+
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime: time spent in user mode, in &cputime_t units
+ * @stime: time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
+ *
+ * This is an extension of struct cputime that includes the total runtime
+ * spent by the task from the scheduler point of view.
+ *
+ * As a result, this structure groups together three kinds of CPU time
+ * that are tracked for threads and thread groups. Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+ cputime_t utime;
+ cputime_t stime;
+ unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp stime
+#define virt_exp utime
+#define sched_exp sum_exec_runtime
+
+#define INIT_CPUTIME \
+ (struct task_cputime) { \
+ .utime = 0, \
+ .stime = 0, \
+ .sum_exec_runtime = 0, \
+ }
+
+#ifdef CONFIG_PREEMPT_COUNT
+#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
+#else
+#define PREEMPT_DISABLED PREEMPT_ENABLED
+#endif
+
+/*
+ * Disable preemption until the scheduler is running.
+ * Reset by start_kernel()->sched_init()->init_idle().
+ *
+ * We include PREEMPT_ACTIVE to avoid cond_resched() from working
+ * before the scheduler is active -- see should_resched().
+ */
+#define INIT_PREEMPT_COUNT (PREEMPT_DISABLED + PREEMPT_ACTIVE)
+
+/**
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime: thread group interval timers.
+ * @running: non-zero when there are timers running and
+ * @cputime receives updates.
+ * @lock: lock for fields in this struct.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU timer calculations.
+ */
+struct thread_group_cputimer {
+ struct task_cputime cputime;
+ int running;
+ raw_spinlock_t lock;
+};
+
+#include <linux/rwsem.h>
+struct autogroup;
+
+/*
+ * NOTE! "signal_struct" does not have its own
+ * locking, because a shared signal_struct always
+ * implies a shared sighand_struct, so locking
+ * sighand_struct is always a proper superset of
+ * the locking of signal_struct.
+ */
+struct signal_struct {
+ atomic_t sigcnt;
+ atomic_t live;
+ int nr_threads;
+ struct list_head thread_head;
+
+ wait_queue_head_t wait_chldexit; /* for wait4() */
+
+ /* current thread group signal load-balancing target: */
+ struct task_struct *curr_target;
+
+ /* shared signal handling: */
+ struct sigpending shared_pending;
+
+ /* thread group exit support */
+ int group_exit_code;
+ /* overloaded:
+ * - notify group_exit_task when ->count is equal to notify_count
+ * - everyone except group_exit_task is stopped during signal delivery
+ * of fatal signals, group_exit_task processes the signal.
+ */
+ int notify_count;
+ struct task_struct *group_exit_task;
+
+ /* thread group stop support, overloads group_exit_code too */
+ int group_stop_count;
+ unsigned int flags; /* see SIGNAL_* flags below */
+
+ /*
+ * PR_SET_CHILD_SUBREAPER marks a process, like a service
+ * manager, to re-parent orphan (double-forking) child processes
+ * to this process instead of 'init'. The service manager is
+ * able to receive SIGCHLD signals and is able to investigate
+ * the process until it calls wait(). All children of this
+ * process will inherit a flag if they should look for a
+ * child_subreaper process at exit.
+ */
+ unsigned int is_child_subreaper:1;
+ unsigned int has_child_subreaper:1;
+
+ /* POSIX.1b Interval Timers */
+ int posix_timer_id;
+ struct list_head posix_timers;
+
+ /* ITIMER_REAL timer for the process */
+ struct hrtimer real_timer;
+ struct pid *leader_pid;
+ ktime_t it_real_incr;
+
+ /*
+ * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
+ * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
+ * values are defined to 0 and 1 respectively
+ */
+ struct cpu_itimer it[2];
+
+ /*
+ * Thread group totals for process CPU timers.
+ * See thread_group_cputimer(), et al, for details.
+ */
+ struct thread_group_cputimer cputimer;
+
+ /* Earliest-expiration cache. */
+ struct task_cputime cputime_expires;
+
+ struct list_head cpu_timers[3];
+
+ struct pid *tty_old_pgrp;
+
+ /* boolean value for session group leader */
+ int leader;
+
+ struct tty_struct *tty; /* NULL if no tty */
+
+#ifdef CONFIG_SCHED_AUTOGROUP
+ struct autogroup *autogroup;
+#endif
+ /*
+ * Cumulative resource counters for dead threads in the group,
+ * and for reaped dead child processes forked by this group.
+ * Live threads maintain their own counters and add to these
+ * in __exit_signal, except for the group leader.
+ */
+ seqlock_t stats_lock;
+ cputime_t utime, stime, cutime, cstime;
+ cputime_t gtime;
+ cputime_t cgtime;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ struct cputime prev_cputime;
+#endif
+ unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
+ unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long inblock, oublock, cinblock, coublock;
+ unsigned long maxrss, cmaxrss;
+ struct task_io_accounting ioac;
+
+ /*
+ * Cumulative ns of scheduled CPU time for dead threads in the
+ * group, not including a zombie group leader. (This only differs
+ * from jiffies_to_ns(utime + stime) if sched_clock uses something
+ * other than jiffies.)
+ */
+ unsigned long long sum_sched_runtime;
+
+ /*
+ * We don't bother to synchronize most readers of this at all,
+ * because there is no reader checking a limit that actually needs
+ * to get both rlim_cur and rlim_max atomically, and either one
+ * alone is a single word that can safely be read normally.
+ * getrlimit/setrlimit use task_lock(current->group_leader) to
+ * protect this instead of the siglock, because they really
+ * have no need to disable irqs.
+ */
+ struct rlimit rlim[RLIM_NLIMITS];
+
+#ifdef CONFIG_BSD_PROCESS_ACCT
+ struct pacct_struct pacct; /* per-process accounting information */
+#endif
+#ifdef CONFIG_TASKSTATS
+ struct taskstats *stats;
+#endif
+#ifdef CONFIG_AUDIT
+ unsigned audit_tty;
+ unsigned audit_tty_log_passwd;
+ struct tty_audit_buf *tty_audit_buf;
+#endif
+#ifdef CONFIG_CGROUPS
+ /*
+ * group_rwsem prevents new tasks from entering the threadgroup and
+ * member tasks from exiting, more specifically, setting of
+ * PF_EXITING. fork and exit paths are protected with this rwsem
+ * using threadgroup_change_begin/end(). Users which require
+ * threadgroup to remain stable should use threadgroup_[un]lock()
+ * which also takes care of exec path. Currently, cgroup is the
+ * only user.
+ */
+ struct rw_semaphore group_rwsem;
+#endif
+
+ oom_flags_t oom_flags;
+ short oom_score_adj; /* OOM kill score adjustment */
+ short oom_score_adj_min; /* OOM kill score adjustment min value.
+ * Only settable by CAP_SYS_RESOURCE. */
+
+ struct mutex cred_guard_mutex; /* guard against foreign influences on
+ * credential calculations
+ * (notably ptrace) */
+};
+
+/*
+ * Bits in flags field of signal_struct.
+ */
+#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
+#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
+#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
+#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
+/*
+ * Pending notifications to parent.
+ */
+#define SIGNAL_CLD_STOPPED 0x00000010
+#define SIGNAL_CLD_CONTINUED 0x00000020
+#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
+
+#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
+
+/* If true, all threads except ->group_exit_task have pending SIGKILL */
+static inline int signal_group_exit(const struct signal_struct *sig)
+{
+ return (sig->flags & SIGNAL_GROUP_EXIT) ||
+ (sig->group_exit_task != NULL);
+}
+
+/*
+ * Some day this will be a full-fledged user tracking system..
+ */
+struct user_struct {
+ atomic_t __count; /* reference count */
+ atomic_t processes; /* How many processes does this user have? */
+ atomic_t sigpending; /* How many pending signals does this user have? */
+#ifdef CONFIG_INOTIFY_USER
+ atomic_t inotify_watches; /* How many inotify watches does this user have? */
+ atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
+#endif
+#ifdef CONFIG_FANOTIFY
+ atomic_t fanotify_listeners;
+#endif
+#ifdef CONFIG_EPOLL
+ atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
+#endif
+#ifdef CONFIG_POSIX_MQUEUE
+ /* protected by mq_lock */
+ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
+#endif
+ unsigned long locked_shm; /* How many pages of mlocked shm ? */
+
+#ifdef CONFIG_KEYS
+ struct key *uid_keyring; /* UID specific keyring */
+ struct key *session_keyring; /* UID's default session keyring */
+#endif
+
+ /* Hash table maintenance information */
+ struct hlist_node uidhash_node;
+ kuid_t uid;
+
+#ifdef CONFIG_PERF_EVENTS
+ atomic_long_t locked_vm;
+#endif
+};
+
+extern int uids_sysfs_init(void);
+
+extern struct user_struct *find_user(kuid_t);
+
+extern struct user_struct root_user;
+#define INIT_USER (&root_user)
+
+
+struct backing_dev_info;
+struct reclaim_state;
+
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+struct sched_info {
+ /* cumulative counters */
+ unsigned long pcount; /* # of times run on this cpu */
+ unsigned long long run_delay; /* time spent waiting on a runqueue */
+
+ /* timestamps */
+ unsigned long long last_arrival,/* when we last ran on a cpu */
+ last_queued; /* when we were last queued to run */
+};
+#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+
+#ifdef CONFIG_TASK_DELAY_ACCT
+struct task_delay_info {
+ spinlock_t lock;
+ unsigned int flags; /* Private per-task flags */
+
+ /* For each stat XXX, add following, aligned appropriately
+ *
+ * struct timespec XXX_start, XXX_end;
+ * u64 XXX_delay;
+ * u32 XXX_count;
+ *
+ * Atomicity of updates to XXX_delay, XXX_count protected by
+ * single lock above (split into XXX_lock if contention is an issue).
+ */
+
+ /*
+ * XXX_count is incremented on every XXX operation, the delay
+ * associated with the operation is added to XXX_delay.
+ * XXX_delay contains the accumulated delay time in nanoseconds.
+ */
+ u64 blkio_start; /* Shared by blkio, swapin */
+ u64 blkio_delay; /* wait for sync block io completion */
+ u64 swapin_delay; /* wait for swapin block io completion */
+ u32 blkio_count; /* total count of the number of sync block */
+ /* io operations performed */
+ u32 swapin_count; /* total count of the number of swapin block */
+ /* io operations performed */
+
+ u64 freepages_start;
+ u64 freepages_delay; /* wait for memory reclaim */
+ u32 freepages_count; /* total count of memory reclaim */
+};
+#endif /* CONFIG_TASK_DELAY_ACCT */
+
+static inline int sched_info_on(void)
+{
+#ifdef CONFIG_SCHEDSTATS
+ return 1;
+#elif defined(CONFIG_TASK_DELAY_ACCT)
+ extern int delayacct_on;
+ return delayacct_on;
+#else
+ return 0;
+#endif
+}
+
+enum cpu_idle_type {
+ CPU_IDLE,
+ CPU_NOT_IDLE,
+ CPU_NEWLY_IDLE,
+ CPU_MAX_IDLE_TYPES
+};
+
+/*
+ * Increase resolution of cpu_capacity calculations
+ */
+#define SCHED_CAPACITY_SHIFT 10
+#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
+
+/*
+ * sched-domains (multiprocessor balancing) declarations:
+ */
+#ifdef CONFIG_SMP
+#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
+#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
+#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
+#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
+#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
+#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
+#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */
+#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
+#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
+#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
+#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
+#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
+#define SD_NUMA 0x4000 /* cross-node balancing */
+
+#ifdef CONFIG_SCHED_SMT
+static inline int cpu_smt_flags(void)
+{
+ return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
+#ifdef CONFIG_SCHED_MC
+static inline int cpu_core_flags(void)
+{
+ return SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
+#ifdef CONFIG_NUMA
+static inline int cpu_numa_flags(void)
+{
+ return SD_NUMA;
+}
+#endif
+
+struct sched_domain_attr {
+ int relax_domain_level;
+};
+
+#define SD_ATTR_INIT (struct sched_domain_attr) { \
+ .relax_domain_level = -1, \
+}
+
+extern int sched_domain_level_max;
+
+struct sched_group;
+
+struct sched_domain {
+ /* These fields must be setup */
+ struct sched_domain *parent; /* top domain must be null terminated */
+ struct sched_domain *child; /* bottom domain must be null terminated */
+ struct sched_group *groups; /* the balancing groups of the domain */
+ unsigned long min_interval; /* Minimum balance interval ms */
+ unsigned long max_interval; /* Maximum balance interval ms */
+ unsigned int busy_factor; /* less balancing by factor if busy */
+ unsigned int imbalance_pct; /* No balance until over watermark */
+ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
+ unsigned int busy_idx;
+ unsigned int idle_idx;
+ unsigned int newidle_idx;
+ unsigned int wake_idx;
+ unsigned int forkexec_idx;
+ unsigned int smt_gain;
+
+ int nohz_idle; /* NOHZ IDLE status */
+ int flags; /* See SD_* */
+ int level;
+
+ /* Runtime fields. */
+ unsigned long last_balance; /* init to jiffies. units in jiffies */
+ unsigned int balance_interval; /* initialise to 1. units in ms. */
+ unsigned int nr_balance_failed; /* initialise to 0 */
+
+ /* idle_balance() stats */
+ u64 max_newidle_lb_cost;
+ unsigned long next_decay_max_lb_cost;
+
+#ifdef CONFIG_SCHEDSTATS
+ /* load_balance() stats */
+ unsigned int lb_count[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
+
+ /* Active load balancing */
+ unsigned int alb_count;
+ unsigned int alb_failed;
+ unsigned int alb_pushed;
+
+ /* SD_BALANCE_EXEC stats */
+ unsigned int sbe_count;
+ unsigned int sbe_balanced;
+ unsigned int sbe_pushed;
+
+ /* SD_BALANCE_FORK stats */
+ unsigned int sbf_count;
+ unsigned int sbf_balanced;
+ unsigned int sbf_pushed;
+
+ /* try_to_wake_up() stats */
+ unsigned int ttwu_wake_remote;
+ unsigned int ttwu_move_affine;
+ unsigned int ttwu_move_balance;
+#endif
+#ifdef CONFIG_SCHED_DEBUG
+ char *name;
+#endif
+ union {
+ void *private; /* used during construction */
+ struct rcu_head rcu; /* used during destruction */
+ };
+
+ unsigned int span_weight;
+ /*
+ * Span of all CPUs in this domain.
+ *
+ * NOTE: this field is variable length. (Allocated dynamically
+ * by attaching extra space to the end of the structure,
+ * depending on how many CPUs the kernel has booted up with)
+ */
+ unsigned long span[0];
+};
+
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+ return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ struct sched_domain_attr *dattr_new);
+
+/* Allocate an array of sched domains, for partition_sched_domains(). */
+cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
+void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
+
+bool cpus_share_cache(int this_cpu, int that_cpu);
+
+typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+typedef int (*sched_domain_flags_f)(void);
+
+#define SDTL_OVERLAP 0x01
+
+struct sd_data {
+ struct sched_domain **__percpu sd;
+ struct sched_group **__percpu sg;
+ struct sched_group_capacity **__percpu sgc;
+};
+
+struct sched_domain_topology_level {
+ sched_domain_mask_f mask;
+ sched_domain_flags_f sd_flags;
+ int flags;
+ int numa_level;
+ struct sd_data data;
+#ifdef CONFIG_SCHED_DEBUG
+ char *name;
+#endif
+};
+
+extern struct sched_domain_topology_level *sched_domain_topology;
+
+extern void set_sched_topology(struct sched_domain_topology_level *tl);
+extern void wake_up_if_idle(int cpu);
+
+#ifdef CONFIG_SCHED_DEBUG
+# define SD_INIT_NAME(type) .name = #type
+#else
+# define SD_INIT_NAME(type)
+#endif
+
+#else /* CONFIG_SMP */
+
+struct sched_domain_attr;
+
+static inline void
+partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ struct sched_domain_attr *dattr_new)
+{
+}
+
+static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+ return true;
+}
+
+#endif /* !CONFIG_SMP */
+
+
+struct io_context; /* See blkdev.h */
+
+
+#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
+extern void prefetch_stack(struct task_struct *t);
+#else
+static inline void prefetch_stack(struct task_struct *t) { }
+#endif
+
+struct audit_context; /* See audit.c */
+struct mempolicy;
+struct pipe_inode_info;
+struct uts_namespace;
+
+struct load_weight {
+ unsigned long weight;
+ u32 inv_weight;
+};
+
+struct sched_avg {
+ u64 last_runnable_update;
+ s64 decay_count;
+ /*
+ * utilization_avg_contrib describes the amount of time that a
+ * sched_entity is running on a CPU. It is based on running_avg_sum
+ * and is scaled in the range [0..SCHED_LOAD_SCALE].
+ * load_avg_contrib describes the amount of time that a sched_entity
+ * is runnable on a rq. It is based on both runnable_avg_sum and the
+ * weight of the task.
+ */
+ unsigned long load_avg_contrib, utilization_avg_contrib;
+ /*
+ * These sums represent an infinite geometric series and so are bound
+ * above by 1024/(1-y). Thus we only need a u32 to store them for all
+ * choices of y < 1-2^(-32)*1024.
+ * running_avg_sum reflects the time that the sched_entity is
+ * effectively running on the CPU.
+ * runnable_avg_sum represents the amount of time a sched_entity is on
+ * a runqueue which includes the running time that is monitored by
+ * running_avg_sum.
+ */
+ u32 runnable_avg_sum, avg_period, running_avg_sum;
+};
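A one-line check of the bound quoted in the comment, stated here as an editorial aside: each period contributes at most 1024 to a sum, so the series is at most 1024 * (1 + y + y^2 + ...) = 1024/(1 - y); requiring that to fit in a u32 means 1024/(1 - y) <= 2^32, i.e. y <= 1 - 1024*2^(-32) = 1 - 2^(-22), which is the condition stated above.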
+
+#ifdef CONFIG_SCHEDSTATS
+struct sched_statistics {
+ u64 wait_start;
+ u64 wait_max;
+ u64 wait_count;
+ u64 wait_sum;
+ u64 iowait_count;
+ u64 iowait_sum;
+
+ u64 sleep_start;
+ u64 sleep_max;
+ s64 sum_sleep_runtime;
+
+ u64 block_start;
+ u64 block_max;
+ u64 exec_max;
+ u64 slice_max;
+
+ u64 nr_migrations_cold;
+ u64 nr_failed_migrations_affine;
+ u64 nr_failed_migrations_running;
+ u64 nr_failed_migrations_hot;
+ u64 nr_forced_migrations;
+
+ u64 nr_wakeups;
+ u64 nr_wakeups_sync;
+ u64 nr_wakeups_migrate;
+ u64 nr_wakeups_local;
+ u64 nr_wakeups_remote;
+ u64 nr_wakeups_affine;
+ u64 nr_wakeups_affine_attempts;
+ u64 nr_wakeups_passive;
+ u64 nr_wakeups_idle;
+};
+#endif
+
+struct sched_entity {
+ struct load_weight load; /* for load-balancing */
+ struct rb_node run_node;
+ struct list_head group_node;
+ unsigned int on_rq;
+
+ u64 exec_start;
+ u64 sum_exec_runtime;
+ u64 vruntime;
+ u64 prev_sum_exec_runtime;
+
+ u64 nr_migrations;
+
+#ifdef CONFIG_SCHEDSTATS
+ struct sched_statistics statistics;
+#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ int depth;
+ struct sched_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+ struct cfs_rq *cfs_rq;
+ /* rq "owned" by this entity/group: */
+ struct cfs_rq *my_q;
+#endif
+
+#ifdef CONFIG_SMP
+ /* Per-entity load-tracking */
+ struct sched_avg avg;
+#endif
+};
+
+struct sched_rt_entity {
+ struct list_head run_list;
+ unsigned long timeout;
+ unsigned long watchdog_stamp;
+ unsigned int time_slice;
+
+ struct sched_rt_entity *back;
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct sched_rt_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+ struct rt_rq *rt_rq;
+ /* rq "owned" by this entity/group: */
+ struct rt_rq *my_q;
+#endif
+};
+
+struct sched_dl_entity {
+ struct rb_node rb_node;
+
+ /*
+ * Original scheduling parameters. Copied here from sched_attr
+ * during sched_setattr(), they will remain the same until
+ * the next sched_setattr().
+ */
+ u64 dl_runtime; /* maximum runtime for each instance */
+ u64 dl_deadline; /* relative deadline of each instance */
+ u64 dl_period; /* separation of two instances (period) */
+ u64 dl_bw; /* dl_runtime / dl_deadline */
+
+ /*
+ * Actual scheduling parameters. Initialized with the values above,
+ * they are continuously updated during task execution. Note that
+ * the remaining runtime could be < 0 in case we are in overrun.
+ */
+ s64 runtime; /* remaining runtime for this instance */
+ u64 deadline; /* absolute deadline for this instance */
+ unsigned int flags; /* specifying the scheduler behaviour */
+
+ /*
+ * Some bool flags:
+ *
+ * @dl_throttled tells if we exhausted the runtime. If so, the
+ * task has to wait for a replenishment to be performed at the
+ * next firing of dl_timer.
+ *
+ * @dl_new tells if a new instance arrived. If so we must
+ * start executing it with full runtime and reset its absolute
+ * deadline;
+ *
+ * @dl_boosted tells if we are boosted due to DI. If so we are
+ * outside bandwidth enforcement mechanism (but only until we
+ * exit the critical section);
+ *
+ * @dl_yielded tells if task gave up the cpu before consuming
+ * all its available runtime during the last job.
+ */
+ int dl_throttled, dl_new, dl_boosted, dl_yielded;
+
+ /*
+ * Bandwidth enforcement timer. Each -deadline task has its
+ * own bandwidth to be enforced, thus we need one timer per task.
+ */
+ struct hrtimer dl_timer;
+};
+
+union rcu_special {
+ struct {
+ bool blocked;
+ bool need_qs;
+ } b;
+ short s;
+};
+struct rcu_node;
+
+enum perf_event_task_context {
+ perf_invalid_context = -1,
+ perf_hw_context = 0,
+ perf_sw_context,
+ perf_nr_task_contexts,
+};
+
+struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ void *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+ unsigned int ptrace;
+
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_BFS)
+ struct llist_node wake_entry;
+ int on_cpu;
+#endif
+#ifdef CONFIG_SMP
+ struct task_struct *last_wakee;
+ unsigned long wakee_flips;
+ unsigned long wakee_flip_decay_ts;
+
+ int wake_cpu;
+#endif
+ int on_rq;
+ int prio, static_prio, normal_prio;
+ unsigned int rt_priority;
+#ifdef CONFIG_SCHED_BFS
+ int time_slice;
+ u64 deadline;
+ struct list_head run_list;
+ u64 last_ran;
+ u64 sched_time; /* sched_clock time spent running */
+#ifdef CONFIG_SMT_NICE
+ int smt_bias; /* Policy/nice level bias across smt siblings */
+#endif
+#ifdef CONFIG_SMP
+ bool sticky; /* Soft affined flag */
+#endif
+#ifdef CONFIG_HOTPLUG_CPU
+ bool zerobound; /* Bound to CPU0 for hotplug */
+#endif
+ unsigned long rt_timeout;
+#else /* CONFIG_SCHED_BFS */
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
+#endif
+#ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+#endif
+ struct sched_dl_entity dl;
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+ struct hlist_head preempt_notifiers;
+#endif
+
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+ unsigned int btrace_seq;
+#endif
+
+ unsigned int policy;
+ int nr_cpus_allowed;
+ cpumask_t cpus_allowed;
+
+#ifdef CONFIG_PREEMPT_RCU
+ int rcu_read_lock_nesting;
+ union rcu_special rcu_read_unlock_special;
+ struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_PREEMPT_RCU
+ struct rcu_node *rcu_blocked_node;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+ unsigned long rcu_tasks_nvcsw;
+ bool rcu_tasks_holdout;
+ struct list_head rcu_tasks_holdout_list;
+ int rcu_tasks_idle_cpu;
+#endif /* #ifdef CONFIG_TASKS_RCU */
+
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+ struct sched_info sched_info;
+#endif
+
+ struct list_head tasks;
+#ifdef CONFIG_SMP
+ struct plist_node pushable_tasks;
+ struct rb_node pushable_dl_tasks;
+#endif
+
+ struct mm_struct *mm, *active_mm;
+#ifdef CONFIG_COMPAT_BRK
+ unsigned brk_randomized:1;
+#endif
+ /* per-thread vma caching */
+ u32 vmacache_seqnum;
+ struct vm_area_struct *vmacache[VMACACHE_SIZE];
+#if defined(SPLIT_RSS_COUNTING)
+ struct task_rss_stat rss_stat;
+#endif
+/* task state */
+ int exit_state;
+ int exit_code, exit_signal;
+ int pdeath_signal; /* The signal sent when the parent dies */
+ unsigned int jobctl; /* JOBCTL_*, siglock protected */
+
+ /* Used for emulating ABI behavior of previous Linux versions */
+ unsigned int personality;
+
+ unsigned in_execve:1; /* Tell the LSMs that the process is doing an
+ * execve */
+ unsigned in_iowait:1;
+
+ /* Revert to default priority/policy when forking */
+ unsigned sched_reset_on_fork:1;
+ unsigned sched_contributes_to_load:1;
+
+#ifdef CONFIG_MEMCG_KMEM
+ unsigned memcg_kmem_skip_account:1;
+#endif
+
+ unsigned long atomic_flags; /* Flags needing atomic access. */
+
+ struct restart_block restart_block;
+
+ pid_t pid;
+ pid_t tgid;
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+ /* Canary value for the -fstack-protector gcc feature */
+ unsigned long stack_canary;
+#endif
+ /*
+ * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->real_parent->pid)
+ */
+ struct task_struct __rcu *real_parent; /* real parent process */
+ struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
+ /*
+ * children/sibling forms the list of my natural children
+ */
+ struct list_head children; /* list of my children */
+ struct list_head sibling; /* linkage in my parent's children list */
+ struct task_struct *group_leader; /* threadgroup leader */
+
+ /*
+ * ptraced is the list of tasks this task is using ptrace on.
+ * This includes both natural children and PTRACE_ATTACH targets.
+ * p->ptrace_entry is p's link on the p->parent->ptraced list.
+ */
+ struct list_head ptraced;
+ struct list_head ptrace_entry;
+
+ /* PID/PID hash table linkage. */
+ struct pid_link pids[PIDTYPE_MAX];
+ struct list_head thread_group;
+ struct list_head thread_node;
+
+ struct completion *vfork_done; /* for vfork() */
+ int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+
+ cputime_t utime, stime, utimescaled, stimescaled;
+#ifdef CONFIG_SCHED_BFS
+ unsigned long utime_pc, stime_pc;
+#endif
+ cputime_t gtime;
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+ struct cputime prev_cputime;
+#endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+ seqlock_t vtime_seqlock;
+ unsigned long long vtime_snap;
+ enum {
+ VTIME_SLEEPING = 0,
+ VTIME_USER,
+ VTIME_SYS,
+ } vtime_snap_whence;
+#endif
+ unsigned long nvcsw, nivcsw; /* context switch counts */
+ u64 start_time; /* monotonic time in nsec */
+ u64 real_start_time; /* boot based time in nsec */
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt;
+
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+
+/* process credentials */
+ const struct cred __rcu *real_cred; /* objective and real subjective task
+ * credentials (COW) */
+ const struct cred __rcu *cred; /* effective (overridable) subjective task
+ * credentials (COW) */
+ char comm[TASK_COMM_LEN]; /* executable name excluding path
+ - access with [gs]et_task_comm (which lock
+ it with task_lock())
+ - initialized normally by setup_new_exec */
+/* file system info */
+ int link_count, total_link_count;
+#ifdef CONFIG_SYSVIPC
+/* ipc stuff */
+ struct sysv_sem sysvsem;
+ struct sysv_shm sysvshm;
+#endif
+#ifdef CONFIG_DETECT_HUNG_TASK
+/* hung task detection */
+ unsigned long last_switch_count;
+#endif
+/* CPU-specific state of this task */
+ struct thread_struct thread;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* namespaces */
+ struct nsproxy *nsproxy;
+/* signal handlers */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
+
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+ struct sigpending pending;
+
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+ int (*notifier)(void *priv);
+ void *notifier_data;
+ sigset_t *notifier_mask;
+ struct callback_head *task_works;
+
+ struct audit_context *audit_context;
+#ifdef CONFIG_AUDITSYSCALL
+ kuid_t loginuid;
+ unsigned int sessionid;
+#endif
+ struct seccomp seccomp;
+
+/* Thread group tracking */
+ u32 parent_exec_id;
+ u32 self_exec_id;
+/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
+ * mempolicy */
+ spinlock_t alloc_lock;
+
+ /* Protection of the PI data structures: */
+ raw_spinlock_t pi_lock;
+
+#ifdef CONFIG_RT_MUTEXES
+ /* PI waiters blocked on a rt_mutex held by this task */
+ struct rb_root pi_waiters;
+ struct rb_node *pi_waiters_leftmost;
+ /* Deadlock detection and priority inheritance handling */
+ struct rt_mutex_waiter *pi_blocked_on;
+#endif
+
+#ifdef CONFIG_DEBUG_MUTEXES
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned int hardirq_disable_event;
+ int hardirqs_enabled;
+ int hardirq_context;
+ unsigned long softirq_disable_ip;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_disable_event;
+ unsigned int softirq_enable_event;
+ int softirqs_enabled;
+ int softirq_context;
+#endif
+#ifdef CONFIG_LOCKDEP
+# define MAX_LOCK_DEPTH 48UL
+ u64 curr_chain_key;
+ int lockdep_depth;
+ unsigned int lockdep_recursion;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
+ gfp_t lockdep_reclaim_gfp;
+#endif
+
+/* journalling filesystem info */
+ void *journal_info;
+
+/* stacked block device info */
+ struct bio_list *bio_list;
+
+#ifdef CONFIG_BLOCK
+/* stack plugging */
+ struct blk_plug *plug;
+#endif
+
+/* VM state */
+ struct reclaim_state *reclaim_state;
+
+ struct backing_dev_info *backing_dev_info;
+
+ struct io_context *io_context;
+
+ unsigned long ptrace_message;
+ siginfo_t *last_siginfo; /* For ptrace use. */
+ struct task_io_accounting ioac;
+#if defined(CONFIG_TASK_XACCT)
+ u64 acct_rss_mem1; /* accumulated rss usage */
+ u64 acct_vm_mem1; /* accumulated virtual memory usage */
+ cputime_t acct_timexpd; /* stime + utime since last update */
+#endif
+#ifdef CONFIG_CPUSETS
+ nodemask_t mems_allowed; /* Protected by alloc_lock */
+ seqcount_t mems_allowed_seq; /* Sequence no to catch updates */
+ int cpuset_mem_spread_rotor;
+ int cpuset_slab_spread_rotor;
+#endif
+#ifdef CONFIG_CGROUPS
+ /* Control Group info protected by css_set_lock */
+ struct css_set __rcu *cgroups;
+ /* cg_list protected by css_set_lock and tsk->alloc_lock */
+ struct list_head cg_list;
+#endif
+#ifdef CONFIG_FUTEX
+ struct robust_list_head __user *robust_list;
+#ifdef CONFIG_COMPAT
+ struct compat_robust_list_head __user *compat_robust_list;
+#endif
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
+#endif
+#ifdef CONFIG_PERF_EVENTS
+ struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
+ struct mutex perf_event_mutex;
+ struct list_head perf_event_list;
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+ unsigned long preempt_disable_ip;
+#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *mempolicy; /* Protected by alloc_lock */
+ short il_next;
+ short pref_node_fork;
+#endif
+#ifdef CONFIG_NUMA_BALANCING
+ int numa_scan_seq;
+ unsigned int numa_scan_period;
+ unsigned int numa_scan_period_max;
+ int numa_preferred_nid;
+ unsigned long numa_migrate_retry;
+ u64 node_stamp; /* migration stamp */
+ u64 last_task_numa_placement;
+ u64 last_sum_exec_runtime;
+ struct callback_head numa_work;
+
+ struct list_head numa_entry;
+ struct numa_group *numa_group;
+
+ /*
+ * numa_faults is an array split into four regions:
+ * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
+ * in this precise order.
+ *
+ * faults_memory: Exponential decaying average of faults on a per-node
+ * basis. Scheduling placement decisions are made based on these
+ * counts. The values remain static for the duration of a PTE scan.
+ * faults_cpu: Track the nodes the process was running on when a NUMA
+ * hinting fault was incurred.
+ * faults_memory_buffer and faults_cpu_buffer: Record faults per node
+ * during the current scan window. When the scan completes, the counts
+ * in faults_memory and faults_cpu decay and these values are copied.
+ */
+ unsigned long *numa_faults;
+ unsigned long total_numa_faults;
+
+ /*
+ * numa_faults_locality tracks whether faults recorded during the last
+ * scan window were remote/local or failed to migrate. The task scan
+ * period is adapted based on the locality of the faults, with different
+ * weights depending on whether they were shared or private faults.
+ */
+ unsigned long numa_faults_locality[3];
+
+ unsigned long numa_pages_migrated;
+#endif /* CONFIG_NUMA_BALANCING */
+
+ struct rcu_head rcu;
+
+ /*
+ * cache last used pipe for splice
+ */
+ struct pipe_inode_info *splice_pipe;
+
+ struct page_frag task_frag;
+
+#ifdef CONFIG_TASK_DELAY_ACCT
+ struct task_delay_info *delays;
+#endif
+#ifdef CONFIG_FAULT_INJECTION
+ int make_it_fail;
+#endif
+ /*
+ * when (nr_dirtied >= nr_dirtied_pause), it's time to call
+ * balance_dirty_pages() for some dirty throttling pause
+ */
+ int nr_dirtied;
+ int nr_dirtied_pause;
+ unsigned long dirty_paused_when; /* start of a write-and-pause period */
+
+#ifdef CONFIG_LATENCYTOP
+ int latency_record_count;
+ struct latency_record latency_record[LT_SAVECOUNT];
+#endif
+ /*
+ * time slack values; these are used to round up poll() and
+ * select() etc timeout values. These are in nanoseconds.
+ */
+ unsigned long timer_slack_ns;
+ unsigned long default_timer_slack_ns;
+
+#ifdef CONFIG_KASAN
+ unsigned int kasan_depth;
+#endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* Index of current stored address in ret_stack */
+ int curr_ret_stack;
+ /* Stack of return addresses for return function tracing */
+ struct ftrace_ret_stack *ret_stack;
+ /* time stamp for last schedule */
+ unsigned long long ftrace_timestamp;
+ /*
+ * Number of functions that haven't been traced
+ * because of depth overrun.
+ */
+ atomic_t trace_overrun;
+ /* Pause for the tracing */
+ atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+ /* state flags for use by tracers */
+ unsigned long trace;
+ /* bitmask and counter of trace recursion */
+ unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
+#ifdef CONFIG_MEMCG
+ struct memcg_oom_info {
+ struct mem_cgroup *memcg;
+ gfp_t gfp_mask;
+ int order;
+ unsigned int may_oom:1;
+ } memcg_oom;
+#endif
+#ifdef CONFIG_UPROBES
+ struct uprobe_task *utask;
+#endif
+#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
+ unsigned int sequential_io;
+ unsigned int sequential_io_avg;
+#endif
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ unsigned long task_state_change;
+#endif
+};
+
+#ifdef CONFIG_SCHED_BFS
+bool grunqueue_is_locked(void);
+void grq_unlock_wait(void);
+void cpu_scaling(int cpu);
+void cpu_nonscaling(int cpu);
+#define tsk_seruntime(t) ((t)->sched_time)
+#define tsk_rttimeout(t) ((t)->rt_timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+}
+
+static inline int runqueue_is_locked(int cpu)
+{
+ return grunqueue_is_locked();
+}
+
+void print_scheduler_version(void);
+
+static inline bool iso_task(struct task_struct *p)
+{
+ return (p->policy == SCHED_ISO);
+}
+#else /* CFS */
+extern int runqueue_is_locked(int cpu);
+static inline void cpu_scaling(int cpu)
+{
+}
+
+static inline void cpu_nonscaling(int cpu)
+{
+}
+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t) ((t)->rt.timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+ p->nr_cpus_allowed = current->nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+ printk(KERN_INFO"CFS CPU scheduler.\n");
+}
+
+static inline bool iso_task(struct task_struct *p)
+{
+ return false;
+}
+
+/* Anyone feel like implementing this? */
+static inline bool above_background_load(void)
+{
+ return false;
+}
+#endif /* CONFIG_SCHED_BFS */
+
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
+#define TNF_MIGRATED 0x01
+#define TNF_NO_GROUP 0x02
+#define TNF_SHARED 0x04
+#define TNF_FAULT_LOCAL 0x08
+#define TNF_MIGRATE_FAIL 0x10
+
+#ifdef CONFIG_NUMA_BALANCING
+extern void task_numa_fault(int last_node, int node, int pages, int flags);
+extern pid_t task_numa_group_id(struct task_struct *p);
+extern void set_numabalancing_state(bool enabled);
+extern void task_numa_free(struct task_struct *p);
+extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
+ int src_nid, int dst_cpu);
+#else
+static inline void task_numa_fault(int last_node, int node, int pages,
+ int flags)
+{
+}
+static inline pid_t task_numa_group_id(struct task_struct *p)
+{
+ return 0;
+}
+static inline void set_numabalancing_state(bool enabled)
+{
+}
+static inline void task_numa_free(struct task_struct *p)
+{
+}
+static inline bool should_numa_migrate_memory(struct task_struct *p,
+ struct page *page, int src_nid, int dst_cpu)
+{
+ return true;
+}
+#endif
+
+static inline struct pid *task_pid(struct task_struct *task)
+{
+ return task->pids[PIDTYPE_PID].pid;
+}
+
+static inline struct pid *task_tgid(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_PID].pid;
+}
+
+/*
+ * Without tasklist or rcu lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current;
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_PGID].pid;
+}
+
+static inline struct pid *task_session(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_SID].pid;
+}
+
+struct pid_namespace;
+
+/*
+ * the helpers to get the task's different pids as they are seen
+ * from various namespaces
+ *
+ * task_xid_nr() : global id, i.e. the id seen from the init namespace;
+ * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
+ * current.
+ * task_xid_nr_ns() : id seen from the ns specified;
+ *
+ * set_task_vxid() : assigns a virtual id to a task;
+ *
+ * see also pid_nr() etc in include/linux/pid.h
+ */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+ struct pid_namespace *ns);
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+ return tsk->pid;
+}
+
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
+
+static inline pid_t task_pid_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
+}
+
+
+static inline pid_t task_tgid_nr(struct task_struct *tsk)
+{
+ return tsk->tgid;
+}
+
+pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return pid_vnr(task_tgid(tsk));
+}
+
+
+static inline int pid_alive(const struct task_struct *p);
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
+}
+
+static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
+}
+
+
+static inline pid_t task_session_nr_ns(struct task_struct *tsk,
+ struct pid_namespace *ns)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
+}
+
+static inline pid_t task_session_vnr(struct task_struct *tsk)
+{
+ return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
+}
+
+/* obsolete, do not use */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+ return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
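+
+/*
+ * Usage sketch (illustrative only, not part of the original header): report
+ * a task's pid both globally and as seen from its own pid namespace.
+ * task_active_pid_ns() is assumed to be available from <linux/pid_namespace.h>.
+ *
+ *    pid_t gpid = task_pid_nr(tsk);
+ *    pid_t vpid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk));
+ *
+ *    pr_info("pid %d (global), %d (in its own namespace)\n", gpid, vpid);
+ */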
+
+/**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+ *
+ * Test if a process is not yet dead (at most zombie state).
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
+ */
+static inline int pid_alive(const struct task_struct *p)
+{
+ return p->pids[PIDTYPE_PID].pid != NULL;
+}
+
+/**
+ * is_global_init - check if a task structure is init
+ * @tsk: Task structure to be checked.
+ *
+ * Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
+ */
+static inline int is_global_init(struct task_struct *tsk)
+{
+ return tsk->pid == 1;
+}
+
+extern struct pid *cad_pid;
+
+extern void free_task(struct task_struct *tsk);
+#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
+extern void __put_task_struct(struct task_struct *t);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+}
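+
+/*
+ * Reference-counting sketch (illustrative only): a caller that wants to use
+ * a task outside of RCU protection takes a reference first and drops it when
+ * done. do_something() is a placeholder for the caller's own work.
+ *
+ *    rcu_read_lock();
+ *    tsk = find_task_by_vpid(nr);
+ *    if (tsk)
+ *        get_task_struct(tsk);
+ *    rcu_read_unlock();
+ *
+ *    if (tsk) {
+ *        do_something(tsk);
+ *        put_task_struct(tsk);
+ *    }
+ */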
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void task_cputime(struct task_struct *t,
+ cputime_t *utime, cputime_t *stime);
+extern void task_cputime_scaled(struct task_struct *t,
+ cputime_t *utimescaled, cputime_t *stimescaled);
+extern cputime_t task_gtime(struct task_struct *t);
+#else
+static inline void task_cputime(struct task_struct *t,
+ cputime_t *utime, cputime_t *stime)
+{
+ if (utime)
+ *utime = t->utime;
+ if (stime)
+ *stime = t->stime;
+}
+
+static inline void task_cputime_scaled(struct task_struct *t,
+ cputime_t *utimescaled,
+ cputime_t *stimescaled)
+{
+ if (utimescaled)
+ *utimescaled = t->utimescaled;
+ if (stimescaled)
+ *stimescaled = t->stimescaled;
+}
+
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+ return t->gtime;
+}
+#endif
+extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+
+/*
+ * Per process flags
+ */
+#define PF_EXITING 0x00000004 /* getting shut down */
+#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
+#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
+#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
+#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+#define PF_DUMPCORE 0x00000200 /* dumped core */
+#define PF_SIGNALED 0x00000400 /* killed by a signal */
+#define PF_MEMALLOC 0x00000800 /* Allocating memory */
+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
+#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
+#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
+#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
+#define PF_FROZEN 0x00010000 /* frozen for system suspend */
+#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
+#define PF_KSWAPD 0x00040000 /* I am kswapd */
+#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
+#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
+#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
+#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
+#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
+
+/*
+ * Only the _current_ task can read/write to tsk->flags, but other
+ * tasks can access tsk->flags in readonly mode, for example
+ * with tsk_used_math() (as during threaded core dumping).
+ * There is however an exception to this rule during ptrace
+ * or during fork: the ptracer task is allowed to write to the
+ * child->flags of its traced child (same goes for fork, the parent
+ * can write to the child->flags), because we're guaranteed the
+ * child is not running and in turn not changing child->flags
+ * at the same time the parent does it.
+ */
+#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
+#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
+#define clear_used_math() clear_stopped_child_used_math(current)
+#define set_used_math() set_stopped_child_used_math(current)
+#define conditional_stopped_child_used_math(condition, child) \
+ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
+#define conditional_used_math(condition) \
+ conditional_stopped_child_used_math(condition, current)
+#define copy_to_stopped_child_used_math(child) \
+ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
+/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
+#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+#define used_math() tsk_used_math(current)
+
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
+ * __GFP_FS is also cleared as it implies __GFP_IO.
+ */
+static inline gfp_t memalloc_noio_flags(gfp_t flags)
+{
+ if (unlikely(current->flags & PF_MEMALLOC_NOIO))
+ flags &= ~(__GFP_IO | __GFP_FS);
+ return flags;
+}
+
+static inline unsigned int memalloc_noio_save(void)
+{
+ unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
+ current->flags |= PF_MEMALLOC_NOIO;
+ return flags;
+}
+
+static inline void memalloc_noio_restore(unsigned int flags)
+{
+ current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
+}
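+
+/*
+ * Usage sketch (illustrative only): bracket an allocation that must not
+ * recurse into I/O with the save/restore pair. The kzalloc() call is a
+ * placeholder for whatever allocation the caller performs; the allocator
+ * strips __GFP_IO/__GFP_FS while PF_MEMALLOC_NOIO is set.
+ *
+ *    unsigned int noio_flags;
+ *
+ *    noio_flags = memalloc_noio_save();
+ *    buf = kzalloc(size, GFP_KERNEL);
+ *    memalloc_noio_restore(noio_flags);
+ */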
+
+/* Per-process atomic flags. */
+#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
+#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+
+
+#define TASK_PFA_TEST(name, func) \
+ static inline bool task_##func(struct task_struct *p) \
+ { return test_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_SET(name, func) \
+ static inline void task_set_##func(struct task_struct *p) \
+ { set_bit(PFA_##name, &p->atomic_flags); }
+#define TASK_PFA_CLEAR(name, func) \
+ static inline void task_clear_##func(struct task_struct *p) \
+ { clear_bit(PFA_##name, &p->atomic_flags); }
+
+TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
+TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
+
+TASK_PFA_TEST(SPREAD_PAGE, spread_page)
+TASK_PFA_SET(SPREAD_PAGE, spread_page)
+TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
+
+TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
+TASK_PFA_SET(SPREAD_SLAB, spread_slab)
+TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
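+
+/*
+ * The macros above expand into test/set/clear helpers; an illustrative
+ * caller (not part of the original header) would look like:
+ *
+ *    if (!task_no_new_privs(current))
+ *        task_set_no_new_privs(current);
+ *
+ *    if (task_spread_page(current))
+ *        task_clear_spread_page(current);
+ */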
+
+/*
+ * task->jobctl flags
+ */
+#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
+
+#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
+#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
+#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
+#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
+#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
+#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
+#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
+
+#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
+
+#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+
+extern bool task_set_jobctl_pending(struct task_struct *task,
+ unsigned int mask);
+extern void task_clear_jobctl_trapping(struct task_struct *task);
+extern void task_clear_jobctl_pending(struct task_struct *task,
+ unsigned int mask);
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RCU
+ p->rcu_read_lock_nesting = 0;
+ p->rcu_read_unlock_special.s = 0;
+ p->rcu_blocked_node = NULL;
+ INIT_LIST_HEAD(&p->rcu_node_entry);
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TASKS_RCU
+ p->rcu_tasks_holdout = false;
+ INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
+ p->rcu_tasks_idle_cpu = -1;
+#endif /* #ifdef CONFIG_TASKS_RCU */
+}
+
+static inline void tsk_restore_flags(struct task_struct *task,
+ unsigned long orig_flags, unsigned long flags)
+{
+ task->flags &= ~flags;
+ task->flags |= orig_flags & flags;
+}
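+
+/*
+ * Usage sketch (illustrative only): temporarily run with PF_MEMALLOC set,
+ * then restore only that bit to its previous value, leaving all other flags
+ * untouched. do_reclaim_work() is a placeholder.
+ *
+ *    unsigned long pflags = current->flags;
+ *
+ *    current->flags |= PF_MEMALLOC;
+ *    do_reclaim_work();
+ *    tsk_restore_flags(current, pflags, PF_MEMALLOC);
+ */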
+
+extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+ const struct cpumask *trial);
+extern int task_can_attach(struct task_struct *p,
+ const struct cpumask *cs_cpus_allowed);
+#ifdef CONFIG_SMP
+extern void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask);
+
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask);
+#else
+static inline void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask)
+{
+}
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask)
+{
+ if (!cpumask_test_cpu(0, new_mask))
+ return -EINVAL;
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_NO_HZ_COMMON
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ_COMMON */
+
+#ifndef CONFIG_CPUMASK_OFFSTACK
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+ return set_cpus_allowed_ptr(p, &new_mask);
+}
+#endif
+
+/*
+ * Do not use outside of architecture code which knows its limitations.
+ *
+ * sched_clock() has no promise of monotonicity or bounded drift between
+ * CPUs, and its use (which you should avoid) requires disabling IRQs.
+ *
+ * Please use one of the three interfaces below.
+ */
+extern unsigned long long notrace sched_clock(void);
+/*
+ * See the comment in kernel/sched/clock.c
+ */
+extern u64 cpu_clock(int cpu);
+extern u64 local_clock(void);
+extern u64 running_clock(void);
+extern u64 sched_clock_cpu(int cpu);
+
+
+extern void sched_clock_init(void);
+
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+#else
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+extern int sched_clock_stable(void);
+extern void set_sched_clock_stable(void);
+extern void clear_sched_clock_stable(void);
+
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An interface for runtime opt-in to irq time accounting based on sched_clock.
+ * The reason for this explicit opt-in is to avoid a performance penalty on
+ * systems with slow sched_clock implementations.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
+extern unsigned long long
+task_sched_runtime(struct task_struct *task);
+
+/* sched_exec is called by processes performing an exec */
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS)
+extern void sched_exec(void);
+#else
+#define sched_exec() {}
+#endif
+
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void idle_task_exit(void);
+#else
+static inline void idle_task_exit(void) {}
+#endif
+
+#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
+extern void wake_up_nohz_cpu(int cpu);
+#else
+static inline void wake_up_nohz_cpu(int cpu) { }
+#endif
+
+#ifdef CONFIG_NO_HZ_FULL
+extern bool sched_can_stop_tick(void);
+extern u64 scheduler_tick_max_deferment(void);
+#else
+static inline bool sched_can_stop_tick(void) { return false; }
+#endif
+
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detach(struct task_struct *p);
+extern void sched_autogroup_fork(struct signal_struct *sig);
+extern void sched_autogroup_exit(struct signal_struct *sig);
+#ifdef CONFIG_PROC_FS
+extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
+extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
+#endif
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detach(struct task_struct *p) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+#endif
+
+extern int yield_to(struct task_struct *p, bool preempt);
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+/**
+ * task_nice - return the nice value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
+ */
+static inline int task_nice(const struct task_struct *p)
+{
+ return PRIO_TO_NICE((p)->static_prio);
+}
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
+extern int idle_cpu(int cpu);
+extern int sched_setscheduler(struct task_struct *, int,
+ const struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+ const struct sched_param *);
+extern int sched_setattr(struct task_struct *,
+ const struct sched_attr *);
+extern struct task_struct *idle_task(int cpu);
+/**
+ * is_idle_task - is the specified task an idle task?
+ * @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
+ */
+static inline bool is_idle_task(const struct task_struct *p)
+{
+ return p->pid == 0;
+}
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
+
+void yield(void);
+
+union thread_union {
+ struct thread_info thread_info;
+ unsigned long stack[THREAD_SIZE/sizeof(long)];
+};
+
+#ifndef __HAVE_ARCH_KSTACK_END
+static inline int kstack_end(void *addr)
+{
+ /* Reliable end of stack detection:
+ * Some APM BIOS versions misalign the stack.
+ */
+ return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
+}
+#endif
+
+extern union thread_union init_thread_union;
+extern struct task_struct init_task;
+
+extern struct mm_struct init_mm;
+
+extern struct pid_namespace init_pid_ns;
+
+/*
+ * find a task by one of its numerical ids
+ *
+ * find_task_by_pid_ns():
+ * finds a task by its pid in the specified namespace
+ * find_task_by_vpid():
+ * finds a task by its virtual pid
+ *
+ * see also find_vpid() etc in include/linux/pid.h
+ */
+
+extern struct task_struct *find_task_by_vpid(pid_t nr);
+extern struct task_struct *find_task_by_pid_ns(pid_t nr,
+ struct pid_namespace *ns);
+
+/* per-UID process charging. */
+extern struct user_struct * alloc_uid(kuid_t);
+static inline struct user_struct *get_uid(struct user_struct *u)
+{
+ atomic_inc(&u->__count);
+ return u;
+}
+extern void free_uid(struct user_struct *);
+
+#include <asm/current.h>
+
+extern void xtime_update(unsigned long ticks);
+
+extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+extern int wake_up_process(struct task_struct *tsk);
+extern void wake_up_new_task(struct task_struct *tsk);
+#ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+#else
+ static inline void kick_process(struct task_struct *tsk) { }
+#endif
+extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
+extern void sched_dead(struct task_struct *p);
+
+extern void proc_caches_init(void);
+extern void flush_signals(struct task_struct *);
+extern void __flush_signals(struct task_struct *);
+extern void ignore_signals(struct task_struct *);
+extern void flush_signal_handlers(struct task_struct *, int force_default);
+extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
+
+static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
+ ret = dequeue_signal(tsk, mask, info);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+
+ return ret;
+}
+
+extern void block_all_signals(int (*notifier)(void *priv), void *priv,
+ sigset_t *mask);
+extern void unblock_all_signals(void);
+extern void release_task(struct task_struct * p);
+extern int send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int force_sigsegv(int, struct task_struct *);
+extern int force_sig_info(int, struct siginfo *, struct task_struct *);
+extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
+extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
+extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
+ const struct cred *, u32);
+extern int kill_pgrp(struct pid *pid, int sig, int priv);
+extern int kill_pid(struct pid *pid, int sig, int priv);
+extern int kill_proc_info(int, struct siginfo *, pid_t);
+extern __must_check bool do_notify_parent(struct task_struct *, int);
+extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
+extern void force_sig(int, struct task_struct *);
+extern int send_sig(int, struct task_struct *, int);
+extern int zap_other_threads(struct task_struct *p);
+extern struct sigqueue *sigqueue_alloc(void);
+extern void sigqueue_free(struct sigqueue *);
+extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
+extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+
+static inline void restore_saved_sigmask(void)
+{
+ if (test_and_clear_restore_sigmask())
+ __set_current_blocked(&current->saved_sigmask);
+}
+
+static inline sigset_t *sigmask_to_save(void)
+{
+ sigset_t *res = &current->blocked;
+ if (unlikely(test_restore_sigmask()))
+ res = &current->saved_sigmask;
+ return res;
+}
+
+static inline int kill_cad_pid(int sig, int priv)
+{
+ return kill_pid(cad_pid, sig, priv);
+}
+
+/* These can be the second arg to send_sig_info/send_group_sig_info. */
+#define SEND_SIG_NOINFO ((struct siginfo *) 0)
+#define SEND_SIG_PRIV ((struct siginfo *) 1)
+#define SEND_SIG_FORCED ((struct siginfo *) 2)
+
+/*
+ * True if we are on the alternate signal stack.
+ */
+static inline int on_sig_stack(unsigned long sp)
+{
+#ifdef CONFIG_STACK_GROWSUP
+ return sp >= current->sas_ss_sp &&
+ sp - current->sas_ss_sp < current->sas_ss_size;
+#else
+ return sp > current->sas_ss_sp &&
+ sp - current->sas_ss_sp <= current->sas_ss_size;
+#endif
+}
+
+static inline int sas_ss_flags(unsigned long sp)
+{
+ if (!current->sas_ss_size)
+ return SS_DISABLE;
+
+ return on_sig_stack(sp) ? SS_ONSTACK : 0;
+}
+
+static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
+{
+ if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
+#ifdef CONFIG_STACK_GROWSUP
+ return current->sas_ss_sp;
+#else
+ return current->sas_ss_sp + current->sas_ss_size;
+#endif
+ return sp;
+}
+
+/*
+ * Routines for handling mm_structs
+ */
+extern struct mm_struct * mm_alloc(void);
+
+/* mmdrop drops the mm and the page tables */
+extern void __mmdrop(struct mm_struct *);
+static inline void mmdrop(struct mm_struct * mm)
+{
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+}
+
+/* mmput gets rid of the mappings and all user-space */
+extern void mmput(struct mm_struct *);
+/* Grab a reference to a task's mm, if it is not already going away */
+extern struct mm_struct *get_task_mm(struct task_struct *task);
+/*
+ * Grab a reference to a task's mm, if it is not already going away
+ * and ptrace_may_access with the mode parameter passed to it
+ * succeeds.
+ */
+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
+/* Remove the current task's stale references to the old mm_struct */
+extern void mm_release(struct task_struct *, struct mm_struct *);
+
+extern int copy_thread(unsigned long, unsigned long, unsigned long,
+ struct task_struct *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_files(struct task_struct *);
+extern void __cleanup_sighand(struct sighand_struct *);
+
+extern void exit_itimers(struct signal_struct *);
+extern void flush_itimer_signals(void);
+
+extern void do_group_exit(int);
+
+extern int do_execve(struct filename *,
+ const char __user * const __user *,
+ const char __user * const __user *);
+extern int do_execveat(int, struct filename *,
+ const char __user * const __user *,
+ const char __user * const __user *,
+ int);
+extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
+struct task_struct *fork_idle(int);
+extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
+static inline void set_task_comm(struct task_struct *tsk, const char *from)
+{
+ __set_task_comm(tsk, from, false);
+}
+extern char *get_task_comm(char *to, struct task_struct *tsk);
+
+#ifdef CONFIG_SMP
+void scheduler_ipi(void);
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
+#else
+static inline void scheduler_ipi(void) { }
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+ long match_state)
+{
+ return 1;
+}
+#endif
+
+#define next_task(p) \
+ list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
+
+#define for_each_process(p) \
+ for (p = &init_task ; (p = next_task(p)) != &init_task ; )
+
+extern bool current_is_single_threaded(void);
+
+/*
+ * Careful: do_each_thread/while_each_thread is a double loop so
+ * 'break' will not work as expected - use goto instead.
+ */
+#define do_each_thread(g, t) \
+ for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
+
+#define while_each_thread(g, t) \
+ while ((t = next_thread(t)) != g)
+
+#define __for_each_thread(signal, t) \
+ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
+
+#define for_each_thread(p, t) \
+ __for_each_thread((p)->signal, t)
+
+/* Careful: this is a double loop, 'break' won't work as expected. */
+#define for_each_process_thread(p, t) \
+ for_each_process(p) for_each_thread(p, t)
+
+static inline int get_nr_threads(struct task_struct *tsk)
+{
+ return tsk->signal->nr_threads;
+}
+
+static inline bool thread_group_leader(struct task_struct *p)
+{
+ return p->exit_signal >= 0;
+}
+
+/* Due to the insanities of de_thread() it is possible for a process
+ * to have the pid of the thread group leader without actually being
+ * the thread group leader. For iteration through the pids in proc,
+ * all we care about is that we have a task with the appropriate
+ * pid; we don't actually care whether we have the right task.
+ */
+static inline bool has_group_leader_pid(struct task_struct *p)
+{
+ return task_pid(p) == p->signal->leader_pid;
+}
+
+static inline
+bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
+{
+ return p1->signal == p2->signal;
+}
+
+static inline struct task_struct *next_thread(const struct task_struct *p)
+{
+ return list_entry_rcu(p->thread_group.next,
+ struct task_struct, thread_group);
+}
+
+static inline int thread_group_empty(struct task_struct *p)
+{
+ return list_empty(&p->thread_group);
+}
+
+#define delay_group_leader(p) \
+ (thread_group_leader(p) && !thread_group_empty(p))
+
+/*
+ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
+ * subscriptions and synchronises with wait4(). Also used in procfs. Also
+ * pins the final release of task.io_context. Also protects ->cpuset and
+ * ->cgroup.subsys[]. And ->vfork_done.
+ *
+ * Nests both inside and outside of read_lock(&tasklist_lock).
+ * It must not be nested with write_lock_irq(&tasklist_lock),
+ * neither inside nor outside.
+ */
+static inline void task_lock(struct task_struct *p)
+{
+ spin_lock(&p->alloc_lock);
+}
+
+static inline void task_unlock(struct task_struct *p)
+{
+ spin_unlock(&p->alloc_lock);
+}
+
+extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags);
+
+static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
+{
+ struct sighand_struct *ret;
+
+ ret = __lock_task_sighand(tsk, flags);
+ (void)__cond_lock(&tsk->sighand->siglock, ret);
+ return ret;
+}
+
+static inline void unlock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
+}
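+
+/*
+ * Usage sketch (illustrative only): lock_task_sighand() can fail if the
+ * task's sighand is already gone, so the result must be checked before
+ * touching signal state.
+ *
+ *    unsigned long flags;
+ *
+ *    if (lock_task_sighand(tsk, &flags)) {
+ *        // inspect or modify tsk->signal / tsk->pending here
+ *        unlock_task_sighand(tsk, &flags);
+ *    }
+ */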
+
+#ifdef CONFIG_CGROUPS
+static inline void threadgroup_change_begin(struct task_struct *tsk)
+{
+ down_read(&tsk->signal->group_rwsem);
+}
+static inline void threadgroup_change_end(struct task_struct *tsk)
+{
+ up_read(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * change ->group_leader/pid. This is useful for cases where the threadgroup
+ * needs to stay stable across blockable operations.
+ *
+ * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
+ * synchronization. While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * de_thread() does threadgroup_change_{begin|end}() when a non-leader
+ * sub-thread becomes a new leader.
+ */
+static inline void threadgroup_lock(struct task_struct *tsk)
+{
+ down_write(&tsk->signal->group_rwsem);
+}
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
+{
+ up_write(&tsk->signal->group_rwsem);
+}
+#else
+static inline void threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void threadgroup_change_end(struct task_struct *tsk) {}
+static inline void threadgroup_lock(struct task_struct *tsk) {}
+static inline void threadgroup_unlock(struct task_struct *tsk) {}
+#endif
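+
+/*
+ * Usage sketch (illustrative only): hold the threadgroup lock while walking
+ * all threads of a process so that no thread can be added or have PF_EXITING
+ * set under us. inspect_thread() is a placeholder.
+ *
+ *    struct task_struct *t;
+ *
+ *    threadgroup_lock(p);
+ *    for_each_thread(p, t)
+ *        inspect_thread(t);
+ *    threadgroup_unlock(p);
+ */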
+
+#ifndef __HAVE_THREAD_FUNCTIONS
+
+#define task_thread_info(task) ((struct thread_info *)(task)->stack)
+#define task_stack_page(task) ((task)->stack)
+
+static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
+{
+ *task_thread_info(p) = *task_thread_info(org);
+ task_thread_info(p)->task = p;
+}
+
+/*
+ * Return the address of the last usable long on the stack.
+ *
+ * When the stack grows down, this is just above the thread
+ * info struct. Going any lower will corrupt the threadinfo.
+ *
+ * When the stack grows up, this is the highest address.
+ * Beyond that position, we corrupt data on the next page.
+ */
+static inline unsigned long *end_of_stack(struct task_struct *p)
+{
+#ifdef CONFIG_STACK_GROWSUP
+ return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
+#else
+ return (unsigned long *)(task_thread_info(p) + 1);
+#endif
+}
+
+#endif
+#define task_stack_end_corrupted(task) \
+ (*(end_of_stack(task)) != STACK_END_MAGIC)
+
+static inline int object_is_on_stack(void *obj)
+{
+ void *stack = task_stack_page(current);
+
+ return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+}
+
+extern void thread_info_cache_init(void);
+
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+ unsigned long *n = end_of_stack(p);
+
+ do { /* Skip over canary */
+ n++;
+ } while (!*n);
+
+ return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+extern void set_task_stack_end_magic(struct task_struct *tsk);
+
+/* set thread flags in other task's structures
+ * - see asm/thread_info.h for TIF_xxxx flags available
+ */
+static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ set_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ clear_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline void set_tsk_need_resched(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+}
+
+static inline void clear_tsk_need_resched(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+}
+
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
+static inline int restart_syscall(void)
+{
+ set_tsk_thread_flag(current, TIF_SIGPENDING);
+ return -ERESTARTNOINTR;
+}
+
+static inline int signal_pending(struct task_struct *p)
+{
+ return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
+}
+
+static inline int __fatal_signal_pending(struct task_struct *p)
+{
+ return unlikely(sigismember(&p->pending.signal, SIGKILL));
+}
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+ return signal_pending(p) && __fatal_signal_pending(p);
+}
+
+static inline int signal_pending_state(long state, struct task_struct *p)
+{
+ if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
+ return 0;
+ if (!signal_pending(p))
+ return 0;
+
+ return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+}
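+
+/*
+ * Usage sketch (illustrative only): a classic interruptible wait loop that
+ * bails out when a signal is pending. wait_condition() is a placeholder for
+ * the caller's own predicate.
+ *
+ *    for (;;) {
+ *        set_current_state(TASK_INTERRUPTIBLE);
+ *        if (wait_condition())
+ *            break;
+ *        if (signal_pending(current)) {
+ *            ret = -ERESTARTSYS;
+ *            break;
+ *        }
+ *        schedule();
+ *    }
+ *    __set_current_state(TASK_RUNNING);
+ */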
+
+/*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+ * value indicates whether a reschedule was done in fact.
+ * cond_resched_lock() will drop the spinlock before scheduling,
+ * cond_resched_softirq() will enable bhs before scheduling.
+ */
+extern int _cond_resched(void);
+
+#define cond_resched() ({ \
+ ___might_sleep(__FILE__, __LINE__, 0); \
+ _cond_resched(); \
+})
+
+extern int __cond_resched_lock(spinlock_t *lock);
+
+#ifdef CONFIG_PREEMPT_COUNT
+#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET 0
+#endif
+
+#define cond_resched_lock(lock) ({ \
+ ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
+ __cond_resched_lock(lock); \
+})
+
+extern int __cond_resched_softirq(void);
+
+#define cond_resched_softirq() ({ \
+ ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
+})
+
+static inline void cond_resched_rcu(void)
+{
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
+ rcu_read_unlock();
+ cond_resched();
+ rcu_read_lock();
+#endif
+}
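+
+/*
+ * Usage sketch (illustrative only): drop a voluntary reschedule point into a
+ * long-running loop so higher-priority work can run without waiting for the
+ * whole loop to finish. process_item() is a placeholder.
+ *
+ *    for (i = 0; i < nr_items; i++) {
+ *        process_item(i);
+ *        cond_resched();
+ *    }
+ */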
+
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
+ * but reflects a general need for low latency.)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPT
+ return spin_is_contended(lock);
+#else
+ return 0;
+#endif
+}
+
+/*
+ * Idle thread specific functions to determine the need_resched
+ * polling state.
+ */
+#ifdef TIF_POLLING_NRFLAG
+static inline int tsk_is_polling(struct task_struct *p)
+{
+ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+}
+
+static inline void __current_set_polling(void)
+{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ __current_set_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_curr()
+ */
+ smp_mb__after_atomic();
+
+ return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
+{
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_curr()
+ */
+ smp_mb__after_atomic();
+
+ return unlikely(tif_need_resched());
+}
+
+#else
+static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
+#endif
+
+static inline void current_clr_polling(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+ * Once the bit is cleared, we'll get IPIs with every new
+ * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+ * fold.
+ */
+ smp_mb(); /* paired with resched_curr() */
+
+ preempt_fold_need_resched();
+}
+
+static __always_inline bool need_resched(void)
+{
+ return unlikely(tif_need_resched());
+}
+
+/*
+ * Thread group CPU time accounting.
+ */
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+ raw_spin_lock_init(&sig->cputimer.lock);
+}
+
+/*
+ * Reevaluate whether the task has signals pending delivery.
+ * Wake the task if so.
+ * This is required every time the blocked sigset_t changes.
+ * Callers must hold sighand->siglock.
+ */
+extern void recalc_sigpending_and_wake(struct task_struct *t);
+extern void recalc_sigpending(void);
+
+extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
+
+static inline void signal_wake_up(struct task_struct *t, bool resume)
+{
+ signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+}
+static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
+{
+ signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+}
+
+/*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+ */
+#ifdef CONFIG_SMP
+
+static inline unsigned int task_cpu(const struct task_struct *p)
+{
+ return task_thread_info(p)->cpu;
+}
+
+static inline int task_node(const struct task_struct *p)
+{
+ return cpu_to_node(task_cpu(p));
+}
+
+extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
+
+#else
+
+static inline unsigned int task_cpu(const struct task_struct *p)
+{
+ return 0;
+}
+
+static inline void set_task_cpu(struct task_struct *p, int cpu)
+{
+}
+
+#endif /* CONFIG_SMP */
+
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+
+#ifdef CONFIG_CGROUP_SCHED
+extern struct task_group root_task_group;
+#endif /* CONFIG_CGROUP_SCHED */
+
+extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+
+#ifdef CONFIG_TASK_XACCT
+static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+{
+ tsk->ioac.rchar += amt;
+}
+
+static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
+{
+ tsk->ioac.wchar += amt;
+}
+
+static inline void inc_syscr(struct task_struct *tsk)
+{
+ tsk->ioac.syscr++;
+}
+
+static inline void inc_syscw(struct task_struct *tsk)
+{
+ tsk->ioac.syscw++;
+}
+#else
+static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+{
+}
+
+static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
+{
+}
+
+static inline void inc_syscr(struct task_struct *tsk)
+{
+}
+
+static inline void inc_syscw(struct task_struct *tsk)
+{
+}
+#endif
+
+#ifndef TASK_SIZE_OF
+#define TASK_SIZE_OF(tsk) TASK_SIZE
+#endif
+
+#ifdef CONFIG_MEMCG
+extern void mm_update_next_owner(struct mm_struct *mm);
+#else
+static inline void mm_update_next_owner(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_MEMCG */
+
+static inline unsigned long task_rlimit(const struct task_struct *tsk,
+ unsigned int limit)
+{
+ return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+}
+
+static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
+ unsigned int limit)
+{
+ return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+}
+
+static inline unsigned long rlimit(unsigned int limit)
+{
+ return task_rlimit(current, limit);
+}
+
+static inline unsigned long rlimit_max(unsigned int limit)
+{
+ return task_rlimit_max(current, limit);
+}
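+
+/*
+ * Usage sketch (illustrative only): compare a requested count against the
+ * caller's current soft limit on open files.
+ *
+ *    if (nr_files > rlimit(RLIMIT_NOFILE))
+ *        return -EMFILE;
+ */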
+
+#endif
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
new file mode 100644
index 000000000..9d303b884
--- /dev/null
+++ b/include/linux/sched/deadline.h
@@ -0,0 +1,24 @@
+#ifndef _SCHED_DEADLINE_H
+#define _SCHED_DEADLINE_H
+
+/*
+ * SCHED_DEADLINE tasks have negative priorities, reflecting
+ * the fact that any of them has higher priority than RT and
+ * NORMAL/BATCH tasks.
+ */
+
+#define MAX_DL_PRIO 0
+
+static inline int dl_prio(int prio)
+{
+ if (unlikely(prio < MAX_DL_PRIO))
+ return 1;
+ return 0;
+}
+
+static inline int dl_task(struct task_struct *p)
+{
+ return dl_prio(p->prio);
+}
+
+#endif /* _SCHED_DEADLINE_H */
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
new file mode 100644
index 000000000..7d5d0b861
--- /dev/null
+++ b/include/linux/sched/prio.h
@@ -0,0 +1,72 @@
+#ifndef _SCHED_PRIO_H
+#define _SCHED_PRIO_H
+
+#define MAX_NICE 19
+#define MIN_NICE -20
+#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
+
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO 100
+
+#ifdef CONFIG_SCHED_BFS
+/* Note different MAX_RT_PRIO */
+#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
+
+#define ISO_PRIO (MAX_RT_PRIO)
+#define NORMAL_PRIO (MAX_RT_PRIO + 1)
+#define IDLE_PRIO (MAX_RT_PRIO + 2)
+#define PRIO_LIMIT ((IDLE_PRIO) + 1)
+#else /* CONFIG_SCHED_BFS */
+#define MAX_RT_PRIO MAX_USER_RT_PRIO
+
+#endif /* CONFIG_SCHED_BFS */
+
+#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
+#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+
+/*
+ * Convert user-nice values [ -20 ... 0 ... 19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * and back.
+ */
+#define NICE_TO_PRIO(nice) ((nice) + DEFAULT_PRIO)
+#define PRIO_TO_NICE(prio) ((prio) - DEFAULT_PRIO)
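+
+/*
+ * Worked examples (assuming the non-BFS MAX_RT_PRIO of 100, so
+ * DEFAULT_PRIO == 120):
+ *
+ *    NICE_TO_PRIO(-20) == 100
+ *    NICE_TO_PRIO(0)   == 120
+ *    NICE_TO_PRIO(19)  == 139
+ *    PRIO_TO_NICE(120) == 0
+ */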
+
+/*
+ * 'User priority' is the nice value converted to something we
+ * can work with better when scaling various scheduler parameters,
+ * it's a [ 0 ... 39 ] range.
+ */
+#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
+#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
+#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
+
+/*
+ * Convert nice value [19,-20] to rlimit style value [1,40].
+ */
+static inline long nice_to_rlimit(long nice)
+{
+ return (MAX_NICE - nice + 1);
+}
+
+/*
+ * Convert rlimit style value [1,40] to nice value [-20, 19].
+ */
+static inline long rlimit_to_nice(long prio)
+{
+ return (MAX_NICE - prio + 1);
+}
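+
+/*
+ * Worked examples for the two conversions above:
+ *
+ *    nice_to_rlimit(-20) == 40    nice_to_rlimit(19) == 1
+ *    rlimit_to_nice(40)  == -20   rlimit_to_nice(1)  == 19
+ */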
+
+#endif /* _SCHED_PRIO_H */
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
new file mode 100644
index 000000000..a30b172df
--- /dev/null
+++ b/include/linux/sched/rt.h
@@ -0,0 +1,60 @@
+#ifndef _SCHED_RT_H
+#define _SCHED_RT_H
+
+#include <linux/sched/prio.h>
+
+static inline int rt_prio(int prio)
+{
+ if (unlikely(prio < MAX_RT_PRIO))
+ return 1;
+ return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+ return rt_prio(p->prio);
+}
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
+extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+ return tsk->pi_blocked_on != NULL;
+}
+#else
+static inline int rt_mutex_getprio(struct task_struct *p)
+{
+ return p->normal_prio;
+}
+
+static inline int rt_mutex_get_effective_prio(struct task_struct *task,
+ int newprio)
+{
+ return newprio;
+}
+
+static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+{
+ return NULL;
+}
+# define rt_mutex_adjust_pi(p) do { } while (0)
+static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+{
+ return false;
+}
+#endif
+
+extern void normalize_rt_tasks(void);
+
+
+/*
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
+ * Timeslices get refilled after they expire.
+ */
+#define RR_TIMESLICE (100 * HZ / 1000)
+
+#endif /* _SCHED_RT_H */
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
new file mode 100644
index 000000000..596a0e007
--- /dev/null
+++ b/include/linux/sched/sysctl.h
@@ -0,0 +1,110 @@
+#ifndef _SCHED_SYSCTL_H
+#define _SCHED_SYSCTL_H
+
+#ifdef CONFIG_DETECT_HUNG_TASK
+extern int sysctl_hung_task_check_count;
+extern unsigned int sysctl_hung_task_panic;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern int sysctl_hung_task_warnings;
+extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+#else
+/* Avoid need for ifdefs elsewhere in the code */
+enum { sysctl_hung_task_timeout_secs = 0 };
+#endif
+
+/*
+ * Default maximum number of active map areas, this limits the number of vmas
+ * per mm struct. Users can overwrite this number by sysctl but there is a
+ * problem.
+ *
+ * When a program's coredump is generated in ELF format, a section is created
+ * per vma. In ELF, the number of sections is represented as an unsigned short.
+ * This means the number of sections must be smaller than 65535 at coredump
+ * time. Because the kernel adds some informative sections to the program image
+ * when generating a coredump, we need some margin. The number of extra
+ * sections is currently 1-3 and depends on the arch, so we use "5" as a safe
+ * margin here.
+ *
+ * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
+ * is no longer a hard limit, although some userspace tools can be surprised
+ * by that.
+ */
+#define MAPCOUNT_ELF_CORE_MARGIN (5)
+#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
+
+extern int sysctl_max_map_count;
+
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_child_runs_first;
+
+enum sched_tunable_scaling {
+ SCHED_TUNABLESCALING_NONE,
+ SCHED_TUNABLESCALING_LOG,
+ SCHED_TUNABLESCALING_LINEAR,
+ SCHED_TUNABLESCALING_END,
+};
+extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+
+extern unsigned int sysctl_numa_balancing_scan_delay;
+extern unsigned int sysctl_numa_balancing_scan_period_min;
+extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_size;
+
+#ifdef CONFIG_SCHED_DEBUG
+extern unsigned int sysctl_sched_migration_cost;
+extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_time_avg;
+extern unsigned int sysctl_timer_migration;
+extern unsigned int sysctl_sched_shares_window;
+
+int sched_proc_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos);
+#endif
+#ifdef CONFIG_SCHED_DEBUG
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+ return sysctl_timer_migration;
+}
+#else
+static inline unsigned int get_sysctl_timer_migration(void)
+{
+ return 1;
+}
+#endif
+
+/*
+ * control realtime throttling:
+ *
+ * /proc/sys/kernel/sched_rt_period_us
+ * /proc/sys/kernel/sched_rt_runtime_us
+ */
+extern unsigned int sysctl_sched_rt_period;
+extern int sysctl_sched_rt_runtime;
+
+#ifdef CONFIG_CFS_BANDWIDTH
+extern unsigned int sysctl_sched_cfs_bandwidth_slice;
+#endif
+
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+#endif
+
+extern int sched_rr_timeslice;
+
+extern int sched_rr_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+extern int sched_rt_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+extern int sysctl_numa_balancing(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+#endif /* _SCHED_SYSCTL_H */
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
new file mode 100644
index 000000000..efa931c5c
--- /dev/null
+++ b/include/linux/sched_clock.h
@@ -0,0 +1,20 @@
+/*
+ * sched_clock.h: support for extending counters to a full 64-bit ns counter
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_SCHED_CLOCK
+#define LINUX_SCHED_CLOCK
+
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+extern void sched_clock_postinit(void);
+#else
+static inline void sched_clock_postinit(void) { }
+#endif
+
+extern void sched_clock_register(u64 (*read)(void), int bits,
+ unsigned long rate);
+
+#endif
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
new file mode 100644
index 000000000..f0f8bad54
--- /dev/null
+++ b/include/linux/screen_info.h
@@ -0,0 +1,8 @@
+#ifndef _SCREEN_INFO_H
+#define _SCREEN_INFO_H
+
+#include <uapi/linux/screen_info.h>
+
+extern struct screen_info screen_info;
+
+#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
new file mode 100644
index 000000000..a9414fd49
--- /dev/null
+++ b/include/linux/sctp.h
@@ -0,0 +1,708 @@
+/* SCTP kernel reference Implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This file is part of the SCTP kernel reference Implementation
+ *
+ * Various protocol defined structures.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Or submit a bug report through the following website:
+ * http://www.sf.net/projects/lksctp
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@us.ibm.com>
+ * Xingang Guo <xingang.guo@intel.com>
+ * randall@sctp.chicago.il.us
+ * kmorneau@cisco.com
+ * qxie1@email.mot.com
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Kevin Gao <kevin.gao@intel.com>
+ *
+ * Any bugs reported given to us we will try to fix... any fixes shared will
+ * be incorporated into the next SCTP release.
+ */
+#ifndef __LINUX_SCTP_H__
+#define __LINUX_SCTP_H__
+
+#include <linux/in.h> /* We need in_addr. */
+#include <linux/in6.h> /* We need in6_addr. */
+#include <linux/skbuff.h>
+
+#include <uapi/linux/sctp.h>
+
+/* Section 3.1. SCTP Common Header Format */
+typedef struct sctphdr {
+ __be16 source;
+ __be16 dest;
+ __be32 vtag;
+ __le32 checksum;
+} __packed sctp_sctphdr_t;
+
+static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb)
+{
+ return (struct sctphdr *)skb_transport_header(skb);
+}
+
+/* Section 3.2. Chunk Field Descriptions. */
+typedef struct sctp_chunkhdr {
+ __u8 type;
+ __u8 flags;
+ __be16 length;
+} __packed sctp_chunkhdr_t;
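+
+/* Usage sketch (illustrative): chunks immediately follow the common header,
+ * so the first chunk of a packet can be reached as below; the chunk length
+ * field counts the 4-byte chunk header itself.  Real code must also
+ * bounds-check against the skb and round each chunk length up to a 4-byte
+ * boundary before stepping to the next chunk:
+ *
+ *	struct sctphdr *sh = sctp_hdr(skb);
+ *	sctp_chunkhdr_t *ch = (sctp_chunkhdr_t *)(sh + 1);
+ *	__u16 len = ntohs(ch->length);
+ */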
+
+
+/* Section 3.2. Chunk Type Values.
+ * [Chunk Type] identifies the type of information contained in the Chunk
+ * Value field. It takes a value from 0 to 254. The value of 255 is
+ * reserved for future use as an extension field.
+ */
+typedef enum {
+ SCTP_CID_DATA = 0,
+ SCTP_CID_INIT = 1,
+ SCTP_CID_INIT_ACK = 2,
+ SCTP_CID_SACK = 3,
+ SCTP_CID_HEARTBEAT = 4,
+ SCTP_CID_HEARTBEAT_ACK = 5,
+ SCTP_CID_ABORT = 6,
+ SCTP_CID_SHUTDOWN = 7,
+ SCTP_CID_SHUTDOWN_ACK = 8,
+ SCTP_CID_ERROR = 9,
+ SCTP_CID_COOKIE_ECHO = 10,
+ SCTP_CID_COOKIE_ACK = 11,
+ SCTP_CID_ECN_ECNE = 12,
+ SCTP_CID_ECN_CWR = 13,
+ SCTP_CID_SHUTDOWN_COMPLETE = 14,
+
+ /* AUTH Extension Section 4.1 */
+ SCTP_CID_AUTH = 0x0F,
+
+ /* PR-SCTP Sec 3.2 */
+ SCTP_CID_FWD_TSN = 0xC0,
+
+ /* Use hex, as defined in ADDIP sec. 3.1 */
+ SCTP_CID_ASCONF = 0xC1,
+ SCTP_CID_ASCONF_ACK = 0x80,
+} sctp_cid_t; /* enum */
+
+
+/* Section 3.2
+ * Chunk Types are encoded such that the highest-order two bits specify
+ * the action that must be taken if the processing endpoint does not
+ * recognize the Chunk Type.
+ */
+typedef enum {
+ SCTP_CID_ACTION_DISCARD = 0x00,
+ SCTP_CID_ACTION_DISCARD_ERR = 0x40,
+ SCTP_CID_ACTION_SKIP = 0x80,
+ SCTP_CID_ACTION_SKIP_ERR = 0xc0,
+} sctp_cid_action_t;
+
+enum { SCTP_CID_ACTION_MASK = 0xc0, };
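+
+/* Usage sketch (illustrative, for a chunk header pointer ch): the two
+ * high-order bits of an unrecognized chunk type select the required action:
+ *
+ *	__u8 action = ch->type & SCTP_CID_ACTION_MASK;
+ *	if (action == SCTP_CID_ACTION_SKIP ||
+ *	    action == SCTP_CID_ACTION_SKIP_ERR)
+ *		... skip over this chunk and continue ...
+ */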
+
+/* This flag is used in Chunk Flags for ABORT and SHUTDOWN COMPLETE.
+ *
+ * 3.3.7 Abort Association (ABORT) (6):
+ * The T bit is set to 0 if the sender had a TCB that it destroyed.
+ * If the sender did not have a TCB it should set this bit to 1.
+ */
+enum { SCTP_CHUNK_FLAG_T = 0x01 };
+
+/*
+ * Set the T bit
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 14 |Reserved |T| Length = 4 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Chunk Flags: 8 bits
+ *
+ * Reserved: 7 bits
+ * Set to 0 on transmit and ignored on receipt.
+ *
+ * T bit: 1 bit
+ * The T bit is set to 0 if the sender had a TCB that it destroyed. If
+ * the sender did NOT have a TCB it should set this bit to 1.
+ *
+ * Note: Special rules apply to this chunk for verification, please
+ * see Section 8.5.1 for details.
+ */
+
+#define sctp_test_T_bit(c) ((c)->chunk_hdr->flags & SCTP_CHUNK_FLAG_T)
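+
+/* On a bare chunk header ch the same test is a simple flag check
+ * (illustrative):
+ *
+ *	int t_bit_set = (ch->flags & SCTP_CHUNK_FLAG_T) != 0;
+ */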
+
+/* RFC 2960
+ * Section 3.2.1 Optional/Variable-length Parameter Format.
+ */
+
+typedef struct sctp_paramhdr {
+ __be16 type;
+ __be16 length;
+} __packed sctp_paramhdr_t;
+
+typedef enum {
+
+ /* RFC 2960 Section 3.3.5 */
+ SCTP_PARAM_HEARTBEAT_INFO = cpu_to_be16(1),
+ /* RFC 2960 Section 3.3.2.1 */
+ SCTP_PARAM_IPV4_ADDRESS = cpu_to_be16(5),
+ SCTP_PARAM_IPV6_ADDRESS = cpu_to_be16(6),
+ SCTP_PARAM_STATE_COOKIE = cpu_to_be16(7),
+ SCTP_PARAM_UNRECOGNIZED_PARAMETERS = cpu_to_be16(8),
+ SCTP_PARAM_COOKIE_PRESERVATIVE = cpu_to_be16(9),
+ SCTP_PARAM_HOST_NAME_ADDRESS = cpu_to_be16(11),
+ SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = cpu_to_be16(12),
+ SCTP_PARAM_ECN_CAPABLE = cpu_to_be16(0x8000),
+
+ /* AUTH Extension Section 3 */
+ SCTP_PARAM_RANDOM = cpu_to_be16(0x8002),
+ SCTP_PARAM_CHUNKS = cpu_to_be16(0x8003),
+ SCTP_PARAM_HMAC_ALGO = cpu_to_be16(0x8004),
+
+ /* Add-IP: Supported Extensions, Section 4.2 */
+ SCTP_PARAM_SUPPORTED_EXT = cpu_to_be16(0x8008),
+
+ /* PR-SCTP Sec 3.1 */
+ SCTP_PARAM_FWD_TSN_SUPPORT = cpu_to_be16(0xc000),
+
+ /* Add-IP Extension. Section 3.2 */
+ SCTP_PARAM_ADD_IP = cpu_to_be16(0xc001),
+ SCTP_PARAM_DEL_IP = cpu_to_be16(0xc002),
+ SCTP_PARAM_ERR_CAUSE = cpu_to_be16(0xc003),
+ SCTP_PARAM_SET_PRIMARY = cpu_to_be16(0xc004),
+ SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005),
+ SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006),
+
+} sctp_param_t; /* enum */
+
+
+/* RFC 2960 Section 3.2.1
+ * The Parameter Types are encoded such that the highest-order two bits
+ * specify the action that must be taken if the processing endpoint does
+ * not recognize the Parameter Type.
+ *
+ */
+typedef enum {
+ SCTP_PARAM_ACTION_DISCARD = cpu_to_be16(0x0000),
+ SCTP_PARAM_ACTION_DISCARD_ERR = cpu_to_be16(0x4000),
+ SCTP_PARAM_ACTION_SKIP = cpu_to_be16(0x8000),
+ SCTP_PARAM_ACTION_SKIP_ERR = cpu_to_be16(0xc000),
+} sctp_param_action_t;
+
+enum { SCTP_PARAM_ACTION_MASK = cpu_to_be16(0xc000), };
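+
+/* Usage sketch (illustrative): because both the parameter type constants
+ * above and SCTP_PARAM_ACTION_MASK are generated with cpu_to_be16(), the
+ * action test can be applied directly to the on-the-wire value:
+ *
+ *	sctp_paramhdr_t *p = ...;
+ *	if ((p->type & SCTP_PARAM_ACTION_MASK) == SCTP_PARAM_ACTION_SKIP_ERR)
+ *		... skip the parameter but report it to the peer ...
+ */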
+
+/* RFC 2960 Section 3.3.1 Payload Data (DATA) (0) */
+
+typedef struct sctp_datahdr {
+ __be32 tsn;
+ __be16 stream;
+ __be16 ssn;
+ __be32 ppid;
+ __u8 payload[0];
+} __packed sctp_datahdr_t;
+
+typedef struct sctp_data_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_datahdr_t data_hdr;
+} __packed sctp_data_chunk_t;
+
+/* DATA Chunk Specific Flags */
+enum {
+ SCTP_DATA_MIDDLE_FRAG = 0x00,
+ SCTP_DATA_LAST_FRAG = 0x01,
+ SCTP_DATA_FIRST_FRAG = 0x02,
+ SCTP_DATA_NOT_FRAG = 0x03,
+ SCTP_DATA_UNORDERED = 0x04,
+ SCTP_DATA_SACK_IMM = 0x08,
+};
+enum { SCTP_DATA_FRAG_MASK = 0x03, };
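+
+/* Usage sketch (illustrative): the fragmentation state of a DATA chunk is
+ * read by masking the flags; the remaining bits are independent booleans:
+ *
+ *	int unfragmented = (flags & SCTP_DATA_FRAG_MASK) == SCTP_DATA_NOT_FRAG;
+ *	int unordered    = flags & SCTP_DATA_UNORDERED;
+ */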
+
+
+/* RFC 2960 Section 3.3.2 Initiation (INIT) (1)
+ *
+ * This chunk is used to initiate an SCTP association between two
+ * endpoints.
+ */
+typedef struct sctp_inithdr {
+ __be32 init_tag;
+ __be32 a_rwnd;
+ __be16 num_outbound_streams;
+ __be16 num_inbound_streams;
+ __be32 initial_tsn;
+ __u8 params[0];
+} __packed sctp_inithdr_t;
+
+typedef struct sctp_init_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_inithdr_t init_hdr;
+} __packed sctp_init_chunk_t;
+
+
+/* Section 3.3.2.1. IPv4 Address Parameter (5) */
+typedef struct sctp_ipv4addr_param {
+ sctp_paramhdr_t param_hdr;
+ struct in_addr addr;
+} __packed sctp_ipv4addr_param_t;
+
+/* Section 3.3.2.1. IPv6 Address Parameter (6) */
+typedef struct sctp_ipv6addr_param {
+ sctp_paramhdr_t param_hdr;
+ struct in6_addr addr;
+} __packed sctp_ipv6addr_param_t;
+
+/* Section 3.3.2.1 Cookie Preservative (9) */
+typedef struct sctp_cookie_preserve_param {
+ sctp_paramhdr_t param_hdr;
+ __be32 lifespan_increment;
+} __packed sctp_cookie_preserve_param_t;
+
+/* Section 3.3.2.1 Host Name Address (11) */
+typedef struct sctp_hostname_param {
+ sctp_paramhdr_t param_hdr;
+ uint8_t hostname[0];
+} __packed sctp_hostname_param_t;
+
+/* Section 3.3.2.1 Supported Address Types (12) */
+typedef struct sctp_supported_addrs_param {
+ sctp_paramhdr_t param_hdr;
+ __be16 types[0];
+} __packed sctp_supported_addrs_param_t;
+
+/* Appendix A. ECN Capable (32768) */
+typedef struct sctp_ecn_capable_param {
+ sctp_paramhdr_t param_hdr;
+} __packed sctp_ecn_capable_param_t;
+
+/* ADDIP Section 3.2.6 Adaptation Layer Indication */
+typedef struct sctp_adaptation_ind_param {
+ struct sctp_paramhdr param_hdr;
+ __be32 adaptation_ind;
+} __packed sctp_adaptation_ind_param_t;
+
+/* ADDIP Section 4.2.7 Supported Extensions Parameter */
+typedef struct sctp_supported_ext_param {
+ struct sctp_paramhdr param_hdr;
+ __u8 chunks[0];
+} __packed sctp_supported_ext_param_t;
+
+/* AUTH Section 3.1 Random */
+typedef struct sctp_random_param {
+ sctp_paramhdr_t param_hdr;
+ __u8 random_val[0];
+} __packed sctp_random_param_t;
+
+/* AUTH Section 3.2 Chunk List */
+typedef struct sctp_chunks_param {
+ sctp_paramhdr_t param_hdr;
+ __u8 chunks[0];
+} __packed sctp_chunks_param_t;
+
+/* AUTH Section 3.3 HMAC Algorithm */
+typedef struct sctp_hmac_algo_param {
+ sctp_paramhdr_t param_hdr;
+ __be16 hmac_ids[0];
+} __packed sctp_hmac_algo_param_t;
+
+/* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
+ * The INIT ACK chunk is used to acknowledge the initiation of an SCTP
+ * association.
+ */
+typedef sctp_init_chunk_t sctp_initack_chunk_t;
+
+/* Section 3.3.3.1 State Cookie (7) */
+typedef struct sctp_cookie_param {
+ sctp_paramhdr_t p;
+ __u8 body[0];
+} __packed sctp_cookie_param_t;
+
+/* Section 3.3.3.1 Unrecognized Parameters (8) */
+typedef struct sctp_unrecognized_param {
+ sctp_paramhdr_t param_hdr;
+ sctp_paramhdr_t unrecognized;
+} __packed sctp_unrecognized_param_t;
+
+
+
+/*
+ * 3.3.4 Selective Acknowledgement (SACK) (3):
+ *
+ * This chunk is sent to the peer endpoint to acknowledge received DATA
+ * chunks and to inform the peer endpoint of gaps in the received
+ * subsequences of DATA chunks as represented by their TSNs.
+ */
+
+typedef struct sctp_gap_ack_block {
+ __be16 start;
+ __be16 end;
+} __packed sctp_gap_ack_block_t;
+
+typedef __be32 sctp_dup_tsn_t;
+
+typedef union {
+ sctp_gap_ack_block_t gab;
+ sctp_dup_tsn_t dup;
+} sctp_sack_variable_t;
+
+typedef struct sctp_sackhdr {
+ __be32 cum_tsn_ack;
+ __be32 a_rwnd;
+ __be16 num_gap_ack_blocks;
+ __be16 num_dup_tsns;
+ sctp_sack_variable_t variable[0];
+} __packed sctp_sackhdr_t;
+
+typedef struct sctp_sack_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_sackhdr_t sack_hdr;
+} __packed sctp_sack_chunk_t;
+
+
+/* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
+ *
+ * An endpoint should send this chunk to its peer endpoint to probe the
+ * reachability of a particular destination transport address defined in
+ * the present association.
+ */
+
+typedef struct sctp_heartbeathdr {
+ sctp_paramhdr_t info;
+} __packed sctp_heartbeathdr_t;
+
+typedef struct sctp_heartbeat_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_heartbeathdr_t hb_hdr;
+} __packed sctp_heartbeat_chunk_t;
+
+
+/* For the abort and shutdown ACK we must carry the init tag in the
+ * common header. Just the common header is all that is needed with a
+ * chunk descriptor.
+ */
+typedef struct sctp_abort_chunk {
+ sctp_chunkhdr_t uh;
+} __packed sctp_abort_chunk_t;
+
+
+/* For the graceful shutdown we must carry the tag (in common header)
+ * and the highest consecutive acking value.
+ */
+typedef struct sctp_shutdownhdr {
+ __be32 cum_tsn_ack;
+} __packed sctp_shutdownhdr_t;
+
+struct sctp_shutdown_chunk_t {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_shutdownhdr_t shutdown_hdr;
+} __packed;
+
+/* RFC 2960. Section 3.3.10 Operation Error (ERROR) (9) */
+
+typedef struct sctp_errhdr {
+ __be16 cause;
+ __be16 length;
+ __u8 variable[0];
+} __packed sctp_errhdr_t;
+
+typedef struct sctp_operr_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_errhdr_t err_hdr;
+} __packed sctp_operr_chunk_t;
+
+/* RFC 2960 3.3.10 - Operation Error
+ *
+ * Cause Code: 16 bits (unsigned integer)
+ *
+ * Defines the type of error conditions being reported.
+ * Cause Code
+ * Value Cause Code
+ * --------- ----------------
+ * 1 Invalid Stream Identifier
+ * 2 Missing Mandatory Parameter
+ * 3 Stale Cookie Error
+ * 4 Out of Resource
+ * 5 Unresolvable Address
+ * 6 Unrecognized Chunk Type
+ * 7 Invalid Mandatory Parameter
+ * 8 Unrecognized Parameters
+ * 9 No User Data
+ * 10 Cookie Received While Shutting Down
+ */
+typedef enum {
+
+ SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00),
+ SCTP_ERROR_INV_STRM = cpu_to_be16(0x01),
+ SCTP_ERROR_MISS_PARAM = cpu_to_be16(0x02),
+ SCTP_ERROR_STALE_COOKIE = cpu_to_be16(0x03),
+ SCTP_ERROR_NO_RESOURCE = cpu_to_be16(0x04),
+ SCTP_ERROR_DNS_FAILED = cpu_to_be16(0x05),
+ SCTP_ERROR_UNKNOWN_CHUNK = cpu_to_be16(0x06),
+ SCTP_ERROR_INV_PARAM = cpu_to_be16(0x07),
+ SCTP_ERROR_UNKNOWN_PARAM = cpu_to_be16(0x08),
+ SCTP_ERROR_NO_DATA = cpu_to_be16(0x09),
+ SCTP_ERROR_COOKIE_IN_SHUTDOWN = cpu_to_be16(0x0a),
+
+
+ /* SCTP Implementation Guide:
+ * 11 Restart of an association with new addresses
+ * 12 User Initiated Abort
+ * 13 Protocol Violation
+ */
+
+ SCTP_ERROR_RESTART = cpu_to_be16(0x0b),
+ SCTP_ERROR_USER_ABORT = cpu_to_be16(0x0c),
+ SCTP_ERROR_PROTO_VIOLATION = cpu_to_be16(0x0d),
+
+ /* ADDIP Section 3.3 New Error Causes
+ *
+ * Four new Error Causes are added to the SCTP Operational Errors,
+ * primarily for use in the ASCONF-ACK chunk.
+ *
+ * Value Cause Code
+ * --------- ----------------
+ * 0x00A0 Request to Delete Last Remaining IP Address.
+ * 0x00A1 Operation Refused Due to Resource Shortage.
+ * 0x00A2 Request to Delete Source IP Address.
+ * 0x00A3 Association Aborted due to illegal ASCONF-ACK
+ * 0x00A4 Request refused - no authorization.
+ */
+ SCTP_ERROR_DEL_LAST_IP = cpu_to_be16(0x00A0),
+ SCTP_ERROR_RSRC_LOW = cpu_to_be16(0x00A1),
+ SCTP_ERROR_DEL_SRC_IP = cpu_to_be16(0x00A2),
+ SCTP_ERROR_ASCONF_ACK = cpu_to_be16(0x00A3),
+ SCTP_ERROR_REQ_REFUSED = cpu_to_be16(0x00A4),
+
+ /* AUTH Section 4. New Error Cause
+ *
+ * This section defines a new error cause that will be sent if an AUTH
+ * chunk is received with an unsupported HMAC identifier.
+ * The following table illustrates the new error cause.
+ *
+ * Cause Code Error Cause Name
+ * --------------------------------------------------------------
+ * 0x0105 Unsupported HMAC Identifier
+ */
+ SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105)
+} sctp_error_t;
+
+
+
+/* RFC 2960. Appendix A. Explicit Congestion Notification.
+ * Explicit Congestion Notification Echo (ECNE) (12)
+ */
+typedef struct sctp_ecnehdr {
+ __be32 lowest_tsn;
+} sctp_ecnehdr_t;
+
+typedef struct sctp_ecne_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_ecnehdr_t ence_hdr;
+} __packed sctp_ecne_chunk_t;
+
+/* RFC 2960. Appendix A. Explicit Congestion Notification.
+ * Congestion Window Reduced (CWR) (13)
+ */
+typedef struct sctp_cwrhdr {
+ __be32 lowest_tsn;
+} sctp_cwrhdr_t;
+
+typedef struct sctp_cwr_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_cwrhdr_t cwr_hdr;
+} __packed sctp_cwr_chunk_t;
+
+/* PR-SCTP
+ * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
+ *
+ * Forward Cumulative TSN chunk has the following format:
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 192 | Flags = 0x00 | Length = Variable |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | New Cumulative TSN |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream-1 | Stream Sequence-1 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ /
+ * / \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream-N | Stream Sequence-N |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Chunk Flags:
+ *
+ * Set to all zeros on transmit and ignored on receipt.
+ *
+ * New Cumulative TSN: 32 bit u_int
+ *
+ * This indicates the new cumulative TSN to the data receiver. Upon
+ * the reception of this value, the data receiver MUST consider
+ * any missing TSNs earlier than or equal to this value as received
+ * and stop reporting them as gaps in any subsequent SACKs.
+ *
+ * Stream-N: 16 bit u_int
+ *
+ * This field holds a stream number that was skipped by this
+ * FWD-TSN.
+ *
+ * Stream Sequence-N: 16 bit u_int
+ * This field holds the sequence number associated with the stream
+ * that was skipped. The stream sequence field holds the largest stream
+ * sequence number in this stream being skipped. The receiver of
+ * the FWD-TSN's can use the Stream-N and Stream Sequence-N fields
+ * to enable delivery of any stranded TSN's that remain on the stream
+ * re-ordering queues. This field MUST NOT report TSN's corresponding
+ * to DATA chunks that are marked as unordered. For ordered DATA
+ * chunks this field MUST be filled in.
+ */
+struct sctp_fwdtsn_skip {
+ __be16 stream;
+ __be16 ssn;
+} __packed;
+
+struct sctp_fwdtsn_hdr {
+ __be32 new_cum_tsn;
+ struct sctp_fwdtsn_skip skip[0];
+} __packed;
+
+struct sctp_fwdtsn_chunk {
+ struct sctp_chunkhdr chunk_hdr;
+ struct sctp_fwdtsn_hdr fwdtsn_hdr;
+} __packed;
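+
+/* Usage sketch (illustrative, with ch a struct sctp_fwdtsn_chunk pointer):
+ * the number of skip entries is not carried explicitly; it follows from
+ * the chunk length:
+ *
+ *	nskips = (ntohs(ch->chunk_hdr.length) -
+ *		  sizeof(struct sctp_fwdtsn_chunk)) /
+ *		 sizeof(struct sctp_fwdtsn_skip);
+ */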
+
+
+/* ADDIP
+ * Section 3.1.1 Address Configuration Change Chunk (ASCONF)
+ *
+ * Serial Number: 32 bits (unsigned integer)
+ * This value represents a Serial Number for the ASCONF Chunk. The
+ * valid range of Serial Number is from 0 to 2^32-1.
+ * Serial Numbers wrap back to 0 after reaching 2^32 -1.
+ *
+ * Address Parameter: 8 or 20 bytes (depending on type)
+ * The address is an address of the sender of the ASCONF chunk,
+ * the address MUST be considered part of the association by the
+ * peer endpoint. This field may be used by the receiver of the
+ * ASCONF to help in finding the association. This parameter MUST
+ * be present in every ASCONF message i.e. it is a mandatory TLV
+ * parameter.
+ *
+ * ASCONF Parameter: TLV format
+ * Each Address configuration change is represented by a TLV
+ * parameter as defined in Section 3.2. One or more requests may
+ * be present in an ASCONF Chunk.
+ *
+ * Section 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK)
+ *
+ * Serial Number: 32 bits (unsigned integer)
+ * This value represents the Serial Number for the received ASCONF
+ * Chunk that is acknowledged by this chunk. This value is copied
+ * from the received ASCONF Chunk.
+ *
+ * ASCONF Parameter Response: TLV format
+ * The ASCONF Parameter Response is used in the ASCONF-ACK to
+ * report status of ASCONF processing.
+ */
+typedef struct sctp_addip_param {
+ sctp_paramhdr_t param_hdr;
+ __be32 crr_id;
+} __packed sctp_addip_param_t;
+
+typedef struct sctp_addiphdr {
+ __be32 serial;
+ __u8 params[0];
+} __packed sctp_addiphdr_t;
+
+typedef struct sctp_addip_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_addiphdr_t addip_hdr;
+} __packed sctp_addip_chunk_t;
+
+/* AUTH
+ * Section 4.1 Authentication Chunk (AUTH)
+ *
+ * This chunk is used to hold the result of the HMAC calculation.
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 0x0F | Flags=0 | Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Shared Key Identifier | HMAC Identifier |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * \ HMAC /
+ * / \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Type: 1 byte (unsigned integer)
+ * This value MUST be set to 0x0F for all AUTH-chunks.
+ *
+ * Flags: 1 byte (unsigned integer)
+ * Set to zero on transmit and ignored on receipt.
+ *
+ * Length: 2 bytes (unsigned integer)
+ * This value holds the length of the HMAC in bytes plus 8.
+ *
+ * Shared Key Identifier: 2 bytes (unsigned integer)
+ * This value describes which endpoint pair shared key is used.
+ *
+ * HMAC Identifier: 2 bytes (unsigned integer)
+ * This value describes which message digest is being used. Table 2
+ * shows the currently defined values.
+ *
+ * The following Table 2 shows the currently defined values for HMAC
+ * identifiers.
+ *
+ * +-----------------+--------------------------+
+ * | HMAC Identifier | Message Digest Algorithm |
+ * +-----------------+--------------------------+
+ * | 0 | Reserved |
+ * | 1 | SHA-1 defined in [8] |
+ * | 2 | Reserved |
+ * | 3 | SHA-256 defined in [8] |
+ * +-----------------+--------------------------+
+ *
+ *
+ * HMAC: n bytes (unsigned integer) This holds the result of the HMAC
+ * calculation.
+ */
+typedef struct sctp_authhdr {
+ __be16 shkey_id;
+ __be16 hmac_id;
+ __u8 hmac[0];
+} __packed sctp_authhdr_t;
+
+typedef struct sctp_auth_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ sctp_authhdr_t auth_hdr;
+} __packed sctp_auth_chunk_t;
+
+#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/scx200.h b/include/linux/scx200.h
new file mode 100644
index 000000000..de466e11e
--- /dev/null
+++ b/include/linux/scx200.h
@@ -0,0 +1,51 @@
+/* linux/include/linux/scx200.h
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+
+ Defines for the National Semiconductor SCx200 Processors
+*/
+
+/* Interesting stuff for the National Semiconductor SCx200 CPU */
+
+extern unsigned scx200_cb_base;
+
+#define scx200_cb_present() (scx200_cb_base!=0)
+
+/* F0 PCI Header/Bridge Configuration Registers */
+#define SCx200_DOCCS_BASE 0x78 /* DOCCS Base Address Register */
+#define SCx200_DOCCS_CTRL 0x7c /* DOCCS Control Register */
+
+/* GPIO Register Block */
+#define SCx200_GPIO_SIZE 0x2c /* Size of GPIO register block */
+
+/* General Configuration Block */
+#define SCx200_CB_BASE_FIXED 0x9000 /* Base fixed at 0x9000 according to errata? */
+
+/* Watchdog Timer */
+#define SCx200_WDT_OFFSET 0x00 /* offset within configuration block */
+#define SCx200_WDT_SIZE 0x05 /* size */
+
+#define SCx200_WDT_WDTO 0x00 /* Time-Out Register */
+#define SCx200_WDT_WDCNFG 0x02 /* Configuration Register */
+#define SCx200_WDT_WDSTS 0x04 /* Status Register */
+#define SCx200_WDT_WDSTS_WDOVF (1<<0) /* Overflow bit */
+
+/* High Resolution Timer */
+#define SCx200_TIMER_OFFSET 0x08
+#define SCx200_TIMER_SIZE 0x06
+
+/* Clock Generators */
+#define SCx200_CLOCKGEN_OFFSET 0x10
+#define SCx200_CLOCKGEN_SIZE 0x10
+
+/* Pin Multiplexing and Miscellaneous Configuration Registers */
+#define SCx200_MISC_OFFSET 0x30
+#define SCx200_MISC_SIZE 0x10
+
+#define SCx200_PMR 0x30 /* Pin Multiplexing Register */
+#define SCx200_MCR 0x34 /* Miscellaneous Configuration Register */
+#define SCx200_INTSEL 0x38 /* Interrupt Selection Register */
+#define SCx200_IID 0x3c /* IA On a Chip Identification Number Reg */
+#define SCx200_REV 0x3d /* Revision Register */
+#define SCx200_CBA 0x3e /* Configuration Base Address Register */
+#define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */
diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h
new file mode 100644
index 000000000..ece4e553e
--- /dev/null
+++ b/include/linux/scx200_gpio.h
@@ -0,0 +1,88 @@
+u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear);
+
+extern unsigned scx200_gpio_base;
+extern unsigned long scx200_gpio_shadow[2];
+extern struct nsc_gpio_ops scx200_gpio_ops;
+
+#define scx200_gpio_present() (scx200_gpio_base!=0)
+
+/* Definitions to make sure I do the same thing in all functions */
+#define __SCx200_GPIO_BANK unsigned bank = index>>5
+#define __SCx200_GPIO_IOADDR unsigned short ioaddr = scx200_gpio_base+0x10*bank
+#define __SCx200_GPIO_SHADOW unsigned long *shadow = scx200_gpio_shadow+bank
+#define __SCx200_GPIO_INDEX index &= 31
+
+#define __SCx200_GPIO_OUT __asm__ __volatile__("outsl":"=mS" (shadow):"d" (ioaddr), "0" (shadow))
+
+/* returns the value of the GPIO pin */
+
+static inline int scx200_gpio_get(unsigned index) {
+ __SCx200_GPIO_BANK;
+ __SCx200_GPIO_IOADDR + 0x04;
+ __SCx200_GPIO_INDEX;
+
+ return (inl(ioaddr) & (1<<index)) ? 1 : 0;
+}
+
+/* return the value driven on the GPIO signal (the value that will be
+   driven if the GPIO is configured as an output; it might not be the
+ state of the GPIO right now if the GPIO is configured as an input) */
+
+static inline int scx200_gpio_current(unsigned index) {
+ __SCx200_GPIO_BANK;
+ __SCx200_GPIO_INDEX;
+
+ return (scx200_gpio_shadow[bank] & (1<<index)) ? 1 : 0;
+}
+
+/* drive the GPIO signal high */
+
+static inline void scx200_gpio_set_high(unsigned index) {
+ __SCx200_GPIO_BANK;
+ __SCx200_GPIO_IOADDR;
+ __SCx200_GPIO_SHADOW;
+ __SCx200_GPIO_INDEX;
+ set_bit(index, shadow); /* __set_bit()? */
+ __SCx200_GPIO_OUT;
+}
+
+/* drive the GPIO signal low */
+
+static inline void scx200_gpio_set_low(unsigned index) {
+ __SCx200_GPIO_BANK;
+ __SCx200_GPIO_IOADDR;
+ __SCx200_GPIO_SHADOW;
+ __SCx200_GPIO_INDEX;
+ clear_bit(index, shadow); /* __clear_bit()? */
+ __SCx200_GPIO_OUT;
+}
+
+/* drive the GPIO signal to state */
+
+static inline void scx200_gpio_set(unsigned index, int state) {
+ __SCx200_GPIO_BANK;
+ __SCx200_GPIO_IOADDR;
+ __SCx200_GPIO_SHADOW;
+ __SCx200_GPIO_INDEX;
+ if (state)
+ set_bit(index, shadow);
+ else
+ clear_bit(index, shadow);
+ __SCx200_GPIO_OUT;
+}
+
+/* toggle the GPIO signal */
+static inline void scx200_gpio_change(unsigned index) {
+ __SCx200_GPIO_BANK;
+ __SCx200_GPIO_IOADDR;
+ __SCx200_GPIO_SHADOW;
+ __SCx200_GPIO_INDEX;
+ change_bit(index, shadow);
+ __SCx200_GPIO_OUT;
+}
+
+#undef __SCx200_GPIO_BANK
+#undef __SCx200_GPIO_IOADDR
+#undef __SCx200_GPIO_SHADOW
+#undef __SCx200_GPIO_INDEX
+#undef __SCx200_GPIO_OUT
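+
+/* Usage sketch (illustrative; pin 7 is an arbitrary example and must
+ * already be configured as an output):
+ *
+ *	if (scx200_gpio_present()) {
+ *		scx200_gpio_set_high(7);
+ *		if (scx200_gpio_get(7))
+ *			... the pin reads back high ...
+ *	}
+ */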
diff --git a/include/linux/sdb.h b/include/linux/sdb.h
new file mode 100644
index 000000000..fbb76a46c
--- /dev/null
+++ b/include/linux/sdb.h
@@ -0,0 +1,159 @@
+/*
+ * This is the official version 1.1 of sdb.h
+ */
+#ifndef __SDB_H__
+#define __SDB_H__
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/*
+ * All structures are 64 bytes long and are expected
+ * to live in an array, one for each interconnect.
+ * Most fields of the structures are shared among the
+ * various types, and most-specific fields are at the
+ * beginning (for alignment reasons, and to keep the
+ * magic number at the head of the interconnect record
+ */
+
+/* Product, 40 bytes at offset 24, 8-byte aligned
+ *
+ * device_id is vendor-assigned; version is device-specific,
+ * date is hex (e.g. 0x20120501), name is UTF-8, blank-filled
+ * and not terminated with a 0 byte.
+ */
+struct sdb_product {
+ uint64_t vendor_id; /* 0x18..0x1f */
+ uint32_t device_id; /* 0x20..0x23 */
+ uint32_t version; /* 0x24..0x27 */
+ uint32_t date; /* 0x28..0x2b */
+ uint8_t name[19]; /* 0x2c..0x3e */
+ uint8_t record_type; /* 0x3f */
+};
+
+/*
+ * Component, 56 bytes at offset 8, 8-byte aligned
+ *
+ * The address range is first to last, inclusive
+ * (for example 0x100000 - 0x10ffff)
+ */
+struct sdb_component {
+ uint64_t addr_first; /* 0x08..0x0f */
+ uint64_t addr_last; /* 0x10..0x17 */
+ struct sdb_product product; /* 0x18..0x3f */
+};
+
+/* Type of the SDB record */
+enum sdb_record_type {
+ sdb_type_interconnect = 0x00,
+ sdb_type_device = 0x01,
+ sdb_type_bridge = 0x02,
+ sdb_type_integration = 0x80,
+ sdb_type_repo_url = 0x81,
+ sdb_type_synthesis = 0x82,
+ sdb_type_empty = 0xFF,
+};
+
+/* Type 0: interconnect (first of the array)
+ *
+ * sdb_records is the length of the table including this first
+ * record, version is 1. The bus type is enumerated later.
+ */
+#define SDB_MAGIC 0x5344422d /* "SDB-" */
+struct sdb_interconnect {
+ uint32_t sdb_magic; /* 0x00-0x03 */
+ uint16_t sdb_records; /* 0x04-0x05 */
+ uint8_t sdb_version; /* 0x06 */
+ uint8_t sdb_bus_type; /* 0x07 */
+ struct sdb_component sdb_component; /* 0x08-0x3f */
+};
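+
+/* Usage sketch (illustrative): a parser first checks the magic, then walks
+ * sdb_records entries of 64 bytes each; sdb_records counts this first
+ * record too.  If the table is stored big-endian (an assumption about the
+ * carrier, not a rule of this header), the fields need swapping:
+ *
+ *	struct sdb_interconnect *ic = table;
+ *	if (be32_to_cpu(ic->sdb_magic) != SDB_MAGIC)
+ *		return -ENODEV;
+ *	nrec = be16_to_cpu(ic->sdb_records);
+ */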
+
+/* Type 1: device
+ *
+ * class is 0 for "custom device", other values are
+ * to be standardized; ABI version is for the driver,
+ * bus-specific bits are defined by each bus (see below)
+ */
+struct sdb_device {
+ uint16_t abi_class; /* 0x00-0x01 */
+ uint8_t abi_ver_major; /* 0x02 */
+ uint8_t abi_ver_minor; /* 0x03 */
+ uint32_t bus_specific; /* 0x04-0x07 */
+ struct sdb_component sdb_component; /* 0x08-0x3f */
+};
+
+/* Type 2: bridge
+ *
+ * child is the address of the nested SDB table
+ */
+struct sdb_bridge {
+ uint64_t sdb_child; /* 0x00-0x07 */
+ struct sdb_component sdb_component; /* 0x08-0x3f */
+};
+
+/* Type 0x80: integration
+ *
+ * all types with bit 7 set are meta-information, so
+ * software can ignore the types it doesn't know. Here we
+ * just provide product information for an aggregate device
+ */
+struct sdb_integration {
+ uint8_t reserved[24]; /* 0x00-0x17 */
+	struct sdb_product product;	/* 0x18-0x3f */
+};
+
+/* Type 0x81: Top module repository url
+ *
+ * again, an informative field that software can ignore
+ */
+struct sdb_repo_url {
+ uint8_t repo_url[63]; /* 0x00-0x3e */
+ uint8_t record_type; /* 0x3f */
+};
+
+/* Type 0x82: Synthesis tool information
+ *
+ * this, too, is an informative record that software can ignore
+ */
+struct sdb_synthesis {
+ uint8_t syn_name[16]; /* 0x00-0x0f */
+ uint8_t commit_id[16]; /* 0x10-0x1f */
+ uint8_t tool_name[8]; /* 0x20-0x27 */
+ uint32_t tool_version; /* 0x28-0x2b */
+ uint32_t date; /* 0x2c-0x2f */
+ uint8_t user_name[15]; /* 0x30-0x3e */
+ uint8_t record_type; /* 0x3f */
+};
+
+/* Type 0xff: empty
+ *
+ * this allows keeping empty slots during development,
+ * so they can be filled later with minimal effort and
+ * no misleading description is ever shipped -- hopefully.
+ * It can also be used to pad a table to a desired length.
+ */
+struct sdb_empty {
+ uint8_t reserved[63]; /* 0x00-0x3e */
+ uint8_t record_type; /* 0x3f */
+};
+
+/* The type of bus, for bus-specific flags */
+enum sdb_bus_type {
+ sdb_wishbone = 0x00,
+ sdb_data = 0x01,
+};
+
+#define SDB_WB_WIDTH_MASK 0x0f
+#define SDB_WB_ACCESS8 0x01
+#define SDB_WB_ACCESS16 0x02
+#define SDB_WB_ACCESS32 0x04
+#define SDB_WB_ACCESS64 0x08
+#define SDB_WB_LITTLE_ENDIAN 0x80
+
+#define SDB_DATA_READ 0x04
+#define SDB_DATA_WRITE 0x02
+#define SDB_DATA_EXEC 0x01
+
+#endif /* __SDB_H__ */
diff --git a/include/linux/sdla.h b/include/linux/sdla.h
new file mode 100644
index 000000000..fe7a967d7
--- /dev/null
+++ b/include/linux/sdla.h
@@ -0,0 +1,244 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the Frame relay interface.
+ *
+ * Version: @(#)if_ifrad.h 0.20 13 Apr 96
+ *
+ * Author: Mike McLagan <mike.mclagan@linux.org>
+ *
+ * Changes:
+ * 0.15 Mike McLagan Structure packing
+ *
+ * 0.20 Mike McLagan New flags for S508 buffer handling
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef SDLA_H
+#define SDLA_H
+
+#include <uapi/linux/sdla.h>
+
+
+/* important Z80 window addresses */
+#define SDLA_CONTROL_WND 0xE000
+
+#define SDLA_502_CMD_BUF 0xEF60
+#define SDLA_502_RCV_BUF 0xA900
+#define SDLA_502_TXN_AVAIL 0xFFF1
+#define SDLA_502_RCV_AVAIL 0xFFF2
+#define SDLA_502_EVENT_FLAGS 0xFFF3
+#define SDLA_502_MDM_STATUS 0xFFF4
+#define SDLA_502_IRQ_INTERFACE 0xFFFD
+#define SDLA_502_IRQ_PERMISSION 0xFFFE
+#define SDLA_502_DATA_OFS 0x0010
+
+#define SDLA_508_CMD_BUF 0xE000
+#define SDLA_508_TXBUF_INFO 0xF100
+#define SDLA_508_RXBUF_INFO 0xF120
+#define SDLA_508_EVENT_FLAGS 0xF003
+#define SDLA_508_MDM_STATUS 0xF004
+#define SDLA_508_IRQ_INTERFACE 0xF010
+#define SDLA_508_IRQ_PERMISSION 0xF011
+#define SDLA_508_TSE_OFFSET 0xF012
+
+/* Event flags */
+#define SDLA_EVENT_STATUS 0x01
+#define SDLA_EVENT_DLCI_STATUS 0x02
+#define SDLA_EVENT_BAD_DLCI 0x04
+#define SDLA_EVENT_LINK_DOWN 0x40
+
+/* IRQ Trigger flags */
+#define SDLA_INTR_RX 0x01
+#define SDLA_INTR_TX 0x02
+#define SDLA_INTR_MODEM 0x04
+#define SDLA_INTR_COMPLETE 0x08
+#define SDLA_INTR_STATUS 0x10
+#define SDLA_INTR_TIMER 0x20
+
+/* DLCI status bits */
+#define SDLA_DLCI_DELETED 0x01
+#define SDLA_DLCI_ACTIVE 0x02
+#define SDLA_DLCI_WAITING 0x04
+#define SDLA_DLCI_NEW 0x08
+#define SDLA_DLCI_INCLUDED 0x40
+
+/* valid command codes */
+#define SDLA_INFORMATION_WRITE 0x01
+#define SDLA_INFORMATION_READ 0x02
+#define SDLA_ISSUE_IN_CHANNEL_SIGNAL 0x03
+#define SDLA_SET_DLCI_CONFIGURATION 0x10
+#define SDLA_READ_DLCI_CONFIGURATION 0x11
+#define SDLA_DISABLE_COMMUNICATIONS 0x12
+#define SDLA_ENABLE_COMMUNICATIONS 0x13
+#define SDLA_READ_DLC_STATUS 0x14
+#define SDLA_READ_DLC_STATISTICS 0x15
+#define SDLA_FLUSH_DLC_STATISTICS 0x16
+#define SDLA_LIST_ACTIVE_DLCI 0x17
+#define SDLA_FLUSH_INFORMATION_BUFFERS 0x18
+#define SDLA_ADD_DLCI 0x20
+#define SDLA_DELETE_DLCI 0x21
+#define SDLA_ACTIVATE_DLCI 0x22
+#define SDLA_DEACTIVATE_DLCI 0x23
+#define SDLA_READ_MODEM_STATUS 0x30
+#define SDLA_SET_MODEM_STATUS 0x31
+#define SDLA_READ_COMMS_ERR_STATS 0x32
+#define SDLA_FLUSH_COMMS_ERR_STATS 0x33
+#define SDLA_READ_CODE_VERSION 0x40
+#define SDLA_SET_IRQ_TRIGGER 0x50
+#define SDLA_GET_IRQ_TRIGGER 0x51
+
+/* In channel signal types */
+#define SDLA_ICS_LINK_VERIFY 0x02
+#define SDLA_ICS_STATUS_ENQ 0x03
+
+/* modem status flags */
+#define SDLA_MODEM_DTR_HIGH 0x01
+#define SDLA_MODEM_RTS_HIGH 0x02
+#define SDLA_MODEM_DCD_HIGH 0x08
+#define SDLA_MODEM_CTS_HIGH 0x20
+
+/* used for RET_MODEM interpretation */
+#define SDLA_MODEM_DCD_LOW 0x01
+#define SDLA_MODEM_CTS_LOW 0x02
+
+/* return codes */
+#define SDLA_RET_OK 0x00
+#define SDLA_RET_COMMUNICATIONS 0x01
+#define SDLA_RET_CHANNEL_INACTIVE 0x02
+#define SDLA_RET_DLCI_INACTIVE 0x03
+#define SDLA_RET_DLCI_CONFIG 0x04
+#define SDLA_RET_BUF_TOO_BIG 0x05
+#define SDLA_RET_NO_DATA 0x05
+#define SDLA_RET_BUF_OVERSIZE 0x06
+#define SDLA_RET_CIR_OVERFLOW 0x07
+#define SDLA_RET_NO_BUFS 0x08
+#define SDLA_RET_TIMEOUT 0x0A
+#define SDLA_RET_MODEM 0x10
+#define SDLA_RET_CHANNEL_OFF 0x11
+#define SDLA_RET_CHANNEL_ON 0x12
+#define SDLA_RET_DLCI_STATUS 0x13
+#define SDLA_RET_DLCI_UNKNOWN 0x14
+#define SDLA_RET_COMMAND_INVALID 0x1F
+
+/* Configuration flags */
+#define SDLA_DIRECT_RECV 0x0080
+#define SDLA_TX_NO_EXCEPT 0x0020
+#define SDLA_NO_ICF_MSGS 0x1000
+#define SDLA_TX50_RX50 0x0000
+#define SDLA_TX70_RX30 0x2000
+#define SDLA_TX30_RX70 0x4000
+
+/* IRQ selection flags */
+#define SDLA_IRQ_RECEIVE 0x01
+#define SDLA_IRQ_TRANSMIT 0x02
+#define SDLA_IRQ_MODEM_STAT 0x04
+#define SDLA_IRQ_COMMAND 0x08
+#define SDLA_IRQ_CHANNEL 0x10
+#define SDLA_IRQ_TIMER 0x20
+
+/* definitions for PC memory mapping */
+#define SDLA_8K_WINDOW 0x01
+#define SDLA_S502_SEG_A 0x10
+#define SDLA_S502_SEG_C 0x20
+#define SDLA_S502_SEG_D 0x00
+#define SDLA_S502_SEG_E 0x30
+#define SDLA_S507_SEG_A 0x00
+#define SDLA_S507_SEG_B 0x40
+#define SDLA_S507_SEG_C 0x80
+#define SDLA_S507_SEG_E 0xC0
+#define SDLA_S508_SEG_A 0x00
+#define SDLA_S508_SEG_C 0x10
+#define SDLA_S508_SEG_D 0x08
+#define SDLA_S508_SEG_E 0x18
+
+/* SDLA adapter port constants */
+#define SDLA_IO_EXTENTS 0x04
+
+#define SDLA_REG_CONTROL 0x00
+#define SDLA_REG_PC_WINDOW 0x01 /* offset for PC window select latch */
+#define SDLA_REG_Z80_WINDOW 0x02 /* offset for Z80 window select latch */
+#define SDLA_REG_Z80_CONTROL 0x03 /* offset for Z80 control latch */
+
+#define SDLA_S502_STS 0x00 /* status reg for 502, 502E, 507 */
+#define SDLA_S508_GNRL 0x00 /* general purp. reg for 508 */
+#define SDLA_S508_STS 0x01 /* status reg for 508 */
+#define SDLA_S508_IDR 0x02 /* ID reg for 508 */
+
+/* control register flags */
+#define SDLA_S502A_START 0x00 /* start the CPU */
+#define SDLA_S502A_INTREQ 0x02
+#define SDLA_S502A_INTEN 0x04
+#define SDLA_S502A_HALT 0x08 /* halt the CPU */
+#define SDLA_S502A_NMI 0x10 /* issue an NMI to the CPU */
+
+#define SDLA_S502E_CPUEN 0x01
+#define SDLA_S502E_ENABLE 0x02
+#define SDLA_S502E_INTACK 0x04
+
+#define SDLA_S507_ENABLE 0x01
+#define SDLA_S507_IRQ3 0x00
+#define SDLA_S507_IRQ4 0x20
+#define SDLA_S507_IRQ5 0x40
+#define SDLA_S507_IRQ7 0x60
+#define SDLA_S507_IRQ10 0x80
+#define SDLA_S507_IRQ11 0xA0
+#define SDLA_S507_IRQ12 0xC0
+#define SDLA_S507_IRQ15 0xE0
+
+#define SDLA_HALT 0x00
+#define SDLA_CPUEN 0x02
+#define SDLA_MEMEN 0x04
+#define SDLA_S507_EPROMWR 0x08
+#define SDLA_S507_EPROMCLK 0x10
+#define SDLA_S508_INTRQ 0x08
+#define SDLA_S508_INTEN 0x10
+
+struct sdla_cmd {
+ char opp_flag;
+ char cmd;
+ short length;
+ char retval;
+ short dlci;
+ char flags;
+ short rxlost_int;
+ long rxlost_app;
+ char reserve[2];
+ char data[SDLA_MAX_DATA]; /* transfer data buffer */
+} __attribute__((packed));
+
+struct intr_info {
+ char flags;
+ short txlen;
+ char irq;
+ char flags2;
+ short timeout;
+} __attribute__((packed));
+
+/* found in the 508's control window at RXBUF_INFO */
+struct buf_info {
+ unsigned short rse_num;
+ unsigned long rse_base;
+ unsigned long rse_next;
+ unsigned long buf_base;
+ unsigned short reserved;
+ unsigned long buf_top;
+} __attribute__((packed));
+
+/* structure pointed to by rse_base in RXBUF_INFO struct */
+struct buf_entry {
+ char opp_flag;
+ short length;
+ short dlci;
+ char flags;
+ short timestamp;
+ short reserved[2];
+ long buf_addr;
+} __attribute__((packed));
+
+#endif
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
new file mode 100644
index 000000000..a19ddacda
--- /dev/null
+++ b/include/linux/seccomp.h
@@ -0,0 +1,98 @@
+#ifndef _LINUX_SECCOMP_H
+#define _LINUX_SECCOMP_H
+
+#include <uapi/linux/seccomp.h>
+
+#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC)
+
+#ifdef CONFIG_SECCOMP
+
+#include <linux/thread_info.h>
+#include <asm/seccomp.h>
+
+struct seccomp_filter;
+/**
+ * struct seccomp - the state of a seccomp'ed process
+ *
+ * @mode: indicates one of the valid values above for controlled
+ * system calls available to a process.
+ * @filter: must always point to a valid seccomp-filter or NULL as it is
+ * accessed without locking during system call entry.
+ *
+ * @filter must only be accessed from the context of current as there
+ * is no read locking.
+ */
+struct seccomp {
+ int mode;
+ struct seccomp_filter *filter;
+};
+
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+extern int __secure_computing(void);
+static inline int secure_computing(void)
+{
+ if (unlikely(test_thread_flag(TIF_SECCOMP)))
+ return __secure_computing();
+ return 0;
+}
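+
+/* Usage sketch (illustrative): architecture syscall-entry code calls this
+ * before dispatching the syscall and, roughly, bails out on a non-zero
+ * return value:
+ *
+ *	if (secure_computing())
+ *		return;
+ */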
+
+#define SECCOMP_PHASE1_OK 0
+#define SECCOMP_PHASE1_SKIP 1
+
+extern u32 seccomp_phase1(struct seccomp_data *sd);
+int seccomp_phase2(u32 phase1_result);
+#else
+extern void secure_computing_strict(int this_syscall);
+#endif
+
+extern long prctl_get_seccomp(void);
+extern long prctl_set_seccomp(unsigned long, char __user *);
+
+static inline int seccomp_mode(struct seccomp *s)
+{
+ return s->mode;
+}
+
+#else /* CONFIG_SECCOMP */
+
+#include <linux/errno.h>
+
+struct seccomp { };
+struct seccomp_filter { };
+
+#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+static inline int secure_computing(void) { return 0; }
+#else
+static inline void secure_computing_strict(int this_syscall) { return; }
+#endif
+
+static inline long prctl_get_seccomp(void)
+{
+ return -EINVAL;
+}
+
+static inline long prctl_set_seccomp(unsigned long arg2, char __user *arg3)
+{
+ return -EINVAL;
+}
+
+static inline int seccomp_mode(struct seccomp *s)
+{
+ return 0;
+}
+#endif /* CONFIG_SECCOMP */
+
+#ifdef CONFIG_SECCOMP_FILTER
+extern void put_seccomp_filter(struct task_struct *tsk);
+extern void get_seccomp_filter(struct task_struct *tsk);
+#else /* CONFIG_SECCOMP_FILTER */
+static inline void put_seccomp_filter(struct task_struct *tsk)
+{
+ return;
+}
+static inline void get_seccomp_filter(struct task_struct *tsk)
+{
+ return;
+}
+#endif /* CONFIG_SECCOMP_FILTER */
+#endif /* _LINUX_SECCOMP_H */
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
new file mode 100644
index 000000000..da1b33b33
--- /dev/null
+++ b/include/linux/securebits.h
@@ -0,0 +1,7 @@
+#ifndef _LINUX_SECUREBITS_H
+#define _LINUX_SECUREBITS_H 1
+
+#include <uapi/linux/securebits.h>
+
+#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
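+
+/* Usage sketch (illustrative): "if (issecure(SECURE_NOROOT)) ..." tests the
+ * calling task's securebits; SECURE_NOROOT comes from the uapi header
+ * included above.
+ */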
+#endif /* !_LINUX_SECUREBITS_H */
diff --git a/include/linux/security.h b/include/linux/security.h
new file mode 100644
index 000000000..18264ea9e
--- /dev/null
+++ b/include/linux/security.h
@@ -0,0 +1,3253 @@
+/*
+ * Linux Security plug
+ *
+ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
+ * Copyright (C) 2001 James Morris <jmorris@intercode.com.au>
+ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Due to this file being licensed under the GPL there is controversy over
+ * whether this permits you to write a module that #includes this file
+ * without placing your module under the GPL. Please consult a lawyer for
+ * advice before doing this.
+ *
+ */
+
+#ifndef __LINUX_SECURITY_H
+#define __LINUX_SECURITY_H
+
+#include <linux/key.h>
+#include <linux/capability.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+
+struct linux_binprm;
+struct cred;
+struct rlimit;
+struct siginfo;
+struct sem_array;
+struct sembuf;
+struct kern_ipc_perm;
+struct audit_context;
+struct super_block;
+struct inode;
+struct dentry;
+struct file;
+struct vfsmount;
+struct path;
+struct qstr;
+struct nameidata;
+struct iattr;
+struct fown_struct;
+struct file_operations;
+struct shmid_kernel;
+struct msg_msg;
+struct msg_queue;
+struct xattr;
+struct xfrm_sec_ctx;
+struct mm_struct;
+
+/* Maximum number of letters for an LSM name string */
+#define SECURITY_NAME_MAX 10
+
+/* If capable should audit the security request */
+#define SECURITY_CAP_NOAUDIT 0
+#define SECURITY_CAP_AUDIT 1
+
+/* LSM Agnostic defines for sb_set_mnt_opts */
+#define SECURITY_LSM_NATIVE_LABELS 1
+
+struct ctl_table;
+struct audit_krule;
+struct user_namespace;
+struct timezone;
+
+/*
+ * These functions are in security/capability.c and are used
+ * as the default capabilities functions
+ */
+extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
+ int cap, int audit);
+extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
+extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
+extern int cap_ptrace_traceme(struct task_struct *parent);
+extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
+extern int cap_capset(struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted);
+extern int cap_bprm_set_creds(struct linux_binprm *bprm);
+extern int cap_bprm_secureexec(struct linux_binprm *bprm);
+extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
+extern int cap_inode_need_killpriv(struct dentry *dentry);
+extern int cap_inode_killpriv(struct dentry *dentry);
+extern int cap_mmap_addr(unsigned long addr);
+extern int cap_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags);
+extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
+extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+extern int cap_task_setscheduler(struct task_struct *p);
+extern int cap_task_setioprio(struct task_struct *p, int ioprio);
+extern int cap_task_setnice(struct task_struct *p, int nice);
+extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
+
+struct msghdr;
+struct sk_buff;
+struct sock;
+struct sockaddr;
+struct socket;
+struct flowi;
+struct dst_entry;
+struct xfrm_selector;
+struct xfrm_policy;
+struct xfrm_state;
+struct xfrm_user_sec_ctx;
+struct seq_file;
+
+extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
+
+void reset_security_ops(void);
+
+#ifdef CONFIG_MMU
+extern unsigned long mmap_min_addr;
+extern unsigned long dac_mmap_min_addr;
+#else
+#define mmap_min_addr 0UL
+#define dac_mmap_min_addr 0UL
+#endif
+
+/*
+ * Values used in the task_security_ops calls
+ */
+/* setuid or setgid, id0 == uid or gid */
+#define LSM_SETID_ID 1
+
+/* setreuid or setregid, id0 == real, id1 == eff */
+#define LSM_SETID_RE 2
+
+/* setresuid or setresgid, id0 == real, id1 == eff, uid2 == saved */
+#define LSM_SETID_RES 4
+
+/* setfsuid or setfsgid, id0 == fsuid or fsgid */
+#define LSM_SETID_FS 8
+
+/* forward declares to avoid warnings */
+struct sched_param;
+struct request_sock;
+
+/* bprm->unsafe reasons */
+#define LSM_UNSAFE_SHARE 1
+#define LSM_UNSAFE_PTRACE 2
+#define LSM_UNSAFE_PTRACE_CAP 4
+#define LSM_UNSAFE_NO_NEW_PRIVS 8
+
+#ifdef CONFIG_MMU
+extern int mmap_min_addr_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
+
+/* security_inode_init_security callback function to write xattrs */
+typedef int (*initxattrs) (struct inode *inode,
+ const struct xattr *xattr_array, void *fs_data);
+
+#ifdef CONFIG_SECURITY
+
+struct security_mnt_opts {
+ char **mnt_opts;
+ int *mnt_opts_flags;
+ int num_mnt_opts;
+};
+
+static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
+{
+ opts->mnt_opts = NULL;
+ opts->mnt_opts_flags = NULL;
+ opts->num_mnt_opts = 0;
+}
+
+static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+{
+ int i;
+ if (opts->mnt_opts)
+ for (i = 0; i < opts->num_mnt_opts; i++)
+ kfree(opts->mnt_opts[i]);
+ kfree(opts->mnt_opts);
+ opts->mnt_opts = NULL;
+ kfree(opts->mnt_opts_flags);
+ opts->mnt_opts_flags = NULL;
+ opts->num_mnt_opts = 0;
+}
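+
+/* Usage sketch (illustrative; the middle step is a placeholder for however
+ * the options actually get filled in):
+ *
+ *	struct security_mnt_opts opts;
+ *
+ *	security_init_mnt_opts(&opts);
+ *	... fill opts from the mount data ...
+ *	security_free_mnt_opts(&opts);
+ */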
+
+/**
+ * struct security_operations - main security structure
+ *
+ * Security module identifier.
+ *
+ * @name:
+ * A string that acts as a unique identifier for the LSM with max number
+ * of characters = SECURITY_NAME_MAX.
+ *
+ * Security hooks for program execution operations.
+ *
+ * @bprm_set_creds:
+ * Save security information in the bprm->security field, typically based
+ * on information about the bprm->file, for later use by the apply_creds
+ * hook. This hook may also optionally check permissions (e.g. for
+ * transitions between security domains).
+ * This hook may be called multiple times during a single execve, e.g. for
+ * interpreters. The hook can tell whether it has already been called by
+ * checking to see if @bprm->security is non-NULL. If so, then the hook
+ * may decide either to retain the security information saved earlier or
+ * to replace it.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_check_security:
+ * This hook mediates the point when a search for a binary handler will
+ * begin. It allows a check of the @bprm->security value which is set in the
+ * preceding set_creds call. The primary difference from set_creds is
+ * that the argv list and envp list are reliably available in @bprm. This
+ * hook may be called multiple times during a single execve; and in each
+ * pass set_creds is called first.
+ * @bprm contains the linux_binprm structure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @bprm_committing_creds:
+ * Prepare to install the new security attributes of a process being
+ * transformed by an execve operation, based on the old credentials
+ * pointed to by @current->cred and the information set in @bprm->cred by
+ * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
+ * This hook is a good place to perform state changes on the process such
+ * as closing open file descriptors to which access will no longer be
+ * granted when the attributes are changed. This is called immediately
+ * before commit_creds().
+ * @bprm_committed_creds:
+ * Tidy up after the installation of the new security attributes of a
+ * process being transformed by an execve operation. The new credentials
+ * have, by this point, been set to @current->cred. @bprm points to the
+ * linux_binprm structure. This hook is a good place to perform state
+ * changes on the process such as clearing out non-inheritable signal
+ * state. This is called immediately after commit_creds().
+ * @bprm_secureexec:
+ * Return a boolean value (0 or 1) indicating whether a "secure exec"
+ * is required. The flag is passed in the auxiliary table
+ * on the initial stack to the ELF interpreter to indicate whether libc
+ * should enable secure mode.
+ * @bprm contains the linux_binprm structure.
+ *
+ * Security hooks for filesystem operations.
+ *
+ * @sb_alloc_security:
+ * Allocate and attach a security structure to the sb->s_security field.
+ * The s_security field is initialized to NULL when the structure is
+ * allocated.
+ * @sb contains the super_block structure to be modified.
+ * Return 0 if operation was successful.
+ * @sb_free_security:
+ * Deallocate and clear the sb->s_security field.
+ * @sb contains the super_block structure to be modified.
+ * @sb_statfs:
+ * Check permission before obtaining filesystem statistics for the @mnt
+ * mountpoint.
+ * @dentry is a handle on the superblock for the filesystem.
+ * Return 0 if permission is granted.
+ * @sb_mount:
+ * Check permission before an object specified by @dev_name is mounted on
+ * the mount point named by @nd. For an ordinary mount, @dev_name
+ * identifies a device if the file system type requires a device. For a
+ * remount (@flags & MS_REMOUNT), @dev_name is irrelevant. For a
+ * loopback/bind mount (@flags & MS_BIND), @dev_name identifies the
+ * pathname of the object being mounted.
+ * @dev_name contains the name for object being mounted.
+ * @path contains the path for mount point object.
+ * @type contains the filesystem type.
+ * @flags contains the mount flags.
+ * @data contains the filesystem-specific data.
+ * Return 0 if permission is granted.
+ * @sb_copy_data:
+ * Allow mount option data to be copied prior to parsing by the filesystem,
+ * so that the security module can extract security-specific mount
+ * options cleanly (a filesystem may modify the data e.g. with strsep()).
+ * This also allows the original mount data to be stripped of security-
+ * specific options to avoid having to make filesystems aware of them.
+ * @type the type of filesystem being mounted.
+ * @orig the original mount data copied from userspace.
+ * @copy copied data which will be passed to the security module.
+ * Returns 0 if the copy was successful.
+ * @sb_remount:
+ * Extracts security system specific mount options and verifies no changes
+ * are being made to those options.
+ * @sb superblock being remounted
+ * @data contains the filesystem-specific data.
+ * Return 0 if permission is granted.
+ * @sb_umount:
+ * Check permission before the @mnt file system is unmounted.
+ * @mnt contains the mounted file system.
+ * @flags contains the unmount flags, e.g. MNT_FORCE.
+ * Return 0 if permission is granted.
+ * @sb_pivotroot:
+ * Check permission before pivoting the root filesystem.
+ * @old_path contains the path for the new location of the current root (put_old).
+ * @new_path contains the path for the new root (new_root).
+ * Return 0 if permission is granted.
+ * @sb_set_mnt_opts:
+ * Set the security relevant mount options used for a superblock
+ * @sb the superblock to set security mount options for
+ * @opts binary data structure containing all lsm mount data
+ * @sb_clone_mnt_opts:
+ * Copy all security options from a given superblock to another
+ * @oldsb old superblock which contain information to clone
+ * @newsb new superblock which needs to be filled in
+ * @sb_parse_opts_str:
+ * Parse a string of security data filling in the opts structure
+ * @options string containing all mount options known by the LSM
+ * @opts binary data structure usable by the LSM
+ * @dentry_init_security:
+ * Compute a context for a dentry, as the inode is not yet available
+ * (NFSv4 has no label backed by an EA anyway).
+ * @dentry dentry to use in calculating the context.
+ * @mode mode used to determine resource type.
+ * @name name of the last path component used to create file
+ * @ctx pointer to place the pointer to the resulting context in.
+ * @ctxlen point to place the length of the resulting context.
+ *
+ *
+ * Security hooks for inode operations.
+ *
+ * @inode_alloc_security:
+ * Allocate and attach a security structure to @inode->i_security. The
+ * i_security field is initialized to NULL when the inode structure is
+ * allocated.
+ * @inode contains the inode structure.
+ * Return 0 if operation was successful.
+ * @inode_free_security:
+ * @inode contains the inode structure.
+ * Deallocate the inode security structure and set @inode->i_security to
+ * NULL.
+ * @inode_init_security:
+ * Obtain the security attribute name suffix and value to set on a newly
+ * created inode and set up the incore security field for the new inode.
+ * This hook is called by the fs code as part of the inode creation
+ * transaction and provides for atomic labeling of the inode, unlike
+ * the post_create/mkdir/... hooks called by the VFS. The hook function
+ * is expected to allocate the name and value via kmalloc, with the caller
+ * being responsible for calling kfree after using them.
+ * If the security module does not use security attributes or does
+ * not wish to put a security attribute on this particular inode,
+ * then it should return -EOPNOTSUPP to skip this processing.
+ * @inode contains the inode structure of the newly created inode.
+ * @dir contains the inode structure of the parent directory.
+ * @qstr contains the last path component of the new object
+ * @name will be set to the allocated name suffix (e.g. selinux).
+ * @value will be set to the allocated attribute value.
+ * @len will be set to the length of the value.
+ * Returns 0 if @name and @value have been successfully set,
+ * -EOPNOTSUPP if no security attribute is needed, or
+ * -ENOMEM on memory allocation failure.
+ * @inode_create:
+ * Check permission to create a regular file.
+ * @dir contains inode structure of the parent of the new file.
+ * @dentry contains the dentry structure for the file to be created.
+ * @mode contains the file mode of the file to be created.
+ * Return 0 if permission is granted.
+ * @inode_link:
+ * Check permission before creating a new hard link to a file.
+ * @old_dentry contains the dentry structure for an existing link to the file.
+ * @dir contains the inode structure of the parent directory of the new link.
+ * @new_dentry contains the dentry structure for the new link.
+ * Return 0 if permission is granted.
+ * @path_link:
+ * Check permission before creating a new hard link to a file.
+ * @old_dentry contains the dentry structure for an existing link
+ * to the file.
+ * @new_dir contains the path structure of the parent directory of
+ * the new link.
+ * @new_dentry contains the dentry structure for the new link.
+ * Return 0 if permission is granted.
+ * @inode_unlink:
+ * Check the permission to remove a hard link to a file.
+ * @dir contains the inode structure of parent directory of the file.
+ * @dentry contains the dentry structure for file to be unlinked.
+ * Return 0 if permission is granted.
+ * @path_unlink:
+ * Check the permission to remove a hard link to a file.
+ * @dir contains the path structure of parent directory of the file.
+ * @dentry contains the dentry structure for file to be unlinked.
+ * Return 0 if permission is granted.
+ * @inode_symlink:
+ * Check the permission to create a symbolic link to a file.
+ * @dir contains the inode structure of parent directory of the symbolic link.
+ * @dentry contains the dentry structure of the symbolic link.
+ * @old_name contains the pathname of file.
+ * Return 0 if permission is granted.
+ * @path_symlink:
+ * Check the permission to create a symbolic link to a file.
+ * @dir contains the path structure of parent directory of
+ * the symbolic link.
+ * @dentry contains the dentry structure of the symbolic link.
+ * @old_name contains the pathname of file.
+ * Return 0 if permission is granted.
+ * @inode_mkdir:
+ * Check permissions to create a new directory in the existing directory
+ * associated with inode structure @dir.
+ * @dir contains the inode structure of parent of the directory to be created.
+ * @dentry contains the dentry structure of new directory.
+ * @mode contains the mode of new directory.
+ * Return 0 if permission is granted.
+ * @path_mkdir:
+ * Check permissions to create a new directory in the existing directory
+ * associated with path structure @path.
+ * @dir contains the path structure of parent of the directory
+ * to be created.
+ * @dentry contains the dentry structure of new directory.
+ * @mode contains the mode of new directory.
+ * Return 0 if permission is granted.
+ * @inode_rmdir:
+ * Check the permission to remove a directory.
+ * @dir contains the inode structure of parent of the directory to be removed.
+ * @dentry contains the dentry structure of directory to be removed.
+ * Return 0 if permission is granted.
+ * @path_rmdir:
+ * Check the permission to remove a directory.
+ * @dir contains the path structure of parent of the directory to be
+ * removed.
+ * @dentry contains the dentry structure of directory to be removed.
+ * Return 0 if permission is granted.
+ * @inode_mknod:
+ * Check permissions when creating a special file (or a socket or a fifo
+ * file created via the mknod system call). Note that if mknod operation
+ * is being done for a regular file, then the create hook will be called
+ * and not this hook.
+ * @dir contains the inode structure of parent of the new file.
+ * @dentry contains the dentry structure of the new file.
+ * @mode contains the mode of the new file.
+ * @dev contains the device number.
+ * Return 0 if permission is granted.
+ * @path_mknod:
+ * Check permissions when creating a file. Note that this hook is called
+ * even if mknod operation is being done for a regular file.
+ * @dir contains the path structure of parent of the new file.
+ * @dentry contains the dentry structure of the new file.
+ * @mode contains the mode of the new file.
+ * @dev contains the undecoded device number. Use new_decode_dev() to get
+ * the decoded device number.
+ * Return 0 if permission is granted.
+ * @inode_rename:
+ * Check for permission to rename a file or directory.
+ * @old_dir contains the inode structure for parent of the old link.
+ * @old_dentry contains the dentry structure of the old link.
+ * @new_dir contains the inode structure for parent of the new link.
+ * @new_dentry contains the dentry structure of the new link.
+ * Return 0 if permission is granted.
+ * @path_rename:
+ * Check for permission to rename a file or directory.
+ * @old_dir contains the path structure for parent of the old link.
+ * @old_dentry contains the dentry structure of the old link.
+ * @new_dir contains the path structure for parent of the new link.
+ * @new_dentry contains the dentry structure of the new link.
+ * Return 0 if permission is granted.
+ * @path_chmod:
+ * Check for permission to change the DAC permissions of a file or directory.
+ * @path contains the path structure of the file or directory.
+ * @mode contains the new DAC mode bits.
+ * Return 0 if permission is granted.
+ * @path_chown:
+ * Check for permission to change owner/group of a file or directory.
+ * @path contains the path structure.
+ * @uid contains new owner's ID.
+ * @gid contains new group's ID.
+ * Return 0 if permission is granted.
+ * @path_chroot:
+ * Check for permission to change root directory.
+ * @path contains the path structure.
+ * Return 0 if permission is granted.
+ * @inode_readlink:
+ * Check the permission to read the symbolic link.
+ * @dentry contains the dentry structure for the file link.
+ * Return 0 if permission is granted.
+ * @inode_follow_link:
+ * Check permission to follow a symbolic link when looking up a pathname.
+ * @dentry contains the dentry structure for the link.
+ * @nd contains the nameidata structure for the parent directory.
+ * Return 0 if permission is granted.
+ * @inode_permission:
+ * Check permission before accessing an inode. This hook is called by the
+ * existing Linux permission function, so a security module can use it to
+ * provide additional checking for existing Linux permission checks.
+ * Notice that this hook is called when a file is opened (as well as many
+ * other operations), whereas the file_security_ops permission hook is
+ * called when the actual read/write operations are performed.
+ * @inode contains the inode structure to check.
+ * @mask contains the permission mask.
+ * Return 0 if permission is granted.
+ * @inode_setattr:
+ * Check permission before setting file attributes. Note that the kernel
+ * call to notify_change is performed from several locations, whenever
+ * file attributes change (such as when a file is truncated, chown/chmod
+ * operations, transferring disk quotas, etc).
+ * @dentry contains the dentry structure for the file.
+ * @attr is the iattr structure containing the new file attributes.
+ * Return 0 if permission is granted.
+ * @path_truncate:
+ * Check permission before truncating a file.
+ * @path contains the path structure for the file.
+ * Return 0 if permission is granted.
+ * @inode_getattr:
+ * Check permission before obtaining file attributes.
+ * @path contains the path structure for the file.
+ * Return 0 if permission is granted.
+ * @inode_setxattr:
+ * Check permission before setting the extended attribute
+ * @value identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_post_setxattr:
+ * Update inode security field after successful setxattr operation.
+ * The extended attribute @value identified by @name has been set on @dentry.
+ * @inode_getxattr:
+ * Check permission before obtaining the extended attributes
+ * identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_listxattr:
+ * Check permission before obtaining the list of extended attribute
+ * names for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_removexattr:
+ * Check permission before removing the extended attribute
+ * identified by @name for @dentry.
+ * Return 0 if permission is granted.
+ * @inode_getsecurity:
+ * Retrieve a copy of the extended attribute representation of the
+ * security label associated with @name for @inode via @buffer. Note that
+ * @name is the remainder of the attribute name after the security prefix
+ * has been removed. @alloc is used to specify whether the call should
+ * return a value via the buffer or just the value length.
+ * Return size of buffer on success.
+ * @inode_setsecurity:
+ * Set the security label associated with @name for @inode from the
+ * extended attribute value @value. @size indicates the size of the
+ * @value in bytes. @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
+ * Note that @name is the remainder of the attribute name after the
+ * security. prefix has been removed.
+ * Return 0 on success.
+ * @inode_listsecurity:
+ * Copy the extended attribute names for the security labels
+ * associated with @inode into @buffer. The maximum size of @buffer
+ * is specified by @buffer_size. @buffer may be NULL to request
+ * the size of the buffer required.
+ * Returns number of bytes used/required on success.
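+ *
+ * For illustration, a single-attribute module could satisfy this
+ * contract along the following lines, where "security.example" is a
+ * hypothetical attribute name and the returned length includes the NUL:
+ *
+ *     static int example_inode_listsecurity(struct inode *inode,
+ *                                           char *buffer,
+ *                                           size_t buffer_size)
+ *     {
+ *             static const char name[] = "security.example";
+ *             const size_t len = sizeof(name);
+ *
+ *             if (buffer && len <= buffer_size)
+ *                     memcpy(buffer, name, len);
+ *             return len;
+ *     }
+ *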
+ * @inode_need_killpriv:
+ * Called when an inode has been changed.
+ * @dentry is the dentry being changed.
+ * Return <0 on error to abort the inode change operation.
+ * Return 0 if inode_killpriv does not need to be called.
+ * Return >0 if inode_killpriv does need to be called.
+ * @inode_killpriv:
+ * The setuid bit is being removed. Remove similar security labels.
+ * Called with the dentry->d_inode->i_mutex held.
+ * @dentry is the dentry being changed.
+ * Return 0 on success. If an error is returned, then the operation
+ * causing the setuid bit removal fails.
+ * @inode_getsecid:
+ * Get the secid associated with the node.
+ * @inode contains a pointer to the inode.
+ * @secid contains a pointer to the location where result will be saved.
+ * In case of failure, @secid will be set to zero.
+ *
+ * Security hooks for file operations
+ *
+ * @file_permission:
+ * Check file permissions before accessing an open file. This hook is
+ * called by various operations that read or write files. A security
+ * module can use this hook to perform additional checking on these
+ * operations, e.g. to revalidate permissions on use to support privilege
+ * bracketing or policy changes. Notice that this hook is used when the
+ * actual read/write operations are performed, whereas the
+ * inode_security_ops hook is called when a file is opened (as well as
+ * many other operations).
+ * Caveat: Although this hook can be used to revalidate permissions for
+ * various system call operations that read or write files, it does not
+ * address the revalidation of permissions for memory-mapped files.
+ * Security modules must handle this separately if they need such
+ * revalidation.
+ * @file contains the file structure being accessed.
+ * @mask contains the requested permissions.
+ * Return 0 if permission is granted.
+ * @file_alloc_security:
+ * Allocate and attach a security structure to the file->f_security field.
+ * The security field is initialized to NULL when the structure is first
+ * created.
+ * @file contains the file structure to secure.
+ * Return 0 if the hook is successful and permission is granted.
+ * @file_free_security:
+ * Deallocate and free any security structures stored in file->f_security.
+ * @file contains the file structure being modified.
+ * @file_ioctl:
+ * @file contains the file structure.
+ * @cmd contains the operation to perform.
+ * @arg contains the operational arguments.
+ * Check permission for an ioctl operation on @file. Note that @arg
+ * sometimes represents a user space pointer; in other cases, it may be a
+ * simple integer value. When @arg represents a user space pointer, it
+ * should never be used by the security module.
+ * Return 0 if permission is granted.
+ * @mmap_addr:
+ * Check permissions for a mmap operation at @addr.
+ * @addr contains virtual address that will be used for the operation.
+ * Return 0 if permission is granted.
+ * @mmap_file:
+ * Check permissions for a mmap operation. The @file may be NULL, e.g.
+ * if mapping anonymous memory.
+ * @file contains the file structure for file to map (may be NULL).
+ * @reqprot contains the protection requested by the application.
+ * @prot contains the protection that will be applied by the kernel.
+ * @flags contains the operational flags.
+ * Return 0 if permission is granted.
+ * @file_mprotect:
+ * Check permissions before changing memory access permissions.
+ * @vma contains the memory region to modify.
+ * @reqprot contains the protection requested by the application.
+ * @prot contains the protection that will be applied by the kernel.
+ * Return 0 if permission is granted.
+ * @file_lock:
+ * Check permission before performing file locking operations.
+ * Note: this hook mediates both flock and fcntl style locks.
+ * @file contains the file structure.
+ * @cmd contains the posix-translated lock operation to perform
+ * (e.g. F_RDLCK, F_WRLCK).
+ * Return 0 if permission is granted.
+ * @file_fcntl:
+ * Check permission before allowing the file operation specified by @cmd
+ * from being performed on the file @file. Note that @arg sometimes
+ * represents a user space pointer; in other cases, it may be a simple
+ * integer value. When @arg represents a user space pointer, it should
+ * never be used by the security module.
+ * @file contains the file structure.
+ * @cmd contains the operation to be performed.
+ * @arg contains the operational arguments.
+ * Return 0 if permission is granted.
+ * @file_set_fowner:
+ * Save owner security information (typically from current->security) in
+ * file->f_security for later use by the send_sigiotask hook.
+ * @file contains the file structure to update.
+ * Return 0 on success.
+ * @file_send_sigiotask:
+ * Check permission for the file owner @fown to send SIGIO or SIGURG to the
+ * process @tsk. Note that this hook is sometimes called from interrupt context.
+ * Note that the fown_struct, @fown, is never outside the context of a
+ * struct file, so the file structure (and associated security information)
+ * can always be obtained:
+ * container_of(fown, struct file, f_owner)
+ * @tsk contains the structure of task receiving signal.
+ * @fown contains the file owner information.
+ * @sig is the signal that will be sent. When 0, kernel sends SIGIO.
+ * Return 0 if permission is granted.
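+ *
+ * For example, a module could recover its per-file state as sketched
+ * below; struct example_file_security and example_may_signal() are
+ * hypothetical, and a zero @sig is treated as SIGIO:
+ *
+ *     static int example_file_send_sigiotask(struct task_struct *tsk,
+ *                                            struct fown_struct *fown,
+ *                                            int sig)
+ *     {
+ *             struct file *file = container_of(fown, struct file, f_owner);
+ *             struct example_file_security *fsec = file->f_security;
+ *
+ *             return example_may_signal(fsec, tsk, sig ? sig : SIGIO);
+ *     }
+ *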
+ * @file_receive:
+ * This hook allows security modules to control the ability of a process
+ * to receive an open file descriptor via socket IPC.
+ * @file contains the file structure being received.
+ * Return 0 if permission is granted.
+ * @file_open:
+ * Save open-time permission checking state for later use upon
+ * file_permission, and recheck access if anything has changed
+ * since inode_permission.
+ *
+ * Security hooks for task operations.
+ *
+ * @task_create:
+ * Check permission before creating a child process. See the clone(2)
+ * manual page for definitions of the @clone_flags.
+ * @clone_flags contains the flags indicating what should be shared.
+ * Return 0 if permission is granted.
+ * @task_free:
+ * @task task being freed
+ * Handle release of task-related resources. (Note that this can be called
+ * from interrupt context.)
+ * @cred_alloc_blank:
+ * @cred points to the credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Only allocate sufficient memory and attach to @cred such that
+ * cred_transfer() will not get ENOMEM.
+ * @cred_free:
+ * @cred points to the credentials.
+ * Deallocate and clear the cred->security field in a set of credentials.
+ * @cred_prepare:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * @gfp indicates the atomicity of any memory allocations.
+ * Prepare a new set of credentials by copying the data from the old set.
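+ *
+ * A minimal sketch, assuming the module keeps a fixed-size blob
+ * (struct example_cred_security, hypothetical) in cred->security:
+ *
+ *     static int example_cred_prepare(struct cred *new,
+ *                                     const struct cred *old, gfp_t gfp)
+ *     {
+ *             struct example_cred_security *sec;
+ *
+ *             sec = kmemdup(old->security, sizeof(*sec), gfp);
+ *             if (!sec)
+ *                     return -ENOMEM;
+ *             new->security = sec;
+ *             return 0;
+ *     }
+ *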
+ * @cred_transfer:
+ * @new points to the new credentials.
+ * @old points to the original credentials.
+ * Transfer data from original creds to new creds
+ * @kernel_act_as:
+ * Set the credentials for a kernel service to act as (subjective context).
+ * @new points to the credentials to be modified.
+ * @secid specifies the security ID to be set
+ * The current task must be the one that nominated @secid.
+ * Return 0 if successful.
+ * @kernel_create_files_as:
+ * Set the file creation context in a set of credentials to be the same as
+ * the objective context of the specified inode.
+ * @new points to the credentials to be modified.
+ * @inode points to the inode to use as a reference.
+ * The current task must be the one that nominated @inode.
+ * Return 0 if successful.
+ * @kernel_fw_from_file:
+ * Load firmware from userspace (not called for built-in firmware).
+ * @file contains the file structure pointing to the file containing
+ * the firmware to load. This argument will be NULL if the firmware
+ * was loaded via the uevent-triggered blob-based interface exposed
+ * by CONFIG_FW_LOADER_USER_HELPER.
+ * @buf pointer to buffer containing firmware contents.
+ * @size length of the firmware contents.
+ * Return 0 if permission is granted.
+ * @kernel_module_request:
+ * Ability to trigger the kernel to automatically upcall to userspace for
+ * userspace to load a kernel module with the given name.
+ * @kmod_name name of the module requested by the kernel
+ * Return 0 if successful.
+ * @kernel_module_from_file:
+ * Load a kernel module from userspace.
+ * @file contains the file structure pointing to the file containing
+ * the kernel module to load. If the module is being loaded from a blob,
+ * this argument will be NULL.
+ * Return 0 if permission is granted.
+ * @task_fix_setuid:
+ * Update the module's state after setting one or more of the user
+ * identity attributes of the current process. The @flags parameter
+ * indicates which of the set*uid system calls invoked this hook.
+ * @new is the set of credentials that will be installed. Modifications
+ * should be made to this rather than to @current->cred.
+ * @old is the set of credentials that are being replaced.
+ * @flags contains one of the LSM_SETID_* values.
+ * Return 0 on success.
+ * @task_setpgid:
+ * Check permission before setting the process group identifier of the
+ * process @p to @pgid.
+ * @p contains the task_struct for process being modified.
+ * @pgid contains the new pgid.
+ * Return 0 if permission is granted.
+ * @task_getpgid:
+ * Check permission before getting the process group identifier of the
+ * process @p.
+ * @p contains the task_struct for the process.
+ * Return 0 if permission is granted.
+ * @task_getsid:
+ * Check permission before getting the session identifier of the process
+ * @p.
+ * @p contains the task_struct for the process.
+ * Return 0 if permission is granted.
+ * @task_getsecid:
+ * Retrieve the security identifier of the process @p.
+ * @p contains the task_struct for the process; the result is placed into @secid.
+ * In case of failure, @secid will be set to zero.
+ *
+ * @task_setnice:
+ * Check permission before setting the nice value of @p to @nice.
+ * @p contains the task_struct of process.
+ * @nice contains the new nice value.
+ * Return 0 if permission is granted.
+ * @task_setioprio:
+ * Check permission before setting the ioprio value of @p to @ioprio.
+ * @p contains the task_struct of process.
+ * @ioprio contains the new ioprio value
+ * Return 0 if permission is granted.
+ * @task_getioprio:
+ * Check permission before getting the ioprio value of @p.
+ * @p contains the task_struct of process.
+ * Return 0 if permission is granted.
+ * @task_setrlimit:
+ * Check permission before setting the resource limits of the current
+ * process for @resource to @new_rlim. The old resource limit values can
+ * be examined by dereferencing (current->signal->rlim + resource).
+ * @resource contains the resource whose limit is being set.
+ * @new_rlim contains the new limits for @resource.
+ * Return 0 if permission is granted.
+ * @task_setscheduler:
+ * Check permission before setting scheduling policy and/or parameters of
+ * process @p based on @policy and @lp.
+ * @p contains the task_struct for process.
+ * @policy contains the scheduling policy.
+ * @lp contains the scheduling parameters.
+ * Return 0 if permission is granted.
+ * @task_getscheduler:
+ * Check permission before obtaining scheduling information for process
+ * @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_movememory:
+ * Check permission before moving memory owned by process @p.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_kill:
+ * Check permission before sending signal @sig to @p. @info can be NULL,
+ * the constant 1, or a pointer to a siginfo structure. If @info is 1 or
+ * SI_FROMKERNEL(info) is true, then the signal should be viewed as coming
+ * from the kernel and should typically be permitted.
+ * SIGIO signals are handled separately by the send_sigiotask hook in
+ * file_security_ops.
+ * @p contains the task_struct for process.
+ * @info contains the signal information.
+ * @sig contains the signal value.
+ * @secid contains the sid of the process where the signal originated
+ * Return 0 if permission is granted.
+ * @task_wait:
+ * Check permission before allowing a process to reap a child process @p
+ * and collect its status information.
+ * @p contains the task_struct for process.
+ * Return 0 if permission is granted.
+ * @task_prctl:
+ * Check permission before performing a process control operation on the
+ * current process.
+ * @option contains the operation.
+ * @arg2 contains an argument.
+ * @arg3 contains an argument.
+ * @arg4 contains an argument.
+ * @arg5 contains an argument.
+ * Return -ENOSYS if no-one wanted to handle this op, any other value to
+ * cause prctl() to return immediately with that value.
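+ *
+ * For instance, a module handling a single hypothetical option
+ * PR_EXAMPLE_OP might look like the sketch below, returning -ENOSYS
+ * for everything it does not recognise; example_handle_op() is
+ * likewise hypothetical:
+ *
+ *     static int example_task_prctl(int option, unsigned long arg2,
+ *                                   unsigned long arg3,
+ *                                   unsigned long arg4,
+ *                                   unsigned long arg5)
+ *     {
+ *             if (option != PR_EXAMPLE_OP)
+ *                     return -ENOSYS;
+ *             return example_handle_op(arg2, arg3);
+ *     }
+ *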
+ * @task_to_inode:
+ * Set the security attributes for an inode based on an associated task's
+ * security attributes, e.g. for /proc/pid inodes.
+ * @p contains the task_struct for the task.
+ * @inode contains the inode structure for the inode.
+ *
+ * Security hooks for Netlink messaging.
+ *
+ * @netlink_send:
+ * Save security information for a netlink message so that permission
+ * checking can be performed when the message is processed. The security
+ * information can be saved using the eff_cap field of the
+ * netlink_skb_parms structure. Also may be used to provide fine
+ * grained control over message transmission.
+ * @sk associated sock of task sending the message.
+ * @skb contains the sk_buff structure for the netlink message.
+ * Return 0 if the information was successfully saved and message
+ * is allowed to be transmitted.
+ *
+ * Security hooks for Unix domain networking.
+ *
+ * @unix_stream_connect:
+ * Check permissions before establishing a Unix domain stream connection
+ * between @sock and @other.
+ * @sock contains the sock structure.
+ * @other contains the peer sock structure.
+ * @newsk contains the new sock structure.
+ * Return 0 if permission is granted.
+ * @unix_may_send:
+ * Check permissions before connecting or sending datagrams from @sock to
+ * @other.
+ * @sock contains the socket structure.
+ * @other contains the peer socket structure.
+ * Return 0 if permission is granted.
+ *
+ * The @unix_stream_connect and @unix_may_send hooks were necessary because
+ * Linux provides an alternative to the conventional file name space for Unix
+ * domain sockets. Whereas binding and connecting to sockets in the file name
+ * space is mediated by the typical file permissions (and caught by the mknod
+ * and permission hooks in inode_security_ops), binding and connecting to
+ * sockets in the abstract name space is completely unmediated. Sufficient
+ * control of Unix domain sockets in the abstract name space isn't possible
+ * using only the socket layer hooks, since we need to know the actual target
+ * socket, which is not looked up until we are inside the af_unix code.
+ *
+ * Security hooks for socket operations.
+ *
+ * @socket_create:
+ * Check permissions prior to creating a new socket.
+ * @family contains the requested protocol family.
+ * @type contains the requested communications type.
+ * @protocol contains the requested protocol.
+ * @kern set to 1 if a kernel socket.
+ * Return 0 if permission is granted.
+ * @socket_post_create:
+ * This hook allows a module to update or allocate a per-socket security
+ * structure. Note that the security field was not added directly to the
+ * socket structure, but rather, the socket security information is stored
+ * in the associated inode. Typically, the inode alloc_security hook will
+ * allocate and attach security information to
+ * sock->inode->i_security. This hook may be used to update the
+ * sock->inode->i_security field with additional information that wasn't
+ * available when the inode was allocated.
+ * @sock contains the newly created socket structure.
+ * @family contains the requested protocol family.
+ * @type contains the requested communications type.
+ * @protocol contains the requested protocol.
+ * @kern set to 1 if a kernel socket.
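+ *
+ * As a sketch, a module could label the backing inode as below; the
+ * struct example_isec layout and example_label_socket() are
+ * hypothetical, while SOCK_INODE() is the helper from net/sock.h:
+ *
+ *     static int example_socket_post_create(struct socket *sock,
+ *                                           int family, int type,
+ *                                           int protocol, int kern)
+ *     {
+ *             struct example_isec *isec = SOCK_INODE(sock)->i_security;
+ *
+ *             example_label_socket(isec, family, type, protocol, kern);
+ *             return 0;
+ *     }
+ *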
+ * @socket_bind:
+ * Check permission before socket protocol layer bind operation is
+ * performed and the socket @sock is bound to the address specified in the
+ * @address parameter.
+ * @sock contains the socket structure.
+ * @address contains the address to bind to.
+ * @addrlen contains the length of address.
+ * Return 0 if permission is granted.
+ * @socket_connect:
+ * Check permission before socket protocol layer connect operation
+ * attempts to connect socket @sock to a remote address, @address.
+ * @sock contains the socket structure.
+ * @address contains the address of remote endpoint.
+ * @addrlen contains the length of address.
+ * Return 0 if permission is granted.
+ * @socket_listen:
+ * Check permission before socket protocol layer listen operation.
+ * @sock contains the socket structure.
+ * @backlog contains the maximum length for the pending connection queue.
+ * Return 0 if permission is granted.
+ * @socket_accept:
+ * Check permission before accepting a new connection. Note that the new
+ * socket, @newsock, has been created and some information copied to it,
+ * but the accept operation has not actually been performed.
+ * @sock contains the listening socket structure.
+ * @newsock contains the newly created server socket for connection.
+ * Return 0 if permission is granted.
+ * @socket_sendmsg:
+ * Check permission before transmitting a message to another socket.
+ * @sock contains the socket structure.
+ * @msg contains the message to be transmitted.
+ * @size contains the size of message.
+ * Return 0 if permission is granted.
+ * @socket_recvmsg:
+ * Check permission before receiving a message from a socket.
+ * @sock contains the socket structure.
+ * @msg contains the message structure.
+ * @size contains the size of message structure.
+ * @flags contains the operational flags.
+ * Return 0 if permission is granted.
+ * @socket_getsockname:
+ * Check permission before the local address (name) of the socket object
+ * @sock is retrieved.
+ * @sock contains the socket structure.
+ * Return 0 if permission is granted.
+ * @socket_getpeername:
+ * Check permission before the remote address (name) of a socket object
+ * @sock is retrieved.
+ * @sock contains the socket structure.
+ * Return 0 if permission is granted.
+ * @socket_getsockopt:
+ * Check permissions before retrieving the options associated with socket
+ * @sock.
+ * @sock contains the socket structure.
+ * @level contains the protocol level to retrieve option from.
+ * @optname contains the name of option to retrieve.
+ * Return 0 if permission is granted.
+ * @socket_setsockopt:
+ * Check permissions before setting the options associated with socket
+ * @sock.
+ * @sock contains the socket structure.
+ * @level contains the protocol level to set options for.
+ * @optname contains the name of the option to set.
+ * Return 0 if permission is granted.
+ * @socket_shutdown:
+ * Checks permission before all or part of a connection on the socket
+ * @sock is shut down.
+ * @sock contains the socket structure.
+ * @how contains the flag indicating how future sends and receives are handled.
+ * Return 0 if permission is granted.
+ * @socket_sock_rcv_skb:
+ * Check permissions on incoming network packets. This hook is distinct
+ * from Netfilter's IP input hooks since it is the first time that the
+ * incoming sk_buff @skb has been associated with a particular socket, @sk.
+ * Must not sleep inside this hook because some callers hold spinlocks.
+ * @sk contains the sock (not socket) associated with the incoming sk_buff.
+ * @skb contains the incoming network data.
+ * @socket_getpeersec_stream:
+ * This hook allows the security module to provide peer socket security
+ * state for unix or connected tcp sockets to userspace via getsockopt
+ * SO_PEERSEC. For tcp sockets this can be meaningful if the
+ * socket is associated with an ipsec SA.
+ * @sock is the local socket.
+ * @optval userspace memory where the security state is to be copied.
+ * @optlen userspace int where the module should copy the actual length
+ * of the security state.
+ * @len as input is the maximum length to copy to userspace provided
+ * by the caller.
+ * Return 0 if all is well, otherwise, typical getsockopt return
+ * values.
+ * @socket_getpeersec_dgram:
+ * This hook allows the security module to provide peer socket security
+ * state for udp sockets on a per-packet basis to userspace via
+ * getsockopt SO_PEERSEC. The application must first have indicated
+ * the IP_PASSSEC option via setsockopt. It can then retrieve the
+ * security state returned by this hook for a packet via the SCM_SECURITY
+ * ancillary message type.
+ * @skb is the skbuff for the packet being queried
+ * @secdata is a pointer to a buffer in which to copy the security data
+ * @seclen is the maximum length for @secdata
+ * Return 0 on success, error on failure.
+ * @sk_alloc_security:
+ * Allocate and attach a security structure to the sk->sk_security field,
+ * which is used to copy security attributes between local stream sockets.
+ * @sk_free_security:
+ * Deallocate security structure.
+ * @sk_clone_security:
+ * Clone/copy security structure.
+ * @sk_getsecid:
+ * Retrieve the LSM-specific secid for the sock to enable caching of network
+ * authorizations.
+ * @sock_graft:
+ * Sets the socket's isec sid to the sock's sid.
+ * @inet_conn_request:
+ * Sets the openreq's sid to socket's sid with MLS portion taken from peer sid.
+ * @inet_csk_clone:
+ * Sets the new child socket's sid to the openreq sid.
+ * @inet_conn_established:
+ * Sets the connection's peersid to the secmark on skb.
+ * @secmark_relabel_packet:
+ * Check if the process should be allowed to relabel packets to the
+ * given secid.
+ * @secmark_refcount_inc:
+ * Tells the LSM to increment the number of secmark labeling rules loaded.
+ * @secmark_refcount_dec:
+ * Tells the LSM to decrement the number of secmark labeling rules loaded.
+ * @req_classify_flow:
+ * Sets the flow's sid to the openreq sid.
+ * @tun_dev_alloc_security:
+ * This hook allows a module to allocate a security structure for a TUN
+ * device.
+ * @security pointer to a security structure pointer.
+ * Returns zero on success, negative values on failure.
+ * @tun_dev_free_security:
+ * This hook allows a module to free the security structure for a TUN
+ * device.
+ * @security pointer to the TUN device's security structure
+ * @tun_dev_create:
+ * Check permissions prior to creating a new TUN device.
+ * @tun_dev_attach_queue:
+ * Check permissions prior to attaching to a TUN device queue.
+ * @security pointer to the TUN device's security structure.
+ * @tun_dev_attach:
+ * This hook can be used by the module to update any security state
+ * associated with the TUN device's sock structure.
+ * @sk contains the existing sock structure.
+ * @security pointer to the TUN device's security structure.
+ * @tun_dev_open:
+ * This hook can be used by the module to update any security state
+ * associated with the TUN device's security structure.
+ * @security pointer to the TUN device's security structure.
+ * @skb_owned_by:
+ * This hook sets the packet's owning sock.
+ * @skb is the packet.
+ * @sk the sock which owns the packet.
+ *
+ * Security hooks for XFRM operations.
+ *
+ * @xfrm_policy_alloc_security:
+ * @ctxp is a pointer to the xfrm_sec_ctx being added to Security Policy
+ * Database used by the XFRM system.
+ * @sec_ctx contains the security context information being provided by
+ * the user-level policy update program (e.g., setkey).
+ * Allocate a security structure to the xp->security field; the security
+ * field is initialized to NULL when the xfrm_policy is allocated.
+ * @gfp is to specify the context for the allocation.
+ * Return 0 if operation was successful (memory to allocate, legal context).
+ * @xfrm_policy_clone_security:
+ * @old_ctx contains an existing xfrm_sec_ctx.
+ * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
+ * Allocate a security structure in new_ctxp that contains the
+ * information from the old_ctx structure.
+ * Return 0 if operation was successful (memory to allocate).
+ * @xfrm_policy_free_security:
+ * @ctx contains the xfrm_sec_ctx
+ * Deallocate xp->security.
+ * @xfrm_policy_delete_security:
+ * @ctx contains the xfrm_sec_ctx.
+ * Authorize deletion of xp->security.
+ * @xfrm_state_alloc:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @sec_ctx contains the security context information being provided by
+ * the user-level SA generation program (e.g., setkey or racoon).
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to sec_ctx. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_alloc_acquire:
+ * @x contains the xfrm_state being added to the Security Association
+ * Database by the XFRM system.
+ * @polsec contains the policy's security context.
+ * @secid contains the secid from which to take the mls portion of the
+ * context.
+ * Allocate a security structure to the x->security field; the security
+ * field is initialized to NULL when the xfrm_state is allocated. Set the
+ * context to correspond to secid. Return 0 if operation was successful
+ * (memory to allocate, legal context).
+ * @xfrm_state_free_security:
+ * @x contains the xfrm_state.
+ * Deallocate x->security.
+ * @xfrm_state_delete_security:
+ * @x contains the xfrm_state.
+ * Authorize deletion of x->security.
+ * @xfrm_policy_lookup:
+ * @ctx contains the xfrm_sec_ctx for which the access control is being
+ * checked.
+ * @fl_secid contains the flow security label that is used to authorize
+ * access to the policy xp.
+ * @dir contains the direction of the flow (input or output).
+ * Check permission when a flow selects a xfrm_policy for processing
+ * XFRMs on a packet. The hook is called when selecting either a
+ * per-socket policy or a generic xfrm policy.
+ * Return 0 if permission is granted, -ESRCH otherwise, or -errno
+ * on other errors.
+ * @xfrm_state_pol_flow_match:
+ * @x contains the state to match.
+ * @xp contains the policy to check for a match.
+ * @fl contains the flow to check for a match.
+ * Return 1 if there is a match.
+ * @xfrm_decode_session:
+ * @skb points to skb to decode.
+ * @secid points to the flow key secid to set.
+ * @ckall says if all xfrms used should be checked for same secid.
+ * Return 0 if ckall is zero or all xfrms used have the same secid.
+ *
+ * Security hooks affecting all Key Management operations
+ *
+ * @key_alloc:
+ * Permit allocation of a key and assign security data. Note that key does
+ * not have a serial number assigned at this point.
+ * @key points to the key.
+ * @flags is the allocation flags
+ * Return 0 if permission is granted, -ve error otherwise.
+ * @key_free:
+ * Notification of destruction; free security data.
+ * @key points to the key.
+ * No return value.
+ * @key_permission:
+ * See whether a specific operational right is granted to a process on a
+ * key.
+ * @key_ref refers to the key (key pointer + possession attribute bit).
+ * @cred points to the credentials to provide the context against which to
+ * evaluate the security data on the key.
+ * @perm describes the combination of permissions required of this key.
+ * Return 0 if permission is granted, -ve error otherwise.
+ * @key_getsecurity:
+ * Get a textual representation of the security context attached to a key
+ * for the purposes of honouring KEYCTL_GETSECURITY. This function
+ * allocates the storage for the NUL-terminated string and the caller
+ * should free it.
+ * @key points to the key to be queried.
+ * @_buffer points to a pointer that should be set to point to the
+ * resulting string (if no label or an error occurs).
+ * Return the length of the string (including terminating NUL) or -ve if
+ * an error.
+ * May also return 0 (and a NULL buffer pointer) if there is no label.
+ *
+ * Security hooks affecting all System V IPC operations.
+ *
+ * @ipc_permission:
+ * Check permissions for access to IPC
+ * @ipcp contains the kernel IPC permission structure
+ * @flag contains the desired (requested) permission set
+ * Return 0 if permission is granted.
+ * @ipc_getsecid:
+ * Get the secid associated with the ipc object.
+ * @ipcp contains the kernel IPC permission structure.
+ * @secid contains a pointer to the location where result will be saved.
+ * In case of failure, @secid will be set to zero.
+ *
+ * Security hooks for individual messages held in System V IPC message queues
+ * @msg_msg_alloc_security:
+ * Allocate and attach a security structure to the msg->security field.
+ * The security field is initialized to NULL when the structure is first
+ * created.
+ * @msg contains the message structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @msg_msg_free_security:
+ * Deallocate the security structure for this message.
+ * @msg contains the message structure to be modified.
+ *
+ * Security hooks for System V IPC Message Queues
+ *
+ * @msg_queue_alloc_security:
+ * Allocate and attach a security structure to the
+ * msq->q_perm.security field. The security field is initialized to
+ * NULL when the structure is first created.
+ * @msq contains the message queue structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @msg_queue_free_security:
+ * Deallocate security structure for this message queue.
+ * @msq contains the message queue structure to be modified.
+ * @msg_queue_associate:
+ * Check permission when a message queue is requested through the
+ * msgget system call. This hook is only called when returning the
+ * message queue identifier for an existing message queue, not when a
+ * new message queue is created.
+ * @msq contains the message queue to act upon.
+ * @msqflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgctl:
+ * Check permission when a message control operation specified by @cmd
+ * is to be performed on the message queue @msq.
+ * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
+ * @msq contains the message queue to act upon. May be NULL.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgsnd:
+ * Check permission before a message, @msg, is enqueued on the message
+ * queue, @msq.
+ * @msq contains the message queue to send message to.
+ * @msg contains the message to be enqueued.
+ * @msqflg contains operational flags.
+ * Return 0 if permission is granted.
+ * @msg_queue_msgrcv:
+ * Check permission before a message, @msg, is removed from the message
+ * queue, @msq. The @target task structure contains a pointer to the
+ * process that will be receiving the message (not equal to the current
+ * process when inline receives are being performed).
+ * @msq contains the message queue to retrieve message from.
+ * @msg contains the message destination.
+ * @target contains the task structure for recipient process.
+ * @type contains the type of message requested.
+ * @mode contains the operational flags.
+ * Return 0 if permission is granted.
+ *
+ * Security hooks for System V Shared Memory Segments
+ *
+ * @shm_alloc_security:
+ * Allocate and attach a security structure to the shp->shm_perm.security
+ * field. The security field is initialized to NULL when the structure is
+ * first created.
+ * @shp contains the shared memory structure to be modified.
+ * Return 0 if operation was successful and permission is granted.
+ * @shm_free_security:
+ * Deallocate the security struct for this memory segment.
+ * @shp contains the shared memory structure to be modified.
+ * @shm_associate:
+ * Check permission when a shared memory region is requested through the
+ * shmget system call. This hook is only called when returning the shared
+ * memory region identifier for an existing region, not when a new shared
+ * memory region is created.
+ * @shp contains the shared memory structure to be modified.
+ * @shmflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @shm_shmctl:
+ * Check permission when a shared memory control operation specified by
+ * @cmd is to be performed on the shared memory region @shp.
+ * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
+ * @shp contains shared memory structure to be modified.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @shm_shmat:
+ * Check permissions prior to allowing the shmat system call to attach the
+ * shared memory segment @shp to the data segment of the calling process.
+ * The attaching address is specified by @shmaddr.
+ * @shp contains the shared memory structure to be modified.
+ * @shmaddr contains the address to attach memory region to.
+ * @shmflg contains the operational flags.
+ * Return 0 if permission is granted.
+ *
+ * Security hooks for System V Semaphores
+ *
+ * @sem_alloc_security:
+ * Allocate and attach a security structure to the sma->sem_perm.security
+ * field. The security field is initialized to NULL when the structure is
+ * first created.
+ * @sma contains the semaphore structure
+ * Return 0 if operation was successful and permission is granted.
+ * @sem_free_security:
+ * Deallocate the security structure for this semaphore.
+ * @sma contains the semaphore structure.
+ * @sem_associate:
+ * Check permission when a semaphore is requested through the semget
+ * system call. This hook is only called when returning the semaphore
+ * identifier for an existing semaphore, not when a new one must be
+ * created.
+ * @sma contains the semaphore structure.
+ * @semflg contains the operation control flags.
+ * Return 0 if permission is granted.
+ * @sem_semctl:
+ * Check permission when a semaphore operation specified by @cmd is to be
+ * performed on the semaphore @sma. The @sma may be NULL, e.g. for
+ * IPC_INFO or SEM_INFO.
+ * @sma contains the semaphore structure. May be NULL.
+ * @cmd contains the operation to be performed.
+ * Return 0 if permission is granted.
+ * @sem_semop:
+ * Check permissions before performing operations on members of the
+ * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
+ * may be modified.
+ * @sma contains the semaphore structure.
+ * @sops contains the operations to perform.
+ * @nsops contains the number of operations to perform.
+ * @alter contains the flag indicating whether changes are to be made.
+ * Return 0 if permission is granted.
+ *
+ * @binder_set_context_mgr:
+ * Check whether @mgr is allowed to be the binder context manager.
+ * @mgr contains the task_struct for the task being registered.
+ * Return 0 if permission is granted.
+ * @binder_transaction:
+ * Check whether @from is allowed to invoke a binder transaction call
+ * to @to.
+ * @from contains the task_struct for the sending task.
+ * @to contains the task_struct for the receiving task.
+ * @binder_transfer_binder:
+ * Check whether @from is allowed to transfer a binder reference to @to.
+ * @from contains the task_struct for the sending task.
+ * @to contains the task_struct for the receiving task.
+ * @binder_transfer_file:
+ * Check whether @from is allowed to transfer @file to @to.
+ * @from contains the task_struct for the sending task.
+ * @file contains the struct file being transferred.
+ * @to contains the task_struct for the receiving task.
+ *
+ * @ptrace_access_check:
+ * Check permission before allowing the current process to trace the
+ * @child process.
+ * Security modules may also want to perform a process tracing check
+ * during an execve in the bprm_set_creds hook of binprm_security_ops
+ * if the process is being traced and its security attributes would be
+ * changed by the execve.
+ * @child contains the task_struct structure for the target process.
+ * @mode contains the PTRACE_MODE flags indicating the form of access.
+ * Return 0 if permission is granted.
+ * @ptrace_traceme:
+ * Check that the @parent process has sufficient permission to trace the
+ * current process before allowing the current process to present itself
+ * to the @parent process for tracing.
+ * @parent contains the task_struct structure for debugger process.
+ * Return 0 if permission is granted.
+ * @capget:
+ * Get the @effective, @inheritable, and @permitted capability sets for
+ * the @target process. The hook may also perform permission checking to
+ * determine if the current process is allowed to see the capability sets
+ * of the @target process.
+ * @target contains the task_struct structure for target process.
+ * @effective contains the effective capability set.
+ * @inheritable contains the inheritable capability set.
+ * @permitted contains the permitted capability set.
+ * Return 0 if the capability sets were successfully obtained.
+ * @capset:
+ * Set the @effective, @inheritable, and @permitted capability sets for
+ * the current process.
+ * @new contains the new credentials structure for target process.
+ * @old contains the current credentials structure for target process.
+ * @effective contains the effective capability set.
+ * @inheritable contains the inheritable capability set.
+ * @permitted contains the permitted capability set.
+ * Return 0 and update @new if permission is granted.
+ * @capable:
+ * Check whether the process represented by @cred has the @cap
+ * capability in the indicated credentials and user namespace.
+ * @cred contains the credentials to use.
+ * @ns contains the user namespace we want the capability in.
+ * @cap contains the capability <include/linux/capability.h>.
+ * @audit indicates whether to write an audit message or not.
+ * Return 0 if the capability is granted.
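+ *
+ * A deliberately minimal sketch that ignores @ns and @audit and grants
+ * only capabilities raised in the credentials' effective set:
+ *
+ *     static int example_capable(const struct cred *cred,
+ *                                struct user_namespace *ns,
+ *                                int cap, int audit)
+ *     {
+ *             return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+ *     }
+ *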
+ * @syslog:
+ * Check permission before accessing the kernel message ring or changing
+ * logging to the console.
+ * See the syslog(2) manual page for an explanation of the @type values.
+ * @type contains the type of action.
+ * @from_file indicates the context of action (if it came from /proc).
+ * Return 0 if permission is granted.
+ * @settime:
+ * Check permission to change the system time.
+ * struct timespec and timezone are defined in include/linux/time.h
+ * @ts contains new time
+ * @tz contains new timezone
+ * Return 0 if permission is granted.
+ * @vm_enough_memory:
+ * Check permissions for allocating a new virtual mapping.
+ * @mm contains the mm struct it is being added to.
+ * @pages contains the number of pages.
+ * Return 0 if permission is granted.
+ *
+ * @ismaclabel:
+ * Check if the extended attribute specified by @name
+ * represents a MAC label. Returns 1 if @name is a MAC
+ * attribute, otherwise returns 0.
+ * @name full extended attribute name to check against
+ * LSM as a MAC label.
+ *
+ * @secid_to_secctx:
+ * Convert secid to security context. If secdata is NULL the length of
+ * the result will be returned in seclen, but no secdata will be returned.
+ * This does mean that the length could change between calls to check the
+ * length and the next call which actually allocates and returns the secdata.
+ * @secid contains the security ID.
+ * @secdata contains the pointer that stores the converted security context.
+ * @seclen pointer which contains the length of the data
+ * @secctx_to_secid:
+ * Convert security context to secid.
+ * @secid contains the pointer to the generated security ID.
+ * @secdata contains the security context.
+ *
+ * @release_secctx:
+ * Release the security context.
+ * @secdata contains the security context.
+ * @seclen contains the length of the security context.
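+ *
+ * From a caller's point of view these hooks are normally used in pairs
+ * through the security_secid_to_secctx()/security_release_secctx()
+ * wrappers, roughly as follows (error handling kept minimal):
+ *
+ *     char *ctx;
+ *     u32 len;
+ *
+ *     if (security_secid_to_secctx(secid, &ctx, &len))
+ *             return;
+ *     ... use the context in ctx, of length len ...
+ *     security_release_secctx(ctx, len);
+ *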
+ *
+ * Security hooks for Audit
+ *
+ * @audit_rule_init:
+ * Allocate and initialize an LSM audit rule structure.
+ * @field contains the required Audit action. Field flags are defined in include/linux/audit.h.
+ * @op contains the operator the rule uses.
+ * @rulestr contains the context where the rule will be applied to.
+ * @lsmrule contains a pointer to receive the result.
+ * Return 0 if @lsmrule has been successfully set,
+ * -EINVAL in case of an invalid rule.
+ *
+ * @audit_rule_known:
+ * Specifies whether given @rule contains any fields related to current LSM.
+ * @rule contains the audit rule of interest.
+ * Return 1 in case of relation found, 0 otherwise.
+ *
+ * @audit_rule_match:
+ * Determine if given @secid matches a rule previously approved
+ * by @audit_rule_known.
+ * @secid contains the security id in question.
+ * @field contains the field which relates to current LSM.
+ * @op contains the operator that will be used for matching.
+ * @rule points to the audit rule that will be checked against.
+ * @actx points to the audit context associated with the check.
+ * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
+ *
+ * @audit_rule_free:
+ * Deallocate the LSM audit rule structure previously allocated by
+ * audit_rule_init.
+ * @rule contains the allocated rule
+ *
+ * @inode_notifysecctx:
+ * Notify the security module of what the security context of an inode
+ * should be. Initializes the incore security context managed by the
+ * security module for this inode. Example usage: NFS client invokes
+ * this hook to initialize the security context in its incore inode to the
+ * value provided by the server for the file when the server returned the
+ * file's attributes to the client.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_setsecctx:
+ * Change the security context of an inode. Updates the
+ * incore security context managed by the security module and invokes the
+ * fs code as needed (via __vfs_setxattr_noperm) to update any backing
+ * xattrs that represent the context. Example usage: NFS server invokes
+ * this hook to change the security context in its incore inode and on the
+ * backing filesystem to a value provided by the client on a SETATTR
+ * operation.
+ *
+ * Must be called with inode->i_mutex locked.
+ *
+ * @dentry contains the inode we wish to set the security context of.
+ * @ctx contains the string which we wish to set in the inode.
+ * @ctxlen contains the length of @ctx.
+ *
+ * @inode_getsecctx:
+ * On success, returns 0 and fills out @ctx and @ctxlen with the security
+ * context for the given @inode.
+ *
+ * @inode we wish to get the security context of.
+ * @ctx is a pointer in which to place the allocated security context.
+ * @ctxlen points to the place to put the length of @ctx.
+ * This is the main security structure.
+ */
+struct security_operations {
+ char name[SECURITY_NAME_MAX + 1];
+
+ int (*binder_set_context_mgr) (struct task_struct *mgr);
+ int (*binder_transaction) (struct task_struct *from,
+ struct task_struct *to);
+ int (*binder_transfer_binder) (struct task_struct *from,
+ struct task_struct *to);
+ int (*binder_transfer_file) (struct task_struct *from,
+ struct task_struct *to, struct file *file);
+
+ int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
+ int (*ptrace_traceme) (struct task_struct *parent);
+ int (*capget) (struct task_struct *target,
+ kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted);
+ int (*capset) (struct cred *new,
+ const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted);
+ int (*capable) (const struct cred *cred, struct user_namespace *ns,
+ int cap, int audit);
+ int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
+ int (*quota_on) (struct dentry *dentry);
+ int (*syslog) (int type);
+ int (*settime) (const struct timespec *ts, const struct timezone *tz);
+ int (*vm_enough_memory) (struct mm_struct *mm, long pages);
+
+ int (*bprm_set_creds) (struct linux_binprm *bprm);
+ int (*bprm_check_security) (struct linux_binprm *bprm);
+ int (*bprm_secureexec) (struct linux_binprm *bprm);
+ void (*bprm_committing_creds) (struct linux_binprm *bprm);
+ void (*bprm_committed_creds) (struct linux_binprm *bprm);
+
+ int (*sb_alloc_security) (struct super_block *sb);
+ void (*sb_free_security) (struct super_block *sb);
+ int (*sb_copy_data) (char *orig, char *copy);
+ int (*sb_remount) (struct super_block *sb, void *data);
+ int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
+ int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
+ int (*sb_statfs) (struct dentry *dentry);
+ int (*sb_mount) (const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data);
+ int (*sb_umount) (struct vfsmount *mnt, int flags);
+ int (*sb_pivotroot) (struct path *old_path,
+ struct path *new_path);
+ int (*sb_set_mnt_opts) (struct super_block *sb,
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags);
+ int (*sb_clone_mnt_opts) (const struct super_block *oldsb,
+ struct super_block *newsb);
+ int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts);
+ int (*dentry_init_security) (struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen);
+
+
+#ifdef CONFIG_SECURITY_PATH
+ int (*path_unlink) (struct path *dir, struct dentry *dentry);
+ int (*path_mkdir) (struct path *dir, struct dentry *dentry, umode_t mode);
+ int (*path_rmdir) (struct path *dir, struct dentry *dentry);
+ int (*path_mknod) (struct path *dir, struct dentry *dentry, umode_t mode,
+ unsigned int dev);
+ int (*path_truncate) (struct path *path);
+ int (*path_symlink) (struct path *dir, struct dentry *dentry,
+ const char *old_name);
+ int (*path_link) (struct dentry *old_dentry, struct path *new_dir,
+ struct dentry *new_dentry);
+ int (*path_rename) (struct path *old_dir, struct dentry *old_dentry,
+ struct path *new_dir, struct dentry *new_dentry);
+ int (*path_chmod) (struct path *path, umode_t mode);
+ int (*path_chown) (struct path *path, kuid_t uid, kgid_t gid);
+ int (*path_chroot) (struct path *path);
+#endif
+
+ int (*inode_alloc_security) (struct inode *inode);
+ void (*inode_free_security) (struct inode *inode);
+ int (*inode_init_security) (struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, const char **name,
+ void **value, size_t *len);
+ int (*inode_create) (struct inode *dir,
+ struct dentry *dentry, umode_t mode);
+ int (*inode_link) (struct dentry *old_dentry,
+ struct inode *dir, struct dentry *new_dentry);
+ int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
+ int (*inode_symlink) (struct inode *dir,
+ struct dentry *dentry, const char *old_name);
+ int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode);
+ int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
+ int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t dev);
+ int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+ int (*inode_readlink) (struct dentry *dentry);
+ int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
+ int (*inode_permission) (struct inode *inode, int mask);
+ int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
+ int (*inode_getattr) (const struct path *path);
+ int (*inode_setxattr) (struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+ void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+ int (*inode_getxattr) (struct dentry *dentry, const char *name);
+ int (*inode_listxattr) (struct dentry *dentry);
+ int (*inode_removexattr) (struct dentry *dentry, const char *name);
+ int (*inode_need_killpriv) (struct dentry *dentry);
+ int (*inode_killpriv) (struct dentry *dentry);
+ int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
+ int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags);
+ int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size);
+ void (*inode_getsecid) (const struct inode *inode, u32 *secid);
+
+ int (*file_permission) (struct file *file, int mask);
+ int (*file_alloc_security) (struct file *file);
+ void (*file_free_security) (struct file *file);
+ int (*file_ioctl) (struct file *file, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap_addr) (unsigned long addr);
+ int (*mmap_file) (struct file *file,
+ unsigned long reqprot, unsigned long prot,
+ unsigned long flags);
+ int (*file_mprotect) (struct vm_area_struct *vma,
+ unsigned long reqprot,
+ unsigned long prot);
+ int (*file_lock) (struct file *file, unsigned int cmd);
+ int (*file_fcntl) (struct file *file, unsigned int cmd,
+ unsigned long arg);
+ void (*file_set_fowner) (struct file *file);
+ int (*file_send_sigiotask) (struct task_struct *tsk,
+ struct fown_struct *fown, int sig);
+ int (*file_receive) (struct file *file);
+ int (*file_open) (struct file *file, const struct cred *cred);
+
+ int (*task_create) (unsigned long clone_flags);
+ void (*task_free) (struct task_struct *task);
+ int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
+ void (*cred_free) (struct cred *cred);
+ int (*cred_prepare)(struct cred *new, const struct cred *old,
+ gfp_t gfp);
+ void (*cred_transfer)(struct cred *new, const struct cred *old);
+ int (*kernel_act_as)(struct cred *new, u32 secid);
+ int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
+ int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size);
+ int (*kernel_module_request)(char *kmod_name);
+ int (*kernel_module_from_file)(struct file *file);
+ int (*task_fix_setuid) (struct cred *new, const struct cred *old,
+ int flags);
+ int (*task_setpgid) (struct task_struct *p, pid_t pgid);
+ int (*task_getpgid) (struct task_struct *p);
+ int (*task_getsid) (struct task_struct *p);
+ void (*task_getsecid) (struct task_struct *p, u32 *secid);
+ int (*task_setnice) (struct task_struct *p, int nice);
+ int (*task_setioprio) (struct task_struct *p, int ioprio);
+ int (*task_getioprio) (struct task_struct *p);
+ int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim);
+ int (*task_setscheduler) (struct task_struct *p);
+ int (*task_getscheduler) (struct task_struct *p);
+ int (*task_movememory) (struct task_struct *p);
+ int (*task_kill) (struct task_struct *p,
+ struct siginfo *info, int sig, u32 secid);
+ int (*task_wait) (struct task_struct *p);
+ int (*task_prctl) (int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5);
+ void (*task_to_inode) (struct task_struct *p, struct inode *inode);
+
+ int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
+ void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid);
+
+ int (*msg_msg_alloc_security) (struct msg_msg *msg);
+ void (*msg_msg_free_security) (struct msg_msg *msg);
+
+ int (*msg_queue_alloc_security) (struct msg_queue *msq);
+ void (*msg_queue_free_security) (struct msg_queue *msq);
+ int (*msg_queue_associate) (struct msg_queue *msq, int msqflg);
+ int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd);
+ int (*msg_queue_msgsnd) (struct msg_queue *msq,
+ struct msg_msg *msg, int msqflg);
+ int (*msg_queue_msgrcv) (struct msg_queue *msq,
+ struct msg_msg *msg,
+ struct task_struct *target,
+ long type, int mode);
+
+ int (*shm_alloc_security) (struct shmid_kernel *shp);
+ void (*shm_free_security) (struct shmid_kernel *shp);
+ int (*shm_associate) (struct shmid_kernel *shp, int shmflg);
+ int (*shm_shmctl) (struct shmid_kernel *shp, int cmd);
+ int (*shm_shmat) (struct shmid_kernel *shp,
+ char __user *shmaddr, int shmflg);
+
+ int (*sem_alloc_security) (struct sem_array *sma);
+ void (*sem_free_security) (struct sem_array *sma);
+ int (*sem_associate) (struct sem_array *sma, int semflg);
+ int (*sem_semctl) (struct sem_array *sma, int cmd);
+ int (*sem_semop) (struct sem_array *sma,
+ struct sembuf *sops, unsigned nsops, int alter);
+
+ int (*netlink_send) (struct sock *sk, struct sk_buff *skb);
+
+ void (*d_instantiate) (struct dentry *dentry, struct inode *inode);
+
+ int (*getprocattr) (struct task_struct *p, char *name, char **value);
+ int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size);
+ int (*ismaclabel) (const char *name);
+ int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen);
+ int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
+ void (*release_secctx) (char *secdata, u32 seclen);
+
+ int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
+ int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
+ int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
+
+#ifdef CONFIG_SECURITY_NETWORK
+ int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk);
+ int (*unix_may_send) (struct socket *sock, struct socket *other);
+
+ int (*socket_create) (int family, int type, int protocol, int kern);
+ int (*socket_post_create) (struct socket *sock, int family,
+ int type, int protocol, int kern);
+ int (*socket_bind) (struct socket *sock,
+ struct sockaddr *address, int addrlen);
+ int (*socket_connect) (struct socket *sock,
+ struct sockaddr *address, int addrlen);
+ int (*socket_listen) (struct socket *sock, int backlog);
+ int (*socket_accept) (struct socket *sock, struct socket *newsock);
+ int (*socket_sendmsg) (struct socket *sock,
+ struct msghdr *msg, int size);
+ int (*socket_recvmsg) (struct socket *sock,
+ struct msghdr *msg, int size, int flags);
+ int (*socket_getsockname) (struct socket *sock);
+ int (*socket_getpeername) (struct socket *sock);
+ int (*socket_getsockopt) (struct socket *sock, int level, int optname);
+ int (*socket_setsockopt) (struct socket *sock, int level, int optname);
+ int (*socket_shutdown) (struct socket *sock, int how);
+ int (*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb);
+ int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
+ int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
+ int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
+ void (*sk_free_security) (struct sock *sk);
+ void (*sk_clone_security) (const struct sock *sk, struct sock *newsk);
+ void (*sk_getsecid) (struct sock *sk, u32 *secid);
+ void (*sock_graft) (struct sock *sk, struct socket *parent);
+ int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req);
+ void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
+ void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
+ int (*secmark_relabel_packet) (u32 secid);
+ void (*secmark_refcount_inc) (void);
+ void (*secmark_refcount_dec) (void);
+ void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
+ int (*tun_dev_alloc_security) (void **security);
+ void (*tun_dev_free_security) (void *security);
+ int (*tun_dev_create) (void);
+ int (*tun_dev_attach_queue) (void *security);
+ int (*tun_dev_attach) (struct sock *sk, void *security);
+ int (*tun_dev_open) (void *security);
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+ int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
+ int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
+ void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
+ int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
+ int (*xfrm_state_alloc) (struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx);
+ int (*xfrm_state_alloc_acquire) (struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid);
+ void (*xfrm_state_free_security) (struct xfrm_state *x);
+ int (*xfrm_state_delete_security) (struct xfrm_state *x);
+ int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+ int (*xfrm_state_pol_flow_match) (struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
+ int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall);
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+ /* key management security hooks */
+#ifdef CONFIG_KEYS
+ int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags);
+ void (*key_free) (struct key *key);
+ int (*key_permission) (key_ref_t key_ref,
+ const struct cred *cred,
+ unsigned perm);
+ int (*key_getsecurity)(struct key *key, char **_buffer);
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+ int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule);
+ int (*audit_rule_known) (struct audit_krule *krule);
+ int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule,
+ struct audit_context *actx);
+ void (*audit_rule_free) (void *lsmrule);
+#endif /* CONFIG_AUDIT */
+};
+
+/* prototypes */
+extern int security_init(void);
+extern int security_module_enable(struct security_operations *ops);
+extern int register_security(struct security_operations *ops);
+extern void __init security_fixup_ops(struct security_operations *ops);
+
+
+/* Security operations */
+int security_binder_set_context_mgr(struct task_struct *mgr);
+int security_binder_transaction(struct task_struct *from,
+ struct task_struct *to);
+int security_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to);
+int security_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to, struct file *file);
+int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
+int security_ptrace_traceme(struct task_struct *parent);
+int security_capget(struct task_struct *target,
+ kernel_cap_t *effective,
+ kernel_cap_t *inheritable,
+ kernel_cap_t *permitted);
+int security_capset(struct cred *new, const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted);
+int security_capable(const struct cred *cred, struct user_namespace *ns,
+ int cap);
+int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
+ int cap);
+int security_quotactl(int cmds, int type, int id, struct super_block *sb);
+int security_quota_on(struct dentry *dentry);
+int security_syslog(int type);
+int security_settime(const struct timespec *ts, const struct timezone *tz);
+int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
+int security_bprm_set_creds(struct linux_binprm *bprm);
+int security_bprm_check(struct linux_binprm *bprm);
+void security_bprm_committing_creds(struct linux_binprm *bprm);
+void security_bprm_committed_creds(struct linux_binprm *bprm);
+int security_bprm_secureexec(struct linux_binprm *bprm);
+int security_sb_alloc(struct super_block *sb);
+void security_sb_free(struct super_block *sb);
+int security_sb_copy_data(char *orig, char *copy);
+int security_sb_remount(struct super_block *sb, void *data);
+int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
+int security_sb_show_options(struct seq_file *m, struct super_block *sb);
+int security_sb_statfs(struct dentry *dentry);
+int security_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data);
+int security_sb_umount(struct vfsmount *mnt, int flags);
+int security_sb_pivotroot(struct path *old_path, struct path *new_path);
+int security_sb_set_mnt_opts(struct super_block *sb,
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags);
+int security_sb_clone_mnt_opts(const struct super_block *oldsb,
+ struct super_block *newsb);
+int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
+int security_dentry_init_security(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen);
+
+int security_inode_alloc(struct inode *inode);
+void security_inode_free(struct inode *inode);
+int security_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ initxattrs initxattrs, void *fs_data);
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, const char **name,
+ void **value, size_t *len);
+int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry);
+int security_inode_unlink(struct inode *dir, struct dentry *dentry);
+int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+ const char *old_name);
+int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
+int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev);
+int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags);
+int security_inode_readlink(struct dentry *dentry);
+int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+int security_inode_permission(struct inode *inode, int mask);
+int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
+int security_inode_getattr(const struct path *path);
+int security_inode_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags);
+int security_inode_getxattr(struct dentry *dentry, const char *name);
+int security_inode_listxattr(struct dentry *dentry);
+int security_inode_removexattr(struct dentry *dentry, const char *name);
+int security_inode_need_killpriv(struct dentry *dentry);
+int security_inode_killpriv(struct dentry *dentry);
+int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
+int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
+int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
+void security_inode_getsecid(const struct inode *inode, u32 *secid);
+int security_file_permission(struct file *file, int mask);
+int security_file_alloc(struct file *file);
+void security_file_free(struct file *file);
+int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags);
+int security_mmap_addr(unsigned long addr);
+int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
+ unsigned long prot);
+int security_file_lock(struct file *file, unsigned int cmd);
+int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+void security_file_set_fowner(struct file *file);
+int security_file_send_sigiotask(struct task_struct *tsk,
+ struct fown_struct *fown, int sig);
+int security_file_receive(struct file *file);
+int security_file_open(struct file *file, const struct cred *cred);
+int security_task_create(unsigned long clone_flags);
+void security_task_free(struct task_struct *task);
+int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
+void security_cred_free(struct cred *cred);
+int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
+void security_transfer_creds(struct cred *new, const struct cred *old);
+int security_kernel_act_as(struct cred *new, u32 secid);
+int security_kernel_create_files_as(struct cred *new, struct inode *inode);
+int security_kernel_fw_from_file(struct file *file, char *buf, size_t size);
+int security_kernel_module_request(char *kmod_name);
+int security_kernel_module_from_file(struct file *file);
+int security_task_fix_setuid(struct cred *new, const struct cred *old,
+ int flags);
+int security_task_setpgid(struct task_struct *p, pid_t pgid);
+int security_task_getpgid(struct task_struct *p);
+int security_task_getsid(struct task_struct *p);
+void security_task_getsecid(struct task_struct *p, u32 *secid);
+int security_task_setnice(struct task_struct *p, int nice);
+int security_task_setioprio(struct task_struct *p, int ioprio);
+int security_task_getioprio(struct task_struct *p);
+int security_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim);
+int security_task_setscheduler(struct task_struct *p);
+int security_task_getscheduler(struct task_struct *p);
+int security_task_movememory(struct task_struct *p);
+int security_task_kill(struct task_struct *p, struct siginfo *info,
+ int sig, u32 secid);
+int security_task_wait(struct task_struct *p);
+int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+void security_task_to_inode(struct task_struct *p, struct inode *inode);
+int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
+void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
+int security_msg_msg_alloc(struct msg_msg *msg);
+void security_msg_msg_free(struct msg_msg *msg);
+int security_msg_queue_alloc(struct msg_queue *msq);
+void security_msg_queue_free(struct msg_queue *msq);
+int security_msg_queue_associate(struct msg_queue *msq, int msqflg);
+int security_msg_queue_msgctl(struct msg_queue *msq, int cmd);
+int security_msg_queue_msgsnd(struct msg_queue *msq,
+ struct msg_msg *msg, int msqflg);
+int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
+ struct task_struct *target, long type, int mode);
+int security_shm_alloc(struct shmid_kernel *shp);
+void security_shm_free(struct shmid_kernel *shp);
+int security_shm_associate(struct shmid_kernel *shp, int shmflg);
+int security_shm_shmctl(struct shmid_kernel *shp, int cmd);
+int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg);
+int security_sem_alloc(struct sem_array *sma);
+void security_sem_free(struct sem_array *sma);
+int security_sem_associate(struct sem_array *sma, int semflg);
+int security_sem_semctl(struct sem_array *sma, int cmd);
+int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
+ unsigned nsops, int alter);
+void security_d_instantiate(struct dentry *dentry, struct inode *inode);
+int security_getprocattr(struct task_struct *p, char *name, char **value);
+int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size);
+int security_netlink_send(struct sock *sk, struct sk_buff *skb);
+int security_ismaclabel(const char *name);
+int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
+int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
+void security_release_secctx(char *secdata, u32 seclen);
+
+int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
+int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
+int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
+#else /* CONFIG_SECURITY */
+struct security_mnt_opts {
+};
+
+static inline void security_init_mnt_opts(struct security_mnt_opts *opts)
+{
+}
+
+static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
+{
+}
+
+/*
+ * This is the default capabilities functionality. Most of these functions
+ * are just stubbed out, but a few must call the proper capable code.
+ */
+
+static inline int security_init(void)
+{
+ return 0;
+}
+
+static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+{
+ return 0;
+}
+
+static inline int security_binder_transaction(struct task_struct *from,
+ struct task_struct *to)
+{
+ return 0;
+}
+
+static inline int security_binder_transfer_binder(struct task_struct *from,
+ struct task_struct *to)
+{
+ return 0;
+}
+
+static inline int security_binder_transfer_file(struct task_struct *from,
+ struct task_struct *to,
+ struct file *file)
+{
+ return 0;
+}
+
+static inline int security_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ return cap_ptrace_access_check(child, mode);
+}
+
+static inline int security_ptrace_traceme(struct task_struct *parent)
+{
+ return cap_ptrace_traceme(parent);
+}
+
+static inline int security_capget(struct task_struct *target,
+ kernel_cap_t *effective,
+ kernel_cap_t *inheritable,
+ kernel_cap_t *permitted)
+{
+ return cap_capget(target, effective, inheritable, permitted);
+}
+
+static inline int security_capset(struct cred *new,
+ const struct cred *old,
+ const kernel_cap_t *effective,
+ const kernel_cap_t *inheritable,
+ const kernel_cap_t *permitted)
+{
+ return cap_capset(new, old, effective, inheritable, permitted);
+}
+
+static inline int security_capable(const struct cred *cred,
+ struct user_namespace *ns, int cap)
+{
+ return cap_capable(cred, ns, cap, SECURITY_CAP_AUDIT);
+}
+
+static inline int security_capable_noaudit(const struct cred *cred,
+ struct user_namespace *ns, int cap) {
+ return cap_capable(cred, ns, cap, SECURITY_CAP_NOAUDIT);
+}
+
+static inline int security_quotactl(int cmds, int type, int id,
+ struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int security_quota_on(struct dentry *dentry)
+{
+ return 0;
+}
+
+static inline int security_syslog(int type)
+{
+ return 0;
+}
+
+static inline int security_settime(const struct timespec *ts,
+ const struct timezone *tz)
+{
+ return cap_settime(ts, tz);
+}
+
+static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
+{
+ return cap_vm_enough_memory(mm, pages);
+}
+
+static inline int security_bprm_set_creds(struct linux_binprm *bprm)
+{
+ return cap_bprm_set_creds(bprm);
+}
+
+static inline int security_bprm_check(struct linux_binprm *bprm)
+{
+ return 0;
+}
+
+static inline void security_bprm_committing_creds(struct linux_binprm *bprm)
+{
+}
+
+static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
+{
+}
+
+static inline int security_bprm_secureexec(struct linux_binprm *bprm)
+{
+ return cap_bprm_secureexec(bprm);
+}
+
+static inline int security_sb_alloc(struct super_block *sb)
+{
+ return 0;
+}
+
+static inline void security_sb_free(struct super_block *sb)
+{ }
+
+static inline int security_sb_copy_data(char *orig, char *copy)
+{
+ return 0;
+}
+
+static inline int security_sb_remount(struct super_block *sb, void *data)
+{
+ return 0;
+}
+
+static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
+{
+ return 0;
+}
+
+static inline int security_sb_show_options(struct seq_file *m,
+ struct super_block *sb)
+{
+ return 0;
+}
+
+static inline int security_sb_statfs(struct dentry *dentry)
+{
+ return 0;
+}
+
+static inline int security_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags,
+ void *data)
+{
+ return 0;
+}
+
+static inline int security_sb_umount(struct vfsmount *mnt, int flags)
+{
+ return 0;
+}
+
+static inline int security_sb_pivotroot(struct path *old_path,
+ struct path *new_path)
+{
+ return 0;
+}
+
+static inline int security_sb_set_mnt_opts(struct super_block *sb,
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+{
+ return 0;
+}
+
+static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb,
+ struct super_block *newsb)
+{
+ return 0;
+}
+
+static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
+{
+ return 0;
+}
+
+static inline int security_inode_alloc(struct inode *inode)
+{
+ return 0;
+}
+
+static inline void security_inode_free(struct inode *inode)
+{ }
+
+static inline int security_dentry_init_security(struct dentry *dentry,
+ int mode,
+ struct qstr *name,
+ void **ctx,
+ u32 *ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+
+
+static inline int security_inode_init_security(struct inode *inode,
+ struct inode *dir,
+ const struct qstr *qstr,
+ const initxattrs xattrs,
+ void *fs_data)
+{
+ return 0;
+}
+
+static inline int security_old_inode_init_security(struct inode *inode,
+ struct inode *dir,
+ const struct qstr *qstr,
+ const char **name,
+ void **value, size_t *len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_inode_create(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
+{
+ return 0;
+}
+
+static inline int security_inode_link(struct dentry *old_dentry,
+ struct inode *dir,
+ struct dentry *new_dentry)
+{
+ return 0;
+}
+
+static inline int security_inode_unlink(struct inode *dir,
+ struct dentry *dentry)
+{
+ return 0;
+}
+
+static inline int security_inode_symlink(struct inode *dir,
+ struct dentry *dentry,
+ const char *old_name)
+{
+ return 0;
+}
+
+static inline int security_inode_mkdir(struct inode *dir,
+ struct dentry *dentry,
+ int mode)
+{
+ return 0;
+}
+
+static inline int security_inode_rmdir(struct inode *dir,
+ struct dentry *dentry)
+{
+ return 0;
+}
+
+static inline int security_inode_mknod(struct inode *dir,
+ struct dentry *dentry,
+ int mode, dev_t dev)
+{
+ return 0;
+}
+
+static inline int security_inode_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
+ struct inode *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static inline int security_inode_readlink(struct dentry *dentry)
+{
+ return 0;
+}
+
+static inline int security_inode_follow_link(struct dentry *dentry,
+ struct nameidata *nd)
+{
+ return 0;
+}
+
+static inline int security_inode_permission(struct inode *inode, int mask)
+{
+ return 0;
+}
+
+static inline int security_inode_setattr(struct dentry *dentry,
+ struct iattr *attr)
+{
+ return 0;
+}
+
+static inline int security_inode_getattr(const struct path *path)
+{
+ return 0;
+}
+
+static inline int security_inode_setxattr(struct dentry *dentry,
+ const char *name, const void *value, size_t size, int flags)
+{
+ return cap_inode_setxattr(dentry, name, value, size, flags);
+}
+
+static inline void security_inode_post_setxattr(struct dentry *dentry,
+ const char *name, const void *value, size_t size, int flags)
+{ }
+
+static inline int security_inode_getxattr(struct dentry *dentry,
+ const char *name)
+{
+ return 0;
+}
+
+static inline int security_inode_listxattr(struct dentry *dentry)
+{
+ return 0;
+}
+
+static inline int security_inode_removexattr(struct dentry *dentry,
+ const char *name)
+{
+ return cap_inode_removexattr(dentry, name);
+}
+
+static inline int security_inode_need_killpriv(struct dentry *dentry)
+{
+ return cap_inode_need_killpriv(dentry);
+}
+
+static inline int security_inode_killpriv(struct dentry *dentry)
+{
+ return cap_inode_killpriv(dentry);
+}
+
+static inline int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
+{
+ return 0;
+}
+
+static inline void security_inode_getsecid(const struct inode *inode, u32 *secid)
+{
+ *secid = 0;
+}
+
+static inline int security_file_permission(struct file *file, int mask)
+{
+ return 0;
+}
+
+static inline int security_file_alloc(struct file *file)
+{
+ return 0;
+}
+
+static inline void security_file_free(struct file *file)
+{ }
+
+static inline int security_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
+static inline int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags)
+{
+ return 0;
+}
+
+static inline int security_mmap_addr(unsigned long addr)
+{
+ return cap_mmap_addr(addr);
+}
+
+static inline int security_file_mprotect(struct vm_area_struct *vma,
+ unsigned long reqprot,
+ unsigned long prot)
+{
+ return 0;
+}
+
+static inline int security_file_lock(struct file *file, unsigned int cmd)
+{
+ return 0;
+}
+
+static inline int security_file_fcntl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
+static inline void security_file_set_fowner(struct file *file)
+{
+ return;
+}
+
+static inline int security_file_send_sigiotask(struct task_struct *tsk,
+ struct fown_struct *fown,
+ int sig)
+{
+ return 0;
+}
+
+static inline int security_file_receive(struct file *file)
+{
+ return 0;
+}
+
+static inline int security_file_open(struct file *file,
+ const struct cred *cred)
+{
+ return 0;
+}
+
+static inline int security_task_create(unsigned long clone_flags)
+{
+ return 0;
+}
+
+static inline void security_task_free(struct task_struct *task)
+{ }
+
+static inline int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ return 0;
+}
+
+static inline void security_cred_free(struct cred *cred)
+{ }
+
+static inline int security_prepare_creds(struct cred *new,
+ const struct cred *old,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static inline void security_transfer_creds(struct cred *new,
+ const struct cred *old)
+{
+}
+
+static inline int security_kernel_act_as(struct cred *cred, u32 secid)
+{
+ return 0;
+}
+
+static inline int security_kernel_create_files_as(struct cred *cred,
+ struct inode *inode)
+{
+ return 0;
+}
+
+static inline int security_kernel_fw_from_file(struct file *file,
+ char *buf, size_t size)
+{
+ return 0;
+}
+
+static inline int security_kernel_module_request(char *kmod_name)
+{
+ return 0;
+}
+
+static inline int security_kernel_module_from_file(struct file *file)
+{
+ return 0;
+}
+
+static inline int security_task_fix_setuid(struct cred *new,
+ const struct cred *old,
+ int flags)
+{
+ return cap_task_fix_setuid(new, old, flags);
+}
+
+static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
+{
+ return 0;
+}
+
+static inline int security_task_getpgid(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline int security_task_getsid(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline void security_task_getsecid(struct task_struct *p, u32 *secid)
+{
+ *secid = 0;
+}
+
+static inline int security_task_setnice(struct task_struct *p, int nice)
+{
+ return cap_task_setnice(p, nice);
+}
+
+static inline int security_task_setioprio(struct task_struct *p, int ioprio)
+{
+ return cap_task_setioprio(p, ioprio);
+}
+
+static inline int security_task_getioprio(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline int security_task_setrlimit(struct task_struct *p,
+ unsigned int resource,
+ struct rlimit *new_rlim)
+{
+ return 0;
+}
+
+static inline int security_task_setscheduler(struct task_struct *p)
+{
+ return cap_task_setscheduler(p);
+}
+
+static inline int security_task_getscheduler(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline int security_task_movememory(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline int security_task_kill(struct task_struct *p,
+ struct siginfo *info, int sig,
+ u32 secid)
+{
+ return 0;
+}
+
+static inline int security_task_wait(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline int security_task_prctl(int option, unsigned long arg2,
+ unsigned long arg3,
+ unsigned long arg4,
+ unsigned long arg5)
+{
+	return cap_task_prctl(option, arg2, arg3, arg4, arg5);
+}
+
+static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
+{ }
+
+static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
+ short flag)
+{
+ return 0;
+}
+
+static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
+{
+ *secid = 0;
+}
+
+static inline int security_msg_msg_alloc(struct msg_msg *msg)
+{
+ return 0;
+}
+
+static inline void security_msg_msg_free(struct msg_msg *msg)
+{ }
+
+static inline int security_msg_queue_alloc(struct msg_queue *msq)
+{
+ return 0;
+}
+
+static inline void security_msg_queue_free(struct msg_queue *msq)
+{ }
+
+static inline int security_msg_queue_associate(struct msg_queue *msq,
+ int msqflg)
+{
+ return 0;
+}
+
+static inline int security_msg_queue_msgctl(struct msg_queue *msq, int cmd)
+{
+ return 0;
+}
+
+static inline int security_msg_queue_msgsnd(struct msg_queue *msq,
+ struct msg_msg *msg, int msqflg)
+{
+ return 0;
+}
+
+static inline int security_msg_queue_msgrcv(struct msg_queue *msq,
+ struct msg_msg *msg,
+ struct task_struct *target,
+ long type, int mode)
+{
+ return 0;
+}
+
+static inline int security_shm_alloc(struct shmid_kernel *shp)
+{
+ return 0;
+}
+
+static inline void security_shm_free(struct shmid_kernel *shp)
+{ }
+
+static inline int security_shm_associate(struct shmid_kernel *shp,
+ int shmflg)
+{
+ return 0;
+}
+
+static inline int security_shm_shmctl(struct shmid_kernel *shp, int cmd)
+{
+ return 0;
+}
+
+static inline int security_shm_shmat(struct shmid_kernel *shp,
+ char __user *shmaddr, int shmflg)
+{
+ return 0;
+}
+
+static inline int security_sem_alloc(struct sem_array *sma)
+{
+ return 0;
+}
+
+static inline void security_sem_free(struct sem_array *sma)
+{ }
+
+static inline int security_sem_associate(struct sem_array *sma, int semflg)
+{
+ return 0;
+}
+
+static inline int security_sem_semctl(struct sem_array *sma, int cmd)
+{
+ return 0;
+}
+
+static inline int security_sem_semop(struct sem_array *sma,
+ struct sembuf *sops, unsigned nsops,
+ int alter)
+{
+ return 0;
+}
+
+static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode)
+{ }
+
+static inline int security_getprocattr(struct task_struct *p, char *name, char **value)
+{
+ return -EINVAL;
+}
+
+static inline int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size)
+{
+ return -EINVAL;
+}
+
+static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
+{
+ return cap_netlink_send(sk, skb);
+}
+
+static inline int security_ismaclabel(const char *name)
+{
+ return 0;
+}
+
+static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int security_secctx_to_secid(const char *secdata,
+ u32 seclen,
+ u32 *secid)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void security_release_secctx(char *secdata, u32 seclen)
+{
+}
+
+static inline int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+static inline int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_SECURITY */
+
+#ifdef CONFIG_SECURITY_NETWORK
+
+int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
+int security_unix_may_send(struct socket *sock, struct socket *other);
+int security_socket_create(int family, int type, int protocol, int kern);
+int security_socket_post_create(struct socket *sock, int family,
+ int type, int protocol, int kern);
+int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen);
+int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen);
+int security_socket_listen(struct socket *sock, int backlog);
+int security_socket_accept(struct socket *sock, struct socket *newsock);
+int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size);
+int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
+ int size, int flags);
+int security_socket_getsockname(struct socket *sock);
+int security_socket_getpeername(struct socket *sock);
+int security_socket_getsockopt(struct socket *sock, int level, int optname);
+int security_socket_setsockopt(struct socket *sock, int level, int optname);
+int security_socket_shutdown(struct socket *sock, int how);
+int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
+ int __user *optlen, unsigned len);
+int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid);
+int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
+void security_sk_free(struct sock *sk);
+void security_sk_clone(const struct sock *sk, struct sock *newsk);
+void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
+void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
+void security_sock_graft(struct sock *sk, struct socket *parent);
+int security_inet_conn_request(struct sock *sk,
+ struct sk_buff *skb, struct request_sock *req);
+void security_inet_csk_clone(struct sock *newsk,
+ const struct request_sock *req);
+void security_inet_conn_established(struct sock *sk,
+ struct sk_buff *skb);
+int security_secmark_relabel_packet(u32 secid);
+void security_secmark_refcount_inc(void);
+void security_secmark_refcount_dec(void);
+int security_tun_dev_alloc_security(void **security);
+void security_tun_dev_free_security(void *security);
+int security_tun_dev_create(void);
+int security_tun_dev_attach_queue(void *security);
+int security_tun_dev_attach(struct sock *sk, void *security);
+int security_tun_dev_open(void *security);
+
+#else /* CONFIG_SECURITY_NETWORK */
+static inline int security_unix_stream_connect(struct sock *sock,
+ struct sock *other,
+ struct sock *newsk)
+{
+ return 0;
+}
+
+static inline int security_unix_may_send(struct socket *sock,
+ struct socket *other)
+{
+ return 0;
+}
+
+static inline int security_socket_create(int family, int type,
+ int protocol, int kern)
+{
+ return 0;
+}
+
+static inline int security_socket_post_create(struct socket *sock,
+ int family,
+ int type,
+ int protocol, int kern)
+{
+ return 0;
+}
+
+static inline int security_socket_bind(struct socket *sock,
+ struct sockaddr *address,
+ int addrlen)
+{
+ return 0;
+}
+
+static inline int security_socket_connect(struct socket *sock,
+ struct sockaddr *address,
+ int addrlen)
+{
+ return 0;
+}
+
+static inline int security_socket_listen(struct socket *sock, int backlog)
+{
+ return 0;
+}
+
+static inline int security_socket_accept(struct socket *sock,
+ struct socket *newsock)
+{
+ return 0;
+}
+
+static inline int security_socket_sendmsg(struct socket *sock,
+ struct msghdr *msg, int size)
+{
+ return 0;
+}
+
+static inline int security_socket_recvmsg(struct socket *sock,
+ struct msghdr *msg, int size,
+ int flags)
+{
+ return 0;
+}
+
+static inline int security_socket_getsockname(struct socket *sock)
+{
+ return 0;
+}
+
+static inline int security_socket_getpeername(struct socket *sock)
+{
+ return 0;
+}
+
+static inline int security_socket_getsockopt(struct socket *sock,
+ int level, int optname)
+{
+ return 0;
+}
+
+static inline int security_socket_setsockopt(struct socket *sock,
+ int level, int optname)
+{
+ return 0;
+}
+
+static inline int security_socket_shutdown(struct socket *sock, int how)
+{
+ return 0;
+}
+static inline int security_sock_rcv_skb(struct sock *sk,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
+ int __user *optlen, unsigned len)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
+{
+ return -ENOPROTOOPT;
+}
+
+static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
+{
+ return 0;
+}
+
+static inline void security_sk_free(struct sock *sk)
+{
+}
+
+static inline void security_sk_clone(const struct sock *sk, struct sock *newsk)
+{
+}
+
+static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+{
+}
+
+static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+{
+}
+
+static inline void security_sock_graft(struct sock *sk, struct socket *parent)
+{
+}
+
+static inline int security_inet_conn_request(struct sock *sk,
+ struct sk_buff *skb, struct request_sock *req)
+{
+ return 0;
+}
+
+static inline void security_inet_csk_clone(struct sock *newsk,
+ const struct request_sock *req)
+{
+}
+
+static inline void security_inet_conn_established(struct sock *sk,
+ struct sk_buff *skb)
+{
+}
+
+static inline int security_secmark_relabel_packet(u32 secid)
+{
+ return 0;
+}
+
+static inline void security_secmark_refcount_inc(void)
+{
+}
+
+static inline void security_secmark_refcount_dec(void)
+{
+}
+
+static inline int security_tun_dev_alloc_security(void **security)
+{
+ return 0;
+}
+
+static inline void security_tun_dev_free_security(void *security)
+{
+}
+
+static inline int security_tun_dev_create(void)
+{
+ return 0;
+}
+
+static inline int security_tun_dev_attach_queue(void *security)
+{
+ return 0;
+}
+
+static inline int security_tun_dev_attach(struct sock *sk, void *security)
+{
+ return 0;
+}
+
+static inline int security_tun_dev_open(void *security)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY_NETWORK */
+
+#ifdef CONFIG_SECURITY_NETWORK_XFRM
+
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
+int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp);
+void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
+int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
+int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx);
+int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid);
+int security_xfrm_state_delete(struct xfrm_state *x);
+void security_xfrm_state_free(struct xfrm_state *x);
+int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
+int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
+void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
+
+#else /* CONFIG_SECURITY_NETWORK_XFRM */
+
+static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
+{
+ return 0;
+}
+
+static inline int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp)
+{
+ return 0;
+}
+
+static inline void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
+{
+}
+
+static inline int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
+{
+ return 0;
+}
+
+static inline int security_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+{
+ return 0;
+}
+
+static inline int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
+{
+ return 0;
+}
+
+static inline void security_xfrm_state_free(struct xfrm_state *x)
+{
+}
+
+static inline int security_xfrm_state_delete(struct xfrm_state *x)
+{
+ return 0;
+}
+
+static inline int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+{
+ return 0;
+}
+
+static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp, const struct flowi *fl)
+{
+ return 1;
+}
+
+static inline int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
+{
+ return 0;
+}
+
+static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+{
+}
+
+#endif /* CONFIG_SECURITY_NETWORK_XFRM */
+
+#ifdef CONFIG_SECURITY_PATH
+int security_path_unlink(struct path *dir, struct dentry *dentry);
+int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode);
+int security_path_rmdir(struct path *dir, struct dentry *dentry);
+int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
+ unsigned int dev);
+int security_path_truncate(struct path *path);
+int security_path_symlink(struct path *dir, struct dentry *dentry,
+ const char *old_name);
+int security_path_link(struct dentry *old_dentry, struct path *new_dir,
+ struct dentry *new_dentry);
+int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
+ struct path *new_dir, struct dentry *new_dentry,
+ unsigned int flags);
+int security_path_chmod(struct path *path, umode_t mode);
+int security_path_chown(struct path *path, kuid_t uid, kgid_t gid);
+int security_path_chroot(struct path *path);
+#else /* CONFIG_SECURITY_PATH */
+static inline int security_path_unlink(struct path *dir, struct dentry *dentry)
+{
+ return 0;
+}
+
+static inline int security_path_mkdir(struct path *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ return 0;
+}
+
+static inline int security_path_rmdir(struct path *dir, struct dentry *dentry)
+{
+ return 0;
+}
+
+static inline int security_path_mknod(struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+{
+ return 0;
+}
+
+static inline int security_path_truncate(struct path *path)
+{
+ return 0;
+}
+
+static inline int security_path_symlink(struct path *dir, struct dentry *dentry,
+ const char *old_name)
+{
+ return 0;
+}
+
+static inline int security_path_link(struct dentry *old_dentry,
+ struct path *new_dir,
+ struct dentry *new_dentry)
+{
+ return 0;
+}
+
+static inline int security_path_rename(struct path *old_dir,
+ struct dentry *old_dentry,
+ struct path *new_dir,
+ struct dentry *new_dentry,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static inline int security_path_chmod(struct path *path, umode_t mode)
+{
+ return 0;
+}
+
+static inline int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+{
+ return 0;
+}
+
+static inline int security_path_chroot(struct path *path)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY_PATH */
+
+#ifdef CONFIG_KEYS
+#ifdef CONFIG_SECURITY
+
+int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags);
+void security_key_free(struct key *key);
+int security_key_permission(key_ref_t key_ref,
+ const struct cred *cred, unsigned perm);
+int security_key_getsecurity(struct key *key, char **_buffer);
+
+#else
+
+static inline int security_key_alloc(struct key *key,
+ const struct cred *cred,
+ unsigned long flags)
+{
+ return 0;
+}
+
+static inline void security_key_free(struct key *key)
+{
+}
+
+static inline int security_key_permission(key_ref_t key_ref,
+ const struct cred *cred,
+ unsigned perm)
+{
+ return 0;
+}
+
+static inline int security_key_getsecurity(struct key *key, char **_buffer)
+{
+ *_buffer = NULL;
+ return 0;
+}
+
+#endif
+#endif /* CONFIG_KEYS */
+
+#ifdef CONFIG_AUDIT
+#ifdef CONFIG_SECURITY
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
+int security_audit_rule_known(struct audit_krule *krule);
+int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule,
+ struct audit_context *actx);
+void security_audit_rule_free(void *lsmrule);
+
+#else
+
+static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
+ void **lsmrule)
+{
+ return 0;
+}
+
+static inline int security_audit_rule_known(struct audit_krule *krule)
+{
+ return 0;
+}
+
+static inline int security_audit_rule_match(u32 secid, u32 field, u32 op,
+ void *lsmrule, struct audit_context *actx)
+{
+ return 0;
+}
+
+static inline void security_audit_rule_free(void *lsmrule)
+{ }
+
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_AUDIT */
+
+#ifdef CONFIG_SECURITYFS
+
+extern struct dentry *securityfs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
+extern void securityfs_remove(struct dentry *dentry);
+
+#else /* CONFIG_SECURITYFS */
+
+static inline struct dentry *securityfs_create_dir(const char *name,
+ struct dentry *parent)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct dentry *securityfs_create_file(const char *name,
+ umode_t mode,
+ struct dentry *parent,
+ void *data,
+ const struct file_operations *fops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void securityfs_remove(struct dentry *dentry)
+{}
+
+#endif
+
+#ifdef CONFIG_SECURITY
+
+static inline char *alloc_secdata(void)
+{
+ return (char *)get_zeroed_page(GFP_KERNEL);
+}
+
+static inline void free_secdata(void *secdata)
+{
+ free_page((unsigned long)secdata);
+}
+
+#else
+
+static inline char *alloc_secdata(void)
+{
+ return (char *)1;
+}
+
+static inline void free_secdata(void *secdata)
+{ }
+#endif /* CONFIG_SECURITY */
+
+#ifdef CONFIG_SECURITY_YAMA
+extern int yama_ptrace_access_check(struct task_struct *child,
+ unsigned int mode);
+extern int yama_ptrace_traceme(struct task_struct *parent);
+extern void yama_task_free(struct task_struct *task);
+extern int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+#else
+static inline int yama_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ return 0;
+}
+
+static inline int yama_ptrace_traceme(struct task_struct *parent)
+{
+ return 0;
+}
+
+static inline void yama_task_free(struct task_struct *task)
+{
+}
+
+static inline int yama_task_prctl(int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_SECURITY_YAMA */
+
+#endif /* ! __LINUX_SECURITY_H */
+
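For orientation, a minimal sketch of how a security module might sit on top of this interface: fill a struct security_operations with the hooks it implements, ask security_module_enable() whether it was selected as the active LSM, then call register_security(); hooks left NULL are expected to be filled in by security_fixup_ops() with the default capability behaviour. The example_* identifiers below are hypothetical, only one hook is shown, and security_initcall() is assumed to come from <linux/init.h>.

/* Hypothetical LSM registration sketch -- not part of the imported header. */
static int example_file_permission(struct file *file, int mask)
{
	return 0;			/* 0 = allow; real policy checks would go here */
}

static struct security_operations example_ops = {
	.name		 = "example",	/* the .name field is declared earlier in the struct */
	.file_permission = example_file_permission,
};

static int __init example_lsm_init(void)
{
	if (!security_module_enable(&example_ops))
		return 0;		/* another LSM was chosen at boot */
	return register_security(&example_ops);
}
security_initcall(example_lsm_init);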
diff --git a/include/linux/selection.h b/include/linux/selection.h
new file mode 100644
index 000000000..85193aa8c
--- /dev/null
+++ b/include/linux/selection.h
@@ -0,0 +1,44 @@
+/*
+ * selection.h
+ *
+ * Interface between console.c, tty_io.c, vt.c, vc_screen.c and selection.c
+ */
+
+#ifndef _LINUX_SELECTION_H_
+#define _LINUX_SELECTION_H_
+
+#include <linux/tiocl.h>
+#include <linux/vt_buffer.h>
+
+struct tty_struct;
+
+extern struct vc_data *sel_cons;
+struct tty_struct;
+
+extern void clear_selection(void);
+extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty);
+extern int paste_selection(struct tty_struct *tty);
+extern int sel_loadlut(char __user *p);
+extern int mouse_reporting(void);
+extern void mouse_report(struct tty_struct * tty, int butt, int mrx, int mry);
+
+extern int console_blanked;
+
+extern unsigned char color_table[];
+extern int default_red[];
+extern int default_grn[];
+extern int default_blu[];
+
+extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed);
+extern u16 screen_glyph(struct vc_data *vc, int offset);
+extern void complement_pos(struct vc_data *vc, int offset);
+extern void invert_screen(struct vc_data *vc, int offset, int count, int shift);
+
+extern void getconsxy(struct vc_data *vc, unsigned char *p);
+extern void putconsxy(struct vc_data *vc, unsigned char *p);
+
+extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org);
+extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
+extern void vcs_scr_updated(struct vc_data *vc);
+
+#endif
diff --git a/include/linux/selinux.h b/include/linux/selinux.h
new file mode 100644
index 000000000..44f459612
--- /dev/null
+++ b/include/linux/selinux.h
@@ -0,0 +1,35 @@
+/*
+ * SELinux services exported to the rest of the kernel.
+ *
+ * Author: James Morris <jmorris@redhat.com>
+ *
+ * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com>
+ * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com>
+ * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_SELINUX_H
+#define _LINUX_SELINUX_H
+
+struct selinux_audit_rule;
+struct audit_context;
+struct kern_ipc_perm;
+
+#ifdef CONFIG_SECURITY_SELINUX
+
+/**
+ * selinux_is_enabled - is SELinux enabled?
+ */
+bool selinux_is_enabled(void);
+#else
+
+static inline bool selinux_is_enabled(void)
+{
+ return false;
+}
+#endif /* CONFIG_SECURITY_SELINUX */
+
+#endif /* _LINUX_SELINUX_H */
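A brief usage sketch of the single export above (hypothetical example_* name; kernel context assumed): callers outside the security subsystem can branch on whether SELinux is active, and the !CONFIG_SECURITY_SELINUX stub makes the check collapse to false.

#include <linux/types.h>
#include <linux/selinux.h>

/* Hypothetical caller: only do SELinux-specific work when it is enabled. */
static bool example_wants_selinux_labels(void)
{
	return selinux_is_enabled();
}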
diff --git a/include/linux/sem.h b/include/linux/sem.h
new file mode 100644
index 000000000..976ce3a19
--- /dev/null
+++ b/include/linux/sem.h
@@ -0,0 +1,52 @@
+#ifndef _LINUX_SEM_H
+#define _LINUX_SEM_H
+
+#include <linux/atomic.h>
+#include <linux/rcupdate.h>
+#include <linux/cache.h>
+#include <uapi/linux/sem.h>
+
+struct task_struct;
+
+/* One sem_array data structure for each set of semaphores in the system. */
+struct sem_array {
+ struct kern_ipc_perm ____cacheline_aligned_in_smp
+ sem_perm; /* permissions .. see ipc.h */
+ time_t sem_ctime; /* last change time */
+ struct sem *sem_base; /* ptr to first semaphore in array */
+ struct list_head pending_alter; /* pending operations */
+ /* that alter the array */
+ struct list_head pending_const; /* pending complex operations */
+ /* that do not alter semvals */
+ struct list_head list_id; /* undo requests on this array */
+ int sem_nsems; /* no. of semaphores in array */
+ int complex_count; /* pending complex operations */
+};
+
+#ifdef CONFIG_SYSVIPC
+
+struct sysv_sem {
+ struct sem_undo_list *undo_list;
+};
+
+extern int copy_semundo(unsigned long clone_flags, struct task_struct *tsk);
+extern void exit_sem(struct task_struct *tsk);
+
+#else
+
+struct sysv_sem {
+ /* empty */
+};
+
+static inline int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+
+static inline void exit_sem(struct task_struct *tsk)
+{
+ return;
+}
+#endif
+
+#endif /* _LINUX_SEM_H */
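For context, a sketch of the intended call pattern for the two helpers above (simplified and hypothetical; in the real kernel the generic fork and exit paths make these calls): copy_semundo() decides whether a new task shares its parent's semaphore undo list, and exit_sem() applies and releases any undo state when a task dies.

/* Hypothetical wrapper mirroring how a fork-like path would use copy_semundo(). */
static int example_copy_ipc_for_child(unsigned long clone_flags,
				      struct task_struct *child)
{
	int err = copy_semundo(clone_flags, child);

	if (err)
		return err;
	/* ... other per-task setup ... */
	return 0;
}

/* An exit-like path is then expected to call exit_sem(task) before the task goes away. */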
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
new file mode 100644
index 000000000..dc368b8ce
--- /dev/null
+++ b/include/linux/semaphore.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * Distributed under the terms of the GNU GPL, version 2
+ *
+ * Please see kernel/semaphore.c for documentation of these functions
+ */
+#ifndef __LINUX_SEMAPHORE_H
+#define __LINUX_SEMAPHORE_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/* Please don't access any members of this structure directly */
+struct semaphore {
+ raw_spinlock_t lock;
+ unsigned int count;
+ struct list_head wait_list;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n) \
+{ \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
+ .count = n, \
+ .wait_list = LIST_HEAD_INIT((name).wait_list), \
+}
+
+#define DEFINE_SEMAPHORE(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
+static inline void sema_init(struct semaphore *sem, int val)
+{
+ static struct lock_class_key __key;
+ *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
+ lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
+}
+
+extern void down(struct semaphore *sem);
+extern int __must_check down_interruptible(struct semaphore *sem);
+extern int __must_check down_killable(struct semaphore *sem);
+extern int __must_check down_trylock(struct semaphore *sem);
+extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
+extern void up(struct semaphore *sem);
+
+#endif /* __LINUX_SEMAPHORE_H */
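The declarations above are the whole public API; a short usage sketch (hypothetical example_* names, kernel context assumed) of a binary semaphore serializing access to a shared resource:

#include <linux/semaphore.h>
#include <linux/errno.h>

/* A count of 1 makes this behave as a sleeping, mutex-like lock. */
static DEFINE_SEMAPHORE(example_sem);

static int example_do_work(void)
{
	if (down_interruptible(&example_sem))
		return -EINTR;		/* a signal arrived while waiting */
	/* ... critical section touching the shared resource ... */
	up(&example_sem);
	return 0;
}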
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
new file mode 100644
index 000000000..fb7eb9ccb
--- /dev/null
+++ b/include/linux/seq_buf.h
@@ -0,0 +1,133 @@
+#ifndef _LINUX_SEQ_BUF_H
+#define _LINUX_SEQ_BUF_H
+
+#include <linux/fs.h>
+
+/*
+ * Trace sequences are used to allow a function to call several other functions
+ * to create a string of data to use.
+ */
+
+/**
+ * seq_buf - seq buffer structure
+ * @buffer: pointer to the buffer
+ * @size: size of the buffer
+ * @len: the amount of data inside the buffer
+ * @readpos: The next position to read in the buffer.
+ */
+struct seq_buf {
+ char *buffer;
+ size_t size;
+ size_t len;
+ loff_t readpos;
+};
+
+static inline void seq_buf_clear(struct seq_buf *s)
+{
+ s->len = 0;
+ s->readpos = 0;
+}
+
+static inline void
+seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
+{
+ s->buffer = buf;
+ s->size = size;
+ seq_buf_clear(s);
+}
+
+/*
+ * A seq_buf has a buffer that might overflow. When this happens,
+ * len is set past size (see seq_buf_set_overflow() below).
+ */
+static inline bool
+seq_buf_has_overflowed(struct seq_buf *s)
+{
+ return s->len > s->size;
+}
+
+static inline void
+seq_buf_set_overflow(struct seq_buf *s)
+{
+ s->len = s->size + 1;
+}
+
+/*
+ * How much buffer is left on the seq_buf?
+ */
+static inline unsigned int
+seq_buf_buffer_left(struct seq_buf *s)
+{
+ if (seq_buf_has_overflowed(s))
+ return 0;
+
+ return s->size - s->len;
+}
+
+/* How much buffer was written? */
+static inline unsigned int seq_buf_used(struct seq_buf *s)
+{
+ return min(s->len, s->size);
+}
+
+/**
+ * seq_buf_get_buf - get buffer to write arbitrary data to
+ * @s: the seq_buf handle
+ * @bufp: the beginning of the buffer is stored here
+ *
+ * Return the number of bytes available in the buffer, or zero if
+ * there's no space.
+ */
+static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
+{
+ WARN_ON(s->len > s->size + 1);
+
+ if (s->len < s->size) {
+ *bufp = s->buffer + s->len;
+ return s->size - s->len;
+ }
+
+ *bufp = NULL;
+ return 0;
+}
+
+/**
+ * seq_buf_commit - commit data to the buffer
+ * @s: the seq_buf handle
+ * @num: the number of bytes to commit
+ *
+ * Commit @num bytes of data written to a buffer previously acquired
+ * by seq_buf_get_buf(). To signal an error condition, or that the data
+ * didn't fit in the available space, pass a negative @num value.
+ */
+static inline void seq_buf_commit(struct seq_buf *s, int num)
+{
+ if (num < 0) {
+ seq_buf_set_overflow(s);
+ } else {
+		/* callers signal overflow with a negative num, so this cannot exceed size */
+ BUG_ON(s->len + num > s->size);
+ s->len += num;
+ }
+}
+
+extern __printf(2, 3)
+int seq_buf_printf(struct seq_buf *s, const char *fmt, ...);
+extern __printf(2, 0)
+int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
+extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
+extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
+ int cnt);
+extern int seq_buf_puts(struct seq_buf *s, const char *str);
+extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
+extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
+extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
+ unsigned int len);
+extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
+
+#ifdef CONFIG_BINARY_PRINTF
+extern int
+seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
+#endif
+
+#endif /* _LINUX_SEQ_BUF_H */
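A short usage sketch of the API above (hypothetical example_* name; pr_warn() assumed from <linux/printk.h>): initialize a seq_buf over a caller-supplied buffer, print into it, then check for truncation and report how much was written.

#include <linux/seq_buf.h>
#include <linux/printk.h>

/* Hypothetical helper: format a status string into buf, at most size bytes. */
static unsigned int example_format_status(unsigned char *buf, unsigned int size)
{
	struct seq_buf s;

	seq_buf_init(&s, buf, size);
	seq_buf_printf(&s, "cpus=%d ", 4);
	seq_buf_puts(&s, "state=ok");

	if (seq_buf_has_overflowed(&s))
		pr_warn("example: status output truncated\n");

	return seq_buf_used(&s);	/* number of valid bytes now in buf */
}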
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
new file mode 100644
index 000000000..afbb1fd77
--- /dev/null
+++ b/include/linux/seq_file.h
@@ -0,0 +1,187 @@
+#ifndef _LINUX_SEQ_FILE_H
+#define _LINUX_SEQ_FILE_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/mutex.h>
+#include <linux/cpumask.h>
+#include <linux/nodemask.h>
+
+struct seq_operations;
+struct file;
+struct path;
+struct inode;
+struct dentry;
+struct user_namespace;
+
+struct seq_file {
+ char *buf;
+ size_t size;
+ size_t from;
+ size_t count;
+ size_t pad_until;
+ loff_t index;
+ loff_t read_pos;
+ u64 version;
+ struct mutex lock;
+ const struct seq_operations *op;
+ int poll_event;
+#ifdef CONFIG_USER_NS
+ struct user_namespace *user_ns;
+#endif
+ void *private;
+};
+
+struct seq_operations {
+ void * (*start) (struct seq_file *m, loff_t *pos);
+ void (*stop) (struct seq_file *m, void *v);
+ void * (*next) (struct seq_file *m, void *v, loff_t *pos);
+ int (*show) (struct seq_file *m, void *v);
+};
+
+#define SEQ_SKIP 1
+
+/**
+ * seq_has_overflowed - check if the buffer has overflowed
+ * @m: the seq_file handle
+ *
+ * seq_files have a buffer which may overflow. When this happens a larger
+ * buffer is reallocated and all the data will be printed again.
+ * The overflow state is true when m->count == m->size.
+ *
+ * Returns true if the buffer received more than it can hold.
+ */
+static inline bool seq_has_overflowed(struct seq_file *m)
+{
+ return m->count == m->size;
+}
+
+/**
+ * seq_get_buf - get buffer to write arbitrary data to
+ * @m: the seq_file handle
+ * @bufp: the beginning of the buffer is stored here
+ *
+ * Return the number of bytes available in the buffer, or zero if
+ * there's no space.
+ */
+static inline size_t seq_get_buf(struct seq_file *m, char **bufp)
+{
+ BUG_ON(m->count > m->size);
+ if (m->count < m->size)
+ *bufp = m->buf + m->count;
+ else
+ *bufp = NULL;
+
+ return m->size - m->count;
+}
+
+/**
+ * seq_commit - commit data to the buffer
+ * @m: the seq_file handle
+ * @num: the number of bytes to commit
+ *
+ * Commit @num bytes of data written to a buffer previously acquired
+ * by seq_get_buf(). To signal an error condition, or that the data
+ * didn't fit in the available space, pass a negative @num value.
+ */
+static inline void seq_commit(struct seq_file *m, int num)
+{
+ if (num < 0) {
+ m->count = m->size;
+ } else {
+ BUG_ON(m->count + num > m->size);
+ m->count += num;
+ }
+}
+
+/**
+ * seq_setwidth - set padding width
+ * @m: the seq_file handle
+ * @size: the max number of bytes to pad.
+ *
+ * Call seq_setwidth() for setting max width, then call seq_printf() etc. and
+ * finally call seq_pad() to pad the remaining bytes.
+ */
+static inline void seq_setwidth(struct seq_file *m, size_t size)
+{
+ m->pad_until = m->count + size;
+}
+void seq_pad(struct seq_file *m, char c);
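+
+/*
+ * Sketch of fixed-width output using seq_setwidth()/seq_pad()
+ * (illustrative only; "name" is a placeholder):
+ *
+ *	seq_setwidth(m, 16);
+ *	seq_printf(m, "%s", name);
+ *	seq_pad(m, '\n');
+ */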
+
+char *mangle_path(char *s, const char *p, const char *esc);
+int seq_open(struct file *, const struct seq_operations *);
+ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
+loff_t seq_lseek(struct file *, loff_t, int);
+int seq_release(struct inode *, struct file *);
+int seq_escape(struct seq_file *, const char *, const char *);
+int seq_putc(struct seq_file *m, char c);
+int seq_puts(struct seq_file *m, const char *s);
+int seq_write(struct seq_file *seq, const void *data, size_t len);
+
+__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
+__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
+
+int seq_path(struct seq_file *, const struct path *, const char *);
+int seq_dentry(struct seq_file *, struct dentry *, const char *);
+int seq_path_root(struct seq_file *m, const struct path *path,
+ const struct path *root, const char *esc);
+
+int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
+int single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t);
+int single_release(struct inode *, struct file *);
+void *__seq_open_private(struct file *, const struct seq_operations *, int);
+int seq_open_private(struct file *, const struct seq_operations *, int);
+int seq_release_private(struct inode *, struct file *);
+int seq_put_decimal_ull(struct seq_file *m, char delimiter,
+ unsigned long long num);
+int seq_put_decimal_ll(struct seq_file *m, char delimiter,
+ long long num);
+
+static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
+{
+#ifdef CONFIG_USER_NS
+ return seq->user_ns;
+#else
+ extern struct user_namespace init_user_ns;
+ return &init_user_ns;
+#endif
+}
+
+#define SEQ_START_TOKEN ((void *)1)
+/*
+ * Helpers for iteration over list_head-s in seq_files
+ */
+
+extern struct list_head *seq_list_start(struct list_head *head,
+ loff_t pos);
+extern struct list_head *seq_list_start_head(struct list_head *head,
+ loff_t pos);
+extern struct list_head *seq_list_next(void *v, struct list_head *head,
+ loff_t *ppos);
+
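+/*
+ * Sketch of a list-backed iterator built on these helpers (illustrative;
+ * my_list, my_lock and struct my_item are placeholders a user would
+ * provide):
+ *
+ *	static void *my_seq_start(struct seq_file *m, loff_t *pos)
+ *	{
+ *		mutex_lock(&my_lock);
+ *		return seq_list_start(&my_list, *pos);
+ *	}
+ *
+ *	static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
+ *	{
+ *		return seq_list_next(v, &my_list, pos);
+ *	}
+ *
+ *	static void my_seq_stop(struct seq_file *m, void *v)
+ *	{
+ *		mutex_unlock(&my_lock);
+ *	}
+ *
+ *	static int my_seq_show(struct seq_file *m, void *v)
+ *	{
+ *		struct my_item *it = list_entry(v, struct my_item, node);
+ *
+ *		seq_printf(m, "%s\n", it->name);
+ *		return 0;
+ *	}
+ *
+ *	static const struct seq_operations my_seq_ops = {
+ *		.start	= my_seq_start,
+ *		.next	= my_seq_next,
+ *		.stop	= my_seq_stop,
+ *		.show	= my_seq_show,
+ *	};
+ */
+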
+/*
+ * Helpers for iteration over hlist_head-s in seq_files
+ */
+
+extern struct hlist_node *seq_hlist_start(struct hlist_head *head,
+ loff_t pos);
+extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head,
+ loff_t pos);
+extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
+ loff_t *ppos);
+
+extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
+ loff_t pos);
+extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
+ loff_t pos);
+extern struct hlist_node *seq_hlist_next_rcu(void *v,
+ struct hlist_head *head,
+ loff_t *ppos);
+
+/* Helpers for iterating over per-cpu hlist_head-s in seq_files */
+extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos);
+
+extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);
+
+#endif
diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h
new file mode 100644
index 000000000..32c89bbe2
--- /dev/null
+++ b/include/linux/seq_file_net.h
@@ -0,0 +1,30 @@
+#ifndef __SEQ_FILE_NET_H__
+#define __SEQ_FILE_NET_H__
+
+#include <linux/seq_file.h>
+
+struct net;
+extern struct net init_net;
+
+struct seq_net_private {
+#ifdef CONFIG_NET_NS
+ struct net *net;
+#endif
+};
+
+int seq_open_net(struct inode *, struct file *,
+ const struct seq_operations *, int);
+int single_open_net(struct inode *, struct file *file,
+ int (*show)(struct seq_file *, void *));
+int seq_release_net(struct inode *, struct file *);
+int single_release_net(struct inode *, struct file *);
+static inline struct net *seq_file_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+ return ((struct seq_net_private *)seq->private)->net;
+#else
+ return &init_net;
+#endif
+}
+
+#endif
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
new file mode 100644
index 000000000..5f68d0a39
--- /dev/null
+++ b/include/linux/seqlock.h
@@ -0,0 +1,478 @@
+#ifndef __LINUX_SEQLOCK_H
+#define __LINUX_SEQLOCK_H
+/*
+ * Reader/writer consistent mechanism without starving writers. This type of
+ * lock is for data where the reader wants a consistent set of information
+ * and is willing to retry if the information changes. There are two types
+ * of readers:
+ * 1. Sequence readers which never block a writer but they may have to retry
+ * if a writer is in progress by detecting change in sequence number.
+ * Writers do not wait for a sequence reader.
+ * 2. Locking readers which will wait if a writer or another locking reader
+ * is in progress. A locking reader in progress will also block a writer
+ * from going forward. Unlike the regular rwlock, the read lock here is
+ * exclusive so that only one locking reader can get it.
+ *
+ * This is not as cache friendly as brlock. Also, this may not work well
+ * for data that contains pointers, because any writer could
+ * invalidate a pointer that a reader was following.
+ *
+ * Expected non-blocking reader usage:
+ * do {
+ * seq = read_seqbegin(&foo);
+ * ...
+ * } while (read_seqretry(&foo, seq));
+ *
+ *
+ * On non-SMP the spin locks disappear but the writer still needs
+ * to increment the sequence variables because an interrupt routine could
+ * change the state of the data.
+ *
+ * Based on x86_64 vsyscall gettimeofday
+ * by Keith Owens and Andrea Arcangeli
+ */
+
+#include <linux/spinlock.h>
+#include <linux/preempt.h>
+#include <linux/lockdep.h>
+#include <asm/processor.h>
+
+/*
+ * Version using sequence counter only.
+ * This can be used when code has its own mutex protecting the
+ * update, which starts before the write_seqcount_begin() and ends
+ * after the write_seqcount_end().
+ */
+typedef struct seqcount {
+ unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} seqcount_t;
+
+static inline void __seqcount_init(seqcount_t *s, const char *name,
+ struct lock_class_key *key)
+{
+ /*
+ * Make sure we are not reinitializing a held lock:
+ */
+ lockdep_init_map(&s->dep_map, name, key, 0);
+ s->sequence = 0;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+ .dep_map = { .name = #lockname } \
+
+# define seqcount_init(s) \
+ do { \
+ static struct lock_class_key __key; \
+ __seqcount_init((s), #s, &__key); \
+ } while (0)
+
+static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
+{
+ seqcount_t *l = (seqcount_t *)s;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
+ seqcount_release(&l->dep_map, 1, _RET_IP_);
+ local_irq_restore(flags);
+}
+
+#else
+# define SEQCOUNT_DEP_MAP_INIT(lockname)
+# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
+# define seqcount_lockdep_reader_access(x)
+#endif
+
+#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+
+
+/**
+ * __read_seqcount_begin - begin a seq-read critical section (without barrier)
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+ * provided before actually loading any of the variables that are to be
+ * protected in this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ */
+static inline unsigned __read_seqcount_begin(const seqcount_t *s)
+{
+ unsigned ret;
+
+repeat:
+ ret = READ_ONCE(s->sequence);
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
+ return ret;
+}
+
+/**
+ * raw_read_seqcount - Read the raw seqcount
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * raw_read_seqcount opens a read critical section of the given
+ * seqcount without any lockdep checking and without checking or
+ * masking the LSB. Calling code is responsible for handling that.
+ */
+static inline unsigned raw_read_seqcount(const seqcount_t *s)
+{
+ unsigned ret = READ_ONCE(s->sequence);
+ smp_rmb();
+ return ret;
+}
+
+/**
+ * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * raw_read_seqcount_begin opens a read critical section of the given
+ * seqcount, but without any lockdep checking. Validity of the critical
+ * section is tested by calling read_seqcount_retry().
+ */
+static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
+{
+ unsigned ret = __read_seqcount_begin(s);
+ smp_rmb();
+ return ret;
+}
+
+/**
+ * read_seqcount_begin - begin a seq-read critical section
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin opens a read critical section of the given seqcount.
+ * Validity of the critical section is tested by calling
+ * read_seqcount_retry().
+ */
+static inline unsigned read_seqcount_begin(const seqcount_t *s)
+{
+ seqcount_lockdep_reader_access(s);
+ return raw_read_seqcount_begin(s);
+}
+
+/**
+ * raw_seqcount_begin - begin a seq-read critical section
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * raw_seqcount_begin opens a read critical section of the given seqcount.
+ * Validity of the critical section is tested by calling
+ * read_seqcount_retry().
+ *
+ * Unlike read_seqcount_begin(), this function will not wait for the count
+ * to stabilize. If a writer is active when we begin, we will fail the
+ * read_seqcount_retry() instead of stabilizing at the beginning of the
+ * critical section.
+ */
+static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+{
+ unsigned ret = READ_ONCE(s->sequence);
+ smp_rmb();
+ return ret & ~1;
+}
+
+/**
+ * __read_seqcount_retry - end a seq-read critical section (without barrier)
+ * @s: pointer to seqcount_t
+ * @start: count, from read_seqcount_begin
+ * Returns: 1 if retry is required, else 0
+ *
+ * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+ * provided before actually loading any of the variables that are to be
+ * protected in this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ */
+static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+{
+ return unlikely(s->sequence != start);
+}
+
+/**
+ * read_seqcount_retry - end a seq-read critical section
+ * @s: pointer to seqcount_t
+ * @start: count, from read_seqcount_begin
+ * Returns: 1 if retry is required, else 0
+ *
+ * read_seqcount_retry closes a read critical section of the given seqcount.
+ * If the critical section was invalid, it must be ignored (and typically
+ * retried).
+ */
+static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+{
+ smp_rmb();
+ return __read_seqcount_retry(s, start);
+}
+
+
+
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+ s->sequence++;
+ smp_wmb();
+}
+
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+ smp_wmb();
+ s->sequence++;
+}
+
+/*
+ * raw_write_seqcount_latch - redirect readers to even/odd copy
+ * @s: pointer to seqcount_t
+ */
+static inline void raw_write_seqcount_latch(seqcount_t *s)
+{
+ smp_wmb(); /* prior stores before incrementing "sequence" */
+ s->sequence++;
+ smp_wmb(); /* increment "sequence" before following stores */
+}
+
+/*
+ * Sequence counter only version assumes that callers are using their
+ * own mutexing.
+ */
+static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
+{
+ raw_write_seqcount_begin(s);
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+ write_seqcount_begin_nested(s, 0);
+}
+
+static inline void write_seqcount_end(seqcount_t *s)
+{
+ seqcount_release(&s->dep_map, 1, _RET_IP_);
+ raw_write_seqcount_end(s);
+}
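+
+/*
+ * Typical write side for a seqcount protected by the caller's own lock
+ * (illustrative; my_lock, my_seq and my_data are placeholders):
+ *
+ *	spin_lock(&my_lock);
+ *	write_seqcount_begin(&my_seq);
+ *	my_data.a = ...;
+ *	my_data.b = ...;
+ *	write_seqcount_end(&my_seq);
+ *	spin_unlock(&my_lock);
+ *
+ * Readers pair read_seqcount_begin() with read_seqcount_retry() in a
+ * retry loop, as described above.
+ */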
+
+/**
+ * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * @s: pointer to seqcount_t
+ *
+ * After write_seqcount_barrier, no read-side seq operations will complete
+ * successfully and see data older than this.
+ */
+static inline void write_seqcount_barrier(seqcount_t *s)
+{
+ smp_wmb();
+ s->sequence+=2;
+}
+
+typedef struct {
+ struct seqcount seqcount;
+ spinlock_t lock;
+} seqlock_t;
+
+/*
+ * These macros triggered gcc-3.x compile-time problems. We think these are
+ * OK now. Be cautious.
+ */
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_ZERO(lockname), \
+ .lock = __SPIN_LOCK_UNLOCKED(lockname) \
+ }
+
+#define seqlock_init(x) \
+ do { \
+ seqcount_init(&(x)->seqcount); \
+ spin_lock_init(&(x)->lock); \
+ } while (0)
+
+#define DEFINE_SEQLOCK(x) \
+ seqlock_t x = __SEQLOCK_UNLOCKED(x)
+
+/*
+ * Read side functions for starting and finalizing a read side section.
+ */
+static inline unsigned read_seqbegin(const seqlock_t *sl)
+{
+ return read_seqcount_begin(&sl->seqcount);
+}
+
+static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+{
+ return read_seqcount_retry(&sl->seqcount, start);
+}
+
+/*
+ * Lock out other writers and update the count.
+ * Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void write_seqlock(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock(seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ spin_unlock(&sl->lock);
+}
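+
+/*
+ * Sketch of seqlock_t usage (illustrative; my_lock and the shared
+ * variables are placeholders).  Writer side:
+ *
+ *	static DEFINE_SEQLOCK(my_lock);
+ *
+ *	write_seqlock(&my_lock);
+ *	shared_a = ...;
+ *	shared_b = ...;
+ *	write_sequnlock(&my_lock);
+ *
+ * Reader side:
+ *
+ *	unsigned seq;
+ *
+ *	do {
+ *		seq = read_seqbegin(&my_lock);
+ *		a = shared_a;
+ *		b = shared_b;
+ *	} while (read_seqretry(&my_lock, seq));
+ */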
+
+static inline void write_seqlock_bh(seqlock_t *sl)
+{
+ spin_lock_bh(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_bh(seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ spin_unlock_bh(&sl->lock);
+}
+
+static inline void write_seqlock_irq(seqlock_t *sl)
+{
+ spin_lock_irq(&sl->lock);
+ write_seqcount_begin(&sl->seqcount);
+}
+
+static inline void write_sequnlock_irq(seqlock_t *sl)
+{
+ write_seqcount_end(&sl->seqcount);
+ spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+ write_seqcount_begin(&sl->seqcount);
+ return flags;
+}
+
+#define write_seqlock_irqsave(lock, flags) \
+ do { flags = __write_seqlock_irqsave(lock); } while (0)
+
+static inline void
+write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+ write_seqcount_end(&sl->seqcount);
+ spin_unlock_irqrestore(&sl->lock, flags);
+}
+
+/*
+ * A locking reader exclusively locks out other writers and locking readers,
+ * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
+ * Don't need preempt_disable() because that is in the spin_lock already.
+ */
+static inline void read_seqlock_excl(seqlock_t *sl)
+{
+ spin_lock(&sl->lock);
+}
+
+static inline void read_sequnlock_excl(seqlock_t *sl)
+{
+ spin_unlock(&sl->lock);
+}
+
+/**
+ * read_seqbegin_or_lock - begin a sequence number check or locking block
+ * @lock: sequence lock
+ * @seq : sequence number to be checked
+ *
+ * First try it once optimistically without taking the lock. If that fails,
+ * take the lock. The sequence number is also used as a marker for deciding
+ * whether to be a lockless reader (even) or a locking reader (odd).
+ * N.B. seq must be initialized to an even number to begin with.
+ */
+static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+{
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl(lock);
+}
+
+static inline int need_seqretry(seqlock_t *lock, int seq)
+{
+ return !(seq & 1) && read_seqretry(lock, seq);
+}
+
+static inline void done_seqretry(seqlock_t *lock, int seq)
+{
+ if (seq & 1)
+ read_sequnlock_excl(lock);
+}
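+
+/*
+ * Sketch of the optimistic-then-locking read pattern (illustrative;
+ * my_lock is a placeholder seqlock_t, and setting seq to an odd value
+ * makes the next pass take the lock):
+ *
+ *	int seq = 0;
+ *
+ *retry:
+ *	read_seqbegin_or_lock(&my_lock, &seq);
+ *	... read the protected data ...
+ *	if (need_seqretry(&my_lock, seq)) {
+ *		seq = 1;
+ *		goto retry;
+ *	}
+ *	done_seqretry(&my_lock, seq);
+ */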
+
+static inline void read_seqlock_excl_bh(seqlock_t *sl)
+{
+ spin_lock_bh(&sl->lock);
+}
+
+static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+{
+ spin_unlock_bh(&sl->lock);
+}
+
+static inline void read_seqlock_excl_irq(seqlock_t *sl)
+{
+ spin_lock_irq(&sl->lock);
+}
+
+static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+{
+ spin_unlock_irq(&sl->lock);
+}
+
+static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+ return flags;
+}
+
+#define read_seqlock_excl_irqsave(lock, flags) \
+ do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
+
+static inline void
+read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+{
+ spin_unlock_irqrestore(&sl->lock, flags);
+}
+
+static inline unsigned long
+read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+{
+ unsigned long flags = 0;
+
+ if (!(*seq & 1)) /* Even */
+ *seq = read_seqbegin(lock);
+ else /* Odd */
+ read_seqlock_excl_irqsave(lock, flags);
+
+ return flags;
+}
+
+static inline void
+done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+{
+ if (seq & 1)
+ read_sequnlock_excl_irqrestore(lock, flags);
+}
+#endif /* __LINUX_SEQLOCK_H */
diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h
new file mode 100644
index 000000000..a1ba6a5cc
--- /dev/null
+++ b/include/linux/seqno-fence.h
@@ -0,0 +1,117 @@
+/*
+ * seqno-fence, using a dma-buf to synchronize fencing
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2012 Canonical Ltd
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_SEQNO_FENCE_H
+#define __LINUX_SEQNO_FENCE_H
+
+#include <linux/fence.h>
+#include <linux/dma-buf.h>
+
+enum seqno_fence_condition {
+ SEQNO_FENCE_WAIT_GEQUAL,
+ SEQNO_FENCE_WAIT_NONZERO
+};
+
+struct seqno_fence {
+ struct fence base;
+
+ const struct fence_ops *ops;
+ struct dma_buf *sync_buf;
+ uint32_t seqno_ofs;
+ enum seqno_fence_condition condition;
+};
+
+extern const struct fence_ops seqno_fence_ops;
+
+/**
+ * to_seqno_fence - cast a fence to a seqno_fence
+ * @fence: fence to cast to a seqno_fence
+ *
+ * Returns the seqno_fence if @fence is a seqno_fence,
+ * or NULL otherwise.
+ */
+static inline struct seqno_fence *
+to_seqno_fence(struct fence *fence)
+{
+ if (fence->ops != &seqno_fence_ops)
+ return NULL;
+ return container_of(fence, struct seqno_fence, base);
+}
+
+/**
+ * seqno_fence_init - initialize a seqno fence
+ * @fence: seqno_fence to initialize
+ * @lock: pointer to spinlock to use for fence
+ * @sync_buf: buffer containing the memory location to signal on
+ * @context: the execution context this fence is a part of
+ * @seqno_ofs: the offset within @sync_buf
+ * @seqno: the sequence # to signal on
+ * @cond: fence wait condition
+ * @ops: the fence_ops for operations on this seqno fence
+ *
+ * This function initializes a struct seqno_fence with passed parameters,
+ * and takes a reference on sync_buf which is released on fence destruction.
+ *
+ * A seqno_fence is a dma_fence which can complete in software when
+ * enable_signaling is called, but it also completes when
+ * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true
+ *
+ * The seqno_fence will take a refcount on the sync_buf until it's
+ * destroyed, but the actual lifetime of sync_buf may be longer if one of
+ * the callers takes a reference to it.
+ *
+ * Certain hardware has instructions to insert this type of wait condition
+ * in the command stream, so no intervention from software is needed.
+ * This type of fence can be destroyed before it completes; however, a
+ * reference on the sync_buf dma-buf can be taken. It is encouraged to
+ * re-use the same dma-buf for sync_buf, since mapping or unmapping the
+ * sync_buf into the device's vm can be expensive.
+ *
+ * It is recommended for creators of seqno_fence to call fence_signal
+ * before destruction. This will prevent possible issues from wraparound at
+ * time of issue vs time of check, since users can check fence_is_signaled
+ * before submitting instructions for the hardware to wait on the fence.
+ * However, when ops.enable_signaling is not called, it doesn't have to be
+ * done as soon as possible, just before there's any real danger of seqno
+ * wraparound.
+ */
+static inline void
+seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
+ struct dma_buf *sync_buf, uint32_t context,
+ uint32_t seqno_ofs, uint32_t seqno,
+ enum seqno_fence_condition cond,
+ const struct fence_ops *ops)
+{
+ BUG_ON(!fence || !sync_buf || !ops);
+ BUG_ON(!ops->wait || !ops->enable_signaling ||
+ !ops->get_driver_name || !ops->get_timeline_name);
+
+ /*
+ * ops is used in fence_init for get_driver_name, so needs to be
+ * initialized first
+ */
+ fence->ops = ops;
+ fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
+ get_dma_buf(sync_buf);
+ fence->sync_buf = sync_buf;
+ fence->seqno_ofs = seqno_ofs;
+ fence->condition = cond;
+}
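+
+/*
+ * Sketch of a caller (hypothetical driver code; my_lock, my_sync_buf,
+ * my_context, my_seqno_ofs, my_seqno and my_fence_ops are placeholders
+ * the driver would provide):
+ *
+ *	struct seqno_fence *sf = kmalloc(sizeof(*sf), GFP_KERNEL);
+ *
+ *	if (sf)
+ *		seqno_fence_init(sf, &my_lock, my_sync_buf, my_context,
+ *				 my_seqno_ofs, my_seqno,
+ *				 SEQNO_FENCE_WAIT_GEQUAL, &my_fence_ops);
+ */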
+
+#endif /* __LINUX_SEQNO_FENCE_H */
diff --git a/include/linux/serial.h b/include/linux/serial.h
new file mode 100644
index 000000000..0916107c7
--- /dev/null
+++ b/include/linux/serial.h
@@ -0,0 +1,33 @@
+/*
+ * include/linux/serial.h
+ *
+ * Copyright (C) 1992 by Theodore Ts'o.
+ *
+ * Redistribution of this file is permitted under the terms of the GNU
+ * Public License (GPL)
+ */
+#ifndef _LINUX_SERIAL_H
+#define _LINUX_SERIAL_H
+
+#include <asm/page.h>
+#include <uapi/linux/serial.h>
+
+
+/*
+ * Counters of the input lines (CTS, DSR, RI, CD) interrupts
+ */
+
+struct async_icount {
+ __u32 cts, dsr, rng, dcd, tx, rx;
+ __u32 frame, parity, overrun, brk;
+ __u32 buf_overrun;
+};
+
+/*
+ * The size of the serial xmit buffer is 1 page, i.e. PAGE_SIZE bytes
+ */
+#define SERIAL_XMIT_SIZE PAGE_SIZE
+
+#include <linux/compiler.h>
+
+#endif /* _LINUX_SERIAL_H */
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
new file mode 100644
index 000000000..78097e7a3
--- /dev/null
+++ b/include/linux/serial_8250.h
@@ -0,0 +1,157 @@
+/*
+ * linux/include/linux/serial_8250.h
+ *
+ * Copyright (C) 2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _LINUX_SERIAL_8250_H
+#define _LINUX_SERIAL_8250_H
+
+#include <linux/serial_core.h>
+#include <linux/platform_device.h>
+
+/*
+ * This is the platform device platform_data structure
+ */
+struct plat_serial8250_port {
+ unsigned long iobase; /* io base address */
+ void __iomem *membase; /* ioremap cookie or NULL */
+ resource_size_t mapbase; /* resource base */
+ unsigned int irq; /* interrupt number */
+ unsigned long irqflags; /* request_irq flags */
+ unsigned int uartclk; /* UART clock rate */
+ void *private_data;
+ unsigned char regshift; /* register shift */
+ unsigned char iotype; /* UPIO_* */
+ unsigned char hub6;
+ upf_t flags; /* UPF_* flags */
+ unsigned int type; /* If UPF_FIXED_TYPE */
+ unsigned int (*serial_in)(struct uart_port *, int);
+ void (*serial_out)(struct uart_port *, int, int);
+ void (*set_termios)(struct uart_port *,
+ struct ktermios *new,
+ struct ktermios *old);
+ int (*handle_irq)(struct uart_port *);
+ void (*pm)(struct uart_port *, unsigned int state,
+ unsigned old);
+ void (*handle_break)(struct uart_port *);
+};
+
+/*
+ * Allocate 8250 platform device IDs. Nothing is implied by
+ * the numbering here, except for the legacy entry being -1.
+ */
+enum {
+ PLAT8250_DEV_LEGACY = -1,
+ PLAT8250_DEV_PLATFORM,
+ PLAT8250_DEV_PLATFORM1,
+ PLAT8250_DEV_PLATFORM2,
+ PLAT8250_DEV_FOURPORT,
+ PLAT8250_DEV_ACCENT,
+ PLAT8250_DEV_BOCA,
+ PLAT8250_DEV_EXAR_ST16C554,
+ PLAT8250_DEV_HUB6,
+ PLAT8250_DEV_AU1X00,
+ PLAT8250_DEV_SM501,
+};
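+
+/*
+ * Sketch of board code registering ports via platform data (illustrative;
+ * the base address, irq and clock are placeholders, and the array is
+ * terminated by a zero-filled entry):
+ *
+ *	static struct plat_serial8250_port my_8250_data[] = {
+ *		{
+ *			.mapbase	= 0x10000000,
+ *			.irq		= 42,
+ *			.uartclk	= 1843200,
+ *			.iotype		= UPIO_MEM,
+ *			.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP,
+ *		},
+ *		{ },
+ *	};
+ *
+ *	static struct platform_device my_8250_device = {
+ *		.name	= "serial8250",
+ *		.id	= PLAT8250_DEV_PLATFORM,
+ *		.dev	= {
+ *			.platform_data	= my_8250_data,
+ *		},
+ *	};
+ */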
+
+struct uart_8250_dma;
+struct uart_8250_port;
+
+/**
+ * 8250 core driver operations
+ *
+ * @setup_irq() Setup irq handling. The universal 8250 driver links this
+ * port to the irq chain. Other drivers may @request_irq().
+ * @release_irq() Undo irq handling. The universal 8250 driver unlinks
+ * the port from the irq chain.
+ */
+struct uart_8250_ops {
+ int (*setup_irq)(struct uart_8250_port *);
+ void (*release_irq)(struct uart_8250_port *);
+};
+
+/*
+ * This should be used by drivers which want to register
+ * their own 8250 ports without registering their own
+ * platform device. Using these will make your driver
+ * dependent on the 8250 driver.
+ */
+
+struct uart_8250_port {
+ struct uart_port port;
+ struct timer_list timer; /* "no irq" timer */
+ struct list_head list; /* ports on this IRQ */
+ unsigned short capabilities; /* port capabilities */
+ unsigned short bugs; /* port bugs */
+ bool fifo_bug; /* min RX trigger if enabled */
+ unsigned int tx_loadsz; /* transmit fifo load size */
+ unsigned char acr;
+ unsigned char fcr;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char mcr_mask; /* mask of user bits */
+ unsigned char mcr_force; /* mask of forced bits */
+ unsigned char cur_iotype; /* Running I/O type */
+ unsigned int rpm_tx_active;
+ unsigned char canary; /* non-zero during system sleep
+ * if no_console_suspend
+ */
+ unsigned char probe;
+#define UART_PROBE_RSA (1 << 0)
+
+ /*
+ * Some bits in registers are cleared on a read, so they must
+ * be saved whenever the register is read but the bits will not
+ * be immediately processed.
+ */
+#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
+ unsigned char lsr_saved_flags;
+#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+ unsigned char msr_saved_flags;
+
+ struct uart_8250_dma *dma;
+ const struct uart_8250_ops *ops;
+
+ /* 8250 specific callbacks */
+ int (*dl_read)(struct uart_8250_port *);
+ void (*dl_write)(struct uart_8250_port *, int);
+};
+
+static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
+{
+ return container_of(up, struct uart_8250_port, port);
+}
+
+int serial8250_register_8250_port(struct uart_8250_port *);
+void serial8250_unregister_port(int line);
+void serial8250_suspend_port(int line);
+void serial8250_resume_port(int line);
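+
+/*
+ * Sketch of a driver registering its own 8250 port directly (illustrative;
+ * the I/O resources are placeholders).  The returned line number is later
+ * passed to serial8250_unregister_port():
+ *
+ *	struct uart_8250_port uart = { };
+ *	int line;
+ *
+ *	uart.port.iobase  = 0x3f8;
+ *	uart.port.irq     = 4;
+ *	uart.port.uartclk = 1843200;
+ *	uart.port.iotype  = UPIO_PORT;
+ *	uart.port.flags   = UPF_BOOT_AUTOCONF;
+ *	line = serial8250_register_8250_port(&uart);
+ */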
+
+extern int early_serial_setup(struct uart_port *port);
+
+extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
+extern void serial8250_early_out(struct uart_port *port, int offset, int value);
+extern void serial8250_do_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old);
+extern int serial8250_do_startup(struct uart_port *port);
+extern void serial8250_do_shutdown(struct uart_port *port);
+extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate);
+extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
+extern int fsl8250_handle_irq(struct uart_port *port);
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
+unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
+void serial8250_tx_chars(struct uart_8250_port *up);
+unsigned int serial8250_modem_status(struct uart_8250_port *up);
+
+extern void serial8250_set_isa_configurator(void (*v)
+ (int port, struct uart_port *up,
+ unsigned short *capabilities));
+
+#endif
diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h
new file mode 100644
index 000000000..570e964dc
--- /dev/null
+++ b/include/linux/serial_bcm63xx.h
@@ -0,0 +1,119 @@
+#ifndef _LINUX_SERIAL_BCM63XX_H
+#define _LINUX_SERIAL_BCM63XX_H
+
+/* UART Control Register */
+#define UART_CTL_REG 0x0
+#define UART_CTL_RXTMOUTCNT_SHIFT 0
+#define UART_CTL_RXTMOUTCNT_MASK (0x1f << UART_CTL_RXTMOUTCNT_SHIFT)
+#define UART_CTL_RSTTXDN_SHIFT 5
+#define UART_CTL_RSTTXDN_MASK (1 << UART_CTL_RSTTXDN_SHIFT)
+#define UART_CTL_RSTRXFIFO_SHIFT 6
+#define UART_CTL_RSTRXFIFO_MASK (1 << UART_CTL_RSTRXFIFO_SHIFT)
+#define UART_CTL_RSTTXFIFO_SHIFT 7
+#define UART_CTL_RSTTXFIFO_MASK (1 << UART_CTL_RSTTXFIFO_SHIFT)
+#define UART_CTL_STOPBITS_SHIFT 8
+#define UART_CTL_STOPBITS_MASK (0xf << UART_CTL_STOPBITS_SHIFT)
+#define UART_CTL_STOPBITS_1 (0x7 << UART_CTL_STOPBITS_SHIFT)
+#define UART_CTL_STOPBITS_2 (0xf << UART_CTL_STOPBITS_SHIFT)
+#define UART_CTL_BITSPERSYM_SHIFT 12
+#define UART_CTL_BITSPERSYM_MASK (0x3 << UART_CTL_BITSPERSYM_SHIFT)
+#define UART_CTL_XMITBRK_SHIFT 14
+#define UART_CTL_XMITBRK_MASK (1 << UART_CTL_XMITBRK_SHIFT)
+#define UART_CTL_RSVD_SHIFT 15
+#define UART_CTL_RSVD_MASK (1 << UART_CTL_RSVD_SHIFT)
+#define UART_CTL_RXPAREVEN_SHIFT 16
+#define UART_CTL_RXPAREVEN_MASK (1 << UART_CTL_RXPAREVEN_SHIFT)
+#define UART_CTL_RXPAREN_SHIFT 17
+#define UART_CTL_RXPAREN_MASK (1 << UART_CTL_RXPAREN_SHIFT)
+#define UART_CTL_TXPAREVEN_SHIFT 18
+#define UART_CTL_TXPAREVEN_MASK (1 << UART_CTL_TXPAREVEN_SHIFT)
+#define UART_CTL_TXPAREN_SHIFT 18
+#define UART_CTL_TXPAREN_MASK (1 << UART_CTL_TXPAREN_SHIFT)
+#define UART_CTL_LOOPBACK_SHIFT 20
+#define UART_CTL_LOOPBACK_MASK (1 << UART_CTL_LOOPBACK_SHIFT)
+#define UART_CTL_RXEN_SHIFT 21
+#define UART_CTL_RXEN_MASK (1 << UART_CTL_RXEN_SHIFT)
+#define UART_CTL_TXEN_SHIFT 22
+#define UART_CTL_TXEN_MASK (1 << UART_CTL_TXEN_SHIFT)
+#define UART_CTL_BRGEN_SHIFT 23
+#define UART_CTL_BRGEN_MASK (1 << UART_CTL_BRGEN_SHIFT)
+
+/* UART Baudword register */
+#define UART_BAUD_REG 0x4
+
+/* UART Misc Control register */
+#define UART_MCTL_REG 0x8
+#define UART_MCTL_DTR_SHIFT 0
+#define UART_MCTL_DTR_MASK (1 << UART_MCTL_DTR_SHIFT)
+#define UART_MCTL_RTS_SHIFT 1
+#define UART_MCTL_RTS_MASK (1 << UART_MCTL_RTS_SHIFT)
+#define UART_MCTL_RXFIFOTHRESH_SHIFT 8
+#define UART_MCTL_RXFIFOTHRESH_MASK (0xf << UART_MCTL_RXFIFOTHRESH_SHIFT)
+#define UART_MCTL_TXFIFOTHRESH_SHIFT 12
+#define UART_MCTL_TXFIFOTHRESH_MASK (0xf << UART_MCTL_TXFIFOTHRESH_SHIFT)
+#define UART_MCTL_RXFIFOFILL_SHIFT 16
+#define UART_MCTL_RXFIFOFILL_MASK (0x1f << UART_MCTL_RXFIFOFILL_SHIFT)
+#define UART_MCTL_TXFIFOFILL_SHIFT 24
+#define UART_MCTL_TXFIFOFILL_MASK (0x1f << UART_MCTL_TXFIFOFILL_SHIFT)
+
+/* UART External Input Configuration register */
+#define UART_EXTINP_REG 0xc
+#define UART_EXTINP_RI_SHIFT 0
+#define UART_EXTINP_RI_MASK (1 << UART_EXTINP_RI_SHIFT)
+#define UART_EXTINP_CTS_SHIFT 1
+#define UART_EXTINP_CTS_MASK (1 << UART_EXTINP_CTS_SHIFT)
+#define UART_EXTINP_DCD_SHIFT 2
+#define UART_EXTINP_DCD_MASK (1 << UART_EXTINP_DCD_SHIFT)
+#define UART_EXTINP_DSR_SHIFT 3
+#define UART_EXTINP_DSR_MASK (1 << UART_EXTINP_DSR_SHIFT)
+#define UART_EXTINP_IRSTAT(x) (1 << (x + 4))
+#define UART_EXTINP_IRMASK(x) (1 << (x + 8))
+#define UART_EXTINP_IR_RI 0
+#define UART_EXTINP_IR_CTS 1
+#define UART_EXTINP_IR_DCD 2
+#define UART_EXTINP_IR_DSR 3
+#define UART_EXTINP_RI_NOSENSE_SHIFT 16
+#define UART_EXTINP_RI_NOSENSE_MASK (1 << UART_EXTINP_RI_NOSENSE_SHIFT)
+#define UART_EXTINP_CTS_NOSENSE_SHIFT 17
+#define UART_EXTINP_CTS_NOSENSE_MASK (1 << UART_EXTINP_CTS_NOSENSE_SHIFT)
+#define UART_EXTINP_DCD_NOSENSE_SHIFT 18
+#define UART_EXTINP_DCD_NOSENSE_MASK (1 << UART_EXTINP_DCD_NOSENSE_SHIFT)
+#define UART_EXTINP_DSR_NOSENSE_SHIFT 19
+#define UART_EXTINP_DSR_NOSENSE_MASK (1 << UART_EXTINP_DSR_NOSENSE_SHIFT)
+
+/* UART Interrupt register */
+#define UART_IR_REG 0x10
+#define UART_IR_MASK(x) (1 << (x + 16))
+#define UART_IR_STAT(x) (1 << (x))
+#define UART_IR_EXTIP 0
+#define UART_IR_TXUNDER 1
+#define UART_IR_TXOVER 2
+#define UART_IR_TXTRESH 3
+#define UART_IR_TXRDLATCH 4
+#define UART_IR_TXEMPTY 5
+#define UART_IR_RXUNDER 6
+#define UART_IR_RXOVER 7
+#define UART_IR_RXTIMEOUT 8
+#define UART_IR_RXFULL 9
+#define UART_IR_RXTHRESH 10
+#define UART_IR_RXNOTEMPTY 11
+#define UART_IR_RXFRAMEERR 12
+#define UART_IR_RXPARERR 13
+#define UART_IR_RXBRK 14
+#define UART_IR_TXDONE 15
+
+/* UART Fifo register */
+#define UART_FIFO_REG 0x14
+#define UART_FIFO_VALID_SHIFT 0
+#define UART_FIFO_VALID_MASK 0xff
+#define UART_FIFO_FRAMEERR_SHIFT 8
+#define UART_FIFO_FRAMEERR_MASK (1 << UART_FIFO_FRAMEERR_SHIFT)
+#define UART_FIFO_PARERR_SHIFT 9
+#define UART_FIFO_PARERR_MASK (1 << UART_FIFO_PARERR_SHIFT)
+#define UART_FIFO_BRKDET_SHIFT 10
+#define UART_FIFO_BRKDET_MASK (1 << UART_FIFO_BRKDET_SHIFT)
+#define UART_FIFO_ANYERR_MASK (UART_FIFO_FRAMEERR_MASK | \
+ UART_FIFO_PARERR_MASK | \
+ UART_FIFO_BRKDET_MASK)
+
+#endif /* _LINUX_SERIAL_BCM63XX_H */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
new file mode 100644
index 000000000..025dad9dc
--- /dev/null
+++ b/include/linux/serial_core.h
@@ -0,0 +1,478 @@
+/*
+ * linux/drivers/char/serial_core.h
+ *
+ * Copyright (C) 2000 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef LINUX_SERIAL_CORE_H
+#define LINUX_SERIAL_CORE_H
+
+
+#include <linux/compiler.h>
+#include <linux/interrupt.h>
+#include <linux/circ_buf.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/mutex.h>
+#include <linux/sysrq.h>
+#include <uapi/linux/serial_core.h>
+
+#ifdef CONFIG_SERIAL_CORE_CONSOLE
+#define uart_console(port) \
+ ((port)->cons && (port)->cons->index == (port)->line)
+#else
+#define uart_console(port) (0)
+#endif
+
+struct uart_port;
+struct serial_struct;
+struct device;
+
+/*
+ * This structure describes all the operations that can be done on the
+ * physical hardware. See Documentation/serial/driver for details.
+ */
+struct uart_ops {
+ unsigned int (*tx_empty)(struct uart_port *);
+ void (*set_mctrl)(struct uart_port *, unsigned int mctrl);
+ unsigned int (*get_mctrl)(struct uart_port *);
+ void (*stop_tx)(struct uart_port *);
+ void (*start_tx)(struct uart_port *);
+ void (*throttle)(struct uart_port *);
+ void (*unthrottle)(struct uart_port *);
+ void (*send_xchar)(struct uart_port *, char ch);
+ void (*stop_rx)(struct uart_port *);
+ void (*enable_ms)(struct uart_port *);
+ void (*break_ctl)(struct uart_port *, int ctl);
+ int (*startup)(struct uart_port *);
+ void (*shutdown)(struct uart_port *);
+ void (*flush_buffer)(struct uart_port *);
+ void (*set_termios)(struct uart_port *, struct ktermios *new,
+ struct ktermios *old);
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
+ void (*pm)(struct uart_port *, unsigned int state,
+ unsigned int oldstate);
+
+ /*
+ * Return a string describing the type of the port
+ */
+ const char *(*type)(struct uart_port *);
+
+ /*
+ * Release IO and memory resources used by the port.
+ * This includes iounmap if necessary.
+ */
+ void (*release_port)(struct uart_port *);
+
+ /*
+ * Request IO and memory resources used by the port.
+ * This includes iomapping the port if necessary.
+ */
+ int (*request_port)(struct uart_port *);
+ void (*config_port)(struct uart_port *, int);
+ int (*verify_port)(struct uart_port *, struct serial_struct *);
+ int (*ioctl)(struct uart_port *, unsigned int, unsigned long);
+#ifdef CONFIG_CONSOLE_POLL
+ int (*poll_init)(struct uart_port *);
+ void (*poll_put_char)(struct uart_port *, unsigned char);
+ int (*poll_get_char)(struct uart_port *);
+#endif
+};
+
+#define NO_POLL_CHAR 0x00ff0000
+#define UART_CONFIG_TYPE (1 << 0)
+#define UART_CONFIG_IRQ (1 << 1)
+
+struct uart_icount {
+ __u32 cts;
+ __u32 dsr;
+ __u32 rng;
+ __u32 dcd;
+ __u32 rx;
+ __u32 tx;
+ __u32 frame;
+ __u32 overrun;
+ __u32 parity;
+ __u32 brk;
+ __u32 buf_overrun;
+};
+
+typedef unsigned int __bitwise__ upf_t;
+typedef unsigned int __bitwise__ upstat_t;
+
+struct uart_port {
+ spinlock_t lock; /* port lock */
+ unsigned long iobase; /* in/out[bwl] */
+ unsigned char __iomem *membase; /* read/write[bwl] */
+ unsigned int (*serial_in)(struct uart_port *, int);
+ void (*serial_out)(struct uart_port *, int, int);
+ void (*set_termios)(struct uart_port *,
+ struct ktermios *new,
+ struct ktermios *old);
+ void (*set_mctrl)(struct uart_port *, unsigned int);
+ int (*startup)(struct uart_port *port);
+ void (*shutdown)(struct uart_port *port);
+ void (*throttle)(struct uart_port *port);
+ void (*unthrottle)(struct uart_port *port);
+ int (*handle_irq)(struct uart_port *);
+ void (*pm)(struct uart_port *, unsigned int state,
+ unsigned int old);
+ void (*handle_break)(struct uart_port *);
+ int (*rs485_config)(struct uart_port *,
+ struct serial_rs485 *rs485);
+ unsigned int irq; /* irq number */
+ unsigned long irqflags; /* irq flags */
+ unsigned int uartclk; /* base uart clock */
+ unsigned int fifosize; /* tx fifo size */
+ unsigned char x_char; /* xon/xoff char */
+ unsigned char regshift; /* reg offset shift */
+ unsigned char iotype; /* io access style */
+ unsigned char unused1;
+
+#define UPIO_PORT (SERIAL_IO_PORT) /* 8b I/O port access */
+#define UPIO_HUB6 (SERIAL_IO_HUB6) /* Hub6 ISA card */
+#define UPIO_MEM (SERIAL_IO_MEM) /* 8b MMIO access */
+#define UPIO_MEM32 (SERIAL_IO_MEM32) /* 32b little endian */
+#define UPIO_AU (SERIAL_IO_AU) /* Au1x00 and RT288x type IO */
+#define UPIO_TSI (SERIAL_IO_TSI) /* Tsi108/109 type IO */
+#define UPIO_MEM32BE (SERIAL_IO_MEM32BE) /* 32b big endian */
+
+ unsigned int read_status_mask; /* driver specific */
+ unsigned int ignore_status_mask; /* driver specific */
+ struct uart_state *state; /* pointer to parent state */
+ struct uart_icount icount; /* statistics */
+
+ struct console *cons; /* struct console, if any */
+#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
+ unsigned long sysrq; /* sysrq timeout */
+#endif
+
+ /* flags must be updated while holding port mutex */
+ upf_t flags;
+
+ /*
+ * These flags must be equivalent to the flags defined in
+ * include/uapi/linux/tty_flags.h which are the userspace definitions
+ * assigned from the serial_struct flags in uart_set_info()
+ * [for bit definitions in the UPF_CHANGE_MASK]
+ *
+ * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
+ * except bit 15 (UPF_NO_TXEN_TEST) which is masked off.
+ * The remaining bits are serial-core specific and not modifiable by
+ * userspace.
+ */
+#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
+#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
+#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
+#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
+#define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ )
+#define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ )
+#define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ )
+#define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ )
+#define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ )
+#define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ )
+#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ )
+#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ )
+#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
+#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
+#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
+
+/* Port has hardware-assisted h/w flow control */
+#define UPF_AUTO_CTS ((__force upf_t) (1 << 20))
+#define UPF_AUTO_RTS ((__force upf_t) (1 << 21))
+#define UPF_HARD_FLOW ((__force upf_t) (UPF_AUTO_CTS | UPF_AUTO_RTS))
+/* Port has hardware-assisted s/w flow control */
+#define UPF_SOFT_FLOW ((__force upf_t) (1 << 22))
+#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
+#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
+#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
+#define UPF_BUG_THRE ((__force upf_t) (1 << 26))
+/* The exact UART type is known and should not be probed. */
+#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
+#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
+#define UPF_FIXED_PORT ((__force upf_t) (1 << 29))
+#define UPF_DEAD ((__force upf_t) (1 << 30))
+#define UPF_IOREMAP ((__force upf_t) (1 << 31))
+
+#define __UPF_CHANGE_MASK 0x17fff
+#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
+#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY))
+
+#if __UPF_CHANGE_MASK > ASYNC_FLAGS
+#error Change mask not equivalent to userspace-visible bit defines
+#endif
+
+ /*
+ * Must hold termios_rwsem, port mutex and port lock to change;
+ * can hold any one lock to read.
+ */
+ upstat_t status;
+
+#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0))
+#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1))
+#define UPSTAT_AUTORTS ((__force upstat_t) (1 << 2))
+#define UPSTAT_AUTOCTS ((__force upstat_t) (1 << 3))
+#define UPSTAT_AUTOXOFF ((__force upstat_t) (1 << 4))
+
+ int hw_stopped; /* sw-assisted CTS flow state */
+ unsigned int mctrl; /* current modem ctrl settings */
+ unsigned int timeout; /* character-based timeout */
+ unsigned int type; /* port type */
+ const struct uart_ops *ops;
+ unsigned int custom_divisor;
+ unsigned int line; /* port index */
+ unsigned int minor;
+ resource_size_t mapbase; /* for ioremap */
+ resource_size_t mapsize;
+ struct device *dev; /* parent device */
+ unsigned char hub6; /* this should be in the 8250 driver */
+ unsigned char suspended;
+ unsigned char irq_wake;
+ unsigned char unused[2];
+ struct attribute_group *attr_group; /* port specific attributes */
+ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
+ struct serial_rs485 rs485;
+ void *private_data; /* generic platform data pointer */
+};
+
+static inline int serial_port_in(struct uart_port *up, int offset)
+{
+ return up->serial_in(up, offset);
+}
+
+static inline void serial_port_out(struct uart_port *up, int offset, int value)
+{
+ up->serial_out(up, offset, value);
+}
+
+/**
+ * enum uart_pm_state - power states for UARTs
+ * @UART_PM_STATE_ON: UART is powered up and operational
+ * @UART_PM_STATE_OFF: UART is powered off
+ * @UART_PM_STATE_UNDEFINED: sentinel
+ */
+enum uart_pm_state {
+ UART_PM_STATE_ON = 0,
+ UART_PM_STATE_OFF = 3, /* number taken from ACPI */
+ UART_PM_STATE_UNDEFINED,
+};
+
+/*
+ * This is the state information which is persistent across opens.
+ */
+struct uart_state {
+ struct tty_port port;
+
+ enum uart_pm_state pm_state;
+ struct circ_buf xmit;
+
+ struct uart_port *uart_port;
+};
+
+#define UART_XMIT_SIZE PAGE_SIZE
+
+
+/* number of characters left in xmit buffer before we ask for more */
+#define WAKEUP_CHARS 256
+
+struct module;
+struct tty_driver;
+
+struct uart_driver {
+ struct module *owner;
+ const char *driver_name;
+ const char *dev_name;
+ int major;
+ int minor;
+ int nr;
+ struct console *cons;
+
+ /*
+ * these are private; the low level driver should not
+ * touch these; they should be initialised to NULL
+ */
+ struct uart_state *state;
+ struct tty_driver *tty_driver;
+};
+
+void uart_write_wakeup(struct uart_port *port);
+
+/*
+ * Baud rate helpers.
+ */
+void uart_update_timeout(struct uart_port *port, unsigned int cflag,
+ unsigned int baud);
+unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old, unsigned int min,
+ unsigned int max);
+unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud);
+
+/* Base timer interval for polling */
+static inline int uart_poll_timeout(struct uart_port *port)
+{
+ int timeout = port->timeout;
+
+ return timeout > 6 ? (timeout / 2 - 2) : 1;
+}
+
+/*
+ * Console helpers.
+ */
+struct earlycon_device {
+ struct console *con;
+ struct uart_port port;
+ char options[16]; /* e.g., 115200n8 */
+ unsigned int baud;
+};
+
+struct earlycon_id {
+ char name[16];
+ int (*setup)(struct earlycon_device *, const char *options);
+} __aligned(32);
+
+extern int setup_earlycon(char *buf);
+extern int of_setup_earlycon(unsigned long addr,
+ int (*setup)(struct earlycon_device *, const char *));
+
+#define EARLYCON_DECLARE(_name, func) \
+ static const struct earlycon_id __earlycon_##_name \
+ __used __section(__earlycon_table) \
+ = { .name = __stringify(_name), \
+ .setup = func }
+
+#define OF_EARLYCON_DECLARE(name, compat, fn) \
+ _OF_DECLARE(earlycon, name, compat, fn, void *)
+
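+/*
+ * Sketch of an earlycon hook (illustrative; myuart_early_write is a
+ * hypothetical polled output routine):
+ *
+ *	static int __init myuart_early_setup(struct earlycon_device *dev,
+ *					     const char *opt)
+ *	{
+ *		dev->con->write = myuart_early_write;
+ *		return 0;
+ *	}
+ *	EARLYCON_DECLARE(myuart, myuart_early_setup);
+ */
+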
+struct uart_port *uart_get_console(struct uart_port *ports, int nr,
+ struct console *c);
+int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
+ char **options);
+void uart_parse_options(char *options, int *baud, int *parity, int *bits,
+ int *flow);
+int uart_set_options(struct uart_port *port, struct console *co, int baud,
+ int parity, int bits, int flow);
+struct tty_driver *uart_console_device(struct console *co, int *index);
+void uart_console_write(struct uart_port *port, const char *s,
+ unsigned int count,
+ void (*putchar)(struct uart_port *, int));
+
+/*
+ * Port/driver registration/removal
+ */
+int uart_register_driver(struct uart_driver *uart);
+void uart_unregister_driver(struct uart_driver *uart);
+int uart_add_one_port(struct uart_driver *reg, struct uart_port *port);
+int uart_remove_one_port(struct uart_driver *reg, struct uart_port *port);
+int uart_match_port(struct uart_port *port1, struct uart_port *port2);
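+
+/*
+ * Sketch of low-level driver registration (illustrative; the names,
+ * device numbers and my_port are placeholders):
+ *
+ *	static struct uart_driver my_uart_driver = {
+ *		.owner		= THIS_MODULE,
+ *		.driver_name	= "myuart",
+ *		.dev_name	= "ttyMY",
+ *		.major		= 204,
+ *		.minor		= 64,
+ *		.nr		= 2,
+ *	};
+ *
+ *	static int __init my_uart_init(void)
+ *	{
+ *		int ret = uart_register_driver(&my_uart_driver);
+ *
+ *		if (!ret)
+ *			ret = uart_add_one_port(&my_uart_driver, &my_port);
+ *		return ret;
+ *	}
+ */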
+
+/*
+ * Power Management
+ */
+int uart_suspend_port(struct uart_driver *reg, struct uart_port *port);
+int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
+
+#define uart_circ_empty(circ) ((circ)->head == (circ)->tail)
+#define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0)
+
+#define uart_circ_chars_pending(circ) \
+ (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
+
+#define uart_circ_chars_free(circ) \
+ (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
+
+static inline int uart_tx_stopped(struct uart_port *port)
+{
+ struct tty_struct *tty = port->state->port.tty;
+ if (tty->stopped || port->hw_stopped)
+ return 1;
+ return 0;
+}
+
+static inline bool uart_cts_enabled(struct uart_port *uport)
+{
+ return !!(uport->status & UPSTAT_CTS_ENABLE);
+}
+
+static inline bool uart_softcts_mode(struct uart_port *uport)
+{
+ upstat_t mask = UPSTAT_CTS_ENABLE | UPSTAT_AUTOCTS;
+
+ return ((uport->status & mask) == UPSTAT_CTS_ENABLE);
+}
+
+/*
+ * The following are helper functions for the low level drivers.
+ */
+
+extern void uart_handle_dcd_change(struct uart_port *uport,
+ unsigned int status);
+extern void uart_handle_cts_change(struct uart_port *uport,
+ unsigned int status);
+
+extern void uart_insert_char(struct uart_port *port, unsigned int status,
+ unsigned int overrun, unsigned int ch, unsigned int flag);
+
+#ifdef SUPPORT_SYSRQ
+static inline int
+uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+{
+ if (port->sysrq) {
+ if (ch && time_before(jiffies, port->sysrq)) {
+ handle_sysrq(ch);
+ port->sysrq = 0;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+ return 0;
+}
+#else
+#define uart_handle_sysrq_char(port,ch) ({ (void)port; 0; })
+#endif
+
+/*
+ * We do the SysRQ and SAK checking like this...
+ */
+static inline int uart_handle_break(struct uart_port *port)
+{
+ struct uart_state *state = port->state;
+
+ if (port->handle_break)
+ port->handle_break(port);
+
+#ifdef SUPPORT_SYSRQ
+ if (port->cons && port->cons->index == port->line) {
+ if (!port->sysrq) {
+ port->sysrq = jiffies + HZ*5;
+ return 1;
+ }
+ port->sysrq = 0;
+ }
+#endif
+ if (port->flags & UPF_SAK)
+ do_SAK(state->port.tty);
+ return 0;
+}
+
+/*
+ * UART_ENABLE_MS - determine if port should enable modem status irqs
+ */
+#define UART_ENABLE_MS(port,cflag) ((port)->flags & UPF_HARDPPS_CD || \
+ (cflag) & CRTSCTS || \
+ !((cflag) & CLOCAL))
+
+#endif /* LINUX_SERIAL_CORE_H */
diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h
new file mode 100644
index 000000000..4976befb6
--- /dev/null
+++ b/include/linux/serial_max3100.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright (C) 2007 Christian Pellegrin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+#ifndef _LINUX_SERIAL_MAX3100_H
+#define _LINUX_SERIAL_MAX3100_H 1
+
+
+/**
+ * struct plat_max3100 - MAX3100 SPI UART platform data
+ * @loopback: force MAX3100 in loopback
+ * @crystal: 1 for a 3.6864 MHz crystal, 0 for 1.8432 MHz
+ * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook
+ * called on suspend and resume to activate it.
+ * @poll_time: poll time for the CTS signal in ms; 0 disables polling
+ * (no hardware flow control is possible, but CPU usage is lower)
+ *
+ * You should use this structure in your machine description to specify
+ * how the MAX3100 is connected. Example:
+ *
+ * static struct plat_max3100 max3100_plat_data = {
+ * .loopback = 0,
+ * .crystal = 0,
+ * .poll_time = 100,
+ * };
+ *
+ * static struct spi_board_info spi_board_info[] = {
+ * {
+ * .modalias = "max3100",
+ * .platform_data = &max3100_plat_data,
+ * .irq = IRQ_EINT12,
+ * .max_speed_hz = 5*1000*1000,
+ * .chip_select = 0,
+ * },
+ * };
+ *
+ **/
+struct plat_max3100 {
+ int loopback;
+ int crystal;
+ void (*max3100_hw_suspend) (int suspend);
+ int poll_time;
+};
+
+#endif
diff --git a/include/linux/serial_pnx8xxx.h b/include/linux/serial_pnx8xxx.h
new file mode 100644
index 000000000..79ad87b0b
--- /dev/null
+++ b/include/linux/serial_pnx8xxx.h
@@ -0,0 +1,80 @@
+/*
+ * Embedded Alley Solutions, source@embeddedalley.com.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_SERIAL_PNX8XXX_H
+#define _LINUX_SERIAL_PNX8XXX_H
+
+#include <linux/serial_core.h>
+
+#define PNX8XXX_NR_PORTS 2
+
+struct pnx8xxx_port {
+ struct uart_port port;
+ struct timer_list timer;
+ unsigned int old_status;
+};
+
+/* register offsets */
+#define PNX8XXX_LCR 0
+#define PNX8XXX_MCR 0x004
+#define PNX8XXX_BAUD 0x008
+#define PNX8XXX_CFG 0x00c
+#define PNX8XXX_FIFO 0x028
+#define PNX8XXX_ISTAT 0xfe0
+#define PNX8XXX_IEN 0xfe4
+#define PNX8XXX_ICLR 0xfe8
+#define PNX8XXX_ISET 0xfec
+#define PNX8XXX_PD 0xff4
+#define PNX8XXX_MID 0xffc
+
+#define PNX8XXX_UART_LCR_TXBREAK (1<<30)
+#define PNX8XXX_UART_LCR_PAREVN 0x10000000
+#define PNX8XXX_UART_LCR_PAREN 0x08000000
+#define PNX8XXX_UART_LCR_2STOPB 0x04000000
+#define PNX8XXX_UART_LCR_8BIT 0x01000000
+#define PNX8XXX_UART_LCR_TX_RST 0x00040000
+#define PNX8XXX_UART_LCR_RX_RST 0x00020000
+#define PNX8XXX_UART_LCR_RX_NEXT 0x00010000
+
+#define PNX8XXX_UART_MCR_SCR 0xFF000000
+#define PNX8XXX_UART_MCR_DCD 0x00800000
+#define PNX8XXX_UART_MCR_CTS 0x00100000
+#define PNX8XXX_UART_MCR_LOOP 0x00000010
+#define PNX8XXX_UART_MCR_RTS 0x00000002
+#define PNX8XXX_UART_MCR_DTR 0x00000001
+
+#define PNX8XXX_UART_INT_TX 0x00000080
+#define PNX8XXX_UART_INT_EMPTY 0x00000040
+#define PNX8XXX_UART_INT_RCVTO 0x00000020
+#define PNX8XXX_UART_INT_RX 0x00000010
+#define PNX8XXX_UART_INT_RXOVRN 0x00000008
+#define PNX8XXX_UART_INT_FRERR 0x00000004
+#define PNX8XXX_UART_INT_BREAK 0x00000002
+#define PNX8XXX_UART_INT_PARITY 0x00000001
+#define PNX8XXX_UART_INT_ALLRX 0x0000003F
+#define PNX8XXX_UART_INT_ALLTX 0x000000C0
+
+#define PNX8XXX_UART_FIFO_TXFIFO 0x001F0000
+#define PNX8XXX_UART_FIFO_TXFIFO_STA (0x1f<<16)
+#define PNX8XXX_UART_FIFO_RXBRK 0x00008000
+#define PNX8XXX_UART_FIFO_RXFE 0x00004000
+#define PNX8XXX_UART_FIFO_RXPAR 0x00002000
+#define PNX8XXX_UART_FIFO_RXFIFO 0x00001F00
+#define PNX8XXX_UART_FIFO_RBRTHR 0x000000FF
+
+#endif
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
new file mode 100644
index 000000000..a7f004a3c
--- /dev/null
+++ b/include/linux/serial_s3c.h
@@ -0,0 +1,290 @@
+/*
+ * Internal header file for Samsung S3C2410 serial ports (UART0-2)
+ *
+ * Copyright (C) 2002 Shane Nay (shane@minirl.com)
+ *
+ * Additional defines, Copyright 2003 Simtec Electronics (linux@simtec.co.uk)
+ *
+ * Adapted from:
+ *
+ * Internal header file for MX1ADS serial ports (UART1 & 2)
+ *
+ * Copyright (C) 2002 Shane Nay (shane@minirl.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_REGS_SERIAL_H
+#define __ASM_ARM_REGS_SERIAL_H
+
+#define S3C2410_URXH (0x24)
+#define S3C2410_UTXH (0x20)
+#define S3C2410_ULCON (0x00)
+#define S3C2410_UCON (0x04)
+#define S3C2410_UFCON (0x08)
+#define S3C2410_UMCON (0x0C)
+#define S3C2410_UBRDIV (0x28)
+#define S3C2410_UTRSTAT (0x10)
+#define S3C2410_UERSTAT (0x14)
+#define S3C2410_UFSTAT (0x18)
+#define S3C2410_UMSTAT (0x1C)
+
+#define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3))
+
+#define S3C2410_LCON_CS5 (0x0)
+#define S3C2410_LCON_CS6 (0x1)
+#define S3C2410_LCON_CS7 (0x2)
+#define S3C2410_LCON_CS8 (0x3)
+#define S3C2410_LCON_CSMASK (0x3)
+
+#define S3C2410_LCON_PNONE (0x0)
+#define S3C2410_LCON_PEVEN (0x5 << 3)
+#define S3C2410_LCON_PODD (0x4 << 3)
+#define S3C2410_LCON_PMASK (0x7 << 3)
+
+#define S3C2410_LCON_STOPB (1<<2)
+#define S3C2410_LCON_IRM (1<<6)
+
+#define S3C2440_UCON_CLKMASK (3<<10)
+#define S3C2440_UCON_CLKSHIFT (10)
+#define S3C2440_UCON_PCLK (0<<10)
+#define S3C2440_UCON_UCLK (1<<10)
+#define S3C2440_UCON_PCLK2 (2<<10)
+#define S3C2440_UCON_FCLK (3<<10)
+#define S3C2443_UCON_EPLL (3<<10)
+
+#define S3C6400_UCON_CLKMASK (3<<10)
+#define S3C6400_UCON_CLKSHIFT (10)
+#define S3C6400_UCON_PCLK (0<<10)
+#define S3C6400_UCON_PCLK2 (2<<10)
+#define S3C6400_UCON_UCLK0 (1<<10)
+#define S3C6400_UCON_UCLK1 (3<<10)
+
+#define S3C2440_UCON2_FCLK_EN (1<<15)
+#define S3C2440_UCON0_DIVMASK (15 << 12)
+#define S3C2440_UCON1_DIVMASK (15 << 12)
+#define S3C2440_UCON2_DIVMASK (7 << 12)
+#define S3C2440_UCON_DIVSHIFT (12)
+
+#define S3C2412_UCON_CLKMASK (3<<10)
+#define S3C2412_UCON_CLKSHIFT (10)
+#define S3C2412_UCON_UCLK (1<<10)
+#define S3C2412_UCON_USYSCLK (3<<10)
+#define S3C2412_UCON_PCLK (0<<10)
+#define S3C2412_UCON_PCLK2 (2<<10)
+
+#define S3C2410_UCON_CLKMASK (1 << 10)
+#define S3C2410_UCON_CLKSHIFT (10)
+#define S3C2410_UCON_UCLK (1<<10)
+#define S3C2410_UCON_SBREAK (1<<4)
+
+#define S3C2410_UCON_TXILEVEL (1<<9)
+#define S3C2410_UCON_RXILEVEL (1<<8)
+#define S3C2410_UCON_TXIRQMODE (1<<2)
+#define S3C2410_UCON_RXIRQMODE (1<<0)
+#define S3C2410_UCON_RXFIFO_TOI (1<<7)
+#define S3C2443_UCON_RXERR_IRQEN (1<<6)
+#define S3C2443_UCON_LOOPBACK (1<<5)
+
+#define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
+ S3C2410_UCON_RXILEVEL | \
+ S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI)
+
+#define S3C64XX_UCON_TXBURST_1 (0<<20)
+#define S3C64XX_UCON_TXBURST_4 (1<<20)
+#define S3C64XX_UCON_TXBURST_8 (2<<20)
+#define S3C64XX_UCON_TXBURST_16 (3<<20)
+#define S3C64XX_UCON_TXBURST_MASK (0xf<<20)
+#define S3C64XX_UCON_RXBURST_1 (0<<16)
+#define S3C64XX_UCON_RXBURST_4 (1<<16)
+#define S3C64XX_UCON_RXBURST_8 (2<<16)
+#define S3C64XX_UCON_RXBURST_16 (3<<16)
+#define S3C64XX_UCON_RXBURST_MASK (0xf<<16)
+#define S3C64XX_UCON_TIMEOUT_SHIFT (12)
+#define S3C64XX_UCON_TIMEOUT_MASK (0xf<<12)
+#define S3C64XX_UCON_EMPTYINT_EN (1<<11)
+#define S3C64XX_UCON_DMASUS_EN (1<<10)
+#define S3C64XX_UCON_TXINT_LEVEL (1<<9)
+#define S3C64XX_UCON_RXINT_LEVEL (1<<8)
+#define S3C64XX_UCON_TIMEOUT_EN (1<<7)
+#define S3C64XX_UCON_ERRINT_EN (1<<6)
+#define S3C64XX_UCON_TXMODE_DMA (2<<2)
+#define S3C64XX_UCON_TXMODE_CPU (1<<2)
+#define S3C64XX_UCON_TXMODE_MASK (3<<2)
+#define S3C64XX_UCON_RXMODE_DMA (2<<0)
+#define S3C64XX_UCON_RXMODE_CPU (1<<0)
+#define S3C64XX_UCON_RXMODE_MASK (3<<0)
+
+#define S3C2410_UFCON_FIFOMODE (1<<0)
+#define S3C2410_UFCON_TXTRIG0 (0<<6)
+#define S3C2410_UFCON_RXTRIG8 (1<<4)
+#define S3C2410_UFCON_RXTRIG12 (2<<4)
+
+/* S3C2440 FIFO trigger levels */
+#define S3C2440_UFCON_RXTRIG1 (0<<4)
+#define S3C2440_UFCON_RXTRIG8 (1<<4)
+#define S3C2440_UFCON_RXTRIG16 (2<<4)
+#define S3C2440_UFCON_RXTRIG32 (3<<4)
+
+#define S3C2440_UFCON_TXTRIG0 (0<<6)
+#define S3C2440_UFCON_TXTRIG16 (1<<6)
+#define S3C2440_UFCON_TXTRIG32 (2<<6)
+#define S3C2440_UFCON_TXTRIG48 (3<<6)
+
+#define S3C2410_UFCON_RESETBOTH (3<<1)
+#define S3C2410_UFCON_RESETTX (1<<2)
+#define S3C2410_UFCON_RESETRX (1<<1)
+
+#define S3C2410_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
+ S3C2410_UFCON_TXTRIG0 | \
+ S3C2410_UFCON_RXTRIG8 )
+
+#define S3C2410_UMCOM_AFC (1<<4)
+#define S3C2410_UMCOM_RTS_LOW (1<<0)
+
+#define S3C2412_UMCON_AFC_63 (0<<5) /* same as s3c2443 */
+#define S3C2412_UMCON_AFC_56 (1<<5)
+#define S3C2412_UMCON_AFC_48 (2<<5)
+#define S3C2412_UMCON_AFC_40 (3<<5)
+#define S3C2412_UMCON_AFC_32 (4<<5)
+#define S3C2412_UMCON_AFC_24 (5<<5)
+#define S3C2412_UMCON_AFC_16 (6<<5)
+#define S3C2412_UMCON_AFC_8 (7<<5)
+
+#define S3C2410_UFSTAT_TXFULL (1<<9)
+#define S3C2410_UFSTAT_RXFULL (1<<8)
+#define S3C2410_UFSTAT_TXMASK (15<<4)
+#define S3C2410_UFSTAT_TXSHIFT (4)
+#define S3C2410_UFSTAT_RXMASK (15<<0)
+#define S3C2410_UFSTAT_RXSHIFT (0)
+
+/* UFSTAT S3C2443 same as S3C2440 */
+#define S3C2440_UFSTAT_TXFULL (1<<14)
+#define S3C2440_UFSTAT_RXFULL (1<<6)
+#define S3C2440_UFSTAT_TXSHIFT (8)
+#define S3C2440_UFSTAT_RXSHIFT (0)
+#define S3C2440_UFSTAT_TXMASK (63<<8)
+#define S3C2440_UFSTAT_RXMASK (63)
+
+#define S3C2410_UTRSTAT_TIMEOUT (1<<3)
+#define S3C2410_UTRSTAT_TXE (1<<2)
+#define S3C2410_UTRSTAT_TXFE (1<<1)
+#define S3C2410_UTRSTAT_RXDR (1<<0)
+
+#define S3C2410_UERSTAT_OVERRUN (1<<0)
+#define S3C2410_UERSTAT_FRAME (1<<2)
+#define S3C2410_UERSTAT_BREAK (1<<3)
+#define S3C2443_UERSTAT_PARITY (1<<1)
+
+#define S3C2410_UERSTAT_ANY (S3C2410_UERSTAT_OVERRUN | \
+ S3C2410_UERSTAT_FRAME | \
+ S3C2410_UERSTAT_BREAK)
+
+#define S3C2410_UMSTAT_CTS (1<<0)
+#define S3C2410_UMSTAT_DeltaCTS (1<<2)
+
+#define S3C2443_DIVSLOT (0x2C)
+
+/* S3C64XX interrupt registers. */
+#define S3C64XX_UINTP 0x30
+#define S3C64XX_UINTSP 0x34
+#define S3C64XX_UINTM 0x38
+
+#define S3C64XX_UINTM_RXD (0)
+#define S3C64XX_UINTM_ERROR (1)
+#define S3C64XX_UINTM_TXD (2)
+#define S3C64XX_UINTM_RXD_MSK (1 << S3C64XX_UINTM_RXD)
+#define S3C64XX_UINTM_ERR_MSK (1 << S3C64XX_UINTM_ERROR)
+#define S3C64XX_UINTM_TXD_MSK (1 << S3C64XX_UINTM_TXD)
+
+/* Following are specific to S5PV210 */
+#define S5PV210_UCON_CLKMASK (1<<10)
+#define S5PV210_UCON_CLKSHIFT (10)
+#define S5PV210_UCON_PCLK (0<<10)
+#define S5PV210_UCON_UCLK (1<<10)
+
+#define S5PV210_UFCON_TXTRIG0 (0<<8)
+#define S5PV210_UFCON_TXTRIG4 (1<<8)
+#define S5PV210_UFCON_TXTRIG8 (2<<8)
+#define S5PV210_UFCON_TXTRIG16 (3<<8)
+#define S5PV210_UFCON_TXTRIG32 (4<<8)
+#define S5PV210_UFCON_TXTRIG64 (5<<8)
+#define S5PV210_UFCON_TXTRIG128 (6<<8)
+#define S5PV210_UFCON_TXTRIG256 (7<<8)
+
+#define S5PV210_UFCON_RXTRIG1 (0<<4)
+#define S5PV210_UFCON_RXTRIG4 (1<<4)
+#define S5PV210_UFCON_RXTRIG8 (2<<4)
+#define S5PV210_UFCON_RXTRIG16 (3<<4)
+#define S5PV210_UFCON_RXTRIG32 (4<<4)
+#define S5PV210_UFCON_RXTRIG64 (5<<4)
+#define S5PV210_UFCON_RXTRIG128 (6<<4)
+#define S5PV210_UFCON_RXTRIG256 (7<<4)
+
+#define S5PV210_UFSTAT_TXFULL (1<<24)
+#define S5PV210_UFSTAT_RXFULL (1<<8)
+#define S5PV210_UFSTAT_TXMASK (255<<16)
+#define S5PV210_UFSTAT_TXSHIFT (16)
+#define S5PV210_UFSTAT_RXMASK (255<<0)
+#define S5PV210_UFSTAT_RXSHIFT (0)
+
+#define S3C2410_UCON_CLKSEL0 (1 << 0)
+#define S3C2410_UCON_CLKSEL1 (1 << 1)
+#define S3C2410_UCON_CLKSEL2 (1 << 2)
+#define S3C2410_UCON_CLKSEL3 (1 << 3)
+
+/* Default values for s5pv210 UCON and UFCON uart registers */
+#define S5PV210_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
+ S3C2410_UCON_RXILEVEL | \
+ S3C2410_UCON_TXIRQMODE | \
+ S3C2410_UCON_RXIRQMODE | \
+ S3C2410_UCON_RXFIFO_TOI | \
+ S3C2443_UCON_RXERR_IRQEN)
+
+#define S5PV210_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
+ S5PV210_UFCON_TXTRIG4 | \
+ S5PV210_UFCON_RXTRIG4)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/serial_core.h>
+
+/* per-machine configuration structure for the
+ * serial port
+ *
+ * the pointer is set up by the machine-specific initialisation code
+ * in the arch/arm/mach-s3c2410/ directory.
+ */
+
+struct s3c2410_uartcfg {
+ unsigned char hwport; /* hardware port number */
+ unsigned char unused;
+ unsigned short flags;
+ upf_t uart_flags; /* default uart flags */
+ unsigned int clk_sel;
+
+ unsigned int has_fracval;
+
+ unsigned long ucon; /* value of ucon for port */
+ unsigned long ulcon; /* value of ulcon for port */
+ unsigned long ufcon; /* value of ufcon for port */
+};
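+
+/* A minimal sketch of how a machine file might fill in the structure
+ * above; the "smdk_uart_cfg" name and the chosen values are hypothetical
+ * and purely illustrative:
+ *
+ *	static struct s3c2410_uartcfg smdk_uart_cfg = {
+ *		.hwport	= 0,
+ *		.flags	= 0,
+ *		.ucon	= S3C2410_UCON_DEFAULT,
+ *		.ulcon	= S3C2410_LCON_CS8 | S3C2410_LCON_PNONE,
+ *		.ufcon	= S3C2410_UFCON_DEFAULT,
+ *	};
+ */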
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARM_REGS_SERIAL_H */
+
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
new file mode 100644
index 000000000..6c5e3bb28
--- /dev/null
+++ b/include/linux/serial_sci.h
@@ -0,0 +1,142 @@
+#ifndef __LINUX_SERIAL_SCI_H
+#define __LINUX_SERIAL_SCI_H
+
+#include <linux/serial_core.h>
+#include <linux/sh_dma.h>
+
+/*
+ * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts)
+ */
+
+#define SCIx_NOT_SUPPORTED (-1)
+
+/* SCSMR (Serial Mode Register) */
+#define SCSMR_CHR (1 << 6) /* 7-bit Character Length */
+#define SCSMR_PE (1 << 5) /* Parity Enable */
+#define SCSMR_ODD (1 << 4) /* Odd Parity */
+#define SCSMR_STOP (1 << 3) /* Stop Bit Length */
+#define SCSMR_CKS 0x0003 /* Clock Select */
+
+/* Serial Control Register (@ = not supported by all parts) */
+#define SCSCR_TIE (1 << 7) /* Transmit Interrupt Enable */
+#define SCSCR_RIE (1 << 6) /* Receive Interrupt Enable */
+#define SCSCR_TE (1 << 5) /* Transmit Enable */
+#define SCSCR_RE (1 << 4) /* Receive Enable */
+#define SCSCR_REIE (1 << 3) /* Receive Error Interrupt Enable @ */
+#define SCSCR_TOIE (1 << 2) /* Timeout Interrupt Enable @ */
+#define SCSCR_CKE1 (1 << 1) /* Clock Enable 1 */
+#define SCSCR_CKE0 (1 << 0) /* Clock Enable 0 */
+/* SCIFA/SCIFB only */
+#define SCSCR_TDRQE (1 << 15) /* Tx Data Transfer Request Enable */
+#define SCSCR_RDRQE (1 << 14) /* Rx Data Transfer Request Enable */
+
+/* SCxSR (Serial Status Register) on SCI */
+#define SCI_TDRE 0x80 /* Transmit Data Register Empty */
+#define SCI_RDRF 0x40 /* Receive Data Register Full */
+#define SCI_ORER 0x20 /* Overrun Error */
+#define SCI_FER 0x10 /* Framing Error */
+#define SCI_PER 0x08 /* Parity Error */
+#define SCI_TEND 0x04 /* Transmit End */
+
+#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER)
+
+/* SCxSR (Serial Status Register) on SCIF, HSCIF */
+#define SCIF_ER 0x0080 /* Receive Error */
+#define SCIF_TEND 0x0040 /* Transmission End */
+#define SCIF_TDFE 0x0020 /* Transmit FIFO Data Empty */
+#define SCIF_BRK 0x0010 /* Break Detect */
+#define SCIF_FER 0x0008 /* Framing Error */
+#define SCIF_PER 0x0004 /* Parity Error */
+#define SCIF_RDF 0x0002 /* Receive FIFO Data Full */
+#define SCIF_DR 0x0001 /* Receive Data Ready */
+
+#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
+
+/* SCFCR (FIFO Control Register) */
+#define SCFCR_LOOP (1 << 0) /* Loopback Test */
+
+/* SCSPTR (Serial Port Register), optional */
+#define SCSPTR_RTSIO (1 << 7) /* Serial Port RTS Pin Input/Output */
+#define SCSPTR_CTSIO (1 << 5) /* Serial Port CTS Pin Input/Output */
+#define SCSPTR_SPB2IO (1 << 1) /* Serial Port Break Input/Output */
+#define SCSPTR_SPB2DT (1 << 0) /* Serial Port Break Data */
+
+/* HSSRR HSCIF */
+#define HSCIF_SRE 0x8000 /* Sampling Rate Register Enable */
+
+enum {
+ SCIx_PROBE_REGTYPE,
+
+ SCIx_SCI_REGTYPE,
+ SCIx_IRDA_REGTYPE,
+ SCIx_SCIFA_REGTYPE,
+ SCIx_SCIFB_REGTYPE,
+ SCIx_SH2_SCIF_FIFODATA_REGTYPE,
+ SCIx_SH3_SCIF_REGTYPE,
+ SCIx_SH4_SCIF_REGTYPE,
+ SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE,
+ SCIx_SH4_SCIF_FIFODATA_REGTYPE,
+ SCIx_SH7705_SCIF_REGTYPE,
+ SCIx_HSCIF_REGTYPE,
+
+ SCIx_NR_REGTYPES,
+};
+
+/*
+ * SCI register subset common for all port types.
+ * Not all registers will exist on all parts.
+ */
+enum {
+ SCSMR, /* Serial Mode Register */
+ SCBRR, /* Bit Rate Register */
+ SCSCR, /* Serial Control Register */
+ SCxSR, /* Serial Status Register */
+ SCFCR, /* FIFO Control Register */
+ SCFDR, /* FIFO Data Count Register */
+ SCxTDR, /* Transmit (FIFO) Data Register */
+ SCxRDR, /* Receive (FIFO) Data Register */
+ SCLSR, /* Line Status Register */
+ SCTFDR, /* Transmit FIFO Data Count Register */
+ SCRFDR, /* Receive FIFO Data Count Register */
+ SCSPTR, /* Serial Port Register */
+ HSSRR, /* Sampling Rate Register */
+
+ SCIx_NR_REGS,
+};
+
+struct device;
+
+struct plat_sci_port_ops {
+ void (*init_pins)(struct uart_port *, unsigned int cflag);
+};
+
+/*
+ * Port-specific capabilities
+ */
+#define SCIx_HAVE_RTSCTS (1 << 0)
+
+/*
+ * Platform device specific platform_data struct
+ */
+struct plat_sci_port {
+ unsigned int type; /* SCI / SCIF / IRDA / HSCIF */
+ upf_t flags; /* UPF_* flags */
+ unsigned long capabilities; /* Port features/capabilities */
+
+ unsigned int sampling_rate;
+ unsigned int scscr; /* SCSCR initialization */
+
+ /*
+ * Platform overrides if necessary, defaults otherwise.
+ */
+ int port_reg;
+ unsigned char regshift;
+ unsigned char regtype;
+
+ struct plat_sci_port_ops *ops;
+
+ unsigned int dma_slave_tx;
+ unsigned int dma_slave_rx;
+};
+
+#endif /* __LINUX_SERIAL_SCI_H */
diff --git a/include/linux/serio.h b/include/linux/serio.h
new file mode 100644
index 000000000..9f779c7a2
--- /dev/null
+++ b/include/linux/serio.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 1999-2002 Vojtech Pavlik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _SERIO_H
+#define _SERIO_H
+
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <uapi/linux/serio.h>
+
+struct serio {
+ void *port_data;
+
+ char name[32];
+ char phys[32];
+ char firmware_id[128];
+
+ bool manual_bind;
+
+ struct serio_device_id id;
+
+ spinlock_t lock; /* protects critical sections from port's interrupt handler */
+
+ int (*write)(struct serio *, unsigned char);
+ int (*open)(struct serio *);
+ void (*close)(struct serio *);
+ int (*start)(struct serio *);
+ void (*stop)(struct serio *);
+
+ struct serio *parent;
+ struct list_head child_node; /* Entry in parent->children list */
+ struct list_head children;
+ unsigned int depth; /* level of nesting in serio hierarchy */
+
+	struct serio_driver *drv;	/* accessed from interrupt, must be protected by serio->lock and serio->drv_mutex */
+ struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
+
+ struct device dev;
+
+ struct list_head node;
+};
+#define to_serio_port(d) container_of(d, struct serio, dev)
+
+struct serio_driver {
+ const char *description;
+
+ const struct serio_device_id *id_table;
+ bool manual_bind;
+
+ void (*write_wakeup)(struct serio *);
+ irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int);
+ int (*connect)(struct serio *, struct serio_driver *drv);
+ int (*reconnect)(struct serio *);
+ void (*disconnect)(struct serio *);
+ void (*cleanup)(struct serio *);
+
+ struct device_driver driver;
+};
+#define to_serio_driver(d) container_of(d, struct serio_driver, driver)
+
+int serio_open(struct serio *serio, struct serio_driver *drv);
+void serio_close(struct serio *serio);
+void serio_rescan(struct serio *serio);
+void serio_reconnect(struct serio *serio);
+irqreturn_t serio_interrupt(struct serio *serio, unsigned char data, unsigned int flags);
+
+void __serio_register_port(struct serio *serio, struct module *owner);
+
+/* use a define to avoid include chaining to get THIS_MODULE */
+#define serio_register_port(serio) \
+ __serio_register_port(serio, THIS_MODULE)
+
+void serio_unregister_port(struct serio *serio);
+void serio_unregister_child_port(struct serio *serio);
+
+int __must_check __serio_register_driver(struct serio_driver *drv,
+ struct module *owner, const char *mod_name);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define serio_register_driver(drv) \
+ __serio_register_driver(drv, THIS_MODULE, KBUILD_MODNAME)
+
+void serio_unregister_driver(struct serio_driver *drv);
+
+/**
+ * module_serio_driver() - Helper macro for registering a serio driver
+ * @__serio_driver: serio_driver struct
+ *
+ * Helper macro for serio drivers which do not do anything special in
+ * module init/exit. This eliminates a lot of boilerplate. Each module
+ * may only use this macro once, and calling it replaces module_init()
+ * and module_exit().
+ */
+#define module_serio_driver(__serio_driver) \
+ module_driver(__serio_driver, serio_register_driver, \
+ serio_unregister_driver)
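+
+/* Hypothetical example (the "foo_*" names are illustrative only): a driver
+ * with nothing special to do at module init/exit can be registered entirely
+ * through the helper above.
+ *
+ *	static struct serio_driver foo_drv = {
+ *		.driver		= { .name = "foo" },
+ *		.description	= "example serio driver",
+ *		.interrupt	= foo_interrupt,
+ *		.connect	= foo_connect,
+ *		.disconnect	= foo_disconnect,
+ *	};
+ *	module_serio_driver(foo_drv);
+ */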
+
+static inline int serio_write(struct serio *serio, unsigned char data)
+{
+ if (serio->write)
+ return serio->write(serio, data);
+ else
+ return -1;
+}
+
+static inline void serio_drv_write_wakeup(struct serio *serio)
+{
+ if (serio->drv && serio->drv->write_wakeup)
+ serio->drv->write_wakeup(serio);
+}
+
+/*
+ * Use the following functions to manipulate serio's per-port
+ * driver-specific data.
+ */
+static inline void *serio_get_drvdata(struct serio *serio)
+{
+ return dev_get_drvdata(&serio->dev);
+}
+
+static inline void serio_set_drvdata(struct serio *serio, void *data)
+{
+ dev_set_drvdata(&serio->dev, data);
+}
+
+/*
+ * Use the following functions to protect critical sections in
+ * driver code from port's interrupt handler
+ */
+static inline void serio_pause_rx(struct serio *serio)
+{
+ spin_lock_irq(&serio->lock);
+}
+
+static inline void serio_continue_rx(struct serio *serio)
+{
+ spin_unlock_irq(&serio->lock);
+}
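+
+/* Illustrative sketch (hypothetical "foo" driver state): data shared with
+ * the port's interrupt handler is updated between the pause/continue pair
+ * above so the handler cannot run concurrently.
+ *
+ *	serio_pause_rx(serio);
+ *	foo->expecting_ack = true;
+ *	serio_continue_rx(serio);
+ */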
+
+#endif
diff --git a/include/linux/sfi.h b/include/linux/sfi.h
new file mode 100644
index 000000000..d9b436f09
--- /dev/null
+++ b/include/linux/sfi.h
@@ -0,0 +1,209 @@
+/* sfi.h Simple Firmware Interface */
+
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _LINUX_SFI_H
+#define _LINUX_SFI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+/* Table signatures reserved by the SFI specification */
+#define SFI_SIG_SYST "SYST"
+#define SFI_SIG_FREQ "FREQ"
+#define SFI_SIG_IDLE "IDLE"
+#define SFI_SIG_CPUS "CPUS"
+#define SFI_SIG_MTMR "MTMR"
+#define SFI_SIG_MRTC "MRTC"
+#define SFI_SIG_MMAP "MMAP"
+#define SFI_SIG_APIC "APIC"
+#define SFI_SIG_XSDT "XSDT"
+#define SFI_SIG_WAKE "WAKE"
+#define SFI_SIG_DEVS "DEVS"
+#define SFI_SIG_GPIO "GPIO"
+
+#define SFI_SIGNATURE_SIZE 4
+#define SFI_OEM_ID_SIZE 6
+#define SFI_OEM_TABLE_ID_SIZE 8
+
+#define SFI_NAME_LEN 16
+
+#define SFI_SYST_SEARCH_BEGIN 0x000E0000
+#define SFI_SYST_SEARCH_END 0x000FFFFF
+
+#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
+ ((ptable->header.len - sizeof(struct sfi_table_header)) / \
+ (sizeof(entry_type)))
+/*
+ * Table structures must be byte-packed to match the SFI specification,
+ * as they are provided by the BIOS.
+ */
+struct sfi_table_header {
+ char sig[SFI_SIGNATURE_SIZE];
+ u32 len;
+ u8 rev;
+ u8 csum;
+ char oem_id[SFI_OEM_ID_SIZE];
+ char oem_table_id[SFI_OEM_TABLE_ID_SIZE];
+} __packed;
+
+struct sfi_table_simple {
+ struct sfi_table_header header;
+ u64 pentry[1];
+} __packed;
+
+/* Comply with UEFI spec 2.1 */
+struct sfi_mem_entry {
+ u32 type;
+ u64 phys_start;
+ u64 virt_start;
+ u64 pages;
+ u64 attrib;
+} __packed;
+
+struct sfi_cpu_table_entry {
+ u32 apic_id;
+} __packed;
+
+struct sfi_cstate_table_entry {
+ u32 hint; /* MWAIT hint */
+ u32 latency; /* latency in ms */
+} __packed;
+
+struct sfi_apic_table_entry {
+ u64 phys_addr; /* phy base addr for APIC reg */
+} __packed;
+
+struct sfi_freq_table_entry {
+ u32 freq_mhz; /* in MHZ */
+ u32 latency; /* transition latency in ms */
+ u32 ctrl_val; /* value to write to PERF_CTL */
+} __packed;
+
+struct sfi_wake_table_entry {
+	u64	phys_addr;	/* physical address where the wake vector is located */
+} __packed;
+
+struct sfi_timer_table_entry {
+ u64 phys_addr; /* phy base addr for the timer */
+ u32 freq_hz; /* in HZ */
+ u32 irq;
+} __packed;
+
+struct sfi_rtc_table_entry {
+ u64 phys_addr; /* phy base addr for the RTC */
+ u32 irq;
+} __packed;
+
+struct sfi_device_table_entry {
+ u8 type; /* bus type, I2C, SPI or ...*/
+#define SFI_DEV_TYPE_SPI 0
+#define SFI_DEV_TYPE_I2C 1
+#define SFI_DEV_TYPE_UART 2
+#define SFI_DEV_TYPE_HSI 3
+#define SFI_DEV_TYPE_IPC 4
+
+ u8 host_num; /* attached to host 0, 1...*/
+ u16 addr;
+ u8 irq;
+ u32 max_freq;
+ char name[SFI_NAME_LEN];
+} __packed;
+
+struct sfi_gpio_table_entry {
+ char controller_name[SFI_NAME_LEN];
+ u16 pin_no;
+ char pin_name[SFI_NAME_LEN];
+} __packed;
+
+typedef int (*sfi_table_handler) (struct sfi_table_header *table);
+
+#ifdef CONFIG_SFI
+extern void __init sfi_init(void);
+extern int __init sfi_platform_init(void);
+extern void __init sfi_init_late(void);
+extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
+ sfi_table_handler handler);
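+
+/* Sketch of a table handler (the "parse_cpus" name is hypothetical),
+ * following the sfi_table_handler typedef above:
+ *
+ *	static int __init parse_cpus(struct sfi_table_header *table)
+ *	{
+ *		struct sfi_table_simple *sb =
+ *			(struct sfi_table_simple *)table;
+ *		int num = SFI_GET_NUM_ENTRIES(sb, struct sfi_cpu_table_entry);
+ *
+ *		return num ? 0 : -ENODEV;
+ *	}
+ *
+ *	sfi_table_parse(SFI_SIG_CPUS, NULL, NULL, parse_cpus);
+ */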
+
+extern int sfi_disabled;
+static inline void disable_sfi(void)
+{
+ sfi_disabled = 1;
+}
+
+#else /* !CONFIG_SFI */
+
+static inline void sfi_init(void)
+{
+}
+
+static inline void sfi_init_late(void)
+{
+}
+
+#define sfi_disabled 0
+
+static inline int sfi_table_parse(char *signature, char *oem_id,
+ char *oem_table_id,
+ sfi_table_handler handler)
+{
+ return -1;
+}
+
+#endif /* !CONFIG_SFI */
+
+#endif /*_LINUX_SFI_H*/
diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h
new file mode 100644
index 000000000..a6e555cbe
--- /dev/null
+++ b/include/linux/sfi_acpi.h
@@ -0,0 +1,93 @@
+/* sfi_acpi.h Simple Firmware Interface - ACPI helpers */
+
+/*
+
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ BSD LICENSE
+
+ Copyright(c) 2009 Intel Corporation. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*/
+
+#ifndef _LINUX_SFI_ACPI_H
+#define _LINUX_SFI_ACPI_H
+
+#include <linux/acpi.h>
+#include <linux/sfi.h>
+
+#ifdef CONFIG_SFI
+extern int sfi_acpi_table_parse(char *signature, char *oem_id,
+ char *oem_table_id,
+ int (*handler)(struct acpi_table_header *));
+
+static inline int __init acpi_sfi_table_parse(char *signature,
+ int (*handler)(struct acpi_table_header *))
+{
+ if (!acpi_table_parse(signature, handler))
+ return 0;
+
+ return sfi_acpi_table_parse(signature, NULL, NULL, handler);
+}
+#else /* !CONFIG_SFI */
+static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
+ char *oem_table_id,
+ int (*handler)(struct acpi_table_header *))
+{
+ return -1;
+}
+
+static inline int __init acpi_sfi_table_parse(char *signature,
+ int (*handler)(struct acpi_table_header *))
+{
+ return acpi_table_parse(signature, handler);
+}
+#endif /* !CONFIG_SFI */
+
+#endif /*_LINUX_SFI_ACPI_H*/
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
new file mode 100644
index 000000000..1f208b2a1
--- /dev/null
+++ b/include/linux/sh_clk.h
@@ -0,0 +1,216 @@
+#ifndef __SH_CLOCK_H
+#define __SH_CLOCK_H
+
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include <linux/cpufreq.h>
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+struct clk;
+
+struct clk_mapping {
+ phys_addr_t phys;
+ void __iomem *base;
+ unsigned long len;
+ struct kref ref;
+};
+
+struct sh_clk_ops {
+#ifdef CONFIG_SH_CLK_CPG_LEGACY
+ void (*init)(struct clk *clk);
+#endif
+ int (*enable)(struct clk *clk);
+ void (*disable)(struct clk *clk);
+ unsigned long (*recalc)(struct clk *clk);
+ int (*set_rate)(struct clk *clk, unsigned long rate);
+ int (*set_parent)(struct clk *clk, struct clk *parent);
+ long (*round_rate)(struct clk *clk, unsigned long rate);
+};
+
+#define SH_CLK_DIV_MSK(div) ((1 << (div)) - 1)
+#define SH_CLK_DIV4_MSK SH_CLK_DIV_MSK(4)
+#define SH_CLK_DIV6_MSK SH_CLK_DIV_MSK(6)
+
+struct clk {
+ struct list_head node;
+ struct clk *parent;
+ struct clk **parent_table; /* list of parents to */
+ unsigned short parent_num; /* choose between */
+ unsigned char src_shift; /* source clock field in the */
+ unsigned char src_width; /* configuration register */
+ struct sh_clk_ops *ops;
+
+ struct list_head children;
+ struct list_head sibling; /* node for children */
+
+ int usecount;
+
+ unsigned long rate;
+ unsigned long flags;
+
+ void __iomem *enable_reg;
+ void __iomem *status_reg;
+ unsigned int enable_bit;
+ void __iomem *mapped_reg;
+
+ unsigned int div_mask;
+ unsigned long arch_flags;
+ void *priv;
+ struct clk_mapping *mapping;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int nr_freqs;
+};
+
+#define CLK_ENABLE_ON_INIT BIT(0)
+
+#define CLK_ENABLE_REG_32BIT BIT(1) /* default access size */
+#define CLK_ENABLE_REG_16BIT BIT(2)
+#define CLK_ENABLE_REG_8BIT BIT(3)
+
+#define CLK_MASK_DIV_ON_DISABLE BIT(4)
+
+#define CLK_ENABLE_REG_MASK (CLK_ENABLE_REG_32BIT | \
+ CLK_ENABLE_REG_16BIT | \
+ CLK_ENABLE_REG_8BIT)
+
+/* drivers/sh/clk.c */
+unsigned long followparent_recalc(struct clk *);
+void recalculate_root_clocks(void);
+void propagate_rate(struct clk *);
+int clk_reparent(struct clk *child, struct clk *parent);
+int clk_register(struct clk *);
+void clk_unregister(struct clk *);
+void clk_enable_init_clocks(void);
+
+struct clk_div_mult_table {
+ unsigned int *divisors;
+ unsigned int nr_divisors;
+ unsigned int *multipliers;
+ unsigned int nr_multipliers;
+};
+
+struct cpufreq_frequency_table;
+void clk_rate_table_build(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ int nr_freqs,
+ struct clk_div_mult_table *src_table,
+ unsigned long *bitmap);
+
+long clk_rate_table_round(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ unsigned long rate);
+
+int clk_rate_table_find(struct clk *clk,
+ struct cpufreq_frequency_table *freq_table,
+ unsigned long rate);
+
+long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
+ unsigned int div_max, unsigned long rate);
+
+long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
+ unsigned int mult_max, unsigned long rate);
+
+long clk_round_parent(struct clk *clk, unsigned long target,
+ unsigned long *best_freq, unsigned long *parent_freq,
+ unsigned int div_min, unsigned int div_max);
+
+#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _status_reg, _flags) \
+{ \
+ .parent = _parent, \
+ .enable_reg = (void __iomem *)_enable_reg, \
+ .enable_bit = _enable_bit, \
+ .status_reg = _status_reg, \
+ .flags = _flags, \
+}
+
+#define SH_CLK_MSTP32(_p, _r, _b, _f) \
+ SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_32BIT)
+
+#define SH_CLK_MSTP32_STS(_p, _r, _b, _s, _f) \
+ SH_CLK_MSTP(_p, _r, _b, _s, _f | CLK_ENABLE_REG_32BIT)
+
+#define SH_CLK_MSTP16(_p, _r, _b, _f) \
+ SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_16BIT)
+
+#define SH_CLK_MSTP8(_p, _r, _b, _f) \
+ SH_CLK_MSTP(_p, _r, _b, 0, _f | CLK_ENABLE_REG_8BIT)
+
+int sh_clk_mstp_register(struct clk *clks, int nr);
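+
+/* Hypothetical usage of the MSTP wrappers above: describe one gate clock
+ * per module-stop bit and register the whole array in one call (register
+ * address and parent clock are purely illustrative):
+ *
+ *	static struct clk mstp_clks[] = {
+ *		SH_CLK_MSTP32(&parent_clk, 0xffc80030, 15, 0),
+ *		SH_CLK_MSTP32(&parent_clk, 0xffc80030, 14, 0),
+ *	};
+ *
+ *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
+ */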
+
+/*
+ * MSTP registration never really cared about access size, despite the
+ * original enable/disable pairs assuming a 32-bit access. Clocks are
+ * responsible for defining their access sizes either directly or via the
+ * clock definition wrappers.
+ */
+static inline int __deprecated sh_clk_mstp32_register(struct clk *clks, int nr)
+{
+ return sh_clk_mstp_register(clks, nr);
+}
+
+#define SH_CLK_DIV4(_parent, _reg, _shift, _div_bitmap, _flags) \
+{ \
+ .parent = _parent, \
+ .enable_reg = (void __iomem *)_reg, \
+ .enable_bit = _shift, \
+ .arch_flags = _div_bitmap, \
+ .div_mask = SH_CLK_DIV4_MSK, \
+ .flags = _flags, \
+}
+
+struct clk_div_table {
+ struct clk_div_mult_table *div_mult_table;
+ void (*kick)(struct clk *clk);
+};
+
+#define clk_div4_table clk_div_table
+
+int sh_clk_div4_register(struct clk *clks, int nr,
+ struct clk_div4_table *table);
+int sh_clk_div4_enable_register(struct clk *clks, int nr,
+ struct clk_div4_table *table);
+int sh_clk_div4_reparent_register(struct clk *clks, int nr,
+ struct clk_div4_table *table);
+
+#define SH_CLK_DIV6_EXT(_reg, _flags, _parents, \
+ _num_parents, _src_shift, _src_width) \
+{ \
+ .enable_reg = (void __iomem *)_reg, \
+ .enable_bit = 0, /* unused */ \
+ .flags = _flags | CLK_MASK_DIV_ON_DISABLE, \
+ .div_mask = SH_CLK_DIV6_MSK, \
+ .parent_table = _parents, \
+ .parent_num = _num_parents, \
+ .src_shift = _src_shift, \
+ .src_width = _src_width, \
+}
+
+#define SH_CLK_DIV6(_parent, _reg, _flags) \
+{ \
+ .parent = _parent, \
+ .enable_reg = (void __iomem *)_reg, \
+ .enable_bit = 0, /* unused */ \
+ .div_mask = SH_CLK_DIV6_MSK, \
+ .flags = _flags | CLK_MASK_DIV_ON_DISABLE, \
+}
+
+int sh_clk_div6_register(struct clk *clks, int nr);
+int sh_clk_div6_reparent_register(struct clk *clks, int nr);
+
+#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
+#define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
+
+/* .enable_reg will be updated to .mapping on sh_clk_fsidiv_register() */
+#define SH_CLK_FSIDIV(_reg, _parent) \
+{ \
+ .enable_reg = (void __iomem *)_reg, \
+ .parent = _parent, \
+}
+
+int sh_clk_fsidiv_register(struct clk *clks, int nr);
+
+#endif /* __SH_CLOCK_H */
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
new file mode 100644
index 000000000..56b97eed2
--- /dev/null
+++ b/include/linux/sh_dma.h
@@ -0,0 +1,115 @@
+/*
+ * Header for the new SH dmaengine driver
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SH_DMA_H
+#define SH_DMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/list.h>
+#include <linux/shdma-base.h>
+#include <linux/types.h>
+
+struct device;
+
+/* Used by slave DMA clients to request DMA to/from a specific peripheral */
+struct sh_dmae_slave {
+ struct shdma_slave shdma_slave; /* Set by the platform */
+};
+
+/*
+ * Supplied by platforms to specify how a DMA channel has to be configured
+ * for a certain peripheral.
+ */
+struct sh_dmae_slave_config {
+ int slave_id;
+ dma_addr_t addr;
+ u32 chcr;
+ char mid_rid;
+};
+
+/**
+ * struct sh_dmae_channel - DMAC channel platform data
+ * @offset: register offset within the main IOMEM resource
+ * @dmars: channel DMARS register offset
+ * @chclr_offset: channel CHCLR register offset
+ * @dmars_bit: channel DMARS field offset within the register
+ * @chclr_bit: bit position, to be set to reset the channel
+ */
+struct sh_dmae_channel {
+ unsigned int offset;
+ unsigned int dmars;
+ unsigned int chclr_offset;
+ unsigned char dmars_bit;
+ unsigned char chclr_bit;
+};
+
+/**
+ * struct sh_dmae_pdata - DMAC platform data
+ * @slave: array of slaves
+ * @slave_num: number of slaves in the above array
+ * @channel: array of DMA channels
+ * @channel_num: number of channels in the above array
+ * @ts_low_shift: shift of the low part of the TS field
+ * @ts_low_mask: low TS field mask
+ * @ts_high_shift: additional shift of the high part of the TS field
+ * @ts_high_mask: high TS field mask
+ * @ts_shift: array of Transfer Size shifts, indexed by TS value
+ * @ts_shift_num: number of shifts in the above array
+ * @dmaor_init: DMAOR initialisation value
+ * @chcr_offset: CHCR address offset
+ * @chcr_ie_bit: CHCR Interrupt Enable bit
+ * @dmaor_is_32bit: DMAOR is a 32-bit register
+ * @needs_tend_set: the TEND register has to be set
+ * @no_dmars: DMAC has no DMARS registers
+ * @chclr_present: DMAC has one or several CHCLR registers
+ * @chclr_bitwise: channel CHCLR registers are bitwise
+ * @slave_only: DMAC cannot be used for MEMCPY
+ */
+struct sh_dmae_pdata {
+ const struct sh_dmae_slave_config *slave;
+ int slave_num;
+ const struct sh_dmae_channel *channel;
+ int channel_num;
+ unsigned int ts_low_shift;
+ unsigned int ts_low_mask;
+ unsigned int ts_high_shift;
+ unsigned int ts_high_mask;
+ const unsigned int *ts_shift;
+ int ts_shift_num;
+ u16 dmaor_init;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
+
+ unsigned int dmaor_is_32bit:1;
+ unsigned int needs_tend_set:1;
+ unsigned int no_dmars:1;
+ unsigned int chclr_present:1;
+ unsigned int chclr_bitwise:1;
+ unsigned int slave_only:1;
+};
+
+/* DMAOR definitions */
+#define DMAOR_AE 0x00000004 /* Address Error Flag */
+#define DMAOR_NMIF 0x00000002
+#define DMAOR_DME 0x00000001 /* DMA Master Enable */
+
+/* Definitions for the SuperH DMAC */
+#define DM_INC 0x00004000 /* Destination addresses are incremented */
+#define DM_DEC 0x00008000 /* Destination addresses are decremented */
+#define DM_FIX 0x0000c000 /* Destination address is fixed */
+#define SM_INC 0x00001000 /* Source addresses are incremented */
+#define SM_DEC 0x00002000 /* Source addresses are decremented */
+#define SM_FIX 0x00003000 /* Source address is fixed */
+#define RS_AUTO 0x00000400 /* Auto Request */
+#define RS_ERS 0x00000800 /* DMA extended resource selector */
+#define CHCR_DE 0x00000001 /* DMA Enable */
+#define CHCR_TE 0x00000002 /* Transfer End Flag */
+#define CHCR_IE 0x00000004 /* Interrupt Enable */
+
+#endif
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
new file mode 100644
index 000000000..8c9131db2
--- /dev/null
+++ b/include/linux/sh_eth.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SH_ETH_H__
+#define __ASM_SH_ETH_H__
+
+#include <linux/phy.h>
+#include <linux/if_ether.h>
+
+enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
+
+struct sh_eth_plat_data {
+ int phy;
+ int phy_irq;
+ int edmac_endian;
+ phy_interface_t phy_interface;
+ void (*set_mdio_gate)(void *addr);
+
+ unsigned char mac_addr[ETH_ALEN];
+ unsigned no_ether_link:1;
+ unsigned ether_link_active_low:1;
+ unsigned needs_init:1;
+};
+
+#endif
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
new file mode 100644
index 000000000..32383285d
--- /dev/null
+++ b/include/linux/sh_intc.h
@@ -0,0 +1,149 @@
+#ifndef __SH_INTC_H
+#define __SH_INTC_H
+
+#include <linux/ioport.h>
+
+#ifdef CONFIG_SUPERH
+#define INTC_NR_IRQS 512
+#else
+#define INTC_NR_IRQS 1024
+#endif
+
+/*
+ * Convert back and forth between INTEVT and IRQ values.
+ */
+#ifdef CONFIG_CPU_HAS_INTEVT
+#define evt2irq(evt) (((evt) >> 5) - 16)
+#define irq2evt(irq) (((irq) + 16) << 5)
+#else
+#define evt2irq(evt) (evt)
+#define irq2evt(irq) (irq)
+#endif
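+
+/* Worked example (value chosen for illustration): with CONFIG_CPU_HAS_INTEVT,
+ * INTEVT code 0x4a0 gives evt2irq(0x4a0) == (0x4a0 >> 5) - 16 == 21, and
+ * irq2evt(21) == (21 + 16) << 5 == 0x4a0 converts back.
+ */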
+
+typedef unsigned char intc_enum;
+
+struct intc_vect {
+ intc_enum enum_id;
+ unsigned short vect;
+};
+
+#define INTC_VECT(enum_id, vect) { enum_id, vect }
+#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq))
+
+struct intc_group {
+ intc_enum enum_id;
+ intc_enum enum_ids[32];
+};
+
+#define INTC_GROUP(enum_id, ids...) { enum_id, { ids } }
+
+struct intc_subgroup {
+ unsigned long reg, reg_width;
+ intc_enum parent_id;
+ intc_enum enum_ids[32];
+};
+
+struct intc_mask_reg {
+ unsigned long set_reg, clr_reg, reg_width;
+ intc_enum enum_ids[32];
+#ifdef CONFIG_INTC_BALANCING
+ unsigned long dist_reg;
+#endif
+#ifdef CONFIG_SMP
+ unsigned long smp;
+#endif
+};
+
+struct intc_prio_reg {
+ unsigned long set_reg, clr_reg, reg_width, field_width;
+ intc_enum enum_ids[16];
+#ifdef CONFIG_SMP
+ unsigned long smp;
+#endif
+};
+
+struct intc_sense_reg {
+ unsigned long reg, reg_width, field_width;
+ intc_enum enum_ids[16];
+};
+
+#ifdef CONFIG_INTC_BALANCING
+#define INTC_SMP_BALANCING(reg) .dist_reg = (reg)
+#else
+#define INTC_SMP_BALANCING(reg)
+#endif
+
+#ifdef CONFIG_SMP
+#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8)
+#else
+#define INTC_SMP(stride, nr)
+#endif
+
+struct intc_hw_desc {
+ struct intc_vect *vectors;
+ unsigned int nr_vectors;
+ struct intc_group *groups;
+ unsigned int nr_groups;
+ struct intc_mask_reg *mask_regs;
+ unsigned int nr_mask_regs;
+ struct intc_prio_reg *prio_regs;
+ unsigned int nr_prio_regs;
+ struct intc_sense_reg *sense_regs;
+ unsigned int nr_sense_regs;
+ struct intc_mask_reg *ack_regs;
+ unsigned int nr_ack_regs;
+ struct intc_subgroup *subgroups;
+ unsigned int nr_subgroups;
+};
+
+#define _INTC_ARRAY(a) a, __same_type(a, NULL) ? 0 : sizeof(a)/sizeof(*a)
+
+#define INTC_HW_DESC(vectors, groups, mask_regs, \
+ prio_regs, sense_regs, ack_regs) \
+{ \
+ _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \
+ _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \
+ _INTC_ARRAY(sense_regs), _INTC_ARRAY(ack_regs), \
+}
+
+struct intc_desc {
+ char *name;
+ struct resource *resource;
+ unsigned int num_resources;
+ intc_enum force_enable;
+ intc_enum force_disable;
+ bool skip_syscore_suspend;
+ struct intc_hw_desc hw;
+};
+
+#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \
+ mask_regs, prio_regs, sense_regs) \
+struct intc_desc symbol __initdata = { \
+ .name = chipname, \
+ .hw = INTC_HW_DESC(vectors, groups, mask_regs, \
+ prio_regs, sense_regs, NULL), \
+}
+
+#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \
+ mask_regs, prio_regs, sense_regs, ack_regs) \
+struct intc_desc symbol __initdata = { \
+ .name = chipname, \
+ .hw = INTC_HW_DESC(vectors, groups, mask_regs, \
+ prio_regs, sense_regs, ack_regs), \
+}
+
+int register_intc_controller(struct intc_desc *desc);
+int intc_set_priority(unsigned int irq, unsigned int prio);
+int intc_irq_lookup(const char *chipname, intc_enum enum_id);
+void intc_finalize(void);
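+
+/* Hypothetical example of describing and registering a controller with the
+ * macros above (the vector/group/register tables are assumed to be defined
+ * elsewhere in the board code):
+ *
+ *	static DECLARE_INTC_DESC(foo_intc_desc, "foo-intc", vectors, groups,
+ *				 mask_registers, prio_registers,
+ *				 sense_registers);
+ *
+ *	register_intc_controller(&foo_intc_desc);
+ */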
+
+#ifdef CONFIG_INTC_USERIMASK
+int register_intc_userimask(unsigned long addr);
+#else
+static inline int register_intc_userimask(unsigned long addr)
+{
+ return 0;
+}
+#endif
+
+#endif /* __SH_INTC_H */
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
new file mode 100644
index 000000000..64638b058
--- /dev/null
+++ b/include/linux/sh_timer.h
@@ -0,0 +1,8 @@
+#ifndef __SH_TIMER_H__
+#define __SH_TIMER_H__
+
+struct sh_timer_config {
+ unsigned int channels_mask;
+};
+
+#endif /* __SH_TIMER_H__ */
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
new file mode 100644
index 000000000..dd0ba502c
--- /dev/null
+++ b/include/linux/shdma-base.h
@@ -0,0 +1,134 @@
+/*
+ * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
+ *
+ * extracted from shdma.c and headers
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SHDMA_BASE_H
+#define SHDMA_BASE_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+/**
+ * shdma_pm_state - DMA channel PM state
+ * SHDMA_PM_ESTABLISHED: either idle or during data transfer
+ * SHDMA_PM_BUSY: during the transfer preparation, when we have to
+ * drop the lock temporarily
+ * SHDMA_PM_PENDING: transfers pending
+ */
+enum shdma_pm_state {
+ SHDMA_PM_ESTABLISHED,
+ SHDMA_PM_BUSY,
+ SHDMA_PM_PENDING,
+};
+
+struct device;
+
+/*
+ * Drivers using this library are expected to embed struct shdma_dev,
+ * struct shdma_chan, struct shdma_desc, and struct shdma_slave
+ * in their respective device, channel, descriptor and slave objects.
+ */
+
+struct shdma_slave {
+ int slave_id;
+};
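+
+/* Hypothetical illustration of the embedding described above: a driver's
+ * private channel type wraps struct shdma_chan and is recovered with
+ * container_of().
+ *
+ *	struct foo_chan {
+ *		struct shdma_chan shdma_chan;
+ *		void __iomem *base;
+ *	};
+ *	#define to_foo_chan(c) container_of(c, struct foo_chan, shdma_chan)
+ */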
+
+struct shdma_desc {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ enum dma_transfer_direction direction;
+ size_t partial;
+ dma_cookie_t cookie;
+ int chunks;
+ int mark;
+ bool cyclic; /* used as cyclic transfer */
+};
+
+struct shdma_chan {
+ spinlock_t chan_lock; /* Channel operation lock */
+ struct list_head ld_queue; /* Link descriptors queue */
+ struct list_head ld_free; /* Free link descriptors */
+ struct dma_chan dma_chan; /* DMA channel */
+ struct device *dev; /* Channel device */
+ void *desc; /* buffer for descriptor array */
+ int desc_num; /* desc count */
+ size_t max_xfer_len; /* max transfer length */
+ int id; /* Raw id of this channel */
+ int irq; /* Channel IRQ */
+ int slave_id; /* Client ID for slave DMA */
+ int real_slave_id; /* argument passed to filter function */
+ int hw_req; /* DMA request line for slave DMA - same
+ * as MID/RID, used with DT */
+ enum shdma_pm_state pm_state;
+};
+
+/**
+ * struct shdma_ops - simple DMA driver operations
+ * desc_completed: return true, if this is the descriptor, that just has
+ * completed (atomic)
+ * halt_channel: stop DMA channel operation (atomic)
+ * channel_busy: return true, if the channel is busy (atomic)
+ * slave_addr: return slave DMA address
+ * desc_setup: set up the hardware specific descriptor portion (atomic)
+ * set_slave: bind channel to a slave
+ * setup_xfer: configure channel hardware for operation (atomic)
+ * start_xfer: start the DMA transfer (atomic)
+ * embedded_desc: return Nth struct shdma_desc pointer from the
+ * descriptor array
+ * chan_irq: process channel IRQ, return true if a transfer has
+ * completed (atomic)
+ */
+struct shdma_ops {
+ bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
+ void (*halt_channel)(struct shdma_chan *);
+ bool (*channel_busy)(struct shdma_chan *);
+ dma_addr_t (*slave_addr)(struct shdma_chan *);
+ int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
+ dma_addr_t, dma_addr_t, size_t *);
+ int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
+ void (*setup_xfer)(struct shdma_chan *, int);
+ void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
+ struct shdma_desc *(*embedded_desc)(void *, int);
+ bool (*chan_irq)(struct shdma_chan *, int);
+ size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
+};
+
+struct shdma_dev {
+ struct dma_device dma_dev;
+ struct shdma_chan **schan;
+ const struct shdma_ops *ops;
+ size_t desc_size;
+};
+
+#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
+ i < (d)->dma_dev.chancnt; c = (d)->schan[++i])
+
+int shdma_request_irq(struct shdma_chan *, int,
+ unsigned long, const char *);
+bool shdma_reset(struct shdma_dev *sdev);
+void shdma_chan_probe(struct shdma_dev *sdev,
+ struct shdma_chan *schan, int id);
+void shdma_chan_remove(struct shdma_chan *schan);
+int shdma_init(struct device *dev, struct shdma_dev *sdev,
+ int chan_num);
+void shdma_cleanup(struct shdma_dev *sdev);
+#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
+bool shdma_chan_filter(struct dma_chan *chan, void *arg);
+#else
+#define shdma_chan_filter NULL
+#endif
+
+#endif
diff --git a/include/linux/shm.h b/include/linux/shm.h
new file mode 100644
index 000000000..6fb801686
--- /dev/null
+++ b/include/linux/shm.h
@@ -0,0 +1,81 @@
+#ifndef _LINUX_SHM_H_
+#define _LINUX_SHM_H_
+
+#include <linux/list.h>
+#include <asm/page.h>
+#include <uapi/linux/shm.h>
+#include <asm/shmparam.h>
+
+struct shmid_kernel /* private to the kernel */
+{
+ struct kern_ipc_perm shm_perm;
+ struct file *shm_file;
+ unsigned long shm_nattch;
+ unsigned long shm_segsz;
+ time_t shm_atim;
+ time_t shm_dtim;
+ time_t shm_ctim;
+ pid_t shm_cprid;
+ pid_t shm_lprid;
+ struct user_struct *mlock_user;
+
+	/* The task that created the shm object; NULL if the task is dead. */
+ struct task_struct *shm_creator;
+ struct list_head shm_clist; /* list by creator */
+};
+
+/* shm_mode upper byte flags */
+#define SHM_DEST 01000 /* segment will be destroyed on last detach */
+#define SHM_LOCKED 02000 /* segment will not be swapped */
+#define SHM_HUGETLB 04000 /* segment will use huge TLB pages */
+#define SHM_NORESERVE 010000 /* don't check for reservations */
+
+/* Bits [26:31] are reserved */
+
+/*
+ * When SHM_HUGETLB is set, bits [26:31] encode the log2 of the huge page
+ * size.  This gives us 6 bits, which is enough until someone invents
+ * 128-bit address spaces.
+ *
+ * Assume these are all powers of two.
+ * When 0, use the default page size.
+ */
+#define SHM_HUGE_SHIFT 26
+#define SHM_HUGE_MASK 0x3f
+#define SHM_HUGE_2MB (21 << SHM_HUGE_SHIFT)
+#define SHM_HUGE_1GB (30 << SHM_HUGE_SHIFT)
+
+#ifdef CONFIG_SYSVIPC
+struct sysv_shm {
+ struct list_head shm_clist;
+};
+
+long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr,
+ unsigned long shmlba);
+int is_file_shm_hugepages(struct file *file);
+void exit_shm(struct task_struct *task);
+#define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist)
+#else
+struct sysv_shm {
+ /* empty */
+};
+
+static inline long do_shmat(int shmid, char __user *shmaddr,
+ int shmflg, unsigned long *addr,
+ unsigned long shmlba)
+{
+ return -ENOSYS;
+}
+static inline int is_file_shm_hugepages(struct file *file)
+{
+ return 0;
+}
+static inline void exit_shm(struct task_struct *task)
+{
+}
+static inline void shm_init_task(struct task_struct *task)
+{
+}
+#endif
+
+#endif /* _LINUX_SHM_H_ */
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
new file mode 100644
index 000000000..3ab7d1805
--- /dev/null
+++ b/include/linux/shmem_fs.h
@@ -0,0 +1,89 @@
+#ifndef __SHMEM_FS_H
+#define __SHMEM_FS_H
+
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/mempolicy.h>
+#include <linux/pagemap.h>
+#include <linux/percpu_counter.h>
+#include <linux/xattr.h>
+
+/* inode in-kernel data */
+
+struct shmem_inode_info {
+ spinlock_t lock;
+ unsigned int seals; /* shmem seals */
+ unsigned long flags;
+ unsigned long alloced; /* data pages alloced to file */
+ union {
+ unsigned long swapped; /* subtotal assigned to swap */
+ char *symlink; /* unswappable short symlink */
+ };
+ struct shared_policy policy; /* NUMA memory alloc policy */
+ struct list_head swaplist; /* chain of maybes on swap */
+ struct simple_xattrs xattrs; /* list of xattrs */
+ struct inode vfs_inode;
+};
+
+struct shmem_sb_info {
+ struct mutex idr_lock;
+ bool idr_nouse;
+ struct idr idr; /* manages inode-number */
+ unsigned long max_blocks; /* How many blocks are allowed */
+ struct percpu_counter used_blocks; /* How many are allocated */
+ int max_inodes; /* How many inodes are allowed */
+ int free_inodes; /* How many are left for allocation */
+ spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
+ kuid_t uid; /* Mount uid for root directory */
+ kgid_t gid; /* Mount gid for root directory */
+ umode_t mode; /* Mount mode for root directory */
+ struct mempolicy *mpol; /* default memory policy for mappings */
+};
+
+static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
+{
+ return container_of(inode, struct shmem_inode_info, vfs_inode);
+}
+
+/*
+ * Functions in mm/shmem.c called directly from elsewhere:
+ */
+extern int shmem_init(void);
+extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
+extern struct file *shmem_file_setup(const char *name,
+ loff_t size, unsigned long flags,
+ int atomic_copy);
+extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
+ unsigned long flags, int atomic_copy);
+extern int shmem_zero_setup(struct vm_area_struct *);
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern bool shmem_mapping(struct address_space *mapping);
+extern void shmem_unlock_mapping(struct address_space *mapping);
+extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+extern int shmem_unuse(swp_entry_t entry, struct page *page);
+
+static inline struct page *shmem_read_mapping_page(
+ struct address_space *mapping, pgoff_t index)
+{
+ return shmem_read_mapping_page_gfp(mapping, index,
+ mapping_gfp_mask(mapping));
+}
+
+#ifdef CONFIG_TMPFS
+
+extern int shmem_add_seals(struct file *file, unsigned int seals);
+extern int shmem_get_seals(struct file *file);
+extern long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#else
+
+static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a)
+{
+ return -EINVAL;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
new file mode 100644
index 000000000..4fcacd915
--- /dev/null
+++ b/include/linux/shrinker.h
@@ -0,0 +1,72 @@
+#ifndef _LINUX_SHRINKER_H
+#define _LINUX_SHRINKER_H
+
+/*
+ * This struct is used to pass information from page reclaim to the shrinkers.
+ * We consolidate the values for easier extension later.
+ *
+ * The 'gfp_mask' refers to the allocation we are currently trying to
+ * fulfil.
+ */
+struct shrink_control {
+ gfp_t gfp_mask;
+
+ /*
+ * How many objects scan_objects should scan and try to reclaim.
+ * This is reset before every call, so it is safe for callees
+ * to modify.
+ */
+ unsigned long nr_to_scan;
+
+ /* current node being shrunk (for NUMA aware shrinkers) */
+ int nid;
+
+ /* current memcg being shrunk (for memcg aware shrinkers) */
+ struct mem_cgroup *memcg;
+};
+
+#define SHRINK_STOP (~0UL)
+/*
+ * A callback you can register to apply pressure to ageable caches.
+ *
+ * @count_objects should return the number of freeable items in the cache. If
+ * there are no objects to free or the number of freeable items cannot be
+ * determined, it should return 0. No deadlock checks should be done during
+ * the count callback; the shrinker relies on scan counts that could not be
+ * executed because of potential deadlocks being aggregated and run at a
+ * later call, when the deadlock condition is no longer pending.
+ *
+ * @scan_objects will only be called if @count_objects returned a non-zero
+ * value for the number of freeable objects. The callout should scan the cache
+ * and attempt to free items from the cache. It should then return the number
+ * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
+ * due to potential deadlocks. If SHRINK_STOP is returned, then no further
+ * attempts to call the @scan_objects will be made from the current reclaim
+ * context.
+ *
+ * @flags determine the shrinker abilities, like numa awareness
+ */
+struct shrinker {
+ unsigned long (*count_objects)(struct shrinker *,
+ struct shrink_control *sc);
+ unsigned long (*scan_objects)(struct shrinker *,
+ struct shrink_control *sc);
+
+ int seeks; /* seeks to recreate an obj */
+ long batch; /* reclaim batch size, 0 = default */
+ unsigned long flags;
+
+ /* These are for internal use */
+ struct list_head list;
+ /* objs pending delete, per node */
+ atomic_long_t *nr_deferred;
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+
+/* Flags */
+#define SHRINKER_NUMA_AWARE (1 << 0)
+#define SHRINKER_MEMCG_AWARE (1 << 1)
+
+extern int register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
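+
+/* Minimal sketch of a shrinker (the "foo_*" callbacks are hypothetical):
+ * count_objects reports how many objects could be freed, scan_objects frees
+ * up to sc->nr_to_scan of them or returns SHRINK_STOP.
+ *
+ *	static struct shrinker foo_shrinker = {
+ *		.count_objects	= foo_count,
+ *		.scan_objects	= foo_scan,
+ *		.seeks		= DEFAULT_SEEKS,
+ *	};
+ *
+ *	register_shrinker(&foo_shrinker);
+ *	...
+ *	unregister_shrinker(&foo_shrinker);
+ */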
+#endif
diff --git a/include/linux/signal.h b/include/linux/signal.h
new file mode 100644
index 000000000..ab1e0392b
--- /dev/null
+++ b/include/linux/signal.h
@@ -0,0 +1,445 @@
+#ifndef _LINUX_SIGNAL_H
+#define _LINUX_SIGNAL_H
+
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <uapi/linux/signal.h>
+
+struct task_struct;
+
+/* for sysctl */
+extern int print_fatal_signals;
+/*
+ * Real Time signals may be queued.
+ */
+
+struct sigqueue {
+ struct list_head list;
+ int flags;
+ siginfo_t info;
+ struct user_struct *user;
+};
+
+/* flags values. */
+#define SIGQUEUE_PREALLOC 1
+
+struct sigpending {
+ struct list_head list;
+ sigset_t signal;
+};
+
+/*
+ * Define some primitives to manipulate sigset_t.
+ */
+
+#ifndef __HAVE_ARCH_SIG_BITOPS
+#include <linux/bitops.h>
+
+/* We don't use <linux/bitops.h> for these because there is no need to
+ be atomic. */
+static inline void sigaddset(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if (_NSIG_WORDS == 1)
+ set->sig[0] |= 1UL << sig;
+ else
+ set->sig[sig / _NSIG_BPW] |= 1UL << (sig % _NSIG_BPW);
+}
+
+static inline void sigdelset(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if (_NSIG_WORDS == 1)
+ set->sig[0] &= ~(1UL << sig);
+ else
+ set->sig[sig / _NSIG_BPW] &= ~(1UL << (sig % _NSIG_BPW));
+}
+
+static inline int sigismember(sigset_t *set, int _sig)
+{
+ unsigned long sig = _sig - 1;
+ if (_NSIG_WORDS == 1)
+ return 1 & (set->sig[0] >> sig);
+ else
+ return 1 & (set->sig[sig / _NSIG_BPW] >> (sig % _NSIG_BPW));
+}
+
+#endif /* __HAVE_ARCH_SIG_BITOPS */
+
+static inline int sigisemptyset(sigset_t *set)
+{
+ switch (_NSIG_WORDS) {
+ case 4:
+ return (set->sig[3] | set->sig[2] |
+ set->sig[1] | set->sig[0]) == 0;
+ case 2:
+ return (set->sig[1] | set->sig[0]) == 0;
+ case 1:
+ return set->sig[0] == 0;
+ default:
+ BUILD_BUG();
+ return 0;
+ }
+}
+
+#define sigmask(sig) (1UL << ((sig) - 1))
+
+#ifndef __HAVE_ARCH_SIG_SETOPS
+#include <linux/string.h>
+
+#define _SIG_SET_BINOP(name, op) \
+static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
+{ \
+ unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \
+ \
+ switch (_NSIG_WORDS) { \
+ case 4: \
+ a3 = a->sig[3]; a2 = a->sig[2]; \
+ b3 = b->sig[3]; b2 = b->sig[2]; \
+ r->sig[3] = op(a3, b3); \
+ r->sig[2] = op(a2, b2); \
+ case 2: \
+ a1 = a->sig[1]; b1 = b->sig[1]; \
+ r->sig[1] = op(a1, b1); \
+ case 1: \
+ a0 = a->sig[0]; b0 = b->sig[0]; \
+ r->sig[0] = op(a0, b0); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+}
+
+#define _sig_or(x,y) ((x) | (y))
+_SIG_SET_BINOP(sigorsets, _sig_or)
+
+#define _sig_and(x,y) ((x) & (y))
+_SIG_SET_BINOP(sigandsets, _sig_and)
+
+#define _sig_andn(x,y) ((x) & ~(y))
+_SIG_SET_BINOP(sigandnsets, _sig_andn)
+
+#undef _SIG_SET_BINOP
+#undef _sig_or
+#undef _sig_and
+#undef _sig_andn
+
+#define _SIG_SET_OP(name, op) \
+static inline void name(sigset_t *set) \
+{ \
+ switch (_NSIG_WORDS) { \
+ case 4: set->sig[3] = op(set->sig[3]); \
+ set->sig[2] = op(set->sig[2]); \
+ case 2: set->sig[1] = op(set->sig[1]); \
+ case 1: set->sig[0] = op(set->sig[0]); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+}
+
+#define _sig_not(x) (~(x))
+_SIG_SET_OP(signotset, _sig_not)
+
+#undef _SIG_SET_OP
+#undef _sig_not
+
+static inline void sigemptyset(sigset_t *set)
+{
+ switch (_NSIG_WORDS) {
+ default:
+ memset(set, 0, sizeof(sigset_t));
+ break;
+ case 2: set->sig[1] = 0;
+ case 1: set->sig[0] = 0;
+ break;
+ }
+}
+
+static inline void sigfillset(sigset_t *set)
+{
+ switch (_NSIG_WORDS) {
+ default:
+ memset(set, -1, sizeof(sigset_t));
+ break;
+ case 2: set->sig[1] = -1;
+ case 1: set->sig[0] = -1;
+ break;
+ }
+}
+
+/* Some extensions for manipulating the low 32 signals in particular. */
+
+static inline void sigaddsetmask(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] |= mask;
+}
+
+static inline void sigdelsetmask(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] &= ~mask;
+}
+
+static inline int sigtestsetmask(sigset_t *set, unsigned long mask)
+{
+ return (set->sig[0] & mask) != 0;
+}
+
+static inline void siginitset(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] = mask;
+ switch (_NSIG_WORDS) {
+ default:
+ memset(&set->sig[1], 0, sizeof(long)*(_NSIG_WORDS-1));
+ break;
+ case 2: set->sig[1] = 0;
+ case 1: ;
+ }
+}
+
+static inline void siginitsetinv(sigset_t *set, unsigned long mask)
+{
+ set->sig[0] = ~mask;
+ switch (_NSIG_WORDS) {
+ default:
+ memset(&set->sig[1], -1, sizeof(long)*(_NSIG_WORDS-1));
+ break;
+ case 2: set->sig[1] = -1;
+ case 1: ;
+ }
+}
+
+#endif /* __HAVE_ARCH_SIG_SETOPS */
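+
+/*
+ * Illustrative sketch (editor's example, disabled): composing the non-atomic
+ * sigset primitives above to build a blocked-signal mask. Purely an example;
+ * demo_build_mask() is not part of this header.
+ */
+#if 0
+static void demo_build_mask(sigset_t *blocked)
+{
+	/* start with SIGINT and SIGQUIT blocked, everything else clear */
+	siginitset(blocked, sigmask(SIGINT) | sigmask(SIGQUIT));
+
+	sigaddset(blocked, SIGTERM);		/* block SIGTERM as well */
+
+	if (sigismember(blocked, SIGQUIT))	/* membership test */
+		sigdelset(blocked, SIGQUIT);	/* ...and unblock it again */
+}
+#endif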
+
+static inline void init_sigpending(struct sigpending *sig)
+{
+ sigemptyset(&sig->signal);
+ INIT_LIST_HEAD(&sig->list);
+}
+
+extern void flush_sigqueue(struct sigpending *queue);
+
+/* Test if 'sig' is a valid signal. Use this instead of testing _NSIG directly */
+static inline int valid_signal(unsigned long sig)
+{
+ return sig <= _NSIG ? 1 : 0;
+}
+
+struct timespec;
+struct pt_regs;
+
+extern int next_signal(struct sigpending *pending, sigset_t *mask);
+extern int do_send_sig_info(int sig, struct siginfo *info,
+ struct task_struct *p, bool group);
+extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
+extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int do_sigtimedwait(const sigset_t *, siginfo_t *,
+ const struct timespec *);
+extern int sigprocmask(int, sigset_t *, sigset_t *);
+extern void set_current_blocked(sigset_t *);
+extern void __set_current_blocked(const sigset_t *);
+extern int show_unhandled_signals;
+extern int sigsuspend(sigset_t *);
+
+struct sigaction {
+#ifndef __ARCH_HAS_IRIX_SIGACTION
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+#else
+ unsigned int sa_flags;
+ __sighandler_t sa_handler;
+#endif
+#ifdef __ARCH_HAS_SA_RESTORER
+ __sigrestore_t sa_restorer;
+#endif
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+#ifdef __ARCH_HAS_KA_RESTORER
+ __sigrestore_t ka_restorer;
+#endif
+};
+
+#ifdef CONFIG_OLD_SIGACTION
+struct old_sigaction {
+ __sighandler_t sa_handler;
+ old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ __sigrestore_t sa_restorer;
+};
+#endif
+
+struct ksignal {
+ struct k_sigaction ka;
+ siginfo_t info;
+ int sig;
+};
+
+extern int get_signal(struct ksignal *ksig);
+extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
+extern void exit_signals(struct task_struct *tsk);
+extern void kernel_sigaction(int, __sighandler_t);
+
+static inline void allow_signal(int sig)
+{
+ /*
+ * Kernel threads handle their own signals. Let the signal code
+ * know it'll be handled, so that they don't get converted to
+ * SIGKILL or just silently dropped.
+ */
+ kernel_sigaction(sig, (__force __sighandler_t)2);
+}
+
+static inline void disallow_signal(int sig)
+{
+ kernel_sigaction(sig, SIG_IGN);
+}
+
+extern struct kmem_cache *sighand_cachep;
+
+int unhandled_signal(struct task_struct *tsk, int sig);
+
+/*
+ * In POSIX a signal is sent either to a specific thread (Linux task)
+ * or to the process as a whole (Linux thread group). How the signal
+ * is sent determines whether it's to one thread or the whole group,
+ * which determines which signal mask(s) are involved in blocking it
+ * from being delivered until later. When the signal is delivered,
+ * either it's caught or ignored by a user handler or it has a default
+ * effect that applies to the whole thread group (POSIX process).
+ *
+ * The possible effects an unblocked signal set to SIG_DFL can have are:
+ * ignore - Nothing Happens
+ * terminate - kill the process, i.e. all threads in the group,
+ * similar to exit_group. The group leader (only) reports
+ * WIFSIGNALED status to its parent.
+ * coredump - write a core dump file describing all threads using
+ * the same mm and then kill all those threads
+ * stop - stop all the threads in the group, i.e. TASK_STOPPED state
+ *
+ * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored.
+ * Other signals, when not blocked and set to SIG_DFL, behave as follows.
+ * The job control signals also have other special effects.
+ *
+ * +--------------------+------------------+
+ * | POSIX signal | default action |
+ * +--------------------+------------------+
+ * | SIGHUP | terminate |
+ * | SIGINT | terminate |
+ * | SIGQUIT | coredump |
+ * | SIGILL | coredump |
+ * | SIGTRAP | coredump |
+ * | SIGABRT/SIGIOT | coredump |
+ * | SIGBUS | coredump |
+ * | SIGFPE | coredump |
+ * | SIGKILL | terminate(+) |
+ * | SIGUSR1 | terminate |
+ * | SIGSEGV | coredump |
+ * | SIGUSR2 | terminate |
+ * | SIGPIPE | terminate |
+ * | SIGALRM | terminate |
+ * | SIGTERM | terminate |
+ * | SIGCHLD | ignore |
+ * | SIGCONT | ignore(*) |
+ * | SIGSTOP | stop(*)(+) |
+ * | SIGTSTP | stop(*) |
+ * | SIGTTIN | stop(*) |
+ * | SIGTTOU | stop(*) |
+ * | SIGURG | ignore |
+ * | SIGXCPU | coredump |
+ * | SIGXFSZ | coredump |
+ * | SIGVTALRM | terminate |
+ * | SIGPROF | terminate |
+ * | SIGPOLL/SIGIO | terminate |
+ * | SIGSYS/SIGUNUSED | coredump |
+ * | SIGSTKFLT | terminate |
+ * | SIGWINCH | ignore |
+ * | SIGPWR | terminate |
+ * | SIGRTMIN-SIGRTMAX | terminate |
+ * +--------------------+------------------+
+ * | non-POSIX signal | default action |
+ * +--------------------+------------------+
+ * | SIGEMT | coredump |
+ * +--------------------+------------------+
+ *
+ * (+) For SIGKILL and SIGSTOP the action is "always", not just "default".
+ * (*) Special job control effects:
+ * When SIGCONT is sent, it resumes the process (all threads in the group)
+ * from TASK_STOPPED state and also clears any pending/queued stop signals
+ * (any of those marked with "stop(*)"). This happens regardless of blocking,
+ * catching, or ignoring SIGCONT. When any stop signal is sent, it clears
+ * any pending/queued SIGCONT signals; this happens regardless of blocking,
+ * catching, or ignoring the stop signal, though (except for SIGSTOP) the
+ * default action of stopping the process may happen later or never.
+ */
+
+#ifdef SIGEMT
+#define SIGEMT_MASK rt_sigmask(SIGEMT)
+#else
+#define SIGEMT_MASK 0
+#endif
+
+#if SIGRTMIN > BITS_PER_LONG
+#define rt_sigmask(sig) (1ULL << ((sig)-1))
+#else
+#define rt_sigmask(sig) sigmask(sig)
+#endif
+#define siginmask(sig, mask) (rt_sigmask(sig) & (mask))
+
+#define SIG_KERNEL_ONLY_MASK (\
+ rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP))
+
+#define SIG_KERNEL_STOP_MASK (\
+ rt_sigmask(SIGSTOP) | rt_sigmask(SIGTSTP) | \
+ rt_sigmask(SIGTTIN) | rt_sigmask(SIGTTOU) )
+
+#define SIG_KERNEL_COREDUMP_MASK (\
+ rt_sigmask(SIGQUIT) | rt_sigmask(SIGILL) | \
+ rt_sigmask(SIGTRAP) | rt_sigmask(SIGABRT) | \
+ rt_sigmask(SIGFPE) | rt_sigmask(SIGSEGV) | \
+ rt_sigmask(SIGBUS) | rt_sigmask(SIGSYS) | \
+ rt_sigmask(SIGXCPU) | rt_sigmask(SIGXFSZ) | \
+ SIGEMT_MASK )
+
+#define SIG_KERNEL_IGNORE_MASK (\
+ rt_sigmask(SIGCONT) | rt_sigmask(SIGCHLD) | \
+ rt_sigmask(SIGWINCH) | rt_sigmask(SIGURG) )
+
+#define sig_kernel_only(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_ONLY_MASK))
+#define sig_kernel_coredump(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_COREDUMP_MASK))
+#define sig_kernel_ignore(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_IGNORE_MASK))
+#define sig_kernel_stop(sig) \
+ (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK))
+
+#define sig_user_defined(t, signr) \
+ (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
+ ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
+
+#define sig_fatal(t, signr) \
+ (!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
+ (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
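+
+/*
+ * Illustrative sketch (editor's example, disabled): classifying the default
+ * action of a signal with the masks above, mirroring the table earlier in
+ * this file. demo_default_action() is hypothetical; the macros are the ones
+ * defined here.
+ */
+#if 0
+static const char *demo_default_action(int sig)
+{
+	if (sig_kernel_ignore(sig))
+		return "ignore";
+	if (sig_kernel_stop(sig))
+		return "stop";
+	if (sig_kernel_coredump(sig))
+		return "coredump";
+	return "terminate";	/* including SIGRTMIN..SIGRTMAX left at SIG_DFL */
+}
+#endif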
+
+void signals_init(void);
+
+int restore_altstack(const stack_t __user *);
+int __save_altstack(stack_t __user *, unsigned long);
+
+#define save_altstack_ex(uss, sp) do { \
+ stack_t __user *__uss = uss; \
+ struct task_struct *t = current; \
+ put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
+ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+} while (0)
+
+#ifdef CONFIG_PROC_FS
+struct seq_file;
+extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
+#endif
+
+#endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h
new file mode 100644
index 000000000..eadbe227c
--- /dev/null
+++ b/include/linux/signalfd.h
@@ -0,0 +1,34 @@
+/*
+ * include/linux/signalfd.h
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+#ifndef _LINUX_SIGNALFD_H
+#define _LINUX_SIGNALFD_H
+
+#include <uapi/linux/signalfd.h>
+
+
+#ifdef CONFIG_SIGNALFD
+
+/*
+ * Deliver the signal to listening signalfd.
+ */
+static inline void signalfd_notify(struct task_struct *tsk, int sig)
+{
+ if (unlikely(waitqueue_active(&tsk->sighand->signalfd_wqh)))
+ wake_up(&tsk->sighand->signalfd_wqh);
+}
+
+extern void signalfd_cleanup(struct sighand_struct *sighand);
+
+#else /* CONFIG_SIGNALFD */
+
+static inline void signalfd_notify(struct task_struct *tsk, int sig) { }
+
+static inline void signalfd_cleanup(struct sighand_struct *sighand) { }
+
+#endif /* CONFIG_SIGNALFD */
+
+#endif /* _LINUX_SIGNALFD_H */
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
new file mode 100644
index 000000000..29d959333
--- /dev/null
+++ b/include/linux/sirfsoc_dma.h
@@ -0,0 +1,6 @@
+#ifndef _SIRFSOC_DMA_H_
+#define _SIRFSOC_DMA_H_
+
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
+
+#endif
diff --git a/include/linux/sizes.h b/include/linux/sizes.h
new file mode 100644
index 000000000..ce3e8150c
--- /dev/null
+++ b/include/linux/sizes.h
@@ -0,0 +1,47 @@
+/*
+ * include/linux/sizes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_SIZES_H__
+#define __LINUX_SIZES_H__
+
+#define SZ_1 0x00000001
+#define SZ_2 0x00000002
+#define SZ_4 0x00000004
+#define SZ_8 0x00000008
+#define SZ_16 0x00000010
+#define SZ_32 0x00000020
+#define SZ_64 0x00000040
+#define SZ_128 0x00000080
+#define SZ_256 0x00000100
+#define SZ_512 0x00000200
+
+#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
+#define SZ_4K 0x00001000
+#define SZ_8K 0x00002000
+#define SZ_16K 0x00004000
+#define SZ_32K 0x00008000
+#define SZ_64K 0x00010000
+#define SZ_128K 0x00020000
+#define SZ_256K 0x00040000
+#define SZ_512K 0x00080000
+
+#define SZ_1M 0x00100000
+#define SZ_2M 0x00200000
+#define SZ_4M 0x00400000
+#define SZ_8M 0x00800000
+#define SZ_16M 0x01000000
+#define SZ_32M 0x02000000
+#define SZ_64M 0x04000000
+#define SZ_128M 0x08000000
+#define SZ_256M 0x10000000
+#define SZ_512M 0x20000000
+
+#define SZ_1G 0x40000000
+#define SZ_2G 0x80000000
+
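+/*
+ * Illustrative sketch (editor's example): the SZ_* constants are meant to be
+ * combined arithmetically when describing memory regions. The names below
+ * are hypothetical.
+ *
+ *	#define DEMO_FIFO_SIZE		SZ_4K
+ *	#define DEMO_WINDOW_SIZE	(SZ_1M + SZ_256K)	-- 1.25 MiB
+ */
+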
+#endif /* __LINUX_SIZES_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
new file mode 100644
index 000000000..f15154a87
--- /dev/null
+++ b/include/linux/skbuff.h
@@ -0,0 +1,3457 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+#include <linux/bug.h>
+#include <linux/cache.h>
+#include <linux/rbtree.h>
+#include <linux/socket.h>
+
+#include <linux/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/net.h>
+#include <linux/textsearch.h>
+#include <net/checksum.h>
+#include <linux/rcupdate.h>
+#include <linux/hrtimer.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdev_features.h>
+#include <linux/sched.h>
+#include <net/flow_keys.h>
+
+/* A. Checksumming of received packets by device.
+ *
+ * CHECKSUM_NONE:
+ *
+ * Device failed to checksum this packet e.g. due to lack of capabilities.
+ * The packet contains full (though not verified) checksum in packet but
+ * not in skb->csum. Thus, skb->csum is undefined in this case.
+ *
+ * CHECKSUM_UNNECESSARY:
+ *
+ * The hardware you're dealing with doesn't calculate the full checksum
+ * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
+ * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
+ * if their checksums are okay. skb->csum is still undefined in this case
+ * though. It is a bad option, but, unfortunately, nowadays most vendors do
+ * this. Apparently with the secret goal of selling you new devices, when you
+ * add a new protocol to your host, e.g. IPv6 8)
+ *
+ * CHECKSUM_UNNECESSARY is applicable to following protocols:
+ * TCP: IPv6 and IPv4.
+ * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
+ * zero UDP checksum for either IPv4 or IPv6; the networking stack
+ * may perform further validation in this case.
+ * GRE: only if the checksum is present in the header.
+ * SCTP: indicates the CRC in SCTP header has been validated.
+ *
+ * skb->csum_level indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
+ * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
+ * and a device is able to verify the checksums for UDP (possibly zero),
+ * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
+ * two. If the device were only able to verify the UDP checksum and not
+ * GRE, either because it doesn't support GRE checksum or because GRE
+ * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
+ * not considered in this case).
+ *
+ * CHECKSUM_COMPLETE:
+ *
+ * This is the most generic way. The device supplied the checksum of the
+ * _whole_ packet as seen by netif_rx() and filled it in to skb->csum. This
+ * means the hardware doesn't need to parse L3/L4 headers to implement this.
+ *
+ * Note: Even if device supports only some protocols, but is able to produce
+ * skb->csum, it MUST use CHECKSUM_COMPLETE, not CHECKSUM_UNNECESSARY.
+ *
+ * CHECKSUM_PARTIAL:
+ *
+ * A checksum is set up to be offloaded to a device as described in the
+ * output description for CHECKSUM_PARTIAL. This may occur on a packet
+ * received directly from another Linux OS, e.g., a virtualized Linux kernel
+ * on the same host, or it may be set in the input path in GRO or remote
+ * checksum offload. For the purposes of checksum verification, the checksum
+ * referred to by skb->csum_start + skb->csum_offset and any preceding
+ * checksums in the packet are considered verified. Any checksums in the
+ * packet that are after the checksum being offloaded are not considered to
+ * be verified.
+ *
+ * B. Checksumming on output.
+ *
+ * CHECKSUM_NONE:
+ *
+ * The skb was already checksummed by the protocol, or a checksum is not
+ * required.
+ *
+ * CHECKSUM_PARTIAL:
+ *
+ * The device is required to checksum the packet as seen by hard_start_xmit()
+ * from skb->csum_start up to the end, and to record/write the checksum at
+ * offset skb->csum_start + skb->csum_offset.
+ *
+ * The device must show its capabilities in dev->features, set up at device
+ * setup time, e.g. netdev_features.h:
+ *
+ * NETIF_F_HW_CSUM - It's a clever device, it's able to checksum everything.
+ * NETIF_F_IP_CSUM - Device is dumb, it's able to checksum only TCP/UDP over
+ * IPv4. Sigh. Vendors like it this way for an unknown reason.
+ * Though, see comment above about CHECKSUM_UNNECESSARY. 8)
+ * NETIF_F_IPV6_CSUM - About as dumb as the last one but does IPv6 instead.
+ * NETIF_F_... - Well, you get the picture.
+ *
+ * CHECKSUM_UNNECESSARY:
+ *
+ * Normally, the device will do per-protocol checksumming. Protocol
+ * implementations that do not want the NIC to perform the checksum
+ * calculation should use this flag in their outgoing skbs.
+ *
+ * NETIF_F_FCOE_CRC - This indicates that the device can do FCoE FC CRC
+ * offload. Correspondingly, the FCoE protocol driver
+ * stack should use CHECKSUM_UNNECESSARY.
+ *
+ * Any questions? No questions, good. --ANK
+ */
+
+/* Don't change this without changing skb_csum_unnecessary! */
+#define CHECKSUM_NONE 0
+#define CHECKSUM_UNNECESSARY 1
+#define CHECKSUM_COMPLETE 2
+#define CHECKSUM_PARTIAL 3
+
+/* Maximum value in skb->csum_level */
+#define SKB_MAX_CSUM_LEVEL 3
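+
+/*
+ * Illustrative sketch (editor's example, disabled): how a receive path might
+ * apply the rules above. The helper and its flag arguments are hypothetical;
+ * only ip_summed, csum and the CHECKSUM_* values come from this header.
+ */
+#if 0
+static void demo_rx_checksum(struct sk_buff *skb, bool l4_verified,
+			     bool have_full_sum, __wsum full_sum)
+{
+	if (l4_verified) {
+		/* hardware parsed the headers and validated the L4 checksum */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (have_full_sum) {
+		/* hardware summed the whole packet as seen by netif_rx() */
+		skb->csum = full_sum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	} else {
+		skb->ip_summed = CHECKSUM_NONE;	/* let the stack verify it */
+	}
+}
+#endif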
+
+#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
+#define SKB_WITH_OVERHEAD(X) \
+ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define SKB_MAX_ORDER(X, ORDER) \
+ SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
+#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
+#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
+
+/* return minimum truesize of one skb containing X bytes of data */
+#define SKB_TRUESIZE(X) ((X) + \
+ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+struct net_device;
+struct scatterlist;
+struct pipe_inode_info;
+struct iov_iter;
+struct napi_struct;
+
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+struct nf_conntrack {
+ atomic_t use;
+};
+#endif
+
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+struct nf_bridge_info {
+ atomic_t use;
+ enum {
+ BRNF_PROTO_UNCHANGED,
+ BRNF_PROTO_8021Q,
+ BRNF_PROTO_PPPOE
+ } orig_proto;
+ bool pkt_otherhost;
+ unsigned int mask;
+ struct net_device *physindev;
+ struct net_device *physoutdev;
+ char neigh_header[8];
+ __be32 ipv4_daddr;
+};
+#endif
+
+struct sk_buff_head {
+ /* These two members must be first. */
+ struct sk_buff *next;
+ struct sk_buff *prev;
+
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+struct sk_buff;
+
+/* To allow a 64K frame to be packed as a single skb without a frag_list we
+ * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
+ * buffers which do not start on a page boundary.
+ *
+ * Since GRO uses frags we allocate at least 16 regardless of page
+ * size.
+ */
+#if (65536/PAGE_SIZE + 1) < 16
+#define MAX_SKB_FRAGS 16UL
+#else
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+#endif
+
+typedef struct skb_frag_struct skb_frag_t;
+
+struct skb_frag_struct {
+ struct {
+ struct page *p;
+ } page;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+ __u32 page_offset;
+ __u32 size;
+#else
+ __u16 page_offset;
+ __u16 size;
+#endif
+};
+
+static inline unsigned int skb_frag_size(const skb_frag_t *frag)
+{
+ return frag->size;
+}
+
+static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
+{
+ frag->size = size;
+}
+
+static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
+{
+ frag->size += delta;
+}
+
+static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
+{
+ frag->size -= delta;
+}
+
+#define HAVE_HW_TIME_STAMP
+
+/**
+ * struct skb_shared_hwtstamps - hardware time stamps
+ * @hwtstamp: hardware time stamp transformed into duration
+ * since arbitrary point in time
+ *
+ * Software time stamps generated by ktime_get_real() are stored in
+ * skb->tstamp.
+ *
+ * hwtstamps can only be compared against other hwtstamps from
+ * the same device.
+ *
+ * This structure is attached to packets as part of the
+ * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
+ */
+struct skb_shared_hwtstamps {
+ ktime_t hwtstamp;
+};
+
+/* Definitions for tx_flags in struct skb_shared_info */
+enum {
+ /* generate hardware time stamp */
+ SKBTX_HW_TSTAMP = 1 << 0,
+
+ /* generate software time stamp when queueing packet to NIC */
+ SKBTX_SW_TSTAMP = 1 << 1,
+
+ /* device driver is going to provide hardware time stamp */
+ SKBTX_IN_PROGRESS = 1 << 2,
+
+ /* device driver supports TX zero-copy buffers */
+ SKBTX_DEV_ZEROCOPY = 1 << 3,
+
+ /* generate wifi status information (where possible) */
+ SKBTX_WIFI_STATUS = 1 << 4,
+
+ /* This indicates at least one fragment might be overwritten
+ * (as in vmsplice(), sendfile() ...)
+ * If we need to compute a TX checksum, we'll need to copy
+ * all frags to avoid possible bad checksum
+ */
+ SKBTX_SHARED_FRAG = 1 << 5,
+
+ /* generate software time stamp when entering packet scheduling */
+ SKBTX_SCHED_TSTAMP = 1 << 6,
+
+ /* generate software timestamp on peer data acknowledgment */
+ SKBTX_ACK_TSTAMP = 1 << 7,
+};
+
+#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
+ SKBTX_SCHED_TSTAMP | \
+ SKBTX_ACK_TSTAMP)
+#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
+
+/*
+ * The callback notifies userspace to release buffers when skb DMA is done in
+ * the lower device; the skb's last reference should be 0 when this is called.
+ * The zerocopy_success argument is true if zero copy transmit occurred,
+ * false on data copy or out of memory error caused by data copy attempt.
+ * The ctx field is used to track device context.
+ * The desc field is used to track userspace buffer index.
+ */
+struct ubuf_info {
+ void (*callback)(struct ubuf_info *, bool zerocopy_success);
+ void *ctx;
+ unsigned long desc;
+};
+
+/* This data is invariant across clones and lives at
+ * the end of the header data, ie. at skb->end.
+ */
+struct skb_shared_info {
+ unsigned char nr_frags;
+ __u8 tx_flags;
+ unsigned short gso_size;
+ /* Warning: this field is not always filled in (UFO)! */
+ unsigned short gso_segs;
+ unsigned short gso_type;
+ struct sk_buff *frag_list;
+ struct skb_shared_hwtstamps hwtstamps;
+ u32 tskey;
+ __be32 ip6_frag_id;
+
+ /*
+ * Warning : all fields before dataref are cleared in __alloc_skb()
+ */
+ atomic_t dataref;
+
+ /* Intermediate layers must ensure that destructor_arg
+ * remains valid until skb destructor */
+ void * destructor_arg;
+
+ /* must be last field, see pskb_expand_head() */
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+/* We divide dataref into two halves. The higher 16 bits hold references
+ * to the payload part of skb->data. The lower 16 bits hold references to
+ * the entire skb->data. A clone of a headerless skb holds the length of
+ * the header in skb->hdr_len.
+ *
+ * All users must obey the rule that the skb->data reference count must be
+ * greater than or equal to the payload reference count.
+ *
+ * Holding a reference to the payload part means that the user does not
+ * care about modifications to the header part of skb->data.
+ */
+#define SKB_DATAREF_SHIFT 16
+#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
+
+
+enum {
+ SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
+ SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
+ SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
+};
+
+enum {
+ SKB_GSO_TCPV4 = 1 << 0,
+ SKB_GSO_UDP = 1 << 1,
+
+ /* This indicates the skb is from an untrusted source. */
+ SKB_GSO_DODGY = 1 << 2,
+
+ /* This indicates the tcp segment has CWR set. */
+ SKB_GSO_TCP_ECN = 1 << 3,
+
+ SKB_GSO_TCPV6 = 1 << 4,
+
+ SKB_GSO_FCOE = 1 << 5,
+
+ SKB_GSO_GRE = 1 << 6,
+
+ SKB_GSO_GRE_CSUM = 1 << 7,
+
+ SKB_GSO_IPIP = 1 << 8,
+
+ SKB_GSO_SIT = 1 << 9,
+
+ SKB_GSO_UDP_TUNNEL = 1 << 10,
+
+ SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
+
+ SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
+};
+
+#if BITS_PER_LONG > 32
+#define NET_SKBUFF_DATA_USES_OFFSET 1
+#endif
+
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+typedef unsigned int sk_buff_data_t;
+#else
+typedef unsigned char *sk_buff_data_t;
+#endif
+
+/**
+ * struct skb_mstamp - multi resolution time stamps
+ * @stamp_us: timestamp in us resolution
+ * @stamp_jiffies: timestamp in jiffies
+ */
+struct skb_mstamp {
+ union {
+ u64 v64;
+ struct {
+ u32 stamp_us;
+ u32 stamp_jiffies;
+ };
+ };
+};
+
+/**
+ * skb_mstamp_get - get current timestamp
+ * @cl: place to store timestamps
+ */
+static inline void skb_mstamp_get(struct skb_mstamp *cl)
+{
+ u64 val = local_clock();
+
+ do_div(val, NSEC_PER_USEC);
+ cl->stamp_us = (u32)val;
+ cl->stamp_jiffies = (u32)jiffies;
+}
+
+/**
+ * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
+ * @t1: pointer to newest sample
+ * @t0: pointer to oldest sample
+ */
+static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
+ const struct skb_mstamp *t0)
+{
+ s32 delta_us = t1->stamp_us - t0->stamp_us;
+ u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
+
+ /* If delta_us is negative, this might be because interval is too big,
+ * or local_clock() drift is too big : fallback using jiffies.
+ */
+ if (delta_us <= 0 ||
+ delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
+
+ delta_us = jiffies_to_usecs(delta_jiffies);
+
+ return delta_us;
+}
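+
+/*
+ * Illustrative sketch (editor's example, disabled): measuring an interval
+ * with the helpers above, e.g. for an RTT-style sample. demo_measure_us()
+ * is hypothetical.
+ */
+#if 0
+static u32 demo_measure_us(void)
+{
+	struct skb_mstamp start, now;
+
+	skb_mstamp_get(&start);
+	/* ... the event being timed happens here ... */
+	skb_mstamp_get(&now);
+
+	return skb_mstamp_us_delta(&now, &start);
+}
+#endif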
+
+
+/**
+ * struct sk_buff - socket buffer
+ * @next: Next buffer in list
+ * @prev: Previous buffer in list
+ * @tstamp: Time we arrived/left
+ * @rbnode: RB tree node, alternative to next/prev for netem/tcp
+ * @sk: Socket we are owned by
+ * @dev: Device we arrived on/are leaving by
+ * @cb: Control buffer. Free for use by every layer. Put private vars here
+ * @_skb_refdst: destination entry (with norefcount bit)
+ * @sp: the security path, used for xfrm
+ * @len: Length of actual data
+ * @data_len: Data length
+ * @mac_len: Length of link layer header
+ * @hdr_len: writable header length of cloned skb
+ * @csum: Checksum (must include start/offset pair)
+ * @csum_start: Offset from skb->head where checksumming should start
+ * @csum_offset: Offset from csum_start where checksum should be stored
+ * @priority: Packet queueing priority
+ * @ignore_df: allow local fragmentation
+ * @cloned: Head may be cloned (check refcnt to be sure)
+ * @ip_summed: Driver fed us an IP checksum
+ * @nohdr: Payload reference only, must not modify header
+ * @nfctinfo: Relationship of this skb to the connection
+ * @pkt_type: Packet class
+ * @fclone: skbuff clone status
+ * @ipvs_property: skbuff is owned by ipvs
+ * @peeked: this packet has been seen already, so stats have been
+ * done for it, don't do them again
+ * @nf_trace: netfilter packet trace flag
+ * @protocol: Packet protocol from driver
+ * @destructor: Destruct function
+ * @nfct: Associated connection, if any
+ * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
+ * @skb_iif: ifindex of device we arrived on
+ * @tc_index: Traffic control index
+ * @tc_verd: traffic control verdict
+ * @hash: the packet hash
+ * @queue_mapping: Queue mapping for multiqueue devices
+ * @xmit_more: More SKBs are pending for this queue
+ * @ndisc_nodetype: router type (from link layer)
+ * @ooo_okay: allow the mapping of a socket to a queue to be changed
+ * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
+ * ports.
+ * @sw_hash: indicates hash was computed in software stack
+ * @wifi_acked_valid: wifi_acked was set
+ * @wifi_acked: whether frame was acked on wifi or not
+ * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @napi_id: id of the NAPI struct this skb came from
+ * @secmark: security marking
+ * @mark: Generic packet mark
+ * @vlan_proto: vlan encapsulation protocol
+ * @vlan_tci: vlan tag control information
+ * @inner_protocol: Protocol (encapsulation)
+ * @inner_transport_header: Inner transport layer header (encapsulation)
+ * @inner_network_header: Network layer header (encapsulation)
+ * @inner_mac_header: Link layer header (encapsulation)
+ * @transport_header: Transport layer header
+ * @network_header: Network layer header
+ * @mac_header: Link layer header
+ * @tail: Tail pointer
+ * @end: End pointer
+ * @head: Head of buffer
+ * @data: Data head pointer
+ * @truesize: Buffer size
+ * @users: User count - see {datagram,tcp}.c
+ */
+
+struct sk_buff {
+ union {
+ struct {
+ /* These two members must be first. */
+ struct sk_buff *next;
+ struct sk_buff *prev;
+
+ union {
+ ktime_t tstamp;
+ struct skb_mstamp skb_mstamp;
+ };
+ };
+ struct rb_node rbnode; /* used in netem & tcp stack */
+ };
+ struct sock *sk;
+ struct net_device *dev;
+
+ /*
+ * This is the control buffer. It is free to use for every
+ * layer. Please put your private variables there. If you
+ * want to keep them across layers you have to do a skb_clone()
+ * first. This is owned by whoever has the skb queued ATM.
+ */
+ char cb[48] __aligned(8);
+
+ unsigned long _skb_refdst;
+ void (*destructor)(struct sk_buff *skb);
+#ifdef CONFIG_XFRM
+ struct sec_path *sp;
+#endif
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct nf_conntrack *nfct;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct nf_bridge_info *nf_bridge;
+#endif
+ unsigned int len,
+ data_len;
+ __u16 mac_len,
+ hdr_len;
+
+ /* Following fields are _not_ copied in __copy_skb_header()
+ * Note that queue_mapping is here mostly to fill a hole.
+ */
+ kmemcheck_bitfield_begin(flags1);
+ __u16 queue_mapping;
+ __u8 cloned:1,
+ nohdr:1,
+ fclone:2,
+ peeked:1,
+ head_frag:1,
+ xmit_more:1;
+ /* one bit hole */
+ kmemcheck_bitfield_end(flags1);
+
+ /* fields enclosed in headers_start/headers_end are copied
+ * using a single memcpy() in __copy_skb_header()
+ */
+ /* private: */
+ __u32 headers_start[0];
+ /* public: */
+
+/* if you move pkt_type around you also must adapt those constants */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX (7 << 5)
+#else
+#define PKT_TYPE_MAX 7
+#endif
+#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
+
+ __u8 __pkt_type_offset[0];
+ __u8 pkt_type:3;
+ __u8 pfmemalloc:1;
+ __u8 ignore_df:1;
+ __u8 nfctinfo:3;
+
+ __u8 nf_trace:1;
+ __u8 ip_summed:2;
+ __u8 ooo_okay:1;
+ __u8 l4_hash:1;
+ __u8 sw_hash:1;
+ __u8 wifi_acked_valid:1;
+ __u8 wifi_acked:1;
+
+ __u8 no_fcs:1;
+ /* Indicates the inner headers are valid in the skbuff. */
+ __u8 encapsulation:1;
+ __u8 encap_hdr_csum:1;
+ __u8 csum_valid:1;
+ __u8 csum_complete_sw:1;
+ __u8 csum_level:2;
+ __u8 csum_bad:1;
+
+#ifdef CONFIG_IPV6_NDISC_NODETYPE
+ __u8 ndisc_nodetype:2;
+#endif
+ __u8 ipvs_property:1;
+ __u8 inner_protocol_type:1;
+ __u8 remcsum_offload:1;
+ /* 3 or 5 bit hole */
+
+#ifdef CONFIG_NET_SCHED
+ __u16 tc_index; /* traffic control index */
+#ifdef CONFIG_NET_CLS_ACT
+ __u16 tc_verd; /* traffic control verdict */
+#endif
+#endif
+
+ union {
+ __wsum csum;
+ struct {
+ __u16 csum_start;
+ __u16 csum_offset;
+ };
+ };
+ __u32 priority;
+ int skb_iif;
+ __u32 hash;
+ __be16 vlan_proto;
+ __u16 vlan_tci;
+#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
+ union {
+ unsigned int napi_id;
+ unsigned int sender_cpu;
+ };
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+ __u32 secmark;
+#endif
+ union {
+ __u32 mark;
+ __u32 reserved_tailroom;
+ };
+
+ union {
+ __be16 inner_protocol;
+ __u8 inner_ipproto;
+ };
+
+ __u16 inner_transport_header;
+ __u16 inner_network_header;
+ __u16 inner_mac_header;
+
+ __be16 protocol;
+ __u16 transport_header;
+ __u16 network_header;
+ __u16 mac_header;
+
+ /* private: */
+ __u32 headers_end[0];
+ /* public: */
+
+ /* These elements must be at the end, see alloc_skb() for details. */
+ sk_buff_data_t tail;
+ sk_buff_data_t end;
+ unsigned char *head,
+ *data;
+ unsigned int truesize;
+ atomic_t users;
+};
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/slab.h>
+
+
+#define SKB_ALLOC_FCLONE 0x01
+#define SKB_ALLOC_RX 0x02
+#define SKB_ALLOC_NAPI 0x04
+
+/* Returns true if the skb was allocated from PFMEMALLOC reserves */
+static inline bool skb_pfmemalloc(const struct sk_buff *skb)
+{
+ return unlikely(skb->pfmemalloc);
+}
+
+/*
+ * skb might have a dst pointer attached, refcounted or not.
+ * _skb_refdst low order bit is set if refcount was _not_ taken
+ */
+#define SKB_DST_NOREF 1UL
+#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
+
+/**
+ * skb_dst - returns skb dst_entry
+ * @skb: buffer
+ *
+ * Returns skb dst_entry, regardless of reference taken or not.
+ */
+static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
+{
+ /* If refdst was not refcounted, check we still are in a
+ * rcu_read_lock section
+ */
+ WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
+ !rcu_read_lock_held() &&
+ !rcu_read_lock_bh_held());
+ return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
+}
+
+/**
+ * skb_dst_set - sets skb dst
+ * @skb: buffer
+ * @dst: dst entry
+ *
+ * Sets skb dst, assuming a reference was taken on dst and should
+ * be released by skb_dst_drop()
+ */
+static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
+{
+ skb->_skb_refdst = (unsigned long)dst;
+}
+
+/**
+ * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
+ * @skb: buffer
+ * @dst: dst entry
+ *
+ * Sets skb dst, assuming a reference was not taken on dst.
+ * If dst entry is cached, we do not take reference and dst_release
+ * will be avoided by refdst_drop. If dst entry is not cached, we take
+ * reference, so that last dst_release can destroy the dst immediately.
+ */
+static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
+{
+ WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+ skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
+}
+
+/**
+ * skb_dst_is_noref - Test if skb dst isn't refcounted
+ * @skb: buffer
+ */
+static inline bool skb_dst_is_noref(const struct sk_buff *skb)
+{
+ return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
+}
+
+static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+{
+ return (struct rtable *)skb_dst(skb);
+}
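+
+/*
+ * Illustrative sketch (editor's example, disabled): the two ways of attaching
+ * a dst described above. dst_hold() comes from net/dst.h; the helper names
+ * are hypothetical.
+ */
+#if 0
+static void demo_attach_dst(struct sk_buff *skb, struct dst_entry *dst)
+{
+	/* refcounted attach: the reference is released by skb_dst_drop() */
+	dst_hold(dst);
+	skb_dst_set(skb, dst);
+}
+
+static void demo_attach_dst_rcu(struct sk_buff *skb, struct dst_entry *dst)
+{
+	/* noref attach: only valid while inside an RCU read-side section */
+	skb_dst_set_noref(skb, dst);
+}
+#endif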
+
+void kfree_skb(struct sk_buff *skb);
+void kfree_skb_list(struct sk_buff *segs);
+void skb_tx_error(struct sk_buff *skb);
+void consume_skb(struct sk_buff *skb);
+void __kfree_skb(struct sk_buff *skb);
+extern struct kmem_cache *skbuff_head_cache;
+
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+ bool *fragstolen, int *delta_truesize);
+
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+ int node);
+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
+struct sk_buff *build_skb(void *data, unsigned int frag_size);
+static inline struct sk_buff *alloc_skb(unsigned int size,
+ gfp_t priority)
+{
+ return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
+}
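+
+/*
+ * Illustrative sketch (editor's example, disabled): the basic allocate/free
+ * pattern for the helpers above. demo_make_skb() is hypothetical; callers
+ * get NULL on failure, and a finished skb is released with consume_skb()
+ * (or kfree_skb() on error paths).
+ */
+#if 0
+static struct sk_buff *demo_make_skb(unsigned int len)
+{
+	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);
+
+	if (!skb)
+		return NULL;
+	/* reserve headroom / copy the payload here before handing the skb on */
+	return skb;
+}
+#endif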
+
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+ unsigned long data_len,
+ int max_page_order,
+ int *errcode,
+ gfp_t gfp_mask);
+
+/* Layout of fast clones : [skb1][skb2][fclone_ref] */
+struct sk_buff_fclones {
+ struct sk_buff skb1;
+
+ struct sk_buff skb2;
+
+ atomic_t fclone_ref;
+};
+
+/**
+ * skb_fclone_busy - check if fclone is busy
+ * @skb: buffer
+ *
+ * Returns true if skb is a fast clone, and its clone is not freed.
+ * Some drivers call skb_orphan() in their ndo_start_xmit(),
+ * so we also check that this didn't happen.
+ */
+static inline bool skb_fclone_busy(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ const struct sk_buff_fclones *fclones;
+
+ fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+ return skb->fclone == SKB_FCLONE_ORIG &&
+ atomic_read(&fclones->fclone_ref) > 1 &&
+ fclones->skb2.sk == sk;
+}
+
+static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
+ gfp_t priority)
+{
+ return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
+}
+
+struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+static inline struct sk_buff *alloc_skb_head(gfp_t priority)
+{
+ return __alloc_skb_head(priority, -1);
+}
+
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
+ gfp_t gfp_mask, bool fclone);
+static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
+ gfp_t gfp_mask)
+{
+ return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
+}
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+ unsigned int headroom);
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+ int newtailroom, gfp_t priority);
+int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+ int len);
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+int skb_pad(struct sk_buff *skb, int pad);
+#define dev_kfree_skb(a) consume_skb(a)
+
+int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+ int getfrag(void *from, char *to, int offset,
+ int len, int odd, struct sk_buff *skb),
+ void *from, int length);
+
+struct skb_seq_state {
+ __u32 lower_offset;
+ __u32 upper_offset;
+ __u32 frag_idx;
+ __u32 stepped_offset;
+ struct sk_buff *root_skb;
+ struct sk_buff *cur_skb;
+ __u8 *frag_data;
+};
+
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct skb_seq_state *st);
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+ struct skb_seq_state *st);
+void skb_abort_seq_read(struct skb_seq_state *st);
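+
+/*
+ * Illustrative sketch (editor's example, disabled): walking an skb's data,
+ * including paged fragments and the frag_list, with the sequential reader
+ * declared above. demo_walk() is hypothetical.
+ */
+#if 0
+static void demo_walk(struct sk_buff *skb)
+{
+	struct skb_seq_state st;
+	unsigned int consumed = 0, len;
+	const u8 *data;
+
+	skb_prepare_seq_read(skb, 0, skb->len, &st);
+	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
+		/* process len bytes starting at data */
+		consumed += len;
+	}
+	/* skb_abort_seq_read(&st) is only needed when stopping early */
+}
+#endif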
+
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+ unsigned int to, struct ts_config *config);
+
+/*
+ * Packet hash types specify the type of hash in skb_set_hash.
+ *
+ * Hash types refer to the protocol layer addresses which are used to
+ * construct a packet's hash. The hashes are used to differentiate or identify
+ * flows of the protocol layer for the hash type. Hash types are either
+ * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
+ *
+ * Properties of hashes:
+ *
+ * 1) Two packets in different flows have different hash values
+ * 2) Two packets in the same flow should have the same hash value
+ *
+ * A hash at a higher layer is considered to be more specific. A driver should
+ * set the most specific hash possible.
+ *
+ * A driver cannot indicate a more specific hash than the layer at which a hash
+ * was computed. For instance an L3 hash cannot be set as an L4 hash.
+ *
+ * A driver may indicate a hash level which is less specific than the
+ * actual layer the hash was computed on. For instance, a hash computed
+ * at L4 may be considered an L3 hash. This should only be done if the
+ * driver can't unambiguously determine that the HW computed the hash at
+ * the higher layer. Note that the "should" in the second property above
+ * permits this.
+ */
+enum pkt_hash_types {
+ PKT_HASH_TYPE_NONE, /* Undefined type */
+ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
+ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
+ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
+};
+
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+ skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+ skb->sw_hash = 0;
+ skb->hash = hash;
+}
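+
+/*
+ * Illustrative sketch (editor's example, disabled): a driver reporting a
+ * hardware-computed RSS hash per the rules above. The descriptor-derived
+ * arguments are hypothetical.
+ */
+#if 0
+static void demo_report_hash(struct sk_buff *skb, u32 rss_hash, bool l4_hash)
+{
+	/* never claim a more specific layer than the hardware computed */
+	skb_set_hash(skb, rss_hash,
+		     l4_hash ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+#endif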
+
+void __skb_get_hash(struct sk_buff *skb);
+static inline __u32 skb_get_hash(struct sk_buff *skb)
+{
+ if (!skb->l4_hash && !skb->sw_hash)
+ __skb_get_hash(skb);
+
+ return skb->hash;
+}
+
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+{
+ return skb->hash;
+}
+
+static inline void skb_clear_hash(struct sk_buff *skb)
+{
+ skb->hash = 0;
+ skb->sw_hash = 0;
+ skb->l4_hash = 0;
+}
+
+static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
+{
+ if (!skb->l4_hash)
+ skb_clear_hash(skb);
+}
+
+static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
+{
+ to->hash = from->hash;
+ to->sw_hash = from->sw_hash;
+ to->l4_hash = from->l4_hash;
+}
+
+static inline void skb_sender_cpu_clear(struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ skb->sender_cpu = 0;
+#endif
+}
+
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+{
+ return skb->head + skb->end;
+}
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+ return skb->end;
+}
+#else
+static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
+{
+ return skb->end;
+}
+
+static inline unsigned int skb_end_offset(const struct sk_buff *skb)
+{
+ return skb->end - skb->head;
+}
+#endif
+
+/* Internal */
+#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
+
+static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
+{
+ return &skb_shinfo(skb)->hwtstamps;
+}
+
+/**
+ * skb_queue_empty - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ */
+static inline int skb_queue_empty(const struct sk_buff_head *list)
+{
+ return list->next == (const struct sk_buff *) list;
+}
+
+/**
+ * skb_queue_is_last - check if skb is the last entry in the queue
+ * @list: queue head
+ * @skb: buffer
+ *
+ * Returns true if @skb is the last buffer on the list.
+ */
+static inline bool skb_queue_is_last(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ return skb->next == (const struct sk_buff *) list;
+}
+
+/**
+ * skb_queue_is_first - check if skb is the first entry in the queue
+ * @list: queue head
+ * @skb: buffer
+ *
+ * Returns true if @skb is the first buffer on the list.
+ */
+static inline bool skb_queue_is_first(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ return skb->prev == (const struct sk_buff *) list;
+}
+
+/**
+ * skb_queue_next - return the next packet in the queue
+ * @list: queue head
+ * @skb: current buffer
+ *
+ * Return the next packet in @list after @skb. It is only valid to
+ * call this if skb_queue_is_last() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ /* This BUG_ON may seem severe, but if we just return then we
+ * are going to dereference garbage.
+ */
+ BUG_ON(skb_queue_is_last(list, skb));
+ return skb->next;
+}
+
+/**
+ * skb_queue_prev - return the prev packet in the queue
+ * @list: queue head
+ * @skb: current buffer
+ *
+ * Return the prev packet in @list before @skb. It is only valid to
+ * call this if skb_queue_is_first() evaluates to false.
+ */
+static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
+ const struct sk_buff *skb)
+{
+ /* This BUG_ON may seem severe, but if we just return then we
+ * are going to dereference garbage.
+ */
+ BUG_ON(skb_queue_is_first(list, skb));
+ return skb->prev;
+}
+
+/**
+ * skb_get - reference buffer
+ * @skb: buffer to reference
+ *
+ * Makes another reference to a socket buffer and returns a pointer
+ * to the buffer.
+ */
+static inline struct sk_buff *skb_get(struct sk_buff *skb)
+{
+ atomic_inc(&skb->users);
+ return skb;
+}
+
+/*
+ * If users == 1, we are the only owner and can avoid redundant
+ * atomic changes.
+ */
+
+/**
+ * skb_cloned - is the buffer a clone
+ * @skb: buffer to check
+ *
+ * Returns true if the buffer was generated with skb_clone() and is
+ * one of multiple shared copies of the buffer. Cloned buffers are
+ * shared data so must not be written to under normal circumstances.
+ */
+static inline int skb_cloned(const struct sk_buff *skb)
+{
+ return skb->cloned &&
+ (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
+}
+
+static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(pri & __GFP_WAIT);
+
+ if (skb_cloned(skb))
+ return pskb_expand_head(skb, 0, 0, pri);
+
+ return 0;
+}
+
+/**
+ * skb_header_cloned - is the header a clone
+ * @skb: buffer to check
+ *
+ * Returns true if modifying the header part of the buffer requires
+ * the data to be copied.
+ */
+static inline int skb_header_cloned(const struct sk_buff *skb)
+{
+ int dataref;
+
+ if (!skb->cloned)
+ return 0;
+
+ dataref = atomic_read(&skb_shinfo(skb)->dataref);
+ dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
+ return dataref != 1;
+}
+
+/**
+ * skb_header_release - release reference to header
+ * @skb: buffer to operate on
+ *
+ * Drop a reference to the header part of the buffer. This is done
+ * by acquiring a payload reference. You must not read from the header
+ * part of skb->data after this.
+ * Note : Check if you can use __skb_header_release() instead.
+ */
+static inline void skb_header_release(struct sk_buff *skb)
+{
+ BUG_ON(skb->nohdr);
+ skb->nohdr = 1;
+ atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
+}
+
+/**
+ * __skb_header_release - release reference to header
+ * @skb: buffer to operate on
+ *
+ * Variant of skb_header_release() assuming skb is private to caller.
+ * We can avoid one atomic operation.
+ */
+static inline void __skb_header_release(struct sk_buff *skb)
+{
+ skb->nohdr = 1;
+ atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
+}
+
+
+/**
+ * skb_shared - is the buffer shared
+ * @skb: buffer to check
+ *
+ * Returns true if more than one person has a reference to this
+ * buffer.
+ */
+static inline int skb_shared(const struct sk_buff *skb)
+{
+ return atomic_read(&skb->users) != 1;
+}
+
+/**
+ * skb_share_check - check if buffer is shared and if so clone it
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the buffer is shared the buffer is cloned and the old copy
+ * drops a reference. A new clone with a single reference is returned.
+ * If the buffer is not shared the original buffer is returned. When
+ * being called from interrupt state or with spinlocks held, pri must
+ * be GFP_ATOMIC.
+ *
+ * NULL is returned on a memory allocation failure.
+ */
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
+{
+ might_sleep_if(pri & __GFP_WAIT);
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb = skb_clone(skb, pri);
+
+ if (likely(nskb))
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
+ skb = nskb;
+ }
+ return skb;
+}
+
+/*
+ * Copy shared buffers into a new sk_buff. We effectively do COW on
+ * packets to handle cases where we have a local reader and a forwarder,
+ * and a couple of other messy ones. The normal one is tcpdumping
+ * a packet that's being forwarded.
+ */
+
+/**
+ * skb_unshare - make a copy of a shared buffer
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the socket buffer is a clone then this function creates a new
+ * copy of the data, drops a reference count on the old copy and returns
+ * the new copy with the reference count at 1. If the buffer is not a clone
+ * the original buffer is returned. When called with a spinlock held or
+ * from interrupt state @pri must be %GFP_ATOMIC
+ *
+ * %NULL is returned on a memory allocation failure.
+ */
+static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
+ gfp_t pri)
+{
+ might_sleep_if(pri & __GFP_WAIT);
+ if (skb_cloned(skb)) {
+ struct sk_buff *nskb = skb_copy(skb, pri);
+
+ /* Free our shared copy */
+ if (likely(nskb))
+ consume_skb(skb);
+ else
+ kfree_skb(skb);
+ skb = nskb;
+ }
+ return skb;
+}
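+
+/*
+ * Illustrative sketch (editor's example, disabled): a receive handler taking
+ * ownership of a possibly shared skb before touching it, as described above.
+ * demo_rcv() is hypothetical.
+ */
+#if 0
+static int demo_rcv(struct sk_buff *skb)
+{
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;	/* clone failed; the original was freed */
+
+	/* metadata may be modified now; use skb_unshare()/skb_unclone()
+	 * before writing to payload that may still be shared */
+
+	consume_skb(skb);	/* or hand it to the next layer */
+	return 0;
+}
+#endif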
+
+/**
+ * skb_peek - peek at the head of an &sk_buff_head
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the head element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
+{
+ struct sk_buff *skb = list_->next;
+
+ if (skb == (struct sk_buff *)list_)
+ skb = NULL;
+ return skb;
+}
+
+/**
+ * skb_peek_next - peek skb following the given one from a queue
+ * @skb: skb to start from
+ * @list_: list to peek at
+ *
+ * Returns %NULL when the end of the list is met or a pointer to the
+ * next element. The reference count is not incremented and the
+ * reference is therefore volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
+ const struct sk_buff_head *list_)
+{
+ struct sk_buff *next = skb->next;
+
+ if (next == (struct sk_buff *)list_)
+ next = NULL;
+ return next;
+}
+
+/**
+ * skb_peek_tail - peek at the tail of an &sk_buff_head
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the tail element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
+{
+ struct sk_buff *skb = list_->prev;
+
+ if (skb == (struct sk_buff *)list_)
+ skb = NULL;
+ return skb;
+
+}
+
+/**
+ * skb_queue_len - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ */
+static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
+{
+ return list_->qlen;
+}
+
+/**
+ * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
+ * @list: queue to initialize
+ *
+ * This initializes only the list and queue length aspects of
+ * an sk_buff_head object. This allows initializing the list
+ * aspects of an sk_buff_head without reinitializing things like
+ * the spinlock. It can also be used for on-stack sk_buff_head
+ * objects where the spinlock is known to not be used.
+ */
+static inline void __skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * This function creates a split out lock class for each invocation;
+ * this is needed for now since a whole lot of users of the skb-queue
+ * infrastructure in drivers have different locking usage (in hardirq)
+ * than the networking core (in softirq only). In the long run either the
+ * network layer or drivers should be annotated to consolidate the
+ * main types of usage into 3 classes.
+ */
+static inline void skb_queue_head_init(struct sk_buff_head *list)
+{
+ spin_lock_init(&list->lock);
+ __skb_queue_head_init(list);
+}
+
+static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ struct lock_class_key *class)
+{
+ skb_queue_head_init(list);
+ lockdep_set_class(&list->lock, class);
+}
+
+/*
+ * Insert an sk_buff on a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
+ struct sk_buff_head *list);
+static inline void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff *prev, struct sk_buff *next,
+ struct sk_buff_head *list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = prev->next = newsk;
+ list->qlen++;
+}
+
+static inline void __skb_queue_splice(const struct sk_buff_head *list,
+ struct sk_buff *prev,
+ struct sk_buff *next)
+{
+ struct sk_buff *first = list->next;
+ struct sk_buff *last = list->prev;
+
+ first->prev = prev;
+ prev->next = first;
+
+ last->next = next;
+ next->prev = last;
+}
+
+/**
+ * skb_queue_splice - join two skb lists; this is designed for stacks
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ */
+static inline void skb_queue_splice(const struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, (struct sk_buff *) head, head->next);
+ head->qlen += list->qlen;
+ }
+}
+
+/**
+ * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ *
+ * The list at @list is reinitialised
+ */
+static inline void skb_queue_splice_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, (struct sk_buff *) head, head->next);
+ head->qlen += list->qlen;
+ __skb_queue_head_init(list);
+ }
+}
+
+/**
+ * skb_queue_splice_tail - join two skb lists, each list being a queue
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ */
+static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+ head->qlen += list->qlen;
+ }
+}
+
+/**
+ * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
+ * @list: the new list to add
+ * @head: the place to add it in the first list
+ *
+ * Each of the lists is a queue.
+ * The list at @list is reinitialised
+ */
+static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
+ struct sk_buff_head *head)
+{
+ if (!skb_queue_empty(list)) {
+ __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
+ head->qlen += list->qlen;
+ __skb_queue_head_init(list);
+ }
+}
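+
+/*
+ * Illustrative sketch (editor's example, disabled): splicing a privately
+ * built batch onto a shared queue under that queue's lock, using the
+ * helpers above. demo_flush_batch() is hypothetical.
+ */
+#if 0
+static void demo_flush_batch(struct sk_buff_head *batch,
+			     struct sk_buff_head *shared)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&shared->lock, flags);
+	skb_queue_splice_tail_init(batch, shared);	/* batch ends up empty */
+	spin_unlock_irqrestore(&shared->lock, flags);
+}
+#endif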
+
+/**
+ * __skb_queue_after - queue a buffer after the given buffer in the list
+ * @list: list to use
+ * @prev: place after this buffer
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer in the middle of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+static inline void __skb_queue_after(struct sk_buff_head *list,
+ struct sk_buff *prev,
+ struct sk_buff *newsk)
+{
+ __skb_insert(newsk, prev, prev->next, list);
+}
+
+void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+ struct sk_buff_head *list);
+
+static inline void __skb_queue_before(struct sk_buff_head *list,
+ struct sk_buff *next,
+ struct sk_buff *newsk)
+{
+ __skb_insert(newsk, next->prev, next, list);
+}
+
+/**
+ * __skb_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_head(struct sk_buff_head *list,
+ struct sk_buff *newsk)
+{
+ __skb_queue_after(list, (struct sk_buff *)list, newsk);
+}
+
+/**
+ * __skb_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the end of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_tail(struct sk_buff_head *list,
+ struct sk_buff *newsk)
+{
+ __skb_queue_before(list, (struct sk_buff *)list, newsk);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known.
+ */
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
+static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = skb->prev = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * __skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The head item is
+ * returned or %NULL if the list is empty.
+ */
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *skb = skb_peek(list);
+ if (skb)
+ __skb_unlink(skb, list);
+ return skb;
+}
+
+/**
+ * __skb_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The tail item is
+ * returned or %NULL if the list is empty.
+ */
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+{
+ struct sk_buff *skb = skb_peek_tail(list);
+ if (skb)
+ __skb_unlink(skb, list);
+ return skb;
+}
+
+
+static inline bool skb_is_nonlinear(const struct sk_buff *skb)
+{
+ return skb->data_len;
+}
+
+static inline unsigned int skb_headlen(const struct sk_buff *skb)
+{
+ return skb->len - skb->data_len;
+}
+
+static inline int skb_pagelen(const struct sk_buff *skb)
+{
+ int i, len = 0;
+
+ for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
+ len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ return len + skb_headlen(skb);
+}
+
+/**
+ * __skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset of the data within @page
+ * @size: the length of the data
+ *
+ * Initialises the @i'th fragment of @skb to point to @size bytes at
+ * offset @off within @page.
+ *
+ * Does not take any additional reference on the fragment.
+ */
+static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ /*
+ * Propagate page->pfmemalloc to the skb if we can. The problem is
+ * that not all callers have unique ownership of the page. If
+ * pfmemalloc is set, we check the mapping as a mapping implies
+ * page->index is set (index and pfmemalloc share space).
+ * If it's a valid mapping, we cannot use page->pfmemalloc but we
+ * do not lose pfmemalloc information as the pages would not be
+ * allocated using __GFP_MEMALLOC.
+ */
+ frag->page.p = page;
+ frag->page_offset = off;
+ skb_frag_size_set(frag, size);
+
+ page = compound_head(page);
+ if (page->pfmemalloc && !page->mapping)
+ skb->pfmemalloc = true;
+}
+
+/**
+ * skb_fill_page_desc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset of the data within @page
+ * @size: the length of the data
+ *
+ * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
+ * @skb to point to @size bytes at offset @off within @page. In
+ * addition updates @skb such that @i is the last fragment.
+ *
+ * Does not take any additional reference on the fragment.
+ */
+static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
+ struct page *page, int off, int size)
+{
+ __skb_fill_page_desc(skb, i, page, off, size);
+ skb_shinfo(skb)->nr_frags = i + 1;
+}
+
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size, unsigned int truesize);
+
+void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
+ unsigned int truesize);
+
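+/*
+ * Illustrative sketch (not part of this API): attaching a page to an skb
+ * as a paged fragment on the receive path; skb_add_rx_frag() also updates
+ * skb->len, skb->data_len and skb->truesize.  "page", "offset" and
+ * "frag_len" are hypothetical driver variables; get_page() is shown
+ * assuming the driver keeps its own reference to the page.
+ *
+ *	get_page(page);
+ *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
+ *			frag_len, PAGE_SIZE);
+ */
+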
+#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
+#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
+#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
+
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
+{
+ return skb->head + skb->tail;
+}
+
+static inline void skb_reset_tail_pointer(struct sk_buff *skb)
+{
+ skb->tail = skb->data - skb->head;
+}
+
+static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
+{
+ skb_reset_tail_pointer(skb);
+ skb->tail += offset;
+}
+
+#else /* NET_SKBUFF_DATA_USES_OFFSET */
+static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
+{
+ return skb->tail;
+}
+
+static inline void skb_reset_tail_pointer(struct sk_buff *skb)
+{
+ skb->tail = skb->data;
+}
+
+static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
+{
+ skb->tail = skb->data + offset;
+}
+
+#endif /* NET_SKBUFF_DATA_USES_OFFSET */
+
+/*
+ * Add data to an sk_buff
+ */
+unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *tmp = skb_tail_pointer(skb);
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail += len;
+ skb->len += len;
+ return tmp;
+}
+
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
+static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+{
+ skb->data -= len;
+ skb->len += len;
+ return skb->data;
+}
+
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
+static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
+{
+ skb->len -= len;
+ BUG_ON(skb->len < skb->data_len);
+ return skb->data += len;
+}
+
+static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
+{
+ return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+
+static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len > skb_headlen(skb) &&
+ !__pskb_pull_tail(skb, len - skb_headlen(skb)))
+ return NULL;
+ skb->len -= len;
+ return skb->data += len;
+}
+
+static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
+}
+
+static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (likely(len <= skb_headlen(skb)))
+ return 1;
+ if (unlikely(len > skb->len))
+ return 0;
+ return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
+}
+
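+/*
+ * Illustrative sketch (not part of this API): make sure a header is in the
+ * linear area before dereferencing it; sizeof(struct iphdr) is used purely
+ * as an example length.
+ *
+ *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ *		goto drop;
+ *	iph = ip_hdr(skb);
+ */
+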
+/**
+ * skb_headroom - bytes at buffer head
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the head of an &sk_buff.
+ */
+static inline unsigned int skb_headroom(const struct sk_buff *skb)
+{
+ return skb->data - skb->head;
+}
+
+/**
+ * skb_tailroom - bytes at buffer end
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the tail of an sk_buff
+ */
+static inline int skb_tailroom(const struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
+}
+
+/**
+ * skb_availroom - bytes at buffer end
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the tail of an sk_buff
+ * allocated by sk_stream_alloc()
+ */
+static inline int skb_availroom(const struct sk_buff *skb)
+{
+ if (skb_is_nonlinear(skb))
+ return 0;
+
+ return skb->end - skb->tail - skb->reserved_tailroom;
+}
+
+/**
+ * skb_reserve - adjust headroom
+ * @skb: buffer to alter
+ * @len: bytes to move
+ *
+ * Increase the headroom of an empty &sk_buff by reducing the tail
+ * room. This is only allowed for an empty buffer.
+ */
+static inline void skb_reserve(struct sk_buff *skb, int len)
+{
+ skb->data += len;
+ skb->tail += len;
+}
+
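+/*
+ * Illustrative sketch (not part of this API): a common way to build an
+ * outgoing frame is to reserve room for the headers first, copy the
+ * payload with skb_put(), and let each layer prepend its header with
+ * skb_push().  "hlen", "payload" and "plen" are hypothetical.
+ *
+ *	skb = alloc_skb(hlen + plen, GFP_KERNEL);
+ *	if (!skb)
+ *		return -ENOMEM;
+ *	skb_reserve(skb, hlen);
+ *	memcpy(skb_put(skb, plen), payload, plen);
+ *	(each protocol layer later calls skb_push(skb, header_len))
+ */
+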
+#define ENCAP_TYPE_ETHER 0
+#define ENCAP_TYPE_IPPROTO 1
+
+static inline void skb_set_inner_protocol(struct sk_buff *skb,
+ __be16 protocol)
+{
+ skb->inner_protocol = protocol;
+ skb->inner_protocol_type = ENCAP_TYPE_ETHER;
+}
+
+static inline void skb_set_inner_ipproto(struct sk_buff *skb,
+ __u8 ipproto)
+{
+ skb->inner_ipproto = ipproto;
+ skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
+}
+
+static inline void skb_reset_inner_headers(struct sk_buff *skb)
+{
+ skb->inner_mac_header = skb->mac_header;
+ skb->inner_network_header = skb->network_header;
+ skb->inner_transport_header = skb->transport_header;
+}
+
+static inline void skb_reset_mac_len(struct sk_buff *skb)
+{
+ skb->mac_len = skb->network_header - skb->mac_header;
+}
+
+static inline unsigned char *skb_inner_transport_header(const struct sk_buff
+ *skb)
+{
+ return skb->head + skb->inner_transport_header;
+}
+
+static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
+{
+ skb->inner_transport_header = skb->data - skb->head;
+}
+
+static inline void skb_set_inner_transport_header(struct sk_buff *skb,
+ const int offset)
+{
+ skb_reset_inner_transport_header(skb);
+ skb->inner_transport_header += offset;
+}
+
+static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
+{
+ return skb->head + skb->inner_network_header;
+}
+
+static inline void skb_reset_inner_network_header(struct sk_buff *skb)
+{
+ skb->inner_network_header = skb->data - skb->head;
+}
+
+static inline void skb_set_inner_network_header(struct sk_buff *skb,
+ const int offset)
+{
+ skb_reset_inner_network_header(skb);
+ skb->inner_network_header += offset;
+}
+
+static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
+{
+ return skb->head + skb->inner_mac_header;
+}
+
+static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
+{
+ skb->inner_mac_header = skb->data - skb->head;
+}
+
+static inline void skb_set_inner_mac_header(struct sk_buff *skb,
+ const int offset)
+{
+ skb_reset_inner_mac_header(skb);
+ skb->inner_mac_header += offset;
+}
+static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
+{
+ return skb->transport_header != (typeof(skb->transport_header))~0U;
+}
+
+static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
+{
+ return skb->head + skb->transport_header;
+}
+
+static inline void skb_reset_transport_header(struct sk_buff *skb)
+{
+ skb->transport_header = skb->data - skb->head;
+}
+
+static inline void skb_set_transport_header(struct sk_buff *skb,
+ const int offset)
+{
+ skb_reset_transport_header(skb);
+ skb->transport_header += offset;
+}
+
+static inline unsigned char *skb_network_header(const struct sk_buff *skb)
+{
+ return skb->head + skb->network_header;
+}
+
+static inline void skb_reset_network_header(struct sk_buff *skb)
+{
+ skb->network_header = skb->data - skb->head;
+}
+
+static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
+{
+ skb_reset_network_header(skb);
+ skb->network_header += offset;
+}
+
+static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
+{
+ return skb->head + skb->mac_header;
+}
+
+static inline int skb_mac_header_was_set(const struct sk_buff *skb)
+{
+ return skb->mac_header != (typeof(skb->mac_header))~0U;
+}
+
+static inline void skb_reset_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = skb->data - skb->head;
+}
+
+static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
+{
+ skb_reset_mac_header(skb);
+ skb->mac_header += offset;
+}
+
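+/*
+ * Illustrative sketch (not part of this API): recording header offsets for
+ * a freshly built IPv4-over-Ethernet frame, assuming skb->data points at
+ * the Ethernet header and the IP header carries no options.
+ *
+ *	skb_reset_mac_header(skb);
+ *	skb_set_network_header(skb, ETH_HLEN);
+ *	skb_set_transport_header(skb, ETH_HLEN + sizeof(struct iphdr));
+ */
+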
+static inline void skb_pop_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = skb->network_header;
+}
+
+static inline void skb_probe_transport_header(struct sk_buff *skb,
+ const int offset_hint)
+{
+ struct flow_keys keys;
+
+ if (skb_transport_header_was_set(skb))
+ return;
+ else if (skb_flow_dissect(skb, &keys))
+ skb_set_transport_header(skb, keys.thoff);
+ else
+ skb_set_transport_header(skb, offset_hint);
+}
+
+static inline void skb_mac_header_rebuild(struct sk_buff *skb)
+{
+ if (skb_mac_header_was_set(skb)) {
+ const unsigned char *old_mac = skb_mac_header(skb);
+
+ skb_set_mac_header(skb, -skb->mac_len);
+ memmove(skb_mac_header(skb), old_mac, skb->mac_len);
+ }
+}
+
+static inline int skb_checksum_start_offset(const struct sk_buff *skb)
+{
+ return skb->csum_start - skb_headroom(skb);
+}
+
+static inline int skb_transport_offset(const struct sk_buff *skb)
+{
+ return skb_transport_header(skb) - skb->data;
+}
+
+static inline u32 skb_network_header_len(const struct sk_buff *skb)
+{
+ return skb->transport_header - skb->network_header;
+}
+
+static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
+{
+ return skb->inner_transport_header - skb->inner_network_header;
+}
+
+static inline int skb_network_offset(const struct sk_buff *skb)
+{
+ return skb_network_header(skb) - skb->data;
+}
+
+static inline int skb_inner_network_offset(const struct sk_buff *skb)
+{
+ return skb_inner_network_header(skb) - skb->data;
+}
+
+static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ return pskb_may_pull(skb, skb_network_offset(skb) + len);
+}
+
+/*
+ * CPUs often take a performance hit when accessing unaligned memory
+ * locations. The actual performance hit varies; it can be small if the
+ * hardware handles it or large if we have to take an exception and fix it
+ * in software.
+ *
+ * Since an ethernet header is 14 bytes, network drivers often end up with
+ * the IP header at an unaligned offset. The IP header can be aligned by
+ * shifting the start of the packet by 2 bytes. Drivers should do this
+ * with:
+ *
+ * skb_reserve(skb, NET_IP_ALIGN);
+ *
+ * The downside to this alignment of the IP header is that the DMA is now
+ * unaligned. On some architectures the cost of an unaligned DMA is high
+ * and this cost outweighs the gains made by aligning the IP header.
+ *
+ * Since this trade off varies between architectures, we allow NET_IP_ALIGN
+ * to be overridden.
+ */
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
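+/*
+ * Illustrative sketch (not part of this API): an RX allocation that honours
+ * NET_IP_ALIGN, either explicitly or via the *_ip_align helper declared
+ * later in this file.  "RX_BUF_LEN" is a hypothetical buffer size.
+ *
+ *	skb = netdev_alloc_skb(dev, RX_BUF_LEN + NET_IP_ALIGN);
+ *	if (skb)
+ *		skb_reserve(skb, NET_IP_ALIGN);
+ *
+ * or simply:
+ *
+ *	skb = netdev_alloc_skb_ip_align(dev, RX_BUF_LEN);
+ */
+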
+/*
+ * The networking layer reserves some headroom in skb data (via
+ * dev_alloc_skb). This is used to avoid having to reallocate skb data when
+ * the header has to grow. In the default case, if the header has to grow
+ * 32 bytes or less we avoid the reallocation.
+ *
+ * Unfortunately this headroom changes the DMA alignment of the resulting
+ * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
+ * on some architectures. An architecture can override this value,
+ * perhaps setting it to a cacheline in size (since that will maintain
+ * cacheline alignment of the DMA). It must be a power of 2.
+ *
+ * Various parts of the networking layer expect at least 32 bytes of
+ * headroom; you should not reduce this.
+ *
+ * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
+ * to reduce the average number of cache lines per packet.
+ * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
+ * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
+ */
+#ifndef NET_SKB_PAD
+#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
+#endif
+
+int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (unlikely(skb_is_nonlinear(skb))) {
+ WARN_ON(1);
+ return;
+ }
+ skb->len = len;
+ skb_set_tail_pointer(skb, len);
+}
+
+void skb_trim(struct sk_buff *skb, unsigned int len);
+
+static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->data_len)
+ return ___pskb_trim(skb, len);
+ __skb_trim(skb, len);
+ return 0;
+}
+
+static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+ return (len < skb->len) ? __pskb_trim(skb, len) : 0;
+}
+
+/**
+ * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
+ * @skb: buffer to alter
+ * @len: new length
+ *
+ * This is identical to pskb_trim except that the caller knows that
+ * the skb is not cloned so we should never get an error due to out-
+ * of-memory.
+ */
+static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
+{
+ int err = pskb_trim(skb, len);
+ BUG_ON(err);
+}
+
+/**
+ * skb_orphan - orphan a buffer
+ * @skb: buffer to orphan
+ *
+ * If a buffer currently has an owner then we call the owner's
+ * destructor function and make the @skb unowned. The buffer continues
+ * to exist but is no longer charged to its former owner.
+ */
+static inline void skb_orphan(struct sk_buff *skb)
+{
+ if (skb->destructor) {
+ skb->destructor(skb);
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ } else {
+ BUG_ON(skb->sk);
+ }
+}
+
+/**
+ * skb_orphan_frags - orphan the frags contained in a buffer
+ * @skb: buffer to orphan frags from
+ * @gfp_mask: allocation mask for replacement pages
+ *
+ * For each frag in the SKB which needs a destructor (i.e. has an
+ * owner) create a copy of that frag and release the original
+ * page by calling the destructor.
+ */
+static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
+{
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
+ return 0;
+ return skb_copy_ubufs(skb, gfp_mask);
+}
+
+/**
+ * __skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+ * Delete all buffers on an &sk_buff list. Each buffer is removed from
+ * the list and one reference dropped. This function does not take the
+ * list lock and the caller must hold the relevant locks to use it.
+ */
+void skb_queue_purge(struct sk_buff_head *list);
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+ while ((skb = __skb_dequeue(list)) != NULL)
+ kfree_skb(skb);
+}
+
+#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
+#define NETDEV_FRAG_PAGE_MAX_SIZE (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
+#define NETDEV_PAGECNT_MAX_BIAS NETDEV_FRAG_PAGE_MAX_SIZE
+
+void *netdev_alloc_frag(unsigned int fragsz);
+
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
+ gfp_t gfp_mask);
+
+/**
+ * netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ * @dev: network device to receive on
+ * @length: length to allocate
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory. Although this function
+ * allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
+}
+
+/* legacy helper around __netdev_alloc_skb() */
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+ gfp_t gfp_mask)
+{
+ return __netdev_alloc_skb(NULL, length, gfp_mask);
+}
+
+/* legacy helper around netdev_alloc_skb() */
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+ return netdev_alloc_skb(NULL, length);
+}
+
+
+static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length, gfp_t gfp)
+{
+ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
+
+ if (NET_IP_ALIGN && skb)
+ skb_reserve(skb, NET_IP_ALIGN);
+ return skb;
+}
+
+static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length)
+{
+ return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
+}
+
+void *napi_alloc_frag(unsigned int fragsz);
+struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
+ unsigned int length, gfp_t gfp_mask);
+static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
+ unsigned int length)
+{
+ return __napi_alloc_skb(napi, length, GFP_ATOMIC);
+}
+
+/**
+ * __dev_alloc_pages - allocate page for network Rx
+ * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
+ * @order: size of the allocation
+ *
+ * Allocate a new page.
+ *
+ * %NULL is returned if there is no free memory.
+*/
+static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
+ unsigned int order)
+{
+ /* This piece of code contains several assumptions.
+ * 1. This is for device Rx, therefore a cold page is preferred.
+ * 2. The expectation is the user wants a compound page.
+ * 3. If requesting an order-0 page, it will not be compound
+ * due to the check to see if order has a value in prep_new_page
+ * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
+ * code in gfp_to_alloc_flags that should be enforcing this.
+ */
+ gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
+
+ return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
+}
+
+static inline struct page *dev_alloc_pages(unsigned int order)
+{
+ return __dev_alloc_pages(GFP_ATOMIC, order);
+}
+
+/**
+ * __dev_alloc_page - allocate a page for network Rx
+ * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
+ *
+ * Allocate a new page.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
+{
+ return __dev_alloc_pages(gfp_mask, 0);
+}
+
+static inline struct page *dev_alloc_page(void)
+{
+ return __dev_alloc_page(GFP_ATOMIC);
+}
+
+/**
+ * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
+ * @page: The page that was allocated from skb_alloc_page
+ * @skb: The skb that may need pfmemalloc set
+ */
+static inline void skb_propagate_pfmemalloc(struct page *page,
+ struct sk_buff *skb)
+{
+ if (page && page->pfmemalloc)
+ skb->pfmemalloc = true;
+}
+
+/**
+ * skb_frag_page - retrieve the page referred to by a paged fragment
+ * @frag: the paged fragment
+ *
+ * Returns the &struct page associated with @frag.
+ */
+static inline struct page *skb_frag_page(const skb_frag_t *frag)
+{
+ return frag->page.p;
+}
+
+/**
+ * __skb_frag_ref - take an additional reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Takes an additional reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_ref(skb_frag_t *frag)
+{
+ get_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_ref - take an additional reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset.
+ *
+ * Takes an additional reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_ref(struct sk_buff *skb, int f)
+{
+ __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+}
+
+/**
+ * __skb_frag_unref - release a reference on a paged fragment.
+ * @frag: the paged fragment
+ *
+ * Releases a reference on the paged fragment @frag.
+ */
+static inline void __skb_frag_unref(skb_frag_t *frag)
+{
+ put_page(skb_frag_page(frag));
+}
+
+/**
+ * skb_frag_unref - release a reference on a paged fragment of an skb.
+ * @skb: the buffer
+ * @f: the fragment offset
+ *
+ * Releases a reference on the @f'th paged fragment of @skb.
+ */
+static inline void skb_frag_unref(struct sk_buff *skb, int f)
+{
+ __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
+}
+
+/**
+ * skb_frag_address - gets the address of the data contained in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns the address of the data within @frag. The page must already
+ * be mapped.
+ */
+static inline void *skb_frag_address(const skb_frag_t *frag)
+{
+ return page_address(skb_frag_page(frag)) + frag->page_offset;
+}
+
+/**
+ * skb_frag_address_safe - gets the address of the data contained in a paged fragment
+ * @frag: the paged fragment buffer
+ *
+ * Returns the address of the data within @frag. Checks that the page
+ * is mapped and returns %NULL otherwise.
+ */
+static inline void *skb_frag_address_safe(const skb_frag_t *frag)
+{
+ void *ptr = page_address(skb_frag_page(frag));
+ if (unlikely(!ptr))
+ return NULL;
+
+ return ptr + frag->page_offset;
+}
+
+/**
+ * __skb_frag_set_page - sets the page contained in a paged fragment
+ * @frag: the paged fragment
+ * @page: the page to set
+ *
+ * Sets the fragment @frag to contain @page.
+ */
+static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
+{
+ frag->page.p = page;
+}
+
+/**
+ * skb_frag_set_page - sets the page contained in a paged fragment of an skb
+ * @skb: the buffer
+ * @f: the fragment offset
+ * @page: the page to set
+ *
+ * Sets the @f'th fragment of @skb to contain @page.
+ */
+static inline void skb_frag_set_page(struct sk_buff *skb, int f,
+ struct page *page)
+{
+ __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
+}
+
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
+
+/**
+ * skb_frag_dma_map - maps a paged fragment via the DMA API
+ * @dev: the device to map the fragment to
+ * @frag: the paged fragment to map
+ * @offset: the offset within the fragment (starting at the
+ * fragment's own offset)
+ * @size: the number of bytes to map
+ * @dir: the direction of the mapping (%PCI_DMA_*)
+ *
+ * Maps the page associated with @frag to @device.
+ */
+static inline dma_addr_t skb_frag_dma_map(struct device *dev,
+ const skb_frag_t *frag,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ return dma_map_page(dev, skb_frag_page(frag),
+ frag->page_offset + offset, size, dir);
+}
+
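+/*
+ * Illustrative sketch (not part of this API): mapping every paged fragment
+ * of an skb for transmit DMA.  "dev" is the NIC's struct device and
+ * tx_fill_desc() is a hypothetical descriptor helper.
+ *
+ *	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+ *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+ *		dma_addr_t dma;
+ *
+ *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
+ *				       DMA_TO_DEVICE);
+ *		if (dma_mapping_error(dev, dma))
+ *			goto unmap;
+ *		tx_fill_desc(ring, dma, skb_frag_size(frag));
+ *	}
+ */
+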
+static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
+ gfp_t gfp_mask)
+{
+ return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
+}
+
+
+static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
+ gfp_t gfp_mask)
+{
+ return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
+}
+
+
+/**
+ * skb_clone_writable - is the header of a clone writable
+ * @skb: buffer to check
+ * @len: length up to which to write
+ *
+ * Returns true if modifying the header part of the cloned buffer
+ * does not require the data to be copied.
+ */
+static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
+{
+ return !skb_header_cloned(skb) &&
+ skb_headroom(skb) + len <= skb->hdr_len;
+}
+
+static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+ int cloned)
+{
+ int delta = 0;
+
+ if (headroom > skb_headroom(skb))
+ delta = headroom - skb_headroom(skb);
+
+ if (delta || cloned)
+ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+ GFP_ATOMIC);
+ return 0;
+}
+
+/**
+ * skb_cow - copy header of skb when it is required
+ * @skb: buffer to cow
+ * @headroom: needed headroom
+ *
+ * If the skb passed lacks sufficient headroom or its data part
+ * is shared, data is reallocated. If reallocation fails, an error
+ * is returned and original skb is not changed.
+ *
+ * The result is skb with writable area skb->head...skb->tail
+ * and at least @headroom of space at head.
+ */
+static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
+{
+ return __skb_cow(skb, headroom, skb_cloned(skb));
+}
+
+/**
+ * skb_cow_head - skb_cow but only making the head writable
+ * @skb: buffer to cow
+ * @headroom: needed headroom
+ *
+ * This function is identical to skb_cow except that we replace the
+ * skb_cloned check by skb_header_cloned. It should be used when
+ * you only need to push on some header and do not need to modify
+ * the data.
+ */
+static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+ return __skb_cow(skb, headroom, skb_header_cloned(skb));
+}
+
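+/*
+ * Illustrative sketch (not part of this API): encapsulation paths usually
+ * call skb_cow_head() before pushing a new header so that the headroom is
+ * both large enough and private to this skb.
+ *
+ *	if (skb_cow_head(skb, VLAN_HLEN) < 0) {
+ *		kfree_skb(skb);
+ *		return -ENOMEM;
+ *	}
+ *	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
+ */
+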
+/**
+ * skb_padto - pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int skb_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+ if (likely(size >= len))
+ return 0;
+ return skb_pad(skb, len - size);
+}
+
+/**
+ * skb_put_padto - increase size and pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Otherwise it is extended. Returns zero on
+ * success. The skb is freed on error.
+ */
+static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+
+ if (unlikely(size < len)) {
+ len -= size;
+ if (skb_pad(skb, len))
+ return -ENOMEM;
+ __skb_put(skb, len);
+ }
+ return 0;
+}
+
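+/*
+ * Illustrative sketch (not part of this API): drivers commonly pad short
+ * Ethernet frames to the minimum frame size before handing them to
+ * hardware that does not pad on its own.  On error the skb has already
+ * been freed, so the driver just reports the packet as consumed.
+ *
+ *	if (skb_put_padto(skb, ETH_ZLEN))
+ *		return NETDEV_TX_OK;
+ */
+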
+static inline int skb_add_data(struct sk_buff *skb,
+ struct iov_iter *from, int copy)
+{
+ const int off = skb->len;
+
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ __wsum csum = 0;
+ if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
+ &csum, from) == copy) {
+ skb->csum = csum_block_add(skb->csum, csum, off);
+ return 0;
+ }
+ } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
+ return 0;
+
+ __skb_trim(skb, off);
+ return -EFAULT;
+}
+
+static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
+ const struct page *page, int off)
+{
+ if (i) {
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ return page == skb_frag_page(frag) &&
+ off == frag->page_offset + skb_frag_size(frag);
+ }
+ return false;
+}
+
+static inline int __skb_linearize(struct sk_buff *skb)
+{
+ return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
+}
+
+/**
+ * skb_linearize - convert paged skb to linear one
+ * @skb: buffer to linearize
+ *
+ * If there is no free memory -ENOMEM is returned, otherwise zero
+ * is returned and the old skb data released.
+ */
+static inline int skb_linearize(struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
+}
+
+/**
+ * skb_has_shared_frag - can any frag be overwritten
+ * @skb: buffer to test
+ *
+ * Return true if the skb has at least one frag that might be modified
+ * by an external entity (as in vmsplice()/sendfile())
+ */
+static inline bool skb_has_shared_frag(const struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+}
+
+/**
+ * skb_linearize_cow - make sure skb is linear and writable
+ * @skb: buffer to process
+ *
+ * If there is no free memory -ENOMEM is returned, otherwise zero
+ * is returned and the old skb data released.
+ */
+static inline int skb_linearize_cow(struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) || skb_cloned(skb) ?
+ __skb_linearize(skb) : 0;
+}
+
+/**
+ * skb_postpull_rcsum - update checksum for received skb after pull
+ * @skb: buffer to update
+ * @start: start of data before pull
+ * @len: length of data pulled
+ *
+ * After doing a pull on a received packet, you need to call this to
+ * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
+ * CHECKSUM_NONE so that it can be recomputed from scratch.
+ */
+
+static inline void skb_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
+}
+
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
+/**
+ * pskb_trim_rcsum - trim received skb and update checksum
+ * @skb: buffer to trim
+ * @len: new length
+ *
+ * This is exactly the same as pskb_trim except that it ensures the
+ * checksum of received packets is still valid after the operation.
+ */
+
+static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (likely(len >= skb->len))
+ return 0;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ return __pskb_trim(skb, len);
+}
+
+#define skb_queue_walk(queue, skb) \
+ for (skb = (queue)->next; \
+ skb != (struct sk_buff *)(queue); \
+ skb = skb->next)
+
+#define skb_queue_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->next, tmp = skb->next; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->next)
+
+#define skb_queue_walk_from(queue, skb) \
+ for (; skb != (struct sk_buff *)(queue); \
+ skb = skb->next)
+
+#define skb_queue_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->next; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->next)
+
+#define skb_queue_reverse_walk(queue, skb) \
+ for (skb = (queue)->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = skb->prev)
+
+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+
+#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+
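+/*
+ * Illustrative sketch (not part of this API): removing selected entries
+ * from a queue with the queue lock held; the _safe variant is required
+ * because the current skb is unlinked inside the loop.  "skb" and "tmp"
+ * are the iterator variables and match() is a hypothetical predicate.
+ *
+ *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
+ *		if (match(skb)) {
+ *			__skb_unlink(skb, &sk->sk_receive_queue);
+ *			kfree_skb(skb);
+ *		}
+ *	}
+ */
+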
+static inline bool skb_has_frag_list(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->frag_list != NULL;
+}
+
+static inline void skb_frag_list_init(struct sk_buff *skb)
+{
+ skb_shinfo(skb)->frag_list = NULL;
+}
+
+static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
+{
+ frag->next = skb_shinfo(skb)->frag_list;
+ skb_shinfo(skb)->frag_list = frag;
+}
+
+#define skb_walk_frags(skb, iter) \
+ for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
+
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+ int *peeked, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+ int *err);
+unsigned int datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
+ struct iov_iter *to, int size);
+static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
+ struct msghdr *msg, int size)
+{
+ return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
+}
+int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
+ struct msghdr *msg);
+int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
+ struct iov_iter *from, int len);
+int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
+void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+ int len, __wsum csum);
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+ struct pipe_inode_info *pipe, unsigned int len,
+ unsigned int flags);
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
+int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
+ int len, int hlen);
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
+int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int skb_vlan_pop(struct sk_buff *skb);
+int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+
+static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
+{
+ return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+}
+
+static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
+{
+ return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+}
+
+struct skb_checksum_ops {
+ __wsum (*update)(const void *mem, int len, __wsum wsum);
+ __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
+};
+
+__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum, const struct skb_checksum_ops *ops);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+ __wsum csum);
+
+static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *data, int hlen, void *buffer)
+{
+ if (hlen - offset >= len)
+ return data + offset;
+
+ if (!skb ||
+ skb_copy_bits(skb, offset, buffer, len) < 0)
+ return NULL;
+
+ return buffer;
+}
+
+static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
+ int len, void *buffer)
+{
+ return __skb_header_pointer(skb, offset, len, skb->data,
+ skb_headlen(skb), buffer);
+}
+
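+/*
+ * Illustrative sketch (not part of this API): reading a header that may
+ * not be in the linear area, without modifying the skb.  "offset" is the
+ * hypothetical offset of the header within the packet.
+ *
+ *	struct udphdr _uh;
+ *	const struct udphdr *uh;
+ *
+ *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
+ *	if (!uh)
+ *		goto drop;
+ */
+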
+/**
+ * skb_needs_linearize - check if we need to linearize a given skb
+ * depending on the given device features.
+ * @skb: socket buffer to check
+ * @features: net device features
+ *
+ * Returns true if either:
+ * 1. skb has frag_list and the device doesn't support FRAGLIST, or
+ * 2. skb is fragmented and the device does not support SG.
+ */
+static inline bool skb_needs_linearize(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ return skb_is_nonlinear(skb) &&
+ ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
+ (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
+}
+
+static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
+ void *to,
+ const unsigned int len)
+{
+ memcpy(to, skb->data, len);
+}
+
+static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
+ const int offset, void *to,
+ const unsigned int len)
+{
+ memcpy(to, skb->data + offset, len);
+}
+
+static inline void skb_copy_to_linear_data(struct sk_buff *skb,
+ const void *from,
+ const unsigned int len)
+{
+ memcpy(skb->data, from, len);
+}
+
+static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
+ const int offset,
+ const void *from,
+ const unsigned int len)
+{
+ memcpy(skb->data + offset, from, len);
+}
+
+void skb_init(void);
+
+static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
+{
+ return skb->tstamp;
+}
+
+/**
+ * skb_get_timestamp - get timestamp from a skb
+ * @skb: skb to get stamp from
+ * @stamp: pointer to struct timeval to store stamp in
+ *
+ * Timestamps are stored in the skb as offsets to a base timestamp.
+ * This function converts the offset back to a struct timeval and stores
+ * it in stamp.
+ */
+static inline void skb_get_timestamp(const struct sk_buff *skb,
+ struct timeval *stamp)
+{
+ *stamp = ktime_to_timeval(skb->tstamp);
+}
+
+static inline void skb_get_timestampns(const struct sk_buff *skb,
+ struct timespec *stamp)
+{
+ *stamp = ktime_to_timespec(skb->tstamp);
+}
+
+static inline void __net_timestamp(struct sk_buff *skb)
+{
+ skb->tstamp = ktime_get_real();
+}
+
+static inline ktime_t net_timedelta(ktime_t t)
+{
+ return ktime_sub(ktime_get_real(), t);
+}
+
+static inline ktime_t net_invalid_timestamp(void)
+{
+ return ktime_set(0, 0);
+}
+
+struct sk_buff *skb_clone_sk(struct sk_buff *skb);
+
+#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
+
+void skb_clone_tx_timestamp(struct sk_buff *skb);
+bool skb_defer_rx_timestamp(struct sk_buff *skb);
+
+#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
+
+static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
+{
+}
+
+static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
+{
+ return false;
+}
+
+#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
+
+/**
+ * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
+ *
+ * PHY drivers may accept clones of transmitted packets for
+ * timestamping via their phy_driver.txtstamp method. These drivers
+ * must call this function to return the skb back to the stack, with
+ * or without a timestamp.
+ *
+ * @skb: clone of the original outgoing packet
+ * @hwtstamps: hardware time stamps, may be NULL if not available
+ *
+ */
+void skb_complete_tx_timestamp(struct sk_buff *skb,
+ struct skb_shared_hwtstamps *hwtstamps);
+
+void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ struct skb_shared_hwtstamps *hwtstamps,
+ struct sock *sk, int tstype);
+
+/**
+ * skb_tstamp_tx - queue clone of skb with send time stamps
+ * @orig_skb: the original outgoing packet
+ * @hwtstamps: hardware time stamps, may be NULL if not available
+ *
+ * If the skb has a socket associated, then this function clones the
+ * skb (thus sharing the actual data and optional structures), stores
+ * the optional hardware time stamping information (if non NULL) or
+ * generates a software time stamp (otherwise), then queues the clone
+ * to the error queue of the socket. Errors are silently ignored.
+ */
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+ struct skb_shared_hwtstamps *hwtstamps);
+
+static inline void sw_tx_timestamp(struct sk_buff *skb)
+{
+ if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
+ !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ skb_tstamp_tx(skb, NULL);
+}
+
+/**
+ * skb_tx_timestamp() - Driver hook for transmit timestamping
+ *
+ * Ethernet MAC Drivers should call this function in their hard_xmit()
+ * function immediately before giving the sk_buff to the MAC hardware.
+ *
+ * Specifically, one should make absolutely sure that this function is
+ * called before TX completion of this packet can trigger. Otherwise
+ * the packet could potentially already be freed.
+ *
+ * @skb: A socket buffer.
+ */
+static inline void skb_tx_timestamp(struct sk_buff *skb)
+{
+ skb_clone_tx_timestamp(skb);
+ sw_tx_timestamp(skb);
+}
+
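+/*
+ * Illustrative sketch (not part of this API): placement of
+ * skb_tx_timestamp() in a driver's ndo_start_xmit(), after the TX
+ * descriptors are filled but before the hardware is told to transmit.
+ * The doorbell write below is hypothetical.
+ *
+ *	(fill TX descriptors for skb)
+ *	skb_tx_timestamp(skb);
+ *	writel(ring->next_to_use, ring->tail_reg);
+ *	return NETDEV_TX_OK;
+ */
+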
+/**
+ * skb_complete_wifi_ack - deliver skb with wifi status
+ *
+ * @skb: the original outgoing packet
+ * @acked: ack status
+ *
+ */
+void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
+
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
+__sum16 __skb_checksum_complete(struct sk_buff *skb);
+
+static inline int skb_csum_unnecessary(const struct sk_buff *skb)
+{
+ return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
+ skb->csum_valid ||
+ (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_start_offset(skb) >= 0));
+}
+
+/**
+ * skb_checksum_complete - Calculate checksum of an entire packet
+ * @skb: packet to process
+ *
+ * This function calculates the checksum over the entire packet plus
+ * the value of skb->csum. The latter can be used to supply the
+ * checksum of a pseudo header as used by TCP/UDP. It returns the
+ * checksum.
+ *
+ * For protocols that contain complete checksums such as ICMP/TCP/UDP,
+ * this function can be used to verify the checksum on received
+ * packets. In that case the function should return zero if the
+ * checksum is correct. In particular, this function will return zero
+ * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
+ * hardware has already verified the correctness of the checksum.
+ */
+static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
+{
+ return skb_csum_unnecessary(skb) ?
+ 0 : __skb_checksum_complete(skb);
+}
+
+static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (skb->csum_level == 0)
+ skb->ip_summed = CHECKSUM_NONE;
+ else
+ skb->csum_level--;
+ }
+}
+
+static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
+ skb->csum_level++;
+ } else if (skb->ip_summed == CHECKSUM_NONE) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = 0;
+ }
+}
+
+static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
+{
+ /* Mark current checksum as bad (typically called from GRO
+ * path). In the case that ip_summed is CHECKSUM_NONE
+ * this must be the first checksum encountered in the packet.
+ * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
+ * checksum after the last one validated. For UDP, a zero
+ * checksum cannot be marked as bad.
+ */
+
+ if (skb->ip_summed == CHECKSUM_NONE ||
+ skb->ip_summed == CHECKSUM_UNNECESSARY)
+ skb->csum_bad = 1;
+}
+
+/* Check if we need to perform checksum complete validation.
+ *
+ * Returns true if checksum complete is needed, false otherwise
+ * (either checksum is unnecessary or zero checksum is allowed).
+ */
+static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
+ bool zero_okay,
+ __sum16 check)
+{
+ if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
+ skb->csum_valid = 1;
+ __skb_decr_checksum_unnecessary(skb);
+ return false;
+ }
+
+ return true;
+}
+
+/* For small packets <= CHECKSUM_BREAK, perform checksum complete directly
+ * in checksum_init.
+ */
+#define CHECKSUM_BREAK 76
+
+/* Unset checksum-complete
+ *
+ * Unsetting checksum complete can be done when a packet is being modified
+ * (uncompressed, for instance) and the checksum-complete value is thereby
+ * invalidated.
+ */
+static inline void skb_checksum_complete_unset(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Validate (init) checksum based on checksum complete.
+ *
+ * Return values:
+ * 0: checksum is validated or an attempt will be made in
+ * skb_checksum_complete. In the latter case the ip_summed will not be
+ * CHECKSUM_UNNECESSARY and the pseudo checksum is stored in skb->csum for
+ * use in __skb_checksum_complete
+ * non-zero: value of invalid checksum
+ *
+ */
+static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
+ bool complete,
+ __wsum psum)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ if (!csum_fold(csum_add(psum, skb->csum))) {
+ skb->csum_valid = 1;
+ return 0;
+ }
+ } else if (skb->csum_bad) {
+ /* ip_summed == CHECKSUM_NONE in this case */
+ return 1;
+ }
+
+ skb->csum = psum;
+
+ if (complete || skb->len <= CHECKSUM_BREAK) {
+ __sum16 csum;
+
+ csum = __skb_checksum_complete(skb);
+ skb->csum_valid = !csum;
+ return csum;
+ }
+
+ return 0;
+}
+
+static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
+{
+ return 0;
+}
+
+/* Perform checksum validate (init). Note that this is a macro since we only
+ * want to call the pseudo-header computation function when it is necessary.
+ * First we try to validate without any computation (checksum unnecessary) and
+ * then calculate based on checksum complete calling the function to compute
+ * pseudo header.
+ *
+ * Return values:
+ * 0: checksum is validated or an attempt will be made in skb_checksum_complete
+ * non-zero: value of invalid checksum
+ */
+#define __skb_checksum_validate(skb, proto, complete, \
+ zero_okay, check, compute_pseudo) \
+({ \
+ __sum16 __ret = 0; \
+ skb->csum_valid = 0; \
+ if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
+ __ret = __skb_checksum_validate_complete(skb, \
+ complete, compute_pseudo(skb, proto)); \
+ __ret; \
+})
+
+#define skb_checksum_init(skb, proto, compute_pseudo) \
+ __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
+
+#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
+ __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
+
+#define skb_checksum_validate(skb, proto, compute_pseudo) \
+ __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
+
+#define skb_checksum_validate_zero_check(skb, proto, check, \
+ compute_pseudo) \
+ __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
+
+#define skb_checksum_simple_validate(skb) \
+ __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
+
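+/*
+ * Illustrative sketch (not part of this API): a protocol with a plain
+ * Internet checksum and no pseudo header (ICMP, for example) can validate
+ * a received packet with the simple helper.
+ *
+ *	if (skb_checksum_simple_validate(skb))
+ *		goto csum_error;
+ */
+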
+static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
+{
+ return (skb->ip_summed == CHECKSUM_NONE &&
+ skb->csum_valid && !skb->csum_bad);
+}
+
+static inline void __skb_checksum_convert(struct sk_buff *skb,
+ __sum16 check, __wsum pseudo)
+{
+ skb->csum = ~pseudo;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+}
+
+#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
+do { \
+ if (__skb_checksum_convert_check(skb)) \
+ __skb_checksum_convert(skb, check, \
+ compute_pseudo(skb, proto)); \
+} while (0)
+
+static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
+ u16 start, u16 offset)
+{
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
+ skb->csum_offset = offset - start;
+}
+
+/* Update the skbuff and packet to reflect the remote checksum offload
+ * operation. When called, ptr indicates the starting point for skb->csum when
+ * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
+ * here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
+ */
+static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
+ int start, int offset, bool nopartial)
+{
+ __wsum delta;
+
+ if (!nopartial) {
+ skb_remcsum_adjust_partial(skb, ptr, start, offset);
+ return;
+ }
+
+ if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
+ __skb_checksum_complete(skb);
+ skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
+ }
+
+ delta = remcsum_adjust(ptr, skb->csum, start, offset);
+
+ /* Adjust skb->csum since we changed the packet */
+ skb->csum = csum_add(skb->csum, delta);
+}
+
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
+static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+{
+ if (nfct && atomic_dec_and_test(&nfct->use))
+ nf_conntrack_destroy(nfct);
+}
+static inline void nf_conntrack_get(struct nf_conntrack *nfct)
+{
+ if (nfct)
+ atomic_inc(&nfct->use);
+}
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+{
+ if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
+ kfree(nf_bridge);
+}
+static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+{
+ if (nf_bridge)
+ atomic_inc(&nf_bridge->use);
+}
+#endif /* CONFIG_BRIDGE_NETFILTER */
+static inline void nf_reset(struct sk_buff *skb)
+{
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = NULL;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nf_bridge_put(skb->nf_bridge);
+ skb->nf_bridge = NULL;
+#endif
+}
+
+static inline void nf_reset_trace(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+ skb->nf_trace = 0;
+#endif
+}
+
+/* Note: This doesn't put any conntrack and bridge info in dst. */
+static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ bool copy)
+{
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ dst->nfct = src->nfct;
+ nf_conntrack_get(src->nfct);
+ if (copy)
+ dst->nfctinfo = src->nfctinfo;
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ dst->nf_bridge = src->nf_bridge;
+ nf_bridge_get(src->nf_bridge);
+#endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+ if (copy)
+ dst->nf_trace = src->nf_trace;
+#endif
+}
+
+static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
+{
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ nf_conntrack_put(dst->nfct);
+#endif
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nf_bridge_put(dst->nf_bridge);
+#endif
+ __nf_copy(dst, src, true);
+}
+
+#ifdef CONFIG_NETWORK_SECMARK
+static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
+{
+ to->secmark = from->secmark;
+}
+
+static inline void skb_init_secmark(struct sk_buff *skb)
+{
+ skb->secmark = 0;
+}
+#else
+static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
+{ }
+
+static inline void skb_init_secmark(struct sk_buff *skb)
+{ }
+#endif
+
+static inline bool skb_irq_freeable(const struct sk_buff *skb)
+{
+ return !skb->destructor &&
+#if IS_ENABLED(CONFIG_XFRM)
+ !skb->sp &&
+#endif
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ !skb->nfct &&
+#endif
+ !skb->_skb_refdst &&
+ !skb_has_frag_list(skb);
+}
+
+static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
+{
+ skb->queue_mapping = queue_mapping;
+}
+
+static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
+{
+ return skb->queue_mapping;
+}
+
+static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
+{
+ to->queue_mapping = from->queue_mapping;
+}
+
+static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
+{
+ skb->queue_mapping = rx_queue + 1;
+}
+
+static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
+{
+ return skb->queue_mapping - 1;
+}
+
+static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
+{
+ return skb->queue_mapping != 0;
+}
+
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
+ unsigned int num_tx_queues);
+
+static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ return skb->sp;
+#else
+ return NULL;
+#endif
+}
+
+/* Keeps track of mac header offset relative to skb->head.
+ * It is useful for TSO of tunneling protocols, e.g. GRE.
+ * For non-tunnel skb it points to skb_mac_header() and for
+ * tunnel skb it points to outer mac header.
+ * Keeps track of level of encapsulation of network headers.
+ */
+struct skb_gso_cb {
+ int mac_offset;
+ int encap_level;
+ __u16 csum_start;
+};
+#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
+
+static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
+{
+ return (skb_mac_header(inner_skb) - inner_skb->head) -
+ SKB_GSO_CB(inner_skb)->mac_offset;
+}
+
+static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
+{
+ int new_headroom, headroom;
+ int ret;
+
+ headroom = skb_headroom(skb);
+ ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ new_headroom = skb_headroom(skb);
+ SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
+ return 0;
+}
+
+/* Compute the checksum for a gso segment. First compute the checksum value
+ * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
+ * then add in skb->csum (checksum from csum_start to end of packet).
+ * skb->csum and csum_start are then updated to reflect the checksum of the
+ * resultant packet starting from the transport header-- the resultant checksum
+ * is in the res argument (i.e. normally zero or the ~ of the checksum of a
+ * pseudo header).
+ */
+static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
+{
+ int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
+ skb_transport_offset(skb);
+ __u16 csum;
+
+ csum = csum_fold(csum_partial(skb_transport_header(skb),
+ plen, skb->csum));
+ skb->csum = res;
+ SKB_GSO_CB(skb)->csum_start -= plen;
+
+ return csum;
+}
+
+static inline bool skb_is_gso(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_size;
+}
+
+/* Note: Should be called only if skb_is_gso(skb) is true */
+static inline bool skb_is_gso_v6(const struct sk_buff *skb)
+{
+ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+
+void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+
+static inline bool skb_warn_if_lro(const struct sk_buff *skb)
+{
+ /* LRO sets gso_size but not gso_type, whereas if GSO is really
+ * wanted then gso_type will be set. */
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
+ unlikely(shinfo->gso_type == 0)) {
+ __skb_warn_lro_forwarding(skb);
+ return true;
+ }
+ return false;
+}
+
+static inline void skb_forward_csum(struct sk_buff *skb)
+{
+ /* Unfortunately we don't support this one. Any brave souls? */
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+/**
+ * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
+ * @skb: skb to check
+ *
+ * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
+ * Instead of forcing ip_summed to CHECKSUM_NONE, we can
+ * use this helper, to document places where we make this assertion.
+ */
+static inline void skb_checksum_none_assert(const struct sk_buff *skb)
+{
+#ifdef DEBUG
+ BUG_ON(skb->ip_summed != CHECKSUM_NONE);
+#endif
+}
+
+bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
+
+int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
+
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen);
+
+/**
+ * skb_head_is_locked - Determine if the skb->head is locked down
+ * @skb: skb to check
+ *
+ * The head on skbs built around a head frag can be removed if they are
+ * not cloned. This function returns true if the skb head is locked down
+ * due to either being allocated via kmalloc, or by being a clone with
+ * multiple references to the head.
+ */
+static inline bool skb_head_is_locked(const struct sk_buff *skb)
+{
+ return !skb->head_frag || skb_cloned(skb);
+}
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including L3 (IP, IPv6) and L4 (TCP/UDP) headers.
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+ unsigned int hdr_len = skb_transport_header(skb) -
+ skb_network_header(skb);
+ return hdr_len + skb_gso_transport_seglen(skb);
+}
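
For context, a minimal sketch of how a forwarding path might combine skb_is_gso() and skb_gso_network_seglen() to decide whether a packet fits a link MTU; the helper name is hypothetical and not part of this header.

static bool example_gso_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	/* Non-GSO packets are compared directly against the MTU. */
	if (!skb_is_gso(skb))
		return skb->len <= mtu;

	/* For GSO packets it is the size of each resegmented packet
	 * (L3 + L4 headers plus gso_size of payload) that must fit. */
	return skb_gso_network_seglen(skb) <= mtu;
}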
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/slab.h b/include/linux/slab.h
new file mode 100644
index 000000000..ffd24c830
--- /dev/null
+++ b/include/linux/slab.h
@@ -0,0 +1,600 @@
+/*
+ * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
+ *
+ * (C) SGI 2006, Christoph Lameter
+ * Cleaned up and restructured to ease the addition of alternative
+ * implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ * Unified interface for all slab allocators
+ */
+
+#ifndef _LINUX_SLAB_H
+#define _LINUX_SLAB_H
+
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+
+/*
+ * Flags to pass to kmem_cache_create().
+ * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
+ */
+#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
+#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
+#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
+#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
+#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
+#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
+#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
+/*
+ * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ *
+ * This delays freeing the SLAB page by a grace period; it does _NOT_
+ * delay object freeing. This means that if you do kmem_cache_free(),
+ * that memory location is free to be reused at any time. Thus it may
+ * be possible to see another object there in the same RCU grace period.
+ *
+ * This feature only ensures the memory location backing the object
+ * stays valid, the trick to using this is relying on an independent
+ * object validation pass. Something like:
+ *
+ * rcu_read_lock()
+ * again:
+ * obj = lockless_lookup(key);
+ * if (obj) {
+ * if (!try_get_ref(obj)) // might fail for free objects
+ * goto again;
+ *
+ * if (obj->key != key) { // not the object we expected
+ * put_ref(obj);
+ * goto again;
+ * }
+ * }
+ * rcu_read_unlock();
+ *
+ * This is useful if we need to approach a kernel structure obliquely,
+ * from its address obtained without the usual locking. We can lock
+ * the structure to stabilize it and check it's still at the given address,
+ * only if we can be sure that the memory has not meanwhile been reused
+ * for some other kind of object (which our subsystem's lock might corrupt).
+ *
+ * rcu_read_lock before reading the address, then rcu_read_unlock after
+ * taking the spinlock within the structure expected at that address.
+ */
+#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
+#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
+#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
+
+/* Flag to prevent checks on free */
+#ifdef CONFIG_DEBUG_OBJECTS
+# define SLAB_DEBUG_OBJECTS 0x00400000UL
+#else
+# define SLAB_DEBUG_OBJECTS 0x00000000UL
+#endif
+
+#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+
+/* Don't track use of uninitialized memory */
+#ifdef CONFIG_KMEMCHECK
+# define SLAB_NOTRACK 0x01000000UL
+#else
+# define SLAB_NOTRACK 0x00000000UL
+#endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */
+#else
+# define SLAB_FAILSLAB 0x00000000UL
+#endif
+
+/* The following flags affect the page allocator grouping pages by mobility */
+#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
+#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
+/*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
+ (unsigned long)ZERO_SIZE_PTR)
+
+#include <linux/kmemleak.h>
+#include <linux/kasan.h>
+
+struct mem_cgroup;
+/*
+ * struct kmem_cache related prototypes
+ */
+void __init kmem_cache_init(void);
+int slab_is_available(void);
+
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+ unsigned long,
+ void (*)(void *));
+void kmem_cache_destroy(struct kmem_cache *);
+int kmem_cache_shrink(struct kmem_cache *);
+
+void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
+void memcg_deactivate_kmem_caches(struct mem_cgroup *);
+void memcg_destroy_kmem_caches(struct mem_cgroup *);
+
+/*
+ * Please use this macro to create slab caches. Simply specify the
+ * name of the structure and maybe some flags that are listed above.
+ *
+ * The alignment of the struct determines object alignment. If you,
+ * for example, add ____cacheline_aligned_in_smp to the struct declaration
+ * then the objects will be properly aligned in SMP configurations.
+ */
+#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
+ sizeof(struct __struct), __alignof__(struct __struct),\
+ (__flags), NULL)
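
As an illustration of the macro above, a hypothetical driver might create its cache like this; struct foo_request, foo_request_cache and foo_init() are assumptions, not part of the kernel API.

struct foo_request {
	struct list_head list;
	u32 tag;
} ____cacheline_aligned_in_smp;	/* alignment carried into the cache */

static struct kmem_cache *foo_request_cache;

static int __init foo_init(void)
{
	/* Expands to kmem_cache_create("foo_request", sizeof(struct foo_request),
	 * __alignof__(struct foo_request), SLAB_HWCACHE_ALIGN, NULL). */
	foo_request_cache = KMEM_CACHE(foo_request, SLAB_HWCACHE_ALIGN);
	return foo_request_cache ? 0 : -ENOMEM;
}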
+
+/*
+ * Common kmalloc functions provided by all allocators
+ */
+void * __must_check __krealloc(const void *, size_t, gfp_t);
+void * __must_check krealloc(const void *, size_t, gfp_t);
+void kfree(const void *);
+void kzfree(const void *);
+size_t ksize(const void *);
+
+/*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
+#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * Kmalloc array related definitions
+ */
+
+#ifdef CONFIG_SLAB
+/*
+ * The largest kmalloc size supported by the SLAB allocators is
+ * 32 megabyte (2^25) or the maximum allocatable page order if that is
+ * less than 32 MB.
+ *
+ * WARNING: It's not easy to increase this value since the allocators have
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
+ (MAX_ORDER + PAGE_SHIFT - 1) : 25)
+#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW 5
+#endif
+#endif
+
+#ifdef CONFIG_SLUB
+/*
+ * SLUB directly allocates requests fitting into an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
+ */
+#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW 3
+#endif
+#endif
+
+#ifdef CONFIG_SLOB
+/*
+ * SLOB passes all requests larger than one page to the page allocator.
+ * No kmalloc array is necessary since objects of different sizes can
+ * be allocated from the same page.
+ */
+#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
+#define KMALLOC_SHIFT_MAX 30
+#ifndef KMALLOC_SHIFT_LOW
+#define KMALLOC_SHIFT_LOW 3
+#endif
+#endif
+
+/* Maximum allocatable size */
+#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
+/* Maximum size for which we actually use a slab cache */
+#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
+/* Maximum order allocatable via the slab allocator */
+#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
+
+/*
+ * Kmalloc subsystem.
+ */
+#ifndef KMALLOC_MIN_SIZE
+#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
+#endif
+
+/*
+ * This restriction comes from the byte-sized index implementation.
+ * Page size is normally 2^12 bytes and, in this case, if we want to use
+ * a byte-sized index which can represent 2^8 entries, the size of the object
+ * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
+ * If the minimum kmalloc size is less than 16, we use it as the minimum
+ * object size and give up on using the byte-sized index.
+ */
+#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
+ (KMALLOC_MIN_SIZE) : 16)
+
+#ifndef CONFIG_SLOB
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+#ifdef CONFIG_ZONE_DMA
+extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+#endif
+
+/*
+ * Figure out which kmalloc slab an allocation of a certain size
+ * belongs to.
+ * 0 = zero alloc
+ * 1 = 65 .. 96 bytes
+ * 2 = 129 .. 192 bytes
+ * n = 2^(n-1)+1 .. 2^n
+ */
+static __always_inline int kmalloc_index(size_t size)
+{
+ if (!size)
+ return 0;
+
+ if (size <= KMALLOC_MIN_SIZE)
+ return KMALLOC_SHIFT_LOW;
+
+ if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
+ return 1;
+ if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
+ return 2;
+ if (size <= 8) return 3;
+ if (size <= 16) return 4;
+ if (size <= 32) return 5;
+ if (size <= 64) return 6;
+ if (size <= 128) return 7;
+ if (size <= 256) return 8;
+ if (size <= 512) return 9;
+ if (size <= 1024) return 10;
+ if (size <= 2 * 1024) return 11;
+ if (size <= 4 * 1024) return 12;
+ if (size <= 8 * 1024) return 13;
+ if (size <= 16 * 1024) return 14;
+ if (size <= 32 * 1024) return 15;
+ if (size <= 64 * 1024) return 16;
+ if (size <= 128 * 1024) return 17;
+ if (size <= 256 * 1024) return 18;
+ if (size <= 512 * 1024) return 19;
+ if (size <= 1024 * 1024) return 20;
+ if (size <= 2 * 1024 * 1024) return 21;
+ if (size <= 4 * 1024 * 1024) return 22;
+ if (size <= 8 * 1024 * 1024) return 23;
+ if (size <= 16 * 1024 * 1024) return 24;
+ if (size <= 32 * 1024 * 1024) return 25;
+ if (size <= 64 * 1024 * 1024) return 26;
+ BUG();
+
+ /* Will never be reached. Needed because the compiler may complain */
+ return -1;
+}
+#endif /* !CONFIG_SLOB */
+
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+void kmem_cache_free(struct kmem_cache *, void *);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+ return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+ gfp_t flags, size_t size)
+{
+ void *ret = kmem_cache_alloc(s, flags);
+
+ kasan_kmalloc(s, ret, size);
+ return ret;
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ void *ret = kmem_cache_alloc_node(s, gfpflags, node);
+
+ kasan_kmalloc(s, ret, size);
+ return ret;
+}
+#endif /* CONFIG_TRACING */
+
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+ return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+ unsigned int order = get_order(size);
+ return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ *
+ * The @flags argument may be one of:
+ *
+ * %GFP_USER - Allocate memory on behalf of user. May sleep.
+ *
+ * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
+ *
+ * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
+ * For example, use this inside interrupt handlers.
+ *
+ * %GFP_HIGHUSER - Allocate pages from high memory.
+ *
+ * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
+ *
+ * %GFP_NOFS - Do not make any fs calls while trying to get memory.
+ *
+ * %GFP_NOWAIT - Allocation will not sleep.
+ *
+ * %__GFP_THISNODE - Allocate node-local memory only.
+ *
+ * %GFP_DMA - Allocation suitable for DMA.
+ * Should only be used for kmalloc() caches. Otherwise, use a
+ * slab cache created with SLAB_CACHE_DMA.
+ *
+ * Also it is possible to set different flags by OR'ing
+ * in one or more of the following additional @flags:
+ *
+ * %__GFP_COLD - Request cache-cold pages instead of
+ * trying to return cache-warm pages.
+ *
+ * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
+ *
+ * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
+ * (think twice before using).
+ *
+ * %__GFP_NORETRY - If memory is not immediately available,
+ * then give up at once.
+ *
+ * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
+ *
+ * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
+ *
+ * There are other flags available as well, but these are not intended
+ * for general use, and so are not documented here. For a full list of
+ * potential flags, always refer to linux/gfp.h.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+ if (!(flags & GFP_DMA)) {
+ int index = kmalloc_index(size);
+
+ if (!index)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_trace(kmalloc_caches[index],
+ flags, size);
+ }
+#endif
+ }
+ return __kmalloc(size, flags);
+}
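
A brief, hedged sketch of the flags documented above in use; struct foo_ctx and the surrounding helpers are hypothetical.

struct foo_ctx { int id; };

/* Process context: GFP_KERNEL may sleep to satisfy the allocation. */
static struct foo_ctx *foo_ctx_create(void)
{
	return kmalloc(sizeof(struct foo_ctx), GFP_KERNEL);
}

/* Interrupt context: GFP_ATOMIC never sleeps and may use emergency pools. */
static struct foo_ctx *foo_ctx_create_atomic(void)
{
	return kmalloc(sizeof(struct foo_ctx), GFP_ATOMIC);
}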
+
+/*
+ * Determine the size used for the nth kmalloc cache.
+ * Returns the size, or 0 if a kmalloc cache for that
+ * size does not exist.
+ */
+static __always_inline int kmalloc_size(int n)
+{
+#ifndef CONFIG_SLOB
+ if (n > 2)
+ return 1 << n;
+
+ if (n == 1 && KMALLOC_MIN_SIZE <= 32)
+ return 96;
+
+ if (n == 2 && KMALLOC_MIN_SIZE <= 64)
+ return 192;
+#endif
+ return 0;
+}
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) &&
+ size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+ int i = kmalloc_index(size);
+
+ if (!i)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+ flags, node, size);
+ }
+#endif
+ return __kmalloc_node(size, flags, node);
+}
+
+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64 bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+struct memcg_cache_array {
+ struct rcu_head rcu;
+ struct kmem_cache *entries[0];
+};
+
+/*
+ * This is the main placeholder for memcg-related information in kmem caches.
+ * Both the root cache and the child caches will have it. For the root cache,
+ * this will hold a dynamically allocated array large enough to hold
+ * information about the currently limited memcgs in the system. To allow the
+ * array to be accessed without taking any locks, on relocation we free the old
+ * version only after a grace period.
+ *
+ * Child caches will hold extra metadata needed for their operation. Fields are:
+ *
+ * @memcg: pointer to the memcg this cache belongs to
+ * @root_cache: pointer to the global, root cache, this cache was derived from
+ *
+ * Both root and child caches of the same kind are linked into a list chained
+ * through @list.
+ */
+struct memcg_cache_params {
+ bool is_root_cache;
+ struct list_head list;
+ union {
+ struct memcg_cache_array __rcu *memcg_caches;
+ struct {
+ struct mem_cgroup *memcg;
+ struct kmem_cache *root_cache;
+ };
+ };
+};
+
+int memcg_update_all_caches(int num_memcgs);
+
+/**
+ * kmalloc_array - allocate memory for an array.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+ return __kmalloc(n * size, flags);
+}
+
+/**
+ * kcalloc - allocate memory for an array. The memory is set to zero.
+ * @n: number of elements.
+ * @size: element size.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
+{
+ return kmalloc_array(n, size, flags | __GFP_ZERO);
+}
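
To show what the overflow check buys, a short sketch of allocating an n-element table; alloc_table_example() is hypothetical.

static u32 *alloc_table_example(size_t n)
{
	/* An open-coded kmalloc(n * sizeof(u32), ...) could silently wrap;
	 * kcalloc() returns NULL instead and zeroes the memory on success. */
	return kcalloc(n, sizeof(u32), GFP_KERNEL);
}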
+
+/*
+ * kmalloc_track_caller is a special version of kmalloc that records the
+ * calling function of the routine calling it for slab leak tracking instead
+ * of just the calling function (confusing, eh?).
+ * It's useful when the call to kmalloc comes from a widely-used standard
+ * allocator where we care about the real place the memory allocation
+ * request comes from.
+ */
+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
+#define kmalloc_track_caller(size, flags) \
+ __kmalloc_track_caller(size, flags, _RET_IP_)
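
A sketch of the intended use, in the spirit of wrappers such as kstrdup(); foo_alloc() is a hypothetical wrapper allocator.

static void *foo_alloc(size_t len, gfp_t gfp)
{
	/* Records the caller of foo_alloc() (via _RET_IP_) rather than
	 * foo_alloc() itself, so leak reports point at the real origin. */
	return kmalloc_track_caller(len, gfp);
}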
+
+#ifdef CONFIG_NUMA
+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
+#define kmalloc_node_track_caller(size, flags, node) \
+ __kmalloc_node_track_caller(size, flags, node, \
+ _RET_IP_)
+
+#else /* CONFIG_NUMA */
+
+#define kmalloc_node_track_caller(size, flags, node) \
+ kmalloc_track_caller(size, flags)
+
+#endif /* CONFIG_NUMA */
+
+/*
+ * Shortcuts
+ */
+static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+{
+ return kmem_cache_alloc(k, flags | __GFP_ZERO);
+}
+
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+ return kmalloc(size, flags | __GFP_ZERO);
+}
+
+/**
+ * kzalloc_node - allocate zeroed memory from a particular memory node.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ * @node: memory node from which to allocate
+ */
+static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kmalloc_node(size, flags | __GFP_ZERO, node);
+}
+
+unsigned int kmem_cache_size(struct kmem_cache *s);
+void __init kmem_cache_init_late(void);
+
+#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
new file mode 100644
index 000000000..33d049066
--- /dev/null
+++ b/include/linux/slab_def.h
@@ -0,0 +1,79 @@
+#ifndef _LINUX_SLAB_DEF_H
+#define _LINUX_SLAB_DEF_H
+
+#include <linux/reciprocal_div.h>
+
+/*
+ * Definitions unique to the original Linux SLAB allocator.
+ */
+
+struct kmem_cache {
+ struct array_cache __percpu *cpu_cache;
+
+/* 1) Cache tunables. Protected by slab_mutex */
+ unsigned int batchcount;
+ unsigned int limit;
+ unsigned int shared;
+
+ unsigned int size;
+ struct reciprocal_value reciprocal_buffer_size;
+/* 2) touched by every alloc & free from the backend */
+
+ unsigned int flags; /* constant flags */
+ unsigned int num; /* # of objs per slab */
+
+/* 3) cache_grow/shrink */
+ /* order of pgs per slab (2^n) */
+ unsigned int gfporder;
+
+ /* force GFP flags, e.g. GFP_DMA */
+ gfp_t allocflags;
+
+ size_t colour; /* cache colouring range */
+ unsigned int colour_off; /* colour offset */
+ struct kmem_cache *freelist_cache;
+ unsigned int freelist_size;
+
+ /* constructor func */
+ void (*ctor)(void *obj);
+
+/* 4) cache creation/removal */
+ const char *name;
+ struct list_head list;
+ int refcount;
+ int object_size;
+ int align;
+
+/* 5) statistics */
+#ifdef CONFIG_DEBUG_SLAB
+ unsigned long num_active;
+ unsigned long num_allocations;
+ unsigned long high_mark;
+ unsigned long grown;
+ unsigned long reaped;
+ unsigned long errors;
+ unsigned long max_freeable;
+ unsigned long node_allocs;
+ unsigned long node_frees;
+ unsigned long node_overflow;
+ atomic_t allochit;
+ atomic_t allocmiss;
+ atomic_t freehit;
+ atomic_t freemiss;
+
+ /*
+ * If debugging is enabled, then the allocator can add additional
+ * fields and/or padding to every object. size contains the total
+ * object size including these internal fields, the following two
+ * variables contain the offset to the user object and its size.
+ */
+ int obj_offset;
+#endif /* CONFIG_DEBUG_SLAB */
+#ifdef CONFIG_MEMCG_KMEM
+ struct memcg_cache_params memcg_params;
+#endif
+
+ struct kmem_cache_node *node[MAX_NUMNODES];
+};
+
+#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
new file mode 100644
index 000000000..338851185
--- /dev/null
+++ b/include/linux/slub_def.h
@@ -0,0 +1,132 @@
+#ifndef _LINUX_SLUB_DEF_H
+#define _LINUX_SLUB_DEF_H
+
+/*
+ * SLUB : A Slab allocator without object queues.
+ *
+ * (C) 2007 SGI, Christoph Lameter
+ */
+#include <linux/kobject.h>
+
+enum stat_item {
+ ALLOC_FASTPATH, /* Allocation from cpu slab */
+ ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
+ FREE_FASTPATH, /* Free to cpu slab */
+ FREE_SLOWPATH, /* Freeing not to cpu slab */
+ FREE_FROZEN, /* Freeing to frozen slab */
+ FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
+ FREE_REMOVE_PARTIAL, /* Freeing removes last object */
+ ALLOC_FROM_PARTIAL, /* Cpu slab acquired from node partial list */
+ ALLOC_SLAB, /* Cpu slab acquired from page allocator */
+ ALLOC_REFILL, /* Refill cpu slab from slab freelist */
+ ALLOC_NODE_MISMATCH, /* Switching cpu slab */
+ FREE_SLAB, /* Slab freed to the page allocator */
+ CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
+ DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
+ DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
+ DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
+ DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
+ DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+ DEACTIVATE_BYPASS, /* Implicit deactivation */
+ ORDER_FALLBACK, /* Number of times fallback was necessary */
+ CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
+ CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
+ CPU_PARTIAL_ALLOC, /* Used cpu partial on alloc */
+ CPU_PARTIAL_FREE, /* Refill cpu partial on free */
+ CPU_PARTIAL_NODE, /* Refill cpu partial from node partial */
+ CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */
+ NR_SLUB_STAT_ITEMS };
+
+struct kmem_cache_cpu {
+ void **freelist; /* Pointer to next available object */
+ unsigned long tid; /* Globally unique transaction id */
+ struct page *page; /* The slab from which we are allocating */
+ struct page *partial; /* Partially allocated frozen slabs */
+#ifdef CONFIG_SLUB_STATS
+ unsigned stat[NR_SLUB_STAT_ITEMS];
+#endif
+};
+
+/*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+ unsigned long x;
+};
+
+/*
+ * Slab cache management.
+ */
+struct kmem_cache {
+ struct kmem_cache_cpu __percpu *cpu_slab;
+	/* Used for retrieving partial slabs, etc. */
+ unsigned long flags;
+ unsigned long min_partial;
+ int size; /* The size of an object including meta data */
+ int object_size; /* The size of an object without meta data */
+ int offset; /* Free pointer offset. */
+ int cpu_partial; /* Number of per cpu partial objects to keep around */
+ struct kmem_cache_order_objects oo;
+
+ /* Allocation and freeing of slabs */
+ struct kmem_cache_order_objects max;
+ struct kmem_cache_order_objects min;
+ gfp_t allocflags; /* gfp flags to use on each alloc */
+ int refcount; /* Refcount for slab cache destroy */
+ void (*ctor)(void *);
+ int inuse; /* Offset to metadata */
+ int align; /* Alignment */
+ int reserved; /* Reserved bytes at the end of slabs */
+ const char *name; /* Name (only for display!) */
+ struct list_head list; /* List of slab caches */
+#ifdef CONFIG_SYSFS
+ struct kobject kobj; /* For sysfs */
+#endif
+#ifdef CONFIG_MEMCG_KMEM
+ struct memcg_cache_params memcg_params;
+ int max_attr_size; /* for propagation, maximum size of a stored attr */
+#ifdef CONFIG_SYSFS
+ struct kset *memcg_kset;
+#endif
+#endif
+
+#ifdef CONFIG_NUMA
+ /*
+ * Defragmentation by allocating from a remote node.
+ */
+ int remote_node_defrag_ratio;
+#endif
+ struct kmem_cache_node *node[MAX_NUMNODES];
+};
+
+#ifdef CONFIG_SYSFS
+#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_remove(struct kmem_cache *);
+#else
+static inline void sysfs_slab_remove(struct kmem_cache *s)
+{
+}
+#endif
+
+
+/**
+ * virt_to_obj - returns address of the beginning of object.
+ * @s: object's kmem_cache
+ * @slab_page: address of slab page
+ * @x: address within object memory range
+ *
+ * Returns address of the beginning of object
+ */
+static inline void *virt_to_obj(struct kmem_cache *s,
+ const void *slab_page,
+ const void *x)
+{
+ return (void *)x - ((x - slab_page) % s->size);
+}
+
+void object_err(struct kmem_cache *s, struct page *page,
+ u8 *object, char *reason);
+
+#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h
new file mode 100644
index 000000000..67ed2c542
--- /dev/null
+++ b/include/linux/sm501-regs.h
@@ -0,0 +1,388 @@
+/* sm501-regs.h
+ *
+ * Copyright 2006 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Silicon Motion SM501 register definitions
+*/
+
+/* System Configuration area */
+/* System config base */
+#define SM501_SYS_CONFIG (0x000000)
+
+/* config 1 */
+#define SM501_SYSTEM_CONTROL (0x000000)
+
+#define SM501_SYSCTRL_PANEL_TRISTATE (1<<0)
+#define SM501_SYSCTRL_MEM_TRISTATE (1<<1)
+#define SM501_SYSCTRL_CRT_TRISTATE (1<<2)
+
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_MASK (3<<4)
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_1 (0<<4)
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_2 (1<<4)
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_4 (2<<4)
+#define SM501_SYSCTRL_PCI_SLAVE_BURST_8 (3<<4)
+
+#define SM501_SYSCTRL_PCI_CLOCK_RUN_EN (1<<6)
+#define SM501_SYSCTRL_PCI_RETRY_DISABLE (1<<7)
+#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
+#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
+
+#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19)
+
+/* miscellaneous control */
+
+#define SM501_MISC_CONTROL (0x000004)
+
+#define SM501_MISC_BUS_SH (0x0)
+#define SM501_MISC_BUS_PCI (0x1)
+#define SM501_MISC_BUS_XSCALE (0x2)
+#define SM501_MISC_BUS_NEC (0x6)
+#define SM501_MISC_BUS_MASK (0x7)
+
+#define SM501_MISC_VR_62MB (1<<3)
+#define SM501_MISC_CDR_RESET (1<<7)
+#define SM501_MISC_USB_LB (1<<8)
+#define SM501_MISC_USB_SLAVE (1<<9)
+#define SM501_MISC_BL_1 (1<<10)
+#define SM501_MISC_MC (1<<11)
+#define SM501_MISC_DAC_POWER (1<<12)
+#define SM501_MISC_IRQ_INVERT (1<<16)
+#define SM501_MISC_SH (1<<17)
+
+#define SM501_MISC_HOLD_EMPTY (0<<18)
+#define SM501_MISC_HOLD_8 (1<<18)
+#define SM501_MISC_HOLD_16 (2<<18)
+#define SM501_MISC_HOLD_24 (3<<18)
+#define SM501_MISC_HOLD_32 (4<<18)
+#define SM501_MISC_HOLD_MASK (7<<18)
+
+#define SM501_MISC_FREQ_12 (1<<24)
+#define SM501_MISC_PNL_24BIT (1<<25)
+#define SM501_MISC_8051_LE (1<<26)
+
+
+
+#define SM501_GPIO31_0_CONTROL (0x000008)
+#define SM501_GPIO63_32_CONTROL (0x00000C)
+#define SM501_DRAM_CONTROL (0x000010)
+
+/* command list */
+#define SM501_ARBTRTN_CONTROL (0x000014)
+
+/* command list */
+#define SM501_COMMAND_LIST_STATUS (0x000024)
+
+/* interrupt debug */
+#define SM501_RAW_IRQ_STATUS (0x000028)
+#define SM501_RAW_IRQ_CLEAR (0x000028)
+#define SM501_IRQ_STATUS (0x00002C)
+#define SM501_IRQ_MASK (0x000030)
+#define SM501_DEBUG_CONTROL (0x000034)
+
+/* power management */
+#define SM501_POWERMODE_P2X_SRC (1<<29)
+#define SM501_POWERMODE_V2X_SRC (1<<20)
+#define SM501_POWERMODE_M_SRC (1<<12)
+#define SM501_POWERMODE_M1_SRC (1<<4)
+
+#define SM501_CURRENT_GATE (0x000038)
+#define SM501_CURRENT_CLOCK (0x00003C)
+#define SM501_POWER_MODE_0_GATE (0x000040)
+#define SM501_POWER_MODE_0_CLOCK (0x000044)
+#define SM501_POWER_MODE_1_GATE (0x000048)
+#define SM501_POWER_MODE_1_CLOCK (0x00004C)
+#define SM501_SLEEP_MODE_GATE (0x000050)
+#define SM501_POWER_MODE_CONTROL (0x000054)
+
+/* power gates for units within the 501 */
+#define SM501_GATE_HOST (0)
+#define SM501_GATE_MEMORY (1)
+#define SM501_GATE_DISPLAY (2)
+#define SM501_GATE_2D_ENGINE (3)
+#define SM501_GATE_CSC (4)
+#define SM501_GATE_ZVPORT (5)
+#define SM501_GATE_GPIO (6)
+#define SM501_GATE_UART0 (7)
+#define SM501_GATE_UART1 (8)
+#define SM501_GATE_SSP (10)
+#define SM501_GATE_USB_HOST (11)
+#define SM501_GATE_USB_GADGET (12)
+#define SM501_GATE_UCONTROLLER (17)
+#define SM501_GATE_AC97 (18)
+
+/* panel clock */
+#define SM501_CLOCK_P2XCLK (24)
+/* crt clock */
+#define SM501_CLOCK_V2XCLK (16)
+/* main clock */
+#define SM501_CLOCK_MCLK (8)
+/* SDRAM controller clock */
+#define SM501_CLOCK_M1XCLK (0)
+
+/* config 2 */
+#define SM501_PCI_MASTER_BASE (0x000058)
+#define SM501_ENDIAN_CONTROL (0x00005C)
+#define SM501_DEVICEID (0x000060)
+/* 0x050100A0 */
+
+#define SM501_DEVICEID_SM501 (0x05010000)
+#define SM501_DEVICEID_IDMASK (0xffff0000)
+#define SM501_DEVICEID_REVMASK (0x000000ff)
+
+#define SM501_PLLCLOCK_COUNT (0x000064)
+#define SM501_MISC_TIMING (0x000068)
+#define SM501_CURRENT_SDRAM_CLOCK (0x00006C)
+
+#define SM501_PROGRAMMABLE_PLL_CONTROL (0x000074)
+
+/* GPIO base */
+#define SM501_GPIO (0x010000)
+#define SM501_GPIO_DATA_LOW (0x00)
+#define SM501_GPIO_DATA_HIGH (0x04)
+#define SM501_GPIO_DDR_LOW (0x08)
+#define SM501_GPIO_DDR_HIGH (0x0C)
+#define SM501_GPIO_IRQ_SETUP (0x10)
+#define SM501_GPIO_IRQ_STATUS (0x14)
+#define SM501_GPIO_IRQ_RESET (0x14)
+
+/* I2C controller base */
+#define SM501_I2C (0x010040)
+#define SM501_I2C_BYTE_COUNT (0x00)
+#define SM501_I2C_CONTROL (0x01)
+#define SM501_I2C_STATUS (0x02)
+#define SM501_I2C_RESET (0x02)
+#define SM501_I2C_SLAVE_ADDRESS (0x03)
+#define SM501_I2C_DATA (0x04)
+
+/* SSP base */
+#define SM501_SSP (0x020000)
+
+/* Uart 0 base */
+#define SM501_UART0 (0x030000)
+
+/* Uart 1 base */
+#define SM501_UART1 (0x030020)
+
+/* USB host port base */
+#define SM501_USB_HOST (0x040000)
+
+/* USB slave/gadget base */
+#define SM501_USB_GADGET (0x060000)
+
+/* USB slave/gadget data port base */
+#define SM501_USB_GADGET_DATA (0x070000)
+
+/* Display controller/video engine base */
+#define SM501_DC (0x080000)
+
+/* common defines for the SM501 address registers */
+#define SM501_ADDR_FLIP (1<<31)
+#define SM501_ADDR_EXT (1<<27)
+#define SM501_ADDR_CS1 (1<<26)
+#define SM501_ADDR_MASK (0x3f << 26)
+
+#define SM501_FIFO_MASK (0x3 << 16)
+#define SM501_FIFO_1 (0x0 << 16)
+#define SM501_FIFO_3 (0x1 << 16)
+#define SM501_FIFO_7 (0x2 << 16)
+#define SM501_FIFO_11 (0x3 << 16)
+
+/* common registers for panel and the crt */
+#define SM501_OFF_DC_H_TOT (0x000)
+#define SM501_OFF_DC_V_TOT (0x008)
+#define SM501_OFF_DC_H_SYNC (0x004)
+#define SM501_OFF_DC_V_SYNC (0x00C)
+
+#define SM501_DC_PANEL_CONTROL (0x000)
+
+#define SM501_DC_PANEL_CONTROL_FPEN (1<<27)
+#define SM501_DC_PANEL_CONTROL_BIAS (1<<26)
+#define SM501_DC_PANEL_CONTROL_DATA (1<<25)
+#define SM501_DC_PANEL_CONTROL_VDD (1<<24)
+#define SM501_DC_PANEL_CONTROL_DP (1<<23)
+
+#define SM501_DC_PANEL_CONTROL_TFT_888 (0<<21)
+#define SM501_DC_PANEL_CONTROL_TFT_333 (1<<21)
+#define SM501_DC_PANEL_CONTROL_TFT_444 (2<<21)
+
+#define SM501_DC_PANEL_CONTROL_DE (1<<20)
+
+#define SM501_DC_PANEL_CONTROL_LCD_TFT (0<<18)
+#define SM501_DC_PANEL_CONTROL_LCD_STN8 (1<<18)
+#define SM501_DC_PANEL_CONTROL_LCD_STN12 (2<<18)
+
+#define SM501_DC_PANEL_CONTROL_CP (1<<14)
+#define SM501_DC_PANEL_CONTROL_VSP (1<<13)
+#define SM501_DC_PANEL_CONTROL_HSP (1<<12)
+#define SM501_DC_PANEL_CONTROL_CK (1<<9)
+#define SM501_DC_PANEL_CONTROL_TE (1<<8)
+#define SM501_DC_PANEL_CONTROL_VPD (1<<7)
+#define SM501_DC_PANEL_CONTROL_VP (1<<6)
+#define SM501_DC_PANEL_CONTROL_HPD (1<<5)
+#define SM501_DC_PANEL_CONTROL_HP (1<<4)
+#define SM501_DC_PANEL_CONTROL_GAMMA (1<<3)
+#define SM501_DC_PANEL_CONTROL_EN (1<<2)
+
+#define SM501_DC_PANEL_CONTROL_8BPP (0<<0)
+#define SM501_DC_PANEL_CONTROL_16BPP (1<<0)
+#define SM501_DC_PANEL_CONTROL_32BPP (2<<0)
+
+
+#define SM501_DC_PANEL_PANNING_CONTROL (0x004)
+#define SM501_DC_PANEL_COLOR_KEY (0x008)
+#define SM501_DC_PANEL_FB_ADDR (0x00C)
+#define SM501_DC_PANEL_FB_OFFSET (0x010)
+#define SM501_DC_PANEL_FB_WIDTH (0x014)
+#define SM501_DC_PANEL_FB_HEIGHT (0x018)
+#define SM501_DC_PANEL_TL_LOC (0x01C)
+#define SM501_DC_PANEL_BR_LOC (0x020)
+#define SM501_DC_PANEL_H_TOT (0x024)
+#define SM501_DC_PANEL_H_SYNC (0x028)
+#define SM501_DC_PANEL_V_TOT (0x02C)
+#define SM501_DC_PANEL_V_SYNC (0x030)
+#define SM501_DC_PANEL_CUR_LINE (0x034)
+
+#define SM501_DC_VIDEO_CONTROL (0x040)
+#define SM501_DC_VIDEO_FB0_ADDR (0x044)
+#define SM501_DC_VIDEO_FB_WIDTH (0x048)
+#define SM501_DC_VIDEO_FB0_LAST_ADDR (0x04C)
+#define SM501_DC_VIDEO_TL_LOC (0x050)
+#define SM501_DC_VIDEO_BR_LOC (0x054)
+#define SM501_DC_VIDEO_SCALE (0x058)
+#define SM501_DC_VIDEO_INIT_SCALE (0x05C)
+#define SM501_DC_VIDEO_YUV_CONSTANTS (0x060)
+#define SM501_DC_VIDEO_FB1_ADDR (0x064)
+#define SM501_DC_VIDEO_FB1_LAST_ADDR (0x068)
+
+#define SM501_DC_VIDEO_ALPHA_CONTROL (0x080)
+#define SM501_DC_VIDEO_ALPHA_FB_ADDR (0x084)
+#define SM501_DC_VIDEO_ALPHA_FB_OFFSET (0x088)
+#define SM501_DC_VIDEO_ALPHA_FB_LAST_ADDR (0x08C)
+#define SM501_DC_VIDEO_ALPHA_TL_LOC (0x090)
+#define SM501_DC_VIDEO_ALPHA_BR_LOC (0x094)
+#define SM501_DC_VIDEO_ALPHA_SCALE (0x098)
+#define SM501_DC_VIDEO_ALPHA_INIT_SCALE (0x09C)
+#define SM501_DC_VIDEO_ALPHA_CHROMA_KEY (0x0A0)
+#define SM501_DC_VIDEO_ALPHA_COLOR_LOOKUP (0x0A4)
+
+#define SM501_DC_PANEL_HWC_BASE (0x0F0)
+#define SM501_DC_PANEL_HWC_ADDR (0x0F0)
+#define SM501_DC_PANEL_HWC_LOC (0x0F4)
+#define SM501_DC_PANEL_HWC_COLOR_1_2 (0x0F8)
+#define SM501_DC_PANEL_HWC_COLOR_3 (0x0FC)
+
+#define SM501_HWC_EN (1<<31)
+
+#define SM501_OFF_HWC_ADDR (0x00)
+#define SM501_OFF_HWC_LOC (0x04)
+#define SM501_OFF_HWC_COLOR_1_2 (0x08)
+#define SM501_OFF_HWC_COLOR_3 (0x0C)
+
+#define SM501_DC_ALPHA_CONTROL (0x100)
+#define SM501_DC_ALPHA_FB_ADDR (0x104)
+#define SM501_DC_ALPHA_FB_OFFSET (0x108)
+#define SM501_DC_ALPHA_TL_LOC (0x10C)
+#define SM501_DC_ALPHA_BR_LOC (0x110)
+#define SM501_DC_ALPHA_CHROMA_KEY (0x114)
+#define SM501_DC_ALPHA_COLOR_LOOKUP (0x118)
+
+#define SM501_DC_CRT_CONTROL (0x200)
+
+#define SM501_DC_CRT_CONTROL_TVP (1<<15)
+#define SM501_DC_CRT_CONTROL_CP (1<<14)
+#define SM501_DC_CRT_CONTROL_VSP (1<<13)
+#define SM501_DC_CRT_CONTROL_HSP (1<<12)
+#define SM501_DC_CRT_CONTROL_VS (1<<11)
+#define SM501_DC_CRT_CONTROL_BLANK (1<<10)
+#define SM501_DC_CRT_CONTROL_SEL (1<<9)
+#define SM501_DC_CRT_CONTROL_TE (1<<8)
+#define SM501_DC_CRT_CONTROL_PIXEL_MASK (0xF << 4)
+#define SM501_DC_CRT_CONTROL_GAMMA (1<<3)
+#define SM501_DC_CRT_CONTROL_ENABLE (1<<2)
+
+#define SM501_DC_CRT_CONTROL_8BPP (0<<0)
+#define SM501_DC_CRT_CONTROL_16BPP (1<<0)
+#define SM501_DC_CRT_CONTROL_32BPP (2<<0)
+
+#define SM501_DC_CRT_FB_ADDR (0x204)
+#define SM501_DC_CRT_FB_OFFSET (0x208)
+#define SM501_DC_CRT_H_TOT (0x20C)
+#define SM501_DC_CRT_H_SYNC (0x210)
+#define SM501_DC_CRT_V_TOT (0x214)
+#define SM501_DC_CRT_V_SYNC (0x218)
+#define SM501_DC_CRT_SIGNATURE_ANALYZER (0x21C)
+#define SM501_DC_CRT_CUR_LINE (0x220)
+#define SM501_DC_CRT_MONITOR_DETECT (0x224)
+
+#define SM501_DC_CRT_HWC_BASE (0x230)
+#define SM501_DC_CRT_HWC_ADDR (0x230)
+#define SM501_DC_CRT_HWC_LOC (0x234)
+#define SM501_DC_CRT_HWC_COLOR_1_2 (0x238)
+#define SM501_DC_CRT_HWC_COLOR_3 (0x23C)
+
+#define SM501_DC_PANEL_PALETTE (0x400)
+
+#define SM501_DC_VIDEO_PALETTE (0x800)
+
+#define SM501_DC_CRT_PALETTE (0xC00)
+
+/* Zoom Video port base */
+#define SM501_ZVPORT (0x090000)
+
+/* AC97/I2S base */
+#define SM501_AC97 (0x0A0000)
+
+/* 8051 micro controller base */
+#define SM501_UCONTROLLER (0x0B0000)
+
+/* 8051 micro controller SRAM base */
+#define SM501_UCONTROLLER_SRAM (0x0C0000)
+
+/* DMA base */
+#define SM501_DMA (0x0D0000)
+
+/* 2d engine base */
+#define SM501_2D_ENGINE (0x100000)
+#define SM501_2D_SOURCE (0x00)
+#define SM501_2D_DESTINATION (0x04)
+#define SM501_2D_DIMENSION (0x08)
+#define SM501_2D_CONTROL (0x0C)
+#define SM501_2D_PITCH (0x10)
+#define SM501_2D_FOREGROUND (0x14)
+#define SM501_2D_BACKGROUND (0x18)
+#define SM501_2D_STRETCH (0x1C)
+#define SM501_2D_COLOR_COMPARE (0x20)
+#define SM501_2D_COLOR_COMPARE_MASK (0x24)
+#define SM501_2D_MASK (0x28)
+#define SM501_2D_CLIP_TL (0x2C)
+#define SM501_2D_CLIP_BR (0x30)
+#define SM501_2D_MONO_PATTERN_LOW (0x34)
+#define SM501_2D_MONO_PATTERN_HIGH (0x38)
+#define SM501_2D_WINDOW_WIDTH (0x3C)
+#define SM501_2D_SOURCE_BASE (0x40)
+#define SM501_2D_DESTINATION_BASE (0x44)
+#define SM501_2D_ALPHA (0x48)
+#define SM501_2D_WRAP (0x4C)
+#define SM501_2D_STATUS (0x50)
+
+#define SM501_CSC_Y_SOURCE_BASE (0xC8)
+#define SM501_CSC_CONSTANTS (0xCC)
+#define SM501_CSC_Y_SOURCE_X (0xD0)
+#define SM501_CSC_Y_SOURCE_Y (0xD4)
+#define SM501_CSC_U_SOURCE_BASE (0xD8)
+#define SM501_CSC_V_SOURCE_BASE (0xDC)
+#define SM501_CSC_SOURCE_DIMENSION (0xE0)
+#define SM501_CSC_SOURCE_PITCH (0xE4)
+#define SM501_CSC_DESTINATION (0xE8)
+#define SM501_CSC_DESTINATION_DIMENSION (0xEC)
+#define SM501_CSC_DESTINATION_PITCH (0xF0)
+#define SM501_CSC_SCALE_FACTOR (0xF4)
+#define SM501_CSC_DESTINATION_BASE (0xF8)
+#define SM501_CSC_CONTROL (0xFC)
+
+/* 2d engine data port base */
+#define SM501_2D_ENGINE_DATA (0x110000)
diff --git a/include/linux/sm501.h b/include/linux/sm501.h
new file mode 100644
index 000000000..02fde50a7
--- /dev/null
+++ b/include/linux/sm501.h
@@ -0,0 +1,182 @@
+/* include/linux/sm501.h
+ *
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * Vincent Sanders <vince@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+extern int sm501_unit_power(struct device *dev,
+ unsigned int unit, unsigned int to);
+
+extern unsigned long sm501_set_clock(struct device *dev,
+ int clksrc, unsigned long freq);
+
+extern unsigned long sm501_find_clock(struct device *dev,
+ int clksrc, unsigned long req_freq);
+
+/* sm501_misc_control
+ *
+ * Modify the SM501's MISC_CONTROL register
+*/
+
+extern int sm501_misc_control(struct device *dev,
+ unsigned long set, unsigned long clear);
+
+/* sm501_modify_reg
+ *
+ * Modify a register in the SM501 which may be shared with other
+ * drivers.
+*/
+
+extern unsigned long sm501_modify_reg(struct device *dev,
+ unsigned long reg,
+ unsigned long set,
+ unsigned long clear);
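
A hedged sketch of how a sub-driver might use these helpers, with constants from sm501-regs.h above; the unit, clock rate and MISC_CONTROL bit are illustrative choices only, and sm501_dev is assumed to be the SM501 MFD device.

static void example_enable_crt(struct device *sm501_dev)
{
	/* Power up the display unit behind the SM501_GATE_DISPLAY gate. */
	sm501_unit_power(sm501_dev, SM501_GATE_DISPLAY, 1);

	/* Ask for roughly 25 MHz on the CRT pixel clock; the helper returns
	 * the rate it actually managed to program. */
	sm501_set_clock(sm501_dev, SM501_CLOCK_V2XCLK, 25000000);

	/* Example MISC_CONTROL update: set no bits, clear the DAC power
	 * bit (exact bit meaning is board specific; illustrative only). */
	sm501_misc_control(sm501_dev, 0, SM501_MISC_DAC_POWER);
}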
+
+
+/* Platform data definitions */
+
+#define SM501FB_FLAG_USE_INIT_MODE (1<<0)
+#define SM501FB_FLAG_DISABLE_AT_EXIT (1<<1)
+#define SM501FB_FLAG_USE_HWCURSOR (1<<2)
+#define SM501FB_FLAG_USE_HWACCEL (1<<3)
+#define SM501FB_FLAG_PANEL_NO_FPEN (1<<4)
+#define SM501FB_FLAG_PANEL_NO_VBIASEN (1<<5)
+#define SM501FB_FLAG_PANEL_INV_FPEN (1<<6)
+#define SM501FB_FLAG_PANEL_INV_VBIASEN (1<<7)
+
+struct sm501_platdata_fbsub {
+ struct fb_videomode *def_mode;
+ unsigned int def_bpp;
+ unsigned long max_mem;
+ unsigned int flags;
+};
+
+enum sm501_fb_routing {
+ SM501_FB_OWN = 0, /* CRT=>CRT, Panel=>Panel */
+ SM501_FB_CRT_PANEL = 1, /* Panel=>CRT, Panel=>Panel */
+};
+
+/* sm501_platdata_fb flag field bit definitions */
+
+#define SM501_FBPD_SWAP_FB_ENDIAN (1<<0) /* need to endian swap */
+
+/* sm501_platdata_fb
+ *
+ * configuration data for the framebuffer driver
+*/
+
+struct sm501_platdata_fb {
+ enum sm501_fb_routing fb_route;
+ unsigned int flags;
+ struct sm501_platdata_fbsub *fb_crt;
+ struct sm501_platdata_fbsub *fb_pnl;
+};
+
+/* gpio i2c
+ *
+ * Note: we have to pass in the bus number, as it is used as the
+ * i2c-gpio driver's platform_device.id, which in turn is used to
+ * register the i2c bus.
+*/
+
+struct sm501_platdata_gpio_i2c {
+ unsigned int bus_num;
+ unsigned int pin_sda;
+ unsigned int pin_scl;
+ int udelay;
+ int timeout;
+};
+
+/* sm501_initdata
+ *
+ * Used for initialising values that may not have been set up
+ * before the driver is loaded.
+*/
+
+struct sm501_reg_init {
+ unsigned long set;
+ unsigned long mask;
+};
+
+#define SM501_USE_USB_HOST (1<<0)
+#define SM501_USE_USB_SLAVE (1<<1)
+#define SM501_USE_SSP0 (1<<2)
+#define SM501_USE_SSP1 (1<<3)
+#define SM501_USE_UART0 (1<<4)
+#define SM501_USE_UART1 (1<<5)
+#define SM501_USE_FBACCEL (1<<6)
+#define SM501_USE_AC97 (1<<7)
+#define SM501_USE_I2S (1<<8)
+#define SM501_USE_GPIO (1<<9)
+
+#define SM501_USE_ALL (0xffffffff)
+
+struct sm501_initdata {
+ struct sm501_reg_init gpio_low;
+ struct sm501_reg_init gpio_high;
+ struct sm501_reg_init misc_timing;
+ struct sm501_reg_init misc_control;
+
+ unsigned long devices;
+ unsigned long mclk; /* non-zero to modify */
+ unsigned long m1xclk; /* non-zero to modify */
+};
+
+/* sm501_init_gpio
+ *
+ * default gpio settings
+*/
+
+struct sm501_init_gpio {
+ struct sm501_reg_init gpio_data_low;
+ struct sm501_reg_init gpio_data_high;
+ struct sm501_reg_init gpio_ddr_low;
+ struct sm501_reg_init gpio_ddr_high;
+};
+
+#define SM501_FLAG_SUSPEND_OFF (1<<4)
+
+/* sm501_platdata
+ *
+ * This is passed with the platform device to allow the board
+ * to control the behaviour of the SM501 driver(s) which attach
+ * to the device.
+ *
+*/
+
+struct sm501_platdata {
+ struct sm501_initdata *init;
+ struct sm501_init_gpio *init_gpiop;
+ struct sm501_platdata_fb *fb;
+
+ int flags;
+ int gpio_base;
+
+ int (*get_power)(struct device *dev);
+ int (*set_power)(struct device *dev, unsigned int on);
+
+ struct sm501_platdata_gpio_i2c *gpio_i2c;
+ unsigned int gpio_i2c_nr;
+};
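
For orientation, a sketch of static board support data in the style used by existing boards, assuming the usual kernel headers; every value here (pins, clocks, gpio_base) is an illustrative assumption.

static struct sm501_platdata_gpio_i2c example_gpio_i2c[] = {
	[0] = {
		.bus_num	= 1,	/* becomes platform_device.id */
		.pin_sda	= 4,
		.pin_scl	= 5,
		.udelay		= 10,
		.timeout	= 200,
	},
};

static struct sm501_initdata example_initdata = {
	.devices	= SM501_USE_GPIO | SM501_USE_FBACCEL,
	.mclk		= 72 * 1000 * 1000,	/* non-zero: reprogram MCLK */
	.m1xclk		= 144 * 1000 * 1000,
};

static struct sm501_platdata example_sm501_pdata = {
	.init		= &example_initdata,
	.gpio_base	= 224,
	.gpio_i2c	= example_gpio_i2c,
	.gpio_i2c_nr	= ARRAY_SIZE(example_gpio_i2c),
};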
+
+#if defined(CONFIG_PPC32)
+#define smc501_readl(addr) ioread32be((addr))
+#define smc501_writel(val, addr) iowrite32be((val), (addr))
+#else
+#define smc501_readl(addr) readl(addr)
+#define smc501_writel(val, addr) writel(val, addr)
+#endif
diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h
new file mode 100644
index 000000000..521f37143
--- /dev/null
+++ b/include/linux/smc911x.h
@@ -0,0 +1,13 @@
+#ifndef __SMC911X_H__
+#define __SMC911X_H__
+
+#define SMC911X_USE_16BIT (1 << 0)
+#define SMC911X_USE_32BIT (1 << 1)
+
+struct smc911x_platdata {
+ unsigned long flags;
+ unsigned long irq_flags; /* IRQF_... */
+ int irq_polarity;
+};
+
+#endif /* __SMC911X_H__ */
diff --git a/include/linux/smc91x.h b/include/linux/smc91x.h
new file mode 100644
index 000000000..76199b75d
--- /dev/null
+++ b/include/linux/smc91x.h
@@ -0,0 +1,34 @@
+#ifndef __SMC91X_H__
+#define __SMC91X_H__
+
+#define SMC91X_USE_8BIT (1 << 0)
+#define SMC91X_USE_16BIT (1 << 1)
+#define SMC91X_USE_32BIT (1 << 2)
+
+#define SMC91X_NOWAIT (1 << 3)
+
+/* two bits for IO_SHIFT, let's hope later designs will keep this sane */
+#define SMC91X_IO_SHIFT_0 (0 << 4)
+#define SMC91X_IO_SHIFT_1 (1 << 4)
+#define SMC91X_IO_SHIFT_2 (2 << 4)
+#define SMC91X_IO_SHIFT_3 (3 << 4)
+#define SMC91X_IO_SHIFT(x) (((x) >> 4) & 0x3)
+
+#define SMC91X_USE_DMA (1 << 6)
+
+#define RPC_LED_100_10	(0x00)	/* LED = 100Mbps OR'ed with 10Mbps link detect */
+#define RPC_LED_RES (0x01) /* LED = Reserved */
+#define RPC_LED_10 (0x02) /* LED = 10Mbps link detect */
+#define RPC_LED_FD (0x03) /* LED = Full Duplex Mode */
+#define RPC_LED_TX_RX (0x04) /* LED = TX or RX packet occurred */
+#define RPC_LED_100 (0x05) /* LED = 100Mbps link detect */
+#define RPC_LED_TX (0x06) /* LED = TX packet occurred */
+#define RPC_LED_RX (0x07) /* LED = RX packet occurred */
+
+struct smc91x_platdata {
+ unsigned long flags;
+ unsigned char leda;
+ unsigned char ledb;
+};
+
+#endif /* __SMC91X_H__ */
diff --git a/include/linux/smp.h b/include/linux/smp.h
new file mode 100644
index 000000000..c4414074b
--- /dev/null
+++ b/include/linux/smp.h
@@ -0,0 +1,199 @@
+#ifndef __LINUX_SMP_H
+#define __LINUX_SMP_H
+
+/*
+ * Generic SMP support
+ * Alan Cox. <alan@redhat.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/llist.h>
+
+typedef void (*smp_call_func_t)(void *info);
+struct call_single_data {
+ struct llist_node llist;
+ smp_call_func_t func;
+ void *info;
+ unsigned int flags;
+};
+
+/* total number of cpus in this system (may exceed NR_CPUS) */
+extern unsigned int total_cpus;
+
+int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
+ int wait);
+
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
+
+/*
+ * Call a function on processors specified by mask, which might include
+ * the local one.
+ */
+void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
+ void *info, bool wait);
+
+/*
+ * Call a function on each processor for which the supplied function
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags);
+
+int smp_call_function_single_async(int cpu, struct call_single_data *csd);
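
A sketch of typical usage of the cross-call interface declared above; the flush helpers are hypothetical.

static void example_flush_local(void *info)
{
	/* Runs on each targeted CPU, in IPI context with IRQs disabled. */
}

static void example_flush_everywhere(void)
{
	/* wait == 1: only return once every CPU has run the function. */
	on_each_cpu(example_flush_local, NULL, 1);
}

static int example_flush_one(int cpu)
{
	/* Synchronously run the function on a single, specific CPU. */
	return smp_call_function_single(cpu, example_flush_local, NULL, 1);
}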
+
+#ifdef CONFIG_SMP
+
+#include <linux/preempt.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <asm/smp.h>
+
+/*
+ * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
+ * (defined in asm header):
+ */
+
+/*
+ * stops all CPUs but the current one:
+ */
+extern void smp_send_stop(void);
+
+/*
+ * sends a 'reschedule' event to another CPU:
+ */
+extern void smp_send_reschedule(int cpu);
+
+
+/*
+ * Prepare machine for booting other CPUs.
+ */
+extern void smp_prepare_cpus(unsigned int max_cpus);
+
+/*
+ * Bring a CPU up
+ */
+extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);
+
+/*
+ * Final polishing of CPUs
+ */
+extern void smp_cpus_done(unsigned int max_cpus);
+
+/*
+ * Call a function on all other processors
+ */
+int smp_call_function(smp_call_func_t func, void *info, int wait);
+void smp_call_function_many(const struct cpumask *mask,
+ smp_call_func_t func, void *info, bool wait);
+
+int smp_call_function_any(const struct cpumask *mask,
+ smp_call_func_t func, void *info, int wait);
+
+void kick_all_cpus_sync(void);
+void wake_up_all_idle_cpus(void);
+
+/*
+ * Generic and arch helpers
+ */
+void __init call_function_init(void);
+void generic_smp_call_function_single_interrupt(void);
+#define generic_smp_call_function_interrupt \
+ generic_smp_call_function_single_interrupt
+
+/*
+ * Mark the boot cpu "online" so that it can call console drivers in
+ * printk() and can access its per-cpu storage.
+ */
+void smp_prepare_boot_cpu(void);
+
+extern unsigned int setup_max_cpus;
+extern void __init setup_nr_cpu_ids(void);
+extern void __init smp_init(void);
+
+#else /* !SMP */
+
+static inline void smp_send_stop(void) { }
+
+/*
+ * These macros fold the SMP functionality into a single CPU system
+ */
+#define raw_smp_processor_id() 0
+static inline int up_smp_call_function(smp_call_func_t func, void *info)
+{
+ return 0;
+}
+#define smp_call_function(func, info, wait) \
+ (up_smp_call_function(func, info))
+
+static inline void smp_send_reschedule(int cpu) { }
+#define smp_prepare_boot_cpu() do {} while (0)
+#define smp_call_function_many(mask, func, info, wait) \
+ (up_smp_call_function(func, info))
+static inline void call_function_init(void) { }
+
+static inline int
+smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
+ void *info, int wait)
+{
+ return smp_call_function_single(0, func, info, wait);
+}
+
+static inline void kick_all_cpus_sync(void) { }
+static inline void wake_up_all_idle_cpus(void) { }
+
+#ifdef CONFIG_UP_LATE_INIT
+extern void __init up_late_init(void);
+static inline void smp_init(void) { up_late_init(); }
+#else
+static inline void smp_init(void) { }
+#endif
+
+#endif /* !SMP */
+
+/*
+ * smp_processor_id(): get the current CPU ID.
+ *
+ * if DEBUG_PREEMPT is enabled then we check whether it is
+ * used in a preemption-safe way. (smp_processor_id() is safe
+ * if it's used in a preemption-off critical section, or in
+ * a thread that is bound to the current CPU.)
+ *
+ * NOTE: raw_smp_processor_id() is for internal use only
+ * (smp_processor_id() is the preferred variant), but in rare
+ * instances it might also be used to turn off false positives
+ * (i.e. a smp_processor_id() use that the debugging code reports but
+ * which for some reason is legal). Don't use this to hack around
+ * the warning message, as your code might not work under PREEMPT.
+ */
+#ifdef CONFIG_DEBUG_PREEMPT
+ extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
+#else
+# define smp_processor_id() raw_smp_processor_id()
+#endif
+
+#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+#define put_cpu() preempt_enable()
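
A small sketch of the preemption-safe pattern these macros are meant for; the per-cpu counter is hypothetical and assumes <linux/percpu.h>.

static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_bump_counter(void)
{
	int cpu = get_cpu();	/* disables preemption, returns this CPU */

	per_cpu(example_counter, cpu)++;
	put_cpu();		/* re-enables preemption */
}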
+
+/*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+ */
+extern void arch_disable_smp_support(void);
+
+extern void arch_enable_nonboot_cpus_begin(void);
+extern void arch_enable_nonboot_cpus_end(void);
+
+void smp_setup_processor_id(void);
+
+#endif /* __LINUX_SMP_H */
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
new file mode 100644
index 000000000..d600afb21
--- /dev/null
+++ b/include/linux/smpboot.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_SMPBOOT_H
+#define _LINUX_SMPBOOT_H
+
+#include <linux/types.h>
+
+struct task_struct;
+/* Cookie handed to the thread_fn */
+struct smpboot_thread_data;
+
+/**
+ * struct smp_hotplug_thread - CPU hotplug related thread descriptor
+ * @store: Pointer to per cpu storage for the task pointers
+ * @list: List head for core management
+ * @thread_should_run: Check whether the thread should run or not. Called with
+ * preemption disabled.
+ * @thread_fn: The associated thread function
+ * @create: Optional setup function, called when the thread gets
+ * created (Not called from the thread context)
+ * @setup: Optional setup function, called when the thread gets
+ * operational the first time
+ * @cleanup: Optional cleanup function, called when the thread
+ * should stop (module exit)
+ * @park: Optional park function, called when the thread is
+ * parked (cpu offline)
+ * @unpark: Optional unpark function, called when the thread is
+ * unparked (cpu online)
+ * @pre_unpark: Optional unpark function, called before the thread is
+ * unparked (cpu online). This is not guaranteed to be
+ * called on the target cpu of the thread. Careful!
+ * @selfparking: Thread is not parked by the park function.
+ * @thread_comm: The base name of the thread
+ */
+struct smp_hotplug_thread {
+ struct task_struct __percpu **store;
+ struct list_head list;
+ int (*thread_should_run)(unsigned int cpu);
+ void (*thread_fn)(unsigned int cpu);
+ void (*create)(unsigned int cpu);
+ void (*setup)(unsigned int cpu);
+ void (*cleanup)(unsigned int cpu, bool online);
+ void (*park)(unsigned int cpu);
+ void (*unpark)(unsigned int cpu);
+ void (*pre_unpark)(unsigned int cpu);
+ bool selfparking;
+ const char *thread_comm;
+};
+
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
+void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
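
A minimal, hedged sketch of registering a per-cpu thread through this interface, loosely following how ksoftirqd is wired up; all of the example_* names are assumptions.

static DEFINE_PER_CPU(struct task_struct *, example_thread);

static int example_should_run(unsigned int cpu)
{
	return 0;	/* nothing pending in this sketch */
}

static void example_thread_fn(unsigned int cpu)
{
	/* Called whenever example_should_run() reported work to do. */
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_thread,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_thread_fn,
	.thread_comm		= "example/%u",
};

static int __init example_smpboot_init(void)
{
	return smpboot_register_percpu_thread(&example_threads);
}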
+
+#endif
diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h
new file mode 100644
index 000000000..eec3efd19
--- /dev/null
+++ b/include/linux/smsc911x.h
@@ -0,0 +1,63 @@
+/***************************************************************************
+ *
+ * Copyright (C) 2004-2008 SMSC
+ * Copyright (C) 2005-2008 ARM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ***************************************************************************/
+#ifndef __LINUX_SMSC911X_H__
+#define __LINUX_SMSC911X_H__
+
+#include <linux/phy.h>
+#include <linux/if_ether.h>
+
+/* platform_device configuration data, should be assigned to
+ * the platform_device's dev.platform_data */
+struct smsc911x_platform_config {
+ unsigned int irq_polarity;
+ unsigned int irq_type;
+ unsigned int flags;
+ unsigned int shift;
+ phy_interface_t phy_interface;
+ unsigned char mac[ETH_ALEN];
+};
+
+/* Constants for platform_device irq polarity configuration */
+#define SMSC911X_IRQ_POLARITY_ACTIVE_LOW 0
+#define SMSC911X_IRQ_POLARITY_ACTIVE_HIGH 1
+
+/* Constants for platform_device irq type configuration */
+#define SMSC911X_IRQ_TYPE_OPEN_DRAIN 0
+#define SMSC911X_IRQ_TYPE_PUSH_PULL 1
+
+/* Constants for flags */
+#define SMSC911X_USE_16BIT (BIT(0))
+#define SMSC911X_USE_32BIT (BIT(1))
+#define SMSC911X_FORCE_INTERNAL_PHY (BIT(2))
+#define SMSC911X_FORCE_EXTERNAL_PHY (BIT(3))
+#define SMSC911X_SAVE_MAC_ADDRESS (BIT(4))
+
+/*
+ * SMSC911X_SWAP_FIFO:
+ * Enables software byte swap for fifo data. Should only be used as a
+ * "last resort" in the case of big endian mode on boards with incorrectly
+ * routed data bus to older devices such as LAN9118. Newer devices such as
+ * LAN9221 can handle this in hardware, there are registers to control
+ * this swapping but the driver doesn't currently use them.
+ */
+#define SMSC911X_SWAP_FIFO (BIT(5))
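
A sketch of board platform data using the constants above; the bus width, IRQ wiring and PHY mode are illustrative assumptions.

static struct smsc911x_platform_config example_smsc911x_config = {
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
	.flags		= SMSC911X_USE_16BIT | SMSC911X_SAVE_MAC_ADDRESS,
	.phy_interface	= PHY_INTERFACE_MODE_MII,
};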
+
+#endif /* __LINUX_SMSC911X_H__ */
diff --git a/include/linux/smscphy.h b/include/linux/smscphy.h
new file mode 100644
index 000000000..f4bf16e16
--- /dev/null
+++ b/include/linux/smscphy.h
@@ -0,0 +1,30 @@
+#ifndef __LINUX_SMSCPHY_H__
+#define __LINUX_SMSCPHY_H__
+
+#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
+#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
+#define MII_LAN83C185_SPECIAL_MODES 18 /* Special Modes Register */
+
+#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
+#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
+#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
+#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
+#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
+#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
+#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
+
+#define MII_LAN83C185_ISF_INT_ALL (0x0e)
+
+#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+ MII_LAN83C185_ISF_INT7)
+
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
+#define MII_LAN83C185_ENERGYON (1 << 1) /* ENERGYON */
+
+#define MII_LAN83C185_MODE_MASK 0xE0
+#define MII_LAN83C185_MODE_POWERDOWN 0xC0 /* Power Down mode */
+#define MII_LAN83C185_MODE_ALL 0xE0 /* All capable mode */
+
+#endif /* __LINUX_SMSCPHY_H__ */
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
new file mode 100644
index 000000000..dad035c16
--- /dev/null
+++ b/include/linux/soc/ti/knav_dma.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors:	Sandeep Nair <sandeep_n@ti.com>
+ *		Cyril Chemparathy <cyril@ti.com>
+ *		Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
+#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
+
+/*
+ * PKTDMA descriptor manipulation macros for host packet descriptor
+ */
+#define MASK(x) (BIT(x) - 1)
+#define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22)
+#define KNAV_DMA_DESC_PKT_LEN_SHIFT 0
+#define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22)
+#define KNAV_DMA_DESC_PS_INFO_IN_DESC 0
+#define KNAV_DMA_DESC_TAG_MASK MASK(8)
+#define KNAV_DMA_DESC_SAG_HI_SHIFT 24
+#define KNAV_DMA_DESC_STAG_LO_SHIFT 16
+#define KNAV_DMA_DESC_DTAG_HI_SHIFT 8
+#define KNAV_DMA_DESC_DTAG_LO_SHIFT 0
+#define KNAV_DMA_DESC_HAS_EPIB BIT(31)
+#define KNAV_DMA_DESC_NO_EPIB 0
+#define KNAV_DMA_DESC_PSLEN_SHIFT 24
+#define KNAV_DMA_DESC_PSLEN_MASK MASK(6)
+#define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20
+#define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4)
+#define KNAV_DMA_DESC_PSFLAG_SHIFT 16
+#define KNAV_DMA_DESC_PSFLAG_MASK MASK(4)
+#define KNAV_DMA_DESC_RETQ_SHIFT 0
+#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
+#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
+
+#define KNAV_DMA_NUM_EPIB_WORDS 4
+#define KNAV_DMA_NUM_PS_WORDS 16
+#define KNAV_DMA_FDQ_PER_CHAN 4
+
+/* Tx channel scheduling priority */
+enum knav_dma_tx_priority {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MED_H,
+ DMA_PRIO_MED_L,
+ DMA_PRIO_LOW
+};
+
+/* Rx channel error handling mode during buffer starvation */
+enum knav_dma_rx_err_mode {
+ DMA_DROP = 0,
+ DMA_RETRY
+};
+
+/* Rx flow size threshold configuration */
+enum knav_dma_rx_thresholds {
+ DMA_THRESH_NONE = 0,
+ DMA_THRESH_0 = 1,
+ DMA_THRESH_0_1 = 3,
+ DMA_THRESH_0_1_2 = 7
+};
+
+/* Descriptor type */
+enum knav_dma_desc_type {
+ DMA_DESC_HOST = 0,
+ DMA_DESC_MONOLITHIC = 2
+};
+
+/**
+ * struct knav_dma_tx_cfg: Tx channel configuration
+ * @filt_einfo: Filter extended packet info
+ * @filt_pswords: Filter PS words present
+ * @priority: Tx channel scheduling priority
+ */
+struct knav_dma_tx_cfg {
+ bool filt_einfo;
+ bool filt_pswords;
+ enum knav_dma_tx_priority priority;
+};
+
+/**
+ * struct knav_dma_rx_cfg: Rx flow configuration
+ * @einfo_present: Extended packet info present
+ * @psinfo_present: PS words present
+ * @err_mode: Error handling mode during buffer starvation
+ * @desc_type: Host or Monolithic descriptor
+ * @psinfo_at_sop: PS word located at start of packet
+ * @sop_offset: Start of packet offset
+ * @dst_q: Destination queue for a given flow
+ * @thresh: Rx flow size threshold
+ * @fdq: Free descriptor queue array
+ * @sz_thresh0: RX packet size threshold 0
+ * @sz_thresh1: RX packet size threshold 1
+ * @sz_thresh2: RX packet size threshold 2
+ */
+struct knav_dma_rx_cfg {
+ bool einfo_present;
+ bool psinfo_present;
+ enum knav_dma_rx_err_mode err_mode;
+ enum knav_dma_desc_type desc_type;
+ bool psinfo_at_sop;
+ unsigned int sop_offset;
+ unsigned int dst_q;
+ enum knav_dma_rx_thresholds thresh;
+ unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN];
+ unsigned int sz_thresh0;
+ unsigned int sz_thresh1;
+ unsigned int sz_thresh2;
+};
+
+/**
+ * struct knav_dma_cfg: Pktdma channel configuration
+ * @direction: DMA transfer direction (DMA_MEM_TO_DEV or DMA_DEV_TO_MEM)
+ * @u.tx: Tx channel configuration
+ * @u.rx: Rx flow configuration
+ */
+struct knav_dma_cfg {
+ enum dma_transfer_direction direction;
+ union {
+ struct knav_dma_tx_cfg tx;
+ struct knav_dma_rx_cfg rx;
+ } u;
+};
+
+/**
+ * struct knav_dma_desc: Host packet descriptor layout
+ * @desc_info: Descriptor information like id, type, length
+ * @tag_info: Flow tag info written in during RX
+ * @packet_info: Queue Manager, policy, flags etc
+ * @buff_len: Buffer length in bytes
+ * @buff: Buffer pointer
+ * @next_desc: For chaining the descriptors
+ * @orig_len: original buffer length, since 'buff_len' can be overwritten
+ * @orig_buff: original buffer pointer, since 'buff' can be overwritten
+ * @epib: Extended packet info block
+ * @psdata: Protocol specific data
+ */
+struct knav_dma_desc {
+ u32 desc_info;
+ u32 tag_info;
+ u32 packet_info;
+ u32 buff_len;
+ u32 buff;
+ u32 next_desc;
+ u32 orig_len;
+ u32 orig_buff;
+ u32 epib[KNAV_DMA_NUM_EPIB_WORDS];
+ u32 psdata[KNAV_DMA_NUM_PS_WORDS];
+ u32 pad[4];
+} ____cacheline_aligned;
+
+#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
+void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config);
+void knav_dma_close_channel(void *channel);
+#else
+static inline void *knav_dma_open_channel(struct device *dev, const char *name,
+ struct knav_dma_cfg *config)
+{
+ return (void *) NULL;
+}
+static inline void knav_dma_close_channel(void *channel)
+{}
+
+#endif
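+
+/*
+ * Usage sketch (illustrative only; "dev" and the channel name "nettx0" are
+ * hypothetical and come from the caller's device/DT configuration):
+ *
+ *    struct knav_dma_cfg config;
+ *    void *chan;
+ *
+ *    memset(&config, 0, sizeof(config));
+ *    config.direction         = DMA_MEM_TO_DEV;
+ *    config.u.tx.filt_einfo   = false;
+ *    config.u.tx.filt_pswords = false;
+ *    config.u.tx.priority     = DMA_PRIO_MED_L;
+ *
+ *    chan = knav_dma_open_channel(dev, "nettx0", &config);
+ *    if (IS_ERR_OR_NULL(chan))
+ *        return -ENODEV;
+ *    ...
+ *    knav_dma_close_channel(chan);
+ */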
+
+#endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
new file mode 100644
index 000000000..9f0ebb3ba
--- /dev/null
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -0,0 +1,90 @@
+/*
+ * Keystone Navigator Queue Management Sub-System header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sandeep Nair <sandeep_n@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SOC_TI_KNAV_QMSS_H__
+#define __SOC_TI_KNAV_QMSS_H__
+
+#include <linux/err.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/dma-mapping.h>
+
+/* queue types */
+#define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */
+#define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */
+#define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */
+
+/* queue flags */
+#define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */
+
+/**
+ * enum knav_queue_ctrl_cmd - queue operations.
+ * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue
+ * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible
+ * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle.
+ * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle.
+ * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle.
+ * @KNAV_QUEUE_GET_COUNT: Get the number of descriptors currently in the queue.
+ */
+enum knav_queue_ctrl_cmd {
+ KNAV_QUEUE_GET_ID,
+ KNAV_QUEUE_FLUSH,
+ KNAV_QUEUE_SET_NOTIFIER,
+ KNAV_QUEUE_ENABLE_NOTIFY,
+ KNAV_QUEUE_DISABLE_NOTIFY,
+ KNAV_QUEUE_GET_COUNT
+};
+
+/* Queue notifier callback prototype */
+typedef void (*knav_queue_notify_fn)(void *arg);
+
+/**
+ * struct knav_queue_notify_config: Notifier configuration
+ * @fn: Notifier function
+ * @fn_arg: Notifier function argument
+ */
+struct knav_queue_notify_config {
+ knav_queue_notify_fn fn;
+ void *fn_arg;
+};
+
+void *knav_queue_open(const char *name, unsigned id,
+ unsigned flags);
+void knav_queue_close(void *qhandle);
+int knav_queue_device_control(void *qhandle,
+ enum knav_queue_ctrl_cmd cmd,
+ unsigned long arg);
+dma_addr_t knav_queue_pop(void *qhandle, unsigned *size);
+int knav_queue_push(void *qhandle, dma_addr_t dma,
+ unsigned size, unsigned flags);
+
+void *knav_pool_create(const char *name,
+ int num_desc, int region_id);
+void knav_pool_destroy(void *ph);
+int knav_pool_count(void *ph);
+void *knav_pool_desc_get(void *ph);
+void knav_pool_desc_put(void *ph, void *desc);
+int knav_pool_desc_map(void *ph, void *desc, unsigned size,
+ dma_addr_t *dma, unsigned *dma_sz);
+void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz);
+dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt);
+void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma);
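+
+/*
+ * Usage sketch (illustrative only; the queue name and the error handling
+ * shown here are hypothetical):
+ *
+ *    void *q;
+ *    dma_addr_t dma;
+ *    unsigned size;
+ *
+ *    q = knav_queue_open("my-gp-queue", KNAV_QUEUE_GP, 0);
+ *    if (IS_ERR(q))
+ *        return PTR_ERR(q);
+ *
+ *    knav_queue_push(q, dma, size, 0);
+ *    dma = knav_queue_pop(q, &size);
+ *    knav_queue_close(q);
+ */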
+
+#endif /* __SOC_TI_KNAV_QMSS_H__ */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
new file mode 100644
index 000000000..083ac3880
--- /dev/null
+++ b/include/linux/sock_diag.h
@@ -0,0 +1,29 @@
+#ifndef __SOCK_DIAG_H__
+#define __SOCK_DIAG_H__
+
+#include <linux/user_namespace.h>
+#include <uapi/linux/sock_diag.h>
+
+struct sk_buff;
+struct nlmsghdr;
+struct sock;
+
+struct sock_diag_handler {
+ __u8 family;
+ int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
+};
+
+int sock_diag_register(const struct sock_diag_handler *h);
+void sock_diag_unregister(const struct sock_diag_handler *h);
+
+void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+
+int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
+void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
+
+int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
+ struct sk_buff *skb, int attrtype);
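+
+/*
+ * Registration sketch (illustrative only; my_diag_dump() is a hypothetical
+ * handler implemented by the caller):
+ *
+ *    static const struct sock_diag_handler my_diag_handler = {
+ *        .family = AF_INET,
+ *        .dump   = my_diag_dump,
+ *    };
+ *
+ *    err = sock_diag_register(&my_diag_handler);
+ *    ...
+ *    sock_diag_unregister(&my_diag_handler);
+ */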
+
+#endif
diff --git a/include/linux/socket.h b/include/linux/socket.h
new file mode 100644
index 000000000..5bf59c849
--- /dev/null
+++ b/include/linux/socket.h
@@ -0,0 +1,341 @@
+#ifndef _LINUX_SOCKET_H
+#define _LINUX_SOCKET_H
+
+
+#include <asm/socket.h> /* arch-dependent defines */
+#include <linux/sockios.h> /* the SIOCxxx I/O controls */
+#include <linux/uio.h> /* iovec support */
+#include <linux/types.h> /* pid_t */
+#include <linux/compiler.h> /* __user */
+#include <uapi/linux/socket.h>
+
+struct pid;
+struct cred;
+
+#define __sockaddr_check_size(size) \
+ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage)))
+
+#ifdef CONFIG_PROC_FS
+struct seq_file;
+extern void socket_seq_show(struct seq_file *seq);
+#endif
+
+typedef __kernel_sa_family_t sa_family_t;
+
+/*
+ * 1003.1g requires sa_family_t and that sa_data is char.
+ */
+
+struct sockaddr {
+ sa_family_t sa_family; /* address family, AF_xxx */
+ char sa_data[14]; /* 14 bytes of protocol address */
+};
+
+struct linger {
+ int l_onoff; /* Linger active */
+ int l_linger; /* How long to linger for */
+};
+
+#define sockaddr_storage __kernel_sockaddr_storage
+
+/*
+ * As we do 4.4BSD message passing we use a 4.4BSD message passing
+ * system, not 4.3. Thus msg_accrights and msg_accrightslen are now
+ * missing; they belong in an obscure libc emulation or the bin.
+ */
+
+struct msghdr {
+ void *msg_name; /* ptr to socket address structure */
+ int msg_namelen; /* size of socket address structure */
+ struct iov_iter msg_iter; /* data */
+ void *msg_control; /* ancillary data */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
+ unsigned int msg_flags; /* flags on received message */
+ struct kiocb *msg_iocb; /* ptr to iocb for async requests */
+};
+
+struct user_msghdr {
+ void __user *msg_name; /* ptr to socket address structure */
+ int msg_namelen; /* size of socket address structure */
+ struct iovec __user *msg_iov; /* scatter/gather array */
+ __kernel_size_t msg_iovlen; /* # elements in msg_iov */
+ void __user *msg_control; /* ancillary data */
+ __kernel_size_t msg_controllen; /* ancillary data buffer length */
+ unsigned int msg_flags; /* flags on received message */
+};
+
+/* For recvmmsg/sendmmsg */
+struct mmsghdr {
+ struct user_msghdr msg_hdr;
+ unsigned int msg_len;
+};
+
+/*
+ * POSIX 1003.1g - ancillary data object information
+ * Ancillary data consists of a sequence of pairs of
+ * (cmsghdr, cmsg_data[])
+ */
+
+struct cmsghdr {
+ __kernel_size_t cmsg_len; /* data byte count, including hdr */
+ int cmsg_level; /* originating protocol */
+ int cmsg_type; /* protocol-specific type */
+};
+
+/*
+ * Ancillary data object information MACROS
+ * Table 5-14 of POSIX 1003.1g
+ */
+
+#define __CMSG_NXTHDR(ctl, len, cmsg) __cmsg_nxthdr((ctl),(len),(cmsg))
+#define CMSG_NXTHDR(mhdr, cmsg) cmsg_nxthdr((mhdr), (cmsg))
+
+#define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )
+
+#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr))))
+#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len))
+#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len))
+
+#define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \
+ (struct cmsghdr *)(ctl) : \
+ (struct cmsghdr *)NULL)
+#define CMSG_FIRSTHDR(msg) __CMSG_FIRSTHDR((msg)->msg_control, (msg)->msg_controllen)
+#define CMSG_OK(mhdr, cmsg) ((cmsg)->cmsg_len >= sizeof(struct cmsghdr) && \
+ (cmsg)->cmsg_len <= (unsigned long) \
+ ((mhdr)->msg_controllen - \
+ ((char *)(cmsg) - (char *)(mhdr)->msg_control)))
+#define for_each_cmsghdr(cmsg, msg) \
+ for (cmsg = CMSG_FIRSTHDR(msg); \
+ cmsg; \
+ cmsg = CMSG_NXTHDR(msg, cmsg))
+
+/*
+ * Get the next cmsg header
+ *
+ * PLEASE, do not touch this function. If you think, that it is
+ * incorrect, grep kernel sources and think about consequences
+ * before trying to improve it.
+ *
+ * Now it always returns valid, not truncated ancillary object
+ * HEADER. But caller still MUST check, that cmsg->cmsg_len is
+ * inside range, given by msg->msg_controllen before using
+ * ancillary object DATA. --ANK (980731)
+ */
+
+static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
+ struct cmsghdr *__cmsg)
+{
+ struct cmsghdr * __ptr;
+
+ __ptr = (struct cmsghdr*)(((unsigned char *) __cmsg) + CMSG_ALIGN(__cmsg->cmsg_len));
+ if ((unsigned long)((char*)(__ptr+1) - (char *) __ctl) > __size)
+ return (struct cmsghdr *)0;
+
+ return __ptr;
+}
+
+static inline struct cmsghdr * cmsg_nxthdr (struct msghdr *__msg, struct cmsghdr *__cmsg)
+{
+ return __cmsg_nxthdr(__msg->msg_control, __msg->msg_controllen, __cmsg);
+}
+
+static inline size_t msg_data_left(struct msghdr *msg)
+{
+ return iov_iter_count(&msg->msg_iter);
+}
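+
+/*
+ * Illustrative walk over the ancillary data of a message, including the
+ * CMSG_OK() range check called for in the comment above; "msg" is a
+ * hypothetical struct msghdr pointer and handle_fds() a hypothetical helper:
+ *
+ *    struct cmsghdr *cmsg;
+ *
+ *    for_each_cmsghdr(cmsg, msg) {
+ *        if (!CMSG_OK(msg, cmsg))
+ *            return -EINVAL;
+ *        if (cmsg->cmsg_level == SOL_SOCKET &&
+ *            cmsg->cmsg_type == SCM_RIGHTS)
+ *            handle_fds(CMSG_DATA(cmsg));
+ *    }
+ */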
+
+/* "Socket"-level control message types: */
+
+#define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */
+#define SCM_CREDENTIALS 0x02 /* rw: struct ucred */
+#define SCM_SECURITY 0x03 /* rw: security label */
+
+struct ucred {
+ __u32 pid;
+ __u32 uid;
+ __u32 gid;
+};
+
+/* Supported address families. */
+#define AF_UNSPEC 0
+#define AF_UNIX 1 /* Unix domain sockets */
+#define AF_LOCAL 1 /* POSIX name for AF_UNIX */
+#define AF_INET 2 /* Internet IP Protocol */
+#define AF_AX25 3 /* Amateur Radio AX.25 */
+#define AF_IPX 4 /* Novell IPX */
+#define AF_APPLETALK 5 /* AppleTalk DDP */
+#define AF_NETROM 6 /* Amateur Radio NET/ROM */
+#define AF_BRIDGE 7 /* Multiprotocol bridge */
+#define AF_ATMPVC 8 /* ATM PVCs */
+#define AF_X25 9 /* Reserved for X.25 project */
+#define AF_INET6 10 /* IP version 6 */
+#define AF_ROSE 11 /* Amateur Radio X.25 PLP */
+#define AF_DECnet 12 /* Reserved for DECnet project */
+#define AF_NETBEUI 13 /* Reserved for 802.2LLC project*/
+#define AF_SECURITY 14 /* Security callback pseudo AF */
+#define AF_KEY 15 /* PF_KEY key management API */
+#define AF_NETLINK 16
+#define AF_ROUTE AF_NETLINK /* Alias to emulate 4.4BSD */
+#define AF_PACKET 17 /* Packet family */
+#define AF_ASH 18 /* Ash */
+#define AF_ECONET 19 /* Acorn Econet */
+#define AF_ATMSVC 20 /* ATM SVCs */
+#define AF_RDS 21 /* RDS sockets */
+#define AF_SNA 22 /* Linux SNA Project (nutters!) */
+#define AF_IRDA 23 /* IRDA sockets */
+#define AF_PPPOX 24 /* PPPoX sockets */
+#define AF_WANPIPE 25 /* Wanpipe API Sockets */
+#define AF_LLC 26 /* Linux LLC */
+#define AF_IB 27 /* Native InfiniBand address */
+#define AF_MPLS 28 /* MPLS */
+#define AF_CAN 29 /* Controller Area Network */
+#define AF_TIPC 30 /* TIPC sockets */
+#define AF_BLUETOOTH 31 /* Bluetooth sockets */
+#define AF_IUCV 32 /* IUCV sockets */
+#define AF_RXRPC 33 /* RxRPC sockets */
+#define AF_ISDN 34 /* mISDN sockets */
+#define AF_PHONET 35 /* Phonet sockets */
+#define AF_IEEE802154 36 /* IEEE802154 sockets */
+#define AF_CAIF 37 /* CAIF sockets */
+#define AF_ALG 38 /* Algorithm sockets */
+#define AF_NFC 39 /* NFC sockets */
+#define AF_VSOCK 40 /* vSockets */
+#define AF_MAX 41 /* For now.. */
+
+/* Protocol families, same as address families. */
+#define PF_UNSPEC AF_UNSPEC
+#define PF_UNIX AF_UNIX
+#define PF_LOCAL AF_LOCAL
+#define PF_INET AF_INET
+#define PF_AX25 AF_AX25
+#define PF_IPX AF_IPX
+#define PF_APPLETALK AF_APPLETALK
+#define PF_NETROM AF_NETROM
+#define PF_BRIDGE AF_BRIDGE
+#define PF_ATMPVC AF_ATMPVC
+#define PF_X25 AF_X25
+#define PF_INET6 AF_INET6
+#define PF_ROSE AF_ROSE
+#define PF_DECnet AF_DECnet
+#define PF_NETBEUI AF_NETBEUI
+#define PF_SECURITY AF_SECURITY
+#define PF_KEY AF_KEY
+#define PF_NETLINK AF_NETLINK
+#define PF_ROUTE AF_ROUTE
+#define PF_PACKET AF_PACKET
+#define PF_ASH AF_ASH
+#define PF_ECONET AF_ECONET
+#define PF_ATMSVC AF_ATMSVC
+#define PF_RDS AF_RDS
+#define PF_SNA AF_SNA
+#define PF_IRDA AF_IRDA
+#define PF_PPPOX AF_PPPOX
+#define PF_WANPIPE AF_WANPIPE
+#define PF_LLC AF_LLC
+#define PF_IB AF_IB
+#define PF_MPLS AF_MPLS
+#define PF_CAN AF_CAN
+#define PF_TIPC AF_TIPC
+#define PF_BLUETOOTH AF_BLUETOOTH
+#define PF_IUCV AF_IUCV
+#define PF_RXRPC AF_RXRPC
+#define PF_ISDN AF_ISDN
+#define PF_PHONET AF_PHONET
+#define PF_IEEE802154 AF_IEEE802154
+#define PF_CAIF AF_CAIF
+#define PF_ALG AF_ALG
+#define PF_NFC AF_NFC
+#define PF_VSOCK AF_VSOCK
+#define PF_MAX AF_MAX
+
+/* Maximum queue length specifiable by listen. */
+#define SOMAXCONN 128
+
+/* Flags we can use with send() and recv().
+   Added those for 1003.1g; not all are supported yet.
+ */
+
+#define MSG_OOB 1
+#define MSG_PEEK 2
+#define MSG_DONTROUTE 4
+#define MSG_TRYHARD 4 /* Synonym for MSG_DONTROUTE for DECnet */
+#define MSG_CTRUNC 8
+#define MSG_PROBE 0x10 /* Do not send. Only probe path, e.g. for MTU */
+#define MSG_TRUNC 0x20
+#define MSG_DONTWAIT 0x40 /* Nonblocking io */
+#define MSG_EOR 0x80 /* End of record */
+#define MSG_WAITALL 0x100 /* Wait for a full request */
+#define MSG_FIN 0x200
+#define MSG_SYN 0x400
+#define MSG_CONFIRM 0x800 /* Confirm path validity */
+#define MSG_RST 0x1000
+#define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */
+#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
+#define MSG_MORE 0x8000 /* Sender will send more */
+#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
+#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
+#define MSG_EOF MSG_FIN
+
+#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
+#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
+ descriptor received through
+ SCM_RIGHTS */
+#if defined(CONFIG_COMPAT)
+#define MSG_CMSG_COMPAT 0x80000000 /* This message needs 32 bit fixups */
+#else
+#define MSG_CMSG_COMPAT 0 /* We never have 32 bit fixups */
+#endif
+
+
+/* setsockopt(2) level. Thanks to BSD these must match IPPROTO_xxx */
+#define SOL_IP 0
+/* #define SOL_ICMP 1 No-no-no! Due to Linux :-) we cannot use SOL_ICMP=1 */
+#define SOL_TCP 6
+#define SOL_UDP 17
+#define SOL_IPV6 41
+#define SOL_ICMPV6 58
+#define SOL_SCTP 132
+#define SOL_UDPLITE 136 /* UDP-Lite (RFC 3828) */
+#define SOL_RAW 255
+#define SOL_IPX 256
+#define SOL_AX25 257
+#define SOL_ATALK 258
+#define SOL_NETROM 259
+#define SOL_ROSE 260
+#define SOL_DECNET 261
+#define SOL_X25 262
+#define SOL_PACKET 263
+#define SOL_ATM 264 /* ATM layer (cell level) */
+#define SOL_AAL 265 /* ATM Adaptation Layer (packet level) */
+#define SOL_IRDA 266
+#define SOL_NETBEUI 267
+#define SOL_LLC 268
+#define SOL_DCCP 269
+#define SOL_NETLINK 270
+#define SOL_TIPC 271
+#define SOL_RXRPC 272
+#define SOL_PPPOL2TP 273
+#define SOL_BLUETOOTH 274
+#define SOL_PNPIPE 275
+#define SOL_RDS 276
+#define SOL_IUCV 277
+#define SOL_CAIF 278
+#define SOL_ALG 279
+#define SOL_NFC 280
+
+/* IPX options */
+#define IPX_TYPE 1
+
+extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
+extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+
+struct timespec;
+
+/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
+extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ unsigned int flags, struct timespec *timeout);
+extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
+ unsigned int vlen, unsigned int flags);
+#endif /* _LINUX_SOCKET_H */
diff --git a/include/linux/sonet.h b/include/linux/sonet.h
new file mode 100644
index 000000000..680f9a31d
--- /dev/null
+++ b/include/linux/sonet.h
@@ -0,0 +1,19 @@
+/* sonet.h - SONET/SDH physical layer control */
+#ifndef LINUX_SONET_H
+#define LINUX_SONET_H
+
+
+#include <linux/atomic.h>
+#include <uapi/linux/sonet.h>
+
+struct k_sonet_stats {
+#define __HANDLE_ITEM(i) atomic_t i
+ __SONET_ITEMS
+#undef __HANDLE_ITEM
+};
+
+extern void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to);
+extern void sonet_subtract_stats(struct k_sonet_stats *from,
+ struct sonet_stats *to);
+
+#endif
diff --git a/include/linux/sony-laptop.h b/include/linux/sony-laptop.h
new file mode 100644
index 000000000..e2e036d94
--- /dev/null
+++ b/include/linux/sony-laptop.h
@@ -0,0 +1,34 @@
+#ifndef _SONYLAPTOP_H_
+#define _SONYLAPTOP_H_
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+/* used only for communication between v4l and sony-laptop */
+
+#define SONY_PIC_COMMAND_GETCAMERA 1 /* obsolete */
+#define SONY_PIC_COMMAND_SETCAMERA 2
+#define SONY_PIC_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */
+#define SONY_PIC_COMMAND_SETCAMERABRIGHTNESS 4
+#define SONY_PIC_COMMAND_GETCAMERACONTRAST 5 /* obsolete */
+#define SONY_PIC_COMMAND_SETCAMERACONTRAST 6
+#define SONY_PIC_COMMAND_GETCAMERAHUE 7 /* obsolete */
+#define SONY_PIC_COMMAND_SETCAMERAHUE 8
+#define SONY_PIC_COMMAND_GETCAMERACOLOR 9 /* obsolete */
+#define SONY_PIC_COMMAND_SETCAMERACOLOR 10
+#define SONY_PIC_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */
+#define SONY_PIC_COMMAND_SETCAMERASHARPNESS 12
+#define SONY_PIC_COMMAND_GETCAMERAPICTURE 13 /* obsolete */
+#define SONY_PIC_COMMAND_SETCAMERAPICTURE 14
+#define SONY_PIC_COMMAND_GETCAMERAAGC 15 /* obsolete */
+#define SONY_PIC_COMMAND_SETCAMERAAGC 16
+#define SONY_PIC_COMMAND_GETCAMERADIRECTION 17 /* obsolete */
+#define SONY_PIC_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */
+#define SONY_PIC_COMMAND_GETCAMERAREVISION 19 /* obsolete */
+
+int sony_pic_camera_command(int command, u8 value);
+
+#endif /* __KERNEL__ */
+
+#endif /* _SONYLAPTOP_H_ */
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
new file mode 100644
index 000000000..0b7cc265c
--- /dev/null
+++ b/include/linux/sonypi.h
@@ -0,0 +1,63 @@
+/*
+ * Sony Programmable I/O Control Device driver for VAIO
+ *
+ * Copyright (C) 2001-2005 Stelian Pop <stelian@popies.net>
+ *
+ * Copyright (C) 2005 Narayanan R S <nars@kadamba.org>
+ *
+ * Copyright (C) 2001-2002 Alcôve <www.alcove.com>
+ *
+ * Copyright (C) 2001 Michael Ashley <m.ashley@unsw.edu.au>
+ *
+ * Copyright (C) 2001 Junichi Morita <jun1m@mars.dti.ne.jp>
+ *
+ * Copyright (C) 2000 Takaya Kinjo <t-kinjo@tc4.so-net.ne.jp>
+ *
+ * Copyright (C) 2000 Andrew Tridgell <tridge@valinux.com>
+ *
+ * Earlier work by Werner Almesberger, Paul `Rusty' Russell and Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+#ifndef _SONYPI_H_
+#define _SONYPI_H_
+
+#include <uapi/linux/sonypi.h>
+
+
+/* used only for communication between v4l and sonypi */
+
+#define SONYPI_COMMAND_GETCAMERA 1 /* obsolete */
+#define SONYPI_COMMAND_SETCAMERA 2
+#define SONYPI_COMMAND_GETCAMERABRIGHTNESS 3 /* obsolete */
+#define SONYPI_COMMAND_SETCAMERABRIGHTNESS 4
+#define SONYPI_COMMAND_GETCAMERACONTRAST 5 /* obsolete */
+#define SONYPI_COMMAND_SETCAMERACONTRAST 6
+#define SONYPI_COMMAND_GETCAMERAHUE 7 /* obsolete */
+#define SONYPI_COMMAND_SETCAMERAHUE 8
+#define SONYPI_COMMAND_GETCAMERACOLOR 9 /* obsolete */
+#define SONYPI_COMMAND_SETCAMERACOLOR 10
+#define SONYPI_COMMAND_GETCAMERASHARPNESS 11 /* obsolete */
+#define SONYPI_COMMAND_SETCAMERASHARPNESS 12
+#define SONYPI_COMMAND_GETCAMERAPICTURE 13 /* obsolete */
+#define SONYPI_COMMAND_SETCAMERAPICTURE 14
+#define SONYPI_COMMAND_GETCAMERAAGC 15 /* obsolete */
+#define SONYPI_COMMAND_SETCAMERAAGC 16
+#define SONYPI_COMMAND_GETCAMERADIRECTION 17 /* obsolete */
+#define SONYPI_COMMAND_GETCAMERAROMVERSION 18 /* obsolete */
+#define SONYPI_COMMAND_GETCAMERAREVISION 19 /* obsolete */
+
+#endif /* _SONYPI_H_ */
diff --git a/include/linux/sort.h b/include/linux/sort.h
new file mode 100644
index 000000000..d534da2b5
--- /dev/null
+++ b/include/linux/sort.h
@@ -0,0 +1,10 @@
+#ifndef _LINUX_SORT_H
+#define _LINUX_SORT_H
+
+#include <linux/types.h>
+
+void sort(void *base, size_t num, size_t size,
+ int (*cmp)(const void *, const void *),
+ void (*swap)(void *, void *, int));
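+
+/*
+ * Example (illustrative; "values" and "nvalues" are a hypothetical array
+ * and its length): sorting an array of ints with a trivial comparator.
+ * Passing NULL for the swap callback lets sort() fall back to its generic
+ * element swap.
+ *
+ *    static int cmp_int(const void *a, const void *b)
+ *    {
+ *        return *(const int *)a - *(const int *)b;
+ *    }
+ *
+ *    sort(values, nvalues, sizeof(int), cmp_int, NULL);
+ */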
+
+#endif
diff --git a/include/linux/sound.h b/include/linux/sound.h
new file mode 100644
index 000000000..73ded040f
--- /dev/null
+++ b/include/linux/sound.h
@@ -0,0 +1,21 @@
+#ifndef _LINUX_SOUND_H
+#define _LINUX_SOUND_H
+
+#include <uapi/linux/sound.h>
+
+/*
+ * Sound core interface functions
+ */
+
+struct device;
+extern int register_sound_special(const struct file_operations *fops, int unit);
+extern int register_sound_special_device(const struct file_operations *fops, int unit, struct device *dev);
+extern int register_sound_mixer(const struct file_operations *fops, int dev);
+extern int register_sound_midi(const struct file_operations *fops, int dev);
+extern int register_sound_dsp(const struct file_operations *fops, int dev);
+
+extern void unregister_sound_special(int unit);
+extern void unregister_sound_mixer(int unit);
+extern void unregister_sound_midi(int unit);
+extern void unregister_sound_dsp(int unit);
+#endif /* _LINUX_SOUND_H */
diff --git a/include/linux/soundcard.h b/include/linux/soundcard.h
new file mode 100644
index 000000000..96c79cbd7
--- /dev/null
+++ b/include/linux/soundcard.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright by Hannu Savolainen 1993-1997
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer. 2.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef SOUNDCARD_H
+#define SOUNDCARD_H
+
+# include <asm/byteorder.h>
+#include <uapi/linux/soundcard.h>
+
+# if defined(__BIG_ENDIAN)
+# define AFMT_S16_NE AFMT_S16_BE
+# elif defined(__LITTLE_ENDIAN)
+# define AFMT_S16_NE AFMT_S16_LE
+# else
+# error "could not determine byte order"
+# endif
+#endif
diff --git a/include/linux/spi/ad7877.h b/include/linux/spi/ad7877.h
new file mode 100644
index 000000000..cdbed816f
--- /dev/null
+++ b/include/linux/spi/ad7877.h
@@ -0,0 +1,24 @@
+/* linux/spi/ad7877.h */
+
+/* Touchscreen characteristics vary between boards and models. The
+ * platform_data for the device's "struct device" holds this information.
+ *
+ * It's OK if the min/max values are zero.
+ */
+struct ad7877_platform_data {
+ u16 model; /* 7877 */
+ u16 vref_delay_usecs; /* 0 for external vref; etc */
+ u16 x_plate_ohms;
+ u16 y_plate_ohms;
+
+ u16 x_min, x_max;
+ u16 y_min, y_max;
+ u16 pressure_min, pressure_max;
+
+ u8 stopacq_polarity; /* 1 = Active HIGH, 0 = Active LOW */
+ u8 first_conversion_delay; /* 0 = 0.5us, 1 = 128us, 2 = 1ms, 3 = 8ms */
+ u8 acquisition_time; /* 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */
+ u8 averaging; /* 0 = 1, 1 = 4, 2 = 8, 3 = 16 */
+ u8 pen_down_acc_interval; /* 0 = convert once, 1 = every 0.5 ms,
+ 2 = every 1 ms, 3 = every 8 ms */
+};
diff --git a/include/linux/spi/ad7879.h b/include/linux/spi/ad7879.h
new file mode 100644
index 000000000..58368be0b
--- /dev/null
+++ b/include/linux/spi/ad7879.h
@@ -0,0 +1,41 @@
+/* linux/spi/ad7879.h */
+
+/* Touchscreen characteristics vary between boards and models. The
+ * platform_data for the device's "struct device" holds this information.
+ *
+ * It's OK if the min/max values are zero.
+ */
+struct ad7879_platform_data {
+ u16 model; /* 7879 */
+ u16 x_plate_ohms;
+ u16 x_min, x_max;
+ u16 y_min, y_max;
+ u16 pressure_min, pressure_max;
+
+ bool swap_xy; /* swap x and y axes */
+
+ /* [0..255] 0=OFF Starts at 1=550us and goes
+ * all the way to 9.440ms in steps of 35us.
+ */
+ u8 pen_down_acc_interval;
+ /* [0..15] Starts at 0=128us and goes all the
+ * way to 4.096ms in steps of 128us.
+ */
+ u8 first_conversion_delay;
+ /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */
+ u8 acquisition_time;
+ /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */
+ u8 averaging;
+ /* [0..3] Perform X measurements 0 = OFF,
+ * 1 = 4, 2 = 8, 3 = 16 (median > averaging)
+ */
+ u8 median;
+ /* 1 = AUX/VBAT/GPIO export GPIO to gpiolib
+ * requires CONFIG_GPIOLIB
+ */
+ bool gpio_export;
+ /* identifies the first GPIO number handled by this chip;
+ * or, if negative, requests dynamic ID allocation.
+ */
+ s32 gpio_base;
+};
diff --git a/include/linux/spi/adi_spi3.h b/include/linux/spi/adi_spi3.h
new file mode 100644
index 000000000..c84123aa1
--- /dev/null
+++ b/include/linux/spi/adi_spi3.h
@@ -0,0 +1,254 @@
+/*
+ * Analog Devices SPI3 controller driver
+ *
+ * Copyright (c) 2014 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ADI_SPI3_H_
+#define _ADI_SPI3_H_
+
+#include <linux/types.h>
+
+/* SPI_CONTROL */
+#define SPI_CTL_EN 0x00000001 /* Enable */
+#define SPI_CTL_MSTR 0x00000002 /* Master/Slave */
+#define SPI_CTL_PSSE 0x00000004 /* controls modf error in master mode */
+#define SPI_CTL_ODM 0x00000008 /* Open Drain Mode */
+#define SPI_CTL_CPHA 0x00000010 /* Clock Phase */
+#define SPI_CTL_CPOL 0x00000020 /* Clock Polarity */
+#define SPI_CTL_ASSEL 0x00000040 /* Slave Select Pin Control */
+#define SPI_CTL_SELST 0x00000080 /* Slave Select Polarity in-between transfers */
+#define SPI_CTL_EMISO 0x00000100 /* Enable MISO */
+#define SPI_CTL_SIZE 0x00000600 /* Word Transfer Size */
+#define SPI_CTL_SIZE08 0x00000000 /* SIZE: 8 bits */
+#define SPI_CTL_SIZE16 0x00000200 /* SIZE: 16 bits */
+#define SPI_CTL_SIZE32 0x00000400 /* SIZE: 32 bits */
+#define SPI_CTL_LSBF 0x00001000 /* LSB First */
+#define SPI_CTL_FCEN 0x00002000 /* Flow-Control Enable */
+#define SPI_CTL_FCCH 0x00004000 /* Flow-Control Channel Selection */
+#define SPI_CTL_FCPL 0x00008000 /* Flow-Control Polarity */
+#define SPI_CTL_FCWM 0x00030000 /* Flow-Control Water-Mark */
+#define SPI_CTL_FIFO0 0x00000000 /* FCWM: TFIFO empty or RFIFO Full */
+#define SPI_CTL_FIFO1 0x00010000 /* FCWM: TFIFO 75% or more empty or RFIFO 75% or more full */
+#define SPI_CTL_FIFO2 0x00020000 /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */
+#define SPI_CTL_FMODE 0x00040000 /* Fast-mode Enable */
+#define SPI_CTL_MIOM 0x00300000 /* Multiple I/O Mode */
+#define SPI_CTL_MIO_DIS 0x00000000 /* MIOM: Disable */
+#define SPI_CTL_MIO_DUAL 0x00100000 /* MIOM: Enable DIOM (Dual I/O Mode) */
+#define SPI_CTL_MIO_QUAD 0x00200000 /* MIOM: Enable QUAD (Quad SPI Mode) */
+#define SPI_CTL_SOSI 0x00400000 /* Start on MOSI */
+/* SPI_RX_CONTROL */
+#define SPI_RXCTL_REN 0x00000001 /* Receive Channel Enable */
+#define SPI_RXCTL_RTI 0x00000004 /* Receive Transfer Initiate */
+#define SPI_RXCTL_RWCEN 0x00000008 /* Receive Word Counter Enable */
+#define SPI_RXCTL_RDR 0x00000070 /* Receive Data Request */
+#define SPI_RXCTL_RDR_DIS 0x00000000 /* RDR: Disabled */
+#define SPI_RXCTL_RDR_NE 0x00000010 /* RDR: RFIFO not empty */
+#define SPI_RXCTL_RDR_25 0x00000020 /* RDR: RFIFO 25% full */
+#define SPI_RXCTL_RDR_50 0x00000030 /* RDR: RFIFO 50% full */
+#define SPI_RXCTL_RDR_75 0x00000040 /* RDR: RFIFO 75% full */
+#define SPI_RXCTL_RDR_FULL 0x00000050 /* RDR: RFIFO full */
+#define SPI_RXCTL_RDO 0x00000100 /* Receive Data Over-Run */
+#define SPI_RXCTL_RRWM 0x00003000 /* FIFO Regular Water-Mark */
+#define SPI_RXCTL_RWM_0 0x00000000 /* RRWM: RFIFO Empty */
+#define SPI_RXCTL_RWM_25 0x00001000 /* RRWM: RFIFO 25% full */
+#define SPI_RXCTL_RWM_50 0x00002000 /* RRWM: RFIFO 50% full */
+#define SPI_RXCTL_RWM_75 0x00003000 /* RRWM: RFIFO 75% full */
+#define SPI_RXCTL_RUWM 0x00070000 /* FIFO Urgent Water-Mark */
+#define SPI_RXCTL_UWM_DIS 0x00000000 /* RUWM: Disabled */
+#define SPI_RXCTL_UWM_25 0x00010000 /* RUWM: RFIFO 25% full */
+#define SPI_RXCTL_UWM_50 0x00020000 /* RUWM: RFIFO 50% full */
+#define SPI_RXCTL_UWM_75 0x00030000 /* RUWM: RFIFO 75% full */
+#define SPI_RXCTL_UWM_FULL 0x00040000 /* RUWM: RFIFO full */
+/* SPI_TX_CONTROL */
+#define SPI_TXCTL_TEN 0x00000001 /* Transmit Channel Enable */
+#define SPI_TXCTL_TTI 0x00000004 /* Transmit Transfer Initiate */
+#define SPI_TXCTL_TWCEN 0x00000008 /* Transmit Word Counter Enable */
+#define SPI_TXCTL_TDR 0x00000070 /* Transmit Data Request */
+#define SPI_TXCTL_TDR_DIS 0x00000000 /* TDR: Disabled */
+#define SPI_TXCTL_TDR_NF 0x00000010 /* TDR: TFIFO not full */
+#define SPI_TXCTL_TDR_25 0x00000020 /* TDR: TFIFO 25% empty */
+#define SPI_TXCTL_TDR_50 0x00000030 /* TDR: TFIFO 50% empty */
+#define SPI_TXCTL_TDR_75 0x00000040 /* TDR: TFIFO 75% empty */
+#define SPI_TXCTL_TDR_EMPTY 0x00000050 /* TDR: TFIFO empty */
+#define SPI_TXCTL_TDU 0x00000100 /* Transmit Data Under-Run */
+#define SPI_TXCTL_TRWM 0x00003000 /* FIFO Regular Water-Mark */
+#define SPI_TXCTL_RWM_FULL 0x00000000 /* TRWM: TFIFO full */
+#define SPI_TXCTL_RWM_25 0x00001000 /* TRWM: TFIFO 25% empty */
+#define SPI_TXCTL_RWM_50 0x00002000 /* TRWM: TFIFO 50% empty */
+#define SPI_TXCTL_RWM_75 0x00003000 /* TRWM: TFIFO 75% empty */
+#define SPI_TXCTL_TUWM 0x00070000 /* FIFO Urgent Water-Mark */
+#define SPI_TXCTL_UWM_DIS 0x00000000 /* TUWM: Disabled */
+#define SPI_TXCTL_UWM_25 0x00010000 /* TUWM: TFIFO 25% empty */
+#define SPI_TXCTL_UWM_50 0x00020000 /* TUWM: TFIFO 50% empty */
+#define SPI_TXCTL_UWM_75 0x00030000 /* TUWM: TFIFO 75% empty */
+#define SPI_TXCTL_UWM_EMPTY 0x00040000 /* TUWM: TFIFO empty */
+/* SPI_CLOCK */
+#define SPI_CLK_BAUD 0x0000FFFF /* Baud Rate */
+/* SPI_DELAY */
+#define SPI_DLY_STOP 0x000000FF /* Transfer delay time in multiples of SCK period */
+#define SPI_DLY_LEADX 0x00000100 /* Extended (1 SCK) LEAD Control */
+#define SPI_DLY_LAGX 0x00000200 /* Extended (1 SCK) LAG control */
+/* SPI_SSEL */
+#define SPI_SLVSEL_SSE1 0x00000002 /* SPISSEL1 Enable */
+#define SPI_SLVSEL_SSE2 0x00000004 /* SPISSEL2 Enable */
+#define SPI_SLVSEL_SSE3 0x00000008 /* SPISSEL3 Enable */
+#define SPI_SLVSEL_SSE4 0x00000010 /* SPISSEL4 Enable */
+#define SPI_SLVSEL_SSE5 0x00000020 /* SPISSEL5 Enable */
+#define SPI_SLVSEL_SSE6 0x00000040 /* SPISSEL6 Enable */
+#define SPI_SLVSEL_SSE7 0x00000080 /* SPISSEL7 Enable */
+#define SPI_SLVSEL_SSEL1 0x00000200 /* SPISSEL1 Value */
+#define SPI_SLVSEL_SSEL2 0x00000400 /* SPISSEL2 Value */
+#define SPI_SLVSEL_SSEL3 0x00000800 /* SPISSEL3 Value */
+#define SPI_SLVSEL_SSEL4 0x00001000 /* SPISSEL4 Value */
+#define SPI_SLVSEL_SSEL5 0x00002000 /* SPISSEL5 Value */
+#define SPI_SLVSEL_SSEL6 0x00004000 /* SPISSEL6 Value */
+#define SPI_SLVSEL_SSEL7 0x00008000 /* SPISSEL7 Value */
+/* SPI_RWC */
+#define SPI_RWC_VALUE 0x0000FFFF /* Received Word-Count */
+/* SPI_RWCR */
+#define SPI_RWCR_VALUE 0x0000FFFF /* Received Word-Count Reload */
+/* SPI_TWC */
+#define SPI_TWC_VALUE 0x0000FFFF /* Transmitted Word-Count */
+/* SPI_TWCR */
+#define SPI_TWCR_VALUE 0x0000FFFF /* Transmitted Word-Count Reload */
+/* SPI_IMASK */
+#define SPI_IMSK_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
+#define SPI_IMSK_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
+#define SPI_IMSK_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
+#define SPI_IMSK_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
+#define SPI_IMSK_RSM 0x00000100 /* Receive Start Interrupt Mask */
+#define SPI_IMSK_TSM 0x00000200 /* Transmit Start Interrupt Mask */
+#define SPI_IMSK_RFM 0x00000400 /* Receive Finish Interrupt Mask */
+#define SPI_IMSK_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
+/* SPI_IMASKCL */
+#define SPI_IMSK_CLR_RUW 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_CLR_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_CLR_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
+#define SPI_IMSK_CLR_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
+#define SPI_IMSK_CLR_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
+#define SPI_IMSK_CLR_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
+#define SPI_IMSK_CLR_RSM 0x00000100 /* Receive Start Interrupt Mask */
+#define SPI_IMSK_CLR_TSM 0x00000200 /* Transmit Start Interrupt Mask */
+#define SPI_IMSK_CLR_RFM 0x00000400 /* Receive Finish Interrupt Mask */
+#define SPI_IMSK_CLR_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
+/* SPI_IMASKST */
+#define SPI_IMSK_SET_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_SET_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
+#define SPI_IMSK_SET_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
+#define SPI_IMSK_SET_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
+#define SPI_IMSK_SET_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
+#define SPI_IMSK_SET_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
+#define SPI_IMSK_SET_RSM 0x00000100 /* Receive Start Interrupt Mask */
+#define SPI_IMSK_SET_TSM 0x00000200 /* Transmit Start Interrupt Mask */
+#define SPI_IMSK_SET_RFM 0x00000400 /* Receive Finish Interrupt Mask */
+#define SPI_IMSK_SET_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
+/* SPI_STATUS */
+#define SPI_STAT_SPIF 0x00000001 /* SPI Finished */
+#define SPI_STAT_RUWM 0x00000002 /* Receive Urgent Water-Mark Breached */
+#define SPI_STAT_TUWM 0x00000004 /* Transmit Urgent Water-Mark Breached */
+#define SPI_STAT_ROE 0x00000010 /* Receive Over-Run Error Indication */
+#define SPI_STAT_TUE 0x00000020 /* Transmit Under-Run Error Indication */
+#define SPI_STAT_TCE 0x00000040 /* Transmit Collision Error Indication */
+#define SPI_STAT_MODF 0x00000080 /* Mode Fault Error Indication */
+#define SPI_STAT_RS 0x00000100 /* Receive Start Indication */
+#define SPI_STAT_TS 0x00000200 /* Transmit Start Indication */
+#define SPI_STAT_RF 0x00000400 /* Receive Finish Indication */
+#define SPI_STAT_TF 0x00000800 /* Transmit Finish Indication */
+#define SPI_STAT_RFS 0x00007000 /* SPI_RFIFO status */
+#define SPI_STAT_RFIFO_EMPTY 0x00000000 /* RFS: RFIFO Empty */
+#define SPI_STAT_RFIFO_25 0x00001000 /* RFS: RFIFO 25% Full */
+#define SPI_STAT_RFIFO_50 0x00002000 /* RFS: RFIFO 50% Full */
+#define SPI_STAT_RFIFO_75 0x00003000 /* RFS: RFIFO 75% Full */
+#define SPI_STAT_RFIFO_FULL 0x00004000 /* RFS: RFIFO Full */
+#define SPI_STAT_TFS 0x00070000 /* SPI_TFIFO status */
+#define SPI_STAT_TFIFO_FULL 0x00000000 /* TFS: TFIFO full */
+#define SPI_STAT_TFIFO_25 0x00010000 /* TFS: TFIFO 25% empty */
+#define SPI_STAT_TFIFO_50 0x00020000 /* TFS: TFIFO 50% empty */
+#define SPI_STAT_TFIFO_75 0x00030000 /* TFS: TFIFO 75% empty */
+#define SPI_STAT_TFIFO_EMPTY 0x00040000 /* TFS: TFIFO empty */
+#define SPI_STAT_FCS 0x00100000 /* Flow-Control Stall Indication */
+#define SPI_STAT_RFE 0x00400000 /* SPI_RFIFO Empty */
+#define SPI_STAT_TFF 0x00800000 /* SPI_TFIFO Full */
+/* SPI_ILAT */
+#define SPI_ILAT_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
+#define SPI_ILAT_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
+#define SPI_ILAT_ROI 0x00000010 /* Receive Over-Run Error Indication */
+#define SPI_ILAT_TUI 0x00000020 /* Transmit Under-Run Error Indication */
+#define SPI_ILAT_TCI 0x00000040 /* Transmit Collision Error Indication */
+#define SPI_ILAT_MFI 0x00000080 /* Mode Fault Error Indication */
+#define SPI_ILAT_RSI 0x00000100 /* Receive Start Indication */
+#define SPI_ILAT_TSI 0x00000200 /* Transmit Start Indication */
+#define SPI_ILAT_RFI 0x00000400 /* Receive Finish Indication */
+#define SPI_ILAT_TFI 0x00000800 /* Transmit Finish Indication */
+/* SPI_ILATCL */
+#define SPI_ILAT_CLR_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
+#define SPI_ILAT_CLR_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
+#define SPI_ILAT_CLR_ROI 0x00000010 /* Receive Over-Run Error Indication */
+#define SPI_ILAT_CLR_TUI 0x00000020 /* Transmit Under-Run Error Indication */
+#define SPI_ILAT_CLR_TCI 0x00000040 /* Transmit Collision Error Indication */
+#define SPI_ILAT_CLR_MFI 0x00000080 /* Mode Fault Error Indication */
+#define SPI_ILAT_CLR_RSI 0x00000100 /* Receive Start Indication */
+#define SPI_ILAT_CLR_TSI 0x00000200 /* Transmit Start Indication */
+#define SPI_ILAT_CLR_RFI 0x00000400 /* Receive Finish Indication */
+#define SPI_ILAT_CLR_TFI 0x00000800 /* Transmit Finish Indication */
+
+/*
+ * adi spi3 registers layout
+ */
+struct adi_spi_regs {
+ u32 revid;
+ u32 control;
+ u32 rx_control;
+ u32 tx_control;
+ u32 clock;
+ u32 delay;
+ u32 ssel;
+ u32 rwc;
+ u32 rwcr;
+ u32 twc;
+ u32 twcr;
+ u32 reserved0;
+ u32 emask;
+ u32 emaskcl;
+ u32 emaskst;
+ u32 reserved1;
+ u32 status;
+ u32 elat;
+ u32 elatcl;
+ u32 reserved2;
+ u32 rfifo;
+ u32 reserved3;
+ u32 tfifo;
+};
+
+#define MAX_CTRL_CS 8 /* cs in spi controller */
+
+/* device.platform_data for SSP controller devices */
+struct adi_spi3_master {
+ u16 num_chipselect;
+ u16 pin_req[7];
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct adi_spi3_chip {
+ u32 control;
+ u16 cs_chg_udelay; /* Some devices require 16-bit delays */
+ u32 tx_dummy_val; /* tx value for rx only transfer */
+ bool enable_dma;
+};
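+
+/*
+ * Illustrative controller_data for one slave device (the particular bit
+ * selection below is hypothetical; see the SPI_CTL_* definitions above):
+ *
+ *    static struct adi_spi3_chip my_chip_info = {
+ *        .control       = SPI_CTL_SIZE08 | SPI_CTL_CPHA | SPI_CTL_CPOL,
+ *        .cs_chg_udelay = 0,
+ *        .enable_dma    = true,
+ *    };
+ */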
+
+#endif /* _ADI_SPI3_H_ */
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
new file mode 100644
index 000000000..2f694f384
--- /dev/null
+++ b/include/linux/spi/ads7846.h
@@ -0,0 +1,61 @@
+/* linux/spi/ads7846.h */
+
+/* Touchscreen characteristics vary between boards and models. The
+ * platform_data for the device's "struct device" holds this information.
+ *
+ * It's OK if the min/max values are zero.
+ */
+enum ads7846_filter {
+ ADS7846_FILTER_OK,
+ ADS7846_FILTER_REPEAT,
+ ADS7846_FILTER_IGNORE,
+};
+
+struct ads7846_platform_data {
+ u16 model; /* 7843, 7845, 7846, 7873. */
+ u16 vref_delay_usecs; /* 0 for external vref; etc */
+ u16 vref_mv; /* external vref value, milliVolts
+ * ads7846: if 0, use internal vref */
+ bool keep_vref_on; /* set to keep vref on for differential
+ * measurements as well */
+ bool swap_xy; /* swap x and y axes */
+
+ /* Settling time of the analog signals; a function of Vcc and the
+ * capacitance on the X/Y drivers. If set to non-zero, two samples
+ * are taken with settle_delay us apart, and the second one is used.
+ * ~150 uSec with 0.01uF caps.
+ */
+ u16 settle_delay_usecs;
+
+ /* If set to non-zero, after samples are taken this delay is applied
+ * and penirq is rechecked, to help avoid false events. This value
+ * is affected by the material used to build the touch layer.
+ */
+ u16 penirq_recheck_delay_usecs;
+
+ u16 x_plate_ohms;
+ u16 y_plate_ohms;
+
+ u16 x_min, x_max;
+ u16 y_min, y_max;
+ u16 pressure_min, pressure_max;
+
+ u16 debounce_max; /* max number of additional readings
+ * per sample */
+ u16 debounce_tol; /* tolerance used for filtering */
+ u16 debounce_rep; /* additional consecutive good readings
+ * required after the first two */
+ int gpio_pendown; /* the GPIO used to decide the pendown
+ * state if get_pendown_state == NULL */
+ int gpio_pendown_debounce; /* platform specific debounce time for
+ * the gpio_pendown */
+ int (*get_pendown_state)(void);
+ int (*filter_init) (const struct ads7846_platform_data *pdata,
+ void **filter_data);
+ int (*filter) (void *filter_data, int data_idx, int *val);
+ void (*filter_cleanup)(void *filter_data);
+ void (*wait_for_sync)(void);
+ bool wakeup;
+ unsigned long irq_flags;
+};
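+
+/*
+ * Example platform data (illustrative only; every value below is board
+ * specific and hypothetical):
+ *
+ *    static struct ads7846_platform_data board_ts_info = {
+ *        .model        = 7846,
+ *        .x_plate_ohms = 580,
+ *        .y_plate_ohms = 410,
+ *        .debounce_max = 10,
+ *        .debounce_tol = 3,
+ *        .debounce_rep = 1,
+ *    };
+ */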
+
diff --git a/include/linux/spi/at73c213.h b/include/linux/spi/at73c213.h
new file mode 100644
index 000000000..0f20a70e5
--- /dev/null
+++ b/include/linux/spi/at73c213.h
@@ -0,0 +1,25 @@
+/*
+ * Board-specific data used to set up AT73c213 audio DAC driver.
+ */
+
+#ifndef __LINUX_SPI_AT73C213_H
+#define __LINUX_SPI_AT73C213_H
+
+/**
+ * at73c213_board_info - how the external DAC is wired to the device.
+ *
+ * @ssc_id: SSC platform_driver id the DAC shall use to stream the audio.
+ * @dac_clk: the external clock used to provide master clock to the DAC.
+ * @shortname: a short description for the DAC, seen by userspace tools.
+ *
+ * This struct contains the configuration of the hardware connection to the
+ * external DAC. The DAC needs a master clock and an I2S audio stream. It also
+ * provides a name which is used to identify it in userspace tools.
+ */
+struct at73c213_board_info {
+ int ssc_id;
+ struct clk *dac_clk;
+ char shortname[32];
+};
+
+#endif /* __LINUX_SPI_AT73C213_H */
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
new file mode 100644
index 000000000..b63fe6f5f
--- /dev/null
+++ b/include/linux/spi/at86rf230.h
@@ -0,0 +1,28 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ */
+#ifndef AT86RF230_H
+#define AT86RF230_H
+
+struct at86rf230_platform_data {
+ int rstn;
+ int slp_tr;
+ int dig2;
+ u8 xtal_trim;
+};
+
+#endif
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
new file mode 100644
index 000000000..e741e8baa
--- /dev/null
+++ b/include/linux/spi/cc2520.h
@@ -0,0 +1,27 @@
+/* Header file for cc2520 radio driver
+ *
+ * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
+ * Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
+ * P Sowjanya <sowjanyap@cdac.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __CC2520_H
+#define __CC2520_H
+
+struct cc2520_platform_data {
+ int fifo;
+ int fifop;
+ int cca;
+ int sfd;
+ int reset;
+ int vreg;
+ bool amplified;
+};
+
+#endif
diff --git a/include/linux/spi/corgi_lcd.h b/include/linux/spi/corgi_lcd.h
new file mode 100644
index 000000000..6692b3418
--- /dev/null
+++ b/include/linux/spi/corgi_lcd.h
@@ -0,0 +1,20 @@
+#ifndef __LINUX_SPI_CORGI_LCD_H
+#define __LINUX_SPI_CORGI_LCD_H
+
+#define CORGI_LCD_MODE_QVGA 1
+#define CORGI_LCD_MODE_VGA 2
+
+struct corgi_lcd_platform_data {
+ int init_mode;
+ int max_intensity;
+ int default_intensity;
+ int limit_mask;
+
+ int gpio_backlight_on; /* -1 if n/a */
+ int gpio_backlight_cont; /* -1 if n/a */
+
+ void (*notify)(int intensity);
+ void (*kick_battery)(void);
+};
+
+#endif /* __LINUX_SPI_CORGI_LCD_H */
diff --git a/include/linux/spi/ds1305.h b/include/linux/spi/ds1305.h
new file mode 100644
index 000000000..287ec830e
--- /dev/null
+++ b/include/linux/spi/ds1305.h
@@ -0,0 +1,35 @@
+#ifndef __LINUX_SPI_DS1305_H
+#define __LINUX_SPI_DS1305_H
+
+/*
+ * One-time configuration for ds1305 and ds1306 RTC chips.
+ *
+ * Put a pointer to this in spi_board_info.platform_data if you want to
+ * be sure that Linux (re)initializes this as needed ... after losing
+ * backup power, and potentially on the first boot.
+ */
+struct ds1305_platform_data {
+
+ /* Trickle charge configuration: it's OK to leave out the MAGIC
+ * bitmask; mask in either DS1 or DS2, and then one of 2K/4K/8K
+ * (see the example below this struct).
+ */
+#define DS1305_TRICKLE_MAGIC 0xa0
+#define DS1305_TRICKLE_DS2 0x08 /* two diodes */
+#define DS1305_TRICKLE_DS1 0x04 /* one diode */
+#define DS1305_TRICKLE_2K 0x01 /* 2 KOhm resistance */
+#define DS1305_TRICKLE_4K 0x02 /* 4 KOhm resistance */
+#define DS1305_TRICKLE_8K 0x03 /* 8 KOhm resistance */
+ u8 trickle;
+
+ /* set only on ds1306 parts */
+ bool is_ds1306;
+
+ /* ds1306 only: enable 1 Hz output */
+ bool en_1hz;
+
+ /* REVISIT: the driver currently expects nINT0 to be wired
+ * as the alarm IRQ. ALM1 may also need to be set up ...
+ */
+};
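+
+/*
+ * Example trickle charger setting (illustrative; whether trickle charging
+ * is appropriate at all depends on the board's backup supply):
+ *
+ *    .trickle = DS1305_TRICKLE_MAGIC | DS1305_TRICKLE_DS1 | DS1305_TRICKLE_2K,
+ *
+ * i.e. one series diode and a 2 KOhm resistor.
+ */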
+
+#endif /* __LINUX_SPI_DS1305_H */
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h
new file mode 100644
index 000000000..403e007ae
--- /dev/null
+++ b/include/linux/spi/eeprom.h
@@ -0,0 +1,38 @@
+#ifndef __LINUX_SPI_EEPROM_H
+#define __LINUX_SPI_EEPROM_H
+
+#include <linux/memory.h>
+
+/*
+ * Put one of these structures in platform_data for SPI EEPROMS handled
+ * by the "at25" driver. On SPI, most EEPROMS understand the same core
+ * command set. If you need to support EEPROMs that don't yet fit, add
+ * flags to support those protocol options. These values all come from
+ * the chip datasheets.
+ */
+struct spi_eeprom {
+ u32 byte_len;
+ char name[10];
+ u16 page_size; /* for writes */
+ u16 flags;
+#define EE_ADDR1 0x0001 /* 8 bit addrs */
+#define EE_ADDR2 0x0002 /* 16 bit addrs */
+#define EE_ADDR3 0x0004 /* 24 bit addrs */
+#define EE_READONLY 0x0008 /* disallow writes */
+
+ /*
+ * Certain EEPROMs have a size that is larger than the number of address
+ * bytes would allow (e.g. the M95040 from ST, which has a 512-byte
+ * capacity but uses only one address byte (A0 to A7) for addressing). For
+ * the extra address bit (A8, A16 or A24) bit 3 of the instruction byte
+ * is used. This instruction bit is normally defined as don't care for
+ * other AT25 like chips.
+ */
+#define EE_INSTR_BIT3_IS_ADDR 0x0010
+
+ /* for exporting this chip's data to other kernel code */
+ void (*setup)(struct memory_accessor *mem, void *context);
+ void *context;
+};
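+
+/*
+ * Example (illustrative; the geometry below describes a hypothetical
+ * 32 KiB part with 16-bit addressing and 64-byte write pages):
+ *
+ *    static struct spi_eeprom my_eeprom = {
+ *        .byte_len  = 32768,
+ *        .name      = "myeeprom",
+ *        .page_size = 64,
+ *        .flags     = EE_ADDR2,
+ *    };
+ */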
+
+#endif /* __LINUX_SPI_EEPROM_H */
diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h
new file mode 100644
index 000000000..3f22932e6
--- /dev/null
+++ b/include/linux/spi/flash.h
@@ -0,0 +1,31 @@
+#ifndef LINUX_SPI_FLASH_H
+#define LINUX_SPI_FLASH_H
+
+struct mtd_partition;
+
+/**
+ * struct flash_platform_data: board-specific flash data
+ * @name: optional flash device name (eg, as used with mtdparts=)
+ * @parts: optional array of mtd_partitions for static partitioning
+ * @nr_parts: number of mtd_partitions for static partitioning
+ * @type: optional flash device type (e.g. m25p80 vs m25p64), for use
+ * with chips that can't be queried for JEDEC or other IDs
+ *
+ * Board init code (in arch/.../mach-xxx/board-yyy.c files) can
+ * provide information about SPI flash parts (such as DataFlash) to
+ * help set up the device and its appropriate default partitioning.
+ *
+ * Note that for DataFlash, sizes for pages, blocks, and sectors are
+ * rarely powers of two; and partitions should be sector-aligned.
+ */
+struct flash_platform_data {
+ char *name;
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+
+ char *type;
+
+ /* we'll likely add more ... use JEDEC IDs, etc */
+};
+
+#endif
diff --git a/include/linux/spi/ifx_modem.h b/include/linux/spi/ifx_modem.h
new file mode 100644
index 000000000..394fec9e7
--- /dev/null
+++ b/include/linux/spi/ifx_modem.h
@@ -0,0 +1,19 @@
+#ifndef LINUX_IFX_MODEM_H
+#define LINUX_IFX_MODEM_H
+
+struct ifx_modem_platform_data {
+ unsigned short rst_out; /* modem reset out */
+ unsigned short pwr_on; /* power on */
+ unsigned short rst_pmu; /* reset modem */
+ unsigned short tx_pwr; /* modem power threshold */
+ unsigned short srdy; /* SRDY */
+ unsigned short mrdy; /* MRDY */
+ unsigned char modem_type; /* Modem type */
+ unsigned long max_hz; /* max SPI frequency */
+ unsigned short use_dma:1; /* spi protocol driver supplies
+ dma-able addrs */
+};
+#define IFX_MODEM_6160 1
+#define IFX_MODEM_6260 2
+
+#endif
diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h
new file mode 100644
index 000000000..e69e9b51b
--- /dev/null
+++ b/include/linux/spi/l4f00242t03.h
@@ -0,0 +1,25 @@
+/*
+ * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD
+ *
+ * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
+ * Based on Marek Vasut work in lms283gf05.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
+#define _INCLUDE_LINUX_SPI_L4F00242T03_H_
+
+struct l4f00242t03_pdata {
+ unsigned int reset_gpio;
+ unsigned int data_enable_gpio;
+};
+
+#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */
diff --git a/include/linux/spi/libertas_spi.h b/include/linux/spi/libertas_spi.h
new file mode 100644
index 000000000..1b5d5384f
--- /dev/null
+++ b/include/linux/spi/libertas_spi.h
@@ -0,0 +1,29 @@
+/*
+ * board-specific data for the libertas_spi driver.
+ *
+ * Copyright 2008 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+#ifndef _LIBERTAS_SPI_H_
+#define _LIBERTAS_SPI_H_
+
+struct spi_device;
+
+struct libertas_spi_platform_data {
+ /* There are two ways to read data from the WLAN module's SPI
+ * interface. Setting 0 or 1 here controls which one is used.
+ *
+ * Usually you want to set use_dummy_writes = 1.
+ * However, if that doesn't work or if you are using a slow SPI clock
+ * speed, you may want to use 0 here. */
+ u16 use_dummy_writes;
+
+ /* Board specific setup/teardown */
+ int (*setup)(struct spi_device *spi);
+ int (*teardown)(struct spi_device *spi);
+};
+#endif
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
new file mode 100644
index 000000000..fdd1d1d51
--- /dev/null
+++ b/include/linux/spi/lms283gf05.h
@@ -0,0 +1,24 @@
+/*
+ * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD
+ *
+ * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
+#define _INCLUDE_LINUX_SPI_LMS283GF05_H_
+
+struct lms283gf05_pdata {
+ unsigned long reset_gpio;
+ bool reset_inverted;
+};
+
+#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */
diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h
new file mode 100644
index 000000000..bcaa2f762
--- /dev/null
+++ b/include/linux/spi/max7301.h
@@ -0,0 +1,35 @@
+#ifndef LINUX_SPI_MAX7301_H
+#define LINUX_SPI_MAX7301_H
+
+#include <linux/gpio.h>
+
+/*
+ * Some registers must be read back to modify.
+ * To save time we cache them here in memory
+ */
+struct max7301 {
+ struct mutex lock;
+ u8 port_config[8]; /* field 0 is unused */
+ u32 out_level; /* cached output levels */
+ u32 input_pullup_active;
+ struct gpio_chip chip;
+ struct device *dev;
+ int (*write)(struct device *dev, unsigned int reg, unsigned int val);
+ int (*read)(struct device *dev, unsigned int reg);
+};
+
+struct max7301_platform_data {
+ /* number assigned to the first GPIO */
+ unsigned base;
+ /*
+ * bitmask controlling the pullup configuration,
+ *
+ * _note_ the 4 lowest bits are unused, because the first 4
+	 * ports of the controller are not used either.
+ */
+ u32 input_pullup_active;
+};
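+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface);
+ * the GPIO base and pullup mask are hypothetical board choices.
+ */
+static struct max7301_platform_data max7301_pdata = {
+        .base = 160,                                    /* first expander GPIO number */
+        .input_pullup_active = BIT(8) | BIT(15),        /* pullups on ports 8 and 15 */
+};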
+
+extern int __max730x_remove(struct device *dev);
+extern int __max730x_probe(struct max7301 *ts);
+#endif
diff --git a/include/linux/spi/mc33880.h b/include/linux/spi/mc33880.h
new file mode 100644
index 000000000..82ffccd6f
--- /dev/null
+++ b/include/linux/spi/mc33880.h
@@ -0,0 +1,10 @@
+#ifndef LINUX_SPI_MC33880_H
+#define LINUX_SPI_MC33880_H
+
+struct mc33880_platform_data {
+ /* number assigned to the first GPIO */
+ unsigned base;
+};
+
+#endif
+
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
new file mode 100644
index 000000000..aa07d7b32
--- /dev/null
+++ b/include/linux/spi/mcp23s08.h
@@ -0,0 +1,43 @@
+
+/* FIXME driver should be able to handle IRQs... */
+
+struct mcp23s08_chip_info {
+ bool is_present; /* true if populated */
+ unsigned pullups; /* BIT(x) means enable pullup x */
+};
+
+struct mcp23s08_platform_data {
+ /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI
+ * chipselect, each providing 1 gpio_chip instance with 8 gpios.
+	 * For mcp23s17, up to 8 slaves (numbered 0..7) can share one SPI
+ * chipselect, each providing 1 gpio_chip (port A + port B) with
+ * 16 gpios.
+ */
+ struct mcp23s08_chip_info chip[8];
+
+ /* "base" is the number of the first GPIO. Dynamic assignment is
+ * not currently supported, and even if there are gaps in chip
+ * addressing the GPIO numbers are sequential .. so for example
+ * if only slaves 0 and 3 are present, their GPIOs range from
+ * base to base+15 (or base+31 for s17 variant).
+ */
+ unsigned base;
+	/* Marks the device as an interrupt controller.
+ * NOTE: The interrupt functionality is only supported for i2c
+ * versions of the chips. The spi chips can also do the interrupts,
+ * but this is not supported by the linux driver yet.
+ */
+ bool irq_controller;
+
+ /* Sets the mirror flag in the IOCON register. Devices
+ * with two interrupt outputs (these are the devices ending with 17 and
+ * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and
+ * IO 8-15 are bank 2. These chips have two different interrupt outputs:
+ * One for bank 1 and another for bank 2. If irq-mirror is set, both
+ * interrupts are generated regardless of the bank that an input change
+	 * occurred on. If it is not set, the interrupts are only generated for
+ * the bank they belong to.
+ * On devices with only one interrupt output this property is useless.
+ */
+ bool mirror;
+};
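+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * two MCP23S08 expanders at slave addresses 0 and 3 on one chipselect.
+ * The GPIO base of 200 is a hypothetical board choice.
+ */
+static struct mcp23s08_platform_data mcp_expander_pdata = {
+        .chip[0] = { .is_present = true, .pullups = BIT(0) | BIT(1) },
+        .chip[3] = { .is_present = true, .pullups = 0 },
+        .base = 200,    /* GPIOs 200..215, per the "base" comment above */
+};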
diff --git a/include/linux/spi/mmc_spi.h b/include/linux/spi/mmc_spi.h
new file mode 100644
index 000000000..274bc0fa0
--- /dev/null
+++ b/include/linux/spi/mmc_spi.h
@@ -0,0 +1,64 @@
+#ifndef __LINUX_SPI_MMC_SPI_H
+#define __LINUX_SPI_MMC_SPI_H
+
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+
+struct device;
+struct mmc_host;
+
+#define MMC_SPI_USE_CD_GPIO (1 << 0)
+#define MMC_SPI_USE_RO_GPIO (1 << 1)
+#define MMC_SPI_CD_GPIO_ACTIVE_LOW (1 << 2)
+#define MMC_SPI_RO_GPIO_ACTIVE_LOW (1 << 3)
+
+/* Put this in platform_data of a device being used to manage an MMC/SD
+ * card slot. (Modeled after PXA mmc glue; see that for usage examples.)
+ *
+ * REVISIT This is not a spi-specific notion. Any card slot should be
+ * able to handle it. If the MMC core doesn't adopt this kind of notion,
+ * switch the "struct device *" parameters over to "struct spi_device *".
+ */
+struct mmc_spi_platform_data {
+ /* driver activation and (optional) card detect irq hookup */
+ int (*init)(struct device *,
+ irqreturn_t (*)(int, void *),
+ void *);
+ void (*exit)(struct device *, void *);
+
+ /*
+ * Card Detect and Read Only GPIOs. To enable debouncing on the card
+ * detect GPIO, set the cd_debounce to the debounce time in
+ * microseconds.
+ */
+ unsigned int flags;
+ unsigned int cd_gpio;
+ unsigned int cd_debounce;
+ unsigned int ro_gpio;
+
+ /* Capabilities to pass into mmc core (e.g. MMC_CAP_NEEDS_POLL). */
+ unsigned long caps;
+ unsigned long caps2;
+
+ /* how long to debounce card detect, in msecs */
+ u16 detect_delay;
+
+ /* power management */
+ u16 powerup_msecs; /* delay of up to 250 msec */
+ u32 ocr_mask; /* available voltages */
+ void (*setpower)(struct device *, unsigned int maskval);
+};
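+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * a polled 3.3V slot.  MMC_CAP_NEEDS_POLL and the MMC_VDD_* bits come from
+ * <linux/mmc/host.h>; the values chosen are hypothetical board decisions.
+ */
+static struct mmc_spi_platform_data mmc_slot_pdata = {
+        .caps = MMC_CAP_NEEDS_POLL,     /* no card-detect IRQ wired up */
+        .detect_delay = 100,            /* debounce card detect for 100 ms */
+        .powerup_msecs = 100,
+        .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+};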
+
+#ifdef CONFIG_OF
+extern struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi);
+extern void mmc_spi_put_pdata(struct spi_device *spi);
+#else
+static inline struct mmc_spi_platform_data *
+mmc_spi_get_pdata(struct spi_device *spi)
+{
+ return spi->dev.platform_data;
+}
+static inline void mmc_spi_put_pdata(struct spi_device *spi) {}
+#endif /* CONFIG_OF */
+
+#endif /* __LINUX_SPI_MMC_SPI_H */
diff --git a/include/linux/spi/mxs-spi.h b/include/linux/spi/mxs-spi.h
new file mode 100644
index 000000000..381d368b9
--- /dev/null
+++ b/include/linux/spi/mxs-spi.h
@@ -0,0 +1,144 @@
+/*
+ * include/linux/spi/mxs-spi.h
+ *
+ * Freescale i.MX233/i.MX28 SPI controller register definition
+ *
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ * Copyright 2009-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_SPI_MXS_SPI_H__
+#define __LINUX_SPI_MXS_SPI_H__
+
+#include <linux/dmaengine.h>
+
+#define ssp_is_old(host) ((host)->devid == IMX23_SSP)
+
+/* SSP registers */
+#define HW_SSP_CTRL0 0x000
+#define BM_SSP_CTRL0_RUN (1 << 29)
+#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28)
+#define BM_SSP_CTRL0_LOCK_CS (1 << 27)
+#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26)
+#define BM_SSP_CTRL0_READ (1 << 25)
+#define BM_SSP_CTRL0_DATA_XFER (1 << 24)
+#define BP_SSP_CTRL0_BUS_WIDTH 22
+#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22)
+#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21)
+#define BM_SSP_CTRL0_WAIT_FOR_CMD (1 << 20)
+#define BM_SSP_CTRL0_LONG_RESP (1 << 19)
+#define BM_SSP_CTRL0_GET_RESP (1 << 17)
+#define BM_SSP_CTRL0_ENABLE (1 << 16)
+#define BP_SSP_CTRL0_XFER_COUNT 0
+#define BM_SSP_CTRL0_XFER_COUNT 0xffff
+#define HW_SSP_CMD0 0x010
+#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25)
+#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22)
+#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21)
+#define BM_SSP_CMD0_APPEND_8CYC (1 << 20)
+#define BP_SSP_CMD0_BLOCK_SIZE 16
+#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16)
+#define BP_SSP_CMD0_BLOCK_COUNT 8
+#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8)
+#define BP_SSP_CMD0_CMD 0
+#define BM_SSP_CMD0_CMD 0xff
+#define HW_SSP_CMD1 0x020
+#define HW_SSP_XFER_SIZE 0x030
+#define HW_SSP_BLOCK_SIZE 0x040
+#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT 4
+#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
+#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE 0
+#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE 0xf
+#define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070)
+#define BP_SSP_TIMING_TIMEOUT 16
+#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
+#define BP_SSP_TIMING_CLOCK_DIVIDE 8
+#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
+#define BF_SSP_TIMING_CLOCK_DIVIDE(v) \
+ (((v) << 8) & BM_SSP_TIMING_CLOCK_DIVIDE)
+#define BP_SSP_TIMING_CLOCK_RATE 0
+#define BM_SSP_TIMING_CLOCK_RATE 0xff
+#define BF_SSP_TIMING_CLOCK_RATE(v) \
+ (((v) << 0) & BM_SSP_TIMING_CLOCK_RATE)
+#define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080)
+#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
+#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
+#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
+#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28)
+#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27)
+#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26)
+#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25)
+#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24)
+#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23)
+#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22)
+#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21)
+#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20)
+#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17)
+#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16)
+#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15)
+#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14)
+#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13)
+#define BM_SSP_CTRL1_PHASE (1 << 10)
+#define BM_SSP_CTRL1_POLARITY (1 << 9)
+#define BP_SSP_CTRL1_WORD_LENGTH 4
+#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
+#define BF_SSP_CTRL1_WORD_LENGTH(v) \
+ (((v) << 4) & BM_SSP_CTRL1_WORD_LENGTH)
+#define BV_SSP_CTRL1_WORD_LENGTH__FOUR_BITS 0x3
+#define BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS 0x7
+#define BV_SSP_CTRL1_WORD_LENGTH__SIXTEEN_BITS 0xF
+#define BP_SSP_CTRL1_SSP_MODE 0
+#define BM_SSP_CTRL1_SSP_MODE 0xf
+#define BF_SSP_CTRL1_SSP_MODE(v) \
+ (((v) << 0) & BM_SSP_CTRL1_SSP_MODE)
+#define BV_SSP_CTRL1_SSP_MODE__SPI 0x0
+#define BV_SSP_CTRL1_SSP_MODE__SSI 0x1
+#define BV_SSP_CTRL1_SSP_MODE__SD_MMC 0x3
+#define BV_SSP_CTRL1_SSP_MODE__MS 0x4
+
+#define HW_SSP_DATA(h) (ssp_is_old(h) ? 0x070 : 0x090)
+
+#define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0)
+#define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0)
+#define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0)
+#define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0)
+#define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100)
+#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
+#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
+#define BM_SSP_STATUS_FIFO_EMPTY (1 << 5)
+
+#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
+
+#define SSP_PIO_NUM 3
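+
+/*
+ * Usage sketch (illustrative only): BF_SSP() builds a register field from
+ * the BP_/BM_ pairs above.  A driver holding a struct mxs_ssp *ssp (defined
+ * below) could program CTRL1 for 8-bit SPI mode like this:
+ *
+ *      u32 ctrl1 = BF_SSP(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS, CTRL1_WORD_LENGTH) |
+ *                  BF_SSP(BV_SSP_CTRL1_SSP_MODE__SPI, CTRL1_SSP_MODE);
+ *      writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp));
+ */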
+
+enum mxs_ssp_id {
+ IMX23_SSP,
+ IMX28_SSP,
+};
+
+struct mxs_ssp {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned int clk_rate;
+ enum mxs_ssp_id devid;
+
+ struct dma_chan *dmach;
+ unsigned int dma_dir;
+ enum dma_transfer_direction slave_dirn;
+ u32 ssp_pio_words[SSP_PIO_NUM];
+};
+
+void mxs_ssp_set_clk_rate(struct mxs_ssp *ssp, unsigned int rate);
+
+#endif /* __LINUX_SPI_MXS_SPI_H__ */
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
new file mode 100644
index 000000000..6d36dacec
--- /dev/null
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __linux_pxa2xx_spi_h
+#define __linux_pxa2xx_spi_h
+
+#include <linux/pxa2xx_ssp.h>
+
+#define PXA2XX_CS_ASSERT (0x01)
+#define PXA2XX_CS_DEASSERT (0x02)
+
+struct dma_chan;
+
+/* device.platform_data for SSP controller devices */
+struct pxa2xx_spi_master {
+ u32 clock_enable;
+ u16 num_chipselect;
+ u8 enable_dma;
+
+ /* DMA engine specific config */
+ bool (*dma_filter)(struct dma_chan *chan, void *param);
+ void *tx_param;
+ void *rx_param;
+
+ /* For non-PXA arches */
+ struct ssp_device ssp;
+};
+
+/* spi_board_info.controller_data for SPI slave devices,
+ * copied to spi_device.platform_data ... mostly for dma tuning
+ */
+struct pxa2xx_spi_chip {
+ u8 tx_threshold;
+ u8 tx_hi_threshold;
+ u8 rx_threshold;
+ u8 dma_burst_size;
+ u32 timeout;
+ u8 enable_loopback;
+ int gpio_cs;
+ void (*cs_control)(u32 command);
+};
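+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * per-device tuning passed through spi_board_info.controller_data.  The
+ * threshold, burst and timeout values are hypothetical.
+ */
+static struct pxa2xx_spi_chip foo_adc_chip_info = {
+        .tx_threshold = 8,
+        .rx_threshold = 8,
+        .dma_burst_size = 8,
+        .timeout = 235,
+        .gpio_cs = -1,  /* chipselect driven by the controller itself */
+};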
+
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
+
+#include <linux/clk.h>
+
+extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
+
+#endif
+#endif
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
new file mode 100644
index 000000000..a693188cc
--- /dev/null
+++ b/include/linux/spi/rspi.h
@@ -0,0 +1,26 @@
+/*
+ * Renesas SPI driver
+ *
+ * Copyright (C) 2012 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_SPI_RENESAS_SPI_H__
+#define __LINUX_SPI_RENESAS_SPI_H__
+
+struct rspi_plat_data {
+ unsigned int dma_tx_id;
+ unsigned int dma_rx_id;
+
+ u16 num_chipselect;
+};
+
+#endif
diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h
new file mode 100644
index 000000000..ca271c06c
--- /dev/null
+++ b/include/linux/spi/s3c24xx.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 - SPI Controller platform_device info
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __LINUX_SPI_S3C24XX_H
+#define __LINUX_SPI_S3C24XX_H __FILE__
+
+struct s3c2410_spi_info {
+ int pin_cs; /* simple gpio cs */
+ unsigned int num_cs; /* total chipselects */
+ int bus_num; /* bus number to use. */
+
+ unsigned int use_fiq:1; /* use fiq */
+
+ void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable);
+ void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
+};
+
+extern int s3c24xx_set_fiq(unsigned int irq, bool on);
+
+#endif /* __LINUX_SPI_S3C24XX_H */
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
new file mode 100644
index 000000000..aa0d440ab
--- /dev/null
+++ b/include/linux/spi/sh_hspi.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2011 Kuninori Morimoto
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SH_HSPI_H
+#define SH_HSPI_H
+
+struct sh_hspi_info {
+};
+
+#endif
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
new file mode 100644
index 000000000..b087a85f5
--- /dev/null
+++ b/include/linux/spi/sh_msiof.h
@@ -0,0 +1,14 @@
+#ifndef __SPI_SH_MSIOF_H__
+#define __SPI_SH_MSIOF_H__
+
+struct sh_msiof_spi_info {
+ int tx_fifo_override;
+ int rx_fifo_override;
+ u16 num_chipselect;
+ unsigned int dma_tx_id;
+ unsigned int dma_rx_id;
+ u32 dtdl;
+ u32 syncdl;
+};
+
+#endif /* __SPI_SH_MSIOF_H__ */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
new file mode 100644
index 000000000..d67307234
--- /dev/null
+++ b/include/linux/spi/spi.h
@@ -0,0 +1,1056 @@
+/*
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_SPI_H
+#define __LINUX_SPI_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/completion.h>
+#include <linux/scatterlist.h>
+
+struct dma_chan;
+
+/*
+ * INTERFACES between SPI master-side drivers and SPI infrastructure.
+ * (There's no SPI slave support for Linux yet...)
+ */
+extern struct bus_type spi_bus_type;
+
+/**
+ * struct spi_device - Master side proxy for an SPI slave device
+ * @dev: Driver model representation of the device.
+ * @master: SPI controller used with the device.
+ * @max_speed_hz: Maximum clock rate to be used with this chip
+ * (on this board); may be changed by the device's driver.
+ * The spi_transfer.speed_hz can override this for each transfer.
+ * @chip_select: Chipselect, distinguishing chips handled by @master.
+ * @mode: The spi mode defines how data is clocked out and in.
+ * This may be changed by the device's driver.
+ * The "active low" default for chipselect mode can be overridden
+ * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
+ * each word in a transfer (by specifying SPI_LSB_FIRST).
+ * @bits_per_word: Data transfers involve one or more words; word sizes
+ * like eight or 12 bits are common. In-memory wordsizes are
+ * powers of two bytes (e.g. 20 bit samples use 32 bits).
+ * This may be changed by the device's driver, or left at the
+ * default (0) indicating protocol words are eight bit bytes.
+ * The spi_transfer.bits_per_word can override this for each transfer.
+ * @irq: Negative, or the number passed to request_irq() to receive
+ * interrupts from this device.
+ * @controller_state: Controller's runtime state
+ * @controller_data: Board-specific definitions for controller, such as
+ * FIFO initialization parameters; from board_info.controller_data
+ * @modalias: Name of the driver to use with this device, or an alias
+ * for that name. This appears in the sysfs "modalias" attribute
+ * for driver coldplugging, and in uevents used for hotplugging
+ * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
+ *	not using a GPIO line)
+ *
+ * A @spi_device is used to interchange data between an SPI slave
+ * (usually a discrete chip) and CPU memory.
+ *
+ * In @dev, the platform_data is used to hold information about this
+ * device that's meaningful to the device's protocol driver, but not
+ * to its controller. One example might be an identifier for a chip
+ * variant with slightly different functionality; another might be
+ * information about how this particular board wires the chip's pins.
+ */
+struct spi_device {
+ struct device dev;
+ struct spi_master *master;
+ u32 max_speed_hz;
+ u8 chip_select;
+ u8 bits_per_word;
+ u16 mode;
+#define SPI_CPHA 0x01 /* clock phase */
+#define SPI_CPOL 0x02 /* clock polarity */
+#define SPI_MODE_0 (0|0) /* (original MicroWire) */
+#define SPI_MODE_1 (0|SPI_CPHA)
+#define SPI_MODE_2 (SPI_CPOL|0)
+#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
+#define SPI_CS_HIGH 0x04 /* chipselect active high? */
+#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
+#define SPI_3WIRE 0x10 /* SI/SO signals shared */
+#define SPI_LOOP 0x20 /* loopback mode */
+#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
+#define SPI_READY 0x80 /* slave pulls low to pause */
+#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */
+#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
+#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
+#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
+ int irq;
+ void *controller_state;
+ void *controller_data;
+ char modalias[SPI_NAME_SIZE];
+ int cs_gpio; /* chip select gpio */
+
+ /*
+ * likely need more hooks for more protocol options affecting how
+ * the controller talks to each chip, like:
+ * - memory packing (12 bit samples into low bits, others zeroed)
+ * - priority
+ * - drop chipselect after each word
+ * - chipselect delays
+ * - ...
+ */
+};
+
+static inline struct spi_device *to_spi_device(struct device *dev)
+{
+ return dev ? container_of(dev, struct spi_device, dev) : NULL;
+}
+
+/* most drivers won't need to care about device refcounting */
+static inline struct spi_device *spi_dev_get(struct spi_device *spi)
+{
+ return (spi && get_device(&spi->dev)) ? spi : NULL;
+}
+
+static inline void spi_dev_put(struct spi_device *spi)
+{
+ if (spi)
+ put_device(&spi->dev);
+}
+
+/* ctldata is for the bus_master driver's runtime state */
+static inline void *spi_get_ctldata(struct spi_device *spi)
+{
+ return spi->controller_state;
+}
+
+static inline void spi_set_ctldata(struct spi_device *spi, void *state)
+{
+ spi->controller_state = state;
+}
+
+/* device driver data */
+
+static inline void spi_set_drvdata(struct spi_device *spi, void *data)
+{
+ dev_set_drvdata(&spi->dev, data);
+}
+
+static inline void *spi_get_drvdata(struct spi_device *spi)
+{
+ return dev_get_drvdata(&spi->dev);
+}
+
+struct spi_message;
+struct spi_transfer;
+
+/**
+ * struct spi_driver - Host side "protocol" driver
+ * @id_table: List of SPI devices supported by this driver
+ * @probe: Binds this driver to the spi device. Drivers can verify
+ * that the device is actually present, and may need to configure
+ * characteristics (such as bits_per_word) which weren't needed for
+ * the initial configuration done during system setup.
+ * @remove: Unbinds this driver from the spi device
+ * @shutdown: Standard shutdown callback used during system state
+ * transitions such as powerdown/halt and kexec
+ * @driver: SPI device drivers should initialize the name and owner
+ * field of this structure.
+ *
+ * This represents the kind of device driver that uses SPI messages to
+ * interact with the hardware at the other end of a SPI link. It's called
+ * a "protocol" driver because it works through messages rather than talking
+ * directly to SPI hardware (which is what the underlying SPI controller
+ * driver does to pass those messages). These protocols are defined in the
+ * specification for the device(s) supported by the driver.
+ *
+ * As a rule, those device protocols represent the lowest level interface
+ * supported by a driver, and it will support upper level interfaces too.
+ * Examples of such upper levels include frameworks like MTD, networking,
+ * MMC, RTC, filesystem character device nodes, and hardware monitoring.
+ */
+struct spi_driver {
+ const struct spi_device_id *id_table;
+ int (*probe)(struct spi_device *spi);
+ int (*remove)(struct spi_device *spi);
+ void (*shutdown)(struct spi_device *spi);
+ struct device_driver driver;
+};
+
+static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
+{
+ return drv ? container_of(drv, struct spi_driver, driver) : NULL;
+}
+
+extern int spi_register_driver(struct spi_driver *sdrv);
+
+/**
+ * spi_unregister_driver - reverse effect of spi_register_driver
+ * @sdrv: the driver to unregister
+ * Context: can sleep
+ */
+static inline void spi_unregister_driver(struct spi_driver *sdrv)
+{
+ if (sdrv)
+ driver_unregister(&sdrv->driver);
+}
+
+/**
+ * module_spi_driver() - Helper macro for registering a SPI driver
+ * @__spi_driver: spi_driver struct
+ *
+ * Helper macro for SPI drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_spi_driver(__spi_driver) \
+ module_driver(__spi_driver, spi_register_driver, \
+ spi_unregister_driver)
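+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * a tiny protocol driver registered with module_spi_driver().  The
+ * "acme-sensor" name and the wake-up command bytes are hypothetical, and
+ * the usual <linux/module.h> include is assumed.
+ */
+static int acme_sensor_probe(struct spi_device *spi)
+{
+        static const u8 wake_cmd[2] = { 0x20, 0x0f };   /* hypothetical CTRL write */
+        int ret;
+
+        spi->bits_per_word = 8;
+        ret = spi_setup(spi);
+        if (ret)
+                return ret;
+
+        return spi_write(spi, wake_cmd, sizeof(wake_cmd));
+}
+
+static struct spi_driver acme_sensor_driver = {
+        .driver = {
+                .name = "acme-sensor",
+                .owner = THIS_MODULE,
+        },
+        .probe = acme_sensor_probe,
+};
+module_spi_driver(acme_sensor_driver);
+MODULE_LICENSE("GPL");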
+
+/**
+ * struct spi_master - interface to SPI master controller
+ * @dev: device interface to this driver
+ * @list: link with the global spi_master list
+ * @bus_num: board-specific (and often SOC-specific) identifier for a
+ * given SPI controller.
+ * @num_chipselect: chipselects are used to distinguish individual
+ *	SPI slaves, and are numbered from zero to num_chipselect - 1;
+ *	each slave has a chipselect signal, but it's common that not
+ * every chipselect is connected to a slave.
+ * @dma_alignment: SPI controller constraint on DMA buffers alignment.
+ * @mode_bits: flags understood by this controller driver
+ * @bits_per_word_mask: A mask indicating which values of bits_per_word are
+ * supported by the driver. Bit n indicates that a bits_per_word n+1 is
+ * supported. If set, the SPI core will reject any transfer with an
+ * unsupported bits_per_word. If not set, this value is simply ignored,
+ * and it's up to the individual driver to perform any validation.
+ * @min_speed_hz: Lowest supported transfer speed
+ * @max_speed_hz: Highest supported transfer speed
+ * @flags: other constraints relevant to this driver
+ * @bus_lock_spinlock: spinlock for SPI bus locking
+ * @bus_lock_mutex: mutex for SPI bus locking
+ * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
+ * @setup: updates the device mode and clocking records used by a
+ * device's SPI controller; protocol code may call this. This
+ * must fail if an unrecognized or unsupported mode is requested.
+ * It's always safe to call this unless transfers are pending on
+ * the device whose settings are being modified.
+ * @transfer: adds a message to the controller's transfer queue.
+ * @cleanup: frees controller-specific state
+ * @can_dma: determine whether this master supports DMA
+ * @queued: whether this master is providing an internal message queue
+ * @kworker: thread struct for message pump
+ * @kworker_task: pointer to task for message pump kworker thread
+ * @pump_messages: work struct for scheduling work to the message pump
+ * @queue_lock: spinlock to synchronise access to message queue
+ * @queue: message queue
+ * @idling: the device is entering idle state
+ * @cur_msg: the currently in-flight message
+ * @cur_msg_prepared: spi_prepare_message was called for the currently
+ * in-flight message
+ * @cur_msg_mapped: message has been mapped for DMA
+ * @xfer_completion: used by core transfer_one_message()
+ * @busy: message pump is busy
+ * @running: message pump is running
+ * @rt: whether this queue is set to run as a realtime task
+ * @auto_runtime_pm: the core should ensure a runtime PM reference is held
+ * while the hardware is prepared, using the parent
+ * device for the spidev
+ * @max_dma_len: Maximum length of a DMA transfer for the device.
+ * @prepare_transfer_hardware: a message will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the transfer hardware
+ * by issuing this call
+ * @transfer_one_message: the subsystem calls the driver to transfer a single
+ * message while queuing transfers that arrive in the meantime. When the
+ * driver is finished with this message, it must call
+ * spi_finalize_current_message() so the subsystem can issue the next
+ * message
+ * @unprepare_transfer_hardware: there are currently no more messages on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @set_cs: set the logic level of the chip select line. May be called
+ * from interrupt context.
+ * @prepare_message: set up the controller to transfer a single message,
+ * for example doing DMA mapping. Called from threaded
+ * context.
+ * @transfer_one: transfer a single spi_transfer.
+ * - return 0 if the transfer is finished,
+ * - return 1 if the transfer is still in progress. When
+ * the driver is finished with this transfer it must
+ * call spi_finalize_current_transfer() so the subsystem
+ * can issue the next transfer. Note: transfer_one and
+ * transfer_one_message are mutually exclusive; when both
+ * are set, the generic subsystem does not call your
+ * transfer_one callback.
+ * @handle_err: the subsystem calls the driver to handle an error that occurs
+ * in the generic implementation of transfer_one_message().
+ * @unprepare_message: undo any work done by prepare_message().
+ * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
+ * number. Any individual value may be -ENOENT for CS lines that
+ * are not GPIOs (driven by the SPI controller itself).
+ * @dma_tx: DMA transmit channel
+ * @dma_rx: DMA receive channel
+ * @dummy_rx: dummy receive buffer for full-duplex devices
+ * @dummy_tx: dummy transmit buffer for full-duplex devices
+ *
+ * Each SPI master controller can communicate with one or more @spi_device
+ * children. These make a small bus, sharing MOSI, MISO and SCK signals
+ * but not chip select signals. Each device may be configured to use a
+ * different clock rate, since those shared signals are ignored unless
+ * the chip is selected.
+ *
+ * The driver for an SPI controller manages access to those devices through
+ * a queue of spi_message transactions, copying data between CPU memory and
+ * an SPI slave device. For each such message it queues, it calls the
+ * message's completion function when the transaction completes.
+ */
+struct spi_master {
+ struct device dev;
+
+ struct list_head list;
+
+ /* other than negative (== assign one dynamically), bus_num is fully
+ * board-specific. usually that simplifies to being SOC-specific.
+ * example: one SOC has three SPI controllers, numbered 0..2,
+ * and one board's schematics might show it using SPI-2. software
+ * would normally use bus_num=2 for that controller.
+ */
+ s16 bus_num;
+
+ /* chipselects will be integral to many controllers; some others
+ * might use board-specific GPIOs.
+ */
+ u16 num_chipselect;
+
+ /* some SPI controllers pose alignment requirements on DMAable
+ * buffers; let protocol drivers know about these requirements.
+ */
+ u16 dma_alignment;
+
+ /* spi_device.mode flags understood by this controller driver */
+ u16 mode_bits;
+
+ /* bitmask of supported bits_per_word for transfers */
+ u32 bits_per_word_mask;
+#define SPI_BPW_MASK(bits) BIT((bits) - 1)
+#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1))
+#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1))
+
+ /* limits on transfer speed */
+ u32 min_speed_hz;
+ u32 max_speed_hz;
+
+ /* other constraints relevant to this driver */
+ u16 flags;
+#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
+#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */
+#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
+#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */
+#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */
+
+ /* lock and mutex for SPI bus locking */
+ spinlock_t bus_lock_spinlock;
+ struct mutex bus_lock_mutex;
+
+ /* flag indicating that the SPI bus is locked for exclusive use */
+ bool bus_lock_flag;
+
+ /* Setup mode and clock, etc (spi driver may call many times).
+ *
+ * IMPORTANT: this may be called when transfers to another
+ * device are active. DO NOT UPDATE SHARED REGISTERS in ways
+ * which could break those transfers.
+ */
+ int (*setup)(struct spi_device *spi);
+
+ /* bidirectional bulk transfers
+ *
+ * + The transfer() method may not sleep; its main role is
+ * just to add the message to the queue.
+ * + For now there's no remove-from-queue operation, or
+ * any other request management
+ * + To a given spi_device, message queueing is pure fifo
+ *
+ * + The master's main job is to process its message queue,
+ * selecting a chip then transferring data
+ * + If there are multiple spi_device children, the i/o queue
+ * arbitration algorithm is unspecified (round robin, fifo,
+ * priority, reservations, preemption, etc)
+ *
+ * + Chipselect stays active during the entire message
+ * (unless modified by spi_transfer.cs_change != 0).
+ * + The message transfers use clock and SPI mode parameters
+ * previously established by setup() for this device
+ */
+ int (*transfer)(struct spi_device *spi,
+ struct spi_message *mesg);
+
+ /* called on release() to free memory provided by spi_master */
+ void (*cleanup)(struct spi_device *spi);
+
+ /*
+ * Used to enable core support for DMA handling, if can_dma()
+ * exists and returns true then the transfer will be mapped
+ * prior to transfer_one() being called. The driver should
+	 * not modify or store xfer; dma_tx and dma_rx must be set
+ * while the device is prepared.
+ */
+ bool (*can_dma)(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer);
+
+ /*
+ * These hooks are for drivers that want to use the generic
+ * master transfer queueing mechanism. If these are used, the
+ * transfer() function above must NOT be specified by the driver.
+ * Over time we expect SPI drivers to be phased over to this API.
+ */
+ bool queued;
+ struct kthread_worker kworker;
+ struct task_struct *kworker_task;
+ struct kthread_work pump_messages;
+ spinlock_t queue_lock;
+ struct list_head queue;
+ struct spi_message *cur_msg;
+ bool idling;
+ bool busy;
+ bool running;
+ bool rt;
+ bool auto_runtime_pm;
+ bool cur_msg_prepared;
+ bool cur_msg_mapped;
+ struct completion xfer_completion;
+ size_t max_dma_len;
+
+ int (*prepare_transfer_hardware)(struct spi_master *master);
+ int (*transfer_one_message)(struct spi_master *master,
+ struct spi_message *mesg);
+ int (*unprepare_transfer_hardware)(struct spi_master *master);
+ int (*prepare_message)(struct spi_master *master,
+ struct spi_message *message);
+ int (*unprepare_message)(struct spi_master *master,
+ struct spi_message *message);
+
+ /*
+ * These hooks are for drivers that use a generic implementation
+	 * of transfer_one_message() provided by the core.
+ */
+ void (*set_cs)(struct spi_device *spi, bool enable);
+ int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *transfer);
+ void (*handle_err)(struct spi_master *master,
+ struct spi_message *message);
+
+ /* gpio chip select */
+ int *cs_gpios;
+
+ /* DMA channels for use with core dmaengine helpers */
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+
+ /* dummy data for full duplex devices */
+ void *dummy_rx;
+ void *dummy_tx;
+};
+
+static inline void *spi_master_get_devdata(struct spi_master *master)
+{
+ return dev_get_drvdata(&master->dev);
+}
+
+static inline void spi_master_set_devdata(struct spi_master *master, void *data)
+{
+ dev_set_drvdata(&master->dev, data);
+}
+
+static inline struct spi_master *spi_master_get(struct spi_master *master)
+{
+ if (!master || !get_device(&master->dev))
+ return NULL;
+ return master;
+}
+
+static inline void spi_master_put(struct spi_master *master)
+{
+ if (master)
+ put_device(&master->dev);
+}
+
+/* PM calls that need to be issued by the driver */
+extern int spi_master_suspend(struct spi_master *master);
+extern int spi_master_resume(struct spi_master *master);
+
+/* Calls the driver make to interact with the message queue */
+extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
+extern void spi_finalize_current_message(struct spi_master *master);
+extern void spi_finalize_current_transfer(struct spi_master *master);
+
+/* the spi driver core manages memory for the spi_master classdev */
+extern struct spi_master *
+spi_alloc_master(struct device *host, unsigned size);
+
+extern int spi_register_master(struct spi_master *master);
+extern int devm_spi_register_master(struct device *dev,
+ struct spi_master *master);
+extern void spi_unregister_master(struct spi_master *master);
+
+extern struct spi_master *spi_busnum_to_master(u16 busnum);
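+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * the skeleton of a controller driver built on the queued-master hooks
+ * above.  The foo_* names are hypothetical and the usual
+ * <linux/platform_device.h> include is assumed.
+ */
+struct foo_spi {
+        void __iomem *regs;     /* hypothetical controller registers */
+};
+
+static void foo_spi_set_cs(struct spi_device *spi, bool enable)
+{
+        /* drive the chipselect line for spi->chip_select to the given level */
+}
+
+static int foo_spi_transfer_one(struct spi_master *master,
+                                struct spi_device *spi,
+                                struct spi_transfer *t)
+{
+        /* clock t->len bytes between t->tx_buf and t->rx_buf here */
+        return 0;       /* 0 means the transfer is finished (see transfer_one) */
+}
+
+static int foo_spi_probe(struct platform_device *pdev)
+{
+        struct spi_master *master;
+
+        master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
+        if (!master)
+                return -ENOMEM;
+
+        master->bus_num = pdev->id;
+        master->num_chipselect = 4;
+        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+        master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+        master->set_cs = foo_spi_set_cs;
+        master->transfer_one = foo_spi_transfer_one;
+
+        /* the devm_ variant unregisters and releases the master on detach */
+        return devm_spi_register_master(&pdev->dev, master);
+}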
+
+/*---------------------------------------------------------------------------*/
+
+/*
+ * I/O INTERFACE between SPI controller and protocol drivers
+ *
+ * Protocol drivers use a queue of spi_messages, each transferring data
+ * between the controller and memory buffers.
+ *
+ * The spi_messages themselves consist of a series of read+write transfer
+ * segments. Those segments always read the same number of bits as they
+ * write; but one or the other is easily ignored by passing a null buffer
+ * pointer. (This is unlike most types of I/O API, because SPI hardware
+ * is full duplex.)
+ *
+ * NOTE: Allocation of spi_transfer and spi_message memory is entirely
+ * up to the protocol driver, which guarantees the integrity of both (as
+ * well as the data buffers) for as long as the message is queued.
+ */
+
+/**
+ * struct spi_transfer - a read/write buffer pair
+ * @tx_buf: data to be written (dma-safe memory), or NULL
+ * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
+ * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+ * @tx_nbits: number of bits used for writing. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+ * @rx_nbits: number of bits used for reading. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+ * @len: size of rx and tx buffers (in bytes)
+ * @speed_hz: Select a speed other than the device default for this
+ * transfer. If 0 the default (from @spi_device) is used.
+ * @bits_per_word: select a bits_per_word other than the device default
+ * for this transfer. If 0 the default (from @spi_device) is used.
+ * @cs_change: affects chipselect after this transfer completes
+ * @delay_usecs: microseconds to delay after this transfer before
+ * (optionally) changing the chipselect status, then starting
+ * the next transfer or completing this @spi_message.
+ * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
+ *
+ * SPI transfers always write the same number of bytes as they read.
+ * Protocol drivers should always provide @rx_buf and/or @tx_buf.
+ * In some cases, they may also want to provide DMA addresses for
+ * the data being transferred; that may reduce overhead, when the
+ * underlying driver uses dma.
+ *
+ * If the transmit buffer is null, zeroes will be shifted out
+ * while filling @rx_buf. If the receive buffer is null, the data
+ * shifted in will be discarded. Only "len" bytes shift out (or in).
+ * It's an error to try to shift out a partial word. (For example, by
+ * shifting out three bytes with word size of sixteen or twenty bits;
+ * the former uses two bytes per word, the latter uses four bytes.)
+ *
+ * In-memory data values are always in native CPU byte order, translated
+ * from the wire byte order (big-endian except with SPI_LSB_FIRST). So
+ * for example when bits_per_word is sixteen, buffers are 2N bytes long
+ * (@len = 2N) and hold N sixteen bit words in CPU byte order.
+ *
+ * When the word size of the SPI transfer is not a power-of-two multiple
+ * of eight bits, those in-memory words include extra bits. In-memory
+ * words are always seen by protocol drivers as right-justified, so the
+ * undefined (rx) or unused (tx) bits are always the most significant bits.
+ *
+ * All SPI transfers start with the relevant chipselect active. Normally
+ * it stays selected until after the last transfer in a message. Drivers
+ * can affect the chipselect signal using cs_change.
+ *
+ * (i) If the transfer isn't the last one in the message, this flag is
+ * used to make the chipselect briefly go inactive in the middle of the
+ * message. Toggling chipselect in this way may be needed to terminate
+ * a chip command, letting a single spi_message perform all of a group of
+ * chip transactions together.
+ *
+ * (ii) When the transfer is the last one in the message, the chip may
+ * stay selected until the next transfer. On multi-device SPI busses
+ * with nothing blocking messages going to other devices, this is just
+ * a performance hint; starting a message to another device deselects
+ * this one. But in other cases, this can be used to ensure correctness.
+ * Some devices need protocol transactions to be built from a series of
+ * spi_message submissions, where the content of one message is determined
+ * by the results of previous messages and where the whole transaction
+ * ends when the chipselect goes inactive.
+ *
+ * Some SPI controllers can transfer in 1x, 2x or 4x widths; the width used
+ * for a transfer is taken from @tx_nbits and @rx_nbits. For bidirectional
+ * transfers both should be set. The transfer width is selected with
+ * SPI_NBITS_SINGLE (1x), SPI_NBITS_DUAL (2x) or SPI_NBITS_QUAD (4x).
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates. After you submit a message
+ * and its transfers, ignore them until its completion callback.
+ */
+struct spi_transfer {
+ /* it's ok if tx_buf == rx_buf (right?)
+ * for MicroWire, one buffer must be null
+ * buffers must work with dma_*map_single() calls, unless
+ * spi_message.is_dma_mapped reports a pre-existing mapping
+ */
+ const void *tx_buf;
+ void *rx_buf;
+ unsigned len;
+
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
+ struct sg_table tx_sg;
+ struct sg_table rx_sg;
+
+ unsigned cs_change:1;
+ unsigned tx_nbits:3;
+ unsigned rx_nbits:3;
+#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
+#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
+#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
+ u8 bits_per_word;
+ u16 delay_usecs;
+ u32 speed_hz;
+
+ struct list_head transfer_list;
+};
+
+/**
+ * struct spi_message - one multi-segment SPI transaction
+ * @transfers: list of transfer segments in this transaction
+ * @spi: SPI device to which the transaction is queued
+ * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
+ * addresses for each transfer buffer
+ * @complete: called to report transaction completions
+ * @context: the argument to complete() when it's called
+ * @frame_length: the total number of bytes in the message
+ * @actual_length: the total number of bytes that were transferred in all
+ * successful segments
+ * @status: zero for success, else negative errno
+ * @queue: for use by whichever driver currently owns the message
+ * @state: for use by whichever driver currently owns the message
+ *
+ * A @spi_message is used to execute an atomic sequence of data transfers,
+ * each represented by a struct spi_transfer. The sequence is "atomic"
+ * in the sense that no other spi_message may use that SPI bus until that
+ * sequence completes. On some systems, many such sequences can execute as
+ * a single programmed DMA transfer. On all systems, these messages are
+ * queued, and might complete after transactions to other devices. Messages
+ * sent to a given spi_device are always executed in FIFO order.
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates. After you submit a message
+ * and its transfers, ignore them until its completion callback.
+ */
+struct spi_message {
+ struct list_head transfers;
+
+ struct spi_device *spi;
+
+ unsigned is_dma_mapped:1;
+
+ /* REVISIT: we might want a flag affecting the behavior of the
+ * last transfer ... allowing things like "read 16 bit length L"
+ * immediately followed by "read L bytes". Basically imposing
+ * a specific message scheduling algorithm.
+ *
+ * Some controller drivers (message-at-a-time queue processing)
+ * could provide that as their default scheduling algorithm. But
+ * others (with multi-message pipelines) could need a flag to
+ * tell them about such special cases.
+ */
+
+ /* completion is reported through a callback */
+ void (*complete)(void *context);
+ void *context;
+ unsigned frame_length;
+ unsigned actual_length;
+ int status;
+
+ /* for optional use by whatever driver currently owns the
+ * spi_message ... between calls to spi_async and then later
+ * complete(), that's the spi_master controller driver.
+ */
+ struct list_head queue;
+ void *state;
+};
+
+static inline void spi_message_init(struct spi_message *m)
+{
+ memset(m, 0, sizeof *m);
+ INIT_LIST_HEAD(&m->transfers);
+}
+
+static inline void
+spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
+{
+ list_add_tail(&t->transfer_list, &m->transfers);
+}
+
+static inline void
+spi_transfer_del(struct spi_transfer *t)
+{
+ list_del(&t->transfer_list);
+}
+
+/**
+ * spi_message_init_with_transfers - Initialize spi_message and append transfers
+ * @m: spi_message to be initialized
+ * @xfers: An array of spi transfers
+ * @num_xfers: Number of items in the xfer array
+ *
+ * This function initializes the given spi_message and adds each spi_transfer in
+ * the given array to the message.
+ */
+static inline void
+spi_message_init_with_transfers(struct spi_message *m,
+				struct spi_transfer *xfers, unsigned int num_xfers)
+{
+ unsigned int i;
+
+ spi_message_init(m);
+ for (i = 0; i < num_xfers; ++i)
+ spi_message_add_tail(&xfers[i], m);
+}
+
+/* It's fine to embed message and transaction structures in other data
+ * structures so long as you don't free them while they're in use.
+ */
+
+static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
+{
+ struct spi_message *m;
+
+ m = kzalloc(sizeof(struct spi_message)
+ + ntrans * sizeof(struct spi_transfer),
+ flags);
+ if (m) {
+ unsigned i;
+ struct spi_transfer *t = (struct spi_transfer *)(m + 1);
+
+ INIT_LIST_HEAD(&m->transfers);
+ for (i = 0; i < ntrans; i++, t++)
+ spi_message_add_tail(t, m);
+ }
+ return m;
+}
+
+static inline void spi_message_free(struct spi_message *m)
+{
+ kfree(m);
+}
+
+extern int spi_setup(struct spi_device *spi);
+extern int spi_async(struct spi_device *spi, struct spi_message *message);
+extern int spi_async_locked(struct spi_device *spi,
+ struct spi_message *message);
+
+/*---------------------------------------------------------------------------*/
+
+/* All these synchronous SPI transfer routines are utilities layered
+ * over the core async transfer primitive. Here, "synchronous" means
+ * they will sleep uninterruptibly until the async transfer completes.
+ */
+
+extern int spi_sync(struct spi_device *spi, struct spi_message *message);
+extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
+extern int spi_bus_lock(struct spi_master *master);
+extern int spi_bus_unlock(struct spi_master *master);
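+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * composing a message from two transfers, a command followed by a response,
+ * and running it synchronously.  Both buffers are assumed to be DMA-safe
+ * (e.g. kmalloc'd), and the 10 usec pause is a hypothetical chip requirement.
+ */
+static int foo_cmd_then_read(struct spi_device *spi,
+                             const u8 *cmd, size_t cmd_len,
+                             u8 *resp, size_t resp_len)
+{
+        struct spi_transfer t[2] = {
+                {
+                        .tx_buf = cmd,
+                        .len = cmd_len,
+                        .delay_usecs = 10,      /* pause before clocking the response */
+                }, {
+                        .rx_buf = resp,
+                        .len = resp_len,
+                },
+        };
+        struct spi_message m;
+
+        spi_message_init(&m);
+        spi_message_add_tail(&t[0], &m);
+        spi_message_add_tail(&t[1], &m);
+
+        return spi_sync(spi, &m);       /* chipselect stays active across both */
+}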
+
+/**
+ * spi_write - SPI synchronous write
+ * @spi: device to which data will be written
+ * @buf: data buffer
+ * @len: data buffer size
+ * Context: can sleep
+ *
+ * This writes the buffer and returns zero or a negative error code.
+ * Callable only from contexts that can sleep.
+ */
+static inline int
+spi_write(struct spi_device *spi, const void *buf, size_t len)
+{
+ struct spi_transfer t = {
+ .tx_buf = buf,
+ .len = len,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return spi_sync(spi, &m);
+}
+
+/**
+ * spi_read - SPI synchronous read
+ * @spi: device from which data will be read
+ * @buf: data buffer
+ * @len: data buffer size
+ * Context: can sleep
+ *
+ * This reads the buffer and returns zero or a negative error code.
+ * Callable only from contexts that can sleep.
+ */
+static inline int
+spi_read(struct spi_device *spi, void *buf, size_t len)
+{
+ struct spi_transfer t = {
+ .rx_buf = buf,
+ .len = len,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return spi_sync(spi, &m);
+}
+
+/**
+ * spi_sync_transfer - synchronous SPI data transfer
+ * @spi: device with which data will be exchanged
+ * @xfers: An array of spi_transfers
+ * @num_xfers: Number of items in the xfer array
+ * Context: can sleep
+ *
+ * Does a synchronous SPI data transfer of the given spi_transfer array.
+ *
+ * For more specific semantics see spi_sync().
+ *
+ * It returns zero on success, else a negative error code.
+ */
+static inline int
+spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
+ unsigned int num_xfers)
+{
+ struct spi_message msg;
+
+ spi_message_init_with_transfers(&msg, xfers, num_xfers);
+
+ return spi_sync(spi, &msg);
+}
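+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * a single full-duplex exchange via spi_sync_transfer().  The buffers are
+ * assumed to be DMA-safe and the function name is hypothetical.
+ */
+static int foo_xfer_block(struct spi_device *spi, const u8 *tx, u8 *rx,
+                          size_t len)
+{
+        struct spi_transfer xfer = {
+                .tx_buf = tx,
+                .rx_buf = rx,   /* data is read back while writing */
+                .len = len,
+        };
+
+        return spi_sync_transfer(spi, &xfer, 1);
+}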
+
+/* this copies txbuf and rxbuf data; for small transfers only! */
+extern int spi_write_then_read(struct spi_device *spi,
+ const void *txbuf, unsigned n_tx,
+ void *rxbuf, unsigned n_rx);
+
+/**
+ * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ * Context: can sleep
+ *
+ * This returns the (unsigned) eight bit number returned by the
+ * device, or else a negative error code. Callable only from
+ * contexts that can sleep.
+ */
+static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
+{
+ ssize_t status;
+ u8 result;
+
+ status = spi_write_then_read(spi, &cmd, 1, &result, 1);
+
+ /* return negative errno or unsigned value */
+ return (status < 0) ? status : result;
+}
+
+/**
+ * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ * Context: can sleep
+ *
+ * This returns the (unsigned) sixteen bit number returned by the
+ * device, or else a negative error code. Callable only from
+ * contexts that can sleep.
+ *
+ * The number is returned in wire-order, which is at least sometimes
+ * big-endian.
+ */
+static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
+{
+ ssize_t status;
+ u16 result;
+
+ status = spi_write_then_read(spi, &cmd, 1, &result, 2);
+
+ /* return negative errno or unsigned value */
+ return (status < 0) ? status : result;
+}
+
+/**
+ * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ * Context: can sleep
+ *
+ * This returns the (unsigned) sixteen bit number returned by the device in cpu
+ * endianness, or else a negative error code. Callable only from contexts that
+ * can sleep.
+ *
+ * This function is similar to spi_w8r16, with the exception that it will
+ * convert the read 16 bit data word from big-endian to native endianness.
+ *
+ */
+static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
+{
+ ssize_t status;
+ __be16 result;
+
+ status = spi_write_then_read(spi, &cmd, 1, &result, 2);
+ if (status < 0)
+ return status;
+
+ return be16_to_cpu(result);
+}
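+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * probing a chip identity with the write-then-read helpers above.  The
+ * register address and expected ID byte are hypothetical.
+ */
+static int foo_check_id(struct spi_device *spi)
+{
+        ssize_t id = spi_w8r8(spi, 0x0f);       /* hypothetical WHO_AM_I register */
+
+        if (id < 0)
+                return id;                      /* transfer failed */
+        return (id == 0x3c) ? 0 : -ENODEV;      /* 0x3c is an assumed chip ID */
+}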
+
+/*---------------------------------------------------------------------------*/
+
+/*
+ * INTERFACE between board init code and SPI infrastructure.
+ *
+ * No SPI driver ever sees these SPI device table segments, but
+ * it's how the SPI core (or adapters that get hotplugged) grows
+ * the driver model tree.
+ *
+ * As a rule, SPI devices can't be probed. Instead, board init code
+ * provides a table listing the devices which are present, with enough
+ * information to bind and set up the device's driver. There's basic
+ * support for nonstatic configurations too; enough to handle adding
+ * parport adapters, or microcontrollers acting as USB-to-SPI bridges.
+ */
+
+/**
+ * struct spi_board_info - board-specific template for a SPI device
+ * @modalias: Initializes spi_device.modalias; identifies the driver.
+ * @platform_data: Initializes spi_device.platform_data; the particular
+ * data stored there is driver-specific.
+ * @controller_data: Initializes spi_device.controller_data; some
+ * controllers need hints about hardware setup, e.g. for DMA.
+ * @irq: Initializes spi_device.irq; depends on how the board is wired.
+ * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
+ * from the chip datasheet and board-specific signal quality issues.
+ * @bus_num: Identifies which spi_master parents the spi_device; unused
+ * by spi_new_device(), and otherwise depends on board wiring.
+ * @chip_select: Initializes spi_device.chip_select; depends on how
+ * the board is wired.
+ * @mode: Initializes spi_device.mode; based on the chip datasheet, board
+ * wiring (some devices support both 3WIRE and standard modes), and
+ * possibly presence of an inverter in the chipselect path.
+ *
+ * When adding new SPI devices to the device tree, these structures serve
+ * as a partial device template. They hold information which can't always
+ * be determined by drivers. Information that probe() can establish (such
+ * as the default transfer wordsize) is not included here.
+ *
+ * These structures are used in two places. Their primary role is to
+ * be stored in tables of board-specific device descriptors, which are
+ * declared early in board initialization and then used (much later) to
+ * populate a controller's device tree after that controller's driver
+ * initializes. A secondary (and atypical) role is as a parameter to
+ * a spi_new_device() call, which happens after those controller drivers
+ * are active in some dynamic board configuration models.
+ */
+struct spi_board_info {
+ /* the device name and module name are coupled, like platform_bus;
+ * "modalias" is normally the driver name.
+ *
+ * platform_data goes to spi_device.dev.platform_data,
+ * controller_data goes to spi_device.controller_data,
+ * irq is copied too
+ */
+ char modalias[SPI_NAME_SIZE];
+ const void *platform_data;
+ void *controller_data;
+ int irq;
+
+ /* slower signaling on noisy or low voltage boards */
+ u32 max_speed_hz;
+
+
+ /* bus_num is board specific and matches the bus_num of some
+ * spi_master that will probably be registered later.
+ *
+ * chip_select reflects how this chip is wired to that master;
+ * it's less than num_chipselect.
+ */
+ u16 bus_num;
+ u16 chip_select;
+
+ /* mode becomes spi_device.mode, and is essential for chips
+ * where the default of SPI_CS_HIGH = 0 is wrong.
+ */
+ u16 mode;
+
+ /* ... may need additional spi_device chip config data here.
+ * avoid stuff protocol drivers can set; but include stuff
+ * needed to behave without being bound to a driver:
+ * - quirks like clock rate mattering when not selected
+ */
+};
+
+#ifdef CONFIG_SPI
+extern int
+spi_register_board_info(struct spi_board_info const *info, unsigned n);
+#else
+/* board init code may ignore whether SPI is configured or not */
+static inline int
+spi_register_board_info(struct spi_board_info const *info, unsigned n)
+ { return 0; }
+#endif
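+
+/*
+ * A minimal usage sketch (illustrative only, not part of this interface):
+ * a board-init table as described above.  The modalias strings, bus and
+ * chipselect numbers, modes and speeds are hypothetical, and the usual
+ * <linux/init.h> board-file includes are assumed.
+ */
+static struct spi_board_info foo_board_spi_devices[] __initdata = {
+        {
+                .modalias = "acme-sensor",
+                .max_speed_hz = 1000000,        /* keep it slow on a noisy board */
+                .bus_num = 1,
+                .chip_select = 0,
+                .mode = SPI_MODE_3,
+        }, {
+                .modalias = "mmc_spi",
+                .max_speed_hz = 25000000,
+                .bus_num = 1,
+                .chip_select = 2,
+                .mode = SPI_MODE_0,
+        },
+};
+
+/* called from the board's init_machine hook */
+static void __init foo_board_init_spi(void)
+{
+        spi_register_board_info(foo_board_spi_devices,
+                                ARRAY_SIZE(foo_board_spi_devices));
+}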
+
+
+/* If you're hotplugging an adapter with devices (parport, usb, etc)
+ * use spi_new_device() to describe each device. You can also call
+ * spi_unregister_device() to start making that device vanish, but
+ * normally that would be handled by spi_unregister_master().
+ *
+ * You can also use spi_alloc_device() and spi_add_device() to use a two
+ * stage registration sequence for each spi_device. This gives the caller
+ * some more control over the spi_device structure before it is registered,
+ * but requires that caller to initialize fields that would otherwise
+ * be defined using the board info.
+ */
+extern struct spi_device *
+spi_alloc_device(struct spi_master *master);
+
+extern int
+spi_add_device(struct spi_device *spi);
+
+extern struct spi_device *
+spi_new_device(struct spi_master *, struct spi_board_info *);
+
+static inline void
+spi_unregister_device(struct spi_device *spi)
+{
+ if (spi)
+ device_unregister(&spi->dev);
+}
+
+extern const struct spi_device_id *
+spi_get_device_id(const struct spi_device *sdev);
+
+static inline bool
+spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer)
+{
+ return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers);
+}
+
+#endif /* __LINUX_SPI_H */
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
new file mode 100644
index 000000000..85578d4be
--- /dev/null
+++ b/include/linux/spi/spi_bitbang.h
@@ -0,0 +1,47 @@
+#ifndef __SPI_BITBANG_H
+#define __SPI_BITBANG_H
+
+#include <linux/workqueue.h>
+
+struct spi_bitbang {
+ spinlock_t lock;
+ u8 busy;
+ u8 use_dma;
+ u8 flags; /* extra spi->mode support */
+
+ struct spi_master *master;
+
+ /* setup_transfer() changes clock and/or wordsize to match settings
+ * for this transfer; zeroes restore defaults from spi_device.
+ */
+ int (*setup_transfer)(struct spi_device *spi,
+ struct spi_transfer *t);
+
+ void (*chipselect)(struct spi_device *spi, int is_on);
+#define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */
+#define BITBANG_CS_INACTIVE 0
+
+ /* txrx_bufs() may handle dma mapping for transfers that don't
+ * already have one (transfer.{tx,rx}_dma is zero), or use PIO
+ */
+ int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
+
+ /* txrx_word[SPI_MODE_*]() just looks like a shift register */
+ u32 (*txrx_word[4])(struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits);
+};
+
+/* you can call these default bitbang->master methods from your custom
+ * methods, if you like.
+ */
+extern int spi_bitbang_setup(struct spi_device *spi);
+extern void spi_bitbang_cleanup(struct spi_device *spi);
+extern int spi_bitbang_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t);
+
+/* start or stop queue processing */
+extern int spi_bitbang_start(struct spi_bitbang *spi);
+extern void spi_bitbang_stop(struct spi_bitbang *spi);
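+
+/* A hedged sketch of how a controller driver might use this; the helper
+ * names are hypothetical, and real drivers also fill in master->bus_num,
+ * master->num_chipselect etc. before starting the queue:
+ *
+ *	struct spi_bitbang *bb = spi_master_get_devdata(master);
+ *	int err;
+ *
+ *	bb->master = master;
+ *	bb->chipselect = example_chipselect;
+ *	bb->txrx_word[SPI_MODE_0] = example_txrx_word_mode0;
+ *	err = spi_bitbang_start(bb);
+ *	...
+ *	spi_bitbang_stop(bb);		// on driver removal
+ */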
+
+#endif /* __SPI_BITBANG_H */
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h
new file mode 100644
index 000000000..1634ce31c
--- /dev/null
+++ b/include/linux/spi/spi_gpio.h
@@ -0,0 +1,71 @@
+#ifndef __LINUX_SPI_GPIO_H
+#define __LINUX_SPI_GPIO_H
+
+/*
+ * For each bitbanged SPI bus, set up a platform_device node with:
+ * - name "spi_gpio"
+ * - id the same as the SPI bus number it implements
+ * - dev.platform data pointing to a struct spi_gpio_platform_data
+ *
+ * Or, see the driver code for information about speedups that are
+ * possible on platforms that support inlined access for GPIOs (no
+ * spi_gpio_platform_data is used).
+ *
+ * Use spi_board_info with these busses in the usual way, being sure
+ * that controller_data holds the GPIO number used for each device's
+ * chipselect:
+ *
+ * static struct spi_board_info ... [] = {
+ * ...
+ * // this slave uses GPIO 42 for its chipselect
+ * .controller_data = (void *) 42,
+ * ...
+ * // this one uses GPIO 86 for its chipselect
+ * .controller_data = (void *) 86,
+ * ...
+ * };
+ *
+ * If chipselect is not used (there's only one device on the bus), assign
+ * SPI_GPIO_NO_CHIPSELECT to the controller_data:
+ * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT;
+ *
+ * If the MISO or MOSI pin is not available then it should be set to
+ * SPI_GPIO_NO_MISO or SPI_GPIO_NO_MOSI.
+ *
+ * If the bitbanged bus is later switched to a "native" controller,
+ * that platform_device and controller_data should be removed.
+ */
+
+#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l)
+#define SPI_GPIO_NO_MISO ((unsigned long)-1l)
+#define SPI_GPIO_NO_MOSI ((unsigned long)-1l)
+
+/**
+ * struct spi_gpio_platform_data - parameter for bitbanged SPI master
+ * @sck: number of the GPIO used for clock output
+ * @mosi: number of the GPIO used for Master Output, Slave In (MOSI) data
+ * @miso: number of the GPIO used for Master Input, Slave Output (MISO) data
+ * @num_chipselect: how many slaves to allow
+ *
+ * All GPIO signals used with the SPI bus managed through this driver
+ * (chipselects, MOSI, MISO, SCK) must be configured as GPIOs, instead
+ * of some alternate function.
+ *
+ * It can be convenient to use this driver with pins that have alternate
+ * functions associated with a "native" SPI controller if a driver for that
+ * controller is not available, or is missing important functionality.
+ *
+ * On platforms which can do so, configure MISO with a weak pullup unless
+ * there's an external pullup on that signal. That saves power by avoiding
+ * floating signals. (A weak pulldown would save power too, but many
+ * drivers expect to see all-ones data as the no slave "response".)
+ */
+struct spi_gpio_platform_data {
+ unsigned sck;
+ unsigned long mosi;
+ unsigned long miso;
+
+ u16 num_chipselect;
+};
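+
+/* A minimal sketch tying this structure to the platform_device described
+ * at the top of this file; the GPIO numbers and bus id are hypothetical:
+ *
+ *	static struct spi_gpio_platform_data example_spi_gpio_pdata = {
+ *		.sck		= 10,
+ *		.mosi		= 11,
+ *		.miso		= 12,
+ *		.num_chipselect	= 1,
+ *	};
+ *
+ *	static struct platform_device example_spi_gpio = {
+ *		.name	= "spi_gpio",
+ *		.id	= 1,	// implements SPI bus number 1
+ *		.dev	= {
+ *			.platform_data = &example_spi_gpio_pdata,
+ *		},
+ *	};
+ */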
+
+#endif /* __LINUX_SPI_GPIO_H */
diff --git a/include/linux/spi/spi_oc_tiny.h b/include/linux/spi/spi_oc_tiny.h
new file mode 100644
index 000000000..1ac529cf4
--- /dev/null
+++ b/include/linux/spi/spi_oc_tiny.h
@@ -0,0 +1,20 @@
+#ifndef _LINUX_SPI_SPI_OC_TINY_H
+#define _LINUX_SPI_SPI_OC_TINY_H
+
+/**
+ * struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI
+ * @freq: input clock freq to the core.
+ * @baudwidth: baud rate divider width of the core.
+ * @gpio_cs_count: number of gpio pins used for chipselect.
+ * @gpio_cs: array of gpio pins used for chipselect.
+ *
+ * freq and baudwidth are used only if the divider is programmable.
+ */
+struct tiny_spi_platform_data {
+ unsigned int freq;
+ unsigned int baudwidth;
+ unsigned int gpio_cs_count;
+ int *gpio_cs;
+};
+
+#endif /* _LINUX_SPI_SPI_OC_TINY_H */
diff --git a/include/linux/spi/tdo24m.h b/include/linux/spi/tdo24m.h
new file mode 100644
index 000000000..7572d4e1f
--- /dev/null
+++ b/include/linux/spi/tdo24m.h
@@ -0,0 +1,13 @@
+#ifndef __TDO24M_H__
+#define __TDO24M_H__
+
+enum tdo24m_model {
+ TDO24M,
+ TDO35S,
+};
+
+struct tdo24m_platform_data {
+ enum tdo24m_model model;
+};
+
+#endif /* __TDO24M_H__ */
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
new file mode 100644
index 000000000..414c6fddf
--- /dev/null
+++ b/include/linux/spi/tle62x0.h
@@ -0,0 +1,20 @@
+/*
+ * tle62x0.h - platform glue to Infineon TLE62x0 driver chips
+ *
+ * Copyright 2007 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+struct tle62x0_pdata {
+ unsigned int init_state;
+ unsigned int gpio_count;
+};
diff --git a/include/linux/spi/tsc2005.h b/include/linux/spi/tsc2005.h
new file mode 100644
index 000000000..563b3b179
--- /dev/null
+++ b/include/linux/spi/tsc2005.h
@@ -0,0 +1,34 @@
+/*
+ * This file is part of TSC2005 touchscreen driver
+ *
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_SPI_TSC2005_H
+#define _LINUX_SPI_TSC2005_H
+
+#include <linux/types.h>
+
+struct tsc2005_platform_data {
+ int ts_pressure_max;
+ int ts_pressure_fudge;
+ int ts_x_max;
+ int ts_x_fudge;
+ int ts_y_max;
+ int ts_y_fudge;
+ int ts_x_plate_ohm;
+ unsigned int esd_timeout_ms;
+ void (*set_reset)(bool enable);
+};
+
+#endif
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
new file mode 100644
index 000000000..333ecdfee
--- /dev/null
+++ b/include/linux/spi/xilinx_spi.h
@@ -0,0 +1,19 @@
+#ifndef __LINUX_SPI_XILINX_SPI_H
+#define __LINUX_SPI_XILINX_SPI_H
+
+/**
+ * struct xspi_platform_data - Platform data of the Xilinx SPI driver
+ * @num_chipselect: Number of chip selects supported by the IP.
+ * @bits_per_word: Number of bits per word.
+ * @devices: Devices to add when the driver is probed.
+ * @num_devices: Number of devices in the devices array.
+ */
+struct xspi_platform_data {
+ u16 num_chipselect;
+ u8 bits_per_word;
+ struct spi_board_info *devices;
+ u8 num_devices;
+};
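+
+/* A hedged sketch (names hypothetical): the devices/num_devices pair
+ * normally points at a spi_board_info table that the driver adds to the
+ * bus it creates at probe time:
+ *
+ *	static struct spi_board_info example_xspi_devs[] = {
+ *		{ .modalias = "example-flash", .chip_select = 0, },
+ *	};
+ *
+ *	static struct xspi_platform_data example_xspi_pdata = {
+ *		.num_chipselect	= 1,
+ *		.bits_per_word	= 8,
+ *		.devices	= example_xspi_devs,
+ *		.num_devices	= ARRAY_SIZE(example_xspi_devs),
+ *	};
+ */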
+
+#endif /* __LINUX_SPI_XILINX_SPI_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
new file mode 100644
index 000000000..3e18379df
--- /dev/null
+++ b/include/linux/spinlock.h
@@ -0,0 +1,429 @@
+#ifndef __LINUX_SPINLOCK_H
+#define __LINUX_SPINLOCK_H
+
+/*
+ * include/linux/spinlock.h - generic spinlock/rwlock declarations
+ *
+ * here's the role of the various spinlock/rwlock related include files:
+ *
+ * on SMP builds:
+ *
+ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
+ * initializers
+ *
+ * linux/spinlock_types.h:
+ * defines the generic type and initializers
+ *
+ * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
+ * implementations, mostly inline assembly code
+ *
+ * (also included on UP-debug builds:)
+ *
+ * linux/spinlock_api_smp.h:
+ * contains the prototypes for the _spin_*() APIs.
+ *
+ * linux/spinlock.h: builds the final spin_*() APIs.
+ *
+ * on UP builds:
+ *
+ * linux/spinlock_type_up.h:
+ * contains the generic, simplified UP spinlock type.
+ * (which is an empty structure on non-debug builds)
+ *
+ * linux/spinlock_types.h:
+ * defines the generic type and initializers
+ *
+ * linux/spinlock_up.h:
+ * contains the arch_spin_*()/etc. version of UP
+ * builds. (which are NOPs on non-debug, non-preempt
+ * builds)
+ *
+ * (included on UP-non-debug builds:)
+ *
+ * linux/spinlock_api_up.h:
+ * builds the _spin_*() APIs.
+ *
+ * linux/spinlock.h: builds the final spin_*() APIs.
+ */
+
+#include <linux/typecheck.h>
+#include <linux/preempt.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/thread_info.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include <linux/bottom_half.h>
+#include <asm/barrier.h>
+
+
+/*
+ * Must define these before including other files, inline functions need them
+ */
+#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME
+
+#define LOCK_SECTION_START(extra) \
+ ".subsection 1\n\t" \
+ extra \
+ ".ifndef " LOCK_SECTION_NAME "\n\t" \
+ LOCK_SECTION_NAME ":\n\t" \
+ ".endif\n"
+
+#define LOCK_SECTION_END \
+ ".previous\n\t"
+
+#define __lockfunc __attribute__((section(".spinlock.text")))
+
+/*
+ * Pull the arch_spinlock_t and arch_rwlock_t definitions:
+ */
+#include <linux/spinlock_types.h>
+
+/*
+ * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
+ */
+#ifdef CONFIG_SMP
+# include <asm/spinlock.h>
+#else
+# include <linux/spinlock_up.h>
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define raw_spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __raw_spin_lock_init((lock), #lock, &__key); \
+} while (0)
+
+#else
+# define raw_spin_lock_init(lock) \
+ do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
+#endif
+
+#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
+
+#ifdef CONFIG_GENERIC_LOCKBREAK
+#define raw_spin_is_contended(lock) ((lock)->break_lock)
+#else
+
+#ifdef arch_spin_is_contended
+#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
+#else
+#define raw_spin_is_contended(lock) (((void)(lock), 0))
+#endif /*arch_spin_is_contended*/
+#endif
+
+/*
+ * Despite its name it doesn't necessarily have to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * cannot be reordered with a LOAD inside this section.
+ * spin_lock() is a one-way barrier, so this LOAD cannot escape out
+ * of the region. So the default implementation simply ensures that
+ * a STORE cannot move into the critical section; smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
+#endif
+
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifndef smp_mb__after_unlock_lock
+#define smp_mb__after_unlock_lock() do { } while (0)
+#endif
+
+/**
+ * raw_spin_unlock_wait - wait until the spinlock gets unlocked
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
+ extern int do_raw_spin_trylock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+#else
+static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
+{
+ __acquire(lock);
+ arch_spin_lock(&lock->raw_lock);
+}
+
+static inline void
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
+{
+ __acquire(lock);
+ arch_spin_lock_flags(&lock->raw_lock, *flags);
+}
+
+static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
+{
+ return arch_spin_trylock(&(lock)->raw_lock);
+}
+
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+{
+ arch_spin_unlock(&lock->raw_lock);
+ __release(lock);
+}
+#endif
+
+/*
+ * Define the various spin_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
+ * various methods are defined as nops in the case they are not
+ * required.
+ */
+#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+
+#define raw_spin_lock(lock) _raw_spin_lock(lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock_nested(lock, subclass)
+# define raw_spin_lock_bh_nested(lock, subclass) \
+ _raw_spin_lock_bh_nested(lock, subclass)
+
+# define raw_spin_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+ _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+#else
+/*
+ * Always evaluate the 'subclass' argument to avoid compiler warnings
+ * about set-but-not-used variables when building with
+ * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
+ */
+# define raw_spin_lock_nested(lock, subclass) \
+ _raw_spin_lock(((void)(subclass), (lock)))
+# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
+# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
+#endif
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define raw_spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_spin_lock_irqsave(lock); \
+ } while (0)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
+ } while (0)
+#else
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _raw_spin_lock_irqsave(lock); \
+ } while (0)
+#endif
+
+#else
+
+#define raw_spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_spin_lock_irqsave(lock, flags); \
+ } while (0)
+
+#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
+ raw_spin_lock_irqsave(lock, flags)
+
+#endif
+
+#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
+#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
+
+#define raw_spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _raw_spin_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
+
+#define raw_spin_trylock_bh(lock) \
+ __cond_lock(lock, _raw_spin_trylock_bh(lock))
+
+#define raw_spin_trylock_irq(lock) \
+({ \
+ local_irq_disable(); \
+ raw_spin_trylock(lock) ? \
+ 1 : ({ local_irq_enable(); 0; }); \
+})
+
+#define raw_spin_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ raw_spin_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+/**
+ * raw_spin_can_lock - would raw_spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+/* Include rwlock functions */
+#include <linux/rwlock.h>
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+/*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+
+static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+{
+ return &lock->rlock;
+}
+
+#define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ raw_spin_lock_init(&(_lock)->rlock); \
+} while (0)
+
+static inline void spin_lock(spinlock_t *lock)
+{
+ raw_spin_lock(&lock->rlock);
+}
+
+static inline void spin_lock_bh(spinlock_t *lock)
+{
+ raw_spin_lock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ return raw_spin_trylock(&lock->rlock);
+}
+
+#define spin_lock_nested(lock, subclass) \
+do { \
+ raw_spin_lock_nested(spinlock_check(lock), subclass); \
+} while (0)
+
+#define spin_lock_bh_nested(lock, subclass) \
+do { \
+ raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
+} while (0)
+
+#define spin_lock_nest_lock(lock, nest_lock) \
+do { \
+ raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
+} while (0)
+
+static inline void spin_lock_irq(spinlock_t *lock)
+{
+ raw_spin_lock_irq(&lock->rlock);
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { \
+ raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+} while (0)
+
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+do { \
+ raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
+} while (0)
+
+static inline void spin_unlock(spinlock_t *lock)
+{
+ raw_spin_unlock(&lock->rlock);
+}
+
+static inline void spin_unlock_bh(spinlock_t *lock)
+{
+ raw_spin_unlock_bh(&lock->rlock);
+}
+
+static inline void spin_unlock_irq(spinlock_t *lock)
+{
+ raw_spin_unlock_irq(&lock->rlock);
+}
+
+static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(&lock->rlock, flags);
+}
+
+static inline int spin_trylock_bh(spinlock_t *lock)
+{
+ return raw_spin_trylock_bh(&lock->rlock);
+}
+
+static inline int spin_trylock_irq(spinlock_t *lock)
+{
+ return raw_spin_trylock_irq(&lock->rlock);
+}
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+})
+
+static inline void spin_unlock_wait(spinlock_t *lock)
+{
+ raw_spin_unlock_wait(&lock->rlock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+ return raw_spin_is_locked(&lock->rlock);
+}
+
+static inline int spin_is_contended(spinlock_t *lock)
+{
+ return raw_spin_is_contended(&lock->rlock);
+}
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+ return raw_spin_can_lock(&lock->rlock);
+}
+
+#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
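+
+/* A minimal sketch of the most common usage pattern built from the
+ * wrappers above; the lock and the counter it protects are hypothetical:
+ *
+ *	static DEFINE_SPINLOCK(example_lock);
+ *	static unsigned long example_count;
+ *
+ *	// in some function:
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&example_lock, flags);
+ *	example_count++;			// critical section
+ *	spin_unlock_irqrestore(&example_lock, flags);
+ */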
+
+/*
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
+ */
+#include <linux/atomic.h>
+/**
+ * atomic_dec_and_lock - lock on reaching reference count zero
+ * @atomic: the atomic counter
+ * @lock: the spinlock in question
+ *
+ * Decrements @atomic by 1. If the result is 0, returns true and locks
+ * @lock. Returns false for all other cases.
+ */
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+#define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
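+
+/* A hedged sketch of the refcount-release idiom this helper enables;
+ * the object, list and lock names are hypothetical:
+ *
+ *	if (atomic_dec_and_lock(&obj->refcount, &example_list_lock)) {
+ *		// count reached zero: list lock is now held
+ *		list_del(&obj->node);
+ *		spin_unlock(&example_list_lock);
+ *		kfree(obj);
+ *	}
+ */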
+
+#endif /* __LINUX_SPINLOCK_H */
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
new file mode 100644
index 000000000..5344268e6
--- /dev/null
+++ b/include/linux/spinlock_api_smp.h
@@ -0,0 +1,194 @@
+#ifndef __LINUX_SPINLOCK_API_SMP_H
+#define __LINUX_SPINLOCK_API_SMP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_smp.h
+ *
+ * spinlock API declarations on SMP (and debug)
+ * (implemented in kernel/spinlock.c)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+int in_lock_functions(unsigned long addr);
+
+#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
+
+void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+void __lockfunc
+_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
+ __acquires(lock);
+void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
+void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
+ __acquires(lock);
+
+unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
+ __acquires(lock);
+unsigned long __lockfunc
+_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
+ __acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
+void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
+void __lockfunc
+_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
+ __releases(lock);
+
+#ifdef CONFIG_INLINE_SPIN_LOCK
+#define _raw_spin_lock(lock) __raw_spin_lock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_LOCK_BH
+#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
+#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
+#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_TRYLOCK
+#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
+#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
+#endif
+
+#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
+#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
+#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
+#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
+#endif
+
+#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
+#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
+#endif
+
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
+{
+ preempt_disable();
+ if (do_raw_spin_trylock(lock)) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ preempt_enable();
+ return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ /*
+ * On lockdep we don't want the hand-coded irq-enable of
+ * do_raw_spin_lock_flags() code, because lockdep assumes
+ * that interrupts are not re-enabled during lock-acquire:
+ */
+#ifdef CONFIG_LOCKDEP
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+#else
+ do_raw_spin_lock_flags(lock, &flags);
+#endif
+ return flags;
+}
+
+static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
+{
+ local_irq_disable();
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+
+static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
+{
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+ preempt_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+}
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_spin_unlock(lock);
+ preempt_enable();
+}
+
+static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
+ unsigned long flags)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_spin_unlock(lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
+static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_spin_unlock(lock);
+ local_irq_enable();
+ preempt_enable();
+}
+
+static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
+{
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ do_raw_spin_unlock(lock);
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+}
+
+static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+{
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+ if (do_raw_spin_trylock(lock)) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ return 1;
+ }
+ __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
+ return 0;
+}
+
+#include <linux/rwlock_api_smp.h>
+
+#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
new file mode 100644
index 000000000..d3afef9d8
--- /dev/null
+++ b/include/linux/spinlock_api_up.h
@@ -0,0 +1,92 @@
+#ifndef __LINUX_SPINLOCK_API_UP_H
+#define __LINUX_SPINLOCK_API_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_api_up.h
+ *
+ * spinlock API implementation on UP-nondebug (inlined implementation)
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#define in_lock_functions(ADDR) 0
+
+#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
+
+/*
+ * In the UP-nondebug case there's no real locking going on, so the
+ * only thing we have to do is to keep the preempt counts and irq
+ * flags straight, to suppress compiler warnings of unused lock
+ * variables, and to add the proper checker annotations:
+ */
+#define ___LOCK(lock) \
+ do { __acquire(lock); (void)(lock); } while (0)
+
+#define __LOCK(lock) \
+ do { preempt_disable(); ___LOCK(lock); } while (0)
+
+#define __LOCK_BH(lock) \
+ do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
+
+#define __LOCK_IRQ(lock) \
+ do { local_irq_disable(); __LOCK(lock); } while (0)
+
+#define __LOCK_IRQSAVE(lock, flags) \
+ do { local_irq_save(flags); __LOCK(lock); } while (0)
+
+#define ___UNLOCK(lock) \
+ do { __release(lock); (void)(lock); } while (0)
+
+#define __UNLOCK(lock) \
+ do { preempt_enable(); ___UNLOCK(lock); } while (0)
+
+#define __UNLOCK_BH(lock) \
+ do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
+ ___UNLOCK(lock); } while (0)
+
+#define __UNLOCK_IRQ(lock) \
+ do { local_irq_enable(); __UNLOCK(lock); } while (0)
+
+#define __UNLOCK_IRQRESTORE(lock, flags) \
+ do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
+
+#define _raw_spin_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
+#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
+#define _raw_read_lock(lock) __LOCK(lock)
+#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
+#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
+#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
+#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
+#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
+#define _raw_spin_unlock(lock) __UNLOCK(lock)
+#define _raw_read_unlock(lock) __UNLOCK(lock)
+#define _raw_write_unlock(lock) __UNLOCK(lock)
+#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
+#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
+#define _raw_spin_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_read_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+#define _raw_write_unlock_irqrestore(lock, flags) \
+ __UNLOCK_IRQRESTORE(lock, flags)
+
+#endif /* __LINUX_SPINLOCK_API_UP_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
new file mode 100644
index 000000000..73548eb13
--- /dev/null
+++ b/include/linux/spinlock_types.h
@@ -0,0 +1,88 @@
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#define __LINUX_SPINLOCK_TYPES_H
+
+/*
+ * include/linux/spinlock_types.h - generic spinlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct raw_spinlock {
+ arch_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname) \
+ .magic = SPINLOCK_MAGIC, \
+ .owner_cpu = -1, \
+ .owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+typedef struct spinlock {
+ union {
+ struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+ struct {
+ u8 __padding[LOCK_PADSIZE];
+ struct lockdep_map dep_map;
+ };
+#endif
+ };
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#include <linux/rwlock_types.h>
+
+#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
new file mode 100644
index 000000000..c09b6407a
--- /dev/null
+++ b/include/linux/spinlock_types_up.h
@@ -0,0 +1,37 @@
+#ifndef __LINUX_SPINLOCK_TYPES_UP_H
+#define __LINUX_SPINLOCK_TYPES_UP_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+/*
+ * include/linux/spinlock_types_up.h - spinlock type definitions for UP
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+
+#else
+
+typedef struct { } arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { }
+
+#endif
+
+typedef struct {
+ /* no debug version on UP */
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { }
+
+#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
new file mode 100644
index 000000000..8b3ac0d71
--- /dev/null
+++ b/include/linux/spinlock_up.h
@@ -0,0 +1,85 @@
+#ifndef __LINUX_SPINLOCK_UP_H
+#define __LINUX_SPINLOCK_UP_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "please don't include this file directly"
+#endif
+
+#include <asm/processor.h> /* for cpu_relax() */
+
+/*
+ * include/linux/spinlock_up.h - UP-debug version of spinlocks.
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ *
+ * In the debug case, 1 means unlocked, 0 means locked. (the values
+ * are inverted, to catch initialization bugs)
+ *
+ * No atomicity anywhere, we are on UP. However, we still need
+ * the compiler barriers, because we do not want the compiler to
+ * move potentially faulting instructions (notably user accesses)
+ * into the locked sequence, resulting in non-atomic execution.
+ */
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define arch_spin_is_locked(x) ((x)->slock == 0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ lock->slock = 0;
+ barrier();
+}
+
+static inline void
+arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
+{
+ local_irq_save(flags);
+ lock->slock = 0;
+ barrier();
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ char oldval = lock->slock;
+
+ lock->slock = 0;
+ barrier();
+
+ return oldval > 0;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ barrier();
+ lock->slock = 1;
+}
+
+/*
+ * Read-write spinlocks. No debug version.
+ */
+#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
+
+#else /* DEBUG_SPINLOCK */
+#define arch_spin_is_locked(lock) ((void)(lock), 0)
+/* for sched/core.c and kernel_lock.c: */
+# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0)
+# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
+# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#endif /* DEBUG_SPINLOCK */
+
+#define arch_spin_is_contended(lock) (((void)(lock), 0))
+
+#define arch_read_can_lock(lock) (((void)(lock), 1))
+#define arch_write_can_lock(lock) (((void)(lock), 1))
+
+#define arch_spin_unlock_wait(lock) \
+ do { cpu_relax(); } while (arch_spin_is_locked(lock))
+
+#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/splice.h b/include/linux/splice.h
new file mode 100644
index 000000000..2e0fca67c
--- /dev/null
+++ b/include/linux/splice.h
@@ -0,0 +1,92 @@
+/*
+ * Function declarations and data structures related to the splice
+ * implementation.
+ *
+ * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
+ *
+ */
+#ifndef SPLICE_H
+#define SPLICE_H
+
+#include <linux/pipe_fs_i.h>
+
+/*
+ * Flags passed in from splice/tee/vmsplice
+ */
+#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
+#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
+ /* we may still block on the fd we splice */
+ /* from/to, of course */
+#define SPLICE_F_MORE (0x04) /* expect more data */
+#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */
+
+/*
+ * Passed to the actors
+ */
+struct splice_desc {
+ size_t total_len; /* remaining length */
+ unsigned int len; /* current length */
+ unsigned int flags; /* splice flags */
+ /*
+ * actor() private data
+ */
+ union {
+ void __user *userptr; /* memory to write to */
+ struct file *file; /* file to read/write */
+ void *data; /* cookie */
+ } u;
+ loff_t pos; /* file position */
+ loff_t *opos; /* sendfile: output position */
+ size_t num_spliced; /* number of bytes already spliced */
+ bool need_wakeup; /* need to wake up writer */
+};
+
+struct partial_page {
+ unsigned int offset;
+ unsigned int len;
+ unsigned long private;
+};
+
+/*
+ * Passed to splice_to_pipe
+ */
+struct splice_pipe_desc {
+ struct page **pages; /* page map */
+ struct partial_page *partial; /* pages[] may not be contig */
+ int nr_pages; /* number of populated pages in map */
+ unsigned int nr_pages_max; /* pages[] & partial[] arrays size */
+ unsigned int flags; /* splice flags */
+ const struct pipe_buf_operations *ops;/* ops associated with output pipe */
+ void (*spd_release)(struct splice_pipe_desc *, unsigned int);
+};
+
+typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
+ struct splice_desc *);
+typedef int (splice_direct_actor)(struct pipe_inode_info *,
+ struct splice_desc *);
+
+extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *,
+ loff_t *, size_t, unsigned int,
+ splice_actor *);
+extern ssize_t __splice_from_pipe(struct pipe_inode_info *,
+ struct splice_desc *, splice_actor *);
+extern ssize_t splice_to_pipe(struct pipe_inode_info *,
+ struct splice_pipe_desc *);
+extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
+ splice_direct_actor *);
+
+/*
+ * for dynamic pipe sizing
+ */
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
+extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
+
+extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
+
+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
+ loff_t *ppos, size_t len, unsigned int flags);
+extern long do_splice_to(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+#endif
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
new file mode 100644
index 000000000..f84212cd3
--- /dev/null
+++ b/include/linux/spmi.h
@@ -0,0 +1,188 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_SPMI_H
+#define _LINUX_SPMI_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+/* Maximum slave identifier */
+#define SPMI_MAX_SLAVE_ID 16
+
+/* SPMI Commands */
+#define SPMI_CMD_EXT_WRITE 0x00
+#define SPMI_CMD_RESET 0x10
+#define SPMI_CMD_SLEEP 0x11
+#define SPMI_CMD_SHUTDOWN 0x12
+#define SPMI_CMD_WAKEUP 0x13
+#define SPMI_CMD_AUTHENTICATE 0x14
+#define SPMI_CMD_MSTR_READ 0x15
+#define SPMI_CMD_MSTR_WRITE 0x16
+#define SPMI_CMD_TRANSFER_BUS_OWNERSHIP 0x1A
+#define SPMI_CMD_DDB_MASTER_READ 0x1B
+#define SPMI_CMD_DDB_SLAVE_READ 0x1C
+#define SPMI_CMD_EXT_READ 0x20
+#define SPMI_CMD_EXT_WRITEL 0x30
+#define SPMI_CMD_EXT_READL 0x38
+#define SPMI_CMD_WRITE 0x40
+#define SPMI_CMD_READ 0x60
+#define SPMI_CMD_ZERO_WRITE 0x80
+
+/**
+ * struct spmi_device - Basic representation of an SPMI device
+ * @dev: Driver model representation of the device.
+ * @ctrl: SPMI controller managing the bus hosting this device.
+ * @usid: This device's Unique Slave IDentifier.
+ */
+struct spmi_device {
+ struct device dev;
+ struct spmi_controller *ctrl;
+ u8 usid;
+};
+
+static inline struct spmi_device *to_spmi_device(struct device *d)
+{
+ return container_of(d, struct spmi_device, dev);
+}
+
+static inline void *spmi_device_get_drvdata(const struct spmi_device *sdev)
+{
+ return dev_get_drvdata(&sdev->dev);
+}
+
+static inline void spmi_device_set_drvdata(struct spmi_device *sdev, void *data)
+{
+ dev_set_drvdata(&sdev->dev, data);
+}
+
+struct spmi_device *spmi_device_alloc(struct spmi_controller *ctrl);
+
+static inline void spmi_device_put(struct spmi_device *sdev)
+{
+ if (sdev)
+ put_device(&sdev->dev);
+}
+
+int spmi_device_add(struct spmi_device *sdev);
+
+void spmi_device_remove(struct spmi_device *sdev);
+
+/**
+ * struct spmi_controller - interface to the SPMI master controller
+ * @dev: Driver model representation of the device.
+ * @nr: board-specific number identifier for this controller/bus
+ * @cmd: sends a non-data command sequence on the SPMI bus.
+ * @read_cmd: sends a register read command sequence on the SPMI bus.
+ * @write_cmd: sends a register write command sequence on the SPMI bus.
+ */
+struct spmi_controller {
+ struct device dev;
+ unsigned int nr;
+ int (*cmd)(struct spmi_controller *ctrl, u8 opcode, u8 sid);
+ int (*read_cmd)(struct spmi_controller *ctrl, u8 opcode,
+ u8 sid, u16 addr, u8 *buf, size_t len);
+ int (*write_cmd)(struct spmi_controller *ctrl, u8 opcode,
+ u8 sid, u16 addr, const u8 *buf, size_t len);
+};
+
+static inline struct spmi_controller *to_spmi_controller(struct device *d)
+{
+ return container_of(d, struct spmi_controller, dev);
+}
+
+static inline
+void *spmi_controller_get_drvdata(const struct spmi_controller *ctrl)
+{
+ return dev_get_drvdata(&ctrl->dev);
+}
+
+static inline void spmi_controller_set_drvdata(struct spmi_controller *ctrl,
+ void *data)
+{
+ dev_set_drvdata(&ctrl->dev, data);
+}
+
+struct spmi_controller *spmi_controller_alloc(struct device *parent,
+ size_t size);
+
+/**
+ * spmi_controller_put() - decrement controller refcount
+ * @ctrl: SPMI controller.
+ */
+static inline void spmi_controller_put(struct spmi_controller *ctrl)
+{
+ if (ctrl)
+ put_device(&ctrl->dev);
+}
+
+int spmi_controller_add(struct spmi_controller *ctrl);
+void spmi_controller_remove(struct spmi_controller *ctrl);
+
+/**
+ * struct spmi_driver - SPMI slave device driver
+ * @driver: SPMI device drivers should initialize the name and owner
+ * fields of this structure.
+ * @probe: binds this driver to a SPMI device.
+ * @remove: unbinds this driver from the SPMI device.
+ *
+ * If PM runtime support is desired for a slave, a device driver can call
+ * pm_runtime_put() from its probe() routine (and a balancing
+ * pm_runtime_get() in remove()). PM runtime support for a slave is
+ * implemented by issuing a SLEEP command to the slave on runtime_suspend(),
+ * transitioning the slave into the SLEEP state. On runtime_resume(), a WAKEUP
+ * command is sent to the slave to bring it back to ACTIVE.
+ */
+struct spmi_driver {
+ struct device_driver driver;
+ int (*probe)(struct spmi_device *sdev);
+ void (*remove)(struct spmi_device *sdev);
+};
+
+static inline struct spmi_driver *to_spmi_driver(struct device_driver *d)
+{
+ return container_of(d, struct spmi_driver, driver);
+}
+
+int spmi_driver_register(struct spmi_driver *sdrv);
+
+/**
+ * spmi_driver_unregister() - unregister an SPMI client driver
+ * @sdrv: the driver to unregister
+ */
+static inline void spmi_driver_unregister(struct spmi_driver *sdrv)
+{
+ if (sdrv)
+ driver_unregister(&sdrv->driver);
+}
+
+#define module_spmi_driver(__spmi_driver) \
+ module_driver(__spmi_driver, spmi_driver_register, \
+ spmi_driver_unregister)
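+
+/* A minimal sketch of a slave driver built on the hooks above; the driver
+ * name, register address and probe/remove bodies are hypothetical:
+ *
+ *	static int example_spmi_probe(struct spmi_device *sdev)
+ *	{
+ *		u8 val;
+ *
+ *		return spmi_register_read(sdev, 0x04, &val);
+ *	}
+ *
+ *	static void example_spmi_remove(struct spmi_device *sdev)
+ *	{
+ *	}
+ *
+ *	static struct spmi_driver example_spmi_driver = {
+ *		.probe	= example_spmi_probe,
+ *		.remove	= example_spmi_remove,
+ *		.driver	= {
+ *			.name	= "example-spmi-slave",
+ *			.owner	= THIS_MODULE,
+ *		},
+ *	};
+ *	module_spmi_driver(example_spmi_driver);
+ */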
+
+int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf);
+int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
+ size_t len);
+int spmi_ext_register_readl(struct spmi_device *sdev, u16 addr, u8 *buf,
+ size_t len);
+int spmi_register_write(struct spmi_device *sdev, u8 addr, u8 data);
+int spmi_register_zero_write(struct spmi_device *sdev, u8 data);
+int spmi_ext_register_write(struct spmi_device *sdev, u8 addr,
+ const u8 *buf, size_t len);
+int spmi_ext_register_writel(struct spmi_device *sdev, u16 addr,
+ const u8 *buf, size_t len);
+int spmi_command_reset(struct spmi_device *sdev);
+int spmi_command_sleep(struct spmi_device *sdev);
+int spmi_command_wakeup(struct spmi_device *sdev);
+int spmi_command_shutdown(struct spmi_device *sdev);
+
+#endif
diff --git a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h
new file mode 100644
index 000000000..6780fdb0a
--- /dev/null
+++ b/include/linux/sradix-tree.h
@@ -0,0 +1,77 @@
+#ifndef _LINUX_SRADIX_TREE_H
+#define _LINUX_SRADIX_TREE_H
+
+
+#define INIT_SRADIX_TREE(root, mask) \
+do { \
+ (root)->height = 0; \
+ (root)->gfp_mask = (mask); \
+ (root)->rnode = NULL; \
+} while (0)
+
+#define ULONG_BITS (sizeof(unsigned long) * 8)
+#define SRADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
+//#define SRADIX_TREE_MAP_SHIFT 6
+//#define SRADIX_TREE_MAP_SIZE (1UL << SRADIX_TREE_MAP_SHIFT)
+//#define SRADIX_TREE_MAP_MASK (SRADIX_TREE_MAP_SIZE-1)
+
+struct sradix_tree_node {
+ unsigned int height; /* Height from the bottom */
+ unsigned int count;
+ unsigned int fulls; /* Number of full sublevel trees */
+ struct sradix_tree_node *parent;
+ void *stores[0];
+};
+
+/* A simple radix tree implementation */
+struct sradix_tree_root {
+ unsigned int height;
+ struct sradix_tree_node *rnode;
+
+ /* Node last found to have available empty stores in its sublevels */
+ struct sradix_tree_node *enter_node;
+ unsigned int shift;
+ unsigned int stores_size;
+ unsigned int mask;
+ unsigned long min; /* The first hole index */
+ unsigned long num;
+ //unsigned long *height_to_maxindex;
+
+ /* How the node is allocated and freed. */
+ struct sradix_tree_node *(*alloc)(void);
+ void (*free)(struct sradix_tree_node *node);
+
+ /* Hooks called as nodes and items are added or removed */
+ void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child);
+ void (*assign)(struct sradix_tree_node *node, unsigned index, void *item);
+ void (*rm)(struct sradix_tree_node *node, unsigned offset);
+};
+
+struct sradix_tree_path {
+ struct sradix_tree_node *node;
+ int offset;
+};
+
+static inline
+void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift)
+{
+ root->height = 0;
+ root->rnode = NULL;
+ root->shift = shift;
+ root->stores_size = 1UL << shift;
+ root->mask = root->stores_size - 1;
+}
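+
+/* A hedged, partial sketch of setting up a root before use; the allocation
+ * hooks are hypothetical and a real user also supplies the extend/assign/rm
+ * callbacks it needs:
+ *
+ *	static struct sradix_tree_root example_root;
+ *
+ *	init_sradix_tree_root(&example_root, 6);	// 64 stores per node
+ *	example_root.alloc = example_node_alloc;
+ *	example_root.free = example_node_free;
+ */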
+
+
+extern void *sradix_tree_next(struct sradix_tree_root *root,
+ struct sradix_tree_node *node, unsigned long index,
+ int (*iter)(void *, unsigned long));
+
+extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num);
+
+extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root,
+ struct sradix_tree_node *node, unsigned long index);
+
+extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index);
+
+#endif /* _LINUX_SRADIX_TREE_H */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
new file mode 100644
index 000000000..bdeb4567b
--- /dev/null
+++ b/include/linux/srcu.h
@@ -0,0 +1,252 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ * Copyright (C) Fujitsu, 2012
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU/ *.txt
+ *
+ */
+
+#ifndef _LINUX_SRCU_H
+#define _LINUX_SRCU_H
+
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
+
+struct srcu_struct_array {
+ unsigned long c[2];
+ unsigned long seq[2];
+};
+
+struct rcu_batch {
+ struct rcu_head *head, **tail;
+};
+
+#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
+
+struct srcu_struct {
+ unsigned long completed;
+ struct srcu_struct_array __percpu *per_cpu_ref;
+ spinlock_t queue_lock; /* protect ->batch_queue, ->running */
+ bool running;
+ /* callbacks just queued */
+ struct rcu_batch batch_queue;
+ /* callbacks try to do the first check_zero */
+ struct rcu_batch batch_check0;
+ /* callbacks done with the first check_zero and the flip */
+ struct rcu_batch batch_check1;
+ struct rcu_batch batch_done;
+ struct delayed_work work;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+ struct lock_class_key *key);
+
+#define init_srcu_struct(sp) \
+({ \
+ static struct lock_class_key __srcu_key; \
+ \
+ __init_srcu_struct((sp), #sp, &__srcu_key); \
+})
+
+#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+int init_srcu_struct(struct srcu_struct *sp);
+
+#define __SRCU_DEP_MAP_INIT(srcu_name)
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name) \
+ { \
+ .completed = -300, \
+ .per_cpu_ref = &name##_srcu_array, \
+ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
+ .running = false, \
+ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
+ .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \
+ .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \
+ .batch_done = RCU_BATCH_INIT(name.batch_done), \
+ .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
+ __SRCU_DEP_MAP_INIT(name) \
+ }
+
+/*
+ * Define and initialize an srcu_struct at build time.
+ * Don't call init_srcu_struct() or cleanup_srcu_struct() on it.
+ */
+#define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+
+/**
+ * call_srcu() - Queue a callback for invocation after an SRCU grace period
+ * @sp: srcu_struct in which to queue the callback
+ * @head: structure to be used for queueing the SRCU callback.
+ * @func: function to be invoked after the SRCU grace period
+ *
+ * The callback function will be invoked some time after a full SRCU
+ * grace period elapses, in other words after all pre-existing SRCU
+ * read-side critical sections have completed. However, the callback
+ * function might well execute concurrently with other SRCU read-side
+ * critical sections that started after call_srcu() was invoked. SRCU
+ * read-side critical sections are delimited by srcu_read_lock() and
+ * srcu_read_unlock(), and may be nested.
+ *
+ * The callback will be invoked from process context, but must nevertheless
+ * be fast and must not block.
+ */
+void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
+
+void cleanup_srcu_struct(struct srcu_struct *sp);
+int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
+void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
+void synchronize_srcu(struct srcu_struct *sp);
+void synchronize_srcu_expedited(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * srcu_read_lock_held - might we be in SRCU read-side critical section?
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
+ * read-side critical section. In the absence of CONFIG_DEBUG_LOCK_ALLOC,
+ * this assumes we are in an SRCU read-side critical section unless it can
+ * prove otherwise.
+ *
+ * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that SRCU is based on its own state machine and does not
+ * rely on normal RCU; it can be called from a CPU which, from an
+ * RCU point of view, is in the idle loop or is offline.
+ */
+static inline int srcu_read_lock_held(struct srcu_struct *sp)
+{
+ if (!debug_lockdep_rcu_enabled())
+ return 1;
+ return lock_is_held(&sp->dep_map);
+}
+
+#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline int srcu_read_lock_held(struct srcu_struct *sp)
+{
+ return 1;
+}
+
+#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+/**
+ * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @sp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
+ * @c: condition to check for update-side use
+ *
+ * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
+ * critical section will result in an RCU-lockdep splat, unless @c evaluates
+ * to 1. The @c argument will normally be a logical expression containing
+ * lockdep_is_held() calls.
+ */
+#define srcu_dereference_check(p, sp, c) \
+ __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
+
+/**
+ * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @sp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
+ *
+ * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
+ * is enabled, invoking this outside of an RCU read-side critical
+ * section will result in an RCU-lockdep splat.
+ */
+#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
+
+/**
+ * srcu_read_lock - register a new reader for an SRCU-protected structure.
+ * @sp: srcu_struct in which to register the new reader.
+ *
+ * Enter an SRCU read-side critical section. Note that SRCU read-side
+ * critical sections may be nested. However, it is illegal to
+ * call anything that waits on an SRCU grace period for the same
+ * srcu_struct, whether directly or indirectly. Please note that
+ * one way to indirectly wait on an SRCU grace period is to acquire
+ * a mutex that is held elsewhere while calling synchronize_srcu() or
+ * synchronize_srcu_expedited().
+ *
+ * Note that srcu_read_lock() and the matching srcu_read_unlock() must
+ * occur in the same context, for example, it is illegal to invoke
+ * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
+ * was invoked in process context.
+ */
+static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+{
+ int retval = __srcu_read_lock(sp);
+
+ rcu_lock_acquire(&(sp)->dep_map);
+ return retval;
+}
+
+/**
+ * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
+ * @sp: srcu_struct in which to unregister the old reader.
+ * @idx: return value from corresponding srcu_read_lock().
+ *
+ * Exit an SRCU read-side critical section.
+ */
+static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
+ __releases(sp)
+{
+ rcu_lock_release(&(sp)->dep_map);
+ __srcu_read_unlock(sp, idx);
+}
+
+/**
+ * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
+ *
+ * Converts the preceding srcu_read_unlock into a two-way memory barrier.
+ *
+ * Call this after srcu_read_unlock, to guarantee that all memory operations
+ * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
+ * the preceding srcu_read_unlock.
+ */
+static inline void smp_mb__after_srcu_read_unlock(void)
+{
+ /* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
+}
+
+#endif
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
new file mode 100644
index 000000000..4568a5cc9
--- /dev/null
+++ b/include/linux/ssb/ssb.h
@@ -0,0 +1,681 @@
+#ifndef LINUX_SSB_H_
+#define LINUX_SSB_H_
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <linux/ssb/ssb_regs.h>
+
+
+struct pcmcia_device;
+struct ssb_bus;
+struct ssb_driver;
+
+struct ssb_sprom_core_pwr_info {
+ u8 itssi_2g, itssi_5g;
+ u8 maxpwr_2g, maxpwr_5gl, maxpwr_5g, maxpwr_5gh;
+ u16 pa_2g[4], pa_5gl[4], pa_5g[4], pa_5gh[4];
+};
+
+struct ssb_sprom {
+ u8 revision;
+ u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */
+ u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */
+ u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */
+ u8 et0phyaddr; /* MII address for enet0 */
+ u8 et1phyaddr; /* MII address for enet1 */
+ u8 et0mdcport; /* MDIO for enet0 */
+ u8 et1mdcport; /* MDIO for enet1 */
+ u16 dev_id; /* Device ID overriding e.g. PCI ID */
+ u16 board_rev; /* Board revision number from SPROM. */
+ u16 board_num; /* Board number from SPROM. */
+ u16 board_type; /* Board type from SPROM. */
+ u8 country_code; /* Country Code */
+ char alpha2[2]; /* Country Code as two chars like EU or US */
+ u8 leddc_on_time; /* LED Powersave Duty Cycle On Count */
+ u8 leddc_off_time; /* LED Powersave Duty Cycle Off Count */
+ u8 ant_available_a; /* 802.11a (5GHz) antenna available bits (up to 4) */
+ u8 ant_available_bg; /* 802.11b/g (2.4GHz) antenna available bits (up to 4) */
+ u16 pa0b0;
+ u16 pa0b1;
+ u16 pa0b2;
+ u16 pa1b0;
+ u16 pa1b1;
+ u16 pa1b2;
+ u16 pa1lob0;
+ u16 pa1lob1;
+ u16 pa1lob2;
+ u16 pa1hib0;
+ u16 pa1hib1;
+ u16 pa1hib2;
+ u8 gpio0; /* GPIO pin 0 */
+ u8 gpio1; /* GPIO pin 1 */
+ u8 gpio2; /* GPIO pin 2 */
+ u8 gpio3; /* GPIO pin 3 */
+ u8 maxpwr_bg; /* 2.4GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 maxpwr_al; /* 5.2GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 maxpwr_a; /* 5.3GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 maxpwr_ah; /* 5.8GHz Amplifier Max Power (in dBm Q5.2) */
+ u8 itssi_a; /* Idle TSSI Target for A-PHY */
+ u8 itssi_bg; /* Idle TSSI Target for B/G-PHY */
+ u8 tri2g; /* 2.4GHz TX isolation */
+ u8 tri5gl; /* 5.2GHz TX isolation */
+ u8 tri5g; /* 5.3GHz TX isolation */
+ u8 tri5gh; /* 5.8GHz TX isolation */
+ u8 txpid2g[4]; /* 2GHz TX power index */
+ u8 txpid5gl[4]; /* 4.9 - 5.1GHz TX power index */
+ u8 txpid5g[4]; /* 5.1 - 5.5GHz TX power index */
+ u8 txpid5gh[4]; /* 5.5 - ...GHz TX power index */
+ s8 rxpo2g; /* 2GHz RX power offset */
+ s8 rxpo5g; /* 5GHz RX power offset */
+ u8 rssisav2g; /* 2GHz RSSI params */
+ u8 rssismc2g;
+ u8 rssismf2g;
+ u8 bxa2g; /* 2GHz BX arch */
+ u8 rssisav5g; /* 5GHz RSSI params */
+ u8 rssismc5g;
+ u8 rssismf5g;
+ u8 bxa5g; /* 5GHz BX arch */
+ u16 cck2gpo; /* CCK power offset */
+ u32 ofdm2gpo; /* 2.4GHz OFDM power offset */
+ u32 ofdm5glpo; /* 5.2GHz OFDM power offset */
+ u32 ofdm5gpo; /* 5.3GHz OFDM power offset */
+ u32 ofdm5ghpo; /* 5.8GHz OFDM power offset */
+ u16 boardflags_lo; /* Board flags (bits 0-15) */
+ u16 boardflags_hi; /* Board flags (bits 16-31) */
+ u16 boardflags2_lo; /* Board flags (bits 32-47) */
+ u16 boardflags2_hi; /* Board flags (bits 48-63) */
+ /* TODO store board flags in a single u64 */
+
+ struct ssb_sprom_core_pwr_info core_pwr_info[4];
+
+ /* Antenna gain values for up to 4 antennas
+ * on each band. Values in dBm/4 (Q5.2). Negative gain means the
+ * loss in the connectors is bigger than the gain. */
+ struct {
+ s8 a0, a1, a2, a3;
+ } antenna_gain;
+
+ struct {
+ struct {
+ u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
+ } ghz2;
+ struct {
+ u8 tssipos, extpa_gain, pdet_range, tr_iso, antswlut;
+ } ghz5;
+ } fem;
+
+ u16 mcs2gpo[8];
+ u16 mcs5gpo[8];
+ u16 mcs5glpo[8];
+ u16 mcs5ghpo[8];
+ u8 opo;
+
+ u8 rxgainerr2ga[3];
+ u8 rxgainerr5gla[3];
+ u8 rxgainerr5gma[3];
+ u8 rxgainerr5gha[3];
+ u8 rxgainerr5gua[3];
+
+ u8 noiselvl2ga[3];
+ u8 noiselvl5gla[3];
+ u8 noiselvl5gma[3];
+ u8 noiselvl5gha[3];
+ u8 noiselvl5gua[3];
+
+ u8 regrev;
+ u8 txchain;
+ u8 rxchain;
+ u8 antswitch;
+ u16 cddpo;
+ u16 stbcpo;
+ u16 bw40po;
+ u16 bwduppo;
+
+ u8 tempthresh;
+ u8 tempoffset;
+ u16 rawtempsense;
+ u8 measpower;
+ u8 tempsense_slope;
+ u8 tempcorrx;
+ u8 tempsense_option;
+ u8 freqoffset_corr;
+ u8 iqcal_swp_dis;
+ u8 hw_iqcal_en;
+ u8 elna2g;
+ u8 elna5g;
+ u8 phycal_tempdelta;
+ u8 temps_period;
+ u8 temps_hysteresis;
+ u8 measpower1;
+ u8 measpower2;
+ u8 pcieingress_war;
+
+ /* power per rate from sromrev 9 */
+ u16 cckbw202gpo;
+ u16 cckbw20ul2gpo;
+ u32 legofdmbw202gpo;
+ u32 legofdmbw20ul2gpo;
+ u32 legofdmbw205glpo;
+ u32 legofdmbw20ul5glpo;
+ u32 legofdmbw205gmpo;
+ u32 legofdmbw20ul5gmpo;
+ u32 legofdmbw205ghpo;
+ u32 legofdmbw20ul5ghpo;
+ u32 mcsbw202gpo;
+ u32 mcsbw20ul2gpo;
+ u32 mcsbw402gpo;
+ u32 mcsbw205glpo;
+ u32 mcsbw20ul5glpo;
+ u32 mcsbw405glpo;
+ u32 mcsbw205gmpo;
+ u32 mcsbw20ul5gmpo;
+ u32 mcsbw405gmpo;
+ u32 mcsbw205ghpo;
+ u32 mcsbw20ul5ghpo;
+ u32 mcsbw405ghpo;
+ u16 mcs32po;
+ u16 legofdm40duppo;
+ u8 sar2g;
+ u8 sar5g;
+};
+
+/* Information about the PCB the circuitry is soldered on. */
+struct ssb_boardinfo {
+ u16 vendor;
+ u16 type;
+};
+
+
+struct ssb_device;
+/* Low-level read/write operations on the device MMIO.
+ * Internal to the ssb core; do not use these from outside of ssb. */
+struct ssb_bus_ops {
+ u8 (*read8)(struct ssb_device *dev, u16 offset);
+ u16 (*read16)(struct ssb_device *dev, u16 offset);
+ u32 (*read32)(struct ssb_device *dev, u16 offset);
+ void (*write8)(struct ssb_device *dev, u16 offset, u8 value);
+ void (*write16)(struct ssb_device *dev, u16 offset, u16 value);
+ void (*write32)(struct ssb_device *dev, u16 offset, u32 value);
+#ifdef CONFIG_SSB_BLOCKIO
+ void (*block_read)(struct ssb_device *dev, void *buffer,
+ size_t count, u16 offset, u8 reg_width);
+ void (*block_write)(struct ssb_device *dev, const void *buffer,
+ size_t count, u16 offset, u8 reg_width);
+#endif
+};
+
+
+/* Core-ID values. */
+#define SSB_DEV_CHIPCOMMON 0x800
+#define SSB_DEV_ILINE20 0x801
+#define SSB_DEV_SDRAM 0x803
+#define SSB_DEV_PCI 0x804
+#define SSB_DEV_MIPS 0x805
+#define SSB_DEV_ETHERNET 0x806
+#define SSB_DEV_V90 0x807
+#define SSB_DEV_USB11_HOSTDEV 0x808
+#define SSB_DEV_ADSL 0x809
+#define SSB_DEV_ILINE100 0x80A
+#define SSB_DEV_IPSEC 0x80B
+#define SSB_DEV_PCMCIA 0x80D
+#define SSB_DEV_INTERNAL_MEM 0x80E
+#define SSB_DEV_MEMC_SDRAM 0x80F
+#define SSB_DEV_EXTIF 0x811
+#define SSB_DEV_80211 0x812
+#define SSB_DEV_MIPS_3302 0x816
+#define SSB_DEV_USB11_HOST 0x817
+#define SSB_DEV_USB11_DEV 0x818
+#define SSB_DEV_USB20_HOST 0x819
+#define SSB_DEV_USB20_DEV 0x81A
+#define SSB_DEV_SDIO_HOST 0x81B
+#define SSB_DEV_ROBOSWITCH 0x81C
+#define SSB_DEV_PARA_ATA 0x81D
+#define SSB_DEV_SATA_XORDMA 0x81E
+#define SSB_DEV_ETHERNET_GBIT 0x81F
+#define SSB_DEV_PCIE 0x820
+#define SSB_DEV_MIMO_PHY 0x821
+#define SSB_DEV_SRAM_CTRLR 0x822
+#define SSB_DEV_MINI_MACPHY 0x823
+#define SSB_DEV_ARM_1176 0x824
+#define SSB_DEV_ARM_7TDMI 0x825
+#define SSB_DEV_ARM_CM3 0x82A
+
+/* Vendor-ID values */
+#define SSB_VENDOR_BROADCOM 0x4243
+
+/* Some kernel subsystems poke with dev->drvdata, so we must use the
+ * following ugly workaround to get from struct device to struct ssb_device */
+struct __ssb_dev_wrapper {
+ struct device dev;
+ struct ssb_device *sdev;
+};
+
+struct ssb_device {
+ /* Having a copy of the ops pointer in each dev struct
+ * is an optimization. */
+ const struct ssb_bus_ops *ops;
+
+ struct device *dev, *dma_dev;
+
+ struct ssb_bus *bus;
+ struct ssb_device_id id;
+
+ u8 core_index;
+ unsigned int irq;
+
+ /* Internal-only stuff follows. */
+ void *drvdata; /* Per-device data */
+ void *devtypedata; /* Per-devicetype (eg 802.11) data */
+};
+
+/* Go from struct device to struct ssb_device. */
+static inline
+struct ssb_device * dev_to_ssb_dev(struct device *dev)
+{
+ struct __ssb_dev_wrapper *wrap;
+ wrap = container_of(dev, struct __ssb_dev_wrapper, dev);
+ return wrap->sdev;
+}
+
+/* Device specific user data */
+static inline
+void ssb_set_drvdata(struct ssb_device *dev, void *data)
+{
+ dev->drvdata = data;
+}
+static inline
+void * ssb_get_drvdata(struct ssb_device *dev)
+{
+ return dev->drvdata;
+}
+
+/* Devicetype specific user data. This is per device-type (not per device) */
+void ssb_set_devtypedata(struct ssb_device *dev, void *data);
+static inline
+void * ssb_get_devtypedata(struct ssb_device *dev)
+{
+ return dev->devtypedata;
+}
+
+
+struct ssb_driver {
+ const char *name;
+ const struct ssb_device_id *id_table;
+
+ int (*probe)(struct ssb_device *dev, const struct ssb_device_id *id);
+ void (*remove)(struct ssb_device *dev);
+ int (*suspend)(struct ssb_device *dev, pm_message_t state);
+ int (*resume)(struct ssb_device *dev);
+ void (*shutdown)(struct ssb_device *dev);
+
+ struct device_driver drv;
+};
+#define drv_to_ssb_drv(_drv) container_of(_drv, struct ssb_driver, drv)
+
+extern int __ssb_driver_register(struct ssb_driver *drv, struct module *owner);
+#define ssb_driver_register(drv) \
+ __ssb_driver_register(drv, THIS_MODULE)
+
+extern void ssb_driver_unregister(struct ssb_driver *drv);
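+
+/*
+ * Illustrative sketch ("foo_probe", "foo_remove" and "foo_driver" are
+ * hypothetical, and the positional struct ssb_device_id initializers assume
+ * the vendor/coreid/revision layout from <linux/mod_devicetable.h>; 0xFF in
+ * the revision field stands for "any revision"): a driver matches cores by
+ * vendor and core ID and registers itself from its module init code:
+ *
+ *	static const struct ssb_device_id foo_tbl[] = {
+ *		{ SSB_VENDOR_BROADCOM, SSB_DEV_80211, 0xFF },
+ *		{},
+ *	};
+ *
+ *	static struct ssb_driver foo_driver = {
+ *		.name		= "foo",
+ *		.id_table	= foo_tbl,
+ *		.probe		= foo_probe,
+ *		.remove		= foo_remove,
+ *	};
+ *
+ *	module init:	ssb_driver_register(&foo_driver);
+ *	module exit:	ssb_driver_unregister(&foo_driver);
+ */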
+
+
+
+
+enum ssb_bustype {
+ SSB_BUSTYPE_SSB, /* This SSB bus is the system bus */
+ SSB_BUSTYPE_PCI, /* SSB is connected to PCI bus */
+ SSB_BUSTYPE_PCMCIA, /* SSB is connected to PCMCIA bus */
+ SSB_BUSTYPE_SDIO, /* SSB is connected to SDIO bus */
+};
+
+/* board_vendor */
+#define SSB_BOARDVENDOR_BCM 0x14E4 /* Broadcom */
+#define SSB_BOARDVENDOR_DELL 0x1028 /* Dell */
+#define SSB_BOARDVENDOR_HP 0x0E11 /* HP */
+/* board_type */
+#define SSB_BOARD_BCM94301CB 0x0406
+#define SSB_BOARD_BCM94301MP 0x0407
+#define SSB_BOARD_BU4309 0x040A
+#define SSB_BOARD_BCM94309CB 0x040B
+#define SSB_BOARD_BCM4309MP 0x040C
+#define SSB_BOARD_BU4306 0x0416
+#define SSB_BOARD_BCM94306MP 0x0418
+#define SSB_BOARD_BCM4309G 0x0421
+#define SSB_BOARD_BCM4306CB 0x0417
+#define SSB_BOARD_BCM94306PC 0x0425 /* pcmcia 3.3v 4306 card */
+#define SSB_BOARD_BCM94306CBSG 0x042B /* with SiGe PA */
+#define SSB_BOARD_PCSG94306 0x042D /* with SiGe PA */
+#define SSB_BOARD_BU4704SD 0x042E /* with sdram */
+#define SSB_BOARD_BCM94704AGR 0x042F /* dual 11a/11g Router */
+#define SSB_BOARD_BCM94308MP 0x0430 /* 11a-only minipci */
+#define SSB_BOARD_BU4318 0x0447
+#define SSB_BOARD_CB4318 0x0448
+#define SSB_BOARD_MPG4318 0x0449
+#define SSB_BOARD_MP4318 0x044A
+#define SSB_BOARD_SD4318 0x044B
+#define SSB_BOARD_BCM94306P 0x044C /* with SiGe */
+#define SSB_BOARD_BCM94303MP 0x044E
+#define SSB_BOARD_BCM94306MPM 0x0450
+#define SSB_BOARD_BCM94306MPL 0x0453
+#define SSB_BOARD_PC4303 0x0454 /* pcmcia */
+#define SSB_BOARD_BCM94306MPLNA 0x0457
+#define SSB_BOARD_BCM94306MPH 0x045B
+#define SSB_BOARD_BCM94306PCIV 0x045C
+#define SSB_BOARD_BCM94318MPGH 0x0463
+#define SSB_BOARD_BU4311 0x0464
+#define SSB_BOARD_BCM94311MC 0x0465
+#define SSB_BOARD_BCM94311MCAG 0x0466
+/* 4321 boards */
+#define SSB_BOARD_BU4321 0x046B
+#define SSB_BOARD_BU4321E 0x047C
+#define SSB_BOARD_MP4321 0x046C
+#define SSB_BOARD_CB2_4321 0x046D
+#define SSB_BOARD_CB2_4321_AG 0x0066
+#define SSB_BOARD_MC4321 0x046E
+/* 4325 boards */
+#define SSB_BOARD_BCM94325DEVBU 0x0490
+#define SSB_BOARD_BCM94325BGABU 0x0491
+#define SSB_BOARD_BCM94325SDGWB 0x0492
+#define SSB_BOARD_BCM94325SDGMDL 0x04AA
+#define SSB_BOARD_BCM94325SDGMDL2 0x04C6
+#define SSB_BOARD_BCM94325SDGMDL3 0x04C9
+#define SSB_BOARD_BCM94325SDABGWBA 0x04E1
+/* 4322 boards */
+#define SSB_BOARD_BCM94322MC 0x04A4
+#define SSB_BOARD_BCM94322USB 0x04A8 /* dualband */
+#define SSB_BOARD_BCM94322HM 0x04B0
+#define SSB_BOARD_BCM94322USB2D 0x04BF /* single band discrete front end */
+/* 4312 boards */
+#define SSB_BOARD_BU4312 0x048A
+#define SSB_BOARD_BCM4312MCGSG 0x04B5
+/* chip_package */
+#define SSB_CHIPPACK_BCM4712S 1 /* Small 200pin 4712 */
+#define SSB_CHIPPACK_BCM4712M 2 /* Medium 225pin 4712 */
+#define SSB_CHIPPACK_BCM4712L 0 /* Large 340pin 4712 */
+
+#include <linux/ssb/ssb_driver_chipcommon.h>
+#include <linux/ssb/ssb_driver_mips.h>
+#include <linux/ssb/ssb_driver_extif.h>
+#include <linux/ssb/ssb_driver_pci.h>
+
+struct ssb_bus {
+ /* The MMIO area. */
+ void __iomem *mmio;
+
+ const struct ssb_bus_ops *ops;
+
+ /* The core currently mapped into the MMIO window.
+ * Not valid on all host buses, so do not use it outside of the ssb core. */
+ struct ssb_device *mapped_device;
+ union {
+ /* Currently mapped PCMCIA segment. (bustype == SSB_BUSTYPE_PCMCIA only) */
+ u8 mapped_pcmcia_seg;
+ /* Current SSB base address window for SDIO. */
+ u32 sdio_sbaddr;
+ };
+ /* Lock for core and segment switching.
+ * On PCMCIA-host busses this is used to protect the whole MMIO access. */
+ spinlock_t bar_lock;
+
+ /* The host-bus this backplane is running on. */
+ enum ssb_bustype bustype;
+ /* Pointers to the host-bus. Check bustype before using any of these pointers. */
+ union {
+ /* Pointer to the PCI bus (only valid if bustype == SSB_BUSTYPE_PCI). */
+ struct pci_dev *host_pci;
+ /* Pointer to the PCMCIA device (only if bustype == SSB_BUSTYPE_PCMCIA). */
+ struct pcmcia_device *host_pcmcia;
+ /* Pointer to the SDIO device (only if bustype == SSB_BUSTYPE_SDIO). */
+ struct sdio_func *host_sdio;
+ };
+
+ /* See enum ssb_quirks */
+ unsigned int quirks;
+
+#ifdef CONFIG_SSB_SPROM
+ /* Mutex to protect the SPROM writing. */
+ struct mutex sprom_mutex;
+#endif
+
+ /* ID information about the Chip. */
+ u16 chip_id;
+ u8 chip_rev;
+ u16 sprom_offset;
+ u16 sprom_size; /* number of words in sprom */
+ u8 chip_package;
+
+ /* List of devices (cores) on the backplane. */
+ struct ssb_device devices[SSB_MAX_NR_CORES];
+ u8 nr_devices;
+
+ /* Software ID number for this bus. */
+ unsigned int busnumber;
+
+ /* The ChipCommon device (if available). */
+ struct ssb_chipcommon chipco;
+ /* The PCI-core device (if available). */
+ struct ssb_pcicore pcicore;
+ /* The MIPS-core device (if available). */
+ struct ssb_mipscore mipscore;
+ /* The EXTif-core device (if available). */
+ struct ssb_extif extif;
+
+ /* The following structure elements are not available during early
+ * SSB initialization. They are, however, available to regular
+ * registered drivers at any stage. So be careful when
+ * using them in the ssb core code. */
+
+ /* ID information about the PCB. */
+ struct ssb_boardinfo boardinfo;
+ /* Contents of the SPROM. */
+ struct ssb_sprom sprom;
+ /* If the board has a cardbus slot, this is set to true. */
+ bool has_cardbus_slot;
+
+#ifdef CONFIG_SSB_EMBEDDED
+ /* Lock for GPIO register access. */
+ spinlock_t gpio_lock;
+ struct platform_device *watchdog;
+#endif /* EMBEDDED */
+#ifdef CONFIG_SSB_DRIVER_GPIO
+ struct gpio_chip gpio;
+ struct irq_domain *irq_domain;
+#endif /* DRIVER_GPIO */
+
+ /* Internal-only stuff follows. Do not touch. */
+ struct list_head list;
+#ifdef CONFIG_SSB_DEBUG
+ /* Is the bus already powered up? */
+ bool powered_up;
+ int power_warn_count;
+#endif /* DEBUG */
+};
+
+enum ssb_quirks {
+ /* SDIO connected card requires performing a read after writing a 32-bit value */
+ SSB_QUIRK_SDIO_READ_AFTER_WRITE32 = (1 << 0),
+};
+
+/* The initialization-invariants. */
+struct ssb_init_invariants {
+ /* Versioning information about the PCB. */
+ struct ssb_boardinfo boardinfo;
+ /* The SPROM information. It is stored in either an
+ * EEPROM or NVRAM on the board. */
+ struct ssb_sprom sprom;
+ /* If the board has a cardbus slot, this is set to true. */
+ bool has_cardbus_slot;
+};
+/* Type of function to fetch the invariants. */
+typedef int (*ssb_invariants_func_t)(struct ssb_bus *bus,
+ struct ssb_init_invariants *iv);
+
+/* Register an SSB system bus. get_invariants() is called after the
+ * basic system devices are initialized.
+ * The invariants are usually fetched from some NVRAM.
+ * Put the invariants into the struct pointed to by iv. */
+extern int ssb_bus_ssbbus_register(struct ssb_bus *bus,
+ unsigned long baseaddr,
+ ssb_invariants_func_t get_invariants);
+#ifdef CONFIG_SSB_PCIHOST
+extern int ssb_bus_pcibus_register(struct ssb_bus *bus,
+ struct pci_dev *host_pci);
+#endif /* CONFIG_SSB_PCIHOST */
+#ifdef CONFIG_SSB_PCMCIAHOST
+extern int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
+ struct pcmcia_device *pcmcia_dev,
+ unsigned long baseaddr);
+#endif /* CONFIG_SSB_PCMCIAHOST */
+#ifdef CONFIG_SSB_SDIOHOST
+extern int ssb_bus_sdiobus_register(struct ssb_bus *bus,
+ struct sdio_func *sdio_func,
+ unsigned int quirks);
+#endif /* CONFIG_SSB_SDIOHOST */
+
+
+extern void ssb_bus_unregister(struct ssb_bus *bus);
+
+/* Does the device have an SPROM? */
+extern bool ssb_is_sprom_available(struct ssb_bus *bus);
+
+/* Set a fallback SPROM.
+ * See kdoc at the function definition for complete documentation. */
+extern int ssb_arch_register_fallback_sprom(
+ int (*sprom_callback)(struct ssb_bus *bus,
+ struct ssb_sprom *out));
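+
+/*
+ * Illustrative sketch ("board_fill_sprom" is a hypothetical name): platform
+ * code for a board without an on-chip SPROM can register a callback that
+ * fills in the passed structure, e.g. from NVRAM:
+ *
+ *	static int board_fill_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
+ *	{
+ *		memset(out, 0, sizeof(*out));
+ *		out->revision = 3;
+ *		... fill MAC addresses, board flags, ... from NVRAM ...
+ *		return 0;
+ *	}
+ *
+ *	ssb_arch_register_fallback_sprom(board_fill_sprom);
+ */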
+
+/* Suspend an SSB bus.
+ * Call this from the parent bus suspend routine. */
+extern int ssb_bus_suspend(struct ssb_bus *bus);
+/* Resume an SSB bus.
+ * Call this from the parent bus resume routine. */
+extern int ssb_bus_resume(struct ssb_bus *bus);
+
+extern u32 ssb_clockspeed(struct ssb_bus *bus);
+
+/* Is the device enabled in hardware? */
+int ssb_device_is_enabled(struct ssb_device *dev);
+/* Enable a device and pass device-specific SSB_TMSLOW flags.
+ * If no device-specific flags are available, use 0. */
+void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags);
+/* Disable a device in hardware and pass SSB_TMSLOW flags (if any). */
+void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags);
+
+
+/* Device MMIO register read/write functions. */
+static inline u8 ssb_read8(struct ssb_device *dev, u16 offset)
+{
+ return dev->ops->read8(dev, offset);
+}
+static inline u16 ssb_read16(struct ssb_device *dev, u16 offset)
+{
+ return dev->ops->read16(dev, offset);
+}
+static inline u32 ssb_read32(struct ssb_device *dev, u16 offset)
+{
+ return dev->ops->read32(dev, offset);
+}
+static inline void ssb_write8(struct ssb_device *dev, u16 offset, u8 value)
+{
+ dev->ops->write8(dev, offset, value);
+}
+static inline void ssb_write16(struct ssb_device *dev, u16 offset, u16 value)
+{
+ dev->ops->write16(dev, offset, value);
+}
+static inline void ssb_write32(struct ssb_device *dev, u16 offset, u32 value)
+{
+ dev->ops->write32(dev, offset, value);
+}
+#ifdef CONFIG_SSB_BLOCKIO
+static inline void ssb_block_read(struct ssb_device *dev, void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ dev->ops->block_read(dev, buffer, count, offset, reg_width);
+}
+
+static inline void ssb_block_write(struct ssb_device *dev, const void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ dev->ops->block_write(dev, buffer, count, offset, reg_width);
+}
+#endif /* CONFIG_SSB_BLOCKIO */
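+
+/*
+ * Illustrative sketch (the register offset FOO_CTL and bit FOO_CTL_EN are
+ * hypothetical): a driver does a read-modify-write of one of its core
+ * registers through these wrappers, which dispatch to the host-bus
+ * specific ops:
+ *
+ *	u32 ctl;
+ *
+ *	ctl = ssb_read32(dev, FOO_CTL);
+ *	ctl |= FOO_CTL_EN;
+ *	ssb_write32(dev, FOO_CTL, ctl);
+ */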
+
+
+/* The SSB DMA API. Use this API for any DMA operation on the device.
+ * This API basically is a wrapper that calls the correct DMA API for
+ * the host device type the SSB device is attached to. */
+
+/* Translation (routing) bits that need to be ORed to DMA
+ * addresses before they are given to a device. */
+extern u32 ssb_dma_translation(struct ssb_device *dev);
+#define SSB_DMA_TRANSLATION_MASK 0xC0000000
+#define SSB_DMA_TRANSLATION_SHIFT 30
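+
+/*
+ * Illustrative sketch ("buf" and "len" are hypothetical): the translation
+ * bits are ORed into the 32-bit bus address before it is programmed into
+ * the core's DMA engine:
+ *
+ *	dma_addr_t mapping = dma_map_single(dev->dma_dev, buf, len,
+ *					    DMA_TO_DEVICE);
+ *	u32 devaddr = (u32)mapping | ssb_dma_translation(dev);
+ */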
+
+static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev)
+{
+#ifdef CONFIG_SSB_DEBUG
+ printk(KERN_ERR "SSB: BUG! Calling DMA API for "
+ "unsupported bustype %d\n", dev->bus->bustype);
+#endif /* DEBUG */
+}
+
+#ifdef CONFIG_SSB_PCIHOST
+/* PCI-host wrapper driver */
+extern int ssb_pcihost_register(struct pci_driver *driver);
+static inline void ssb_pcihost_unregister(struct pci_driver *driver)
+{
+ pci_unregister_driver(driver);
+}
+
+static inline
+void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state)
+{
+ if (sdev->bus->bustype == SSB_BUSTYPE_PCI)
+ pci_set_power_state(sdev->bus->host_pci, state);
+}
+#else
+static inline void ssb_pcihost_unregister(struct pci_driver *driver)
+{
+}
+
+static inline
+void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state)
+{
+}
+#endif /* CONFIG_SSB_PCIHOST */
+
+
+/* If a driver is shut down or suspended, call this to signal
+ * that the bus may be completely powered down. SSB will decide
+ * whether it is really time to power down the bus, based on
+ * whether other devices still want to run. */
+extern int ssb_bus_may_powerdown(struct ssb_bus *bus);
+/* Before initializing and enabling a device, call this to power-up the bus.
+ * If you want to allow use of dynamic-power-control, pass the flag.
+ * Otherwise static always-on powercontrol will be used. */
+extern int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl);
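+
+/*
+ * Illustrative sketch: a typical driver powers the bus up before touching
+ * its core and signals a possible powerdown when it is done:
+ *
+ *	err = ssb_bus_powerup(dev->bus, true);	(allow dynamic power control)
+ *	if (err)
+ *		goto out;
+ *	ssb_device_enable(dev, 0);		(no core-specific TMSLOW flags)
+ *	... use the device ...
+ *	ssb_device_disable(dev, 0);
+ *	ssb_bus_may_powerdown(dev->bus);
+ */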
+
+extern void ssb_commit_settings(struct ssb_bus *bus);
+
+/* Various helper functions */
+extern u32 ssb_admatch_base(u32 adm);
+extern u32 ssb_admatch_size(u32 adm);
+
+/* PCI device mapping and fixup routines.
+ * Called from the architecture pcibios init code.
+ * These are only available on SSB_EMBEDDED configurations. */
+#ifdef CONFIG_SSB_EMBEDDED
+int ssb_pcibios_plat_dev_init(struct pci_dev *dev);
+int ssb_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+#endif /* CONFIG_SSB_EMBEDDED */
+
+#endif /* LINUX_SSB_H_ */
diff --git a/include/linux/ssb/ssb_driver_chipcommon.h b/include/linux/ssb/ssb_driver_chipcommon.h
new file mode 100644
index 000000000..6fcfe99bd
--- /dev/null
+++ b/include/linux/ssb/ssb_driver_chipcommon.h
@@ -0,0 +1,673 @@
+#ifndef LINUX_SSB_CHIPCO_H_
+#define LINUX_SSB_CHIPCO_H_
+
+/* SonicsSiliconBackplane CHIPCOMMON core hardware definitions
+ *
+ * The chipcommon core provides chip identification, SB control,
+ * jtag, 0/1/2 uarts, clock frequency control, a watchdog interrupt timer,
+ * gpio interface, extbus, and support for serial and parallel flashes.
+ *
+ * Copyright 2005, Broadcom Corporation
+ * Copyright 2006, Michael Buesch <m@bues.ch>
+ *
+ * Licensed under the GPL version 2. See COPYING for details.
+ */
+
+/** ChipCommon core registers. **/
+
+#define SSB_CHIPCO_CHIPID 0x0000
+#define SSB_CHIPCO_IDMASK 0x0000FFFF
+#define SSB_CHIPCO_REVMASK 0x000F0000
+#define SSB_CHIPCO_REVSHIFT 16
+#define SSB_CHIPCO_PACKMASK 0x00F00000
+#define SSB_CHIPCO_PACKSHIFT 20
+#define SSB_CHIPCO_NRCORESMASK 0x0F000000
+#define SSB_CHIPCO_NRCORESSHIFT 24
+#define SSB_CHIPCO_CAP 0x0004 /* Capabilities */
+#define SSB_CHIPCO_CAP_NRUART 0x00000003 /* # of UARTs */
+#define SSB_CHIPCO_CAP_MIPSEB 0x00000004 /* MIPS in BigEndian Mode */
+#define SSB_CHIPCO_CAP_UARTCLK 0x00000018 /* UART clock select */
+#define SSB_CHIPCO_CAP_UARTCLK_INT 0x00000008 /* UARTs are driven by internal divided clock */
+#define SSB_CHIPCO_CAP_UARTGPIO 0x00000020 /* UARTs on GPIO 15-12 */
+#define SSB_CHIPCO_CAP_EXTBUS 0x000000C0 /* External buses present */
+#define SSB_CHIPCO_CAP_FLASHT 0x00000700 /* Flash Type */
+#define SSB_CHIPCO_FLASHT_NONE 0x00000000 /* No flash */
+#define SSB_CHIPCO_FLASHT_STSER 0x00000100 /* ST serial flash */
+#define SSB_CHIPCO_FLASHT_ATSER 0x00000200 /* Atmel serial flash */
+#define SSB_CHIPCO_FLASHT_PARA 0x00000700 /* Parallel flash */
+#define SSB_CHIPCO_CAP_PLLT 0x00038000 /* PLL Type */
+#define SSB_PLLTYPE_NONE 0x00000000
+#define SSB_PLLTYPE_1 0x00010000 /* 48Mhz base, 3 dividers */
+#define SSB_PLLTYPE_2 0x00020000 /* 48Mhz, 4 dividers */
+#define SSB_PLLTYPE_3 0x00030000 /* 25Mhz, 2 dividers */
+#define SSB_PLLTYPE_4 0x00008000 /* 48Mhz, 4 dividers */
+#define SSB_PLLTYPE_5 0x00018000 /* 25Mhz, 4 dividers */
+#define SSB_PLLTYPE_6 0x00028000 /* 100/200 or 120/240 only */
+#define SSB_PLLTYPE_7 0x00038000 /* 25Mhz, 4 dividers */
+#define SSB_CHIPCO_CAP_PCTL 0x00040000 /* Power Control */
+#define SSB_CHIPCO_CAP_OTPS 0x00380000 /* OTP size */
+#define SSB_CHIPCO_CAP_OTPS_SHIFT 19
+#define SSB_CHIPCO_CAP_OTPS_BASE 5
+#define SSB_CHIPCO_CAP_JTAGM 0x00400000 /* JTAG master present */
+#define SSB_CHIPCO_CAP_BROM 0x00800000 /* Internal boot ROM active */
+#define SSB_CHIPCO_CAP_64BIT 0x08000000 /* 64-bit Backplane */
+#define SSB_CHIPCO_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */
+#define SSB_CHIPCO_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */
+#define SSB_CHIPCO_CAP_SPROM 0x40000000 /* SPROM present */
+#define SSB_CHIPCO_CORECTL 0x0008
+#define SSB_CHIPCO_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */
+#define SSB_CHIPCO_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
+#define SSB_CHIPCO_CORECTL_UARTCLKEN 0x00000008 /* UART clock enable (rev >= 21) */
+#define SSB_CHIPCO_BIST 0x000C
+#define SSB_CHIPCO_OTPS 0x0010 /* OTP status */
+#define SSB_CHIPCO_OTPS_PROGFAIL 0x80000000
+#define SSB_CHIPCO_OTPS_PROTECT 0x00000007
+#define SSB_CHIPCO_OTPS_HW_PROTECT 0x00000001
+#define SSB_CHIPCO_OTPS_SW_PROTECT 0x00000002
+#define SSB_CHIPCO_OTPS_CID_PROTECT 0x00000004
+#define SSB_CHIPCO_OTPC 0x0014 /* OTP control */
+#define SSB_CHIPCO_OTPC_RECWAIT 0xFF000000
+#define SSB_CHIPCO_OTPC_PROGWAIT 0x00FFFF00
+#define SSB_CHIPCO_OTPC_PRW_SHIFT 8
+#define SSB_CHIPCO_OTPC_MAXFAIL 0x00000038
+#define SSB_CHIPCO_OTPC_VSEL 0x00000006
+#define SSB_CHIPCO_OTPC_SELVL 0x00000001
+#define SSB_CHIPCO_OTPP 0x0018 /* OTP prog */
+#define SSB_CHIPCO_OTPP_COL 0x000000FF
+#define SSB_CHIPCO_OTPP_ROW 0x0000FF00
+#define SSB_CHIPCO_OTPP_ROW_SHIFT 8
+#define SSB_CHIPCO_OTPP_READERR 0x10000000
+#define SSB_CHIPCO_OTPP_VALUE 0x20000000
+#define SSB_CHIPCO_OTPP_READ 0x40000000
+#define SSB_CHIPCO_OTPP_START 0x80000000
+#define SSB_CHIPCO_OTPP_BUSY 0x80000000
+#define SSB_CHIPCO_IRQSTAT 0x0020
+#define SSB_CHIPCO_IRQMASK 0x0024
+#define SSB_CHIPCO_IRQ_GPIO 0x00000001 /* gpio intr */
+#define SSB_CHIPCO_IRQ_EXT 0x00000002 /* ro: ext intr pin (corerev >= 3) */
+#define SSB_CHIPCO_IRQ_WDRESET 0x80000000 /* watchdog reset occurred */
+#define SSB_CHIPCO_CHIPCTL 0x0028 /* Rev >= 11 only */
+#define SSB_CHIPCO_CHIPSTAT 0x002C /* Rev >= 11 only */
+#define SSB_CHIPCO_JCMD 0x0030 /* Rev >= 10 only */
+#define SSB_CHIPCO_JCMD_START 0x80000000
+#define SSB_CHIPCO_JCMD_BUSY 0x80000000
+#define SSB_CHIPCO_JCMD_PAUSE 0x40000000
+#define SSB_CHIPCO_JCMD0_ACC_MASK 0x0000F000
+#define SSB_CHIPCO_JCMD0_ACC_IRDR 0x00000000
+#define SSB_CHIPCO_JCMD0_ACC_DR 0x00001000
+#define SSB_CHIPCO_JCMD0_ACC_IR 0x00002000
+#define SSB_CHIPCO_JCMD0_ACC_RESET 0x00003000
+#define SSB_CHIPCO_JCMD0_ACC_IRPDR 0x00004000
+#define SSB_CHIPCO_JCMD0_ACC_PDR 0x00005000
+#define SSB_CHIPCO_JCMD0_IRW_MASK 0x00000F00
+#define SSB_CHIPCO_JCMD_ACC_MASK 0x000F0000 /* Changes for corerev 11 */
+#define SSB_CHIPCO_JCMD_ACC_IRDR 0x00000000
+#define SSB_CHIPCO_JCMD_ACC_DR 0x00010000
+#define SSB_CHIPCO_JCMD_ACC_IR 0x00020000
+#define SSB_CHIPCO_JCMD_ACC_RESET 0x00030000
+#define SSB_CHIPCO_JCMD_ACC_IRPDR 0x00040000
+#define SSB_CHIPCO_JCMD_ACC_PDR 0x00050000
+#define SSB_CHIPCO_JCMD_IRW_MASK 0x00001F00
+#define SSB_CHIPCO_JCMD_IRW_SHIFT 8
+#define SSB_CHIPCO_JCMD_DRW_MASK 0x0000003F
+#define SSB_CHIPCO_JIR 0x0034 /* Rev >= 10 only */
+#define SSB_CHIPCO_JDR 0x0038 /* Rev >= 10 only */
+#define SSB_CHIPCO_JCTL 0x003C /* Rev >= 10 only */
+#define SSB_CHIPCO_JCTL_FORCE_CLK 4 /* Force clock */
+#define SSB_CHIPCO_JCTL_EXT_EN 2 /* Enable external targets */
+#define SSB_CHIPCO_JCTL_EN 1 /* Enable Jtag master */
+#define SSB_CHIPCO_FLASHCTL 0x0040
+#define SSB_CHIPCO_FLASHCTL_START 0x80000000
+#define SSB_CHIPCO_FLASHCTL_BUSY SSB_CHIPCO_FLASHCTL_START
+#define SSB_CHIPCO_FLASHADDR 0x0044
+#define SSB_CHIPCO_FLASHDATA 0x0048
+#define SSB_CHIPCO_BCAST_ADDR 0x0050
+#define SSB_CHIPCO_BCAST_DATA 0x0054
+#define SSB_CHIPCO_GPIOPULLUP 0x0058 /* Rev >= 20 only */
+#define SSB_CHIPCO_GPIOPULLDOWN 0x005C /* Rev >= 20 only */
+#define SSB_CHIPCO_GPIOIN 0x0060
+#define SSB_CHIPCO_GPIOOUT 0x0064
+#define SSB_CHIPCO_GPIOOUTEN 0x0068
+#define SSB_CHIPCO_GPIOCTL 0x006C
+#define SSB_CHIPCO_GPIOPOL 0x0070
+#define SSB_CHIPCO_GPIOIRQ 0x0074
+#define SSB_CHIPCO_WATCHDOG 0x0080
+#define SSB_CHIPCO_GPIOTIMER 0x0088 /* LED powersave (corerev >= 16) */
+#define SSB_CHIPCO_GPIOTIMER_OFFTIME 0x0000FFFF
+#define SSB_CHIPCO_GPIOTIMER_OFFTIME_SHIFT 0
+#define SSB_CHIPCO_GPIOTIMER_ONTIME 0xFFFF0000
+#define SSB_CHIPCO_GPIOTIMER_ONTIME_SHIFT 16
+#define SSB_CHIPCO_GPIOTOUTM 0x008C /* LED powersave (corerev >= 16) */
+#define SSB_CHIPCO_CLOCK_N 0x0090
+#define SSB_CHIPCO_CLOCK_SB 0x0094
+#define SSB_CHIPCO_CLOCK_PCI 0x0098
+#define SSB_CHIPCO_CLOCK_M2 0x009C
+#define SSB_CHIPCO_CLOCK_MIPS 0x00A0
+#define SSB_CHIPCO_CLKDIV 0x00A4 /* Rev >= 3 only */
+#define SSB_CHIPCO_CLKDIV_SFLASH 0x0F000000
+#define SSB_CHIPCO_CLKDIV_SFLASH_SHIFT 24
+#define SSB_CHIPCO_CLKDIV_OTP 0x000F0000
+#define SSB_CHIPCO_CLKDIV_OTP_SHIFT 16
+#define SSB_CHIPCO_CLKDIV_JTAG 0x00000F00
+#define SSB_CHIPCO_CLKDIV_JTAG_SHIFT 8
+#define SSB_CHIPCO_CLKDIV_UART 0x000000FF
+#define SSB_CHIPCO_PLLONDELAY 0x00B0 /* Rev >= 4 only */
+#define SSB_CHIPCO_FREFSELDELAY 0x00B4 /* Rev >= 4 only */
+#define SSB_CHIPCO_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */
+#define SSB_CHIPCO_SLOWCLKCTL_SRC 0x00000007 /* slow clock source mask */
+#define SSB_CHIPCO_SLOWCLKCTL_SRC_LPO 0x00000000 /* source of slow clock is LPO */
+#define SSB_CHIPCO_SLOWCLKCTL_SRC_XTAL 0x00000001 /* source of slow clock is crystal */
+#define SSB_CHIPCO_SLOECLKCTL_SRC_PCI 0x00000002 /* source of slow clock is PCI */
+#define SSB_CHIPCO_SLOWCLKCTL_LPOFREQ 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */
+#define SSB_CHIPCO_SLOWCLKCTL_LPOPD 0x00000400 /* LPOPowerDown, 1: LPO is disabled, 0: LPO is enabled */
+#define SSB_CHIPCO_SLOWCLKCTL_FSLOW 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, 0: power logic control */
+#define SSB_CHIPCO_SLOWCLKCTL_IPLL 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors PLL clock disable requests from core */
+#define SSB_CHIPCO_SLOWCLKCTL_ENXTAL 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't disable crystal when appropriate */
+#define SSB_CHIPCO_SLOWCLKCTL_XTALPU 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */
+#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV 0xFFFF0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */
+#define SSB_CHIPCO_SLOWCLKCTL_CLKDIV_SHIFT 16
+#define SSB_CHIPCO_SYSCLKCTL 0x00C0 /* Rev >= 3 only */
+#define SSB_CHIPCO_SYSCLKCTL_IDLPEN 0x00000001 /* ILPen: Enable Idle Low Power */
+#define SSB_CHIPCO_SYSCLKCTL_ALPEN 0x00000002 /* ALPen: Enable Active Low Power */
+#define SSB_CHIPCO_SYSCLKCTL_PLLEN 0x00000004 /* ForcePLLOn */
+#define SSB_CHIPCO_SYSCLKCTL_FORCEALP 0x00000008 /* Force ALP (or HT if ALPen is not set) */
+#define SSB_CHIPCO_SYSCLKCTL_FORCEHT 0x00000010 /* Force HT */
+#define SSB_CHIPCO_SYSCLKCTL_CLKDIV 0xFFFF0000 /* ClkDiv (ILP = 1/(4+divisor)) */
+#define SSB_CHIPCO_SYSCLKCTL_CLKDIV_SHIFT 16
+#define SSB_CHIPCO_CLKSTSTR 0x00C4 /* Rev >= 3 only */
+#define SSB_CHIPCO_PCMCIA_CFG 0x0100
+#define SSB_CHIPCO_PCMCIA_MEMWAIT 0x0104
+#define SSB_CHIPCO_PCMCIA_ATTRWAIT 0x0108
+#define SSB_CHIPCO_PCMCIA_IOWAIT 0x010C
+#define SSB_CHIPCO_IDE_CFG 0x0110
+#define SSB_CHIPCO_IDE_MEMWAIT 0x0114
+#define SSB_CHIPCO_IDE_ATTRWAIT 0x0118
+#define SSB_CHIPCO_IDE_IOWAIT 0x011C
+#define SSB_CHIPCO_PROG_CFG 0x0120
+#define SSB_CHIPCO_PROG_WAITCNT 0x0124
+#define SSB_CHIPCO_FLASH_CFG 0x0128
+#define SSB_CHIPCO_FLASH_WAITCNT 0x012C
+#define SSB_CHIPCO_CLKCTLST 0x01E0 /* Clock control and status (rev >= 20) */
+#define SSB_CHIPCO_CLKCTLST_FORCEALP 0x00000001 /* Force ALP request */
+#define SSB_CHIPCO_CLKCTLST_FORCEHT 0x00000002 /* Force HT request */
+#define SSB_CHIPCO_CLKCTLST_FORCEILP 0x00000004 /* Force ILP request */
+#define SSB_CHIPCO_CLKCTLST_HAVEALPREQ 0x00000008 /* ALP available request */
+#define SSB_CHIPCO_CLKCTLST_HAVEHTREQ 0x00000010 /* HT available request */
+#define SSB_CHIPCO_CLKCTLST_HWCROFF 0x00000020 /* Force HW clock request off */
+#define SSB_CHIPCO_CLKCTLST_HAVEALP 0x00010000 /* ALP available */
+#define SSB_CHIPCO_CLKCTLST_HAVEHT 0x00020000 /* HT available */
+#define SSB_CHIPCO_CLKCTLST_4328A0_HAVEHT 0x00010000 /* 4328a0 has reversed bits */
+#define SSB_CHIPCO_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */
+#define SSB_CHIPCO_HW_WORKAROUND 0x01E4 /* Hardware workaround (rev >= 20) */
+#define SSB_CHIPCO_UART0_DATA 0x0300
+#define SSB_CHIPCO_UART0_IMR 0x0304
+#define SSB_CHIPCO_UART0_FCR 0x0308
+#define SSB_CHIPCO_UART0_LCR 0x030C
+#define SSB_CHIPCO_UART0_MCR 0x0310
+#define SSB_CHIPCO_UART0_LSR 0x0314
+#define SSB_CHIPCO_UART0_MSR 0x0318
+#define SSB_CHIPCO_UART0_SCRATCH 0x031C
+#define SSB_CHIPCO_UART1_DATA 0x0400
+#define SSB_CHIPCO_UART1_IMR 0x0404
+#define SSB_CHIPCO_UART1_FCR 0x0408
+#define SSB_CHIPCO_UART1_LCR 0x040C
+#define SSB_CHIPCO_UART1_MCR 0x0410
+#define SSB_CHIPCO_UART1_LSR 0x0414
+#define SSB_CHIPCO_UART1_MSR 0x0418
+#define SSB_CHIPCO_UART1_SCRATCH 0x041C
+/* PMU registers (rev >= 20) */
+#define SSB_CHIPCO_PMU_CTL 0x0600 /* PMU control */
+#define SSB_CHIPCO_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */
+#define SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT 16
+#define SSB_CHIPCO_PMU_CTL_PLL_UPD 0x00000400
+#define SSB_CHIPCO_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */
+#define SSB_CHIPCO_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */
+#define SSB_CHIPCO_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */
+#define SSB_CHIPCO_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */
+#define SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT 2
+#define SSB_CHIPCO_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */
+#define SSB_CHIPCO_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */
+#define SSB_CHIPCO_PMU_CAP 0x0604 /* PMU capabilities */
+#define SSB_CHIPCO_PMU_CAP_REVISION 0x000000FF /* Revision mask */
+#define SSB_CHIPCO_PMU_STAT 0x0608 /* PMU status */
+#define SSB_CHIPCO_PMU_STAT_INTPEND 0x00000040 /* Interrupt pending */
+#define SSB_CHIPCO_PMU_STAT_SBCLKST 0x00000030 /* Backplane clock status? */
+#define SSB_CHIPCO_PMU_STAT_HAVEALP 0x00000008 /* ALP available */
+#define SSB_CHIPCO_PMU_STAT_HAVEHT 0x00000004 /* HT available */
+#define SSB_CHIPCO_PMU_STAT_RESINIT 0x00000003 /* Res init */
+#define SSB_CHIPCO_PMU_RES_STAT 0x060C /* PMU res status */
+#define SSB_CHIPCO_PMU_RES_PEND 0x0610 /* PMU res pending */
+#define SSB_CHIPCO_PMU_TIMER 0x0614 /* PMU timer */
+#define SSB_CHIPCO_PMU_MINRES_MSK 0x0618 /* PMU min res mask */
+#define SSB_CHIPCO_PMU_MAXRES_MSK 0x061C /* PMU max res mask */
+#define SSB_CHIPCO_PMU_RES_TABSEL 0x0620 /* PMU res table sel */
+#define SSB_CHIPCO_PMU_RES_DEPMSK 0x0624 /* PMU res dep mask */
+#define SSB_CHIPCO_PMU_RES_UPDNTM 0x0628 /* PMU res updown timer */
+#define SSB_CHIPCO_PMU_RES_TIMER 0x062C /* PMU res timer */
+#define SSB_CHIPCO_PMU_CLKSTRETCH 0x0630 /* PMU clockstretch */
+#define SSB_CHIPCO_PMU_WATCHDOG 0x0634 /* PMU watchdog */
+#define SSB_CHIPCO_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */
+#define SSB_CHIPCO_PMU_RES_REQT 0x0644 /* PMU res req timer */
+#define SSB_CHIPCO_PMU_RES_REQM 0x0648 /* PMU res req mask */
+#define SSB_CHIPCO_CHIPCTL_ADDR 0x0650
+#define SSB_CHIPCO_CHIPCTL_DATA 0x0654
+#define SSB_CHIPCO_REGCTL_ADDR 0x0658
+#define SSB_CHIPCO_REGCTL_DATA 0x065C
+#define SSB_CHIPCO_PLLCTL_ADDR 0x0660
+#define SSB_CHIPCO_PLLCTL_DATA 0x0664
+
+
+
+/** PMU PLL registers */
+
+/* PMU rev 0 PLL registers */
+#define SSB_PMU0_PLLCTL0 0
+#define SSB_PMU0_PLLCTL0_PDIV_MSK 0x00000001
+#define SSB_PMU0_PLLCTL0_PDIV_FREQ 25000 /* kHz */
+#define SSB_PMU0_PLLCTL1 1
+#define SSB_PMU0_PLLCTL1_WILD_IMSK 0xF0000000 /* Wild int mask (low nibble) */
+#define SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT 28
+#define SSB_PMU0_PLLCTL1_WILD_FMSK 0x0FFFFF00 /* Wild frac mask */
+#define SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT 8
+#define SSB_PMU0_PLLCTL1_STOPMOD 0x00000040 /* Stop mod */
+#define SSB_PMU0_PLLCTL2 2
+#define SSB_PMU0_PLLCTL2_WILD_IMSKHI 0x0000000F /* Wild int mask (high nibble) */
+#define SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT 0
+
+/* PMU rev 1 PLL registers */
+#define SSB_PMU1_PLLCTL0 0
+#define SSB_PMU1_PLLCTL0_P1DIV 0x00F00000 /* P1 div */
+#define SSB_PMU1_PLLCTL0_P1DIV_SHIFT 20
+#define SSB_PMU1_PLLCTL0_P2DIV 0x0F000000 /* P2 div */
+#define SSB_PMU1_PLLCTL0_P2DIV_SHIFT 24
+#define SSB_PMU1_PLLCTL1 1
+#define SSB_PMU1_PLLCTL1_M1DIV 0x000000FF /* M1 div */
+#define SSB_PMU1_PLLCTL1_M1DIV_SHIFT 0
+#define SSB_PMU1_PLLCTL1_M2DIV 0x0000FF00 /* M2 div */
+#define SSB_PMU1_PLLCTL1_M2DIV_SHIFT 8
+#define SSB_PMU1_PLLCTL1_M3DIV 0x00FF0000 /* M3 div */
+#define SSB_PMU1_PLLCTL1_M3DIV_SHIFT 16
+#define SSB_PMU1_PLLCTL1_M4DIV 0xFF000000 /* M4 div */
+#define SSB_PMU1_PLLCTL1_M4DIV_SHIFT 24
+#define SSB_PMU1_PLLCTL2 2
+#define SSB_PMU1_PLLCTL2_M5DIV 0x000000FF /* M5 div */
+#define SSB_PMU1_PLLCTL2_M5DIV_SHIFT 0
+#define SSB_PMU1_PLLCTL2_M6DIV 0x0000FF00 /* M6 div */
+#define SSB_PMU1_PLLCTL2_M6DIV_SHIFT 8
+#define SSB_PMU1_PLLCTL2_NDIVMODE 0x000E0000 /* NDIV mode */
+#define SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT 17
+#define SSB_PMU1_PLLCTL2_NDIVINT 0x1FF00000 /* NDIV int */
+#define SSB_PMU1_PLLCTL2_NDIVINT_SHIFT 20
+#define SSB_PMU1_PLLCTL3 3
+#define SSB_PMU1_PLLCTL3_NDIVFRAC 0x00FFFFFF /* NDIV frac */
+#define SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT 0
+#define SSB_PMU1_PLLCTL4 4
+#define SSB_PMU1_PLLCTL5 5
+#define SSB_PMU1_PLLCTL5_CLKDRV 0xFFFFFF00 /* clk drv */
+#define SSB_PMU1_PLLCTL5_CLKDRV_SHIFT 8
+
+/* BCM4312 PLL resource numbers. */
+#define SSB_PMURES_4312_SWITCHER_BURST 0
+#define SSB_PMURES_4312_SWITCHER_PWM 1
+#define SSB_PMURES_4312_PA_REF_LDO 2
+#define SSB_PMURES_4312_CORE_LDO_BURST 3
+#define SSB_PMURES_4312_CORE_LDO_PWM 4
+#define SSB_PMURES_4312_RADIO_LDO 5
+#define SSB_PMURES_4312_ILP_REQUEST 6
+#define SSB_PMURES_4312_BG_FILTBYP 7
+#define SSB_PMURES_4312_TX_FILTBYP 8
+#define SSB_PMURES_4312_RX_FILTBYP 9
+#define SSB_PMURES_4312_XTAL_PU 10
+#define SSB_PMURES_4312_ALP_AVAIL 11
+#define SSB_PMURES_4312_BB_PLL_FILTBYP 12
+#define SSB_PMURES_4312_RF_PLL_FILTBYP 13
+#define SSB_PMURES_4312_HT_AVAIL 14
+
+/* BCM4325 PLL resource numbers. */
+#define SSB_PMURES_4325_BUCK_BOOST_BURST 0
+#define SSB_PMURES_4325_CBUCK_BURST 1
+#define SSB_PMURES_4325_CBUCK_PWM 2
+#define SSB_PMURES_4325_CLDO_CBUCK_BURST 3
+#define SSB_PMURES_4325_CLDO_CBUCK_PWM 4
+#define SSB_PMURES_4325_BUCK_BOOST_PWM 5
+#define SSB_PMURES_4325_ILP_REQUEST 6
+#define SSB_PMURES_4325_ABUCK_BURST 7
+#define SSB_PMURES_4325_ABUCK_PWM 8
+#define SSB_PMURES_4325_LNLDO1_PU 9
+#define SSB_PMURES_4325_LNLDO2_PU 10
+#define SSB_PMURES_4325_LNLDO3_PU 11
+#define SSB_PMURES_4325_LNLDO4_PU 12
+#define SSB_PMURES_4325_XTAL_PU 13
+#define SSB_PMURES_4325_ALP_AVAIL 14
+#define SSB_PMURES_4325_RX_PWRSW_PU 15
+#define SSB_PMURES_4325_TX_PWRSW_PU 16
+#define SSB_PMURES_4325_RFPLL_PWRSW_PU 17
+#define SSB_PMURES_4325_LOGEN_PWRSW_PU 18
+#define SSB_PMURES_4325_AFE_PWRSW_PU 19
+#define SSB_PMURES_4325_BBPLL_PWRSW_PU 20
+#define SSB_PMURES_4325_HT_AVAIL 21
+
+/* BCM4328 PLL resource numbers. */
+#define SSB_PMURES_4328_EXT_SWITCHER_PWM 0
+#define SSB_PMURES_4328_BB_SWITCHER_PWM 1
+#define SSB_PMURES_4328_BB_SWITCHER_BURST 2
+#define SSB_PMURES_4328_BB_EXT_SWITCHER_BURST 3
+#define SSB_PMURES_4328_ILP_REQUEST 4
+#define SSB_PMURES_4328_RADIO_SWITCHER_PWM 5
+#define SSB_PMURES_4328_RADIO_SWITCHER_BURST 6
+#define SSB_PMURES_4328_ROM_SWITCH 7
+#define SSB_PMURES_4328_PA_REF_LDO 8
+#define SSB_PMURES_4328_RADIO_LDO 9
+#define SSB_PMURES_4328_AFE_LDO 10
+#define SSB_PMURES_4328_PLL_LDO 11
+#define SSB_PMURES_4328_BG_FILTBYP 12
+#define SSB_PMURES_4328_TX_FILTBYP 13
+#define SSB_PMURES_4328_RX_FILTBYP 14
+#define SSB_PMURES_4328_XTAL_PU 15
+#define SSB_PMURES_4328_XTAL_EN 16
+#define SSB_PMURES_4328_BB_PLL_FILTBYP 17
+#define SSB_PMURES_4328_RF_PLL_FILTBYP 18
+#define SSB_PMURES_4328_BB_PLL_PU 19
+
+/* BCM5354 PLL resource numbers. */
+#define SSB_PMURES_5354_EXT_SWITCHER_PWM 0
+#define SSB_PMURES_5354_BB_SWITCHER_PWM 1
+#define SSB_PMURES_5354_BB_SWITCHER_BURST 2
+#define SSB_PMURES_5354_BB_EXT_SWITCHER_BURST 3
+#define SSB_PMURES_5354_ILP_REQUEST 4
+#define SSB_PMURES_5354_RADIO_SWITCHER_PWM 5
+#define SSB_PMURES_5354_RADIO_SWITCHER_BURST 6
+#define SSB_PMURES_5354_ROM_SWITCH 7
+#define SSB_PMURES_5354_PA_REF_LDO 8
+#define SSB_PMURES_5354_RADIO_LDO 9
+#define SSB_PMURES_5354_AFE_LDO 10
+#define SSB_PMURES_5354_PLL_LDO 11
+#define SSB_PMURES_5354_BG_FILTBYP 12
+#define SSB_PMURES_5354_TX_FILTBYP 13
+#define SSB_PMURES_5354_RX_FILTBYP 14
+#define SSB_PMURES_5354_XTAL_PU 15
+#define SSB_PMURES_5354_XTAL_EN 16
+#define SSB_PMURES_5354_BB_PLL_FILTBYP 17
+#define SSB_PMURES_5354_RF_PLL_FILTBYP 18
+#define SSB_PMURES_5354_BB_PLL_PU 19
+
+
+
+/** Chip specific Chip-Status register contents. */
+#define SSB_CHIPCO_CHST_4322_SPROM_EXISTS 0x00000040 /* SPROM present */
+#define SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL 0x00000003
+#define SSB_CHIPCO_CHST_4325_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */
+#define SSB_CHIPCO_CHST_4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */
+#define SSB_CHIPCO_CHST_4325_OTP_SEL 2 /* OTP is powered up, no SPROM */
+#define SSB_CHIPCO_CHST_4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */
+#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE 0x00000004
+#define SSB_CHIPCO_CHST_4325_SDIO_USB_MODE_SHIFT 2
+#define SSB_CHIPCO_CHST_4325_RCAL_VALID 0x00000008
+#define SSB_CHIPCO_CHST_4325_RCAL_VALID_SHIFT 3
+#define SSB_CHIPCO_CHST_4325_RCAL_VALUE 0x000001F0
+#define SSB_CHIPCO_CHST_4325_RCAL_VALUE_SHIFT 4
+#define SSB_CHIPCO_CHST_4325_PMUTOP_2B 0x00000200 /* 1 for 2b, 0 for 2a */
+
+/** Macros to determine SPROM presence based on Chip-Status register. */
+#define SSB_CHIPCO_CHST_4312_SPROM_PRESENT(status) \
+ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_OTP_SEL)
+#define SSB_CHIPCO_CHST_4322_SPROM_PRESENT(status) \
+ (status & SSB_CHIPCO_CHST_4322_SPROM_EXISTS)
+#define SSB_CHIPCO_CHST_4325_SPROM_PRESENT(status) \
+ (((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_DEFCIS_SEL) && \
+ ((status & SSB_CHIPCO_CHST_4325_SPROM_OTP_SEL) != \
+ SSB_CHIPCO_CHST_4325_OTP_SEL))
+
+
+
+/** Clockcontrol masks and values **/
+
+/* SSB_CHIPCO_CLOCK_N */
+#define SSB_CHIPCO_CLK_N1 0x0000003F /* n1 control */
+#define SSB_CHIPCO_CLK_N2 0x00003F00 /* n2 control */
+#define SSB_CHIPCO_CLK_N2_SHIFT 8
+#define SSB_CHIPCO_CLK_PLLC 0x000F0000 /* pll control */
+#define SSB_CHIPCO_CLK_PLLC_SHIFT 16
+
+/* SSB_CHIPCO_CLOCK_SB/PCI/UART */
+#define SSB_CHIPCO_CLK_M1 0x0000003F /* m1 control */
+#define SSB_CHIPCO_CLK_M2 0x00003F00 /* m2 control */
+#define SSB_CHIPCO_CLK_M2_SHIFT 8
+#define SSB_CHIPCO_CLK_M3 0x003F0000 /* m3 control */
+#define SSB_CHIPCO_CLK_M3_SHIFT 16
+#define SSB_CHIPCO_CLK_MC 0x1F000000 /* mux control */
+#define SSB_CHIPCO_CLK_MC_SHIFT 24
+
+/* N3M Clock control magic field values */
+#define SSB_CHIPCO_CLK_F6_2 0x02 /* A factor of 2 in */
+#define SSB_CHIPCO_CLK_F6_3 0x03 /* 6-bit fields like */
+#define SSB_CHIPCO_CLK_F6_4 0x05 /* N1, M1 or M3 */
+#define SSB_CHIPCO_CLK_F6_5 0x09
+#define SSB_CHIPCO_CLK_F6_6 0x11
+#define SSB_CHIPCO_CLK_F6_7 0x21
+
+#define SSB_CHIPCO_CLK_F5_BIAS 5 /* 5-bit fields get this added */
+
+#define SSB_CHIPCO_CLK_MC_BYPASS 0x08
+#define SSB_CHIPCO_CLK_MC_M1 0x04
+#define SSB_CHIPCO_CLK_MC_M1M2 0x02
+#define SSB_CHIPCO_CLK_MC_M1M2M3 0x01
+#define SSB_CHIPCO_CLK_MC_M1M3 0x11
+
+/* Type 2 Clock control magic field values */
+#define SSB_CHIPCO_CLK_T2_BIAS 2 /* n1, n2, m1 & m3 bias */
+#define SSB_CHIPCO_CLK_T2M2_BIAS 3 /* m2 bias */
+
+#define SSB_CHIPCO_CLK_T2MC_M1BYP 1
+#define SSB_CHIPCO_CLK_T2MC_M2BYP 2
+#define SSB_CHIPCO_CLK_T2MC_M3BYP 4
+
+/* Type 6 Clock control magic field values */
+#define SSB_CHIPCO_CLK_T6_MMASK 1 /* bits of interest in m */
+#define SSB_CHIPCO_CLK_T6_M0 120000000 /* sb clock for m = 0 */
+#define SSB_CHIPCO_CLK_T6_M1 100000000 /* sb clock for m = 1 */
+#define SSB_CHIPCO_CLK_SB2MIPS_T6(sb) (2 * (sb))
+
+/* Common clock base */
+#define SSB_CHIPCO_CLK_BASE1 24000000 /* Half the clock freq */
+#define SSB_CHIPCO_CLK_BASE2 12500000 /* Alternate crystal on some PLL's */
+
+/* Clock control values for 200Mhz in 5350 */
+#define SSB_CHIPCO_CLK_5350_N 0x0311
+#define SSB_CHIPCO_CLK_5350_M 0x04020009
+
+
+/** Bits in the config registers **/
+
+#define SSB_CHIPCO_CFG_EN 0x0001 /* Enable */
+#define SSB_CHIPCO_CFG_EXTM 0x000E /* Extif Mode */
+#define SSB_CHIPCO_CFG_EXTM_ASYNC 0x0002 /* Async/Parallel flash */
+#define SSB_CHIPCO_CFG_EXTM_SYNC 0x0004 /* Synchronous */
+#define SSB_CHIPCO_CFG_EXTM_PCMCIA 0x0008 /* PCMCIA */
+#define SSB_CHIPCO_CFG_EXTM_IDE 0x000A /* IDE */
+#define SSB_CHIPCO_CFG_DS16 0x0010 /* Data size, 0=8bit, 1=16bit */
+#define SSB_CHIPCO_CFG_CLKDIV 0x0060 /* Sync: Clock divisor */
+#define SSB_CHIPCO_CFG_CLKEN 0x0080 /* Sync: Clock enable */
+#define SSB_CHIPCO_CFG_BSTRO 0x0100 /* Sync: Size/Bytestrobe */
+
+
+/** Flash-specific control/status values */
+
+/* flashcontrol opcodes for ST flashes */
+#define SSB_CHIPCO_FLASHCTL_ST_WREN 0x0006 /* Write Enable */
+#define SSB_CHIPCO_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */
+#define SSB_CHIPCO_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */
+#define SSB_CHIPCO_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */
+#define SSB_CHIPCO_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */
+#define SSB_CHIPCO_FLASHCTL_ST_PP 0x0302 /* Page Program */
+#define SSB_CHIPCO_FLASHCTL_ST_SE 0x02D8 /* Sector Erase */
+#define SSB_CHIPCO_FLASHCTL_ST_BE 0x00C7 /* Bulk Erase */
+#define SSB_CHIPCO_FLASHCTL_ST_DP 0x00B9 /* Deep Power-down */
+#define SSB_CHIPCO_FLASHCTL_ST_RES 0x03AB /* Read Electronic Signature */
+#define SSB_CHIPCO_FLASHCTL_ST_CSA 0x1000 /* Keep chip select asserted */
+#define SSB_CHIPCO_FLASHCTL_ST_SSE 0x0220 /* Sub-sector Erase */
+
+/* Status register bits for ST flashes */
+#define SSB_CHIPCO_FLASHSTA_ST_WIP 0x01 /* Write In Progress */
+#define SSB_CHIPCO_FLASHSTA_ST_WEL 0x02 /* Write Enable Latch */
+#define SSB_CHIPCO_FLASHSTA_ST_BP 0x1C /* Block Protect */
+#define SSB_CHIPCO_FLASHSTA_ST_BP_SHIFT 2
+#define SSB_CHIPCO_FLASHSTA_ST_SRWD 0x80 /* Status Register Write Disable */
+
+/* flashcontrol opcodes for Atmel flashes */
+#define SSB_CHIPCO_FLASHCTL_AT_READ 0x07E8
+#define SSB_CHIPCO_FLASHCTL_AT_PAGE_READ 0x07D2
+#define SSB_CHIPCO_FLASHCTL_AT_BUF1_READ /* FIXME */
+#define SSB_CHIPCO_FLASHCTL_AT_BUF2_READ /* FIXME */
+#define SSB_CHIPCO_FLASHCTL_AT_STATUS 0x01D7
+#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRITE 0x0384
+#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRITE 0x0387
+#define SSB_CHIPCO_FLASHCTL_AT_BUF1_ERASE_PRGM 0x0283 /* Erase program */
+#define SSB_CHIPCO_FLASHCTL_AT_BUF2_ERASE_PRGM 0x0286 /* Erase program */
+#define SSB_CHIPCO_FLASHCTL_AT_BUF1_PROGRAM 0x0288
+#define SSB_CHIPCO_FLASHCTL_AT_BUF2_PROGRAM 0x0289
+#define SSB_CHIPCO_FLASHCTL_AT_PAGE_ERASE 0x0281
+#define SSB_CHIPCO_FLASHCTL_AT_BLOCK_ERASE 0x0250
+#define SSB_CHIPCO_FLASHCTL_AT_BUF1_WRER_PRGM 0x0382 /* Write erase program */
+#define SSB_CHIPCO_FLASHCTL_AT_BUF2_WRER_PRGM 0x0385 /* Write erase program */
+#define SSB_CHIPCO_FLASHCTL_AT_BUF1_LOAD 0x0253
+#define SSB_CHIPCO_FLASHCTL_AT_BUF2_LOAD 0x0255
+#define SSB_CHIPCO_FLASHCTL_AT_BUF1_COMPARE 0x0260
+#define SSB_CHIPCO_FLASHCTL_AT_BUF2_COMPARE 0x0261
+#define SSB_CHIPCO_FLASHCTL_AT_BUF1_REPROGRAM 0x0258
+#define SSB_CHIPCO_FLASHCTL_AT_BUF2_REPROGRAM 0x0259
+
+/* Status register bits for Atmel flashes */
+#define SSB_CHIPCO_FLASHSTA_AT_READY 0x80
+#define SSB_CHIPCO_FLASHSTA_AT_MISMATCH 0x40
+#define SSB_CHIPCO_FLASHSTA_AT_ID 0x38
+#define SSB_CHIPCO_FLASHSTA_AT_ID_SHIFT 3
+
+
+/** OTP **/
+
+/* OTP regions */
+#define SSB_CHIPCO_OTP_HW_REGION SSB_CHIPCO_OTPS_HW_PROTECT
+#define SSB_CHIPCO_OTP_SW_REGION SSB_CHIPCO_OTPS_SW_PROTECT
+#define SSB_CHIPCO_OTP_CID_REGION SSB_CHIPCO_OTPS_CID_PROTECT
+
+/* OTP regions (Byte offsets from otp size) */
+#define SSB_CHIPCO_OTP_SWLIM_OFF (-8)
+#define SSB_CHIPCO_OTP_CIDBASE_OFF 0
+#define SSB_CHIPCO_OTP_CIDLIM_OFF 8
+
+/* Predefined OTP words (Word offset from otp size) */
+#define SSB_CHIPCO_OTP_BOUNDARY_OFF (-4)
+#define SSB_CHIPCO_OTP_HWSIGN_OFF (-3)
+#define SSB_CHIPCO_OTP_SWSIGN_OFF (-2)
+#define SSB_CHIPCO_OTP_CIDSIGN_OFF (-1)
+
+#define SSB_CHIPCO_OTP_CID_OFF 0
+#define SSB_CHIPCO_OTP_PKG_OFF 1
+#define SSB_CHIPCO_OTP_FID_OFF 2
+#define SSB_CHIPCO_OTP_RSV_OFF 3
+#define SSB_CHIPCO_OTP_LIM_OFF 4
+
+#define SSB_CHIPCO_OTP_SIGNATURE 0x578A
+#define SSB_CHIPCO_OTP_MAGIC 0x4E56
+
+
+struct ssb_device;
+struct ssb_serial_port;
+
+/* Data for the PMU, if available.
+ * Check availability by testing SSB_CHIPCO_CAP_PMU in the capabilities field of struct ssb_chipcommon.
+ */
+struct ssb_chipcommon_pmu {
+ u8 rev; /* PMU revision */
+ u32 crystalfreq; /* The active crystal frequency (in kHz) */
+};
+
+struct ssb_chipcommon {
+ struct ssb_device *dev;
+ u32 capabilities;
+ u32 status;
+ /* Fast Powerup Delay constant */
+ u16 fast_pwrup_delay;
+ spinlock_t gpio_lock;
+ struct ssb_chipcommon_pmu pmu;
+ u32 ticks_per_ms;
+ u32 max_timer_ms;
+};
+
+static inline bool ssb_chipco_available(struct ssb_chipcommon *cc)
+{
+ return (cc->dev != NULL);
+}
+
+/* Register access */
+#define chipco_read32(cc, offset) ssb_read32((cc)->dev, offset)
+#define chipco_write32(cc, offset, val) ssb_write32((cc)->dev, offset, val)
+
+#define chipco_mask32(cc, offset, mask) \
+ chipco_write32(cc, offset, chipco_read32(cc, offset) & (mask))
+#define chipco_set32(cc, offset, set) \
+ chipco_write32(cc, offset, chipco_read32(cc, offset) | (set))
+#define chipco_maskset32(cc, offset, mask, set) \
+ chipco_write32(cc, offset, (chipco_read32(cc, offset) & (mask)) | (set))
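+
+/*
+ * Illustrative sketch ("on" is a hypothetical tick count): the helpers above
+ * do read-modify-write cycles on ChipCommon registers, e.g. updating only
+ * the LED powersave on-time field:
+ *
+ *	chipco_maskset32(cc, SSB_CHIPCO_GPIOTIMER,
+ *			 ~SSB_CHIPCO_GPIOTIMER_ONTIME,
+ *			 on << SSB_CHIPCO_GPIOTIMER_ONTIME_SHIFT);
+ */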
+
+extern void ssb_chipcommon_init(struct ssb_chipcommon *cc);
+
+extern void ssb_chipco_suspend(struct ssb_chipcommon *cc);
+extern void ssb_chipco_resume(struct ssb_chipcommon *cc);
+
+extern void ssb_chipco_get_clockcpu(struct ssb_chipcommon *cc,
+ u32 *plltype, u32 *n, u32 *m);
+extern void ssb_chipco_get_clockcontrol(struct ssb_chipcommon *cc,
+ u32 *plltype, u32 *n, u32 *m);
+extern void ssb_chipco_timing_init(struct ssb_chipcommon *cc,
+ unsigned long ns_per_cycle);
+
+enum ssb_clkmode {
+ SSB_CLKMODE_SLOW,
+ SSB_CLKMODE_FAST,
+ SSB_CLKMODE_DYNAMIC,
+};
+
+extern void ssb_chipco_set_clockmode(struct ssb_chipcommon *cc,
+ enum ssb_clkmode mode);
+
+extern u32 ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks);
+
+void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value);
+
+u32 ssb_chipco_irq_status(struct ssb_chipcommon *cc, u32 mask);
+
+/* Chipcommon GPIO pin access. */
+u32 ssb_chipco_gpio_in(struct ssb_chipcommon *cc, u32 mask);
+u32 ssb_chipco_gpio_out(struct ssb_chipcommon *cc, u32 mask, u32 value);
+u32 ssb_chipco_gpio_outen(struct ssb_chipcommon *cc, u32 mask, u32 value);
+u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value);
+u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value);
+u32 ssb_chipco_gpio_polarity(struct ssb_chipcommon *cc, u32 mask, u32 value);
+u32 ssb_chipco_gpio_pullup(struct ssb_chipcommon *cc, u32 mask, u32 value);
+u32 ssb_chipco_gpio_pulldown(struct ssb_chipcommon *cc, u32 mask, u32 value);
+
+#ifdef CONFIG_SSB_SERIAL
+extern int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
+ struct ssb_serial_port *ports);
+#endif /* CONFIG_SSB_SERIAL */
+
+/* PMU support */
+extern void ssb_pmu_init(struct ssb_chipcommon *cc);
+
+enum ssb_pmu_ldo_volt_id {
+ LDO_PAREF = 0,
+ LDO_VOLT1,
+ LDO_VOLT2,
+ LDO_VOLT3,
+};
+
+void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
+ enum ssb_pmu_ldo_volt_id id, u32 voltage);
+void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on);
+void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid);
+
+#endif /* LINUX_SSB_CHIPCO_H_ */
diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h
new file mode 100644
index 000000000..a410e841e
--- /dev/null
+++ b/include/linux/ssb/ssb_driver_extif.h
@@ -0,0 +1,259 @@
+/*
+ * Hardware-specific External Interface I/O core definitions
+ * for the BCM47xx family of SiliconBackplane-based chips.
+ *
+ * The External Interface core supports a total of three external chip selects
+ * supporting external interfaces. One of the external chip selects is
+ * used for Flash, one is used for PCMCIA, and the other may be
+ * programmed to support either a synchronous interface or an
+ * asynchronous interface. The asynchronous interface can be used to
+ * support external devices such as UARTs and the BCM2019 Bluetooth
+ * baseband processor.
+ * The external interface core also contains 2 on-chip 16550 UARTs, clock
+ * frequency control, a watchdog interrupt timer, and a GPIO interface.
+ *
+ * Copyright 2005, Broadcom Corporation
+ * Copyright 2006, Michael Buesch
+ *
+ * Licensed under the GPL version 2. See COPYING for details.
+ */
+#ifndef LINUX_SSB_EXTIFCORE_H_
+#define LINUX_SSB_EXTIFCORE_H_
+
+/* external interface address space */
+#define SSB_EXTIF_PCMCIA_MEMBASE(x) (x)
+#define SSB_EXTIF_PCMCIA_IOBASE(x) ((x) + 0x100000)
+#define SSB_EXTIF_PCMCIA_CFGBASE(x) ((x) + 0x200000)
+#define SSB_EXTIF_CFGIF_BASE(x) ((x) + 0x800000)
+#define SSB_EXTIF_FLASH_BASE(x) ((x) + 0xc00000)
+
+#define SSB_EXTIF_NR_GPIOOUT 5
+/* GPIO NOTE:
+ * The multiple instances of output and output enable registers
+ * are present to allow driver software for multiple cores to control
+ * gpio outputs without needing to share a single register pair.
+ * Use the following helper macro to get a register offset value.
+ */
+#define SSB_EXTIF_GPIO_OUT(index) ({ \
+ BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \
+ SSB_EXTIF_GPIO_OUT_BASE + ((index) * 8); \
+ })
+#define SSB_EXTIF_GPIO_OUTEN(index) ({ \
+ BUILD_BUG_ON(index >= SSB_EXTIF_NR_GPIOOUT); \
+ SSB_EXTIF_GPIO_OUTEN_BASE + ((index) * 8); \
+ })
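+
+/*
+ * Illustrative sketch ("extif" and "value" are hypothetical): the macros
+ * expand to a register offset, which can then be used with the generic
+ * MMIO accessors:
+ *
+ *	ssb_write32(extif->dev, SSB_EXTIF_GPIO_OUT(0), value);
+ */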
+
+/** EXTIF core registers **/
+
+#define SSB_EXTIF_CTL 0x0000
+#define SSB_EXTIF_CTL_UARTEN (1 << 0) /* UART enable */
+#define SSB_EXTIF_EXTSTAT 0x0004
+#define SSB_EXTIF_EXTSTAT_EMODE (1 << 0) /* Endian mode (ro) */
+#define SSB_EXTIF_EXTSTAT_EIRQPIN (1 << 1) /* External interrupt pin (ro) */
+#define SSB_EXTIF_EXTSTAT_GPIOIRQPIN (1 << 2) /* GPIO interrupt pin (ro) */
+#define SSB_EXTIF_PCMCIA_CFG 0x0010
+#define SSB_EXTIF_PCMCIA_MEMWAIT 0x0014
+#define SSB_EXTIF_PCMCIA_ATTRWAIT 0x0018
+#define SSB_EXTIF_PCMCIA_IOWAIT 0x001C
+#define SSB_EXTIF_PROG_CFG 0x0020
+#define SSB_EXTIF_PROG_WAITCNT 0x0024
+#define SSB_EXTIF_FLASH_CFG 0x0028
+#define SSB_EXTIF_FLASH_WAITCNT 0x002C
+#define SSB_EXTIF_WATCHDOG 0x0040
+#define SSB_EXTIF_CLOCK_N 0x0044
+#define SSB_EXTIF_CLOCK_SB 0x0048
+#define SSB_EXTIF_CLOCK_PCI 0x004C
+#define SSB_EXTIF_CLOCK_MII 0x0050
+#define SSB_EXTIF_GPIO_IN 0x0060
+#define SSB_EXTIF_GPIO_OUT_BASE 0x0064
+#define SSB_EXTIF_GPIO_OUTEN_BASE 0x0068
+#define SSB_EXTIF_EJTAG_OUTEN 0x0090
+#define SSB_EXTIF_GPIO_INTPOL 0x0094
+#define SSB_EXTIF_GPIO_INTMASK 0x0098
+#define SSB_EXTIF_UART_DATA 0x0300
+#define SSB_EXTIF_UART_TIMER 0x0310
+#define SSB_EXTIF_UART_FCR 0x0320
+#define SSB_EXTIF_UART_LCR 0x0330
+#define SSB_EXTIF_UART_MCR 0x0340
+#define SSB_EXTIF_UART_LSR 0x0350
+#define SSB_EXTIF_UART_MSR 0x0360
+#define SSB_EXTIF_UART_SCRATCH 0x0370
+
+
+
+
+/* pcmcia/prog/flash_config */
+#define SSB_EXTCFG_EN (1 << 0) /* enable */
+#define SSB_EXTCFG_MODE 0xE /* mode */
+#define SSB_EXTCFG_MODE_SHIFT 1
+#define SSB_EXTCFG_MODE_FLASH 0x0 /* flash/asynchronous mode */
+#define SSB_EXTCFG_MODE_SYNC 0x2 /* synchronous mode */
+#define SSB_EXTCFG_MODE_PCMCIA 0x4 /* pcmcia mode */
+#define SSB_EXTCFG_DS16 (1 << 4) /* destsize: 0=8bit, 1=16bit */
+#define SSB_EXTCFG_BSWAP (1 << 5) /* byteswap */
+#define SSB_EXTCFG_CLKDIV 0xC0 /* clock divider */
+#define SSB_EXTCFG_CLKDIV_SHIFT 6
+#define SSB_EXTCFG_CLKDIV_2 0x0 /* backplane/2 */
+#define SSB_EXTCFG_CLKDIV_3 0x40 /* backplane/3 */
+#define SSB_EXTCFG_CLKDIV_4 0x80 /* backplane/4 */
+#define SSB_EXTCFG_CLKEN (1 << 8) /* clock enable */
+#define SSB_EXTCFG_STROBE (1 << 9) /* size/bytestrobe (synch only) */
+
+/* pcmcia_memwait */
+#define SSB_PCMCIA_MEMW_0 0x0000003F /* waitcount0 */
+#define SSB_PCMCIA_MEMW_1 0x00001F00 /* waitcount1 */
+#define SSB_PCMCIA_MEMW_1_SHIFT 8
+#define SSB_PCMCIA_MEMW_2 0x001F0000 /* waitcount2 */
+#define SSB_PCMCIA_MEMW_2_SHIFT 16
+#define SSB_PCMCIA_MEMW_3 0x1F000000 /* waitcount3 */
+#define SSB_PCMCIA_MEMW_3_SHIFT 24
+
+/* pcmcia_attrwait */
+#define SSB_PCMCIA_ATTW_0 0x0000003F /* waitcount0 */
+#define SSB_PCMCIA_ATTW_1 0x00001F00 /* waitcount1 */
+#define SSB_PCMCIA_ATTW_1_SHIFT 8
+#define SSB_PCMCIA_ATTW_2 0x001F0000 /* waitcount2 */
+#define SSB_PCMCIA_ATTW_2_SHIFT 16
+#define SSB_PCMCIA_ATTW_3 0x1F000000 /* waitcount3 */
+#define SSB_PCMCIA_ATTW_3_SHIFT 24
+
+/* pcmcia_iowait */
+#define SSB_PCMCIA_IOW_0 0x0000003F /* waitcount0 */
+#define SSB_PCMCIA_IOW_1 0x00001F00 /* waitcount1 */
+#define SSB_PCMCIA_IOW_1_SHIFT 8
+#define SSB_PCMCIA_IOW_2 0x001F0000 /* waitcount2 */
+#define SSB_PCMCIA_IOW_2_SHIFT 16
+#define SSB_PCMCIA_IOW_3 0x1F000000 /* waitcount3 */
+#define SSB_PCMCIA_IOW_3_SHIFT 24
+
+/* prog_waitcount */
+#define SSB_PROG_WCNT_0 0x0000001F /* waitcount0 */
+#define SSB_PROG_WCNT_1 0x00001F00 /* waitcount1 */
+#define SSB_PROG_WCNT_1_SHIFT 8
+#define SSB_PROG_WCNT_2 0x001F0000 /* waitcount2 */
+#define SSB_PROG_WCNT_2_SHIFT 16
+#define SSB_PROG_WCNT_3 0x1F000000 /* waitcount3 */
+#define SSB_PROG_WCNT_3_SHIFT 24
+
+#define SSB_PROG_W0 0x0000000C
+#define SSB_PROG_W1 0x00000A00
+#define SSB_PROG_W2 0x00020000
+#define SSB_PROG_W3 0x01000000
+
+/* flash_waitcount */
+#define SSB_FLASH_WCNT_0 0x0000001F /* waitcount0 */
+#define SSB_FLASH_WCNT_1 0x00001F00 /* waitcount1 */
+#define SSB_FLASH_WCNT_1_SHIFT 8
+#define SSB_FLASH_WCNT_2 0x001F0000 /* waitcount2 */
+#define SSB_FLASH_WCNT_2_SHIFT 16
+#define SSB_FLASH_WCNT_3 0x1F000000 /* waitcount3 */
+#define SSB_FLASH_WCNT_3_SHIFT 24
+
+/* watchdog */
+#define SSB_EXTIF_WATCHDOG_CLK 48000000 /* Hz */
+
+#define SSB_EXTIF_WATCHDOG_MAX_TIMER ((1 << 28) - 1)
+#define SSB_EXTIF_WATCHDOG_MAX_TIMER_MS (SSB_EXTIF_WATCHDOG_MAX_TIMER \
+ / (SSB_EXTIF_WATCHDOG_CLK / 1000))
+
+
+#ifdef CONFIG_SSB_DRIVER_EXTIF
+
+struct ssb_extif {
+ struct ssb_device *dev;
+ spinlock_t gpio_lock;
+};
+
+static inline bool ssb_extif_available(struct ssb_extif *extif)
+{
+ return (extif->dev != NULL);
+}
+
+extern void ssb_extif_get_clockcontrol(struct ssb_extif *extif,
+ u32 *plltype, u32 *n, u32 *m);
+
+extern void ssb_extif_timing_init(struct ssb_extif *extif,
+ unsigned long ns);
+
+extern u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks);
+
+/* Extif GPIO pin access */
+u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask);
+u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask, u32 value);
+u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask, u32 value);
+u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask, u32 value);
+u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask, u32 value);
+
+#ifdef CONFIG_SSB_SERIAL
+extern int ssb_extif_serial_init(struct ssb_extif *extif,
+ struct ssb_serial_port *ports);
+#endif /* CONFIG_SSB_SERIAL */
+
+
+#else /* CONFIG_SSB_DRIVER_EXTIF */
+/* extif disabled */
+
+struct ssb_extif {
+};
+
+static inline bool ssb_extif_available(struct ssb_extif *extif)
+{
+ return 0;
+}
+
+static inline
+void ssb_extif_get_clockcontrol(struct ssb_extif *extif,
+ u32 *plltype, u32 *n, u32 *m)
+{
+}
+
+static inline
+void ssb_extif_timing_init(struct ssb_extif *extif, unsigned long ns)
+{
+}
+
+static inline
+u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks)
+{
+ return 0;
+}
+
+static inline u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
+{
+ return 0;
+}
+
+static inline u32 ssb_extif_gpio_out(struct ssb_extif *extif, u32 mask,
+ u32 value)
+{
+ return 0;
+}
+
+static inline u32 ssb_extif_gpio_outen(struct ssb_extif *extif, u32 mask,
+ u32 value)
+{
+ return 0;
+}
+
+static inline u32 ssb_extif_gpio_polarity(struct ssb_extif *extif, u32 mask,
+ u32 value)
+{
+ return 0;
+}
+
+static inline u32 ssb_extif_gpio_intmask(struct ssb_extif *extif, u32 mask,
+ u32 value)
+{
+ return 0;
+}
+
+#ifdef CONFIG_SSB_SERIAL
+static inline int ssb_extif_serial_init(struct ssb_extif *extif,
+ struct ssb_serial_port *ports)
+{
+ return 0;
+}
+#endif /* CONFIG_SSB_SERIAL */
+
+#endif /* CONFIG_SSB_DRIVER_EXTIF */
+#endif /* LINUX_SSB_EXTIFCORE_H_ */
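The watchdog constants above pin down a hard upper bound: with a 48 MHz tick clock and a 28-bit counter, SSB_EXTIF_WATCHDOG_MAX_TIMER_MS works out to roughly 5592 ms. Below is a minimal usage sketch; it assumes "extif" is the instance owned by the bus driver and that the GPIO helpers follow the usual SSB mask/value convention (only the bits set in the mask are written). The helper name and pin number are illustrative, not part of the header.

#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_extif.h>

static void example_extif_use(struct ssb_extif *extif, u32 timeout_ms)
{
        u32 ticks;

        if (!ssb_extif_available(extif))
                return;

        /* Clamp to what the 28-bit counter can hold, then convert to 48 MHz ticks. */
        if (timeout_ms > SSB_EXTIF_WATCHDOG_MAX_TIMER_MS)
                timeout_ms = SSB_EXTIF_WATCHDOG_MAX_TIMER_MS;
        ticks = timeout_ms * (SSB_EXTIF_WATCHDOG_CLK / 1000);
        ssb_extif_watchdog_timer_set(extif, ticks);

        /* Drive GPIO 1 high: enable the output, then set the pin (assumed mask/value semantics). */
        ssb_extif_gpio_outen(extif, 1 << 1, 1 << 1);
        ssb_extif_gpio_out(extif, 1 << 1, 1 << 1);
}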
diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h
new file mode 100644
index 000000000..068847250
--- /dev/null
+++ b/include/linux/ssb/ssb_driver_gige.h
@@ -0,0 +1,193 @@
+#ifndef LINUX_SSB_DRIVER_GIGE_H_
+#define LINUX_SSB_DRIVER_GIGE_H_
+
+#include <linux/ssb/ssb.h>
+#include <linux/bug.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+
+
+#ifdef CONFIG_SSB_DRIVER_GIGE
+
+
+#define SSB_GIGE_PCIIO 0x0000 /* PCI I/O Registers (1024 bytes) */
+#define SSB_GIGE_RESERVED 0x0400 /* Reserved (1024 bytes) */
+#define SSB_GIGE_PCICFG 0x0800 /* PCI config space (256 bytes) */
+#define SSB_GIGE_SHIM_FLUSHSTAT 0x0C00 /* PCI to OCP: Flush status control (32bit) */
+#define SSB_GIGE_SHIM_FLUSHRDA 0x0C04 /* PCI to OCP: Flush read address (32bit) */
+#define SSB_GIGE_SHIM_FLUSHTO 0x0C08 /* PCI to OCP: Flush timeout counter (32bit) */
+#define SSB_GIGE_SHIM_BARRIER 0x0C0C /* PCI to OCP: Barrier register (32bit) */
+#define SSB_GIGE_SHIM_MAOCPSI 0x0C10 /* PCI to OCP: MaocpSI Control (32bit) */
+#define SSB_GIGE_SHIM_SIOCPMA 0x0C14 /* PCI to OCP: SiocpMa Control (32bit) */
+
+/* TM Status High flags */
+#define SSB_GIGE_TMSHIGH_RGMII 0x00010000 /* Have an RGMII PHY-bus */
+/* TM Status Low flags */
+#define SSB_GIGE_TMSLOW_TXBYPASS 0x00080000 /* TX bypass (no delay) */
+#define SSB_GIGE_TMSLOW_RXBYPASS 0x00100000 /* RX bypass (no delay) */
+#define SSB_GIGE_TMSLOW_DLLEN 0x01000000 /* Enable DLL controls */
+
+/* Boardflags (low) */
+#define SSB_GIGE_BFL_ROBOSWITCH 0x0010
+
+
+#define SSB_GIGE_MEM_RES_NAME "SSB Broadcom 47xx GigE memory"
+#define SSB_GIGE_IO_RES_NAME "SSB Broadcom 47xx GigE I/O"
+
+struct ssb_gige {
+ struct ssb_device *dev;
+
+ spinlock_t lock;
+
+ /* True, if the device has an RGMII bus.
+ * False, if the device has a GMII bus. */
+ bool has_rgmii;
+
+ /* The PCI controller device. */
+ struct pci_controller pci_controller;
+ struct pci_ops pci_ops;
+ struct resource mem_resource;
+ struct resource io_resource;
+};
+
+/* Check whether a PCI device is a SSB Gigabit Ethernet core. */
+extern bool pdev_is_ssb_gige_core(struct pci_dev *pdev);
+
+/* Convert a pci_dev pointer to a ssb_gige pointer. */
+static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
+{
+ if (!pdev_is_ssb_gige_core(pdev))
+ return NULL;
+ return container_of(pdev->bus->ops, struct ssb_gige, pci_ops);
+}
+
+/* Returns whether the PHY is connected by an RGMII bus. */
+static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev)
+{
+ struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+ return (dev ? dev->has_rgmii : 0);
+}
+
+/* Returns whether we have a Roboswitch. */
+static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
+{
+ struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+ if (dev)
+ return !!(dev->dev->bus->sprom.boardflags_lo &
+ SSB_GIGE_BFL_ROBOSWITCH);
+ return 0;
+}
+
+/* Returns whether we can only do one DMA at once. */
+static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
+{
+ struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+ if (dev)
+ return ((dev->dev->bus->chip_id == 0x4785) &&
+ (dev->dev->bus->chip_rev < 2));
+ return 0;
+}
+
+/* Returns whether we must flush posted writes. */
+static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
+{
+ struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+ if (dev)
+ return (dev->dev->bus->chip_id == 0x4785);
+ return 0;
+}
+
+/* Get the device MAC address */
+static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
+{
+ struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ memcpy(macaddr, dev->dev->bus->sprom.et0mac, 6);
+ return 0;
+}
+
+/* Get the device phy address */
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+ struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+ if (!dev)
+ return -ENODEV;
+
+ return dev->dev->bus->sprom.et0phyaddr;
+}
+
+extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
+ struct pci_dev *pdev);
+extern int ssb_gige_map_irq(struct ssb_device *sdev,
+ const struct pci_dev *pdev);
+
+/* The GigE driver is not a standalone module, because we don't have support
+ * for unregistering the driver. So we could not unload the module anyway. */
+extern int ssb_gige_init(void);
+static inline void ssb_gige_exit(void)
+{
+ /* Currently we can not unregister the GigE driver,
+ * because we can not unregister the PCI bridge. */
+ BUG();
+}
+
+
+#else /* CONFIG_SSB_DRIVER_GIGE */
+/* Gigabit Ethernet driver disabled */
+
+
+static inline int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
+ struct pci_dev *pdev)
+{
+ return -ENOSYS;
+}
+static inline int ssb_gige_map_irq(struct ssb_device *sdev,
+ const struct pci_dev *pdev)
+{
+ return -ENOSYS;
+}
+static inline int ssb_gige_init(void)
+{
+ return 0;
+}
+static inline void ssb_gige_exit(void)
+{
+}
+
+static inline bool pdev_is_ssb_gige_core(struct pci_dev *pdev)
+{
+ return 0;
+}
+static inline struct ssb_gige * pdev_to_ssb_gige(struct pci_dev *pdev)
+{
+ return NULL;
+}
+static inline bool ssb_gige_is_rgmii(struct pci_dev *pdev)
+{
+ return 0;
+}
+static inline bool ssb_gige_have_roboswitch(struct pci_dev *pdev)
+{
+ return 0;
+}
+static inline bool ssb_gige_one_dma_at_once(struct pci_dev *pdev)
+{
+ return 0;
+}
+static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev)
+{
+ return 0;
+}
+static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
+{
+ return -ENODEV;
+}
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_SSB_DRIVER_GIGE */
+#endif /* LINUX_SSB_DRIVER_GIGE_H_ */
diff --git a/include/linux/ssb/ssb_driver_mips.h b/include/linux/ssb/ssb_driver_mips.h
new file mode 100644
index 000000000..6535e4718
--- /dev/null
+++ b/include/linux/ssb/ssb_driver_mips.h
@@ -0,0 +1,70 @@
+#ifndef LINUX_SSB_MIPSCORE_H_
+#define LINUX_SSB_MIPSCORE_H_
+
+#ifdef CONFIG_SSB_DRIVER_MIPS
+
+struct ssb_device;
+
+struct ssb_serial_port {
+ void *regs;
+ unsigned long clockspeed;
+ unsigned int irq;
+ unsigned int baud_base;
+ unsigned int reg_shift;
+};
+
+struct ssb_pflash {
+ bool present;
+ u8 buswidth;
+ u32 window;
+ u32 window_size;
+};
+
+#ifdef CONFIG_SSB_SFLASH
+struct ssb_sflash {
+ bool present;
+ u32 window;
+ u32 blocksize;
+ u16 numblocks;
+ u32 size;
+
+ void *priv;
+};
+#endif
+
+struct ssb_mipscore {
+ struct ssb_device *dev;
+
+ int nr_serial_ports;
+ struct ssb_serial_port serial_ports[4];
+
+ struct ssb_pflash pflash;
+#ifdef CONFIG_SSB_SFLASH
+ struct ssb_sflash sflash;
+#endif
+};
+
+extern void ssb_mipscore_init(struct ssb_mipscore *mcore);
+extern u32 ssb_cpu_clock(struct ssb_mipscore *mcore);
+
+extern unsigned int ssb_mips_irq(struct ssb_device *dev);
+
+
+#else /* CONFIG_SSB_DRIVER_MIPS */
+
+struct ssb_mipscore {
+};
+
+static inline
+void ssb_mipscore_init(struct ssb_mipscore *mcore)
+{
+}
+
+static inline unsigned int ssb_mips_irq(struct ssb_device *dev)
+{
+ return 0;
+}
+
+#endif /* CONFIG_SSB_DRIVER_MIPS */
+
+#endif /* LINUX_SSB_MIPSCORE_H_ */
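A short sketch of how the MIPS-core fields might be consumed. It assumes CONFIG_SSB_DRIVER_MIPS is enabled (note that ssb_cpu_clock() has no stub in the disabled branch) and that the ssb_mipscore instance is the one embedded in the bus structure; the function name is illustrative.

#include <linux/kernel.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_mips.h>

static void example_mips_report(struct ssb_mipscore *mcore)
{
        int i;

        pr_info("SSB CPU clock: %u Hz\n", ssb_cpu_clock(mcore));
        for (i = 0; i < mcore->nr_serial_ports; i++)
                pr_info("UART%d: irq %u, baud_base %u\n", i,
                        mcore->serial_ports[i].irq,
                        mcore->serial_ports[i].baud_base);
}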
diff --git a/include/linux/ssb/ssb_driver_pci.h b/include/linux/ssb/ssb_driver_pci.h
new file mode 100644
index 000000000..41e330e51
--- /dev/null
+++ b/include/linux/ssb/ssb_driver_pci.h
@@ -0,0 +1,130 @@
+#ifndef LINUX_SSB_PCICORE_H_
+#define LINUX_SSB_PCICORE_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+
+#ifdef CONFIG_SSB_DRIVER_PCICORE
+
+/* PCI core registers. */
+#define SSB_PCICORE_CTL 0x0000 /* PCI Control */
+#define SSB_PCICORE_CTL_RST_OE 0x00000001 /* PCI_RESET Output Enable */
+#define SSB_PCICORE_CTL_RST 0x00000002 /* PCI_RESET driven out to pin */
+#define SSB_PCICORE_CTL_CLK_OE 0x00000004 /* Clock gate Output Enable */
+#define SSB_PCICORE_CTL_CLK 0x00000008 /* Gate for clock driven out to pin */
+#define SSB_PCICORE_ARBCTL 0x0010 /* PCI Arbiter Control */
+#define SSB_PCICORE_ARBCTL_INTERN 0x00000001 /* Use internal arbiter */
+#define SSB_PCICORE_ARBCTL_EXTERN 0x00000002 /* Use external arbiter */
+#define SSB_PCICORE_ARBCTL_PARKID 0x00000006 /* Mask, selects which agent is parked on an idle bus */
+#define SSB_PCICORE_ARBCTL_PARKID_LAST 0x00000000 /* Last requestor */
+#define SSB_PCICORE_ARBCTL_PARKID_4710 0x00000002 /* 4710 */
+#define SSB_PCICORE_ARBCTL_PARKID_EXT0 0x00000004 /* External requestor 0 */
+#define SSB_PCICORE_ARBCTL_PARKID_EXT1 0x00000006 /* External requestor 1 */
+#define SSB_PCICORE_ISTAT 0x0020 /* Interrupt status */
+#define SSB_PCICORE_ISTAT_INTA 0x00000001 /* PCI INTA# */
+#define SSB_PCICORE_ISTAT_INTB 0x00000002 /* PCI INTB# */
+#define SSB_PCICORE_ISTAT_SERR 0x00000004 /* PCI SERR# (write to clear) */
+#define SSB_PCICORE_ISTAT_PERR 0x00000008 /* PCI PERR# (write to clear) */
+#define SSB_PCICORE_ISTAT_PME 0x00000010 /* PCI PME# */
+#define SSB_PCICORE_IMASK 0x0024 /* Interrupt mask */
+#define SSB_PCICORE_IMASK_INTA 0x00000001 /* PCI INTA# */
+#define SSB_PCICORE_IMASK_INTB 0x00000002 /* PCI INTB# */
+#define SSB_PCICORE_IMASK_SERR 0x00000004 /* PCI SERR# */
+#define SSB_PCICORE_IMASK_PERR 0x00000008 /* PCI PERR# */
+#define SSB_PCICORE_IMASK_PME 0x00000010 /* PCI PME# */
+#define SSB_PCICORE_MBOX 0x0028 /* Backplane to PCI Mailbox */
+#define SSB_PCICORE_MBOX_F0_0 0x00000100 /* PCI function 0, INT 0 */
+#define SSB_PCICORE_MBOX_F0_1 0x00000200 /* PCI function 0, INT 1 */
+#define SSB_PCICORE_MBOX_F1_0 0x00000400 /* PCI function 1, INT 0 */
+#define SSB_PCICORE_MBOX_F1_1 0x00000800 /* PCI function 1, INT 1 */
+#define SSB_PCICORE_MBOX_F2_0 0x00001000 /* PCI function 2, INT 0 */
+#define SSB_PCICORE_MBOX_F2_1 0x00002000 /* PCI function 2, INT 1 */
+#define SSB_PCICORE_MBOX_F3_0 0x00004000 /* PCI function 3, INT 0 */
+#define SSB_PCICORE_MBOX_F3_1 0x00008000 /* PCI function 3, INT 1 */
+#define SSB_PCICORE_BCAST_ADDR 0x0050 /* Backplane Broadcast Address */
+#define SSB_PCICORE_BCAST_ADDR_MASK 0x000000FF
+#define SSB_PCICORE_BCAST_DATA 0x0054 /* Backplane Broadcast Data */
+#define SSB_PCICORE_GPIO_IN 0x0060 /* rev >= 2 only */
+#define SSB_PCICORE_GPIO_OUT 0x0064 /* rev >= 2 only */
+#define SSB_PCICORE_GPIO_ENABLE 0x0068 /* rev >= 2 only */
+#define SSB_PCICORE_GPIO_CTL 0x006C /* rev >= 2 only */
+#define SSB_PCICORE_SBTOPCI0 0x0100 /* Backplane to PCI translation 0 (sbtopci0) */
+#define SSB_PCICORE_SBTOPCI0_MASK 0xFC000000
+#define SSB_PCICORE_SBTOPCI1 0x0104 /* Backplane to PCI translation 1 (sbtopci1) */
+#define SSB_PCICORE_SBTOPCI1_MASK 0xFC000000
+#define SSB_PCICORE_SBTOPCI2 0x0108 /* Backplane to PCI translation 2 (sbtopci2) */
+#define SSB_PCICORE_SBTOPCI2_MASK 0xC0000000
+#define SSB_PCICORE_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
+#define SSB_PCICORE_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
+#define SSB_PCICORE_PCICFG2 0x0600 /* PCI config space 2 (rev >= 8) */
+#define SSB_PCICORE_PCICFG3 0x0700 /* PCI config space 3 (rev >= 8) */
+#define SSB_PCICORE_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) /* SPROM shadow area (72 bytes) */
+
+/* SBtoPCIx */
+#define SSB_PCICORE_SBTOPCI_MEM 0x00000000
+#define SSB_PCICORE_SBTOPCI_IO 0x00000001
+#define SSB_PCICORE_SBTOPCI_CFG0 0x00000002
+#define SSB_PCICORE_SBTOPCI_CFG1 0x00000003
+#define SSB_PCICORE_SBTOPCI_PREF 0x00000004 /* Prefetch enable */
+#define SSB_PCICORE_SBTOPCI_BURST 0x00000008 /* Burst enable */
+#define SSB_PCICORE_SBTOPCI_MRM 0x00000020 /* Memory Read Multiple */
+#define SSB_PCICORE_SBTOPCI_RC 0x00000030 /* Read Command mask (rev >= 11) */
+#define SSB_PCICORE_SBTOPCI_RC_READ 0x00000000 /* Memory read */
+#define SSB_PCICORE_SBTOPCI_RC_READL 0x00000010 /* Memory read line */
+#define SSB_PCICORE_SBTOPCI_RC_READM 0x00000020 /* Memory read multiple */
+
+
+/* PCIcore specific boardflags */
+#define SSB_PCICORE_BFL_NOPCI 0x00000400 /* Board leaves PCI floating */
+
+
+struct ssb_pcicore {
+ struct ssb_device *dev;
+ u8 setup_done:1;
+ u8 hostmode:1;
+ u8 cardbusmode:1;
+};
+
+extern void ssb_pcicore_init(struct ssb_pcicore *pc);
+
+/* Enable IRQ routing for a specific device */
+extern int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
+ struct ssb_device *dev);
+
+int ssb_pcicore_plat_dev_init(struct pci_dev *d);
+int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
+
+
+#else /* CONFIG_SSB_DRIVER_PCICORE */
+
+
+struct ssb_pcicore {
+};
+
+static inline
+void ssb_pcicore_init(struct ssb_pcicore *pc)
+{
+}
+
+static inline
+int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
+ struct ssb_device *dev)
+{
+ return 0;
+}
+
+static inline
+int ssb_pcicore_plat_dev_init(struct pci_dev *d)
+{
+ return -ENODEV;
+}
+static inline
+int ssb_pcicore_pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_SSB_DRIVER_PCICORE */
+#endif /* LINUX_SSB_PCICORE_H_ */
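The two hooks at the end of the hostmode block are meant to be called from an architecture's PCI fixup code when the SSB PCI core acts as the host controller. A hedged sketch of that delegation; the wrapper names follow the common pcibios convention and are assumptions, not declarations from this header.

#include <linux/pci.h>
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_pci.h>

/* Hypothetical arch glue: forward the pcibios hooks to the SSB PCI core. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
        return ssb_pcicore_plat_dev_init(dev);
}

int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
        return ssb_pcicore_pcibios_map_irq(dev, slot, pin);
}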
diff --git a/include/linux/ssb/ssb_embedded.h b/include/linux/ssb/ssb_embedded.h
new file mode 100644
index 000000000..8d8dedff0
--- /dev/null
+++ b/include/linux/ssb/ssb_embedded.h
@@ -0,0 +1,18 @@
+#ifndef LINUX_SSB_EMBEDDED_H_
+#define LINUX_SSB_EMBEDDED_H_
+
+#include <linux/types.h>
+#include <linux/ssb/ssb.h>
+
+
+extern int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks);
+
+/* Generic GPIO API */
+u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask);
+u32 ssb_gpio_out(struct ssb_bus *bus, u32 mask, u32 value);
+u32 ssb_gpio_outen(struct ssb_bus *bus, u32 mask, u32 value);
+u32 ssb_gpio_control(struct ssb_bus *bus, u32 mask, u32 value);
+u32 ssb_gpio_intmask(struct ssb_bus *bus, u32 mask, u32 value);
+u32 ssb_gpio_polarity(struct ssb_bus *bus, u32 mask, u32 value);
+
+#endif /* LINUX_SSB_EMBEDDED_H_ */
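A minimal sketch of the generic GPIO API, assuming the usual SSB mask/value convention: the mask selects which bits are affected and the value supplies the new state for exactly those bits. The pin number and active-low polarity are purely illustrative.

#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_embedded.h>

static void example_led_set(struct ssb_bus *bus, bool on)
{
        const u32 pin = 1 << 7;

        ssb_gpio_outen(bus, pin, pin);          /* configure the pin as an output */
        ssb_gpio_out(bus, pin, on ? 0 : pin);   /* assumed active-low LED wiring */
}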
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h
new file mode 100644
index 000000000..c0f707ac1
--- /dev/null
+++ b/include/linux/ssb/ssb_regs.h
@@ -0,0 +1,686 @@
+#ifndef LINUX_SSB_REGS_H_
+#define LINUX_SSB_REGS_H_
+
+
+/* SiliconBackplane Address Map.
+ * All regions may not exist on all chips.
+ */
+#define SSB_SDRAM_BASE 0x00000000U /* Physical SDRAM */
+#define SSB_PCI_MEM 0x08000000U /* Host Mode sb2pcitranslation0 (64 MB) */
+#define SSB_PCI_CFG 0x0c000000U /* Host Mode sb2pcitranslation1 (64 MB) */
+#define SSB_SDRAM_SWAPPED 0x10000000U /* Byteswapped Physical SDRAM */
+#define SSB_ENUM_BASE 0x18000000U /* Enumeration space base */
+#define SSB_ENUM_LIMIT 0x18010000U /* Enumeration space limit */
+
+#define SSB_FLASH2 0x1c000000U /* Flash Region 2 (region 1 shadowed here) */
+#define SSB_FLASH2_SZ 0x02000000U /* Size of Flash Region 2 */
+
+#define SSB_EXTIF_BASE 0x1f000000U /* External Interface region base address */
+#define SSB_FLASH1 0x1fc00000U /* Flash Region 1 */
+#define SSB_FLASH1_SZ 0x00400000U /* Size of Flash Region 1 */
+
+#define SSB_PCI_DMA 0x40000000U /* Client Mode sb2pcitranslation2 (1 GB) */
+#define SSB_PCI_DMA_SZ 0x40000000U /* Client Mode sb2pcitranslation2 size in bytes */
+#define SSB_PCIE_DMA_L32 0x00000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), low 32 bits */
+#define SSB_PCIE_DMA_H32 0x80000000U /* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */
+#define SSB_EUART (SSB_EXTIF_BASE + 0x00800000)
+#define SSB_LED (SSB_EXTIF_BASE + 0x00900000)
+
+
+/* Enumeration space constants */
+#define SSB_CORE_SIZE 0x1000 /* Size of a core MMIO area */
+#define SSB_MAX_NR_CORES ((SSB_ENUM_LIMIT - SSB_ENUM_BASE) / SSB_CORE_SIZE)
+
+
+/* mips address */
+#define SSB_EJTAG 0xff200000 /* MIPS EJTAG space (2M) */
+
+
+/* SSB PCI config space registers. */
+#define SSB_PMCSR 0x44
+#define SSB_PE 0x100
+#define SSB_BAR0_WIN 0x80 /* Backplane address space 0 */
+#define SSB_BAR1_WIN 0x84 /* Backplane address space 1 */
+#define SSB_SPROMCTL 0x88 /* SPROM control */
+#define SSB_SPROMCTL_WE 0x10 /* SPROM write enable */
+#define SSB_BAR1_CONTROL 0x8c /* Address space 1 burst control */
+#define SSB_PCI_IRQS 0x90 /* PCI interrupts */
+#define SSB_PCI_IRQMASK 0x94 /* PCI IRQ control and mask (pcirev >= 6 only) */
+#define SSB_BACKPLANE_IRQS 0x98 /* Backplane Interrupts */
+#define SSB_GPIO_IN 0xB0 /* GPIO Input (pcirev >= 3 only) */
+#define SSB_GPIO_OUT 0xB4 /* GPIO Output (pcirev >= 3 only) */
+#define SSB_GPIO_OUT_ENABLE 0xB8 /* GPIO Output Enable/Disable (pcirev >= 3 only) */
+#define SSB_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */
+#define SSB_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */
+#define SSB_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal powerup */
+#define SSB_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL powerdown */
+
+
+#define SSB_BAR0_MAX_RETRIES 50
+
+/* Silicon backplane configuration register definitions */
+#define SSB_IPSFLAG 0x0F08
+#define SSB_IPSFLAG_IRQ1 0x0000003F /* which sbflags get routed to mips interrupt 1 */
+#define SSB_IPSFLAG_IRQ1_SHIFT 0
+#define SSB_IPSFLAG_IRQ2 0x00003F00 /* which sbflags get routed to mips interrupt 2 */
+#define SSB_IPSFLAG_IRQ2_SHIFT 8
+#define SSB_IPSFLAG_IRQ3 0x003F0000 /* which sbflags get routed to mips interrupt 3 */
+#define SSB_IPSFLAG_IRQ3_SHIFT 16
+#define SSB_IPSFLAG_IRQ4 0x3F000000 /* which sbflags get routed to mips interrupt 4 */
+#define SSB_IPSFLAG_IRQ4_SHIFT 24
+#define SSB_TPSFLAG 0x0F18
+#define SSB_TPSFLAG_BPFLAG 0x0000003F /* Backplane flag # */
+#define SSB_TPSFLAG_ALWAYSIRQ 0x00000040 /* IRQ is always sent on the Backplane */
+#define SSB_TMERRLOGA 0x0F48
+#define SSB_TMERRLOG 0x0F50
+#define SSB_ADMATCH3 0x0F60
+#define SSB_ADMATCH2 0x0F68
+#define SSB_ADMATCH1 0x0F70
+#define SSB_IMSTATE 0x0F90 /* SB Initiator Agent State */
+#define SSB_IMSTATE_PC 0x0000000f /* Pipe Count */
+#define SSB_IMSTATE_AP_MASK 0x00000030 /* Arbitration Priority */
+#define SSB_IMSTATE_AP_BOTH 0x00000000 /* Use both timeslices and token */
+#define SSB_IMSTATE_AP_TS 0x00000010 /* Use timeslices only */
+#define SSB_IMSTATE_AP_TK 0x00000020 /* Use token only */
+#define SSB_IMSTATE_AP_RSV 0x00000030 /* Reserved */
+#define SSB_IMSTATE_IBE 0x00020000 /* In Band Error */
+#define SSB_IMSTATE_TO 0x00040000 /* Timeout */
+#define SSB_IMSTATE_BUSY 0x01800000 /* Busy (Backplane rev >= 2.3 only) */
+#define SSB_IMSTATE_REJECT 0x02000000 /* Reject (Backplane rev >= 2.3 only) */
+#define SSB_INTVEC 0x0F94 /* SB Interrupt Mask */
+#define SSB_INTVEC_PCI 0x00000001 /* Enable interrupts for PCI */
+#define SSB_INTVEC_ENET0 0x00000002 /* Enable interrupts for enet 0 */
+#define SSB_INTVEC_ILINE20 0x00000004 /* Enable interrupts for iline20 */
+#define SSB_INTVEC_CODEC 0x00000008 /* Enable interrupts for v90 codec */
+#define SSB_INTVEC_USB 0x00000010 /* Enable interrupts for usb */
+#define SSB_INTVEC_EXTIF 0x00000020 /* Enable interrupts for external i/f */
+#define SSB_INTVEC_ENET1 0x00000040 /* Enable interrupts for enet 1 */
+#define SSB_TMSLOW 0x0F98 /* SB Target State Low */
+#define SSB_TMSLOW_RESET 0x00000001 /* Reset */
+#define SSB_TMSLOW_REJECT 0x00000002 /* Reject (Standard Backplane) */
+#define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */
+#define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */
+#define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */
+#define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */
+#define SSB_TMSLOW_BE 0x80000000 /* BIST Enable */
+#define SSB_TMSHIGH 0x0F9C /* SB Target State High */
+#define SSB_TMSHIGH_SERR 0x00000001 /* S-error */
+#define SSB_TMSHIGH_INT 0x00000002 /* Interrupt */
+#define SSB_TMSHIGH_BUSY 0x00000004 /* Busy */
+#define SSB_TMSHIGH_TO 0x00000020 /* Timeout. Backplane rev >= 2.3 only */
+#define SSB_TMSHIGH_COREFL 0x1FFF0000 /* Core specific flags */
+#define SSB_TMSHIGH_COREFL_SHIFT 16
+#define SSB_TMSHIGH_DMA64 0x10000000 /* 64bit DMA supported */
+#define SSB_TMSHIGH_GCR 0x20000000 /* Gated Clock Request */
+#define SSB_TMSHIGH_BISTF 0x40000000 /* BIST Failed */
+#define SSB_TMSHIGH_BISTD 0x80000000 /* BIST Done */
+#define SSB_BWA0 0x0FA0
+#define SSB_IMCFGLO 0x0FA8
+#define SSB_IMCFGLO_SERTO 0x00000007 /* Service timeout */
+#define SSB_IMCFGLO_REQTO 0x00000070 /* Request timeout */
+#define SSB_IMCFGLO_REQTO_SHIFT 4
+#define SSB_IMCFGLO_CONNID 0x00FF0000 /* Connection ID */
+#define SSB_IMCFGLO_CONNID_SHIFT 16
+#define SSB_IMCFGHI 0x0FAC
+#define SSB_ADMATCH0 0x0FB0
+#define SSB_TMCFGLO 0x0FB8
+#define SSB_TMCFGHI 0x0FBC
+#define SSB_BCONFIG 0x0FC0
+#define SSB_BSTATE 0x0FC8
+#define SSB_ACTCFG 0x0FD8
+#define SSB_FLAGST 0x0FE8
+#define SSB_IDLOW 0x0FF8
+#define SSB_IDLOW_CFGSP 0x00000003 /* Config Space */
+#define SSB_IDLOW_ADDRNGE 0x00000038 /* Address Ranges supported */
+#define SSB_IDLOW_ADDRNGE_SHIFT 3
+#define SSB_IDLOW_SYNC 0x00000040
+#define SSB_IDLOW_INITIATOR 0x00000080
+#define SSB_IDLOW_MIBL 0x00000F00 /* Minimum Backplane latency */
+#define SSB_IDLOW_MIBL_SHIFT 8
+#define SSB_IDLOW_MABL 0x0000F000 /* Maximum Backplane latency */
+#define SSB_IDLOW_MABL_SHIFT 12
+#define SSB_IDLOW_TIF 0x00010000 /* This Initiator is first */
+#define SSB_IDLOW_CCW 0x000C0000 /* Cycle counter width */
+#define SSB_IDLOW_CCW_SHIFT 18
+#define SSB_IDLOW_TPT 0x00F00000 /* Target ports */
+#define SSB_IDLOW_TPT_SHIFT 20
+#define SSB_IDLOW_INITP 0x0F000000 /* Initiator ports */
+#define SSB_IDLOW_INITP_SHIFT 24
+#define SSB_IDLOW_SSBREV 0xF0000000 /* Sonics Backplane Revision code */
+#define SSB_IDLOW_SSBREV_22 0x00000000 /* <= 2.2 */
+#define SSB_IDLOW_SSBREV_23 0x10000000 /* 2.3 */
+#define SSB_IDLOW_SSBREV_24 0x40000000 /* ?? Found in BCM4328 */
+#define SSB_IDLOW_SSBREV_25 0x50000000 /* ?? Not Found yet */
+#define SSB_IDLOW_SSBREV_26 0x60000000 /* ?? Found in some BCM4311/2 */
+#define SSB_IDLOW_SSBREV_27 0x70000000 /* ?? Found in some BCM4311/2 */
+#define SSB_IDHIGH 0x0FFC /* SB Identification High */
+#define SSB_IDHIGH_RCLO 0x0000000F /* Revision Code (low part) */
+#define SSB_IDHIGH_CC 0x00008FF0 /* Core Code */
+#define SSB_IDHIGH_CC_SHIFT 4
+#define SSB_IDHIGH_RCHI 0x00007000 /* Revision Code (high part) */
+#define SSB_IDHIGH_RCHI_SHIFT 8 /* yes, shift 8 is right */
+#define SSB_IDHIGH_VC 0xFFFF0000 /* Vendor Code */
+#define SSB_IDHIGH_VC_SHIFT 16
+
+/* SPROM shadow area. If not otherwise noted, fields are
+ * two bytes wide. Note that the SPROM can _only_ be read
+ * in two-byte quantities.
+ */
+#define SSB_SPROMSIZE_WORDS 64
+#define SSB_SPROMSIZE_BYTES (SSB_SPROMSIZE_WORDS * sizeof(u16))
+#define SSB_SPROMSIZE_WORDS_R123 64
+#define SSB_SPROMSIZE_WORDS_R4 220
+#define SSB_SPROMSIZE_BYTES_R123 (SSB_SPROMSIZE_WORDS_R123 * sizeof(u16))
+#define SSB_SPROMSIZE_BYTES_R4 (SSB_SPROMSIZE_WORDS_R4 * sizeof(u16))
+#define SSB_SPROMSIZE_WORDS_R10 230
+#define SSB_SPROMSIZE_WORDS_R11 234
+#define SSB_SPROM_BASE1 0x1000
+#define SSB_SPROM_BASE31 0x0800
+#define SSB_SPROM_REVISION 0x007E
+#define SSB_SPROM_REVISION_REV 0x00FF /* SPROM Revision number */
+#define SSB_SPROM_REVISION_CRC 0xFF00 /* SPROM CRC8 value */
+#define SSB_SPROM_REVISION_CRC_SHIFT 8
+
+/* SPROM Revision 1 */
+#define SSB_SPROM1_SPID 0x0004 /* Subsystem Product ID for PCI */
+#define SSB_SPROM1_SVID 0x0006 /* Subsystem Vendor ID for PCI */
+#define SSB_SPROM1_PID 0x0008 /* Product ID for PCI */
+#define SSB_SPROM1_IL0MAC 0x0048 /* 6 bytes MAC address for 802.11b/g */
+#define SSB_SPROM1_ET0MAC 0x004E /* 6 bytes MAC address for Ethernet */
+#define SSB_SPROM1_ET1MAC 0x0054 /* 6 bytes MAC address for 802.11a */
+#define SSB_SPROM1_ETHPHY 0x005A /* Ethernet PHY settings */
+#define SSB_SPROM1_ETHPHY_ET0A 0x001F /* MII Address for enet0 */
+#define SSB_SPROM1_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */
+#define SSB_SPROM1_ETHPHY_ET1A_SHIFT 5
+#define SSB_SPROM1_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */
+#define SSB_SPROM1_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */
+#define SSB_SPROM1_BINF 0x005C /* Board info */
+#define SSB_SPROM1_BINF_BREV 0x00FF /* Board Revision */
+#define SSB_SPROM1_BINF_CCODE 0x0F00 /* Country Code */
+#define SSB_SPROM1_BINF_CCODE_SHIFT 8
+#define SSB_SPROM1_BINF_ANTBG 0x3000 /* Available B-PHY and G-PHY antennas */
+#define SSB_SPROM1_BINF_ANTBG_SHIFT 12
+#define SSB_SPROM1_BINF_ANTA 0xC000 /* Available A-PHY antennas */
+#define SSB_SPROM1_BINF_ANTA_SHIFT 14
+#define SSB_SPROM1_PA0B0 0x005E
+#define SSB_SPROM1_PA0B1 0x0060
+#define SSB_SPROM1_PA0B2 0x0062
+#define SSB_SPROM1_GPIOA 0x0064 /* General Purpose IO pins 0 and 1 */
+#define SSB_SPROM1_GPIOA_P0 0x00FF /* Pin 0 */
+#define SSB_SPROM1_GPIOA_P1 0xFF00 /* Pin 1 */
+#define SSB_SPROM1_GPIOA_P1_SHIFT 8
+#define SSB_SPROM1_GPIOB 0x0066 /* General Purpose IO pins 2 and 3 */
+#define SSB_SPROM1_GPIOB_P2 0x00FF /* Pin 2 */
+#define SSB_SPROM1_GPIOB_P3 0xFF00 /* Pin 3 */
+#define SSB_SPROM1_GPIOB_P3_SHIFT 8
+#define SSB_SPROM1_MAXPWR 0x0068 /* Power Amplifier Max Power */
+#define SSB_SPROM1_MAXPWR_BG 0x00FF /* B-PHY and G-PHY (in dBm Q5.2) */
+#define SSB_SPROM1_MAXPWR_A 0xFF00 /* A-PHY (in dBm Q5.2) */
+#define SSB_SPROM1_MAXPWR_A_SHIFT 8
+#define SSB_SPROM1_PA1B0 0x006A
+#define SSB_SPROM1_PA1B1 0x006C
+#define SSB_SPROM1_PA1B2 0x006E
+#define SSB_SPROM1_ITSSI 0x0070 /* Idle TSSI Target */
+#define SSB_SPROM1_ITSSI_BG 0x00FF /* B-PHY and G-PHY */
+#define SSB_SPROM1_ITSSI_A 0xFF00 /* A-PHY */
+#define SSB_SPROM1_ITSSI_A_SHIFT 8
+#define SSB_SPROM1_BFLLO 0x0072 /* Boardflags (low 16 bits) */
+#define SSB_SPROM1_AGAIN 0x0074 /* Antenna Gain (in dBm Q5.2) */
+#define SSB_SPROM1_AGAIN_BG 0x00FF /* B-PHY and G-PHY */
+#define SSB_SPROM1_AGAIN_BG_SHIFT 0
+#define SSB_SPROM1_AGAIN_A 0xFF00 /* A-PHY */
+#define SSB_SPROM1_AGAIN_A_SHIFT 8
+#define SSB_SPROM1_CCODE 0x0076
+
+/* SPROM Revision 2 (inherits from rev 1) */
+#define SSB_SPROM2_BFLHI 0x0038 /* Boardflags (high 16 bits) */
+#define SSB_SPROM2_MAXP_A 0x003A /* A-PHY Max Power */
+#define SSB_SPROM2_MAXP_A_HI 0x00FF /* Max Power High */
+#define SSB_SPROM2_MAXP_A_LO 0xFF00 /* Max Power Low */
+#define SSB_SPROM2_MAXP_A_LO_SHIFT 8
+#define SSB_SPROM2_PA1LOB0 0x003C /* A-PHY PowerAmplifier Low Settings */
+#define SSB_SPROM2_PA1LOB1 0x003E /* A-PHY PowerAmplifier Low Settings */
+#define SSB_SPROM2_PA1LOB2 0x0040 /* A-PHY PowerAmplifier Low Settings */
+#define SSB_SPROM2_PA1HIB0 0x0042 /* A-PHY PowerAmplifier High Settings */
+#define SSB_SPROM2_PA1HIB1 0x0044 /* A-PHY PowerAmplifier High Settings */
+#define SSB_SPROM2_PA1HIB2 0x0046 /* A-PHY PowerAmplifier High Settings */
+#define SSB_SPROM2_OPO 0x0078 /* OFDM Power Offset from CCK Level */
+#define SSB_SPROM2_OPO_VALUE 0x00FF
+#define SSB_SPROM2_OPO_UNUSED 0xFF00
+#define SSB_SPROM2_CCODE 0x007C /* Two char Country Code */
+
+/* SPROM Revision 3 (inherits most data from rev 2) */
+#define SSB_SPROM3_OFDMAPO 0x002C /* A-PHY OFDM Mid Power Offset (4 bytes, BigEndian) */
+#define SSB_SPROM3_OFDMALPO 0x0030 /* A-PHY OFDM Low Power Offset (4 bytes, BigEndian) */
+#define SSB_SPROM3_OFDMAHPO 0x0034 /* A-PHY OFDM High Power Offset (4 bytes, BigEndian) */
+#define SSB_SPROM3_GPIOLDC 0x0042 /* GPIO LED Powersave Duty Cycle (4 bytes, BigEndian) */
+#define SSB_SPROM3_GPIOLDC_OFF 0x0000FF00 /* Off Count */
+#define SSB_SPROM3_GPIOLDC_OFF_SHIFT 8
+#define SSB_SPROM3_GPIOLDC_ON 0x00FF0000 /* On Count */
+#define SSB_SPROM3_GPIOLDC_ON_SHIFT 16
+#define SSB_SPROM3_IL0MAC 0x004A /* 6 bytes MAC address for 802.11b/g */
+#define SSB_SPROM3_CCKPO 0x0078 /* CCK Power Offset */
+#define SSB_SPROM3_CCKPO_1M 0x000F /* 1M Rate PO */
+#define SSB_SPROM3_CCKPO_2M 0x00F0 /* 2M Rate PO */
+#define SSB_SPROM3_CCKPO_2M_SHIFT 4
+#define SSB_SPROM3_CCKPO_55M 0x0F00 /* 5.5M Rate PO */
+#define SSB_SPROM3_CCKPO_55M_SHIFT 8
+#define SSB_SPROM3_CCKPO_11M 0xF000 /* 11M Rate PO */
+#define SSB_SPROM3_CCKPO_11M_SHIFT 12
+#define SSB_SPROM3_OFDMGPO 0x107A /* G-PHY OFDM Power Offset (4 bytes, BigEndian) */
+
+/* SPROM Revision 4 */
+#define SSB_SPROM4_BOARDREV 0x0042 /* Board revision */
+#define SSB_SPROM4_BFLLO 0x0044 /* Boardflags (low 16 bits) */
+#define SSB_SPROM4_BFLHI 0x0046 /* Board Flags Hi */
+#define SSB_SPROM4_BFL2LO 0x0048 /* Board flags 2 (low 16 bits) */
+#define SSB_SPROM4_BFL2HI 0x004A /* Board flags 2 Hi */
+#define SSB_SPROM4_IL0MAC 0x004C /* 6 byte MAC address for a/b/g/n */
+#define SSB_SPROM4_CCODE 0x0052 /* Country Code (2 bytes) */
+#define SSB_SPROM4_GPIOA 0x0056 /* Gen. Purpose IO # 0 and 1 */
+#define SSB_SPROM4_GPIOA_P0 0x00FF /* Pin 0 */
+#define SSB_SPROM4_GPIOA_P1 0xFF00 /* Pin 1 */
+#define SSB_SPROM4_GPIOA_P1_SHIFT 8
+#define SSB_SPROM4_GPIOB 0x0058 /* Gen. Purpose IO # 2 and 3 */
+#define SSB_SPROM4_GPIOB_P2 0x00FF /* Pin 2 */
+#define SSB_SPROM4_GPIOB_P3 0xFF00 /* Pin 3 */
+#define SSB_SPROM4_GPIOB_P3_SHIFT 8
+#define SSB_SPROM4_ETHPHY 0x005A /* Ethernet PHY settings ?? */
+#define SSB_SPROM4_ETHPHY_ET0A 0x001F /* MII Address for enet0 */
+#define SSB_SPROM4_ETHPHY_ET1A 0x03E0 /* MII Address for enet1 */
+#define SSB_SPROM4_ETHPHY_ET1A_SHIFT 5
+#define SSB_SPROM4_ETHPHY_ET0M (1<<14) /* MDIO for enet0 */
+#define SSB_SPROM4_ETHPHY_ET1M (1<<15) /* MDIO for enet1 */
+#define SSB_SPROM4_ANTAVAIL 0x005C /* Antenna available bitfields */
+#define SSB_SPROM4_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */
+#define SSB_SPROM4_ANTAVAIL_BG_SHIFT 0
+#define SSB_SPROM4_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
+#define SSB_SPROM4_ANTAVAIL_A_SHIFT 8
+#define SSB_SPROM4_AGAIN01 0x005E /* Antenna Gain (in dBm Q5.2) */
+#define SSB_SPROM4_AGAIN0 0x00FF /* Antenna 0 */
+#define SSB_SPROM4_AGAIN0_SHIFT 0
+#define SSB_SPROM4_AGAIN1 0xFF00 /* Antenna 1 */
+#define SSB_SPROM4_AGAIN1_SHIFT 8
+#define SSB_SPROM4_AGAIN23 0x0060
+#define SSB_SPROM4_AGAIN2 0x00FF /* Antenna 2 */
+#define SSB_SPROM4_AGAIN2_SHIFT 0
+#define SSB_SPROM4_AGAIN3 0xFF00 /* Antenna 3 */
+#define SSB_SPROM4_AGAIN3_SHIFT 8
+#define SSB_SPROM4_TXPID2G01 0x0062 /* TX Power Index 2GHz */
+#define SSB_SPROM4_TXPID2G0 0x00FF
+#define SSB_SPROM4_TXPID2G0_SHIFT 0
+#define SSB_SPROM4_TXPID2G1 0xFF00
+#define SSB_SPROM4_TXPID2G1_SHIFT 8
+#define SSB_SPROM4_TXPID2G23 0x0064 /* TX Power Index 2GHz */
+#define SSB_SPROM4_TXPID2G2 0x00FF
+#define SSB_SPROM4_TXPID2G2_SHIFT 0
+#define SSB_SPROM4_TXPID2G3 0xFF00
+#define SSB_SPROM4_TXPID2G3_SHIFT 8
+#define SSB_SPROM4_TXPID5G01 0x0066 /* TX Power Index 5GHz middle subband */
+#define SSB_SPROM4_TXPID5G0 0x00FF
+#define SSB_SPROM4_TXPID5G0_SHIFT 0
+#define SSB_SPROM4_TXPID5G1 0xFF00
+#define SSB_SPROM4_TXPID5G1_SHIFT 8
+#define SSB_SPROM4_TXPID5G23 0x0068 /* TX Power Index 5GHz middle subband */
+#define SSB_SPROM4_TXPID5G2 0x00FF
+#define SSB_SPROM4_TXPID5G2_SHIFT 0
+#define SSB_SPROM4_TXPID5G3 0xFF00
+#define SSB_SPROM4_TXPID5G3_SHIFT 8
+#define SSB_SPROM4_TXPID5GL01 0x006A /* TX Power Index 5GHz low subband */
+#define SSB_SPROM4_TXPID5GL0 0x00FF
+#define SSB_SPROM4_TXPID5GL0_SHIFT 0
+#define SSB_SPROM4_TXPID5GL1 0xFF00
+#define SSB_SPROM4_TXPID5GL1_SHIFT 8
+#define SSB_SPROM4_TXPID5GL23 0x006C /* TX Power Index 5GHz low subband */
+#define SSB_SPROM4_TXPID5GL2 0x00FF
+#define SSB_SPROM4_TXPID5GL2_SHIFT 0
+#define SSB_SPROM4_TXPID5GL3 0xFF00
+#define SSB_SPROM4_TXPID5GL3_SHIFT 8
+#define SSB_SPROM4_TXPID5GH01 0x006E /* TX Power Index 5GHz high subband */
+#define SSB_SPROM4_TXPID5GH0 0x00FF
+#define SSB_SPROM4_TXPID5GH0_SHIFT 0
+#define SSB_SPROM4_TXPID5GH1 0xFF00
+#define SSB_SPROM4_TXPID5GH1_SHIFT 8
+#define SSB_SPROM4_TXPID5GH23 0x0070 /* TX Power Index 5GHz high subband */
+#define SSB_SPROM4_TXPID5GH2 0x00FF
+#define SSB_SPROM4_TXPID5GH2_SHIFT 0
+#define SSB_SPROM4_TXPID5GH3 0xFF00
+#define SSB_SPROM4_TXPID5GH3_SHIFT 8
+
+/* There are 4 blocks with power info sharing the same layout */
+#define SSB_SPROM4_PWR_INFO_CORE0 0x0080
+#define SSB_SPROM4_PWR_INFO_CORE1 0x00AE
+#define SSB_SPROM4_PWR_INFO_CORE2 0x00DC
+#define SSB_SPROM4_PWR_INFO_CORE3 0x010A
+
+#define SSB_SPROM4_2G_MAXP_ITSSI 0x00 /* 2 GHz ITSSI and 2 GHz Max Power */
+#define SSB_SPROM4_2G_MAXP 0x00FF
+#define SSB_SPROM4_2G_ITSSI 0xFF00
+#define SSB_SPROM4_2G_ITSSI_SHIFT 8
+#define SSB_SPROM4_2G_PA_0 0x02 /* 2 GHz power amp */
+#define SSB_SPROM4_2G_PA_1 0x04
+#define SSB_SPROM4_2G_PA_2 0x06
+#define SSB_SPROM4_2G_PA_3 0x08
+#define SSB_SPROM4_5G_MAXP_ITSSI 0x0A /* 5 GHz ITSSI and 5.3 GHz Max Power */
+#define SSB_SPROM4_5G_MAXP 0x00FF
+#define SSB_SPROM4_5G_ITSSI 0xFF00
+#define SSB_SPROM4_5G_ITSSI_SHIFT 8
+#define SSB_SPROM4_5GHL_MAXP 0x0C /* 5.2 GHz and 5.8 GHz Max Power */
+#define SSB_SPROM4_5GH_MAXP 0x00FF
+#define SSB_SPROM4_5GL_MAXP 0xFF00
+#define SSB_SPROM4_5GL_MAXP_SHIFT 8
+#define SSB_SPROM4_5G_PA_0 0x0E /* 5.3 GHz power amp */
+#define SSB_SPROM4_5G_PA_1 0x10
+#define SSB_SPROM4_5G_PA_2 0x12
+#define SSB_SPROM4_5G_PA_3 0x14
+#define SSB_SPROM4_5GL_PA_0 0x16 /* 5.2 GHz power amp */
+#define SSB_SPROM4_5GL_PA_1 0x18
+#define SSB_SPROM4_5GL_PA_2 0x1A
+#define SSB_SPROM4_5GL_PA_3 0x1C
+#define SSB_SPROM4_5GH_PA_0 0x1E /* 5.8 GHz power amp */
+#define SSB_SPROM4_5GH_PA_1 0x20
+#define SSB_SPROM4_5GH_PA_2 0x22
+#define SSB_SPROM4_5GH_PA_3 0x24
+
+/* TODO: Make it deprecated */
+#define SSB_SPROM4_MAXP_BG 0x0080 /* Max Power BG in path 1 */
+#define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */
+#define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
+#define SSB_SPROM4_ITSSI_BG_SHIFT 8
+#define SSB_SPROM4_MAXP_A 0x008A /* Max Power A in path 1 */
+#define SSB_SPROM4_MAXP_A_MASK 0x00FF /* Mask for Max Power A */
+#define SSB_SPROM4_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */
+#define SSB_SPROM4_ITSSI_A_SHIFT 8
+#define SSB_SPROM4_PA0B0 0x0082 /* The paXbY locations are */
+#define SSB_SPROM4_PA0B1 0x0084 /* only guesses */
+#define SSB_SPROM4_PA0B2 0x0086
+#define SSB_SPROM4_PA1B0 0x008E
+#define SSB_SPROM4_PA1B1 0x0090
+#define SSB_SPROM4_PA1B2 0x0092
+
+/* SPROM Revision 5 (inherits most data from rev 4) */
+#define SSB_SPROM5_CCODE 0x0044 /* Country Code (2 bytes) */
+#define SSB_SPROM5_BFLLO 0x004A /* Boardflags (low 16 bits) */
+#define SSB_SPROM5_BFLHI 0x004C /* Board Flags Hi */
+#define SSB_SPROM5_BFL2LO 0x004E /* Board flags 2 (low 16 bits) */
+#define SSB_SPROM5_BFL2HI 0x0050 /* Board flags 2 Hi */
+#define SSB_SPROM5_IL0MAC 0x0052 /* 6 byte MAC address for a/b/g/n */
+#define SSB_SPROM5_GPIOA 0x0076 /* Gen. Purpose IO # 0 and 1 */
+#define SSB_SPROM5_GPIOA_P0 0x00FF /* Pin 0 */
+#define SSB_SPROM5_GPIOA_P1 0xFF00 /* Pin 1 */
+#define SSB_SPROM5_GPIOA_P1_SHIFT 8
+#define SSB_SPROM5_GPIOB 0x0078 /* Gen. Purpose IO # 2 and 3 */
+#define SSB_SPROM5_GPIOB_P2 0x00FF /* Pin 2 */
+#define SSB_SPROM5_GPIOB_P3 0xFF00 /* Pin 3 */
+#define SSB_SPROM5_GPIOB_P3_SHIFT 8
+
+/* SPROM Revision 8 */
+#define SSB_SPROM8_BOARDREV 0x0082 /* Board revision */
+#define SSB_SPROM8_BFLLO 0x0084 /* Board flags (bits 0-15) */
+#define SSB_SPROM8_BFLHI 0x0086 /* Board flags (bits 16-31) */
+#define SSB_SPROM8_BFL2LO 0x0088 /* Board flags (bits 32-47) */
+#define SSB_SPROM8_BFL2HI 0x008A /* Board flags (bits 48-63) */
+#define SSB_SPROM8_IL0MAC 0x008C /* 6 byte MAC address */
+#define SSB_SPROM8_CCODE 0x0092 /* 2 byte country code */
+#define SSB_SPROM8_GPIOA 0x0096 /* Gen. Purpose IO # 0 and 1 */
+#define SSB_SPROM8_GPIOA_P0 0x00FF /* Pin 0 */
+#define SSB_SPROM8_GPIOA_P1 0xFF00 /* Pin 1 */
+#define SSB_SPROM8_GPIOA_P1_SHIFT 8
+#define SSB_SPROM8_GPIOB 0x0098 /* Gen. Purpose IO # 2 and 3 */
+#define SSB_SPROM8_GPIOB_P2 0x00FF /* Pin 2 */
+#define SSB_SPROM8_GPIOB_P3 0xFF00 /* Pin 3 */
+#define SSB_SPROM8_GPIOB_P3_SHIFT 8
+#define SSB_SPROM8_LEDDC 0x009A
+#define SSB_SPROM8_LEDDC_ON 0xFF00 /* oncount */
+#define SSB_SPROM8_LEDDC_ON_SHIFT 8
+#define SSB_SPROM8_LEDDC_OFF 0x00FF /* offcount */
+#define SSB_SPROM8_LEDDC_OFF_SHIFT 0
+#define SSB_SPROM8_ANTAVAIL 0x009C /* Antenna available bitfields */
+#define SSB_SPROM8_ANTAVAIL_A 0xFF00 /* A-PHY bitfield */
+#define SSB_SPROM8_ANTAVAIL_A_SHIFT 8
+#define SSB_SPROM8_ANTAVAIL_BG 0x00FF /* B-PHY and G-PHY bitfield */
+#define SSB_SPROM8_ANTAVAIL_BG_SHIFT 0
+#define SSB_SPROM8_AGAIN01 0x009E /* Antenna Gain (in dBm Q5.2) */
+#define SSB_SPROM8_AGAIN0 0x00FF /* Antenna 0 */
+#define SSB_SPROM8_AGAIN0_SHIFT 0
+#define SSB_SPROM8_AGAIN1 0xFF00 /* Antenna 1 */
+#define SSB_SPROM8_AGAIN1_SHIFT 8
+#define SSB_SPROM8_AGAIN23 0x00A0
+#define SSB_SPROM8_AGAIN2 0x00FF /* Antenna 2 */
+#define SSB_SPROM8_AGAIN2_SHIFT 0
+#define SSB_SPROM8_AGAIN3 0xFF00 /* Antenna 3 */
+#define SSB_SPROM8_AGAIN3_SHIFT 8
+#define SSB_SPROM8_TXRXC 0x00A2
+#define SSB_SPROM8_TXRXC_TXCHAIN 0x000f
+#define SSB_SPROM8_TXRXC_TXCHAIN_SHIFT 0
+#define SSB_SPROM8_TXRXC_RXCHAIN 0x00f0
+#define SSB_SPROM8_TXRXC_RXCHAIN_SHIFT 4
+#define SSB_SPROM8_TXRXC_SWITCH 0xff00
+#define SSB_SPROM8_TXRXC_SWITCH_SHIFT 8
+#define SSB_SPROM8_RSSIPARM2G 0x00A4 /* RSSI params for 2GHz */
+#define SSB_SPROM8_RSSISMF2G 0x000F
+#define SSB_SPROM8_RSSISMC2G 0x00F0
+#define SSB_SPROM8_RSSISMC2G_SHIFT 4
+#define SSB_SPROM8_RSSISAV2G 0x0700
+#define SSB_SPROM8_RSSISAV2G_SHIFT 8
+#define SSB_SPROM8_BXA2G 0x1800
+#define SSB_SPROM8_BXA2G_SHIFT 11
+#define SSB_SPROM8_RSSIPARM5G 0x00A6 /* RSSI params for 5GHz */
+#define SSB_SPROM8_RSSISMF5G 0x000F
+#define SSB_SPROM8_RSSISMC5G 0x00F0
+#define SSB_SPROM8_RSSISMC5G_SHIFT 4
+#define SSB_SPROM8_RSSISAV5G 0x0700
+#define SSB_SPROM8_RSSISAV5G_SHIFT 8
+#define SSB_SPROM8_BXA5G 0x1800
+#define SSB_SPROM8_BXA5G_SHIFT 11
+#define SSB_SPROM8_TRI25G 0x00A8 /* TX isolation 2.4&5.3GHz */
+#define SSB_SPROM8_TRI2G 0x00FF /* TX isolation 2.4GHz */
+#define SSB_SPROM8_TRI5G 0xFF00 /* TX isolation 5.3GHz */
+#define SSB_SPROM8_TRI5G_SHIFT 8
+#define SSB_SPROM8_TRI5GHL 0x00AA /* TX isolation 5.2/5.8GHz */
+#define SSB_SPROM8_TRI5GL 0x00FF /* TX isolation 5.2GHz */
+#define SSB_SPROM8_TRI5GH 0xFF00 /* TX isolation 5.8GHz */
+#define SSB_SPROM8_TRI5GH_SHIFT 8
+#define SSB_SPROM8_RXPO 0x00AC /* RX power offsets */
+#define SSB_SPROM8_RXPO2G 0x00FF /* 2GHz RX power offset */
+#define SSB_SPROM8_RXPO2G_SHIFT 0
+#define SSB_SPROM8_RXPO5G 0xFF00 /* 5GHz RX power offset */
+#define SSB_SPROM8_RXPO5G_SHIFT 8
+#define SSB_SPROM8_FEM2G 0x00AE
+#define SSB_SPROM8_FEM5G 0x00B0
+#define SSB_SROM8_FEM_TSSIPOS 0x0001
+#define SSB_SROM8_FEM_TSSIPOS_SHIFT 0
+#define SSB_SROM8_FEM_EXTPA_GAIN 0x0006
+#define SSB_SROM8_FEM_EXTPA_GAIN_SHIFT 1
+#define SSB_SROM8_FEM_PDET_RANGE 0x00F8
+#define SSB_SROM8_FEM_PDET_RANGE_SHIFT 3
+#define SSB_SROM8_FEM_TR_ISO 0x0700
+#define SSB_SROM8_FEM_TR_ISO_SHIFT 8
+#define SSB_SROM8_FEM_ANTSWLUT 0xF800
+#define SSB_SROM8_FEM_ANTSWLUT_SHIFT 11
+#define SSB_SPROM8_THERMAL 0x00B2
+#define SSB_SPROM8_THERMAL_OFFSET 0x00ff
+#define SSB_SPROM8_THERMAL_OFFSET_SHIFT 0
+#define SSB_SPROM8_THERMAL_TRESH 0xff00
+#define SSB_SPROM8_THERMAL_TRESH_SHIFT 8
+/* Temp sense related entries */
+#define SSB_SPROM8_RAWTS 0x00B4
+#define SSB_SPROM8_RAWTS_RAWTEMP 0x01ff
+#define SSB_SPROM8_RAWTS_RAWTEMP_SHIFT 0
+#define SSB_SPROM8_RAWTS_MEASPOWER 0xfe00
+#define SSB_SPROM8_RAWTS_MEASPOWER_SHIFT 9
+#define SSB_SPROM8_OPT_CORRX 0x00B6
+#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE 0x00ff
+#define SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT 0
+#define SSB_SPROM8_OPT_CORRX_TEMPCORRX 0xfc00
+#define SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT 10
+#define SSB_SPROM8_OPT_CORRX_TEMP_OPTION 0x0300
+#define SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT 8
+/* FOC: frequency offset correction, HWIQ: H/W IQCAL enable, IQSWP: IQ CAL swap disable */
+#define SSB_SPROM8_HWIQ_IQSWP 0x00B8
+#define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR 0x000f
+#define SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT 0
+#define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP 0x0010
+#define SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT 4
+#define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL 0x0020
+#define SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT 5
+#define SSB_SPROM8_TEMPDELTA 0x00BC
+#define SSB_SPROM8_TEMPDELTA_PHYCAL 0x00ff
+#define SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT 0
+#define SSB_SPROM8_TEMPDELTA_PERIOD 0x0f00
+#define SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT 8
+#define SSB_SPROM8_TEMPDELTA_HYSTERESIS 0xf000
+#define SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT 12
+
+/* There are 4 blocks with power info sharing the same layout */
+#define SSB_SROM8_PWR_INFO_CORE0 0x00C0
+#define SSB_SROM8_PWR_INFO_CORE1 0x00E0
+#define SSB_SROM8_PWR_INFO_CORE2 0x0100
+#define SSB_SROM8_PWR_INFO_CORE3 0x0120
+
+#define SSB_SROM8_2G_MAXP_ITSSI 0x00
+#define SSB_SPROM8_2G_MAXP 0x00FF
+#define SSB_SPROM8_2G_ITSSI 0xFF00
+#define SSB_SPROM8_2G_ITSSI_SHIFT 8
+#define SSB_SROM8_2G_PA_0 0x02 /* 2GHz power amp settings */
+#define SSB_SROM8_2G_PA_1 0x04
+#define SSB_SROM8_2G_PA_2 0x06
+#define SSB_SROM8_5G_MAXP_ITSSI 0x08 /* 5GHz ITSSI and 5.3GHz Max Power */
+#define SSB_SPROM8_5G_MAXP 0x00FF
+#define SSB_SPROM8_5G_ITSSI 0xFF00
+#define SSB_SPROM8_5G_ITSSI_SHIFT 8
+#define SSB_SPROM8_5GHL_MAXP 0x0A /* 5.2GHz and 5.8GHz Max Power */
+#define SSB_SPROM8_5GH_MAXP 0x00FF
+#define SSB_SPROM8_5GL_MAXP 0xFF00
+#define SSB_SPROM8_5GL_MAXP_SHIFT 8
+#define SSB_SROM8_5G_PA_0 0x0C /* 5.3GHz power amp settings */
+#define SSB_SROM8_5G_PA_1 0x0E
+#define SSB_SROM8_5G_PA_2 0x10
+#define SSB_SROM8_5GL_PA_0 0x12 /* 5.2GHz power amp settings */
+#define SSB_SROM8_5GL_PA_1 0x14
+#define SSB_SROM8_5GL_PA_2 0x16
+#define SSB_SROM8_5GH_PA_0 0x18 /* 5.8GHz power amp settings */
+#define SSB_SROM8_5GH_PA_1 0x1A
+#define SSB_SROM8_5GH_PA_2 0x1C
+
+/* TODO: Make it deprecated */
+#define SSB_SPROM8_MAXP_BG 0x00C0 /* Max Power 2GHz in path 1 */
+#define SSB_SPROM8_MAXP_BG_MASK 0x00FF /* Mask for Max Power 2GHz */
+#define SSB_SPROM8_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */
+#define SSB_SPROM8_ITSSI_BG_SHIFT 8
+#define SSB_SPROM8_PA0B0 0x00C2 /* 2GHz power amp settings */
+#define SSB_SPROM8_PA0B1 0x00C4
+#define SSB_SPROM8_PA0B2 0x00C6
+#define SSB_SPROM8_MAXP_A 0x00C8 /* Max Power 5.3GHz */
+#define SSB_SPROM8_MAXP_A_MASK 0x00FF /* Mask for Max Power 5.3GHz */
+#define SSB_SPROM8_ITSSI_A 0xFF00 /* Mask for path 1 itssi_a */
+#define SSB_SPROM8_ITSSI_A_SHIFT 8
+#define SSB_SPROM8_MAXP_AHL 0x00CA /* Max Power 5.2/5.8GHz */
+#define SSB_SPROM8_MAXP_AH_MASK 0x00FF /* Mask for Max Power 5.8GHz */
+#define SSB_SPROM8_MAXP_AL_MASK 0xFF00 /* Mask for Max Power 5.2GHz */
+#define SSB_SPROM8_MAXP_AL_SHIFT 8
+#define SSB_SPROM8_PA1B0 0x00CC /* 5.3GHz power amp settings */
+#define SSB_SPROM8_PA1B1 0x00CE
+#define SSB_SPROM8_PA1B2 0x00D0
+#define SSB_SPROM8_PA1LOB0 0x00D2 /* 5.2GHz power amp settings */
+#define SSB_SPROM8_PA1LOB1 0x00D4
+#define SSB_SPROM8_PA1LOB2 0x00D6
+#define SSB_SPROM8_PA1HIB0 0x00D8 /* 5.8GHz power amp settings */
+#define SSB_SPROM8_PA1HIB1 0x00DA
+#define SSB_SPROM8_PA1HIB2 0x00DC
+
+#define SSB_SPROM8_CCK2GPO 0x0140 /* CCK power offset */
+#define SSB_SPROM8_OFDM2GPO 0x0142 /* 2.4GHz OFDM power offset */
+#define SSB_SPROM8_OFDM5GPO 0x0146 /* 5.3GHz OFDM power offset */
+#define SSB_SPROM8_OFDM5GLPO 0x014A /* 5.2GHz OFDM power offset */
+#define SSB_SPROM8_OFDM5GHPO 0x014E /* 5.8GHz OFDM power offset */
+
+#define SSB_SPROM8_2G_MCSPO 0x0152
+#define SSB_SPROM8_5G_MCSPO 0x0162
+#define SSB_SPROM8_5GL_MCSPO 0x0172
+#define SSB_SPROM8_5GH_MCSPO 0x0182
+
+#define SSB_SPROM8_CDDPO 0x0192
+#define SSB_SPROM8_STBCPO 0x0194
+#define SSB_SPROM8_BW40PO 0x0196
+#define SSB_SPROM8_BWDUPPO 0x0198
+
+/* Values for boardflags_lo read from SPROM */
+#define SSB_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistence */
+#define SSB_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */
+#define SSB_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */
+#define SSB_BFL_RSSI 0x0008 /* software calculates nrssi slope. */
+#define SSB_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */
+#define SSB_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */
+#define SSB_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */
+#define SSB_BFL_ENETADM 0x0080 /* has ADMtek switch */
+#define SSB_BFL_ENETVLAN 0x0100 /* can do vlan */
+#define SSB_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */
+#define SSB_BFL_NOPCI 0x0400 /* board leaves PCI floating */
+#define SSB_BFL_FEM 0x0800 /* supports the Front End Module */
+#define SSB_BFL_EXTLNA 0x1000 /* has an external LNA */
+#define SSB_BFL_HGPA 0x2000 /* has a high gain PA */
+#define SSB_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */
+#define SSB_BFL_ALTIQ 0x8000 /* alternate I/Q settings */
+
+/* Values for boardflags_hi read from SPROM */
+#define SSB_BFH_NOPA 0x0001 /* has no PA */
+#define SSB_BFH_RSSIINV 0x0002 /* RSSI uses positive slope (not TSSI) */
+#define SSB_BFH_PAREF 0x0004 /* uses the PARef LDO */
+#define SSB_BFH_3TSWITCH 0x0008 /* uses a triple throw switch shared with bluetooth */
+#define SSB_BFH_PHASESHIFT 0x0010 /* can support phase shifter */
+#define SSB_BFH_BUCKBOOST 0x0020 /* has buck/booster */
+#define SSB_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna with bluetooth */
+
+/* Values for boardflags2_lo read from SPROM */
+#define SSB_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */
+#define SSB_BFL2_APLL_WAR 0x0002 /* alternative A-band PLL settings implemented */
+#define SSB_BFL2_TXPWRCTRL_EN 0x0004 /* permits enabling TX Power Control */
+#define SSB_BFL2_2X4_DIV 0x0008 /* 2x4 diversity switch */
+#define SSB_BFL2_5G_PWRGAIN 0x0010 /* supports 5G band power gain */
+#define SSB_BFL2_PCIEWAR_OVR 0x0020 /* overrides ASPM and Clkreq settings */
+#define SSB_BFL2_CAESERS_BRD 0x0040 /* is Caesers board (unused) */
+#define SSB_BFL2_BTC3WIRE 0x0080 /* uses 3-wire Bluetooth coexistence */
+#define SSB_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */
+#define SSB_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */
+#define SSB_BFL2_GPLL_WAR 0x0400 /* alternative G-band PLL settings implemented */
+
+/* Values for SSB_SPROM1_BINF_CCODE */
+enum {
+ SSB_SPROM1CCODE_WORLD = 0,
+ SSB_SPROM1CCODE_THAILAND,
+ SSB_SPROM1CCODE_ISRAEL,
+ SSB_SPROM1CCODE_JORDAN,
+ SSB_SPROM1CCODE_CHINA,
+ SSB_SPROM1CCODE_JAPAN,
+ SSB_SPROM1CCODE_USA_CANADA_ANZ,
+ SSB_SPROM1CCODE_EUROPE,
+ SSB_SPROM1CCODE_USA_LOW,
+ SSB_SPROM1CCODE_JAPAN_HIGH,
+ SSB_SPROM1CCODE_ALL,
+ SSB_SPROM1CCODE_NONE,
+};
+
+/* Address-Match values and masks (SSB_ADMATCHxxx) */
+#define SSB_ADM_TYPE 0x00000003 /* Address type */
+#define SSB_ADM_TYPE0 0
+#define SSB_ADM_TYPE1 1
+#define SSB_ADM_TYPE2 2
+#define SSB_ADM_AD64 0x00000004
+#define SSB_ADM_SZ0 0x000000F8 /* Type0 size */
+#define SSB_ADM_SZ0_SHIFT 3
+#define SSB_ADM_SZ1 0x000001F8 /* Type1 size */
+#define SSB_ADM_SZ1_SHIFT 3
+#define SSB_ADM_SZ2 0x000001F8 /* Type2 size */
+#define SSB_ADM_SZ2_SHIFT 3
+#define SSB_ADM_EN 0x00000400 /* Enable */
+#define SSB_ADM_NEG 0x00000800 /* Negative decode */
+#define SSB_ADM_BASE0 0xFFFFFF00 /* Type0 base address */
+#define SSB_ADM_BASE0_SHIFT 8
+#define SSB_ADM_BASE1 0xFFFFF000 /* Type1 base address for the core */
+#define SSB_ADM_BASE1_SHIFT 12
+#define SSB_ADM_BASE2 0xFFFF0000 /* Type2 base address for the core */
+#define SSB_ADM_BASE2_SHIFT 16
+
+
+#endif /* LINUX_SSB_REGS_H_ */
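The SPROM constants pair an offset with per-field masks and matching _SHIFT values. A hedged sketch of decoding one rev-8 word from a raw SPROM image follows; it assumes the image is held as an array of u16 (the SPROM is only readable in two-byte quantities, as noted above) and that the offsets are byte offsets into that image.

#include <linux/types.h>
#include <linux/ssb/ssb_regs.h>

/* Extract the per-antenna gain values from the rev-8 AGAIN01 word. */
static void example_decode_again(const u16 *sprom, u8 *a0, u8 *a1)
{
        u16 v = sprom[SSB_SPROM8_AGAIN01 / sizeof(u16)];

        *a0 = (v & SSB_SPROM8_AGAIN0) >> SSB_SPROM8_AGAIN0_SHIFT;
        *a1 = (v & SSB_SPROM8_AGAIN1) >> SSB_SPROM8_AGAIN1_SHIFT;
}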
diff --git a/include/linux/ssbi.h b/include/linux/ssbi.h
new file mode 100644
index 000000000..087b08a4d
--- /dev/null
+++ b/include/linux/ssbi.h
@@ -0,0 +1,43 @@
+/* Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ * Author: Dima Zavin <dima@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_SSBI_H
+#define _LINUX_SSBI_H
+
+#include <linux/types.h>
+
+int ssbi_write(struct device *dev, u16 addr, const u8 *buf, int len);
+int ssbi_read(struct device *dev, u16 addr, u8 *buf, int len);
+
+static inline int
+ssbi_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ int ret;
+ u8 v;
+
+ ret = ssbi_read(context, reg, &v, 1);
+ if (!ret)
+ *val = v;
+
+ return ret;
+}
+
+static inline int
+ssbi_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ u8 v = val;
+ return ssbi_write(context, reg, &v, 1);
+}
+
+#endif
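ssbi_reg_read() and ssbi_reg_write() are shaped as context-based register accessors, so a read-modify-write is straightforward. A hedged sketch, assuming the context pointer is the SSBI controller's struct device and that the register is 8 bits wide; the helper name is illustrative.

#include <linux/device.h>
#include <linux/ssbi.h>

static int example_ssbi_set_bit(struct device *dev, u16 reg, unsigned int bit)
{
        unsigned int val;
        int ret;

        ret = ssbi_reg_read(dev, reg, &val);
        if (ret)
                return ret;
        return ssbi_reg_write(dev, reg, val | (1u << bit));
}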
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
new file mode 100644
index 000000000..6f3e54c70
--- /dev/null
+++ b/include/linux/stackprotector.h
@@ -0,0 +1,16 @@
+#ifndef _LINUX_STACKPROTECTOR_H
+#define _LINUX_STACKPROTECTOR_H 1
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+# include <asm/stackprotector.h>
+#else
+static inline void boot_init_stack_canary(void)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
new file mode 100644
index 000000000..0a34489a4
--- /dev/null
+++ b/include/linux/stacktrace.h
@@ -0,0 +1,40 @@
+#ifndef __LINUX_STACKTRACE_H
+#define __LINUX_STACKTRACE_H
+
+#include <linux/types.h>
+
+struct task_struct;
+struct pt_regs;
+
+#ifdef CONFIG_STACKTRACE
+struct stack_trace {
+ unsigned int nr_entries, max_entries;
+ unsigned long *entries;
+ int skip; /* input argument: How many entries to skip */
+};
+
+extern void save_stack_trace(struct stack_trace *trace);
+extern void save_stack_trace_regs(struct pt_regs *regs,
+ struct stack_trace *trace);
+extern void save_stack_trace_tsk(struct task_struct *tsk,
+ struct stack_trace *trace);
+
+extern void print_stack_trace(struct stack_trace *trace, int spaces);
+extern int snprint_stack_trace(char *buf, size_t size,
+ struct stack_trace *trace, int spaces);
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+extern void save_stack_trace_user(struct stack_trace *trace);
+#else
+# define save_stack_trace_user(trace) do { } while (0)
+#endif
+
+#else
+# define save_stack_trace(trace) do { } while (0)
+# define save_stack_trace_tsk(tsk, trace) do { } while (0)
+# define save_stack_trace_user(trace) do { } while (0)
+# define print_stack_trace(trace, spaces) do { } while (0)
+# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
+#endif
+
+#endif
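A small sketch of the capture-and-print flow. It assumes CONFIG_STACKTRACE is enabled (struct stack_trace is only defined in that case); the buffer size and skip depth are illustrative.

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void example_dump_here(void)
{
        unsigned long entries[16];
        struct stack_trace trace = {
                .max_entries    = ARRAY_SIZE(entries),
                .entries        = entries,
                .skip           = 1,    /* skip this helper itself */
        };

        /* Capture up to 16 return addresses from the current context
         * and print them, one per line, indented by four spaces. */
        save_stack_trace(&trace);
        print_stack_trace(&trace, 4);
}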
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h
new file mode 100644
index 000000000..d3e5f2756
--- /dev/null
+++ b/include/linux/start_kernel.h
@@ -0,0 +1,12 @@
+#ifndef _LINUX_START_KERNEL_H
+#define _LINUX_START_KERNEL_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+/* Define the prototype for start_kernel here, rather than cluttering
+ up something else. */
+
+extern asmlinkage void __init start_kernel(void);
+
+#endif /* _LINUX_START_KERNEL_H */
diff --git a/include/linux/stat.h b/include/linux/stat.h
new file mode 100644
index 000000000..075cb0c7e
--- /dev/null
+++ b/include/linux/stat.h
@@ -0,0 +1,37 @@
+#ifndef _LINUX_STAT_H
+#define _LINUX_STAT_H
+
+
+#include <asm/stat.h>
+#include <uapi/linux/stat.h>
+
+#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO)
+#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
+#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH)
+#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
+#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
+
+#define UTIME_NOW ((1l << 30) - 1l)
+#define UTIME_OMIT ((1l << 30) - 2l)
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/uidgid.h>
+
+struct kstat {
+ u64 ino;
+ dev_t dev;
+ umode_t mode;
+ unsigned int nlink;
+ kuid_t uid;
+ kgid_t gid;
+ dev_t rdev;
+ loff_t size;
+ struct timespec atime;
+ struct timespec mtime;
+ struct timespec ctime;
+ unsigned long blksize;
+ unsigned long long blocks;
+};
+
+#endif
diff --git a/include/linux/statfs.h b/include/linux/statfs.h
new file mode 100644
index 000000000..0166d320a
--- /dev/null
+++ b/include/linux/statfs.h
@@ -0,0 +1,43 @@
+#ifndef _LINUX_STATFS_H
+#define _LINUX_STATFS_H
+
+#include <linux/types.h>
+#include <asm/statfs.h>
+
+struct kstatfs {
+ long f_type;
+ long f_bsize;
+ u64 f_blocks;
+ u64 f_bfree;
+ u64 f_bavail;
+ u64 f_files;
+ u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ long f_namelen;
+ long f_frsize;
+ long f_flags;
+ long f_spare[4];
+};
+
+/*
+ * Definitions for the flag in f_flag.
+ *
+ * Generally these flags are equivalent to the MS_ flags used in the mount
+ * ABI. The exception is ST_VALID which has the same value as MS_REMOUNT
+ * which doesn't make any sense for statfs.
+ */
+#define ST_RDONLY 0x0001 /* mount read-only */
+#define ST_NOSUID 0x0002 /* ignore suid and sgid bits */
+#define ST_NODEV 0x0004 /* disallow access to device special files */
+#define ST_NOEXEC 0x0008 /* disallow program execution */
+#define ST_SYNCHRONOUS 0x0010 /* writes are synced at once */
+#define ST_VALID 0x0020 /* f_flags support is implemented */
+#define ST_MANDLOCK 0x0040 /* allow mandatory locks on an FS */
+/* 0x0080 used for ST_WRITE in glibc */
+/* 0x0100 used for ST_APPEND in glibc */
+/* 0x0200 used for ST_IMMUTABLE in glibc */
+#define ST_NOATIME 0x0400 /* do not update access times */
+#define ST_NODIRATIME 0x0800 /* do not update directory access times */
+#define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */
+
+#endif
diff --git a/include/linux/static_key.h b/include/linux/static_key.h
new file mode 100644
index 000000000..27bd3f8a0
--- /dev/null
+++ b/include/linux/static_key.h
@@ -0,0 +1 @@
+#include <linux/jump_label.h>
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
new file mode 100644
index 000000000..076af4372
--- /dev/null
+++ b/include/linux/stddef.h
@@ -0,0 +1,30 @@
+#ifndef _LINUX_STDDEF_H
+#define _LINUX_STDDEF_H
+
+#include <uapi/linux/stddef.h>
+
+
+#undef NULL
+#define NULL ((void *)0)
+
+enum {
+ false = 0,
+ true = 1
+};
+
+#undef offsetof
+#ifdef __compiler_offsetof
+#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
+#else
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+/**
+ * offsetofend(TYPE, MEMBER)
+ *
+ * @TYPE: The type of the structure
+ * @MEMBER: The member within the structure to get the end offset of
+ */
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
+#endif
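offsetofend() is the companion to offsetof(): it yields the offset of the first byte past a member, which is convenient for "parsed at least up to and including field X" length checks. A sketch with a purely illustrative struct:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_hdr {
        u8  version;
        u16 len;
        u32 crc;
};

/* Bytes needed to cover the header up to and including "len". */
#define EXAMPLE_HDR_MIN_SIZE    offsetofend(struct example_hdr, len)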
diff --git a/include/linux/ste_modem_shm.h b/include/linux/ste_modem_shm.h
new file mode 100644
index 000000000..8444a4eff
--- /dev/null
+++ b/include/linux/ste_modem_shm.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2012
+ * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifndef __INC_MODEM_DEV_H
+#define __INC_MODEM_DEV_H
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+struct ste_modem_device;
+
+/**
+ * struct ste_modem_dev_cb - Callbacks for modem initiated events.
+ * @kick: Called when the modem kicks the host.
+ *
+ * This structure contains callbacks for actions triggered by the modem.
+ */
+struct ste_modem_dev_cb {
+ void (*kick)(struct ste_modem_device *mdev, int notify_id);
+};
+
+/**
+ * struct ste_modem_dev_ops - Functions to control modem and modem interface.
+ *
+ * @power: Main power switch, used for cold-start or complete power off.
+ * @kick: Kick the modem.
+ * @kick_subscribe: Subscribe for notifications from the modem.
+ * @setup: Provide callback functions to modem device.
+ *
+ * This structure contains functions used by the ste remoteproc driver
+ * to manage the modem.
+ */
+struct ste_modem_dev_ops {
+ int (*power)(struct ste_modem_device *mdev, bool on);
+ int (*kick)(struct ste_modem_device *mdev, int notify_id);
+ int (*kick_subscribe)(struct ste_modem_device *mdev, int notify_id);
+ int (*setup)(struct ste_modem_device *mdev,
+ struct ste_modem_dev_cb *cfg);
+};
+
+/**
+ * struct ste_modem_device - represent the STE modem device
+ * @pdev: Reference to platform device
+ * @ops: Operations used to manage the modem.
+ * @drv_data: Driver private data.
+ */
+struct ste_modem_device {
+ struct platform_device pdev;
+ struct ste_modem_dev_ops ops;
+ void *drv_data;
+};
+
+#endif /* __INC_MODEM_DEV_H */
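
[Editor's note: a hedged sketch of how a modem driver might wire up the operations described above; it is not part of the patch and all callback bodies are placeholders.]

#include <linux/ste_modem_shm.h>

static int example_power(struct ste_modem_device *mdev, bool on)
{
	return 0;	/* toggle modem power rails here */
}

static int example_kick(struct ste_modem_device *mdev, int notify_id)
{
	return 0;	/* ring the modem doorbell for notify_id */
}

static int example_kick_subscribe(struct ste_modem_device *mdev, int notify_id)
{
	return 0;	/* enable notifications for notify_id */
}

static int example_setup(struct ste_modem_device *mdev,
			 struct ste_modem_dev_cb *cfg)
{
	mdev->drv_data = cfg;	/* remember the remoteproc callbacks */
	return 0;
}

static const struct ste_modem_dev_ops example_modem_ops = {
	.power		= example_power,
	.kick		= example_kick,
	.kick_subscribe	= example_kick_subscribe,
	.setup		= example_setup,
};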
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
new file mode 100644
index 000000000..7f484a239
--- /dev/null
+++ b/include/linux/stmmac.h
@@ -0,0 +1,147 @@
+/*******************************************************************************
+
+ Header file for stmmac platform data
+
+ Copyright (C) 2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __STMMAC_PLATFORM_DATA
+#define __STMMAC_PLATFORM_DATA
+
+#include <linux/platform_device.h>
+
+#define STMMAC_RX_COE_NONE 0
+#define STMMAC_RX_COE_TYPE1 1
+#define STMMAC_RX_COE_TYPE2 2
+
+/* Define the macros for CSR clock range parameters to be passed by
+ * platform code.
+ * This could also be configured at run time using CPU freq framework. */
+
+/* MDC Clock Selection defines */
+#define STMMAC_CSR_60_100M 0x0 /* MDC = clk_scr_i/42 */
+#define STMMAC_CSR_100_150M 0x1 /* MDC = clk_scr_i/62 */
+#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */
+#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */
+#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
+#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
+
+/* The MDC clock could be set higher than the IEEE 802.3
+ * specified frequency limit of 2.5 MHz, by programming a clock divider
+ * of a value different from the above defined values. The resultant MDIO
+ * clock frequency of 12.5 MHz is applicable for the interfacing chips
+ * supporting higher MDC clocks.
+ * The MDC clock selection macros need to be defined for MDC clock rate
+ * of 12.5 MHz, corresponding to the following selection.
+ */
+#define STMMAC_CSR_I_4 0x8 /* clk_csr_i/4 */
+#define STMMAC_CSR_I_6 0x9 /* clk_csr_i/6 */
+#define STMMAC_CSR_I_8 0xA /* clk_csr_i/8 */
+#define STMMAC_CSR_I_10 0xB /* clk_csr_i/10 */
+#define STMMAC_CSR_I_12 0xC /* clk_csr_i/12 */
+#define STMMAC_CSR_I_14 0xD /* clk_csr_i/14 */
+#define STMMAC_CSR_I_16 0xE /* clk_csr_i/16 */
+#define STMMAC_CSR_I_18 0xF /* clk_csr_i/18 */
+
+/* AXI DMA Burst length supported */
+#define DMA_AXI_BLEN_4 (1 << 1)
+#define DMA_AXI_BLEN_8 (1 << 2)
+#define DMA_AXI_BLEN_16 (1 << 3)
+#define DMA_AXI_BLEN_32 (1 << 4)
+#define DMA_AXI_BLEN_64 (1 << 5)
+#define DMA_AXI_BLEN_128 (1 << 6)
+#define DMA_AXI_BLEN_256 (1 << 7)
+#define DMA_AXI_BLEN_ALL (DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16 \
+ | DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
+ | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
+
+/* Platform data for the platform device structure's platform_data field */
+
+struct stmmac_mdio_bus_data {
+ int (*phy_reset)(void *priv);
+ unsigned int phy_mask;
+ int *irqs;
+ int probed_phy_irq;
+#ifdef CONFIG_OF
+ int reset_gpio, active_low;
+ u32 delays[3];
+#endif
+};
+
+struct stmmac_dma_cfg {
+ int pbl;
+ int fixed_burst;
+ int mixed_burst;
+ int burst_len;
+};
+
+struct plat_stmmacenet_data {
+ char *phy_bus_name;
+ int bus_id;
+ int phy_addr;
+ int interface;
+ struct stmmac_mdio_bus_data *mdio_bus_data;
+ struct stmmac_dma_cfg *dma_cfg;
+ int clk_csr;
+ int has_gmac;
+ int enh_desc;
+ int tx_coe;
+ int rx_coe;
+ int bugged_jumbo;
+ int pmt;
+ int force_sf_dma_mode;
+ int force_thresh_dma_mode;
+ int riwt_off;
+ int max_speed;
+ int maxmtu;
+ int multicast_filter_bins;
+ int unicast_filter_entries;
+ int tx_fifo_size;
+ int rx_fifo_size;
+ void (*fix_mac_speed)(void *priv, unsigned int speed);
+ void (*bus_setup)(void __iomem *ioaddr);
+ void *(*setup)(struct platform_device *pdev);
+ void (*free)(struct platform_device *pdev, void *priv);
+ int (*init)(struct platform_device *pdev, void *priv);
+ void (*exit)(struct platform_device *pdev, void *priv);
+ void *custom_cfg;
+ void *custom_data;
+ void *bsp_priv;
+};
+
+/* of_data for SoC glue layer device tree bindings */
+
+struct stmmac_of_data {
+ int has_gmac;
+ int enh_desc;
+ int tx_coe;
+ int rx_coe;
+ int bugged_jumbo;
+ int pmt;
+ int riwt_off;
+ void (*fix_mac_speed)(void *priv, unsigned int speed);
+ void (*bus_setup)(void __iomem *ioaddr);
+ void *(*setup)(struct platform_device *pdev);
+ void (*free)(struct platform_device *pdev, void *priv);
+ int (*init)(struct platform_device *pdev, void *priv);
+ void (*exit)(struct platform_device *pdev, void *priv);
+};
+#endif
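
[Editor's note: a hedged board-file sketch showing how the CSR clock and AXI burst macros above typically land in the platform data; it is not part of the patch and every value is illustrative.]

#include <linux/stmmac.h>

static struct stmmac_dma_cfg example_dma_cfg = {
	.pbl		= 8,
	.fixed_burst	= 1,
	.burst_len	= DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16,
};

static struct plat_stmmacenet_data example_stmmac_pdata = {
	.bus_id		= 0,
	.phy_addr	= 0,
	.clk_csr	= STMMAC_CSR_100_150M,	/* CSR clock between 100 and 150 MHz */
	.has_gmac	= 1,
	.dma_cfg	= &example_dma_cfg,
};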
diff --git a/include/linux/stmp3xxx_rtc_wdt.h b/include/linux/stmp3xxx_rtc_wdt.h
new file mode 100644
index 000000000..1dd12c962
--- /dev/null
+++ b/include/linux/stmp3xxx_rtc_wdt.h
@@ -0,0 +1,15 @@
+/*
+ * stmp3xxx_rtc_wdt.h
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This file is released under the GPLv2.
+ */
+#ifndef __LINUX_STMP3XXX_RTC_WDT_H
+#define __LINUX_STMP3XXX_RTC_WDT_H
+
+struct stmp3xxx_wdt_pdata {
+ void (*wdt_set_timeout)(struct device *dev, u32 timeout);
+};
+
+#endif /* __LINUX_STMP3XXX_RTC_WDT_H */
diff --git a/include/linux/stmp_device.h b/include/linux/stmp_device.h
new file mode 100644
index 000000000..6cf7ec954
--- /dev/null
+++ b/include/linux/stmp_device.h
@@ -0,0 +1,20 @@
+/*
+ * basic functions for devices following the "stmp" style register layout
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMP_DEVICE_H__
+#define __STMP_DEVICE_H__
+
+#define STMP_OFFSET_REG_SET 0x4
+#define STMP_OFFSET_REG_CLR 0x8
+#define STMP_OFFSET_REG_TOG 0xc
+
+extern int stmp_reset_block(void __iomem *);
+#endif /* __STMP_DEVICE_H__ */
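
[Editor's note: the SET/CLR/TOG mirrors above allow bit updates without a read-modify-write cycle. A small hypothetical example, not part of the patch; the register name and bit are invented.]

#include <linux/io.h>
#include <linux/stmp_device.h>

#define EXAMPLE_CTRL		0x00		/* invented register */
#define EXAMPLE_CTRL_CLKGATE	(1 << 30)	/* invented bit */

static void example_ungate_block(void __iomem *base)
{
	/* a write to the CLR mirror clears only the bits set in the value */
	writel(EXAMPLE_CTRL_CLKGATE, base + EXAMPLE_CTRL + STMP_OFFSET_REG_CLR);
}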
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
new file mode 100644
index 000000000..d2abbdb8c
--- /dev/null
+++ b/include/linux/stop_machine.h
@@ -0,0 +1,157 @@
+#ifndef _LINUX_STOP_MACHINE
+#define _LINUX_STOP_MACHINE
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/smp.h>
+#include <linux/list.h>
+
+/*
+ * stop_cpu[s]() is a simplistic per-cpu maximum priority cpu
+ * monopolization mechanism. The caller can specify a non-sleeping
+ * function to be executed on a single or multiple cpus preempting all
+ * other processes and monopolizing those cpus until it finishes.
+ *
+ * Resources for this mechanism are preallocated when a cpu is brought
+ * up and requests are guaranteed to be served as long as the target
+ * cpus are online.
+ */
+typedef int (*cpu_stop_fn_t)(void *arg);
+
+#ifdef CONFIG_SMP
+
+struct cpu_stop_work {
+ struct list_head list; /* cpu_stopper->works */
+ cpu_stop_fn_t fn;
+ void *arg;
+ struct cpu_stop_done *done;
+};
+
+int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
+int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
+void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+ struct cpu_stop_work *work_buf);
+int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+
+#else /* CONFIG_SMP */
+
+#include <linux/workqueue.h>
+
+struct cpu_stop_work {
+ struct work_struct work;
+ cpu_stop_fn_t fn;
+ void *arg;
+};
+
+static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
+{
+ int ret = -ENOENT;
+ preempt_disable();
+ if (cpu == smp_processor_id())
+ ret = fn(arg);
+ preempt_enable();
+ return ret;
+}
+
+static void stop_one_cpu_nowait_workfn(struct work_struct *work)
+{
+ struct cpu_stop_work *stwork =
+ container_of(work, struct cpu_stop_work, work);
+ preempt_disable();
+ stwork->fn(stwork->arg);
+ preempt_enable();
+}
+
+static inline void stop_one_cpu_nowait(unsigned int cpu,
+ cpu_stop_fn_t fn, void *arg,
+ struct cpu_stop_work *work_buf)
+{
+ if (cpu == smp_processor_id()) {
+ INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
+ work_buf->fn = fn;
+ work_buf->arg = arg;
+ schedule_work(&work_buf->work);
+ }
+}
+
+static inline int stop_cpus(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg)
+{
+ if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
+ return stop_one_cpu(raw_smp_processor_id(), fn, arg);
+ return -ENOENT;
+}
+
+static inline int try_stop_cpus(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg)
+{
+ return stop_cpus(cpumask, fn, arg);
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * stop_machine "Bogolock": stop the entire machine, disable
+ * interrupts. This is a very heavy lock, which is equivalent to
+ * grabbing every spinlock (and more). So the "read" side to such a
+ * lock is anything which disables preemption.
+ */
+#if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+
+/**
+ * stop_machine: freeze the machine on all CPUs and run this function
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * Description: This causes a thread to be scheduled on every cpu,
+ * each of which disables interrupts. The result is that no one is
+ * holding a spinlock or inside any other preempt-disabled region when
+ * @fn() runs.
+ *
+ * This can be thought of as a very heavy write lock, equivalent to
+ * grabbing every spinlock in the kernel. */
+int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+
+/**
+ * __stop_machine: freeze the machine on all CPUs and run this function
+ * @fn: the function to run
+ * @data: the data ptr for the @fn
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * Description: This is a special version of the above, which assumes cpus
+ * won't come or go while it's being called. Used by hotplug cpu.
+ */
+int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+ const struct cpumask *cpus);
+
+#else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+
+static inline int __stop_machine(int (*fn)(void *), void *data,
+ const struct cpumask *cpus)
+{
+ unsigned long flags;
+ int ret;
+ local_irq_save(flags);
+ ret = fn(data);
+ local_irq_restore(flags);
+ return ret;
+}
+
+static inline int stop_machine(int (*fn)(void *), void *data,
+ const struct cpumask *cpus)
+{
+ return __stop_machine(fn, data, cpus);
+}
+
+static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+ const struct cpumask *cpus)
+{
+ return __stop_machine(fn, data, cpus);
+}
+
+#endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
+#endif /* _LINUX_STOP_MACHINE */
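
[Editor's note: a hedged usage sketch of stop_machine(); the callback must not sleep and runs with interrupts disabled while every other online CPU spins. Not part of the patch; the names are invented.]

#include <linux/stop_machine.h>

static int example_flip_flag(void *data)
{
	*(int *)data = 1;	/* no other CPU can observe an intermediate state */
	return 0;
}

static int example_update(void)
{
	int flag = 0;

	/* NULL cpumask: run the callback on any one online CPU */
	return stop_machine(example_flip_flag, &flag, NULL);
}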
diff --git a/include/linux/string.h b/include/linux/string.h
new file mode 100644
index 000000000..e40099e58
--- /dev/null
+++ b/include/linux/string.h
@@ -0,0 +1,161 @@
+#ifndef _LINUX_STRING_H_
+#define _LINUX_STRING_H_
+
+
+#include <linux/compiler.h> /* for inline */
+#include <linux/types.h> /* for size_t */
+#include <linux/stddef.h> /* for NULL */
+#include <stdarg.h>
+#include <uapi/linux/string.h>
+
+extern char *strndup_user(const char __user *, long);
+extern void *memdup_user(const void __user *, size_t);
+
+/*
+ * Include machine specific inline routines
+ */
+#include <asm/string.h>
+
+#ifndef __HAVE_ARCH_STRCPY
+extern char * strcpy(char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCPY
+extern char * strncpy(char *,const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRLCPY
+size_t strlcpy(char *, const char *, size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCAT
+extern char * strcat(char *, const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCAT
+extern char * strncat(char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRLCAT
+extern size_t strlcat(char *, const char *, __kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCMP
+extern int strcmp(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNCMP
+extern int strncmp(const char *,const char *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRCASECMP
+extern int strcasecmp(const char *s1, const char *s2);
+#endif
+#ifndef __HAVE_ARCH_STRNCASECMP
+extern int strncasecmp(const char *s1, const char *s2, size_t n);
+#endif
+#ifndef __HAVE_ARCH_STRCHR
+extern char * strchr(const char *,int);
+#endif
+#ifndef __HAVE_ARCH_STRCHRNUL
+extern char * strchrnul(const char *,int);
+#endif
+#ifndef __HAVE_ARCH_STRNCHR
+extern char * strnchr(const char *, size_t, int);
+#endif
+#ifndef __HAVE_ARCH_STRRCHR
+extern char * strrchr(const char *,int);
+#endif
+extern char * __must_check skip_spaces(const char *);
+
+extern char *strim(char *);
+
+static inline __must_check char *strstrip(char *str)
+{
+ return strim(str);
+}
+
+#ifndef __HAVE_ARCH_STRSTR
+extern char * strstr(const char *, const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNSTR
+extern char * strnstr(const char *, const char *, size_t);
+#endif
+#ifndef __HAVE_ARCH_STRLEN
+extern __kernel_size_t strlen(const char *);
+#endif
+#ifndef __HAVE_ARCH_STRNLEN
+extern __kernel_size_t strnlen(const char *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_STRPBRK
+extern char * strpbrk(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRSEP
+extern char * strsep(char **,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRSPN
+extern __kernel_size_t strspn(const char *,const char *);
+#endif
+#ifndef __HAVE_ARCH_STRCSPN
+extern __kernel_size_t strcspn(const char *,const char *);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET
+extern void * memset(void *,int,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCPY
+extern void * memcpy(void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMMOVE
+extern void * memmove(void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMSCAN
+extern void * memscan(void *,int,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *,const void *,__kernel_size_t);
+#endif
+#ifndef __HAVE_ARCH_MEMCHR
+extern void * memchr(const void *,int,__kernel_size_t);
+#endif
+void *memchr_inv(const void *s, int c, size_t n);
+
+extern void kfree_const(const void *x);
+
+extern char *kstrdup(const char *s, gfp_t gfp);
+extern const char *kstrdup_const(const char *s, gfp_t gfp);
+extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
+extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
+
+extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
+extern void argv_free(char **argv);
+
+extern bool sysfs_streq(const char *s1, const char *s2);
+extern int strtobool(const char *s, bool *res);
+
+#ifdef CONFIG_BINARY_PRINTF
+int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
+int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
+int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
+#endif
+
+extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+ const void *from, size_t available);
+
+/**
+ * strstarts - does @str start with @prefix?
+ * @str: string to examine
+ * @prefix: prefix to look for.
+ */
+static inline bool strstarts(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
+size_t memweight(const void *ptr, size_t bytes);
+void memzero_explicit(void *s, size_t count);
+
+/**
+ * kbasename - return the last part of a pathname.
+ *
+ * @path: path to extract the filename from.
+ */
+static inline const char *kbasename(const char *path)
+{
+ const char *tail = strrchr(path, '/');
+ return tail ? tail + 1 : path;
+}
+
+#endif /* _LINUX_STRING_H_ */
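
[Editor's note: a small illustrative use of strstarts() and kbasename() declared above; not part of the patch, and the policy check itself is made up.]

#include <linux/string.h>
#include <linux/types.h>

static bool example_is_uevent_attr(const char *path)
{
	return strstarts(path, "/sys/") &&
	       strcmp(kbasename(path), "uevent") == 0;
}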
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
new file mode 100644
index 000000000..71f711db4
--- /dev/null
+++ b/include/linux/string_helpers.h
@@ -0,0 +1,71 @@
+#ifndef _LINUX_STRING_HELPERS_H_
+#define _LINUX_STRING_HELPERS_H_
+
+#include <linux/types.h>
+
+/* Descriptions of the types of units to
+ * print in */
+enum string_size_units {
+ STRING_UNITS_10, /* use powers of 10^3 (standard SI) */
+ STRING_UNITS_2, /* use binary powers of 2^10 */
+};
+
+void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
+ char *buf, int len);
+
+#define UNESCAPE_SPACE 0x01
+#define UNESCAPE_OCTAL 0x02
+#define UNESCAPE_HEX 0x04
+#define UNESCAPE_SPECIAL 0x08
+#define UNESCAPE_ANY \
+ (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL)
+
+int string_unescape(char *src, char *dst, size_t size, unsigned int flags);
+
+static inline int string_unescape_inplace(char *buf, unsigned int flags)
+{
+ return string_unescape(buf, buf, 0, flags);
+}
+
+static inline int string_unescape_any(char *src, char *dst, size_t size)
+{
+ return string_unescape(src, dst, size, UNESCAPE_ANY);
+}
+
+static inline int string_unescape_any_inplace(char *buf)
+{
+ return string_unescape_any(buf, buf, 0);
+}
+
+#define ESCAPE_SPACE 0x01
+#define ESCAPE_SPECIAL 0x02
+#define ESCAPE_NULL 0x04
+#define ESCAPE_OCTAL 0x08
+#define ESCAPE_ANY \
+ (ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL)
+#define ESCAPE_NP 0x10
+#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
+#define ESCAPE_HEX 0x20
+
+int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
+ unsigned int flags, const char *esc);
+
+static inline int string_escape_mem_any_np(const char *src, size_t isz,
+ char *dst, size_t osz, const char *esc)
+{
+ return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
+}
+
+static inline int string_escape_str(const char *src, char *dst, size_t sz,
+ unsigned int flags, const char *esc)
+{
+ return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
+}
+
+static inline int string_escape_str_any_np(const char *src, char *dst,
+ size_t sz, const char *esc)
+{
+ return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
+}
+
+#endif
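
[Editor's note: a hedged sketch of the escape helpers above, copying a string while escaping non-printable bytes; not part of the patch, buffer sizing is left to the caller.]

#include <linux/string.h>
#include <linux/string_helpers.h>

static int example_escape_nonprintable(const char *raw, char *out, size_t outlen)
{
	/* returns the number of characters the escaped form needs */
	return string_escape_str(raw, out, outlen, ESCAPE_ANY_NP, NULL);
}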
diff --git a/include/linux/stringify.h b/include/linux/stringify.h
new file mode 100644
index 000000000..841cec8ed
--- /dev/null
+++ b/include/linux/stringify.h
@@ -0,0 +1,12 @@
+#ifndef __LINUX_STRINGIFY_H
+#define __LINUX_STRINGIFY_H
+
+/* Indirect stringification. Doing two levels allows the parameter to be a
+ * macro itself. For example, compile with -DFOO=bar, __stringify(FOO)
+ * converts to "bar".
+ */
+
+#define __stringify_1(x...) #x
+#define __stringify(x...) __stringify_1(x)
+
+#endif /* !__LINUX_STRINGIFY_H */
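
[Editor's note: a tiny illustration of the two-level expansion described in the comment above; not part of the patch, the macro name is invented.]

#include <linux/stringify.h>

#define EXAMPLE_IRQ	42

/* expands to "42" because EXAMPLE_IRQ is expanded before stringification */
static const char example_irq_name[] = __stringify(EXAMPLE_IRQ);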
diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h
new file mode 100644
index 000000000..377b8a578
--- /dev/null
+++ b/include/linux/sudmac.h
@@ -0,0 +1,52 @@
+/*
+ * Header for the SUDMAC driver
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#ifndef SUDMAC_H
+#define SUDMAC_H
+
+#include <linux/dmaengine.h>
+#include <linux/shdma-base.h>
+#include <linux/types.h>
+
+/* Used by slave DMA clients to request DMA to/from a specific peripheral */
+struct sudmac_slave {
+ struct shdma_slave shdma_slave; /* Set by the platform */
+};
+
+/*
+ * Supplied by platforms to specify how a DMA channel has to be configured for
+ * a certain peripheral
+ */
+struct sudmac_slave_config {
+ int slave_id;
+};
+
+struct sudmac_channel {
+ unsigned long offset;
+ unsigned long config;
+ unsigned long wait; /* The configurable range is 0 to 3 */
+ unsigned long dint_end_bit;
+};
+
+struct sudmac_pdata {
+ const struct sudmac_slave_config *slave;
+ int slave_num;
+ const struct sudmac_channel *channel;
+ int channel_num;
+};
+
+/* Definitions for the sudmac_channel.config */
+#define SUDMAC_TX_BUFFER_MODE BIT(0)
+#define SUDMAC_RX_END_MODE BIT(1)
+
+/* Definitions for the sudmac_channel.dint_end_bit */
+#define SUDMAC_DMA_BIT_CH0 BIT(0)
+#define SUDMAC_DMA_BIT_CH1 BIT(1)
+
+#endif
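
[Editor's note: a hypothetical platform definition built from the structures and channel flags above; not part of the patch, offsets and IDs are placeholders.]

#include <linux/kernel.h>
#include <linux/sudmac.h>

static const struct sudmac_slave_config example_slaves[] = {
	{ .slave_id = 0 },
	{ .slave_id = 1 },
};

static const struct sudmac_channel example_channels[] = {
	{
		.offset		= 0x00,
		.config		= SUDMAC_TX_BUFFER_MODE,
		.wait		= 3,	/* within the 0 to 3 range */
		.dint_end_bit	= SUDMAC_DMA_BIT_CH0,
	},
};

static struct sudmac_pdata example_sudmac_pdata = {
	.slave		= example_slaves,
	.slave_num	= ARRAY_SIZE(example_slaves),
	.channel	= example_channels,
	.channel_num	= ARRAY_SIZE(example_channels),
};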
diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h
new file mode 100644
index 000000000..bd9be9f59
--- /dev/null
+++ b/include/linux/sungem_phy.h
@@ -0,0 +1,132 @@
+#ifndef __SUNGEM_PHY_H__
+#define __SUNGEM_PHY_H__
+
+struct mii_phy;
+
+/* Operations supported by any kind of PHY */
+struct mii_phy_ops
+{
+ int (*init)(struct mii_phy *phy);
+ int (*suspend)(struct mii_phy *phy);
+ int (*setup_aneg)(struct mii_phy *phy, u32 advertise);
+ int (*setup_forced)(struct mii_phy *phy, int speed, int fd);
+ int (*poll_link)(struct mii_phy *phy);
+ int (*read_link)(struct mii_phy *phy);
+ int (*enable_fiber)(struct mii_phy *phy, int autoneg);
+};
+
+/* Structure used to statically define an mii/gii based PHY */
+struct mii_phy_def
+{
+ u32 phy_id; /* Concatenated ID1 << 16 | ID2 */
+ u32 phy_id_mask; /* Significant bits */
+ u32 features; /* Ethtool SUPPORTED_* defines */
+ int magic_aneg; /* Autoneg does all speed test for us */
+ const char* name;
+ const struct mii_phy_ops* ops;
+};
+
+enum {
+ BCM54XX_COPPER,
+ BCM54XX_FIBER,
+ BCM54XX_GBIC,
+ BCM54XX_SGMII,
+ BCM54XX_UNKNOWN,
+};
+
+/* An instance of a PHY, partially borrowed from mii_if_info */
+struct mii_phy
+{
+ struct mii_phy_def* def;
+ u32 advertising;
+ int mii_id;
+
+ /* 1: autoneg enabled, 0: disabled */
+ int autoneg;
+
+ /* forced speed & duplex (no autoneg)
+ * partner speed & duplex & pause (autoneg)
+ */
+ int speed;
+ int duplex;
+ int pause;
+
+ /* Provided by host chip */
+ struct net_device *dev;
+ int (*mdio_read) (struct net_device *dev, int mii_id, int reg);
+ void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val);
+ void *platform_data;
+};
+
+/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
+ * filled in; the remaining fields will be filled on return
+ */
+extern int sungem_phy_probe(struct mii_phy *phy, int mii_id);
+
+
+/* MII definitions missing from mii.h */
+
+#define BMCR_SPD2 0x0040 /* Gigabit enable (bcm54xx) */
+#define LPA_PAUSE 0x0400
+
+/* More PHY registers (model specific) */
+
+/* MII BCM5201 MULTIPHY interrupt register */
+#define MII_BCM5201_INTERRUPT 0x1A
+#define MII_BCM5201_INTERRUPT_INTENABLE 0x4000
+
+#define MII_BCM5201_AUXMODE2 0x1B
+#define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008
+
+#define MII_BCM5201_MULTIPHY 0x1E
+
+/* MII BCM5201 MULTIPHY register bits */
+#define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002
+#define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008
+
+/* MII BCM5221 Additional registers */
+#define MII_BCM5221_TEST 0x1f
+#define MII_BCM5221_TEST_ENABLE_SHADOWS 0x0080
+#define MII_BCM5221_SHDOW_AUX_STAT2 0x1b
+#define MII_BCM5221_SHDOW_AUX_STAT2_APD 0x0020
+#define MII_BCM5221_SHDOW_AUX_MODE4 0x1a
+#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001
+#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004
+
+/* MII BCM5241 Additional registers */
+#define MII_BCM5241_SHDOW_AUX_MODE4_STANDBYPWR 0x0008
+
+/* MII BCM5400 1000-BASET Control register */
+#define MII_BCM5400_GB_CONTROL 0x09
+#define MII_BCM5400_GB_CONTROL_FULLDUPLEXCAP 0x0200
+
+/* MII BCM5400 AUXCONTROL register */
+#define MII_BCM5400_AUXCONTROL 0x18
+#define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004
+
+/* MII BCM5400 AUXSTATUS register */
+#define MII_BCM5400_AUXSTATUS 0x19
+#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700
+#define MII_BCM5400_AUXSTATUS_LINKMODE_SHIFT 8
+
+/* 1000BT control (Marvell & BCM54xx at least) */
+#define MII_1000BASETCONTROL 0x09
+#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
+#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
+
+/* Marvell 88E1011 PHY control */
+#define MII_M1011_PHY_SPEC_CONTROL 0x10
+#define MII_M1011_PHY_SPEC_CONTROL_MANUAL_MDIX 0x20
+#define MII_M1011_PHY_SPEC_CONTROL_AUTO_MDIX 0x40
+
+/* Marvell 88E1011 PHY status */
+#define MII_M1011_PHY_SPEC_STATUS 0x11
+#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
+#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
+#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
+#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
+#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
+#define MII_M1011_PHY_SPEC_STATUS_TX_PAUSE 0x0008
+#define MII_M1011_PHY_SPEC_STATUS_RX_PAUSE 0x0004
+
+#endif /* __SUNGEM_PHY_H__ */
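
[Editor's note: a hedged sketch of the probing contract described above — the host driver supplies dev plus the MDIO accessors, then sungem_phy_probe() fills in the rest. Not part of the patch; the accessor functions are assumed to exist in the host driver.]

#include <linux/netdevice.h>
#include <linux/sungem_phy.h>

static int example_mdio_read(struct net_device *dev, int mii_id, int reg);
static void example_mdio_write(struct net_device *dev, int mii_id, int reg, int val);

static int example_attach_phy(struct net_device *ndev, int mii_id,
			      struct mii_phy *phy)
{
	phy->dev = ndev;
	phy->mdio_read = example_mdio_read;
	phy->mdio_write = example_mdio_write;

	return sungem_phy_probe(phy, mii_id);	/* fills def, features, ... */
}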
diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
new file mode 100644
index 000000000..07d8e53be
--- /dev/null
+++ b/include/linux/sunrpc/addr.h
@@ -0,0 +1,170 @@
+/*
+ * linux/include/linux/sunrpc/addr.h
+ *
+ * Various routines for copying and comparing sockaddrs and for
+ * converting them to and from presentation format.
+ */
+#ifndef _LINUX_SUNRPC_ADDR_H
+#define _LINUX_SUNRPC_ADDR_H
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <net/ipv6.h>
+
+size_t rpc_ntop(const struct sockaddr *, char *, const size_t);
+size_t rpc_pton(struct net *, const char *, const size_t,
+ struct sockaddr *, const size_t);
+char * rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
+size_t rpc_uaddr2sockaddr(struct net *, const char *, const size_t,
+ struct sockaddr *, const size_t);
+
+static inline unsigned short rpc_get_port(const struct sockaddr *sap)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ return ntohs(((struct sockaddr_in *)sap)->sin_port);
+ case AF_INET6:
+ return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
+ }
+ return 0;
+}
+
+static inline void rpc_set_port(struct sockaddr *sap,
+ const unsigned short port)
+{
+ switch (sap->sa_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)sap)->sin_port = htons(port);
+ break;
+ case AF_INET6:
+ ((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
+ break;
+ }
+}
+
+#define IPV6_SCOPE_DELIMITER '%'
+#define IPV6_SCOPE_ID_LEN sizeof("%nnnnnnnnnn")
+
+static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
+ const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;
+
+ return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
+}
+
+static inline bool __rpc_copy_addr4(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
+ struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
+
+ dsin->sin_family = ssin->sin_family;
+ dsin->sin_addr.s_addr = ssin->sin_addr.s_addr;
+ return true;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
+ const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;
+
+ if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
+ return false;
+ else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ return sin1->sin6_scope_id == sin2->sin6_scope_id;
+
+ return true;
+}
+
+static inline bool __rpc_copy_addr6(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src;
+ struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;
+
+ dsin6->sin6_family = ssin6->sin6_family;
+ dsin6->sin6_addr = ssin6->sin6_addr;
+ dsin6->sin6_scope_id = ssin6->sin6_scope_id;
+ return true;
+}
+#else /* !(IS_ENABLED(CONFIG_IPV6) */
+static inline bool __rpc_cmp_addr6(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ return false;
+}
+
+static inline bool __rpc_copy_addr6(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ return false;
+}
+#endif /* !(IS_ENABLED(CONFIG_IPV6) */
+
+/**
+ * rpc_cmp_addr - compare the address portion of two sockaddrs.
+ * @sap1: first sockaddr
+ * @sap2: second sockaddr
+ *
+ * Just compares the family and address portion. Ignores port, but
+ * compares the scope if it's a link-local address.
+ *
+ * Returns true if the addrs are equal, false if they aren't.
+ */
+static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
+ const struct sockaddr *sap2)
+{
+ if (sap1->sa_family == sap2->sa_family) {
+ switch (sap1->sa_family) {
+ case AF_INET:
+ return __rpc_cmp_addr4(sap1, sap2);
+ case AF_INET6:
+ return __rpc_cmp_addr6(sap1, sap2);
+ }
+ }
+ return false;
+}
+
+/**
+ * rpc_copy_addr - copy the address portion of one sockaddr to another
+ * @dst: destination sockaddr
+ * @src: source sockaddr
+ *
+ * Just copies the address portion and family. Ignores port, scope, etc.
+ * Caller is responsible for making certain that dst is large enough to hold
+ * the address in src. Returns true if address family is supported. Returns
+ * false otherwise.
+ */
+static inline bool rpc_copy_addr(struct sockaddr *dst,
+ const struct sockaddr *src)
+{
+ switch (src->sa_family) {
+ case AF_INET:
+ return __rpc_copy_addr4(dst, src);
+ case AF_INET6:
+ return __rpc_copy_addr6(dst, src);
+ }
+ return false;
+}
+
+/**
+ * rpc_get_scope_id - return scopeid for a given sockaddr
+ * @sa: sockaddr to get scopeid from
+ *
+ * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if
+ * not an AF_INET6 address.
+ */
+static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
+{
+ if (sa->sa_family != AF_INET6)
+ return 0;
+
+ return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
+}
+
+#endif /* _LINUX_SUNRPC_ADDR_H */
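
[Editor's note: a small illustrative combination of the helpers above — treat two endpoints as the same host (ignoring port), then pin the destination to a well-known port. Not part of the patch; the helper itself is invented.]

#include <linux/types.h>
#include <linux/sunrpc/addr.h>

static bool example_same_host_set_port(struct sockaddr *dst,
				       const struct sockaddr *src,
				       unsigned short port)
{
	if (!rpc_cmp_addr(dst, src))
		return false;

	rpc_set_port(dst, port);
	return true;
}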
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
new file mode 100644
index 000000000..a7cbb570c
--- /dev/null
+++ b/include/linux/sunrpc/auth.h
@@ -0,0 +1,198 @@
+/*
+ * linux/include/linux/sunrpc/auth.h
+ *
+ * Declarations for the RPC client authentication machinery.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_AUTH_H
+#define _LINUX_SUNRPC_AUTH_H
+
+#ifdef __KERNEL__
+
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/xdr.h>
+
+#include <linux/atomic.h>
+#include <linux/rcupdate.h>
+#include <linux/uidgid.h>
+
+/* size of the nodename buffer */
+#define UNX_MAXNODENAME 32
+
+struct rpcsec_gss_info;
+
+/* auth_cred ac_flags bits */
+enum {
+ RPC_CRED_NO_CRKEY_TIMEOUT = 0, /* underlying cred has no key timeout */
+ RPC_CRED_KEY_EXPIRE_SOON = 1, /* underlying cred key will expire soon */
+ RPC_CRED_NOTIFY_TIMEOUT = 2, /* notify generic cred when underlying
+ key will expire soon */
+};
+
+/* Work around the lack of a VFS credential */
+struct auth_cred {
+ kuid_t uid;
+ kgid_t gid;
+ struct group_info *group_info;
+ const char *principal;
+ unsigned long ac_flags;
+ unsigned char machine_cred : 1;
+};
+
+/*
+ * Client user credentials
+ */
+struct rpc_auth;
+struct rpc_credops;
+struct rpc_cred {
+ struct hlist_node cr_hash; /* hash chain */
+ struct list_head cr_lru; /* lru garbage collection */
+ struct rcu_head cr_rcu;
+ struct rpc_auth * cr_auth;
+ const struct rpc_credops *cr_ops;
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ unsigned long cr_magic; /* 0x0f4aa4f0 */
+#endif
+ unsigned long cr_expire; /* when to gc */
+ unsigned long cr_flags; /* various flags */
+ atomic_t cr_count; /* ref count */
+
+ kuid_t cr_uid;
+
+ /* per-flavor data */
+};
+#define RPCAUTH_CRED_NEW 0
+#define RPCAUTH_CRED_UPTODATE 1
+#define RPCAUTH_CRED_HASHED 2
+#define RPCAUTH_CRED_NEGATIVE 3
+
+#define RPCAUTH_CRED_MAGIC 0x0f4aa4f0
+
+/*
+ * Client authentication handle
+ */
+struct rpc_cred_cache;
+struct rpc_authops;
+struct rpc_auth {
+ unsigned int au_cslack; /* call cred size estimate */
+ /* guess at number of u32's auth adds before
+ * reply data; normally the verifier size: */
+ unsigned int au_rslack;
+ /* for gss, used to calculate au_rslack: */
+ unsigned int au_verfsize;
+
+ unsigned int au_flags; /* various flags */
+ const struct rpc_authops *au_ops; /* operations */
+ rpc_authflavor_t au_flavor; /* pseudoflavor (note may
+ * differ from the flavor in
+ * au_ops->au_flavor in gss
+ * case) */
+ atomic_t au_count; /* Reference counter */
+
+ struct rpc_cred_cache * au_credcache;
+ /* per-flavor data */
+};
+
+struct rpc_auth_create_args {
+ rpc_authflavor_t pseudoflavor;
+ const char *target_name;
+};
+
+/* Flags for rpcauth_lookupcred() */
+#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
+#define RPCAUTH_LOOKUP_RCU 0x02 /* lock-less lookup */
+
+/*
+ * Client authentication ops
+ */
+struct rpc_authops {
+ struct module *owner;
+ rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */
+ char * au_name;
+ struct rpc_auth * (*create)(struct rpc_auth_create_args *, struct rpc_clnt *);
+ void (*destroy)(struct rpc_auth *);
+
+ struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
+ struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int);
+ int (*list_pseudoflavors)(rpc_authflavor_t *, int);
+ rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *);
+ int (*flavor2info)(rpc_authflavor_t,
+ struct rpcsec_gss_info *);
+ int (*key_timeout)(struct rpc_auth *,
+ struct rpc_cred *);
+};
+
+struct rpc_credops {
+ const char * cr_name; /* Name of the auth flavour */
+ int (*cr_init)(struct rpc_auth *, struct rpc_cred *);
+ void (*crdestroy)(struct rpc_cred *);
+
+ int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
+ struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int);
+ __be32 * (*crmarshal)(struct rpc_task *, __be32 *);
+ int (*crrefresh)(struct rpc_task *);
+ __be32 * (*crvalidate)(struct rpc_task *, __be32 *);
+ int (*crwrap_req)(struct rpc_task *, kxdreproc_t,
+ void *, __be32 *, void *);
+ int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
+ void *, __be32 *, void *);
+ int (*crkey_timeout)(struct rpc_cred *);
+ bool (*crkey_to_expire)(struct rpc_cred *);
+ char * (*crstringify_acceptor)(struct rpc_cred *);
+};
+
+extern const struct rpc_authops authunix_ops;
+extern const struct rpc_authops authnull_ops;
+
+int __init rpc_init_authunix(void);
+int __init rpc_init_generic_auth(void);
+int __init rpcauth_init_module(void);
+void rpcauth_remove_module(void);
+void rpc_destroy_generic_auth(void);
+void rpc_destroy_authunix(void);
+
+struct rpc_cred * rpc_lookup_cred(void);
+struct rpc_cred * rpc_lookup_cred_nonblock(void);
+struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
+int rpcauth_register(const struct rpc_authops *);
+int rpcauth_unregister(const struct rpc_authops *);
+struct rpc_auth * rpcauth_create(struct rpc_auth_create_args *,
+ struct rpc_clnt *);
+void rpcauth_release(struct rpc_auth *);
+rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t,
+ struct rpcsec_gss_info *);
+int rpcauth_get_gssinfo(rpc_authflavor_t,
+ struct rpcsec_gss_info *);
+int rpcauth_list_flavors(rpc_authflavor_t *, int);
+struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int);
+void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
+struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
+struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
+void put_rpccred(struct rpc_cred *);
+__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
+__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
+int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
+int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
+int rpcauth_refreshcred(struct rpc_task *);
+void rpcauth_invalcred(struct rpc_task *);
+int rpcauth_uptodatecred(struct rpc_task *);
+int rpcauth_init_credcache(struct rpc_auth *);
+void rpcauth_destroy_credcache(struct rpc_auth *);
+void rpcauth_clear_credcache(struct rpc_cred_cache *);
+int rpcauth_key_timeout_notify(struct rpc_auth *,
+ struct rpc_cred *);
+bool rpcauth_cred_key_to_expire(struct rpc_cred *);
+char * rpcauth_stringify_acceptor(struct rpc_cred *);
+
+static inline
+struct rpc_cred * get_rpccred(struct rpc_cred *cred)
+{
+ atomic_inc(&cred->cr_count);
+ return cred;
+}
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SUNRPC_AUTH_H */
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
new file mode 100644
index 000000000..36eebc451
--- /dev/null
+++ b/include/linux/sunrpc/auth_gss.h
@@ -0,0 +1,92 @@
+/*
+ * linux/include/linux/sunrpc/auth_gss.h
+ *
+ * Declarations for RPCSEC_GSS
+ *
+ * Dug Song <dugsong@monkey.org>
+ * Andy Adamson <andros@umich.edu>
+ * Bruce Fields <bfields@umich.edu>
+ * Copyright (c) 2000 The Regents of the University of Michigan
+ */
+
+#ifndef _LINUX_SUNRPC_AUTH_GSS_H
+#define _LINUX_SUNRPC_AUTH_GSS_H
+
+#ifdef __KERNEL__
+#include <linux/sunrpc/auth.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/gss_api.h>
+
+#define RPC_GSS_VERSION 1
+
+#define MAXSEQ 0x80000000 /* maximum legal sequence number, from rfc 2203 */
+
+enum rpc_gss_proc {
+ RPC_GSS_PROC_DATA = 0,
+ RPC_GSS_PROC_INIT = 1,
+ RPC_GSS_PROC_CONTINUE_INIT = 2,
+ RPC_GSS_PROC_DESTROY = 3
+};
+
+enum rpc_gss_svc {
+ RPC_GSS_SVC_NONE = 1,
+ RPC_GSS_SVC_INTEGRITY = 2,
+ RPC_GSS_SVC_PRIVACY = 3
+};
+
+/* on-the-wire gss cred: */
+struct rpc_gss_wire_cred {
+ u32 gc_v; /* version */
+ u32 gc_proc; /* control procedure */
+ u32 gc_seq; /* sequence number */
+ u32 gc_svc; /* service */
+ struct xdr_netobj gc_ctx; /* context handle */
+};
+
+/* on-the-wire gss verifier: */
+struct rpc_gss_wire_verf {
+ u32 gv_flavor;
+ struct xdr_netobj gv_verf;
+};
+
+/* return from gss NULL PROC init sec context */
+struct rpc_gss_init_res {
+ struct xdr_netobj gr_ctx; /* context handle */
+ u32 gr_major; /* major status */
+ u32 gr_minor; /* minor status */
+ u32 gr_win; /* sequence window */
+ struct xdr_netobj gr_token; /* token */
+};
+
+/* The gss_cl_ctx struct holds all the information the rpcsec_gss client
+ * code needs to know about a single security context. In particular,
+ * gc_gss_ctx is the context handle that is used to do gss-api calls, while
+ * gc_wire_ctx is the context handle that is used to identify the context on
+ * the wire when communicating with a server. */
+
+struct gss_cl_ctx {
+ atomic_t count;
+ enum rpc_gss_proc gc_proc;
+ u32 gc_seq;
+ spinlock_t gc_seq_lock;
+ struct gss_ctx *gc_gss_ctx;
+ struct xdr_netobj gc_wire_ctx;
+ struct xdr_netobj gc_acceptor;
+ u32 gc_win;
+ unsigned long gc_expiry;
+ struct rcu_head gc_rcu;
+};
+
+struct gss_upcall_msg;
+struct gss_cred {
+ struct rpc_cred gc_base;
+ enum rpc_gss_svc gc_service;
+ struct gss_cl_ctx __rcu *gc_ctx;
+ struct gss_upcall_msg *gc_upcall;
+ const char *gc_principal;
+ unsigned long gc_upcall_timestamp;
+};
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SUNRPC_AUTH_GSS_H */
+
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
new file mode 100644
index 000000000..2ca67b55e
--- /dev/null
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -0,0 +1,68 @@
+/******************************************************************************
+
+(c) 2008 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions to create and manage the backchannel
+ */
+
+#ifndef _LINUX_SUNRPC_BC_XPRT_H
+#define _LINUX_SUNRPC_BC_XPRT_H
+
+#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+
+#ifdef CONFIG_SUNRPC_BACKCHANNEL
+struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
+void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
+void xprt_free_bc_request(struct rpc_rqst *req);
+int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
+void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
+int bc_send(struct rpc_rqst *req);
+
+/*
+ * Determine if a shared backchannel is in use
+ */
+static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
+{
+ if (rqstp->rq_server->sv_bc_xprt)
+ return 1;
+ return 0;
+}
+#else /* CONFIG_SUNRPC_BACKCHANNEL */
+static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
+ unsigned int min_reqs)
+{
+ return 0;
+}
+
+static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
+{
+ return 0;
+}
+
+static inline void xprt_free_bc_request(struct rpc_rqst *req)
+{
+}
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+#endif /* _LINUX_SUNRPC_BC_XPRT_H */
+
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
new file mode 100644
index 000000000..437ddb6c4
--- /dev/null
+++ b/include/linux/sunrpc/cache.h
@@ -0,0 +1,298 @@
+/*
+ * include/linux/sunrpc/cache.h
+ *
+ * Generic code for various authentication-related caches
+ * used by sunrpc clients and servers.
+ *
+ * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
+ *
+ * Released under terms in GPL version 2. See COPYING.
+ *
+ */
+
+#ifndef _LINUX_SUNRPC_CACHE_H_
+#define _LINUX_SUNRPC_CACHE_H_
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include <linux/proc_fs.h>
+
+/*
+ * Each cache requires:
+ * - A 'struct cache_detail' which contains information specific to the cache
+ * for common code to use.
+ * - An item structure that must contain a "struct cache_head"
+ * - A lookup function defined using DefineCacheLookup
+ * - A 'put' function that can release a cache item. It will only
+ * be called after cache_put has succeeded, so it is guaranteed that
+ * there are no remaining references.
+ * - A function to calculate a hash of an item's key.
+ *
+ * as well as assorted code fragments (e.g. compare keys) and numbers
+ * (e.g. hash size, goal_age, etc).
+ *
+ * Each cache must be registered so that it can be cleaned regularly.
+ * When the cache is unregistered, it is flushed completely.
+ *
+ * Entries have a ref count and a 'hashed' flag which counts the existence
+ * in the hash table.
+ * We only expire entries when refcount is zero.
+ * Existence in the cache is counted by the refcount.
+ */
+
+/* Every cache item has a common header that is used
+ * for expiring and refreshing entries.
+ *
+ */
+struct cache_head {
+ struct cache_head * next;
+ time_t expiry_time; /* After this time, don't use the data */
+ time_t last_refresh; /* If CACHE_PENDING, this is when upcall
+ * was sent, else this is when update was received
+ */
+ struct kref ref;
+ unsigned long flags;
+};
+#define CACHE_VALID 0 /* Entry contains valid data */
+#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */
+#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/
+#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */
+
+#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
+
+struct cache_detail_procfs {
+ struct proc_dir_entry *proc_ent;
+ struct proc_dir_entry *flush_ent, *channel_ent, *content_ent;
+};
+
+struct cache_detail_pipefs {
+ struct dentry *dir;
+};
+
+struct cache_detail {
+ struct module * owner;
+ int hash_size;
+ struct cache_head ** hash_table;
+ rwlock_t hash_lock;
+
+ atomic_t inuse; /* active user-space update or lookup */
+
+ char *name;
+ void (*cache_put)(struct kref *);
+
+ int (*cache_upcall)(struct cache_detail *,
+ struct cache_head *);
+
+ void (*cache_request)(struct cache_detail *cd,
+ struct cache_head *ch,
+ char **bpp, int *blen);
+
+ int (*cache_parse)(struct cache_detail *,
+ char *buf, int len);
+
+ int (*cache_show)(struct seq_file *m,
+ struct cache_detail *cd,
+ struct cache_head *h);
+ void (*warn_no_listener)(struct cache_detail *cd,
+ int has_died);
+
+ struct cache_head * (*alloc)(void);
+ int (*match)(struct cache_head *orig, struct cache_head *new);
+ void (*init)(struct cache_head *orig, struct cache_head *new);
+ void (*update)(struct cache_head *orig, struct cache_head *new);
+
+ /* fields below this comment are for internal use
+ * and should not be touched by cache owners
+ */
+ time_t flush_time; /* flush all cache items with last_refresh
+ * earlier than this */
+ struct list_head others;
+ time_t nextcheck;
+ int entries;
+
+ /* fields for communication over channel */
+ struct list_head queue;
+
+ atomic_t readers; /* how many times is /channel open */
+ time_t last_close; /* if no readers, when did last close */
+ time_t last_warn; /* when we last warned about no readers */
+
+ union {
+ struct cache_detail_procfs procfs;
+ struct cache_detail_pipefs pipefs;
+ } u;
+ struct net *net;
+};
+
+
+/* this must be embedded in any request structure that
+ * identifies an object that will want a callback on
+ * a cache fill
+ */
+struct cache_req {
+ struct cache_deferred_req *(*defer)(struct cache_req *req);
+ int thread_wait; /* How long (jiffies) we can block the
+ * current thread to wait for updates.
+ */
+};
+/* this must be embedded in a deferred_request that is being
+ * delayed awaiting cache-fill
+ */
+struct cache_deferred_req {
+ struct hlist_node hash; /* on hash chain */
+ struct list_head recent; /* on fifo */
+ struct cache_head *item; /* cache item we wait on */
+ void *owner; /* we might need to discard all deferred requests
+ * owned by someone */
+ void (*revisit)(struct cache_deferred_req *req,
+ int too_many);
+};
+
+/*
+ * timestamps kept in the cache are expressed in seconds
+ * since boot. This is the best for measuring differences in
+ * real time.
+ */
+static inline time_t seconds_since_boot(void)
+{
+ struct timespec boot;
+ getboottime(&boot);
+ return get_seconds() - boot.tv_sec;
+}
+
+static inline time_t convert_to_wallclock(time_t sinceboot)
+{
+ struct timespec boot;
+ getboottime(&boot);
+ return boot.tv_sec + sinceboot;
+}
+
+extern const struct file_operations cache_file_operations_pipefs;
+extern const struct file_operations content_file_operations_pipefs;
+extern const struct file_operations cache_flush_operations_pipefs;
+
+extern struct cache_head *
+sunrpc_cache_lookup(struct cache_detail *detail,
+ struct cache_head *key, int hash);
+extern struct cache_head *
+sunrpc_cache_update(struct cache_detail *detail,
+ struct cache_head *new, struct cache_head *old, int hash);
+
+extern int
+sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h);
+
+
+extern void cache_clean_deferred(void *owner);
+
+static inline struct cache_head *cache_get(struct cache_head *h)
+{
+ kref_get(&h->ref);
+ return h;
+}
+
+
+static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
+{
+ if (atomic_read(&h->ref.refcount) <= 2 &&
+ h->expiry_time < cd->nextcheck)
+ cd->nextcheck = h->expiry_time;
+ kref_put(&h->ref, cd->cache_put);
+}
+
+static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
+{
+ return (h->expiry_time < seconds_since_boot()) ||
+ (detail->flush_time > h->last_refresh);
+}
+
+extern int cache_check(struct cache_detail *detail,
+ struct cache_head *h, struct cache_req *rqstp);
+extern void cache_flush(void);
+extern void cache_purge(struct cache_detail *detail);
+#define NEVER (0x7FFFFFFF)
+extern void __init cache_initialize(void);
+extern int cache_register_net(struct cache_detail *cd, struct net *net);
+extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
+
+extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net);
+extern void cache_destroy_net(struct cache_detail *cd, struct net *net);
+
+extern void sunrpc_init_cache_detail(struct cache_detail *cd);
+extern void sunrpc_destroy_cache_detail(struct cache_detail *cd);
+extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
+ umode_t, struct cache_detail *);
+extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
+
+extern void qword_add(char **bpp, int *lp, char *str);
+extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
+extern int qword_get(char **bpp, char *dest, int bufsize);
+
+static inline int get_int(char **bpp, int *anint)
+{
+ char buf[50];
+ char *ep;
+ int rv;
+ int len = qword_get(bpp, buf, sizeof(buf));
+
+ if (len < 0)
+ return -EINVAL;
+ if (len == 0)
+ return -ENOENT;
+
+ rv = simple_strtol(buf, &ep, 0);
+ if (*ep)
+ return -EINVAL;
+
+ *anint = rv;
+ return 0;
+}
+
+static inline int get_uint(char **bpp, unsigned int *anint)
+{
+ char buf[50];
+ int len = qword_get(bpp, buf, sizeof(buf));
+
+ if (len < 0)
+ return -EINVAL;
+ if (len == 0)
+ return -ENOENT;
+
+ if (kstrtouint(buf, 0, anint))
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline int get_time(char **bpp, time_t *time)
+{
+ char buf[50];
+ long long ll;
+ int len = qword_get(bpp, buf, sizeof(buf));
+
+ if (len < 0)
+ return -EINVAL;
+ if (len == 0)
+ return -ENOENT;
+
+ if (kstrtoll(buf, 0, &ll))
+ return -EINVAL;
+
+ *time = (time_t)ll;
+ return 0;
+}
+
+static inline time_t get_expiry(char **bpp)
+{
+ time_t rv;
+ struct timespec boot;
+
+ if (get_time(bpp, &rv))
+ return 0;
+ if (rv < 0)
+ return 0;
+ getboottime(&boot);
+ return rv - boot.tv_sec;
+}
+
+#endif /* _LINUX_SUNRPC_CACHE_H_ */
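
[Editor's note: a hedged sketch of how a cache owner's parse routine typically consumes one line of a user-space update using qword_get() and the get_* helpers above. Not part of the patch; the record layout (name, id, expiry) is invented.]

#include <linux/errno.h>
#include <linux/sunrpc/cache.h>

static int example_cache_parse(struct cache_detail *cd, char *mesg, int mlen)
{
	char name[64];
	int id;
	time_t expiry;

	if (qword_get(&mesg, name, sizeof(name)) <= 0)
		return -EINVAL;
	if (get_int(&mesg, &id))
		return -EINVAL;

	expiry = get_expiry(&mesg);
	if (!expiry)
		return -EINVAL;

	/* look up or allocate the item, then store expiry in h->expiry_time */
	return 0;
}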
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
new file mode 100644
index 000000000..598ba80ec
--- /dev/null
+++ b/include/linux/sunrpc/clnt.h
@@ -0,0 +1,185 @@
+/*
+ * linux/include/linux/sunrpc/clnt.h
+ *
+ * Declarations for the high-level RPC client interface
+ *
+ * Copyright (C) 1995, 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_CLNT_H
+#define _LINUX_SUNRPC_CLNT_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/auth.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/timer.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <asm/signal.h>
+#include <linux/path.h>
+#include <net/ipv6.h>
+
+struct rpc_inode;
+
+/*
+ * The high-level client handle
+ */
+struct rpc_clnt {
+ atomic_t cl_count; /* Number of references */
+ unsigned int cl_clid; /* client id */
+ struct list_head cl_clients; /* Global list of clients */
+ struct list_head cl_tasks; /* List of tasks */
+ spinlock_t cl_lock; /* spinlock */
+ struct rpc_xprt __rcu * cl_xprt; /* transport */
+ struct rpc_procinfo * cl_procinfo; /* procedure info */
+ u32 cl_prog, /* RPC program number */
+ cl_vers, /* RPC version number */
+ cl_maxproc; /* max procedure number */
+
+ struct rpc_auth * cl_auth; /* authenticator */
+ struct rpc_stat * cl_stats; /* per-program statistics */
+ struct rpc_iostats * cl_metrics; /* per-client statistics */
+
+ unsigned int cl_softrtry : 1,/* soft timeouts */
+ cl_discrtry : 1,/* disconnect before retry */
+ cl_noretranstimeo: 1,/* No retransmit timeouts */
+ cl_autobind : 1,/* use getport() */
+ cl_chatty : 1;/* be verbose */
+
+ struct rpc_rtt * cl_rtt; /* RTO estimator data */
+ const struct rpc_timeout *cl_timeout; /* Timeout strategy */
+
+ int cl_nodelen; /* nodename length */
+ char cl_nodename[UNX_MAXNODENAME+1];
+ struct rpc_pipe_dir_head cl_pipedir_objects;
+ struct rpc_clnt * cl_parent; /* Points to parent of clones */
+ struct rpc_rtt cl_rtt_default;
+ struct rpc_timeout cl_timeout_default;
+ const struct rpc_program *cl_program;
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ struct dentry *cl_debugfs; /* debugfs directory */
+#endif
+};
+
+/*
+ * General RPC program info
+ */
+#define RPC_MAXVERSION 4
+struct rpc_program {
+ const char * name; /* protocol name */
+ u32 number; /* program number */
+ unsigned int nrvers; /* number of versions */
+ const struct rpc_version ** version; /* version array */
+ struct rpc_stat * stats; /* statistics */
+ const char * pipe_dir_name; /* path to rpc_pipefs dir */
+};
+
+struct rpc_version {
+ u32 number; /* version number */
+ unsigned int nrprocs; /* number of procs */
+ struct rpc_procinfo * procs; /* procedure array */
+};
+
+/*
+ * Procedure information
+ */
+struct rpc_procinfo {
+ u32 p_proc; /* RPC procedure number */
+ kxdreproc_t p_encode; /* XDR encode function */
+ kxdrdproc_t p_decode; /* XDR decode function */
+ unsigned int p_arglen; /* argument hdr length (u32) */
+ unsigned int p_replen; /* reply hdr length (u32) */
+ unsigned int p_count; /* call count */
+ unsigned int p_timer; /* Which RTT timer to use */
+ u32 p_statidx; /* Which procedure to account */
+ const char * p_name; /* name of procedure */
+};
+
+#ifdef __KERNEL__
+
+struct rpc_create_args {
+ struct net *net;
+ int protocol;
+ struct sockaddr *address;
+ size_t addrsize;
+ struct sockaddr *saddress;
+ const struct rpc_timeout *timeout;
+ const char *servername;
+ const char *nodename;
+ const struct rpc_program *program;
+ u32 prognumber; /* overrides program->number */
+ u32 version;
+ rpc_authflavor_t authflavor;
+ unsigned long flags;
+ char *client_name;
+ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
+};
+
+/* Values for "flags" field */
+#define RPC_CLNT_CREATE_HARDRTRY (1UL << 0)
+#define RPC_CLNT_CREATE_AUTOBIND (1UL << 2)
+#define RPC_CLNT_CREATE_NONPRIVPORT (1UL << 3)
+#define RPC_CLNT_CREATE_NOPING (1UL << 4)
+#define RPC_CLNT_CREATE_DISCRTRY (1UL << 5)
+#define RPC_CLNT_CREATE_QUIET (1UL << 6)
+#define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7)
+#define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8)
+#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
+
+struct rpc_clnt *rpc_create(struct rpc_create_args *args);
+struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+ struct rpc_xprt *xprt);
+struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
+ const struct rpc_program *, u32);
+void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
+struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
+struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt *,
+ rpc_authflavor_t);
+int rpc_switch_client_transport(struct rpc_clnt *,
+ struct xprt_create *,
+ const struct rpc_timeout *);
+
+void rpc_shutdown_client(struct rpc_clnt *);
+void rpc_release_client(struct rpc_clnt *);
+void rpc_task_release_client(struct rpc_task *);
+
+int rpcb_create_local(struct net *);
+void rpcb_put_local(struct net *);
+int rpcb_register(struct net *, u32, u32, int, unsigned short);
+int rpcb_v4_register(struct net *net, const u32 program,
+ const u32 version,
+ const struct sockaddr *address,
+ const char *netid);
+void rpcb_getport_async(struct rpc_task *);
+
+void rpc_call_start(struct rpc_task *);
+int rpc_call_async(struct rpc_clnt *clnt,
+ const struct rpc_message *msg, int flags,
+ const struct rpc_call_ops *tk_ops,
+ void *calldata);
+int rpc_call_sync(struct rpc_clnt *clnt,
+ const struct rpc_message *msg, int flags);
+struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
+ int flags);
+int rpc_restart_call_prepare(struct rpc_task *);
+int rpc_restart_call(struct rpc_task *);
+void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
+int rpc_protocol(struct rpc_clnt *);
+struct net * rpc_net_ns(struct rpc_clnt *);
+size_t rpc_max_payload(struct rpc_clnt *);
+unsigned long rpc_get_timeout(struct rpc_clnt *clnt);
+void rpc_force_rebind(struct rpc_clnt *);
+size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
+const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
+int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
+
+const char *rpc_proc_name(const struct rpc_task *task);
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SUNRPC_CLNT_H */
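Taken together, rpc_program/rpc_version/rpc_procinfo and rpc_create_args describe everything rpc_create() needs. The following minimal sketch shows how a module built on clnt.h could wire them up; every demo_* identifier, the program number and the server name are hypothetical and not part of this commit, and real procedures must supply kxdreproc_t/kxdrdproc_t XDR helpers where the sketch leaves them NULL.

#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/stats.h>

static struct rpc_procinfo demo_procedures[] = {
        [0] = {
                .p_proc = 0,            /* NULLPROC; real entries set p_encode/p_decode */
                .p_name = "NULL",
        },
};

static const struct rpc_version demo_version1 = {
        .number  = 1,
        .nrprocs = ARRAY_SIZE(demo_procedures),
        .procs   = demo_procedures,
};

static const struct rpc_version *demo_versions[] = {
        [1] = &demo_version1,           /* indexed by version number */
};

static struct rpc_stat demo_stats;      /* counters, see sunrpc/stats.h below */

static const struct rpc_program demo_program = {
        .name    = "demo",
        .number  = 400042,              /* hypothetical program number */
        .nrvers  = ARRAY_SIZE(demo_versions),
        .version = demo_versions,
        .stats   = &demo_stats,
};

static struct rpc_clnt *demo_create_client(struct net *net,
                                           struct sockaddr *addr, size_t alen)
{
        struct rpc_create_args args = {
                .net        = net,
                .protocol   = IPPROTO_TCP,
                .address    = addr,
                .addrsize   = alen,
                .servername = "demo-server",
                .program    = &demo_program,
                .version    = 1,
                .authflavor = RPC_AUTH_UNIX,
                .flags      = RPC_CLNT_CREATE_NOPING,
        };

        return rpc_create(&args);       /* ERR_PTR() on failure */
}

The client returned here is torn down with rpc_shutdown_client() once all calls have completed.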
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
new file mode 100644
index 000000000..59a7889e1
--- /dev/null
+++ b/include/linux/sunrpc/debug.h
@@ -0,0 +1,107 @@
+/*
+ * linux/include/linux/sunrpc/debug.h
+ *
+ * Debugging support for sunrpc module
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+#ifndef _LINUX_SUNRPC_DEBUG_H_
+#define _LINUX_SUNRPC_DEBUG_H_
+
+#include <uapi/linux/sunrpc/debug.h>
+
+/*
+ * Debugging macros etc
+ */
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+extern unsigned int rpc_debug;
+extern unsigned int nfs_debug;
+extern unsigned int nfsd_debug;
+extern unsigned int nlm_debug;
+#endif
+
+#define dprintk(args...) dfprintk(FACILITY, ## args)
+#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args)
+
+#undef ifdebug
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac))
+
+# define dfprintk(fac, args...) \
+ do { \
+ ifdebug(fac) \
+ printk(KERN_DEFAULT args); \
+ } while (0)
+
+# define dfprintk_rcu(fac, args...) \
+ do { \
+ ifdebug(fac) { \
+ rcu_read_lock(); \
+ printk(KERN_DEFAULT args); \
+ rcu_read_unlock(); \
+ } \
+ } while (0)
+
+# define RPC_IFDEBUG(x) x
+#else
+# define ifdebug(fac) if (0)
+# define dfprintk(fac, args...) do {} while (0)
+# define dfprintk_rcu(fac, args...) do {} while (0)
+# define RPC_IFDEBUG(x)
+#endif
+
+/*
+ * Sysctl interface for RPC debugging
+ */
+
+struct rpc_clnt;
+struct rpc_xprt;
+
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+void rpc_register_sysctl(void);
+void rpc_unregister_sysctl(void);
+void sunrpc_debugfs_init(void);
+void sunrpc_debugfs_exit(void);
+void rpc_clnt_debugfs_register(struct rpc_clnt *);
+void rpc_clnt_debugfs_unregister(struct rpc_clnt *);
+void rpc_xprt_debugfs_register(struct rpc_xprt *);
+void rpc_xprt_debugfs_unregister(struct rpc_xprt *);
+#else
+static inline void
+sunrpc_debugfs_init(void)
+{
+ return;
+}
+
+static inline void
+sunrpc_debugfs_exit(void)
+{
+ return;
+}
+
+static inline void
+rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
+{
+ return;
+}
+
+static inline void
+rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
+{
+ return;
+}
+
+static inline void
+rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
+{
+ return;
+}
+
+static inline void
+rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt)
+{
+ return;
+}
+#endif
+
+#endif /* _LINUX_SUNRPC_DEBUG_H_ */
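A sketch of the dprintk() convention used with this header: a .c file names its facility through RPCDBG_FACILITY (the token that RPCDBG_##fac pastes to), and output appears only when CONFIG_SUNRPC_DEBUG is set and the matching bit is enabled in the rpc_debug sysctl. demo_trace() is hypothetical.

#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/debug.h>

#define RPCDBG_FACILITY         RPCDBG_CALL     /* facility for this file's dprintk()s */

static void demo_trace(struct rpc_task *task)
{
        /* compiled away entirely when CONFIG_SUNRPC_DEBUG is disabled */
        dprintk("RPC: %5u demo_trace status %d\n",
                task->tk_pid, task->tk_status);

        ifdebug(CALL) {
                /* place for more expensive diagnostics, guarded at run time */
        }
}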
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
new file mode 100644
index 000000000..1f911ccb2
--- /dev/null
+++ b/include/linux/sunrpc/gss_api.h
@@ -0,0 +1,162 @@
+/*
+ * linux/include/linux/sunrpc/gss_api.h
+ *
+ * Somewhat simplified version of the gss api.
+ *
+ * Dug Song <dugsong@monkey.org>
+ * Andy Adamson <andros@umich.edu>
+ * Bruce Fields <bfields@umich.edu>
+ * Copyright (c) 2000 The Regents of the University of Michigan
+ */
+
+#ifndef _LINUX_SUNRPC_GSS_API_H
+#define _LINUX_SUNRPC_GSS_API_H
+
+#ifdef __KERNEL__
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/msg_prot.h>
+#include <linux/uio.h>
+
+/* The mechanism-independent gss-api context: */
+struct gss_ctx {
+ struct gss_api_mech *mech_type;
+ void *internal_ctx_id;
+};
+
+#define GSS_C_NO_BUFFER ((struct xdr_netobj) 0)
+#define GSS_C_NO_CONTEXT ((struct gss_ctx *) 0)
+#define GSS_C_QOP_DEFAULT (0)
+
+/*XXX arbitrary length - is this set somewhere? */
+#define GSS_OID_MAX_LEN 32
+struct rpcsec_gss_oid {
+ unsigned int len;
+ u8 data[GSS_OID_MAX_LEN];
+};
+
+/* From RFC 3530 */
+struct rpcsec_gss_info {
+ struct rpcsec_gss_oid oid;
+ u32 qop;
+ u32 service;
+};
+
+/* gss-api prototypes; note that these are somewhat simplified versions of
+ * the prototypes specified in RFC 2744. */
+int gss_import_sec_context(
+ const void* input_token,
+ size_t bufsize,
+ struct gss_api_mech *mech,
+ struct gss_ctx **ctx_id,
+ time_t *endtime,
+ gfp_t gfp_mask);
+u32 gss_get_mic(
+ struct gss_ctx *ctx_id,
+ struct xdr_buf *message,
+ struct xdr_netobj *mic_token);
+u32 gss_verify_mic(
+ struct gss_ctx *ctx_id,
+ struct xdr_buf *message,
+ struct xdr_netobj *mic_token);
+u32 gss_wrap(
+ struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *outbuf,
+ struct page **inpages);
+u32 gss_unwrap(
+ struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *inbuf);
+u32 gss_delete_sec_context(
+ struct gss_ctx **ctx_id);
+
+rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 qop,
+ u32 service);
+u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
+char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
+
+struct pf_desc {
+ u32 pseudoflavor;
+ u32 qop;
+ u32 service;
+ char *name;
+ char *auth_domain_name;
+};
+
+/* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and
+ * mechanisms may be dynamically registered or unregistered by modules. */
+
+/* Each mechanism is described by the following struct: */
+struct gss_api_mech {
+ struct list_head gm_list;
+ struct module *gm_owner;
+ struct rpcsec_gss_oid gm_oid;
+ char *gm_name;
+ const struct gss_api_ops *gm_ops;
+ /* pseudoflavors supported by this mechanism: */
+ int gm_pf_num;
+ struct pf_desc * gm_pfs;
+ /* Should the following be a callback operation instead? */
+ const char *gm_upcall_enctypes;
+};
+
+/* and must provide the following operations: */
+struct gss_api_ops {
+ int (*gss_import_sec_context)(
+ const void *input_token,
+ size_t bufsize,
+ struct gss_ctx *ctx_id,
+ time_t *endtime,
+ gfp_t gfp_mask);
+ u32 (*gss_get_mic)(
+ struct gss_ctx *ctx_id,
+ struct xdr_buf *message,
+ struct xdr_netobj *mic_token);
+ u32 (*gss_verify_mic)(
+ struct gss_ctx *ctx_id,
+ struct xdr_buf *message,
+ struct xdr_netobj *mic_token);
+ u32 (*gss_wrap)(
+ struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *outbuf,
+ struct page **inpages);
+ u32 (*gss_unwrap)(
+ struct gss_ctx *ctx_id,
+ int offset,
+ struct xdr_buf *buf);
+ void (*gss_delete_sec_context)(
+ void *internal_ctx_id);
+};
+
+int gss_mech_register(struct gss_api_mech *);
+void gss_mech_unregister(struct gss_api_mech *);
+
+/* returns a mechanism descriptor given an OID, and increments the mechanism's
+ * reference count. */
+struct gss_api_mech * gss_mech_get_by_OID(struct rpcsec_gss_oid *);
+
+/* Given a GSS security tuple, look up a pseudoflavor */
+rpc_authflavor_t gss_mech_info2flavor(struct rpcsec_gss_info *);
+
+/* Given a pseudoflavor, look up a GSS security tuple */
+int gss_mech_flavor2info(rpc_authflavor_t, struct rpcsec_gss_info *);
+
+/* Returns a reference to a mechanism, given a name like "krb5" etc. */
+struct gss_api_mech *gss_mech_get_by_name(const char *);
+
+/* Similar, but get by pseudoflavor. */
+struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
+
+/* Fill in an array with a list of supported pseudoflavors */
+int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int);
+
+struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
+
+/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
+ * corresponding call to gss_mech_put. */
+void gss_mech_put(struct gss_api_mech *);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SUNRPC_GSS_API_H */
+
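A sketch (the demo_ helper is hypothetical) of the pseudoflavor/mechanism lookups declared above: translate a pseudoflavor to its GSS security tuple and back, and take and drop a mechanism reference.

#include <linux/errno.h>
#include <linux/sunrpc/gss_api.h>

static int demo_flavor_roundtrip(void)
{
        struct rpcsec_gss_info info;
        struct gss_api_mech *mech;
        rpc_authflavor_t flavor;
        int err;

        err = gss_mech_flavor2info(RPC_AUTH_GSS_KRB5I, &info);
        if (err)
                return err;                     /* mechanism module not available */

        flavor = gss_mech_info2flavor(&info);   /* expected to map back to RPC_AUTH_GSS_KRB5I */

        mech = gss_mech_get_by_name("krb5");    /* takes a reference on success */
        if (mech)
                gss_mech_put(mech);             /* every successful get needs a put */

        return flavor == RPC_AUTH_GSS_KRB5I ? 0 : -EINVAL;
}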
diff --git a/include/linux/sunrpc/gss_asn1.h b/include/linux/sunrpc/gss_asn1.h
new file mode 100644
index 000000000..3ccecd0ad
--- /dev/null
+++ b/include/linux/sunrpc/gss_asn1.h
@@ -0,0 +1,81 @@
+/*
+ * linux/include/linux/sunrpc/gss_asn1.h
+ *
+ * minimal asn1 for generic encoding/decoding of gss tokens
+ *
+ * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
+ * lib/gssapi/krb5/gssapiP_krb5.h, and others
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@umich.edu>
+ */
+
+/*
+ * Copyright 1995 by the Massachusetts Institute of Technology.
+ * All Rights Reserved.
+ *
+ * Export of this software from the United States of America may
+ * require a specific license from the United States Government.
+ * It is the responsibility of any person or organization contemplating
+ * export to obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of M.I.T. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. Furthermore if you modify this software you must label
+ * your software as modified software and not distribute it in such a
+ * fashion that it might be confused with the original M.I.T. software.
+ * M.I.T. makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ */
+
+
+#include <linux/sunrpc/gss_api.h>
+
+#define SIZEOF_INT 4
+
+/* from gssapi_err_generic.h */
+#define G_BAD_SERVICE_NAME (-2045022976L)
+#define G_BAD_STRING_UID (-2045022975L)
+#define G_NOUSER (-2045022974L)
+#define G_VALIDATE_FAILED (-2045022973L)
+#define G_BUFFER_ALLOC (-2045022972L)
+#define G_BAD_MSG_CTX (-2045022971L)
+#define G_WRONG_SIZE (-2045022970L)
+#define G_BAD_USAGE (-2045022969L)
+#define G_UNKNOWN_QOP (-2045022968L)
+#define G_NO_HOSTNAME (-2045022967L)
+#define G_BAD_HOSTNAME (-2045022966L)
+#define G_WRONG_MECH (-2045022965L)
+#define G_BAD_TOK_HEADER (-2045022964L)
+#define G_BAD_DIRECTION (-2045022963L)
+#define G_TOK_TRUNC (-2045022962L)
+#define G_REFLECT (-2045022961L)
+#define G_WRONG_TOKID (-2045022960L)
+
+#define g_OID_equal(o1,o2) \
+ (((o1)->len == (o2)->len) && \
+ (memcmp((o1)->data,(o2)->data,(int) (o1)->len) == 0))
+
+u32 g_verify_token_header(
+ struct xdr_netobj *mech,
+ int *body_size,
+ unsigned char **buf_in,
+ int toksize);
+
+int g_token_size(
+ struct xdr_netobj *mech,
+ unsigned int body_size);
+
+void g_make_token_header(
+ struct xdr_netobj *mech,
+ int body_size,
+ unsigned char **buf);
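For illustration, a hedged sketch of how g_token_size() and g_make_token_header() are meant to be paired, assuming (as the krb5 code relies on) that the header routine advances *buf past the header it writes; demo_make_token() is not part of the commit.

#include <linux/slab.h>
#include <linux/sunrpc/gss_asn1.h>

static void *demo_make_token(struct xdr_netobj *mech_oid,
                             unsigned int body_size, unsigned char **body)
{
        int len = g_token_size(mech_oid, body_size);    /* header + mech OID + body */
        unsigned char *buf, *p;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return NULL;
        p = buf;
        g_make_token_header(mech_oid, body_size, &p);   /* assumed to leave p at the body */
        *body = p;                      /* caller writes body_size bytes here */
        return buf;
}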
diff --git a/include/linux/sunrpc/gss_err.h b/include/linux/sunrpc/gss_err.h
new file mode 100644
index 000000000..a6807867b
--- /dev/null
+++ b/include/linux/sunrpc/gss_err.h
@@ -0,0 +1,167 @@
+/*
+ * linux/include/linux/sunrpc/gss_err.h
+ *
+ * Adapted from MIT Kerberos 5-1.2.1 include/gssapi/gssapi.h
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@umich.edu>
+ */
+
+/*
+ * Copyright 1993 by OpenVision Technologies, Inc.
+ *
+ * Permission to use, copy, modify, distribute, and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appears in all copies and
+ * that both that copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of OpenVision not be used
+ * in advertising or publicity pertaining to distribution of the software
+ * without specific, written prior permission. OpenVision makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+ * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+ * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _LINUX_SUNRPC_GSS_ERR_H
+#define _LINUX_SUNRPC_GSS_ERR_H
+
+#ifdef __KERNEL__
+
+typedef unsigned int OM_uint32;
+
+/*
+ * Flag bits for context-level services.
+ */
+#define GSS_C_DELEG_FLAG 1
+#define GSS_C_MUTUAL_FLAG 2
+#define GSS_C_REPLAY_FLAG 4
+#define GSS_C_SEQUENCE_FLAG 8
+#define GSS_C_CONF_FLAG 16
+#define GSS_C_INTEG_FLAG 32
+#define GSS_C_ANON_FLAG 64
+#define GSS_C_PROT_READY_FLAG 128
+#define GSS_C_TRANS_FLAG 256
+
+/*
+ * Credential usage options
+ */
+#define GSS_C_BOTH 0
+#define GSS_C_INITIATE 1
+#define GSS_C_ACCEPT 2
+
+/*
+ * Status code types for gss_display_status
+ */
+#define GSS_C_GSS_CODE 1
+#define GSS_C_MECH_CODE 2
+
+
+/*
+ * Expiration time of 2^32-1 seconds means infinite lifetime for a
+ * credential or security context
+ */
+#define GSS_C_INDEFINITE ((OM_uint32) 0xfffffffful)
+
+
+/* Major status codes */
+
+#define GSS_S_COMPLETE 0
+
+/*
+ * Some "helper" definitions to make the status code macros obvious.
+ */
+#define GSS_C_CALLING_ERROR_OFFSET 24
+#define GSS_C_ROUTINE_ERROR_OFFSET 16
+#define GSS_C_SUPPLEMENTARY_OFFSET 0
+#define GSS_C_CALLING_ERROR_MASK ((OM_uint32) 0377ul)
+#define GSS_C_ROUTINE_ERROR_MASK ((OM_uint32) 0377ul)
+#define GSS_C_SUPPLEMENTARY_MASK ((OM_uint32) 0177777ul)
+
+/*
+ * The macros that test status codes for error conditions. Note that the
+ * GSS_ERROR() macro has changed slightly from the V1 GSSAPI so that it now
+ * evaluates its argument only once.
+ */
+#define GSS_CALLING_ERROR(x) \
+ ((x) & (GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET))
+#define GSS_ROUTINE_ERROR(x) \
+ ((x) & (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))
+#define GSS_SUPPLEMENTARY_INFO(x) \
+ ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET))
+#define GSS_ERROR(x) \
+ ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \
+ (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET)))
+
+/*
+ * Now the actual status code definitions
+ */
+
+/*
+ * Calling errors:
+ */
+#define GSS_S_CALL_INACCESSIBLE_READ \
+ (((OM_uint32) 1ul) << GSS_C_CALLING_ERROR_OFFSET)
+#define GSS_S_CALL_INACCESSIBLE_WRITE \
+ (((OM_uint32) 2ul) << GSS_C_CALLING_ERROR_OFFSET)
+#define GSS_S_CALL_BAD_STRUCTURE \
+ (((OM_uint32) 3ul) << GSS_C_CALLING_ERROR_OFFSET)
+
+/*
+ * Routine errors:
+ */
+#define GSS_S_BAD_MECH (((OM_uint32) 1ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_NAME (((OM_uint32) 2ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_NAMETYPE (((OM_uint32) 3ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_BINDINGS (((OM_uint32) 4ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_STATUS (((OM_uint32) 5ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_SIG (((OM_uint32) 6ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_NO_CRED (((OM_uint32) 7ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_NO_CONTEXT (((OM_uint32) 8ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_DEFECTIVE_TOKEN (((OM_uint32) 9ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_DEFECTIVE_CREDENTIAL \
+ (((OM_uint32) 10ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_CREDENTIALS_EXPIRED \
+ (((OM_uint32) 11ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_CONTEXT_EXPIRED \
+ (((OM_uint32) 12ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_FAILURE (((OM_uint32) 13ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_BAD_QOP (((OM_uint32) 14ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_UNAUTHORIZED (((OM_uint32) 15ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_UNAVAILABLE (((OM_uint32) 16ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_DUPLICATE_ELEMENT \
+ (((OM_uint32) 17ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+#define GSS_S_NAME_NOT_MN \
+ (((OM_uint32) 18ul) << GSS_C_ROUTINE_ERROR_OFFSET)
+
+/*
+ * Supplementary info bits:
+ */
+#define GSS_S_CONTINUE_NEEDED (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 0))
+#define GSS_S_DUPLICATE_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 1))
+#define GSS_S_OLD_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 2))
+#define GSS_S_UNSEQ_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 3))
+#define GSS_S_GAP_TOKEN (1 << (GSS_C_SUPPLEMENTARY_OFFSET + 4))
+
+/* XXXX these are not part of the GSSAPI C bindings! (but should be) */
+
+#define GSS_CALLING_ERROR_FIELD(x) \
+ (((x) >> GSS_C_CALLING_ERROR_OFFSET) & GSS_C_CALLING_ERROR_MASK)
+#define GSS_ROUTINE_ERROR_FIELD(x) \
+ (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)
+#define GSS_SUPPLEMENTARY_INFO_FIELD(x) \
+ (((x) >> GSS_C_SUPPLEMENTARY_OFFSET) & GSS_C_SUPPLEMENTARY_MASK)
+
+/* XXXX This is a necessary evil until the spec is fixed */
+#define GSS_S_CRED_UNAVAIL GSS_S_FAILURE
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SUNRPC_GSS_ERR_H */
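A small sketch of how the status-code macros above are intended to be read; demo_classify() is hypothetical.

#include <linux/sunrpc/gss_err.h>

static const char *demo_classify(OM_uint32 maj_stat)
{
        if (maj_stat == GSS_S_COMPLETE)
                return "complete";
        if (GSS_ERROR(maj_stat)) {
                if (GSS_ROUTINE_ERROR(maj_stat) == GSS_S_CONTEXT_EXPIRED)
                        return "context expired";
                return "fatal calling/routine error";
        }
        if (GSS_SUPPLEMENTARY_INFO(maj_stat) & GSS_S_CONTINUE_NEEDED)
                return "continue needed";
        return "supplementary info only";
}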
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
new file mode 100644
index 000000000..df02a4188
--- /dev/null
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -0,0 +1,331 @@
+/*
+ * linux/include/linux/sunrpc/gss_krb5.h
+ *
+ * Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
+ * lib/gssapi/krb5/gssapiP_krb5.h, and others
+ *
+ * Copyright (c) 2000-2008 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@umich.edu>
+ * Bruce Fields <bfields@umich.edu>
+ */
+
+/*
+ * Copyright 1995 by the Massachusetts Institute of Technology.
+ * All Rights Reserved.
+ *
+ * Export of this software from the United States of America may
+ * require a specific license from the United States Government.
+ * It is the responsibility of any person or organization contemplating
+ * export to obtain such a license before exporting.
+ *
+ * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
+ * distribute this software and its documentation for any purpose and
+ * without fee is hereby granted, provided that the above copyright
+ * notice appear in all copies and that both that copyright notice and
+ * this permission notice appear in supporting documentation, and that
+ * the name of M.I.T. not be used in advertising or publicity pertaining
+ * to distribution of the software without specific, written prior
+ * permission. Furthermore if you modify this software you must label
+ * your software as modified software and not distribute it in such a
+ * fashion that it might be confused with the original M.I.T. software.
+ * M.I.T. makes no representations about the suitability of
+ * this software for any purpose. It is provided "as is" without express
+ * or implied warranty.
+ *
+ */
+
+#include <linux/crypto.h>
+#include <linux/sunrpc/auth_gss.h>
+#include <linux/sunrpc/gss_err.h>
+#include <linux/sunrpc/gss_asn1.h>
+
+/* Length of constant used in key derivation */
+#define GSS_KRB5_K5CLENGTH (5)
+
+/* Maximum key length (in bytes) for the supported crypto algorithms*/
+#define GSS_KRB5_MAX_KEYLEN (32)
+
+/* Maximum checksum function output for the supported crypto algorithms */
+#define GSS_KRB5_MAX_CKSUM_LEN (20)
+
+/* Maximum blocksize for the supported crypto algorithms */
+#define GSS_KRB5_MAX_BLOCKSIZE (16)
+
+struct krb5_ctx;
+
+struct gss_krb5_enctype {
+ const u32 etype; /* encryption (key) type */
+ const u32 ctype; /* checksum type */
+ const char *name; /* "friendly" name */
+ const char *encrypt_name; /* crypto encrypt name */
+ const char *cksum_name; /* crypto checksum name */
+ const u16 signalg; /* signing algorithm */
+ const u16 sealalg; /* sealing algorithm */
+ const u32 blocksize; /* encryption blocksize */
+ const u32 conflen; /* confounder length
+ (normally the same as
+ the blocksize) */
+ const u32 cksumlength; /* checksum length */
+ const u32 keyed_cksum; /* is it a keyed cksum? */
+ const u32 keybytes; /* raw key len, in bytes */
+ const u32 keylength; /* final key len, in bytes */
+ u32 (*encrypt) (struct crypto_blkcipher *tfm,
+ void *iv, void *in, void *out,
+ int length); /* encryption function */
+ u32 (*decrypt) (struct crypto_blkcipher *tfm,
+ void *iv, void *in, void *out,
+ int length); /* decryption function */
+ u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *in,
+ struct xdr_netobj *out); /* complete key generation */
+ u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf,
+ struct page **pages); /* v2 encryption function */
+ u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, u32 *headskip,
+ u32 *tailskip); /* v2 decryption function */
+};
+
+/* krb5_ctx flags definitions */
+#define KRB5_CTX_FLAG_INITIATOR 0x00000001
+#define KRB5_CTX_FLAG_CFX 0x00000002
+#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
+
+struct krb5_ctx {
+ int initiate; /* 1 = initiating, 0 = accepting */
+ u32 enctype;
+ u32 flags;
+ const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
+ struct crypto_blkcipher *enc;
+ struct crypto_blkcipher *seq;
+ struct crypto_blkcipher *acceptor_enc;
+ struct crypto_blkcipher *initiator_enc;
+ struct crypto_blkcipher *acceptor_enc_aux;
+ struct crypto_blkcipher *initiator_enc_aux;
+ u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
+ u8 cksum[GSS_KRB5_MAX_KEYLEN];
+ s32 endtime;
+ u32 seq_send;
+ u64 seq_send64;
+ struct xdr_netobj mech_used;
+ u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
+ u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
+ u8 initiator_seal[GSS_KRB5_MAX_KEYLEN];
+ u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN];
+ u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
+ u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
+};
+
+extern spinlock_t krb5_seq_lock;
+
+/* The length of the Kerberos GSS token header */
+#define GSS_KRB5_TOK_HDR_LEN (16)
+
+#define KG_TOK_MIC_MSG 0x0101
+#define KG_TOK_WRAP_MSG 0x0201
+
+#define KG2_TOK_INITIAL 0x0101
+#define KG2_TOK_RESPONSE 0x0202
+#define KG2_TOK_MIC 0x0404
+#define KG2_TOK_WRAP 0x0504
+
+#define KG2_TOKEN_FLAG_SENTBYACCEPTOR 0x01
+#define KG2_TOKEN_FLAG_SEALED 0x02
+#define KG2_TOKEN_FLAG_ACCEPTORSUBKEY 0x04
+
+#define KG2_RESP_FLAG_ERROR 0x0001
+#define KG2_RESP_FLAG_DELEG_OK 0x0002
+
+enum sgn_alg {
+ SGN_ALG_DES_MAC_MD5 = 0x0000,
+ SGN_ALG_MD2_5 = 0x0001,
+ SGN_ALG_DES_MAC = 0x0002,
+ SGN_ALG_3 = 0x0003, /* not published */
+ SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */
+ SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004
+};
+enum seal_alg {
+ SEAL_ALG_NONE = 0xffff,
+ SEAL_ALG_DES = 0x0000,
+ SEAL_ALG_1 = 0x0001, /* not published */
+ SEAL_ALG_MICROSOFT_RC4 = 0x0010,/* microsoft w2k; no support */
+ SEAL_ALG_DES3KD = 0x0002
+};
+
+#define CKSUMTYPE_CRC32 0x0001
+#define CKSUMTYPE_RSA_MD4 0x0002
+#define CKSUMTYPE_RSA_MD4_DES 0x0003
+#define CKSUMTYPE_DESCBC 0x0004
+#define CKSUMTYPE_RSA_MD5 0x0007
+#define CKSUMTYPE_RSA_MD5_DES 0x0008
+#define CKSUMTYPE_NIST_SHA 0x0009
+#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
+#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
+#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
+#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
+
+/* from gssapi_err_krb5.h */
+#define KG_CCACHE_NOMATCH (39756032L)
+#define KG_KEYTAB_NOMATCH (39756033L)
+#define KG_TGT_MISSING (39756034L)
+#define KG_NO_SUBKEY (39756035L)
+#define KG_CONTEXT_ESTABLISHED (39756036L)
+#define KG_BAD_SIGN_TYPE (39756037L)
+#define KG_BAD_LENGTH (39756038L)
+#define KG_CTX_INCOMPLETE (39756039L)
+#define KG_CONTEXT (39756040L)
+#define KG_CRED (39756041L)
+#define KG_ENC_DESC (39756042L)
+#define KG_BAD_SEQ (39756043L)
+#define KG_EMPTY_CCACHE (39756044L)
+#define KG_NO_CTYPES (39756045L)
+
+/* per Kerberos v5 protocol spec crypto types from the wire.
+ * these get mapped to linux kernel crypto routines.
+ */
+#define ENCTYPE_NULL 0x0000
+#define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
+#define ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */
+#define ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */
+#define ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */
+/* XXX deprecated? */
+#define ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */
+#define ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */
+#define ENCTYPE_DES_HMAC_SHA1 0x0008
+#define ENCTYPE_DES3_CBC_SHA1 0x0010
+#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
+#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
+#define ENCTYPE_ARCFOUR_HMAC 0x0017
+#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
+#define ENCTYPE_UNKNOWN 0x01ff
+
+/*
+ * Constants used for key derivation
+ */
+/* for 3DES */
+#define KG_USAGE_SEAL (22)
+#define KG_USAGE_SIGN (23)
+#define KG_USAGE_SEQ (24)
+
+/* from rfc3961 */
+#define KEY_USAGE_SEED_CHECKSUM (0x99)
+#define KEY_USAGE_SEED_ENCRYPTION (0xAA)
+#define KEY_USAGE_SEED_INTEGRITY (0x55)
+
+/* from rfc4121 */
+#define KG_USAGE_ACCEPTOR_SEAL (22)
+#define KG_USAGE_ACCEPTOR_SIGN (23)
+#define KG_USAGE_INITIATOR_SEAL (24)
+#define KG_USAGE_INITIATOR_SIGN (25)
+
+/*
+ * This compile-time check verifies that we will not exceed the
+ * slack space allotted by the client and server auth_gss code
+ * before they call gss_wrap().
+ */
+#define GSS_KRB5_MAX_SLACK_NEEDED \
+ (GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
+ + GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
+ + GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
+ + GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
+ + GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\
+ + GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
+ + 4 + 4 /* RPC verifier */ \
+ + GSS_KRB5_TOK_HDR_LEN \
+ + GSS_KRB5_MAX_CKSUM_LEN)
+
+u32
+make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *cksumkey,
+ unsigned int usage, struct xdr_netobj *cksumout);
+
+u32
+make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen,
+ struct xdr_buf *body, int body_offset, u8 *key,
+ unsigned int usage, struct xdr_netobj *cksum);
+
+u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
+ struct xdr_netobj *);
+
+u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
+ struct xdr_netobj *);
+
+u32
+gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
+ struct xdr_buf *outbuf, struct page **pages);
+
+u32
+gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
+ struct xdr_buf *buf);
+
+
+u32
+krb5_encrypt(struct crypto_blkcipher *key,
+ void *iv, void *in, void *out, int length);
+
+u32
+krb5_decrypt(struct crypto_blkcipher *key,
+ void *iv, void *in, void *out, int length);
+
+int
+gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf,
+ int offset, struct page **pages);
+
+int
+gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
+ int offset);
+
+s32
+krb5_make_seq_num(struct krb5_ctx *kctx,
+ struct crypto_blkcipher *key,
+ int direction,
+ u32 seqnum, unsigned char *cksum, unsigned char *buf);
+
+s32
+krb5_get_seq_num(struct krb5_ctx *kctx,
+ unsigned char *cksum,
+ unsigned char *buf, int *direction, u32 *seqnum);
+
+int
+xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen);
+
+u32
+krb5_derive_key(const struct gss_krb5_enctype *gk5e,
+ const struct xdr_netobj *inkey,
+ struct xdr_netobj *outkey,
+ const struct xdr_netobj *in_constant,
+ gfp_t gfp_mask);
+
+u32
+gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *randombits,
+ struct xdr_netobj *key);
+
+u32
+gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
+ struct xdr_netobj *randombits,
+ struct xdr_netobj *key);
+
+u32
+gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf,
+ struct page **pages);
+
+u32
+gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
+ struct xdr_buf *buf, u32 *plainoffset,
+ u32 *plainlen);
+
+int
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
+ struct crypto_blkcipher *cipher,
+ unsigned char *cksum);
+
+int
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
+ struct crypto_blkcipher *cipher,
+ s32 seqnum);
+void
+gss_krb5_make_confounder(char *p, u32 conflen);
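A sketch, modeled on the krb5 mechanism's RFC 4121 subkey derivation, of how krb5_derive_key() combines the usage numbers and seed bytes defined above. demo_derive_seal_key() is hypothetical and assumes out->len/out->data already describe a gk5e->keylength buffer.

#include <linux/slab.h>
#include <linux/sunrpc/gss_krb5.h>

static u32 demo_derive_seal_key(struct krb5_ctx *kctx, struct xdr_netobj *out)
{
        struct xdr_netobj inkey = {
                .len  = kctx->gk5e->keylength,
                .data = kctx->Ksess,                    /* session key */
        };
        u8 constant[GSS_KRB5_K5CLENGTH];
        struct xdr_netobj c = { .len = sizeof(constant), .data = constant };

        /* big-endian usage number followed by the RFC 3961 seed byte */
        constant[0] = (KG_USAGE_INITIATOR_SEAL >> 24) & 0xff;
        constant[1] = (KG_USAGE_INITIATOR_SEAL >> 16) & 0xff;
        constant[2] = (KG_USAGE_INITIATOR_SEAL >> 8) & 0xff;
        constant[3] = KG_USAGE_INITIATOR_SEAL & 0xff;
        constant[4] = KEY_USAGE_SEED_ENCRYPTION;

        return krb5_derive_key(kctx->gk5e, &inkey, out, &c, GFP_KERNEL);
}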
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
new file mode 100644
index 000000000..ec6234eee
--- /dev/null
+++ b/include/linux/sunrpc/gss_krb5_enctypes.h
@@ -0,0 +1,4 @@
+/*
+ * Dumb way to share this static piece of information with nfsd
+ */
+#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
new file mode 100644
index 000000000..694eecb2f
--- /dev/null
+++ b/include/linux/sunrpc/metrics.h
@@ -0,0 +1,102 @@
+/*
+ * linux/include/linux/sunrpc/metrics.h
+ *
+ * Declarations for RPC client per-operation metrics
+ *
+ * Copyright (C) 2005 Chuck Lever <cel@netapp.com>
+ *
+ * RPC client per-operation statistics provide latency and retry
+ * information about each type of RPC procedure in a given RPC program.
+ * These statistics are not for detailed problem diagnosis, but simply
+ * to indicate whether the problem is local or remote.
+ *
+ * These counters are not meant to be human-readable, but are meant to be
+ * integrated into system monitoring tools such as "sar" and "iostat". As
+ * such, the counters are sampled by the tools over time, and are never
+ * zeroed after a file system is mounted. Moving averages can be computed
+ * by the tools by taking the difference between two instantaneous samples
+ * and dividing that by the time between the samples.
+ *
+ * The counters are maintained in a single array per RPC client, indexed
+ * by procedure number. There is no need to maintain separate counter
+ * arrays per-CPU because these counters are always modified behind locks.
+ */
+
+#ifndef _LINUX_SUNRPC_METRICS_H
+#define _LINUX_SUNRPC_METRICS_H
+
+#include <linux/seq_file.h>
+#include <linux/ktime.h>
+#include <linux/spinlock.h>
+
+#define RPC_IOSTATS_VERS "1.0"
+
+struct rpc_iostats {
+ spinlock_t om_lock;
+
+ /*
+ * These counters give an idea about how many request
+ * transmissions are required, on average, to complete that
+ * particular procedure. Some procedures may require more
+ * than one transmission because the server is unresponsive,
+ * the client is retransmitting too aggressively, or the
+ * requests are large and the network is congested.
+ */
+ unsigned long om_ops, /* count of operations */
+ om_ntrans, /* count of RPC transmissions */
+ om_timeouts; /* count of major timeouts */
+
+ /*
+ * These count how many bytes are sent and received for a
+ * given RPC procedure type. This indicates how much load a
+ * particular procedure is putting on the network. These
+ * counts include the RPC and ULP headers, and the request
+ * payload.
+ */
+ unsigned long long om_bytes_sent, /* count of bytes out */
+ om_bytes_recv; /* count of bytes in */
+
+ /*
+ * The length of time an RPC request waits in queue before
+ * transmission, the network + server latency of the request,
+ * and the total time the request spent from init to release
+ * are measured.
+ */
+ ktime_t om_queue, /* queued for xmit */
+ om_rtt, /* RPC RTT */
+ om_execute; /* RPC execution */
+} ____cacheline_aligned;
+
+struct rpc_task;
+struct rpc_clnt;
+
+/*
+ * EXPORTed functions for managing rpc_iostats structures
+ */
+
+#ifdef CONFIG_PROC_FS
+
+struct rpc_iostats * rpc_alloc_iostats(struct rpc_clnt *);
+void rpc_count_iostats(const struct rpc_task *,
+ struct rpc_iostats *);
+void rpc_count_iostats_metrics(const struct rpc_task *,
+ struct rpc_iostats *);
+void rpc_print_iostats(struct seq_file *, struct rpc_clnt *);
+void rpc_free_iostats(struct rpc_iostats *);
+
+#else /* CONFIG_PROC_FS */
+
+static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
+static inline void rpc_count_iostats(const struct rpc_task *task,
+ struct rpc_iostats *stats) {}
+static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
+ struct rpc_iostats *stats)
+{
+}
+
+static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
+static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
+
+#endif /* CONFIG_PROC_FS */
+
+#endif /* _LINUX_SUNRPC_METRICS_H */
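As the header comment says, averages are meant to be computed from snapshots of these monotonically increasing counters; below is a hypothetical kernel-side sketch of that arithmetic for one rpc_iostats slot.

#include <linux/math64.h>
#include <linux/sunrpc/metrics.h>

static void demo_iostats_avg(const struct rpc_iostats *op,
                             u64 *avg_rtt_us, u64 *avg_bytes_per_op)
{
        u32 ops = (u32)op->om_ops;      /* count of completed operations */

        if (!ops) {
                *avg_rtt_us = 0;
                *avg_bytes_per_op = 0;
                return;
        }
        /* om_rtt accumulates network + server latency across all transmissions */
        *avg_rtt_us = div_u64(ktime_to_us(op->om_rtt), ops);
        *avg_bytes_per_op = div_u64(op->om_bytes_sent + op->om_bytes_recv, ops);
}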
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
new file mode 100644
index 000000000..807371357
--- /dev/null
+++ b/include/linux/sunrpc/msg_prot.h
@@ -0,0 +1,220 @@
+/*
+ * linux/include/linux/sunrpc/msg_prot.h
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_MSGPROT_H_
+#define _LINUX_SUNRPC_MSGPROT_H_
+
+#ifdef __KERNEL__ /* user programs should get these from the rpc header files */
+
+#define RPC_VERSION 2
+
+/* size of an XDR encoding unit in bytes, i.e. 32bit */
+#define XDR_UNIT (4)
+
+/* spec defines authentication flavor as an unsigned 32 bit integer */
+typedef u32 rpc_authflavor_t;
+
+enum rpc_auth_flavors {
+ RPC_AUTH_NULL = 0,
+ RPC_AUTH_UNIX = 1,
+ RPC_AUTH_SHORT = 2,
+ RPC_AUTH_DES = 3,
+ RPC_AUTH_KRB = 4,
+ RPC_AUTH_GSS = 6,
+ RPC_AUTH_MAXFLAVOR = 8,
+ /* pseudoflavors: */
+ RPC_AUTH_GSS_KRB5 = 390003,
+ RPC_AUTH_GSS_KRB5I = 390004,
+ RPC_AUTH_GSS_KRB5P = 390005,
+ RPC_AUTH_GSS_LKEY = 390006,
+ RPC_AUTH_GSS_LKEYI = 390007,
+ RPC_AUTH_GSS_LKEYP = 390008,
+ RPC_AUTH_GSS_SPKM = 390009,
+ RPC_AUTH_GSS_SPKMI = 390010,
+ RPC_AUTH_GSS_SPKMP = 390011,
+};
+
+/* Maximum size (in bytes) of an rpc credential or verifier */
+#define RPC_MAX_AUTH_SIZE (400)
+
+enum rpc_msg_type {
+ RPC_CALL = 0,
+ RPC_REPLY = 1
+};
+
+enum rpc_reply_stat {
+ RPC_MSG_ACCEPTED = 0,
+ RPC_MSG_DENIED = 1
+};
+
+enum rpc_accept_stat {
+ RPC_SUCCESS = 0,
+ RPC_PROG_UNAVAIL = 1,
+ RPC_PROG_MISMATCH = 2,
+ RPC_PROC_UNAVAIL = 3,
+ RPC_GARBAGE_ARGS = 4,
+ RPC_SYSTEM_ERR = 5,
+ /* internal use only */
+ RPC_DROP_REPLY = 60000,
+};
+
+enum rpc_reject_stat {
+ RPC_MISMATCH = 0,
+ RPC_AUTH_ERROR = 1
+};
+
+enum rpc_auth_stat {
+ RPC_AUTH_OK = 0,
+ RPC_AUTH_BADCRED = 1,
+ RPC_AUTH_REJECTEDCRED = 2,
+ RPC_AUTH_BADVERF = 3,
+ RPC_AUTH_REJECTEDVERF = 4,
+ RPC_AUTH_TOOWEAK = 5,
+ /* RPCSEC_GSS errors */
+ RPCSEC_GSS_CREDPROBLEM = 13,
+ RPCSEC_GSS_CTXPROBLEM = 14
+};
+
+#define RPC_MAXNETNAMELEN 256
+
+/*
+ * From RFC 1831:
+ *
+ * "A record is composed of one or more record fragments. A record
+ * fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of
+ * fragment data. The bytes encode an unsigned binary number; as with
+ * XDR integers, the byte order is from highest to lowest. The number
+ * encodes two values -- a boolean which indicates whether the fragment
+ * is the last fragment of the record (bit value 1 implies the fragment
+ * is the last fragment) and a 31-bit unsigned binary value which is the
+ * length in bytes of the fragment's data. The boolean value is the
+ * highest-order bit of the header; the length is the 31 low-order bits.
+ * (Note that this record specification is NOT in XDR standard form!)"
+ *
+ * The Linux RPC client always sends its requests in a single record
+ * fragment, limiting the maximum payload size for stream transports to
+ * 2GB.
+ */
+
+typedef __be32 rpc_fraghdr;
+
+#define RPC_LAST_STREAM_FRAGMENT (1U << 31)
+#define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT)
+#define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1)
+
+/*
+ * RPC call and reply header size as number of 32bit words (verifier
+ * size computed separately, see below)
+ */
+#define RPC_CALLHDRSIZE (6)
+#define RPC_REPHDRSIZE (4)
+
+
+/*
+ * Maximum RPC header size, including authentication,
+ * as number of 32bit words (see RFCs 1831, 1832).
+ *
+ * xid 1 xdr unit = 4 bytes
+ * mtype 1
+ * rpc_version 1
+ * program 1
+ * prog_version 1
+ * procedure 1
+ * cred {
+ * flavor 1
+ * length 1
+ * body<RPC_MAX_AUTH_SIZE> 100 xdr units = 400 bytes
+ * }
+ * verf {
+ * flavor 1
+ * length 1
+ * body<RPC_MAX_AUTH_SIZE> 100 xdr units = 400 bytes
+ * }
+ * TOTAL 210 xdr units = 840 bytes
+ */
+#define RPC_MAX_HEADER_WITH_AUTH \
+ (RPC_CALLHDRSIZE + 2*(2+RPC_MAX_AUTH_SIZE/4))
+
+#define RPC_MAX_REPHEADER_WITH_AUTH \
+ (RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4))
+
+/*
+ * Well-known netids. See:
+ *
+ * http://www.iana.org/assignments/rpc-netids/rpc-netids.xhtml
+ */
+#define RPCBIND_NETID_UDP "udp"
+#define RPCBIND_NETID_TCP "tcp"
+#define RPCBIND_NETID_RDMA "rdma"
+#define RPCBIND_NETID_SCTP "sctp"
+#define RPCBIND_NETID_UDP6 "udp6"
+#define RPCBIND_NETID_TCP6 "tcp6"
+#define RPCBIND_NETID_RDMA6 "rdma6"
+#define RPCBIND_NETID_SCTP6 "sctp6"
+#define RPCBIND_NETID_LOCAL "local"
+
+/*
+ * Note that RFC 1833 does not put any size restrictions on the
+ * netid string, but all currently defined netid's fit in 4 bytes.
+ */
+#define RPCBIND_MAXNETIDLEN (4u)
+
+/*
+ * Universal addresses are introduced in RFC 1833 and further spelled
+ * out in RFC 3530. RPCBIND_MAXUADDRLEN defines a maximum byte length
+ * of a universal address for use in allocating buffers and character
+ * arrays.
+ *
+ * Quoting RFC 3530, section 2.2:
+ *
+ * For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the
+ * US-ASCII string:
+ *
+ * h1.h2.h3.h4.p1.p2
+ *
+ * The prefix, "h1.h2.h3.h4", is the standard textual form for
+ * representing an IPv4 address, which is always four octets long.
+ * Assuming big-endian ordering, h1, h2, h3, and h4, are respectively,
+ * the first through fourth octets each converted to ASCII-decimal.
+ * Assuming big-endian ordering, p1 and p2 are, respectively, the first
+ * and second octets each converted to ASCII-decimal. For example, if a
+ * host, in big-endian order, has an address of 0x0A010307 and there is
+ * a service listening on, in big endian order, port 0x020F (decimal
+ * 527), then the complete universal address is "10.1.3.7.2.15".
+ *
+ * ...
+ *
+ * For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the
+ * US-ASCII string:
+ *
+ * x1:x2:x3:x4:x5:x6:x7:x8.p1.p2
+ *
+ * The suffix "p1.p2" is the service port, and is computed the same way
+ * as with universal addresses for TCP and UDP over IPv4. The prefix,
+ * "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for
+ * representing an IPv6 address as defined in Section 2.2 of [RFC2373].
+ * Additionally, the two alternative forms specified in Section 2.2 of
+ * [RFC2373] are also acceptable.
+ */
+
+#include <linux/inet.h>
+
+/* Maximum size of the port number part of a universal address */
+#define RPCBIND_MAXUADDRPLEN sizeof(".255.255")
+
+/* Maximum size of an IPv4 universal address */
+#define RPCBIND_MAXUADDR4LEN \
+ (INET_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN)
+
+/* Maximum size of an IPv6 universal address */
+#define RPCBIND_MAXUADDR6LEN \
+ (INET6_ADDRSTRLEN + RPCBIND_MAXUADDRPLEN)
+
+/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */
+#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SUNRPC_MSGPROT_H_ */
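A sketch of the record-marking rule quoted from RFC 1831 above: the high bit flags the last fragment, the low 31 bits carry the length, and the word travels in network byte order. The demo_* helpers are hypothetical.

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/sunrpc/msg_prot.h>

static rpc_fraghdr demo_encode_marker(u32 len, bool last)
{
        u32 marker = len & RPC_FRAGMENT_SIZE_MASK;

        if (last)
                marker |= RPC_LAST_STREAM_FRAGMENT;
        return cpu_to_be32(marker);             /* sent on the wire before the fragment */
}

static void demo_decode_marker(rpc_fraghdr hdr, u32 *len, bool *last)
{
        u32 marker = be32_to_cpu(hdr);

        *last = (marker & RPC_LAST_STREAM_FRAGMENT) != 0;
        *len  = marker & RPC_FRAGMENT_SIZE_MASK;
}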
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
new file mode 100644
index 000000000..7f490bef9
--- /dev/null
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -0,0 +1,137 @@
+#ifndef _LINUX_SUNRPC_RPC_PIPE_FS_H
+#define _LINUX_SUNRPC_RPC_PIPE_FS_H
+
+#ifdef __KERNEL__
+
+#include <linux/workqueue.h>
+
+struct rpc_pipe_dir_head {
+ struct list_head pdh_entries;
+ struct dentry *pdh_dentry;
+};
+
+struct rpc_pipe_dir_object_ops;
+struct rpc_pipe_dir_object {
+ struct list_head pdo_head;
+ const struct rpc_pipe_dir_object_ops *pdo_ops;
+
+ void *pdo_data;
+};
+
+struct rpc_pipe_dir_object_ops {
+ int (*create)(struct dentry *dir,
+ struct rpc_pipe_dir_object *pdo);
+ void (*destroy)(struct dentry *dir,
+ struct rpc_pipe_dir_object *pdo);
+};
+
+struct rpc_pipe_msg {
+ struct list_head list;
+ void *data;
+ size_t len;
+ size_t copied;
+ int errno;
+};
+
+struct rpc_pipe_ops {
+ ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t);
+ ssize_t (*downcall)(struct file *, const char __user *, size_t);
+ void (*release_pipe)(struct inode *);
+ int (*open_pipe)(struct inode *);
+ void (*destroy_msg)(struct rpc_pipe_msg *);
+};
+
+struct rpc_pipe {
+ struct list_head pipe;
+ struct list_head in_upcall;
+ struct list_head in_downcall;
+ int pipelen;
+ int nreaders;
+ int nwriters;
+#define RPC_PIPE_WAIT_FOR_OPEN 1
+ int flags;
+ struct delayed_work queue_timeout;
+ const struct rpc_pipe_ops *ops;
+ spinlock_t lock;
+ struct dentry *dentry;
+};
+
+struct rpc_inode {
+ struct inode vfs_inode;
+ void *private;
+ struct rpc_pipe *pipe;
+ wait_queue_head_t waitq;
+};
+
+static inline struct rpc_inode *
+RPC_I(struct inode *inode)
+{
+ return container_of(inode, struct rpc_inode, vfs_inode);
+}
+
+enum {
+ SUNRPC_PIPEFS_NFS_PRIO,
+ SUNRPC_PIPEFS_RPC_PRIO,
+};
+
+extern int rpc_pipefs_notifier_register(struct notifier_block *);
+extern void rpc_pipefs_notifier_unregister(struct notifier_block *);
+
+enum {
+ RPC_PIPEFS_MOUNT,
+ RPC_PIPEFS_UMOUNT,
+};
+
+extern struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
+ const unsigned char *dir_name);
+extern int rpc_pipefs_init_net(struct net *net);
+extern void rpc_pipefs_exit_net(struct net *net);
+extern struct super_block *rpc_get_sb_net(const struct net *net);
+extern void rpc_put_sb_net(const struct net *net);
+
+extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
+ char __user *, size_t);
+extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *);
+
+struct rpc_clnt;
+extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
+extern int rpc_remove_client_dir(struct rpc_clnt *);
+
+extern void rpc_init_pipe_dir_head(struct rpc_pipe_dir_head *pdh);
+extern void rpc_init_pipe_dir_object(struct rpc_pipe_dir_object *pdo,
+ const struct rpc_pipe_dir_object_ops *pdo_ops,
+ void *pdo_data);
+extern int rpc_add_pipe_dir_object(struct net *net,
+ struct rpc_pipe_dir_head *pdh,
+ struct rpc_pipe_dir_object *pdo);
+extern void rpc_remove_pipe_dir_object(struct net *net,
+ struct rpc_pipe_dir_head *pdh,
+ struct rpc_pipe_dir_object *pdo);
+extern struct rpc_pipe_dir_object *rpc_find_or_alloc_pipe_dir_object(
+ struct net *net,
+ struct rpc_pipe_dir_head *pdh,
+ int (*match)(struct rpc_pipe_dir_object *, void *),
+ struct rpc_pipe_dir_object *(*alloc)(void *),
+ void *data);
+
+struct cache_detail;
+extern struct dentry *rpc_create_cache_dir(struct dentry *,
+ const char *,
+ umode_t umode,
+ struct cache_detail *);
+extern void rpc_remove_cache_dir(struct dentry *);
+
+extern int rpc_rmdir(struct dentry *dentry);
+
+struct rpc_pipe *rpc_mkpipe_data(const struct rpc_pipe_ops *ops, int flags);
+void rpc_destroy_pipe_data(struct rpc_pipe *pipe);
+extern struct dentry *rpc_mkpipe_dentry(struct dentry *, const char *, void *,
+ struct rpc_pipe *);
+extern int rpc_unlink(struct dentry *);
+extern int register_rpc_pipefs(void);
+extern void unregister_rpc_pipefs(void);
+
+extern bool gssd_running(struct net *net);
+
+#endif
+#endif
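A sketch of the upcall/downcall pattern this header supports (the same shape auth_gss and the NFS idmapper use); all demo_* identifiers are hypothetical and error handling is abbreviated.

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

static ssize_t demo_downcall(struct file *filp, const char __user *src, size_t len)
{
        /* parse the daemon's reply here */
        return len;
}

static void demo_destroy_msg(struct rpc_pipe_msg *msg)
{
        kfree(msg->data);
        kfree(msg);
}

static const struct rpc_pipe_ops demo_pipe_ops = {
        .upcall      = rpc_pipe_generic_upcall,  /* copies msg->data to the daemon */
        .downcall    = demo_downcall,
        .destroy_msg = demo_destroy_msg,
};

static struct rpc_pipe *demo_make_pipe(struct dentry *parent, void *private)
{
        struct rpc_pipe *pipe;
        struct dentry *dentry;

        pipe = rpc_mkpipe_data(&demo_pipe_ops, RPC_PIPE_WAIT_FOR_OPEN);
        if (IS_ERR(pipe))
                return pipe;
        dentry = rpc_mkpipe_dentry(parent, "demo", private, pipe);
        if (IS_ERR(dentry)) {
                rpc_destroy_pipe_data(pipe);
                return ERR_CAST(dentry);
        }
        return pipe;            /* requests are then queued with rpc_queue_upcall() */
}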
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
new file mode 100644
index 000000000..f33c5a4d6
--- /dev/null
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the BSD-type
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * Neither the name of the Network Appliance, Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_SUNRPC_RPC_RDMA_H
+#define _LINUX_SUNRPC_RPC_RDMA_H
+
+#include <linux/types.h>
+
+#define RPCRDMA_VERSION 1
+#define rpcrdma_version cpu_to_be32(RPCRDMA_VERSION)
+
+struct rpcrdma_segment {
+ __be32 rs_handle; /* Registered memory handle */
+ __be32 rs_length; /* Length of the chunk in bytes */
+ __be64 rs_offset; /* Chunk virtual address or offset */
+};
+
+/*
+ * read chunk(s), encoded as a linked list.
+ */
+struct rpcrdma_read_chunk {
+ __be32 rc_discrim; /* 1 indicates presence */
+ __be32 rc_position; /* Position in XDR stream */
+ struct rpcrdma_segment rc_target;
+};
+
+/*
+ * write chunk, and reply chunk.
+ */
+struct rpcrdma_write_chunk {
+ struct rpcrdma_segment wc_target;
+};
+
+/*
+ * write chunk(s), encoded as a counted array.
+ */
+struct rpcrdma_write_array {
+ __be32 wc_discrim; /* 1 indicates presence */
+ __be32 wc_nchunks; /* Array count */
+ struct rpcrdma_write_chunk wc_array[0];
+};
+
+struct rpcrdma_msg {
+ __be32 rm_xid; /* Mirrors the RPC header xid */
+ __be32 rm_vers; /* Version of this protocol */
+ __be32 rm_credit; /* Buffers requested/granted */
+ __be32 rm_type; /* Type of message (enum rpcrdma_proc) */
+ union {
+
+ struct { /* no chunks */
+ __be32 rm_empty[3]; /* 3 empty chunk lists */
+ } rm_nochunks;
+
+ struct { /* no chunks and padded */
+ __be32 rm_align; /* Padding alignment */
+ __be32 rm_thresh; /* Padding threshold */
+ __be32 rm_pempty[3]; /* 3 empty chunk lists */
+ } rm_padded;
+
+ __be32 rm_chunks[0]; /* read, write and reply chunks */
+
+ } rm_body;
+};
+
+/*
+ * Smallest RPC/RDMA header: rm_xid through rm_type, then rm_nochunks
+ */
+#define RPCRDMA_HDRLEN_MIN (sizeof(__be32) * 7)
+
+enum rpcrdma_errcode {
+ ERR_VERS = 1,
+ ERR_CHUNK = 2
+};
+
+struct rpcrdma_err_vers {
+ uint32_t rdma_vers_low; /* Version range supported by peer */
+ uint32_t rdma_vers_high;
+};
+
+enum rpcrdma_proc {
+ RDMA_MSG = 0, /* An RPC call or reply msg */
+ RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */
+ RDMA_MSGP = 2, /* An RPC call or reply msg with padding */
+ RDMA_DONE = 3, /* Client signals reply completion */
+ RDMA_ERROR = 4 /* An RPC RDMA encoding error */
+};
+
+#define rdma_msg cpu_to_be32(RDMA_MSG)
+#define rdma_nomsg cpu_to_be32(RDMA_NOMSG)
+#define rdma_msgp cpu_to_be32(RDMA_MSGP)
+#define rdma_done cpu_to_be32(RDMA_DONE)
+#define rdma_error cpu_to_be32(RDMA_ERROR)
+
+#endif /* _LINUX_SUNRPC_RPC_RDMA_H */
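A sketch of validating the fixed portion of an inline RPC/RDMA header using the constants above; demo_check_rdma_header() is hypothetical.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sunrpc/rpc_rdma.h>

static int demo_check_rdma_header(const struct rpcrdma_msg *rmsg, size_t len)
{
        if (len < RPCRDMA_HDRLEN_MIN)           /* rm_xid..rm_type + 3 empty chunk lists */
                return -EINVAL;
        if (rmsg->rm_vers != rpcrdma_version)   /* both sides are big-endian on the wire */
                return -EPROTONOSUPPORT;
        if (rmsg->rm_type == rdma_error)        /* peer reported ERR_VERS or ERR_CHUNK */
                return -EIO;
        if (rmsg->rm_type != rdma_msg && rmsg->rm_type != rdma_nomsg &&
            rmsg->rm_type != rdma_msgp)
                return -EPROTO;
        return 0;                               /* chunk lists follow in rm_body */
}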
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
new file mode 100644
index 000000000..5f1e6bd4c
--- /dev/null
+++ b/include/linux/sunrpc/sched.h
@@ -0,0 +1,272 @@
+/*
+ * linux/include/linux/sunrpc/sched.h
+ *
+ * Scheduling primitives for kernel Sun RPC.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_SCHED_H_
+#define _LINUX_SUNRPC_SCHED_H_
+
+#include <linux/timer.h>
+#include <linux/ktime.h>
+#include <linux/sunrpc/types.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/sunrpc/xdr.h>
+
+/*
+ * This is the actual RPC procedure call info.
+ */
+struct rpc_procinfo;
+struct rpc_message {
+ struct rpc_procinfo * rpc_proc; /* Procedure information */
+ void * rpc_argp; /* Arguments */
+ void * rpc_resp; /* Result */
+ struct rpc_cred * rpc_cred; /* Credentials */
+};
+
+struct rpc_call_ops;
+struct rpc_wait_queue;
+struct rpc_wait {
+ struct list_head list; /* wait queue links */
+ struct list_head links; /* Links to related tasks */
+ struct list_head timer_list; /* Timer list */
+ unsigned long expires;
+};
+
+/*
+ * This is the RPC task struct
+ */
+struct rpc_task {
+ atomic_t tk_count; /* Reference count */
+ struct list_head tk_task; /* global list of tasks */
+ struct rpc_clnt * tk_client; /* RPC client */
+ struct rpc_rqst * tk_rqstp; /* RPC request */
+
+ /*
+ * RPC call state
+ */
+ struct rpc_message tk_msg; /* RPC call info */
+
+ /*
+ * callback to be executed after waking up
+ * action next procedure for async tasks
+ * tk_ops caller callbacks
+ */
+ void (*tk_callback)(struct rpc_task *);
+ void (*tk_action)(struct rpc_task *);
+ const struct rpc_call_ops *tk_ops;
+ void * tk_calldata;
+
+ unsigned long tk_timeout; /* timeout for rpc_sleep() */
+ unsigned long tk_runstate; /* Task run status */
+ struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
+ * be any workqueue
+ */
+ struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */
+ union {
+ struct work_struct tk_work; /* Async task work queue */
+ struct rpc_wait tk_wait; /* RPC wait */
+ } u;
+
+ ktime_t tk_start; /* RPC task init timestamp */
+
+ pid_t tk_owner; /* Process id for batching tasks */
+ int tk_status; /* result of last operation */
+ unsigned short tk_flags; /* misc flags */
+ unsigned short tk_timeouts; /* maj timeouts */
+
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
+ unsigned short tk_pid; /* debugging aid */
+#endif
+ unsigned char tk_priority : 2,/* Task priority */
+ tk_garb_retry : 2,
+ tk_cred_retry : 2,
+ tk_rebind_retry : 2;
+};
+
+typedef void (*rpc_action)(struct rpc_task *);
+
+struct rpc_call_ops {
+ void (*rpc_call_prepare)(struct rpc_task *, void *);
+ void (*rpc_call_done)(struct rpc_task *, void *);
+ void (*rpc_count_stats)(struct rpc_task *, void *);
+ void (*rpc_release)(void *);
+};
+
+struct rpc_task_setup {
+ struct rpc_task *task;
+ struct rpc_clnt *rpc_client;
+ const struct rpc_message *rpc_message;
+ const struct rpc_call_ops *callback_ops;
+ void *callback_data;
+ struct workqueue_struct *workqueue;
+ unsigned short flags;
+ signed char priority;
+};
+
+/*
+ * RPC task flags
+ */
+#define RPC_TASK_ASYNC 0x0001 /* is an async task */
+#define RPC_TASK_SWAPPER 0x0002 /* is swapping in/out */
+#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
+#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
+#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
+#define RPC_TASK_KILLED 0x0100 /* task was killed */
+#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
+#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
+#define RPC_TASK_SENT 0x0800 /* message was sent */
+#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
+#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
+#define RPC_TASK_NO_RETRANS_TIMEOUT 0x4000 /* wait forever for a reply */
+
+#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
+#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
+#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
+#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
+#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
+#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
+#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
+
+#define RPC_TASK_RUNNING 0
+#define RPC_TASK_QUEUED 1
+#define RPC_TASK_ACTIVE 2
+
+#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
+#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
+#define rpc_test_and_set_running(t) \
+ test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
+#define rpc_clear_running(t) \
+ do { \
+ smp_mb__before_atomic(); \
+ clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
+ smp_mb__after_atomic(); \
+ } while (0)
+
+#define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
+#define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
+#define rpc_clear_queued(t) \
+ do { \
+ smp_mb__before_atomic(); \
+ clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
+ smp_mb__after_atomic(); \
+ } while (0)
+
+#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
+
+/*
+ * Task priorities.
+ * Note: if you change these, you must also change
+ * the task initialization definitions below.
+ */
+#define RPC_PRIORITY_LOW (-1)
+#define RPC_PRIORITY_NORMAL (0)
+#define RPC_PRIORITY_HIGH (1)
+#define RPC_PRIORITY_PRIVILEGED (2)
+#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW)
+
+struct rpc_timer {
+ struct timer_list timer;
+ struct list_head list;
+ unsigned long expires;
+};
+
+/*
+ * RPC synchronization objects
+ */
+struct rpc_wait_queue {
+ spinlock_t lock;
+ struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
+ pid_t owner; /* process id of last task serviced */
+ unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
+ unsigned char priority; /* current priority */
+ unsigned char nr; /* # tasks remaining for cookie */
+ unsigned short qlen; /* total # tasks waiting in queue */
+ struct rpc_timer timer_list;
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
+ const char * name;
+#endif
+};
+
+/*
+ * This is the # requests to send consecutively
+ * from a single cookie. The aim is to improve
+ * performance of NFS operations such as read/write.
+ */
+#define RPC_BATCH_COUNT 16
+#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
+
+/*
+ * Function prototypes
+ */
+struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
+struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ const struct rpc_call_ops *ops);
+void rpc_put_task(struct rpc_task *);
+void rpc_put_task_async(struct rpc_task *);
+void rpc_exit_task(struct rpc_task *);
+void rpc_exit(struct rpc_task *, int);
+void rpc_release_calldata(const struct rpc_call_ops *, void *);
+void rpc_killall_tasks(struct rpc_clnt *);
+void rpc_execute(struct rpc_task *);
+void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
+void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
+void rpc_destroy_wait_queue(struct rpc_wait_queue *);
+void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
+ rpc_action action);
+void rpc_sleep_on_priority(struct rpc_wait_queue *,
+ struct rpc_task *,
+ rpc_action action,
+ int priority);
+void rpc_wake_up_queued_task(struct rpc_wait_queue *,
+ struct rpc_task *);
+void rpc_wake_up(struct rpc_wait_queue *);
+struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
+struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *,
+ bool (*)(struct rpc_task *, void *),
+ void *);
+void rpc_wake_up_status(struct rpc_wait_queue *, int);
+void rpc_delay(struct rpc_task *, unsigned long);
+void * rpc_malloc(struct rpc_task *, size_t);
+void rpc_free(void *);
+int rpciod_up(void);
+void rpciod_down(void);
+int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+struct net;
+void rpc_show_tasks(struct net *);
+#endif
+int rpc_init_mempool(void);
+void rpc_destroy_mempool(void);
+extern struct workqueue_struct *rpciod_workqueue;
+void rpc_prepare_task(struct rpc_task *task);
+
+static inline int rpc_wait_for_completion_task(struct rpc_task *task)
+{
+ return __rpc_wait_for_completion_task(task, NULL);
+}
+
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
+static inline const char * rpc_qname(const struct rpc_wait_queue *q)
+{
+ return ((q && q->name) ? q->name : "unknown");
+}
+
+static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
+ const char *name)
+{
+ q->name = name;
+}
+#else
+static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
+ const char *name)
+{
+}
+#endif
+
+#endif /* _LINUX_SUNRPC_SCHED_H_ */
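A sketch of the asynchronous task pattern built from rpc_task_setup, rpc_call_ops and rpc_run_task(); the demo_* callbacks are hypothetical. rpciod runs the task and invokes rpc_call_done and rpc_release once the call completes, so the caller only holds its own reference briefly.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static void demo_call_done(struct rpc_task *task, void *calldata)
{
        if (task->tk_status < 0)
                pr_warn("demo: RPC call failed: %d\n", task->tk_status);
}

static void demo_release(void *calldata)
{
        kfree(calldata);                /* runs after rpc_call_done, even on error */
}

static const struct rpc_call_ops demo_call_ops = {
        .rpc_call_done = demo_call_done,
        .rpc_release   = demo_release,
};

static int demo_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg,
                           void *calldata)
{
        struct rpc_task_setup setup = {
                .rpc_client    = clnt,
                .rpc_message   = msg,
                .callback_ops  = &demo_call_ops,
                .callback_data = calldata,
                .flags         = RPC_TASK_ASYNC | RPC_TASK_SOFT,
        };
        struct rpc_task *task;

        task = rpc_run_task(&setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);             /* drop our reference; rpciod completes the call */
        return 0;
}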
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
new file mode 100644
index 000000000..edc64219f
--- /dev/null
+++ b/include/linux/sunrpc/stats.h
@@ -0,0 +1,84 @@
+/*
+ * linux/include/linux/sunrpc/stats.h
+ *
+ * Client statistics collection for SUN RPC
+ *
+ * Copyright (C) 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_STATS_H
+#define _LINUX_SUNRPC_STATS_H
+
+#include <linux/proc_fs.h>
+
+struct rpc_stat {
+ const struct rpc_program *program;
+
+ unsigned int netcnt,
+ netudpcnt,
+ nettcpcnt,
+ nettcpconn,
+ netreconn;
+ unsigned int rpccnt,
+ rpcretrans,
+ rpcauthrefresh,
+ rpcgarbage;
+};
+
+struct svc_stat {
+ struct svc_program * program;
+
+ unsigned int netcnt,
+ netudpcnt,
+ nettcpcnt,
+ nettcpconn;
+ unsigned int rpccnt,
+ rpcbadfmt,
+ rpcbadauth,
+ rpcbadclnt;
+};
+
+struct net;
+#ifdef CONFIG_PROC_FS
+int rpc_proc_init(struct net *);
+void rpc_proc_exit(struct net *);
+#else
+static inline int rpc_proc_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void rpc_proc_exit(struct net *net)
+{
+}
+#endif
+
+#ifdef MODULE
+void rpc_modcount(struct inode *, int);
+#endif
+
+#ifdef CONFIG_PROC_FS
+struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *);
+void rpc_proc_unregister(struct net *,const char *);
+void rpc_proc_zero(const struct rpc_program *);
+struct proc_dir_entry * svc_proc_register(struct net *, struct svc_stat *,
+ const struct file_operations *);
+void svc_proc_unregister(struct net *, const char *);
+
+void svc_seq_show(struct seq_file *,
+ const struct svc_stat *);
+#else
+
+static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; }
+static inline void rpc_proc_unregister(struct net *net, const char *p) {}
+static inline void rpc_proc_zero(const struct rpc_program *p) {}
+
+static inline struct proc_dir_entry *svc_proc_register(struct net *net, struct svc_stat *s,
+ const struct file_operations *f) { return NULL; }
+static inline void svc_proc_unregister(struct net *net, const char *p) {}
+
+static inline void svc_seq_show(struct seq_file *seq,
+ const struct svc_stat *st) {}
+#endif
+
+#endif /* _LINUX_SUNRPC_STATS_H */
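
To show how the registration helpers above are meant to be used, here is a hedged sketch of a client-side program publishing its counters under /proc/net/rpc; the rpc_program definition and the demo_* names are placeholders supplied by the caller, not part of this header.

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/stats.h>

extern const struct rpc_program demo_program;	/* defined by the caller */

static struct rpc_stat demo_stats = {
	.program	= &demo_program,
};

static void demo_stats_init(struct net *net)
{
	/* Creates an entry named after the program; the stub used when
	 * CONFIG_PROC_FS is off simply returns NULL. */
	rpc_proc_register(net, &demo_stats);
}

static void demo_stats_exit(struct net *net)
{
	rpc_proc_unregister(net, demo_program.name);
}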
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
new file mode 100644
index 000000000..fae6fb947
--- /dev/null
+++ b/include/linux/sunrpc/svc.h
@@ -0,0 +1,468 @@
+/*
+ * linux/include/linux/sunrpc/svc.h
+ *
+ * RPC server declarations.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+
+#ifndef SUNRPC_SVC_H
+#define SUNRPC_SVC_H
+
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/sunrpc/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/auth.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/wait.h>
+#include <linux/mm.h>
+
+/*
+ * This is the RPC server thread function prototype
+ */
+typedef int (*svc_thread_fn)(void *);
+
+/* statistics for svc_pool structures */
+struct svc_pool_stats {
+ atomic_long_t packets;
+ unsigned long sockets_queued;
+ atomic_long_t threads_woken;
+ atomic_long_t threads_timedout;
+};
+
+/*
+ *
+ * RPC service thread pool.
+ *
+ * Pool of threads and temporary sockets. Generally there is only
+ * a single one of these per RPC service, but on NUMA machines those
+ * services that can benefit from it (i.e. nfs but not lockd) will
+ * have one pool per NUMA node. This optimisation reduces cross-
+ * node traffic on multi-node NUMA NFS servers.
+ */
+struct svc_pool {
+ unsigned int sp_id; /* pool id; also node id on NUMA */
+ spinlock_t sp_lock; /* protects all fields */
+ struct list_head sp_sockets; /* pending sockets */
+ unsigned int sp_nrthreads; /* # of threads in pool */
+ struct list_head sp_all_threads; /* all server threads */
+ struct svc_pool_stats sp_stats; /* statistics on pool operation */
+#define SP_TASK_PENDING (0) /* still work to do even if no
+ * xprt is queued. */
+ unsigned long sp_flags;
+} ____cacheline_aligned_in_smp;
+
+/*
+ * RPC service.
+ *
+ * An RPC service is a ``daemon,'' possibly multithreaded, which
+ * receives and processes incoming RPC messages.
+ * It has one or more transport sockets associated with it, and maintains
+ * a list of idle threads waiting for input.
+ *
+ * We currently do not support more than one RPC program per daemon.
+ */
+struct svc_serv {
+ struct svc_program * sv_program; /* RPC program */
+ struct svc_stat * sv_stats; /* RPC statistics */
+ spinlock_t sv_lock;
+ unsigned int sv_nrthreads; /* # of server threads */
+ unsigned int sv_maxconn; /* max connections allowed or
+ * '0' causing max to be based
+ * on number of threads. */
+
+ unsigned int sv_max_payload; /* datagram payload size */
+ unsigned int sv_max_mesg; /* max_payload + 1 page for overheads */
+ unsigned int sv_xdrsize; /* XDR buffer size */
+ struct list_head sv_permsocks; /* all permanent sockets */
+ struct list_head sv_tempsocks; /* all temporary sockets */
+ int sv_tmpcnt; /* count of temporary sockets */
+ struct timer_list sv_temptimer; /* timer for aging temporary sockets */
+
+ char * sv_name; /* service name */
+
+ unsigned int sv_nrpools; /* number of thread pools */
+ struct svc_pool * sv_pools; /* array of thread pools */
+
+ void (*sv_shutdown)(struct svc_serv *serv,
+ struct net *net);
+ /* Callback to use when last thread
+ * exits.
+ */
+
+ struct module * sv_module; /* optional module to count when
+ * adding threads */
+ svc_thread_fn sv_function; /* main function for threads */
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ struct list_head sv_cb_list; /* queue for callback requests
+ * that arrive over the same
+ * connection */
+ spinlock_t sv_cb_lock; /* protects the svc_cb_list */
+ wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
+ * entries in the svc_cb_list */
+ struct svc_xprt *sv_bc_xprt; /* callback on fore channel */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+};
+
+/*
+ * We use sv_nrthreads as a reference count. svc_destroy() drops
+ * this refcount, so we need to bump it up around operations that
+ * change the number of threads. Horrible, but there it is.
+ * Should be called with the "service mutex" held.
+ */
+static inline void svc_get(struct svc_serv *serv)
+{
+ serv->sv_nrthreads++;
+}
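/*
 * A sketch of the pattern described above (the caller's mutex and thread
 * count are assumptions for illustration, not part of this header): code
 * that changes the thread count pins the service first and lets
 * svc_destroy() drop that reference again afterwards.
 *
 *	mutex_lock(&demo_service_mutex);
 *	svc_get(serv);
 *	error = svc_set_num_threads(serv, NULL, nrservs);
 *	svc_destroy(serv);		// drops the reference taken by svc_get()
 *	mutex_unlock(&demo_service_mutex);
 */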
+
+/*
+ * Maximum payload size supported by a kernel RPC server.
+ * This is used to determine the max number of pages nfsd is
+ * willing to return in a single READ operation.
+ *
+ * These happen to all be powers of 2, which is not strictly
+ * necessary but helps enforce the real limitation, which is
+ * that they should be multiples of PAGE_CACHE_SIZE.
+ *
+ * For UDP transports, a block plus NFS, RPC, and UDP headers
+ * has to fit into the IP datagram limit of 64K. The largest
+ * feasible number for all known page sizes is probably 48K,
+ * but we choose 32K here. This is the same as the historical
+ * Linux limit; someone who cares more about NFS/UDP performance
+ * can test a larger number.
+ *
+ * For TCP transports we have more freedom. A size of 1MB is
+ * chosen to match the client limit. Other OSes are known to
+ * have larger limits, but those numbers are probably beyond
+ * the point of diminishing returns.
+ */
+#define RPCSVC_MAXPAYLOAD (1*1024*1024u)
+#define RPCSVC_MAXPAYLOAD_TCP RPCSVC_MAXPAYLOAD
+#define RPCSVC_MAXPAYLOAD_UDP (32*1024u)
+
+extern u32 svc_max_payload(const struct svc_rqst *rqstp);
+
+/*
+ * RPC Requests and replies are stored in one or more pages.
+ * We maintain an array of pages for each server thread.
+ * Requests are copied into these pages as they arrive. Remaining
+ * pages are available to write the reply into.
+ *
+ * Pages are sent using ->sendpage so each server thread needs to
+ * allocate more to replace those used in sending. To help keep track
+ * of these pages we have a receive list where all pages initially live,
+ * and a send list where pages are moved to when they are to be part
+ * of a reply.
+ *
+ * We use xdr_buf for holding responses as it fits well with NFS
+ * read responses (that have a header, and some data pages, and possibly
+ * a tail) and means we can share some client side routines.
+ *
+ * The xdr_buf.head kvec always points to the first page in the rq_*pages
+ * list. The xdr_buf.pages pointer points to the second page on that
+ * list. xdr_buf.tail points to the end of the first page.
+ * This assumes that the non-page part of an rpc reply will fit
+ * in a page - NFSd ensures this. lockd also has no trouble.
+ *
+ * Each request/reply pair can have at most one "payload", plus two pages,
+ * one for the request, and one for the reply.
+ * When using ->sendfile to return read data, we might need one extra page
+ * if the request is not page-aligned. So add another '1'.
+ */
+#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
+ + 2 + 1)
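/*
 * Worked example (assuming a 4K PAGE_SIZE, which this header does not
 * guarantee): RPCSVC_MAXPAYLOAD is 1MB, so the bound above comes out to
 * 1MB/4K + 2 + 1 = 259 pages per server thread.
 */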
+
+static inline u32 svc_getnl(struct kvec *iov)
+{
+ __be32 val, *vp;
+ vp = iov->iov_base;
+ val = *vp++;
+ iov->iov_base = (void*)vp;
+ iov->iov_len -= sizeof(__be32);
+ return ntohl(val);
+}
+
+static inline void svc_putnl(struct kvec *iov, u32 val)
+{
+ __be32 *vp = iov->iov_base + iov->iov_len;
+ *vp = htonl(val);
+ iov->iov_len += sizeof(__be32);
+}
+
+static inline __be32 svc_getu32(struct kvec *iov)
+{
+ __be32 val, *vp;
+ vp = iov->iov_base;
+ val = *vp++;
+ iov->iov_base = (void*)vp;
+ iov->iov_len -= sizeof(__be32);
+ return val;
+}
+
+static inline void svc_ungetu32(struct kvec *iov)
+{
+ __be32 *vp = (__be32 *)iov->iov_base;
+ iov->iov_base = (void *)(vp - 1);
+ iov->iov_len += sizeof(*vp);
+}
+
+static inline void svc_putu32(struct kvec *iov, __be32 val)
+{
+ __be32 *vp = iov->iov_base + iov->iov_len;
+ *vp = val;
+ iov->iov_len += sizeof(__be32);
+}
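/*
 * Hedged usage sketch for the helpers above (rqstp and the status value
 * are placeholders): dispatch code pulls 32-bit arguments off the head of
 * rq_arg and pushes 32-bit results onto the head of rq_res, e.g.
 *
 *	u32 vers = svc_getnl(&rqstp->rq_arg.head[0]);
 *	svc_putnl(&rqstp->rq_res.head[0], RPC_SUCCESS);
 */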
+
+/*
+ * The context of a single thread, including the request currently being
+ * processed.
+ */
+struct svc_rqst {
+ struct list_head rq_all; /* all threads list */
+ struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
+ struct svc_xprt * rq_xprt; /* transport ptr */
+
+ struct sockaddr_storage rq_addr; /* peer address */
+ size_t rq_addrlen;
+ struct sockaddr_storage rq_daddr; /* dest addr of request
+ * - reply from here */
+ size_t rq_daddrlen;
+
+ struct svc_serv * rq_server; /* RPC service definition */
+ struct svc_pool * rq_pool; /* thread pool */
+ struct svc_procedure * rq_procinfo; /* procedure info */
+ struct auth_ops * rq_authop; /* authentication flavour */
+ struct svc_cred rq_cred; /* auth info */
+ void * rq_xprt_ctxt; /* transport specific context ptr */
+ struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
+
+ size_t rq_xprt_hlen; /* xprt header len */
+ struct xdr_buf rq_arg;
+ struct xdr_buf rq_res;
+ struct page * rq_pages[RPCSVC_MAXPAGES];
+ struct page * *rq_respages; /* points into rq_pages */
+ struct page * *rq_next_page; /* next reply page to use */
+ struct page * *rq_page_end; /* one past the last page */
+
+ struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */
+
+ __be32 rq_xid; /* transmission id */
+ u32 rq_prog; /* program number */
+ u32 rq_vers; /* program version */
+ u32 rq_proc; /* procedure number */
+ u32 rq_prot; /* IP protocol */
+ int rq_cachetype; /* catering to nfsd */
+#define RQ_SECURE (0) /* secure port */
+#define RQ_LOCAL (1) /* local request */
+#define RQ_USEDEFERRAL (2) /* use deferral */
+#define RQ_DROPME (3) /* drop current reply */
+#define RQ_SPLICE_OK (4) /* turned off in gss privacy
+ * to prevent encrypting page
+ * cache pages */
+#define RQ_VICTIM (5) /* about to be shut down */
+#define RQ_BUSY (6) /* request is busy */
+ unsigned long rq_flags; /* flags field */
+
+ void * rq_argp; /* decoded arguments */
+ void * rq_resp; /* xdr'd results */
+ void * rq_auth_data; /* flavor-specific data */
+ int rq_auth_slack; /* extra space xdr code
+ * should leave in head
+ * for krb5i, krb5p.
+ */
+ int rq_reserved; /* space on socket outq
+ * reserved for this request
+ */
+
+ struct cache_req rq_chandle; /* handle passed to caches for
+ * request delaying
+ */
+ /* Catering to nfsd */
+ struct auth_domain * rq_client; /* RPC peer info */
+ struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
+ struct svc_cacherep * rq_cacherep; /* cache info */
+ struct task_struct *rq_task; /* service thread */
+ spinlock_t rq_lock; /* per-request lock */
+};
+
+#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
+
+/*
+ * Rigorous type checking on sockaddr type conversions
+ */
+static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr_in *) &rqst->rq_addr;
+}
+
+static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr_in6 *) &rqst->rq_addr;
+}
+
+static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr *) &rqst->rq_addr;
+}
+
+static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr_in *) &rqst->rq_daddr;
+}
+
+static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr_in6 *) &rqst->rq_daddr;
+}
+
+static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
+{
+ return (struct sockaddr *) &rqst->rq_daddr;
+}
+
+/*
+ * Check buffer bounds after decoding arguments
+ */
+static inline int
+xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
+{
+ char *cp = (char *)p;
+ struct kvec *vec = &rqstp->rq_arg.head[0];
+ return cp >= (char*)vec->iov_base
+ && cp <= (char*)vec->iov_base + vec->iov_len;
+}
+
+static inline int
+xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
+{
+ struct kvec *vec = &rqstp->rq_res.head[0];
+ char *cp = (char*)p;
+
+ vec->iov_len = cp - (char*)vec->iov_base;
+
+ return vec->iov_len <= PAGE_SIZE;
+}
+
+static inline void svc_free_res_pages(struct svc_rqst *rqstp)
+{
+ while (rqstp->rq_next_page != rqstp->rq_respages) {
+ struct page **pp = --rqstp->rq_next_page;
+ if (*pp) {
+ put_page(*pp);
+ *pp = NULL;
+ }
+ }
+}
+
+struct svc_deferred_req {
+ u32 prot; /* protocol (UDP or TCP) */
+ struct svc_xprt *xprt;
+ struct sockaddr_storage addr; /* where reply must go */
+ size_t addrlen;
+ struct sockaddr_storage daddr; /* where reply must come from */
+ size_t daddrlen;
+ struct cache_deferred_req handle;
+ size_t xprt_hlen;
+ int argslen;
+ __be32 args[0];
+};
+
+/*
+ * List of RPC programs on the same transport endpoint
+ */
+struct svc_program {
+ struct svc_program * pg_next; /* other programs (same xprt) */
+ u32 pg_prog; /* program number */
+ unsigned int pg_lovers; /* lowest version */
+ unsigned int pg_hivers; /* highest version */
+ unsigned int pg_nvers; /* number of versions */
+ struct svc_version ** pg_vers; /* version array */
+ char * pg_name; /* service name */
+ char * pg_class; /* class name: services sharing authentication */
+ struct svc_stat * pg_stats; /* rpc statistics */
+ int (*pg_authenticate)(struct svc_rqst *);
+};
+
+/*
+ * RPC program version
+ */
+struct svc_version {
+ u32 vs_vers; /* version number */
+ u32 vs_nproc; /* number of procedures */
+ struct svc_procedure * vs_proc; /* per-procedure info */
+ u32 vs_xdrsize; /* xdrsize needed for this version */
+
+ unsigned int vs_hidden : 1, /* Don't register with portmapper.
+ * Only used for nfsacl so far. */
+ vs_rpcb_optnl:1;/* Don't care about the result of registration.
+ * Only used for nfsv4. */
+
+ /* Override dispatch function (e.g. when caching replies).
+ * A return value of 0 means drop the request.
+ * vs_dispatch == NULL means use default dispatcher.
+ */
+ int (*vs_dispatch)(struct svc_rqst *, __be32 *);
+};
+
+/*
+ * RPC procedure info
+ */
+typedef __be32 (*svc_procfunc)(struct svc_rqst *, void *argp, void *resp);
+struct svc_procedure {
+ svc_procfunc pc_func; /* process the request */
+ kxdrproc_t pc_decode; /* XDR decode args */
+ kxdrproc_t pc_encode; /* XDR encode result */
+ kxdrproc_t pc_release; /* XDR free result */
+ unsigned int pc_argsize; /* argument struct size */
+ unsigned int pc_ressize; /* result struct size */
+ unsigned int pc_count; /* call count */
+ unsigned int pc_cachetype; /* cache info (NFS) */
+ unsigned int pc_xdrressize; /* maximum size of XDR reply */
+};
+
+/*
+ * Function prototypes.
+ */
+int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
+void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
+int svc_bind(struct svc_serv *serv, struct net *net);
+struct svc_serv *svc_create(struct svc_program *, unsigned int,
+ void (*shutdown)(struct svc_serv *, struct net *net));
+struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
+ struct svc_pool *pool, int node);
+void svc_exit_thread(struct svc_rqst *);
+struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
+ void (*shutdown)(struct svc_serv *, struct net *net),
+ svc_thread_fn, struct module *);
+int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
+int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+void svc_destroy(struct svc_serv *);
+void svc_shutdown_net(struct svc_serv *, struct net *);
+int svc_process(struct svc_rqst *);
+int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
+ struct svc_rqst *);
+int svc_register(const struct svc_serv *, struct net *, const int,
+ const unsigned short, const unsigned short);
+
+void svc_wake_up(struct svc_serv *);
+void svc_reserve(struct svc_rqst *rqstp, int space);
+struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
+char * svc_print_addr(struct svc_rqst *, char *, size_t);
+
+#define RPC_MAX_ADDRBUFLEN (63U)
+
+/*
+ * When we want to reduce the size of the reserved space in the response
+ * buffer, we need to take into account the size of any checksum data that
+ * may be at the end of the packet. This is difficult to determine exactly
+ * for all cases without actually generating the checksum, so we just use a
+ * static value.
+ */
+static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
+{
+ svc_reserve(rqstp, space + rqstp->rq_auth_slack);
+}
+
+#endif /* SUNRPC_SVC_H */
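
Pulling the svc.h pieces together: a server normally creates a pooled service, binds it into a network namespace and then asks for a number of threads, each of which loops over svc_recv()/svc_process(). The sketch below compresses that flow; the program, buffer size, thread count and error handling are assumptions rather than excerpts from any in-tree server.

#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>

extern struct svc_program demo_program;		/* hypothetical program */

static int demo_thread(void *data)		/* matches svc_thread_fn */
{
	struct svc_rqst *rqstp = data;
	int err;

	for (;;) {
		/* Wait up to a minute for a request, then dispatch it. */
		err = svc_recv(rqstp, 60 * HZ);
		if (err == -EAGAIN)
			continue;
		if (err < 0)		/* e.g. -EINTR when being shut down */
			break;
		svc_process(rqstp);
	}
	svc_exit_thread(rqstp);
	return 0;
}

static struct svc_serv *demo_start(struct net *net)
{
	struct svc_serv *serv;

	serv = svc_create_pooled(&demo_program, 64 * 1024, NULL,
				 demo_thread, THIS_MODULE);
	if (!serv)
		return NULL;
	if (svc_bind(serv, net) < 0 ||
	    svc_set_num_threads(serv, NULL, 4) < 0) {
		svc_destroy(serv);
		return NULL;
	}
	return serv;
}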
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
new file mode 100644
index 000000000..df8edf8ec
--- /dev/null
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -0,0 +1,315 @@
+/*
+ * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the BSD-type
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * Neither the name of the Network Appliance, Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Tom Tucker <tom@opengridcomputing.com>
+ */
+
+#ifndef SVC_RDMA_H
+#define SVC_RDMA_H
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/rpc_rdma.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#define SVCRDMA_DEBUG
+
+/* RPC/RDMA parameters and stats */
+extern unsigned int svcrdma_ord;
+extern unsigned int svcrdma_max_requests;
+extern unsigned int svcrdma_max_req_size;
+
+extern atomic_t rdma_stat_recv;
+extern atomic_t rdma_stat_read;
+extern atomic_t rdma_stat_write;
+extern atomic_t rdma_stat_sq_starve;
+extern atomic_t rdma_stat_rq_starve;
+extern atomic_t rdma_stat_rq_poll;
+extern atomic_t rdma_stat_rq_prod;
+extern atomic_t rdma_stat_sq_poll;
+extern atomic_t rdma_stat_sq_prod;
+
+/*
+ * Contexts are built when an RDMA request is created and are a
+ * record of the resources that can be recovered when the request
+ * completes.
+ */
+struct svc_rdma_op_ctxt {
+ struct svc_rdma_op_ctxt *read_hdr;
+ struct svc_rdma_fastreg_mr *frmr;
+ int hdr_count;
+ struct xdr_buf arg;
+ struct list_head dto_q;
+ enum ib_wr_opcode wr_op;
+ enum ib_wc_status wc_status;
+ u32 byte_len;
+ u32 position;
+ struct svcxprt_rdma *xprt;
+ unsigned long flags;
+ enum dma_data_direction direction;
+ int count;
+ struct ib_sge sge[RPCSVC_MAXPAGES];
+ struct page *pages[RPCSVC_MAXPAGES];
+};
+
+/*
+ * NFS requests are mapped on the client side by the chunk lists in
+ * the RPCRDMA header. During the fetching of the RPC from the client
+ * and the writing of the reply to the client, the memory in the
+ * client and the memory in the server must be mapped as contiguous
+ * vaddr/len for access by the hardware. These data structures keep
+ * these mappings.
+ *
+ * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the
+ * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
+ * 'ch' field maps the read-list of the RPCRDMA header to the 'sge'
+ * mapping of the reply.
+ */
+struct svc_rdma_chunk_sge {
+ int start; /* sge no for this chunk */
+ int count; /* sge count for this chunk */
+};
+struct svc_rdma_fastreg_mr {
+ struct ib_mr *mr;
+ void *kva;
+ struct ib_fast_reg_page_list *page_list;
+ int page_list_len;
+ unsigned long access_flags;
+ unsigned long map_len;
+ enum dma_data_direction direction;
+ struct list_head frmr_list;
+};
+struct svc_rdma_req_map {
+ unsigned long count;
+ union {
+ struct kvec sge[RPCSVC_MAXPAGES];
+ struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
+ unsigned long lkey[RPCSVC_MAXPAGES];
+ };
+};
+#define RDMACTXT_F_LAST_CTXT 2
+
+#define SVCRDMA_DEVCAP_FAST_REG 1 /* fast mr registration */
+#define SVCRDMA_DEVCAP_READ_W_INV 2 /* read w/ invalidate */
+
+struct svcxprt_rdma {
+ struct svc_xprt sc_xprt; /* SVC transport structure */
+ struct rdma_cm_id *sc_cm_id; /* RDMA connection id */
+ struct list_head sc_accept_q; /* Conn. waiting accept */
+ int sc_ord; /* RDMA read limit */
+ int sc_max_sge;
+
+ int sc_sq_depth; /* Depth of SQ */
+ atomic_t sc_sq_count; /* Number of SQ WR on queue */
+
+ int sc_max_requests; /* Depth of RQ */
+ int sc_max_req_size; /* Size of each RQ WR buf */
+
+ struct ib_pd *sc_pd;
+
+ atomic_t sc_dma_used;
+ atomic_t sc_ctxt_used;
+ struct list_head sc_rq_dto_q;
+ spinlock_t sc_rq_dto_lock;
+ struct ib_qp *sc_qp;
+ struct ib_cq *sc_rq_cq;
+ struct ib_cq *sc_sq_cq;
+ struct ib_mr *sc_phys_mr; /* MR for server memory */
+ int (*sc_reader)(struct svcxprt_rdma *,
+ struct svc_rqst *,
+ struct svc_rdma_op_ctxt *,
+ int *, u32 *, u32, u32, u64, bool);
+ u32 sc_dev_caps; /* distilled device caps */
+ u32 sc_dma_lkey; /* local dma key */
+ unsigned int sc_frmr_pg_list_len;
+ struct list_head sc_frmr_q;
+ spinlock_t sc_frmr_q_lock;
+
+ spinlock_t sc_lock; /* transport lock */
+
+ wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */
+ unsigned long sc_flags;
+ struct list_head sc_dto_q; /* DTO tasklet I/O pending Q */
+ struct list_head sc_read_complete_q;
+ struct work_struct sc_work;
+};
+/* sc_flags */
+#define RDMAXPRT_RQ_PENDING 1
+#define RDMAXPRT_SQ_PENDING 2
+#define RDMAXPRT_CONN_PENDING 3
+
+#define RPCRDMA_LISTEN_BACKLOG 10
+/* The default ORD value is based on two outstanding full-size writes with a
+ * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */
+#define RPCRDMA_ORD (64/4)
+#define RPCRDMA_SQ_DEPTH_MULT 8
+#define RPCRDMA_MAX_REQUESTS 32
+#define RPCRDMA_MAX_REQ_SIZE 4096
+
+/* svc_rdma_marshal.c */
+extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
+extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *);
+extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
+ struct rpcrdma_msg *,
+ enum rpcrdma_errcode, u32 *);
+extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
+extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
+extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
+ __be32, __be64, u32);
+extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *,
+ struct rpcrdma_msg *,
+ struct rpcrdma_msg *,
+ enum rpcrdma_proc);
+extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *);
+
+/* svc_rdma_recvfrom.c */
+extern int svc_rdma_recvfrom(struct svc_rqst *);
+extern int rdma_read_chunk_lcl(struct svcxprt_rdma *, struct svc_rqst *,
+ struct svc_rdma_op_ctxt *, int *, u32 *,
+ u32, u32, u64, bool);
+extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
+ struct svc_rdma_op_ctxt *, int *, u32 *,
+ u32, u32, u64, bool);
+
+/* svc_rdma_sendto.c */
+extern int svc_rdma_sendto(struct svc_rqst *);
+
+/* svc_rdma_transport.c */
+extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
+extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
+ enum rpcrdma_errcode);
+struct page *svc_rdma_get_page(void);
+extern int svc_rdma_post_recv(struct svcxprt_rdma *);
+extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
+extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
+extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
+extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
+extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
+extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
+extern int svc_rdma_fastreg(struct svcxprt_rdma *, struct svc_rdma_fastreg_mr *);
+extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
+extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
+ struct svc_rdma_fastreg_mr *);
+extern void svc_sq_reap(struct svcxprt_rdma *);
+extern void svc_rq_reap(struct svcxprt_rdma *);
+extern struct svc_xprt_class svc_rdma_class;
+extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
+
+/* svc_rdma.c */
+extern int svc_rdma_init(void);
+extern void svc_rdma_cleanup(void);
+
+/*
+ * Returns the address of the first read chunk or <nul> if no read chunk is
+ * present
+ */
+static inline struct rpcrdma_read_chunk *
+svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
+{
+ struct rpcrdma_read_chunk *ch =
+ (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
+
+ if (ch->rc_discrim == 0)
+ return NULL;
+
+ return ch;
+}
+
+/*
+ * Returns the address of the first write array element or <nul> if no
+ * write array list is present
+ */
+static inline struct rpcrdma_write_array *
+svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
+{
+ if (rmsgp->rm_body.rm_chunks[0] != 0
+ || rmsgp->rm_body.rm_chunks[1] == 0)
+ return NULL;
+
+ return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
+}
+
+/*
+ * Returns the address of the first reply array element or <nul> if no
+ * reply array is present
+ */
+static inline struct rpcrdma_write_array *
+svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
+{
+ struct rpcrdma_read_chunk *rch;
+ struct rpcrdma_write_array *wr_ary;
+ struct rpcrdma_write_array *rp_ary;
+
+ /* XXX: Need to fix when reply list may occur with read-list and/or
+ * write list */
+ if (rmsgp->rm_body.rm_chunks[0] != 0 ||
+ rmsgp->rm_body.rm_chunks[1] != 0)
+ return NULL;
+
+ rch = svc_rdma_get_read_chunk(rmsgp);
+ if (rch) {
+ while (rch->rc_discrim)
+ rch++;
+
+ /* The reply list follows an empty write array located
+ * at 'rc_position' here. The reply array is at rc_target.
+ */
+ rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
+
+ goto found_it;
+ }
+
+ wr_ary = svc_rdma_get_write_array(rmsgp);
+ if (wr_ary) {
+ rp_ary = (struct rpcrdma_write_array *)
+ &wr_ary->
+ wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
+
+ goto found_it;
+ }
+
+ /* No read list, no write list */
+ rp_ary = (struct rpcrdma_write_array *)
+ &rmsgp->rm_body.rm_chunks[2];
+
+ found_it:
+ if (rp_ary->wc_discrim == 0)
+ return NULL;
+
+ return rp_ary;
+}
+#endif
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
new file mode 100644
index 000000000..79f6f8f3d
--- /dev/null
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -0,0 +1,210 @@
+/*
+ * linux/include/linux/sunrpc/svc_xprt.h
+ *
+ * RPC server transport I/O
+ */
+
+#ifndef SUNRPC_SVC_XPRT_H
+#define SUNRPC_SVC_XPRT_H
+
+#include <linux/sunrpc/svc.h>
+
+struct module;
+
+struct svc_xprt_ops {
+ struct svc_xprt *(*xpo_create)(struct svc_serv *,
+ struct net *net,
+ struct sockaddr *, int,
+ int);
+ struct svc_xprt *(*xpo_accept)(struct svc_xprt *);
+ int (*xpo_has_wspace)(struct svc_xprt *);
+ int (*xpo_recvfrom)(struct svc_rqst *);
+ void (*xpo_prep_reply_hdr)(struct svc_rqst *);
+ int (*xpo_sendto)(struct svc_rqst *);
+ void (*xpo_release_rqst)(struct svc_rqst *);
+ void (*xpo_detach)(struct svc_xprt *);
+ void (*xpo_free)(struct svc_xprt *);
+ int (*xpo_secure_port)(struct svc_rqst *);
+ void (*xpo_adjust_wspace)(struct svc_xprt *);
+};
+
+struct svc_xprt_class {
+ const char *xcl_name;
+ struct module *xcl_owner;
+ struct svc_xprt_ops *xcl_ops;
+ struct list_head xcl_list;
+ u32 xcl_max_payload;
+ int xcl_ident;
+};
+
+/*
+ * This is embedded in an object that wants a callback before deleting
+ * an xprt; intended for use by NFSv4.1, which needs to know when a
+ * client's tcp connection (and hence possibly a backchannel) goes away.
+ */
+struct svc_xpt_user {
+ struct list_head list;
+ void (*callback)(struct svc_xpt_user *);
+};
+
+struct svc_xprt {
+ struct svc_xprt_class *xpt_class;
+ struct svc_xprt_ops *xpt_ops;
+ struct kref xpt_ref;
+ struct list_head xpt_list;
+ struct list_head xpt_ready;
+ unsigned long xpt_flags;
+#define XPT_BUSY 0 /* enqueued/receiving */
+#define XPT_CONN 1 /* conn pending */
+#define XPT_CLOSE 2 /* dead or dying */
+#define XPT_DATA 3 /* data pending */
+#define XPT_TEMP 4 /* connected transport */
+#define XPT_DEAD 6 /* transport closed */
+#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
+#define XPT_DEFERRED 8 /* deferred request pending */
+#define XPT_OLD 9 /* used for xprt aging mark+sweep */
+#define XPT_LISTENER 10 /* listening endpoint */
+#define XPT_CACHE_AUTH 11 /* cache auth info */
+#define XPT_LOCAL 12 /* connection from loopback interface */
+
+ struct svc_serv *xpt_server; /* service for transport */
+ atomic_t xpt_reserved; /* space on outq that is rsvd */
+ struct mutex xpt_mutex; /* to serialize sending data */
+ spinlock_t xpt_lock; /* protects sk_deferred
+ * and xpt_auth_cache */
+ void *xpt_auth_cache;/* auth cache */
+ struct list_head xpt_deferred; /* deferred requests that need
+ * to be revisited */
+ struct sockaddr_storage xpt_local; /* local address */
+ size_t xpt_locallen; /* length of address */
+ struct sockaddr_storage xpt_remote; /* remote peer's address */
+ size_t xpt_remotelen; /* length of address */
+ struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
+ struct list_head xpt_users; /* callbacks on free */
+
+ struct net *xpt_net;
+ struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
+};
+
+static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+{
+ spin_lock(&xpt->xpt_lock);
+ list_del_init(&u->list);
+ spin_unlock(&xpt->xpt_lock);
+}
+
+static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+{
+ spin_lock(&xpt->xpt_lock);
+ if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) {
+ /*
+ * The connection is about to be deleted soon (or,
+ * worse, may already be deleted--in which case we've
+ * already notified the xpt_users).
+ */
+ spin_unlock(&xpt->xpt_lock);
+ return -ENOTCONN;
+ }
+ list_add(&u->list, &xpt->xpt_users);
+ spin_unlock(&xpt->xpt_lock);
+ return 0;
+}
+
+int svc_reg_xprt_class(struct svc_xprt_class *);
+void svc_unreg_xprt_class(struct svc_xprt_class *);
+void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *,
+ struct svc_serv *);
+int svc_create_xprt(struct svc_serv *, const char *, struct net *,
+ const int, const unsigned short, int);
+void svc_xprt_enqueue(struct svc_xprt *xprt);
+void svc_xprt_put(struct svc_xprt *xprt);
+void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt);
+void svc_close_xprt(struct svc_xprt *xprt);
+int svc_port_is_privileged(struct sockaddr *sin);
+int svc_print_xprts(char *buf, int maxlen);
+struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
+ struct net *net, const sa_family_t af,
+ const unsigned short port);
+int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen);
+void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *xprt);
+
+static inline void svc_xprt_get(struct svc_xprt *xprt)
+{
+ kref_get(&xprt->xpt_ref);
+}
+static inline void svc_xprt_set_local(struct svc_xprt *xprt,
+ const struct sockaddr *sa,
+ const size_t salen)
+{
+ memcpy(&xprt->xpt_local, sa, salen);
+ xprt->xpt_locallen = salen;
+}
+static inline void svc_xprt_set_remote(struct svc_xprt *xprt,
+ const struct sockaddr *sa,
+ const size_t salen)
+{
+ memcpy(&xprt->xpt_remote, sa, salen);
+ xprt->xpt_remotelen = salen;
+}
+static inline unsigned short svc_addr_port(const struct sockaddr *sa)
+{
+ const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
+ const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;
+
+ switch (sa->sa_family) {
+ case AF_INET:
+ return ntohs(sin->sin_port);
+ case AF_INET6:
+ return ntohs(sin6->sin6_port);
+ }
+
+ return 0;
+}
+
+static inline size_t svc_addr_len(const struct sockaddr *sa)
+{
+ switch (sa->sa_family) {
+ case AF_INET:
+ return sizeof(struct sockaddr_in);
+ case AF_INET6:
+ return sizeof(struct sockaddr_in6);
+ }
+ BUG();
+}
+
+static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt)
+{
+ return svc_addr_port((const struct sockaddr *)&xprt->xpt_local);
+}
+
+static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt)
+{
+ return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote);
+}
+
+static inline char *__svc_print_addr(const struct sockaddr *addr,
+ char *buf, const size_t len)
+{
+ const struct sockaddr_in *sin = (const struct sockaddr_in *)addr;
+ const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr,
+ ntohs(sin->sin_port));
+ break;
+
+ case AF_INET6:
+ snprintf(buf, len, "%pI6, port=%u",
+ &sin6->sin6_addr,
+ ntohs(sin6->sin6_port));
+ break;
+
+ default:
+ snprintf(buf, len, "unknown address type: %d", addr->sa_family);
+ break;
+ }
+
+ return buf;
+}
+#endif /* SUNRPC_SVC_XPRT_H */
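
The svc_xpt_user hook declared above is how a layer such as the NFSv4.1 backchannel learns that a transport is going away. A brief sketch of an embedding object and its callback; the demo_* names are illustrative only.

#include <linux/kernel.h>
#include <linux/sunrpc/svc_xprt.h>

struct demo_conn {
	struct svc_xprt		*xprt;
	struct svc_xpt_user	xpt_user;
};

static void demo_conn_lost(struct svc_xpt_user *u)
{
	struct demo_conn *conn = container_of(u, struct demo_conn, xpt_user);

	/* The transport is going away; tear down whatever depends on it. */
	pr_debug("transport %p for connection %p is closing\n",
		 conn->xprt, conn);
}

static int demo_conn_track(struct demo_conn *conn, struct svc_xprt *xprt)
{
	conn->xprt = xprt;
	conn->xpt_user.callback = demo_conn_lost;
	/* Returns -ENOTCONN if the transport is already being closed. */
	return register_xpt_user(xprt, &conn->xpt_user);
}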
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
new file mode 100644
index 000000000..8d71d6577
--- /dev/null
+++ b/include/linux/sunrpc/svcauth.h
@@ -0,0 +1,200 @@
+/*
+ * linux/include/linux/sunrpc/svcauth.h
+ *
+ * RPC server-side authentication stuff.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_SVCAUTH_H_
+#define _LINUX_SUNRPC_SVCAUTH_H_
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/sunrpc/gss_api.h>
+#include <linux/hash.h>
+#include <linux/cred.h>
+
+struct svc_cred {
+ kuid_t cr_uid;
+ kgid_t cr_gid;
+ struct group_info *cr_group_info;
+ u32 cr_flavor; /* pseudoflavor */
+ char *cr_principal; /* for gss */
+ struct gss_api_mech *cr_gss_mech;
+};
+
+static inline void init_svc_cred(struct svc_cred *cred)
+{
+ cred->cr_group_info = NULL;
+ cred->cr_principal = NULL;
+ cred->cr_gss_mech = NULL;
+}
+
+static inline void free_svc_cred(struct svc_cred *cred)
+{
+ if (cred->cr_group_info)
+ put_group_info(cred->cr_group_info);
+ kfree(cred->cr_principal);
+ gss_mech_put(cred->cr_gss_mech);
+ init_svc_cred(cred);
+}
+
+struct svc_rqst; /* forward decl */
+struct in6_addr;
+
+/* Authentication is done in the context of a domain.
+ *
+ * Currently, the nfs server uses the auth_domain to stand
+ * for the "client" listed in /etc/exports.
+ *
+ * More generally, a domain might represent a group of clients using
+ * a common mechanism for authentication and having a common mapping
+ * between local identity (uid) and network identity. All clients
+ * in a domain have similar general access rights. Each domain can
+ * contain multiple principals which will have different specific right
+ * based on normal Discretionary Access Control.
+ *
+ * A domain is created by an authentication flavour module based on name
+ * only. Userspace then fills in detail on demand.
+ *
+ * In the case of auth_unix and auth_null, the auth_domain is also
+ * associated with entries in another cache representing the mapping
+ * of ip addresses to the given client.
+ */
+struct auth_domain {
+ struct kref ref;
+ struct hlist_node hash;
+ char *name;
+ struct auth_ops *flavour;
+};
+
+/*
+ * Each authentication flavour registers an auth_ops
+ * structure.
+ * name is simply the name.
+ * flavour gives the auth flavour. It determines where the flavour is registered.
+ * accept() is given a request and should verify it.
+ * It should inspect the authenticator and verifier, and possibly the data.
+ * If there is a problem with the authentication *authp should be set.
+ * The return value of accept() can indicate:
+ * OK - authorised. client and credential are set in rqstp.
+ * reqbuf points to arguments
+ * resbuf points to a good place for results. The verifier
+ * is (probably) already in place. Certainly space is
+ * reserved for it.
+ * DROP - simply drop the request. It may have been deferred
+ * GARBAGE - rpc garbage_args error
+ * SYSERR - rpc system_err error
+ * DENIED - authp holds reason for denial.
+ * COMPLETE - the reply is encoded already and ready to be sent; no
+ * further processing is necessary. (This is used for processing
+ * null procedure calls which are used to set up encryption
+ * contexts.)
+ *
+ * accept is passed the proc number so that it can accept NULL rpc requests
+ * even if it cannot authenticate the client (as is sometimes appropriate).
+ *
+ * release() is given a request after the procedure has been run.
+ * It should sign/encrypt the results if needed
+ * It should return:
+ * OK - the resbuf is ready to be sent
+ * DROP - the reply should be quietly dropped
+ * DENIED - authp holds a reason for MSG_DENIED
+ * SYSERR - rpc system_err
+ *
+ * domain_release()
+ * This call releases a domain.
+ * set_client()
+ * Given a pending request (struct svc_rqst), finds and assigns
+ * an appropriate 'auth_domain' as the client.
+ */
+struct auth_ops {
+ char * name;
+ struct module *owner;
+ int flavour;
+ int (*accept)(struct svc_rqst *rq, __be32 *authp);
+ int (*release)(struct svc_rqst *rq);
+ void (*domain_release)(struct auth_domain *);
+ int (*set_client)(struct svc_rqst *rq);
+};
+
+#define SVC_GARBAGE 1
+#define SVC_SYSERR 2
+#define SVC_VALID 3
+#define SVC_NEGATIVE 4
+#define SVC_OK 5
+#define SVC_DROP 6
+#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely
+ * lost so if there is a tcp connection, it
+ * should be closed
+ */
+#define SVC_DENIED 8
+#define SVC_PENDING 9
+#define SVC_COMPLETE 10
+
+struct svc_xprt;
+
+extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp);
+extern int svc_authorise(struct svc_rqst *rqstp);
+extern int svc_set_client(struct svc_rqst *rqstp);
+extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
+extern void svc_auth_unregister(rpc_authflavor_t flavor);
+
+extern struct auth_domain *unix_domain_find(char *name);
+extern void auth_domain_put(struct auth_domain *item);
+extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom);
+extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
+extern struct auth_domain *auth_domain_find(char *name);
+extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
+extern int auth_unix_forget_old(struct auth_domain *dom);
+extern void svcauth_unix_purge(struct net *net);
+extern void svcauth_unix_info_release(struct svc_xprt *xpt);
+extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
+
+extern int unix_gid_cache_create(struct net *net);
+extern void unix_gid_cache_destroy(struct net *net);
+
+static inline unsigned long hash_str(char *name, int bits)
+{
+ unsigned long hash = 0;
+ unsigned long l = 0;
+ int len = 0;
+ unsigned char c;
+ do {
+ if (unlikely(!(c = *name++))) {
+ c = (char)len; len = -1;
+ }
+ l = (l << 8) | c;
+ len++;
+ if ((len & (BITS_PER_LONG/8-1))==0)
+ hash = hash_long(hash^l, BITS_PER_LONG);
+ } while (len);
+ return hash >> (BITS_PER_LONG - bits);
+}
+
+static inline unsigned long hash_mem(char *buf, int length, int bits)
+{
+ unsigned long hash = 0;
+ unsigned long l = 0;
+ int len = 0;
+ unsigned char c;
+ do {
+ if (len == length) {
+ c = (char)len; len = -1;
+ } else
+ c = *buf++;
+ l = (l << 8) | c;
+ len++;
+ if ((len & (BITS_PER_LONG/8-1))==0)
+ hash = hash_long(hash^l, BITS_PER_LONG);
+ } while (len);
+ return hash >> (BITS_PER_LONG - bits);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SUNRPC_SVCAUTH_H_ */
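
To make the accept()/release() contract above concrete, here is a skeletal flavour that waves every request through; the demo_* names are invented, and a real flavour would of course parse the credential, fill in rq_cred and write a proper verifier.

#include <linux/module.h>
#include <linux/sunrpc/svcauth.h>

static int demo_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	/* A real flavour inspects the credential and verifier in
	 * rqstp->rq_arg, fills in rqstp->rq_cred and writes a verifier
	 * into rqstp->rq_res. This toy version accepts everything. */
	*authp = rpc_auth_ok;
	return SVC_OK;
}

static int demo_release(struct svc_rqst *rqstp)
{
	/* Nothing to sign or encrypt for this flavour. */
	return SVC_OK;
}

static struct auth_ops demo_auth_ops = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,	/* placeholder flavour number */
	.accept		= demo_accept,
	.release	= demo_release,
	.set_client	= svcauth_unix_set_client,
};

A structure like this would be handed to svc_auth_register(), keyed by its flavour number, and removed again with svc_auth_unregister().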
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
new file mode 100644
index 000000000..726aff1a5
--- /dev/null
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -0,0 +1,27 @@
+/*
+ * linux/include/linux/sunrpc/svcauth_gss.h
+ *
+ * Bruce Fields <bfields@umich.edu>
+ * Copyright (c) 2002 The Regents of the University of Michigan
+ */
+
+#ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H
+#define _LINUX_SUNRPC_SVCAUTH_GSS_H
+
+#ifdef __KERNEL__
+#include <linux/sched.h>
+#include <linux/sunrpc/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/auth_gss.h>
+
+int gss_svc_init(void);
+void gss_svc_shutdown(void);
+int gss_svc_init_net(struct net *net);
+void gss_svc_shutdown_net(struct net *net);
+int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
+u32 svcauth_gss_flavor(struct auth_domain *dom);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
new file mode 100644
index 000000000..2e780134f
--- /dev/null
+++ b/include/linux/sunrpc/svcsock.h
@@ -0,0 +1,74 @@
+/*
+ * linux/include/linux/sunrpc/svcsock.h
+ *
+ * RPC server socket I/O.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef SUNRPC_SVCSOCK_H
+#define SUNRPC_SVCSOCK_H
+
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svc_xprt.h>
+
+/*
+ * RPC server socket.
+ */
+struct svc_sock {
+ struct svc_xprt sk_xprt;
+ struct socket * sk_sock; /* berkeley socket layer */
+ struct sock * sk_sk; /* INET layer */
+
+ /* We keep the old state_change and data_ready CB's here */
+ void (*sk_ostate)(struct sock *);
+ void (*sk_odata)(struct sock *);
+ void (*sk_owspace)(struct sock *);
+
+ /* private TCP part */
+ /* On-the-wire fragment header: */
+ __be32 sk_reclen;
+ /* As we receive a record, this includes the length received so
+ * far (including the fragment header): */
+ u32 sk_tcplen;
+ /* Total length of the data (not including fragment headers)
+ * received so far in the fragments making up this rpc: */
+ u32 sk_datalen;
+
+ struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
+};
+
+static inline u32 svc_sock_reclen(struct svc_sock *svsk)
+{
+ return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK;
+}
+
+static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
+{
+ return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT;
+}
+
+/*
+ * Function prototypes.
+ */
+void svc_close_net(struct svc_serv *, struct net *);
+int svc_recv(struct svc_rqst *, long);
+int svc_send(struct svc_rqst *);
+void svc_drop(struct svc_rqst *);
+void svc_sock_update_bufs(struct svc_serv *serv);
+bool svc_alien_sock(struct net *net, int fd);
+int svc_addsock(struct svc_serv *serv, const int fd,
+ char *name_return, const size_t len);
+void svc_init_xprt_sock(void);
+void svc_cleanup_xprt_sock(void);
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
+void svc_sock_destroy(struct svc_xprt *);
+
+/*
+ * svc_makesock socket characteristics
+ */
+#define SVC_SOCK_DEFAULTS (0U)
+#define SVC_SOCK_ANONYMOUS (1U << 0) /* don't register with pmap */
+#define SVC_SOCK_TEMPORARY (1U << 1) /* flag socket as temporary */
+
+#endif /* SUNRPC_SVCSOCK_H */
diff --git a/include/linux/sunrpc/timer.h b/include/linux/sunrpc/timer.h
new file mode 100644
index 000000000..697d6e69d
--- /dev/null
+++ b/include/linux/sunrpc/timer.h
@@ -0,0 +1,49 @@
+/*
+ * linux/include/linux/sunrpc/timer.h
+ *
+ * Declarations for the RPC transport timer.
+ *
+ * Copyright (C) 2002 Trond Myklebust <trond.myklebust@fys.uio.no>
+ */
+
+#ifndef _LINUX_SUNRPC_TIMER_H
+#define _LINUX_SUNRPC_TIMER_H
+
+#include <linux/atomic.h>
+
+struct rpc_rtt {
+ unsigned long timeo; /* default timeout value */
+ unsigned long srtt[5]; /* smoothed round trip time << 3 */
+ unsigned long sdrtt[5]; /* smoothed mean deviation of RTT */
+ int ntimeouts[5]; /* Number of timeouts for the last request */
+};
+
+
+extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
+extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
+extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);
+
+static inline void rpc_set_timeo(struct rpc_rtt *rt, int timer, int ntimeo)
+{
+ int *t;
+ if (!timer)
+ return;
+ t = &rt->ntimeouts[timer-1];
+ if (ntimeo < *t) {
+ if (*t > 0)
+ (*t)--;
+ } else {
+ if (ntimeo > 8)
+ ntimeo = 8;
+ *t = ntimeo;
+ }
+}
+
+static inline int rpc_ntimeo(struct rpc_rtt *rt, int timer)
+{
+ if (!timer)
+ return 0;
+ return rt->ntimeouts[timer-1];
+}
+
+#endif /* _LINUX_SUNRPC_TIMER_H */
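
The estimator above is consumed in a feed-then-ask pattern: each completed request contributes a sample via rpc_update_rtt(), and the next timeout is derived with rpc_calc_rto(). A small sketch, assuming the caller already knows which of the five timer slots the request belongs to:

#include <linux/sunrpc/timer.h>

static unsigned long demo_next_timeout(struct rpc_rtt *rtt, unsigned int timer,
					long measured_rtt)
{
	/* Fold the new round-trip sample into the smoothed estimates... */
	rpc_update_rtt(rtt, timer, measured_rtt);
	/* ...then derive a fresh retransmit timeout from them. */
	return rpc_calc_rto(rtt, timer);
}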
diff --git a/include/linux/sunrpc/types.h b/include/linux/sunrpc/types.h
new file mode 100644
index 000000000..d222f4755
--- /dev/null
+++ b/include/linux/sunrpc/types.h
@@ -0,0 +1,22 @@
+/*
+ * linux/include/linux/sunrpc/types.h
+ *
+ * Generic types and misc stuff for RPC.
+ *
+ * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_TYPES_H_
+#define _LINUX_SUNRPC_TYPES_H_
+
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/sunrpc/debug.h>
+#include <linux/list.h>
+
+/*
+ * Shorthands
+ */
+#define signalled() (signal_pending(current))
+
+#endif /* _LINUX_SUNRPC_TYPES_H_ */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
new file mode 100644
index 000000000..70c6b92e1
--- /dev/null
+++ b/include/linux/sunrpc/xdr.h
@@ -0,0 +1,235 @@
+/*
+ * XDR standard data types and function declarations
+ *
+ * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
+ *
+ * Based on:
+ * RFC 4506 "XDR: External Data Representation Standard", May 2006
+ */
+
+#ifndef _SUNRPC_XDR_H_
+#define _SUNRPC_XDR_H_
+
+#ifdef __KERNEL__
+
+#include <linux/uio.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <linux/scatterlist.h>
+
+/*
+ * Buffer adjustment
+ */
+#define XDR_QUADLEN(l) (((l) + 3) >> 2)
+
+/*
+ * Generic opaque `network object.' At the kernel level, this type
+ * is used only by lockd.
+ */
+#define XDR_MAX_NETOBJ 1024
+struct xdr_netobj {
+ unsigned int len;
+ u8 * data;
+};
+
+/*
+ * This is the legacy generic XDR function. rqstp is either a rpc_rqst
+ * (client side) or svc_rqst pointer (server side).
+ * Encode functions always assume there's enough room in the buffer.
+ */
+typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
+
+/*
+ * Basic structure for transmission/reception of a client XDR message.
+ * Features a header (for a linear buffer containing RPC headers
+ * and the data payload for short messages), and then an array of
+ * pages.
+ * The tail iovec allows you to append data after the page array. Its
+ * main interest is for appending padding to the pages in order to
+ * satisfy the int_32-alignment requirements in RFC1832.
+ *
+ * For the future, we might want to string several of these together
+ * in a list if anybody wants to make use of NFSv4 COMPOUND
+ * operations and/or has a need for scatter/gather involving pages.
+ */
+struct xdr_buf {
+ struct kvec head[1], /* RPC header + non-page data */
+ tail[1]; /* Appended after page data */
+
+ struct page ** pages; /* Array of pages */
+ unsigned int page_base, /* Start of page data */
+ page_len, /* Length of page data */
+ flags; /* Flags for data disposition */
+#define XDRBUF_READ 0x01 /* target of file read */
+#define XDRBUF_WRITE 0x02 /* source of file write */
+
+ unsigned int buflen, /* Total length of storage buffer */
+ len; /* Length of XDR encoded message */
+};
+
+/*
+ * pre-xdr'ed macros.
+ */
+
+#define xdr_zero cpu_to_be32(0)
+#define xdr_one cpu_to_be32(1)
+#define xdr_two cpu_to_be32(2)
+
+#define rpc_success cpu_to_be32(RPC_SUCCESS)
+#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL)
+#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH)
+#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL)
+#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS)
+#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR)
+#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY)
+
+#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK)
+#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED)
+#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED)
+#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF)
+#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF)
+#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
+#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
+#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
+#define rpc_autherr_oldseqnum cpu_to_be32(101)
+
+/*
+ * Miscellaneous XDR helper functions
+ */
+__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len);
+__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len);
+__be32 *xdr_encode_string(__be32 *p, const char *s);
+__be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp,
+ unsigned int maxlen);
+__be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *);
+__be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
+
+void xdr_inline_pages(struct xdr_buf *, unsigned int,
+ struct page **, unsigned int, unsigned int);
+void xdr_terminate_string(struct xdr_buf *, const u32);
+
+static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
+{
+ return xdr_encode_opaque(p, s, len);
+}
+
+/*
+ * Encode/decode 64-bit quantities (NFSv3 support)
+ */
+static inline __be32 *
+xdr_encode_hyper(__be32 *p, __u64 val)
+{
+ put_unaligned_be64(val, p);
+ return p + 2;
+}
+
+static inline __be32 *
+xdr_decode_hyper(__be32 *p, __u64 *valp)
+{
+ *valp = get_unaligned_be64(p);
+ return p + 2;
+}
+
+static inline __be32 *
+xdr_decode_opaque_fixed(__be32 *p, void *ptr, unsigned int len)
+{
+ memcpy(ptr, p, len);
+ return p + XDR_QUADLEN(len);
+}
+
+/*
+ * Adjust kvec to reflect end of xdr'ed data (RPC client XDR)
+ */
+static inline int
+xdr_adjust_iovec(struct kvec *iov, __be32 *p)
+{
+ return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base);
+}
+
+/*
+ * XDR buffer helper functions
+ */
+extern void xdr_shift_buf(struct xdr_buf *, size_t);
+extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *);
+extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
+extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
+extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int);
+extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
+extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int);
+
+/*
+ * Helper structure for copying from an sk_buff.
+ */
+struct xdr_skb_reader {
+ struct sk_buff *skb;
+ unsigned int offset;
+ size_t count;
+ __wsum csum;
+};
+
+typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
+
+size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len);
+extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
+extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
+ struct xdr_skb_reader *, xdr_skb_read_actor);
+
+extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
+extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
+
+struct xdr_array2_desc;
+typedef int (*xdr_xcode_elem_t)(struct xdr_array2_desc *desc, void *elem);
+struct xdr_array2_desc {
+ unsigned int elem_size;
+ unsigned int array_len;
+ unsigned int array_maxlen;
+ xdr_xcode_elem_t xcode;
+};
+
+extern int xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
+ struct xdr_array2_desc *desc);
+extern int xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
+ struct xdr_array2_desc *desc);
+extern void _copy_from_pages(char *p, struct page **pages, size_t pgbase,
+ size_t len);
+
+/*
+ * Provide some simple tools for XDR buffer overflow-checking etc.
+ */
+struct xdr_stream {
+ __be32 *p; /* start of available buffer */
+ struct xdr_buf *buf; /* XDR buffer to read/write */
+
+ __be32 *end; /* end of available buffer space */
+ struct kvec *iov; /* pointer to the current kvec */
+ struct kvec scratch; /* Scratch buffer */
+ struct page **page_ptr; /* pointer to the current page */
+ unsigned int nwords; /* Remaining decode buffer length */
+};
+
+/*
+ * These are the xdr_stream style generic XDR encode and decode functions.
+ */
+typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+typedef int (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+
+extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
+extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
+extern void xdr_commit_encode(struct xdr_stream *xdr);
+extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
+extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
+extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
+ unsigned int base, unsigned int len);
+extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr);
+extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
+extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
+ struct page **pages, unsigned int len);
+extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
+extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
+extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
+extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
+extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
+
+#endif /* __KERNEL__ */
+
+#endif /* _SUNRPC_XDR_H_ */
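
As a closing illustration of the xdr_stream interface declared above, the sketch below encodes and then decodes one 32-bit flag followed by a 64-bit cookie. It assumes the stream has already been initialised with xdr_init_encode()/xdr_init_decode() by the caller; the function names and error codes are illustrative.

#include <linux/sunrpc/xdr.h>

static int demo_encode(struct xdr_stream *xdr, u32 flag, u64 cookie)
{
	__be32 *p;

	/* Reserve 4 + 8 contiguous bytes in the send buffer. */
	p = xdr_reserve_space(xdr, 4 + 8);
	if (!p)
		return -ENOSPC;
	*p++ = cpu_to_be32(flag);
	xdr_encode_hyper(p, cookie);
	return 0;
}

static int demo_decode(struct xdr_stream *xdr, u32 *flag, u64 *cookie)
{
	__be32 *p;

	/* Pull 4 + 8 bytes out of the receive buffer, if available. */
	p = xdr_inline_decode(xdr, 4 + 8);
	if (!p)
		return -EIO;
	*flag = be32_to_cpup(p++);
	xdr_decode_hyper(p, cookie);
	return 0;
}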
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
new file mode 100644
index 000000000..8b93ef53d
--- /dev/null
+++ b/include/linux/sunrpc/xprt.h
@@ -0,0 +1,436 @@
+/*
+ * linux/include/linux/sunrpc/xprt.h
+ *
+ * Declarations for the RPC transport interface.
+ *
+ * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
+ */
+
+#ifndef _LINUX_SUNRPC_XPRT_H
+#define _LINUX_SUNRPC_XPRT_H
+
+#include <linux/uio.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/ktime.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/msg_prot.h>
+
+#ifdef __KERNEL__
+
+#define RPC_MIN_SLOT_TABLE (2U)
+#define RPC_DEF_SLOT_TABLE (16U)
+#define RPC_MAX_SLOT_TABLE_LIMIT (65536U)
+#define RPC_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE_LIMIT
+
+#define RPC_CWNDSHIFT (8U)
+#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT)
+#define RPC_INITCWND RPC_CWNDSCALE
+#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
+#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
+
+/*
+ * This describes a timeout strategy
+ */
+struct rpc_timeout {
+ unsigned long to_initval, /* initial timeout */
+ to_maxval, /* max timeout */
+ to_increment; /* if !exponential */
+ unsigned int to_retries; /* max # of retries */
+ unsigned char to_exponential;
+};
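
As a rough illustration of how these fields interact (a sketch of the strategy only, not the kernel's retransmit code), the next retry timeout grows roughly like this:

/* Illustrative only: grow a timeout according to struct rpc_timeout.
 * If to_exponential is set the value doubles on each retry, otherwise it
 * grows by to_increment; in either case it is clamped to to_maxval.
 */
static unsigned long example_next_timeout(const struct rpc_timeout *to,
					  unsigned long cur)
{
	unsigned long next;

	next = to->to_exponential ? cur << 1 : cur + to->to_increment;
	if (next > to->to_maxval || next < cur)	/* clamp, guard overflow */
		next = to->to_maxval;
	return next;
}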
+
+enum rpc_display_format_t {
+ RPC_DISPLAY_ADDR = 0,
+ RPC_DISPLAY_PORT,
+ RPC_DISPLAY_PROTO,
+ RPC_DISPLAY_HEX_ADDR,
+ RPC_DISPLAY_HEX_PORT,
+ RPC_DISPLAY_NETID,
+ RPC_DISPLAY_MAX,
+};
+
+struct rpc_task;
+struct rpc_xprt;
+struct seq_file;
+
+/*
+ * This describes a complete RPC request
+ */
+struct rpc_rqst {
+ /*
+ * This is the user-visible part
+ */
+ struct rpc_xprt * rq_xprt; /* RPC client */
+ struct xdr_buf rq_snd_buf; /* send buffer */
+ struct xdr_buf rq_rcv_buf; /* recv buffer */
+
+ /*
+ * This is the private part
+ */
+ struct rpc_task * rq_task; /* RPC task data */
+ struct rpc_cred * rq_cred; /* Bound cred */
+ __be32 rq_xid; /* request XID */
+ int rq_cong; /* has incremented xprt->cong */
+ u32 rq_seqno; /* gss seq no. used on req. */
+ int rq_enc_pages_num;
+ struct page **rq_enc_pages; /* scratch pages for use by
+ gss privacy code */
+ void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
+ struct list_head rq_list;
+
+ __u32 * rq_buffer; /* XDR encode buffer */
+ size_t rq_callsize,
+ rq_rcvsize;
+ size_t rq_xmit_bytes_sent; /* total bytes sent */
+ size_t rq_reply_bytes_recvd; /* total reply bytes */
+ /* received */
+
+ struct xdr_buf rq_private_buf; /* The receive buffer
+ * used in the softirq.
+ */
+ unsigned long rq_majortimeo; /* major timeout alarm */
+ unsigned long rq_timeout; /* Current timeout value */
+ ktime_t rq_rtt; /* round-trip time */
+ unsigned int rq_retries; /* # of retries */
+ unsigned int rq_connect_cookie;
+ /* A cookie used to track the
+ state of the transport
+ connection */
+
+ /*
+ * Partial send handling
+ */
+ u32 rq_bytes_sent; /* Bytes we have sent */
+
+ ktime_t rq_xtime; /* transmit time stamp */
+ int rq_ntrans;
+
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ struct list_head rq_bc_list; /* Callback service list */
+ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
+ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */

+};
+#define rq_svec rq_snd_buf.head
+#define rq_slen rq_snd_buf.len
+
+struct rpc_xprt_ops {
+ void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
+ int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*rpcbind)(struct rpc_task *task);
+ void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
+ void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void * (*buf_alloc)(struct rpc_task *task, size_t size);
+ void (*buf_free)(void *buffer);
+ int (*send_request)(struct rpc_task *task);
+ void (*set_retrans_timeout)(struct rpc_task *task);
+ void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
+ void (*release_request)(struct rpc_task *task);
+ void (*close)(struct rpc_xprt *xprt);
+ void (*destroy)(struct rpc_xprt *xprt);
+ void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
+};
+
+/*
+ * RPC transport identifiers
+ *
+ * To preserve compatibility with the historical use of raw IP protocol
+ * id's for transport selection, UDP and TCP identifiers are specified
+ * with the previous values. No such restriction exists for new transports,
+ * except that they may not collide with these values (17 and 6,
+ * respectively).
+ */
+#define XPRT_TRANSPORT_BC (1 << 31)
+enum xprt_transports {
+ XPRT_TRANSPORT_UDP = IPPROTO_UDP,
+ XPRT_TRANSPORT_TCP = IPPROTO_TCP,
+ XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC,
+ XPRT_TRANSPORT_RDMA = 256,
+ XPRT_TRANSPORT_LOCAL = 257,
+};
+
+struct rpc_xprt {
+ atomic_t count; /* Reference count */
+ struct rpc_xprt_ops * ops; /* transport methods */
+
+ const struct rpc_timeout *timeout; /* timeout parms */
+ struct sockaddr_storage addr; /* server address */
+ size_t addrlen; /* size of server address */
+ int prot; /* IP protocol */
+
+ unsigned long cong; /* current congestion */
+ unsigned long cwnd; /* congestion window */
+
+ size_t max_payload; /* largest RPC payload size,
+ in bytes */
+ unsigned int tsh_size; /* size of transport specific
+ header */
+
+ struct rpc_wait_queue binding; /* requests waiting on rpcbind */
+ struct rpc_wait_queue sending; /* requests waiting to send */
+ struct rpc_wait_queue pending; /* requests in flight */
+ struct rpc_wait_queue backlog; /* waiting for slot */
+ struct list_head free; /* free slots */
+ unsigned int max_reqs; /* max number of slots */
+ unsigned int min_reqs; /* min number of slots */
+ atomic_t num_reqs; /* total slots */
+ unsigned long state; /* transport state */
+ unsigned char resvport : 1; /* use a reserved port */
+ unsigned int swapper; /* we're swapping over this
+ transport */
+ unsigned int bind_index; /* bind function index */
+
+ /*
+ * Connection of transports
+ */
+ unsigned long bind_timeout,
+ reestablish_timeout;
+ unsigned int connect_cookie; /* A cookie that gets bumped
+ every time the transport
+ is reconnected */
+
+ /*
+ * Disconnection of idle transports
+ */
+ struct work_struct task_cleanup;
+ struct timer_list timer;
+ unsigned long last_used,
+ idle_timeout;
+
+ /*
+ * Send stuff
+ */
+ spinlock_t transport_lock; /* lock transport info */
+ spinlock_t reserve_lock; /* lock slot table */
+ u32 xid; /* Next XID value to use */
+ struct rpc_task * snd_task; /* Task blocked in send */
+ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+ struct svc_serv *bc_serv; /* The RPC service which will */
+ /* process the callback */
+ unsigned int bc_alloc_count; /* Total number of preallocs */
+ spinlock_t bc_pa_lock; /* Protects the preallocated
+ * items */
+ struct list_head bc_pa_list; /* List of preallocated
+ * backchannel rpc_rqst's */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+ struct list_head recv;
+
+ struct {
+ unsigned long bind_count, /* total number of binds */
+ connect_count, /* total number of connects */
+ connect_start, /* connect start timestamp */
+ connect_time, /* jiffies waiting for connect */
+ sends, /* how many complete requests */
+ recvs, /* how many complete replies */
+ bad_xids, /* lookup_rqst didn't find XID */
+ max_slots; /* max rpc_slots used */
+
+ unsigned long long req_u, /* average requests on the wire */
+ bklog_u, /* backlog queue utilization */
+ sending_u, /* send q utilization */
+ pending_u; /* pend q utilization */
+ } stat;
+
+ struct net *xprt_net;
+ const char *servername;
+ const char *address_strings[RPC_DISPLAY_MAX];
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
+ struct dentry *debugfs; /* debugfs directory */
+#endif
+};
+
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+/*
+ * Backchannel flags
+ */
+#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */
+ /* buffer in use */
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+}
+#else
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return 0;
+}
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+
+#define XPRT_CREATE_INFINITE_SLOTS (1U)
+#define XPRT_CREATE_NO_IDLE_TIMEOUT (1U << 1)
+
+struct xprt_create {
+ int ident; /* XPRT_TRANSPORT identifier */
+ struct net * net;
+ struct sockaddr * srcaddr; /* optional local address */
+ struct sockaddr * dstaddr; /* remote peer address */
+ size_t addrlen;
+ const char *servername;
+ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
+ unsigned int flags;
+};
+
+struct xprt_class {
+ struct list_head list;
+ int ident; /* XPRT_TRANSPORT identifier */
+ struct rpc_xprt * (*setup)(struct xprt_create *);
+ struct module *owner;
+ char name[32];
+};
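
A transport implementation advertises itself through one of these. Purely as a hedged sketch (the "example" names, the identifier value and the setup routine are invented for illustration), registration via xprt_register_transport(), declared further down in this header, would look roughly like:

/* Hypothetical transport class; the setup routine body is omitted. */
static struct rpc_xprt *example_xprt_setup(struct xprt_create *args);

static struct xprt_class example_transport = {
	.list	= LIST_HEAD_INIT(example_transport.list),
	.ident	= 258,	/* made-up identifier; must not collide with 6 or 17 */
	.setup	= example_xprt_setup,
	.owner	= THIS_MODULE,
	.name	= "example",
};

static int __init example_xprt_init(void)
{
	return xprt_register_transport(&example_transport);
}

static void __exit example_xprt_exit(void)
{
	xprt_unregister_transport(&example_transport);
}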
+
+/*
+ * Generic internal transport functions
+ */
+struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
+void xprt_connect(struct rpc_task *task);
+void xprt_reserve(struct rpc_task *task);
+void xprt_retry_reserve(struct rpc_task *task);
+int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+bool xprt_prepare_transmit(struct rpc_task *task);
+void xprt_transmit(struct rpc_task *task);
+void xprt_end_transmit(struct rpc_task *task);
+int xprt_adjust_timeout(struct rpc_rqst *req);
+void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_release(struct rpc_task *task);
+void xprt_put(struct rpc_xprt *xprt);
+struct rpc_xprt * xprt_alloc(struct net *net, size_t size,
+ unsigned int num_prealloc,
+ unsigned int max_req);
+void xprt_free(struct rpc_xprt *);
+
+/**
+ * xprt_get - return a reference to an RPC transport.
+ * @xprt: pointer to the transport
+ *
+ */
+static inline struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
+{
+ if (atomic_inc_not_zero(&xprt->count))
+ return xprt;
+ return NULL;
+}
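
A short, hedged usage sketch: callers taking a temporary reference pair xprt_get() with xprt_put() and must cope with the NULL return once the refcount has already dropped to zero. The helper name below is illustrative.

/* Illustrative only: safely peek at a transport's server name. */
static void example_log_servername(struct rpc_xprt *xprt)
{
	struct rpc_xprt *held = xprt_get(xprt);

	if (!held)
		return;		/* transport is already being destroyed */
	pr_info("transport to %s\n", held->servername);
	xprt_put(held);
}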
+
+static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p)
+{
+ return p + xprt->tsh_size;
+}
+
+/*
+ * Transport switch helper functions
+ */
+int xprt_register_transport(struct xprt_class *type);
+int xprt_unregister_transport(struct xprt_class *type);
+int xprt_load_transport(const char *);
+void xprt_set_retrans_timeout_def(struct rpc_task *task);
+void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
+void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
+void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
+void xprt_write_space(struct rpc_xprt *xprt);
+void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
+struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
+void xprt_complete_rqst(struct rpc_task *task, int copied);
+void xprt_release_rqst_cong(struct rpc_task *task);
+void xprt_disconnect_done(struct rpc_xprt *xprt);
+void xprt_force_disconnect(struct rpc_xprt *xprt);
+void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
+int xs_swapper(struct rpc_xprt *xprt, int enable);
+
+bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *);
+void xprt_unlock_connect(struct rpc_xprt *, void *);
+
+/*
+ * Reserved bit positions in xprt->state
+ */
+#define XPRT_LOCKED (0)
+#define XPRT_CONNECTED (1)
+#define XPRT_CONNECTING (2)
+#define XPRT_CLOSE_WAIT (3)
+#define XPRT_BOUND (4)
+#define XPRT_BINDING (5)
+#define XPRT_CLOSING (6)
+#define XPRT_CONGESTED (9)
+
+static inline void xprt_set_connected(struct rpc_xprt *xprt)
+{
+ set_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline void xprt_clear_connected(struct rpc_xprt *xprt)
+{
+ clear_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_connected(struct rpc_xprt *xprt)
+{
+ return test_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
+{
+ return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
+{
+ return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
+}
+
+static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
+{
+ smp_mb__before_atomic();
+ clear_bit(XPRT_CONNECTING, &xprt->state);
+ smp_mb__after_atomic();
+}
+
+static inline int xprt_connecting(struct rpc_xprt *xprt)
+{
+ return test_bit(XPRT_CONNECTING, &xprt->state);
+}
+
+static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
+{
+ return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
+}
+
+static inline void xprt_set_bound(struct rpc_xprt *xprt)
+{
+ test_and_set_bit(XPRT_BOUND, &xprt->state);
+}
+
+static inline int xprt_bound(struct rpc_xprt *xprt)
+{
+ return test_bit(XPRT_BOUND, &xprt->state);
+}
+
+static inline void xprt_clear_bound(struct rpc_xprt *xprt)
+{
+ clear_bit(XPRT_BOUND, &xprt->state);
+}
+
+static inline void xprt_clear_binding(struct rpc_xprt *xprt)
+{
+ smp_mb__before_atomic();
+ clear_bit(XPRT_BINDING, &xprt->state);
+ smp_mb__after_atomic();
+}
+
+static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt)
+{
+ return test_and_set_bit(XPRT_BINDING, &xprt->state);
+}
+
+#endif /* __KERNEL__*/
+
+#endif /* _LINUX_SUNRPC_XPRT_H */
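
The XPRT_* state-bit helpers above are typically used to serialise connect attempts. As a hedged skeleton (not the code of any in-tree transport, and with the transport-specific work elided), a connect path might use them like this:

/* Illustrative connect-attempt skeleton using the state-bit helpers. */
static void example_try_connect(struct rpc_xprt *xprt)
{
	if (xprt_connected(xprt))
		return;				/* nothing to do */
	if (xprt_test_and_set_connecting(xprt))
		return;				/* another attempt in flight */

	/* ... transport-specific connection setup would go here ... */

	xprt_set_connected(xprt);		/* on success */
	xprt_clear_connecting(xprt);		/* allow future attempts */
}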
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
new file mode 100644
index 000000000..c984c8598
--- /dev/null
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the BSD-type
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * Neither the name of the Network Appliance, Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_SUNRPC_XPRTRDMA_H
+#define _LINUX_SUNRPC_XPRTRDMA_H
+
+/*
+ * Constants. Max RPC/NFS header is big enough to account for
+ * additional marshaling buffers passed down by Linux client.
+ *
+ * RDMA header is currently fixed max size, and is big enough for a
+ * fully-chunked NFS message (read chunks are the largest). Note only
+ * a single chunk type per message is supported currently.
+ */
+#define RPCRDMA_MIN_SLOT_TABLE (2U)
+#define RPCRDMA_DEF_SLOT_TABLE (32U)
+#define RPCRDMA_MAX_SLOT_TABLE (256U)
+
+#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
+
+#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
+
+/* memory registration strategies */
+enum rpcrdma_memreg {
+ RPCRDMA_BOUNCEBUFFERS = 0,
+ RPCRDMA_REGISTER,
+ RPCRDMA_MEMWINDOWS,
+ RPCRDMA_MEMWINDOWS_ASYNC,
+ RPCRDMA_MTHCAFMR,
+ RPCRDMA_FRMR,
+ RPCRDMA_ALLPHYSICAL,
+ RPCRDMA_LAST
+};
+
+#endif /* _LINUX_SUNRPC_XPRTRDMA_H */
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
new file mode 100644
index 000000000..7591788e9
--- /dev/null
+++ b/include/linux/sunrpc/xprtsock.h
@@ -0,0 +1,81 @@
+/*
+ * linux/include/linux/sunrpc/xprtsock.h
+ *
+ * Declarations for the RPC transport socket provider.
+ */
+
+#ifndef _LINUX_SUNRPC_XPRTSOCK_H
+#define _LINUX_SUNRPC_XPRTSOCK_H
+
+#ifdef __KERNEL__
+
+int init_socket_xprt(void);
+void cleanup_socket_xprt(void);
+
+#define RPC_MIN_RESVPORT (1U)
+#define RPC_MAX_RESVPORT (65535U)
+#define RPC_DEF_MIN_RESVPORT (665U)
+#define RPC_DEF_MAX_RESVPORT (1023U)
+
+struct sock_xprt {
+ struct rpc_xprt xprt;
+
+ /*
+ * Network layer
+ */
+ struct socket * sock;
+ struct sock * inet;
+
+ /*
+ * State of TCP reply receive
+ */
+ __be32 tcp_fraghdr,
+ tcp_xid,
+ tcp_calldir;
+
+ u32 tcp_offset,
+ tcp_reclen;
+
+ unsigned long tcp_copied,
+ tcp_flags;
+
+ /*
+ * Connection of transports
+ */
+ struct delayed_work connect_worker;
+ struct sockaddr_storage srcaddr;
+ unsigned short srcport;
+
+ /*
+ * UDP socket buffer size parameters
+ */
+ size_t rcvsize,
+ sndsize;
+
+ /*
+ * Saved socket callback addresses
+ */
+ void (*old_data_ready)(struct sock *);
+ void (*old_state_change)(struct sock *);
+ void (*old_write_space)(struct sock *);
+ void (*old_error_report)(struct sock *);
+};
+
+/*
+ * TCP receive state flags
+ */
+#define TCP_RCV_LAST_FRAG (1UL << 0)
+#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
+#define TCP_RCV_COPY_XID (1UL << 2)
+#define TCP_RCV_COPY_DATA (1UL << 3)
+#define TCP_RCV_READ_CALLDIR (1UL << 4)
+#define TCP_RCV_COPY_CALLDIR (1UL << 5)
+
+/*
+ * TCP RPC flags
+ */
+#define TCP_RPC_REPLY (1UL << 6)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
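
Because struct sock_xprt embeds its struct rpc_xprt as the first member, the socket transport code can recover the containing structure from a generic transport pointer with container_of(). A minimal sketch; the helper name is illustrative:

#include <linux/kernel.h>	/* container_of() */

/* Illustrative helper: map a generic rpc_xprt back to its sock_xprt. */
static inline struct sock_xprt *example_to_sock_xprt(struct rpc_xprt *xprt)
{
	return container_of(xprt, struct sock_xprt, xprt);
}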
diff --git a/include/linux/sunserialcore.h b/include/linux/sunserialcore.h
new file mode 100644
index 000000000..dbe4d7fca
--- /dev/null
+++ b/include/linux/sunserialcore.h
@@ -0,0 +1,37 @@
+/* sunserialcore.h
+ *
+ * Generic SUN serial/kbd/ms layer. Based entirely
+ * upon drivers/sbus/char/sunserial.h which is:
+ *
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ *
+ * Port to new UART layer is:
+ *
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ */
+
+#ifndef _SERIAL_SUN_H
+#define _SERIAL_SUN_H
+
+#include <linux/device.h>
+#include <linux/serial_core.h>
+#include <linux/console.h>
+
+/* Serial keyboard defines for L1-A processing... */
+#define SUNKBD_RESET 0xff
+#define SUNKBD_L1 0x01
+#define SUNKBD_UP 0x80
+#define SUNKBD_A 0x4d
+
+extern unsigned int suncore_mouse_baud_cflag_next(unsigned int, int *);
+extern int suncore_mouse_baud_detection(unsigned char, int);
+
+extern int sunserial_register_minors(struct uart_driver *, int);
+extern void sunserial_unregister_minors(struct uart_driver *, int);
+
+extern int sunserial_console_match(struct console *, struct device_node *,
+ struct uart_driver *, int, bool);
+extern void sunserial_console_termios(struct console *,
+ struct device_node *);
+
+#endif /* !(_SERIAL_SUN_H) */
diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h
new file mode 100644
index 000000000..17ea468fa
--- /dev/null
+++ b/include/linux/superhyway.h
@@ -0,0 +1,107 @@
+/*
+ * include/linux/superhyway.h
+ *
+ * SuperHyway Bus definitions
+ *
+ * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __LINUX_SUPERHYWAY_H
+#define __LINUX_SUPERHYWAY_H
+
+#include <linux/device.h>
+
+/*
+ * SuperHyway IDs
+ */
+#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183
+
+struct superhyway_vcr_info {
+ u8 perr_flags; /* P-port Error flags */
+ u8 merr_flags; /* Module Error flags */
+ u16 mod_vers; /* Module Version */
+ u16 mod_id; /* Module ID */
+ u8 bot_mb; /* Bottom Memory block */
+ u8 top_mb; /* Top Memory block */
+};
+
+struct superhyway_ops {
+ int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr);
+ int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr);
+};
+
+struct superhyway_bus {
+ struct superhyway_ops *ops;
+};
+
+extern struct superhyway_bus superhyway_channels[];
+
+struct superhyway_device_id {
+ unsigned int id;
+ unsigned long driver_data;
+};
+
+struct superhyway_device;
+extern struct bus_type superhyway_bus_type;
+
+struct superhyway_driver {
+ char *name;
+
+ const struct superhyway_device_id *id_table;
+ struct device_driver drv;
+
+ int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id);
+ void (*remove)(struct superhyway_device *dev);
+};
+
+#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv)
+
+struct superhyway_device {
+ char name[32];
+
+ struct device dev;
+
+ struct superhyway_device_id id;
+ struct superhyway_driver *drv;
+ struct superhyway_bus *bus;
+
+ int num_resources;
+ struct resource *resource;
+ struct superhyway_vcr_info vcr;
+};
+
+#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev)
+
+#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev)
+#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
+
+static inline int
+superhyway_read_vcr(struct superhyway_device *dev, unsigned long base,
+ struct superhyway_vcr_info *vcr)
+{
+ return dev->bus->ops->read_vcr(base, vcr);
+}
+
+static inline int
+superhyway_write_vcr(struct superhyway_device *dev, unsigned long base,
+ struct superhyway_vcr_info vcr)
+{
+ return dev->bus->ops->write_vcr(base, vcr);
+}
+
+extern int superhyway_scan_bus(struct superhyway_bus *);
+
+/* drivers/sh/superhyway/superhyway.c */
+int superhyway_register_driver(struct superhyway_driver *);
+void superhyway_unregister_driver(struct superhyway_driver *);
+int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *);
+int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices);
+
+/* drivers/sh/superhyway/superhyway-sysfs.c */
+extern struct device_attribute superhyway_dev_attrs[];
+
+#endif /* __LINUX_SUPERHYWAY_H */
+
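Driver registration follows the usual Linux bus-driver pattern. A hedged, minimal skeleton (device ID choice, names and callback bodies are invented for illustration) might be:

static const struct superhyway_device_id example_ids[] = {
	{ SUPERHYWAY_DEVICE_ID_SH5_DMAC, 0 },
	{ 0, },					/* terminator */
};

static int example_probe(struct superhyway_device *dev,
			 const struct superhyway_device_id *id)
{
	dev_info(&dev->dev, "module id 0x%04x version %u\n",
		 dev->vcr.mod_id, dev->vcr.mod_vers);
	return 0;
}

static void example_remove(struct superhyway_device *dev)
{
	/* release per-device resources here */
}

static struct superhyway_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};

/* Registered from module init with superhyway_register_driver(&example_driver)
 * and torn down with superhyway_unregister_driver(&example_driver). */
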
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
new file mode 100644
index 000000000..9ae75219f
--- /dev/null
+++ b/include/linux/suspend.h
@@ -0,0 +1,568 @@
+#ifndef _LINUX_SUSPEND_H
+#define _LINUX_SUSPEND_H
+
+#include <linux/swap.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/mm.h>
+#include <linux/freezer.h>
+#include <asm/errno.h>
+
+#ifdef CONFIG_VT
+extern void pm_set_vt_switch(int);
+#else
+static inline void pm_set_vt_switch(int do_switch)
+{
+}
+#endif
+
+#ifdef CONFIG_VT_CONSOLE_SLEEP
+extern int pm_prepare_console(void);
+extern void pm_restore_console(void);
+#else
+static inline int pm_prepare_console(void)
+{
+ return 0;
+}
+
+static inline void pm_restore_console(void)
+{
+}
+#endif
+
+typedef int __bitwise suspend_state_t;
+
+#define PM_SUSPEND_ON ((__force suspend_state_t) 0)
+#define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1)
+#define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2)
+#define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
+#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE
+#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
+
+enum suspend_stat_step {
+ SUSPEND_FREEZE = 1,
+ SUSPEND_PREPARE,
+ SUSPEND_SUSPEND,
+ SUSPEND_SUSPEND_LATE,
+ SUSPEND_SUSPEND_NOIRQ,
+ SUSPEND_RESUME_NOIRQ,
+ SUSPEND_RESUME_EARLY,
+ SUSPEND_RESUME
+};
+
+struct suspend_stats {
+ int success;
+ int fail;
+ int failed_freeze;
+ int failed_prepare;
+ int failed_suspend;
+ int failed_suspend_late;
+ int failed_suspend_noirq;
+ int failed_resume;
+ int failed_resume_early;
+ int failed_resume_noirq;
+#define REC_FAILED_NUM 2
+ int last_failed_dev;
+ char failed_devs[REC_FAILED_NUM][40];
+ int last_failed_errno;
+ int errno[REC_FAILED_NUM];
+ int last_failed_step;
+ enum suspend_stat_step failed_steps[REC_FAILED_NUM];
+};
+
+extern struct suspend_stats suspend_stats;
+
+static inline void dpm_save_failed_dev(const char *name)
+{
+ strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
+ name,
+ sizeof(suspend_stats.failed_devs[0]));
+ suspend_stats.last_failed_dev++;
+ suspend_stats.last_failed_dev %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_errno(int err)
+{
+ suspend_stats.errno[suspend_stats.last_failed_errno] = err;
+ suspend_stats.last_failed_errno++;
+ suspend_stats.last_failed_errno %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_step(enum suspend_stat_step step)
+{
+ suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
+ suspend_stats.last_failed_step++;
+ suspend_stats.last_failed_step %= REC_FAILED_NUM;
+}
+
+/**
+ * struct platform_suspend_ops - Callbacks for managing platform dependent
+ * system sleep states.
+ *
+ * @valid: Callback to determine if given system sleep state is supported by
+ * the platform.
+ * Valid (ie. supported) states are advertised in /sys/power/state. Note
+ * that it still may be impossible to enter given system sleep state if the
+ * conditions aren't right.
+ * There is the %suspend_valid_only_mem function available that can be
+ * assigned to this if the platform only supports mem sleep.
+ *
+ * @begin: Initialise a transition to given system sleep state.
+ * @begin() is executed right prior to suspending devices. The information
+ * conveyed to the platform code by @begin() should be disregarded by it as
+ * soon as @end() is executed. If @begin() fails (ie. returns nonzero),
+ * @prepare(), @enter() and @finish() will not be called by the PM core.
+ * This callback is optional. However, if it is implemented, the argument
+ * passed to @enter() is redundant and should be ignored.
+ *
+ * @prepare: Prepare the platform for entering the system sleep state indicated
+ * by @begin().
+ * @prepare() is called right after devices have been suspended (ie. the
+ * appropriate .suspend() method has been executed for each device) and
+ * before device drivers' late suspend callbacks are executed. It returns
+ * 0 on success or a negative error code otherwise, in which case the
+ * system cannot enter the desired sleep state (@prepare_late(), @enter(),
+ * and @wake() will not be called in that case).
+ *
+ * @prepare_late: Finish preparing the platform for entering the system sleep
+ * state indicated by @begin().
+ * @prepare_late is called before disabling nonboot CPUs and after
+ * device drivers' late suspend callbacks have been executed. It returns
+ * 0 on success or a negative error code otherwise, in which case the
+ * system cannot enter the desired sleep state (@enter() will not be
+ * executed).
+ *
+ * @enter: Enter the system sleep state indicated by @begin() or represented by
+ * the argument if @begin() is not implemented.
+ * This callback is mandatory. It returns 0 on success or a negative
+ * error code otherwise, in which case the system cannot enter the desired
+ * sleep state.
+ *
+ * @wake: Called when the system has just left a sleep state, right after
+ * the nonboot CPUs have been enabled and before device drivers' early
+ * resume callbacks are executed.
+ * This callback is optional, but should be implemented by the platforms
+ * that implement @prepare_late(). If implemented, it is always called
+ * after @prepare_late and @enter(), even if one of them fails.
+ *
+ * @finish: Finish wake-up of the platform.
+ * @finish is called right prior to calling device drivers' regular suspend
+ * callbacks.
+ * This callback is optional, but should be implemented by the platforms
+ * that implement @prepare(). If implemented, it is always called after
+ * @enter() and @wake(), even if any of them fails. It is executed after
+ * a failing @prepare.
+ *
+ * @suspend_again: Returns whether the system should suspend again (true) or
+ * not (false). If the platform wants to poll sensors or execute some code
+ * while suspended, without waking userspace or most devices, the
+ * suspend_again callback is the place to do it, assuming that a periodic
+ * wakeup or alarm wakeup has already been set up. This allows some code to
+ * run while the system stays suspended as seen by userland and devices.
+ *
+ * @end: Called by the PM core right after resuming devices, to indicate to
+ * the platform that the system has returned to the working state or
+ * the transition to the sleep state has been aborted.
+ * This callback is optional, but should be implemented by the platforms
+ * that implement @begin(). Accordingly, platforms implementing @begin()
+ * should also provide an @end() which cleans up transitions aborted before
+ * @enter().
+ *
+ * @recover: Recover the platform from a suspend failure.
+ * Called by the PM core if the suspending of devices fails.
+ * This callback is optional and should only be implemented by platforms
+ * which require special recovery actions in that situation.
+ */
+struct platform_suspend_ops {
+ int (*valid)(suspend_state_t state);
+ int (*begin)(suspend_state_t state);
+ int (*prepare)(void);
+ int (*prepare_late)(void);
+ int (*enter)(suspend_state_t state);
+ void (*wake)(void);
+ void (*finish)(void);
+ bool (*suspend_again)(void);
+ void (*end)(void);
+ void (*recover)(void);
+};
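
As a hedged sketch of how a platform might wire this up under CONFIG_SUSPEND (the names and callback body are assumptions, and only @enter is mandatory), using the suspend_valid_only_mem() helper mentioned in the kernel-doc above:

static int example_suspend_enter(suspend_state_t state)
{
	/* platform-specific "go to sleep" sequence would run here */
	return 0;
}

static const struct platform_suspend_ops example_suspend_ops = {
	.valid	= suspend_valid_only_mem,	/* only PM_SUSPEND_MEM supported */
	.enter	= example_suspend_enter,
};

/* Typically called once from platform init code:
 *	suspend_set_ops(&example_suspend_ops);
 */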
+
+struct platform_freeze_ops {
+ int (*begin)(void);
+ int (*prepare)(void);
+ void (*restore)(void);
+ void (*end)(void);
+};
+
+#ifdef CONFIG_SUSPEND
+/**
+ * suspend_set_ops - set platform dependent suspend operations
+ * @ops: The new suspend operations to set.
+ */
+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
+extern int suspend_valid_only_mem(suspend_state_t state);
+
+/* Suspend-to-idle state machine. */
+enum freeze_state {
+ FREEZE_STATE_NONE, /* Not suspended/suspending. */
+ FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */
+ FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */
+};
+
+extern enum freeze_state __read_mostly suspend_freeze_state;
+
+static inline bool idle_should_freeze(void)
+{
+ return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
+}
+
+extern void freeze_set_ops(const struct platform_freeze_ops *ops);
+extern void freeze_wake(void);
+
+/**
+ * arch_suspend_disable_irqs - disable IRQs for suspend
+ *
+ * Disables IRQs (in the default case). This is a weak symbol in the common
+ * code and thus allows architectures to override it if more needs to be
+ * done. Not called for suspend to disk.
+ */
+extern void arch_suspend_disable_irqs(void);
+
+/**
+ * arch_suspend_enable_irqs - enable IRQs after suspend
+ *
+ * Enables IRQs (in the default case). This is a weak symbol in the common
+ * code and thus allows architectures to override it if more needs to be
+ * done. Not called for suspend to disk.
+ */
+extern void arch_suspend_enable_irqs(void);
+
+extern int pm_suspend(suspend_state_t state);
+#else /* !CONFIG_SUSPEND */
+#define suspend_valid_only_mem NULL
+
+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
+static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline bool idle_should_freeze(void) { return false; }
+static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
+static inline void freeze_wake(void) {}
+#endif /* !CONFIG_SUSPEND */
+
+/* struct pbe is used for creating lists of pages that should be restored
+ * atomically during the resume from disk, because the page frames they have
+ * occupied before the suspend are in use.
+ */
+struct pbe {
+ void *address; /* address of the copy */
+ void *orig_address; /* original address of a page */
+ struct pbe *next;
+};
+
+/* mm/page_alloc.c */
+extern void mark_free_pages(struct zone *zone);
+
+/**
+ * struct platform_hibernation_ops - hibernation platform support
+ *
+ * The methods in this structure allow a platform to carry out special
+ * operations required by it during a hibernation transition.
+ *
+ * All the methods below, except for @recover(), must be implemented.
+ *
+ * @begin: Tell the platform driver that we're starting hibernation.
+ * Called right after shrinking memory and before freezing devices.
+ *
+ * @end: Called by the PM core right after resuming devices, to indicate to
+ * the platform that the system has returned to the working state.
+ *
+ * @pre_snapshot: Prepare the platform for creating the hibernation image.
+ * Called right after devices have been frozen and before the nonboot
+ * CPUs are disabled (runs with IRQs on).
+ *
+ * @finish: Restore the previous state of the platform after the hibernation
+ * image has been created *or* put the platform into the normal operation
+ * mode after the hibernation (the same method is executed in both cases).
+ * Called right after the nonboot CPUs have been enabled and before
+ * thawing devices (runs with IRQs on).
+ *
+ * @prepare: Prepare the platform for entering the low power state.
+ * Called right after the hibernation image has been saved and before
+ * devices are prepared for entering the low power state.
+ *
+ * @enter: Put the system into the low power state after the hibernation image
+ * has been saved to disk.
+ * Called after the nonboot CPUs have been disabled and all of the low
+ * level devices have been shut down (runs with IRQs off).
+ *
+ * @leave: Perform the first stage of the cleanup after the system sleep state
+ * indicated by @set_target() has been left.
+ * Called right after the control has been passed from the boot kernel to
+ * the image kernel, before the nonboot CPUs are enabled and before devices
+ * are resumed. Executed with interrupts disabled.
+ *
+ * @pre_restore: Prepare system for the restoration from a hibernation image.
+ * Called right after devices have been frozen and before the nonboot
+ * CPUs are disabled (runs with IRQs on).
+ *
+ * @restore_cleanup: Clean up after a failing image restoration.
+ * Called right after the nonboot CPUs have been enabled and before
+ * thawing devices (runs with IRQs on).
+ *
+ * @recover: Recover the platform from a failure to suspend devices.
+ * Called by the PM core if the suspending of devices during hibernation
+ * fails. This callback is optional and should only be implemented by
+ * platforms which require special recovery actions in that situation.
+ */
+struct platform_hibernation_ops {
+ int (*begin)(void);
+ void (*end)(void);
+ int (*pre_snapshot)(void);
+ void (*finish)(void);
+ int (*prepare)(void);
+ int (*enter)(void);
+ void (*leave)(void);
+ int (*pre_restore)(void);
+ void (*restore_cleanup)(void);
+ void (*recover)(void);
+};
+
+#ifdef CONFIG_HIBERNATION
+/* kernel/power/snapshot.c */
+extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
+static inline void __init register_nosave_region(unsigned long b, unsigned long e)
+{
+ __register_nosave_region(b, e, 0);
+}
+static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
+{
+ __register_nosave_region(b, e, 1);
+}
+extern int swsusp_page_is_forbidden(struct page *);
+extern void swsusp_set_page_free(struct page *);
+extern void swsusp_unset_page_free(struct page *);
+extern unsigned long get_safe_page(gfp_t gfp_mask);
+
+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
+extern int hibernate(void);
+extern bool system_entering_hibernation(void);
+extern bool hibernation_available(void);
+asmlinkage int swsusp_save(void);
+extern struct pbe *restore_pblist;
+#else /* CONFIG_HIBERNATION */
+static inline void register_nosave_region(unsigned long b, unsigned long e) {}
+static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
+static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
+static inline void swsusp_set_page_free(struct page *p) {}
+static inline void swsusp_unset_page_free(struct page *p) {}
+
+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
+static inline int hibernate(void) { return -ENOSYS; }
+static inline bool system_entering_hibernation(void) { return false; }
+static inline bool hibernation_available(void) { return false; }
+#endif /* CONFIG_HIBERNATION */
+
+/* Hibernation and suspend events */
+#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
+#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */
+#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */
+#define PM_POST_SUSPEND 0x0004 /* Suspend finished */
+#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
+#define PM_POST_RESTORE 0x0006 /* Restore failed */
+
+extern struct mutex pm_mutex;
+
+#ifdef CONFIG_PM_SLEEP
+void save_processor_state(void);
+void restore_processor_state(void);
+
+/* kernel/power/main.c */
+extern int register_pm_notifier(struct notifier_block *nb);
+extern int unregister_pm_notifier(struct notifier_block *nb);
+
+#define pm_notifier(fn, pri) { \
+ static struct notifier_block fn##_nb = \
+ { .notifier_call = fn, .priority = pri }; \
+ register_pm_notifier(&fn##_nb); \
+}
+
+/* drivers/base/power/wakeup.c */
+extern bool events_check_enabled;
+
+extern bool pm_wakeup_pending(void);
+extern void pm_system_wakeup(void);
+extern void pm_wakeup_clear(void);
+extern bool pm_get_wakeup_count(unsigned int *count, bool block);
+extern bool pm_save_wakeup_count(unsigned int count);
+extern void pm_wakep_autosleep_enabled(bool set);
+extern void pm_print_active_wakeup_sources(void);
+
+static inline void lock_system_sleep(void)
+{
+ current->flags |= PF_FREEZER_SKIP;
+ mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+ /*
+ * Don't use freezer_count() because we don't want the call to
+ * try_to_freeze() here.
+ *
+ * Reason:
+ * Fundamentally, we just don't need it, because freezing condition
+ * doesn't come into effect until we release the pm_mutex lock,
+ * since the freezer always works with pm_mutex held.
+ *
+ * More importantly, in the case of hibernation,
+ * unlock_system_sleep() gets called in snapshot_read() and
+ * snapshot_write() when the freezing condition is still in effect.
+ * Which means, if we use try_to_freeze() here, it would make them
+ * enter the refrigerator, thus causing hibernation to lockup.
+ */
+ current->flags &= ~PF_FREEZER_SKIP;
+ mutex_unlock(&pm_mutex);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+static inline int register_pm_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_pm_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
+
+static inline bool pm_wakeup_pending(void) { return false; }
+static inline void pm_system_wakeup(void) {}
+static inline void pm_wakeup_clear(void) {}
+
+static inline void lock_system_sleep(void) {}
+static inline void unlock_system_sleep(void) {}
+
+#endif /* !CONFIG_PM_SLEEP */
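
A driver that needs to quiesce around system sleep can hook the PM_* events defined above via register_pm_notifier(). A hedged sketch, with the notifier name and the reactions invented for illustration:

/* Illustrative notifier: react to the suspend events defined above. */
static int example_pm_notify(struct notifier_block *nb,
			     unsigned long event, void *unused)
{
	switch (event) {
	case PM_SUSPEND_PREPARE:
		/* stop feeding new work to the hardware */
		break;
	case PM_POST_SUSPEND:
		/* resume normal operation */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_pm_nb = {
	.notifier_call	= example_pm_notify,
};

/* From driver init:  register_pm_notifier(&example_pm_nb);  */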
+
+#ifdef CONFIG_PM_SLEEP_DEBUG
+extern bool pm_print_times_enabled;
+#else
+#define pm_print_times_enabled (false)
+#endif
+
+enum {
+ TOI_CAN_HIBERNATE,
+ TOI_CAN_RESUME,
+ TOI_RESUME_DEVICE_OK,
+ TOI_NORESUME_SPECIFIED,
+ TOI_SANITY_CHECK_PROMPT,
+ TOI_CONTINUE_REQ,
+ TOI_RESUMED_BEFORE,
+ TOI_BOOT_TIME,
+ TOI_NOW_RESUMING,
+ TOI_IGNORE_LOGLEVEL,
+ TOI_TRYING_TO_RESUME,
+ TOI_LOADING_ALT_IMAGE,
+ TOI_STOP_RESUME,
+ TOI_IO_STOPPED,
+ TOI_NOTIFIERS_PREPARE,
+ TOI_CLUSTER_MODE,
+ TOI_BOOT_KERNEL,
+ TOI_DEVICE_HOTPLUG_LOCKED,
+};
+
+#ifdef CONFIG_TOI
+
+/* Used in init dir files */
+extern unsigned long toi_state;
+#define set_toi_state(bit) (set_bit(bit, &toi_state))
+#define clear_toi_state(bit) (clear_bit(bit, &toi_state))
+#define test_toi_state(bit) (test_bit(bit, &toi_state))
+extern int toi_running;
+
+#define test_action_state(bit) (test_bit(bit, &toi_bkd.toi_action))
+extern int try_tuxonice_hibernate(void);
+
+#else /* !CONFIG_TOI */
+
+#define toi_state (0)
+#define set_toi_state(bit) do { } while (0)
+#define clear_toi_state(bit) do { } while (0)
+#define test_toi_state(bit) (0)
+#define toi_running (0)
+
+static inline int try_tuxonice_hibernate(void) { return 0; }
+#define test_action_state(bit) (0)
+
+#endif /* CONFIG_TOI */
+
+#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_TOI
+extern void try_tuxonice_resume(void);
+#else
+#define try_tuxonice_resume() do { } while (0)
+#endif
+
+extern int resume_attempted;
+extern int software_resume(void);
+
+static inline void check_resume_attempted(void)
+{
+ if (resume_attempted)
+ return;
+
+ software_resume();
+}
+#else
+#define check_resume_attempted() do { } while (0)
+#define resume_attempted (0)
+#endif
+
+#ifdef CONFIG_PM_AUTOSLEEP
+
+/* kernel/power/autosleep.c */
+void queue_up_suspend_work(void);
+
+#else /* !CONFIG_PM_AUTOSLEEP */
+
+static inline void queue_up_suspend_work(void) {}
+
+#endif /* !CONFIG_PM_AUTOSLEEP */
+
+#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
+/*
+ * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
+ * to save/restore additional information to/from the array of page
+ * frame numbers in the hibernation image. For s390 this is used to
+ * save and restore the storage key for each page that is included
+ * in the hibernation image.
+ */
+unsigned long page_key_additional_pages(unsigned long pages);
+int page_key_alloc(unsigned long pages);
+void page_key_free(void);
+void page_key_read(unsigned long *pfn);
+void page_key_memorize(unsigned long *pfn);
+void page_key_write(void *address);
+
+#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
+
+static inline unsigned long page_key_additional_pages(unsigned long pages)
+{
+ return 0;
+}
+
+static inline int page_key_alloc(unsigned long pages)
+{
+ return 0;
+}
+
+static inline void page_key_free(void) {}
+static inline void page_key_read(unsigned long *pfn) {}
+static inline void page_key_memorize(unsigned long *pfn) {}
+static inline void page_key_write(void *address) {}
+
+#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
+
+#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/svga.h b/include/linux/svga.h
new file mode 100644
index 000000000..bfa68e837
--- /dev/null
+++ b/include/linux/svga.h
@@ -0,0 +1,124 @@
+#ifndef _LINUX_SVGA_H
+#define _LINUX_SVGA_H
+
+#include <linux/pci.h>
+#include <video/vga.h>
+
+/* Terminator for register set */
+
+#define VGA_REGSET_END_VAL 0xFF
+#define VGA_REGSET_END {VGA_REGSET_END_VAL, 0, 0}
+
+struct vga_regset {
+ u8 regnum;
+ u8 lowbit;
+ u8 highbit;
+};
+
+/* ------------------------------------------------------------------------- */
+
+#define SVGA_FORMAT_END_VAL 0xFFFF
+#define SVGA_FORMAT_END {SVGA_FORMAT_END_VAL, {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, {0, 0, 0}, 0, 0, 0, 0, 0, 0}
+
+struct svga_fb_format {
+ /* var part */
+ u32 bits_per_pixel;
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+ u32 nonstd;
+ /* fix part */
+ u32 type;
+ u32 type_aux;
+ u32 visual;
+ u32 xpanstep;
+ u32 xresstep;
+};
+
+struct svga_timing_regs {
+ const struct vga_regset *h_total_regs;
+ const struct vga_regset *h_display_regs;
+ const struct vga_regset *h_blank_start_regs;
+ const struct vga_regset *h_blank_end_regs;
+ const struct vga_regset *h_sync_start_regs;
+ const struct vga_regset *h_sync_end_regs;
+
+ const struct vga_regset *v_total_regs;
+ const struct vga_regset *v_display_regs;
+ const struct vga_regset *v_blank_start_regs;
+ const struct vga_regset *v_blank_end_regs;
+ const struct vga_regset *v_sync_start_regs;
+ const struct vga_regset *v_sync_end_regs;
+};
+
+struct svga_pll {
+ u16 m_min;
+ u16 m_max;
+ u16 n_min;
+ u16 n_max;
+ u16 r_min;
+ u16 r_max; /* r_max < 32 */
+ u32 f_vco_min;
+ u32 f_vco_max;
+ u32 f_base;
+};
+
+
+/* Write a value to the attribute register */
+
+static inline void svga_wattr(void __iomem *regbase, u8 index, u8 data)
+{
+ vga_r(regbase, VGA_IS1_RC);
+ vga_w(regbase, VGA_ATT_IW, index);
+ vga_w(regbase, VGA_ATT_W, data);
+}
+
+/* Write a value to a sequence register with a mask */
+
+static inline void svga_wseq_mask(void __iomem *regbase, u8 index, u8 data, u8 mask)
+{
+ vga_wseq(regbase, index, (data & mask) | (vga_rseq(regbase, index) & ~mask));
+}
+
+/* Write a value to a CRT register with a mask */
+
+static inline void svga_wcrt_mask(void __iomem *regbase, u8 index, u8 data, u8 mask)
+{
+ vga_wcrt(regbase, index, (data & mask) | (vga_rcrt(regbase, index) & ~mask));
+}
+
+static inline int svga_primary_device(struct pci_dev *dev)
+{
+ u16 flags;
+ pci_read_config_word(dev, PCI_COMMAND, &flags);
+ return (flags & PCI_COMMAND_IO);
+}
+
+
+void svga_wcrt_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value);
+void svga_wseq_multi(void __iomem *regbase, const struct vga_regset *regset, u32 value);
+
+void svga_set_default_gfx_regs(void __iomem *regbase);
+void svga_set_default_atc_regs(void __iomem *regbase);
+void svga_set_default_seq_regs(void __iomem *regbase);
+void svga_set_default_crt_regs(void __iomem *regbase);
+void svga_set_textmode_vga_regs(void __iomem *regbase);
+
+void svga_settile(struct fb_info *info, struct fb_tilemap *map);
+void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area);
+void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect);
+void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit);
+void svga_tilecursor(void __iomem *regbase, struct fb_info *info, struct fb_tilecursor *cursor);
+int svga_get_tilemax(struct fb_info *info);
+void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
+ struct fb_var_screeninfo *var);
+
+int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node);
+int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node);
+void svga_set_timings(void __iomem *regbase, const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, u32 hmul, u32 hdiv, u32 vmul, u32 vdiv, u32 hborder, int node);
+
+int svga_match_format(const struct svga_fb_format *frm, struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix);
+
+#endif /* _LINUX_SVGA_H */
+
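The *_mask helpers above perform a read-modify-write of legacy VGA registers. A hedged example follows; the register index and bit value are assumptions about typical VGA-compatible hardware, not something this header defines.

/* Illustrative: set bit 5 of sequencer register 0x01 (commonly the
 * "screen off" bit on VGA-compatible chips) without disturbing the other
 * bits, assuming 'vgabase' is the chip's mapped register base.
 */
static void example_screen_off(void __iomem *vgabase)
{
	svga_wseq_mask(vgabase, 0x01, 0x20, 0x20);
}
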
diff --git a/include/linux/swab.h b/include/linux/swab.h
new file mode 100644
index 000000000..9ad3c60f6
--- /dev/null
+++ b/include/linux/swab.h
@@ -0,0 +1,21 @@
+#ifndef _LINUX_SWAB_H
+#define _LINUX_SWAB_H
+
+#include <uapi/linux/swab.h>
+
+# define swab16 __swab16
+# define swab32 __swab32
+# define swab64 __swab64
+# define swahw32 __swahw32
+# define swahb32 __swahb32
+# define swab16p __swab16p
+# define swab32p __swab32p
+# define swab64p __swab64p
+# define swahw32p __swahw32p
+# define swahb32p __swahb32p
+# define swab16s __swab16s
+# define swab32s __swab32s
+# define swab64s __swab64s
+# define swahw32s __swahw32s
+# define swahb32s __swahb32s
+#endif /* _LINUX_SWAB_H */
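
For reference, a small usage sketch: these aliases simply forward to the uapi byte-swapping helpers, so the result is a plain endianness reversal. The function name is illustrative.

#include <linux/swab.h>
#include <linux/types.h>

/* Illustrative only: swab32 reverses the byte order of a 32-bit value. */
static inline u32 example_swab32(void)
{
	u32 x = 0x12345678;

	return swab32(x);	/* yields 0x78563412 */
}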
diff --git a/include/linux/swap.h b/include/linux/swap.h
new file mode 100644
index 000000000..1f710a1f8
--- /dev/null
+++ b/include/linux/swap.h
@@ -0,0 +1,549 @@
+#ifndef _LINUX_SWAP_H
+#define _LINUX_SWAP_H
+
+#include <linux/spinlock.h>
+#include <linux/linkage.h>
+#include <linux/mmzone.h>
+#include <linux/list.h>
+#include <linux/memcontrol.h>
+#include <linux/sched.h>
+#include <linux/node.h>
+#include <linux/fs.h>
+#include <linux/atomic.h>
+#include <linux/page-flags.h>
+#include <asm/page.h>
+
+struct notifier_block;
+
+struct bio;
+
+#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
+#define SWAP_FLAG_PRIO_MASK 0x7fff
+#define SWAP_FLAG_PRIO_SHIFT 0
+#define SWAP_FLAG_DISCARD 0x10000 /* enable discard for swap */
+#define SWAP_FLAG_DISCARD_ONCE 0x20000 /* discard swap area at swapon-time */
+#define SWAP_FLAG_DISCARD_PAGES 0x40000 /* discard page-clusters after use */
+
+#define SWAP_FLAGS_VALID (SWAP_FLAG_PRIO_MASK | SWAP_FLAG_PREFER | \
+ SWAP_FLAG_DISCARD | SWAP_FLAG_DISCARD_ONCE | \
+ SWAP_FLAG_DISCARD_PAGES)
+
+static inline int current_is_kswapd(void)
+{
+ return current->flags & PF_KSWAPD;
+}
+
+/*
+ * MAX_SWAPFILES defines the maximum number of swaptypes: things which can
+ * be swapped to. The swap type and the offset into that swap type are
+ * encoded into pte's and into pgoff_t's in the swapcache. Using five bits
+ * for the type means that the maximum number of swapcache pages is 27 bits
+ * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs
+ * the type/offset into the pte as 5/27 as well.
+ */
+#define MAX_SWAPFILES_SHIFT 5
+
+/*
+ * Use some of the swap files numbers for other purposes. This
+ * is a convenient way to hook into the VM to trigger special
+ * actions on faults.
+ */
+
+/*
+ * NUMA node memory migration support
+ */
+#ifdef CONFIG_MIGRATION
+#define SWP_MIGRATION_NUM 2
+#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
+#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#else
+#define SWP_MIGRATION_NUM 0
+#endif
+
+/*
+ * Handling of hardware poisoned pages with memory corruption.
+ */
+#ifdef CONFIG_MEMORY_FAILURE
+#define SWP_HWPOISON_NUM 1
+#define SWP_HWPOISON MAX_SWAPFILES
+#else
+#define SWP_HWPOISON_NUM 0
+#endif
+
+#define MAX_SWAPFILES \
+ ((1 << MAX_SWAPFILES_SHIFT) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
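
To make the arithmetic concrete (a worked example, not additional kernel code):

/* Worked example, assuming both CONFIG_MIGRATION and CONFIG_MEMORY_FAILURE
 * are enabled:
 *	MAX_SWAPFILES = (1 << 5) - SWP_MIGRATION_NUM - SWP_HWPOISON_NUM
 *	              = 32 - 2 - 1 = 29
 * With neither option enabled, all 32 type values are usable swap types.
 */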
+
+/*
+ * Magic header for a swap area. The first part of the union is
+ * what the swap magic looks like for the old (limited to 128MB)
+ * swap area format, the second part of the union adds - in the
+ * old reserved area - some extra information. Note that the first
+ * kilobyte is reserved for boot loader or disk label stuff...
+ *
+ * Having the magic at the end of the PAGE_SIZE makes detecting swap
+ * areas somewhat tricky on machines that support multiple page sizes.
+ * For 2.5 we'll probably want to move the magic to just beyond the
+ * bootbits...
+ */
+union swap_header {
+ struct {
+ char reserved[PAGE_SIZE - 10];
+ char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */
+ } magic;
+ struct {
+ char bootbits[1024]; /* Space for disklabel etc. */
+ __u32 version;
+ __u32 last_page;
+ __u32 nr_badpages;
+ unsigned char sws_uuid[16];
+ unsigned char sws_volume[16];
+ __u32 padding[117];
+ __u32 badpages[1];
+ } info;
+};
+
+/*
+ * current->reclaim_state points to one of these when a task is running
+ * memory reclaim
+ */
+struct reclaim_state {
+ unsigned long reclaimed_slab;
+};
+
+#ifdef __KERNEL__
+
+struct address_space;
+struct sysinfo;
+struct writeback_control;
+struct zone;
+
+/*
+ * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
+ * disk blocks. A list of swap extents maps the entire swapfile. (Where the
+ * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
+ * from setup, they're handled identically.)
+ *
+ * We always assume that blocks are of size PAGE_SIZE.
+ */
+struct swap_extent {
+ struct list_head list;
+ pgoff_t start_page;
+ pgoff_t nr_pages;
+ sector_t start_block;
+};
+
+/*
+ * Max bad pages in the new format..
+ */
+#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
+#define MAX_SWAP_BADPAGES \
+ ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
+
+enum {
+ SWP_USED = (1 << 0), /* is slot in swap_info[] used? */
+ SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */
+ SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
+ SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
+ SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
+ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
+ SWP_BLKDEV = (1 << 6), /* it's a block device */
+ SWP_FILE = (1 << 7), /* set after swap_activate success */
+ SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
+ SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
+ /* add others here before... */
+ SWP_SCANNING = (1 << 10), /* refcount in scan_swap_map */
+};
+
+#define SWAP_CLUSTER_MAX 32UL
+#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
+
+/*
+ * Ratio between zone->managed_pages and the "gap" above the per-zone
+ * "high_wmark". While balancing nodes, we allow kswapd to shrink zones that
+ * do not meet the (high_wmark + gap) watermark, even if they already meet
+ * the high_wmark, in order to provide better per-zone lru behavior. We are
+ * ok to spend no more than 1% of the memory on this zone balancing "gap".
+ */
+#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
+
+#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
+#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
+#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
+#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
+#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
+#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
+
+/*
+ * We use this to track usage of a cluster. A cluster is a block of swap disk
+ * space SWAPFILE_CLUSTER pages long and naturally aligned on disk. All free
+ * clusters are organized into a list. We fetch an entry from the list to get
+ * a free cluster.
+ *
+ * The data field stores the next cluster if the cluster is free, or the
+ * cluster usage counter otherwise. The flags field determines whether a
+ * cluster is free. The whole structure is protected by swap_info_struct.lock.
+ */
+struct swap_cluster_info {
+ unsigned int data:24;
+ unsigned int flags:8;
+};
+#define CLUSTER_FLAG_FREE 1 /* This cluster is free */
+#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */
+
+/*
+ * We assign a cluster to each CPU, so each CPU can allocate swap entries from
+ * its own cluster and swap out sequentially. The purpose is to optimize
+ * swapout throughput.
+ */
+struct percpu_cluster {
+ struct swap_cluster_info index; /* Current cluster index */
+ unsigned int next; /* Likely next allocation offset */
+};
+
+/*
+ * The in-memory structure used to track swap areas.
+ */
+struct swap_info_struct {
+ unsigned long flags; /* SWP_USED etc: see above */
+ signed short prio; /* swap priority of this type */
+ struct plist_node list; /* entry in swap_active_head */
+ struct plist_node avail_list; /* entry in swap_avail_head */
+ signed char type; /* strange name for an index */
+ unsigned int max; /* extent of the swap_map */
+ unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
+ struct swap_cluster_info free_cluster_head; /* free cluster list head */
+ struct swap_cluster_info free_cluster_tail; /* free cluster list tail */
+ unsigned int lowest_bit; /* index of first free in swap_map */
+ unsigned int highest_bit; /* index of last free in swap_map */
+ unsigned int pages; /* total of usable pages of swap */
+ unsigned int inuse_pages; /* number of those currently in use */
+ unsigned int cluster_next; /* likely index for next allocation */
+ unsigned int cluster_nr; /* countdown to next cluster search */
+ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
+ struct swap_extent *curr_swap_extent;
+ struct swap_extent first_swap_extent;
+ struct block_device *bdev; /* swap device or bdev of swap file */
+ struct file *swap_file; /* seldom referenced */
+ unsigned int old_block_size; /* seldom referenced */
+#ifdef CONFIG_FRONTSWAP
+ unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
+ atomic_t frontswap_pages; /* frontswap pages in-use counter */
+#endif
+ spinlock_t lock; /*
+ * protect map scan related fields like
+ * swap_map, lowest_bit, highest_bit,
+ * inuse_pages, cluster_next,
+ * cluster_nr, lowest_alloc,
+ * highest_alloc, free/discard cluster
+ * list. other fields are only changed
+ * at swapon/swapoff, so are protected
+ * by swap_lock. changing flags need
+ * hold this lock and swap_lock. If
+ * both locks need hold, hold swap_lock
+ * first.
+ */
+ struct work_struct discard_work; /* discard worker */
+ struct swap_cluster_info discard_cluster_head; /* list head of discard clusters */
+ struct swap_cluster_info discard_cluster_tail; /* list tail of discard clusters */
+};
+
+/* linux/mm/workingset.c */
+void *workingset_eviction(struct address_space *mapping, struct page *page);
+bool workingset_refault(void *shadow);
+void workingset_activation(struct page *page);
+extern struct list_lru workingset_shadow_nodes;
+
+static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
+{
+ return node->count & RADIX_TREE_COUNT_MASK;
+}
+
+static inline void workingset_node_pages_inc(struct radix_tree_node *node)
+{
+ node->count++;
+}
+
+static inline void workingset_node_pages_dec(struct radix_tree_node *node)
+{
+ node->count--;
+}
+
+static inline unsigned int workingset_node_shadows(struct radix_tree_node *node)
+{
+ return node->count >> RADIX_TREE_COUNT_SHIFT;
+}
+
+static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
+{
+ node->count += 1U << RADIX_TREE_COUNT_SHIFT;
+}
+
+static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
+{
+ node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+}
+
+/* linux/mm/page_alloc.c */
+extern unsigned long totalram_pages;
+extern unsigned long totalreserve_pages;
+extern unsigned long dirty_balance_reserve;
+extern unsigned long nr_free_buffer_pages(void);
+extern unsigned long nr_unallocated_buffer_pages(void);
+extern unsigned long nr_free_pagecache_pages(void);
+
+/* Definition of global_page_state not available yet */
+#define nr_free_pages() global_page_state(NR_FREE_PAGES)
+
+
+/* linux/mm/swap.c */
+extern void lru_cache_add(struct page *);
+extern void lru_cache_add_anon(struct page *page);
+extern void lru_cache_add_file(struct page *page);
+extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+ struct lruvec *lruvec, struct list_head *head);
+extern void activate_page(struct page *);
+extern void mark_page_accessed(struct page *);
+extern void lru_add_drain(void);
+extern void lru_add_drain_cpu(int cpu);
+extern void lru_add_drain_all(void);
+extern void rotate_reclaimable_page(struct page *page);
+extern void deactivate_file_page(struct page *page);
+extern void swap_setup(void);
+
+extern void add_page_to_unevictable_list(struct page *page);
+
+extern void lru_cache_add_active_or_unevictable(struct page *page,
+ struct vm_area_struct *vma);
+
+/* linux/mm/vmscan.c */
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+ gfp_t gfp_mask, nodemask_t *mask);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
+extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
+ unsigned long nr_pages,
+ gfp_t gfp_mask,
+ bool may_swap);
+extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+ gfp_t gfp_mask, bool noswap,
+ struct zone *zone,
+ unsigned long *nr_scanned);
+extern unsigned long shrink_all_memory(unsigned long nr_pages);
+extern unsigned long shrink_memory_mask(unsigned long nr_to_reclaim,
+ gfp_t mask);
+extern int vm_swappiness;
+extern int remove_mapping(struct address_space *mapping, struct page *page);
+extern unsigned long vm_total_pages;
+
+#ifdef CONFIG_NUMA
+extern int zone_reclaim_mode;
+extern int sysctl_min_unmapped_ratio;
+extern int sysctl_min_slab_ratio;
+extern int zone_reclaim(struct zone *, gfp_t, unsigned int);
+#else
+#define zone_reclaim_mode 0
+static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
+{
+ return 0;
+}
+#endif
+
+extern int page_evictable(struct page *page);
+extern void check_move_unevictable_pages(struct page **, int nr_pages);
+
+extern int kswapd_run(int nid);
+extern void kswapd_stop(int nid);
+#ifdef CONFIG_MEMCG
+extern int mem_cgroup_swappiness(struct mem_cgroup *mem);
+#else
+static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
+{
+ return vm_swappiness;
+}
+#endif
+#ifdef CONFIG_MEMCG_SWAP
+extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
+extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
+#else
+static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+{
+}
+static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
+{
+}
+#endif
+#ifdef CONFIG_SWAP
+/* linux/mm/page_io.c */
+extern int swap_readpage(struct page *);
+extern int swap_writepage(struct page *page, struct writeback_control *wbc);
+extern void end_swap_bio_write(struct bio *bio, int err);
+extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
+ void (*end_write_func)(struct bio *, int));
+extern int swap_set_page_dirty(struct page *page);
+extern void end_swap_bio_read(struct bio *bio, int err);
+
+int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
+ unsigned long nr_pages, sector_t start_block);
+int generic_swapfile_activate(struct swap_info_struct *, struct file *,
+ sector_t *);
+
+/* linux/mm/swap_state.c */
+extern struct address_space swapper_spaces[];
+#define swap_address_space(entry) (&swapper_spaces[swp_type(entry)])
+extern unsigned long total_swapcache_pages(void);
+extern void show_swap_cache_info(void);
+extern int add_to_swap(struct page *, struct list_head *list);
+extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
+extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
+extern void __delete_from_swap_cache(struct page *);
+extern void delete_from_swap_cache(struct page *);
+extern void free_page_and_swap_cache(struct page *);
+extern void free_pages_and_swap_cache(struct page **, int);
+extern struct page *lookup_swap_cache(swp_entry_t);
+extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
+ struct vm_area_struct *vma, unsigned long addr);
+extern struct page *swapin_readahead(swp_entry_t, gfp_t,
+ struct vm_area_struct *vma, unsigned long addr);
+
+/* linux/mm/swapfile.c */
+extern atomic_long_t nr_swap_pages;
+extern long total_swap_pages;
+
+/* Swap 50% full? Release swapcache more aggressively.. */
+static inline bool vm_swap_full(void)
+{
+ return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
+}
+
+static inline long get_nr_swap_pages(void)
+{
+ return atomic_long_read(&nr_swap_pages);
+}
+
+extern void si_swapinfo(struct sysinfo *);
+extern swp_entry_t get_swap_page(void);
+extern swp_entry_t get_swap_page_of_type(int);
+extern int add_swap_count_continuation(swp_entry_t, gfp_t);
+extern void swap_shmem_alloc(swp_entry_t);
+extern int swap_duplicate(swp_entry_t);
+extern int swapcache_prepare(swp_entry_t);
+extern void swap_free(swp_entry_t);
+extern void swapcache_free(swp_entry_t);
+extern int free_swap_and_cache(swp_entry_t);
+extern int swap_type_of(dev_t, sector_t, struct block_device **);
+extern unsigned int count_swap_pages(int, int);
+extern sector_t map_swap_entry(swp_entry_t entry, struct block_device **);
+extern sector_t map_swap_page(struct page *, struct block_device **);
+extern sector_t swapdev_block(int, pgoff_t);
+extern struct swap_info_struct *get_swap_info_struct(unsigned);
+extern int page_swapcount(struct page *);
+extern struct swap_info_struct *page_swap_info(struct page *);
+extern int reuse_swap_page(struct page *);
+extern int try_to_free_swap(struct page *);
+struct backing_dev_info;
+extern void get_swap_range_of_type(int type, swp_entry_t *start,
+ swp_entry_t *end, unsigned int limit);
+
+#else /* CONFIG_SWAP */
+
+#define swap_address_space(entry) (NULL)
+#define get_nr_swap_pages() 0L
+#define total_swap_pages 0L
+#define total_swapcache_pages() 0UL
+#define vm_swap_full() 0
+
+#define si_swapinfo(val) \
+ do { (val)->freeswap = (val)->totalswap = 0; } while (0)
+/* Only sparc cannot include linux/pagemap.h in this file,
+ * so leave page_cache_release and release_pages undeclared... */
+#define free_page_and_swap_cache(page) \
+ page_cache_release(page)
+#define free_pages_and_swap_cache(pages, nr) \
+ release_pages((pages), (nr), false);
+
+static inline void show_swap_cache_info(void)
+{
+}
+
+#define free_swap_and_cache(swp) is_migration_entry(swp)
+#define swapcache_prepare(swp) is_migration_entry(swp)
+
+static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline void swap_shmem_alloc(swp_entry_t swp)
+{
+}
+
+static inline int swap_duplicate(swp_entry_t swp)
+{
+ return 0;
+}
+
+static inline void swap_free(swp_entry_t swp)
+{
+}
+
+static inline void swapcache_free(swp_entry_t swp)
+{
+}
+
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ return NULL;
+}
+
+static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
+{
+ return 0;
+}
+
+static inline struct page *lookup_swap_cache(swp_entry_t swp)
+{
+ return NULL;
+}
+
+static inline int add_to_swap(struct page *page, struct list_head *list)
+{
+ return 0;
+}
+
+static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
+ gfp_t gfp_mask)
+{
+ return -1;
+}
+
+static inline void __delete_from_swap_cache(struct page *page)
+{
+}
+
+static inline void delete_from_swap_cache(struct page *page)
+{
+}
+
+static inline int page_swapcount(struct page *page)
+{
+ return 0;
+}
+
+#define reuse_swap_page(page) (page_mapcount(page) == 1)
+
+static inline int try_to_free_swap(struct page *page)
+{
+ return 0;
+}
+
+static inline swp_entry_t get_swap_page(void)
+{
+ swp_entry_t entry;
+ entry.val = 0;
+ return entry;
+}
+
+#endif /* CONFIG_SWAP */
+#endif /* __KERNEL__*/
+#endif /* _LINUX_SWAP_H */
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
new file mode 100644
index 000000000..145306bdc
--- /dev/null
+++ b/include/linux/swap_cgroup.h
@@ -0,0 +1,42 @@
+#ifndef __LINUX_SWAP_CGROUP_H
+#define __LINUX_SWAP_CGROUP_H
+
+#include <linux/swap.h>
+
+#ifdef CONFIG_MEMCG_SWAP
+
+extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
+ unsigned short old, unsigned short new);
+extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
+extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
+extern int swap_cgroup_swapon(int type, unsigned long max_pages);
+extern void swap_cgroup_swapoff(int type);
+
+#else
+
+static inline
+unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
+{
+ return 0;
+}
+
+static inline
+unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
+{
+ return 0;
+}
+
+static inline int
+swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+ return 0;
+}
+
+static inline void swap_cgroup_swapoff(int type)
+{
+ return;
+}
+
+#endif /* CONFIG_MEMCG_SWAP */
+
+#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
new file mode 100644
index 000000000..388293a91
--- /dev/null
+++ b/include/linux/swapfile.h
@@ -0,0 +1,13 @@
+#ifndef _LINUX_SWAPFILE_H
+#define _LINUX_SWAPFILE_H
+
+/*
+ * these were static in swapfile.c but frontswap.c needs them and we don't
+ * want to expose them to the dozens of source files that include swap.h
+ */
+extern spinlock_t swap_lock;
+extern struct plist_head swap_active_head;
+extern struct swap_info_struct *swap_info[];
+extern int try_to_unuse(unsigned int, bool, unsigned long);
+
+#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
new file mode 100644
index 000000000..cedf3d3c3
--- /dev/null
+++ b/include/linux/swapops.h
@@ -0,0 +1,205 @@
+#ifndef _LINUX_SWAPOPS_H
+#define _LINUX_SWAPOPS_H
+
+#include <linux/radix-tree.h>
+#include <linux/bug.h>
+
+/*
+ * swapcache pages are stored in the swapper_space radix tree. We want to
+ * get good packing density in that tree, so the index should be dense in
+ * the low-order bits.
+ *
+ * We arrange the `type' and `offset' fields so that `type' is at the seven
+ * high-order bits of the swp_entry_t and `offset' is right-aligned in the
+ * remaining bits. Although `type' itself needs only five bits, we allow for
+ * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
+ *
+ * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
+ */
+#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \
+ (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
+#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1)
+
+/*
+ * Store a type+offset into a swp_entry_t in an arch-independent format
+ */
+static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
+{
+ swp_entry_t ret;
+
+ ret.val = (type << SWP_TYPE_SHIFT(ret)) |
+ (offset & SWP_OFFSET_MASK(ret));
+ return ret;
+}
+
+/*
+ * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
+ * arch-independent format
+ */
+static inline unsigned swp_type(swp_entry_t entry)
+{
+ return (entry.val >> SWP_TYPE_SHIFT(entry));
+}
+
+/*
+ * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
+ * arch-independent format
+ */
+static inline pgoff_t swp_offset(swp_entry_t entry)
+{
+ return entry.val & SWP_OFFSET_MASK(entry);
+}
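+
+/*
+ * Round-trip sketch of the packing above, assuming a 64-bit swp_entry_t,
+ * MAX_SWAPFILES_SHIFT of 5 and RADIX_TREE_EXCEPTIONAL_SHIFT of 2, so that
+ * SWP_TYPE_SHIFT(e) evaluates to 57:
+ *
+ *	swp_entry_t e = swp_entry(2, 0x1234);
+ *
+ * then swp_type(e) == 2 and swp_offset(e) == 0x1234.
+ */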
+
+#ifdef CONFIG_MMU
+/* check whether a pte points to a swap entry */
+static inline int is_swap_pte(pte_t pte)
+{
+ return !pte_none(pte) && !pte_present(pte);
+}
+#endif
+
+/*
+ * Convert the arch-dependent pte representation of a swp_entry_t into an
+ * arch-independent swp_entry_t.
+ */
+static inline swp_entry_t pte_to_swp_entry(pte_t pte)
+{
+ swp_entry_t arch_entry;
+
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
+ arch_entry = __pte_to_swp_entry(pte);
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+
+/*
+ * Convert the arch-independent representation of a swp_entry_t into the
+ * arch-dependent pte representation.
+ */
+static inline pte_t swp_entry_to_pte(swp_entry_t entry)
+{
+ swp_entry_t arch_entry;
+
+ arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
+ return __swp_entry_to_pte(arch_entry);
+}
+
+static inline swp_entry_t radix_to_swp_entry(void *arg)
+{
+ swp_entry_t entry;
+
+ entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ return entry;
+}
+
+static inline void *swp_to_radix_entry(swp_entry_t entry)
+{
+ unsigned long value;
+
+ value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
+ return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
+}
+
+#ifdef CONFIG_MIGRATION
+static inline swp_entry_t make_migration_entry(struct page *page, int write)
+{
+ BUG_ON(!PageLocked(page));
+ return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
+ page_to_pfn(page));
+}
+
+static inline int is_migration_entry(swp_entry_t entry)
+{
+ return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
+ swp_type(entry) == SWP_MIGRATION_WRITE);
+}
+
+static inline int is_write_migration_entry(swp_entry_t entry)
+{
+ return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
+}
+
+static inline struct page *migration_entry_to_page(swp_entry_t entry)
+{
+ struct page *p = pfn_to_page(swp_offset(entry));
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding page is locked
+ */
+ BUG_ON(!PageLocked(p));
+ return p;
+}
+
+static inline void make_migration_entry_read(swp_entry_t *entry)
+{
+ *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
+}
+
+extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl);
+extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address);
+extern void migration_entry_wait_huge(struct vm_area_struct *vma,
+ struct mm_struct *mm, pte_t *pte);
+#else
+
+#define make_migration_entry(page, write) swp_entry(0, 0)
+static inline int is_migration_entry(swp_entry_t swp)
+{
+ return 0;
+}
+#define migration_entry_to_page(swp) NULL
+static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl) { }
+static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long address) { }
+static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
+ struct mm_struct *mm, pte_t *pte) { }
+static inline int is_write_migration_entry(swp_entry_t entry)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for hardware poisoned pages
+ */
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ BUG_ON(!PageLocked(page));
+ return swp_entry(SWP_HWPOISON, page_to_pfn(page));
+}
+
+static inline int is_hwpoison_entry(swp_entry_t entry)
+{
+ return swp_type(entry) == SWP_HWPOISON;
+}
+#else
+
+static inline swp_entry_t make_hwpoison_entry(struct page *page)
+{
+ return swp_entry(0, 0);
+}
+
+static inline int is_hwpoison_entry(swp_entry_t swp)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
+static inline int non_swap_entry(swp_entry_t entry)
+{
+ return swp_type(entry) >= MAX_SWAPFILES;
+}
+#else
+static inline int non_swap_entry(swp_entry_t entry)
+{
+ return 0;
+}
+#endif
+
+#endif /* _LINUX_SWAPOPS_H */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
new file mode 100644
index 000000000..e7a018eaf
--- /dev/null
+++ b/include/linux/swiotlb.h
@@ -0,0 +1,121 @@
+#ifndef __LINUX_SWIOTLB_H
+#define __LINUX_SWIOTLB_H
+
+#include <linux/types.h>
+
+struct device;
+struct dma_attrs;
+struct scatterlist;
+
+extern int swiotlb_force;
+
+/*
+ * Maximum allowable number of contiguous slabs to map,
+ * must be a power of 2. What is the appropriate value?
+ * The complexity of {map,unmap}_single is linearly dependent on this value.
+ */
+#define IO_TLB_SEGSIZE 128
+
+/*
+ * log of the size of each IO TLB slab. The number of slabs is command line
+ * controllable.
+ */
+#define IO_TLB_SHIFT 11
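+
+/*
+ * Taken together, the two constants above mean each slab covers
+ * 1 << IO_TLB_SHIFT == 2 KiB, so a single contiguous mapping is capped at
+ * IO_TLB_SEGSIZE * 2 KiB == 256 KiB.
+ */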
+
+extern void swiotlb_init(int verbose);
+int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
+extern unsigned long swiotlb_nr_tbl(void);
+unsigned long swiotlb_size_or_default(void);
+extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
+
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+ SYNC_FOR_CPU = 0,
+ SYNC_FOR_DEVICE = 1,
+};
+
+/* define the last possible byte of physical address space as a mapping error */
+#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)
+
+extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+ dma_addr_t tbl_dma_addr,
+ phys_addr_t phys, size_t size,
+ enum dma_data_direction dir);
+
+extern void swiotlb_tbl_unmap_single(struct device *hwdev,
+ phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir);
+
+extern void swiotlb_tbl_sync_single(struct device *hwdev,
+ phys_addr_t tlb_addr,
+ size_t size, enum dma_data_direction dir,
+ enum dma_sync_target target);
+
+/* Accessory functions. */
+extern void
+*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags);
+
+extern void
+swiotlb_free_coherent(struct device *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle);
+
+extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+
+extern int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+extern void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+extern int
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir, struct dma_attrs *attrs);
+
+extern void
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
+
+extern void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir);
+
+extern void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+
+extern void
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir);
+
+extern void
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir);
+
+extern int
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
+
+extern int
+swiotlb_dma_supported(struct device *hwdev, u64 mask);
+
+#ifdef CONFIG_SWIOTLB
+extern void __init swiotlb_free(void);
+#else
+static inline void swiotlb_free(void) { }
+#endif
+
+extern void swiotlb_print_info(void);
+extern int is_swiotlb_buffer(phys_addr_t paddr);
+
+#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h
new file mode 100644
index 000000000..a62442cf0
--- /dev/null
+++ b/include/linux/sxgbe_platform.h
@@ -0,0 +1,54 @@
+/*
+ * 10G controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __SXGBE_PLATFORM_H__
+#define __SXGBE_PLATFORM_H__
+
+/* MDC Clock Selection defines */
+#define SXGBE_CSR_100_150M 0x0 /* MDC = clk_scr_i/62 */
+#define SXGBE_CSR_150_250M 0x1 /* MDC = clk_scr_i/102 */
+#define SXGBE_CSR_250_300M 0x2 /* MDC = clk_scr_i/122 */
+#define SXGBE_CSR_300_350M 0x3 /* MDC = clk_scr_i/142 */
+#define SXGBE_CSR_350_400M 0x4 /* MDC = clk_scr_i/162 */
+#define SXGBE_CSR_400_500M 0x5 /* MDC = clk_scr_i/202 */
+
+/* Platform data for the platform device structure's
+ * platform_data field
+ */
+struct sxgbe_mdio_bus_data {
+ unsigned int phy_mask;
+ int *irqs;
+ int probed_phy_irq;
+};
+
+struct sxgbe_dma_cfg {
+ int pbl;
+ int fixed_burst;
+ int burst_map;
+ int adv_addr_mode;
+};
+
+struct sxgbe_plat_data {
+ char *phy_bus_name;
+ int bus_id;
+ int phy_addr;
+ int interface;
+ struct sxgbe_mdio_bus_data *mdio_bus_data;
+ struct sxgbe_dma_cfg *dma_cfg;
+ int clk_csr;
+ int pmt;
+ int force_sf_dma_mode;
+ int force_thresh_dma_mode;
+ int riwt_off;
+};
+
+#endif /* __SXGBE_PLATFORM_H__ */
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
new file mode 100644
index 000000000..f1405b1c7
--- /dev/null
+++ b/include/linux/synclink.h
@@ -0,0 +1,37 @@
+/*
+ * SyncLink Multiprotocol Serial Adapter Driver
+ *
+ * $Id: synclink.h,v 3.14 2006/07/17 20:15:43 paulkf Exp $
+ *
+ * Copyright (C) 1998-2000 by Microgate Corporation
+ *
+ * Redistribution of this file is permitted under
+ * the terms of the GNU Public License (GPL)
+ */
+#ifndef _SYNCLINK_H_
+#define _SYNCLINK_H_
+
+#include <uapi/linux/synclink.h>
+
+/* provide 32 bit ioctl compatibility on 64 bit systems */
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct MGSL_PARAMS32 {
+ compat_ulong_t mode;
+ unsigned char loopback;
+ unsigned short flags;
+ unsigned char encoding;
+ compat_ulong_t clock_speed;
+ unsigned char addr_filter;
+ unsigned short crc_type;
+ unsigned char preamble_length;
+ unsigned char preamble;
+ compat_ulong_t data_rate;
+ unsigned char data_bits;
+ unsigned char stop_bits;
+ unsigned char parity;
+};
+#define MGSL_IOCSPARAMS32 _IOW(MGSL_MAGIC_IOC,0,struct MGSL_PARAMS32)
+#define MGSL_IOCGPARAMS32 _IOR(MGSL_MAGIC_IOC,1,struct MGSL_PARAMS32)
+#endif
+#endif /* _SYNCLINK_H_ */
diff --git a/include/linux/sys.h b/include/linux/sys.h
new file mode 100644
index 000000000..daa6008bf
--- /dev/null
+++ b/include/linux/sys.h
@@ -0,0 +1,29 @@
+#ifndef _LINUX_SYS_H
+#define _LINUX_SYS_H
+
+/*
+ * This file is no longer used or needed
+ */
+
+/*
+ * These are system calls that will be removed at some time
+ * due to newer versions existing..
+ * (please be careful - ibcs2 may need some of these).
+ */
+#ifdef notdef
+#define _sys_waitpid _sys_old_syscall /* _sys_wait4 */
+#define _sys_olduname _sys_old_syscall /* _sys_newuname */
+#define _sys_uname _sys_old_syscall /* _sys_newuname */
+#define _sys_stat _sys_old_syscall /* _sys_newstat */
+#define _sys_fstat _sys_old_syscall /* _sys_newfstat */
+#define _sys_lstat _sys_old_syscall /* _sys_newlstat */
+#define _sys_signal _sys_old_syscall /* _sys_sigaction */
+#define _sys_sgetmask _sys_old_syscall /* _sys_sigprocmask */
+#define _sys_ssetmask _sys_old_syscall /* _sys_sigprocmask */
+#endif
+
+/*
+ * These are system calls that haven't been implemented yet
+ * but have an entry in the table for future expansion..
+ */
+#endif
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
new file mode 100644
index 000000000..2739ccb69
--- /dev/null
+++ b/include/linux/sys_soc.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#ifndef __SOC_BUS_H
+#define __SOC_BUS_H
+
+#include <linux/device.h>
+
+struct soc_device_attribute {
+ const char *machine;
+ const char *family;
+ const char *revision;
+ const char *soc_id;
+};
+
+/**
+ * soc_device_register - register SoC as a device
+ * @soc_plat_dev_attr: Attributes passed from platform to be attributed to a SoC
+ */
+struct soc_device *soc_device_register(
+ struct soc_device_attribute *soc_plat_dev_attr);
+
+/**
+ * soc_device_unregister - unregister SoC device
+ * @dev: SoC device to be unregistered
+ */
+void soc_device_unregister(struct soc_device *soc_dev);
+
+/**
+ * soc_device_to_device - helper function to fetch struct device
+ * @soc: Previously registered SoC device container
+ */
+struct device *soc_device_to_device(struct soc_device *soc);
+
+#endif /* __SOC_BUS_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
new file mode 100644
index 000000000..76d1e38aa
--- /dev/null
+++ b/include/linux/syscalls.h
@@ -0,0 +1,887 @@
+/*
+ * syscalls.h - Linux syscall interfaces (non-arch-specific)
+ *
+ * Copyright (c) 2004 Randy Dunlap
+ * Copyright (c) 2004 Open Source Development Labs
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#ifndef _LINUX_SYSCALLS_H
+#define _LINUX_SYSCALLS_H
+
+struct epoll_event;
+struct iattr;
+struct inode;
+struct iocb;
+struct io_event;
+struct iovec;
+struct itimerspec;
+struct itimerval;
+struct kexec_segment;
+struct linux_dirent;
+struct linux_dirent64;
+struct list_head;
+struct mmap_arg_struct;
+struct msgbuf;
+struct user_msghdr;
+struct mmsghdr;
+struct msqid_ds;
+struct new_utsname;
+struct nfsctl_arg;
+struct __old_kernel_stat;
+struct oldold_utsname;
+struct old_utsname;
+struct pollfd;
+struct rlimit;
+struct rlimit64;
+struct rusage;
+struct sched_param;
+struct sched_attr;
+struct sel_arg_struct;
+struct semaphore;
+struct sembuf;
+struct shmid_ds;
+struct sockaddr;
+struct stat;
+struct stat64;
+struct statfs;
+struct statfs64;
+struct __sysctl_args;
+struct sysinfo;
+struct timespec;
+struct timeval;
+struct timex;
+struct timezone;
+struct tms;
+struct utimbuf;
+struct mq_attr;
+struct compat_stat;
+struct compat_timeval;
+struct robust_list_head;
+struct getcpu_cache;
+struct old_linux_dirent;
+struct perf_event_attr;
+struct file_handle;
+struct sigaltstack;
+union bpf_attr;
+
+#include <linux/types.h>
+#include <linux/aio_abi.h>
+#include <linux/capability.h>
+#include <linux/signal.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/sem.h>
+#include <asm/siginfo.h>
+#include <linux/unistd.h>
+#include <linux/quota.h>
+#include <linux/key.h>
+#include <trace/syscall.h>
+
+/*
+ * __MAP - apply a macro to syscall arguments
+ * __MAP(n, m, t1, a1, t2, a2, ..., tn, an) will expand to
+ * m(t1, a1), m(t2, a2), ..., m(tn, an)
+ * The first argument must be equal to the number of type/name
+ * pairs given. Note that this list of pairs (i.e. the arguments
+ * of __MAP starting at the third one) is in the same format as
+ * for SYSCALL_DEFINE<n>/COMPAT_SYSCALL_DEFINE<n>
+ */
+#define __MAP0(m,...)
+#define __MAP1(m,t,a) m(t,a)
+#define __MAP2(m,t,a,...) m(t,a), __MAP1(m,__VA_ARGS__)
+#define __MAP3(m,t,a,...) m(t,a), __MAP2(m,__VA_ARGS__)
+#define __MAP4(m,t,a,...) m(t,a), __MAP3(m,__VA_ARGS__)
+#define __MAP5(m,t,a,...) m(t,a), __MAP4(m,__VA_ARGS__)
+#define __MAP6(m,t,a,...) m(t,a), __MAP5(m,__VA_ARGS__)
+#define __MAP(n,...) __MAP##n(__VA_ARGS__)
+
+#define __SC_DECL(t, a) t a
+#define __TYPE_IS_L(t) (__same_type((t)0, 0L))
+#define __TYPE_IS_UL(t) (__same_type((t)0, 0UL))
+#define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
+#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
+#define __SC_CAST(t, a) (t) a
+#define __SC_ARGS(t, a) a
+#define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
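+
+/*
+ * Worked expansion of the helpers above, for an illustrative argument list:
+ *
+ *	__MAP(2, __SC_DECL, unsigned int, fd, int, flags)
+ *
+ * expands to "unsigned int fd, int flags"; the same list run through
+ * __SC_LONG declares both parameters as long, and __SC_CAST casts them
+ * back to their declared types.
+ */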
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define __SC_STR_ADECL(t, a) #a
+#define __SC_STR_TDECL(t, a) #t
+
+extern struct ftrace_event_class event_class_syscall_enter;
+extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_functions enter_syscall_print_funcs;
+extern struct trace_event_functions exit_syscall_print_funcs;
+
+#define SYSCALL_TRACE_ENTER_EVENT(sname) \
+ static struct syscall_metadata __syscall_meta_##sname; \
+ static struct ftrace_event_call __used \
+ event_enter_##sname = { \
+ .class = &event_class_syscall_enter, \
+ { \
+ .name = "sys_enter"#sname, \
+ }, \
+ .event.funcs = &enter_syscall_print_funcs, \
+ .data = (void *)&__syscall_meta_##sname,\
+ .flags = TRACE_EVENT_FL_CAP_ANY, \
+ }; \
+ static struct ftrace_event_call __used \
+ __attribute__((section("_ftrace_events"))) \
+ *__event_enter_##sname = &event_enter_##sname;
+
+#define SYSCALL_TRACE_EXIT_EVENT(sname) \
+ static struct syscall_metadata __syscall_meta_##sname; \
+ static struct ftrace_event_call __used \
+ event_exit_##sname = { \
+ .class = &event_class_syscall_exit, \
+ { \
+ .name = "sys_exit"#sname, \
+ }, \
+ .event.funcs = &exit_syscall_print_funcs, \
+ .data = (void *)&__syscall_meta_##sname,\
+ .flags = TRACE_EVENT_FL_CAP_ANY, \
+ }; \
+ static struct ftrace_event_call __used \
+ __attribute__((section("_ftrace_events"))) \
+ *__event_exit_##sname = &event_exit_##sname;
+
+#define SYSCALL_METADATA(sname, nb, ...) \
+ static const char *types_##sname[] = { \
+ __MAP(nb,__SC_STR_TDECL,__VA_ARGS__) \
+ }; \
+ static const char *args_##sname[] = { \
+ __MAP(nb,__SC_STR_ADECL,__VA_ARGS__) \
+ }; \
+ SYSCALL_TRACE_ENTER_EVENT(sname); \
+ SYSCALL_TRACE_EXIT_EVENT(sname); \
+ static struct syscall_metadata __used \
+ __syscall_meta_##sname = { \
+ .name = "sys"#sname, \
+ .syscall_nr = -1, /* Filled in at boot */ \
+ .nb_args = nb, \
+ .types = nb ? types_##sname : NULL, \
+ .args = nb ? args_##sname : NULL, \
+ .enter_event = &event_enter_##sname, \
+ .exit_event = &event_exit_##sname, \
+ .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
+ }; \
+ static struct syscall_metadata __used \
+ __attribute__((section("__syscalls_metadata"))) \
+ *__p_syscall_meta_##sname = &__syscall_meta_##sname;
+#else
+#define SYSCALL_METADATA(sname, nb, ...)
+#endif
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long sys_##sname(void)
+
+#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE4(name, ...) SYSCALL_DEFINEx(4, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE5(name, ...) SYSCALL_DEFINEx(5, _##name, __VA_ARGS__)
+#define SYSCALL_DEFINE6(name, ...) SYSCALL_DEFINEx(6, _##name, __VA_ARGS__)
+
+#define SYSCALL_DEFINEx(x, sname, ...) \
+ SYSCALL_METADATA(sname, x, __VA_ARGS__) \
+ __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
+
+#define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
+ __attribute__((alias(__stringify(SyS##name)))); \
+ static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
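+
+/*
+ * Usage sketch; xyzzy and do_xyzzy are hypothetical names used purely for
+ * illustration:
+ *
+ *	SYSCALL_DEFINE2(xyzzy, unsigned int, fd, int, flags)
+ *	{
+ *		return do_xyzzy(fd, flags);
+ *	}
+ *
+ * This emits sys_xyzzy() as an alias of SyS_xyzzy(), which receives every
+ * argument as long, casts each back via __SC_CAST and calls the static
+ * SYSC_xyzzy() body supplied between the braces.
+ */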
+
+asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
+ qid_t id, void __user *addr);
+asmlinkage long sys_time(time_t __user *tloc);
+asmlinkage long sys_stime(time_t __user *tptr);
+asmlinkage long sys_gettimeofday(struct timeval __user *tv,
+ struct timezone __user *tz);
+asmlinkage long sys_settimeofday(struct timeval __user *tv,
+ struct timezone __user *tz);
+asmlinkage long sys_adjtimex(struct timex __user *txc_p);
+
+asmlinkage long sys_times(struct tms __user *tbuf);
+
+asmlinkage long sys_gettid(void);
+asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp);
+asmlinkage long sys_alarm(unsigned int seconds);
+asmlinkage long sys_getpid(void);
+asmlinkage long sys_getppid(void);
+asmlinkage long sys_getuid(void);
+asmlinkage long sys_geteuid(void);
+asmlinkage long sys_getgid(void);
+asmlinkage long sys_getegid(void);
+asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid);
+asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid);
+asmlinkage long sys_getpgid(pid_t pid);
+asmlinkage long sys_getpgrp(void);
+asmlinkage long sys_getsid(pid_t pid);
+asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist);
+
+asmlinkage long sys_setregid(gid_t rgid, gid_t egid);
+asmlinkage long sys_setgid(gid_t gid);
+asmlinkage long sys_setreuid(uid_t ruid, uid_t euid);
+asmlinkage long sys_setuid(uid_t uid);
+asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);
+asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid);
+asmlinkage long sys_setfsuid(uid_t uid);
+asmlinkage long sys_setfsgid(gid_t gid);
+asmlinkage long sys_setpgid(pid_t pid, pid_t pgid);
+asmlinkage long sys_setsid(void);
+asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist);
+
+asmlinkage long sys_acct(const char __user *name);
+asmlinkage long sys_capget(cap_user_header_t header,
+ cap_user_data_t dataptr);
+asmlinkage long sys_capset(cap_user_header_t header,
+ const cap_user_data_t data);
+asmlinkage long sys_personality(unsigned int personality);
+
+asmlinkage long sys_sigpending(old_sigset_t __user *set);
+asmlinkage long sys_sigprocmask(int how, old_sigset_t __user *set,
+ old_sigset_t __user *oset);
+asmlinkage long sys_sigaltstack(const struct sigaltstack __user *uss,
+ struct sigaltstack __user *uoss);
+
+asmlinkage long sys_getitimer(int which, struct itimerval __user *value);
+asmlinkage long sys_setitimer(int which,
+ struct itimerval __user *value,
+ struct itimerval __user *ovalue);
+asmlinkage long sys_timer_create(clockid_t which_clock,
+ struct sigevent __user *timer_event_spec,
+ timer_t __user * created_timer_id);
+asmlinkage long sys_timer_gettime(timer_t timer_id,
+ struct itimerspec __user *setting);
+asmlinkage long sys_timer_getoverrun(timer_t timer_id);
+asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
+ const struct itimerspec __user *new_setting,
+ struct itimerspec __user *old_setting);
+asmlinkage long sys_timer_delete(timer_t timer_id);
+asmlinkage long sys_clock_settime(clockid_t which_clock,
+ const struct timespec __user *tp);
+asmlinkage long sys_clock_gettime(clockid_t which_clock,
+ struct timespec __user *tp);
+asmlinkage long sys_clock_adjtime(clockid_t which_clock,
+ struct timex __user *tx);
+asmlinkage long sys_clock_getres(clockid_t which_clock,
+ struct timespec __user *tp);
+asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags,
+ const struct timespec __user *rqtp,
+ struct timespec __user *rmtp);
+
+asmlinkage long sys_nice(int increment);
+asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+ struct sched_param __user *param);
+asmlinkage long sys_sched_setparam(pid_t pid,
+ struct sched_param __user *param);
+asmlinkage long sys_sched_setattr(pid_t pid,
+ struct sched_attr __user *attr,
+ unsigned int flags);
+asmlinkage long sys_sched_getscheduler(pid_t pid);
+asmlinkage long sys_sched_getparam(pid_t pid,
+ struct sched_param __user *param);
+asmlinkage long sys_sched_getattr(pid_t pid,
+ struct sched_attr __user *attr,
+ unsigned int size,
+ unsigned int flags);
+asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr);
+asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr);
+asmlinkage long sys_sched_yield(void);
+asmlinkage long sys_sched_get_priority_max(int policy);
+asmlinkage long sys_sched_get_priority_min(int policy);
+asmlinkage long sys_sched_rr_get_interval(pid_t pid,
+ struct timespec __user *interval);
+asmlinkage long sys_setpriority(int which, int who, int niceval);
+asmlinkage long sys_getpriority(int which, int who);
+
+asmlinkage long sys_shutdown(int, int);
+asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
+ void __user *arg);
+asmlinkage long sys_restart_syscall(void);
+asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
+ struct kexec_segment __user *segments,
+ unsigned long flags);
+asmlinkage long sys_kexec_file_load(int kernel_fd, int initrd_fd,
+ unsigned long cmdline_len,
+ const char __user *cmdline_ptr,
+ unsigned long flags);
+
+asmlinkage long sys_exit(int error_code);
+asmlinkage long sys_exit_group(int error_code);
+asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
+ int options, struct rusage __user *ru);
+asmlinkage long sys_waitid(int which, pid_t pid,
+ struct siginfo __user *infop,
+ int options, struct rusage __user *ru);
+asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
+asmlinkage long sys_set_tid_address(int __user *tidptr);
+asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
+ struct timespec __user *utime, u32 __user *uaddr2,
+ u32 val3);
+
+asmlinkage long sys_init_module(void __user *umod, unsigned long len,
+ const char __user *uargs);
+asmlinkage long sys_delete_module(const char __user *name_user,
+ unsigned int flags);
+
+#ifdef CONFIG_OLD_SIGSUSPEND
+asmlinkage long sys_sigsuspend(old_sigset_t mask);
+#endif
+
+#ifdef CONFIG_OLD_SIGSUSPEND3
+asmlinkage long sys_sigsuspend(int unused1, int unused2, old_sigset_t mask);
+#endif
+
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
+
+#ifdef CONFIG_OLD_SIGACTION
+asmlinkage long sys_sigaction(int, const struct old_sigaction __user *,
+ struct old_sigaction __user *);
+#endif
+
+#ifndef CONFIG_ODD_RT_SIGACTION
+asmlinkage long sys_rt_sigaction(int,
+ const struct sigaction __user *,
+ struct sigaction __user *,
+ size_t);
+#endif
+asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set,
+ sigset_t __user *oset, size_t sigsetsize);
+asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize);
+asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
+ siginfo_t __user *uinfo,
+ const struct timespec __user *uts,
+ size_t sigsetsize);
+asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
+ siginfo_t __user *uinfo);
+asmlinkage long sys_kill(int pid, int sig);
+asmlinkage long sys_tgkill(int tgid, int pid, int sig);
+asmlinkage long sys_tkill(int pid, int sig);
+asmlinkage long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo);
+asmlinkage long sys_sgetmask(void);
+asmlinkage long sys_ssetmask(int newmask);
+asmlinkage long sys_signal(int sig, __sighandler_t handler);
+asmlinkage long sys_pause(void);
+
+asmlinkage long sys_sync(void);
+asmlinkage long sys_fsync(unsigned int fd);
+asmlinkage long sys_fdatasync(unsigned int fd);
+asmlinkage long sys_bdflush(int func, long data);
+asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
+ char __user *type, unsigned long flags,
+ void __user *data);
+asmlinkage long sys_umount(char __user *name, int flags);
+asmlinkage long sys_oldumount(char __user *name);
+asmlinkage long sys_truncate(const char __user *path, long length);
+asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+asmlinkage long sys_stat(const char __user *filename,
+ struct __old_kernel_stat __user *statbuf);
+asmlinkage long sys_statfs(const char __user * path,
+ struct statfs __user *buf);
+asmlinkage long sys_statfs64(const char __user *path, size_t sz,
+ struct statfs64 __user *buf);
+asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
+asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
+ struct statfs64 __user *buf);
+asmlinkage long sys_lstat(const char __user *filename,
+ struct __old_kernel_stat __user *statbuf);
+asmlinkage long sys_fstat(unsigned int fd,
+ struct __old_kernel_stat __user *statbuf);
+asmlinkage long sys_newstat(const char __user *filename,
+ struct stat __user *statbuf);
+asmlinkage long sys_newlstat(const char __user *filename,
+ struct stat __user *statbuf);
+asmlinkage long sys_newfstat(unsigned int fd, struct stat __user *statbuf);
+asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf);
+#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
+asmlinkage long sys_stat64(const char __user *filename,
+ struct stat64 __user *statbuf);
+asmlinkage long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
+asmlinkage long sys_lstat64(const char __user *filename,
+ struct stat64 __user *statbuf);
+asmlinkage long sys_fstatat64(int dfd, const char __user *filename,
+ struct stat64 __user *statbuf, int flag);
+#endif
+#if BITS_PER_LONG == 32
+asmlinkage long sys_truncate64(const char __user *path, loff_t length);
+asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
+#endif
+
+asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
+ const void __user *value, size_t size, int flags);
+asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
+ const void __user *value, size_t size, int flags);
+asmlinkage long sys_fsetxattr(int fd, const char __user *name,
+ const void __user *value, size_t size, int flags);
+asmlinkage long sys_getxattr(const char __user *path, const char __user *name,
+ void __user *value, size_t size);
+asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name,
+ void __user *value, size_t size);
+asmlinkage long sys_fgetxattr(int fd, const char __user *name,
+ void __user *value, size_t size);
+asmlinkage long sys_listxattr(const char __user *path, char __user *list,
+ size_t size);
+asmlinkage long sys_llistxattr(const char __user *path, char __user *list,
+ size_t size);
+asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size);
+asmlinkage long sys_removexattr(const char __user *path,
+ const char __user *name);
+asmlinkage long sys_lremovexattr(const char __user *path,
+ const char __user *name);
+asmlinkage long sys_fremovexattr(int fd, const char __user *name);
+
+asmlinkage long sys_brk(unsigned long brk);
+asmlinkage long sys_mprotect(unsigned long start, size_t len,
+ unsigned long prot);
+asmlinkage long sys_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr);
+asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
+ unsigned long prot, unsigned long pgoff,
+ unsigned long flags);
+asmlinkage long sys_msync(unsigned long start, size_t len, int flags);
+asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice);
+asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
+asmlinkage long sys_munmap(unsigned long addr, size_t len);
+asmlinkage long sys_mlock(unsigned long start, size_t len);
+asmlinkage long sys_munlock(unsigned long start, size_t len);
+asmlinkage long sys_mlockall(int flags);
+asmlinkage long sys_munlockall(void);
+asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior);
+asmlinkage long sys_mincore(unsigned long start, size_t len,
+ unsigned char __user * vec);
+
+asmlinkage long sys_pivot_root(const char __user *new_root,
+ const char __user *put_old);
+asmlinkage long sys_chroot(const char __user *filename);
+asmlinkage long sys_mknod(const char __user *filename, umode_t mode,
+ unsigned dev);
+asmlinkage long sys_link(const char __user *oldname,
+ const char __user *newname);
+asmlinkage long sys_symlink(const char __user *old, const char __user *new);
+asmlinkage long sys_unlink(const char __user *pathname);
+asmlinkage long sys_rename(const char __user *oldname,
+ const char __user *newname);
+asmlinkage long sys_chmod(const char __user *filename, umode_t mode);
+asmlinkage long sys_fchmod(unsigned int fd, umode_t mode);
+
+asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
+#if BITS_PER_LONG == 32
+asmlinkage long sys_fcntl64(unsigned int fd,
+ unsigned int cmd, unsigned long arg);
+#endif
+asmlinkage long sys_pipe(int __user *fildes);
+asmlinkage long sys_pipe2(int __user *fildes, int flags);
+asmlinkage long sys_dup(unsigned int fildes);
+asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
+asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
+asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd,
+ unsigned long arg);
+asmlinkage long sys_flock(unsigned int fd, unsigned int cmd);
+asmlinkage long sys_io_setup(unsigned nr_reqs, aio_context_t __user *ctx);
+asmlinkage long sys_io_destroy(aio_context_t ctx);
+asmlinkage long sys_io_getevents(aio_context_t ctx_id,
+ long min_nr,
+ long nr,
+ struct io_event __user *events,
+ struct timespec __user *timeout);
+asmlinkage long sys_io_submit(aio_context_t, long,
+ struct iocb __user * __user *);
+asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb,
+ struct io_event __user *result);
+asmlinkage long sys_sendfile(int out_fd, int in_fd,
+ off_t __user *offset, size_t count);
+asmlinkage long sys_sendfile64(int out_fd, int in_fd,
+ loff_t __user *offset, size_t count);
+asmlinkage long sys_readlink(const char __user *path,
+ char __user *buf, int bufsiz);
+asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
+asmlinkage long sys_open(const char __user *filename,
+ int flags, umode_t mode);
+asmlinkage long sys_close(unsigned int fd);
+asmlinkage long sys_access(const char __user *filename, int mode);
+asmlinkage long sys_vhangup(void);
+asmlinkage long sys_chown(const char __user *filename,
+ uid_t user, gid_t group);
+asmlinkage long sys_lchown(const char __user *filename,
+ uid_t user, gid_t group);
+asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
+#ifdef CONFIG_UID16
+asmlinkage long sys_chown16(const char __user *filename,
+ old_uid_t user, old_gid_t group);
+asmlinkage long sys_lchown16(const char __user *filename,
+ old_uid_t user, old_gid_t group);
+asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group);
+asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid);
+asmlinkage long sys_setgid16(old_gid_t gid);
+asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid);
+asmlinkage long sys_setuid16(old_uid_t uid);
+asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid);
+asmlinkage long sys_getresuid16(old_uid_t __user *ruid,
+ old_uid_t __user *euid, old_uid_t __user *suid);
+asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid);
+asmlinkage long sys_getresgid16(old_gid_t __user *rgid,
+ old_gid_t __user *egid, old_gid_t __user *sgid);
+asmlinkage long sys_setfsuid16(old_uid_t uid);
+asmlinkage long sys_setfsgid16(old_gid_t gid);
+asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist);
+asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist);
+asmlinkage long sys_getuid16(void);
+asmlinkage long sys_geteuid16(void);
+asmlinkage long sys_getgid16(void);
+asmlinkage long sys_getegid16(void);
+#endif
+
+asmlinkage long sys_utime(char __user *filename,
+ struct utimbuf __user *times);
+asmlinkage long sys_utimes(char __user *filename,
+ struct timeval __user *utimes);
+asmlinkage long sys_lseek(unsigned int fd, off_t offset,
+ unsigned int whence);
+asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
+ unsigned long offset_low, loff_t __user *result,
+ unsigned int whence);
+asmlinkage long sys_read(unsigned int fd, char __user *buf, size_t count);
+asmlinkage long sys_readahead(int fd, loff_t offset, size_t count);
+asmlinkage long sys_readv(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen);
+asmlinkage long sys_write(unsigned int fd, const char __user *buf,
+ size_t count);
+asmlinkage long sys_writev(unsigned long fd,
+ const struct iovec __user *vec,
+ unsigned long vlen);
+asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
+ size_t count, loff_t pos);
+asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
+ size_t count, loff_t pos);
+asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
+ unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
+asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
+asmlinkage long sys_mkdir(const char __user *pathname, umode_t mode);
+asmlinkage long sys_chdir(const char __user *filename);
+asmlinkage long sys_fchdir(unsigned int fd);
+asmlinkage long sys_rmdir(const char __user *pathname);
+asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len);
+asmlinkage long sys_quotactl(unsigned int cmd, const char __user *special,
+ qid_t id, void __user *addr);
+asmlinkage long sys_getdents(unsigned int fd,
+ struct linux_dirent __user *dirent,
+ unsigned int count);
+asmlinkage long sys_getdents64(unsigned int fd,
+ struct linux_dirent64 __user *dirent,
+ unsigned int count);
+
+asmlinkage long sys_setsockopt(int fd, int level, int optname,
+ char __user *optval, int optlen);
+asmlinkage long sys_getsockopt(int fd, int level, int optname,
+ char __user *optval, int __user *optlen);
+asmlinkage long sys_bind(int, struct sockaddr __user *, int);
+asmlinkage long sys_connect(int, struct sockaddr __user *, int);
+asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *);
+asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
+asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
+asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
+asmlinkage long sys_send(int, void __user *, size_t, unsigned);
+asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
+ struct sockaddr __user *, int);
+asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
+ unsigned int vlen, unsigned flags);
+asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
+asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned,
+ struct sockaddr __user *, int __user *);
+asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
+asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
+ unsigned int vlen, unsigned flags,
+ struct timespec __user *timeout);
+asmlinkage long sys_socket(int, int, int);
+asmlinkage long sys_socketpair(int, int, int, int __user *);
+asmlinkage long sys_socketcall(int call, unsigned long __user *args);
+asmlinkage long sys_listen(int, int);
+asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
+ int timeout);
+asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ fd_set __user *exp, struct timeval __user *tvp);
+asmlinkage long sys_old_select(struct sel_arg_struct __user *arg);
+asmlinkage long sys_epoll_create(int size);
+asmlinkage long sys_epoll_create1(int flags);
+asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
+ struct epoll_event __user *event);
+asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
+ int maxevents, int timeout);
+asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
+ int maxevents, int timeout,
+ const sigset_t __user *sigmask,
+ size_t sigsetsize);
+asmlinkage long sys_gethostname(char __user *name, int len);
+asmlinkage long sys_sethostname(char __user *name, int len);
+asmlinkage long sys_setdomainname(char __user *name, int len);
+asmlinkage long sys_newuname(struct new_utsname __user *name);
+asmlinkage long sys_uname(struct old_utsname __user *);
+asmlinkage long sys_olduname(struct oldold_utsname __user *);
+
+asmlinkage long sys_getrlimit(unsigned int resource,
+ struct rlimit __user *rlim);
+#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64))
+asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
+#endif
+asmlinkage long sys_setrlimit(unsigned int resource,
+ struct rlimit __user *rlim);
+asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
+ const struct rlimit64 __user *new_rlim,
+ struct rlimit64 __user *old_rlim);
+asmlinkage long sys_getrusage(int who, struct rusage __user *ru);
+asmlinkage long sys_umask(int mask);
+
+asmlinkage long sys_msgget(key_t key, int msgflg);
+asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp,
+ size_t msgsz, int msgflg);
+asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp,
+ size_t msgsz, long msgtyp, int msgflg);
+asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+
+asmlinkage long sys_semget(key_t key, int nsems, int semflg);
+asmlinkage long sys_semop(int semid, struct sembuf __user *sops,
+ unsigned nsops);
+asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg);
+asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops,
+ unsigned nsops,
+ const struct timespec __user *timeout);
+asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg);
+asmlinkage long sys_shmget(key_t key, size_t size, int flag);
+asmlinkage long sys_shmdt(char __user *shmaddr);
+asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
+asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
+ unsigned long third, void __user *ptr, long fifth);
+
+asmlinkage long sys_mq_open(const char __user *name, int oflag, umode_t mode, struct mq_attr __user *attr);
+asmlinkage long sys_mq_unlink(const char __user *name);
+asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec __user *abs_timeout);
+asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct timespec __user *abs_timeout);
+asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification);
+asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat);
+
+asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn);
+asmlinkage long sys_pciconfig_read(unsigned long bus, unsigned long dfn,
+ unsigned long off, unsigned long len,
+ void __user *buf);
+asmlinkage long sys_pciconfig_write(unsigned long bus, unsigned long dfn,
+ unsigned long off, unsigned long len,
+ void __user *buf);
+
+asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+asmlinkage long sys_swapon(const char __user *specialfile, int swap_flags);
+asmlinkage long sys_swapoff(const char __user *specialfile);
+asmlinkage long sys_sysctl(struct __sysctl_args __user *args);
+asmlinkage long sys_sysinfo(struct sysinfo __user *info);
+asmlinkage long sys_sysfs(int option,
+ unsigned long arg1, unsigned long arg2);
+asmlinkage long sys_syslog(int type, char __user *buf, int len);
+asmlinkage long sys_uselib(const char __user *library);
+asmlinkage long sys_ni_syscall(void);
+asmlinkage long sys_ptrace(long request, long pid, unsigned long addr,
+ unsigned long data);
+
+asmlinkage long sys_add_key(const char __user *_type,
+ const char __user *_description,
+ const void __user *_payload,
+ size_t plen,
+ key_serial_t destringid);
+
+asmlinkage long sys_request_key(const char __user *_type,
+ const char __user *_description,
+ const char __user *_callout_info,
+ key_serial_t destringid);
+
+asmlinkage long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5);
+
+asmlinkage long sys_ioprio_set(int which, int who, int ioprio);
+asmlinkage long sys_ioprio_get(int which, int who);
+asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask,
+ unsigned long maxnode);
+asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
+ const unsigned long __user *from,
+ const unsigned long __user *to);
+asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
+ const void __user * __user *pages,
+ const int __user *nodes,
+ int __user *status,
+ int flags);
+asmlinkage long sys_mbind(unsigned long start, unsigned long len,
+ unsigned long mode,
+ const unsigned long __user *nmask,
+ unsigned long maxnode,
+ unsigned flags);
+asmlinkage long sys_get_mempolicy(int __user *policy,
+ unsigned long __user *nmask,
+ unsigned long maxnode,
+ unsigned long addr, unsigned long flags);
+
+asmlinkage long sys_inotify_init(void);
+asmlinkage long sys_inotify_init1(int flags);
+asmlinkage long sys_inotify_add_watch(int fd, const char __user *path,
+ u32 mask);
+asmlinkage long sys_inotify_rm_watch(int fd, __s32 wd);
+
+asmlinkage long sys_spu_run(int fd, __u32 __user *unpc,
+ __u32 __user *ustatus);
+asmlinkage long sys_spu_create(const char __user *name,
+ unsigned int flags, umode_t mode, int fd);
+
+asmlinkage long sys_mknodat(int dfd, const char __user * filename, umode_t mode,
+ unsigned dev);
+asmlinkage long sys_mkdirat(int dfd, const char __user * pathname, umode_t mode);
+asmlinkage long sys_unlinkat(int dfd, const char __user * pathname, int flag);
+asmlinkage long sys_symlinkat(const char __user * oldname,
+ int newdfd, const char __user * newname);
+asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
+ int newdfd, const char __user *newname, int flags);
+asmlinkage long sys_renameat(int olddfd, const char __user * oldname,
+ int newdfd, const char __user * newname);
+asmlinkage long sys_renameat2(int olddfd, const char __user *oldname,
+ int newdfd, const char __user *newname,
+ unsigned int flags);
+asmlinkage long sys_futimesat(int dfd, const char __user *filename,
+ struct timeval __user *utimes);
+asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode);
+asmlinkage long sys_fchmodat(int dfd, const char __user * filename,
+ umode_t mode);
+asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
+ gid_t group, int flag);
+asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
+ umode_t mode);
+asmlinkage long sys_newfstatat(int dfd, const char __user *filename,
+ struct stat __user *statbuf, int flag);
+asmlinkage long sys_readlinkat(int dfd, const char __user *path, char __user *buf,
+ int bufsiz);
+asmlinkage long sys_utimensat(int dfd, const char __user *filename,
+ struct timespec __user *utimes, int flags);
+asmlinkage long sys_unshare(unsigned long unshare_flags);
+
+asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
+ int fd_out, loff_t __user *off_out,
+ size_t len, unsigned int flags);
+
+asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
+ unsigned long nr_segs, unsigned int flags);
+
+asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags);
+
+asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
+ unsigned int flags);
+asmlinkage long sys_sync_file_range2(int fd, unsigned int flags,
+ loff_t offset, loff_t nbytes);
+asmlinkage long sys_get_robust_list(int pid,
+ struct robust_list_head __user * __user *head_ptr,
+ size_t __user *len_ptr);
+asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
+ size_t len);
+asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
+asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask);
+asmlinkage long sys_signalfd4(int ufd, sigset_t __user *user_mask, size_t sizemask, int flags);
+asmlinkage long sys_timerfd_create(int clockid, int flags);
+asmlinkage long sys_timerfd_settime(int ufd, int flags,
+ const struct itimerspec __user *utmr,
+ struct itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
+asmlinkage long sys_eventfd(unsigned int count);
+asmlinkage long sys_eventfd2(unsigned int count, int flags);
+asmlinkage long sys_memfd_create(const char __user *uname_ptr, unsigned int flags);
+asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
+asmlinkage long sys_old_readdir(unsigned int, struct old_linux_dirent __user *, unsigned int);
+asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
+ fd_set __user *, struct timespec __user *,
+ void __user *);
+asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
+ struct timespec __user *, const sigset_t __user *,
+ size_t);
+asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags);
+asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ u64 mask, int fd,
+ const char __user *pathname);
+asmlinkage long sys_syncfs(int fd);
+
+asmlinkage long sys_fork(void);
+asmlinkage long sys_vfork(void);
+#ifdef CONFIG_CLONE_BACKWARDS
+asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
+ int __user *);
+#else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+ int __user *, int);
+#else
+asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
+ int __user *, int);
+#endif
+#endif
+
+asmlinkage long sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp);
+
+asmlinkage long sys_perf_event_open(
+ struct perf_event_attr __user *attr_uptr,
+ pid_t pid, int cpu, int group_fd, unsigned long flags);
+
+asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+asmlinkage long sys_old_mmap(struct mmap_arg_struct __user *arg);
+asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name,
+ struct file_handle __user *handle,
+ int __user *mnt_id, int flag);
+asmlinkage long sys_open_by_handle_at(int mountdirfd,
+ struct file_handle __user *handle,
+ int flags);
+asmlinkage long sys_setns(int fd, int nstype);
+asmlinkage long sys_process_vm_readv(pid_t pid,
+ const struct iovec __user *lvec,
+ unsigned long liovcnt,
+ const struct iovec __user *rvec,
+ unsigned long riovcnt,
+ unsigned long flags);
+asmlinkage long sys_process_vm_writev(pid_t pid,
+ const struct iovec __user *lvec,
+ unsigned long liovcnt,
+ const struct iovec __user *rvec,
+ unsigned long riovcnt,
+ unsigned long flags);
+
+asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
+ unsigned long idx1, unsigned long idx2);
+asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags);
+asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
+ const char __user *uargs);
+asmlinkage long sys_getrandom(char __user *buf, size_t count,
+ unsigned int flags);
+asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
+
+asmlinkage long sys_execveat(int dfd, const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp, int flags);
+
+#endif
diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
new file mode 100644
index 000000000..27b3b0bc4
--- /dev/null
+++ b/include/linux/syscore_ops.h
@@ -0,0 +1,29 @@
+/*
+ * syscore_ops.h - System core operations.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_SYSCORE_OPS_H
+#define _LINUX_SYSCORE_OPS_H
+
+#include <linux/list.h>
+
+struct syscore_ops {
+ struct list_head node;
+ int (*suspend)(void);
+ void (*resume)(void);
+ void (*shutdown)(void);
+};
+
+extern void register_syscore_ops(struct syscore_ops *ops);
+extern void unregister_syscore_ops(struct syscore_ops *ops);
+#ifdef CONFIG_PM_SLEEP
+extern int syscore_suspend(void);
+extern void syscore_resume(void);
+#endif
+extern void syscore_shutdown(void);
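+
+/*
+ * Illustrative sketch, not part of the original header: a subsystem would
+ * typically fill in only the callbacks it needs and register once from its
+ * init code (all names below are hypothetical).
+ *
+ *	static int example_suspend(void) { return 0; }
+ *	static void example_resume(void) { }
+ *
+ *	static struct syscore_ops example_syscore_ops = {
+ *		.suspend	= example_suspend,
+ *		.resume		= example_resume,
+ *	};
+ *
+ *	register_syscore_ops(&example_syscore_ops);
+ */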
+
+#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
new file mode 100644
index 000000000..fa7bc2992
--- /dev/null
+++ b/include/linux/sysctl.h
@@ -0,0 +1,221 @@
+/*
+ * sysctl.h: General linux system control interface
+ *
+ * Begun 24 March 1995, Stephen Tweedie
+ *
+ ****************************************************************
+ ****************************************************************
+ **
+ ** WARNING:
+ ** The values in this file are exported to user space via
+ ** the sysctl() binary interface. Do *NOT* change the
+ ** numbering of any existing values here, and do not change
+ ** any numbers within any one set of values. If you have to
+ ** redefine an existing interface, use a new number for it.
+ ** The kernel will then return -ENOTDIR to any application using
+ ** the old binary interface.
+ **
+ ****************************************************************
+ ****************************************************************
+ */
+#ifndef _LINUX_SYSCTL_H
+#define _LINUX_SYSCTL_H
+
+#include <linux/list.h>
+#include <linux/rcupdate.h>
+#include <linux/wait.h>
+#include <linux/rbtree.h>
+#include <uapi/linux/sysctl.h>
+
+/* For the /proc/sys support */
+struct ctl_table;
+struct nsproxy;
+struct ctl_table_root;
+struct ctl_table_header;
+struct ctl_dir;
+
+typedef int proc_handler (struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
+extern int proc_dostring(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int proc_dointvec(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int proc_dointvec_minmax(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int proc_dointvec_jiffies(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int proc_doulongvec_minmax(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
+ void __user *, size_t *, loff_t *);
+extern int proc_do_large_bitmap(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+
+/*
+ * Register a set of sysctl names by calling register_sysctl_table()
+ * with an initialised array of struct ctl_table entries. An entry
+ * with a NULL procname terminates the table. The registration sets up
+ * the header's internal bookkeeping, which need not be initialised in
+ * advance.
+ *
+ * sysctl names can be mirrored automatically under /proc/sys. The
+ * procname supplied controls /proc naming.
+ *
+ * The table's mode will be honoured both for sys_sysctl(2) and
+ * proc-fs access.
+ *
+ * Leaf nodes in the sysctl tree will be represented by a single file
+ * under /proc; non-leaf nodes will be represented by directories. A
+ * null procname disables /proc mirroring at this node.
+ *
+ * sysctl(2) can automatically manage read and write requests through
+ * the sysctl table. The data and maxlen fields of the ctl_table
+ * struct allow minimal validation of the values being written, and
+ * the mode field allows minimal authentication.
+ *
+ * There must be a proc_handler routine for any terminal nodes
+ * mirrored under /proc/sys (non-terminals are handled by a built-in
+ * directory handler). Several default handlers are available to
+ * cover common cases.
+ */
+
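+/*
+ * A minimal usage sketch (illustrative only; "example" and example_value
+ * are hypothetical names): export an int as /proc/sys/example/value using
+ * one of the default handlers declared above.
+ *
+ *	static int example_value;
+ *
+ *	static struct ctl_table example_table[] = {
+ *		{
+ *			.procname	= "value",
+ *			.data		= &example_value,
+ *			.maxlen		= sizeof(int),
+ *			.mode		= 0644,
+ *			.proc_handler	= proc_dointvec,
+ *		},
+ *		{ }
+ *	};
+ *
+ *	static struct ctl_table_header *example_header;
+ *
+ *	static int __init example_sysctl_init(void)
+ *	{
+ *		example_header = register_sysctl("example", example_table);
+ *		return example_header ? 0 : -ENOMEM;
+ *	}
+ *
+ * and, on teardown:
+ *
+ *	unregister_sysctl_table(example_header);
+ */
+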
+/* Support for userspace poll() to watch for changes */
+struct ctl_table_poll {
+ atomic_t event;
+ wait_queue_head_t wait;
+};
+
+static inline void *proc_sys_poll_event(struct ctl_table_poll *poll)
+{
+ return (void *)(unsigned long)atomic_read(&poll->event);
+}
+
+#define __CTL_TABLE_POLL_INITIALIZER(name) { \
+ .event = ATOMIC_INIT(0), \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait) }
+
+#define DEFINE_CTL_TABLE_POLL(name) \
+ struct ctl_table_poll name = __CTL_TABLE_POLL_INITIALIZER(name)
+
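+/*
+ * Hedged sketch (not part of the original interface documentation): a table
+ * entry gains poll() support by pointing .poll at a ctl_table_poll and by
+ * the writer calling proc_sys_poll_notify() whenever the value changes.
+ *
+ *	static DEFINE_CTL_TABLE_POLL(example_poll);
+ *
+ * in the corresponding ctl_table entry:
+ *
+ *	.poll		= &example_poll,
+ *
+ * and after the value has been updated:
+ *
+ *	proc_sys_poll_notify(&example_poll);
+ */
+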
+/* A sysctl table is an array of struct ctl_table: */
+struct ctl_table
+{
+ const char *procname; /* Text ID for /proc/sys, or zero */
+ void *data;
+ int maxlen;
+ umode_t mode;
+ struct ctl_table *child; /* Deprecated */
+ proc_handler *proc_handler; /* Callback for text formatting */
+ struct ctl_table_poll *poll;
+ void *extra1;
+ void *extra2;
+};
+
+struct ctl_node {
+ struct rb_node node;
+ struct ctl_table_header *header;
+};
+
+/* struct ctl_table_header is used to maintain dynamic lists of
+ struct ctl_table trees. */
+struct ctl_table_header
+{
+ union {
+ struct {
+ struct ctl_table *ctl_table;
+ int used;
+ int count;
+ int nreg;
+ };
+ struct rcu_head rcu;
+ };
+ struct completion *unregistering;
+ struct ctl_table *ctl_table_arg;
+ struct ctl_table_root *root;
+ struct ctl_table_set *set;
+ struct ctl_dir *parent;
+ struct ctl_node *node;
+};
+
+struct ctl_dir {
+ /* Header must be at the start of ctl_dir */
+ struct ctl_table_header header;
+ struct rb_root root;
+};
+
+struct ctl_table_set {
+ int (*is_seen)(struct ctl_table_set *);
+ struct ctl_dir dir;
+};
+
+struct ctl_table_root {
+ struct ctl_table_set default_set;
+ struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
+ struct nsproxy *namespaces);
+ int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
+};
+
+/* struct ctl_path describes where in the hierarchy a table is added */
+struct ctl_path {
+ const char *procname;
+};
+
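+/*
+ * Illustrative sketch: the table from the sketch above could instead be
+ * attached deeper in the hierarchy with a ctl_path array (terminated by an
+ * entry with a NULL procname), e.g. under /proc/sys/kernel/example:
+ *
+ *	static const struct ctl_path example_path[] = {
+ *		{ .procname = "kernel" },
+ *		{ .procname = "example" },
+ *		{ }
+ *	};
+ *
+ *	register_sysctl_paths(example_path, example_table);
+ */
+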
+#ifdef CONFIG_SYSCTL
+
+void proc_sys_poll_notify(struct ctl_table_poll *poll);
+
+extern void setup_sysctl_set(struct ctl_table_set *p,
+ struct ctl_table_root *root,
+ int (*is_seen)(struct ctl_table_set *));
+extern void retire_sysctl_set(struct ctl_table_set *set);
+
+void register_sysctl_root(struct ctl_table_root *root);
+struct ctl_table_header *__register_sysctl_table(
+ struct ctl_table_set *set,
+ const char *path, struct ctl_table *table);
+struct ctl_table_header *__register_sysctl_paths(
+ struct ctl_table_set *set,
+ const struct ctl_path *path, struct ctl_table *table);
+struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table);
+struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
+struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
+ struct ctl_table *table);
+
+void unregister_sysctl_table(struct ctl_table_header * table);
+
+extern int sysctl_init(void);
+
+extern struct ctl_table sysctl_mount_point[];
+
+#else /* CONFIG_SYSCTL */
+static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
+{
+ return NULL;
+}
+
+static inline struct ctl_table_header *register_sysctl_paths(
+ const struct ctl_path *path, struct ctl_table *table)
+{
+ return NULL;
+}
+
+static inline void unregister_sysctl_table(struct ctl_table_header * table)
+{
+}
+
+static inline void setup_sysctl_set(struct ctl_table_set *p,
+ struct ctl_table_root *root,
+ int (*is_seen)(struct ctl_table_set *))
+{
+}
+
+#endif /* CONFIG_SYSCTL */
+
+int sysctl_max_threads(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
+#endif /* _LINUX_SYSCTL_H */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
new file mode 100644
index 000000000..9f6575831
--- /dev/null
+++ b/include/linux/sysfs.h
@@ -0,0 +1,510 @@
+/*
+ * sysfs.h - definitions for the device driver filesystem
+ *
+ * Copyright (c) 2001,2002 Patrick Mochel
+ * Copyright (c) 2004 Silicon Graphics, Inc.
+ * Copyright (c) 2007 SUSE Linux Products GmbH
+ * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
+ *
+ * Please see Documentation/filesystems/sysfs.txt for more information.
+ */
+
+#ifndef _SYSFS_H_
+#define _SYSFS_H_
+
+#include <linux/kernfs.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/kobject_ns.h>
+#include <linux/stat.h>
+#include <linux/atomic.h>
+
+struct kobject;
+struct module;
+struct bin_attribute;
+enum kobj_ns_type;
+
+struct attribute {
+ const char *name;
+ umode_t mode;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ bool ignore_lockdep:1;
+ struct lock_class_key *key;
+ struct lock_class_key skey;
+#endif
+};
+
+/**
+ * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
+ * @attr: struct attribute to initialize
+ *
+ * Initialize a dynamically allocated struct attribute so we can
+ * make lockdep happy. This is a new requirement for attributes
+ * and initially this is only needed when lockdep is enabled.
+ * Lockdep gives a nice error when your attribute is added to
+ * sysfs if you don't have this.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_attr_init(attr) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ (attr)->key = &__key; \
+} while (0)
+#else
+#define sysfs_attr_init(attr) do {} while (0)
+#endif
+
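+/*
+ * Hedged sketch (illustrative, not part of this header): a dynamically
+ * allocated attribute must go through sysfs_attr_init() before it is added.
+ * Assumed here: a struct kobject *kobj already held by the caller, struct
+ * kobj_attribute from <linux/kobject.h>, and a hypothetical example_show()
+ * callback with the kobj_attribute show prototype.
+ *
+ *	struct kobj_attribute *ka = kzalloc(sizeof(*ka), GFP_KERNEL);
+ *
+ *	if (ka) {
+ *		sysfs_attr_init(&ka->attr);
+ *		ka->attr.name = "example";
+ *		ka->attr.mode = 0444;
+ *		ka->show = example_show;
+ *		if (sysfs_create_file(kobj, &ka->attr))
+ *			kfree(ka);
+ *	}
+ */
+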
+/**
+ * struct attribute_group - data structure used to declare an attribute group.
+ * @name: Optional: Attribute group name
+ * If specified, the attribute group will be created in
+ * a new subdirectory with this name.
+ * @is_visible: Optional: Function to return permissions associated with an
+ * attribute of the group. Will be called repeatedly for each
+ * attribute in the group. Only read/write permissions as well as
+ * SYSFS_PREALLOC are accepted. Must return 0 if an attribute is
+ * not visible. The returned value will replace static permissions
+ * defined in struct attribute or struct bin_attribute.
+ * @attrs: Pointer to NULL terminated list of attributes.
+ * @bin_attrs: Pointer to NULL terminated list of binary attributes.
+ * Either attrs or bin_attrs or both must be provided.
+ */
+struct attribute_group {
+ const char *name;
+ umode_t (*is_visible)(struct kobject *,
+ struct attribute *, int);
+ struct attribute **attrs;
+ struct bin_attribute **bin_attrs;
+};
+
+/**
+ * Use these macros to make defining attributes easier. See include/linux/device.h
+ * for examples..
+ */
+
+#define SYSFS_PREALLOC 010000
+
+#define __ATTR(_name, _mode, _show, _store) { \
+ .attr = {.name = __stringify(_name), \
+ .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+#define __ATTR_PREALLOC(_name, _mode, _show, _store) { \
+ .attr = {.name = __stringify(_name), \
+ .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\
+ .show = _show, \
+ .store = _store, \
+}
+
+#define __ATTR_RO(_name) { \
+ .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+ .show = _name##_show, \
+}
+
+#define __ATTR_WO(_name) { \
+ .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
+ .store = _name##_store, \
+}
+
+#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \
+ _name##_show, _name##_store)
+
+#define __ATTR_NULL { .attr = { .name = NULL } }
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) { \
+ .attr = {.name = __stringify(_name), .mode = _mode, \
+ .ignore_lockdep = true }, \
+ .show = _show, \
+ .store = _store, \
+}
+#else
+#define __ATTR_IGNORE_LOCKDEP __ATTR
+#endif
+
+#define __ATTRIBUTE_GROUPS(_name) \
+static const struct attribute_group *_name##_groups[] = { \
+ &_name##_group, \
+ NULL, \
+}
+
+#define ATTRIBUTE_GROUPS(_name) \
+static const struct attribute_group _name##_group = { \
+ .attrs = _name##_attrs, \
+}; \
+__ATTRIBUTE_GROUPS(_name)
+
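+/*
+ * Illustrative sketch (hypothetical names, not part of this header): a single
+ * read-only attribute and its group built from the helpers above, assuming
+ * struct kobj_attribute from <linux/kobject.h>.
+ *
+ *	static ssize_t example_show(struct kobject *kobj,
+ *				    struct kobj_attribute *attr, char *buf)
+ *	{
+ *		return sprintf(buf, "%d\n", 42);
+ *	}
+ *	static struct kobj_attribute example_attribute = __ATTR_RO(example);
+ *
+ *	static struct attribute *example_attrs[] = {
+ *		&example_attribute.attr,
+ *		NULL,
+ *	};
+ *	ATTRIBUTE_GROUPS(example);
+ */
+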
+struct file;
+struct vm_area_struct;
+
+struct bin_attribute {
+ struct attribute attr;
+ size_t size;
+ void *private;
+ ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
+ char *, loff_t, size_t);
+ ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
+ char *, loff_t, size_t);
+ int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
+ struct vm_area_struct *vma);
+};
+
+/**
+ * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
+ * @attr: struct bin_attribute to initialize
+ *
+ * Initialize a dynamically allocated struct bin_attribute so we
+ * can make lockdep happy. This is a new requirement for
+ * attributes and initially this is only needed when lockdep is
+ * enabled. Lockdep gives a nice error when your attribute is
+ * added to sysfs if you don't have this.
+ */
+#define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr)
+
+/* macros to make creating static binary attributes easier */
+#define __BIN_ATTR(_name, _mode, _read, _write, _size) { \
+ .attr = { .name = __stringify(_name), .mode = _mode }, \
+ .read = _read, \
+ .write = _write, \
+ .size = _size, \
+}
+
+#define __BIN_ATTR_RO(_name, _size) { \
+ .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
+ .read = _name##_read, \
+ .size = _size, \
+}
+
+#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \
+ (S_IWUSR | S_IRUGO), _name##_read, \
+ _name##_write, _size)
+
+#define __BIN_ATTR_NULL __ATTR_NULL
+
+#define BIN_ATTR(_name, _mode, _read, _write, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read, \
+ _write, _size)
+
+#define BIN_ATTR_RO(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size)
+
+#define BIN_ATTR_RW(_name, _size) \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
+
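+/*
+ * Illustrative sketch (hypothetical names): a 256-byte read-only binary
+ * attribute. The read() callback follows the prototype in struct
+ * bin_attribute above, and the macro defines an object named
+ * bin_attr_example.
+ *
+ *	static ssize_t example_read(struct file *filp, struct kobject *kobj,
+ *				    struct bin_attribute *attr,
+ *				    char *buf, loff_t off, size_t count)
+ *	{
+ *		return 0;
+ *	}
+ *	static BIN_ATTR_RO(example, 256);
+ *
+ * which can then be registered with sysfs_create_bin_file(kobj,
+ * &bin_attr_example).
+ */
+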
+struct sysfs_ops {
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
+ ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
+};
+
+#ifdef CONFIG_SYSFS
+
+int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns);
+void sysfs_remove_dir(struct kobject *kobj);
+int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
+ const void *new_ns);
+int __must_check sysfs_move_dir_ns(struct kobject *kobj,
+ struct kobject *new_parent_kobj,
+ const void *new_ns);
+int __must_check sysfs_create_mount_point(struct kobject *parent_kobj,
+ const char *name);
+void sysfs_remove_mount_point(struct kobject *parent_kobj,
+ const char *name);
+
+int __must_check sysfs_create_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns);
+int __must_check sysfs_create_files(struct kobject *kobj,
+ const struct attribute **attr);
+int __must_check sysfs_chmod_file(struct kobject *kobj,
+ const struct attribute *attr, umode_t mode);
+void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
+ const void *ns);
+bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
+void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
+
+int __must_check sysfs_create_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr);
+void sysfs_remove_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr);
+
+int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target,
+ const char *name);
+int __must_check sysfs_create_link_nowarn(struct kobject *kobj,
+ struct kobject *target,
+ const char *name);
+void sysfs_remove_link(struct kobject *kobj, const char *name);
+
+int sysfs_rename_link_ns(struct kobject *kobj, struct kobject *target,
+ const char *old_name, const char *new_name,
+ const void *new_ns);
+
+void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
+ const char *name);
+
+int __must_check sysfs_create_group(struct kobject *kobj,
+ const struct attribute_group *grp);
+int __must_check sysfs_create_groups(struct kobject *kobj,
+ const struct attribute_group **groups);
+int sysfs_update_group(struct kobject *kobj,
+ const struct attribute_group *grp);
+void sysfs_remove_group(struct kobject *kobj,
+ const struct attribute_group *grp);
+void sysfs_remove_groups(struct kobject *kobj,
+ const struct attribute_group **groups);
+int sysfs_add_file_to_group(struct kobject *kobj,
+ const struct attribute *attr, const char *group);
+void sysfs_remove_file_from_group(struct kobject *kobj,
+ const struct attribute *attr, const char *group);
+int sysfs_merge_group(struct kobject *kobj,
+ const struct attribute_group *grp);
+void sysfs_unmerge_group(struct kobject *kobj,
+ const struct attribute_group *grp);
+int sysfs_add_link_to_group(struct kobject *kobj, const char *group_name,
+ struct kobject *target, const char *link_name);
+void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
+ const char *link_name);
+
+void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
+
+int __must_check sysfs_init(void);
+
+static inline void sysfs_enable_ns(struct kernfs_node *kn)
+{
+ return kernfs_enable_ns(kn);
+}
+
+#else /* CONFIG_SYSFS */
+
+static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_dir(struct kobject *kobj)
+{
+}
+
+static inline int sysfs_rename_dir_ns(struct kobject *kobj,
+ const char *new_name, const void *new_ns)
+{
+ return 0;
+}
+
+static inline int sysfs_move_dir_ns(struct kobject *kobj,
+ struct kobject *new_parent_kobj,
+ const void *new_ns)
+{
+ return 0;
+}
+
+static inline int sysfs_create_mount_point(struct kobject *parent_kobj,
+ const char *name)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_mount_point(struct kobject *parent_kobj,
+ const char *name)
+{
+}
+
+static inline int sysfs_create_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns)
+{
+ return 0;
+}
+
+static inline int sysfs_create_files(struct kobject *kobj,
+ const struct attribute **attr)
+{
+ return 0;
+}
+
+static inline int sysfs_chmod_file(struct kobject *kobj,
+ const struct attribute *attr, umode_t mode)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_file_ns(struct kobject *kobj,
+ const struct attribute *attr,
+ const void *ns)
+{
+}
+
+static inline bool sysfs_remove_file_self(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return false;
+}
+
+static inline void sysfs_remove_files(struct kobject *kobj,
+ const struct attribute **attr)
+{
+}
+
+static inline int sysfs_create_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_bin_file(struct kobject *kobj,
+ const struct bin_attribute *attr)
+{
+}
+
+static inline int sysfs_create_link(struct kobject *kobj,
+ struct kobject *target, const char *name)
+{
+ return 0;
+}
+
+static inline int sysfs_create_link_nowarn(struct kobject *kobj,
+ struct kobject *target,
+ const char *name)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_link(struct kobject *kobj, const char *name)
+{
+}
+
+static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
+ const char *old_name,
+ const char *new_name, const void *ns)
+{
+ return 0;
+}
+
+static inline void sysfs_delete_link(struct kobject *k, struct kobject *t,
+ const char *name)
+{
+}
+
+static inline int sysfs_create_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return 0;
+}
+
+static inline int sysfs_create_groups(struct kobject *kobj,
+ const struct attribute_group **groups)
+{
+ return 0;
+}
+
+static inline int sysfs_update_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+}
+
+static inline void sysfs_remove_groups(struct kobject *kobj,
+ const struct attribute_group **groups)
+{
+}
+
+static inline int sysfs_add_file_to_group(struct kobject *kobj,
+ const struct attribute *attr, const char *group)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_file_from_group(struct kobject *kobj,
+ const struct attribute *attr, const char *group)
+{
+}
+
+static inline int sysfs_merge_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+ return 0;
+}
+
+static inline void sysfs_unmerge_group(struct kobject *kobj,
+ const struct attribute_group *grp)
+{
+}
+
+static inline int sysfs_add_link_to_group(struct kobject *kobj,
+ const char *group_name, struct kobject *target,
+ const char *link_name)
+{
+ return 0;
+}
+
+static inline void sysfs_remove_link_from_group(struct kobject *kobj,
+ const char *group_name, const char *link_name)
+{
+}
+
+static inline void sysfs_notify(struct kobject *kobj, const char *dir,
+ const char *attr)
+{
+}
+
+static inline int __must_check sysfs_init(void)
+{
+ return 0;
+}
+
+static inline void sysfs_enable_ns(struct kernfs_node *kn)
+{
+}
+
+#endif /* CONFIG_SYSFS */
+
+static inline int __must_check sysfs_create_file(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return sysfs_create_file_ns(kobj, attr, NULL);
+}
+
+static inline void sysfs_remove_file(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ sysfs_remove_file_ns(kobj, attr, NULL);
+}
+
+static inline int sysfs_rename_link(struct kobject *kobj, struct kobject *target,
+ const char *old_name, const char *new_name)
+{
+ return sysfs_rename_link_ns(kobj, target, old_name, new_name, NULL);
+}
+
+static inline void sysfs_notify_dirent(struct kernfs_node *kn)
+{
+ kernfs_notify(kn);
+}
+
+static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
+ const unsigned char *name)
+{
+ return kernfs_find_and_get(parent, name);
+}
+
+static inline struct kernfs_node *sysfs_get(struct kernfs_node *kn)
+{
+ kernfs_get(kn);
+ return kn;
+}
+
+static inline void sysfs_put(struct kernfs_node *kn)
+{
+ kernfs_put(kn);
+}
+
+#endif /* _SYSFS_H_ */
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
new file mode 100644
index 000000000..4b7b875a7
--- /dev/null
+++ b/include/linux/syslog.h
@@ -0,0 +1,61 @@
+/* Syslog internals
+ *
+ * Copyright 2010 Canonical, Ltd.
+ * Author: Kees Cook <kees.cook@canonical.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_SYSLOG_H
+#define _LINUX_SYSLOG_H
+
+/* Close the log. Currently a NOP. */
+#define SYSLOG_ACTION_CLOSE 0
+/* Open the log. Currently a NOP. */
+#define SYSLOG_ACTION_OPEN 1
+/* Read from the log. */
+#define SYSLOG_ACTION_READ 2
+/* Read all messages remaining in the ring buffer. */
+#define SYSLOG_ACTION_READ_ALL 3
+/* Read and clear all messages remaining in the ring buffer */
+#define SYSLOG_ACTION_READ_CLEAR 4
+/* Clear ring buffer. */
+#define SYSLOG_ACTION_CLEAR 5
+/* Disable printk's to console */
+#define SYSLOG_ACTION_CONSOLE_OFF 6
+/* Enable printk's to console */
+#define SYSLOG_ACTION_CONSOLE_ON 7
+/* Set level of messages printed to console */
+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
+/* Return number of unread characters in the log buffer */
+#define SYSLOG_ACTION_SIZE_UNREAD 9
+/* Return size of the log buffer */
+#define SYSLOG_ACTION_SIZE_BUFFER 10
+
+#define SYSLOG_FROM_READER 0
+#define SYSLOG_FROM_PROC 1
+
+int do_syslog(int type, char __user *buf, int count, bool from_file);
+
+#ifdef CONFIG_PRINTK
+int check_syslog_permissions(int type, bool from_file);
+#else
+static inline int check_syslog_permissions(int type, bool from_file)
+{
+ return 0;
+}
+#endif
+
+#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
new file mode 100644
index 000000000..387fa7d05
--- /dev/null
+++ b/include/linux/sysrq.h
@@ -0,0 +1,75 @@
+/* -*- linux-c -*-
+ *
+ * $Id: sysrq.h,v 1.3 1997/07/17 11:54:33 mj Exp $
+ *
+ * Linux Magic System Request Key Hacks
+ *
+ * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *
+ * (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com>
+ * overhauled to use key registration
+ * based upon discussions in irc://irc.openprojects.net/#kernelnewbies
+ */
+
+#ifndef _LINUX_SYSRQ_H
+#define _LINUX_SYSRQ_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+/* Possible values of bitmask for enabling sysrq functions */
+/* 0x0001 is reserved for enabling everything */
+#define SYSRQ_ENABLE_LOG 0x0002
+#define SYSRQ_ENABLE_KEYBOARD 0x0004
+#define SYSRQ_ENABLE_DUMP 0x0008
+#define SYSRQ_ENABLE_SYNC 0x0010
+#define SYSRQ_ENABLE_REMOUNT 0x0020
+#define SYSRQ_ENABLE_SIGNAL 0x0040
+#define SYSRQ_ENABLE_BOOT 0x0080
+#define SYSRQ_ENABLE_RTNICE 0x0100
+
+struct sysrq_key_op {
+ void (*handler)(int);
+ char *help_msg;
+ char *action_msg;
+ int enable_mask;
+};
+
+#ifdef CONFIG_MAGIC_SYSRQ
+
+/* Generic SysRq interface -- you may call it from any device driver,
+ * supplying the ASCII code of the key that was pressed.
+ */
+
+void handle_sysrq(int key);
+void __handle_sysrq(int key, bool check_mask);
+int register_sysrq_key(int key, struct sysrq_key_op *op);
+int unregister_sysrq_key(int key, struct sysrq_key_op *op);
+struct sysrq_key_op *__sysrq_get_key_op(int key);
+
+int sysrq_toggle_support(int enable_mask);
+
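+/*
+ * Hedged sketch (hypothetical handler, not part of this header): registering
+ * a handler for the 'x' key from a driver's init code.
+ *
+ *	static void sysrq_handle_example(int key)
+ *	{
+ *		pr_info("example sysrq handler\n");
+ *	}
+ *
+ *	static struct sysrq_key_op sysrq_example_op = {
+ *		.handler	= sysrq_handle_example,
+ *		.help_msg	= "example(x)",
+ *		.action_msg	= "Example action",
+ *		.enable_mask	= SYSRQ_ENABLE_KEYBOARD,
+ *	};
+ *
+ *	register_sysrq_key('x', &sysrq_example_op);
+ */
+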
+#else
+
+static inline void handle_sysrq(int key)
+{
+}
+
+static inline void __handle_sysrq(int key, bool check_mask)
+{
+}
+
+static inline int register_sysrq_key(int key, struct sysrq_key_op *op)
+{
+ return -EINVAL;
+}
+
+static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op)
+{
+ return -EINVAL;
+}
+
+#endif
+
+#endif /* _LINUX_SYSRQ_H */
diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h
new file mode 100644
index 000000000..e47d6d900
--- /dev/null
+++ b/include/linux/sysv_fs.h
@@ -0,0 +1,213 @@
+#ifndef _LINUX_SYSV_FS_H
+#define _LINUX_SYSV_FS_H
+
+#define __packed2__ __attribute__((packed, aligned(2)))
+
+
+#ifndef __KERNEL__
+typedef u16 __fs16;
+typedef u32 __fs32;
+#endif
+
+/* inode numbers are 16 bit */
+typedef __fs16 sysv_ino_t;
+
+/* Block numbers are 24 bit, sometimes stored in 32 bit.
+ On Coherent FS, they are always stored in PDP-11 manner: the least
+ significant 16 bits come last. */
+typedef __fs32 sysv_zone_t;
+
+/* 0 is non-existent */
+#define SYSV_BADBL_INO 1 /* inode of bad blocks file */
+#define SYSV_ROOT_INO 2 /* inode of root directory */
+
+
+/* Xenix super-block data on disk */
+#define XENIX_NICINOD 100 /* number of inode cache entries */
+#define XENIX_NICFREE 100 /* number of free block list chunk entries */
+struct xenix_super_block {
+ __fs16 s_isize; /* index of first data zone */
+ __fs32 s_fsize __packed2__; /* total number of zones of this fs */
+ /* the start of the free block list: */
+ __fs16 s_nfree; /* number of free blocks in s_free, <= XENIX_NICFREE */
+ sysv_zone_t s_free[XENIX_NICFREE]; /* first free block list chunk */
+ /* the cache of free inodes: */
+ __fs16 s_ninode; /* number of free inodes in s_inode, <= XENIX_NICINOD */
+ sysv_ino_t s_inode[XENIX_NICINOD]; /* some free inodes */
+ /* locks, not used by Linux: */
+ char s_flock; /* lock during free block list manipulation */
+ char s_ilock; /* lock during inode cache manipulation */
+ char s_fmod; /* super-block modified flag */
+ char s_ronly; /* flag whether fs is mounted read-only */
+ __fs32 s_time __packed2__; /* time of last super block update */
+ __fs32 s_tfree __packed2__; /* total number of free zones */
+ __fs16 s_tinode; /* total number of free inodes */
+ __fs16 s_dinfo[4]; /* device information ?? */
+ char s_fname[6]; /* file system volume name */
+ char s_fpack[6]; /* file system pack name */
+ char s_clean; /* set to 0x46 when filesystem is properly unmounted */
+ char s_fill[371];
+ s32 s_magic; /* version of file system */
+ __fs32 s_type; /* type of file system: 1 for 512 byte blocks
+ 2 for 1024 byte blocks
+ 3 for 2048 byte blocks */
+
+};
+
+/*
+ * SystemV FS comes in two variants:
+ * sysv2: System V Release 2 (e.g. Microport), structure elements aligned(2).
+ * sysv4: System V Release 4 (e.g. Consensys), structure elements aligned(4).
+ */
+#define SYSV_NICINOD 100 /* number of inode cache entries */
+#define SYSV_NICFREE 50 /* number of free block list chunk entries */
+
+/* SystemV4 super-block data on disk */
+struct sysv4_super_block {
+ __fs16 s_isize; /* index of first data zone */
+ u16 s_pad0;
+ __fs32 s_fsize; /* total number of zones of this fs */
+ /* the start of the free block list: */
+ __fs16 s_nfree; /* number of free blocks in s_free, <= SYSV_NICFREE */
+ u16 s_pad1;
+ sysv_zone_t s_free[SYSV_NICFREE]; /* first free block list chunk */
+ /* the cache of free inodes: */
+ __fs16 s_ninode; /* number of free inodes in s_inode, <= SYSV_NICINOD */
+ u16 s_pad2;
+ sysv_ino_t s_inode[SYSV_NICINOD]; /* some free inodes */
+ /* locks, not used by Linux: */
+ char s_flock; /* lock during free block list manipulation */
+ char s_ilock; /* lock during inode cache manipulation */
+ char s_fmod; /* super-block modified flag */
+ char s_ronly; /* flag whether fs is mounted read-only */
+ __fs32 s_time; /* time of last super block update */
+ __fs16 s_dinfo[4]; /* device information ?? */
+ __fs32 s_tfree; /* total number of free zones */
+ __fs16 s_tinode; /* total number of free inodes */
+ u16 s_pad3;
+ char s_fname[6]; /* file system volume name */
+ char s_fpack[6]; /* file system pack name */
+ s32 s_fill[12];
+ __fs32 s_state; /* file system state: 0x7c269d38-s_time means clean */
+ s32 s_magic; /* version of file system */
+ __fs32 s_type; /* type of file system: 1 for 512 byte blocks
+ 2 for 1024 byte blocks */
+};
+
+/* SystemV2 super-block data on disk */
+struct sysv2_super_block {
+ __fs16 s_isize; /* index of first data zone */
+ __fs32 s_fsize __packed2__; /* total number of zones of this fs */
+ /* the start of the free block list: */
+ __fs16 s_nfree; /* number of free blocks in s_free, <= SYSV_NICFREE */
+ sysv_zone_t s_free[SYSV_NICFREE]; /* first free block list chunk */
+ /* the cache of free inodes: */
+ __fs16 s_ninode; /* number of free inodes in s_inode, <= SYSV_NICINOD */
+ sysv_ino_t s_inode[SYSV_NICINOD]; /* some free inodes */
+ /* locks, not used by Linux: */
+ char s_flock; /* lock during free block list manipulation */
+ char s_ilock; /* lock during inode cache manipulation */
+ char s_fmod; /* super-block modified flag */
+ char s_ronly; /* flag whether fs is mounted read-only */
+ __fs32 s_time __packed2__; /* time of last super block update */
+ __fs16 s_dinfo[4]; /* device information ?? */
+ __fs32 s_tfree __packed2__; /* total number of free zones */
+ __fs16 s_tinode; /* total number of free inodes */
+ char s_fname[6]; /* file system volume name */
+ char s_fpack[6]; /* file system pack name */
+ s32 s_fill[14];
+ __fs32 s_state; /* file system state: 0xcb096f43 means clean */
+ s32 s_magic; /* version of file system */
+ __fs32 s_type; /* type of file system: 1 for 512 byte blocks
+ 2 for 1024 byte blocks */
+};
+
+/* V7 super-block data on disk */
+#define V7_NICINOD 100 /* number of inode cache entries */
+#define V7_NICFREE 50 /* number of free block list chunk entries */
+struct v7_super_block {
+ __fs16 s_isize; /* index of first data zone */
+ __fs32 s_fsize __packed2__; /* total number of zones of this fs */
+ /* the start of the free block list: */
+ __fs16 s_nfree; /* number of free blocks in s_free, <= V7_NICFREE */
+ sysv_zone_t s_free[V7_NICFREE]; /* first free block list chunk */
+ /* the cache of free inodes: */
+ __fs16 s_ninode; /* number of free inodes in s_inode, <= V7_NICINOD */
+ sysv_ino_t s_inode[V7_NICINOD]; /* some free inodes */
+ /* locks, not used by Linux or V7: */
+ char s_flock; /* lock during free block list manipulation */
+ char s_ilock; /* lock during inode cache manipulation */
+ char s_fmod; /* super-block modified flag */
+ char s_ronly; /* flag whether fs is mounted read-only */
+ __fs32 s_time __packed2__; /* time of last super block update */
+ /* the following fields are not maintained by V7: */
+ __fs32 s_tfree __packed2__; /* total number of free zones */
+ __fs16 s_tinode; /* total number of free inodes */
+ __fs16 s_m; /* interleave factor */
+ __fs16 s_n; /* interleave factor */
+ char s_fname[6]; /* file system name */
+ char s_fpack[6]; /* file system pack name */
+};
+/* Constants to aid sanity checking */
+/* This is not a hard limit, nor enforced by the V7 kernel. It's actually just
+ * the limit used by Seventh Edition's ls, though it is high enough to assume
+ * that no reasonable file system would have that many entries in its root
+ * directory. Thus, if we see anything higher, we probably just got the
+ * endianness wrong. */
+#define V7_NFILES 1024
+/* The disk addresses are three-byte (despite direct block addresses being
+ * aligned word-wise in the inode). If the most significant byte is non-zero,
+ * something is most likely wrong (not a filesystem, or wrong byte order). */
+#define V7_MAXSIZE 0x00ffffff
+
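+/*
+ * Hedged sketch: a probe routine might use these bounds to reject a
+ * candidate superblock read in the wrong byte order. fs32_to_cpu() is
+ * assumed to be the sysv driver's byte-order helper (it lives in fs/sysv,
+ * not in this header).
+ *
+ *	if (fs32_to_cpu(sbi, sb->s_fsize) > V7_MAXSIZE)
+ *		return 0;
+ *
+ * i.e. the byte order is wrong or this is not a V7 filesystem.
+ */
+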
+/* Coherent super-block data on disk */
+#define COH_NICINOD 100 /* number of inode cache entries */
+#define COH_NICFREE 64 /* number of free block list chunk entries */
+struct coh_super_block {
+ __fs16 s_isize; /* index of first data zone */
+ __fs32 s_fsize __packed2__; /* total number of zones of this fs */
+ /* the start of the free block list: */
+ __fs16 s_nfree; /* number of free blocks in s_free, <= COH_NICFREE */
+ sysv_zone_t s_free[COH_NICFREE] __packed2__; /* first free block list chunk */
+ /* the cache of free inodes: */
+ __fs16 s_ninode; /* number of free inodes in s_inode, <= COH_NICINOD */
+ sysv_ino_t s_inode[COH_NICINOD]; /* some free inodes */
+ /* locks, not used by Linux: */
+ char s_flock; /* lock during free block list manipulation */
+ char s_ilock; /* lock during inode cache manipulation */
+ char s_fmod; /* super-block modified flag */
+ char s_ronly; /* flag whether fs is mounted read-only */
+ __fs32 s_time __packed2__; /* time of last super block update */
+ __fs32 s_tfree __packed2__; /* total number of free zones */
+ __fs16 s_tinode; /* total number of free inodes */
+ __fs16 s_interleave_m; /* interleave factor */
+ __fs16 s_interleave_n;
+ char s_fname[6]; /* file system volume name */
+ char s_fpack[6]; /* file system pack name */
+ __fs32 s_unique; /* zero, not used */
+};
+
+/* SystemV/Coherent inode data on disk */
+struct sysv_inode {
+ __fs16 i_mode;
+ __fs16 i_nlink;
+ __fs16 i_uid;
+ __fs16 i_gid;
+ __fs32 i_size;
+ u8 i_data[3*(10+1+1+1)];
+ u8 i_gen;
+ __fs32 i_atime; /* time of last access */
+ __fs32 i_mtime; /* time of last modification */
+ __fs32 i_ctime; /* time of creation */
+};
+
+/* SystemV/Coherent directory entry on disk */
+#define SYSV_NAMELEN 14 /* max size of name in struct sysv_dir_entry */
+struct sysv_dir_entry {
+ sysv_ino_t inode;
+ char name[SYSV_NAMELEN]; /* up to 14 characters, the rest are zeroes */
+};
+
+#define SYSV_DIRSIZE sizeof(struct sysv_dir_entry) /* size of every directory entry */
+
+#endif /* _LINUX_SYSV_FS_H */
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
new file mode 100644
index 000000000..6a8b99426
--- /dev/null
+++ b/include/linux/t10-pi.h
@@ -0,0 +1,22 @@
+#ifndef _LINUX_T10_PI_H
+#define _LINUX_T10_PI_H
+
+#include <linux/types.h>
+#include <linux/blkdev.h>
+
+/*
+ * T10 Protection Information tuple.
+ */
+struct t10_pi_tuple {
+ __be16 guard_tag; /* Checksum */
+ __be16 app_tag; /* Opaque storage */
+ __be32 ref_tag; /* Target LBA or indirect LBA */
+};
+
+
+extern struct blk_integrity t10_pi_type1_crc;
+extern struct blk_integrity t10_pi_type1_ip;
+extern struct blk_integrity t10_pi_type3_crc;
+extern struct blk_integrity t10_pi_type3_ip;
+
+#endif
diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h
new file mode 100644
index 000000000..bdf855c28
--- /dev/null
+++ b/include/linux/task_io_accounting.h
@@ -0,0 +1,45 @@
+/*
+ * task_io_accounting: a structure which is used for recording a single task's
+ * IO statistics.
+ *
+ * Don't include this header file directly - it is designed to be dragged in via
+ * sched.h.
+ *
+ * Blame Andrew Morton for all this.
+ */
+
+struct task_io_accounting {
+#ifdef CONFIG_TASK_XACCT
+ /* bytes read */
+ u64 rchar;
+ /* bytes written */
+ u64 wchar;
+ /* # of read syscalls */
+ u64 syscr;
+ /* # of write syscalls */
+ u64 syscw;
+#endif /* CONFIG_TASK_XACCT */
+
+#ifdef CONFIG_TASK_IO_ACCOUNTING
+ /*
+ * The number of bytes which this task has caused to be read from
+ * storage.
+ */
+ u64 read_bytes;
+
+ /*
+ * The number of bytes which this task has caused, or shall cause to be
+ * written to disk.
+ */
+ u64 write_bytes;
+
+ /*
+	 * A task can cause "negative" IO too. If this task truncates some
+	 * dirty pagecache, some IO for which another task has already been
+	 * accounted (in its write_bytes) will not happen. We _could_ just
+	 * subtract that from the truncating task's write_bytes, but there is
+	 * information loss in doing that.
+ */
+ u64 cancelled_write_bytes;
+#endif /* CONFIG_TASK_IO_ACCOUNTING */
+};
diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h
new file mode 100644
index 000000000..4d090f9ee
--- /dev/null
+++ b/include/linux/task_io_accounting_ops.h
@@ -0,0 +1,113 @@
+/*
+ * Task I/O accounting operations
+ */
+#ifndef __TASK_IO_ACCOUNTING_OPS_INCLUDED
+#define __TASK_IO_ACCOUNTING_OPS_INCLUDED
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_TASK_IO_ACCOUNTING
+static inline void task_io_account_read(size_t bytes)
+{
+ current->ioac.read_bytes += bytes;
+}
+
+/*
+ * We approximate the number of blocks, because we only account bytes.
+ * A 'block' is 512 bytes.
+ */
+static inline unsigned long task_io_get_inblock(const struct task_struct *p)
+{
+ return p->ioac.read_bytes >> 9;
+}
+
+static inline void task_io_account_write(size_t bytes)
+{
+ current->ioac.write_bytes += bytes;
+}
+
+/*
+ * We approximate the number of blocks, because we only account bytes.
+ * A 'block' is 512 bytes.
+ */
+static inline unsigned long task_io_get_oublock(const struct task_struct *p)
+{
+ return p->ioac.write_bytes >> 9;
+}
+
+static inline void task_io_account_cancelled_write(size_t bytes)
+{
+ current->ioac.cancelled_write_bytes += bytes;
+}
+
+static inline void task_io_accounting_init(struct task_io_accounting *ioac)
+{
+ memset(ioac, 0, sizeof(*ioac));
+}
+
+static inline void task_blk_io_accounting_add(struct task_io_accounting *dst,
+ struct task_io_accounting *src)
+{
+ dst->read_bytes += src->read_bytes;
+ dst->write_bytes += src->write_bytes;
+ dst->cancelled_write_bytes += src->cancelled_write_bytes;
+}
+
+#else
+
+static inline void task_io_account_read(size_t bytes)
+{
+}
+
+static inline unsigned long task_io_get_inblock(const struct task_struct *p)
+{
+ return 0;
+}
+
+static inline void task_io_account_write(size_t bytes)
+{
+}
+
+static inline unsigned long task_io_get_oublock(const struct task_struct *p)
+{
+ return 0;
+}
+
+static inline void task_io_account_cancelled_write(size_t bytes)
+{
+}
+
+static inline void task_io_accounting_init(struct task_io_accounting *ioac)
+{
+}
+
+static inline void task_blk_io_accounting_add(struct task_io_accounting *dst,
+ struct task_io_accounting *src)
+{
+}
+
+#endif /* CONFIG_TASK_IO_ACCOUNTING */
+
+#ifdef CONFIG_TASK_XACCT
+static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
+ struct task_io_accounting *src)
+{
+ dst->rchar += src->rchar;
+ dst->wchar += src->wchar;
+ dst->syscr += src->syscr;
+ dst->syscw += src->syscw;
+}
+#else
+static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
+ struct task_io_accounting *src)
+{
+}
+#endif /* CONFIG_TASK_XACCT */
+
+static inline void task_io_accounting_add(struct task_io_accounting *dst,
+ struct task_io_accounting *src)
+{
+ task_chr_io_accounting_add(dst, src);
+ task_blk_io_accounting_add(dst, src);
+}
+#endif /* __TASK_IO_ACCOUNTING_OPS_INCLUDED */
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
new file mode 100644
index 000000000..ca5a1cf27
--- /dev/null
+++ b/include/linux/task_work.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_TASK_WORK_H
+#define _LINUX_TASK_WORK_H
+
+#include <linux/list.h>
+#include <linux/sched.h>
+
+typedef void (*task_work_func_t)(struct callback_head *);
+
+static inline void
+init_task_work(struct callback_head *twork, task_work_func_t func)
+{
+ twork->func = func;
+}
+
+int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
+struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
+void task_work_run(void);
+
+static inline void exit_task_work(struct task_struct *task)
+{
+ task_work_run();
+}
+
+#endif /* _LINUX_TASK_WORK_H */
diff --git a/include/linux/taskstats_kern.h b/include/linux/taskstats_kern.h
new file mode 100644
index 000000000..58de6edf7
--- /dev/null
+++ b/include/linux/taskstats_kern.h
@@ -0,0 +1,36 @@
+/* taskstats_kern.h - kernel header for per-task statistics interface
+ *
+ * Copyright (C) Shailabh Nagar, IBM Corp. 2006
+ * (C) Balbir Singh, IBM Corp. 2006
+ */
+
+#ifndef _LINUX_TASKSTATS_KERN_H
+#define _LINUX_TASKSTATS_KERN_H
+
+#include <linux/taskstats.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_TASKSTATS
+extern struct kmem_cache *taskstats_cache;
+extern struct mutex taskstats_exit_mutex;
+
+static inline void taskstats_tgid_free(struct signal_struct *sig)
+{
+ if (sig->stats)
+ kmem_cache_free(taskstats_cache, sig->stats);
+}
+
+extern void taskstats_exit(struct task_struct *, int group_dead);
+extern void taskstats_init_early(void);
+#else
+static inline void taskstats_exit(struct task_struct *tsk, int group_dead)
+{}
+static inline void taskstats_tgid_free(struct signal_struct *sig)
+{}
+static inline void taskstats_init_early(void)
+{}
+#endif /* CONFIG_TASKSTATS */
+
+#endif
+
diff --git a/include/linux/tboot.h b/include/linux/tboot.h
new file mode 100644
index 000000000..9a54b331f
--- /dev/null
+++ b/include/linux/tboot.h
@@ -0,0 +1,162 @@
+/*
+ * tboot.h: data structures shared between tboot and the kernel, and
+ *          functions used by the kernel for runtime support of Intel(R)
+ *          Trusted Execution Technology
+ *
+ * Copyright (c) 2006-2009, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef _LINUX_TBOOT_H
+#define _LINUX_TBOOT_H
+
+/* these must have the values from 0-5 in this order */
+enum {
+ TB_SHUTDOWN_REBOOT = 0,
+ TB_SHUTDOWN_S5,
+ TB_SHUTDOWN_S4,
+ TB_SHUTDOWN_S3,
+ TB_SHUTDOWN_HALT,
+ TB_SHUTDOWN_WFS
+};
+
+#ifdef CONFIG_INTEL_TXT
+#include <linux/acpi.h>
+/* used to communicate between tboot and the launched kernel */
+
+#define TB_KEY_SIZE 64 /* 512 bits */
+
+#define MAX_TB_MAC_REGIONS 32
+
+struct tboot_mac_region {
+	u64 start;	/* must be 64-byte aligned */
+	u32 size;	/* must be 64-byte granular */
+} __packed;
+
+/* GAS - Generic Address Structure (ACPI 2.0+) */
+struct tboot_acpi_generic_address {
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_width;
+ u64 address;
+} __packed;
+
+/*
+ * combines Sx info from FADT and FACS tables per ACPI 2.0+ spec
+ * (http://www.acpi.info/)
+ */
+struct tboot_acpi_sleep_info {
+ struct tboot_acpi_generic_address pm1a_cnt_blk;
+ struct tboot_acpi_generic_address pm1b_cnt_blk;
+ struct tboot_acpi_generic_address pm1a_evt_blk;
+ struct tboot_acpi_generic_address pm1b_evt_blk;
+ u16 pm1a_cnt_val;
+ u16 pm1b_cnt_val;
+ u64 wakeup_vector;
+ u32 vector_width;
+ u64 kernel_s3_resume_vector;
+} __packed;
+
+/*
+ * shared memory page used for communication between tboot and kernel
+ */
+struct tboot {
+ /*
+ * version 3+ fields:
+ */
+
+ /* TBOOT_UUID */
+ u8 uuid[16];
+
+ /* version number: 5 is current */
+ u32 version;
+
+ /* physical addr of tb_log_t log */
+ u32 log_addr;
+
+ /*
+ * physical addr of entry point for tboot shutdown and
+ * type of shutdown (TB_SHUTDOWN_*) being requested
+ */
+ u32 shutdown_entry;
+ u32 shutdown_type;
+
+ /* kernel-specified ACPI info for Sx shutdown */
+ struct tboot_acpi_sleep_info acpi_sinfo;
+
+ /* tboot location in memory (physical) */
+ u32 tboot_base;
+ u32 tboot_size;
+
+ /* memory regions (phys addrs) for tboot to MAC on S3 */
+ u8 num_mac_regions;
+ struct tboot_mac_region mac_regions[MAX_TB_MAC_REGIONS];
+
+
+ /*
+ * version 4+ fields:
+ */
+
+ /* symmetric key for use by kernel; will be encrypted on S3 */
+ u8 s3_key[TB_KEY_SIZE];
+
+
+ /*
+ * version 5+ fields:
+ */
+
+	/* used to 4-byte align num_in_wfs */
+ u8 reserved_align[3];
+
+ /* number of processors in wait-for-SIPI */
+ u32 num_in_wfs;
+} __packed;
+
+/*
+ * UUID for tboot data struct to facilitate matching
+ * defined as {663C8DFF-E8B3-4b82-AABF-19EA4D057A08} by tboot, which is
+ * represented as {} in the char array used here
+ */
+#define TBOOT_UUID {0xff, 0x8d, 0x3c, 0x66, 0xb3, 0xe8, 0x82, 0x4b, 0xbf,\
+ 0xaa, 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8}
+
+extern struct tboot *tboot;
+
+static inline int tboot_enabled(void)
+{
+ return tboot != NULL;
+}
+
+extern void tboot_probe(void);
+extern void tboot_shutdown(u32 shutdown_type);
+extern struct acpi_table_header *tboot_get_dmar_table(
+ struct acpi_table_header *dmar_tbl);
+extern int tboot_force_iommu(void);
+
+#else
+
+#define tboot_enabled() 0
+#define tboot_probe() do { } while (0)
+#define tboot_shutdown(shutdown_type) do { } while (0)
+#define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \
+ do { } while (0)
+#define tboot_get_dmar_table(dmar_tbl) (dmar_tbl)
+#define tboot_force_iommu() 0
+
+#endif /* !CONFIG_INTEL_TXT */
+
+#endif /* _LINUX_TBOOT_H */
diff --git a/include/linux/tc.h b/include/linux/tc.h
new file mode 100644
index 000000000..f92511e57
--- /dev/null
+++ b/include/linux/tc.h
@@ -0,0 +1,141 @@
+/*
+ * Interface to the TURBOchannel related routines.
+ *
+ * Copyright (c) 1998 Harald Koerfgen
+ * Copyright (c) 2005 James Simmons
+ * Copyright (c) 2006 Maciej W. Rozycki
+ *
+ * Based on:
+ *
+ * "TURBOchannel Firmware Specification", EK-TCAAD-FS-004
+ *
+ * from Digital Equipment Corporation.
+ *
+ * This file is subject to the terms and conditions of the GNU
+ * General Public License. See the file "COPYING" in the main
+ * directory of this archive for more details.
+ */
+#ifndef _LINUX_TC_H
+#define _LINUX_TC_H
+
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+/*
+ * Offsets for the ROM header locations for TURBOchannel cards.
+ */
+#define TC_OLDCARD 0x3c0000
+#define TC_NEWCARD 0x000000
+
+#define TC_ROM_WIDTH 0x3e0
+#define TC_ROM_STRIDE 0x3e4
+#define TC_ROM_SIZE 0x3e8
+#define TC_SLOT_SIZE 0x3ec
+#define TC_PATTERN0 0x3f0
+#define TC_PATTERN1 0x3f4
+#define TC_PATTERN2 0x3f8
+#define TC_PATTERN3 0x3fc
+#define TC_FIRM_VER 0x400
+#define TC_VENDOR 0x420
+#define TC_MODULE 0x440
+#define TC_FIRM_TYPE 0x460
+#define TC_FLAGS 0x470
+#define TC_ROM_OBJECTS 0x480
+
+/*
+ * Information obtained through the get_tcinfo() PROM call.
+ */
+struct tcinfo {
+ s32 revision; /* Hardware revision level. */
+ s32 clk_period; /* Clock period in nanoseconds. */
+ s32 slot_size; /* Slot size in megabytes. */
+ s32 io_timeout; /* I/O timeout in cycles. */
+ s32 dma_range; /* DMA address range in megabytes. */
+ s32 max_dma_burst; /* Maximum DMA burst length. */
+ s32 parity; /* System module supports TC parity. */
+ s32 reserved[4];
+};
+
+/*
+ * TURBOchannel bus.
+ */
+struct tc_bus {
+ struct list_head devices; /* List of devices on this bus. */
+ struct resource resource[2]; /* Address space routed to this bus. */
+
+ struct device dev;
+ char name[13];
+ resource_size_t slot_base;
+ resource_size_t ext_slot_base;
+ resource_size_t ext_slot_size;
+ int num_tcslots;
+ struct tcinfo info;
+};
+
+/*
+ * TURBOchannel device.
+ */
+struct tc_dev {
+ struct list_head node; /* Node in list of all TC devices. */
+ struct tc_bus *bus; /* Bus this device is on. */
+ struct tc_driver *driver; /* Which driver has allocated this
+ device. */
+ struct device dev; /* Generic device interface. */
+ struct resource resource; /* Address space of this device. */
+ char vendor[9];
+ char name[9];
+ char firmware[9];
+ int interrupt;
+ int slot;
+};
+
+#define to_tc_dev(n) container_of(n, struct tc_dev, dev)
+
+struct tc_device_id {
+ char vendor[9];
+ char name[9];
+};
+
+/*
+ * TURBOchannel driver.
+ */
+struct tc_driver {
+ struct list_head node;
+ const struct tc_device_id *id_table;
+ struct device_driver driver;
+};
+
+#define to_tc_driver(drv) container_of(drv, struct tc_driver, driver)
+
+/*
+ * Return TURBOchannel clock frequency in Hz.
+ */
+static inline unsigned long tc_get_speed(struct tc_bus *tbus)
+{
+ return 100000 * (10000 / (unsigned long)tbus->info.clk_period);
+}
+
+#ifdef CONFIG_TC
+
+extern struct bus_type tc_bus_type;
+
+extern int tc_register_driver(struct tc_driver *tdrv);
+extern void tc_unregister_driver(struct tc_driver *tdrv);
+
+#else /* !CONFIG_TC */
+
+static inline int tc_register_driver(struct tc_driver *tdrv) { return 0; }
+static inline void tc_unregister_driver(struct tc_driver *tdrv) { }
+
+#endif /* CONFIG_TC */
+
+/*
+ * These have to be provided by the architecture.
+ */
+extern int tc_preadb(u8 *valp, void __iomem *addr);
+extern int tc_bus_get_info(struct tc_bus *tbus);
+extern void tc_device_get_irq(struct tc_dev *tdev);
+
+#endif /* _LINUX_TC_H */
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
new file mode 100644
index 000000000..7bd266f35
--- /dev/null
+++ b/include/linux/tca6416_keypad.h
@@ -0,0 +1,34 @@
+/*
+ * tca6416 keypad platform support
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * Author: Sriramakrishnan <srk@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _TCA6416_KEYS_H
+#define _TCA6416_KEYS_H
+
+#include <linux/types.h>
+
+struct tca6416_button {
+ /* Configuration parameters */
+ int code; /* input event code (KEY_*, SW_*) */
+ int active_low;
+ int type; /* input event type (EV_KEY, EV_SW) */
+};
+
+struct tca6416_keys_platform_data {
+ struct tca6416_button *buttons;
+ int nbuttons;
+ unsigned int rep:1; /* enable input subsystem auto repeat */
+ uint16_t pinmask;
+ uint16_t invert;
+ int irq_is_gpio;
+ int use_polling; /* use polling if interrupt is not connected */
+};
+#endif
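
A minimal board-file sketch of how this platform data might be filled in; the button table, pinmask and the decision to poll are hypothetical, not taken from any real board:

	#include <linux/kernel.h>
	#include <linux/input.h>
	#include <linux/tca6416_keypad.h>

	static struct tca6416_button example_buttons[] = {
		{ .code = KEY_VOLUMEUP,   .active_low = 1, .type = EV_KEY },
		{ .code = KEY_VOLUMEDOWN, .active_low = 1, .type = EV_KEY },
	};

	static struct tca6416_keys_platform_data example_keys_pdata = {
		.buttons     = example_buttons,
		.nbuttons    = ARRAY_SIZE(example_buttons),
		.rep         = 1,      /* let the input core auto-repeat */
		.pinmask     = 0x0003, /* only the two button pins are wired */
		.use_polling = 1,      /* assume no interrupt line is connected */
	};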
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
new file mode 100644
index 000000000..f62b548ac
--- /dev/null
+++ b/include/linux/tcp.h
@@ -0,0 +1,414 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP protocol.
+ *
+ * Version: @(#)tcp.h 1.0.2 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_TCP_H
+#define _LINUX_TCP_H
+
+
+#include <linux/skbuff.h>
+#include <linux/cryptohash.h>
+#include <net/sock.h>
+#include <net/inet_connection_sock.h>
+#include <net/inet_timewait_sock.h>
+#include <uapi/linux/tcp.h>
+
+static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+{
+ return (struct tcphdr *)skb_transport_header(skb);
+}
+
+static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+{
+ return tcp_hdr(skb)->doff * 4;
+}
+
+static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
+{
+ return (struct tcphdr *)skb_inner_transport_header(skb);
+}
+
+static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
+{
+ return inner_tcp_hdr(skb)->doff * 4;
+}
+
+static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+{
+ return (tcp_hdr(skb)->doff - 5) * 4;
+}
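
A quick sanity check of the helpers above (illustrative numbers only): doff counts 32-bit words, so for a header with doff == 8, tcp_hdrlen() returns 8 * 4 = 32 bytes and tcp_optlen() returns (8 - 5) * 4 = 12 bytes of options; doff == 5 is a bare 20-byte header with no options.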
+
+/* TCP Fast Open */
+#define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */
+#define TCP_FASTOPEN_COOKIE_MAX 16 /* Max Fast Open Cookie size in bytes */
+#define TCP_FASTOPEN_COOKIE_SIZE 8 /* the size employed by this impl. */
+
+/* TCP Fast Open Cookie as stored in memory */
+struct tcp_fastopen_cookie {
+ s8 len;
+ u8 val[TCP_FASTOPEN_COOKIE_MAX];
+ bool exp; /* In RFC6994 experimental option format */
+};
+
+/* This defines a selective acknowledgement block. */
+struct tcp_sack_block_wire {
+ __be32 start_seq;
+ __be32 end_seq;
+};
+
+struct tcp_sack_block {
+ u32 start_seq;
+ u32 end_seq;
+};
+
+/* These are used to set the sack_ok field in struct tcp_options_received */
+#define TCP_SACK_SEEN (1 << 0) /* 1 = peer is SACK capable, */
+#define TCP_FACK_ENABLED (1 << 1) /* 1 = FACK is enabled locally */
+#define TCP_DSACK_SEEN (1 << 2) /* 1 = DSACK was received from peer */
+
+struct tcp_options_received {
+/* PAWS/RTTM data */
+ long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
+ u32 ts_recent; /* Time stamp to echo next */
+ u32 rcv_tsval; /* Time stamp value */
+ u32 rcv_tsecr; /* Time stamp echo reply */
+ u16 saw_tstamp : 1, /* Saw TIMESTAMP on last packet */
+ tstamp_ok : 1, /* TIMESTAMP seen on SYN packet */
+ dsack : 1, /* D-SACK is scheduled */
+ wscale_ok : 1, /* Wscale seen on SYN packet */
+ sack_ok : 4, /* SACK seen on SYN packet */
+ snd_wscale : 4, /* Window scaling received from sender */
+ rcv_wscale : 4; /* Window scaling to send to receiver */
+ u8 num_sacks; /* Number of SACK blocks */
+ u16 user_mss; /* mss requested by user in ioctl */
+ u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
+};
+
+static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
+{
+ rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
+ rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
+}
+
+/* This is the max number of SACKS that we'll generate and process. It's safe
+ * to increase this, although, since
+ * size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8),
+ * only four SACK blocks will fit in a standard TCP header */
+#define TCP_NUM_SACKS 4
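
Plugging in n = 4 gives 4 + 4 * 8 = 36 bytes, which still fits in the 40 bytes of option space a TCP header can carry (60-byte maximum header minus the 20-byte fixed part); a fifth block would need 44 bytes and no longer fits.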
+
+struct tcp_request_sock_ops;
+
+struct tcp_request_sock {
+ struct inet_request_sock req;
+ const struct tcp_request_sock_ops *af_specific;
+ bool tfo_listener;
+ u32 rcv_isn;
+ u32 snt_isn;
+ u32 snt_synack; /* synack sent time */
+ u32 last_oow_ack_time; /* last SYNACK */
+ u32 rcv_nxt; /* the ack # by SYNACK. For
+ * FastOpen it's the seq#
+ * after data-in-SYN.
+ */
+};
+
+static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
+{
+ return (struct tcp_request_sock *)req;
+}
+
+struct tcp_sock {
+ /* inet_connection_sock has to be the first member of tcp_sock */
+ struct inet_connection_sock inet_conn;
+ u16 tcp_header_len; /* Bytes of tcp header to send */
+ u16 gso_segs; /* Max number of segs per GSO packet */
+
+/*
+ * Header prediction flags
+ * 0x5?10 << 16 + snd_wnd in net byte order
+ */
+ __be32 pred_flags;
+
+/*
+ * RFC793 variables by their proper names. This means you can
+ * read the code and the spec side by side (and laugh ...)
+ * See RFC793 and RFC1122. The RFC writes these in capitals.
+ */
+ u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+ * sum(delta(rcv_nxt)), or how many bytes
+ * were acked.
+ */
+ u32 rcv_nxt; /* What we want to receive next */
+ u32 copied_seq; /* Head of yet unread data */
+ u32 rcv_wup; /* rcv_nxt on last window update sent */
+ u32 snd_nxt; /* Next sequence we send */
+
+ u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+ * sum(delta(snd_una)), or how many bytes
+ * were acked.
+ */
+ struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
+
+ u32 snd_una; /* First byte we want an ack for */
+ u32 snd_sml; /* Last byte of the most recently transmitted small packet */
+ u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
+ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
+ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
+
+ u32 tsoffset; /* timestamp offset */
+
+ struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
+ unsigned long tsq_flags;
+
+ /* Data for direct copy to user */
+ struct {
+ struct sk_buff_head prequeue;
+ struct task_struct *task;
+ struct msghdr *msg;
+ int memory;
+ int len;
+ } ucopy;
+
+ u32 snd_wl1; /* Sequence for window update */
+ u32 snd_wnd; /* The window we expect to receive */
+ u32 max_window; /* Maximal window ever seen from peer */
+ u32 mss_cache; /* Cached effective mss, not including SACKS */
+
+ u32 window_clamp; /* Maximal window to advertise */
+ u32 rcv_ssthresh; /* Current window clamp */
+
+ u16 advmss; /* Advertised MSS */
+ u8 unused;
+ u8 nonagle : 4,/* Disable Nagle algorithm? */
+ thin_lto : 1,/* Use linear timeouts for thin streams */
+ thin_dupack : 1,/* Fast retransmit on first dupack */
+ repair : 1,
+ frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
+ u8 repair_queue;
+ u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
+ syn_data:1, /* SYN includes data */
+ syn_fastopen:1, /* SYN includes Fast Open option */
+ syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
+ syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
+ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+ u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
+
+/* RTT measurement */
+ u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ u32 mdev_us; /* medium deviation */
+ u32 mdev_max_us; /* maximal mdev for the last rtt period */
+ u32 rttvar_us; /* smoothed mdev_max */
+ u32 rtt_seq; /* sequence number to update rttvar */
+
+ u32 packets_out; /* Packets which are "in flight" */
+ u32 retrans_out; /* Retransmitted packets out */
+ u32 max_packets_out; /* max packets_out in last window */
+ u32 max_packets_seq; /* right edge of max_packets_out flight */
+
+ u16 urg_data; /* Saved octet of OOB data and control flags */
+ u8 ecn_flags; /* ECN status bits. */
+ u8 keepalive_probes; /* num of allowed keep alive probes */
+ u32 reordering; /* Packet reordering metric. */
+ u32 snd_up; /* Urgent pointer */
+
+/*
+ * Options received (usually on last packet, some only on SYN packets).
+ */
+ struct tcp_options_received rx_opt;
+
+/*
+ * Slow start and congestion control (see also Nagle, and Karn & Partridge)
+ */
+ u32 snd_ssthresh; /* Slow start size threshold */
+ u32 snd_cwnd; /* Sending congestion window */
+ u32 snd_cwnd_cnt; /* Linear increase counter */
+ u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
+ u32 snd_cwnd_used;
+ u32 snd_cwnd_stamp;
+ u32 prior_cwnd; /* Congestion window at start of Recovery. */
+ u32 prr_delivered; /* Number of newly delivered packets to
+ * receiver in Recovery. */
+ u32 prr_out; /* Total number of pkts sent during Recovery. */
+
+ u32 rcv_wnd; /* Current receiver window */
+ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
+ u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
+ u32 pushed_seq; /* Last pushed seq, required to talk to windows */
+ u32 lost_out; /* Lost packets */
+ u32 sacked_out; /* SACK'd packets */
+ u32 fackets_out; /* FACK'd packets */
+
+ /* from STCP, retrans queue hinting */
+ struct sk_buff* lost_skb_hint;
+ struct sk_buff *retransmit_skb_hint;
+
+ /* OOO segments go in this list. Note that socket lock must be held,
+ * as we do not use sk_buff_head lock.
+ */
+ struct sk_buff_head out_of_order_queue;
+
+ /* SACKs data, these 2 need to be together (see tcp_options_write) */
+ struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
+ struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
+
+ struct tcp_sack_block recv_sack_cache[4];
+
+ struct sk_buff *highest_sack; /* skb just after the highest
+ * skb with SACKed bit set
+ * (validity guaranteed only if
+ * sacked_out > 0)
+ */
+
+ int lost_cnt_hint;
+ u32 retransmit_high; /* L-bits may be on up to this seqno */
+
+ u32 lost_retrans_low; /* Sent seq after any rxmit (lowest) */
+
+ u32 prior_ssthresh; /* ssthresh saved at recovery start */
+ u32 high_seq; /* snd_nxt at onset of congestion */
+
+ u32 retrans_stamp; /* Timestamp of the last retransmit,
+ * also used in SYN-SENT to remember stamp of
+ * the first SYN. */
+ u32 undo_marker; /* snd_una upon a new recovery episode. */
+ int undo_retrans; /* number of undoable retransmissions. */
+ u32 total_retrans; /* Total retransmits for entire connection */
+
+ u32 urg_seq; /* Seq of received urgent pointer */
+ unsigned int keepalive_time; /* time before keep alive takes place */
+ unsigned int keepalive_intvl; /* time interval between keep alive probes */
+
+ int linger2;
+
+/* Receiver side RTT estimation */
+ struct {
+ u32 rtt;
+ u32 seq;
+ u32 time;
+ } rcv_rtt_est;
+
+/* Receiver queue space */
+ struct {
+ int space;
+ u32 seq;
+ u32 time;
+ } rcvq_space;
+
+/* TCP-specific MTU probe information. */
+ struct {
+ u32 probe_seq_start;
+ u32 probe_seq_end;
+ } mtu_probe;
+ u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
+ * while socket was owned by user.
+ */
+
+#ifdef CONFIG_TCP_MD5SIG
+/* TCP AF-Specific parts; only used by MD5 Signature support so far */
+ const struct tcp_sock_af_ops *af_specific;
+
+/* TCP MD5 Signature Option information */
+ struct tcp_md5sig_info __rcu *md5sig_info;
+#endif
+
+#ifdef CONFIG_TCP_STEALTH
+/* Stealth TCP socket configuration */
+ struct {
+ #define TCP_STEALTH_MODE_AUTH BIT(0)
+ #define TCP_STEALTH_MODE_INTEGRITY BIT(1)
+ #define TCP_STEALTH_MODE_INTEGRITY_LEN BIT(2)
+ u8 mode;
+ u8 secret[MD5_MESSAGE_BYTES];
+ u16 integrity_hash;
+ size_t integrity_len;
+ struct skb_mstamp mstamp;
+ bool saw_tsval;
+ } stealth;
+#endif
+
+/* TCP fastopen related information */
+ struct tcp_fastopen_request *fastopen_req;
+ /* fastopen_rsk points to request_sock that resulted in this big
+ * socket. Used to retransmit SYNACKs etc.
+ */
+ struct request_sock *fastopen_rsk;
+};
+
+enum tsq_flags {
+ TSQ_THROTTLED,
+ TSQ_QUEUED,
+ TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */
+ TCP_WRITE_TIMER_DEFERRED, /* tcp_write_timer() found socket was owned */
+ TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
+ TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
+ * tcp_v{4|6}_mtu_reduced()
+ */
+};
+
+static inline struct tcp_sock *tcp_sk(const struct sock *sk)
+{
+ return (struct tcp_sock *)sk;
+}
+
+struct tcp_timewait_sock {
+ struct inet_timewait_sock tw_sk;
+ u32 tw_rcv_nxt;
+ u32 tw_snd_nxt;
+ u32 tw_rcv_wnd;
+ u32 tw_ts_offset;
+ u32 tw_ts_recent;
+
+ /* The time we sent the last out-of-window ACK: */
+ u32 tw_last_oow_ack_time;
+
+ long tw_ts_recent_stamp;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *tw_md5_key;
+#endif
+};
+
+static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
+{
+ return (struct tcp_timewait_sock *)sk;
+}
+
+static inline bool tcp_passive_fastopen(const struct sock *sk)
+{
+ return (sk->sk_state == TCP_SYN_RECV &&
+ tcp_sk(sk)->fastopen_rsk != NULL);
+}
+
+extern void tcp_sock_destruct(struct sock *sk);
+
+static inline int fastopen_init_queue(struct sock *sk, int backlog)
+{
+ struct request_sock_queue *queue =
+ &inet_csk(sk)->icsk_accept_queue;
+
+ if (queue->fastopenq == NULL) {
+ queue->fastopenq = kzalloc(
+ sizeof(struct fastopen_queue),
+ sk->sk_allocation);
+ if (queue->fastopenq == NULL)
+ return -ENOMEM;
+
+ sk->sk_destruct = tcp_sock_destruct;
+ spin_lock_init(&queue->fastopenq->lock);
+ }
+ queue->fastopenq->max_qlen = backlog;
+ return 0;
+}
+
+#endif /* _LINUX_TCP_H */
diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h
new file mode 100644
index 000000000..cfaee8691
--- /dev/null
+++ b/include/linux/textsearch.h
@@ -0,0 +1,178 @@
+#ifndef __LINUX_TEXTSEARCH_H
+#define __LINUX_TEXTSEARCH_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+struct module;
+
+struct ts_config;
+
+#define TS_AUTOLOAD 1 /* Automatically load textsearch modules when needed */
+#define TS_IGNORECASE 2 /* Searches string case insensitively */
+
+/**
+ * struct ts_state - search state
+ * @offset: offset for next match
+ * @cb: control buffer, for persistent variables of get_next_block()
+ */
+struct ts_state
+{
+ unsigned int offset;
+ char cb[40];
+};
+
+/**
+ * struct ts_ops - search module operations
+ * @name: name of search algorithm
+ * @init: initialization function to prepare a search
+ * @find: find the next occurrence of the pattern
+ * @destroy: destroy algorithm specific parts of a search configuration
+ * @get_pattern: return head of pattern
+ * @get_pattern_len: return length of pattern
+ * @owner: module reference to algorithm
+ */
+struct ts_ops
+{
+ const char *name;
+ struct ts_config * (*init)(const void *, unsigned int, gfp_t, int);
+ unsigned int (*find)(struct ts_config *,
+ struct ts_state *);
+ void (*destroy)(struct ts_config *);
+ void * (*get_pattern)(struct ts_config *);
+ unsigned int (*get_pattern_len)(struct ts_config *);
+ struct module *owner;
+ struct list_head list;
+};
+
+/**
+ * struct ts_config - search configuration
+ * @ops: operations of chosen algorithm
+ * @flags: flags
+ * @get_next_block: callback to fetch the next block to search in
+ * @finish: callback to finalize a search
+ */
+struct ts_config
+{
+ struct ts_ops *ops;
+ int flags;
+
+ /**
+ * get_next_block - fetch next block of data
+ * @consumed: number of bytes consumed by the caller
+ * @dst: destination buffer
+ * @conf: search configuration
+ * @state: search state
+ *
+ * Called repeatedly until 0 is returned. Must assign the
+ * head of the next block of data to &*dst and return the length
+ * of the block or 0 if at the end. consumed == 0 indicates
+ * a new search. May store/read persistent values in state->cb.
+ */
+ unsigned int (*get_next_block)(unsigned int consumed,
+ const u8 **dst,
+ struct ts_config *conf,
+ struct ts_state *state);
+
+ /**
+ * finish - finalize/clean a series of get_next_block() calls
+ * @conf: search configuration
+ * @state: search state
+ *
+ * Called after the last use of get_next_block(), may be used
+ * to cleanup any leftovers.
+ */
+ void (*finish)(struct ts_config *conf,
+ struct ts_state *state);
+};
+
+/**
+ * textsearch_next - continue searching for a pattern
+ * @conf: search configuration
+ * @state: search state
+ *
+ * Continues a search looking for more occurrences of the pattern.
+ * textsearch_find() must be called to find the first occurrence
+ * in order to reset the state.
+ *
+ * Returns the position of the next occurrence of the pattern or
+ * UINT_MAX if no match was found.
+ */
+static inline unsigned int textsearch_next(struct ts_config *conf,
+ struct ts_state *state)
+{
+ unsigned int ret = conf->ops->find(conf, state);
+
+ if (conf->finish)
+ conf->finish(conf, state);
+
+ return ret;
+}
+
+/**
+ * textsearch_find - start searching for a pattern
+ * @conf: search configuration
+ * @state: search state
+ *
+ * Returns the position of first occurrence of the pattern or
+ * UINT_MAX if no match was found.
+ */
+static inline unsigned int textsearch_find(struct ts_config *conf,
+ struct ts_state *state)
+{
+ state->offset = 0;
+ return textsearch_next(conf, state);
+}
+
+/**
+ * textsearch_get_pattern - return head of the pattern
+ * @conf: search configuration
+ */
+static inline void *textsearch_get_pattern(struct ts_config *conf)
+{
+ return conf->ops->get_pattern(conf);
+}
+
+/**
+ * textsearch_get_pattern_len - return length of the pattern
+ * @conf: search configuration
+ */
+static inline unsigned int textsearch_get_pattern_len(struct ts_config *conf)
+{
+ return conf->ops->get_pattern_len(conf);
+}
+
+extern int textsearch_register(struct ts_ops *);
+extern int textsearch_unregister(struct ts_ops *);
+extern struct ts_config *textsearch_prepare(const char *, const void *,
+ unsigned int, gfp_t, int);
+extern void textsearch_destroy(struct ts_config *conf);
+extern unsigned int textsearch_find_continuous(struct ts_config *,
+ struct ts_state *,
+ const void *, unsigned int);
+
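
A usage sketch consistent with the declarations above; the choice of the "kmp" algorithm and the pattern are illustrative, and error handling is trimmed to the minimum:

	static int example_find_needle(const char *buf, unsigned int len)
	{
		struct ts_config *conf;
		struct ts_state state;
		unsigned int pos;

		conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
		if (IS_ERR(conf))
			return PTR_ERR(conf);

		/* search a flat, contiguous buffer */
		pos = textsearch_find_continuous(conf, &state, buf, len);
		textsearch_destroy(conf);

		return pos == UINT_MAX ? -ENOENT : (int)pos;
	}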
+
+#define TS_PRIV_ALIGNTO 8
+#define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1))
+
+static inline struct ts_config *alloc_ts_config(size_t payload,
+ gfp_t gfp_mask)
+{
+ struct ts_config *conf;
+
+ conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
+ if (conf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ return conf;
+}
+
+static inline void *ts_config_priv(struct ts_config *conf)
+{
+ return ((u8 *) conf + TS_PRIV_ALIGN(sizeof(struct ts_config)));
+}
+
+#endif
diff --git a/include/linux/textsearch_fsm.h b/include/linux/textsearch_fsm.h
new file mode 100644
index 000000000..fdfa078c6
--- /dev/null
+++ b/include/linux/textsearch_fsm.h
@@ -0,0 +1,48 @@
+#ifndef __LINUX_TEXTSEARCH_FSM_H
+#define __LINUX_TEXTSEARCH_FSM_H
+
+#include <linux/types.h>
+
+enum {
+ TS_FSM_SPECIFIC, /* specific character */
+ TS_FSM_WILDCARD, /* any character */
+ TS_FSM_DIGIT, /* isdigit() */
+ TS_FSM_XDIGIT, /* isxdigit() */
+ TS_FSM_PRINT, /* isprint() */
+ TS_FSM_ALPHA, /* isalpha() */
+ TS_FSM_ALNUM, /* isalnum() */
+ TS_FSM_ASCII, /* isascii() */
+ TS_FSM_CNTRL, /* iscntrl() */
+ TS_FSM_GRAPH, /* isgraph() */
+ TS_FSM_LOWER, /* islower() */
+ TS_FSM_UPPER, /* isupper() */
+ TS_FSM_PUNCT, /* ispunct() */
+ TS_FSM_SPACE, /* isspace() */
+ __TS_FSM_TYPE_MAX,
+};
+#define TS_FSM_TYPE_MAX (__TS_FSM_TYPE_MAX - 1)
+
+enum {
+ TS_FSM_SINGLE, /* 1 occurrence */
+ TS_FSM_PERHAPS, /* 1 or 0 occurrence */
+ TS_FSM_ANY, /* 0..n occurrences */
+ TS_FSM_MULTI, /* 1..n occurrences */
+ TS_FSM_HEAD_IGNORE, /* 0..n ignored occurrences at head */
+ __TS_FSM_RECUR_MAX,
+};
+#define TS_FSM_RECUR_MAX (__TS_FSM_RECUR_MAX - 1)
+
+/**
+ * struct ts_fsm_token - state machine token (state)
+ * @type: type of token
+ * @recur: number of recurrences
+ * @value: character value for TS_FSM_SPECIFIC
+ */
+struct ts_fsm_token
+{
+ __u16 type;
+ __u8 recur;
+ __u8 value;
+};
+
+#endif
diff --git a/include/linux/tfrc.h b/include/linux/tfrc.h
new file mode 100644
index 000000000..8a8462b4a
--- /dev/null
+++ b/include/linux/tfrc.h
@@ -0,0 +1,55 @@
+#ifndef _LINUX_TFRC_H_
+#define _LINUX_TFRC_H_
+/*
+ * TFRC - Data Structures for the TCP-Friendly Rate Control congestion
+ * control mechanism as specified in RFC 3448.
+ *
+ * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
+ * Copyright (c) 2005 Ian McDonald <iam4@cs.waikato.ac.nz>
+ * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/types.h>
+
+/** tfrc_rx_info - TFRC Receiver Data Structure
+ *
+ * @tfrcrx_x_recv: receiver estimate of sending rate (3.2.2)
+ * @tfrcrx_rtt: round-trip-time (communicated by sender)
+ * @tfrcrx_p: current estimate of loss event rate (3.2.2)
+ */
+struct tfrc_rx_info {
+ __u32 tfrcrx_x_recv;
+ __u32 tfrcrx_rtt;
+ __u32 tfrcrx_p;
+};
+
+/** tfrc_tx_info - TFRC Sender Data Structure
+ *
+ * @tfrctx_x: computed transmit rate (4.3 (4))
+ * @tfrctx_x_recv: receiver estimate of send rate (4.3)
+ * @tfrctx_x_calc: return value of throughput equation (3.1)
+ * @tfrctx_rtt: (moving average) estimate of RTT (4.3)
+ * @tfrctx_p: current loss event rate (5.4)
+ * @tfrctx_rto: estimate of RTO, equals 4*RTT (4.3)
+ * @tfrctx_ipi: inter-packet interval (4.6)
+ *
+ * Note: X and X_recv are both maintained in units of 64 * bytes/second. This
+ * enables a finer resolution of sending rates and avoids problems with
+ * integer arithmetic; u32 is not sufficient as scaling consumes 6 bits.
+ */
+struct tfrc_tx_info {
+ __u64 tfrctx_x;
+ __u64 tfrctx_x_recv;
+ __u32 tfrctx_x_calc;
+ __u32 tfrctx_rtt;
+ __u32 tfrctx_p;
+ __u32 tfrctx_rto;
+ __u32 tfrctx_ipi;
+};
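
For example (illustrative numbers): a computed transmit rate of 12,500 bytes/s (100 kbit/s) is therefore represented as 12500 * 64 = 800,000 in tfrctx_x.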
+
+#endif /* _LINUX_TFRC_H_ */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
new file mode 100644
index 000000000..5eac31649
--- /dev/null
+++ b/include/linux/thermal.h
@@ -0,0 +1,407 @@
+/*
+ * thermal.h ($Revision: 0 $)
+ *
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef __THERMAL_H__
+#define __THERMAL_H__
+
+#include <linux/of.h>
+#include <linux/idr.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <uapi/linux/thermal.h>
+
+#define THERMAL_TRIPS_NONE -1
+#define THERMAL_MAX_TRIPS 12
+
+/* invalid cooling state */
+#define THERMAL_CSTATE_INVALID -1UL
+
+/* No upper/lower limit requirement */
+#define THERMAL_NO_LIMIT ((u32)~0)
+
+/* Unit conversion macros */
+#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
+ ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
+#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
+#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
+#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
+#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
+#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
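
To make the deci-kelvin convention concrete (worked numbers only): a reading of 3000 deci-kelvin is 300.0 K; DECI_KELVIN_TO_MILLICELSIUS(3000) yields (3000 - 2732) * 100 = 26,800 millicelsius, i.e. 26.8 °C, and MILLICELSIUS_TO_DECI_KELVIN(26800) maps back to 26800 / 100 + 2732 = 3000.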
+
+/* Default Thermal Governor */
+#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
+#define DEFAULT_THERMAL_GOVERNOR "step_wise"
+#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE)
+#define DEFAULT_THERMAL_GOVERNOR "fair_share"
+#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE)
+#define DEFAULT_THERMAL_GOVERNOR "user_space"
+#endif
+
+struct thermal_zone_device;
+struct thermal_cooling_device;
+
+enum thermal_device_mode {
+ THERMAL_DEVICE_DISABLED = 0,
+ THERMAL_DEVICE_ENABLED,
+};
+
+enum thermal_trip_type {
+ THERMAL_TRIP_ACTIVE = 0,
+ THERMAL_TRIP_PASSIVE,
+ THERMAL_TRIP_HOT,
+ THERMAL_TRIP_CRITICAL,
+};
+
+enum thermal_trend {
+ THERMAL_TREND_STABLE, /* temperature is stable */
+ THERMAL_TREND_RAISING, /* temperature is rising */
+ THERMAL_TREND_DROPPING, /* temperature is dropping */
+ THERMAL_TREND_RAISE_FULL, /* apply highest cooling action */
+ THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
+};
+
+struct thermal_zone_device_ops {
+ int (*bind) (struct thermal_zone_device *,
+ struct thermal_cooling_device *);
+ int (*unbind) (struct thermal_zone_device *,
+ struct thermal_cooling_device *);
+ int (*get_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*get_mode) (struct thermal_zone_device *,
+ enum thermal_device_mode *);
+ int (*set_mode) (struct thermal_zone_device *,
+ enum thermal_device_mode);
+ int (*get_trip_type) (struct thermal_zone_device *, int,
+ enum thermal_trip_type *);
+ int (*get_trip_temp) (struct thermal_zone_device *, int,
+ unsigned long *);
+ int (*set_trip_temp) (struct thermal_zone_device *, int,
+ unsigned long);
+ int (*get_trip_hyst) (struct thermal_zone_device *, int,
+ unsigned long *);
+ int (*set_trip_hyst) (struct thermal_zone_device *, int,
+ unsigned long);
+ int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
+ int (*set_emul_temp) (struct thermal_zone_device *, unsigned long);
+ int (*get_trend) (struct thermal_zone_device *, int,
+ enum thermal_trend *);
+ int (*notify) (struct thermal_zone_device *, int,
+ enum thermal_trip_type);
+};
+
+struct thermal_cooling_device_ops {
+ int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
+ int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
+ int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
+};
+
+struct thermal_cooling_device {
+ int id;
+ char type[THERMAL_NAME_LENGTH];
+ struct device device;
+ struct device_node *np;
+ void *devdata;
+ const struct thermal_cooling_device_ops *ops;
+ bool updated; /* true if the cooling device does not need update */
+ struct mutex lock; /* protect thermal_instances list */
+ struct list_head thermal_instances;
+ struct list_head node;
+};
+
+struct thermal_attr {
+ struct device_attribute attr;
+ char name[THERMAL_NAME_LENGTH];
+};
+
+/**
+ * struct thermal_zone_device - structure for a thermal zone
+ * @id: unique id number for each thermal zone
+ * @type: the thermal zone device type
+ * @device: &struct device for this thermal zone
+ * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature
+ * @trip_type_attrs: attributes for trip points for sysfs: trip type
+ * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
+ * @devdata: private pointer for device private data
+ * @trips: number of trip points the thermal zone supports
+ * @passive_delay: number of milliseconds to wait between polls when
+ * performing passive cooling. Currently only used by the
+ * step-wise governor
+ * @polling_delay: number of milliseconds to wait between polls when
+ * checking whether trip points have been crossed (0 for
+ * interrupt driven systems)
+ * @temperature: current temperature. This is only for core code,
+ * drivers should use thermal_zone_get_temp() to get the
+ * current temperature
+ * @last_temperature: previous temperature read
+ * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION
+ * @passive: 1 if you've crossed a passive trip point, 0 otherwise.
+ * Currently only used by the step-wise governor.
+ * @forced_passive: If > 0, temperature at which to switch on all ACPI
+ * processor cooling devices. Currently only used by the
+ * step-wise governor.
+ * @ops: operations this &thermal_zone_device supports
+ * @tzp: thermal zone parameters
+ * @governor: pointer to the governor for this thermal zone
+ * @thermal_instances: list of &struct thermal_instance of this thermal zone
+ * @idr: &struct idr to generate unique id for this zone's cooling
+ * devices
+ * @lock: lock to protect thermal_instances list
+ * @node: node in thermal_tz_list (in thermal_core.c)
+ * @poll_queue: delayed work for polling
+ */
+struct thermal_zone_device {
+ int id;
+ char type[THERMAL_NAME_LENGTH];
+ struct device device;
+ struct thermal_attr *trip_temp_attrs;
+ struct thermal_attr *trip_type_attrs;
+ struct thermal_attr *trip_hyst_attrs;
+ void *devdata;
+ int trips;
+ int passive_delay;
+ int polling_delay;
+ int temperature;
+ int last_temperature;
+ int emul_temperature;
+ int passive;
+ unsigned int forced_passive;
+ struct thermal_zone_device_ops *ops;
+ const struct thermal_zone_params *tzp;
+ struct thermal_governor *governor;
+ struct list_head thermal_instances;
+ struct idr idr;
+ struct mutex lock;
+ struct list_head node;
+ struct delayed_work poll_queue;
+};
+
+/**
+ * struct thermal_governor - structure that holds thermal governor information
+ * @name: name of the governor
+ * @throttle: callback called for every trip point even if temperature is
+ * below the trip point temperature
+ * @governor_list: node in thermal_governor_list (in thermal_core.c)
+ */
+struct thermal_governor {
+ char name[THERMAL_NAME_LENGTH];
+ int (*throttle)(struct thermal_zone_device *tz, int trip);
+ struct list_head governor_list;
+};
+
+/* Structure that holds binding parameters for a zone */
+struct thermal_bind_params {
+ struct thermal_cooling_device *cdev;
+
+ /*
+ * This is a measure of how effectively these devices can
+ * cool 'this' thermal zone. It shall be determined by platform
+ * characterization. This is on a 'percentage' scale.
+ * See Documentation/thermal/sysfs-api.txt for more information.
+ */
+ int weight;
+
+ /*
+ * This is a bit mask that gives the binding relation between this
+ * thermal zone and cdev, for a particular trip point.
+ * See Documentation/thermal/sysfs-api.txt for more information.
+ */
+ int trip_mask;
+
+ /*
+ * This is an array of cooling state limits. Must have exactly
+ * 2 * thermal_zone.number_of_trip_points. It is an array consisting
+ * of tuples <lower-state upper-state> of state limits. Each trip
+ * will be associated with one state limit tuple when binding.
+ * A NULL pointer means <THERMAL_NO_LIMIT THERMAL_NO_LIMIT>
+ * on all trips.
+ */
+ unsigned long *binding_limits;
+ int (*match) (struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev);
+};
+
+/* Structure to define Thermal Zone parameters */
+struct thermal_zone_params {
+ char governor_name[THERMAL_NAME_LENGTH];
+
+ /*
+ * a boolean to indicate if the thermal-to-hwmon sysfs interface
+ * is required. When no_hwmon == false, a hwmon sysfs interface
+ * will be created. When no_hwmon == true, nothing will be done.
+ */
+ bool no_hwmon;
+
+ int num_tbps; /* Number of tbp entries */
+ struct thermal_bind_params *tbp;
+};
+
+struct thermal_genl_event {
+ u32 orig;
+ enum events event;
+};
+
+/**
+ * struct thermal_zone_of_device_ops - callbacks for handling DT based zones
+ *
+ * Mandatory:
+ * @get_temp: a pointer to a function that reads the sensor temperature.
+ *
+ * Optional:
+ * @get_trend: a pointer to a function that reads the sensor temperature trend.
+ * @set_emul_temp: a pointer to a function that sets sensor emulated
+ * temperature.
+ */
+struct thermal_zone_of_device_ops {
+ int (*get_temp)(void *, long *);
+ int (*get_trend)(void *, long *);
+ int (*set_emul_temp)(void *, unsigned long);
+};
+
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @np: pointer to struct device_node that this trip point was created from
+ * @temperature: temperature value in millicelsius
+ * @hysteresis: relative hysteresis in millicelsius
+ * @type: trip point type
+ */
+
+struct thermal_trip {
+ struct device_node *np;
+ unsigned long int temperature;
+ unsigned long int hysteresis;
+ enum thermal_trip_type type;
+};
+
+/* Function declarations */
+#ifdef CONFIG_THERMAL_OF
+struct thermal_zone_device *
+thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops);
+void thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tz);
+#else
+static inline struct thermal_zone_device *
+thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
+{
+ return NULL;
+}
+
+static inline
+void thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tz)
+{
+}
+
+#endif
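
A sketch of how a sensor driver might plug into the OF thermal support declared above; the example_sensor structure and its temperature source are hypothetical:

	struct example_sensor {
		struct thermal_zone_device *tzd;
		long temp_mc;		/* last reading, in millicelsius */
	};

	static int example_get_temp(void *data, long *temp)
	{
		struct example_sensor *s = data;

		*temp = s->temp_mc;	/* normally read from hardware here */
		return 0;
	}

	static const struct thermal_zone_of_device_ops example_of_ops = {
		.get_temp = example_get_temp,	/* the only mandatory callback */
	};

	/* in the driver's probe():
	 *	s->tzd = thermal_zone_of_sensor_register(dev, 0, s, &example_of_ops);
	 *	if (IS_ERR(s->tzd))
	 *		return PTR_ERR(s->tzd);
	 */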
+
+#if IS_ENABLED(CONFIG_THERMAL)
+struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
+ void *, struct thermal_zone_device_ops *,
+ const struct thermal_zone_params *, int, int);
+void thermal_zone_device_unregister(struct thermal_zone_device *);
+
+int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
+ struct thermal_cooling_device *,
+ unsigned long, unsigned long);
+int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
+ struct thermal_cooling_device *);
+void thermal_zone_device_update(struct thermal_zone_device *);
+
+struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
+ const struct thermal_cooling_device_ops *);
+struct thermal_cooling_device *
+thermal_of_cooling_device_register(struct device_node *np, char *, void *,
+ const struct thermal_cooling_device_ops *);
+void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name);
+int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp);
+
+int get_tz_trend(struct thermal_zone_device *, int);
+struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
+ struct thermal_cooling_device *, int);
+void thermal_cdev_update(struct thermal_cooling_device *);
+void thermal_notify_framework(struct thermal_zone_device *, int);
+#else
+static inline struct thermal_zone_device *thermal_zone_device_register(
+ const char *type, int trips, int mask, void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_zone_device_unregister(
+ struct thermal_zone_device *tz)
+{ }
+static inline int thermal_zone_bind_cooling_device(
+ struct thermal_zone_device *tz, int trip,
+ struct thermal_cooling_device *cdev,
+ unsigned long upper, unsigned long lower)
+{ return -ENODEV; }
+static inline int thermal_zone_unbind_cooling_device(
+ struct thermal_zone_device *tz, int trip,
+ struct thermal_cooling_device *cdev)
+{ return -ENODEV; }
+static inline void thermal_zone_device_update(struct thermal_zone_device *tz)
+{ }
+static inline struct thermal_cooling_device *
+thermal_cooling_device_register(char *type, void *devdata,
+ const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline struct thermal_cooling_device *
+thermal_of_cooling_device_register(struct device_node *np,
+ char *type, void *devdata, const struct thermal_cooling_device_ops *ops)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cooling_device_unregister(
+ struct thermal_cooling_device *cdev)
+{ }
+static inline struct thermal_zone_device *thermal_zone_get_zone_by_name(
+ const char *name)
+{ return ERR_PTR(-ENODEV); }
+static inline int thermal_zone_get_temp(
+ struct thermal_zone_device *tz, unsigned long *temp)
+{ return -ENODEV; }
+static inline int get_tz_trend(struct thermal_zone_device *tz, int trip)
+{ return -ENODEV; }
+static inline struct thermal_instance *
+get_thermal_instance(struct thermal_zone_device *tz,
+ struct thermal_cooling_device *cdev, int trip)
+{ return ERR_PTR(-ENODEV); }
+static inline void thermal_cdev_update(struct thermal_cooling_device *cdev)
+{ }
+static inline void thermal_notify_framework(struct thermal_zone_device *tz,
+ int trip)
+{ }
+#endif /* CONFIG_THERMAL */
+
+#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL)
+extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+ enum events event);
+#else
+static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
+ enum events event)
+{
+ return 0;
+}
+#endif
+
+#endif /* __THERMAL_H__ */
diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h
new file mode 100644
index 000000000..361de59a2
--- /dev/null
+++ b/include/linux/thinkpad_acpi.h
@@ -0,0 +1,15 @@
+#ifndef __THINKPAD_ACPI_H__
+#define __THINKPAD_ACPI_H__
+
+/* This function returns 0 on success, or a negative error code
+ (e.g. -ENODEV if no LED is present) */
+
+enum {
+ TPACPI_LED_MUTE,
+ TPACPI_LED_MICMUTE,
+ TPACPI_LED_MAX,
+};
+
+int tpacpi_led_set(int whichled, bool on);
+
+#endif
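
An illustrative call site (inside some driver code), assuming the thinkpad_acpi driver is present to back the symbol:

	int err = tpacpi_led_set(TPACPI_LED_MICMUTE, true);

	if (err)	/* e.g. -ENODEV when the machine has no such LED */
		pr_warn("could not set micmute LED: %d\n", err);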
diff --git a/include/linux/thinkpad_ec.h b/include/linux/thinkpad_ec.h
new file mode 100644
index 000000000..1b80d7ee5
--- /dev/null
+++ b/include/linux/thinkpad_ec.h
@@ -0,0 +1,47 @@
+/*
+ * thinkpad_ec.h - interface to ThinkPad embedded controller LPC3 functions
+ *
+ * Copyright (C) 2005 Shem Multinymous <multinymous@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _THINKPAD_EC_H
+#define _THINKPAD_EC_H
+
+#ifdef __KERNEL__
+
+#define TP_CONTROLLER_ROW_LEN 16
+
+/* EC transactions input and output (possibly partial) vectors of 16 bytes. */
+struct thinkpad_ec_row {
+ u16 mask; /* bitmap of which entries of val[] are meaningful */
+ u8 val[TP_CONTROLLER_ROW_LEN];
+};
+
+extern int __must_check thinkpad_ec_lock(void);
+extern int __must_check thinkpad_ec_try_lock(void);
+extern void thinkpad_ec_unlock(void);
+
+extern int thinkpad_ec_read_row(const struct thinkpad_ec_row *args,
+ struct thinkpad_ec_row *data);
+extern int thinkpad_ec_try_read_row(const struct thinkpad_ec_row *args,
+ struct thinkpad_ec_row *mask);
+extern int thinkpad_ec_prefetch_row(const struct thinkpad_ec_row *args);
+extern void thinkpad_ec_invalidate(void);
+
+
+#endif /* __KERNEL__ */
+#endif /* _THINKPAD_EC_H */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
new file mode 100644
index 000000000..b42ba9169
--- /dev/null
+++ b/include/linux/thread_info.h
@@ -0,0 +1,150 @@
+/* thread_info.h: common low-level thread information accessors
+ *
+ * Copyright (C) 2002 David Howells (dhowells@redhat.com)
+ * - Incorporating suggestions made by Linus Torvalds
+ */
+
+#ifndef _LINUX_THREAD_INFO_H
+#define _LINUX_THREAD_INFO_H
+
+#include <linux/types.h>
+#include <linux/bug.h>
+
+struct timespec;
+struct compat_timespec;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+ long (*fn)(struct restart_block *);
+ union {
+ /* For futex_wait and futex_wait_requeue_pi */
+ struct {
+ u32 __user *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+ u64 time;
+ u32 __user *uaddr2;
+ } futex;
+ /* For nanosleep */
+ struct {
+ clockid_t clockid;
+ struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+ struct compat_timespec __user *compat_rmtp;
+#endif
+ u64 expires;
+ } nanosleep;
+ /* For poll */
+ struct {
+ struct pollfd __user *ufds;
+ int nfds;
+ int has_timeout;
+ unsigned long tv_sec;
+ unsigned long tv_nsec;
+ } poll;
+ };
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#include <linux/bitops.h>
+#include <asm/thread_info.h>
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_DEBUG_STACK_USAGE
+# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | ___GFP_TOI_NOTRACK | __GFP_ZERO)
+#else
+# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | ___GFP_TOI_NOTRACK)
+#endif
+
+/*
+ * flag set/clear/test wrappers
+ * - pass TIF_xxxx constants to these functions
+ */
+
+static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ set_bit(flag, (unsigned long *)&ti->flags);
+}
+
+static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ clear_bit(flag, (unsigned long *)&ti->flags);
+}
+
+static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ return test_and_set_bit(flag, (unsigned long *)&ti->flags);
+}
+
+static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
+}
+
+static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ return test_bit(flag, (unsigned long *)&ti->flags);
+}
+
+#define set_thread_flag(flag) \
+ set_ti_thread_flag(current_thread_info(), flag)
+#define clear_thread_flag(flag) \
+ clear_ti_thread_flag(current_thread_info(), flag)
+#define test_and_set_thread_flag(flag) \
+ test_and_set_ti_thread_flag(current_thread_info(), flag)
+#define test_and_clear_thread_flag(flag) \
+ test_and_clear_ti_thread_flag(current_thread_info(), flag)
+#define test_thread_flag(flag) \
+ test_ti_thread_flag(current_thread_info(), flag)
+
+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+
+#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
+/*
+ * An arch can define its own version of set_restore_sigmask() to get the
+ * job done however works, with or without TIF_RESTORE_SIGMASK.
+ */
+#define HAVE_SET_RESTORE_SIGMASK 1
+
+/**
+ * set_restore_sigmask() - make sure saved_sigmask processing gets done
+ *
+ * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
+ * will run before returning to user mode, to process the flag. For
+ * all callers, TIF_SIGPENDING is already set or it's no harm to set
+ * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
+ * arch code will notice on return to user mode, in case those bits
+ * are scarce. We set TIF_SIGPENDING here to ensure that the arch
+ * signal code always gets run when TIF_RESTORE_SIGMASK is set.
+ */
+static inline void set_restore_sigmask(void)
+{
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_restore_sigmask(void)
+{
+ return test_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
+
+#ifndef HAVE_SET_RESTORE_SIGMASK
+#error "no set_restore_sigmask() provided and default one won't work"
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/threads.h b/include/linux/threads.h
new file mode 100644
index 000000000..383ab9592
--- /dev/null
+++ b/include/linux/threads.h
@@ -0,0 +1,45 @@
+#ifndef _LINUX_THREADS_H
+#define _LINUX_THREADS_H
+
+
+/*
+ * The default limit for the nr of threads is now in
+ * /proc/sys/kernel/threads-max.
+ */
+
+/*
+ * Maximum supported processors. Setting this smaller saves quite a
+ * bit of memory. Use nr_cpu_ids instead of this except for static bitmaps.
+ */
+#ifndef CONFIG_NR_CPUS
+/* FIXME: This should be fixed in the arch's Kconfig */
+#define CONFIG_NR_CPUS 1
+#endif
+
+/* Places which use this should consider cpumask_var_t. */
+#define NR_CPUS CONFIG_NR_CPUS
+
+#define MIN_THREADS_LEFT_FOR_ROOT 4
+
+/*
+ * This controls the default maximum pid allocated to a process
+ */
+#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000)
+
+/*
+ * A maximum of 4 million PIDs should be enough for a while.
+ * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.]
+ */
+#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
+ (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
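
Concretely: with CONFIG_BASE_SMALL unset, a 64-bit build (sizeof(long) == 8) gets PID_MAX_LIMIT = 4 * 1024 * 1024 = 4,194,304, while a 32-bit build falls back to PID_MAX_DEFAULT (0x8000 = 32,768); a CONFIG_BASE_SMALL build is capped at PAGE_SIZE * 8, i.e. 32,768 pids with 4 KiB pages.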
+
+/*
+ * Define a minimum number of pids per cpu. Heuristically based
+ * on original pid max of 32k for 32 cpus. Also, increase the
+ * minimum settable value for pid_max on the running system based
+ * on similar defaults. See kernel/pid.c:pidmap_init() for details.
+ */
+#define PIDS_PER_CPU_DEFAULT 1024
+#define PIDS_PER_CPU_MIN 8
+
+#endif
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
new file mode 100644
index 000000000..c78dcfeaf
--- /dev/null
+++ b/include/linux/ti_wilink_st.h
@@ -0,0 +1,452 @@
+/*
+ * Shared Transport Header file
+ * To be included by the protocol stack drivers for
+ * Texas Instruments BT,FM and GPS combo chip drivers
+ * and also serves the sub-modules of the shared transport driver.
+ *
+ * Copyright (C) 2009-2010 Texas Instruments
+ * Author: Pavan Savoy <pavan_savoy@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef TI_WILINK_ST_H
+#define TI_WILINK_ST_H
+
+#include <linux/skbuff.h>
+
+/**
+ * enum proto_type - The protocols on WiLink chips which share a
+ * common physical interface like UART.
+ */
+enum proto_type {
+ ST_BT,
+ ST_FM,
+ ST_GPS,
+ ST_MAX_CHANNELS = 16,
+};
+
+/**
+ * struct st_proto_s - Per Protocol structure from BT/FM/GPS to ST
+ * @type: type of the protocol being registered among the
+ * available proto_types (BT, FM, GPS), the protocols which share the TTY.
+ * @recv: the receiver callback pointing to a function in the
+ * protocol drivers called by the ST driver upon receiving
+ * relevant data.
+ * @match_packet: reserved for future use, to make ST more generic
+ * @reg_complete_cb: callback handler pointing to a function in protocol
+ * handler called by ST when the pending registrations are complete.
+ * The registrations are marked pending, in situations when fw
+ * download is in progress.
+ * @write: pointer to function in ST provided to protocol drivers from ST,
+ * to be used when protocol drivers have data to send to the TTY.
+ * @priv_data: private data holder for the protocol drivers, sent
+ * from the protocol drivers during registration, and sent back on
+ * reg_complete_cb and recv.
+ * @chnl_id: channel id the protocol driver is interested in, the channel
+ * id is nothing but the 1st byte of the packet in UART frame.
+ * @max_frame_size: size of the largest frame the protocol can receive.
+ * @hdr_len: length of the header structure of the protocol.
+ * @offset_len_in_hdr: this provides the offset of the length field in the
+ * header structure of the protocol header, to assist ST to know
+ * how much to receive, if the data is split across UART frames.
+ * @len_size: whether the length field inside the header is 2 bytes
+ * or 1 byte.
+ * @reserve: the number of bytes ST needs to reserve in the skb being
+ * prepared for the protocol driver.
+ */
+struct st_proto_s {
+ enum proto_type type;
+ long (*recv) (void *, struct sk_buff *);
+ unsigned char (*match_packet) (const unsigned char *data);
+ void (*reg_complete_cb) (void *, char data);
+ long (*write) (struct sk_buff *skb);
+ void *priv_data;
+
+ unsigned char chnl_id;
+ unsigned short max_frame_size;
+ unsigned char hdr_len;
+ unsigned char offset_len_in_hdr;
+ unsigned char len_size;
+ unsigned char reserve;
+};
+
+extern long st_register(struct st_proto_s *);
+extern long st_unregister(struct st_proto_s *);
+
+extern struct ti_st_plat_data *dt_pdata;
+
+/*
+ * header information used by st_core.c
+ */
+
+/* states of protocol list */
+#define ST_NOTEMPTY 1
+#define ST_EMPTY 0
+
+/*
+ * possible st_states
+ */
+#define ST_INITIALIZING 1
+#define ST_REG_IN_PROGRESS 2
+#define ST_REG_PENDING 3
+#define ST_WAITING_FOR_RESP 4
+
+/**
+ * struct st_data_s - ST core internal structure
+ * @st_state: different states of ST like initializing, registration
+ * in progress, this is mainly used to return relevant err codes
+ * when protocol drivers are registering. It is also used to track
+ * the recv function, as in during fw download only HCI events
+ * can occur, whereas during other times events on CH8 and CH9
+ * can occur.
+ * @tty: tty provided by the TTY core for line disciplines.
+ * @tx_skb: if for some reason the tty's write returns fewer bytes than
+ * requested, the rest of the data is kept here to be written on the next attempt.
+ * This needs to be protected, hence the lock inside wakeup func.
+ * @tx_state: if the data is being written onto the TTY and protocol driver
+ * wants to send more, queue up data and mark that there is
+ * more data to send.
+ * @list: the list of protocols registered, only MAX can exist, one protocol
+ * can register only once.
+ * @rx_state: states to be maintained inside st's tty receive
+ * @rx_count: count to be maintained inside st's tty receive
+ * @rx_skb: the skb where all data for a protocol gets accumulated,
+ * since tty might not call receive when a complete event packet
+ * is received, the states, count and the skb need to be maintained.
+ * @rx_chnl: the channel ID for which the data is being accumulated.
+ * @txq: the list of skbs which needs to be sent onto the TTY.
+ * @tx_waitq: if the chip is not in AWAKE state, the skbs need to be queued
+ * up in here, PM(WAKEUP_IND) data needs to be sent and then the skbs
+ * from waitq can be moved onto the txq.
+ * Needs locking too.
+ * @lock: the lock to protect skbs, queues, and ST states.
+ * @protos_registered: count of the protocols registered, also when 0 the
+ * chip enable gpio can be toggled, and when it changes to 1 the fw
+ * needs to be downloaded to initialize chip side ST.
+ * @ll_state: the various PM states the chip can be, the states are notified
+ * to us, when the chip sends relevant PM packets(SLEEP_IND, WAKE_IND).
+ * @kim_data: reference to the parent encapsulating structure.
+ *
+ */
+struct st_data_s {
+ unsigned long st_state;
+ struct sk_buff *tx_skb;
+#define ST_TX_SENDING 1
+#define ST_TX_WAKEUP 2
+ unsigned long tx_state;
+ struct st_proto_s *list[ST_MAX_CHANNELS];
+ bool is_registered[ST_MAX_CHANNELS];
+ unsigned long rx_state;
+ unsigned long rx_count;
+ struct sk_buff *rx_skb;
+ unsigned char rx_chnl;
+ struct sk_buff_head txq, tx_waitq;
+ spinlock_t lock;
+ unsigned char protos_registered;
+ unsigned long ll_state;
+ void *kim_data;
+ struct tty_struct *tty;
+};
+
+/*
+ * wrapper around tty->ops->write_room to check
+ * availability during firmware download
+ */
+int st_get_uart_wr_room(struct st_data_s *st_gdata);
+/**
+ * st_int_write -
+ * point this to tty->driver->write or tty->ops->write
+ * depending upon the kernel version
+ */
+int st_int_write(struct st_data_s*, const unsigned char*, int);
+
+/**
+ * st_write -
+ * internal write function, passed onto protocol drivers
+ * via the write function ptr of protocol struct
+ */
+long st_write(struct sk_buff *);
+
+/* function to be called from ST-LL */
+void st_ll_send_frame(enum proto_type, struct sk_buff *);
+
+/* internal wake up function */
+void st_tx_wakeup(struct st_data_s *st_data);
+
+/* init, exit entry funcs called from KIM */
+int st_core_init(struct st_data_s **);
+void st_core_exit(struct st_data_s *);
+
+/* ask for reference from KIM */
+void st_kim_ref(struct st_data_s **, int);
+
+#define GPS_STUB_TEST
+#ifdef GPS_STUB_TEST
+int gps_chrdrv_stub_write(const unsigned char*, int);
+void gps_chrdrv_stub_init(void);
+#endif
+
+/*
+ * header information used by st_kim.c
+ */
+
+/* time in msec to wait for
+ * line discipline to be installed
+ */
+#define LDISC_TIME 1000
+#define CMD_RESP_TIME 800
+#define CMD_WR_TIME 5000
+#define MAKEWORD(a, b) ((unsigned short)(((unsigned char)(a)) \
+ | ((unsigned short)((unsigned char)(b))) << 8))
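
For instance, MAKEWORD(0x34, 0x12) evaluates to 0x34 | (0x12 << 8) = 0x1234, i.e. the first argument becomes the low byte and the second the high byte.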
+
+#define GPIO_HIGH 1
+#define GPIO_LOW 0
+
+/* The Power-On-Reset logic requires attempting to download
+ * firmware onto the chip more than once, since the chip's
+ * self-test takes a while
+ */
+#define POR_RETRY_COUNT 5
+
+/**
+ * struct chip_version - save the chip version
+ */
+struct chip_version {
+ unsigned short full;
+ unsigned short chip;
+ unsigned short min_ver;
+ unsigned short maj_ver;
+};
+
+#define UART_DEV_NAME_LEN 32
+/**
+ * struct kim_data_s - the KIM internal data, embedded as the
+ * platform's drv data. One for each ST device in the system.
+ * @uim_pid: KIM needs to communicate with UIM to request to install
+ * the ldisc by opening UART when protocol drivers register.
+ * @kim_pdev: the platform device added in one of the board-XX.c file
+ * in arch/XX/ directory, 1 for each ST device.
+ * @kim_rcvd: completion handler to notify when data was received,
+ * mainly used during fw download, which involves multiple send/wait
+ * for each of the HCI-VS commands.
+ * @ldisc_installed: completion handler to notify that the UIM accepted
+ * the request to install ldisc, notify from tty_open which suggests
+ * the ldisc was properly installed.
+ * @resp_buffer: data buffer for the .bts fw file name.
+ * @fw_entry: firmware class struct to request/release the fw.
+ * @rx_state: the rx state for kim's receive func during fw download.
+ * @rx_count: the rx count for the kim's receive func during fw download.
+ * @rx_skb: all of the fw data might not arrive at once, hence storage for
+ * the whole fw response; these are only HCI_EVENTs and hence differ from ST's
+ * response.
+ * @core_data: ST core's data, which mainly is the tty's disc_data
+ * @version: chip version available via a sysfs entry.
+ *
+ */
+struct kim_data_s {
+ long uim_pid;
+ struct platform_device *kim_pdev;
+ struct completion kim_rcvd, ldisc_installed;
+ char resp_buffer[30];
+ const struct firmware *fw_entry;
+ unsigned nshutdown;
+ unsigned long rx_state;
+ unsigned long rx_count;
+ struct sk_buff *rx_skb;
+ struct st_data_s *core_data;
+ struct chip_version version;
+ unsigned char ldisc_install;
+ unsigned char dev_name[UART_DEV_NAME_LEN + 1];
+ unsigned flow_cntrl;
+ unsigned baud_rate;
+};
+
+/**
+ * Functions called when one of the protocol drivers gets
+ * registered; these need to communicate with UIM to request
+ * ldisc installation, read the chip version and download the relevant fw.
+ */
+long st_kim_start(void *);
+long st_kim_stop(void *);
+
+void st_kim_complete(void *);
+void kim_st_list_protocols(struct st_data_s *, void *);
+void st_kim_recv(void *, const unsigned char *, long);
+
+
+/*
+ * BTS headers
+ */
+#define ACTION_SEND_COMMAND 1
+#define ACTION_WAIT_EVENT 2
+#define ACTION_SERIAL 3
+#define ACTION_DELAY 4
+#define ACTION_RUN_SCRIPT 5
+#define ACTION_REMARKS 6
+
+/**
+ * struct bts_header - the fw file is NOT a binary which can
+ * be sent onto the TTY as is. The .bts is more of a script
+ * file containing different types of actions.
+ * Each such action needs to be parsed by the KIM and the
+ * relevant procedure called.
+ */
+struct bts_header {
+ u32 magic;
+ u32 version;
+ u8 future[24];
+ u8 actions[0];
+} __attribute__ ((packed));
+
+/**
+ * struct bts_action - Each .bts action has its own type of
+ * data.
+ */
+struct bts_action {
+ u16 type;
+ u16 size;
+ u8 data[0];
+} __attribute__ ((packed));
+
+struct bts_action_send {
+ u8 data[0];
+} __attribute__ ((packed));
+
+struct bts_action_wait {
+ u32 msec;
+ u32 size;
+ u8 data[0];
+} __attribute__ ((packed));
+
+struct bts_action_delay {
+ u32 msec;
+} __attribute__ ((packed));
+
+struct bts_action_serial {
+ u32 baud;
+ u32 flow_control;
+} __attribute__ ((packed));
+
+/**
+ * struct hci_command - the HCI-VS command for interpreting
+ * the change of the host-side UART's baud rate, which
+ * needs to be ignored, since the UIM would do that
+ * when it receives the request from KIM for ldisc installation.
+ */
+struct hci_command {
+ u8 prefix;
+ u16 opcode;
+ u8 plen;
+ u32 speed;
+} __attribute__ ((packed));
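+
+/*
+ * Illustrative sketch (not part of the API): how a .bts script is
+ * typically walked. The firmware image begins with a struct bts_header,
+ * followed by a stream of variable-length struct bts_action entries;
+ * each action's payload is interpreted according to its type. The
+ * helpers send_to_tty() and wait_for_event() are placeholders.
+ *
+ *	const struct bts_header *hdr = (const void *)fw->data;
+ *	const u8 *ptr = hdr->actions;
+ *	const u8 *end = fw->data + fw->size;
+ *
+ *	while (ptr + sizeof(struct bts_action) <= end) {
+ *		const struct bts_action *act = (const void *)ptr;
+ *
+ *		switch (act->type) {
+ *		case ACTION_SEND_COMMAND:
+ *			send_to_tty(act->data, act->size);
+ *			break;
+ *		case ACTION_WAIT_EVENT:
+ *			wait_for_event((const void *)act->data);
+ *			break;
+ *		case ACTION_DELAY:
+ *			mdelay(((const struct bts_action_delay *)
+ *				act->data)->msec);
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *		ptr += sizeof(struct bts_action) + act->size;
+ *	}
+ */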
+
+/*
+ * header information used by st_ll.c
+ */
+
+/* ST LL receiver states */
+#define ST_W4_PACKET_TYPE 0
+#define ST_W4_HEADER 1
+#define ST_W4_DATA 2
+
+/* ST LL state machines */
+#define ST_LL_ASLEEP 0
+#define ST_LL_ASLEEP_TO_AWAKE 1
+#define ST_LL_AWAKE 2
+#define ST_LL_AWAKE_TO_ASLEEP 3
+#define ST_LL_INVALID 4
+
+/* different PM notifications coming from chip */
+#define LL_SLEEP_IND 0x30
+#define LL_SLEEP_ACK 0x31
+#define LL_WAKE_UP_IND 0x32
+#define LL_WAKE_UP_ACK 0x33
+
+/* initialize and de-init ST LL */
+long st_ll_init(struct st_data_s *);
+long st_ll_deinit(struct st_data_s *);
+
+/**
+ * enable/disable ST LL along with KIM start/stop
+ * called by ST Core
+ */
+void st_ll_enable(struct st_data_s *);
+void st_ll_disable(struct st_data_s *);
+
+/**
+ * various funcs used by ST core to set/get the various PM states
+ * of the chip.
+ */
+unsigned long st_ll_getstate(struct st_data_s *);
+unsigned long st_ll_sleep_state(struct st_data_s *, unsigned char);
+void st_ll_wakeup(struct st_data_s *);
+
+/*
+ * header information used by st_core.c for FM and GPS
+ * packet parsing, the bluetooth headers are already available
+ * at net/bluetooth/
+ */
+
+struct fm_event_hdr {
+ u8 plen;
+} __attribute__ ((packed));
+
+#define FM_MAX_FRAME_SIZE 0xFF /* TODO: */
+#define FM_EVENT_HDR_SIZE 1 /* size of fm_event_hdr */
+#define ST_FM_CH8_PKT 0x8
+
+/* gps stuff */
+struct gps_event_hdr {
+ u8 opcode;
+ u16 plen;
+} __attribute__ ((packed));
+
+/**
+ * struct ti_st_plat_data - platform data shared between ST driver and
+ * platform specific board file which adds the ST device.
+ * @nshutdown_gpio: Host's GPIO line to which chip's BT_EN is connected.
+ * @dev_name: The UART/TTY name to which the chip is interfaced
+ * (e.g. /dev/ttyS1).
+ * @flow_cntrl: Should always be 1, since the UART's CTS/RTS is used for PM
+ * purposes.
+ * @baud_rate: The baud rate supported by the Host UART controller; this will
+ * be shared with the chip via a HCI VS command from the User-Space Init
+ * Mgr application.
+ * @suspend:
+ * @resume: legacy PM routines hooked to the platform specific board file, so
+ * as to take chip-host interface specific action.
+ * @chip_enable:
+ * @chip_disable: Platform/Interface specific mux mode setting, GPIO
+ * configuring, Host side PM disabling etc. can be done here.
+ * @chip_asleep:
+ * @chip_awake: Chip specific deep sleep states are communicated to the Host
+ * specific board-xx.c to take actions such as cutting UART clocks when the
+ * chip is asleep or running the host faster when the chip is awake, etc.
+ *
+ */
+struct ti_st_plat_data {
+ u32 nshutdown_gpio;
+ unsigned char dev_name[UART_DEV_NAME_LEN]; /* uart name */
+ u32 flow_cntrl; /* flow control flag */
+ u32 baud_rate;
+ int (*suspend)(struct platform_device *, pm_message_t);
+ int (*resume)(struct platform_device *);
+ int (*chip_enable) (struct kim_data_s *);
+ int (*chip_disable) (struct kim_data_s *);
+ int (*chip_asleep) (struct kim_data_s *);
+ int (*chip_awake) (struct kim_data_s *);
+};
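+
+/*
+ * Illustrative sketch (not part of this header): how a board file might
+ * provide the platform data above. The device name "kim" and the
+ * GPIO/UART values are assumptions for the example only.
+ *
+ *	static struct ti_st_plat_data wilink_pdata = {
+ *		.nshutdown_gpio	= 137,
+ *		.dev_name	= "/dev/ttyS1",
+ *		.flow_cntrl	= 1,
+ *		.baud_rate	= 3000000,
+ *	};
+ *
+ *	static struct platform_device wilink_device = {
+ *		.name	= "kim",
+ *		.id	= -1,
+ *		.dev	= {
+ *			.platform_data = &wilink_pdata,
+ *		},
+ *	};
+ *
+ *	platform_device_register(&wilink_device);
+ */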
+
+#endif /* TI_WILINK_ST_H */
diff --git a/include/linux/tick.h b/include/linux/tick.h
new file mode 100644
index 000000000..f8492da57
--- /dev/null
+++ b/include/linux/tick.h
@@ -0,0 +1,182 @@
+/*
+ * Tick related global functions
+ */
+#ifndef _LINUX_TICK_H
+#define _LINUX_TICK_H
+
+#include <linux/clockchips.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/context_tracking_state.h>
+#include <linux/cpumask.h>
+#include <linux/sched.h>
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+extern void __init tick_init(void);
+extern void tick_freeze(void);
+extern void tick_unfreeze(void);
+/* Should be core only, but ARM BL switcher requires it */
+extern void tick_suspend_local(void);
+/* Should be core only, but XEN resume magic and ARM BL switcher require it */
+extern void tick_resume_local(void);
+extern void tick_handover_do_timer(void);
+extern void tick_cleanup_dead_cpu(int cpu);
+#else /* CONFIG_GENERIC_CLOCKEVENTS */
+static inline void tick_init(void) { }
+static inline void tick_freeze(void) { }
+static inline void tick_unfreeze(void) { }
+static inline void tick_suspend_local(void) { }
+static inline void tick_resume_local(void) { }
+static inline void tick_handover_do_timer(void) { }
+static inline void tick_cleanup_dead_cpu(int cpu) { }
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+
+#ifdef CONFIG_TICK_ONESHOT
+extern void tick_irq_enter(void);
+# ifndef arch_needs_cpu
+# define arch_needs_cpu() (0)
+# endif
+# else
+static inline void tick_irq_enter(void) { }
+#endif
+
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void hotplug_cpu__broadcast_tick_pull(int dead_cpu);
+#else
+static inline void hotplug_cpu__broadcast_tick_pull(int dead_cpu) { }
+#endif
+
+enum tick_broadcast_mode {
+ TICK_BROADCAST_OFF,
+ TICK_BROADCAST_ON,
+ TICK_BROADCAST_FORCE,
+};
+
+enum tick_broadcast_state {
+ TICK_BROADCAST_EXIT,
+ TICK_BROADCAST_ENTER,
+};
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+extern void tick_broadcast_control(enum tick_broadcast_mode mode);
+#else
+static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
+#endif /* BROADCAST */
+
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
+#else
+static inline int tick_broadcast_oneshot_control(enum tick_broadcast_state state) { return 0; }
+#endif
+
+static inline void tick_broadcast_enable(void)
+{
+ tick_broadcast_control(TICK_BROADCAST_ON);
+}
+static inline void tick_broadcast_disable(void)
+{
+ tick_broadcast_control(TICK_BROADCAST_OFF);
+}
+static inline void tick_broadcast_force(void)
+{
+ tick_broadcast_control(TICK_BROADCAST_FORCE);
+}
+static inline int tick_broadcast_enter(void)
+{
+ return tick_broadcast_oneshot_control(TICK_BROADCAST_ENTER);
+}
+static inline void tick_broadcast_exit(void)
+{
+ tick_broadcast_oneshot_control(TICK_BROADCAST_EXIT);
+}
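+
+/*
+ * Illustrative sketch (not kernel code): a cpuidle-style driver would
+ * typically bracket a deep idle state whose local timer stops with the
+ * oneshot broadcast helpers above; enter_deep_idle() is a placeholder.
+ *
+ *	if (tick_broadcast_enter()) {
+ *		// broadcast not possible, fall back to a shallower state
+ *		return -EBUSY;
+ *	}
+ *	enter_deep_idle();
+ *	tick_broadcast_exit();
+ */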
+
+#ifdef CONFIG_NO_HZ_COMMON
+extern int tick_nohz_tick_stopped(void);
+extern void tick_nohz_idle_enter(void);
+extern void tick_nohz_idle_exit(void);
+extern void tick_nohz_irq_exit(void);
+extern ktime_t tick_nohz_get_sleep_length(void);
+extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
+extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline int tick_nohz_tick_stopped(void) { return 0; }
+static inline void tick_nohz_idle_enter(void) { }
+static inline void tick_nohz_idle_exit(void) { }
+
+static inline ktime_t tick_nohz_get_sleep_length(void)
+{
+ ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
+
+ return len;
+}
+static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
+static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
+#endif /* !CONFIG_NO_HZ_COMMON */
+
+#ifdef CONFIG_NO_HZ_FULL
+extern bool tick_nohz_full_running;
+extern cpumask_var_t tick_nohz_full_mask;
+extern cpumask_var_t housekeeping_mask;
+
+static inline bool tick_nohz_full_enabled(void)
+{
+ if (!context_tracking_is_enabled())
+ return false;
+
+ return tick_nohz_full_running;
+}
+
+static inline bool tick_nohz_full_cpu(int cpu)
+{
+ if (!tick_nohz_full_enabled())
+ return false;
+
+ return cpumask_test_cpu(cpu, tick_nohz_full_mask);
+}
+
+extern void __tick_nohz_full_check(void);
+extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
+extern void tick_nohz_full_kick_all(void);
+extern void __tick_nohz_task_switch(struct task_struct *tsk);
+#else
+static inline bool tick_nohz_full_enabled(void) { return false; }
+static inline bool tick_nohz_full_cpu(int cpu) { return false; }
+static inline void __tick_nohz_full_check(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
+static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_all(void) { }
+static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
+#endif
+
+static inline bool is_housekeeping_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ_FULL
+ if (tick_nohz_full_enabled())
+ return cpumask_test_cpu(cpu, housekeeping_mask);
+#endif
+ return true;
+}
+
+static inline void housekeeping_affine(struct task_struct *t)
+{
+#ifdef CONFIG_NO_HZ_FULL
+ if (tick_nohz_full_enabled())
+ set_cpus_allowed_ptr(t, housekeeping_mask);
+
+#endif
+}
+
+static inline void tick_nohz_full_check(void)
+{
+ if (tick_nohz_full_enabled())
+ __tick_nohz_full_check();
+}
+
+static inline void tick_nohz_task_switch(struct task_struct *tsk)
+{
+ if (tick_nohz_full_enabled())
+ __tick_nohz_task_switch(tsk);
+}
+
+#endif
diff --git a/include/linux/tifm.h b/include/linux/tifm.h
new file mode 100644
index 000000000..848c0f392
--- /dev/null
+++ b/include/linux/tifm.h
@@ -0,0 +1,164 @@
+/*
+ * tifm.h - TI FlashMedia driver
+ *
+ * Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _TIFM_H
+#define _TIFM_H
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+
+/* Host registers (relative to pci base address): */
+enum {
+ FM_SET_INTERRUPT_ENABLE = 0x008,
+ FM_CLEAR_INTERRUPT_ENABLE = 0x00c,
+ FM_INTERRUPT_STATUS = 0x014
+};
+
+/* Socket registers (relative to socket base address): */
+enum {
+ SOCK_CONTROL = 0x004,
+ SOCK_PRESENT_STATE = 0x008,
+ SOCK_DMA_ADDRESS = 0x00c,
+ SOCK_DMA_CONTROL = 0x010,
+ SOCK_DMA_FIFO_INT_ENABLE_SET = 0x014,
+ SOCK_DMA_FIFO_INT_ENABLE_CLEAR = 0x018,
+ SOCK_DMA_FIFO_STATUS = 0x020,
+ SOCK_FIFO_CONTROL = 0x024,
+ SOCK_FIFO_PAGE_SIZE = 0x028,
+ SOCK_MMCSD_COMMAND = 0x104,
+ SOCK_MMCSD_ARG_LOW = 0x108,
+ SOCK_MMCSD_ARG_HIGH = 0x10c,
+ SOCK_MMCSD_CONFIG = 0x110,
+ SOCK_MMCSD_STATUS = 0x114,
+ SOCK_MMCSD_INT_ENABLE = 0x118,
+ SOCK_MMCSD_COMMAND_TO = 0x11c,
+ SOCK_MMCSD_DATA_TO = 0x120,
+ SOCK_MMCSD_DATA = 0x124,
+ SOCK_MMCSD_BLOCK_LEN = 0x128,
+ SOCK_MMCSD_NUM_BLOCKS = 0x12c,
+ SOCK_MMCSD_BUFFER_CONFIG = 0x130,
+ SOCK_MMCSD_SPI_CONFIG = 0x134,
+ SOCK_MMCSD_SDIO_MODE_CONFIG = 0x138,
+ SOCK_MMCSD_RESPONSE = 0x144,
+ SOCK_MMCSD_SDIO_SR = 0x164,
+ SOCK_MMCSD_SYSTEM_CONTROL = 0x168,
+ SOCK_MMCSD_SYSTEM_STATUS = 0x16c,
+ SOCK_MS_COMMAND = 0x184,
+ SOCK_MS_DATA = 0x188,
+ SOCK_MS_STATUS = 0x18c,
+ SOCK_MS_SYSTEM = 0x190,
+ SOCK_FIFO_ACCESS = 0x200
+};
+
+#define TIFM_CTRL_LED 0x00000040
+#define TIFM_CTRL_FAST_CLK 0x00000100
+#define TIFM_CTRL_POWER_MASK 0x00000007
+
+#define TIFM_SOCK_STATE_OCCUPIED 0x00000008
+#define TIFM_SOCK_STATE_POWERED 0x00000080
+
+#define TIFM_FIFO_ENABLE 0x00000001
+#define TIFM_FIFO_READY 0x00000001
+#define TIFM_FIFO_MORE 0x00000008
+#define TIFM_FIFO_INT_SETALL 0x0000ffff
+#define TIFM_FIFO_INTMASK 0x00000005
+
+#define TIFM_DMA_RESET 0x00000002
+#define TIFM_DMA_TX 0x00008000
+#define TIFM_DMA_EN 0x00000001
+#define TIFM_DMA_TSIZE 0x0000007f
+
+#define TIFM_TYPE_XD 1
+#define TIFM_TYPE_MS 2
+#define TIFM_TYPE_SD 3
+
+struct tifm_device_id {
+ unsigned char type;
+};
+
+struct tifm_driver;
+struct tifm_dev {
+ char __iomem *addr;
+ spinlock_t lock;
+ unsigned char type;
+ unsigned int socket_id;
+
+ void (*card_event)(struct tifm_dev *sock);
+ void (*data_event)(struct tifm_dev *sock);
+
+ struct device dev;
+};
+
+struct tifm_driver {
+ struct tifm_device_id *id_table;
+ int (*probe)(struct tifm_dev *dev);
+ void (*remove)(struct tifm_dev *dev);
+ int (*suspend)(struct tifm_dev *dev,
+ pm_message_t state);
+ int (*resume)(struct tifm_dev *dev);
+
+ struct device_driver driver;
+};
+
+struct tifm_adapter {
+ char __iomem *addr;
+ spinlock_t lock;
+ unsigned int irq_status;
+ unsigned int socket_change_set;
+ unsigned int id;
+ unsigned int num_sockets;
+ struct completion *finish_me;
+
+ struct work_struct media_switcher;
+ struct device dev;
+
+ void (*eject)(struct tifm_adapter *fm,
+ struct tifm_dev *sock);
+ int (*has_ms_pif)(struct tifm_adapter *fm,
+ struct tifm_dev *sock);
+
+ struct tifm_dev *sockets[0];
+};
+
+struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
+ struct device *dev);
+int tifm_add_adapter(struct tifm_adapter *fm);
+void tifm_remove_adapter(struct tifm_adapter *fm);
+void tifm_free_adapter(struct tifm_adapter *fm);
+
+void tifm_free_device(struct device *dev);
+struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id,
+ unsigned char type);
+
+int tifm_register_driver(struct tifm_driver *drv);
+void tifm_unregister_driver(struct tifm_driver *drv);
+void tifm_eject(struct tifm_dev *sock);
+int tifm_has_ms_pif(struct tifm_dev *sock);
+int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
+ int direction);
+void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
+ int direction);
+void tifm_queue_work(struct work_struct *work);
+
+static inline void *tifm_get_drvdata(struct tifm_dev *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+static inline void tifm_set_drvdata(struct tifm_dev *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
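+
+/*
+ * Illustrative sketch (not part of the header): the skeleton of a media
+ * driver sitting on top of this bus layer. The probe/remove bodies and
+ * the driver name are placeholders.
+ *
+ *	static struct tifm_device_id my_ids[] = {
+ *		{ TIFM_TYPE_SD },
+ *		{ }
+ *	};
+ *
+ *	static int my_probe(struct tifm_dev *sock)
+ *	{
+ *		// set sock->card_event / sock->data_event handlers and
+ *		// stash private state with tifm_set_drvdata(sock, ...)
+ *		return 0;
+ *	}
+ *
+ *	static void my_remove(struct tifm_dev *sock)
+ *	{
+ *		// tear down whatever my_probe() set up
+ *	}
+ *
+ *	static struct tifm_driver my_driver = {
+ *		.id_table = my_ids,
+ *		.probe	= my_probe,
+ *		.remove	= my_remove,
+ *		.driver	= { .name = "tifm_example" },
+ *	};
+ *
+ *	tifm_register_driver(&my_driver);
+ *	...
+ *	tifm_unregister_driver(&my_driver);
+ */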
+
+#endif
diff --git a/include/linux/timb_dma.h b/include/linux/timb_dma.h
new file mode 100644
index 000000000..bb043e970
--- /dev/null
+++ b/include/linux/timb_dma.h
@@ -0,0 +1,55 @@
+/*
+ * timb_dma.h timberdale FPGA DMA driver defines
+ * Copyright (c) 2010 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* Supports:
+ * Timberdale FPGA DMA engine
+ */
+
+#ifndef _LINUX_TIMB_DMA_H
+#define _LINUX_TIMB_DMA_H
+
+/**
+ * struct timb_dma_platform_data_channel - Description of each individual
+ * DMA channel for the timberdale DMA driver
+ * @rx: true if this channel handles data in the direction to
+ * the CPU.
+ * @bytes_per_line: Number of bytes per line, this is specific for channels
+ * handling video data. For other channels this shall be left to 0.
+ * @descriptors: Number of descriptors to allocate for this channel.
+ * @descriptor_elements: Number of elements in each descriptor.
+ *
+ */
+struct timb_dma_platform_data_channel {
+ bool rx;
+ unsigned int bytes_per_line;
+ unsigned int descriptors;
+ unsigned int descriptor_elements;
+};
+
+/**
+ * struct timb_dma_platform_data - Platform data of the timberdale DMA driver
+ * @nr_channels: Number of defined channels in the channels array.
+ * @channels: Definition of each channel.
+ *
+ */
+struct timb_dma_platform_data {
+ unsigned nr_channels;
+ struct timb_dma_platform_data_channel channels[32];
+};
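+
+/*
+ * Illustrative sketch (the channel layout is an assumption): describing
+ * two DMA channels, an RX video channel with a line length and a plain
+ * TX channel, to be handed to the timb-dma device as platform data.
+ *
+ *	static struct timb_dma_platform_data timb_dma_pdata = {
+ *		.nr_channels = 2,
+ *		.channels = {
+ *			{
+ *				.rx = true,
+ *				.bytes_per_line = 1440,
+ *				.descriptors = 2,
+ *				.descriptor_elements = 1,
+ *			},
+ *			{
+ *				.rx = false,
+ *				.descriptors = 2,
+ *				.descriptor_elements = 1,
+ *			},
+ *		},
+ *	};
+ */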
+
+#endif
diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h
new file mode 100644
index 000000000..ce456eaae
--- /dev/null
+++ b/include/linux/timb_gpio.h
@@ -0,0 +1,37 @@
+/*
+ * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
+ * Copyright (c) 2009 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_TIMB_GPIO_H
+#define _LINUX_TIMB_GPIO_H
+
+/**
+ * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver
+ * @gpio_base: The number of the first GPIO pin; set to -1 for
+ * dynamic number allocation.
+ * @nr_pins: Number of pins that are supported by the hardware (1-32).
+ * @irq_base: If IRQs are supported by the hardware, this is the base
+ * IRQ number. One IRQ per pin will be used. Set to
+ * -1 if IRQs are not supported.
+ */
+struct timbgpio_platform_data {
+ int gpio_base;
+ int nr_pins;
+ int irq_base;
+};
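+
+/*
+ * Illustrative sketch (values are assumptions): a board file could
+ * describe the Timberdale GPIO block like this and attach it to the
+ * corresponding platform/MFD device as platform data.
+ *
+ *	static struct timbgpio_platform_data timb_gpio_pdata = {
+ *		.gpio_base	= -1,	// let gpiolib pick the base
+ *		.nr_pins	= 32,
+ *		.irq_base	= -1,	// no GPIO interrupts wired up
+ *	};
+ */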
+
+#endif
diff --git a/include/linux/time.h b/include/linux/time.h
new file mode 100644
index 000000000..beebe3a02
--- /dev/null
+++ b/include/linux/time.h
@@ -0,0 +1,239 @@
+#ifndef _LINUX_TIME_H
+#define _LINUX_TIME_H
+
+# include <linux/cache.h>
+# include <linux/seqlock.h>
+# include <linux/math64.h>
+# include <linux/time64.h>
+
+extern struct timezone sys_tz;
+
+#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
+
+static inline int timespec_equal(const struct timespec *a,
+ const struct timespec *b)
+{
+ return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
+}
+
+/*
+ * lhs < rhs: return <0
+ * lhs == rhs: return 0
+ * lhs > rhs: return >0
+ */
+static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
+{
+ if (lhs->tv_sec < rhs->tv_sec)
+ return -1;
+ if (lhs->tv_sec > rhs->tv_sec)
+ return 1;
+ return lhs->tv_nsec - rhs->tv_nsec;
+}
+
+static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
+{
+ if (lhs->tv_sec < rhs->tv_sec)
+ return -1;
+ if (lhs->tv_sec > rhs->tv_sec)
+ return 1;
+ return lhs->tv_usec - rhs->tv_usec;
+}
+
+extern time64_t mktime64(const unsigned int year, const unsigned int mon,
+ const unsigned int day, const unsigned int hour,
+ const unsigned int min, const unsigned int sec);
+
+/**
+ * Deprecated. Use mktime64().
+ */
+static inline unsigned long mktime(const unsigned int year,
+ const unsigned int mon, const unsigned int day,
+ const unsigned int hour, const unsigned int min,
+ const unsigned int sec)
+{
+ return mktime64(year, mon, day, hour, min, sec);
+}
+
+extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
+
+/*
+ * timespec_add_safe assumes both values are positive and checks
+ * for overflow. It will return TIME_T_MAX if the return would be
+ * smaller than either of the arguments.
+ */
+extern struct timespec timespec_add_safe(const struct timespec lhs,
+ const struct timespec rhs);
+
+
+static inline struct timespec timespec_add(struct timespec lhs,
+ struct timespec rhs)
+{
+ struct timespec ts_delta;
+ set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
+ lhs.tv_nsec + rhs.tv_nsec);
+ return ts_delta;
+}
+
+/*
+ * sub = lhs - rhs, in normalized form
+ */
+static inline struct timespec timespec_sub(struct timespec lhs,
+ struct timespec rhs)
+{
+ struct timespec ts_delta;
+ set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
+ lhs.tv_nsec - rhs.tv_nsec);
+ return ts_delta;
+}
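+
+/*
+ * Illustrative sketch: measuring an elapsed interval with the helpers
+ * above (getnstimeofday() is declared in linux/timekeeping.h;
+ * do_something() is a placeholder).
+ *
+ *	struct timespec start, end, delta;
+ *
+ *	getnstimeofday(&start);
+ *	do_something();
+ *	getnstimeofday(&end);
+ *	delta = timespec_sub(end, start);
+ *	pr_info("took %lld.%09ld s\n",
+ *		(long long)delta.tv_sec, delta.tv_nsec);
+ */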
+
+/*
+ * Returns true if the timespec is normalized, false if denormalized:
+ */
+static inline bool timespec_valid(const struct timespec *ts)
+{
+ /* Dates before 1970 are bogus */
+ if (ts->tv_sec < 0)
+ return false;
+ /* Can't have more nanoseconds than a second */
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return false;
+ return true;
+}
+
+static inline bool timespec_valid_strict(const struct timespec *ts)
+{
+ if (!timespec_valid(ts))
+ return false;
+ /* Disallow values that could overflow ktime_t */
+ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+ return false;
+ return true;
+}
+
+static inline bool timeval_valid(const struct timeval *tv)
+{
+ /* Dates before 1970 are bogus */
+ if (tv->tv_sec < 0)
+ return false;
+
+ /* Can't have more microseconds than a second */
+ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+ return false;
+
+ return true;
+}
+
+extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
+
+#define CURRENT_TIME (current_kernel_time())
+#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
+
+/* Some architectures do not supply their own clocksource.
+ * This is mainly the case in architectures that get their
+ * inter-tick times by reading the counter on their interval
+ * timer. Since these timers wrap every tick, they're not really
+ * useful as clocksources. Wrapping them to act like one is possible
+ * but not very efficient. So we provide a callout which these arches
+ * can implement for use with the jiffies clocksource to provide
+ * finer than tick granular time.
+ */
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+extern u32 (*arch_gettimeoffset)(void);
+#endif
+
+struct itimerval;
+extern int do_setitimer(int which, struct itimerval *value,
+ struct itimerval *ovalue);
+extern int do_getitimer(int which, struct itimerval *value);
+
+extern unsigned int alarm_setitimer(unsigned int seconds);
+
+extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
+
+struct tms;
+extern void do_sys_times(struct tms *);
+
+/*
+ * Similar to the struct tm in userspace <time.h>, but it needs to be here so
+ * that the kernel source is self-contained.
+ */
+struct tm {
+ /*
+ * the number of seconds after the minute, normally in the range
+ * 0 to 59, but can be up to 60 to allow for leap seconds
+ */
+ int tm_sec;
+ /* the number of minutes after the hour, in the range 0 to 59 */
+ int tm_min;
+ /* the number of hours past midnight, in the range 0 to 23 */
+ int tm_hour;
+ /* the day of the month, in the range 1 to 31 */
+ int tm_mday;
+ /* the number of months since January, in the range 0 to 11 */
+ int tm_mon;
+ /* the number of years since 1900 */
+ long tm_year;
+ /* the number of days since Sunday, in the range 0 to 6 */
+ int tm_wday;
+ /* the number of days since January 1, in the range 0 to 365 */
+ int tm_yday;
+};
+
+void time_to_tm(time_t totalsecs, int offset, struct tm *result);
+
+/**
+ * timespec_to_ns - Convert timespec to nanoseconds
+ * @ts: pointer to the timespec variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timespec
+ * parameter.
+ */
+static inline s64 timespec_to_ns(const struct timespec *ts)
+{
+ return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+/**
+ * timeval_to_ns - Convert timeval to nanoseconds
+ * @tv: pointer to the timeval variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timeval
+ * parameter.
+ */
+static inline s64 timeval_to_ns(const struct timeval *tv)
+{
+ return ((s64) tv->tv_sec * NSEC_PER_SEC) +
+ tv->tv_usec * NSEC_PER_USEC;
+}
+
+/**
+ * ns_to_timespec - Convert nanoseconds to timespec
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timespec representation of the nsec parameter.
+ */
+extern struct timespec ns_to_timespec(const s64 nsec);
+
+/**
+ * ns_to_timeval - Convert nanoseconds to timeval
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timeval representation of the nsec parameter.
+ */
+extern struct timeval ns_to_timeval(const s64 nsec);
+
+/**
+ * timespec_add_ns - Adds nanoseconds to a timespec
+ * @a: pointer to timespec to be incremented
+ * @ns: unsigned nanoseconds value to be added
+ *
+ * This must always be inlined because it's used from the x86-64 vdso,
+ * which cannot call other kernel functions.
+ */
+static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
+{
+ a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
+ a->tv_nsec = ns;
+}
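+
+/*
+ * Illustrative sketch: advancing a timestamp by a sub-second amount
+ * while keeping tv_nsec normalized.
+ *
+ *	struct timespec ts = { .tv_sec = 10, .tv_nsec = 900000000 };
+ *
+ *	timespec_add_ns(&ts, 250 * NSEC_PER_MSEC);
+ *	// ts is now { .tv_sec = 11, .tv_nsec = 150000000 }
+ */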
+
+#endif
diff --git a/include/linux/time64.h b/include/linux/time64.h
new file mode 100644
index 000000000..a3831478d
--- /dev/null
+++ b/include/linux/time64.h
@@ -0,0 +1,190 @@
+#ifndef _LINUX_TIME64_H
+#define _LINUX_TIME64_H
+
+#include <uapi/linux/time.h>
+
+typedef __s64 time64_t;
+
+/*
+ * This wants to go into uapi/linux/time.h once we have agreed on the
+ * userspace interfaces.
+ */
+#if __BITS_PER_LONG == 64
+# define timespec64 timespec
+#else
+struct timespec64 {
+ time64_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+#endif
+
+/* Parameters used to convert the timespec values: */
+#define MSEC_PER_SEC 1000L
+#define USEC_PER_MSEC 1000L
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_MSEC 1000000L
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+#define FSEC_PER_SEC 1000000000000000LL
+
+/* Located here for timespec[64]_valid_strict */
+#define KTIME_MAX ((s64)~((u64)1 << 63))
+#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+
+#if __BITS_PER_LONG == 64
+
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+ return ts64;
+}
+
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+ return ts;
+}
+
+# define timespec64_equal timespec_equal
+# define timespec64_compare timespec_compare
+# define set_normalized_timespec64 set_normalized_timespec
+# define timespec64_add_safe timespec_add_safe
+# define timespec64_add timespec_add
+# define timespec64_sub timespec_sub
+# define timespec64_valid timespec_valid
+# define timespec64_valid_strict timespec_valid_strict
+# define timespec64_to_ns timespec_to_ns
+# define ns_to_timespec64 ns_to_timespec
+# define timespec64_add_ns timespec_add_ns
+
+#else
+
+static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
+{
+ struct timespec ret;
+
+ ret.tv_sec = (time_t)ts64.tv_sec;
+ ret.tv_nsec = ts64.tv_nsec;
+ return ret;
+}
+
+static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
+{
+ struct timespec64 ret;
+
+ ret.tv_sec = ts.tv_sec;
+ ret.tv_nsec = ts.tv_nsec;
+ return ret;
+}
+
+static inline int timespec64_equal(const struct timespec64 *a,
+ const struct timespec64 *b)
+{
+ return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
+}
+
+/*
+ * lhs < rhs: return <0
+ * lhs == rhs: return 0
+ * lhs > rhs: return >0
+ */
+static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
+{
+ if (lhs->tv_sec < rhs->tv_sec)
+ return -1;
+ if (lhs->tv_sec > rhs->tv_sec)
+ return 1;
+ return lhs->tv_nsec - rhs->tv_nsec;
+}
+
+extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
+
+/*
+ * timespec64_add_safe assumes both values are positive and checks for
+ * overflow. It will return TIME_T_MAX if the returned value would be
+ * smaller than either of the arguments.
+ */
+extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
+ const struct timespec64 rhs);
+
+
+static inline struct timespec64 timespec64_add(struct timespec64 lhs,
+ struct timespec64 rhs)
+{
+ struct timespec64 ts_delta;
+ set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
+ lhs.tv_nsec + rhs.tv_nsec);
+ return ts_delta;
+}
+
+/*
+ * sub = lhs - rhs, in normalized form
+ */
+static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
+ struct timespec64 rhs)
+{
+ struct timespec64 ts_delta;
+ set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
+ lhs.tv_nsec - rhs.tv_nsec);
+ return ts_delta;
+}
+
+/*
+ * Returns true if the timespec64 is normalized, false if denormalized:
+ */
+static inline bool timespec64_valid(const struct timespec64 *ts)
+{
+ /* Dates before 1970 are bogus */
+ if (ts->tv_sec < 0)
+ return false;
+ /* Can't have more nanoseconds than a second */
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return false;
+ return true;
+}
+
+static inline bool timespec64_valid_strict(const struct timespec64 *ts)
+{
+ if (!timespec64_valid(ts))
+ return false;
+ /* Disallow values that could overflow ktime_t */
+ if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
+ return false;
+ return true;
+}
+
+/**
+ * timespec64_to_ns - Convert timespec64 to nanoseconds
+ * @ts: pointer to the timespec64 variable to be converted
+ *
+ * Returns the scalar nanosecond representation of the timespec64
+ * parameter.
+ */
+static inline s64 timespec64_to_ns(const struct timespec64 *ts)
+{
+ return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+/**
+ * ns_to_timespec64 - Convert nanoseconds to timespec64
+ * @nsec: the nanoseconds value to be converted
+ *
+ * Returns the timespec64 representation of the nsec parameter.
+ */
+extern struct timespec64 ns_to_timespec64(const s64 nsec);
+
+/**
+ * timespec64_add_ns - Adds nanoseconds to a timespec64
+ * @a: pointer to timespec64 to be incremented
+ * @ns: unsigned nanoseconds value to be added
+ *
+ * This must always be inlined because it's used from the x86-64 vdso,
+ * which cannot call other kernel functions.
+ */
+static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
+{
+ a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
+ a->tv_nsec = ns;
+}
+
+#endif
+
+#endif /* _LINUX_TIME64_H */
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
new file mode 100644
index 000000000..4382035a7
--- /dev/null
+++ b/include/linux/timecounter.h
@@ -0,0 +1,139 @@
+/*
+ * linux/include/linux/timecounter.h
+ *
+ * based on code that migrated away from
+ * linux/include/linux/clocksource.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _LINUX_TIMECOUNTER_H
+#define _LINUX_TIMECOUNTER_H
+
+#include <linux/types.h>
+
+/* simplify initialization of mask field */
+#define CYCLECOUNTER_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+
+/**
+ * struct cyclecounter - hardware abstraction for a free running counter
+ * Provides completely state-free accessors to the underlying hardware.
+ * Depending on which hardware it reads, the cycle counter may wrap
+ * around quickly. Locking rules (if necessary) have to be defined
+ * by the implementor and user of specific instances of this API.
+ *
+ * @read: returns the current cycle value
+ * @mask: bitmask for two's complement
+ * subtraction of non 64 bit counters,
+ * see CYCLECOUNTER_MASK() helper macro
+ * @mult: cycle to nanosecond multiplier
+ * @shift: cycle to nanosecond divisor (power of two)
+ */
+struct cyclecounter {
+ cycle_t (*read)(const struct cyclecounter *cc);
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+};
+
+/**
+ * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
+ * Contains the state needed by timecounter_read() to detect
+ * cycle counter wrap around. Initialize with
+ * timecounter_init(). Also used to convert cycle counts into the
+ * corresponding nanosecond counts with timecounter_cyc2time(). Users
+ * of this code are responsible for initializing the underlying
+ * cycle counter hardware, locking issues and reading the time
+ * more often than the cycle counter wraps around. The nanosecond
+ * counter will only wrap around after ~585 years.
+ *
+ * @cc: the cycle counter used by this instance
+ * @cycle_last: most recent cycle counter value seen by
+ * timecounter_read()
+ * @nsec: continuously increasing count
+ * @mask: bit mask for maintaining the 'frac' field
+ * @frac: accumulated fractional nanoseconds
+ */
+struct timecounter {
+ const struct cyclecounter *cc;
+ cycle_t cycle_last;
+ u64 nsec;
+ u64 mask;
+ u64 frac;
+};
+
+/**
+ * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
+ * @cc: Pointer to cycle counter.
+ * @cycles: Cycles
+ * @mask: bit mask for maintaining the 'frac' field
+ * @frac: pointer to storage for the fractional nanoseconds.
+ */
+static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
+ cycle_t cycles, u64 mask, u64 *frac)
+{
+ u64 ns = (u64) cycles;
+
+ ns = (ns * cc->mult) + *frac;
+ *frac = ns & mask;
+ return ns >> cc->shift;
+}
+
+/**
+ * timecounter_adjtime - Shifts the time of the clock.
+ * @tc: The &struct timecounter to adjust.
+ * @delta: Desired change in nanoseconds.
+ */
+static inline void timecounter_adjtime(struct timecounter *tc, s64 delta)
+{
+ tc->nsec += delta;
+}
+
+/**
+ * timecounter_init - initialize a time counter
+ * @tc: Pointer to time counter which is to be initialized/reset
+ * @cc: A cycle counter, ready to be used.
+ * @start_tstamp: Arbitrary initial time stamp.
+ *
+ * After this call the current cycle register (roughly) corresponds to
+ * the initial time stamp. Every call to timecounter_read() increments
+ * the time stamp counter by the number of elapsed nanoseconds.
+ */
+extern void timecounter_init(struct timecounter *tc,
+ const struct cyclecounter *cc,
+ u64 start_tstamp);
+
+/**
+ * timecounter_read - return nanoseconds elapsed since timecounter_init()
+ * plus the initial time stamp
+ * @tc: Pointer to time counter.
+ *
+ * In other words, keeps track of time since the same epoch as
+ * the function which generated the initial time stamp.
+ */
+extern u64 timecounter_read(struct timecounter *tc);
+
+/**
+ * timecounter_cyc2time - convert a cycle counter to same
+ * time base as values returned by
+ * timecounter_read()
+ * @tc: Pointer to time counter.
+ * @cycle_tstamp: a value returned by tc->cc->read()
+ *
+ * Cycle counts are converted correctly as long as they
+ * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
+ * with "max cycle count" == cs->mask+1.
+ *
+ * This allows conversion of cycle counter values which were generated
+ * in the past.
+ */
+extern u64 timecounter_cyc2time(struct timecounter *tc,
+ cycle_t cycle_tstamp);
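+
+/*
+ * Illustrative sketch (hardware access is a placeholder): wiring a
+ * free-running hardware counter into a timecounter. my_hw_read() and
+ * the mult/shift values are assumptions for the example.
+ *
+ *	static cycle_t my_hw_read(const struct cyclecounter *cc)
+ *	{
+ *		return read_hw_counter();	// device specific
+ *	}
+ *
+ *	static const struct cyclecounter my_cc = {
+ *		.read	= my_hw_read,
+ *		.mask	= CYCLECOUNTER_MASK(48),
+ *		.mult	= 1 << 10,	// here: cycles * mult >> shift == ns
+ *		.shift	= 10,
+ *	};
+ *
+ *	struct timecounter my_tc;
+ *
+ *	timecounter_init(&my_tc, &my_cc, ktime_get_real_ns());
+ *	...
+ *	u64 now_ns = timecounter_read(&my_tc);
+ *	u64 stamp_ns = timecounter_cyc2time(&my_tc, hw_timestamp);
+ */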
+
+#endif
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
new file mode 100644
index 000000000..fb8696385
--- /dev/null
+++ b/include/linux/timekeeper_internal.h
@@ -0,0 +1,131 @@
+/*
+ * You SHOULD NOT be including this unless you're vsyscall
+ * handling code or timekeeping internal code!
+ */
+
+#ifndef _LINUX_TIMEKEEPER_INTERNAL_H
+#define _LINUX_TIMEKEEPER_INTERNAL_H
+
+#include <linux/clocksource.h>
+#include <linux/jiffies.h>
+#include <linux/time.h>
+
+/**
+ * struct tk_read_base - base structure for timekeeping readout
+ * @clock: Current clocksource used for timekeeping.
+ * @read: Read function of @clock
+ * @mask: Bitmask for two's complement subtraction of non 64bit clocks
+ * @cycle_last: @clock cycle value at last update
+ * @mult: (NTP adjusted) multiplier for scaled math conversion
+ * @shift: Shift value for scaled math conversion
+ * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
+ * @base: ktime_t (nanoseconds) base time for readout
+ *
+ * This struct has a size of 56 bytes on 64 bit. Together with a seqcount it
+ * occupies a single 64-byte cache line.
+ *
+ * The struct is separate from struct timekeeper as it is also used
+ * for the fast NMI-safe accessors.
+ */
+struct tk_read_base {
+ struct clocksource *clock;
+ cycle_t (*read)(struct clocksource *cs);
+ cycle_t mask;
+ cycle_t cycle_last;
+ u32 mult;
+ u32 shift;
+ u64 xtime_nsec;
+ ktime_t base;
+};
+
+/**
+ * struct timekeeper - Structure holding internal timekeeping values.
+ * @tkr_mono: The readout base structure for CLOCK_MONOTONIC
+ * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW
+ * @xtime_sec: Current CLOCK_REALTIME time in seconds
+ * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
+ * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
+ * @offs_real: Offset clock monotonic -> clock realtime
+ * @offs_boot: Offset clock monotonic -> clock boottime
+ * @offs_tai: Offset clock monotonic -> clock tai
+ * @tai_offset: The current UTC to TAI offset in seconds
+ * @raw_time: Monotonic raw base time in timespec64 format
+ * @cycle_interval: Number of clock cycles in one NTP interval
+ * @xtime_interval: Number of clock shifted nano seconds in one NTP
+ * interval.
+ * @xtime_remainder: Shifted nano seconds left over when rounding
+ * @cycle_interval
+ * @raw_interval: Raw nano seconds accumulated per NTP interval.
+ * @ntp_error: Difference between accumulated time and NTP time in ntp
+ * shifted nano seconds.
+ * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
+ * ntp shifted nano seconds.
+ *
+ * Note: For timespec(64) based interfaces wall_to_monotonic is what
+ * we need to add to xtime (or xtime corrected for sub jiffie times)
+ * to get to monotonic time. Monotonic is pegged at zero at system
+ * boot time, so wall_to_monotonic will be negative, however, we will
+ * ALWAYS keep the tv_nsec part positive so we can use the usual
+ * normalization.
+ *
+ * wall_to_monotonic is moved after resume from suspend for the
+ * monotonic time not to jump. We need to add total_sleep_time to
+ * wall_to_monotonic to get the real boot based time offset.
+ *
+ * wall_to_monotonic is no longer the boot time, getboottime must be
+ * used instead.
+ */
+struct timekeeper {
+ struct tk_read_base tkr_mono;
+ struct tk_read_base tkr_raw;
+ u64 xtime_sec;
+ unsigned long ktime_sec;
+ struct timespec64 wall_to_monotonic;
+ ktime_t offs_real;
+ ktime_t offs_boot;
+ ktime_t offs_tai;
+ s32 tai_offset;
+ struct timespec64 raw_time;
+
+ /* The following members are for timekeeping internal use */
+ cycle_t cycle_interval;
+ u64 xtime_interval;
+ s64 xtime_remainder;
+ u32 raw_interval;
+ /* The ntp_tick_length() value currently being used.
+ * This cached copy ensures we consistently apply the tick
+ * length for an entire tick, as ntp_tick_length may change
+ * mid-tick, and we don't want to apply that new value to
+ * the tick in progress.
+ */
+ u64 ntp_tick;
+ /* Difference between accumulated time and NTP time in ntp
+ * shifted nano seconds. */
+ s64 ntp_error;
+ u32 ntp_error_shift;
+ u32 ntp_err_mult;
+};
+
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
+
+extern void update_vsyscall(struct timekeeper *tk);
+extern void update_vsyscall_tz(void);
+
+#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
+
+extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
+ struct clocksource *c, u32 mult,
+ cycle_t cycle_last);
+extern void update_vsyscall_tz(void);
+
+#else
+
+static inline void update_vsyscall(struct timekeeper *tk)
+{
+}
+static inline void update_vsyscall_tz(void)
+{
+}
+#endif
+
+#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
new file mode 100644
index 000000000..99176af21
--- /dev/null
+++ b/include/linux/timekeeping.h
@@ -0,0 +1,275 @@
+#ifndef _LINUX_TIMEKEEPING_H
+#define _LINUX_TIMEKEEPING_H
+
+/* Included from linux/ktime.h */
+
+void timekeeping_init(void);
+extern int timekeeping_suspended;
+
+/*
+ * Get and set timeofday
+ */
+extern void do_gettimeofday(struct timeval *tv);
+extern int do_settimeofday64(const struct timespec64 *ts);
+extern int do_sys_settimeofday(const struct timespec *tv,
+ const struct timezone *tz);
+
+/*
+ * Kernel time accessors
+ */
+unsigned long get_seconds(void);
+struct timespec current_kernel_time(void);
+/* does not take xtime_lock */
+struct timespec __current_kernel_time(void);
+
+/*
+ * timespec based interfaces
+ */
+struct timespec64 get_monotonic_coarse64(void);
+extern void getrawmonotonic64(struct timespec64 *ts);
+extern void ktime_get_ts64(struct timespec64 *ts);
+extern time64_t ktime_get_seconds(void);
+extern time64_t ktime_get_real_seconds(void);
+
+extern int __getnstimeofday64(struct timespec64 *tv);
+extern void getnstimeofday64(struct timespec64 *tv);
+extern void getboottime64(struct timespec64 *ts);
+
+#if BITS_PER_LONG == 64
+/**
+ * Deprecated. Use do_settimeofday64().
+ */
+static inline int do_settimeofday(const struct timespec *ts)
+{
+ return do_settimeofday64(ts);
+}
+
+static inline int __getnstimeofday(struct timespec *ts)
+{
+ return __getnstimeofday64(ts);
+}
+
+static inline void getnstimeofday(struct timespec *ts)
+{
+ getnstimeofday64(ts);
+}
+
+static inline void ktime_get_ts(struct timespec *ts)
+{
+ ktime_get_ts64(ts);
+}
+
+static inline void ktime_get_real_ts(struct timespec *ts)
+{
+ getnstimeofday64(ts);
+}
+
+static inline void getrawmonotonic(struct timespec *ts)
+{
+ getrawmonotonic64(ts);
+}
+
+static inline struct timespec get_monotonic_coarse(void)
+{
+ return get_monotonic_coarse64();
+}
+
+static inline void getboottime(struct timespec *ts)
+{
+ return getboottime64(ts);
+}
+#else
+/**
+ * Deprecated. Use do_settimeofday64().
+ */
+static inline int do_settimeofday(const struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ ts64 = timespec_to_timespec64(*ts);
+ return do_settimeofday64(&ts64);
+}
+
+static inline int __getnstimeofday(struct timespec *ts)
+{
+ struct timespec64 ts64;
+ int ret = __getnstimeofday64(&ts64);
+
+ *ts = timespec64_to_timespec(ts64);
+ return ret;
+}
+
+static inline void getnstimeofday(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getnstimeofday64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+
+static inline void ktime_get_ts(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ ktime_get_ts64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+
+static inline void ktime_get_real_ts(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getnstimeofday64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+
+static inline void getrawmonotonic(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getrawmonotonic64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+
+static inline struct timespec get_monotonic_coarse(void)
+{
+ return timespec64_to_timespec(get_monotonic_coarse64());
+}
+
+static inline void getboottime(struct timespec *ts)
+{
+ struct timespec64 ts64;
+
+ getboottime64(&ts64);
+ *ts = timespec64_to_timespec(ts64);
+}
+#endif
+
+#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
+#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
+
+/*
+ * ktime_t based interfaces
+ */
+
+enum tk_offsets {
+ TK_OFFS_REAL,
+ TK_OFFS_BOOT,
+ TK_OFFS_TAI,
+ TK_OFFS_MAX,
+};
+
+extern ktime_t ktime_get(void);
+extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
+extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
+extern ktime_t ktime_get_raw(void);
+
+/**
+ * ktime_get_real - get the real (wall-) time in ktime_t format
+ */
+static inline ktime_t ktime_get_real(void)
+{
+ return ktime_get_with_offset(TK_OFFS_REAL);
+}
+
+/**
+ * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
+ *
+ * This is similar to CLOCK_MONOTONIC/ktime_get, but also includes the
+ * time spent in suspend.
+ */
+static inline ktime_t ktime_get_boottime(void)
+{
+ return ktime_get_with_offset(TK_OFFS_BOOT);
+}
+
+/**
+ * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
+ */
+static inline ktime_t ktime_get_clocktai(void)
+{
+ return ktime_get_with_offset(TK_OFFS_TAI);
+}
+
+/**
+ * ktime_mono_to_real - Convert monotonic time to clock realtime
+ */
+static inline ktime_t ktime_mono_to_real(ktime_t mono)
+{
+ return ktime_mono_to_any(mono, TK_OFFS_REAL);
+}
+
+static inline u64 ktime_get_ns(void)
+{
+ return ktime_to_ns(ktime_get());
+}
+
+static inline u64 ktime_get_real_ns(void)
+{
+ return ktime_to_ns(ktime_get_real());
+}
+
+static inline u64 ktime_get_boot_ns(void)
+{
+ return ktime_to_ns(ktime_get_boottime());
+}
+
+static inline u64 ktime_get_tai_ns(void)
+{
+ return ktime_to_ns(ktime_get_clocktai());
+}
+
+static inline u64 ktime_get_raw_ns(void)
+{
+ return ktime_to_ns(ktime_get_raw());
+}
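+
+/*
+ * Illustrative sketch: timing a code section with the monotonic
+ * nanosecond accessor above (do_work() is a placeholder).
+ *
+ *	u64 t0 = ktime_get_ns();
+ *
+ *	do_work();
+ *	pr_debug("took %llu ns\n", ktime_get_ns() - t0);
+ */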
+
+extern u64 ktime_get_mono_fast_ns(void);
+extern u64 ktime_get_raw_fast_ns(void);
+
+/*
+ * Timespec interfaces utilizing the ktime based ones
+ */
+static inline void get_monotonic_boottime(struct timespec *ts)
+{
+ *ts = ktime_to_timespec(ktime_get_boottime());
+}
+
+static inline void get_monotonic_boottime64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_boottime());
+}
+
+static inline void timekeeping_clocktai(struct timespec *ts)
+{
+ *ts = ktime_to_timespec(ktime_get_clocktai());
+}
+
+/*
+ * RTC specific
+ */
+extern bool timekeeping_rtc_skipsuspend(void);
+extern bool timekeeping_rtc_skipresume(void);
+
+extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
+
+/*
+ * PPS accessor
+ */
+extern void getnstime_raw_and_real(struct timespec *ts_raw,
+ struct timespec *ts_real);
+
+/*
+ * Persistent clock related interfaces
+ */
+extern int persistent_clock_is_local;
+
+extern void read_persistent_clock(struct timespec *ts);
+extern void read_persistent_clock64(struct timespec64 *ts);
+extern void read_boot_clock(struct timespec *ts);
+extern void read_boot_clock64(struct timespec64 *ts);
+extern int update_persistent_clock(struct timespec now);
+extern int update_persistent_clock64(struct timespec64 now);
+
+
+#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
new file mode 100644
index 000000000..8c5a197e1
--- /dev/null
+++ b/include/linux/timer.h
@@ -0,0 +1,267 @@
+#ifndef _LINUX_TIMER_H
+#define _LINUX_TIMER_H
+
+#include <linux/list.h>
+#include <linux/ktime.h>
+#include <linux/stddef.h>
+#include <linux/debugobjects.h>
+#include <linux/stringify.h>
+
+struct tvec_base;
+
+struct timer_list {
+ /*
+ * All fields that change during normal runtime grouped to the
+ * same cacheline
+ */
+ struct list_head entry;
+ unsigned long expires;
+ struct tvec_base *base;
+
+ void (*function)(unsigned long);
+ unsigned long data;
+
+ int slack;
+
+#ifdef CONFIG_TIMER_STATS
+ int start_pid;
+ void *start_site;
+ char start_comm[16];
+#endif
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
+};
+
+extern struct tvec_base boot_tvec_bases;
+
+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting the lockdep_map key
+ * (second argument) here is required, otherwise it could be initialised to
+ * the copy of the lockdep_map later! We use the string "<file>:<line>"
+ * as the name of the lockdep_map and the pointer to that string as its key.
+ */
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
+ .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
+#else
+#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
+#endif
+
+/*
+ * Note that all tvec_bases are at least 4 byte aligned and lower two bits
+ * of base in timer_list is guaranteed to be zero. Use them for flags.
+ *
+ * A deferrable timer will work normally when the system is busy, but
+ * will not cause a CPU to come out of idle just to service it; instead,
+ * the timer will be serviced when the CPU eventually wakes up with a
+ * subsequent non-deferrable timer.
+ *
+ * An irqsafe timer is executed with IRQ disabled and it's safe to wait for
+ * the completion of the running instance from IRQ handlers, for example,
+ * by calling del_timer_sync().
+ *
+ * Note: The irq disabled callback execution is a special case for
+ * workqueue locking issues. It's not meant for executing random crap
+ * with interrupts disabled. Abuse is monitored!
+ */
+#define TIMER_DEFERRABLE 0x1LU
+#define TIMER_IRQSAFE 0x2LU
+
+#define TIMER_FLAG_MASK 0x3LU
+
+#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
+ .entry = { .prev = TIMER_ENTRY_STATIC }, \
+ .function = (_function), \
+ .expires = (_expires), \
+ .data = (_data), \
+ .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
+ .slack = -1, \
+ __TIMER_LOCKDEP_MAP_INITIALIZER( \
+ __FILE__ ":" __stringify(__LINE__)) \
+ }
+
+#define TIMER_INITIALIZER(_function, _expires, _data) \
+ __TIMER_INITIALIZER((_function), (_expires), (_data), 0)
+
+#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \
+ __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
+
+#define DEFINE_TIMER(_name, _function, _expires, _data) \
+ struct timer_list _name = \
+ TIMER_INITIALIZER(_function, _expires, _data)
+
+void init_timer_key(struct timer_list *timer, unsigned int flags,
+ const char *name, struct lock_class_key *key);
+
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void init_timer_on_stack_key(struct timer_list *timer,
+ unsigned int flags, const char *name,
+ struct lock_class_key *key);
+extern void destroy_timer_on_stack(struct timer_list *timer);
+#else
+static inline void destroy_timer_on_stack(struct timer_list *timer) { }
+static inline void init_timer_on_stack_key(struct timer_list *timer,
+ unsigned int flags, const char *name,
+ struct lock_class_key *key)
+{
+ init_timer_key(timer, flags, name, key);
+}
+#endif
+
+#ifdef CONFIG_LOCKDEP
+#define __init_timer(_timer, _flags) \
+ do { \
+ static struct lock_class_key __key; \
+ init_timer_key((_timer), (_flags), #_timer, &__key); \
+ } while (0)
+
+#define __init_timer_on_stack(_timer, _flags) \
+ do { \
+ static struct lock_class_key __key; \
+ init_timer_on_stack_key((_timer), (_flags), #_timer, &__key); \
+ } while (0)
+#else
+#define __init_timer(_timer, _flags) \
+ init_timer_key((_timer), (_flags), NULL, NULL)
+#define __init_timer_on_stack(_timer, _flags) \
+ init_timer_on_stack_key((_timer), (_flags), NULL, NULL)
+#endif
+
+#define init_timer(timer) \
+ __init_timer((timer), 0)
+#define init_timer_deferrable(timer) \
+ __init_timer((timer), TIMER_DEFERRABLE)
+#define init_timer_on_stack(timer) \
+ __init_timer_on_stack((timer), 0)
+
+#define __setup_timer(_timer, _fn, _data, _flags) \
+ do { \
+ __init_timer((_timer), (_flags)); \
+ (_timer)->function = (_fn); \
+ (_timer)->data = (_data); \
+ } while (0)
+
+#define __setup_timer_on_stack(_timer, _fn, _data, _flags) \
+ do { \
+ __init_timer_on_stack((_timer), (_flags)); \
+ (_timer)->function = (_fn); \
+ (_timer)->data = (_data); \
+ } while (0)
+
+#define setup_timer(timer, fn, data) \
+ __setup_timer((timer), (fn), (data), 0)
+#define setup_timer_on_stack(timer, fn, data) \
+ __setup_timer_on_stack((timer), (fn), (data), 0)
+#define setup_deferrable_timer_on_stack(timer, fn, data) \
+ __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
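+
+/*
+ * Illustrative sketch: the usual life cycle of a timer using the
+ * helpers above (my_timer_fn and the 100 ms period are placeholders).
+ *
+ *	static void my_timer_fn(unsigned long data)
+ *	{
+ *		// runs in softirq context when the timer expires
+ *	}
+ *
+ *	static struct timer_list my_timer;
+ *
+ *	setup_timer(&my_timer, my_timer_fn, 0);
+ *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(100));
+ *	...
+ *	del_timer_sync(&my_timer);
+ */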
+
+/**
+ * timer_pending - is a timer pending?
+ * @timer: the timer in question
+ *
+ * timer_pending will tell whether a given timer is currently pending,
+ * or not. Callers must ensure serialization wrt. other operations done
+ * to this timer, eg. interrupt contexts, or other CPUs on SMP.
+ *
+ * return value: 1 if the timer is pending, 0 if not.
+ */
+static inline int timer_pending(const struct timer_list * timer)
+{
+ return timer->entry.next != NULL;
+}
+
+extern void add_timer_on(struct timer_list *timer, int cpu);
+extern int del_timer(struct timer_list * timer);
+extern int mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
+
+extern void set_timer_slack(struct timer_list *time, int slack_hz);
+
+#define TIMER_NOT_PINNED 0
+#define TIMER_PINNED 1
+/*
+ * The jiffies value which is added to now, when there is no timer
+ * in the timer wheel:
+ */
+#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
+
+/*
+ * Return when the next timer-wheel timeout occurs (in absolute jiffies),
+ * locks the timer base and does the comparison against the given
+ * jiffies value.
+ */
+extern unsigned long get_next_timer_interrupt(unsigned long now);
+
+/*
+ * Timer-statistics info:
+ */
+#ifdef CONFIG_TIMER_STATS
+
+extern int timer_stats_active;
+
+#define TIMER_STATS_FLAG_DEFERRABLE 0x1
+
+extern void init_timer_stats(void);
+
+extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ void *timerf, char *comm,
+ unsigned int timer_flag);
+
+extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
+ void *addr);
+
+static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
+{
+ if (likely(!timer_stats_active))
+ return;
+ __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
+}
+
+static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
+{
+ timer->start_site = NULL;
+}
+#else
+static inline void init_timer_stats(void)
+{
+}
+
+static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
+{
+}
+
+static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
+{
+}
+#endif
+
+extern void add_timer(struct timer_list *timer);
+
+extern int try_to_del_timer_sync(struct timer_list *timer);
+
+#ifdef CONFIG_SMP
+ extern int del_timer_sync(struct timer_list *timer);
+#else
+# define del_timer_sync(t) del_timer(t)
+#endif
+
+#define del_singleshot_timer_sync(t) del_timer_sync(t)
+
+extern void init_timers(void);
+extern void run_local_timers(void);
+struct hrtimer;
+extern enum hrtimer_restart it_real_fn(struct hrtimer *);
+
+unsigned long __round_jiffies(unsigned long j, int cpu);
+unsigned long __round_jiffies_relative(unsigned long j, int cpu);
+unsigned long round_jiffies(unsigned long j);
+unsigned long round_jiffies_relative(unsigned long j);
+
+unsigned long __round_jiffies_up(unsigned long j, int cpu);
+unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
+unsigned long round_jiffies_up(unsigned long j);
+unsigned long round_jiffies_up_relative(unsigned long j);
+
+#endif
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
new file mode 100644
index 000000000..bd36ce431
--- /dev/null
+++ b/include/linux/timerfd.h
@@ -0,0 +1,37 @@
+/*
+ * include/linux/timerfd.h
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _LINUX_TIMERFD_H
+#define _LINUX_TIMERFD_H
+
+/* For O_CLOEXEC and O_NONBLOCK */
+#include <linux/fcntl.h>
+
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ */
+#define TFD_TIMER_ABSTIME (1 << 0)
+#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+
+#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
+/* Flags for timerfd_create. */
+#define TFD_CREATE_FLAGS TFD_SHARED_FCNTL_FLAGS
+/* Flags for timerfd_settime. */
+#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
+
+#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
+
+#endif /* _LINUX_TIMERFD_H */
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
new file mode 100644
index 000000000..46eb27ddb
--- /dev/null
+++ b/include/linux/timeriomem-rng.h
@@ -0,0 +1,16 @@
+/*
+ * linux/include/linux/timeriomem-rng.h
+ *
+ * Copyright (c) 2009 Alexander Clouter <alex@digriz.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct timeriomem_rng_data {
+ void __iomem *address;
+
+ /* measures in usecs */
+ unsigned int period;
+};
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h
new file mode 100644
index 000000000..a520fd70a
--- /dev/null
+++ b/include/linux/timerqueue.h
@@ -0,0 +1,50 @@
+#ifndef _LINUX_TIMERQUEUE_H
+#define _LINUX_TIMERQUEUE_H
+
+#include <linux/rbtree.h>
+#include <linux/ktime.h>
+
+
+struct timerqueue_node {
+ struct rb_node node;
+ ktime_t expires;
+};
+
+struct timerqueue_head {
+ struct rb_root head;
+ struct timerqueue_node *next;
+};
+
+
+extern void timerqueue_add(struct timerqueue_head *head,
+ struct timerqueue_node *node);
+extern void timerqueue_del(struct timerqueue_head *head,
+ struct timerqueue_node *node);
+extern struct timerqueue_node *timerqueue_iterate_next(
+ struct timerqueue_node *node);
+
+/**
+ * timerqueue_getnext - Returns the timer with the earliest expiration time
+ *
+ * @head: head of timerqueue
+ *
+ * Returns a pointer to the timer node that has the
+ * earliest expiration time.
+ */
+static inline
+struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head)
+{
+ return head->next;
+}
+
+static inline void timerqueue_init(struct timerqueue_node *node)
+{
+ RB_CLEAR_NODE(&node->node);
+}
+
+static inline void timerqueue_init_head(struct timerqueue_head *head)
+{
+ head->head = RB_ROOT;
+ head->next = NULL;
+}
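+
+/*
+ * Illustrative sketch: keeping a set of deadlines sorted by expiry.
+ *
+ *	struct timerqueue_head q;
+ *	struct timerqueue_node a, b, *next;
+ *
+ *	timerqueue_init_head(&q);
+ *	timerqueue_init(&a);
+ *	timerqueue_init(&b);
+ *	a.expires = ktime_add_ns(ktime_get(), 1000000);	// +1 ms
+ *	b.expires = ktime_add_ns(ktime_get(), 2000000);	// +2 ms
+ *	timerqueue_add(&q, &a);
+ *	timerqueue_add(&q, &b);
+ *	next = timerqueue_getnext(&q);	// &a, the earliest deadline
+ */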
+#endif /* _LINUX_TIMERQUEUE_H */
diff --git a/include/linux/timex.h b/include/linux/timex.h
new file mode 100644
index 000000000..9d3f1a5b6
--- /dev/null
+++ b/include/linux/timex.h
@@ -0,0 +1,163 @@
+/*****************************************************************************
+ * *
+ * Copyright (c) David L. Mills 1993 *
+ * *
+ * Permission to use, copy, modify, and distribute this software and its *
+ * documentation for any purpose and without fee is hereby granted, provided *
+ * that the above copyright notice appears in all copies and that both the *
+ * copyright notice and this permission notice appear in supporting *
+ * documentation, and that the name University of Delaware not be used in *
+ * advertising or publicity pertaining to distribution of the software *
+ * without specific, written prior permission. The University of Delaware *
+ * makes no representations about the suitability this software for any *
+ * purpose. It is provided "as is" without express or implied warranty. *
+ * *
+ *****************************************************************************/
+
+/*
+ * Modification history timex.h
+ *
+ * 29 Dec 97 Russell King
+ * Moved CLOCK_TICK_RATE, CLOCK_TICK_FACTOR and FINETUNE to asm/timex.h
+ * for ARM machines
+ *
+ * 9 Jan 97 Adrian Sun
+ * Shifted LATCH define to allow access to alpha machines.
+ *
+ * 26 Sep 94 David L. Mills
+ * Added defines for hybrid phase/frequency-lock loop.
+ *
+ * 19 Mar 94 David L. Mills
+ * Moved defines from kernel routines to header file and added new
+ * defines for PPS phase-lock loop.
+ *
+ * 20 Feb 94 David L. Mills
+ * Revised status codes and structures for external clock and PPS
+ * signal discipline.
+ *
+ * 28 Nov 93 David L. Mills
+ * Adjusted parameters to improve stability and increase poll
+ * interval.
+ *
+ * 17 Sep 93 David L. Mills
+ * Created file $NTP/include/sys/timex.h
+ * 07 Oct 93 Torsten Duwe
+ * Derived linux/timex.h
+ * 1995-08-13 Torsten Duwe
+ * kernel PLL updated to 1994-12-13 specs (rfc-1589)
+ * 1997-08-30 Ulrich Windl
+ * Added new constant NTP_PHASE_LIMIT
+ * 2004-08-12 Christoph Lameter
+ * Reworked time interpolation logic
+ */
+#ifndef _LINUX_TIMEX_H
+#define _LINUX_TIMEX_H
+
+#include <uapi/linux/timex.h>
+
+#define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */
+#define ADJ_OFFSET_SINGLESHOT 0x0001 /* old-fashioned adjtime */
+#define ADJ_OFFSET_READONLY 0x2000 /* read-only adjtime */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/param.h>
+
+#include <asm/timex.h>
+
+#ifndef random_get_entropy
+/*
+ * The random_get_entropy() function is used by the /dev/random driver
+ * in order to extract entropy via the relative unpredictability of
+ * when an interrupt takes place versus a high speed, fine-grained
+ * timing source or cycle counter. Since it is called on every
+ * single interrupt, it must have a very low cost/overhead.
+ *
+ * By default we use get_cycles() for this purpose, but individual
+ * architectures may override this in their asm/timex.h header file.
+ */
+#define random_get_entropy() get_cycles()
+#endif
+
+/*
+ * SHIFT_PLL is used as a dampening factor to define how much we
+ * adjust the frequency correction for a given offset in PLL mode.
+ * It is also used in dampening the offset correction, to define how
+ * much of the current value in time_offset we correct for each
+ * second. Changing this value changes the stiffness of the ntp
+ * adjustment code. A lower value makes it more flexible, reducing
+ * NTP convergence time. A higher value makes it stiffer, increasing
+ * convergence time, but making the clock more stable.
+ *
+ * In David Mills' nanokernel reference implementation SHIFT_PLL is 4.
+ * However this seems to make the convergence time much too long.
+ *
+ * https://lists.ntp.org/pipermail/hackers/2008-January/003487.html
+ *
+ * In the above mailing list discussion, it seems the value of 4
+ * was appropriate for other Unix systems with HZ=100, and that
+ * SHIFT_PLL should be decreased as HZ increases. However, Linux's
+ * clock steering implementation is HZ independent.
+ *
+ * Through experimentation, a SHIFT_PLL value of 2 was found to allow
+ * for fast convergence (very similar to the NTPv3 code used prior to
+ * v2.6.19), with good clock stability.
+ *
+ *
+ * SHIFT_FLL is used as a dampening factor to define how much we
+ * adjust the frequency correction for a given offset in FLL mode.
+ * In David Mills' nanokernel reference implementation SHIFT_FLL is 2.
+ *
+ * MAXTC establishes the maximum time constant of the PLL.
+ */
+#define SHIFT_PLL 2 /* PLL frequency factor (shift) */
+#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
+#define MAXTC 10 /* maximum time constant (shift) */
+
+/*
+ * SHIFT_USEC defines the scaling (shift) of the time_freq and
+ * time_tolerance variables, which represent the current frequency
+ * offset and maximum frequency tolerance.
+ */
+#define SHIFT_USEC 16 /* frequency offset scale (shift) */
+#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT 19
+#define PPM_SCALE_INV ((1LL << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+ PPM_SCALE + 1)
+
+#define MAXPHASE 500000000L /* max phase error (ns) */
+#define MAXFREQ 500000 /* max frequency error (ns/s) */
+#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
+#define MINSEC 256 /* min interval between updates (s) */
+#define MAXSEC 2048 /* max interval between updates (s) */
+#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
+
+/*
+ * kernel variables
+ * Note: maximum error = NTP synch distance = dispersion + delay / 2;
+ * estimated error = NTP dispersion.
+ */
+extern unsigned long tick_usec; /* USER_HZ period (usec) */
+extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
+
+/* Required to safely shift negative values */
+#define shift_right(x, s) ({ \
+ __typeof__(x) __x = (x); \
+ __typeof__(s) __s = (s); \
+ __x < 0 ? -(-__x >> __s) : __x >> __s; \
+})
+
+#define NTP_SCALE_SHIFT 32
+
+#define NTP_INTERVAL_FREQ (HZ)
+#define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
+
+extern int do_adjtimex(struct timex *);
+extern void hardpps(const struct timespec *, const struct timespec *);
+
+int read_current_timer(unsigned long *timer_val);
+void ntp_notify_cmos_timer(void);
+
+/* The clock frequency of the i8253/i8254 PIT */
+#define PIT_TICK_RATE 1193182ul
+
+#endif /* _LINUX_TIMEX_H */
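
The shift_right() macro above avoids right-shifting a negative value, which is implementation-defined in C, by negating first so the shift always operates on a non-negative quantity. A small worked example; the variables are illustrative:

        /* shift_right(-1001, 3) == -(1001 >> 3) == -125, symmetric with the
         * positive case (shift_right(1001, 3) == 125).  A plain arithmetic
         * shift would give -1001 >> 3 == -126 (rounding toward negative
         * infinity), and is not even guaranteed by the C standard. */
        s64 scaled_offset = shift_right(offset, SHIFT_PLL);
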
diff --git a/include/linux/topology.h b/include/linux/topology.h
new file mode 100644
index 000000000..909b6e43b
--- /dev/null
+++ b/include/linux/topology.h
@@ -0,0 +1,214 @@
+/*
+ * include/linux/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _LINUX_TOPOLOGY_H
+#define _LINUX_TOPOLOGY_H
+
+#include <linux/cpumask.h>
+#include <linux/bitops.h>
+#include <linux/mmzone.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
+#include <asm/topology.h>
+
+#ifndef node_has_online_mem
+#define node_has_online_mem(nid) (1)
+#endif
+
+#ifndef nr_cpus_node
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
+#endif
+
+#define for_each_node_with_cpus(node) \
+ for_each_online_node(node) \
+ if (nr_cpus_node(node))
+
+int arch_update_cpu_topology(void);
+
+/* Conform to ACPI 2.0 SLIT distance definitions */
+#define LOCAL_DISTANCE 10
+#define REMOTE_DISTANCE 20
+#ifndef node_distance
+#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
+#endif
+#ifndef RECLAIM_DISTANCE
+/*
+ * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
+ * (in whatever arch specific measurement units returned by node_distance())
+ * and zone_reclaim_mode is enabled then the VM will only call zone_reclaim()
+ * on nodes within this distance.
+ */
+#define RECLAIM_DISTANCE 30
+#endif
+#ifndef PENALTY_FOR_NODE_WITH_CPUS
+#define PENALTY_FOR_NODE_WITH_CPUS (1)
+#endif
+
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+DECLARE_PER_CPU(int, numa_node);
+
+#ifndef numa_node_id
+/* Returns the number of the current Node. */
+static inline int numa_node_id(void)
+{
+ return raw_cpu_read(numa_node);
+}
+#endif
+
+#ifndef cpu_to_node
+static inline int cpu_to_node(int cpu)
+{
+ return per_cpu(numa_node, cpu);
+}
+#endif
+
+#ifndef set_numa_node
+static inline void set_numa_node(int node)
+{
+ this_cpu_write(numa_node, node);
+}
+#endif
+
+#ifndef set_cpu_numa_node
+static inline void set_cpu_numa_node(int cpu, int node)
+{
+ per_cpu(numa_node, cpu) = node;
+}
+#endif
+
+#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */
+
+/* Returns the number of the current Node. */
+#ifndef numa_node_id
+static inline int numa_node_id(void)
+{
+ return cpu_to_node(raw_smp_processor_id());
+}
+#endif
+
+#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
+
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+
+/*
+ * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
+ * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
+ * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
+ */
+DECLARE_PER_CPU(int, _numa_mem_);
+extern int _node_numa_mem_[MAX_NUMNODES];
+
+#ifndef set_numa_mem
+static inline void set_numa_mem(int node)
+{
+ this_cpu_write(_numa_mem_, node);
+ _node_numa_mem_[numa_node_id()] = node;
+}
+#endif
+
+#ifndef node_to_mem_node
+static inline int node_to_mem_node(int node)
+{
+ return _node_numa_mem_[node];
+}
+#endif
+
+#ifndef numa_mem_id
+/* Returns the number of the nearest Node with memory */
+static inline int numa_mem_id(void)
+{
+ return raw_cpu_read(_numa_mem_);
+}
+#endif
+
+#ifndef cpu_to_mem
+static inline int cpu_to_mem(int cpu)
+{
+ return per_cpu(_numa_mem_, cpu);
+}
+#endif
+
+#ifndef set_cpu_numa_mem
+static inline void set_cpu_numa_mem(int cpu, int node)
+{
+ per_cpu(_numa_mem_, cpu) = node;
+ _node_numa_mem_[cpu_to_node(cpu)] = node;
+}
+#endif
+
+#else /* !CONFIG_HAVE_MEMORYLESS_NODES */
+
+#ifndef numa_mem_id
+/* Returns the number of the nearest Node with memory */
+static inline int numa_mem_id(void)
+{
+ return numa_node_id();
+}
+#endif
+
+#ifndef node_to_mem_node
+static inline int node_to_mem_node(int node)
+{
+ return node;
+}
+#endif
+
+#ifndef cpu_to_mem
+static inline int cpu_to_mem(int cpu)
+{
+ return cpu_to_node(cpu);
+}
+#endif
+
+#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
+
+#ifndef topology_physical_package_id
+#define topology_physical_package_id(cpu) ((void)(cpu), -1)
+#endif
+#ifndef topology_core_id
+#define topology_core_id(cpu) ((void)(cpu), 0)
+#endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu) cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu) cpumask_of(cpu)
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+static inline const struct cpumask *cpu_smt_mask(int cpu)
+{
+ return topology_thread_cpumask(cpu);
+}
+#endif
+
+static inline const struct cpumask *cpu_cpu_mask(int cpu)
+{
+ return cpumask_of_node(cpu_to_node(cpu));
+}
+
+
+#endif /* _LINUX_TOPOLOGY_H */
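
A brief sketch of how the topology helpers above are commonly consumed, e.g. node-local allocation and a walk over nodes that actually have CPUs; the function names are hypothetical:

        #include <linux/topology.h>
        #include <linux/slab.h>
        #include <linux/printk.h>

        static void *my_alloc_near(int cpu, size_t size)
        {
                /* Allocate on the node backing this CPU; the page allocator
                 * falls back to other nodes if that node is memoryless. */
                return kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
        }

        static void my_dump_nodes(void)
        {
                int node;

                for_each_node_with_cpus(node)
                        pr_info("node %d: %u cpus, distance to node 0 = %d\n",
                                node, nr_cpus_node(node), node_distance(node, 0));
        }
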
diff --git a/include/linux/torture.h b/include/linux/torture.h
new file mode 100644
index 000000000..7759fc3c6
--- /dev/null
+++ b/include/linux/torture.h
@@ -0,0 +1,95 @@
+/*
+ * Common functions for in-kernel torture tests.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2014
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __LINUX_TORTURE_H
+#define __LINUX_TORTURE_H
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/seqlock.h>
+#include <linux/lockdep.h>
+#include <linux/completion.h>
+#include <linux/debugobjects.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+
+/* Definitions for a non-string torture-test module parameter. */
+#define torture_param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg);
+
+#define TORTURE_FLAG "-torture:"
+#define TOROUT_STRING(s) \
+ pr_alert("%s" TORTURE_FLAG s "\n", torture_type)
+#define VERBOSE_TOROUT_STRING(s) \
+ do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
+#define VERBOSE_TOROUT_ERRSTRING(s) \
+ do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
+
+/* Definitions for online/offline exerciser. */
+int torture_onoff_init(long ooholdoff, long oointerval);
+void torture_onoff_stats(void);
+bool torture_onoff_failures(void);
+
+/* Low-rider random number generator. */
+struct torture_random_state {
+ unsigned long trs_state;
+ long trs_count;
+};
+#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+unsigned long torture_random(struct torture_random_state *trsp);
+
+/* Task shuffler, which causes CPUs to occasionally go idle. */
+void torture_shuffle_task_register(struct task_struct *tp);
+int torture_shuffle_init(long shuffint);
+
+/* Test auto-shutdown handling. */
+void torture_shutdown_absorb(const char *title);
+int torture_shutdown_init(int ssecs, void (*cleanup)(void));
+
+/* Task stuttering, which forces load/no-load transitions. */
+void stutter_wait(const char *title);
+int torture_stutter_init(int s);
+
+/* Initialization and cleanup. */
+bool torture_init_begin(char *ttype, bool v, int *runnable);
+void torture_init_end(void);
+bool torture_cleanup_begin(void);
+void torture_cleanup_end(void);
+bool torture_must_stop(void);
+bool torture_must_stop_irq(void);
+void torture_kthread_stopping(char *title);
+int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
+ char *f, struct task_struct **tp);
+void _torture_stop_kthread(char *m, struct task_struct **tp);
+
+#define torture_create_kthread(n, arg, tp) \
+ _torture_create_kthread(n, (arg), #n, "Creating " #n " task", \
+ "Failed to create " #n, &(tp))
+#define torture_stop_kthread(n, tp) \
+ _torture_stop_kthread("Stopping " #n " task", &(tp))
+
+#endif /* __LINUX_TORTURE_H */
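
A skeletal sketch of a torture module built from the declarations above; the "foo" names, the module parameter, and the kthread body are hypothetical placeholders rather than anything defined by this header:

        #include <linux/module.h>
        #include <linux/kthread.h>
        #include <linux/torture.h>

        torture_param(int, nloops, 100, "Loops per pass (illustrative)");

        static char *torture_type = "foo";
        static bool verbose;
        static int foo_runnable = 1;
        static struct task_struct *foo_task;

        static int foo_kthread(void *arg)
        {
                VERBOSE_TOROUT_STRING("foo_kthread started");
                do {
                        /* exercise the code under test here */
                        stutter_wait("foo_kthread");
                } while (!torture_must_stop());
                torture_kthread_stopping("foo_kthread");
                return 0;
        }

        static int __init foo_torture_init(void)
        {
                if (!torture_init_begin(torture_type, verbose, &foo_runnable))
                        return -EBUSY;
                torture_create_kthread(foo_kthread, NULL, foo_task);
                torture_init_end();
                return 0;
        }
        module_init(foo_torture_init);
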
diff --git a/include/linux/toshiba.h b/include/linux/toshiba.h
new file mode 100644
index 000000000..915c3bb16
--- /dev/null
+++ b/include/linux/toshiba.h
@@ -0,0 +1,25 @@
+/* toshiba.h -- Linux driver for accessing the SMM on Toshiba laptops
+ *
+ * Copyright (c) 1996-2000 Jonathan A. Buzzard (jonathan@buzzard.org.uk)
+ *
+ * Thanks to Juergen Heinzl <juergen@monocerus.demon.co.uk> for the pointers
+ * on making sure the structure is aligned and packed.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+#ifndef _LINUX_TOSHIBA_H
+#define _LINUX_TOSHIBA_H
+
+#include <uapi/linux/toshiba.h>
+
+int tosh_smm(SMMRegisters *regs);
+#endif
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
new file mode 100644
index 000000000..8350c538b
--- /dev/null
+++ b/include/linux/tpm.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004,2007,2008 IBM Corporation
+ *
+ * Authors:
+ * Leendert van Doorn <leendert@watson.ibm.com>
+ * Dave Safford <safford@watson.ibm.com>
+ * Reiner Sailer <sailer@watson.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ * Debora Velarde <dvelarde@us.ibm.com>
+ *
+ * Maintained by: <tpmdd_devel@lists.sourceforge.net>
+ *
+ * Device driver for TCG/TCPA TPM (trusted platform module).
+ * Specifications at www.trustedcomputinggroup.org
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+#ifndef __LINUX_TPM_H__
+#define __LINUX_TPM_H__
+
+#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
+
+/*
+ * Chip num is this value or a valid tpm idx
+ */
+#define TPM_ANY_NUM 0xFFFF
+
+struct tpm_chip;
+
+struct tpm_class_ops {
+ const u8 req_complete_mask;
+ const u8 req_complete_val;
+ bool (*req_canceled)(struct tpm_chip *chip, u8 status);
+ int (*recv) (struct tpm_chip *chip, u8 *buf, size_t len);
+ int (*send) (struct tpm_chip *chip, u8 *buf, size_t len);
+ void (*cancel) (struct tpm_chip *chip);
+ u8 (*status) (struct tpm_chip *chip);
+ bool (*update_timeouts)(struct tpm_chip *chip,
+ unsigned long *timeout_cap);
+
+};
+
+#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
+
+extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf);
+extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash);
+extern int tpm_send(u32 chip_num, void *cmd, size_t buflen);
+extern int tpm_get_random(u32 chip_num, u8 *data, size_t max);
+#else
+static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) {
+ return -ENODEV;
+}
+static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) {
+ return -ENODEV;
+}
+static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) {
+ return -ENODEV;
+}
+static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) {
+ return -ENODEV;
+}
+#endif
+#endif
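
A brief sketch of calling the TPM interface above from another kernel subsystem, assuming CONFIG_TCG_TPM is enabled; the function name is hypothetical and error handling is trimmed:

        #include <linux/tpm.h>
        #include <linux/types.h>
        #include <linux/printk.h>

        static void my_tpm_demo(void)
        {
                u8 rnd[16], pcr[TPM_DIGEST_SIZE];

                /* TPM_ANY_NUM: use whichever TPM chip is registered */
                if (tpm_get_random(TPM_ANY_NUM, rnd, sizeof(rnd)) <= 0)
                        pr_warn("no TPM randomness available\n");

                if (tpm_pcr_read(TPM_ANY_NUM, 0, pcr) == 0)
                        pr_info("PCR0 starts with %02x\n", pcr[0]);
        }
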
diff --git a/include/linux/tpm_command.h b/include/linux/tpm_command.h
new file mode 100644
index 000000000..727512e24
--- /dev/null
+++ b/include/linux/tpm_command.h
@@ -0,0 +1,28 @@
+#ifndef __LINUX_TPM_COMMAND_H__
+#define __LINUX_TPM_COMMAND_H__
+
+/*
+ * TPM Command constants from specifications at
+ * http://www.trustedcomputinggroup.org
+ */
+
+/* Command TAGS */
+#define TPM_TAG_RQU_COMMAND 193
+#define TPM_TAG_RQU_AUTH1_COMMAND 194
+#define TPM_TAG_RQU_AUTH2_COMMAND 195
+#define TPM_TAG_RSP_COMMAND 196
+#define TPM_TAG_RSP_AUTH1_COMMAND 197
+#define TPM_TAG_RSP_AUTH2_COMMAND 198
+
+/* Command Ordinals */
+#define TPM_ORD_GETRANDOM 70
+#define TPM_ORD_OSAP 11
+#define TPM_ORD_OIAP 10
+#define TPM_ORD_SEAL 23
+#define TPM_ORD_UNSEAL 24
+
+/* Other constants */
+#define SRKHANDLE 0x40000000
+#define TPM_NONCE_SIZE 20
+
+#endif
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 000000000..1d7ca2739
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,23 @@
+#ifndef _LINUX_TRACE_CLOCK_H
+#define _LINUX_TRACE_CLOCK_H
+
+/*
+ * 3 trace clock variants, with differing scalability/precision
+ * tradeoffs:
+ *
+ * - local: CPU-local trace clock
+ * - medium: scalable global clock with some jitter
+ * - global: globally monotonic, serialized clock
+ */
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include <asm/trace_clock.h>
+
+extern u64 notrace trace_clock_local(void);
+extern u64 notrace trace_clock(void);
+extern u64 notrace trace_clock_jiffies(void);
+extern u64 notrace trace_clock_global(void);
+extern u64 notrace trace_clock_counter(void);
+
+#endif /* _LINUX_TRACE_CLOCK_H */
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
new file mode 100644
index 000000000..cfaf5a1d4
--- /dev/null
+++ b/include/linux/trace_seq.h
@@ -0,0 +1,138 @@
+#ifndef _LINUX_TRACE_SEQ_H
+#define _LINUX_TRACE_SEQ_H
+
+#include <linux/seq_buf.h>
+
+#include <asm/page.h>
+
+/*
+ * Trace sequences are used to allow a function to call several other functions
+ * to create a string of data to use (up to a max of PAGE_SIZE).
+ */
+
+struct trace_seq {
+ unsigned char buffer[PAGE_SIZE];
+ struct seq_buf seq;
+ int full;
+};
+
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+ seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
+ s->full = 0;
+}
+
+/**
+ * trace_seq_used - amount of actual data written to buffer
+ * @s: trace sequence descriptor
+ *
+ * Returns the amount of data written to the buffer.
+ *
+ * IMPORTANT!
+ *
+ * Use this instead of @s->seq.len if you need to pass the amount
+ * of data from the buffer to another buffer (userspace, or what not).
+ * The @s->seq.len on overflow is bigger than the buffer size and
+ * using it can cause access to undefined memory.
+ */
+static inline int trace_seq_used(struct trace_seq *s)
+{
+ return seq_buf_used(&s->seq);
+}
+
+/**
+ * trace_seq_buffer_ptr - return pointer to next location in buffer
+ * @s: trace sequence descriptor
+ *
+ * Returns the pointer to the buffer where the next write to
+ * the buffer will happen. This is useful to save the location
+ * that is about to be written to and then return the result
+ * of that write.
+ */
+static inline unsigned char *
+trace_seq_buffer_ptr(struct trace_seq *s)
+{
+ return s->buffer + seq_buf_used(&s->seq);
+}
+
+/**
+ * trace_seq_has_overflowed - return true if the trace_seq took too much
+ * @s: trace sequence descriptor
+ *
+ * Returns true if too much data was added to the trace_seq and it is
+ * now full and will not accept any more data.
+ */
+static inline bool trace_seq_has_overflowed(struct trace_seq *s)
+{
+ return s->full || seq_buf_has_overflowed(&s->seq);
+}
+
+/*
+ * Currently only defined when tracing is enabled.
+ */
+#ifdef CONFIG_TRACING
+extern __printf(2, 3)
+void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
+extern __printf(2, 0)
+void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
+extern void
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
+extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+ int cnt);
+extern void trace_seq_puts(struct trace_seq *s, const char *str);
+extern void trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
+extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+ unsigned int len);
+extern int trace_seq_path(struct trace_seq *s, const struct path *path);
+
+extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+ int nmaskbits);
+
+#else /* CONFIG_TRACING */
+static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+}
+static inline void
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+}
+
+static inline void
+trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+ int nmaskbits)
+{
+}
+
+static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+ return 0;
+}
+static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+ int cnt)
+{
+ return 0;
+}
+static inline void trace_seq_puts(struct trace_seq *s, const char *str)
+{
+}
+static inline void trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+}
+static inline void
+trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
+{
+}
+static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+ unsigned int len)
+{
+}
+static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
+{
+ return 0;
+}
+#endif /* CONFIG_TRACING */
+
+#endif /* _LINUX_TRACE_SEQ_H */
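
A short sketch of how a trace printer might drive the trace_seq API above (under CONFIG_TRACING); the fields being formatted are illustrative:

        #include <linux/trace_seq.h>
        #include <linux/types.h>

        static void my_fill_seq(struct trace_seq *s, int cpu, u64 ts)
        {
                trace_seq_printf(s, "cpu=%d ts=%llu ",
                                 cpu, (unsigned long long)ts);
                trace_seq_puts(s, "hello");
                trace_seq_putc(s, '\n');

                if (trace_seq_has_overflowed(s))
                        return;         /* PAGE_SIZE budget exhausted */

                /* trace_seq_used(s), not s->seq.len, is the safe byte count
                 * to hand to another buffer or to userspace. */
        }
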
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
new file mode 100644
index 000000000..5b727a17b
--- /dev/null
+++ b/include/linux/tracefs.h
@@ -0,0 +1,45 @@
+/*
+ * tracefs.h - a pseudo file system for activating tracing
+ *
+ * Based on debugfs by: 2004 Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt <srostedt@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * tracefs is the file system that is used by the tracing infrastructure.
+ *
+ */
+
+#ifndef _TRACEFS_H_
+#define _TRACEFS_H_
+
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <linux/types.h>
+
+struct file_operations;
+
+#ifdef CONFIG_TRACING
+
+struct dentry *tracefs_create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+struct dentry *tracefs_create_dir(const char *name, struct dentry *parent);
+
+void tracefs_remove(struct dentry *dentry);
+void tracefs_remove_recursive(struct dentry *dentry);
+
+struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
+ int (*mkdir)(const char *name),
+ int (*rmdir)(const char *name));
+
+bool tracefs_initialized(void);
+
+#endif /* CONFIG_TRACING */
+
+#endif
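
A minimal sketch of exposing one read-only file through tracefs using the declarations above; the directory name, file name, and read handler are hypothetical:

        #include <linux/tracefs.h>
        #include <linux/fs.h>
        #include <linux/init.h>

        static ssize_t my_read(struct file *file, char __user *ubuf,
                               size_t count, loff_t *ppos)
        {
                static const char msg[] = "hello from tracefs\n";

                return simple_read_from_buffer(ubuf, count, ppos,
                                               msg, sizeof(msg) - 1);
        }

        static const struct file_operations my_fops = {
                .read = my_read,
        };

        static int __init my_tracefs_init(void)
        {
                struct dentry *dir = tracefs_create_dir("my_feature", NULL);

                if (dir)
                        tracefs_create_file("status", 0444, dir, NULL, &my_fops);
                return 0;
        }
        fs_initcall(my_tracefs_init);
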
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
new file mode 100644
index 000000000..84d497297
--- /dev/null
+++ b/include/linux/tracehook.h
@@ -0,0 +1,193 @@
+/*
+ * Tracing hooks
+ *
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * This file defines hook entry points called by core code where
+ * user tracing/debugging support might need to do something. These
+ * entry points are called tracehook_*(). Each hook declared below
+ * has a detailed kerneldoc comment giving the context (locking et
+ * al) from which it is called, and the meaning of its return value.
+ *
+ * Each function here typically has only one call site, so it is ok
+ * to have some nontrivial tracehook_*() inlines. In all cases, the
+ * fast path when no tracing is enabled should be very short.
+ *
+ * The purpose of this file and the tracehook_* layer is to consolidate
+ * the interface that the kernel core and arch code uses to enable any
+ * user debugging or tracing facility (such as ptrace). The interfaces
+ * here are carefully documented so that maintainers of core and arch
+ * code do not need to think about the implementation details of the
+ * tracing facilities. Likewise, maintainers of the tracing code do not
+ * need to understand all the calling core or arch code in detail, just
+ * documented circumstances of each call, such as locking conditions.
+ *
+ * If the calling core code changes so that locking is different, then
+ * it is ok to change the interface documented here. The maintainer of
+ * core code changing should notify the maintainers of the tracing code
+ * that they need to work out the change.
+ *
+ * Some tracehook_*() inlines take arguments that the current tracing
+ * implementations might not necessarily use. These function signatures
+ * are chosen to pass in all the information that is on hand in the
+ * caller and might conceivably be relevant to a tracer, so that the
+ * core code won't have to be updated when tracing adds more features.
+ * If a call site changes so that some of those parameters are no longer
+ * already on hand without extra work, then the tracehook_* interface
+ * can change so there is no make-work burden on the core code. The
+ * maintainer of core code changing should notify the maintainers of the
+ * tracing code that they need to work out the change.
+ */
+
+#ifndef _LINUX_TRACEHOOK_H
+#define _LINUX_TRACEHOOK_H 1
+
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/security.h>
+#include <linux/task_work.h>
+struct linux_binprm;
+
+/*
+ * ptrace report for syscall entry and exit looks identical.
+ */
+static inline int ptrace_report_syscall(struct pt_regs *regs)
+{
+ int ptrace = current->ptrace;
+
+ if (!(ptrace & PT_PTRACED))
+ return 0;
+
+ ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
+
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+
+ return fatal_signal_pending(current);
+}
+
+/**
+ * tracehook_report_syscall_entry - task is about to attempt a system call
+ * @regs: user register state of current task
+ *
+ * This will be called if %TIF_SYSCALL_TRACE has been set, when the
+ * current task has just entered the kernel for a system call.
+ * Full user register state is available here. Changing the values
+ * in @regs can affect the system call number and arguments to be tried.
+ * It is safe to block here, preventing the system call from beginning.
+ *
+ * Returns zero normally, or nonzero if the calling arch code should abort
+ * the system call. That must prevent normal entry so no system call is
+ * made. If @task ever returns to user mode after this, its register state
+ * is unspecified, but should be something harmless like an %ENOSYS error
+ * return. It should preserve enough information so that syscall_rollback()
+ * can work (see asm-generic/syscall.h).
+ *
+ * Called without locks, just after entering kernel mode.
+ */
+static inline __must_check int tracehook_report_syscall_entry(
+ struct pt_regs *regs)
+{
+ return ptrace_report_syscall(regs);
+}
+
+/**
+ * tracehook_report_syscall_exit - task has just finished a system call
+ * @regs: user register state of current task
+ * @step: nonzero if simulating single-step or block-step
+ *
+ * This will be called if %TIF_SYSCALL_TRACE has been set, when the
+ * current task has just finished an attempted system call. Full
+ * user register state is available here. It is safe to block here,
+ * preventing signals from being processed.
+ *
+ * If @step is nonzero, this report is also in lieu of the normal
+ * trap that would follow the system call instruction because
+ * user_enable_block_step() or user_enable_single_step() was used.
+ * In this case, %TIF_SYSCALL_TRACE might not be set.
+ *
+ * Called without locks, just before checking for pending signals.
+ */
+static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
+{
+ if (step) {
+ siginfo_t info;
+ user_single_step_siginfo(current, regs, &info);
+ force_sig_info(SIGTRAP, &info, current);
+ return;
+ }
+
+ ptrace_report_syscall(regs);
+}
+
+/**
+ * tracehook_signal_handler - signal handler setup is complete
+ * @stepping: nonzero if debugger single-step or block-step in use
+ *
+ * Called by the arch code after a signal handler has been set up.
+ * Register and stack state reflects the user handler about to run.
+ * Signal mask changes have already been made.
+ *
+ * Called without locks, shortly before returning to user mode
+ * (or handling more signals).
+ */
+static inline void tracehook_signal_handler(int stepping)
+{
+ if (stepping)
+ ptrace_notify(SIGTRAP);
+}
+
+/**
+ * set_notify_resume - cause tracehook_notify_resume() to be called
+ * @task: task that will call tracehook_notify_resume()
+ *
+ * Calling this arranges that @task will call tracehook_notify_resume()
+ * before returning to user mode. If it's already running in user mode,
+ * it will enter the kernel and call tracehook_notify_resume() soon.
+ * If it's blocked, it will not be woken.
+ */
+static inline void set_notify_resume(struct task_struct *task)
+{
+#ifdef TIF_NOTIFY_RESUME
+ if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
+ kick_process(task);
+#endif
+}
+
+/**
+ * tracehook_notify_resume - report when about to return to user mode
+ * @regs: user-mode registers of @current task
+ *
+ * This is called when %TIF_NOTIFY_RESUME has been set. Now we are
+ * about to return to user mode, and the user state in @regs can be
+ * inspected or adjusted. The caller in arch code has cleared
+ * %TIF_NOTIFY_RESUME before the call. If the flag gets set again
+ * asynchronously, this will be called again before we return to
+ * user mode.
+ *
+ * Called without locks.
+ */
+static inline void tracehook_notify_resume(struct pt_regs *regs)
+{
+ /*
+ * The caller just cleared TIF_NOTIFY_RESUME. This barrier
+ * pairs with task_work_add()->set_notify_resume() after
+ * hlist_add_head(task->task_works);
+ */
+ smp_mb__after_atomic();
+ if (unlikely(current->task_works))
+ task_work_run();
+}
+
+#endif /* <linux/tracehook.h> */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
new file mode 100644
index 000000000..a5f7f3eca
--- /dev/null
+++ b/include/linux/tracepoint.h
@@ -0,0 +1,480 @@
+#ifndef _LINUX_TRACEPOINT_H
+#define _LINUX_TRACEPOINT_H
+
+/*
+ * Kernel Tracepoint API.
+ *
+ * See Documentation/trace/tracepoints.txt.
+ *
+ * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Heavily inspired from the Linux Kernel Markers.
+ *
+ * This file is released under the GPLv2.
+ * See the file COPYING for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/rcupdate.h>
+#include <linux/static_key.h>
+
+struct module;
+struct tracepoint;
+struct notifier_block;
+
+struct tracepoint_func {
+ void *func;
+ void *data;
+};
+
+struct tracepoint {
+ const char *name; /* Tracepoint name */
+ struct static_key key;
+ void (*regfunc)(void);
+ void (*unregfunc)(void);
+ struct tracepoint_func __rcu *funcs;
+};
+
+struct trace_enum_map {
+ const char *system;
+ const char *enum_string;
+ unsigned long enum_value;
+};
+
+extern int
+tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
+extern int
+tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
+extern void
+for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
+ void *priv);
+
+#ifdef CONFIG_MODULES
+struct tp_module {
+ struct list_head list;
+ struct module *mod;
+};
+
+bool trace_module_has_bad_taint(struct module *mod);
+extern int register_tracepoint_module_notifier(struct notifier_block *nb);
+extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
+#else
+static inline bool trace_module_has_bad_taint(struct module *mod)
+{
+ return false;
+}
+static inline
+int register_tracepoint_module_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline
+int unregister_tracepoint_module_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+#endif /* CONFIG_MODULES */
+
+/*
+ * tracepoint_synchronize_unregister must be called between the last tracepoint
+ * probe unregistration and the end of module exit to make sure there is no
+ * caller executing a probe when it is freed.
+ */
+static inline void tracepoint_synchronize_unregister(void)
+{
+ synchronize_sched();
+}
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+extern void syscall_regfunc(void);
+extern void syscall_unregfunc(void);
+#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
+
+#define PARAMS(args...) args
+
+#define TRACE_DEFINE_ENUM(x)
+
+#endif /* _LINUX_TRACEPOINT_H */
+
+/*
+ * Note: we keep the TRACE_EVENT and DECLARE_TRACE outside the include
+ * file ifdef protection.
+ * This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS" the first include
+ * will override the TRACE_EVENT and break the second include.
+ */
+
+#ifndef DECLARE_TRACE
+
+#define TP_PROTO(args...) args
+#define TP_ARGS(args...) args
+#define TP_CONDITION(args...) args
+
+#ifdef CONFIG_TRACEPOINTS
+
+/*
+ * it_func[0] is never NULL because there is at least one element in the array
+ * when the array itself is non NULL.
+ *
+ * Note, the proto and args passed in include "__data" as the first parameter.
+ * The reason for this is to handle the "void" prototype. If a tracepoint
+ * has a "void" prototype, then it is invalid to declare a function
+ * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
+ * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
+ */
+#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \
+ do { \
+ struct tracepoint_func *it_func_ptr; \
+ void *it_func; \
+ void *__data; \
+ \
+ if (!(cond)) \
+ return; \
+ prercu; \
+ rcu_read_lock_sched_notrace(); \
+ it_func_ptr = rcu_dereference_sched((tp)->funcs); \
+ if (it_func_ptr) { \
+ do { \
+ it_func = (it_func_ptr)->func; \
+ __data = (it_func_ptr)->data; \
+ ((void(*)(proto))(it_func))(args); \
+ } while ((++it_func_ptr)->func); \
+ } \
+ rcu_read_unlock_sched_notrace(); \
+ postrcu; \
+ } while (0)
+
+#ifndef MODULE
+#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+ static inline void trace_##name##_rcuidle(proto) \
+ { \
+ if (static_key_false(&__tracepoint_##name.key)) \
+ __DO_TRACE(&__tracepoint_##name, \
+ TP_PROTO(data_proto), \
+ TP_ARGS(data_args), \
+ TP_CONDITION(cond), \
+ rcu_irq_enter(), \
+ rcu_irq_exit()); \
+ }
+#else
+#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
+#endif
+
+/*
+ * Make sure the alignment of the structure in the __tracepoints section will
+ * not add unwanted padding between the beginning of the section and the
+ * structure. Force alignment to the same alignment as the section start.
+ *
+ * When lockdep is enabled, we make sure to always do the RCU portions of
+ * the tracepoint code, regardless of whether tracing is on or we match the
+ * condition. This lets us find RCU issues triggered with tracepoints even
+ * when this tracepoint is off. This code has no purpose other than poking
+ * RCU a bit.
+ */
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+ extern struct tracepoint __tracepoint_##name; \
+ static inline void trace_##name(proto) \
+ { \
+ if (static_key_false(&__tracepoint_##name.key)) \
+ __DO_TRACE(&__tracepoint_##name, \
+ TP_PROTO(data_proto), \
+ TP_ARGS(data_args), \
+ TP_CONDITION(cond),,); \
+ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
+ rcu_read_lock_sched_notrace(); \
+ rcu_dereference_sched(__tracepoint_##name.funcs);\
+ rcu_read_unlock_sched_notrace(); \
+ } \
+ } \
+ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
+ PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
+ static inline int \
+ register_trace_##name(void (*probe)(data_proto), void *data) \
+ { \
+ return tracepoint_probe_register(&__tracepoint_##name, \
+ (void *)probe, data); \
+ } \
+ static inline int \
+ unregister_trace_##name(void (*probe)(data_proto), void *data) \
+ { \
+ return tracepoint_probe_unregister(&__tracepoint_##name,\
+ (void *)probe, data); \
+ } \
+ static inline void \
+ check_trace_callback_type_##name(void (*cb)(data_proto)) \
+ { \
+ } \
+ static inline bool \
+ trace_##name##_enabled(void) \
+ { \
+ return static_key_false(&__tracepoint_##name.key); \
+ }
+
+/*
+ * We have no guarantee that gcc and the linker won't up-align the tracepoint
+ * structures, so we create an array of pointers that will be used for iteration
+ * on the tracepoints.
+ */
+#define DEFINE_TRACE_FN(name, reg, unreg) \
+ static const char __tpstrtab_##name[] \
+ __attribute__((section("__tracepoints_strings"))) = #name; \
+ struct tracepoint __tracepoint_##name \
+ __attribute__((section("__tracepoints"))) = \
+ { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
+ static struct tracepoint * const __tracepoint_ptr_##name __used \
+ __attribute__((section("__tracepoints_ptrs"))) = \
+ &__tracepoint_##name;
+
+#define DEFINE_TRACE(name) \
+ DEFINE_TRACE_FN(name, NULL, NULL);
+
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
+ EXPORT_SYMBOL_GPL(__tracepoint_##name)
+#define EXPORT_TRACEPOINT_SYMBOL(name) \
+ EXPORT_SYMBOL(__tracepoint_##name)
+
+#else /* !CONFIG_TRACEPOINTS */
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+ static inline void trace_##name(proto) \
+ { } \
+ static inline void trace_##name##_rcuidle(proto) \
+ { } \
+ static inline int \
+ register_trace_##name(void (*probe)(data_proto), \
+ void *data) \
+ { \
+ return -ENOSYS; \
+ } \
+ static inline int \
+ unregister_trace_##name(void (*probe)(data_proto), \
+ void *data) \
+ { \
+ return -ENOSYS; \
+ } \
+ static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
+ { \
+ } \
+ static inline bool \
+ trace_##name##_enabled(void) \
+ { \
+ return false; \
+ }
+
+#define DEFINE_TRACE_FN(name, reg, unreg)
+#define DEFINE_TRACE(name)
+#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
+#define EXPORT_TRACEPOINT_SYMBOL(name)
+
+#endif /* CONFIG_TRACEPOINTS */
+
+#ifdef CONFIG_TRACING
+/**
+ * tracepoint_string - register constant persistent string to trace system
+ * @str - a constant persistent string that will be referenced in tracepoints
+ *
+ * If constant strings are being used in tracepoints, it is faster and
+ * more efficient to just save the pointer to the string and reference
+ * that with a printf "%s" instead of saving the string in the ring buffer
+ * and wasting space and time.
+ *
+ * The problem with the above approach is that userspace tools that read
+ * the binary output of the trace buffers do not have access to the string.
+ * Instead they just show the address of the string which is not very
+ * useful to users.
+ *
+ * With tracepoint_string(), the string will be registered to the tracing
+ * system and exported to userspace via the debugfs/tracing/printk_formats
+ * file that maps the string address to the string text. This way userspace
+ * tools that read the binary buffers have a way to map the pointers to
+ * the ASCII strings they represent.
+ *
+ * The @str used must be a constant string and persistent as it would not
+ * make sense to show a string that no longer exists. But it is still fine
+ * to be used with modules, because when modules are unloaded, if they
+ * had tracepoints, the ring buffers are cleared too. As long as the string
+ * does not change during the life of the module, it is fine to use
+ * tracepoint_string() within a module.
+ */
+#define tracepoint_string(str) \
+ ({ \
+ static const char *___tp_str __tracepoint_string = str; \
+ ___tp_str; \
+ })
+#define __tracepoint_string __attribute__((section("__tracepoint_str")))
+#else
+/*
+ * tracepoint_string() is used to save the string address for userspace
+ * tracing tools. When tracing isn't configured, there's no need to save
+ * anything.
+ */
+# define tracepoint_string(str) str
+# define __tracepoint_string
+#endif
+
+/*
+ * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
+ * (void). "void" is a special value in a function prototype and can
+ * not be combined with other arguments. Since the DECLARE_TRACE()
+ * macro adds a data element at the beginning of the prototype,
+ * we need a way to differentiate "(void *data, proto)" from
+ * "(void *data, void)". The second prototype is invalid.
+ *
+ * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
+ * and "void *__data" as the callback prototype.
+ *
+ * DECLARE_TRACE() passes "proto" as the tracepoint prototype and
+ * "void *__data, proto" as the callback prototype.
+ */
+#define DECLARE_TRACE_NOARGS(name) \
+ __DECLARE_TRACE(name, void, , 1, void *__data, __data)
+
+#define DECLARE_TRACE(name, proto, args) \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1, \
+ PARAMS(void *__data, proto), \
+ PARAMS(__data, args))
+
+#define DECLARE_TRACE_CONDITION(name, proto, args, cond) \
+ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
+ PARAMS(void *__data, proto), \
+ PARAMS(__data, args))
+
+#define TRACE_EVENT_FLAGS(event, flag)
+
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
+#endif /* DECLARE_TRACE */
+
+#ifndef TRACE_EVENT
+/*
+ * For use with the TRACE_EVENT macro:
+ *
+ * We define a tracepoint, its arguments, its printk format
+ * and its 'fast binary record' layout.
+ *
+ * Firstly, name your tracepoint via TRACE_EVENT(name); the
+ * 'subsystem_event' notation is fine.
+ *
+ * Think about this whole construct as the
+ * 'trace_sched_switch() function' from now on.
+ *
+ *
+ * TRACE_EVENT(sched_switch,
+ *
+ * *
+ *	* The function has a regular function-arguments
+ *	* prototype; declare it via TP_PROTO():
+ * *
+ *
+ * TP_PROTO(struct rq *rq, struct task_struct *prev,
+ * struct task_struct *next),
+ *
+ * *
+ * * Define the call signature of the 'function'.
+ * * (Design sidenote: we use this instead of a
+ * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
+ * *
+ *
+ * TP_ARGS(rq, prev, next),
+ *
+ * *
+ * * Fast binary tracing: define the trace record via
+ * * TP_STRUCT__entry(). You can think about it like a
+ * * regular C structure local variable definition.
+ * *
+ * * This is how the trace record is structured and will
+ * * be saved into the ring buffer. These are the fields
+ * * that will be exposed to user-space in
+ * * /sys/kernel/debug/tracing/events/<*>/format.
+ * *
+ * * The declared 'local variable' is called '__entry'
+ * *
+ *	* __field(pid_t, prev_pid) is equivalent to a standard declaration:
+ * *
+ * * pid_t prev_pid;
+ * *
+ * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
+ * *
+ * * char prev_comm[TASK_COMM_LEN];
+ * *
+ *
+ * TP_STRUCT__entry(
+ * __array( char, prev_comm, TASK_COMM_LEN )
+ * __field( pid_t, prev_pid )
+ * __field( int, prev_prio )
+ * __array( char, next_comm, TASK_COMM_LEN )
+ * __field( pid_t, next_pid )
+ * __field( int, next_prio )
+ * ),
+ *
+ * *
+ * * Assign the entry into the trace record, by embedding
+ * * a full C statement block into TP_fast_assign(). You
+ * * can refer to the trace record as '__entry' -
+ * * otherwise you can put arbitrary C code in here.
+ * *
+ * * Note: this C code will execute every time a trace event
+ * * happens, on an active tracepoint.
+ * *
+ *
+ * TP_fast_assign(
+ * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
+ * __entry->prev_pid = prev->pid;
+ * __entry->prev_prio = prev->prio;
+ * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
+ * __entry->next_pid = next->pid;
+ * __entry->next_prio = next->prio;
+ * ),
+ *
+ * *
+ * * Formatted output of a trace record via TP_printk().
+ * * This is how the tracepoint will appear under ftrace
+ * * plugins that make use of this tracepoint.
+ * *
+ *	* (raw-binary tracing won't actually perform this step.)
+ * *
+ *
+ * TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
+ * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
+ * __entry->next_comm, __entry->next_pid, __entry->next_prio),
+ *
+ * );
+ *
+ * This macro construct is thus used for the regular printk format
+ * tracing setup; it is used to construct a function pointer based
+ * tracepoint callback (this is used by programmatic plugins and
+ * can also be used by generic instrumentation like SystemTap), and
+ * it is also used to expose a structured trace record in
+ * /sys/kernel/debug/tracing/events/.
+ *
+ * A set of (un)registration functions can be passed to the variant
+ * TRACE_EVENT_FN to perform any (un)registration work.
+ */
+
+#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)
+#define DEFINE_EVENT(template, name, proto, args) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)\
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#define DEFINE_EVENT_CONDITION(template, name, proto, \
+ args, cond) \
+ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ PARAMS(args), PARAMS(cond))
+
+#define TRACE_EVENT(name, proto, args, struct, assign, print) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#define TRACE_EVENT_FN(name, proto, args, struct, \
+ assign, print, reg, unreg) \
+ DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#define TRACE_EVENT_CONDITION(name, proto, args, cond, \
+ struct, assign, print) \
+ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
+ PARAMS(args), PARAMS(cond))
+
+#define TRACE_EVENT_FLAGS(event, flag)
+
+#define TRACE_EVENT_PERF_PERM(event, expr...)
+
+#endif /* ifdef TRACE_EVENT (see note above) */
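
For the plain DECLARE_TRACE/DEFINE_TRACE path (as opposed to TRACE_EVENT), a compact sketch of declaring a tracepoint, defining it in one translation unit, firing it, and attaching a probe; all names are illustrative:

        /* my_trace.h */
        #include <linux/tracepoint.h>

        DECLARE_TRACE(my_event,
                TP_PROTO(int cpu, unsigned long val),
                TP_ARGS(cpu, val));

        /* my_trace.c: exactly one translation unit defines the tracepoint */
        DEFINE_TRACE(my_event);

        /* call site: compiles to a static-key no-op until a probe attaches */
        void my_hot_path(int cpu, unsigned long val)
        {
                trace_my_event(cpu, val);
        }

        /* probe: note the extra "void *data" first argument */
        static void my_probe(void *data, int cpu, unsigned long val)
        {
                /* ... */
        }

        static int my_attach(void)
        {
                return register_trace_my_event(my_probe, NULL);
        }
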
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
new file mode 100644
index 000000000..11087cdd4
--- /dev/null
+++ b/include/linux/transport_class.h
@@ -0,0 +1,102 @@
+/*
+ * transport_class.h - a generic container for all transport classes
+ *
+ * Copyright (c) 2005 - James Bottomley <James.Bottomley@steeleye.com>
+ *
+ * This file is licensed under GPLv2
+ */
+
+#ifndef _TRANSPORT_CLASS_H_
+#define _TRANSPORT_CLASS_H_
+
+#include <linux/device.h>
+#include <linux/bug.h>
+#include <linux/attribute_container.h>
+
+struct transport_container;
+
+struct transport_class {
+ struct class class;
+ int (*setup)(struct transport_container *, struct device *,
+ struct device *);
+ int (*configure)(struct transport_container *, struct device *,
+ struct device *);
+ int (*remove)(struct transport_container *, struct device *,
+ struct device *);
+};
+
+#define DECLARE_TRANSPORT_CLASS(cls, nm, su, rm, cfg) \
+struct transport_class cls = { \
+ .class = { \
+ .name = nm, \
+ }, \
+ .setup = su, \
+ .remove = rm, \
+ .configure = cfg, \
+}
+
+
+struct anon_transport_class {
+ struct transport_class tclass;
+ struct attribute_container container;
+};
+
+#define DECLARE_ANON_TRANSPORT_CLASS(cls, mtch, cfg) \
+struct anon_transport_class cls = { \
+ .tclass = { \
+ .configure = cfg, \
+ }, \
+ . container = { \
+ .match = mtch, \
+ }, \
+}
+
+#define class_to_transport_class(x) \
+ container_of(x, struct transport_class, class)
+
+struct transport_container {
+ struct attribute_container ac;
+ const struct attribute_group *statistics;
+};
+
+#define attribute_container_to_transport_container(x) \
+ container_of(x, struct transport_container, ac)
+
+void transport_remove_device(struct device *);
+void transport_add_device(struct device *);
+void transport_setup_device(struct device *);
+void transport_configure_device(struct device *);
+void transport_destroy_device(struct device *);
+
+static inline void
+transport_register_device(struct device *dev)
+{
+ transport_setup_device(dev);
+ transport_add_device(dev);
+}
+
+static inline void
+transport_unregister_device(struct device *dev)
+{
+ transport_remove_device(dev);
+ transport_destroy_device(dev);
+}
+
+static inline int transport_container_register(struct transport_container *tc)
+{
+ return attribute_container_register(&tc->ac);
+}
+
+static inline void transport_container_unregister(struct transport_container *tc)
+{
+ if (unlikely(attribute_container_unregister(&tc->ac)))
+ BUG();
+}
+
+int transport_class_register(struct transport_class *);
+int anon_transport_class_register(struct anon_transport_class *);
+void transport_class_unregister(struct transport_class *);
+void anon_transport_class_unregister(struct anon_transport_class *);
+
+
+#endif
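
A condensed sketch of wiring a transport class for a hypothetical "foo" bus with the declarations above; the callbacks are empty stubs:

        #include <linux/transport_class.h>
        #include <linux/init.h>

        static int foo_setup(struct transport_container *tc,
                             struct device *parent, struct device *classdev)
        {
                return 0;       /* allocate per-device transport state here */
        }

        static int foo_configure(struct transport_container *tc,
                                 struct device *parent, struct device *classdev)
        {
                return 0;
        }

        static int foo_remove(struct transport_container *tc,
                              struct device *parent, struct device *classdev)
        {
                return 0;
        }

        static DECLARE_TRANSPORT_CLASS(foo_transport_class, "foo_transport",
                                       foo_setup, foo_remove, foo_configure);

        /* After registration, each device calls transport_register_device(). */
        static int __init foo_transport_init(void)
        {
                return transport_class_register(&foo_transport_class);
        }
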
diff --git a/include/linux/tsacct_kern.h b/include/linux/tsacct_kern.h
new file mode 100644
index 000000000..3251965bf
--- /dev/null
+++ b/include/linux/tsacct_kern.h
@@ -0,0 +1,41 @@
+/*
+ * tsacct_kern.h - kernel header for system accounting over taskstats interface
+ *
+ * Copyright (C) Jay Lan SGI
+ */
+
+#ifndef _LINUX_TSACCT_KERN_H
+#define _LINUX_TSACCT_KERN_H
+
+#include <linux/taskstats.h>
+
+#ifdef CONFIG_TASKSTATS
+extern void bacct_add_tsk(struct user_namespace *user_ns,
+ struct pid_namespace *pid_ns,
+ struct taskstats *stats, struct task_struct *tsk);
+#else
+static inline void bacct_add_tsk(struct user_namespace *user_ns,
+ struct pid_namespace *pid_ns,
+ struct taskstats *stats, struct task_struct *tsk)
+{}
+#endif /* CONFIG_TASKSTATS */
+
+#ifdef CONFIG_TASK_XACCT
+extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p);
+extern void acct_update_integrals(struct task_struct *tsk);
+extern void acct_account_cputime(struct task_struct *tsk);
+extern void acct_clear_integrals(struct task_struct *tsk);
+#else
+static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
+{}
+static inline void acct_update_integrals(struct task_struct *tsk)
+{}
+static inline void acct_account_cputime(struct task_struct *tsk)
+{}
+static inline void acct_clear_integrals(struct task_struct *tsk)
+{}
+#endif /* CONFIG_TASK_XACCT */
+
+#endif
+
+
diff --git a/include/linux/tty.h b/include/linux/tty.h
new file mode 100644
index 000000000..d76631f61
--- /dev/null
+++ b/include/linux/tty.h
@@ -0,0 +1,712 @@
+#ifndef _LINUX_TTY_H
+#define _LINUX_TTY_H
+
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/termios.h>
+#include <linux/workqueue.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_ldisc.h>
+#include <linux/mutex.h>
+#include <linux/tty_flags.h>
+#include <uapi/linux/tty.h>
+#include <linux/rwsem.h>
+#include <linux/llist.h>
+
+
+/*
+ * Lock subclasses for tty locks
+ *
+ * TTY_LOCK_NORMAL is for normal ttys and master ptys.
+ * TTY_LOCK_SLAVE is for slave ptys only.
+ *
+ * Lock subclasses are necessary for handling nested locking with pty pairs.
+ * tty locks which use nested locking:
+ *
+ * legacy_mutex - Nested tty locks are necessary for releasing pty pairs.
+ * The stable lock order is master pty first, then slave pty.
+ * termios_rwsem - The stable lock order is tty_buffer lock->termios_rwsem.
+ * Subclassing this lock enables the slave pty to hold its
+ * termios_rwsem when claiming the master tty_buffer lock.
+ * tty_buffer lock - slave ptys can claim nested buffer lock when handling
+ * signal chars. The stable lock order is slave pty, then
+ * master.
+ */
+
+enum {
+ TTY_LOCK_NORMAL = 0,
+ TTY_LOCK_SLAVE,
+};
+
+/*
+ * (Note: the *_driver.minor_start values 1, 64, 128, 192 are
+ * hardcoded at present.)
+ */
+#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */
+#define NR_UNIX98_PTY_RESERVE 1024 /* Default reserve for main devpts */
+#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */
+
+/*
+ * This character is the same as _POSIX_VDISABLE: it cannot be used as
+ * a c_cc[] character, but indicates that a particular special character
+ * isn't in use (eg VINTR has no character etc)
+ */
+#define __DISABLED_CHAR '\0'
+
+struct tty_buffer {
+ union {
+ struct tty_buffer *next;
+ struct llist_node free;
+ };
+ int used;
+ int size;
+ int commit;
+ int read;
+ int flags;
+ /* Data points here */
+ unsigned long data[0];
+};
+
+/* Values for .flags field of tty_buffer */
+#define TTYB_NORMAL 1 /* buffer has no flags buffer */
+
+static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
+{
+ return ((unsigned char *)b->data) + ofs;
+}
+
+static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
+{
+ return (char *)char_buf_ptr(b, ofs) + b->size;
+}
+
+struct tty_bufhead {
+ struct tty_buffer *head; /* Queue head */
+ struct work_struct work;
+ struct mutex lock;
+ atomic_t priority;
+ struct tty_buffer sentinel;
+ struct llist_head free; /* Free queue head */
+ atomic_t mem_used; /* In-use buffers excluding free list */
+ int mem_limit;
+ struct tty_buffer *tail; /* Active buffer */
+};
+/*
+ * When a break, frame error, or parity error happens, these codes are
+ * stuffed into the flags buffer.
+ */
+#define TTY_NORMAL 0
+#define TTY_BREAK 1
+#define TTY_FRAME 2
+#define TTY_PARITY 3
+#define TTY_OVERRUN 4
+
+#define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR])
+#define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT])
+#define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE])
+#define KILL_CHAR(tty) ((tty)->termios.c_cc[VKILL])
+#define EOF_CHAR(tty) ((tty)->termios.c_cc[VEOF])
+#define TIME_CHAR(tty) ((tty)->termios.c_cc[VTIME])
+#define MIN_CHAR(tty) ((tty)->termios.c_cc[VMIN])
+#define SWTC_CHAR(tty) ((tty)->termios.c_cc[VSWTC])
+#define START_CHAR(tty) ((tty)->termios.c_cc[VSTART])
+#define STOP_CHAR(tty) ((tty)->termios.c_cc[VSTOP])
+#define SUSP_CHAR(tty) ((tty)->termios.c_cc[VSUSP])
+#define EOL_CHAR(tty) ((tty)->termios.c_cc[VEOL])
+#define REPRINT_CHAR(tty) ((tty)->termios.c_cc[VREPRINT])
+#define DISCARD_CHAR(tty) ((tty)->termios.c_cc[VDISCARD])
+#define WERASE_CHAR(tty) ((tty)->termios.c_cc[VWERASE])
+#define LNEXT_CHAR(tty) ((tty)->termios.c_cc[VLNEXT])
+#define EOL2_CHAR(tty) ((tty)->termios.c_cc[VEOL2])
+
+#define _I_FLAG(tty, f) ((tty)->termios.c_iflag & (f))
+#define _O_FLAG(tty, f) ((tty)->termios.c_oflag & (f))
+#define _C_FLAG(tty, f) ((tty)->termios.c_cflag & (f))
+#define _L_FLAG(tty, f) ((tty)->termios.c_lflag & (f))
+
+#define I_IGNBRK(tty) _I_FLAG((tty), IGNBRK)
+#define I_BRKINT(tty) _I_FLAG((tty), BRKINT)
+#define I_IGNPAR(tty) _I_FLAG((tty), IGNPAR)
+#define I_PARMRK(tty) _I_FLAG((tty), PARMRK)
+#define I_INPCK(tty) _I_FLAG((tty), INPCK)
+#define I_ISTRIP(tty) _I_FLAG((tty), ISTRIP)
+#define I_INLCR(tty) _I_FLAG((tty), INLCR)
+#define I_IGNCR(tty) _I_FLAG((tty), IGNCR)
+#define I_ICRNL(tty) _I_FLAG((tty), ICRNL)
+#define I_IUCLC(tty) _I_FLAG((tty), IUCLC)
+#define I_IXON(tty) _I_FLAG((tty), IXON)
+#define I_IXANY(tty) _I_FLAG((tty), IXANY)
+#define I_IXOFF(tty) _I_FLAG((tty), IXOFF)
+#define I_IMAXBEL(tty) _I_FLAG((tty), IMAXBEL)
+#define I_IUTF8(tty) _I_FLAG((tty), IUTF8)
+
+#define O_OPOST(tty) _O_FLAG((tty), OPOST)
+#define O_OLCUC(tty) _O_FLAG((tty), OLCUC)
+#define O_ONLCR(tty) _O_FLAG((tty), ONLCR)
+#define O_OCRNL(tty) _O_FLAG((tty), OCRNL)
+#define O_ONOCR(tty) _O_FLAG((tty), ONOCR)
+#define O_ONLRET(tty) _O_FLAG((tty), ONLRET)
+#define O_OFILL(tty) _O_FLAG((tty), OFILL)
+#define O_OFDEL(tty) _O_FLAG((tty), OFDEL)
+#define O_NLDLY(tty) _O_FLAG((tty), NLDLY)
+#define O_CRDLY(tty) _O_FLAG((tty), CRDLY)
+#define O_TABDLY(tty) _O_FLAG((tty), TABDLY)
+#define O_BSDLY(tty) _O_FLAG((tty), BSDLY)
+#define O_VTDLY(tty) _O_FLAG((tty), VTDLY)
+#define O_FFDLY(tty) _O_FLAG((tty), FFDLY)
+
+#define C_BAUD(tty) _C_FLAG((tty), CBAUD)
+#define C_CSIZE(tty) _C_FLAG((tty), CSIZE)
+#define C_CSTOPB(tty) _C_FLAG((tty), CSTOPB)
+#define C_CREAD(tty) _C_FLAG((tty), CREAD)
+#define C_PARENB(tty) _C_FLAG((tty), PARENB)
+#define C_PARODD(tty) _C_FLAG((tty), PARODD)
+#define C_HUPCL(tty) _C_FLAG((tty), HUPCL)
+#define C_CLOCAL(tty) _C_FLAG((tty), CLOCAL)
+#define C_CIBAUD(tty) _C_FLAG((tty), CIBAUD)
+#define C_CRTSCTS(tty) _C_FLAG((tty), CRTSCTS)
+#define C_CMSPAR(tty) _C_FLAG((tty), CMSPAR)
+
+#define L_ISIG(tty) _L_FLAG((tty), ISIG)
+#define L_ICANON(tty) _L_FLAG((tty), ICANON)
+#define L_XCASE(tty) _L_FLAG((tty), XCASE)
+#define L_ECHO(tty) _L_FLAG((tty), ECHO)
+#define L_ECHOE(tty) _L_FLAG((tty), ECHOE)
+#define L_ECHOK(tty) _L_FLAG((tty), ECHOK)
+#define L_ECHONL(tty) _L_FLAG((tty), ECHONL)
+#define L_NOFLSH(tty) _L_FLAG((tty), NOFLSH)
+#define L_TOSTOP(tty) _L_FLAG((tty), TOSTOP)
+#define L_ECHOCTL(tty) _L_FLAG((tty), ECHOCTL)
+#define L_ECHOPRT(tty) _L_FLAG((tty), ECHOPRT)
+#define L_ECHOKE(tty) _L_FLAG((tty), ECHOKE)
+#define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO)
+#define L_PENDIN(tty) _L_FLAG((tty), PENDIN)
+#define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN)
+#define L_EXTPROC(tty) _L_FLAG((tty), EXTPROC)
+
+struct device;
+struct signal_struct;
+
+/*
+ * Port level information. Each device keeps its own port level information
+ * so provide a common structure for those ports wanting to use common support
+ * routines.
+ *
+ * The tty port has a different lifetime to the tty so must be kept apart.
+ * In addition be careful as tty -> port mappings are valid for the life
+ * of the tty object but in many cases port -> tty mappings are valid only
+ * until a hangup so don't use the wrong path.
+ */
+
+struct tty_port;
+
+struct tty_port_operations {
+ /* Return 1 if the carrier is raised */
+ int (*carrier_raised)(struct tty_port *port);
+ /* Control the DTR line */
+ void (*dtr_rts)(struct tty_port *port, int raise);
+ /* Called when the last close completes or a hangup finishes
+ IFF the port was initialized. Do not use to free resources. Called
+ under the port mutex to serialize against activate/shutdowns */
+ void (*shutdown)(struct tty_port *port);
+ /* Called under the port mutex from tty_port_open, serialized using
+ the port mutex */
+ /* FIXME: long term getting the tty argument *out* of this would be
+ good for consoles */
+ int (*activate)(struct tty_port *port, struct tty_struct *tty);
+ /* Called on the final put of a port */
+ void (*destruct)(struct tty_port *port);
+};
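+
+/*
+ * Editorial example (not part of the original header): a minimal, hypothetical
+ * tty_port_operations table for hardware without modem-control lines. Only
+ * the hooks a driver actually needs have to be filled in; the rest may stay
+ * NULL.
+ */
+static int example_port_carrier_raised(struct tty_port *port)
+{
+	return 1;	/* pretend DCD is always asserted */
+}
+
+static void example_port_dtr_rts(struct tty_port *port, int raise)
+{
+	/* no DTR/RTS lines on this hypothetical device */
+}
+
+static const struct tty_port_operations example_port_ops = {
+	.carrier_raised	= example_port_carrier_raised,
+	.dtr_rts	= example_port_dtr_rts,
+};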
+
+struct tty_port {
+ struct tty_bufhead buf; /* Locked internally */
+ struct tty_struct *tty; /* Back pointer */
+ struct tty_struct *itty; /* internal back ptr */
+ const struct tty_port_operations *ops; /* Port operations */
+ spinlock_t lock; /* Lock protecting tty field */
+ int blocked_open; /* Waiting to open */
+ int count; /* Usage count */
+ wait_queue_head_t open_wait; /* Open waiters */
+ wait_queue_head_t close_wait; /* Close waiters */
+ wait_queue_head_t delta_msr_wait; /* Modem status change */
+ unsigned long flags; /* TTY flags ASY_*/
+ unsigned char console:1, /* port is a console */
+ low_latency:1; /* optional: tune for latency */
+ struct mutex mutex; /* Locking */
+ struct mutex buf_mutex; /* Buffer alloc lock */
+ unsigned char *xmit_buf; /* Optional buffer */
+ unsigned int close_delay; /* Close port delay */
+ unsigned int closing_wait; /* Delay for output */
+ int drain_delay; /* Set to zero if no pure time
+ based drain is needed else
+ set to size of fifo */
+ struct kref kref; /* Ref counter */
+};
+
+/*
+ * Where all of the state associated with a tty is kept while the tty
+ * is open. Since the termios state should be kept even if the tty
+ * has been closed --- for things like the baud rate, etc --- it is
+ * not stored here, but rather a pointer to the real state is stored
+ * here. Possible the winsize structure should have the same
+ * here. Possibly the winsize structure should have the same
+ * most often used by a windowing system, which will set the correct
+ * size each time the window is created or resized anyway.
+ * - TYT, 9/14/92
+ */
+
+struct tty_operations;
+
+struct tty_struct {
+ int magic;
+ struct kref kref;
+ struct device *dev;
+ struct tty_driver *driver;
+ const struct tty_operations *ops;
+ int index;
+
+ /* Protects ldisc changes: Lock tty not pty */
+ struct ld_semaphore ldisc_sem;
+ struct tty_ldisc *ldisc;
+
+ struct mutex atomic_write_lock;
+ struct mutex legacy_mutex;
+ struct mutex throttle_mutex;
+ struct rw_semaphore termios_rwsem;
+ struct mutex winsize_mutex;
+ spinlock_t ctrl_lock;
+ spinlock_t flow_lock;
+ /* Termios values are protected by the termios rwsem */
+ struct ktermios termios, termios_locked;
+ struct termiox *termiox; /* May be NULL for unsupported */
+ char name[64];
+ struct pid *pgrp; /* Protected by ctrl lock */
+ struct pid *session;
+ unsigned long flags;
+ int count;
+ struct winsize winsize; /* winsize_mutex */
+ unsigned long stopped:1, /* flow_lock */
+ flow_stopped:1,
+ unused:BITS_PER_LONG - 2;
+ int hw_stopped;
+ unsigned long ctrl_status:8, /* ctrl_lock */
+ packet:1,
+ unused_ctrl:BITS_PER_LONG - 9;
+ unsigned int receive_room; /* Bytes free for queue */
+ int flow_change;
+
+ struct tty_struct *link;
+ struct fasync_struct *fasync;
+ int alt_speed; /* For magic substitution of 38400 bps */
+ wait_queue_head_t write_wait;
+ wait_queue_head_t read_wait;
+ struct work_struct hangup_work;
+ void *disc_data;
+ void *driver_data;
+ struct list_head tty_files;
+
+#define N_TTY_BUF_SIZE 4096
+
+ int closing;
+ unsigned char *write_buf;
+ int write_cnt;
+ /* If the tty has a pending do_SAK, queue it here - akpm */
+ struct work_struct SAK_work;
+ struct tty_port *port;
+};
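+
+/*
+ * Editorial usage sketch (not part of the original header): the termios
+ * accessor macros defined earlier (L_*, I_*, *_CHAR) simply read fields of
+ * tty->termios, so callers need termios_rwsem held for a stable view. The
+ * helper below is hypothetical and only illustrates the idiom.
+ */
+static inline int example_is_canon_eof(struct tty_struct *tty, unsigned char c)
+{
+	return L_ICANON(tty) && c != __DISABLED_CHAR && c == EOF_CHAR(tty);
+}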
+
+/* Each of a tty's open files has private_data pointing to tty_file_private */
+struct tty_file_private {
+ struct tty_struct *tty;
+ struct file *file;
+ struct list_head list;
+};
+
+/* tty magic number */
+#define TTY_MAGIC 0x5401
+
+/*
+ * These bits are used in the flags field of the tty structure.
+ *
+ * So that interrupts won't be able to mess up the queues,
+ * copy_to_cooked must be atomic with respect to itself, as must
+ * tty->write. Thus, you must use the inline functions set_bit() and
+ * clear_bit() to make things atomic.
+ */
+#define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */
+#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */
+#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
+#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
+#define TTY_DEBUG 4 /* Debugging */
+#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
+#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */
+#define TTY_LDISC_OPEN 11 /* Line discipline is open */
+#define TTY_PTY_LOCK 16 /* pty private */
+#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
+#define TTY_HUPPED 18 /* Post driver->hangup() */
+#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
+
+#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
+
+/* Values for tty->flow_change */
+#define TTY_THROTTLE_SAFE 1
+#define TTY_UNTHROTTLE_SAFE 2
+
+static inline void __tty_set_flow_change(struct tty_struct *tty, int val)
+{
+ tty->flow_change = val;
+}
+
+static inline void tty_set_flow_change(struct tty_struct *tty, int val)
+{
+ tty->flow_change = val;
+ smp_mb();
+}
+
+#ifdef CONFIG_TTY
+extern void console_init(void);
+extern void tty_kref_put(struct tty_struct *tty);
+extern struct pid *tty_get_pgrp(struct tty_struct *tty);
+extern void tty_vhangup_self(void);
+extern void disassociate_ctty(int priv);
+extern dev_t tty_devnum(struct tty_struct *tty);
+extern void proc_clear_tty(struct task_struct *p);
+extern struct tty_struct *get_current_tty(void);
+/* tty_io.c */
+extern int __init tty_init(void);
+#else
+static inline void console_init(void)
+{ }
+static inline void tty_kref_put(struct tty_struct *tty)
+{ }
+static inline struct pid *tty_get_pgrp(struct tty_struct *tty)
+{ return NULL; }
+static inline void tty_vhangup_self(void)
+{ }
+static inline void disassociate_ctty(int priv)
+{ }
+static inline dev_t tty_devnum(struct tty_struct *tty)
+{ return 0; }
+static inline void proc_clear_tty(struct task_struct *p)
+{ }
+static inline struct tty_struct *get_current_tty(void)
+{ return NULL; }
+/* tty_io.c */
+static inline int __init tty_init(void)
+{ return 0; }
+#endif
+
+extern void tty_write_flush(struct tty_struct *);
+
+extern struct ktermios tty_std_termios;
+
+extern int vcs_init(void);
+
+extern struct class *tty_class;
+
+/**
+ * tty_kref_get - get a tty reference
+ * @tty: tty device
+ *
+ * Return a new reference to a tty object. The caller must hold
+ * sufficient locks/counts to ensure that their existing reference cannot
+ * go away.
+ */
+
+static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
+{
+ if (tty)
+ kref_get(&tty->kref);
+ return tty;
+}
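+
+/*
+ * Editorial usage sketch (not part of the original header): tty_kref_get()
+ * pairs with tty_kref_put(). The hypothetical helper below shows the usual
+ * pattern of pinning the tty across an operation that may outlive the
+ * caller's own reference.
+ */
+static inline void example_pin_and_use(struct tty_struct *tty)
+{
+	struct tty_struct *held = tty_kref_get(tty);
+
+	if (!held)
+		return;
+	/* 'held' stays valid here even if the original reference is dropped */
+	tty_kref_put(held);
+}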
+
+extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
+ const char *routine);
+extern char *tty_name(struct tty_struct *tty, char *buf);
+extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
+extern int tty_check_change(struct tty_struct *tty);
+extern void __stop_tty(struct tty_struct *tty);
+extern void stop_tty(struct tty_struct *tty);
+extern void __start_tty(struct tty_struct *tty);
+extern void start_tty(struct tty_struct *tty);
+extern int tty_register_driver(struct tty_driver *driver);
+extern int tty_unregister_driver(struct tty_driver *driver);
+extern struct device *tty_register_device(struct tty_driver *driver,
+ unsigned index, struct device *dev);
+extern struct device *tty_register_device_attr(struct tty_driver *driver,
+ unsigned index, struct device *device,
+ void *drvdata,
+ const struct attribute_group **attr_grp);
+extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
+extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
+ int buflen);
+extern void tty_write_message(struct tty_struct *tty, char *msg);
+extern int tty_send_xchar(struct tty_struct *tty, char ch);
+extern int tty_put_char(struct tty_struct *tty, unsigned char c);
+extern int tty_chars_in_buffer(struct tty_struct *tty);
+extern int tty_write_room(struct tty_struct *tty);
+extern void tty_driver_flush_buffer(struct tty_struct *tty);
+extern void tty_throttle(struct tty_struct *tty);
+extern void tty_unthrottle(struct tty_struct *tty);
+extern int tty_throttle_safe(struct tty_struct *tty);
+extern int tty_unthrottle_safe(struct tty_struct *tty);
+extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
+extern void tty_driver_remove_tty(struct tty_driver *driver,
+ struct tty_struct *tty);
+extern void tty_free_termios(struct tty_struct *tty);
+extern int is_current_pgrp_orphaned(void);
+extern int is_ignored(int sig);
+extern int tty_signal(int sig, struct tty_struct *tty);
+extern void tty_hangup(struct tty_struct *tty);
+extern void tty_vhangup(struct tty_struct *tty);
+extern int tty_hung_up_p(struct file *filp);
+extern void do_SAK(struct tty_struct *tty);
+extern void __do_SAK(struct tty_struct *tty);
+extern void no_tty(void);
+extern void tty_buffer_free_all(struct tty_port *port);
+extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
+extern void tty_buffer_init(struct tty_port *port);
+extern void tty_buffer_set_lock_subclass(struct tty_port *port);
+extern speed_t tty_termios_baud_rate(struct ktermios *termios);
+extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
+extern void tty_termios_encode_baud_rate(struct ktermios *termios,
+ speed_t ibaud, speed_t obaud);
+extern void tty_encode_baud_rate(struct tty_struct *tty,
+ speed_t ibaud, speed_t obaud);
+
+/**
+ * tty_get_baud_rate - get tty bit rates
+ * @tty: tty to query
+ *
+ * Returns the baud rate as an integer for this terminal. The
+ * termios lock must be held by the caller and the terminal bit
+ * flags may be updated.
+ *
+ * Locking: none
+ */
+static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
+{
+ return tty_termios_baud_rate(&tty->termios);
+}
+
+extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
+extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
+extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
+
+extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
+extern void tty_ldisc_deref(struct tty_ldisc *);
+extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
+extern void tty_ldisc_hangup(struct tty_struct *tty);
+extern const struct file_operations tty_ldiscs_proc_fops;
+
+extern void tty_wakeup(struct tty_struct *tty);
+extern void tty_ldisc_flush(struct tty_struct *tty);
+
+extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
+extern void tty_default_fops(struct file_operations *fops);
+extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
+extern int tty_alloc_file(struct file *file);
+extern void tty_add_file(struct tty_struct *tty, struct file *file);
+extern void tty_free_file(struct file *file);
+extern void free_tty_struct(struct tty_struct *tty);
+extern void deinitialize_tty_struct(struct tty_struct *tty);
+extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
+extern int tty_release(struct inode *inode, struct file *filp);
+extern int tty_init_termios(struct tty_struct *tty);
+extern int tty_standard_install(struct tty_driver *driver,
+ struct tty_struct *tty);
+
+extern struct mutex tty_mutex;
+extern spinlock_t tty_files_lock;
+
+#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
+
+extern void tty_port_init(struct tty_port *port);
+extern void tty_port_link_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index);
+extern struct device *tty_port_register_device(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device);
+extern struct device *tty_port_register_device_attr(struct tty_port *port,
+ struct tty_driver *driver, unsigned index,
+ struct device *device, void *drvdata,
+ const struct attribute_group **attr_grp);
+extern int tty_port_alloc_xmit_buf(struct tty_port *port);
+extern void tty_port_free_xmit_buf(struct tty_port *port);
+extern void tty_port_destroy(struct tty_port *port);
+extern void tty_port_put(struct tty_port *port);
+
+static inline struct tty_port *tty_port_get(struct tty_port *port)
+{
+ if (port && kref_get_unless_zero(&port->kref))
+ return port;
+ return NULL;
+}
+
+/* If the cts flow control is enabled, return true. */
+static inline bool tty_port_cts_enabled(struct tty_port *port)
+{
+ return port->flags & ASYNC_CTS_FLOW;
+}
+
+extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
+extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
+extern int tty_port_carrier_raised(struct tty_port *port);
+extern void tty_port_raise_dtr_rts(struct tty_port *port);
+extern void tty_port_lower_dtr_rts(struct tty_port *port);
+extern void tty_port_hangup(struct tty_port *port);
+extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal);
+extern void tty_port_tty_wakeup(struct tty_port *port);
+extern int tty_port_block_til_ready(struct tty_port *port,
+ struct tty_struct *tty, struct file *filp);
+extern int tty_port_close_start(struct tty_port *port,
+ struct tty_struct *tty, struct file *filp);
+extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty);
+extern void tty_port_close(struct tty_port *port,
+ struct tty_struct *tty, struct file *filp);
+extern int tty_port_install(struct tty_port *port, struct tty_driver *driver,
+ struct tty_struct *tty);
+extern int tty_port_open(struct tty_port *port,
+ struct tty_struct *tty, struct file *filp);
+static inline int tty_port_users(struct tty_port *port)
+{
+ return port->count + port->blocked_open;
+}
+
+extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
+extern int tty_unregister_ldisc(int disc);
+extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
+extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
+extern void tty_ldisc_release(struct tty_struct *tty);
+extern void tty_ldisc_init(struct tty_struct *tty);
+extern void tty_ldisc_deinit(struct tty_struct *tty);
+extern void tty_ldisc_begin(void);
+
+static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+ char *f, int count)
+{
+ if (ld->ops->receive_buf2)
+ count = ld->ops->receive_buf2(ld->tty, p, f, count);
+ else {
+ count = min_t(int, count, ld->tty->receive_room);
+ if (count)
+ ld->ops->receive_buf(ld->tty, p, f, count);
+ }
+ return count;
+}
+
+
+/* n_tty.c */
+extern struct tty_ldisc_ops tty_ldisc_N_TTY;
+extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+
+/* tty_audit.c */
+#ifdef CONFIG_AUDIT
+extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
+ size_t size, unsigned icanon);
+extern void tty_audit_exit(void);
+extern void tty_audit_fork(struct signal_struct *sig);
+extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
+extern void tty_audit_push(struct tty_struct *tty);
+extern int tty_audit_push_current(void);
+#else
+static inline void tty_audit_add_data(struct tty_struct *tty,
+ unsigned char *data, size_t size, unsigned icanon)
+{
+}
+static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+{
+}
+static inline void tty_audit_exit(void)
+{
+}
+static inline void tty_audit_fork(struct signal_struct *sig)
+{
+}
+static inline void tty_audit_push(struct tty_struct *tty)
+{
+}
+static inline int tty_audit_push_current(void)
+{
+ return 0;
+}
+#endif
+
+/* tty_ioctl.c */
+extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+
+/* vt.c */
+
+extern int vt_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg);
+
+extern long vt_compat_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg);
+
+/* tty_mutex.c */
+/* functions for preparation of BKL removal */
+extern void __lockfunc tty_lock(struct tty_struct *tty);
+extern void __lockfunc tty_unlock(struct tty_struct *tty);
+extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
+extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
+extern void tty_set_lock_subclass(struct tty_struct *tty);
+/*
+ * This shall be called only where the BTM is held (like in close()).
+ *
+ * We need this to ensure nobody waits for us to finish while we are waiting.
+ * Without this we were encountering system stalls.
+ *
+ * This should indeed be removed once the BTM itself is removed.
+ *
+ * Locking: BTM required. Nobody is allowed to hold port->mutex.
+ */
+static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
+ long timeout)
+{
+ tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */
+ tty_wait_until_sent(tty, timeout);
+ tty_lock(tty);
+}
+
+/*
+ * wait_event_interruptible_tty -- wait for a condition with the tty lock held
+ *
+ * The condition we are waiting for might take a long time to
+ * become true, or might depend on another thread taking the
+ * BTM. In either case, we need to drop the BTM to guarantee
+ * forward progress. This is a leftover from the conversion
+ * from the BKL and should eventually get removed as the BTM
+ * falls out of use.
+ *
+ * Do not use in new code.
+ */
+#define wait_event_interruptible_tty(tty, wq, condition) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = __wait_event_interruptible_tty(tty, wq, \
+ condition); \
+ __ret; \
+})
+
+#define __wait_event_interruptible_tty(tty, wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ tty_unlock(tty); \
+ schedule(); \
+ tty_lock(tty))
+
+#ifdef CONFIG_PROC_FS
+extern void proc_tty_register_driver(struct tty_driver *);
+extern void proc_tty_unregister_driver(struct tty_driver *);
+#else
+static inline void proc_tty_register_driver(struct tty_driver *d) {}
+static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
+#endif
+
+#endif
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
new file mode 100644
index 000000000..92e337c18
--- /dev/null
+++ b/include/linux/tty_driver.h
@@ -0,0 +1,440 @@
+#ifndef _LINUX_TTY_DRIVER_H
+#define _LINUX_TTY_DRIVER_H
+
+/*
+ * This structure defines the interface between the low-level tty
+ * driver and the tty routines. The following routines can be
+ * defined; unless noted otherwise, they are optional, and can be
+ * filled in with a null pointer.
+ *
+ * struct tty_struct * (*lookup)(struct tty_driver *self, int idx)
+ *
+ * Return the tty device corresponding to idx, NULL if there is not
+ * one currently in use and an ERR_PTR value on error. Called under
+ * tty_mutex (for now!)
+ *
+ * Optional method. Default behaviour is to use the ttys array
+ *
+ * int (*install)(struct tty_driver *self, struct tty_struct *tty)
+ *
+ * Install a new tty into the tty driver internal tables. Used in
+ * conjunction with lookup and remove methods.
+ *
+ * Optional method. Default behaviour is to use the ttys array
+ *
+ * void (*remove)(struct tty_driver *self, struct tty_struct *tty)
+ *
+ * Remove a closed tty from the tty driver internal tables. Used in
+ * conjunction with lookup and remove methods.
+ *
+ * Optional method. Default behaviour is to use the ttys array
+ *
+ * int (*open)(struct tty_struct * tty, struct file * filp);
+ *
+ * This routine is called when a particular tty device is opened.
+ * This routine is mandatory; if this routine is not filled in,
+ * the attempted open will fail with ENODEV.
+ *
+ * Required method. Called with tty lock held.
+ *
+ * void (*close)(struct tty_struct * tty, struct file * filp);
+ *
+ * This routine is called when a particular tty device is closed.
+ * Note: called even if the corresponding open() failed.
+ *
+ * Required method. Called with tty lock held.
+ *
+ * void (*shutdown)(struct tty_struct * tty);
+ *
+ * This routine is called under the tty lock when a particular tty device
+ * is closed for the last time. It executes before the tty resources
+ * are freed so may execute while another function holds a tty kref.
+ *
+ * void (*cleanup)(struct tty_struct * tty);
+ *
+ * This routine is called asynchronously when a particular tty device
+ * is closed for the last time freeing up the resources. This is
+ * actually the second part of shutdown for routines that might sleep.
+ *
+ *
+ * int (*write)(struct tty_struct * tty,
+ * const unsigned char *buf, int count);
+ *
+ * This routine is called by the kernel to write a series of
+ * characters to the tty device. The characters may come from
+ * user space or kernel space. This routine will return the
+ * number of characters actually accepted for writing.
+ *
+ * Optional: Required for writable devices.
+ *
+ * int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ *
+ * This routine is called by the kernel to write a single
+ * character to the tty device. If the kernel uses this routine,
+ * it must call the flush_chars() routine (if defined) when it is
+ * done stuffing characters into the driver. If there is no room
+ * in the queue, the character is ignored.
+ *
+ * Optional: Kernel will use the write method if not provided.
+ *
+ * Note: Do not call this function directly, call tty_put_char
+ *
+ * void (*flush_chars)(struct tty_struct *tty);
+ *
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using put_char().
+ *
+ * Optional:
+ *
+ * Note: Do not call this function directly, call tty_driver_flush_chars
+ *
+ * int (*write_room)(struct tty_struct *tty);
+ *
+ * This routine returns the numbers of characters the tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if the output flow
+ * control is in effect.
+ *
+ * Required if write method is provided else not needed.
+ *
+ * Note: Do not call this function directly, call tty_write_room
+ *
+ * int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+ *
+ * This routine allows the tty driver to implement
+ * device-specific ioctls. If the ioctl number passed in cmd
+ * is not recognized by the driver, it should return ENOIOCTLCMD.
+ *
+ * Optional
+ *
+ * long (*compat_ioctl)(struct tty_struct *tty,
+ * unsigned int cmd, unsigned long arg);
+ *
+ * Implement ioctl processing for a 32-bit process on a 64-bit system.
+ *
+ * Optional
+ *
+ * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ *
+ * This routine allows the tty driver to be notified when
+ * device's termios settings have changed.
+ *
+ * Optional: Called under the termios lock
+ *
+ *
+ * void (*set_ldisc)(struct tty_struct *tty);
+ *
+ * This routine allows the tty driver to be notified when the
+ * device's line discipline is changed.
+ *
+ * Optional: Called under BKL (currently)
+ *
+ * void (*throttle)(struct tty_struct * tty);
+ *
+ * This routine notifies the tty driver that input buffers for
+ * the line discipline are close to full, and it should somehow
+ * signal that no more characters should be sent to the tty.
+ *
+ * Optional: Always invoke via tty_throttle(), called under the
+ * termios lock.
+ *
+ * void (*unthrottle)(struct tty_struct * tty);
+ *
+ * This routine notifies the tty driver that it should signal
+ * that characters can now be sent to the tty without fear of
+ * overrunning the input buffers of the line disciplines.
+ *
+ * Optional: Always invoke via tty_unthrottle(), called under the
+ * termios lock.
+ *
+ * void (*stop)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should stop
+ * outputting characters to the tty device.
+ *
+ * Called with ->flow_lock held. Serialized with start() method.
+ *
+ * Optional:
+ *
+ * Note: Call stop_tty not this method.
+ *
+ * void (*start)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should resume sending
+ * characters to the tty device.
+ *
+ * Called with ->flow_lock held. Serialized with stop() method.
+ *
+ * Optional:
+ *
+ * Note: Call start_tty not this method.
+ *
+ * void (*hangup)(struct tty_struct *tty);
+ *
+ * This routine notifies the tty driver that it should hang up the
+ * tty device.
+ *
+ * Optional:
+ *
+ * Called with tty lock held.
+ *
+ * int (*break_ctl)(struct tty_struct *tty, int state);
+ *
+ * This optional routine requests the tty driver to turn on or
+ * off BREAK status on the RS-232 port. If state is -1,
+ * then the BREAK status should be turned on; if state is 0, then
+ * BREAK should be turned off.
+ *
+ * If this routine is implemented, the high-level tty driver will
+ * handle the following ioctls: TCSBRK, TCSBRKP, TIOCSBRK,
+ * TIOCCBRK.
+ *
+ * If the driver sets TTY_DRIVER_HARDWARE_BREAK then the interface
+ * will also be called with actual times and the hardware is expected
+ * to do the delay work itself. 0 and -1 are still used for on/off.
+ *
+ * Optional: Required for TCSBRK/BRKP/etc handling.
+ *
+ * void (*wait_until_sent)(struct tty_struct *tty, int timeout);
+ *
+ * This routine waits until the device has written out all of the
+ * characters in its transmitter FIFO.
+ *
+ * Optional: If not provided the device is assumed to have no FIFO
+ *
+ * Note: Usually correct to call tty_wait_until_sent
+ *
+ * void (*send_xchar)(struct tty_struct *tty, char ch);
+ *
+ * This routine is used to send a high-priority XON/XOFF
+ * character to the device.
+ *
+ * Optional: If not provided then the write method is called under
+ * the atomic write lock to keep it serialized with the ldisc.
+ *
+ * int (*resize)(struct tty_struct *tty, struct winsize *ws)
+ *
+ * Called when a termios request is issued which changes the
+ * requested terminal geometry.
+ *
+ * Optional: the default action is to update the termios structure
+ * without error. This is usually the correct behaviour. Drivers should
+ * not force errors here if they are not resizable objects (eg a serial
+ * line). See tty_do_resize() if you need to wrap the standard method
+ * in your own logic - the usual case.
+ *
+ * void (*set_termiox)(struct tty_struct *tty, struct termiox *new);
+ *
+ * Called when the device receives a termiox based ioctl. Passes down
+ * the requested data from user space. This method will not be invoked
+ * unless the tty also has a valid tty->termiox pointer.
+ *
+ * Optional: Called under the termios lock
+ *
+ * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount);
+ *
+ * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
+ * structure to complete. This method is optional and will only be called
+ * if provided (otherwise EINVAL will be returned).
+ */
+
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/termios.h>
+
+struct tty_struct;
+struct tty_driver;
+struct serial_icounter_struct;
+
+struct tty_operations {
+ struct tty_struct * (*lookup)(struct tty_driver *driver,
+ struct inode *inode, int idx);
+ int (*install)(struct tty_driver *driver, struct tty_struct *tty);
+ void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
+ int (*open)(struct tty_struct * tty, struct file * filp);
+ void (*close)(struct tty_struct * tty, struct file * filp);
+ void (*shutdown)(struct tty_struct *tty);
+ void (*cleanup)(struct tty_struct *tty);
+ int (*write)(struct tty_struct * tty,
+ const unsigned char *buf, int count);
+ int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ void (*flush_chars)(struct tty_struct *tty);
+ int (*write_room)(struct tty_struct *tty);
+ int (*chars_in_buffer)(struct tty_struct *tty);
+ int (*ioctl)(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg);
+ long (*compat_ioctl)(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ void (*throttle)(struct tty_struct * tty);
+ void (*unthrottle)(struct tty_struct * tty);
+ void (*stop)(struct tty_struct *tty);
+ void (*start)(struct tty_struct *tty);
+ void (*hangup)(struct tty_struct *tty);
+ int (*break_ctl)(struct tty_struct *tty, int state);
+ void (*flush_buffer)(struct tty_struct *tty);
+ void (*set_ldisc)(struct tty_struct *tty);
+ void (*wait_until_sent)(struct tty_struct *tty, int timeout);
+ void (*send_xchar)(struct tty_struct *tty, char ch);
+ int (*tiocmget)(struct tty_struct *tty);
+ int (*tiocmset)(struct tty_struct *tty,
+ unsigned int set, unsigned int clear);
+ int (*resize)(struct tty_struct *tty, struct winsize *ws);
+ int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
+ int (*get_icount)(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
+#ifdef CONFIG_CONSOLE_POLL
+ int (*poll_init)(struct tty_driver *driver, int line, char *options);
+ int (*poll_get_char)(struct tty_driver *driver, int line);
+ void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
+#endif
+ const struct file_operations *proc_fops;
+};
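+
+/*
+ * Editorial example (not part of the original header): a skeletal,
+ * hypothetical tty_operations table. open/close are required; write and
+ * write_room are required for a writable device. The stub bodies only mark
+ * where real driver work would go.
+ */
+static int example_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	return 0;	/* a real driver would start the hardware here */
+}
+
+static void example_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	/* a real driver would stop the hardware here */
+}
+
+static int example_tty_write(struct tty_struct *tty,
+			     const unsigned char *buf, int count)
+{
+	return count;	/* pretend every character was accepted */
+}
+
+static int example_tty_write_room(struct tty_struct *tty)
+{
+	return 4096;	/* hypothetical fixed-size output buffer */
+}
+
+static const struct tty_operations example_tty_ops = {
+	.open		= example_tty_open,
+	.close		= example_tty_close,
+	.write		= example_tty_write,
+	.write_room	= example_tty_write_room,
+};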
+
+struct tty_driver {
+ int magic; /* magic number for this structure */
+ struct kref kref; /* Reference management */
+ struct cdev *cdevs;
+ struct module *owner;
+ const char *driver_name;
+ const char *name;
+ int name_base; /* offset of printed name */
+ int major; /* major device number */
+ int minor_start; /* start of minor device number */
+ unsigned int num; /* number of devices allocated */
+ short type; /* type of tty driver */
+ short subtype; /* subtype of tty driver */
+ struct ktermios init_termios; /* Initial termios */
+ unsigned long flags; /* tty driver flags */
+ struct proc_dir_entry *proc_entry; /* /proc fs entry */
+ struct tty_driver *other; /* only used for the PTY driver */
+
+ /*
+ * Pointer to the tty data structures
+ */
+ struct tty_struct **ttys;
+ struct tty_port **ports;
+ struct ktermios **termios;
+ void *driver_state;
+
+ /*
+ * Driver methods
+ */
+
+ const struct tty_operations *ops;
+ struct list_head tty_drivers;
+};
+
+extern struct list_head tty_drivers;
+
+extern struct tty_driver *__tty_alloc_driver(unsigned int lines,
+ struct module *owner, unsigned long flags);
+extern void put_tty_driver(struct tty_driver *driver);
+extern void tty_set_operations(struct tty_driver *driver,
+ const struct tty_operations *op);
+extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
+
+extern void tty_driver_kref_put(struct tty_driver *driver);
+
+/* Use TTY_DRIVER_* flags below */
+#define tty_alloc_driver(lines, flags) \
+ __tty_alloc_driver(lines, THIS_MODULE, flags)
+
+/*
+ * DEPRECATED Do not use this in new code, use tty_alloc_driver instead.
+ * (And change the return value checks.)
+ */
+static inline struct tty_driver *alloc_tty_driver(unsigned int lines)
+{
+ struct tty_driver *ret = tty_alloc_driver(lines, 0);
+ if (IS_ERR(ret))
+ return NULL;
+ return ret;
+}
+
+static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
+{
+ kref_get(&d->kref);
+ return d;
+}
+
+/* tty driver magic number */
+#define TTY_DRIVER_MAGIC 0x5402
+
+/*
+ * tty driver flags
+ *
+ * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the
+ * termios setting when the last process has closed the device.
+ * Used for PTY's, in particular.
+ *
+ * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will
+ * guarantee not to set any special character handling
+ * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR ||
+ * !INPCK)). That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the
+ * line driver, it won't do so. This allows the line driver to
+ * optimize for this case if this flag is set. (Note that there
+ * is also a promise, if the above case is true, not to signal
+ * overruns, either.)
+ *
+ * TTY_DRIVER_DYNAMIC_DEV --- if set, the individual tty devices need
+ * to be registered with a call to tty_register_device() when the
+ * device is found in the system and unregistered with a call to
+ * tty_unregister_device() so the devices will show up
+ * properly in sysfs. If not set, driver->num entries will be
+ * created by the tty core in sysfs when tty_register_driver() is
+ * called. This is to be used by drivers that have tty devices
+ * that can appear and disappear while the main tty driver is
+ * registered with the tty core.
+ *
+ * TTY_DRIVER_DEVPTS_MEM -- don't use the standard arrays, instead
+ * use dynamic memory keyed through the devpts filesystem. This
+ * is only applicable to the pty driver.
+ *
+ * TTY_DRIVER_HARDWARE_BREAK -- hardware handles break signals. Pass
+ * the requested timeout to the driver instead of using a simple
+ * on/off interface.
+ *
+ * TTY_DRIVER_DYNAMIC_ALLOC -- do not allocate structures which are
+ * needed per line for this driver as it would waste memory.
+ * The driver will take care.
+ *
+ * TTY_DRIVER_UNNUMBERED_NODE -- do not create numbered /dev nodes. In
+ * other words create /dev/ttyprintk and not /dev/ttyprintk0.
+ * Applicable only when a driver for a single tty device is
+ * being allocated.
+ */
+#define TTY_DRIVER_INSTALLED 0x0001
+#define TTY_DRIVER_RESET_TERMIOS 0x0002
+#define TTY_DRIVER_REAL_RAW 0x0004
+#define TTY_DRIVER_DYNAMIC_DEV 0x0008
+#define TTY_DRIVER_DEVPTS_MEM 0x0010
+#define TTY_DRIVER_HARDWARE_BREAK 0x0020
+#define TTY_DRIVER_DYNAMIC_ALLOC 0x0040
+#define TTY_DRIVER_UNNUMBERED_NODE 0x0080
+
+/* tty driver types */
+#define TTY_DRIVER_TYPE_SYSTEM 0x0001
+#define TTY_DRIVER_TYPE_CONSOLE 0x0002
+#define TTY_DRIVER_TYPE_SERIAL 0x0003
+#define TTY_DRIVER_TYPE_PTY 0x0004
+#define TTY_DRIVER_TYPE_SCC 0x0005 /* scc driver */
+#define TTY_DRIVER_TYPE_SYSCONS 0x0006
+
+/* system subtypes (magic, used by tty_io.c) */
+#define SYSTEM_TYPE_TTY 0x0001
+#define SYSTEM_TYPE_CONSOLE 0x0002
+#define SYSTEM_TYPE_SYSCONS 0x0003
+#define SYSTEM_TYPE_SYSPTMX 0x0004
+
+/* pty subtypes (magic, used by tty_io.c) */
+#define PTY_TYPE_MASTER 0x0001
+#define PTY_TYPE_SLAVE 0x0002
+
+/* serial subtype definitions */
+#define SERIAL_TYPE_NORMAL 1
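+
+/*
+ * Editorial usage sketch (not part of the original header): the recommended
+ * allocation path, combining tty_alloc_driver() with the TTY_DRIVER_* flags
+ * and type/subtype constants defined above. Names and the line count are
+ * hypothetical; tty_register_driver() itself is declared in <linux/tty.h>.
+ */
+static inline struct tty_driver *example_setup_driver(const struct tty_operations *ops)
+{
+	struct tty_driver *drv;
+	int ret;
+
+	drv = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV);
+	if (IS_ERR(drv))
+		return drv;	/* ERR_PTR on failure, unlike the deprecated helper */
+
+	drv->driver_name = "example";
+	drv->name = "ttyEX";
+	drv->type = TTY_DRIVER_TYPE_SERIAL;
+	drv->subtype = SERIAL_TYPE_NORMAL;
+	tty_set_operations(drv, ops);
+
+	ret = tty_register_driver(drv);
+	if (ret) {
+		put_tty_driver(drv);
+		return ERR_PTR(ret);
+	}
+	return drv;
+}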
+
+#endif /* #ifdef _LINUX_TTY_DRIVER_H */
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
new file mode 100644
index 000000000..c28dd523f
--- /dev/null
+++ b/include/linux/tty_flip.h
@@ -0,0 +1,41 @@
+#ifndef _LINUX_TTY_FLIP_H
+#define _LINUX_TTY_FLIP_H
+
+extern int tty_buffer_set_limit(struct tty_port *port, int limit);
+extern int tty_buffer_space_avail(struct tty_port *port);
+extern int tty_buffer_request_room(struct tty_port *port, size_t size);
+extern int tty_insert_flip_string_flags(struct tty_port *port,
+ const unsigned char *chars, const char *flags, size_t size);
+extern int tty_insert_flip_string_fixed_flag(struct tty_port *port,
+ const unsigned char *chars, char flag, size_t size);
+extern int tty_prepare_flip_string(struct tty_port *port,
+ unsigned char **chars, size_t size);
+extern void tty_flip_buffer_push(struct tty_port *port);
+void tty_schedule_flip(struct tty_port *port);
+
+static inline int tty_insert_flip_char(struct tty_port *port,
+ unsigned char ch, char flag)
+{
+ struct tty_buffer *tb = port->buf.tail;
+ int change;
+
+ change = (tb->flags & TTYB_NORMAL) && (flag != TTY_NORMAL);
+ if (!change && tb->used < tb->size) {
+ if (~tb->flags & TTYB_NORMAL)
+ *flag_buf_ptr(tb, tb->used) = flag;
+ *char_buf_ptr(tb, tb->used++) = ch;
+ return 1;
+ }
+ return tty_insert_flip_string_flags(port, &ch, &flag, 1);
+}
+
+static inline int tty_insert_flip_string(struct tty_port *port,
+ const unsigned char *chars, size_t size)
+{
+ return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
+}
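+
+/*
+ * Editorial usage sketch (not part of the original header): a typical RX
+ * path. 'port', 'ch' and 'overrun' are hypothetical inputs (e.g. read from
+ * hardware in an interrupt handler). Received characters are queued with a
+ * flag byte and then pushed to the line discipline.
+ */
+static inline void example_rx_char(struct tty_port *port,
+				   unsigned char ch, bool overrun)
+{
+	tty_insert_flip_char(port, ch, TTY_NORMAL);
+	if (overrun)
+		tty_insert_flip_char(port, 0, TTY_OVERRUN);
+	tty_flip_buffer_push(port);
+}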
+
+extern void tty_buffer_lock_exclusive(struct tty_port *port);
+extern void tty_buffer_unlock_exclusive(struct tty_port *port);
+
+#endif /* _LINUX_TTY_FLIP_H */
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
new file mode 100644
index 000000000..00c9d688d
--- /dev/null
+++ b/include/linux/tty_ldisc.h
@@ -0,0 +1,233 @@
+#ifndef _LINUX_TTY_LDISC_H
+#define _LINUX_TTY_LDISC_H
+
+/*
+ * This structure defines the interface between the tty line discipline
+ * implementation and the tty routines. The following routines can be
+ * defined; unless noted otherwise, they are optional, and can be
+ * filled in with a null pointer.
+ *
+ * int (*open)(struct tty_struct *);
+ *
+ * This function is called when the line discipline is associated
+ * with the tty. The line discipline can use this as an
+ * opportunity to initialize any state needed by the ldisc routines.
+ *
+ * void (*close)(struct tty_struct *);
+ *
+ * This function is called when the line discipline is being
+ * shutdown, either because the tty is being closed or because
+ * the tty is being changed to use a new line discipline
+ *
+ * void (*flush_buffer)(struct tty_struct *tty);
+ *
+ * This function instructs the line discipline to clear its
+ * buffers of any input characters it may have queued to be
+ * delivered to the user mode process.
+ *
+ * ssize_t (*chars_in_buffer)(struct tty_struct *tty);
+ *
+ * This function returns the number of input characters the line
+ * discipline may have queued up to be delivered to the user mode
+ * process.
+ *
+ * ssize_t (*read)(struct tty_struct * tty, struct file * file,
+ * unsigned char * buf, size_t nr);
+ *
+ * This function is called when the user requests to read from
+ * the tty. The line discipline will return whatever characters
+ * it has buffered up for the user. If this function is not
+ * defined, the user will receive an EIO error.
+ *
+ * ssize_t (*write)(struct tty_struct * tty, struct file * file,
+ * const unsigned char * buf, size_t nr);
+ *
+ * This function is called when the user requests to write to the
+ * tty. The line discipline will deliver the characters to the
+ * low-level tty device for transmission, optionally performing
+ * some processing on the characters first. If this function is
+ * not defined, the user will receive an EIO error.
+ *
+ * int (*ioctl)(struct tty_struct * tty, struct file * file,
+ * unsigned int cmd, unsigned long arg);
+ *
+ * This function is called when the user requests an ioctl which
+ * is not handled by the tty layer or the low-level tty driver.
+ * It is intended for ioctls which affect line discipline
+ * operation. Note that the search order for ioctls is (1) tty
+ * layer, (2) tty low-level driver, (3) line discipline. So a
+ * low-level driver can "grab" an ioctl request before the line
+ * discipline has a chance to see it.
+ *
+ * long (*compat_ioctl)(struct tty_struct * tty, struct file * file,
+ * unsigned int cmd, unsigned long arg);
+ *
+ * Process ioctl calls from 32-bit process on 64-bit system
+ *
+ * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ *
+ * This function notifies the line discipline that a change has
+ * been made to the termios structure.
+ *
+ * int (*poll)(struct tty_struct * tty, struct file * file,
+ * poll_table *wait);
+ *
+ * This function is called when a user attempts to select/poll on a
+ * tty device. It is solely the responsibility of the line
+ * discipline to handle poll requests.
+ *
+ * void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
+ * char *fp, int count);
+ *
+ * This function is called by the low-level tty driver to send
+ * characters received by the hardware to the line discipline for
+ * processing. <cp> is a pointer to the buffer of input
+ * characters received by the device. <fp> is a pointer to a
+ * buffer of flag bytes which indicate whether a character was
+ * received with a parity error, etc. <fp> may be NULL to indicate
+ * all data received is TTY_NORMAL.
+ *
+ * void (*write_wakeup)(struct tty_struct *);
+ *
+ * This function is called by the low-level tty driver to signal
+ * that the line discipline should try to send more characters to the
+ * low-level driver for transmission. If the line discipline does
+ * not have any more data to send, it can just return. If the line
+ * discipline does have some data to send, please schedule a tasklet
+ * or workqueue to do the real data transfer. Do not send data in
+ * this hook; it may lead to a deadlock.
+ *
+ * int (*hangup)(struct tty_struct *)
+ *
+ * Called on a hangup. Tells the discipline that it should
+ * cease I/O to the tty driver. Can sleep. The driver should
+ * seek to perform this action quickly but should wait until
+ * any pending driver I/O is completed.
+ *
+ * void (*fasync)(struct tty_struct *, int on)
+ *
+ * Notify line discipline when signal-driven I/O is enabled or
+ * disabled.
+ *
+ * void (*dcd_change)(struct tty_struct *tty, unsigned int status)
+ *
+ * Tells the discipline that the DCD pin has changed its status.
+ * Used exclusively by the N_PPS (Pulse-Per-Second) line discipline.
+ *
+ * int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
+ * char *fp, int count);
+ *
+ * This function is called by the low-level tty driver to send
+ * characters received by the hardware to the line discipline for
+ * processing. <cp> is a pointer to the buffer of input
+ * characters received by the device. <fp> is a pointer to a
+ * buffer of flag bytes which indicate whether a character was
+ * received with a parity error, etc. <fp> may be NULL to indicate
+ * all data received is TTY_NORMAL.
+ * If assigned, prefer this function for automatic flow control.
+ */
+
+#include <linux/fs.h>
+#include <linux/wait.h>
+
+
+/*
+ * the semaphore definition
+ */
+struct ld_semaphore {
+ long count;
+ raw_spinlock_t wait_lock;
+ unsigned int wait_readers;
+ struct list_head read_wait;
+ struct list_head write_wait;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+};
+
+extern void __init_ldsem(struct ld_semaphore *sem, const char *name,
+ struct lock_class_key *key);
+
+#define init_ldsem(sem) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __init_ldsem((sem), #sem, &__key); \
+} while (0)
+
+
+extern int ldsem_down_read(struct ld_semaphore *sem, long timeout);
+extern int ldsem_down_read_trylock(struct ld_semaphore *sem);
+extern int ldsem_down_write(struct ld_semaphore *sem, long timeout);
+extern int ldsem_down_write_trylock(struct ld_semaphore *sem);
+extern void ldsem_up_read(struct ld_semaphore *sem);
+extern void ldsem_up_write(struct ld_semaphore *sem);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass,
+ long timeout);
+extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
+ long timeout);
+#else
+# define ldsem_down_read_nested(sem, subclass, timeout) \
+ ldsem_down_read(sem, timeout)
+# define ldsem_down_write_nested(sem, subclass, timeout) \
+ ldsem_down_write(sem, timeout)
+#endif
+
+
+struct tty_ldisc_ops {
+ int magic;
+ char *name;
+ int num;
+ int flags;
+
+ /*
+ * The following routines are called from above.
+ */
+ int (*open)(struct tty_struct *);
+ void (*close)(struct tty_struct *);
+ void (*flush_buffer)(struct tty_struct *tty);
+ ssize_t (*chars_in_buffer)(struct tty_struct *tty);
+ ssize_t (*read)(struct tty_struct *tty, struct file *file,
+ unsigned char __user *buf, size_t nr);
+ ssize_t (*write)(struct tty_struct *tty, struct file *file,
+ const unsigned char *buf, size_t nr);
+ int (*ioctl)(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+ long (*compat_ioctl)(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
+ unsigned int (*poll)(struct tty_struct *, struct file *,
+ struct poll_table_struct *);
+ int (*hangup)(struct tty_struct *tty);
+
+ /*
+ * The following routines are called from below.
+ */
+ void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
+ char *fp, int count);
+ void (*write_wakeup)(struct tty_struct *);
+ void (*dcd_change)(struct tty_struct *, unsigned int);
+ void (*fasync)(struct tty_struct *tty, int on);
+ int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
+ char *fp, int count);
+
+ struct module *owner;
+
+ int refcount;
+};
+
+struct tty_ldisc {
+ struct tty_ldisc_ops *ops;
+ struct tty_struct *tty;
+};
+
+#define TTY_LDISC_MAGIC 0x5403
+
+#define LDISC_FLAG_DEFINED 0x00000001
+
+#define MODULE_ALIAS_LDISC(ldisc) \
+ MODULE_ALIAS("tty-ldisc-" __stringify(ldisc))
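+
+/*
+ * Editorial example (not part of the original header): a skeletal,
+ * hypothetical line-discipline ops table. Registration is done with
+ * tty_register_ldisc(N_xxx, &ops), declared in <linux/tty.h>; the discipline
+ * number and all names below are illustrative only.
+ */
+static int example_ldisc_open(struct tty_struct *tty)
+{
+	return 0;	/* allocate per-tty state here in a real discipline */
+}
+
+static void example_ldisc_receive_buf(struct tty_struct *tty,
+				      const unsigned char *cp, char *fp,
+				      int count)
+{
+	/* a real discipline would consume 'count' bytes from 'cp' here */
+}
+
+static struct tty_ldisc_ops example_ldisc_ops = {
+	.magic		= TTY_LDISC_MAGIC,
+	.name		= "example",
+	.open		= example_ldisc_open,
+	.receive_buf	= example_ldisc_receive_buf,
+};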
+
+#endif /* _LINUX_TTY_LDISC_H */
diff --git a/include/linux/tuxonice.h b/include/linux/tuxonice.h
new file mode 100644
index 000000000..67b05a750
--- /dev/null
+++ b/include/linux/tuxonice.h
@@ -0,0 +1,48 @@
+/*
+ * include/linux/tuxonice.h
+ *
+ * Copyright (C) 2015 Nigel Cunningham (nigel at tuxonice net)
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef INCLUDE_LINUX_TUXONICE_H
+#define INCLUDE_LINUX_TUXONICE_H
+#ifdef CONFIG_TOI_INCREMENTAL
+extern void toi_set_logbuf_untracked(void);
+extern int toi_make_writable(pgd_t *pgd, unsigned long address);
+
+static inline int toi_incremental_support(void)
+{
+ return 1;
+}
+
+/* Copy Before Write */
+struct toi_cbw {
+ unsigned long pfn;
+ void *virt;
+ struct toi_cbw *next;
+};
+
+struct toi_cbw_state {
+ bool active; /* Is a fault handler running? */
+ bool enabled; /* Are we doing copy before write? */
+ int size; /* The number of pages allocated */
+ struct toi_cbw *first, *next, *last; /* Pointers to the data structure */
+};
+
+#define CBWS_PER_PAGE (PAGE_SIZE / sizeof(struct toi_cbw))
+DECLARE_PER_CPU(struct toi_cbw_state, toi_cbw_states);
+#else
+#define toi_set_logbuf_untracked() do { } while(0)
+static inline int toi_make_writable(pgd_t *pgd, unsigned long addr)
+{
+ return 0;
+}
+
+static inline int toi_incremental_support(void)
+{
+ return 0;
+}
+#endif
+#endif
diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h
new file mode 100644
index 000000000..eb5b74a57
--- /dev/null
+++ b/include/linux/typecheck.h
@@ -0,0 +1,24 @@
+#ifndef TYPECHECK_H_INCLUDED
+#define TYPECHECK_H_INCLUDED
+
+/*
+ * Check at compile time that something is of a particular type.
+ * Always evaluates to 1 so you may use it easily in comparisons.
+ */
+#define typecheck(type,x) \
+({ type __dummy; \
+ typeof(x) __dummy2; \
+ (void)(&__dummy == &__dummy2); \
+ 1; \
+})
+
+/*
+ * Check at compile time that 'function' is a certain type, or is a pointer
+ * to that type (needs to use typedef for the function type.)
+ */
+#define typecheck_fn(type,function) \
+({ typeof(type) __tmp = function; \
+ (void)__tmp; \
+})
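+
+/*
+ * Editorial usage sketch (not part of the original header): typecheck() is
+ * typically used inside other macros to reject arguments of the wrong type
+ * at compile time. The macro below is hypothetical.
+ */
+#define example_assign_ulong(var, val)		\
+({						\
+	typecheck(unsigned long, var);		\
+	(var) = (val);				\
+})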
+
+#endif /* TYPECHECK_H_INCLUDED */
diff --git a/include/linux/types.h b/include/linux/types.h
new file mode 100644
index 000000000..8715287c3
--- /dev/null
+++ b/include/linux/types.h
@@ -0,0 +1,219 @@
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+
+#define __EXPORTED_HEADERS__
+#include <uapi/linux/types.h>
+
+#ifndef __ASSEMBLY__
+
+#define DECLARE_BITMAP(name,bits) \
+ unsigned long name[BITS_TO_LONGS(bits)]
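+
+/*
+ * Editorial usage sketch (not part of the original header): DECLARE_BITMAP()
+ * expands to an unsigned long array sized to hold 'bits' bits (BITS_TO_LONGS
+ * comes from <linux/bitops.h>). A hypothetical 256-source pending mask:
+ */
+struct example_irq_state {
+	DECLARE_BITMAP(pending, 256);	/* manipulated with set_bit()/test_bit() */
+};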
+
+typedef __u32 __kernel_dev_t;
+
+typedef __kernel_fd_set fd_set;
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef unsigned short umode_t;
+typedef __u32 nlink_t;
+typedef __kernel_off_t off_t;
+typedef __kernel_pid_t pid_t;
+typedef __kernel_daddr_t daddr_t;
+typedef __kernel_key_t key_t;
+typedef __kernel_suseconds_t suseconds_t;
+typedef __kernel_timer_t timer_t;
+typedef __kernel_clockid_t clockid_t;
+typedef __kernel_mqd_t mqd_t;
+
+typedef _Bool bool;
+
+typedef __kernel_uid32_t uid_t;
+typedef __kernel_gid32_t gid_t;
+typedef __kernel_uid16_t uid16_t;
+typedef __kernel_gid16_t gid16_t;
+
+typedef unsigned long uintptr_t;
+
+#ifdef CONFIG_UID16
+/* This is defined by include/asm-{arch}/posix_types.h */
+typedef __kernel_old_uid_t old_uid_t;
+typedef __kernel_old_gid_t old_gid_t;
+#endif /* CONFIG_UID16 */
+
+#if defined(__GNUC__)
+typedef __kernel_loff_t loff_t;
+#endif
+
+/*
+ * The following typedefs are also protected by individual ifdefs for
+ * historical reasons:
+ */
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef __kernel_size_t size_t;
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+typedef __kernel_ssize_t ssize_t;
+#endif
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __kernel_ptrdiff_t ptrdiff_t;
+#endif
+
+#ifndef _TIME_T
+#define _TIME_T
+typedef __kernel_time_t time_t;
+#endif
+
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef __kernel_clock_t clock_t;
+#endif
+
+#ifndef _CADDR_T
+#define _CADDR_T
+typedef __kernel_caddr_t caddr_t;
+#endif
+
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+#ifndef __BIT_TYPES_DEFINED__
+#define __BIT_TYPES_DEFINED__
+
+typedef __u8 u_int8_t;
+typedef __s8 int8_t;
+typedef __u16 u_int16_t;
+typedef __s16 int16_t;
+typedef __u32 u_int32_t;
+typedef __s32 int32_t;
+
+#endif /* !(__BIT_TYPES_DEFINED__) */
+
+typedef __u8 uint8_t;
+typedef __u16 uint16_t;
+typedef __u32 uint32_t;
+
+#if defined(__GNUC__)
+typedef __u64 uint64_t;
+typedef __u64 u_int64_t;
+typedef __s64 int64_t;
+#endif
+
+/* this is a special 64bit data type that is 8-byte aligned */
+#define aligned_u64 __u64 __attribute__((aligned(8)))
+#define aligned_be64 __be64 __attribute__((aligned(8)))
+#define aligned_le64 __le64 __attribute__((aligned(8)))
+
+/**
+ * The type used for indexing onto a disc or disc partition.
+ *
+ * Linux always considers sectors to be 512 bytes long independently
+ * of the device's real block size.
+ *
+ * blkcnt_t is the type of the inode's block count.
+ */
+#ifdef CONFIG_LBDAF
+typedef u64 sector_t;
+typedef u64 blkcnt_t;
+#else
+typedef unsigned long sector_t;
+typedef unsigned long blkcnt_t;
+#endif
+
+/*
+ * The type of an index into the pagecache.
+ */
+#define pgoff_t unsigned long
+
+/*
+ * A dma_addr_t can hold any valid DMA address, i.e., any address returned
+ * by the DMA API.
+ *
+ * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
+ * bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
+ * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
+ * so they don't care about the size of the actual bus addresses.
+ */
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif
+
+typedef unsigned __bitwise__ gfp_t;
+typedef unsigned __bitwise__ fmode_t;
+typedef unsigned __bitwise__ oom_flags_t;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+typedef u64 phys_addr_t;
+#else
+typedef u32 phys_addr_t;
+#endif
+
+typedef phys_addr_t resource_size_t;
+
+/*
+ * This type is the placeholder for a hardware interrupt number. It has to be
+ * big enough to enclose whatever representation is used by a given platform.
+ */
+typedef unsigned long irq_hw_number_t;
+
+typedef struct {
+ int counter;
+} atomic_t;
+
+#ifdef CONFIG_64BIT
+typedef struct {
+ long counter;
+} atomic64_t;
+#endif
+
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+struct hlist_head {
+ struct hlist_node *first;
+};
+
+struct hlist_node {
+ struct hlist_node *next, **pprev;
+};
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+/**
+ * struct callback_head - callback structure for use with RCU and task_work
+ * @next: next update requests in a list
+ * @func: actual update function to call after the grace period.
+ */
+struct callback_head {
+ struct callback_head *next;
+ void (*func)(struct callback_head *head);
+};
+#define rcu_head callback_head
+
+/* clocksource cycle base type */
+typedef u64 cycle_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
new file mode 100644
index 000000000..4b4439e75
--- /dev/null
+++ b/include/linux/u64_stats_sync.h
@@ -0,0 +1,147 @@
+#ifndef _LINUX_U64_STATS_SYNC_H
+#define _LINUX_U64_STATS_SYNC_H
+
+/*
+ * To properly implement 64bits network statistics on 32bit and 64bit hosts,
+ * we provide a synchronization point, that is a noop on 64bit or UP kernels.
+ *
+ * Key points :
+ * 1) Use a seqcount on SMP 32bits, with low overhead.
+ * 2) Whole thing is a noop on 64bit arches or UP kernels.
+ * 3) Write side must ensure mutual exclusion or one seqcount update could
+ * be lost, thus blocking readers forever.
+ * If this synchronization point is not a mutex, but a spinlock or
+ * spinlock_bh() or disable_bh() :
+ * 3.1) Write side should not sleep.
+ * 3.2) Write side should not allow preemption.
+ * 3.3) If applicable, interrupts should be disabled.
+ *
+ * 4) If a reader fetches several counters, there is no guarantee that the values
+ * are mutually consistent (remember point 1: this is a noop on 64bit arches anyway).
+ *
+ * 5) readers are allowed to sleep or be preempted/interrupted : They perform
+ * pure reads. But if they have to fetch many values, it's better to not allow
+ * preemptions/interruptions to avoid many retries.
+ *
+ * 6) If counter might be written by an interrupt, readers should block interrupts.
+ * (On UP, there is no seqcount_t protection, a reader allowing interrupts could
+ * read partial values)
+ *
+ * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and
+ * u64_stats_fetch_retry_irq() helpers
+ *
+ * Usage :
+ *
+ * A stats producer (writer) should use the following template, provided it
+ * already has exclusive access to the counters (a lock is already taken, or
+ * per-CPU data is used in a non-preemptible context):
+ *
+ * spin_lock_bh(...) or other synchronization to get exclusive access
+ * ...
+ * u64_stats_update_begin(&stats->syncp);
+ * stats->bytes64 += len; // non atomic operation
+ * stats->packets64++; // non atomic operation
+ * u64_stats_update_end(&stats->syncp);
+ *
+ * A consumer (reader) should use the following template to get a consistent
+ * snapshot of each variable (but with no guarantee across several of them):
+ *
+ * u64 tbytes, tpackets;
+ * unsigned int start;
+ *
+ * do {
+ * start = u64_stats_fetch_begin(&stats->syncp);
+ * tbytes = stats->bytes64; // non atomic operation
+ * tpackets = stats->packets64; // non atomic operation
+ * } while (u64_stats_fetch_retry(&stats->syncp, start));
+ *
+ *
+ * See drivers/net/loopback.c for an example that uses per-CPU containers
+ * in a BH-disabled context.
+ */
+#include <linux/seqlock.h>
+
+struct u64_stats_sync {
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ seqcount_t seq;
+#endif
+};
+
+
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+# define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
+#else
+# define u64_stats_init(syncp) do { } while (0)
+#endif
+
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_begin(&syncp->seq);
+#endif
+}
+
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ write_seqcount_end(&syncp->seq);
+#endif
+}
+
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ preempt_disable();
+#endif
+ return 0;
+#endif
+}
+
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+ preempt_enable();
+#endif
+ return false;
+#endif
+}
+
+/*
+ * In case irq handlers can update u64 counters, readers can use following helpers
+ * - SMP 32bit arches use seqcount protection, irq safe.
+ * - UP 32bit must disable irqs.
+ * - 64bit have no problem atomically reading u64 values, irq safe.
+ */
+static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_begin(&syncp->seq);
+#else
+#if BITS_PER_LONG==32
+ local_irq_disable();
+#endif
+ return 0;
+#endif
+}
+
+static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
+ unsigned int start)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+ return read_seqcount_retry(&syncp->seq, start);
+#else
+#if BITS_PER_LONG==32
+ local_irq_enable();
+#endif
+ return false;
+#endif
+}
+
+#endif /* _LINUX_U64_STATS_SYNC_H */
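A minimal sketch of the writer/reader pattern the header comment above describes. The pcpu_stats structure and its fields are hypothetical; a real driver would also call u64_stats_init() on each instance at setup time.

#include <linux/u64_stats_sync.h>

/* Hypothetical 64-bit counter block protected by a u64_stats_sync. */
struct pcpu_stats {
	u64			bytes;
	u64			packets;
	struct u64_stats_sync	syncp;
};

/* Writer: caller already has exclusive access (lock held, or per-CPU with BH off). */
static void pcpu_stats_add(struct pcpu_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->bytes += len;	/* non-atomic 64-bit updates */
	s->packets++;
	u64_stats_update_end(&s->syncp);
}

/* Reader: retry until a consistent snapshot of each counter is observed. */
static void pcpu_stats_read(const struct pcpu_stats *s, u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*bytes = s->bytes;
		*packets = s->packets;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}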
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
new file mode 100644
index 000000000..ecd3319da
--- /dev/null
+++ b/include/linux/uaccess.h
@@ -0,0 +1,110 @@
+#ifndef __LINUX_UACCESS_H__
+#define __LINUX_UACCESS_H__
+
+#include <linux/preempt.h>
+#include <asm/uaccess.h>
+
+/*
+ * These routines enable/disable the pagefault handler: while it is
+ * disabled, the handler will not take any locks and will go straight
+ * to the fixup table.
+ *
+ * They closely resemble the preempt_disable/enable calls and are, in
+ * fact, identical; this is because there is currently no other way to
+ * make the pagefault handlers behave this way. So we do disable
+ * preemption, even though we don't necessarily care about that side effect.
+ */
+static inline void pagefault_disable(void)
+{
+ preempt_count_inc();
+ /*
+ * make sure to have issued the store before a pagefault
+ * can hit.
+ */
+ barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+#ifndef CONFIG_PREEMPT
+ /*
+ * make sure to issue those last loads/stores before enabling
+ * the pagefault handler again.
+ */
+ barrier();
+ preempt_count_dec();
+#else
+ preempt_enable();
+#endif
+}
+
+#ifndef ARCH_HAS_NOCACHE_UACCESS
+
+static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ return __copy_from_user_inatomic(to, from, n);
+}
+
+static inline unsigned long __copy_from_user_nocache(void *to,
+ const void __user *from, unsigned long n)
+{
+ return __copy_from_user(to, from, n);
+}
+
+#endif /* ARCH_HAS_NOCACHE_UACCESS */
+
+/**
+ * probe_kernel_address(): safely attempt to read from a location
+ * @addr: address to read from - its type is typeof(retval) *
+ * @retval: read into this variable
+ *
+ * Safely read from address @addr into variable @retval. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ * We ensure that the __get_user() is executed in atomic context so that
+ * do_page_fault() doesn't attempt to take mmap_sem. This makes
+ * probe_kernel_address() suitable for use within regions where the caller
+ * already holds mmap_sem, or other locks which nest inside mmap_sem.
+ * This must be a macro because __get_user() needs to know the types of the
+ * args.
+ *
+ * We don't include enough header files to be able to do the set_fs(). We
+ * require that the probe_kernel_address() caller will do that.
+ */
+#define probe_kernel_address(addr, retval) \
+ ({ \
+ long ret; \
+ mm_segment_t old_fs = get_fs(); \
+ \
+ set_fs(KERNEL_DS); \
+ pagefault_disable(); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+ pagefault_enable(); \
+ set_fs(old_fs); \
+ ret; \
+ })
+
+/*
+ * probe_kernel_read(): safely attempt to read from a location
+ * @dst: pointer to the buffer that shall take the data
+ * @src: address to read from
+ * @size: size of the data chunk
+ *
+ * Safely read from address @src to the buffer at @dst. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long __probe_kernel_read(void *dst, const void *src, size_t size);
+
+/*
+ * probe_kernel_write(): safely attempt to write to a location
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+
+#endif /* __LINUX_UACCESS_H__ */
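A minimal sketch of the probe_kernel_read() interface declared above: it copies from a kernel address that may be unmapped and reports -EFAULT instead of oopsing. The wrapper name and calling context are hypothetical.

#include <linux/uaccess.h>

/* Sketch: read a word from a possibly unmapped kernel address. */
static long example_peek_kernel(const void *addr, unsigned long *out)
{
	/* returns 0 on success, -EFAULT if the access faulted */
	return probe_kernel_read(out, addr, sizeof(*out));
}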
diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h
new file mode 100644
index 000000000..2e9ee4d1c
--- /dev/null
+++ b/include/linux/ucb1400.h
@@ -0,0 +1,165 @@
+/*
+ * Register definitions and functions for:
+ * Philips UCB1400 driver
+ *
+ * Based on ucb1400_ts:
+ * Author: Nicolas Pitre
+ * Created: September 25, 2006
+ * Copyright: MontaVista Software, Inc.
+ *
+ * Splitting done by: Marek Vasut <marek.vasut@gmail.com>
+ * If something doesn't work and it worked before the split, e-mail me;
+ * please don't bother Nicolas ;-)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This code is heavily based on ucb1x00-*.c copyrighted by Russell King
+ * covering the UCB1100, UCB1200 and UCB1300. Support for the UCB1400 has
+ * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request.
+ */
+
+#ifndef _LINUX__UCB1400_H
+#define _LINUX__UCB1400_H
+
+#include <sound/ac97_codec.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+/*
+ * UCB1400 AC-link registers
+ */
+
+#define UCB_IO_DATA 0x5a
+#define UCB_IO_DIR 0x5c
+#define UCB_IE_RIS 0x5e
+#define UCB_IE_FAL 0x60
+#define UCB_IE_STATUS 0x62
+#define UCB_IE_CLEAR 0x62
+#define UCB_IE_ADC (1 << 11)
+#define UCB_IE_TSPX (1 << 12)
+
+#define UCB_TS_CR 0x64
+#define UCB_TS_CR_TSMX_POW (1 << 0)
+#define UCB_TS_CR_TSPX_POW (1 << 1)
+#define UCB_TS_CR_TSMY_POW (1 << 2)
+#define UCB_TS_CR_TSPY_POW (1 << 3)
+#define UCB_TS_CR_TSMX_GND (1 << 4)
+#define UCB_TS_CR_TSPX_GND (1 << 5)
+#define UCB_TS_CR_TSMY_GND (1 << 6)
+#define UCB_TS_CR_TSPY_GND (1 << 7)
+#define UCB_TS_CR_MODE_INT (0 << 8)
+#define UCB_TS_CR_MODE_PRES (1 << 8)
+#define UCB_TS_CR_MODE_POS (2 << 8)
+#define UCB_TS_CR_BIAS_ENA (1 << 11)
+#define UCB_TS_CR_TSPX_LOW (1 << 12)
+#define UCB_TS_CR_TSMX_LOW (1 << 13)
+
+#define UCB_ADC_CR 0x66
+#define UCB_ADC_SYNC_ENA (1 << 0)
+#define UCB_ADC_VREFBYP_CON (1 << 1)
+#define UCB_ADC_INP_TSPX (0 << 2)
+#define UCB_ADC_INP_TSMX (1 << 2)
+#define UCB_ADC_INP_TSPY (2 << 2)
+#define UCB_ADC_INP_TSMY (3 << 2)
+#define UCB_ADC_INP_AD0 (4 << 2)
+#define UCB_ADC_INP_AD1 (5 << 2)
+#define UCB_ADC_INP_AD2 (6 << 2)
+#define UCB_ADC_INP_AD3 (7 << 2)
+#define UCB_ADC_EXT_REF (1 << 5)
+#define UCB_ADC_START (1 << 7)
+#define UCB_ADC_ENA (1 << 15)
+
+#define UCB_ADC_DATA 0x68
+#define UCB_ADC_DAT_VALID (1 << 15)
+
+#define UCB_FCSR 0x6c
+#define UCB_FCSR_AVE (1 << 12)
+
+#define UCB_ADC_DAT_MASK 0x3ff
+
+#define UCB_ID 0x7e
+#define UCB_ID_1400 0x4304
+
+struct ucb1400_gpio {
+ struct gpio_chip gc;
+ struct snd_ac97 *ac97;
+ int gpio_offset;
+ int (*gpio_setup)(struct device *dev, int ngpio);
+ int (*gpio_teardown)(struct device *dev, int ngpio);
+};
+
+struct ucb1400_ts {
+ struct input_dev *ts_idev;
+ int id;
+ int irq;
+ struct snd_ac97 *ac97;
+ wait_queue_head_t ts_wait;
+ bool stopped;
+};
+
+struct ucb1400 {
+ struct platform_device *ucb1400_ts;
+ struct platform_device *ucb1400_gpio;
+};
+
+struct ucb1400_pdata {
+ int irq;
+ int gpio_offset;
+ int (*gpio_setup)(struct device *dev, int ngpio);
+ int (*gpio_teardown)(struct device *dev, int ngpio);
+};
+
+static inline u16 ucb1400_reg_read(struct snd_ac97 *ac97, u16 reg)
+{
+ return ac97->bus->ops->read(ac97, reg);
+}
+
+static inline void ucb1400_reg_write(struct snd_ac97 *ac97, u16 reg, u16 val)
+{
+ ac97->bus->ops->write(ac97, reg, val);
+}
+
+static inline u16 ucb1400_gpio_get_value(struct snd_ac97 *ac97, u16 gpio)
+{
+ return ucb1400_reg_read(ac97, UCB_IO_DATA) & (1 << gpio);
+}
+
+static inline void ucb1400_gpio_set_value(struct snd_ac97 *ac97, u16 gpio,
+ u16 val)
+{
+ ucb1400_reg_write(ac97, UCB_IO_DATA, val ?
+ ucb1400_reg_read(ac97, UCB_IO_DATA) | (1 << gpio) :
+ ucb1400_reg_read(ac97, UCB_IO_DATA) & ~(1 << gpio));
+}
+
+static inline u16 ucb1400_gpio_get_direction(struct snd_ac97 *ac97, u16 gpio)
+{
+ return ucb1400_reg_read(ac97, UCB_IO_DIR) & (1 << gpio);
+}
+
+static inline void ucb1400_gpio_set_direction(struct snd_ac97 *ac97, u16 gpio,
+ u16 dir)
+{
+ ucb1400_reg_write(ac97, UCB_IO_DIR, dir ?
+ ucb1400_reg_read(ac97, UCB_IO_DIR) | (1 << gpio) :
+ ucb1400_reg_read(ac97, UCB_IO_DIR) & ~(1 << gpio));
+}
+
+static inline void ucb1400_adc_enable(struct snd_ac97 *ac97)
+{
+ ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA);
+}
+
+static inline void ucb1400_adc_disable(struct snd_ac97 *ac97)
+{
+ ucb1400_reg_write(ac97, UCB_ADC_CR, 0);
+}
+
+
+unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
+ int adcsync);
+
+#endif
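A minimal sketch using the accessors above to drive a UCB1400 GPIO and sample an ADC input. The GPIO number and ADC channel are arbitrary, and the ac97 handle is assumed to come from the surrounding AC97/UCB1400 platform code.

/* Sketch: set GPIO 3 high, then take one sample from ADC input AD0. */
static unsigned int example_ucb1400_sample(struct snd_ac97 *ac97)
{
	unsigned int val;

	ucb1400_gpio_set_direction(ac97, 3, 1);		/* GPIO 3 as output */
	ucb1400_gpio_set_value(ac97, 3, 1);		/* drive it high */

	ucb1400_adc_enable(ac97);
	val = ucb1400_adc_read(ac97, UCB_ADC_INP_AD0, 0);	/* no ADCSYNC */
	ucb1400_adc_disable(ac97);

	return val & UCB_ADC_DAT_MASK;
}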
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
new file mode 100644
index 000000000..cbb20afdb
--- /dev/null
+++ b/include/linux/ucs2_string.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_UCS2_STRING_H_
+#define _LINUX_UCS2_STRING_H_
+
+#include <linux/types.h> /* for size_t */
+#include <linux/stddef.h> /* for NULL */
+
+typedef u16 ucs2_char_t;
+
+unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength);
+unsigned long ucs2_strlen(const ucs2_char_t *s);
+unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+
+#endif /* _LINUX_UCS2_STRING_H_ */
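A minimal sketch of the UCS-2 helpers, as used for things like EFI variable names; the literal string and the helper name are purely illustrative.

/* Sketch: check whether a UCS-2 string equals "Boot". */
static bool example_is_boot_var(const ucs2_char_t *name, size_t max)
{
	static const ucs2_char_t boot[] = { 'B', 'o', 'o', 't', 0 };

	return ucs2_strnlen(name, max) == 4 &&
	       ucs2_strncmp(name, boot, 4) == 0;
}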
diff --git a/include/linux/udp.h b/include/linux/udp.h
new file mode 100644
index 000000000..87c094961
--- /dev/null
+++ b/include/linux/udp.h
@@ -0,0 +1,109 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the UDP protocol.
+ *
+ * Version: @(#)udp.h 1.0.2 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_UDP_H
+#define _LINUX_UDP_H
+
+#include <net/inet_sock.h>
+#include <linux/skbuff.h>
+#include <net/netns/hash.h>
+#include <uapi/linux/udp.h>
+
+static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
+{
+ return (struct udphdr *)skb_transport_header(skb);
+}
+
+static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb)
+{
+ return (struct udphdr *)skb_inner_transport_header(skb);
+}
+
+#define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 128 : 256)
+
+static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
+{
+ return (num + net_hash_mix(net)) & mask;
+}
+
+struct udp_sock {
+ /* inet_sock has to be the first member */
+ struct inet_sock inet;
+#define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
+#define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
+#define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
+ int pending; /* Any pending frames ? */
+ unsigned int corkflag; /* Cork is required */
+ __u8 encap_type; /* Is this an Encapsulation socket? */
+ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
+ no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
+ /*
+ * Following member retains the information to create a UDP header
+ * when the socket is uncorked.
+ */
+ __u16 len; /* total length of pending frames */
+ /*
+ * Fields specific to UDP-Lite.
+ */
+ __u16 pcslen;
+ __u16 pcrlen;
+/* indicator bits used by pcflag: */
+#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
+#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
+#define UDPLITE_RECV_CC 0x4 /* set via udplite setsockopt */
+ __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
+ __u8 unused[3];
+ /*
+ * For encapsulation sockets.
+ */
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ void (*encap_destroy)(struct sock *sk);
+};
+
+static inline struct udp_sock *udp_sk(const struct sock *sk)
+{
+ return (struct udp_sock *)sk;
+}
+
+static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
+{
+ udp_sk(sk)->no_check6_tx = val;
+}
+
+static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
+{
+ udp_sk(sk)->no_check6_rx = val;
+}
+
+static inline bool udp_get_no_check6_tx(struct sock *sk)
+{
+ return udp_sk(sk)->no_check6_tx;
+}
+
+static inline bool udp_get_no_check6_rx(struct sock *sk)
+{
+ return udp_sk(sk)->no_check6_rx;
+}
+
+#define udp_portaddr_for_each_entry(__sk, node, list) \
+ hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
+
+#define udp_portaddr_for_each_entry_rcu(__sk, node, list) \
+ hlist_nulls_for_each_entry_rcu(__sk, node, list, __sk_common.skc_portaddr_node)
+
+#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
+
+#endif /* _LINUX_UDP_H */
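A minimal sketch of the accessors above; sk and skb are assumed to come from the normal UDP receive path, and the policy shown (accepting a zero checksum only when the socket allows it) is illustrative rather than the actual udp_rcv() logic.

/* Sketch: decide whether a received datagram's zero checksum is acceptable. */
static bool example_zero_csum_ok(struct sock *sk, struct sk_buff *skb)
{
	const struct udphdr *uh = udp_hdr(skb);

	if (uh->check == 0)			/* zero checksum on the wire */
		return udp_get_no_check6_rx(sk);

	return true;				/* non-zero: verified elsewhere */
}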
diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
new file mode 100644
index 000000000..03835522d
--- /dev/null
+++ b/include/linux/uidgid.h
@@ -0,0 +1,190 @@
+#ifndef _LINUX_UIDGID_H
+#define _LINUX_UIDGID_H
+
+/*
+ * A set of types for the internal kernel types representing uids and gids.
+ *
+ * The types defined in this header allow distinguishing which uids and gids in
+ * the kernel are values used by userspace and which uid and gid values are
+ * the internal kernel values. With the addition of user namespaces the values
+ * can be different. Using the type system makes it possible for the compiler
+ * to detect when we overlook these differences.
+ *
+ */
+#include <linux/types.h>
+#include <linux/highuid.h>
+
+struct user_namespace;
+extern struct user_namespace init_user_ns;
+
+typedef struct {
+ uid_t val;
+} kuid_t;
+
+
+typedef struct {
+ gid_t val;
+} kgid_t;
+
+#define KUIDT_INIT(value) (kuid_t){ value }
+#define KGIDT_INIT(value) (kgid_t){ value }
+
+#ifdef CONFIG_MULTIUSER
+static inline uid_t __kuid_val(kuid_t uid)
+{
+ return uid.val;
+}
+
+static inline gid_t __kgid_val(kgid_t gid)
+{
+ return gid.val;
+}
+#else
+static inline uid_t __kuid_val(kuid_t uid)
+{
+ return 0;
+}
+
+static inline gid_t __kgid_val(kgid_t gid)
+{
+ return 0;
+}
+#endif
+
+#define GLOBAL_ROOT_UID KUIDT_INIT(0)
+#define GLOBAL_ROOT_GID KGIDT_INIT(0)
+
+#define INVALID_UID KUIDT_INIT(-1)
+#define INVALID_GID KGIDT_INIT(-1)
+
+static inline bool uid_eq(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) == __kuid_val(right);
+}
+
+static inline bool gid_eq(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) == __kgid_val(right);
+}
+
+static inline bool uid_gt(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) > __kuid_val(right);
+}
+
+static inline bool gid_gt(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) > __kgid_val(right);
+}
+
+static inline bool uid_gte(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) >= __kuid_val(right);
+}
+
+static inline bool gid_gte(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) >= __kgid_val(right);
+}
+
+static inline bool uid_lt(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) < __kuid_val(right);
+}
+
+static inline bool gid_lt(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) < __kgid_val(right);
+}
+
+static inline bool uid_lte(kuid_t left, kuid_t right)
+{
+ return __kuid_val(left) <= __kuid_val(right);
+}
+
+static inline bool gid_lte(kgid_t left, kgid_t right)
+{
+ return __kgid_val(left) <= __kgid_val(right);
+}
+
+static inline bool uid_valid(kuid_t uid)
+{
+ return __kuid_val(uid) != (uid_t) -1;
+}
+
+static inline bool gid_valid(kgid_t gid)
+{
+ return __kgid_val(gid) != (gid_t) -1;
+}
+
+#ifdef CONFIG_USER_NS
+
+extern kuid_t make_kuid(struct user_namespace *from, uid_t uid);
+extern kgid_t make_kgid(struct user_namespace *from, gid_t gid);
+
+extern uid_t from_kuid(struct user_namespace *to, kuid_t uid);
+extern gid_t from_kgid(struct user_namespace *to, kgid_t gid);
+extern uid_t from_kuid_munged(struct user_namespace *to, kuid_t uid);
+extern gid_t from_kgid_munged(struct user_namespace *to, kgid_t gid);
+
+static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
+{
+ return from_kuid(ns, uid) != (uid_t) -1;
+}
+
+static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
+{
+ return from_kgid(ns, gid) != (gid_t) -1;
+}
+
+#else
+
+static inline kuid_t make_kuid(struct user_namespace *from, uid_t uid)
+{
+ return KUIDT_INIT(uid);
+}
+
+static inline kgid_t make_kgid(struct user_namespace *from, gid_t gid)
+{
+ return KGIDT_INIT(gid);
+}
+
+static inline uid_t from_kuid(struct user_namespace *to, kuid_t kuid)
+{
+ return __kuid_val(kuid);
+}
+
+static inline gid_t from_kgid(struct user_namespace *to, kgid_t kgid)
+{
+ return __kgid_val(kgid);
+}
+
+static inline uid_t from_kuid_munged(struct user_namespace *to, kuid_t kuid)
+{
+ uid_t uid = from_kuid(to, kuid);
+ if (uid == (uid_t)-1)
+ uid = overflowuid;
+ return uid;
+}
+
+static inline gid_t from_kgid_munged(struct user_namespace *to, kgid_t kgid)
+{
+ gid_t gid = from_kgid(to, kgid);
+ if (gid == (gid_t)-1)
+ gid = overflowgid;
+ return gid;
+}
+
+static inline bool kuid_has_mapping(struct user_namespace *ns, kuid_t uid)
+{
+ return true;
+}
+
+static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
+{
+ return true;
+}
+
+#endif /* CONFIG_USER_NS */
+
+#endif /* _LINUX_UIDGID_H */
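A minimal sketch of the conversion helpers above: a uid arriving from userspace is turned into a kuid_t in the caller's namespace and validated before use. current_user_ns() comes from <linux/cred.h>, and the permission rule shown is purely illustrative.

#include <linux/cred.h>
#include <linux/errno.h>

static int example_check_uid(uid_t user_uid)
{
	kuid_t kuid = make_kuid(current_user_ns(), user_uid);

	if (!uid_valid(kuid))		/* no mapping in this namespace */
		return -EINVAL;

	if (uid_eq(kuid, GLOBAL_ROOT_UID))
		return 0;		/* illustrative: only global root allowed */

	return -EPERM;
}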
diff --git a/include/linux/uinput.h b/include/linux/uinput.h
new file mode 100644
index 000000000..0994c0d01
--- /dev/null
+++ b/include/linux/uinput.h
@@ -0,0 +1,76 @@
+/*
+ * User level driver support for input subsystem
+ *
+ * Heavily based on evdev.c by Vojtech Pavlik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Author: Aristeu Sergio Rozanski Filho <aris@cathedrallabs.org>
+ *
+ * Changes/Revisions:
+ * 0.4 01/09/2014 (Benjamin Tissoires <benjamin.tissoires@redhat.com>)
+ * - add UI_GET_SYSNAME ioctl
+ * 0.3 24/05/2006 (Anssi Hannula <anssi.hannula@gmail.com>)
+ * - update ff support for the changes in kernel interface
+ * - add UINPUT_VERSION
+ * 0.2 16/10/2004 (Micah Dowty <micah@navi.cx>)
+ * - added force feedback support
+ * - added UI_SET_PHYS
+ * 0.1 20/06/2002
+ * - first public version
+ */
+#ifndef __UINPUT_H_
+#define __UINPUT_H_
+
+#include <uapi/linux/uinput.h>
+
+#define UINPUT_NAME "uinput"
+#define UINPUT_BUFFER_SIZE 16
+#define UINPUT_NUM_REQUESTS 16
+
+enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
+
+struct uinput_request {
+ unsigned int id;
+ unsigned int code; /* UI_FF_UPLOAD, UI_FF_ERASE */
+
+ int retval;
+ struct completion done;
+
+ union {
+ unsigned int effect_id;
+ struct {
+ struct ff_effect *effect;
+ struct ff_effect *old;
+ } upload;
+ } u;
+};
+
+struct uinput_device {
+ struct input_dev *dev;
+ struct mutex mutex;
+ enum uinput_state state;
+ wait_queue_head_t waitq;
+ unsigned char ready;
+ unsigned char head;
+ unsigned char tail;
+ struct input_event buff[UINPUT_BUFFER_SIZE];
+ unsigned int ff_effects_max;
+
+ struct uinput_request *requests[UINPUT_NUM_REQUESTS];
+ wait_queue_head_t requests_waitq;
+ spinlock_t requests_lock;
+};
+#endif /* __UINPUT_H_ */
diff --git a/include/linux/uio.h b/include/linux/uio.h
new file mode 100644
index 000000000..8b01e1c3c
--- /dev/null
+++ b/include/linux/uio.h
@@ -0,0 +1,165 @@
+/*
+ * Berkeley style UIO structures - Alan Cox 1994.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef __LINUX_UIO_H
+#define __LINUX_UIO_H
+
+#include <linux/kernel.h>
+#include <uapi/linux/uio.h>
+
+struct page;
+
+struct kvec {
+ void *iov_base; /* and that should *never* hold a userland pointer */
+ size_t iov_len;
+};
+
+enum {
+ ITER_IOVEC = 0,
+ ITER_KVEC = 2,
+ ITER_BVEC = 4,
+};
+
+struct iov_iter {
+ int type;
+ size_t iov_offset;
+ size_t count;
+ union {
+ const struct iovec *iov;
+ const struct kvec *kvec;
+ const struct bio_vec *bvec;
+ };
+ unsigned long nr_segs;
+};
+
+/*
+ * Total number of bytes covered by an iovec.
+ *
+ * NOTE that it is not safe to use this function until all the iovec's
+ * segment lengths have been validated, because the individual lengths can
+ * overflow a size_t when added together.
+ */
+static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
+{
+ unsigned long seg;
+ size_t ret = 0;
+
+ for (seg = 0; seg < nr_segs; seg++)
+ ret += iov[seg].iov_len;
+ return ret;
+}
+
+static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
+{
+ return (struct iovec) {
+ .iov_base = iter->iov->iov_base + iter->iov_offset,
+ .iov_len = min(iter->count,
+ iter->iov->iov_len - iter->iov_offset),
+ };
+}
+
+#define iov_for_each(iov, iter, start) \
+ if (!((start).type & ITER_BVEC)) \
+ for (iter = (start); \
+ (iter).count && \
+ ((iov = iov_iter_iovec(&(iter))), 1); \
+ iov_iter_advance(&(iter), (iov).iov_len))
+
+unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
+
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+void iov_iter_advance(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+size_t iov_iter_single_seg_count(const struct iov_iter *i);
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i);
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+ struct iov_iter *i);
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i);
+size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
+size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+size_t iov_iter_zero(size_t bytes, struct iov_iter *);
+unsigned long iov_iter_alignment(const struct iov_iter *i);
+void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
+ unsigned long nr_segs, size_t count);
+void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
+ unsigned long nr_segs, size_t count);
+void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
+ unsigned long nr_segs, size_t count);
+ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
+ size_t maxsize, unsigned maxpages, size_t *start);
+ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
+ size_t maxsize, size_t *start);
+int iov_iter_npages(const struct iov_iter *i, int maxpages);
+
+const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
+
+static inline size_t iov_iter_count(struct iov_iter *i)
+{
+ return i->count;
+}
+
+static inline bool iter_is_iovec(struct iov_iter *i)
+{
+ return !(i->type & (ITER_BVEC | ITER_KVEC));
+}
+
+/*
+ * Get one of READ or WRITE out of iter->type without any other flags OR'd in
+ * with it.
+ *
+ * The ?: is just for type safety.
+ */
+#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & RW_MASK)
+
+/*
+ * Cap the iov_iter by given limit; note that the second argument is
+ * *not* the new size - it's upper limit for such. Passing it a value
+ * greater than the amount of data in iov_iter is fine - it'll just do
+ * nothing in that case.
+ */
+static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
+{
+ /*
+ * count doesn't have to fit in size_t - comparison extends both
+ * operands to u64 here and any value that would be truncated by
+ * conversion in assignment is by definition greater than all
+ * values of size_t, including old i->count.
+ */
+ if (i->count > count)
+ i->count = count;
+}
+
+/*
+ * Re-expand a previously truncated iterator; count must be no more than the
+ * amount by which we had shrunk it.
+ */
+static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
+{
+ i->count = count;
+}
+size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+
+int import_iovec(int type, const struct iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i);
+
+#ifdef CONFIG_COMPAT
+struct compat_iovec;
+int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
+ unsigned nr_segs, unsigned fast_segs,
+ struct iovec **iov, struct iov_iter *i);
+#endif
+
+int import_single_range(int type, void __user *buf, size_t len,
+ struct iovec *iov, struct iov_iter *i);
+
+#endif
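A minimal sketch of the kvec-backed iov_iter API declared above. Note that in this kernel the iterator type flag (ITER_KVEC) is OR-ed into the direction argument and READ comes from <linux/fs.h>; later kernels changed this convention. The helper itself is illustrative.

/* Sketch: wrap a kernel buffer in an iov_iter and copy data into it. */
static size_t example_fill_buffer(void *dst, size_t dst_len,
				  void *src, size_t src_len)
{
	struct kvec kv = { .iov_base = dst, .iov_len = dst_len };
	struct iov_iter iter;

	/* READ: data flows into the iterator (i.e. into dst) */
	iov_iter_kvec(&iter, READ | ITER_KVEC, &kv, 1, dst_len);
	return copy_to_iter(src, min(src_len, dst_len), &iter);
}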
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
new file mode 100644
index 000000000..32c0e83d6
--- /dev/null
+++ b/include/linux/uio_driver.h
@@ -0,0 +1,138 @@
+/*
+ * include/linux/uio_driver.h
+ *
+ * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
+ * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
+ * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
+ *
+ * Userspace IO driver.
+ *
+ * Licensed under the GPLv2 only.
+ */
+
+#ifndef _UIO_DRIVER_H_
+#define _UIO_DRIVER_H_
+
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+
+struct module;
+struct uio_map;
+
+/**
+ * struct uio_mem - description of a UIO memory region
+ * @name: name of the memory region for identification
+ * @addr:		address of the device's memory (phys_addr_t is used since
+ * 			addr can be logical, virtual, or physical, and phys_addr_t
+ * 			should always be large enough to handle any of the
+ * 			address types)
+ * @size: size of IO
+ * @memtype: type of memory addr points to
+ * @internal_addr: ioremap-ped version of addr, for driver internal use
+ * @map: for use by the UIO core only.
+ */
+struct uio_mem {
+ const char *name;
+ phys_addr_t addr;
+ resource_size_t size;
+ int memtype;
+ void __iomem *internal_addr;
+ struct uio_map *map;
+};
+
+#define MAX_UIO_MAPS 5
+
+struct uio_portio;
+
+/**
+ * struct uio_port - description of a UIO port region
+ * @name: name of the port region for identification
+ * @start: start of port region
+ * @size: size of port region
+ * @porttype: type of port (see UIO_PORT_* below)
+ * @portio: for use by the UIO core only.
+ */
+struct uio_port {
+ const char *name;
+ unsigned long start;
+ unsigned long size;
+ int porttype;
+ struct uio_portio *portio;
+};
+
+#define MAX_UIO_PORT_REGIONS 5
+
+struct uio_device {
+ struct module *owner;
+ struct device *dev;
+ int minor;
+ atomic_t event;
+ struct fasync_struct *async_queue;
+ wait_queue_head_t wait;
+ struct uio_info *info;
+ struct kobject *map_dir;
+ struct kobject *portio_dir;
+};
+
+/**
+ * struct uio_info - UIO device capabilities
+ * @uio_dev: the UIO device this info belongs to
+ * @name: device name
+ * @version: device driver version
+ * @mem: list of mappable memory regions, size==0 for end of list
+ * @port: list of port regions, size==0 for end of list
+ * @irq: interrupt number or UIO_IRQ_CUSTOM
+ * @irq_flags: flags for request_irq()
+ * @priv: optional private data
+ * @handler: the device's irq handler
+ * @mmap: mmap operation for this uio device
+ * @open: open operation for this uio device
+ * @release: release operation for this uio device
+ * @irqcontrol: disable/enable irqs when 0/1 is written to /dev/uioX
+ */
+struct uio_info {
+ struct uio_device *uio_dev;
+ const char *name;
+ const char *version;
+ struct uio_mem mem[MAX_UIO_MAPS];
+ struct uio_port port[MAX_UIO_PORT_REGIONS];
+ long irq;
+ unsigned long irq_flags;
+ void *priv;
+ irqreturn_t (*handler)(int irq, struct uio_info *dev_info);
+ int (*mmap)(struct uio_info *info, struct vm_area_struct *vma);
+ int (*open)(struct uio_info *info, struct inode *inode);
+ int (*release)(struct uio_info *info, struct inode *inode);
+ int (*irqcontrol)(struct uio_info *info, s32 irq_on);
+};
+
+extern int __must_check
+ __uio_register_device(struct module *owner,
+ struct device *parent,
+ struct uio_info *info);
+
+/* use a define to avoid include chaining to get THIS_MODULE */
+#define uio_register_device(parent, info) \
+ __uio_register_device(THIS_MODULE, parent, info)
+
+extern void uio_unregister_device(struct uio_info *info);
+extern void uio_event_notify(struct uio_info *info);
+
+/* defines for uio_info->irq */
+#define UIO_IRQ_CUSTOM -1
+#define UIO_IRQ_NONE 0
+
+/* defines for uio_mem->memtype */
+#define UIO_MEM_NONE 0
+#define UIO_MEM_PHYS 1
+#define UIO_MEM_LOGICAL 2
+#define UIO_MEM_VIRTUAL 3
+
+/* defines for uio_port->porttype */
+#define UIO_PORT_NONE 0
+#define UIO_PORT_X86 1
+#define UIO_PORT_GPIO 2
+#define UIO_PORT_OTHER 3
+
+#endif /* _LINUX_UIO_DRIVER_H_ */
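A minimal sketch of registering a one-region UIO device from a hypothetical platform driver's probe(). The device name, register base, and size are made up, error handling is trimmed, and <linux/platform_device.h> is assumed to be included.

static irqreturn_t example_uio_irq(int irq, struct uio_info *info)
{
	/* acknowledge the hardware interrupt here; the UIO core wakes readers */
	return IRQ_HANDLED;
}

static int example_uio_probe(struct platform_device *pdev)
{
	struct uio_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->name = "example-uio";		/* hypothetical device name */
	info->version = "0.1";
	info->irq = platform_get_irq(pdev, 0);
	info->handler = example_uio_irq;

	info->mem[0].name = "regs";
	info->mem[0].addr = 0x10000000;		/* hypothetical register base */
	info->mem[0].size = 0x1000;
	info->mem[0].memtype = UIO_MEM_PHYS;

	return uio_register_device(&pdev->dev, info);
}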
diff --git a/include/linux/uksm.h b/include/linux/uksm.h
new file mode 100644
index 000000000..206f10958
--- /dev/null
+++ b/include/linux/uksm.h
@@ -0,0 +1,146 @@
+#ifndef __LINUX_UKSM_H
+#define __LINUX_UKSM_H
+/*
+ * Memory merging support.
+ *
+ * This code enables dynamic sharing of identical pages found in different
+ * memory areas, even if they are not shared by fork().
+ */
+
+/* if !CONFIG_UKSM this file should not be compiled at all. */
+#ifdef CONFIG_UKSM
+
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
+#include <linux/sched.h>
+
+extern unsigned long zero_pfn __read_mostly;
+extern unsigned long uksm_zero_pfn __read_mostly;
+extern struct page *empty_uksm_zero_page;
+
+/* must be done before linked to mm */
+extern void uksm_vma_add_new(struct vm_area_struct *vma);
+extern void uksm_remove_vma(struct vm_area_struct *vma);
+
+#define UKSM_SLOT_NEED_SORT (1 << 0)
+#define UKSM_SLOT_NEED_RERAND (1 << 1)
+#define UKSM_SLOT_SCANNED (1 << 2) /* It's scanned in this round */
+#define UKSM_SLOT_FUL_SCANNED (1 << 3)
+#define UKSM_SLOT_IN_UKSM (1 << 4)
+
+struct vma_slot {
+ struct sradix_tree_node *snode;
+ unsigned long sindex;
+
+ struct list_head slot_list;
+ unsigned long fully_scanned_round;
+ unsigned long dedup_num;
+ unsigned long pages_scanned;
+ unsigned long last_scanned;
+ unsigned long pages_to_scan;
+ struct scan_rung *rung;
+ struct page **rmap_list_pool;
+ unsigned int *pool_counts;
+ unsigned long pool_size;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long ctime_j;
+ unsigned long pages;
+ unsigned long flags;
+ unsigned long pages_cowed; /* pages cowed this round */
+ unsigned long pages_merged; /* pages merged this round */
+ unsigned long pages_bemerged;
+
+ /* when it has page merged in this eval round */
+ struct list_head dedup_list;
+};
+
+static inline void uksm_unmap_zero_page(pte_t pte)
+{
+ if (pte_pfn(pte) == uksm_zero_pfn)
+ __dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
+}
+
+static inline void uksm_map_zero_page(pte_t pte)
+{
+ if (pte_pfn(pte) == uksm_zero_pfn)
+ __inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES);
+}
+
+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
+{
+ if (vma->uksm_vma_slot && PageKsm(page))
+ vma->uksm_vma_slot->pages_cowed++;
+}
+
+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
+{
+ if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn)
+ vma->uksm_vma_slot->pages_cowed++;
+}
+
+static inline int uksm_flags_can_scan(unsigned long vm_flags)
+{
+#ifndef VM_SAO
+#define VM_SAO 0
+#endif
+ return !(vm_flags & (VM_PFNMAP | VM_IO | VM_DONTEXPAND |
+ VM_HUGETLB | VM_MIXEDMAP | VM_SHARED
+ | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN | VM_SAO));
+}
+
+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
+{
+ if (uksm_flags_can_scan(*vm_flags_p))
+ *vm_flags_p |= VM_MERGEABLE;
+}
+
+/*
+ * Just a wrapper around BUG_ON() for places where the uksm zero page must not
+ * appear. TODO: it will be removed once the uksm zero page patch is stable enough.
+ */
+static inline void uksm_bugon_zeropage(pte_t pte)
+{
+ BUG_ON(pte_pfn(pte) == uksm_zero_pfn);
+}
+#else
+static inline void uksm_vma_add_new(struct vm_area_struct *vma)
+{
+}
+
+static inline void uksm_remove_vma(struct vm_area_struct *vma)
+{
+}
+
+static inline void uksm_unmap_zero_page(pte_t pte)
+{
+}
+
+static inline void uksm_map_zero_page(pte_t pte)
+{
+}
+
+static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page)
+{
+}
+
+static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte)
+{
+}
+
+static inline int uksm_flags_can_scan(unsigned long vm_flags)
+{
+ return 0;
+}
+
+static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p)
+{
+}
+
+static inline void uksm_bugon_zeropage(pte_t pte)
+{
+}
+#endif /* !CONFIG_UKSM */
+#endif /* __LINUX_UKSM_H */
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
new file mode 100644
index 000000000..99c1b4d20
--- /dev/null
+++ b/include/linux/unaligned/access_ok.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
+#define _LINUX_UNALIGNED_ACCESS_OK_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ *((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ *((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ *((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ *((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ *((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ *((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
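A minimal sketch of the unaligned accessors: parsing fixed-layout little-endian fields out of a raw byte buffer regardless of its alignment. The 2+4 byte layout and the magic value are illustrative; drivers normally reach these helpers through <asm/unaligned.h>, which picks the right implementation for the architecture.

/* Sketch: parse a little-endian { u16 len; u32 magic; } header. */
static bool example_parse_header(const u8 *buf, u16 *len, u32 *magic)
{
	*len = get_unaligned_le16(buf);
	*magic = get_unaligned_le32(buf + 2);

	return *magic == 0x12345678;	/* hypothetical magic number */
}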
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
new file mode 100644
index 000000000..9356b2422
--- /dev/null
+++ b/include/linux/unaligned/be_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
+#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
+
+#include <linux/types.h>
+
+static inline u16 __get_unaligned_be16(const u8 *p)
+{
+ return p[0] << 8 | p[1];
+}
+
+static inline u32 __get_unaligned_be32(const u8 *p)
+{
+ return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline u64 __get_unaligned_be64(const u8 *p)
+{
+ return (u64)__get_unaligned_be32(p) << 32 |
+ __get_unaligned_be32(p + 4);
+}
+
+static inline void __put_unaligned_be16(u16 val, u8 *p)
+{
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void __put_unaligned_be32(u32 val, u8 *p)
+{
+ __put_unaligned_be16(val >> 16, p);
+ __put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(u64 val, u8 *p)
+{
+ __put_unaligned_be32(val >> 32, p);
+ __put_unaligned_be32(val, p + 4);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h
new file mode 100644
index 000000000..c2a76c5c9
--- /dev/null
+++ b/include/linux/unaligned/be_memmove.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H
+#define _LINUX_UNALIGNED_BE_MEMMOVE_H
+
+#include <linux/unaligned/memmove.h>
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return __get_unaligned_memmove16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return __get_unaligned_memmove32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return __get_unaligned_memmove64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_memmove16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_memmove32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_memmove64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_MEMMOVE_H */
diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h
new file mode 100644
index 000000000..132415836
--- /dev/null
+++ b/include/linux/unaligned/be_struct.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_BE_STRUCT_H
+#define _LINUX_UNALIGNED_BE_STRUCT_H
+
+#include <linux/unaligned/packed_struct.h>
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
new file mode 100644
index 000000000..02d97ff3d
--- /dev/null
+++ b/include/linux/unaligned/generic.h
@@ -0,0 +1,68 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_H
+#define _LINUX_UNALIGNED_GENERIC_H
+
+/*
+ * Cause a link-time error if we try an unaligned access other than
+ * 1,2,4 or 8 bytes long
+ */
+extern void __bad_unaligned_access_size(void);
+
+#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \
+ __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
+ __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
+ __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
+ __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
+ __bad_unaligned_access_size())))); \
+ }))
+
+#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \
+ __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
+ __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
+ __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
+ __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
+ __bad_unaligned_access_size())))); \
+ }))
+
+#define __put_unaligned_le(val, ptr) ({ \
+ void *__gu_p = (ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ *(u8 *)__gu_p = (__force u8)(val); \
+ break; \
+ case 2: \
+ put_unaligned_le16((__force u16)(val), __gu_p); \
+ break; \
+ case 4: \
+ put_unaligned_le32((__force u32)(val), __gu_p); \
+ break; \
+ case 8: \
+ put_unaligned_le64((__force u64)(val), __gu_p); \
+ break; \
+ default: \
+ __bad_unaligned_access_size(); \
+ break; \
+ } \
+ (void)0; })
+
+#define __put_unaligned_be(val, ptr) ({ \
+ void *__gu_p = (ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ *(u8 *)__gu_p = (__force u8)(val); \
+ break; \
+ case 2: \
+ put_unaligned_be16((__force u16)(val), __gu_p); \
+ break; \
+ case 4: \
+ put_unaligned_be32((__force u32)(val), __gu_p); \
+ break; \
+ case 8: \
+ put_unaligned_be64((__force u64)(val), __gu_p); \
+ break; \
+ default: \
+ __bad_unaligned_access_size(); \
+ break; \
+ } \
+ (void)0; })
+
+#endif /* _LINUX_UNALIGNED_GENERIC_H */
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
new file mode 100644
index 000000000..be376fb79
--- /dev/null
+++ b/include/linux/unaligned/le_byteshift.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
+#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
+
+#include <linux/types.h>
+
+static inline u16 __get_unaligned_le16(const u8 *p)
+{
+ return p[0] | p[1] << 8;
+}
+
+static inline u32 __get_unaligned_le32(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline u64 __get_unaligned_le64(const u8 *p)
+{
+ return (u64)__get_unaligned_le32(p + 4) << 32 |
+ __get_unaligned_le32(p);
+}
+
+static inline void __put_unaligned_le16(u16 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(u32 val, u8 *p)
+{
+ __put_unaligned_le16(val >> 16, p + 2);
+ __put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(u64 val, u8 *p)
+{
+ __put_unaligned_le32(val >> 32, p + 4);
+ __put_unaligned_le32(val, p);
+}
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_le64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h
new file mode 100644
index 000000000..269849bee
--- /dev/null
+++ b/include/linux/unaligned/le_memmove.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H
+#define _LINUX_UNALIGNED_LE_MEMMOVE_H
+
+#include <linux/unaligned/memmove.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return __get_unaligned_memmove16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return __get_unaligned_memmove32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return __get_unaligned_memmove64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_memmove16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_memmove32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_memmove64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h
new file mode 100644
index 000000000..088c4572f
--- /dev/null
+++ b/include/linux/unaligned/le_struct.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_LE_STRUCT_H
+#define _LINUX_UNALIGNED_LE_STRUCT_H
+
+#include <linux/unaligned/packed_struct.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */
diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h
new file mode 100644
index 000000000..eeb5a779a
--- /dev/null
+++ b/include/linux/unaligned/memmove.h
@@ -0,0 +1,45 @@
+#ifndef _LINUX_UNALIGNED_MEMMOVE_H
+#define _LINUX_UNALIGNED_MEMMOVE_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+static inline u16 __get_unaligned_memmove16(const void *p)
+{
+ u16 tmp;
+ memmove(&tmp, p, 2);
+ return tmp;
+}
+
+static inline u32 __get_unaligned_memmove32(const void *p)
+{
+ u32 tmp;
+ memmove(&tmp, p, 4);
+ return tmp;
+}
+
+static inline u64 __get_unaligned_memmove64(const void *p)
+{
+ u64 tmp;
+ memmove(&tmp, p, 8);
+ return tmp;
+}
+
+static inline void __put_unaligned_memmove16(u16 val, void *p)
+{
+ memmove(p, &val, 2);
+}
+
+static inline void __put_unaligned_memmove32(u32 val, void *p)
+{
+ memmove(p, &val, 4);
+}
+
+static inline void __put_unaligned_memmove64(u64 val, void *p)
+{
+ memmove(p, &val, 8);
+}
+
+#endif /* _LINUX_UNALIGNED_MEMMOVE_H */
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
new file mode 100644
index 000000000..c0d817de4
--- /dev/null
+++ b/include/linux/unaligned/packed_struct.h
@@ -0,0 +1,46 @@
+#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
+#define _LINUX_UNALIGNED_PACKED_STRUCT_H
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x; } __packed;
+struct __una_u32 { u32 x; } __packed;
+struct __una_u64 { u64 x; } __packed;
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+ const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+ return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+ const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+ return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+ const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+ return ptr->x;
+}
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+ struct __una_u16 *ptr = (struct __una_u16 *)p;
+ ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+ struct __una_u32 *ptr = (struct __una_u32 *)p;
+ ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+ struct __una_u64 *ptr = (struct __una_u64 *)p;
+ ptr->x = val;
+}
+
+#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
new file mode 100644
index 000000000..60beb5dc7
--- /dev/null
+++ b/include/linux/uprobes.h
@@ -0,0 +1,189 @@
+#ifndef _LINUX_UPROBES_H
+#define _LINUX_UPROBES_H
+/*
+ * User-space Probes (UProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2008-2012
+ * Authors:
+ * Srikar Dronamraju
+ * Jim Keniston
+ * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+struct vm_area_struct;
+struct mm_struct;
+struct inode;
+struct notifier_block;
+struct page;
+
+#define UPROBE_HANDLER_REMOVE 1
+#define UPROBE_HANDLER_MASK 1
+
+#define MAX_URETPROBE_DEPTH 64
+
+enum uprobe_filter_ctx {
+ UPROBE_FILTER_REGISTER,
+ UPROBE_FILTER_UNREGISTER,
+ UPROBE_FILTER_MMAP,
+};
+
+struct uprobe_consumer {
+ int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs);
+ int (*ret_handler)(struct uprobe_consumer *self,
+ unsigned long func,
+ struct pt_regs *regs);
+ bool (*filter)(struct uprobe_consumer *self,
+ enum uprobe_filter_ctx ctx,
+ struct mm_struct *mm);
+
+ struct uprobe_consumer *next;
+};
+
+#ifdef CONFIG_UPROBES
+#include <asm/uprobes.h>
+
+enum uprobe_task_state {
+ UTASK_RUNNING,
+ UTASK_SSTEP,
+ UTASK_SSTEP_ACK,
+ UTASK_SSTEP_TRAPPED,
+};
+
+/*
+ * uprobe_task: Metadata of a task while it singlesteps.
+ */
+struct uprobe_task {
+ enum uprobe_task_state state;
+
+ union {
+ struct {
+ struct arch_uprobe_task autask;
+ unsigned long vaddr;
+ };
+
+ struct {
+ struct callback_head dup_xol_work;
+ unsigned long dup_xol_addr;
+ };
+ };
+
+ struct uprobe *active_uprobe;
+ unsigned long xol_vaddr;
+
+ struct return_instance *return_instances;
+ unsigned int depth;
+};
+
+struct xol_area;
+
+struct uprobes_state {
+ struct xol_area *xol_area;
+};
+
+extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
+extern bool is_swbp_insn(uprobe_opcode_t *insn);
+extern bool is_trap_insn(uprobe_opcode_t *insn);
+extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
+extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
+extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
+extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
+extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
+extern int uprobe_mmap(struct vm_area_struct *vma);
+extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void uprobe_start_dup_mmap(void);
+extern void uprobe_end_dup_mmap(void);
+extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
+extern void uprobe_free_utask(struct task_struct *t);
+extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
+extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
+extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
+extern void uprobe_notify_resume(struct pt_regs *regs);
+extern bool uprobe_deny_signal(void);
+extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
+extern void uprobe_clear_state(struct mm_struct *mm);
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
+extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
+extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len);
+#else /* !CONFIG_UPROBES */
+struct uprobes_state {
+};
+
+#define uprobe_get_trap_addr(regs) instruction_pointer(regs)
+
+static inline int
+uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+ return -ENOSYS;
+}
+static inline int
+uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add)
+{
+ return -ENOSYS;
+}
+static inline void
+uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+{
+}
+static inline int uprobe_mmap(struct vm_area_struct *vma)
+{
+ return 0;
+}
+static inline void
+uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+}
+static inline void uprobe_start_dup_mmap(void)
+{
+}
+static inline void uprobe_end_dup_mmap(void)
+{
+}
+static inline void
+uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
+{
+}
+static inline void uprobe_notify_resume(struct pt_regs *regs)
+{
+}
+static inline bool uprobe_deny_signal(void)
+{
+ return false;
+}
+static inline void uprobe_free_utask(struct task_struct *t)
+{
+}
+static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
+{
+}
+static inline void uprobe_clear_state(struct mm_struct *mm)
+{
+}
+#endif /* !CONFIG_UPROBES */
+#endif /* _LINUX_UPROBES_H */
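A minimal sketch of the consumer registration API declared above. How the inode and file offset of the probed instruction are found (for example, from a path and an ELF symbol table) is outside this header and is simply assumed here.

static int example_uprobe_hit(struct uprobe_consumer *self, struct pt_regs *regs)
{
	/* runs in task context each time a task hits the probed address */
	return 0;			/* keep the probe installed */
}

static struct uprobe_consumer example_consumer = {
	.handler = example_uprobe_hit,
};

static int example_attach(struct inode *inode, loff_t offset)
{
	return uprobe_register(inode, offset, &example_consumer);
}

static void example_detach(struct inode *inode, loff_t offset)
{
	uprobe_unregister(inode, offset, &example_consumer);
}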
diff --git a/include/linux/usb.h b/include/linux/usb.h
new file mode 100644
index 000000000..447fe29b5
--- /dev/null
+++ b/include/linux/usb.h
@@ -0,0 +1,1900 @@
+#ifndef __LINUX_USB_H
+#define __LINUX_USB_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/usb/ch9.h>
+
+#define USB_MAJOR 180
+#define USB_DEVICE_MAJOR 189
+
+
+#ifdef __KERNEL__
+
+#include <linux/errno.h> /* for -ENODEV */
+#include <linux/delay.h> /* for mdelay() */
+#include <linux/interrupt.h> /* for in_interrupt() */
+#include <linux/list.h> /* for struct list_head */
+#include <linux/kref.h> /* for struct kref */
+#include <linux/device.h> /* for struct device */
+#include <linux/fs.h> /* for struct file_operations */
+#include <linux/completion.h> /* for struct completion */
+#include <linux/sched.h> /* for current && schedule_timeout */
+#include <linux/mutex.h> /* for struct mutex */
+#include <linux/pm_runtime.h> /* for runtime PM */
+
+struct usb_device;
+struct usb_driver;
+struct wusb_dev;
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Host-side wrappers for standard USB descriptors ... these are parsed
+ * from the data provided by devices. Parsing turns them from a flat
+ * sequence of descriptors into a hierarchy:
+ *
+ * - devices have one (usually) or more configs;
+ * - configs have one (often) or more interfaces;
+ * - interfaces have one (usually) or more settings;
+ * - each interface setting has zero or (usually) more endpoints.
+ * - a SuperSpeed endpoint has a companion descriptor
+ *
+ * And there might be other descriptors mixed in with those.
+ *
+ * Devices may also have class-specific or vendor-specific descriptors.
+ */
+
+struct ep_device;
+
+/**
+ * struct usb_host_endpoint - host-side endpoint descriptor and queue
+ * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
+ * @ss_ep_comp: SuperSpeed companion descriptor for this endpoint
+ * @urb_list: urbs queued to this endpoint; maintained by usbcore
+ * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
+ * with one or more transfer descriptors (TDs) per urb
+ * @ep_dev: ep_device for sysfs info
+ * @extra: descriptors following this endpoint in the configuration
+ * @extralen: how many bytes of "extra" are valid
+ * @enabled: URBs may be submitted to this endpoint
+ * @streams: number of USB-3 streams allocated on the endpoint
+ *
+ * USB requests are always queued to a given endpoint, identified by a
+ * descriptor within an active interface in a given USB configuration.
+ */
+struct usb_host_endpoint {
+ struct usb_endpoint_descriptor desc;
+ struct usb_ss_ep_comp_descriptor ss_ep_comp;
+ struct list_head urb_list;
+ void *hcpriv;
+ struct ep_device *ep_dev; /* For sysfs info */
+
+ unsigned char *extra; /* Extra descriptors */
+ int extralen;
+ int enabled;
+ int streams;
+};
+
+/* host-side wrapper for one interface setting's parsed descriptors */
+struct usb_host_interface {
+ struct usb_interface_descriptor desc;
+
+ int extralen;
+ unsigned char *extra; /* Extra descriptors */
+
+ /* array of desc.bNumEndpoints endpoints associated with this
+ * interface setting. These will be in no particular order.
+ */
+ struct usb_host_endpoint *endpoint;
+
+ char *string; /* iInterface string, if present */
+};
+
+enum usb_interface_condition {
+ USB_INTERFACE_UNBOUND = 0,
+ USB_INTERFACE_BINDING,
+ USB_INTERFACE_BOUND,
+ USB_INTERFACE_UNBINDING,
+};
+
+/**
+ * struct usb_interface - what usb device drivers talk to
+ * @altsetting: array of interface structures, one for each alternate
+ * setting that may be selected. Each one includes a set of
+ * endpoint configurations. They will be in no particular order.
+ * @cur_altsetting: the current altsetting.
+ * @num_altsetting: number of altsettings defined.
+ * @intf_assoc: interface association descriptor
+ * @minor: the minor number assigned to this interface, if this
+ * interface is bound to a driver that uses the USB major number.
+ * If this interface does not use the USB major, this field should
+ * be unused. The driver should set this value in the probe()
+ * function of the driver, after it has been assigned a minor
+ * number from the USB core by calling usb_register_dev().
+ * @condition: binding state of the interface: not bound, binding
+ * (in probe()), bound to a driver, or unbinding (in disconnect())
+ * @sysfs_files_created: sysfs attributes exist
+ * @ep_devs_created: endpoint child pseudo-devices exist
+ * @unregistering: flag set when the interface is being unregistered
+ * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
+ * capability during autosuspend.
+ * @needs_altsetting0: flag set when a set-interface request for altsetting 0
+ * has been deferred.
+ * @needs_binding: flag set when the driver should be re-probed or unbound
+ * following a reset or suspend operation it doesn't support.
+ * @dev: driver model's view of this device
+ * @usb_dev: if an interface is bound to the USB major, this will point
+ * to the sysfs representation for that device.
+ * @pm_usage_cnt: PM usage counter for this interface
+ * @reset_ws: Used for scheduling resets from atomic context.
+ * @resetting_device: USB core reset the device, so use alt setting 0 as
+ * current; needs bandwidth alloc after reset.
+ *
+ * USB device drivers attach to interfaces on a physical device. Each
+ * interface encapsulates a single high level function, such as feeding
+ * an audio stream to a speaker or reporting a change in a volume control.
+ * Many USB devices only have one interface. The protocol used to talk to
+ * an interface's endpoints can be defined in a usb "class" specification,
+ * or by a product's vendor. The (default) control endpoint is part of
+ * every interface, but is never listed among the interface's descriptors.
+ *
+ * The driver that is bound to the interface can use standard driver model
+ * calls such as dev_get_drvdata() on the dev member of this structure.
+ *
+ * Each interface may have alternate settings. The initial configuration
+ * of a device sets altsetting 0, but the device driver can change
+ * that setting using usb_set_interface(). Alternate settings are often
+ * used to control the use of periodic endpoints, such as by having
+ * different endpoints use different amounts of reserved USB bandwidth.
+ * All standards-conformant USB devices that use isochronous endpoints
+ * will use them in non-default settings.
+ *
+ * The USB specification says that alternate setting numbers must run from
+ * 0 to one less than the total number of alternate settings. But some
+ * devices manage to mess this up, and the structures aren't necessarily
+ * stored in numerical order anyhow. Use usb_altnum_to_altsetting() to
+ * look up an alternate setting in the altsetting array based on its number.
+ */
+struct usb_interface {
+ /* array of alternate settings for this interface,
+ * stored in no particular order */
+ struct usb_host_interface *altsetting;
+
+ struct usb_host_interface *cur_altsetting; /* the currently
+ * active alternate setting */
+ unsigned num_altsetting; /* number of alternate settings */
+
+ /* If there is an interface association descriptor then it will list
+ * the associated interfaces */
+ struct usb_interface_assoc_descriptor *intf_assoc;
+
+ int minor; /* minor number this interface is
+ * bound to */
+ enum usb_interface_condition condition; /* state of binding */
+ unsigned sysfs_files_created:1; /* the sysfs attributes exist */
+ unsigned ep_devs_created:1; /* endpoint "devices" exist */
+ unsigned unregistering:1; /* unregistration is in progress */
+ unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
+ unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
+ unsigned needs_binding:1; /* needs delayed unbind/rebind */
+ unsigned resetting_device:1; /* true: bandwidth alloc after reset */
+
+ struct device dev; /* interface specific device info */
+ struct device *usb_dev;
+ atomic_t pm_usage_cnt; /* usage counter for autosuspend */
+ struct work_struct reset_ws; /* for resets in atomic context */
+};
+#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
+
+static inline void *usb_get_intfdata(struct usb_interface *intf)
+{
+ return dev_get_drvdata(&intf->dev);
+}
+
+static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
+{
+ dev_set_drvdata(&intf->dev, data);
+}
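+
+/*
+ * Illustrative sketch (not part of this header): interface drivers commonly
+ * pair usb_set_intfdata() in probe() with usb_get_intfdata() in disconnect()
+ * to carry per-interface state.  The "struct skel" type and the skel_* names
+ * below are hypothetical, and kzalloc()/kfree() assume <linux/slab.h>.
+ *
+ *    static int skel_probe(struct usb_interface *intf,
+ *                          const struct usb_device_id *id)
+ *    {
+ *            struct skel *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ *
+ *            if (!dev)
+ *                    return -ENOMEM;
+ *            usb_set_intfdata(intf, dev);
+ *            return 0;
+ *    }
+ *
+ *    static void skel_disconnect(struct usb_interface *intf)
+ *    {
+ *            struct skel *dev = usb_get_intfdata(intf);
+ *
+ *            usb_set_intfdata(intf, NULL);
+ *            kfree(dev);
+ *    }
+ */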
+
+struct usb_interface *usb_get_intf(struct usb_interface *intf);
+void usb_put_intf(struct usb_interface *intf);
+
+/* Hard limit */
+#define USB_MAXENDPOINTS 30
+/* this maximum is arbitrary */
+#define USB_MAXINTERFACES 32
+#define USB_MAXIADS (USB_MAXINTERFACES/2)
+
+/*
+ * USB Resume Timer: Every Host controller driver should drive the resume
+ * signalling on the bus for the amount of time defined by this macro.
+ *
+ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
+ *
+ * Note that the USB Specification states we should drive resume for *at least*
+ * 20 ms, but it doesn't give an upper bound. This creates two possible
+ * situations which we want to avoid:
+ *
+ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
+ * us to fail USB Electrical Tests, thus failing Certification
+ *
+ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
+ * and while we can argue that's against the USB Specification, we don't have
+ * control over which devices a certification laboratory will be using for
+ * certification. If CertLab uses a device which was tested against Windows and
+ * that happens to have relaxed resume signalling rules, we might fall into
+ * situations where we fail interoperability and electrical tests.
+ *
+ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
+ * should cope with both LPJ calibration errors and devices not following every
+ * detail of the USB Specification.
+ */
+#define USB_RESUME_TIMEOUT 40 /* ms */
+
+/**
+ * struct usb_interface_cache - long-term representation of a device interface
+ * @num_altsetting: number of altsettings defined.
+ * @ref: reference counter.
+ * @altsetting: variable-length array of interface structures, one for
+ * each alternate setting that may be selected. Each one includes a
+ * set of endpoint configurations. They will be in no particular order.
+ *
+ * These structures persist for the lifetime of a usb_device, unlike
+ * struct usb_interface (which persists only as long as its configuration
+ * is installed). The altsetting arrays can be accessed through these
+ * structures at any time, permitting comparison of configurations and
+ * providing support for the /proc/bus/usb/devices pseudo-file.
+ */
+struct usb_interface_cache {
+ unsigned num_altsetting; /* number of alternate settings */
+ struct kref ref; /* reference counter */
+
+ /* variable-length array of alternate settings for this interface,
+ * stored in no particular order */
+ struct usb_host_interface altsetting[0];
+};
+#define ref_to_usb_interface_cache(r) \
+ container_of(r, struct usb_interface_cache, ref)
+#define altsetting_to_usb_interface_cache(a) \
+ container_of(a, struct usb_interface_cache, altsetting[0])
+
+/**
+ * struct usb_host_config - representation of a device's configuration
+ * @desc: the device's configuration descriptor.
+ * @string: pointer to the cached version of the iConfiguration string, if
+ * present for this configuration.
+ * @intf_assoc: list of any interface association descriptors in this config
+ * @interface: array of pointers to usb_interface structures, one for each
+ * interface in the configuration. The number of interfaces is stored
+ * in desc.bNumInterfaces. These pointers are valid only while the
+ * configuration is active.
+ * @intf_cache: array of pointers to usb_interface_cache structures, one
+ * for each interface in the configuration. These structures exist
+ * for the entire life of the device.
+ * @extra: pointer to buffer containing all extra descriptors associated
+ * with this configuration (those preceding the first interface
+ * descriptor).
+ * @extralen: length of the extra descriptors buffer.
+ *
+ * USB devices may have multiple configurations, but only one can be active
+ * at any time. Each encapsulates a different operational environment;
+ * for example, a dual-speed device would have separate configurations for
+ * full-speed and high-speed operation. The number of configurations
+ * available is stored in the device descriptor as bNumConfigurations.
+ *
+ * A configuration can contain multiple interfaces. Each corresponds to
+ * a different function of the USB device, and all are available whenever
+ * the configuration is active. The USB standard says that interfaces
+ * are supposed to be numbered from 0 to desc.bNumInterfaces-1, but a lot
+ * of devices get this wrong. In addition, the interface array is not
+ * guaranteed to be sorted in numerical order. Use usb_ifnum_to_if() to
+ * look up an interface entry based on its number.
+ *
+ * Device drivers should not attempt to activate configurations. The choice
+ * of which configuration to install is a policy decision based on such
+ * considerations as available power, functionality provided, and the user's
+ * desires (expressed through userspace tools). However, drivers can call
+ * usb_reset_configuration() to reinitialize the current configuration and
+ * all its interfaces.
+ */
+struct usb_host_config {
+ struct usb_config_descriptor desc;
+
+ char *string; /* iConfiguration string, if present */
+
+ /* List of any Interface Association Descriptors in this
+ * configuration. */
+ struct usb_interface_assoc_descriptor *intf_assoc[USB_MAXIADS];
+
+ /* the interfaces associated with this configuration,
+ * stored in no particular order */
+ struct usb_interface *interface[USB_MAXINTERFACES];
+
+ /* Interface information available even when this is not the
+ * active configuration */
+ struct usb_interface_cache *intf_cache[USB_MAXINTERFACES];
+
+ unsigned char *extra; /* Extra descriptors */
+ int extralen;
+};
+
+/* USB2.0 and USB3.0 device BOS descriptor set */
+struct usb_host_bos {
+ struct usb_bos_descriptor *desc;
+
+ /* wireless cap descriptor is handled by wusb */
+ struct usb_ext_cap_descriptor *ext_cap;
+ struct usb_ss_cap_descriptor *ss_cap;
+ struct usb_ss_container_id_descriptor *ss_id;
+};
+
+int __usb_get_extra_descriptor(char *buffer, unsigned size,
+ unsigned char type, void **ptr);
+#define usb_get_extra_descriptor(ifpoint, type, ptr) \
+ __usb_get_extra_descriptor((ifpoint)->extra, \
+ (ifpoint)->extralen, \
+ type, (void **)ptr)
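+
+/*
+ * Illustrative sketch (not part of this header): scanning the descriptors
+ * that follow an interface setting.  MY_CLASS_DESC_TYPE and struct
+ * my_class_desc are hypothetical; real callers pass a descriptor type from
+ * their class specification.
+ *
+ *    struct my_class_desc *cdesc;
+ *
+ *    if (usb_get_extra_descriptor(intf->cur_altsetting,
+ *                                 MY_CLASS_DESC_TYPE, &cdesc) < 0)
+ *            dev_dbg(&intf->dev, "no class-specific descriptor found\n");
+ */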
+
+/* ----------------------------------------------------------------------- */
+
+/* USB device number allocation bitmap */
+struct usb_devmap {
+ unsigned long devicemap[128 / (8*sizeof(unsigned long))];
+};
+
+/*
+ * Allocated per bus (tree of devices) we have:
+ */
+struct usb_bus {
+ struct device *controller; /* host/master side hardware */
+ int busnum; /* Bus number (in order of reg) */
+ const char *bus_name; /* stable id (PCI slot_name etc) */
+ u8 uses_dma; /* Does the host controller use DMA? */
+ u8 uses_pio_for_control; /*
+ * Does the host controller use PIO
+ * for control transfers?
+ */
+ u8 otg_port; /* 0, or number of OTG/HNP port */
+ unsigned is_b_host:1; /* true during some HNP roleswitches */
+ unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */
+ unsigned no_stop_on_short:1; /*
+ * Quirk: some controllers don't stop
+ * the ep queue on a short transfer
+ * with the URB_SHORT_NOT_OK flag set.
+ */
+ unsigned no_sg_constraint:1; /* no sg constraint */
+ unsigned sg_tablesize; /* 0 or largest number of sg list entries */
+
+ int devnum_next; /* Next open device number in
+ * round-robin allocation */
+
+ struct usb_devmap devmap; /* device address allocation map */
+ struct usb_device *root_hub; /* Root hub */
+ struct usb_bus *hs_companion; /* Companion EHCI bus, if any */
+ struct list_head bus_list; /* list of busses */
+
+ struct mutex usb_address0_mutex; /* unaddressed device mutex */
+
+ int bandwidth_allocated; /* on this bus: how much of the time
+ * reserved for periodic (intr/iso)
+ * requests is used, on average?
+ * Units: microseconds/frame.
+ * Limits: Full/low speed reserve 90%,
+ * while high speed reserves 80%.
+ */
+ int bandwidth_int_reqs; /* number of Interrupt requests */
+ int bandwidth_isoc_reqs; /* number of Isoc. requests */
+
+ unsigned resuming_ports; /* bit array: resuming root-hub ports */
+
+#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
+ struct mon_bus *mon_bus; /* non-null when associated */
+ int monitored; /* non-zero when monitored */
+#endif
+};
+
+struct usb_dev_state;
+
+/* ----------------------------------------------------------------------- */
+
+struct usb_tt;
+
+enum usb_device_removable {
+ USB_DEVICE_REMOVABLE_UNKNOWN = 0,
+ USB_DEVICE_REMOVABLE,
+ USB_DEVICE_FIXED,
+};
+
+enum usb_port_connect_type {
+ USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
+ USB_PORT_CONNECT_TYPE_HOT_PLUG,
+ USB_PORT_CONNECT_TYPE_HARD_WIRED,
+ USB_PORT_NOT_USED,
+};
+
+/*
+ * USB 2.0 Link Power Management (LPM) parameters.
+ */
+struct usb2_lpm_parameters {
+ /* Best effort service latency indicates how long the host will drive
+ * resume on an exit from L1.
+ */
+ unsigned int besl;
+
+ /* Timeout value in microseconds for the L1 inactivity (LPM) timer.
+ * When the timer counts to zero, the parent hub will initiate an LPM
+ * transition to L1.
+ */
+ int timeout;
+};
+
+/*
+ * USB 3.0 Link Power Management (LPM) parameters.
+ *
+ * PEL and SEL are USB 3.0 Link PM latencies for device-initiated LPM exit.
+ * MEL is the USB 3.0 Link PM latency for host-initiated LPM exit.
+ * All three are stored in nanoseconds.
+ */
+struct usb3_lpm_parameters {
+ /*
+ * Maximum exit latency (MEL) for the host to send a packet to the
+ * device (either a Ping for isoc endpoints, or a data packet for
+ * interrupt endpoints), the hubs to decode the packet, and for all hubs
+ * in the path to transition the links to U0.
+ */
+ unsigned int mel;
+ /*
+ * Maximum exit latency for a device-initiated LPM transition to bring
+ * all links into U0. Abbreviated as "PEL" in section 9.4.12 of the USB
+ * 3.0 spec, with no explanation of what "P" stands for. "Path"?
+ */
+ unsigned int pel;
+
+ /*
+ * The System Exit Latency (SEL) includes PEL, and three other
+ * latencies. After a device initiates a U0 transition, it will take
+ * some time from when the device sends the ERDY to when it will finally
+ * receive the data packet. Basically, SEL should be the worst-case
+ * latency from when a device starts initiating a U0 transition to when
+ * it will get data.
+ */
+ unsigned int sel;
+ /*
+ * The idle timeout value that is currently programmed into the parent
+ * hub for this device. When the timer counts to zero, the parent hub
+ * will initiate an LPM transition to either U1 or U2.
+ */
+ int timeout;
+};
+
+/**
+ * struct usb_device - kernel's representation of a USB device
+ * @devnum: device number; address on a USB bus
+ * @devpath: device ID string for use in messages (e.g., /port/...)
+ * @route: tree topology hex string for use with xHCI
+ * @state: device state: configured, not attached, etc.
+ * @speed: device speed: high/full/low (or error)
+ * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
+ * @ttport: device port on that tt hub
+ * @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints
+ * @parent: our hub, unless we're the root
+ * @bus: bus we're part of
+ * @ep0: endpoint 0 data (default control pipe)
+ * @dev: generic device interface
+ * @descriptor: USB device descriptor
+ * @bos: USB device BOS descriptor set
+ * @config: all of the device's configs
+ * @actconfig: the active configuration
+ * @ep_in: array of IN endpoints
+ * @ep_out: array of OUT endpoints
+ * @rawdescriptors: raw descriptors for each config
+ * @bus_mA: Current available from the bus
+ * @portnum: parent port number (origin 1)
+ * @level: number of USB hub ancestors
+ * @can_submit: URBs may be submitted
+ * @persist_enabled: USB_PERSIST enabled for this device
+ * @have_langid: whether string_langid is valid
+ * @authorized: policy has said we can use it;
+ * (user space) policy determines if we authorize this device to be
+ * used or not. By default, wired USB devices are authorized.
+ * WUSB devices are not, until we authorize them from user space.
+ * FIXME -- complete doc
+ * @authenticated: Crypto authentication passed
+ * @wusb: device is Wireless USB
+ * @lpm_capable: device supports LPM
+ * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
+ * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM
+ * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled
+ * @usb2_hw_lpm_allowed: Userspace allows USB 2.0 LPM to be enabled
+ * @usb3_lpm_enabled: USB3 hardware LPM enabled
+ * @string_langid: language ID for strings
+ * @product: iProduct string, if present (static)
+ * @manufacturer: iManufacturer string, if present (static)
+ * @serial: iSerialNumber string, if present (static)
+ * @filelist: usbfs files that are open to this device
+ * @maxchild: number of ports if hub
+ * @quirks: quirks of the whole device
+ * @urbnum: number of URBs submitted for the whole device
+ * @active_duration: total time device is not suspended
+ * @connect_time: time device was first connected
+ * @do_remote_wakeup: remote wakeup should be enabled
+ * @reset_resume: needs reset instead of resume
+ * @port_is_suspended: the upstream port is suspended (L2 or U3)
+ * @wusb_dev: if this is a Wireless USB device, link to the WUSB
+ * specific data for the device.
+ * @slot_id: Slot ID assigned by xHCI
+ * @removable: Device can be physically removed from this port
+ * @l1_params: best effort service latency for USB2 L1 LPM state, and L1 timeout.
+ * @u1_params: exit latencies for USB3 U1 LPM state, and hub-initiated timeout.
+ * @u2_params: exit latencies for USB3 U2 LPM state, and hub-initiated timeout.
+ * @lpm_disable_count: Ref count used by usb_disable_lpm() and usb_enable_lpm()
+ * to keep track of the number of functions that require USB 3.0 Link Power
+ * Management to be disabled for this usb_device. This count should only
+ * be manipulated by those functions while the bandwidth_mutex is held.
+ *
+ * Notes:
+ * Usbcore drivers should not set usbdev->state directly. Instead use
+ * usb_set_device_state().
+ */
+struct usb_device {
+ int devnum;
+ char devpath[16];
+ u32 route;
+ enum usb_device_state state;
+ enum usb_device_speed speed;
+
+ struct usb_tt *tt;
+ int ttport;
+
+ unsigned int toggle[2];
+
+ struct usb_device *parent;
+ struct usb_bus *bus;
+ struct usb_host_endpoint ep0;
+
+ struct device dev;
+
+ struct usb_device_descriptor descriptor;
+ struct usb_host_bos *bos;
+ struct usb_host_config *config;
+
+ struct usb_host_config *actconfig;
+ struct usb_host_endpoint *ep_in[16];
+ struct usb_host_endpoint *ep_out[16];
+
+ char **rawdescriptors;
+
+ unsigned short bus_mA;
+ u8 portnum;
+ u8 level;
+
+ unsigned can_submit:1;
+ unsigned persist_enabled:1;
+ unsigned have_langid:1;
+ unsigned authorized:1;
+ unsigned authenticated:1;
+ unsigned wusb:1;
+ unsigned lpm_capable:1;
+ unsigned usb2_hw_lpm_capable:1;
+ unsigned usb2_hw_lpm_besl_capable:1;
+ unsigned usb2_hw_lpm_enabled:1;
+ unsigned usb2_hw_lpm_allowed:1;
+ unsigned usb3_lpm_enabled:1;
+ int string_langid;
+
+ /* static strings from the device */
+ char *product;
+ char *manufacturer;
+ char *serial;
+
+ struct list_head filelist;
+
+ int maxchild;
+
+ u32 quirks;
+ atomic_t urbnum;
+
+ unsigned long active_duration;
+
+#ifdef CONFIG_PM
+ unsigned long connect_time;
+
+ unsigned do_remote_wakeup:1;
+ unsigned reset_resume:1;
+ unsigned port_is_suspended:1;
+#endif
+ struct wusb_dev *wusb_dev;
+ int slot_id;
+ enum usb_device_removable removable;
+ struct usb2_lpm_parameters l1_params;
+ struct usb3_lpm_parameters u1_params;
+ struct usb3_lpm_parameters u2_params;
+ unsigned lpm_disable_count;
+};
+#define to_usb_device(d) container_of(d, struct usb_device, dev)
+
+static inline struct usb_device *interface_to_usbdev(struct usb_interface *intf)
+{
+ return to_usb_device(intf->dev.parent);
+}
+
+extern struct usb_device *usb_get_dev(struct usb_device *dev);
+extern void usb_put_dev(struct usb_device *dev);
+extern struct usb_device *usb_hub_find_child(struct usb_device *hdev,
+ int port1);
+
+/**
+ * usb_hub_for_each_child - iterate over all child devices on the hub
+ * @hdev: USB device belonging to the usb hub
+ * @port1: portnum associated with child device
+ * @child: child device pointer
+ */
+#define usb_hub_for_each_child(hdev, port1, child) \
+ for (port1 = 1, child = usb_hub_find_child(hdev, port1); \
+ port1 <= hdev->maxchild; \
+ child = usb_hub_find_child(hdev, ++port1)) \
+ if (!child) continue; else
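+
+/*
+ * Illustrative sketch (not part of this header): enumerating the devices
+ * attached to a hub, where "hdev" is assumed to be the hub's usb_device.
+ *
+ *    struct usb_device *child;
+ *    int port1;
+ *
+ *    usb_hub_for_each_child(hdev, port1, child)
+ *            dev_info(&hdev->dev, "port %d: device number %d\n",
+ *                     port1, child->devnum);
+ */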
+
+/* USB device locking */
+#define usb_lock_device(udev) device_lock(&(udev)->dev)
+#define usb_unlock_device(udev) device_unlock(&(udev)->dev)
+#define usb_trylock_device(udev) device_trylock(&(udev)->dev)
+extern int usb_lock_device_for_reset(struct usb_device *udev,
+ const struct usb_interface *iface);
+
+/* USB port reset for device reinitialization */
+extern int usb_reset_device(struct usb_device *dev);
+extern void usb_queue_reset_device(struct usb_interface *dev);
+
+#ifdef CONFIG_ACPI
+extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
+ bool enable);
+extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
+#else
+static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
+ bool enable) { return 0; }
+static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
+ { return true; }
+#endif
+
+/* USB autosuspend and autoresume */
+#ifdef CONFIG_PM
+extern void usb_enable_autosuspend(struct usb_device *udev);
+extern void usb_disable_autosuspend(struct usb_device *udev);
+
+extern int usb_autopm_get_interface(struct usb_interface *intf);
+extern void usb_autopm_put_interface(struct usb_interface *intf);
+extern int usb_autopm_get_interface_async(struct usb_interface *intf);
+extern void usb_autopm_put_interface_async(struct usb_interface *intf);
+extern void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
+extern void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
+
+static inline void usb_mark_last_busy(struct usb_device *udev)
+{
+ pm_runtime_mark_last_busy(&udev->dev);
+}
+
+#else
+
+static inline int usb_enable_autosuspend(struct usb_device *udev)
+{ return 0; }
+static inline int usb_disable_autosuspend(struct usb_device *udev)
+{ return 0; }
+
+static inline int usb_autopm_get_interface(struct usb_interface *intf)
+{ return 0; }
+static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
+{ return 0; }
+
+static inline void usb_autopm_put_interface(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_get_interface_no_resume(
+ struct usb_interface *intf)
+{ }
+static inline void usb_autopm_put_interface_no_suspend(
+ struct usb_interface *intf)
+{ }
+static inline void usb_mark_last_busy(struct usb_device *udev)
+{ }
+#endif
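+
+/*
+ * Illustrative sketch (not part of this header): a common autosuspend
+ * pattern is to hold a PM reference while userspace has the device open and
+ * drop it on release, letting the core runtime-suspend an idle device.  The
+ * skel_* names and the private "dev->intf" pointer are hypothetical.
+ *
+ *    static int skel_open(struct inode *inode, struct file *file)
+ *    {
+ *            int retval = usb_autopm_get_interface(dev->intf);
+ *
+ *            if (retval)
+ *                    return retval;
+ *            ...
+ *    }
+ *
+ *    static int skel_release(struct inode *inode, struct file *file)
+ *    {
+ *            usb_autopm_put_interface(dev->intf);
+ *            ...
+ *    }
+ */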
+
+extern int usb_disable_lpm(struct usb_device *udev);
+extern void usb_enable_lpm(struct usb_device *udev);
+/* Same as above, but these functions lock/unlock the bandwidth_mutex. */
+extern int usb_unlocked_disable_lpm(struct usb_device *udev);
+extern void usb_unlocked_enable_lpm(struct usb_device *udev);
+
+extern int usb_disable_ltm(struct usb_device *udev);
+extern void usb_enable_ltm(struct usb_device *udev);
+
+static inline bool usb_device_supports_ltm(struct usb_device *udev)
+{
+ if (udev->speed != USB_SPEED_SUPER || !udev->bos || !udev->bos->ss_cap)
+ return false;
+ return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT;
+}
+
+static inline bool usb_device_no_sg_constraint(struct usb_device *udev)
+{
+ return udev && udev->bus && udev->bus->no_sg_constraint;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* for drivers using iso endpoints */
+extern int usb_get_current_frame_number(struct usb_device *usb_dev);
+
+/* Sets up a group of bulk endpoints to support multiple stream IDs. */
+extern int usb_alloc_streams(struct usb_interface *interface,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags);
+
+/* Reverts a group of bulk endpoints back to not using stream IDs. */
+extern int usb_free_streams(struct usb_interface *interface,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags);
+
+/* use these for multi-interface device registration */
+extern int usb_driver_claim_interface(struct usb_driver *driver,
+ struct usb_interface *iface, void *priv);
+
+/**
+ * usb_interface_claimed - returns true iff an interface is claimed
+ * @iface: the interface being checked
+ *
+ * Return: %true (nonzero) iff the interface is claimed, else %false
+ * (zero).
+ *
+ * Note:
+ * Callers must own the driver model's usb bus readlock. So driver
+ * probe() entries don't need extra locking, but other call contexts
+ * may need to explicitly claim that lock.
+ *
+ */
+static inline int usb_interface_claimed(struct usb_interface *iface)
+{
+ return (iface->dev.driver != NULL);
+}
+
+extern void usb_driver_release_interface(struct usb_driver *driver,
+ struct usb_interface *iface);
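+
+/*
+ * Illustrative sketch (not part of this header): a driver bound to one
+ * interface of a multi-interface function may claim a sibling interface in
+ * probe() and release it in disconnect().  "skel_driver", "sibling" and
+ * "priv" are hypothetical.
+ *
+ *    retval = usb_driver_claim_interface(&skel_driver, sibling, priv);
+ *    if (retval)
+ *            goto error;
+ *    ...
+ *    usb_driver_release_interface(&skel_driver, sibling);
+ */
+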
+const struct usb_device_id *usb_match_id(struct usb_interface *interface,
+ const struct usb_device_id *id);
+extern int usb_match_one_id(struct usb_interface *interface,
+ const struct usb_device_id *id);
+
+extern int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *));
+extern struct usb_interface *usb_find_interface(struct usb_driver *drv,
+ int minor);
+extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
+ unsigned ifnum);
+extern struct usb_host_interface *usb_altnum_to_altsetting(
+ const struct usb_interface *intf, unsigned int altnum);
+extern struct usb_host_interface *usb_find_alt_setting(
+ struct usb_host_config *config,
+ unsigned int iface_num,
+ unsigned int alt_num);
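+
+/*
+ * Illustrative sketch (not part of this header): looking up interfaces and
+ * altsettings by number rather than by array index, since neither array is
+ * guaranteed to be sorted; the numbers used here are arbitrary examples.
+ *
+ *    struct usb_interface *intf = usb_ifnum_to_if(udev, 1);
+ *    struct usb_host_interface *alt;
+ *
+ *    if (intf) {
+ *            alt = usb_altnum_to_altsetting(intf, 2);
+ *            if (alt)
+ *                    dev_dbg(&intf->dev, "altsetting 2 has %d endpoints\n",
+ *                            alt->desc.bNumEndpoints);
+ *    }
+ */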
+
+/* port claiming functions */
+int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
+ struct usb_dev_state *owner);
+int usb_hub_release_port(struct usb_device *hdev, unsigned port1,
+ struct usb_dev_state *owner);
+
+/**
+ * usb_make_path - returns stable device path in the usb tree
+ * @dev: the device whose path is being constructed
+ * @buf: where to put the string
+ * @size: how big is "buf"?
+ *
+ * Return: Length of the string (> 0) or negative if size was too small.
+ *
+ * Note:
+ * This identifier is intended to be "stable", reflecting physical paths in
+ * hardware such as physical bus addresses for host controllers or ports on
+ * USB hubs. That makes it stay the same until systems are physically
+ * reconfigured, by re-cabling a tree of USB devices or by moving USB host
+ * controllers. Adding and removing devices, including virtual root hubs
+ * in host controller driver modules, does not change these path identifiers;
+ * neither does rebooting or re-enumerating. These are more useful identifiers
+ * than changeable ("unstable") ones like bus numbers or device addresses.
+ *
+ * With a partial exception for devices connected to USB 2.0 root hubs, these
+ * identifiers are also predictable. So long as the device tree isn't changed,
+ * plugging any USB device into a given hub port always gives it the same path.
+ * Because of the use of "companion" controllers, devices connected to ports on
+ * USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are
+ * high speed, and a different one if they are full or low speed.
+ */
+static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
+{
+ int actual;
+ actual = snprintf(buf, size, "usb-%s-%s", dev->bus->bus_name,
+ dev->devpath);
+ return (actual >= (int)size) ? -1 : actual;
+}
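+
+/*
+ * Illustrative sketch (not part of this header): logging a device's stable
+ * topology path, where "udev" is an already-known usb_device.
+ *
+ *    char path[64];
+ *
+ *    if (usb_make_path(udev, path, sizeof(path)) > 0)
+ *            dev_info(&udev->dev, "connected at %s\n", path);
+ */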
+
+/*-------------------------------------------------------------------------*/
+
+#define USB_DEVICE_ID_MATCH_DEVICE \
+ (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
+#define USB_DEVICE_ID_MATCH_DEV_RANGE \
+ (USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI)
+#define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
+ (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE)
+#define USB_DEVICE_ID_MATCH_DEV_INFO \
+ (USB_DEVICE_ID_MATCH_DEV_CLASS | \
+ USB_DEVICE_ID_MATCH_DEV_SUBCLASS | \
+ USB_DEVICE_ID_MATCH_DEV_PROTOCOL)
+#define USB_DEVICE_ID_MATCH_INT_INFO \
+ (USB_DEVICE_ID_MATCH_INT_CLASS | \
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS | \
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL)
+
+/**
+ * USB_DEVICE - macro used to describe a specific usb device
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific device.
+ */
+#define USB_DEVICE(vend, prod) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod)
+/**
+ * USB_DEVICE_VER - describe a specific usb device with a version range
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @lo: the bcdDevice_lo value
+ * @hi: the bcdDevice_hi value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific device, with a version range.
+ */
+#define USB_DEVICE_VER(vend, prod, lo, hi) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bcdDevice_lo = (lo), \
+ .bcdDevice_hi = (hi)
+
+/**
+ * USB_DEVICE_INTERFACE_CLASS - describe a usb device with a specific interface class
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @cl: bInterfaceClass value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific interface class of devices.
+ */
+#define USB_DEVICE_INTERFACE_CLASS(vend, prod, cl) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_CLASS, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceClass = (cl)
+
+/**
+ * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific interface protocol of devices.
+ */
+#define USB_DEVICE_INTERFACE_PROTOCOL(vend, prod, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceProtocol = (pr)
+
+/**
+ * USB_DEVICE_INTERFACE_NUMBER - describe a usb device with a specific interface number
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @num: bInterfaceNumber value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific interface number of devices.
+ */
+#define USB_DEVICE_INTERFACE_NUMBER(vend, prod, num) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_NUMBER, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceNumber = (num)
+
+/**
+ * USB_DEVICE_INFO - macro used to describe a class of usb devices
+ * @cl: bDeviceClass value
+ * @sc: bDeviceSubClass value
+ * @pr: bDeviceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific class of devices.
+ */
+#define USB_DEVICE_INFO(cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, \
+ .bDeviceClass = (cl), \
+ .bDeviceSubClass = (sc), \
+ .bDeviceProtocol = (pr)
+
+/**
+ * USB_INTERFACE_INFO - macro used to describe a class of usb interfaces
+ * @cl: bInterfaceClass value
+ * @sc: bInterfaceSubClass value
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific class of interfaces.
+ */
+#define USB_INTERFACE_INFO(cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO, \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
+
+/**
+ * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @cl: bInterfaceClass value
+ * @sc: bInterfaceSubClass value
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific device with a specific class of interfaces.
+ *
+ * This is especially useful when explicitly matching devices that have
+ * vendor specific bDeviceClass values, but standards-compliant interfaces.
+ */
+#define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+ | USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
+
+/**
+ * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
+ * @vend: the 16 bit USB Vendor ID
+ * @cl: bInterfaceClass value
+ * @sc: bInterfaceSubClass value
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific vendor with a specific class of interfaces.
+ *
+ * This is especially useful when explicitly matching devices that have
+ * vendor specific bDeviceClass values, but standards-compliant interfaces.
+ */
+#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+ | USB_DEVICE_ID_MATCH_VENDOR, \
+ .idVendor = (vend), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
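+
+/*
+ * Illustrative sketch (not part of this header): a typical id_table mixes
+ * these macros and is exported for hotplug matching with
+ * MODULE_DEVICE_TABLE() (from <linux/module.h>); the empty entry terminates
+ * the table.  The vendor/product values and the skel_ name are hypothetical.
+ *
+ *    static const struct usb_device_id skel_table[] = {
+ *            { USB_DEVICE(0x1234, 0x5678) },
+ *            { USB_INTERFACE_INFO(USB_CLASS_VENDOR_SPEC, 0x00, 0x00) },
+ *            { }
+ *    };
+ *    MODULE_DEVICE_TABLE(usb, skel_table);
+ */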
+
+/* ----------------------------------------------------------------------- */
+
+/* Stuff for dynamic usb ids */
+struct usb_dynids {
+ spinlock_t lock;
+ struct list_head list;
+};
+
+struct usb_dynid {
+ struct list_head node;
+ struct usb_device_id id;
+};
+
+extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
+ const struct usb_device_id *id_table,
+ struct device_driver *driver,
+ const char *buf, size_t count);
+
+extern ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf);
+
+/**
+ * struct usbdrv_wrap - wrapper for driver-model structure
+ * @driver: The driver-model core driver structure.
+ * @for_devices: Non-zero for device drivers, 0 for interface drivers.
+ */
+struct usbdrv_wrap {
+ struct device_driver driver;
+ int for_devices;
+};
+
+/**
+ * struct usb_driver - identifies USB interface driver to usbcore
+ * @name: The driver name should be unique among USB drivers,
+ * and should normally be the same as the module name.
+ * @probe: Called to see if the driver is willing to manage a particular
+ * interface on a device. If it is, probe returns zero and uses
+ * usb_set_intfdata() to associate driver-specific data with the
+ * interface. It may also use usb_set_interface() to specify the
+ * appropriate altsetting. If unwilling to manage the interface,
+ * return -ENODEV; if genuine IO errors occurred, return an
+ * appropriate negative errno value.
+ * @disconnect: Called when the interface is no longer accessible, usually
+ * because its device has been (or is being) disconnected or the
+ * driver module is being unloaded.
+ * @unlocked_ioctl: Used for drivers that want to talk to userspace through
+ * the "usbfs" filesystem. This lets devices provide ways to
+ * expose information to user space regardless of where they
+ * do (or don't) show up otherwise in the filesystem.
+ * @suspend: Called when the device is going to be suspended by the
+ * system either from system sleep or runtime suspend context. The
+ * return value will be ignored in system sleep context, so do NOT
+ * try to continue using the device if suspend fails in this case.
+ * Instead, let the resume or reset-resume routine recover from
+ * the failure.
+ * @resume: Called when the device is being resumed by the system.
+ * @reset_resume: Called when the suspended device has been reset instead
+ * of being resumed.
+ * @pre_reset: Called by usb_reset_device() when the device is about to be
+ * reset. This routine must not return until the driver has no active
+ * URBs for the device, and no more URBs may be submitted until the
+ * post_reset method is called.
+ * @post_reset: Called by usb_reset_device() after the device
+ * has been reset
+ * @id_table: USB drivers use ID table to support hotplugging.
+ * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set
+ * or your driver's probe function will never get called.
+ * @dynids: used internally to hold the list of dynamically added device
+ * ids for this driver.
+ * @drvwrap: Driver-model core structure wrapper.
+ * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
+ * added to this driver by preventing the sysfs file from being created.
+ * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
+ * for interfaces bound to this driver.
+ * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
+ * endpoints before calling the driver's disconnect method.
+ * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
+ * to initiate lower power link state transitions when an idle timeout
+ * occurs. Device-initiated USB 3.0 link PM will still be allowed.
+ *
+ * USB interface drivers must provide a name, probe() and disconnect()
+ * methods, and an id_table. Other driver fields are optional.
+ *
+ * The id_table is used in hotplugging. It holds a set of descriptors,
+ * and specialized data may be associated with each entry. That table
+ * is used by both user and kernel mode hotplugging support.
+ *
+ * The probe() and disconnect() methods are called in a context where
+ * they can sleep, but they should avoid abusing the privilege. Most
+ * work to connect to a device should be done when the device is opened,
+ * and undone at the last close. The disconnect code needs to address
+ * concurrency issues with respect to open() and close() methods, as
+ * well as forcing all pending I/O requests to complete (by unlinking
+ * them as necessary, and blocking until the unlinks complete).
+ */
+struct usb_driver {
+ const char *name;
+
+ int (*probe) (struct usb_interface *intf,
+ const struct usb_device_id *id);
+
+ void (*disconnect) (struct usb_interface *intf);
+
+ int (*unlocked_ioctl) (struct usb_interface *intf, unsigned int code,
+ void *buf);
+
+ int (*suspend) (struct usb_interface *intf, pm_message_t message);
+ int (*resume) (struct usb_interface *intf);
+ int (*reset_resume)(struct usb_interface *intf);
+
+ int (*pre_reset)(struct usb_interface *intf);
+ int (*post_reset)(struct usb_interface *intf);
+
+ const struct usb_device_id *id_table;
+
+ struct usb_dynids dynids;
+ struct usbdrv_wrap drvwrap;
+ unsigned int no_dynamic_id:1;
+ unsigned int supports_autosuspend:1;
+ unsigned int disable_hub_initiated_lpm:1;
+ unsigned int soft_unbind:1;
+};
+#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
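+
+/*
+ * Illustrative sketch (not part of this header): a minimal interface driver
+ * definition.  The skel_* callbacks and id table are hypothetical and would
+ * be defined elsewhere in the driver.
+ *
+ *    static struct usb_driver skel_driver = {
+ *            .name                 = "skeleton",
+ *            .probe                = skel_probe,
+ *            .disconnect           = skel_disconnect,
+ *            .id_table             = skel_table,
+ *            .supports_autosuspend = 1,
+ *    };
+ */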
+
+/**
+ * struct usb_device_driver - identifies USB device driver to usbcore
+ * @name: The driver name should be unique among USB drivers,
+ * and should normally be the same as the module name.
+ * @probe: Called to see if the driver is willing to manage a particular
+ * device. If it is, probe returns zero and uses dev_set_drvdata()
+ * to associate driver-specific data with the device. If unwilling
+ * to manage the device, return a negative errno value.
+ * @disconnect: Called when the device is no longer accessible, usually
+ * because it has been (or is being) disconnected or the driver's
+ * module is being unloaded.
+ * @suspend: Called when the device is going to be suspended by the system.
+ * @resume: Called when the device is being resumed by the system.
+ * @drvwrap: Driver-model core structure wrapper.
+ * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
+ * for devices bound to this driver.
+ *
+ * USB drivers must provide all the fields listed above except drvwrap.
+ */
+struct usb_device_driver {
+ const char *name;
+
+ int (*probe) (struct usb_device *udev);
+ void (*disconnect) (struct usb_device *udev);
+
+ int (*suspend) (struct usb_device *udev, pm_message_t message);
+ int (*resume) (struct usb_device *udev, pm_message_t message);
+ struct usbdrv_wrap drvwrap;
+ unsigned int supports_autosuspend:1;
+};
+#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
+ drvwrap.driver)
+
+extern struct bus_type usb_bus_type;
+
+/**
+ * struct usb_class_driver - identifies a USB driver that wants to use the USB major number
+ * @name: the usb class device name for this driver. Will show up in sysfs.
+ * @devnode: Callback to provide a naming hint for a possible
+ * device node to create.
+ * @fops: pointer to the struct file_operations of this driver.
+ * @minor_base: the start of the minor range for this driver.
+ *
+ * This structure is used for the usb_register_dev() and
+ * usb_unregister_dev() functions, to consolidate a number of the
+ * parameters used for them.
+ */
+struct usb_class_driver {
+ char *name;
+ char *(*devnode)(struct device *dev, umode_t *mode);
+ const struct file_operations *fops;
+ int minor_base;
+};
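+
+/*
+ * Illustrative sketch (not part of this header): requesting a minor number
+ * for an interface from probe().  The skel_fops file operations and the
+ * minor base are hypothetical.
+ *
+ *    static struct usb_class_driver skel_class = {
+ *            .name       = "skel%d",
+ *            .fops       = &skel_fops,
+ *            .minor_base = 192,
+ *    };
+ *
+ *    retval = usb_register_dev(intf, &skel_class);
+ *    if (retval)
+ *            dev_err(&intf->dev, "unable to get a minor for this device\n");
+ */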
+
+/*
+ * use these in module_init()/module_exit()
+ * and don't forget MODULE_DEVICE_TABLE(usb, ...)
+ */
+extern int usb_register_driver(struct usb_driver *, struct module *,
+ const char *);
+
+/* use a define to avoid include chaining to get THIS_MODULE & friends */
+#define usb_register(driver) \
+ usb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
+
+extern void usb_deregister(struct usb_driver *);
+
+/**
+ * module_usb_driver() - Helper macro for registering a USB driver
+ * @__usb_driver: usb_driver struct
+ *
+ * Helper macro for USB drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit()
+ */
+#define module_usb_driver(__usb_driver) \
+ module_driver(__usb_driver, usb_register, \
+ usb_deregister)
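+
+/*
+ * Illustrative sketch (not part of this header): a driver with no extra
+ * init/exit work can replace its module_init()/module_exit() pair with a
+ * single line (skel_driver is the hypothetical usb_driver above).
+ *
+ *    module_usb_driver(skel_driver);
+ */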
+
+extern int usb_register_device_driver(struct usb_device_driver *,
+ struct module *);
+extern void usb_deregister_device_driver(struct usb_device_driver *);
+
+extern int usb_register_dev(struct usb_interface *intf,
+ struct usb_class_driver *class_driver);
+extern void usb_deregister_dev(struct usb_interface *intf,
+ struct usb_class_driver *class_driver);
+
+extern int usb_disabled(void);
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * URB support, for asynchronous request completions
+ */
+
+/*
+ * urb->transfer_flags:
+ *
+ * Note: URB_DIR_IN/OUT is automatically set in usb_submit_urb().
+ */
+#define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */
+#define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired
+ * slot in the schedule */
+#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */
+#define URB_NO_FSBR 0x0020 /* UHCI-specific */
+#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */
+#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt
+ * needed */
+#define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */
+
+/* The following flags are used internally by usbcore and HCDs */
+#define URB_DIR_IN 0x0200 /* Transfer from device to host */
+#define URB_DIR_OUT 0
+#define URB_DIR_MASK URB_DIR_IN
+
+#define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */
+#define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */
+#define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */
+#define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */
+#define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */
+#define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */
+#define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */
+#define URB_ALIGNED_TEMP_BUFFER 0x00800000 /* Temp buffer was alloc'd */
+
+struct usb_iso_packet_descriptor {
+ unsigned int offset;
+ unsigned int length; /* expected length */
+ unsigned int actual_length;
+ int status;
+};
+
+struct urb;
+
+struct usb_anchor {
+ struct list_head urb_list;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+ atomic_t suspend_wakeups;
+ unsigned int poisoned:1;
+};
+
+static inline void init_usb_anchor(struct usb_anchor *anchor)
+{
+ memset(anchor, 0, sizeof(*anchor));
+ INIT_LIST_HEAD(&anchor->urb_list);
+ init_waitqueue_head(&anchor->wait);
+ spin_lock_init(&anchor->lock);
+}
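+
+/*
+ * Illustrative sketch (not part of this header): anchoring in-flight URBs so
+ * they can be killed as a group from disconnect().  usb_anchor_urb(),
+ * usb_unanchor_urb() and usb_kill_anchored_urbs() are declared further down
+ * in this header; "dev->submitted" is a hypothetical per-device anchor.
+ *
+ *    init_usb_anchor(&dev->submitted);
+ *    ...
+ *    usb_anchor_urb(urb, &dev->submitted);
+ *    retval = usb_submit_urb(urb, GFP_KERNEL);
+ *    if (retval)
+ *            usb_unanchor_urb(urb);
+ *    ...
+ *    usb_kill_anchored_urbs(&dev->submitted);
+ */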
+
+typedef void (*usb_complete_t)(struct urb *);
+
+/**
+ * struct urb - USB Request Block
+ * @urb_list: For use by current owner of the URB.
+ * @anchor_list: membership in the list of an anchor
+ * @anchor: to anchor URBs to a common mooring
+ * @ep: Points to the endpoint's data structure. Will eventually
+ * replace @pipe.
+ * @pipe: Holds endpoint number, direction, type, and more.
+ * Create these values with the eight macros available;
+ * usb_{snd,rcv}TYPEpipe(dev,endpoint), where the TYPE is "ctrl"
+ * (control), "bulk", "int" (interrupt), or "iso" (isochronous).
+ * For example usb_sndbulkpipe() or usb_rcvintpipe(). Endpoint
+ * numbers range from zero to fifteen. Note that "in" endpoint two
+ * is a different endpoint (and pipe) from "out" endpoint two.
+ * The current configuration controls the existence, type, and
+ * maximum packet size of any given endpoint.
+ * @stream_id: the endpoint's stream ID for bulk streams
+ * @dev: Identifies the USB device to perform the request.
+ * @status: This is read in non-iso completion functions to get the
+ * status of the particular request. ISO requests only use it
+ * to tell whether the URB was unlinked; detailed status for
+ * each frame is in the fields of the iso_frame-desc.
+ * @transfer_flags: A variety of flags may be used to affect how URB
+ * submission, unlinking, or operation are handled. Different
+ * kinds of URB can use different flags.
+ * @transfer_buffer: This identifies the buffer to (or from) which the I/O
+ * request will be performed unless URB_NO_TRANSFER_DMA_MAP is set
+ * (however, do not leave garbage in transfer_buffer even then).
+ * This buffer must be suitable for DMA; allocate it with
+ * kmalloc() or equivalent. For transfers to "in" endpoints, contents
+ * of this buffer will be modified. This buffer is used for the data
+ * stage of control transfers.
+ * @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP,
+ * the device driver is saying that it provided this DMA address,
+ * which the host controller driver should use in preference to the
+ * transfer_buffer.
+ * @sg: scatter gather buffer list, the buffer size of each element in
+ * the list (except the last) must be divisible by the endpoint's
+ * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
+ * @num_mapped_sgs: (internal) number of mapped sg entries
+ * @num_sgs: number of entries in the sg list
+ * @transfer_buffer_length: How big is transfer_buffer. The transfer may
+ * be broken up into chunks according to the current maximum packet
+ * size for the endpoint, which is a function of the configuration
+ * and is encoded in the pipe. When the length is zero, neither
+ * transfer_buffer nor transfer_dma is used.
+ * @actual_length: This is read in non-iso completion functions, and
+ * it tells how many bytes (out of transfer_buffer_length) were
+ * transferred. It will normally be the same as requested, unless
+ * either an error was reported or a short read was performed.
+ * The URB_SHORT_NOT_OK transfer flag may be used to make such
+ * short reads be reported as errors.
+ * @setup_packet: Only used for control transfers, this points to eight bytes
+ * of setup data. Control transfers always start by sending this data
+ * to the device. Then transfer_buffer is read or written, if needed.
+ * @setup_dma: DMA pointer for the setup packet. The caller must not use
+ * this field; setup_packet must point to a valid buffer.
+ * @start_frame: Returns the initial frame for isochronous transfers.
+ * @number_of_packets: Lists the number of ISO transfer buffers.
+ * @interval: Specifies the polling interval for interrupt or isochronous
+ * transfers. The units are frames (milliseconds) for full and low
+ * speed devices, and microframes (1/8 millisecond) for highspeed
+ * and SuperSpeed devices.
+ * @error_count: Returns the number of ISO transfers that reported errors.
+ * @context: For use in completion functions. This normally points to
+ * request-specific driver context.
+ * @complete: Completion handler. This URB is passed as the parameter to the
+ * completion function. The completion function may then do what
+ * it likes with the URB, including resubmitting or freeing it.
+ * @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to
+ * collect the transfer status for each buffer.
+ *
+ * This structure identifies USB transfer requests. URBs must be allocated by
+ * calling usb_alloc_urb() and freed with a call to usb_free_urb().
+ * Initialization may be done using various usb_fill_*_urb() functions. URBs
+ * are submitted using usb_submit_urb(), and pending requests may be canceled
+ * using usb_unlink_urb() or usb_kill_urb().
+ *
+ * Data Transfer Buffers:
+ *
+ * Normally drivers provide I/O buffers allocated with kmalloc() or otherwise
+ * taken from the general page pool. That is provided by transfer_buffer
+ * (control requests also use setup_packet), and host controller drivers
+ * perform a dma mapping (and unmapping) for each buffer transferred. Those
+ * mapping operations can be expensive on some platforms (perhaps using a dma
+ * bounce buffer or talking to an IOMMU),
+ * although they're cheap on commodity x86 and ppc hardware.
+ *
+ * Alternatively, drivers may pass the URB_NO_TRANSFER_DMA_MAP transfer flag,
+ * which tells the host controller driver that no such mapping is needed for
+ * the transfer_buffer since
+ * the device driver is DMA-aware. For example, a device driver might
+ * allocate a DMA buffer with usb_alloc_coherent() or call usb_buffer_map().
+ * When this transfer flag is provided, host controller drivers will
+ * attempt to use the dma address found in the transfer_dma
+ * field rather than determining a dma address themselves.
+ *
+ * Note that transfer_buffer must still be set if the controller
+ * does not support DMA (as indicated by bus.uses_dma) and when talking
+ * to a root hub. If you have to transfer between the highmem zone and the
+ * device on such a controller, create a bounce buffer or bail out with an
+ * error.
+ * If transfer_buffer cannot be set (is in highmem) and the controller is DMA
+ * capable, assign NULL to it, so that usbmon knows not to use the value.
+ * The setup_packet must always be set, so it cannot be located in highmem.
+ *
+ * Initialization:
+ *
+ * All URBs submitted must initialize the dev, pipe, transfer_flags (may be
+ * zero), and complete fields. All URBs must also initialize
+ * transfer_buffer and transfer_buffer_length. They may provide the
+ * URB_SHORT_NOT_OK transfer flag, indicating that short reads are
+ * to be treated as errors; that flag is invalid for write requests.
+ *
+ * Bulk URBs may
+ * use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers
+ * should always terminate with a short packet, even if it means adding an
+ * extra zero length packet.
+ *
+ * Control URBs must provide a valid pointer in the setup_packet field.
+ * Unlike the transfer_buffer, the setup_packet may not be mapped for DMA
+ * beforehand.
+ *
+ * Interrupt URBs must provide an interval, saying how often (in milliseconds
+ * or, for highspeed devices, 125 microsecond units)
+ * to poll for transfers. After the URB has been submitted, the interval
+ * field reflects how the transfer was actually scheduled.
+ * The polling interval may be more frequent than requested.
+ * For example, some controllers have a maximum interval of 32 milliseconds,
+ * while others support intervals of up to 1024 milliseconds.
+ * Isochronous URBs also have transfer intervals. (Note that for isochronous
+ * endpoints, as well as high speed interrupt endpoints, the encoding of
+ * the transfer interval in the endpoint descriptor is logarithmic.
+ * Device drivers must convert that value to linear units themselves.)
+ *
+ * If an isochronous endpoint queue isn't already running, the host
+ * controller will schedule a new URB to start as soon as bandwidth
+ * utilization allows. If the queue is running then a new URB will be
+ * scheduled to start in the first transfer slot following the end of the
+ * preceding URB, if that slot has not already expired. If the slot has
+ * expired (which can happen when IRQ delivery is delayed for a long time),
+ * the scheduling behavior depends on the URB_ISO_ASAP flag. If the flag
+ * is clear then the URB will be scheduled to start in the expired slot,
+ * implying that some of its packets will not be transferred; if the flag
+ * is set then the URB will be scheduled in the first unexpired slot,
+ * breaking the queue's synchronization. Upon URB completion, the
+ * start_frame field will be set to the (micro)frame number in which the
+ * transfer was scheduled. Ranges for frame counter values are HC-specific
+ * and can go from as low as 256 to as high as 65536 frames.
+ *
+ * Isochronous URBs have a different data transfer model, in part because
+ * the quality of service is only "best effort". Callers provide specially
+ * allocated URBs, with number_of_packets worth of iso_frame_desc structures
+ * at the end. Each such packet is an individual ISO transfer. Isochronous
+ * URBs are normally queued, submitted by drivers to arrange that
+ * transfers are at least double buffered, and then explicitly resubmitted
+ * in completion handlers, so
+ * that data (such as audio or video) streams at as constant a rate as the
+ * host controller scheduler can support.
+ *
+ * Completion Callbacks:
+ *
+ * The completion callback is made in_interrupt(), and one of the first
+ * things that a completion handler should do is check the status field.
+ * The status field is provided for all URBs. It is used to report
+ * unlinked URBs, and status for all non-ISO transfers. It should not
+ * be examined before the URB is returned to the completion handler.
+ *
+ * The context field is normally used to link URBs back to the relevant
+ * driver or request state.
+ *
+ * When the completion callback is invoked for non-isochronous URBs, the
+ * actual_length field tells how many bytes were transferred. This field
+ * is updated even when the URB terminated with an error or was unlinked.
+ *
+ * ISO transfer status is reported in the status and actual_length fields
+ * of the iso_frame_desc array, and the number of errors is reported in
+ * error_count. Completion callbacks for ISO transfers will normally
+ * (re)submit URBs to ensure a constant transfer rate.
+ *
+ * Note that even fields marked "public" should not be touched by the driver
+ * while the urb is owned by the hcd, that is, from the call to
+ * usb_submit_urb() until entry into the completion routine.
+ */
+struct urb {
+ /* private: usb core and host controller only fields in the urb */
+ struct kref kref; /* reference count of the URB */
+ void *hcpriv; /* private data for host controller */
+ atomic_t use_count; /* concurrent submissions counter */
+ atomic_t reject; /* submissions will fail */
+ int unlinked; /* unlink error code */
+
+ /* public: documented fields in the urb that can be used by drivers */
+ struct list_head urb_list; /* list head for use by the urb's
+ * current owner */
+ struct list_head anchor_list; /* the URB may be anchored */
+ struct usb_anchor *anchor;
+ struct usb_device *dev; /* (in) pointer to associated device */
+ struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */
+ unsigned int pipe; /* (in) pipe information */
+ unsigned int stream_id; /* (in) stream ID */
+ int status; /* (return) non-ISO status */
+ unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/
+ void *transfer_buffer; /* (in) associated data buffer */
+ dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */
+ struct scatterlist *sg; /* (in) scatter gather buffer list */
+ int num_mapped_sgs; /* (internal) mapped sg entries */
+ int num_sgs; /* (in) number of entries in the sg list */
+ u32 transfer_buffer_length; /* (in) data buffer length */
+ u32 actual_length; /* (return) actual transfer length */
+ unsigned char *setup_packet; /* (in) setup packet (control only) */
+ dma_addr_t setup_dma; /* (in) dma addr for setup_packet */
+ int start_frame; /* (modify) start frame (ISO) */
+ int number_of_packets; /* (in) number of ISO packets */
+ int interval; /* (modify) transfer interval
+ * (INT/ISO) */
+ int error_count; /* (return) number of ISO errors */
+ void *context; /* (in) context for completion */
+ usb_complete_t complete; /* (in) completion routine */
+ struct usb_iso_packet_descriptor iso_frame_desc[0];
+ /* (in) ISO ONLY */
+};
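+
+/*
+ * A minimal, hedged sketch (not built) of a completion handler following
+ * the rules documented above: check the status field first, then look at
+ * actual_length, and resubmit from the handler for streaming transfers.
+ * The handler name and the resubmission policy are illustrative only.
+ */
+#if 0
+static void example_urb_complete(struct urb *urb)
+{
+	switch (urb->status) {
+	case 0:			/* success */
+		break;
+	case -ECONNRESET:	/* unlinked or device gone: do not resubmit */
+	case -ENOENT:
+	case -ESHUTDOWN:
+		return;
+	default:		/* other errors; streaming drivers often retry */
+		dev_dbg(&urb->dev->dev, "urb error %d\n", urb->status);
+		break;
+	}
+
+	pr_debug("transferred %u of %u bytes\n",
+		 urb->actual_length, urb->transfer_buffer_length);
+
+	/* streaming drivers commonly resubmit; we are in_interrupt() here */
+	usb_submit_urb(urb, GFP_ATOMIC);
+}
+#endif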
+
+/* ----------------------------------------------------------------------- */
+
+/**
+ * usb_fill_control_urb - initializes a control urb
+ * @urb: pointer to the urb to initialize.
+ * @dev: pointer to the struct usb_device for this urb.
+ * @pipe: the endpoint pipe
+ * @setup_packet: pointer to the setup_packet buffer
+ * @transfer_buffer: pointer to the transfer buffer
+ * @buffer_length: length of the transfer buffer
+ * @complete_fn: pointer to the usb_complete_t function
+ * @context: what to set the urb context to.
+ *
+ * Initializes a control urb with the proper information needed to submit
+ * it to a device.
+ */
+static inline void usb_fill_control_urb(struct urb *urb,
+ struct usb_device *dev,
+ unsigned int pipe,
+ unsigned char *setup_packet,
+ void *transfer_buffer,
+ int buffer_length,
+ usb_complete_t complete_fn,
+ void *context)
+{
+ urb->dev = dev;
+ urb->pipe = pipe;
+ urb->setup_packet = setup_packet;
+ urb->transfer_buffer = transfer_buffer;
+ urb->transfer_buffer_length = buffer_length;
+ urb->complete = complete_fn;
+ urb->context = context;
+}
+
+/**
+ * usb_fill_bulk_urb - helper to initialize a bulk urb
+ * @urb: pointer to the urb to initialize.
+ * @dev: pointer to the struct usb_device for this urb.
+ * @pipe: the endpoint pipe
+ * @transfer_buffer: pointer to the transfer buffer
+ * @buffer_length: length of the transfer buffer
+ * @complete_fn: pointer to the usb_complete_t function
+ * @context: what to set the urb context to.
+ *
+ * Initializes a bulk urb with the proper information needed to submit it
+ * to a device.
+ */
+static inline void usb_fill_bulk_urb(struct urb *urb,
+ struct usb_device *dev,
+ unsigned int pipe,
+ void *transfer_buffer,
+ int buffer_length,
+ usb_complete_t complete_fn,
+ void *context)
+{
+ urb->dev = dev;
+ urb->pipe = pipe;
+ urb->transfer_buffer = transfer_buffer;
+ urb->transfer_buffer_length = buffer_length;
+ urb->complete = complete_fn;
+ urb->context = context;
+}
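+
+/*
+ * A hedged sketch (not built) of a bulk OUT submission that follows the
+ * DMA notes above: the buffer comes from usb_alloc_coherent(), so the URB
+ * carries URB_NO_TRANSFER_DMA_MAP and a pre-set transfer_dma. The endpoint
+ * number, buffer handling and error policy are illustrative only.
+ */
+#if 0
+static int example_send_bulk(struct usb_device *udev, const void *data,
+			     size_t len, usb_complete_t done, void *ctx)
+{
+	struct urb *urb;
+	void *buf;
+	int ret;
+
+	urb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!urb)
+		return -ENOMEM;
+
+	buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &urb->transfer_dma);
+	if (!buf) {
+		usb_free_urb(urb);
+		return -ENOMEM;
+	}
+	memcpy(buf, data, len);
+
+	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2 /* example ep */),
+			  buf, len, done, ctx);
+	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	ret = usb_submit_urb(urb, GFP_KERNEL);
+	if (ret) {
+		usb_free_coherent(udev, len, buf, urb->transfer_dma);
+		usb_free_urb(urb);
+	}
+	/* on success, the completion handler must free buf and the urb */
+	return ret;
+}
+#endif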
+
+/**
+ * usb_fill_int_urb - helper to initialize an interrupt urb
+ * @urb: pointer to the urb to initialize.
+ * @dev: pointer to the struct usb_device for this urb.
+ * @pipe: the endpoint pipe
+ * @transfer_buffer: pointer to the transfer buffer
+ * @buffer_length: length of the transfer buffer
+ * @complete_fn: pointer to the usb_complete_t function
+ * @context: what to set the urb context to.
+ * @interval: what to set the urb interval to, encoded like
+ * the endpoint descriptor's bInterval value.
+ *
+ * Initializes an interrupt urb with the proper information needed to submit
+ * it to a device.
+ *
+ * Note that High Speed and SuperSpeed interrupt endpoints use a logarithmic
+ * encoding of the endpoint interval, and express polling intervals in
+ * microframes (eight per millisecond) rather than in frames (one per
+ * millisecond).
+ *
+ * Wireless USB also uses the logarithmic encoding, but specifies it in units of
+ * 128us instead of 125us. For Wireless USB devices, the interval is passed
+ * through to the host controller, rather than being translated into microframe
+ * units.
+ */
+static inline void usb_fill_int_urb(struct urb *urb,
+ struct usb_device *dev,
+ unsigned int pipe,
+ void *transfer_buffer,
+ int buffer_length,
+ usb_complete_t complete_fn,
+ void *context,
+ int interval)
+{
+ urb->dev = dev;
+ urb->pipe = pipe;
+ urb->transfer_buffer = transfer_buffer;
+ urb->transfer_buffer_length = buffer_length;
+ urb->complete = complete_fn;
+ urb->context = context;
+
+ if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
+ /* make sure interval is within allowed range */
+ interval = clamp(interval, 1, 16);
+
+ urb->interval = 1 << (interval - 1);
+ } else {
+ urb->interval = interval;
+ }
+
+ urb->start_frame = -1;
+}
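+
+/*
+ * A hedged sketch (not built) of filling an interrupt URB straight from an
+ * endpoint descriptor, as the kerneldoc above suggests: bInterval is passed
+ * through unchanged and usb_fill_int_urb() handles the logarithmic encoding
+ * for high- and super-speed devices. Buffer and callback are illustrative.
+ */
+#if 0
+static void example_fill_int(struct urb *urb, struct usb_device *udev,
+			     struct usb_endpoint_descriptor *desc,
+			     void *buf, int len,
+			     usb_complete_t done, void *ctx)
+{
+	unsigned int pipe = usb_rcvintpipe(udev, usb_endpoint_num(desc));
+
+	usb_fill_int_urb(urb, udev, pipe, buf, len, done, ctx,
+			 desc->bInterval);
+}
+#endif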
+
+extern void usb_init_urb(struct urb *urb);
+extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags);
+extern void usb_free_urb(struct urb *urb);
+#define usb_put_urb usb_free_urb
+extern struct urb *usb_get_urb(struct urb *urb);
+extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
+extern int usb_unlink_urb(struct urb *urb);
+extern void usb_kill_urb(struct urb *urb);
+extern void usb_poison_urb(struct urb *urb);
+extern void usb_unpoison_urb(struct urb *urb);
+extern void usb_block_urb(struct urb *urb);
+extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_anchor_suspend_wakeups(struct usb_anchor *anchor);
+extern void usb_anchor_resume_wakeups(struct usb_anchor *anchor);
+extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
+extern void usb_unanchor_urb(struct urb *urb);
+extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
+ unsigned int timeout);
+extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor);
+extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor);
+extern int usb_anchor_empty(struct usb_anchor *anchor);
+
+#define usb_unblock_urb usb_unpoison_urb
+
+/**
+ * usb_urb_dir_in - check if an URB describes an IN transfer
+ * @urb: URB to be checked
+ *
+ * Return: 1 if @urb describes an IN transfer (device-to-host),
+ * otherwise 0.
+ */
+static inline int usb_urb_dir_in(struct urb *urb)
+{
+ return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN;
+}
+
+/**
+ * usb_urb_dir_out - check if an URB describes an OUT transfer
+ * @urb: URB to be checked
+ *
+ * Return: 1 if @urb describes an OUT transfer (host-to-device),
+ * otherwise 0.
+ */
+static inline int usb_urb_dir_out(struct urb *urb)
+{
+ return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
+}
+
+void *usb_alloc_coherent(struct usb_device *dev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma);
+void usb_free_coherent(struct usb_device *dev, size_t size,
+ void *addr, dma_addr_t dma);
+
+#if 0
+struct urb *usb_buffer_map(struct urb *urb);
+void usb_buffer_dmasync(struct urb *urb);
+void usb_buffer_unmap(struct urb *urb);
+#endif
+
+struct scatterlist;
+int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
+ struct scatterlist *sg, int nents);
+#if 0
+void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
+ struct scatterlist *sg, int n_hw_ents);
+#endif
+void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
+ struct scatterlist *sg, int n_hw_ents);
+
+/*-------------------------------------------------------------------*
+ * SYNCHRONOUS CALL SUPPORT *
+ *-------------------------------------------------------------------*/
+
+extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
+ __u8 request, __u8 requesttype, __u16 value, __u16 index,
+ void *data, __u16 size, int timeout);
+extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout);
+extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length,
+ int timeout);
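+
+/*
+ * A hedged sketch (not built) of a synchronous vendor control read using
+ * usb_control_msg(). The request number, wValue/wIndex and timeout are
+ * illustrative; note that the data buffer must be DMA-able (e.g. from
+ * kmalloc()), not on the stack. The return value is the byte count or a
+ * negative errno, exactly as usb_control_msg() reports it.
+ */
+#if 0
+static int example_vendor_read(struct usb_device *udev, u8 request,
+			       void *buf, u16 len)
+{
+	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+			       request,
+			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       0 /* wValue */, 0 /* wIndex */,
+			       buf, len, 5000 /* ms, illustrative */);
+}
+#endif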
+
+/* wrappers around usb_control_msg() for the most common standard requests */
+extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
+ unsigned char descindex, void *buf, int size);
+extern int usb_get_status(struct usb_device *dev,
+ int type, int target, void *data);
+extern int usb_string(struct usb_device *dev, int index,
+ char *buf, size_t size);
+
+/* wrappers that also update important state inside usbcore */
+extern int usb_clear_halt(struct usb_device *dev, int pipe);
+extern int usb_reset_configuration(struct usb_device *dev);
+extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
+extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr);
+
+/* this request isn't really synchronous, but it belongs with the others */
+extern int usb_driver_set_configuration(struct usb_device *udev, int config);
+
+/* choose and set configuration for device */
+extern int usb_choose_configuration(struct usb_device *udev);
+extern int usb_set_configuration(struct usb_device *dev, int configuration);
+
+/*
+ * Timeouts, in milliseconds, used for sending/receiving control messages;
+ * they typically complete within a few frames (msec) after they're issued.
+ * The USB spec allows 5 second timeouts, maybe more in a few cases, and a
+ * few slow devices (like some MGE Ellipse UPSes) actually push that limit.
+ */
+#define USB_CTRL_GET_TIMEOUT 5000
+#define USB_CTRL_SET_TIMEOUT 5000
+
+
+/**
+ * struct usb_sg_request - support for scatter/gather I/O
+ * @status: zero indicates success, else negative errno
+ * @bytes: counts bytes transferred.
+ *
+ * These requests are initialized using usb_sg_init(), and then are used
+ * as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most
+ * members of the request object aren't for driver access.
+ *
+ * The status and bytecount values are valid only after usb_sg_wait()
+ * returns. If the status is zero, then the bytecount matches the total
+ * from the request.
+ *
+ * After an error completion, drivers may need to clear a halt condition
+ * on the endpoint.
+ */
+struct usb_sg_request {
+ int status;
+ size_t bytes;
+
+ /* private:
+ * members below are private to usbcore,
+ * and are not provided for driver access!
+ */
+ spinlock_t lock;
+
+ struct usb_device *dev;
+ int pipe;
+
+ int entries;
+ struct urb **urbs;
+
+ int count;
+ struct completion complete;
+};
+
+int usb_sg_init(
+ struct usb_sg_request *io,
+ struct usb_device *dev,
+ unsigned pipe,
+ unsigned period,
+ struct scatterlist *sg,
+ int nents,
+ size_t length,
+ gfp_t mem_flags
+);
+void usb_sg_cancel(struct usb_sg_request *io);
+void usb_sg_wait(struct usb_sg_request *io);
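+
+/*
+ * A hedged sketch (not built) of synchronous scatter/gather I/O with the
+ * request object above: initialize with usb_sg_init(), block in
+ * usb_sg_wait(), then inspect status and bytes. Pipe, period and length
+ * choices here are illustrative.
+ */
+#if 0
+static int example_sg_transfer(struct usb_device *udev, unsigned int pipe,
+			       struct scatterlist *sg, int nents)
+{
+	struct usb_sg_request io;
+	int ret;
+
+	ret = usb_sg_init(&io, udev, pipe, 0 /* period: bulk */,
+			  sg, nents, 0 /* use the sg entry lengths */,
+			  GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	usb_sg_wait(&io);		/* blocks until done or cancelled */
+	if (io.status)
+		return io.status;
+	pr_debug("transferred %zu bytes\n", io.bytes);
+	return 0;
+}
+#endif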
+
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * For various legacy reasons, Linux has a small cookie that's paired with
+ * a struct usb_device to identify an endpoint queue. Queue characteristics
+ * are defined by the endpoint's descriptor. This cookie is called a "pipe",
+ * an unsigned int encoded as:
+ *
+ * - direction: bit 7 (0 = Host-to-Device [Out],
+ * 1 = Device-to-Host [In] ...
+ * like endpoint bEndpointAddress)
+ * - device address: bits 8-14 ... bit positions known to uhci-hcd
+ * - endpoint: bits 15-18 ... bit positions known to uhci-hcd
+ * - pipe type: bits 30-31 (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
+ *
+ * Given the device address and endpoint descriptor, pipes are redundant.
+ */
+
+/* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */
+/* (yet ... they're the values used by usbfs) */
+#define PIPE_ISOCHRONOUS 0
+#define PIPE_INTERRUPT 1
+#define PIPE_CONTROL 2
+#define PIPE_BULK 3
+
+#define usb_pipein(pipe) ((pipe) & USB_DIR_IN)
+#define usb_pipeout(pipe) (!usb_pipein(pipe))
+
+#define usb_pipedevice(pipe) (((pipe) >> 8) & 0x7f)
+#define usb_pipeendpoint(pipe) (((pipe) >> 15) & 0xf)
+
+#define usb_pipetype(pipe) (((pipe) >> 30) & 3)
+#define usb_pipeisoc(pipe) (usb_pipetype((pipe)) == PIPE_ISOCHRONOUS)
+#define usb_pipeint(pipe) (usb_pipetype((pipe)) == PIPE_INTERRUPT)
+#define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL)
+#define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK)
+
+static inline unsigned int __create_pipe(struct usb_device *dev,
+ unsigned int endpoint)
+{
+ return (dev->devnum << 8) | (endpoint << 15);
+}
+
+/* Create various pipes... */
+#define usb_sndctrlpipe(dev, endpoint) \
+ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvctrlpipe(dev, endpoint) \
+ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndisocpipe(dev, endpoint) \
+ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvisocpipe(dev, endpoint) \
+ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndbulkpipe(dev, endpoint) \
+ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvbulkpipe(dev, endpoint) \
+ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndintpipe(dev, endpoint) \
+ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvintpipe(dev, endpoint) \
+ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+
+static inline struct usb_host_endpoint *
+usb_pipe_endpoint(struct usb_device *dev, unsigned int pipe)
+{
+ struct usb_host_endpoint **eps;
+ eps = usb_pipein(pipe) ? dev->ep_in : dev->ep_out;
+ return eps[usb_pipeendpoint(pipe)];
+}
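+
+/*
+ * A hedged, illustrative sketch (not built) of how the pipe cookie above
+ * round-trips: build a pipe with one of the helper macros below and read
+ * the fields back with the usb_pipe*() accessors. Endpoint 1 is arbitrary.
+ */
+#if 0
+static void example_pipe_decode(struct usb_device *udev)
+{
+	unsigned int pipe = usb_rcvbulkpipe(udev, 1);
+
+	WARN_ON(!usb_pipein(pipe));
+	WARN_ON(!usb_pipebulk(pipe));
+	WARN_ON(usb_pipedevice(pipe) != udev->devnum);
+	WARN_ON(usb_pipeendpoint(pipe) != 1);
+}
+#endif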
+
+/*-------------------------------------------------------------------------*/
+
+static inline __u16
+usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
+{
+ struct usb_host_endpoint *ep;
+ unsigned epnum = usb_pipeendpoint(pipe);
+
+ if (is_out) {
+ WARN_ON(usb_pipein(pipe));
+ ep = udev->ep_out[epnum];
+ } else {
+ WARN_ON(usb_pipeout(pipe));
+ ep = udev->ep_in[epnum];
+ }
+ if (!ep)
+ return 0;
+
+ /* NOTE: only 0x07ff bits are for packet size... */
+ return usb_endpoint_maxp(&ep->desc);
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* translate USB error codes to codes user space understands */
+static inline int usb_translate_errors(int error_code)
+{
+ switch (error_code) {
+ case 0:
+ case -ENOMEM:
+ case -ENODEV:
+ case -EOPNOTSUPP:
+ return error_code;
+ default:
+ return -EIO;
+ }
+}
+
+/* Events from the usb core */
+#define USB_DEVICE_ADD 0x0001
+#define USB_DEVICE_REMOVE 0x0002
+#define USB_BUS_ADD 0x0003
+#define USB_BUS_REMOVE 0x0004
+extern void usb_register_notify(struct notifier_block *nb);
+extern void usb_unregister_notify(struct notifier_block *nb);
+
+/* debugfs stuff */
+extern struct dentry *usb_debug_root;
+
+/* LED triggers */
+enum usb_led_event {
+ USB_LED_EVENT_HOST = 0,
+ USB_LED_EVENT_GADGET = 1,
+};
+
+#ifdef CONFIG_USB_LED_TRIG
+extern void usb_led_activity(enum usb_led_event ev);
+#else
+static inline void usb_led_activity(enum usb_led_event ev) {}
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/linux/usb/association.h b/include/linux/usb/association.h
new file mode 100644
index 000000000..0a4a18b3c
--- /dev/null
+++ b/include/linux/usb/association.h
@@ -0,0 +1,150 @@
+/*
+ * Wireless USB - Cable Based Association
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_USB_ASSOCIATION_H
+#define __LINUX_USB_ASSOCIATION_H
+
+
+/*
+ * Association attributes
+ *
+ * Association Models Supplement to WUSB 1.0 T[3-1]
+ *
+ * Each field in the structures has its ID, its length and then the
+ * value. This is the actual definition of the field's ID and its
+ * length.
+ */
+struct wusb_am_attr {
+ __u8 id;
+ __u8 len;
+};
+
+/* Different fields defined by the spec */
+#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) }
+#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) }
+#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) }
+#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) }
+#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) }
+#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */
+#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */
+#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) }
+#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) }
+#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) }
+#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) }
+
+/* CBAF Control Requests (AMS1.0[T4-1]) */
+enum {
+ CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01,
+ CBAF_REQ_GET_ASSOCIATION_REQUEST,
+ CBAF_REQ_SET_ASSOCIATION_RESPONSE
+};
+
+/*
+ * CBAF USB-interface definitions
+ *
+ * No altsettings, one optional interrupt endpoint.
+ */
+enum {
+ CBAF_IFACECLASS = 0xef,
+ CBAF_IFACESUBCLASS = 0x03,
+ CBAF_IFACEPROTOCOL = 0x01,
+};
+
+/* Association Information (AMS1.0[T4-3]) */
+struct wusb_cbaf_assoc_info {
+ __le16 Length;
+ __u8 NumAssociationRequests;
+ __le16 Flags;
+ __u8 AssociationRequestsArray[];
+} __attribute__((packed));
+
+/* Association Request (AMS1.0[T4-4]) */
+struct wusb_cbaf_assoc_request {
+ __u8 AssociationDataIndex;
+ __u8 Reserved;
+ __le16 AssociationTypeId;
+ __le16 AssociationSubTypeId;
+ __le32 AssociationTypeInfoSize;
+} __attribute__((packed));
+
+enum {
+ AR_TYPE_WUSB = 0x0001,
+ AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000,
+ AR_TYPE_WUSB_ASSOCIATE = 0x0001,
+};
+
+/* Association Attribute header (AMS1.0[3.8]) */
+struct wusb_cbaf_attr_hdr {
+ __le16 id;
+ __le16 len;
+} __attribute__((packed));
+
+/* Host Info (AMS1.0[T4-7]); each field is preceded by an attribute header */
+struct wusb_cbaf_host_info {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr CHID_hdr;
+ struct wusb_ckhdid CHID;
+ struct wusb_cbaf_attr_hdr LangID_hdr;
+ __le16 LangID;
+ struct wusb_cbaf_attr_hdr HostFriendlyName_hdr;
+ __u8 HostFriendlyName[];
+} __attribute__((packed));
+
+/* Device Info (AMS1.0[T4-8])
+ *
+ * As with Host Info, each field is preceded by its own attribute
+ * header.
+ */
+struct wusb_cbaf_device_info {
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le32 Length;
+ struct wusb_cbaf_attr_hdr CDID_hdr;
+ struct wusb_ckhdid CDID;
+ struct wusb_cbaf_attr_hdr BandGroups_hdr;
+ __le16 BandGroups;
+ struct wusb_cbaf_attr_hdr LangID_hdr;
+ __le16 LangID;
+ struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr;
+ __u8 DeviceFriendlyName[];
+} __attribute__((packed));
+
+/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */
+struct wusb_cbaf_cc_data {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le32 Length;
+ struct wusb_cbaf_attr_hdr ConnectionContext_hdr;
+ struct wusb_ckhdid CHID;
+ struct wusb_ckhdid CDID;
+ struct wusb_ckhdid CK;
+ struct wusb_cbaf_attr_hdr BandGroups_hdr;
+ __le16 BandGroups;
+} __attribute__((packed));
+
+/* CC_DATA - Failure case (AMS1.0[T4-10]) */
+struct wusb_cbaf_cc_data_fail {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le16 Length;
+ struct wusb_cbaf_attr_hdr AssociationStatus_hdr;
+ __u32 AssociationStatus;
+} __attribute__((packed));
+
+#endif /* __LINUX_USB_ASSOCIATION_H */
diff --git a/include/linux/usb/atmel_usba_udc.h b/include/linux/usb/atmel_usba_udc.h
new file mode 100644
index 000000000..ba99af275
--- /dev/null
+++ b/include/linux/usb/atmel_usba_udc.h
@@ -0,0 +1,23 @@
+/*
+ * Platform data definitions for Atmel USBA gadget driver.
+ */
+#ifndef __LINUX_USB_USBA_H
+#define __LINUX_USB_USBA_H
+
+struct usba_ep_data {
+ char *name;
+ int index;
+ int fifo_size;
+ int nr_banks;
+ int can_dma;
+ int can_isoc;
+};
+
+struct usba_platform_data {
+ int vbus_pin;
+ int vbus_pin_inverted;
+ int num_ep;
+ struct usba_ep_data ep[0];
+};
+
+#endif /* __LINUX_USB_USBA_H */
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
new file mode 100644
index 000000000..c5f2158ab
--- /dev/null
+++ b/include/linux/usb/audio-v2.h
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2010 Daniel Mack <daniel@caiaq.de>
+ *
+ * This software is distributed under the terms of the GNU General Public
+ * License ("GPL") version 2, as published by the Free Software Foundation.
+ *
+ * This file holds USB constants and structures defined
+ * by the USB Device Class Definition for Audio Devices in version 2.0.
+ * Comments below reference relevant sections of the documents contained
+ * in http://www.usb.org/developers/devclass_docs/Audio2.0_final.zip
+ */
+
+#ifndef __LINUX_USB_AUDIO_V2_H
+#define __LINUX_USB_AUDIO_V2_H
+
+#include <linux/types.h>
+
+/* v1.0 and v2.0 of this standard have many things in common. For the rest
+ * of the definitions, please refer to audio.h */
+
+/*
+ * bmControl field decoders
+ *
+ * From the USB Audio spec v2.0:
+ *
+ * bmaControls() is a (ch+1)-element array of 4-byte bitmaps,
+ * each containing a set of bit pairs. If a Control is present,
+ * it must be Host readable. If a certain Control is not
+ * present then the bit pair must be set to 0b00.
+ * If a Control is present but read-only, the bit pair must be
+ * set to 0b01. If a Control is also Host programmable, the bit
+ * pair must be set to 0b11. The value 0b10 is not allowed.
+ *
+ */
+
+static inline bool uac2_control_is_readable(u32 bmControls, u8 control)
+{
+ return (bmControls >> (control * 2)) & 0x1;
+}
+
+static inline bool uac2_control_is_writeable(u32 bmControls, u8 control)
+{
+ return (bmControls >> (control * 2)) & 0x2;
+}
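+
+/*
+ * A hedged, illustrative sketch (not built): control selectors are 1-based
+ * and selector N occupies bit pair N-1 of bmControls, so callers typically
+ * pass (selector - 1) to the helpers above. Here the clock validity control
+ * of a clock source is checked; the descriptor is assumed to have been
+ * parsed elsewhere.
+ */
+#if 0
+static bool example_clock_valid_is_readable(
+		const struct uac_clock_source_descriptor *cs)
+{
+	return uac2_control_is_readable(cs->bmControls,
+					UAC2_CS_CONTROL_CLOCK_VALID - 1);
+}
+#endif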
+
+/* 4.7.2 Class-Specific AC Interface Descriptor */
+struct uac2_ac_header_descriptor {
+ __u8 bLength; /* 9 */
+ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
+ __u8 bDescriptorSubtype; /* UAC_MS_HEADER */
+ __le16 bcdADC; /* 0x0200 */
+ __u8 bCategory;
+ __le16 wTotalLength; /* includes Unit and Terminal desc. */
+ __u8 bmControls;
+} __packed;
+
+/* 2.3.1.6 Type I Format Type Descriptor (Frmts20 final.pdf)*/
+struct uac2_format_type_i_descriptor {
+ __u8 bLength; /* in bytes: 6 */
+ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */
+ __u8 bDescriptorSubtype; /* FORMAT_TYPE */
+ __u8 bFormatType; /* FORMAT_TYPE_1 */
+ __u8 bSubslotSize; /* {1,2,3,4} */
+ __u8 bBitResolution;
+} __packed;
+
+/* 4.7.2.1 Clock Source Descriptor */
+
+struct uac_clock_source_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bmAttributes;
+ __u8 bmControls;
+ __u8 bAssocTerminal;
+ __u8 iClockSource;
+} __attribute__((packed));
+
+/* bmAttribute fields */
+#define UAC_CLOCK_SOURCE_TYPE_EXT 0x0
+#define UAC_CLOCK_SOURCE_TYPE_INT_FIXED 0x1
+#define UAC_CLOCK_SOURCE_TYPE_INT_VAR 0x2
+#define UAC_CLOCK_SOURCE_TYPE_INT_PROG 0x3
+#define UAC_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 2)
+
+/* 4.7.2.2 Clock Selector Descriptor */
+
+struct uac_clock_selector_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bNrInPins;
+ __u8 baCSourceID[];
+ /* bmControls, bAssocTerminal and iClockSource omitted */
+} __attribute__((packed));
+
+/* 4.7.2.3 Clock Multiplier Descriptor */
+
+struct uac_clock_multiplier_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bClockID;
+ __u8 bCSourceID;
+ __u8 bmControls;
+ __u8 iClockMultiplier;
+} __attribute__((packed));
+
+/* 4.7.2.4 Input terminal descriptor */
+
+struct uac2_input_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bCSourceID;
+ __u8 bNrChannels;
+ __u32 bmChannelConfig;
+ __u8 iChannelNames;
+ __u16 bmControls;
+ __u8 iTerminal;
+} __attribute__((packed));
+
+/* 4.7.2.5 Output terminal descriptor */
+
+struct uac2_output_terminal_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalID;
+ __u16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 bCSourceID;
+ __u16 bmControls;
+ __u8 iTerminal;
+} __attribute__((packed));
+
+
+
+/* 4.7.2.8 Feature Unit Descriptor */
+
+struct uac2_feature_unit_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ /* bmaControls is actually u32,
+ * but u8 is needed for the hybrid parser */
+ __u8 bmaControls[0]; /* variable length */
+} __attribute__((packed));
+
+/* 4.9.2 Class-Specific AS Interface Descriptor */
+
+struct uac2_as_header_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubtype;
+ __u8 bTerminalLink;
+ __u8 bmControls;
+ __u8 bFormatType;
+ __u32 bmFormats;
+ __u8 bNrChannels;
+ __u32 bmChannelConfig;
+ __u8 iChannelNames;
+} __attribute__((packed));
+
+#define UAC2_FORMAT_TYPE_I_RAW_DATA (1 << 31)
+
+/* 4.10.1.2 Class-Specific AS Isochronous Audio Data Endpoint Descriptor */
+
+struct uac2_iso_endpoint_descriptor {
+ __u8 bLength; /* in bytes: 8 */
+ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */
+ __u8 bDescriptorSubtype; /* EP_GENERAL */
+ __u8 bmAttributes;
+ __u8 bmControls;
+ __u8 bLockDelayUnits;
+ __le16 wLockDelay;
+} __attribute__((packed));
+
+#define UAC2_CONTROL_PITCH (3 << 0)
+#define UAC2_CONTROL_DATA_OVERRUN (3 << 2)
+#define UAC2_CONTROL_DATA_UNDERRUN (3 << 4)
+
+/* 6.1 Interrupt Data Message */
+
+#define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0)
+#define UAC2_INTERRUPT_DATA_MSG_EP (1 << 1)
+
+struct uac2_interrupt_data_msg {
+ __u8 bInfo;
+ __u8 bAttribute;
+ __le16 wValue;
+ __le16 wIndex;
+} __attribute__((packed));
+
+/* A.7 Audio Function Category Codes */
+#define UAC2_FUNCTION_SUBCLASS_UNDEFINED 0x00
+#define UAC2_FUNCTION_DESKTOP_SPEAKER 0x01
+#define UAC2_FUNCTION_HOME_THEATER 0x02
+#define UAC2_FUNCTION_MICROPHONE 0x03
+#define UAC2_FUNCTION_HEADSET 0x04
+#define UAC2_FUNCTION_TELEPHONE 0x05
+#define UAC2_FUNCTION_CONVERTER 0x06
+#define UAC2_FUNCTION_SOUND_RECORDER 0x07
+#define UAC2_FUNCTION_IO_BOX 0x08
+#define UAC2_FUNCTION_MUSICAL_INSTRUMENT 0x09
+#define UAC2_FUNCTION_PRO_AUDIO 0x0a
+#define UAC2_FUNCTION_AUDIO_VIDEO 0x0b
+#define UAC2_FUNCTION_CONTROL_PANEL 0x0c
+#define UAC2_FUNCTION_OTHER 0xff
+
+/* A.9 Audio Class-Specific AC Interface Descriptor Subtypes */
+/* see audio.h for the rest, which is identical to v1 */
+#define UAC2_EFFECT_UNIT 0x07
+#define UAC2_PROCESSING_UNIT_V2 0x08
+#define UAC2_EXTENSION_UNIT_V2 0x09
+#define UAC2_CLOCK_SOURCE 0x0a
+#define UAC2_CLOCK_SELECTOR 0x0b
+#define UAC2_CLOCK_MULTIPLIER 0x0c
+#define UAC2_SAMPLE_RATE_CONVERTER 0x0d
+
+/* A.10 Audio Class-Specific AS Interface Descriptor Subtypes */
+/* see audio.h for the rest, which is identical to v1 */
+#define UAC2_ENCODER 0x03
+#define UAC2_DECODER 0x04
+
+/* A.11 Effect Unit Effect Types */
+#define UAC2_EFFECT_UNDEFINED 0x00
+#define UAC2_EFFECT_PARAM_EQ 0x01
+#define UAC2_EFFECT_REVERB 0x02
+#define UAC2_EFFECT_MOD_DELAY 0x03
+#define UAC2_EFFECT_DYN_RANGE_COMP 0x04
+
+/* A.12 Processing Unit Process Types */
+#define UAC2_PROCESS_UNDEFINED 0x00
+#define UAC2_PROCESS_UP_DOWNMIX 0x01
+#define UAC2_PROCESS_DOLBY_PROLOCIC 0x02
+#define UAC2_PROCESS_STEREO_EXTENDER 0x03
+
+/* A.14 Audio Class-Specific Request Codes */
+#define UAC2_CS_CUR 0x01
+#define UAC2_CS_RANGE 0x02
+#define UAC2_CS_MEM 0x03
+
+/* A.15 Encoder Type Codes */
+#define UAC2_ENCODER_UNDEFINED 0x00
+#define UAC2_ENCODER_OTHER 0x01
+#define UAC2_ENCODER_MPEG 0x02
+#define UAC2_ENCODER_AC3 0x03
+#define UAC2_ENCODER_WMA 0x04
+#define UAC2_ENCODER_DTS 0x05
+
+/* A.16 Decoder Type Codes */
+#define UAC2_DECODER_UNDEFINED 0x00
+#define UAC2_DECODER_OTHER 0x01
+#define UAC2_DECODER_MPEG 0x02
+#define UAC2_DECODER_AC3 0x03
+#define UAC2_DECODER_WMA 0x04
+#define UAC2_DECODER_DTS 0x05
+
+/* A.17.1 Clock Source Control Selectors */
+#define UAC2_CS_UNDEFINED 0x00
+#define UAC2_CS_CONTROL_SAM_FREQ 0x01
+#define UAC2_CS_CONTROL_CLOCK_VALID 0x02
+
+/* A.17.2 Clock Selector Control Selectors */
+#define UAC2_CX_UNDEFINED 0x00
+#define UAC2_CX_CLOCK_SELECTOR 0x01
+
+/* A.17.3 Clock Multiplier Control Selectors */
+#define UAC2_CM_UNDEFINED 0x00
+#define UAC2_CM_NUMERATOR 0x01
+#define UAC2_CM_DENOMINTATOR 0x02
+
+/* A.17.4 Terminal Control Selectors */
+#define UAC2_TE_UNDEFINED 0x00
+#define UAC2_TE_COPY_PROTECT 0x01
+#define UAC2_TE_CONNECTOR 0x02
+#define UAC2_TE_OVERLOAD 0x03
+#define UAC2_TE_CLUSTER 0x04
+#define UAC2_TE_UNDERFLOW 0x05
+#define UAC2_TE_OVERFLOW 0x06
+#define UAC2_TE_LATENCY 0x07
+
+/* A.17.5 Mixer Control Selectors */
+#define UAC2_MU_UNDEFINED 0x00
+#define UAC2_MU_MIXER 0x01
+#define UAC2_MU_CLUSTER 0x02
+#define UAC2_MU_UNDERFLOW 0x03
+#define UAC2_MU_OVERFLOW 0x04
+#define UAC2_MU_LATENCY 0x05
+
+/* A.17.6 Selector Control Selectors */
+#define UAC2_SU_UNDEFINED 0x00
+#define UAC2_SU_SELECTOR 0x01
+#define UAC2_SU_LATENCY 0x02
+
+/* A.17.7 Feature Unit Control Selectors */
+/* see audio.h for the rest, which is identical to v1 */
+#define UAC2_FU_INPUT_GAIN 0x0b
+#define UAC2_FU_INPUT_GAIN_PAD 0x0c
+#define UAC2_FU_PHASE_INVERTER 0x0d
+#define UAC2_FU_UNDERFLOW 0x0e
+#define UAC2_FU_OVERFLOW 0x0f
+#define UAC2_FU_LATENCY 0x10
+
+/* A.17.8.1 Parametric Equalizer Section Effect Unit Control Selectors */
+#define UAC2_PE_UNDEFINED 0x00
+#define UAC2_PE_ENABLE 0x01
+#define UAC2_PE_CENTERFREQ 0x02
+#define UAC2_PE_QFACTOR 0x03
+#define UAC2_PE_GAIN 0x04
+#define UAC2_PE_UNDERFLOW 0x05
+#define UAC2_PE_OVERFLOW 0x06
+#define UAC2_PE_LATENCY 0x07
+
+/* A.17.8.2 Reverberation Effect Unit Control Selectors */
+#define UAC2_RV_UNDEFINED 0x00
+#define UAC2_RV_ENABLE 0x01
+#define UAC2_RV_TYPE 0x02
+#define UAC2_RV_LEVEL 0x03
+#define UAC2_RV_TIME 0x04
+#define UAC2_RV_FEEDBACK 0x05
+#define UAC2_RV_PREDELAY 0x06
+#define UAC2_RV_DENSITY 0x07
+#define UAC2_RV_HIFREQ_ROLLOFF 0x08
+#define UAC2_RV_UNDERFLOW 0x09
+#define UAC2_RV_OVERFLOW 0x0a
+#define UAC2_RV_LATENCY 0x0b
+
+/* A.17.8.3 Modulation Delay Effect Control Selectors */
+#define UAC2_MD_UNDEFINED 0x00
+#define UAC2_MD_ENABLE 0x01
+#define UAC2_MD_BALANCE 0x02
+#define UAC2_MD_RATE 0x03
+#define UAC2_MD_DEPTH 0x04
+#define UAC2_MD_TIME 0x05
+#define UAC2_MD_FEEDBACK 0x06
+#define UAC2_MD_UNDERFLOW 0x07
+#define UAC2_MD_OVERFLOW 0x08
+#define UAC2_MD_LATENCY 0x09
+
+/* A.17.8.4 Dynamic Range Compressor Effect Unit Control Selectors */
+#define UAC2_DR_UNDEFINED 0x00
+#define UAC2_DR_ENABLE 0x01
+#define UAC2_DR_COMPRESSION_RATE 0x02
+#define UAC2_DR_MAXAMPL 0x03
+#define UAC2_DR_THRESHOLD 0x04
+#define UAC2_DR_ATTACK_TIME 0x05
+#define UAC2_DR_RELEASE_TIME 0x06
+#define UAC2_DR_UNDEFLOW 0x07
+#define UAC2_DR_OVERFLOW 0x08
+#define UAC2_DR_LATENCY 0x09
+
+/* A.17.9.1 Up/Down-mix Processing Unit Control Selectors */
+#define UAC2_UD_UNDEFINED 0x00
+#define UAC2_UD_ENABLE 0x01
+#define UAC2_UD_MODE_SELECT 0x02
+#define UAC2_UD_CLUSTER 0x03
+#define UAC2_UD_UNDERFLOW 0x04
+#define UAC2_UD_OVERFLOW 0x05
+#define UAC2_UD_LATENCY 0x06
+
+/* A.17.9.2 Dolby Prologic[tm] Processing Unit Control Selectors */
+#define UAC2_DP_UNDEFINED 0x00
+#define UAC2_DP_ENABLE 0x01
+#define UAC2_DP_MODE_SELECT 0x02
+#define UAC2_DP_CLUSTER 0x03
+#define UAC2_DP_UNDERFFLOW 0x04
+#define UAC2_DP_OVERFLOW 0x05
+#define UAC2_DP_LATENCY 0x06
+
+/* A.17.9.3 Stereo Expander Processing Unit Control Selectors */
+#define UAC2_ST_EXT_UNDEFINED 0x00
+#define UAC2_ST_EXT_ENABLE 0x01
+#define UAC2_ST_EXT_WIDTH 0x02
+#define UAC2_ST_EXT_UNDEFLOW 0x03
+#define UAC2_ST_EXT_OVERFLOW 0x04
+#define UAC2_ST_EXT_LATENCY 0x05
+
+/* A.17.10 Extension Unit Control Selectors */
+#define UAC2_XU_UNDEFINED 0x00
+#define UAC2_XU_ENABLE 0x01
+#define UAC2_XU_CLUSTER 0x02
+#define UAC2_XU_UNDERFLOW 0x03
+#define UAC2_XU_OVERFLOW 0x04
+#define UAC2_XU_LATENCY 0x05
+
+/* A.17.11 AudioStreaming Interface Control Selectors */
+#define UAC2_AS_UNDEFINED 0x00
+#define UAC2_AS_ACT_ALT_SETTING 0x01
+#define UAC2_AS_VAL_ALT_SETTINGS 0x02
+#define UAC2_AS_AUDIO_DATA_FORMAT 0x03
+
+/* A.17.12 Encoder Control Selectors */
+#define UAC2_EN_UNDEFINED 0x00
+#define UAC2_EN_BIT_RATE 0x01
+#define UAC2_EN_QUALITY 0x02
+#define UAC2_EN_VBR 0x03
+#define UAC2_EN_TYPE 0x04
+#define UAC2_EN_UNDERFLOW 0x05
+#define UAC2_EN_OVERFLOW 0x06
+#define UAC2_EN_ENCODER_ERROR 0x07
+#define UAC2_EN_PARAM1 0x08
+#define UAC2_EN_PARAM2 0x09
+#define UAC2_EN_PARAM3 0x0a
+#define UAC2_EN_PARAM4 0x0b
+#define UAC2_EN_PARAM5 0x0c
+#define UAC2_EN_PARAM6 0x0d
+#define UAC2_EN_PARAM7 0x0e
+#define UAC2_EN_PARAM8 0x0f
+
+/* A.17.13.1 MPEG Decoder Control Selectors */
+#define UAC2_MPEG_UNDEFINED 0x00
+#define UAC2_MPEG_DUAL_CHANNEL 0x01
+#define UAC2_MPEG_SECOND_STEREO 0x02
+#define UAC2_MPEG_MULTILINGUAL 0x03
+#define UAC2_MPEG_DYN_RANGE 0x04
+#define UAC2_MPEG_SCALING 0x05
+#define UAC2_MPEG_HILO_SCALING 0x06
+#define UAC2_MPEG_UNDERFLOW 0x07
+#define UAC2_MPEG_OVERFLOW 0x08
+#define UAC2_MPEG_DECODER_ERROR 0x09
+
+/* A17.13.2 AC3 Decoder Control Selectors */
+#define UAC2_AC3_UNDEFINED 0x00
+#define UAC2_AC3_MODE 0x01
+#define UAC2_AC3_DYN_RANGE 0x02
+#define UAC2_AC3_SCALING 0x03
+#define UAC2_AC3_HILO_SCALING 0x04
+#define UAC2_AC3_UNDERFLOW 0x05
+#define UAC2_AC3_OVERFLOW 0x06
+#define UAC2_AC3_DECODER_ERROR 0x07
+
+/* A17.13.3 WMA Decoder Control Selectors */
+#define UAC2_WMA_UNDEFINED 0x00
+#define UAC2_WMA_UNDERFLOW 0x01
+#define UAC2_WMA_OVERFLOW 0x02
+#define UAC2_WMA_DECODER_ERROR 0x03
+
+/* A17.13.4 DTS Decoder Control Selectors */
+#define UAC2_DTS_UNDEFINED 0x00
+#define UAC2_DTS_UNDERFLOW 0x01
+#define UAC2_DTS_OVERFLOW 0x02
+#define UAC2_DTS_DECODER_ERROR 0x03
+
+/* A17.14 Endpoint Control Selectors */
+#define UAC2_EP_CS_UNDEFINED 0x00
+#define UAC2_EP_CS_PITCH 0x01
+#define UAC2_EP_CS_DATA_OVERRUN 0x02
+#define UAC2_EP_CS_DATA_UNDERRUN 0x03
+
+#endif /* __LINUX_USB_AUDIO_V2_H */
+
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h
new file mode 100644
index 000000000..3d8461911
--- /dev/null
+++ b/include/linux/usb/audio.h
@@ -0,0 +1,44 @@
+/*
+ * <linux/usb/audio.h> -- USB Audio definitions.
+ *
+ * Copyright (C) 2006 Thumtronics Pty Ltd.
+ * Developed for Thumtronics by Grey Innovation
+ * Ben Williamson <ben.williamson@greyinnovation.com>
+ *
+ * This software is distributed under the terms of the GNU General Public
+ * License ("GPL") version 2, as published by the Free Software Foundation.
+ *
+ * This file holds USB constants and structures defined
+ * by the USB Device Class Definition for Audio Devices.
+ * Comments below reference relevant sections of that document:
+ *
+ * http://www.usb.org/developers/devclass_docs/audio10.pdf
+ *
+ * Types and defines in this file are either specific to version 1.0 of
+ * this standard or common for newer versions.
+ */
+#ifndef __LINUX_USB_AUDIO_H
+#define __LINUX_USB_AUDIO_H
+
+#include <uapi/linux/usb/audio.h>
+
+
+struct usb_audio_control {
+ struct list_head list;
+ const char *name;
+ u8 type;
+ int data[5];
+ int (*set)(struct usb_audio_control *con, u8 cmd, int value);
+ int (*get)(struct usb_audio_control *con, u8 cmd);
+};
+
+struct usb_audio_control_selector {
+ struct list_head list;
+ struct list_head control;
+ u8 id;
+ const char *name;
+ u8 type;
+ struct usb_descriptor_header *desc;
+};
+
+#endif /* __LINUX_USB_AUDIO_H */
diff --git a/include/linux/usb/c67x00.h b/include/linux/usb/c67x00.h
new file mode 100644
index 000000000..83c6b4547
--- /dev/null
+++ b/include/linux/usb/c67x00.h
@@ -0,0 +1,48 @@
+/*
+ * usb_c67x00.h: platform definitions for the Cypress C67X00 USB chip
+ *
+ * Copyright (C) 2006-2008 Barco N.V.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301 USA.
+ */
+
+#ifndef _LINUX_USB_C67X00_H
+#define _LINUX_USB_C67X00_H
+
+/* SIE configuration */
+#define C67X00_SIE_UNUSED 0
+#define C67X00_SIE_HOST 1
+#define C67X00_SIE_PERIPHERAL_A 2 /* peripheral on A port */
+#define C67X00_SIE_PERIPHERAL_B 3 /* peripheral on B port */
+
+#define c67x00_sie_config(config, n) (((config)>>(4*(n)))&0x3)
+
+#define C67X00_SIE1_UNUSED (C67X00_SIE_UNUSED << 0)
+#define C67X00_SIE1_HOST (C67X00_SIE_HOST << 0)
+#define C67X00_SIE1_PERIPHERAL_A (C67X00_SIE_PERIPHERAL_A << 0)
+#define C67X00_SIE1_PERIPHERAL_B (C67X00_SIE_PERIPHERAL_B << 0)
+
+#define C67X00_SIE2_UNUSED (C67X00_SIE_UNUSED << 4)
+#define C67X00_SIE2_HOST (C67X00_SIE_HOST << 4)
+#define C67X00_SIE2_PERIPHERAL_A (C67X00_SIE_PERIPHERAL_A << 4)
+#define C67X00_SIE2_PERIPHERAL_B (C67X00_SIE_PERIPHERAL_B << 4)
+
+struct c67x00_platform_data {
+ int sie_config; /* SIEs config (C67X00_SIEx_*) */
+ unsigned long hpi_regstep; /* Step between HPI registers */
+};
+
+#endif /* _LINUX_USB_C67X00_H */
diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h
new file mode 100644
index 000000000..0b3f4295c
--- /dev/null
+++ b/include/linux/usb/cdc-wdm.h
@@ -0,0 +1,21 @@
+/*
+ * USB CDC Device Management subdriver
+ *
+ * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_USB_CDC_WDM_H
+#define __LINUX_USB_CDC_WDM_H
+
+#include <uapi/linux/usb/cdc-wdm.h>
+
+extern struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
+ struct usb_endpoint_descriptor *ep,
+ int bufsize,
+ int (*manage_power)(struct usb_interface *, int));
+
+#endif /* __LINUX_USB_CDC_WDM_H */
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
new file mode 100644
index 000000000..7c9b48473
--- /dev/null
+++ b/include/linux/usb/cdc_ncm.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) ST-Ericsson 2010-2012
+ * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
+ * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
+ *
+ * USB Host Driver for Network Control Model (NCM)
+ * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ *
+ * The NCM encoding, decoding and initialization logic
+ * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __LINUX_USB_CDC_NCM_H
+#define __LINUX_USB_CDC_NCM_H
+
+#define CDC_NCM_COMM_ALTSETTING_NCM 0
+#define CDC_NCM_COMM_ALTSETTING_MBIM 1
+
+#define CDC_NCM_DATA_ALTSETTING_NCM 1
+#define CDC_NCM_DATA_ALTSETTING_MBIM 2
+
+/* CDC NCM subclass 3.2.1 */
+#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
+
+/* Maximum NTB length */
+#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
+#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
+
+/* Initial NTB length */
+#define CDC_NCM_NTB_DEF_SIZE_TX 16384 /* bytes */
+#define CDC_NCM_NTB_DEF_SIZE_RX 16384 /* bytes */
+
+/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
+#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
+
+/* Minimum value for MaxDatagramSize, ch. 8.1.3 */
+#define CDC_MBIM_MIN_DATAGRAM_SIZE 2048 /* bytes */
+
+#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
+
+/* Default value for MaxDatagramSize */
+#define CDC_NCM_MAX_DATAGRAM_SIZE 8192 /* bytes */
+
+/*
+ * Maximum number of datagrams in an NCM Datagram Pointer Table, not counting
+ * the last NULL entry.
+ */
+#define CDC_NCM_DPT_DATAGRAMS_MAX 40
+
+/* Restart the timer if the number of datagrams is less than the given value */
+#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
+#define CDC_NCM_TIMER_PENDING_CNT 2
+#define CDC_NCM_TIMER_INTERVAL_USEC 400UL
+#define CDC_NCM_TIMER_INTERVAL_MIN 5UL
+#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
+
+#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \
+ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE)
+#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
+
+struct cdc_ncm_ctx {
+ struct usb_cdc_ncm_ntb_parameters ncm_parm;
+ struct hrtimer tx_timer;
+ struct tasklet_struct bh;
+
+ const struct usb_cdc_ncm_desc *func_desc;
+ const struct usb_cdc_mbim_desc *mbim_desc;
+ const struct usb_cdc_mbim_extended_desc *mbim_extended_desc;
+ const struct usb_cdc_ether_desc *ether_desc;
+
+ struct usb_interface *control;
+ struct usb_interface *data;
+
+ struct sk_buff *tx_curr_skb;
+ struct sk_buff *tx_rem_skb;
+ __le32 tx_rem_sign;
+
+ spinlock_t mtx;
+ atomic_t stop;
+
+ u32 timer_interval;
+ u32 max_ndp_size;
+
+ u32 tx_timer_pending;
+ u32 tx_curr_frame_num;
+ u32 rx_max;
+ u32 tx_max;
+ u32 max_datagram_size;
+ u16 tx_max_datagrams;
+ u16 tx_remainder;
+ u16 tx_modulus;
+ u16 tx_ndp_modulus;
+ u16 tx_seq;
+ u16 rx_seq;
+ u16 min_tx_pkt;
+
+ /* statistics */
+ u32 tx_curr_frame_payload;
+ u32 tx_reason_ntb_full;
+ u32 tx_reason_ndp_full;
+ u32 tx_reason_timeout;
+ u32 tx_reason_max_datagram;
+ u64 tx_overhead;
+ u64 tx_ntbs;
+ u64 rx_overhead;
+ u64 rx_ntbs;
+};
+
+u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
+int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
+struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
+int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in);
+int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset);
+struct sk_buff *
+cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
+int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in);
+
+#endif /* __LINUX_USB_CDC_NCM_H */
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
new file mode 100644
index 000000000..27603bcbb
--- /dev/null
+++ b/include/linux/usb/ch9.h
@@ -0,0 +1,55 @@
+/*
+ * This file holds USB constants and structures that are needed for
+ * USB device APIs. These are used by the USB device model, which is
+ * defined in chapter 9 of the USB 2.0 specification and in the
+ * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * need these:
+ *
+ * - the master/host side Linux-USB kernel driver API;
+ * - the "usbfs" user space API; and
+ * - the Linux "gadget" slave/device/peripheral side driver API.
+ *
+ * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems
+ * act either as a USB master/host or as a USB slave/device. That means
+ * the master and slave side APIs benefit from working well together.
+ *
+ * There's also "Wireless USB", using low power short range radios for
+ * peripheral interconnection but otherwise building on the USB framework.
+ *
+ * Note all descriptors are declared '__attribute__((packed))' so that:
+ *
+ * [a] they never get padded, either internally (USB spec writers
+ * probably handled that) or externally;
+ *
+ * [b] so that accessing bigger-than-a-bytes fields will never
+ * generate bus errors on any platform, even when the location of
+ * its descriptor inside a bundle isn't "naturally aligned", and
+ *
+ * [c] for consistency, removing all doubt even when it appears to
+ * someone that the two other points are non-issues for that
+ * particular descriptor type.
+ */
+#ifndef __LINUX_USB_CH9_H
+#define __LINUX_USB_CH9_H
+
+#include <uapi/linux/usb/ch9.h>
+
+
+/**
+ * usb_speed_string() - Returns the human-readable name of the speed.
+ * @speed: The speed to return human-readable name for. If it's not
+ * any of the speeds defined in usb_device_speed enum, string for
+ * USB_SPEED_UNKNOWN will be returned.
+ */
+extern const char *usb_speed_string(enum usb_device_speed speed);
+
+
+/**
+ * usb_state_string - Returns the human-readable name for the state.
+ * @state: The state to return a human-readable name for. If it's not
+ * any of the states in the usb_device_state enum,
+ * the string UNKNOWN will be returned.
+ */
+extern const char *usb_state_string(enum usb_device_state state);
+
+#endif /* __LINUX_USB_CH9_H */
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
new file mode 100644
index 000000000..ab94f78c4
--- /dev/null
+++ b/include/linux/usb/chipidea.h
@@ -0,0 +1,50 @@
+/*
+ * Platform data for the chipidea USB dual role controller
+ */
+
+#ifndef __LINUX_USB_CHIPIDEA_H
+#define __LINUX_USB_CHIPIDEA_H
+
+#include <linux/usb/otg.h>
+
+struct ci_hdrc;
+struct ci_hdrc_platform_data {
+ const char *name;
+ /* offset of the capability registers */
+ uintptr_t capoffset;
+ unsigned power_budget;
+ struct phy *phy;
+ /* old usb_phy interface */
+ struct usb_phy *usb_phy;
+ enum usb_phy_interface phy_mode;
+ unsigned long flags;
+#define CI_HDRC_REGS_SHARED BIT(0)
+#define CI_HDRC_SUPPORTS_RUNTIME_PM BIT(2)
+#define CI_HDRC_DISABLE_STREAMING BIT(3)
+ /*
+ * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
+ * but otg is not supported (no register otgsc).
+ */
+#define CI_HDRC_DUAL_ROLE_NOT_OTG BIT(4)
+#define CI_HDRC_IMX28_WRITE_FIX BIT(5)
+#define CI_HDRC_FORCE_FULLSPEED BIT(6)
+#define CI_HDRC_TURN_VBUS_EARLY_ON BIT(7)
+ enum usb_dr_mode dr_mode;
+#define CI_HDRC_CONTROLLER_RESET_EVENT 0
+#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+ void (*notify_event) (struct ci_hdrc *ci, unsigned event);
+ struct regulator *reg_vbus;
+ bool tpl_support;
+};
+
+/* Default offset of capability registers */
+#define DEF_CAPOFFSET 0x100
+
+/* Add ci hdrc device */
+struct platform_device *ci_hdrc_add_device(struct device *dev,
+ struct resource *res, int nres,
+ struct ci_hdrc_platform_data *platdata);
+/* Remove ci hdrc device */
+void ci_hdrc_remove_device(struct platform_device *pdev);
+
+#endif
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
new file mode 100644
index 000000000..2511469a9
--- /dev/null
+++ b/include/linux/usb/composite.h
@@ -0,0 +1,627 @@
+/*
+ * composite.h -- framework for usb gadgets which are composite devices
+ *
+ * Copyright (C) 2006-2008 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __LINUX_USB_COMPOSITE_H
+#define __LINUX_USB_COMPOSITE_H
+
+/*
+ * This framework is an optional layer on top of the USB Gadget interface,
+ * making it easier to build (a) Composite devices, supporting multiple
+ * functions within any single configuration, and (b) Multi-configuration
+ * devices, also supporting multiple functions but without necessarily
+ * having more than one function per configuration.
+ *
+ * Example: a device with a single configuration supporting both network
+ * link and mass storage functions is a composite device. Those functions
+ * might alternatively be packaged in individual configurations, but in
+ * the composite model the host can use both functions at the same time.
+ */
+
+#include <linux/bcd.h>
+#include <linux/version.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/log2.h>
+#include <linux/configfs.h>
+
+/*
+ * USB function drivers should return USB_GADGET_DELAYED_STATUS if they
+ * wish to delay the data/status stages of the control transfer until they
+ * are ready. The control transfer will then be kept from completing until
+ * all the function drivers that requested USB_GADGET_DELAYED_STATUS
+ * invoke usb_composite_setup_continue().
+ */
+#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */
+
+/* big enough to hold our biggest descriptor */
+#define USB_COMP_EP0_BUFSIZ 1024
+
+#define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1)
+struct usb_configuration;
+
+/**
+ * struct usb_os_desc_ext_prop - describes one "Extended Property"
+ * @entry: used to keep a list of extended properties
+ * @type: Extended Property type
+ * @name_len: Extended Property unicode name length, including terminating '\0'
+ * @name: Extended Property name
+ * @data_len: Length of Extended Property blob (for unicode store double len)
+ * @data: Extended Property blob
+ * @item: Represents this Extended Property in configfs
+ */
+struct usb_os_desc_ext_prop {
+ struct list_head entry;
+ u8 type;
+ int name_len;
+ char *name;
+ int data_len;
+ char *data;
+ struct config_item item;
+};
+
+/**
+ * struct usb_os_desc - describes OS descriptors associated with one interface
+ * @ext_compat_id: 16 bytes of "Compatible ID" and "Subcompatible ID"
+ * @ext_prop: Extended Properties list
+ * @ext_prop_len: Total length of Extended Properties blobs
+ * @ext_prop_count: Number of Extended Properties
+ * @opts_mutex: Optional mutex protecting config data of a usb_function_instance
+ * @group: Represents OS descriptors associated with an interface in configfs
+ * @owner: Module associated with this OS descriptor
+ */
+struct usb_os_desc {
+ char *ext_compat_id;
+ struct list_head ext_prop;
+ int ext_prop_len;
+ int ext_prop_count;
+ struct mutex *opts_mutex;
+ struct config_group group;
+ struct module *owner;
+};
+
+/**
+ * struct usb_os_desc_table - describes OS descriptors associated with one
+ * interface of a usb_function
+ * @if_id: Interface id
+ * @os_desc: "Extended Compatibility ID" and "Extended Properties" of the
+ * interface
+ *
+ * Each interface can have at most one "Extended Compatibility ID" and a
+ * number of "Extended Properties".
+ */
+struct usb_os_desc_table {
+ int if_id;
+ struct usb_os_desc *os_desc;
+};
+
+/**
+ * struct usb_function - describes one function of a configuration
+ * @name: For diagnostics, identifies the function.
+ * @strings: tables of strings, keyed by identifiers assigned during bind()
+ * and by language IDs provided in control requests
+ * @fs_descriptors: Table of full (or low) speed descriptors, using interface and
+ * string identifiers assigned during @bind(). If this pointer is null,
+ * the function will not be available at full speed (or at low speed).
+ * @hs_descriptors: Table of high speed descriptors, using interface and
+ * string identifiers assigned during @bind(). If this pointer is null,
+ * the function will not be available at high speed.
+ * @ss_descriptors: Table of super speed descriptors, using interface and
+ * string identifiers assigned during @bind(). If this
+ * pointer is null after initiation, the function will not
+ * be available at super speed.
+ * @config: assigned when @usb_add_function() is called; this is the
+ * configuration with which this function is associated.
+ * @os_desc_table: Table of (interface id, os descriptors) pairs. The function
+ * can expose more than one interface. If an interface is a member of
+ * an IAD, only the first interface of IAD has its entry in the table.
+ * @os_desc_n: Number of entries in os_desc_table
+ * @bind: Before the gadget can register, all of its functions bind() to the
+ * available resources including string and interface identifiers used
+ * in interface or class descriptors; endpoints; I/O buffers; and so on.
+ * @unbind: Reverses @bind; called as a side effect of unregistering the
+ * driver which added this function.
+ * @free_func: free the struct usb_function.
+ * @mod: (internal) points to the module that created this structure.
+ * @set_alt: (REQUIRED) Reconfigures altsettings; function drivers may
+ * initialize usb_ep.driver data at this time (when it is used).
+ * Note that setting an interface to its current altsetting resets
+ * interface state, and that all interfaces have a disabled state.
+ * @get_alt: Returns the active altsetting. If this is not provided,
+ * then only altsetting zero is supported.
+ * @disable: (REQUIRED) Indicates the function should be disabled. Reasons
+ * include host resetting or reconfiguring the gadget, and disconnection.
+ * @setup: Used for interface-specific control requests.
+ * @req_match: Tests if a given class request can be handled by this function.
+ * @suspend: Notifies functions when the host stops sending USB traffic.
+ * @resume: Notifies functions when the host restarts USB traffic.
+ * @get_status: Returns function status as a reply to
+ * GetStatus() request when the recipient is Interface.
+ * @func_suspend: callback to be called when
+ *	SetFeature(FUNCTION_SUSPEND) is received
+ *
+ * A single USB function uses one or more interfaces, and should in most
+ * cases support operation at both full and high speeds. Each function is
+ * associated by @usb_add_function() with one configuration; that function
+ * causes @bind() to be called so resources can be allocated as part of
+ * setting up a gadget driver. Those resources include endpoints, which
+ * should be allocated using @usb_ep_autoconfig().
+ *
+ * To support dual speed operation, a function driver provides descriptors
+ * for both high and full speed operation. Except in rare cases that don't
+ * involve bulk endpoints, each speed needs different endpoint descriptors.
+ *
+ * Function drivers choose their own strategies for managing instance data.
+ * The simplest strategy just declares it "static", which means the function
+ * can only be activated once. If the function needs to be exposed in more
+ * than one configuration at a given speed, it needs to support multiple
+ * usb_function structures (one for each configuration).
+ *
+ * A more complex strategy might encapsulate a @usb_function structure inside
+ * a driver-specific instance structure to allow multiple activations. An
+ * example of multiple activations might be a CDC ACM function that supports
+ * two or more distinct instances within the same configuration, providing
+ * several independent logical data links to a USB host.
+ */
+
+struct usb_function {
+ const char *name;
+ struct usb_gadget_strings **strings;
+ struct usb_descriptor_header **fs_descriptors;
+ struct usb_descriptor_header **hs_descriptors;
+ struct usb_descriptor_header **ss_descriptors;
+
+ struct usb_configuration *config;
+
+ struct usb_os_desc_table *os_desc_table;
+ unsigned os_desc_n;
+
+ /* REVISIT: bind() functions can be marked __init, which
+ * makes trouble for section mismatch analysis. See if
+ * we can't restructure things to avoid mismatching.
+ * Related: unbind() may kfree() but bind() won't...
+ */
+
+ /* configuration management: bind/unbind */
+ int (*bind)(struct usb_configuration *,
+ struct usb_function *);
+ void (*unbind)(struct usb_configuration *,
+ struct usb_function *);
+ void (*free_func)(struct usb_function *f);
+ struct module *mod;
+
+ /* runtime state management */
+ int (*set_alt)(struct usb_function *,
+ unsigned interface, unsigned alt);
+ int (*get_alt)(struct usb_function *,
+ unsigned interface);
+ void (*disable)(struct usb_function *);
+ int (*setup)(struct usb_function *,
+ const struct usb_ctrlrequest *);
+ bool (*req_match)(struct usb_function *,
+ const struct usb_ctrlrequest *);
+ void (*suspend)(struct usb_function *);
+ void (*resume)(struct usb_function *);
+
+ /* USB 3.0 additions */
+ int (*get_status)(struct usb_function *);
+ int (*func_suspend)(struct usb_function *,
+ u8 suspend_opt);
+ /* private: */
+ /* internals */
+ struct list_head list;
+ DECLARE_BITMAP(endpoints, 32);
+ const struct usb_function_instance *fi;
+};
+
+int usb_add_function(struct usb_configuration *, struct usb_function *);
+
+int usb_function_deactivate(struct usb_function *);
+int usb_function_activate(struct usb_function *);
+
+int usb_interface_id(struct usb_configuration *, struct usb_function *);
+
+int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
+ struct usb_ep *_ep);
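
To make the bind()/set_alt()/disable() contract described above concrete, here is a minimal, hypothetical function driver sketch; the my_func_* names are illustrative only, and the descriptor tables and endpoint setup are omitted:

static int my_func_bind(struct usb_configuration *c, struct usb_function *f)
{
	int id = usb_interface_id(c, f);	/* claim an interface number */

	if (id < 0)
		return id;

	/* patch 'id' into the interface descriptor(s), allocate endpoints
	 * (e.g. with usb_ep_autoconfig()), and build the fs/hs descriptor
	 * tables here
	 */
	return 0;
}

static int my_func_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	/* (re)enable the endpoints that belong to this altsetting */
	return 0;
}

static void my_func_disable(struct usb_function *f)
{
	/* disable endpoints and drop any queued requests */
}

static struct usb_function my_func = {
	.name		= "my_func",
	.bind		= my_func_bind,
	.set_alt	= my_func_set_alt,
	.disable	= my_func_disable,
};

A configuration's bind() callback would then attach it with usb_add_function(c, &my_func), which in turn triggers my_func_bind().
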
+
+#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */
+
+/**
+ * struct usb_configuration - represents one gadget configuration
+ * @label: For diagnostics, describes the configuration.
+ * @strings: Tables of strings, keyed by identifiers assigned during @bind()
+ * and by language IDs provided in control requests.
+ * @descriptors: Table of descriptors preceding all function descriptors.
+ * Examples include OTG and vendor-specific descriptors.
+ * @unbind: Reverses @bind; called as a side effect of unregistering the
+ * driver which added this configuration.
+ * @setup: Used to delegate control requests that aren't handled by standard
+ * device infrastructure or directed at a specific interface.
+ * @bConfigurationValue: Copied into configuration descriptor.
+ * @iConfiguration: Copied into configuration descriptor.
+ * @bmAttributes: Copied into configuration descriptor.
+ * @MaxPower: Power consumption in mA. Used to compute bMaxPower in the
+ * configuration descriptor after considering the bus speed.
+ * @cdev: assigned by @usb_add_config() before calling @bind(); this is
+ * the device associated with this configuration.
+ *
+ * Configurations are building blocks for gadget drivers structured around
+ * function drivers. Simple USB gadgets require only one function and one
+ * configuration, and handle dual-speed hardware by always providing the same
+ * functionality. Slightly more complex gadgets may have more than one
+ * single-function configuration at a given speed; or have configurations
+ * that only work at one speed.
+ *
+ * Composite devices are, by definition, ones with configurations which
+ * include more than one function.
+ *
+ * The lifecycle of a usb_configuration includes allocation, initialization
+ * of the fields described above, and calling @usb_add_config() to set up
+ * internal data and bind it to a specific device. The configuration's
+ * @bind() method is then used to initialize all the functions and then
+ * call @usb_add_function() for them.
+ *
+ * Those functions would normally be independent of each other, but that's
+ * not mandatory. CDC WMC devices are an example where functions often
+ * depend on other functions, with some functions subsidiary to others.
+ * Such interdependency may be managed in any way, so long as all of the
+ * descriptors complete by the time the composite driver returns from
+ * its bind() routine.
+ */
+struct usb_configuration {
+ const char *label;
+ struct usb_gadget_strings **strings;
+ const struct usb_descriptor_header **descriptors;
+
+ /* REVISIT: bind() functions can be marked __init, which
+ * makes trouble for section mismatch analysis. See if
+ * we can't restructure things to avoid mismatching...
+ */
+
+ /* configuration management: unbind/setup */
+ void (*unbind)(struct usb_configuration *);
+ int (*setup)(struct usb_configuration *,
+ const struct usb_ctrlrequest *);
+
+ /* fields in the config descriptor */
+ u8 bConfigurationValue;
+ u8 iConfiguration;
+ u8 bmAttributes;
+ u16 MaxPower;
+
+ struct usb_composite_dev *cdev;
+
+ /* private: */
+ /* internals */
+ struct list_head list;
+ struct list_head functions;
+ u8 next_interface_id;
+ unsigned superspeed:1;
+ unsigned highspeed:1;
+ unsigned fullspeed:1;
+ struct usb_function *interface[MAX_CONFIG_INTERFACES];
+};
+
+int usb_add_config(struct usb_composite_dev *,
+ struct usb_configuration *,
+ int (*)(struct usb_configuration *));
+
+void usb_remove_config(struct usb_composite_dev *,
+ struct usb_configuration *);
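
Continuing the hypothetical sketch, a configuration wrapping my_func might be declared and registered roughly as follows; USB_CONFIG_ATT_ONE and USB_CONFIG_ATT_SELFPOWER are the standard ch9 attribute bits pulled in via <linux/usb/gadget.h>:

static struct usb_configuration my_config = {
	.label			= "my config",
	.bConfigurationValue	= 1,
	.bmAttributes		= USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
	.MaxPower		= 100,	/* mA; converted to bMaxPower */
};

static int my_config_bind(struct usb_configuration *c)
{
	return usb_add_function(c, &my_func);
}

/* from the composite driver's bind():
 *	usb_add_config(cdev, &my_config, my_config_bind);
 */
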
+
+/* predefined index for usb_composite_driver */
+enum {
+ USB_GADGET_MANUFACTURER_IDX = 0,
+ USB_GADGET_PRODUCT_IDX,
+ USB_GADGET_SERIAL_IDX,
+ USB_GADGET_FIRST_AVAIL_IDX,
+};
+
+/**
+ * struct usb_composite_driver - groups configurations into a gadget
+ * @name: For diagnostics, identifies the driver.
+ * @dev: Template descriptor for the device, including default device
+ * identifiers.
+ * @strings: tables of strings, keyed by identifiers assigned during @bind
+ * and language IDs provided in control requests. Note: The first entries
+ * are predefined. The first entry that may be used is
+ * USB_GADGET_FIRST_AVAIL_IDX
+ * @max_speed: Highest speed the driver supports.
+ * @needs_serial: set to 1 if the gadget needs userspace to provide
+ *	a serial number. If one is not provided, a warning will be printed.
+ * @bind: (REQUIRED) Used to allocate resources that are shared across the
+ * whole device, such as string IDs, and add its configurations using
+ * @usb_add_config(). This may fail by returning a negative errno
+ * value; it should return zero on successful initialization.
+ * @unbind: Reverses @bind; called as a side effect of unregistering
+ * this driver.
+ * @disconnect: optional driver disconnect method
+ * @suspend: Notifies when the host stops sending USB traffic,
+ * after function notifications
+ * @resume: Notifies configuration when the host restarts USB traffic,
+ * before function notifications
+ * @gadget_driver: Gadget driver controlling this driver
+ *
+ * Devices default to reporting self powered operation. Devices which rely
+ * on bus powered operation should report this in their @bind method.
+ *
+ * Before returning from @bind, various fields in the template descriptor
+ * may be overridden. These include the idVendor/idProduct/bcdDevice values
+ * normally used to bind the appropriate host side driver, and the three strings
+ * (iManufacturer, iProduct, iSerialNumber) normally used to provide user
+ * meaningful device identifiers. (The strings will not be defined unless
+ * they are defined in @dev and @strings.) The correct ep0 maxpacket size
+ * is also reported, as defined by the underlying controller driver.
+ */
+struct usb_composite_driver {
+ const char *name;
+ const struct usb_device_descriptor *dev;
+ struct usb_gadget_strings **strings;
+ enum usb_device_speed max_speed;
+ unsigned needs_serial:1;
+
+ int (*bind)(struct usb_composite_dev *cdev);
+ int (*unbind)(struct usb_composite_dev *);
+
+ void (*disconnect)(struct usb_composite_dev *);
+
+ /* global suspend hooks */
+ void (*suspend)(struct usb_composite_dev *);
+ void (*resume)(struct usb_composite_dev *);
+ struct usb_gadget_driver gadget_driver;
+};
+
+extern int usb_composite_probe(struct usb_composite_driver *driver);
+extern void usb_composite_unregister(struct usb_composite_driver *driver);
+
+/**
+ * module_usb_composite_driver() - Helper macro for registering a USB gadget
+ * composite driver
+ * @__usb_composite_driver: usb_composite_driver struct
+ *
+ * Helper macro for USB gadget composite drivers which do not do anything
+ * special in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces module_init()
+ * and module_exit()
+ */
+#define module_usb_composite_driver(__usb_composite_driver) \
+ module_driver(__usb_composite_driver, usb_composite_probe, \
+ usb_composite_unregister)
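
A hedged sketch of the whole chain, registering the hypothetical configuration above through module_usb_composite_driver(); my_device_desc and my_dev_strings stand in for a usb_device_descriptor and string tables defined elsewhere:

static int my_gadget_bind(struct usb_composite_dev *cdev)
{
	/* allocate device-wide string IDs here, then add configurations */
	return usb_add_config(cdev, &my_config, my_config_bind);
}

static int my_gadget_unbind(struct usb_composite_dev *cdev)
{
	return 0;
}

static struct usb_composite_driver my_gadget_driver = {
	.name		= "my_gadget",
	.dev		= &my_device_desc,	/* hypothetical device descriptor */
	.strings	= my_dev_strings,	/* hypothetical string tables */
	.max_speed	= USB_SPEED_HIGH,
	.bind		= my_gadget_bind,
	.unbind		= my_gadget_unbind,
};
module_usb_composite_driver(my_gadget_driver);
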
+
+extern void usb_composite_setup_continue(struct usb_composite_dev *cdev);
+extern int composite_dev_prepare(struct usb_composite_driver *composite,
+ struct usb_composite_dev *cdev);
+extern int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
+ struct usb_ep *ep0);
+void composite_dev_cleanup(struct usb_composite_dev *cdev);
+
+static inline struct usb_composite_driver *to_cdriver(
+ struct usb_gadget_driver *gdrv)
+{
+ return container_of(gdrv, struct usb_composite_driver, gadget_driver);
+}
+
+#define OS_STRING_QW_SIGN_LEN 14
+#define OS_STRING_IDX 0xEE
+
+/**
+ * struct usb_composite_dev - represents one composite usb gadget
+ * @gadget: read-only, abstracts the gadget's usb peripheral controller
+ * @req: used for control responses; buffer is pre-allocated
+ * @os_desc_req: used for OS descriptors responses; buffer is pre-allocated
+ * @config: the currently active configuration
+ * @qw_sign: qwSignature part of the OS string
+ * @b_vendor_code: bMS_VendorCode part of the OS string
+ * @use_os_string: false by default, interested gadgets set it
+ * @os_desc_config: the configuration to be used with OS descriptors
+ * @setup_pending: true when setup request is queued but not completed
+ * @os_desc_pending: true when os_desc request is queued but not completed
+ *
+ * One of these devices is allocated and initialized before the
+ * associated device driver's bind() is called.
+ *
+ * OPEN ISSUE: it appears that some WUSB devices will need to be
+ * built by combining a normal (wired) gadget with a wireless one.
+ * This revision of the gadget framework should probably try to make
+ * sure doing that won't hurt too much.
+ *
+ * One notion for how to handle Wireless USB devices involves:
+ * (a) a second gadget here, discovery mechanism TBD, but likely
+ * needing separate "register/unregister WUSB gadget" calls;
+ * (b) updates to usb_gadget to include flags "is it wireless",
+ * "is it wired", plus (presumably in a wrapper structure)
+ * bandgroup and PHY info;
+ * (c) presumably a wireless_ep wrapping a usb_ep, and reporting
+ * wireless-specific parameters like maxburst and maxsequence;
+ * (d) configurations that are specific to wireless links;
+ * (e) function drivers that understand wireless configs and will
+ * support wireless for (additional) function instances;
+ * (f) a function to support association setup (like CBAF), not
+ * necessarily requiring a wireless adapter;
+ * (g) composite device setup that can create one or more wireless
+ * configs, including appropriate association setup support;
+ * (h) more, TBD.
+ */
+struct usb_composite_dev {
+ struct usb_gadget *gadget;
+ struct usb_request *req;
+ struct usb_request *os_desc_req;
+
+ struct usb_configuration *config;
+
+ /* OS String is a custom (yet popular) extension to the USB standard. */
+ u8 qw_sign[OS_STRING_QW_SIGN_LEN];
+ u8 b_vendor_code;
+ struct usb_configuration *os_desc_config;
+ unsigned int use_os_string:1;
+
+ /* private: */
+ /* internals */
+ unsigned int suspended:1;
+ struct usb_device_descriptor desc;
+ struct list_head configs;
+ struct list_head gstrings;
+ struct usb_composite_driver *driver;
+ u8 next_string_id;
+ char *def_manufacturer;
+
+ /* the gadget driver won't enable the data pullup
+ * while the deactivation count is nonzero.
+ */
+ unsigned deactivations;
+
+ /* the composite driver won't complete the control transfer's
+ * data/status stages till delayed_status is zero.
+ */
+ int delayed_status;
+
+	/* protects deactivations and delayed_status counts */
+ spinlock_t lock;
+
+ unsigned setup_pending:1;
+ unsigned os_desc_pending:1;
+};
+
+extern int usb_string_id(struct usb_composite_dev *c);
+extern int usb_string_ids_tab(struct usb_composite_dev *c,
+ struct usb_string *str);
+extern struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev,
+ struct usb_gadget_strings **sp, unsigned n_strings);
+
+extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
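
As a small illustration of the string-ID helpers, a hypothetical table could be assigned IDs during bind() roughly as follows (struct usb_string is the { id, s } pair declared in <linux/usb/gadget.h>):

static struct usb_string my_strings[] = {
	{ .s = "My Interface" },
	{  }				/* terminator */
};

static int my_assign_strings(struct usb_composite_dev *cdev)
{
	int status = usb_string_ids_tab(cdev, my_strings);

	if (status < 0)
		return status;

	/* my_strings[0].id now holds the allocated string ID */
	return 0;
}
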
+
+extern void composite_disconnect(struct usb_gadget *gadget);
+extern int composite_setup(struct usb_gadget *gadget,
+ const struct usb_ctrlrequest *ctrl);
+extern void composite_suspend(struct usb_gadget *gadget);
+extern void composite_resume(struct usb_gadget *gadget);
+
+/*
+ * Some systems will need runtime overrides for the product identifiers
+ * published in the device descriptor, either numbers or strings or both.
+ * String parameters are in UTF-8 (superset of ASCII's 7 bit characters).
+ */
+struct usb_composite_overwrite {
+ u16 idVendor;
+ u16 idProduct;
+ u16 bcdDevice;
+ char *serial_number;
+ char *manufacturer;
+ char *product;
+};
+#define USB_GADGET_COMPOSITE_OPTIONS() \
+ static struct usb_composite_overwrite coverwrite; \
+ \
+ module_param_named(idVendor, coverwrite.idVendor, ushort, S_IRUGO); \
+ MODULE_PARM_DESC(idVendor, "USB Vendor ID"); \
+ \
+ module_param_named(idProduct, coverwrite.idProduct, ushort, S_IRUGO); \
+ MODULE_PARM_DESC(idProduct, "USB Product ID"); \
+ \
+ module_param_named(bcdDevice, coverwrite.bcdDevice, ushort, S_IRUGO); \
+ MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); \
+ \
+ module_param_named(iSerialNumber, coverwrite.serial_number, charp, \
+ S_IRUGO); \
+ MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); \
+ \
+ module_param_named(iManufacturer, coverwrite.manufacturer, charp, \
+ S_IRUGO); \
+ MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); \
+ \
+ module_param_named(iProduct, coverwrite.product, charp, S_IRUGO); \
+ MODULE_PARM_DESC(iProduct, "USB Product string")
+
+void usb_composite_overwrite_options(struct usb_composite_dev *cdev,
+ struct usb_composite_overwrite *covr);
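
A typical usage pattern, sketched here with hypothetical names: the macro is placed at file scope (it defines a static 'coverwrite' plus the module parameters), and the overrides are applied from the composite bind() callback:

USB_GADGET_COMPOSITE_OPTIONS();		/* defines the static 'coverwrite' */

static int my_legacy_bind(struct usb_composite_dev *cdev)
{
	/* ... assign string IDs, add configurations ... */
	usb_composite_overwrite_options(cdev, &coverwrite);
	return 0;
}
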
+
+static inline u16 get_default_bcdDevice(void)
+{
+ u16 bcdDevice;
+
+ bcdDevice = bin2bcd((LINUX_VERSION_CODE >> 16 & 0xff)) << 8;
+ bcdDevice |= bin2bcd((LINUX_VERSION_CODE >> 8 & 0xff));
+ return bcdDevice;
+}
+
+struct usb_function_driver {
+ const char *name;
+ struct module *mod;
+ struct list_head list;
+ struct usb_function_instance *(*alloc_inst)(void);
+ struct usb_function *(*alloc_func)(struct usb_function_instance *inst);
+};
+
+struct usb_function_instance {
+ struct config_group group;
+ struct list_head cfs_list;
+ struct usb_function_driver *fd;
+ int (*set_inst_name)(struct usb_function_instance *inst,
+ const char *name);
+ void (*free_func_inst)(struct usb_function_instance *inst);
+};
+
+void usb_function_unregister(struct usb_function_driver *f);
+int usb_function_register(struct usb_function_driver *newf);
+void usb_put_function_instance(struct usb_function_instance *fi);
+void usb_put_function(struct usb_function *f);
+struct usb_function_instance *usb_get_function_instance(const char *name);
+struct usb_function *usb_get_function(struct usb_function_instance *fi);
+
+struct usb_configuration *usb_get_config(struct usb_composite_dev *cdev,
+ int val);
+int usb_add_config_only(struct usb_composite_dev *cdev,
+ struct usb_configuration *config);
+void usb_remove_function(struct usb_configuration *c, struct usb_function *f);
+
+#define DECLARE_USB_FUNCTION(_name, _inst_alloc, _func_alloc) \
+ static struct usb_function_driver _name ## usb_func = { \
+ .name = __stringify(_name), \
+ .mod = THIS_MODULE, \
+ .alloc_inst = _inst_alloc, \
+ .alloc_func = _func_alloc, \
+ }; \
+ MODULE_ALIAS("usbfunc:"__stringify(_name));
+
+#define DECLARE_USB_FUNCTION_INIT(_name, _inst_alloc, _func_alloc) \
+ DECLARE_USB_FUNCTION(_name, _inst_alloc, _func_alloc) \
+ static int __init _name ## mod_init(void) \
+ { \
+ return usb_function_register(&_name ## usb_func); \
+ } \
+ static void __exit _name ## mod_exit(void) \
+ { \
+ usb_function_unregister(&_name ## usb_func); \
+ } \
+ module_init(_name ## mod_init); \
+ module_exit(_name ## mod_exit)
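
For example, a function module named "foo" (hypothetical) would provide the two allocators and register itself with the macro; the stub bodies below only mark where the real allocation happens:

static struct usb_function_instance *foo_alloc_inst(void)
{
	/* allocate and set up a struct usb_function_instance here */
	return ERR_PTR(-ENOMEM);	/* placeholder */
}

static struct usb_function *foo_alloc_func(struct usb_function_instance *fi)
{
	/* allocate a struct usb_function tied to 'fi' here */
	return ERR_PTR(-ENOMEM);	/* placeholder */
}

DECLARE_USB_FUNCTION_INIT(foo, foo_alloc_inst, foo_alloc_func);
MODULE_LICENSE("GPL");
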
+
+/* messaging utils */
+#define DBG(d, fmt, args...) \
+ dev_dbg(&(d)->gadget->dev , fmt , ## args)
+#define VDBG(d, fmt, args...) \
+ dev_vdbg(&(d)->gadget->dev , fmt , ## args)
+#define ERROR(d, fmt, args...) \
+ dev_err(&(d)->gadget->dev , fmt , ## args)
+#define WARNING(d, fmt, args...) \
+ dev_warn(&(d)->gadget->dev , fmt , ## args)
+#define INFO(d, fmt, args...) \
+ dev_info(&(d)->gadget->dev , fmt , ## args)
+
+#endif /* __LINUX_USB_COMPOSITE_H */
diff --git a/include/linux/usb/ehci-dbgp.h b/include/linux/usb/ehci-dbgp.h
new file mode 100644
index 000000000..7344d9e59
--- /dev/null
+++ b/include/linux/usb/ehci-dbgp.h
@@ -0,0 +1,83 @@
+/*
+ * Standalone EHCI usb debug driver
+ *
+ * Originally written by:
+ *  Eric W. Biederman <ebiederm@xmission.com> and
+ * Yinghai Lu <yhlu.kernel@gmail.com>
+ *
+ * Changes for early/late printk and HW errata:
+ * Jason Wessel <jason.wessel@windriver.com>
+ * Copyright (C) 2009 Wind River Systems, Inc.
+ *
+ */
+
+#ifndef __LINUX_USB_EHCI_DBGP_H
+#define __LINUX_USB_EHCI_DBGP_H
+
+#include <linux/console.h>
+#include <linux/types.h>
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct ehci_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+extern int early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+#ifdef CONFIG_XEN_DOM0
+extern int xen_dbgp_reset_prep(struct usb_hcd *);
+extern int xen_dbgp_external_startup(struct usb_hcd *);
+#else
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return -1;
+}
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from ehci host driver to ehci debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return xen_dbgp_reset_prep(hcd);
+}
+
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+#endif /* __LINUX_USB_EHCI_DBGP_H */
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
new file mode 100644
index 000000000..966889a20
--- /dev/null
+++ b/include/linux/usb/ehci_def.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2001-2002 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_USB_EHCI_DEF_H
+#define __LINUX_USB_EHCI_DEF_H
+
+#include <linux/usb/ehci-dbgp.h>
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct ehci_caps {
+ /* these fields are specified as 8 and 16 bit registers,
+ * but some hosts can't perform 8 or 16 bit PCI accesses.
+ * some hosts treat caplength and hciversion as parts of a 32-bit
+ * register, others treat them as two separate registers, this
+ * affects the memory map for big endian controllers.
+ */
+ u32 hc_capbase;
+#define HC_LENGTH(ehci, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
+ (ehci_big_endian_capbase(ehci) ? 24 : 0)))
+#define HC_VERSION(ehci, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
+ (ehci_big_endian_capbase(ehci) ? 0 : 16)))
+ u32 hcs_params; /* HCSPARAMS - offset 0x4 */
+#define HCS_DEBUG_PORT(p) (((p)>>20)&0xf) /* bits 23:20, debug port? */
+#define HCS_INDICATOR(p) ((p)&(1 << 16)) /* true: has port indicators */
+#define HCS_N_CC(p) (((p)>>12)&0xf) /* bits 15:12, #companion HCs */
+#define HCS_N_PCC(p) (((p)>>8)&0xf) /* bits 11:8, ports per CC */
+#define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */
+#define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */
+#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
+/* EHCI 1.1 addendum */
+#define HCC_32FRAME_PERIODIC_LIST(p) ((p)&(1 << 19))
+#define HCC_PER_PORT_CHANGE_EVENT(p) ((p)&(1 << 18))
+#define HCC_LPM(p) ((p)&(1 << 17))
+#define HCC_HW_PREFETCH(p) ((p)&(1 << 16))
+
+#define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */
+#define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */
+#define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */
+#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
+#define HCC_64BIT_ADDR(p) ((p)&(1)) /* true: can use 64-bit addr */
+ u8 portroute[8]; /* nibbles for routing - offset 0xC */
+};
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct ehci_regs {
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
+
+/* EHCI 1.1 addendum */
+#define CMD_HIRD (0xf<<24) /* host initiated resume duration */
+#define CMD_PPCEE (1<<15) /* per port change event enable */
+#define CMD_FSP (1<<14) /* fully synchronized prefetch */
+#define CMD_ASPE (1<<13) /* async schedule prefetch enable */
+#define CMD_PSPE (1<<12) /* periodic schedule prefetch enable */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK (1<<11) /* enable "park" on async qh */
+#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+#define CMD_LRESET (1<<7) /* partial reset (no ports, etc) */
+#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
+#define CMD_ASE (1<<5) /* async schedule enable */
+#define CMD_PSE (1<<4) /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET (1<<1) /* reset HC not bus */
+#define CMD_RUN (1<<0) /* start/stop HC */
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
+#define STS_PPCE_MASK (0xff<<16) /* Per-Port change event 1-16 */
+#define STS_ASS (1<<15) /* Async Schedule Status */
+#define STS_PSS (1<<14) /* Periodic Schedule Status */
+#define STS_RECL (1<<13) /* Reclamation */
+#define STS_HALT (1<<12) /* Not running (any reason) */
+/* some bits reserved */
+ /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA (1<<5) /* Interrupted on async advance */
+#define STS_FATAL (1<<4) /* such as some PCI access errors */
+#define STS_FLR (1<<3) /* frame list rolled over */
+#define STS_PCD (1<<2) /* port change detect */
+#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
+#define STS_INT (1<<0) /* "normal" completion (short, ...) */
+
+ /* USBINTR: offset 0x08 */
+ u32 intr_enable;
+
+ /* FRINDEX: offset 0x0C */
+ u32 frame_index; /* current microframe number */
+ /* CTRLDSSEGMENT: offset 0x10 */
+ u32 segment; /* address bits 63:32 if needed */
+ /* PERIODICLISTBASE: offset 0x14 */
+ u32 frame_list; /* points to periodic list */
+ /* ASYNCLISTADDR: offset 0x18 */
+ u32 async_next; /* address of next async queue head */
+
+ u32 reserved1[2];
+
+ /* TXFILLTUNING: offset 0x24 */
+ u32 txfill_tuning; /* TX FIFO Tuning register */
+#define TXFIFO_DEFAULT (8<<16) /* FIFO burst threshold 8 */
+
+ u32 reserved2[6];
+
+ /* CONFIGFLAG: offset 0x40 */
+ u32 configured_flag;
+#define FLAG_CF (1<<0) /* true: we'll support "high speed" */
+
+ /* PORTSC: offset 0x44 */
+ u32 port_status[0]; /* up to N_PORTS */
+/* EHCI 1.1 addendum */
+#define PORTSC_SUSPEND_STS_ACK 0
+#define PORTSC_SUSPEND_STS_NYET 1
+#define PORTSC_SUSPEND_STS_STALL 2
+#define PORTSC_SUSPEND_STS_ERR 3
+
+#define PORT_DEV_ADDR (0x7f<<25) /* device address */
+#define PORT_SSTS (0x3<<23) /* suspend status */
+/* 31:23 reserved */
+#define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */
+#define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */
+#define PORT_WKCONN_E (1<<20) /* wake on connect (enable) */
+/* 19:16 for port testing */
+#define PORT_TEST(x) (((x)&0xf)<<16) /* Port Test Control */
+#define PORT_TEST_PKT PORT_TEST(0x4) /* Port Test Control - packet test */
+#define PORT_TEST_FORCE PORT_TEST(0x5) /* Port Test Control - force enable */
+#define PORT_LED_OFF (0<<14)
+#define PORT_LED_AMBER (1<<14)
+#define PORT_LED_GREEN (2<<14)
+#define PORT_LED_MASK (3<<14)
+#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
+#define PORT_POWER (1<<12) /* true: has power (see PPC) */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+/* 9 reserved */
+#define PORT_LPM (1<<9) /* LPM transaction */
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_OCC (1<<5) /* over current change */
+#define PORT_OC (1<<4) /* over current active */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
+
+ u32 reserved3[9];
+
+ /* USBMODE: offset 0x68 */
+ u32 usbmode; /* USB Device mode */
+#define USBMODE_SDIS (1<<3) /* Stream disable */
+#define USBMODE_BE (1<<2) /* BE/LE endianness select */
+#define USBMODE_CM_HC (3<<0) /* host controller mode */
+#define USBMODE_CM_IDLE (0<<0) /* idle state */
+
+ u32 reserved4[6];
+
+/* Moorestown has some non-standard registers, partially due to the fact that
+ * its EHCI controller has both TT and LPM support. HOSTPCx are extensions to
+ * PORTSCx
+ */
+ /* HOSTPC: offset 0x84 */
+ u32 hostpc[1]; /* HOSTPC extension */
+#define HOSTPC_PHCD (1<<22) /* Phy clock disable */
+#define HOSTPC_PSPD (3<<25) /* Port speed detection */
+
+ u32 reserved5[16];
+
+ /* USBMODE_EX: offset 0xc8 */
+ u32 usbmode_ex; /* USB Device mode extension */
+#define USBMODE_EX_VBPS (1<<5) /* VBus Power Select On */
+#define USBMODE_EX_HC (3<<0) /* host controller mode */
+};
+
+#endif /* __LINUX_USB_EHCI_DEF_H */
diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h
new file mode 100644
index 000000000..db0431b39
--- /dev/null
+++ b/include/linux/usb/ehci_pdriver.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __USB_CORE_EHCI_PDRIVER_H
+#define __USB_CORE_EHCI_PDRIVER_H
+
+struct platform_device;
+struct usb_hcd;
+
+/**
+ * struct usb_ehci_pdata - platform_data for generic ehci driver
+ *
+ * @caps_offset: offset of the EHCI Capability Registers to the start of
+ * the io memory region provided to the driver.
+ * @has_tt: set to 1 if TT is integrated in root hub.
+ * @port_power_on: set to 1 if the controller needs a power up after
+ * initialization.
+ * @port_power_off: set to 1 if the controller needs to be powered down
+ * after initialization.
+ * @no_io_watchdog: set to 1 if the controller does not need the I/O
+ * watchdog to run.
+ * @reset_on_resume: set to 1 if the controller needs to be reset after
+ * a suspend / resume cycle (but can't detect that itself).
+ *
+ * These are general configuration options for the EHCI controller. All of
+ * these options more or less enable workarounds for specific hardware.
+ */
+struct usb_ehci_pdata {
+ int caps_offset;
+ unsigned has_tt:1;
+ unsigned has_synopsys_hc_bug:1;
+ unsigned big_endian_desc:1;
+ unsigned big_endian_mmio:1;
+ unsigned no_io_watchdog:1;
+ unsigned reset_on_resume:1;
+ unsigned dma_mask_64:1;
+
+ /* Turn on all power and clocks */
+ int (*power_on)(struct platform_device *pdev);
+ /* Turn off all power and clocks */
+ void (*power_off)(struct platform_device *pdev);
+ /* Turn on only VBUS suspend power and hotplug detection,
+ * turn off everything else */
+ void (*power_suspend)(struct platform_device *pdev);
+ int (*pre_setup)(struct usb_hcd *hcd);
+};
+
+#endif /* __USB_CORE_EHCI_PDRIVER_H */
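
A board file might fill in this platform data roughly as follows; the names and the caps_offset value are hypothetical:

static int my_ehci_power_on(struct platform_device *pdev)
{
	/* enable the clocks/regulators feeding the controller */
	return 0;
}

static void my_ehci_power_off(struct platform_device *pdev)
{
	/* disable the clocks/regulators again */
}

static struct usb_ehci_pdata my_ehci_pdata = {
	.caps_offset	= 0x100,	/* hypothetical capability offset */
	.has_tt		= 1,
	.power_on	= my_ehci_power_on,
	.power_off	= my_ehci_power_off,
};
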
diff --git a/include/linux/usb/ezusb.h b/include/linux/usb/ezusb.h
new file mode 100644
index 000000000..639ee4577
--- /dev/null
+++ b/include/linux/usb/ezusb.h
@@ -0,0 +1,8 @@
+#ifndef __EZUSB_H
+#define __EZUSB_H
+
+extern int ezusb_fx1_set_reset(struct usb_device *dev, unsigned char reset_bit);
+extern int ezusb_fx1_ihex_firmware_download(struct usb_device *dev,
+ const char *firmware_path);
+
+#endif /* __EZUSB_H */
diff --git a/include/linux/usb/functionfs.h b/include/linux/usb/functionfs.h
new file mode 100644
index 000000000..71190663f
--- /dev/null
+++ b/include/linux/usb/functionfs.h
@@ -0,0 +1,6 @@
+#ifndef __LINUX_FUNCTIONFS_H__
+#define __LINUX_FUNCTIONFS_H__ 1
+
+#include <uapi/linux/usb/functionfs.h>
+
+#endif
diff --git a/include/linux/usb/g_hid.h b/include/linux/usb/g_hid.h
new file mode 100644
index 000000000..50f5745df
--- /dev/null
+++ b/include/linux/usb/g_hid.h
@@ -0,0 +1,32 @@
+/*
+ * g_hid.h -- Header file for USB HID gadget driver
+ *
+ * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_USB_G_HID_H
+#define __LINUX_USB_G_HID_H
+
+struct hidg_func_descriptor {
+ unsigned char subclass;
+ unsigned char protocol;
+ unsigned short report_length;
+ unsigned short report_desc_length;
+ unsigned char report_desc[];
+};
+
+#endif /* __LINUX_USB_G_HID_H */
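
Platform code passes one of these descriptors to the HID gadget. A deliberately truncated, hypothetical example follows; a real keyboard report descriptor also declares the modifier, key and LED report items, and report_desc_length must always equal the number of bytes actually supplied:

static struct hidg_func_descriptor my_hid_data = {
	.subclass		= 0,	/* no subclass */
	.protocol		= 1,	/* boot-protocol keyboard */
	.report_length		= 8,	/* 8-byte input reports */
	.report_desc_length	= 7,	/* must match the bytes below */
	.report_desc		= {
		0x05, 0x01,		/* Usage Page (Generic Desktop) */
		0x09, 0x06,		/* Usage (Keyboard) */
		0xa1, 0x01,		/* Collection (Application) */
		0xc0,			/* End Collection */
	},
};
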
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
new file mode 100644
index 000000000..4f3dfb7d0
--- /dev/null
+++ b/include/linux/usb/gadget.h
@@ -0,0 +1,1054 @@
+/*
+ * <linux/usb/gadget.h>
+ *
+ * We call the USB code inside a Linux-based peripheral device a "gadget"
+ * driver, except for the hardware-specific bus glue. One USB host can
+ * master many USB gadgets, but the gadgets are only slaved to one host.
+ *
+ *
+ * (C) Copyright 2002-2004 by David Brownell
+ * All Rights Reserved.
+ *
+ * This software is licensed under the GNU GPL version 2.
+ */
+
+#ifndef __LINUX_USB_GADGET_H
+#define __LINUX_USB_GADGET_H
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/usb/ch9.h>
+
+struct usb_ep;
+
+/**
+ * struct usb_request - describes one i/o request
+ * @buf: Buffer used for data. Always provide this; some controllers
+ * only use PIO, or don't use DMA for some endpoints.
+ * @dma: DMA address corresponding to 'buf'. If you don't set this
+ * field, and the usb controller needs one, it is responsible
+ * for mapping and unmapping the buffer.
+ * @sg: a scatterlist for SG-capable controllers.
+ * @num_sgs: number of SG entries
+ * @num_mapped_sgs: number of SG entries mapped to DMA (internal)
+ * @length: Length of that data
+ * @stream_id: The stream id, when USB3.0 bulk streams are being used
+ * @no_interrupt: If true, hints that no completion irq is needed.
+ * Helpful sometimes with deep request queues that are handled
+ * directly by DMA controllers.
+ * @zero: If true, when writing data, makes the last packet be "short"
+ * by adding a zero length packet as needed;
+ * @short_not_ok: When reading data, makes short packets be
+ * treated as errors (queue stops advancing till cleanup).
+ * @complete: Function called when request completes, so this request and
+ * its buffer may be re-used. The function will always be called with
+ * interrupts disabled, and it must not sleep.
+ * Reads terminate with a short packet, or when the buffer fills,
+ * whichever comes first. When writes terminate, some data bytes
+ * will usually still be in flight (often in a hardware fifo).
+ * Errors (for reads or writes) stop the queue from advancing
+ * until the completion function returns, so that any transfers
+ * invalidated by the error may first be dequeued.
+ * @context: For use by the completion callback
+ * @list: For use by the gadget driver.
+ * @status: Reports completion code, zero or a negative errno.
+ * Normally, faults block the transfer queue from advancing until
+ * the completion callback returns.
+ * Code "-ESHUTDOWN" indicates completion caused by device disconnect,
+ * or when the driver disabled the endpoint.
+ * @actual: Reports bytes transferred to/from the buffer. For reads (OUT
+ * transfers) this may be less than the requested length. If the
+ * short_not_ok flag is set, short reads are treated as errors
+ * even when status otherwise indicates successful completion.
+ * Note that for writes (IN transfers) some data bytes may still
+ * reside in a device-side FIFO when the request is reported as
+ * complete.
+ *
+ * These are allocated/freed through the endpoint they're used with. The
+ * hardware's driver can add extra per-request data to the memory it returns,
+ * which often avoids separate memory allocations (potential failures),
+ * later when the request is queued.
+ *
+ * Request flags affect request handling, such as whether a zero length
+ * packet is written (the "zero" flag), whether a short read should be
+ * treated as an error (blocking request queue advance, the "short_not_ok"
+ * flag), or hinting that an interrupt is not required (the "no_interrupt"
+ * flag, for use with deep request queues).
+ *
+ * Bulk endpoints can use any size buffers, and can also be used for interrupt
+ * transfers. interrupt-only endpoints can be much less functional.
+ *
+ * NOTE: this is analogous to 'struct urb' on the host side, except that
+ * it's thinner and promotes more pre-allocation.
+ */
+
+struct usb_request {
+ void *buf;
+ unsigned length;
+ dma_addr_t dma;
+
+ struct scatterlist *sg;
+ unsigned num_sgs;
+ unsigned num_mapped_sgs;
+
+ unsigned stream_id:16;
+ unsigned no_interrupt:1;
+ unsigned zero:1;
+ unsigned short_not_ok:1;
+
+ void (*complete)(struct usb_ep *ep,
+ struct usb_request *req);
+ void *context;
+ struct list_head list;
+
+ int status;
+ unsigned actual;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* endpoint-specific parts of the api to the usb controller hardware.
+ * unlike the urb model, (de)multiplexing layers are not required.
+ * (so this api could slash overhead if used on the host side...)
+ *
+ * note that device side usb controllers commonly differ in how many
+ * endpoints they support, as well as their capabilities.
+ */
+struct usb_ep_ops {
+ int (*enable) (struct usb_ep *ep,
+ const struct usb_endpoint_descriptor *desc);
+ int (*disable) (struct usb_ep *ep);
+
+ struct usb_request *(*alloc_request) (struct usb_ep *ep,
+ gfp_t gfp_flags);
+ void (*free_request) (struct usb_ep *ep, struct usb_request *req);
+
+ int (*queue) (struct usb_ep *ep, struct usb_request *req,
+ gfp_t gfp_flags);
+ int (*dequeue) (struct usb_ep *ep, struct usb_request *req);
+
+ int (*set_halt) (struct usb_ep *ep, int value);
+ int (*set_wedge) (struct usb_ep *ep);
+
+ int (*fifo_status) (struct usb_ep *ep);
+ void (*fifo_flush) (struct usb_ep *ep);
+};
+
+/**
+ * struct usb_ep - device side representation of USB endpoint
+ * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
+ * @ops: Function pointers used to access hardware-specific operations.
+ * @ep_list:the gadget's ep_list holds all of its endpoints
+ * @maxpacket:The maximum packet size used on this endpoint. The initial
+ * value can sometimes be reduced (hardware allowing), according to
+ * the endpoint descriptor used to configure the endpoint.
+ * @maxpacket_limit:The maximum packet size value which can be handled by this
+ * endpoint. It's set once by UDC driver when endpoint is initialized, and
+ * should not be changed. Should not be confused with maxpacket.
+ * @max_streams: The maximum number of streams supported
+ * by this EP (0 - 16, actual number is 2^n)
+ * @mult: multiplier, 'mult' value for SS Isoc EPs
+ * @maxburst: the maximum number of bursts supported by this EP (for usb3)
+ * @driver_data:for use by the gadget driver.
+ * @address: used to identify the endpoint when finding descriptor that
+ * matches connection speed
+ * @desc: endpoint descriptor. This pointer is set before the endpoint is
+ * enabled and remains valid until the endpoint is disabled.
+ * @comp_desc: In case of SuperSpeed support, this is the endpoint companion
+ * descriptor that is used to configure the endpoint
+ *
+ * the bus controller driver lists all the general purpose endpoints in
+ * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list,
+ * and is accessed only in response to a driver setup() callback.
+ */
+struct usb_ep {
+ void *driver_data;
+
+ const char *name;
+ const struct usb_ep_ops *ops;
+ struct list_head ep_list;
+ unsigned maxpacket:16;
+ unsigned maxpacket_limit:16;
+ unsigned max_streams:16;
+ unsigned mult:2;
+ unsigned maxburst:5;
+ u8 address;
+ const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
+ * @ep:the endpoint being configured
+ * @maxpacket_limit:value of maximum packet size limit
+ *
+ * This function should be used only in UDC drivers to initialize endpoint
+ * (usually in probe function).
+ */
+static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
+ unsigned maxpacket_limit)
+{
+ ep->maxpacket_limit = maxpacket_limit;
+ ep->maxpacket = maxpacket_limit;
+}
+
+/**
+ * usb_ep_enable - configure endpoint, making it usable
+ * @ep:the endpoint being configured. may not be the endpoint named "ep0".
+ * drivers discover endpoints through the ep_list of a usb_gadget.
+ *
+ * When configurations are set, or when interface settings change, the driver
+ * will enable or disable the relevant endpoints. while it is enabled, an
+ * endpoint may be used for i/o until the driver receives a disconnect() from
+ * the host or until the endpoint is disabled.
+ *
+ * the ep0 implementation (which calls this routine) must ensure that the
+ * hardware capabilities of each endpoint match the descriptor provided
+ * for it. for example, an endpoint named "ep2in-bulk" would be usable
+ * for interrupt transfers as well as bulk, but it likely couldn't be used
+ * for iso transfers or for endpoint 14. some endpoints are fully
+ * configurable, with more generic names like "ep-a". (remember that for
+ * USB, "in" means "towards the USB master".)
+ *
+ * returns zero, or a negative error code.
+ */
+static inline int usb_ep_enable(struct usb_ep *ep)
+{
+ return ep->ops->enable(ep, ep->desc);
+}
+
+/**
+ * usb_ep_disable - endpoint is no longer usable
+ * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0".
+ *
+ * no other task may be using this endpoint when this is called.
+ * any pending and uncompleted requests will complete with status
+ * indicating disconnect (-ESHUTDOWN) before this call returns.
+ * gadget drivers must call usb_ep_enable() again before queueing
+ * requests to the endpoint.
+ *
+ * returns zero, or a negative error code.
+ */
+static inline int usb_ep_disable(struct usb_ep *ep)
+{
+ return ep->ops->disable(ep);
+}
+
+/**
+ * usb_ep_alloc_request - allocate a request object to use with this endpoint
+ * @ep:the endpoint to be used with with the request
+ * @gfp_flags:GFP_* flags to use
+ *
+ * Request objects must be allocated with this call, since they normally
+ * need controller-specific setup and may even need endpoint-specific
+ * resources such as allocation of DMA descriptors.
+ * Requests may be submitted with usb_ep_queue(), and receive a single
+ * completion callback. Free requests with usb_ep_free_request(), when
+ * they are no longer needed.
+ *
+ * Returns the request, or null if one could not be allocated.
+ */
+static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
+ gfp_t gfp_flags)
+{
+ return ep->ops->alloc_request(ep, gfp_flags);
+}
+
+/**
+ * usb_ep_free_request - frees a request object
+ * @ep:the endpoint associated with the request
+ * @req:the request being freed
+ *
+ * Reverses the effect of usb_ep_alloc_request().
+ * Caller guarantees the request is not queued, and that it will
+ * no longer be requeued (or otherwise used).
+ */
+static inline void usb_ep_free_request(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ ep->ops->free_request(ep, req);
+}
+
+/**
+ * usb_ep_queue - queues (submits) an I/O request to an endpoint.
+ * @ep:the endpoint associated with the request
+ * @req:the request being submitted
+ * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
+ * pre-allocate all necessary memory with the request.
+ *
+ * This tells the device controller to perform the specified request through
+ * that endpoint (reading or writing a buffer). When the request completes,
+ * including being canceled by usb_ep_dequeue(), the request's completion
+ * routine is called to return the request to the driver. Any endpoint
+ * (except control endpoints like ep0) may have more than one transfer
+ * request queued; they complete in FIFO order. Once a gadget driver
+ * submits a request, that request may not be examined or modified until it
+ * is given back to that driver through the completion callback.
+ *
+ * Each request is turned into one or more packets. The controller driver
+ * never merges adjacent requests into the same packet. OUT transfers
+ * will sometimes use data that's already buffered in the hardware.
+ * Drivers can rely on the fact that the first byte of the request's buffer
+ * always corresponds to the first byte of some USB packet, for both
+ * IN and OUT transfers.
+ *
+ * Bulk endpoints can queue any amount of data; the transfer is packetized
+ * automatically. The last packet will be short if the request doesn't fill it
+ * out completely. Zero length packets (ZLPs) should be avoided in portable
+ * protocols since not all usb hardware can successfully handle zero length
+ * packets. (ZLPs may be explicitly written, and may be implicitly written if
+ * the request 'zero' flag is set.) Bulk endpoints may also be used
+ * for interrupt transfers; but the reverse is not true, and some endpoints
+ * won't support every interrupt transfer. (Such as 768 byte packets.)
+ *
+ * Interrupt-only endpoints are less functional than bulk endpoints, for
+ * example by not supporting queueing or not handling buffers that are
+ * larger than the endpoint's maxpacket size. They may also treat data
+ * toggle differently.
+ *
+ * Control endpoints ... after getting a setup() callback, the driver queues
+ * one response (even if it would be zero length). That enables the
+ * status ack, after transferring data as specified in the response. Setup
+ * functions may return negative error codes to generate protocol stalls.
+ * (Note that some USB device controllers disallow protocol stall responses
+ * in some cases.) When control responses are deferred (the response is
+ * written after the setup callback returns), then usb_ep_set_halt() may be
+ * used on ep0 to trigger protocol stalls. Depending on the controller,
+ * it may not be possible to trigger a status-stage protocol stall when the
+ * data stage is over, that is, from within the response's completion
+ * routine.
+ *
+ * For periodic endpoints, like interrupt or isochronous ones, the usb host
+ * arranges to poll once per interval, and the gadget driver usually will
+ * have queued some data to transfer at that time.
+ *
+ * Returns zero, or a negative error code. Endpoints that are not enabled
+ * report errors; errors will also be
+ * reported when the usb peripheral is disconnected.
+ */
+static inline int usb_ep_queue(struct usb_ep *ep,
+ struct usb_request *req, gfp_t gfp_flags)
+{
+ return ep->ops->queue(ep, req, gfp_flags);
+}
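
To ground the queueing rules above, a minimal sketch (hypothetical names) of allocating, filling and submitting a request on an endpoint that has already been enabled:

static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual are valid here; recycle or free req */
}

static int my_submit(struct usb_ep *ep, void *buf, unsigned len)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;

	req->buf = buf;
	req->length = len;
	req->complete = my_complete;

	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
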
+
+/**
+ * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
+ * @ep:the endpoint associated with the request
+ * @req:the request being canceled
+ *
+ * If the request is still active on the endpoint, it is dequeued and its
+ * completion routine is called (with status -ECONNRESET); else a negative
+ * error code is returned. This is guaranteed to happen before the call to
+ * usb_ep_dequeue() returns.
+ *
+ * Note that some hardware can't clear out write fifos (to unlink the request
+ * at the head of the queue) except as part of disconnecting from usb. Such
+ * restrictions prevent drivers from supporting configuration changes,
+ * even to configuration zero (a "chapter 9" requirement).
+ */
+static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+ return ep->ops->dequeue(ep, req);
+}
+
+/**
+ * usb_ep_set_halt - sets the endpoint halt feature.
+ * @ep: the non-isochronous endpoint being stalled
+ *
+ * Use this to stall an endpoint, perhaps as an error report.
+ * Except for control endpoints,
+ * the endpoint stays halted (will not stream any data) until the host
+ * clears this feature; drivers may need to empty the endpoint's request
+ * queue first, to make sure no inappropriate transfers happen.
+ *
+ * Note that while an endpoint CLEAR_FEATURE will be invisible to the
+ * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the
+ * current altsetting, see usb_ep_clear_halt(). When switching altsettings,
+ * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
+ *
+ * Returns zero, or a negative error code. On success, this call sets
+ * underlying hardware state that blocks data transfers.
+ * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
+ * transfer requests are still queued, or if the controller hardware
+ * (usually a FIFO) still holds bytes that the host hasn't collected.
+ */
+static inline int usb_ep_set_halt(struct usb_ep *ep)
+{
+ return ep->ops->set_halt(ep, 1);
+}
+
+/**
+ * usb_ep_clear_halt - clears endpoint halt, and resets toggle
+ * @ep:the bulk or interrupt endpoint being reset
+ *
+ * Use this when responding to the standard usb "set interface" request,
+ * for endpoints that aren't reconfigured, after clearing any other state
+ * in the endpoint's i/o queue.
+ *
+ * Returns zero, or a negative error code. On success, this call clears
+ * the underlying hardware state reflecting endpoint halt and data toggle.
+ * Note that some hardware can't support this request (like pxa2xx_udc),
+ * and accordingly can't correctly implement interface altsettings.
+ */
+static inline int usb_ep_clear_halt(struct usb_ep *ep)
+{
+ return ep->ops->set_halt(ep, 0);
+}
+
+/**
+ * usb_ep_set_wedge - sets the halt feature and ignores clear requests
+ * @ep: the endpoint being wedged
+ *
+ * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
+ * requests. If the gadget driver clears the halt status, it will
+ * automatically unwedge the endpoint.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int
+usb_ep_set_wedge(struct usb_ep *ep)
+{
+ if (ep->ops->set_wedge)
+ return ep->ops->set_wedge(ep);
+ else
+ return ep->ops->set_halt(ep, 1);
+}
+
+/**
+ * usb_ep_fifo_status - returns number of bytes in fifo, or error
+ * @ep: the endpoint whose fifo status is being checked.
+ *
+ * FIFO endpoints may have "unclaimed data" in them in certain cases,
+ * such as after aborted transfers. Hosts may not have collected all
+ * the IN data written by the gadget driver (and reported by a request
+ * completion). The gadget driver may not have collected all the data
+ * written OUT to it by the host. Drivers that need precise handling for
+ * fault reporting or recovery may need to use this call.
+ *
+ * This returns the number of such bytes in the fifo, or a negative
+ * errno if the endpoint doesn't use a FIFO or doesn't support such
+ * precise handling.
+ */
+static inline int usb_ep_fifo_status(struct usb_ep *ep)
+{
+ if (ep->ops->fifo_status)
+ return ep->ops->fifo_status(ep);
+ else
+ return -EOPNOTSUPP;
+}
+
+/**
+ * usb_ep_fifo_flush - flushes contents of a fifo
+ * @ep: the endpoint whose fifo is being flushed.
+ *
+ * This call may be used to flush the "unclaimed data" that may exist in
+ * an endpoint fifo after abnormal transaction terminations. The call
+ * must never be used except when endpoint is not being used for any
+ * protocol translation.
+ */
+static inline void usb_ep_fifo_flush(struct usb_ep *ep)
+{
+ if (ep->ops->fifo_flush)
+ ep->ops->fifo_flush(ep);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+struct usb_dcd_config_params {
+ __u8 bU1devExitLat; /* U1 Device exit Latency */
+#define USB_DEFAULT_U1_DEV_EXIT_LAT	0x01	/* Less than 1 microsec */
+ __le16 bU2DevExitLat; /* U2 Device exit Latency */
+#define USB_DEFAULT_U2_DEV_EXIT_LAT	0x1F4	/* Less than 500 microsec */
+};
+
+
+struct usb_gadget;
+struct usb_gadget_driver;
+struct usb_udc;
+
+/* the rest of the api to the controller hardware: device operations,
+ * which don't involve endpoints (or i/o).
+ */
+struct usb_gadget_ops {
+ int (*get_frame)(struct usb_gadget *);
+ int (*wakeup)(struct usb_gadget *);
+ int (*set_selfpowered) (struct usb_gadget *, int is_selfpowered);
+ int (*vbus_session) (struct usb_gadget *, int is_active);
+ int (*vbus_draw) (struct usb_gadget *, unsigned mA);
+ int (*pullup) (struct usb_gadget *, int is_on);
+ int (*ioctl)(struct usb_gadget *,
+ unsigned code, unsigned long param);
+ void (*get_config_params)(struct usb_dcd_config_params *);
+ int (*udc_start)(struct usb_gadget *,
+ struct usb_gadget_driver *);
+ int (*udc_stop)(struct usb_gadget *);
+};
+
+/**
+ * struct usb_gadget - represents a usb slave device
+ * @work: (internal use) Workqueue to be used for sysfs_notify()
+ * @udc: struct usb_udc pointer for this gadget
+ * @ops: Function pointers used to access hardware-specific operations.
+ * @ep0: Endpoint zero, used when reading or writing responses to
+ * driver setup() requests
+ * @ep_list: List of other endpoints supported by the device.
+ * @speed: Speed of current connection to USB host.
+ * @max_speed: Maximal speed the UDC can handle. UDC must support this
+ * and all slower speeds.
+ * @state: the state we are now (attached, suspended, configured, etc)
+ * @name: Identifies the controller hardware type. Used in diagnostics
+ * and sometimes configuration.
+ * @dev: Driver model state for this abstract device.
+ * @out_epnum: last used out ep number
+ * @in_epnum: last used in ep number
+ * @sg_supported: true if we can handle scatter-gather
+ * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
+ * gadget driver must provide a USB OTG descriptor.
+ * @is_a_peripheral: False unless is_otg, the "A" end of a USB cable
+ * is in the Mini-AB jack, and HNP has been used to switch roles
+ * so that the "A" device currently acts as A-Peripheral, not A-Host.
+ * @a_hnp_support: OTG device feature flag, indicating that the A-Host
+ * supports HNP at this port.
+ * @a_alt_hnp_support: OTG device feature flag, indicating that the A-Host
+ * only supports HNP on a different root port.
+ * @b_hnp_enable: OTG device feature flag, indicating that the A-Host
+ * enabled HNP support.
+ * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
+ * MaxPacketSize.
+ * @is_selfpowered: if the gadget is self-powered.
+ *
+ * Gadgets have a mostly-portable "gadget driver" implementing device
+ * functions, handling all usb configurations and interfaces. Gadget
+ * drivers talk to hardware-specific code indirectly, through ops vectors.
+ * That insulates the gadget driver from hardware details, and packages
+ * the hardware endpoints through generic i/o queues. The "usb_gadget"
+ * and "usb_ep" interfaces provide that insulation from the hardware.
+ *
+ * Except for the driver data, all fields in this structure are
+ * read-only to the gadget driver. That driver data is part of the
+ * "driver model" infrastructure in 2.6 (and later) kernels, and for
+ * earlier systems is grouped in a similar structure that's not known
+ * to the rest of the kernel.
+ *
+ * Values of the three OTG device feature flags are updated before the
+ * setup() call corresponding to USB_REQ_SET_CONFIGURATION, and before
+ * driver suspend() calls. They are valid only when is_otg, and when the
+ * device is acting as a B-Peripheral (so is_a_peripheral is false).
+ */
+struct usb_gadget {
+ struct work_struct work;
+ struct usb_udc *udc;
+ /* readonly to gadget driver */
+ const struct usb_gadget_ops *ops;
+ struct usb_ep *ep0;
+ struct list_head ep_list; /* of usb_ep */
+ enum usb_device_speed speed;
+ enum usb_device_speed max_speed;
+ enum usb_device_state state;
+ const char *name;
+ struct device dev;
+ unsigned out_epnum;
+ unsigned in_epnum;
+
+ unsigned sg_supported:1;
+ unsigned is_otg:1;
+ unsigned is_a_peripheral:1;
+ unsigned b_hnp_enable:1;
+ unsigned a_hnp_support:1;
+ unsigned a_alt_hnp_support:1;
+ unsigned quirk_ep_out_aligned_size:1;
+ unsigned is_selfpowered:1;
+};
+#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
+
+static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
+ { dev_set_drvdata(&gadget->dev, data); }
+static inline void *get_gadget_data(struct usb_gadget *gadget)
+ { return dev_get_drvdata(&gadget->dev); }
+static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
+{
+ return container_of(dev, struct usb_gadget, dev);
+}
+
+/* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */
+#define gadget_for_each_ep(tmp, gadget) \
+ list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
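
For illustration only, a hedged sketch of walking the endpoint list by hand; most function drivers instead rely on usb_ep_autoconfig() declared below, the endpoint naming convention ("ep1in-bulk" and so on) is controller specific, and strstr() comes from <linux/string.h>:

static struct usb_ep *example_find_unclaimed_in_ep(struct usb_gadget *gadget)
{
	struct usb_ep *ep;

	gadget_for_each_ep(ep, gadget) {
		/* skip endpoints some other function already claimed */
		if (!ep->driver_data && strstr(ep->name, "in"))
			return ep;
	}
	return NULL;
}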
+
+
+/**
+ * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
+ * requires quirk_ep_out_aligned_size, otherwise returns @len.
+ * @g: controller to check for quirk
+ * @ep: the endpoint whose maxpacketsize is used to align @len
+ * @len: buffer size's length to align to @ep's maxpacketsize
+ *
+ * This helper is used in case it's required for any reason to check and maybe
+ * align buffer's size to an ep's maxpacketsize.
+ */
+static inline size_t
+usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
+{
+ return !g->quirk_ep_out_aligned_size ? len :
+ round_up(len, (size_t)ep->desc->wMaxPacketSize);
+}
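
A minimal sketch of sizing an OUT buffer with this helper (names are illustrative; kmalloc() and GFP_KERNEL come from <linux/slab.h>):

static void *example_alloc_out_buf(struct usb_gadget *g, struct usb_ep *ep,
				   size_t want, size_t *actual)
{
	*actual = usb_ep_align_maybe(g, ep, want);	/* possibly rounded up */
	return kmalloc(*actual, GFP_KERNEL);
}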
+
+/**
+ * gadget_is_dualspeed - return true iff the hardware handles high speed
+ * @g: controller that might support both high and full speeds
+ */
+static inline int gadget_is_dualspeed(struct usb_gadget *g)
+{
+ return g->max_speed >= USB_SPEED_HIGH;
+}
+
+/**
+ * gadget_is_superspeed() - return true if the hardware handles superspeed
+ * @g: controller that might support superspeed
+ */
+static inline int gadget_is_superspeed(struct usb_gadget *g)
+{
+ return g->max_speed >= USB_SPEED_SUPER;
+}
+
+/**
+ * gadget_is_otg - return true iff the hardware is OTG-ready
+ * @g: controller that might have a Mini-AB connector
+ *
+ * This is a runtime test, since kernels with a USB-OTG stack sometimes
+ * run on boards which only have a Mini-B (or Mini-A) connector.
+ */
+static inline int gadget_is_otg(struct usb_gadget *g)
+{
+#ifdef CONFIG_USB_OTG
+ return g->is_otg;
+#else
+ return 0;
+#endif
+}
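
A hedged example of using the speed helpers when picking a descriptor set at SET_CONFIGURATION time; the full-speed and high-speed tables are assumed to be provided by the function driver:

static struct usb_descriptor_header **
example_choose_descriptors(struct usb_gadget *g,
			   struct usb_descriptor_header **fs_descs,
			   struct usb_descriptor_header **hs_descs)
{
	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
		return hs_descs;
	return fs_descs;
}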
+
+/**
+ * usb_gadget_frame_number - returns the current frame number
+ * @gadget: controller that reports the frame number
+ *
+ * Returns the usb frame number, normally eleven bits from a SOF packet,
+ * or negative errno if this device doesn't support this capability.
+ */
+static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
+{
+ return gadget->ops->get_frame(gadget);
+}
+
+/**
+ * usb_gadget_wakeup - tries to wake up the host connected to this gadget
+ * @gadget: controller used to wake up the host
+ *
+ * Returns zero on success, else negative error code if the hardware
+ * doesn't support such attempts, or its support has not been enabled
+ * by the usb host. Drivers must return device descriptors that report
+ * their ability to support this, or hosts won't enable it.
+ *
+ * This may also try to use SRP to wake the host and start enumeration,
+ * even if OTG isn't otherwise in use. OTG devices may also start
+ * remote wakeup even when hosts don't explicitly enable it.
+ */
+static inline int usb_gadget_wakeup(struct usb_gadget *gadget)
+{
+ if (!gadget->ops->wakeup)
+ return -EOPNOTSUPP;
+ return gadget->ops->wakeup(gadget);
+}
+
+/**
+ * usb_gadget_set_selfpowered - sets the device selfpowered feature.
+ * @gadget:the device being declared as self-powered
+ *
+ * this affects the device status reported by the hardware driver
+ * to reflect that it now has a local power supply.
+ *
+ * returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
+{
+ if (!gadget->ops->set_selfpowered)
+ return -EOPNOTSUPP;
+ return gadget->ops->set_selfpowered(gadget, 1);
+}
+
+/**
+ * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
+ * @gadget:the device being declared as bus-powered
+ *
+ * this affects the device status reported by the hardware driver.
+ * some hardware may not support bus-powered operation, in which
+ * case this feature's value can never change.
+ *
+ * returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
+{
+ if (!gadget->ops->set_selfpowered)
+ return -EOPNOTSUPP;
+ return gadget->ops->set_selfpowered(gadget, 0);
+}
+
+/**
+ * usb_gadget_vbus_connect - Notify controller that VBUS is powered
+ * @gadget:The device which now has VBUS power.
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session starting. Common responses include
+ * resuming the controller, activating the D+ (or D-) pullup to let the
+ * host detect that a USB device is attached, and starting to draw power
+ * (8mA or possibly more, especially after SET_CONFIGURATION).
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget)
+{
+ if (!gadget->ops->vbus_session)
+ return -EOPNOTSUPP;
+ return gadget->ops->vbus_session(gadget, 1);
+}
+
+/**
+ * usb_gadget_vbus_draw - constrain controller's VBUS power usage
+ * @gadget:The device whose VBUS usage is being described
+ * @mA:How much current to draw, in milliAmperes. This should be twice
+ * the value listed in the configuration descriptor bMaxPower field.
+ *
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume. For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+ if (!gadget->ops->vbus_draw)
+ return -EOPNOTSUPP;
+ return gadget->ops->vbus_draw(gadget, mA);
+}
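
For example (a sketch, assuming the chosen configuration descriptor is at hand), a SET_CONFIGURATION handler could report its power budget like this; bMaxPower is expressed in 2 mA units at full/high speed:

static void example_report_vbus_budget(struct usb_gadget *gadget,
				       const struct usb_config_descriptor *c)
{
	usb_gadget_vbus_draw(gadget, c->bMaxPower * 2);	/* units of 2 mA */
}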
+
+/**
+ * usb_gadget_vbus_disconnect - notify controller about VBUS session end
+ * @gadget:the device whose VBUS supply is being described
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session ending. Common responses include
+ * reversing everything done in usb_gadget_vbus_connect().
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
+{
+ if (!gadget->ops->vbus_session)
+ return -EOPNOTSUPP;
+ return gadget->ops->vbus_session(gadget, 0);
+}
+
+/**
+ * usb_gadget_connect - software-controlled connect to USB host
+ * @gadget:the peripheral being connected
+ *
+ * Enables the D+ (or potentially D-) pullup. The host will start
+ * enumerating this gadget when the pullup is active and a VBUS session
+ * is active (the link is powered). This pullup is always enabled unless
+ * usb_gadget_disconnect() has been used to disable it.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_connect(struct usb_gadget *gadget)
+{
+ if (!gadget->ops->pullup)
+ return -EOPNOTSUPP;
+ return gadget->ops->pullup(gadget, 1);
+}
+
+/**
+ * usb_gadget_disconnect - software-controlled disconnect from USB host
+ * @gadget:the peripheral being disconnected
+ *
+ * Disables the D+ (or potentially D-) pullup, which the host may see
+ * as a disconnect (when a VBUS session is active). Not all systems
+ * support software pullup controls.
+ *
+ * This routine may be used during the gadget driver bind() call to prevent
+ * the peripheral from ever being visible to the USB host, unless later
+ * usb_gadget_connect() is called. For example, user mode components may
+ * need to be activated before the system can talk to hosts.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+ if (!gadget->ops->pullup)
+ return -EOPNOTSUPP;
+ return gadget->ops->pullup(gadget, 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * struct usb_gadget_driver - driver for usb 'slave' devices
+ * @function: String describing the gadget's function
+ * @max_speed: Highest speed the driver handles.
+ * @setup: Invoked for ep0 control requests that aren't handled by
+ * the hardware level driver. Most calls must be handled by
+ * the gadget driver, including descriptor and configuration
+ * management. The 16 bit members of the setup data are in
+ * USB byte order. Called in_interrupt; this may not sleep. Driver
+ * queues a response to ep0, or returns negative to stall.
+ * @disconnect: Invoked after all transfers have been stopped,
+ * when the host is disconnected. May be called in_interrupt; this
+ * may not sleep. Some devices can't detect disconnect, so this might
+ * not be called except as part of controller shutdown.
+ * @bind: the driver's bind callback
+ * @unbind: Invoked when the driver is unbound from a gadget,
+ * usually from rmmod (after a disconnect is reported).
+ * Called in a context that permits sleeping.
+ * @suspend: Invoked on USB suspend. May be called in_interrupt.
+ * @resume: Invoked on USB resume. May be called in_interrupt.
+ * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers
+ * and should be called in_interrupt.
+ * @driver: Driver model state for this driver.
+ *
+ * Devices are disabled until a gadget driver successfully bind()s, which
+ * means the driver will handle setup() requests needed to enumerate (and
+ * meet "chapter 9" requirements) then do some useful work.
+ *
+ * If gadget->is_otg is true, the gadget driver must provide an OTG
+ * descriptor during enumeration, or else fail the bind() call. In such
+ * cases, no USB traffic may flow until both bind() returns without
+ * having called usb_gadget_disconnect(), and the USB host stack has
+ * initialized.
+ *
+ * Drivers use hardware-specific knowledge to configure the usb hardware.
+ * endpoint addressing is only one of several hardware characteristics that
+ * are in descriptors the ep0 implementation returns from setup() calls.
+ *
+ * Except for the ep0 implementation, most driver code shouldn't need to change to
+ * run on top of different usb controllers. It'll use endpoints set up by
+ * that ep0 implementation.
+ *
+ * The usb controller driver handles a few standard usb requests. Those
+ * include set_address, and feature flags for devices, interfaces, and
+ * endpoints (the get_status, set_feature, and clear_feature requests).
+ *
+ * Accordingly, the driver's setup() callback must always implement all
+ * get_descriptor requests, returning at least a device descriptor and
+ * a configuration descriptor. Drivers must make sure the endpoint
+ * descriptors match any hardware constraints. Some hardware also constrains
+ * other descriptors. (The pxa250 allows only configurations 1, 2, or 3).
+ *
+ * The driver's setup() callback must also implement set_configuration,
+ * and should also implement set_interface, get_configuration, and
+ * get_interface. Setting a configuration (or interface) is where
+ * endpoints should be activated or (config 0) shut down.
+ *
+ * (Note that only the default control endpoint is supported. Neither
+ * hosts nor devices generally support control traffic except to ep0.)
+ *
+ * Most devices will ignore USB suspend/resume operations, and so will
+ * not provide those callbacks. However, some may need to change modes
+ * when the host is no longer directing those activities. For example,
+ * local controls (buttons, dials, etc) may need to be re-enabled since
+ * the (remote) host can't do that any longer; or an error state might
+ * be cleared, to make the device behave identically whether or not
+ * power is maintained.
+ */
+struct usb_gadget_driver {
+ char *function;
+ enum usb_device_speed max_speed;
+ int (*bind)(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver);
+ void (*unbind)(struct usb_gadget *);
+ int (*setup)(struct usb_gadget *,
+ const struct usb_ctrlrequest *);
+ void (*disconnect)(struct usb_gadget *);
+ void (*suspend)(struct usb_gadget *);
+ void (*resume)(struct usb_gadget *);
+ void (*reset)(struct usb_gadget *);
+
+ /* FIXME support safe rmmod */
+ struct device_driver driver;
+};
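
To make the structure above concrete, here is a minimal, heavily abbreviated driver skeleton (illustrative only; a usable driver must implement the chapter 9 handling described above, and <linux/module.h> plus this header are assumed to be included):

static int example_bind(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	/* allocate device state, claim endpoints via usb_ep_autoconfig() */
	set_gadget_data(gadget, NULL);
	return 0;
}

static void example_unbind(struct usb_gadget *gadget)
{
	/* release whatever example_bind() allocated */
}

static int example_setup(struct usb_gadget *gadget,
			 const struct usb_ctrlrequest *ctrl)
{
	/* a real driver answers GET_DESCRIPTOR, SET_CONFIGURATION, ... here */
	return -EOPNOTSUPP;		/* negative return stalls ep0 */
}

static void example_disconnect(struct usb_gadget *gadget)
{
	/* host went away; quiesce any pending I/O */
}

static struct usb_gadget_driver example_gadget_driver = {
	.function	= "example",
	.max_speed	= USB_SPEED_HIGH,
	.bind		= example_bind,
	.unbind		= example_unbind,
	.setup		= example_setup,
	.disconnect	= example_disconnect,
	.driver		= {
		.name	= "example_gadget",
		.owner	= THIS_MODULE,
	},
};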
+
+
+
+/*-------------------------------------------------------------------------*/
+
+/* driver modules register and unregister, as usual.
+ * these calls must be made in a context that can sleep.
+ *
+ * these will usually be implemented directly by the hardware-dependent
+ * usb bus interface driver, which will only support a single driver.
+ */
+
+/**
+ * usb_gadget_probe_driver - probe a gadget driver
+ * @driver: the driver being registered
+ * Context: can sleep
+ *
+ * Call this in your gadget driver's module initialization function,
+ * to tell the underlying usb controller driver about your driver.
+ * The @bind() function will be called to bind it to a gadget before this
+ * registration call returns. It's expected that the @bind() function will
+ * be in init sections.
+ */
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver);
+
+/**
+ * usb_gadget_unregister_driver - unregister a gadget driver
+ * @driver:the driver being unregistered
+ * Context: can sleep
+ *
+ * Call this in your gadget driver's module cleanup function,
+ * to tell the underlying usb controller that your driver is
+ * going away. If the controller is connected to a USB host,
+ * it will first disconnect(). The driver is also requested
+ * to unbind() and clean up any device state, before this procedure
+ * finally returns. It's expected that the unbind() functions
+ * will be in exit sections, so may not be linked in some kernels.
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver);
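
Continuing the sketch above, registration and removal would then look roughly like this (module macros assumed from <linux/module.h>):

static int __init example_gadget_init(void)
{
	return usb_gadget_probe_driver(&example_gadget_driver);
}
module_init(example_gadget_init);

static void __exit example_gadget_exit(void)
{
	usb_gadget_unregister_driver(&example_gadget_driver);
}
module_exit(example_gadget_exit);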
+
+extern int usb_add_gadget_udc_release(struct device *parent,
+ struct usb_gadget *gadget, void (*release)(struct device *dev));
+extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
+extern void usb_del_gadget_udc(struct usb_gadget *gadget);
+extern int usb_udc_attach_driver(const char *name,
+ struct usb_gadget_driver *driver);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to simplify dealing with string descriptors */
+
+/**
+ * struct usb_string - wraps a C string and its USB id
+ * @id:the (nonzero) ID for this string
+ * @s:the string, in UTF-8 encoding
+ *
+ * If you're using usb_gadget_get_string(), use this to wrap a string
+ * together with its ID.
+ */
+struct usb_string {
+ u8 id;
+ const char *s;
+};
+
+/**
+ * struct usb_gadget_strings - a set of USB strings in a given language
+ * @language:identifies the strings' language (0x0409 for en-us)
+ * @strings:array of strings with their ids
+ *
+ * If you're using usb_gadget_get_string(), use this to wrap all the
+ * strings for a given language.
+ */
+struct usb_gadget_strings {
+ u16 language; /* 0x0409 for en-us */
+ struct usb_string *strings;
+};
+
+struct usb_gadget_string_container {
+ struct list_head list;
+ u8 *stash[0];
+};
+
+/* put descriptor for string with that id into buf (buflen >= 256) */
+int usb_gadget_get_string(struct usb_gadget_strings *table, int id, u8 *buf);
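
A small, hedged example of the two structures and the lookup call together; the IDs and strings are made up, and buf is assumed to hold at least 256 bytes as noted above:

static struct usb_string example_strings[] = {
	{ .id = 1, .s = "Example Manufacturer" },
	{ .id = 2, .s = "Example Product" },
	{ }					/* zero id terminates the list */
};

static struct usb_gadget_strings example_stringtab = {
	.language	= 0x0409,		/* en-us */
	.strings	= example_strings,
};

static int example_get_string_desc(u8 *buf, u16 wValue)
{
	return usb_gadget_get_string(&example_stringtab, wValue & 0xff, buf);
}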
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to simplify managing config descriptors */
+
+/* write vector of descriptors into buffer */
+int usb_descriptor_fillbuf(void *, unsigned,
+ const struct usb_descriptor_header **);
+
+/* build config descriptor from single descriptor vector */
+int usb_gadget_config_buf(const struct usb_config_descriptor *config,
+ void *buf, unsigned buflen, const struct usb_descriptor_header **desc);
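
A sketch of the usual GET_DESCRIPTOR(CONFIGURATION) pattern built on this helper; patching the descriptor type afterwards (USB_DT_CONFIG vs. USB_DT_OTHER_SPEED_CONFIG) is modelled on existing gadget drivers and is an assumption here:

static int example_fill_config(void *buf, unsigned buflen, u8 type,
			       const struct usb_config_descriptor *config,
			       const struct usb_descriptor_header **function)
{
	int len = usb_gadget_config_buf(config, buf, buflen, function);

	if (len >= 0)	/* type: USB_DT_CONFIG or USB_DT_OTHER_SPEED_CONFIG */
		((struct usb_config_descriptor *)buf)->bDescriptorType = type;
	return len;
}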
+
+/* copy a NULL-terminated vector of descriptors */
+struct usb_descriptor_header **usb_copy_descriptors(
+ struct usb_descriptor_header **);
+
+/**
+ * usb_free_descriptors - free descriptors returned by usb_copy_descriptors()
+ * @v: vector of descriptors
+ */
+static inline void usb_free_descriptors(struct usb_descriptor_header **v)
+{
+ kfree(v);
+}
+
+struct usb_function;
+int usb_assign_descriptors(struct usb_function *f,
+ struct usb_descriptor_header **fs,
+ struct usb_descriptor_header **hs,
+ struct usb_descriptor_header **ss);
+void usb_free_all_descriptors(struct usb_function *f);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to simplify map/unmap of usb_requests to/from DMA */
+
+extern int usb_gadget_map_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in);
+
+extern void usb_gadget_unmap_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to set gadget state properly */
+
+extern void usb_gadget_set_state(struct usb_gadget *gadget,
+ enum usb_device_state state);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to tell udc core that the bus reset occurs */
+extern void usb_gadget_udc_reset(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to give requests back to the gadget layer */
+
+extern void usb_gadget_giveback_request(struct usb_ep *ep,
+ struct usb_request *req);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to update vbus status for udc core, it may be scheduled */
+extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility wrapping a simple endpoint selection policy */
+
+extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *,
+ struct usb_endpoint_descriptor *);
+
+
+extern struct usb_ep *usb_ep_autoconfig_ss(struct usb_gadget *,
+ struct usb_endpoint_descriptor *,
+ struct usb_ss_ep_comp_descriptor *);
+
+extern void usb_ep_autoconfig_reset(struct usb_gadget *);
+
+#endif /* __LINUX_USB_GADGET_H */
diff --git a/include/linux/usb/gadget_configfs.h b/include/linux/usb/gadget_configfs.h
new file mode 100644
index 000000000..d74c0ae98
--- /dev/null
+++ b/include/linux/usb/gadget_configfs.h
@@ -0,0 +1,110 @@
+#ifndef __GADGET_CONFIGFS__
+#define __GADGET_CONFIGFS__
+
+#include <linux/configfs.h>
+
+int check_user_usb_string(const char *name,
+ struct usb_gadget_strings *stringtab_dev);
+
+#define GS_STRINGS_W(__struct, __name) \
+ static ssize_t __struct##_##__name##_store(struct __struct *gs, \
+ const char *page, size_t len) \
+{ \
+ int ret; \
+ \
+ ret = usb_string_copy(page, &gs->__name); \
+ if (ret) \
+ return ret; \
+ return len; \
+}
+
+#define GS_STRINGS_R(__struct, __name) \
+ static ssize_t __struct##_##__name##_show(struct __struct *gs, \
+ char *page) \
+{ \
+ return sprintf(page, "%s\n", gs->__name ?: ""); \
+}
+
+#define GS_STRING_ITEM_ATTR(struct_name, name) \
+ static struct struct_name##_attribute struct_name##_##name = \
+ __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \
+ struct_name##_##name##_show, \
+ struct_name##_##name##_store)
+
+#define GS_STRINGS_RW(struct_name, _name) \
+ GS_STRINGS_R(struct_name, _name) \
+ GS_STRINGS_W(struct_name, _name) \
+ GS_STRING_ITEM_ATTR(struct_name, _name)
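
Usage sketch only, mirroring how the in-kernel configfs gadget code appears to use these macros; the struct layout, the CONFIGFS_ATTR_STRUCT() step and the usb_string_copy() helper visible to the macro expansion are assumptions here:

struct example_strings {
	struct usb_gadget_strings	stringtab_dev;
	struct usb_string		strings[3];
	char				*manufacturer;
	char				*product;
	struct config_group		group;
	struct list_head		list;
};

CONFIGFS_ATTR_STRUCT(example_strings);
GS_STRINGS_RW(example_strings, manufacturer);
GS_STRINGS_RW(example_strings, product);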
+
+#define USB_CONFIG_STRING_RW_OPS(struct_in) \
+ CONFIGFS_ATTR_OPS(struct_in); \
+ \
+static struct configfs_item_operations struct_in##_langid_item_ops = { \
+ .release = struct_in##_attr_release, \
+ .show_attribute = struct_in##_attr_show, \
+ .store_attribute = struct_in##_attr_store, \
+}; \
+ \
+static struct config_item_type struct_in##_langid_type = { \
+ .ct_item_ops = &struct_in##_langid_item_ops, \
+ .ct_attrs = struct_in##_langid_attrs, \
+ .ct_owner = THIS_MODULE, \
+}
+
+#define USB_CONFIG_STRINGS_LANG(struct_in, struct_member) \
+ static struct config_group *struct_in##_strings_make( \
+ struct config_group *group, \
+ const char *name) \
+ { \
+ struct struct_member *gi; \
+ struct struct_in *gs; \
+ struct struct_in *new; \
+ int langs = 0; \
+ int ret; \
+ \
+ new = kzalloc(sizeof(*new), GFP_KERNEL); \
+ if (!new) \
+ return ERR_PTR(-ENOMEM); \
+ \
+ ret = check_user_usb_string(name, &new->stringtab_dev); \
+ if (ret) \
+ goto err; \
+ config_group_init_type_name(&new->group, name, \
+ &struct_in##_langid_type); \
+ \
+ gi = container_of(group, struct struct_member, strings_group); \
+ ret = -EEXIST; \
+ list_for_each_entry(gs, &gi->string_list, list) { \
+ if (gs->stringtab_dev.language == new->stringtab_dev.language) \
+ goto err; \
+ langs++; \
+ } \
+ ret = -EOVERFLOW; \
+ if (langs >= MAX_USB_STRING_LANGS) \
+ goto err; \
+ \
+ list_add_tail(&new->list, &gi->string_list); \
+ return &new->group; \
+err: \
+ kfree(new); \
+ return ERR_PTR(ret); \
+} \
+ \
+static void struct_in##_strings_drop( \
+ struct config_group *group, \
+ struct config_item *item) \
+{ \
+ config_item_put(item); \
+} \
+ \
+static struct configfs_group_operations struct_in##_strings_ops = { \
+ .make_group = &struct_in##_strings_make, \
+ .drop_item = &struct_in##_strings_drop, \
+}; \
+ \
+static struct config_item_type struct_in##_strings_type = { \
+ .ct_group_ops = &struct_in##_strings_ops, \
+ .ct_owner = THIS_MODULE, \
+}
+
+#endif
diff --git a/include/linux/usb/gpio_vbus.h b/include/linux/usb/gpio_vbus.h
new file mode 100644
index 000000000..837bba604
--- /dev/null
+++ b/include/linux/usb/gpio_vbus.h
@@ -0,0 +1,32 @@
+/*
+ * A simple GPIO VBUS sensing driver for B peripheral only devices
+ * with internal transceivers.
+ * Optionally D+ pullup can be controlled by a second GPIO.
+ *
+ * Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/**
+ * struct gpio_vbus_mach_info - configuration for gpio_vbus
+ * @gpio_vbus: VBUS sensing GPIO
+ * @gpio_pullup: optional D+ or D- pullup GPIO (else negative/invalid)
+ * @gpio_vbus_inverted: true if gpio_vbus is active low
+ * @gpio_pullup_inverted: true if gpio_pullup is active low
+ * @wakeup: configure gpio_vbus as a wake-up source
+ *
+ * The VBUS sensing GPIO should have a pulldown, which will normally be
+ * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
+ * value the GPIO detects as active. Some systems will use comparators.
+ */
+struct gpio_vbus_mach_info {
+ int gpio_vbus;
+ int gpio_pullup;
+ bool gpio_vbus_inverted;
+ bool gpio_pullup_inverted;
+ bool wakeup;
+};
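
A hypothetical board-file usage (the GPIO numbers are made up, the "gpio-vbus" platform device name matches the corresponding phy driver, and <linux/platform_device.h> is assumed to be included):

static struct gpio_vbus_mach_info example_vbus_info = {
	.gpio_vbus		= 42,		/* board-specific GPIOs */
	.gpio_pullup		= 43,
	.gpio_pullup_inverted	= false,
	.wakeup			= true,
};

static struct platform_device example_vbus_device = {
	.name	= "gpio-vbus",
	.id	= -1,
	.dev	= {
		.platform_data	= &example_vbus_info,
	},
};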
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
new file mode 100644
index 000000000..68b1e836d
--- /dev/null
+++ b/include/linux/usb/hcd.h
@@ -0,0 +1,709 @@
+/*
+ * Copyright (c) 2001-2002 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __USB_CORE_HCD_H
+#define __USB_CORE_HCD_H
+
+#ifdef __KERNEL__
+
+#include <linux/rwsem.h>
+#include <linux/interrupt.h>
+
+#define MAX_TOPO_LEVEL 6
+
+/* This file contains declarations of usbcore internals that are mostly
+ * used or exposed by Host Controller Drivers.
+ */
+
+/*
+ * USB Packet IDs (PIDs)
+ */
+#define USB_PID_EXT 0xf0 /* USB 2.0 LPM ECN */
+#define USB_PID_OUT 0xe1
+#define USB_PID_ACK 0xd2
+#define USB_PID_DATA0 0xc3
+#define USB_PID_PING 0xb4 /* USB 2.0 */
+#define USB_PID_SOF 0xa5
+#define USB_PID_NYET 0x96 /* USB 2.0 */
+#define USB_PID_DATA2 0x87 /* USB 2.0 */
+#define USB_PID_SPLIT 0x78 /* USB 2.0 */
+#define USB_PID_IN 0x69
+#define USB_PID_NAK 0x5a
+#define USB_PID_DATA1 0x4b
+#define USB_PID_PREAMBLE 0x3c /* Token mode */
+#define USB_PID_ERR 0x3c /* USB 2.0: handshake mode */
+#define USB_PID_SETUP 0x2d
+#define USB_PID_STALL 0x1e
+#define USB_PID_MDATA 0x0f /* USB 2.0 */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * USB Host Controller Driver (usb_hcd) framework
+ *
+ * Since "struct usb_bus" is so thin, you can't share much code in it.
+ * This framework is a layer over that, and should be more sharable.
+ *
+ * @authorized_default: Specifies if new devices are authorized to
+ * connect by default or they require explicit
+ * user space authorization; this bit is settable
+ * through /sys/class/usb_host/X/authorized_default.
+ * Otherwise the bit is read-only, so no locking is needed to access it.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+struct giveback_urb_bh {
+ bool running;
+ spinlock_t lock;
+ struct list_head head;
+ struct tasklet_struct bh;
+ struct usb_host_endpoint *completing_ep;
+};
+
+struct usb_hcd {
+
+ /*
+ * housekeeping
+ */
+ struct usb_bus self; /* hcd is-a bus */
+ struct kref kref; /* reference counter */
+
+ const char *product_desc; /* product/vendor string */
+ int speed; /* Speed for this roothub.
+ * May be different from
+ * hcd->driver->flags & HCD_MASK
+ */
+ char irq_descr[24]; /* driver + bus # */
+
+ struct timer_list rh_timer; /* drives root-hub polling */
+ struct urb *status_urb; /* the current status urb */
+#ifdef CONFIG_PM
+ struct work_struct wakeup_work; /* for remote wakeup */
+#endif
+
+ /*
+ * hardware info/state
+ */
+ const struct hc_driver *driver; /* hw-specific hooks */
+
+ /*
+ * OTG and some Host controllers need software interaction with phys;
+ * other external phys should be software-transparent
+ */
+ struct usb_phy *usb_phy;
+ struct phy *phy;
+
+ /* Flags that need to be manipulated atomically because they can
+ * change while the host controller is running. Always use
+ * set_bit() or clear_bit() to change their values.
+ */
+ unsigned long flags;
+#define HCD_FLAG_HW_ACCESSIBLE 0 /* at full power */
+#define HCD_FLAG_POLL_RH 2 /* poll for rh status? */
+#define HCD_FLAG_POLL_PENDING 3 /* status has changed? */
+#define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */
+#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
+#define HCD_FLAG_DEAD 6 /* controller has died? */
+
+ /* The flags can be tested using these macros; they are likely to
+ * be slightly faster than test_bit().
+ */
+#define HCD_HW_ACCESSIBLE(hcd) ((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE))
+#define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH))
+#define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING))
+#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
+#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
+#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
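
For instance, an HCD interrupt handler will typically bail out early using these tests (a sketch; the hardware handling itself is elided):

static irqreturn_t example_hcd_irq(struct usb_hcd *hcd)
{
	if (unlikely(!HCD_HW_ACCESSIBLE(hcd) || HCD_DEAD(hcd)))
		return IRQ_NONE;	/* suspended or already given up on */

	/* ... read and acknowledge controller status here ... */
	return IRQ_HANDLED;
}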
+
+ /* Flags that get set only during HCD registration or removal. */
+ unsigned rh_registered:1;/* is root hub registered? */
+ unsigned rh_pollable:1; /* may we poll the root hub? */
+ unsigned msix_enabled:1; /* driver has MSI-X enabled? */
+ unsigned remove_phy:1; /* auto-remove USB phy */
+
+ /* The next flag is a stopgap, to be removed when all the HCDs
+ * support the new root-hub polling mechanism. */
+ unsigned uses_new_polling:1;
+ unsigned wireless:1; /* Wireless USB HCD */
+ unsigned authorized_default:1;
+ unsigned has_tt:1; /* Integrated TT in root hub */
+ unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
+ unsigned can_do_streams:1; /* HC supports streams */
+ unsigned tpl_support:1; /* OTG & EH TPL support */
+ unsigned cant_recv_wakeups:1;
+ /* wakeup requests from downstream aren't received */
+
+ unsigned int irq; /* irq allocated */
+ void __iomem *regs; /* device memory/io */
+ resource_size_t rsrc_start; /* memory/io resource start */
+ resource_size_t rsrc_len; /* memory/io resource length */
+ unsigned power_budget; /* in mA, 0 = no limit */
+
+ struct giveback_urb_bh high_prio_bh;
+ struct giveback_urb_bh low_prio_bh;
+
+ /* bandwidth_mutex should be taken before adding or removing
+ * any new bus bandwidth constraints:
+ * 1. Before adding a configuration for a new device.
+ * 2. Before removing the configuration to put the device into
+ * the addressed state.
+ * 3. Before selecting a different configuration.
+ * 4. Before selecting an alternate interface setting.
+ *
+ * bandwidth_mutex should be dropped after a successful control message
+ * to the device, or resetting the bandwidth after a failed attempt.
+ */
+ struct mutex *bandwidth_mutex;
+ struct usb_hcd *shared_hcd;
+ struct usb_hcd *primary_hcd;
+
+
+#define HCD_BUFFER_POOLS 4
+ struct dma_pool *pool[HCD_BUFFER_POOLS];
+
+ int state;
+# define __ACTIVE 0x01
+# define __SUSPEND 0x04
+# define __TRANSIENT 0x80
+
+# define HC_STATE_HALT 0
+# define HC_STATE_RUNNING (__ACTIVE)
+# define HC_STATE_QUIESCING (__SUSPEND|__TRANSIENT|__ACTIVE)
+# define HC_STATE_RESUMING (__SUSPEND|__TRANSIENT)
+# define HC_STATE_SUSPENDED (__SUSPEND)
+
+#define HC_IS_RUNNING(state) ((state) & __ACTIVE)
+#define HC_IS_SUSPENDED(state) ((state) & __SUSPEND)
+
+ /* more shared queuing code would be good; it should support
+ * smarter scheduling, handle transaction translators, etc;
+ * input size of periodic table to an interrupt scheduler.
+ * (ohci 32, uhci 1024, ehci 256/512/1024).
+ */
+
+ /* The HC driver's private data is stored at the end of
+ * this structure.
+ */
+ unsigned long hcd_priv[0]
+ __attribute__ ((aligned(sizeof(s64))));
+};
+
+/* 2.4 does this a bit differently ... */
+static inline struct usb_bus *hcd_to_bus(struct usb_hcd *hcd)
+{
+ return &hcd->self;
+}
+
+static inline struct usb_hcd *bus_to_hcd(struct usb_bus *bus)
+{
+ return container_of(bus, struct usb_hcd, self);
+}
+
+struct hcd_timeout { /* timeouts we allocate */
+ struct list_head timeout_list;
+ struct timer_list timer;
+};
+
+/*-------------------------------------------------------------------------*/
+
+
+struct hc_driver {
+ const char *description; /* "ehci-hcd" etc */
+ const char *product_desc; /* product/vendor string */
+ size_t hcd_priv_size; /* size of private data */
+
+ /* irq handler */
+ irqreturn_t (*irq) (struct usb_hcd *hcd);
+
+ int flags;
+#define HCD_MEMORY 0x0001 /* HC regs use memory (else I/O) */
+#define HCD_LOCAL_MEM 0x0002 /* HC needs local memory */
+#define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */
+#define HCD_USB11 0x0010 /* USB 1.1 */
+#define HCD_USB2 0x0020 /* USB 2.0 */
+#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
+#define HCD_USB3 0x0040 /* USB 3.0 */
+#define HCD_MASK 0x0070
+#define HCD_BH 0x0100 /* URB complete in BH context */
+
+ /* called to init HCD and root hub */
+ int (*reset) (struct usb_hcd *hcd);
+ int (*start) (struct usb_hcd *hcd);
+
+ /* NOTE: these suspend/resume calls relate to the HC as
+ * a whole, not just the root hub; they're for PCI bus glue.
+ */
+ /* called after suspending the hub, before entering D3 etc */
+ int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup);
+
+ /* called after entering D0 (etc), before resuming the hub */
+ int (*pci_resume)(struct usb_hcd *hcd, bool hibernated);
+
+ /* cleanly make HCD stop writing memory and doing I/O */
+ void (*stop) (struct usb_hcd *hcd);
+
+ /* shutdown HCD */
+ void (*shutdown) (struct usb_hcd *hcd);
+
+ /* return current frame number */
+ int (*get_frame_number) (struct usb_hcd *hcd);
+
+ /* manage i/o requests, device state */
+ int (*urb_enqueue)(struct usb_hcd *hcd,
+ struct urb *urb, gfp_t mem_flags);
+ int (*urb_dequeue)(struct usb_hcd *hcd,
+ struct urb *urb, int status);
+
+ /*
+ * (optional) these hooks allow an HCD to override the default DMA
+ * mapping and unmapping routines. In general, they shouldn't be
+ * necessary unless the host controller has special DMA requirements,
+ * such as alignment constraints. If these are not specified, the
+ * general usb_hcd_(un)?map_urb_for_dma functions will be used instead
+ * (and it may be a good idea to call these functions in your HCD
+ * implementation)
+ */
+ int (*map_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags);
+ void (*unmap_urb_for_dma)(struct usb_hcd *hcd, struct urb *urb);
+
+ /* hw synch, freeing endpoint resources that urb_dequeue can't */
+ void (*endpoint_disable)(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+
+ /* (optional) reset any endpoint state such as sequence number
+ and current window */
+ void (*endpoint_reset)(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+
+ /* root hub support */
+ int (*hub_status_data) (struct usb_hcd *hcd, char *buf);
+ int (*hub_control) (struct usb_hcd *hcd,
+ u16 typeReq, u16 wValue, u16 wIndex,
+ char *buf, u16 wLength);
+ int (*bus_suspend)(struct usb_hcd *);
+ int (*bus_resume)(struct usb_hcd *);
+ int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
+
+ /* force handover of high-speed port to full-speed companion */
+ void (*relinquish_port)(struct usb_hcd *, int);
+ /* has a port been handed over to a companion? */
+ int (*port_handed_over)(struct usb_hcd *, int);
+
+ /* CLEAR_TT_BUFFER completion callback */
+ void (*clear_tt_buffer_complete)(struct usb_hcd *,
+ struct usb_host_endpoint *);
+
+ /* xHCI specific functions */
+ /* Called by usb_alloc_dev to alloc HC device structures */
+ int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
+ /* Called by usb_disconnect to free HC device structures */
+ void (*free_dev)(struct usb_hcd *, struct usb_device *);
+ /* Change a group of bulk endpoints to support multiple stream IDs */
+ int (*alloc_streams)(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ unsigned int num_streams, gfp_t mem_flags);
+ /* Reverts a group of bulk endpoints back to not using stream IDs.
+ * Can fail if we run out of memory.
+ */
+ int (*free_streams)(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint **eps, unsigned int num_eps,
+ gfp_t mem_flags);
+
+ /* Bandwidth computation functions */
+ /* Note that add_endpoint() can only be called once per endpoint before
+ * check_bandwidth() or reset_bandwidth() must be called.
+ * drop_endpoint() can only be called once per endpoint also.
+ * A call to xhci_drop_endpoint() followed by a call to
+ * xhci_add_endpoint() will add the endpoint to the schedule with
+ * possibly new parameters denoted by a different endpoint descriptor
+ * in usb_host_endpoint. A call to xhci_add_endpoint() followed by a
+ * call to xhci_drop_endpoint() is not allowed.
+ */
+ /* Allocate endpoint resources and add them to a new schedule */
+ int (*add_endpoint)(struct usb_hcd *, struct usb_device *,
+ struct usb_host_endpoint *);
+ /* Drop an endpoint from a new schedule */
+ int (*drop_endpoint)(struct usb_hcd *, struct usb_device *,
+ struct usb_host_endpoint *);
+ /* Check that a new hardware configuration, set using
+ * endpoint_enable and endpoint_disable, does not exceed bus
+ * bandwidth. This must be called before any set configuration
+ * or set interface requests are sent to the device.
+ */
+ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+ /* Reset the device schedule to the last known good schedule,
+ * which was set from a previous successful call to
+ * check_bandwidth(). This reverts any add_endpoint() and
+ * drop_endpoint() calls since that last successful call.
+ * Used for when a check_bandwidth() call fails due to resource
+ * or bandwidth constraints.
+ */
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+ /* Returns the hardware-chosen device address */
+ int (*address_device)(struct usb_hcd *, struct usb_device *udev);
+ /* prepares the hardware to send commands to the device */
+ int (*enable_device)(struct usb_hcd *, struct usb_device *udev);
+ /* Notifies the HCD after a hub descriptor is fetched.
+ * Will block.
+ */
+ int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
+ int (*reset_device)(struct usb_hcd *, struct usb_device *);
+ /* Notifies the HCD after a device is connected and its
+ * address is set
+ */
+ int (*update_device)(struct usb_hcd *, struct usb_device *);
+ int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int);
+ /* USB 3.0 Link Power Management */
+ /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
+ int (*enable_usb3_lpm_timeout)(struct usb_hcd *,
+ struct usb_device *, enum usb3_link_state state);
+ /* The xHCI host controller can still fail the command to
+ * disable the LPM timeouts, so this can return an error code.
+ */
+ int (*disable_usb3_lpm_timeout)(struct usb_hcd *,
+ struct usb_device *, enum usb3_link_state state);
+ int (*find_raw_port_number)(struct usb_hcd *, int);
+ /* Call for power on/off the port if necessary */
+ int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
+
+};
+
+static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
+{
+ return hcd->driver->flags & HCD_BH;
+}
+
+static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ return hcd->high_prio_bh.completing_ep == ep;
+}
+
+extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
+extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
+ int status);
+extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb);
+
+extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags);
+extern int usb_hcd_unlink_urb(struct urb *urb, int status);
+extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb,
+ int status);
+extern int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags);
+extern void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *);
+extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *);
+extern void usb_hcd_flush_endpoint(struct usb_device *udev,
+ struct usb_host_endpoint *ep);
+extern void usb_hcd_disable_endpoint(struct usb_device *udev,
+ struct usb_host_endpoint *ep);
+extern void usb_hcd_reset_endpoint(struct usb_device *udev,
+ struct usb_host_endpoint *ep);
+extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
+extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
+ struct usb_host_config *new_config,
+ struct usb_host_interface *old_alt,
+ struct usb_host_interface *new_alt);
+extern int usb_hcd_get_frame_number(struct usb_device *udev);
+
+extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
+ struct device *dev, const char *bus_name);
+extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
+ struct device *dev, const char *bus_name,
+ struct usb_hcd *shared_hcd);
+extern struct usb_hcd *usb_get_hcd(struct usb_hcd *hcd);
+extern void usb_put_hcd(struct usb_hcd *hcd);
+extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd);
+extern int usb_add_hcd(struct usb_hcd *hcd,
+ unsigned int irqnum, unsigned long irqflags);
+extern void usb_remove_hcd(struct usb_hcd *hcd);
+extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1);
+
+struct platform_device;
+extern void usb_hcd_platform_shutdown(struct platform_device *dev);
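
A hedged outline of platform glue built on the calls above; example_hc_driver, the resource layout and the error handling details are assumptions, and <linux/platform_device.h>, <linux/io.h> and <linux/err.h> are assumed to be included:

static int example_hcd_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* example_hc_driver: a driver-specific struct hc_driver (assumed) */
	hcd = usb_create_hcd(&example_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hcd->regs)) {
		ret = PTR_ERR(hcd->regs);
		goto err_put;
	}
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto err_put;

	return 0;

err_put:
	usb_put_hcd(hcd);
	return ret;
}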
+
+#ifdef CONFIG_PCI
+struct pci_dev;
+struct pci_device_id;
+extern int usb_hcd_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id);
+extern void usb_hcd_pci_remove(struct pci_dev *dev);
+extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+
+extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
+
+#ifdef CONFIG_PM
+extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
+#endif
+#endif /* CONFIG_PCI */
+
+/* pci-ish (pdev null is ok) buffer alloc/mapping support */
+void usb_init_pool_max(void);
+int hcd_buffer_create(struct usb_hcd *hcd);
+void hcd_buffer_destroy(struct usb_hcd *hcd);
+
+void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma);
+void hcd_buffer_free(struct usb_bus *bus, size_t size,
+ void *addr, dma_addr_t dma);
+
+/* generic bus glue, needed for host controllers that don't use PCI */
+extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
+
+extern void usb_hc_died(struct usb_hcd *hcd);
+extern void usb_hcd_poll_rh_status(struct usb_hcd *hcd);
+extern void usb_wakeup_notification(struct usb_device *hdev,
+ unsigned int portnum);
+
+extern void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum);
+extern void usb_hcd_end_port_resume(struct usb_bus *bus, int portnum);
+
+/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
+#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
+#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
+#define usb_settoggle(dev, ep, out, bit) \
+ ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \
+ ((bit) << (ep)))
+
+/* -------------------------------------------------------------------------- */
+
+/* Enumeration is only for the hub driver, or HCD virtual root hubs */
+extern struct usb_device *usb_alloc_dev(struct usb_device *parent,
+ struct usb_bus *, unsigned port);
+extern int usb_new_device(struct usb_device *dev);
+extern void usb_disconnect(struct usb_device **);
+
+extern int usb_get_configuration(struct usb_device *dev);
+extern void usb_destroy_configuration(struct usb_device *dev);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * HCD Root Hub support
+ */
+
+#include <linux/usb/ch11.h>
+
+/*
+ * As of USB 2.0, full/low speed devices are segregated into trees.
+ * One type grows from USB 1.1 host controllers (OHCI, UHCI etc).
+ * The other type grows from high speed hubs when they connect to
+ * full/low speed devices using "Transaction Translators" (TTs).
+ *
+ * TTs should only be known to the hub driver, and high speed bus
+ * drivers (only EHCI for now). They affect periodic scheduling and
+ * sometimes control/bulk error recovery.
+ */
+
+struct usb_device;
+
+struct usb_tt {
+ struct usb_device *hub; /* upstream highspeed hub */
+ int multi; /* true means one TT per port */
+ unsigned think_time; /* think time in ns */
+ void *hcpriv; /* HCD private data */
+
+ /* for control/bulk error recovery (CLEAR_TT_BUFFER) */
+ spinlock_t lock;
+ struct list_head clear_list; /* of usb_tt_clear */
+ struct work_struct clear_work;
+};
+
+struct usb_tt_clear {
+ struct list_head clear_list;
+ unsigned tt;
+ u16 devinfo;
+ struct usb_hcd *hcd;
+ struct usb_host_endpoint *ep;
+};
+
+extern int usb_hub_clear_tt_buffer(struct urb *urb);
+extern void usb_ep0_reinit(struct usb_device *);
+
+/* (shifted) direction/type/recipient from the USB 2.0 spec, table 9.2 */
+#define DeviceRequest \
+ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
+#define DeviceOutRequest \
+ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_DEVICE)<<8)
+
+#define InterfaceRequest \
+ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+
+#define EndpointRequest \
+ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+#define EndpointOutRequest \
+ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+
+/* class requests from the USB 2.0 hub spec, table 11-15 */
+/* GetBusState and SetHubDescriptor are optional, omitted */
+#define ClearHubFeature (0x2000 | USB_REQ_CLEAR_FEATURE)
+#define ClearPortFeature (0x2300 | USB_REQ_CLEAR_FEATURE)
+#define GetHubDescriptor (0xa000 | USB_REQ_GET_DESCRIPTOR)
+#define GetHubStatus (0xa000 | USB_REQ_GET_STATUS)
+#define GetPortStatus (0xa300 | USB_REQ_GET_STATUS)
+#define SetHubFeature (0x2000 | USB_REQ_SET_FEATURE)
+#define SetPortFeature (0x2300 | USB_REQ_SET_FEATURE)
+
+
+/*-------------------------------------------------------------------------*/
+
+/* class requests from USB 3.0 hub spec, table 10-5 */
+#define SetHubDepth (0x3000 | HUB_SET_DEPTH)
+#define GetPortErrorCount (0x8000 | HUB_GET_PORT_ERR_COUNT)
+
+/*
+ * Generic bandwidth allocation constants/support
+ */
+#define FRAME_TIME_USECS 1000L
+#define BitTime(bytecount) (7 * 8 * bytecount / 6) /* with integer truncation */
+ /* Trying not to use worst-case bit-stuffing
+ * of (7/6 * 8 * bytecount) = 9.33 * bytecount */
+ /* bytecount = data payload byte count */
+
+#define NS_TO_US(ns) DIV_ROUND_UP(ns, 1000L)
+ /* convert nanoseconds to microseconds, rounding up */
+
+/*
+ * Full/low speed bandwidth allocation constants/support.
+ */
+#define BW_HOST_DELAY 1000L /* nanoseconds */
+#define BW_HUB_LS_SETUP 333L /* nanoseconds */
+ /* 4 full-speed bit times (est.) */
+
+#define FRAME_TIME_BITS 12000L /* frame = 1 millisecond */
+#define FRAME_TIME_MAX_BITS_ALLOC (90L * FRAME_TIME_BITS / 100L)
+#define FRAME_TIME_MAX_USECS_ALLOC (90L * FRAME_TIME_USECS / 100L)
+
+/*
+ * Ceiling [nano/micro]seconds (typical) for that many bytes at high speed
+ * ISO is a bit less, no ACK ... from USB 2.0 spec, 5.11.3 (and needed
+ * to preallocate bandwidth)
+ */
+#define USB2_HOST_DELAY 5 /* nsec, guess */
+#define HS_NSECS(bytes) (((55 * 8 * 2083) \
+ + (2083UL * (3 + BitTime(bytes))))/1000 \
+ + USB2_HOST_DELAY)
+#define HS_NSECS_ISO(bytes) (((38 * 8 * 2083) \
+ + (2083UL * (3 + BitTime(bytes))))/1000 \
+ + USB2_HOST_DELAY)
+#define HS_USECS(bytes) NS_TO_US(HS_NSECS(bytes))
+#define HS_USECS_ISO(bytes) NS_TO_US(HS_NSECS_ISO(bytes))
+
+extern long usb_calc_bus_time(int speed, int is_input,
+ int isoc, int bytecount);
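
Worked example of the high-speed macros above; the numbers follow directly from the integer arithmetic, with truncation and rounding as shown:

/*
 * BitTime(512)  = 7 * 8 * 512 / 6                       = 4778 bit times
 * HS_NSECS(512) = (55*8*2083 + 2083*(3 + 4778)) / 1000
 *                 + USB2_HOST_DELAY                     = 10880 ns
 * HS_USECS(512) = NS_TO_US(10880)                       = 11 us
 */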
+
+/*-------------------------------------------------------------------------*/
+
+extern void usb_set_device_state(struct usb_device *udev,
+ enum usb_device_state new_state);
+
+/*-------------------------------------------------------------------------*/
+
+/* exported only within usbcore */
+
+extern struct list_head usb_bus_list;
+extern struct mutex usb_bus_list_lock;
+extern wait_queue_head_t usb_kill_urb_queue;
+
+extern int usb_find_interface_driver(struct usb_device *dev,
+ struct usb_interface *interface);
+
+#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
+
+#ifdef CONFIG_PM
+extern void usb_root_hub_lost_power(struct usb_device *rhdev);
+extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
+extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
+extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
+#else
+static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
+{
+ return;
+}
+#endif /* CONFIG_PM */
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
+
+struct usb_mon_operations {
+ void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
+ void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
+ void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
+ /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
+};
+
+extern struct usb_mon_operations *mon_ops;
+
+static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
+{
+ if (bus->monitored)
+ (*mon_ops->urb_submit)(bus, urb);
+}
+
+static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
+ int error)
+{
+ if (bus->monitored)
+ (*mon_ops->urb_submit_error)(bus, urb, error);
+}
+
+static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
+ int status)
+{
+ if (bus->monitored)
+ (*mon_ops->urb_complete)(bus, urb, status);
+}
+
+int usb_mon_register(struct usb_mon_operations *ops);
+void usb_mon_deregister(void);
+
+#else
+
+static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) {}
+static inline void usbmon_urb_submit_error(struct usb_bus *bus, struct urb *urb,
+ int error) {}
+static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
+ int status) {}
+
+#endif /* CONFIG_USB_MON || CONFIG_USB_MON_MODULE */
+
+/*-------------------------------------------------------------------------*/
+
+/* random stuff */
+
+#define RUN_CONTEXT (in_irq() ? "in_irq" \
+ : (in_interrupt() ? "in_interrupt" : "can sleep"))
+
+
+/* This rwsem is for use only by the hub driver and ehci-hcd.
+ * Nobody else should touch it.
+ */
+extern struct rw_semaphore ehci_cf_port_reset_rwsem;
+
+/* Keep track of which host controller drivers are loaded */
+#define USB_UHCI_LOADED 0
+#define USB_OHCI_LOADED 1
+#define USB_EHCI_LOADED 2
+extern unsigned long usb_hcds_loaded;
+
+#endif /* __KERNEL__ */
+
+#endif /* __USB_CORE_HCD_H */
diff --git a/include/linux/usb/input.h b/include/linux/usb/input.h
new file mode 100644
index 000000000..0e010b220
--- /dev/null
+++ b/include/linux/usb/input.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2005 Dmitry Torokhov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __LINUX_USB_INPUT_H
+#define __LINUX_USB_INPUT_H
+
+#include <linux/usb.h>
+#include <linux/input.h>
+#include <asm/byteorder.h>
+
+static inline void
+usb_to_input_id(const struct usb_device *dev, struct input_id *id)
+{
+ id->bustype = BUS_USB;
+ id->vendor = le16_to_cpu(dev->descriptor.idVendor);
+ id->product = le16_to_cpu(dev->descriptor.idProduct);
+ id->version = le16_to_cpu(dev->descriptor.bcdDevice);
+}
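
Typical use from a USB input driver's probe() routine looks roughly like this (a sketch; interface_to_usbdev() comes from <linux/usb.h>, which is included above):

static void example_fill_input_id(struct usb_interface *intf,
				  struct input_dev *input_dev)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	usb_to_input_id(udev, &input_dev->id);
	input_dev->dev.parent = &intf->dev;
}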
+
+#endif /* __LINUX_USB_INPUT_H */
diff --git a/include/linux/usb/iowarrior.h b/include/linux/usb/iowarrior.h
new file mode 100644
index 000000000..4fd6513d5
--- /dev/null
+++ b/include/linux/usb/iowarrior.h
@@ -0,0 +1,42 @@
+#ifndef __LINUX_USB_IOWARRIOR_H
+#define __LINUX_USB_IOWARRIOR_H
+
+#define CODEMERCS_MAGIC_NUMBER 0xC0 /* like COde Mercenaries */
+
+/* Define the ioctl commands for reading and writing data */
+#define IOW_WRITE _IOW(CODEMERCS_MAGIC_NUMBER, 1, __u8 *)
+#define IOW_READ _IOW(CODEMERCS_MAGIC_NUMBER, 2, __u8 *)
+
+/*
+ A struct for available device info which is read
+ with the ioctl IOW_GETINFO.
+ To be compatible with 2.4 userspace which didn't have an easy way to get
+ this information.
+*/
+struct iowarrior_info {
+ /* vendor id : supposed to be USB_VENDOR_ID_CODEMERCS in all cases */
+ __u32 vendor;
+ /* product id : depends on type of chip (USB_DEVICE_ID_CODEMERCS_X) */
+ __u32 product;
+ /* the serial number of our chip (if a serial-number is not available
+	 * this is an empty string) */
+ __u8 serial[9];
+ /* revision number of the chip */
+ __u32 revision;
+	/* USB-speed of the device (0=UNKNOWN, 1=LOW, 2=FULL, 3=HIGH) */
+ __u32 speed;
+ /* power consumption of the device in mA */
+ __u32 power;
+ /* the number of the endpoint */
+ __u32 if_num;
+ /* size of the data-packets on this interface */
+ __u32 report_size;
+};
+
+/*
+  Get some device-information (product-id, serial-number, etc.)
+ in order to identify a chip.
+*/
+#define IOW_GETINFO _IOR(CODEMERCS_MAGIC_NUMBER, 3, struct iowarrior_info)
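
From user space the ioctl might be exercised as below (a sketch: the device node path is hypothetical, and this header plus the usual libc headers are assumed to be available):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/iowarrior.h>

static int example_print_info(const char *path)	/* e.g. "/dev/usb/iowarrior0" */
{
	struct iowarrior_info info;
	int fd, ret;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, IOW_GETINFO, &info);
	if (ret == 0)
		printf("vendor %04x product %04x serial %s\n",
		       info.vendor, info.product, (char *)info.serial);
	close(fd);
	return ret;
}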
+
+#endif /* __LINUX_USB_IOWARRIOR_H */
diff --git a/include/linux/usb/irda.h b/include/linux/usb/irda.h
new file mode 100644
index 000000000..e345ceaf7
--- /dev/null
+++ b/include/linux/usb/irda.h
@@ -0,0 +1,151 @@
+/*
+ * USB IrDA Bridge Device Definition
+ */
+
+#ifndef __LINUX_USB_IRDA_H
+#define __LINUX_USB_IRDA_H
+
+/* This device should use Application-specific class */
+
+#define USB_SUBCLASS_IRDA 0x02
+
+/*-------------------------------------------------------------------------*/
+
+/* Class-Specific requests (bRequest field) */
+
+#define USB_REQ_CS_IRDA_RECEIVING 1
+#define USB_REQ_CS_IRDA_CHECK_MEDIA_BUSY 3
+#define USB_REQ_CS_IRDA_RATE_SNIFF 4
+#define USB_REQ_CS_IRDA_UNICAST_LIST 5
+#define USB_REQ_CS_IRDA_GET_CLASS_DESC 6
+
+/*-------------------------------------------------------------------------*/
+
+/* Class-Specific descriptor */
+
+#define USB_DT_CS_IRDA 0x21
+
+/*-------------------------------------------------------------------------*/
+
+/* Data sizes */
+
+#define USB_IRDA_DS_2048 (1 << 5)
+#define USB_IRDA_DS_1024 (1 << 4)
+#define USB_IRDA_DS_512 (1 << 3)
+#define USB_IRDA_DS_256 (1 << 2)
+#define USB_IRDA_DS_128 (1 << 1)
+#define USB_IRDA_DS_64 (1 << 0)
+
+/* Window sizes */
+
+#define USB_IRDA_WS_7 (1 << 6)
+#define USB_IRDA_WS_6 (1 << 5)
+#define USB_IRDA_WS_5 (1 << 4)
+#define USB_IRDA_WS_4 (1 << 3)
+#define USB_IRDA_WS_3 (1 << 2)
+#define USB_IRDA_WS_2 (1 << 1)
+#define USB_IRDA_WS_1 (1 << 0)
+
+/* Min turnaround times in usecs */
+
+#define USB_IRDA_MTT_0 (1 << 7)
+#define USB_IRDA_MTT_10 (1 << 6)
+#define USB_IRDA_MTT_50 (1 << 5)
+#define USB_IRDA_MTT_100 (1 << 4)
+#define USB_IRDA_MTT_500 (1 << 3)
+#define USB_IRDA_MTT_1000 (1 << 2)
+#define USB_IRDA_MTT_5000 (1 << 1)
+#define USB_IRDA_MTT_10000 (1 << 0)
+
+/* Baud rates */
+
+#define USB_IRDA_BR_4000000 (1 << 8)
+#define USB_IRDA_BR_1152000 (1 << 7)
+#define USB_IRDA_BR_576000 (1 << 6)
+#define USB_IRDA_BR_115200 (1 << 5)
+#define USB_IRDA_BR_57600 (1 << 4)
+#define USB_IRDA_BR_38400 (1 << 3)
+#define USB_IRDA_BR_19200 (1 << 2)
+#define USB_IRDA_BR_9600 (1 << 1)
+#define USB_IRDA_BR_2400 (1 << 0)
+
+/* Additional BOFs */
+
+#define USB_IRDA_AB_0 (1 << 7)
+#define USB_IRDA_AB_1 (1 << 6)
+#define USB_IRDA_AB_2 (1 << 5)
+#define USB_IRDA_AB_3 (1 << 4)
+#define USB_IRDA_AB_6 (1 << 3)
+#define USB_IRDA_AB_12 (1 << 2)
+#define USB_IRDA_AB_24 (1 << 1)
+#define USB_IRDA_AB_48 (1 << 0)
+
+/* IRDA Rate Sniff */
+
+#define USB_IRDA_RATE_SNIFF 1
+
+/*-------------------------------------------------------------------------*/
+
+struct usb_irda_cs_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+
+ __le16 bcdSpecRevision;
+ __u8 bmDataSize;
+ __u8 bmWindowSize;
+ __u8 bmMinTurnaroundTime;
+ __le16 wBaudRate;
+ __u8 bmAdditionalBOFs;
+ __u8 bIrdaRateSniff;
+ __u8 bMaxUnicastList;
+} __attribute__ ((packed));
+
+/*-------------------------------------------------------------------------*/
+
+/* Data Format */
+
+#define USB_IRDA_STATUS_MEDIA_BUSY (1 << 7)
+
+/* The following is a 4-bit value used for both
+ * inbound and outbound headers:
+ *
+ * 0 - speed ignored
+ * 1 - 2400 bps
+ * 2 - 9600 bps
+ * 3 - 19200 bps
+ * 4 - 38400 bps
+ * 5 - 57600 bps
+ * 6 - 115200 bps
+ * 7 - 576000 bps
+ * 8 - 1.152 Mbps
+ * 9 - 4 Mbps
+ * 10..15 - Reserved
+ */
+#define USB_IRDA_STATUS_LINK_SPEED 0x0f
+
+/* The following is a 4-bit value used only for
+ * outbound header:
+ *
+ * 0 - No change (BOF ignored)
+ * 1 - 48 BOFs
+ * 2 - 24 BOFs
+ * 3 - 12 BOFs
+ * 4 - 6 BOFs
+ * 5 - 3 BOFs
+ * 6 - 2 BOFs
+ * 7 - 1 BOFs
+ * 8 - 0 BOFs
+ * 9..15 - Reserved
+ */
+#define USB_IRDA_EXTRA_BOFS 0xf0
+
+struct usb_irda_inbound_header {
+ __u8 bmStatus;
+};
+
+struct usb_irda_outbound_header {
+ __u8 bmChange;
+};
+
+#endif /* __LINUX_USB_IRDA_H */
+
diff --git a/include/linux/usb/isp116x.h b/include/linux/usb/isp116x.h
new file mode 100644
index 000000000..96ca114e8
--- /dev/null
+++ b/include/linux/usb/isp116x.h
@@ -0,0 +1,33 @@
+/*
+ * Board initialization code should put one of these into dev->platform_data
+ * and place the isp116x onto platform_bus.
+ */
+
+#ifndef __LINUX_USB_ISP116X_H
+#define __LINUX_USB_ISP116X_H
+
+struct isp116x_platform_data {
+ /* Enable internal resistors on downstream ports */
+ unsigned sel15Kres:1;
+ /* On-chip overcurrent detection */
+ unsigned oc_enable:1;
+ /* INT output polarity */
+ unsigned int_act_high:1;
+ /* INT edge or level triggered */
+ unsigned int_edge_triggered:1;
+	/* Enable wakeup by devices on the USB bus (e.g. wakeup
+	   by attachment/detachment or by device activity
+	   such as moving a mouse). When chosen, this option
+	   prevents the internal clock from being stopped,
+	   thereby increasing power consumption in the suspended state. */
+ unsigned remote_wakeup_enable:1;
+ /* Inter-io delay (ns). The chip is picky about access timings; it
+ expects at least:
+ 150ns delay between consecutive accesses to DATA_REG,
+ 300ns delay between access to ADDR_REG and DATA_REG
+ OE, WE MUST NOT be changed during these intervals
+ */
+ void (*delay) (struct device *dev, int delay);
+};
+
+#endif /* __LINUX_USB_ISP116X_H */
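
Example: a hypothetical board-support sketch (not part of this header) showing how the delay() hook and the bit-field options might be wired up; ndelay() is used here as the simplest way to honor the timing note above.

#include <linux/delay.h>
#include <linux/usb/isp116x.h>

/* busy-wait for at least the requested number of nanoseconds */
static void board_isp116x_delay(struct device *dev, int delay)
{
	ndelay(delay);
}

static struct isp116x_platform_data board_isp116x_pdata = {
	.sel15Kres		= 1,	/* use the internal resistors */
	.oc_enable		= 1,
	.int_act_high		= 0,
	.int_edge_triggered	= 0,
	.remote_wakeup_enable	= 0,
	.delay			= board_isp116x_delay,
};
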
diff --git a/include/linux/usb/isp1301.h b/include/linux/usb/isp1301.h
new file mode 100644
index 000000000..d3a851c28
--- /dev/null
+++ b/include/linux/usb/isp1301.h
@@ -0,0 +1,80 @@
+/*
+ * NXP ISP1301 USB transceiver driver
+ *
+ * Copyright (C) 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_ISP1301_H
+#define __LINUX_USB_ISP1301_H
+
+#include <linux/of.h>
+
+/* I2C Register definitions: */
+
+#define ISP1301_I2C_MODE_CONTROL_1 0x04 /* u8 read, set, +1 clear */
+
+#define MC1_SPEED_REG (1 << 0)
+#define MC1_SUSPEND_REG (1 << 1)
+#define MC1_DAT_SE0 (1 << 2)
+#define MC1_TRANSPARENT (1 << 3)
+#define MC1_BDIS_ACON_EN (1 << 4)
+#define MC1_OE_INT_EN (1 << 5)
+#define MC1_UART_EN (1 << 6)
+#define MC1_MASK 0x7f
+
+#define ISP1301_I2C_MODE_CONTROL_2 0x12 /* u8 read, set, +1 clear */
+
+#define MC2_GLOBAL_PWR_DN (1 << 0)
+#define MC2_SPD_SUSP_CTRL (1 << 1)
+#define MC2_BI_DI (1 << 2)
+#define MC2_TRANSP_BDIR0 (1 << 3)
+#define MC2_TRANSP_BDIR1 (1 << 4)
+#define MC2_AUDIO_EN (1 << 5)
+#define MC2_PSW_EN (1 << 6)
+#define MC2_EN2V7 (1 << 7)
+
+#define ISP1301_I2C_OTG_CONTROL_1 0x06 /* u8 read, set, +1 clear */
+
+#define OTG1_DP_PULLUP (1 << 0)
+#define OTG1_DM_PULLUP (1 << 1)
+#define OTG1_DP_PULLDOWN (1 << 2)
+#define OTG1_DM_PULLDOWN (1 << 3)
+#define OTG1_ID_PULLDOWN (1 << 4)
+#define OTG1_VBUS_DRV (1 << 5)
+#define OTG1_VBUS_DISCHRG (1 << 6)
+#define OTG1_VBUS_CHRG (1 << 7)
+
+#define ISP1301_I2C_OTG_CONTROL_2 0x10 /* u8 readonly */
+
+#define OTG_B_SESS_END (1 << 6)
+#define OTG_B_SESS_VLD (1 << 7)
+
+#define ISP1301_I2C_INTERRUPT_SOURCE 0x8
+#define ISP1301_I2C_INTERRUPT_LATCH 0xA
+#define ISP1301_I2C_INTERRUPT_FALLING 0xC
+#define ISP1301_I2C_INTERRUPT_RISING 0xE
+
+#define INT_VBUS_VLD (1 << 0)
+#define INT_SESS_VLD (1 << 1)
+#define INT_DP_HI (1 << 2)
+#define INT_ID_GND (1 << 3)
+#define INT_DM_HI (1 << 4)
+#define INT_ID_FLOAT (1 << 5)
+#define INT_BDIS_ACON (1 << 6)
+#define INT_CR_INT (1 << 7)
+
+#define ISP1301_I2C_REG_CLEAR_ADDR 1 /* Register Address Modifier */
+
+struct i2c_client *isp1301_get_client(struct device_node *node);
+
+#endif /* __LINUX_USB_ISP1301_H */
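
Example: a sketch (not part of this header) of the "write the register to set bits, write register + 1 to clear bits" convention noted in the comments above; I2C client lookup and error handling are elided.

#include <linux/i2c.h>
#include <linux/usb/isp1301.h>

static int isp1301_set_bits(struct i2c_client *client, u8 reg, u8 bits)
{
	return i2c_smbus_write_byte_data(client, reg, bits);
}

static int isp1301_clear_bits(struct i2c_client *client, u8 reg, u8 bits)
{
	/* the clear address is the set address with bit 0 raised */
	return i2c_smbus_write_byte_data(client,
					 reg | ISP1301_I2C_REG_CLEAR_ADDR,
					 bits);
}
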
diff --git a/include/linux/usb/isp1362.h b/include/linux/usb/isp1362.h
new file mode 100644
index 000000000..642684bb9
--- /dev/null
+++ b/include/linux/usb/isp1362.h
@@ -0,0 +1,46 @@
+/*
+ * board initialization code should put one of these into dev->platform_data
+ * and place the isp1362 onto platform_bus.
+ */
+
+#ifndef __LINUX_USB_ISP1362_H__
+#define __LINUX_USB_ISP1362_H__
+
+struct isp1362_platform_data {
+ /* Enable internal pulldown resistors on downstream ports */
+ unsigned sel15Kres:1;
+ /* Clock cannot be stopped */
+ unsigned clknotstop:1;
+ /* On-chip overcurrent protection */
+ unsigned oc_enable:1;
+ /* INT output polarity */
+ unsigned int_act_high:1;
+ /* INT edge or level triggered */
+ unsigned int_edge_triggered:1;
+ /* DREQ output polarity */
+ unsigned dreq_act_high:1;
+ /* DACK input polarity */
+ unsigned dack_act_high:1;
+ /* chip can be resumed via H_WAKEUP pin */
+ unsigned remote_wakeup_connected:1;
+ /* Switch or not to switch (keep always powered) */
+ unsigned no_power_switching:1;
+ /* Ganged port power switching (0) or individual port power switching (1) */
+ unsigned power_switching_mode:1;
+ /* Given port_power, msec/2 after power on till power good */
+ u8 potpg;
+ /* Hardware reset set/clear */
+ void (*reset) (struct device *dev, int set);
+ /* Clock start/stop */
+ void (*clock) (struct device *dev, int start);
+ /* Inter-io delay (ns). The chip is picky about access timings; it
+ * expects at least:
+ * 110ns delay between consecutive accesses to DATA_REG,
+ * 300ns delay between access to ADDR_REG and DATA_REG (registers)
+ * 462ns delay between access to ADDR_REG and DATA_REG (buffer memory)
+ * WE MUST NOT be activated during these intervals (even without CS!)
+ */
+ void (*delay) (struct device *dev, unsigned int delay);
+};
+
+#endif
diff --git a/include/linux/usb/isp1760.h b/include/linux/usb/isp1760.h
new file mode 100644
index 000000000..de7de53c5
--- /dev/null
+++ b/include/linux/usb/isp1760.h
@@ -0,0 +1,18 @@
+/*
+ * board initialization should put one of these into dev->platform_data
+ * and place the isp1760 onto platform_bus named "isp1760-hcd".
+ */
+
+#ifndef __LINUX_USB_ISP1760_H
+#define __LINUX_USB_ISP1760_H
+
+struct isp1760_platform_data {
+ unsigned is_isp1761:1; /* Chip is ISP1761 */
+ unsigned bus_width_16:1; /* 16/32-bit data bus width */
+ unsigned port1_otg:1; /* Port 1 supports OTG */
+ unsigned analog_oc:1; /* Analog overcurrent */
+ unsigned dack_polarity_high:1; /* DACK active high */
+ unsigned dreq_polarity_high:1; /* DREQ active high */
+};
+
+#endif /* __LINUX_USB_ISP1760_H */
diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h
new file mode 100644
index 000000000..a4ba31ab2
--- /dev/null
+++ b/include/linux/usb/m66592.h
@@ -0,0 +1,46 @@
+/*
+ * M66592 driver platform data
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __LINUX_USB_M66592_H
+#define __LINUX_USB_M66592_H
+
+#define M66592_PLATDATA_XTAL_12MHZ 0x01
+#define M66592_PLATDATA_XTAL_24MHZ 0x02
+#define M66592_PLATDATA_XTAL_48MHZ 0x03
+
+struct m66592_platdata {
+ /* one = on chip controller, zero = external controller */
+ unsigned on_chip:1;
+
+ /* one = big endian, zero = little endian */
+ unsigned endian:1;
+
+ /* (external controller only) M66592_PLATDATA_XTAL_nnMHZ */
+ unsigned xtal:2;
+
+ /* (external controller only) one = 3.3V, zero = 1.5V */
+ unsigned vif:1;
+
+ /* (external controller only) set one = WR0_N shorted to WR1_N */
+ unsigned wr0_shorted_to_wr1:1;
+};
+
+#endif /* __LINUX_USB_M66592_H */
+
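
Example: hypothetical platform data (not part of this header) for a board using an external M66592 controller.

#include <linux/usb/m66592.h>

static struct m66592_platdata board_m66592_pdata = {
	.on_chip		= 0,	/* external controller */
	.endian			= 0,	/* little-endian bus */
	.xtal			= M66592_PLATDATA_XTAL_24MHZ,
	.vif			= 1,	/* 3.3 V interface */
	.wr0_shorted_to_wr1	= 0,
};
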
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
new file mode 100644
index 000000000..7dbecf9a4
--- /dev/null
+++ b/include/linux/usb/msm_hsusb.h
@@ -0,0 +1,171 @@
+/* linux/include/asm-arm/arch-msm/hsusb.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_HSUSB_H
+#define __ASM_ARCH_MSM_HSUSB_H
+
+#include <linux/types.h>
+#include <linux/usb/otg.h>
+#include <linux/clk.h>
+
+/**
+ * OTG control
+ *
+ * OTG_NO_CONTROL	Id/VBUS notifications not required. Useful in a
+ *			host-only configuration.
+ * OTG_PHY_CONTROL	Id/VBUS notifications come from the USB PHY.
+ * OTG_PMIC_CONTROL	Id/VBUS notifications come from PMIC hardware.
+ * OTG_USER_CONTROL	Id/VBUS notifications come from user space via sysfs.
+ *
+ */
+enum otg_control_type {
+ OTG_NO_CONTROL = 0,
+ OTG_PHY_CONTROL,
+ OTG_PMIC_CONTROL,
+ OTG_USER_CONTROL,
+};
+
+/**
+ * PHY used in
+ *
+ * INVALID_PHY Unsupported PHY
+ * CI_45NM_INTEGRATED_PHY Chipidea 45nm integrated PHY
+ * SNPS_28NM_INTEGRATED_PHY	Synopsys 28nm integrated PHY
+ *
+ */
+enum msm_usb_phy_type {
+ INVALID_PHY = 0,
+ CI_45NM_INTEGRATED_PHY,
+ SNPS_28NM_INTEGRATED_PHY,
+};
+
+#define IDEV_CHG_MAX 1500
+#define IUNIT 100
+
+/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
+ * process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD	Waiting for data pin contact.
+ * USB_CHG_STATE_DCD_DONE	Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE	Primary detection is completed (distinguishes
+ *                              SDP from DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE	Secondary detection is completed (distinguishes
+ *                              DCP from CDP).
+ * USB_CHG_STATE_DETECTED USB charger type is determined.
+ *
+ */
+enum usb_chg_state {
+ USB_CHG_STATE_UNDEFINED = 0,
+ USB_CHG_STATE_WAIT_FOR_DCD,
+ USB_CHG_STATE_DCD_DONE,
+ USB_CHG_STATE_PRIMARY_DONE,
+ USB_CHG_STATE_SECONDARY_DONE,
+ USB_CHG_STATE_DETECTED,
+};
+
+/**
+ * USB charger types
+ *
+ * USB_INVALID_CHARGER Invalid USB charger.
+ * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
+ * on USB2.0 compliant host/hub.
+ * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
+ * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
+ * IDEV_CHG_MAX can be drawn irrespective of USB state.
+ *
+ */
+enum usb_chg_type {
+ USB_INVALID_CHARGER = 0,
+ USB_SDP_CHARGER,
+ USB_DCP_CHARGER,
+ USB_CDP_CHARGER,
+};
+
+/**
+ * struct msm_otg_platform_data - platform device data
+ * for msm_otg driver.
+ * @phy_init_seq: PHY configuration sequence values. A value of -1 is reserved as
+ *              "do not overwrite the default value at this address".
+ * @phy_init_sz: PHY configuration sequence size.
+ * @vbus_power: VBUS power on/off routine.
+ * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
+ * @mode: Supported mode (OTG/peripheral/host).
+ * @otg_control: OTG switch controlled by user/Id pin
+ */
+struct msm_otg_platform_data {
+ int *phy_init_seq;
+ int phy_init_sz;
+ void (*vbus_power)(bool on);
+ unsigned power_budget;
+ enum usb_dr_mode mode;
+ enum otg_control_type otg_control;
+ enum msm_usb_phy_type phy_type;
+ void (*setup_gpio)(enum usb_otg_state state);
+};
+
+/**
+ * struct msm_otg: OTG driver data. Shared by HCD and DCD.
+ * @otg: USB OTG Transceiver structure.
+ * @pdata: otg device platform data.
+ * @irq: IRQ number assigned for HSUSB controller.
+ * @clk: clock struct of usb_hs_clk.
+ * @pclk: clock struct of usb_hs_pclk.
+ * @core_clk: clock struct of usb_hs_core_clk.
+ * @regs: ioremapped register base address.
+ * @inputs: OTG state machine inputs (Id, SessValid, etc.).
+ * @sm_work: OTG state machine work.
+ * @in_lpm: indicates low power mode (LPM) state.
+ * @async_int: Async interrupt arrived.
+ * @cur_power: The amount of mA available from downstream port.
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
+ * @chg_type: The type of charger attached.
+ * @dcd_retries: The retry count used to track the data contact
+ *               detection process.
+ */
+struct msm_otg {
+ struct usb_phy phy;
+ struct msm_otg_platform_data *pdata;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *core_clk;
+ void __iomem *regs;
+#define ID 0
+#define B_SESS_VLD 1
+ unsigned long inputs;
+ struct work_struct sm_work;
+ atomic_t in_lpm;
+ int async_int;
+ unsigned cur_power;
+ int phy_number;
+ struct delayed_work chg_work;
+ enum usb_chg_state chg_state;
+ enum usb_chg_type chg_type;
+ u8 dcd_retries;
+ struct regulator *v3p3;
+ struct regulator *v1p8;
+ struct regulator *vddcx;
+
+ struct reset_control *phy_rst;
+ struct reset_control *link_rst;
+ int vdd_levels[3];
+};
+
+#endif
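
Example: hypothetical board data (not part of this header) illustrating the @phy_init_seq convention, where -1 leaves the PHY default in place; the sequence values here are placeholders only.

#include <linux/kernel.h>
#include <linux/usb/msm_hsusb.h>

static int board_phy_init_seq[] = { 0x44, 0x6a, -1, -1 };

static struct msm_otg_platform_data board_otg_pdata = {
	.phy_init_seq	= board_phy_init_seq,
	.phy_init_sz	= ARRAY_SIZE(board_phy_init_seq),
	.power_budget	= 500,			/* mA */
	.mode		= USB_DR_MODE_OTG,
	.otg_control	= OTG_PHY_CONTROL,
	.phy_type	= CI_45NM_INTEGRATED_PHY,
};
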
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
new file mode 100644
index 000000000..a29f6030a
--- /dev/null
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__
+#define __LINUX_USB_GADGET_MSM72K_UDC_H__
+
+/* USB phy selector - in TCSR address range */
+#define USB2_PHY_SEL 0xfd4ab000
+
+#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
+#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
+#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
+
+#define USB_USBCMD (MSM_USB_BASE + 0x0140)
+#define USB_PORTSC (MSM_USB_BASE + 0x0184)
+#define USB_OTGSC (MSM_USB_BASE + 0x01A4)
+#define USB_USBMODE (MSM_USB_BASE + 0x01A8)
+#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240)
+#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278)
+
+#define USBCMD_RESET 2
+#define USB_USBINTR (MSM_USB_BASE + 0x0148)
+
+#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
+#define PORTSC_PTS_MASK (3 << 30)
+#define PORTSC_PTS_ULPI (2 << 30)
+#define PORTSC_PTS_SERIAL (3 << 30)
+
+#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170)
+#define ULPI_RUN (1 << 30)
+#define ULPI_WRITE (1 << 29)
+#define ULPI_READ (0 << 29)
+#define ULPI_ADDR(n) (((n) & 255) << 16)
+#define ULPI_DATA(n) ((n) & 255)
+#define ULPI_DATA_READ(n) (((n) >> 8) & 255)
+
+/* synopsys 28nm phy registers */
+#define ULPI_PWR_CLK_MNG_REG 0x88
+#define OTG_COMP_DISABLE BIT(0)
+
+#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
+#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
+#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
+#define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */
+
+/* OTG definitions */
+#define OTGSC_INTSTS_MASK (0x7f << 16)
+#define OTGSC_ID (1 << 8)
+#define OTGSC_BSV (1 << 11)
+#define OTGSC_IDIS (1 << 16)
+#define OTGSC_BSVIS (1 << 19)
+#define OTGSC_IDIE (1 << 24)
+#define OTGSC_BSVIE (1 << 27)
+
+#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */
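
Example: a simplified sketch (not part of this header) of a ULPI register write through the viewport defined above; the register offset is spelled out because USB_ULPI_VIEWPORT expects the board's MSM_USB_BASE macro, and the timeout handling is illustrative only.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int example_ulpi_write(void __iomem *regs, u8 reg, u8 val)
{
	int limit = 10000;

	/* 0x0170 is the USB_ULPI_VIEWPORT offset from the controller base */
	writel(ULPI_RUN | ULPI_WRITE | ULPI_ADDR(reg) | ULPI_DATA(val),
	       regs + 0x0170);

	/* the controller clears ULPI_RUN when the transaction completes */
	while (readl(regs + 0x0170) & ULPI_RUN) {
		if (--limit == 0)
			return -ETIMEDOUT;
		udelay(1);
	}
	return 0;
}
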
diff --git a/include/linux/usb/musb-omap.h b/include/linux/usb/musb-omap.h
new file mode 100644
index 000000000..7774c5986
--- /dev/null
+++ b/include/linux/usb/musb-omap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2011-2012 by Texas Instruments
+ *
+ * The Inventra Controller Driver for Linux is free software; you
+ * can redistribute it and/or modify it under the terms of the GNU
+ * General Public License version 2 as published by the Free Software
+ * Foundation.
+ */
+
+#ifndef __MUSB_OMAP_H__
+#define __MUSB_OMAP_H__
+
+enum omap_musb_vbus_id_status {
+ OMAP_MUSB_UNKNOWN = 0,
+ OMAP_MUSB_ID_GROUND,
+ OMAP_MUSB_ID_FLOAT,
+ OMAP_MUSB_VBUS_VALID,
+ OMAP_MUSB_VBUS_OFF,
+};
+
+#if (defined(CONFIG_USB_MUSB_OMAP2PLUS) || \
+ defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE))
+void omap_musb_mailbox(enum omap_musb_vbus_id_status status);
+#else
+static inline void omap_musb_mailbox(enum omap_musb_vbus_id_status status)
+{
+}
+#endif
+
+#endif /* __MUSB_OMAP_H__ */
diff --git a/include/linux/usb/musb-ux500.h b/include/linux/usb/musb-ux500.h
new file mode 100644
index 000000000..1e2c7130f
--- /dev/null
+++ b/include/linux/usb/musb-ux500.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2013 ST-Ericsson AB
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MUSB_UX500_H__
+#define __MUSB_UX500_H__
+
+enum ux500_musb_vbus_id_status {
+ UX500_MUSB_NONE = 0,
+ UX500_MUSB_VBUS,
+ UX500_MUSB_ID,
+ UX500_MUSB_CHARGER,
+ UX500_MUSB_ENUMERATED,
+ UX500_MUSB_RIDA,
+ UX500_MUSB_RIDB,
+ UX500_MUSB_RIDC,
+ UX500_MUSB_PREPARE,
+ UX500_MUSB_CLEAN,
+};
+
+#endif /* __MUSB_UX500_H__ */
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
new file mode 100644
index 000000000..a4ee1b582
--- /dev/null
+++ b/include/linux/usb/musb.h
@@ -0,0 +1,155 @@
+/*
+ * This is used for both the host and peripheral modes of the driver for
+ * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC.
+ *
+ * Board initialization should put one of these into dev->platform_data,
+ * probably on some platform_device named "musb-hdrc". It encapsulates
+ * key configuration differences between boards.
+ */
+
+#ifndef __LINUX_USB_MUSB_H
+#define __LINUX_USB_MUSB_H
+
+/* The USB role is defined by the connector used on the board, so long as
+ * standards are being followed. (Developer boards sometimes won't.)
+ */
+enum musb_mode {
+ MUSB_UNDEFINED = 0,
+ MUSB_HOST, /* A or Mini-A connector */
+ MUSB_PERIPHERAL, /* B or Mini-B connector */
+ MUSB_OTG /* Mini-AB connector */
+};
+
+struct clk;
+
+enum musb_fifo_style {
+ FIFO_RXTX,
+ FIFO_TX,
+ FIFO_RX
+} __attribute__ ((packed));
+
+enum musb_buf_mode {
+ BUF_SINGLE,
+ BUF_DOUBLE
+} __attribute__ ((packed));
+
+struct musb_fifo_cfg {
+ u8 hw_ep_num;
+ enum musb_fifo_style style;
+ enum musb_buf_mode mode;
+ u16 maxpacket;
+};
+
+#define MUSB_EP_FIFO(ep, st, m, pkt) \
+{ \
+ .hw_ep_num = ep, \
+ .style = st, \
+ .mode = m, \
+ .maxpacket = pkt, \
+}
+
+#define MUSB_EP_FIFO_SINGLE(ep, st, pkt) \
+ MUSB_EP_FIFO(ep, st, BUF_SINGLE, pkt)
+
+#define MUSB_EP_FIFO_DOUBLE(ep, st, pkt) \
+ MUSB_EP_FIFO(ep, st, BUF_DOUBLE, pkt)
+
+struct musb_hdrc_eps_bits {
+ const char name[16];
+ u8 bits;
+};
+
+struct musb_hdrc_config {
+ struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */
+ unsigned fifo_cfg_size; /* size of the fifo configuration */
+
+ /* MUSB configuration-specific details */
+ unsigned multipoint:1; /* multipoint device */
+ unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */
+ unsigned soft_con:1 __deprecated; /* soft connect required */
+	unsigned utm_16:1 __deprecated; /* UTM data width is 16 bits */
+ unsigned big_endian:1; /* true if CPU uses big-endian */
+	unsigned mult_bulk_tx:1; /* Tx ep required for multi-packet bulk */
+	unsigned mult_bulk_rx:1; /* Rx ep required for multi-packet bulk */
+	unsigned high_iso_tx:1; /* Tx ep required for HB iso */
+	unsigned high_iso_rx:1; /* Rx ep required for HB iso */
+ unsigned dma:1 __deprecated; /* supports DMA */
+ unsigned vendor_req:1 __deprecated; /* vendor registers required */
+
+ /* need to explicitly de-assert the port reset after resume? */
+ unsigned host_port_deassert_reset_at_resume:1;
+
+ u8 num_eps; /* number of endpoints _with_ ep0 */
+ u8 dma_channels __deprecated; /* number of dma channels */
+ u8 dyn_fifo_size; /* dynamic size in bytes */
+ u8 vendor_ctrl __deprecated; /* vendor control reg width */
+	u8 vendor_stat __deprecated; /* vendor status reg width */
+ u8 dma_req_chan __deprecated; /* bitmask for required dma channels */
+ u8 ram_bits; /* ram address size */
+
+ struct musb_hdrc_eps_bits *eps_bits __deprecated;
+#ifdef CONFIG_BLACKFIN
+ /* A GPIO controlling VRSEL in Blackfin */
+ unsigned int gpio_vrsel;
+ unsigned int gpio_vrsel_active;
+ /* musb CLKIN in Blackfin in MHZ */
+ unsigned char clkin;
+#endif
+
+};
+
+struct musb_hdrc_platform_data {
+ /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */
+ u8 mode;
+
+ /* for clk_get() */
+ const char *clock;
+
+ /* (HOST or OTG) switch VBUS on/off */
+ int (*set_vbus)(struct device *dev, int is_on);
+
+ /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */
+ u8 power;
+
+ /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */
+ u8 min_power;
+
+ /* (HOST or OTG) msec/2 after VBUS on till power good */
+ u8 potpgt;
+
+ /* (HOST or OTG) program PHY for external Vbus */
+ unsigned extvbus:1;
+
+ /* Power the device on or off */
+ int (*set_power)(int state);
+
+ /* MUSB configuration-specific details */
+ struct musb_hdrc_config *config;
+
+ /* Architecture specific board data */
+ void *board_data;
+
+ /* Platform specific struct musb_ops pointer */
+ const void *platform_ops;
+};
+
+
+/* TUSB 6010 support */
+
+#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */
+#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */
+#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */
+
+#ifdef CONFIG_ARCH_OMAP2
+
+extern int __init tusb6010_setup_interface(
+ struct musb_hdrc_platform_data *data,
+ unsigned ps_refclk, unsigned waitpin,
+ unsigned async_cs, unsigned sync_cs,
+ unsigned irq, unsigned dmachan);
+
+extern int tusb6010_platform_retime(unsigned is_refclk);
+
+#endif /* OMAP2 */
+
+#endif /* __LINUX_USB_MUSB_H */
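
Example: a hypothetical board FIFO layout (not part of this header) built with the MUSB_EP_FIFO_* helpers and referenced from a musb_hdrc_config.

#include <linux/kernel.h>
#include <linux/usb/musb.h>

static struct musb_fifo_cfg board_fifo_cfg[] = {
	MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
	MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
	MUSB_EP_FIFO_DOUBLE(2, FIFO_TX, 512),	/* double-buffered bulk */
	MUSB_EP_FIFO_DOUBLE(2, FIFO_RX, 512),
	MUSB_EP_FIFO_SINGLE(3, FIFO_RXTX, 256),
};

static struct musb_hdrc_config board_musb_config = {
	.fifo_cfg	= board_fifo_cfg,
	.fifo_cfg_size	= ARRAY_SIZE(board_fifo_cfg),
	.multipoint	= 1,
	.num_eps	= 4,	/* including ep0 */
	.ram_bits	= 12,
};
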
diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h
new file mode 100644
index 000000000..148b8fa5b
--- /dev/null
+++ b/include/linux/usb/net2280.h
@@ -0,0 +1,443 @@
+/*
+ * NetChip 2280 high/full speed USB device controller.
+ * Unlike many such controllers, this one talks PCI.
+ *
+ * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_USB_NET2280_H
+#define __LINUX_USB_NET2280_H
+
+/*-------------------------------------------------------------------------*/
+
+/* NET2280 MEMORY MAPPED REGISTERS
+ *
+ * The register layout came from the chip documentation, and the bit
+ * number definitions were extracted from chip specification.
+ *
+ * Use the shift operator ('<<') to build bit masks, with readl/writel
+ * to access the registers through PCI.
+ */
+
+/* main registers, BAR0 + 0x0000 */
+struct net2280_regs {
+ /* offset 0x0000 */
+ u32 devinit;
+#define LOCAL_CLOCK_FREQUENCY 8
+#define FORCE_PCI_RESET 7
+#define PCI_ID 6
+#define PCI_ENABLE 5
+#define FIFO_SOFT_RESET 4
+#define CFG_SOFT_RESET 3
+#define PCI_SOFT_RESET 2
+#define USB_SOFT_RESET 1
+#define M8051_RESET 0
+ u32 eectl;
+#define EEPROM_ADDRESS_WIDTH 23
+#define EEPROM_CHIP_SELECT_ACTIVE 22
+#define EEPROM_PRESENT 21
+#define EEPROM_VALID 20
+#define EEPROM_BUSY 19
+#define EEPROM_CHIP_SELECT_ENABLE 18
+#define EEPROM_BYTE_READ_START 17
+#define EEPROM_BYTE_WRITE_START 16
+#define EEPROM_READ_DATA 8
+#define EEPROM_WRITE_DATA 0
+ u32 eeclkfreq;
+ u32 _unused0;
+ /* offset 0x0010 */
+
+ u32 pciirqenb0; /* interrupt PCI master ... */
+#define SETUP_PACKET_INTERRUPT_ENABLE 7
+#define ENDPOINT_F_INTERRUPT_ENABLE 6
+#define ENDPOINT_E_INTERRUPT_ENABLE 5
+#define ENDPOINT_D_INTERRUPT_ENABLE 4
+#define ENDPOINT_C_INTERRUPT_ENABLE 3
+#define ENDPOINT_B_INTERRUPT_ENABLE 2
+#define ENDPOINT_A_INTERRUPT_ENABLE 1
+#define ENDPOINT_0_INTERRUPT_ENABLE 0
+ u32 pciirqenb1;
+#define PCI_INTERRUPT_ENABLE 31
+#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
+#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
+#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
+#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
+#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
+#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18
+#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
+#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
+#define GPIO_INTERRUPT_ENABLE 13
+#define DMA_D_INTERRUPT_ENABLE 12
+#define DMA_C_INTERRUPT_ENABLE 11
+#define DMA_B_INTERRUPT_ENABLE 10
+#define DMA_A_INTERRUPT_ENABLE 9
+#define EEPROM_DONE_INTERRUPT_ENABLE 8
+#define VBUS_INTERRUPT_ENABLE 7
+#define CONTROL_STATUS_INTERRUPT_ENABLE 6
+#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
+#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
+#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
+#define RESUME_INTERRUPT_ENABLE 1
+#define SOF_INTERRUPT_ENABLE 0
+ u32 cpu_irqenb0; /* ... or onboard 8051 */
+#define SETUP_PACKET_INTERRUPT_ENABLE 7
+#define ENDPOINT_F_INTERRUPT_ENABLE 6
+#define ENDPOINT_E_INTERRUPT_ENABLE 5
+#define ENDPOINT_D_INTERRUPT_ENABLE 4
+#define ENDPOINT_C_INTERRUPT_ENABLE 3
+#define ENDPOINT_B_INTERRUPT_ENABLE 2
+#define ENDPOINT_A_INTERRUPT_ENABLE 1
+#define ENDPOINT_0_INTERRUPT_ENABLE 0
+ u32 cpu_irqenb1;
+#define CPU_INTERRUPT_ENABLE 31
+#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
+#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
+#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
+#define PCI_INTA_INTERRUPT_ENABLE 24
+#define PCI_PME_INTERRUPT_ENABLE 23
+#define PCI_SERR_INTERRUPT_ENABLE 22
+#define PCI_PERR_INTERRUPT_ENABLE 21
+#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
+#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
+#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
+#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
+#define GPIO_INTERRUPT_ENABLE 13
+#define DMA_D_INTERRUPT_ENABLE 12
+#define DMA_C_INTERRUPT_ENABLE 11
+#define DMA_B_INTERRUPT_ENABLE 10
+#define DMA_A_INTERRUPT_ENABLE 9
+#define EEPROM_DONE_INTERRUPT_ENABLE 8
+#define VBUS_INTERRUPT_ENABLE 7
+#define CONTROL_STATUS_INTERRUPT_ENABLE 6
+#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
+#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
+#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
+#define RESUME_INTERRUPT_ENABLE 1
+#define SOF_INTERRUPT_ENABLE 0
+
+ /* offset 0x0020 */
+ u32 _unused1;
+ u32 usbirqenb1;
+#define USB_INTERRUPT_ENABLE 31
+#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
+#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
+#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
+#define PCI_INTA_INTERRUPT_ENABLE 24
+#define PCI_PME_INTERRUPT_ENABLE 23
+#define PCI_SERR_INTERRUPT_ENABLE 22
+#define PCI_PERR_INTERRUPT_ENABLE 21
+#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
+#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
+#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
+#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
+#define GPIO_INTERRUPT_ENABLE 13
+#define DMA_D_INTERRUPT_ENABLE 12
+#define DMA_C_INTERRUPT_ENABLE 11
+#define DMA_B_INTERRUPT_ENABLE 10
+#define DMA_A_INTERRUPT_ENABLE 9
+#define EEPROM_DONE_INTERRUPT_ENABLE 8
+#define VBUS_INTERRUPT_ENABLE 7
+#define CONTROL_STATUS_INTERRUPT_ENABLE 6
+#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
+#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
+#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
+#define RESUME_INTERRUPT_ENABLE 1
+#define SOF_INTERRUPT_ENABLE 0
+ u32 irqstat0;
+#define INTA_ASSERTED 12
+#define SETUP_PACKET_INTERRUPT 7
+#define ENDPOINT_F_INTERRUPT 6
+#define ENDPOINT_E_INTERRUPT 5
+#define ENDPOINT_D_INTERRUPT 4
+#define ENDPOINT_C_INTERRUPT 3
+#define ENDPOINT_B_INTERRUPT 2
+#define ENDPOINT_A_INTERRUPT 1
+#define ENDPOINT_0_INTERRUPT 0
+ u32 irqstat1;
+#define POWER_STATE_CHANGE_INTERRUPT 27
+#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
+#define PCI_PARITY_ERROR_INTERRUPT 25
+#define PCI_INTA_INTERRUPT 24
+#define PCI_PME_INTERRUPT 23
+#define PCI_SERR_INTERRUPT 22
+#define PCI_PERR_INTERRUPT 21
+#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20
+#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19
+#define PCI_RETRY_ABORT_INTERRUPT 17
+#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16
+#define SOF_DOWN_INTERRUPT 14
+#define GPIO_INTERRUPT 13
+#define DMA_D_INTERRUPT 12
+#define DMA_C_INTERRUPT 11
+#define DMA_B_INTERRUPT 10
+#define DMA_A_INTERRUPT 9
+#define EEPROM_DONE_INTERRUPT 8
+#define VBUS_INTERRUPT 7
+#define CONTROL_STATUS_INTERRUPT 6
+#define ROOT_PORT_RESET_INTERRUPT 4
+#define SUSPEND_REQUEST_INTERRUPT 3
+#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2
+#define RESUME_INTERRUPT 1
+#define SOF_INTERRUPT 0
+ /* offset 0x0030 */
+ u32 idxaddr;
+ u32 idxdata;
+ u32 fifoctl;
+#define PCI_BASE2_RANGE 16
+#define IGNORE_FIFO_AVAILABILITY 3
+#define PCI_BASE2_SELECT 2
+#define FIFO_CONFIGURATION_SELECT 0
+ u32 _unused2;
+ /* offset 0x0040 */
+ u32 memaddr;
+#define START 28
+#define DIRECTION 27
+#define FIFO_DIAGNOSTIC_SELECT 24
+#define MEMORY_ADDRESS 0
+ u32 memdata0;
+ u32 memdata1;
+ u32 _unused3;
+ /* offset 0x0050 */
+ u32 gpioctl;
+#define GPIO3_LED_SELECT 12
+#define GPIO3_INTERRUPT_ENABLE 11
+#define GPIO2_INTERRUPT_ENABLE 10
+#define GPIO1_INTERRUPT_ENABLE 9
+#define GPIO0_INTERRUPT_ENABLE 8
+#define GPIO3_OUTPUT_ENABLE 7
+#define GPIO2_OUTPUT_ENABLE 6
+#define GPIO1_OUTPUT_ENABLE 5
+#define GPIO0_OUTPUT_ENABLE 4
+#define GPIO3_DATA 3
+#define GPIO2_DATA 2
+#define GPIO1_DATA 1
+#define GPIO0_DATA 0
+ u32 gpiostat;
+#define GPIO3_INTERRUPT 3
+#define GPIO2_INTERRUPT 2
+#define GPIO1_INTERRUPT 1
+#define GPIO0_INTERRUPT 0
+} __attribute__ ((packed));
+
+/* usb control, BAR0 + 0x0080 */
+struct net2280_usb_regs {
+ /* offset 0x0080 */
+ u32 stdrsp;
+#define STALL_UNSUPPORTED_REQUESTS 31
+#define SET_TEST_MODE 16
+#define GET_OTHER_SPEED_CONFIGURATION 15
+#define GET_DEVICE_QUALIFIER 14
+#define SET_ADDRESS 13
+#define ENDPOINT_SET_CLEAR_HALT 12
+#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11
+#define GET_STRING_DESCRIPTOR_2 10
+#define GET_STRING_DESCRIPTOR_1 9
+#define GET_STRING_DESCRIPTOR_0 8
+#define GET_SET_INTERFACE 6
+#define GET_SET_CONFIGURATION 5
+#define GET_CONFIGURATION_DESCRIPTOR 4
+#define GET_DEVICE_DESCRIPTOR 3
+#define GET_ENDPOINT_STATUS 2
+#define GET_INTERFACE_STATUS 1
+#define GET_DEVICE_STATUS 0
+ u32 prodvendid;
+#define PRODUCT_ID 16
+#define VENDOR_ID 0
+ u32 relnum;
+ u32 usbctl;
+#define SERIAL_NUMBER_INDEX 16
+#define PRODUCT_ID_STRING_ENABLE 13
+#define VENDOR_ID_STRING_ENABLE 12
+#define USB_ROOT_PORT_WAKEUP_ENABLE 11
+#define VBUS_PIN 10
+#define TIMED_DISCONNECT 9
+#define SUSPEND_IMMEDIATELY 7
+#define SELF_POWERED_USB_DEVICE 6
+#define REMOTE_WAKEUP_SUPPORT 5
+#define PME_POLARITY 4
+#define USB_DETECT_ENABLE 3
+#define PME_WAKEUP_ENABLE 2
+#define DEVICE_REMOTE_WAKEUP_ENABLE 1
+#define SELF_POWERED_STATUS 0
+ /* offset 0x0090 */
+ u32 usbstat;
+#define HIGH_SPEED 7
+#define FULL_SPEED 6
+#define GENERATE_RESUME 5
+#define GENERATE_DEVICE_REMOTE_WAKEUP 4
+ u32 xcvrdiag;
+#define FORCE_HIGH_SPEED_MODE 31
+#define FORCE_FULL_SPEED_MODE 30
+#define USB_TEST_MODE 24
+#define LINE_STATE 16
+#define TRANSCEIVER_OPERATION_MODE 2
+#define TRANSCEIVER_SELECT 1
+#define TERMINATION_SELECT 0
+ u32 setup0123;
+ u32 setup4567;
+ /* offset 0x0090 */
+ u32 _unused0;
+ u32 ouraddr;
+#define FORCE_IMMEDIATE 7
+#define OUR_USB_ADDRESS 0
+ u32 ourconfig;
+} __attribute__ ((packed));
+
+/* pci control, BAR0 + 0x0100 */
+struct net2280_pci_regs {
+ /* offset 0x0100 */
+ u32 pcimstctl;
+#define PCI_ARBITER_PARK_SELECT 13
+#define PCI_MULTI_LEVEL_ARBITER 12
+#define PCI_RETRY_ABORT_ENABLE 11
+#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10
+#define DMA_READ_MULTIPLE_ENABLE 9
+#define DMA_READ_LINE_ENABLE 8
+#define PCI_MASTER_COMMAND_SELECT 6
+#define MEM_READ_OR_WRITE 0
+#define IO_READ_OR_WRITE 1
+#define CFG_READ_OR_WRITE 2
+#define PCI_MASTER_START 5
+#define PCI_MASTER_READ_WRITE 4
+#define PCI_MASTER_WRITE 0
+#define PCI_MASTER_READ 1
+#define PCI_MASTER_BYTE_WRITE_ENABLES 0
+ u32 pcimstaddr;
+ u32 pcimstdata;
+ u32 pcimststat;
+#define PCI_ARBITER_CLEAR 2
+#define PCI_EXTERNAL_ARBITER 1
+#define PCI_HOST_MODE 0
+} __attribute__ ((packed));
+
+/* dma control, BAR0 + 0x0180 ... array of four structs like this,
+ * for channels 0..3. see also struct net2280_dma: descriptor
+ * that can be loaded into some of these registers.
+ */
+struct net2280_dma_regs { /* [11.7] */
+ /* offset 0x0180, 0x01a0, 0x01c0, 0x01e0, */
+ u32 dmactl;
+#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25
+#define DMA_CLEAR_COUNT_ENABLE 21
+#define DESCRIPTOR_POLLING_RATE 19
+#define POLL_CONTINUOUS 0
+#define POLL_1_USEC 1
+#define POLL_100_USEC 2
+#define POLL_1_MSEC 3
+#define DMA_VALID_BIT_POLLING_ENABLE 18
+#define DMA_VALID_BIT_ENABLE 17
+#define DMA_SCATTER_GATHER_ENABLE 16
+#define DMA_OUT_AUTO_START_ENABLE 4
+#define DMA_PREEMPT_ENABLE 3
+#define DMA_FIFO_VALIDATE 2
+#define DMA_ENABLE 1
+#define DMA_ADDRESS_HOLD 0
+ u32 dmastat;
+#define DMA_ABORT_DONE_INTERRUPT 27
+#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25
+#define DMA_TRANSACTION_DONE_INTERRUPT 24
+#define DMA_ABORT 1
+#define DMA_START 0
+ u32 _unused0[2];
+ /* offset 0x0190, 0x01b0, 0x01d0, 0x01f0, */
+ u32 dmacount;
+#define VALID_BIT 31
+#define DMA_DIRECTION 30
+#define DMA_DONE_INTERRUPT_ENABLE 29
+#define END_OF_CHAIN 28
+#define DMA_BYTE_COUNT_MASK ((1<<24)-1)
+#define DMA_BYTE_COUNT 0
+ u32 dmaaddr;
+ u32 dmadesc;
+ u32 _unused1;
+} __attribute__ ((packed));
+
+/* dedicated endpoint registers, BAR0 + 0x0200 */
+
+struct net2280_dep_regs { /* [11.8] */
+ /* offset 0x0200, 0x0210, 0x220, 0x230, 0x240 */
+ u32 dep_cfg;
+ /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */
+ u32 dep_rsp;
+ u32 _unused[2];
+} __attribute__ ((packed));
+
+/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
+ * like this, for ep0 then the configurable endpoints A..F
+ * ep0 reserved for control; E and F have only 64 bytes of fifo
+ */
+struct net2280_ep_regs { /* [11.9] */
+ /* offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 */
+ u32 ep_cfg;
+#define ENDPOINT_BYTE_COUNT 16
+#define ENDPOINT_ENABLE 10
+#define ENDPOINT_TYPE 8
+#define ENDPOINT_DIRECTION 7
+#define ENDPOINT_NUMBER 0
+ u32 ep_rsp;
+#define SET_NAK_OUT_PACKETS 15
+#define SET_EP_HIDE_STATUS_PHASE 14
+#define SET_EP_FORCE_CRC_ERROR 13
+#define SET_INTERRUPT_MODE 12
+#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11
+#define SET_NAK_OUT_PACKETS_MODE 10
+#define SET_ENDPOINT_TOGGLE 9
+#define SET_ENDPOINT_HALT 8
+#define CLEAR_NAK_OUT_PACKETS 7
+#define CLEAR_EP_HIDE_STATUS_PHASE 6
+#define CLEAR_EP_FORCE_CRC_ERROR 5
+#define CLEAR_INTERRUPT_MODE 4
+#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3
+#define CLEAR_NAK_OUT_PACKETS_MODE 2
+#define CLEAR_ENDPOINT_TOGGLE 1
+#define CLEAR_ENDPOINT_HALT 0
+ u32 ep_irqenb;
+#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6
+#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5
+#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
+#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
+#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1
+#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
+ u32 ep_stat;
+#define FIFO_VALID_COUNT 24
+#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22
+#define TIMEOUT 21
+#define USB_STALL_SENT 20
+#define USB_IN_NAK_SENT 19
+#define USB_IN_ACK_RCVD 18
+#define USB_OUT_PING_NAK_SENT 17
+#define USB_OUT_ACK_SENT 16
+#define FIFO_OVERFLOW 13
+#define FIFO_UNDERFLOW 12
+#define FIFO_FULL 11
+#define FIFO_EMPTY 10
+#define FIFO_FLUSH 9
+#define SHORT_PACKET_OUT_DONE_INTERRUPT 6
+#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5
+#define NAK_OUT_PACKETS 4
+#define DATA_PACKET_RECEIVED_INTERRUPT 3
+#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
+#define DATA_OUT_PING_TOKEN_INTERRUPT 1
+#define DATA_IN_TOKEN_INTERRUPT 0
+ /* offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 */
+ u32 ep_avail;
+ u32 ep_data;
+ u32 _unused0[2];
+} __attribute__ ((packed));
+
+#endif /* __LINUX_USB_NET2280_H */
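
Example: a sketch (not part of this header) of the bit-number convention described at the top of this file — the #defines are bit positions, so masks are formed with (1 << n) and applied through readl()/writel().

#include <linux/io.h>
#include <linux/types.h>

static bool example_vbus_irq_pending(struct net2280_regs __iomem *regs)
{
	return readl(&regs->irqstat1) & (1 << VBUS_INTERRUPT);
}

static void example_ack_vbus_irq(struct net2280_regs __iomem *regs)
{
	/* status bits are acknowledged by writing them back */
	writel(1 << VBUS_INTERRUPT, &regs->irqstat1);
}
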
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
new file mode 100644
index 000000000..cfe0528cd
--- /dev/null
+++ b/include/linux/usb/of.h
@@ -0,0 +1,45 @@
+/*
+ * OF helpers for usb devices.
+ *
+ * This file is released under the GPLv2
+ */
+
+#ifndef __LINUX_USB_OF_H
+#define __LINUX_USB_OF_H
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/phy.h>
+
+#if IS_ENABLED(CONFIG_OF)
+enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
+enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
+bool of_usb_host_tpl_support(struct device_node *np);
+#else
+static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
+{
+ return USB_DR_MODE_UNKNOWN;
+}
+
+static inline enum usb_device_speed
+of_usb_get_maximum_speed(struct device_node *np)
+{
+ return USB_SPEED_UNKNOWN;
+}
+
+static inline bool of_usb_host_tpl_support(struct device_node *np)
+{
+ return false;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
+enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np);
+#else
+static inline enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np)
+{
+ return USBPHY_INTERFACE_MODE_UNKNOWN;
+}
+
+#endif
+
+#endif /* __LINUX_USB_OF_H */
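
Example: a sketch (not part of this header) of how controller glue code might consume these OF helpers during probe; the fallback choices are illustrative only.

#include <linux/printk.h>
#include <linux/usb/of.h>

static void example_read_usb_props(struct device_node *np)
{
	enum usb_dr_mode mode = of_usb_get_dr_mode(np);
	enum usb_device_speed speed = of_usb_get_maximum_speed(np);

	if (mode == USB_DR_MODE_UNKNOWN)
		mode = USB_DR_MODE_OTG;		/* assume dual-role */
	if (speed == USB_SPEED_UNKNOWN)
		speed = USB_SPEED_HIGH;

	pr_info("dr_mode %d, maximum speed %d, tpl support %d\n",
		mode, speed, of_usb_host_tpl_support(np));
}
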
diff --git a/include/linux/usb/ohci_pdriver.h b/include/linux/usb/ohci_pdriver.h
new file mode 100644
index 000000000..012f2b7eb
--- /dev/null
+++ b/include/linux/usb/ohci_pdriver.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __USB_CORE_OHCI_PDRIVER_H
+#define __USB_CORE_OHCI_PDRIVER_H
+
+/**
+ * struct usb_ohci_pdata - platform_data for generic ohci driver
+ *
+ * @big_endian_desc: BE descriptors
+ * @big_endian_mmio: BE registers
+ * @no_big_frame_no: no big endian frame_no shift
+ * @num_ports: number of ports
+ *
+ * These are general configuration options for the OHCI controller. All of
+ * these options enable workarounds of one sort or another for specific hardware.
+ */
+struct usb_ohci_pdata {
+ unsigned big_endian_desc:1;
+ unsigned big_endian_mmio:1;
+ unsigned no_big_frame_no:1;
+ unsigned int num_ports;
+
+ /* Turn on all power and clocks */
+ int (*power_on)(struct platform_device *pdev);
+ /* Turn off all power and clocks */
+ void (*power_off)(struct platform_device *pdev);
+ /* Turn on only VBUS suspend power and hotplug detection,
+ * turn off everything else */
+ void (*power_suspend)(struct platform_device *pdev);
+};
+
+#endif /* __USB_CORE_OHCI_PDRIVER_H */
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
new file mode 100644
index 000000000..f728f1854
--- /dev/null
+++ b/include/linux/usb/otg-fsm.h
@@ -0,0 +1,246 @@
+/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_USB_OTG_FSM_H
+#define __LINUX_USB_OTG_FSM_H
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+
+#undef VERBOSE
+
+#ifdef VERBOSE
+#define VDBG(fmt, args...) pr_debug("[%s] " fmt , \
+ __func__, ## args)
+#else
+#define VDBG(stuff...) do {} while (0)
+#endif
+
+#ifdef VERBOSE
+#define MPC_LOC printk("Current Location [%s]:[%d]\n", __FILE__, __LINE__)
+#else
+#define MPC_LOC do {} while (0)
+#endif
+
+#define PROTO_UNDEF (0)
+#define PROTO_HOST (1)
+#define PROTO_GADGET (2)
+
+enum otg_fsm_timer {
+ /* Standard OTG timers */
+ A_WAIT_VRISE,
+ A_WAIT_VFALL,
+ A_WAIT_BCON,
+ A_AIDL_BDIS,
+ B_ASE0_BRST,
+ A_BIDL_ADIS,
+
+ /* Auxiliary timers */
+ B_SE0_SRP,
+ B_SRP_FAIL,
+ A_WAIT_ENUM,
+ B_DATA_PLS,
+ B_SSEND_SRP,
+
+ NUM_OTG_FSM_TIMERS,
+};
+
+/* OTG state machine according to the OTG spec */
+struct otg_fsm {
+ /* Input */
+ int id;
+ int adp_change;
+ int power_up;
+ int test_device;
+ int a_bus_drop;
+ int a_bus_req;
+ int a_srp_det;
+ int a_vbus_vld;
+ int b_conn;
+ int a_bus_resume;
+ int a_bus_suspend;
+ int a_conn;
+ int b_bus_req;
+ int b_se0_srp;
+ int b_ssend_srp;
+ int b_sess_vld;
+	/* Auxiliary inputs */
+ int a_sess_vld;
+ int b_bus_resume;
+ int b_bus_suspend;
+
+ /* Output */
+ int data_pulse;
+ int drv_vbus;
+ int loc_conn;
+ int loc_sof;
+ int adp_prb;
+ int adp_sns;
+
+ /* Internal variables */
+ int a_set_b_hnp_en;
+ int b_srp_done;
+ int b_hnp_enable;
+ int a_clr_err;
+
+ /* Informative variables */
+ int a_bus_drop_inf;
+ int a_bus_req_inf;
+ int a_clr_err_inf;
+ int b_bus_req_inf;
+	/* Auxiliary informative variables */
+ int a_suspend_req_inf;
+
+ /* Timeout indicator for timers */
+ int a_wait_vrise_tmout;
+ int a_wait_vfall_tmout;
+ int a_wait_bcon_tmout;
+ int a_aidl_bdis_tmout;
+ int b_ase0_brst_tmout;
+ int a_bidl_adis_tmout;
+
+ struct otg_fsm_ops *ops;
+ struct usb_otg *otg;
+
+	/* Current USB protocol used: 0: undefined; 1: host; 2: gadget */
+ int protocol;
+ struct mutex lock;
+};
+
+struct otg_fsm_ops {
+ void (*chrg_vbus)(struct otg_fsm *fsm, int on);
+ void (*drv_vbus)(struct otg_fsm *fsm, int on);
+ void (*loc_conn)(struct otg_fsm *fsm, int on);
+ void (*loc_sof)(struct otg_fsm *fsm, int on);
+ void (*start_pulse)(struct otg_fsm *fsm);
+ void (*start_adp_prb)(struct otg_fsm *fsm);
+ void (*start_adp_sns)(struct otg_fsm *fsm);
+ void (*add_timer)(struct otg_fsm *fsm, enum otg_fsm_timer timer);
+ void (*del_timer)(struct otg_fsm *fsm, enum otg_fsm_timer timer);
+ int (*start_host)(struct otg_fsm *fsm, int on);
+ int (*start_gadget)(struct otg_fsm *fsm, int on);
+};
+
+
+static inline int otg_chrg_vbus(struct otg_fsm *fsm, int on)
+{
+ if (!fsm->ops->chrg_vbus)
+ return -EOPNOTSUPP;
+ fsm->ops->chrg_vbus(fsm, on);
+ return 0;
+}
+
+static inline int otg_drv_vbus(struct otg_fsm *fsm, int on)
+{
+ if (!fsm->ops->drv_vbus)
+ return -EOPNOTSUPP;
+ if (fsm->drv_vbus != on) {
+ fsm->drv_vbus = on;
+ fsm->ops->drv_vbus(fsm, on);
+ }
+ return 0;
+}
+
+static inline int otg_loc_conn(struct otg_fsm *fsm, int on)
+{
+ if (!fsm->ops->loc_conn)
+ return -EOPNOTSUPP;
+ if (fsm->loc_conn != on) {
+ fsm->loc_conn = on;
+ fsm->ops->loc_conn(fsm, on);
+ }
+ return 0;
+}
+
+static inline int otg_loc_sof(struct otg_fsm *fsm, int on)
+{
+ if (!fsm->ops->loc_sof)
+ return -EOPNOTSUPP;
+ if (fsm->loc_sof != on) {
+ fsm->loc_sof = on;
+ fsm->ops->loc_sof(fsm, on);
+ }
+ return 0;
+}
+
+static inline int otg_start_pulse(struct otg_fsm *fsm)
+{
+ if (!fsm->ops->start_pulse)
+ return -EOPNOTSUPP;
+ if (!fsm->data_pulse) {
+ fsm->data_pulse = 1;
+ fsm->ops->start_pulse(fsm);
+ }
+ return 0;
+}
+
+static inline int otg_start_adp_prb(struct otg_fsm *fsm)
+{
+ if (!fsm->ops->start_adp_prb)
+ return -EOPNOTSUPP;
+ if (!fsm->adp_prb) {
+ fsm->adp_sns = 0;
+ fsm->adp_prb = 1;
+ fsm->ops->start_adp_prb(fsm);
+ }
+ return 0;
+}
+
+static inline int otg_start_adp_sns(struct otg_fsm *fsm)
+{
+ if (!fsm->ops->start_adp_sns)
+ return -EOPNOTSUPP;
+ if (!fsm->adp_sns) {
+ fsm->adp_sns = 1;
+ fsm->ops->start_adp_sns(fsm);
+ }
+ return 0;
+}
+
+static inline int otg_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer timer)
+{
+ if (!fsm->ops->add_timer)
+ return -EOPNOTSUPP;
+ fsm->ops->add_timer(fsm, timer);
+ return 0;
+}
+
+static inline int otg_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer timer)
+{
+ if (!fsm->ops->del_timer)
+ return -EOPNOTSUPP;
+ fsm->ops->del_timer(fsm, timer);
+ return 0;
+}
+
+static inline int otg_start_host(struct otg_fsm *fsm, int on)
+{
+ if (!fsm->ops->start_host)
+ return -EOPNOTSUPP;
+ return fsm->ops->start_host(fsm, on);
+}
+
+static inline int otg_start_gadget(struct otg_fsm *fsm, int on)
+{
+ if (!fsm->ops->start_gadget)
+ return -EOPNOTSUPP;
+ return fsm->ops->start_gadget(fsm, on);
+}
+
+int otg_statemachine(struct otg_fsm *fsm);
+
+#endif /* __LINUX_USB_OTG_FSM_H */
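
Example: a sketch (not part of this header) of controller glue providing a couple of the otg_fsm_ops; otg_statemachine() then drives them through the inline wrappers above, which return -EOPNOTSUPP for ops left NULL.

#include <linux/usb/otg-fsm.h>

static void example_drv_vbus(struct otg_fsm *fsm, int on)
{
	/* switch the board's VBUS supply here */
}

static int example_start_host(struct otg_fsm *fsm, int on)
{
	/* start or stop the host controller here */
	return 0;
}

static struct otg_fsm_ops example_fsm_ops = {
	.drv_vbus	= example_drv_vbus,
	.start_host	= example_start_host,
};
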
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
new file mode 100644
index 000000000..52661c5da
--- /dev/null
+++ b/include/linux/usb/otg.h
@@ -0,0 +1,107 @@
+/* USB OTG (On The Go) defines */
+/*
+ *
+ * These APIs may be used between USB controllers. USB device drivers
+ * (for either host or peripheral roles) don't use these calls; they
+ * continue to use just usb_device and usb_gadget.
+ */
+
+#ifndef __LINUX_USB_OTG_H
+#define __LINUX_USB_OTG_H
+
+#include <linux/phy/phy.h>
+#include <linux/usb/phy.h>
+
+struct usb_otg {
+ u8 default_a;
+
+ struct phy *phy;
+ /* old usb_phy interface */
+ struct usb_phy *usb_phy;
+ struct usb_bus *host;
+ struct usb_gadget *gadget;
+
+ enum usb_otg_state state;
+
+ /* bind/unbind the host controller */
+ int (*set_host)(struct usb_otg *otg, struct usb_bus *host);
+
+ /* bind/unbind the peripheral controller */
+ int (*set_peripheral)(struct usb_otg *otg,
+ struct usb_gadget *gadget);
+
+ /* effective for A-peripheral, ignored for B devices */
+ int (*set_vbus)(struct usb_otg *otg, bool enabled);
+
+ /* for B devices only: start session with A-Host */
+ int (*start_srp)(struct usb_otg *otg);
+
+ /* start or continue HNP role switch */
+ int (*start_hnp)(struct usb_otg *otg);
+
+};
+
+extern const char *usb_otg_state_string(enum usb_otg_state state);
+
+/* Context: can sleep */
+static inline int
+otg_start_hnp(struct usb_otg *otg)
+{
+ if (otg && otg->start_hnp)
+ return otg->start_hnp(otg);
+
+ return -ENOTSUPP;
+}
+
+/* Context: can sleep */
+static inline int
+otg_set_vbus(struct usb_otg *otg, bool enabled)
+{
+ if (otg && otg->set_vbus)
+ return otg->set_vbus(otg, enabled);
+
+ return -ENOTSUPP;
+}
+
+/* for HCDs */
+static inline int
+otg_set_host(struct usb_otg *otg, struct usb_bus *host)
+{
+ if (otg && otg->set_host)
+ return otg->set_host(otg, host);
+
+ return -ENOTSUPP;
+}
+
+/* for usb peripheral controller drivers */
+
+/* Context: can sleep */
+static inline int
+otg_set_peripheral(struct usb_otg *otg, struct usb_gadget *periph)
+{
+ if (otg && otg->set_peripheral)
+ return otg->set_peripheral(otg, periph);
+
+ return -ENOTSUPP;
+}
+
+static inline int
+otg_start_srp(struct usb_otg *otg)
+{
+ if (otg && otg->start_srp)
+ return otg->start_srp(otg);
+
+ return -ENOTSUPP;
+}
+
+/* for OTG controller drivers (and maybe other stuff) */
+extern int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num);
+
+enum usb_dr_mode {
+ USB_DR_MODE_UNKNOWN,
+ USB_DR_MODE_HOST,
+ USB_DR_MODE_PERIPHERAL,
+ USB_DR_MODE_OTG,
+};
+
+#endif /* __LINUX_USB_OTG_H */
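
Example: a sketch (not part of this header) of an HCD binding itself to the OTG transceiver via otg_set_host(); transceivers without OTG support return -ENOTSUPP, which is treated as non-fatal here.

#include <linux/usb/otg.h>

static int example_bind_host(struct usb_otg *otg, struct usb_bus *bus)
{
	int ret = otg_set_host(otg, bus);

	if (ret == -ENOTSUPP)
		ret = 0;	/* no OTG transceiver involvement needed */
	return ret;
}
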
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
new file mode 100644
index 000000000..bc91b5d38
--- /dev/null
+++ b/include/linux/usb/phy.h
@@ -0,0 +1,329 @@
+/*
+ * USB PHY defines
+ *
+ * These APIs may be used between USB controllers. USB device drivers
+ * (for either host or peripheral roles) don't use these calls; they
+ * continue to use just usb_device and usb_gadget.
+ */
+
+#ifndef __LINUX_USB_PHY_H
+#define __LINUX_USB_PHY_H
+
+#include <linux/notifier.h>
+#include <linux/usb.h>
+
+enum usb_phy_interface {
+ USBPHY_INTERFACE_MODE_UNKNOWN,
+ USBPHY_INTERFACE_MODE_UTMI,
+ USBPHY_INTERFACE_MODE_UTMIW,
+ USBPHY_INTERFACE_MODE_ULPI,
+ USBPHY_INTERFACE_MODE_SERIAL,
+ USBPHY_INTERFACE_MODE_HSIC,
+};
+
+enum usb_phy_events {
+ USB_EVENT_NONE, /* no events or cable disconnected */
+ USB_EVENT_VBUS, /* vbus valid event */
+ USB_EVENT_ID, /* id was grounded */
+ USB_EVENT_CHARGER, /* usb dedicated charger */
+ USB_EVENT_ENUMERATED, /* gadget driver enumerated */
+};
+
+/* associate a type with PHY */
+enum usb_phy_type {
+ USB_PHY_TYPE_UNDEFINED,
+ USB_PHY_TYPE_USB2,
+ USB_PHY_TYPE_USB3,
+};
+
+/* OTG defines lots of enumeration states before device reset */
+enum usb_otg_state {
+ OTG_STATE_UNDEFINED = 0,
+
+ /* single-role peripheral, and dual-role default-b */
+ OTG_STATE_B_IDLE,
+ OTG_STATE_B_SRP_INIT,
+ OTG_STATE_B_PERIPHERAL,
+
+ /* extra dual-role default-b states */
+ OTG_STATE_B_WAIT_ACON,
+ OTG_STATE_B_HOST,
+
+ /* dual-role default-a */
+ OTG_STATE_A_IDLE,
+ OTG_STATE_A_WAIT_VRISE,
+ OTG_STATE_A_WAIT_BCON,
+ OTG_STATE_A_HOST,
+ OTG_STATE_A_SUSPEND,
+ OTG_STATE_A_PERIPHERAL,
+ OTG_STATE_A_WAIT_VFALL,
+ OTG_STATE_A_VBUS_ERR,
+};
+
+struct usb_phy;
+struct usb_otg;
+
+/* for transceivers connected through a ULPI interface, the user must
+ * provide access ops
+ */
+struct usb_phy_io_ops {
+ int (*read)(struct usb_phy *x, u32 reg);
+ int (*write)(struct usb_phy *x, u32 val, u32 reg);
+};
+
+struct usb_phy {
+ struct device *dev;
+ const char *label;
+ unsigned int flags;
+
+ enum usb_phy_type type;
+ enum usb_phy_events last_event;
+
+ struct usb_otg *otg;
+
+ struct device *io_dev;
+ struct usb_phy_io_ops *io_ops;
+ void __iomem *io_priv;
+
+ /* for notification of usb_phy_events */
+ struct atomic_notifier_head notifier;
+
+ /* to pass extra port status to the root hub */
+ u16 port_status;
+ u16 port_change;
+
+ /* to support controllers that have multiple transceivers */
+ struct list_head head;
+
+ /* initialize/shutdown the OTG controller */
+ int (*init)(struct usb_phy *x);
+ void (*shutdown)(struct usb_phy *x);
+
+ /* enable/disable VBUS */
+ int (*set_vbus)(struct usb_phy *x, int on);
+
+ /* effective for B devices, ignored for A-peripheral */
+ int (*set_power)(struct usb_phy *x,
+ unsigned mA);
+
+ /* Set transceiver into suspend mode */
+ int (*set_suspend)(struct usb_phy *x,
+ int suspend);
+
+ /*
+ * Set wakeup enable for PHY, in that case, the PHY can be
+ * woken up from suspend status due to external events,
+ * like vbus change, dp/dm change and id.
+ */
+ int (*set_wakeup)(struct usb_phy *x, bool enabled);
+
+ /* notify phy connect status change */
+ int (*notify_connect)(struct usb_phy *x,
+ enum usb_device_speed speed);
+ int (*notify_disconnect)(struct usb_phy *x,
+ enum usb_device_speed speed);
+};
+
+/**
+ * struct usb_phy_bind - represent the binding for the phy
+ * @dev_name: the device name of the device that will bind to the phy
+ * @phy_dev_name: the device name of the phy
+ * @index: used if a single controller uses multiple phys
+ * @phy: reference to the phy
+ * @list: to maintain a linked list of the binding information
+ */
+struct usb_phy_bind {
+ const char *dev_name;
+ const char *phy_dev_name;
+ u8 index;
+ struct usb_phy *phy;
+ struct list_head list;
+};
+
+/* for board-specific init logic */
+extern int usb_add_phy(struct usb_phy *, enum usb_phy_type type);
+extern int usb_add_phy_dev(struct usb_phy *);
+extern void usb_remove_phy(struct usb_phy *);
+
+/* helpers for direct access through the low-level I/O interface */
+static inline int usb_phy_io_read(struct usb_phy *x, u32 reg)
+{
+ if (x && x->io_ops && x->io_ops->read)
+ return x->io_ops->read(x, reg);
+
+ return -EINVAL;
+}
+
+static inline int usb_phy_io_write(struct usb_phy *x, u32 val, u32 reg)
+{
+ if (x && x->io_ops && x->io_ops->write)
+ return x->io_ops->write(x, val, reg);
+
+ return -EINVAL;
+}
+
+static inline int
+usb_phy_init(struct usb_phy *x)
+{
+ if (x && x->init)
+ return x->init(x);
+
+ return 0;
+}
+
+static inline void
+usb_phy_shutdown(struct usb_phy *x)
+{
+ if (x && x->shutdown)
+ x->shutdown(x);
+}
+
+static inline int
+usb_phy_vbus_on(struct usb_phy *x)
+{
+ if (!x || !x->set_vbus)
+ return 0;
+
+ return x->set_vbus(x, true);
+}
+
+static inline int
+usb_phy_vbus_off(struct usb_phy *x)
+{
+ if (!x || !x->set_vbus)
+ return 0;
+
+ return x->set_vbus(x, false);
+}
+
+/* for usb host and peripheral controller drivers */
+#if IS_ENABLED(CONFIG_USB_PHY)
+extern struct usb_phy *usb_get_phy(enum usb_phy_type type);
+extern struct usb_phy *devm_usb_get_phy(struct device *dev,
+ enum usb_phy_type type);
+extern struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index);
+extern struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index);
+extern struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
+ const char *phandle, u8 index);
+extern void usb_put_phy(struct usb_phy *);
+extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
+extern int usb_bind_phy(const char *dev_name, u8 index,
+ const char *phy_dev_name);
+extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
+#else
+static inline struct usb_phy *usb_get_phy(enum usb_phy_type type)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline struct usb_phy *devm_usb_get_phy(struct device *dev,
+ enum usb_phy_type type)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline struct usb_phy *devm_usb_get_phy_dev(struct device *dev, u8 index)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
+ const char *phandle, u8 index)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline void usb_put_phy(struct usb_phy *x)
+{
+}
+
+static inline void devm_usb_put_phy(struct device *dev, struct usb_phy *x)
+{
+}
+
+static inline int usb_bind_phy(const char *dev_name, u8 index,
+ const char *phy_dev_name)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
+{
+}
+#endif
+
+static inline int
+usb_phy_set_power(struct usb_phy *x, unsigned mA)
+{
+ if (x && x->set_power)
+ return x->set_power(x, mA);
+ return 0;
+}
+
+/* Context: can sleep */
+static inline int
+usb_phy_set_suspend(struct usb_phy *x, int suspend)
+{
+ if (x && x->set_suspend != NULL)
+ return x->set_suspend(x, suspend);
+ else
+ return 0;
+}
+
+static inline int
+usb_phy_set_wakeup(struct usb_phy *x, bool enabled)
+{
+ if (x && x->set_wakeup)
+ return x->set_wakeup(x, enabled);
+ else
+ return 0;
+}
+
+static inline int
+usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
+{
+ if (x && x->notify_connect)
+ return x->notify_connect(x, speed);
+ else
+ return 0;
+}
+
+static inline int
+usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed)
+{
+ if (x && x->notify_disconnect)
+ return x->notify_disconnect(x, speed);
+ else
+ return 0;
+}
+
+/* notifiers */
+static inline int
+usb_register_notifier(struct usb_phy *x, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&x->notifier, nb);
+}
+
+static inline void
+usb_unregister_notifier(struct usb_phy *x, struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&x->notifier, nb);
+}
+
+static inline const char *usb_phy_type_string(enum usb_phy_type type)
+{
+ switch (type) {
+ case USB_PHY_TYPE_USB2:
+ return "USB2 PHY";
+ case USB_PHY_TYPE_USB3:
+ return "USB3 PHY";
+ default:
+ return "UNKNOWN PHY TYPE";
+ }
+}
+#endif /* __LINUX_USB_PHY_H */
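
Example: a sketch (not part of this header) of a controller driver acquiring a USB2 PHY and listening for VBUS/ID events through the notifier hooks above; error handling is minimal and the function names are hypothetical.

#include <linux/err.h>
#include <linux/usb/phy.h>

static int example_phy_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	switch (event) {
	case USB_EVENT_VBUS:
		/* start a peripheral session */
		break;
	case USB_EVENT_ID:
		/* switch to the host role */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_phy_event,
};

static int example_attach_phy(struct device *dev)
{
	struct usb_phy *phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);

	if (IS_ERR(phy))
		return PTR_ERR(phy);

	usb_phy_init(phy);
	return usb_register_notifier(phy, &example_nb);
}
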
diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h
new file mode 100644
index 000000000..edd2ec23d
--- /dev/null
+++ b/include/linux/usb/phy_companion.h
@@ -0,0 +1,34 @@
+/*
+ * phy-companion.h -- phy companion to indicate the comparator part of PHY
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_PHY_COMPANION_H
+#define __DRIVERS_PHY_COMPANION_H
+
+#include <linux/usb/otg.h>
+
+/* phy_companion to take care of VBUS, ID and srp capabilities */
+struct phy_companion {
+
+ /* effective for A-peripheral, ignored for B devices */
+ int (*set_vbus)(struct phy_companion *x, bool enabled);
+
+ /* for B devices only: start session with A-Host */
+ int (*start_srp)(struct phy_companion *x);
+};
+
+#endif /* __DRIVERS_PHY_COMPANION_H */
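A sketch of how a comparator driver might populate struct phy_companion; the example_* functions below are placeholders, not an existing driver.

    static int example_set_vbus(struct phy_companion *comparator, bool enabled)
    {
            /* switch the VBUS charge pump on or off */
            return 0;
    }

    static int example_start_srp(struct phy_companion *comparator)
    {
            /* pulse the line to request a session from the A-host (SRP) */
            return 0;
    }

    static struct phy_companion example_comparator = {
            .set_vbus       = example_set_vbus,
            .start_srp      = example_start_srp,
    };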
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
new file mode 100644
index 000000000..9948c874e
--- /dev/null
+++ b/include/linux/usb/quirks.h
@@ -0,0 +1,50 @@
+/*
+ * This file holds the definitions of quirks found in USB devices.
+ * Only quirks that affect the whole device, not an interface,
+ * belong here.
+ */
+
+#ifndef __LINUX_USB_QUIRKS_H
+#define __LINUX_USB_QUIRKS_H
+
+/* string descriptors must not be fetched using a 255-byte read */
+#define USB_QUIRK_STRING_FETCH_255 BIT(0)
+
+/* device can't resume correctly so reset it instead */
+#define USB_QUIRK_RESET_RESUME BIT(1)
+
+/* device can't handle Set-Interface requests */
+#define USB_QUIRK_NO_SET_INTF BIT(2)
+
+/* device can't handle its Configuration or Interface strings */
+#define USB_QUIRK_CONFIG_INTF_STRINGS BIT(3)
+
+/* device can't be reset (e.g. morph devices), don't use reset */
+#define USB_QUIRK_RESET BIT(4)
+
+/* device has more interface descriptions than the bNumInterfaces count,
+ and can't handle talking to these interfaces */
+#define USB_QUIRK_HONOR_BNUMINTERFACES BIT(5)
+
+/* device needs a pause during initialization, after we read the device
+ descriptor */
+#define USB_QUIRK_DELAY_INIT BIT(6)
+
+/*
+ * For high speed and super speed interrupt endpoints, the USB 2.0 and
+ * USB 3.0 specs require the interval in microframes
+ * (1 microframe = 125 microseconds) to be calculated as
+ * interval = 2 ^ (bInterval-1).
+ *
+ * Devices with this quirk report their bInterval as the result of this
+ * calculation instead of the exponent variable used in the calculation.
+ */
+#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL BIT(7)
+
+/* device can't handle device_qualifier descriptor requests */
+#define USB_QUIRK_DEVICE_QUALIFIER BIT(8)
+
+/* device generates spurious wakeup, ignore remote wakeup capability */
+#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
+
+#endif /* __LINUX_USB_QUIRKS_H */
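A sketch of how these bits are typically consumed: the quirk mask for a device is resolved elsewhere (this header only defines the bit values), and individual quirks are tested with a bitwise AND.

    #include <linux/types.h>
    #include <linux/usb/quirks.h>

    /* the quirks value is assumed to have been looked up for the device */
    static bool example_needs_reset_resume(u32 quirks)
    {
            return quirks & USB_QUIRK_RESET_RESUME;
    }

    static bool example_skip_string_fetch(u32 quirks)
    {
            return quirks & USB_QUIRK_STRING_FETCH_255;
    }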
diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h
new file mode 100644
index 000000000..55805f9dc
--- /dev/null
+++ b/include/linux/usb/r8a66597.h
@@ -0,0 +1,481 @@
+/*
+ * R8A66597 driver platform data
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ *
+ * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef __LINUX_USB_R8A66597_H
+#define __LINUX_USB_R8A66597_H
+
+#define R8A66597_PLATDATA_XTAL_12MHZ 0x01
+#define R8A66597_PLATDATA_XTAL_24MHZ 0x02
+#define R8A66597_PLATDATA_XTAL_48MHZ 0x03
+
+struct r8a66597_platdata {
+ /* This callback can control port power instead of DVSTCTR register. */
+ void (*port_power)(int port, int power);
+
+ /* This parameter is for BUSWAIT */
+ u16 buswait;
+
+ /* set one = on chip controller, set zero = external controller */
+ unsigned on_chip:1;
+
+ /* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */
+ unsigned xtal:2;
+
+ /* set one = 3.3V, set zero = 1.5V */
+ unsigned vif:1;
+
+ /* set one = big endian, set zero = little endian */
+ unsigned endian:1;
+
+ /* (external controller only) set one = WR0_N shorted to WR1_N */
+ unsigned wr0_shorted_to_wr1:1;
+
+ /* set one = using SUDMAC */
+ unsigned sudmac:1;
+};
+
+/* Register definitions */
+#define SYSCFG0 0x00
+#define SYSCFG1 0x02
+#define SYSSTS0 0x04
+#define SYSSTS1 0x06
+#define DVSTCTR0 0x08
+#define DVSTCTR1 0x0A
+#define TESTMODE 0x0C
+#define PINCFG 0x0E
+#define DMA0CFG 0x10
+#define DMA1CFG 0x12
+#define CFIFO 0x14
+#define D0FIFO 0x18
+#define D1FIFO 0x1C
+#define CFIFOSEL 0x20
+#define CFIFOCTR 0x22
+#define CFIFOSIE 0x24
+#define D0FIFOSEL 0x28
+#define D0FIFOCTR 0x2A
+#define D1FIFOSEL 0x2C
+#define D1FIFOCTR 0x2E
+#define INTENB0 0x30
+#define INTENB1 0x32
+#define INTENB2 0x34
+#define BRDYENB 0x36
+#define NRDYENB 0x38
+#define BEMPENB 0x3A
+#define SOFCFG 0x3C
+#define INTSTS0 0x40
+#define INTSTS1 0x42
+#define INTSTS2 0x44
+#define BRDYSTS 0x46
+#define NRDYSTS 0x48
+#define BEMPSTS 0x4A
+#define FRMNUM 0x4C
+#define UFRMNUM 0x4E
+#define USBADDR 0x50
+#define USBREQ 0x54
+#define USBVAL 0x56
+#define USBINDX 0x58
+#define USBLENG 0x5A
+#define DCPCFG 0x5C
+#define DCPMAXP 0x5E
+#define DCPCTR 0x60
+#define PIPESEL 0x64
+#define PIPECFG 0x68
+#define PIPEBUF 0x6A
+#define PIPEMAXP 0x6C
+#define PIPEPERI 0x6E
+#define PIPE1CTR 0x70
+#define PIPE2CTR 0x72
+#define PIPE3CTR 0x74
+#define PIPE4CTR 0x76
+#define PIPE5CTR 0x78
+#define PIPE6CTR 0x7A
+#define PIPE7CTR 0x7C
+#define PIPE8CTR 0x7E
+#define PIPE9CTR 0x80
+#define PIPE1TRE 0x90
+#define PIPE1TRN 0x92
+#define PIPE2TRE 0x94
+#define PIPE2TRN 0x96
+#define PIPE3TRE 0x98
+#define PIPE3TRN 0x9A
+#define PIPE4TRE 0x9C
+#define PIPE4TRN 0x9E
+#define PIPE5TRE 0xA0
+#define PIPE5TRN 0xA2
+#define DEVADD0 0xD0
+#define DEVADD1 0xD2
+#define DEVADD2 0xD4
+#define DEVADD3 0xD6
+#define DEVADD4 0xD8
+#define DEVADD5 0xDA
+#define DEVADD6 0xDC
+#define DEVADD7 0xDE
+#define DEVADD8 0xE0
+#define DEVADD9 0xE2
+#define DEVADDA 0xE4
+
+/* System Configuration Control Register */
+#define XTAL 0xC000 /* b15-14: Crystal selection */
+#define XTAL48 0x8000 /* 48MHz */
+#define XTAL24 0x4000 /* 24MHz */
+#define XTAL12 0x0000 /* 12MHz */
+#define XCKE 0x2000 /* b13: External clock enable */
+#define PLLC 0x0800 /* b11: PLL control */
+#define SCKE 0x0400 /* b10: USB clock enable */
+#define PCSDIS 0x0200 /* b9: not CS wakeup */
+#define LPSME 0x0100 /* b8: Low power sleep mode */
+#define HSE 0x0080 /* b7: Hi-speed enable */
+#define DCFM 0x0040 /* b6: Controller function select */
+#define DRPD 0x0020 /* b5: D+/- pull down control */
+#define DPRPU 0x0010 /* b4: D+ pull up control */
+#define USBE 0x0001 /* b0: USB module operation enable */
+
+/* System Configuration Status Register */
+#define OVCBIT 0x8000 /* b15-14: Over-current bit */
+#define OVCMON 0xC000 /* b15-14: Over-current monitor */
+#define SOFEA 0x0020 /* b5: SOF monitor */
+#define IDMON 0x0004 /* b3: ID-pin monitor */
+#define LNST 0x0003 /* b1-0: D+, D- line status */
+#define SE1 0x0003 /* SE1 */
+#define FS_KSTS 0x0002 /* Full-Speed K State */
+#define FS_JSTS 0x0001 /* Full-Speed J State */
+#define LS_JSTS 0x0002 /* Low-Speed J State */
+#define LS_KSTS 0x0001 /* Low-Speed K State */
+#define SE0 0x0000 /* SE0 */
+
+/* Device State Control Register */
+#define EXTLP0 0x0400 /* b10: External port */
+#define VBOUT 0x0200 /* b9: VBUS output */
+#define WKUP 0x0100 /* b8: Remote wakeup */
+#define RWUPE 0x0080 /* b7: Remote wakeup sense */
+#define USBRST 0x0040 /* b6: USB reset enable */
+#define RESUME 0x0020 /* b5: Resume enable */
+#define UACT 0x0010 /* b4: USB bus enable */
+#define RHST 0x0007 /* b1-0: Reset handshake status */
+#define HSPROC 0x0004 /* HS handshake is processing */
+#define HSMODE 0x0003 /* Hi-Speed mode */
+#define FSMODE 0x0002 /* Full-Speed mode */
+#define LSMODE 0x0001 /* Low-Speed mode */
+#define UNDECID 0x0000 /* Undecided */
+
+/* Test Mode Register */
+#define UTST 0x000F /* b3-0: Test select */
+#define H_TST_PACKET 0x000C /* HOST TEST Packet */
+#define H_TST_SE0_NAK 0x000B /* HOST TEST SE0 NAK */
+#define H_TST_K 0x000A /* HOST TEST K */
+#define H_TST_J 0x0009 /* HOST TEST J */
+#define H_TST_NORMAL 0x0000 /* HOST Normal Mode */
+#define P_TST_PACKET 0x0004 /* PERI TEST Packet */
+#define P_TST_SE0_NAK 0x0003 /* PERI TEST SE0 NAK */
+#define P_TST_K 0x0002 /* PERI TEST K */
+#define P_TST_J 0x0001 /* PERI TEST J */
+#define P_TST_NORMAL 0x0000 /* PERI Normal Mode */
+
+/* Data Pin Configuration Register */
+#define LDRV 0x8000 /* b15: Drive Current Adjust */
+#define VIF1 0x0000 /* VIF = 1.8V */
+#define VIF3 0x8000 /* VIF = 3.3V */
+#define INTA 0x0001 /* b1: USB INT-pin active */
+
+/* DMAx Pin Configuration Register */
+#define DREQA 0x4000 /* b14: Dreq active select */
+#define BURST 0x2000 /* b13: Burst mode */
+#define DACKA 0x0400 /* b10: Dack active select */
+#define DFORM 0x0380 /* b9-7: DMA mode select */
+#define CPU_ADR_RD_WR 0x0000 /* Address + RD/WR mode (CPU bus) */
+#define CPU_DACK_RD_WR 0x0100 /* DACK + RD/WR mode (CPU bus) */
+#define CPU_DACK_ONLY 0x0180 /* DACK only mode (CPU bus) */
+#define SPLIT_DACK_ONLY 0x0200 /* DACK only mode (SPLIT bus) */
+#define DENDA 0x0040 /* b6: Dend active select */
+#define PKTM 0x0020 /* b5: Packet mode */
+#define DENDE 0x0010 /* b4: Dend enable */
+#define OBUS 0x0004 /* b2: OUTbus mode */
+
+/* CFIFO/DxFIFO Port Select Register */
+#define RCNT 0x8000 /* b15: Read count mode */
+#define REW 0x4000 /* b14: Buffer rewind */
+#define DCLRM 0x2000 /* b13: DMA buffer clear mode */
+#define DREQE 0x1000 /* b12: DREQ output enable */
+#define MBW_8 0x0000 /* 8bit */
+#define MBW_16 0x0400 /* 16bit */
+#define MBW_32 0x0800 /* 32bit */
+#define BIGEND 0x0100 /* b8: Big endian mode */
+#define BYTE_LITTLE 0x0000 /* little endian */
+#define BYTE_BIG 0x0100 /* big endian */
+#define ISEL 0x0020 /* b5: DCP FIFO port direction select */
+#define CURPIPE 0x000F /* b2-0: PIPE select */
+
+/* CFIFO/DxFIFO Port Control Register */
+#define BVAL 0x8000 /* b15: Buffer valid flag */
+#define BCLR 0x4000 /* b14: Buffer clear */
+#define FRDY 0x2000 /* b13: FIFO ready */
+#define DTLN 0x0FFF /* b11-0: FIFO received data length */
+
+/* Interrupt Enable Register 0 */
+#define VBSE 0x8000 /* b15: VBUS interrupt */
+#define RSME 0x4000 /* b14: Resume interrupt */
+#define SOFE 0x2000 /* b13: Frame update interrupt */
+#define DVSE 0x1000 /* b12: Device state transition interrupt */
+#define CTRE 0x0800 /* b11: Control transfer stage transition interrupt */
+#define BEMPE 0x0400 /* b10: Buffer empty interrupt */
+#define NRDYE 0x0200 /* b9: Buffer not ready interrupt */
+#define BRDYE 0x0100 /* b8: Buffer ready interrupt */
+
+/* Interrupt Enable Register 1 */
+#define OVRCRE 0x8000 /* b15: Over-current interrupt */
+#define BCHGE 0x4000 /* b14: USB bus change interrupt */
+#define DTCHE 0x1000 /* b12: Detach sense interrupt */
+#define ATTCHE 0x0800 /* b11: Attach sense interrupt */
+#define EOFERRE 0x0040 /* b6: EOF error interrupt */
+#define SIGNE 0x0020 /* b5: SETUP IGNORE interrupt */
+#define SACKE 0x0010 /* b4: SETUP ACK interrupt */
+
+/* BRDY Interrupt Enable/Status Register */
+#define BRDY9 0x0200 /* b9: PIPE9 */
+#define BRDY8 0x0100 /* b8: PIPE8 */
+#define BRDY7 0x0080 /* b7: PIPE7 */
+#define BRDY6 0x0040 /* b6: PIPE6 */
+#define BRDY5 0x0020 /* b5: PIPE5 */
+#define BRDY4 0x0010 /* b4: PIPE4 */
+#define BRDY3 0x0008 /* b3: PIPE3 */
+#define BRDY2 0x0004 /* b2: PIPE2 */
+#define BRDY1 0x0002 /* b1: PIPE1 */
+#define BRDY0 0x0001 /* b0: PIPE0 */
+
+/* NRDY Interrupt Enable/Status Register */
+#define NRDY9 0x0200 /* b9: PIPE9 */
+#define NRDY8 0x0100 /* b8: PIPE8 */
+#define NRDY7 0x0080 /* b7: PIPE7 */
+#define NRDY6 0x0040 /* b6: PIPE6 */
+#define NRDY5 0x0020 /* b5: PIPE5 */
+#define NRDY4 0x0010 /* b4: PIPE4 */
+#define NRDY3 0x0008 /* b3: PIPE3 */
+#define NRDY2 0x0004 /* b2: PIPE2 */
+#define NRDY1 0x0002 /* b1: PIPE1 */
+#define NRDY0 0x0001 /* b0: PIPE0 */
+
+/* BEMP Interrupt Enable/Status Register */
+#define BEMP9 0x0200 /* b9: PIPE9 */
+#define BEMP8 0x0100 /* b8: PIPE8 */
+#define BEMP7 0x0080 /* b7: PIPE7 */
+#define BEMP6 0x0040 /* b6: PIPE6 */
+#define BEMP5 0x0020 /* b5: PIPE5 */
+#define BEMP4 0x0010 /* b4: PIPE4 */
+#define BEMP3 0x0008 /* b3: PIPE3 */
+#define BEMP2 0x0004 /* b2: PIPE2 */
+#define BEMP1 0x0002 /* b1: PIPE1 */
+#define BEMP0 0x0001 /* b0: PIPE0 */
+
+/* SOF Pin Configuration Register */
+#define TRNENSEL 0x0100 /* b8: Select transaction enable period */
+#define BRDYM 0x0040 /* b6: BRDY clear timing */
+#define INTL 0x0020 /* b5: Interrupt sense select */
+#define EDGESTS 0x0010 /* b4: */
+#define SOFMODE 0x000C /* b3-2: SOF pin select */
+#define SOF_125US 0x0008 /* SOF OUT 125us Frame Signal */
+#define SOF_1MS 0x0004 /* SOF OUT 1ms Frame Signal */
+#define SOF_DISABLE 0x0000 /* SOF OUT Disable */
+
+/* Interrupt Status Register 0 */
+#define VBINT 0x8000 /* b15: VBUS interrupt */
+#define RESM 0x4000 /* b14: Resume interrupt */
+#define SOFR 0x2000 /* b13: SOF frame update interrupt */
+#define DVST 0x1000 /* b12: Device state transition interrupt */
+#define CTRT 0x0800 /* b11: Control transfer stage transition interrupt */
+#define BEMP 0x0400 /* b10: Buffer empty interrupt */
+#define NRDY 0x0200 /* b9: Buffer not ready interrupt */
+#define BRDY 0x0100 /* b8: Buffer ready interrupt */
+#define VBSTS 0x0080 /* b7: VBUS input port */
+#define DVSQ 0x0070 /* b6-4: Device state */
+#define DS_SPD_CNFG 0x0070 /* Suspend Configured */
+#define DS_SPD_ADDR 0x0060 /* Suspend Address */
+#define DS_SPD_DFLT 0x0050 /* Suspend Default */
+#define DS_SPD_POWR 0x0040 /* Suspend Powered */
+#define DS_SUSP 0x0040 /* Suspend */
+#define DS_CNFG 0x0030 /* Configured */
+#define DS_ADDS 0x0020 /* Address */
+#define DS_DFLT 0x0010 /* Default */
+#define DS_POWR 0x0000 /* Powered */
+#define DVSQS 0x0030 /* b5-4: Device state */
+#define VALID 0x0008 /* b3: Setup packet detected flag */
+#define CTSQ 0x0007 /* b2-0: Control transfer stage */
+#define CS_SQER 0x0006 /* Sequence error */
+#define CS_WRND 0x0005 /* Control write nodata status stage */
+#define CS_WRSS 0x0004 /* Control write status stage */
+#define CS_WRDS 0x0003 /* Control write data stage */
+#define CS_RDSS 0x0002 /* Control read status stage */
+#define CS_RDDS 0x0001 /* Control read data stage */
+#define CS_IDST 0x0000 /* Idle or setup stage */
+
+/* Interrupt Status Register 1 */
+#define OVRCR 0x8000 /* b15: Over-current interrupt */
+#define BCHG 0x4000 /* b14: USB bus change interrupt */
+#define DTCH 0x1000 /* b12: Detach sense interrupt */
+#define ATTCH 0x0800 /* b11: Attach sense interrupt */
+#define EOFERR 0x0040 /* b6: EOF-error interrupt */
+#define SIGN 0x0020 /* b5: Setup ignore interrupt */
+#define SACK 0x0010 /* b4: Setup acknowledge interrupt */
+
+/* Frame Number Register */
+#define OVRN 0x8000 /* b15: Overrun error */
+#define CRCE 0x4000 /* b14: Received data error */
+#define FRNM 0x07FF /* b10-0: Frame number */
+
+/* Micro Frame Number Register */
+#define UFRNM 0x0007 /* b2-0: Micro frame number */
+
+/* Default Control Pipe Maxpacket Size Register */
+/* Pipe Maxpacket Size Register */
+#define DEVSEL 0xF000 /* b15-14: Device address select */
+#define MAXP 0x007F /* b6-0: Maxpacket size of default control pipe */
+
+/* Default Control Pipe Control Register */
+#define BSTS 0x8000 /* b15: Buffer status */
+#define SUREQ 0x4000 /* b14: Send USB request */
+#define CSCLR 0x2000 /* b13: complete-split status clear */
+#define CSSTS 0x1000 /* b12: complete-split status */
+#define SUREQCLR 0x0800 /* b11: stop setup request */
+#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */
+#define SQSET 0x0080 /* b7: Sequence toggle bit set */
+#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */
+#define PBUSY 0x0020 /* b5: pipe busy */
+#define PINGE 0x0010 /* b4: ping enable */
+#define CCPL 0x0004 /* b2: Enable control transfer complete */
+#define PID 0x0003 /* b1-0: Response PID */
+#define PID_STALL11 0x0003 /* STALL */
+#define PID_STALL 0x0002 /* STALL */
+#define PID_BUF 0x0001 /* BUF */
+#define PID_NAK 0x0000 /* NAK */
+
+/* Pipe Window Select Register */
+#define PIPENM 0x0007 /* b2-0: Pipe select */
+
+/* Pipe Configuration Register */
+#define R8A66597_TYP 0xC000 /* b15-14: Transfer type */
+#define R8A66597_ISO 0xC000 /* Isochronous */
+#define R8A66597_INT 0x8000 /* Interrupt */
+#define R8A66597_BULK 0x4000 /* Bulk */
+#define R8A66597_BFRE 0x0400 /* b10: Buffer ready interrupt mode select */
+#define R8A66597_DBLB 0x0200 /* b9: Double buffer mode select */
+#define R8A66597_CNTMD 0x0100 /* b8: Continuous transfer mode select */
+#define R8A66597_SHTNAK 0x0080 /* b7: Transfer end NAK */
+#define R8A66597_DIR 0x0010 /* b4: Transfer direction select */
+#define R8A66597_EPNUM 0x000F /* b3-0: Endpoint number select */
+
+/* Pipe Buffer Configuration Register */
+#define BUFSIZE 0x7C00 /* b14-10: Pipe buffer size */
+#define BUFNMB 0x007F /* b6-0: Pipe buffer number */
+#define PIPE0BUF 256
+#define PIPExBUF 64
+
+/* Pipe Maxpacket Size Register */
+#define MXPS 0x07FF /* b10-0: Maxpacket size */
+
+/* Pipe Cycle Configuration Register */
+#define IFIS 0x1000 /* b12: Isochronous in-buffer flush mode select */
+#define IITV 0x0007 /* b2-0: Isochronous interval */
+
+/* Pipex Control Register */
+#define BSTS 0x8000 /* b15: Buffer status */
+#define INBUFM 0x4000 /* b14: IN buffer monitor (Only for PIPE1 to 5) */
+#define CSCLR 0x2000 /* b13: complete-split status clear */
+#define CSSTS 0x1000 /* b12: complete-split status */
+#define ATREPM 0x0400 /* b10: Auto repeat mode */
+#define ACLRM 0x0200 /* b9: Out buffer auto clear mode */
+#define SQCLR 0x0100 /* b8: Sequence toggle bit clear */
+#define SQSET 0x0080 /* b7: Sequence toggle bit set */
+#define SQMON 0x0040 /* b6: Sequence toggle bit monitor */
+#define PBUSY 0x0020 /* b5: pipe busy */
+#define PID 0x0003 /* b1-0: Response PID */
+
+/* PIPExTRE */
+#define TRENB 0x0200 /* b9: Transaction counter enable */
+#define TRCLR 0x0100 /* b8: Transaction counter clear */
+
+/* PIPExTRN */
+#define TRNCNT 0xFFFF /* b15-0: Transaction counter */
+
+/* DEVADDx */
+#define UPPHUB 0x7800
+#define HUBPORT 0x0700
+#define USBSPD 0x00C0
+#define RTPORT 0x0001
+
+/* SUDMAC registers */
+#define CH0CFG 0x00
+#define CH1CFG 0x04
+#define CH0BA 0x10
+#define CH1BA 0x14
+#define CH0BBC 0x18
+#define CH1BBC 0x1C
+#define CH0CA 0x20
+#define CH1CA 0x24
+#define CH0CBC 0x28
+#define CH1CBC 0x2C
+#define CH0DEN 0x30
+#define CH1DEN 0x34
+#define DSTSCLR 0x38
+#define DBUFCTRL 0x3C
+#define DINTCTRL 0x40
+#define DINTSTS 0x44
+#define DINTSTSCLR 0x48
+#define CH0SHCTRL 0x50
+#define CH1SHCTRL 0x54
+
+/* SUDMAC Configuration Registers */
+#define SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */
+#define RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */
+#define LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */
+
+/* DMA Enable Registers */
+#define DEN 0x0001 /* b1: DMA Transfer Enable */
+
+/* DMA Status Clear Register */
+#define CH1STCLR 0x0002 /* b2: Ch1 DMA Status Clear */
+#define CH0STCLR 0x0001 /* b1: Ch0 DMA Status Clear */
+
+/* DMA Buffer Control Register */
+#define CH1BUFW 0x0200 /* b9: Ch1 DMA Buffer Data Transfer Enable */
+#define CH0BUFW 0x0100 /* b8: Ch0 DMA Buffer Data Transfer Enable */
+#define CH1BUFS 0x0002 /* b2: Ch1 DMA Buffer Data Status */
+#define CH0BUFS 0x0001 /* b1: Ch0 DMA Buffer Data Status */
+
+/* DMA Interrupt Control Register */
+#define CH1ERRE 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Enable */
+#define CH0ERRE 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Enable */
+#define CH1ENDE 0x0002 /* b2: Ch1 DMA Transfer End Int Enable */
+#define CH0ENDE 0x0001 /* b1: Ch0 DMA Transfer End Int Enable */
+
+/* DMA Interrupt Status Register */
+#define CH1ERRS 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Status */
+#define CH0ERRS 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Status */
+#define CH1ENDS 0x0002 /* b2: Ch1 DMA Transfer End Int Status */
+#define CH0ENDS 0x0001 /* b1: Ch0 DMA Transfer End Int Status */
+
+/* DMA Interrupt Status Clear Register */
+#define CH1ERRC 0x0200 /* b9: Ch1 SHwy Res Err Detect Int Stat Clear */
+#define CH0ERRC 0x0100 /* b8: Ch0 SHwy Res Err Detect Int Stat Clear */
+#define CH1ENDC 0x0002 /* b2: Ch1 DMA Transfer End Int Stat Clear */
+#define CH0ENDC 0x0001 /* b1: Ch0 DMA Transfer End Int Stat Clear */
+
+#endif /* __LINUX_USB_R8A66597_H */
+
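A board-file sketch (values and names are made up) showing how struct r8a66597_platdata defined above is typically filled in for an on-chip controller; the register and bit definitions in the rest of the header are consumed by the r8a66597 driver itself.

    #include <linux/usb/r8a66597.h>

    static struct r8a66597_platdata example_usb_pdata = {
            .on_chip        = 1,    /* internal controller */
            .endian         = 0,    /* little endian bus */
            .buswait        = 4,    /* BUSWAIT setting */
            /* .xtal and .wr0_shorted_to_wr1 matter only for the
             * external-controller case */
    };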
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
new file mode 100644
index 000000000..f06529c14
--- /dev/null
+++ b/include/linux/usb/renesas_usbhs.h
@@ -0,0 +1,216 @@
+/*
+ * Renesas USB
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#ifndef RENESAS_USB_H
+#define RENESAS_USB_H
+#include <linux/platform_device.h>
+#include <linux/usb/ch9.h>
+
+/*
+ * module type
+ *
+ * it will be the return value from get_id
+ */
+enum {
+ USBHS_HOST = 0,
+ USBHS_GADGET,
+ USBHS_MAX,
+};
+
+/*
+ * callback functions table for driver
+ *
+ * These functions are called from platform for driver.
+ * Callback function's pointer will be set before
+ * renesas_usbhs_platform_callback :: hardware_init was called
+ */
+struct renesas_usbhs_driver_callback {
+ int (*notify_hotplug)(struct platform_device *pdev);
+};
+
+/*
+ * callback functions for platform
+ *
+ * These functions are called from driver for platform
+ */
+struct renesas_usbhs_platform_callback {
+
+ /*
+ * option:
+ *
+ * Hardware init function for platform.
+ * It is called when the driver is probed.
+ */
+ int (*hardware_init)(struct platform_device *pdev);
+
+ /*
+ * option:
+ *
+ * Hardware exit function for platform.
+ * It is called when the driver is removed.
+ */
+ int (*hardware_exit)(struct platform_device *pdev);
+
+ /*
+ * option:
+ *
+ * for board specific clock control
+ */
+ int (*power_ctrl)(struct platform_device *pdev,
+ void __iomem *base, int enable);
+
+ /*
+ * option:
+ *
+ * Phy reset for platform
+ */
+ int (*phy_reset)(struct platform_device *pdev);
+
+ /*
+ * get USB ID function
+ * - USBHS_HOST
+ * - USBHS_GADGET
+ */
+ int (*get_id)(struct platform_device *pdev);
+
+ /*
+ * get VBUS status function.
+ */
+ int (*get_vbus)(struct platform_device *pdev);
+
+ /*
+ * option:
+ *
+ * VBUS control is needed for Host
+ */
+ int (*set_vbus)(struct platform_device *pdev, int enable);
+};
+
+/*
+ * parameters for renesas usbhs
+ *
+ * Some registers need USB chip specific parameters.
+ * This struct provides them to the driver.
+ */
+struct renesas_usbhs_driver_param {
+ /*
+ * pipe settings
+ */
+ u32 *pipe_type; /* array of USB_ENDPOINT_XFER_xxx (from ep0) */
+ int pipe_size; /* pipe_type array size */
+
+ /*
+ * option:
+ *
+ * for BUSWAIT :: BWAIT
+ * see
+ * renesas_usbhs/common.c :: usbhsc_set_buswait()
+ * */
+ int buswait_bwait;
+
+ /*
+ * option:
+ *
+ * delay time from notify_hotplug callback
+ */
+ int detection_delay; /* msec */
+
+ /*
+ * option:
+ *
+ * dma id for dmaengine
+ * The data transfer direction on D0FIFO/D1FIFO should be
+ * fixed to keep consistency.
+ * So the platform id settings will be:
+ * .d0_tx_id = xx_TX,
+ * .d1_rx_id = xx_RX,
+ * or
+ * .d1_tx_id = xx_TX,
+ * .d0_rx_id = xx_RX,
+ */
+ int d0_tx_id;
+ int d0_rx_id;
+ int d1_tx_id;
+ int d1_rx_id;
+ int d2_tx_id;
+ int d2_rx_id;
+ int d3_tx_id;
+ int d3_rx_id;
+
+ /*
+ * option:
+ *
+ * pio <--> dma border.
+ */
+ int pio_dma_border; /* default is 64byte */
+
+ u32 type;
+ u32 enable_gpio;
+
+ /*
+ * option:
+ */
+ u32 has_otg:1; /* for controlling PWEN/EXTLP */
+ u32 has_sudmac:1; /* for SUDMAC */
+ u32 has_usb_dmac:1; /* for USB-DMAC */
+#define USBHS_USB_DMAC_XFER_SIZE 32 /* hardcode the xfer size */
+};
+
+#define USBHS_TYPE_R8A7790 1
+#define USBHS_TYPE_R8A7791 2
+
+/*
+ * option:
+ *
+ * platform information for renesas_usbhs driver.
+ */
+struct renesas_usbhs_platform_info {
+ /*
+ * option:
+ *
+ * the platform sets these functions before
+ * calling platform_add_devices() if needed
+ */
+ struct renesas_usbhs_platform_callback platform_callback;
+
+ /*
+ * the driver sets these callback function pointers;
+ * the platform can use them in its callback functions
+ */
+ struct renesas_usbhs_driver_callback driver_callback;
+
+ /*
+ * option:
+ *
+ * the driver uses these parameters for some registers
+ */
+ struct renesas_usbhs_driver_param driver_param;
+};
+
+/*
+ * macro for platform
+ */
+#define renesas_usbhs_get_info(pdev)\
+ ((struct renesas_usbhs_platform_info *)(pdev)->dev.platform_data)
+
+#define renesas_usbhs_call_notify_hotplug(pdev) \
+ ({ \
+ struct renesas_usbhs_driver_callback *dc; \
+ dc = &(renesas_usbhs_get_info(pdev)->driver_callback); \
+ if (dc && dc->notify_hotplug) \
+ dc->notify_hotplug(pdev); \
+ })
+#endif /* RENESAS_USB_H */
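A sketch of the platform_data a board might hand to the renesas_usbhs driver and retrieve with renesas_usbhs_get_info(); the pipe table and the get_id callback are illustrative, and most members marked "option:" above can be left unset.

    #include <linux/kernel.h>
    #include <linux/usb/ch9.h>
    #include <linux/usb/renesas_usbhs.h>

    static u32 example_pipe_type[] = {
            USB_ENDPOINT_XFER_CONTROL,      /* ep0 */
            USB_ENDPOINT_XFER_BULK,
            USB_ENDPOINT_XFER_BULK,
            USB_ENDPOINT_XFER_INT,
    };

    static int example_get_id(struct platform_device *pdev)
    {
            return USBHS_GADGET;            /* always run as a gadget */
    }

    static struct renesas_usbhs_platform_info example_usbhs_info = {
            .platform_callback = {
                    .get_id         = example_get_id,
            },
            .driver_param = {
                    .pipe_type      = example_pipe_type,
                    .pipe_size      = ARRAY_SIZE(example_pipe_type),
                    .buswait_bwait  = 4,
            },
    };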
diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h
new file mode 100644
index 000000000..d44ef85db
--- /dev/null
+++ b/include/linux/usb/rndis_host.h
@@ -0,0 +1,210 @@
+/*
+ * Host Side support for RNDIS Networking Links
+ * Copyright (C) 2005 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_USB_RNDIS_HOST_H
+#define __LINUX_USB_RNDIS_HOST_H
+
+#include <linux/rndis.h>
+
+/*
+ * CONTROL uses CDC "encapsulated commands" with funky notifications.
+ * - control-out: SEND_ENCAPSULATED
+ * - interrupt-in: RESPONSE_AVAILABLE
+ * - control-in: GET_ENCAPSULATED
+ *
+ * We'll try to ignore the RESPONSE_AVAILABLE notifications.
+ *
+ * REVISIT some RNDIS implementations seem to have curious issues still
+ * to be resolved.
+ */
+struct rndis_msg_hdr {
+ __le32 msg_type; /* RNDIS_MSG_* */
+ __le32 msg_len;
+ /* followed by data that varies between messages */
+ __le32 request_id;
+ __le32 status;
+ /* ... and more */
+} __attribute__ ((packed));
+
+/* MS-Windows uses this strange size, but RNDIS spec says 1024 minimum */
+#define CONTROL_BUFFER_SIZE 1025
+
+/* RNDIS defines an (absurdly huge) 10 second control timeout,
+ * but ActiveSync seems to use a more usual 5 second timeout
+ * (which matches the USB 2.0 spec).
+ */
+#define RNDIS_CONTROL_TIMEOUT_MS (5 * 1000)
+
+struct rndis_data_hdr {
+ __le32 msg_type; /* RNDIS_MSG_PACKET */
+ __le32 msg_len; /* rndis_data_hdr + data_len + pad */
+ __le32 data_offset; /* 36 -- right after header */
+ __le32 data_len; /* ... real packet size */
+
+ __le32 oob_data_offset; /* zero */
+ __le32 oob_data_len; /* zero */
+ __le32 num_oob; /* zero */
+ __le32 packet_data_offset; /* zero */
+
+ __le32 packet_data_len; /* zero */
+ __le32 vc_handle; /* zero */
+ __le32 reserved; /* zero */
+} __attribute__ ((packed));
+
+struct rndis_init { /* OUT */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_INIT */
+ __le32 msg_len; /* 24 */
+ __le32 request_id;
+ __le32 major_version; /* of rndis (1.0) */
+ __le32 minor_version;
+ __le32 max_transfer_size;
+} __attribute__ ((packed));
+
+struct rndis_init_c { /* IN */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_INIT_C */
+ __le32 msg_len;
+ __le32 request_id;
+ __le32 status;
+ __le32 major_version; /* of rndis (1.0) */
+ __le32 minor_version;
+ __le32 device_flags;
+ __le32 medium; /* zero == 802.3 */
+ __le32 max_packets_per_message;
+ __le32 max_transfer_size;
+ __le32 packet_alignment; /* max 7; (1<<n) bytes */
+ __le32 af_list_offset; /* zero */
+ __le32 af_list_size; /* zero */
+} __attribute__ ((packed));
+
+struct rndis_halt { /* OUT (no reply) */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_HALT */
+ __le32 msg_len;
+ __le32 request_id;
+} __attribute__ ((packed));
+
+struct rndis_query { /* OUT */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_QUERY */
+ __le32 msg_len;
+ __le32 request_id;
+ __le32 oid;
+ __le32 len;
+ __le32 offset;
+/*?*/ __le32 handle; /* zero */
+} __attribute__ ((packed));
+
+struct rndis_query_c { /* IN */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_QUERY_C */
+ __le32 msg_len;
+ __le32 request_id;
+ __le32 status;
+ __le32 len;
+ __le32 offset;
+} __attribute__ ((packed));
+
+struct rndis_set { /* OUT */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_SET */
+ __le32 msg_len;
+ __le32 request_id;
+ __le32 oid;
+ __le32 len;
+ __le32 offset;
+/*?*/ __le32 handle; /* zero */
+} __attribute__ ((packed));
+
+struct rndis_set_c { /* IN */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_SET_C */
+ __le32 msg_len;
+ __le32 request_id;
+ __le32 status;
+} __attribute__ ((packed));
+
+struct rndis_reset { /* IN */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_RESET */
+ __le32 msg_len;
+ __le32 reserved;
+} __attribute__ ((packed));
+
+struct rndis_reset_c { /* OUT */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_RESET_C */
+ __le32 msg_len;
+ __le32 status;
+ __le32 addressing_lost;
+} __attribute__ ((packed));
+
+struct rndis_indicate { /* IN (unrequested) */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_INDICATE */
+ __le32 msg_len;
+ __le32 status;
+ __le32 length;
+ __le32 offset;
+/**/ __le32 diag_status;
+ __le32 error_offset;
+/**/ __le32 message;
+} __attribute__ ((packed));
+
+struct rndis_keepalive { /* OUT (optionally IN) */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_KEEPALIVE */
+ __le32 msg_len;
+ __le32 request_id;
+} __attribute__ ((packed));
+
+struct rndis_keepalive_c { /* IN (optionally OUT) */
+ /* header and: */
+ __le32 msg_type; /* RNDIS_MSG_KEEPALIVE_C */
+ __le32 msg_len;
+ __le32 request_id;
+ __le32 status;
+} __attribute__ ((packed));
+
+/* default filter used with RNDIS devices */
+#define RNDIS_DEFAULT_FILTER ( \
+ RNDIS_PACKET_TYPE_DIRECTED | \
+ RNDIS_PACKET_TYPE_BROADCAST | \
+ RNDIS_PACKET_TYPE_ALL_MULTICAST | \
+ RNDIS_PACKET_TYPE_PROMISCUOUS)
+
+/* Flags to require specific physical medium type for generic_rndis_bind() */
+#define FLAG_RNDIS_PHYM_NOT_WIRELESS 0x0001
+#define FLAG_RNDIS_PHYM_WIRELESS 0x0002
+
+/* Flags for driver_info::data */
+#define RNDIS_DRIVER_DATA_POLL_STATUS 1 /* poll status before control */
+
+extern void rndis_status(struct usbnet *dev, struct urb *urb);
+extern int
+rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen);
+extern int
+generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags);
+extern void rndis_unbind(struct usbnet *dev, struct usb_interface *intf);
+extern int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
+extern struct sk_buff *
+rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags);
+
+#endif /* __LINUX_USB_RNDIS_HOST_H */
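A sketch of driving the control plane declared above: a KEEPALIVE message is laid out in a scratch buffer and handed to rndis_command(). Buffer ownership, request-id handling and error recovery are simplified here.

    #include <linux/slab.h>
    #include <linux/usb/rndis_host.h>

    static int example_rndis_keepalive(struct usbnet *dev)
    {
            struct rndis_keepalive *msg;
            int ret;

            msg = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
            if (!msg)
                    return -ENOMEM;

            msg->msg_type = cpu_to_le32(RNDIS_MSG_KEEPALIVE);
            msg->msg_len = cpu_to_le32(sizeof(*msg));

            /* rndis_command() is expected to assign the request id and
             * wait for the matching response */
            ret = rndis_command(dev, (struct rndis_msg_hdr *)msg,
                                CONTROL_BUFFER_SIZE);
            kfree(msg);
            return ret;
    }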
diff --git a/include/linux/usb/samsung_usb_phy.h b/include/linux/usb/samsung_usb_phy.h
new file mode 100644
index 000000000..916782699
--- /dev/null
+++ b/include/linux/usb/samsung_usb_phy.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * http://www.samsung.com/
+ *
+ * Defines phy types for samsung usb phy controllers - HOST or DEVICE.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+enum samsung_usb_phy_type {
+ USB_PHY_TYPE_DEVICE,
+ USB_PHY_TYPE_HOST,
+};
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
new file mode 100644
index 000000000..704a1ab82
--- /dev/null
+++ b/include/linux/usb/serial.h
@@ -0,0 +1,414 @@
+/*
+ * USB Serial Converter stuff
+ *
+ * Copyright (C) 1999 - 2012
+ * Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#ifndef __LINUX_USB_SERIAL_H
+#define __LINUX_USB_SERIAL_H
+
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/kfifo.h>
+
+/* The maximum number of ports one device can grab at once */
+#define MAX_NUM_PORTS 8
+
+/* parity check flag */
+#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
+
+/* USB serial flags */
+#define USB_SERIAL_WRITE_BUSY 0
+
+/**
+ * usb_serial_port: structure for the specific ports of a device.
+ * @serial: pointer back to the struct usb_serial owner of this port.
+ * @port: pointer to the corresponding tty_port for this port.
+ * @lock: spinlock to grab when updating portions of this structure.
+ * @minor: the minor number of the port
+ * @port_number: the struct usb_serial port number of this port (starts at 0)
+ * @interrupt_in_buffer: pointer to the interrupt in buffer for this port.
+ * @interrupt_in_urb: pointer to the interrupt in struct urb for this port.
+ * @interrupt_in_endpointAddress: endpoint address for the interrupt in pipe
+ * for this port.
+ * @interrupt_out_buffer: pointer to the interrupt out buffer for this port.
+ * @interrupt_out_size: the size of the interrupt_out_buffer, in bytes.
+ * @interrupt_out_urb: pointer to the interrupt out struct urb for this port.
+ * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
+ * for this port.
+ * @bulk_in_buffer: pointer to the bulk in buffer for this port.
+ * @bulk_in_size: the size of the bulk_in_buffer, in bytes.
+ * @read_urb: pointer to the bulk in struct urb for this port.
+ * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
+ * port.
+ * @bulk_in_buffers: pointers to the bulk in buffers for this port
+ * @read_urbs: pointers to the bulk in urbs for this port
+ * @read_urbs_free: status bitmap for the bulk in urbs
+ * @bulk_out_buffer: pointer to the bulk out buffer for this port.
+ * @bulk_out_size: the size of the bulk_out_buffer, in bytes.
+ * @write_urb: pointer to the bulk out struct urb for this port.
+ * @write_fifo: kfifo used to buffer outgoing data
+ * @bulk_out_buffers: pointers to the bulk out buffers for this port
+ * @write_urbs: pointers to the bulk out urbs for this port
+ * @write_urbs_free: status bitmap for the bulk out urbs
+ * @icount: interrupt counters
+ * @tx_bytes: number of bytes currently in host stack queues
+ * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this
+ * port.
+ * @flags: usb serial port flags
+ * @write_wait: a wait_queue_head_t used by the port.
+ * @work: work queue entry for the line discipline waking up.
+ * @throttled: nonzero if the read urb is inactive to throttle the device
+ * @throttle_req: nonzero if the tty wants to throttle us
+ * @dev: pointer to the serial device
+ *
+ * This structure is used by the usb-serial core and drivers for the specific
+ * ports of a device.
+ */
+struct usb_serial_port {
+ struct usb_serial *serial;
+ struct tty_port port;
+ spinlock_t lock;
+ u32 minor;
+ u8 port_number;
+
+ unsigned char *interrupt_in_buffer;
+ struct urb *interrupt_in_urb;
+ __u8 interrupt_in_endpointAddress;
+
+ unsigned char *interrupt_out_buffer;
+ int interrupt_out_size;
+ struct urb *interrupt_out_urb;
+ __u8 interrupt_out_endpointAddress;
+
+ unsigned char *bulk_in_buffer;
+ int bulk_in_size;
+ struct urb *read_urb;
+ __u8 bulk_in_endpointAddress;
+
+ unsigned char *bulk_in_buffers[2];
+ struct urb *read_urbs[2];
+ unsigned long read_urbs_free;
+
+ unsigned char *bulk_out_buffer;
+ int bulk_out_size;
+ struct urb *write_urb;
+ struct kfifo write_fifo;
+
+ unsigned char *bulk_out_buffers[2];
+ struct urb *write_urbs[2];
+ unsigned long write_urbs_free;
+ __u8 bulk_out_endpointAddress;
+
+ struct async_icount icount;
+ int tx_bytes;
+
+ unsigned long flags;
+ wait_queue_head_t write_wait;
+ struct work_struct work;
+ char throttled;
+ char throttle_req;
+ unsigned long sysrq; /* sysrq timeout */
+ struct device dev;
+};
+#define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)
+
+/* get and set the port private data pointer helper functions */
+static inline void *usb_get_serial_port_data(struct usb_serial_port *port)
+{
+ return dev_get_drvdata(&port->dev);
+}
+
+static inline void usb_set_serial_port_data(struct usb_serial_port *port,
+ void *data)
+{
+ dev_set_drvdata(&port->dev, data);
+}
+
+/**
+ * usb_serial - structure used by the usb-serial core for a device
+ * @dev: pointer to the struct usb_device for this device
+ * @type: pointer to the struct usb_serial_driver for this device
+ * @interface: pointer to the struct usb_interface for this device
+ * @num_ports: the number of ports this device has
+ * @num_interrupt_in: number of interrupt in endpoints we have
+ * @num_interrupt_out: number of interrupt out endpoints we have
+ * @num_bulk_in: number of bulk in endpoints we have
+ * @num_bulk_out: number of bulk out endpoints we have
+ * @port: array of struct usb_serial_port structures for the different ports.
+ * @private: place to put any driver specific information that is needed. The
+ * usb-serial driver is required to manage this data, the usb-serial core
+ * will not touch this. Use usb_get_serial_data() and
+ * usb_set_serial_data() to access this.
+ */
+struct usb_serial {
+ struct usb_device *dev;
+ struct usb_serial_driver *type;
+ struct usb_interface *interface;
+ unsigned char disconnected:1;
+ unsigned char suspending:1;
+ unsigned char attached:1;
+ unsigned char minors_reserved:1;
+ unsigned char num_ports;
+ unsigned char num_port_pointers;
+ char num_interrupt_in;
+ char num_interrupt_out;
+ char num_bulk_in;
+ char num_bulk_out;
+ struct usb_serial_port *port[MAX_NUM_PORTS];
+ struct kref kref;
+ struct mutex disc_mutex;
+ void *private;
+};
+#define to_usb_serial(d) container_of(d, struct usb_serial, kref)
+
+/* get and set the serial private data pointer helper functions */
+static inline void *usb_get_serial_data(struct usb_serial *serial)
+{
+ return serial->private;
+}
+
+static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
+{
+ serial->private = data;
+}
+
+/**
+ * usb_serial_driver - describes a usb serial driver
+ * @description: pointer to a string that describes this driver. This string
+ * is used in the syslog messages when a device is inserted or removed.
+ * @id_table: pointer to a list of usb_device_id structures that define all
+ * of the devices this structure can support.
+ * @num_ports: the number of different ports this device will have.
+ * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
+ * (0 = end-point size)
+ * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
+ * @calc_num_ports: pointer to a function to determine how many ports this
+ * device has dynamically. It will be called after the probe()
+ * callback is called, but before attach()
+ * @probe: pointer to the driver's probe function.
+ * This will be called when the device is inserted into the system,
+ * but before the device has been fully initialized by the usb_serial
+ * subsystem. Use this function to download any firmware to the device,
+ * or any other early initialization that might be needed.
+ * Return 0 to continue on with the initialization sequence. Anything
+ * else will abort it.
+ * @attach: pointer to the driver's attach function.
+ * This will be called when the struct usb_serial structure is fully
+ * set up. Do any local initialization of the device, or any private
+ * memory structure allocation at this point in time.
+ * @disconnect: pointer to the driver's disconnect function. This will be
+ * called when the device is unplugged or unbound from the driver.
+ * @release: pointer to the driver's release function. This will be called
+ * when the usb_serial data structure is about to be destroyed.
+ * @usb_driver: pointer to the struct usb_driver that controls this
+ * device. This is necessary to allow dynamic ids to be added to
+ * the driver from sysfs.
+ *
+ * This structure defines a USB Serial driver. It provides all of
+ * the information that the USB serial core code needs. If the function
+ * pointers are defined, then the USB serial core code will call them when
+ * the corresponding tty port functions are called. If they are not
+ * defined, the generic serial function will be used instead.
+ *
+ * The driver.owner field should be set to the module owner of this driver.
+ * The driver.name field should be set to the name of this driver (remember
+ * it will show up in sysfs, so it needs to be short and to the point.
+ * Using the module name is a good idea.)
+ */
+struct usb_serial_driver {
+ const char *description;
+ const struct usb_device_id *id_table;
+ char num_ports;
+
+ struct list_head driver_list;
+ struct device_driver driver;
+ struct usb_driver *usb_driver;
+ struct usb_dynids dynids;
+
+ size_t bulk_in_size;
+ size_t bulk_out_size;
+
+ int (*probe)(struct usb_serial *serial, const struct usb_device_id *id);
+ int (*attach)(struct usb_serial *serial);
+ int (*calc_num_ports) (struct usb_serial *serial);
+
+ void (*disconnect)(struct usb_serial *serial);
+ void (*release)(struct usb_serial *serial);
+
+ int (*port_probe)(struct usb_serial_port *port);
+ int (*port_remove)(struct usb_serial_port *port);
+
+ int (*suspend)(struct usb_serial *serial, pm_message_t message);
+ int (*resume)(struct usb_serial *serial);
+ int (*reset_resume)(struct usb_serial *serial);
+
+ /* serial function calls */
+ /* Called by console and by the tty layer */
+ int (*open)(struct tty_struct *tty, struct usb_serial_port *port);
+ void (*close)(struct usb_serial_port *port);
+ int (*write)(struct tty_struct *tty, struct usb_serial_port *port,
+ const unsigned char *buf, int count);
+ /* Called only by the tty layer */
+ int (*write_room)(struct tty_struct *tty);
+ int (*ioctl)(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg);
+ void (*set_termios)(struct tty_struct *tty,
+ struct usb_serial_port *port, struct ktermios *old);
+ void (*break_ctl)(struct tty_struct *tty, int break_state);
+ int (*chars_in_buffer)(struct tty_struct *tty);
+ void (*wait_until_sent)(struct tty_struct *tty, long timeout);
+ bool (*tx_empty)(struct usb_serial_port *port);
+ void (*throttle)(struct tty_struct *tty);
+ void (*unthrottle)(struct tty_struct *tty);
+ int (*tiocmget)(struct tty_struct *tty);
+ int (*tiocmset)(struct tty_struct *tty,
+ unsigned int set, unsigned int clear);
+ int (*tiocmiwait)(struct tty_struct *tty, unsigned long arg);
+ int (*get_icount)(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
+ /* Called by the tty layer for port level work. There may or may not
+ be an attached tty at this point */
+ void (*dtr_rts)(struct usb_serial_port *port, int on);
+ int (*carrier_raised)(struct usb_serial_port *port);
+ /* Called by the usb serial hooks to allow the user to rework the
+ termios state */
+ void (*init_termios)(struct tty_struct *tty);
+ /* USB events */
+ void (*read_int_callback)(struct urb *urb);
+ void (*write_int_callback)(struct urb *urb);
+ void (*read_bulk_callback)(struct urb *urb);
+ void (*write_bulk_callback)(struct urb *urb);
+ /* Called by the generic read bulk callback */
+ void (*process_read_urb)(struct urb *urb);
+ /* Called by the generic write implementation */
+ int (*prepare_write_buffer)(struct usb_serial_port *port,
+ void *dest, size_t size);
+};
+#define to_usb_serial_driver(d) \
+ container_of(d, struct usb_serial_driver, driver)
+
+extern int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[],
+ const char *name, const struct usb_device_id *id_table);
+extern void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]);
+extern void usb_serial_port_softint(struct usb_serial_port *port);
+
+extern int usb_serial_suspend(struct usb_interface *intf, pm_message_t message);
+extern int usb_serial_resume(struct usb_interface *intf);
+
+/* USB Serial console functions */
+#ifdef CONFIG_USB_SERIAL_CONSOLE
+extern void usb_serial_console_init(int minor);
+extern void usb_serial_console_exit(void);
+extern void usb_serial_console_disconnect(struct usb_serial *serial);
+#else
+static inline void usb_serial_console_init(int minor) { }
+static inline void usb_serial_console_exit(void) { }
+static inline void usb_serial_console_disconnect(struct usb_serial *serial) {}
+#endif
+
+/* Functions needed by other parts of the usbserial core */
+extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor);
+extern void usb_serial_put(struct usb_serial *serial);
+extern int usb_serial_generic_open(struct tty_struct *tty,
+ struct usb_serial_port *port);
+extern int usb_serial_generic_write_start(struct usb_serial_port *port,
+ gfp_t mem_flags);
+extern int usb_serial_generic_write(struct tty_struct *tty,
+ struct usb_serial_port *port, const unsigned char *buf, int count);
+extern void usb_serial_generic_close(struct usb_serial_port *port);
+extern int usb_serial_generic_resume(struct usb_serial *serial);
+extern int usb_serial_generic_write_room(struct tty_struct *tty);
+extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
+extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty,
+ long timeout);
+extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
+extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
+extern void usb_serial_generic_throttle(struct tty_struct *tty);
+extern void usb_serial_generic_unthrottle(struct tty_struct *tty);
+extern int usb_serial_generic_tiocmiwait(struct tty_struct *tty,
+ unsigned long arg);
+extern int usb_serial_generic_get_icount(struct tty_struct *tty,
+ struct serial_icounter_struct *icount);
+extern int usb_serial_generic_register(void);
+extern void usb_serial_generic_deregister(void);
+extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
+ gfp_t mem_flags);
+extern void usb_serial_generic_process_read_urb(struct urb *urb);
+extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port,
+ void *dest, size_t size);
+extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
+ unsigned int ch);
+extern int usb_serial_handle_break(struct usb_serial_port *port);
+extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+ struct tty_struct *tty,
+ unsigned int status);
+
+
+extern int usb_serial_bus_register(struct usb_serial_driver *device);
+extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
+
+extern struct usb_serial_driver usb_serial_generic_device;
+extern struct bus_type usb_serial_bus_type;
+extern struct tty_driver *usb_serial_tty_driver;
+
+static inline void usb_serial_debug_data(struct device *dev,
+ const char *function, int size,
+ const unsigned char *data)
+{
+ dev_dbg(dev, "%s - length = %d, data = %*ph\n",
+ function, size, size, data);
+}
+
+/*
+ * Macro for reporting errors in write path to avoid infinite loop
+ * when port is used as a console.
+ */
+#define dev_err_console(usport, fmt, ...) \
+do { \
+ static bool __print_once; \
+ struct usb_serial_port *__port = (usport); \
+ \
+ if (!__port->port.console || !__print_once) { \
+ __print_once = true; \
+ dev_err(&__port->dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+/*
+ * module_usb_serial_driver() - Helper macro for registering a USB Serial driver
+ * @__serial_drivers: list of usb_serial drivers to register
+ * @__ids: all device ids that @__serial_drivers bind to
+ *
+ * Helper macro for USB serial drivers which do not do anything special
+ * in module init/exit. This eliminates a lot of boilerplate. Each
+ * module may only use this macro once, and calling it replaces
+ * module_init() and module_exit()
+ *
+ */
+#define usb_serial_module_driver(__name, __serial_drivers, __ids) \
+static int __init usb_serial_module_init(void) \
+{ \
+ return usb_serial_register_drivers(__serial_drivers, \
+ __name, __ids); \
+} \
+module_init(usb_serial_module_init); \
+static void __exit usb_serial_module_exit(void) \
+{ \
+ usb_serial_deregister_drivers(__serial_drivers); \
+} \
+module_exit(usb_serial_module_exit);
+
+#define module_usb_serial_driver(__serial_drivers, __ids) \
+ usb_serial_module_driver(KBUILD_MODNAME, __serial_drivers, __ids)
+
+#endif /* __LINUX_USB_SERIAL_H */
+
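A minimal skeleton showing how the pieces above fit together for a driver with no special behaviour: a usb_serial_driver, its NULL-terminated driver list, and registration through module_usb_serial_driver(). The vendor/product IDs and names are placeholders.

    #include <linux/module.h>
    #include <linux/usb.h>
    #include <linux/usb/serial.h>

    static const struct usb_device_id example_ids[] = {
            { USB_DEVICE(0x1234, 0x5678) },         /* hypothetical adapter */
            { }
    };
    MODULE_DEVICE_TABLE(usb, example_ids);

    static struct usb_serial_driver example_device = {
            .driver = {
                    .owner  = THIS_MODULE,
                    .name   = "example-serial",
            },
            .id_table       = example_ids,
            .num_ports      = 1,
    };

    static struct usb_serial_driver * const example_drivers[] = {
            &example_device, NULL
    };

    module_usb_serial_driver(example_drivers, example_ids);
    MODULE_LICENSE("GPL");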
diff --git a/include/linux/usb/sl811.h b/include/linux/usb/sl811.h
new file mode 100644
index 000000000..3afe4d16f
--- /dev/null
+++ b/include/linux/usb/sl811.h
@@ -0,0 +1,29 @@
+/*
+ * board initialization should put one of these into dev->platform_data
+ * and place the sl811hs onto platform_bus named "sl811-hcd".
+ */
+
+#ifndef __LINUX_USB_SL811_H
+#define __LINUX_USB_SL811_H
+
+struct sl811_platform_data {
+ unsigned can_wakeup:1;
+
+ /* given port_power, msec/2 after power on till power good */
+ u8 potpg;
+
+ /* mA/2 power supplied on this port (max = default = 250) */
+ u8 power;
+
+ /* sl811 relies on an external source of VBUS current */
+ void (*port_power)(struct device *dev, int is_on);
+
+ /* pulse sl811 nRST (probably with a GPIO) */
+ void (*reset)(struct device *dev);
+
+ /* some boards need something like these: */
+ /* int (*check_overcurrent)(struct device *dev); */
+ /* void (*clock_enable)(struct device *dev, int is_on); */
+};
+
+#endif /* __LINUX_USB_SL811_H */
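A board-code sketch (values and the callback are hypothetical) of the platform_data consumed by the sl811-hcd driver; note that potpg and power are expressed in units of 2 ms and 2 mA respectively, as documented above.

    #include <linux/usb/sl811.h>

    static void example_sl811_port_power(struct device *dev, int is_on)
    {
            /* drive the board's VBUS switch here */
    }

    static struct sl811_platform_data example_sl811_data = {
            .can_wakeup     = 1,
            .potpg          = 10,   /* ~20 ms power-on to power-good */
            .power          = 125,  /* 250 mA available on the port */
            .port_power     = example_sl811_port_power,
    };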
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h
new file mode 100644
index 000000000..cb33fff2b
--- /dev/null
+++ b/include/linux/usb/storage.h
@@ -0,0 +1,86 @@
+#ifndef __LINUX_USB_STORAGE_H
+#define __LINUX_USB_STORAGE_H
+
+/*
+ * linux/usb/storage.h
+ *
+ * Copyright Matthew Wilcox for Intel Corp, 2010
+ *
+ * This file contains definitions taken from the
+ * USB Mass Storage Class Specification Overview
+ *
+ * Distributed under the terms of the GNU GPL, version two.
+ */
+
+/* Storage subclass codes */
+
+#define USB_SC_RBC 0x01 /* Typically, flash devices */
+#define USB_SC_8020 0x02 /* CD-ROM */
+#define USB_SC_QIC 0x03 /* QIC-157 Tapes */
+#define USB_SC_UFI 0x04 /* Floppy */
+#define USB_SC_8070 0x05 /* Removable media */
+#define USB_SC_SCSI 0x06 /* Transparent */
+#define USB_SC_LOCKABLE 0x07 /* Password-protected */
+
+#define USB_SC_ISD200 0xf0 /* ISD200 ATA */
+#define USB_SC_CYP_ATACB 0xf1 /* Cypress ATACB */
+#define USB_SC_DEVICE 0xff /* Use device's value */
+
+/* Storage protocol codes */
+
+#define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */
+#define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */
+#define USB_PR_BULK 0x50 /* bulk only */
+#define USB_PR_UAS 0x62 /* USB Attached SCSI */
+
+#define USB_PR_USBAT 0x80 /* SCM-ATAPI bridge */
+#define USB_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */
+#define USB_PR_SDDR55 0x82 /* SDDR-55 (made up) */
+#define USB_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */
+#define USB_PR_FREECOM 0xf1 /* Freecom */
+#define USB_PR_DATAFAB 0xf2 /* Datafab chipsets */
+#define USB_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */
+#define USB_PR_ALAUDA 0xf4 /* Alauda chipsets */
+#define USB_PR_KARMA 0xf5 /* Rio Karma */
+
+#define USB_PR_DEVICE 0xff /* Use device's value */
+
+ /*
+ * Bulk only data structures
+ */
+
+/* command block wrapper */
+struct bulk_cb_wrap {
+ __le32 Signature; /* contains 'USBC' */
+ __u32 Tag; /* unique per command id */
+ __le32 DataTransferLength; /* size of data */
+ __u8 Flags; /* direction in bit 0 */
+ __u8 Lun; /* LUN normally 0 */
+ __u8 Length; /* length of the CDB */
+ __u8 CDB[16]; /* max command */
+};
+
+#define US_BULK_CB_WRAP_LEN 31
+#define US_BULK_CB_SIGN 0x43425355 /* spells out 'USBC' */
+#define US_BULK_FLAG_IN (1 << 7)
+#define US_BULK_FLAG_OUT 0
+
+/* command status wrapper */
+struct bulk_cs_wrap {
+ __le32 Signature; /* should = 'USBS' */
+ __u32 Tag; /* same as original command */
+ __le32 Residue; /* amount not transferred */
+ __u8 Status; /* see below */
+};
+
+#define US_BULK_CS_WRAP_LEN 13
+#define US_BULK_CS_SIGN 0x53425355 /* spells out 'USBS' */
+#define US_BULK_STAT_OK 0
+#define US_BULK_STAT_FAIL 1
+#define US_BULK_STAT_PHASE 2
+
+/* bulk-only class specific requests */
+#define US_BULK_RESET_REQUEST 0xff
+#define US_BULK_GET_MAX_LUN 0xfe
+
+#endif
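A sketch of filling in the Bulk-Only command block wrapper defined above for a device-to-host transfer; tag allocation and CDB construction are the caller's responsibility, and the helper name is illustrative.

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/usb/storage.h>

    static void example_fill_cbw(struct bulk_cb_wrap *cbw, u32 tag,
                                 u32 data_len, const u8 *cdb, u8 cdb_len)
    {
            memset(cbw, 0, sizeof(*cbw));
            cbw->Signature = cpu_to_le32(US_BULK_CB_SIGN);  /* 'USBC' */
            cbw->Tag = tag;
            cbw->DataTransferLength = cpu_to_le32(data_len);
            cbw->Flags = US_BULK_FLAG_IN;                   /* device-to-host */
            cbw->Lun = 0;
            cbw->Length = cdb_len;
            memcpy(cbw->CDB, cdb, min_t(u8, cdb_len, sizeof(cbw->CDB)));
    }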
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
new file mode 100644
index 000000000..1de16c324
--- /dev/null
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEGRA_USB_PHY_H
+#define __TEGRA_USB_PHY_H
+
+#include <linux/clk.h>
+#include <linux/usb/otg.h>
+
+/*
+ * utmi_pll_config_in_car_module: true if the UTMI PLL configuration registers
+ * should be set up by clk-tegra, false if by the PHY code
+ * has_hostpc: true if the USB controller has the HOSTPC extension, which
+ * changes the location of the PHCD and PTS fields
+ * requires_usbmode_setup: true if the USBMODE register needs to be set to
+ * enter host mode
+ * requires_extra_tuning_parameters: true if xcvr_hsslew, hssquelch_level
+ * and hsdiscon_level should be set for adequate signal quality
+ */
+
+struct tegra_phy_soc_config {
+ bool utmi_pll_config_in_car_module;
+ bool has_hostpc;
+ bool requires_usbmode_setup;
+ bool requires_extra_tuning_parameters;
+};
+
+struct tegra_utmip_config {
+ u8 hssync_start_delay;
+ u8 elastic_limit;
+ u8 idle_wait_delay;
+ u8 term_range_adj;
+ bool xcvr_setup_use_fuses;
+ u8 xcvr_setup;
+ u8 xcvr_lsfslew;
+ u8 xcvr_lsrslew;
+ u8 xcvr_hsslew;
+ u8 hssquelch_level;
+ u8 hsdiscon_level;
+};
+
+enum tegra_usb_phy_port_speed {
+ TEGRA_USB_PHY_PORT_SPEED_FULL = 0,
+ TEGRA_USB_PHY_PORT_SPEED_LOW,
+ TEGRA_USB_PHY_PORT_SPEED_HIGH,
+};
+
+struct tegra_xtal_freq;
+
+struct tegra_usb_phy {
+ int instance;
+ const struct tegra_xtal_freq *freq;
+ void __iomem *regs;
+ void __iomem *pad_regs;
+ struct clk *clk;
+ struct clk *pll_u;
+ struct clk *pad_clk;
+ struct regulator *vbus;
+ enum usb_dr_mode mode;
+ void *config;
+ const struct tegra_phy_soc_config *soc_config;
+ struct usb_phy *ulpi;
+ struct usb_phy u_phy;
+ bool is_legacy_phy;
+ bool is_ulpi_phy;
+ int reset_gpio;
+};
+
+void tegra_usb_phy_preresume(struct usb_phy *phy);
+
+void tegra_usb_phy_postresume(struct usb_phy *phy);
+
+void tegra_ehci_phy_restore_start(struct usb_phy *phy,
+ enum tegra_usb_phy_port_speed port_speed);
+
+void tegra_ehci_phy_restore_end(struct usb_phy *phy);
+
+#endif /* __TEGRA_USB_PHY_H */
diff --git a/include/linux/usb/tilegx.h b/include/linux/usb/tilegx.h
new file mode 100644
index 000000000..2d65e3435
--- /dev/null
+++ b/include/linux/usb/tilegx.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Structure to contain platform-specific data related to Tile-Gx USB
+ * controllers.
+ */
+
+#ifndef _LINUX_USB_TILEGX_H
+#define _LINUX_USB_TILEGX_H
+
+#include <gxio/usb_host.h>
+
+struct tilegx_usb_platform_data {
+ /* GXIO device index. */
+ int dev_index;
+
+ /* GXIO device context. */
+ gxio_usb_host_context_t usb_ctx;
+
+ /* Device IRQ. */
+ unsigned int irq;
+};
+
+#endif /* _LINUX_USB_TILEGX_H */
diff --git a/include/linux/usb/uas.h b/include/linux/usb/uas.h
new file mode 100644
index 000000000..3fc8e8b9f
--- /dev/null
+++ b/include/linux/usb/uas.h
@@ -0,0 +1,109 @@
+#ifndef __USB_UAS_H__
+#define __USB_UAS_H__
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+/* Common header for all IUs */
+struct iu {
+ __u8 iu_id;
+ __u8 rsvd1;
+ __be16 tag;
+} __attribute__((__packed__));
+
+enum {
+ IU_ID_COMMAND = 0x01,
+ IU_ID_STATUS = 0x03,
+ IU_ID_RESPONSE = 0x04,
+ IU_ID_TASK_MGMT = 0x05,
+ IU_ID_READ_READY = 0x06,
+ IU_ID_WRITE_READY = 0x07,
+};
+
+enum {
+ TMF_ABORT_TASK = 0x01,
+ TMF_ABORT_TASK_SET = 0x02,
+ TMF_CLEAR_TASK_SET = 0x04,
+ TMF_LOGICAL_UNIT_RESET = 0x08,
+ TMF_I_T_NEXUS_RESET = 0x10,
+ TMF_CLEAR_ACA = 0x40,
+ TMF_QUERY_TASK = 0x80,
+ TMF_QUERY_TASK_SET = 0x81,
+ TMF_QUERY_ASYNC_EVENT = 0x82,
+};
+
+enum {
+ RC_TMF_COMPLETE = 0x00,
+ RC_INVALID_INFO_UNIT = 0x02,
+ RC_TMF_NOT_SUPPORTED = 0x04,
+ RC_TMF_FAILED = 0x05,
+ RC_TMF_SUCCEEDED = 0x08,
+ RC_INCORRECT_LUN = 0x09,
+ RC_OVERLAPPED_TAG = 0x0a,
+};
+
+struct command_iu {
+ __u8 iu_id;
+ __u8 rsvd1;
+ __be16 tag;
+ __u8 prio_attr;
+ __u8 rsvd5;
+ __u8 len;
+ __u8 rsvd7;
+ struct scsi_lun lun;
+ __u8 cdb[16]; /* XXX: Overflow-checking tools may misunderstand */
+} __attribute__((__packed__));
+
+struct task_mgmt_iu {
+ __u8 iu_id;
+ __u8 rsvd1;
+ __be16 tag;
+ __u8 function;
+ __u8 rsvd2;
+ __be16 task_tag;
+ struct scsi_lun lun;
+} __attribute__((__packed__));
+
+/*
+ * Also used for the Read Ready and Write Ready IUs since they have the
+ * same first four bytes
+ */
+struct sense_iu {
+ __u8 iu_id;
+ __u8 rsvd1;
+ __be16 tag;
+ __be16 status_qual;
+ __u8 status;
+ __u8 rsvd7[7];
+ __be16 len;
+ __u8 sense[SCSI_SENSE_BUFFERSIZE];
+} __attribute__((__packed__));
+
+struct response_iu {
+ __u8 iu_id;
+ __u8 rsvd1;
+ __be16 tag;
+ __u8 add_response_info[3];
+ __u8 response_code;
+} __attribute__((__packed__));
+
+struct usb_pipe_usage_descriptor {
+ __u8 bLength;
+ __u8 bDescriptorType;
+
+ __u8 bPipeID;
+ __u8 Reserved;
+} __attribute__((__packed__));
+
+enum {
+ CMD_PIPE_ID = 1,
+ STATUS_PIPE_ID = 2,
+ DATA_IN_PIPE_ID = 3,
+ DATA_OUT_PIPE_ID = 4,
+
+ UAS_SIMPLE_TAG = 0,
+ UAS_HEAD_TAG = 1,
+ UAS_ORDERED_TAG = 2,
+ UAS_ACA = 4,
+};
+#endif
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
new file mode 100644
index 000000000..5c295c26a
--- /dev/null
+++ b/include/linux/usb/ulpi.h
@@ -0,0 +1,200 @@
+/*
+ * ulpi.h -- ULPI defines and function prototypes
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * version 2 of that License.
+ */
+
+#ifndef __LINUX_USB_ULPI_H
+#define __LINUX_USB_ULPI_H
+
+#include <linux/usb/otg.h>
+/*-------------------------------------------------------------------------*/
+
+/*
+ * ULPI Flags
+ */
+#define ULPI_OTG_ID_PULLUP (1 << 0)
+#define ULPI_OTG_DP_PULLDOWN_DIS (1 << 1)
+#define ULPI_OTG_DM_PULLDOWN_DIS (1 << 2)
+#define ULPI_OTG_DISCHRGVBUS (1 << 3)
+#define ULPI_OTG_CHRGVBUS (1 << 4)
+#define ULPI_OTG_DRVVBUS (1 << 5)
+#define ULPI_OTG_DRVVBUS_EXT (1 << 6)
+#define ULPI_OTG_EXTVBUSIND (1 << 7)
+
+#define ULPI_IC_6PIN_SERIAL (1 << 8)
+#define ULPI_IC_3PIN_SERIAL (1 << 9)
+#define ULPI_IC_CARKIT (1 << 10)
+#define ULPI_IC_CLKSUSPM (1 << 11)
+#define ULPI_IC_AUTORESUME (1 << 12)
+#define ULPI_IC_EXTVBUS_INDINV (1 << 13)
+#define ULPI_IC_IND_PASSTHRU (1 << 14)
+#define ULPI_IC_PROTECT_DIS (1 << 15)
+
+#define ULPI_FC_HS (1 << 16)
+#define ULPI_FC_FS (1 << 17)
+#define ULPI_FC_LS (1 << 18)
+#define ULPI_FC_FS4LS (1 << 19)
+#define ULPI_FC_TERMSEL (1 << 20)
+#define ULPI_FC_OP_NORM (1 << 21)
+#define ULPI_FC_OP_NODRV (1 << 22)
+#define ULPI_FC_OP_DIS_NRZI (1 << 23)
+#define ULPI_FC_OP_NSYNC_NEOP (1 << 24)
+#define ULPI_FC_RST (1 << 25)
+#define ULPI_FC_SUSPM (1 << 26)
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Macros for Set and Clear
+ * See ULPI 1.1 specification to find the registers with Set and Clear offsets
+ */
+#define ULPI_SET(a) (a + 1)
+#define ULPI_CLR(a) (a + 2)
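+
+/*
+ * Usage sketch (assumes a struct usb_phy *phy whose I/O ops are already
+ * wired up, e.g. one obtained from otg_ulpi_create() at the end of this
+ * header; not a sequence lifted from a specific driver).  Writing a bit
+ * mask to a register's Set or Clear offset toggles just those bits without
+ * a read-modify-write:
+ *
+ *	usb_phy_io_write(phy, ULPI_OTG_CTRL_DRVVBUS, ULPI_SET(ULPI_OTG_CTRL));
+ *	usb_phy_io_write(phy, ULPI_OTG_CTRL_DRVVBUS, ULPI_CLR(ULPI_OTG_CTRL));
+ */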
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Register Map
+ */
+#define ULPI_VENDOR_ID_LOW 0x00
+#define ULPI_VENDOR_ID_HIGH 0x01
+#define ULPI_PRODUCT_ID_LOW 0x02
+#define ULPI_PRODUCT_ID_HIGH 0x03
+#define ULPI_FUNC_CTRL 0x04
+#define ULPI_IFC_CTRL 0x07
+#define ULPI_OTG_CTRL 0x0a
+#define ULPI_USB_INT_EN_RISE 0x0d
+#define ULPI_USB_INT_EN_FALL 0x10
+#define ULPI_USB_INT_STS 0x13
+#define ULPI_USB_INT_LATCH 0x14
+#define ULPI_DEBUG 0x15
+#define ULPI_SCRATCH 0x16
+/* Optional Carkit Registers */
+#define ULPI_CARCIT_CTRL 0x19
+#define ULPI_CARCIT_INT_DELAY 0x1c
+#define ULPI_CARCIT_INT_EN 0x1d
+#define ULPI_CARCIT_INT_STS 0x20
+#define ULPI_CARCIT_INT_LATCH 0x21
+#define ULPI_CARCIT_PLS_CTRL 0x22
+/* Other Optional Registers */
+#define ULPI_TX_POS_WIDTH 0x25
+#define ULPI_TX_NEG_WIDTH 0x26
+#define ULPI_POLARITY_RECOVERY 0x27
+/* Access Extended Register Set */
+#define ULPI_ACCESS_EXTENDED 0x2f
+/* Vendor Specific */
+#define ULPI_VENDOR_SPECIFIC 0x30
+/* Extended Registers */
+#define ULPI_EXT_VENDOR_SPECIFIC 0x80
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Register Bits
+ */
+
+/* Function Control */
+#define ULPI_FUNC_CTRL_XCVRSEL (1 << 0)
+#define ULPI_FUNC_CTRL_XCVRSEL_MASK (3 << 0)
+#define ULPI_FUNC_CTRL_HIGH_SPEED (0 << 0)
+#define ULPI_FUNC_CTRL_FULL_SPEED (1 << 0)
+#define ULPI_FUNC_CTRL_LOW_SPEED (2 << 0)
+#define ULPI_FUNC_CTRL_FS4LS (3 << 0)
+#define ULPI_FUNC_CTRL_TERMSELECT (1 << 2)
+#define ULPI_FUNC_CTRL_OPMODE (1 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_MASK (3 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NORMAL (0 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NONDRIVING (1 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_DISABLE_NRZI (2 << 3)
+#define ULPI_FUNC_CTRL_OPMODE_NOSYNC_NOEOP (3 << 3)
+#define ULPI_FUNC_CTRL_RESET (1 << 5)
+#define ULPI_FUNC_CTRL_SUSPENDM (1 << 6)
+
+/* Interface Control */
+#define ULPI_IFC_CTRL_6_PIN_SERIAL_MODE (1 << 0)
+#define ULPI_IFC_CTRL_3_PIN_SERIAL_MODE (1 << 1)
+#define ULPI_IFC_CTRL_CARKITMODE (1 << 2)
+#define ULPI_IFC_CTRL_CLOCKSUSPENDM (1 << 3)
+#define ULPI_IFC_CTRL_AUTORESUME (1 << 4)
+#define ULPI_IFC_CTRL_EXTERNAL_VBUS (1 << 5)
+#define ULPI_IFC_CTRL_PASSTHRU (1 << 6)
+#define ULPI_IFC_CTRL_PROTECT_IFC_DISABLE (1 << 7)
+
+/* OTG Control */
+#define ULPI_OTG_CTRL_ID_PULLUP (1 << 0)
+#define ULPI_OTG_CTRL_DP_PULLDOWN (1 << 1)
+#define ULPI_OTG_CTRL_DM_PULLDOWN (1 << 2)
+#define ULPI_OTG_CTRL_DISCHRGVBUS (1 << 3)
+#define ULPI_OTG_CTRL_CHRGVBUS (1 << 4)
+#define ULPI_OTG_CTRL_DRVVBUS (1 << 5)
+#define ULPI_OTG_CTRL_DRVVBUS_EXT (1 << 6)
+#define ULPI_OTG_CTRL_EXTVBUSIND (1 << 7)
+
+/* USB Interrupt Enable Rising,
+ * USB Interrupt Enable Falling,
+ * USB Interrupt Status and
+ * USB Interrupt Latch
+ */
+#define ULPI_INT_HOST_DISCONNECT (1 << 0)
+#define ULPI_INT_VBUS_VALID (1 << 1)
+#define ULPI_INT_SESS_VALID (1 << 2)
+#define ULPI_INT_SESS_END (1 << 3)
+#define ULPI_INT_IDGRD (1 << 4)
+
+/* Debug */
+#define ULPI_DEBUG_LINESTATE0 (1 << 0)
+#define ULPI_DEBUG_LINESTATE1 (1 << 1)
+
+/* Carkit Control */
+#define ULPI_CARKIT_CTRL_CARKITPWR (1 << 0)
+#define ULPI_CARKIT_CTRL_IDGNDDRV (1 << 1)
+#define ULPI_CARKIT_CTRL_TXDEN (1 << 2)
+#define ULPI_CARKIT_CTRL_RXDEN (1 << 3)
+#define ULPI_CARKIT_CTRL_SPKLEFTEN (1 << 4)
+#define ULPI_CARKIT_CTRL_SPKRIGHTEN (1 << 5)
+#define ULPI_CARKIT_CTRL_MICEN (1 << 6)
+
+/* Carkit Interrupt Enable */
+#define ULPI_CARKIT_INT_EN_IDFLOAT_RISE (1 << 0)
+#define ULPI_CARKIT_INT_EN_IDFLOAT_FALL (1 << 1)
+#define ULPI_CARKIT_INT_EN_CARINTDET (1 << 2)
+#define ULPI_CARKIT_INT_EN_DP_RISE (1 << 3)
+#define ULPI_CARKIT_INT_EN_DP_FALL (1 << 4)
+
+/* Carkit Interrupt Status and
+ * Carkit Interrupt Latch
+ */
+#define ULPI_CARKIT_INT_IDFLOAT (1 << 0)
+#define ULPI_CARKIT_INT_CARINTDET (1 << 1)
+#define ULPI_CARKIT_INT_DP (1 << 2)
+
+/* Carkit Pulse Control */
+#define ULPI_CARKIT_PLS_CTRL_TXPLSEN (1 << 0)
+#define ULPI_CARKIT_PLS_CTRL_RXPLSEN (1 << 1)
+#define ULPI_CARKIT_PLS_CTRL_SPKRLEFT_BIASEN (1 << 2)
+#define ULPI_CARKIT_PLS_CTRL_SPKRRIGHT_BIASEN (1 << 3)
+
+/*-------------------------------------------------------------------------*/
+
+#if IS_ENABLED(CONFIG_USB_ULPI)
+struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
+ unsigned int flags);
+#else
+static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_USB_ULPI_VIEWPORT
+/* access ops for controllers with a viewport register */
+extern struct usb_phy_io_ops ulpi_viewport_access_ops;
+#endif
+
+#endif /* __LINUX_USB_ULPI_H */
diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h
new file mode 100644
index 000000000..f92eb635b
--- /dev/null
+++ b/include/linux/usb/usb338x.h
@@ -0,0 +1,199 @@
+/*
+ * USB 338x super/high/full speed USB device controller.
+ * Unlike many such controllers, this one talks PCI.
+ *
+ * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_USB338X_H
+#define __LINUX_USB_USB338X_H
+
+#include <linux/usb/net2280.h>
+
+/*
+ * Extra defined bits for net2280 registers
+ */
+#define SCRATCH 0x0b
+
+#define DEFECT7374_FSM_FIELD 28
+#define SUPER_SPEED 8
+#define DMA_REQUEST_OUTSTANDING 5
+#define DMA_PAUSE_DONE_INTERRUPT 26
+#define SET_ISOCHRONOUS_DELAY 24
+#define SET_SEL 22
+#define SUPER_SPEED_MODE 8
+
+/*ep_cfg*/
+#define MAX_BURST_SIZE 24
+#define EP_FIFO_BYTE_COUNT 16
+#define IN_ENDPOINT_ENABLE 14
+#define IN_ENDPOINT_TYPE 12
+#define OUT_ENDPOINT_ENABLE 10
+#define OUT_ENDPOINT_TYPE 8
+
+struct usb338x_usb_ext_regs {
+ u32 usbclass;
+#define DEVICE_PROTOCOL 16
+#define DEVICE_SUB_CLASS 8
+#define DEVICE_CLASS 0
+ u32 ss_sel;
+#define U2_SYSTEM_EXIT_LATENCY 8
+#define U1_SYSTEM_EXIT_LATENCY 0
+ u32 ss_del;
+#define U2_DEVICE_EXIT_LATENCY 8
+#define U1_DEVICE_EXIT_LATENCY 0
+ u32 usb2lpm;
+#define USB_L1_LPM_HIRD 2
+#define USB_L1_LPM_REMOTE_WAKE 1
+#define USB_L1_LPM_SUPPORT 0
+ u32 usb3belt;
+#define BELT_MULTIPLIER 10
+#define BEST_EFFORT_LATENCY_TOLERANCE 0
+ u32 usbctl2;
+#define LTM_ENABLE 7
+#define U2_ENABLE 6
+#define U1_ENABLE 5
+#define FUNCTION_SUSPEND 4
+#define USB3_CORE_ENABLE 3
+#define USB2_CORE_ENABLE 2
+#define SERIAL_NUMBER_STRING_ENABLE 0
+ u32 in_timeout;
+#define GPEP3_TIMEOUT 19
+#define GPEP2_TIMEOUT 18
+#define GPEP1_TIMEOUT 17
+#define GPEP0_TIMEOUT 16
+#define GPEP3_TIMEOUT_VALUE 13
+#define GPEP3_TIMEOUT_ENABLE 12
+#define GPEP2_TIMEOUT_VALUE 9
+#define GPEP2_TIMEOUT_ENABLE 8
+#define GPEP1_TIMEOUT_VALUE 5
+#define GPEP1_TIMEOUT_ENABLE 4
+#define GPEP0_TIMEOUT_VALUE 1
+#define GPEP0_TIMEOUT_ENABLE 0
+ u32 isodelay;
+#define ISOCHRONOUS_DELAY 0
+} __packed;
+
+struct usb338x_fifo_regs {
+ /* offset 0x0500, 0x0520, 0x0540, 0x0560, 0x0580 */
+ u32 ep_fifo_size_base;
+#define IN_FIFO_BASE_ADDRESS 22
+#define IN_FIFO_SIZE 16
+#define OUT_FIFO_BASE_ADDRESS 6
+#define OUT_FIFO_SIZE 0
+ u32 ep_fifo_out_wrptr;
+ u32 ep_fifo_out_rdptr;
+ u32 ep_fifo_in_wrptr;
+ u32 ep_fifo_in_rdptr;
+ u32 unused[3];
+} __packed;
+
+
+/* Link layer */
+struct usb338x_ll_regs {
+ /* offset 0x700 */
+ u32 ll_ltssm_ctrl1;
+ u32 ll_ltssm_ctrl2;
+ u32 ll_ltssm_ctrl3;
+ u32 unused[2];
+ u32 ll_general_ctrl0;
+ u32 ll_general_ctrl1;
+#define PM_U3_AUTO_EXIT 29
+#define PM_U2_AUTO_EXIT 28
+#define PM_U1_AUTO_EXIT 27
+#define PM_FORCE_U2_ENTRY 26
+#define PM_FORCE_U1_ENTRY 25
+#define PM_LGO_COLLISION_SEND_LAU 24
+#define PM_DIR_LINK_REJECT 23
+#define PM_FORCE_LINK_ACCEPT 22
+#define PM_DIR_ENTRY_U3 20
+#define PM_DIR_ENTRY_U2 19
+#define PM_DIR_ENTRY_U1 18
+#define PM_U2_ENABLE 17
+#define PM_U1_ENABLE 16
+#define SKP_THRESHOLD_ADJUST_FMW 8
+#define RESEND_DPP_ON_LRTY_FMW 7
+#define DL_BIT_VALUE_FMW 6
+#define FORCE_DL_BIT 5
+ u32 ll_general_ctrl2;
+#define SELECT_INVERT_LANE_POLARITY 7
+#define FORCE_INVERT_LANE_POLARITY 6
+ u32 ll_general_ctrl3;
+ u32 ll_general_ctrl4;
+ u32 ll_error_gen;
+} __packed;
+
+struct usb338x_ll_lfps_regs {
+ /* offset 0x748 */
+ u32 ll_lfps_5;
+#define TIMER_LFPS_6US 16
+ u32 ll_lfps_6;
+#define TIMER_LFPS_80US 0
+} __packed;
+
+struct usb338x_ll_tsn_regs {
+ /* offset 0x77C */
+ u32 ll_tsn_counters_2;
+#define HOT_TX_NORESET_TS2 24
+ u32 ll_tsn_counters_3;
+#define HOT_RX_RESET_TS2 0
+} __packed;
+
+struct usb338x_ll_chi_regs {
+ /* offset 0x79C */
+ u32 ll_tsn_chicken_bit;
+#define RECOVERY_IDLE_TO_RECOVER_FMW 3
+} __packed;
+
+/* protocol layer */
+struct usb338x_pl_regs {
+ /* offset 0x800 */
+ u32 pl_reg_1;
+ u32 pl_reg_2;
+ u32 pl_reg_3;
+ u32 pl_reg_4;
+ u32 pl_ep_ctrl;
+ /* Protocol Layer Endpoint Control */
+#define PL_EP_CTRL 0x810
+#define ENDPOINT_SELECT 0
+ /* [4:0] */
+#define EP_INITIALIZED 16
+#define SEQUENCE_NUMBER_RESET 17
+#define CLEAR_ACK_ERROR_CODE 20
+ u32 pl_reg_6;
+ u32 pl_reg_7;
+ u32 pl_reg_8;
+ u32 pl_ep_status_1;
+ /* Protocol Layer Endpoint Status 1 */
+#define PL_EP_STATUS_1 0x820
+#define STATE 16
+#define ACK_GOOD_NORMAL 0x11
+#define ACK_GOOD_MORE_ACKS_TO_COME 0x16
+ u32 pl_ep_status_2;
+ u32 pl_ep_status_3;
+ /* Protocol Layer Endpoint Status 3 */
+#define PL_EP_STATUS_3 0x828
+#define SEQUENCE_NUMBER 0
+ u32 pl_ep_status_4;
+ /* Protocol Layer Endpoint Status 4 */
+#define PL_EP_STATUS_4 0x82c
+ u32 pl_ep_cfg_4;
+ /* Protocol Layer Endpoint Configuration 4 */
+#define PL_EP_CFG_4 0x830
+#define NON_CTRL_IN_TOLERATE_BAD_DIR 6
+} __packed;
+
+#endif /* __LINUX_USB_USB338X_H */
diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h
new file mode 100644
index 000000000..c13632d52
--- /dev/null
+++ b/include/linux/usb/usb_phy_generic.h
@@ -0,0 +1,33 @@
+#ifndef __LINUX_USB_NOP_XCEIV_H
+#define __LINUX_USB_NOP_XCEIV_H
+
+#include <linux/usb/otg.h>
+#include <linux/gpio/consumer.h>
+
+struct usb_phy_generic_platform_data {
+ enum usb_phy_type type;
+ unsigned long clk_rate;
+
+ /* if set, probing fails with -EPROBE_DEFER when the regulator can't be obtained */
+ unsigned int needs_vcc:1;
+ unsigned int needs_reset:1; /* deprecated */
+ int gpio_reset;
+ struct gpio_desc *gpiod_vbus;
+};
+
+#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
+/* sometimes transceivers are accessed only through e.g. ULPI */
+extern struct platform_device *usb_phy_generic_register(void);
+extern void usb_phy_generic_unregister(struct platform_device *);
+#else
+static inline struct platform_device *usb_phy_generic_register(void)
+{
+ return NULL;
+}
+
+static inline void usb_phy_generic_unregister(struct platform_device *pdev)
+{
+}
+#endif
+
+#endif /* __LINUX_USB_NOP_XCEIV_H */
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
new file mode 100644
index 000000000..6e0ce8c7b
--- /dev/null
+++ b/include/linux/usb/usbnet.h
@@ -0,0 +1,282 @@
+/*
+ * USB Networking Link Interface
+ *
+ * Copyright (C) 2000-2005 by David Brownell <dbrownell@users.sourceforge.net>
+ * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_USB_USBNET_H
+#define __LINUX_USB_USBNET_H
+
+/* interface from usbnet core to each USB networking link we handle */
+struct usbnet {
+ /* housekeeping */
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct driver_info *driver_info;
+ const char *driver_name;
+ void *driver_priv;
+ wait_queue_head_t wait;
+ struct mutex phy_mutex;
+ unsigned char suspend_count;
+ unsigned char pkt_cnt, pkt_err;
+ unsigned short rx_qlen, tx_qlen;
+ unsigned can_dma_sg:1;
+
+ /* i/o info: pipes etc */
+ unsigned in, out;
+ struct usb_host_endpoint *status;
+ unsigned maxpacket;
+ struct timer_list delay;
+ const char *padding_pkt;
+
+ /* protocol/interface state */
+ struct net_device *net;
+ int msg_enable;
+ unsigned long data[5];
+ u32 xid;
+ u32 hard_mtu; /* count any extra framing */
+ size_t rx_urb_size; /* size for rx urbs */
+ struct mii_if_info mii;
+
+ /* various kinds of pending driver work */
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct sk_buff_head done;
+ struct sk_buff_head rxq_pause;
+ struct urb *interrupt;
+ unsigned interrupt_count;
+ struct mutex interrupt_mutex;
+ struct usb_anchor deferred;
+ struct tasklet_struct bh;
+
+ struct work_struct kevent;
+ unsigned long flags;
+# define EVENT_TX_HALT 0
+# define EVENT_RX_HALT 1
+# define EVENT_RX_MEMORY 2
+# define EVENT_STS_SPLIT 3
+# define EVENT_LINK_RESET 4
+# define EVENT_RX_PAUSED 5
+# define EVENT_DEV_ASLEEP 6
+# define EVENT_DEV_OPEN 7
+# define EVENT_DEVICE_REPORT_IDLE 8
+# define EVENT_NO_RUNTIME_PM 9
+# define EVENT_RX_KILL 10
+# define EVENT_LINK_CHANGE 11
+# define EVENT_SET_RX_MODE 12
+};
+
+static inline struct usb_driver *driver_of(struct usb_interface *intf)
+{
+ return to_usb_driver(intf->dev.driver);
+}
+
+/* interface from the device/framing level "minidriver" to core */
+struct driver_info {
+ char *description;
+
+ int flags;
+/* framing is CDC Ethernet, not writing ZLPs (hw issues), or optionally: */
+#define FLAG_FRAMING_NC 0x0001 /* guard against device dropouts */
+#define FLAG_FRAMING_GL 0x0002 /* genelink batches packets */
+#define FLAG_FRAMING_Z 0x0004 /* zaurus adds a trailer */
+#define FLAG_FRAMING_RN 0x0008 /* RNDIS batches, plus huge header */
+
+#define FLAG_NO_SETINT 0x0010 /* device can't set_interface() */
+#define FLAG_ETHER 0x0020 /* maybe use "eth%d" names */
+
+#define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */
+#define FLAG_WLAN 0x0080 /* use "wlan%d" names */
+#define FLAG_AVOID_UNLINK_URBS 0x0100 /* don't unlink urbs at usbnet_stop() */
+#define FLAG_SEND_ZLP 0x0200 /* hw requires ZLPs are sent */
+#define FLAG_WWAN 0x0400 /* use "wwan%d" names */
+
+#define FLAG_LINK_INTR 0x0800 /* updates link (carrier) status */
+
+#define FLAG_POINTTOPOINT 0x1000 /* possibly use "usb%d" names */
+
+/*
+ * Indicates to usbnet that the USB driver accumulates multiple IP packets.
+ * Affects statistics (counters) and short packet handling.
+ */
+#define FLAG_MULTI_PACKET 0x2000
+#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frame */
+#define FLAG_NOARP 0x8000 /* device can't do ARP */
+
+ /* init device ... can sleep, or cause probe() failure */
+ int (*bind)(struct usbnet *, struct usb_interface *);
+
+ /* cleanup device ... can sleep, but can't fail */
+ void (*unbind)(struct usbnet *, struct usb_interface *);
+
+ /* reset device ... can sleep */
+ int (*reset)(struct usbnet *);
+
+ /* stop device ... can sleep */
+ int (*stop)(struct usbnet *);
+
+ /* see if peer is connected ... can sleep */
+ int (*check_connect)(struct usbnet *);
+
+ /* (dis)activate runtime power management */
+ int (*manage_power)(struct usbnet *, int);
+
+ /* for status polling */
+ void (*status)(struct usbnet *, struct urb *);
+
+ /* link reset handling, called from defer_kevent */
+ int (*link_reset)(struct usbnet *);
+
+ /* fixup rx packet (strip framing) */
+ int (*rx_fixup)(struct usbnet *dev, struct sk_buff *skb);
+
+ /* fixup tx packet (add framing) */
+ struct sk_buff *(*tx_fixup)(struct usbnet *dev,
+ struct sk_buff *skb, gfp_t flags);
+
+ /* recover from timeout */
+ void (*recover)(struct usbnet *dev);
+
+ /* early initialization code, can sleep. This is for minidrivers
+ * having 'subminidrivers' that need to do extra initialization
+ * right after the minidriver has initialized the hardware. */
+ int (*early_init)(struct usbnet *dev);
+
+ /* called by minidriver when receiving indication */
+ void (*indication)(struct usbnet *dev, void *ind, int indlen);
+
+ /* rx mode change (device changes address list filtering) */
+ void (*set_rx_mode)(struct usbnet *dev);
+
+ /* for new devices, use the descriptor-reading code instead */
+ int in; /* rx endpoint */
+ int out; /* tx endpoint */
+
+ unsigned long data; /* Misc driver specific data */
+};
+
+/* Minidrivers are just drivers using the "usbnet" core as a powerful
+ * network-specific subroutine library ... that happens to do pretty
+ * much everything except custom framing and chip-specific stuff.
+ */
+extern int usbnet_probe(struct usb_interface *, const struct usb_device_id *);
+extern int usbnet_suspend(struct usb_interface *, pm_message_t);
+extern int usbnet_resume(struct usb_interface *);
+extern void usbnet_disconnect(struct usb_interface *);
+extern void usbnet_device_suggests_idle(struct usbnet *dev);
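+
+/* A minimal minidriver sketch (foo_bind and foo_id_table are hypothetical
+ * names, shown only to illustrate how a driver_info plugs into the usbnet
+ * core; real minidrivers add framing fixups and chip-specific setup):
+ *
+ *	static const struct driver_info foo_info = {
+ *		.description	= "Foo USB Ethernet",
+ *		.flags		= FLAG_ETHER | FLAG_LINK_INTR,
+ *		.bind		= foo_bind,
+ *	};
+ *
+ *	static struct usb_driver foo_driver = {
+ *		.name		= "foo",
+ *		.id_table	= foo_id_table,
+ *		.probe		= usbnet_probe,
+ *		.disconnect	= usbnet_disconnect,
+ *		.suspend	= usbnet_suspend,
+ *		.resume		= usbnet_resume,
+ *	};
+ */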
+
+extern int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, void *data, u16 size);
+extern int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data, u16 size);
+extern int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, void *data, u16 size);
+extern int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data, u16 size);
+extern int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, const void *data, u16 size);
+
+/* Drivers that reuse some of the standard USB CDC infrastructure
+ * (notably, using multiple interfaces according to the CDC
+ * union descriptor) get some helper code.
+ */
+struct cdc_state {
+ struct usb_cdc_header_desc *header;
+ struct usb_cdc_union_desc *u;
+ struct usb_cdc_ether_desc *ether;
+ struct usb_interface *control;
+ struct usb_interface *data;
+};
+
+extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
+extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
+extern void usbnet_cdc_unbind(struct usbnet *, struct usb_interface *);
+extern void usbnet_cdc_status(struct usbnet *, struct urb *);
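+
+/* Sketch (foo_bind is a hypothetical name): a CDC-style minidriver's bind
+ * callback often just delegates to these helpers:
+ *
+ *	static int foo_bind(struct usbnet *dev, struct usb_interface *intf)
+ *	{
+ *		return usbnet_cdc_bind(dev, intf);
+ *	}
+ */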
+
+/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
+#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
+ |USB_CDC_PACKET_TYPE_ALL_MULTICAST \
+ |USB_CDC_PACKET_TYPE_PROMISCUOUS \
+ |USB_CDC_PACKET_TYPE_DIRECTED)
+
+
+/* we record the state for each of our queued skbs */
+enum skb_state {
+ illegal = 0,
+ tx_start, tx_done,
+ rx_start, rx_done, rx_cleanup,
+ unlink_start
+};
+
+struct skb_data { /* skb->cb is one of these */
+ struct urb *urb;
+ struct usbnet *dev;
+ enum skb_state state;
+ long length;
+ unsigned long packets;
+};
+
+/* Drivers that set FLAG_MULTI_PACKET must call this in their
+ * tx_fixup method before returning an skb.
+ */
+static inline void
+usbnet_set_skb_tx_stats(struct sk_buff *skb,
+ unsigned long packets, long bytes_delta)
+{
+ struct skb_data *entry = (struct skb_data *) skb->cb;
+
+ entry->packets = packets;
+ entry->length = bytes_delta;
+}
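+
+/* Call-site sketch for the rule above (foo_tx_fixup and its local variables
+ * are hypothetical): a FLAG_MULTI_PACKET tx_fixup that packs several IP
+ * packets into one skb records the aggregate packet count and the byte
+ * adjustment its framing introduced before handing the skb back:
+ *
+ *	usbnet_set_skb_tx_stats(skb_out, n_packets, bytes_delta);
+ *	return skb_out;
+ */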
+
+extern int usbnet_open(struct net_device *net);
+extern int usbnet_stop(struct net_device *net);
+extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
+ struct net_device *net);
+extern void usbnet_tx_timeout(struct net_device *net);
+extern int usbnet_change_mtu(struct net_device *net, int new_mtu);
+
+extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *);
+extern int usbnet_get_ethernet_addr(struct usbnet *, int);
+extern void usbnet_defer_kevent(struct usbnet *, int);
+extern void usbnet_skb_return(struct usbnet *, struct sk_buff *);
+extern void usbnet_unlink_rx_urbs(struct usbnet *);
+
+extern void usbnet_pause_rx(struct usbnet *);
+extern void usbnet_resume_rx(struct usbnet *);
+extern void usbnet_purge_paused_rxq(struct usbnet *);
+
+extern int usbnet_get_settings(struct net_device *net,
+ struct ethtool_cmd *cmd);
+extern int usbnet_set_settings(struct net_device *net,
+ struct ethtool_cmd *cmd);
+extern u32 usbnet_get_link(struct net_device *net);
+extern u32 usbnet_get_msglevel(struct net_device *);
+extern void usbnet_set_msglevel(struct net_device *, u32);
+extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+extern int usbnet_nway_reset(struct net_device *net);
+
+extern int usbnet_manage_power(struct usbnet *, int);
+extern void usbnet_link_change(struct usbnet *, bool, bool);
+
+extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
+extern void usbnet_status_stop(struct usbnet *dev);
+
+extern void usbnet_update_max_qlen(struct usbnet *dev);
+
+#endif /* __LINUX_USB_USBNET_H */
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
new file mode 100644
index 000000000..c12571307
--- /dev/null
+++ b/include/linux/usb/wusb-wa.h
@@ -0,0 +1,303 @@
+/*
+ * Wireless USB Wire Adapter constants and structures.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation.
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ *
+ * References:
+ * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
+ */
+#ifndef __LINUX_USB_WUSB_WA_H
+#define __LINUX_USB_WUSB_WA_H
+
+/**
+ * Radio Command Request for the Radio Control Interface
+ *
+ * Radio Control Interface command and event codes are the same as
+ * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
+ */
+enum {
+ WA_EXEC_RC_CMD = 40, /* Radio Control command Request */
+};
+
+/* Wireless Adapter Requests ([WUSB] table 8-51) */
+enum {
+ WUSB_REQ_ADD_MMC_IE = 20,
+ WUSB_REQ_REMOVE_MMC_IE = 21,
+ WUSB_REQ_SET_NUM_DNTS = 22,
+ WUSB_REQ_SET_CLUSTER_ID = 23,
+ WUSB_REQ_SET_DEV_INFO = 24,
+ WUSB_REQ_GET_TIME = 25,
+ WUSB_REQ_SET_STREAM_IDX = 26,
+ WUSB_REQ_SET_WUSB_MAS = 27,
+ WUSB_REQ_CHAN_STOP = 28,
+};
+
+
+/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
+enum {
+ WUSB_TIME_ADJ = 0,
+ WUSB_TIME_BPST = 1,
+ WUSB_TIME_WUSB = 2,
+};
+
+enum {
+ WA_ENABLE = 0x01,
+ WA_RESET = 0x02,
+ RPIPE_PAUSE = 0x1,
+ RPIPE_STALL = 0x2,
+};
+
+/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
+enum {
+ WA_STATUS_ENABLED = 0x01,
+ WA_STATUS_RESETTING = 0x02
+};
+
+enum rpipe_crs {
+ RPIPE_CRS_CTL = 0x01,
+ RPIPE_CRS_ISO = 0x02,
+ RPIPE_CRS_BULK = 0x04,
+ RPIPE_CRS_INTR = 0x08
+};
+
+/**
+ * RPipe descriptor ([WUSB] section 8.5.2.11)
+ *
+ * FIXME: explain rpipes
+ */
+struct usb_rpipe_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 wRPipeIndex;
+ __le16 wRequests;
+ __le16 wBlocks; /* rw if 0 */
+ __le16 wMaxPacketSize; /* rw */
+ union {
+ u8 dwa_bHSHubAddress; /* rw: DWA. */
+ u8 hwa_bMaxBurst; /* rw: HWA. */
+ };
+ union {
+ u8 dwa_bHSHubPort; /* rw: DWA. */
+ u8 hwa_bDeviceInfoIndex; /* rw: HWA. */
+ };
+ u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */
+ union {
+ u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */
+ u8 hwa_reserved; /* rw: HWA. */
+ };
+ u8 bEndpointAddress; /* rw: Target EP address */
+ u8 bDataSequence; /* ro: Current Data sequence */
+ __le32 dwCurrentWindow; /* ro */
+ u8 bMaxDataSequence; /* ro?: max supported seq */
+ u8 bInterval; /* rw: */
+ u8 bOverTheAirInterval; /* rw: */
+ u8 bmAttribute; /* ro? */
+ u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */
+ u8 bmRetryOptions; /* rw? */
+ __le16 wNumTransactionErrors; /* rw */
+} __attribute__ ((packed));
+
+/**
+ * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
+ *
+ * These are the notifications coming on the notification endpoint of
+ * an HWA and a DWA.
+ */
+enum wa_notif_type {
+ DWA_NOTIF_RWAKE = 0x91,
+ DWA_NOTIF_PORTSTATUS = 0x92,
+ WA_NOTIF_TRANSFER = 0x93,
+ HWA_NOTIF_BPST_ADJ = 0x94,
+ HWA_NOTIF_DN = 0x95,
+};
+
+/**
+ * Wire Adapter notification header
+ *
+ * Notifications coming from a wire adapter use a common header
+ * defined in [WUSB] sections 8.4.5 & 8.5.4.
+ */
+struct wa_notif_hdr {
+ u8 bLength;
+ u8 bNotifyType; /* enum wa_notif_type */
+} __packed;
+
+/**
+ * HWA DN Received notification ([WUSB] section 8.5.4.2)
+ *
+ * The DNData is specified in WUSB1.0[7.6]. For each device
+ * notification we receive, we just need to dispatch it.
+ *
+ * @dndata: this is really an array of notifications, but all start
+ * with the same header.
+ */
+struct hwa_notif_dn {
+ struct wa_notif_hdr hdr;
+ u8 bSourceDeviceAddr; /* from errata 2005/07 */
+ u8 bmAttributes;
+ struct wusb_dn_hdr dndata[];
+} __packed;
+
+/* [WUSB] section 8.3.3 */
+enum wa_xfer_type {
+ WA_XFER_TYPE_CTL = 0x80,
+ WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */
+ WA_XFER_TYPE_ISO = 0x82,
+ WA_XFER_RESULT = 0x83,
+ WA_XFER_ABORT = 0x84,
+ WA_XFER_ISO_PACKET_INFO = 0xA0,
+ WA_XFER_ISO_PACKET_STATUS = 0xA1,
+};
+
+/* [WUSB] section 8.3.3 */
+struct wa_xfer_hdr {
+ u8 bLength; /* 0x18 */
+ u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */
+ __le16 wRPipe; /* RPipe index */
+ __le32 dwTransferID; /* Host-assigned ID */
+ __le32 dwTransferLength; /* Length of data to xfer */
+ u8 bTransferSegment;
+} __packed;
+
+struct wa_xfer_ctl {
+ struct wa_xfer_hdr hdr;
+ u8 bmAttribute;
+ __le16 wReserved;
+ struct usb_ctrlrequest baSetupData;
+} __packed;
+
+struct wa_xfer_bi {
+ struct wa_xfer_hdr hdr;
+ u8 bReserved;
+ __le16 wReserved;
+} __packed;
+
+/* [WUSB] section 8.5.5 */
+struct wa_xfer_hwaiso {
+ struct wa_xfer_hdr hdr;
+ u8 bReserved;
+ __le16 wPresentationTime;
+ __le32 dwNumOfPackets;
+} __packed;
+
+struct wa_xfer_packet_info_hwaiso {
+ __le16 wLength;
+ u8 bPacketType;
+ u8 bReserved;
+ __le16 PacketLength[0];
+} __packed;
+
+struct wa_xfer_packet_status_len_hwaiso {
+ __le16 PacketLength;
+ __le16 PacketStatus;
+} __packed;
+
+struct wa_xfer_packet_status_hwaiso {
+ __le16 wLength;
+ u8 bPacketType;
+ u8 bReserved;
+ struct wa_xfer_packet_status_len_hwaiso PacketStatus[0];
+} __packed;
+
+/* [WUSB] section 8.3.3.5 */
+struct wa_xfer_abort {
+ u8 bLength;
+ u8 bRequestType;
+ __le16 wRPipe; /* RPipe index */
+ __le32 dwTransferID; /* Host-assigned ID */
+} __packed;
+
+/**
+ * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
+ *
+ */
+struct wa_notif_xfer {
+ struct wa_notif_hdr hdr;
+ u8 bEndpoint;
+ u8 Reserved;
+} __packed;
+
+/** Transfer result basic codes [WUSB] table 8-15 */
+enum {
+ WA_XFER_STATUS_SUCCESS,
+ WA_XFER_STATUS_HALTED,
+ WA_XFER_STATUS_DATA_BUFFER_ERROR,
+ WA_XFER_STATUS_BABBLE,
+ WA_XFER_RESERVED,
+ WA_XFER_STATUS_NOT_FOUND,
+ WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
+ WA_XFER_STATUS_TRANSACTION_ERROR,
+ WA_XFER_STATUS_ABORTED,
+ WA_XFER_STATUS_RPIPE_NOT_READY,
+ WA_XFER_INVALID_FORMAT,
+ WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
+ WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
+};
+
+/** [WUSB] section 8.3.3.4 */
+struct wa_xfer_result {
+ struct wa_notif_hdr hdr;
+ __le32 dwTransferID;
+ __le32 dwTransferLength;
+ u8 bTransferSegment;
+ u8 bTransferStatus;
+ __le32 dwNumOfPackets;
+} __packed;
+
+/**
+ * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
+ *
+ * NOTE: u16 fields are read Little Endian from the hardware.
+ *
+ * @bNumPorts is the original max number of devices that the host can
+ * connect; we might chop this so the stack can handle
+ * it. In case you need to access it, use wusbhc->ports_max
+ * if it is a Wireless USB WA.
+ */
+struct usb_wa_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 bcdWAVersion;
+ u8 bNumPorts; /* don't use!! */
+ u8 bmAttributes; /* Reserved == 0 */
+ __le16 wNumRPipes;
+ __le16 wRPipeMaxBlock;
+ u8 bRPipeBlockSize;
+ u8 bPwrOn2PwrGood;
+ u8 bNumMMCIEs;
+ u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
+} __packed;
+
+/**
+ * HWA Device Information Buffer (WUSB1.0[T8.54])
+ */
+struct hwa_dev_info {
+ u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */
+ u8 bDeviceAddress;
+ __le16 wPHYRates;
+ u8 bmDeviceAttribute;
+} __packed;
+
+#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
new file mode 100644
index 000000000..eeb28329f
--- /dev/null
+++ b/include/linux/usb/wusb.h
@@ -0,0 +1,377 @@
+/*
+ * Wireless USB Standard Definitions
+ * Event Size Tables
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ */
+
+#ifndef __WUSB_H__
+#define __WUSB_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/uwb/spec.h>
+#include <linux/usb/ch9.h>
+#include <linux/param.h>
+
+/**
+ * WUSB Information Element header
+ *
+ * I don't know why they decided to make it different from the MBOA MAC
+ * IE Header; beats me.
+ */
+struct wuie_hdr {
+ u8 bLength;
+ u8 bIEIdentifier;
+} __attribute__((packed));
+
+enum {
+ WUIE_ID_WCTA = 0x80,
+ WUIE_ID_CONNECTACK,
+ WUIE_ID_HOST_INFO,
+ WUIE_ID_CHANGE_ANNOUNCE,
+ WUIE_ID_DEVICE_DISCONNECT,
+ WUIE_ID_HOST_DISCONNECT,
+ WUIE_ID_KEEP_ALIVE = 0x89,
+ WUIE_ID_ISOCH_DISCARD,
+ WUIE_ID_RESET_DEVICE,
+};
+
+/**
+ * Maximum number of array elements in a WUSB IE.
+ *
+ * WUSB1.0[7.5, before table 7-38] says that WUSB IEs that are
+ * "arrays" have to be limited to 4 elements. So we define it
+ * like that to ease up and submit only the needed size.
+ */
+#define WUIE_ELT_MAX 4
+
+/**
+ * Wrapper for the data that defines a CHID, a CDID or a CK
+ *
+ * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
+ * data. In order to avoid confusion and enforce types, we wrap it.
+ *
+ * Make it packed, as we use it in some hw definitions.
+ */
+struct wusb_ckhdid {
+ u8 data[16];
+} __attribute__((packed));
+
+static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
+
+#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
+
+/**
+ * WUSB IE: Host Information (WUSB1.0[7.5.2])
+ *
+ * Used to provide information about the host to the Wireless USB
+ * devices in range (CHID can be used as an ASCII string).
+ */
+struct wuie_host_info {
+ struct wuie_hdr hdr;
+ __le16 attributes;
+ struct wusb_ckhdid CHID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
+ *
+ * Used to acknowledge device connect requests. See note for
+ * WUIE_ELT_MAX.
+ */
+struct wuie_connect_ack {
+ struct wuie_hdr hdr;
+ struct {
+ struct wusb_ckhdid CDID;
+ u8 bDeviceAddress; /* 0 means unused */
+ u8 bReserved;
+ } blk[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE Host Information Element, Connect Availability
+ *
+ * WUSB1.0[7.5.2], bmAttributes description
+ */
+enum {
+ WUIE_HI_CAP_RECONNECT = 0,
+ WUIE_HI_CAP_LIMITED,
+ WUIE_HI_CAP_RESERVED,
+ WUIE_HI_CAP_ALL,
+};
+
+/**
+ * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
+ *
+ * Tells devices the host is going to stop sending MMCs and will disappear.
+ */
+struct wuie_channel_stop {
+ struct wuie_hdr hdr;
+ u8 attributes;
+ u8 timestamp[3];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Keepalive (WUSB1.0[7.5.9])
+ *
+ * Ask device(s) to send keepalives.
+ */
+struct wuie_keep_alive {
+ struct wuie_hdr hdr;
+ u8 bDeviceAddress[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Reset device (WUSB1.0[7.5.11])
+ *
+ * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
+ * use it for one at a time...
+ *
+ * In any case, this request is a wee bit silly: why don't they target
+ * by address??
+ */
+struct wuie_reset {
+ struct wuie_hdr hdr;
+ struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
+ *
+ * Tell device to disconnect; we can fit 4 addresses, but we only use
+ * it for one at a time...
+ */
+struct wuie_disconnect {
+ struct wuie_hdr hdr;
+ u8 bDeviceAddress;
+ u8 padding;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
+ *
+ * Tells all connected devices to disconnect.
+ */
+struct wuie_host_disconnect {
+ struct wuie_hdr hdr;
+} __attribute__((packed));
+
+/**
+ * WUSB Device Notification header (WUSB1.0[7.6])
+ */
+struct wusb_dn_hdr {
+ u8 bType;
+ u8 notifdata[];
+} __attribute__((packed));
+
+/** Device Notification codes (WUSB1.0[Table 7-54]) */
+enum WUSB_DN {
+ WUSB_DN_CONNECT = 0x01,
+ WUSB_DN_DISCONNECT = 0x02,
+ WUSB_DN_EPRDY = 0x03,
+ WUSB_DN_MASAVAILCHANGED = 0x04,
+ WUSB_DN_RWAKE = 0x05,
+ WUSB_DN_SLEEP = 0x06,
+ WUSB_DN_ALIVE = 0x07,
+};
+
+/** WUSB Device Notification Connect */
+struct wusb_dn_connect {
+ struct wusb_dn_hdr hdr;
+ __le16 attributes;
+ struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
+{
+ return le16_to_cpu(dn->attributes) & 0xff;
+}
+
+static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
+{
+ return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
+}
+
+static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
+{
+ return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
+}
+
+/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
+struct wusb_dn_alive {
+ struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/** Device is disconnecting (WUSB1.0[7.6.2]) */
+struct wusb_dn_disconnect {
+ struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/* General constants */
+enum {
+ WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
+};
+
+static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size,
+ const struct wusb_ckhdid *ckhdid)
+{
+ return scnprintf(pr_ckhdid, size,
+ "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx "
+ "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx",
+ ckhdid->data[0], ckhdid->data[1],
+ ckhdid->data[2], ckhdid->data[3],
+ ckhdid->data[4], ckhdid->data[5],
+ ckhdid->data[6], ckhdid->data[7],
+ ckhdid->data[8], ckhdid->data[9],
+ ckhdid->data[10], ckhdid->data[11],
+ ckhdid->data[12], ckhdid->data[13],
+ ckhdid->data[14], ckhdid->data[15]);
+}
+
+/*
+ * WUSB Crypto stuff (WUSB1.0[6])
+ */
+
+extern const char *wusb_et_name(u8);
+
+/**
+ * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
+ * the host or the device.
+ */
+static inline u8 wusb_key_index(int index, int type, int originator)
+{
+ return (originator << 6) | (type << 4) | index;
+}
+
+#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */
+#define WUSB_KEY_INDEX_TYPE_ASSOC 1
+#define WUSB_KEY_INDEX_TYPE_GTK 2
+#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
+#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
+/* bits 0-3 used for the key index. */
+#define WUSB_KEY_INDEX_MAX 15
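+
+/*
+ * For example (illustrative only), the index byte for the host-originated
+ * pairwise temporal key 0 would be built as:
+ *
+ *	wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
+ *		       WUSB_KEY_INDEX_ORIGINATOR_HOST);
+ */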
+
+/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
+struct aes_ccm_nonce {
+ u8 sfn[6]; /* Little Endian */
+ u8 tkid[3]; /* LE */
+ struct uwb_dev_addr dest_addr;
+ struct uwb_dev_addr src_addr;
+} __attribute__((packed));
+
+/* A CCM operation label, defined on WUSB1.0[6.5.x] */
+struct aes_ccm_label {
+ u8 data[14];
+} __attribute__((packed));
+
+/*
+ * Input to the key derivation sequence defined in
+ * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
+ * PRF function.
+ */
+struct wusb_keydvt_in {
+ u8 hnonce[16];
+ u8 dnonce[16];
+} __attribute__((packed));
+
+/*
+ * Output from the key derivation sequence defined in
+ * WUSB1.0[6.5.1].
+ */
+struct wusb_keydvt_out {
+ u8 kck[16];
+ u8 ptk[16];
+} __attribute__((packed));
+
+/* Pseudo Random Function WUSB1.0[6.5] */
+extern int wusb_crypto_init(void);
+extern void wusb_crypto_exit(void);
+extern ssize_t wusb_prf(void *out, size_t out_size,
+ const u8 key[16], const struct aes_ccm_nonce *_n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen, size_t len);
+
+static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 64);
+}
+
+static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 128);
+}
+
+static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 256);
+}
+
+/* Key derivation WUSB1.0[6.5.1] */
+static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
+ const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct wusb_keydvt_in *keydvt_in)
+{
+ const struct aes_ccm_label a = { .data = "Pair-wise keys" };
+ return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
+ keydvt_in, sizeof(*keydvt_in));
+}
+
+/*
+ * Out-of-band MIC Generation WUSB1.0[6.5.2]
+ *
+ * Compute the MIC over @key, @n and @hs and place it in @mic_out.
+ *
+ * @mic_out: Where to place the 8 byte MIC tag
+ * @key: KCK from the derivation process
+ * @n: CCM nonce, n->sfn == 0, TKID as established in the
+ * process.
+ * @hs: Handshake struct for phase 2 of the 4-way.
+ * hs->bStatus and hs->bReserved are zero.
+ * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2])
+ * hs->dest_addr is the device's USB address padded with 0
+ * hs->src_addr is the host's UWB device address
+ * hs->mic is ignored (as we compute that value).
+ */
+static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct usb_handshake *hs)
+{
+ const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
+ return wusb_prf_64(mic_out, 8, key, n, &a,
+ hs, sizeof(*hs) - sizeof(hs->MIC));
+}
+
+#endif /* #ifndef __WUSB_H__ */
diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h
new file mode 100644
index 000000000..376654b5b
--- /dev/null
+++ b/include/linux/usb/xhci_pdriver.h
@@ -0,0 +1,27 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ */
+
+#ifndef __USB_CORE_XHCI_PDRIVER_H
+#define __USB_CORE_XHCI_PDRIVER_H
+
+/**
+ * struct usb_xhci_pdata - platform_data for generic xhci platform driver
+ *
+ * @usb3_lpm_capable: determines if this xhci platform supports USB3
+ * LPM capability
+ *
+ */
+struct usb_xhci_pdata {
+ unsigned usb3_lpm_capable:1;
+};
+
+#endif /* __USB_CORE_XHCI_PDRIVER_H */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
new file mode 100644
index 000000000..7f5f78bd1
--- /dev/null
+++ b/include/linux/usb_usual.h
@@ -0,0 +1,92 @@
+/*
+ * Interface to the libusual.
+ *
+ * Copyright (c) 2005 Pete Zaitcev <zaitcev@redhat.com>
+ * Copyright (c) 1999-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
+ * Copyright (c) 1999 Michael Gee (michael@linuxspecific.com)
+ */
+
+#ifndef __LINUX_USB_USUAL_H
+#define __LINUX_USB_USUAL_H
+
+
+/* We should do this for cleanliness... But other usb_foo.h do not do this. */
+/* #include <linux/usb.h> */
+
+/*
+ * The flags field, which we store in usb_device_id.driver_info.
+ * It is compatible with the old usb-storage flags in lower 24 bits.
+ */
+
+/*
+ * Static flag definitions. We use this roundabout technique so that the
+ * proc_info() routine can automatically display a message for each flag.
+ */
+#define US_DO_ALL_FLAGS \
+ US_FLAG(SINGLE_LUN, 0x00000001) \
+ /* allow access to only LUN 0 */ \
+ US_FLAG(NEED_OVERRIDE, 0x00000002) \
+ /* unusual_devs entry is necessary */ \
+ US_FLAG(SCM_MULT_TARG, 0x00000004) \
+ /* supports multiple targets */ \
+ US_FLAG(FIX_INQUIRY, 0x00000008) \
+ /* INQUIRY response needs faking */ \
+ US_FLAG(FIX_CAPACITY, 0x00000010) \
+ /* READ CAPACITY response too big */ \
+ US_FLAG(IGNORE_RESIDUE, 0x00000020) \
+ /* reported residue is wrong */ \
+ US_FLAG(BULK32, 0x00000040) \
+ /* Uses 32-byte CBW length */ \
+ US_FLAG(NOT_LOCKABLE, 0x00000080) \
+ /* PREVENT/ALLOW not supported */ \
+ US_FLAG(GO_SLOW, 0x00000100) \
+ /* Need delay after Command phase */ \
+ US_FLAG(NO_WP_DETECT, 0x00000200) \
+ /* Don't check for write-protect */ \
+ US_FLAG(MAX_SECTORS_64, 0x00000400) \
+ /* Sets max_sectors to 64 */ \
+ US_FLAG(IGNORE_DEVICE, 0x00000800) \
+ /* Don't claim device */ \
+ US_FLAG(CAPACITY_HEURISTICS, 0x00001000) \
+ /* sometimes the reported size is too big */ \
+ US_FLAG(MAX_SECTORS_MIN,0x00002000) \
+ /* Sets max_sectors to arch min */ \
+ US_FLAG(BULK_IGNORE_TAG,0x00004000) \
+ /* Ignore tag mismatch in bulk operations */ \
+ US_FLAG(SANE_SENSE, 0x00008000) \
+ /* Sane Sense (> 18 bytes) */ \
+ US_FLAG(CAPACITY_OK, 0x00010000) \
+ /* READ CAPACITY response is correct */ \
+ US_FLAG(BAD_SENSE, 0x00020000) \
+ /* Bad Sense (never more than 18 bytes) */ \
+ US_FLAG(NO_READ_DISC_INFO, 0x00040000) \
+ /* cannot handle READ_DISC_INFO */ \
+ US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \
+ /* cannot handle READ_CAPACITY_16 */ \
+ US_FLAG(INITIAL_READ10, 0x00100000) \
+ /* Initial READ(10) (and others) must be retried */ \
+ US_FLAG(WRITE_CACHE, 0x00200000) \
+ /* Write Cache status is not available */ \
+ US_FLAG(NEEDS_CAP16, 0x00400000) \
+ /* cannot handle READ_CAPACITY_10 */ \
+ US_FLAG(IGNORE_UAS, 0x00800000) \
+ /* Device advertises UAS but it is broken */ \
+ US_FLAG(BROKEN_FUA, 0x01000000) \
+ /* Cannot handle FUA in WRITE or READ CDBs */ \
+ US_FLAG(NO_ATA_1X, 0x02000000) \
+ /* Cannot handle ATA_12 or ATA_16 CDBs */ \
+ US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
+ /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
+ US_FLAG(MAX_SECTORS_240, 0x08000000) \
+ /* Sets max_sectors to 240 */ \
+
+#define US_FLAG(name, value) US_FL_##name = value ,
+enum { US_DO_ALL_FLAGS };
+#undef US_FLAG
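+
+/*
+ * The same list can be re-expanded with a different US_FLAG definition;
+ * e.g. (sketch only) to print the name of every flag set in a mask:
+ *
+ *	#define US_FLAG(name, value) \
+ *		if (mask & US_FL_##name) \
+ *			pr_cont(" " #name);
+ *	US_DO_ALL_FLAGS
+ *	#undef US_FLAG
+ */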
+
+#include <linux/usb/storage.h>
+
+extern int usb_usual_ignore_device(struct usb_interface *intf);
+extern struct usb_device_id usb_storage_usb_ids[];
+
+#endif /* __LINUX_USB_USUAL_H */
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
new file mode 100644
index 000000000..04a262854
--- /dev/null
+++ b/include/linux/usbdevice_fs.h
@@ -0,0 +1,80 @@
+/*****************************************************************************/
+
+/*
+ * usbdevice_fs.h -- USB device file system.
+ *
+ * Copyright (C) 2000
+ * Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * History:
+ * 0.1 04.01.2000 Created
+ */
+
+/*****************************************************************************/
+#ifndef _LINUX_USBDEVICE_FS_H
+#define _LINUX_USBDEVICE_FS_H
+
+#include <uapi/linux/usbdevice_fs.h>
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+struct usbdevfs_ctrltransfer32 {
+ u8 bRequestType;
+ u8 bRequest;
+ u16 wValue;
+ u16 wIndex;
+ u16 wLength;
+ u32 timeout; /* in milliseconds */
+ compat_caddr_t data;
+};
+
+struct usbdevfs_bulktransfer32 {
+ compat_uint_t ep;
+ compat_uint_t len;
+ compat_uint_t timeout; /* in milliseconds */
+ compat_caddr_t data;
+};
+
+struct usbdevfs_disconnectsignal32 {
+ compat_int_t signr;
+ compat_caddr_t context;
+};
+
+struct usbdevfs_urb32 {
+ unsigned char type;
+ unsigned char endpoint;
+ compat_int_t status;
+ compat_uint_t flags;
+ compat_caddr_t buffer;
+ compat_int_t buffer_length;
+ compat_int_t actual_length;
+ compat_int_t start_frame;
+ compat_int_t number_of_packets;
+ compat_int_t error_count;
+ compat_uint_t signr;
+ compat_caddr_t usercontext; /* unused */
+ struct usbdevfs_iso_packet_desc iso_frame_desc[0];
+};
+
+struct usbdevfs_ioctl32 {
+ s32 ifno;
+ s32 ioctl_code;
+ compat_caddr_t data;
+};
+#endif
+#endif /* _LINUX_USBDEVICE_FS_H */
diff --git a/include/linux/user-return-notifier.h b/include/linux/user-return-notifier.h
new file mode 100644
index 000000000..9c4a445bb
--- /dev/null
+++ b/include/linux/user-return-notifier.h
@@ -0,0 +1,49 @@
+#ifndef _LINUX_USER_RETURN_NOTIFIER_H
+#define _LINUX_USER_RETURN_NOTIFIER_H
+
+#ifdef CONFIG_USER_RETURN_NOTIFIER
+
+#include <linux/list.h>
+#include <linux/sched.h>
+
+struct user_return_notifier {
+ void (*on_user_return)(struct user_return_notifier *urn);
+ struct hlist_node link;
+};
+
+
+void user_return_notifier_register(struct user_return_notifier *urn);
+void user_return_notifier_unregister(struct user_return_notifier *urn);
+
+static inline void propagate_user_return_notify(struct task_struct *prev,
+ struct task_struct *next)
+{
+ if (test_tsk_thread_flag(prev, TIF_USER_RETURN_NOTIFY)) {
+ clear_tsk_thread_flag(prev, TIF_USER_RETURN_NOTIFY);
+ set_tsk_thread_flag(next, TIF_USER_RETURN_NOTIFY);
+ }
+}
+
+void fire_user_return_notifiers(void);
+
+static inline void clear_user_return_notifier(struct task_struct *p)
+{
+ clear_tsk_thread_flag(p, TIF_USER_RETURN_NOTIFY);
+}
+
+#else
+
+struct user_return_notifier {};
+
+static inline void propagate_user_return_notify(struct task_struct *prev,
+ struct task_struct *next)
+{
+}
+
+static inline void fire_user_return_notifiers(void) {}
+
+static inline void clear_user_return_notifier(struct task_struct *p) {}
+
+#endif
+
+#endif
diff --git a/include/linux/user.h b/include/linux/user.h
new file mode 100644
index 000000000..68daf840f
--- /dev/null
+++ b/include/linux/user.h
@@ -0,0 +1 @@
+#include <asm/user.h>
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
new file mode 100644
index 000000000..8297e5b34
--- /dev/null
+++ b/include/linux/user_namespace.h
@@ -0,0 +1,105 @@
+#ifndef _LINUX_USER_NAMESPACE_H
+#define _LINUX_USER_NAMESPACE_H
+
+#include <linux/kref.h>
+#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+#define UID_GID_MAP_MAX_EXTENTS 5
+
+struct uid_gid_map { /* 64 bytes -- 1 cache line */
+ u32 nr_extents;
+ struct uid_gid_extent {
+ u32 first;
+ u32 lower_first;
+ u32 count;
+ } extent[UID_GID_MAP_MAX_EXTENTS];
+};
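+
+/*
+ * Example (hypothetical values): an extent { .first = 0, .lower_first =
+ * 100000, .count = 65536 } maps IDs 0..65535 inside the namespace to
+ * 100000..165535 in the parent namespace.
+ */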
+
+#define USERNS_SETGROUPS_ALLOWED 1UL
+
+#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
+
+struct user_namespace {
+ struct uid_gid_map uid_map;
+ struct uid_gid_map gid_map;
+ struct uid_gid_map projid_map;
+ atomic_t count;
+ struct user_namespace *parent;
+ int level;
+ kuid_t owner;
+ kgid_t group;
+ struct ns_common ns;
+ unsigned long flags;
+
+ /* Register of per-UID persistent keyrings for this namespace */
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ struct key *persistent_keyring_register;
+ struct rw_semaphore persistent_keyring_register_sem;
+#endif
+};
+
+extern struct user_namespace init_user_ns;
+
+#ifdef CONFIG_USER_NS
+
+static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
+{
+ if (ns)
+ atomic_inc(&ns->count);
+ return ns;
+}
+
+extern int create_user_ns(struct cred *new);
+extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
+extern void free_user_ns(struct user_namespace *ns);
+
+static inline void put_user_ns(struct user_namespace *ns)
+{
+ if (ns && atomic_dec_and_test(&ns->count))
+ free_user_ns(ns);
+}
+
+struct seq_operations;
+extern const struct seq_operations proc_uid_seq_operations;
+extern const struct seq_operations proc_gid_seq_operations;
+extern const struct seq_operations proc_projid_seq_operations;
+extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
+extern int proc_setgroups_show(struct seq_file *m, void *v);
+extern bool userns_may_setgroups(const struct user_namespace *ns);
+#else
+
+static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
+{
+ return &init_user_ns;
+}
+
+static inline int create_user_ns(struct cred *new)
+{
+ return -EINVAL;
+}
+
+static inline int unshare_userns(unsigned long unshare_flags,
+ struct cred **new_cred)
+{
+ if (unshare_flags & CLONE_NEWUSER)
+ return -EINVAL;
+ return 0;
+}
+
+static inline void put_user_ns(struct user_namespace *ns)
+{
+}
+
+static inline bool userns_may_setgroups(const struct user_namespace *ns)
+{
+ return true;
+}
+#endif
+
+#endif /* _LINUX_USER_NAMESPACE_H */
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
new file mode 100644
index 000000000..f9b2ce580
--- /dev/null
+++ b/include/linux/util_macros.h
@@ -0,0 +1,40 @@
+#ifndef _LINUX_HELPER_MACROS_H_
+#define _LINUX_HELPER_MACROS_H_
+
+#define __find_closest(x, a, as, op) \
+({ \
+ typeof(as) __fc_i, __fc_as = (as) - 1; \
+ typeof(x) __fc_x = (x); \
+ typeof(*a) const *__fc_a = (a); \
+ for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \
+ if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \
+ __fc_a[__fc_i + 1], 2)) \
+ break; \
+ } \
+ (__fc_i); \
+})
+
+/**
+ * find_closest - locate the closest element in a sorted array
+ * @x: The reference value.
+ * @a: The array in which to look for the closest element. Must be sorted
+ * in ascending order.
+ * @as: Size of 'a'.
+ *
+ * Returns the index of the element closest to 'x'.
+ */
+#define find_closest(x, a, as) __find_closest(x, a, as, <=)
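+
+/*
+ * Usage sketch (the table below is made up):
+ *
+ *	static const int rates[] = { 100, 200, 400, 800 };
+ *	int idx = find_closest(300, rates, ARRAY_SIZE(rates));
+ *
+ * 300 sits exactly between 200 and 400; the <= comparison against the
+ * rounded midpoint (300) makes this return index 1, i.e. the lower entry.
+ */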
+
+/**
+ * find_closest_descending - locate the closest element in a sorted array
+ * @x: The reference value.
+ * @a: The array in which to look for the closest element. Must be sorted
+ * in descending order.
+ * @as: Size of 'a'.
+ *
+ * Similar to find_closest() but 'a' is expected to be sorted in descending
+ * order.
+ */
+#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=)
+
+#endif
diff --git a/include/linux/uts.h b/include/linux/uts.h
new file mode 100644
index 000000000..6ddbd8637
--- /dev/null
+++ b/include/linux/uts.h
@@ -0,0 +1,19 @@
+#ifndef _LINUX_UTS_H
+#define _LINUX_UTS_H
+
+/*
+ * Defines for what uname() should return
+ */
+#ifndef UTS_SYSNAME
+#define UTS_SYSNAME "Linux"
+#endif
+
+#ifndef UTS_NODENAME
+#define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME /* set by sethostname() */
+#endif
+
+#ifndef UTS_DOMAINNAME
+#define UTS_DOMAINNAME "(none)" /* set by setdomainname() */
+#endif
+
+#endif
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
new file mode 100644
index 000000000..5093f58ae
--- /dev/null
+++ b/include/linux/utsname.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UTSNAME_H
+#define _LINUX_UTSNAME_H
+
+
+#include <linux/sched.h>
+#include <linux/kref.h>
+#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
+#include <linux/err.h>
+#include <uapi/linux/utsname.h>
+
+enum uts_proc {
+ UTS_PROC_OSTYPE,
+ UTS_PROC_OSRELEASE,
+ UTS_PROC_VERSION,
+ UTS_PROC_HOSTNAME,
+ UTS_PROC_DOMAINNAME,
+};
+
+struct user_namespace;
+extern struct user_namespace init_user_ns;
+
+struct uts_namespace {
+ struct kref kref;
+ struct new_utsname name;
+ struct user_namespace *user_ns;
+ struct ns_common ns;
+};
+extern struct uts_namespace init_uts_ns;
+
+#ifdef CONFIG_UTS_NS
+static inline void get_uts_ns(struct uts_namespace *ns)
+{
+ kref_get(&ns->kref);
+}
+
+extern struct uts_namespace *copy_utsname(unsigned long flags,
+ struct user_namespace *user_ns, struct uts_namespace *old_ns);
+extern void free_uts_ns(struct kref *kref);
+
+static inline void put_uts_ns(struct uts_namespace *ns)
+{
+ kref_put(&ns->kref, free_uts_ns);
+}
+#else
+static inline void get_uts_ns(struct uts_namespace *ns)
+{
+}
+
+static inline void put_uts_ns(struct uts_namespace *ns)
+{
+}
+
+static inline struct uts_namespace *copy_utsname(unsigned long flags,
+ struct user_namespace *user_ns, struct uts_namespace *old_ns)
+{
+ if (flags & CLONE_NEWUTS)
+ return ERR_PTR(-EINVAL);
+
+ return old_ns;
+}
+#endif
+
+#ifdef CONFIG_PROC_SYSCTL
+extern void uts_proc_notify(enum uts_proc proc);
+#else
+static inline void uts_proc_notify(enum uts_proc proc)
+{
+}
+#endif
+
+static inline struct new_utsname *utsname(void)
+{
+ return &current->nsproxy->uts_ns->name;
+}
+
+static inline struct new_utsname *init_utsname(void)
+{
+ return &init_uts_ns.name;
+}
+
+extern struct rw_semaphore uts_sem;
+
+#endif /* _LINUX_UTSNAME_H */
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
new file mode 100644
index 000000000..6df250903
--- /dev/null
+++ b/include/linux/uuid.h
@@ -0,0 +1,39 @@
+/*
+ * UUID/GUID definition
+ *
+ * Copyright (C) 2010, Intel Corp.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef _LINUX_UUID_H_
+#define _LINUX_UUID_H_
+
+#include <uapi/linux/uuid.h>
+
+
+static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
+{
+ return memcmp(&u1, &u2, sizeof(uuid_le));
+}
+
+static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2)
+{
+ return memcmp(&u1, &u2, sizeof(uuid_be));
+}
+
+extern void uuid_le_gen(uuid_le *u);
+extern void uuid_be_gen(uuid_be *u);
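+
+/*
+ * Usage sketch (illustrative only, not part of this header): generate a
+ * random little-endian UUID and check that it differs from the nil UUID.
+ * NULL_UUID_LE comes from <uapi/linux/uuid.h>; the function name is
+ * hypothetical.
+ */
+static inline bool example_uuid_is_set(void)
+{
+ uuid_le u;
+
+ uuid_le_gen(&u);
+ return uuid_le_cmp(u, NULL_UUID_LE) != 0;
+}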
+
+#endif
diff --git a/include/linux/uwb.h b/include/linux/uwb.h
new file mode 100644
index 000000000..7dbbee974
--- /dev/null
+++ b/include/linux/uwb.h
@@ -0,0 +1,831 @@
+/*
+ * Ultra Wide Band
+ * UWB API
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: doc: overview of the API, different parts and pointers
+ */
+
+#ifndef __LINUX__UWB_H__
+#define __LINUX__UWB_H__
+
+#include <linux/limits.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/uwb/spec.h>
+#include <asm/page.h>
+
+struct uwb_dev;
+struct uwb_beca_e;
+struct uwb_rc;
+struct uwb_rsv;
+struct uwb_dbg;
+
+/**
+ * struct uwb_dev - a UWB Device
+ * @rc: UWB Radio Controller that discovered the device (kind of its
+ * parent).
+ * @bce: a beacon cache entry for this device; or NULL if the device
+ * is a local radio controller.
+ * @mac_addr: the EUI-48 address of this device.
+ * @dev_addr: the current DevAddr used by this device.
+ * @beacon_slot: the slot number the beacon is using.
+ * @streams: bitmap of streams allocated to reservations targeted at
+ * this device. For an RC, this is the streams allocated for
+ * reservations targeted at DevAddrs.
+ *
+ * A UWB device may either be a neighbor or part of a local radio
+ * controller.
+ */
+struct uwb_dev {
+ struct mutex mutex;
+ struct list_head list_node;
+ struct device dev;
+ struct uwb_rc *rc; /* radio controller */
+ struct uwb_beca_e *bce; /* Beacon Cache Entry */
+
+ struct uwb_mac_addr mac_addr;
+ struct uwb_dev_addr dev_addr;
+ int beacon_slot;
+ DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
+ DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS);
+};
+#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
+
+/**
+ * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
+ *
+ * RC[CE]Bs have a 'context ID' field that matches the command with
+ * the event received to confirm it.
+ *
+ * Maximum number of context IDs
+ */
+enum { UWB_RC_CTX_MAX = 256 };
+
+
+/** Notification chain head for UWB generated events to listeners */
+struct uwb_notifs_chain {
+ struct list_head list;
+ struct mutex mutex;
+};
+
+/* Beacon cache list */
+struct uwb_beca {
+ struct list_head list;
+ size_t entries;
+ struct mutex mutex;
+};
+
+/* Event handling thread. */
+struct uwbd {
+ int pid;
+ struct task_struct *task;
+ wait_queue_head_t wq;
+ struct list_head event_list;
+ spinlock_t event_list_lock;
+};
+
+/**
+ * struct uwb_mas_bm - a bitmap of all MAS in a superframe
+ * @bm: a bitmap of length #UWB_NUM_MAS
+ */
+struct uwb_mas_bm {
+ DECLARE_BITMAP(bm, UWB_NUM_MAS);
+ DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS);
+ int safe;
+ int unsafe;
+};
+
+/**
+ * uwb_rsv_state - UWB Reservation state.
+ *
+ * NONE - reservation is not active (no DRP IE being transmitted).
+ *
+ * Owner reservation states:
+ *
+ * INITIATED - owner has sent an initial DRP request.
+ * PENDING - target responded with pending Reason Code.
+ * MODIFIED - reservation manager is modifying an established
+ * reservation with a different MAS allocation.
+ * ESTABLISHED - the reservation has been successfully negotiated.
+ *
+ * Target reservation states:
+ *
+ * DENIED - request is denied.
+ * ACCEPTED - request is accepted.
+ * PENDING - PAL has yet to make a decision on whether to accept or
+ * deny.
+ *
+ * FIXME: further target states TBD.
+ */
+enum uwb_rsv_state {
+ UWB_RSV_STATE_NONE = 0,
+ UWB_RSV_STATE_O_INITIATED,
+ UWB_RSV_STATE_O_PENDING,
+ UWB_RSV_STATE_O_MODIFIED,
+ UWB_RSV_STATE_O_ESTABLISHED,
+ UWB_RSV_STATE_O_TO_BE_MOVED,
+ UWB_RSV_STATE_O_MOVE_EXPANDING,
+ UWB_RSV_STATE_O_MOVE_COMBINING,
+ UWB_RSV_STATE_O_MOVE_REDUCING,
+ UWB_RSV_STATE_T_ACCEPTED,
+ UWB_RSV_STATE_T_DENIED,
+ UWB_RSV_STATE_T_CONFLICT,
+ UWB_RSV_STATE_T_PENDING,
+ UWB_RSV_STATE_T_EXPANDING_ACCEPTED,
+ UWB_RSV_STATE_T_EXPANDING_CONFLICT,
+ UWB_RSV_STATE_T_EXPANDING_PENDING,
+ UWB_RSV_STATE_T_EXPANDING_DENIED,
+ UWB_RSV_STATE_T_RESIZED,
+
+ UWB_RSV_STATE_LAST,
+};
+
+enum uwb_rsv_target_type {
+ UWB_RSV_TARGET_DEV,
+ UWB_RSV_TARGET_DEVADDR,
+};
+
+/**
+ * struct uwb_rsv_target - the target of a reservation.
+ *
+ * Reservations are either unicast and targeted at a single device
+ * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a
+ * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
+ */
+struct uwb_rsv_target {
+ enum uwb_rsv_target_type type;
+ union {
+ struct uwb_dev *dev;
+ struct uwb_dev_addr devaddr;
+ };
+};
+
+struct uwb_rsv_move {
+ struct uwb_mas_bm final_mas;
+ struct uwb_ie_drp *companion_drp_ie;
+ struct uwb_mas_bm companion_mas;
+};
+
+/*
+ * Number of streams reserved for reservations targeted at DevAddrs.
+ */
+#define UWB_NUM_GLOBAL_STREAMS 1
+
+typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
+
+/**
+ * struct uwb_rsv - a DRP reservation
+ *
+ * Data structure management:
+ *
+ * @rc: the radio controller this reservation is for
+ * (as target or owner)
+ * @rc_node: a list node for the RC
+ * @pal_node: a list node for the PAL
+ *
+ * Owner and target parameters:
+ *
+ * @owner: the UWB device owning this reservation
+ * @target: the target UWB device
+ * @type: reservation type
+ *
+ * Owner parameters:
+ *
+ * @max_mas: maximum number of MAS
+ * @min_mas: minimum number of MAS
+ * @sparsity: owner selected sparsity
+ * @is_multicast: true iff multicast
+ *
+ * @callback: callback function when the reservation completes
+ * @pal_priv: private data for the PAL making the reservation
+ *
+ * Reservation status:
+ *
+ * @status: negotiation status
+ * @stream: stream index allocated for this reservation
+ * @tiebreaker: conflict tiebreaker for this reservation
+ * @mas: reserved MAS
+ * @drp_ie: the DRP IE
+ * @ie_valid: true iff the DRP IE matches the reservation parameters
+ *
+ * DRP reservations are uniquely identified by the owner, target and
+ * stream index. However, when using a DevAddr as a target (e.g., for
+ * a WUSB cluster reservation) the responses may be received from
+ * devices with different DevAddrs. In this case, reservations are
+ * uniquely identified by just the stream index. A number of stream
+ * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
+ */
+struct uwb_rsv {
+ struct uwb_rc *rc;
+ struct list_head rc_node;
+ struct list_head pal_node;
+ struct kref kref;
+
+ struct uwb_dev *owner;
+ struct uwb_rsv_target target;
+ enum uwb_drp_type type;
+ int max_mas;
+ int min_mas;
+ int max_interval;
+ bool is_multicast;
+
+ uwb_rsv_cb_f callback;
+ void *pal_priv;
+
+ enum uwb_rsv_state state;
+ bool needs_release_companion_mas;
+ u8 stream;
+ u8 tiebreaker;
+ struct uwb_mas_bm mas;
+ struct uwb_ie_drp *drp_ie;
+ struct uwb_rsv_move mv;
+ bool ie_valid;
+ struct timer_list timer;
+ struct work_struct handle_timeout_work;
+};
+
+static const
+struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
+
+static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
+{
+ bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
+}
+
+/**
+ * struct uwb_drp_avail - a radio controller's view of MAS usage
+ * @global: MAS unused by neighbors (excluding reservations targeted
+ * or owned by the local radio controller) or the beacon period
+ * @local: MAS unused by local established reservations
+ * @pending: MAS unused by local pending reservations
+ * @ie: DRP Availability IE to be included in the beacon
+ * @ie_valid: true iff @ie is valid and does not need to be regenerated from
+ * @global and @local
+ *
+ * Each radio controller maintains a view of MAS usage or
+ * availability. MAS available for a new reservation are determined
+ * from the intersection of @global, @local, and @pending.
+ *
+ * The radio controller must transmit a DRP Availability IE that's the
+ * intersection of @global and @local.
+ *
+ * A set bit indicates the MAS is unused and available.
+ *
+ * rc->rsvs_mutex should be held before accessing this data structure.
+ *
+ * [ECMA-368] section 17.4.3.
+ */
+struct uwb_drp_avail {
+ DECLARE_BITMAP(global, UWB_NUM_MAS);
+ DECLARE_BITMAP(local, UWB_NUM_MAS);
+ DECLARE_BITMAP(pending, UWB_NUM_MAS);
+ struct uwb_ie_drp_avail ie;
+ bool ie_valid;
+};
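+
+/*
+ * Sketch (illustrative only, not part of this header): the MAS available
+ * for a new reservation are the intersection of @global, @local and
+ * @pending, as described above. The helper name is hypothetical and
+ * rc->rsvs_mutex must be held by the caller.
+ */
+static inline void example_drp_avail_free_mas(struct uwb_drp_avail *avail,
+ unsigned long *result)
+{
+ bitmap_and(result, avail->global, avail->local, UWB_NUM_MAS);
+ bitmap_and(result, result, avail->pending, UWB_NUM_MAS);
+}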
+
+struct uwb_drp_backoff_win {
+ u8 window;
+ u8 n;
+ int total_expired;
+ struct timer_list timer;
+ bool can_reserve_extra_mases;
+};
+
+const char *uwb_rsv_state_str(enum uwb_rsv_state state);
+const char *uwb_rsv_type_str(enum uwb_drp_type type);
+
+struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
+ void *pal_priv);
+void uwb_rsv_destroy(struct uwb_rsv *rsv);
+
+int uwb_rsv_establish(struct uwb_rsv *rsv);
+int uwb_rsv_modify(struct uwb_rsv *rsv,
+ int max_mas, int min_mas, int sparsity);
+void uwb_rsv_terminate(struct uwb_rsv *rsv);
+
+void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
+
+void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas);
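+
+/*
+ * Usage sketch (illustrative only, not part of this header): how a PAL
+ * might request a hard reservation towards a single device and react to
+ * the result in its callback. All example_* names are hypothetical and
+ * error handling is reduced to the essentials.
+ */
+static inline void example_rsv_cb(struct uwb_rsv *rsv)
+{
+ if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
+ pr_info("uwb: reservation established, stream %u\n",
+ (unsigned)rsv->stream);
+ else if (rsv->state == UWB_RSV_STATE_NONE)
+ pr_info("uwb: reservation terminated\n");
+}
+
+static inline int example_reserve(struct uwb_rc *rc, struct uwb_dev *target)
+{
+ struct uwb_rsv *rsv;
+
+ rsv = uwb_rsv_create(rc, example_rsv_cb, NULL);
+ if (!rsv)
+ return -ENOMEM;
+ rsv->target.type = UWB_RSV_TARGET_DEV;
+ rsv->target.dev = target;
+ rsv->type = UWB_DRP_TYPE_HARD;
+ rsv->max_mas = 256;
+ rsv->min_mas = 16;
+ rsv->max_interval = 4;
+ rsv->is_multicast = false;
+ return uwb_rsv_establish(rsv);
+}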
+
+/**
+ * Radio Control Interface instance
+ *
+ *
+ * Life cycle rules: those of the UWB Device.
+ *
+ * @index: an index number for this radio controller, as used in the
+ * device name.
+ * @version: version of protocol supported by this device
+ * @priv: Backend implementation; rw with uwb_dev.dev.sem taken.
+ * @cmd: Backend implementation to execute commands; rw and call
+ * only with uwb_dev.dev.sem taken.
+ * @reset: Hardware reset of radio controller and any PAL controllers.
+ * @filter: Backend implementation to manipulate data to and from device
+ * to be compliant to specification assumed by driver (WHCI
+ * 0.95).
+ *
+ * uwb_dev.dev.mutex is used to execute commands and update
+ * the corresponding structures; can't use a spinlock
+ * because rc->cmd() can sleep.
+ * @ies: This is a dynamically allocated array caching the
+ * IEs (settable by the host) that the beacon of this
+ * radio controller is currently sending.
+ *
+ * In reality, we store here the full command we set to
+ * the radio controller (which is basically a command
+ * prefix followed by all the IEs the beacon currently
+ * contains). This way we don't have to realloc and
+ * memcpy when setting it.
+ *
+ * We set this up in uwb_rc_ie_setup(), where we alloc
+ * this struct, call get_ie() [so we know which IEs are
+ * currently being sent, if any].
+ *
+ * @ies_capacity: Amount of space (in bytes) allocated in @ies. The
+ * amount used is given by sizeof(*ies) plus ies->wIELength
+ * (which is a little endian quantity all the time).
+ * @ies_mutex: protect the IE cache
+ * @dbg: information for the debug interface
+ */
+struct uwb_rc {
+ struct uwb_dev uwb_dev;
+ int index;
+ u16 version;
+
+ struct module *owner;
+ void *priv;
+ int (*start)(struct uwb_rc *rc);
+ void (*stop)(struct uwb_rc *rc);
+ int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
+ int (*reset)(struct uwb_rc *rc);
+ int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
+ int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
+ size_t *, size_t *);
+
+ spinlock_t neh_lock; /* protects neh_* and ctx_* */
+ struct list_head neh_list; /* Open NE handles */
+ unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
+ u8 ctx_roll;
+
+ int beaconing; /* Beaconing state [channel number] */
+ int beaconing_forced;
+ int scanning;
+ enum uwb_scan_type scan_type:3;
+ unsigned ready:1;
+ struct uwb_notifs_chain notifs_chain;
+ struct uwb_beca uwb_beca;
+
+ struct uwbd uwbd;
+
+ struct uwb_drp_backoff_win bow;
+ struct uwb_drp_avail drp_avail;
+ struct list_head reservations;
+ struct list_head cnflt_alien_list;
+ struct uwb_mas_bm cnflt_alien_bitmap;
+ struct mutex rsvs_mutex;
+ spinlock_t rsvs_lock;
+ struct workqueue_struct *rsv_workq;
+
+ struct delayed_work rsv_update_work;
+ struct delayed_work rsv_alien_bp_work;
+ int set_drp_ie_pending;
+ struct mutex ies_mutex;
+ struct uwb_rc_cmd_set_ie *ies;
+ size_t ies_capacity;
+
+ struct list_head pals;
+ int active_pals;
+
+ struct uwb_dbg *dbg;
+};
+
+
+/**
+ * struct uwb_pal - a UWB PAL
+ * @name: descriptive name for this PAL (wusbhc, wlp, etc.).
+ * @device: a device for the PAL. Used to link the PAL and the radio
+ * controller in sysfs.
+ * @rc: the radio controller the PAL uses.
+ * @channel_changed: called when the channel used by the radio changes.
+ * A channel of -1 means the channel has been stopped.
+ * @new_rsv: called when a peer requests a reservation (may be NULL if
+ * the PAL cannot accept reservation requests).
+ * @channel: channel being used by the PAL; 0 if the PAL isn't using
+ * the radio; -1 if the PAL wishes to use the radio but
+ * cannot.
+ * @debugfs_dir: a debugfs directory which the PAL can use for its own
+ * debugfs files.
+ *
+ * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
+ * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
+ *
+ * The PALs using a radio controller must register themselves to
+ * permit the UWB stack to coordinate usage of the radio between the
+ * various PALs or to allow PALs to respond to certain requests from
+ * peers.
+ *
+ * A struct uwb_pal should be embedded in a containing structure
+ * belonging to the PAL and initialized with uwb_pal_init(). Fields
+ * should be set appropriately by the PAL before registering the PAL
+ * with uwb_pal_register().
+ */
+struct uwb_pal {
+ struct list_head node;
+ const char *name;
+ struct device *device;
+ struct uwb_rc *rc;
+
+ void (*channel_changed)(struct uwb_pal *pal, int channel);
+ void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv);
+
+ int channel;
+ struct dentry *debugfs_dir;
+};
+
+void uwb_pal_init(struct uwb_pal *pal);
+int uwb_pal_register(struct uwb_pal *pal);
+void uwb_pal_unregister(struct uwb_pal *pal);
+
+int uwb_radio_start(struct uwb_pal *pal);
+void uwb_radio_stop(struct uwb_pal *pal);
+
+/*
+ * General public API
+ *
+ * This API can be used by UWB device drivers or by those implementing
+ * UWB Radio Controllers
+ */
+struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
+ const struct uwb_dev_addr *devaddr);
+struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
+static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
+{
+ get_device(&uwb_dev->dev);
+}
+static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
+{
+ put_device(&uwb_dev->dev);
+}
+struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);
+
+/**
+ * Callback function for 'uwb_{dev,rc}_foreach()'.
+ *
+ * @dev: Linux device instance
+ * 'uwb_dev = container_of(dev, struct uwb_dev, dev)'
+ * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
+ *
+ * @returns: 0 to continue the iteration, any other value to stop
+ * iterating and return the value to the caller of
+ * _foreach().
+ */
+typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
+int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
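+
+/*
+ * Usage sketch (illustrative only, not part of this header): counting the
+ * devices a radio controller currently knows about. Names are
+ * hypothetical.
+ */
+static inline int example_count_one(struct device *dev, void *priv)
+{
+ unsigned *count = priv;
+
+ (*count)++;
+ return 0; /* 0 == keep iterating */
+}
+
+static inline unsigned example_count_devs(struct uwb_rc *rc)
+{
+ unsigned count = 0;
+
+ uwb_dev_for_each(rc, example_count_one, &count);
+ return count;
+}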
+
+struct uwb_rc *uwb_rc_alloc(void);
+struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
+struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
+void uwb_rc_put(struct uwb_rc *rc);
+
+typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
+ struct uwb_rceb *reply, ssize_t reply_size);
+
+int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ u8 expected_type, u16 expected_event,
+ uwb_rc_cmd_cb_f cb, void *arg);
+ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ struct uwb_rceb *reply, size_t reply_size);
+ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
+ struct uwb_rccb *cmd, size_t cmd_size,
+ u8 expected_type, u16 expected_event,
+ struct uwb_rceb **preply);
+
+size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);
+
+int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
+int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
+int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
+int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
+int __uwb_mac_addr_assigned_check(struct device *, void *);
+int __uwb_dev_addr_assigned_check(struct device *, void *);
+
+/* Print in @buf a pretty repr of @addr */
+static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
+ const struct uwb_dev_addr *addr)
+{
+ return __uwb_addr_print(buf, buf_size, addr->data, 0);
+}
+
+/* Print in @buf a pretty repr of @addr */
+static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
+ const struct uwb_mac_addr *addr)
+{
+ return __uwb_addr_print(buf, buf_size, addr->data, 1);
+}
+
+/* @returns 0 if device addresses @addr2 and @addr1 are equal */
+static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
+ const struct uwb_dev_addr *addr2)
+{
+ return memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
+static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
+ const struct uwb_mac_addr *addr2)
+{
+ return memcmp(addr1, addr2, sizeof(*addr1));
+}
+
+/* @returns !0 if a MAC @addr is a broadcast address */
+static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
+{
+ struct uwb_mac_addr bcast = {
+ .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
+ };
+ return !uwb_mac_addr_cmp(addr, &bcast);
+}
+
+/* @returns !0 if a MAC @addr is all zeroes */
+static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
+{
+ struct uwb_mac_addr unset = {
+ .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
+ };
+ return !uwb_mac_addr_cmp(addr, &unset);
+}
+
+/* @returns !0 if the address is in use. */
+static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
+ struct uwb_dev_addr *addr)
+{
+ return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
+}
+
+/*
+ * UWB Radio Controller API
+ *
+ * This API is used (in addition to the general API) to implement UWB
+ * Radio Controllers.
+ */
+void uwb_rc_init(struct uwb_rc *);
+int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
+void uwb_rc_rm(struct uwb_rc *);
+void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
+void uwb_rc_neh_error(struct uwb_rc *, int);
+void uwb_rc_reset_all(struct uwb_rc *rc);
+void uwb_rc_pre_reset(struct uwb_rc *rc);
+int uwb_rc_post_reset(struct uwb_rc *rc);
+
+/**
+ * uwb_rsv_is_owner - is the owner of this reservation the RC?
+ * @rsv: the reservation
+ */
+static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
+{
+ return rsv->owner == &rsv->rc->uwb_dev;
+}
+
+/**
+ * enum uwb_notifs - UWB events that can be passed to any listeners
+ * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group.
+ * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group.
+ *
+ * Higher layers can register callback functions with the radio
+ * controller using uwb_notifs_register(). The radio controller
+ * maintains a list of all registered handlers and will notify all
+ * nodes when an event occurs.
+ */
+enum uwb_notifs {
+ UWB_NOTIF_ONAIR,
+ UWB_NOTIF_OFFAIR,
+};
+
+/* Callback function registered with UWB */
+struct uwb_notifs_handler {
+ struct list_head list_node;
+ void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
+ void *data;
+};
+
+int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
+int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
+
+
+/**
+ * UWB radio controller Event Size Entry (for creating entry tables)
+ *
+ * WUSB and WHCI define events and notifications, and they might have
+ * fixed or variable size.
+ *
+ * Each event/notification has a size which is not necessarily known
+ * in advance based on the event code. As well, vendor specific
+ * events/notifications will have a size impossible to determine
+ * unless we know about the device's specific details.
+ *
+ * Unfortunately, the spec writers did not require a LENGTH field in the
+ * HEADER of each message, so a generic driver has no way to skip over
+ * vendor specific events/notifications. The transaction size cannot be
+ * counted on either, as the spec does not forbid packing more than one
+ * event in a single transaction.
+ *
+ * Thus, we guess sizes with tables (or for events, when you know the
+ * size ahead of time you can use uwb_rc_neh_extra_size*()). We
+ * register tables with the known events and their sizes, and then we
+ * traverse those tables. For those with variable length, we provide a
+ * way to lookup the size inside the event/notification's
+ * payload. This allows device-specific event size tables to be
+ * registered.
+ *
+ * @size: Size of the payload
+ *
+ * @offset: if != 0, at offset @offset-1 starts a field with a length
+ * that has to be added to @size. The format of the field is
+ * given by @type.
+ *
+ * @type: Type and length of the offset field. Most common is LE 16
+ * bits (that's why that is zero); others are there mostly to
+ * cover for bugs and weirdos.
+ */
+struct uwb_est_entry {
+ size_t size;
+ unsigned offset;
+ enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
+};
+
+int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
+ const struct uwb_est_entry *, size_t entries);
+int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
+ const struct uwb_est_entry *, size_t entries);
+ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
+ size_t len);
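+
+/*
+ * Sketch (illustrative only, not part of this header): a size table for a
+ * hypothetical vendor specific event set. Entry 0 is fixed size; entry 1
+ * carries a 16-bit little-endian length field immediately after the
+ * uwb_rceb header whose value has to be added to the base size.
+ */
+static const struct uwb_est_entry example_est[] = {
+ [0] = { .size = sizeof(struct uwb_rceb) + 4 },
+ [1] = { .size = sizeof(struct uwb_rceb) + 2,
+ .offset = 1 + sizeof(struct uwb_rceb), .type = UWB_EST_16 },
+};
+
+/*
+ * It would then be registered with something like:
+ * uwb_est_register(UWB_RC_CET_EX_TYPE_1, 0, vendor_id, product_id,
+ * example_est, ARRAY_SIZE(example_est));
+ */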
+
+/* -- Misc */
+
+enum {
+ EDC_MAX_ERRORS = 10,
+ EDC_ERROR_TIMEFRAME = HZ,
+};
+
+/* error density counter */
+struct edc {
+ unsigned long timestart;
+ u16 errorcount;
+};
+
+static inline
+void edc_init(struct edc *edc)
+{
+ edc->timestart = jiffies;
+}
+
+/* Called when an error occurs.
+ * This is a way to determine if the number of acceptable errors per time
+ * period has been exceeded. It is not accurate as there are cases in which
+ * this scheme will not work, for example if there are periodic occurrences
+ * of errors that straddle updates to the start time. This scheme is
+ * sufficient for our usage.
+ *
+ * @returns 1 if maximum acceptable errors per timeframe has been exceeded.
+ */
+static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
+{
+ unsigned long now;
+
+ now = jiffies;
+ if (now - err_hist->timestart > timeframe) {
+ err_hist->errorcount = 1;
+ err_hist->timestart = now;
+ } else if (++err_hist->errorcount > max_err) {
+ err_hist->errorcount = 0;
+ err_hist->timestart = now;
+ return 1;
+ }
+ return 0;
+}
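+
+/*
+ * Usage sketch (illustrative only, not part of this header): giving up on
+ * flaky hardware once it exceeds EDC_MAX_ERRORS failures within
+ * EDC_ERROR_TIMEFRAME jiffies. The function name is hypothetical.
+ */
+static inline int example_note_error(struct edc *edc)
+{
+ if (edc_inc(edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
+ return -EIO; /* too many errors, caller should reset */
+ return 0; /* isolated error, keep going */
+}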
+
+
+/* Information Element handling */
+
+struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
+int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size);
+int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id);
+
+/*
+ * Transmission statistics
+ *
+ * UWB uses LQI and RSSI (one byte values) for reporting radio signal
+ * strength and line quality indication. We do quick and dirty
+ * averages of those. They are signed values, btw.
+ *
+ * For 8 bit quantities, we keep the min, the max, an accumulator
+ * (@sigma) and a # of samples. When @samples gets to 255, we compute
+ * the average (@sigma / @samples), place it in @sigma and reset
+ * @samples to 1 (so we use it as the first sample).
+ *
+ * Now, statistically speaking, probably I am kicking the kidneys of
+ * some books I have in my shelves collecting dust, but I just want to
+ * get an approx, not the Nobel.
+ *
+ * LOCKING: there is no locking per se, but we try to keep a lockless
+ * schema. Only _add_samples() modifies the values--as long as you
+ * have other locking on top that makes sure that no two calls of
+ * _add_sample() happen at the same time, then we are fine. Now, for
+ * resetting the values we just set @samples to 0 and that makes the
+ * next _add_sample() start with defaults. Reading the values in
+ * _show() currently can race, so you need to make sure the calls are
+ * under the same lock that protects calls to _add_sample(). FIXME:
+ * currently unlocked (It is not ultraprecise but does the trick. Bite
+ * me).
+ */
+struct stats {
+ s8 min, max;
+ s16 sigma;
+ atomic_t samples;
+};
+
+static inline
+void stats_init(struct stats *stats)
+{
+ atomic_set(&stats->samples, 0);
+ wmb();
+}
+
+static inline
+void stats_add_sample(struct stats *stats, s8 sample)
+{
+ s8 min, max;
+ s16 sigma;
+ unsigned samples = atomic_read(&stats->samples);
+ if (samples == 0) { /* it was zero before, so we initialize */
+ min = 127;
+ max = -128;
+ sigma = 0;
+ } else {
+ min = stats->min;
+ max = stats->max;
+ sigma = stats->sigma;
+ }
+
+ if (sample < min) /* compute new values */
+ min = sample;
+ else if (sample > max)
+ max = sample;
+ sigma += sample;
+
+ stats->min = min; /* commit */
+ stats->max = max;
+ stats->sigma = sigma;
+ if (atomic_add_return(1, &stats->samples) > 255) {
+ /* wrapped around! reset */
+ stats->sigma = sigma / 256;
+ atomic_set(&stats->samples, 1);
+ }
+}
+
+static inline ssize_t stats_show(struct stats *stats, char *buf)
+{
+ int min, max, avg;
+ int samples = atomic_read(&stats->samples);
+ if (samples == 0)
+ min = max = avg = 0;
+ else {
+ min = stats->min;
+ max = stats->max;
+ avg = stats->sigma / samples;
+ }
+ return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
+}
+
+static inline ssize_t stats_store(struct stats *stats, const char *buf,
+ size_t size)
+{
+ stats_init(stats);
+ return size;
+}
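+
+/*
+ * Usage sketch (illustrative only, not part of this header): feeding the
+ * link quality reported with each received beacon into a struct stats as
+ * described above. Names are hypothetical; the caller provides whatever
+ * locking stats_add_sample() needs.
+ */
+static inline void example_account_beacon(struct stats *lqi_stats,
+ const struct uwb_rc_evt_beacon *be)
+{
+ stats_add_sample(lqi_stats, (s8)be->bLQI);
+}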
+
+#endif /* #ifndef __LINUX__UWB_H__ */
diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h
new file mode 100644
index 000000000..8da004e25
--- /dev/null
+++ b/include/linux/uwb/debug-cmd.h
@@ -0,0 +1,68 @@
+/*
+ * Ultra Wide Band
+ * Debug interface commands
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX__UWB__DEBUG_CMD_H__
+#define __LINUX__UWB__DEBUG_CMD_H__
+
+#include <linux/types.h>
+
+/*
+ * Debug interface commands
+ *
+ * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
+ *
+ * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
+ */
+
+enum uwb_dbg_cmd_type {
+ UWB_DBG_CMD_RSV_ESTABLISH = 1,
+ UWB_DBG_CMD_RSV_TERMINATE = 2,
+ UWB_DBG_CMD_IE_ADD = 3,
+ UWB_DBG_CMD_IE_RM = 4,
+ UWB_DBG_CMD_RADIO_START = 5,
+ UWB_DBG_CMD_RADIO_STOP = 6,
+};
+
+struct uwb_dbg_cmd_rsv_establish {
+ __u8 target[6];
+ __u8 type;
+ __u16 max_mas;
+ __u16 min_mas;
+ __u8 max_interval;
+};
+
+struct uwb_dbg_cmd_rsv_terminate {
+ int index;
+};
+
+struct uwb_dbg_cmd_ie {
+ __u8 data[128];
+ int len;
+};
+
+struct uwb_dbg_cmd {
+ __u32 type;
+ union {
+ struct uwb_dbg_cmd_rsv_establish rsv_establish;
+ struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
+ struct uwb_dbg_cmd_ie ie_add;
+ struct uwb_dbg_cmd_ie ie_rm;
+ };
+};
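+
+/*
+ * Usage sketch (illustrative only, not part of this header): filling in a
+ * command that asks the debug interface to establish a hard reservation of
+ * 16 to 32 MAS towards a given EUI-48. The function name is hypothetical;
+ * memset()/memcpy() are assumed to be available, and delivery (typically a
+ * write to the radio controller's debugfs command file) is up to the
+ * caller.
+ */
+static inline void example_fill_rsv_establish(struct uwb_dbg_cmd *cmd,
+ const __u8 target[6])
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->type = UWB_DBG_CMD_RSV_ESTABLISH;
+ memcpy(cmd->rsv_establish.target, target, 6);
+ cmd->rsv_establish.type = 1; /* hard reservation (UWB_DRP_TYPE_HARD) */
+ cmd->rsv_establish.max_mas = 32;
+ cmd->rsv_establish.min_mas = 16;
+ cmd->rsv_establish.max_interval = 4;
+}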
+
+#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h
new file mode 100644
index 000000000..0df24bfcd
--- /dev/null
+++ b/include/linux/uwb/spec.h
@@ -0,0 +1,781 @@
+/*
+ * Ultra Wide Band
+ * UWB Standard definitions
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * All these definitions are based on the ECMA-368 standard.
+ *
+ * Note all definitions are Little Endian in the wire, and we will
+ * convert them to host order before operating on the bitfields (that
+ * yes, we use extensively).
+ */
+
+#ifndef __LINUX__UWB_SPEC_H__
+#define __LINUX__UWB_SPEC_H__
+
+#include <linux/types.h>
+#include <linux/bitmap.h>
+#include <linux/if_ether.h>
+
+#define i1480_FW 0x00000303
+/* #define i1480_FW 0x00000302 */
+
+/**
+ * Number of Medium Access Slots in a superframe.
+ *
+ * UWB divides time in SuperFrames, each one divided in 256 pieces, or
+ * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
+ * basic bandwidth allocation unit in UWB.
+ */
+enum { UWB_NUM_MAS = 256 };
+
+/**
+ * Number of Zones in superframe.
+ *
+ * UWB divides the superframe into zones with numbering starting from BPST.
+ * See MBOA MAC[16.8.6]
+ */
+enum { UWB_NUM_ZONES = 16 };
+
+/*
+ * Number of MAS in a zone.
+ */
+#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
+
+/*
+ * Number of MAS required before a row can be considered available.
+ */
+#define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1)
+
+/*
+ * Number of streams per DRP reservation between a pair of devices.
+ *
+ * [ECMA-368] section 16.8.6.
+ */
+enum { UWB_NUM_STREAMS = 8 };
+
+/*
+ * mMasLength
+ *
+ * The length of a MAS in microseconds.
+ *
+ * [ECMA-368] section 17.16.
+ */
+enum { UWB_MAS_LENGTH_US = 256 };
+
+/*
+ * mBeaconSlotLength
+ *
+ * The length of the beacon slot in microseconds.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
+
+/*
+ * mMaxLostBeacons
+ *
+ * The number of beacons missed in consecutive superframes before a
+ * device is considered unreachable.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_MAX_LOST_BEACONS = 3 };
+
+/*
+ * mDRPBackOffWinMin
+ *
+ * The minimum number of superframes to wait before trying to reserve
+ * extra MAS.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_DRP_BACKOFF_WIN_MIN = 2 };
+
+/*
+ * mDRPBackOffWinMax
+ *
+ * The maximum number of superframes to wait before trying to reserve
+ * extra MAS.
+ *
+ * [ECMA-368] section 17.16
+ */
+enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
+
+/*
+ * Length of a superframe in microseconds.
+ */
+#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
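+
+/*
+ * Worked example: with UWB_MAS_LENGTH_US = 256 and UWB_NUM_MAS = 256,
+ * UWB_SUPERFRAME_LENGTH_US evaluates to 256 * 256 = 65536 microseconds,
+ * i.e. a superframe lasts roughly 65.5 ms.
+ */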
+
+/**
+ * UWB MAC address
+ *
+ * It is *imperative* that this struct is exactly 6 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_mac_addr {
+ u8 data[ETH_ALEN];
+} __attribute__((packed));
+
+
+/**
+ * UWB device address
+ *
+ * It is *imperative* that this struct is exactly 2 packed bytes (as
+ * it is also used to define headers sent down and up the wire/radio).
+ */
+struct uwb_dev_addr {
+ u8 data[2];
+} __attribute__((packed));
+
+
+/**
+ * Types of UWB addresses
+ *
+ * Order matters (by size).
+ */
+enum uwb_addr_type {
+ UWB_ADDR_DEV = 0,
+ UWB_ADDR_MAC = 1,
+};
+
+
+/** Size of a char buffer for printing a MAC/device address */
+enum { UWB_ADDR_STRSIZE = 32 };
+
+
+/** UWB WiMedia protocol IDs. */
+enum uwb_prid {
+ UWB_PRID_WLP_RESERVED = 0x0000,
+ UWB_PRID_WLP = 0x0001,
+ UWB_PRID_WUSB_BOT = 0x0010,
+ UWB_PRID_WUSB = 0x0010,
+ UWB_PRID_WUSB_TOP = 0x001F,
+};
+
+
+/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */
+enum uwb_phy_rate {
+ UWB_PHY_RATE_53 = 0,
+ UWB_PHY_RATE_80,
+ UWB_PHY_RATE_106,
+ UWB_PHY_RATE_160,
+ UWB_PHY_RATE_200,
+ UWB_PHY_RATE_320,
+ UWB_PHY_RATE_400,
+ UWB_PHY_RATE_480,
+ UWB_PHY_RATE_INVALID
+};
+
+
+/**
+ * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78])
+ */
+enum uwb_scan_type {
+ UWB_SCAN_ONLY = 0,
+ UWB_SCAN_OUTSIDE_BP,
+ UWB_SCAN_WHILE_INACTIVE,
+ UWB_SCAN_DISABLED,
+ UWB_SCAN_ONLY_STARTTIME,
+ UWB_SCAN_TOP
+};
+
+
+/** ACK Policy types (MBOA MAC[7.2.1.3]) */
+enum uwb_ack_pol {
+ UWB_ACK_NO = 0,
+ UWB_ACK_INM = 1,
+ UWB_ACK_B = 2,
+ UWB_ACK_B_REQ = 3,
+};
+
+
+/** DRP reservation types ([ECMA-368] table 106) */
+enum uwb_drp_type {
+ UWB_DRP_TYPE_ALIEN_BP = 0,
+ UWB_DRP_TYPE_HARD,
+ UWB_DRP_TYPE_SOFT,
+ UWB_DRP_TYPE_PRIVATE,
+ UWB_DRP_TYPE_PCA,
+};
+
+
+/** DRP Reason Codes ([ECMA-368] table 107) */
+enum uwb_drp_reason {
+ UWB_DRP_REASON_ACCEPTED = 0,
+ UWB_DRP_REASON_CONFLICT,
+ UWB_DRP_REASON_PENDING,
+ UWB_DRP_REASON_DENIED,
+ UWB_DRP_REASON_MODIFIED,
+};
+
+/** Relinquish Request Reason Codes ([ECMA-368] table 113) */
+enum uwb_relinquish_req_reason {
+ UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0,
+ UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION,
+};
+
+/**
+ * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9])
+ */
+enum uwb_drp_notif_reason {
+ UWB_DRP_NOTIF_DRP_IE_RCVD = 0,
+ UWB_DRP_NOTIF_CONFLICT,
+ UWB_DRP_NOTIF_TERMINATE,
+};
+
+
+/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */
+struct uwb_drp_alloc {
+ __le16 zone_bm;
+ __le16 mas_bm;
+} __attribute__((packed));
+
+
+/** General MAC Header format (ECMA-368[16.2]) */
+struct uwb_mac_frame_hdr {
+ __le16 Frame_Control;
+ struct uwb_dev_addr DestAddr;
+ struct uwb_dev_addr SrcAddr;
+ __le16 Sequence_Control;
+ __le16 Access_Information;
+} __attribute__((packed));
+
+
+/**
+ * uwb_beacon_frame - a beacon frame including MAC headers
+ *
+ * [ECMA] section 16.3.
+ */
+struct uwb_beacon_frame {
+ struct uwb_mac_frame_hdr hdr;
+ struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */
+ u8 Beacon_Slot_Number;
+ u8 Device_Control;
+ u8 IEData[];
+} __attribute__((packed));
+
+
+/** Information Element codes (MBOA MAC[T54]) */
+enum uwb_ie {
+ UWB_PCA_AVAILABILITY = 2,
+ UWB_IE_DRP_AVAILABILITY = 8,
+ UWB_IE_DRP = 9,
+ UWB_BP_SWITCH_IE = 11,
+ UWB_MAC_CAPABILITIES_IE = 12,
+ UWB_PHY_CAPABILITIES_IE = 13,
+ UWB_APP_SPEC_PROBE_IE = 15,
+ UWB_IDENTIFICATION_IE = 19,
+ UWB_MASTER_KEY_ID_IE = 20,
+ UWB_RELINQUISH_REQUEST_IE = 21,
+ UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */
+ UWB_APP_SPEC_IE = 255,
+};
+
+
+/**
+ * Header common to all Information Elements (IEs)
+ */
+struct uwb_ie_hdr {
+ u8 element_id; /* enum uwb_ie */
+ u8 length;
+} __attribute__((packed));
+
+
+/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */
+struct uwb_ie_drp {
+ struct uwb_ie_hdr hdr;
+ __le16 drp_control;
+ struct uwb_dev_addr dev_addr;
+ struct uwb_drp_alloc allocs[];
+} __attribute__((packed));
+
+static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 0) & 0x7;
+}
+
+static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 3) & 0x7;
+}
+
+static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 6) & 0x7;
+}
+
+static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 9) & 0x1;
+}
+
+static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 10) & 0x1;
+}
+
+static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 11) & 0x1;
+}
+
+static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie)
+{
+ return (le16_to_cpu(ie->drp_control) >> 12) & 0x1;
+}
+
+static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x7 << 0)) | (type << 0);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie,
+ enum uwb_drp_reason reason_code)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x7 << 6)) | (reason_code << 6);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 9)) | (status << 9);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe)
+{
+ u16 drp_control = le16_to_cpu(ie->drp_control);
+ drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12);
+ ie->drp_control = cpu_to_le16(drp_control);
+}
+
+/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */
+struct uwb_ie_drp_avail {
+ struct uwb_ie_hdr hdr;
+ DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+} __attribute__((packed));
+
+/* Relinquish Request IE ([ECMA-368] section 16.8.19). */
+struct uwb_relinquish_request_ie {
+ struct uwb_ie_hdr hdr;
+ __le16 relinquish_req_control;
+ struct uwb_dev_addr dev_addr;
+ struct uwb_drp_alloc allocs[];
+} __attribute__((packed));
+
+static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie)
+{
+ return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf;
+}
+
+static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie,
+ int reason_code)
+{
+ u16 ctrl = le16_to_cpu(ie->relinquish_req_control);
+ ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0);
+ ie->relinquish_req_control = cpu_to_le16(ctrl);
+}
+
+/**
+ * The Vendor ID is set to an OUI that indicates the vendor of the device.
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_vendor_id {
+ u8 data[3];
+} __attribute__((packed));
+
+/**
+ * The device type ID
+ * FIXME: clarify what this means
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_device_type_id {
+ u8 data[3];
+} __attribute__((packed));
+
+
+/**
+ * UWB device information types
+ * ECMA-368 [16.8.10]
+ */
+enum uwb_dev_info_type {
+ UWB_DEV_INFO_VENDOR_ID = 0,
+ UWB_DEV_INFO_VENDOR_TYPE,
+ UWB_DEV_INFO_NAME,
+};
+
+/**
+ * UWB device information found in Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_dev_info {
+ u8 type; /* enum uwb_dev_info_type */
+ u8 length;
+ u8 data[];
+} __attribute__((packed));
+
+/**
+ * UWB Identification IE
+ * ECMA-368 [16.8.10]
+ */
+struct uwb_identification_ie {
+ struct uwb_ie_hdr hdr;
+ struct uwb_dev_info info[];
+} __attribute__((packed));
+
+/*
+ * UWB Radio Controller
+ *
+ * These definitions are common to the Radio Control layers as
+ * exported by the WUSB1.0 HWA and WHCI interfaces.
+ */
+
+/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */
+struct uwb_rccb {
+ u8 bCommandType; /* enum hwa_cet */
+ __le16 wCommand; /* Command code */
+ u8 bCommandContext; /* Context ID */
+} __attribute__((packed));
+
+
+/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */
+struct uwb_rceb {
+ u8 bEventType; /* enum hwa_cet */
+ __le16 wEvent; /* Event code */
+ u8 bEventContext; /* Context ID */
+} __attribute__((packed));
+
+
+enum {
+ UWB_RC_CET_GENERAL = 0, /* General Command/Event type */
+ UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */
+};
+
+/* Commands to the radio controller */
+enum uwb_rc_cmd {
+ UWB_RC_CMD_CHANNEL_CHANGE = 16,
+ UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */
+ UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */
+ UWB_RC_CMD_RESET = 19,
+ UWB_RC_CMD_SCAN = 20, /* Scan management */
+ UWB_RC_CMD_SET_BEACON_FILTER = 21,
+ UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */
+ UWB_RC_CMD_SET_IE = 23, /* Information Element management */
+ UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24,
+ UWB_RC_CMD_SET_TX_POWER = 25,
+ UWB_RC_CMD_SLEEP = 26,
+ UWB_RC_CMD_START_BEACON = 27,
+ UWB_RC_CMD_STOP_BEACON = 28,
+ UWB_RC_CMD_BP_MERGE = 29,
+ UWB_RC_CMD_SEND_COMMAND_FRAME = 30,
+ UWB_RC_CMD_SET_ASIE_NOTIF = 31,
+};
+
+/* Notifications from the radio controller */
+enum uwb_rc_evt {
+ UWB_RC_EVT_IE_RCV = 0,
+ UWB_RC_EVT_BEACON = 1,
+ UWB_RC_EVT_BEACON_SIZE = 2,
+ UWB_RC_EVT_BPOIE_CHANGE = 3,
+ UWB_RC_EVT_BP_SLOT_CHANGE = 4,
+ UWB_RC_EVT_BP_SWITCH_IE_RCV = 5,
+ UWB_RC_EVT_DEV_ADDR_CONFLICT = 6,
+ UWB_RC_EVT_DRP_AVAIL = 7,
+ UWB_RC_EVT_DRP = 8,
+ UWB_RC_EVT_BP_SWITCH_STATUS = 9,
+ UWB_RC_EVT_CMD_FRAME_RCV = 10,
+ UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11,
+ /* Events (command responses) use the same code as the command */
+ UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535,
+};
+
+enum uwb_rc_extended_type_1_cmd {
+ UWB_RC_SET_DAA_ENERGY_MASK = 32,
+ UWB_RC_SET_NOTIFICATION_FILTER_EX = 33,
+};
+
+enum uwb_rc_extended_type_1_evt {
+ UWB_RC_DAA_ENERGY_DETECTED = 0,
+};
+
+/* Radio Control Result Code. [WHCI] table 3-3. */
+enum {
+ UWB_RC_RES_SUCCESS = 0,
+ UWB_RC_RES_FAIL,
+ UWB_RC_RES_FAIL_HARDWARE,
+ UWB_RC_RES_FAIL_NO_SLOTS,
+ UWB_RC_RES_FAIL_BEACON_TOO_LARGE,
+ UWB_RC_RES_FAIL_INVALID_PARAMETER,
+ UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL,
+ UWB_RC_RES_FAIL_INVALID_IE_DATA,
+ UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED,
+ UWB_RC_RES_FAIL_CANCELLED,
+ UWB_RC_RES_FAIL_INVALID_STATE,
+ UWB_RC_RES_FAIL_INVALID_SIZE,
+ UWB_RC_RES_FAIL_ACK_NOT_RECEIVED,
+ UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF,
+ UWB_RC_RES_FAIL_TIME_OUT = 255,
+};
+
+/* Confirm event. [WHCI] section 3.1.3.1 etc. */
+struct uwb_rc_evt_confirm {
+ struct uwb_rceb rceb;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Device Address Management event. [WHCI] section 3.1.3.2. */
+struct uwb_rc_evt_dev_addr_mgmt {
+ struct uwb_rceb rceb;
+ u8 baAddr[ETH_ALEN];
+ u8 bResultCode;
+} __attribute__((packed));
+
+
+/* Get IE Event. [WHCI] section 3.1.3.3. */
+struct uwb_rc_evt_get_ie {
+ struct uwb_rceb rceb;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Set DRP IE Event. [WHCI] section 3.1.3.7. */
+struct uwb_rc_evt_set_drp_ie {
+ struct uwb_rceb rceb;
+ __le16 wRemainingSpace;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Set IE Event. [WHCI] section 3.1.3.8. */
+struct uwb_rc_evt_set_ie {
+ struct uwb_rceb rceb;
+ __le16 RemainingSpace;
+ u8 bResultCode;
+} __attribute__((packed));
+
+/* Scan command. [WHCI] 3.1.3.5. */
+struct uwb_rc_cmd_scan {
+ struct uwb_rccb rccb;
+ u8 bChannelNumber;
+ u8 bScanState;
+ __le16 wStartTime;
+} __attribute__((packed));
+
+/* Set DRP IE command. [WHCI] section 3.1.3.7. */
+struct uwb_rc_cmd_set_drp_ie {
+ struct uwb_rccb rccb;
+ __le16 wIELength;
+ struct uwb_ie_drp IEData[];
+} __attribute__((packed));
+
+/* Set IE command. [WHCI] section 3.1.3.8. */
+struct uwb_rc_cmd_set_ie {
+ struct uwb_rccb rccb;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */
+struct uwb_rc_evt_set_daa_energy_mask {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 result;
+} __attribute__((packed));
+
+/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */
+struct uwb_rc_evt_set_notification_filter_ex {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 result;
+} __attribute__((packed));
+
+/* IE Received notification. [WHCI] section 3.1.4.1. */
+struct uwb_rc_evt_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr SrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* Type of the received beacon. [WHCI] section 3.1.4.2. */
+enum uwb_rc_beacon_type {
+ UWB_RC_BEACON_TYPE_SCAN = 0,
+ UWB_RC_BEACON_TYPE_NEIGHBOR,
+ UWB_RC_BEACON_TYPE_OL_ALIEN,
+ UWB_RC_BEACON_TYPE_NOL_ALIEN,
+};
+
+/* Beacon received notification. [WHCI] 3.1.4.2. */
+struct uwb_rc_evt_beacon {
+ struct uwb_rceb rceb;
+ u8 bChannelNumber;
+ u8 bBeaconType;
+ __le16 wBPSTOffset;
+ u8 bLQI;
+ u8 bRSSI;
+ __le16 wBeaconInfoLength;
+ u8 BeaconInfo[];
+} __attribute__((packed));
+
+
+/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */
+struct uwb_rc_evt_beacon_size {
+ struct uwb_rceb rceb;
+ __le16 wNewBeaconSize;
+} __attribute__((packed));
+
+
+/* BPOIE Change notification. [WHCI] section 3.1.4.4. */
+struct uwb_rc_evt_bpoie_change {
+ struct uwb_rceb rceb;
+ __le16 wBPOIELength;
+ u8 BPOIE[];
+} __attribute__((packed));
+
+
+/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */
+struct uwb_rc_evt_bp_slot_change {
+ struct uwb_rceb rceb;
+ u8 slot_info;
+} __attribute__((packed));
+
+static inline int uwb_rc_evt_bp_slot_change_slot_num(
+ const struct uwb_rc_evt_bp_slot_change *evt)
+{
+ return evt->slot_info & 0x7f;
+}
+
+static inline int uwb_rc_evt_bp_slot_change_no_slot(
+ const struct uwb_rc_evt_bp_slot_change *evt)
+{
+ return (evt->slot_info & 0x80) >> 7;
+}
+
+/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */
+struct uwb_rc_evt_bp_switch_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr wSrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */
+struct uwb_rc_evt_dev_addr_conflict {
+ struct uwb_rceb rceb;
+} __attribute__((packed));
+
+/* DRP notification. [WHCI] section 3.1.4.9. */
+struct uwb_rc_evt_drp {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr src_addr;
+ u8 reason;
+ u8 beacon_slot_number;
+ __le16 ie_length;
+ u8 ie_data[];
+} __attribute__((packed));
+
+static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt)
+{
+ return evt->reason & 0x0f;
+}
+
+
+/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */
+struct uwb_rc_evt_drp_avail {
+ struct uwb_rceb rceb;
+ DECLARE_BITMAP(bmp, UWB_NUM_MAS);
+} __attribute__((packed));
+
+/* BP switch status notification. [WHCI] section 3.1.4.10. */
+struct uwb_rc_evt_bp_switch_status {
+ struct uwb_rceb rceb;
+ u8 status;
+ u8 slot_offset;
+ __le16 bpst_offset;
+ u8 move_countdown;
+} __attribute__((packed));
+
+/* Command Frame Received notification. [WHCI] section 3.1.4.11. */
+struct uwb_rc_evt_cmd_frame_rcv {
+ struct uwb_rceb rceb;
+ __le16 receive_time;
+ struct uwb_dev_addr wSrcAddr;
+ struct uwb_dev_addr wDstAddr;
+ __le16 control;
+ __le16 reserved;
+ __le16 dataLength;
+ u8 data[];
+} __attribute__((packed));
+
+/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */
+struct uwb_rc_evt_channel_change_ie_rcv {
+ struct uwb_rceb rceb;
+ struct uwb_dev_addr wSrcAddr;
+ __le16 wIELength;
+ u8 IEData[];
+} __attribute__((packed));
+
+/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
+struct uwb_rc_evt_daa_energy_detected {
+ struct uwb_rceb rceb;
+ __le16 wLength;
+ u8 bandID;
+ u8 reserved;
+ u8 toneBmp[16];
+} __attribute__((packed));
+
+
+/**
+ * Radio Control Interface Class Descriptor
+ *
+ * WUSB 1.0 [8.6.1.2]
+ */
+struct uwb_rc_control_intf_class_desc {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 bcdRCIVersion;
+} __attribute__((packed));
+
+#endif /* #ifndef __LINUX__UWB_SPEC_H__ */
diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h
new file mode 100644
index 000000000..02112299a
--- /dev/null
+++ b/include/linux/uwb/umc.h
@@ -0,0 +1,193 @@
+/*
+ * UWB Multi-interface Controller support.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * UMC (UWB Multi-interface Controller) capabilities (e.g., radio
+ * controller, host controller) are presented as devices on the "umc"
+ * bus.
+ *
+ * The radio controller is not strictly a UMC capability but it's
+ * useful to present it as such.
+ *
+ * References:
+ *
+ * [WHCI] Wireless Host Controller Interface Specification for
+ * Certified Wireless Universal Serial Bus, revision 0.95.
+ *
+ * How this works is kind of convoluted but simple. The whci.ko driver
+ * loads when WHCI devices are detected. These WHCI devices expose
+ * many devices in the same PCI function (they couldn't have reused
+ * functions, no), so for each PCI function that exposes these many
+ * devices, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
+ * with umc_device_create() and adds it to the bus with
+ * umc_device_register().
+ *
+ * umc_device_register() calls device_register(), which makes the bus
+ * management code call the something_probe() that your UMC driver has
+ * registered for that capability code.
+ *
+ * Now when the WHCI device is removed, whci_remove() will go over
+ * each umc_dev assigned to each of the PCI function's capabilities
+ * and, through whci_del_cap(), call umc_device_unregister() on each
+ * created umc_dev. Of course, if you are bound to the device, your
+ * driver's something_remove() will be called.
+ */
+
+#ifndef _LINUX_UWB_UMC_H_
+#define _LINUX_UWB_UMC_H_
+
+#include <linux/device.h>
+#include <linux/pci.h>
+
+/*
+ * UMC capability IDs.
+ *
+ * 0x00 is reserved so use it for the radio controller device.
+ *
+ * [WHCI] table 2-8
+ */
+#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */
+#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
+
+/**
+ * struct umc_dev - UMC capability device
+ *
+ * @version: version of the specification this capability conforms to.
+ * @cap_id: capability ID.
+ * @bar: PCI Bar (64 bit) where the resource lies
+ * @resource: register space resource.
+ * @irq: interrupt line.
+ */
+struct umc_dev {
+ u16 version;
+ u8 cap_id;
+ u8 bar;
+ struct resource resource;
+ unsigned irq;
+ struct device dev;
+};
+
+#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
+
+/**
+ * struct umc_driver - UMC capability driver
+ * @cap_id: supported capability ID.
+ * @match: driver specific capability matching function.
+ * @match_data: driver specific data for match() (e.g., a
+ * table of pci_device_id's if umc_match_pci_id() is used).
+ */
+struct umc_driver {
+ char *name;
+ u8 cap_id;
+ int (*match)(struct umc_driver *, struct umc_dev *);
+ const void *match_data;
+
+ int (*probe)(struct umc_dev *);
+ void (*remove)(struct umc_dev *);
+ int (*pre_reset)(struct umc_dev *);
+ int (*post_reset)(struct umc_dev *);
+
+ struct device_driver driver;
+};
+
+#define to_umc_driver(d) container_of(d, struct umc_driver, driver)
+
+extern struct bus_type umc_bus_type;
+
+struct umc_dev *umc_device_create(struct device *parent, int n);
+int __must_check umc_device_register(struct umc_dev *umc);
+void umc_device_unregister(struct umc_dev *umc);
+
+int __must_check __umc_driver_register(struct umc_driver *umc_drv,
+ struct module *mod,
+ const char *mod_name);
+
+/**
+ * umc_driver_register - register a UMC capability driver.
+ * @umc_drv: pointer to the driver.
+ */
+#define umc_driver_register(umc_drv) \
+ __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME)
+
+void umc_driver_unregister(struct umc_driver *umc_drv);
+
+/*
+ * Utility function you can use to match (umc_driver->match) against a
+ * null-terminated array of 'struct pci_device_id' in
+ * umc_driver->match_data.
+ */
+int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc);
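+
+/*
+ * Registration sketch (illustrative only, not part of this header): the
+ * skeleton of a capability driver for the WHCI radio controller
+ * capability, as it would appear in a driver's .c file. All example_*
+ * names are hypothetical.
+ */
+static int example_umc_probe(struct umc_dev *umc)
+{
+ /* ioremap umc->resource, request umc->irq, allocate state, ... */
+ return 0;
+}
+
+static void example_umc_remove(struct umc_dev *umc)
+{
+ /* undo example_umc_probe() */
+}
+
+static struct umc_driver example_umc_driver = {
+ .name = "example-umc",
+ .cap_id = UMC_CAP_ID_WHCI_RC,
+ .probe = example_umc_probe,
+ .remove = example_umc_remove,
+};
+
+/* The driver's module_init() would then call
+ * umc_driver_register(&example_umc_driver) and its module_exit()
+ * umc_driver_unregister(&example_umc_driver).
+ */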
+
+/**
+ * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none
+ * @umc_dev: UMC device whose parent PCI device we are looking for
+ *
+ * DIRTY!!! DON'T RELY ON THIS
+ *
+ * FIXME: This is as dirty as it gets, but we need some way to check
+ * the correct type of umc_dev->parent (so that for example, we can
+ * cast to pci_dev). Casting to pci_dev is necessary because at some
+ * point we need to request resources from the device. Mapping is
+ * easily overcome (ioremap and stuff are bus agnostic), but hooking
+ * up to some error handlers (such as pci error handlers) might need
+ * this.
+ *
+ * THIS might (probably will) be removed in the future, so don't count
+ * on it.
+ */
+static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev)
+{
+ struct pci_dev *pci_dev = NULL;
+ if (dev_is_pci(umc_dev->dev.parent))
+ pci_dev = to_pci_dev(umc_dev->dev.parent);
+ return pci_dev;
+}
+
+/**
+ * umc_dev_get() - reference a UMC device.
+ * @umc_dev: Pointer to UMC device.
+ *
+ * NOTE: we are assuming in this whole scheme that the parent device
+ * is referenced at _probe() time and unreferenced at _remove()
+ * time by the parent's subsystem.
+ */
+static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev)
+{
+ get_device(&umc_dev->dev);
+ return umc_dev;
+}
+
+/**
+ * umc_dev_put() - unreference a UMC device.
+ * @umc_dev: Pointer to UMC device.
+ */
+static inline void umc_dev_put(struct umc_dev *umc_dev)
+{
+ put_device(&umc_dev->dev);
+}
+
+/**
+ * umc_set_drvdata - set UMC device's driver data.
+ * @umc_dev: Pointer to UMC device.
+ * @data: Data to set.
+ */
+static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data)
+{
+ dev_set_drvdata(&umc_dev->dev, data);
+}
+
+/**
+ * umc_get_drvdata - recover UMC device's driver data.
+ * @umc_dev: Pointer to UMC device.
+ */
+static inline void *umc_get_drvdata(struct umc_dev *umc_dev)
+{
+ return dev_get_drvdata(&umc_dev->dev);
+}
+
+int umc_controller_reset(struct umc_dev *umc);
+
+#endif /* #ifndef _LINUX_UWB_UMC_H_ */
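
To make the registration flow above concrete, here is a minimal, hypothetical capability-driver sketch; the whci_foo names, the made-up PCI IDs and the ioremap-based probe body are illustrative assumptions, not part of this header.

/* Hypothetical UMC capability driver built only on the API above. */
#include <linux/module.h>
#include <linux/io.h>
#include <linux/uwb/umc.h>

static const struct pci_device_id whci_foo_pci_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* made-up vendor/device IDs */
	{ },				/* terminator */
};

static int whci_foo_probe(struct umc_dev *umc)
{
	void __iomem *regs;

	regs = ioremap(umc->resource.start, resource_size(&umc->resource));
	if (!regs)
		return -ENOMEM;
	umc_set_drvdata(umc, regs);
	return 0;
}

static void whci_foo_remove(struct umc_dev *umc)
{
	iounmap(umc_get_drvdata(umc));
}

static struct umc_driver whci_foo_driver = {
	.name		= "whci-foo",
	.cap_id		= UMC_CAP_ID_WHCI_WUSB_HC,
	.match		= umc_match_pci_id,
	.match_data	= whci_foo_pci_ids,
	.probe		= whci_foo_probe,
	.remove		= whci_foo_remove,
};

static int __init whci_foo_init(void)
{
	return umc_driver_register(&whci_foo_driver);
}
module_init(whci_foo_init);

static void __exit whci_foo_exit(void)
{
	umc_driver_unregister(&whci_foo_driver);
}
module_exit(whci_foo_exit);
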
diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h
new file mode 100644
index 000000000..915ec2304
--- /dev/null
+++ b/include/linux/uwb/whci.h
@@ -0,0 +1,117 @@
+/*
+ * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ *
+ * References:
+ * [WHCI] Wireless Host Controller Interface Specification for
+ * Certified Wireless Universal Serial Bus, revision 0.95.
+ */
+#ifndef _LINUX_UWB_WHCI_H_
+#define _LINUX_UWB_WHCI_H_
+
+#include <linux/pci.h>
+
+/*
+ * UWB interface capability registers (offsets from UWBBASE)
+ *
+ * [WHCI] section 2.2
+ */
+#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */
+# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull)
+#define UWBCAPDATA(n) (8*(n))
+# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull)
+# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull)
+# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull)
+# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32))
+# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull)
+
+/* Size of the WHCI capability data (including the RC capability) for
+ a device with n capabilities. */
+#define UWBCAPDATA_SIZE(n) (8 + 8*(n))
+
+
+/*
+ * URC registers (offsets from URCBASE)
+ *
+ * [WHCI] section 2.3
+ */
+#define URCCMD 0x00
+# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */
+# define URCCMD_RS (1 << 30) /* Run/Stop */
+# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */
+# define URCCMD_ACTIVE (1 << 15) /* Command is active */
+# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */
+# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */
+#define URCSTS 0x04
+# define URCSTS_EPS (1 << 17) /* Event Processing Status */
+# define URCSTS_HALTED (1 << 16) /* RC halted */
+# define URCSTS_HSE (1 << 10) /* Host System Error...fried */
+# define URCSTS_ER (1 << 9) /* Event Ready */
+# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */
+# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */
+# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */
+#define URCINTR 0x08
+# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */
+#define URCCMDADDR 0x10
+#define URCEVTADDR 0x18
+# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */
+
+
+/** Write 32 bit @value to little endian register at @addr */
+static inline
+void le_writel(u32 value, void __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+
+/** Read from 32 bit little endian register at @addr */
+static inline
+u32 le_readl(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+
+/** Write 64 bit @value to little endian register at @addr */
+static inline
+void le_writeq(u64 value, void __iomem *addr)
+{
+ iowrite32(value, addr);
+ iowrite32(value >> 32, addr + 4);
+}
+
+
+/** Read from 64 bit little endian register at @addr */
+static inline
+u64 le_readq(void __iomem *addr)
+{
+ u64 value;
+ value = ioread32(addr);
+ value |= (u64)ioread32(addr + 4) << 32;
+ return value;
+}
+
+extern int whci_wait_for(struct device *dev, u32 __iomem *reg,
+ u32 mask, u32 result,
+ unsigned long max_ms, const char *tag);
+
+#endif /* #ifndef _LINUX_UWB_WHCI_H_ */
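
As a sketch of how the capability macros above fit together, the helper below walks a controller's WHCI capability table; it assumes the caller has already mapped UWBBASE (uwb_base), only logs what it finds, and the whci_dump_caps name is invented for this example.

/* Sketch: enumerate the WHCI capability table from a mapped UWBBASE. */
static void whci_dump_caps(struct device *dev, void __iomem *uwb_base)
{
	u64 capinfo = le_readq(uwb_base + UWBCAPINFO);
	unsigned int n_caps = UWBCAPINFO_TO_N_CAPS(capinfo);
	unsigned int n;

	/* Entry 0 (UWBCAPINFO itself) describes the radio controller. */
	for (n = 1; n <= n_caps; n++) {
		u64 capdata = le_readq(uwb_base + UWBCAPDATA(n));

		dev_info(dev, "cap %u: id 0x%02x ver 0x%04x bar %u offset 0x%04x size %zu\n",
			 n,
			 (unsigned int)UWBCAPDATA_TO_CAP_ID(capdata),
			 (unsigned int)UWBCAPDATA_TO_VERSION(capdata),
			 (unsigned int)UWBCAPDATA_TO_BAR(capdata),
			 (unsigned int)UWBCAPDATA_TO_OFFSET(capdata),
			 (size_t)UWBCAPDATA_TO_SIZE(capdata));
	}
}
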
diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h
new file mode 100644
index 000000000..ac3481921
--- /dev/null
+++ b/include/linux/verify_pefile.h
@@ -0,0 +1,18 @@
+/* Signed PE file verification
+ *
+ * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_VERIFY_PEFILE_H
+#define _LINUX_VERIFY_PEFILE_H
+
+extern int verify_pefile_signature(const void *pebuf, unsigned pelen,
+ struct key *trusted_keyring, bool *_trusted);
+
+#endif /* _LINUX_VERIFY_PEFILE_H */
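
A minimal usage sketch, assuming the caller already holds the PE image in memory and has a trusted keyring at hand; check_pe_image and its parameters are illustrative, and returning -EKEYREJECTED is just one plausible way to report an untrusted signature.

/* Sketch: verify a signed PE image against a caller-supplied keyring.
 * Assumes <linux/key.h> and <linux/errno.h> are available in the TU. */
static int check_pe_image(const void *image, unsigned int image_size,
			  struct key *keyring)
{
	bool trusted;
	int ret;

	ret = verify_pefile_signature(image, image_size, keyring, &trusted);
	if (ret < 0)
		return ret;
	return trusted ? 0 : -EKEYREJECTED;
}
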
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
new file mode 100644
index 000000000..6f8fbcf10
--- /dev/null
+++ b/include/linux/vermagic.h
@@ -0,0 +1,33 @@
+#include <generated/utsrelease.h>
+
+/* Simple sanity version stamp for modules. */
+#ifdef CONFIG_SMP
+#define MODULE_VERMAGIC_SMP "SMP "
+#else
+#define MODULE_VERMAGIC_SMP ""
+#endif
+#ifdef CONFIG_PREEMPT
+#define MODULE_VERMAGIC_PREEMPT "preempt "
+#else
+#define MODULE_VERMAGIC_PREEMPT ""
+#endif
+#ifdef CONFIG_MODULE_UNLOAD
+#define MODULE_VERMAGIC_MODULE_UNLOAD "mod_unload "
+#else
+#define MODULE_VERMAGIC_MODULE_UNLOAD ""
+#endif
+#ifdef CONFIG_MODVERSIONS
+#define MODULE_VERMAGIC_MODVERSIONS "modversions "
+#else
+#define MODULE_VERMAGIC_MODVERSIONS ""
+#endif
+#ifndef MODULE_ARCH_VERMAGIC
+#define MODULE_ARCH_VERMAGIC ""
+#endif
+
+#define VERMAGIC_STRING \
+ UTS_RELEASE " " \
+ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
+ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
+ MODULE_ARCH_VERMAGIC
+
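For illustration only: on a hypothetical SMP, preemptible kernel with module unloading enabled, MODVERSIONS disabled and no arch-specific magic, the macro above reduces to the release string plus the selected flags.

/* With CONFIG_SMP, CONFIG_PREEMPT and CONFIG_MODULE_UNLOAD set and the
 * other options unset, VERMAGIC_STRING expands to the equivalent of: */
static const char example_vermagic[] = UTS_RELEASE " SMP preempt mod_unload ";
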
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
new file mode 100644
index 000000000..f8e76e08e
--- /dev/null
+++ b/include/linux/vexpress.h
@@ -0,0 +1,54 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#ifndef _LINUX_VEXPRESS_H
+#define _LINUX_VEXPRESS_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#define VEXPRESS_SITE_MB 0
+#define VEXPRESS_SITE_DB1 1
+#define VEXPRESS_SITE_DB2 2
+#define VEXPRESS_SITE_MASTER 0xf
+
+/* Config infrastructure */
+
+void vexpress_config_set_master(u32 site);
+u32 vexpress_config_get_master(void);
+
+void vexpress_config_lock(void *arg);
+void vexpress_config_unlock(void *arg);
+
+int vexpress_config_get_topo(struct device_node *node, u32 *site,
+ u32 *position, u32 *dcc);
+
+/* Config bridge API */
+
+struct vexpress_config_bridge_ops {
+ struct regmap * (*regmap_init)(struct device *dev, void *context);
+ void (*regmap_exit)(struct regmap *regmap, void *context);
+};
+
+struct device *vexpress_config_bridge_register(struct device *parent,
+ struct vexpress_config_bridge_ops *ops, void *context);
+
+/* Config regmap API */
+
+struct regmap *devm_regmap_init_vexpress_config(struct device *dev);
+
+/* Platform control */
+
+void vexpress_flags_set(u32 data);
+
+#endif
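
The config regmap API above is normally consumed from a platform driver's probe; in the sketch below the vexpress_foo name and the register offset are made up purely to show the call sequence.

/* Sketch: get the Versatile Express config regmap for a child device. */
#include <linux/platform_device.h>
#include <linux/err.h>

static int vexpress_foo_probe(struct platform_device *pdev)
{
	struct regmap *map;
	u32 val;
	int err;

	map = devm_regmap_init_vexpress_config(&pdev->dev);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = regmap_read(map, 0 /* hypothetical function offset */, &val);
	if (err)
		return err;

	dev_info(&pdev->dev, "config word 0: %#x\n", val);
	return 0;
}
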
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
new file mode 100644
index 000000000..ddb440975
--- /dev/null
+++ b/include/linux/vfio.h
@@ -0,0 +1,138 @@
+/*
+ * VFIO API definition
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef VFIO_H
+#define VFIO_H
+
+
+#include <linux/iommu.h>
+#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+#include <uapi/linux/vfio.h>
+
+/**
+ * struct vfio_device_ops - VFIO bus driver device callbacks
+ *
+ * @open: Called when userspace creates new file descriptor for device
+ * @release: Called when userspace releases file descriptor for device
+ * @read: Perform read(2) on device file descriptor
+ * @write: Perform write(2) on device file descriptor
+ * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
+ * operations documented below
+ * @mmap: Perform mmap(2) on a region of the device file descriptor
+ * @request: Request for the bus driver to release the device
+ */
+struct vfio_device_ops {
+ char *name;
+ int (*open)(void *device_data);
+ void (*release)(void *device_data);
+ ssize_t (*read)(void *device_data, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(void *device_data, const char __user *buf,
+ size_t count, loff_t *size);
+ long (*ioctl)(void *device_data, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap)(void *device_data, struct vm_area_struct *vma);
+ void (*request)(void *device_data, unsigned int count);
+};
+
+extern int vfio_add_group_dev(struct device *dev,
+ const struct vfio_device_ops *ops,
+ void *device_data);
+
+extern void *vfio_del_group_dev(struct device *dev);
+extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
+extern void vfio_device_put(struct vfio_device *device);
+extern void *vfio_device_data(struct vfio_device *device);
+
+/**
+ * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
+ */
+struct vfio_iommu_driver_ops {
+ char *name;
+ struct module *owner;
+ void *(*open)(unsigned long arg);
+ void (*release)(void *iommu_data);
+ ssize_t (*read)(void *iommu_data, char __user *buf,
+ size_t count, loff_t *ppos);
+ ssize_t (*write)(void *iommu_data, const char __user *buf,
+ size_t count, loff_t *size);
+ long (*ioctl)(void *iommu_data, unsigned int cmd,
+ unsigned long arg);
+ int (*mmap)(void *iommu_data, struct vm_area_struct *vma);
+ int (*attach_group)(void *iommu_data,
+ struct iommu_group *group);
+ void (*detach_group)(void *iommu_data,
+ struct iommu_group *group);
+
+};
+
+extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
+
+extern void vfio_unregister_iommu_driver(
+ const struct vfio_iommu_driver_ops *ops);
+
+/*
+ * External user API
+ */
+extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
+extern void vfio_group_put_external_user(struct vfio_group *group);
+extern int vfio_external_user_iommu_id(struct vfio_group *group);
+extern long vfio_external_check_extension(struct vfio_group *group,
+ unsigned long arg);
+
+struct pci_dev;
+#ifdef CONFIG_EEH
+extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
+extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
+extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
+ unsigned int cmd,
+ unsigned long arg);
+#else
+static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev)
+{
+}
+
+static inline void vfio_spapr_pci_eeh_release(struct pci_dev *pdev)
+{
+}
+
+static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOTTY;
+}
+#endif /* CONFIG_EEH */
+
+/*
+ * IRQfd - generic
+ */
+struct virqfd {
+ void *opaque;
+ struct eventfd_ctx *eventfd;
+ int (*handler)(void *, void *);
+ void (*thread)(void *, void *);
+ void *data;
+ struct work_struct inject;
+ wait_queue_t wait;
+ poll_table pt;
+ struct work_struct shutdown;
+ struct virqfd **pvirqfd;
+};
+
+extern int vfio_virqfd_enable(void *opaque,
+ int (*handler)(void *, void *),
+ void (*thread)(void *, void *),
+ void *data, struct virqfd **pvirqfd, int fd);
+extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
+
+#endif /* VFIO_H */
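
To ground the bus-driver side of the API, here is a heavily trimmed, hypothetical registration skeleton; the my_vfio_* names and the mostly empty callbacks are placeholders rather than a reference implementation.

/* Sketch: a VFIO bus driver wiring a device into the VFIO core. */
static int my_vfio_open(void *device_data)
{
	return 0;
}

static void my_vfio_release(void *device_data)
{
}

static long my_vfio_ioctl(void *device_data, unsigned int cmd,
			  unsigned long arg)
{
	return -ENOTTY;	/* real drivers implement VFIO_DEVICE_* here */
}

static const struct vfio_device_ops my_vfio_ops = {
	.name		= "my-vfio",
	.open		= my_vfio_open,
	.release	= my_vfio_release,
	.ioctl		= my_vfio_ioctl,
};

static int my_vfio_probe(struct device *dev, void *private_data)
{
	/* private_data is handed back to every my_vfio_* callback. */
	return vfio_add_group_dev(dev, &my_vfio_ops, private_data);
}

static void my_vfio_remove(struct device *dev)
{
	vfio_del_group_dev(dev);
}
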
diff --git a/include/linux/vfs.h b/include/linux/vfs.h
new file mode 100644
index 000000000..e701d0541
--- /dev/null
+++ b/include/linux/vfs.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_VFS_H
+#define _LINUX_VFS_H
+
+#include <linux/statfs.h>
+
+#endif
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
new file mode 100644
index 000000000..b483abd34
--- /dev/null
+++ b/include/linux/vga_switcheroo.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * vga_switcheroo.h - Support for laptops with dual GPUs using one set of outputs
+ */
+
+#ifndef _LINUX_VGA_SWITCHEROO_H_
+#define _LINUX_VGA_SWITCHEROO_H_
+
+#include <linux/fb.h>
+
+struct pci_dev;
+
+enum vga_switcheroo_state {
+ VGA_SWITCHEROO_OFF,
+ VGA_SWITCHEROO_ON,
+ /* the states below are only returned by vga_switcheroo_get_client_state() */
+ VGA_SWITCHEROO_INIT,
+ VGA_SWITCHEROO_NOT_FOUND,
+};
+
+enum vga_switcheroo_client_id {
+ VGA_SWITCHEROO_IGD,
+ VGA_SWITCHEROO_DIS,
+ VGA_SWITCHEROO_MAX_CLIENTS,
+};
+
+struct vga_switcheroo_handler {
+ int (*switchto)(enum vga_switcheroo_client_id id);
+ int (*power_state)(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state);
+ int (*init)(void);
+ int (*get_client_id)(struct pci_dev *pdev);
+};
+
+struct vga_switcheroo_client_ops {
+ void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
+ void (*reprobe)(struct pci_dev *dev);
+ bool (*can_switch)(struct pci_dev *dev);
+};
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+void vga_switcheroo_unregister_client(struct pci_dev *dev);
+int vga_switcheroo_register_client(struct pci_dev *dev,
+ const struct vga_switcheroo_client_ops *ops,
+ bool driver_power_control);
+int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active);
+
+void vga_switcheroo_client_fb_set(struct pci_dev *dev,
+ struct fb_info *info);
+
+int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler);
+void vga_switcheroo_unregister_handler(void);
+
+int vga_switcheroo_process_delayed_switch(void);
+
+int vga_switcheroo_get_client_state(struct pci_dev *dev);
+
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
+
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
+#else
+
+static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
+static inline int vga_switcheroo_register_client(struct pci_dev *dev,
+ const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
+static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
+static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
+ const struct vga_switcheroo_client_ops *ops,
+ int id, bool active) { return 0; }
+static inline void vga_switcheroo_unregister_handler(void) {}
+static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
+static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
+
+static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
+
+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+
+#endif
+#endif /* _LINUX_VGA_SWITCHEROO_H_ */
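
A GPU driver's use of the client API typically follows the pattern sketched below; the foo_switcheroo_* callbacks are stubs and the power handling details are, of course, driver specific.

/* Sketch: hook a GPU driver up to vga_switcheroo as a client. */
static void foo_switcheroo_set_gpu_state(struct pci_dev *pdev,
					 enum vga_switcheroo_state state)
{
	/* Power the GPU up or down; details are driver specific. */
}

static void foo_switcheroo_reprobe(struct pci_dev *pdev)
{
	/* Re-read display state after the mux switched to this GPU. */
}

static bool foo_switcheroo_can_switch(struct pci_dev *pdev)
{
	/* Typically: no framebuffer/KMS users currently hold the device. */
	return true;
}

static const struct vga_switcheroo_client_ops foo_switcheroo_ops = {
	.set_gpu_state	= foo_switcheroo_set_gpu_state,
	.reprobe	= foo_switcheroo_reprobe,
	.can_switch	= foo_switcheroo_can_switch,
};

static int foo_register_switcheroo(struct pci_dev *pdev)
{
	/* false: this driver does not do its own runtime power control. */
	return vga_switcheroo_register_client(pdev, &foo_switcheroo_ops, false);
}
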
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
new file mode 100644
index 000000000..8c3b412d8
--- /dev/null
+++ b/include/linux/vgaarb.h
@@ -0,0 +1,254 @@
+/*
+ * The VGA arbiter manages VGA space routing and VGA resource decode to
+ * allow multiple VGA devices to be used in a system in a safe way.
+ *
+ * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
+ * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef LINUX_VGA_H
+#define LINUX_VGA_H
+
+#include <video/vga.h>
+
+/* Legacy VGA regions */
+#define VGA_RSRC_NONE 0x00
+#define VGA_RSRC_LEGACY_IO 0x01
+#define VGA_RSRC_LEGACY_MEM 0x02
+#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
+/* Non-legacy access */
+#define VGA_RSRC_NORMAL_IO 0x04
+#define VGA_RSRC_NORMAL_MEM 0x08
+
+/* Pass this instead of a pci_dev to use the system "default"
+ * device, that is, the one used by vgacon. Archs will probably
+ * have to provide their own vga_default_device().
+ */
+#define VGA_DEFAULT_DEVICE (NULL)
+
+struct pci_dev;
+
+/* For use by clients */
+
+/**
+ * vga_set_legacy_decoding
+ *
+ * @pdev: pci device of the VGA card
+ * @decodes: bit mask of what legacy regions the card decodes
+ *
+ * Indicates to the arbiter if the card decodes legacy VGA IOs,
+ * legacy VGA Memory, both, or none. All cards default to both;
+ * the card driver (fbdev for example) should tell the arbiter
+ * if it has disabled legacy decoding, so the card can be left
+ * out of the arbitration process (and is then safe to take
+ * interrupts at any time).
+ */
+#if defined(CONFIG_VGA_ARB)
+extern void vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes);
+#else
+static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
+ unsigned int decodes) { };
+#endif
+
+/**
+ * vga_get - acquire & lock VGA resources
+ *
+ * @pdev: pci device of the VGA card or NULL for the system default
+ * @rsrc: bit mask of resources to acquire and lock
+ * @interruptible: whether blocking should be interruptible by signals
+ *
+ * This function acquires VGA resources for the given
+ * card and marks those resources locked. If the resources requested
+ * are "normal" (and not legacy) resources, the arbiter will first check
+ * whether the card is doing legacy decoding for that type of resource. If
+ * yes, the lock is "converted" into a legacy resource lock.
+ * The arbiter will first look for all VGA cards that might conflict
+ * and disable their IOs and/or Memory access, including VGA forwarding
+ * on P2P bridges if necessary, so that the requested resources can
+ * be used. Then, the card is marked as locking these resources and
+ * the IO and/or Memory accesses are enabled on the card (including
+ * VGA forwarding on parent P2P bridges if any).
+ * This function will block if some conflicting card is already locking
+ * one of the required resources (or any resource on a different bus
+ * segment, since P2P bridges don't differentiate VGA memory and IO
+ * afaik). You can indicate whether this blocking should be interruptible
+ * by a signal (for userland interface) or not.
+ * Must not be called at interrupt time or in atomic context.
+ * If the card already owns the resources, the function succeeds.
+ * Nested calls are supported (a per-resource counter is maintained)
+ */
+
+#if defined(CONFIG_VGA_ARB)
+extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible);
+#else
+static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; }
+#endif
+
+/**
+ * vga_get_interruptible
+ *
+ * Shortcut to vga_get
+ */
+
+static inline int vga_get_interruptible(struct pci_dev *pdev,
+ unsigned int rsrc)
+{
+ return vga_get(pdev, rsrc, 1);
+}
+
+/**
+ * vga_get_uninterruptible
+ *
+ * Shortcut to vga_get
+ */
+
+static inline int vga_get_uninterruptible(struct pci_dev *pdev,
+ unsigned int rsrc)
+{
+ return vga_get(pdev, rsrc, 0);
+}
+
+/**
+ * vga_tryget - try to acquire & lock legacy VGA resources
+ *
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to acquire and lock
+ *
+ * This function performs the same operation as vga_get(), but
+ * will return an error (-EBUSY) instead of blocking if the resources
+ * are already locked by another card. It can be called in any context
+ */
+
+#if defined(CONFIG_VGA_ARB)
+extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc);
+#else
+static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; }
+#endif
+
+/**
+ * vga_put - release lock on legacy VGA resources
+ *
+ * @pdev: pci device of VGA card or NULL for system default
+ * @rsrc: bit mask of resources to release
+ *
+ * This function releases resources previously locked by vga_get()
+ * or vga_tryget(). The resources aren't disabled right away, so
+ * that a subsequent vga_get() on the same card will succeed
+ * immediately. Resources have a counter, so locks are only
+ * released if the counter reaches 0.
+ */
+
+#if defined(CONFIG_VGA_ARB)
+extern void vga_put(struct pci_dev *pdev, unsigned int rsrc);
+#else
+#define vga_put(pdev, rsrc)
+#endif
+
+
+/**
+ * vga_default_device
+ *
+ * This can be defined by the platform. The default implementation
+ * is rather dumb and will probably only work properly on single
+ * vga card setups and/or x86 platforms.
+ *
+ * If your VGA default device is not PCI, you'll have to return
+ * NULL here. In this case, I assume it will not conflict with
+ * any PCI card. If this is not true, I'll have to define two arch
+ * hooks for enabling/disabling the VGA default device if that is
+ * possible. This may be a problem with real _ISA_ VGA cards, in
+ * addition to a PCI one. I don't know at this point how to deal
+ * with that card. Can their IOs be disabled at all? If not, then
+ * I suppose it's a matter of having the proper arch hook telling
+ * us about it, so we basically never allow anybody to succeed a
+ * vga_get()...
+ */
+
+#ifdef CONFIG_VGA_ARB
+extern struct pci_dev *vga_default_device(void);
+extern void vga_set_default_device(struct pci_dev *pdev);
+#else
+static inline struct pci_dev *vga_default_device(void) { return NULL; };
+static inline void vga_set_default_device(struct pci_dev *pdev) { };
+#endif
+
+/**
+ * vga_conflicts
+ *
+ * Architectures should define this if they have several
+ * independent PCI domains that can afford concurrent VGA
+ * decoding
+ */
+
+#ifndef __ARCH_HAS_VGA_CONFLICT
+static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2)
+{
+ return 1;
+}
+#endif
+
+/**
+ * vga_client_register
+ *
+ * @pdev: pci device of the VGA client
+ * @cookie: client cookie to be used in callbacks
+ * @irq_set_state: irq state change callback
+ * @set_vga_decode: vga decode change callback
+ *
+ * return value: 0 on success, -1 on failure
+ * Register a client with the VGA arbitration logic
+ *
+ * Clients have two callback mechanisms they can use.
+ * irq enable/disable callback -
+ * If a client can't disable its GPU's VGA resources, then we
+ * need to be able to ask it to turn off its irqs when we
+ * turn off its mem and io decoding.
+ * set_vga_decode
+ * If a client can disable its GPU VGA resource, it will
+ * get a callback from this to set the encode/decode state
+ *
+ * Rationale: we cannot disable VGA decode resources unconditionally;
+ * some single GPU laptops seem to require ACPI or BIOS access to the
+ * VGA registers to control things like backlights etc.
+ * Hopefully newer multi-GPU laptops do something saner, and desktops
+ * won't have any special ACPI for this.
+ * The driver will get a callback when VGA arbitration is first used
+ * by userspace since some older X servers have issues.
+ */
+#if defined(CONFIG_VGA_ARB)
+int vga_client_register(struct pci_dev *pdev, void *cookie,
+ void (*irq_set_state)(void *cookie, bool state),
+ unsigned int (*set_vga_decode)(void *cookie, bool state));
+#else
+static inline int vga_client_register(struct pci_dev *pdev, void *cookie,
+ void (*irq_set_state)(void *cookie, bool state),
+ unsigned int (*set_vga_decode)(void *cookie, bool state))
+{
+ return 0;
+}
+#endif
+
+#endif /* LINUX_VGA_H */
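
The lock/unlock discipline described above looks roughly like the sketch below in a client driver; the legacy register poke in the middle is only a placeholder for whatever the driver actually needs to do.

/* Sketch: take the legacy VGA I/O lock around a register access.
 * Assumes <linux/io.h> for outb(). */
static int foo_touch_legacy_vga(struct pci_dev *pdev)
{
	int ret;

	ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO);
	if (ret)
		return ret;	/* interrupted while waiting for the lock */

	outb(0x00, 0x3c4);	/* placeholder legacy VGA access */

	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	return 0;
}
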
diff --git a/include/linux/via-core.h b/include/linux/via-core.h
new file mode 100644
index 000000000..9c21cdf3e
--- /dev/null
+++ b/include/linux/via-core.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+ * Copyright 2009-2010 Jonathan Corbet <corbet@lwn.net>
+ * Copyright 2010 Florian Tobias Schandinat <FlorianSchandinat@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __VIA_CORE_H__
+#define __VIA_CORE_H__
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+/*
+ * A description of each known serial I2C/GPIO port.
+ */
+enum via_port_type {
+ VIA_PORT_NONE = 0,
+ VIA_PORT_I2C,
+ VIA_PORT_GPIO,
+};
+
+enum via_port_mode {
+ VIA_MODE_OFF = 0,
+ VIA_MODE_I2C, /* Used as I2C port */
+ VIA_MODE_GPIO, /* Two GPIO ports */
+};
+
+enum viafb_i2c_adap {
+ VIA_PORT_26 = 0,
+ VIA_PORT_31,
+ VIA_PORT_25,
+ VIA_PORT_2C,
+ VIA_PORT_3D,
+};
+#define VIAFB_NUM_PORTS 5
+
+struct via_port_cfg {
+ enum via_port_type type;
+ enum via_port_mode mode;
+ u16 io_port;
+ u8 ioport_index;
+};
+
+/*
+ * Allow subdevs to register suspend/resume hooks.
+ */
+#ifdef CONFIG_PM
+struct viafb_pm_hooks {
+ struct list_head list;
+ int (*suspend)(void *private);
+ int (*resume)(void *private);
+ void *private;
+};
+
+void viafb_pm_register(struct viafb_pm_hooks *hooks);
+void viafb_pm_unregister(struct viafb_pm_hooks *hooks);
+#endif /* CONFIG_PM */
+
+/*
+ * This is the global viafb "device" containing stuff needed by
+ * all subdevs.
+ */
+struct viafb_dev {
+ struct pci_dev *pdev;
+ int chip_type;
+ struct via_port_cfg *port_cfg;
+ /*
+ * Spinlock for access to device registers. Not yet
+ * globally used.
+ */
+ spinlock_t reg_lock;
+ /*
+ * The framebuffer MMIO region. Little, if anything, touches
+ * this memory directly, and certainly nothing outside of the
+ * framebuffer device itself. We *do* have to be able to allocate
+ * chunks of this memory for other devices, though.
+ */
+ unsigned long fbmem_start;
+ long fbmem_len;
+ void __iomem *fbmem;
+#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
+ long camera_fbmem_offset;
+ long camera_fbmem_size;
+#endif
+ /*
+ * The MMIO region for device registers.
+ */
+ unsigned long engine_start;
+ unsigned long engine_len;
+ void __iomem *engine_mmio;
+
+};
+
+/*
+ * Interrupt management.
+ */
+
+void viafb_irq_enable(u32 mask);
+void viafb_irq_disable(u32 mask);
+
+/*
+ * The global interrupt control register and its bits.
+ */
+#define VDE_INTERRUPT 0x200 /* Video interrupt flags/masks */
+#define VDE_I_DVISENSE 0x00000001 /* DVI sense int status */
+#define VDE_I_VBLANK 0x00000002 /* Vertical blank status */
+#define VDE_I_MCCFI 0x00000004 /* MCE compl. frame int status */
+#define VDE_I_VSYNC 0x00000008 /* VGA VSYNC int status */
+#define VDE_I_DMA0DDONE 0x00000010 /* DMA 0 descr done */
+#define VDE_I_DMA0TDONE 0x00000020 /* DMA 0 transfer done */
+#define VDE_I_DMA1DDONE 0x00000040 /* DMA 1 descr done */
+#define VDE_I_DMA1TDONE 0x00000080 /* DMA 1 transfer done */
+#define VDE_I_C1AV 0x00000100 /* Cap Eng 1 act vid end */
+#define VDE_I_HQV0 0x00000200 /* First HQV engine */
+#define VDE_I_HQV1 0x00000400 /* Second HQV engine */
+#define VDE_I_HQV1EN 0x00000800 /* Second HQV engine enable */
+#define VDE_I_C0AV 0x00001000 /* Cap Eng 0 act vid end */
+#define VDE_I_C0VBI 0x00002000 /* Cap Eng 0 VBI end */
+#define VDE_I_C1VBI 0x00004000 /* Cap Eng 1 VBI end */
+#define VDE_I_VSYNC2 0x00008000 /* Sec. Disp. VSYNC */
+#define VDE_I_DVISNSEN 0x00010000 /* DVI sense enable */
+#define VDE_I_VSYNC2EN 0x00020000 /* Sec Disp VSYNC enable */
+#define VDE_I_MCCFIEN 0x00040000 /* MC comp frame int mask enable */
+#define VDE_I_VSYNCEN 0x00080000 /* VSYNC enable */
+#define VDE_I_DMA0DDEN 0x00100000 /* DMA 0 descr done enable */
+#define VDE_I_DMA0TDEN 0x00200000 /* DMA 0 trans done enable */
+#define VDE_I_DMA1DDEN 0x00400000 /* DMA 1 descr done enable */
+#define VDE_I_DMA1TDEN 0x00800000 /* DMA 1 trans done enable */
+#define VDE_I_C1AVEN 0x01000000 /* cap 1 act vid end enable */
+#define VDE_I_HQV0EN 0x02000000 /* First hqv engine enable */
+#define VDE_I_C1VBIEN 0x04000000 /* Cap 1 VBI end enable */
+#define VDE_I_LVDSSI 0x08000000 /* LVDS sense interrupt */
+#define VDE_I_C0AVEN 0x10000000 /* Cap 0 act vid end enable */
+#define VDE_I_C0VBIEN 0x20000000 /* Cap 0 VBI end enable */
+#define VDE_I_LVDSSIEN 0x40000000 /* LVDS Sense enable */
+#define VDE_I_ENABLE 0x80000000 /* Global interrupt enable */
+
+#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
+/*
+ * DMA management.
+ */
+int viafb_request_dma(void);
+void viafb_release_dma(void);
+/* void viafb_dma_copy_out(unsigned int offset, dma_addr_t paddr, int len); */
+int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg);
+
+/*
+ * DMA Controller registers.
+ */
+#define VDMA_MR0 0xe00 /* Mod reg 0 */
+#define VDMA_MR_CHAIN 0x01 /* Chaining mode */
+#define VDMA_MR_TDIE 0x02 /* Transfer done int enable */
+#define VDMA_CSR0 0xe04 /* Control/status */
+#define VDMA_C_ENABLE 0x01 /* DMA Enable */
+#define VDMA_C_START 0x02 /* Start a transfer */
+#define VDMA_C_ABORT 0x04 /* Abort a transfer */
+#define VDMA_C_DONE 0x08 /* Transfer is done */
+#define VDMA_MARL0 0xe20 /* Mem addr low */
+#define VDMA_MARH0 0xe24 /* Mem addr high */
+#define VDMA_DAR0 0xe28 /* Device address */
+#define VDMA_DQWCR0 0xe2c /* Count (16-byte) */
+#define VDMA_TMR0 0xe30 /* Tile mode reg */
+#define VDMA_DPRL0 0xe34 /* Not sure */
+#define VDMA_DPR_IN 0x08 /* Inbound transfer to FB */
+#define VDMA_DPRH0 0xe38
+#define VDMA_PMR0 (0xe00 + 0x134) /* Pitch mode */
+
+/*
+ * Useful stuff that probably belongs somewhere global.
+ */
+#define VGA_WIDTH 640
+#define VGA_HEIGHT 480
+#endif /* CONFIG_VIDEO_VIA_CAMERA */
+
+/*
+ * Indexed port operations. Note that these are all multi-op
+ * functions; every invocation will be racy if you're not holding
+ * reg_lock.
+ */
+
+#define VIAStatus 0x3DA /* Non-indexed port */
+#define VIACR 0x3D4
+#define VIASR 0x3C4
+#define VIAGR 0x3CE
+#define VIAAR 0x3C0
+
+static inline u8 via_read_reg(u16 port, u8 index)
+{
+ outb(index, port);
+ return inb(port + 1);
+}
+
+static inline void via_write_reg(u16 port, u8 index, u8 data)
+{
+ outb(index, port);
+ outb(data, port + 1);
+}
+
+static inline void via_write_reg_mask(u16 port, u8 index, u8 data, u8 mask)
+{
+ u8 old;
+
+ outb(index, port);
+ old = inb(port + 1);
+ outb((data & mask) | (old & ~mask), port + 1);
+}
+
+#define VIA_MISC_REG_READ 0x03CC
+#define VIA_MISC_REG_WRITE 0x03C2
+
+static inline void via_write_misc_reg_mask(u8 data, u8 mask)
+{
+ u8 old = inb(VIA_MISC_REG_READ);
+ outb((data & mask) | (old & ~mask), VIA_MISC_REG_WRITE);
+}
+
+
+#endif /* __VIA_CORE_H__ */
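
Because the indexed accessors above are multi-operation sequences, a subdev normally brackets them with the shared reg_lock; the sequencer index and values below are made up for the example.

/* Sketch: update one bit of an (assumed) sequencer register safely. */
static void foo_tweak_sr(struct viafb_dev *vdev)
{
	unsigned long flags;

	spin_lock_irqsave(&vdev->reg_lock, flags);
	/* Set bit 0 of hypothetical SR index 0x1e, leaving the rest alone. */
	via_write_reg_mask(VIASR, 0x1e, 0x01, 0x01);
	spin_unlock_irqrestore(&vdev->reg_lock, flags);
}
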
diff --git a/include/linux/via-gpio.h b/include/linux/via-gpio.h
new file mode 100644
index 000000000..8281aea3d
--- /dev/null
+++ b/include/linux/via-gpio.h
@@ -0,0 +1,14 @@
+/*
+ * Support for viafb GPIO ports.
+ *
+ * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
+ * Distributable under version 2 of the GNU General Public License.
+ */
+
+#ifndef __VIA_GPIO_H__
+#define __VIA_GPIO_H__
+
+extern int viafb_gpio_lookup(const char *name);
+extern int viafb_gpio_init(void);
+extern void viafb_gpio_exit(void);
+#endif
diff --git a/include/linux/via.h b/include/linux/via.h
new file mode 100644
index 000000000..86ae3bcdb
--- /dev/null
+++ b/include/linux/via.h
@@ -0,0 +1,22 @@
+/* Miscellaneous definitions for VIA chipsets
+ Currently used only by drivers/parport/parport_pc.c */
+
+/* Values for SuperIO function select configuration register */
+#define VIA_FUNCTION_PARPORT_SPP 0x00
+#define VIA_FUNCTION_PARPORT_ECP 0x01
+#define VIA_FUNCTION_PARPORT_EPP 0x02
+#define VIA_FUNCTION_PARPORT_DISABLE 0x03
+#define VIA_FUNCTION_PROBE 0xFF /* Special magic value to be used in code, not to be written into chip */
+
+/* Bits for parallel port mode configuration register */
+#define VIA_PARPORT_ECPEPP 0X20
+#define VIA_PARPORT_BIDIR 0x80
+
+/* VIA configuration registers */
+#define VIA_CONFIG_INDEX 0x3F0
+#define VIA_CONFIG_DATA 0x3F1
+
+/* Mask for parallel port IRQ bits (in ISA PnP IRQ routing register 1) */
+#define VIA_IRQCONTROL_PARALLEL 0xF0
+/* Mask for parallel port DMA bits (in ISA PnP DMA routing register) */
+#define VIA_DMACONTROL_PARALLEL 0x0C
diff --git a/include/linux/via_i2c.h b/include/linux/via_i2c.h
new file mode 100644
index 000000000..44532e468
--- /dev/null
+++ b/include/linux/via_i2c.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
+
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * either version 2, or (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
+ * the implied warranty of MERCHANTABILITY or FITNESS FOR
+ * A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __VIA_I2C_H__
+#define __VIA_I2C_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+struct via_i2c_stuff {
+ u16 i2c_port; /* GPIO or I2C port */
+ u16 is_active; /* Being used as I2C? */
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+};
+
+
+int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata);
+int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data);
+int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len);
+struct i2c_adapter *viafb_find_i2c_adapter(enum viafb_i2c_adap which);
+
+extern int viafb_i2c_init(void);
+extern void viafb_i2c_exit(void);
+#endif /* __VIA_I2C_H__ */
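
A consumer of the viafb I2C helpers typically reads a device register in a single call, as sketched below; the port, slave address and register index are arbitrary example values.

/* Sketch: read one register from an I2C slave behind a viafb port. */
static int foo_read_one_byte(u8 *out)
{
	/* VIA_PORT_31 (enum viafb_i2c_adap, declared in via-core.h) as the
	 * adapter, 0xa0 as the example slave address, register 0x00; all
	 * three are illustrative only. */
	return viafb_i2c_readbyte(VIA_PORT_31, 0xa0, 0x00, out);
}
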
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
new file mode 100644
index 000000000..73ea2fb04
--- /dev/null
+++ b/include/linux/videodev2.h
@@ -0,0 +1,62 @@
+/*
+ * Video for Linux Two header file
+ *
+ * Copyright (C) 1999-2012 the contributors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Alternatively you can redistribute this file under the terms of the
+ * BSD license as stated below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Header file for v4l or V4L2 drivers and applications
+ * with public API.
+ * All kernel-specific stuff was moved to media/v4l2-dev.h, so
+ * no #if __KERNEL__ tests are allowed here
+ *
+ * See http://linuxtv.org for more info
+ *
+ * Author: Bill Dirks <bill@thedirks.org>
+ * Justin Schoeman
+ * Hans Verkuil <hverkuil@xs4all.nl>
+ * et al.
+ */
+#ifndef __LINUX_VIDEODEV2_H
+#define __LINUX_VIDEODEV2_H
+
+#include <linux/time.h> /* need struct timeval */
+#include <uapi/linux/videodev2.h>
+
+#endif /* __LINUX_VIDEODEV2_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
new file mode 100644
index 000000000..8f4d4bfa6
--- /dev/null
+++ b/include/linux/virtio.h
@@ -0,0 +1,173 @@
+#ifndef _LINUX_VIRTIO_H
+#define _LINUX_VIRTIO_H
+/* Everything a virtio driver needs to work with any particular virtio
+ * implementation. */
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/gfp.h>
+#include <linux/vringh.h>
+
+/**
+ * virtqueue - a queue to register buffers for sending or receiving.
+ * @list: the chain of virtqueues for this device
+ * @callback: the function to call when buffers are consumed (can be NULL).
+ * @name: the name of this virtqueue (mainly for debugging)
+ * @vdev: the virtio device this queue was created for.
+ * @priv: a pointer for the virtqueue implementation to use.
+ * @index: the zero-based ordinal number for this queue.
+ * @num_free: number of elements we expect to be able to fit.
+ *
+ * A note on @num_free: with indirect buffers, each buffer needs one
+ * element in the queue, otherwise a buffer will need one element per
+ * sg element.
+ */
+struct virtqueue {
+ struct list_head list;
+ void (*callback)(struct virtqueue *vq);
+ const char *name;
+ struct virtio_device *vdev;
+ unsigned int index;
+ unsigned int num_free;
+ void *priv;
+};
+
+int virtqueue_add_outbuf(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ gfp_t gfp);
+
+int virtqueue_add_inbuf(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ gfp_t gfp);
+
+int virtqueue_add_sgs(struct virtqueue *vq,
+ struct scatterlist *sgs[],
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ gfp_t gfp);
+
+bool virtqueue_kick(struct virtqueue *vq);
+
+bool virtqueue_kick_prepare(struct virtqueue *vq);
+
+bool virtqueue_notify(struct virtqueue *vq);
+
+void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+
+void virtqueue_disable_cb(struct virtqueue *vq);
+
+bool virtqueue_enable_cb(struct virtqueue *vq);
+
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+
+bool virtqueue_poll(struct virtqueue *vq, unsigned);
+
+bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
+
+void *virtqueue_detach_unused_buf(struct virtqueue *vq);
+
+unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
+
+bool virtqueue_is_broken(struct virtqueue *vq);
+
+void *virtqueue_get_avail(struct virtqueue *vq);
+void *virtqueue_get_used(struct virtqueue *vq);
+
+/**
+ * virtio_device - representation of a device using virtio
+ * @index: unique position on the virtio bus
+ * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
+ * @config_enabled: configuration change reporting enabled
+ * @config_change_pending: configuration change reported while disabled
+ * @config_lock: protects configuration change reporting
+ * @dev: underlying device.
+ * @id: the device type identification (used to match it with a driver).
+ * @config: the configuration ops for this device.
+ * @vringh_config: configuration ops for host vrings.
+ * @vqs: the list of virtqueues for this device.
+ * @features: the features supported by both driver and device.
+ * @priv: private pointer for the driver's use.
+ */
+struct virtio_device {
+ int index;
+ bool failed;
+ bool config_enabled;
+ bool config_change_pending;
+ spinlock_t config_lock;
+ struct device dev;
+ struct virtio_device_id id;
+ const struct virtio_config_ops *config;
+ const struct vringh_config_ops *vringh_config;
+ struct list_head vqs;
+ u64 features;
+ void *priv;
+};
+
+static inline struct virtio_device *dev_to_virtio(struct device *_dev)
+{
+ return container_of(_dev, struct virtio_device, dev);
+}
+
+int register_virtio_device(struct virtio_device *dev);
+void unregister_virtio_device(struct virtio_device *dev);
+
+void virtio_break_device(struct virtio_device *dev);
+
+void virtio_config_changed(struct virtio_device *dev);
+#ifdef CONFIG_PM_SLEEP
+int virtio_device_freeze(struct virtio_device *dev);
+int virtio_device_restore(struct virtio_device *dev);
+#endif
+
+/**
+ * virtio_driver - operations for a virtio I/O driver
+ * @driver: underlying device driver (populate name and owner).
+ * @id_table: the ids serviced by this driver.
+ * @feature_table: an array of feature numbers supported by this driver.
+ * @feature_table_size: number of entries in the feature table array.
+ * @feature_table_legacy: same as feature_table but when working in legacy mode.
+ * @feature_table_size_legacy: number of entries in feature table legacy array.
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @remove: the function to call when a device is removed.
+ * @config_changed: optional function to call when the device configuration
+ * changes; may be called in interrupt context.
+ */
+struct virtio_driver {
+ struct device_driver driver;
+ const struct virtio_device_id *id_table;
+ const unsigned int *feature_table;
+ unsigned int feature_table_size;
+ const unsigned int *feature_table_legacy;
+ unsigned int feature_table_size_legacy;
+ int (*probe)(struct virtio_device *dev);
+ void (*scan)(struct virtio_device *dev);
+ void (*remove)(struct virtio_device *dev);
+ void (*config_changed)(struct virtio_device *dev);
+#ifdef CONFIG_PM
+ int (*freeze)(struct virtio_device *dev);
+ int (*restore)(struct virtio_device *dev);
+#endif
+};
+
+static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
+{
+ return container_of(drv, struct virtio_driver, driver);
+}
+
+int register_virtio_driver(struct virtio_driver *drv);
+void unregister_virtio_driver(struct virtio_driver *drv);
+
+/* module_virtio_driver() - Helper macro for drivers that don't do
+ * anything special in module init/exit. This eliminates a lot of
+ * boilerplate. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit()
+ */
+#define module_virtio_driver(__virtio_driver) \
+ module_driver(__virtio_driver, register_virtio_driver, \
+ unregister_virtio_driver)
+#endif /* _LINUX_VIRTIO_H */
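
Putting the declarations above together, a minimal driver only needs an id table and probe/remove callbacks; the foo names and the device type 42 are invented, and VIRTIO_DEV_ANY_ID is assumed to be available via the headers this file already pulls in.

/* Sketch of a minimal virtio driver using module_virtio_driver(). */
#include <linux/module.h>

static const struct virtio_device_id foo_id_table[] = {
	{ 42 /* hypothetical device type */, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static int foo_probe(struct virtio_device *vdev)
{
	dev_info(&vdev->dev, "foo device found\n");
	return 0;
}

static void foo_remove(struct virtio_device *vdev)
{
	dev_info(&vdev->dev, "foo device removed\n");
}

static struct virtio_driver foo_driver = {
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= foo_id_table,
	.probe		= foo_probe,
	.remove		= foo_remove,
};

module_virtio_driver(foo_driver);
MODULE_DEVICE_TABLE(virtio, foo_id_table);
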
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h
new file mode 100644
index 000000000..51865d05b
--- /dev/null
+++ b/include/linux/virtio_byteorder.h
@@ -0,0 +1,59 @@
+#ifndef _LINUX_VIRTIO_BYTEORDER_H
+#define _LINUX_VIRTIO_BYTEORDER_H
+#include <linux/types.h>
+#include <uapi/linux/virtio_types.h>
+
+/*
+ * Low-level memory accessors for handling virtio in modern little endian and in
+ * compatibility native endian format.
+ */
+
+static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
+{
+ if (little_endian)
+ return le16_to_cpu((__force __le16)val);
+ else
+ return (__force u16)val;
+}
+
+static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
+{
+ if (little_endian)
+ return (__force __virtio16)cpu_to_le16(val);
+ else
+ return (__force __virtio16)val;
+}
+
+static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
+{
+ if (little_endian)
+ return le32_to_cpu((__force __le32)val);
+ else
+ return (__force u32)val;
+}
+
+static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
+{
+ if (little_endian)
+ return (__force __virtio32)cpu_to_le32(val);
+ else
+ return (__force __virtio32)val;
+}
+
+static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
+{
+ if (little_endian)
+ return le64_to_cpu((__force __le64)val);
+ else
+ return (__force u64)val;
+}
+
+static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
+{
+ if (little_endian)
+ return (__force __virtio64)cpu_to_le64(val);
+ else
+ return (__force __virtio64)val;
+}
+
+#endif /* _LINUX_VIRTIO_BYTEORDER_H */
diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h
new file mode 100644
index 000000000..5d2d3124c
--- /dev/null
+++ b/include/linux/virtio_caif.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2012
+ * Author: Sjur Brændeland <sjur.brandeland@stericsson.com>
+ *
+ * This header is BSD licensed so
+ * anyone can use the definitions to implement compatible remote processors
+ */
+
+#ifndef VIRTIO_CAIF_H
+#define VIRTIO_CAIF_H
+
+#include <linux/types.h>
+struct virtio_caif_transf_config {
+ u16 headroom;
+ u16 tailroom;
+ u32 mtu;
+ u8 reserved[4];
+};
+
+struct virtio_caif_config {
+ struct virtio_caif_transf_config uplink, downlink;
+ u8 reserved[8];
+};
+#endif
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
new file mode 100644
index 000000000..1e306f727
--- /dev/null
+++ b/include/linux/virtio_config.h
@@ -0,0 +1,398 @@
+#ifndef _LINUX_VIRTIO_CONFIG_H
+#define _LINUX_VIRTIO_CONFIG_H
+
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/virtio.h>
+#include <linux/virtio_byteorder.h>
+#include <uapi/linux/virtio_config.h>
+
+/**
+ * virtio_config_ops - operations for configuring a virtio device
+ * @get: read the value of a configuration field
+ * vdev: the virtio_device
+ * offset: the offset of the configuration field
+ * buf: the buffer to write the field value into.
+ * len: the length of the buffer
+ * @set: write the value of a configuration field
+ * vdev: the virtio_device
+ * offset: the offset of the configuration field
+ * buf: the buffer to read the field value from.
+ * len: the length of the buffer
+ * @generation: config generation counter
+ * vdev: the virtio_device
+ * Returns the config generation counter
+ * @get_status: read the status byte
+ * vdev: the virtio_device
+ * Returns the status byte
+ * @set_status: write the status byte
+ * vdev: the virtio_device
+ * status: the new status byte
+ * @reset: reset the device
+ * vdev: the virtio device
+ * After this, status and feature negotiation must be done again
+ * Device must not be reset from its vq/config callbacks, or in
+ * parallel with being added/removed.
+ * @find_vqs: find virtqueues and instantiate them.
+ * vdev: the virtio_device
+ * nvqs: the number of virtqueues to find
+ * vqs: on success, includes new virtqueues
+ * callbacks: array of callbacks, for each virtqueue
+ * include a NULL entry for vqs that do not need a callback
+ * names: array of virtqueue names (mainly for debugging)
+ * include a NULL entry for vqs unused by driver
+ * Returns 0 on success or error status
+ * @del_vqs: free virtqueues found by find_vqs().
+ * @get_features: get the array of feature bits for this device.
+ * vdev: the virtio_device
+ * Returns the first 32 feature bits (all we currently need).
+ * @finalize_features: confirm what device features we'll be using.
+ * vdev: the virtio_device
+ * This gives the final feature bits for the device: it can change
+ * the dev->feature bits if it wants.
+ * Returns 0 on success or error status
+ * @bus_name: return the bus name associated with the device
+ * vdev: the virtio_device
+ * This returns a pointer to the bus name a la pci_name from which
+ * the caller can then copy.
+ * @set_vq_affinity: set the affinity for a virtqueue.
+ */
+typedef void vq_callback_t(struct virtqueue *);
+struct virtio_config_ops {
+ void (*get)(struct virtio_device *vdev, unsigned offset,
+ void *buf, unsigned len);
+ void (*set)(struct virtio_device *vdev, unsigned offset,
+ const void *buf, unsigned len);
+ u32 (*generation)(struct virtio_device *vdev);
+ u8 (*get_status)(struct virtio_device *vdev);
+ void (*set_status)(struct virtio_device *vdev, u8 status);
+ void (*reset)(struct virtio_device *vdev);
+ int (*find_vqs)(struct virtio_device *, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[]);
+ void (*del_vqs)(struct virtio_device *);
+ u64 (*get_features)(struct virtio_device *vdev);
+ int (*finalize_features)(struct virtio_device *vdev);
+ const char *(*bus_name)(struct virtio_device *vdev);
+ int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
+};
+
+/* If driver didn't advertise the feature, it will never appear. */
+void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
+ unsigned int fbit);
+
+/**
+ * __virtio_test_bit - helper to test feature bits. For use by transports.
+ * Devices should normally use virtio_has_feature,
+ * which includes more checks.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline bool __virtio_test_bit(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ /* Did you forget to fix assumptions on max features? */
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 64);
+ else
+ BUG_ON(fbit >= 64);
+
+ return vdev->features & BIT_ULL(fbit);
+}
+
+/**
+ * __virtio_set_bit - helper to set feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_set_bit(struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ /* Did you forget to fix assumptions on max features? */
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 64);
+ else
+ BUG_ON(fbit >= 64);
+
+ vdev->features |= BIT_ULL(fbit);
+}
+
+/**
+ * __virtio_clear_bit - helper to clear feature bits. For use by transports.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline void __virtio_clear_bit(struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ /* Did you forget to fix assumptions on max features? */
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 64);
+ else
+ BUG_ON(fbit >= 64);
+
+ vdev->features &= ~BIT_ULL(fbit);
+}
+
+/**
+ * virtio_has_feature - helper to determine if this device has this feature.
+ * @vdev: the device
+ * @fbit: the feature bit
+ */
+static inline bool virtio_has_feature(const struct virtio_device *vdev,
+ unsigned int fbit)
+{
+ if (fbit < VIRTIO_TRANSPORT_F_START)
+ virtio_check_driver_offered_feature(vdev, fbit);
+
+ return __virtio_test_bit(vdev, fbit);
+}
+
+static inline
+struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
+ vq_callback_t *c, const char *n)
+{
+ vq_callback_t *callbacks[] = { c };
+ const char *names[] = { n };
+ struct virtqueue *vq;
+ int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names);
+ if (err < 0)
+ return ERR_PTR(err);
+ return vq;
+}
+
+/**
+ * virtio_device_ready - enable vq use in probe function
+ * @vdev: the device
+ *
+ * Driver must call this to use vqs in the probe function.
+ *
+ * Note: vqs are enabled automatically after probe returns.
+ */
+static inline
+void virtio_device_ready(struct virtio_device *dev)
+{
+ unsigned status = dev->config->get_status(dev);
+
+ BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
+ dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
+}
+
+static inline
+const char *virtio_bus_name(struct virtio_device *vdev)
+{
+ if (!vdev->config->bus_name)
+ return "virtio";
+ return vdev->config->bus_name(vdev);
+}
+
+/**
+ * virtqueue_set_affinity - set the affinity for a virtqueue
+ * @vq: the virtqueue
+ * @cpu: the cpu no.
+ *
+ * Note that this function is best-effort: the affinity hint may not be set
+ * due to config support, irq type and sharing.
+ *
+ */
+static inline
+int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
+{
+ struct virtio_device *vdev = vq->vdev;
+ if (vdev->config->set_vq_affinity)
+ return vdev->config->set_vq_affinity(vq, cpu);
+ return 0;
+}
+
+/* Memory accessors */
+static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
+{
+ return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
+{
+ return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
+{
+ return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
+{
+ return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
+{
+ return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
+{
+ return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+}
+
+/* Config space accessors. */
+#define virtio_cread(vdev, structname, member, ptr) \
+ do { \
+ /* Must match the member's type, and be integer */ \
+ if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+ (*ptr) = 1; \
+ \
+ switch (sizeof(*ptr)) { \
+ case 1: \
+ *(ptr) = virtio_cread8(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 2: \
+ *(ptr) = virtio_cread16(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 4: \
+ *(ptr) = virtio_cread32(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ case 8: \
+ *(ptr) = virtio_cread64(vdev, \
+ offsetof(structname, member)); \
+ break; \
+ default: \
+ BUG(); \
+ } \
+ } while(0)
+
+/* Config space accessors. */
+#define virtio_cwrite(vdev, structname, member, ptr) \
+ do { \
+ /* Must match the member's type, and be integer */ \
+ if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+ BUG_ON((*ptr) == 1); \
+ \
+ switch (sizeof(*ptr)) { \
+ case 1: \
+ virtio_cwrite8(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 2: \
+ virtio_cwrite16(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 4: \
+ virtio_cwrite32(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ case 8: \
+ virtio_cwrite64(vdev, \
+ offsetof(structname, member), \
+ *(ptr)); \
+ break; \
+ default: \
+ BUG(); \
+ } \
+ } while(0)
+
+/* Read @count fields, @bytes each; retry if the config generation changes mid-read. */
+static inline void __virtio_cread_many(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf, size_t count, size_t bytes)
+{
+ u32 old, gen = vdev->config->generation ?
+ vdev->config->generation(vdev) : 0;
+ int i;
+
+ do {
+ old = gen;
+
+ for (i = 0; i < count; i++)
+ vdev->config->get(vdev, offset + bytes * i,
+ buf + i * bytes, bytes);
+
+ gen = vdev->config->generation ?
+ vdev->config->generation(vdev) : 0;
+ } while (gen != old);
+}
+
+static inline void virtio_cread_bytes(struct virtio_device *vdev,
+ unsigned int offset,
+ void *buf, size_t len)
+{
+ __virtio_cread_many(vdev, offset, buf, len, 1);
+}
+
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+ u8 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return ret;
+}
+
+static inline void virtio_cwrite8(struct virtio_device *vdev,
+ unsigned int offset, u8 val)
+{
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u16 virtio_cread16(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u16 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return virtio16_to_cpu(vdev, (__force __virtio16)ret);
+}
+
+static inline void virtio_cwrite16(struct virtio_device *vdev,
+ unsigned int offset, u16 val)
+{
+ val = (__force u16)cpu_to_virtio16(vdev, val);
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u32 virtio_cread32(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u32 ret;
+ vdev->config->get(vdev, offset, &ret, sizeof(ret));
+ return virtio32_to_cpu(vdev, (__force __virtio32)ret);
+}
+
+static inline void virtio_cwrite32(struct virtio_device *vdev,
+ unsigned int offset, u32 val)
+{
+ val = (__force u32)cpu_to_virtio32(vdev, val);
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u64 virtio_cread64(struct virtio_device *vdev,
+ unsigned int offset)
+{
+ u64 ret;
+ __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
+ return virtio64_to_cpu(vdev, (__force __virtio64)ret);
+}
+
+static inline void virtio_cwrite64(struct virtio_device *vdev,
+ unsigned int offset, u64 val)
+{
+ val = (__force u64)cpu_to_virtio64(vdev, val);
+ vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+/* Conditional config space accessors. */
+#define virtio_cread_feature(vdev, fbit, structname, member, ptr) \
+ ({ \
+ int _r = 0; \
+ if (!virtio_has_feature(vdev, fbit)) \
+ _r = -ENOENT; \
+ else \
+ virtio_cread((vdev), structname, member, ptr); \
+ _r; \
+ })
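+
+/*
+ * Illustrative sketch (not part of the original header): reading a config
+ * field with virtio_cread_feature(). The config layout and the EXAMPLE_F_MQ
+ * feature bit are made up, and the driver is assumed to advertise that bit
+ * in its feature table. Guarded out.
+ */
+#if 0
+struct example_config {
+	__u16 max_queues;
+};
+
+static u16 example_max_queues(struct virtio_device *vdev)
+{
+	u16 max = 1;
+
+	/* Leaves the default of 1 when EXAMPLE_F_MQ was not negotiated. */
+	virtio_cread_feature(vdev, EXAMPLE_F_MQ,
+			     struct example_config, max_queues, &max);
+	return max;
+}
+#endif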
+
+#endif /* _LINUX_VIRTIO_CONFIG_H */
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
new file mode 100644
index 000000000..d2e2785af
--- /dev/null
+++ b/include/linux/virtio_console.h
@@ -0,0 +1,38 @@
+/*
+ * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
+ * anyone can use the definitions to implement compatible drivers/servers:
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
+ * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
+ */
+#ifndef _LINUX_VIRTIO_CONSOLE_H
+#define _LINUX_VIRTIO_CONSOLE_H
+
+#include <uapi/linux/virtio_console.h>
+
+int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
+#endif /* _LINUX_VIRTIO_CONSOLE_H */
diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h
new file mode 100644
index 000000000..c4b09689a
--- /dev/null
+++ b/include/linux/virtio_mmio.h
@@ -0,0 +1,141 @@
+/*
+ * Virtio platform device driver
+ *
+ * Copyright 2011, ARM Ltd.
+ *
+ * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
+ *
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_VIRTIO_MMIO_H
+#define _LINUX_VIRTIO_MMIO_H
+
+/*
+ * Control registers
+ */
+
+/* Magic value ("virt" string) - Read Only */
+#define VIRTIO_MMIO_MAGIC_VALUE 0x000
+
+/* Virtio device version - Read Only */
+#define VIRTIO_MMIO_VERSION 0x004
+
+/* Virtio device ID - Read Only */
+#define VIRTIO_MMIO_DEVICE_ID 0x008
+
+/* Virtio vendor ID - Read Only */
+#define VIRTIO_MMIO_VENDOR_ID 0x00c
+
+/* Bitmask of the features supported by the device (host)
+ * (32 bits per set) - Read Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
+
+/* Device (host) features set selector - Write Only */
+#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014
+
+/* Bitmask of features activated by the driver (guest)
+ * (32 bits per set) - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
+
+/* Activated features set selector - Write Only */
+#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024
+
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
+/* Guest's memory page size in bytes - Write Only */
+#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028
+
+#endif
+
+
+/* Queue selector - Write Only */
+#define VIRTIO_MMIO_QUEUE_SEL 0x030
+
+/* Maximum size of the currently selected queue - Read Only */
+#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034
+
+/* Queue size for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_NUM 0x038
+
+
+#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */
+
+/* Used Ring alignment for the currently selected queue - Write Only */
+#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c
+
+/* Guest's PFN for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_PFN 0x040
+
+#endif
+
+
+/* Ready bit for the currently selected queue - Read Write */
+#define VIRTIO_MMIO_QUEUE_READY 0x044
+
+/* Queue notifier - Write Only */
+#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050
+
+/* Interrupt status - Read Only */
+#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060
+
+/* Interrupt acknowledge - Write Only */
+#define VIRTIO_MMIO_INTERRUPT_ACK 0x064
+
+/* Device status register - Read Write */
+#define VIRTIO_MMIO_STATUS 0x070
+
+/* Selected queue's Descriptor Table address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080
+#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084
+
+/* Selected queue's Available Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090
+#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094
+
+/* Selected queue's Used Ring address, 64 bits in two halves */
+#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0
+#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4
+
+/* Configuration atomicity value */
+#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc
+
+/* The config space is defined by each driver as
+ * the per-driver configuration space - Read Write */
+#define VIRTIO_MMIO_CONFIG 0x100
+
+
+
+/*
+ * Interrupt flags (re: interrupt status & acknowledge registers)
+ */
+
+#define VIRTIO_MMIO_INT_VRING (1 << 0)
+#define VIRTIO_MMIO_INT_CONFIG (1 << 1)
+
+#endif
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
new file mode 100644
index 000000000..8e50888a6
--- /dev/null
+++ b/include/linux/virtio_ring.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_VIRTIO_RING_H
+#define _LINUX_VIRTIO_RING_H
+
+#include <asm/barrier.h>
+#include <linux/irqreturn.h>
+#include <uapi/linux/virtio_ring.h>
+
+/*
+ * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
+ * they're not on an SMP host system, so they need to assume real
+ * barriers. Non-SMP virtio hosts could skip the barriers, but does
+ * anyone care?
+ *
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ * For using virtio to talk to real devices (eg. other heterogeneous
+ * CPUs) we do need real barriers. In theory, we could be using both
+ * kinds of virtio, so it's a runtime decision, and the branch is
+ * actually quite cheap.
+ */
+
+static inline void virtio_mb(bool weak_barriers)
+{
+#ifdef CONFIG_SMP
+ if (weak_barriers)
+ smp_mb();
+ else
+#endif
+ mb();
+}
+
+static inline void virtio_rmb(bool weak_barriers)
+{
+ if (weak_barriers)
+ dma_rmb();
+ else
+ rmb();
+}
+
+static inline void virtio_wmb(bool weak_barriers)
+{
+ if (weak_barriers)
+ dma_wmb();
+ else
+ wmb();
+}
+
+struct virtio_device;
+struct virtqueue;
+
+struct virtqueue *vring_new_virtqueue(unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ void *pages,
+ bool (*notify)(struct virtqueue *vq),
+ void (*callback)(struct virtqueue *vq),
+ const char *name);
+void vring_del_virtqueue(struct virtqueue *vq);
+/* Filter out transport-specific feature bits. */
+void vring_transport_features(struct virtio_device *vdev);
+
+irqreturn_t vring_interrupt(int irq, void *_vq);
+#endif /* _LINUX_VIRTIO_RING_H */
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h
new file mode 100644
index 000000000..017d4a53d
--- /dev/null
+++ b/include/linux/vlynq.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __VLYNQ_H__
+#define __VLYNQ_H__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct module;
+
+#define VLYNQ_NUM_IRQS 32
+
+struct vlynq_mapping {
+ u32 size;
+ u32 offset;
+};
+
+enum vlynq_divisor {
+ vlynq_div_auto = 0,
+ vlynq_ldiv1,
+ vlynq_ldiv2,
+ vlynq_ldiv3,
+ vlynq_ldiv4,
+ vlynq_ldiv5,
+ vlynq_ldiv6,
+ vlynq_ldiv7,
+ vlynq_ldiv8,
+ vlynq_rdiv1,
+ vlynq_rdiv2,
+ vlynq_rdiv3,
+ vlynq_rdiv4,
+ vlynq_rdiv5,
+ vlynq_rdiv6,
+ vlynq_rdiv7,
+ vlynq_rdiv8,
+ vlynq_div_external
+};
+
+struct vlynq_device_id {
+ u32 id;
+ enum vlynq_divisor divisor;
+ unsigned long driver_data;
+};
+
+struct vlynq_regs;
+struct vlynq_device {
+ u32 id, dev_id;
+ int local_irq;
+ int remote_irq;
+ enum vlynq_divisor divisor;
+ u32 regs_start, regs_end;
+ u32 mem_start, mem_end;
+ u32 irq_start, irq_end;
+ int irq;
+ int enabled;
+ struct vlynq_regs *local;
+ struct vlynq_regs *remote;
+ struct device dev;
+};
+
+struct vlynq_driver {
+ char *name;
+ struct vlynq_device_id *id_table;
+ int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id);
+ void (*remove)(struct vlynq_device *dev);
+ struct device_driver driver;
+};
+
+struct plat_vlynq_ops {
+ int (*on)(struct vlynq_device *dev);
+ void (*off)(struct vlynq_device *dev);
+};
+
+static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv)
+{
+ return container_of(drv, struct vlynq_driver, driver);
+}
+
+static inline struct vlynq_device *to_vlynq_device(struct device *device)
+{
+ return container_of(device, struct vlynq_device, dev);
+}
+
+extern struct bus_type vlynq_bus_type;
+
+extern int __vlynq_register_driver(struct vlynq_driver *driver,
+ struct module *owner);
+
+static inline int vlynq_register_driver(struct vlynq_driver *driver)
+{
+ return __vlynq_register_driver(driver, THIS_MODULE);
+}
+
+static inline void *vlynq_get_drvdata(struct vlynq_device *dev)
+{
+ return dev_get_drvdata(&dev->dev);
+}
+
+static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data)
+{
+ dev_set_drvdata(&dev->dev, data);
+}
+
+static inline u32 vlynq_mem_start(struct vlynq_device *dev)
+{
+ return dev->mem_start;
+}
+
+static inline u32 vlynq_mem_end(struct vlynq_device *dev)
+{
+ return dev->mem_end;
+}
+
+static inline u32 vlynq_mem_len(struct vlynq_device *dev)
+{
+ return dev->mem_end - dev->mem_start + 1;
+}
+
+static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq)
+{
+ int irq = dev->irq_start + virq;
+ if ((irq < dev->irq_start) || (irq > dev->irq_end))
+ return -EINVAL;
+
+ return irq;
+}
+
+static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq)
+{
+ if ((irq < dev->irq_start) || (irq > dev->irq_end))
+ return -EINVAL;
+
+ return irq - dev->irq_start;
+}
+
+extern void vlynq_unregister_driver(struct vlynq_driver *driver);
+extern int vlynq_enable_device(struct vlynq_device *dev);
+extern void vlynq_disable_device(struct vlynq_device *dev);
+extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
+ struct vlynq_mapping *mapping);
+extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
+ struct vlynq_mapping *mapping);
+extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq);
+extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq);
+
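+/*
+ * Illustrative sketch (not part of the original header): a minimal,
+ * hypothetical VLYNQ driver wired up through the hooks declared above.
+ * All names and the device id value are made up. Guarded out.
+ */
+#if 0
+static struct vlynq_device_id example_vlynq_ids[] = {
+	{ .id = 0x0029, .divisor = vlynq_div_auto },
+	{ },
+};
+
+static int example_vlynq_probe(struct vlynq_device *dev,
+			       struct vlynq_device_id *id)
+{
+	return vlynq_enable_device(dev);
+}
+
+static void example_vlynq_remove(struct vlynq_device *dev)
+{
+	vlynq_disable_device(dev);
+}
+
+static struct vlynq_driver example_vlynq_driver = {
+	.name = "example",
+	.id_table = example_vlynq_ids,
+	.probe = example_vlynq_probe,
+	.remove = example_vlynq_remove,
+};
+
+/* Typically from the module's init: vlynq_register_driver(&example_vlynq_driver); */
+#endif
+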
+#endif /* __VLYNQ_H__ */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
new file mode 100644
index 000000000..9246d32dc
--- /dev/null
+++ b/include/linux/vm_event_item.h
@@ -0,0 +1,98 @@
+#ifndef VM_EVENT_ITEM_H_INCLUDED
+#define VM_EVENT_ITEM_H_INCLUDED
+
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
+
+#ifdef CONFIG_ZONE_DMA32
+#define DMA32_ZONE(xx) xx##_DMA32,
+#else
+#define DMA32_ZONE(xx)
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#define HIGHMEM_ZONE(xx) , xx##_HIGH
+#else
+#define HIGHMEM_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
+
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+ FOR_ALL_ZONES(PGALLOC),
+ PGFREE, PGACTIVATE, PGDEACTIVATE,
+ PGFAULT, PGMAJFAULT,
+ FOR_ALL_ZONES(PGREFILL),
+ FOR_ALL_ZONES(PGSTEAL_KSWAPD),
+ FOR_ALL_ZONES(PGSTEAL_DIRECT),
+ FOR_ALL_ZONES(PGSCAN_KSWAPD),
+ FOR_ALL_ZONES(PGSCAN_DIRECT),
+ PGSCAN_DIRECT_THROTTLE,
+#ifdef CONFIG_NUMA
+ PGSCAN_ZONE_RECLAIM_FAILED,
+#endif
+ PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
+ KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+ PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+ DROP_PAGECACHE, DROP_SLAB,
+#ifdef CONFIG_NUMA_BALANCING
+ NUMA_PTE_UPDATES,
+ NUMA_HUGE_PTE_UPDATES,
+ NUMA_HINT_FAULTS,
+ NUMA_HINT_FAULTS_LOCAL,
+ NUMA_PAGE_MIGRATE,
+#endif
+#ifdef CONFIG_MIGRATION
+ PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
+#endif
+#ifdef CONFIG_COMPACTION
+ COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
+ COMPACTISOLATED,
+ COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+ HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
+ UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
+ UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
+ UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
+ UNEVICTABLE_PGMLOCKED,
+ UNEVICTABLE_PGMUNLOCKED,
+ UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
+ UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ THP_FAULT_ALLOC,
+ THP_FAULT_FALLBACK,
+ THP_COLLAPSE_ALLOC,
+ THP_COLLAPSE_ALLOC_FAILED,
+ THP_SPLIT,
+ THP_ZERO_PAGE_ALLOC,
+ THP_ZERO_PAGE_ALLOC_FAILED,
+#endif
+#ifdef CONFIG_MEMORY_BALLOON
+ BALLOON_INFLATE,
+ BALLOON_DEFLATE,
+#ifdef CONFIG_BALLOON_COMPACTION
+ BALLOON_MIGRATE,
+#endif
+#endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
+#ifdef CONFIG_SMP
+ NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
+ NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
+#endif /* CONFIG_SMP */
+ NR_TLB_LOCAL_FLUSH_ALL,
+ NR_TLB_LOCAL_FLUSH_ONE,
+#endif /* CONFIG_DEBUG_TLBFLUSH */
+#ifdef CONFIG_DEBUG_VM_VMACACHE
+ VMACACHE_FIND_CALLS,
+ VMACACHE_FIND_HITS,
+ VMACACHE_FULL_FLUSHES,
+#endif
+ NR_VM_EVENT_ITEMS
+};
+
+#endif /* VM_EVENT_ITEM_H_INCLUDED */
diff --git a/include/linux/vm_sockets.h b/include/linux/vm_sockets.h
new file mode 100644
index 000000000..0805eecba
--- /dev/null
+++ b/include/linux/vm_sockets.h
@@ -0,0 +1,23 @@
+/*
+ * VMware vSockets Driver
+ *
+ * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _VM_SOCKETS_H
+#define _VM_SOCKETS_H
+
+#include <uapi/linux/vm_sockets.h>
+
+int vm_sockets_get_local_cid(void);
+
+#endif /* _VM_SOCKETS_H */
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
new file mode 100644
index 000000000..c3fa0fd43
--- /dev/null
+++ b/include/linux/vmacache.h
@@ -0,0 +1,38 @@
+#ifndef __LINUX_VMACACHE_H
+#define __LINUX_VMACACHE_H
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+/*
+ * Hash based on the page number. Provides a good hit rate for
+ * workloads with good locality and those with random accesses as well.
+ */
+#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
+
+static inline void vmacache_flush(struct task_struct *tsk)
+{
+ memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
+}
+
+extern void vmacache_flush_all(struct mm_struct *mm);
+extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
+extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
+ unsigned long addr);
+
+#ifndef CONFIG_MMU
+extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
+#endif
+
+static inline void vmacache_invalidate(struct mm_struct *mm)
+{
+ mm->vmacache_seqnum++;
+
+ /* deal with overflows */
+ if (unlikely(mm->vmacache_seqnum == 0))
+ vmacache_flush_all(mm);
+}
+
+#endif /* __LINUX_VMACACHE_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
new file mode 100644
index 000000000..0ec598381
--- /dev/null
+++ b/include/linux/vmalloc.h
@@ -0,0 +1,203 @@
+#ifndef _LINUX_VMALLOC_H
+#define _LINUX_VMALLOC_H
+
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <asm/page.h> /* pgprot_t */
+#include <linux/rbtree.h>
+
+struct vm_area_struct; /* vma defining user mapping in mm_types.h */
+
+/* bits in flags of vmalloc's vm_struct below */
+#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
+#define VM_ALLOC 0x00000002 /* vmalloc() */
+#define VM_MAP 0x00000004 /* vmap()ed pages */
+#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
+#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
+#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
+#define VM_NO_GUARD 0x00000040 /* don't add guard page */
+#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+/* bits [20..32] reserved for arch specific ioremap internals */
+
+/*
+ * Maximum alignment for ioremap() regions.
+ * Can be overridden by an arch-specific value.
+ */
+#ifndef IOREMAP_MAX_ORDER
+#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
+#endif
+
+struct vm_struct {
+ struct vm_struct *next;
+ void *addr;
+ unsigned long size;
+ unsigned long flags;
+ struct page **pages;
+ unsigned int nr_pages;
+ phys_addr_t phys_addr;
+ const void *caller;
+};
+
+struct vmap_area {
+ unsigned long va_start;
+ unsigned long va_end;
+ unsigned long flags;
+ struct rb_node rb_node; /* address sorted rbtree */
+ struct list_head list; /* address sorted list */
+ struct list_head purge_list; /* "lazy purge" list */
+ struct vm_struct *vm;
+ struct rcu_head rcu_head;
+};
+
+/*
+ * Highlevel APIs for driver use
+ */
+extern void vm_unmap_ram(const void *mem, unsigned int count);
+extern void *vm_map_ram(struct page **pages, unsigned int count,
+ int node, pgprot_t prot);
+extern void vm_unmap_aliases(void);
+
+#ifdef CONFIG_MMU
+extern void __init vmalloc_init(void);
+#else
+static inline void vmalloc_init(void)
+{
+}
+#endif
+
+extern void *vmalloc(unsigned long size);
+extern void *vzalloc(unsigned long size);
+extern void *vmalloc_user(unsigned long size);
+extern void *vmalloc_node(unsigned long size, int node);
+extern void *vzalloc_node(unsigned long size, int node);
+extern void *vmalloc_exec(unsigned long size);
+extern void *vmalloc_32(unsigned long size);
+extern void *vmalloc_32_user(unsigned long size);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+ pgprot_t prot, unsigned long vm_flags, int node,
+ const void *caller);
+
+extern void vfree(const void *addr);
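+
+/*
+ * Illustrative sketch (not part of the original header): the usual
+ * vzalloc()/vfree() pairing for a large, virtually contiguous, zeroed
+ * buffer. The 1 MiB size is an arbitrary example value. Guarded out.
+ */
+#if 0
+static void *example_alloc_table(void)
+{
+	return vzalloc(1 << 20);	/* may sleep; NULL on failure */
+}
+
+static void example_free_table(void *tbl)
+{
+	vfree(tbl);	/* vfree(NULL) is a no-op */
+}
+#endif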
+
+extern void *vmap(struct page **pages, unsigned int count,
+ unsigned long flags, pgprot_t prot);
+extern void vunmap(const void *addr);
+
+extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
+ unsigned long uaddr, void *kaddr,
+ unsigned long size);
+
+extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff);
+void vmalloc_sync_all(void);
+
+/*
+ * Lowlevel-APIs (not for driver use!)
+ */
+
+static inline size_t get_vm_area_size(const struct vm_struct *area)
+{
+ if (!(area->flags & VM_NO_GUARD))
+ /* return actual size without guard page */
+ return area->size - PAGE_SIZE;
+ else
+ return area->size;
+}
+
+extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
+extern struct vm_struct *get_vm_area_caller(unsigned long size,
+ unsigned long flags, const void *caller);
+extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
+ unsigned long start, unsigned long end);
+extern struct vm_struct *__get_vm_area_caller(unsigned long size,
+ unsigned long flags,
+ unsigned long start, unsigned long end,
+ const void *caller);
+extern struct vm_struct *remove_vm_area(const void *addr);
+extern struct vm_struct *find_vm_area(const void *addr);
+
+extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
+ struct page **pages);
+#ifdef CONFIG_MMU
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+ pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
+extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+#else
+static inline int
+map_kernel_range_noflush(unsigned long start, unsigned long size,
+ pgprot_t prot, struct page **pages)
+{
+ return size >> PAGE_SHIFT;
+}
+static inline void
+unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+}
+static inline void
+unmap_kernel_range(unsigned long addr, unsigned long size)
+{
+}
+#endif
+
+/* Allocate/destroy a 'vmalloc' VM area. */
+extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
+extern void free_vm_area(struct vm_struct *area);
+
+/* for /dev/kmem */
+extern long vread(char *buf, char *addr, unsigned long count);
+extern long vwrite(char *buf, char *addr, unsigned long count);
+
+/*
+ * Internals. Don't use.
+ */
+extern struct list_head vmap_area_list;
+extern __init void vm_area_add_early(struct vm_struct *vm);
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+
+#ifdef CONFIG_SMP
+# ifdef CONFIG_MMU
+struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
+ const size_t *sizes, int nr_vms,
+ size_t align);
+
+void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
+# else
+static inline struct vm_struct **
+pcpu_get_vm_areas(const unsigned long *offsets,
+ const size_t *sizes, int nr_vms,
+ size_t align)
+{
+ return NULL;
+}
+
+static inline void
+pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
+{
+}
+# endif
+#endif
+
+struct vmalloc_info {
+ unsigned long used;
+ unsigned long largest_chunk;
+};
+
+#ifdef CONFIG_MMU
+#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
+extern void get_vmalloc_info(struct vmalloc_info *vmi);
+#else
+
+#define VMALLOC_TOTAL 0UL
+#define get_vmalloc_info(vmi) \
+do { \
+ (vmi)->used = 0; \
+ (vmi)->largest_chunk = 0; \
+} while (0)
+#endif
+
+#endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/vme.h b/include/linux/vme.h
new file mode 100644
index 000000000..79242e9c0
--- /dev/null
+++ b/include/linux/vme.h
@@ -0,0 +1,176 @@
+#ifndef _VME_H_
+#define _VME_H_
+
+/* Resource Type */
+enum vme_resource_type {
+ VME_MASTER,
+ VME_SLAVE,
+ VME_DMA,
+ VME_LM
+};
+
+/* VME Address Spaces */
+#define VME_A16 0x1
+#define VME_A24 0x2
+#define VME_A32 0x4
+#define VME_A64 0x8
+#define VME_CRCSR 0x10
+#define VME_USER1 0x20
+#define VME_USER2 0x40
+#define VME_USER3 0x80
+#define VME_USER4 0x100
+
+#define VME_A16_MAX 0x10000ULL
+#define VME_A24_MAX 0x1000000ULL
+#define VME_A32_MAX 0x100000000ULL
+#define VME_A64_MAX 0x10000000000000000ULL
+#define VME_CRCSR_MAX 0x1000000ULL
+
+
+/* VME Cycle Types */
+#define VME_SCT 0x1
+#define VME_BLT 0x2
+#define VME_MBLT 0x4
+#define VME_2eVME 0x8
+#define VME_2eSST 0x10
+#define VME_2eSSTB 0x20
+
+#define VME_2eSST160 0x100
+#define VME_2eSST267 0x200
+#define VME_2eSST320 0x400
+
+#define VME_SUPER 0x1000
+#define VME_USER 0x2000
+#define VME_PROG 0x4000
+#define VME_DATA 0x8000
+
+/* VME Data Widths */
+#define VME_D8 0x1
+#define VME_D16 0x2
+#define VME_D32 0x4
+#define VME_D64 0x8
+
+/* Arbitration Scheduling Modes */
+#define VME_R_ROBIN_MODE 0x1
+#define VME_PRIORITY_MODE 0x2
+
+#define VME_DMA_PATTERN (1<<0)
+#define VME_DMA_PCI (1<<1)
+#define VME_DMA_VME (1<<2)
+
+#define VME_DMA_PATTERN_BYTE (1<<0)
+#define VME_DMA_PATTERN_WORD (1<<1)
+#define VME_DMA_PATTERN_INCREMENT (1<<2)
+
+#define VME_DMA_VME_TO_MEM (1<<0)
+#define VME_DMA_MEM_TO_VME (1<<1)
+#define VME_DMA_VME_TO_VME (1<<2)
+#define VME_DMA_MEM_TO_MEM (1<<3)
+#define VME_DMA_PATTERN_TO_VME (1<<4)
+#define VME_DMA_PATTERN_TO_MEM (1<<5)
+
+struct vme_dma_attr {
+ u32 type;
+ void *private;
+};
+
+struct vme_resource {
+ enum vme_resource_type type;
+ struct list_head *entry;
+};
+
+extern struct bus_type vme_bus_type;
+
+/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */
+#define VME_MAX_BRIDGES (sizeof(unsigned int)*8)
+#define VME_MAX_SLOTS 32
+
+#define VME_SLOT_CURRENT -1
+#define VME_SLOT_ALL -2
+
+/**
+ * Structure representing a VME device
+ * @num: The device number
+ * @bridge: Pointer to the bridge device this device is on
+ * @dev: Internal device structure
+ * @drv_list: List of devices (per driver)
+ * @bridge_list: List of devices (per bridge)
+ */
+struct vme_dev {
+ int num;
+ struct vme_bridge *bridge;
+ struct device dev;
+ struct list_head drv_list;
+ struct list_head bridge_list;
+};
+
+struct vme_driver {
+ struct list_head node;
+ const char *name;
+ int (*match)(struct vme_dev *);
+ int (*probe)(struct vme_dev *);
+ int (*remove)(struct vme_dev *);
+ void (*shutdown)(void);
+ struct device_driver driver;
+ struct list_head devices;
+};
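+
+/*
+ * Illustrative sketch (not part of the original header): a minimal,
+ * hypothetical VME driver using the match/probe/remove hooks above and
+ * vme_register_driver() declared further down. All names are made up.
+ * Guarded out.
+ */
+#if 0
+static int example_vme_match(struct vme_dev *vdev)
+{
+	return 1;	/* claim every device offered to the driver */
+}
+
+static int example_vme_probe(struct vme_dev *vdev)
+{
+	return 0;
+}
+
+static int example_vme_remove(struct vme_dev *vdev)
+{
+	return 0;
+}
+
+static struct vme_driver example_vme_driver = {
+	.name = "example",
+	.match = example_vme_match,
+	.probe = example_vme_probe,
+	.remove = example_vme_remove,
+};
+
+/* Typically from module_init(): vme_register_driver(&example_vme_driver, 1); */
+#endif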
+
+void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *);
+void vme_free_consistent(struct vme_resource *, size_t, void *,
+ dma_addr_t);
+
+size_t vme_get_size(struct vme_resource *);
+
+struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
+int vme_slave_set(struct vme_resource *, int, unsigned long long,
+ unsigned long long, dma_addr_t, u32, u32);
+int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
+void vme_slave_free(struct vme_resource *);
+
+struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
+int vme_master_set(struct vme_resource *, int, unsigned long long,
+ unsigned long long, u32, u32, u32);
+int vme_master_get(struct vme_resource *, int *, unsigned long long *,
+ unsigned long long *, u32 *, u32 *, u32 *);
+ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
+ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
+unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
+ unsigned int, loff_t);
+int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma);
+void vme_master_free(struct vme_resource *);
+
+struct vme_resource *vme_dma_request(struct vme_dev *, u32);
+struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
+struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32);
+struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
+struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
+void vme_dma_free_attribute(struct vme_dma_attr *);
+int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
+ struct vme_dma_attr *, size_t);
+int vme_dma_list_exec(struct vme_dma_list *);
+int vme_dma_list_free(struct vme_dma_list *);
+int vme_dma_free(struct vme_resource *);
+
+int vme_irq_request(struct vme_dev *, int, int,
+ void (*callback)(int, int, void *), void *);
+void vme_irq_free(struct vme_dev *, int, int);
+int vme_irq_generate(struct vme_dev *, int, int);
+
+struct vme_resource *vme_lm_request(struct vme_dev *);
+int vme_lm_count(struct vme_resource *);
+int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
+int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
+int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
+int vme_lm_detach(struct vme_resource *, int);
+void vme_lm_free(struct vme_resource *);
+
+int vme_slot_num(struct vme_dev *);
+int vme_bus_num(struct vme_dev *);
+
+int vme_register_driver(struct vme_driver *, unsigned int);
+void vme_unregister_driver(struct vme_driver *);
+
+
+#endif /* _VME_H_ */
+
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
new file mode 100644
index 000000000..3e4535876
--- /dev/null
+++ b/include/linux/vmpressure.h
@@ -0,0 +1,48 @@
+#ifndef __LINUX_VMPRESSURE_H
+#define __LINUX_VMPRESSURE_H
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/cgroup.h>
+#include <linux/eventfd.h>
+
+struct vmpressure {
+ unsigned long scanned;
+ unsigned long reclaimed;
+ /* The lock is used to keep the scanned/reclaimed above in sync. */
+ struct spinlock sr_lock;
+
+ /* The list of vmpressure_event structs. */
+ struct list_head events;
+ /* Have to grab the lock on events traversal or modifications. */
+ struct mutex events_lock;
+
+ struct work_struct work;
+};
+
+struct mem_cgroup;
+
+#ifdef CONFIG_MEMCG
+extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
+ unsigned long scanned, unsigned long reclaimed);
+extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
+
+extern void vmpressure_init(struct vmpressure *vmpr);
+extern void vmpressure_cleanup(struct vmpressure *vmpr);
+extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
+extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
+extern int vmpressure_register_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd,
+ const char *args);
+extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
+ struct eventfd_ctx *eventfd);
+#else
+static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
+ unsigned long scanned, unsigned long reclaimed) {}
+static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
+ int prio) {}
+#endif /* CONFIG_MEMCG */
+#endif /* __LINUX_VMPRESSURE_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
new file mode 100644
index 000000000..82e7db7f7
--- /dev/null
+++ b/include/linux/vmstat.h
@@ -0,0 +1,290 @@
+#ifndef _LINUX_VMSTAT_H
+#define _LINUX_VMSTAT_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/vm_event_item.h>
+#include <linux/atomic.h>
+
+extern int sysctl_stat_interval;
+
+#ifdef CONFIG_VM_EVENT_COUNTERS
+/*
+ * Lightweight per-cpu counter implementation.
+ *
+ * Counters should only be incremented and no critical kernel component
+ * should rely on the counter values.
+ *
+ * Counters are handled completely inline. On many platforms the code
+ * generated will simply be the increment of a global address.
+ */
+
+struct vm_event_state {
+ unsigned long event[NR_VM_EVENT_ITEMS];
+};
+
+DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+
+/*
+ * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
+ * local_irq_disable overhead.
+ */
+static inline void __count_vm_event(enum vm_event_item item)
+{
+ raw_cpu_inc(vm_event_states.event[item]);
+}
+
+static inline void count_vm_event(enum vm_event_item item)
+{
+ this_cpu_inc(vm_event_states.event[item]);
+}
+
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+ raw_cpu_add(vm_event_states.event[item], delta);
+}
+
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+ this_cpu_add(vm_event_states.event[item], delta);
+}
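+
+/*
+ * Illustrative sketch (not part of the original header): bumping event
+ * counters. count_vm_event() is safe from any context; the __ variants use
+ * raw per-cpu ops, which is acceptable here because these counters are
+ * explicitly allowed to be racy. Guarded out.
+ */
+#if 0
+static void example_account_fault(bool major)
+{
+	count_vm_event(PGFAULT);
+	if (major)
+		__count_vm_events(PGMAJFAULT, 1);
+}
+#endif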
+
+extern void all_vm_events(unsigned long *);
+
+extern void vm_events_fold_cpu(int cpu);
+
+#else
+
+/* Disable counters */
+static inline void count_vm_event(enum vm_event_item item)
+{
+}
+static inline void count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void __count_vm_event(enum vm_event_item item)
+{
+}
+static inline void __count_vm_events(enum vm_event_item item, long delta)
+{
+}
+static inline void all_vm_events(unsigned long *ret)
+{
+}
+static inline void vm_events_fold_cpu(int cpu)
+{
+}
+
+#endif /* CONFIG_VM_EVENT_COUNTERS */
+
+#ifdef CONFIG_NUMA_BALANCING
+#define count_vm_numa_event(x) count_vm_event(x)
+#define count_vm_numa_events(x, y) count_vm_events(x, y)
+#else
+#define count_vm_numa_event(x) do {} while (0)
+#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
+#endif /* CONFIG_NUMA_BALANCING */
+
+#ifdef CONFIG_DEBUG_TLBFLUSH
+#define count_vm_tlb_event(x) count_vm_event(x)
+#define count_vm_tlb_events(x, y) count_vm_events(x, y)
+#else
+#define count_vm_tlb_event(x) do {} while (0)
+#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_VM_VMACACHE
+#define count_vm_vmacache_event(x) count_vm_event(x)
+#else
+#define count_vm_vmacache_event(x) do {} while (0)
+#endif
+
+#define __count_zone_vm_events(item, zone, delta) \
+ __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
+ zone_idx(zone), delta)
+
+/*
+ * Zone based page accounting with per cpu differentials.
+ */
+extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+static inline void zone_page_state_add(long x, struct zone *zone,
+ enum zone_stat_item item)
+{
+ atomic_long_add(x, &zone->vm_stat[item]);
+ atomic_long_add(x, &vm_stat[item]);
+}
+
+static inline unsigned long global_page_state(enum zone_stat_item item)
+{
+ long x = atomic_long_read(&vm_stat[item]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
+static inline unsigned long zone_page_state(struct zone *zone,
+ enum zone_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_stat[item]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
+/*
+ * More accurate version that also considers the currently pending
+ * per-cpu deltas, which requires looping over all cpus. There is no
+ * synchronization, so the result is still only approximate.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+ enum zone_stat_item item)
+{
+ long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
+#ifdef CONFIG_NUMA
+/*
+ * Determine the per node value of a stat item. This function
+ * is called frequently in a NUMA machine, so try to be as
+ * frugal as possible.
+ */
+static inline unsigned long node_page_state(int node,
+ enum zone_stat_item item)
+{
+ struct zone *zones = NODE_DATA(node)->node_zones;
+
+ return
+#ifdef CONFIG_ZONE_DMA
+ zone_page_state(&zones[ZONE_DMA], item) +
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ zone_page_state(&zones[ZONE_DMA32], item) +
+#endif
+#ifdef CONFIG_HIGHMEM
+ zone_page_state(&zones[ZONE_HIGHMEM], item) +
+#endif
+ zone_page_state(&zones[ZONE_NORMAL], item) +
+ zone_page_state(&zones[ZONE_MOVABLE], item);
+}
+
+extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
+
+#else
+
+#define node_page_state(node, item) global_page_state(item)
+#define zone_statistics(_zl, _z, gfp) do { } while (0)
+
+#endif /* CONFIG_NUMA */
+
+#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
+#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
+
+#ifdef CONFIG_SMP
+void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
+void __inc_zone_page_state(struct page *, enum zone_stat_item);
+void __dec_zone_page_state(struct page *, enum zone_stat_item);
+
+void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
+void inc_zone_page_state(struct page *, enum zone_stat_item);
+void dec_zone_page_state(struct page *, enum zone_stat_item);
+
+extern void inc_zone_state(struct zone *, enum zone_stat_item);
+extern void __inc_zone_state(struct zone *, enum zone_stat_item);
+extern void dec_zone_state(struct zone *, enum zone_stat_item);
+extern void __dec_zone_state(struct zone *, enum zone_stat_item);
+
+void cpu_vm_stats_fold(int cpu);
+void refresh_zone_stat_thresholds(void);
+
+void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
+
+int calculate_pressure_threshold(struct zone *zone);
+int calculate_normal_threshold(struct zone *zone);
+void set_pgdat_percpu_threshold(pg_data_t *pgdat,
+ int (*calculate_pressure)(struct zone *));
+#else /* CONFIG_SMP */
+
+/*
+ * We do not maintain differentials in a single processor configuration.
+ * The functions directly modify the zone and global counters.
+ */
+static inline void __mod_zone_page_state(struct zone *zone,
+ enum zone_stat_item item, int delta)
+{
+ zone_page_state_add(delta, zone, item);
+}
+
+static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+ atomic_long_inc(&zone->vm_stat[item]);
+ atomic_long_inc(&vm_stat[item]);
+}
+
+static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+ atomic_long_dec(&zone->vm_stat[item]);
+ atomic_long_dec(&vm_stat[item]);
+}
+
+static inline void __inc_zone_page_state(struct page *page,
+ enum zone_stat_item item)
+{
+ __inc_zone_state(page_zone(page), item);
+}
+
+static inline void __dec_zone_page_state(struct page *page,
+ enum zone_stat_item item)
+{
+ __dec_zone_state(page_zone(page), item);
+}
+
+/*
+ * We only use atomic operations to update counters. So there is no need to
+ * disable interrupts.
+ */
+#define inc_zone_page_state __inc_zone_page_state
+#define dec_zone_page_state __dec_zone_page_state
+#define mod_zone_page_state __mod_zone_page_state
+
+#define inc_zone_state __inc_zone_state
+#define dec_zone_state __dec_zone_state
+
+#define set_pgdat_percpu_threshold(pgdat, callback) { }
+
+static inline void refresh_cpu_vm_stats(int cpu) { }
+static inline void refresh_zone_stat_thresholds(void) { }
+static inline void cpu_vm_stats_fold(int cpu) { }
+
+static inline void drain_zonestat(struct zone *zone,
+ struct per_cpu_pageset *pset) { }
+#endif /* CONFIG_SMP */
+
+static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
+ int migratetype)
+{
+ __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
+ if (is_migrate_cma(migratetype))
+ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+}
+
+extern const char * const vmstat_text[];
+
+#endif /* _LINUX_VMSTAT_H */
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
new file mode 100644
index 000000000..63df3a2a8
--- /dev/null
+++ b/include/linux/vmw_vmci_api.h
@@ -0,0 +1,83 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __VMW_VMCI_API_H__
+#define __VMW_VMCI_API_H__
+
+#include <linux/uidgid.h>
+#include <linux/vmw_vmci_defs.h>
+
+#undef VMCI_KERNEL_API_VERSION
+#define VMCI_KERNEL_API_VERSION_1 1
+#define VMCI_KERNEL_API_VERSION_2 2
+#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2
+
+struct msghdr;
+typedef void (vmci_device_shutdown_fn) (void *device_registration,
+ void *user_data);
+
+int vmci_datagram_create_handle(u32 resource_id, u32 flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data,
+ struct vmci_handle *out_handle);
+int vmci_datagram_create_handle_priv(u32 resource_id, u32 flags, u32 priv_flags,
+ vmci_datagram_recv_cb recv_cb,
+ void *client_data,
+ struct vmci_handle *out_handle);
+int vmci_datagram_destroy_handle(struct vmci_handle handle);
+int vmci_datagram_send(struct vmci_datagram *msg);
+int vmci_doorbell_create(struct vmci_handle *handle, u32 flags,
+ u32 priv_flags,
+ vmci_callback notify_cb, void *client_data);
+int vmci_doorbell_destroy(struct vmci_handle handle);
+int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags);
+u32 vmci_get_context_id(void);
+bool vmci_is_context_owner(u32 context_id, kuid_t uid);
+
+int vmci_event_subscribe(u32 event,
+ vmci_event_cb callback, void *callback_data,
+ u32 *subid);
+int vmci_event_unsubscribe(u32 subid);
+u32 vmci_context_get_priv_flags(u32 context_id);
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+ struct vmci_handle *handle,
+ u64 produce_qsize,
+ u64 consume_qsize,
+ u32 peer, u32 flags, u32 priv_flags);
+int vmci_qpair_detach(struct vmci_qp **qpair);
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+ u64 *producer_tail,
+ u64 *consumer_head);
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+ u64 *consumer_tail,
+ u64 *producer_head);
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair);
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair);
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair);
+s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair);
+ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+ const void *buf, size_t buf_size, int mode);
+ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+ void *buf, size_t buf_size, int mode);
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
+ int mode);
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+ struct msghdr *msg, size_t iov_size, int mode);
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+ struct msghdr *msg, size_t iov_size, int mode);
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
+ int mode);
+
+#endif /* !__VMW_VMCI_API_H__ */
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
new file mode 100644
index 000000000..65ac54c61
--- /dev/null
+++ b/include/linux/vmw_vmci_defs.h
@@ -0,0 +1,880 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMW_VMCI_DEF_H_
+#define _VMW_VMCI_DEF_H_
+
+#include <linux/atomic.h>
+
+/* Register offsets. */
+#define VMCI_STATUS_ADDR 0x00
+#define VMCI_CONTROL_ADDR 0x04
+#define VMCI_ICR_ADDR 0x08
+#define VMCI_IMR_ADDR 0x0c
+#define VMCI_DATA_OUT_ADDR 0x10
+#define VMCI_DATA_IN_ADDR 0x14
+#define VMCI_CAPS_ADDR 0x18
+#define VMCI_RESULT_LOW_ADDR 0x1c
+#define VMCI_RESULT_HIGH_ADDR 0x20
+
+/* Max number of devices. */
+#define VMCI_MAX_DEVICES 1
+
+/* Status register bits. */
+#define VMCI_STATUS_INT_ON 0x1
+
+/* Control register bits. */
+#define VMCI_CONTROL_RESET 0x1
+#define VMCI_CONTROL_INT_ENABLE 0x2
+#define VMCI_CONTROL_INT_DISABLE 0x4
+
+/* Capabilities register bits. */
+#define VMCI_CAPS_HYPERCALL 0x1
+#define VMCI_CAPS_GUESTCALL 0x2
+#define VMCI_CAPS_DATAGRAM 0x4
+#define VMCI_CAPS_NOTIFICATIONS 0x8
+
+/* Interrupt Cause register bits. */
+#define VMCI_ICR_DATAGRAM 0x1
+#define VMCI_ICR_NOTIFICATION 0x2
+
+/* Interrupt Mask register bits. */
+#define VMCI_IMR_DATAGRAM 0x1
+#define VMCI_IMR_NOTIFICATION 0x2
+
+/* Interrupt type. */
+enum {
+ VMCI_INTR_TYPE_INTX = 0,
+ VMCI_INTR_TYPE_MSI = 1,
+ VMCI_INTR_TYPE_MSIX = 2,
+};
+
+/* Maximum MSI/MSI-X interrupt vectors in the device. */
+#define VMCI_MAX_INTRS 2
+
+/*
+ * Supported interrupt vectors. There is one for each ICR value above,
+ * but here they indicate the position in the vector array/message ID.
+ */
+enum {
+ VMCI_INTR_DATAGRAM = 0,
+ VMCI_INTR_NOTIFICATION = 1,
+};
+
+/*
+ * A single VMCI device has an upper limit of 128MB on the amount of
+ * memory that can be used for queue pairs.
+ */
+#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+
+/*
+ * Queues with pre-mapped data pages must be small, so that we don't pin
+ * too much kernel memory (especially on vmkernel). We limit a queuepair to
+ * 32 KB, or 16 KB per queue for symmetrical pairs.
+ */
+#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
+
+/*
+ * We have a fixed set of resource IDs available in the VMX.
+ * This allows us to have a very simple implementation since we statically
+ * know how many will create datagram handles. If a new caller arrives and
+ * we have run out of slots we can manually increment the maximum size of
+ * available resource IDs.
+ *
+ * VMCI reserved hypervisor datagram resource IDs.
+ */
+enum {
+ VMCI_RESOURCES_QUERY = 0,
+ VMCI_GET_CONTEXT_ID = 1,
+ VMCI_SET_NOTIFY_BITMAP = 2,
+ VMCI_DOORBELL_LINK = 3,
+ VMCI_DOORBELL_UNLINK = 4,
+ VMCI_DOORBELL_NOTIFY = 5,
+ /*
+ * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
+ * obsoleted by the removal of VM to VM communication.
+ */
+ VMCI_DATAGRAM_REQUEST_MAP = 6,
+ VMCI_DATAGRAM_REMOVE_MAP = 7,
+ VMCI_EVENT_SUBSCRIBE = 8,
+ VMCI_EVENT_UNSUBSCRIBE = 9,
+ VMCI_QUEUEPAIR_ALLOC = 10,
+ VMCI_QUEUEPAIR_DETACH = 11,
+
+ /*
+ * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
+ * WS 7.0/7.1 and ESX 4.1
+ */
+ VMCI_HGFS_TRANSPORT = 13,
+ VMCI_UNITY_PBRPC_REGISTER = 14,
+ VMCI_RPC_PRIVILEGED = 15,
+ VMCI_RPC_UNPRIVILEGED = 16,
+ VMCI_RESOURCE_MAX = 17,
+};
+
+/*
+ * struct vmci_handle - Ownership information structure
+ * @context: The VMX context ID.
+ * @resource: The resource ID (used for locating in resource hash).
+ *
+ * The vmci_handle structure is used to track resources used within
+ * vmw_vmci.
+ */
+struct vmci_handle {
+ u32 context;
+ u32 resource;
+};
+
+#define vmci_make_handle(_cid, _rid) \
+ (struct vmci_handle){ .context = _cid, .resource = _rid }
+
+static inline bool vmci_handle_is_equal(struct vmci_handle h1,
+ struct vmci_handle h2)
+{
+ return h1.context == h2.context && h1.resource == h2.resource;
+}
+
+#define VMCI_INVALID_ID ~0
+static const struct vmci_handle VMCI_INVALID_HANDLE = {
+ .context = VMCI_INVALID_ID,
+ .resource = VMCI_INVALID_ID
+};
+
+static inline bool vmci_handle_is_invalid(struct vmci_handle h)
+{
+ return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
+}
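+
+/*
+ * Illustrative sketch (not part of the original header): building a handle
+ * with vmci_make_handle() and comparing it against an existing one. The
+ * helper name is made up. Guarded out.
+ */
+#if 0
+static bool example_same_resource(struct vmci_handle h, u32 cid, u32 rid)
+{
+	return vmci_handle_is_equal(h, vmci_make_handle(cid, rid));
+}
+#endif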
+
+/*
+ * The below defines can be used to send anonymous requests.
+ * This also indicates that no response is expected.
+ */
+#define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
+#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
+static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
+ .context = VMCI_ANON_SRC_CONTEXT_ID,
+ .resource = VMCI_ANON_SRC_RESOURCE_ID
+};
+
+/* The lowest 16 context ids are reserved for internal use. */
+#define VMCI_RESERVED_CID_LIMIT ((u32) 16)
+
+/*
+ * Hypervisor context id, used for calling into hypervisor
+ * supplied services from the VM.
+ */
+#define VMCI_HYPERVISOR_CONTEXT_ID 0
+
+/*
+ * Well-known context id, a logical context that contains a set of
+ * well-known services. This context ID is now obsolete.
+ */
+#define VMCI_WELL_KNOWN_CONTEXT_ID 1
+
+/*
+ * Context ID used by host endpoints.
+ */
+#define VMCI_HOST_CONTEXT_ID 2
+
+#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
+ (_cid) > VMCI_HOST_CONTEXT_ID)
+
+/*
+ * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
+ * handles that refer to a specific context.
+ */
+#define VMCI_CONTEXT_RESOURCE_ID 0
+
+/*
+ * VMCI error codes.
+ */
+enum {
+ VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
+ VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
+ VMCI_SUCCESS_LAST_DETACH = 3,
+ VMCI_SUCCESS_ACCESS_GRANTED = 2,
+ VMCI_SUCCESS_ENTRY_DEAD = 1,
+ VMCI_SUCCESS = 0,
+ VMCI_ERROR_INVALID_RESOURCE = (-1),
+ VMCI_ERROR_INVALID_ARGS = (-2),
+ VMCI_ERROR_NO_MEM = (-3),
+ VMCI_ERROR_DATAGRAM_FAILED = (-4),
+ VMCI_ERROR_MORE_DATA = (-5),
+ VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
+ VMCI_ERROR_NO_ACCESS = (-7),
+ VMCI_ERROR_NO_HANDLE = (-8),
+ VMCI_ERROR_DUPLICATE_ENTRY = (-9),
+ VMCI_ERROR_DST_UNREACHABLE = (-10),
+ VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
+ VMCI_ERROR_INVALID_PRIV = (-12),
+ VMCI_ERROR_GENERIC = (-13),
+ VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
+ VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
+ VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
+ VMCI_ERROR_NO_PROCESS = (-17),
+ VMCI_ERROR_NO_DATAGRAM = (-18),
+ VMCI_ERROR_NO_RESOURCES = (-19),
+ VMCI_ERROR_UNAVAILABLE = (-20),
+ VMCI_ERROR_NOT_FOUND = (-21),
+ VMCI_ERROR_ALREADY_EXISTS = (-22),
+ VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
+ VMCI_ERROR_INVALID_SIZE = (-24),
+ VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
+ VMCI_ERROR_TIMEOUT = (-26),
+ VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
+ VMCI_ERROR_INCORRECT_IRQL = (-28),
+ VMCI_ERROR_EVENT_UNKNOWN = (-29),
+ VMCI_ERROR_OBSOLETE = (-30),
+ VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
+ VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
+ VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
+ VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
+ VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
+ VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
+ VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
+ VMCI_ERROR_MODULE_NOT_LOADED = (-38),
+ VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
+ VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
+ VMCI_ERROR_WOULD_BLOCK = (-41),
+
+ /* VMCI clients should return error code within this range */
+ VMCI_ERROR_CLIENT_MIN = (-500),
+ VMCI_ERROR_CLIENT_MAX = (-550),
+
+ /* Internal error codes. */
+ VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
+};
+
+/* VMCI reserved events. */
+enum {
+ /* Only applicable to guest endpoints */
+ VMCI_EVENT_CTX_ID_UPDATE = 0,
+
+ /* Applicable to guest and host */
+ VMCI_EVENT_CTX_REMOVED = 1,
+
+ /* Only applicable to guest endpoints */
+ VMCI_EVENT_QP_RESUMED = 2,
+
+ /* Applicable to guest and host */
+ VMCI_EVENT_QP_PEER_ATTACH = 3,
+
+ /* Applicable to guest and host */
+ VMCI_EVENT_QP_PEER_DETACH = 4,
+
+ /*
+ * Applicable to VMX and vmk. On vmk,
+ * this event has the Context payload type.
+ */
+ VMCI_EVENT_MEM_ACCESS_ON = 5,
+
+ /*
+ * Applicable to VMX and vmk. Same as
+ * above for the payload type.
+ */
+ VMCI_EVENT_MEM_ACCESS_OFF = 6,
+ VMCI_EVENT_MAX = 7,
+};
+
+/*
+ * Of the above events, a few are reserved for use in the VMX, and
+ * other endpoints (guest and host kernel) should not use them. For
+ * the rest of the events, we allow both host and guest endpoints to
+ * subscribe to them, to maintain the same API for host and guest
+ * endpoints.
+ */
+#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
+ (_event) == VMCI_EVENT_MEM_ACCESS_OFF)
+
+#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
+ !VMCI_EVENT_VALID_VMX(_event))
+
+/* Reserved guest datagram resource ids. */
+#define VMCI_EVENT_HANDLER 0
+
+/*
+ * VMCI coarse-grained privileges (per context or host
+ * process/endpoint). An entity with the restricted flag is only
+ * allowed to interact with the hypervisor and trusted entities.
+ */
+enum {
+ VMCI_NO_PRIVILEGE_FLAGS = 0,
+ VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
+ VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
+ VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
+ VMCI_PRIVILEGE_FLAG_TRUSTED),
+ VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
+ VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
+ VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
+};
+
+/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
+#define VMCI_RESERVED_RESOURCE_ID_MAX 1023
+
+/*
+ * Driver version.
+ *
+ * Increment major version when you make an incompatible change.
+ * Compatibility goes both ways (old driver with new executable
+ * as well as new driver with old executable).
+ */
+
+/* Never change VMCI_VERSION_SHIFT_WIDTH */
+#define VMCI_VERSION_SHIFT_WIDTH 16
+#define VMCI_MAKE_VERSION(_major, _minor) \
+ ((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))
+
+#define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
+#define VMCI_VERSION_MINOR(v) ((u16) (v))
+
+/*
+ * VMCI_VERSION is always the current version. Subsequently listed
+ * versions are ways of detecting previous versions of the connecting
+ * application (i.e., VMX).
+ *
+ * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
+ * communication.
+ *
+ * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
+ * support.
+ *
+ * VMCI_VERSION_HOSTQP: This version introduced host endpoint support
+ * for hosted products.
+ *
+ * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
+ * support for host endpoints.
+ *
+ * VMCI_VERSION_PREVERS2: This fictional version number is intended to
+ * represent the version of a VMX which doesn't call into the driver
+ * with ioctl VERSION2 and thus doesn't establish its version with the
+ * driver.
+ */
+
+#define VMCI_VERSION VMCI_VERSION_NOVMVM
+#define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0)
+#define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0)
+#define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0)
+#define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0)
+#define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0)
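+
+/*
+ * Illustration (not part of this header): VMCI_VERSION_NOVMVM above
+ * expands via VMCI_MAKE_VERSION() to (11 << 16) | 0 == 0x000b0000, from
+ * which VMCI_VERSION_MAJOR() recovers 11 and VMCI_VERSION_MINOR()
+ * recovers 0.
+ */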
+
+#define VMCI_SOCKETS_MAKE_VERSION(_p) \
+ ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
+
+/*
+ * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
+ * we start at sequence 9f. This gives us the same values that our shipping
+ * products use, starting at 1951, provided we leave out the direction and
+ * structure size. Note that VMMon occupies the block following us, starting
+ * at 2001.
+ */
+#define IOCTL_VMCI_VERSION _IO(7, 0x9f) /* 1951 */
+#define IOCTL_VMCI_INIT_CONTEXT _IO(7, 0xa0)
+#define IOCTL_VMCI_QUEUEPAIR_SETVA _IO(7, 0xa4)
+#define IOCTL_VMCI_NOTIFY_RESOURCE _IO(7, 0xa5)
+#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE _IO(7, 0xa6)
+#define IOCTL_VMCI_VERSION2 _IO(7, 0xa7)
+#define IOCTL_VMCI_QUEUEPAIR_ALLOC _IO(7, 0xa8)
+#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE _IO(7, 0xa9)
+#define IOCTL_VMCI_QUEUEPAIR_DETACH _IO(7, 0xaa)
+#define IOCTL_VMCI_DATAGRAM_SEND _IO(7, 0xab)
+#define IOCTL_VMCI_DATAGRAM_RECEIVE _IO(7, 0xac)
+#define IOCTL_VMCI_CTX_ADD_NOTIFICATION _IO(7, 0xaf)
+#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION _IO(7, 0xb0)
+#define IOCTL_VMCI_CTX_GET_CPT_STATE _IO(7, 0xb1)
+#define IOCTL_VMCI_CTX_SET_CPT_STATE _IO(7, 0xb2)
+#define IOCTL_VMCI_GET_CONTEXT_ID _IO(7, 0xb3)
+#define IOCTL_VMCI_SOCKETS_VERSION _IO(7, 0xb4)
+#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE _IO(7, 0xb8)
+#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
+#define IOCTL_VMCI_SET_NOTIFY _IO(7, 0xcb) /* 1995 */
+/*IOCTL_VMMON_START _IO(7, 0xd1)*/ /* 2001 */
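+
+/*
+ * Illustration (not part of this header): with the conventional _IO()
+ * encoding (type in bits 8-15, number in bits 0-7, no direction or size
+ * bits), the values above decode as
+ *
+ *	_IO(7, 0x9f) == (7 << 8) | 0x9f == 1951
+ *	_IO(7, 0xcb) == (7 << 8) | 0xcb == 1995
+ *	_IO(7, 0xd1) == (7 << 8) | 0xd1 == 2001
+ */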
+
+/*
+ * struct vmci_queue_header - VMCI Queue Header information.
+ *
+ * A Queue cannot stand by itself as designed. Each Queue's header
+ * contains a pointer into itself (the producer_tail) and into its peer
+ * (consumer_head). The reason for the separation is one of
+ * accessibility: Each end-point can modify two things: where the next
+ * location to enqueue is within its produce_q (producer_tail); and
+ * where the next dequeue location is in its consume_q (consumer_head).
+ *
+ * An end-point cannot modify the pointers of its peer (guest to
+ * guest; NOTE that in the host both queue headers are mapped r/w).
+ * But, each end-point needs read access to both Queue header
+ * structures in order to determine how much space is used (or left)
+ * in the Queue. This is because, for an end-point to know how full
+ * its produce_q is, it needs to use the consumer_head that points into
+ * the produce_q; but -that- consumer_head is in the Queue header for
+ * that end-point's consume_q.
+ *
+ * Thoroughly confused? Sorry.
+ *
+ * producer_tail: the point to enqueue new entrants. When you approach
+ * a line in a store, for example, you walk up to the tail.
+ *
+ * consumer_head: the point in the queue from which the next element is
+ * dequeued. In other words, whoever is at the head of the line is
+ * served next.
+ *
+ * Also, producer_tail points to an empty byte in the Queue, whereas
+ * consumer_head points to a valid byte of data (unless producer_tail ==
+ * consumer_head in which case consumer_head does not point to a valid
+ * byte of data).
+ *
+ * For a queue with a buffer of 'size' bytes, the tail and head pointers
+ * will be in the range [0, size-1].
+ *
+ * If produce_q_header->producer_tail == consume_q_header->consumer_head
+ * then the produce_q is empty.
+ */
+struct vmci_queue_header {
+ /* All fields are 64bit and aligned. */
+ struct vmci_handle handle; /* Identifier. */
+ atomic64_t producer_tail; /* Offset in this queue. */
+ atomic64_t consumer_head; /* Offset in peer queue. */
+};
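+
+/*
+ * Illustrative sketch (not part of this header), using the accessors
+ * defined further below; ph and ch are assumed to be the local end-point's
+ * produce_q and consume_q headers:
+ *
+ *	u64 tail = vmci_q_header_producer_tail(ph);  (our enqueue offset)
+ *	u64 head = vmci_q_header_consumer_head(ch);  (peer's dequeue offset)
+ *	bool produce_q_empty = (tail == head);
+ */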
+
+/*
+ * struct vmci_datagram - Base struct for vmci datagrams.
+ * @dst: A vmci_handle that tracks the destination of the datagram.
+ * @src: A vmci_handle that tracks the source of the datagram.
+ * @payload_size: The size of the payload.
+ *
+ * vmci_datagram structs are used when sending vmci datagrams. They include
+ * the necessary source and destination information to properly route
+ * the information along with the size of the package.
+ */
+struct vmci_datagram {
+ struct vmci_handle dst;
+ struct vmci_handle src;
+ u64 payload_size;
+};
+
+/*
+ * VMCI_FLAG_WELLKNOWN_DG_HND creates a well-known handle instead of a
+ * per-context handle, and VMCI_FLAG_DG_DELAYED_CB defers datagram delivery,
+ * so that the datagram callback is invoked in a delayed context (not
+ * interrupt context).
+ */
+#define VMCI_FLAG_DG_NONE 0
+#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
+#define VMCI_FLAG_ANYCID_DG_HND 0x2
+#define VMCI_FLAG_DG_DELAYED_CB 0x4
+
+/*
+ * Maximum supported size of a VMCI datagram for routable datagrams.
+ * Datagrams going to the hypervisor are allowed to be larger.
+ */
+#define VMCI_MAX_DG_SIZE (17 * 4096)
+#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
+ sizeof(struct vmci_datagram))
+#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
+ sizeof(struct vmci_datagram))
+#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
+#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
+#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
+#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
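+
+/*
+ * Worked example (illustrative only), assuming sizeof(struct vmci_datagram)
+ * is 24 bytes: a datagram dg with a 100-byte payload has
+ * VMCI_DG_SIZE(dg) == 24 + 100 == 124 and
+ * VMCI_DG_SIZE_ALIGNED(dg) == (124 + 7) & ~7 == 128.
+ */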
+
+struct vmci_event_payload_qp {
+ struct vmci_handle handle; /* queue_pair handle. */
+ u32 peer_id; /* Context id of attaching/detaching VM. */
+ u32 _pad;
+};
+
+/* Flags for VMCI queue_pair API. */
+enum {
+ /* Fail alloc if QP not created by peer. */
+ VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,
+
+ /* Only allow attaches from local context. */
+ VMCI_QPFLAG_LOCAL = 1 << 1,
+
+ /* Host won't block when guest is quiesced. */
+ VMCI_QPFLAG_NONBLOCK = 1 << 2,
+
+ /* Pin data pages in ESX. Used with NONBLOCK */
+ VMCI_QPFLAG_PINNED = 1 << 3,
+
+ /* Update the following flag when adding new flags. */
+ VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
+ VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
+
+ /* Convenience flags */
+ VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
+ VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
+};
+
+/*
+ * We allow at least 1024 more event datagrams from the hypervisor past the
+ * normally allowed datagrams pending for a given context. We define this
+ * limit on event datagrams from the hypervisor to guard against DoS attack
+ * from a malicious VM which could repeatedly attach to and detach from a queue
+ * pair, causing events to be queued at the destination VM. However, the rate
+ * at which such events can be generated is small since it requires a VM exit
+ * and handling of queue pair attach/detach call at the hypervisor. Event
+ * datagrams may be queued up at the destination VM if it has interrupts
+ * disabled or if it is not draining events for some other reason. 1024
+ * datagrams is a grossly conservative estimate of the time for which
+ * interrupts may be disabled in the destination VM, but at the same time does
+ * not exacerbate the memory pressure problem on the host by much (size of each
+ * event datagram is small).
+ */
+#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
+ (VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
+ 1024 * (sizeof(struct vmci_datagram) + \
+ sizeof(struct vmci_event_data_max)))
+
+/*
+ * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
+ * hypervisor resources. Struct size is 16 bytes. All fields in struct are
+ * aligned to their natural alignment.
+ */
+struct vmci_resource_query_hdr {
+ struct vmci_datagram hdr;
+ u32 num_resources;
+ u32 _padding;
+};
+
+/*
+ * Convenience struct for negotiating vectors. Must match layout of
+ * VMCIResourceQueryHdr minus the struct vmci_datagram header.
+ */
+struct vmci_resource_query_msg {
+ u32 num_resources;
+ u32 _padding;
+ u32 resources[1];
+};
+
+/*
+ * The maximum number of resources that can be queried using
+ * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
+ * bits of a positive return value. Negative values are reserved for
+ * errors.
+ */
+#define VMCI_RESOURCE_QUERY_MAX_NUM 31
+
+/* Maximum size for the VMCI_RESOURCE_QUERY request. */
+#define VMCI_RESOURCE_QUERY_MAX_SIZE \
+ (sizeof(struct vmci_resource_query_hdr) + \
+ sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
+
+/*
+ * Struct used for setting the notification bitmap. All fields in
+ * struct are aligned to their natural alignment.
+ */
+struct vmci_notify_bm_set_msg {
+ struct vmci_datagram hdr;
+ u32 bitmap_ppn;
+ u32 _pad;
+};
+
+/*
+ * Struct used for linking a doorbell handle with an index in the
+ * notify bitmap. All fields in struct are aligned to their natural
+ * alignment.
+ */
+struct vmci_doorbell_link_msg {
+ struct vmci_datagram hdr;
+ struct vmci_handle handle;
+ u64 notify_idx;
+};
+
+/*
+ * Struct used for unlinking a doorbell handle from an index in the
+ * notify bitmap. All fields in struct are aligned to their natural
+ * alignment.
+ */
+struct vmci_doorbell_unlink_msg {
+ struct vmci_datagram hdr;
+ struct vmci_handle handle;
+};
+
+/*
+ * Struct used for generating a notification on a doorbell handle. All
+ * fields in struct are aligned to their natural alignment.
+ */
+struct vmci_doorbell_notify_msg {
+ struct vmci_datagram hdr;
+ struct vmci_handle handle;
+};
+
+/*
+ * This struct is used to contain data for events. Size of this struct is a
+ * multiple of 8 bytes, and all fields are aligned to their natural alignment.
+ */
+struct vmci_event_data {
+ u32 event; /* 4 bytes. */
+ u32 _pad;
+ /* Event payload is put here. */
+};
+
+/*
+ * Define the different VMCI_EVENT payload data types here. All structs must
+ * be a multiple of 8 bytes, and fields must be aligned to their natural
+ * alignment.
+ */
+struct vmci_event_payld_ctx {
+ u32 context_id; /* 4 bytes. */
+ u32 _pad;
+};
+
+struct vmci_event_payld_qp {
+ struct vmci_handle handle; /* queue_pair handle. */
+ u32 peer_id; /* Context id of attaching/detaching VM. */
+ u32 _pad;
+};
+
+/*
+ * We define the following struct to get the size of the maximum event
+ * data the hypervisor may send to the guest. If adding a new event
+ * payload type above, add it to the following struct too (inside the
+ * union).
+ */
+struct vmci_event_data_max {
+ struct vmci_event_data event_data;
+ union {
+ struct vmci_event_payld_ctx context_payload;
+ struct vmci_event_payld_qp qp_payload;
+ } ev_data_payload;
+};
+
+/*
+ * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
+ * VMCI_EVENT_HANDLER messages. Struct size is 32 bytes. All fields
+ * in struct are aligned to their natural alignment.
+ */
+struct vmci_event_msg {
+ struct vmci_datagram hdr;
+
+ /* Has event type and payload. */
+ struct vmci_event_data event_data;
+
+ /* Payload gets put here. */
+};
+
+/* Event with context payload. */
+struct vmci_event_ctx {
+ struct vmci_event_msg msg;
+ struct vmci_event_payld_ctx payload;
+};
+
+/* Event with QP payload. */
+struct vmci_event_qp {
+ struct vmci_event_msg msg;
+ struct vmci_event_payld_qp payload;
+};
+
+/*
+ * Structs used for queue_pair alloc and detach messages. We align fields of
+ * these structs to 64bit boundaries.
+ */
+struct vmci_qp_alloc_msg {
+ struct vmci_datagram hdr;
+ struct vmci_handle handle;
+ u32 peer;
+ u32 flags;
+ u64 produce_size;
+ u64 consume_size;
+ u64 num_ppns;
+
+ /* List of PPNs placed here. */
+};
+
+struct vmci_qp_detach_msg {
+ struct vmci_datagram hdr;
+ struct vmci_handle handle;
+};
+
+/* VMCI Doorbell API. */
+#define VMCI_FLAG_DELAYED_CB 0x01
+
+typedef void (*vmci_callback) (void *client_data);
+
+/*
+ * struct vmci_qp - A vmw_vmci queue pair handle.
+ *
+ * This structure is used as a handle to a queue pair created by
+ * VMCI. It is intentionally left opaque to clients.
+ */
+struct vmci_qp;
+
+/* Callback needed for correctly waiting on events. */
+typedef int (*vmci_datagram_recv_cb) (void *client_data,
+ struct vmci_datagram *msg);
+
+/* VMCI Event API. */
+typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
+ void *client_data);
+
+/*
+ * We use the following inline function to access the payload data
+ * associated with an event data.
+ */
+static inline const void *
+vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
+{
+ return (const char *)ev_data + sizeof(*ev_data);
+}
+
+static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
+{
+ return (void *)vmci_event_data_const_payload(ev_data);
+}
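+
+/*
+ * Illustrative sketch (not part of this header): inside a vmci_event_cb
+ * handler that receives 'ed', the payload may be accessed as, e.g.:
+ *
+ *	if (ed->event == VMCI_EVENT_QP_PEER_DETACH) {
+ *		const struct vmci_event_payld_qp *p =
+ *			vmci_event_data_const_payload(ed);
+ *		(p->handle and p->peer_id identify the detached peer)
+ *	}
+ */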
+
+/*
+ * Helper to add a given offset to a head or tail pointer. Wraps the
+ * value of the pointer around the max size of the queue.
+ */
+static inline void vmci_qp_add_pointer(atomic64_t *var,
+ size_t add,
+ u64 size)
+{
+ u64 new_val = atomic64_read(var);
+
+ if (new_val >= size - add)
+ new_val -= size;
+
+ new_val += add;
+
+ atomic64_set(var, new_val);
+}
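+
+/*
+ * Worked example (illustrative only): with size == 1024, a pointer at
+ * offset 1000 advanced by add == 100 satisfies 1000 >= 1024 - 100, so the
+ * unsigned arithmetic above yields (1000 - 1024 + 100) == 76, which is
+ * exactly (1000 + 100) % 1024.
+ */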
+
+/*
+ * Helper routine to get the Producer Tail from the supplied queue.
+ */
+static inline u64
+vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
+{
+ struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
+ return atomic64_read(&qh->producer_tail);
+}
+
+/*
+ * Helper routine to get the Consumer Head from the supplied queue.
+ */
+static inline u64
+vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
+{
+ struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
+ return atomic64_read(&qh->consumer_head);
+}
+
+/*
+ * Helper routine to increment the Producer Tail. Fundamentally,
+ * vmci_qp_add_pointer() is used to manipulate the tail itself.
+ */
+static inline void
+vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
+ size_t add,
+ u64 queue_size)
+{
+ vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
+}
+
+/*
+ * Helper routine to increment the Consumer Head. Fundamentally,
+ * vmci_qp_add_pointer() is used to manipulate the head itself.
+ */
+static inline void
+vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
+ size_t add,
+ u64 queue_size)
+{
+ vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
+}
+
+/*
+ * Helper routine for getting the head and the tail pointer for a queue.
+ * Both the VMCIQueues are needed to get both the pointers for one queue.
+ */
+static inline void
+vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
+ const struct vmci_queue_header *consume_q_header,
+ u64 *producer_tail,
+ u64 *consumer_head)
+{
+ if (producer_tail)
+ *producer_tail = vmci_q_header_producer_tail(produce_q_header);
+
+ if (consumer_head)
+ *consumer_head = vmci_q_header_consumer_head(consume_q_header);
+}
+
+static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
+ const struct vmci_handle handle)
+{
+ q_header->handle = handle;
+ atomic64_set(&q_header->producer_tail, 0);
+ atomic64_set(&q_header->consumer_head, 0);
+}
+
+/*
+ * Finds available free space in a produce queue to enqueue more
+ * data or reports an error if queue pair corruption is detected.
+ */
+static inline s64
+vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
+ const struct vmci_queue_header *consume_q_header,
+ const u64 produce_q_size)
+{
+ u64 tail;
+ u64 head;
+ u64 free_space;
+
+ tail = vmci_q_header_producer_tail(produce_q_header);
+ head = vmci_q_header_consumer_head(consume_q_header);
+
+ if (tail >= produce_q_size || head >= produce_q_size)
+ return VMCI_ERROR_INVALID_SIZE;
+
+ /*
+ * Deduct 1 to avoid tail becoming equal to head which causes
+ * ambiguity. If head and tail are equal it means that the
+ * queue is empty.
+ */
+ if (tail >= head)
+ free_space = produce_q_size - (tail - head) - 1;
+ else
+ free_space = head - tail - 1;
+
+ return free_space;
+}
+
+/*
+ * vmci_q_header_free_space() does all the heavy lifting of
+ * determining the number of free bytes in a Queue. This routine
+ * then subtracts that size from the full size of the Queue so
+ * the caller knows how many bytes are ready to be dequeued.
+ * Results:
+ * On success, available data size in bytes (up to MAX_INT64).
+ * On failure, appropriate error code.
+ */
+static inline s64
+vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
+ const struct vmci_queue_header *produce_q_header,
+ const u64 consume_q_size)
+{
+ s64 free_space;
+
+ free_space = vmci_q_header_free_space(consume_q_header,
+ produce_q_header, consume_q_size);
+ if (free_space < VMCI_SUCCESS)
+ return free_space;
+
+ return consume_q_size - free_space - 1;
+}
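+
+/*
+ * Illustrative sketch (not part of this header); ph/ch are the local
+ * end-point's produce_q/consume_q headers and the queue sizes are
+ * assumptions:
+ *
+ *	s64 space = vmci_q_header_free_space(ph, ch, produce_q_size);
+ *	s64 ready = vmci_q_header_buf_ready(ch, ph, consume_q_size);
+ *	if (space < VMCI_SUCCESS)
+ *		return space;	(queue pair corruption detected)
+ */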
+
+
+#endif /* _VMW_VMCI_DEF_H_ */
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
new file mode 100644
index 000000000..a3fa537e7
--- /dev/null
+++ b/include/linux/vringh.h
@@ -0,0 +1,258 @@
+/*
+ * Linux host-side vring helpers; for when the kernel needs to access
+ * someone else's vring.
+ *
+ * Copyright IBM Corporation, 2013.
+ * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Written by: Rusty Russell <rusty@rustcorp.com.au>
+ */
+#ifndef _LINUX_VRINGH_H
+#define _LINUX_VRINGH_H
+#include <uapi/linux/virtio_ring.h>
+#include <linux/virtio_byteorder.h>
+#include <linux/uio.h>
+#include <linux/slab.h>
+#include <asm/barrier.h>
+
+/* virtio_ring with information needed for host access. */
+struct vringh {
+ /* Everything is little endian */
+ bool little_endian;
+
+ /* Guest publishes used event idx (note: we always do). */
+ bool event_indices;
+
+ /* Can we get away with weak barriers? */
+ bool weak_barriers;
+
+ /* Last available index we saw (ie. where we're up to). */
+ u16 last_avail_idx;
+
+ /* Last index we used. */
+ u16 last_used_idx;
+
+ /* How many descriptors we've completed since last need_notify(). */
+ u32 completed;
+
+ /* The vring (note: it may contain user pointers!) */
+ struct vring vring;
+
+ /* The function to call to notify the guest about added buffers */
+ void (*notify)(struct vringh *);
+};
+
+/**
+ * struct vringh_config_ops - ops for creating a host vring from a virtio driver
+ * @find_vrhs: find the host vrings and instantiate them
+ * vdev: the virtio_device
+ * nhvrs: the number of host vrings to find
+ * vrhs: on success, includes the new host vrings
+ * callbacks: array of driver callbacks, for each host vring
+ * include a NULL entry for vqs that do not need a callback
+ * Returns 0 on success or error status
+ * @del_vrhs: free the host vrings found by find_vrhs().
+ */
+struct virtio_device;
+typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
+struct vringh_config_ops {
+ int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
+ struct vringh *vrhs[], vrh_callback_t *callbacks[]);
+ void (*del_vrhs)(struct virtio_device *vdev);
+};
+
+/* The memory the vring can access, and what offset to apply. */
+struct vringh_range {
+ u64 start, end_incl;
+ u64 offset;
+};
+
+/**
+ * struct vringh_iov - iovec mangler.
+ *
+ * Mangles iovec in place, and restores it.
+ * Remaining data is iov + i, of used - i elements.
+ */
+struct vringh_iov {
+ struct iovec *iov;
+ size_t consumed; /* Within iov[i] */
+ unsigned i, used, max_num;
+};
+
+/**
+ * struct vringh_kiov - kvec mangler.
+ *
+ * Mangles kvec in place, and restores it.
+ * Remaining data is iov + i, of used - i elements.
+ */
+struct vringh_kiov {
+ struct kvec *iov;
+ size_t consumed; /* Within iov[i] */
+ unsigned i, used, max_num;
+};
+
+/* Flag on max_num to indicate we're kmalloced. */
+#define VRINGH_IOV_ALLOCATED 0x8000000
+
+/* Helpers for userspace vrings. */
+int vringh_init_user(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc __user *desc,
+ struct vring_avail __user *avail,
+ struct vring_used __user *used);
+
+static inline void vringh_iov_init(struct vringh_iov *iov,
+ struct iovec *iovec, unsigned num)
+{
+ iov->used = iov->i = 0;
+ iov->consumed = 0;
+ iov->max_num = num;
+ iov->iov = iovec;
+}
+
+static inline void vringh_iov_reset(struct vringh_iov *iov)
+{
+ iov->iov[iov->i].iov_len += iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+ iov->consumed = 0;
+ iov->i = 0;
+}
+
+static inline void vringh_iov_cleanup(struct vringh_iov *iov)
+{
+ if (iov->max_num & VRINGH_IOV_ALLOCATED)
+ kfree(iov->iov);
+ iov->max_num = iov->used = iov->i = iov->consumed = 0;
+ iov->iov = NULL;
+}
+
+/* Convert a descriptor into iovecs. */
+int vringh_getdesc_user(struct vringh *vrh,
+ struct vringh_iov *riov,
+ struct vringh_iov *wiov,
+ bool (*getrange)(struct vringh *vrh,
+ u64 addr, struct vringh_range *r),
+ u16 *head);
+
+/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
+ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);
+
+/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
+ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
+ const void *src, size_t len);
+
+/* Mark a descriptor as used. */
+int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
+int vringh_complete_multi_user(struct vringh *vrh,
+ const struct vring_used_elem used[],
+ unsigned num_used);
+
+/* Pretend we've never seen descriptor (for easy error handling). */
+void vringh_abandon_user(struct vringh *vrh, unsigned int num);
+
+/* Do we need to fire the eventfd to notify the other side? */
+int vringh_need_notify_user(struct vringh *vrh);
+
+bool vringh_notify_enable_user(struct vringh *vrh);
+void vringh_notify_disable_user(struct vringh *vrh);
+
+/* Helpers for kernelspace vrings. */
+int vringh_init_kern(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used);
+
+static inline void vringh_kiov_init(struct vringh_kiov *kiov,
+ struct kvec *kvec, unsigned num)
+{
+ kiov->used = kiov->i = 0;
+ kiov->consumed = 0;
+ kiov->max_num = num;
+ kiov->iov = kvec;
+}
+
+static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
+{
+ kiov->iov[kiov->i].iov_len += kiov->consumed;
+ kiov->iov[kiov->i].iov_base -= kiov->consumed;
+ kiov->consumed = 0;
+ kiov->i = 0;
+}
+
+static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
+{
+ if (kiov->max_num & VRINGH_IOV_ALLOCATED)
+ kfree(kiov->iov);
+ kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
+ kiov->iov = NULL;
+}
+
+int vringh_getdesc_kern(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ u16 *head,
+ gfp_t gfp);
+
+ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
+ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
+ const void *src, size_t len);
+void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
+int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);
+
+bool vringh_notify_enable_kern(struct vringh *vrh);
+void vringh_notify_disable_kern(struct vringh *vrh);
+
+int vringh_need_notify_kern(struct vringh *vrh);
+
+/* Notify the guest about buffers added to the used ring */
+static inline void vringh_notify(struct vringh *vrh)
+{
+ if (vrh->notify)
+ vrh->notify(vrh);
+}
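+
+/*
+ * Illustrative host-side sketch (not part of this header). 'vrh' is a
+ * struct vringh already set up with vringh_init_kern(); r_kvecs, w_kvecs,
+ * req, resp and resp_len are hypothetical caller-provided buffers/sizes:
+ *
+ *	struct vringh_kiov riov, wiov;
+ *	u16 head;
+ *
+ *	vringh_kiov_init(&riov, r_kvecs, ARRAY_SIZE(r_kvecs));
+ *	vringh_kiov_init(&wiov, w_kvecs, ARRAY_SIZE(w_kvecs));
+ *	if (vringh_getdesc_kern(&vrh, &riov, &wiov, &head, GFP_KERNEL) == 1) {
+ *		ssize_t in  = vringh_iov_pull_kern(&riov, req, sizeof(req));
+ *		ssize_t out = vringh_iov_push_kern(&wiov, resp, resp_len);
+ *		if (in >= 0 && out >= 0)
+ *			vringh_complete_kern(&vrh, head, out);
+ *		if (vringh_need_notify_kern(&vrh) > 0)
+ *			vringh_notify(&vrh);
+ *	}
+ */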
+
+static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
+{
+ return __virtio16_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
+{
+ return __cpu_to_virtio16(vrh->little_endian, val);
+}
+
+static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
+{
+ return __virtio32_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
+{
+ return __cpu_to_virtio32(vrh->little_endian, val);
+}
+
+static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
+{
+ return __virtio64_to_cpu(vrh->little_endian, val);
+}
+
+static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
+{
+ return __cpu_to_virtio64(vrh->little_endian, val);
+}
+#endif /* _LINUX_VRINGH_H */
diff --git a/include/linux/vt.h b/include/linux/vt.h
new file mode 100644
index 000000000..b186e0435
--- /dev/null
+++ b/include/linux/vt.h
@@ -0,0 +1,27 @@
+#ifndef _LINUX_VT_H
+#define _LINUX_VT_H
+
+#include <uapi/linux/vt.h>
+
+
+/* Virtual Terminal events. */
+#define VT_ALLOCATE 0x0001 /* Console got allocated */
+#define VT_DEALLOCATE 0x0002 /* Console will be deallocated */
+#define VT_WRITE 0x0003 /* A char got output */
+#define VT_UPDATE 0x0004 /* A bigger update occurred */
+#define VT_PREWRITE 0x0005 /* A char is about to be written to the console */
+
+#ifdef CONFIG_VT_CONSOLE
+
+extern int vt_kmsg_redirect(int new);
+
+#else
+
+static inline int vt_kmsg_redirect(int new)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _LINUX_VT_H */
diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h
new file mode 100644
index 000000000..f38c10ba3
--- /dev/null
+++ b/include/linux/vt_buffer.h
@@ -0,0 +1,59 @@
+/*
+ * include/linux/vt_buffer.h -- Access to VT screen buffer
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ *
+ * This is a set of macros and functions which are used in the
+ * console driver and related code to access the screen buffer.
+ * In most cases the console works with a simple in-memory buffer,
+ * but when handling hardware text mode consoles, we store
+ * the foreground console directly in video memory.
+ */
+
+#ifndef _LINUX_VT_BUFFER_H_
+#define _LINUX_VT_BUFFER_H_
+
+
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_MDA_CONSOLE)
+#include <asm/vga.h>
+#endif
+
+#ifndef VT_BUF_HAVE_RW
+#define scr_writew(val, addr) (*(addr) = (val))
+#define scr_readw(addr) (*(addr))
+#endif
+
+#ifndef VT_BUF_HAVE_MEMSETW
+static inline void scr_memsetw(u16 *s, u16 c, unsigned int count)
+{
+ count /= 2;
+ while (count--)
+ scr_writew(c, s++);
+}
+#endif
+
+#ifndef VT_BUF_HAVE_MEMCPYW
+static inline void scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
+{
+ count /= 2;
+ while (count--)
+ scr_writew(scr_readw(s++), d++);
+}
+#endif
+
+#ifndef VT_BUF_HAVE_MEMMOVEW
+static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count)
+{
+ if (d < s)
+ scr_memcpyw(d, s, count);
+ else {
+ count /= 2;
+ d += count;
+ s += count;
+ while (count--)
+ scr_writew(scr_readw(--s), --d);
+ }
+}
+#endif
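+
+/*
+ * Illustrative sketch (not part of this header), assuming a VGA-style text
+ * cell layout of (attribute << 8) | character: clearing 'cols' cells at
+ * line_base to grey-on-black spaces could be written as
+ *
+ *	scr_memsetw(line_base, 0x0720, cols * 2);	(count is in bytes)
+ */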
+
+#endif
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
new file mode 100644
index 000000000..8d7634247
--- /dev/null
+++ b/include/linux/vt_kern.h
@@ -0,0 +1,196 @@
+#ifndef _VT_KERN_H
+#define _VT_KERN_H
+
+/*
+ * this really is an extension of the vc_cons structure in console.c, but
+ * with information needed by the vt package
+ */
+
+#include <linux/vt.h>
+#include <linux/kd.h>
+#include <linux/tty.h>
+#include <linux/mutex.h>
+#include <linux/console_struct.h>
+#include <linux/mm.h>
+#include <linux/consolemap.h>
+#include <linux/notifier.h>
+
+/*
+ * Presently, a lot of graphics programs do not restore the contents of
+ * the higher font pages. Defining this flag will avoid use of them, but
+ * will lose support for PIO_FONTRESET. Note that many font operations are
+ * not likely to work with these programs anyway; they need to be
+ * fixed. The linux/Documentation directory includes a code snippet
+ * to save and restore the text font.
+ */
+#ifdef CONFIG_VGA_CONSOLE
+#define BROKEN_GRAPHICS_PROGRAMS 1
+#endif
+
+extern void kd_mksound(unsigned int hz, unsigned int ticks);
+extern int kbd_rate(struct kbd_repeat *rep);
+extern int fg_console, last_console, want_console;
+
+/* console.c */
+
+int vc_allocate(unsigned int console);
+int vc_cons_allocated(unsigned int console);
+int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines);
+struct vc_data *vc_deallocate(unsigned int console);
+void reset_palette(struct vc_data *vc);
+void do_blank_screen(int entering_gfx);
+void do_unblank_screen(int leaving_gfx);
+void unblank_screen(void);
+void poke_blanked_console(void);
+int con_font_op(struct vc_data *vc, struct console_font_op *op);
+int con_set_cmap(unsigned char __user *cmap);
+int con_get_cmap(unsigned char __user *cmap);
+void scrollback(struct vc_data *vc, int lines);
+void scrollfront(struct vc_data *vc, int lines);
+void clear_buffer_attributes(struct vc_data *vc);
+void update_region(struct vc_data *vc, unsigned long start, int count);
+void redraw_screen(struct vc_data *vc, int is_switch);
+#define update_screen(x) redraw_screen(x, 0)
+#define switch_screen(x) redraw_screen(x, 1)
+
+struct tty_struct;
+int tioclinux(struct tty_struct *tty, unsigned long arg);
+
+#ifdef CONFIG_CONSOLE_TRANSLATIONS
+/* consolemap.c */
+
+struct unimapinit;
+struct unipair;
+
+int con_set_trans_old(unsigned char __user * table);
+int con_get_trans_old(unsigned char __user * table);
+int con_set_trans_new(unsigned short __user * table);
+int con_get_trans_new(unsigned short __user * table);
+int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui);
+int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list);
+int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list);
+int con_set_default_unimap(struct vc_data *vc);
+void con_free_unimap(struct vc_data *vc);
+int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
+
+#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \
+ ((vc)->vc_toggle_meta ? 0x80 : 0)])
+#else
+static inline int con_set_trans_old(unsigned char __user *table)
+{
+ return 0;
+}
+static inline int con_get_trans_old(unsigned char __user *table)
+{
+ return -EINVAL;
+}
+static inline int con_set_trans_new(unsigned short __user *table)
+{
+ return 0;
+}
+static inline int con_get_trans_new(unsigned short __user *table)
+{
+ return -EINVAL;
+}
+static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+{
+ return 0;
+}
+static inline
+int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
+{
+ return 0;
+}
+static inline
+int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct,
+ struct unipair __user *list)
+{
+ return -EINVAL;
+}
+static inline int con_set_default_unimap(struct vc_data *vc)
+{
+ return 0;
+}
+static inline void con_free_unimap(struct vc_data *vc)
+{
+}
+static inline void con_protect_unimap(struct vc_data *vc, int rdonly)
+{
+}
+static inline
+int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc)
+{
+ return 0;
+}
+
+#define vc_translate(vc, c) (c)
+#endif
+
+/* vt.c */
+void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
+int vt_waitactive(int n);
+void change_console(struct vc_data *new_vc);
+void reset_vc(struct vc_data *vc);
+extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
+ int deflt);
+int vty_init(const struct file_operations *console_fops);
+
+static inline bool vt_force_oops_output(struct vc_data *vc)
+{
+ if (oops_in_progress && vc->vc_panic_force_write && panic_timeout >= 0)
+ return true;
+ return false;
+}
+
+extern char vt_dont_switch;
+extern int default_utf8;
+extern int global_cursor_default;
+
+struct vt_spawn_console {
+ spinlock_t lock;
+ struct pid *pid;
+ int sig;
+};
+extern struct vt_spawn_console vt_spawn_con;
+
+extern int vt_move_to_console(unsigned int vt, int alloc);
+
+/* Interfaces for VC notification of character events (for accessibility etc) */
+
+struct vt_notifier_param {
+ struct vc_data *vc; /* VC on which the update happened */
+ unsigned int c; /* Printed char */
+};
+
+extern int register_vt_notifier(struct notifier_block *nb);
+extern int unregister_vt_notifier(struct notifier_block *nb);
+
+extern void hide_boot_cursor(bool hide);
+
+/* keyboard provided interfaces */
+extern int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm);
+extern int vt_do_kdskbmode(int console, unsigned int arg);
+extern int vt_do_kdskbmeta(int console, unsigned int arg);
+extern int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc,
+ int perm);
+extern int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe,
+ int perm, int console);
+extern int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb,
+ int perm);
+extern int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm);
+extern int vt_do_kdgkbmode(int console);
+extern int vt_do_kdgkbmeta(int console);
+extern void vt_reset_unicode(int console);
+extern int vt_get_shift_state(void);
+extern void vt_reset_keyboard(int console);
+extern int vt_get_leds(int console, int flag);
+extern int vt_get_kbd_mode_bit(int console, int bit);
+extern void vt_set_kbd_mode_bit(int console, int bit);
+extern void vt_clr_kbd_mode_bit(int console, int bit);
+extern void vt_set_led_state(int console, int leds);
+extern void vt_kbd_con_start(int console);
+extern void vt_kbd_con_stop(int console);
+
+
+#endif /* _VT_KERN_H */
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
new file mode 100644
index 000000000..c5165fd25
--- /dev/null
+++ b/include/linux/vtime.h
@@ -0,0 +1,125 @@
+#ifndef _LINUX_KERNEL_VTIME_H
+#define _LINUX_KERNEL_VTIME_H
+
+#include <linux/context_tracking_state.h>
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#include <asm/vtime.h>
+#endif
+
+
+struct task_struct;
+
+/*
+ * vtime_accounting_enabled() definitions/declarations
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static inline bool vtime_accounting_enabled(void) { return true; }
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static inline bool vtime_accounting_enabled(void)
+{
+ if (context_tracking_is_enabled()) {
+ if (context_tracking_cpu_is_enabled())
+ return true;
+ }
+
+ return false;
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+static inline bool vtime_accounting_enabled(void) { return false; }
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+
+/*
+ * Common vtime APIs
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+
+#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
+extern void vtime_task_switch(struct task_struct *prev);
+#else
+extern void vtime_common_task_switch(struct task_struct *prev);
+static inline void vtime_task_switch(struct task_struct *prev)
+{
+ if (vtime_accounting_enabled())
+ vtime_common_task_switch(prev);
+}
+#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
+
+extern void vtime_account_system(struct task_struct *tsk);
+extern void vtime_account_idle(struct task_struct *tsk);
+extern void vtime_account_user(struct task_struct *tsk);
+
+#ifdef __ARCH_HAS_VTIME_ACCOUNT
+extern void vtime_account_irq_enter(struct task_struct *tsk);
+#else
+extern void vtime_common_account_irq_enter(struct task_struct *tsk);
+static inline void vtime_account_irq_enter(struct task_struct *tsk)
+{
+ if (vtime_accounting_enabled())
+ vtime_common_account_irq_enter(tsk);
+}
+#endif /* __ARCH_HAS_VTIME_ACCOUNT */
+
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+static inline void vtime_task_switch(struct task_struct *prev) { }
+static inline void vtime_account_system(struct task_struct *tsk) { }
+static inline void vtime_account_user(struct task_struct *tsk) { }
+static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
+
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+ if (vtime_accounting_enabled())
+ vtime_gen_account_irq_exit(tsk);
+}
+
+extern void vtime_user_enter(struct task_struct *tsk);
+
+static inline void vtime_user_exit(struct task_struct *tsk)
+{
+ vtime_account_user(tsk);
+}
+extern void vtime_guest_enter(struct task_struct *tsk);
+extern void vtime_guest_exit(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk, int cpu);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+ /* On hard|softirq exit we always account to hard|softirq cputime */
+ vtime_account_system(tsk);
+}
+static inline void vtime_user_enter(struct task_struct *tsk) { }
+static inline void vtime_user_exit(struct task_struct *tsk) { }
+static inline void vtime_guest_enter(struct task_struct *tsk) { }
+static inline void vtime_guest_exit(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
+#endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+extern void irqtime_account_irq(struct task_struct *tsk);
+#else
+static inline void irqtime_account_irq(struct task_struct *tsk) { }
+#endif
+
+static inline void account_irq_enter_time(struct task_struct *tsk)
+{
+ vtime_account_irq_enter(tsk);
+ irqtime_account_irq(tsk);
+}
+
+static inline void account_irq_exit_time(struct task_struct *tsk)
+{
+ vtime_account_irq_exit(tsk);
+ irqtime_account_irq(tsk);
+}
+
+#endif /* _LINUX_KERNEL_VTIME_H */
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
new file mode 100644
index 000000000..d58594a32
--- /dev/null
+++ b/include/linux/w1-gpio.h
@@ -0,0 +1,26 @@
+/*
+ * w1-gpio interface to platform code
+ *
+ * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_W1_GPIO_H
+#define _LINUX_W1_GPIO_H
+
+/**
+ * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
+ * @pin: GPIO pin to use
+ * @is_open_drain: GPIO pin is configured as open drain
+ */
+struct w1_gpio_platform_data {
+ unsigned int pin;
+ unsigned int is_open_drain:1;
+ void (*enable_external_pullup)(int enable);
+ unsigned int ext_pullup_enable_pin;
+ unsigned int pullup_duration;
+};
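+
+/*
+ * Illustrative board-file sketch (not part of this header); the GPIO
+ * number and the device wiring are assumptions for a hypothetical board:
+ *
+ *	static struct w1_gpio_platform_data my_w1_pdata = {
+ *		.pin		= 24,
+ *		.is_open_drain	= 0,
+ *	};
+ *
+ *	static struct platform_device my_w1_device = {
+ *		.name			= "w1-gpio",
+ *		.id			= -1,
+ *		.dev.platform_data	= &my_w1_pdata,
+ *	};
+ */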
+
+#endif /* _LINUX_W1_GPIO_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
new file mode 100644
index 000000000..2db833498
--- /dev/null
+++ b/include/linux/wait.h
@@ -0,0 +1,1155 @@
+#ifndef _LINUX_WAIT_H
+#define _LINUX_WAIT_H
+/*
+ * Linux wait queue related types and methods
+ */
+#include <linux/list.h>
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <asm/current.h>
+#include <uapi/linux/wait.h>
+
+typedef struct __wait_queue wait_queue_t;
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
+int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
+
+/* __wait_queue::flags */
+#define WQ_FLAG_EXCLUSIVE 0x01
+#define WQ_FLAG_WOKEN 0x02
+
+struct __wait_queue {
+ unsigned int flags;
+ void *private;
+ wait_queue_func_t func;
+ struct list_head task_list;
+};
+
+struct wait_bit_key {
+ void *flags;
+ int bit_nr;
+#define WAIT_ATOMIC_T_BIT_NR -1
+ unsigned long timeout;
+};
+
+struct wait_bit_queue {
+ struct wait_bit_key key;
+ wait_queue_t wait;
+};
+
+struct __wait_queue_head {
+ spinlock_t lock;
+ struct list_head task_list;
+};
+typedef struct __wait_queue_head wait_queue_head_t;
+
+struct task_struct;
+
+/*
+ * Macros for declaration and initialisation of the datatypes
+ */
+
+#define __WAITQUEUE_INITIALIZER(name, tsk) { \
+ .private = tsk, \
+ .func = default_wake_function, \
+ .task_list = { NULL, NULL } }
+
+#define DECLARE_WAITQUEUE(name, tsk) \
+ wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
+
+#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
+ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .task_list = { &(name).task_list, &(name).task_list } }
+
+#define DECLARE_WAIT_QUEUE_HEAD(name) \
+ wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
+
+#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
+ { .flags = word, .bit_nr = bit, }
+
+#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
+ { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
+
+extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
+
+#define init_waitqueue_head(q) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_waitqueue_head((q), #q, &__key); \
+ } while (0)
+
+#ifdef CONFIG_LOCKDEP
+# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
+ ({ init_waitqueue_head(&name); name; })
+# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
+ wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
+#else
+# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
+#endif
+
+static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
+{
+ q->flags = 0;
+ q->private = p;
+ q->func = default_wake_function;
+}
+
+static inline void
+init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
+{
+ q->flags = 0;
+ q->private = NULL;
+ q->func = func;
+}
+
+static inline int waitqueue_active(wait_queue_head_t *q)
+{
+ return !list_empty(&q->task_list);
+}
+
+extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
+extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
+extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
+
+static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
+{
+ list_add(&new->task_list, &head->task_list);
+}
+
+/*
+ * Used for wake-one threads:
+ */
+static inline void
+__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue(q, wait);
+}
+
+static inline void __add_wait_queue_tail(wait_queue_head_t *head,
+ wait_queue_t *new)
+{
+ list_add_tail(&new->task_list, &head->task_list);
+}
+
+static inline void
+__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+{
+ wait->flags |= WQ_FLAG_EXCLUSIVE;
+ __add_wait_queue_tail(q, wait);
+}
+
+static inline void
+__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
+{
+ list_del(&old->task_list);
+}
+
+typedef int wait_bit_action_f(struct wait_bit_key *);
+void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
+void __wake_up_bit(wait_queue_head_t *, void *, int);
+int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
+int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
+void wake_up_bit(void *, int);
+void wake_up_atomic_t(atomic_t *);
+int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
+int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
+int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
+int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
+wait_queue_head_t *bit_waitqueue(void *, int);
+
+#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
+#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
+#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
+#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
+#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
+
+#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
+#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
+#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
+#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
+
+/*
+ * Wakeup macros to be used to report events to the targets.
+ */
+#define wake_up_poll(x, m) \
+ __wake_up(x, TASK_NORMAL, 1, (void *) (m))
+#define wake_up_locked_poll(x, m) \
+ __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
+#define wake_up_interruptible_poll(x, m) \
+ __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
+#define wake_up_interruptible_sync_poll(x, m) \
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
+
+#define ___wait_cond_timeout(condition) \
+({ \
+ bool __cond = (condition); \
+ if (__cond && !__ret) \
+ __ret = 1; \
+ __cond || !__ret; \
+})
+
+#define ___wait_is_interruptible(state) \
+ (!__builtin_constant_p(state) || \
+ state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+
+/*
+ * The below macro ___wait_event() has an explicit shadow of the __ret
+ * variable when used from the wait_event_*() macros.
+ *
+ * This is so that both can use the ___wait_cond_timeout() construct
+ * to wrap the condition.
+ *
+ * The type inconsistency of the wait_event_*() __ret variable is also
+ * on purpose; we use long where we can return timeout values and int
+ * otherwise.
+ */
+
+#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
+({ \
+ __label__ __out; \
+ wait_queue_t __wait; \
+ long __ret = ret; /* explicit shadow */ \
+ \
+ INIT_LIST_HEAD(&__wait.task_list); \
+ if (exclusive) \
+ __wait.flags = WQ_FLAG_EXCLUSIVE; \
+ else \
+ __wait.flags = 0; \
+ \
+ for (;;) { \
+ long __int = prepare_to_wait_event(&wq, &__wait, state);\
+ \
+ if (condition) \
+ break; \
+ \
+ if (___wait_is_interruptible(state) && __int) { \
+ __ret = __int; \
+ if (exclusive) { \
+ abort_exclusive_wait(&wq, &__wait, \
+ state, NULL); \
+ goto __out; \
+ } \
+ break; \
+ } \
+ \
+ cmd; \
+ } \
+ finish_wait(&wq, &__wait); \
+__out: __ret; \
+})
+
+#define __wait_event(wq, condition) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ schedule())
+
+/**
+ * wait_event - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event(wq, condition) \
+do { \
+ might_sleep(); \
+ if (condition) \
+ break; \
+ __wait_event(wq, condition); \
+} while (0)
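+
+/*
+ * Illustrative sketch (not part of this header); 'my_wq' and 'done' are
+ * hypothetical:
+ *
+ *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
+ *	static bool done;
+ *
+ *	waiter:	wait_event(my_wq, done);
+ *	waker:	done = true;
+ *		wake_up(&my_wq);
+ */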
+
+#define __io_wait_event(wq, condition) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ io_schedule())
+
+/*
+ * io_wait_event() -- like wait_event() but with io_schedule()
+ */
+#define io_wait_event(wq, condition) \
+do { \
+ might_sleep(); \
+ if (condition) \
+ break; \
+ __io_wait_event(wq, condition); \
+} while (0)
+
+#define __wait_event_freezable(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ schedule(); try_to_freeze())
+
+/**
+ * wait_event_freezable - sleep (or freeze) until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
+ * to system load) until the @condition evaluates to true. The
+ * @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_freezable(wq, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_freezable(wq, condition); \
+ __ret; \
+})
+
+#define __wait_event_timeout(wq, condition, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_UNINTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * or the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed.
+ */
+#define wait_event_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_timeout(wq, condition, timeout); \
+ __ret; \
+})
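+
+/*
+ * Illustrative sketch (not part of this header), reusing the hypothetical
+ * 'my_wq'/'done' pair from the wait_event() example above:
+ *
+ *	long left = wait_event_timeout(my_wq, done, msecs_to_jiffies(100));
+ *	if (!left)
+ *		(timed out: 'done' is still false)
+ */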
+
+#define __wait_event_freezable_timeout(wq, condition, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret); try_to_freeze())
+
+/*
+ * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
+ * increasing load and is freezable.
+ */
+#define wait_event_freezable_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
+ __ret; \
+})
+
+#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ cmd1; schedule(); cmd2)
+
+/**
+ * wait_event_cmd - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @cmd1: the command will be executed before sleep
+ * @cmd2: the command will be executed after sleep
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_cmd(wq, condition, cmd1, cmd2) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_cmd(wq, condition, cmd1, cmd2); \
+} while (0)
+
+#define __wait_event_interruptible(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ schedule())
+
+/**
+ * wait_event_interruptible - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible(wq, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_interruptible(wq, condition); \
+ __ret; \
+})
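+
+/*
+ * Illustrative sketch (not part of this header), again with the
+ * hypothetical 'my_wq'/'done' pair:
+ *
+ *	int err = wait_event_interruptible(my_wq, done);
+ *	if (err == -ERESTARTSYS)
+ *		return err;	(interrupted by a signal before 'done')
+ */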
+
+#define __wait_event_interruptible_timeout(wq, condition, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a signal.
+ */
+#define wait_event_interruptible_timeout(wq, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_interruptible_timeout(wq, \
+ condition, timeout); \
+ __ret; \
+})
+
+#define __wait_event_hrtimeout(wq, condition, timeout, state) \
+({ \
+ int __ret = 0; \
+ struct hrtimer_sleeper __t; \
+ \
+ hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
+ HRTIMER_MODE_REL); \
+ hrtimer_init_sleeper(&__t, current); \
+ if ((timeout).tv64 != KTIME_MAX) \
+ hrtimer_start_range_ns(&__t.timer, timeout, \
+ current->timer_slack_ns, \
+ HRTIMER_MODE_REL); \
+ \
+ __ret = ___wait_event(wq, condition, state, 0, 0, \
+ if (!__t.task) { \
+ __ret = -ETIME; \
+ break; \
+ } \
+ schedule()); \
+ \
+ hrtimer_cancel(&__t.timer); \
+ destroy_hrtimer_on_stack(&__t.timer); \
+ __ret; \
+})
+
+/**
+ * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, as a ktime_t
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true or the timeout elapses.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if @condition became true, or -ETIME if the timeout
+ * elapsed.
+ */
+#define wait_event_hrtimeout(wq, condition, timeout) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_hrtimeout(wq, condition, timeout, \
+ TASK_UNINTERRUPTIBLE); \
+ __ret; \
+})
+
+/**
+ * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, as a ktime_t
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if @condition became true, -ERESTARTSYS if it was
+ * interrupted by a signal, or -ETIME if the timeout elapsed.
+ */
+#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
+({ \
+ long __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_hrtimeout(wq, condition, timeout, \
+ TASK_INTERRUPTIBLE); \
+ __ret; \
+})
+
+#define __wait_event_interruptible_exclusive(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
+ schedule())
+
+#define wait_event_interruptible_exclusive(wq, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_interruptible_exclusive(wq, condition);\
+ __ret; \
+})
+
+
+#define __wait_event_freezable_exclusive(wq, condition) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
+ schedule(); try_to_freeze())
+
+#define wait_event_freezable_exclusive(wq, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_freezable_exclusive(wq, condition);\
+ __ret; \
+})
+
+
+#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
+({ \
+ int __ret = 0; \
+ DEFINE_WAIT(__wait); \
+ if (exclusive) \
+ __wait.flags |= WQ_FLAG_EXCLUSIVE; \
+ do { \
+ if (likely(list_empty(&__wait.task_list))) \
+ __add_wait_queue_tail(&(wq), &__wait); \
+ set_current_state(TASK_INTERRUPTIBLE); \
+ if (signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ if (irq) \
+ spin_unlock_irq(&(wq).lock); \
+ else \
+ spin_unlock(&(wq).lock); \
+ schedule(); \
+ if (irq) \
+ spin_lock_irq(&(wq).lock); \
+ else \
+ spin_lock(&(wq).lock); \
+ } while (!(condition)); \
+ __remove_wait_queue(&(wq), &__wait); \
+ __set_current_state(TASK_RUNNING); \
+ __ret; \
+})
+
+
+/**
+ * wait_event_interruptible_locked - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * It must be called with wq.lock being held. This spinlock is
+ * unlocked while sleeping but @condition testing is done while lock
+ * is held and when this macro exits the lock is held.
+ *
+ * The lock is locked/unlocked using spin_lock()/spin_unlock()
+ * functions which must match the way they are locked/unlocked outside
+ * of this macro.
+ *
+ * wake_up_locked() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_locked(wq, condition) \
+ ((condition) \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
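A hedged sketch of the *_locked pattern, pairing this macro with wake_up_locked() under wq.lock; struct example_fifo and its fields are hypothetical:

static int example_fifo_get(struct example_fifo *f)
{
	int ret;

	spin_lock(&f->wq.lock);
	ret = wait_event_interruptible_locked(f->wq, f->count > 0);
	if (!ret)
		f->count--;
	spin_unlock(&f->wq.lock);
	return ret;
}

static void example_fifo_put(struct example_fifo *f)
{
	spin_lock(&f->wq.lock);
	f->count++;
	wake_up_locked(&f->wq);
	spin_unlock(&f->wq.lock);
}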
+
+/**
+ * wait_event_interruptible_locked_irq - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * It must be called with wq.lock being held. This spinlock is
+ * unlocked while sleeping but @condition testing is done while lock
+ * is held and when this macro exits the lock is held.
+ *
+ * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
+ * functions which must match the way they are locked/unlocked outside
+ * of this macro.
+ *
+ * wake_up_locked() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_locked_irq(wq, condition) \
+ ((condition) \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
+
+/**
+ * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * It must be called with wq.lock being held. This spinlock is
+ * unlocked while sleeping but @condition testing is done while lock
+ * is held and when this macro exits the lock is held.
+ *
+ * The lock is locked/unlocked using spin_lock()/spin_unlock()
+ * functions which must match the way they are locked/unlocked outside
+ * of this macro.
+ *
+ * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
+ * set, so if other processes are waiting on the list and this process
+ * is woken up, the wakeup is not passed on to further waiters.
+ *
+ * wake_up_locked() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_exclusive_locked(wq, condition) \
+ ((condition) \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
+
+/**
+ * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * It must be called with wq.lock being held. This spinlock is
+ * unlocked while sleeping but @condition testing is done while lock
+ * is held and when this macro exits the lock is held.
+ *
+ * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
+ * functions which must match the way they are locked/unlocked outside
+ * of this macro.
+ *
+ * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
+ * set, so if other processes are waiting on the list and this process
+ * is woken up, the wakeup is not passed on to further waiters.
+ *
+ * wake_up_locked() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
+ ((condition) \
+ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+
+
+#define __wait_event_killable(wq, condition) \
+ ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
+
+/**
+ * wait_event_killable - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a signal is received.
+ * The @condition is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function will return -ERESTARTSYS if it was interrupted by a
+ * signal and 0 if @condition evaluated to true.
+ */
+#define wait_event_killable(wq, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_event_killable(wq, condition); \
+ __ret; \
+})
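For illustration only, a killable wait typically just maps a non-zero return to an error code; dev->wq and dev->done are made-up names used inside a hypothetical caller:

	if (wait_event_killable(dev->wq, dev->done))
		return -EINTR;	/* woken by a fatal signal */
	/* dev->done is true here */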
+
+
+#define __wait_event_lock_irq(wq, condition, lock, cmd) \
+ (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ spin_lock_irq(&lock))
+
+/**
+ * wait_event_lock_irq_cmd - sleep until a condition gets true. The
+ * condition is checked under the lock. This
+ * is expected to be called with the lock
+ * taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before cmd
+ * and schedule() and reacquired afterwards.
+ * @cmd: a command which is invoked outside the critical section before
+ * sleep
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before invoking the cmd and going to sleep and is reacquired
+ * afterwards.
+ */
+#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_lock_irq(wq, condition, lock, cmd); \
+} while (0)
+
+/**
+ * wait_event_lock_irq - sleep until a condition gets true. The
+ * condition is checked under the lock. This
+ * is expected to be called with the lock
+ * taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ */
+#define wait_event_lock_irq(wq, condition, lock) \
+do { \
+ if (condition) \
+ break; \
+ __wait_event_lock_irq(wq, condition, lock, ); \
+} while (0)
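A sketch of the usual lock-irq consumer loop built on this macro; struct example_queue and struct example_item are placeholders with an assumed list_head member:

static struct example_item *example_dequeue(struct example_queue *q)
{
	struct example_item *item;

	spin_lock_irq(&q->lock);
	/* q->lock is dropped around schedule() and reacquired afterwards */
	wait_event_lock_irq(q->wq, !list_empty(&q->items), q->lock);
	item = list_first_entry(&q->items, struct example_item, node);
	list_del(&item->node);
	spin_unlock_irq(&q->lock);
	return item;
}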
+
+
+#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
+ ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+ spin_unlock_irq(&lock); \
+ cmd; \
+ schedule(); \
+ spin_lock_irq(&lock))
+
+/**
+ * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
+ * The condition is checked under the lock. This is expected to
+ * be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before cmd and
+ * schedule() and reacquired afterwards.
+ * @cmd: a command which is invoked outside the critical section before
+ * sleep
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before invoking the cmd and going to sleep and is reacquired
+ * afterwards.
+ *
+ * The macro will return -ERESTARTSYS if it was interrupted by a signal
+ * and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = __wait_event_interruptible_lock_irq(wq, \
+ condition, lock, cmd); \
+ __ret; \
+})
+
+/**
+ * wait_event_interruptible_lock_irq - sleep until a condition gets true.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The macro will return -ERESTARTSYS if it was interrupted by a signal
+ * and 0 if @condition evaluated to true.
+ */
+#define wait_event_interruptible_lock_irq(wq, condition, lock) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __ret = __wait_event_interruptible_lock_irq(wq, \
+ condition, lock,); \
+ __ret; \
+})
+
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
+ lock, timeout) \
+ ___wait_event(wq, ___wait_cond_timeout(condition), \
+ TASK_INTERRUPTIBLE, 0, timeout, \
+ spin_unlock_irq(&lock); \
+ __ret = schedule_timeout(__ret); \
+ spin_lock_irq(&lock));
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
+ * true or a timeout elapses. The condition is checked under
+ * the lock. This is expected to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, or the remaining jiffies if the
+ * condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+ timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, timeout); \
+ __ret; \
+})
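The three-way return value might be handled along these lines inside a hypothetical caller (the q->* names are illustrative):

	long rem;

	spin_lock_irq(&q->lock);
	rem = wait_event_interruptible_lock_irq_timeout(q->wq, q->ready,
							q->lock, HZ);
	spin_unlock_irq(&q->lock);
	if (rem == 0)
		return -ETIMEDOUT;	/* timeout elapsed */
	if (rem < 0)
		return rem;		/* -ERESTARTSYS */
	/* condition became true with 'rem' jiffies to spare */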
+
+/*
+ * Waitqueues which are removed from the waitqueue_head at wakeup time
+ */
+void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
+void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
+long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
+int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+
+#define DEFINE_WAIT_FUNC(name, function) \
+ wait_queue_t name = { \
+ .private = current, \
+ .func = function, \
+ .task_list = LIST_HEAD_INIT((name).task_list), \
+ }
+
+#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
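The prepare_to_wait()/finish_wait() helpers and DEFINE_WAIT() above support the classic open-coded wait loop; a minimal sketch, assuming the caller owns a boolean flag set by the waker:

static void example_wait_for_flag(wait_queue_head_t *wq, bool *flag)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		if (*flag)
			break;
		schedule();
	}
	finish_wait(wq, &wait);
}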
+
+#define DEFINE_WAIT_BIT(name, word, bit) \
+ struct wait_bit_queue name = { \
+ .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
+ .wait = { \
+ .private = current, \
+ .func = wake_bit_function, \
+ .task_list = \
+ LIST_HEAD_INIT((name).wait.task_list), \
+ }, \
+ }
+
+#define init_wait(wait) \
+ do { \
+ (wait)->private = current; \
+ (wait)->func = autoremove_wake_function; \
+ INIT_LIST_HEAD(&(wait)->task_list); \
+ (wait)->flags = 0; \
+ } while (0)
+
+
+extern int bit_wait(struct wait_bit_key *);
+extern int bit_wait_io(struct wait_bit_key *);
+extern int bit_wait_timeout(struct wait_bit_key *);
+extern int bit_wait_io_timeout(struct wait_bit_key *);
+
+/**
+ * wait_on_bit - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit.
+ * For instance, if one were to have waiters on a bitflag, one would
+ * call wait_on_bit() in threads waiting for the bit to clear.
+ * One uses wait_on_bit() where one is waiting for the bit to clear,
+ * but has no intention of setting it.
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit(void *word, int bit, unsigned mode)
+{
+ might_sleep();
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit(word, bit,
+ bit_wait,
+ mode);
+}
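For example, a caller waiting for a "busy" bit to clear might do something like the following; the bit number, the dev->flags word and the -EINTR mapping are illustrative:

	/* Assumed: dev->flags is an unsigned long, bit 0 means "busy". */
	if (wait_on_bit(&dev->flags, 0, TASK_KILLABLE))
		return -EINTR;	/* interrupted by a fatal signal */
	/* bit 0 was observed clear */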
+
+/**
+ * wait_on_bit_io - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared. This is similar to wait_on_bit(), but calls
+ * io_schedule() instead of schedule() for the actual waiting.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit_io(void *word, int bit, unsigned mode)
+{
+ might_sleep();
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit(word, bit,
+ bit_wait_io,
+ mode);
+}
+
+/**
+ * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ * @timeout: timeout, in jiffies
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared. This is similar to wait_on_bit(), except also takes a
+ * timeout parameter.
+ *
+ * Returned value will be zero if the bit was cleared before the
+ * @timeout elapsed, or non-zero if the @timeout elapsed or process
+ * received a signal and the mode permitted wakeup on that signal.
+ */
+static inline int
+wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
+{
+ might_sleep();
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_timeout(word, bit,
+ bit_wait_timeout,
+ mode, timeout);
+}
+
+/**
+ * wait_on_bit_action - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared, and allow the waiting action to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+{
+ might_sleep();
+ if (!test_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit(word, bit, action, mode);
+}
+
+/**
+ * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that waits on a bit
+ * when one intends to set it, for instance when trying to lock a
+ * bitflag. If one were to have waiters trying to set a bitflag
+ * and waiting for it to clear before setting it, one would call
+ * wait_on_bit_lock() in threads waiting to be able to set the bit.
+ * One uses wait_on_bit_lock() where one is waiting for the bit to
+ * clear with the intention of setting it, and when done, clearing it.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set. Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock(void *word, int bit, unsigned mode)
+{
+ might_sleep();
+ if (!test_and_set_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
+}
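A sketch of the usual bit-lock pairing: take the bit with wait_on_bit_lock(), release it with clear_bit_unlock() and wake waiters with wake_up_bit(); the EXAMPLE_LOCK_BIT name and dev->flags word are made up:

	wait_on_bit_lock(&dev->flags, EXAMPLE_LOCK_BIT, TASK_UNINTERRUPTIBLE);
	/* ... exclusive section protected by the bit ... */
	clear_bit_unlock(EXAMPLE_LOCK_BIT, &dev->flags);
	smp_mb__after_atomic();
	wake_up_bit(&dev->flags, EXAMPLE_LOCK_BIT);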
+
+/**
+ * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to atomically set it. This is similar
+ * to wait_on_bit(), but calls io_schedule() instead of schedule()
+ * for the actual waiting.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set. Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+{
+ might_sleep();
+ if (!test_and_set_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
+}
+
+/**
+ * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to set it, and allow the waiting action
+ * to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set. Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+{
+ might_sleep();
+ if (!test_and_set_bit(bit, word))
+ return 0;
+ return out_of_line_wait_on_bit_lock(word, bit, action, mode);
+}
+
+/**
+ * wait_on_atomic_t - Wait for an atomic_t to become 0
+ * @val: The atomic value being waited on, a kernel virtual address
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
+ * the purpose of getting a waitqueue, but we set the key to a bit number
+ * outside of the target 'word'.
+ */
+static inline
+int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
+{
+ might_sleep();
+ if (atomic_read(val) == 0)
+ return 0;
+ return out_of_line_wait_on_atomic_t(val, action, mode);
+}
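A hedged sketch of the usual pairing with wake_up_atomic_t(); the action callback is caller-supplied, and dev->users is an illustrative reference counter:

static int example_atomic_t_wait(atomic_t *p)
{
	schedule();
	return 0;
}

	/* waiter: block until the last user drops the count to zero */
	wait_on_atomic_t(&dev->users, example_atomic_t_wait,
			 TASK_UNINTERRUPTIBLE);

	/* releaser: */
	if (atomic_dec_and_test(&dev->users))
		wake_up_atomic_t(&dev->users);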
+
+#endif /* _LINUX_WAIT_H */
diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h
new file mode 100644
index 000000000..8198a63cf
--- /dev/null
+++ b/include/linux/wanrouter.h
@@ -0,0 +1,10 @@
+/*
+ * wanrouter.h Legacy declarations kept around until X25 is removed
+ */
+
+#ifndef _ROUTER_H
+#define _ROUTER_H
+
+#include <uapi/linux/wanrouter.h>
+
+#endif /* _ROUTER_H */
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
new file mode 100644
index 000000000..a746bf521
--- /dev/null
+++ b/include/linux/watchdog.h
@@ -0,0 +1,148 @@
+/*
+ * Generic watchdog defines. Derived from..
+ *
+ * Berkshire PC Watchdog Defines
+ * by Ken Hollis <khollis@bitgate.com>
+ *
+ */
+#ifndef _LINUX_WATCHDOG_H
+#define _LINUX_WATCHDOG_H
+
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <uapi/linux/watchdog.h>
+
+struct watchdog_ops;
+struct watchdog_device;
+
+/** struct watchdog_ops - The watchdog-devices operations
+ *
+ * @owner: The module owner.
+ * @start: The routine for starting the watchdog device.
+ * @stop: The routine for stopping the watchdog device.
+ * @ping: The routine that sends a keepalive ping to the watchdog device.
+ * @status: The routine that shows the status of the watchdog device.
+ * @set_timeout:The routine for setting the watchdog device's timeout value.
+ * @get_timeleft:The routine that gets the time left before a reset.
+ * @ref: The ref operation for dyn. allocated watchdog_device structs
+ * @unref: The unref operation for dyn. allocated watchdog_device structs
+ * @ioctl: The routine that handles extra ioctl calls.
+ *
+ * The watchdog_ops structure contains a list of low-level operations
+ * that control a watchdog device. It also contains the module that owns
+ * these operations. The start and stop functions are mandatory; all other
+ * functions are optional.
+ */
+struct watchdog_ops {
+ struct module *owner;
+ /* mandatory operations */
+ int (*start)(struct watchdog_device *);
+ int (*stop)(struct watchdog_device *);
+ /* optional operations */
+ int (*ping)(struct watchdog_device *);
+ unsigned int (*status)(struct watchdog_device *);
+ int (*set_timeout)(struct watchdog_device *, unsigned int);
+ unsigned int (*get_timeleft)(struct watchdog_device *);
+ void (*ref)(struct watchdog_device *);
+ void (*unref)(struct watchdog_device *);
+ long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long);
+};
+
+/** struct watchdog_device - The structure that defines a watchdog device
+ *
+ * @id: The watchdog's ID. (Allocated by watchdog_register_device)
+ * @cdev: The watchdog's Character device.
+ * @dev: The device for our watchdog
+ * @parent: The parent bus device
+ * @info: Pointer to a watchdog_info structure.
+ * @ops: Pointer to the list of watchdog operations.
+ * @bootstatus: Status of the watchdog device at boot.
+ * @timeout:	The watchdog device's timeout value.
+ * @min_timeout:The watchdog device's minimum timeout value.
+ * @max_timeout:The watchdog device's maximum timeout value.
+ * @driver_data:Pointer to the driver's private data.
+ * @lock: Lock for watchdog core internal use only.
+ * @status: Field that contains the devices internal status bits.
+ *
+ * The watchdog_device structure contains all information about a
+ * watchdog timer device.
+ *
+ * The driver_data field must not be accessed directly. It must be accessed
+ * via the watchdog_set_drvdata and watchdog_get_drvdata helpers.
+ *
+ * The lock field is for watchdog core internal use only and should not be
+ * touched.
+ */
+struct watchdog_device {
+ int id;
+ struct cdev cdev;
+ struct device *dev;
+ struct device *parent;
+ const struct watchdog_info *info;
+ const struct watchdog_ops *ops;
+ unsigned int bootstatus;
+ unsigned int timeout;
+ unsigned int min_timeout;
+ unsigned int max_timeout;
+ void *driver_data;
+ struct mutex lock;
+ unsigned long status;
+/* Bit numbers for status flags */
+#define WDOG_ACTIVE 0 /* Is the watchdog running/active */
+#define WDOG_DEV_OPEN 1 /* Opened via /dev/watchdog ? */
+#define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */
+#define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */
+#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
+};
+
+#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
+#define WATCHDOG_NOWAYOUT_INIT_STATUS (WATCHDOG_NOWAYOUT << WDOG_NO_WAY_OUT)
+
+/* Use the following function to check whether or not the watchdog is active */
+static inline bool watchdog_active(struct watchdog_device *wdd)
+{
+ return test_bit(WDOG_ACTIVE, &wdd->status);
+}
+
+/* Use the following function to set the nowayout feature */
+static inline void watchdog_set_nowayout(struct watchdog_device *wdd, bool nowayout)
+{
+ if (nowayout)
+ set_bit(WDOG_NO_WAY_OUT, &wdd->status);
+}
+
+/* Use the following function to check if a timeout value is invalid */
+static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t)
+{
+ return ((wdd->max_timeout != 0) &&
+ (t < wdd->min_timeout || t > wdd->max_timeout));
+}
+
+/* Use the following functions to manipulate watchdog driver specific data */
+static inline void watchdog_set_drvdata(struct watchdog_device *wdd, void *data)
+{
+ wdd->driver_data = data;
+}
+
+static inline void *watchdog_get_drvdata(struct watchdog_device *wdd)
+{
+ return wdd->driver_data;
+}
+
+/* drivers/watchdog/watchdog_core.c */
+extern int watchdog_init_timeout(struct watchdog_device *wdd,
+ unsigned int timeout_parm, struct device *dev);
+extern int watchdog_register_device(struct watchdog_device *);
+extern void watchdog_unregister_device(struct watchdog_device *);
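A minimal, hypothetical driver sketch tying the pieces above together; the foo_* names and hardware hooks are placeholders, not a real driver:

static int foo_wdt_start(struct watchdog_device *wdd)
{
	/* kick the hardware so it starts counting down */
	return 0;
}

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	/* disable the hardware counter */
	return 0;
}

static const struct watchdog_ops foo_wdt_ops = {
	.owner	= THIS_MODULE,
	.start	= foo_wdt_start,
	.stop	= foo_wdt_stop,
};

static const struct watchdog_info foo_wdt_info = {
	.options	= WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
	.identity	= "foo watchdog",
};

static struct watchdog_device foo_wdt = {
	.info		= &foo_wdt_info,
	.ops		= &foo_wdt_ops,
	.timeout	= 30,
	.min_timeout	= 1,
	.max_timeout	= 255,
};

	/* in the probe path: */
	int err;

	watchdog_set_nowayout(&foo_wdt, WATCHDOG_NOWAYOUT);
	err = watchdog_register_device(&foo_wdt);
	if (err)
		return err;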
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+void watchdog_nmi_disable_all(void);
+void watchdog_nmi_enable_all(void);
+#else
+static inline void watchdog_nmi_disable_all(void) {}
+static inline void watchdog_nmi_enable_all(void) {}
+#endif
+
+#endif /* ifndef _LINUX_WATCHDOG_H */
diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h
new file mode 100644
index 000000000..aaf24ba12
--- /dev/null
+++ b/include/linux/wimax/debug.h
@@ -0,0 +1,526 @@
+/*
+ * Linux WiMAX
+ * Collection of tools to manage debug operations.
+ *
+ *
+ * Copyright (C) 2005-2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * Don't #include this file directly, read on!
+ *
+ *
+ * EXECUTING DEBUGGING ACTIONS OR NOT
+ *
+ * The main thing this framework provides is decision power to take a
+ * debug action (like printing a message) if the current debug level
+ * allows it.
+ *
+ * The decision power is at two levels: at compile-time (what does
+ * not make it is compiled out) and at run-time. The run-time
+ * selection is done per-submodule (as they are declared by the user
+ * of the framework).
+ *
+ * A call to d_test(L) (L being the target debug level) returns true
+ * if the action should be taken because the current debug levels
+ * allow it (both compile and run time).
+ *
+ * It follows that a call to d_test() that can be determined to be
+ * always false at compile time will get the code depending on it
+ * compiled out by optimization.
+ *
+ *
+ * DEBUG LEVELS
+ *
+ * It is up to the caller to define how much a debugging level is.
+ *
+ * Convention sets 0 as "no debug" (so an action marked as debug level 0
+ * will always be taken). The increasing debug levels are used for
+ * increased verbosity.
+ *
+ *
+ * USAGE
+ *
+ * Group the code in modules and submodules inside each module [which
+ * in most cases maps to Linux modules and .c files that compose
+ * those].
+ *
+ *
+ * For each module, there is:
+ *
+ * - a MODULENAME (single word, legal C identifier)
+ *
+ * - a debug-levels.h header file that declares the list of
+ * submodules and that is included by all .c files that use
+ * the debugging tools. The file name can be anything.
+ *
+ * - some (optional) .c code to manipulate the runtime debug levels
+ * through debugfs.
+ *
+ * The debug-levels.h file would look like:
+ *
+ * #ifndef __debug_levels__h__
+ * #define __debug_levels__h__
+ *
+ * #define D_MODULENAME modulename
+ * #define D_MASTER 10
+ *
+ * #include <linux/wimax/debug.h>
+ *
+ * enum d_module {
+ * D_SUBMODULE_DECLARE(submodule_1),
+ * D_SUBMODULE_DECLARE(submodule_2),
+ * ...
+ * D_SUBMODULE_DECLARE(submodule_N)
+ * };
+ *
+ * #endif
+ *
+ * D_MASTER is the maximum compile-time debug level; any debug actions
+ * above this will be compiled out. D_MODULENAME is the module name (legal C
+ * identifier), which has to be unique for each module (to avoid
+ * namespace collisions during linkage). Note those #defines need to
+ * be done before #including debug.h
+ *
+ * We declare N different submodules whose debug level can be
+ * independently controlled during runtime.
+ *
+ * In a .c file of the module (and only in one of them), define the
+ * following code:
+ *
+ * struct d_level D_LEVEL[] = {
+ * D_SUBMODULE_DEFINE(submodule_1),
+ * D_SUBMODULE_DEFINE(submodule_2),
+ * ...
+ * D_SUBMODULE_DEFINE(submodule_N),
+ * };
+ * size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
+ *
+ * Externs for d_level_MODULENAME and d_level_size_MODULENAME are used
+ * and declared in this file using the D_LEVEL and D_LEVEL_SIZE macros
+ * #defined also in this file.
+ *
+ * To manipulate from user space the levels, create a debugfs dentry
+ * and then register each submodule with:
+ *
+ * result = d_level_register_debugfs("PREFIX_", submodule_X, parent);
+ * if (result < 0)
+ * goto error;
+ *
+ * Where PREFIX_ is a name of your choosing. This will create a debugfs
+ * file with a single numeric value that can be used to tweak it. To
+ * remove the entries, just use debugfs_remove_recursive() on 'parent'.
+ *
+ * NOTE: remember that even though this may appear attached to some
+ * particular instance of a device, the settings are *global*.
+ *
+ *
+ * On each submodule (for example, .c files), the debug infrastructure
+ * should be included like this:
+ *
+ * #define D_SUBMODULE submodule_x // matches one in debug-levels.h
+ * #include "debug-levels.h"
+ *
+ * after #including all your include files.
+ *
+ *
+ * Now you can use the d_*() macros below [d_test(), d_fnstart(),
+ * d_fnend(), d_printf(), d_dump()].
+ *
+ * If their debug level is greater than D_MASTER, they will be
+ * compiled out.
+ *
+ * If their debug level is lower or equal than D_MASTER but greater
+ * than the current debug level of their submodule, they'll be
+ * ignored.
+ *
+ * Otherwise, the action will be performed.
+ */
+#ifndef __debug__h__
+#define __debug__h__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+struct device;
+
+/* Backend stuff */
+
+/*
+ * Debug backend: generate a message header from a 'struct device'
+ *
+ * @head: buffer where to place the header
+ * @head_size: length of @head
+ * @dev: pointer to device used to generate a header from. If NULL,
+ * an empty ("") header is generated.
+ */
+static inline
+void __d_head(char *head, size_t head_size,
+ struct device *dev)
+{
+ if (dev == NULL)
+ head[0] = 0;
+ else if ((unsigned long)dev < 4096) {
+ printk(KERN_ERR "E: Corrupt dev %p\n", dev);
+ WARN_ON(1);
+ } else
+ snprintf(head, head_size, "%s %s: ",
+ dev_driver_string(dev), dev_name(dev));
+}
+
+
+/*
+ * Debug backend: log some message if debugging is enabled
+ *
+ * @l: intended debug level
+ * @tag: tag to prefix the message with
+ * @dev: 'struct device' associated to this message
+ * @f: printf-like format and arguments
+ *
+ * Note this is optimized out if it doesn't pass the compile-time
+ * check; however, it is *always* compiled. This is useful to make
+ * sure the printf-like formats and variables are always checked and
+ * they don't get bit rot if you have all the debugging disabled.
+ */
+#define _d_printf(l, tag, dev, f, a...) \
+do { \
+ char head[64]; \
+ if (!d_test(l)) \
+ break; \
+ __d_head(head, sizeof(head), dev); \
+ printk(KERN_ERR "%s%s%s: " f, head, __func__, tag, ##a); \
+} while (0)
+
+
+/*
+ * CPP syntactic sugar to generate A_B like symbol names when one of
+ * the arguments is a preprocessor #define.
+ */
+#define __D_PASTE__(varname, modulename) varname##_##modulename
+#define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
+#define _D_SUBMODULE_INDEX(_name) (D_SUBMODULE_DECLARE(_name))
+
+
+/*
+ * Store a submodule's runtime debug level and name
+ */
+struct d_level {
+ u8 level;
+ const char *name;
+};
+
+
+/*
+ * List of available submodules and their debug levels
+ *
+ * We call them d_level_MODULENAME and d_level_size_MODULENAME; the
+ * macros D_LEVEL and D_LEVEL_SIZE contain the name already for
+ * convenience.
+ *
+ * This array and the size are defined on some .c file that is part of
+ * the current module.
+ */
+#define D_LEVEL __D_PASTE(d_level, D_MODULENAME)
+#define D_LEVEL_SIZE __D_PASTE(d_level_size, D_MODULENAME)
+
+extern struct d_level D_LEVEL[];
+extern size_t D_LEVEL_SIZE;
+
+
+/*
+ * Frontend stuff
+ *
+ *
+ * Stuff you need to declare prior to using the actual "debug" actions
+ * (defined below).
+ */
+
+#ifndef D_MODULENAME
+#error D_MODULENAME is not defined in your debug-levels.h file
+/**
+ * D_MODULENAME - Name of the current module
+ *
+ * #define in your module's debug-levels.h, making sure it is
+ * unique. This has to be a legal C identifier.
+ */
+#define D_MODULENAME undefined_modulename
+#endif
+
+
+#ifndef D_MASTER
+#warning D_MASTER not defined, but debug.h included! [see docs]
+/**
+ * D_MASTER - Compile time maximum debug level
+ *
+ * #define in your debug-levels.h file to the maximum debug level the
+ * runtime code will be allowed to have. This allows you to provide a
+ * main knob.
+ *
+ * Anything above that level will be optimized out of the compile.
+ *
+ * Defaults to zero (no debug code compiled in).
+ *
+ * Maximum one definition per module (at the debug-levels.h file).
+ */
+#define D_MASTER 0
+#endif
+
+#ifndef D_SUBMODULE
+#error D_SUBMODULE not defined, but debug.h included! [see docs]
+/**
+ * D_SUBMODULE - Name of the current submodule
+ *
+ * #define in your submodule .c file before #including debug-levels.h
+ * to the name of the current submodule as previously declared and
+ * defined with D_SUBMODULE_DECLARE() (in your module's
+ * debug-levels.h) and D_SUBMODULE_DEFINE().
+ *
+ * This is used to provide runtime-control over the debug levels.
+ *
+ * Maximum one per .c file! Can be shared among different .c files
+ * (meaning they belong to the same submodule categorization).
+ */
+#define D_SUBMODULE undefined_module
+#endif
+
+
+/**
+ * D_SUBMODULE_DECLARE - Declare a submodule for runtime debug level control
+ *
+ * @_name: name of the submodule, restricted to the chars that make up a
+ * valid C identifier ([a-zA-Z0-9_]).
+ *
+ * Declare in the module's debug-levels.h header file as:
+ *
+ * enum d_module {
+ * D_SUBMODULE_DECLARE(submodule_1),
+ * D_SUBMODULE_DECLARE(submodule_2),
+ * D_SUBMODULE_DECLARE(submodule_3),
+ * };
+ *
+ * Some corresponding .c file needs to have a matching
+ * D_SUBMODULE_DEFINE().
+ */
+#define D_SUBMODULE_DECLARE(_name) __D_SUBMODULE_##_name
+
+
+/**
+ * D_SUBMODULE_DEFINE - Define a submodule for runtime debug level control
+ *
+ * @_name: name of the submodule, restricted to the chars that make up a
+ * valid C identifier ([a-zA-Z0-9_]).
+ *
+ * Use once per module (in some .c file) as:
+ *
+ * static
+ * struct d_level d_level_SUBMODULENAME[] = {
+ * D_SUBMODULE_DEFINE(submodule_1),
+ * D_SUBMODULE_DEFINE(submodule_2),
+ * D_SUBMODULE_DEFINE(submodule_3),
+ * };
+ * size_t d_level_size_SUBMODULENAME = ARRAY_SIZE(d_level_SUBMODULENAME);
+ *
+ * Matching D_SUBMODULE_DECLARE()s have to be present in a
+ * debug-levels.h header file.
+ */
+#define D_SUBMODULE_DEFINE(_name) \
+[__D_SUBMODULE_##_name] = { \
+ .level = 0, \
+ .name = #_name \
+}
+
+
+
+/* The actual "debug" operations */
+
+
+/**
+ * d_test - Returns true if debugging should be enabled
+ *
+ * @l: intended debug level (unsigned)
+ *
+ * If the master debug switch is enabled and the current settings are
+ * higher or equal to the requested level, then debugging
+ * output/actions should be enabled.
+ *
+ * NOTE:
+ *
+ * This needs to be coded so that it can be evaluated at compile
+ * time; this is why the ugly BUG_ON() is placed in there, so the
+ * D_MASTER evaluation is compiled out entirely when it is compile-time false.
+ */
+#define d_test(l) \
+({ \
+ unsigned __l = l; /* type enforcer */ \
+ (D_MASTER) >= __l \
+ && ({ \
+ BUG_ON(_D_SUBMODULE_INDEX(D_SUBMODULE) >= D_LEVEL_SIZE);\
+ D_LEVEL[_D_SUBMODULE_INDEX(D_SUBMODULE)].level >= __l; \
+ }); \
+})
+
+
+/**
+ * d_fnstart - log message at function start if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_fnstart(l, _dev, f, a...) _d_printf(l, " FNSTART", _dev, f, ## a)
+
+
+/**
+ * d_fnend - log message at function end if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_fnend(l, _dev, f, a...) _d_printf(l, " FNEND", _dev, f, ## a)
+
+
+/**
+ * d_printf - log message if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_printf(l, _dev, f, a...) _d_printf(l, "", _dev, f, ## a)
+
+
+/**
+ * d_dump - log buffer hex dump if debugging enabled
+ *
+ * @l: intended debug level
+ * @_dev: 'struct device' pointer, NULL if none (for context)
+ * @f: printf-like format and arguments
+ */
+#define d_dump(l, dev, ptr, size) \
+do { \
+ char head[64]; \
+ if (!d_test(l)) \
+ break; \
+ __d_head(head, sizeof(head), dev); \
+ print_hex_dump(KERN_ERR, head, 0, 16, 1, \
+ ((void *) ptr), (size), 0); \
+} while (0)
+
+
+/**
+ * d_level_register_debugfs - Export a submodule's debug level over debugfs as PREFIXSUBMODULE
+ *
+ * @prefix: string to prefix the name with
+ * @name: name of the submodule (not a string, just the identifier)
+ * @parent: debugfs parent dentry
+ *
+ * Returns: 0 if ok, < 0 errno on error.
+ *
+ * For removing, just use debugfs_remove_recursive() on the parent.
+ */
+#define d_level_register_debugfs(prefix, name, parent) \
+({ \
+ int rc; \
+ struct dentry *fd; \
+ struct dentry *verify_parent_type = parent; \
+ fd = debugfs_create_u8( \
+ prefix #name, 0600, verify_parent_type, \
+ &(D_LEVEL[__D_SUBMODULE_ ## name].level)); \
+ rc = PTR_ERR(fd); \
+ if (IS_ERR(fd) && rc != -ENODEV) \
+ printk(KERN_ERR "%s: Can't create debugfs entry %s: " \
+ "%d\n", __func__, prefix #name, rc); \
+ else \
+ rc = 0; \
+ rc; \
+})
+
+
+static inline
+void d_submodule_set(struct d_level *d_level, size_t d_level_size,
+ const char *submodule, u8 level, const char *tag)
+{
+ struct d_level *itr, *top;
+ int index = -1;
+
+ for (itr = d_level, top = itr + d_level_size; itr < top; itr++) {
+ index++;
+ if (itr->name == NULL) {
+ printk(KERN_ERR "%s: itr->name NULL?? (%p, #%d)\n",
+ tag, itr, index);
+ continue;
+ }
+ if (!strcmp(itr->name, submodule)) {
+ itr->level = level;
+ return;
+ }
+ }
+ printk(KERN_ERR "%s: unknown submodule %s\n", tag, submodule);
+}
+
+
+/**
+ * d_parse_params - Parse a string with debug parameters from the
+ * command line
+ *
+ * @d_level: level structure (D_LEVEL)
+ * @d_level_size: number of items in the level structure
+ * (D_LEVEL_SIZE).
+ * @_params: string with the parameters; this is a space (not tab!)
+ * separated list of NAME:VALUE, where VALUE is the debug level
+ * and NAME is the name of the submodule.
+ * @tag: string for error messages (example: MODULE.ARGNAME).
+ */
+static inline
+void d_parse_params(struct d_level *d_level, size_t d_level_size,
+ const char *_params, const char *tag)
+{
+ char submodule[130], *params, *params_orig, *token, *colon;
+ unsigned level, tokens;
+
+ if (_params == NULL)
+ return;
+ params_orig = kstrdup(_params, GFP_KERNEL);
+ params = params_orig;
+ while (1) {
+ token = strsep(&params, " ");
+ if (token == NULL)
+ break;
+ if (*token == '\0') /* eat joint spaces */
+ continue;
+ /* kernel's sscanf %s eats until whitespace, so we
+ * replace : by \n so it doesn't get eaten later by
+ * strsep */
+ colon = strchr(token, ':');
+ if (colon != NULL)
+ *colon = '\n';
+ tokens = sscanf(token, "%s\n%u", submodule, &level);
+ if (colon != NULL)
+ *colon = ':'; /* set back, for error messages */
+ if (tokens == 2)
+ d_submodule_set(d_level, d_level_size,
+ submodule, level, tag);
+ else
+ printk(KERN_ERR "%s: can't parse '%s' as a "
+ "SUBMODULE:LEVEL (%d tokens)\n",
+ tag, token, tokens);
+ }
+ kfree(params_orig);
+}
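As a sketch, a module could feed a module parameter string straight into d_parse_params() at init time; the parameter name, tag string and init function are illustrative:

static char *debug_params;
module_param(debug_params, charp, 0644);
MODULE_PARM_DESC(debug_params, "Space-separated SUBMODULE:LEVEL list");

static int __init example_init(void)
{
	d_parse_params(D_LEVEL, D_LEVEL_SIZE, debug_params,
		       "example.debug_params");
	return 0;
}
module_init(example_init);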
+
+#endif /* #ifndef __debug__h__ */
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
new file mode 100644
index 000000000..4ea4c6e23
--- /dev/null
+++ b/include/linux/wireless.h
@@ -0,0 +1,44 @@
+/*
+ * This file defines a set of standard wireless extensions
+ *
+ * Version : 22 16.3.07
+ *
+ * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
+ * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved.
+ */
+#ifndef _LINUX_WIRELESS_H
+#define _LINUX_WIRELESS_H
+
+#include <uapi/linux/wireless.h>
+
+#ifdef CONFIG_COMPAT
+
+#include <linux/compat.h>
+
+struct compat_iw_point {
+ compat_caddr_t pointer;
+ __u16 length;
+ __u16 flags;
+};
+#endif
+#ifdef CONFIG_COMPAT
+struct __compat_iw_event {
+ __u16 len; /* Real length of this stuff */
+ __u16 cmd; /* Wireless IOCTL */
+ compat_caddr_t pointer;
+};
+#define IW_EV_COMPAT_LCP_LEN offsetof(struct __compat_iw_event, pointer)
+#define IW_EV_COMPAT_POINT_OFF offsetof(struct compat_iw_point, length)
+
+/* Size of the various events for compat */
+#define IW_EV_COMPAT_CHAR_LEN (IW_EV_COMPAT_LCP_LEN + IFNAMSIZ)
+#define IW_EV_COMPAT_UINT_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(__u32))
+#define IW_EV_COMPAT_FREQ_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_freq))
+#define IW_EV_COMPAT_PARAM_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_param))
+#define IW_EV_COMPAT_ADDR_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct sockaddr))
+#define IW_EV_COMPAT_QUAL_LEN (IW_EV_COMPAT_LCP_LEN + sizeof(struct iw_quality))
+#define IW_EV_COMPAT_POINT_LEN \
+ (IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \
+ IW_EV_COMPAT_POINT_OFF)
+#endif
+#endif /* _LINUX_WIRELESS_H */
diff --git a/include/linux/wl12xx.h b/include/linux/wl12xx.h
new file mode 100644
index 000000000..95704cd4c
--- /dev/null
+++ b/include/linux/wl12xx.h
@@ -0,0 +1,58 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef _LINUX_WL12XX_H
+#define _LINUX_WL12XX_H
+
+#include <linux/err.h>
+
+struct wl1251_platform_data {
+ int power_gpio;
+ /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */
+ int irq;
+ bool use_eeprom;
+};
+
+#ifdef CONFIG_WILINK_PLATFORM_DATA
+
+int wl1251_set_platform_data(const struct wl1251_platform_data *data);
+
+struct wl1251_platform_data *wl1251_get_platform_data(void);
+
+#else
+
+static inline
+int wl1251_set_platform_data(const struct wl1251_platform_data *data)
+{
+ return -ENOSYS;
+}
+
+static inline
+struct wl1251_platform_data *wl1251_get_platform_data(void)
+{
+ return ERR_PTR(-ENODATA);
+}
+
+#endif
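A board file would typically hand the driver its platform data along these lines; the GPIO number and field values below are purely illustrative:

static struct wl1251_platform_data example_wl1251_pdata = {
	.power_gpio = 87,	/* hypothetical power-enable GPIO */
	.irq	    = 0,	/* 0: use SDIO in-band interrupts */
	.use_eeprom = true,
};

	/* somewhere in the board/machine init code: */
	wl1251_set_platform_data(&example_wl1251_pdata);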
+
+#endif
diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h
new file mode 100644
index 000000000..fd98bb968
--- /dev/null
+++ b/include/linux/wm97xx.h
@@ -0,0 +1,337 @@
+
+/*
+ * Register bits and API for Wolfson WM97xx series of codecs
+ */
+
+#ifndef _LINUX_WM97XX_H
+#define _LINUX_WM97XX_H
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+#include <sound/initval.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/input.h> /* Input device layer */
+#include <linux/platform_device.h>
+
+/*
+ * WM97xx variants
+ */
+#define WM97xx_GENERIC 0x0000
+#define WM97xx_WM1613 0x1613
+
+/*
+ * WM97xx AC97 Touchscreen registers
+ */
+#define AC97_WM97XX_DIGITISER1 0x76
+#define AC97_WM97XX_DIGITISER2 0x78
+#define AC97_WM97XX_DIGITISER_RD 0x7a
+#define AC97_WM9713_DIG1 0x74
+#define AC97_WM9713_DIG2 AC97_WM97XX_DIGITISER1
+#define AC97_WM9713_DIG3 AC97_WM97XX_DIGITISER2
+
+/*
+ * WM97xx register bits
+ */
+#define WM97XX_POLL 0x8000 /* initiate a polling measurement */
+#define WM97XX_ADCSEL_X 0x1000 /* x coord measurement */
+#define WM97XX_ADCSEL_Y 0x2000 /* y coord measurement */
+#define WM97XX_ADCSEL_PRES 0x3000 /* pressure measurement */
+#define WM97XX_AUX_ID1 0x4000
+#define WM97XX_AUX_ID2 0x5000
+#define WM97XX_AUX_ID3 0x6000
+#define WM97XX_AUX_ID4 0x7000
+#define WM97XX_ADCSEL_MASK 0x7000 /* ADC selection mask */
+#define WM97XX_COO 0x0800 /* enable coordinate mode */
+#define WM97XX_CTC 0x0400 /* enable continuous mode */
+#define WM97XX_CM_RATE_93 0x0000 /* 93.75Hz continuous rate */
+#define WM97XX_CM_RATE_187 0x0100 /* 187.5Hz continuous rate */
+#define WM97XX_CM_RATE_375 0x0200 /* 375Hz continuous rate */
+#define WM97XX_CM_RATE_750 0x0300 /* 750Hz continuous rate */
+#define WM97XX_CM_RATE_8K 0x00f0 /* 8kHz continuous rate */
+#define WM97XX_CM_RATE_12K 0x01f0 /* 12kHz continuous rate */
+#define WM97XX_CM_RATE_24K 0x02f0 /* 24kHz continuous rate */
+#define WM97XX_CM_RATE_48K 0x03f0 /* 48kHz continuous rate */
+#define WM97XX_CM_RATE_MASK 0x03f0
+#define WM97XX_RATE(i) (((i & 3) << 8) | ((i & 4) ? 0xf0 : 0))
+#define WM97XX_DELAY(i) ((i << 4) & 0x00f0) /* sample delay times */
+#define WM97XX_DELAY_MASK 0x00f0
+#define WM97XX_SLEN 0x0008 /* slot read back enable */
+#define WM97XX_SLT(i) ((i - 5) & 0x7) /* panel slot (5-11) */
+#define WM97XX_SLT_MASK 0x0007
+#define WM97XX_PRP_DETW 0x4000 /* detect on, digitise off, wake */
+#define WM97XX_PRP_DET 0x8000 /* detect on, digitise off, no wake */
+#define WM97XX_PRP_DET_DIG	0xc000	/* detect on, digitise on */
+#define WM97XX_RPR 0x2000 /* wake up on pen down */
+#define WM97XX_PEN_DOWN 0x8000 /* pen is down */
+
+/* WM9712 Bits */
+#define WM9712_45W 0x1000 /* set for 5-wire touchscreen */
+#define WM9712_PDEN 0x0800 /* measure only when pen down */
+#define WM9712_WAIT 0x0200 /* wait until adc is read before next sample */
+#define WM9712_PIL 0x0100 /* current used for pressure measurement. set 400uA else 200uA */
+#define WM9712_MASK_HI 0x0040 /* hi on mask pin (47) stops conversions */
+#define WM9712_MASK_EDGE 0x0080 /* rising/falling edge on pin delays sample */
+#define WM9712_MASK_SYNC 0x00c0 /* rising/falling edge on mask initiates sample */
+#define WM9712_RPU(i) (i&0x3f) /* internal pull up on pen detect (64k / rpu) */
+#define WM9712_PD(i) (0x1 << i) /* power management */
+
+/* WM9712 Registers */
+#define AC97_WM9712_POWER 0x24
+#define AC97_WM9712_REV 0x58
+
+/* WM9705 Bits */
+#define WM9705_PDEN 0x1000 /* measure only when pen is down */
+#define WM9705_PINV 0x0800 /* inverts sense of pen down output */
+#define WM9705_BSEN 0x0400 /* BUSY flag enable, pin47 is 1 when busy */
+#define WM9705_BINV 0x0200 /* invert BUSY (pin47) output */
+#define WM9705_WAIT 0x0100 /* wait until adc is read before next sample */
+#define WM9705_PIL 0x0080 /* current used for pressure measurement. set 400uA else 200uA */
+#define WM9705_PHIZ 0x0040 /* set PHONE and PCBEEP inputs to high impedance */
+#define WM9705_MASK_HI 0x0010 /* hi on mask stops conversions */
+#define WM9705_MASK_EDGE 0x0020 /* rising/falling edge on pin delays sample */
+#define WM9705_MASK_SYNC 0x0030 /* rising/falling edge on mask initiates sample */
+#define WM9705_PDD(i) (i & 0x000f) /* pen detect comparator threshold */
+
+
+/* WM9713 Bits */
+#define WM9713_PDPOL 0x0400 /* Pen down polarity */
+#define WM9713_POLL 0x0200 /* initiate a polling measurement */
+#define WM9713_CTC 0x0100 /* enable continuous mode */
+#define WM9713_ADCSEL_X 0x0002 /* X measurement */
+#define WM9713_ADCSEL_Y 0x0004 /* Y measurement */
+#define WM9713_ADCSEL_PRES 0x0008 /* Pressure measurement */
+#define WM9713_COO 0x0001 /* enable coordinate mode */
+#define WM9713_45W 0x1000 /* set for 5 wire panel */
+#define WM9713_PDEN 0x0800 /* measure only when pen down */
+#define WM9713_ADCSEL_MASK 0x00fe /* ADC selection mask */
+#define WM9713_WAIT 0x0200 /* coordinate wait */
+
+/* AUX ADC ID's */
+#define TS_COMP1 0x0
+#define TS_COMP2 0x1
+#define TS_BMON 0x2
+#define TS_WIPER 0x3
+
+/* ID numbers */
+#define WM97XX_ID1 0x574d
+#define WM9712_ID2 0x4c12
+#define WM9705_ID2 0x4c05
+#define WM9713_ID2 0x4c13
+
+/* Codec GPIO's */
+#define WM97XX_MAX_GPIO 16
+#define WM97XX_GPIO_1 (1 << 1)
+#define WM97XX_GPIO_2 (1 << 2)
+#define WM97XX_GPIO_3 (1 << 3)
+#define WM97XX_GPIO_4 (1 << 4)
+#define WM97XX_GPIO_5 (1 << 5)
+#define WM97XX_GPIO_6 (1 << 6)
+#define WM97XX_GPIO_7 (1 << 7)
+#define WM97XX_GPIO_8 (1 << 8)
+#define WM97XX_GPIO_9 (1 << 9)
+#define WM97XX_GPIO_10 (1 << 10)
+#define WM97XX_GPIO_11 (1 << 11)
+#define WM97XX_GPIO_12 (1 << 12)
+#define WM97XX_GPIO_13 (1 << 13)
+#define WM97XX_GPIO_14 (1 << 14)
+#define WM97XX_GPIO_15 (1 << 15)
+
+
+#define AC97_LINK_FRAME 21 /* time in uS for AC97 link frame */
+
+
+/*---------------- Return codes from sample reading functions ---------------*/
+
+/* More data is available; call the sample gathering function again */
+#define RC_AGAIN 0x00000001
+/* The returned sample is valid */
+#define RC_VALID 0x00000002
+/* The pen is up (the first RC_VALID without RC_PENUP means pen is down) */
+#define RC_PENUP 0x00000004
+/* The pen is down (RC_VALID implies RC_PENDOWN, but sometimes it is helpful
+   to tell the handler that the pen is down but we don't know its coordinates
+   yet, so the handler should not sleep or wait for a pen-down irq) */
+#define RC_PENDOWN 0x00000008
+
+/*
+ * The wm97xx driver provides a private API for writing platform-specific
+ * drivers.
+ */
+
+/* The structure used to return arch specific sampled data into */
+struct wm97xx_data {
+ int x;
+ int y;
+ int p;
+};
+
+/*
+ * Codec GPIO status
+ */
+enum wm97xx_gpio_status {
+ WM97XX_GPIO_HIGH,
+ WM97XX_GPIO_LOW
+};
+
+/*
+ * Codec GPIO direction
+ */
+enum wm97xx_gpio_dir {
+ WM97XX_GPIO_IN,
+ WM97XX_GPIO_OUT
+};
+
+/*
+ * Codec GPIO polarity
+ */
+enum wm97xx_gpio_pol {
+ WM97XX_GPIO_POL_HIGH,
+ WM97XX_GPIO_POL_LOW
+};
+
+/*
+ * Codec GPIO sticky
+ */
+enum wm97xx_gpio_sticky {
+ WM97XX_GPIO_STICKY,
+ WM97XX_GPIO_NOTSTICKY
+};
+
+/*
+ * Codec GPIO wake
+ */
+enum wm97xx_gpio_wake {
+ WM97XX_GPIO_WAKE,
+ WM97XX_GPIO_NOWAKE
+};
+
+/*
+ * Digitiser ioctl commands
+ */
+#define WM97XX_DIG_START 0x1
+#define WM97XX_DIG_STOP 0x2
+#define WM97XX_PHY_INIT 0x3
+#define WM97XX_AUX_PREPARE 0x4
+#define WM97XX_DIG_RESTORE 0x5
+
+struct wm97xx;
+
+extern struct wm97xx_codec_drv wm9705_codec;
+extern struct wm97xx_codec_drv wm9712_codec;
+extern struct wm97xx_codec_drv wm9713_codec;
+
+/*
+ * Codec driver interface - allows mapping to WM9705/12/13 and newer codecs
+ */
+struct wm97xx_codec_drv {
+ u16 id;
+ char *name;
+
+ /* read 1 sample */
+ int (*poll_sample) (struct wm97xx *, int adcsel, int *sample);
+
+ /* read X,Y,[P] in poll */
+ int (*poll_touch) (struct wm97xx *, struct wm97xx_data *);
+
+ int (*acc_enable) (struct wm97xx *, int enable);
+ void (*phy_init) (struct wm97xx *);
+ void (*dig_enable) (struct wm97xx *, int enable);
+ void (*dig_restore) (struct wm97xx *);
+ void (*aux_prepare) (struct wm97xx *);
+};
+
+
+/* Machine specific and accelerated touch operations */
+struct wm97xx_mach_ops {
+
+	/* accelerated touch readback - coords are transmitted on AC97 link */
+ int acc_enabled;
+ void (*acc_pen_up) (struct wm97xx *);
+ int (*acc_pen_down) (struct wm97xx *);
+ int (*acc_startup) (struct wm97xx *);
+ void (*acc_shutdown) (struct wm97xx *);
+
+ /* interrupt mask control - required for accelerated operation */
+ void (*irq_enable) (struct wm97xx *, int enable);
+
+ /* GPIO pin used for accelerated operation */
+ int irq_gpio;
+
+ /* pre and post sample - can be used to minimise any analog noise */
+ void (*pre_sample) (int); /* function to run before sampling */
+ void (*post_sample) (int); /* function to run after sampling */
+};
+
+struct wm97xx {
+ u16 dig[3], id, gpio[6], misc; /* Cached codec registers */
+ u16 dig_save[3]; /* saved during aux reading */
+ struct wm97xx_codec_drv *codec; /* attached codec driver*/
+ struct input_dev *input_dev; /* touchscreen input device */
+ struct snd_ac97 *ac97; /* ALSA codec access */
+ struct device *dev; /* ALSA device */
+ struct platform_device *battery_dev;
+ struct platform_device *touch_dev;
+ struct wm97xx_mach_ops *mach_ops;
+ struct mutex codec_mutex;
+ struct delayed_work ts_reader; /* Used to poll touchscreen */
+ unsigned long ts_reader_interval; /* Current interval for timer */
+ unsigned long ts_reader_min_interval; /* Minimum interval */
+ unsigned int pen_irq; /* Pen IRQ number in use */
+ struct workqueue_struct *ts_workq;
+ struct work_struct pen_event_work;
+ u16 acc_slot; /* AC97 slot used for acc touch data */
+ u16 acc_rate; /* acc touch data rate */
+ unsigned pen_is_down:1; /* Pen is down */
+ unsigned aux_waiting:1; /* aux measurement waiting */
+ unsigned pen_probably_down:1; /* used in polling mode */
+ u16 variant; /* WM97xx chip variant */
+ u16 suspend_mode; /* PRP in suspend mode */
+};
+
+struct wm97xx_batt_pdata {
+ int batt_aux;
+ int temp_aux;
+ int charge_gpio;
+ int min_voltage;
+ int max_voltage;
+ int batt_div;
+ int batt_mult;
+ int temp_div;
+ int temp_mult;
+ int batt_tech;
+ char *batt_name;
+};
+
+struct wm97xx_pdata {
+ struct wm97xx_batt_pdata *batt_pdata; /* battery data */
+};
+
+/*
+ * Codec GPIO access (not supported on WM9705)
+ * This can be used to set/get codec GPIO and Virtual GPIO status.
+ */
+enum wm97xx_gpio_status wm97xx_get_gpio(struct wm97xx *wm, u32 gpio);
+void wm97xx_set_gpio(struct wm97xx *wm, u32 gpio,
+ enum wm97xx_gpio_status status);
+void wm97xx_config_gpio(struct wm97xx *wm, u32 gpio,
+ enum wm97xx_gpio_dir dir,
+ enum wm97xx_gpio_pol pol,
+ enum wm97xx_gpio_sticky sticky,
+ enum wm97xx_gpio_wake wake);
+
+void wm97xx_set_suspend_mode(struct wm97xx *wm, u16 mode);
+
+/* codec AC97 IO access */
+int wm97xx_reg_read(struct wm97xx *wm, u16 reg);
+void wm97xx_reg_write(struct wm97xx *wm, u16 reg, u16 val);
+
+/* aux adc readback */
+int wm97xx_read_aux_adc(struct wm97xx *wm, u16 adcsel);
+
+/* machine ops */
+int wm97xx_register_mach_ops(struct wm97xx *, struct wm97xx_mach_ops *);
+void wm97xx_unregister_mach_ops(struct wm97xx *);
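+
+/*
+ * Example (illustrative sketch only): configuring a codec GPIO as a sticky,
+ * active-high wake source and reading it back.  The WM97XX_GPIO_3 id and the
+ * direction/polarity/sticky/status enum values are assumed to be the ones
+ * defined earlier in this header; "wm" is assumed to be the struct wm97xx
+ * handed to a machine or battery driver.
+ *
+ *	wm97xx_config_gpio(wm, WM97XX_GPIO_3, WM97XX_GPIO_IN,
+ *			   WM97XX_GPIO_POL_HIGH, WM97XX_GPIO_STICKY,
+ *			   WM97XX_GPIO_WAKE);
+ *
+ *	if (wm97xx_get_gpio(wm, WM97XX_GPIO_3) == WM97XX_GPIO_HIGH)
+ *		wm97xx_set_gpio(wm, WM97XX_GPIO_3, WM97XX_GPIO_LOW);
+ */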
+
+#endif
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
new file mode 100644
index 000000000..deee212af
--- /dev/null
+++ b/include/linux/workqueue.h
@@ -0,0 +1,592 @@
+/*
+ * workqueue.h --- work queue handling for Linux.
+ */
+
+#ifndef _LINUX_WORKQUEUE_H
+#define _LINUX_WORKQUEUE_H
+
+#include <linux/timer.h>
+#include <linux/linkage.h>
+#include <linux/bitops.h>
+#include <linux/lockdep.h>
+#include <linux/threads.h>
+#include <linux/atomic.h>
+#include <linux/cpumask.h>
+
+struct workqueue_struct;
+
+struct work_struct;
+typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
+
+/*
+ * The first word is the work queue pointer and the flags rolled into
+ * one
+ */
+#define work_data_bits(work) ((unsigned long *)(&(work)->data))
+
+enum {
+ WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
+ WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
+ WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
+ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+ WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
+ WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */
+#else
+ WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */
+#endif
+
+ WORK_STRUCT_COLOR_BITS = 4,
+
+ WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
+ WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
+ WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
+ WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+ WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
+#else
+ WORK_STRUCT_STATIC = 0,
+#endif
+
+ /*
+	 * The last color is reserved as the "no color" value, used for
+	 * work items which don't participate in workqueue flushing.
+ */
+ WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
+ WORK_NO_COLOR = WORK_NR_COLORS,
+
+ /* not bound to any CPU, prefer the local CPU */
+ WORK_CPU_UNBOUND = NR_CPUS,
+
+ /*
+ * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
+ * This makes pwqs aligned to 256 bytes and allows 15 workqueue
+ * flush colors.
+ */
+ WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
+ WORK_STRUCT_COLOR_BITS,
+
+ /* data contains off-queue information when !WORK_STRUCT_PWQ */
+ WORK_OFFQ_FLAG_BASE = WORK_STRUCT_COLOR_SHIFT,
+
+ __WORK_OFFQ_CANCELING = WORK_OFFQ_FLAG_BASE,
+ WORK_OFFQ_CANCELING = (1 << __WORK_OFFQ_CANCELING),
+
+ /*
+ * When a work item is off queue, its high bits point to the last
+ * pool it was on. Cap at 31 bits and use the highest number to
+ * indicate that no pool is associated.
+ */
+ WORK_OFFQ_FLAG_BITS = 1,
+ WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+ WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT,
+ WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31,
+ WORK_OFFQ_POOL_NONE = (1LU << WORK_OFFQ_POOL_BITS) - 1,
+
+ /* convenience constants */
+ WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
+ WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
+ WORK_STRUCT_NO_POOL = (unsigned long)WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT,
+
+ /* bit mask for work_busy() return values */
+ WORK_BUSY_PENDING = 1 << 0,
+ WORK_BUSY_RUNNING = 1 << 1,
+
+ /* maximum string length for set_worker_desc() */
+ WORKER_DESC_LEN = 24,
+};
+
+struct work_struct {
+ atomic_long_t data;
+ struct list_head entry;
+ work_func_t func;
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map lockdep_map;
+#endif
+};
+
+#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL)
+#define WORK_DATA_STATIC_INIT() \
+ ATOMIC_LONG_INIT(WORK_STRUCT_NO_POOL | WORK_STRUCT_STATIC)
+
+struct delayed_work {
+ struct work_struct work;
+ struct timer_list timer;
+
+ /* target workqueue and CPU ->timer uses to queue ->work */
+ struct workqueue_struct *wq;
+ int cpu;
+};
+
+/*
+ * A struct for workqueue attributes. This can be used to change
+ * attributes of an unbound workqueue.
+ *
+ * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
+ * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
+ * participate in pool hash calculations or equality comparisons.
+ */
+struct workqueue_attrs {
+ int nice; /* nice level */
+ cpumask_var_t cpumask; /* allowed CPUs */
+ bool no_numa; /* disable NUMA affinity */
+};
+
+static inline struct delayed_work *to_delayed_work(struct work_struct *work)
+{
+ return container_of(work, struct delayed_work, work);
+}
+
+struct execute_work {
+ struct work_struct work;
+};
+
+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting _key
+ * here is required, otherwise it could get initialised to the
+ * copy of the lockdep_map!
+ */
+#define __WORK_INIT_LOCKDEP_MAP(n, k) \
+ .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
+#else
+#define __WORK_INIT_LOCKDEP_MAP(n, k)
+#endif
+
+#define __WORK_INITIALIZER(n, f) { \
+ .data = WORK_DATA_STATIC_INIT(), \
+ .entry = { &(n).entry, &(n).entry }, \
+ .func = (f), \
+ __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
+ }
+
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
+ .work = __WORK_INITIALIZER((n).work, (f)), \
+ .timer = __TIMER_INITIALIZER(delayed_work_timer_fn, \
+ 0, (unsigned long)&(n), \
+ (tflags) | TIMER_IRQSAFE), \
+ }
+
+#define DECLARE_WORK(n, f) \
+ struct work_struct n = __WORK_INITIALIZER(n, f)
+
+#define DECLARE_DELAYED_WORK(n, f) \
+ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
+
+#define DECLARE_DEFERRABLE_WORK(n, f) \
+ struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
+
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+extern void __init_work(struct work_struct *work, int onstack);
+extern void destroy_work_on_stack(struct work_struct *work);
+extern void destroy_delayed_work_on_stack(struct delayed_work *work);
+static inline unsigned int work_static(struct work_struct *work)
+{
+ return *work_data_bits(work) & WORK_STRUCT_STATIC;
+}
+#else
+static inline void __init_work(struct work_struct *work, int onstack) { }
+static inline void destroy_work_on_stack(struct work_struct *work) { }
+static inline void destroy_delayed_work_on_stack(struct delayed_work *work) { }
+static inline unsigned int work_static(struct work_struct *work) { return 0; }
+#endif
+
+/*
+ * initialize all of a work item in one go
+ *
+ * NOTE! No point in using "atomic_long_set()": using a direct
+ * assignment of the work data initializer allows the compiler
+ * to generate better code.
+ */
+#ifdef CONFIG_LOCKDEP
+#define __INIT_WORK(_work, _func, _onstack) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __init_work((_work), _onstack); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
+ lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->func = (_func); \
+ } while (0)
+#else
+#define __INIT_WORK(_work, _func, _onstack) \
+ do { \
+ __init_work((_work), _onstack); \
+ (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
+ INIT_LIST_HEAD(&(_work)->entry); \
+ (_work)->func = (_func); \
+ } while (0)
+#endif
+
+#define INIT_WORK(_work, _func) \
+ __INIT_WORK((_work), (_func), 0)
+
+#define INIT_WORK_ONSTACK(_work, _func) \
+ __INIT_WORK((_work), (_func), 1)
+
+#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
+ do { \
+ INIT_WORK(&(_work)->work, (_func)); \
+ __setup_timer(&(_work)->timer, delayed_work_timer_fn, \
+ (unsigned long)(_work), \
+ (_tflags) | TIMER_IRQSAFE); \
+ } while (0)
+
+#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
+ do { \
+ INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
+ __setup_timer_on_stack(&(_work)->timer, \
+ delayed_work_timer_fn, \
+ (unsigned long)(_work), \
+ (_tflags) | TIMER_IRQSAFE); \
+ } while (0)
+
+#define INIT_DELAYED_WORK(_work, _func) \
+ __INIT_DELAYED_WORK(_work, _func, 0)
+
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func) \
+ __INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
+
+#define INIT_DEFERRABLE_WORK(_work, _func) \
+ __INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
+
+#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func) \
+ __INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
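+
+/*
+ * Example (illustrative sketch): embedding work items in a driver structure
+ * and initialising them at probe time.  "struct foo", foo_work_fn() and
+ * foo_poll_fn() are hypothetical names used only for illustration.
+ *
+ *	struct foo {
+ *		struct work_struct work;
+ *		struct delayed_work poll;
+ *	};
+ *
+ *	static void foo_work_fn(struct work_struct *work)
+ *	{
+ *		struct foo *foo = container_of(work, struct foo, work);
+ *
+ *		pr_info("work ran for foo %p\n", foo);
+ *	}
+ *
+ *	static void foo_poll_fn(struct work_struct *work)
+ *	{
+ *		struct foo *foo = container_of(to_delayed_work(work),
+ *					       struct foo, poll);
+ *
+ *		schedule_delayed_work(&foo->poll, HZ);
+ *	}
+ *
+ *	(in the probe path, with foo already allocated:)
+ *
+ *	INIT_WORK(&foo->work, foo_work_fn);
+ *	INIT_DELAYED_WORK(&foo->poll, foo_poll_fn);
+ */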
+
+/**
+ * work_pending - Find out whether a work item is currently pending
+ * @work: The work item in question
+ */
+#define work_pending(work) \
+ test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
+
+/**
+ * delayed_work_pending - Find out whether a delayable work item is currently
+ * pending
+ * @work: The work item in question
+ */
+#define delayed_work_pending(w) \
+ work_pending(&(w)->work)
+
+/*
+ * Workqueue flags and constants. For details, please refer to
+ * Documentation/workqueue.txt.
+ */
+enum {
+ WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
+ WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
+ WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
+ WQ_HIGHPRI = 1 << 4, /* high priority */
+ WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
+ WQ_SYSFS = 1 << 6, /* visible in sysfs, see wq_sysfs_register() */
+
+ /*
+ * Per-cpu workqueues are generally preferred because they tend to
+ * show better performance thanks to cache locality. Per-cpu
+ * workqueues exclude the scheduler from choosing the CPU to
+ * execute the worker threads, which has an unfortunate side effect
+ * of increasing power consumption.
+ *
+ * The scheduler considers a CPU idle if it doesn't have any task
+ * to execute and tries to keep idle cores idle to conserve power;
+ * however, for example, a per-cpu work item scheduled from an
+ * interrupt handler on an idle CPU will force the scheduler to
+	 * execute the work item on that CPU, breaking the idleness, which in
+ * turn may lead to more scheduling choices which are sub-optimal
+ * in terms of power consumption.
+ *
+ * Workqueues marked with WQ_POWER_EFFICIENT are per-cpu by default
+ * but become unbound if workqueue.power_efficient kernel param is
+	 * specified. Per-cpu workqueues which are identified as contributing
+	 * significantly to power consumption are marked with this flag, and
+	 * enabling the power_efficient mode leads to noticeable power savings
+	 * at the cost of a small performance penalty.
+ *
+ * http://thread.gmane.org/gmane.linux.kernel/1480396
+ */
+ WQ_POWER_EFFICIENT = 1 << 7,
+
+ __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
+ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
+
+ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
+ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+ WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
+};
+
+/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
+#define WQ_UNBOUND_MAX_ACTIVE \
+ max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
+
+/*
+ * System-wide workqueues which are always present.
+ *
+ * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * Multi-CPU multi-threaded. There are users which expect relatively
+ * short queue flush time. Don't queue works which can run for too
+ * long.
+ *
+ * system_highpri_wq is similar to system_wq but for work items which
+ * require WQ_HIGHPRI.
+ *
+ * system_long_wq is similar to system_wq but may host long running
+ * works. Queue flushing might take relatively long.
+ *
+ * system_unbound_wq is an unbound workqueue. Workers are not bound to
+ * any specific CPU, not concurrency managed, and all queued works are
+ * executed immediately as long as max_active limit is not reached and
+ * resources are available.
+ *
+ * system_freezable_wq is equivalent to system_wq except that it's
+ * freezable.
+ *
+ * *_power_efficient_wq are inclined towards saving power and converted
+ * into WQ_UNBOUND variants if 'wq_power_efficient' is enabled; otherwise,
+ * they are the same as their non-power-efficient counterparts - e.g.
+ * system_power_efficient_wq is identical to system_wq if
+ * 'wq_power_efficient' is disabled. See WQ_POWER_EFFICIENT for more info.
+ */
+extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_highpri_wq;
+extern struct workqueue_struct *system_long_wq;
+extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_freezable_wq;
+extern struct workqueue_struct *system_power_efficient_wq;
+extern struct workqueue_struct *system_freezable_power_efficient_wq;
+
+extern struct workqueue_struct *
+__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
+ struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
+
+/**
+ * alloc_workqueue - allocate a workqueue
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @args: args for @fmt
+ *
+ * Allocate a workqueue with the specified parameters. For detailed
+ * information on WQ_* flags, please refer to Documentation/workqueue.txt.
+ *
+ * The __lock_name macro dance is to guarantee that a single lock_class_key
+ * doesn't end up with different names, which isn't allowed by lockdep.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#ifdef CONFIG_LOCKDEP
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+({ \
+ static struct lock_class_key __key; \
+ const char *__lock_name; \
+ \
+ __lock_name = #fmt#args; \
+ \
+ __alloc_workqueue_key((fmt), (flags), (max_active), \
+ &__key, __lock_name, ##args); \
+})
+#else
+#define alloc_workqueue(fmt, flags, max_active, args...) \
+ __alloc_workqueue_key((fmt), (flags), (max_active), \
+ NULL, NULL, ##args)
+#endif
+
+/**
+ * alloc_ordered_workqueue - allocate an ordered workqueue
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @args: args for @fmt
+ *
+ * Allocate an ordered workqueue. An ordered workqueue executes at
+ * most one work item at any given time in the queued order. They are
+ * implemented as unbound workqueues with @max_active of one.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue(fmt, flags, args...) \
+ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+
+#define create_workqueue(name) \
+ alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
+#define create_freezable_workqueue(name) \
+ alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
+ 1, (name))
+#define create_singlethread_workqueue(name) \
+ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+
+extern void destroy_workqueue(struct workqueue_struct *wq);
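+
+/*
+ * Example (illustrative sketch): allocating a reclaim-safe workqueue and an
+ * ordered workqueue at probe time and destroying them on removal.  The
+ * "foo_io"/"foo_cmd" names and the io_wq/cmd_wq pointers are hypothetical.
+ *
+ *	struct workqueue_struct *io_wq, *cmd_wq;
+ *
+ *	io_wq = alloc_workqueue("foo_io", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
+ *	if (!io_wq)
+ *		return -ENOMEM;
+ *
+ *	cmd_wq = alloc_ordered_workqueue("foo_cmd", WQ_MEM_RECLAIM);
+ *	if (!cmd_wq) {
+ *		destroy_workqueue(io_wq);
+ *		return -ENOMEM;
+ *	}
+ *
+ *	(on removal:)
+ *
+ *	destroy_workqueue(cmd_wq);
+ *	destroy_workqueue(io_wq);
+ */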
+
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+void free_workqueue_attrs(struct workqueue_attrs *attrs);
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+ const struct workqueue_attrs *attrs);
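+
+/*
+ * Example (illustrative sketch): pinning an unbound workqueue to CPU 0 and
+ * raising its worker nice level.  "unbound_wq" is assumed to have been
+ * allocated with WQ_UNBOUND; error handling is reduced to the minimum.
+ *
+ *	struct workqueue_attrs *attrs;
+ *	int ret;
+ *
+ *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ *	if (!attrs)
+ *		return -ENOMEM;
+ *
+ *	attrs->nice = -10;
+ *	cpumask_copy(attrs->cpumask, cpumask_of(0));
+ *	ret = apply_workqueue_attrs(unbound_wq, attrs);
+ *	free_workqueue_attrs(attrs);
+ *	return ret;
+ */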
+
+extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
+ struct work_struct *work);
+extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ struct delayed_work *work, unsigned long delay);
+extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ struct delayed_work *dwork, unsigned long delay);
+
+extern void flush_workqueue(struct workqueue_struct *wq);
+extern void drain_workqueue(struct workqueue_struct *wq);
+extern void flush_scheduled_work(void);
+
+extern int schedule_on_each_cpu(work_func_t func);
+
+int execute_in_process_context(work_func_t fn, struct execute_work *);
+
+extern bool flush_work(struct work_struct *work);
+extern bool cancel_work_sync(struct work_struct *work);
+
+extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool cancel_delayed_work(struct delayed_work *dwork);
+extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
+
+extern void workqueue_set_max_active(struct workqueue_struct *wq,
+ int max_active);
+extern bool current_is_workqueue_rescuer(void);
+extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
+extern unsigned int work_busy(struct work_struct *work);
+extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
+extern void print_worker_info(const char *log_lvl, struct task_struct *task);
+extern void show_workqueue_state(void);
+
+/**
+ * queue_work - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
+ *
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
+ */
+static inline bool queue_work(struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ return queue_work_on(WORK_CPU_UNBOUND, wq, work);
+}
+
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @dwork: delayable work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
+ */
+static inline bool queue_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+
+/**
+ * mod_delayed_work - modify delay of or queue a delayed work
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * mod_delayed_work_on() on local CPU.
+ */
+static inline bool mod_delayed_work(struct workqueue_struct *wq,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
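+
+/*
+ * Example (illustrative sketch): using mod_delayed_work() as a watchdog or
+ * debounce helper.  Each call (re)arms the timeout: the delayed work is
+ * queued if it was idle, otherwise only its timer is pushed back.  The
+ * "foo->timeout_work" member is hypothetical.
+ *
+ *	mod_delayed_work(system_wq, &foo->timeout_work, 5 * HZ);
+ */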
+
+/**
+ * schedule_work_on - put work task on a specific cpu
+ * @cpu: cpu to put the work task on
+ * @work: job to be done
+ *
+ * This puts a job on a specific cpu
+ */
+static inline bool schedule_work_on(int cpu, struct work_struct *work)
+{
+ return queue_work_on(cpu, system_wq, work);
+}
+
+/**
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
+ *
+ * Returns %false if @work was already on the kernel-global workqueue and
+ * %true otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
+ */
+static inline bool schedule_work(struct work_struct *work)
+{
+ return queue_work(system_wq, work);
+}
+
+/**
+ * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
+ * @cpu: cpu to use
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue on the specified CPU.
+ */
+static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+}
+
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
+static inline bool schedule_delayed_work(struct delayed_work *dwork,
+ unsigned long delay)
+{
+ return queue_delayed_work(system_wq, dwork, delay);
+}
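+
+/*
+ * Example (illustrative sketch): deferring work from an interrupt handler to
+ * the system workqueue and tearing it down safely.  "struct foo" with its
+ * irq_work/poll members is hypothetical, and <linux/interrupt.h> is assumed
+ * to be included for the irqreturn_t definitions.
+ *
+ *	static irqreturn_t foo_irq(int irq, void *dev_id)
+ *	{
+ *		struct foo *foo = dev_id;
+ *
+ *		schedule_work(&foo->irq_work);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ *	static void foo_remove(struct foo *foo)
+ *	{
+ *		cancel_work_sync(&foo->irq_work);
+ *		cancel_delayed_work_sync(&foo->poll);
+ *	}
+ */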
+
+/**
+ * keventd_up - is workqueue initialized yet?
+ */
+static inline bool keventd_up(void)
+{
+ return system_wq != NULL;
+}
+
+#ifndef CONFIG_SMP
+static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
+{
+ return fn(arg);
+}
+#else
+long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_FREEZER
+extern void freeze_workqueues_begin(void);
+extern bool freeze_workqueues_busy(void);
+extern void thaw_workqueues(void);
+#endif /* CONFIG_FREEZER */
+
+#ifdef CONFIG_SYSFS
+int workqueue_sysfs_register(struct workqueue_struct *wq);
+#else /* CONFIG_SYSFS */
+static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
+{ return 0; }
+#endif /* CONFIG_SYSFS */
+
+#endif
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
new file mode 100644
index 000000000..b2dd371ec
--- /dev/null
+++ b/include/linux/writeback.h
@@ -0,0 +1,189 @@
+/*
+ * include/linux/writeback.h
+ */
+#ifndef WRITEBACK_H
+#define WRITEBACK_H
+
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/fs.h>
+
+DECLARE_PER_CPU(int, dirty_throttle_leaks);
+
+/*
+ * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
+ *
+ * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
+ *
+ * Further beyond, all dirtier tasks will enter a loop, waiting (possibly for a
+ * long time) for the number of dirty pages to drop, until they have written
+ * enough pages.
+ *
+ * The global dirty threshold is normally equal to the global dirty limit,
+ * except when the system suddenly allocates a lot of anonymous memory and
+ * knocks down the global dirty threshold quickly, in which case the global
+ * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
+ */
+#define DIRTY_SCOPE 8
+#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
+
+struct backing_dev_info;
+
+/*
+ * fs/fs-writeback.c
+ */
+enum writeback_sync_modes {
+ WB_SYNC_NONE, /* Don't wait on anything */
+ WB_SYNC_ALL, /* Wait on every mapping */
+};
+
+/*
+ * why some writeback work was initiated
+ */
+enum wb_reason {
+ WB_REASON_BACKGROUND,
+ WB_REASON_TRY_TO_FREE_PAGES,
+ WB_REASON_SYNC,
+ WB_REASON_PERIODIC,
+ WB_REASON_LAPTOP_TIMER,
+ WB_REASON_FREE_MORE_MEM,
+ WB_REASON_FS_FREE_SPACE,
+ /*
+	 * There is no bdi forker thread any more and works are done
+	 * by emergency workers; however, this reason is visible to
+	 * userspace via tracepoints and we keep exposing exactly the
+	 * same information, so the name no longer matches what it does.
+ */
+ WB_REASON_FORKER_THREAD,
+
+ WB_REASON_MAX,
+};
+
+/*
+ * A control structure which tells the writeback code what to do. These are
+ * always on the stack, and hence need no locking. They are always initialised
+ * in a manner such that unspecified fields are set to zero.
+ */
+struct writeback_control {
+ long nr_to_write; /* Write this many pages, and decrement
+ this for each page written */
+ long pages_skipped; /* Pages which were not written */
+
+ /*
+ * For a_ops->writepages(): if start or end are non-zero then this is
+ * a hint that the filesystem need only write out the pages inside that
+ * byte range. The byte at `end' is included in the writeout request.
+ */
+ loff_t range_start;
+ loff_t range_end;
+
+ enum writeback_sync_modes sync_mode;
+
+ unsigned for_kupdate:1; /* A kupdate writeback */
+ unsigned for_background:1; /* A background writeback */
+ unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
+ unsigned for_reclaim:1; /* Invoked from the page allocator */
+ unsigned range_cyclic:1; /* range_start is cyclic */
+ unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
+};
+
+/*
+ * fs/fs-writeback.c
+ */
+struct bdi_writeback;
+void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+ enum wb_reason reason);
+int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+ enum wb_reason reason);
+void sync_inodes_sb(struct super_block *);
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
+void inode_wait_for_writeback(struct inode *inode);
+
+/* writeback.h requires fs.h; it, too, is not included from here. */
+static inline void wait_on_inode(struct inode *inode)
+{
+ might_sleep();
+ wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
+}
+
+/*
+ * mm/page-writeback.c
+ */
+#ifdef CONFIG_BLOCK
+void laptop_io_completion(struct backing_dev_info *info);
+void laptop_sync_completion(void);
+void laptop_mode_sync(struct work_struct *work);
+void laptop_mode_timer_fn(unsigned long data);
+#else
+static inline void laptop_sync_completion(void) { }
+#endif
+void throttle_vm_writeout(gfp_t gfp_mask);
+bool zone_dirty_ok(struct zone *zone);
+
+extern unsigned long global_dirty_limit;
+
+/* These are exported to sysctl. */
+extern int dirty_background_ratio;
+extern unsigned long dirty_background_bytes;
+extern int vm_dirty_ratio;
+extern unsigned long vm_dirty_bytes;
+extern unsigned int dirty_writeback_interval;
+extern unsigned int dirty_expire_interval;
+extern unsigned int dirtytime_expire_interval;
+extern int vm_highmem_is_dirtyable;
+extern int block_dump;
+extern int laptop_mode;
+
+extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+extern int dirty_ratio_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+extern int dirty_bytes_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+int dirtytime_interval_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
+struct ctl_table;
+int dirty_writeback_centisecs_handler(struct ctl_table *, int,
+ void __user *, size_t *, loff_t *);
+
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+ unsigned long dirty);
+
+void __bdi_update_bandwidth(struct backing_dev_info *bdi,
+ unsigned long thresh,
+ unsigned long bg_thresh,
+ unsigned long dirty,
+ unsigned long bdi_thresh,
+ unsigned long bdi_dirty,
+ unsigned long start_time);
+
+void page_writeback_init(void);
+void balance_dirty_pages_ratelimited(struct address_space *mapping);
+
+typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
+ void *data);
+
+int generic_writepages(struct address_space *mapping,
+ struct writeback_control *wbc);
+void tag_pages_for_writeback(struct address_space *mapping,
+ pgoff_t start, pgoff_t end);
+int write_cache_pages(struct address_space *mapping,
+ struct writeback_control *wbc, writepage_t writepage,
+ void *data);
+int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
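+
+/*
+ * Example (illustrative sketch): a filesystem ->writepages() built on
+ * write_cache_pages().  foo_writepage() matches the writepage_t signature
+ * above; "struct foo_sb_info" and foo_write_one_page() are hypothetical.
+ *
+ *	static int foo_writepage(struct page *page,
+ *				 struct writeback_control *wbc, void *data)
+ *	{
+ *		struct foo_sb_info *sbi = data;
+ *
+ *		return foo_write_one_page(sbi, page, wbc);
+ *	}
+ *
+ *	static int foo_writepages(struct address_space *mapping,
+ *				  struct writeback_control *wbc)
+ *	{
+ *		return write_cache_pages(mapping, wbc, foo_writepage,
+ *					 mapping->host->i_sb->s_fs_info);
+ *	}
+ */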
+void writeback_set_ratelimit(void);
+
+void account_page_redirty(struct page *page);
+
+#endif /* WRITEBACK_H */
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
new file mode 100644
index 000000000..760399a47
--- /dev/null
+++ b/include/linux/ww_mutex.h
@@ -0,0 +1,378 @@
+/*
+ * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
+ *
+ * Original mutex implementation started by Ingo Molnar:
+ *
+ * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Wound/wait implementation:
+ * Copyright (C) 2013 Canonical Ltd.
+ *
+ * This file contains the main data structure and API definitions.
+ */
+
+#ifndef __LINUX_WW_MUTEX_H
+#define __LINUX_WW_MUTEX_H
+
+#include <linux/mutex.h>
+
+struct ww_class {
+ atomic_long_t stamp;
+ struct lock_class_key acquire_key;
+ struct lock_class_key mutex_key;
+ const char *acquire_name;
+ const char *mutex_name;
+};
+
+struct ww_acquire_ctx {
+ struct task_struct *task;
+ unsigned long stamp;
+ unsigned acquired;
+#ifdef CONFIG_DEBUG_MUTEXES
+ unsigned done_acquire;
+ struct ww_class *ww_class;
+ struct ww_mutex *contending_lock;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+ unsigned deadlock_inject_interval;
+ unsigned deadlock_inject_countdown;
+#endif
+};
+
+struct ww_mutex {
+ struct mutex base;
+ struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct ww_class *ww_class;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
+ , .ww_class = &ww_class
+#else
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
+#endif
+
+#define __WW_CLASS_INITIALIZER(ww_class) \
+ { .stamp = ATOMIC_LONG_INIT(0) \
+ , .acquire_name = #ww_class "_acquire" \
+ , .mutex_name = #ww_class "_mutex" }
+
+#define __WW_MUTEX_INITIALIZER(lockname, class) \
+		{ .base = __MUTEX_INITIALIZER(lockname.base) \
+ __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+
+#define DEFINE_WW_CLASS(classname) \
+ struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+
+#define DEFINE_WW_MUTEX(mutexname, ww_class) \
+ struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
+
+/**
+ * ww_mutex_init - initialize the w/w mutex
+ * @lock: the mutex to be initialized
+ * @ww_class: the w/w class the mutex should belong to
+ *
+ * Initialize the w/w mutex to unlocked state and associate it with the given
+ * class.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+static inline void ww_mutex_init(struct ww_mutex *lock,
+ struct ww_class *ww_class)
+{
+ __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+ lock->ctx = NULL;
+#ifdef CONFIG_DEBUG_MUTEXES
+ lock->ww_class = ww_class;
+#endif
+}
+
+/**
+ * ww_acquire_init - initialize a w/w acquire context
+ * @ctx: w/w acquire context to initialize
+ * @ww_class: w/w class of the context
+ *
+ * Initializes a context to acquire multiple mutexes of the given w/w class.
+ *
+ * Context-based w/w mutex acquiring can be done in any order whatsoever within
+ * a given lock class. Deadlocks will be detected and handled with the
+ * wait/wound logic.
+ *
+ * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
+ * result in undetected deadlocks and is hence forbidden. Mixing different contexts
+ * for the same w/w class when acquiring mutexes can also result in undetected
+ * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
+ * enabling CONFIG_PROVE_LOCKING.
+ *
+ * Nesting of acquire contexts for _different_ w/w classes is possible, subject
+ * to the usual locking rules between different lock classes.
+ *
+ * An acquire context must be released with ww_acquire_fini by the same task
+ * before the memory is freed. It is recommended to allocate the context itself
+ * on the stack.
+ */
+static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
+ struct ww_class *ww_class)
+{
+ ctx->task = current;
+ ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+ ctx->acquired = 0;
+#ifdef CONFIG_DEBUG_MUTEXES
+ ctx->ww_class = ww_class;
+ ctx->done_acquire = 0;
+ ctx->contending_lock = NULL;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
+ lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
+ &ww_class->acquire_key, 0);
+ mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+ ctx->deadlock_inject_interval = 1;
+ ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
+#endif
+}
+
+/**
+ * ww_acquire_done - marks the end of the acquire phase
+ * @ctx: the acquire context
+ *
+ * Marks the end of the acquire phase; any further w/w mutex lock calls using
+ * this context are forbidden.
+ *
+ * Calling this function is optional. It is just useful to document w/w mutex
+ * code and clearly separate the acquire phase from actually using the locked
+ * data structures.
+ */
+static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+ lockdep_assert_held(ctx);
+
+ DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
+ ctx->done_acquire = 1;
+#endif
+}
+
+/**
+ * ww_acquire_fini - releases a w/w acquire context
+ * @ctx: the acquire context to free
+ *
+ * Releases a w/w acquire context. This must be called _after_ all acquired w/w
+ * mutexes have been released with ww_mutex_unlock.
+ */
+static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+ mutex_release(&ctx->dep_map, 0, _THIS_IP_);
+
+ DEBUG_LOCKS_WARN_ON(ctx->acquired);
+ if (!config_enabled(CONFIG_PROVE_LOCKING))
+ /*
+		 * lockdep will normally handle this,
+		 * but cannot do so without CONFIG_PROVE_LOCKING,
+		 * so mark the context as done here as well
+ */
+ ctx->done_acquire = 1;
+
+ if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
+ /* ensure ww_acquire_fini will still fail if called twice */
+ ctx->acquired = ~0U;
+#endif
+}
+
+extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx);
+extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx);
+
+/**
+ * ww_mutex_lock - acquire the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context, or NULL to acquire only a single lock.
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with the
+ * wait/wound algorithm. If the lock isn't immediately available, this function
+ * either sleeps until it is (wait case) or selects the current context
+ * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * same lock with the same context twice is also detected and signalled by
+ * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes for
+ * the given context and then wait for this contending lock to be available by
+ * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
+ * lock and proceed with trying to acquire further w/w mutexes (e.g. when
+ * scanning through lru lists trying to free resources).
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. The task may not exit without first unlocking the mutex. Also,
+ * kernel memory where the mutex resides must not be freed with the mutex still
+ * locked. The mutex must first be initialized (or statically defined) before it
+ * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
+ * of the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+ if (ctx)
+ return __ww_mutex_lock(lock, ctx);
+
+ mutex_lock(&lock->base);
+ return 0;
+}
+
+/**
+ * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with the
+ * wait/wound algorithm. If the lock isn't immediately available, this function
+ * either sleeps until it is (wait case) or selects the current context
+ * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * same lock with the same context twice is also detected and signalled by
+ * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
+ * signal arrives while waiting for the lock then this function returns -EINTR.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes for
+ * the given context and then wait for this contending lock to be available by
+ * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
+ * not acquire this lock and proceed with trying to acquire further w/w mutexes
+ * (e.g. when scanning through lru lists trying to free resources).
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. The task may not exit without first unlocking the mutex. Also,
+ * kernel memory where the mutex resides must not be freed with the mutex still
+ * locked. The mutex must first be initialized (or statically defined) before it
+ * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
+ * of the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx)
+{
+ if (ctx)
+ return __ww_mutex_lock_interruptible(lock, ctx);
+ else
+ return mutex_lock_interruptible(&lock->base);
+}
+
+/**
+ * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This function
+ * will sleep until the lock becomes available.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context and then call this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the context held. It is forbidden to call this on anything else than the
+ * contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock directly. This function here is simply to help w/w mutex
+ * locking code readability by clearly denoting the slowpath.
+ */
+static inline void
+ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+ int ret;
+#ifdef CONFIG_DEBUG_MUTEXES
+ DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+ ret = ww_mutex_lock(lock, ctx);
+ (void)ret;
+}
+
+/**
+ * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This function
+ * will sleep until the lock becomes available and returns 0 when the lock has
+ * been acquired. If a signal arrives while waiting for the lock then this
+ * function returns -EINTR.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context and then call this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the given context held. It is forbidden to call this on anything else
+ * than the contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock_interruptible directly. This function here is simply to help
+ * w/w mutex locking code readability by clearly denoting the slowpath.
+ */
+static inline int __must_check
+ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
+ struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+ DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+ return ww_mutex_lock_interruptible(lock, ctx);
+}
+
+extern void ww_mutex_unlock(struct ww_mutex *lock);
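+
+/*
+ * Example (illustrative sketch): the usual acquire pattern for taking every
+ * w/w mutex on a list, backing off and retrying via the slowpath on
+ * -EDEADLK.  "struct obj", "my_class" and lock_all() are hypothetical names;
+ * the list membership is assumed not to change while the locks are taken.
+ *
+ *	struct obj {
+ *		struct ww_mutex lock;
+ *		struct list_head head;
+ *	};
+ *
+ *	static int lock_all(struct list_head *list, struct ww_acquire_ctx *ctx)
+ *	{
+ *		struct obj *obj, *res_obj = NULL, *contended;
+ *		int ret;
+ *
+ *		ww_acquire_init(ctx, &my_class);
+ *	retry:
+ *		list_for_each_entry(obj, list, head) {
+ *			if (obj == res_obj) {
+ *				res_obj = NULL;
+ *				continue;
+ *			}
+ *			ret = ww_mutex_lock(&obj->lock, ctx);
+ *			if (ret < 0) {
+ *				contended = obj;
+ *				goto err;
+ *			}
+ *		}
+ *		ww_acquire_done(ctx);
+ *		return 0;
+ *
+ *	err:
+ *		list_for_each_entry_continue_reverse(obj, list, head)
+ *			ww_mutex_unlock(&obj->lock);
+ *		if (res_obj)
+ *			ww_mutex_unlock(&res_obj->lock);
+ *		if (ret == -EDEADLK) {
+ *			ww_mutex_lock_slow(&contended->lock, ctx);
+ *			res_obj = contended;
+ *			goto retry;
+ *		}
+ *		ww_acquire_fini(ctx);
+ *		return ret;
+ *	}
+ *
+ * After the critical section the caller unlocks every obj->lock and then
+ * calls ww_acquire_fini() on the context.
+ */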
+
+/**
+ * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
+ * @lock: mutex to lock
+ *
+ * Trylocks a mutex without acquire context, so no deadlock detection is
+ * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
+ */
+static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
+{
+ return mutex_trylock(&lock->base);
+}
+
+/**
+ * ww_mutex_destroy - mark a w/w mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+static inline void ww_mutex_destroy(struct ww_mutex *lock)
+{
+ mutex_destroy(&lock->base);
+}
+
+/**
+ * ww_mutex_is_locked - is the w/w mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
+{
+ return mutex_is_locked(&lock->base);
+}
+
+#endif
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
new file mode 100644
index 000000000..91b0a68d3
--- /dev/null
+++ b/include/linux/xattr.h
@@ -0,0 +1,100 @@
+/*
+ File: linux/xattr.h
+
+ Extended attributes handling.
+
+ Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
+ Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
+ Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+*/
+#ifndef _LINUX_XATTR_H
+#define _LINUX_XATTR_H
+
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <uapi/linux/xattr.h>
+
+struct inode;
+struct dentry;
+
+struct xattr_handler {
+ const char *prefix;
+ int flags; /* fs private flags passed back to the handlers */
+ size_t (*list)(struct dentry *dentry, char *list, size_t list_size,
+ const char *name, size_t name_len, int handler_flags);
+ int (*get)(struct dentry *dentry, const char *name, void *buffer,
+ size_t size, int handler_flags);
+ int (*set)(struct dentry *dentry, const char *name, const void *buffer,
+ size_t size, int flags, int handler_flags);
+};
+
+struct xattr {
+ const char *name;
+ void *value;
+ size_t value_len;
+};
+
+ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
+int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
+int vfs_removexattr(struct dentry *, const char *);
+
+ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
+int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
+int generic_removexattr(struct dentry *dentry, const char *name);
+ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
+ char **xattr_value, size_t size, gfp_t flags);
+int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
+ const char *value, size_t size, gfp_t flags);
+
+struct simple_xattrs {
+ struct list_head head;
+ spinlock_t lock;
+};
+
+struct simple_xattr {
+ struct list_head list;
+ char *name;
+ size_t size;
+ char value[0];
+};
+
+/*
+ * initialize the simple_xattrs structure
+ */
+static inline void simple_xattrs_init(struct simple_xattrs *xattrs)
+{
+ INIT_LIST_HEAD(&xattrs->head);
+ spin_lock_init(&xattrs->lock);
+}
+
+/*
+ * free all the xattrs
+ */
+static inline void simple_xattrs_free(struct simple_xattrs *xattrs)
+{
+ struct simple_xattr *xattr, *node;
+
+ list_for_each_entry_safe(xattr, node, &xattrs->head, list) {
+ kfree(xattr->name);
+ kfree(xattr);
+ }
+}
+
+struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
+int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
+ void *buffer, size_t size);
+int simple_xattr_set(struct simple_xattrs *xattrs, const char *name,
+ const void *value, size_t size, int flags);
+int simple_xattr_remove(struct simple_xattrs *xattrs, const char *name);
+ssize_t simple_xattr_list(struct simple_xattrs *xattrs, char *buffer,
+ size_t size);
+void simple_xattr_list_add(struct simple_xattrs *xattrs,
+ struct simple_xattr *new_xattr);
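+
+/*
+ * Example (illustrative sketch): an in-memory filesystem keeping its extended
+ * attributes in a struct simple_xattrs embedded in its per-inode info.
+ * "struct foo_inode_info", "info", "buf" and the attribute value are
+ * hypothetical.
+ *
+ *	struct foo_inode_info {
+ *		struct inode vfs_inode;
+ *		struct simple_xattrs xattrs;
+ *	};
+ *
+ *	int err, len;
+ *
+ *	simple_xattrs_init(&info->xattrs);
+ *
+ *	err = simple_xattr_set(&info->xattrs, "user.comment",
+ *			       "hello", 5, XATTR_CREATE);
+ *	len = simple_xattr_get(&info->xattrs, "user.comment",
+ *			       buf, sizeof(buf));
+ *
+ *	(when the inode is destroyed:)
+ *
+ *	simple_xattrs_free(&info->xattrs);
+ */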
+
+#endif /* _LINUX_XATTR_H */
diff --git a/include/linux/xz.h b/include/linux/xz.h
new file mode 100644
index 000000000..64cffa6dd
--- /dev/null
+++ b/include/linux/xz.h
@@ -0,0 +1,264 @@
+/*
+ * XZ decompressor
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ * Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_H
+#define XZ_H
+
+#ifdef __KERNEL__
+# include <linux/stddef.h>
+# include <linux/types.h>
+#else
+# include <stddef.h>
+# include <stdint.h>
+#endif
+
+/* In Linux, this is used to make extern functions static when needed. */
+#ifndef XZ_EXTERN
+# define XZ_EXTERN extern
+#endif
+
+/**
+ * enum xz_mode - Operation mode
+ *
+ * @XZ_SINGLE:		Single-call mode. This uses less RAM than
+ *			the multi-call modes, because the LZMA2
+ * dictionary doesn't need to be allocated as
+ * part of the decoder state. All required data
+ * structures are allocated at initialization,
+ * so xz_dec_run() cannot return XZ_MEM_ERROR.
+ * @XZ_PREALLOC: Multi-call mode with preallocated LZMA2
+ * dictionary buffer. All data structures are
+ * allocated at initialization, so xz_dec_run()
+ * cannot return XZ_MEM_ERROR.
+ * @XZ_DYNALLOC: Multi-call mode. The LZMA2 dictionary is
+ * allocated once the required size has been
+ * parsed from the stream headers. If the
+ * allocation fails, xz_dec_run() will return
+ * XZ_MEM_ERROR.
+ *
+ * It is possible to enable support only for a subset of the above
+ * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
+ * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
+ * with support for all operation modes, but the preboot code may
+ * be built with fewer features to minimize code size.
+ */
+enum xz_mode {
+ XZ_SINGLE,
+ XZ_PREALLOC,
+ XZ_DYNALLOC
+};
+
+/**
+ * enum xz_ret - Return codes
+ * @XZ_OK: Everything is OK so far. More input or more
+ * output space is required to continue. This
+ * return code is possible only in multi-call mode
+ * (XZ_PREALLOC or XZ_DYNALLOC).
+ * @XZ_STREAM_END: Operation finished successfully.
+ * @XZ_UNSUPPORTED_CHECK: Integrity check type is not supported. Decoding
+ * is still possible in multi-call mode by simply
+ * calling xz_dec_run() again.
+ * Note that this return value is used only if
+ * XZ_DEC_ANY_CHECK was defined at build time,
+ * which is not used in the kernel. Unsupported
+ * check types return XZ_OPTIONS_ERROR if
+ * XZ_DEC_ANY_CHECK was not defined at build time.
+ * @XZ_MEM_ERROR: Allocating memory failed. This return code is
+ * possible only if the decoder was initialized
+ *			with XZ_DYNALLOC. The amount of memory that the
+ *			decoder tried to allocate was no more than the
+ *			dict_max argument given to xz_dec_init().
+ * @XZ_MEMLIMIT_ERROR: A bigger LZMA2 dictionary would be needed than
+ * allowed by the dict_max argument given to
+ * xz_dec_init(). This return value is possible
+ * only in multi-call mode (XZ_PREALLOC or
+ * XZ_DYNALLOC); the single-call mode (XZ_SINGLE)
+ * ignores the dict_max argument.
+ * @XZ_FORMAT_ERROR: File format was not recognized (wrong magic
+ * bytes).
+ * @XZ_OPTIONS_ERROR: This implementation doesn't support the requested
+ * compression options. In the decoder this means
+ * that the header CRC32 matches, but the header
+ * itself specifies something that we don't support.
+ * @XZ_DATA_ERROR: Compressed data is corrupt.
+ * @XZ_BUF_ERROR: Cannot make any progress. Details are slightly
+ * different between multi-call and single-call
+ * mode; more information below.
+ *
+ * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls
+ * to XZ code cannot consume any input and cannot produce any new output.
+ * This happens when there is no new input available, or the output buffer
+ * is full while at least one output byte is still pending. Assuming your
+ * code is not buggy, you can get this error only when decoding a compressed
+ * stream that is truncated or otherwise corrupt.
+ *
+ * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer
+ * is too small or the compressed input is corrupt in a way that makes the
+ * decoder produce more output than the caller expected. When it is
+ * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR
+ * is used instead of XZ_BUF_ERROR.
+ */
+enum xz_ret {
+ XZ_OK,
+ XZ_STREAM_END,
+ XZ_UNSUPPORTED_CHECK,
+ XZ_MEM_ERROR,
+ XZ_MEMLIMIT_ERROR,
+ XZ_FORMAT_ERROR,
+ XZ_OPTIONS_ERROR,
+ XZ_DATA_ERROR,
+ XZ_BUF_ERROR
+};
+
+/**
+ * struct xz_buf - Passing input and output buffers to XZ code
+ * @in: Beginning of the input buffer. This may be NULL if and only
+ * if in_pos is equal to in_size.
+ * @in_pos: Current position in the input buffer. This must not exceed
+ * in_size.
+ * @in_size: Size of the input buffer
+ * @out: Beginning of the output buffer. This may be NULL if and only
+ * if out_pos is equal to out_size.
+ * @out_pos: Current position in the output buffer. This must not exceed
+ * out_size.
+ * @out_size: Size of the output buffer
+ *
+ * Only the contents of the output buffer from out[out_pos] onward, and
+ * the variables in_pos and out_pos are modified by the XZ code.
+ */
+struct xz_buf {
+ const uint8_t *in;
+ size_t in_pos;
+ size_t in_size;
+
+ uint8_t *out;
+ size_t out_pos;
+ size_t out_size;
+};
+
+/**
+ * struct xz_dec - Opaque type to hold the XZ decoder state
+ */
+struct xz_dec;
+
+/**
+ * xz_dec_init() - Allocate and initialize a XZ decoder state
+ * @mode: Operation mode
+ * @dict_max: Maximum size of the LZMA2 dictionary (history buffer) for
+ * multi-call decoding. This is ignored in single-call mode
+ * (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes
+ * or 2^n + 2^(n-1) bytes (the latter sizes are less common
+ * in practice), so other values for dict_max don't make sense.
+ * In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB,
+ * 512 KiB, and 1 MiB are probably the only reasonable values,
+ * except for kernel and initramfs images where a bigger
+ * dictionary can be fine and useful.
+ *
+ * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at
+ * once. The caller must provide enough output space or the decoding will
+ * fail. The output space is used as the dictionary buffer, which is why
+ * there is no need to allocate the dictionary as part of the decoder's
+ * internal state.
+ *
+ * Because the output buffer is used as the workspace, streams encoded using
+ * a big dictionary are not a problem in single-call mode. It is enough that
+ * the output buffer is big enough to hold the actual uncompressed data; it
+ * can be smaller than the dictionary size stored in the stream headers.
+ *
+ * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes
+ * of memory is preallocated for the LZMA2 dictionary. This way there is no
+ * risk that xz_dec_run() could run out of memory, since xz_dec_run() will
+ * never allocate any memory. Instead, if the preallocated dictionary is too
+ * small for decoding the given input stream, xz_dec_run() will return
+ * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be
+ * decoded to avoid allocating an excessive amount of memory for the dictionary.
+ *
+ * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC):
+ * dict_max specifies the maximum allowed dictionary size that xz_dec_run()
+ * may allocate once it has parsed the dictionary size from the stream
+ * headers. This way excessive allocations can be avoided while still
+ * limiting the maximum memory usage to a sane value to prevent running the
+ * system out of memory when decompressing streams from untrusted sources.
+ *
+ * On success, xz_dec_init() returns a pointer to struct xz_dec, which is
+ * ready to be used with xz_dec_run(). If memory allocation fails,
+ * xz_dec_init() returns NULL.
+ */
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
+
+/**
+ * xz_dec_run() - Run the XZ decoder
+ * @s: Decoder state allocated using xz_dec_init()
+ * @b: Input and output buffers
+ *
+ * The possible return values depend on build options and operation mode.
+ * See enum xz_ret for details.
+ *
+ * Note that if an error occurs in single-call mode (return value is not
+ * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the
+ * contents of the output buffer from b->out[b->out_pos] onward are
+ * undefined. This is true even after XZ_BUF_ERROR, because with some filter
+ * chains, there may be a second pass over the output buffer, and this pass
+ * cannot be properly done if the output buffer is truncated. Thus, you
+ * cannot give the single-call decoder a buffer that is too small and then
+ * expect to get that much valid data from the beginning of the stream. You
+ * must use the multi-call decoder if you don't want to uncompress the whole
+ * stream.
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
+
+/**
+ * xz_dec_reset() - Reset an already allocated decoder state
+ * @s: Decoder state allocated using xz_dec_init()
+ *
+ * This function can be used to reset the multi-call decoder state without
+ * freeing and reallocating memory with xz_dec_end() and xz_dec_init().
+ *
+ * In single-call mode, xz_dec_reset() is always called in the beginning of
+ * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in
+ * multi-call mode.
+ */
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
+
+/**
+ * xz_dec_end() - Free the memory allocated for the decoder state
+ * @s: Decoder state allocated using xz_dec_init(). If s is NULL,
+ * this function does nothing.
+ */
+XZ_EXTERN void xz_dec_end(struct xz_dec *s);
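+
+/*
+ * Example (illustrative sketch): multi-call decoding of an in-memory stream
+ * with a preallocated 1 MiB dictionary.  "src"/"src_len" and the consume()
+ * sink are hypothetical; in-kernel users rely on the kernel's own CRC32
+ * code, so no extra CRC32 initialization is needed here.
+ *
+ *	struct xz_dec *s;
+ *	struct xz_buf b;
+ *	uint8_t out[4096];
+ *	enum xz_ret ret;
+ *
+ *	s = xz_dec_init(XZ_PREALLOC, 1 << 20);
+ *	if (s == NULL)
+ *		return -ENOMEM;
+ *
+ *	b.in = src;
+ *	b.in_pos = 0;
+ *	b.in_size = src_len;
+ *	b.out = out;
+ *	b.out_pos = 0;
+ *	b.out_size = sizeof(out);
+ *
+ *	do {
+ *		ret = xz_dec_run(s, &b);
+ *		consume(out, b.out_pos);
+ *		b.out_pos = 0;
+ *	} while (ret == XZ_OK);
+ *
+ *	xz_dec_end(s);
+ *	return ret == XZ_STREAM_END ? 0 : -EINVAL;
+ */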
+
+/*
+ * Standalone build (userspace build or in-kernel build for boot time use)
+ * needs a CRC32 implementation. For normal in-kernel use, kernel's own
+ * CRC32 module is used instead, and users of this module don't need to
+ * care about the functions below.
+ */
+#ifndef XZ_INTERNAL_CRC32
+# ifdef __KERNEL__
+# define XZ_INTERNAL_CRC32 0
+# else
+# define XZ_INTERNAL_CRC32 1
+# endif
+#endif
+
+#if XZ_INTERNAL_CRC32
+/*
+ * This must be called before any other xz_* function to initialize
+ * the CRC32 lookup table.
+ */
+XZ_EXTERN void xz_crc32_init(void);
+
+/*
+ * Update CRC32 value using the polynomial from IEEE-802.3. To start a new
+ * calculation, the third argument must be zero. To continue the calculation,
+ * the previously returned value is passed as the third argument.
+ */
+XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+#endif
+#endif
diff --git a/include/linux/yam.h b/include/linux/yam.h
new file mode 100644
index 000000000..512cdc2fb
--- /dev/null
+++ b/include/linux/yam.h
@@ -0,0 +1,82 @@
+/*****************************************************************************/
+
+/*
+ * yam.h -- YAM radio modem driver.
+ *
+ * Copyright (C) 1998 Frederic Rible F1OAT (frible@teaser.fr)
+ * Adapted from baycom.c driver written by Thomas Sailer (sailer@ife.ee.ethz.ch)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Please note that the GPL allows you to use the driver, NOT the radio.
+ * In order to use the radio, you need a license from the communications
+ * authority of your country.
+ *
+ *
+ */
+
+/*****************************************************************************/
+
+#define SIOCYAMRESERVED (0)
+#define SIOCYAMSCFG (1) /* Set configuration */
+#define SIOCYAMGCFG (2) /* Get configuration */
+#define SIOCYAMSMCS (3) /* Set mcs data */
+
+#define YAM_IOBASE (1 << 0)
+#define YAM_IRQ (1 << 1)
+#define YAM_BITRATE (1 << 2) /* Bit rate of radio port ->57600 */
+#define YAM_MODE (1 << 3) /* 0=simplex 1=duplex 2=duplex+tempo */
+#define YAM_HOLDDLY (1 << 4) /* duplex tempo (sec) */
+#define YAM_TXDELAY (1 << 5) /* Tx Delay (ms) */
+#define YAM_TXTAIL (1 << 6) /* Tx Tail (ms) */
+#define YAM_PERSIST (1 << 7) /* Persist (ms) */
+#define YAM_SLOTTIME (1 << 8) /* Slottime (ms) */
+#define YAM_BAUDRATE (1 << 9) /* Baud rate of rs232 port ->115200 */
+
+#define YAM_MAXBITRATE 57600
+#define YAM_MAXBAUDRATE 115200
+#define YAM_MAXMODE 2
+#define YAM_MAXHOLDDLY 99
+#define YAM_MAXTXDELAY 999
+#define YAM_MAXTXTAIL 999
+#define YAM_MAXPERSIST 255
+#define YAM_MAXSLOTTIME 999
+
+#define YAM_FPGA_SIZE 5302
+
+struct yamcfg {
+ unsigned int mask; /* Mask of commands */
+ unsigned int iobase; /* IO Base of COM port */
+ unsigned int irq; /* IRQ of COM port */
+ unsigned int bitrate; /* Bit rate of radio port */
+ unsigned int baudrate; /* Baud rate of the RS232 port */
+ unsigned int txdelay; /* TxDelay */
+ unsigned int txtail; /* TxTail */
+ unsigned int persist; /* Persistence */
+ unsigned int slottime; /* Slottime */
+ unsigned int mode; /* mode 0 (simp), 1(Dupl), 2(Dupl+delay) */
+ unsigned int holddly; /* PTT delay in FullDuplex 2 mode */
+};
+
+struct yamdrv_ioctl_cfg {
+ int cmd;
+ struct yamcfg cfg;
+};
+
+struct yamdrv_ioctl_mcs {
+ int cmd;
+ unsigned int bitrate;
+ unsigned char bits[YAM_FPGA_SIZE];
+};
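
The structures above form the driver's configuration interface. As a rough, hypothetical userspace sketch of how they might be filled in (the header does not define how the structure reaches the driver; in practice it travels through the interface's private ioctl, which is treated as an assumption here):

#include <string.h>
#include <linux/yam.h>

/* Hypothetical helper: describe a configuration update. The mask selects
 * which fields the driver should apply; the rest are ignored. */
static void fill_yam_cfg(struct yamdrv_ioctl_cfg *ioc)
{
        memset(ioc, 0, sizeof(*ioc));
        ioc->cmd = SIOCYAMSCFG;                    /* "set configuration" */
        ioc->cfg.mask = YAM_BITRATE | YAM_TXDELAY;
        ioc->cfg.bitrate = YAM_MAXBITRATE;         /* 57600 bps radio port */
        ioc->cfg.txdelay = 300;                    /* ms, <= YAM_MAXTXDELAY */
}
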
diff --git a/include/linux/z2_battery.h b/include/linux/z2_battery.h
new file mode 100644
index 000000000..7b9750404
--- /dev/null
+++ b/include/linux/z2_battery.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_Z2_BATTERY_H
+#define _LINUX_Z2_BATTERY_H
+
+struct z2_battery_info {
+ int batt_I2C_bus;
+ int batt_I2C_addr;
+ int batt_I2C_reg;
+ int charge_gpio;
+ int min_voltage;
+ int max_voltage;
+ int batt_div;
+ int batt_mult;
+ int batt_tech;
+ char *batt_name;
+};
+
+#endif
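
For context, z2_battery_info is platform data consumed by the Z2 battery driver. A hedged board-file sketch follows; every value is a placeholder inferred from the field names, not taken from any real board file:

#include <linux/power_supply.h>
#include <linux/z2_battery.h>

/* Placeholder platform data; bus/address/GPIO numbers are made up. */
static struct z2_battery_info z2_batt_pdata = {
        .batt_I2C_bus  = 0,
        .batt_I2C_addr = 0x55,
        .batt_I2C_reg  = 2,
        .charge_gpio   = 96,
        .min_voltage   = 3475000,       /* assumed uV */
        .max_voltage   = 4100000,       /* assumed uV */
        .batt_div      = 59,
        .batt_mult     = 1000000,
        .batt_tech     = POWER_SUPPLY_TECHNOLOGY_LION,
        .batt_name     = "Z2",
};
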
diff --git a/include/linux/zbud.h b/include/linux/zbud.h
new file mode 100644
index 000000000..f9d41a6e3
--- /dev/null
+++ b/include/linux/zbud.h
@@ -0,0 +1,22 @@
+#ifndef _ZBUD_H_
+#define _ZBUD_H_
+
+#include <linux/types.h>
+
+struct zbud_pool;
+
+struct zbud_ops {
+ int (*evict)(struct zbud_pool *pool, unsigned long handle);
+};
+
+struct zbud_pool *zbud_create_pool(gfp_t gfp, struct zbud_ops *ops);
+void zbud_destroy_pool(struct zbud_pool *pool);
+int zbud_alloc(struct zbud_pool *pool, size_t size, gfp_t gfp,
+ unsigned long *handle);
+void zbud_free(struct zbud_pool *pool, unsigned long handle);
+int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries);
+void *zbud_map(struct zbud_pool *pool, unsigned long handle);
+void zbud_unmap(struct zbud_pool *pool, unsigned long handle);
+u64 zbud_get_pool_size(struct zbud_pool *pool);
+
+#endif /* _ZBUD_H_ */
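
As a quick illustration of the API above (not from the patch), the following sketch stores one small buffer in a zbud pool; zbud only holds objects up to roughly a page, so len is assumed to be small, and error handling is trimmed:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zbud.h>

static int zbud_demo(const void *src, size_t len)
{
        struct zbud_pool *pool;
        unsigned long handle;
        void *dst;
        int ret;

        pool = zbud_create_pool(GFP_KERNEL, NULL);      /* no eviction callback */
        if (!pool)
                return -ENOMEM;

        ret = zbud_alloc(pool, len, GFP_KERNEL, &handle);
        if (ret)
                goto out;

        dst = zbud_map(pool, handle);   /* handle -> usable address */
        memcpy(dst, src, len);
        zbud_unmap(pool, handle);

        zbud_free(pool, handle);
out:
        zbud_destroy_pool(pool);
        return ret;
}
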
diff --git a/include/linux/zconf.h b/include/linux/zconf.h
new file mode 100644
index 000000000..0beb75e38
--- /dev/null
+++ b/include/linux/zconf.h
@@ -0,0 +1,57 @@
+/* zconf.h -- configuration of the zlib compression library
+ * Copyright (C) 1995-1998 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* @(#) $Id$ */
+
+#ifndef _ZCONF_H
+#define _ZCONF_H
+
+/* The memory requirements for deflate are (in bytes):
+ (1 << (windowBits+2)) + (1 << (memLevel+9))
+ that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
+ plus a few kilobytes for small objects. For example, if you want to reduce
+ the default memory requirements from 256K to 128K, compile with
+ make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
+ Of course this will generally degrade compression (there's no free lunch).
+
+ The memory requirements for inflate are (in bytes) 1 << windowBits
+ that is, 32K for windowBits=15 (default value) plus a few kilobytes
+ for small objects.
+*/
+
+/* Maximum value for memLevel in deflateInit2 */
+#ifndef MAX_MEM_LEVEL
+# define MAX_MEM_LEVEL 8
+#endif
+
+/* Maximum value for windowBits in deflateInit2 and inflateInit2.
+ * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
+ * created by gzip. (Files created by minigzip can still be extracted by
+ * gzip.)
+ */
+#ifndef MAX_WBITS
+# define MAX_WBITS 15 /* 32K LZ77 window */
+#endif
+
+/* default windowBits for decompression. MAX_WBITS is for compression only */
+#ifndef DEF_WBITS
+# define DEF_WBITS MAX_WBITS
+#endif
+
+/* default memLevel */
+#if MAX_MEM_LEVEL >= 8
+# define DEF_MEM_LEVEL 8
+#else
+# define DEF_MEM_LEVEL MAX_MEM_LEVEL
+#endif
+
+ /* Type declarations */
+
+typedef unsigned char Byte; /* 8 bits */
+typedef unsigned int uInt; /* 16 bits or more */
+typedef unsigned long uLong; /* 32 bits or more */
+typedef void *voidp;
+
+#endif /* _ZCONF_H */
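
As a side note (not part of the patch), the deflate memory formula quoted at the top of zconf.h can be restated as a tiny helper; with the defaults above (MAX_WBITS = 15, DEF_MEM_LEVEL = 8) it evaluates to 128 KiB + 128 KiB = 256 KiB, matching the comment:

/* Restates (1 << (windowBits + 2)) + (1 << (memLevel + 9)) from the
 * comment above; small-object overhead is not included. */
static unsigned long deflate_mem_estimate(int windowBits, int memLevel)
{
        return (1UL << (windowBits + 2)) + (1UL << (memLevel + 9));
}

/* deflate_mem_estimate(MAX_WBITS, DEF_MEM_LEVEL) == 262144 (256 KiB) */
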
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
new file mode 100644
index 000000000..92dbbd3f6
--- /dev/null
+++ b/include/linux/zlib.h
@@ -0,0 +1,593 @@
+/* zlib.h -- interface of the 'zlib' general purpose compression library
+
+ Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+
+ The data format used by the zlib library is described by RFCs (Request for
+ Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt
+ (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
+*/
+
+#ifndef _ZLIB_H
+#define _ZLIB_H
+
+#include <linux/zconf.h>
+
+/* zlib deflate based on ZLIB_VERSION "1.1.3" */
+/* zlib inflate based on ZLIB_VERSION "1.2.3" */
+
+/*
+ This is a modified version of zlib for use inside the Linux kernel.
+ The main changes are to perform all memory allocation in advance.
+
+ Inflation Changes:
+ * Z_PACKET_FLUSH is added and used by ppp_deflate. Before returning,
+ this checks that there is no more input data available and that the
+ next data is a STORED block. It also resets the mode to be ready for
+ the next data, all as per PPP requirements.
+ * Addition of zlib_inflateIncomp, which copies incompressible data into
+ the history window and adjusts the accounting without calling
+ zlib_inflate itself to inflate the data.
+*/
+
+/*
+ The 'zlib' compression library provides in-memory compression and
+ decompression functions, including integrity checks of the uncompressed
+ data. This version of the library supports only one compression method
+ (deflation) but other algorithms will be added later and will have the same
+ stream interface.
+
+ Compression can be done in a single step if the buffers are large
+ enough (for example if an input file is mmap'ed), or can be done by
+ repeated calls of the compression function. In the latter case, the
+ application must provide more input and/or consume the output
+ (providing more output space) before each call.
+
+ The compressed data format used by default by the in-memory functions is
+ the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped
+ around a deflate stream, which is itself documented in RFC 1951.
+
+ The library also supports reading and writing files in gzip (.gz) format
+ with an interface similar to that of stdio.
+
+ The zlib format was designed to be compact and fast for use in memory
+ and on communications channels. The gzip format was designed for single-
+ file compression on file systems, has a larger header than zlib to maintain
+ directory information, and uses a different, slower check method than zlib.
+
+ The library does not install any signal handler. The decoder checks
+ the consistency of the compressed data, so the library should never
+ crash even in case of corrupted input.
+*/
+
+struct internal_state;
+
+typedef struct z_stream_s {
+ const Byte *next_in; /* next input byte */
+ uLong avail_in; /* number of bytes available at next_in */
+ uLong total_in; /* total nb of input bytes read so far */
+
+ Byte *next_out; /* next output byte should be put there */
+ uLong avail_out; /* remaining free space at next_out */
+ uLong total_out; /* total nb of bytes output so far */
+
+ char *msg; /* last error message, NULL if no error */
+ struct internal_state *state; /* not visible by applications */
+
+ void *workspace; /* memory allocated for this stream */
+
+ int data_type; /* best guess about the data type: ascii or binary */
+ uLong adler; /* adler32 value of the uncompressed data */
+ uLong reserved; /* reserved for future use */
+} z_stream;
+
+typedef z_stream *z_streamp;
+
+/*
+ The application must update next_in and avail_in when avail_in has
+ dropped to zero. It must update next_out and avail_out when avail_out
+ has dropped to zero. The application must initialize zalloc, zfree and
+ opaque before calling the init function. All other fields are set by the
+ compression library and must not be updated by the application.
+
+ The opaque value provided by the application will be passed as the first
+ parameter for calls of zalloc and zfree. This can be useful for custom
+ memory management. The compression library attaches no meaning to the
+ opaque value.
+
+ zalloc must return NULL if there is not enough memory for the object.
+ If zlib is used in a multi-threaded application, zalloc and zfree must be
+ thread safe.
+
+ On 16-bit systems, the functions zalloc and zfree must be able to allocate
+ exactly 65536 bytes, but will not be required to allocate more than this
+ if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
+ pointers returned by zalloc for objects of exactly 65536 bytes *must*
+ have their offset normalized to zero. The default allocation function
+ provided by this library ensures this (see zutil.c). To reduce memory
+ requirements and avoid any allocation of 64K objects, at the expense of
+ compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
+
+ The fields total_in and total_out can be used for statistics or
+ progress reports. After compression, total_in holds the total size of
+ the uncompressed data and may be saved for use in the decompressor
+ (particularly if the decompressor wants to decompress everything in
+ a single step).
+*/
+
+ /* constants */
+
+#define Z_NO_FLUSH 0
+#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
+#define Z_PACKET_FLUSH 2
+#define Z_SYNC_FLUSH 3
+#define Z_FULL_FLUSH 4
+#define Z_FINISH 5
+#define Z_BLOCK 6 /* Only for inflate at present */
+/* Allowed flush values; see deflate() and inflate() below for details */
+
+#define Z_OK 0
+#define Z_STREAM_END 1
+#define Z_NEED_DICT 2
+#define Z_ERRNO (-1)
+#define Z_STREAM_ERROR (-2)
+#define Z_DATA_ERROR (-3)
+#define Z_MEM_ERROR (-4)
+#define Z_BUF_ERROR (-5)
+#define Z_VERSION_ERROR (-6)
+/* Return codes for the compression/decompression functions. Negative
+ * values are errors, positive values are used for special but normal events.
+ */
+
+#define Z_NO_COMPRESSION 0
+#define Z_BEST_SPEED 1
+#define Z_BEST_COMPRESSION 9
+#define Z_DEFAULT_COMPRESSION (-1)
+/* compression levels */
+
+#define Z_FILTERED 1
+#define Z_HUFFMAN_ONLY 2
+#define Z_DEFAULT_STRATEGY 0
+/* compression strategy; see deflateInit2() below for details */
+
+#define Z_BINARY 0
+#define Z_ASCII 1
+#define Z_UNKNOWN 2
+/* Possible values of the data_type field */
+
+#define Z_DEFLATED 8
+/* The deflate compression method (the only one supported in this version) */
+
+ /* basic functions */
+
+extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
+/*
+ Returns the number of bytes that needs to be allocated for a per-
+ stream workspace with the specified parameters. A pointer to this
+ number of bytes should be returned in stream->workspace before
+ you call zlib_deflateInit() or zlib_deflateInit2(). If you call
+ zlib_deflateInit(), specify windowBits = MAX_WBITS and memLevel =
+ MAX_MEM_LEVEL here. If you call zlib_deflateInit2(), the windowBits
+ and memLevel parameters passed to zlib_deflateInit2() must not
+ exceed those passed here.
+*/
+
+/*
+extern int deflateInit (z_streamp strm, int level);
+
+ Initializes the internal stream state for compression. The fields
+ zalloc, zfree and opaque must be initialized before by the caller.
+ If zalloc and zfree are set to NULL, deflateInit updates them to
+ use default allocation functions.
+
+ The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
+ 1 gives best speed, 9 gives best compression, 0 gives no compression at
+ all (the input data is simply copied a block at a time).
+ Z_DEFAULT_COMPRESSION requests a default compromise between speed and
+ compression (currently equivalent to level 6).
+
+ deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
+ enough memory, Z_STREAM_ERROR if level is not a valid compression level,
+ Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
+ with the version assumed by the caller (ZLIB_VERSION).
+ msg is set to null if there is no error message. deflateInit does not
+ perform any compression: this will be done by deflate().
+*/
+
+
+extern int zlib_deflate (z_streamp strm, int flush);
+/*
+ deflate compresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce some
+ output latency (reading input without producing any output) except when
+ forced to flush.
+
+ The detailed semantics are as follows. deflate performs one or both of the
+ following actions:
+
+ - Compress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in and avail_in are updated and
+ processing will resume at this point for the next call of deflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. This action is forced if the parameter flush is non zero.
+ Forcing flush frequently degrades the compression ratio, so this parameter
+ should be set only when necessary (in interactive applications).
+ Some output may be provided even if flush is not set.
+
+ Before the call of deflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating avail_in or avail_out accordingly; avail_out
+ should never be zero before the call. The application can consume the
+ compressed output when it wants, for example when the output buffer is full
+ (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
+ and with zero avail_out, it must be called again after making room in the
+ output buffer because there might be more output pending.
+
+ If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
+ flushed to the output buffer and the output is aligned on a byte boundary, so
+ that the decompressor can get all input data available so far. (In particular
+ avail_in is zero after the call if enough output space has been provided
+ before the call.) Flushing may degrade compression for some compression
+ algorithms and so it should be used only when necessary.
+
+ If flush is set to Z_FULL_FLUSH, all output is flushed as with
+ Z_SYNC_FLUSH, and the compression state is reset so that decompression can
+ restart from this point if previous compressed data has been damaged or if
+ random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
+ the compression.
+
+ If deflate returns with avail_out == 0, this function must be called again
+ with the same value of the flush parameter and more output space (updated
+ avail_out), until the flush is complete (deflate returns with non-zero
+ avail_out).
+
+ If the parameter flush is set to Z_FINISH, pending input is processed,
+ pending output is flushed and deflate returns with Z_STREAM_END if there
+ was enough output space; if deflate returns with Z_OK, this function must be
+ called again with Z_FINISH and more output space (updated avail_out) but no
+ more input data, until it returns with Z_STREAM_END or an error. After
+ deflate has returned Z_STREAM_END, the only possible operations on the
+ stream are deflateReset or deflateEnd.
+
+ Z_FINISH can be used immediately after deflateInit if all the compression
+ is to be done in a single step. In this case, avail_out must be at least
+ 0.1% larger than avail_in plus 12 bytes. If deflate does not return
+ Z_STREAM_END, then it must be called again as described above.
+
+ deflate() sets strm->adler to the adler32 checksum of all input read
+ so far (that is, total_in bytes).
+
+ deflate() may update data_type if it can make a good guess about
+ the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
+ binary. This field is only for information purposes and does not affect
+ the compression algorithm in any manner.
+
+ deflate() returns Z_OK if some progress has been made (more input
+ processed or more output produced), Z_STREAM_END if all input has been
+ consumed and all output has been produced (only when flush is set to
+ Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
+ if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible
+ (for example avail_in or avail_out was zero).
+*/
+
+
+extern int zlib_deflateEnd (z_streamp strm);
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
+ stream state was inconsistent, Z_DATA_ERROR if the stream was freed
+ prematurely (some input or output was discarded). In the error case,
+ msg may be set but then points to a static string (which must not be
+ deallocated).
+*/
+
+
+extern int zlib_inflate_workspacesize (void);
+/*
+ Returns the number of bytes that needs to be allocated for a per-
+ stream workspace. A pointer to this number of bytes should be
+ returned in stream->workspace before calling zlib_inflateInit().
+*/
+
+/*
+extern int zlib_inflateInit (z_streamp strm);
+
+ Initializes the internal stream state for decompression. The fields
+ next_in, avail_in, and workspace must be initialized before by
+ the caller. If next_in is not NULL and avail_in is large enough (the exact
+ value depends on the compression method), inflateInit determines the
+ compression method from the zlib header and allocates all data structures
+ accordingly; otherwise the allocation will be deferred to the first call of
+ inflate. If zalloc and zfree are set to NULL, inflateInit updates them to
+ use default allocation functions.
+
+ inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
+ version assumed by the caller. msg is set to null if there is no error
+ message. inflateInit does not perform any decompression apart from reading
+ the zlib header if present: this will be done by inflate(). (So next_in and
+ avail_in may be modified, but next_out and avail_out are unchanged.)
+*/
+
+
+extern int zlib_inflate (z_streamp strm, int flush);
+/*
+ inflate decompresses as much data as possible, and stops when the input
+ buffer becomes empty or the output buffer becomes full. It may introduce
+ some output latency (reading input without producing any output) except when
+ forced to flush.
+
+ The detailed semantics are as follows. inflate performs one or both of the
+ following actions:
+
+ - Decompress more input starting at next_in and update next_in and avail_in
+ accordingly. If not all input can be processed (because there is not
+ enough room in the output buffer), next_in is updated and processing
+ will resume at this point for the next call of inflate().
+
+ - Provide more output starting at next_out and update next_out and avail_out
+ accordingly. inflate() provides as much output as possible, until there
+ is no more input data or no more space in the output buffer (see below
+ about the flush parameter).
+
+ Before the call of inflate(), the application should ensure that at least
+ one of the actions is possible, by providing more input and/or consuming
+ more output, and updating the next_* and avail_* values accordingly.
+ The application can consume the uncompressed output when it wants, for
+ example when the output buffer is full (avail_out == 0), or after each
+ call of inflate(). If inflate returns Z_OK and with zero avail_out, it
+ must be called again after making room in the output buffer because there
+ might be more output pending.
+
+ The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH,
+ Z_FINISH, or Z_BLOCK. Z_SYNC_FLUSH requests that inflate() flush as much
+ output as possible to the output buffer. Z_BLOCK requests that inflate() stop
+ if and when it gets to the next deflate block boundary. When decoding the
+ zlib or gzip format, this will cause inflate() to return immediately after
+ the header and before the first block. When doing a raw inflate, inflate()
+ will go ahead and process the first block, and will return when it gets to
+ the end of that block, or when it runs out of data.
+
+ The Z_BLOCK option assists in appending to or combining deflate streams.
+ Also to assist in this, on return inflate() will set strm->data_type to the
+ number of unused bits in the last byte taken from strm->next_in, plus 64
+ if inflate() is currently decoding the last block in the deflate stream,
+ plus 128 if inflate() returned immediately after decoding an end-of-block
+ code or decoding the complete header up to just before the first byte of the
+ deflate stream. The end-of-block will not be indicated until all of the
+ uncompressed data from that block has been written to strm->next_out. The
+ number of unused bits may in general be greater than seven, except when
+ bit 7 of data_type is set, in which case the number of unused bits will be
+ less than eight.
+
+ inflate() should normally be called until it returns Z_STREAM_END or an
+ error. However if all decompression is to be performed in a single step
+ (a single call of inflate), the parameter flush should be set to
+ Z_FINISH. In this case all pending input is processed and all pending
+ output is flushed; avail_out must be large enough to hold all the
+ uncompressed data. (The size of the uncompressed data may have been saved
+ by the compressor for this purpose.) The next operation on this stream must
+ be inflateEnd to deallocate the decompression state. The use of Z_FINISH
+ is never required, but can be used to inform inflate that a faster approach
+ may be used for the single inflate() call.
+
+ In this implementation, inflate() always flushes as much output as
+ possible to the output buffer, and always uses the faster approach on the
+ first call. So the only effect of the flush parameter in this implementation
+ is on the return value of inflate(), as noted below, or when it returns early
+ because Z_BLOCK is used.
+
+ If a preset dictionary is needed after this call (see inflateSetDictionary
+ below), inflate sets strm->adler to the adler32 checksum of the dictionary
+ chosen by the compressor and returns Z_NEED_DICT; otherwise it sets
+ strm->adler to the adler32 checksum of all output produced so far (that is,
+ total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described
+ below. At the end of the stream, inflate() checks that its computed adler32
+ checksum is equal to that saved by the compressor and returns Z_STREAM_END
+ only if the checksum is correct.
+
+ inflate() will decompress and check either zlib-wrapped or gzip-wrapped
+ deflate data. The header type is detected automatically. Any information
+ contained in the gzip header is not retained, so applications that need that
+ information should instead use raw inflate, see inflateInit2() below, or
+ inflateBack() and perform their own processing of the gzip header and
+ trailer.
+
+ inflate() returns Z_OK if some progress has been made (more input processed
+ or more output produced), Z_STREAM_END if the end of the compressed data has
+ been reached and all uncompressed output has been produced, Z_NEED_DICT if a
+ preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
+ corrupted (input stream not conforming to the zlib format or incorrect check
+ value), Z_STREAM_ERROR if the stream structure was inconsistent (for example
+ if next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory,
+ Z_BUF_ERROR if no progress is possible or if there was not enough room in the
+ output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and
+ inflate() can be called again with more input and more output space to
+ continue decompressing. If Z_DATA_ERROR is returned, the application may then
+ call inflateSync() to look for a good compression block if a partial recovery
+ of the data is desired.
+*/
+
+
+extern int zlib_inflateEnd (z_streamp strm);
+/*
+ All dynamically allocated data structures for this stream are freed.
+ This function discards any unprocessed input and does not flush any
+ pending output.
+
+ inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
+ was inconsistent. In the error case, msg may be set but then points to a
+ static string (which must not be deallocated).
+*/
+
+ /* Advanced functions */
+
+/*
+ The following functions are needed only in some special applications.
+*/
+
+/*
+extern int deflateInit2 (z_streamp strm,
+ int level,
+ int method,
+ int windowBits,
+ int memLevel,
+ int strategy);
+
+ This is another version of deflateInit with more compression options. The
+ fields next_in, zalloc, zfree and opaque must be initialized before by
+ the caller.
+
+ The method parameter is the compression method. It must be Z_DEFLATED in
+ this version of the library.
+
+ The windowBits parameter is the base two logarithm of the window size
+ (the size of the history buffer). It should be in the range 8..15 for this
+ version of the library. Larger values of this parameter result in better
+ compression at the expense of memory usage. The default value is 15 if
+ deflateInit is used instead.
+
+ The memLevel parameter specifies how much memory should be allocated
+ for the internal compression state. memLevel=1 uses minimum memory but
+ is slow and reduces compression ratio; memLevel=9 uses maximum memory
+ for optimal speed. The default value is 8. See zconf.h for total memory
+ usage as a function of windowBits and memLevel.
+
+ The strategy parameter is used to tune the compression algorithm. Use the
+ value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+ filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
+ string match). Filtered data consists mostly of small values with a
+ somewhat random distribution. In this case, the compression algorithm is
+ tuned to compress them better. The effect of Z_FILTERED is to force more
+ Huffman coding and less string matching; it is somewhat intermediate
+ between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. The strategy parameter only affects
+ the compression ratio but not the correctness of the compressed output even
+ if it is not set appropriately.
+
+ deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
+ method). msg is set to null if there is no error message. deflateInit2 does
+ not perform any compression: this will be done by deflate().
+*/
+
+extern int zlib_deflateReset (z_streamp strm);
+/*
+ This function is equivalent to deflateEnd followed by deflateInit,
+ but does not free and reallocate all the internal compression state.
+ The stream will keep the same compression level and any other attributes
+ that may have been set by deflateInit2.
+
+ deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+static inline unsigned long deflateBound(unsigned long s)
+{
+ return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
+}
+
+/*
+extern int inflateInit2 (z_streamp strm, int windowBits);
+
+ This is another version of inflateInit with an extra parameter. The
+ fields next_in, avail_in, zalloc, zfree and opaque must be initialized
+ before by the caller.
+
+ The windowBits parameter is the base two logarithm of the maximum window
+ size (the size of the history buffer). It should be in the range 8..15 for
+ this version of the library. The default value is 15 if inflateInit is used
+ instead. windowBits must be greater than or equal to the windowBits value
+ provided to deflateInit2() while compressing, or it must be equal to 15 if
+ deflateInit2() was not used. If a compressed stream with a larger window
+ size is given as input, inflate() will return with the error code
+ Z_DATA_ERROR instead of trying to allocate a larger window.
+
+ windowBits can also be -8..-15 for raw inflate. In this case, -windowBits
+ determines the window size. inflate() will then process raw deflate data,
+ not looking for a zlib or gzip header, not generating a check value, and not
+ looking for any check values for comparison at the end of the stream. This
+ is for use with other formats that use the deflate compressed data format
+ such as zip. Those formats provide their own check values. If a custom
+ format is developed using the raw deflate format for compressed data, it is
+ recommended that a check value such as an adler32 or a crc32 be applied to
+ the uncompressed data as is done in the zlib, gzip, and zip formats. For
+ most applications, the zlib format should be used as is. Note that comments
+ above on the use in deflateInit2() apply to the magnitude of windowBits.
+
+ windowBits can also be greater than 15 for optional gzip decoding. Add
+ 32 to windowBits to enable zlib and gzip decoding with automatic header
+ detection, or add 16 to decode only the gzip format (the zlib format will
+ return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is
+ a crc32 instead of an adler32.
+
+ inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ memory, Z_STREAM_ERROR if a parameter is invalid (such as a null strm). msg
+ is set to null if there is no error message. inflateInit2 does not perform
+ any decompression apart from reading the zlib header if present: this will
+ be done by inflate(). (So next_in and avail_in may be modified, but next_out
+ and avail_out are unchanged.)
+*/
+
+extern int zlib_inflateReset (z_streamp strm);
+/*
+ This function is equivalent to inflateEnd followed by inflateInit,
+ but does not free and reallocate all the internal decompression state.
+ The stream will keep attributes that may have been set by inflateInit2.
+
+ inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ stream state was inconsistent (such as zalloc or state being NULL).
+*/
+
+extern int zlib_inflateIncomp (z_stream *strm);
+/*
+ This function adds the data at next_in (avail_in bytes) to the output
+ history without performing any output. There must be no pending output,
+ and the decompressor must be expecting to see the start of a block.
+ Calling this function is equivalent to decompressing a stored block
+ containing the data at next_in (except that the data is not output).
+*/
+
+#define zlib_deflateInit(strm, level) \
+ zlib_deflateInit2((strm), (level), Z_DEFLATED, MAX_WBITS, \
+ DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY)
+#define zlib_inflateInit(strm) \
+ zlib_inflateInit2((strm), DEF_WBITS)
+
+extern int zlib_deflateInit2(z_streamp strm, int level, int method,
+ int windowBits, int memLevel,
+ int strategy);
+extern int zlib_inflateInit2(z_streamp strm, int windowBits);
+
+#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
+ struct internal_state {int dummy;}; /* hack for buggy compilers */
+#endif
+
+/* Utility function: initialize zlib, unpack binary blob, clean up zlib,
+ * return len or negative error code. */
+extern int zlib_inflate_blob(void *dst, unsigned dst_sz, const void *src, unsigned src_sz);
+
+#endif /* _ZLIB_H */
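
Putting the declarations above together, here is a hedged one-shot compression sketch for the kernel API; it is not taken from the patch, and a real caller would size the output buffer from deflateBound() and handle Z_BUF_ERROR properly:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/zlib.h>

/* Compress src in a single Z_FINISH step; returns the compressed length
 * or a negative value on failure. */
static int deflate_oneshot(void *dst, unsigned int dst_len,
                           const void *src, unsigned int src_len)
{
        struct z_stream_s strm;
        int ret;

        memset(&strm, 0, sizeof(strm));
        strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS,
                                                            MAX_MEM_LEVEL));
        if (!strm.workspace)
                return -ENOMEM;

        ret = zlib_deflateInit(&strm, Z_DEFAULT_COMPRESSION);
        if (ret != Z_OK)
                goto out;

        strm.next_in = src;
        strm.avail_in = src_len;
        strm.next_out = dst;
        strm.avail_out = dst_len;

        ret = zlib_deflate(&strm, Z_FINISH);    /* one-shot compression */
        zlib_deflateEnd(&strm);
        ret = (ret == Z_STREAM_END) ? (int)strm.total_out : -EIO;
out:
        vfree(strm.workspace);
        return ret;
}
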
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
new file mode 100644
index 000000000..63fbba074
--- /dev/null
+++ b/include/linux/zorro.h
@@ -0,0 +1,151 @@
+/*
+ * linux/zorro.h -- Amiga AutoConfig (Zorro) Bus Definitions
+ *
+ * Copyright (C) 1995--2003 Geert Uytterhoeven
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef _LINUX_ZORRO_H
+#define _LINUX_ZORRO_H
+
+
+#include <uapi/linux/zorro.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+
+#include <asm/zorro.h>
+
+
+ /*
+ * Zorro devices
+ */
+
+struct zorro_dev {
+ struct ExpansionRom rom;
+ zorro_id id;
+ struct zorro_driver *driver; /* which driver has allocated this device */
+ struct device dev; /* Generic device interface */
+ u16 slotaddr;
+ u16 slotsize;
+ char name[64];
+ struct resource resource;
+};
+
+#define to_zorro_dev(n) container_of(n, struct zorro_dev, dev)
+
+
+ /*
+ * Zorro bus
+ */
+
+extern struct bus_type zorro_bus_type;
+
+
+ /*
+ * Zorro device drivers
+ */
+
+struct zorro_driver {
+ struct list_head node;
+ char *name;
+ const struct zorro_device_id *id_table; /* NULL if wants all devices */
+ int (*probe)(struct zorro_dev *z, const struct zorro_device_id *id); /* New device inserted */
+ void (*remove)(struct zorro_dev *z); /* Device removed (NULL if not a hot-plug capable driver) */
+ struct device_driver driver;
+};
+
+#define to_zorro_driver(drv) container_of(drv, struct zorro_driver, driver)
+
+
+#define zorro_for_each_dev(dev) \
+ for (dev = &zorro_autocon[0]; dev < zorro_autocon+zorro_num_autocon; dev++)
+
+
+/* New-style probing */
+extern int zorro_register_driver(struct zorro_driver *);
+extern void zorro_unregister_driver(struct zorro_driver *);
+extern const struct zorro_device_id *zorro_match_device(const struct zorro_device_id *ids, const struct zorro_dev *z);
+static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z)
+{
+ return z->driver;
+}
+
+
+extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */
+extern struct zorro_dev *zorro_autocon;
+
+
+ /*
+ * Minimal information about a Zorro device, passed from bootinfo
+ * Only available temporarily, i.e. until initmem has been freed!
+ */
+
+struct zorro_dev_init {
+ struct ExpansionRom rom;
+ u16 slotaddr;
+ u16 slotsize;
+ u32 boardaddr;
+ u32 boardsize;
+};
+
+extern struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata;
+
+
+ /*
+ * Zorro Functions
+ */
+
+extern struct zorro_dev *zorro_find_device(zorro_id id,
+ struct zorro_dev *from);
+
+#define zorro_resource_start(z) ((z)->resource.start)
+#define zorro_resource_end(z) ((z)->resource.end)
+#define zorro_resource_len(z) (resource_size(&(z)->resource))
+#define zorro_resource_flags(z) ((z)->resource.flags)
+
+#define zorro_request_device(z, name) \
+ request_mem_region(zorro_resource_start(z), zorro_resource_len(z), name)
+#define zorro_release_device(z) \
+ release_mem_region(zorro_resource_start(z), zorro_resource_len(z))
+
+/* Similar to the helpers above, these manipulate per-zorro_dev
+ * driver-specific data. They are really just wrappers around the
+ * corresponding functions of the generic device structure.
+ */
+static inline void *zorro_get_drvdata (struct zorro_dev *z)
+{
+ return dev_get_drvdata(&z->dev);
+}
+
+static inline void zorro_set_drvdata (struct zorro_dev *z, void *data)
+{
+ dev_set_drvdata(&z->dev, data);
+}
+
+
+ /*
+ * Bitmask indicating portions of available Zorro II RAM that are unused
+ * by the system. Every bit represents a 64K chunk, for a maximum of 8MB
+ * (128 chunks, physical 0x00200000-0x009fffff).
+ *
+ * If you want to use (= allocate) portions of this RAM, you should clear
+ * the corresponding bits.
+ */
+
+extern DECLARE_BITMAP(zorro_unused_z2ram, 128);
+
+#define Z2RAM_START (0x00200000)
+#define Z2RAM_END (0x00a00000)
+#define Z2RAM_SIZE (0x00800000)
+#define Z2RAM_CHUNKSIZE (0x00010000)
+#define Z2RAM_CHUNKMASK (0x0000ffff)
+#define Z2RAM_CHUNKSHIFT (16)
+
+
+#endif /* _LINUX_ZORRO_H */
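
To show how the driver-side pieces above fit together, here is a skeleton sketch; the driver name is made up, and ZORRO_WILDCARD is assumed to be available from the UAPI header for a match-anything ID table. A real driver would pass demo_driver to zorro_register_driver() from its init path and call zorro_unregister_driver() on exit:

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/zorro.h>

static const struct zorro_device_id demo_zorro_ids[] = {
        { ZORRO_WILDCARD, 0 },  /* match any board, for illustration only */
        { 0 }
};

static int demo_probe(struct zorro_dev *z, const struct zorro_device_id *id)
{
        if (!zorro_request_device(z, "demo"))
                return -EBUSY;
        zorro_set_drvdata(z, NULL);     /* stash per-device state here */
        return 0;
}

static void demo_remove(struct zorro_dev *z)
{
        zorro_release_device(z);
}

static struct zorro_driver demo_driver = {
        .name     = "demo",
        .id_table = demo_zorro_ids,
        .probe    = demo_probe,
        .remove   = demo_remove,
};
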
diff --git a/include/linux/zpool.h b/include/linux/zpool.h
new file mode 100644
index 000000000..56529b34d
--- /dev/null
+++ b/include/linux/zpool.h
@@ -0,0 +1,107 @@
+/*
+ * zpool memory storage api
+ *
+ * Copyright (C) 2014 Dan Streetman
+ *
+ * This is a common frontend for the zbud and zsmalloc memory
+ * storage pool implementations. Typically, this is used to
+ * store compressed memory.
+ */
+
+#ifndef _ZPOOL_H_
+#define _ZPOOL_H_
+
+struct zpool;
+
+struct zpool_ops {
+ int (*evict)(struct zpool *pool, unsigned long handle);
+};
+
+/*
+ * Control how a handle is mapped. It will be ignored if the
+ * implementation does not support it. Its use is optional.
+ * Note that this does not refer to memory protection, it
+ * refers to how the memory will be copied in/out if copying
+ * is necessary during mapping; read-write is the safest as
+ * it copies the existing memory in on map, and copies the
+ * changed memory back out on unmap. Write-only does not copy
+ * in the memory and should only be used for initialization.
+ * If in doubt, use ZPOOL_MM_DEFAULT which is read-write.
+ */
+enum zpool_mapmode {
+ ZPOOL_MM_RW, /* normal read-write mapping */
+ ZPOOL_MM_RO, /* read-only (no copy-out at unmap time) */
+ ZPOOL_MM_WO, /* write-only (no copy-in at map time) */
+
+ ZPOOL_MM_DEFAULT = ZPOOL_MM_RW
+};
+
+struct zpool *zpool_create_pool(char *type, char *name,
+ gfp_t gfp, struct zpool_ops *ops);
+
+char *zpool_get_type(struct zpool *pool);
+
+void zpool_destroy_pool(struct zpool *pool);
+
+int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
+ unsigned long *handle);
+
+void zpool_free(struct zpool *pool, unsigned long handle);
+
+int zpool_shrink(struct zpool *pool, unsigned int pages,
+ unsigned int *reclaimed);
+
+void *zpool_map_handle(struct zpool *pool, unsigned long handle,
+ enum zpool_mapmode mm);
+
+void zpool_unmap_handle(struct zpool *pool, unsigned long handle);
+
+u64 zpool_get_total_size(struct zpool *pool);
+
+
+/**
+ * struct zpool_driver - driver implementation for zpool
+ * @type: name of the driver.
+ * @list: entry in the list of zpool drivers.
+ * @create: create a new pool.
+ * @destroy: destroy a pool.
+ * @malloc: allocate mem from a pool.
+ * @free: free mem from a pool.
+ * @shrink: shrink the pool.
+ * @map: map a handle.
+ * @unmap: unmap a handle.
+ * @total_size: get total size of a pool.
+ *
+ * This is created by a zpool implementation and registered
+ * with zpool.
+ */
+struct zpool_driver {
+ char *type;
+ struct module *owner;
+ atomic_t refcount;
+ struct list_head list;
+
+ void *(*create)(char *name, gfp_t gfp, struct zpool_ops *ops);
+ void (*destroy)(void *pool);
+
+ int (*malloc)(void *pool, size_t size, gfp_t gfp,
+ unsigned long *handle);
+ void (*free)(void *pool, unsigned long handle);
+
+ int (*shrink)(void *pool, unsigned int pages,
+ unsigned int *reclaimed);
+
+ void *(*map)(void *pool, unsigned long handle,
+ enum zpool_mapmode mm);
+ void (*unmap)(void *pool, unsigned long handle);
+
+ u64 (*total_size)(void *pool);
+};
+
+void zpool_register_driver(struct zpool_driver *driver);
+
+int zpool_unregister_driver(struct zpool_driver *driver);
+
+int zpool_evict(void *pool, unsigned long handle);
+
+#endif
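
From the consumer side, the functions declared above compose as in the following sketch (assuming a "zbud" backend has been registered); error handling is trimmed and the pool name is arbitrary:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zpool.h>

static int zpool_demo(const void *buf, size_t len)
{
        struct zpool *pool;
        unsigned long handle;
        void *addr;
        int ret;

        pool = zpool_create_pool("zbud", "demo", GFP_KERNEL, NULL);
        if (!pool)
                return -ENOMEM;

        ret = zpool_malloc(pool, len, GFP_KERNEL, &handle);
        if (ret)
                goto out;

        addr = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
        memcpy(addr, buf, len);         /* write-only: no copy-in on map */
        zpool_unmap_handle(pool, handle);

        addr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
        /* ... read the stored data ... */
        zpool_unmap_handle(pool, handle);

        zpool_free(pool, handle);
out:
        zpool_destroy_pool(pool);
        return ret;
}
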
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
new file mode 100644
index 000000000..1338190b5
--- /dev/null
+++ b/include/linux/zsmalloc.h
@@ -0,0 +1,52 @@
+/*
+ * zsmalloc memory allocator
+ *
+ * Copyright (C) 2011 Nitin Gupta
+ * Copyright (C) 2012, 2013 Minchan Kim
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the license that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+
+#ifndef _ZS_MALLOC_H_
+#define _ZS_MALLOC_H_
+
+#include <linux/types.h>
+
+/*
+ * zsmalloc mapping modes
+ *
+ * NOTE: These only make a difference when a mapped object spans pages.
+ * They also have no effect when PGTABLE_MAPPING is selected.
+ */
+enum zs_mapmode {
+ ZS_MM_RW, /* normal read-write mapping */
+ ZS_MM_RO, /* read-only (no copy-out at unmap time) */
+ ZS_MM_WO /* write-only (no copy-in at map time) */
+ /*
+ * NOTE: ZS_MM_WO should only be used for initializing new
+ * (uninitialized) allocations. Partial writes to already
+ * initialized allocations should use ZS_MM_RW to preserve the
+ * existing data.
+ */
+};
+
+struct zs_pool;
+
+struct zs_pool *zs_create_pool(char *name, gfp_t flags);
+void zs_destroy_pool(struct zs_pool *pool);
+
+unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+void zs_free(struct zs_pool *pool, unsigned long obj);
+
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ enum zs_mapmode mm);
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
+
+unsigned long zs_get_total_pages(struct zs_pool *pool);
+unsigned long zs_compact(struct zs_pool *pool);
+
+#endif
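
A minimal, hedged sketch of the allocate/map/unmap cycle declared above; zsmalloc handles are opaque, and the memory is only addressable between zs_map_object() and zs_unmap_object():

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zsmalloc.h>

static int zs_demo(const void *src, size_t len)
{
        struct zs_pool *pool;
        unsigned long handle;
        void *dst;

        pool = zs_create_pool("demo", GFP_KERNEL);
        if (!pool)
                return -ENOMEM;

        handle = zs_malloc(pool, len);
        if (!handle) {
                zs_destroy_pool(pool);
                return -ENOMEM;
        }

        dst = zs_map_object(pool, handle, ZS_MM_WO);    /* new, uninitialized */
        memcpy(dst, src, len);
        zs_unmap_object(pool, handle);

        zs_free(pool, handle);
        zs_destroy_pool(pool);
        return 0;
}
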
diff --git a/include/linux/zutil.h b/include/linux/zutil.h
new file mode 100644
index 000000000..6adfa9a6f
--- /dev/null
+++ b/include/linux/zutil.h
@@ -0,0 +1,106 @@
+/* zutil.h -- internal interface and configuration of the compression library
+ * Copyright (C) 1995-1998 Jean-loup Gailly.
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* WARNING: this file should *not* be used by applications. It is
+ part of the implementation of the compression library and is
+ subject to change. Applications should only use zlib.h.
+ */
+
+/* @(#) $Id: zutil.h,v 1.1 2000/01/01 03:32:23 davem Exp $ */
+
+#ifndef _Z_UTIL_H
+#define _Z_UTIL_H
+
+#include <linux/zlib.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+typedef unsigned char uch;
+typedef unsigned short ush;
+typedef unsigned long ulg;
+
+ /* common constants */
+
+#define STORED_BLOCK 0
+#define STATIC_TREES 1
+#define DYN_TREES 2
+/* The three kinds of block type */
+
+#define MIN_MATCH 3
+#define MAX_MATCH 258
+/* The minimum and maximum match lengths */
+
+#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
+
+ /* target dependencies */
+
+ /* Common defaults */
+
+#ifndef OS_CODE
+# define OS_CODE 0x03 /* assume Unix */
+#endif
+
+ /* functions */
+
+typedef uLong (*check_func) (uLong check, const Byte *buf,
+ uInt len);
+
+
+ /* checksum functions */
+
+#define BASE 65521L /* largest prime smaller than 65536 */
+#define NMAX 5552
+/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
+
+#define DO1(buf,i) {s1 += buf[i]; s2 += s1;}
+#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
+#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
+#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
+#define DO16(buf) DO8(buf,0); DO8(buf,8);
+
+/* ========================================================================= */
+/*
+ Update a running Adler-32 checksum with the bytes buf[0..len-1] and
+ return the updated checksum. If buf is NULL, this function returns
+ the required initial value for the checksum.
+ An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
+ much faster. Usage example:
+
+ uLong adler = adler32(0L, NULL, 0);
+
+ while (read_buffer(buffer, length) != EOF) {
+ adler = adler32(adler, buffer, length);
+ }
+ if (adler != original_adler) error();
+*/
+static inline uLong zlib_adler32(uLong adler,
+ const Byte *buf,
+ uInt len)
+{
+ unsigned long s1 = adler & 0xffff;
+ unsigned long s2 = (adler >> 16) & 0xffff;
+ int k;
+
+ if (buf == NULL) return 1L;
+
+ while (len > 0) {
+ k = len < NMAX ? len : NMAX;
+ len -= k;
+ while (k >= 16) {
+ DO16(buf);
+ buf += 16;
+ k -= 16;
+ }
+ if (k != 0) do {
+ s1 += *buf++;
+ s2 += s1;
+ } while (--k);
+ s1 %= BASE;
+ s2 %= BASE;
+ }
+ return (s2 << 16) | s1;
+}
+
+#endif /* _Z_UTIL_H */
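
Mirroring the usage comment above with the kernel inline name (keeping in mind that this header is internal to the zlib implementation), a sketch of a running checksum over two hypothetical buffers:

/* Passing a NULL buffer yields the required initial value (1). */
static uLong demo_adler(const Byte *a, uInt alen, const Byte *b, uInt blen)
{
        uLong adler = zlib_adler32(0L, NULL, 0);        /* initial value */

        adler = zlib_adler32(adler, a, alen);
        adler = zlib_adler32(adler, b, blen);
        return adler;
}
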